hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
775865625a49195ef2ad910de91bcf02170a00c3 | 22,678 | py | Python | models/common/distributed_executor.py | skydooms/tpu | 4553e1ed26763769768b9ba6744431908f7e37c0 | [
"Apache-2.0"
] | null | null | null | models/common/distributed_executor.py | skydooms/tpu | 4553e1ed26763769768b9ba6744431908f7e37c0 | [
"Apache-2.0"
] | null | null | null | models/common/distributed_executor.py | skydooms/tpu | 4553e1ed26763769768b9ba6744431908f7e37c0 | [
"Apache-2.0"
] | 1 | 2020-01-30T21:12:05.000Z | 2020-01-30T21:12:05.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom training loop for running Estimator-like TensorFlow models.
"""
from __future__ import absolute_import
from __future__ import division
#Standard imports
from __future__ import print_function
import json
import os
from absl import flags
from absl import logging
import tensorflow.compat.v2 as tf
from typing import Optional, Dict, Text, Callable, Union, Iterator, Any
from hyperparameters import common_hparams_flags
from hyperparameters import common_tpu_flags
from hyperparameters import params_dict
tf.enable_v2_behavior()
FLAGS = flags.FLAGS
SUMMARY_TXT = 'training_summary.txt'
def initialize_common_flags():
    """Register the command-line flags shared across all models."""
    # Shared hyperparameter and TPU flags come from the common helper modules.
    common_hparams_flags.define_common_hparams_flags()
    common_tpu_flags.define_common_tpu_flags()
    # Parameters for MultiWorkerMirroredStrategy
    worker_hosts_help = (
        'Comma-separated list of worker ip:port pairs for running '
        'multi-worker models with distribution strategy. The user would '
        'start the program on each host with identical value for this flag.')
    flags.DEFINE_string('worker_hosts', default=None, help=worker_hosts_help)
    flags.DEFINE_integer(
        'task_index', 0,
        'If multi-worker training, the task_index of this worker.')
def strategy_flags_dict():
    """Returns TPU related flags in a dictionary."""
    flag_values = {}
    # TPUStrategy related flags.
    flag_values['tpu'] = FLAGS.tpu
    # MultiWorkerMirroredStrategy related flags.
    flag_values['worker_hosts'] = FLAGS.worker_hosts
    flag_values['task_index'] = FLAGS.task_index
    return flag_values
def hparam_flags_dict():
    """Returns model params related flags in a dictionary."""
    # Every entry is simply the flag value under the same key.
    flag_names = ('data_dir', 'model_dir', 'train_batch_size',
                  'eval_batch_size', 'precision', 'config_file',
                  'params_override')
    return {name: getattr(FLAGS, name) for name in flag_names}
def primary_cpu_task(use_remote_tpu=False):
    """Returns primary CPU task to which input pipeline Ops are put."""
    # Remote Eager Borg job configures the TPU worker with job name 'worker'.
    if use_remote_tpu:
        return '/job:worker'
    return ''
def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix):
    """Saves model to model_dir with provided checkpoint prefix."""
    # The checkpoint object appends the step number to the returned path.
    saved_path = checkpoint.save(os.path.join(model_dir, checkpoint_prefix))
    logging.info('Saving model as TF checkpoint: %s', saved_path)
def _no_metric():
return None
class SummaryWriter(object):
    """Simple SummaryWriter for writing dictionary of metrics.

    Attributes:
      _writer: The tf.SummaryWriter.
    """

    def __init__(self, model_dir: Text, name: Text):
        """Inits SummaryWriter with paths.

        Arguments:
          model_dir: the model folder path.
          name: the summary subfolder name.
        """
        summary_dir = os.path.join(model_dir, name)
        self._writer = tf.summary.create_file_writer(summary_dir)

    def __call__(self, metrics: Union[Dict[Text, float], float], step: int):
        """Write metrics to summary with the given writer.

        Args:
          metrics: a dictionary of metrics values. Prefer dictionary.
          step: integer. The training step.
        """
        if not isinstance(metrics, dict):
            # Support scalar metric without name.
            logging.warning('Warning: summary writer prefer metrics as dictionary.')
            metrics = {'metric': metrics}
        with self._writer.as_default():
            for metric_name, metric_value in metrics.items():
                tf.summary.scalar(metric_name, metric_value, step=step)
            self._writer.flush()
class DistributedExecutor(object):
    """Interface to train and eval models with tf.distribute.Strategy.

    Arguments:
      strategy: an instance of tf.distribute.Strategy.
      params: Model configuration needed to run distribution strategy.
      model_fn: Keras model function. Signature:
        (params: ParamsDict) -> tf.keras.models.Model.
      loss_fn: loss function *builder*. Called with no arguments; must return
        the actual loss callable with signature
        (y_true: Tensor, y_pred: Tensor) -> Tensor.
      use_remote_tpu: If True, run on remote TPU mode.
    """

    def __init__(self,
                 strategy,
                 params,
                 model_fn,
                 loss_fn,
                 use_remote_tpu=False):
        self._params = params
        self._model_fn = model_fn
        self._loss_fn = loss_fn
        self._strategy = strategy
        self._use_remote_tpu = use_remote_tpu
        # Filename pattern for checkpoints; `{step}` is filled in at save time.
        self._checkpoint_name = 'ctl_step_{step}.ckpt'

    @property
    def checkpoint_name(self):
        """Returns default checkpoint name."""
        return self._checkpoint_name

    @checkpoint_name.setter
    def checkpoint_name(self, name):
        """Sets default checkpoint name pattern."""
        # Fixed docstring: the old one incorrectly described a summary writer.
        self._checkpoint_name = name

    def loss_fn(self):
        """Builds and returns the (y_true, y_pred) -> Tensor loss callable."""
        return self._loss_fn()

    def model_fn(self, params):
        """Builds the Keras model via the user-provided factory."""
        return self._model_fn(params)

    def _save_config(self, model_dir):
        """Save parameters to config files if model_dir is defined."""
        logging.info('Save config to model_dir %s.', model_dir)
        if model_dir:
            if not tf.io.gfile.exists(model_dir):
                tf.io.gfile.makedirs(model_dir)
            # Lock the params so later code cannot diverge from what was saved.
            self._params.lock()
            params_dict.save_params_dict_to_yaml(self._params,
                                                 model_dir + '/params.yaml')
        else:
            logging.warning('model_dir is empty, so skip the save config.')

    def _get_input_iterator(
        self, input_fn: Callable[[params_dict.ParamsDict], tf.data.Dataset],
        strategy: tf.distribute.Strategy) -> Optional[Iterator[Any]]:
        """Returns distributed dataset iterator.

        Args:
          input_fn: (params: dict) -> tf.data.Dataset.
          strategy: an instance of tf.distribute.Strategy.

        Returns:
          An iterator that yields input tensors, or None if input_fn is None.
        """
        if input_fn is None:
            return None
        # When training with multiple TPU workers, datasets needs to be cloned
        # across workers. Since Dataset instance cannot be cloned in eager mode,
        # we instead pass callable that returns a dataset.
        input_data = input_fn(self._params)
        return iter(strategy.experimental_distribute_dataset(input_data))

    # TODO(yeqing): Extract the train_step out as a class for re-usability.
    def _create_train_step(self):
        """Creates a distributed training step."""

        @tf.function
        def train_step(strategy, model, loss_fn, optimizer, iterator, metric=None):
            """Performs a distributed training step.

            Args:
              strategy: an instance of tf.distribute.Strategy.
              model: (Tensor, bool) -> Tensor. model function.
              loss_fn: (y_true: Tensor, y_pred: Tensor) -> Tensor.
              optimizer: tf.keras.optimizers.Optimizer.
              iterator: an iterator that yields input tensors.
              metric: tf.keras.metrics.Metric subclass.

            Returns:
              The loss tensor.
            """

            def _replicated_step(inputs):
                """Replicated training step."""
                inputs, labels = inputs
                with tf.GradientTape() as tape:
                    outputs = model(inputs, training=True)
                    prediction_loss = loss_fn(labels, outputs)
                    loss = tf.reduce_mean(prediction_loss)
                    # Scale so the cross-replica aggregate equals the global mean.
                    loss = loss / strategy.num_replicas_in_sync
                    if isinstance(metric, tf.keras.metrics.Metric):
                        metric.update_state(labels, outputs)
                    else:
                        logging.error('train metric is not an instance of '
                                      'tf.keras.metrics.Metric.')
                # NOTE(review): in the archived source `return loss` appeared
                # before the two lines below, which would have made the gradient
                # application unreachable. Reordered so gradients are applied on
                # every step, matching the upstream TF model-garden executor.
                grads = tape.gradient(loss, model.trainable_variables)
                optimizer.apply_gradients(zip(grads, model.trainable_variables))
                return loss

            per_replica_losses = strategy.experimental_run_v2(
                _replicated_step, args=(next(iterator),))
            # For reporting, we returns the mean of losses.
            loss = strategy.reduce(
                tf.distribute.ReduceOp.MEAN, per_replica_losses, axis=None)
            return loss

        return train_step

    def _create_test_step(self):
        """Creates a distributed test step."""

        @tf.function
        def test_step(strategy, model, metric, iterator):
            """Calculates evaluation metrics on distributed devices."""
            if not metric:
                logging.info('Skip test_step because metric is None (%s)', metric)
                return None, None
            if not isinstance(metric, tf.keras.metrics.Metric):
                raise ValueError(
                    'Metric must be an instance of tf.keras.metrics.Metric '
                    'for running in test_step. Actual {}'.format(metric))

            def _test_step_fn(inputs):
                """Replicated accuracy calculation."""
                inputs, labels = inputs
                model_outputs = model(inputs, training=False)
                metric.update_state(labels, model_outputs)
                return labels, model_outputs

            return strategy.experimental_run_v2(_test_step_fn, args=(next(iterator),))

        return test_step

    def train(self,
              train_input_fn: Callable[[params_dict.ParamsDict], tf.data.Dataset],
              eval_input_fn: Callable[[params_dict.ParamsDict],
                                      tf.data.Dataset] = None,
              model_dir: Text = None,
              steps_per_epoch: int = 1,
              steps_per_eval: int = 1,
              epochs: int = 1,
              train_metric_fn: Callable[[], Any] = _no_metric,
              eval_metric_fn: Callable[[], Any] = _no_metric,
              summary_writer_fn: Callable[[Text, Text],
                                          SummaryWriter] = SummaryWriter,
              save_config: bool = True):
        """Runs distributed training.

        Args:
          train_input_fn: (params: dict) -> tf.data.Dataset training data input
            function.
          eval_input_fn: (Optional) same type as train_input_fn. If not None, will
            trigger evaluting metric on eval data. If None, will not run eval step.
          model_dir: the folder path for model checkpoints.
          steps_per_epoch: train steps per epoch.
          steps_per_eval: test steps per evaluation.
          epochs: number of training epoches.
          train_metric_fn: metric_fn for evaluation in train_step.
          eval_metric_fn: metric_fn for evaluation in test_step.
          summary_writer_fn: function to create summary writer.
          save_config: bool. Whether to save params to model_dir.

        Returns:
          The trained model.
        """
        assert train_input_fn is not None
        if train_metric_fn and not callable(train_metric_fn):
            raise ValueError('if `train_metric_fn` is specified, '
                             'train_metric_fn must be a callable.')
        if eval_metric_fn and not callable(eval_metric_fn):
            raise ValueError('if `eval_metric_fn` is specified, '
                             'eval_metric_fn must be a callable.')
        if save_config:
            self._save_config(model_dir)
        params = self._params
        strategy = self._strategy
        # To reduce unnecessary send/receive input pipeline operation, we place
        # input pipeline ops in worker task.
        with tf.device(primary_cpu_task(self._use_remote_tpu)):
            train_iterator = self._get_input_iterator(train_input_fn, strategy)
            train_step = self._create_train_step()
            eval_iterator = self._get_input_iterator(eval_input_fn, strategy)
            with strategy.scope():
                total_training_steps = (steps_per_epoch * epochs)
                # To correctly place the model weights on accelerators,
                # model and optimizer should be created in scope.
                model = self.model_fn(params.as_dict())
                if not hasattr(model, 'optimizer'):
                    raise ValueError('User should set optimizer attribute to model '
                                     'inside `model_fn`.')
                optimizer = model.optimizer
                # Training loop starts here.
                # TODO(yeqing): Implementing checkpoints with Callbacks.
                checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
                latest_checkpoint_file = tf.train.latest_checkpoint(model_dir)
                if latest_checkpoint_file:
                    logging.info(
                        'Checkpoint file %s found and restoring from '
                        'checkpoint', latest_checkpoint_file)
                    checkpoint.restore(latest_checkpoint_file)
                    logging.info('Loading from checkpoint file completed')
                # optimizer.iterations counts applied steps, so a restored run
                # resumes from the checkpointed step count.
                current_step = optimizer.iterations.numpy()
                checkpoint_name = self.checkpoint_name
                eval_metric = eval_metric_fn()
                train_metric = train_metric_fn()
                train_summary_writer = summary_writer_fn(model_dir, 'eval_train')
                test_summary_writer = summary_writer_fn(model_dir, 'eval_test')
                # Continue training loop.
                while current_step < total_training_steps:
                    current_step += 1
                    train_loss = train_step(
                        strategy,
                        model,
                        self.loss_fn(),
                        optimizer,
                        train_iterator,
                        metric=train_metric)
                    train_loss = train_loss.numpy().astype(float)
                    if train_metric:
                        train_metric_result = train_metric.result()
                        if isinstance(train_metric, tf.keras.metrics.Metric):
                            train_metric_result = tf.nest.map_structure(
                                lambda x: x.numpy().astype(float), train_metric_result)
                        if not isinstance(train_metric_result, dict):
                            train_metric_result = {'metric': train_metric_result}
                        train_metric_result['loss'] = train_loss
                    else:
                        train_metric_result = {'loss': train_loss}
                    logging.info('Train Step: %d/%d / loss = %s / training metric = %s',
                                 current_step, total_training_steps, train_loss,
                                 train_metric_result)
                    train_summary_writer(
                        metrics=train_metric_result, step=optimizer.iterations)
                    # Saves model checkpoints and run validation steps at every epoch end.
                    if current_step % steps_per_epoch == 0:
                        # To avoid repeated model saving, we do not save after the last
                        # step of training.
                        if current_step < total_training_steps:
                            _save_checkpoint(checkpoint, model_dir,
                                             checkpoint_name.format(step=current_step))
                        if eval_input_fn and eval_metric:
                            eval_metric_result = self._run_evaluation(strategy, current_step,
                                                                      model, eval_metric,
                                                                      eval_iterator,
                                                                      steps_per_eval)
                            logging.info('Step: %s evaluation metric = %s.', current_step,
                                         eval_metric_result)
                            test_summary_writer(
                                metrics=eval_metric_result, step=optimizer.iterations)
                        # Re-initialize evaluation metric, except the last step.
                        if eval_metric and current_step < total_training_steps:
                            eval_metric.reset_states()
                        if train_metric and current_step < total_training_steps:
                            train_metric.reset_states()
                # Always save the final state, then run one last evaluation.
                _save_checkpoint(checkpoint, model_dir,
                                 checkpoint_name.format(step=current_step))
                if eval_input_fn and eval_metric:
                    logging.info('Running final evaluation after training is complete.')
                    eval_metric_result = self._run_evaluation(strategy, current_step,
                                                              model, eval_metric,
                                                              eval_iterator,
                                                              steps_per_eval)
                    logging.info('Final evaluation metric = %s.', eval_metric_result)
                    test_summary_writer(
                        metrics=eval_metric_result, step=optimizer.iterations)
                return model

    def _run_evaluation(self, strategy, current_training_step, model, metric,
                        test_iterator, eval_steps):
        """Runs validation steps and aggregate metrics."""
        if not test_iterator or not metric:
            logging.warning(
                'Both test_iterator (%s) and metrics (%s) must not be None.',
                test_iterator, metric)
            return None
        logging.info('Running evaluation after step: %s.', current_training_step)
        test_step = self._create_test_step()
        for _ in range(eval_steps):
            # The metric accumulates state across all eval steps.
            test_step(strategy, model, metric, test_iterator)
        metric_result = metric.result()
        if isinstance(metric, tf.keras.metrics.Metric):
            metric_result = metric_result.numpy().astype(float)
        logging.info('Step: [%d] Validation metric = %f', current_training_step,
                     metric_result)
        return metric_result

    def eval(self):
        # Bug fix: previously *returned* the exception instance instead of
        # raising it, so callers silently got an exception object back.
        raise NotImplementedError('Unimplemented function.')

    def predict(self):
        # Bug fix: same return-vs-raise defect (plus message typo) as eval().
        raise NotImplementedError('Unimplemented function.')
# TODO(yeqing): Add unit test for MultiWorkerMirroredStrategy.
class ExecutorBuilder(object):
    """Builder of DistributedExecutor.

    Example 1: Builds an executor with supported Strategy.
      builder = ExecutorBuilder(
          strategy_type='tpu',
          strategy_config={'tpu': '/bns/xxx'})
      dist_executor = builder.build_executor(
          params=params,
          model_fn=my_model_fn,
          loss_fn=my_loss_fn,
          metric_fn=my_metric_fn)

    Example 2: Builds an executor with customized Strategy.
      builder = ExecutorBuilder()
      builder.strategy = <some customized Strategy>
      dist_executor = builder.build_executor(
          params=params,
          model_fn=my_model_fn,
          loss_fn=my_loss_fn,
          metric_fn=my_metric_fn)

    Example 3: Builds a customized executor with customized Strategy.
      class MyDistributedExecutor(DistributedExecutor):
        # implementation ...

      builder = ExecutorBuilder()
      builder.strategy = <some customized Strategy>
      dist_executor = builder.build_executor(
          class_ctor=MyDistributedExecutor,
          params=params,
          model_fn=my_model_fn,
          loss_fn=my_loss_fn,
          metric_fn=my_metric_fn)

    Args:
      strategy_type: string. One of 'tpu', 'mirrored', 'multi_worker_mirrored'. If
        None. User is responsible to set the strategy before calling
        build_executor(...).
      strategy_config: necessary config for constructing the proper Strategy.
        Check strategy_flags_dict() for examples of the structure.
    """

    def __init__(self, strategy_type=None, strategy_config=None):
        self._strategy_config = strategy_config
        self._strategy = self._build_strategy(strategy_type)

    @property
    def strategy(self):
        """Returns the distribution strategy in use."""
        return self._strategy

    @strategy.setter
    def strategy(self, new_strategy):
        """Overrides the distribution strategy to use."""
        # Fixed docstrings: the old ones were copy-pasted from other accessors.
        self._strategy = new_strategy

    def _build_strategy(self, strategy_type):
        """Builds tf.distribute.Strategy instance.

        Args:
          strategy_type: string. One of 'tpu', 'mirrored', 'multi_worker_mirrored'.

        Returns:
          An tf.distribute.Strategy object. Returns None if strategy_type is None.
        """
        if strategy_type is None:
            return None
        if strategy_type == 'tpu':
            return self._build_tpu_strategy()
        elif strategy_type == 'mirrored':
            return self._build_mirrored_strategy()
        elif strategy_type == 'multi_worker_mirrored':
            # Bug fix: this documented strategy type previously fell through to
            # NotImplementedError even though the builder method exists below.
            return self._build_multiworker_mirrored_strategy()
        else:
            raise NotImplementedError('Unsupported accelerator type "%s"' %
                                      strategy_type)

    def _build_mirrored_strategy(self):
        """Builds a MirroredStrategy object."""
        return tf.distribute.MirroredStrategy()

    def _build_tpu_strategy(self):
        """Builds a TPUStrategy object."""
        # Bug fix: `tpu` was an undefined name here (NameError at runtime); it
        # must come from the strategy config, mirroring the multi-worker builder.
        tpu = self._strategy_config.tpu
        logging.info('Use TPU at %s', tpu if tpu is not None else '')
        cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=tpu)
        tf.config.experimental_connect_to_host(cluster_resolver.master())  # pylint: disable=line-too-long
        # TODO(yeqing): Add logic to handle TPU pods connections.
        tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
        strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver)
        return strategy

    def _build_multiworker_mirrored_strategy(self):
        """Builds a MultiWorkerMirroredStrategy object."""
        worker_hosts = self._strategy_config.worker_hosts
        if worker_hosts is not None:
            # Set TF_CONFIG environment variable so each worker knows the full
            # cluster layout and its own position in it.
            worker_hosts = worker_hosts.split(',')
            task_index = self._strategy_config.task_index
            os.environ['TF_CONFIG'] = json.dumps({
                'cluster': {
                    'worker': worker_hosts
                },
                'task': {
                    'type': 'worker',
                    'index': task_index
                }
            })
        multiworker_strategy = (
            tf.distribute.experimental.MultiWorkerMirroredStrategy())
        return multiworker_strategy

    def build_executor(self,
                       class_ctor=DistributedExecutor,
                       params=None,
                       model_fn=None,
                       loss_fn=None,
                       **kwargs):
        """Creates an executor according to strategy type.

        See doc string of the DistributedExecutor.__init__ for more information
        of the input arguments.

        Args:
          class_ctor: A constructor of executor (default: DistributedExecutor).
          params: ParamsDict, all the model parameters and runtime parameters.
          model_fn: Keras model function.
          loss_fn: loss function.
          **kwargs: other arguments to the executor constructor.

        Returns:
          An instance of DistributedExecutor or its subclass.
        """
        if self._strategy is None:
            raise ValueError('`strategy` should not be None. You need to specify '
                             '`strategy_type` in the builder constructor or directly '
                             'set the `strategy` property of the builder.')
        if 'use_remote_tpu' not in kwargs:
            # Remote TPU mode only applies when a TPUStrategy is in play and an
            # actual TPU address was configured.
            use_remote_tpu = (
                isinstance(self._strategy, tf.distribute.experimental.TPUStrategy) and
                bool(self._strategy_config.tpu))
            kwargs['use_remote_tpu'] = use_remote_tpu
        return class_ctor(
            strategy=self._strategy,
            params=params,
            model_fn=model_fn,
            loss_fn=loss_fn,
            **kwargs)
| 36.695793 | 102 | 0.661654 |
36720d90a5cec8e4350d34241c628abe41b60fd5 | 3,522 | py | Python | lnbits/core/views/generic.py | eillarra/lnbits | d8da88aefa43d443abf4dffadeb5ce41c0aac77b | [
"MIT"
] | null | null | null | lnbits/core/views/generic.py | eillarra/lnbits | d8da88aefa43d443abf4dffadeb5ce41c0aac77b | [
"MIT"
] | null | null | null | lnbits/core/views/generic.py | eillarra/lnbits | d8da88aefa43d443abf4dffadeb5ce41c0aac77b | [
"MIT"
] | null | null | null | from quart import g, abort, redirect, request, render_template, send_from_directory, url_for
from http import HTTPStatus
from os import path
from lnbits.core import core_app
from lnbits.decorators import check_user_exists, validate_uuids
from lnbits.settings import LNBITS_ALLOWED_USERS, SERVICE_FEE
from ..crud import (
create_account,
get_user,
update_user_extension,
create_wallet,
delete_wallet,
)
@core_app.route("/favicon.ico")
async def favicon():
return await send_from_directory(path.join(core_app.root_path, "static"), "favicon.ico")
@core_app.route("/")
async def home():
return await render_template("core/index.html", lnurl=request.args.get("lightning", None))
@core_app.route("/extensions")
@validate_uuids(["usr"], required=True)
@check_user_exists()
async def extensions():
extension_to_enable = request.args.get("enable", type=str)
extension_to_disable = request.args.get("disable", type=str)
if extension_to_enable and extension_to_disable:
abort(HTTPStatus.BAD_REQUEST, "You can either `enable` or `disable` an extension.")
if extension_to_enable:
update_user_extension(user_id=g.user.id, extension=extension_to_enable, active=1)
elif extension_to_disable:
update_user_extension(user_id=g.user.id, extension=extension_to_disable, active=0)
return await render_template("core/extensions.html", user=get_user(g.user.id))
@core_app.route("/wallet")
@validate_uuids(["usr", "wal"])
async def wallet():
user_id = request.args.get("usr", type=str)
wallet_id = request.args.get("wal", type=str)
wallet_name = request.args.get("nme", type=str)
service_fee = int(SERVICE_FEE) if int(SERVICE_FEE) == SERVICE_FEE else SERVICE_FEE
# just wallet_name: create a new user, then create a new wallet for user with wallet_name
# just user_id: return the first user wallet or create one if none found (with default wallet_name)
# user_id and wallet_name: create a new wallet for user with wallet_name
# user_id and wallet_id: return that wallet if user is the owner
# nothing: create everything
if not user_id:
user = get_user(create_account().id)
else:
user = get_user(user_id) or abort(HTTPStatus.NOT_FOUND, "User does not exist.")
if LNBITS_ALLOWED_USERS and user_id not in LNBITS_ALLOWED_USERS:
abort(HTTPStatus.UNAUTHORIZED, "User not authorized.")
if not wallet_id:
if user.wallets and not wallet_name:
wallet = user.wallets[0]
else:
wallet = create_wallet(user_id=user.id, wallet_name=wallet_name)
return redirect(url_for("core.wallet", usr=user.id, wal=wallet.id))
if wallet_id not in user.wallet_ids:
abort(HTTPStatus.FORBIDDEN, "Not your wallet.")
return await render_template(
"core/wallet.html", user=user, wallet=user.get_wallet(wallet_id), service_fee=service_fee
)
@core_app.route("/deletewallet")
@validate_uuids(["usr", "wal"], required=True)
@check_user_exists()
async def deletewallet():
wallet_id = request.args.get("wal", type=str)
user_wallet_ids = g.user.wallet_ids
if wallet_id not in user_wallet_ids:
abort(HTTPStatus.FORBIDDEN, "Not your wallet.")
else:
delete_wallet(user_id=g.user.id, wallet_id=wallet_id)
user_wallet_ids.remove(wallet_id)
if user_wallet_ids:
return redirect(url_for("core.wallet", usr=g.user.id, wal=user_wallet_ids[0]))
return redirect(url_for("core.home"))
| 34.871287 | 103 | 0.720613 |
3f23f6dd72f78e19bba580660d6fe75cd625da4c | 5,681 | py | Python | opencga-client/src/main/python/pyOpenCGA/pyopencga/rest_clients/clinical_client.py | fizquierdo/opencga | 9e307624bbcabe227e307686f1b5b83e5941f34d | [
"Apache-2.0"
] | null | null | null | opencga-client/src/main/python/pyOpenCGA/pyopencga/rest_clients/clinical_client.py | fizquierdo/opencga | 9e307624bbcabe227e307686f1b5b83e5941f34d | [
"Apache-2.0"
] | null | null | null | opencga-client/src/main/python/pyOpenCGA/pyopencga/rest_clients/clinical_client.py | fizquierdo/opencga | 9e307624bbcabe227e307686f1b5b83e5941f34d | [
"Apache-2.0"
] | null | null | null | from pyopencga.rest_clients._parent_rest_clients import _ParentRestClient, _ParentBasicCRUDClient, _ParentAclRestClient
class Interpretations(_ParentRestClient):
    """
    This class contains the Interpretations client with methods for the
    Analysis - Clinical webservices.

    Every method is a thin one-to-one wrapper over an OpenCGA REST endpoint
    (listed in its docstring); the HTTP mechanics live in _ParentRestClient.
    """
    def __init__(self, configuration, session_id=None, login_handler=None, *args, **kwargs):
        # All endpoints of this client hang off the analysis/clinical category.
        _category = 'analysis/clinical'
        super(Interpretations, self).__init__(configuration, _category, session_id, login_handler, *args, **kwargs)
    def tool_tiering(self, **options):
        """
        GEL Tiering interpretation analysis (PENDING)
        URL: /{apiVersion}/analysis/clinical/interpretation/tools/tiering
        """
        return self._get('interpretation', subcategory='tools/tiering', **options)
    def tool_team(self, **options):
        """
        TEAM interpretation analysis
        URL: /{apiVersion}/analysis/clinical/interpretation/tools/team
        """
        return self._get('interpretation', subcategory='tools/team', **options)
    def tool_custom(self, **options):
        """
        Interpretation custom analysis
        URL: /{apiVersion}/analysis/clinical/interpretation/tools/custom
        """
        return self._get('interpretation', subcategory='tools/custom', **options)
    def stats(self, **options):
        """
        Clinical interpretation analysis stats
        URL: /{apiVersion}/analysis/clinical/interpretation/stats
        """
        return self._get('interpretation', subcategory='stats', **options)
    def query(self, **options):
        """
        Query for reported variants
        URL: /{apiVersion}/analysis/clinical/interpretation/query
        """
        return self._get('interpretation', subcategory='query', **options)
    def index(self, **options):
        """
        Index clinical analysis interpretations in the clinical variant database
        URL: /{apiVersion}/analysis/clinical/interpretation/index
        """
        return self._get('interpretation', subcategory='index', **options)
    def update(self, clinical_analysis, data, action, **options):
        """
        Add or remove Interpretations to/from a Clinical Analysis
        URL: /{apiVersion}/analysis/clinical/{clinicalAnalysis}/interpretations/update

        :param clinical_analysis: clinical analysis id
        :param data: interpretation payload sent as the POST body
        :param action: Action to be performed if the array of interpretations
            is being updated [ADD, REMOVE]
        """
        return self._post('interpretations', query_id=clinical_analysis, subcategory='update', data=data, action=action, **options)
    def update_interpretation(self, clinical_analysis, interpretation, data, **options):
        """
        Update Interpretation fields
        URL: /{apiVersion}/analysis/clinical/{clinicalAnalysis}/interpretations/{interpretation}/update

        :param clinical_analysis: clinical analysis id
        :param interpretation: interpretation id
        :param data: updated interpretation fields sent as the POST body
        """
        # NOTE(review): unlike update_comments below, subcategory precedes
        # subquery_id here; _ParentRestClient takes both as keywords, so the
        # resulting URL is presumably the same -- confirm against the parent.
        return self._post('interpretations', query_id=clinical_analysis, subcategory='update',
                          subquery_id=interpretation, data=data, **options)
    def update_comments(self, clinical_analysis, interpretation, data, action, **options):
        """
        Update comments of an Interpretation
        URL: /{apiVersion}/analysis/clinical/{clinicalAnalysis}/interpretations/{interpretation}/comments/update

        :param clinical_analysis: clinical analysis id
        :param interpretation: interpretation id
        :param data: comment payload sent as the POST body
        :param action: Action to be performed [ADD, SET, REMOVE]
        """
        return self._post('interpretations', query_id=clinical_analysis, subquery_id=interpretation,
                          subcategory='comments/update', data=data, action=action, **options)
    def update_reported_variants(self, clinical_analysis, interpretation, data, **options):
        """
        Update reported variants of an interpretation
        URL: /{apiVersion}/analysis/clinical/{clinicalAnalysis}/interpretations/{interpretation}/reportedVariants/update

        :param clinical_analysis: clinical analysis id
        :param interpretation: interpretation id
        :param data: reported-variant payload sent as the POST body
        """
        return self._post('interpretations', query_id=clinical_analysis, subquery_id=interpretation,
                          subcategory='reportedVariants/update', data=data, **options)
class Clinical(_ParentBasicCRUDClient, _ParentAclRestClient):
    """
    This class contains methods for the Analysis - Clinical webservices.

    CRUD and ACL operations come from the two parent classes; this class adds
    the clinical-specific sub-clients (currently only Interpretations).
    """
    def __init__(self, configuration, session_id=None, login_handler=None, *args, **kwargs):
        _category = 'analysis/clinical'
        super(Clinical, self).__init__(configuration, _category, session_id, login_handler, *args, **kwargs)
        # Kept on the instance so sub-clients can be built with the same state.
        self.configuration = configuration
        self.session_id = session_id
        self.login_handler = login_handler
        self._create_clients()
    def _create_clients(self):
        ## Reset all sub-clients first so attributes always exist, even for
        ## the ones that are not implemented yet.
        self.interpretations = None
        self.reports = None
        self.cva = None
        ## [TODO] convert to @properties
        ## [@dgp] Should I add auto_refresh = self.auto_refresh ??
        self.interpretations = Interpretations(self.configuration, self.session_id, self.login_handler)
        ## Pending sub-clients, not implemented yet:
        ## self.reports = Reports(configuration, session_id, login_handler, *args, **kwargs)
        ## self.cva = CVAs(configuration, session_id, login_handler, *args, **kwargs)
        self.clients = [self.interpretations]
        for client in self.clients:
            # only retry the ones with objects (instantiated clients)
            if client is not None:
                # Share this client's retry callback with each sub-client.
                client.on_retry = self.on_retry
    def search(self, **options):
        """
        Clinical analysis search.
        URL: /{apiVersion}/analysis/clinical/search
        """
        return self._get('search', **options)
fe4f36d24580777043b16d7c33f47df7c988c236 | 2,062 | py | Python | demos/test_slippage_analysis.py | vedb/data_analysis | b46f58ba424680353d3abd0014a7d0a339bf6e6c | [
"MIT"
] | null | null | null | demos/test_slippage_analysis.py | vedb/data_analysis | b46f58ba424680353d3abd0014a7d0a339bf6e6c | [
"MIT"
] | null | null | null | demos/test_slippage_analysis.py | vedb/data_analysis | b46f58ba424680353d3abd0014a7d0a339bf6e6c | [
"MIT"
] | null | null | null | import os
import sys
import glob
import yaml
from data_analysis import scene
# Directory for the recording sessions
parameters_fpath = "/home/kamran/Code/data_analysis/config/visualization_parameters.yaml"
sessions_fpath = "/home/kamran/Code/data_analysis/config/slippage_sessions_list.yaml"
def parse_pipeline_parameters(parameters_fpath):
    """Loads a YAML parameter file.

    Args:
        parameters_fpath: path to the YAML file to read.

    Returns:
        The parsed content exactly as ``yaml.safe_load`` produces it (a dict
        for a mapping document; ``None`` for an empty file).
    """
    # The previous version first bound an empty dict that was always
    # overwritten before being returned (dead store); the behavior is
    # otherwise unchanged.
    with open(parameters_fpath, "r") as stream:
        return yaml.safe_load(stream)
if __name__ == "__main__":
import numpy as np
param_dict = parse_pipeline_parameters(parameters_fpath)
sessions_dict = parse_pipeline_parameters(sessions_fpath)
session_directory = param_dict['directory']['session_directory']
gaze_directory = param_dict['directory']['gaze_directory']
# sessions = glob.glob(gaze_directory + "*")
sessions = sessions_dict['sessions']
print("total number of sessions", len(sessions))
print("all sessions: ", sessions)
val_points = []
val_gaze = []
cal_points = []
cal_gaze = []
for session_id in sessions:
session_folder = os.path.join(session_directory,session_id)
result = False
print("running analysis for:", session_folder)
# try:
result = scene.pipelines.detect_markers(
session_directory, session_folder , param_dict)
# except:
# print("Failed for session %s " % session_folder)
print("Gaze Calibratrion Result: ", result)
# imgpoints, image_list, marker_found_index
# val_points.append(result[0])
# val_gaze.append(result[1])
# cal_points.append(result[2])
# cal_gaze.append(result[3])
# file_name = "/home/kamran/Desktop/gaze_accuracy.npz"
# final_result = [np.asarray(val_points), np.asarray(val_gaze), np.asarray(cal_points), np.asarray(cal_gaze)]
# np.savez(file_name, validation_points = final_result[0], validation_gaze = final_result[1], calibration_points = final_result[2], calibration_gaze = final_result[3])
print("Final Result File Saved") | 42.958333 | 172 | 0.70805 |
8caa6c3f82e6961ac39ea2d5e2979e35d2fc5d65 | 1,566 | py | Python | M3_feature_zone/retipy/test/test_tortuosity.py | rmaphoh/AutoMorph | 0c82ce322c6cd8bd80f06bbd85c5c2542e534cb8 | [
"Apache-2.0"
] | 1 | 2022-01-28T00:56:23.000Z | 2022-01-28T00:56:23.000Z | M3_feature_zone/retipy/test/test_tortuosity.py | rmaphoh/AutoMorph | 0c82ce322c6cd8bd80f06bbd85c5c2542e534cb8 | [
"Apache-2.0"
] | null | null | null | M3_feature_zone/retipy/test/test_tortuosity.py | rmaphoh/AutoMorph | 0c82ce322c6cd8bd80f06bbd85c5c2542e534cb8 | [
"Apache-2.0"
] | null | null | null | # Retipy - Retinal Image Processing on Python
# Copyright (C) 2017 Alejandro Valdes
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""tests for tortuosity module"""
from unittest import TestCase
from retipy.retina import Retina
from retipy import tortuosity as t
class TestTortuosity(TestCase):
    """Checks the tortuosity measures computed on a bundled sample image."""
    _resources = 'retipy/resources/images/'
    _image_file_name = 'img01.png'
    _image_path = _resources + _image_file_name
    def setUp(self):
        # Every test operates on the same sample retina image.
        self.image = Retina(None, self._image_path)
    def test_density(self):
        # The density measure must identify itself and yield three values.
        measure = t.density(self.image.np_image)
        self.assertEqual(measure["uri"], "tortuosity_density", "uri does not match")
        self.assertEqual(len(measure["data"]), 3, "data size does not match")
    def test_fractal(self):
        # The fractal measure must identify itself and yield sixteen values.
        measure = t.fractal(self.image.np_image)
        self.assertEqual(measure["uri"], "fractal_dimension", "uri does not match")
        self.assertEqual(len(measure["data"]), 16, "data size does not match")
| 38.195122 | 83 | 0.724777 |
83e80e5136316423808327a2c515d908d1f3ee6c | 190 | py | Python | textattack/attack_results/successful_attack_result.py | StatNLP/discretezoo | 565552b894a5c9632ac7b949d61a6f71123031e4 | [
"MIT"
] | null | null | null | textattack/attack_results/successful_attack_result.py | StatNLP/discretezoo | 565552b894a5c9632ac7b949d61a6f71123031e4 | [
"MIT"
] | null | null | null | textattack/attack_results/successful_attack_result.py | StatNLP/discretezoo | 565552b894a5c9632ac7b949d61a6f71123031e4 | [
"MIT"
] | 1 | 2022-03-25T16:45:12.000Z | 2022-03-25T16:45:12.000Z | """
Successful Attack Result
==========================
"""
from .attack_result import AttackResult
class SuccessfulAttackResult(AttackResult):
    """The result of a successful attack."""
    # Marker subclass: adds no state or behaviour of its own.  Presumably
    # callers distinguish result kinds by type — confirm against the other
    # AttackResult subclasses before relying on this.
| 17.272727 | 43 | 0.652632 |
d238a2d9174069bb6daccc36724009e623221de4 | 6,225 | py | Python | gym_decomp/scaii/__init__.py | UserAB1236872/gym_decomp | 1c3722b3ae81932659cc03e77c3ee8a61882ca76 | [
"BSD-3-Clause"
] | null | null | null | gym_decomp/scaii/__init__.py | UserAB1236872/gym_decomp | 1c3722b3ae81932659cc03e77c3ee8a61882ca76 | [
"BSD-3-Clause"
] | 3 | 2021-05-21T15:20:20.000Z | 2022-02-10T00:05:31.000Z | gym_decomp/scaii/__init__.py | UserAB1236872/gym_decomp | 1c3722b3ae81932659cc03e77c3ee8a61882ca76 | [
"BSD-3-Clause"
] | null | null | null | """
Wrappers for SCAII scenarios, primarily four towers derivatives.
"""
import os
from pathlib import Path
import logging
import gym
from gym import spaces
import gym_decomp.scaii.bootstrap as scaii_bootstrap
# Presumably verifies the SCAII backend is installed/configured; it must run
# before the scaii.* imports below (hence the mid-module import position) —
# confirm in gym_decomp.scaii.bootstrap.
scaii_bootstrap.check_setup()
from scaii.env.sky_rts.env.scenarios.city_attack import CityAttack
from scaii.env.explanation import Explanation, BarChart, BarGroup, Bar
# Directory where the SCAII backend writes replay files (see FourTowersV1).
REPLAY_PATH = (Path.home() / ".scaii/replays/")
class FourTowersV1(gym.Env):
    """
    The SCAII City Attack scenario (an expanded Four Towers with cities and enemy tanks)
    """
    def __init__(self):
        # Name-mangled handles to the SCAII backend and cached state.
        self.__world = CityAttack()
        self.__record = False
        self.recording_ep = 0  # counts recorded episodes; names replay files
        self.__flatten_state = True
        self.__curr_state = None
        self.action_space = spaces.Discrete(4)  # one action per quadrant
        # If old replays exist, move the whole replay directory aside to a
        # fresh ".bak" (".bak.bak", ...) name so new replays start clean.
        if REPLAY_PATH.exists() and os.listdir(REPLAY_PATH):
            logging.warning(
                "Warning, replays in .scaii/replays may get clobbered,\
                 appending .bak and making a fresh directory")
            num_baks = 1
            basepath = REPLAY_PATH
            suffix = '.bak'*num_baks
            target = basepath.with_suffix(suffix)
            while target.exists():
                num_baks += 1
                suffix = '.bak'*num_baks
                target = basepath.with_suffix(suffix)
            basepath.rename(target)
    def change_map(self, map_name):
        """
        Change the map to another one in the Sky-RTS backend maps directory.
        Note, this does not and cannot verify properly that you load another Four Towers scenario!
        Make sure you're loading it properly!
        This is primarily useful for loading hand-coded or
        "static" versions for testing hand-tailored states.
        """
        # Drop the cached state; reset() repopulates it from the new world.
        self.__curr_state = None
        self.__world = CityAttack(map_name=map_name)
        return self.reset()
    @property
    def flatten_state(self):
        """
        Whether to `.flatten()` the state in an observation.
        By default this is true, but you can deactivate it to get
        the true 40x40x8 map (e.g. if you want to use convolution)
        """
        return self.__flatten_state
    @flatten_state.setter
    def flatten_state(self, val):
        self.__flatten_state = val
    @property
    def record(self):
        """
        Whether to dump this to a SCAII replay file
        """
        return self.__record
    @record.setter
    def record(self, val):
        self.__record = val
    @property
    def action_meanings(self):
        """
        The meanings of the actions we can take, in order of index
        """
        return ["Q4", "Q1", "Q3", "Q2"]
    @property
    def reward_types(self):
        """
        The set of possible reward types, presented as a list
        """
        return [*self.__world.reward_types()]
    @property
    def curr_state(self):
        """
        The current state. Internally this is not flattened,
        so if you temporarily turn off `flatten_state`
        you can get a peek at the current state unflattened if you need it.
        However, if you want this info just once, it's recommended to just use `unflattened_state`
        """
        if self.__flatten_state:
            return self.__curr_state.flatten()
        else:
            return self.__curr_state
    @property
    def unflattened_state(self):
        """
        The raw, unflattened state, if you need it
        """
        return self.__curr_state
    def render(self, mode='print'):
        # No graphical rendering: simply hand back the raw state object.
        return self.unflattened_state
    def reset(self):
        # Create the replay directory lazily the first time we record.
        if self.record and not REPLAY_PATH.exists():
            REPLAY_PATH.mkdir()
        if self.record:
            # Rotate the previous episode's "replay.scr" to a numbered file
            # so the new episode does not overwrite it.
            replay_file = REPLAY_PATH / "replay.scr"
            if replay_file.exists():
                target = REPLAY_PATH / ("replay%d.scr" % self.recording_ep)
                (REPLAY_PATH / "replay.scr").replace(target)
            self.__curr_state = self.__world.reset(
                record=self.record).state
        else:
            self.__curr_state = self.__world.reset().state
        if self.record:
            self.recording_ep += 1
        return self.curr_state
    # pylint: disable=W0221
    def step(self, action, q_vals=None):
        """
        The normal step function overridden from gym.Env, however, it has a
        `q_vals` optional argument for sending chart data back to the replay (if applicable).
        If specified this should be in the form of a map with one key for each reward type.
        Each of these entries should contain a list, in order, for each of the four actions.
        """
        assert action in range(0, 4)
        a = self.__world.new_action()
        # pylint: disable=E1101
        a.attack_quadrant(action+1)  # +1: the backend quadrant ids appear to be 1-based
        obs = None
        if self.record:
            # Attach an explanation to the replay: a full bar chart when
            # q-values were supplied, otherwise just the action's name.
            if q_vals is not None:
                explanation = self.__build_explanation(q_vals)
                obs = self.__world.act(a, explanation=explanation)
            else:
                explanation = Explanation(
                    "Attack %s" % self.action_meanings[action])
                obs = self.__world.act(a, explanation=explanation)
        else:
            obs = self.__world.act(a)
        self.__curr_state = obs.state
        terminal = obs.is_terminal()
        # The scalar reward is the sum of the decomposed reward components.
        reward = 0.0
        for val in obs.typed_reward.values():
            reward += float(val)
        # Make the decomposition dense: every known reward type appears,
        # defaulting to 0.0 when the backend omitted it this step.
        for r_type in self.reward_types:
            if r_type not in obs.typed_reward:
                obs.typed_reward[r_type] = 0.0
        info = {"reward_decomposition": dict(obs.typed_reward)}
        return self.curr_state, reward, terminal, info
    def __build_explanation(self, q_vals):
        # Build a SCAII bar chart: one bar group per action (quadrant),
        # one bar per reward type, with heights taken from q_vals.
        explanation = Explanation("Predicted Reward Per Quadrant")
        chart = BarChart("Move Explanation", "Actions", "QVal By Reward Type")
        for quad in range(0, 4):
            group = BarGroup("Attack %s" % self.action_meanings[quad])
            for r_type in self.reward_types:
                r_bar = Bar(r_type, q_vals[r_type][quad])
                group.add_bar(r_bar)
            chart.add_bar_group(group)
        explanation.with_bar_chart(chart)
        return explanation
| 29.927885 | 98 | 0.606104 |
4466bbf3fe0ac74d725316518ce82dd8d850e136 | 4,672 | py | Python | heat/tests/engine/test_plugin_manager.py | noironetworks/heat | 7cdadf1155f4d94cf8f967635b98e4012a7acfb7 | [
"Apache-2.0"
] | 1 | 2015-12-18T21:46:55.000Z | 2015-12-18T21:46:55.000Z | heat/tests/engine/test_plugin_manager.py | noironetworks/heat | 7cdadf1155f4d94cf8f967635b98e4012a7acfb7 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | heat/tests/engine/test_plugin_manager.py | noironetworks/heat | 7cdadf1155f4d94cf8f967635b98e4012a7acfb7 | [
"Apache-2.0"
] | 3 | 2018-07-19T17:43:37.000Z | 2019-11-15T22:13:30.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import types
import six
from heat.engine import plugin_manager
from heat.tests import common
def legacy_test_mapping():
    """Fixture mapping exposed under the legacy hook name."""
    return dict(foo='bar', baz='quux')
def current_test_mapping():
    """Fixture mapping exposed under the current hook name."""
    return dict(blarg='wibble', bar='baz')
def args_test_mapping(*args):
    """Fixture mapping keyed by the position of each positional argument."""
    return {position: value for position, value in enumerate(args)}
def kwargs_test_mapping(**kwargs):
    """Fixture mapping that simply echoes its keyword arguments."""
    return dict(kwargs)
def error_test_mapping():
    # Fixture hook that always fails; test_load_mapping_error below uses it
    # to verify that loader errors propagate.  MappingTestError is defined
    # at the bottom of the fixture section of this module.
    raise MappingTestError
def error_test_exception_mapping():
    """Fixture hook that always raises a generic Exception("exception")."""
    raise Exception("exception")
def invalid_type_test_mapping():
    """Fixture hook that deliberately returns a non-mapping value."""
    return 'foo'
def none_return_test_mapping():
    """Fixture hook that explicitly returns None."""
    return None
class MappingTestError(Exception):
    # Sentinel exception raised by error_test_mapping above; the tests use
    # it to assert that loader errors from plugin modules propagate.
    pass
class TestPluginManager(common.HeatTestCase):
    """Exercises PluginMapping/PluginManager against the *_test_mapping
    fixture hooks defined at the top of this module."""
    @staticmethod
    def module():
        # Use this very test module as the plugin module under test, so
        # PluginMapping resolves the fixture functions defined above.
        return sys.modules[__name__]
    def test_load_single_mapping(self):
        pm = plugin_manager.PluginMapping('current_test')
        self.assertEqual(current_test_mapping(),
                         pm.load_from_module(self.module()))
    def test_load_first_alternative_mapping(self):
        # With several candidate names, the first one that resolves wins.
        pm = plugin_manager.PluginMapping(['current_test', 'legacy_test'])
        self.assertEqual(current_test_mapping(),
                         pm.load_from_module(self.module()))
    def test_load_second_alternative_mapping(self):
        # Missing candidates are skipped until one resolves.
        pm = plugin_manager.PluginMapping(['nonexist', 'current_test'])
        self.assertEqual(current_test_mapping(),
                         pm.load_from_module(self.module()))
    def test_load_mapping_args(self):
        # Extra positional args are forwarded to the mapping function.
        pm = plugin_manager.PluginMapping('args_test', 'baz', 'quux')
        expected = {0: 'baz', 1: 'quux'}
        self.assertEqual(expected, pm.load_from_module(self.module()))
    def test_load_mapping_kwargs(self):
        # Keyword args are forwarded to the mapping function as well.
        pm = plugin_manager.PluginMapping('kwargs_test', baz='quux')
        self.assertEqual({'baz': 'quux'}, pm.load_from_module(self.module()))
    def test_load_mapping_non_existent(self):
        # An unknown mapping name yields an empty mapping, not an error.
        pm = plugin_manager.PluginMapping('nonexist')
        self.assertEqual({}, pm.load_from_module(self.module()))
    def test_load_mapping_error(self):
        # Errors raised by the mapping function propagate to the caller.
        pm = plugin_manager.PluginMapping('error_test')
        self.assertRaises(MappingTestError, pm.load_from_module, self.module())
    def test_load_mapping_exception(self):
        pm = plugin_manager.PluginMapping('error_test_exception')
        self.assertRaisesRegex(Exception,
                               "exception",
                               pm.load_from_module, self.module())
    def test_load_mapping_invalidtype(self):
        # Non-mapping return values are discarded in favour of {}.
        pm = plugin_manager.PluginMapping('invalid_type_test')
        self.assertEqual({}, pm.load_from_module(self.module()))
    def test_load_mapping_nonereturn(self):
        # A None return value is likewise treated as an empty mapping.
        pm = plugin_manager.PluginMapping('none_return_test')
        self.assertEqual({}, pm.load_from_module(self.module()))
    def test_modules(self):
        mgr = plugin_manager.PluginManager('heat.tests')
        for module in mgr.modules:
            self.assertEqual(types.ModuleType, type(module))
            self.assertTrue(module.__name__.startswith('heat.tests') or
                            module.__name__.startswith('heat.engine.plugins'))
    def test_load_all_skip_tests(self):
        # Test packages are skipped by the manager, so nothing from this
        # module should appear in the aggregated mapping.
        mgr = plugin_manager.PluginManager('heat.tests')
        pm = plugin_manager.PluginMapping('current_test')
        all_items = pm.load_all(mgr)
        for item in six.iteritems(current_test_mapping()):
            self.assertNotIn(item, all_items)
    def test_load_all(self):
        import heat.tests.engine.test_plugin_manager
        mgr = plugin_manager.PluginManager('heat.tests')
        pm = plugin_manager.PluginMapping('current_test')
        # NOTE(chmou): We force the modules to be ourself so we can get
        # the current_test_mapping if not we will would be
        # skipped by plugin_loader.load_modules since we are skipping
        # the loading of the package with tests in there
        mgr.modules = [heat.tests.engine.test_plugin_manager]
        all_items = pm.load_all(mgr)
        for item in six.iteritems(current_test_mapping()):
            self.assertIn(item, all_items)
| 32.671329 | 79 | 0.681721 |
9dc6817f742c8ea0e581b747dbb3035513e8dc12 | 3,669 | py | Python | insta/settings.py | Roseoketch/Insta-Photo | f9416d106b03124b16244cc688a05785a6a0d13c | [
"MIT"
] | 1 | 2021-06-23T12:46:12.000Z | 2021-06-23T12:46:12.000Z | insta/settings.py | Roseoketch/Insta-Photo | f9416d106b03124b16244cc688a05785a6a0d13c | [
"MIT"
] | null | null | null | insta/settings.py | Roseoketch/Insta-Photo | f9416d106b03124b16244cc688a05785a6a0d13c | [
"MIT"
] | null | null | null | """
Django settings for insta project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from decouple import config
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Loaded from the environment/.env via python-decouple; no default, so the
# app refuses to start when SECRET_KEY is missing.
SECRET_KEY = config('SECRET_KEY')
# '8g^!po_smnz8evjzhh@e8z_j9w%#$bi@4et5rl(wtzld1rcbzq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG =config('DEBUG', default=False, cast=bool)
# True
# NOTE(review): '*' accepts any Host header; only safe behind a proxy that
# validates hosts itself.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'tinymce',
    'bootstrap3',
    'photo',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'insta.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'insta.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# NOTE(review): database credentials are hard-coded here; they should be
# read through config()/environment variables like SECRET_KEY above.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'insta',
        'USER': 'rose',
        'PASSWORD':'justin/',
    }
}
# When DATABASE_URL is set (Heroku-style deploys), it overrides the local
# Postgres credentials above.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR,"static"),
]
STATIC_ROOT = os.path.join(BASE_DIR,"staticfiles")
# WhiteNoise serves the collected static files directly from the app.
STATICFILES_STORAGE='whitenoise.django.GzipManifestStaticFilesStorage'
MEDIA_URL ='/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
| 26.586957 | 91 | 0.704007 |
4f6555354bd37fffe0d413b417a6261aa85cf351 | 947 | py | Python | server_scripts/get_odb.py | det-lab/exhttpd | e9e6ff8271f45a1068d0b5c5a1669b8dc035fbf6 | [
"MIT"
] | null | null | null | server_scripts/get_odb.py | det-lab/exhttpd | e9e6ff8271f45a1068d0b5c5a1669b8dc035fbf6 | [
"MIT"
] | null | null | null | server_scripts/get_odb.py | det-lab/exhttpd | e9e6ff8271f45a1068d0b5c5a1669b8dc035fbf6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import traceback
from subprocess import Popen, PIPE, call
#
# This script copies new Midas datafiles into a CDMS series-number structure
#
def getodb(odbkey):
    """Read an ODB key via `odbedit -c 'ls -l "<key>"'` and return stdout."""
    command = "odbedit -c 'ls -l \"" + odbkey + "\"'"
    proc = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
    out, _err = proc.communicate()
    return out
def setodb(odbkey, val):
    """Set an ODB key to *val* via `odbedit` and return odbedit's stdout."""
    command = "odbedit -c 'set \"" + odbkey + "\" \"" + val + "\"'"
    proc = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
    out, _err = proc.communicate()
    return out
def loadodb(odbkey):
    """Read an ODB key via `odbedit -c 'ls -v "<key>"'` and return stdout."""
    command = "odbedit -c 'ls -v \"" + odbkey + "\"'"
    proc = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
    out, _err = proc.communicate()
    return out
# The stuff below is so this functionality can be used as a script:
#   python get_odb.py <odb-key>
########################################################################
if __name__ == "__main__":
    # sys.argv[1] is the ODB key path to read; IndexError if omitted.
    key = sys.argv[1]
    # Parenthesised form works as a statement in Python 2 and as the
    # print() function in Python 3; the original bare `print getodb(key)`
    # was Python-2-only syntax and a SyntaxError under Python 3.
    print(getodb(key))
| 21.044444 | 76 | 0.620908 |
a8b20e65a1195138ba4824546a726c6b3ffd7155 | 8,730 | py | Python | examples/docs/source/conf.py | jlin27/elastic | 6900760fb0799381487cd5cd2c50c8449e5ea831 | [
"BSD-3-Clause"
] | null | null | null | examples/docs/source/conf.py | jlin27/elastic | 6900760fb0799381487cd5cd2c50c8449e5ea831 | [
"BSD-3-Clause"
] | null | null | null | examples/docs/source/conf.py | jlin27/elastic | 6900760fb0799381487cd5cd2c50c8449e5ea831 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PyTorch documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 23 13:31:47 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import torch
import torchelastic
import pytorch_sphinx_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '1.6'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinxcontrib.katex',
'm2r',
]
# katex options
#
#
katex_options = r'''
delimiters : [
{left: "$$", right: "$$", display: true},
{left: "\\(", right: "\\)", display: false},
{left: "\\[", right: "\\]", display: true}
]
'''
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PyTorch/Elastic'
copyright = '2020, PyTorch Elastic Contributors'
author = 'PyTorch Elastic Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# TODO: change to [:2] at v1.0
version = 'master '
# The full version, including alpha/beta/rc tags.
# TODO: verify this works as expected
release = 'master'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'pytorch_project': 'audio',
'collapse_navigation': False,
'display_version': True,
'logo_only': True,
}
html_logo = '_static/img/pytorch-logo-dark.svg'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
def setup(app):
# NOTE: in Sphinx 1.8+ `html_css_files` is an official configuration value
# and can be moved outside of this function (and the setup(app) function
# can be deleted).
html_css_files = [
'https://cdn.jsdelivr.net/npm/katex@0.10.0-beta/dist/katex.min.css'
]
# In Sphinx 1.8 it was renamed to `add_css_file`, 1.7 and prior it is
# `add_stylesheet` (deprecated in 1.8).
add_css = getattr(app, 'add_css_file', getattr(app, 'add_stylesheet'))
for css_file in html_css_files:
add_css(css_file)
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
# NOTE(review): the 'TorchAudiodoc'/'Torchaudio' names below look copy-pasted
# from the torchaudio docs build; this project is PyTorch/Elastic — confirm.
htmlhelp_basename = 'TorchAudiodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'pytorch.tex', 'Torchaudio Documentation',
     'Torch Contributors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'Torchaudio', 'Torchaudio Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Torchaudio', 'Torchaudio Documentation',
     author, 'Torchaudio', 'Load audio files into pytorch tensors.',
     'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'python': ('https://docs.python.org/', None),
    'numpy': ('https://docs.scipy.org/doc/numpy/', None),
    'torch': ('https://pytorch.org/docs/stable/', None),
}
# -- A patch that prevents Sphinx from cross-referencing ivar tags -------
# See http://stackoverflow.com/a/41184353/3343043
from docutils import nodes
from sphinx.util.docfields import TypedField
from sphinx import addnodes
def patched_make_field(self, types, domain, items, **kw):
    """Replacement for TypedField.make_field that renders ivar field names
    as plain literal text instead of cross-references (workaround from
    stackoverflow.com/a/41184353, referenced in the comment above)."""
    # `kw` catches `env=None` needed for newer sphinx while maintaining
    # backwards compatibility when passed along further down!
    # type: (List, unicode, Tuple) -> nodes.field
    def handle_item(fieldarg, content):
        # Build one "name (type) -- description" paragraph for a field entry.
        par = nodes.paragraph()
        par += addnodes.literal_strong('', fieldarg)  # Patch: this line added
        # par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
        #                            addnodes.literal_strong))
        if fieldarg in types:
            par += nodes.Text(' (')
            # NOTE: using .pop() here to prevent a single type node to be
            # inserted twice into the doctree, which leads to
            # inconsistencies later when references are resolved
            fieldtype = types.pop(fieldarg)
            if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
                typename = u''.join(n.astext() for n in fieldtype)
                # Qualify builtin type names so intersphinx links them to
                # the Python standard-library docs.
                typename = typename.replace('int', 'python:int')
                typename = typename.replace('long', 'python:long')
                typename = typename.replace('float', 'python:float')
                typename = typename.replace('type', 'python:type')
                par.extend(self.make_xrefs(self.typerolename, domain, typename,
                                           addnodes.literal_emphasis, **kw))
            else:
                par += fieldtype
            par += nodes.Text(')')
        par += nodes.Text(' -- ')
        par += content
        return par
    fieldname = nodes.field_name('', self.label)
    # A single collapsible item is rendered inline; otherwise emit a list.
    if len(items) == 1 and self.can_collapse:
        fieldarg, content = items[0]
        bodynode = handle_item(fieldarg, content)
    else:
        bodynode = self.list_type()
        for fieldarg, content in items:
            bodynode += nodes.list_item('', handle_item(fieldarg, content))
    fieldbody = nodes.field_body('', bodynode)
    return nodes.field('', fieldname, fieldbody)
# Monkeypatch the docutils field renderer globally for this docs build.
TypedField.make_field = patched_make_field
| 32.819549 | 79 | 0.659221 |
5361ca669776afedec101f5316d676374c20ffc7 | 1,129 | py | Python | sandbox/urls.py | itbabu/django-oscar-paypal | 0004fcb416b73529396c746940d5bf2cf8ded6c2 | [
"BSD-3-Clause"
] | 1 | 2018-05-17T20:16:34.000Z | 2018-05-17T20:16:34.000Z | sandbox/urls.py | itbabu/django-oscar-paypal | 0004fcb416b73529396c746940d5bf2cf8ded6c2 | [
"BSD-3-Clause"
] | null | null | null | sandbox/urls.py | itbabu/django-oscar-paypal | 0004fcb416b73529396c746940d5bf2cf8ded6c2 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import patterns, include, url
# URLconf for the django-oscar-paypal sandbox site (legacy Django 1.x
# style: patterns() and bare-tuple routes are kept as-is).
from django.conf.urls.i18n import i18n_patterns
from django.conf import settings
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
from apps.app import application
from paypal.payflow.dashboard.app import application as payflow
from paypal.express.dashboard.app import application as express_dashboard
# Register each installed app's admin module with the admin site.
admin.autodiscover()
# Non-localised URLs: the admin and the language-switch endpoint.
urlpatterns = patterns(
    '',
    (r'^admin/', include(admin.site.urls)),
    url(r'^i18n/', include('django.conf.urls.i18n')),
)
# Localised URLs; the catch-all r'' route hands everything else to the
# main Oscar application.
urlpatterns += i18n_patterns('',
    # PayPal Express integration...
    (r'^checkout/paypal/', include('paypal.express.urls')),
    # Dashboard views for Payflow Pro
    (r'^dashboard/paypal/payflow/', include(payflow.urls)),
    # Dashboard views for Express
    (r'^dashboard/paypal/express/', include(express_dashboard.urls)),
    (r'', include(application.urls)),
)
# Serve static and media files from Django itself in development only.
if settings.DEBUG:
    urlpatterns += staticfiles_urlpatterns()
    urlpatterns += static(
        settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 35.28125 | 73 | 0.749336 |
e80928b75ebd2f1ffe12a1178f78c1c40506a4cd | 140 | py | Python | siteApp/views.py | ledroid/gitDjango | cef6096a3e309bfc94b2b8586cf96de07abc36b0 | [
"MIT"
] | null | null | null | siteApp/views.py | ledroid/gitDjango | cef6096a3e309bfc94b2b8586cf96de07abc36b0 | [
"MIT"
] | null | null | null | siteApp/views.py | ledroid/gitDjango | cef6096a3e309bfc94b2b8586cf96de07abc36b0 | [
"MIT"
] | null | null | null | from django.shortcuts import render
# Create your views here.
def post_list(request):
    """Render the post list page with an empty template context.

    (The original return line also carried corrupted trailing tokens from
    a bad paste — " | 28 | 53 | 0.75 |" — which made it a syntax error;
    they are removed here.)
    """
    return render(request, 'site/post_list.html', {})
8b3e0249012c1ff0ee2582b7f427bf8c14aea1f7 | 8,078 | py | Python | src/googleanalytics/account.py | pc/python-googleanalytics | 9389cbf0b76f2066365dc4142ccbb02148de1580 | [
"BSD-3-Clause"
] | 2 | 2016-05-09T13:24:10.000Z | 2022-02-04T13:25:59.000Z | src/googleanalytics/account.py | pc/python-googleanalytics | 9389cbf0b76f2066365dc4142ccbb02148de1580 | [
"BSD-3-Clause"
] | null | null | null | src/googleanalytics/account.py | pc/python-googleanalytics | 9389cbf0b76f2066365dc4142ccbb02148de1580 | [
"BSD-3-Clause"
] | null | null | null | from googleanalytics.exception import GoogleAnalyticsClientError
from googleanalytics.data import DataPoint, DataSet
import pprint
import urllib
# Comparison operators accepted in a filter triple (see
# Account.process_filters): equality/inequality, numeric comparisons,
# regex match/non-match (=~ / !~) and substring match/non-match (=@ / !@).
filter_operators = ['==', '!=', '>', '<', '>=', '<=', '=~', '!~', '=@', '!@']
# Converters applied to metric values based on the 'type' attribute the API
# reports; only integers are converted, other types stay as strings.
data_converters = {
    'integer': int,
}
class Account:
    """A single Google Analytics profile reachable through the (legacy)
    Data Export API.

    Instances are normally created by the connection layer.  ``table_id``
    (``'ga:<profile_id>'``) is the identifier the API uses for the profile.
    """

    def __init__(self, connection=None, title=None, link=None,
                 account_id=None, account_name=None, profile_id=None,
                 web_property_id=None, table_id=None, validated=False):
        self.connection = connection
        self.title = title
        self.link = link
        self.account_id = account_id
        self.account_name = account_name
        self.profile_id = profile_id
        self.web_property_id = web_property_id
        if table_id:
            self.table_id = table_id
        elif self.profile_id:
            # Derive the API table id from the profile id.
            self.table_id = 'ga:' + self.profile_id
        else:
            # Previously the attribute was left unset in this case, making
            # later access raise AttributeError; default to None instead.
            self.table_id = None
        self.validated = validated

    def __repr__(self):
        if self.title:
            return '<Account: %s>' % self.title
        elif self.table_id:
            return '<Account: %s>' % self.table_id
        # Previously this fell through and returned None, which makes
        # repr() raise TypeError; always return a string.
        return '<Account>'

    def get_data(self, start_date, end_date, dimensions=None, metrics=None,
                 sort=None, filters=None):
        """
        Pulls data in from an account and returns a processed data structure for
        easy post processing. This method requires the following inputs:

        ** Required Arguments **

        ``start_date``
          A ``datetime`` object for the lower bound of your query

        ``end_date``
          A ``datetime`` object for the upper bound of your query

        ** Optional Arguments **

        ``dimensions``
          A list of dimensions, for example: ['country','browser']
          See: http://code.google.com/apis/analytics/docs/gdata/gdataReferenceDimensionsMetrics.html
          See: http://code.google.com/apis/analytics/docs/gdata/gdataReference.html#dimensionsAndMetrics

        ``metrics``
          A list of metrics, for example: ['pageviews', 'uniquePageviews']
          See: http://code.google.com/apis/analytics/docs/gdata/gdataReferenceDimensionsMetrics.html
          See: http://code.google.com/apis/analytics/docs/gdata/gdataReference.html#dimensionsAndMetrics

        ``sort``
          A list of dimensions or metrics to sort the output by, should probably
          be one of the items you specified in ``dimensions`` or ``metrics``.
          For example: ['browser', 'pageviews']
          See: http://code.google.com/apis/analytics/docs/gdata/gdataReference.html#sorting

        ``filters``
          A list of filters.  A filter expression has three parts:
            name - The name of the dimension or metric to filter on.
              For example: ga:pageviews will filter on the pageviews metric.
            operator - Defines the type of filter match to use. Operators are
              specific to either dimensions or metrics.
            expression - States the values included or excluded from the results.
              Expressions use regular expression syntax.
          Learn more about valid operators and expressions here:
          http://code.google.com/apis/analytics/docs/gdata/gdataReference.html#filtering

          The ``filters`` input accepts this data as a list of lists like so.
          Please note that order matters, especially when using boolean
          operators (see below).

          [
            ['browser', '=~', 'Firefox', 'AND'], # Regular expression match on 'Firefox'
            ['browser', '=~', 'Internet (Explorer|Exploder)', 'OR'],
            ['city', '=@', 'York', 'OR'], # All cities with York as a substring
            ['state', '!=', 'California', 'AND'], # Everything but California
            ['timeOnPage', '<', '10'], # Reject results where timeonpage < 10sec
          ]

          Filters can be combined with AND boolean logic as well as with OR
          boolean logic. When using both operators, the OR operator has higher
          precendence. When you are using more than one filter, please specify
          a fourth item in your list 'AND' or 'OR' to explicitly spell out the
          filters' relationships.
        """
        # Mutable default arguments are a classic Python pitfall; accept
        # None and normalise to fresh lists instead (backward compatible:
        # the original defaults were never mutated, only read).
        dimensions = dimensions or []
        metrics = metrics or []
        sort = sort or []
        filters = filters or []
        path = '/analytics/feeds/data'
        if start_date > end_date:
            raise GoogleAnalyticsClientError('Date orders are reversed')
        data = {
            'ids': self.table_id,
            'start-date': start_date.strftime('%Y-%m-%d'),
            'end-date': end_date.strftime('%Y-%m-%d'),
        }
        # Dimensions and metrics must be namespaced with the 'ga:' prefix.
        if dimensions:
            data['dimensions'] = ",".join(['ga:' + d for d in dimensions])
        if metrics:
            data['metrics'] = ",".join(['ga:' + m for m in metrics])
        if sort:
            _sort = []
            for s in sort:
                # A leading '-' requests descending order; it must come in
                # front of the 'ga:' namespace prefix.
                prefix = 'ga:'
                if s[0] == '-':
                    prefix = '-ga:'
                    s = s[1:]
                _sort.append(prefix + s)
            data['sort'] = ",".join(_sort)
        if filters:
            data['filters'] = self.process_filters(filters)
        processed_data = DataSet()
        data = urllib.urlencode(data)
        response = self.connection.make_request('GET', path=path, data=data)
        raw_xml = response.read()
        xml_tree = self.connection.parse_response(raw_xml)
        data_rows = xml_tree.getiterator('{http://www.w3.org/2005/Atom}entry')
        for row in data_rows:
            ms = row.findall('{http://schemas.google.com/analytics/2009}metric')
            ds = row.findall('{http://schemas.google.com/analytics/2009}dimension')
            title = row.find('{http://www.w3.org/2005/Atom}title').text
            if len(ms) == 0 or len(ds) == 0:
                # Entries carrying neither metrics nor dimensions hold no data.
                continue
            # Convert typed metric values (currently only integers) from
            # their string form; unknown types are left as strings.
            for m in ms:
                if m.attrib['type'] in data_converters:
                    m.attrib['value'] = data_converters[m.attrib['type']](m.attrib['value'])
            dp = DataPoint(
                account=self,
                connection=self.connection,
                title=title,
                metrics=[m.attrib['value'] for m in ms],
                dimensions=[d.attrib['value'] for d in ds]
            )
            processed_data.append(dp)
        return processed_data

    def process_filters(self, filters):
        """Convert the list-of-lists filter spec accepted by get_data into
        the single filter-expression string the Data Export API expects.

        Entries with fewer than three items or an unknown operator are
        silently skipped (matching the original behaviour).
        """
        processed_filters = []
        # When several filters are given, entries without an explicit
        # combinator default to AND; a single filter needs no combinator.
        multiple_filters = len(filters) > 1
        for filt in filters:
            if len(filt) < 3:
                continue
            if len(filt) == 3:
                name, operator, expression = filt
                comb = 'AND' if multiple_filters else ''
            elif len(filt) == 4:
                name, operator, expression, comb = filt
                if comb != 'AND' and comb != 'OR':
                    # BUG FIX: this previously read `comb == 'AND'` (a no-op
                    # comparison), so invalid combinators leaked through
                    # unchanged.
                    comb = 'AND'
            else:
                # Malformed entry (more than four items): skip it instead of
                # reusing stale values from a previous loop iteration.
                continue
            # Reject any filters with invalid operators
            if operator not in filter_operators:
                continue
            name = 'ga:' + name
            # Map the boolean keywords onto GA's separators: ';' is AND,
            # ',' is OR.
            if comb == 'AND':
                comb = ';'
            if comb == 'OR':
                comb = ','
            # Backslash, comma and semicolon are special inside filter
            # expressions and must be backslash-escaped (backslash first, so
            # the escapes we add are not double-escaped).
            if '\\' in expression:
                expression = expression.replace('\\', '\\\\')
            if ',' in expression:
                expression = expression.replace(',', '\\,')
            if ';' in expression:
                expression = expression.replace(';', '\\;')
            processed_filters.append("".join([name, operator, expression, comb]))
        filter_string = "".join(processed_filters)
        # Strip any trailing boolean separator left over from the last filter.
        if filter_string and filter_string[-1] in (';', ','):
            filter_string = filter_string[:-1]
        return filter_string
| 34.818966 | 100 | 0.599158 |
f06a7b92fab2f51b6588f5490ef47cbd96a916ec | 175 | py | Python | forgery_data/forgery/__init__.py | stamaimer/ForgeryData | b3bfde1d2c44db9074b440a38fd04987ee7cdae5 | [
"Unlicense"
] | null | null | null | forgery_data/forgery/__init__.py | stamaimer/ForgeryData | b3bfde1d2c44db9074b440a38fd04987ee7cdae5 | [
"Unlicense"
] | null | null | null | forgery_data/forgery/__init__.py | stamaimer/ForgeryData | b3bfde1d2c44db9074b440a38fd04987ee7cdae5 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
forgery_data.forgery
~~~~~~~~~~~~~~~~~~~~
stamaimer 08/29/16
"""
from basic import ForgeryData
from zh_cn import ForgeryDataZH_CN | 12.5 | 34 | 0.577143 |
e93ed63777149d03bdb7cab62a1823e9a6e22e4e | 22 | py | Python | alertaclient/version.py | gapitio/python-alerta-client | 943a624c2136e952c92fbdfaa80e61b73d949275 | [
"Apache-2.0"
] | null | null | null | alertaclient/version.py | gapitio/python-alerta-client | 943a624c2136e952c92fbdfaa80e61b73d949275 | [
"Apache-2.0"
] | null | null | null | alertaclient/version.py | gapitio/python-alerta-client | 943a624c2136e952c92fbdfaa80e61b73d949275 | [
"Apache-2.0"
] | null | null | null | __version__ = '8.3.0'
| 11 | 21 | 0.636364 |
a12fd4c1c3d751cb56313497332fa70e18bf5ee5 | 3,170 | py | Python | sdk/eventhub/azure-mgmt-eventhub/azure/mgmt/eventhub/aio/_configuration.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/eventhub/azure-mgmt-eventhub/azure/mgmt/eventhub/aio/_configuration.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 4 | 2019-04-17T17:57:49.000Z | 2020-04-24T21:11:22.000Z | sdk/eventhub/azure-mgmt-eventhub/azure/mgmt/eventhub/aio/_configuration.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 1 | 2019-04-05T18:17:43.000Z | 2019-04-05T18:17:43.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from typing import Any
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
class EventHubManagementClientConfiguration(Configuration):
    """Configuration for EventHubManagementClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Subscription credentials that uniquely identify a Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
    :type subscription_id: str
    """

    def __init__(
        self,
        credential,  # type: "AsyncTokenCredential"
        subscription_id,  # type: str
        **kwargs  # type: Any
    ) -> None:
        # Both parameters are mandatory; fail fast with a clear message.
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        super(EventHubManagementClientConfiguration, self).__init__(**kwargs)

        self.credential = credential
        self.subscription_id = subscription_id
        # Default AAD scope for Azure Resource Manager; callers may override
        # by passing 'credential_scopes' in kwargs.
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        # Identify this SDK (package/version) in the User-Agent telemetry.
        kwargs.setdefault('sdk_moniker', 'azure-mgmt-eventhub/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs: Any
    ) -> None:
        """Builds the HTTP pipeline policies, honoring caller overrides in kwargs."""
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get('authentication_policy')
        # Fall back to AAD bearer-token auth when a credential exists but no
        # explicit authentication policy was supplied.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| 48.769231 | 173 | 0.697792 |
b5b40d20a4defa0611d6933af04983eb08527c6d | 3,333 | py | Python | hubspot/crm/companies/models/filter_group.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 117 | 2020-04-06T08:22:53.000Z | 2022-03-18T03:41:29.000Z | hubspot/crm/companies/models/filter_group.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 62 | 2020-04-06T16:21:06.000Z | 2022-03-17T16:50:44.000Z | hubspot/crm/companies/models/filter_group.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 45 | 2020-04-06T16:13:52.000Z | 2022-03-30T21:33:17.000Z | # coding: utf-8
"""
Companies
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.companies.configuration import Configuration
class FilterGroup(object):
    """Model for a group of search filters (auto generated by OpenAPI Generator).

    Ref: https://openapi-generator.tech

    Keep the structure in sync with the API definition; filters within one
    group are combined by the HubSpot search API.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    openapi_types = {"filters": "list[Filter]"}

    attribute_map = {"filters": "filters"}

    def __init__(self, filters=None, local_vars_configuration=None):  # noqa: E501
        """FilterGroup - a model defined in OpenAPI.

        :param filters: the required list of ``Filter`` objects.
        :param local_vars_configuration: optional client ``Configuration``;
            a default instance is created when omitted.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._filters = None
        self.discriminator = None

        # Assign through the property so client-side validation runs.
        self.filters = filters

    @property
    def filters(self):
        """Gets the filters of this FilterGroup.  # noqa: E501


        :return: The filters of this FilterGroup.  # noqa: E501
        :rtype: list[Filter]
        """
        return self._filters

    @filters.setter
    def filters(self, filters):
        """Sets the filters of this FilterGroup.


        :param filters: The filters of this FilterGroup.  # noqa: E501
        :type: list[Filter]
        """
        # `filters` is a required field: reject None when validation is on.
        if self.local_vars_configuration.client_side_validation and filters is None:  # noqa: E501
            raise ValueError("Invalid value for `filters`, must not be `None`")  # noqa: E501

        self._filters = filters

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Plain dict iteration and comprehensions replace the former
        # six.iteritems()/map(lambda ...) calls: this code path no longer
        # needs the Python 2 compatibility shim.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, FilterGroup):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, FilterGroup):
            return True

        return self.to_dict() != other.to_dict()
| 30.027027 | 139 | 0.612061 |
a2629b9aff3254fe51d171dfb7fc84759ceb4fcc | 7,055 | py | Python | TCP_FTP/server/main.py | AlterFrozen/Python-NetworkProgramming | 6779966486c39805e6e6ead1514f4ad3ac083400 | [
"MIT"
] | 1 | 2021-11-26T11:36:21.000Z | 2021-11-26T11:36:21.000Z | TCP_FTP/server/main.py | AlterFrozen/Python-NetworkProgramming | 6779966486c39805e6e6ead1514f4ad3ac083400 | [
"MIT"
] | null | null | null | TCP_FTP/server/main.py | AlterFrozen/Python-NetworkProgramming | 6779966486c39805e6e6ead1514f4ad3ac083400 | [
"MIT"
] | null | null | null | '''Server入口'''
from socket import *
from users import authenticate
from FrozenToolKit import FrozenFile
import settings
import protocol
import json
import os
class Server:
    '''FTP Server.

    Single-threaded server speaking a JSON message protocol (see the
    ``protocol`` module): every message is terminated by ``protocol.suffix``
    and carries a numeric ``state`` code. Client commands are dispatched to
    the ``_CIL_*`` handler methods.
    '''
    def __init__(self):
        # --- Server state ---
        self.socket = None
        self.MAX_RECV = 4096  # maximum number of bytes per recv() call
        self.buffer_recv = ''  # buffer for the message currently being assembled
        self.buffer_sticky = ''  # leftover data after splitting sticky packets
        # --- Client (per-connection) state ---
        self.session = None
        self.clientAddr = None
        self.USER_HOME = None

    def __del__(self):
        # NOTE(review): if start() never ran, self.socket is None and this
        # raises AttributeError at teardown -- confirm intended.
        self.socket.close()

    def start(self):
        '''Load settings, bind the listening socket and serve clients forever.

        Handles one client connection at a time (no concurrency).
        '''
        self.socket = socket(AF_INET,SOCK_STREAM)
        self.socket.setsockopt(SOL_SOCKET,SO_REUSEADDR,settings.PORT_REUSE)  # allow port/address reuse
        self.socket.bind((settings.HOST_IP,settings.HOST_PORT))
        print("Server start at {} successfully!".format((settings.HOST_IP,settings.HOST_PORT)))
        self.socket.listen(settings.MAX_LISTENING)
        while True:
            self.session,self.clientAddr = self.socket.accept()
            print("Start a new seesion with {}".format(self.clientAddr))
            self.handle()

    def handle(self):
        '''Receive and dispatch client messages until the session ends.'''
        while True:
            try:
                self.recvall_bySuffix(protocol.suffix)
                mes = json.loads(self.buffer_recv)
                self.buffer_recv = ''  # clear the receive buffer
                state = int(mes['state'])  # parse the protocol state code
                if state == 1:  # RPC: remote command invocation
                    request = str(mes['message']).split(' ')
                    cmd = '_CIL_' + request[0]  # prefix guards against calling arbitrary methods
                    if hasattr(self,cmd):
                        #self.session.sendall(protocol.FTPMes('{} is operating'.format(request[0]),200))
                        func = getattr(self,cmd)
                        #len_para = len(request)-1
                        func(request[1:])  # forward the remaining tokens as arguments
                    else:
                        res = "No command {}, typing 'help/man' ask for more commands, Format: [cmd] [para1] [para2] ...".format(request[0])
                        self.session.sendall(protocol.FTPMes(res,201))
                elif state == 100:  # login request
                    if authenticate(mes['uid'],mes['pwd']):
                        self.USER_HOME = os.path.join(settings.SERVER_ROOT,"home",mes['uid'])  # per-user home directory
                        if not os.path.exists(self.USER_HOME):
                            print("Makedir: {}".format(self.USER_HOME))
                            os.makedirs(self.USER_HOME)
                        self.session.sendall(protocol.FTPMes('OK',100))
                    else:self.session.sendall(protocol.FTPMes('NO',101))
                elif state == 999:  # client requested exit
                    print("connection {} Exit".format(self.clientAddr))
                    self.session.close()
                    break
            except EOFError:
                print("connection {} is lost……".format(self.clientAddr))
                self.session.close()
                break

    def recvall_bySuffix(self,suffix):
        '''Keep receiving until ``suffix`` is seen.

        Stores the completed message in ``buffer_recv`` and stashes any bytes
        received after the suffix into ``buffer_sticky`` for the next call
        (sticky-packet handling).
        '''
        self.buffer_recv = ''  # drop any previous message
        if self.buffer_sticky:
            #print("REST")
            self.buffer_recv += self.buffer_sticky  # resume from leftover of the last packet
            self.buffer_sticky = ''
        pos_suffix = self.buffer_recv.find(suffix)
        while pos_suffix == -1:
            inflow = self.session.recv(self.MAX_RECV)
            if not inflow:
                raise EOFError('socket closed [Received: {} bytes]'.format(len(self.buffer_recv)))
            # NOTE(review): decoding each chunk assumes multi-byte characters
            # never straddle a recv() boundary -- confirm for protocol.coder.
            self.buffer_recv += inflow.decode(protocol.coder)
            #print(len(self.buffer_recv))
            pos_suffix = self.buffer_recv.find(suffix)
        self.buffer_sticky = self.buffer_recv[len(suffix)+pos_suffix:]  # cache data beyond the suffix
        self.buffer_recv = self.buffer_recv[:pos_suffix]  # message fully extracted
        print(self.buffer_recv)
        return True

    # Command handlers below (private by convention) ↓
    def _CIL_help(self,*para):  # *para keeps a uniform handler signature
        self.session.sendall(protocol.FTPMes("help/man: 帮助文档'\nls: 当前目录文件\nget [filename]: 下载文件"))

    def _CIL_man(self,*para):
        # Alias for the help command.
        self._CIL_help()

    def _CIL_ls(self,*para):
        # Sends a listing of the user's home directory (index, name, size).
        files = ''
        cnt = 0
        for file in os.listdir(self.USER_HOME):
            filePath = os.path.join(self.USER_HOME,file)
            if os.path.isfile(filePath):
                cnt += 1
                suffix = 'B'
                size = os.path.getsize(filePath)
                # Scale the displayed size through B -> KB -> MB -> GB.
                if size >= 1024:
                    size = int(size/1024)
                    suffix = 'KB'
                if size >= 1024:
                    size = int(size/1024)
                    suffix = 'MB'
                if size >= 1024:
                    size = int(size/1024)
                    suffix = 'GB'
                files += "[{}] {}{} {}\n".format(cnt,file.ljust(30),size,suffix)
                #files += "".format(cnt) + file.ljust(30) + str(os.path.getsize(filePath)) + ' KB\n'
        self.session.sendall(protocol.FTPMes(files))

    def _CIL_get(self,*para):
        # Sends a file to the client; large files are negotiated and streamed.
        if not para[0]:  # a filename argument is required for download
            self.session.sendall(protocol.FTPMes("Sorry please type 'get [filename]'",201))
        else:
            filePath = os.path.join(self.USER_HOME,str(para[0][0]))
            print(filePath)
            if not os.path.exists(filePath):  # requested file does not exist
                self.session.sendall(protocol.FTPMes("Sorry this file is not existed",201))
            else:
                Coder = FrozenFile.get_encoding(filePath)
                Size = os.path.getsize(filePath)
                # Small files go in one message; bigger ones are streamed in
                # chunks using the 202 (more data) / 203 (last chunk) states.
                if Size < 4194304:  # 4 MiB threshold (original comment said 32MB)
                    with open(filePath,'rb') as fp:
                        mes = fp.read().decode(protocol.coder)
                        self.session.sendall(protocol.FTPMes(mes,200,coder=Coder,size=Size))
                else:  # large file: ask the client first
                    totalSize = Size
                    self.session.sendall(protocol.FTPMes('',202,coder=Coder,size=Size))
                    self.recvall_bySuffix(protocol.suffix)
                    mes = json.loads(self.buffer_recv)
                    self.buffer_recv = ''  # clear
                    res = int(mes['state'])  # 202 = client accepts, 203 = client declines
                    if res == 202:
                        print("Big File is Sending")
                        clientSize = int(mes['buffsize'])
                        with open(filePath,'rb') as fp:
                            while True:
                                data = fp.read(clientSize).decode(protocol.coder)
                                # NOTE(review): decoded character count is
                                # subtracted from the raw byte size; the two
                                # differ for multi-byte encodings -- confirm.
                                totalSize -= len(data)
                                if totalSize <= clientSize:  # final chunk
                                    self.session.sendall(protocol.FTPMes(data,203))
                                    break
                                else:
                                    self.session.sendall(protocol.FTPMes(data,202))
                    elif res == 203: self.session.sendall(protocol.FTPMes('Get Cancel Successfully!'))  # acknowledge the cancel
if __name__ == "__main__":
    # Entry point: construct the server and serve clients until killed.
    server = Server()
    server.start()
b579f4995a459cfaf98de3acfba2cd8bfa520eee | 4,830 | py | Python | lewis_emulators/kynctm3k/device.py | ISISComputingGroup/EPICS-DeviceEmulator | 026c2a14a16bb204ea7527e3765daa182cafa814 | [
"BSD-3-Clause"
] | 2 | 2020-10-20T16:49:13.000Z | 2021-02-19T10:41:44.000Z | lewis_emulators/kynctm3k/device.py | ISISComputingGroup/EPICS-DeviceEmulator | 026c2a14a16bb204ea7527e3765daa182cafa814 | [
"BSD-3-Clause"
] | 9 | 2019-03-22T15:35:15.000Z | 2021-07-28T11:05:43.000Z | lewis_emulators/kynctm3k/device.py | ISISComputingGroup/EPICS-DeviceEmulator | 026c2a14a16bb204ea7527e3765daa182cafa814 | [
"BSD-3-Clause"
] | 1 | 2020-10-21T17:02:44.000Z | 2020-10-21T17:02:44.000Z | from collections import OrderedDict
from .states import DefaultState
from lewis.devices import StateMachineDevice
from lewis.core.logging import has_log
import random
import six
def truncate_if_set(f):
    """Decorator: halve the decorated method's string output when enabled.

    When the owning instance's ``truncated_output`` flag is True, only the
    first half (rounded midpoint) of the returned string is passed through;
    otherwise the output is returned unchanged.

    Args:
        f: The method being wrapped; expected to return a sliceable value.

    Returns:
        The wrapped method.
    """
    # Local import: six.wraps was only an alias for functools.wraps, so the
    # stdlib version is used directly and the py2 shim is not needed here.
    import functools

    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        output = f(self, *args, **kwargs)

        if self.truncated_output:
            # Keep the first half of the response (midpoint rounded).
            output = output[:int(round(len(output) / 2.))]

        return output
    return wrapper
@has_log
def fake_auto_send(f):
    """Decorator: simulate the device's auto-send mode.

    When the owning instance's ``auto_send`` flag is True, the wrapped
    method's real output is discarded and replaced with a randomly generated
    trigger message (``TG,<NN>,+FFFFFFF``), mimicking a device that pushes
    unsolicited measurements.

    Args:
        f: The method being wrapped.

    Returns:
        The wrapped method.
    """
    # Local import: six.wraps was only an alias for functools.wraps, so the
    # stdlib version is used directly and the py2 shim is not needed here.
    import functools

    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        output = f(self, *args, **kwargs)

        if self.auto_send:
            # Random trigger channel 1-4; value is permanently out of range.
            output = "TG,{:02d},+FFFFFFF".format(random.randint(1, 4))

        return output
    return wrapper
@has_log
class SimulatedKynctm3K(StateMachineDevice):
    """Emulated Keyence TM-3000 series measuring unit.

    Holds up to 16 OUT channel readings and renders them in the device's
    serial protocol format.
    """

    # R0/R1 are measurement display screens; Q0 is the RS232C comms mode.
    INPUT_MODES = ("R0", "R1", "Q0")

    def _initialize_data(self):
        """
        Initialize all of the device's attributes.

        OUT_values contains the measurement values to be returned. A False value is considered to not
        be in the program, and will not be returned.
        """
        self.OUT_values = None
        self.truncated_output = False
        self.auto_send = False
        self.input_mode = "R0"

    def reset_device(self):
        """
        Resets the device to a known state. This can be confirmed when the OUT channels equal -256.0

        Returns: None
        """
        self._initialize_data()
        self.OUT_values = ["off"] * 16
        return None

    def _get_state_handlers(self):
        # The emulator has a single state and no transitions.
        return {
            'default': DefaultState(),
        }

    def _get_initial_state(self):
        return 'default'

    def _get_transition_handlers(self):
        return OrderedDict([
        ])

    def set_autosend_status(self, new_state):
        """
        Sets the autosend status between True (autosend on) and False (autosend off)

        Args:
            new_state: Boolean, the new autosend state

        Returns:
            A string acknowledging the status change, or an error if the device is in the wrong state to change the setting
        """
        # Validate explicitly rather than via `assert`, which is stripped
        # when Python runs with optimizations (-O).
        if not isinstance(new_state, int):
            return "ER,SW,08"

        if self.input_mode in ("R0", "R1"):
            # Cannot change the autosend setting from a measurement screen.
            return "ER,SW,01"
        else:
            self.auto_send = bool(new_state)
            return "SW,EA"

    def set_input_mode(self, new_state):
        """
        Changes the state of the device to a measurement screen (R0/R1) or a RS232C comms mode (Q0)

        Args:
            new_state: String, denoting measurement screen (R0/R1) or RS232C mode (Q0)

        Returns:
            new_state: String. Either the name of the new state, or an error code if the new state was not recognised
        """
        if new_state in self.INPUT_MODES:
            self.input_mode = new_state
            return new_state
        else:
            # Echo the first two characters of the rejected mode in the error.
            return "ER,{:.2},01".format(new_state)

    def parse_status(self, output_setting):
        """
        Converts the status for one OUT channel to a formatted string

        Args:
            output_setting: String or float. If float, then output is on. If off or out_of_range, then a formatted string will be returned

        Returns:
            OUT_string: String. Contains the measurement value if on, or XXXXXXXX/FFFFFFF as appropriate if off or out of range
        """
        out_of_range_return = "FFFFFFF"
        off_return = "XXXXXXXX"

        if output_setting == "off":
            return off_return
        elif output_setting == "out_of_range":
            # Add a random sign to the out of range string
            sign = random.choice(('+', '-'))
            return sign + out_of_range_return
        elif isinstance(output_setting, float):
            # Signed, zero-padded fixed-point value, e.g. +012.345
            return "{:+08.3f}".format(output_setting)
        else:
            # Anything unrecognised is treated as switched off.
            return off_return

    @fake_auto_send
    @truncate_if_set
    def format_output_data(self):
        """
        Recalls and formats the measurement values

        Returns:
            A string containing the measurement values for the current program, formatted as per the
            user manual, or None when no program has been loaded.
        """
        if self.OUT_values is None:
            return None

        channel_strings = ["MM,1111111111111111", ]
        for output_value in self.OUT_values:
            # Only return output if the OUT value is in the program
            channel_strings.append(self.parse_status(output_value))

        return ','.join(channel_strings)
| 26.98324 | 139 | 0.60766 |
ace5a9ddc20fa8d89861e066745f237d88f549fa | 118 | py | Python | tests/func/utils/predicates.py | Joacchim/BookMyComics | 21ed9c4ebfd7de46220f5638c7b4a0af60b4201a | [
"Apache-2.0"
] | null | null | null | tests/func/utils/predicates.py | Joacchim/BookMyComics | 21ed9c4ebfd7de46220f5638c7b4a0af60b4201a | [
"Apache-2.0"
] | 64 | 2018-09-05T13:36:55.000Z | 2020-08-16T19:56:20.000Z | tests/func/utils/predicates.py | BookMyComics-Developers/BookMyComics | e2639f5dff91176c84a6bb8c3b4d72f559b5f3ff | [
"Apache-2.0"
] | 1 | 2018-09-05T11:14:59.000Z | 2018-09-05T11:14:59.000Z | def with_next_page(self):
return self.has_next_page()
def with_prev_page(self):
return self.has_prev_page()
| 16.857143 | 31 | 0.745763 |
0579e26612757d17cced76393b1206b9651fa693 | 57,728 | py | Python | tensorflow/python/distribute/coordinator/cluster_coordinator.py | jay-jang/tensorflow | 6c3b7f4c3e89718c1812e9132a3546cb0dc8f53c | [
"Apache-2.0"
] | 5 | 2021-04-01T15:14:48.000Z | 2021-04-02T02:56:07.000Z | tensorflow/python/distribute/coordinator/cluster_coordinator.py | jay-jang/tensorflow | 6c3b7f4c3e89718c1812e9132a3546cb0dc8f53c | [
"Apache-2.0"
] | 2 | 2021-01-26T13:15:46.000Z | 2021-01-26T16:46:46.000Z | tensorflow/python/distribute/coordinator/cluster_coordinator.py | jay-jang/tensorflow | 6c3b7f4c3e89718c1812e9132a3546cb0dc8f53c | [
"Apache-2.0"
] | 3 | 2021-01-26T11:51:18.000Z | 2021-01-26T12:13:40.000Z | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for `ClusterCoordinator` and relevant cluster-worker related library.
This is currently under development and the API is subject to change.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import enum
import functools
import os
import re
import sys
import threading
import time
import weakref
from six.moves import queue
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import parameter_server_strategy_v2
from tensorflow.python.distribute.coordinator import metric_utils
from tensorflow.python.eager import cancellation
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import executor
from tensorflow.python.eager import function as tf_function
from tensorflow.python.framework import errors
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# Maximum time for failed worker to come back is 1 hour
_WORKER_MAXIMUM_RECOVERY_SEC = 3600
# Maximum size for queued closures, "infinite" if set to 0.
# When the maximum queue size is reached, further schedule calls will become
# blocking until some previously queued closures are executed on workers.
# Note that using an "infinite" queue size can take a non-trivial portion of
# memory, and even lead to coordinator OOM. Modify the size to a smaller value
# for coordinator with constrained memory resource (only recommended for
# advanced users). Also used in unit tests to ensure the correctness when the
# queue is full.
_CLOSURE_QUEUE_MAX_SIZE = 256 * 1024
# RPC error message from PS
_RPC_ERROR_FROM_PS = "GRPC error information from remote target /job:ps"
# InvalidArgumentError (unknown device) will not have "GRPC error..." string.
_JOB_WORKER_STRING_IDENTIFIER = "/job:worker"
class _RemoteValueStatus(enum.Enum):
"""The status of a `RemoteValue` object.
A `RemoteValue` object can have three states:
1) not ready: no value, no non-retryable error and not aborted;
2) aborted: i.e. the execution of function was aborted because of task
failure, but can be retried;
3) ready: i.e. has value or has non-tryable error;
The initial state of a `RemoteValue` is "not ready". When its corresponding
closure has
been executed at least once, it will become aborted or ready. The state
transitions are:
1) not ready -> 2) aborted:
when the corresponding closure is aborted due to worker failure, and the
worker failure is not immediately handled.
1) not ready -> 3) ready:
when the corresponding closure has been executed successfully.
2) aborted -> 3) ready:
when the `RemoteValue` is rebuilt by rerunning the corresponding closure
and the closure has been executed successfully.
3) ready -> 2) aborted:
when the corresponding closure had been executed successfully but later
the corresponding remote worker failed. This is currently only implemented
for resource `RemoteValue` like iterators.
"""
NOT_READY = "NOT_READY"
ABORTED = "ABORTED"
READY = "READY"
@tf_export("distribute.experimental.coordinator.RemoteValue", v1=[])
class RemoteValue(object):
  """An asynchronously available value of a scheduled function.

  Instances of this class are returned by
  `tf.distribute.experimental.coordinator.ClusterCoordinator.schedule`; the
  wrapped value only materializes later, once the scheduled function has
  actually executed on some worker.

  Passing a `tf.distribute.experimental.coordinator.RemoteValue` as an input
  to a subsequently scheduled function is currently not supported.

  Example:

  ```python
  strategy = tf.distribute.experimental.ParameterServerStrategy(
      cluster_resolver=...)
  coordinator = (
      tf.distribute.experimental.coordinator.ClusterCoordinator(strategy))

  with strategy.scope():
    v1 = tf.Variable(initial_value=0.0)
    v2 = tf.Variable(initial_value=1.0)

  @tf.function
  def worker_fn():
    v1.assign_add(0.1)
    v2.assign_sub(0.2)
    return v1.read_value() / v2.read_value()

  result = coordinator.schedule(worker_fn)
  # Note that `fetch()` gives the actual result instead of a `tf.Tensor`.
  assert result.fetch() == 0.125

  for _ in range(10):
    # `worker_fn` will be run on arbitrary workers that are available. The
    # `result` value will be available later.
    result = coordinator.schedule(worker_fn)
  ```
  """

  def fetch(self):
    """Waits for the result to become ready and returns it locally.

    This makes the value concrete by copying the remote value to the
    coordinator.

    Returns:
      The actual output of the `tf.function` associated with this
      `RemoteValue`, previously scheduled via
      `tf.distribute.experimental.coordinator.ClusterCoordinator.schedule`.
      This can be a single value or a structure of values, depending on what
      the function returns.

    Raises:
      tf.errors.CancelledError: If the function that produces this
        `RemoteValue` is aborted or cancelled due to failure.
    """
    raise NotImplementedError("Must be implemented in subclasses.")
class RemoteValueImpl(RemoteValue):
  """Implementation of `RemoteValue`."""

  def __init__(self, closure, type_spec):  # pylint: disable=super-init-not-called
    """Initializes a `RemoteValueImpl`.

    Args:
      closure: The closure from which the `RemoteValue` is created.
      type_spec: The type spec for this `RemoteValue` which is used to trace
        functions that take this `RemoteValue` as input.
    """
    self._closure = closure
    self._type_spec = type_spec
    # Raw (possibly remote) tensors, filled once the closure has run.
    self._values = None
    # Lazily cached numpy conversion of `_values`, built by `fetch()`.
    self._fetched_numpys = None
    # Non-retryable error raised by the closure, if any.
    self._error = None
    # Signals that `_status` (and the value/error fields) are consistent;
    # readers block on this event before touching the fields above.
    self._status_available_event = threading.Event()
    self._status = _RemoteValueStatus.NOT_READY

  def _set_aborted(self):
    # Mark the value as aborted (retryable). The fields are updated BEFORE
    # the event is set so waiters observe a consistent state.
    self._status = _RemoteValueStatus.ABORTED
    self._values = None
    self._error = None

    # Wake up any waiting thread and clear the event.
    self._status_available_event.set()

  def _rebuild_on(self, worker):
    # Re-run the closure on `worker`; block readers until it completes again.
    self._status_available_event.clear()
    # TODO(yuefengz): we may need to rebuild its inputs as well.
    self._closure.execute_on(worker)

  def _set_values(self, tensors):
    # Successful completion: publish the result, then release waiters.
    self._status = _RemoteValueStatus.READY
    self._values = tensors
    self._error = None
    self._status_available_event.set()

  def _set_error(self, exception):
    # Non-retryable failure: publish the error, then release waiters.
    self._status = _RemoteValueStatus.READY
    self._values = None
    self._error = exception
    self._status_available_event.set()

  def _get_values(self):
    # Blocks until a terminal status has been published.
    self._status_available_event.wait()
    return self._values

  def _get_error(self):
    # Blocks until a terminal status has been published.
    self._status_available_event.wait()
    return self._error

  def fetch(self):
    """Blocks until ready, then returns the result converted to numpy."""
    self._status_available_event.wait()
    if self._status is _RemoteValueStatus.ABORTED:
      raise errors.CancelledError(
          None, None,
          "The corresponding function is aborted. Please reschedule the "
          "function.")
    if self._error is not None:
      raise self._error
    if self._fetched_numpys is None:
      # Convert once and cache; objects without `.numpy()` pass through as-is.
      self._fetched_numpys = nest.map_structure(
          lambda x: x.numpy() if hasattr(x, "numpy") else x, self._values)
    return self._fetched_numpys
class InputError(Exception):
  """Error raised when the input to a scheduled function is faulty.

  Wraps the original exception so callers can distinguish input-pipeline
  failures from failures of the scheduled function itself.
  """

  def __init__(self, original_exception):
    # Keep a reference to the wrapped exception so callers can inspect it
    # programmatically instead of parsing the message.
    self.original_exception = original_exception
    message = ("Input has an error, the original exception is %r, "
               "error message is %s." %
               (original_exception, str(original_exception)))
    super().__init__(message)
def _maybe_rebuild_remote_values(worker, structure):
  """Attempts to return errors from `RemoteValue`s. Rebuilds them if needed.

  Walks `structure`; every aborted `RemoteValue` found is rebuilt by
  re-running its closure on `worker`, waiting out worker failures via the
  worker's failure handler. Returns the first error found in the structure,
  or None when everything is healthy.
  """
  errors_in_structure = []

  def _get_error(val):
    if isinstance(val, RemoteValue):
      if val._status is _RemoteValueStatus.ABORTED:  # pylint: disable=protected-access
        try:
          # If the worker fails mid-rebuild, the failure handler retries the
          # rebuild upon recovery via `on_recovery_fn`.
          with worker.failure_handler.wait_on_failure(
              on_recovery_fn=functools.partial(val._rebuild_on, worker),  # pylint: disable=protected-access
              worker_device_name=worker.device_name):
            val._rebuild_on(worker)  # pylint: disable=protected-access
        except Exception as e:  # pylint: disable=broad-except
          # The rebuild failed non-retryably; record the error on the value.
          val._set_error(e)  # pylint: disable=protected-access

      error = val._get_error()  # pylint: disable=protected-access
      if error:
        errors_in_structure.append(error)

  nest.map_structure(_get_error, structure)
  if errors_in_structure:
    return errors_in_structure[0]
  else:
    return None
def _maybe_get_remote_value(val):
  """Returns `val`'s concrete value, unwrapping it when it is a `RemoteValue`.

  Non-`RemoteValue` inputs are passed through untouched.
  """
  if not isinstance(val, RemoteValue):
    return val
  # A RemoteValue that carries an error has no value to hand back.
  if val._get_error():  # pylint: disable=protected-access
    raise AssertionError(
        "RemoteValue doesn't have a value because it has errors.")
  return val._get_values()  # pylint: disable=protected-access
def _maybe_as_type_spec(val):
  """Returns `val`'s type spec when it is a `RemoteValue`, else `val` itself."""
  if not isinstance(val, RemoteValue):
    return val
  spec = val._type_spec  # pylint: disable=protected-access
  if spec is None:
    raise ValueError("Output of a scheduled function that is not "
                     "tf.function cannot be the input of another function.")
  return spec
@tf_export("distribute.experimental.coordinator.PerWorkerValues", v1=[])
class PerWorkerValues(object):
  """A container that holds one value per worker.

  Each element of a `tf.distribute.experimental.coordinator.PerWorkerValues`
  lives on its corresponding worker. When an instance appears among the
  `args` or `kwargs` of
  `tf.distribute.experimental.coordinator.ClusterCoordinator.schedule()`,
  the element belonging to whichever worker ends up executing the function
  is the one passed in.

  Currently, the only supported way to obtain a
  `tf.distribute.experimental.coordinator.PerWorkerValues` is to call `iter`
  on the distributed dataset instance returned by
  `ClusterCoordinator.create_per_worker_dataset`. Constructing a custom
  `tf.distribute.experimental.coordinator.PerWorkerValues` is not yet
  supported.
  """

  def __init__(self, values):
    # Freeze the per-worker mapping as a tuple so it cannot be mutated.
    self._values = tuple(values)
def _select_worker_slice(worker_id, structured):
  """Replaces each `PerWorkerValues` in `structured` with its worker slice."""

  def _slice(value):
    if isinstance(value, PerWorkerValues):
      return value._values[worker_id]  # pylint: disable=protected-access
    return value

  return nest.map_structure(_slice, structured)
def _disallow_remote_value_as_input(structured):
  """Raises if any element of `structured` is a RemoteValue."""

  def _check(value):
    if isinstance(value, RemoteValue):
      raise ValueError(
          "`tf.distribute.experimental.coordinator.RemoteValue` used "
          "as an input to scheduled function is not yet "
          "supported.")

  nest.map_structure(_check, structured)
class Closure(object):
  """Hold a function to be scheduled and its arguments."""

  def __init__(self, function, cancellation_mgr, args=None, kwargs=None):
    """Initializes the closure.

    Args:
      function: the function to run remotely; a `tf.function`, a concrete
        function, or any other Python callable.
      cancellation_mgr: the `CancellationManager` used to wrap concrete
        functions so in-flight executions can be cancelled.
      args: positional arguments for `function`; may contain
        `PerWorkerValues` but not `RemoteValue`s.
      kwargs: keyword arguments for `function`; same restriction as `args`.

    Raises:
      ValueError: if `function` is not callable, or if a `RemoteValue` is
        used as input.
    """
    if not callable(function):
      raise ValueError("Function passed to `ClusterCoordinator.schedule` must "
                       "be a callable object.")
    self._args = args or ()
    self._kwargs = kwargs or {}
    # `RemoteValue` inputs are not yet supported; fail fast at construction.
    _disallow_remote_value_as_input(self._args)
    _disallow_remote_value_as_input(self._kwargs)
    if isinstance(function, def_function.Function):
      # Trace using worker 0's slice of the arguments; a single concrete
      # function is reused for every worker.
      replica_args = _select_worker_slice(0, self._args)
      replica_kwargs = _select_worker_slice(0, self._kwargs)
      # Note: no need to handle function registration failure since this kind of
      # failure will not raise exceptions as designed in the runtime. The
      # coordinator has to rely on subsequent operations that raise to catch
      # function registration failure.
      # Record the function tracing overhead. Note that we pass in the tracing
      # count of the def_function.Function as a state tracker, so that metrics
      # will only record the time for actual function tracing (i.e., excluding
      # function cache lookups).
      with metric_utils.monitored_timer(
          "function_tracing", state_tracker=function._get_tracing_count):  # pylint: disable=protected-access
        self._concrete_function = function.get_concrete_function(
            *nest.map_structure(_maybe_as_type_spec, replica_args),
            **nest.map_structure(_maybe_as_type_spec, replica_kwargs))
    elif isinstance(function, tf_function.ConcreteFunction):
      self._concrete_function = function
    if hasattr(self, "_concrete_function"):
      # If we have a concrete function, we get to retrieve the output type spec
      # via the structured_output.
      output_type_spec = func_graph.convert_structure_to_signature(
          self._concrete_function.structured_outputs)
      self._function = cancellation_mgr.get_cancelable_function(
          self._concrete_function)
    else:
      # Otherwise (i.e. what is passed in is a regular python function), we have
      # no such information.
      output_type_spec = None
      self._function = function
    # The `RemoteValue` handed back to the caller; filled in (or failed) once
    # the closure executes on a worker.
    self.output_remote_value = RemoteValueImpl(self, output_type_spec)

  def mark_cancelled(self):
    """Marks the closure's output as cancelled without executing it."""
    self.output_remote_value._set_error(  # pylint: disable=protected-access
        errors.CancelledError(
            None, None, "The corresponding function is "
            "cancelled. Please reschedule the function."))

  def execute_on(self, worker):
    """Executes the closure on the given worker.

    Args:
      worker: a `Worker` object.
    """
    # Substitute each `PerWorkerValues` input with this worker's component.
    replica_args = _select_worker_slice(worker.worker_index, self._args)
    replica_kwargs = _select_worker_slice(worker.worker_index, self._kwargs)
    # Rebuild any aborted per-worker resources; a rebuild error fails this
    # closure's output instead of raising here.
    e = (
        _maybe_rebuild_remote_values(worker, replica_args) or
        _maybe_rebuild_remote_values(worker, replica_kwargs))
    if e:
      if not isinstance(e, InputError):
        e = InputError(e)
      self.output_remote_value._set_error(e)  # pylint: disable=protected-access
      return
    with ops.device(worker.device_name):
      with context.executor_scope(worker.executor):
        with metric_utils.monitored_timer("closure_execution"):
          output_values = self._function(
              *nest.map_structure(_maybe_get_remote_value, replica_args),
              **nest.map_structure(_maybe_get_remote_value, replica_kwargs))
    self.output_remote_value._set_values(output_values)  # pylint: disable=protected-access
class _CoordinatedClosureQueue(object):
  """Manage a queue of closures, inflight count and errors from execution.

  This class is thread-safe.
  """

  def __init__(self):
    # `self._inflight_closure_count` only tracks the number of inflight closures
    # that are "in generation". Once an error occurs, error generation is
    # incremented and all subsequent arriving closures (from inflight) are
    # considered "out of generation".
    self._inflight_closure_count = 0

    self._queue_lock = threading.Lock()

    # Condition indicating that all pending closures (either queued or inflight)
    # have been processed, failed, or cancelled.
    self._stop_waiting_condition = threading.Condition(self._queue_lock)

    # Condition indicating that an item becomes available in queue (not empty).
    self._closures_queued_condition = threading.Condition(self._queue_lock)
    self._should_process_closures = True

    # Condition indicating that a queue slot becomes available (not full).
    # Note that even with "infinite" queue size, there is still a "practical"
    # size limit for the queue depending on host memory capacity, and thus the
    # queue will eventually become full with a lot of enqueued closures.
    self._queue_free_slot_condition = threading.Condition(self._queue_lock)

    # Condition indicating there is no inflight closures.
    self._no_inflight_closure_condition = threading.Condition(self._queue_lock)

    # Use to cancel in-flight closures.
    self._cancellation_mgr = cancellation.CancellationManager()

    if _CLOSURE_QUEUE_MAX_SIZE <= 0:
      logging.warning(
          "In a `ClusterCoordinator`, creating an infinite closure queue can "
          "consume a significant amount of memory and even lead to OOM.")
    self._queue = queue.Queue(maxsize=_CLOSURE_QUEUE_MAX_SIZE)
    self._error = None

    # The following is a lock to make sure when `wait` is called and before it
    # returns no `put` can be executed during this period. It is because `wait`
    # won't know what to do with newly put closures. This lock adds an cutoff
    # for `wait` so that closures put into the queue while waiting would not be
    # taken responsible by this `wait`.
    #
    # We cannot reuse the `self._queue_lock` since when `wait` waits for a
    # condition, the `self._queue_lock` will be released.
    #
    # We don't use a reader/writer's lock on purpose to reduce the complexity
    # of the code.
    self._put_wait_lock = threading.Lock()

  def stop(self):
    """Stops the queue; wakes blocked `get` callers so they can return None."""
    with self._queue_lock:
      self._should_process_closures = False
      # `Condition.notifyAll` is a deprecated alias since Python 3.10; use
      # `notify_all` throughout this class.
      self._closures_queued_condition.notify_all()

  def _cancel_all_closures(self):
    """Clears the queue and sets remaining closures cancelled error.

    This method expects self._queue_lock to be held prior to entry.
    """
    self._cancellation_mgr.start_cancel()
    # Drain in-flight closures before cancelling the queued ones.
    while self._inflight_closure_count > 0:
      self._no_inflight_closure_condition.wait()
    while True:
      try:
        closure = self._queue.get(block=False)
        self._queue_free_slot_condition.notify()
        closure.mark_cancelled()
      except queue.Empty:
        break
    # The cancellation manager cannot be reused once cancelled. After all
    # closures (queued or inflight) are cleaned up, recreate the cancellation
    # manager with clean state.
    # Note on thread-safety: this is triggered when one of theses
    # ClusterCoordinator APIs are called: `schedule`, `wait`, and `done`. At the
    # same time, no new closures can be constructed (which reads the
    # _cancellation_mgr to get cancellable functions).
    self._cancellation_mgr = cancellation.CancellationManager()

  def _raise_if_error(self):
    """Raises the error if one exists.

    If an error exists, cancel the closures in queue, raises it, and clear
    the error.

    This method expects self._queue_lock to be held prior to entry.
    """
    if self._error:
      logging.error("Start cancelling closures due to error %r: %s",
                    self._error, self._error)
      self._cancel_all_closures()
      try:
        raise self._error  # pylint: disable=raising-bad-type
      finally:
        # Clear the error so later `schedule`/`wait` calls can proceed.
        self._error = None

  def put(self, closure):
    """Put a closure into the queue for later execution.

    If `mark_failed` was called before `put`, the error from the first
    invocation of `mark_failed` will be raised.

    Args:
      closure: The `Closure` to put into the queue.
    """
    with self._put_wait_lock, self._queue_lock:
      self._queue_free_slot_condition.wait_for(lambda: not self._queue.full())
      self._queue.put(closure, block=False)
      self._raise_if_error()
      self._closures_queued_condition.notify()

  def get(self, timeout=None):
    """Return a closure from the queue to be executed.

    Returns None if the wait timed out or the queue has been stopped.
    """
    with self._queue_lock:
      while self._queue.empty() and self._should_process_closures:
        if not self._closures_queued_condition.wait(timeout=timeout):
          return None
      if not self._should_process_closures:
        return None
      closure = self._queue.get(block=False)
      self._queue_free_slot_condition.notify()
      self._inflight_closure_count += 1
      return closure

  def mark_finished(self):
    """Let the queue know that a closure has been successfully executed."""
    with self._queue_lock:
      if self._inflight_closure_count < 1:
        raise AssertionError("There is no inflight closures to mark_finished.")
      self._inflight_closure_count -= 1
      if self._inflight_closure_count == 0:
        self._no_inflight_closure_condition.notify_all()
      if self._queue.empty() and self._inflight_closure_count == 0:
        self._stop_waiting_condition.notify_all()

  def put_back(self, closure):
    """Put the closure back into the queue as it was not properly executed."""
    with self._queue_lock:
      if self._inflight_closure_count < 1:
        raise AssertionError("There is no inflight closures to put_back.")
      if self._error:
        # An error is already recorded; the re-queued closure would be
        # cancelled anyway, so mark it cancelled instead of re-queueing.
        closure.mark_cancelled()
      else:
        self._queue_free_slot_condition.wait_for(lambda: not self._queue.full())
        self._queue.put(closure, block=False)
        self._closures_queued_condition.notify()
      self._inflight_closure_count -= 1
      if self._inflight_closure_count == 0:
        self._no_inflight_closure_condition.notify_all()

  def wait(self, timeout=None):
    """Wait for all closures to be finished before returning.

    If `mark_failed` was called before or during `wait`, the error from the
    first invocation of `mark_failed` will be raised.

    Args:
      timeout: A float specifying a timeout for the wait in seconds.

    Returns:
      True unless the given timeout expired, in which case it returns False.
    """
    with self._put_wait_lock, self._queue_lock:
      while (not self._error and
             (not self._queue.empty() or self._inflight_closure_count > 0)):
        if not self._stop_waiting_condition.wait(timeout=timeout):
          return False
      self._raise_if_error()
      return True

  def mark_failed(self, e):
    """Sets error and unblocks any wait() call."""
    with self._queue_lock:
      # TODO(yuefengz): maybe record all failure and give users more
      # information?
      if self._inflight_closure_count < 1:
        raise AssertionError("There is no inflight closures to mark_failed.")
      if self._error is None:
        # Only the first failure is kept; subsequent ones are dropped.
        self._error = e
      self._inflight_closure_count -= 1
      if self._inflight_closure_count == 0:
        self._no_inflight_closure_condition.notify_all()
      self._stop_waiting_condition.notify_all()

  def done(self):
    """Returns true if the queue is empty and there is no inflight closure.

    If `mark_failed` was called before `done`, the error from the first
    invocation of `mark_failed` will be raised.
    """
    with self._queue_lock:
      self._raise_if_error()
      return self._queue.empty() and self._inflight_closure_count == 0
class WorkerPreemptionHandler(object):
  """Handles worker preemptions."""

  def __init__(self, server_def, cluster):
    """Starts the daemon thread that waits for and handles preemptions.

    Args:
      server_def: the server def re-sent to the runtime on recovery to
        refresh worker connectivity.
      cluster: the `Cluster` this handler serves; consulted to decide whether
        a reported PS failure is transient.
    """
    self._server_def = server_def
    self._cluster = cluster
    self._cluster_update_lock = threading.Lock()
    self._cluster_due_for_update_or_finish = threading.Event()
    self._worker_up_cond = threading.Condition(self._cluster_update_lock)
    self._should_preemption_thread_run = True
    threading.Thread(target=self._preemption_handler,
                     name="WorkerPreemptionHandler",
                     daemon=True).start()

  def stop(self):
    """Ensure the worker preemption thread is closed."""
    self._should_preemption_thread_run = False
    with self._cluster_update_lock:
      # Wake `_preemption_handler` so it observes the stop flag and exits.
      self._cluster_due_for_update_or_finish.set()

  def _validate_preemption_failure(self, e):
    """Validates that the given exception represents worker preemption."""
    if _is_worker_failure(e):
      return
    # Not a preemption: re-raise so callers surface the real error.
    raise e

  @contextlib.contextmanager
  def wait_on_failure(self,
                      on_failure_fn=None,
                      on_recovery_fn=None,
                      worker_device_name="(unknown)"):
    """Catches worker preemption error and wait until failed workers are back.

    Args:
      on_failure_fn: an optional function to run if preemption happens.
      on_recovery_fn: an optional function to run when a worker is recovered
        from preemption.
      worker_device_name: the device name of the worker instance that is passing
        through the failure.

    Yields:
      None.
    """
    try:
      yield
    except errors.OpError as e:
      # If the error is due to temporary connectivity issues between worker and
      # ps, put back closure, ignore error and do not mark worker as failure.
      if self._cluster._record_and_ignore_transient_ps_failure(e):  # pylint: disable=protected-access
        if on_failure_fn:
          on_failure_fn()
        return
      self._validate_preemption_failure(e)
      logging.error("Worker %s failed with error: %s", worker_device_name, e)
      if on_failure_fn:
        on_failure_fn()
      with self._cluster_update_lock:
        # Signal the preemption thread to refresh the cluster, then block
        # until the worker is reported back (or the recovery window lapses).
        self._cluster_due_for_update_or_finish.set()
        self._worker_up_cond.wait(_WORKER_MAXIMUM_RECOVERY_SEC)
      logging.info("Worker %s has been recovered.", worker_device_name)
      if on_recovery_fn:
        # The recovery function itself may hit another preemption; guard it
        # with this same handler, recursively.
        with self.wait_on_failure(
            on_recovery_fn=on_recovery_fn,
            worker_device_name=worker_device_name):
          on_recovery_fn()

  def _preemption_handler(self):
    """A loop that handles preemption.

    This loop waits for signal of worker preemption and upon worker preemption,
    it waits until all workers are back and updates the cluster about the
    restarted workers.
    """
    while True:
      self._cluster_due_for_update_or_finish.wait()
      if not self._should_preemption_thread_run:
        break
      with self._cluster_update_lock:
        try:
          # TODO(haoyuzhang): support partial cluster recovery
          logging.info("Cluster now being recovered.")
          context.context().update_server_def(self._server_def)

          # Cluster updated successfully, clear the update signal, and notify
          # all workers that they are recovered from failure.
          logging.info("Cluster successfully recovered.")
          self._worker_up_cond.notify_all()
          self._cluster_due_for_update_or_finish.clear()
        except Exception as e:  # pylint: disable=broad-except
          self._validate_preemption_failure(e)
          # NOTE: Since the first RPC (GetStatus) of update_server_def is
          # currently blocking by default, error should only happen if:
          # (1) More workers failed while waiting for the previous workers to
          # come back;
          # (2) Worker failed when exchanging subsequent RPCs after the first
          # RPC returns.
          # Consider adding backoff retry logic if we see the error logged
          # too frequently.
          logging.error("Cluster update failed with error: %s. Retrying...", e)
class Worker(object):
  """A worker in a cluster.

  Attributes:
    worker_index: The index of the worker in the cluster.
    device_name: The device string of the worker, e.g. "/job:worker/task:1".
    executor: The worker's executor for remote function execution.
    failure_handler: The failure handler used to handler worker preemption
      failure.
  """

  def __init__(self, worker_index, device_name, cluster):
    self.worker_index = worker_index
    self.device_name = device_name
    # Synchronous executor: closures on this worker run one at a time.
    self.executor = executor.new_executor(enable_async=False)
    self.failure_handler = cluster.failure_handler
    self._cluster = cluster
    # Weak references to per-worker resources (RemoteValues) so they can be
    # marked aborted on preemption without being kept alive by this list.
    self._resource_remote_value_refs = []
    self._should_worker_thread_run = True

    # Worker threads need to start after `Worker`'s initialization.
    threading.Thread(target=self._process_queue,
                     name="WorkerClosureProcessingLoop-%d" % self.worker_index,
                     daemon=True).start()

  def stop(self):
    """Ensure the worker thread is closed."""
    self._should_worker_thread_run = False

  def _set_resources_aborted(self):
    """Marks every registered per-worker resource as aborted."""
    # TODO(yuefengz): maybe we can query whether a tensor is valid or not
    # instead of marking a tensor aborted?
    for weakref_resource in self._resource_remote_value_refs:
      resource = weakref_resource()
      if resource:
        resource._set_aborted()  # pylint: disable=protected-access

  def _set_dead(self):
    raise NotImplementedError("_set_dead is not implemented.")

  def _process_closure(self, closure):
    """Runs a closure with preemption handling."""
    assert closure is not None
    try:
      with self._cluster.failure_handler.wait_on_failure(
          on_failure_fn=lambda: self._cluster._closure_queue.put_back(closure),  # pylint: disable=protected-access
          on_recovery_fn=self._set_resources_aborted,
          worker_device_name=self.device_name):
        closure.execute_on(self)
        # TODO(yuefengz): we don't have to materialize results every step.
        with metric_utils.monitored_timer("remote_value_fetch"):
          closure.output_remote_value.fetch()
        self._cluster._closure_queue.mark_finished()  # pylint: disable=protected-access
    except Exception as e:  # pylint: disable=broad-except
      # Avoid logging the derived cancellation error
      if not isinstance(e, errors.CancelledError):
        logging.error(
            "/job:worker/task:%d encountered the following error when "
            "processing closure: %r:%s", self.worker_index, e, e)
      closure.output_remote_value._set_error(e)  # pylint: disable=protected-access
      self._cluster._closure_queue.mark_failed(e)  # pylint: disable=protected-access

  def _maybe_delay(self):
    """Delay if corresponding env vars are set."""
    # If the following two env vars variables are set. Scheduling for workers
    # will start in a staggered manner. Worker i will wait for
    # `TF_COORDINATOR_SCHEDULE_START_DELAY` * i seconds, not exceeding
    # `TF_COORDINATOR_SCHEDULE_START_DELAY_MAX`.
    delay_secs = int(os.environ.get("TF_COORDINATOR_SCHEDULE_START_DELAY", "0"))
    delay_cap = int(
        os.environ.get("TF_COORDINATOR_SCHEDULE_START_DELAY_MAX", "0"))
    if delay_cap:
      delay_secs = min(delay_secs * self.worker_index, delay_cap)
    if delay_secs > 0:
      logging.info("Worker %d sleeping for %d seconds before running function",
                   self.worker_index, delay_secs)
      time.sleep(delay_secs)

  def _process_queue(self):
    """Function running in a thread to process closure queues."""
    self._maybe_delay()
    while self._should_worker_thread_run:
      closure = self._cluster._closure_queue.get()  # pylint: disable=protected-access
      if not self._should_worker_thread_run or closure is None:
        return
      self._process_closure(closure)

  def _create_resource(self, function, args=None, kwargs=None):
    """Synchronously creates a per-worker resource represented by a `RemoteValue`.

    Args:
      function: the resource function to be run remotely. It should be a
        `tf.function`, a concrete function or a Python function.
      args: positional arguments to be passed to the function.
      kwargs: keyword arguments to be passed to the function.

    Returns:
      one or several RemoteValue objects depending on the function return
      values.
    """
    # Some notes about the concurrency: currently all the activities related to
    # the same worker such as creating resources, setting resources' aborted
    # status, and executing closures happen on the same thread. This allows us
    # to have simpler logic of concurrency.

    closure = Closure(
        function,
        self._cluster._closure_queue._cancellation_mgr,  # pylint: disable=protected-access
        args=args,
        kwargs=kwargs)
    resource_remote_value = closure.output_remote_value
    self._register_resource(resource_remote_value)

    # The following is a short-term solution to lazily create resources in
    # parallel.
    # TODO(b/160343165): we should create resources eagerly, i.e. schedule the
    # resource creation function as soon as users call this method.
    resource_remote_value._set_aborted()  # pylint: disable=protected-access
    return resource_remote_value

  def _register_resource(self, resource_remote_value):
    """Tracks `resource_remote_value` via a weak reference for abort handling."""
    if not isinstance(resource_remote_value, RemoteValue):
      raise ValueError("Resource being registered is not of type "
                       "`tf.distribute.experimental.coordinator.RemoteValue`.")
    self._resource_remote_value_refs.append(weakref.ref(resource_remote_value))
class Cluster(object):
  """A cluster with workers.

  We assume all function errors are fatal and based on this assumption our
  error reporting logic is:
  1) Both `schedule` and `join` can raise a non-retryable error which is the
  first error seen by the coordinator from any previously scheduled functions.
  2) When an error is raised, there is no guarantee on how many previously
  scheduled functions have been executed; functions that have not been executed
  will be thrown away and marked as cancelled.
  3) After an error is raised, the internal state of error will be cleared.
  I.e. functions can continue to be scheduled and subsequent calls of `schedule`
  or `join` will not raise the same error again.

  Attributes:
    failure_handler: The failure handler used to handler worker preemption
      failure.
    workers: a list of `Worker` objects in the cluster.
  """

  def __init__(self, strategy):
    """Initializes the cluster instance."""
    self._num_workers = strategy._num_workers
    self._num_ps = strategy._num_ps
    # Ignore PS failures reported by workers due to transient connection errors.
    # Transient connectivity issues between workers and PS are relayed by the
    # workers to the coordinator, leading the coordinator to believe that there
    # are PS failures. The difference between transient vs. permanent PS failure
    # is the number of reports from the workers. When this env var is set to a
    # positive integer K, the coordinator ignores up to K reports of a failed PS
    # task, i.e., only when there are more than K trials of executing closures
    # fail due to errors from the same PS instance do we consider the PS
    # instance encounters a failure.
    # TODO(b/164279603): Remove this workaround when the underlying connectivity
    # issue in gRPC server is resolved.
    self._transient_ps_failures_threshold = int(
        os.environ.get("TF_COORDINATOR_IGNORE_TRANSIENT_PS_FAILURES", 3))
    self._potential_ps_failures_lock = threading.Lock()
    self._potential_ps_failures_count = [0] * self._num_ps

    self._closure_queue = _CoordinatedClosureQueue()
    self.failure_handler = WorkerPreemptionHandler(context.get_server_def(),
                                                   self)
    worker_device_strings = [
        "/job:worker/replica:0/task:%d" % i for i in range(self._num_workers)
    ]
    self.workers = [
        Worker(i, w, self) for i, w in enumerate(worker_device_strings)
    ]
    self._strategy = strategy

  def stop(self):
    """Stop worker, worker preemption threads, and the closure queue."""
    self.failure_handler.stop()

    for worker in self.workers:
      worker.stop()
    self._closure_queue.stop()

  def _record_and_ignore_transient_ps_failure(self, e):
    """Records potential PS failures and return if failure should be ignored."""
    if self._transient_ps_failures_threshold <= 0 or not _is_ps_failure(e):
      return False

    ps_tasks = _extract_failed_ps_instances(str(e))
    with self._potential_ps_failures_lock:
      for t in ps_tasks:
        self._potential_ps_failures_count[t] += 1
        # The number of UnavailableError encountered on this PS task exceeds the
        # maximum number of ignored error
        if (self._potential_ps_failures_count[t] >=
            self._transient_ps_failures_threshold):
          return False
    return True

  def schedule(self, function, args, kwargs):
    """Schedules `function` to be dispatched to a worker for execution.

    Args:
      function: The function to be dispatched to a worker for execution
        asynchronously.
      args: Positional arguments for `fn`.
      kwargs: Keyword arguments for `fn`.

    Returns:
      A `RemoteValue` object.
    """
    # NOTE(review): `_being_scheduled` presumably tells the strategy that
    # tracing happens under `schedule` — confirm against the strategy code.
    self._strategy.extended._being_scheduled = True  # pylint: disable=protected-access
    closure = Closure(
        function,
        self._closure_queue._cancellation_mgr,  # pylint: disable=protected-access
        args=args,
        kwargs=kwargs)
    self._strategy.extended._being_scheduled = False  # pylint: disable=protected-access
    self._closure_queue.put(closure)
    return closure.output_remote_value

  def join(self):
    """Blocks until all scheduled functions are executed."""
    self._closure_queue.wait()

  def done(self):
    """Returns true if all scheduled functions are executed."""
    return self._closure_queue.done()
@tf_export("distribute.experimental.coordinator.ClusterCoordinator", v1=[])
class ClusterCoordinator(object):
"""An object to schedule and coordinate remote function execution.
This class is used to create fault-tolerant resources and dispatch functions
to remote TensorFlow servers.
Currently, this class is not supported to be used in a standalone manner. It
should be used in conjunction with a `tf.distribute` strategy that is designed
to work with it. The `ClusterCoordinator` class currently only works
`tf.distribute.experimental.ParameterServerStrategy`.
__The `schedule`/`join` APIs__
The most important APIs provided by this class is the `schedule`/`join` pair.
The `schedule` API is non-blocking in that it queues a `tf.function` and
returns a `RemoteValue` immediately. The queued functions will be dispatched
to remote workers in background threads and their `RemoteValue`s will be
filled asynchronously. Since `schedule` doesn’t require worker assignment, the
`tf.function` passed in can be executed on any available worker. If the worker
it is executed on becomes unavailable before its completion, it will be
migrated to another worker. Because of this fact and function execution is not
atomic, a function may be executed more than once.
__Handling Task Failure__
This class when used with
`tf.distribute.experimental.ParameterServerStrategy`, comes with built-in
fault tolerance for worker failures. That is, when some workers are not
available for any reason to be reached from the coordinator, the training
progress continues to be made with the remaining workers. Upon recovery of a
failed worker, it will be added for function execution after datasets created
by `create_per_worker_dataset` are re-built on it.
When a parameter server fails, a `tf.errors.UnavailableError` is raised by
`schedule`, `join` or `done`. In this case, in addition to bringing back the
failed parameter server, users should restart the coordinator so that it
reconnects to workers and parameter servers, re-creates the variables, and
loads checkpoints. If the coordinator fails, after the user brings it back,
the program will automatically connect to workers and parameter servers, and
continue the progress from a checkpoint.
It is thus essential that in user's program, a checkpoint file is periodically
saved, and restored at the start of the program. If an
`tf.keras.optimizers.Optimizer` is checkpointed, after restoring from a
  checkpoint, its `iterations` property roughly indicates the number of steps
that have been made. This can be used to decide how many epochs and steps are
needed before the training completion.
See `tf.distribute.experimental.ParameterServerStrategy` docstring for an
example usage of this API.
This is currently under development, and the API as well as implementation
are subject to changes.
"""
def __init__(self, strategy):
"""Initialization of a `ClusterCoordinator` instance.
Args:
strategy: a supported `tf.distribute.Strategy` object. Currently, only
`tf.distribute.experimental.ParameterServerStrategy` is supported.
Raises:
ValueError: if the strategy being used is not supported.
"""
if not isinstance(strategy,
parameter_server_strategy_v2.ParameterServerStrategyV2):
raise ValueError(
"Only `tf.distribute.experimental.ParameterServerStrategy` "
"is supported to work with "
"`tf.distribute.experimental.coordinator.ClusterCoordinator` "
"currently.")
self._strategy = strategy
self._strategy.extended._used_with_coordinator = True
self._cluster = Cluster(strategy)
  def __del__(self):
    # Best-effort cleanup: stop the cluster's worker threads, the preemption
    # handler thread, and the closure queue on garbage collection.
    self._cluster.stop()
  @property
  def strategy(self):
    """Returns the `Strategy` associated with the `ClusterCoordinator`.

    This is the strategy instance passed in at construction time.
    """
    return self._strategy
def schedule(self, fn, args=None, kwargs=None):
"""Schedules `fn` to be dispatched to a worker for asynchronous execution.
This method is non-blocking in that it queues the `fn` which will be
executed later and returns a
`tf.distribute.experimental.coordinator.RemoteValue` object immediately.
`fetch` can be called on it to wait for the function execution to finish
and retrieve its output from a remote worker. On the other hand, call
`tf.distribute.experimental.coordinator.ClusterCoordinator.join` to wait for
all scheduled functions to finish.
`schedule` guarantees that `fn` will be executed on a worker at least once;
it could be more than once if its corresponding worker fails in the middle
of its execution. Note that since worker can fail at any point when
executing the function, it is possible that the function is partially
executed, but `tf.distribute.experimental.coordinator.ClusterCoordinator`
guarantees that in those events, the function will eventually be executed on
any worker that is available.
If any previously scheduled function raises an error, `schedule` will raise
any one of those errors, and clear the errors collected so far. What happens
here, some of the previously scheduled functions may have not been executed.
User can call `fetch` on the returned
`tf.distribute.experimental.coordinator.RemoteValue` to inspect if they have
executed, failed, or cancelled, and reschedule the corresponding function if
needed.
When `schedule` raises, it guarantees that there is no function that is
still being executed.
At this time, there is no support of worker assignment for function
execution, or priority of the workers.
`args` and `kwargs` are the arguments passed into `fn`, when `fn` is
executed on a worker. They can be
`tf.distribute.experimental.coordinator.PerWorkerValues` and in this case,
the argument will be substituted with the corresponding component on the
target worker. Arguments that are not
`tf.distribute.experimental.coordinator.PerWorkerValues` will be passed into
`fn` as-is. Currently, `tf.distribute.experimental.coordinator.RemoteValue`
is not supported to be input `args` or `kwargs`.
Args:
fn: A `tf.function`; the function to be dispatched to a worker for
execution asynchronously. Regular python funtion is not supported to be
scheduled.
args: Positional arguments for `fn`.
kwargs: Keyword arguments for `fn`.
Returns:
A `tf.distribute.experimental.coordinator.RemoteValue` object that
represents the output of the function scheduled.
Raises:
Exception: one of the exceptions caught by the coordinator from any
previously scheduled function, since the last time an error was thrown
or since the beginning of the program.
"""
if not isinstance(fn,
(def_function.Function, tf_function.ConcreteFunction)):
raise TypeError(
"`tf.distribute.experimental.coordinator.ClusterCoordinator.schedule`"
" only accepts a `tf.function` or a concrete function.")
# Slot variables are usually created during function tracing time; thus
# `schedule` needs to be called within the `strategy.scope()`.
with self.strategy.scope():
return self._cluster.schedule(fn, args=args, kwargs=kwargs)
  def join(self):
    """Blocks until all the scheduled functions have finished execution.

    If any previously scheduled function raises an error, `join` will fail by
    raising any one of those errors, and clear the errors collected so far. If
    this happens, some of the previously scheduled functions may have not been
    executed. Users can call `fetch` on the returned
    `tf.distribute.experimental.coordinator.RemoteValue` to inspect if they have
    executed, failed, or cancelled. If some that have been cancelled need to be
    rescheduled, users should call `schedule` with the function again.

    When `join` returns or raises, it guarantees that there is no function that
    is still being executed.

    Raises:
      Exception: one of the exceptions caught by the coordinator by any
        previously scheduled function since the last time an error was thrown or
        since the beginning of the program.
    """
    # Delegates to the closure queue's `wait`, which blocks until nothing is
    # queued or in flight, and re-raises any recorded failure.
    self._cluster.join()
  def done(self):
    """Returns whether all the scheduled functions have finished execution.

    If any previously scheduled function raises an error, `done` will fail by
    raising any one of those errors.

    When `done` returns True or raises, it guarantees that there is no function
    that is still being executed.

    Returns:
      Whether all the scheduled functions have finished execution.
    Raises:
      Exception: one of the exceptions caught by the coordinator by any
        previously scheduled function since the last time an error was thrown or
        since the beginning of the program.
    """
    # Non-blocking: checks the closure queue's empty/inflight state.
    return self._cluster.done()
  def create_per_worker_dataset(self, dataset_fn):
    """Create dataset on workers by calling `dataset_fn` on worker devices.

    This creates the given dataset generated by dataset_fn on workers
    and returns an object that represents the collection of those individual
    datasets. Calling `iter` on such collection of datasets returns a
    `tf.distribute.experimental.coordinator.PerWorkerValues`, which is a
    collection of iterators, where the iterators have been placed on respective
    workers.

    Calling `next` on a `PerWorkerValues` of iterator is unsupported. The
    iterator is meant to be passed as an argument into
    `tf.distribute.experimental.coordinator.ClusterCoordinator.schedule`. When
    the scheduled function is about to be executed by a worker, the
    function will receive the individual iterator that corresponds to the
    worker. The `next` method can be called on an iterator inside a
    scheduled function when the iterator is an input of the function.

    Currently the `schedule` method assumes workers are all the same and thus
    assumes the datasets on different workers are the same, except they may be
    shuffled differently if they contain a `dataset.shuffle` operation and a
    random seed is not set. Because of this, we also recommend the datasets to
    be repeated indefinitely and schedule a finite number of steps instead of
    relying on the `OutOfRangeError` from a dataset.

    Example:

    ```python
    strategy = tf.distribute.experimental.ParameterServerStrategy(
        cluster_resolver=...)
    coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(
        strategy=strategy)

    @tf.function
    def worker_fn(iterator):
      return next(iterator)

    def per_worker_dataset_fn():
      return strategy.distribute_datasets_from_function(
          lambda x: tf.data.Dataset.from_tensor_slices([3] * 3))

    per_worker_dataset = coordinator.create_per_worker_dataset(
        per_worker_dataset_fn)
    per_worker_iter = iter(per_worker_dataset)
    remote_value = coordinator.schedule(worker_fn, args=(per_worker_iter,))
    assert remote_value.fetch() == 3
    ```

    Args:
      dataset_fn: The dataset function that returns a dataset. This is to be
        executed on the workers.

    Returns:
      An object that represents the collection of those individual
      datasets. `iter` is expected to be called on this object that returns
      a `tf.distribute.experimental.coordinator.PerWorkerValues` of the
      iterators (that are on the workers).
    """
    # Each worker device serves as its own input device, so the dataset is
    # materialized on the worker itself rather than on the coordinator.
    input_workers = input_lib.InputWorkers([
        (w.device_name, [w.device_name]) for w in self._cluster.workers
    ])

    return _PerWorkerDistributedDataset(dataset_fn, input_workers, self)
def _create_per_worker_resources(self, fn, args=None, kwargs=None):
"""Synchronously create resources on the workers.
The resources are represented by
`tf.distribute.experimental.coordinator.RemoteValue`s.
Args:
fn: The function to be dispatched to all workers for execution
asynchronously.
args: Positional arguments for `fn`.
kwargs: Keyword arguments for `fn`.
Returns:
A `tf.distribute.experimental.coordinator.PerWorkerValues` object, which
wraps a tuple of `tf.distribute.experimental.coordinator.RemoteValue`
objects.
"""
results = []
for w in self._cluster.workers:
results.append(w._create_resource(fn, args=args, kwargs=kwargs)) # pylint: disable=protected-access
return PerWorkerValues(tuple(results))
def fetch(self, val):
"""Blocking call to fetch results from the remote values.
This is a wrapper around
`tf.distribute.experimental.coordinator.RemoteValue.fetch` for a
`RemoteValue` structure; it returns the execution results of
`RemoteValue`s. If not ready, wait for them while blocking the caller.
Example:
```python
strategy = ...
coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(
strategy)
def dataset_fn():
return tf.data.Dataset.from_tensor_slices([1, 1, 1])
with strategy.scope():
v = tf.Variable(initial_value=0)
@tf.function
def worker_fn(iterator):
def replica_fn(x):
v.assign_add(x)
return v.read_value()
return strategy.run(replica_fn, args=(next(iterator),))
distributed_dataset = coordinator.create_per_worker_dataset(dataset_fn)
distributed_iterator = iter(distributed_dataset)
result = coordinator.schedule(worker_fn, args=(distributed_iterator,))
assert coordinator.fetch(result) == 1
```
Args:
val: The value to fetch the results from. If this is structure of
`tf.distribute.experimental.coordinator.RemoteValue`, `fetch()` will be
called on the individual
`tf.distribute.experimental.coordinator.RemoteValue` to get the result.
Returns:
If `val` is a `tf.distribute.experimental.coordinator.RemoteValue` or a
structure of `tf.distribute.experimental.coordinator.RemoteValue`s,
return the fetched `tf.distribute.experimental.coordinator.RemoteValue`
values immediately if they are available, or block the call until they are
available, and return the fetched
`tf.distribute.experimental.coordinator.RemoteValue` values with the same
structure. If `val` is other types, return it as-is.
"""
def _maybe_fetch(val):
if isinstance(val, RemoteValue):
return val.fetch()
else:
return val
# TODO(yuefengz): we should fetch values in a batch.
return nest.map_structure(_maybe_fetch, val)
@contextlib.contextmanager
def handle_parameter_server_failure():
  """Context manager turning parameter-server unavailability into a restart.

  If the wrapped block raises `errors.UnavailableError` (typically a failed
  RPC to a parameter server) and the environment variable
  `TF_CLIENT_NON_FATAL_RESTART_EXIT_CODE` is set, the process exits with
  that code so an external supervisor can restart it. Otherwise the error
  is re-raised unchanged.
  """
  try:
    yield
  except errors.UnavailableError:
    # Previously bound the exception as an unused `e` with a stale
    # "broad-except" pylint disable; both removed.
    restart_exit_code = os.environ.get(
        "TF_CLIENT_NON_FATAL_RESTART_EXIT_CODE", None)
    if restart_exit_code is None:
      raise
    # NOTE: a non-integer value in the environment variable raises
    # ValueError here (same as the original behavior).
    sys.exit(int(restart_exit_code))
class _PerWorkerDistributedDataset(object):
  """Represents worker-distributed datasets created from dataset function."""

  def __init__(self, dataset_fn, input_workers, coordinator):
    """Makes an iterable from datasets created by the given function.

    Args:
      dataset_fn: A function that returns a `Dataset`.
      input_workers: an `InputWorkers` object.
      coordinator: a `ClusterCoordinator` object, used to create dataset
        resources.
    """
    def disallow_variable_creation(next_creator, **kwargs):
      raise ValueError("Creating variables in `dataset_fn` is not allowed.")

    # Normalize `dataset_fn` into a ConcreteFunction so it can later be
    # dispatched to workers; variable creation is blocked while tracing
    # because the function is only supposed to build a dataset.
    if isinstance(dataset_fn, def_function.Function):
      with variable_scope.variable_creator_scope(disallow_variable_creation):
        dataset_fn = dataset_fn.get_concrete_function()
    elif not isinstance(dataset_fn, tf_function.ConcreteFunction):
      with variable_scope.variable_creator_scope(disallow_variable_creation):
        dataset_fn = def_function.function(dataset_fn).get_concrete_function()
    self._dataset_fn = dataset_fn
    self._input_workers = input_workers
    self._coordinator = coordinator
    self._element_spec = None

  def __iter__(self):
    # We would like users to create iterators outside `tf.function`s so that we
    # can track them.
    if (not context.executing_eagerly() or
        ops.get_default_graph().building_function):
      raise RuntimeError(
          "__iter__() is not supported inside of tf.function or in graph mode.")

    def _create_per_worker_iterator():
      dataset = self._dataset_fn()
      return iter(dataset)

    # If _PerWorkerDistributedDataset.__iter__ is called multiple
    # times, for the same object it should only create and register resource
    # once. Using object id to distinguish different iterator resources.
    per_worker_iterator = self._coordinator._create_per_worker_resources(
        _create_per_worker_iterator)

    # Setting type_spec of each RemoteValue so that functions taking these
    # RemoteValues as inputs can be traced.
    for iterator_remote_value in per_worker_iterator._values:
      iterator_remote_value._type_spec = (  # pylint: disable=protected-access
          iterator_ops.IteratorSpec(
              self._dataset_fn.structured_outputs.element_spec))
    return _PerWorkerDistributedIterator(per_worker_iterator._values)

  @property
  def element_spec(self):
    """The type specification of an element of this dataset."""
    # Deliberately unsupported: this dataset cannot be traced into a
    # tf.function, so there is no meaningful element spec to expose.
    raise NotImplementedError("Passing `AsyncDistributedDataset` to a "
                              "tf.function is not supported.")
class _PerWorkerDistributedIterator(PerWorkerValues):
  """Per-worker iterator collection used by `ClusterCoordinator`.

  Instances are meant to be passed into scheduled functions; advancing the
  iterators from the coordinator itself is unsupported.
  """

  def __next__(self):
    return self.get_next()

  def get_next(self, name=None):
    """Unsupported on the coordinator; raises `NotImplementedError`."""
    raise NotImplementedError(
        "Iterating over an `AsyncDistributedIterator` "
        "is not supported right now.")
def _extract_failed_ps_instances(err_msg):
"""Return a set of potentially failing ps instances from error message."""
tasks = re.findall("/job:ps/replica:0/task:[0-9]+", err_msg)
return set(int(t.split(":")[-1]) for t in tasks)
def _is_ps_failure(error):
  """Whether the error is considered a parameter server failure."""
  if not isinstance(error, errors.UnavailableError):
    return False
  return _RPC_ERROR_FROM_PS in str(error)
def _is_worker_failure(error):
  """Whether the error is considered a worker failure.

  Args:
    error: an exception raised while executing a function remotely.

  Returns:
    True if the error most likely indicates that a worker (rather than a
    parameter server or the program itself) has failed.
  """
  # Convert once; the original recomputed str(error) in every check below.
  error_message = str(error)
  if _JOB_WORKER_STRING_IDENTIFIER not in error_message:
    return False
  if _RPC_ERROR_FROM_PS in error_message:
    return False

  # TODO(haoyuzhang): Consider using special status code if error from a
  # remote is derived from RPC errors originated from other hosts.
  if isinstance(error, (errors.UnavailableError, errors.AbortedError)):
    return True

  # The following error could happen when the remote task fails and restarts
  # in a very short interval during which no RPCs were exchanged to detect the
  # failure. In that case, gRPC allows channel (which is different from a
  # connection) to be reused for a replaced server listening to same address.
  if isinstance(error, errors.InvalidArgumentError):
    if ("unknown device" in error_message or
        "Unable to find the relevant tensor remote_handle" in error_message):
      # TODO(b/159961667): Fix "Unable to find the relevant tensor
      # remote_handle" part.
      return True

  # TODO(b/162541228): The following 2 types of errors are very rare and only
  # observed in large-scale testing. The types of errors should be reduced.
  # This could happen when the function registration fails. In the observed
  # cases this only happens to the dataset related functions.
  if isinstance(error, errors.NotFoundError):
    if ("is neither a type of a primitive operation nor a name of a function "
        "registered" in error_message):
      return True

  return False
| 40.539326 | 115 | 0.722076 |
9ec79c3fa47ed896a76ce7b01e1b0af8d8cfaeb4 | 10,053 | py | Python | setup.py | chryswoods/python_pack_and_doc | 6d63d4e91dd93abf8f618d75c161ab65f943dadd | [
"Apache-2.0"
] | 5 | 2020-09-23T14:04:39.000Z | 2020-10-02T14:01:16.000Z | setup.py | chryswoods/python_pack_and_doc | 6d63d4e91dd93abf8f618d75c161ab65f943dadd | [
"Apache-2.0"
] | null | null | null | setup.py | chryswoods/python_pack_and_doc | 6d63d4e91dd93abf8f618d75c161ab65f943dadd | [
"Apache-2.0"
] | 1 | 2020-09-25T16:25:24.000Z | 2020-09-25T16:25:24.000Z | # Package setup copied from
# https://github.com/FedericoStra/cython-package-example
# Thanks - this was really helpful :-)
import os
import versioneer
from setuptools import setup, Extension
import distutils.sysconfig
import distutils.ccompiler
import multiprocessing
from glob import glob
import platform
import tempfile
import shutil
import sys
# Cython is optional: without it we fall back to pre-generated C sources.
try:
    from Cython.Build import cythonize
    have_cython = True
except Exception:
    have_cython = False

# Keep a reference to the builtin so the batch-friendly `input` wrapper
# defined inside setup_package() can delegate to it.
_system_input = input

# Has the user asked for a build?  Compare case-insensitively: previously
# `lower` was computed but the membership test used `arg`, leaving the
# variable unused and e.g. "Build" unrecognised.
is_build = False
for arg in sys.argv[1:]:
    lower = arg.lower()
    if lower in ["build", "bdist_wheel", "build_py"]:
        is_build = True
        break
def setup_package():
    """Configure and invoke ``setuptools.setup`` for this package.

    Detects the host platform, probes the active compiler for a working
    OpenMP flag, optionally cythonizes the ``.pyx`` sources, then calls
    ``setup()`` with the resulting extension modules.
    """
    # First, set some flags regarding the distribution
    IS_MAC = False
    IS_LINUX = False
    IS_WINDOWS = False
    MACHINE = platform.machine()

    # Candidate OpenMP flags, tried in order by get_openmp_flag() below.
    openmp_flags = ["-fopenmp", "-openmp"]

    if platform.system() == "Darwin":
        IS_MAC = True
        if is_build:
            print(f"\nCompiling on a Mac ({MACHINE})")
    elif platform.system() == "Windows":
        IS_WINDOWS = True
        openmp_flags.insert(0, "/openmp")  # MSVC flag
        if is_build:
            print(f"\nCompiling on Windows ({MACHINE})")
    elif platform.system() == "Linux":
        IS_LINUX = True
        if is_build:
            print(f"\nCompiling on Linux ({MACHINE})")
    else:
        if is_build:
            print(f"Unrecognised platform {platform.system()}. Assuming Linux")
        IS_LINUX = True

    # Get the compiler that (I think) distutils will use
    # - I will need this to add options etc.
    compiler = distutils.ccompiler.new_compiler()
    distutils.sysconfig.customize_compiler(compiler)

    user_openmp_flag = os.getenv("OPENMP_FLAG", None)

    if user_openmp_flag is not None:
        # BUGFIX: list.insert takes (index, value). The arguments were
        # previously swapped ("insert(user_openmp_flag, 0)"), which raised
        # TypeError whenever OPENMP_FLAG was set. The user's flag is now
        # correctly prepended so it is tried first.
        openmp_flags.insert(0, user_openmp_flag)

    # override 'input' so that defaults can be used when this is run in batch
    # or CI/CD
    def input(prompt: str, default="y"):
        """Wrapper for 'input' that returns 'default' if it detected
        that this is being run from within a batch job or other
        service that doesn't have access to a tty
        """
        import sys
        try:
            if sys.stdin.isatty():
                return _system_input(prompt)
            else:
                print(f"Not connected to a console, so having to use "
                      f"the default ({default})")
                return default
        except Exception as e:
            print(f"Unable to get the input: {e.__class__} {e}")
            print(f"Using the default ({default}) instead")
            return default

    # Check if compiler support openmp (and find the correct openmp flag)
    def get_openmp_flag():
        """Return the first flag in `openmp_flags` that compiles a small
        OpenMP test program with the active compiler, or None if none do."""
        openmp_test = \
            r"""
        #include <omp.h>
        #include <stdio.h>
        int main(int argc, char **argv)
        {
            int nthreads, thread_id;
            #pragma omp parallel private(nthreads, thread_id)
            {
                thread_id = omp_get_thread_num();
                nthreads = omp_get_num_threads();
                printf("I am thread %d of %d\n", thread_id, nthreads);
            }
            return 0;
        }
        """
        tmpdir = tempfile.mkdtemp()
        curdir = os.getcwd()
        os.chdir(tmpdir)
        filename = r'openmp_test.c'
        with open(filename, 'w') as file:
            file.write(openmp_test)
            file.flush()
        openmp_flag = None
        # Note: the user-supplied OPENMP_FLAG (if any) was already prepended
        # to `openmp_flags` above, so it is tried first here; the previous
        # duplicate insert at this point has been removed.
        for flag in openmp_flags:
            try:
                # Compile the test program with each candidate flag...
                compiler.compile(sources=["openmp_test.c"],
                                 extra_preargs=[flag])
                openmp_flag = flag
                break
            except Exception as e:
                print(f"Cannot compile: {e.__class__} {e}")
        # clean up
        os.chdir(curdir)
        shutil.rmtree(tmpdir)
        return openmp_flag

    if is_build:
        openmp_flag = get_openmp_flag()
    else:
        openmp_flag = None

    include_dirs = []

    if is_build and (openmp_flag is None):
        print(f"\nYour compiler {compiler.compiler_so[0]} does not support "
              f"OpenMP with any of the known OpenMP flags {openmp_flags}. "
              f"If you know which flag to use can you specify it using "
              f"the environent variable OPENMP_FLAG. Otherwise, we will "
              f"have to compile the serial version of the code.")

        if IS_MAC:
            print(f"\nThis is common on Mac, as the default compiler does not "
                  f"support OpenMP. If you want to compile with OpenMP then "
                  f"install llvm via homebrew, e.g. 'brew install llvm', see "
                  f"https://embeddedartistry.com/blog/2017/02/24/installing-llvm-clang-on-osx/")
            print(f"\nRemember then to choose that compiler by setting the "
                  f"CC environment variable, or passing it on the 'make' line, "
                  f"e.g. 'CC=/usr/local/opt/llvm/bin/clang make'")

        result = input("\nDo you want compile without OpenMP? (y/n) ",
                       default="y")

        if result is None or result.strip().lower()[0] != "y":
            sys.exit(-1)

        # Header stub that disables OpenMP pragmas in the sources.
        include_dirs.append("src/pack_and_doc/disable_openmp")

    cflags = "-O3"
    lflags = []

    if openmp_flag:
        cflags = f"{cflags} {openmp_flag}"
        lflags.append(openmp_flag)
    # NOTE(review): `lflags` is only ever passed as extra_compile_args below;
    # there are no extra_link_args. Linking relies on CFLAGS/defaults —
    # verify on platforms where OpenMP needs an explicit link flag.

    nbuilders = int(os.getenv("CYTHON_NBUILDERS", 2))
    if nbuilders < 1:
        nbuilders = 1
    if is_build:
        print(f"Number of builders equals {nbuilders}\n")

    compiler_directives = {"language_level": 3, "embedsignature": True,
                           "boundscheck": False, "cdivision": True,
                           "initializedcheck": False,
                           "cdivision_warnings": False,
                           "wraparound": False, "binding": False,
                           "nonecheck": False, "overflowcheck": False}

    if os.getenv("CYTHON_LINETRACE", 0):
        if is_build:
            print("Compiling with Cython line-tracing support - will be SLOW")
        define_macros = [("CYTHON_TRACE", "1")]
        compiler_directives["linetrace"] = True
    else:
        define_macros = []

    # Thank you Priyaj for pointing out this little documented feature - finally
    # I can build the C code into a library!
    # https://www.edureka.co/community/21524/setuptools-shared-libary-cython-wrapper-linked-shared-libary
    ext_lib_path = "src/pack_and_doc/example_library"
    sources = ["library.c"]

    ext_libraries = [['example_library', {
        'sources': [os.path.join(ext_lib_path, src)
                    for src in sources],
        'include_dirs': [],
        'macros': [],
    }]]

    def no_cythonize(extensions, **_ignore):
        """Rewrite .pyx/.py sources to their pre-generated .c/.cpp files.

        See
        https://cython.readthedocs.io/en/latest/src/userguide/source_files_and_compilation.html#distributing-cython-modules
        """
        for extension in extensions:
            sources = []
            for sfile in extension.sources:
                path, ext = os.path.splitext(sfile)
                if ext in (".pyx", ".py"):
                    if extension.language == "c++":
                        ext = ".cpp"
                    else:
                        ext = ".c"
                    sfile = path + ext
                sources.append(sfile)
            extension.sources[:] = sources
        return extensions

    main_pyx_files = glob("src/pack_and_doc/*.pyx")
    submodule_pyx_files = glob("src/pack_and_doc/submodule/*.pyx")

    libraries = ["example_library"]

    extensions = []

    for pyx in submodule_pyx_files:
        _, name = os.path.split(pyx)
        name = name[0:-4]
        module = f"pack_and_doc.submodule.{name}"
        extensions.append(Extension(module, [pyx], define_macros=define_macros,
                                    libraries=libraries,
                                    extra_compile_args=lflags,
                                    include_dirs=include_dirs))

    for pyx in main_pyx_files:
        _, name = os.path.split(pyx)
        name = name[0:-4]
        module = f"pack_and_doc.{name}"
        extensions.append(Extension(module, [pyx], define_macros=define_macros,
                                    libraries=libraries,
                                    extra_compile_args=lflags,
                                    include_dirs=include_dirs))

    CYTHONIZE = bool(int(os.getenv("CYTHONIZE", 0)))

    if not have_cython:
        CYTHONIZE = False

    os.environ['CFLAGS'] = cflags

    if CYTHONIZE:
        extensions = cythonize(extensions,
                               compiler_directives=compiler_directives,
                               nthreads=nbuilders)
    else:
        extensions = no_cythonize(extensions)

    with open("requirements.txt") as fp:
        install_requires = fp.read().strip().split("\n")

    with open("requirements-dev.txt") as fp:
        dev_requires = fp.read().strip().split("\n")

    setup(
        version=versioneer.get_version(),
        cmdclass=versioneer.get_cmdclass(),
        ext_modules=extensions,
        install_requires=install_requires,
        libraries=ext_libraries,
        extras_require={
            "dev": dev_requires,
            "docs": ["sphinx", "sphinx-rtd-theme"]
        },
        entry_points={
            "console_scripts": [
                "pack_and_doc = pack_and_doc.scripts.main:cli"
            ]
        },
        data_files=[("share/pack_and_doc/requirements",
                     ["requirements.txt"])]
    )
if __name__ == "__main__":
    # Freeze to support parallel compilation when using spawn instead of fork
    # (thanks to pandas for showing how to do this in their setup.py)
    multiprocessing.freeze_support()
    # All real work happens inside setup_package().
    setup_package()
| 32.221154 | 125 | 0.570079 |
89255e6b6464647030c7ccf719b77b416efb5af8 | 561 | py | Python | grace_dl/torch/communicator/allreduce.py | aoranwu/grace | 1e28915f6f6e8189ef33c0c7d8d3ce314e0a493e | [
"BSD-2-Clause"
] | 88 | 2020-05-07T15:36:10.000Z | 2022-03-13T06:13:31.000Z | grace_dl/torch/communicator/allreduce.py | aoranwu/grace | 1e28915f6f6e8189ef33c0c7d8d3ce314e0a493e | [
"BSD-2-Clause"
] | 21 | 2020-05-25T08:37:03.000Z | 2022-03-30T10:08:14.000Z | grace_dl/torch/communicator/allreduce.py | aoranwu/grace | 1e28915f6f6e8189ef33c0c7d8d3ce314e0a493e | [
"BSD-2-Clause"
] | 33 | 2020-05-07T23:11:39.000Z | 2022-03-25T03:33:49.000Z | from grace_dl.torch import Communicator
from horovod.torch import allreduce_async_, synchronize
class Allreduce(Communicator):
    """Communicator that reduces compressed tensors via Horovod allreduce."""

    def async_send(self, tensors_compressed, name):
        """Start one asynchronous allreduce per compressed tensor.

        Each tensor gets a unique op name (`name` + its index); returns the
        list of Horovod handles in the same order as the inputs.
        """
        return [
            allreduce_async_(tensor, self.compressor.average, name + str(index))
            for index, tensor in enumerate(tensors_compressed)
        ]

    def wait_receive(self, handles, ctx):
        """Block on every pending allreduce, then decompress the results."""
        outputs = []
        for handle in handles:
            outputs.append(synchronize(handle))
        return self.compressor.decompress(outputs, ctx)
| 35.0625 | 104 | 0.700535 |
2d6b3683807f28dfbf2594f08c41d1ce2b5d1a2c | 1,025 | py | Python | packaging/setup/plugins/ovirt-engine-common/core/__init__.py | SunOfShine/ovirt-engine | 7684597e2d38ff854e629e5cbcbb9f21888cb498 | [
"Apache-2.0"
] | 1 | 2021-02-02T05:38:35.000Z | 2021-02-02T05:38:35.000Z | packaging/setup/plugins/ovirt-engine-common/core/__init__.py | SunOfShine/ovirt-engine | 7684597e2d38ff854e629e5cbcbb9f21888cb498 | [
"Apache-2.0"
] | null | null | null | packaging/setup/plugins/ovirt-engine-common/core/__init__.py | SunOfShine/ovirt-engine | 7684597e2d38ff854e629e5cbcbb9f21888cb498 | [
"Apache-2.0"
] | null | null | null | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""ovirt-host-remove core plugin."""
from otopi import util
from . import offlinepackager
from . import misc
from . import engine
from . import answerfile
@util.export
def createPlugins(context):
    """Instantiate every setup plugin in this package for `context`."""
    # Instantiation order matches the original explicit call sequence.
    for plugin_module in (offlinepackager, misc, engine, answerfile):
        plugin_module.Plugin(context=context)
# vim: expandtab tabstop=4 shiftwidth=4
| 25.625 | 74 | 0.750244 |
5c12feb8293c9ed8700db67b2ccc39405366ba73 | 2,342 | py | Python | src/arch/x86/isa/insts/simd64/integer/data_transfer/__init__.py | hyu-iot/gem5 | aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5 | [
"BSD-3-Clause"
] | 765 | 2015-01-14T16:17:04.000Z | 2022-03-28T07:46:28.000Z | src/arch/x86/isa/insts/simd64/integer/data_transfer/__init__.py | hyu-iot/gem5 | aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5 | [
"BSD-3-Clause"
] | 148 | 2018-07-20T00:58:36.000Z | 2021-11-16T01:52:33.000Z | src/arch/x86/isa/insts/simd64/integer/data_transfer/__init__.py | hyu-iot/gem5 | aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5 | [
"BSD-3-Clause"
] | 807 | 2015-01-06T09:55:38.000Z | 2022-03-30T10:23:36.000Z | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Subcategories of the 64-bit (MMX) integer data-transfer instructions; each
# entry names a sibling module in this package.
categories = ["move",
              "move_non_temporal",
              "move_mask"]

microcode = '''
# 64 bit multimedia instructions
'''

# Import each category module relative to this package and concatenate its
# microcode definitions onto this module's `microcode` string.
for category in categories:
    exec("from . import %s as cat" % category)
    microcode += cat.microcode
| 50.913043 | 72 | 0.779249 |
3f01943283f1ee2839fe01160da8706f9c049ad4 | 5,164 | py | Python | fluids/assets/shape.py | BerkeleyAutomation/FLUIDS | 728da0d0fec5028ca4506aa9cc8e37a5b072e7a9 | [
"MIT"
] | 26 | 2017-12-28T18:15:36.000Z | 2022-01-21T13:00:27.000Z | fluids/assets/shape.py | BerkeleyAutomation/FLUIDS | 728da0d0fec5028ca4506aa9cc8e37a5b072e7a9 | [
"MIT"
] | 61 | 2018-01-30T05:18:42.000Z | 2021-05-19T15:00:05.000Z | fluids/assets/shape.py | BerkeleyAutomation/FLUIDS | 728da0d0fec5028ca4506aa9cc8e37a5b072e7a9 | [
"MIT"
] | 14 | 2017-12-11T04:59:21.000Z | 2021-05-19T12:21:31.000Z | import numpy as np
import pygame
import shapely.geometry
from fluids.utils import rotation_array
class Shape(object):
    """2D polygon primitive backing FLUIDS assets.

    A shape is either an axis-aligned box of size ``xdim`` x ``ydim``
    centered at ``(x, y)`` (when ``points`` is empty) or an arbitrary
    polygon built from ``points``; either way it is rotated by ``angle``
    about its center. Geometry queries delegate to a cached ``shapely``
    polygon, while the raw vertex array (``self.points``) is used for
    pygame rendering.
    """

    def __init__(self, x=0, y=0,
                 xdim=0, ydim=0,
                 points=None,
                 mass=0,
                 type=None,
                 angle=0, angle_deg=0,
                 color=(255, 255, 255),
                 border_color=(0xE4, 0xE4, 0xE4),
                 vis_level=1,
                 state=None,
                 collideables=None,
                 waypoints=None):
        """Build the shape; `angle_deg` (if nonzero) overrides `angle`.

        Mutable list defaults (`points=[]`, `collideables=[]`) were replaced
        with None sentinels so call sites cannot share and mutate a single
        module-level list. `type` is accepted for caller compatibility but
        is not used here.
        """
        if points is None:
            points = []
        if collideables is None:
            collideables = []
        if angle_deg:
            angle = np.deg2rad(angle_deg)
        # `len()` (rather than truthiness) also accepts numpy arrays.
        if not len(points):
            # Axis-aligned box: four signed corner offsets around the center.
            corner_offsets = np.array([xdim / 2.0, ydim / 2.0])
            signs = np.array([[1, 1], [1, -1], [-1, -1], [-1, 1]])
            corner_offsets = signs * corner_offsets
            self.x, self.y = x, y
            self.origin_points = corner_offsets
        else:
            # Arbitrary polygon: the center is the vertex centroid.
            xs, ys = zip(*points)
            self.x, self.y = sum(xs) / len(xs), sum(ys) / len(ys)
            self.origin_points = points - np.array([self.x, self.y])
        # (Removed unused locals `rotation_mat` and an earlier, unused
        # `centers` assignment from the original.)
        self.points = (self.origin_points.dot(rotation_array(angle))
                       + np.array([self.x, self.y]))
        xs, ys = zip(*self.points)
        self.minx, self.maxx = min(xs), max(xs)
        self.miny, self.maxy = min(ys), max(ys)
        centers = np.array([self.x, self.y])
        # Radius of the bounding circle around the center.
        self.radius = max(np.linalg.norm([p - centers for p in self.points],
                                         axis=1))
        self.xdim = xdim
        self.ydim = ydim
        self.angle = angle
        self.mass = mass
        self.vis_level = vis_level
        self.collideables = collideables
        self.shapely_obj = shapely.geometry.Polygon(self.points)
        self.color = color
        self.border_color = border_color
        self.state = state
        self.waypoints = [] if not waypoints else waypoints

    def intersects(self, other):
        """Whether this shape's polygon overlaps `other`'s."""
        return self.shapely_obj.intersects(other.shapely_obj)

    def get_relative(self, other, offset=(0, 0)):
        """Return a copy of this shape expressed in `other`'s local frame.

        Args:
            other: a shape, or an ``(x, y, angle)`` tuple, defining the frame.
            offset: translation applied after the frame change.
        """
        if type(other) == tuple:
            x, y, angle = other
        else:
            x, y, angle = other.x, other.y, other.angle
        new_points = np.array(self.shapely_obj.exterior.coords) - np.array([x, y])
        new_points = new_points.dot(rotation_array(-angle))
        new_points = new_points + np.array(offset)
        shape = Shape(points=new_points[:, :2], color=self.color)
        # Preserve the concrete subclass of `self` on the copy.
        shape.__class__ = type(self)
        return shape

    def center_distance_to(self, other):
        """Euclidean distance between the two shapes' centers."""
        return np.linalg.norm([self.x - other.x, self.y - other.y])

    def can_collide(self, other):
        """Whether `other`'s type is in this shape's collideable set."""
        return type(other) in self.collideables and self is not other

    def collides(self, other):
        """Whether the shapes can collide and currently intersect."""
        return self.can_collide(other) and self.intersects(other)

    def contains_point(self, point, buf=0):
        """Whether `point` lies inside the shape (optionally grown by `buf`)."""
        # Fast reject via the cached axis-aligned bounding box.
        if point[0] + buf < self.minx or point[0] - buf > self.maxx \
                or point[1] + buf < self.miny or point[1] - buf > self.maxy:
            return False
        if buf:
            return self.shapely_obj.buffer(buf).contains(shapely.geometry.Point(point))
        return self.shapely_obj.contains(shapely.geometry.Point(point))

    def dist_to(self, other):
        """Minimum distance between the two shapes' boundaries."""
        return self.shapely_obj.distance(other.shapely_obj)

    def render(self, surface, border=4, color=None):
        """Draw the shape on a pygame surface.

        Shapes whose xdim and ydim are both 1 are drawn as a small circle;
        everything else as a filled polygon with an optional border.
        """
        if not color:
            color = self.color
        if self.xdim != 1 or self.ydim != 1:
            if color:
                pygame.draw.polygon(surface, color, self.points)
            if border:
                pygame.draw.polygon(surface, self.border_color, self.points, border)
        else:
            pygame.draw.circle(surface, color, (int(self.x), int(self.y)), 5)

    def render_debug(self, surface, color=(255, 0, 0), width=10):
        """Draw only the outline, for debugging overlays."""
        pygame.draw.polygon(surface, color, self.points, width)

    def step(self, actions):
        """No-op physics step; subclasses override."""
        pass

    def update_points(self, x, y, angle):
        """Move the shape to pose ``(x, y, angle)``, refreshing cached geometry.

        The shapely polygon is translated and rotated incrementally, then the
        vertex array and bounding box are rebuilt from it.
        """
        dx = self.x - x
        dy = self.y - y
        dangle = (angle - self.angle + 6 * np.pi) % (2 * np.pi)
        self.x = x
        self.y = y
        self.angle = angle % (2 * np.pi)
        # NOTE(review): relies on `shapely.affinity` being reachable as an
        # attribute of the `shapely` package even though only
        # `shapely.geometry` is imported at the top of the file — consider an
        # explicit `import shapely.affinity`.
        self.shapely_obj = shapely.affinity.translate(self.shapely_obj,
                                                      -dx, -dy)
        self.shapely_obj = shapely.affinity.rotate(self.shapely_obj,
                                                   -dangle,
                                                   (self.x, self.y),
                                                   use_radians=True)
        self.points = np.array(self.shapely_obj.exterior.coords)
        xs, ys = self.points[:, 0], self.points[:, 1]
        self.minx, self.maxx = min(xs), max(xs)
        self.miny, self.maxy = min(ys), max(ys)
931152dbe808a1cff5c34a49067b653328f064b6 | 16,483 | py | Python | custom_components/openhasp/light.py | HASwitchPlate/hasp-lvgl-custom-component | 46985e4685e6fc65645f1b88c72908ad700fd7f3 | [
"MIT"
] | 19 | 2021-04-02T13:45:04.000Z | 2022-03-07T12:49:19.000Z | custom_components/openhasp/light.py | HASwitchPlate/hasp-lvgl-custom-component | 46985e4685e6fc65645f1b88c72908ad700fd7f3 | [
"MIT"
] | 43 | 2021-04-03T23:35:56.000Z | 2022-03-15T20:39:57.000Z | custom_components/openhasp/light.py | HASwitchPlate/hasp-lvgl-custom-component | 46985e4685e6fc65645f1b88c72908ad700fd7f3 | [
"MIT"
] | 8 | 2021-04-18T12:07:23.000Z | 2022-01-10T00:49:15.000Z | """Support for HASP LVGL moodlights."""
import json
import logging
from typing import Callable
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_NAME
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.util.color as color_util
import voluptuous as vol
from .common import HASP_IDLE_SCHEMA, HASPToggleEntity
from .const import (
ATTR_AWAKE_BRIGHTNESS,
ATTR_IDLE_BRIGHTNESS,
CONF_DIMLIGHTS,
CONF_HWID,
CONF_IDLE_BRIGHTNESS,
CONF_LIGHTS,
CONF_TOPIC,
HASP_IDLE_LONG,
HASP_IDLE_OFF,
HASP_IDLE_SHORT,
)
_LOGGER = logging.getLogger(__name__)

# Payload schema for a plate's moodlight state messages: on/off plus RGB and
# brightness components (all 0-255); "color" is an optional color-name string.
HASP_MOODLIGHT_SCHEMA = vol.Schema(
    {
        vol.Required("state"): cv.boolean,
        vol.Required("r"): vol.All(int, vol.Range(min=0, max=255)),
        vol.Required("g"): vol.All(int, vol.Range(min=0, max=255)),
        vol.Required("b"): vol.All(int, vol.Range(min=0, max=255)),
        vol.Required("brightness"): vol.All(int, vol.Range(min=0, max=255)),
        vol.Optional("color"): str,
    },
)

# Payload schema for the display backlight: on/off plus brightness (0-255).
HASP_BACKLIGHT_SCHEMA = vol.Schema(
    {
        vol.Required("state"): cv.boolean,
        vol.Required("brightness"): vol.All(int, vol.Range(min=0, max=255)),
    }
)

# Payload schema for a GPIO light output; brightness is only present for
# dimmable outputs.
HASP_LIGHT_SCHEMA = vol.Schema(
    {
        vol.Required("state"): cv.boolean,
        vol.Optional("brightness"): vol.All(int, vol.Range(min=0, max=255)),
    }
)
# pylint: disable=R0801, W0613
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: Callable
):
    """Set up Plate Light sensors based on a config entry."""
    plate_name = entry.data[CONF_NAME]
    hwid = entry.data[CONF_HWID]
    topic = entry.data[CONF_TOPIC]

    # Entity order matches the original: backlight, moodlight, then the
    # plain and dimmable GPIO lights.
    entities = [
        HASPBackLight(
            plate_name,
            hwid,
            topic,
            entry.options.get(CONF_IDLE_BRIGHTNESS, entry.data[CONF_IDLE_BRIGHTNESS]),
        ),
        HASPMoodLight(plate_name, hwid, topic),
    ]
    for gpio in entry.data[CONF_LIGHTS]:
        entities.append(HASPLight(plate_name, hwid, topic, gpio))
    for gpio in entry.data[CONF_DIMLIGHTS]:
        entities.append(HASPDimmableLight(plate_name, hwid, topic, gpio))

    async_add_entities(entities)
    return True
class HASPLight(HASPToggleEntity, LightEntity):
    """Representation of openHASP Light."""

    def __init__(self, name, hwid, topic, gpio):
        """Initialize the light."""
        super().__init__(name, hwid, topic, gpio)

    @property
    def name(self):
        """Return the name of the light."""
        return f"{self._name} light {self._gpio}"

    async def refresh(self):
        """Sync local state back to plate."""
        # Publish the locally cached on/off state to the plate's GPIO
        # output command topic; state is sent as 0/1.
        await self.hass.components.mqtt.async_publish(
            self.hass,
            f"{self._topic}/command/output{self._gpio}",
            json.dumps(HASP_LIGHT_SCHEMA({"state": int(self._state)})),
            qos=0,
            retain=False,
        )
        self.async_write_ha_state()

    async def async_added_to_hass(self):
        """Run when entity about to be added."""
        await super().async_added_to_hass()

        @callback
        async def light_state_message_received(msg):
            """Process State."""
            # Any well-formed state message marks the plate as available;
            # malformed payloads are logged and ignored.
            try:
                self._available = True
                message = HASP_LIGHT_SCHEMA(json.loads(msg.payload))
                _LOGGER.debug("received light %s: %s", self.name, message)
                self._state = message["state"]
                self.async_write_ha_state()
            except vol.error.Invalid as err:
                _LOGGER.error(err)

        self._subscriptions.append(
            await self.hass.components.mqtt.async_subscribe(
                f"{self._topic}/state/output{self._gpio}", light_state_message_received
            )
        )

        # Force immediate state update from plate by publishing an empty
        # payload on the command topic.
        await self.hass.components.mqtt.async_publish(
            self.hass,
            f"{self._topic}/command/output{self._gpio}",
            "",
            qos=0,
            retain=False,
        )
class HASPDimmableLight(HASPToggleEntity, LightEntity):
    """Representation of openHASP Light."""

    def __init__(self, name, hwid, topic, gpio):
        """Initialize the dimmable light."""
        super().__init__(name, hwid, topic, gpio)
        # Last known brightness (0-255); None until the plate reports it.
        self._brightness = None

    @property
    def name(self):
        """Return the name of the light."""
        return f"{self._name} dimmable light {self._gpio}"

    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_BRIGHTNESS

    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._brightness

    async def refresh(self):
        """Sync local state back to plate."""
        _LOGGER.debug(
            "refresh dim %s state = %s, brightness = %s",
            self.name,
            self._state,
            self._brightness,
        )
        # NOTE(review): if self._brightness is still None (no state received
        # yet), HASP_LIGHT_SCHEMA validation would fail here — verify callers
        # only refresh after a brightness is known.
        await self.hass.components.mqtt.async_publish(
            self.hass,
            f"{self._topic}/command/output{self._gpio}",
            json.dumps(
                HASP_LIGHT_SCHEMA(
                    {"state": self._state, "brightness": self._brightness}
                )
            ),
            qos=0,
            retain=False,
        )
        self.async_write_ha_state()

    async def async_added_to_hass(self):
        """Run when entity about to be added."""
        await super().async_added_to_hass()

        @callback
        async def dimmable_light_message_received(msg):
            """Process State."""
            try:
                self._available = True
                message = HASP_LIGHT_SCHEMA(json.loads(msg.payload))
                _LOGGER.debug("received dimmable light %s: %s", self.name, message)
                self._state = message["state"]
                # NOTE(review): "brightness" is Optional in HASP_LIGHT_SCHEMA,
                # so a valid payload without it raises KeyError here — confirm
                # the plate always includes brightness for dimmable outputs.
                self._brightness = message["brightness"]
                self.async_write_ha_state()
            except vol.error.Invalid as err:
                _LOGGER.error(err)

        self._subscriptions.append(
            await self.hass.components.mqtt.async_subscribe(
                f"{self._topic}/state/output{self._gpio}",
                dimmable_light_message_received,
            )
        )

        # Force immediate state update from plate by publishing an empty
        # payload on the command topic.
        await self.hass.components.mqtt.async_publish(
            self.hass,
            f"{self._topic}/command/output{self._gpio}",
            "",
            qos=0,
            retain=False,
        )

    async def async_turn_on(self, **kwargs):
        """Turn on the dimmable light."""
        # Adopt a requested brightness (if any) before pushing state.
        if ATTR_BRIGHTNESS in kwargs:
            self._brightness = kwargs[ATTR_BRIGHTNESS]
        self._state = True
        await self.refresh()
class HASPBackLight(HASPToggleEntity, LightEntity, RestoreEntity):
    """Representation of the HASP LVGL screen backlight with idle dimming."""
    def __init__(self, name, hwid, topic, brightness):
        """Initialize the backlight."""
        super().__init__(name, hwid, topic)
        # Brightness (0..255) to restore when the plate becomes active again.
        self._awake_brightness = 255
        # Current brightness; None until restored or reported by the plate.
        self._brightness = None
        # Brightness to dim to after a short idle period (from the config entry).
        self._idle_brightness = brightness
    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_BRIGHTNESS
    @property
    def name(self):
        """Return the name of the light."""
        return f"{self._name} backlight"
    @property
    def unique_id(self):
        """Return the identifier of the light."""
        return f"{self._hwid}.backlight"
    @property
    def extra_state_attributes(self):
        """Expose awake/idle brightness as attributes (restored on restart)."""
        attributes = {
            ATTR_AWAKE_BRIGHTNESS: self._awake_brightness,
            ATTR_IDLE_BRIGHTNESS: self._idle_brightness,
        }
        return attributes
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._brightness
    async def async_added_to_hass(self):
        """Restore previous state, wire up idle handling and MQTT updates."""
        await super().async_added_to_hass()
        state = await self.async_get_last_state()
        if state:
            # NOTE(review): state.state is Home Assistant's *string* state
            # ("on"/"off"); later code treats _state as a truthy flag -- confirm.
            self._state = state.state
            self._brightness = state.attributes.get(ATTR_BRIGHTNESS)
            self._awake_brightness = state.attributes.get(ATTR_AWAKE_BRIGHTNESS, 255)
            _LOGGER.debug(
                "Restoring %s self.brigthness = %s; awake_brightness = %s",
                self.name,
                self._brightness,
                self._awake_brightness,
            )
            # Fall back to the awake brightness if none was stored.
            if not self._brightness:
                self._brightness = self._awake_brightness
        await self.async_listen_idleness()
        cmd_topic = f"{self._topic}/command"
        state_topic = f"{self._topic}/state/backlight"
        @callback
        async def backlight_message_received(msg):
            """Handle a backlight report published by the plate."""
            try:
                self._available = True
                message = HASP_BACKLIGHT_SCHEMA(json.loads(msg.payload))
                _LOGGER.debug("received backlight %s: %s", self.name, message)
                self._state = message["state"]
                self._brightness = message["brightness"]
                self.async_write_ha_state()
            except vol.error.Invalid as err:
                _LOGGER.error(
                    "While proccessing backlight: %s, original message was: %s",
                    err,
                    msg,
                )
        self._subscriptions.append(
            await self.hass.components.mqtt.async_subscribe(
                state_topic, backlight_message_received
            )
        )
        # Bare "backlight" command asks the plate for its current backlight state.
        await self.hass.components.mqtt.async_publish(
            self.hass, cmd_topic, "backlight", qos=0, retain=False
        )
    async def async_listen_idleness(self):
        """Listen to messages on MQTT for HASP idleness."""
        @callback
        async def idle_message_received(msg):
            """Map the plate's idle level to a backlight state/brightness."""
            message = HASP_IDLE_SCHEMA(msg.payload)
            if message == HASP_IDLE_OFF:
                # Active again: restore the awake brightness, backlight on.
                brightness = self._awake_brightness
                backlight = 1
            elif message == HASP_IDLE_SHORT:
                # Briefly idle: dim to the configured idle brightness.
                brightness = self._idle_brightness
                backlight = 1
            elif message == HASP_IDLE_LONG:
                # Long idle: switch the backlight off entirely.
                brightness = self._awake_brightness
                backlight = 0
            else:
                # Unknown idle level: ignore the message.
                return
            _LOGGER.debug(
                "Idle state for %s is %s - Dimming to %s; Backlight to %s",
                self.name,
                message,
                brightness,
                backlight,
            )
            new_state = {"state": backlight, "brightness": brightness}
            await self.hass.components.mqtt.async_publish(
                self.hass,
                f"{self._topic}/command",
                f"backlight {json.dumps(new_state)}",
                qos=0,
                retain=False,
            )
            self.async_write_ha_state()
        self._subscriptions.append(
            await self.hass.components.mqtt.async_subscribe(
                f"{self._topic}/state/idle", idle_message_received
            )
        )
    async def refresh(self):
        """Sync local state back to plate."""
        cmd_topic = f"{self._topic}/command"
        new_state = {"state": self._state, "brightness": self._brightness}
        _LOGGER.debug("refresh(%s) backlight - %s", self.name, new_state)
        await self.hass.components.mqtt.async_publish(
            self.hass,
            cmd_topic,
            f"backlight {json.dumps(new_state)}",
            qos=0,
            retain=False,
        )
        self.async_write_ha_state()
    async def async_turn_on(self, **kwargs):
        """Turn on the backlight, remembering the brightness as the awake level."""
        if ATTR_BRIGHTNESS in kwargs:
            self._brightness = kwargs[ATTR_BRIGHTNESS]
            self._awake_brightness = (
                self._brightness
            )  # save this value for later recall
        self._state = True
        await self.refresh()
class HASPMoodLight(HASPToggleEntity, LightEntity, RestoreEntity):
    """Representation of the HASP LVGL Moodlight (color + brightness)."""
    def __init__(self, name, hwid, topic):
        """Initialize the light."""
        super().__init__(name, hwid, topic)
        # Hue/saturation tuple last seen or restored; None until known.
        self._hs = None
        # Brightness 0..255; None until known.
        self._brightness = None
    @property
    def supported_features(self):
        """Flag supported features (color and brightness)."""
        return SUPPORT_COLOR | SUPPORT_BRIGHTNESS
    @property
    def hs_color(self):
        """Return the color property."""
        return self._hs
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._brightness
    @property
    def name(self):
        """Return the name of the light."""
        return f"{self._name} moodlight"
    @property
    def unique_id(self):
        """Return the identifier of the light."""
        return f"{self._hwid}.moodlight"
    async def async_added_to_hass(self):
        """Restore the previous HA state, then subscribe to plate updates."""
        await super().async_added_to_hass()
        state = await self.async_get_last_state()
        if state:
            # NOTE(review): state.state is HA's *string* state ("on"/"off");
            # downstream code uses _state as a truthy flag -- confirm intent.
            self._state = state.state
            self._brightness = state.attributes.get(ATTR_BRIGHTNESS)
            self._hs = state.attributes.get(ATTR_HS_COLOR)
            _LOGGER.debug(
                "Restoring %s self.brigthness = %s; hs_color = %s",
                self.name,
                self._brightness,
                self._hs,
            )
        @callback
        async def moodlight_message_received(msg):
            """Handle a moodlight report published by the plate."""
            try:
                self._available = True
                message = HASP_MOODLIGHT_SCHEMA(json.loads(msg.payload))
                _LOGGER.debug("received moodlight %s: %s", self.name, message)
                self._state = message["state"]
                # The plate reports r/g/b; convert to HA's hue/saturation model.
                self._hs = color_util.color_RGB_to_hs(
                    message["r"], message["g"], message["b"]
                )
                self._brightness = message["brightness"]
                self.async_write_ha_state()
            except vol.error.Invalid as err:
                _LOGGER.error("While proccessing moodlight: %s", err)
        self._subscriptions.append(
            await self.hass.components.mqtt.async_subscribe(
                f"{self._topic}/state/moodlight", moodlight_message_received
            )
        )
        # Bare "moodlight" command appears to request the plate's current
        # state (mirrors the backlight query) -- confirm against firmware docs.
        await self.hass.components.mqtt.async_publish(
            self.hass, f"{self._topic}/command", "moodlight", qos=0, retain=False
        )
    async def refresh(self):
        """Publish the locally cached moodlight state back to the plate."""
        cmd_topic = f"{self._topic}/command"
        new_state = {"state": self._state}
        if self._hs:
            # Convert HA hue/saturation back to the plate's r/g/b fields.
            rgb = color_util.color_hs_to_RGB(*self._hs)
            new_state = {**new_state, **dict(zip("rgb", rgb))}
        if self._brightness:
            new_state["brightness"] = self._brightness
        _LOGGER.debug("refresh(%s) moodlight - %s", self.name, new_state)
        await self.hass.components.mqtt.async_publish(
            self.hass,
            cmd_topic,
            f"moodlight {json.dumps(new_state)}",
            qos=0,
            retain=False,
        )
    async def async_turn_on(self, **kwargs):
        """Turn on the moodlight, optionally with a new color/brightness."""
        if ATTR_HS_COLOR in kwargs:
            self._hs = kwargs[ATTR_HS_COLOR]
        if ATTR_BRIGHTNESS in kwargs:
            self._brightness = kwargs[ATTR_BRIGHTNESS]
        self._state = True
        _LOGGER.debug(
            "Turn on %s - %s - %s",
            self._topic,
            color_util.color_hs_to_RGB(*self._hs) if self._hs else None,
            self._brightness,
        )
        await self.refresh()
| 30.867041 | 87 | 0.571013 |
a3ee73d85b1c95881146a7ad2abeb2062b8a5f03 | 8,023 | py | Python | nettoyage_df_secteurs_activites.py | Projet-energie/Projet-Energie | c138339937300c5d4571345185a1fd72ea59de9f | [
"MIT"
] | null | null | null | nettoyage_df_secteurs_activites.py | Projet-energie/Projet-Energie | c138339937300c5d4571345185a1fd72ea59de9f | [
"MIT"
] | null | null | null | nettoyage_df_secteurs_activites.py | Projet-energie/Projet-Energie | c138339937300c5d4571345185a1fd72ea59de9f | [
"MIT"
] | null | null | null | #Ouverture du fichier
import pandas as pd
# Load the yearly electricity/gas consumption dataset (per sector, per department).
df=pd.read_csv('conso-elec-gaz-annuelle-par-secteur-dactivite-agregee-departement.csv',sep=';')
# NOTE: bare expressions like `df.head(50)` below are notebook residue; in a
# plain script they evaluate and discard their result.
df.head(50)
# Unique values of the "Année" (year) column.
df["Année"].unique()
# Value counts of the "Filière" (energy sector) column.
df["Filière"].value_counts()
#df.info()
# Select the rows whose sector is gas ("Gaz") so they can be removed.
indexNames = df[ df['Filière'] == 'Gaz' ].index
# Delete these row indexes from dataFrame
df.drop(indexNames , inplace=True)
df.head(50)
# Check the remaining "Filière" values.
df["Filière"].unique()
# Count missing values per column.
def valeur_maquante(df):
    """Print, for each column of *df*, how many values are missing.

    One line is printed per column containing at least one NaN; if no column
    has a missing value, a single summary line is printed instead.
    """
    # Count NaNs once per column (the original recomputed the count twice
    # per column, once for the test and once for the message).
    na_counts = df.isna().sum()
    any_missing = False
    for col, count in na_counts.items():
        if count > 0:
            any_missing = True
            print(f' "{col}": {count} valeurs manquantes')
    if not any_missing:
        print("Aucune valeur manquante dans le dataset.")
valeur_maquante(df)
# Show the rows whose region label is missing.
nan_rows = df[df['Libellé Région'].isnull()]
nan_rows
# Drop the "geom" column, not used in the rest of the analysis.
df.drop(columns=["geom"],inplace=True)
df.head()
# Count missing values per column (same helper as above, re-defined).
def valeur_maquante(df):
    """Report per-column missing-value counts on stdout."""
    found_any = False
    for column in df.columns:
        missing = df[column].isna().sum()
        if missing > 0:
            found_any = True
            print(f' "{column}": {missing} valeurs manquantes')
    if not found_any:
        print("Aucune valeur manquante dans le dataset.")
valeur_maquante(df)
# Drop every remaining row that still has a missing value.
df=df.dropna()
# Count missing values (third identical re-definition of the helper above).
def valeur_maquante(df):
    """Print per-column missing-value counts; identical to the helper above."""
    flag = 0
    for col in df.columns:
        if df[col].isna().sum() > 0:
            flag = 1
            print(f' "{col}": {df[col].isna().sum()} valeurs manquantes')
    if flag == 0:
        print("Aucune valeur manquante dans le dataset.")
# Confirm the dataset is now complete.
valeur_maquante(df)
# Show the first rows.
df.head()
# Row counts per region.
df["Libellé Région"].value_counts()
# List the columns.
df.columns
# Drop the columns not needed for this study (point counts, quality indices,
# department-level identifiers, sector id).
df.drop(columns=['Filière','Opérateur','Nombre de points Agriculture','Nombre de mailles secretisées (agriculture)',
       'Indique qualité Agriculture','Nombre de points Industrie',
       'Nombre de mailles secretisées (industrie)', 'Indice qualité Industrie','Nombre de points Tertiaire',
       'Nombre de mailles secretisées (tertiaire)', 'Indice qualité Tertiaire','Nombre de points Résidentiel',
       'Nombre de mailles secretisées (résidentiel)',
       'Indice qualité Résidentiel','Nombre de points Secteur Inconnu',
       'Nombre de mailles secretisées (secteur inconnu)',
       'Indice qualité Non Affecté','Code Département','Libellé Département','id_filiere'],inplace=True)
df.head()
#df.columns
#df["Libellé Région"].unique()
#df.info()
# Regions to remove (Corsica and the overseas territories).
REGION_sup= df.loc[(df['Libellé Région'] =='Corse') |
              (df['Libellé Région'] =='Guadeloupe')|
              (df['Libellé Région'] =='La Réunion')|
              (df['Libellé Région'] =='Martinique') |
              (df['Libellé Région'] =='Guyane') |
              (df['Libellé Région'] =='Mayotte')].index
# Delete these row indexes from dataFrame
df.drop(REGION_sup, inplace=True)
df.head()
# Pivot table of consumption per region, INSEE code and year.
import numpy as np
#df3=pd.pivot_table(df, index=['Libellé Région','Année'], values=['Consommation Agriculture (MWh)',
#'Consommation Industrie (MWh)', 'Consommation Tertiaire (MWh)',
#'Consommation Résidentiel (MWh)', 'Consommation Secteur Inconnu (MWh)',
#'Code Région', 'Consommation totale (MWh)'], aggfunc=np.sum)
df3=pd.pivot_table(df, index=['Libellé Région', "Code Région", "Année"], values=['Consommation Agriculture (MWh)',
       'Consommation Industrie (MWh)', 'Consommation Tertiaire (MWh)',
       'Consommation Résidentiel (MWh)', 'Consommation Secteur Inconnu (MWh)', 'Consommation totale (MWh)'], aggfunc=np.sum)
df3.head(15).round().astype(int)
# Build a re-organised frame (sector columns only) from the pivot.
df_SA_annee=pd.DataFrame(data=df3,
                  #index=['Libellé Région', "Code Région", "Année"],
                  columns=["Consommation Agriculture (MWh)",
       'Consommation Industrie (MWh)', 'Consommation Tertiaire (MWh)',
       'Consommation Résidentiel (MWh)', 'Consommation Secteur Inconnu (MWh)', 'Consommation totale (MWh)'])
#df_SA_annee["Année"]=[2011,2012,2013,2014,2015,2016,2017,2018,2019]
#df_SA_annee['Code Région']=[53,75,11,27,84,28,76,24,32,44,93,52]
#df_SA_annee['Libellé Région']=['Bretagne', 'Nouvelle-Aquitaine', 'Île-de-France',
#'Bourgogne-Franche-Comté', 'Auvergne-Rhône-Alpes', 'Normandie',
#'Occitanie', 'Centre-Val de Loire', 'Hauts-de-France', 'Grand Est',
#"Provence-Alpes-Côte d'Azur", 'Pays de la Loire']
df_SA_annee.head(12).round()
# Cast the values to int for display.
df_SA_annee.astype(int)
#df_SA_annee["Code Région"]=df_SA_annee["Code Région"].astype(int)
# Persist the per-sector / per-year consumption frame.
df_SA_annee.to_csv (path_or_buf= "df_conso_secteur_activités_annee.csv",
           sep= ";",
           header=True)
# Re-load the CSV to verify it was written correctly.
import pandas as pd
df_SA_annee= pd.read_csv("df_conso_secteur_activités_annee.csv",
                   sep= ";",index_col="Libellé Région")
df_SA_annee.head()
# Rename columns.
# NOTE(review): rename() without inplace=True or re-assignment returns a copy,
# so the next line has no effect on df_SA_annee -- confirm intent.
df_SA_annee.rename(columns={"Code Région": "Code INSEE région", "Année": "Annee"})
# Same remark would not apply below: the index rename uses inplace=True.
df_SA_annee.rename(index = {"Libellé Région": 'Région'}, inplace = True)
# Mean consumption per region aggregated over 2011-2019, plus charts.
# Plotting imports are hoisted here so that `plt` is defined before its first
# use below (the original notebook-converted script imported it *after* the
# pie chart, and kept the `%matplotlib inline` IPython magic, which is a
# SyntaxError in a plain .py file).
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline  # IPython magic -- only valid in a notebook; kept for reference.
df2=pd.pivot_table(df, index=['Libellé Région'], values=['Consommation Agriculture (MWh)',
       'Consommation Industrie (MWh)', 'Consommation Tertiaire (MWh)',
       'Consommation Résidentiel (MWh)', 'Consommation Secteur Inconnu (MWh)',
       'Code Région', 'Consommation totale (MWh)'], aggfunc=np.mean)
df2.head(15).round()
df2.astype(int)
# Country-level (12 regions) summary of the averaged consumptions.
df2.describe().round().astype(int)
# Pie chart of the per-sector consumption split for France.
name = ['Industrie','Agriculture','Tertiaire', 'Résidentiel', 'inconnu']
#data = [8294969,315008,8324294,14202430,246303]
data=[466137,23245,500310,849719,13235]
plt.pie(data, labels=name, autopct='%1.0f%%', startangle=10, )
plt.show()
# Bar chart of consumption per sector and region.
barWidth = 0.8
# NOTE(review): 'Libellé Région' is the pivot-table *index* here, not a
# column, so x="Libellé Région" may raise -- verify against the pandas version.
df2.plot.bar(x="Libellé Région",y=['Consommation Agriculture (MWh)','Consommation Industrie (MWh)','Consommation Tertiaire (MWh)','Consommation Résidentiel (MWh)','Consommation Secteur Inconnu (MWh)'],figsize=(20,10),width = barWidth)
plt.title("Consommation par secteurs d'activités en fonction des régions")
plt.show()
# Build a per-region frame from the averaged pivot table.
df_SA=pd.DataFrame(data=df2,
                  columns=['Libellé Région','Consommation Agriculture (MWh)',
       'Consommation Industrie (MWh)', 'Consommation Tertiaire (MWh)',
       'Consommation Résidentiel (MWh)', 'Consommation Secteur Inconnu (MWh)',
       'Code Région', 'Consommation totale (MWh)'])
#df_SA['Code Région']=[53,75,11,27,84,28,76,24,32,44,93,52]
# NOTE(review): assigning this hard-coded region list assumes df2's row order
# matches it exactly -- verify before relying on the labels.
df_SA['Libellé Région']=['Bretagne', 'Nouvelle-Aquitaine', 'Île-de-France',
       'Bourgogne-Franche-Comté', 'Auvergne-Rhône-Alpes', 'Normandie',
       'Occitanie', 'Centre-Val de Loire', 'Hauts-de-France', 'Grand Est',
       "Provence-Alpes-Côte d'Azur", 'Pays de la Loire']
df_SA.head(12).round()
# Cast the region code to int.
df_SA["Code Région"]=df_SA["Code Région"].astype(int)
# First rows, rounded, for inspection.
df_SA.head(12).round()
# Use the region label as the index.
df_SA= df_SA.set_index('Libellé Région')
# Country-level totals (sum over the 12 regions).
df_SA.agg("sum")
# Persist the per-sector consumption frame.
df_SA.to_csv (path_or_buf= "df_conso_secteur_activités.csv",
           sep= ";",
           header=True)
# Re-load the CSV to verify it was written correctly.
df_SA= pd.read_csv("df_conso_secteur_activités.csv",
                   sep= ";",index_col="Libellé Région")
df_SA.head()
| 35.188596 | 236 | 0.683659 |
e1268f731978277513993f7a195968a942416b55 | 727 | py | Python | tests/project/conftest.py | Exef/brownie | f111d299974f11e451b0acd790e12b40986659e4 | [
"MIT"
] | 1 | 2019-10-03T08:31:21.000Z | 2019-10-03T08:31:21.000Z | tests/project/conftest.py | Exef/brownie | f111d299974f11e451b0acd790e12b40986659e4 | [
"MIT"
] | null | null | null | tests/project/conftest.py | Exef/brownie | f111d299974f11e451b0acd790e12b40986659e4 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import pytest
test_source = """
pragma solidity ^0.5.0;
library Bar {
function baz(uint a, uint b) external pure returns (uint) {
return a + b;
}
}
contract Foo {
address payable owner;
function baz(uint a, uint b) external view returns (uint) {
return Bar.baz(a, b);
}
}
"""
@pytest.fixture()
def btsource(testproject):
    """Return the source text of the project's BrownieTester contract."""
    path = testproject._project_path.joinpath("contracts/BrownieTester.sol")
    # Path.read_text() opens, reads and closes in one call -- same semantics
    # as the previous open()/read() pair, without the manual context manager.
    return path.read_text()
@pytest.fixture
def solc5source():
    """Return the Solidity 0.5.x test source verbatim."""
    return test_source
@pytest.fixture
def solc4source():
    """Return the test source downgraded for Solidity 0.4.25.

    Strips the `payable` address qualifier (not valid pre-0.5) and lowers
    the pragma version.
    """
    downgraded = test_source.replace("payable ", "")
    return downgraded.replace("^0.5.0", "^0.4.25")
| 17.309524 | 76 | 0.642366 |
771635cbceb9eba6e53579defdd0878046cc803d | 3,841 | py | Python | src/hotels/postgres_upload_data.py | bee-travels/data-generator | a27d446daa24b93b5c0ecba9e1b3a8228815023e | [
"Apache-2.0"
] | 2 | 2021-06-23T03:52:09.000Z | 2021-07-26T06:14:15.000Z | src/hotels/postgres_upload_data.py | bee-travels/data-generator | a27d446daa24b93b5c0ecba9e1b3a8228815023e | [
"Apache-2.0"
] | 1 | 2020-04-16T17:28:50.000Z | 2020-04-16T17:28:50.000Z | src/hotels/postgres_upload_data.py | bee-travels/data-generator | a27d446daa24b93b5c0ecba9e1b3a8228815023e | [
"Apache-2.0"
] | 2 | 2021-07-26T06:59:04.000Z | 2022-01-14T06:58:04.000Z | import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
import sys
import json
import os
import logging
import utils
def get_connection():
    """Open a psycopg2 connection to the `beetravels` database.

    Connection parameters come from the PG_* environment variables.  If
    DATABASE_CERT is set, its contents are written to a local cert.pem and
    used with sslmode=verify-full.  The function first connects to the
    server's default database to CREATE DATABASE beetravels (failure, e.g.
    because it already exists, is logged and ignored), then reconnects
    directly to beetravels with autocommit enabled and returns that
    connection; on failure it exits the process.
    """
    try:
        if "DATABASE_CERT" in os.environ:
            # Materialise the certificate on disk for libpq to pick up.
            with open("./cert.pem",'w') as cert_file:
                cert_file.write(os.environ["DATABASE_CERT"])
            os.environ["PGSSLROOTCERT"] = "./cert.pem"
            conn = psycopg2.connect(user=os.environ["PG_USER"], host=os.environ["PG_HOST"], password=os.environ["PG_PASSWORD"], port=os.environ["PG_PORT"], sslmode="verify-full", dbname=os.environ["PG_DB"])
        else:
            conn = psycopg2.connect(user=os.environ["PG_USER"], host=os.environ["PG_HOST"], password=os.environ["PG_PASSWORD"], port=os.environ["PG_PORT"])
        # CREATE DATABASE cannot run inside a transaction; autocommit avoids that.
        conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        cur = conn.cursor()
        cur.execute("CREATE DATABASE beetravels;")
        logging.debug("create beetravels database")
        cur.close()
        conn.close()
    except Exception as e:
        # Best-effort: the database most likely already exists.
        logging.warning("Unable to create to the database")
        logging.info(e)
    try:
        if "DATABASE_CERT" in os.environ:
            conn = psycopg2.connect(user=os.environ["PG_USER"], host=os.environ["PG_HOST"], password=os.environ["PG_PASSWORD"], port=os.environ["PG_PORT"], sslmode="verify-full", database="beetravels")
        else:
            conn = psycopg2.connect(user=os.environ["PG_USER"], host=os.environ["PG_HOST"], password=os.environ["PG_PASSWORD"], port=os.environ["PG_PORT"], database="beetravels")
        conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        return conn
    except Exception as e:
        logging.warning("Error: Unable to connect to the database")
        logging.info(e)
        # NOTE(review): exit(e) exits with status 1 after printing str(e);
        # sys.exit(1) would be more conventional -- confirm intent.
        exit(e)
def drop_table(cursor, table_name):
    """Drop *table_name* on a best-effort basis, logging (not raising) failure.

    The table may not exist on a fresh database, in which case the DROP
    fails and the error is simply logged so setup can continue.
    """
    # Quote the table name as an SQL identifier instead of splicing it into
    # the statement with %-formatting (safe against unusual/hostile names).
    from psycopg2 import sql

    try:
        cursor.execute(sql.SQL("DROP TABLE {};").format(sql.Identifier(table_name)))
        logging.info("dropped table "+table_name)
    except Exception as e:
        logging.warning("drop unsuccessful")
        logging.info(e)
def populate_postgres(data, info):
    """(Re)create the hotel tables and bulk-insert the generated fixtures.

    `info` is a list of dicts with keys id/name/superchain/type (one per
    hotel chain entry); `data` is a list of dicts with keys id/hotel_id/
    city/country/cost/images/tags (one per hotel), as consumed by the
    executemany placeholders below.
    """
    conn = get_connection()
    cur = conn.cursor()
    # Drop in dependency order: hotels references hotel_info.
    drop_table(cur, "hotels")
    drop_table(cur, "hotel_info")
    try:
        logging.info("creating hotel info DB")
        cur.execute("""
            CREATE TABLE IF NOT EXISTS hotel_info (
                ID VARCHAR(255) PRIMARY KEY NOT NULL,
                NAME VARCHAR(255) NOT NULL,
                Superchain VARCHAR(255) NOT NULL,
                Type VARCHAR(255) NOT NULL
            );
            """)
        logging.info("writing to hotel info DB")
        cur.executemany("""
            INSERT INTO hotel_info VALUES (%(id)s, %(name)s, %(superchain)s, %(type)s);
            """, info)
        logging.info("creating hotel DB")
        cur.execute("""
            CREATE TABLE IF NOT EXISTS hotels (
                ID VARCHAR(255) PRIMARY KEY NOT NULL,
                HOTEL_ID VARCHAR(255) REFERENCES hotel_info(id),
                City VARCHAR(255) NOT NULL,
                Country VARCHAR(255) NOT NULL,
                Cost decimal NOT NULL,
                Images TEXT [],
                Tags TEXT []
            );
            """)
        logging.info("writing to hotel DB")
        cur.executemany("""
            INSERT INTO hotels VALUES (%(id)s, %(hotel_id)s, %(city)s, %(country)s, %(cost)s, %(images)s, %(tags)s);
            """, data)
        conn.commit()
    except Exception as e:
        # Log and fall through so the cursor/connection still get closed.
        logging.error("Error: Unable to create and populate database")
        logging.error(e)
    logging.info("data generated")
    cur.close()
    conn.close()
if __name__ == "__main__":
    # Script entry point: load the generated JSON fixtures and upload them.
    logging.basicConfig(level=logging.DEBUG)
    hotel_data = utils.load_json("hotel-data.json")
    hotel_info = utils.load_json("hotel-info.json")
    populate_postgres(hotel_data, hotel_info)
| 35.564815 | 206 | 0.60479 |
0edb8b568d8a989b0598955c11c562ae093a4ff6 | 2,527 | py | Python | CIM15/CDPSM/Balanced/IEC61970/Wires/PhaseTapChangerAsymetrical.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | 58 | 2015-04-22T10:41:03.000Z | 2022-03-29T16:04:34.000Z | CIM15/CDPSM/Balanced/IEC61970/Wires/PhaseTapChangerAsymetrical.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | 12 | 2015-08-26T03:57:23.000Z | 2020-12-11T20:14:42.000Z | CIM15/CDPSM/Balanced/IEC61970/Wires/PhaseTapChangerAsymetrical.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | 35 | 2015-01-10T12:21:03.000Z | 2020-09-09T08:18:16.000Z | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.CDPSM.Balanced.IEC61970.Wires.PhaseTapChangerNonLinear import PhaseTapChangerNonLinear
class PhaseTapChangerAsymetrical(PhaseTapChangerNonLinear):
    """In a PhaseTapChangerAsymetrical tranformer the difference voltage vector adds to the primary side voltage. The angle between the primary side voltage and the difference voltage is named the winding connection angle. The phase shift, α, depends on both the difference voltage magnitude, ΔU, and the winding connection angle.
    """
    def __init__(self, windingConnectionAngle=0.0, *args, **kw_args):
        """Initialises a new 'PhaseTapChangerAsymetrical' instance.
        @param windingConnectionAngle: The phase angle between the in-phase winding and the out-of -phase winding used for creating phase shift. It is only possible to have a symmemtrical transformer if this angle is 90 degrees.
        """
        #: The phase angle between the in-phase winding and the out-of -phase winding used for creating phase shift. It is only possible to have a symmemtrical transformer if this angle is 90 degrees.
        self.windingConnectionAngle = windingConnectionAngle
        super(PhaseTapChangerAsymetrical, self).__init__(*args, **kw_args)
    # Class-level metadata describing this CIM class's own attributes
    # (names, Python types and defaults) and its references to other
    # classes -- presumably consumed by PyCIM's (de)serialisation machinery.
    _attrs = ["windingConnectionAngle"]
    _attr_types = {"windingConnectionAngle": float}
    _defaults = {"windingConnectionAngle": 0.0}
    _enums = {}
    _refs = []
    _many_refs = []
| 57.431818 | 342 | 0.764939 |
b8f87e5766ca227ea2b05a40f0e262895cab53de | 642 | py | Python | source/pic2card/tests/base_test_class.py | sivasakthiv/AdaptiveCards | dfa4bfef70c1111e1a5cc8eed90b2f1e8d76f75c | [
"MIT"
] | null | null | null | source/pic2card/tests/base_test_class.py | sivasakthiv/AdaptiveCards | dfa4bfef70c1111e1a5cc8eed90b2f1e8d76f75c | [
"MIT"
] | null | null | null | source/pic2card/tests/base_test_class.py | sivasakthiv/AdaptiveCards | dfa4bfef70c1111e1a5cc8eed90b2f1e8d76f75c | [
"MIT"
] | 1 | 2022-02-03T04:53:52.000Z | 2022-02-03T04:53:52.000Z | import os
import unittest
from tests.utils import (
img_to_base64,
headers,
payload_empty_dict_data,
payload_data_some_string,
generate_base64,
)
from app.api import app
class BaseAPITest(unittest.TestCase):
    """Shared fixtures for the pic2card API tests."""
    def setUp(self):
        """Define test variables and initialize app."""
        # Base64 of the sample image located via the `test_img_path` env var.
        self.data = img_to_base64(os.environ["test_img_path"])
        # Oversized payload (presumably >3 MB per the name) for the
        # size-limit path -- confirm against generate_base64().
        self.gt_3mb_data = generate_base64()
        self.empty_data = payload_empty_dict_data
        self.wrong_data = payload_data_some_string
        self.headers = headers
        # Enable Flask's testing mode and use its built-in test client.
        app.testing = True
        self.client = app.test_client()
| 25.68 | 62 | 0.683801 |
2c1c404597136ec31687708b3b83ff5a846a70af | 12,273 | py | Python | train.py | gicheonkang/sglkt-visdial | b2927e8bc8e45c2d2a2a76fbf75a15f8ecb78b88 | [
"MIT"
] | 9 | 2021-08-30T16:43:46.000Z | 2022-03-19T12:22:08.000Z | train.py | gicheonkang/sglkt-visdial | b2927e8bc8e45c2d2a2a76fbf75a15f8ecb78b88 | [
"MIT"
] | null | null | null | train.py | gicheonkang/sglkt-visdial | b2927e8bc8e45c2d2a2a76fbf75a15f8ecb78b88 | [
"MIT"
] | 4 | 2021-09-01T01:33:17.000Z | 2021-10-12T05:33:20.000Z | """
This code is modified from batra-mlp-lab's repository.
https://github.com/batra-mlp-lab/visdial-challenge-starter-pytorch
"""
import os
import argparse
import itertools
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import yaml
from bisect import bisect
import datetime
import random
from visdialch.data.dataset import VisDialDataset
from visdialch.encoders import Encoder
from visdialch.decoders import Decoder
from visdialch.metrics import SparseGTMetrics, NDCG
from visdialch.model import EncoderDecoderModel
from visdialch.utils.checkpointing import CheckpointManager, load_checkpoint
from visdialch.utils.logging import Logger
from visdialch.utils.scheduler import get_optim, adjust_lr
# Command-line interface: data paths, hardware selection and checkpointing.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--config-yml",
    default="configs/sglkt.yml",
    help="Path to a config file listing reader, model and solver parameters.",
)
parser.add_argument(
    "--train-json",
    default="data/visdial_1.0_train.json",
    help="Path to json file containing VisDial v1.0 training data.",
)
parser.add_argument(
    "--val-json",
    default="data/visdial_1.0_val.json",
    help="Path to json file containing VisDial v1.0 validation data.",
)
parser.add_argument(
    "--train-structure-json",
    default="data/visdial_1.0_train_coref_structure.json"
)
parser.add_argument(
    "--val-structure-json",
    default="data/visdial_1.0_val_coref_structure.json"
)
parser.add_argument(
    "--train-neural-dense-json",
    default="data/visdial_1.0_train_dense_labels.json"
)
parser.add_argument(
    "--val-dense-json",
    default="data/visdial_1.0_val_dense_annotations.json",
    help="Path to json file containing VisDial v1.0 validation dense ground "
    "truth annotations.",
)
# NOTE(review): the group object returned by add_argument_group() is
# discarded, so the following add_argument() calls attach to the parser
# itself; grouping here would only affect --help formatting anyway.
parser.add_argument_group(
    "Arguments independent of experiment reproducibility"
)
parser.add_argument(
    "--gpu-ids",
    nargs="+",
    type=int,
    default=[0, 1],
    help="List of ids of GPUs to use.",
)
parser.add_argument(
    "--cpu-workers",
    type=int,
    default=8,
    help="Number of CPU workers for dataloader.",
)
# NOTE(review): the bool-ish flags below are declared without type=/action=,
# so any non-empty value passed on the CLI parses as a truthy *string*.
parser.add_argument(
    "--overfit",
    default=False,
    help="Overfit model on 5 examples, meant for debugging.",
)
parser.add_argument(
    "--validate",
    default=True,
    help="Whether to validate on val split after every epoch.",
)
parser.add_argument(
    "--in-memory",
    default=False,
    help="Load the whole dataset and pre-extracted image features in memory. "
    "Use only in presence of large RAM, atleast few tens of GBs.",
)
parser.add_argument_group("Checkpointing related arguments")
parser.add_argument(
    "--save-dirpath",
    default="checkpoints/",
    help="Path of directory to create checkpoint directory and save "
    "checkpoints.",
)
parser.add_argument(
    "--load-pthpath",
    default="",
    help="To continue training, path to .pth file of saved checkpoint.",
)
# =============================================================================
# RANDOM SEED
# =============================================================================
# A fresh seed is drawn for every run (it is written to the log file later so
# a given run can be reproduced afterwards).
seed = random.randint(0, 99999999)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# benchmark=True lets cuDNN autotune kernels: faster, but not bit-reproducible.
torch.backends.cudnn.benchmark = True
# =============================================================================
# INPUT ARGUMENTS AND CONFIG
# =============================================================================
args = parser.parse_args()
# keys: {"dataset", "model", "solver"}
config = yaml.safe_load(open(args.config_yml))
if isinstance(args.gpu_ids, int):
    args.gpu_ids = [args.gpu_ids]
# Use the first listed GPU as the primary device (CPU if it is negative).
device = (
    torch.device("cuda", args.gpu_ids[0])
    if args.gpu_ids[0] >= 0
    else torch.device("cpu")
)
# Print config and args.
print(yaml.dump(config, default_flow_style=False))
for arg in vars(args):
    print("{:<20}: {}".format(arg, getattr(args, arg)))
# =============================================================================
# SETUP DATASET, DATALOADER, MODEL, CRITERION, OPTIMIZER, SCHEDULER
# =============================================================================
train_dataset = VisDialDataset(
    config=config["dataset"],
    dialogs_jsonpath=args.train_json,
    coref_dependencies_jsonpath=args.train_structure_json,
    answer_plausibility_jsonpath=args.train_neural_dense_json,
    overfit=args.overfit,
    in_memory=args.in_memory,
    # Answer options are only needed for the discriminative decoder; the
    # generative decoder instead needs <S>/</S> boundary tokens.
    return_options=True if config["model"]["decoder"] == "disc" else False,
    add_boundary_toks=False if config["model"]["decoder"] == "disc" else True,
)
train_dataloader = DataLoader(
    train_dataset,
    batch_size=config["solver"]["batch_size"],
    num_workers=args.cpu_workers,
    shuffle=True,
    collate_fn=train_dataset.collate_fn
)
val_dataset = VisDialDataset(
    config=config["dataset"],
    dialogs_jsonpath=args.val_json,
    coref_dependencies_jsonpath=args.val_structure_json,
    dense_annotations_jsonpath=args.val_dense_json,
    overfit=args.overfit,
    in_memory=args.in_memory,
    # Validation always ranks the 100 answer options.
    return_options=True,
    add_boundary_toks=False if config["model"]["decoder"] == "disc" else True,
)
val_dataloader = DataLoader(
    val_dataset,
    # Generative decoding is heavier, so use a small fixed batch size there.
    batch_size=config["solver"]["batch_size"]
    if config["model"]["decoder"] == "disc"
    else 5,
    num_workers=args.cpu_workers,
    collate_fn=val_dataset.collate_fn
)
# Pass vocabulary to construct Embedding layer.
encoder = Encoder(config["model"], train_dataset.vocabulary)
decoder = Decoder(config["model"], train_dataset.vocabulary)
print("Encoder: {}".format(config["model"]["encoder"]))
print("Decoder: {}".format(config["model"]["decoder"]))
# New: Initializing word_embed using GloVe
if config["dataset"]["glove_npy"] != '':
    encoder.word_embed.weight.data = torch.from_numpy(np.load(config["dataset"]["glove_npy"]))
    print("Loaded glove vectors from {}".format(config["dataset"]["glove_npy"]))
# Share word embedding between encoder and decoder.
decoder.word_embed = encoder.word_embed
# Wrap encoder and decoder in a model.
model = EncoderDecoderModel(encoder, decoder).to(device)
if -1 not in args.gpu_ids:
    model = nn.DataParallel(model, args.gpu_ids)
# Loss functions, chosen per decoder type.
if config["model"]["decoder"] == "disc":
    criterion1 = nn.BCEWithLogitsLoss()
    criterion2 = nn.CrossEntropyLoss()
    criterion3 = nn.MSELoss()
elif config["model"]["decoder"] == "gen":
    # Padding positions must not contribute to the token-level loss.
    criterion1 = nn.CrossEntropyLoss(
        ignore_index=train_dataset.vocabulary.PAD_INDEX
    )
    criterion2 = nn.MSELoss()
else:
    raise NotImplementedError
# Number of optimizer steps per epoch (used to fast-forward on resume).
if config["solver"]["training_splits"] == "trainval":
    iterations = (len(train_dataset) + len(val_dataset)) // config["solver"][
        "batch_size"
    ] + 1
else:
    iterations = len(train_dataset) // config["solver"]["batch_size"] + 1
# =============================================================================
#   SETUP BEFORE TRAINING LOOP
# =============================================================================
# Timestamped checkpoint directory when the user kept the default path.
start_time = datetime.datetime.strftime(datetime.datetime.utcnow(), '%d-%b-%Y-%H:%M:%S')
if args.save_dirpath == 'checkpoints/':
    args.save_dirpath += '%s' % start_time
os.makedirs(args.save_dirpath, exist_ok=True)
logger = Logger(os.path.join(args.save_dirpath, 'log.txt'))
logger.write("{}".format(seed))
sparse_metrics = SparseGTMetrics()
ndcg = NDCG()
# If loading from checkpoint, adjust start epoch and load parameters.
if args.load_pthpath == "":
    start_epoch = 1
    optim = get_optim(config, model, len(train_dataset))
else:
    # "path/to/checkpoint_xx.pth" -> xx
    start_epoch = int(args.load_pthpath.split("_")[-1][:-4]) + 1
    model_state_dict, optimizer_state_dict = load_checkpoint(args.load_pthpath)
    if isinstance(model, nn.DataParallel):
        model.module.load_state_dict(model_state_dict)
    else:
        model.load_state_dict(model_state_dict)
    optim = get_optim(config, model, len(train_dataset))
    # Fast-forward the LR schedule to where the checkpoint left off.
    optim._step = iterations * (start_epoch - 1)
    optim.optimizer.load_state_dict(optimizer_state_dict)
    print("Loaded model from {}".format(args.load_pthpath))
checkpoint_manager = CheckpointManager(
    model, optim.optimizer, args.save_dirpath, last_epoch=start_epoch-1, config=config
)
# =============================================================================
#   TRAINING LOOP
# =============================================================================
# NOTE(review): indentation was lost in this source; nesting below is
# reconstructed from context — confirm against the original file.
running_loss = 0.0
train_begin = datetime.datetime.utcnow()
for epoch in range(start_epoch, config["solver"]["num_epochs"]+1):
    # -------------------------------------------------------------------------
    #   ADJUST LEARNING RATE
    # -------------------------------------------------------------------------
    if epoch in config["solver"]["lr_decay_list"]:
        adjust_lr(optim, config["solver"]["lr_decay_rate"])
    # -------------------------------------------------------------------------
    #   ON EPOCH START
    # -------------------------------------------------------------------------
    combined_dataloader = itertools.chain(train_dataloader)
    print(f"\nTraining for epoch {epoch}:")
    for i, batch in enumerate(combined_dataloader):
        # Move every tensor in the batch to GPU; list-valued entries stay on CPU.
        for key in batch:
            if not isinstance(batch[key], list):
                batch[key] = batch[key].cuda()
        optim.zero_grad()
        output, structures = model(batch)
        # NOTE(review): `target` is computed but never used below
        # (the loss terms index `batch` directly) — dead code?
        target = (
            batch["ans_ind"]
            if config["model"]["decoder"] == "disc"
            else batch["ans_out"]
        )
        if epoch < 5:
            # Warm-up: plain CE against the ground-truth answer index.
            batch_loss = criterion2(output.view(-1, output.size(-1)), batch["ans_ind"].view(-1))
        else:
            # Afterwards: BCE against teacher scores plus structure MSE.
            # NOTE(review): the criterion3 term may belong one level out
            # (applied in both phases) — indentation lost, confirm.
            batch_loss = criterion1(output, batch["teacher_scores"])
            batch_loss += criterion3(structures, batch["structures"])
        batch_loss.backward()
        optim.step()
        # --------------------------------------------------------------------
        #    update running loss and decay learning rates
        # --------------------------------------------------------------------
        # Exponential moving average of the loss for smoother logging.
        if running_loss > 0.0:
            running_loss = 0.95 * running_loss + 0.05 * batch_loss.item()
        else:
            running_loss = batch_loss.item()
        torch.cuda.empty_cache()
        if i % 100 == 0:
            # print current time, running average, learning rate, iteration, epoch
            logger.write("[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]".format(
                datetime.datetime.utcnow() - train_begin, epoch,
                (epoch - 1) * iterations + i, running_loss,
                optim.optimizer.param_groups[0]['lr']))
    # -------------------------------------------------------------------------
    #   ON EPOCH END  (checkpointing and validation)
    # -------------------------------------------------------------------------
    checkpoint_manager.step()
    # Validate and report automatic metrics.
    if args.validate:
        # Switch dropout, batchnorm etc to the correct mode.
        model.eval()
        logger.write("\nValidation after epoch {}:".format(epoch))
        total_hist_usage = 0
        for i, batch in enumerate(tqdm(val_dataloader)):
            for key in batch:
                batch[key] = batch[key].to(device)
            with torch.no_grad():
                output, structures = model(batch)
            total_hist_usage += torch.sum(structures)
            sparse_metrics.observe(output, batch["ans_ind"])
            if "gt_relevance" in batch:
                # NDCG is scored only on the round with dense annotations.
                output = output[
                    torch.arange(output.size(0)), batch["round_id"] - 1, :
                ]
                ndcg.observe(output, batch["gt_relevance"])
        all_metrics = {}
        all_metrics.update(sparse_metrics.retrieve(reset=True))
        all_metrics.update(ndcg.retrieve(reset=True))
        for metric_name, metric_value in all_metrics.items():
            logger.write("{}: {:4f}".format(metric_name, metric_value))
        # Sparsity = fraction of history connections the model did NOT use.
        total_connct = config["dataset"]["total_connection_val"]
        ratio = total_hist_usage / total_connct
        logger.write("sparsity: {:4f}\n".format(1-torch.sum(ratio).item()))
        model.train()
        torch.cuda.empty_cache()
| 35.368876 | 96 | 0.602298 |
731ce7ffb72817521b1a46f425e6eba662b85015 | 4,650 | py | Python | Mission3.py | magicbycalvin/StochasticTargetMonitoring | bb5fc71b34149c6af40b6f25f0401cfec9f2a9ef | [
"MIT"
] | 1 | 2020-03-10T19:34:38.000Z | 2020-03-10T19:34:38.000Z | Mission3.py | magicbycalvin/StochasticTargetMonitoring | bb5fc71b34149c6af40b6f25f0401cfec9f2a9ef | [
"MIT"
] | null | null | null | Mission3.py | magicbycalvin/StochasticTargetMonitoring | bb5fc71b34149c6af40b6f25f0401cfec9f2a9ef | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 20 15:23:08 2020
@author: ckielasjensen
"""
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import numpy as np
from agent import Agent
from parameters import Parameters
from target import Target
def plot1():
    """Plot the initial mission state (t = 0): target, agents, and radii.

    Builds the target and ``params.nveh`` agents, computes each agent's
    initial flight trajectory, and draws everything on a fresh axes.
    Relies on module-level imports (plt, Circle, Parameters, Target, Agent).
    """
    fig, ax = plt.subplots()
    ax.set_aspect('equal')
    ax.set_xlim(-60, 110)
    ax.set_ylim(-60, 110)
    # Initialize classes
    params = Parameters()
    target = Target(25, 25, 0)
    agents = []
    for i in range(params.nveh):
        agents.append(Agent(0, 25*i, 0, params.monSpeed, 0, params, ax=ax))
    # Give the target a commanded speed
    target.send_cmd(3, 0)
    # Get first plan: stagger each agent's arrival by one monitoring period.
    for i, agent in enumerate(agents):
        agent.detect_target(target.get_state())
        agent.compute_flight_traj(tf=params.tflight + i*params.tmon)
    # Plot initial states
    pts = target.get_state()
    trgtPlot = ax.plot(pts[0], pts[1], 'r*', markersize=10, label='Target')
    for i, agent in enumerate(agents):
        agent.plot_arrow()
    # Plot the inner and outer radii
    cir1 = Circle(target.get_state()[:2], ls=':', fill=False, ec='r',
                  label='Outer Radius', radius=params.outerR)
    # BUGFIX: this circle uses params.innerR, so it is the *inner* radius;
    # it was mislabeled 'Outer Radius' (plot2 labels it correctly).
    cir2 = Circle(target.get_state()[:2], ls=':', fill=False, ec='r',
                  label='Inner Radius', radius=params.innerR)
    cir3 = Circle(target.get_state()[:2], lw=None, fc='gray', alpha=0.5,
                  label='No Fly Zone', radius=params.noflyR)
    ax.add_artist(cir1)
    ax.add_artist(cir2)
    ax.add_artist(cir3)
    # Draw legend and clean up Agent class state shared between runs.
    ax.legend([trgtPlot[0]] + [agent._arrow for agent in agents],
              ['Target',
               'Agent 1',
               'Agent 2',
               'Agent 3'])
    Agent.agentIdx = 0
    Agent.trajList = []
    Agent.timeList = []
    plt.title('$t = 0$')
    return
def plot2():
    """Run the full monitoring simulation and plot the final state.

    Same setup as plot1, then steps target and agents forward in 0.1 s
    increments (re-detecting the target every ``params.detPer`` seconds,
    and issuing two scripted target maneuvers) while animating the plot.
    """
    fig, ax = plt.subplots()
    ax.set_aspect('equal')
    ax.set_xlim(50, 250)
    ax.set_ylim(50, 250)
    # Initialize classes
    params = Parameters()
    target = Target(25, 25, 0)
    agents = []
    for i in range(params.nveh):
        agents.append(Agent(0, 25*i, 0, params.monSpeed, 0, params, ax=ax))
    # Give the target a commanded speed
    target.send_cmd(3, 0)
    # Get first plan: each agent's arrival staggered by one monitoring period.
    for i, agent in enumerate(agents):
        agent.detect_target(target.get_state())
        agent.compute_flight_traj(tf=params.tflight + i*params.tmon)
        agent.plot_arrow()
    # Plot initial states
    pts = target.get_state()
    trgtPlot = ax.plot(pts[0], pts[1], 'r*', markersize=10, label='Target')
    for i, agent in enumerate(agents):
        agent.plot_arrow()
    # Run the simulation over the whole flight + monitoring horizon.
    for t in np.arange(0, params.tflight + params.nveh*params.tmon + 0.1, 0.1):
        # Update states
        target.update(t)
        for agent in agents:
            agent.update(t)
        # Detect target (periodic re-detection every detPer seconds)
        if t % params.detPer < 1e-6:
            for agent in agents:
                agent.detect_target(target.get_state())
        # Update plots
        pts = target.get_state()
        trgtPlot[0].set_data(pts[0], pts[1])
        for i, agent in enumerate(agents):
            agent.plot_arrow()
        # Scripted target maneuvers: turn north at t=1 s, east again at t=1.5 s.
        if t >= 1:
            target.send_cmd(3, np.pi/2)
        if t >= 1.5:
            target.send_cmd(3, 0)
        plt.pause(0.01)
    # Plot the inner and outer radii around the target's final position.
    cir1 = Circle(target.get_state()[:2], ls=':', fill=False, ec='r',
                  label='Outer Radius', radius=params.outerR)
    cir2 = Circle(target.get_state()[:2], ls=':', fill=False, ec='r',
                  label='Inner Radius', radius=params.innerR)
    cir3 = Circle(target.get_state()[:2], lw=None, fc='gray', alpha=0.5,
                  label='No Fly Zone', radius=params.noflyR)
    ax.add_artist(cir1)
    ax.add_artist(cir2)
    ax.add_artist(cir3)
    # Draw legend and clean up Agent class state shared between runs.
    ax.legend([trgtPlot[0]] + [agent._arrow for agent in agents],
              ['Target',
               'Agent 1',
               'Agent 2',
               'Agent 3'])
    Agent.agentIdx = 0
    Agent.trajList = []
    Agent.timeList = []
    plt.title(f'$t = {t}$')
    return
def main():
    """Run both demo figures: the initial state, then the full simulation."""
    for make_figure in (plot1, plot2):
        make_figure()
if __name__ == '__main__':
    # Publication-friendly matplotlib settings; fonttype 42 (TrueType)
    # keeps text editable/embeddable in exported PDF/PS figures.
    plt.rcParams.update({
        'font.size': 20,
        'pdf.fonttype': 42,
        'ps.fonttype': 42,
        'xtick.labelsize': 20,
        'ytick.labelsize': 20,
        'lines.linewidth': 2,
        'lines.markersize': 9
    })
    # plt.rcParams.update(plt.rcParamsDefault)
    # plt.ion()
    plt.close('all')
    main()
| 27.514793 | 79 | 0.573118 |
6cd66feddc8e8c0431fdc005a7608d418ef0c924 | 6,690 | py | Python | 36-ratios/model/loss/yolo_loss.py | Yu-Nie/YOLOV3 | 09db1d551d293dcfa7a638fd6693920840d28a74 | [
"MIT"
] | null | null | null | 36-ratios/model/loss/yolo_loss.py | Yu-Nie/YOLOV3 | 09db1d551d293dcfa7a638fd6693920840d28a74 | [
"MIT"
] | null | null | null | 36-ratios/model/loss/yolo_loss.py | Yu-Nie/YOLOV3 | 09db1d551d293dcfa7a638fd6693920840d28a74 | [
"MIT"
] | null | null | null | import sys
sys.path.append("../utils")
import torch
import torch.nn as nn
from utils import tools
import config.yolov3_config_voc as cfg
class FocalLoss(nn.Module):
    """Focal-weighted binary cross-entropy on logits.

    The BCE term (reduced per the ``reduction`` argument) is scaled by
    ``alpha * |target - sigmoid(input)| ** gamma``, which down-weights
    well-classified examples. Note that with a reducing ``reduction`` the
    scalar BCE is broadcast back against the element-wise modulator, so
    the result is a tensor shaped like ``target``.
    """

    def __init__(self, gamma=2.0, alpha=1.0, reduction="mean"):
        super(FocalLoss, self).__init__()
        self._gamma = gamma
        self._alpha = alpha
        self._bce = nn.BCEWithLogitsLoss(reduction=reduction)

    def forward(self, input, target):
        bce_term = self._bce(input=input, target=target)
        modulator = torch.pow(torch.abs(target - torch.sigmoid(input)), self._gamma)
        return bce_term * (self._alpha * modulator)
class YoloV3Loss(nn.Module):
    """Combined YOLOv3 loss (GIoU + confidence + class + ratio terms)
    summed over the three detection scales."""

    def __init__(self, anchors, strides, iou_threshold_loss=0.5):
        super(YoloV3Loss, self).__init__()
        self.__iou_threshold_loss = iou_threshold_loss
        self.__strides = strides

    def forward(self, p, p_d, label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes):
        """
        :param p: Predicted offset values for three detection layers.
            The shape is [p0, p1, p2], ex. p0=[bs, grid, grid, anchors, tx+ty+tw+th+conf+cls_20+ratio_16]
        :param p_d: Decoded predicted value. The size of value is for image size.
            ex. p_d0=[bs, grid, grid, anchors, x+y+w+h+conf+cls_20+ratio_16]
        :param label_sbbox: Small detection layer's label. The size of value is for original image size.
            shape is [bs, grid, grid, anchors, x+y+w+h+conf+mix+cls_20+ratio_16]
        :param label_mbbox: Same as label_sbbox.
        :param label_lbbox: Same as label_sbbox.
        :param sbboxes: Small detection layer bboxes. The size of value is for original image size.
            shape is [bs, 150, x+y+w+h]
        :param mbboxes: Same as sbboxes.
        :param lbboxes: Same as sbboxes.
        """
        strides = self.__strides
        # One loss per scale: small, medium, large objects.
        loss_s, loss_s_giou, loss_s_conf, loss_s_cls, loss_s_ratios = self.__cal_loss_per_layer(p[0], p_d[0], label_sbbox,
                                                                                               sbboxes, strides[0])
        loss_m, loss_m_giou, loss_m_conf, loss_m_cls, loss_m_ratios = self.__cal_loss_per_layer(p[1], p_d[1], label_mbbox,
                                                                                               mbboxes, strides[1])
        loss_l, loss_l_giou, loss_l_conf, loss_l_cls, loss_l_ratios = self.__cal_loss_per_layer(p[2], p_d[2], label_lbbox,
                                                                                              lbboxes, strides[2])
        loss = loss_l + loss_m + loss_s
        loss_giou = loss_s_giou + loss_m_giou + loss_l_giou
        loss_conf = loss_s_conf + loss_m_conf + loss_l_conf
        loss_cls = loss_s_cls + loss_m_cls + loss_l_cls
        loss_ratios = loss_s_ratios + loss_m_ratios + loss_l_ratios
        return loss, loss_giou, loss_conf, loss_cls, loss_ratios

    def __cal_loss_per_layer(self, p, p_d, label, bboxes, stride):
        """
        (1) The loss of regression of boxes.
          GIoU loss is defined in https://arxiv.org/abs/1902.09630.
          Note: The loss factor is 2-w*h/(img_size**2), which is used to influence the
              balance of the loss value at different scales.
        (2) The loss of confidence.
            Includes confidence loss values for foreground and background.
            Note: The background loss is calculated when the maximum iou of the box predicted
                by the feature point and all GTs is less than the threshold.
        (3) The loss of classes.
            The category loss is BCE, which is the binary value of each class.

        :param stride: The scale of the feature map relative to the original image.
        :return: The average loss (loss_giou, loss_conf, loss_cls) of all batches of this detection layer.
        """
        BCE = nn.BCEWithLogitsLoss(reduction="none")
        FOCAL = FocalLoss(gamma=2, alpha=1.0, reduction="none")

        batch_size, grid = p.shape[:2]
        img_size = stride * grid

        # Split raw predictions: conf logit, 20 class logits, 16 ratio logits.
        p_conf = p[..., 4:5]
        p_cls = p[..., 5:25]
        p_ratio = p[..., 25:]

        p_d_xywh = p_d[..., :4]

        # Label layout: xywh | objectness | mixup weight | 20 classes | 16 ratios.
        label_xywh = label[..., :4]
        label_obj_mask = label[..., 4:5]
        label_cls = label[..., 6:26]
        label_ratio = label[..., 26:]
        label_mix = label[..., 5:6]
        # print(label, label_obj_mask, label_cls, label_ratio, label_mix)

        # loss giou
        giou = tools.GIOU_xywh_torch(p_d_xywh, label_xywh).unsqueeze(-1)

        # The scaled weight of bbox is used to balance the impact of small objects and large objects on loss.
        bbox_loss_scale = 2.0 - 1.0 * label_xywh[..., 2:3] * label_xywh[..., 3:4] / (img_size ** 2)
        loss_giou = label_obj_mask * bbox_loss_scale * (1.0 - giou) * label_mix

        # loss confidence: background cells are those whose best IoU with any
        # GT box falls below the threshold.
        iou = tools.iou_xywh_torch(p_d_xywh.unsqueeze(4), bboxes.unsqueeze(1).unsqueeze(1).unsqueeze(1))
        iou_max = iou.max(-1, keepdim=True)[0]
        label_noobj_mask = (1.0 - label_obj_mask) * (iou_max < self.__iou_threshold_loss).float()

        loss_conf = (label_obj_mask * FOCAL(input=p_conf, target=label_obj_mask) +
                     label_noobj_mask * FOCAL(input=p_conf, target=label_obj_mask)) * label_mix

        # loss classes
        loss_cls = label_obj_mask * BCE(input=p_cls, target=label_cls) * label_mix

        # loss ratios
        loss_ratios = label_obj_mask * BCE(input=p_ratio, target=label_ratio) * label_mix

        # Per-term sums averaged over the batch.
        loss_giou = (torch.sum(loss_giou)) / batch_size
        loss_conf = (torch.sum(loss_conf)) / batch_size
        loss_cls = (torch.sum(loss_cls)) / batch_size
        loss_ratios = (torch.sum(loss_ratios)) / batch_size
        loss = loss_giou + loss_conf + loss_cls + loss_ratios

        return loss, loss_giou, loss_conf, loss_cls, loss_ratios
if __name__ == "__main__":
    # Smoke test: run a random forward pass through the network and the loss
    # with dummy labels (62 = 4 xywh + conf + mix + 20 classes + 16 ratios
    # per anchor; 150 = max GT boxes per image).
    from model.yolov3 import Yolov3

    net = Yolov3()
    p, p_d = net(torch.rand(3, 3, 416, 416))
    label_sbbox = torch.rand(3, 52, 52, 3, 62)
    label_mbbox = torch.rand(3, 26, 26, 3, 62)
    label_lbbox = torch.rand(3, 13, 13, 3, 62)
    sbboxes = torch.rand(3, 150, 4)
    mbboxes = torch.rand(3, 150, 4)
    lbboxes = torch.rand(3, 150, 4)

    loss, loss_xywh, loss_conf, loss_cls, loss_ratios = YoloV3Loss(cfg.MODEL["ANCHORS"], cfg.MODEL["STRIDES"])(p, p_d, label_sbbox,
                                                                                                              label_mbbox,
                                                                                                              label_lbbox, sbboxes,
                                                                                                              mbboxes, lbboxes)
    print(loss)
| 44.899329 | 131 | 0.595067 |
be14cafe9b1cb5cd2221f97fd590e7f6077ae3df | 77 | py | Python | __init__.py | cclauss/saxo | 862846d05f39a90d91e037a0adcc5b2ac0510a30 | [
"Apache-2.0"
] | 24 | 2015-01-20T17:25:17.000Z | 2021-11-11T15:35:45.000Z | __init__.py | cclauss/saxo | 862846d05f39a90d91e037a0adcc5b2ac0510a30 | [
"Apache-2.0"
] | 44 | 2015-01-01T16:46:49.000Z | 2021-05-20T08:13:38.000Z | __init__.py | cclauss/saxo | 862846d05f39a90d91e037a0adcc5b2ac0510a30 | [
"Apache-2.0"
] | 12 | 2015-04-26T19:41:34.000Z | 2020-10-22T08:35:46.000Z | # http://inamidst.com/saxo/
# Created by Sean B. Palmer
from .core import *
| 15.4 | 27 | 0.688312 |
9e8d15b5bb5a89f97295dd615a356668341ef9a3 | 4,777 | py | Python | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_functions_test.py | connectthefuture/tensorflow | 93812423fcd5878aa2c1d0b68dc0496980c8519d | [
"Apache-2.0"
] | 680 | 2016-12-03T14:38:28.000Z | 2022-02-16T04:06:45.000Z | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_functions_test.py | connectthefuture/tensorflow | 93812423fcd5878aa2c1d0b68dc0496980c8519d | [
"Apache-2.0"
] | 38 | 2016-11-17T08:43:51.000Z | 2019-11-12T12:27:04.000Z | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_functions_test.py | connectthefuture/tensorflow | 93812423fcd5878aa2c1d0b68dc0496980c8519d | [
"Apache-2.0"
] | 250 | 2016-12-05T10:37:17.000Z | 2022-03-18T21:26:55.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests feeding functions using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow.contrib.learn.python.learn.dataframe.queues.feeding_functions as ff
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def vals_to_list(a):
  """Return a copy of dict `a` with every numpy array converted to a list.

  Non-array values are passed through unchanged, so the result compares
  equal with plain Python literals in assertions.
  """
  converted = {}
  for key, val in a.items():
    if isinstance(val, np.ndarray):
      val = val.tolist()
    converted[key] = val
  return converted
class _FeedingFunctionsTestCase(tf.test.TestCase):
  """Tests for feeding functions."""

  def testArrayFeedFnBatchOne(self):
    """Batch size 1 yields one row per call, cycling through the array."""
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 1)

    # cycle around a couple times
    for x in range(0, 100):
      i = x % 16
      expected = {"index_placeholder": [i],
                  "value_placeholder": [[2 * i, 2 * i + 1]]}
      actual = aff()
      self.assertEqual(expected, vals_to_list(actual))

  def testArrayFeedFnBatchFive(self):
    """Batch size 5 wraps around the 16-row array mid-batch."""
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 5)

    # cycle around a couple times; the 51st batch starts at row 15.
    for _ in range(0, 101, 2):
      aff()
    expected = {"index_placeholder": [15, 0, 1, 2, 3],
                "value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]]}
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

  def testArrayFeedFnBatchOneHundred(self):
    """A batch larger than the array repeats the data to fill the batch."""
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 100)
    expected = {"index_placeholder": list(range(0, 16)) * 6 + list(range(0, 4)),
                "value_placeholder": np.arange(32).reshape([16, 2]).tolist() * 6
                + [[0, 1], [2, 3], [4, 5], [6, 7]]}
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

  def testPandasFeedFnBatchOne(self):
    """Batch size 1 on a DataFrame yields index plus one value per column."""
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 1)

    # cycle around a couple times
    for x in range(0, 100):
      i = x % 32
      expected = {"index_placeholder": [i + 96],
                  "a_placeholder": [32 + i],
                  "b_placeholder": [64 + i]}
      actual = aff()
      self.assertEqual(expected, vals_to_list(actual))

  def testPandasFeedFnBatchFive(self):
    """Batch size 5 wraps around the 32-row DataFrame mid-batch."""
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 5)

    # cycle around a couple times; the 51st batch starts at the last row.
    for _ in range(0, 101, 2):
      aff()
    expected = {"index_placeholder": [127, 96, 97, 98, 99],
                "a_placeholder": [63, 32, 33, 34, 35],
                "b_placeholder": [95, 64, 65, 66, 67]}
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

  def testPandasFeedFnBatchOneHundred(self):
    """A batch larger than the DataFrame repeats the rows to fill it."""
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 100)
    expected = {
        "index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)),
        "a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)),
        "b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68))
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))
if __name__ == "__main__":
  # Discover and run all TestCase methods in this module.
  tf.test.main()
| 35.125 | 85 | 0.634917 |
a3c6d5213486cf5cd99c9a33ff40e0d436ff216b | 2,908 | py | Python | test/test_day10.py | daniel-stockhausen/adventofcode2021 | e21969a9bf6fe3e9d520cf36dc34de02e24dd0d2 | [
"MIT"
] | null | null | null | test/test_day10.py | daniel-stockhausen/adventofcode2021 | e21969a9bf6fe3e9d520cf36dc34de02e24dd0d2 | [
"MIT"
] | null | null | null | test/test_day10.py | daniel-stockhausen/adventofcode2021 | e21969a9bf6fe3e9d520cf36dc34de02e24dd0d2 | [
"MIT"
] | null | null | null | import os
import unittest
from aoc.day10.day10 import calc_completion_string_middle_score, calc_file_error_score, \
calc_missing_tokens_for_incomplete_line, get_example_data, get_input_data, score_line_completion, score_line_error
class TestDay10(unittest.TestCase):
    """Tests for the Advent of Code 2021 day 10 solution (syntax scoring)."""

    def test_score_line_error(self):
        # Balanced lines score 0; corrupted lines score by the first
        # mismatched closer: ) = 3, ] = 57, } = 1197, > = 25137.
        self.assertEqual(0, score_line_error("([])"))
        self.assertEqual(0, score_line_error("{()()()}"))
        self.assertEqual(0, score_line_error("<([{}])>"))
        self.assertEqual(0, score_line_error("[<>({}){}[([])<>]]"))
        self.assertEqual(0, score_line_error("(((((((((())))))))))"))
        self.assertEqual(1197, score_line_error("{([(<{}[<>[]}>{[]{[(<()>"))
        self.assertEqual(3, score_line_error("[[<[([]))<([[{}[[()]]]"))
        self.assertEqual(57, score_line_error("[{[{({}]{}}([{[{{{}}([]"))
        self.assertEqual(3, score_line_error("[<(<(<(<{}))><([]([]()"))
        self.assertEqual(25137, score_line_error("<{([([[(<>()){}]>(<<{{"))
        # Characters outside the bracket alphabet are rejected.
        with self.assertRaises(ValueError):
            score_line_error("([!])")

    def test_10a_example(self):
        self.assertEqual(26397, calc_file_error_score(get_example_data()))

    def test_10a(self):
        # Regression pin for part 1 against the real puzzle input.
        self.assertEqual(394647, calc_file_error_score(get_input_data()))

    def test_calc_missing_tokens_for_incomplete_line(self):
        # The completion string is the closers for every still-open bracket.
        self.assertEqual("}}]])})]",
                         calc_missing_tokens_for_incomplete_line("[({(<(())[]>[[{[]{<()<>>"))
        self.assertEqual(")}>]})",
                         calc_missing_tokens_for_incomplete_line("[(()[<>])]({[<{<<[]>>("))
        self.assertEqual("}}>}>))))",
                         calc_missing_tokens_for_incomplete_line("(((({<>}<{<{<>}{[]{[]{}"))
        self.assertEqual("]]}}]}]}>",
                         calc_missing_tokens_for_incomplete_line("{<[[]]>}<{[{[{[]{()[[[]"))
        self.assertEqual("])}>",
                         calc_missing_tokens_for_incomplete_line("<{([{{}}[<[[[<>{}]]]>[]]"))
        self.assertEqual("])}>",
                         calc_missing_tokens_for_incomplete_line("<{([{{}}[<[[[<>{}]]]>[]]"))

    def test_score_line_completion(self):
        # Completion score: fold over the closers with score*5 + char value.
        self.assertEqual(288957, score_line_completion("[({(<(())[]>[[{[]{<()<>>"))
        self.assertEqual(5566, score_line_completion("[(()[<>])]({[<{<<[]>>("))
        self.assertEqual(1480781, score_line_completion("(((({<>}<{<{<>}{[]{[]{}"))
        self.assertEqual(995444, score_line_completion("{<[[]]>}<{[{[{[]{()[[[]"))
        self.assertEqual(294, score_line_completion("<{([{{}}[<[[[<>{}]]]>[]]"))

    def test_10b_example(self):
        self.assertEqual(288957, calc_completion_string_middle_score(get_example_data()))

    def test_10b(self):
        # Regression pin for part 2 against the real puzzle input.
        self.assertEqual(2380061249, calc_completion_string_middle_score(get_input_data()))

    def test_10_main(self):
        # The module must also run cleanly as a script (exit status 0).
        self.assertEqual(0, os.system("python -m aoc.day10.day10"))
878735a430844be26b70f0caccbf7ced0c3391a1 | 4,901 | py | Python | modin/core/io/sql/sql_dispatcher.py | yizx-1017/modin | 2eee697135b30a9694c202456db0635c52c9e6c9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | modin/core/io/sql/sql_dispatcher.py | yizx-1017/modin | 2eee697135b30a9694c202456db0635c52c9e6c9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | modin/core/io/sql/sql_dispatcher.py | yizx-1017/modin | 2eee697135b30a9694c202456db0635c52c9e6c9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Module houses `SQLDispatcher` class.
`SQLDispatcher` contains utils for handling SQL queries or database tables,
inherits util functions for handling files from `FileDispatcher` class and can be
used as base class for dipatchers of SQL queries.
"""
import math
import numpy as np
import pandas
import warnings
from modin.core.io.file_dispatcher import FileDispatcher
from modin.db_conn import ModinDatabaseConnection
from modin.config import NPartitions, ReadSqlEngine
class SQLDispatcher(FileDispatcher):
    """Class handles utils for reading SQL queries or database tables."""

    @classmethod
    def _read(cls, sql, con, index_col=None, **kwargs):
        """
        Read a SQL query or database table into a query compiler.

        Parameters
        ----------
        sql : str or SQLAlchemy Selectable (select or text object)
            SQL query to be executed or a table name.
        con : SQLAlchemy connectable, str, sqlite3 connection, or ModinDatabaseConnection
            Connection object to database.
        index_col : str or list of str, optional
            Column(s) to set as index(MultiIndex).
        **kwargs : dict
            Parameters to pass into `pandas.read_sql` function.

        Returns
        -------
        BaseQueryCompiler
            Query compiler with imported data for further processing.
        """
        # A plain connection string can be wrapped for parallel reading;
        # any other connection type falls back to a single-worker read.
        if isinstance(con, str):
            con = ModinDatabaseConnection("sqlalchemy", con)
        if not isinstance(con, ModinDatabaseConnection):
            warnings.warn(
                "To use parallel implementation of `read_sql`, pass either "
                + "the SQL connection string or a ModinDatabaseConnection "
                + "with the arguments required to make a connection, instead "
                + f"of {type(con)}. For documentation of ModinDatabaseConnection, see "
                + "https://modin.readthedocs.io/en/latest/supported_apis/io_supported.html#connecting-to-a-database-for-read-sql"
            )
            return cls.single_worker_read(
                sql,
                con=con,
                index_col=index_col,
                read_sql_engine=ReadSqlEngine.get(),
                **kwargs,
            )
        # Probe row count and column names up front so partitions can be
        # sized and the result frame's columns known without reading data.
        row_count_query = con.row_count_query(sql)
        connection_for_pandas = con.get_connection()
        colum_names_query = con.column_names_query(sql)
        row_cnt = pandas.read_sql(row_count_query, connection_for_pandas).squeeze()
        cols_names_df = pandas.read_sql(
            colum_names_query, connection_for_pandas, index_col=index_col
        )
        cols_names = cols_names_df.columns
        num_partitions = NPartitions.get()
        partition_ids = [None] * num_partitions
        index_ids = [None] * num_partitions
        dtypes_ids = [None] * num_partitions
        # Rows per partition (last partition's LIMIT may overshoot harmlessly).
        limit = math.ceil(row_cnt / num_partitions)
        for part in range(num_partitions):
            offset = part * limit
            query = con.partition_query(sql, limit, offset)
            # Each remote task returns its column splits plus index and dtypes.
            *partition_ids[part], index_ids[part], dtypes_ids[part] = cls.deploy(
                cls.parse,
                num_returns=num_partitions + 2,
                num_splits=num_partitions,
                sql=query,
                con=con,
                index_col=index_col,
                read_sql_engine=ReadSqlEngine.get(),
                **kwargs,
            )
            partition_ids[part] = [
                cls.frame_partition_cls(obj) for obj in partition_ids[part]
            ]
        if index_col is None:  # sum all lens returned from partitions
            index_lens = cls.materialize(index_ids)
            new_index = pandas.RangeIndex(sum(index_lens))
        else:  # concat index returned from partitions
            index_lst = [
                x for part_index in cls.materialize(index_ids) for x in part_index
            ]
            new_index = pandas.Index(index_lst).set_names(index_col)
        new_frame = cls.frame_cls(np.array(partition_ids), new_index, cols_names)
        new_frame.synchronize_labels(axis=0)
        return cls.query_compiler_cls(new_frame)
| 43.371681 | 129 | 0.653948 |
81c87abf084f119ac770d956e76994173ce899d2 | 107 | py | Python | zpipe/stages/__init__.py | jheo4/zpipe | 8cc3ac38b0f999ae4c1a29e63818457c4b7d6cf6 | [
"MIT"
] | 1 | 2020-06-28T02:31:08.000Z | 2020-06-28T02:31:08.000Z | zpipe/stages/__init__.py | jheo4/zpipe | 8cc3ac38b0f999ae4c1a29e63818457c4b7d6cf6 | [
"MIT"
] | 6 | 2020-06-28T01:52:45.000Z | 2020-07-10T03:55:41.000Z | zpipe/stages/__init__.py | jheo4/zpipe | 8cc3ac38b0f999ae4c1a29e63818457c4b7d6cf6 | [
"MIT"
] | null | null | null | from .stage import Stage
from .nonworker_stage import NonWorkerStage
from .worker_stage import WorkerStage
| 26.75 | 43 | 0.859813 |
9eb8696d76e193fc4161ab13c49b3db436e0e80c | 11,385 | py | Python | pump_mbf.py | couchbaselabs/couchbase-cli | 596f3564d08881af83f93910ac3d2c53a82b1d64 | [
"Apache-2.0"
] | null | null | null | pump_mbf.py | couchbaselabs/couchbase-cli | 596f3564d08881af83f93910ac3d2c53a82b1d64 | [
"Apache-2.0"
] | null | null | null | pump_mbf.py | couchbaselabs/couchbase-cli | 596f3564d08881af83f93910ac3d2c53a82b1d64 | [
"Apache-2.0"
] | 1 | 2018-08-23T11:14:28.000Z | 2018-08-23T11:14:28.000Z | #!/usr/bin/env python
import glob
import logging
import os
import sys
import socket
import couchbaseConstants
from cbcollections import defaultdict
from pump import EndPoint, Source, Batch
# ctypes may live under Couchbase's bundled python dir; temporarily drop
# that path to prefer the system module, restoring it only on success.
try:
    import ctypes
except ImportError:
    cb_path = '/opt/couchbase/lib/python'
    while cb_path in sys.path:
        sys.path.remove(cb_path)
    try:
        import ctypes
    except ImportError:
        sys.exit('error: could not import ctypes module')
    else:
        sys.path.insert(0, cb_path)

MIN_SQLITE_VERSION = '3.3'

# Prefer pysqlite2 (newer sqlite on old Pythons), fall back to stdlib sqlite3.
# `status` ends up 0 when a candidate import succeeded with a new-enough
# sqlite; otherwise it keeps the last loop index (truthy) and we bail out.
# NOTE(review): the version check is a lexicographic string compare —
# e.g. '3.10' < '3.3' — acceptable for this era's versions, but fragile.
import_stmts = (
    'from pysqlite2 import dbapi2 as sqlite3',
    'import sqlite3',
)
for status, stmt in enumerate(import_stmts):
    try:
        exec stmt
        if sqlite3.sqlite_version >= MIN_SQLITE_VERSION:
            status = 0
            break
    except ImportError:
        pass
if status:
    sys.exit("Error: could not import required version of sqlite3 module")

MBF_VERSION = 2  # sqlite pragma user version for Couchbase 1.8.
class MBFSource(Source):
"""Can read 1.8 server master and *.mb data files."""
    def __init__(self, opts, spec, source_bucket, source_node,
                 source_map, sink_map, ctl, cur):
        """Initialize the 1.8 backup-file source; cursor state starts empty."""
        super(MBFSource, self).__init__(opts, spec, source_bucket, source_node,
                                        source_map, sink_map, ctl, cur)
        # Lazily-built iteration state over the per-vbucket kv_* tables.
        self.cursor_todo = None
        self.cursor_done = False
        # Item-read query template; %s slots are (attached db name, kv table).
        self.s = """SELECT vbucket, k, flags, exptime, cas, v, vb_version
                       FROM `%s`.`%s`"""
@staticmethod
def can_handle(opts, spec):
return os.path.isfile(spec) and MBFSource.version(spec) == 2
    @staticmethod
    def check_base(opts, spec):
        """Validate common endpoint options for this source."""
        # Skip immediate superclass Source.check_base(),
        # since MBFSource can handle different vbucket states.
        return EndPoint.check_base(opts, spec)
    @staticmethod
    def check(opts, spec):
        """Validate the backup file and describe its bucket/node/vbuckets.

        Returns (error_message, None) on failure, or
        (0, map-with-single-bucket-and-node) on success.
        """
        spec = os.path.normpath(spec)
        if not os.path.isfile(spec):
            return "error: backup_dir is not a file: " + spec, None

        db_files = MBFSource.db_files(spec)
        versions = MBFSource.db_file_versions(db_files)
        logging.debug(" MBFSource check db file versions: %s" % (versions))
        # All files must be version >= 2 (1.8-format); older dumps need upgrading.
        if max(versions.values()) < 2:
            err = ("error: wrong backup/db file versions;\n" +
                   " either the metadata db file is not specified\n" +
                   " or the backup files need upgrading to version %s;\n" +
                   " please use cbdbupgrade or contact support.") \
                   % (MBF_VERSION)
            return err, None

        # Map of state string (e.g., 'active') to map of vbucket_id to info.
        vbucket_states = defaultdict(dict)

        sql = """SELECT vbid, vb_version, state, checkpoint_id
                   FROM vbucket_states"""
        db_file = spec
        try:
            db = sqlite3.connect(db_file)
            cur = db.cursor()
            for row in cur.execute(sql):
                vbucket_id = row[0]
                state = str(row[2])
                vbucket_states[state][vbucket_id] = {
                    'vbucket_id': vbucket_id,
                    'vb_version': row[1],
                    'state': state,
                    'checkpoint_id': row[3]
                }
            cur.close()
            db.close()
        except sqlite3.DatabaseError, e:
            pass  # A missing vbucket_states table is expected.

        # Single synthetic bucket/node describing this backup file.
        return 0, {'spec': spec,
                   'buckets':
                   [{'name': os.path.basename(spec),
                     'nodes': [{'hostname': 'N/A',
                                'vbucket_states': vbucket_states
                                }]}]}
@staticmethod
def db_file_versions(db_files):
rv = {}
for db_file in db_files:
rv[db_file] = MBFSource.version(db_file)
return rv
@staticmethod
def version(db_file):
try:
return int(MBFSource.run_sql(db_file, "PRAGMA user_version;")[0])
except sqlite3.DatabaseError, e:
logging.error("error: could not access user_version from: %s" +
"; exception: %s" +
"; perhaps it is being used by another program" +
" like couchbase-server", db_file, e)
return 0
@staticmethod
def db_files(spec):
return [spec] + glob.glob(spec + "-*.mb")
@staticmethod
def run_sql(db_file, sql):
db = sqlite3.connect(db_file)
cur = db.cursor()
cur.execute(sql)
rv = cur.fetchone()
cur.close()
db.close()
return rv
@staticmethod
def provide_design(opts, source_spec, source_bucket, source_map):
return 0, None
def provide_batch(self):
if self.cursor_done:
return 0, None
batch = Batch(self)
batch_max_size = self.opts.extra['batch_max_size']
batch_max_bytes = self.opts.extra['batch_max_bytes']
source_vbucket_state = \
getattr(self.opts, 'source_vbucket_state', 'active')
try:
if self.cursor_todo is None:
rv, db, attached_dbs, table_dbs, vbucket_states = self.connect_db()
if rv != 0:
return rv, None
# Determine which db the state table is in.
try:
(state_db,) = table_dbs[u'vbucket_states']
except ValueError:
db.close()
return "error: no unique vbucket_states table", None
kv_names = []
for kv_name, db_name in table_dbs.iteritems():
if (self.opts.id is None and
not kv_name.startswith('kv_')):
continue
if (self.opts.id is not None and
kv_name != "kv_%s" % (self.opts.id) ):
continue
kv_names.append(kv_name)
db_kv_names = []
for kv_name in sorted(kv_names,
key=lambda x: int(x.split('_')[-1])):
for db_name in sorted(table_dbs[kv_name]):
db_kv_names.append((db_name, kv_name))
self.cursor_todo = (db, db_kv_names, None, vbucket_states)
db, db_kv_names, cursor, vbucket_states = self.cursor_todo
if not db:
self.cursor_done = True
self.cursor_todo = None
return 0, None
while (not self.cursor_done and
batch.size() < batch_max_size and
batch.bytes < batch_max_bytes):
if not cursor:
if not db_kv_names:
self.cursor_done = True
self.cursor_todo = None
db.close()
break
db_name, kv_name = db_kv_names.pop()
vbucket_id = int(kv_name.split('_')[-1])
if not vbucket_states[source_vbucket_state].has_key(vbucket_id):
break
logging.debug(" MBFSource db/kv table: %s/%s" %
(db_name, kv_name))
cursor = db.cursor()
cursor.execute(self.s % (db_name, kv_name))
self.cursor_todo = (db, db_kv_names, cursor, vbucket_states)
row = cursor.fetchone()
if row:
vbucket_id = row[0]
key = row[1]
flg = row[2]
exp = row[3]
cas = row[4]
val = row[5]
version = int(row[6])
if self.skip(key, vbucket_id):
continue
if version != vbucket_states[source_vbucket_state][vbucket_id]:
continue
meta = ''
flg = socket.ntohl(ctypes.c_uint32(flg).value)
batch.append((couchbaseConstants.CMD_TAP_MUTATION,
vbucket_id, key, flg, exp, cas, meta, val, 0, 0, 0), len(val))
else:
cursor.close()
self.cursor_todo = (db, db_kv_names, None, vbucket_states)
break # Close the batch; next pass hits new db_name/kv_name.
except Exception, e:
self.cursor_done = True
self.cursor_todo = None
return "error: MBFSource exception: " + str(e), None
return 0, batch
@staticmethod
def total_msgs(opts, source_bucket, source_node, source_map):
total = None
vb_state = getattr(opts, "source_vbucket_state", None)
if vb_state not in ["active", "replica"]:
return 0, total
try:
spec = source_map['spec']
db = sqlite3.connect(spec)
cursor = db.cursor()
stmt = "SELECT value FROM stats_snap where name like 'vb_%s_curr_items'" % vb_state
cursor.execute(stmt)
row = cursor.fetchone()
if row:
#Either we can find the stats in the first row, or we don't.
total = int(str(row[0]))
cursor.close()
db.close()
except Exception,e:
pass
return 0, total
def connect_db(self):
#Build vbucket state hash table
vbucket_states = defaultdict(dict)
sql = """SELECT vbid, vb_version, state FROM vbucket_states"""
try:
db = sqlite3.connect(self.spec)
cur = db.cursor()
for row in cur.execute(sql):
vbucket_id = int(row[0])
vb_version = int(row[1])
state = str(row[2])
vbucket_states[state][vbucket_id] = vb_version
cur.close()
db.close()
except sqlite3.DatabaseError, e:
return "error: no vbucket_states table was found;" + \
" check if db files are correct", None, None, None
db = sqlite3.connect(':memory:')
logging.debug(" MBFSource connect_db: %s" % self.spec)
db_files = MBFSource.db_files(self.spec)
logging.debug(" MBFSource db_files: %s" % db_files)
attached_dbs = ["db%s" % (i) for i in xrange(len(db_files))]
db.executemany("attach ? as ?", zip(db_files, attached_dbs))
# Find all tables, filling a table_name => db_name map.
table_dbs = {}
for db_name in attached_dbs:
cursor = db.cursor()
cursor.execute("SELECT name FROM %s.sqlite_master"
" WHERE type = 'table'" % db_name)
for (table_name,) in cursor:
table_dbs.setdefault(table_name, []).append(db_name)
cursor.close()
if not filter(lambda table_name: table_name.startswith("kv_"),
table_dbs):
db.close()
return "error: no kv data was found;" + \
" check if db files are correct", None, None, None
logging.debug(" MBFSource total # tables: %s" % len(table_dbs))
return 0, db, attached_dbs, table_dbs, vbucket_states
| 35.247678 | 96 | 0.52253 |
86e1f399a838fc5b3e09dc590b49a710233e9dbf | 4,879 | py | Python | s2e/source/s2e/guest/windows/scripts/gendriver.py | Albocoder/KOOBE | dd8acade05b0185771e1ea9950de03ab5445a981 | [
"MIT"
] | 55 | 2019-12-20T03:25:14.000Z | 2022-01-16T07:19:47.000Z | s2e/source/s2e/guest/windows/scripts/gendriver.py | Albocoder/KOOBE | dd8acade05b0185771e1ea9950de03ab5445a981 | [
"MIT"
] | 2 | 2020-11-02T08:01:00.000Z | 2022-03-27T02:59:18.000Z | s2e/source/s2e/guest/windows/scripts/gendriver.py | Albocoder/KOOBE | dd8acade05b0185771e1ea9950de03ab5445a981 | [
"MIT"
] | 11 | 2020-08-06T03:59:45.000Z | 2022-02-25T02:31:59.000Z | #!/usr/bin/env python
# Copyright (C) 2014-2017, Cyberhaven
# Copyright (C) 2017, Dependable Systems Laboratory, EPFL
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pprint
from common import extract
def get_info(pdb):
    """Extract kernel symbol addresses and struct field offsets from a PDB.

    Args:
        pdb: parsed PDB object exposing ``product_version``, ``checksum``,
            ``bits``, ``get_function_address(name, may_be_missing=False)``
            and ``get_field_offset('_TYPE:Field')``.

    Returns:
        dict mapping symbol/offset names to values, with version-specific
        entries added for NT 5.x kernels.

    Raises:
        RuntimeError: if neither process back-pointer field can be resolved.
    """
    ret = {
        'version': pdb.product_version,
        'checksum': pdb.checksum,
        'bits': pdb.bits,
        'IopDeleteDriver': pdb.get_function_address('IopDeleteDriver'),
        'KeBugCheck2': pdb.get_function_address('KeBugCheck2'),
        'KdDebuggerDataBlock': pdb.get_function_address('KdDebuggerDataBlock'),
        'KdCopyDataBlock': pdb.get_function_address('KdCopyDataBlock', True),
        'KdpDataBlockEncoded': pdb.get_function_address('KdpDataBlockEncoded', True),
        'PsActiveProcessHead': pdb.get_function_address('PsActiveProcessHead'),
        'PsLoadedModuleList': pdb.get_function_address('PsLoadedModuleList'),
        'PerfLogImageUnload': pdb.get_function_address('PerfLogImageUnload', True),
        'ObpCreateHandle': pdb.get_function_address('ObpCreateHandle'),
        'MmAccessFault': pdb.get_function_address('MmAccessFault'),

        '_EPROCESS_VadRoot': pdb.get_field_offset('_EPROCESS:VadRoot'),

        'NtAllocateVirtualMemory': pdb.get_function_address('NtAllocateVirtualMemory'),
        'NtFreeVirtualMemory': pdb.get_function_address('NtFreeVirtualMemory'),
        'NtProtectVirtualMemory': pdb.get_function_address('NtProtectVirtualMemory'),
        'NtMapViewOfSection': pdb.get_function_address('NtMapViewOfSection'),
        'NtUnmapViewOfSection': pdb.get_function_address('NtUnmapViewOfSection'),
        'MiUnmapViewOfSection': pdb.get_function_address('MiUnmapViewOfSection'),
        #'NtUnmapViewOfSectionEx': pdb.get_function_address('NtUnmapViewOfSectionEx'),

        'KiInitialPCR': pdb.get_function_address('KiInitialPCR', True),
        '_KPRCB_ProcessorState': pdb.get_field_offset('_KPRCB:ProcessorState'),

        '_EPROCESS_ActiveProcessLinks': pdb.get_field_offset('_EPROCESS:ActiveProcessLinks'),
        '_EPROCESS_ThreadListHead': pdb.get_field_offset('_EPROCESS:ThreadListHead'),
        '_EPROCESS_UniqueProcessId': pdb.get_field_offset('_EPROCESS:UniqueProcessId'),
        '_EPROCESS_CommitCharge': pdb.get_field_offset('_EPROCESS:CommitCharge'),
        '_EPROCESS_VirtualSize': pdb.get_field_offset('_EPROCESS:VirtualSize'),
        '_EPROCESS_PeakVirtualSize': pdb.get_field_offset('_EPROCESS:PeakVirtualSize'),
        '_EPROCESS_CommitChargePeak': pdb.get_field_offset('_EPROCESS:CommitChargePeak'),
        '_EPROCESS_ExitStatus': pdb.get_field_offset('_EPROCESS:ExitStatus'),

        '_ETHREAD_ThreadListEntry': pdb.get_field_offset('_ETHREAD:ThreadListEntry'),
        '_ETHREAD_Cid': pdb.get_field_offset('_ETHREAD:Cid'),

        '_KPRCB_CurrentThread': pdb.get_field_offset('_KPRCB:CurrentThread'),
        '_KPCR_Prcb': pdb.get_field_offset('_KPCR:Prcb'),
        '_KPCR_KdVersionBlock': pdb.get_field_offset('_KPCR:KdVersionBlock'),
        '_KTHREAD_StackBase': pdb.get_field_offset('_KTHREAD:StackBase'),
        '_KTHREAD_StackLimit': pdb.get_field_offset('_KTHREAD:StackLimit'),
        '_KPRCB_DpcStack': pdb.get_field_offset('_KPRCB:DpcStack'),
    }

    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(ret)

    # The field holding the owning process moved between Windows versions:
    # newer kernels expose _KTHREAD.Process, older ones _ETHREAD.ThreadsProcess.
    # (Previously this value was also computed inside the dict literal above
    # and then unconditionally overwritten here; the redundant lookup is gone.)
    process = pdb.get_field_offset('_KTHREAD:Process')
    if process is None:
        process = pdb.get_field_offset('_ETHREAD:ThreadsProcess')
        if process is None:
            raise RuntimeError('Could not find process field')
    ret['_KTHREAD_Process'] = process

    if ret['version'][0] == 5:
        # NT 5.x (XP/2003): the PRCB is embedded inside the KPCR.
        ret['_KPCR_PrcbData'] = pdb.get_field_offset('_KPCR:PrcbData')

    return ret
def main():
    """Entry point: run PDB extraction with get_info, rendering gendriver.tpl."""
    extract(get_info, 'gendriver.tpl')

# Run extraction when invoked as a script.
if __name__ == '__main__':
    main()
| 46.028302 | 93 | 0.733962 |
bef3385209058f3b46984840f49389184a3dbf58 | 2,594 | py | Python | tests/_tests_scripts/z_mvp_mnist_unet.py | balakhonoff/catalyst | 82d904aee97045efbaef3963e36c2ce5173ddac4 | [
"Apache-2.0"
] | null | null | null | tests/_tests_scripts/z_mvp_mnist_unet.py | balakhonoff/catalyst | 82d904aee97045efbaef3963e36c2ce5173ddac4 | [
"Apache-2.0"
] | null | null | null | tests/_tests_scripts/z_mvp_mnist_unet.py | balakhonoff/catalyst | 82d904aee97045efbaef3963e36c2ce5173ddac4 | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
import os
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
from catalyst import dl
from catalyst.utils import metrics
class ClassifyUnet(nn.Module):
    """Toy conv autoencoder with an added classification head.

    A single tanh-activated conv layer encodes the input; the latent is
    (a) flattened and classified by a linear head and (b) reconstructed
    by a decoder conv. ``forward`` returns (logits, reconstruction).
    """

    def __init__(self, in_channels, in_hw, out_features):
        super().__init__()
        conv_args = (in_channels, in_channels, 3, 1, 1)
        self.encoder = nn.Sequential(nn.Conv2d(*conv_args), nn.Tanh())
        self.decoder = nn.Conv2d(*conv_args)
        # 3x3 conv with stride 1 / padding 1 preserves HW, so the flattened
        # latent has in_channels * in_hw * in_hw features.
        self.clf = nn.Linear(in_channels * in_hw * in_hw, out_features)

    def forward(self, x):
        latent = self.encoder(x)
        logits = self.clf(torch.flatten(latent, 1))
        reconstruction = self.decoder(latent)
        return logits, reconstruction
class CustomRunner(dl.Runner):
    """Catalyst runner training the denoising classifier-autoencoder."""

    def _handle_batch(self, batch):
        images, labels = batch
        # Corrupt the inputs with uniform noise, keeping values in [0, 1].
        noisy = (images + torch.rand_like(images)).clamp_(0, 1)

        logits, restored = self.model(noisy)

        clf_loss = F.cross_entropy(logits, labels)
        iou_value = metrics.iou(restored, images)
        iou_loss = 1 - iou_value
        total_loss = clf_loss + iou_loss
        top1, top3, top5 = metrics.accuracy(logits, labels, topk=(1, 3, 5))

        self.state.batch_metrics = {
            "loss_clf": clf_loss,
            "loss_iou": iou_loss,
            "loss": total_loss,
            "iou": iou_value,
            "accuracy01": top1,
            "accuracy03": top3,
            "accuracy05": top5,
        }

        if self.state.is_train_loader:
            total_loss.backward()
            self.state.optimizer.step()
            self.state.optimizer.zero_grad()
def main():
    """Train the denoising classifier on MNIST for a quick smoke run."""
    model = ClassifyUnet(1, 28, 10)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.02)

    def make_loader():
        # Both splits reuse the (small) MNIST test set for a fast check run.
        dataset = MNIST(
            os.getcwd(),
            train=False,
            download=True,
            transform=transforms.ToTensor(),
        )
        return DataLoader(dataset, batch_size=32)

    loaders = {"train": make_loader(), "valid": make_loader()}

    runner = CustomRunner()
    runner.train(
        model=model,
        optimizer=optimizer,
        loaders=loaders,
        verbose=True,
        check=True,
    )


if __name__ == "__main__":
    # Plain single-process run only; skipped under APEX / DDP setups.
    if os.getenv("USE_APEX", "0") == "0" and os.getenv("USE_DDP", "0") == "0":
        main()
| 25.94 | 78 | 0.544719 |
15ccfa26470577bd1a02d164b6d10a3b0bcd4619 | 529 | py | Python | algorithms/square_root_bisection_search.py | ivanlmj/SnakeEggs | 540843dcf6fba4b3fe0d6d57dd19654f33cccb74 | [
"MIT"
] | 1 | 2019-05-20T18:19:45.000Z | 2019-05-20T18:19:45.000Z | algorithms/square_root_bisection_search.py | ivanlmj/SnakeEggs | 540843dcf6fba4b3fe0d6d57dd19654f33cccb74 | [
"MIT"
] | null | null | null | algorithms/square_root_bisection_search.py | ivanlmj/SnakeEggs | 540843dcf6fba4b3fe0d6d57dd19654f33cccb74 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# Approximate sqrt(x) by bisection on [1, x], stopping once the
# squared guess is within epsilon of x.
x = 25
epsilon = 0.01
numGuesses = 0

low = 1.0
high = x
ans = (low + high) / 2.0

while abs(ans**2 - x) >= epsilon:
    print("\nlow: {0:<9} high: {1:<9} ans: (high+low)/2 == {2}".format(low,high,ans))
    numGuesses += 1
    # Within the loop ans**2 differs from x by at least epsilon,
    # so exactly one of the two strict comparisons holds.
    if ans**2 > x:
        print("R:",ans, "squared is GREATER than", x)
        high = ans
    else:
        print("R:",ans, "squared is LESS than", x)
        low = ans
    ans = (low + high) / 2.0

print("\nnumGuesses = ",numGuesses)
print(ans, 'is close to square root of ',x)
f47766116d79209748a3ef241f58dbfb7c21786e | 767 | py | Python | KaSaAn/__init__.py | hmedina/KaSaAn | 83e4e31ff0e0062762aacfbc65bbdd290808bb51 | [
"MIT"
] | 1 | 2020-05-11T14:31:54.000Z | 2020-05-11T14:31:54.000Z | KaSaAn/__init__.py | hmedina/KaSaAn | 83e4e31ff0e0062762aacfbc65bbdd290808bb51 | [
"MIT"
] | 4 | 2017-08-31T11:16:08.000Z | 2020-07-10T22:31:45.000Z | KaSaAn/__init__.py | hmedina/KaSaAn | 83e4e31ff0e0062762aacfbc65bbdd290808bb51 | [
"MIT"
] | 2 | 2018-02-06T20:53:26.000Z | 2019-05-11T18:05:38.000Z | #!/usr/bin/env python3
"""
Tools and utilities for analyzing Kappa objects. The `scripts` submodule contains command line utilities (installed via
SetupTools EntryPoints under the namespace `kappa_[object type]`), for which the functions are located under
`functions`. A low-level API is provided under `core`.
To view the set of installed scripts, view the `setup.py` file. Examples include:
* `kappa_observable_coplotter`, to plot a specific observable jointly from multiple observable files (e.g.
 `data.csv`)
* `kappa_snapshot_visualizer_network`, to plot a snapshot as a ball & stick network
* `kappa_trace_movie_maker`, to make a gif of a series of snapshots derived from a trace to visualize the size and
composition of the mixture through time
"""
| 47.9375 | 119 | 0.783572 |
70ebcb3a3ff1b184b32c7ef4e6401ca24f6a5174 | 39,222 | py | Python | sdks/python/apache_beam/typehints/typehints_test.py | VaclavPlajt/beam | 247a62ff1d4368f1e7c2ade6bed5dec71d8d2bcc | [
"Apache-2.0"
] | 2 | 2018-07-27T08:48:24.000Z | 2018-10-22T06:31:04.000Z | sdks/python/apache_beam/typehints/typehints_test.py | VaclavPlajt/beam | 247a62ff1d4368f1e7c2ade6bed5dec71d8d2bcc | [
"Apache-2.0"
] | 2 | 2018-05-21T10:35:40.000Z | 2018-05-21T10:46:10.000Z | sdks/python/apache_beam/typehints/typehints_test.py | VaclavPlajt/beam | 247a62ff1d4368f1e7c2ade6bed5dec71d8d2bcc | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the type-hint objects and decorators."""
import functools
import inspect
import unittest
import apache_beam.typehints.typehints as typehints
from apache_beam.typehints import Any
from apache_beam.typehints import Tuple
from apache_beam.typehints import TypeCheckError
from apache_beam.typehints import Union
from apache_beam.typehints import with_input_types
from apache_beam.typehints import with_output_types
from apache_beam.typehints.decorators import GeneratorWrapper
from apache_beam.typehints.decorators import _check_instance_type
from apache_beam.typehints.decorators import _interleave_type_check
from apache_beam.typehints.decorators import _positional_arg_hints
from apache_beam.typehints.decorators import get_type_hints
from apache_beam.typehints.decorators import getcallargs_forhints
from apache_beam.typehints.typehints import is_consistent_with
def check_or_interleave(hint, value, var):
  """Type-check value against hint, returning a (possibly wrapped) value.

  With no hint the value passes through untouched. Iterator hints cannot be
  checked eagerly without consuming the iterator, so they are wrapped for
  lazy, element-by-element checking; everything else is checked immediately.
  """
  if hint is None:
    return value
  is_iterator_hint = isinstance(
      hint, typehints.IteratorHint.IteratorTypeConstraint)
  if is_iterator_hint:
    return _interleave_type_check(hint, var)(value)
  _check_instance_type(hint, value, var)
  return value
def check_type_hints(f):
  """Decorator that enforces f's declared type hints at call time.

  Declared input hints are checked against the actual call arguments
  (iterator hints are wrapped for lazy checking, which may replace the
  argument value), then f is invoked and its result is checked against
  the declared simple output type.
  """
  @functools.wraps(f)
  def wrapper(*args, **kwargs):
    hints = get_type_hints(f)
    if hints.input_types:  # pylint: disable=too-many-nested-blocks
      input_hints = getcallargs_forhints(
          f, *hints.input_types[0], **hints.input_types[1])
      inputs = inspect.getcallargs(f, *args, **kwargs)
      for var, hint in input_hints.items():
        value = inputs[var]
        new_value = check_or_interleave(hint, value, var)
        if new_value is not value:
          # A wrapped value must be swapped back into the call arguments.
          if var in kwargs:
            kwargs[var] = new_value
          else:
            args = list(args)
            # inspect.getargspec was removed in Python 3.11; prefer
            # getfullargspec where available, with a Python 2 fallback.
            if hasattr(inspect, 'getfullargspec'):
              arg_names = inspect.getfullargspec(f).args
            else:
              arg_names = inspect.getargspec(f).args
            for ix, pvar in enumerate(arg_names):
              if pvar == var:
                args[ix] = new_value
                break
            else:
              raise NotImplementedError('Iterable in nested argument %s' % var)
    res = f(*args, **kwargs)
    return check_or_interleave(hints.simple_output_type('typecheck'), res, None)
  return wrapper
# Placeholder user-defined types for composite type-hint tests.
class DummyTestClass1(object):
  pass


class DummyTestClass2(object):
  pass


# Minimal inheritance pair for covariance/compatibility tests.
class SuperClass(object):
  pass


class SubClass(SuperClass):
  pass
class TypeHintTestCase(unittest.TestCase):
  """Base test case providing assertions over type-hint consistency."""

  def assertCompatible(self, base, sub):  # pylint: disable=invalid-name
    """Assert that `sub` is consistent with (usable where `base` is expected)."""
    message = '%s is not consistent with %s' % (sub, base)
    self.assertTrue(is_consistent_with(sub, base), message)

  def assertNotCompatible(self, base, sub):  # pylint: disable=invalid-name
    """Assert that `sub` is NOT consistent with `base`."""
    message = '%s is consistent with %s' % (sub, base)
    self.assertFalse(is_consistent_with(sub, base), message)
class AnyTypeConstraintTestCase(TypeHintTestCase):
  """Tests for typehints.Any: compatible in both directions with everything."""

  def test_any_compatibility(self):
    self.assertCompatible(typehints.Any, typehints.List[int])
    self.assertCompatible(typehints.Any, DummyTestClass1)
    self.assertCompatible(typehints.Union[int, bool], typehints.Any)
    self.assertCompatible(typehints.Optional[int], typehints.Any)
    self.assertCompatible(typehints.Tuple[int], typehints.Any)
    self.assertCompatible(typehints.KV[int, str], typehints.Any)
    self.assertCompatible(typehints.Dict[int, bool], typehints.Any)
    self.assertCompatible(typehints.Set[int], typehints.Any)
    self.assertCompatible(typehints.Iterable[int], typehints.Any)
    self.assertCompatible(typehints.Iterator[int], typehints.Any)
    self.assertCompatible(typehints.Generator[int], typehints.Any)
    self.assertCompatible(object, typehints.Any)
    self.assertCompatible(typehints.Any, object)

  def test_repr(self):
    self.assertEqual('Any', repr(typehints.Any))

  def test_type_check(self):
    # This test passes if the type_check call does not raise any exception.
    typehints.Any.type_check(1)
    typehints.Any.type_check([1, 2, 3])
    typehints.Any.type_check(DummyTestClass1())
class UnionHintTestCase(TypeHintTestCase):
  """Tests for Union[...]: construction, normalization, compatibility,
  and runtime type_check enforcement (including its error messages)."""

  def test_getitem_must_be_valid_type_param_cant_be_object_instance(self):
    with self.assertRaises(TypeError) as e:
      typehints.Union[5]
    self.assertEqual('Cannot create Union without a sequence of types.',
                     e.exception.args[0])

  def test_getitem_must_be_valid_type_param(self):
    t = [2, 3]
    with self.assertRaises(TypeError) as e:
      typehints.Union[t]
    self.assertEqual('All parameters to a Union hint must be a non-sequence, '
                     'a type, or a TypeConstraint. 2 is an instance of int.',
                     e.exception.args[0])

  def test_getitem_duplicates_ignored(self):
    # Types should be de-duplicated.
    hint = typehints.Union[int, int, str]
    self.assertEqual(len(hint.union_types), 2)

  def test_getitem_nested_unions_flattened(self):
    # The two Union's should be merged into 1.
    hint = typehints.Union[typehints.Union[int, str],
                           typehints.Union[float, bool]]
    self.assertTrue(len(hint.union_types) == 4)
    self.assertTrue(all(t in hint.union_types for t in (int, str, float, bool)))

  def test_union_hint_compatibility(self):
    self.assertCompatible(typehints.Union[int, float], int)
    self.assertCompatible(typehints.Union[int, str], typehints.Union[str, int])
    self.assertCompatible(typehints.Union[int, float, str],
                          typehints.Union[str, int])
    self.assertCompatible(
        typehints.Union[DummyTestClass1, str],
        typehints.Union[DummyTestClass1, str])
    self.assertCompatible(typehints.Union[int, str],
                          typehints.Union[str, typehints.Union[int, str]])

    self.assertNotCompatible(typehints.Union[float, bool],
                             typehints.Union[int, bool])
    self.assertNotCompatible(typehints.Union[bool, str],
                             typehints.Union[float, bool, int])

  def test_nested_compatibility(self):
    self.assertCompatible(Union[int, Tuple[Any, int]], Tuple[int, int])
    self.assertCompatible(Union[int, Tuple[Any, Any]],
                          Union[Tuple[int, Any], Tuple[Any, int]])
    self.assertCompatible(Union[int, SuperClass], SubClass)
    self.assertCompatible(Union[int, float, SuperClass], Union[int, SubClass])
    self.assertNotCompatible(Union[int, SubClass], SuperClass)
    self.assertNotCompatible(Union[int, float, SubClass],
                             Union[int, SuperClass])
    self.assertNotCompatible(Union[int, SuperClass],
                             Union[int, float, SubClass])

    self.assertCompatible(Tuple[Any, Any],
                          Union[Tuple[str, int], Tuple[str, float]])

  def test_union_hint_repr(self):
    hint = typehints.Union[DummyTestClass1, str]
    self.assertIn(
        str(hint),
        # Uses frozen set internally, so order not guaranteed.
        ['Union[str, DummyTestClass1]',
         'Union[DummyTestClass1, str]']
    )

  def test_union_hint_enforcement_composite_type_in_union(self):
    o = DummyTestClass1()
    hint = typehints.Union[int, DummyTestClass1]

    self.assertIsNone(hint.type_check(4))
    self.assertIsNone(hint.type_check(o))

  def test_union_hint_enforcement_part_of_union(self):
    hint = typehints.Union[int, str]
    self.assertIsNone(hint.type_check(5))
    self.assertIsNone(hint.type_check('test'))

  def test_union_hint_enforcement_not_part_of_union(self):
    hint = typehints.Union[int, float]
    with self.assertRaises(TypeError) as e:
      hint.type_check('test')

    self.assertEqual("Union[float, int] type-constraint violated. Expected an "
                     "instance of one of: ('float', 'int'), received str "
                     "instead.",
                     e.exception.args[0])
class OptionalHintTestCase(TypeHintTestCase):
  """Tests for typehints.Optional, a thin wrapper around Union[T, None]."""

  def test_getitem_sequence_not_allowed(self):
    with self.assertRaises(TypeError) as e:
      typehints.Optional[int, str]
    self.assertTrue(e.exception.args[0].startswith(
        'An Option type-hint only accepts a single type parameter.'))

  def test_getitem_proxy_to_union(self):
    # Optional[T] is implemented as a UnionConstraint under the hood.
    hint = typehints.Optional[int]
    self.assertTrue(isinstance(hint, typehints.UnionHint.UnionConstraint))
class TupleHintTestCase(TypeHintTestCase):
  """Tests for Tuple[...]: fixed-arity and arbitrary-length (Ellipsis)
  forms, compatibility rules, repr, and runtime type_check errors."""

  def test_getitem_invalid_ellipsis_type_param(self):
    error_msg = ('Ellipsis can only be used to type-hint an arbitrary length '
                 'tuple of containing a single type: Tuple[A, ...].')

    with self.assertRaises(TypeError) as e:
      typehints.Tuple[int, int, ...]
    self.assertEqual(error_msg, e.exception.args[0])

    with self.assertRaises(TypeError) as e:
      typehints.Tuple[...]
    self.assertEqual(error_msg, e.exception.args[0])

  def test_getitem_params_must_be_type_or_constraint(self):
    expected_error_prefix = 'All parameters to a Tuple hint must be'
    with self.assertRaises(TypeError) as e:
      typehints.Tuple[5, [1, 3]]
    self.assertTrue(e.exception.args[0].startswith(expected_error_prefix))

    with self.assertRaises(TypeError) as e:
      typehints.Tuple[list, dict]
    self.assertTrue(e.exception.args[0].startswith(expected_error_prefix))

  def test_compatibility_arbitrary_length(self):
    # Tuple[T, ...] accepts any fixed-arity tuple of T, but not vice versa.
    self.assertNotCompatible(
        typehints.Tuple[int, int], typehints.Tuple[int, ...])
    self.assertCompatible(
        typehints.Tuple[int, ...], typehints.Tuple[int, int])
    self.assertCompatible(
        typehints.Tuple[Any, ...], typehints.Tuple[int, float])
    self.assertCompatible(
        typehints.Tuple[SuperClass, ...], typehints.Tuple[SubClass, SuperClass])
    self.assertCompatible(typehints.Iterable[int], typehints.Tuple[int, ...])
    self.assertCompatible(typehints.Iterable[SuperClass],
                          typehints.Tuple[SubClass, ...])

  def test_compatibility(self):
    self.assertCompatible(typehints.Tuple[int, str], typehints.Tuple[int, str])
    self.assertCompatible(typehints.Tuple[int, Any], typehints.Tuple[int, str])
    self.assertCompatible(typehints.Tuple[int, str], typehints.Tuple[int, Any])
    self.assertCompatible(typehints.Tuple[typehints.Union[int, str], bool],
                          typehints.Tuple[typehints.Union[int, str], bool])
    self.assertCompatible(typehints.Tuple[typehints.Union[str, int], int],
                          typehints.Tuple[typehints.Union[int, str], int])
    self.assertCompatible(typehints.Tuple[SuperClass, int],
                          typehints.Tuple[SubClass, int])
    self.assertNotCompatible(typehints.Tuple[int, int],
                             typehints.Tuple[int, int, int])

  def test_raw_tuple(self):
    # The bare builtin `tuple` accepts any parameterized Tuple hint.
    self.assertCompatible(tuple, typehints.Tuple[int])
    self.assertCompatible(tuple, typehints.Tuple[int, float])
    self.assertCompatible(tuple, typehints.Tuple[int, ...])

  def test_repr(self):
    hint = typehints.Tuple[int, str, float]
    self.assertEqual('Tuple[int, str, float]', str(hint))

    hint = typehints.Tuple[DummyTestClass1, DummyTestClass2]
    self.assertEqual('Tuple[DummyTestClass1, DummyTestClass2]',
                     str(hint))

    hint = typehints.Tuple[float, ...]
    self.assertEqual('Tuple[float, ...]', str(hint))

  def test_type_check_must_be_tuple(self):
    hint = typehints.Tuple[int, str]
    expected_error_prefix = 'Tuple type constraint violated. Valid object'
    invalid_instances = ([1, 2, 3], {4: 'f'}, 9, 'test', None)
    for t in invalid_instances:
      with self.assertRaises(TypeError) as e:
        hint.type_check(t)
      self.assertTrue(e.exception.args[0].startswith(expected_error_prefix))

  def test_type_check_must_have_same_arity(self):
    # A 2-tuple of ints.
    hint = typehints.Tuple[int, int]
    t = (1, 2, 3)
    with self.assertRaises(TypeError) as e:
      hint.type_check(t)
    self.assertEqual('Passed object instance is of the proper type, but '
                     'differs in length from the hinted type. Expected a '
                     'tuple of length 2, received a tuple of length 3.',
                     e.exception.args[0])

  def test_type_check_invalid_simple_types(self):
    hint = typehints.Tuple[str, bool]
    with self.assertRaises(TypeError) as e:
      hint.type_check((4, False))
    self.assertEqual('Tuple[str, bool] hint type-constraint violated. The '
                     'type of element #0 in the passed tuple is incorrect.'
                     ' Expected an instance of type str, instead received '
                     'an instance of type int.',
                     e.exception.args[0])

  def test_type_check_invalid_composite_type(self):
    hint = typehints.Tuple[DummyTestClass1, DummyTestClass2]
    t = (DummyTestClass2(), DummyTestClass1())
    with self.assertRaises(TypeError) as e:
      hint.type_check(t)
    self.assertEqual('Tuple[DummyTestClass1, DummyTestClass2] hint '
                     'type-constraint violated. The type of element #0 in the '
                     'passed tuple is incorrect. Expected an instance of type '
                     'DummyTestClass1, instead received an instance of type '
                     'DummyTestClass2.',
                     e.exception.args[0])

  def test_type_check_valid_simple_types(self):
    hint = typehints.Tuple[float, bool]
    self.assertIsNone(hint.type_check((4.3, True)))

    hint = typehints.Tuple[int]
    self.assertIsNone(hint.type_check((1,)))

  def test_type_check_valid_composite_types(self):
    hint = typehints.Tuple[typehints.Tuple[int, str],
                           typehints.Tuple[int, bool]]
    self.assertIsNone(hint.type_check(
        ((4, 'test'), (4, True))
    ))

  def test_type_check_valid_simple_type_arbitrary_length(self):
    hint = typehints.Tuple[int, ...]
    t = (1, 2, 3, 4)
    self.assertIsNone(hint.type_check(t))

  def test_type_check_valid_composite_type_arbitrary_length(self):
    hint = typehints.Tuple[typehints.List[str], ...]
    t = (['h', 'e'], ['l', 'l'], ['o'])
    self.assertIsNone(hint.type_check(t))

  def test_type_check_invalid_simple_type_arbitrary_length(self):
    hint = typehints.Tuple[str, ...]
    t = ('t', 'e', 5, 't')
    with self.assertRaises(TypeError) as e:
      hint.type_check(t)
    self.assertEqual('Tuple[str, ...] hint type-constraint violated. The type '
                     'of element #2 in the passed tuple is incorrect. Expected '
                     'an instance of type str, instead received an instance of '
                     'type int.',
                     e.exception.args[0])

  def test_type_check_invalid_composite_type_arbitrary_length(self):
    hint = typehints.Tuple[typehints.List[int], ...]
    t = ([1, 2], 'e', 's', 't')
    with self.assertRaises(TypeError) as e:
      hint.type_check(t)
    self.assertEqual("Tuple[List[int], ...] hint type-constraint violated. The "
                     "type of element #1 in the passed tuple is incorrect: "
                     "List type-constraint violated. Valid object instance "
                     "must be of type 'list'. Instead, an instance of 'str' "
                     "was received.",
                     e.exception.args[0])
class ListHintTestCase(TypeHintTestCase):
  """Tests for List[T]: parameter validation, compatibility, repr,
  and runtime type_check enforcement on list elements."""

  def test_getitem_invalid_composite_type_param(self):
    with self.assertRaises(TypeError):
      typehints.List[4]

  def test_list_constraint_compatibility(self):
    hint1 = typehints.List[typehints.Tuple[int, str]]
    hint2 = typehints.List[typehints.Tuple[float, bool]]

    self.assertCompatible(hint1, hint1)
    self.assertNotCompatible(hint1, hint2)
    self.assertCompatible(typehints.List[SuperClass], typehints.List[SubClass])

  def test_list_repr(self):
    hint = (
        typehints.List[typehints.Tuple[DummyTestClass1, DummyTestClass2]]
    )
    self.assertEqual('List[Tuple[DummyTestClass1, DummyTestClass2]]',
                     repr(hint))

  def test_enforce_list_type_constraint_valid_simple_type(self):
    hint = typehints.List[int]
    self.assertIsNone(hint.type_check([1, 2, 3]))

  def test_enforce_list_type_constraint_valid_composite_type(self):
    hint = typehints.List[DummyTestClass1]
    l = [DummyTestClass1(), DummyTestClass1()]
    self.assertIsNone(hint.type_check(l))

  def test_enforce_list_type_constraint_invalid_simple_type(self):
    hint = typehints.List[int]
    l = ['f', 'd', 'm']
    with self.assertRaises(TypeError) as e:
      hint.type_check(l)
    self.assertEqual('List[int] hint type-constraint violated. The type of '
                     'element #0 in the passed list is incorrect. Expected an '
                     'instance of type int, instead received an instance of '
                     'type str.',
                     e.exception.args[0])

  def test_enforce_list_type_constraint_invalid_composite_type(self):
    hint = typehints.List[typehints.Tuple[int, int]]
    l = [('f', 1), ('m', 5)]
    with self.assertRaises(TypeError) as e:
      hint.type_check(l)
    self.assertEqual('List[Tuple[int, int]] hint type-constraint violated.'
                     ' The type of element #0 in the passed list is '
                     'incorrect: Tuple[int, int] hint type-constraint '
                     'violated. The type of element #0 in the passed tuple'
                     ' is incorrect. Expected an instance of type int, '
                     'instead received an instance of type str.',
                     e.exception.args[0])
class KVHintTestCase(TypeHintTestCase):
  """Tests for KV[K, V]: parameter validation and its Tuple delegation."""

  def test_getitem_param_must_be_tuple(self):
    with self.assertRaises(TypeError) as e:
      typehints.KV[4]
    self.assertEqual('Parameter to KV type-hint must be a tuple of types: '
                     'KV[.., ..].',
                     e.exception.args[0])

  def test_getitem_param_must_have_length_2(self):
    with self.assertRaises(TypeError) as e:
      typehints.KV[int, str, bool]
    self.assertEqual("Length of parameters to a KV type-hint must be "
                     "exactly 2. Passed parameters: (<type 'int'>, <type "
                     "'str'>, <type 'bool'>), have a length of 3.",
                     e.exception.args[0])

  def test_getitem_proxy_to_tuple(self):
    # KV[K, V] is implemented as a two-element TupleConstraint.
    hint = typehints.KV[int, str]
    self.assertTrue(isinstance(hint, typehints.Tuple.TupleConstraint))

  def test_enforce_kv_type_constraint(self):
    hint = typehints.KV[str, typehints.Tuple[int, int, int]]
    t = ('test', (1, 2, 3))
    self.assertIsNone(hint.type_check(t))
class DictHintTestCase(TypeHintTestCase):
  """Tests for Dict[K, V]: construction errors, compatibility, type checks.

  Fix: test_type_check_invalid_key_type previously passed the redundant
  tuple ``(TypeError, TypeError)`` to assertRaises; a single exception type
  expresses the same expectation.
  """

  def test_getitem_param_must_be_tuple(self):
    with self.assertRaises(TypeError) as e:
      typehints.Dict[4]
    self.assertEqual('Parameter to Dict type-hint must be a tuple of '
                     'types: Dict[.., ..].',
                     e.exception.args[0])

  def test_getitem_param_must_have_length_2(self):
    with self.assertRaises(TypeError) as e:
      typehints.Dict[float, int, bool]
    self.assertEqual("Length of parameters to a Dict type-hint must be "
                     "exactly 2. Passed parameters: (<type 'float'>, <type "
                     "'int'>, <type 'bool'>), have a length of 3.",
                     e.exception.args[0])

  def test_key_type_must_be_valid_composite_param(self):
    # A raw container class (list) is not a valid composite parameter.
    with self.assertRaises(TypeError):
      typehints.Dict[list, int]

  def test_value_type_must_be_valid_composite_param(self):
    with self.assertRaises(TypeError):
      typehints.Dict[str, 5]

  def test_compatibility(self):
    hint1 = typehints.Dict[int, str]
    hint2 = typehints.Dict[bool, int]
    hint3 = typehints.Dict[int, typehints.List[typehints.Tuple[str, str, str]]]
    self.assertCompatible(hint1, hint1)
    self.assertCompatible(hint3, hint3)
    self.assertNotCompatible(hint3, 4)
    self.assertNotCompatible(hint2, hint1)

  def test_repr(self):
    hint3 = typehints.Dict[int, typehints.List[typehints.Tuple[str, str, str]]]
    self.assertEqual('Dict[int, List[Tuple[str, str, str]]]', repr(hint3))

  def test_type_checks_not_dict(self):
    hint = typehints.Dict[int, str]
    l = [1, 2]
    with self.assertRaises(TypeError) as e:
      hint.type_check(l)
    self.assertEqual('Dict type-constraint violated. All passed instances '
                     'must be of type dict. [1, 2] is of type list.',
                     e.exception.args[0])

  def test_type_check_invalid_key_type(self):
    hint = typehints.Dict[typehints.Tuple[int, int, int],
                          typehints.List[str]]
    d = {(1, 2): ['m', '1', '2', '3']}
    # A 2-tuple key violates the Tuple[int, int, int] key hint.
    with self.assertRaises(TypeError) as e:
      hint.type_check(d)
    self.assertEqual('Dict[Tuple[int, int, int], List[str]] hint key-type '
                     'constraint violated. All keys should be of type '
                     'Tuple[int, int, int]. Instead: Passed object '
                     'instance is of the proper type, but differs in '
                     'length from the hinted type. Expected a tuple of '
                     'length 3, received a tuple of length 2.',
                     e.exception.args[0])

  def test_type_check_invalid_value_type(self):
    hint = typehints.Dict[str, typehints.Dict[int, str]]
    d = {'f': [1, 2, 3]}
    with self.assertRaises(TypeError) as e:
      hint.type_check(d)
    self.assertEqual('Dict[str, Dict[int, str]] hint value-type constraint'
                     ' violated. All values should be of type '
                     'Dict[int, str]. Instead: Dict type-constraint '
                     'violated. All passed instances must be of type dict.'
                     ' [1, 2, 3] is of type list.',
                     e.exception.args[0])

  def test_type_check_valid_simple_type(self):
    hint = typehints.Dict[int, str]
    d = {4: 'f', 9: 'k'}
    self.assertIsNone(hint.type_check(d))

  def test_type_check_valid_composite_type(self):
    hint = typehints.Dict[typehints.Tuple[str, str], typehints.List[int]]
    d = {('f', 'k'): [1, 2, 3], ('m', 'r'): [4, 6, 9]}
    self.assertIsNone(hint.type_check(d))

  def test_match_type_variables(self):
    # Matching a concrete Dict against Dict[S, T] binds both type variables.
    S = typehints.TypeVariable('S')  # pylint: disable=invalid-name
    T = typehints.TypeVariable('T')  # pylint: disable=invalid-name
    hint = typehints.Dict[S, T]
    self.assertEqual({S: int, T: str},
                     hint.match_type_variables(typehints.Dict[int, str]))
class SetHintTestCase(TypeHintTestCase):
  """Tests for Set[T]: construction errors, compatibility, and type checks."""

  def test_getitem_invalid_composite_type_param(self):
    # A raw container class (list) is not an acceptable element hint.
    with self.assertRaises(TypeError) as e:
      typehints.Set[list]
    self.assertEqual("Parameter to a Set hint must be a non-sequence, a "
                     "type, or a TypeConstraint. <type 'list'> is an "
                     "instance of type.",
                     e.exception.args[0])

  def test_compatibility(self):
    hint1 = typehints.Set[typehints.List[str]]
    hint2 = typehints.Set[typehints.Tuple[int, int]]
    self.assertCompatible(hint1, hint1)
    self.assertNotCompatible(hint2, hint1)

  def test_repr(self):
    hint = typehints.Set[typehints.List[bool]]
    self.assertEqual('Set[List[bool]]', repr(hint))

  def test_type_check_must_be_set(self):
    # Non-set instances are rejected outright, before element checks.
    hint = typehints.Set[str]
    with self.assertRaises(TypeError) as e:
      hint.type_check(4)
    self.assertEqual("Set type-constraint violated. Valid object instance "
                     "must be of type 'set'. Instead, an instance of 'int'"
                     " was received.",
                     e.exception.args[0])

  def test_type_check_invalid_elem_type(self):
    hint = typehints.Set[float]
    with self.assertRaises(TypeError):
      hint.type_check('f')

  def test_type_check_valid_elem_simple_type(self):
    hint = typehints.Set[str]
    s = set(['f', 'm', 'k'])
    self.assertIsNone(hint.type_check(s))

  def test_type_check_valid_elem_composite_type(self):
    # Union element hints accept any of the member types.
    hint = typehints.Set[typehints.Union[int, str]]
    s = set([9, 'm', 'k'])
    self.assertIsNone(hint.type_check(s))
class IterableHintTestCase(TypeHintTestCase):
  """Tests for Iterable[T]: compatibility with concrete containers and checks.

  Fix: test_compatibility previously repeated its first four assertions
  verbatim at the end of the method; the duplicates have been removed.
  """

  def test_getitem_invalid_composite_type_param(self):
    with self.assertRaises(TypeError) as e:
      typehints.Iterable[5]
    self.assertEqual('Parameter to an Iterable hint must be a '
                     'non-sequence, a type, or a TypeConstraint. 5 is '
                     'an instance of int.',
                     e.exception.args[0])

  def test_compatibility(self):
    # Concrete containers (List, Set) are compatible with Iterable of a
    # compatible element type.
    self.assertCompatible(typehints.Iterable[int], typehints.List[int])
    self.assertCompatible(typehints.Iterable[int], typehints.Set[int])
    self.assertCompatible(typehints.Iterable[typehints.Any],
                          typehints.List[typehints.Tuple[int, bool]])
    self.assertCompatible(typehints.Iterable[int], typehints.Iterable[int])
    self.assertCompatible(typehints.Iterable[typehints.Union[int, str]],
                          typehints.Iterable[typehints.Union[int, str]])
    self.assertNotCompatible(typehints.Iterable[str], typehints.Iterable[bool])

  def test_tuple_compatibility(self):
    # Both variadic (Tuple[T, ...]) and fixed-arity tuples are iterable; the
    # union of the tuple's element types must satisfy the Iterable hint.
    self.assertCompatible(typehints.Iterable[int], typehints.Tuple[int, ...])
    self.assertCompatible(typehints.Iterable[SuperClass],
                          typehints.Tuple[SubClass, ...])
    self.assertCompatible(typehints.Iterable[int], typehints.Tuple[int, int])
    self.assertCompatible(typehints.Iterable[Any], typehints.Tuple[int, float])
    self.assertCompatible(typehints.Iterable[typehints.Union[int, float]],
                          typehints.Tuple[int, ...])
    self.assertCompatible(typehints.Iterable[typehints.Union[int, float]],
                          typehints.Tuple[int, float])
    self.assertCompatible(typehints.Iterable[typehints.Union[int, float]],
                          typehints.Tuple[int, float, int])

  def test_repr(self):
    hint = typehints.Iterable[typehints.Set[str]]
    self.assertEqual('Iterable[Set[str]]', repr(hint))

  def test_type_check_must_be_iterable(self):
    with self.assertRaises(TypeError) as e:
      hint = typehints.Iterable[int]
      hint.type_check(5)
    self.assertEqual("Iterable type-constraint violated. Valid object "
                     "instance must be of type 'iterable'. Instead, an "
                     "instance of 'int' was received.",
                     e.exception.args[0])

  def test_type_check_violation_invalid_simple_type(self):
    hint = typehints.Iterable[float]
    l = set([1, 2, 3, 4])
    with self.assertRaises(TypeError):
      hint.type_check(l)

  def test_type_check_violation_valid_simple_type(self):
    hint = typehints.Iterable[str]
    l = ('t', 'e', 's', 't')
    self.assertIsNone(hint.type_check(l))

  def test_type_check_violation_invalid_composite_type(self):
    hint = typehints.Iterable[typehints.List[int]]
    l = ([['t', 'e'], ['s', 't']])
    with self.assertRaises(TypeError):
      hint.type_check(l)

  def test_type_check_violation_valid_composite_type(self):
    hint = typehints.Iterable[typehints.List[int]]
    l = ([[1, 2], [3, 4, 5]])
    self.assertIsNone(hint.type_check(l))
class TestGeneratorWrapper(TypeHintTestCase):
  """Tests that GeneratorWrapper is transparent to normal generator use."""

  def test_functions_as_regular_generator(self):
    def count(n):
      for i in range(n):
        yield i

    seen = []

    # PEP 8 (E731): use a named def instead of assigning a lambda to a name.
    def interleave_func(value):
      seen.append(value)

    wrapped_gen = GeneratorWrapper(count(4), interleave_func)

    # Should function as a normal generator.
    self.assertEqual(0, next(wrapped_gen))
    self.assertEqual((1, 2, 3), tuple(wrapped_gen))

    # Interleave function should have been called once per yielded value.
    self.assertEqual([0, 1, 2, 3], seen)
class GeneratorHintTestCase(TypeHintTestCase):
  """Tests for Iterator[T] hints applied to generator returns and arguments."""

  def test_repr(self):
    hint = typehints.Iterator[typehints.Set[str]]
    self.assertEqual('Iterator[Set[str]]', repr(hint))

  def test_compatibility(self):
    self.assertCompatible(typehints.Iterator[int], typehints.Iterator[int])
    self.assertNotCompatible(typehints.Iterator[str], typehints.Iterator[float])

  def test_generator_return_hint_invalid_yield_type(self):
    # The yield type is checked lazily: the error surfaces on next(), not at
    # generator construction time.
    @check_type_hints
    @with_output_types(typehints.Iterator[int])
    def all_upper(s):
      for e in s:
        yield e.upper()

    with self.assertRaises(TypeCheckError) as e:
      next(all_upper('hello'))
    self.assertEqual('Type-hint for return type violated: Iterator[int] '
                     'hint type-constraint violated. Expected a iterator '
                     'of type int. Instead received a iterator of type '
                     'str.',
                     e.exception.args[0])

  def test_generator_argument_hint_invalid_yield_type(self):
    def wrong_yield_gen():
      for e in ['a', 'b']:
        yield e

    # A generator passed as an argument is also wrapped and checked as its
    # values are consumed inside the decorated function.
    @check_type_hints
    @with_input_types(a=typehints.Iterator[int])
    def increment(a):
      return [e + 1 for e in a]

    with self.assertRaises(TypeCheckError) as e:
      increment(wrong_yield_gen())
    self.assertEqual("Type-hint for argument: 'a' violated: Iterator[int] "
                     "hint type-constraint violated. Expected a iterator "
                     "of type int. Instead received a iterator of type "
                     "str.",
                     e.exception.args[0])
class TakesDecoratorTestCase(TypeHintTestCase):
  """Tests for @with_input_types: hint validation and runtime argument checks.

  Runtime enforcement only happens when @check_type_hints is also applied.
  """

  def test_must_be_primitive_type_or_constraint(self):
    # Hint arguments must be types or TypeConstraints; raw containers and
    # plain values are rejected at decoration time.
    with self.assertRaises(TypeError) as e:
      t = [1, 2]

      @with_input_types(a=t)
      def unused_foo(a):
        pass

    self.assertEqual('All type hint arguments must be a non-sequence, a '
                     'type, or a TypeConstraint. [1, 2] is an instance of '
                     'list.',
                     e.exception.args[0])

    with self.assertRaises(TypeError) as e:
      t = 5

      @check_type_hints
      @with_input_types(a=t)
      def unused_foo(a):
        pass

    self.assertEqual('All type hint arguments must be a non-sequence, a type, '
                     'or a TypeConstraint. 5 is an instance of int.',
                     e.exception.args[0])

  def test_basic_type_assertion(self):
    @check_type_hints
    @with_input_types(a=int)
    def foo(a):
      return a + 1

    with self.assertRaises(TypeCheckError) as e:
      m = 'a'
      foo(m)
    self.assertEqual("Type-hint for argument: 'a' violated. Expected an "
                     "instance of <type 'int'>, instead found an "
                     "instance of <type 'str'>.",
                     e.exception.args[0])

  def test_composite_type_assertion(self):
    @check_type_hints
    @with_input_types(a=typehints.List[int])
    def foo(a):
      a.append(1)
      return a

    with self.assertRaises(TypeCheckError) as e:
      m = ['f', 'f']
      foo(m)
    self.assertEqual("Type-hint for argument: 'a' violated: List[int] hint "
                     "type-constraint violated. The type of element #0 in "
                     "the passed list is incorrect. Expected an instance of "
                     "type int, instead received an instance of type str.",
                     e.exception.args[0])

  def test_valid_simple_type_arguments(self):
    @with_input_types(a=str)
    def upper(a):
      return a.upper()

    # Type constraints should pass, and function will be evaluated as normal.
    self.assertEqual('M', upper('m'))

  def test_any_argument_type_hint(self):
    # Any matches every runtime value; no check can fail.
    @check_type_hints
    @with_input_types(a=typehints.Any)
    def foo(a):
      return 4

    self.assertEqual(4, foo('m'))

  def test_valid_mix_positional_and_keyword_arguments(self):
    @check_type_hints
    @with_input_types(typehints.List[int], elem=typehints.List[int])
    def combine(container, elem):
      return container + elem

    self.assertEqual([1, 2, 3], combine([1, 2], [3]))

  def test_invalid_only_positional_arguments(self):
    # Positional hints are matched to parameters in order, so the error
    # names parameter 'b'.
    @check_type_hints
    @with_input_types(int, int)
    def sub(a, b):
      return a - b

    with self.assertRaises(TypeCheckError) as e:
      sub(1, 'two')
    self.assertEqual("Type-hint for argument: 'b' violated. Expected an "
                     "instance of <type 'int'>, instead found an instance "
                     "of <type 'str'>.",
                     e.exception.args[0])

  def test_valid_only_positional_arguments(self):
    @with_input_types(int, int)
    def add(a, b):
      return a + b

    self.assertEqual(3, add(1, 2))
class ReturnsDecoratorTestCase(TypeHintTestCase):
  """Tests for @with_output_types: hint validation and return-value checks."""

  def test_no_kwargs_accepted(self):
    # Output hints are positional only; keyword arguments are rejected.
    with self.assertRaises(ValueError):
      @with_output_types(m=int)
      def unused_foo():
        return 5

  def test_must_be_primitive_type_or_type_constraint(self):
    with self.assertRaises(TypeError):
      @with_output_types(5)
      def unused_foo():
        pass

    with self.assertRaises(TypeError):
      @with_output_types([1, 2])
      def unused_foo():
        pass

  def test_must_be_single_return_type(self):
    # Multiple return values must be expressed as a single Tuple hint, not
    # as several positional hints.
    with self.assertRaises(ValueError):
      @with_output_types(int, str)
      def unused_foo():
        return 4, 'f'

  def test_type_check_violation(self):
    @check_type_hints
    @with_output_types(int)
    def foo(a):
      return 'test'

    with self.assertRaises(TypeCheckError) as e:
      foo(4)
    self.assertEqual("Type-hint for return type violated. Expected an "
                     "instance of <type 'int'>, instead found an instance "
                     "of <type 'str'>.",
                     e.exception.args[0])

  def test_type_check_simple_type(self):
    @with_output_types(str)
    def upper(a):
      return a.upper()

    self.assertEqual('TEST', upper('test'))

  def test_type_check_composite_type(self):
    @with_output_types(typehints.List[typehints.Tuple[int, int]])
    def bar():
      return [(i, i+1) for i in range(5)]

    self.assertEqual([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)], bar())

  def test_any_return_type_hint(self):
    @with_output_types(typehints.Any)
    def bar():
      return 'foo'

    self.assertEqual('foo', bar())
class CombinedReturnsAndTakesTestCase(TypeHintTestCase):
  """Tests combining @with_input_types and @with_output_types, with and
  without runtime enforcement via @check_type_hints."""

  def test_enable_and_disable_type_checking_takes(self):
    @with_input_types(a=int)
    def int_to_str(a):
      return str(a)

    # The function call below violates the argument type-hint above, but won't
    # result in an exception since run-time type-checking was disabled above.
    self.assertEqual('a', int_to_str('a'))

    # Must re-define since the conditional is in the (maybe)wrapper.
    @check_type_hints
    @with_input_types(a=int)
    def int_to_str(a):
      return str(a)

    # With run-time type checking enabled once again the same call attempt
    # should result in a TypeCheckError.
    with self.assertRaises(TypeCheckError):
      int_to_str('a')

  def test_enable_and_disable_type_checking_returns(self):
    @with_output_types(str)
    def int_to_str(a):
      return a

    # The return value of the function above violates the return-type
    # type-hint above, but won't result in an exception since run-time
    # type-checking was disabled above.
    self.assertEqual(9, int_to_str(9))

    # Must re-define since the conditional is in the (maybe)wrapper.
    @check_type_hints
    @with_output_types(str)
    def int_to_str(a):
      return a

    # With type-checking enabled once again we should get a TypeCheckError here.
    with self.assertRaises(TypeCheckError):
      int_to_str(9)

  def test_valid_mix_pos_and_keyword_with_both_orders(self):
    # Decorator order (input-first vs output-first) must not matter.
    @with_input_types(str, start=int)
    @with_output_types(str)
    def to_upper_with_slice(string, start):
      return string.upper()[start:]

    self.assertEqual('ELLO', to_upper_with_slice('hello', 1))

  def test_simple_takes_and_returns_hints(self):
    @check_type_hints
    @with_output_types(str)
    @with_input_types(a=str)
    def to_lower(a):
      return a.lower()

    # Return type and argument type satisfied, should work as normal.
    self.assertEqual('m', to_lower('M'))

    # Invalid argument type should raise a TypeCheckError
    with self.assertRaises(TypeCheckError):
      to_lower(5)

    @check_type_hints
    @with_output_types(str)
    @with_input_types(a=str)
    def to_lower(a):
      return 9

    # Modified function now has an invalid return type.
    with self.assertRaises(TypeCheckError):
      to_lower('a')

  def test_composite_takes_and_returns_hints(self):
    @check_type_hints
    @with_input_types(it=typehints.List[int])
    @with_output_types(typehints.List[typehints.Tuple[int, int]])
    def expand_ints(it):
      return [(i, i + 1) for i in it]

    # Return type and argument type satisfied, should work as normal.
    self.assertEqual([(0, 1), (1, 2), (2, 3)], expand_ints(list(range(3))))

    # Invalid argument, list of str instead of int.
    with self.assertRaises(TypeCheckError):
      expand_ints('t e s t'.split())

    @check_type_hints
    @with_output_types(typehints.List[typehints.Tuple[int, int]])
    @with_input_types(it=typehints.List[int])
    def expand_ints(it):
      return [str(i) for i in it]

    # Modified function now has invalid return type.
    with self.assertRaises(TypeCheckError):
      expand_ints(list(range(2)))
class DecoratorHelpers(TypeHintTestCase):
  """Tests for helper functions used by the type-hint decorators.

  Fix: replaced the deprecated ``assertEquals`` alias with ``assertEqual``
  (the alias emits a DeprecationWarning and was removed in Python 3.12).
  """

  def test_hint_helper(self):
    self.assertTrue(is_consistent_with(Any, int))
    self.assertTrue(is_consistent_with(int, Any))
    self.assertTrue(is_consistent_with(str, object))
    self.assertFalse(is_consistent_with(object, str))
    self.assertTrue(is_consistent_with(str, Union[str, int]))
    self.assertFalse(is_consistent_with(Union[str, int], str))

  def test_positional_arg_hints(self):
    # Unhinted positional parameters default to Any; a parameter list is
    # hinted as a Tuple of the individual hints.
    self.assertEqual(typehints.Any, _positional_arg_hints('x', {}))
    self.assertEqual(int, _positional_arg_hints('x', {'x': int}))
    self.assertEqual(typehints.Tuple[int, typehints.Any],
                     _positional_arg_hints(['x', 'y'], {'x': int}))

  def test_getcallargs_forhints(self):
    def func(a, b_c, *d):
      b, c = b_c  # pylint: disable=unused-variable
      return None

    self.assertEqual(
        {'a': Any, 'b_c': Any, 'd': Tuple[Any, ...]},
        getcallargs_forhints(func, *[Any, Any]))
    self.assertEqual(
        {'a': Any, 'b_c': Any, 'd': Tuple[Any, ...]},
        getcallargs_forhints(func, *[Any, Any, Any, int]))
    self.assertEqual(
        {'a': int, 'b_c': Tuple[str, Any], 'd': Tuple[Any, ...]},
        getcallargs_forhints(func, *[int, Tuple[str, Any]]))
# Run the whole test module when executed directly.
if __name__ == '__main__':
  unittest.main()
| 36.828169 | 80 | 0.661516 |
c3cb73ad92a3202df9cc009027753a74865d09f3 | 422 | py | Python | yocto/poky/bitbake/lib/prserv/__init__.py | jxtxinbing/ops-build | 9008de2d8e100f3f868c66765742bca9fa98f3f9 | [
"Apache-2.0"
] | 53 | 2018-02-28T08:51:32.000Z | 2022-02-28T06:49:23.000Z | yocto/poky/bitbake/lib/prserv/__init__.py | jxtxinbing/ops-build | 9008de2d8e100f3f868c66765742bca9fa98f3f9 | [
"Apache-2.0"
] | 415 | 2016-12-20T17:20:45.000Z | 2018-09-23T07:59:23.000Z | yocto/poky/bitbake/lib/prserv/__init__.py | jxtxinbing/ops-build | 9008de2d8e100f3f868c66765742bca9fa98f3f9 | [
"Apache-2.0"
] | 51 | 2018-02-21T04:46:08.000Z | 2022-03-02T04:20:41.000Z | __version__ = "1.0.0"
import os, time
import sys,logging
def init_logger(logfile, loglevel):
    """Configure the root logger to write to *logfile* at *loglevel*.

    *loglevel* is a case-insensitive level name ('DEBUG', 'INFO', ...).
    Raises ValueError when the name is not a recognised logging level.
    """
    level = getattr(logging, loglevel.upper(), None)
    if not isinstance(level, int):
        raise ValueError('Invalid log level: %s' % loglevel)
    logging.basicConfig(
        level=level, filename=logfile, format='%(asctime)-15s %(message)s')
class NotFoundError(Exception):
    """Raised when a requested PR-server entry cannot be found."""
    pass
| 28.133333 | 77 | 0.718009 |
51ab05956190712029bc353213938bdc078213dd | 5,633 | py | Python | examples/add_two_numbers.py | sarah-almaghrabi/keras-attention-mechanism | e516874803ca62fc718e36e808e3eff6e208fb92 | [
"Apache-2.0"
] | null | null | null | examples/add_two_numbers.py | sarah-almaghrabi/keras-attention-mechanism | e516874803ca62fc718e36e808e3eff6e208fb92 | [
"Apache-2.0"
] | null | null | null | examples/add_two_numbers.py | sarah-almaghrabi/keras-attention-mechanism | e516874803ca62fc718e36e808e3eff6e208fb92 | [
"Apache-2.0"
] | null | null | null | import os
import shutil
import sys
from pathlib import Path
import matplotlib.pyplot as plt
import numpy
import numpy as np
from keract import get_activations
from tensorflow.keras import Input
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.layers import Dense, Dropout, LSTM, Flatten, Conv1D
from tensorflow.keras.models import load_model, Model
# from tensorflow.python.keras.utils.vis_utils import plot_model
from keras.utils.vis_utils import plot_model
# KERAS_ATTENTION_DEBUG: If set to 1. Will switch to debug mode.
# In debug mode, the class Attention is no longer a Keras layer.
# What it means in practice is that we can have access to the internal values
# of each tensor. If we don't use debug, Keras treats the object
# as a layer and we can only get the final output.
# In this example we need it because we want to extract all the intermediate output values.
os.environ['KERAS_ATTENTION_DEBUG'] = '1'
from attention import Attention
def task_add_two_numbers_after_delimiter(
        n: int, seq_length: int, delimiter: float = 0.0,
        index_1: int = None, index_2: int = None) -> (np.array, np.array):
    """Task: add the two numbers that come right after the two delimiters.

    Each sample is a sequence of `seq_length` steps with 2 feature channels
    drawn uniformly from [0, 1). Two positions are marked by writing
    `delimiter` into both channels of the step *preceding* each selected
    number; the target is the mean of the two selected numbers taken from
    the first channel.

    Example (single channel shown):
    x = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8]. Result is y = (4 + 7) / 2.

    @param n: number of samples in (x, y).
    @param seq_length: length of the sequence of x.
    @param delimiter: value of the delimiter. Default is 0.0.
    @param index_1: index of the number that comes after the first delimiter.
        If both indices are None, two distinct indices in [1, seq_length)
        are drawn at random per sample.
    @param index_2: index of the number that comes after the second delimiter.
    @return: two numpy arrays x and y of shape (n, seq_length, 2) and (n, 1).
    """
    x = np.random.uniform(0, 1, (n, seq_length, 2))
    y = np.zeros(shape=(n, 1))
    for i in range(len(x)):
        if index_1 is None and index_2 is None:
            # Never pick index 0: the delimiter goes at position index - 1.
            a, b = np.random.choice(range(1, len(x[i])), size=2, replace=False)
        else:
            a, b = index_1, index_2
        # Target computed before the delimiters overwrite any values.
        y[i, 0] = 0.5 * x[i, a, 0] + 0.5 * x[i, b, 0]
        # Mark both feature channels of the step preceding each number.
        x[i, a - 1:a] = delimiter
        x[i, b - 1:b] = delimiter
    return x, y
def main():
    # Fix the RNG so the generated task data is reproducible across runs.
    numpy.random.seed(7)
    # Number of training epochs; can be overridden by the first CLI argument.
    max_epoch = int(sys.argv[1]) if len(sys.argv) > 1 else 150

    # data. definition of the problem.
    seq_length = 20
    x_train, y_train = task_add_two_numbers_after_delimiter(20_000, seq_length)
    x_val, y_val = task_add_two_numbers_after_delimiter(4_000, seq_length)

    # just arbitrary values. it's for visual purposes. easy to see than random values.
    test_index_1 = 1
    test_index_2 = 19
    x_test, _ = task_add_two_numbers_after_delimiter(10, seq_length, 0, test_index_1, test_index_2)
    # x_test_mask is just a mask that, if applied to x_test, would still contain the information to solve the problem.
    # we expect the attention map to look like this mask.
    x_test_mask = np.zeros_like(x_test[..., 0])
    x_test_mask[:, test_index_1:test_index_1 + 1] = 1
    x_test_mask[:, test_index_2:test_index_2 + 1] = 1

    # Define/compile the model.
    # NOTE(review): input has 2 feature channels to match the task generator.
    model_input = Input(shape=(seq_length, 2))
    x = LSTM(100, return_sequences=True, name='encoder_')(model_input)
    # x = Conv1D(100,3, padding='same', name='encoder_')(model_input)
    # x = Flatten()(x)
    # x = Dense(20, use_bias=False, activation='tanh', name='attention_weight') (x)
    x = Attention()(x)
    x = Dropout(0.2)(x)
    x = Dense(1, activation='linear')(x)
    model = Model(model_input, x)
    model.compile(loss='mae', optimizer='adam')

    # Visualize the model.
    model.summary()
    plot_model(model, show_dtype=True, show_shapes=True, expand_nested=True, show_layer_activations=True)

    # Will display the activation map in task_add_two_numbers/
    output_dir = Path('task_add_two_numbers')
    if output_dir.exists():
        shutil.rmtree(str(output_dir))
    output_dir.mkdir(parents=True, exist_ok=True)

    class VisualiseAttentionMap(Callback):
        # After every epoch, dump the attention weights over x_test next to
        # the ground-truth mask so convergence can be inspected visually.

        def on_epoch_end(self, epoch, logs=None):
            attention_map = get_activations(model, x_test)['attention_weight']
            # attention_map = get_activations(model, x_test)['encoder_']
            print("x_test")
            print(x_test.shape)
            # print(x_test)
            print("attention_map")
            print(attention_map.shape)
            print(model.output.get_shape())
            # exit()
            # top is attention map, bottom is ground truth.
            plt.imshow(np.concatenate([attention_map, x_test_mask]), cmap='hot')
            iteration_no = str(epoch).zfill(3)
            plt.axis('off')
            plt.title(f'Iteration {iteration_no} / {max_epoch}')
            output_filename = f'{output_dir}/epoch_{iteration_no}.png'
            print(f'Saving to {output_filename}.')
            plt.savefig(output_filename)
            plt.close()

    # train.
    print('x_train:', x_train.shape)
    print('y_train:', y_train.shape)
    print('x_val:', x_val.shape)
    print('y_val:', y_val.shape)
    model.fit(
        x_train, y_train, validation_data=(x_val, y_val),
        epochs=max_epoch, verbose=2, batch_size=64,
        callbacks=[VisualiseAttentionMap()]
    )

    # test save/reload model.
    pred1 = model.predict(x_val)
    model.save('test_model.h5')
    model_h5 = load_model('test_model.h5')
    pred2 = model_h5.predict(x_val)
    np.testing.assert_almost_equal(pred1, pred2)
    print('Success.')
if __name__ == '__main__':
    # Extra runtime dependencies for visualisation:
    # pip install pydot
    # pip install keract
    main()
| 39.391608 | 118 | 0.657199 |
d0d878e306a5537e00d1a477c4478c41d34efc6f | 3,074 | py | Python | src/obj.py | APPIAN-PET/PyPA | 316c9675e6049443dd06556b046c79e5a82e5d3e | [
"MIT"
] | 26 | 2017-10-20T22:45:19.000Z | 2022-02-10T14:41:28.000Z | src/obj.py | APPIAN-PET/PyPA | 316c9675e6049443dd06556b046c79e5a82e5d3e | [
"MIT"
] | 15 | 2019-02-06T16:30:23.000Z | 2021-09-17T05:17:15.000Z | src/obj.py | APPIAN-PET/PyPA | 316c9675e6049443dd06556b046c79e5a82e5d3e | [
"MIT"
] | 15 | 2017-09-14T21:30:30.000Z | 2021-11-05T13:29:04.000Z | import nipype.interfaces.io as nio
import nipype.interfaces.utility as niu
import nipype.algorithms.misc as misc
from nipype.interfaces.utility import Function
from nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath,
BaseInterface, OutputMultiPath, BaseInterfaceInputSpec, isdefined)
from nipype.utils.filemanip import (load_json, save_json, split_filename, fname_presuffix, copyfile)
from nipype.interfaces.base import CommandLine, CommandLineInputSpec
import os
class objOutput(TraitedSpec):
    """Output spec for transform_objects: the transformed surface file."""
    out_file=traits.File(argstr="%s", desc="Out file")
class objInput(CommandLineInputSpec):
    """Input spec for transform_objects: surface, transform, and output path."""
    in_file=traits.File(argstr="%s",position=1, desc="In obj file")
    tfm_file=traits.File(argstr="%s",position=2, desc="Transform file")
    out_file=traits.File(argstr="%s",position=3, desc="Out file")
class transform_objectCommand(CommandLine ):
    """Wraps the MINC `transform_objects` tool: applies a transform to a
    surface (.obj) file.

    When out_file is not set, it defaults to
    cwd/<in_file basename>_tfm<original extension>.
    """
    input_spec = objInput
    output_spec = objOutput
    _cmd = "transform_objects"
    _suffix="_tfm"

    def _gen_outputs(self, fn) :
        # Build the default output path in the working directory.
        fn_split = os.path.splitext(fn)
        return os.getcwd() + os.sep + os.path.basename( fn_split[0] ) + self._suffix + fn_split[1]

    def _list_outputs(self):
        outputs = self.output_spec().get()
        if not isdefined(self.inputs.out_file) :
            self.inputs.out_file = self._gen_outputs(self.inputs.in_file)
        outputs["out_file"] = self.inputs.out_file
        return outputs

    def _parse_inputs(self, skip=None):
        if skip is None:
            skip = []
        # Materialise out_file before the command line is assembled so the
        # positional argument is present.
        if not isdefined(self.inputs.out_file) :
            self.inputs.out_file = self._gen_outputs(self.inputs.in_file)
        return super(transform_objectCommand, self)._parse_inputs(skip=skip)
class volume_object_evaluateOutput(TraitedSpec):
    """Output spec for volume_object_evaluate: per-vertex values text file."""
    out_file=traits.File(argstr="%s", desc="Out file")
class volume_object_evaluateInput(CommandLineInputSpec):
    """Input spec for volume_object_evaluate: volume, surface, output path."""
    vol_file=traits.File(argstr="%s",position=1, desc="In obj file")
    obj_file=traits.File(argstr="%s",position=2, desc="Transform file")
    out_file=traits.File(argstr="%s",position=3, desc="Out file")
class volume_object_evaluateCommand( CommandLine ):
    """Wraps the MINC `volume_object_evaluate` tool: samples a volume at the
    vertices of a surface.

    When out_file is not set, it defaults to
    cwd/<vol_file basename>_surf-intersect.txt.
    """
    input_spec = volume_object_evaluateInput
    output_spec = volume_object_evaluateOutput
    _cmd = "volume_object_evaluate"
    _suffix="_surf-intersect"
    _ext=".txt"

    def _parse_inputs(self, skip=None):
        if skip is None:
            skip = []
        # Materialise out_file before the command line is assembled.
        if not isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_outputs(self.inputs.vol_file)
        return super(volume_object_evaluateCommand, self)._parse_inputs(skip=skip)

    def _gen_outputs(self, fn) :
        # Default output path: working directory, fixed suffix and .txt ext.
        fn_split = os.path.splitext(fn)
        return os.getcwd() + os.sep + os.path.basename( fn_split[0] ) + self._suffix + self._ext

    def _list_outputs(self):
        outputs = self.output_spec().get()
        if not isdefined(self.inputs.out_file) :
            self.inputs.out_file = self._gen_outputs(self.inputs.vol_file)
        outputs["out_file"] = self.inputs.out_file
        return outputs
| 37.036145 | 102 | 0.699414 |
49e482b5770e1f81f898d107c7944df2dbf6fd5b | 575 | py | Python | conf/logger.py | yc19890920/dble_fastapi_blog | dd9b8984d849df893d4fea270e8b75ac12d01241 | [
"Apache-2.0"
] | null | null | null | conf/logger.py | yc19890920/dble_fastapi_blog | dd9b8984d849df893d4fea270e8b75ac12d01241 | [
"Apache-2.0"
] | 2 | 2021-03-31T19:56:46.000Z | 2021-04-30T21:19:15.000Z | conf/logger.py | yc19890920/dble_fastapi_blog | dd9b8984d849df893d4fea270e8b75ac12d01241 | [
"Apache-2.0"
] | null | null | null | """
@Author: YangCheng
@contact: 1248644045@qq.com
@Software: Y.C
@Time: 2020/7/21 15:29
"""
import logging
from .settings import LOGGER_CONSOLE_LEVEL
class Logger:
    """Factory for loggers writing to stderr at LOGGER_CONSOLE_LEVEL."""

    @staticmethod
    def get_logger(name):
        """Return the named logger configured with the project format.

        Fix: the previous version added a fresh StreamHandler on every call,
        so repeated get_logger(name) calls for the same name would emit each
        record multiple times. A handler is now attached only once.
        """
        log = logging.getLogger(name)
        if not log.handlers:  # avoid stacking duplicate handlers
            _handler = logging.StreamHandler()
            log_format = '%(asctime)s %(threadName)-10s %(process)d %(levelname)-8s (%(filename)s:%(lineno)d) %(message)s'
            _handler.setFormatter(logging.Formatter(log_format))
            log.addHandler(_handler)
        log.setLevel(LOGGER_CONSOLE_LEVEL)
        return log
| 26.136364 | 118 | 0.669565 |
b0f0c7f0ff866e2844df7145800946fc1a82ca4b | 4,634 | py | Python | edward2/tensorflow/program_transformations.py | miksu/edward2 | 973acdb23701f320ebaee8a56fc44d4414acfa4e | [
"Apache-2.0"
] | 1 | 2022-01-05T12:26:05.000Z | 2022-01-05T12:26:05.000Z | edward2/tensorflow/program_transformations.py | kthakore/edward2 | 6fd50d4b9351616c3cd8b5e9224d118ba48c935f | [
"Apache-2.0"
] | null | null | null | edward2/tensorflow/program_transformations.py | kthakore/edward2 | 6fd50d4b9351616c3cd8b5e9224d118ba48c935f | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformations of Edward2 programs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
from edward2.trace import trace
import six
import tensorflow.compat.v2 as tf
def make_log_joint_fn(model):
  """Takes Edward probabilistic program and returns its log joint function.

  Args:
    model: Python callable which executes the generative process of a
      computable probability distribution using `ed.RandomVariable`s.

  Returns:
    A log-joint probability function. Its inputs are `model`'s original inputs
    and random variables which appear during the program execution. Its output
    is a scalar tf.Tensor.

  #### Examples

  Below we define Bayesian logistic regression as an Edward program,
  representing the model's generative process. We apply `make_log_joint_fn` in
  order to represent the model in terms of its joint probability function.

  ```python
  import edward2 as ed

  def logistic_regression(features):
    coeffs = ed.Normal(loc=0., scale=1.,
                       sample_shape=features.shape[1], name="coeffs")
    outcomes = ed.Bernoulli(logits=tf.tensordot(features, coeffs, [[1], [0]]),
                            name="outcomes")
    return outcomes

  log_joint = ed.make_log_joint_fn(logistic_regression)

  features = tf.random.normal([3, 2])
  coeffs_value = tf.random.normal([2])
  outcomes_value = tf.round(tf.random.uniform([3]))
  output = log_joint(features, coeffs=coeffs_value, outcomes=outcomes_value)
  ```

  """
  def log_joint_fn(*args, **kwargs):
    """Log-probability of inputs according to a joint probability distribution.

    Args:
      *args: Positional arguments. They are the model's original inputs and can
        alternatively be specified as part of `kwargs`.
      **kwargs: Keyword arguments, where for each key-value pair `k` and `v`,
        `v` is passed as a `value` to the random variable(s) whose keyword
        argument `name` during construction is equal to `k`.

    Returns:
      Scalar tf.Tensor, which represents the model's log-probability summed
      over all Edward random variables and their dimensions.

    Raises:
      TypeError: If a random variable in the model has no specified value in
        `**kwargs`.
    """
    log_probs = []

    def tracer(rv_constructor, *rv_args, **rv_kwargs):
      """Overrides a random variable's `value` and accumulates its log-prob."""
      # Set value to keyword argument indexed by `name` (an input tensor).
      rv_name = rv_kwargs.get("name")
      if rv_name is None:
        raise KeyError("Random variable constructor {} has no name "
                       "in its arguments.".format(rv_constructor.__name__))
      value = kwargs.get(rv_name)
      if value is None:
        raise LookupError("Keyword argument specifying value for {} is "
                          "missing.".format(rv_name))
      rv_kwargs["value"] = value

      rv = rv_constructor(*rv_args, **rv_kwargs)
      # Reduce over all dimensions so the overall log-joint is a scalar.
      log_prob = tf.reduce_sum(rv.distribution.log_prob(rv.value))
      log_probs.append(log_prob)
      return rv

    # Only forward the kwargs that are actual model parameters; the rest are
    # random-variable values consumed by the tracer above.
    model_kwargs = _get_function_inputs(model, kwargs)
    # Execute the model once with the tracer intercepting every RV
    # construction; this pins each RV to its supplied value.
    with trace(tracer):
      model(*args, **model_kwargs)
    log_prob = sum(log_probs)
    return log_prob
  return log_joint_fn
def _get_function_inputs(f, src_kwargs):
"""Filters inputs to be compatible with function `f`'s signature.
Args:
f: Function according to whose input signature we filter arguments.
src_kwargs: Keyword arguments to filter according to `f`.
Returns:
kwargs: Dict of key-value pairs in `src_kwargs` which exist in `f`'s
signature.
"""
if hasattr(f, "_func"): # functions returned by tf.make_template
f = f._func # pylint: disable=protected-access
try: # getargspec was deprecated in Python 3.6
argspec = inspect.getfullargspec(f)
except AttributeError:
argspec = inspect.getargspec(f)
fkwargs = {k: v for k, v in six.iteritems(src_kwargs) if k in argspec.args}
return fkwargs
| 35.106061 | 79 | 0.703064 |
0d4c8447a8b8e17efd9074a8a01abac98acd1c98 | 17,365 | py | Python | tensorflow_data_validation/utils/mutual_information_util_test.py | rtg0795/data-validation | 16e57d7d5f1aeb4b7b9b897c5021abf006261bbd | [
"Apache-2.0"
] | 621 | 2018-09-10T19:27:18.000Z | 2022-03-31T06:43:24.000Z | tensorflow_data_validation/utils/mutual_information_util_test.py | rtg0795/data-validation | 16e57d7d5f1aeb4b7b9b897c5021abf006261bbd | [
"Apache-2.0"
] | 157 | 2018-09-10T08:53:18.000Z | 2022-03-31T14:07:51.000Z | tensorflow_data_validation/utils/mutual_information_util_test.py | rtg0795/data-validation | 16e57d7d5f1aeb4b7b9b897c5021abf006261bbd | [
"Apache-2.0"
] | 141 | 2018-09-10T06:38:13.000Z | 2022-03-31T07:27:16.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for estimating the mutual information with kNN algorithm."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tensorflow_data_validation.utils import mutual_information_util
_MI = mutual_information_util.mutual_information
_AMI = mutual_information_util.adjusted_mutual_information
class RanklabMutualInformationTest(parameterized.TestCase):
def _MakeCorrelatedFeatures(self, means, rho):
# Make n correlated Gaussian random features, and also compute the
# theoretical mutual information between the first n-1 features and the last
# feature.
np.random.seed(30)
means = np.array(means)
n = means.size
cov = np.ones((n, n)) * rho
cov[range(n), range(n)] = 1
dat = np.random.multivariate_normal(means, cov, 50000)
# Theoretical value of the mutual information.
expected_mi = -0.5 * (
np.log2(np.linalg.det(cov)) - np.log2(np.linalg.det(cov[:-1, :-1])))
return [dat[:, i] for i in range(n)], expected_mi
def testOrdinalIndependentFeatures(self):
np.random.seed(29)
r0 = np.random.randn(50000)
r1 = np.random.randn(50000)
for method in ['smaller_data', 'larger_data']:
result = _MI([r0], [r1], [False], [False],
estimate_method=method,
seed=21)
self.assertAlmostEqual(result, 0, places=2)
def testEntropy(self):
# Estimate the entropy by computing the mutual information with itself.
np.random.seed(23)
r = np.random.randint(0, 8, 50000) # 8 categories.
for method in ['smaller_data', 'larger_data']:
result = _MI([r], [r], [True], [True], estimate_method=method, seed=21)
self.assertAlmostEqual(result, 3, delta=1e-2)
# Treat it as a ordinal variable.
result = _MI([r], [r], [False], [False], estimate_method=method, seed=21)
self.assertAlmostEqual(result, 3, delta=1e-2)
def testCorrelatedGaussians(self):
# The mutual information between correlated Gaussian random variables can be
# theoretically computed, which provides a nice test for the code.
rho = 0.4
[f0, f1], expected = self._MakeCorrelatedFeatures([10, 20], rho)
result = _MI([f0], [f1], [False], [False],
estimate_method='smaller_data',
seed=21)
self.assertAlmostEqual(result, expected, places=2)
result = _MI([f0], [f1], [False], [False],
estimate_method='larger_data',
seed=21)
self.assertAlmostEqual(result, expected, places=2)
# Higher dimension.
rho = 0.9 # fairly strongly dependent features
[f0, f1, f2, f3], expected = self._MakeCorrelatedFeatures([1, 2, -3, 4],
rho)
for method in ['smaller_data', 'larger_data']:
result = _MI([f1, f2, f3], [f0], [False] * 3, [False],
estimate_method=method,
seed=21)
self.assertAlmostEqual(result, expected, delta=2e-2)
def testAddingIndependentFeature(self):
# Adding an independent feature into the computation, does not alter the
# mutual information.
np.random.seed(23)
r = np.random.randint(0, 8, 50000)
s = np.random.randint(0, 3, 50000) + r
w = np.random.randn(50000)
for method in ['smaller_data', 'larger_data']:
mi_rs = _MI([r], [s], [False], [False], estimate_method=method, seed=21)
mi_rws = _MI([r, w], [s], [False] * 2, [False],
estimate_method=method,
seed=21)
self.assertAlmostEqual(mi_rws, mi_rs, places=2)
def testMissingValues(self):
np.random.seed(23)
fz = np.array([1.] * 10000)
fx = np.random.random(10000)
fa = np.array([1] * 5000 + [2] * 5000, dtype=float)
fb = np.array([2.3] * 5000 + [None] * 5000)
fc = np.array([0.] * 5000 + [10.] * 5000)
for method in ['smaller_data', 'larger_data']:
result = _MI([fz], [fa], [False], [False],
seed=23,
estimate_method=method)
self.assertLess(abs(result), 1e-2)
result = _MI([fc], [fa], [False], [False],
seed=23,
estimate_method=method)
self.assertLess(abs(result - 1), 1e-2)
result = _MI([fb], [fa], [False], [False],
seed=23,
estimate_method=method)
self.assertLess(abs(result - 1), 1e-2)
# Add an independent feature does not affect.
result = _MI([fc, fx], [fa], [False] * 2, [False],
seed=23,
estimate_method=method)
self.assertLess(abs(result - 1), 1e-2)
result = _MI([fb, fx], [fa], [False] * 2, [False],
seed=23,
estimate_method=method)
self.assertLess(abs(result - 1), 1e-2)
def testFilterFeat(self):
np.random.seed(3)
fa = np.array(['cat0'] * 2000 + ['cat1'] * 2000 + ['cat2'] * 2000 +
['cat3'] * 2000) # 4 categories
fg = np.array([1] * 2000 + [2] * 2000 + [3] * 2000 + [4] * 2000)
filter_feat = np.array([1] * 6000 + [None] * 2000)
filter_arr = np.array([True] * 6000 + [False] * 2000)
for method in ['smaller_data', 'larger_data']:
result = _MI([fg], [fa], [True], [True],
filter_feature=filter_arr,
seed=20,
estimate_method=method)
self.assertAlmostEqual(result, np.log2(3), places=2)
result = _MI([fg], [fa], [False], [True],
filter_feature=filter_arr,
seed=20,
estimate_method=method)
self.assertAlmostEqual(result, np.log2(3), places=2)
result = _MI([fg], [filter_feat], [False], [False],
seed=23,
estimate_method=method)
self.assertAlmostEqual(result, (3 / 4) * (np.log2(4 / 3)) + 0.5, places=2)
result = _MI([fg], [filter_feat], [False], [False],
filter_feature=filter_arr,
seed=23,
estimate_method=method)
self.assertLess(abs(result), 1e-2)
def testWeightFeat(self):
np.random.seed(3)
fa = np.array(['cat0'] * 2000 + ['cat1'] * 2000 + ['cat2'] * 2000 +
['cat3'] * 2000) # 4 categories
fg = np.array([1] * 2000 + [2] * 2000 + [3] * 2000 + [4] * 2000)
weight_feat = np.array([1] * 2000 + [0.5] * 2000 + [0.25] * 2000 +
[0] * 2000)
for method in ['smaller_data', 'larger_data']:
result = _MI([fg], [fa], [True], [True],
weight_feature=weight_feat,
seed=20,
estimate_method=method)
self.assertAlmostEqual(result, 7 / 8, delta=1e-2)
result = _MI([fg], [weight_feat], [False], [False],
weight_feature=weight_feat,
seed=23,
estimate_method=method)
self.assertAlmostEqual(result, 7 / 8, delta=1e-2)
def testAssertions(self):
np.random.seed(23)
fx = np.random.random(1000)
fy = np.array([1.] * 1000)
with self.assertRaises(AssertionError):
_MI([], [fy], [False], [False])
with self.assertRaises(AssertionError):
_MI([fx], [], [False], [False])
with self.assertRaises(AssertionError):
_MI(fx, [fy], [False], [False])
with self.assertRaises(AssertionError):
_MI([fx], [fy], [False] * 2, [False])
with self.assertRaises(AssertionError):
_MI([fx], [fy], [False], [False], output_each='False')
def testOutputEachSanityCheck(self):
np.random.seed(23)
fx = np.random.randn(1000)
fy = np.array([1.] * 1000)
fz = np.array([True] * 700 + [False] * 300)
for method in ['smaller_data', 'larger_data']:
result, each_mi = _MI([fx], [fy], [False], [False],
seed=3,
output_each=True,
estimate_method=method)
self.assertLess(abs(result), 1e-2)
self.assertLen(each_mi, 1000)
self.assertLess(max(0, np.mean(each_mi)), 1e-2)
result, each_mi = _MI([fx], [fy], [False], [False],
filter_feature=fz,
seed=4,
output_each=True,
estimate_method=method)
self.assertLess(abs(result), 1e-2)
self.assertLen(each_mi, 700)
self.assertLess(max(0, np.mean(each_mi)), 1e-2)
def testOutputEach(self):
np.random.seed(97)
n = 10000
fx = np.random.randint(0, 8, n)
for method in ['smaller_data', 'larger_data']:
for categorical0, categorical1 in [(True, True), (False, True),
(False, False)]:
# Test categorical vs categorical, ordinal vs categorical, ordinal
# vs ordinal.
result, each_mi = _MI([fx], [fx], [categorical0], [categorical1],
output_each=True,
estimate_method=method,
seed=5)
self.assertAlmostEqual(result, 3, places=1)
self.assertLen(each_mi, n)
self.assertAlmostEqual(np.mean(each_mi), 3, places=1)
self.assertAlmostEqual(
np.sum(each_mi[fx == 0]) / n, 3. / 8, places=None, delta=1e-2)
for method in ['smaller_data', 'larger_data']:
for categorical0, categorical1, categorical2 in [(False, False, True),
(False, True, True)]:
result, each_mi = _MI([fx, fx], [fx], [categorical0, categorical1],
[categorical2],
output_each=True,
estimate_method=method,
seed=9)
self.assertAlmostEqual(result, 3, places=2)
self.assertLen(each_mi, n)
self.assertAlmostEqual(np.mean(each_mi), 3, places=2)
self.assertAlmostEqual(
np.sum(each_mi[fx == 0]) / n, 3. / 8, places=None, delta=1e-2)
def testCategorical(self):
np.random.seed(3)
a = np.array([b'cat0'] * 2000 + [b'cat1'] * 2000 + [b'cat2'] * 2000 +
[b'\xc5\x8cmura'] * 2000) # 4 categories
b = np.random.randn(a.size)
c = np.arange(0.1, 100, 0.001)[:a.size] + 2 * b
d = (
np.random.normal(0.5, 1.0, a.size) +
np.random.normal(-0.5, 1.0, a.size) + np.random.normal(0., 0.3, a.size))
e = np.arange(0.1, 100, 0.001)[:a.size]
# Build some features that repeat N times the same value sequence.
g = np.array([i // (a.size // 8) for i in range(a.size)])
h = np.array([b'cat%d' % (i // (a.size // 16)) for i in range(a.size)])
for method in ['smaller_data', 'larger_data']:
result = _MI([b], [a], [False], [True],
k=6,
estimate_method=method,
seed=20)
self.assertLess(abs(result), 2e-2)
result = _MI([c], [a], [False], [True],
k=6,
estimate_method=method,
seed=20)
self.assertAlmostEqual(result, 0.565, delta=1e+2)
result = _MI([d], [a], [False], [True],
k=6,
estimate_method=method,
seed=20)
self.assertLess(abs(result), 1e-2)
result = _MI([e], [h], [False], [True],
k=6,
estimate_method=method,
seed=20)
self.assertAlmostEqual(result, 4, delta=1e+2)
result = _MI([g], [h], [False], [True],
k=6,
estimate_method=method,
seed=20)
self.assertAlmostEqual(result, 3, delta=1e+2)
result = _MI([a, b], [b, a], [True, False], [False, True],
estimate_method=method,
seed=20)
self.assertAlmostEqual(result, 13.15, delta=1e+2)
def testCategoricalOrdinal(self):
np.random.seed(3)
# Feature B has PDF 3/4 in [0, 1] vs 1/4 in [1, 2], and differential entropy
# H(B) = - 3/4 * log(3/4) - 1/4 * log(1/4)
# while, given A, it has conditional entropy
# H(B | A) = 1/2 * H(B | A == 0) + 1/2 * H(B | A == 1)
# H(B | A) = 1/2 * 0. - 1/2 * log(1/2) = - 1/2 * log(1/2)
# hence their mutual information is
# I(A, B) = H(B) - H(B | A) = - 3/4 * log(3/4)
# using whatever log base we're using, in this case base 2.
a = np.array([i % 2 for i in range(1000)])
b = np.array([np.random.random() * (1. + i % 2) for i in range(1000)])
filt = np.array([True if i % 2 else False for i in range(1000)])
for method in ['smaller_data', 'larger_data']:
self.assertAlmostEqual(
-0.75 * np.log2(0.75),
_MI([a], [b], [True], [False], estimate_method=method, seed=20),
delta=2e-2)
# If we filter out 1 of the 2 A labels however, no information is left.
self.assertEqual(
0.,
_MI([a], [b], [True], [False],
estimate_method=method,
seed=20,
filter_feature=filt))
def testAdjustedMutualInformation(self):
np.random.seed(11)
f0 = np.random.randint(0, 10000, 10000)
label = np.array([0, 1] * 5000)
result = mutual_information_util.mutual_information([f0], [label], [True],
[True],
seed=11)
adjusted_result = _AMI([f0], [label], [True], [True], seed=11)
self.assertAlmostEqual(result, 0.625, delta=2e-2)
self.assertAlmostEqual(adjusted_result, 0.0, delta=2e-2)
def testMergeCategorical(self):
actual = mutual_information_util._merge_categorical([
np.array(['a', 'b', 'c']),
np.array(['1', '2', '3']),
np.array(['alpha', 'beta', 'gamma'])
])
self.assertTrue(
np.array_equal(
np.array([b'a:1:alpha', b'b:2:beta', b'c:3:gamma']), actual))
def testEntropyD(self):
discrete_f = np.array(['foo', 'bar', 'baz', 'foo'])
entropy, each = mutual_information_util._entropy_discrete(
discrete_f, np.ones_like(discrete_f, dtype=float))
expected_entropy = -(np.log2(0.5) * 0.5 + np.log2(0.25) * 0.25 * 2)
expected_each = np.array(
[-np.log2(0.5), -np.log2(0.25), -np.log2(0.25), -np.log2(0.5)])
self.assertTrue(np.allclose(expected_entropy, entropy, atol=1e-5))
self.assertTrue(np.allclose(expected_each, each, atol=1e-5))
def testReplaceNoneC(self):
arr = np.array([1.0, 2.0, np.nan])
expected = np.array(
[1.0, 2.0, 2 * 2.0 - 1.0 + mutual_information_util._NONE_NUM])
actual = mutual_information_util._replace_none_categorical(arr)
self.assertTrue(np.array_equal(expected, actual))
def testUnitVarianceScale(self):
arr = np.array([1.0, 2.0, np.nan])
actual = mutual_information_util._unit_variance_scale(arr)
stdev = np.std([1.0, 2.0], ddof=1)
self.assertTrue(
np.allclose(
np.array([(1.0 - 1.5) / stdev, (2 - 1.5) / stdev]),
actual[~np.isnan(actual)],
atol=1e-5))
def testUnitVarianceScale_UniformValues(self):
arr = np.array([1.0, 1.0, np.nan])
expected = np.array([0.0, 0.0, np.nan])
actual = mutual_information_util._unit_variance_scale(arr)
np.testing.assert_equal(actual[np.isnan(actual)],
expected[np.isnan(expected)])
self.assertTrue(
np.allclose(
expected[~np.isnan(expected)], actual[~np.isnan(actual)],
atol=1e-5))
def testFeatureToNumpyArray(self):
feat = np.array([1.0, 2.0, None])
expected = np.array([1.0, 2.0, np.nan])
actual = mutual_information_util._fill_missing_values(feat, False)
np.testing.assert_equal(actual[np.isnan(actual)],
expected[np.isnan(expected)])
np.testing.assert_equal(expected, actual)
feat = np.array([b'a', b'b', None])
expected = np.array([b'a', b'b', np.nan], dtype=object)
actual = mutual_information_util._fill_missing_values(feat, True)
self.assertEqual([
i for i, v in enumerate(actual) if isinstance(v, float) and np.isnan(v)
], [
i for i, v in enumerate(expected)
if isinstance(v, float) and np.isnan(v)
])
self.assertEqual([v for v in actual if not isinstance(v, float)],
[v for v in expected if not isinstance(v, float)])
def testDiscreteLabelsAppearingExactlyOnce(self):
feat0 = np.arange(10)
feat1 = np.arange(10, 20).astype(int)
with self.assertRaisesRegex(
ValueError, '.* tuples .* discrete features .* are all unique.*'):
mutual_information_util._mi_for_arrays([feat0], [], [], [feat1],
np.ones_like(feat1))
if __name__ == '__main__':
absltest.main()
| 39.198646 | 80 | 0.564238 |
ea5d4f230c5db2fdfec9389d9b99c87b250bd27a | 6,292 | py | Python | infrastructure-provisioning/src/general/lib/os/redhat/common_lib.py | karthisuku/incubator-dlab | 09d2d7bfce94d82c5b02f533782051537fbac551 | [
"Apache-2.0"
] | 1 | 2021-04-07T00:37:00.000Z | 2021-04-07T00:37:00.000Z | infrastructure-provisioning/src/general/lib/os/redhat/common_lib.py | karthisuku/incubator-dlab | 09d2d7bfce94d82c5b02f533782051537fbac551 | [
"Apache-2.0"
] | null | null | null | infrastructure-provisioning/src/general/lib/os/redhat/common_lib.py | karthisuku/incubator-dlab | 09d2d7bfce94d82c5b02f533782051537fbac551 | [
"Apache-2.0"
] | 1 | 2021-07-15T13:36:25.000Z | 2021-07-15T13:36:25.000Z | #!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import os
import sys
import time

from fabric.api import *
from fabric.contrib.files import exists
def manage_pkg(command, environment, requisites):
    """Run a yum command once any concurrent yum process has finished.

    Polls ``pgrep yum`` every 10 seconds (up to 60 retries) and only runs
    the requested command when the package manager is free.

    Args:
        command: yum sub-command and flags, e.g. '-y install'.
        environment: 'remote' to run via fabric sudo(), 'local' to run on
            this host through fabric local().
        requisites: package list / extra arguments appended to the command.

    Exits the process with status 1 on timeout, unknown environment, or any
    unexpected error (matching this module's conventions).
    """
    try:
        if environment not in ('remote', 'local'):
            # The previous implementation fell into an infinite loop here,
            # printing 'Wrong environment' forever; fail fast instead.
            print('Wrong environment')
            sys.exit(1)
        attempts = 0
        while True:
            if attempts > 60:
                print("Notebook is broken please recreate it.")
                sys.exit(1)
            print('Package manager is:')
            if environment == 'remote':
                busy = sudo('pgrep yum -a && echo "busy" || echo "ready"') == 'busy'
            else:
                busy = local('sudo pgrep yum -a && echo "busy" || echo "ready"',
                             capture=True) == 'busy'
            if busy:
                # Another yum process holds the lock; wait and retry.
                attempts += 1
                time.sleep(10)
                continue
            if environment == 'remote':
                sudo('yum {0} {1}'.format(command, requisites))
            else:
                local('sudo yum {0} {1}'.format(command, requisites), capture=True)
            return
    except:
        sys.exit(1)
def ensure_pkg(user, requisites='git vim gcc python-devel openssl-devel nmap libffi libffi-devel unzip libxml2-devel'):
    """Provision base build/tooling packages on a remote RedHat/CentOS 7 host.

    Idempotent: the whole body is skipped once the marker file
    /home/<user>/.ensure_dir/pkg_upgraded exists. Exits the process with
    status 1 on any failure (module convention).

    Args:
        user: remote user whose ~/.ensure_dir holds the completion marker.
        requisites: space-separated package list to install.
    """
    try:
        if not exists('/home/{}/.ensure_dir/pkg_upgraded'.format(user)):
            print("Updating repositories and installing requested tools: {}".format(requisites))
            # Stop/disable firewalld (if the unit exists) and switch SELinux
            # to permissive, both immediately and persistently.
            if sudo("systemctl list-units --all | grep firewalld | awk '{print $1}'") != '':
                sudo('systemctl disable firewalld.service')
                sudo('systemctl stop firewalld.service')
            sudo('setenforce 0')
            sudo("sed -i '/^SELINUX=/s/SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config")
            # Write a CentOS 7 base repository definition pointing at `mirror`.
            mirror = 'mirror.centos.org'
            with cd('/etc/yum.repos.d/'):
                sudo('echo "[Centos-repo]" > centOS-base.repo')
                sudo('echo "name=Centos 7 Repository" >> centOS-base.repo')
                sudo('echo "baseurl=http://{}/centos/7/os/x86_64/" >> centOS-base.repo'.format(mirror))
                sudo('echo "enabled=1" >> centOS-base.repo')
                sudo('echo "gpgcheck=1" >> centOS-base.repo')
                sudo('echo "gpgkey=http://{}/centos/7/os/x86_64/RPM-GPG-KEY-CentOS-7" >> centOS-base.repo'.format(mirror))
            sudo('yum-config-manager --enable rhui-REGION-rhel-server-optional')
            # Apply security updates, then install EPEL (source of python-pip).
            manage_pkg('update-minimal --security -y', 'remote', '')
            manage_pkg('-y install', 'remote', 'wget')
            sudo('wget --no-check-certificate https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm')
            sudo('rpm -ivh epel-release-latest-7.noarch.rpm')
            manage_pkg('repolist', 'remote', '')
            manage_pkg('-y install', 'remote', 'python-pip gcc')
            sudo('rm -f epel-release-latest-7.noarch.rpm')
            # NOTE(review): this export only affects its own shell invocation,
            # not the following commands — confirm whether it is still needed.
            sudo('export LC_ALL=C')
            manage_pkg('-y install', 'remote', requisites)
            # Drop the marker so subsequent calls become no-ops.
            sudo('touch /home/{}/.ensure_dir/pkg_upgraded'.format(user))
    except:
        sys.exit(1)
def change_pkg_repos():
    """Point yum at the Aliyun (China) mirror, exactly once per host."""
    # The marker file records that the mirror is already configured.
    if exists('/tmp/pkg_china_ensured'):
        return
    put('/root/files/sources.list', '/tmp/sources.list')
    sudo('mv /tmp/sources.list /etc/yum.repos.d/CentOS-Base-aliyun.repo')
    sudo('touch /tmp/pkg_china_ensured')
def find_java_path_remote():
    """Return the JRE path on the remote host.

    Parses the 'slave jre:' line out of `alternatives --display java`.
    """
    query = "alternatives --display java | grep 'slave jre: ' | awk '{print $3}'"
    return sudo(query)
def find_java_path_local():
    """Return the JRE path on this host.

    Parses the 'slave jre:' line out of `alternatives --display java`.
    """
    query = "alternatives --display java | grep 'slave jre: ' | awk '{print $3}'"
    return local(query, capture=True)
def ensure_ntpd(user, edge_private_ip=''):
    """Install and enable ntpd in place of chronyd (idempotent per user).

    On resources other than 'ssn' and 'edge' the edge node's private IP is
    appended as the preferred NTP server. Exits the process with status 1
    on failure (module convention).

    Args:
        user: remote user whose ~/.ensure_dir holds the completion marker.
        edge_private_ip: edge node address used as the upstream NTP server
            for non-ssn/non-edge resources.
    """
    try:
        if not exists('/home/{}/.ensure_dir/ntpd_ensured'.format(user)):
            # chronyd conflicts with ntpd, so turn it off first.
            sudo('systemctl disable chronyd')
            manage_pkg('-y install', 'remote', 'ntp')
            # 'tinker panic 0' keeps ntpd from aborting on a large clock offset.
            sudo('echo "tinker panic 0" >> /etc/ntp.conf')
            sudo('systemctl start ntpd')
            if os.environ['conf_resource'] != 'ssn' and os.environ['conf_resource'] != 'edge':
                sudo('echo "server {} prefer iburst" >> /etc/ntp.conf'.format(edge_private_ip))
                sudo('systemctl restart ntpd')
            sudo('systemctl enable ntpd')
            sudo('touch /home/{}/.ensure_dir/ntpd_ensured'.format(user))
    except:
        sys.exit(1)
def ensure_java(user):
    """Install OpenJDK 8 (devel) on the remote host, once per user."""
    try:
        # The marker file makes repeated invocations no-ops.
        if exists('/home/{}/.ensure_dir/java_ensured'.format(user)):
            return
        manage_pkg('-y install', 'remote', 'java-1.8.0-openjdk-devel')
        sudo('touch /home/{}/.ensure_dir/java_ensured'.format(user))
    except:
        sys.exit(1)
def ensure_step(user):
    """Install the smallstep `step` CLI v0.13.3, once per user."""
    try:
        # The marker file makes repeated invocations no-ops.
        if exists('/home/{}/.ensure_dir/step_ensured'.format(user)):
            return
        manage_pkg('-y install', 'remote', 'wget')
        # Fetch the release tarball, unpack it, and put the binary on PATH.
        sudo('wget https://github.com/smallstep/cli/releases/download/v0.13.3/step_0.13.3_linux_amd64.tar.gz '
             '-O /tmp/step_0.13.3_linux_amd64.tar.gz')
        sudo('tar zxvf /tmp/step_0.13.3_linux_amd64.tar.gz -C /tmp/')
        sudo('mv /tmp/step_0.13.3/bin/step /usr/bin/')
        sudo('touch /home/{}/.ensure_dir/step_ensured'.format(user))
    except:
        sys.exit(1)
| 44 | 122 | 0.572473 |
d7c44da2d28d1915f78b00c09d13922f7139003b | 335 | py | Python | Learnpy/Python/scripts/timenextsecond.py | firstoney/python-scripts | 490eb9668bda6db004ae87d204588fb6ffe56051 | [
"Apache-2.0"
] | 1 | 2019-11-29T14:39:36.000Z | 2019-11-29T14:39:36.000Z | Learnpy/Python/scripts/timenextsecond.py | JackyYuanjie/python-scripts | 490eb9668bda6db004ae87d204588fb6ffe56051 | [
"Apache-2.0"
] | null | null | null | Learnpy/Python/scripts/timenextsecond.py | JackyYuanjie/python-scripts | 490eb9668bda6db004ae87d204588fb6ffe56051 | [
"Apache-2.0"
] | 1 | 2020-01-09T07:29:17.000Z | 2020-01-09T07:29:17.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Read a clock time as 'hours:minutes:seconds', advance it by one second,
# and print the resulting time (wrapping around at midnight).
raw = input("请以'时:分:秒'格式进行输入:")
hours, minutes, seconds = [int(part) for part in raw.split(":")]
seconds += 1
# Carry each unit that overflows into the next larger one.
if seconds == 60:
    minutes += 1
    seconds = 0
if minutes == 60:
    hours += 1
    minutes = 0
if hours == 24:
    hours = 0
print("{}:{}:{}".format(hours, minutes, seconds))
| 13.958333 | 35 | 0.441791 |
ac385470285ee96c772ebe86f5598a80fc462be3 | 19,824 | py | Python | tests/integration/test_push_updates.py | FilipSchad/packit | d57da48c649ed9e0212b6a7ca5fc6428c217bc71 | [
"MIT"
] | null | null | null | tests/integration/test_push_updates.py | FilipSchad/packit | d57da48c649ed9e0212b6a7ca5fc6428c217bc71 | [
"MIT"
] | null | null | null | tests/integration/test_push_updates.py | FilipSchad/packit | d57da48c649ed9e0212b6a7ca5fc6428c217bc71 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
from flexmock import flexmock
from munch import Munch
@pytest.fixture()
def query_response():
return Munch(
{
"updates": [
Munch(
{
"autokarma": True,
"autotime": True,
"stable_karma": 3,
"stable_days": 7,
"unstable_karma": -3,
"requirements": "",
"require_bugs": True,
"require_testcases": True,
"display_name": "",
"notes": "New upstream release: 0.5.1",
"type": "enhancement",
"status": "testing",
"request": None,
"severity": "unspecified",
"suggest": "unspecified",
"locked": False,
"pushed": True,
"critpath": False,
"close_bugs": True,
"date_submitted": "2019-08-26 10:53:13",
"date_modified": None,
"date_approved": None,
"date_pushed": "2019-08-26 18:21:12",
"date_testing": "2019-08-26 18:21:12",
"date_stable": None,
"alias": "FEDORA-2019-89c99f680c",
"test_gating_status": "ignored",
"meets_testing_requirements": False,
"url": "https://bodhi.fedoraproject.org/updates/FEDORA-2019-89c99f680c",
"title": "packit-0.5.1-1.fc30",
"release": Munch(
{
"name": "F30",
"long_name": "Fedora 30",
"version": "30",
"id_prefix": "FEDORA",
"branch": "f30",
"dist_tag": "f30",
"stable_tag": "f30-updates",
"testing_tag": "f30-updates-testing",
"candidate_tag": "f30-updates-candidate",
"pending_signing_tag": "f30-signing-pending",
"pending_testing_tag": "f30-updates-testing-pending",
"pending_stable_tag": "f30-updates-pending",
"override_tag": "f30-override",
"mail_template": "fedora_errata_template",
"state": "current",
"composed_by_bodhi": True,
"create_automatic_updates": None,
"package_manager": "unspecified",
"testing_repository": None,
"composes": [],
}
),
"comments": [
Munch(
{
"id": 1014223,
"karma": 0,
"karma_critpath": 0,
"text": "This update has been submitted for testing by ttomece",
"timestamp": "2019-08-26 10:53:13",
"update_id": 149516,
"user_id": 91,
"bug_feedback": [],
"testcase_feedback": [],
"user": Munch(
{
"id": 91,
"name": "bodhi",
"email": None,
"avatar": None,
"openid": "bodhi.id.fedoraproject.org",
"groups": [],
}
),
}
),
Munch(
{
"id": 1014224,
"karma": 0,
"karma_critpath": 0,
"text": "This update's test gating status has been changed.",
"timestamp": "2019-08-26 10:53:13",
"update_id": 149516,
"user_id": 91,
"bug_feedback": [],
"testcase_feedback": [],
"user": Munch(
{
"id": 91,
"name": "bodhi",
"email": None,
"avatar": None,
"openid": "bodhi.id.fedoraproject.org",
"groups": [],
}
),
}
),
Munch(
{
"id": 1014225,
"karma": 0,
"karma_critpath": 0,
"text": "This update's test gating status has been changed.",
"timestamp": "2019-08-26 10:53:16",
"update_id": 149516,
"user_id": 91,
"bug_feedback": [],
"testcase_feedback": [],
"user": Munch(
{
"id": 91,
"name": "bodhi",
"email": None,
"avatar": None,
"openid": "bodhi.id.fedoraproject.org",
"groups": [],
}
),
}
),
Munch(
{
"id": 1016059,
"karma": 0,
"karma_critpath": 0,
"text": "This update has been pushed to testing.",
"timestamp": "2019-08-27 18:22:32",
"update_id": 149516,
"user_id": 91,
"bug_feedback": [],
"testcase_feedback": [],
"user": Munch(
{
"id": 91,
"name": "bodhi",
"email": None,
"avatar": None,
"openid": "bodhi.id.fedoraproject.org",
"groups": [],
}
),
}
),
],
"builds": [
Munch(
{
"nvr": "packit-0.5.1-1.fc30",
"release_id": 28,
"signed": True,
"type": "rpm",
"epoch": 0,
}
)
],
"compose": None,
"bugs": [],
"user": Munch(
{
"id": 754,
"name": "ttomecek",
"email": "ttomecek@redhat.com",
"avatar": None,
"openid": "ttomecek.id.fedoraproject.org",
"groups": [
Munch({"name": "provenpackager"}),
Munch({"name": "packager"}),
],
}
),
"updateid": "FEDORA-2019-89c99f680c",
"karma": 0,
"content_type": "rpm",
"test_cases": [],
}
)
]
}
)
@pytest.fixture()
def request_response():
return Munch(
{
"update": Munch(
{
"autokarma": True,
"autotime": True,
"stable_karma": 3,
"stable_days": 7,
"unstable_karma": -3,
"requirements": "",
"require_bugs": True,
"require_testcases": True,
"display_name": "",
"notes": "New upstream release: 0.5.1",
"type": "enhancement",
"status": "stable",
"request": None,
"severity": "unspecified",
"suggest": "unspecified",
"locked": False,
"pushed": True,
"critpath": False,
"close_bugs": True,
"date_submitted": "2019-08-26 10:53:13",
"date_modified": None,
"date_approved": None,
"date_pushed": "2019-08-26 18:21:12",
"date_testing": "2019-08-26 18:21:12",
"date_stable": "2019-09-03 08:21:12",
"alias": "FEDORA-2019-89c99f680c",
"test_gating_status": "ignored",
"meets_testing_requirements": False,
"url": "https://bodhi.fedoraproject.org/updates/FEDORA-2019-89c99f680c",
"title": "packit-0.5.1-1.fc30",
"release": Munch(
{
"name": "F30",
"long_name": "Fedora 30",
"version": "30",
"id_prefix": "FEDORA",
"branch": "f30",
"dist_tag": "f30",
"stable_tag": "f30-updates",
"testing_tag": "f30-updates-testing",
"candidate_tag": "f30-updates-candidate",
"pending_signing_tag": "f30-signing-pending",
"pending_testing_tag": "f30-updates-testing-pending",
"pending_stable_tag": "f30-updates-pending",
"override_tag": "f30-override",
"mail_template": "fedora_errata_template",
"state": "current",
"composed_by_bodhi": True,
"create_automatic_updates": None,
"package_manager": "unspecified",
"testing_repository": None,
"composes": [],
}
),
"comments": [
Munch(
{
"id": 1014223,
"karma": 0,
"karma_critpath": 0,
"text": "This update has been submitted for testing by ttomecek. ",
"timestamp": "2019-08-26 10:53:13",
"update_id": 149516,
"user_id": 91,
"bug_feedback": [],
"testcase_feedback": [],
"user": Munch(
{
"id": 91,
"name": "bodhi",
"email": None,
"avatar": None,
"openid": "bodhi.id.fedoraproject.org",
"groups": [],
}
),
}
),
Munch(
{
"id": 1014224,
"karma": 0,
"karma_critpath": 0,
"text": "This update's test gating status has been changed.",
"timestamp": "2019-08-26 10:53:13",
"update_id": 149516,
"user_id": 91,
"bug_feedback": [],
"testcase_feedback": [],
"user": Munch(
{
"id": 91,
"name": "bodhi",
"email": None,
"avatar": None,
"openid": "bodhi.id.fedoraproject.org",
"groups": [],
}
),
}
),
Munch(
{
"id": 1014225,
"karma": 0,
"karma_critpath": 0,
"text": "This update's test gating status has been changed.",
"timestamp": "2019-08-26 10:53:16",
"update_id": 149516,
"user_id": 91,
"bug_feedback": [],
"testcase_feedback": [],
"user": Munch(
{
"id": 91,
"name": "bodhi",
"email": None,
"avatar": None,
"openid": "bodhi.id.fedoraproject.org",
"groups": [],
}
),
}
),
Munch(
{
"id": 1016059,
"karma": 0,
"karma_critpath": 0,
"text": "This update has been pushed to testing.",
"timestamp": "2019-08-27 18:22:32",
"update_id": 149516,
"user_id": 91,
"bug_feedback": [],
"testcase_feedback": [],
"user": Munch(
{
"id": 91,
"name": "bodhi",
"email": None,
"avatar": None,
"openid": "bodhi.id.fedoraproject.org",
"groups": [],
}
),
}
),
],
"builds": [
Munch(
{
"nvr": "packit-0.5.1-1.fc30",
"release_id": 28,
"signed": True,
"type": "rpm",
"epoch": 0,
}
)
],
"compose": None,
"bugs": [],
"user": Munch(
{
"id": 754,
"name": "ttomecek",
"email": "ttomecek@redhat.com",
"avatar": None,
"openid": "ttomecek.id.fedoraproject.org",
"groups": [
Munch({"name": "provenpackager"}),
Munch({"name": "packager"}),
],
}
),
"updateid": "FEDORA-2019-89c99f680c",
"karma": 0,
"content_type": "rpm",
"test_cases": [],
}
)
}
)
def test_push_updates(
    cwd_upstream_or_distgit, api_instance, query_response, request_response
):
    """Verify that push_updates() promotes the queried Bodhi update to stable.

    BodhiClient is fully mocked (no network): the test passes only if exactly
    one query() happens and exactly one request() asks for update
    'FEDORA-2019-89c99f680c' to be moved to 'stable'.
    """
    # Imported lazily so that collecting the test suite does not require bodhi.
    from bodhi.client.bindings import BodhiClient
    u, d, api = api_instance  # presumably (upstream, dist-git, api) — only `api` is used here
    # Install expectations on the class; flexmock verifies the .once() call
    # counts when the test tears down.
    flexmock(BodhiClient)
    BodhiClient.should_receive("query").and_return(query_response).once()
    BodhiClient.should_receive("request").with_args(
        update="FEDORA-2019-89c99f680c", request="stable"
    ).and_return(request_response).once()
    api.push_updates()
| 46.754717 | 100 | 0.292776 |
148fbf90badcad5fec8b6e8aaad709791e9d568d | 305 | py | Python | python/uri_1021.py | tainahemmanuele/uri_iniciante | afacf6682d77465efcada7f3e2c3126143497ed9 | [
"MIT"
] | null | null | null | python/uri_1021.py | tainahemmanuele/uri_iniciante | afacf6682d77465efcada7f3e2c3126143497ed9 | [
"MIT"
] | null | null | null | python/uri_1021.py | tainahemmanuele/uri_iniciante | afacf6682d77465efcada7f3e2c3126143497ed9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import math
# Read coefficients of a*x^2 + b*x + c = 0 from one whitespace-separated line.
# NOTE(review): raw_input is Python 2 only — confirm target interpreter.
a, b, c= raw_input().split()
a = float(a)
b = float(b)
c = float(c)
# Discriminant of the quadratic.
d = (b*b)-(4*a*c)
# No real roots when the discriminant is negative; division by zero when a == 0.
if (a==0 or d<0):
    print ("Impossivel calcular")
else:
    x1= (-b+ math.sqrt(d))/(2*a)
    x2= (-b-math.sqrt(d))/(2*a)
    print ("R1 = %.5f" %x1)
    print ("R2 = %.5f" %x2)
| 16.052632 | 33 | 0.488525 |
8bf3020344e7601dc1dde21797cf9d7663c71756 | 95 | py | Python | Python2/lonely-integer.py | LaughDonor/hackerrank | 07fc0e596b2b456aa72a3cd66036d718253777f0 | [
"Apache-2.0"
] | null | null | null | Python2/lonely-integer.py | LaughDonor/hackerrank | 07fc0e596b2b456aa72a3cd66036d718253777f0 | [
"Apache-2.0"
] | null | null | null | Python2/lonely-integer.py | LaughDonor/hackerrank | 07fc0e596b2b456aa72a3cd66036d718253777f0 | [
"Apache-2.0"
] | null | null | null | from collections import Counter
input()
print Counter(raw_input().split()).most_common()[-1][0] | 31.666667 | 55 | 0.757895 |
72b2f2c3bbd92b7f4c6c26d938799e09efba3ebc | 13,012 | py | Python | etl/transform/relation.py | akudan/purr | ca6a57217a2cd83fb267d06e8a77657e213f73ce | [
"MIT"
] | 4 | 2019-11-14T18:15:42.000Z | 2022-03-22T22:23:35.000Z | etl/transform/relation.py | akudan/purr | ca6a57217a2cd83fb267d06e8a77657e213f73ce | [
"MIT"
] | 1 | 2020-01-17T22:33:53.000Z | 2020-01-29T21:53:51.000Z | etl/transform/relation.py | akudan/purr | ca6a57217a2cd83fb267d06e8a77657e213f73ce | [
"MIT"
] | 1 | 2021-07-07T19:31:26.000Z | 2021-07-07T19:31:26.000Z | from etl.load import table, row, constraint
from etl.transform import type_checker
from etl.transform import unnester
def init_values(attrs):
    """Reset the cached ``value`` entry of every column descriptor to None.

    Mutates *attrs* in place and returns it so calls can be chained.
    """
    for details in attrs.values():
        details["value"] = None
    return attrs
def set_values(attr_details, doc, _extra_props=None):
    """Cast every field of *doc* and store it in the column descriptors.

    Fields that are part of the collection map are cast to their Postgres
    type via ``unnester.cast`` and stored under ``attr_details[key]["value"]``.
    Fields that are not mapped — or whose cast fails (sentinel 'undefined') —
    are collected into *_extra_props* when that dict is supplied.

    Returns ``attr_details`` alone, or ``(attr_details, _extra_props)`` when
    *_extra_props* was passed in (matching the two call styles in this module).
    """
    for key, field_value in doc.items():
        # Direct dict membership: O(1), instead of rebuilding a key list
        # on every iteration and scanning it in O(n).
        if key in attr_details:
            field_type = attr_details[key]["type_cm"]
            value = unnester.cast(field_value, field_type)
            if value == 'undefined' and _extra_props is not None:
                # Cast failed: keep the raw value among the extra properties.
                _extra_props[key] = field_value
            else:
                attr_details[key]["value"] = value
        elif _extra_props is not None:
            # Unmapped field: preserved only in the extra properties.
            _extra_props[key] = field_value
    if _extra_props is not None:
        return attr_details, _extra_props
    return attr_details
def prepare_row_for_insert(attrs, doc, include_extra_props=None):
    """Build the (column names, values) pair for inserting one document.

    Resets the descriptors, casts the document's fields into them, and —
    when *include_extra_props* is True — casts the leftover fields to jsonb
    and appends them as the ``_extra_props`` column.
    """
    extra = {}
    attrs = init_values(attrs)
    attrs, extra = set_values(attrs, doc, extra)
    if include_extra_props is True:
        extra = unnester.cast(extra, 'jsonb')
        attrs["extraProps"]["value"] = extra
    names = []
    row_values = []
    # Only columns present in this document are part of the row.
    for key, details in attrs.items():
        if key in doc:
            names.append(details["name_cm"])
            row_values.append(details["value"])
    # Leftover fields exist when the document had more keys than mapped columns.
    if include_extra_props and len(doc.keys()) > len(names):
        names.append('_extra_props')
        row_values.append(extra)
    return names, row_values
def is_schema_changed(attrs_pg, types_pg, attrs_cm, types_cm):
    """Return True unless Postgres already matches the collection map.

    The schema counts as unchanged only when Postgres has at least one
    column and both the attribute names and the type names agree as sets
    (order and duplicates are ignored).
    """
    same_attrs = set(attrs_pg) == set(attrs_cm)
    same_types = set(types_pg) == set(types_cm)
    return not (attrs_pg and same_attrs and same_types)
class Relation():
    """
    Base class for transforming MongoDB documents into Postgres rows.

    Wraps one Postgres table (``schema``.``relation_name``) and knows how
    to create it, keep its schema in sync with the collection map, and
    insert/upsert/delete rows derived from documents.
    """
    def __init__(self, pg, schema, relation, created=False):
        """Constructor for Relation.

        pg: database handle passed through to the etl.load helpers.
        schema: Postgres schema name.
        relation: Postgres table name.
        created: whether the table is already known to exist.
        """
        self.relation_name = relation
        self.created = created
        self.db = pg
        self.schema = schema
    def exists(self):
        # Refresh and cache whether the table currently exists in Postgres.
        self.created = table.exists(self.db, self.schema, self.relation_name)
        return self.created
    def insert(self, docs, attr_details, include_extra_props=True, tailing=False):
        # Dispatch to the appropriate bulk-insert variant.
        if include_extra_props is True:
            self.insert_bulk(
                docs,
                attr_details,
                include_extra_props)
        else:
            if tailing is True:
                self.insert_bulk_no_extra_props_tailed(
                    docs,
                    attr_details,
                    include_extra_props)
            else:
                self.insert_bulk_no_extra_props(
                    docs,
                    attr_details,
                    include_extra_props)
    def insert_bulk(self, docs, attrs,
                    include_extra_props=True):
        """
        Transforms document and inserts it into the corresponding table.
        Parameters
        ----------
        doc : dict
            the document we want to insert
        TODO add unset
        """
        # This is needed because
        # sometimes there is no value for attributes (null)
        result = []
        if type(docs) is not list:
            docs = [docs]
        # NOTE(review): prepare_row_for_insert is called without
        # include_extra_props, so its extraProps branch never runs here, and
        # attrs_pg from the *last* iteration is used for all rows — this
        # assumes every document shares the same keys; confirm intended.
        for doc in docs:
            (attrs_pg, values) = prepare_row_for_insert(attrs, doc)
            result.append(tuple(values))
        # Upsert when the table pre-existed, plain insert for a fresh table.
        if self.created is True:
            row.upsert_bulk(self.db, self.schema,
                            self.relation_name, attrs_pg, result)
        else:
            row.insert_bulk(self.db, self.schema,
                            self.relation_name, attrs_pg, result)
    def insert_bulk_no_extra_props(self, docs, attrs,
                                   include_extra_props=True):
        """
        Transforms document and inserts it into the corresponding table.
        Parameters
        ----------
        docs : dict
            the documents we want to insert
        unset: string[]
            list of fields to unset
        """
        # This is needed because
        # sometimes there is no value for attributes (null)
        result = []
        if type(docs) is not list:
            docs = [docs]
        for doc in docs:
            attrs = init_values(attrs)
            attrs = set_values(attrs, doc)
            # With multiple docs all mapped columns are emitted (missing
            # values stay None); a single doc only emits its own keys.
            if len(docs) > 1:
                attrs_pg = [v["name_cm"] for k, v in attrs.items()]
                values = [v["value"] for k, v in attrs.items()]
            else:
                attrs_pg = [v["name_cm"]
                            for k, v in attrs.items() if k in doc.keys()]
                values = [v["value"]
                          for k, v in attrs.items() if k in doc.keys()]
            result.append(tuple(values))
        if self.created is True or len(docs) == 1:
            row.upsert_bulk(self.db, self.schema,
                            self.relation_name, attrs_pg, result)
        else:
            row.insert_bulk(self.db, self.schema,
                            self.relation_name, attrs_pg, result)
    def insert_bulk_no_extra_props_tailed(self,
                                          docs,
                                          attrs,
                                          include_extra_props=True):
        """
        Transforms document and inserts it into the corresponding table.
        Parameters
        ----------
        docs : dict
            the documents we want to insert
        """
        # This is needed because
        # sometimes there is no value for attributes (null)
        if type(docs) is not list:
            docs = [docs]
        # Group rows by their exact column tuple so each distinct key set
        # gets its own upsert statement.
        grouped_values = {}
        for i in range(len(docs)):
            attrs_pg = []
            values = []
            doc = docs[i]
            attrs = init_values(attrs)
            (attrs) = set_values(
                attrs, doc)
            attrs_pg = tuple([v["name_cm"]
                              for k, v in attrs.items() if k in doc.keys()])
            values = [v["value"]
                      for k, v in attrs.items() if k in doc.keys()]
            if attrs_pg not in grouped_values.keys():
                grouped_values[attrs_pg] = []
            grouped_values[attrs_pg].append(values)
        for k, v in grouped_values.items():
            attrs_pg = list(k)
            row.upsert_bulk_tail(self.db, self.schema,
                                 self.relation_name, attrs_pg, v)
    def delete(self, docs):
        # Delete rows by the stringified Mongo ``_id`` of each document.
        ids = []
        if type(docs) is list:
            for doc in docs:
                ids.append(str(doc["_id"]))
        else:
            ids.append(str(docs["_id"]))
        row.delete(self.db, self.schema, self.relation_name, ids)
    def create(self, attrs, types):
        # Create the table with the given column names and types.
        table.create(self.db, self.schema, self.relation_name, attrs, types)
    def add_pk(self, attr):
        # Add a primary-key constraint on *attr*.
        constraint.add_pk(self.db, self.schema, self.relation_name, attr)
    def columns_remove(self, attrs_pg, types_pg, attrs_cm, types_cm):
        """
        Check if attributes in Postgres exist in the collection map.
        If not, remove the column.
        """
        temp_attrs_cm = []
        temp_types_cm = []
        for i in range(0, len(attrs_pg)):
            try:
                # check if attributes in PG are part of the collection map)
                idx = attrs_cm.index(attrs_pg[i])
            except ValueError:
                # remove extra columns from PG (because they are no longer
                # part of the collection map)
                table.remove_column(
                    self.db, self.schema, self.relation_name, attrs_pg[i])
                attrs_pg[i] = None
                types_pg[i] = None
                continue
            # Move matched attributes to the front so both lists end up
            # aligned with the surviving Postgres column order.
            temp_attrs_cm.append(attrs_cm[idx])
            temp_types_cm.append(types_cm[idx])
            del attrs_cm[idx]
            del types_cm[idx]
        attrs_pg = [x for x in attrs_pg if x is not None]
        types_pg = [x for x in types_pg if x is not None]
        attrs_cm = temp_attrs_cm + (attrs_cm)
        types_cm = temp_types_cm + (types_cm)
        return attrs_pg, types_pg, attrs_cm, types_cm
    def columns_add(self, attrs_pg, types_pg, attrs_cm, types_cm):
        """
        Adds columns to Postgres OR updates column types.

        Returns the list of (column, old_type) pairs whose types could not
        be converted when updating types; returns None on the add branch.
        """
        type_convert_fail = []
        if set(attrs_cm).issubset(set(attrs_pg)):
            # update types
            for i in range(len(attrs_pg)):
                if attrs_pg[i] in attrs_cm:
                    # type from the db and type from the config file
                    type_old = types_pg[i].lower()
                    type_new = types_cm[i].lower()
                    name_ts_no_tz = 'timestamp without time zone'
                    name_ts = 'timestamp'
                    # Postgres reports TIMESTAMP as the long form; treat the
                    # two spellings as the same type.
                    if type_old == name_ts_no_tz and type_new == name_ts:
                        continue
                    elif type_old != type_new:
                        # type was changed
                        if type_checker.is_convertable(type_old, type_new):
                            table.column_change_type(
                                self.db,
                                self.schema,
                                self.relation_name,
                                attrs_pg[i],
                                type_new)
                        else:
                            type_convert_fail.append(
                                (attrs_pg[i], type_old))
                            continue
            return type_convert_fail
        else:
            # add new columns
            diff = list(set(attrs_cm) - set(attrs_pg))
            # get type of new attributes
            attrs_to_add = []
            types_to_add = []
            for d in diff:
                attrs_to_add.append(d)
                idx = attrs_cm.index(d)
                types_to_add.append(types_cm[idx])
            table.add_multiple_columns(
                self.db,
                self.schema,
                self.relation_name,
                attrs_to_add,
                types_to_add
            )
    def update_schema(self, attrs_types_cm):
        """
        Checks if there were any changes in the schema and adds/changes
        attributes if needed.
        If relation is already created in the database then we need
        to get the existing attributes and types and compare them
        to our new attributes and types from the config file.
        - find what's different in attributes from pg and conf
        - check existing attribute names
        - check types
        - check check is one is castable to the other
        """
        attrs_cm = []
        types_cm = []
        attrs_pg = []
        types_pg = []
        column_info = table.get_column_names_and_types(
            self.db, self.schema, self.relation_name)
        if self.exists() is True and column_info is not None:
            # Every attribute from pg and conf has to have the same order.
            # We are sorting by pg column names.
            attrs_types_pg = dict(column_info)
            attrs_pg = [k for k in sorted(attrs_types_pg.keys())]
            types_pg = [attrs_types_pg[k]
                        for k in sorted(attrs_types_pg.keys())]
            attrs_cm = [attrs_types_cm[k]["name_cm"]
                        for k in attrs_types_cm.keys()]
            types_cm = [attrs_types_cm[k]["type_cm"]
                        for k in attrs_types_cm.keys()]
            # if attributes from PG and the collection map are
            # the same, do nothing
            schema_changed = is_schema_changed(
                attrs_pg, types_pg, attrs_cm, types_cm)
            if schema_changed is False:
                return
            (attrs_pg, types_pg, attrs_cm, types_cm) = self.columns_remove(
                attrs_pg, types_pg, attrs_cm, types_cm)
            self.columns_add(attrs_pg, types_pg, attrs_cm, types_cm)
        else:
            # Table missing (or no column info): create it from scratch.
            attrs_cm = [v["name_cm"] for k, v in attrs_types_cm.items()]
            types_cm = [v["type_cm"] for k, v in attrs_types_cm.items()]
            if self.exists() is False:
                self.create(attrs_cm, types_cm)
            return
        # TODO if table was dropped or schema was reset
        # then there is no need to have fun
        # with the type checking.
        # if len(attrs_types_from_db) == 0:
        # When new attributes are fully contained in the attribute list from DB
        # we need to check if the types are equal and if not,
        # we need to check if it is possible to convert the
        # old type into the new one.
        # Anything can be converted to JSONB.
    def vacuum(self):
        # Reclaim storage for this table.
        table.vacuum(self.db, self.schema, self.relation_name)
| 35.455041 | 82 | 0.538503 |
a540f1ff619ed3133058580d1a1da053fde29f50 | 91 | py | Python | busspottingbackend/spotting/apps.py | bmsleight/busspottingapp | 603a31eca9551852c1bdd917233b767d7fb776d7 | [
"MIT"
] | null | null | null | busspottingbackend/spotting/apps.py | bmsleight/busspottingapp | 603a31eca9551852c1bdd917233b767d7fb776d7 | [
"MIT"
] | null | null | null | busspottingbackend/spotting/apps.py | bmsleight/busspottingapp | 603a31eca9551852c1bdd917233b767d7fb776d7 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class SpottingConfig(AppConfig):
    """Django AppConfig registering the ``spotting`` application."""
    name = 'spotting'
| 15.166667 | 33 | 0.758242 |
8d939c4a1583d04095ffe426cc07e269eb03b971 | 2,288 | py | Python | train.py | fmisser/mlflow-example | 0a2699ad3c18987f73e3506efdf7b970c4bcfd2c | [
"Apache-2.0"
] | null | null | null | train.py | fmisser/mlflow-example | 0a2699ad3c18987f73e3506efdf7b970c4bcfd2c | [
"Apache-2.0"
] | null | null | null | train.py | fmisser/mlflow-example | 0a2699ad3c18987f73e3506efdf7b970c4bcfd2c | [
"Apache-2.0"
] | null | null | null | # The data set used in this example is from http://archive.ics.uci.edu/ml/datasets/Wine+Quality
# P. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis.
# Modeling wine preferences by data mining from physicochemical properties. In Decision Support Systems, Elsevier, 47(4):547-553, 2009.
import os
import warnings
import sys
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import ElasticNet
import mlflow
import mlflow.sklearn
def eval_metrics(actual, pred):
    """Compute regression metrics for a set of predictions.

    Returns a ``(rmse, mae, r2)`` tuple: root-mean-squared error, mean
    absolute error and the coefficient of determination.
    """
    mse = mean_squared_error(actual, pred)
    return (
        np.sqrt(mse),
        mean_absolute_error(actual, pred),
        r2_score(actual, pred),
    )
if __name__ == "__main__":
    warnings.filterwarnings("ignore")
    # Fixed seed so the train/test split is reproducible across runs.
    np.random.seed(40)
    # Read the wine-quality csv file (make sure you're running this from the root of MLflow!)
    wine_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "wine-quality.csv")
    data = pd.read_csv(wine_path)
    # Split the data into training and test sets. (0.75, 0.25) split.
    train, test = train_test_split(data)
    # The predicted column is "quality" which is a scalar from [3, 9]
    train_x = train.drop(["quality"], axis=1)
    test_x = test.drop(["quality"], axis=1)
    train_y = train[["quality"]]
    test_y = test[["quality"]]
    # ElasticNet hyper-parameters, optionally taken from the command line.
    alpha = float(sys.argv[1]) if len(sys.argv) > 1 else 0.5
    l1_ratio = float(sys.argv[2]) if len(sys.argv) > 2 else 0.5
    # Log params/metrics/model to a locally running MLflow tracking server.
    mlflow.set_tracking_uri("http://127.0.0.1:5000")
    with mlflow.start_run():
        lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42)
        lr.fit(train_x, train_y)
        predicted_qualities = lr.predict(test_x)
        (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)
        print("Elasticnet model (alpha=%f, l1_ratio=%f):" % (alpha, l1_ratio))
        print("  RMSE: %s" % rmse)
        print("  MAE: %s" % mae)
        print("  R2: %s" % r2)
        mlflow.log_param("alpha", alpha)
        mlflow.log_param("l1_ratio", l1_ratio)
        mlflow.log_metric("rmse", rmse)
        mlflow.log_metric("r2", r2)
        mlflow.log_metric("mae", mae)
        mlflow.sklearn.log_model(lr, "model")
| 33.15942 | 135 | 0.676573 |
23564ff3974c7e1fcc87fdef42ed2424afcf6109 | 21,771 | py | Python | GPy/testing/plotting_tests.py | mclaughlin6464/GPy | d006325ace0d82bcdde05ab384eac1d6a9c21c71 | [
"BSD-3-Clause"
] | 1 | 2021-07-19T13:02:39.000Z | 2021-07-19T13:02:39.000Z | GPy/testing/plotting_tests.py | mclaughlin6464/GPy | d006325ace0d82bcdde05ab384eac1d6a9c21c71 | [
"BSD-3-Clause"
] | null | null | null | GPy/testing/plotting_tests.py | mclaughlin6464/GPy | d006325ace0d82bcdde05ab384eac1d6a9c21c71 | [
"BSD-3-Clause"
] | null | null | null | #===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
#===============================================================================
# SKIPPING PLOTTING BECAUSE IT BEHAVES DIFFERENTLY ON DIFFERENT
# SYSTEMS, AND WILL MISBEHAVE
from nose import SkipTest
#raise SkipTest("Skipping Matplotlib testing")
#===============================================================================
try:
import matplotlib
matplotlib.use('agg', warn=False)
except ImportError:
# matplotlib not installed
from nose import SkipTest
raise SkipTest("Error importing matplotlib")
from unittest.case import TestCase
import numpy as np
import GPy, os
import logging
from GPy.util.config import config
from GPy.plotting import change_plotting_library, plotting_library
class ConfigTest(TestCase):
    """Tests switching the active GPy plotting backend."""
    def tearDown(self):
        # Always restore the default backend so later tests see matplotlib.
        change_plotting_library('matplotlib')
    def test_change_plotting(self):
        # Unknown backend names are rejected outright.
        self.assertRaises(ValueError, change_plotting_library, 'not+in9names')
        # 'none' is accepted, but using the library afterwards must fail.
        change_plotting_library('none')
        self.assertRaises(RuntimeError, plotting_library)
        change_plotting_library('matplotlib')
# Skip this whole module unless matplotlib is the configured and importable
# plotting backend.
if config.get('plotting', 'library') != 'matplotlib':
    raise SkipTest("Matplotlib not installed, not testing plots")
try:
    from matplotlib import cbook, pyplot as plt
    from matplotlib.testing.compare import compare_images
except ImportError:
    raise SkipTest("Matplotlib not installed, not testing plots")
# Only the compressed-array ('npz') baseline format is compared by default.
extensions = ['npz']
# Directory of this test file; baseline images live next to it.
basedir = os.path.dirname(os.path.relpath(os.path.abspath(__file__)))
def _image_directories():
    """Return the ``(baseline_dir, result_dir)`` pair used by the tests.

    The result directory is created on demand; the baseline directory is
    expected to ship with a source checkout of GPy.
    """
    baseline_dir = os.path.join(basedir, 'baseline', '.')
    result_dir = os.path.join(basedir, 'testresult', '.')
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    return baseline_dir, result_dir
baseline_dir, result_dir = _image_directories()
if not os.path.exists(baseline_dir):
raise SkipTest("Not installed from source, baseline not available. Install from source to test plotting")
def _image_comparison(baseline_images, extensions=['pdf','svg','png'], tol=11, rtol=1e-3, **kwargs):
    # Save every currently open figure under each extension, then yield one
    # comparison callable per (figure, extension) pair.  'npz' baselines are
    # compared numerically with assert_allclose; image formats go through
    # matplotlib's compare_images with pixel tolerance *tol*.
    # NOTE(review): the mutable default for ``extensions`` is shared across
    # calls; callers in this module always pass it explicitly.
    for num, base in zip(plt.get_fignums(), baseline_images):
        for ext in extensions:
            fig = plt.figure(num)
            try:
                fig.canvas.draw()
            except Exception as e:
                # Draw failures are logged but not fatal; comparison below
                # will surface the problem.
                logging.error(base)
                #raise SkipTest(e)
            #fig.axes[0].set_axis_off()
            #fig.set_frameon(False)
            if ext in ['npz']:
                figdict = flatten_axis(fig)
                np.savez_compressed(os.path.join(result_dir, "{}.{}".format(base, ext)), **figdict)
                try:
                    fig.savefig(os.path.join(result_dir, "{}.{}".format(base, 'png')),
                                transparent=True,
                                edgecolor='none',
                                facecolor='none',
                                #bbox='tight'
                                )
                except:
                    logging.error(base)
                   # raise
            else:
                fig.savefig(os.path.join(result_dir, "{}.{}".format(base, ext)),
                            transparent=True,
                            edgecolor='none',
                            facecolor='none',
                            #bbox='tight'
                            )
    for num, base in zip(plt.get_fignums(), baseline_images):
        for ext in extensions:
            #plt.close(num)
            actual = os.path.join(result_dir, "{}.{}".format(base, ext))
            expected = os.path.join(baseline_dir, "{}.{}".format(base, ext))
            if ext == 'npz':
                def do_test():
                    # Missing baseline: seed it from the result and fail once.
                    if not os.path.exists(expected):
                        import shutil
                        shutil.copy2(actual, expected)
                        #shutil.copy2(os.path.join(result_dir, "{}.{}".format(base, 'png')), os.path.join(baseline_dir, "{}.{}".format(base, 'png')))
                        raise IOError("Baseline file {} not found, copying result {}".format(expected, actual))
                    else:
                        exp_dict = dict(np.load(expected).items())
                        act_dict = dict(np.load(actual).items())
                        for name in act_dict:
                            if name in exp_dict:
                                try:
                                    np.testing.assert_allclose(exp_dict[name], act_dict[name], err_msg="Mismatch in {}.{}".format(base, name), rtol=rtol, **kwargs)
                                except AssertionError as e:
                                    raise SkipTest(e)
            else:
                def do_test():
                    err = compare_images(expected, actual, tol, in_decorator=True)
                    if err:
                        raise SkipTest("Error between {} and {} is {:.5f}, which is bigger then the tolerance of {:.5f}".format(actual, expected, err['rms'], tol))
            yield do_test
    plt.close('all')
def flatten_axis(ax, prevname=''):
    # Recursively collect every non-empty numpy array reachable from *ax*'s
    # members into a flat {dotted-path: array} dict, used as the 'npz'
    # baseline representation of a figure.
    import inspect
    members = inspect.getmembers(ax)
    arrays = {}
    def _flatten(l, pre):
        arr = {}
        if isinstance(l, np.ndarray):
            if l.size:
                arr[pre] = np.asarray(l)
        elif isinstance(l, dict):
            # NOTE(review): this recurses on the dict *itself* (not l[_n])
            # and writes into the enclosing ``arrays`` rather than ``arr`` —
            # looks unintentional; confirm before relying on dict members.
            for _n in l:
                _tmp = _flatten(l, pre+"."+_n+".")
                for _nt in _tmp.keys():
                    arrays[_nt] = _tmp[_nt]
        elif isinstance(l, list) and len(l)>0:
            for i in range(len(l)):
                _tmp = _flatten(l[i], pre+"[{}]".format(i))
                for _n in _tmp:
                    arr["{}".format(_n)] = _tmp[_n]
        else:
            # Anything else is treated as a nested object and flattened too.
            return flatten_axis(l, pre+'.')
        return arr
    for name, l in members:
        if isinstance(l, np.ndarray):
            arrays[prevname+name] = np.asarray(l)
        elif isinstance(l, list) and len(l)>0:
            for i in range(len(l)):
                _tmp = _flatten(l[i], prevname+name+"[{}]".format(i))
                for _n in _tmp:
                    arrays["{}".format(_n)] = _tmp[_n]
    return arrays
def _a(x,y,decimal):
np.testing.assert_array_almost_equal(x, y, decimal)
def compare_axis_dicts(x, y, decimal=6):
    """Compare two flattened-axis dicts entry-wise; skip the test on mismatch.

    Both dicts must have the same number of entries, and every array in *x*
    must match its counterpart in *y* to *decimal* places.
    """
    try:
        assert(len(x)==len(y))
        for name in x:
            _a(x[name], y[name], decimal)
    except AssertionError as e:
        # BaseException.message was removed in Python 3 — accessing it would
        # raise AttributeError instead of skipping; str(e) works on 2 and 3.
        raise SkipTest(str(e))
def test_figure():
    # Exercise the low-level canvas helpers of the abstract plotting library
    # (imshow_interact, annotation heatmaps, gradient fill, 3d plot) and
    # compare the resulting figures against stored baselines.
    np.random.seed(1239847)
    from GPy.plotting import plotting_library as pl
    #import matplotlib
    matplotlib.rcParams.update(matplotlib.rcParamsDefault)
    #matplotlib.rcParams[u'figure.figsize'] = (4,3)
    matplotlib.rcParams[u'text.usetex'] = False
    import warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        ax, _ = pl().new_canvas(num="imshow_interact")
        def test_func(x):
            return x[:, 0].reshape(3,3)
        pl().imshow_interact(ax, test_func, extent=(-1,1,-1,1), resolution=3)
        ax, _ = pl().new_canvas()
        def test_func_2(x):
            y = x[:, 0].reshape(3,3)
            anno = np.argmax(x, axis=1).reshape(3,3)
            return y, anno
        pl().annotation_heatmap_interact(ax, test_func_2, extent=(-1,1,-1,1), resolution=3)
        pl().annotation_heatmap_interact(ax, test_func_2, extent=(-1,1,-1,1), resolution=3, imshow_kwargs=dict(interpolation='nearest'))
        ax, _ = pl().new_canvas(figsize=(4,3))
        x = np.linspace(0,1,100)
        y = [0,1,2]
        array = np.array([.4,.5])
        cmap = matplotlib.colors.LinearSegmentedColormap.from_list('WhToColor', ('r', 'b'), N=array.size)
        pl().fill_gradient(ax, x, y, facecolors=['r', 'g'], array=array, cmap=cmap)
        ax, _ = pl().new_canvas(num="3d_plot", figsize=(4,3), projection='3d', xlabel='x', ylabel='y', zlabel='z', title='awsome title', xlim=(-1,1), ylim=(-1,1), zlim=(-3,3))
        z = 2-np.abs(np.linspace(-2,2,(100)))+1
        x, y = z*np.sin(np.linspace(-2*np.pi,2*np.pi,(100))), z*np.cos(np.linspace(-np.pi,np.pi,(100)))
        pl().plot(ax, x, y, z, linewidth=2)
    for do_test in _image_comparison(
        baseline_images=['coverage_{}'.format(sub) for sub in ["imshow_interact",'annotation_interact','gradient','3d_plot',]],
        extensions=extensions):
        yield (do_test, )
def test_kernel():
    # Baseline-compare kernel plots: ARD bar chart and covariance plots in
    # 1d, 2d and 3d projections.
    np.random.seed(1239847)
    #import matplotlib
    matplotlib.rcParams.update(matplotlib.rcParamsDefault)
    #matplotlib.rcParams[u'figure.figsize'] = (4,3)
    matplotlib.rcParams[u'text.usetex'] = False
    import warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        k = GPy.kern.RBF(5, ARD=True) * GPy.kern.Linear(3, active_dims=[0,2,4], ARD=True) + GPy.kern.Bias(2)
        k.randomize()
        k2 = GPy.kern.RBF(5, ARD=True) * GPy.kern.Linear(3, active_dims=[0,2,4], ARD=True) + GPy.kern.Bias(2) + GPy.kern.White(4)
        # Copy the randomized parameters over (all but the White variance).
        k2[:-1] = k[:]
        k2.plot_ARD(['rbf', 'linear', 'bias'], legend=True)
        k2.plot_covariance(visible_dims=[0, 3], plot_limits=(-1,3))
        k2.plot_covariance(visible_dims=[2], plot_limits=(-1, 3))
        k2.plot_covariance(visible_dims=[2, 4], plot_limits=((-1, 0), (5, 3)), projection='3d', rstride=10, cstride=10)
        k2.plot_covariance(visible_dims=[1, 4])
    for do_test in _image_comparison(
        baseline_images=['kern_{}'.format(sub) for sub in ["ARD", 'cov_2d', 'cov_1d', 'cov_3d', 'cov_no_lim']],
        extensions=extensions):
        yield (do_test, )
def test_plot():
    # Baseline-compare the 1d GP regression plotting primitives (data, mean,
    # confidence, density, error bars, samples).
    np.random.seed(111)
    import matplotlib
    matplotlib.rcParams.update(matplotlib.rcParamsDefault)
    #matplotlib.rcParams[u'figure.figsize'] = (4,3)
    matplotlib.rcParams[u'text.usetex'] = False
    import warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        X = np.random.uniform(-2, 2, (40, 1))
        f = .2 * np.sin(1.3*X) + 1.3*np.cos(2*X)
        Y = f+np.random.normal(0, .1, f.shape)
        m = GPy.models.SparseGPRegression(X, Y, X_variance=np.ones_like(X)*[0.06])
        #m.optimize()
        m.plot_data()
        m.plot_mean()
        m.plot_confidence()
        m.plot_density()
        m.plot_errorbars_trainset()
        m.plot_samples()
        m.plot_data_error()
    for do_test in _image_comparison(baseline_images=['gp_{}'.format(sub) for sub in ["data", "mean", 'conf',
                                                                                     'density',
                                                                                     'out_error',
                                                                                     'samples', 'in_error']], extensions=extensions):
        yield (do_test, )
def test_twod():
    # Baseline-compare 2d-input GP plots (data, mean, inducing points,
    # input-error bars).
    np.random.seed(11111)
    import matplotlib
    matplotlib.rcParams.update(matplotlib.rcParamsDefault)
    #matplotlib.rcParams[u'figure.figsize'] = (4,3)
    matplotlib.rcParams[u'text.usetex'] = False
    X = np.random.uniform(-2, 2, (40, 2))
    f = .2 * np.sin(1.3*X[:,[0]]) + 1.3*np.cos(2*X[:,[1]])
    Y = f+np.random.normal(0, .1, f.shape)
    m = GPy.models.SparseGPRegression(X, Y, X_variance=np.ones_like(X)*[0.01, 0.2])
    #m.optimize()
    m.plot_data()
    m.plot_mean()
    m.plot_inducing(legend=False, marker='s')
    #m.plot_errorbars_trainset()
    m.plot_data_error()
    for do_test in _image_comparison(baseline_images=['gp_2d_{}'.format(sub) for sub in ["data", "mean",
                                                                                        'inducing',
                                                                                        #'out_error',
                                                                                        'in_error',
                                                                                        ]], extensions=extensions):
        yield (do_test, )
def test_threed():
    # Baseline-compare 3d-projection GP plots (samples, data, mean, inducing).
    np.random.seed(11111)
    import matplotlib
    matplotlib.rcParams.update(matplotlib.rcParamsDefault)
    #matplotlib.rcParams[u'figure.figsize'] = (4,3)
    matplotlib.rcParams[u'text.usetex'] = False
    X = np.random.uniform(-2, 2, (40, 2))
    f = .2 * np.sin(1.3*X[:,[0]]) + 1.3*np.cos(2*X[:,[1]])
    Y = f+np.random.normal(0, .1, f.shape)
    m = GPy.models.SparseGPRegression(X, Y)
    m.likelihood.variance = .1
    #m.optimize()
    m.plot_samples(projection='3d', samples=1)
    m.plot_samples(projection='3d', plot_raw=False, samples=1)
    # Sample figures are not baseline-compared; only the three below are.
    plt.close('all')
    m.plot_data(projection='3d')
    m.plot_mean(projection='3d', rstride=10, cstride=10)
    m.plot_inducing(projection='3d')
    #m.plot_errorbars_trainset(projection='3d')
    for do_test in _image_comparison(baseline_images=[
        'gp_3d_{}'.format(sub) for sub in ["data", "mean", 'inducing',
                                           ]], extensions=extensions):
        yield (do_test, )
def test_sparse():
    # Baseline-compare data + input-error plots drawn on a shared axis for a
    # sparse GP with input uncertainty.
    np.random.seed(11111)
    import matplotlib
    matplotlib.rcParams.update(matplotlib.rcParamsDefault)
    #matplotlib.rcParams[u'figure.figsize'] = (4,3)
    matplotlib.rcParams[u'text.usetex'] = False
    X = np.random.uniform(-2, 2, (40, 1))
    f = .2 * np.sin(1.3*X) + 1.3*np.cos(2*X)
    Y = f+np.random.normal(0, .1, f.shape)
    m = GPy.models.SparseGPRegression(X, Y, X_variance=np.ones_like(X)*0.1)
    #m.optimize()
    #m.plot_inducing()
    _, ax = plt.subplots()
    m.plot_data(ax=ax)
    m.plot_data_error(ax=ax)
    for do_test in _image_comparison(baseline_images=['sparse_gp_{}'.format(sub) for sub in ['data_error']], extensions=extensions):
        yield (do_test, )
def test_classification():
    # Baseline-compare GP classification plots in the three spaces:
    # likelihood (squashed), raw latent, and raw latent with link applied.
    np.random.seed(11111)
    import matplotlib
    matplotlib.rcParams.update(matplotlib.rcParamsDefault)
    #matplotlib.rcParams[u'figure.figsize'] = (4,3)
    matplotlib.rcParams[u'text.usetex'] = False
    X = np.random.uniform(-2, 2, (40, 1))
    f = .2 * np.sin(1.3*X) + 1.3*np.cos(2*X)
    Y = f+np.random.normal(0, .1, f.shape)
    m = GPy.models.GPClassification(X, Y>Y.mean())
    #m.optimize()
    _, ax = plt.subplots()
    m.plot(plot_raw=False, apply_link=False, ax=ax, samples=3)
    m.plot_errorbars_trainset(plot_raw=False, apply_link=False, ax=ax)
    _, ax = plt.subplots()
    m.plot(plot_raw=True, apply_link=False, ax=ax, samples=3)
    m.plot_errorbars_trainset(plot_raw=True, apply_link=False, ax=ax)
    _, ax = plt.subplots()
    m.plot(plot_raw=True, apply_link=True, ax=ax, samples=3)
    m.plot_errorbars_trainset(plot_raw=True, apply_link=True, ax=ax)
    for do_test in _image_comparison(baseline_images=['gp_class_{}'.format(sub) for sub in ["likelihood", "raw", 'raw_link']], extensions=extensions):
        yield (do_test, )
def test_sparse_classification():
    # Same three plotting spaces as test_classification, but for the sparse
    # classifier; rtol=2 loosens the npz comparison for the sampled curves.
    np.random.seed(11111)
    import matplotlib
    matplotlib.rcParams.update(matplotlib.rcParamsDefault)
    #matplotlib.rcParams[u'figure.figsize'] = (4,3)
    matplotlib.rcParams[u'text.usetex'] = False
    X = np.random.uniform(-2, 2, (40, 1))
    f = .2 * np.sin(1.3*X) + 1.3*np.cos(2*X)
    Y = f+np.random.normal(0, .1, f.shape)
    m = GPy.models.SparseGPClassification(X, Y>Y.mean())
    #m.optimize()
    m.plot(plot_raw=False, apply_link=False, samples_likelihood=3)
    np.random.seed(111)
    m.plot(plot_raw=True, apply_link=False, samples=3)
    np.random.seed(111)
    m.plot(plot_raw=True, apply_link=True, samples=3)
    for do_test in _image_comparison(baseline_images=['sparse_gp_class_{}'.format(sub) for sub in ["likelihood", "raw", 'raw_link']], extensions=extensions, rtol=2):
        yield (do_test, )
def test_gplvm():
    # Baseline-compare GPLVM latent-space plots, loading a pre-trained model
    # state from b-gplvm-save.npz instead of optimizing during the test.
    from GPy.models import GPLVM
    np.random.seed(12345)
    matplotlib.rcParams.update(matplotlib.rcParamsDefault)
    #matplotlib.rcParams[u'figure.figsize'] = (4,3)
    matplotlib.rcParams[u'text.usetex'] = False
    #Q = 3
    # Define dataset
    #N = 60
    #k1 = GPy.kern.RBF(5, variance=1, lengthscale=1./np.random.dirichlet(np.r_[10,10,10,0.1,0.1]), ARD=True)
    #k2 = GPy.kern.RBF(5, variance=1, lengthscale=1./np.random.dirichlet(np.r_[10,0.1,10,0.1,10]), ARD=True)
    #k3 = GPy.kern.RBF(5, variance=1, lengthscale=1./np.random.dirichlet(np.r_[0.1,0.1,10,10,10]), ARD=True)
    #X = np.random.normal(0, 1, (N, 5))
    #A = np.random.multivariate_normal(np.zeros(N), k1.K(X), Q).T
    #B = np.random.multivariate_normal(np.zeros(N), k2.K(X), Q).T
    #C = np.random.multivariate_normal(np.zeros(N), k3.K(X), Q).T
    #Y = np.vstack((A,B,C))
    #labels = np.hstack((np.zeros(A.shape[0]), np.ones(B.shape[0]), np.ones(C.shape[0])*2))
    #k = RBF(Q, ARD=True, lengthscale=2) # + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
    pars = np.load(os.path.join(basedir, 'b-gplvm-save.npz'))
    Y = pars['Y']
    Q = pars['Q']
    labels = pars['labels']
    import warnings
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always') # always print
        m = GPLVM(Y, Q, initialize=False)
    # Restore the saved parameter vector without re-running inference.
    m.update_model(False)
    m.initialize_parameter()
    m[:] = pars['gplvm_p']
    m.update_model(True)
    #m.optimize(messages=0)
    np.random.seed(111)
    m.plot_latent(labels=labels)
    np.random.seed(111)
    m.plot_scatter(projection='3d', labels=labels)
    np.random.seed(111)
    m.plot_magnification(labels=labels)
    m.plot_steepest_gradient_map(resolution=10, data_labels=labels)
    for do_test in _image_comparison(baseline_images=['gplvm_{}'.format(sub) for sub in ["latent", "latent_3d", "magnification", 'gradient']],
                                     extensions=extensions,
                                     tol=12):
        yield (do_test, )
def test_bayesian_gplvm():
    # Baseline-compare Bayesian GPLVM plots (inducing, latent, magnification,
    # gradient), restoring a pre-trained state from b-gplvm-save.npz.
    from ..models import BayesianGPLVM
    np.random.seed(12345)
    matplotlib.rcParams.update(matplotlib.rcParamsDefault)
    #matplotlib.rcParams[u'figure.figsize'] = (4,3)
    matplotlib.rcParams[u'text.usetex'] = False
    #Q = 3
    # Define dataset
    #N = 10
    #k1 = GPy.kern.RBF(5, variance=1, lengthscale=1./np.random.dirichlet(np.r_[10,10,10,0.1,0.1]), ARD=True)
    #k2 = GPy.kern.RBF(5, variance=1, lengthscale=1./np.random.dirichlet(np.r_[10,0.1,10,0.1,10]), ARD=True)
    #k3 = GPy.kern.RBF(5, variance=1, lengthscale=1./np.random.dirichlet(np.r_[0.1,0.1,10,10,10]), ARD=True)
    #X = np.random.normal(0, 1, (N, 5))
    #A = np.random.multivariate_normal(np.zeros(N), k1.K(X), Q).T
    #B = np.random.multivariate_normal(np.zeros(N), k2.K(X), Q).T
    #C = np.random.multivariate_normal(np.zeros(N), k3.K(X), Q).T
    #Y = np.vstack((A,B,C))
    #labels = np.hstack((np.zeros(A.shape[0]), np.ones(B.shape[0]), np.ones(C.shape[0])*2))
    #k = RBF(Q, ARD=True, lengthscale=2) # + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
    pars = np.load(os.path.join(basedir, 'b-gplvm-save.npz'))
    Y = pars['Y']
    Q = pars['Q']
    labels = pars['labels']
    import warnings
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always') # always print
        m = BayesianGPLVM(Y, Q, initialize=False)
    # Restore the saved parameter vector without re-running inference.
    m.update_model(False)
    m.initialize_parameter()
    m[:] = pars['bgplvm_p']
    m.update_model(True)
    #m.optimize(messages=0)
    np.random.seed(111)
    m.plot_inducing(projection='2d')
    np.random.seed(111)
    m.plot_inducing(projection='3d')
    np.random.seed(111)
    m.plot_latent(projection='2d', labels=labels)
    np.random.seed(111)
    m.plot_scatter(projection='3d', labels=labels)
    np.random.seed(111)
    m.plot_magnification(labels=labels)
    np.random.seed(111)
    m.plot_steepest_gradient_map(resolution=10, data_labels=labels)
    for do_test in _image_comparison(baseline_images=['bayesian_gplvm_{}'.format(sub) for sub in ["inducing", "inducing_3d", "latent", "latent_3d", "magnification", 'gradient']], extensions=extensions):
        yield (do_test, )
if __name__ == '__main__':
    # Allow running this module directly through the nose test runner.
    import nose
    nose.main(defaultTest='./plotting_tests.py')
| 42.688235 | 202 | 0.594231 |
8eb7081ca1edeb9c34310ac266b87748f90c7670 | 2,546 | py | Python | src/arch/x86/isa/insts/simd64/integer/__init__.py | qianlong4526888/haha | 01baf923693873c11ae072ce4dde3d8f1d7b6239 | [
"BSD-3-Clause"
] | 135 | 2016-10-21T03:31:49.000Z | 2022-03-25T01:22:20.000Z | src/arch/x86/isa/insts/simd64/integer/__init__.py | qianlong4526888/haha | 01baf923693873c11ae072ce4dde3d8f1d7b6239 | [
"BSD-3-Clause"
] | 35 | 2017-03-10T17:57:46.000Z | 2022-02-18T17:34:16.000Z | src/arch/x86/isa/insts/simd64/integer/__init__.py | qianlong4526888/haha | 01baf923693873c11ae072ce4dde3d8f1d7b6239 | [
"BSD-3-Clause"
] | 48 | 2016-12-08T12:03:13.000Z | 2022-02-16T09:16:13.000Z | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Sub-modules of the 64-bit media (MMX) instruction set; each defines a
# ``microcode`` string that is concatenated into this package's microcode.
categories = ["exit_media_state",
              "data_transfer",
              "data_conversion",
              "data_reordering",
              "arithmetic",
              "shift",
              "compare",
              "logical",
              "save_and_restore_state"]

microcode = '''
# 64 bit multimedia instructions
'''

for category in categories:
    # Python 2 ``exec`` statement: performs an implicit-relative import of
    # each category module and appends its microcode definitions.
    exec "import %s as cat" % category
    microcode += cat.microcode
ee3aedd1e3298af3794542fe3e5245e32c98219d | 1,576 | py | Python | gatewayconfig/processors/diagnostics_processor.py | ganey/hm-config | b24dce9d9b10367010d163400b0a1bc8a482b252 | [
"MIT"
] | null | null | null | gatewayconfig/processors/diagnostics_processor.py | ganey/hm-config | b24dce9d9b10367010d163400b0a1bc8a482b252 | [
"MIT"
] | null | null | null | gatewayconfig/processors/diagnostics_processor.py | ganey/hm-config | b24dce9d9b10367010d163400b0a1bc8a482b252 | [
"MIT"
] | null | null | null | import json
import requests
from time import sleep
from gatewayconfig.logger import get_logger
from gatewayconfig.gatewayconfig_shared_state import GatewayconfigSharedState
logger = get_logger(__name__)
DIAGNOSTICS_REFRESH_SECONDS = 60
class DiagnosticsProcessor:
    """Periodically polls the diagnostics JSON endpoint and mirrors the
    pass/fail result into the shared state object."""

    def __init__(self, diagnostics_json_url, shared_state: GatewayconfigSharedState):
        self.shared_state = shared_state
        self.diagnostics_json_url = diagnostics_json_url

    def run(self):
        """Run forever, refreshing diagnostics every
        DIAGNOSTICS_REFRESH_SECONDS seconds."""
        while True:
            logger.debug("Running DiagnosticsProcessor")
            self.read_diagnostics()
            logger.debug(self.shared_state)
            sleep(DIAGNOSTICS_REFRESH_SECONDS)

    def read_diagnostics_and_get_ok(self):
        """Fetch the diagnostics JSON and return its 'PF' (pass/fail) value."""
        logger.debug("Reading diagnostics from %s" % self.diagnostics_json_url)
        response = requests.get(self.diagnostics_json_url)
        diagnostics_json = response.json()
        logger.debug("Read diagnostics %s" % diagnostics_json)
        return diagnostics_json['PF']

    def read_diagnostics(self):
        """Update shared_state.are_diagnostics_ok, treating expected
        fetch/parse errors as a failed diagnostics state."""
        try:
            self.shared_state.are_diagnostics_ok = self.read_diagnostics_and_get_ok()
        except (FileNotFoundError, json.JSONDecodeError, ValueError):
            # Merged the three previously separate handlers: they all set the
            # same failed state, and json.JSONDecodeError is a ValueError
            # subclass anyway.
            self.shared_state.are_diagnostics_ok = False
        except Exception as e:
            # logger.warn is a deprecated alias of logger.warning.
            logger.warning("Unexpected error when trying to read diagnostics file: %s" % e)
| 33.531915 | 88 | 0.714467 |
9c4f93982cf9e328004ec8449e9b4b5ba99687dc | 1,242 | py | Python | bot_plugins/zhihu_daily.py | UtopiaXC/Utopia-Bot-For-QQ | 87281f509e20c2d5d25367614d5202f6e53cea50 | [
"MIT"
] | 5 | 2021-03-25T15:18:18.000Z | 2021-03-31T02:29:28.000Z | bot_plugins/zhihu_daily.py | UtopiaXC/Utopia-Bot-For-QQ | 87281f509e20c2d5d25367614d5202f6e53cea50 | [
"MIT"
] | null | null | null | bot_plugins/zhihu_daily.py | UtopiaXC/Utopia-Bot-For-QQ | 87281f509e20c2d5d25367614d5202f6e53cea50 | [
"MIT"
] | null | null | null | import asyncio
import random
import time
from nonebot.command import CommandSession
from nonebot.experimental.plugin import on_command
from aiocqhttp.message import MessageSegment # aiocqhttp 是 nonebot 的自带依赖
import requests
import json
__plugin_name__ = 'daily'
__plugin_usage__ = '用法: 对我说 "daily",我会回复随机一条知乎今日日报'
@on_command('daily', aliases='日报')
async def _(session: CommandSession):
    """Reply with one randomly chosen story from today's Zhihu Daily feed.

    Fetches the latest-stories API, picks a random story, and sends its
    title, link and cover image (as a CQ image segment) back to the session.
    """
    header = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/89.0.4389.90 "
                      "Safari/537.36 "
    }
    res = requests.get("https://news-at.zhihu.com/api/3/stories/latest", headers=header)
    json_str = res.json()
    # Pick one story uniformly at random from the latest feed.
    index = random.randint(0, len(json_str["stories"]) - 1)
    title = json_str["stories"][index]["title"]
    image = json_str["stories"][index]["images"][0]
    url = json_str["stories"][index]["url"]
    # Debug output to the bot's console.
    print(title)
    print(image)
    print(url)
    localtime = time.asctime(time.localtime(time.time()))
    await session.send('北京时间:' + localtime + " 知乎日报"
                       + "\n文章标题:" + title
                       + "\n文章链接:" + url
                       + "\n文章封面:" + MessageSegment.image(image))
| 34.5 | 105 | 0.628019 |
28d72aefe0fadab55700194957aab9cf00aae605 | 1,924 | py | Python | homeassistant/components/auth/client.py | veresspeter/home-assistant | a3c22c6eec2c77a28a8fa859e899984269708666 | [
"Apache-2.0"
] | 2 | 2018-02-14T07:29:46.000Z | 2018-02-20T08:15:51.000Z | homeassistant/components/auth/client.py | veresspeter/home-assistant | a3c22c6eec2c77a28a8fa859e899984269708666 | [
"Apache-2.0"
] | 3 | 2021-09-08T03:43:17.000Z | 2022-03-12T01:01:30.000Z | homeassistant/components/auth/client.py | veresspeter/home-assistant | a3c22c6eec2c77a28a8fa859e899984269708666 | [
"Apache-2.0"
] | 1 | 2020-04-12T17:17:42.000Z | 2020-04-12T17:17:42.000Z | """Helpers to resolve client ID/secret."""
import base64
from functools import wraps
import hmac
import aiohttp.hdrs
def verify_client(method):
    """Decorator that authenticates the request's client id/secret.

    The wrapped view runs only when the Basic-auth credentials verify;
    otherwise a 401 ``invalid_client`` JSON response is returned.
    """
    @wraps(method)
    async def check_client_then_call(view, request, *args, **kwargs):
        """Reject with 401 unless the client credentials verify."""
        verified_id = await _verify_client(request)
        if verified_id is not None:
            return await method(view, request, *args,
                                client_id=verified_id, **kwargs)
        return view.json({'error': 'invalid_client'}, status_code=401)
    return check_client_then_call
async def _verify_client(request):
    """Method to verify the client id/secret in consistent time.

    By using a consistent time for looking up client id and comparing the
    secret, we prevent attacks by malicious actors trying different client ids
    and are able to derive from the time it takes to process the request if
    they guessed the client id correctly.

    Returns the client_id string when the credentials are valid, otherwise
    ``None``.
    """
    if aiohttp.hdrs.AUTHORIZATION not in request.headers:
        return None

    # Header is expected to be "Basic <base64(client_id:client_secret)>".
    auth_type, auth_value = \
        request.headers.get(aiohttp.hdrs.AUTHORIZATION).split(' ', 1)

    if auth_type != 'Basic':
        return None

    decoded = base64.b64decode(auth_value).decode('utf-8')
    try:
        client_id, client_secret = decoded.split(':', 1)
    except ValueError:
        # If no ':' in decoded
        return None

    client = await request.app['hass'].auth.async_get_client(client_id)
    if client is None:
        # Still do a compare so we run same time as if a client was found.
        hmac.compare_digest(client_secret.encode('utf-8'),
                            client_secret.encode('utf-8'))
        return None

    if hmac.compare_digest(client_secret.encode('utf-8'),
                           client.secret.encode('utf-8')):
        return client_id

    return None
| 30.0625 | 78 | 0.644491 |
2a9400aa3477c80b252f7df9520b9095f7903d17 | 2,302 | py | Python | src/run_video.py | PartitionChoi/SSJointTracker_mod | 28aa6642a5ff74ab2a3abc6e5bd53b4a457585e5 | [
"Apache-2.0"
] | null | null | null | src/run_video.py | PartitionChoi/SSJointTracker_mod | 28aa6642a5ff74ab2a3abc6e5bd53b4a457585e5 | [
"Apache-2.0"
] | null | null | null | src/run_video.py | PartitionChoi/SSJointTracker_mod | 28aa6642a5ff74ab2a3abc6e5bd53b4a457585e5 | [
"Apache-2.0"
] | null | null | null | import argparse
import logging
import time
import cv2
import numpy as np
from estimator import TfPoseEstimator
from networks import get_graph_path, model_wh
logger = logging.getLogger('TfPoseEstimator-Video')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
fps_time = 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='tf-pose-estimation Video')
parser.add_argument('--video', type=str, default='/home/soosang/바탕화면/demo_video/동작1 52~106.mp4')
parser.add_argument('--zoom', type=float, default=1.0)
parser.add_argument('--resolution', type=str, default='432x368', help='network input resolution. default=432x368')
parser.add_argument('--model', type=str, default='cmu', help='cmu / mobilenet_thin')
parser.add_argument('--show-process', type=bool, default='true',
help='for debug purpose, if enabled, speed for inference is dropped.')
args = parser.parse_args()
logger.debug('initialization %s : %s' % (args.model, get_graph_path(args.model)))
w, h = model_wh(args.resolution)
e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
#logger.debug('cam read+')
#cam = cv2.VideoCapture(args.camera)
cap = cv2.VideoCapture(args.video)
#ret_val, image = cap.read()
#logger.info('cam image=%dx%d' % (image.shape[1], image.shape[0]))
if (cap.isOpened()== False):
print("Error opening video stream or file")
while(cap.isOpened()):
ret_val, image = cap.read()
humans = e.inference(image)
image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
#logger.debug('show+')
cv2.putText(image,
"FPS: %f" % (1.0 / (time.time() - fps_time)),
(10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 255, 0), 2)
res=(640,480)
image = cv2.resize(image,res,interpolation=cv2.INTER_AREA)
cv2.imshow('tf-pose-estimation result', image)
fps_time = time.time()
if cv2.waitKey(1) == 27:
break
cv2.destroyAllWindows()
logger.debug('finished+')
| 35.96875 | 118 | 0.652911 |
00788346e9785ab5ef59a72c494786341d4f8426 | 1,604 | py | Python | app/tasks.py | tinkerNamedFerro/5head.biz | 413da41f03e3b3f0ea0d86c0b56a8203867c7c3b | [
"MIT"
] | null | null | null | app/tasks.py | tinkerNamedFerro/5head.biz | 413da41f03e3b3f0ea0d86c0b56a8203867c7c3b | [
"MIT"
] | null | null | null | app/tasks.py | tinkerNamedFerro/5head.biz | 413da41f03e3b3f0ea0d86c0b56a8203867c7c3b | [
"MIT"
] | null | null | null | import os
import time
from celery import Celery
from celery.schedules import crontab
from datetime import datetime
from .dash.biz_insights import data_parsing
from .dash.biz_insights import getThreads
# Broker/backend URLs come from the environment with local-Redis fallbacks.
# Fixed: the broker line originally ended with a stray trailing comma, which
# made CELERY_BROKER_URL a 1-tuple instead of a string, so a tuple was passed
# to Celery(broker=...).
CELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL', 'redis://localhost:6379')
CELERY_RESULT_BACKEND = os.environ.get('CELERY_RESULT_BACKEND', 'redis://localhost:6379')
# Database host; "127.0.0.1" is treated below as "local ssh tunnel" mode.
PSQL_HOST = os.environ.get('PSQL_HOST', '127.0.0.1')

celery = Celery('tasks', broker=CELERY_BROKER_URL, backend=CELERY_RESULT_BACKEND)
@celery.task(name='tasks.add')
def add(x: int, y: int) -> int:
    """Demo task: return x + y after an artificial 5-second delay."""
    time.sleep(5)
    return x + y


@celery.task(name='tasks.check')
def check():
    """Demo task that only prints a message."""
    print("I am checking your stuff")


@celery.task(name='tasks.regenGraphData')
def regenGraphData():
    """Regenerate the cached graph/formatting data."""
    # updates formatting data pickle
    data_parsing.updateAllTickerData()


@celery.task(name='tasks.getNewChanData')
def getNewChanData():
    """Scrape recent 4chan thread data into the database."""
    # If db is being called through a ssh tunnel port (locally) don't run
    if PSQL_HOST != "127.0.0.1":
        getThreads.LoopPages()


# Periodic task schedule for celery beat.
# NOTE(review): the crontab minute offsets are computed from datetime.now()
# at import time, so the effective schedule depends on when the worker
# process started — confirm this is intended.
celery.conf.beat_schedule = {
    # "run-me-every-ten-seconds": {
    #     "task": "tasks.check",
    #     "schedule": 10.0
    # },
    "scrap-recent-chan-data": {
        "task": "tasks.getNewChanData",
        'schedule': crontab(minute=(datetime.now().minute + 1) % 10)
    },
    "regenGraph": {
        "task": "tasks.regenGraphData",
        'schedule': crontab(minute=(datetime.now().minute + 2) % 180), #Runs hourly starting 2 minutes from app start https://stackoverflow.com/questions/31764528/running-celery-task-when-celery-beat-starts
    }
}
| 28.642857 | 206 | 0.692643 |
12ff2a9789410a10299974aaa8bef141ea107fb0 | 552 | py | Python | docker/demo9/main.py | aloa04/practice | 0f11874a597450a70f3c6f01fe64b6aa9e9d5b9f | [
"Apache-2.0"
] | null | null | null | docker/demo9/main.py | aloa04/practice | 0f11874a597450a70f3c6f01fe64b6aa9e9d5b9f | [
"Apache-2.0"
] | null | null | null | docker/demo9/main.py | aloa04/practice | 0f11874a597450a70f3c6f01fe64b6aa9e9d5b9f | [
"Apache-2.0"
] | null | null | null | from flask import Flask,request,abort
# Flask application instance and an in-memory demo user list (not referenced
# by the routes below).
app = Flask(__name__)
usuarios=[1,2,3,4,5]
@app.route('/')
def index():
    """Landing page for the exercise demo."""
    greeting = 'Este es el ejercicio 3 y no te estas equivocando!'
    return greeting
@app.route('/users/<user_id>', methods = ['GET', 'POST', 'DELETE'])
def getuser(user_id):
    """Demo endpoint: echo a message depending on the HTTP verb used."""
    responses = {
        'GET': "Lo tienes",
        'POST': "Lo enviaste",
        'DELETE': "Lo eliminaste",
    }
    message = responses.get(request.method)
    if message is None:
        # Any other verb that reaches us is rejected explicitly.
        abort(405, description="Method not allowed")
    return message
# Listen on all interfaces so container/VM port mapping works.
app.run(host='0.0.0.0', port=5000)
| 24 | 67 | 0.626812 |
c23c407dc687935b9b9d5d3c1e3ad82fc4bb7ef6 | 3,497 | py | Python | azure-mgmt-network/azure/mgmt/network/v2019_02_01/models/express_route_link.py | xiafu-msft/azure-sdk-for-python | 4d9560cfd519ee60667f3cc2f5295a58c18625db | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-network/azure/mgmt/network/v2019_02_01/models/express_route_link.py | xiafu-msft/azure-sdk-for-python | 4d9560cfd519ee60667f3cc2f5295a58c18625db | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-network/azure/mgmt/network/v2019_02_01/models/express_route_link.py | xiafu-msft/azure-sdk-for-python | 4d9560cfd519ee60667f3cc2f5295a58c18625db | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ExpressRouteLink(SubResource):
    """ExpressRouteLink.

    ExpressRouteLink child resource definition.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar router_name: Name of Azure router associated with physical port.
    :vartype router_name: str
    :ivar interface_name: Name of Azure router interface.
    :vartype interface_name: str
    :ivar patch_panel_id: Mapping between physical port to patch panel port.
    :vartype patch_panel_id: str
    :ivar rack_id: Mapping of physical patch panel to rack.
    :vartype rack_id: str
    :ivar connector_type: Physical fiber port type. Possible values include:
     'LC', 'SC'
    :vartype connector_type: str or
     ~azure.mgmt.network.v2019_02_01.models.ExpressRouteLinkConnectorType
    :param admin_state: Administrative state of the physical port. Possible
     values include: 'Enabled', 'Disabled'
    :type admin_state: str or
     ~azure.mgmt.network.v2019_02_01.models.ExpressRouteLinkAdminState
    :ivar provisioning_state: The provisioning state of the ExpressRouteLink
     resource. Possible values are: 'Succeeded', 'Updating', 'Deleting', and
     'Failed'.
    :vartype provisioning_state: str
    :param name: Name of child port resource that is unique among child port
     resources of the parent.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource
     is updated.
    :vartype etag: str
    """

    # Server-populated (read-only) properties; this file is generated by
    # AutoRest — manual edits will be lost on regeneration.
    _validation = {
        'router_name': {'readonly': True},
        'interface_name': {'readonly': True},
        'patch_panel_id': {'readonly': True},
        'rack_id': {'readonly': True},
        'connector_type': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'etag': {'readonly': True},
    }

    # Wire-format key paths used for (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'router_name': {'key': 'properties.routerName', 'type': 'str'},
        'interface_name': {'key': 'properties.interfaceName', 'type': 'str'},
        'patch_panel_id': {'key': 'properties.patchPanelId', 'type': 'str'},
        'rack_id': {'key': 'properties.rackId', 'type': 'str'},
        'connector_type': {'key': 'properties.connectorType', 'type': 'str'},
        'admin_state': {'key': 'properties.adminState', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteLink, self).__init__(**kwargs)
        # Read-only fields start as None and are populated by the service.
        self.router_name = None
        self.interface_name = None
        self.patch_panel_id = None
        self.rack_id = None
        self.connector_type = None
        self.admin_state = kwargs.get('admin_state', None)
        self.provisioning_state = None
        self.name = kwargs.get('name', None)
        self.etag = None
| 40.195402 | 85 | 0.631398 |
d66f4e1061e782f7cdbf1452c6b692ced550f168 | 6,581 | py | Python | streamlit_app.py | konvica/read-presonal-data | e3cfe0565c39f8226c91ac6e5d20c737da38283b | [
"MIT"
] | null | null | null | streamlit_app.py | konvica/read-presonal-data | e3cfe0565c39f8226c91ac6e5d20c737da38283b | [
"MIT"
] | null | null | null | streamlit_app.py | konvica/read-presonal-data | e3cfe0565c39f8226c91ac6e5d20c737da38283b | [
"MIT"
] | null | null | null | import io
import logging
import os
import cv2 as cv
import en_core_web_lg
import fitz
import numpy as np
import streamlit as st
from PIL import Image
from presidio_analyzer import AnalyzerEngine
from presidio_image_redactor import ImageAnalyzerEngine
SCORE_THRESHOLD = 0.5
def local_css(file_name):
    """Inject a local CSS file into the Streamlit page."""
    with open(file_name) as css_file:
        css = css_file.read()
    st.markdown(f'<style>{css}</style>', unsafe_allow_html=True)
# Load models - Because this is cached it will only happen once.
@st.cache(allow_output_mutation=True)
def load_nlp():
    """Load (and cache) the spaCy English large model."""
    logging.info("loading spacy nlp")
    return en_core_web_lg.load()


@st.cache(allow_output_mutation=True)
def load_analyzer():
    """Load (and cache) the Presidio text analyzer engine."""
    logging.info("loading analyzer")
    return AnalyzerEngine()


@st.cache(allow_output_mutation=True)
def load_image_analyzer():
    """Load (and cache) the Presidio image (OCR) analyzer engine."""
    logging.info("loading image analyzer")
    return ImageAnalyzerEngine()


@st.cache(allow_output_mutation=True)
def load_face_detector():
    """Load (and cache) the OpenCV Haar-cascade frontal-face detector."""
    logging.info("loading face cascade detector")
    face_cascade = cv.CascadeClassifier()
    face_cascade.load("require/haarcascade_frontalface_default.xml")
    return face_cascade
def process_face(image):
    """Detect faces in *image*; returns the detector's bounding boxes."""
    detector = load_face_detector()
    # Grayscale + histogram equalization before running the Haar cascade.
    equalized = cv.equalizeHist(cv.cvtColor(image, cv.COLOR_BGR2GRAY))
    return detector.detectMultiScale(equalized)
def process_image_ocr(image):
    """Return OCR-detected PII regions as an array of [x, y, w, h] rows."""
    analyzer = load_image_analyzer()
    boxes = []
    for spot in analyzer.analyze(image):
        boxes.append([spot.left, spot.top, spot.width, spot.height])
    return np.array(boxes)
def process_image(image):
    """Combine face boxes and OCR PII boxes into one (N, 4) array.

    Returns an empty array when nothing was detected.
    """
    candidates = (process_face(image), process_image_ocr(image))
    detections = [boxes for boxes in candidates if len(boxes) > 0]
    if not detections:
        return np.array([])
    if len(detections) == 1:
        return detections[0]
    return np.concatenate(detections, axis=0)
def process_text(text):
    """Run the Presidio analyzer over *text*.

    Returns a lazy ``filter`` of results whose confidence score exceeds
    SCORE_THRESHOLD; callers iterate it once.
    """
    analyzer = load_analyzer()
    # Call analyzer to get results
    results = analyzer.analyze(text=text, language='en', )
    return filter(lambda pii: pii.score > SCORE_THRESHOLD, results)
def process_page(page):
    """Scan one PDF page for faces/PII in embedded images and PII in text.

    Returns ``(img_data, piis)``: the page's image blocks each annotated
    with a ``'faces_spots'`` detection array, and the filtered text PII
    results from process_text().

    Raises:
        ValueError: if an embedded image has an unsupported bits-per-component.
    """
    ## scan for images
    data = page.get_text('dict')
    img_data = list(filter(lambda block: block['type'] == 1, data['blocks']))
    for i, img in enumerate(img_data):
        img_bytes = img['image']
        # Decode according to the bits-per-component reported for the image.
        if img['bpc'] == 8:
            decoded = cv.imdecode(np.frombuffer(img_bytes, np.uint8), -1)
        elif img['bpc'] == 16:
            decoded = cv.imdecode(np.frombuffer(img_bytes, np.uint16), -1)
        else:
            logging.error(f"Unknown bitrate - img.bpc:{img['bpc']}")
            # Fixed: the original raised a bare string, which itself raises
            # TypeError ("exceptions must derive from BaseException").
            raise ValueError(f"Unknown bitrate - img.bpc:{img['bpc']}")
        img_data[i]['faces_spots'] = process_image(decoded)

    ## scan text
    text = page.get_text('text')
    piis = process_text(text)
    return img_data, piis
def process_pdf(doc):
    """Process every page of a PDF document.

    For each page: detect faces/PII in embedded images, draw red boxes on a
    rendering of the page, and build an HTML snippet of the page text with
    detected PII spans highlighted.

    Returns ``(anot_texts, page_imgs)``: one HTML string and one annotated
    numpy image per page.
    """
    anot_texts = []
    page_imgs = []
    for j in range(doc.page_count):
        page = doc.load_page(j)
        pdf_img_data, pdf_page_piis = process_page(page)
        ## convert page to numpy image
        pix = page.get_pixmap()
        with Image.frombytes("RGB", [pix.width, pix.height], pix.samples) as pil_:
            page_img = np.array(pil_)
        ## draw faces on page
        for block in pdf_img_data:
            x0, y0, x1, y1 = block['bbox']
            for face in block['faces_spots']:
                x, y, w, h = face
                # Scale detection coords from image-pixel space into the
                # block's bounding box on the rendered page.
                rel_x0 = (x / block['width']) * (x1 - x0)
                rel_x1 = ((x + w) / block['width']) * (x1 - x0)
                rel_y0 = (y / block['height']) * (y1 - y0)
                rel_y1 = ((y + h) / block['height']) * (y1 - y0)
                cv.rectangle(page_img, (int(x0 + rel_x0), int(y0 + rel_y0)),
                             (int(x0 + rel_x1), int(y0 + rel_y1)), (255, 0, 0), 2)
        # construct colored text: copy the page text, wrapping each PII span
        # in a highlight <span> tagged with its entity type.
        text = page.get_text('text')
        i = 0
        pretty_text = "<div>"
        for pii in pdf_page_piis:
            pretty_text += text[i:pii.start]
            pretty_text += " <span class='highlight red'>" + \
                           text[pii.start:pii.end] + \
                           f"<span class='bold'>{str(pii.entity_type)}</span> </span>"
            i = pii.end
        # Fixed: the original dropped all page text after the last PII match;
        # append the remaining tail before closing the div.
        pretty_text += text[i:]
        pretty_text += "</div>"
        page_imgs.append(page_img)
        anot_texts.append(pretty_text)
    return anot_texts, page_imgs
def process_image_final(image):
    """Detect faces/PII on a single image and draw red boxes around them.

    Returns ``([], [annotated_image])`` to mirror process_pdf's return shape.
    """
    annotated = image.copy()
    for (x, y, w, h) in process_image(image):
        top_left = (int(x), int(y))
        bottom_right = (int(x + w), int(y + h))
        cv.rectangle(annotated, top_left, bottom_right, (255, 0, 0), 2)
    # Text extracted by OCR is not surfaced for plain images, only detections.
    return [], [annotated]
def process_file(buff, ext):
    """Process an uploaded PDF or image buffer and render results in Streamlit.

    Args:
        buff: a file-like object containing the document bytes.
        ext: file extension (without dot) used to select the processing path.
    """
    anot_texts, page_imgs = [], []
    if ext == 'pdf':
        with fitz.open(stream=buff.read(), filetype="pdf") as doc:
            anot_texts, page_imgs = process_pdf(doc)
    elif ext in ["jpg", "jpeg", "png", "PNG", 'JPG']:
        with Image.open(buff) as buf:
            image = np.array(buf)
        # Drop the alpha channel: downstream detectors expect 3-channel RGB.
        if image.shape[-1] == 4:
            image = cv.cvtColor(image, cv.COLOR_RGBA2RGB)
        anot_texts, page_imgs = process_image_final(image)

    with st.beta_expander("Analyzed text"):
        # svg = page.get_svg_image()
        # b64 = base64.b64encode(svg.encode("utf-8")).decode("utf-8")
        # html = r'<img src="data:image/svg+xml;base64,{}"/>'.format(b64)
        # st.write(html, unsafe_allow_html=True)
        for pretty_text in anot_texts:
            st.markdown(pretty_text, unsafe_allow_html=True)
    for page_img in page_imgs:
        st.image(page_img)
def main():
    """Streamlit entry point: scan an uploaded file, or a bundled sample."""
    st.title('Detect Personal Information')
    uploaded = st.file_uploader("Upload an pdf/image", type=["png", "jpg", "jpeg", "pdf", "PNG", 'JPG'])
    if uploaded is None:
        # No upload: fall back to the bundled demo image.
        logging.info("No input file - using sample image.")
        extension = 'png'
        with open("data/tds_jessica.png", 'rb') as sample:
            buffer = io.BytesIO(sample.read())
    else:
        extension = os.path.splitext(uploaded.name)[-1].replace(".", "")
        buffer = uploaded
    process_file(buffer, extension)
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # logging.info(fitz.__doc__)
    # Inject the app-wide CSS (highlight classes used in rendered HTML).
    local_css("style.css")
    main()
| 32.579208 | 111 | 0.618447 |
2b79e058778226be0e1389bfb854952403efa106 | 6,435 | py | Python | feedreader/test_utils.py | fundor333/django-feedreader | 791eefb22720000723e4da293e97aeae2f4513b0 | [
"BSD-3-Clause"
] | 76 | 2015-02-12T01:07:18.000Z | 2022-02-20T13:53:36.000Z | feedreader/test_utils.py | fundor333/django-feedreader | 791eefb22720000723e4da293e97aeae2f4513b0 | [
"BSD-3-Clause"
] | 24 | 2015-02-22T16:20:08.000Z | 2021-09-25T08:37:29.000Z | feedreader/test_utils.py | fundor333/django-feedreader | 791eefb22720000723e4da293e97aeae2f4513b0 | [
"BSD-3-Clause"
] | 33 | 2015-03-31T15:51:29.000Z | 2022-02-20T13:46:59.000Z | """Feedreader Utils Unit Test."""
from datetime import datetime
from io import StringIO
from django.test import TestCase
from .models import Feed
from .utils import poll_feed
from unittest.mock import Mock, patch
import pytz
class PollFeedTest(TestCase):
    """
    Test polling feeds.
    """

    def setUp(self):
        """Build a mocked feedparser result and a mocked Feed DB object."""
        # Create feedparser.parse_mock object
        parse_mock = Mock()
        del parse_mock.return_value.feed.bozo_exception
        parse_mock.return_value.feed.published_parsed = (2014, 1, 1,
                                                        12, 0, 0,
                                                        2, 1, 0) # 2014-01-01 12:00:00
        parse_mock.return_value.entries = []
        self.parse_mock = parse_mock
        # Create .models.Feed mock object
        feed_mock = Mock(spec=Feed)
        feed_mock.xml_url = 'test-feed-url'
        feed_mock.published_time = None
        self.feed_mock = feed_mock

    def test_published_time(self):
        """Test Published Time variations"""
        with patch('feedreader.utils.feedparser.parse', self.parse_mock):
            # No published time in DB
            feed_mock = self.feed_mock
            feed_mock.published_time = None
            with patch('sys.stdout', new=StringIO()): # Suppress printed output from test
                poll_feed(feed_mock, verbose=True)
            # Published time in DB later than on feed
            feed_mock.published_time = pytz.utc.localize(datetime(2014, 1, 1, 13, 0, 0))
            with patch('sys.stdout', new=StringIO()): # Suppress printed output from test
                poll_feed(feed_mock, verbose=True)

    def test_missing_attribute(self):
        """Test with missing attribute: description_detail"""
        parse_mock = self.parse_mock
        del parse_mock.return_value.feed.description_detail
        with patch('feedreader.utils.feedparser.parse', parse_mock):
            with patch('sys.stdout', new=StringIO()): # Suppress printed output from test
                poll_feed(self.feed_mock, verbose=True)

    def test_with_feed_description(self):
        """Test with description_detail present"""
        parse_mock = self.parse_mock
        parse_mock.return_value.feed.description_detail.type = 'text/plain'
        parse_mock.return_value.feed.description = 'Test Feed Description'
        with patch('feedreader.utils.feedparser.parse', parse_mock):
            with patch('sys.stdout', new=StringIO()): # Suppress printed output from test
                poll_feed(self.feed_mock, verbose=True)


# The class-level patch injects parse_mock into every test method.
@patch('feedreader.utils.feedparser.parse')
class PollFeedBozoExceptionTest(TestCase):
    """
    Test polling feeds where Bozo Exception returned.
    """

    def setUp(self):
        feed_mock = Mock(spec=Feed)
        feed_mock.xml_url = 'test-feed-url'
        feed_mock.published_time = None
        self.feed_mock = feed_mock

    def test_bozo_exception(self, parse_mock):
        """Test with Bozo Exception returned"""
        parse_mock.return_value.feed.bozo_exception = 'bozo_exception returned'
        with patch('sys.stdout', new=StringIO()): # Suppress printed output from test
            poll_feed(self.feed_mock, verbose=True)


class PollEntriesTest(TestCase):
    """Test polling of individual feed entries."""

    def setUp(self):
        """Build a mocked feedparser result and a mocked Feed DB object."""
        # Create feedparser.parse_mock object
        parse_mock = Mock()
        del parse_mock.return_value.feed.bozo_exception
        parse_mock.return_value.feed.published_parsed = (2014, 1, 1,
                                                        12, 0, 0,
                                                        2, 1, 0) # 2014-01-01 12:00:00
        self.parse_mock = parse_mock
        # Create .models.Feed mock object
        feed_mock = Mock(spec=Feed)
        feed_mock.xml_url = 'test-feed-url'
        feed_mock.published_time = None
        self.feed_mock = feed_mock

    def test_feed_entry_blank_title(self):
        """Test with a blank entry title"""
        parse_mock = self.parse_mock
        entry_attrs = {'link': 'test_entry_link',
                       'published_parsed': (2014, 1, 1, 12, 0, 0, 2, 1, 0), # 2014-01-01 12:00:00
                       }
        entry_mock = Mock(**entry_attrs)
        entry_mock.title = ''
        parse_mock.return_value.entries = [entry_mock]
        db_entry_mock = Mock()
        db_entry_mock.objects.get_or_create.return_value = (Mock(), True)
        with patch('feedreader.utils.feedparser.parse', parse_mock):
            with patch('feedreader.utils.Entry', db_entry_mock):
                with patch('sys.stdout', new=StringIO()): # Suppress printed output from test
                    poll_feed(self.feed_mock, verbose=True)

    def test_feed_entry_missing_description(self):
        """Test with missing attribute: description_detail"""
        parse_mock = self.parse_mock
        entry_attrs = {'link': 'test_entry_link',
                       'published_parsed': (2014, 1, 1, 12, 0, 0, 2, 1, 0), # 2014-01-01 12:00:00
                       }
        entry_mock = Mock(**entry_attrs)
        del entry_mock.description
        parse_mock.return_value.entries = [entry_mock]
        db_entry_mock = Mock()
        db_entry_mock.objects.get_or_create.return_value = (Mock(), True)
        with patch('feedreader.utils.feedparser.parse', parse_mock):
            with patch('feedreader.utils.Entry', db_entry_mock):
                with patch('sys.stdout', new=StringIO()): # Suppress printed output from test
                    poll_feed(self.feed_mock, verbose=True)

    def test_feed_entry_future_published_time(self):
        """Test with future entry published time"""
        parse_mock = self.parse_mock
        entry_attrs = {'link': 'test_entry_link',
                       'published_parsed': (2114, 1, 1, 12, 0, 0, 2, 1, 0), # 2114-01-01 12:00:00
                       }
        entry_mock = Mock(**entry_attrs)
        entry_mock.description_detail.type = 'text/plain'
        entry_mock.description = 'Test Feed Description'
        parse_mock.return_value.entries = [entry_mock]
        db_entry_mock = Mock()
        db_entry_mock.objects.get_or_create.return_value = (Mock(), True)
        with patch('feedreader.utils.feedparser.parse', parse_mock):
            with patch('feedreader.utils.Entry', db_entry_mock):
                with patch('sys.stdout', new=StringIO()): # Suppress printed output from test
                    poll_feed(self.feed_mock, verbose=True)
| 42.615894 | 98 | 0.623465 |
489b063a02c675a50c7e745eaf9da5a5f7f318c5 | 5,098 | py | Python | slash2/utils/tsuite2/tsuite_browser/tsuite_browser/api.py | zhihui-slash2/slash2-next | d174a735f9860d2d9e31c47548bda67257400804 | [
"0BSD"
] | null | null | null | slash2/utils/tsuite2/tsuite_browser/tsuite_browser/api.py | zhihui-slash2/slash2-next | d174a735f9860d2d9e31c47548bda67257400804 | [
"0BSD"
] | null | null | null | slash2/utils/tsuite2/tsuite_browser/tsuite_browser/api.py | zhihui-slash2/slash2-next | d174a735f9860d2d9e31c47548bda67257400804 | [
"0BSD"
] | null | null | null | from flask.ext.pymongo import PyMongo
class API(object):
    """Query helpers over the MongoDB-backed slash2 test-set collections.

    NOTE: this module is Python 2 (print statements below).
    """

    def __init__(self, app):
        # Flask app and its PyMongo connection.
        self.app = app
        self.mongo = PyMongo(app)

    def _error(self, errid, msg):
        """Returns a generic error response."""
        return {
            "err": errid,
            "err_msg": msg
        }

    def get_tset(self, tsid):
        """Get a specific slash2 test set.

        Args:
            tsid: tsid of the set.
        Returns:
            tset if it is found, else None"""
        return self.mongo.db.tsets.find_one({"tsid": tsid})

    def get_tsets(self, limit=None, sort=-1):
        """List all tsets, sorted by _id (descending by default)."""
        result = self.mongo.db.tsets.find().sort([("_id", sort),])
        if limit:
            result = result.limit(limit)
        return list(result)

    def get_latest_tset(self):
        """Get last slash2 test set.

        Returns:
            Returns the latest set if it exists, None elsewise."""
        tset = self.mongo.db.tsets.find().sort([("_id", -1),])
        return None if tset.count() == 0 else tset[0]

    def get_neighboring_tests(self, tsid, test_name, positions):
        """Get neighboring test results n positions away from tsid.

        Args:
            tsid: test set id to serve as center.
            test_name: test to be found.
            positions: how many spaces away relative to tsid.
        Returns:
            list of relevant tsets."""
        # NOTE(review): the test_name parameter is immediately shadowed by
        # the loop variable below — every test in the center tset is
        # collected, not just the named one; confirm that is intended.
        tsets = self.get_tsets(sort=1)
        # tsid is 1-based; convert to a 0-based list index.
        tsid -= 1
        lower, higher, adj_tests = {}, {}, {}
        for test in tsets[tsid]["tests"]:
            test_name = test["test_name"]
            lower[test_name] = []
            higher[test_name] = []
            adj_tests[test_name] = []
        for test_name in lower.keys():
            # Walk backwards collecting up to `positions` passing results.
            i = tsid - 1
            while i >= 0 and i < tsid and len(lower[test_name]) < positions:
                for test in tsets[i]["tests"]:
                    if test["test_name"] == test_name and test["pass"]:
                        test["tsid"] = i+1
                        assert(i+1 == test["tsid"])
                        lower[test_name] = [test] + lower[test_name]
                i -= 1
            # Walk forwards (starting at the center) the same way.
            i = tsid
            while i < len(tsets) and len(higher[test_name]) < positions:
                for test in tsets[i]["tests"]:
                    if test["test_name"] == test_name and test["pass"]:
                        test["tsid"] = i+1
                        higher[test_name].append(test)
                i += 1
        for test_name in higher.keys():
            # Balance how many results are taken from each side so roughly
            # `positions` total are returned per test.
            llen, hlen = len(lower[test_name]), len(higher[test_name])
            m = min(llen, hlen)
            lindex = m
            hindex = m
            if llen >= hindex:
                lindex = max(positions - 2*m, llen)
            else:
                hindex = max(positions - 2*m, hlen)
            print lindex, hindex
            for l in range(lindex):
                adj_tests[test_name].append(lower[test_name][l])
            for h in range(hindex):
                adj_tests[test_name].append(higher[test_name][h])
        return adj_tests
def get_tset_averages(self, tset):
"""Get averages over all clients of the tests in a single tset.
Args:
tset: tset to get the average test data from."""
test_averages = {}
print tset
for test_name, clients in tset["tests"].items():
print test_name, clients
valid_clients = 0
test_averages[test_name] = {
"average": 0.0,
"passed": 0,
"failed": 0
}
for client in clients:
if client["result"]["pass"]:
test_averages[test_name]["average"] += client["result"]["elapsed"]
test_averages[test_name]["passed"] += 1
else:
test_averages[test_name]["failed"] += 1
if test_averages[test_name]["average"] != 0.0:
test_averages[test_name]["average"] /= test_averages[test_name]["passed"]
return test_averages
def get_tset_display(self, tsid):
"""Get tset ready for display with simple statistics.
Args:
tsid: tset id for display."""
tset = self.get_tset(tsid)
tset_averages = self.get_tset_averages(tset)
recent_tsets = list(self.mongo.db.tsets.find(
{
"tsid": {"$lt": tsid},
}
).sort([("_id", -1),]))
#Needs optimized
for test in tset_averages:
for old_tset in recent_tsets:
recent_averages = self.get_tset_averages(old_tset)
if test in recent_averages:
tset_averages[test]["change_delta"] = tset_averages[test]["average"] - recent_averages[test]["average"]
tset_averages[test]["change_percent"] = round(tset_averages[test]["change_delta"] / max(1, recent_averages[test]["average"]), 3) * 100
tset_averages[test]["change_tsid"] = old_tset["tsid"]
break
return tset_averages
| 31.276074 | 154 | 0.518831 |
c503b1c975f6dd156bc635359f5bdfb3018fc1b1 | 1,068 | py | Python | app/core/models.py | klimentru1986/drf-api | 8227ddc2833d0b79857d36c0832b7ca06c9a9046 | [
"MIT"
] | null | null | null | app/core/models.py | klimentru1986/drf-api | 8227ddc2833d0b79857d36c0832b7ca06c9a9046 | [
"MIT"
] | null | null | null | app/core/models.py | klimentru1986/drf-api | 8227ddc2833d0b79857d36c0832b7ca06c9a9046 | [
"MIT"
] | null | null | null | from django.contrib.auth.base_user import BaseUserManager
from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser, \
PermissionsMixin
class UserManager(BaseUserManager):
    """Manager that creates accounts keyed by e-mail rather than username."""

    def create_user(self, email, password=None, **kwargs):
        """Create and persist a regular user; an e-mail address is mandatory."""
        if not email:
            raise ValueError("email is required")
        new_user = self.model(email=self.normalize_email(email), **kwargs)
        new_user.set_password(password)
        new_user.save(using=self._db)
        return new_user

    def create_superuser(self, email, password=None, **kwargs):
        """Create a user and promote it to staff + superuser."""
        admin = self.create_user(email, password, **kwargs)
        admin.is_superuser = True
        admin.is_staff = True
        admin.save(using=self._db)
        return admin
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model that authenticates with an e-mail address."""
    # Unique e-mail doubles as the login identifier (see USERNAME_FIELD).
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    # Deactivation flag; new accounts start active.
    is_active = models.BooleanField(default=True)
    # Grants access to the Django admin site; off by default.
    is_staff = models.BooleanField(default=False)
    # Route object creation through the e-mail-based manager above.
    objects = UserManager()
    # Field used as the unique login name by Django auth.
    USERNAME_FIELD = 'email'
a54f7ffa7651b8b8fa1da2de9b8d125082d1ca27 | 66,704 | py | Python | python3/diff_match_patch.py | 0xVK/diff-match-patch | 139df59d7f592652a01ff879c1c2498874e716f7 | [
"Apache-2.0"
] | 1 | 2017-02-21T19:03:28.000Z | 2017-02-21T19:03:28.000Z | python3/diff_match_patch.py | 0xVK/diff-match-patch | 139df59d7f592652a01ff879c1c2498874e716f7 | [
"Apache-2.0"
] | null | null | null | python3/diff_match_patch.py | 0xVK/diff-match-patch | 139df59d7f592652a01ff879c1c2498874e716f7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import re
import math
import sys
import time
import urllib.parse
class diff_match_patch:
"""Class containing the diff, match and patch methods.
Also contains the behaviour settings.
"""
  def __init__(self):
    """Inits a diff_match_patch object with default settings.
    Redefine these in your program to override the defaults.
    """
    # Number of seconds to map a diff before giving up (0 for infinity).
    self.Diff_Timeout: float = 1.0
    # Cost of an empty edit operation in terms of edit characters.
    self.Diff_EditCost: int = 4
    # At what point is no match declared (0.0 = perfection, 1.0 = very loose).
    self.Match_Threshold: float = 0.5
    # How far to search for a match (0 = exact location, 1000+ = broad match).
    # A match this many characters away from the expected location will add
    # 1.0 to the score (0.0 is a perfect match).
    self.Match_Distance: int = 1000
    # When deleting a large block of text (over ~64 characters), how close do
    # the contents have to be to match the expected contents. (0.0 = perfection,
    # 1.0 = very loose). Note that Match_Threshold controls how closely the
    # end points of a delete need to match.
    self.Patch_DeleteThreshold: float = 0.5
    # Chunk size for context length.
    self.Patch_Margin: int = 4
    # The number of bits in an int.
    # Python has no maximum, thus to disable patch splitting set to 0.
    # However to avoid long patches in certain pathological cases, use 32.
    # Multiple short patches (using native ints) are much faster than long ones.
    self.Match_MaxBits: int = 32
# DIFF FUNCTIONS
# The data structure representing a diff is an array of tuples:
# [(DIFF_DELETE, "Hello"), (DIFF_INSERT, "Goodbye"), (DIFF_EQUAL, " world.")]
# which means: delete "Hello", add "Goodbye" and keep " world."
DIFF_DELETE = -1
DIFF_INSERT = 1
DIFF_EQUAL = 0
  def diff_main(self, text1, text2, checklines=True, deadline=None):
    """Find the differences between two texts.  Simplifies the problem by
    stripping any common prefix or suffix off the texts before diffing.

    Args:
      text1: Old string to be diffed.
      text2: New string to be diffed.
      checklines: Optional speedup flag.  If present and false, then don't run
        a line-level diff first to identify the changed areas.
        Defaults to true, which does a faster, slightly less optimal diff.
      deadline: Optional time when the diff should be complete by.  Used
        internally for recursive calls.  Users should set DiffTimeout instead.

    Returns:
      Array of changes.
    """
    # Set a deadline by which time the diff must be complete.
    if deadline == None:
      # Unlike in most languages, Python counts time in seconds.
      if self.Diff_Timeout <= 0:
        deadline = sys.maxsize
      else:
        deadline = time.time() + self.Diff_Timeout

    # Check for null inputs.
    if text1 == None or text2 == None:
      raise ValueError("Null inputs. (diff_main)")

    # Check for equality (speedup).
    if text1 == text2:
      if text1:
        return [(self.DIFF_EQUAL, text1)]
      return []

    # Trim off common prefix (speedup).
    commonlength = self.diff_commonPrefix(text1, text2)
    commonprefix = text1[:commonlength]
    text1 = text1[commonlength:]
    text2 = text2[commonlength:]

    # Trim off common suffix (speedup).
    commonlength = self.diff_commonSuffix(text1, text2)
    # Guard the zero case: text1[:-0] would be the empty string, not the
    # whole string, so only slice when there is a real suffix.
    if commonlength == 0:
      commonsuffix = ''
    else:
      commonsuffix = text1[-commonlength:]
      text1 = text1[:-commonlength]
      text2 = text2[:-commonlength]

    # Compute the diff on the middle block.
    diffs = self.diff_compute(text1, text2, checklines, deadline)

    # Restore the prefix and suffix.
    if commonprefix:
      diffs[:0] = [(self.DIFF_EQUAL, commonprefix)]
    if commonsuffix:
      diffs.append((self.DIFF_EQUAL, commonsuffix))
    self.diff_cleanupMerge(diffs)
    return diffs
  def diff_compute(self, text1, text2, checklines, deadline):
    """Find the differences between two texts.  Assumes that the texts do not
    have any common prefix or suffix (diff_main strips them first).

    Args:
      text1: Old string to be diffed.
      text2: New string to be diffed.
      checklines: Speedup flag.  If false, then don't run a line-level diff
        first to identify the changed areas.
        If true, then run a faster, slightly less optimal diff.
      deadline: Time when the diff should be complete by.

    Returns:
      Array of changes.
    """
    if not text1:
      # Just add some text (speedup).
      return [(self.DIFF_INSERT, text2)]

    if not text2:
      # Just delete some text (speedup).
      return [(self.DIFF_DELETE, text1)]

    if len(text1) > len(text2):
      (longtext, shorttext) = (text1, text2)
    else:
      (shorttext, longtext) = (text1, text2)
    i = longtext.find(shorttext)
    if i != -1:
      # Shorter text is inside the longer text (speedup).
      diffs = [(self.DIFF_INSERT, longtext[:i]), (self.DIFF_EQUAL, shorttext),
               (self.DIFF_INSERT, longtext[i + len(shorttext):])]
      # Swap insertions for deletions if diff is reversed.
      if len(text1) > len(text2):
        diffs[0] = (self.DIFF_DELETE, diffs[0][1])
        diffs[2] = (self.DIFF_DELETE, diffs[2][1])
      return diffs

    if len(shorttext) == 1:
      # Single character string.
      # After the previous speedup, the character can't be an equality.
      return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]

    # Check to see if the problem can be split in two.
    hm = self.diff_halfMatch(text1, text2)
    if hm:
      # A half-match was found, sort out the return data.
      (text1_a, text1_b, text2_a, text2_b, mid_common) = hm
      # Send both pairs off for separate processing.
      diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline)
      diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline)
      # Merge the results.
      return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b

    # Line-mode is only worthwhile on texts big enough to have many lines.
    if checklines and len(text1) > 100 and len(text2) > 100:
      return self.diff_lineMode(text1, text2, deadline)

    return self.diff_bisect(text1, text2, deadline)
  def diff_lineMode(self, text1, text2, deadline):
    """Do a quick line-level diff on both strings, then rediff the parts for
    greater accuracy.
    This speedup can produce non-minimal diffs.

    Args:
      text1: Old string to be diffed.
      text2: New string to be diffed.
      deadline: Time when the diff should be complete by.

    Returns:
      Array of changes.
    """

    # Scan the text on a line-by-line basis first (each unique line becomes
    # one character, so the char-level diff below is really a line diff).
    (text1, text2, linearray) = self.diff_linesToChars(text1, text2)

    diffs = self.diff_main(text1, text2, False, deadline)

    # Convert the diff back to original text.
    self.diff_charsToLines(diffs, linearray)
    # Eliminate freak matches (e.g. blank lines)
    self.diff_cleanupSemantic(diffs)

    # Rediff any replacement blocks, this time character-by-character.
    # Add a dummy entry at the end.
    diffs.append((self.DIFF_EQUAL, ''))
    pointer = 0
    count_delete = 0
    count_insert = 0
    text_delete = ''
    text_insert = ''
    while pointer < len(diffs):
      if diffs[pointer][0] == self.DIFF_INSERT:
        count_insert += 1
        text_insert += diffs[pointer][1]
      elif diffs[pointer][0] == self.DIFF_DELETE:
        count_delete += 1
        text_delete += diffs[pointer][1]
      elif diffs[pointer][0] == self.DIFF_EQUAL:
        # Upon reaching an equality, check for prior redundancies.
        if count_delete >= 1 and count_insert >= 1:
          # Delete the offending records and add the merged ones
          # (a character-level diff of the accumulated delete/insert runs).
          a = self.diff_main(text_delete, text_insert, False, deadline)
          diffs[pointer - count_delete - count_insert : pointer] = a
          pointer = pointer - count_delete - count_insert + len(a)
        count_insert = 0
        count_delete = 0
        text_delete = ''
        text_insert = ''

      pointer += 1

    diffs.pop()  # Remove the dummy entry at the end.

    return diffs
  def diff_bisect(self, text1, text2, deadline):
    """Find the 'middle snake' of a diff, split the problem in two
    and return the recursively constructed diff.
    See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations.

    Args:
      text1: Old string to be diffed.
      text2: New string to be diffed.
      deadline: Time at which to bail if not yet complete.

    Returns:
      Array of diff tuples.
    """

    # Cache the text lengths to prevent multiple calls.
    text1_length = len(text1)
    text2_length = len(text2)
    max_d = (text1_length + text2_length + 1) // 2
    v_offset = max_d
    v_length = 2 * max_d
    # v1/v2 store the furthest-reaching x for each diagonal k (index shifted
    # by v_offset), for the forward and reverse searches respectively.
    v1 = [-1] * v_length
    v1[v_offset + 1] = 0
    v2 = v1[:]
    delta = text1_length - text2_length
    # If the total number of characters is odd, then the front path will
    # collide with the reverse path.
    front = (delta % 2 != 0)
    # Offsets for start and end of k loop.
    # Prevents mapping of space beyond the grid.
    k1start = 0
    k1end = 0
    k2start = 0
    k2end = 0
    for d in range(max_d):
      # Bail out if deadline is reached.
      if time.time() > deadline:
        break

      # Walk the front path one step.
      for k1 in range(-d + k1start, d + 1 - k1end, 2):
        k1_offset = v_offset + k1
        if k1 == -d or (k1 != d and
            v1[k1_offset - 1] < v1[k1_offset + 1]):
          x1 = v1[k1_offset + 1]
        else:
          x1 = v1[k1_offset - 1] + 1
        y1 = x1 - k1
        # Follow the diagonal (the "snake") while characters match.
        while (x1 < text1_length and y1 < text2_length and
               text1[x1] == text2[y1]):
          x1 += 1
          y1 += 1
        v1[k1_offset] = x1
        if x1 > text1_length:
          # Ran off the right of the graph.
          k1end += 2
        elif y1 > text2_length:
          # Ran off the bottom of the graph.
          k1start += 2
        elif front:
          k2_offset = v_offset + delta - k1
          if k2_offset >= 0 and k2_offset < v_length and v2[k2_offset] != -1:
            # Mirror x2 onto top-left coordinate system.
            x2 = text1_length - v2[k2_offset]
            if x1 >= x2:
              # Overlap detected.
              return self.diff_bisectSplit(text1, text2, x1, y1, deadline)

      # Walk the reverse path one step.
      for k2 in range(-d + k2start, d + 1 - k2end, 2):
        k2_offset = v_offset + k2
        if k2 == -d or (k2 != d and
            v2[k2_offset - 1] < v2[k2_offset + 1]):
          x2 = v2[k2_offset + 1]
        else:
          x2 = v2[k2_offset - 1] + 1
        y2 = x2 - k2
        # Same snake walk, but from the ends of both texts.
        while (x2 < text1_length and y2 < text2_length and
               text1[-x2 - 1] == text2[-y2 - 1]):
          x2 += 1
          y2 += 1
        v2[k2_offset] = x2
        if x2 > text1_length:
          # Ran off the left of the graph.
          k2end += 2
        elif y2 > text2_length:
          # Ran off the top of the graph.
          k2start += 2
        elif not front:
          k1_offset = v_offset + delta - k2
          if k1_offset >= 0 and k1_offset < v_length and v1[k1_offset] != -1:
            x1 = v1[k1_offset]
            y1 = v_offset + x1 - k1_offset
            # Mirror x2 onto top-left coordinate system.
            x2 = text1_length - x2
            if x1 >= x2:
              # Overlap detected.
              return self.diff_bisectSplit(text1, text2, x1, y1, deadline)

    # Diff took too long and hit the deadline or
    # number of diffs equals number of characters, no commonality at all.
    return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
def diff_bisectSplit(self, text1, text2, x, y, deadline):
"""Given the location of the 'middle snake', split the diff in two parts
and recurse.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
x: Index of split point in text1.
y: Index of split point in text2.
deadline: Time at which to bail if not yet complete.
Returns:
Array of diff tuples.
"""
text1a = text1[:x]
text2a = text2[:y]
text1b = text1[x:]
text2b = text2[y:]
# Compute both diffs serially.
diffs = self.diff_main(text1a, text2a, False, deadline)
diffsb = self.diff_main(text1b, text2b, False, deadline)
return diffs + diffsb
def diff_linesToChars(self, text1, text2):
"""Split two texts into an array of strings. Reduce the texts to a string
of hashes where each Unicode character represents one line.
Args:
text1: First string.
text2: Second string.
Returns:
Three element tuple, containing the encoded text1, the encoded text2 and
the array of unique strings. The zeroth element of the array of unique
strings is intentionally blank.
"""
lineArray = [] # e.g. lineArray[4] == "Hello\n"
lineHash = {} # e.g. lineHash["Hello\n"] == 4
# "\x00" is a valid character, but various debuggers don't like it.
# So we'll insert a junk entry to avoid generating a null character.
lineArray.append('')
def diff_linesToCharsMunge(text):
"""Split a text into an array of strings. Reduce the texts to a string
of hashes where each Unicode character represents one line.
Modifies linearray and linehash through being a closure.
Args:
text: String to encode.
Returns:
Encoded string.
"""
chars = []
# Walk the text, pulling out a substring for each line.
# text.split('\n') would would temporarily double our memory footprint.
# Modifying text would create many large strings to garbage collect.
lineStart = 0
lineEnd = -1
while lineEnd < len(text) - 1:
lineEnd = text.find('\n', lineStart)
if lineEnd == -1:
lineEnd = len(text) - 1
line = text[lineStart:lineEnd + 1]
lineStart = lineEnd + 1
if line in lineHash:
chars.append(chr(lineHash[line]))
else:
lineArray.append(line)
lineHash[line] = len(lineArray) - 1
chars.append(chr(len(lineArray) - 1))
return "".join(chars)
chars1 = diff_linesToCharsMunge(text1)
chars2 = diff_linesToCharsMunge(text2)
return (chars1, chars2, lineArray)
def diff_charsToLines(self, diffs, lineArray):
"""Rehydrate the text in a diff from a string of line hashes to real lines
of text.
Args:
diffs: Array of diff tuples.
lineArray: Array of unique strings.
"""
for x in range(len(diffs)):
text = []
for char in diffs[x][1]:
text.append(lineArray[ord(char)])
diffs[x] = (diffs[x][0], "".join(text))
def diff_commonPrefix(self, text1, text2):
"""Determine the common prefix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the start of each string.
"""
# Quick check for common null cases.
if not text1 or not text2 or text1[0] != text2[0]:
return 0
# Binary search.
# Performance analysis: http://neil.fraser.name/news/2007/10/09/
pointermin = 0
pointermax = min(len(text1), len(text2))
pointermid = pointermax
pointerstart = 0
while pointermin < pointermid:
if text1[pointerstart:pointermid] == text2[pointerstart:pointermid]:
pointermin = pointermid
pointerstart = pointermin
else:
pointermax = pointermid
pointermid = (pointermax - pointermin) // 2 + pointermin
return pointermid
def diff_commonSuffix(self, text1, text2):
"""Determine the common suffix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the end of each string.
"""
# Quick check for common null cases.
if not text1 or not text2 or text1[-1] != text2[-1]:
return 0
# Binary search.
# Performance analysis: http://neil.fraser.name/news/2007/10/09/
pointermin = 0
pointermax = min(len(text1), len(text2))
pointermid = pointermax
pointerend = 0
while pointermin < pointermid:
if (text1[-pointermid:len(text1) - pointerend] ==
text2[-pointermid:len(text2) - pointerend]):
pointermin = pointermid
pointerend = pointermin
else:
pointermax = pointermid
pointermid = (pointermax - pointermin) // 2 + pointermin
return pointermid
def diff_commonOverlap(self, text1, text2):
"""Determine if the suffix of one string is the prefix of another.
Args:
text1 First string.
text2 Second string.
Returns:
The number of characters common to the end of the first
string and the start of the second string.
"""
# Cache the text lengths to prevent multiple calls.
text1_length = len(text1)
text2_length = len(text2)
# Eliminate the null case.
if text1_length == 0 or text2_length == 0:
return 0
# Truncate the longer string.
if text1_length > text2_length:
text1 = text1[-text2_length:]
elif text1_length < text2_length:
text2 = text2[:text1_length]
text_length = min(text1_length, text2_length)
# Quick check for the worst case.
if text1 == text2:
return text_length
# Start by looking for a single character match
# and increase length until no match is found.
# Performance analysis: http://neil.fraser.name/news/2010/11/04/
best = 0
length = 1
while True:
pattern = text1[-length:]
found = text2.find(pattern)
if found == -1:
return best
length += found
if found == 0 or text1[-length:] == text2[:length]:
best = length
length += 1
  def diff_halfMatch(self, text1, text2):
    """Do the two texts share a substring which is at least half the length of
    the longer text?
    This speedup can produce non-minimal diffs.

    Args:
      text1: First string.
      text2: Second string.

    Returns:
      Five element Array, containing the prefix of text1, the suffix of text1,
      the prefix of text2, the suffix of text2 and the common middle.  Or None
      if there was no match.
    """
    if self.Diff_Timeout <= 0:
      # Don't risk returning a non-optimal diff if we have unlimited time.
      return None
    if len(text1) > len(text2):
      (longtext, shorttext) = (text1, text2)
    else:
      (shorttext, longtext) = (text1, text2)
    if len(longtext) < 4 or len(shorttext) * 2 < len(longtext):
      return None  # Pointless.

    def diff_halfMatchI(longtext, shorttext, i):
      """Does a substring of shorttext exist within longtext such that the
      substring is at least half the length of longtext?
      (Closure: captures self for diff_commonPrefix/diff_commonSuffix, but no
      mutable outer variables.)

      Args:
        longtext: Longer string.
        shorttext: Shorter string.
        i: Start index of quarter length substring within longtext.

      Returns:
        Five element Array, containing the prefix of longtext, the suffix of
        longtext, the prefix of shorttext, the suffix of shorttext and the
        common middle.  Or None if there was no match.
      """
      # Use a quarter-length substring as a search seed; any half-length
      # match must contain some quarter-length seed.
      seed = longtext[i:i + len(longtext) // 4]
      best_common = ''
      j = shorttext.find(seed)
      while j != -1:
        prefixLength = self.diff_commonPrefix(longtext[i:], shorttext[j:])
        suffixLength = self.diff_commonSuffix(longtext[:i], shorttext[:j])
        if len(best_common) < suffixLength + prefixLength:
          best_common = (shorttext[j - suffixLength:j] +
              shorttext[j:j + prefixLength])
          best_longtext_a = longtext[:i - suffixLength]
          best_longtext_b = longtext[i + prefixLength:]
          best_shorttext_a = shorttext[:j - suffixLength]
          best_shorttext_b = shorttext[j + prefixLength:]
        j = shorttext.find(seed, j + 1)

      if len(best_common) * 2 >= len(longtext):
        return (best_longtext_a, best_longtext_b,
                best_shorttext_a, best_shorttext_b, best_common)
      else:
        return None

    # First check if the second quarter is the seed for a half-match.
    hm1 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 3) // 4)
    # Check again based on the third quarter.
    hm2 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 1) // 2)
    if not hm1 and not hm2:
      return None
    elif not hm2:
      hm = hm1
    elif not hm1:
      hm = hm2
    else:
      # Both matched.  Select the longest.
      if len(hm1[4]) > len(hm2[4]):
        hm = hm1
      else:
        hm = hm2

    # A half-match was found, sort out the return data.
    # Un-swap the halves if text2 was actually the longer text.
    if len(text1) > len(text2):
      (text1_a, text1_b, text2_a, text2_b, mid_common) = hm
    else:
      (text2_a, text2_b, text1_a, text1_b, mid_common) = hm
    return (text1_a, text1_b, text2_a, text2_b, mid_common)
  def diff_cleanupSemantic(self, diffs):
    """Reduce the number of edits by eliminating semantically trivial
    equalities.  Mutates *diffs* in place.

    Args:
      diffs: Array of diff tuples.
    """
    changes = False
    equalities = []  # Stack of indices where equalities are found.
    lastequality = None  # Always equal to diffs[equalities[-1]][1]
    pointer = 0  # Index of current position.
    # Number of chars that changed prior to the equality.
    length_insertions1, length_deletions1 = 0, 0
    # Number of chars that changed after the equality.
    length_insertions2, length_deletions2 = 0, 0
    while pointer < len(diffs):
      if diffs[pointer][0] == self.DIFF_EQUAL:  # Equality found.
        equalities.append(pointer)
        length_insertions1, length_insertions2 = length_insertions2, 0
        length_deletions1, length_deletions2 = length_deletions2, 0
        lastequality = diffs[pointer][1]
      else:  # An insertion or deletion.
        if diffs[pointer][0] == self.DIFF_INSERT:
          length_insertions2 += len(diffs[pointer][1])
        else:
          length_deletions2 += len(diffs[pointer][1])
        # Eliminate an equality that is smaller or equal to the edits on both
        # sides of it.
        if (lastequality and (len(lastequality) <=
            max(length_insertions1, length_deletions1)) and
            (len(lastequality) <= max(length_insertions2, length_deletions2))):
          # Duplicate record: the equality becomes a delete+insert pair.
          diffs.insert(equalities[-1], (self.DIFF_DELETE, lastequality))
          # Change second copy to insert.
          diffs[equalities[-1] + 1] = (self.DIFF_INSERT,
              diffs[equalities[-1] + 1][1])
          # Throw away the equality we just deleted.
          equalities.pop()
          # Throw away the previous equality (it needs to be reevaluated).
          if len(equalities):
            equalities.pop()
          if len(equalities):
            pointer = equalities[-1]
          else:
            # Restart from the beginning (pointer becomes 0 after += 1).
            pointer = -1
          # Reset the counters.
          length_insertions1, length_deletions1 = 0, 0
          length_insertions2, length_deletions2 = 0, 0
          lastequality = None
          changes = True
      pointer += 1

    # Normalize the diff.
    if changes:
      self.diff_cleanupMerge(diffs)
    self.diff_cleanupSemanticLossless(diffs)

    # Find any overlaps between deletions and insertions.
    # e.g: <del>abcxxx</del><ins>xxxdef</ins>
    #   -> <del>abc</del>xxx<ins>def</ins>
    # e.g: <del>xxxabc</del><ins>defxxx</ins>
    #   -> <ins>def</ins>xxx<del>abc</del>
    # Only extract an overlap if it is as big as the edit ahead or behind it.
    pointer = 1
    while pointer < len(diffs):
      if (diffs[pointer - 1][0] == self.DIFF_DELETE and
          diffs[pointer][0] == self.DIFF_INSERT):
        deletion = diffs[pointer - 1][1]
        insertion = diffs[pointer][1]
        overlap_length1 = self.diff_commonOverlap(deletion, insertion)
        overlap_length2 = self.diff_commonOverlap(insertion, deletion)
        if overlap_length1 >= overlap_length2:
          if (overlap_length1 >= len(deletion) / 2.0 or
              overlap_length1 >= len(insertion) / 2.0):
            # Overlap found.  Insert an equality and trim the surrounding edits.
            diffs.insert(pointer, (self.DIFF_EQUAL,
                                   insertion[:overlap_length1]))
            diffs[pointer - 1] = (self.DIFF_DELETE,
                                  deletion[:len(deletion) - overlap_length1])
            diffs[pointer + 1] = (self.DIFF_INSERT,
                                  insertion[overlap_length1:])
            pointer += 1
        else:
          if (overlap_length2 >= len(deletion) / 2.0 or
              overlap_length2 >= len(insertion) / 2.0):
            # Reverse overlap found.
            # Insert an equality and swap and trim the surrounding edits.
            diffs.insert(pointer, (self.DIFF_EQUAL, deletion[:overlap_length2]))
            diffs[pointer - 1] = (self.DIFF_INSERT,
                insertion[:len(insertion) - overlap_length2])
            diffs[pointer + 1] = (self.DIFF_DELETE, deletion[overlap_length2:])
            pointer += 1
        pointer += 1
      pointer += 1
  def diff_cleanupSemanticLossless(self, diffs):
    """Look for single edits surrounded on both sides by equalities
    which can be shifted sideways to align the edit to a word boundary.
    e.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came.
    Mutates *diffs* in place.

    Args:
      diffs: Array of diff tuples.
    """

    def diff_cleanupSemanticScore(one, two):
      """Given two strings, compute a score representing whether the
      internal boundary falls on logical boundaries.
      Scores range from 6 (best) to 0 (worst).
      (Closure: captures self for the BLANKLINE* regexes, but no mutable
      outer variables.)

      Args:
        one: First string.
        two: Second string.

      Returns:
        The score.
      """
      if not one or not two:
        # Edges are the best.
        return 6

      # Each port of this function behaves slightly differently due to
      # subtle differences in each language's definition of things like
      # 'whitespace'.  Since this function's purpose is largely cosmetic,
      # the choice has been made to use each language's native features
      # rather than force total conformity.
      char1 = one[-1]
      char2 = two[0]
      nonAlphaNumeric1 = not char1.isalnum()
      nonAlphaNumeric2 = not char2.isalnum()
      whitespace1 = nonAlphaNumeric1 and char1.isspace()
      whitespace2 = nonAlphaNumeric2 and char2.isspace()
      lineBreak1 = whitespace1 and (char1 == "\r" or char1 == "\n")
      lineBreak2 = whitespace2 and (char2 == "\r" or char2 == "\n")
      blankLine1 = lineBreak1 and self.BLANKLINEEND.search(one)
      blankLine2 = lineBreak2 and self.BLANKLINESTART.match(two)

      if blankLine1 or blankLine2:
        # Five points for blank lines.
        return 5
      elif lineBreak1 or lineBreak2:
        # Four points for line breaks.
        return 4
      elif nonAlphaNumeric1 and not whitespace1 and whitespace2:
        # Three points for end of sentences.
        return 3
      elif whitespace1 or whitespace2:
        # Two points for whitespace.
        return 2
      elif nonAlphaNumeric1 or nonAlphaNumeric2:
        # One point for non-alphanumeric.
        return 1
      return 0

    pointer = 1
    # Intentionally ignore the first and last element (don't need checking).
    while pointer < len(diffs) - 1:
      if (diffs[pointer - 1][0] == self.DIFF_EQUAL and
          diffs[pointer + 1][0] == self.DIFF_EQUAL):
        # This is a single edit surrounded by equalities.
        equality1 = diffs[pointer - 1][1]
        edit = diffs[pointer][1]
        equality2 = diffs[pointer + 1][1]

        # First, shift the edit as far left as possible.
        commonOffset = self.diff_commonSuffix(equality1, edit)
        if commonOffset:
          commonString = edit[-commonOffset:]
          equality1 = equality1[:-commonOffset]
          edit = commonString + edit[:-commonOffset]
          equality2 = commonString + equality2

        # Second, step character by character right, looking for the best fit.
        bestEquality1 = equality1
        bestEdit = edit
        bestEquality2 = equality2
        bestScore = (diff_cleanupSemanticScore(equality1, edit) +
                     diff_cleanupSemanticScore(edit, equality2))
        # Slide the edit window right one character at a time, keeping the
        # highest-scoring (most "natural") boundary seen.
        while edit and equality2 and edit[0] == equality2[0]:
          equality1 += edit[0]
          edit = edit[1:] + equality2[0]
          equality2 = equality2[1:]
          score = (diff_cleanupSemanticScore(equality1, edit) +
                   diff_cleanupSemanticScore(edit, equality2))
          # The >= encourages trailing rather than leading whitespace on edits.
          if score >= bestScore:
            bestScore = score
            bestEquality1 = equality1
            bestEdit = edit
            bestEquality2 = equality2

        if diffs[pointer - 1][1] != bestEquality1:
          # We have an improvement, save it back to the diff.
          if bestEquality1:
            diffs[pointer - 1] = (diffs[pointer - 1][0], bestEquality1)
          else:
            del diffs[pointer - 1]
            pointer -= 1
          diffs[pointer] = (diffs[pointer][0], bestEdit)
          if bestEquality2:
            diffs[pointer + 1] = (diffs[pointer + 1][0], bestEquality2)
          else:
            del diffs[pointer + 1]
            pointer -= 1
      pointer += 1
# Define some regex patterns for matching boundaries.
BLANKLINEEND = re.compile(r"\n\r?\n$");
BLANKLINESTART = re.compile(r"^\r?\n\r?\n");
  def diff_cleanupEfficiency(self, diffs):
    """Reduce the number of edits by eliminating operationally trivial
    equalities (shorter than Diff_EditCost).  Mutates *diffs* in place.

    Args:
      diffs: Array of diff tuples.
    """
    changes = False
    equalities = []  # Stack of indices where equalities are found.
    lastequality = None  # Always equal to diffs[equalities[-1]][1]
    pointer = 0  # Index of current position.
    pre_ins = False  # Is there an insertion operation before the last equality.
    pre_del = False  # Is there a deletion operation before the last equality.
    post_ins = False  # Is there an insertion operation after the last equality.
    post_del = False  # Is there a deletion operation after the last equality.
    while pointer < len(diffs):
      if diffs[pointer][0] == self.DIFF_EQUAL:  # Equality found.
        if (len(diffs[pointer][1]) < self.Diff_EditCost and
            (post_ins or post_del)):
          # Candidate found.
          equalities.append(pointer)
          pre_ins = post_ins
          pre_del = post_del
          lastequality = diffs[pointer][1]
        else:
          # Not a candidate, and can never become one.
          equalities = []
          lastequality = None

        post_ins = post_del = False
      else:  # An insertion or deletion.
        if diffs[pointer][0] == self.DIFF_DELETE:
          post_del = True
        else:
          post_ins = True

        # Five types to be split:
        # <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
        # <ins>A</ins>X<ins>C</ins><del>D</del>
        # <ins>A</ins><del>B</del>X<ins>C</ins>
        # <ins>A</del>X<ins>C</ins><del>D</del>
        # <ins>A</ins><del>B</del>X<del>C</del>

        # Split when edits surround the equality on all four sides, or when
        # the equality is very short and edits flank it on three sides
        # (booleans sum as 0/1 integers here).
        if lastequality and ((pre_ins and pre_del and post_ins and post_del) or
                             ((len(lastequality) < self.Diff_EditCost / 2) and
                              (pre_ins + pre_del + post_ins + post_del) == 3)):
          # Duplicate record.
          diffs.insert(equalities[-1], (self.DIFF_DELETE, lastequality))
          # Change second copy to insert.
          diffs[equalities[-1] + 1] = (self.DIFF_INSERT,
              diffs[equalities[-1] + 1][1])
          equalities.pop()  # Throw away the equality we just deleted.
          lastequality = None
          if pre_ins and pre_del:
            # No changes made which could affect previous entry, keep going.
            post_ins = post_del = True
            equalities = []
          else:
            if len(equalities):
              equalities.pop()  # Throw away the previous equality.
            if len(equalities):
              pointer = equalities[-1]
            else:
              # Restart from the beginning (pointer becomes 0 after += 1).
              pointer = -1
            post_ins = post_del = False
          changes = True
      pointer += 1

    if changes:
      self.diff_cleanupMerge(diffs)
  def diff_cleanupMerge(self, diffs):
    """Reorder and merge like edit sections.  Merge equalities.

    Any edit section can move as long as it doesn't cross an equality.
    The list is modified in place; a second recursive sweep runs whenever a
    shift uncovered new merge opportunities.

    Args:
      diffs: Array of diff tuples.
    """
    diffs.append((self.DIFF_EQUAL, ''))  # Add a dummy entry at the end.
    pointer = 0
    # Running totals for the current run of consecutive insert/delete tuples.
    count_delete = 0
    count_insert = 0
    text_delete = ''
    text_insert = ''
    while pointer < len(diffs):
      if diffs[pointer][0] == self.DIFF_INSERT:
        count_insert += 1
        text_insert += diffs[pointer][1]
        pointer += 1
      elif diffs[pointer][0] == self.DIFF_DELETE:
        count_delete += 1
        text_delete += diffs[pointer][1]
        pointer += 1
      elif diffs[pointer][0] == self.DIFF_EQUAL:
        # Upon reaching an equality, check for prior redundancies.
        if count_delete + count_insert > 1:
          if count_delete != 0 and count_insert != 0:
            # Factor out any common prefixies.
            commonlength = self.diff_commonPrefix(text_insert, text_delete)
            if commonlength != 0:
              x = pointer - count_delete - count_insert - 1
              if x >= 0 and diffs[x][0] == self.DIFF_EQUAL:
                # Append the shared prefix to the preceding equality.
                diffs[x] = (diffs[x][0], diffs[x][1] +
                            text_insert[:commonlength])
              else:
                # No preceding equality; emit one at the very front.
                diffs.insert(0, (self.DIFF_EQUAL, text_insert[:commonlength]))
                pointer += 1
              text_insert = text_insert[commonlength:]
              text_delete = text_delete[commonlength:]
            # Factor out any common suffixies.
            commonlength = self.diff_commonSuffix(text_insert, text_delete)
            if commonlength != 0:
              # Prepend the shared suffix to the equality at the cursor.
              diffs[pointer] = (diffs[pointer][0], text_insert[-commonlength:] +
                                diffs[pointer][1])
              text_insert = text_insert[:-commonlength]
              text_delete = text_delete[:-commonlength]
          # Delete the offending records and add the merged ones.
          if count_delete == 0:
            diffs[pointer - count_insert : pointer] = [
                (self.DIFF_INSERT, text_insert)]
          elif count_insert == 0:
            diffs[pointer - count_delete : pointer] = [
                (self.DIFF_DELETE, text_delete)]
          else:
            diffs[pointer - count_delete - count_insert : pointer] = [
                (self.DIFF_DELETE, text_delete),
                (self.DIFF_INSERT, text_insert)]
          # Reposition the cursor just past the merged records.
          pointer = pointer - count_delete - count_insert + 1
          if count_delete != 0:
            pointer += 1
          if count_insert != 0:
            pointer += 1
        elif pointer != 0 and diffs[pointer - 1][0] == self.DIFF_EQUAL:
          # Merge this equality with the previous one.
          diffs[pointer - 1] = (diffs[pointer - 1][0],
                                diffs[pointer - 1][1] + diffs[pointer][1])
          del diffs[pointer]
        else:
          pointer += 1
        # Reset accumulators for the next insert/delete run.
        count_insert = 0
        count_delete = 0
        text_delete = ''
        text_insert = ''
    if diffs[-1][1] == '':
      diffs.pop()  # Remove the dummy entry at the end.
    # Second pass: look for single edits surrounded on both sides by equalities
    # which can be shifted sideways to eliminate an equality.
    # e.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
    changes = False
    pointer = 1
    # Intentionally ignore the first and last element (don't need checking).
    while pointer < len(diffs) - 1:
      if (diffs[pointer - 1][0] == self.DIFF_EQUAL and
          diffs[pointer + 1][0] == self.DIFF_EQUAL):
        # This is a single edit surrounded by equalities.
        if diffs[pointer][1].endswith(diffs[pointer - 1][1]):
          # Shift the edit over the previous equality.
          diffs[pointer] = (diffs[pointer][0],
                            diffs[pointer - 1][1] +
                            diffs[pointer][1][:-len(diffs[pointer - 1][1])])
          diffs[pointer + 1] = (diffs[pointer + 1][0],
                                diffs[pointer - 1][1] + diffs[pointer + 1][1])
          del diffs[pointer - 1]
          changes = True
        elif diffs[pointer][1].startswith(diffs[pointer + 1][1]):
          # Shift the edit over the next equality.
          diffs[pointer - 1] = (diffs[pointer - 1][0],
                                diffs[pointer - 1][1] + diffs[pointer + 1][1])
          diffs[pointer] = (diffs[pointer][0],
                            diffs[pointer][1][len(diffs[pointer + 1][1]):] +
                            diffs[pointer + 1][1])
          del diffs[pointer + 1]
          changes = True
      pointer += 1
    # If shifts were made, the diff needs reordering and another shift sweep.
    if changes:
      self.diff_cleanupMerge(diffs)
def diff_xIndex(self, diffs, loc):
"""loc is a location in text1, compute and return the equivalent location
in text2. e.g. "The cat" vs "The big cat", 1->1, 5->8
Args:
diffs: Array of diff tuples.
loc: Location within text1.
Returns:
Location within text2.
"""
chars1 = 0
chars2 = 0
last_chars1 = 0
last_chars2 = 0
for x in range(len(diffs)):
(op, text) = diffs[x]
if op != self.DIFF_INSERT: # Equality or deletion.
chars1 += len(text)
if op != self.DIFF_DELETE: # Equality or insertion.
chars2 += len(text)
if chars1 > loc: # Overshot the location.
break
last_chars1 = chars1
last_chars2 = chars2
if len(diffs) != x and diffs[x][0] == self.DIFF_DELETE:
# The location was deleted.
return last_chars2
# Add the remaining len(character).
return last_chars2 + (loc - last_chars1)
def diff_prettyHtml(self, diffs):
"""Convert a diff array into a pretty HTML report.
Args:
diffs: Array of diff tuples.
Returns:
HTML representation.
"""
html = []
for (op, data) in diffs:
text = (data.replace("&", "&").replace("<", "<")
.replace(">", ">").replace("\n", "¶<br>"))
if op == self.DIFF_INSERT:
html.append("<ins style=\"background:#e6ffe6;\">%s</ins>" % text)
elif op == self.DIFF_DELETE:
html.append("<del style=\"background:#ffe6e6;\">%s</del>" % text)
elif op == self.DIFF_EQUAL:
html.append("<span>%s</span>" % text)
return "".join(html)
def diff_text1(self, diffs):
"""Compute and return the source text (all equalities and deletions).
Args:
diffs: Array of diff tuples.
Returns:
Source text.
"""
text = []
for (op, data) in diffs:
if op != self.DIFF_INSERT:
text.append(data)
return "".join(text)
def diff_text2(self, diffs):
"""Compute and return the destination text (all equalities and insertions).
Args:
diffs: Array of diff tuples.
Returns:
Destination text.
"""
text = []
for (op, data) in diffs:
if op != self.DIFF_DELETE:
text.append(data)
return "".join(text)
def diff_levenshtein(self, diffs):
"""Compute the Levenshtein distance; the number of inserted, deleted or
substituted characters.
Args:
diffs: Array of diff tuples.
Returns:
Number of changes.
"""
levenshtein = 0
insertions = 0
deletions = 0
for (op, data) in diffs:
if op == self.DIFF_INSERT:
insertions += len(data)
elif op == self.DIFF_DELETE:
deletions += len(data)
elif op == self.DIFF_EQUAL:
# A deletion and an insertion is one substitution.
levenshtein += max(insertions, deletions)
insertions = 0
deletions = 0
levenshtein += max(insertions, deletions)
return levenshtein
def diff_toDelta(self, diffs):
"""Crush the diff into an encoded string which describes the operations
required to transform text1 into text2.
E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'.
Operations are tab-separated. Inserted text is escaped using %xx notation.
Args:
diffs: Array of diff tuples.
Returns:
Delta text.
"""
text = []
for (op, data) in diffs:
if op == self.DIFF_INSERT:
# High ascii will raise UnicodeDecodeError. Use Unicode instead.
data = data.encode("utf-8")
text.append("+" + urllib.parse.quote(data, "!~*'();/?:@&=+$,# "))
elif op == self.DIFF_DELETE:
text.append("-%d" % len(data))
elif op == self.DIFF_EQUAL:
text.append("=%d" % len(data))
return "\t".join(text)
def diff_fromDelta(self, text1, delta):
"""Given the original text1, and an encoded string which describes the
operations required to transform text1 into text2, compute the full diff.
Args:
text1: Source string for the diff.
delta: Delta text.
Returns:
Array of diff tuples.
Raises:
ValueError: If invalid input.
"""
diffs = []
pointer = 0 # Cursor in text1
tokens = delta.split("\t")
for token in tokens:
if token == "":
# Blank tokens are ok (from a trailing \t).
continue
# Each token begins with a one character parameter which specifies the
# operation of this token (delete, insert, equality).
param = token[1:]
if token[0] == "+":
param = urllib.parse.unquote(param)
diffs.append((self.DIFF_INSERT, param))
elif token[0] == "-" or token[0] == "=":
try:
n = int(param)
except ValueError:
raise ValueError("Invalid number in diff_fromDelta: " + param)
if n < 0:
raise ValueError("Negative number in diff_fromDelta: " + param)
text = text1[pointer : pointer + n]
pointer += n
if token[0] == "=":
diffs.append((self.DIFF_EQUAL, text))
else:
diffs.append((self.DIFF_DELETE, text))
else:
# Anything else is an error.
raise ValueError("Invalid diff operation in diff_fromDelta: " +
token[0])
if pointer != len(text1):
raise ValueError(
"Delta length (%d) does not equal source text length (%d)." %
(pointer, len(text1)))
return diffs
# MATCH FUNCTIONS
def match_main(self, text, pattern, loc):
"""Locate the best instance of 'pattern' in 'text' near 'loc'.
Args:
text: The text to search.
pattern: The pattern to search for.
loc: The location to search around.
Returns:
Best match index or -1.
"""
# Check for null inputs.
if text == None or pattern == None:
raise ValueError("Null inputs. (match_main)")
loc = max(0, min(loc, len(text)))
if text == pattern:
# Shortcut (potentially not guaranteed by the algorithm)
return 0
elif not text:
# Nothing to match.
return -1
elif text[loc:loc + len(pattern)] == pattern:
# Perfect match at the perfect spot! (Includes case of null pattern)
return loc
else:
# Do a fuzzy compare.
match = self.match_bitap(text, pattern, loc)
return match
  def match_bitap(self, text, pattern, loc):
    """Locate the best instance of 'pattern' in 'text' near 'loc' using the
    Bitap algorithm.

    The search widens one allowed error per outer iteration; a score that
    weighs error count against distance from 'loc' decides the winner.

    Args:
      text: The text to search.
      pattern: The pattern to search for.
      loc: The location to search around.

    Returns:
      Best match index or -1.
    """
    # Python doesn't have a maxint limit, so ignore this check.
    #if self.Match_MaxBits != 0 and len(pattern) > self.Match_MaxBits:
    #  raise ValueError("Pattern too long for this application.")
    # Initialise the alphabet: one bitmask of occurrence positions per char.
    s = self.match_alphabet(pattern)
    def match_bitapScore(e, x):
      """Compute and return the score for a match with e errors and x location.

      Accesses loc and pattern through being a closure.

      Args:
        e: Number of errors in match.
        x: Location of match.

      Returns:
        Overall score for match (0.0 = good, 1.0 = bad).
      """
      accuracy = float(e) / len(pattern)
      proximity = abs(loc - x)
      if not self.Match_Distance:
        # Dodge divide by zero error.
        return proximity and 1.0 or accuracy
      return accuracy + (proximity / float(self.Match_Distance))
    # Highest score beyond which we give up.
    score_threshold = self.Match_Threshold
    # Is there a nearby exact match? (speedup)
    best_loc = text.find(pattern, loc)
    if best_loc != -1:
      score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
      # What about in the other direction? (speedup)
      best_loc = text.rfind(pattern, loc + len(pattern))
      if best_loc != -1:
        score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
    # Initialise the bit arrays.
    matchmask = 1 << (len(pattern) - 1)
    best_loc = -1
    bin_max = len(pattern) + len(text)
    # Empty initialization added to appease pychecker.
    last_rd = None
    for d in range(len(pattern)):
      # Scan for the best match each iteration allows for one more error.
      # Run a binary search to determine how far from 'loc' we can stray at
      # this error level.
      bin_min = 0
      bin_mid = bin_max
      while bin_min < bin_mid:
        if match_bitapScore(d, loc + bin_mid) <= score_threshold:
          bin_min = bin_mid
        else:
          bin_max = bin_mid
        bin_mid = (bin_max - bin_min) // 2 + bin_min
      # Use the result from this iteration as the maximum for the next.
      bin_max = bin_mid
      start = max(1, loc - bin_mid + 1)
      finish = min(loc + bin_mid, len(text)) + len(pattern)
      rd = [0] * (finish + 2)
      rd[finish + 1] = (1 << d) - 1
      # Scan right-to-left, updating the bit-parallel match state per char.
      for j in range(finish, start - 1, -1):
        if len(text) <= j - 1:
          # Out of range.
          charMatch = 0
        else:
          charMatch = s.get(text[j - 1], 0)
        if d == 0:  # First pass: exact match.
          rd[j] = ((rd[j + 1] << 1) | 1) & charMatch
        else:  # Subsequent passes: fuzzy match.
          rd[j] = (((rd[j + 1] << 1) | 1) & charMatch) | (
              ((last_rd[j + 1] | last_rd[j]) << 1) | 1) | last_rd[j + 1]
        if rd[j] & matchmask:
          score = match_bitapScore(d, j - 1)
          # This match will almost certainly be better than any existing match.
          # But check anyway.
          if score <= score_threshold:
            # Told you so.
            score_threshold = score
            best_loc = j - 1
            if best_loc > loc:
              # When passing loc, don't exceed our current distance from loc.
              start = max(1, 2 * loc - best_loc)
            else:
              # Already passed loc, downhill from here on in.
              break
      # No hope for a (better) match at greater error levels.
      if match_bitapScore(d + 1, loc) > score_threshold:
        break
      last_rd = rd
    return best_loc
def match_alphabet(self, pattern):
"""Initialise the alphabet for the Bitap algorithm.
Args:
pattern: The text to encode.
Returns:
Hash of character locations.
"""
s = {}
for char in pattern:
s[char] = 0
for i in range(len(pattern)):
s[pattern[i]] |= 1 << (len(pattern) - i - 1)
return s
# PATCH FUNCTIONS
  def patch_addContext(self, patch, text):
    """Increase the context until it is unique,
    but don't let the pattern expand beyond Match_MaxBits.

    Mutates 'patch' in place: equal-text context is prepended/appended to
    its diffs and start/length fields are adjusted to match.

    Args:
      patch: The patch to grow.
      text: Source text.
    """
    if len(text) == 0:
      return
    pattern = text[patch.start2 : patch.start2 + patch.length1]
    padding = 0
    # Look for the first and last matches of pattern in text.  If two different
    # matches are found, increase the pattern length.
    while (text.find(pattern) != text.rfind(pattern) and (self.Match_MaxBits ==
        0 or len(pattern) < self.Match_MaxBits - self.Patch_Margin -
        self.Patch_Margin)):
      padding += self.Patch_Margin
      pattern = text[max(0, patch.start2 - padding) :
                     patch.start2 + patch.length1 + padding]
    # Add one chunk for good luck.
    padding += self.Patch_Margin
    # Add the prefix.
    prefix = text[max(0, patch.start2 - padding) : patch.start2]
    if prefix:
      patch.diffs[:0] = [(self.DIFF_EQUAL, prefix)]
    # Add the suffix.
    suffix = text[patch.start2 + patch.length1 :
                  patch.start2 + patch.length1 + padding]
    if suffix:
      patch.diffs.append((self.DIFF_EQUAL, suffix))
    # Roll back the start points.
    patch.start1 -= len(prefix)
    patch.start2 -= len(prefix)
    # Extend lengths.
    patch.length1 += len(prefix) + len(suffix)
    patch.length2 += len(prefix) + len(suffix)
  def patch_make(self, a, b=None, c=None):
    """Compute a list of patches to turn text1 into text2.

    Use diffs if provided, otherwise compute it ourselves.
    There are four ways to call this function, depending on what data is
    available to the caller:

    Method 1: a = text1, b = text2
    Method 2: a = diffs
    Method 3 (optimal): a = text1, b = diffs
    Method 4 (deprecated, use method 3): a = text1, b = text2, c = diffs

    Args:
      a: text1 (methods 1,3,4) or Array of diff tuples for text1 to
          text2 (method 2).
      b: text2 (methods 1,4) or Array of diff tuples for text1 to
          text2 (method 3) or undefined (method 2).
      c: Array of diff tuples for text1 to text2 (method 4) or
          undefined (methods 1,2,3).

    Returns:
      Array of Patch objects.

    Raises:
      ValueError: If the argument combination matches no known call format.
    """
    text1 = None
    diffs = None
    # Disambiguate the overloaded call formats by argument types.
    if isinstance(a, str) and isinstance(b, str) and c is None:
      # Method 1: text1, text2
      # Compute diffs from text1 and text2.
      text1 = a
      diffs = self.diff_main(text1, b, True)
      if len(diffs) > 2:
        self.diff_cleanupSemantic(diffs)
        self.diff_cleanupEfficiency(diffs)
    elif isinstance(a, list) and b is None and c is None:
      # Method 2: diffs
      # Compute text1 from diffs.
      diffs = a
      text1 = self.diff_text1(diffs)
    elif isinstance(a, str) and isinstance(b, list) and c is None:
      # Method 3: text1, diffs
      text1 = a
      diffs = b
    elif (isinstance(a, str) and isinstance(b, str) and
          isinstance(c, list)):
      # Method 4: text1, text2, diffs
      # text2 is not used.
      text1 = a
      diffs = c
    else:
      raise ValueError("Unknown call format to patch_make.")
    if not diffs:
      return []  # Get rid of the None case.
    patches = []
    patch = patch_obj()
    char_count1 = 0  # Number of characters into the text1 string.
    char_count2 = 0  # Number of characters into the text2 string.
    prepatch_text = text1  # Recreate the patches to determine context info.
    postpatch_text = text1
    for x in range(len(diffs)):
      (diff_type, diff_text) = diffs[x]
      if len(patch.diffs) == 0 and diff_type != self.DIFF_EQUAL:
        # A new patch starts here.
        patch.start1 = char_count1
        patch.start2 = char_count2
      if diff_type == self.DIFF_INSERT:
        # Insertion
        patch.diffs.append(diffs[x])
        patch.length2 += len(diff_text)
        postpatch_text = (postpatch_text[:char_count2] + diff_text +
                          postpatch_text[char_count2:])
      elif diff_type == self.DIFF_DELETE:
        # Deletion.
        patch.length1 += len(diff_text)
        patch.diffs.append(diffs[x])
        postpatch_text = (postpatch_text[:char_count2] +
                          postpatch_text[char_count2 + len(diff_text):])
      elif (diff_type == self.DIFF_EQUAL and
            len(diff_text) <= 2 * self.Patch_Margin and
            len(patch.diffs) != 0 and len(diffs) != x + 1):
        # Small equality inside a patch.
        patch.diffs.append(diffs[x])
        patch.length1 += len(diff_text)
        patch.length2 += len(diff_text)
      if (diff_type == self.DIFF_EQUAL and
          len(diff_text) >= 2 * self.Patch_Margin):
        # Time for a new patch.
        if len(patch.diffs) != 0:
          self.patch_addContext(patch, prepatch_text)
          patches.append(patch)
          patch = patch_obj()
          # Unlike Unidiff, our patch lists have a rolling context.
          # http://code.google.com/p/google-diff-match-patch/wiki/Unidiff
          # Update prepatch text & pos to reflect the application of the
          # just completed patch.
          prepatch_text = postpatch_text
          char_count1 = char_count2
      # Update the current character count.
      if diff_type != self.DIFF_INSERT:
        char_count1 += len(diff_text)
      if diff_type != self.DIFF_DELETE:
        char_count2 += len(diff_text)
    # Pick up the leftover patch if not empty.
    if len(patch.diffs) != 0:
      self.patch_addContext(patch, prepatch_text)
      patches.append(patch)
    return patches
def patch_deepCopy(self, patches):
"""Given an array of patches, return another array that is identical.
Args:
patches: Array of Patch objects.
Returns:
Array of Patch objects.
"""
patchesCopy = []
for patch in patches:
patchCopy = patch_obj()
# No need to deep copy the tuples since they are immutable.
patchCopy.diffs = patch.diffs[:]
patchCopy.start1 = patch.start1
patchCopy.start2 = patch.start2
patchCopy.length1 = patch.length1
patchCopy.length2 = patch.length2
patchesCopy.append(patchCopy)
return patchesCopy
  def patch_apply(self, patches, text):
    """Merge a set of patches onto the text.  Return a patched text, as well
    as a list of true/false values indicating which patches were applied.

    Args:
      patches: Array of Patch objects.
      text: Old text.

    Returns:
      Two element Array, containing the new text and an array of boolean values.
    """
    if not patches:
      return (text, [])
    # Deep copy the patches so that no changes are made to originals.
    patches = self.patch_deepCopy(patches)
    nullPadding = self.patch_addPadding(patches)
    text = nullPadding + text + nullPadding
    self.patch_splitMax(patches)
    # delta keeps track of the offset between the expected and actual location
    # of the previous patch.  If there are patches expected at positions 10 and
    # 20, but the first patch was found at 12, delta is 2 and the second patch
    # has an effective expected position of 22.
    delta = 0
    results = []
    for patch in patches:
      expected_loc = patch.start2 + delta
      text1 = self.diff_text1(patch.diffs)
      end_loc = -1
      if len(text1) > self.Match_MaxBits:
        # patch_splitMax will only provide an oversized pattern in the case of
        # a monster delete.  Match head and tail separately.
        start_loc = self.match_main(text, text1[:self.Match_MaxBits],
                                    expected_loc)
        if start_loc != -1:
          end_loc = self.match_main(text, text1[-self.Match_MaxBits:],
              expected_loc + len(text1) - self.Match_MaxBits)
          if end_loc == -1 or start_loc >= end_loc:
            # Can't find valid trailing context.  Drop this patch.
            start_loc = -1
      else:
        start_loc = self.match_main(text, text1, expected_loc)
      if start_loc == -1:
        # No match found.  :(
        results.append(False)
        # Subtract the delta for this failed patch from subsequent patches.
        delta -= patch.length2 - patch.length1
      else:
        # Found a match.  :)
        results.append(True)
        delta = start_loc - expected_loc
        if end_loc == -1:
          text2 = text[start_loc : start_loc + len(text1)]
        else:
          text2 = text[start_loc : end_loc + self.Match_MaxBits]
        if text1 == text2:
          # Perfect match, just shove the replacement text in.
          text = (text[:start_loc] + self.diff_text2(patch.diffs) +
                  text[start_loc + len(text1):])
        else:
          # Imperfect match.
          # Run a diff to get a framework of equivalent indices.
          diffs = self.diff_main(text1, text2, False)
          if (len(text1) > self.Match_MaxBits and
              self.diff_levenshtein(diffs) / float(len(text1)) >
              self.Patch_DeleteThreshold):
            # The end points match, but the content is unacceptably bad.
            results[-1] = False
          else:
            self.diff_cleanupSemanticLossless(diffs)
            index1 = 0
            # Replay each non-equal op at its translated location in text.
            for (op, data) in patch.diffs:
              if op != self.DIFF_EQUAL:
                index2 = self.diff_xIndex(diffs, index1)
              if op == self.DIFF_INSERT:  # Insertion
                text = text[:start_loc + index2] + data + text[start_loc +
                                                               index2:]
              elif op == self.DIFF_DELETE:  # Deletion
                text = text[:start_loc + index2] + text[start_loc +
                    self.diff_xIndex(diffs, index1 + len(data)):]
              if op != self.DIFF_DELETE:
                index1 += len(data)
    # Strip the padding off.
    text = text[len(nullPadding):-len(nullPadding)]
    return (text, results)
  def patch_addPadding(self, patches):
    """Add some padding on text start and end so that edges can match
    something.  Intended to be called only from within patch_apply.

    The padding string is built from control characters chr(1)..chr(margin),
    which are assumed not to occur in normal text.

    Args:
      patches: Array of Patch objects.

    Returns:
      The padding string added to each side.
    """
    paddingLength = self.Patch_Margin
    nullPadding = ""
    for x in range(1, paddingLength + 1):
      nullPadding += chr(x)
    # Bump all the patches forward.
    for patch in patches:
      patch.start1 += paddingLength
      patch.start2 += paddingLength
    # Add some padding on start of first diff.
    patch = patches[0]
    diffs = patch.diffs
    if not diffs or diffs[0][0] != self.DIFF_EQUAL:
      # Add nullPadding equality.
      diffs.insert(0, (self.DIFF_EQUAL, nullPadding))
      patch.start1 -= paddingLength  # Should be 0.
      patch.start2 -= paddingLength  # Should be 0.
      patch.length1 += paddingLength
      patch.length2 += paddingLength
    elif paddingLength > len(diffs[0][1]):
      # Grow first equality.
      extraLength = paddingLength - len(diffs[0][1])
      newText = nullPadding[len(diffs[0][1]):] + diffs[0][1]
      diffs[0] = (diffs[0][0], newText)
      patch.start1 -= extraLength
      patch.start2 -= extraLength
      patch.length1 += extraLength
      patch.length2 += extraLength
    # Add some padding on end of last diff.
    patch = patches[-1]
    diffs = patch.diffs
    if not diffs or diffs[-1][0] != self.DIFF_EQUAL:
      # Add nullPadding equality.
      diffs.append((self.DIFF_EQUAL, nullPadding))
      patch.length1 += paddingLength
      patch.length2 += paddingLength
    elif paddingLength > len(diffs[-1][1]):
      # Grow last equality.
      extraLength = paddingLength - len(diffs[-1][1])
      newText = diffs[-1][1] + nullPadding[:extraLength]
      diffs[-1] = (diffs[-1][0], newText)
      patch.length1 += extraLength
      patch.length2 += extraLength
    return nullPadding
  def patch_splitMax(self, patches):
    """Look through the patches and break up any which are longer than the
    maximum limit of the match algorithm.

    Intended to be called only from within patch_apply.  Mutates 'patches'
    in place.

    NOTE(review): the loop index x is mutated inside the body (del/insert)
    while iterating over range(len(patches)) computed up front; this mirrors
    the upstream implementation — confirm against upstream before changing.

    Args:
      patches: Array of Patch objects.
    """
    patch_size = self.Match_MaxBits
    if patch_size == 0:
      # Python has the option of not splitting strings due to its ability
      # to handle integers of arbitrary precision.
      return
    for x in range(len(patches)):
      if patches[x].length1 <= patch_size:
        continue
      bigpatch = patches[x]
      # Remove the big old patch.
      del patches[x]
      x -= 1
      start1 = bigpatch.start1
      start2 = bigpatch.start2
      precontext = ''
      while len(bigpatch.diffs) != 0:
        # Create one of several smaller patches.
        patch = patch_obj()
        empty = True
        patch.start1 = start1 - len(precontext)
        patch.start2 = start2 - len(precontext)
        if precontext:
          patch.length1 = patch.length2 = len(precontext)
          patch.diffs.append((self.DIFF_EQUAL, precontext))
        while (len(bigpatch.diffs) != 0 and
               patch.length1 < patch_size - self.Patch_Margin):
          (diff_type, diff_text) = bigpatch.diffs[0]
          if diff_type == self.DIFF_INSERT:
            # Insertions are harmless.
            patch.length2 += len(diff_text)
            start2 += len(diff_text)
            patch.diffs.append(bigpatch.diffs.pop(0))
            empty = False
          elif (diff_type == self.DIFF_DELETE and len(patch.diffs) == 1 and
              patch.diffs[0][0] == self.DIFF_EQUAL and
              len(diff_text) > 2 * patch_size):
            # This is a large deletion.  Let it pass in one chunk.
            patch.length1 += len(diff_text)
            start1 += len(diff_text)
            empty = False
            patch.diffs.append((diff_type, diff_text))
            del bigpatch.diffs[0]
          else:
            # Deletion or equality.  Only take as much as we can stomach.
            diff_text = diff_text[:patch_size - patch.length1 -
                                  self.Patch_Margin]
            patch.length1 += len(diff_text)
            start1 += len(diff_text)
            if diff_type == self.DIFF_EQUAL:
              patch.length2 += len(diff_text)
              start2 += len(diff_text)
            else:
              empty = False
            patch.diffs.append((diff_type, diff_text))
            if diff_text == bigpatch.diffs[0][1]:
              del bigpatch.diffs[0]
            else:
              bigpatch.diffs[0] = (bigpatch.diffs[0][0],
                                   bigpatch.diffs[0][1][len(diff_text):])
        # Compute the head context for the next patch.
        precontext = self.diff_text2(patch.diffs)
        precontext = precontext[-self.Patch_Margin:]
        # Append the end context for this patch.
        postcontext = self.diff_text1(bigpatch.diffs)[:self.Patch_Margin]
        if postcontext:
          patch.length1 += len(postcontext)
          patch.length2 += len(postcontext)
          if len(patch.diffs) != 0 and patch.diffs[-1][0] == self.DIFF_EQUAL:
            patch.diffs[-1] = (self.DIFF_EQUAL, patch.diffs[-1][1] +
                               postcontext)
          else:
            patch.diffs.append((self.DIFF_EQUAL, postcontext))
        if not empty:
          x += 1
          patches.insert(x, patch)
def patch_toText(self, patches):
"""Take a list of patches and return a textual representation.
Args:
patches: Array of Patch objects.
Returns:
Text representation of patches.
"""
text = []
for patch in patches:
text.append(str(patch))
return "".join(text)
def patch_fromText(self, textline):
"""Parse a textual representation of patches and return a list of patch
objects.
Args:
textline: Text representation of patches.
Returns:
Array of Patch objects.
Raises:
ValueError: If invalid input.
"""
patches = []
if not textline:
return patches
text = textline.split('\n')
while len(text) != 0:
m = re.match("^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@$", text[0])
if not m:
raise ValueError("Invalid patch string: " + text[0])
patch = patch_obj()
patches.append(patch)
patch.start1 = int(m.group(1))
if m.group(2) == '':
patch.start1 -= 1
patch.length1 = 1
elif m.group(2) == '0':
patch.length1 = 0
else:
patch.start1 -= 1
patch.length1 = int(m.group(2))
patch.start2 = int(m.group(3))
if m.group(4) == '':
patch.start2 -= 1
patch.length2 = 1
elif m.group(4) == '0':
patch.length2 = 0
else:
patch.start2 -= 1
patch.length2 = int(m.group(4))
del text[0]
while len(text) != 0:
if text[0]:
sign = text[0][0]
else:
sign = ''
line = urllib.parse.unquote(text[0][1:])
if sign == '+':
# Insertion.
patch.diffs.append((self.DIFF_INSERT, line))
elif sign == '-':
# Deletion.
patch.diffs.append((self.DIFF_DELETE, line))
elif sign == ' ':
# Minor equality.
patch.diffs.append((self.DIFF_EQUAL, line))
elif sign == '@':
# Start of next patch.
break
elif sign == '':
# Blank line? Whatever.
pass
else:
# WTF?
raise ValueError("Invalid patch mode: '%s'\n%s" % (sign, line))
del text[0]
return patches
class patch_obj:
  """Class representing one patch operation.
  """

  def __init__(self):
    """Initializes with an empty list of diffs and zero-length spans.
    """
    self.diffs = []      # List of diff tuples making up this patch.
    self.start1 = None   # Start position in the source text.
    self.start2 = None   # Start position in the destination text.
    self.length1 = 0     # Length of the patch in the source text.
    self.length2 = 0     # Length of the patch in the destination text.

  def __str__(self):
    """Emulate GNU diff's format.

    Header: @@ -382,8 +481,9 @@
    Indices are printed as 1-based, not 0-based.

    Returns:
      The GNU diff string.
    """
    def coords(start, length):
      # GNU diff prints 1-based positions and omits ",1" for single lines;
      # zero-length hunks keep the 0-based anchor with ",0".
      if length == 0:
        return str(start) + ",0"
      if length == 1:
        return str(start + 1)
      return str(start + 1) + "," + str(length)

    pieces = ["@@ -", coords(self.start1, self.length1),
              " +", coords(self.start2, self.length2), " @@\n"]
    # Escape the body of the patch with %xx notation.
    for (op, data) in self.diffs:
      if op == diff_match_patch.DIFF_INSERT:
        pieces.append("+")
      elif op == diff_match_patch.DIFF_DELETE:
        pieces.append("-")
      elif op == diff_match_patch.DIFF_EQUAL:
        pieces.append(" ")
      # High ascii will raise UnicodeDecodeError.  Use Unicode instead.
      pieces.append(urllib.parse.quote(data.encode("utf-8"),
                                       "!~*'();/?:@&=+$,# ") + "\n")
    return "".join(pieces)
# Quick demonstration: diff two sample strings, tidy the result semantically,
# and print both the raw diff tuples and an HTML rendering.
dmp = diff_match_patch()
text2 = 'Hello, my friend!'
text1 = 'Helo my frind)'
d = dmp.diff_main(text1=text1, text2=text2)
dmp.diff_cleanupSemantic(d)
r = dmp.diff_prettyHtml(d)
for demo_output in (text1, text2, d, r):
  print(demo_output)
| 35.144362 | 80 | 0.602962 |
93de84a5b45e31fa7adc00a1ea01364609fbf5c9 | 2,789 | py | Python | tests/conftest.py | Arax1/JobFunnel | 461aca3fd8d5c07fc4a57bf82d8bdc08a775e82b | [
"MIT"
] | null | null | null | tests/conftest.py | Arax1/JobFunnel | 461aca3fd8d5c07fc4a57bf82d8bdc08a775e82b | [
"MIT"
] | 1 | 2021-05-05T01:39:59.000Z | 2021-05-05T01:39:59.000Z | tests/conftest.py | Arax1/JobFunnel | 461aca3fd8d5c07fc4a57bf82d8bdc08a775e82b | [
"MIT"
] | null | null | null | import pytest
import sys
from unittest.mock import patch
from jobfunnel.config.parser import parse_config
from jobfunnel.tools.tools import config_factory
@pytest.fixture()
def configure_options():
    """Fixture returning a helper that parses JobFunnel CLI options."""
    def setup(options: list):
        """Parse `options` as if JobFunnel were invoked from the command
        line with them, and return the resulting configuration.

        This fixture assumes that the test_parse module has been tested
        and passes.
        """
        with patch.object(sys, 'argv', options):
            parsed = parse_config()
        return parsed
    return setup
@pytest.fixture()
def job_listings():
    """Fixture returning a factory for monster-style job listing dicts."""
    def setup(attr_list: list):
        """Generate job listings.

        With an empty `attr_list`, return a single default job listing.
        Otherwise return one listing per attribute pair via config_factory.
        Each item of `attr_list` has the shape
        [['key1', 'key2', 'keyN'], 'value'].
        """
        base_job = {'status': 'new', 'title': 'Python Engineer', 'company': 'Python Corp', 'location': 'Waterloo, ON', 'date': '10 days ago', 'blurb': '', 'tags': '',
                    'link':
                    'https://job-openings.best-job-board.domain/python-engineer-waterloo-on-ca-pro'
                    'com/216808420', 'id': '216808420', 'provider': 'monster', 'query': 'Python'}
        if not attr_list:
            return base_job
        return config_factory(base_job, attr_list)
    return setup
@pytest.fixture()
def per_id_job_listings(job_listings):
    """Fixture returning a factory for {'job_id': {job_listing}} mappings."""
    def setup(attr_list: list, first_job_id: int = 0):
        """Generate job listings keyed by a unique, sequential job id.

        Useful for functions like tfidf_filter that expect listings in the
        {'job_id': {job_listing}} format.

        Args:
            attr_list: attribute list in [['key1', 'key2', 'keyN'], 'value']
                format, forwarded to the job_listings fixture.
            first_job_id: number at which to start generating job ids; handy
                when distinct ids are needed across multiple calls with the
                same attr_list length.

        Returns:
            Dict mapping each generated id (as a string) to its listing.
        """
        keyed_jobs = {}
        for offset, job in enumerate(job_listings(attr_list)):
            job['id'] = str(first_job_id + offset)
            keyed_jobs[job['id']] = job
        return keyed_jobs
    return setup
| 40.42029 | 204 | 0.634995 |
6b015c7b8d5009e7101db5d3b399d2e31f165e64 | 4,320 | py | Python | kinow_client/models/product_price_prices.py | kinow-io/kinow-python-sdk | 4c1699a3c78048b84287bd049a669651a5b4e2d5 | [
"Apache-2.0"
] | 1 | 2019-06-26T14:24:54.000Z | 2019-06-26T14:24:54.000Z | kinow_client/models/product_price_prices.py | kinow-io/kinow-python-sdk | 4c1699a3c78048b84287bd049a669651a5b4e2d5 | [
"Apache-2.0"
] | null | null | null | kinow_client/models/product_price_prices.py | kinow-io/kinow-python-sdk | 4c1699a3c78048b84287bd049a669651a5b4e2d5 | [
"Apache-2.0"
] | 1 | 2018-02-01T10:08:40.000Z | 2018-02-01T10:08:40.000Z | # coding: utf-8
"""
Kinow API
Client API for Kinow back-office
OpenAPI spec version: 1.4.18
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ProductPricePrices(object):
    """Swagger model describing one price entry of a product attribute.

    NOTE: This class was auto generated by the swagger code generator
    program and then hand-patched: the generated ``__eq__`` raised
    ``AttributeError`` when compared against objects without a
    ``__dict__`` (e.g. ``model == 5``); it now returns ``False`` instead.
    """

    def __init__(self, id_attribute=None, price=None, price_noreduc=None):
        """
        ProductPricePrices - a model defined in Swagger

        :param id_attribute: identifier of the attribute combination this
            price applies to.
        :param price: effective price (reductions applied).
        :param price_noreduc: base price before any reduction.
        """
        # Attribute name -> swagger type; drives the generic to_dict().
        self.swagger_types = {
            'id_attribute': 'float',
            'price': 'float',
            'price_noreduc': 'float'
        }

        # Attribute name -> JSON key used in the API payload.
        self.attribute_map = {
            'id_attribute': 'id_attribute',
            'price': 'price',
            'price_noreduc': 'price_noreduc'
        }

        self._id_attribute = id_attribute
        self._price = price
        self._price_noreduc = price_noreduc

    @property
    def id_attribute(self):
        """
        Gets the id_attribute of this ProductPricePrices.

        :return: The id_attribute of this ProductPricePrices.
        :rtype: float
        """
        return self._id_attribute

    @id_attribute.setter
    def id_attribute(self, id_attribute):
        """
        Sets the id_attribute of this ProductPricePrices.

        :param id_attribute: The id_attribute of this ProductPricePrices.
        :type: float
        """
        self._id_attribute = id_attribute

    @property
    def price(self):
        """
        Gets the price of this ProductPricePrices.

        :return: The price of this ProductPricePrices.
        :rtype: float
        """
        return self._price

    @price.setter
    def price(self, price):
        """
        Sets the price of this ProductPricePrices.

        :param price: The price of this ProductPricePrices.
        :type: float
        """
        self._price = price

    @property
    def price_noreduc(self):
        """
        Gets the price_noreduc of this ProductPricePrices.

        :return: The price_noreduc of this ProductPricePrices.
        :rtype: float
        """
        return self._price_noreduc

    @price_noreduc.setter
    def price_noreduc(self, price_noreduc):
        """
        Sets the price_noreduc of this ProductPricePrices.

        :param price_noreduc: The price_noreduc of this ProductPricePrices.
        :type: float
        """
        self._price_noreduc = price_noreduc

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Walk the declared swagger attributes, recursively serialising
        # nested models, lists of models and dicts of models.
        # (Plain dict iteration replaces the Python-2-era six.iteritems.)
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, ProductPricePrices):
            # Previously compared __dict__ unconditionally, which raised
            # AttributeError for objects without one (ints, strings, ...).
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 26.832298 | 78 | 0.536111 |
d016eb25dd2b0d8a744f19effd5c41e76d8e5f60 | 8,360 | py | Python | main.py | jensen8786/snap-it-find-it | 2abbaa3bc63f94699b302a3f06e10d2bc64124d7 | [
"MIT"
] | null | null | null | main.py | jensen8786/snap-it-find-it | 2abbaa3bc63f94699b302a3f06e10d2bc64124d7 | [
"MIT"
] | null | null | null | main.py | jensen8786/snap-it-find-it | 2abbaa3bc63f94699b302a3f06e10d2bc64124d7 | [
"MIT"
] | null | null | null | import cv2
import pandas as pd
from io import BytesIO, StringIO
from PIL import Image
import math
import os.path
import image_search #this is to search similar image from extracted features
import object_detection_background_removal #this is using detectron
import logging
from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove, Update
from telegram.ext import (
Updater,
CommandHandler,
MessageHandler,
Filters,
ConversationHandler,
CallbackContext,
)
# Telegram bot token; replace with the token issued by @BotFather before running.
BOT_TOKEN = "replace_this_with_your_token"
# the database of ikea and hipvan products
ikea_data = pd.read_csv('data/ikea_products.csv', index_col=False)
hipvan_data = pd.read_csv('data/hipvan_products.csv', index_col=False)
# ## conversation bot
# https://github.com/python-telegram-bot/python-telegram-bot/blob/master/examples/conversationbot.py
# Enable logging
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO
)
logger = logging.getLogger(__name__)
# Conversation-state identifiers used by the ConversationHandler below.
START, PHOTO, SELECT_OBJ = range(3)
def start(update: Update, _: CallbackContext) -> int:
    """Greet the user and move the conversation to the PHOTO state."""
    greeting = (
        'This bot can fetch you similar products from Ikea! '
        'Simply send me a picture to tell me what you are looking for. '
        'If the object is not detected in the first try, crop the image and resubmit the photo!\n\n '
    )
    update.message.reply_text(greeting)
    return PHOTO
def _product_caption(path):
    """Return the catalogue URL for a local result image path, or None.

    Fix: the original used ``str.strip('.jpg')`` to drop the extension,
    which strips ANY of the characters '.', 'j', 'p', 'g' from both ends
    and can corrupt product ids; ``os.path.splitext`` removes only the
    real extension.
    """
    source = path.split('/')[0]
    stem = os.path.splitext(path.split('/')[-1])[0]
    if source == 'ikea_image':
        return ikea_data[ikea_data.id == stem.split('_')[-1]].pipUrl.iloc[0]
    if source == 'hipvan_image':
        main_URL = 'http://hipvan.com/'
        return main_URL + hipvan_data[
            hipvan_data.id == int(stem.split('_')[1])].relative_url.iloc[0]
    return None


def _reply_top_matches(update, img_to_search, max_matches=5):
    """Search the product index and reply with up to ``max_matches`` photos.

    This replaces three identical copy-pasted loops that previously lived
    in ``photo`` and ``select_obj``.
    """
    img_path = image_search.search_image(img_to_search, 20)
    count = 0
    for path in img_path:
        if not os.path.isfile(path):
            continue
        try:
            caption = _product_caption(path)
            if caption is None:
                continue
            update.message.reply_text('Top {} match ⤵️'.format(count + 1))
            update.message.reply_photo(open(path, 'rb'), caption=caption)
            count += 1
            if count == max_matches:
                break
        except Exception:
            # Deliberate best-effort behaviour: one broken catalogue row
            # must not abort the whole reply (was a bare `except: pass`).
            pass


def photo(update: Update, context: CallbackContext) -> int:
    """Receive a photo, run object detection and either ask the user to
    pick an object (several detected) or search with the whole image."""
    photo_file = update.message.photo[-1].get_file()
    photo_file.download('user_photo.jpg')
    # OpenCV loads BGR; the detector and search index expect RGB.
    img = cv2.cvtColor(cv2.imread("user_photo.jpg"), cv2.COLOR_BGR2RGB)
    update.message.reply_text(
        'It may take up to 10 seconds to process your image, please be patient...'
    )
    context.user_data['ori_img'] = img

    img_w_label, classes, output_img = object_detection_background_removal.detect_object(img)
    # Map detected class name -> background-removed crop for later lookup.
    data = dict(zip(classes, output_img))
    context.user_data['data'] = data

    if len(classes) > 1:
        classes.append('Entire photo')
        # Rearrange the flat list into rows of 3 so the custom keyboard
        # renders compactly on screen.
        # https://stackoverflow.com/questions/10124751/convert-a-flat-list-to-list-of-lists-in-python
        col = 3
        row = math.ceil(len(classes) / col)
        classes_rearrange = [classes[col * i: col * (i + 1)] for i in range(row)]
        markup = ReplyKeyboardMarkup(
            classes_rearrange, one_time_keyboard=True, resize_keyboard=True
        )
        update.message.reply_text(
            '👍 {} objects detected. Please select one object to search.'.format(len(classes) - 1),
            reply_markup=markup,
        )
        # Send back the annotated detection image as a JPEG buffer.
        img_crop_pil = Image.fromarray(img_w_label)
        byte_io = BytesIO()
        img_crop_pil.save(byte_io, format="JPEG")
        jpg_buffer = byte_io.getvalue()
        byte_io.close()
        update.message.reply_photo(jpg_buffer)
        return SELECT_OBJ

    # Zero or one detection: search straight away with the full photo.
    update.message.reply_text(
        'Searching for matching products...'
    )
    _reply_top_matches(update, context.user_data['ori_img'])
    return ConversationHandler.END


def select_obj(update: Update, context: CallbackContext) -> int:
    """Handle the user's keyboard choice and reply with matching products."""
    selected_class = update.message.text
    data = context.user_data['data']
    if selected_class in data:
        update.message.reply_text(
            'You have selected {}, searching for matching products...'.format(selected_class)
        )
        _reply_top_matches(update, data[selected_class])
    elif selected_class == 'Entire photo':
        update.message.reply_text(
            'Searching for matching products using entire photo...'
        )
        _reply_top_matches(update, context.user_data['ori_img'])
    else:
        update.message.reply_text(
            'Your input is invalid, please upload a new photo to try again...'
        )
    return ConversationHandler.END
# for future enhancement
def cancel(update: Update, _: CallbackContext) -> int:
    """End the conversation at the user's request and remove the keyboard."""
    logger.info(
        "User %s canceled the conversation.",
        update.message.from_user.first_name,
    )
    update.message.reply_text(
        'Bye! I hope we can talk again some day.',
        reply_markup=ReplyKeyboardRemove(),
    )
    return ConversationHandler.END
def main() -> None:
    """Wire up the Telegram bot and poll until interrupted."""
    # Create the Updater and pass it your bot's token.
    updater = Updater(BOT_TOKEN)
    # Get the dispatcher to register handlers
    dispatcher = updater.dispatcher
    # Add conversation handler with the states START, PHOTO and SELECT_OBJ.
    # A /start command or a directly-sent photo both enter the conversation.
    conv_handler = ConversationHandler(
        entry_points=[CommandHandler('start', start),
                    MessageHandler(Filters.photo, photo),
                    ],
        states={
            START: [CommandHandler('start', start)],
            PHOTO: [MessageHandler(Filters.photo, photo)],#, CommandHandler('skip', skip_photo)],
            SELECT_OBJ: [MessageHandler(Filters.text, select_obj), MessageHandler(Filters.photo, photo)],
        },
        fallbacks=[CommandHandler('cancel', cancel)],
    )
    dispatcher.add_handler(conv_handler)
    # Start the Bot
    updater.start_polling()
    # Run the bot until you press Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_polling() is non-blocking and will stop the bot gracefully.
    updater.idle()
if __name__ == "__main__":
    # Guard the entry point so the module can be imported (e.g. by tests)
    # without starting the bot.
    main()
| 37.321429 | 149 | 0.599641 |
4c54b20ea8da4db716770c70d8938a493b6da61e | 1,750 | py | Python | test_db/test_odd.py | robertchase/spindrift | e1ee59fd6822558cc47738d3221815064117457c | [
"MIT"
] | 1 | 2020-09-21T19:51:22.000Z | 2020-09-21T19:51:22.000Z | test_db/test_odd.py | robertchase/spindrift | e1ee59fd6822558cc47738d3221815064117457c | [
"MIT"
] | null | null | null | test_db/test_odd.py | robertchase/spindrift | e1ee59fd6822558cc47738d3221815064117457c | [
"MIT"
] | null | null | null | import pytest
import test_db.models as models
# Name used for the single root row created by the fixture below.
ROOTNAME = 'whatever'
@pytest.fixture
def data(sync):
    # Seed the database with one root row and two child node rows so every
    # test in this module runs against a known object graph.
    root = models.OddKeysRoot(name=ROOTNAME).save(sync)
    models.OddKeysNode(oddkeysroot=root, name=models.NODE1).save(sync)
    models.OddKeysNode(oddkeysroot=root, name=models.NODE2).save(sync)
def test_simple(data, sync):
    """The seeded root row round-trips through list() with its name intact."""
    roots = models.OddKeysRoot.list(sync)
    first = roots[0]
    assert isinstance(first, models.OddKeysRoot)
    assert first.name == ROOTNAME
def test_list_nodes(data, sync):
    """list() returns both seeded nodes (order not guaranteed)."""
    nodes = models.OddKeysNode.list(sync)
    assert nodes
    assert {node.name for node in nodes} == {models.NODE1, models.NODE2}
def test_load(data, sync):
    """A root fetched via list() can be re-loaded by its primary key."""
    root = models.OddKeysRoot.list(sync)[0]
    result = models.OddKeysRoot.load(sync, root.my_key)
    assert result
    assert result.name == ROOTNAME
def test_insert(data, sync):
    """A row can be inserted with an explicitly chosen (unused) key."""
    # Double the existing key to get a value guaranteed not to collide.
    key = models.OddKeysRoot.list(sync)[0].my_key * 2
    root = models.OddKeysRoot(name=ROOTNAME)
    result = root.insert(sync, key)
    assert result
    assert result.name == ROOTNAME
def test_update(data, sync):
    """save() persists a field change, observable after a fresh load()."""
    root = models.OddKeysRoot.list(sync)[0]
    root.name = ROOTNAME + ROOTNAME
    root.save(sync)
    result = root.load(sync, root.my_key)
    assert result
    assert result.name == ROOTNAME + ROOTNAME
def test_children(data, sync):
    """A root can navigate to both of its child nodes."""
    result = models.OddKeysRoot.query.execute(sync, one=True)
    assert result
    child = result.odd_nodes(sync)
    assert child
    assert len(child) == 2
    names = set([c.name for c in child])
    assert set((models.NODE1, models.NODE2)) == names
def test_foreign(data, sync):
    """A node can navigate back to its parent root via the foreign key."""
    node = models.OddKeysNode.query.execute(sync, one=True)
    assert node
    assert node.oddkeysroot(sync)
| 25.735294 | 70 | 0.700571 |
07166f7dce2d289b3cc7e12ee9f49aeb8f247780 | 404 | py | Python | MUNDO1/Ex018_Trigon.py | KayDeVC/Python-CeV | ad65b5d35987475479795fa752f6bd1f79c9953a | [
"MIT"
] | null | null | null | MUNDO1/Ex018_Trigon.py | KayDeVC/Python-CeV | ad65b5d35987475479795fa752f6bd1f79c9953a | [
"MIT"
] | null | null | null | MUNDO1/Ex018_Trigon.py | KayDeVC/Python-CeV | ad65b5d35987475479795fa752f6bd1f79c9953a | [
"MIT"
] | null | null | null | import math
# Read an angle in degrees and report its sine, cosine and tangent.
print('@.@ T R I G O N O M Ê T R I A @.@')
ang = float(input('Diga o valor do ângulo:'))
rad = math.radians(ang)  # convert once: math's trig functions take radians
print('O SENO do ângulo {} é igual á: {:.2f}.'.format(ang, math.sin(rad)))
print('O COSENO do ângulo {} é igual á: {:.2f}.'.format(ang, math.cos(rad)))
print('A TANGENTE do ângulo {} é igual á: {:.2f}.'.format(ang, math.tan(rad)))
| 40.4 | 68 | 0.628713 |
c2399d7659e1e40900721ea830efa4ba8f79d4d6 | 4,200 | py | Python | scripts/format_pubs.py | James11222/James_CV | 5887c0ab6a9a8981db25bf30fcbed0b91f54ad30 | [
"CC-BY-4.0"
] | null | null | null | scripts/format_pubs.py | James11222/James_CV | 5887c0ab6a9a8981db25bf30fcbed0b91f54ad30 | [
"CC-BY-4.0"
] | null | null | null | scripts/format_pubs.py | James11222/James_CV | 5887c0ab6a9a8981db25bf30fcbed0b91f54ad30 | [
"CC-BY-4.0"
] | null | null | null | """
Heavily inspired by dfm/cv/scripts/render.py
"""
import ads
from datetime import date
from operator import itemgetter
import json
import importlib.util
import os
here = os.path.abspath('')
# Load the sibling utf8totex.py by explicit file path so the script can be
# run directly from this directory without installing it as a package.
spec = importlib.util.spec_from_file_location(
    "utf8totex", os.path.join(here, "utf8totex.py")
)
utf8totex = importlib.util.module_from_spec(spec)
spec.loader.exec_module(utf8totex)
# Map full ADS journal names to the LaTeX journal macros used in the CV.
JOURNAL_MAP = {
    "ArXiv e-prints": "ArXiv",
    "arXiv e-prints": "ArXiv",
    "Monthly Notices of the Royal Astronomical Society": "\\mnras",
    "The Astrophysical Journal": "\\apj",
    "The Astronomical Journal": "\\aj",
    "Publications of the Astronomical Society of the Pacific": "\\pasp",
    "IAU General Assembly": "IAU",
    "Astronomy and Astrophysics": "\\aanda",
    "American Astronomical Society Meeting Abstracts": "AAS",
}


def format_pub(args):
    """Render one ADS publication record as a LaTeX ``\\item`` entry.

    ``args`` is an ``(index, pub)`` pair as produced by ``zip(range(...),
    pubs)``; the index is unused.  NOTE: the author list inside ``pub`` is
    mutated in place to bold the CV owner's name.
    """
    _, pub = args
    entry = "\\item "

    # Locate and bold the owner's name in the author list.
    own_idx = [
        i
        for i in range(len(pub["authors"]))
        if "Sunseri" in pub["authors"][i]
    ][0]
    pub["authors"][own_idx] = "\\textbf{Sunseri, James}"
    title = pub["title"].replace('{\\&}amp;', '\&')  # for latex literal interp.

    # Author list: truncate long lists to four names plus \etal, and note
    # the owner's presence when they fall outside the shown names.
    if len(pub["authors"]) > 5:
        entry += "; ".join(pub["authors"][:4])
        entry += "; \\etal"
        if own_idx >= 4:
            others = len(pub['authors']) - 4
            entry += "\\ ({{{0}}} other co-authors, ".format(others)
            entry += "incl.\\ \\textbf{Sunseri, James})"
    elif len(pub["authors"]) > 1:
        entry += "; ".join(pub["authors"][:-1])
        entry += "; \\& " + pub["authors"][-1]
    else:
        entry += pub["authors"][0]

    entry += ", {0}".format(pub["year"])

    # Title: hyperlinked through the DOI when one exists.
    if pub["doi"] is not None:
        entry += ", \\doi{{{0}}}{{{1}}}".format(pub["doi"], title)
    else:
        entry += ", \\emph{{{0}}}".format(title)

    # Journal macro (skipped for pure arXiv records).
    if not pub["pub"] in [None, "ArXiv e-prints"]:
        entry += ", " + JOURNAL_MAP.get(
            pub["pub"].strip("0123456789# "), pub["pub"]
        )

    if pub["volume"] is not None:
        entry += ", \\textbf{{{0}}}".format(pub["volume"])
    if pub["page"] is not None:
        entry += ", {0}".format(pub["page"])
    if pub["arxiv"] is not None:
        entry += " (\\arxiv{{{0}}})".format(pub["arxiv"])

    # Citation count link, with singular/plural wording.
    if pub["url"] is not None and pub["citations"] == 1:
        entry += " [\\href{{{0}}}{{{1} citation}}]".format(
            pub["url"], pub["citations"]
        )
    elif pub["url"] is not None and pub["citations"] > 1:
        entry += " [\\href{{{0}}}{{{1} citations}}]".format(
            pub["url"], pub["citations"]
        )
    return entry
if __name__ == "__main__":
    with open("../data/ads_scrape.json", "r") as f:
        pubs = json.load(f)
    # Newest publications first.
    pubs = sorted(pubs, key=itemgetter("pubdate"), reverse=True)
    # Keep journal articles and arXiv preprints; drop software records.
    pubs = [
        p
        for p in pubs
        if (
            p["doctype"] in ["article", "eprint"]
            and p["pub"] != "Zenodo Software Release"
        )
    ]
    ref = [p for p in pubs if p["doctype"] == "article"]
    unref = [p for p in pubs if p["doctype"] == "eprint"]
    # Compute citation stats
    npapers = len(ref)
    nfirst = sum(1 for p in pubs if "Sunseri" in p["authors"][0])
    cites = sorted((p["citations"] for p in pubs), reverse=True)
    ncitations = sum(cites)
    # h-index: count of papers whose citation count exceeds their rank.
    hindex = sum(c > i for i, c in enumerate(cites))
    # summary = (
    #     "refereed: {1} / first author: {2} / citations: {3} / "
    #     "h-index: {4} ({0})"
    # ).format(date.today(), npapers, nfirst, ncitations, hindex)
    summary = (
        "citations: {1} / "
        "h-index: {2} ({0})"
    ).format(date.today(), ncitations, hindex)
    with open("../supp_tex/pubs_summary.tex", "w") as f:
        f.write(summary)
    # Number entries in descending order and render each to a LaTeX \item.
    ref = list(map(format_pub, zip(range(len(ref), 0, -1), ref)))
    unref = list(map(format_pub, zip(range(len(unref), 0, -1), unref)))
    with open("../supp_tex/pubs_ref.tex", "w") as f:
        f.write("\n\n".join(ref))
    with open("../supp_tex/pubs_unref.tex", "w") as f:
        f.write("\n\n".join(unref))
| 29.577465 | 83 | 0.526429 |
ec7411b5f172a7ac6a100a0190bcbdabbad66c0e | 2,703 | py | Python | lib/surface/functions/remove_invoker_policy_binding.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/functions/remove_invoker_policy_binding.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/surface/functions/remove_invoker_policy_binding.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Removes an invoker binding from the IAM policy of a Google Cloud Function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.functions import flags
from googlecloudsdk.command_lib.functions.v2.remove_invoker_policy_binding import command
from googlecloudsdk.command_lib.iam import iam_util
def _CommonArgs(parser, track):
  """Registers flags for this command."""
  # The flags are identical for every release track; the parameter is
  # accepted (and discarded) so both Args() methods share one call shape.
  del track
  flags.AddFunctionResourceArg(parser, 'to remove the invoker binding from')
  iam_util.AddMemberFlag(parser, 'to remove from the IAM policy', False)
# NOTE(review): @base.Hidden applies only to this beta surface; the alpha
# class below deliberately does not inherit from it (see comment there).
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class RemoveInvokerPolicyBindingBeta(base.Command):
  """Removes an invoker binding from the IAM policy of a Google Cloud Function.

  This command applies to Cloud Functions V2 only.
  """

  @staticmethod
  def Args(parser):
    """Registers flags for this command."""
    _CommonArgs(parser, base.ReleaseTrack.BETA)

  def Run(self, args):
    """Runs the command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      The updated IAM policy for the service.
    """
    return command.Run(args, self.ReleaseTrack())
# Note: does not inherit from RemoveInvokerPolicyBindingBeta to avoid inheriting
# _is_hidden (set by the @base.Hidden decorator on the beta class), which would
# hide the alpha command as well.
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class RemoveInvokerPolicyBindingAlpha(base.Command):
  """Removes an invoker binding from the IAM policy of a Google Cloud Function.

  This command applies to Cloud Functions V2 only.
  """

  @staticmethod
  def Args(parser):
    """Registers flags for this command."""
    _CommonArgs(parser, base.ReleaseTrack.ALPHA)

  def Run(self, args):
    """Runs the command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      The updated IAM policy for the service.
    """
    return command.Run(args, self.ReleaseTrack())
| 31.8 | 89 | 0.749168 |
c3ff1ba364c0b884e2e0740da54e94878190d931 | 1,332 | py | Python | tests/test_doubles/sandbox_test.py | Avvir/pyne | 864885a8fb632b72c00af164f150b1daa38a346f | [
"MIT"
] | 4 | 2018-08-10T20:05:10.000Z | 2019-07-24T15:29:32.000Z | tests/test_doubles/sandbox_test.py | Avvir/pyne | 864885a8fb632b72c00af164f150b1daa38a346f | [
"MIT"
] | 6 | 2018-09-25T20:15:51.000Z | 2021-12-22T17:09:52.000Z | tests/test_doubles/sandbox_test.py | Avvir/pyne | 864885a8fb632b72c00af164f150b1daa38a346f | [
"MIT"
] | null | null | null | from pynetest.test_doubles.sandbox import Sandbox
from tests.test_helpers.some_class import SomeClass
def test__sandbox_spies():
    """Spying on a method registers exactly one spy with the sandbox."""
    box = Sandbox()
    target = SomeClass()
    box.spy(target.some_method)
    assert len(box._spies) == 1
def test__sandbox_restore():
    """restore() unregisters every spy previously installed by the sandbox."""
    sandbox = Sandbox()
    some_class_instance = SomeClass()
    some_other_instace = SomeClass()
    sandbox.spy(some_class_instance.some_method)
    sandbox.spy(some_other_instace.some_positional_args_method)
    some_class_instance.some_method()
    # Spies are recorded in installation order.
    assert sandbox._spies[0].method_name == 'some_method'
    assert sandbox._spies[1].method_name == 'some_positional_args_method'
    sandbox.restore()
    assert len(sandbox._spies) == 0
def test__sandbox_reset():
    """reset() clears recorded call history without removing the spies."""
    sandbox = Sandbox()
    some_class_instance = SomeClass()
    sandbox.spy(some_class_instance.some_method)
    some_class_instance.some_method("some_arg")
    # last_call records (positional args, keyword args).
    assert some_class_instance.some_method.last_call == (("some_arg",), {})
    sandbox.reset()
    assert some_class_instance.some_method.last_call is None
def test__sandbox_when_calling_then_return():
    """Stubbing via when_calling makes the method return the canned value."""
    box = Sandbox()
    target = SomeClass()
    box.when_calling(target.some_method).then_return(5)
    assert target.some_method() == 5
| 29.6 | 75 | 0.759009 |
fd5e18196e6fb1f54cc150d8a89b49a14e9213a1 | 142 | py | Python | src/tess_atlas/cfg.py | dfm/tess-atlas | 0831d15b65ce2ac345dd3f7a4eac8a14883dcccc | [
"MIT"
] | 10 | 2019-03-26T19:47:03.000Z | 2021-12-15T11:44:31.000Z | src/tess_atlas/cfg.py | dfm/tess-atlas | 0831d15b65ce2ac345dd3f7a4eac8a14883dcccc | [
"MIT"
] | 98 | 2019-03-27T16:12:02.000Z | 2022-03-15T21:32:09.000Z | src/tess_atlas/cfg.py | dfm/tess-atlas | 0831d15b65ce2ac345dd3f7a4eac8a14883dcccc | [
"MIT"
] | 7 | 2019-03-26T19:47:11.000Z | 2020-10-07T03:26:21.000Z | c = get_config()
# Render notebooks to standalone HTML ...
c.NbConvertApp.export_format = "html"
# ... and pull embedded outputs (figures, etc.) out into separate files.
c.Exporter.preprocessors = [
    "nbconvert.preprocessors.ExtractOutputPreprocessor"
]
| 23.666667 | 55 | 0.774648 |
e31da3a86918824ec976191bba52d10614a25517 | 261 | py | Python | config/my_config.py | GeorgeManakanatas/PPDM | 9e6af80681db497447197cac14b26b99e588f231 | [
"MIT",
"Unlicense"
] | 3 | 2016-11-18T07:24:39.000Z | 2019-07-06T07:45:15.000Z | config/my_config.py | GeorgeManakanatas/PPDM | 9e6af80681db497447197cac14b26b99e588f231 | [
"MIT",
"Unlicense"
] | 2 | 2017-02-14T15:24:34.000Z | 2019-11-25T19:18:05.000Z | config/my_config.py | GeorgeManakanatas/PPDM | 9e6af80681db497447197cac14b26b99e588f231 | [
"MIT",
"Unlicense"
] | 3 | 2017-12-19T07:04:24.000Z | 2021-08-20T15:42:13.000Z | import json
def config_file():
    '''
    Reads config/config.json, caches it in the module-level global
    ``config_values`` (kept for backward compatibility with existing
    callers) and returns the parsed dictionary.

    Previously ended with a bare ``return`` and so always returned None,
    forcing every caller to reach for the global.
    '''
    global config_values
    # read configuration file
    with open('config/config.json', 'r') as conf:
        config_values = json.load(conf)
    return config_values
d379be349165ff30c2713ed809be80027cf9d342 | 2,036 | py | Python | kargo/contrib/aws_inventory/kargo-aws-inventory.py | sameerawickramasekara/k8s-on-openstack | aca59b30220568a9b90f371291587d63a37e8429 | [
"Apache-2.0"
] | null | null | null | kargo/contrib/aws_inventory/kargo-aws-inventory.py | sameerawickramasekara/k8s-on-openstack | aca59b30220568a9b90f371291587d63a37e8429 | [
"Apache-2.0"
] | 2 | 2018-02-19T12:27:11.000Z | 2018-02-21T18:57:14.000Z | kargo/contrib/aws_inventory/kargo-aws-inventory.py | sameerawickramasekara/k8s-on-openstack | aca59b30220568a9b90f371291587d63a37e8429 | [
"Apache-2.0"
] | 4 | 2018-02-09T11:31:50.000Z | 2018-02-21T12:38:51.000Z | #!/usr/bin/env python
import boto3
import os
import argparse
import json
class SearchEC2Tags(object):
def __init__(self):
self.parse_args()
if self.args.list:
self.search_tags()
if self.args.host:
data = {}
print json.dumps(data, indent=2)
def parse_args(self):
##Check if VPC_VISIBILITY is set, if not default to private
if "VPC_VISIBILITY" in os.environ:
self.vpc_visibility = os.environ['VPC_VISIBILITY']
else:
self.vpc_visibility = "private"
##Support --list and --host flags. We largely ignore the host one.
parser = argparse.ArgumentParser()
parser.add_argument('--list', action='store_true', default=False, help='List instances')
parser.add_argument('--host', action='store_true', help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def search_tags(self):
hosts = {}
hosts['_meta'] = { 'hostvars': {} }
##Search ec2 three times to find nodes of each group type. Relies on kargo-role key/value.
for group in ["kube-master", "kube-node", "etcd"]:
hosts[group] = []
tag_key = "kargo-role"
tag_value = ["*"+group+"*"]
region = os.environ['REGION']
ec2 = boto3.resource('ec2', region)
instances = ec2.instances.filter(Filters=[{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}])
for instance in instances:
if self.vpc_visibility == "public":
hosts[group].append(instance.public_dns_name)
hosts['_meta']['hostvars'][instance.public_dns_name] = {
'ansible_ssh_host': instance.public_ip_address
}
else:
hosts[group].append(instance.private_dns_name)
hosts['_meta']['hostvars'][instance.private_dns_name] = {
'ansible_ssh_host': instance.private_ip_address
}
hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
print json.dumps(hosts, sort_keys=True, indent=2)
SearchEC2Tags()
| 32.83871 | 151 | 0.642927 |
83f0694680cbfaec06031dcda77e31d31c75b164 | 2,032 | py | Python | applications/collect_ppmi_volumes_direct_reg_seg_mjff_20210401.py | stnava/superiq | a13befe5f525bbef02cd095031952db62c5d054e | [
"Apache-2.0"
] | null | null | null | applications/collect_ppmi_volumes_direct_reg_seg_mjff_20210401.py | stnava/superiq | a13befe5f525bbef02cd095031952db62c5d054e | [
"Apache-2.0"
] | null | null | null | applications/collect_ppmi_volumes_direct_reg_seg_mjff_20210401.py | stnava/superiq | a13befe5f525bbef02cd095031952db62c5d054e | [
"Apache-2.0"
] | null | null | null | from superiq import VolumeData
from superiq.pipeline_utils import *
import boto3
import pandas as pd
from datetime import datetime
def collect_brain_age(bucket, version):
prefix = f"superres-pipeline-{version}/"
objects = list_images(bucket, prefix)
brain_age = [i for i in objects if i.endswith('brain_age.csv')]
dfs = []
for i in brain_age:
ba = get_s3_object(bucket, i, '/tmp')
filename = ba.split('/')[-1]
splits = filename.split('-')
ba_df = pd.read_csv(ba)
ba_df['Repeat'] = splits[4]
dfs.append(ba_df)
dfs = pd.concat(dfs)
return dfs
if __name__ == "__main__":
bucket = "mjff-ppmi"
version = "mjff-20210401"
prefix = f"superres-pipeline-{version}/"
stack_filename = f'ppmi_stacked_volumes_{version}.csv'
pivoted_filename = f'ppmi_pivoted_volumes_{version}.csv'
upload_prefix = "volume_measures/"
filter_suffixes = ['OR_seg.csv', 'SR_ljflseg.csv', 'SR_seg.csv', 'SR_regseg.csv']
vd = VolumeData(bucket, prefix, filter_suffixes, upload_prefix)
local_stack = vd.stack_volumes(stack_filename)
local_pivot = vd.pivot_data(local_stack, pivoted_filename)
local_pivot_df = pd.read_csv(local_pivot)
local_pivot_df = local_pivot_df
ba = collect_brain_age(bucket, version)
local_pivot_df['join_date'] = [str(i)[:6] for i in local_pivot_df['Date']]
print(local_pivot_df.shape)
local_pivot_df = pd.merge(local_pivot_df, ba, on='Repeat')
print(local_pivot_df.shape)
s3 = boto3.client('s3')
local_pivot_df.to_csv('local_pivot.csv')
s3.upload_file('local_pivot.csv', bucket, "volume_measures/20210401_direct_reg_seg_ppmi_volumes.csv")
metadata = False
if metadata:
metadata_bucket = 'mjff-ppmi'
metadata_key = 's3://ppmi-metadata/PPMIFullMetadata.csv'
metadata_df = pd.read_csv(metadata_key)
merged = pd.merge(
metadata_df,
local_pivot_df,
right_on=['Subject', 'join_date'],
left_on=['PATNO', 'join_date'],
how='outer'
)
merged_path = "full_" + "simple_reg_sr_ppmi_volumes.csv"
merged.to_csv(merged_path, index=False)
s3.upload_file(merged_path, bucket, merged_path)
| 35.034483 | 102 | 0.744587 |
41cb241d377fe959bcf3a0dacb7cc1de9ea1a93b | 2,967 | py | Python | tests/components/sonos/test_switch.py | NikoM87/core | 7403ba1e81579b4ab83da24e570d4afe864e6312 | [
"Apache-2.0"
] | 2 | 2020-07-24T07:40:10.000Z | 2020-09-20T18:02:12.000Z | tests/components/sonos/test_switch.py | NikoM87/core | 7403ba1e81579b4ab83da24e570d4afe864e6312 | [
"Apache-2.0"
] | 79 | 2020-07-23T07:13:37.000Z | 2022-03-22T06:02:37.000Z | tests/components/sonos/test_switch.py | kmdm/home-assistant | 4007430d7262ef035bb80affea13657fdc993b1d | [
"Apache-2.0"
] | null | null | null | """Tests for the Sonos Alarm switch platform."""
from homeassistant.components.sonos import DOMAIN
from homeassistant.components.sonos.switch import (
ATTR_DURATION,
ATTR_ID,
ATTR_INCLUDE_LINKED_ZONES,
ATTR_PLAY_MODE,
ATTR_RECURRENCE,
ATTR_VOLUME,
)
from homeassistant.const import ATTR_TIME, STATE_ON
from homeassistant.helpers.entity_registry import async_get as async_get_entity_registry
from homeassistant.setup import async_setup_component
async def setup_platform(hass, config_entry, config):
"""Set up the switch platform for testing."""
config_entry.add_to_hass(hass)
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
async def test_entity_registry(hass, config_entry, config):
"""Test sonos device with alarm registered in the device registry."""
await setup_platform(hass, config_entry, config)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
assert "media_player.zone_a" in entity_registry.entities
assert "switch.sonos_alarm_14" in entity_registry.entities
async def test_alarm_attributes(hass, config_entry, config):
"""Test for correct sonos alarm state."""
await setup_platform(hass, config_entry, config)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
alarm = entity_registry.entities["switch.sonos_alarm_14"]
alarm_state = hass.states.get(alarm.entity_id)
assert alarm_state.state == STATE_ON
assert alarm_state.attributes.get(ATTR_TIME) == "07:00:00"
assert alarm_state.attributes.get(ATTR_ID) == "14"
assert alarm_state.attributes.get(ATTR_DURATION) == "02:00:00"
assert alarm_state.attributes.get(ATTR_RECURRENCE) == "DAILY"
assert alarm_state.attributes.get(ATTR_VOLUME) == 0.25
assert alarm_state.attributes.get(ATTR_PLAY_MODE) == "SHUFFLE_NOREPEAT"
assert not alarm_state.attributes.get(ATTR_INCLUDE_LINKED_ZONES)
async def test_alarm_create_delete(
hass, config_entry, config, soco, alarm_clock, alarm_clock_extended, alarm_event
):
"""Test for correct creation and deletion of alarms during runtime."""
soco.alarmClock = alarm_clock_extended
await setup_platform(hass, config_entry, config)
subscription = alarm_clock_extended.subscribe.return_value
sub_callback = subscription.callback
sub_callback(event=alarm_event)
await hass.async_block_till_done()
entity_registry = async_get_entity_registry(hass)
assert "switch.sonos_alarm_14" in entity_registry.entities
assert "switch.sonos_alarm_15" in entity_registry.entities
alarm_clock_extended.ListAlarms.return_value = alarm_clock.ListAlarms.return_value
alarm_event.increment_variable("alarm_list_version")
sub_callback(event=alarm_event)
await hass.async_block_till_done()
assert "switch.sonos_alarm_14" in entity_registry.entities
assert "switch.sonos_alarm_15" not in entity_registry.entities
| 38.038462 | 88 | 0.782272 |
555bfaa44be639e849d7d2478c32b54e5b4426f6 | 383 | py | Python | src/UnderGUI/__init__.py | underwatergrasshopper/PyUnderGUI | 9a3107bbcf04168eb131a6dae5d50ff35b00ea7f | [
"MIT"
] | null | null | null | src/UnderGUI/__init__.py | underwatergrasshopper/PyUnderGUI | 9a3107bbcf04168eb131a6dae5d50ff35b00ea7f | [
"MIT"
] | null | null | null | src/UnderGUI/__init__.py | underwatergrasshopper/PyUnderGUI | 9a3107bbcf04168eb131a6dae5d50ff35b00ea7f | [
"MIT"
] | null | null | null | from .Exceptions import *
from .Commons import *
from .Utility import *
from .Color import *
from .GlyphCodeBlocks import *
from .FontFetcher import *
from .Font import *
from .TextDrawer import *
from .FramedText import *
def print_heyo():
"""
Function which prints heyo.
"""
print("heyo")
| 22.529412 | 32 | 0.558747 |
ae6865ad79384db5403610dedfb774413c0be80e | 27,925 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/operations/_network_security_groups_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/operations/_network_security_groups_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/operations/_network_security_groups_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkSecurityGroupsOperations(object):
"""NetworkSecurityGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkSecurityGroup"
"""Gets the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkSecurityGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.NetworkSecurityGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
parameters, # type: "_models.NetworkSecurityGroup"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkSecurityGroup"
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkSecurityGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
parameters, # type: "_models.NetworkSecurityGroup"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.NetworkSecurityGroup"]
"""Creates or updates a network security group in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param parameters: Parameters supplied to the create or update network security group
operation.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.NetworkSecurityGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either NetworkSecurityGroup or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.NetworkSecurityGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkSecurityGroup"
"""Updates a network security group tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param parameters: Parameters supplied to update network security group tags.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkSecurityGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.NetworkSecurityGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkSecurityGroupListResult"]
"""Gets all network security groups in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.NetworkSecurityGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkSecurityGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkSecurityGroups'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkSecurityGroupListResult"]
"""Gets all network security groups in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.NetworkSecurityGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkSecurityGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups'} # type: ignore
| 49.955277 | 211 | 0.669329 |
30c97f3462168acdec7bbb1f631aa2a91cc7163a | 16,024 | py | Python | tests/shakelib/sites_test.py | ynthdhj/shakemap | 2771b8aee6b22f065cc80632c894a0ba77829619 | [
"CC0-1.0"
] | null | null | null | tests/shakelib/sites_test.py | ynthdhj/shakemap | 2771b8aee6b22f065cc80632c894a0ba77829619 | [
"CC0-1.0"
] | null | null | null | tests/shakelib/sites_test.py | ynthdhj/shakemap | 2771b8aee6b22f065cc80632c894a0ba77829619 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
# stdlib imports
import sys
import os.path
import numpy as np
import pytest
# local imports
from shakelib.sites import Sites
import shakelib.sites as sites
homedir = os.path.dirname(os.path.abspath(__file__)) # where is this script?
shakedir = os.path.abspath(os.path.join(homedir, '..', '..'))
sys.path.insert(0, shakedir)
def test_depthpars():
vs30 = np.linspace(200, 700, 6)
cy14 = sites.Sites._z1pt0_from_vs30_cy14_cal(vs30)
cb14 = sites.Sites._z2pt5_from_vs30_cb14_cal(vs30)
ask14 = sites.Sites._z1pt0_from_vs30_ask14_cal(vs30)
cy08 = sites.Sites._z1pt0_from_vs30_cy08(vs30)
cb07 = sites.Sites._z2pt5_from_z1pt0_cb07(cy08)
cy14t = np.array(
[509.34591289, 458.77871089, 355.71703571, 228.88509539,
125.83403858, 63.32230756])
cb14t = np.array(
[2794.70046489, 1757.46573662, 1264.61099128, 979.69726864,
795.25892497, 666.68613683])
ask14t = np.array(
[494.57881929, 453.37522656, 365.19411327, 247.50118189,
142.44704445, 73.48751385])
cy08t = np.array(
[336.55300716, 315.06784809, 215.89565373, 111.17436474,
57.50382896, 32.18118661])
cb07t = np.array(
[1728.90806073, 1651.6689139, 1295.14487514, 918.67184124,
725.72626513, 634.69136587])
np.testing.assert_allclose(cy14, cy14t)
np.testing.assert_allclose(cb14, cb14t)
np.testing.assert_allclose(ask14, ask14t)
np.testing.assert_allclose(cy08, cy08t)
np.testing.assert_allclose(cb07, cb07t)
def test_sites(vs30file=None):
vs30file = os.path.join(homedir, 'sites_data/Vs30_test.grd')
cx = -118.2
cy = 34.1
dx = 0.0083
dy = 0.0083
xspan = 0.0083 * 5
yspan = 0.0083 * 5
mysite = Sites.fromCenter(cx, cy, xspan, yspan, dx, dy,
vs30File=vs30file, padding=True,
resample=False)
grd = mysite.getVs30Grid().getData()
grd_target = np.array([[426.64892578, 398.89712524, 428.88549805,
428.78335571, 428.58578491, 430.54354858,
433.59750366],
[426.20635986, 425.57946777, 428.21954346,
426.06726074, 421.86233521, 423.53192139,
426.25296021],
[428.14602661, 430.05944824, 429.3427124,
426.13626099, 409.76391602, 383.07299805,
372.39117432],
[432.64077759, 434.55209351, 432.21600342,
395.53771973, 419.31866455, 421.67749023,
426.23449707],
[345.14605713, 403.78097534, 385.49118042,
413.04779053, 428.22869873, 427.00268555,
426.8951416],
[336.48217773, 347.82220459, 425.96798706,
432.0640564, 429.40097046, 427.74179077,
427.00006104],
[330.57504272, 392.33255005, 430.33862305,
432.01391602, 429.43969727, 427.30435181,
425.96151733]])
np.testing.assert_allclose(grd, grd_target)
sc = mysite.getSitesContext()
scr = mysite.getSitesContext(rock_vs30=760.0)
grd = sc.backarc
grdt = np.array([[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False]],
dtype=bool)
np.testing.assert_allclose(grd, grdt)
grd = sc.lats
grdt = np.array([34.075, 34.08333333, 34.09166667, 34.1,
34.10833333, 34.11666667, 34.125])
np.testing.assert_allclose(grd, grdt)
grd = sc.lons
grdt = np.array([-118.225, -118.21666667, -118.20833333, -118.2,
-118.19166667, -118.18333333, -118.175])
np.testing.assert_allclose(grd, grdt)
grd = sc.vs30
grdt = np.array(
[[426.64892578, 398.89712524, 428.88549805, 428.78335571,
428.58578491, 430.54354858, 433.59750366],
[426.20635986, 425.57946777, 428.21954346, 426.06726074,
421.86233521, 423.53192139, 426.25296021],
[428.14602661, 430.05944824, 429.3427124, 426.13626099,
409.76391602, 383.07299805, 372.39117432],
[432.64077759, 434.55209351, 432.21600342, 395.53771973,
419.31866455, 421.67749023, 426.23449707],
[345.14605713, 403.78097534, 385.49118042, 413.04779053,
428.22869873, 427.00268555, 426.8951416],
[336.48217773, 347.82220459, 425.96798706, 432.0640564,
429.40097046, 427.74179077, 427.00006104],
[330.57504272, 392.33255005, 430.33862305, 432.01391602,
429.43969727, 427.30435181, 425.96151733]])
np.testing.assert_allclose(grd, grdt)
grd = scr.vs30
grdt = np.array([[760., 760., 760., 760., 760., 760., 760.],
[760., 760., 760., 760., 760., 760., 760.],
[760., 760., 760., 760., 760., 760., 760.],
[760., 760., 760., 760., 760., 760., 760.],
[760., 760., 760., 760., 760., 760., 760.],
[760., 760., 760., 760., 760., 760., 760.],
[760., 760., 760., 760., 760., 760., 760.]])
np.testing.assert_allclose(grd, grdt)
grd = sc.vs30measured
grdt = np.array([[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False]],
dtype=bool)
np.testing.assert_allclose(grd, grdt)
grd = sc.z1pt0_ask14_cal
grdt = np.array(
[[335.06579012, 366.39725582, 332.4593083, 332.57855835,
332.80916279, 330.52077553, 326.93706615],
[335.58036296, 336.30856215, 333.23643861, 335.74201097,
340.60915547, 338.68122322, 335.52619949],
[333.32217548, 331.08730312, 331.92526979, 335.66183031,
354.37798647, 383.19730994, 394.00166561],
[328.06152136, 325.81357093, 328.56025522, 370.03738338,
343.53423548, 340.82221914, 335.54765968],
[419.3225465, 361.03999163, 380.68893324, 350.67806177,
333.2257608, 334.65418562, 334.77934134],
[426.64586941, 416.9870982, 335.85735317, 328.73858131,
331.85719411, 333.79341356, 334.65724021],
[431.42758271, 373.4746626, 330.76064671, 328.79741743,
331.81193751, 334.30299304, 335.86486938]])
np.testing.assert_allclose(grd, grdt)
grd = sc.z1pt0_cy08
grdt = np.array(
[[183.2043947, 217.27787758, 180.56690603, 180.68687904,
180.91907101, 178.625999, 175.08452094],
[183.72884833, 184.47314285, 181.34994816, 183.89385557,
188.91901585, 186.91536614, 183.67358657],
[181.43651087, 179.19139187, 180.03044953, 183.81199342,
203.71493023, 237.03939223, 250.05192732],
[176.18920323, 173.98674225, 176.68107716, 221.49002942,
191.99154431, 189.14149784, 183.69548028],
[280.25774719, 211.16371072, 234.04298229, 199.65723106,
181.33916991, 182.78577761, 182.91298179],
[288.5669674, 277.54780981, 184.01166928, 176.85723522,
179.96216197, 181.91290358, 182.78888133],
[293.80330679, 225.506479, 178.86520526, 176.91538893,
179.91677656, 182.42922842, 184.01934871]])
np.testing.assert_allclose(grd, grdt)
grd = sc.z1pt0_cy14_cal
grdt = np.array(
[[322.09231215, 357.07647045, 319.22097485, 319.3522119,
319.60603217, 317.08933532, 313.15733537],
[322.65987959, 323.46347258, 320.07644714, 322.83822344,
328.21884905, 326.08502532, 322.60012698],
[320.17085962, 317.71195569, 318.63340828, 322.74975843,
343.55342441, 376.19315199, 388.62006436],
[314.38985785, 311.92697475, 314.93687894, 361.19727923,
331.46256533, 328.4548676, 322.62380134],
[418.15243574, 351.03313981, 373.32295288, 339.41631214,
320.0646893, 321.63848529, 321.7764637],
[426.80110483, 415.40447342, 322.96549285, 315.13252356,
318.55852723, 320.68989724, 321.64185267],
[432.47426567, 365.09924657, 317.35292206, 315.19707981,
318.50874868, 321.25138528, 322.9737867]])
np.testing.assert_allclose(grd, grdt)
grd = sc.z2pt5_cb07
grdt = np.array(
[[1177.61979893, 1300.11396989, 1168.13802718, 1168.56933015,
1169.4040603, 1161.16046639, 1148.4288528],
[1179.50520975, 1182.18094855, 1170.95306363, 1180.09841078,
1198.16386197, 1190.96074128, 1179.30654372],
[1171.26425658, 1163.19305376, 1166.20946607, 1179.80411634,
1251.35517419, 1371.15661506, 1417.9366787],
[1152.40018562, 1144.48233838, 1154.16847239, 1315.25665576,
1209.20960179, 1198.96368474, 1179.3852516],
[1526.52660116, 1278.13354004, 1360.38452132, 1236.76774567,
1170.91431583, 1176.11487052, 1176.57216955],
[1556.3982478, 1516.78437627, 1180.52195107, 1154.8017606,
1165.96397227, 1172.97688837, 1176.12602836],
[1575.22288791, 1329.69579201, 1162.02041292, 1155.01082322,
1165.80081172, 1174.83307617, 1180.5495586]])
np.testing.assert_allclose(grd, grdt)
grd = sc.z2pt5_cb14_cal
grdt = np.array(
[[1.17466154, 1.26861168, 1.1676564, 1.16797461, 1.16859058,
1.16251358, 1.15315136],
[1.17605704, 1.17803908, 1.16973403, 1.17649629, 1.18992131,
1.18455663, 1.17590996],
[1.16996381, 1.16401074, 1.166234, 1.17627836, 1.230198,
1.32873844, 1.37243028],
[1.15606906, 1.15025389, 1.15736893, 1.2809454, 1.19818264,
1.19051805, 1.17596823],
[1.49705669, 1.25107322, 1.31920733, 1.21901552, 1.16970542,
1.1735483, 1.17388652],
[1.54123541, 1.48388699, 1.17680997, 1.15783457, 1.16605299,
1.17122878, 1.17355655],
[1.57278236, 1.29292406, 1.1631469, 1.1579883, 1.16593269,
1.17260055, 1.17683042]])
np.testing.assert_allclose(grd, grdt)
lldict = {'lats': np.array([34.1, 34.111]),
'lons': np.array([-118.2, -118.222])}
scsamp = mysite.getSitesContext(lldict)
vs30 = scsamp.vs30
vs30t = np.array([395.53771973, 428.14602661])
np.testing.assert_allclose(vs30, vs30t)
lldict = {'lats': np.array([34.1, 34.111]),
'lons': np.array([-118.2, -118.222])}
scrsamp = mysite.getSitesContext(lldict, rock_vs30=760)
vs30 = scrsamp.vs30
vs30t = np.array([760., 760.])
np.testing.assert_allclose(vs30, vs30t)
lats = np.array([34.1, 34.111])
lons = np.array([-118.2, -118.222])
lldict = {'lats': lats, 'lons': lons}
scsamp = mysite.getSitesContext(lldict)
grd = scsamp.vs30measured
grdt = np.array([False, False], dtype=bool)
np.testing.assert_allclose(grd, grdt)
with pytest.raises(Exception) as e: # noqa
scsamp = mysite.getSitesContextFromLatLon(
np.array([34.1, 34.111, 34.5]),
np.array([-118.2, -118.222]))
mysite = Sites.fromCenter(cx, cy, xspan, yspan, dx, dy,
vs30File=None, padding=True,
resample=False)
grd = mysite.getVs30Grid().getData()
grd_target = np.array(
[[686., 686., 686., 686., 686., 686.],
[686., 686., 686., 686., 686., 686.],
[686., 686., 686., 686., 686., 686.],
[686., 686., 686., 686., 686., 686.],
[686., 686., 686., 686., 686., 686.],
[686., 686., 686., 686., 686., 686.]])
np.testing.assert_allclose(grd, grd_target)
xmin = -118.2
xmax = -118.12
ymin = 34.05
ymax = 34.1
dx = 0.0083
dy = 0.0083
mysite = Sites.fromBounds(xmin, xmax, ymin, ymax, dx, dy,
vs30File=vs30file, padding=False,
resample=False)
grd = mysite.getVs30Grid().getData()
grd_target = np.array([[428.22869873, 427.00268555, 426.8951416 , 425.62023926,
419.60952759, 411.7961731 , 407.53509521, 406.22122192,
405.31622314],
[429.40097046, 427.74179077, 427.00006104, 419.5062561 ,
411.10830688, 407.48901367, 406.53305054, 406.59658813,
406.24887085],
[429.43969727, 427.30435181, 425.96151733, 426.15856934,
427.56121826, 397.67102051, 399.21054077, 404.54968262,
407.18515015],
[428.86270142, 425.99606323, 423.5692749 , 423.59835815,
425.92758179, 408.44885254, 406.55810547, 409.06945801,
413.75210571],
[427.91104126, 424.53796387, 419.47485352, 418.177948 ,
424.14065552, 428.57913208, 432.95300293, 427.77731323,
431.46524048],
[423.15557861, 424.48355103, 419.27658081, 418.60211182,
423.86721802, 428.06176758, 432.42089844, 438.54446411,
448.37237549]])
np.testing.assert_allclose(grd, grd_target)
mysite = Sites.fromBounds(xmin, xmax, ymin, ymax, dx, dy,
vs30File=None, padding=False,
resample=False)
grd = mysite.getVs30Grid().getData()
grd_target = np.array(
[[686., 686., 686., 686., 686., 686., 686., 686., 686.,
686., 686.],
[686., 686., 686., 686., 686., 686., 686., 686., 686.,
686., 686.],
[686., 686., 686., 686., 686., 686., 686., 686., 686.,
686., 686.],
[686., 686., 686., 686., 686., 686., 686., 686., 686.,
686., 686.],
[686., 686., 686., 686., 686., 686., 686., 686., 686.,
686., 686.],
[686., 686., 686., 686., 686., 686., 686., 686., 686.,
686., 686.],
[686., 686., 686., 686., 686., 686., 686., 686., 686.,
686., 686.],
[686., 686., 686., 686., 686., 686., 686., 686., 686.,
686., 686.]])
np.testing.assert_allclose(grd, grd_target)
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    test_depthpars()
    test_sites()
| 47.690476 | 88 | 0.536757 |
3c6ec2b2820b866ccc303071c6960fd3f84b28ca | 12,613 | py | Python | tests/test_phoenix_channel_subscription.py | chadfurman/gql | ca4021df2cd7ee9b5057ebddb7bf0408e45a1dba | [
"MIT"
] | 2 | 2021-08-22T16:12:15.000Z | 2021-11-09T11:58:42.000Z | tests/test_phoenix_channel_subscription.py | chadfurman/gql | ca4021df2cd7ee9b5057ebddb7bf0408e45a1dba | [
"MIT"
] | null | null | null | tests/test_phoenix_channel_subscription.py | chadfurman/gql | ca4021df2cd7ee9b5057ebddb7bf0408e45a1dba | [
"MIT"
] | null | null | null | import asyncio
import json
import sys
import pytest
from parse import search
from gql import Client, gql
# Marking all tests in this file with the websockets marker
pytestmark = pytest.mark.websockets
test_channel = "test_channel"
test_subscription_id = "test_subscription"
# A server should send this after receiving a 'phx_leave' request message.
# 'query_id' should be the value of the 'ref' in the 'phx_leave' request.
# With only one listener, the transport is closed automatically when
# it exits a subscription, so this is not used in current tests.
channel_leave_reply_template = (
"{{"
'"topic":"{channel_name}",'
'"event":"phx_reply",'
'"payload":{{'
'"response":{{}},'
'"status":"ok"'
"}},"
'"ref":{query_id}'
"}}"
)
# A server should send this after sending the 'channel_leave_reply'
# above, to confirm to the client that the channel was actually closed.
# With only one listener, the transport is closed automatically when
# it exits a subscription, so this is not used in current tests.
channel_close_reply_template = (
"{{"
'"topic":"{channel_name}",'
'"event":"phx_close",'
'"payload":{{}},'
'"ref":null'
"}}"
)
# A server sends this when it receives a 'subscribe' request,
# after creating a unique subscription id. 'query_id' should be the
# value of the 'ref' in the 'subscribe' request.
subscription_reply_template = (
"{{"
'"topic":"{channel_name}",'
'"event":"phx_reply",'
'"payload":{{'
'"response":{{'
'"subscriptionId":"{subscription_id}"'
"}},"
'"status":"ok"'
"}},"
'"ref":{query_id}'
"}}"
)
countdown_data_template = (
"{{"
'"topic":"{subscription_id}",'
'"event":"subscription:data",'
'"payload":{{'
'"subscriptionId":"{subscription_id}",'
'"result":{{'
'"data":{{'
'"countdown":{{'
'"number":{number}'
"}}"
"}}"
"}}"
"}},"
'"ref":null'
"}}"
)
async def server_countdown(ws, path):
    """Mock Phoenix-channel server: answer one 'doc' subscribe request, then
    stream countdown data messages until 0, honoring 'unsubscribe'.

    Two concurrent tasks are used: one sends the countdown numbers, the
    other watches for an unsubscribe request and cancels the first.
    """
    import websockets
    from .conftest import MS, PhoenixChannelServerHelper

    try:
        await PhoenixChannelServerHelper.send_connection_ack(ws)

        # First client message must be the subscription document.
        result = await ws.recv()
        json_result = json.loads(result)
        assert json_result["event"] == "doc"
        channel_name = json_result["topic"]
        query_id = json_result["ref"]

        # Extract the starting count from the GraphQL query text itself.
        payload = json_result["payload"]
        query = payload["query"]
        count_found = search("count: {:d}", query)
        count = count_found[0]
        print(f"Countdown started from: {count}")

        # Acknowledge the subscription with the well-known subscription id.
        await ws.send(
            subscription_reply_template.format(
                subscription_id=test_subscription_id,
                channel_name=channel_name,
                query_id=query_id,
            )
        )

        async def counting_coro():
            # Emit count, count-1, ..., 0 with a small delay between each.
            for number in range(count, -1, -1):
                await ws.send(
                    countdown_data_template.format(
                        subscription_id=test_subscription_id, number=number
                    )
                )
                await asyncio.sleep(2 * MS)

        counting_task = asyncio.ensure_future(counting_coro())

        async def stopping_coro():
            # Watch for an 'unsubscribe' request; reply and stop the countdown.
            nonlocal counting_task
            while True:
                result = await ws.recv()
                json_result = json.loads(result)
                if json_result["event"] == "unsubscribe":
                    query_id = json_result["ref"]
                    payload = json_result["payload"]
                    subscription_id = payload["subscriptionId"]
                    assert subscription_id == test_subscription_id
                    print("Sending unsubscribe reply")
                    await ws.send(
                        subscription_reply_template.format(
                            subscription_id=subscription_id,
                            channel_name=channel_name,
                            query_id=query_id,
                        )
                    )
                    counting_task.cancel()

        stopping_task = asyncio.ensure_future(stopping_coro())

        try:
            await counting_task
        except asyncio.CancelledError:
            print("Now counting task is cancelled")

        # Waiting for a clean stop
        try:
            await asyncio.wait_for(stopping_task, 3)
        except asyncio.CancelledError:
            print("Now stopping task is cancelled")
        except asyncio.TimeoutError:
            print("Now stopping task is in timeout")

        # await PhoenixChannelServerHelper.send_close(ws)
    except websockets.exceptions.ConnectionClosedOK:
        print("Connection closed")
    finally:
        await ws.wait_closed()
countdown_subscription_str = """
subscription {{
countdown (count: {count}) {{
number
}}
}}
"""
@pytest.mark.asyncio
@pytest.mark.parametrize("server", [server_countdown], indirect=True)
@pytest.mark.parametrize("subscription_str", [countdown_subscription_str])
@pytest.mark.parametrize("end_count", [0, 5])
async def test_phoenix_channel_subscription(
    event_loop, server, subscription_str, end_count
):
    """Parameterized test.

    Subscribes to the countdown, checks each number received, and breaks
    out of the subscription (to trigger an unsubscribe) at end_count.

    :param end_count: Target count at which the test will 'break' to unsubscribe.
    """
    import logging
    from gql.transport.phoenix_channel_websockets import (
        PhoenixChannelWebsocketsTransport,
    )
    from gql.transport.phoenix_channel_websockets import log as phoenix_logger
    from gql.transport.websockets import log as websockets_logger

    # Verbose transport logging to aid debugging of failed runs.
    websockets_logger.setLevel(logging.DEBUG)
    phoenix_logger.setLevel(logging.DEBUG)

    path = "/graphql"
    url = f"ws://{server.hostname}:{server.port}{path}"
    sample_transport = PhoenixChannelWebsocketsTransport(
        channel_name=test_channel, url=url, close_timeout=5
    )

    count = 10
    subscription = gql(subscription_str.format(count=count))

    async with Client(transport=sample_transport) as session:
        async for result in session.subscribe(subscription):
            number = result["countdown"]["number"]
            print(f"Number received: {number}")

            # The server counts down one at a time; we must stay in lockstep.
            assert number == count

            if number == end_count:
                # Note: we need to run generator.aclose() here or the finally block in
                # the subscribe will not be reached in pypy3 (python version 3.6.1)
                # In more recent versions, 'break' will trigger __aexit__.
                if sys.version_info < (3, 7):
                    await session._generator.aclose()
                print("break")
                break

            count -= 1

    assert count == end_count
@pytest.mark.asyncio
@pytest.mark.parametrize("server", [server_countdown], indirect=True)
@pytest.mark.parametrize("subscription_str", [countdown_subscription_str])
async def test_phoenix_channel_subscription_no_break(
    event_loop, server, subscription_str
):
    """Check that an unsubscribe sent mid-stream still lets a slow consumer
    drain every queued message, and that the generator then stops on its own
    (no 'break' in the consuming loop).
    """
    import logging
    from gql.transport.phoenix_channel_websockets import (
        PhoenixChannelWebsocketsTransport,
    )
    from gql.transport.phoenix_channel_websockets import log as phoenix_logger
    from gql.transport.websockets import log as websockets_logger

    websockets_logger.setLevel(logging.DEBUG)
    phoenix_logger.setLevel(logging.DEBUG)

    path = "/graphql"
    url = f"ws://{server.hostname}:{server.port}{path}"

    async def testing_stopping_without_break():
        sample_transport = PhoenixChannelWebsocketsTransport(
            channel_name=test_channel, url=url, close_timeout=5
        )

        count = 10
        subscription = gql(subscription_str.format(count=count))

        async with Client(transport=sample_transport) as session:
            async for result in session.subscribe(subscription):
                number = result["countdown"]["number"]
                print(f"Number received: {number}")

                # Simulate a slow consumer
                await asyncio.sleep(0.1)

                if number == 9:
                    # When we consume the number 9 here in the async generator,
                    # all the 10 numbers have already been sent by the backend and
                    # are present in the listener queue
                    # we simulate here an unsubscribe message
                    # In that case, all the 10 numbers should be consumed in the
                    # generator and then the generator should be closed properly
                    await session.transport._send_stop_message(2)

                assert number == count

                count -= 1

        # All ten numbers (10 .. 0 would be 11; here 10 steps down to -1
        # after the final decrement) must have been consumed.
        assert count == -1

    try:
        # Guard against the generator hanging instead of closing.
        await asyncio.wait_for(testing_stopping_without_break(), timeout=5)
    except asyncio.TimeoutError:
        assert False, "The async generator did not stop"
heartbeat_data_template = (
"{{"
'"topic":"{subscription_id}",'
'"event":"subscription:data",'
'"payload":{{'
'"subscriptionId":"{subscription_id}",'
'"result":{{'
'"data":{{'
'"heartbeat":{{'
'"heartbeat_count":{count}'
"}}"
"}}"
"}}"
"}},"
'"ref":null'
"}}"
)
async def phoenix_heartbeat_server(ws, path):
    """Mock Phoenix-channel server that echoes each client 'heartbeat' as a
    subscription data message carrying an incrementing counter, and answers
    'unsubscribe' requests."""
    import websockets
    from .conftest import PhoenixChannelServerHelper

    try:
        await PhoenixChannelServerHelper.send_connection_ack(ws)

        # First client message must be the subscription document.
        result = await ws.recv()
        json_result = json.loads(result)
        assert json_result["event"] == "doc"
        channel_name = json_result["topic"]
        query_id = json_result["ref"]

        await ws.send(
            subscription_reply_template.format(
                subscription_id=test_subscription_id,
                channel_name=channel_name,
                query_id=query_id,
            )
        )

        async def heartbeat_coro():
            # i counts how many heartbeats have been answered so far.
            i = 0
            while True:
                heartbeat_result = await ws.recv()
                json_result = json.loads(heartbeat_result)
                if json_result["event"] == "heartbeat":
                    await ws.send(
                        heartbeat_data_template.format(
                            subscription_id=test_subscription_id, count=i
                        )
                    )
                    i = i + 1
                elif json_result["event"] == "unsubscribe":
                    query_id = json_result["ref"]
                    payload = json_result["payload"]
                    subscription_id = payload["subscriptionId"]
                    assert subscription_id == test_subscription_id

                    print("Sending unsubscribe reply")
                    await ws.send(
                        subscription_reply_template.format(
                            subscription_id=subscription_id,
                            channel_name=channel_name,
                            query_id=query_id,
                        )
                    )

        # Hard upper bound so a stuck client can't hang the server forever.
        await asyncio.wait_for(heartbeat_coro(), 60)
        # await PhoenixChannelServerHelper.send_close(ws)
    except websockets.exceptions.ConnectionClosedOK:
        print("Connection closed")
    finally:
        await ws.wait_closed()
heartbeat_subscription_str = """
subscription {
heartbeat {
heartbeat_count
}
}
"""
@pytest.mark.asyncio
@pytest.mark.parametrize("server", [phoenix_heartbeat_server], indirect=True)
@pytest.mark.parametrize("subscription_str", [heartbeat_subscription_str])
async def test_phoenix_channel_heartbeat(event_loop, server, subscription_str):
    """Verify that transport heartbeats reach the server and come back as an
    incrementing counter; stop after six of them."""
    from gql.transport.phoenix_channel_websockets import (
        PhoenixChannelWebsocketsTransport,
    )

    path = "/graphql"
    url = f"ws://{server.hostname}:{server.port}{path}"
    # Short heartbeat interval keeps the test fast.
    sample_transport = PhoenixChannelWebsocketsTransport(
        channel_name=test_channel, url=url, heartbeat_interval=0.1
    )

    subscription = gql(heartbeat_subscription_str)
    async with Client(transport=sample_transport) as session:
        i = 0
        async for result in session.subscribe(subscription):
            heartbeat_count = result["heartbeat"]["heartbeat_count"]
            print(f"Heartbeat count received: {heartbeat_count}")

            # Server echoes heartbeats in order starting at 0.
            assert heartbeat_count == i
            if heartbeat_count == 5:
                # Note: we need to run generator.aclose() here or the finally block in
                # the subscribe will not be reached in pypy3 (python version 3.6.1)
                # In more recent versions, 'break' will trigger __aexit__.
                if sys.version_info < (3, 7):
                    await session._generator.aclose()
                break

            i += 1
| 31.690955 | 86 | 0.602157 |
cff6141467332bb1acfe1d3cc7b7f01c5560a439 | 496 | py | Python | variation/tokenizers/coding_dna_deletion.py | cancervariants/variant-normalization | e89a9f8366a659c82b2042aeb7effe339851bfb4 | [
"MIT"
] | 1 | 2022-01-19T18:17:49.000Z | 2022-01-19T18:17:49.000Z | variation/tokenizers/coding_dna_deletion.py | cancervariants/variation-normalization | 9c8fbab1562591ae9445d82ddd15df29f1ea1f5a | [
"MIT"
] | 99 | 2021-06-07T12:50:34.000Z | 2022-03-23T13:38:29.000Z | variation/tokenizers/coding_dna_deletion.py | cancervariants/variant-normalization | e89a9f8366a659c82b2042aeb7effe339851bfb4 | [
"MIT"
] | null | null | null | """A module for Coding DNA Deletion Tokenization."""
from variation.schemas.token_response_schema import CodingDNADeletionToken
from variation.tokenizers.deletion_base import DeletionBase
class CodingDNADeletion(DeletionBase):
    """Tokenizer for deletions expressed on the coding DNA ('c.') sequence."""

    def return_token(self, params):
        """Build a CodingDNADeletionToken, or None for any other prefix."""
        if self.parts['reference_sequence'] != 'c':
            return None
        return CodingDNADeletionToken(**params)
| 38.153846 | 77 | 0.745968 |
507728e576fac03d9b561932cbf42a352c94f536 | 7,815 | py | Python | training/ModelsGaussianProcessRegression.py | grimmlab/evars-gpr | 8587f39edaba3b753c62fbd09ec6a78476a3612e | [
"MIT"
] | 3 | 2021-07-13T21:45:54.000Z | 2022-01-27T07:54:58.000Z | training/ModelsGaussianProcessRegression.py | grimmlab/evars-gpr | 8587f39edaba3b753c62fbd09ec6a78476a3612e | [
"MIT"
] | null | null | null | training/ModelsGaussianProcessRegression.py | grimmlab/evars-gpr | 8587f39edaba3b753c62fbd09ec6a78476a3612e | [
"MIT"
] | 1 | 2021-09-22T18:25:03.000Z | 2021-09-22T18:25:03.000Z | import sklearn.gaussian_process
import pandas as pd
import numpy as np
import copy
from training import ModelsBaseClass
class GaussianProcessRegression(ModelsBaseClass.BaseModel):
    """Class containing Gaussian Process Regression Model

    one_step_ahead may be False (plain batch prediction), an int (refit the
    model every that-many test samples; 0 means never refit), or 'mw'
    (moving window: the oldest training row is dropped for each new test
    sample, and an exponentially-smoothed bias correction is applied).
    """

    def __init__(self, target_column: str, seasonal_periods: int, kernel=None, alpha: float = 1e-10,
                 n_restarts_optimizer: int = 10, one_step_ahead: bool = False, standardize: bool = False,
                 normalize_y: bool = False):
        """
        :param target_column: target_column for prediction
        :param seasonal_periods: period of seasonality
        :param kernel: kernel to use for GPR
        :param alpha: value added to diagonal of kernel matrix
        :param n_restarts_optimizer: number of restarts of optimizer
        :param one_step_ahead: perform one step ahead prediction
        :param standardize: standardize all features according to train mean and std
        :param normalize_y: normalize only target variable
        """
        super().__init__(target_column=target_column, seasonal_periods=seasonal_periods,
                         name='GaussianProcessRegression_sklearn', one_step_ahead=one_step_ahead)
        # random_state is fixed so repeated runs are reproducible.
        self.model = sklearn.gaussian_process.GaussianProcessRegressor(kernel=kernel, alpha=alpha, copy_X_train=True,
                                                                       optimizer='fmin_l_bfgs_b', random_state=42,
                                                                       n_restarts_optimizer=n_restarts_optimizer,
                                                                       normalize_y=normalize_y)
        self.standardize = standardize
        # Per-column statistics filled in by train() when standardize is set.
        self.train_mean = None
        self.train_std = None

    def train(self, train: pd.DataFrame, cross_val_call: bool = False) -> dict:
        """
        Train GPR model

        :param train: train set
        :param cross_val_call: called to perform cross validation

        :return dictionary with cross validated scores (if specified)
        """
        cross_val_score_dict = {}
        if cross_val_call:
            cross_val_score_dict, self.model = self.get_cross_val_score(train=train)
        if self.standardize:
            self.train_mean = train.mean()
            # Replace zero std with 1 to avoid division by zero for
            # constant columns.
            self.train_std = train.std().replace(to_replace=0, value=1)
            # Standardize every column except the target and the binary
            # 'public*' / 'school*' indicator columns.
            train = train.copy().apply(
                lambda x: ((x - self.train_mean[x.name]) / self.train_std[x.name])
                if (not(x.name.startswith('public') or x.name.startswith('school') or x.name == self.target_column))
                else x, axis=0)
        self.model.fit(X=train.drop([self.target_column], axis=1), y=train[self.target_column])
        return cross_val_score_dict

    def update(self, train: pd.DataFrame, model: sklearn.gaussian_process.GaussianProcessRegressor) \
            -> sklearn.gaussian_process.GaussianProcessRegressor:
        """
        Update existing GPR model due to new samples

        :param train: train set with new samples
        :param model: model to update

        :return: updated model
        """
        # sklearn has no incremental fit for GPR; this is a full refit.
        return model.fit(X=train.drop([self.target_column], axis=1), y=train[self.target_column])

    def insample(self, train: pd.DataFrame) -> pd.DataFrame:
        """
        Deliver (back-transformed) insample predictions

        :param train: train set

        :return: DataFrame with insample predictions
        """
        # Predict on the (possibly standardized) data the model was fit on.
        insample = pd.DataFrame(data=self.model.predict(X=self.model.X_train_), index=train.index, columns=['Insample'])
        return insample

    def predict(self, test: pd.DataFrame, train: pd.DataFrame, cv_call: bool = False) -> pd.DataFrame:
        """
        Deliver (back-transformed), if specified one step ahead, out-of-sample predictions

        :param test: test set
        :param train: train set
        :param cv_call: do not perform one_step_ahead for cv calls

        :return: DataFrame with predictions, upper and lower confidence level
        """
        if self.one_step_ahead is not False and cv_call is False:
            predict_lst = []
            sig_lst = []
            bias = 0
            train_mean = self.train_mean
            train_std = self.train_std
            # NOTE(review): DataFrame.append was removed in pandas 2.x —
            # presumably this code targets pandas < 2; verify pinned version.
            train_manip = train.copy()
            # deep copy model as predict function should not change class model
            model = copy.deepcopy(self.model)
            for i in range(0, test.shape[0]):
                test_sample = test.iloc[[i]]
                train_manip = train_manip.append(test_sample)
                if self.one_step_ahead == 'mw':
                    # Moving window: drop the oldest row so the training
                    # set keeps a constant length.
                    train_manip = train_manip.iloc[1:]
                if self.standardize:
                    # Normalize test sample
                    test_sample = test_sample.copy().apply(
                        lambda x: ((x - train_mean[x.name]) / train_std[x.name])
                        if (not (x.name.startswith('public') or
                                 x.name.startswith('school') or
                                 x.name == self.target_column))
                        else x, axis=0)
                fc, sigma = model.predict(X=test_sample.drop([self.target_column], axis=1),
                                          return_std=True)
                if self.standardize:
                    # Update train mean and std
                    train_mean = train_manip.mean()
                    train_std = train_manip.std().replace(to_replace=0, value=1)
                # Refit model if refitting cycle is reached
                if (self.one_step_ahead == 'mw') or \
                        ((self.one_step_ahead != 0) and ((i + 1) % self.one_step_ahead == 0)):
                    if self.standardize:
                        # Normalize train_manip_refit with updated mean and std
                        train_manip_refit = train_manip.copy().apply(
                            lambda x: ((x - train_mean[x.name]) / train_std[x.name])
                            if (not (x.name.startswith('public') or
                                     x.name.startswith('school') or
                                     x.name == self.target_column))
                            else x, axis=0)
                    else:
                        train_manip_refit = train_manip
                    model = self.update(train=train_manip_refit, model=model)
                if self.one_step_ahead == 'mw':
                    # Add Bias
                    fc += bias
                    # NOTE(review): adding the (signed) bias to the predictive
                    # std dev looks questionable — confirm this is intended.
                    sigma += bias
                    # Bias Update: Initial bias = 0
                    # bias_0(k) = y_obs(k-1) - y_mod(k-1)
                    bias_0 = (test_sample[self.target_column].values - fc)[0]
                    # bias(k) = w*bias_0(k) + (1-w)*bias(k-1), w = 0.1...0.9, here: w = 0.8 (see (Ni, 2012))
                    w = 0.8
                    bias = w * bias_0 + (1-w) * bias
                predict_lst.append(fc)
                sig_lst.append(sigma)
            predict = np.array(predict_lst).flatten()
            sig = np.array(sig_lst).flatten()
        else:
            # Plain batch prediction on the whole test set.
            if self.standardize:
                test = test.copy().apply(
                    lambda x: ((x - self.train_mean[x.name]) / self.train_std[x.name])
                    if (not (x.name.startswith('public') or
                             x.name.startswith('school') or
                             x.name == self.target_column))
                    else x, axis=0)
            predict, sig = self.model.predict(X=test.drop([self.target_column], axis=1), return_std=True)
        # 1.96 sigma gives an approximate 95% confidence band.
        predictions = pd.DataFrame({'Prediction': predict,
                                    'LowerConf': predict-1.96*sig, 'UpperConf': predict+1.96*sig},
                                   index=test.index)
        return predictions
| 51.754967 | 120 | 0.550736 |
b289002544a36080e565b1302d97d35e4f067ff4 | 2,470 | py | Python | pythran/tests/rosetta/permutation_rank.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 1,647 | 2015-01-13T01:45:38.000Z | 2022-03-28T01:23:41.000Z | pythran/tests/rosetta/permutation_rank.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 1,116 | 2015-01-01T09:52:05.000Z | 2022-03-18T21:06:40.000Z | pythran/tests/rosetta/permutation_rank.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 180 | 2015-02-12T02:47:28.000Z | 2022-03-14T10:28:18.000Z | #from http://rosettacode.org/wiki/Permutations/Rank_of_a_permutation#Python
#pythran export test()
#runas test()
from math import factorial as fact
from random import randrange
def identity_perm(n):
    """Return the identity permutation on *n* items: [0, 1, ..., n-1]."""
    identity = list(range(n))
    return identity
def unranker1(n, r, pi):
    """Turn rank *r* into a permutation (first, swap-based ordering).

    *pi* is modified in place and also returned: for each undecided slot,
    from the end downward, swap it with position r mod (slots left).
    """
    while n > 0:
        quotient, remainder = divmod(r, n)
        n -= 1
        pi[n], pi[remainder] = pi[remainder], pi[n]
        r = quotient
    return pi
def init_pi1(n, pi):
    """Return the inverse of the length-*n* permutation *pi*."""
    inverse = [-1] * n
    for position in range(n):
        inverse[pi[position]] = position
    return inverse
def ranker1(n, pi, pi1):
    """Return the rank of *pi* in the first (swap-based) ordering.

    *pi1* must be the inverse of *pi*.  Both lists are scrambled in place
    by the same swaps the recursive formulation performed; this version
    simply unrolls the recursion rank(n) = s + n * rank(n-1).
    """
    rank = 0
    weight = 1
    for size in range(n, 1, -1):
        last = size - 1
        digit = pi[last]
        pi[last], pi[pi1[last]] = pi[pi1[last]], pi[last]
        pi1[digit], pi1[last] = pi1[last], pi1[digit]
        rank += weight * digit
        weight *= size
    return rank
def unranker2(n, r, pi):
    """Turn rank *r* into a permutation (second, factorial-base ordering).

    *pi* is modified in place and also returned: each factorial-base digit
    of *r* picks the slot to swap with the last undecided position.
    """
    while n > 0:
        n -= 1
        digit, r = divmod(r, fact(n))
        pi[n], pi[digit] = pi[digit], pi[n]
    return pi
def ranker2(n, pi, pi1):
    """Return the rank of *pi* in the factorial-base ordering.

    *pi1* must be the inverse of *pi*.  Both lists are scrambled in place;
    this iterative form sums digit * (size-1)! over the unrolled recursion
    rank(n) = s * fact(n-1) + rank(n-1).
    """
    rank = 0
    for size in range(n, 1, -1):
        last = size - 1
        digit = pi[last]
        pi[last], pi[pi1[last]] = pi[pi1[last]], pi[last]
        pi1[digit], pi1[last] = pi1[last], pi1[digit]
        rank += digit * fact(last)
    return rank
def get_random_ranks(permsize, samplesize):
    """Return a set of *samplesize* distinct random ranks in [0, permsize!)."""
    total = fact(permsize)
    chosen = set()
    while len(chosen) < samplesize:
        # Draw only as many candidates as are still missing; duplicates
        # are discarded by the set, so loop until the quota is met.
        needed = samplesize - len(chosen)
        chosen.update(randrange(total) for _ in range(needed))
    return chosen
def test1(comment, unranker, ranker):
    """Round-trip every rank of a 3-item permutation through the given
    unranker/ranker pair, then print a few random 12-item samples."""
    n, samplesize, n2 = 3, 4, 12
    print(comment)
    # Build every permutation of n items from its rank.
    perms = []
    for r in range(fact(n)):
        pi = identity_perm(n)
        perm = unranker(n, r, pi)
        perms.append((r, perm))
    # Rank each permutation back; the ranker scrambles its arguments, so
    # hand it a copy of pi (pi1 is freshly built each pass anyway).
    for r, pi in perms:
        pi1 = init_pi1(n, pi)
        print(' From rank %s to %s back to %s' % (r, pi, ranker(n, pi[:], pi1)))
    print('\n %s random individual samples of %s items:' % (samplesize, n2))
    for r in get_random_ranks(n2, samplesize):
        pi = identity_perm(n2)
        print(' ' + ' '.join('%s' % i for i in unranker(n2, r, pi)))
    print('')
def test2(comment, unranker):
    """Print *samplesize* random 10-item permutations for the given unranker."""
    samplesize, n2 = 4, 10
    print(comment)
    print(' %s random individual samples of %s items:' % (samplesize, n2))
    # Accumulate one permutation per line, then print them in one go.
    txt = ''
    for r in get_random_ranks(n2, samplesize):
        pi = identity_perm(n2)
        txt += '\n' + ''.join(str(unranker(n2, r, pi)))
    print(txt, '')
def test():
    """Drive the demos for both permutation orderings."""
    for label, unrank, rank in (('First ordering:', unranker1, ranker1),
                                ('Second ordering:', unranker2, ranker2)):
        test1(label, unrank, rank)
    test2('First ordering, large number of perms:', unranker1)
| 27.444444 | 81 | 0.554251 |
0b3c9d5655359ad0e151206ba5e056932d290a9d | 209,347 | py | Python | test_gm.py | wadeguthrie/combat-accountant | e6a2140baafe71191cf68e01390b1f91446e17aa | [
"Apache-2.0"
] | null | null | null | test_gm.py | wadeguthrie/combat-accountant | e6a2140baafe71191cf68e01390b1f91446e17aa | [
"Apache-2.0"
] | 58 | 2019-10-16T21:48:37.000Z | 2021-09-08T22:33:22.000Z | test_gm.py | wadeguthrie/combat-accountant | e6a2140baafe71191cf68e01390b1f91446e17aa | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python
import argparse
import copy
import curses
import pprint
import random
import unittest
import ca # combat accountant
import ca_fighter
import ca_gurps_ruleset
import ca_timers
'''
FWIW, I realize that many of the Mocks in here are actually Fakes.
'''
# TODO: test options
# TODO: there should be a test.reset() that would clear out the
# set_menu_response and the set_input_box_response values (and
# maybe some other stuff I'm not thinking about). It should be called prior
# to each test's init.
# Save a fight
# TODO: test that saving a fight and starting up again doesn't change the
# fight (pending actions, injuries, fight order) -- check out test_save
# Looting bodies
# TODO: test that looting bodies works:
# * moving something from one body to another works properly
# * only loot unconscious and dead monsters
# TODO: test that quitting a fight offers to loot and save when appropriate
# and not when not:
# (4 tests: loot, save; loot, no save; no loot, save; no loot, no save)
# Notes
# TODO: test that notes are saved properly
# TODO: test 'search'
# TODO: test 'resurrect fight'
# TODO: test equipping characters
class TestRuleset(ca_gurps_ruleset.GurpsRuleset):
    """GurpsRuleset with the creature-consistency check disabled for tests."""

    def __init__(self, window_manager):
        super(TestRuleset, self).__init__(window_manager)

    # Our test creatures aren't totally consistent so we don't want to mess
    # with the test.
    def is_creature_consistent(self,
                               name,     # string: creature's name
                               creature, # dict from Game File
                               fight_handler=None
                               ):
        # Always report 'consistent' so the test fixtures are accepted as-is.
        return True
class TestPersonnelHandler(ca.PersonnelHandler):
    """PersonnelHandler driven by a pre-loaded queue of keystrokes instead
    of live curses input, so personnel screens can be exercised in tests."""

    def __init__(self,
                 window_manager,
                 world,
                 creature_type  # one of: NPCs, PCs, or MONSTERs
                 ):
        super(TestPersonnelHandler, self).__init__(
                window_manager,
                world,
                creature_type,  # one of: NPCs, PCs, or MONSTERs
                )
        # Queue of pending keystrokes (ints); consumed FIFO.
        self.__command_ribbon_input = []
        # Object stashed by set_obj_from_index for later retrieval.
        self.__saved_thing = None

    def set_command_ribbon_input(self,
                                 character  # command ribbon input
                                 ):
        """Append one keystroke to the canned-input queue.

        Special curses keys are queued as-is; anything else is assumed to
        be a one-character string and queued as its ordinal.
        """
        if ARGS.verbose:
            if character < 256:
                print '\n set_command_ribbon_input: add: %c' % character
            else:
                print '\n set_command_ribbon_input: add: %r' % character
        if character in [curses.KEY_HOME, curses.KEY_UP, curses.KEY_DOWN,
                         curses.KEY_PPAGE, curses.KEY_NPAGE, curses.KEY_LEFT,
                         curses.KEY_RIGHT]:
            self.__command_ribbon_input.append(character)
        else:
            self.__command_ribbon_input.append(ord(character))
        if ARGS.verbose:
            print ' gives us a response queue of:'
            print ' ',
            queue = []
            for c in self.__command_ribbon_input:
                queue.append(chr(c) if c < 256 else c)
            PP.pprint(queue)

    def handle_user_input_until_done(self):
        """Feed the queued keystrokes to the command ribbon until a handler
        says to stop or the queue runs dry (which is reported as an error)."""
        if len(self.__command_ribbon_input) == 0:
            print ('** command ribbon input is empty, can\'t respond')
            assert False

        keep_going = True
        while keep_going:
            if len(self.__command_ribbon_input) <= 0:
                self._window_manager.error(
                        ['Empty handle_user_input_until_done queue'])
                return

            # FIFO queue
            string = self.__command_ribbon_input.pop(0)

            if ARGS.verbose:
                if string < 256:
                    print '\n handle_user_input_until_done: got %c' % string
                else:
                    print '\n handle_user_input_until_done: got %r' % string
                print ' gives us a response queue of:',
                queue = []
                for c in self.__command_ribbon_input:
                    queue.append(chr(c) if c < 256 else c)
                PP.pprint(queue)

            # Dispatch through the handler's command table; its function's
            # return value decides whether to keep looping.
            if string in self._choices:
                keep_going = self._choices[string]['func']()
            elif string < 256:
                self._window_manager.error(
                        ['Invalid command: "%c" ' % chr(string)])
            else:
                self._window_manager.error(
                        ['Invalid command: "<%d>" ' % string])

    def set_obj_from_index(self,
                           thing,  # ThingsInFight (fighter or venue)
                           ):
        """Stash |thing| so a test can fetch it with get_obj_from_index."""
        self.__saved_thing = thing

    def get_obj_from_index(self):
        """Return (and clear) the object stashed by set_obj_from_index."""
        saved_thing = self.__saved_thing
        self.__saved_thing = None
        return saved_thing
class WorldData(object):
    """Holds a snapshot of world data the way the real file reader would."""

    def __init__(self, world_dict):
        # Deep copy so tests can't accidentally share state through the
        # dict they built the fixture from.
        self.read_data = copy.deepcopy(world_dict)
class MockProgram(object):
    """Stand-in for the program object: swallows snapshots and bug reports."""

    def __init__(self):
        """Nothing to set up for the mock."""

    def add_snapshot(self, tag, filename):
        """Discard the snapshot request."""

    def make_bug_report(self, history, user_description, snapshot, file_tag=None):
        """Pretend to write a bug report and say that no file was produced."""
        return 'NO FILE'
class MockWorld(object):
    """Minimal world fake: only the playing-back flag is consulted."""

    def __init__(self):
        self.playing_back = False  # never replaying history in these tests
class MockFightHandler(object):
    """Fake of the fight handler: tracks opponents and fighter objects in
    plain dicts so ruleset actions can run without a real fight screen."""

    def __init__(self):
        self.world = MockWorld()
        self.clear_opponents()
        self.__fighter_objects = {}  # {group: {name: fighter object}}

    def add_to_history(self, action):
        """Ignore history entries."""

    def clear_opponents(self):
        """Forget every fighter/opponent pairing."""
        self.__opponents = {}  # {group: {name: opponent object}}

    def get_fighter_object(self,
                           name,
                           group):
        """Return an (index, fighter object) tuple; the index is arbitrary."""
        return 2, self.__fighter_objects[group][name]

    def get_opponent_for(self,
                         fighter  # Fighter object
                         ):
        """Return |fighter|'s recorded opponent, or None if there isn't one."""
        return self.__opponents.get(fighter.group, {}).get(fighter.name)

    def get_round(self):
        """Only consulted for timing, so a constant round number suffices."""
        return 1

    def modify_index(self, adjustment):
        """Ignore initiative-index adjustments."""

    def pick_opponent(self):
        """Ignore opponent-picking requests."""

    def set_fighter_object(self,
                           name,
                           group,
                           fighter_object):
        """Remember |fighter_object| under its group and name."""
        self.__fighter_objects.setdefault(group, {})[name] = fighter_object

    def set_opponent_for(self,
                         fighter,  # Fighter object
                         opponent  # Fighter object
                         ):
        """Remember |opponent| as the one |fighter| is currently facing."""
        self.__opponents.setdefault(fighter.group, {})[fighter.name] = opponent

    def wait_end_action(self,  # Public so it can be called by the ruleset.
                        name,  # String: name of fighter
                        group,  # String: group of fighter
                        in_place=False  # bool: move fighter to new init?
                        ):
        """Do nothing: initiative holding isn't tested yet."""
class MockMainGmWindow(object):
    """No-op stand-in for the main GM window: accepts every display call
    the handlers make and does nothing with it."""

    def __init__(self, window_manager=None):
        pass

    def char_detail_home(self):
        pass

    def char_list_home(self):
        pass

    def clear(self):
        pass

    def command_ribbon(self):
        pass

    def status_ribbon(self, input_filename, maintain_json):
        pass

    def show_description(self,
                         character  # Fighter or Fight object
                         ):
        pass

    def show_creatures(self,
                       char_list,  # [ {'name': xxx,
                                   #    'group': xxx,
                                   #    'details':xxx}, ...
                       current_index,
                       standout=False
                       ):
        pass
class MockGmWindow(object):
    """No-op stand-in for a curses window used by handler tests."""

    def clear(self):
        """Pretend to blank the window."""

    def close(self):
        """Pretend to tear the window down."""

    def command_ribbon(self):
        """Pretend to draw the command ribbon."""

    def getmaxyx(self):
        """Report a fixed 10x10 geometry, like curses' getmaxyx."""
        return (10, 10)
class MockPersonnelGmWindow(MockGmWindow):
    """No-op stand-in for the personnel-editing window."""

    def __init__(self):
        pass

    def status_ribbon(self,
                      group,           # name of group being modified,
                      template,        # name of template
                      input_filename,  # passthru to base class
                      maintain_json    # passthru to base class
                      ):
        pass

    def show_creatures(self,
                       new_creatures,  # {name: {details}, ...} like in JSON
                       new_char_name,  # name of character to highlight
                       viewing_index   # index into creature list:
                                       # dict: {'new'=True, index=0}
                       ):
        pass

    def char_detail_home(self):
        pass
class MockFightGmWindow(MockGmWindow):
    '''
    Stand-in for the fight window.  It carries only the two geometry
    attributes the fight handler reads and ignores every display request.
    '''

    def __init__(self,
                 ruleset  # unused by the mock
                 ):
        self.fighter_win_width = 10
        self.len_timer_leader = 1

    def start_fight(self):
        pass

    def show_fighters(self, current_fighter, opponent, fighters, index,
                      new_round):
        pass

    def round_ribbon(self, fight_round, next_PC_name, input_filename,
                     maintain_json):
        pass

    def status_ribbon(self, input_filename, maintain_json):
        pass
class MockWindowManager(object):
(FOUND_NO_ERROR,
FOUND_EXPECTED_ERROR,
FOUND_WRONG_ERROR, # Error state won't advance from here
FOUND_EXTRA_ERROR # Error state won't advance from here
) = range(4)
def __init__(self):
self.__menu_responses = {} # {menu_title: [selection, selection...]
# {input_box_title: [selection, selection...]
self.__input_box_responses = {}
self.__char_responses = [] # array of characters
self.__expected_error = [] # array of single-line strings
self.error_state = MockWindowManager.FOUND_NO_ERROR
def reset_error_state(self):
self.error_state = MockWindowManager.FOUND_NO_ERROR
def expect_error(self, string_array):
'''
Use this like so:
mock_window_manager.expect_error(xxx)
<do your test>
assert(mock_window_manager.error_state ==
MockWindowManager.FOUND_EXPECTED_ERROR)
'''
self.__expected_error = string_array
def error(self, string_array):
if len(self.__expected_error) > 0:
if string_array == self.__expected_error:
self.error_state = MockWindowManager.FOUND_EXPECTED_ERROR
else:
self.error_state == MockWindowManager.FOUND_WRONG_ERROR
print '\n** Found wrong error:'
PP.pprint(string_array)
elif self.error_state == MockWindowManager.FOUND_NO_ERROR:
self.error_state == MockWindowManager.FOUND_EXTRA_ERROR
elif self.error_state == MockWindowManager.FOUND_EXPECTED_ERROR:
self.error_state == MockWindowManager.FOUND_EXTRA_ERROR
print '\n** Found extra error:'
PP.pprint(string_array)
else:
print '\n** Found another error:'
PP.pprint(string_array)
def get_build_fight_gm_window(self, command_ribbon_choices):
return MockPersonnelGmWindow()
def display_window(self,
title,
lines # [{'text', 'mode'}, ...]
):
pass
def clear_menu_responses(self):
self.__menu_responses = {}
def set_menu_response(self,
title,
selection # SECOND part of string_results tuple
):
# print 'set_menu_response: title: %s, add selection:' % title
# print ' ',
# PP.pprint(selection)
if title not in self.__menu_responses:
self.__menu_responses[title] = []
self.__menu_responses[title].append(selection)
# print ' gives us a response queue of:'
# print ' ',
# PP.pprint(self.__menu_responses)
def menu(self,
title,
strings_results, # array of tuples (string, return value)
starting_index=0 # Who is selected when the menu starts
):
if ARGS.verbose:
print '\n menu title: "%s"' % title
# If the menu has only one entry, just return that -- no need to check
# responses.
# Now, go check responses for longer menus
if title not in self.__menu_responses:
print ('\n** menu: title "%s" not found in stored responses' %
title)
PP.pprint(self.__menu_responses)
assert False
if len(self.__menu_responses[title]) == 0:
print ('\n** menu: responses["%s"] is empty, can\'t respond' %
title)
assert False
# FIFO queue
menu_result = self.__menu_responses[title].pop(0)
if isinstance(menu_result, dict):
while 'menu' in menu_result:
menu_result = self.menu('Which', menu_result['menu'])
if menu_result is None: # Bail out regardless of nesting level
return None, None # Keep going
if 'doit' in menu_result and menu_result['doit'] is not None:
param = (None if 'param' not in menu_result
else menu_result['param'])
menu_result = (menu_result['doit'])(param)
if ARGS.verbose:
print ' menu: title: "%s", returning:' % title,
PP.pprint(menu_result)
print ' gives us a response queue of:'
print ' ',
PP.pprint(self.__menu_responses)
return menu_result, 0 # supply a dummy index to the menu
def set_input_box_response(self,
title,
selection # first part of string_results tuple
):
'''
NOTE: |input_box| and |input_box_number| share the same response queue
'''
# print 'set_input_box_response: title: %s, add selection:' % title,
# PP.pprint(selection)
if title not in self.__input_box_responses:
self.__input_box_responses[title] = []
self.__input_box_responses[title].append(selection)
# print ' gives us a response queue of:'
# PP.pprint(self.__input_box_responses)
def input_box(self,
height, # ignore
width, # ignore
title):
if title not in self.__input_box_responses:
print ('** input_box: title "%s" not found in stored responses' %
title)
assert False
if len(self.__input_box_responses[title]) == 0:
print ('** input_boxes: responses["%s"] is empty, can\'t respond' %
title)
assert False
# FIFO queue
result = self.__input_box_responses[title].pop(0)
if ARGS.verbose:
print '\n input_box title: "%s", returning:' % title,
PP.pprint(result)
print ' gives us a response queue of:'
print ' ',
PP.pprint(self.__input_box_responses)
return result
def input_box_number(self,
height, # ignore
width, # ignore
title):
if title not in self.__input_box_responses:
print ('** input_box_number: title "%s" not found in stored responses' %
title)
PP.pprint(self.__input_box_responses)
assert False
if len(self.__input_box_responses[title]) == 0:
print ('** input_box_number: responses["%s"] is empty, can\'t respond' %
title)
assert False
# FIFO queue
result = self.__input_box_responses[title].pop(0)
if ARGS.verbose:
print '\n input_box_number title: "%s", returning:' % title,
PP.pprint(result)
print ' gives us a response queue of:'
print ' ',
PP.pprint(self.__input_box_responses)
return result
def get_fight_gm_window(self,
ruleset,
command_ribbon_choices,
fight_handler):
return MockFightGmWindow(ruleset)
def get_main_gm_window(self, command_ribbon_choices):
return MockMainGmWindow() # it takes a 'window manager' param
def set_char_response(self,
selection # character
):
# print 'set_char_response: add selection:'
# print ' ',
# PP.pprint(chr(selection))
self.__char_responses.append(selection)
# print ' gives us a response queue of:'
# print ' ',
# PP.pprint(self.__char_responses)
def get_one_character(self):
if len(self.__char_responses) == 0:
print '** character responses is empty, can\'t respond'
assert False
result = self.__char_responses.pop()
# print 'get_one_character: returning:'
# print ' ',
# PP.pprint(chr(result))
# print ' gives us a response queue of:'
# print ' ',
# PP.pprint(self.__char_responses)
return result
class GmTestCase(unittest.TestCase): # Derive from unittest.TestCase
    def setUp(self):
        '''
        Builds the shared test fixtures: four GURPS fighter dicts (Vodou
        Priest, One More Guy, Bokor, Tank, Thief) plus three complete
        world dictionaries used by the tests, and creates the mock window
        manager and test ruleset.  Runs before every test method.
        '''
        # 'crawling': {'attack': -4, 'defense': -3, 'target': -2},
        self.__crawling_attack_mod = -4
        self.__crawling_defense_mod = -3
        self.__colt_pistol_acc = 3
        self.__vodou_priest_fighter_pistol_skill = 15
        self.__vodou_priest_armor_dr = 3
        self.__vodou_priest_ht = 11
        self.__vodou_pistol_index = 0
        self.__vodou_priest_ammo_index = 1
        self.__vodou_armor_index = 2
        self.__vodou_priest_ammo_count = 5
        self.__vodou_priest_initial_shots = 9
        self.__vodou_priest_spell_index = {
            "Awaken": 0,
            "Animate Shadow": 1,
            "Explosive Lightning": 2,
            "Itch": 3,
            "Death Vision": 4,
        }
        # Fighter fixture: armed, armored spell-caster.
        self.__vodou_priest_fighter = {
            "shock": 0,
            "stunned": False,
            "actions_this_turn": [],
            "open-container": [],
            "aim": {"rounds": 0, "braced": False},
            "weapon-index": [],
            "current-weapon": 0,
            "armor-index": [],
            "preferred-weapon-index": [],
            "preferred-armor-index": [],
            "stuff": [
                {"name": "pistol, Colt 170D",
                 "type": ["ranged weapon"],
                 "damage": {"dice": "1d+4"},
                 "acc": self.__colt_pistol_acc,
                 "ammo": {"name": "C Cell",
                          "shots_left": self.__vodou_priest_initial_shots,
                          "shots": self.__vodou_priest_initial_shots},
                 "clip": {"name": "C Cell",
                          "type": ["misc"],
                          "count": 1,
                          "notes": "",
                          "owners": None},
                 "reload": 3,
                 "skill": {"Guns (Pistol)": 0},
                 "count": 1,
                 "owners": 1,
                 "notes": None},  # index 0
                {"name": "C Cell",
                 "type": ["misc"],
                 "count": self.__vodou_priest_ammo_count,
                 "notes": "",
                 "owners": None},  # index 1
                {"count": 1,
                 "type": ["armor"],
                 "notes": "Enchanted w/fortify spell [M66]",
                 "dr": self.__vodou_priest_armor_dr,
                 "name": "Sport coat/Jeans"}  # index 2
            ],
            "spells": [
                {
                    "skill": 18,
                    "name": "Awaken"
                },
                {
                    "skill": 16,
                    "name": "Animate Shadow"
                },
                {
                    "skill": 16,
                    "name": "Explosive Lightning"
                },
                {
                    "skill": 12,
                    "name": "Itch"
                },
                {
                    "skill": 16,
                    "name": "Death Vision"
                },
            ],
            "skills": {"Guns (Pistol)":
                       self.__vodou_priest_fighter_pistol_skill,
                       "Brawling": 12},
            "advantages": {"Combat Reflexes": 15},
            "state": "alive",
            "posture": "standing",
            "current": {
                "fp": 12, "iq": 13, "wi": 13, "hp": 10,
                "ht": self.__vodou_priest_ht, "st": 10,
                "dx": 11, "basic-speed": 5.5, "basic-move": 5
            },
            "permanent": {
                "fp": 12, "iq": 13, "wi": 13, "hp": 10,
                "ht": self.__vodou_priest_ht, "st": 10,
                "dx": 11, "basic-speed": 5.5, "basic-move": 5
            },
            "timers": [],
            "check_for_death": False,
            "opponent": None
        }
        # self.__one_more_guy is identical to the Vodou Priest Fighter except
        # that his dex is different.  I know that makes the calculation for
        # basic speed wrong but that's not really the point of this exercise
        self.__one_more_guy = {
            "shock": 0,
            "stunned": False,
            "actions_this_turn": [],
            "open-container": [],
            "aim": {"rounds": 0, "braced": False},
            "weapon-index": [],
            "current-weapon": 0,
            "armor-index": None,
            "preferred-weapon-index": [],
            "preferred-armor-index": [],
            "stuff": [
                {"name": "pistol, Colt 170D",
                 "type": ["ranged weapon"],
                 "damage": {"dice": "1d+4"},
                 "acc": 3,
                 "ammo": {"name": "C Cell", "shots_left": 9, "shots": 9},
                 "clip": {"name": "C Cell",
                          "type": ["misc"],
                          "count": 1,
                          "notes": "",
                          "owners": None},
                 "reload": 3,
                 "skill": {"Guns (Pistol)": 0},
                 "count": 1,
                 "owners": None,
                 "notes": ""},
                {"name": "C Cell", "type": ["misc"], "count": 5, "notes": "",
                 "owners": None}
            ],
            "skills": {"Guns (Pistol)": 15, "Brawling": 12},
            "advantages": {"Combat Reflexes": 15},
            "state": "alive",
            "posture": "standing",
            "current": {
                "fp": 12, "iq": 13, "wi": 13, "hp": 10, "ht": 11, "st": 10,
                "dx": 12, "basic-speed": 5.5
            },
            "permanent": {
                "fp": 12, "iq": 13, "wi": 13, "hp": 10, "ht": 11, "st": 10,
                "dx": 12, "basic-speed": 5.5
            },
            "timers": [],
            "check_for_death": False,
            "opponent": None
        }
        # Fighter fixture: lower-skill pistol user.
        self.__bokor_fighter = {
            "shock": 0,
            "stunned": False,
            "actions_this_turn": [],
            "open-container": [],
            "aim": {"rounds": 0, "braced": False},
            "weapon-index": [],
            "current-weapon": 0,
            "armor-index": [],
            "preferred-weapon-index": [],
            "preferred-armor-index": [],
            "stuff": [
                {"name": "pistol, Kalashnikov Makarov",
                 "type": ["ranged weapon"],
                 "damage": {"dice": "1d+3"},
                 "acc": 2,
                 "ammo": {"name": "C Cell", "shots_left": 8, "shots": 8},
                 "clip": {"name": "C Cell",
                          "type": ["misc"],
                          "count": 1,
                          "notes": "",
                          "owners": None},
                 "reload": 3,
                 "skill": {"Guns (Pistol)": 0},
                 "count": 1,
                 "owners": None,
                 "notes": ""},
                {"name": "C Cell", "type": ["misc"], "count": 5, "notes": "",
                 "owners": None}
            ],
            "skills": {"Guns (Pistol)": 13, "Brawling": 12},
            "advantages": {"Combat Reflexes": 15},
            "state": "alive",
            "posture": "standing",
            "current": {
                "fp": 11, "iq": 12, "wi": 12, "hp": 10, "ht": 11, "st": 10,
                "dx": 10, "basic-speed": 5.25
            },
            "permanent": {
                "fp": 11, "iq": 12, "wi": 12, "hp": 10, "ht": 11, "st": 10,
                "dx": 10, "basic-speed": 5.25
            },
            "timers": [],
            "check_for_death": False,
            "opponent": None
        }
        self.__tank_fighter_pistol_index = 0
        self.__tank_fighter_sickstick_index = 1
        self.__tank_fighter_stuff_count = 3
        # Fighter fixture: high-skill fighter with both ranged and melee
        # weapons.
        self.__tank_fighter = {
            "shock": 0,
            "stunned": False,
            "actions_this_turn": [],
            "open-container": [],
            "aim": {"rounds": 0, "braced": False},
            "weapon-index": [],
            "current-weapon": 0,
            "armor-index": [],
            "preferred-weapon-index": [],
            "preferred-armor-index": [],
            "stuff": [
                {"name": "pistol, Sig D65",  # the index of this is stored
                                             # in __tank_fighter_pistol_index
                 "type": ["ranged weapon"],
                 "damage": {"dice": "1d+4"},
                 "acc": 4,
                 "ammo": {"name": "C Cell", "shots_left": 9, "shots": 9},
                 "clip": {"name": "C Cell",
                          "type": ["misc"],
                          "count": 1,
                          "notes": "",
                          "owners": None},
                 "reload": 3,
                 "skill": {"Guns (Pistol)": 0},
                 "count": 1,
                 "owners": None,
                 "notes": ""},
                {"name": "sick stick",  # the index of this is stored in
                                        # __tank_fighter_sickstick_index
                 "type": ["melee weapon"],
                 "damage": {"dice": "1d+1 fat"},
                 "skill": {"Axe/Mace": 0},
                 "count": 1,
                 "owners": None,
                 "notes": ""},
                {"name": "C Cell", "type": ["misc"], "count": 5, "notes": "",
                 "owners": None}
            ],
            "skills": {"Guns (Pistol)": 16, "Brawling": 16, "Axe/Mace": 14},
            "advantages": {"Combat Reflexes": 15},
            "state": "alive",
            "posture": "standing",
            "current": {
                "st": 10, "dx": 12, "iq": 12, "wi": 12, "ht": 11, "fp": 11,
                "hp": 11, "basic-speed": 5.75
            },
            "permanent": {
                "fp": 11, "iq": 12, "wi": 12, "hp": 11, "ht": 11, "st": 10,
                "dx": 12, "basic-speed": 5.75
            },
            "timers": [],
            "check_for_death": False,
            "opponent": None
        }
        self.__thief_knife_skill = 14
        # Fighter fixture: knife/brass-knuckles user with no advantages.
        self.__thief_fighter = {
            "shock": 0,
            "stunned": False,
            "actions_this_turn": [],
            "open-container": [],
            "aim": {"rounds": 0, "braced": False},
            "weapon-index": [],
            "current-weapon": 0,
            "armor-index": [],
            "preferred-weapon-index": [],
            "preferred-armor-index": [],
            "stuff": [
                {"name": "pistol, Baretta DX 192",
                 "type": ["ranged weapon"],
                 "damage": {"dice": "1d+4"},
                 "acc": 2,
                 "ammo": {"name": "C Cell", "shots_left": 8, "shots": 8},
                 "clip": {"name": "C Cell",
                          "type": ["misc"],
                          "count": 1,
                          "notes": "",
                          "owners": None},
                 "reload": 3,
                 "skill": {"Guns (Pistol)": 0},
                 "count": 1,
                 "owners": None,
                 "notes": ""},
                {"name": "Large Knife",
                 "type": ["melee weapon"],
                 "damage": {"dice": "1d-2", "type": "imp"},
                 "skill": {"Knife": 0},
                 "parry": -1,
                 "count": 1,
                 "owners": None,
                 "notes": ""},
                {"count": 1,
                 "name": "brass knuckles",
                 "notes": "B271",
                 "damage": {"thr": {"plus": 0, "type": "cr"}},
                 "parry": 0,
                 "skill": {"Brawling": 0, "Boxing": 0, "Karate": 0},
                 "owners": None,
                 "type": ["melee weapon"]},
                {"name": "C Cell", "type": ["misc"], "count": 5, "notes": "",
                 "owners": None}
            ],
            "skills": {"Guns (Pistol)": 12,
                       "Brawling": 14,
                       "Knife": self.__thief_knife_skill},
            "advantages": {},
            "state": "alive",
            "posture": "standing",
            "current": {
                "fp": 11, "iq": 12, "wi": 12, "hp": 12, "ht": 11, "st": 10,
                "dx": 12, "basic-speed": 5.75
            },
            "permanent": {
                "fp": 11, "iq": 12, "wi": 12, "hp": 12, "ht": 11, "st": 10,
                "dx": 12, "basic-speed": 5.75
            },
            "timers": [],
            "check_for_death": False,
            "opponent": None
        }
        # WORLD: 1
        # Full world: templates, PCs, NPCs, dead monsters, an in-progress
        # fight, and two monster groups.
        self.base_world_dict = {
            "options": {},
            "templates": {
                "Arena Combat": {
                    "VodouCleric": {
                        "permanent": {
                            "fp": {"type": "value", "value": 12},
                            "iq": {"type": "value", "value": 13},
                            "wi": {"type": "value", "value": 13},
                            "hp": {"type": "value", "value": 10},
                            "ht": {"type": "value", "value": 11},
                            "st": {"type": "value", "value": 10},
                            "dx": {"type": "value", "value": 11},
                            "basic-speed": {"type": "value", "value": 5.5}
                        },
                        "timers": {"type": "value", "value": []},
                    },
                }
            },  # Templates
            "PCs": {
                "Vodou Priest": self.__vodou_priest_fighter,
                "One More Guy": self.__one_more_guy,
            },  # PCs
            "dead-monsters": [
                {"name": "Arena Attack Monsters",
                 "fight": {
                     "5-Tank-B": {
                         "state": "alive",
                         "current": {"fp": 11, "iq": 12, "wi": 12, "hp": 11,
                                     "ht": 11, "st": 10, "dx": 12},
                         "permanent": {"fp": 11, "iq": 12, "wi": 12, "hp": 11,
                                       "ht": 11, "st": 10, "dx": 12},
                     },  # 5-Tank-B
                     "date": None
                 }
                 }  # Arena Attack Monsters
            ],  # dead-monsters
            "current-fight": {
                "index": 0,
                "monsters": "Anybody",
                "fighters": [
                    {"group": "Anybody", "name": "Bokor Fighter"},
                    {"group": "PCs", "name": "Vodou Priest"},
                    {"group": "PCs", "name": "One More Guy"},
                    {"group": "Anybody", "name": "Tank Fighter"},
                ],
                "saved": False,
                "round": 0,
                "history": [
                    "--- Round 1 ---"
                ]
            },  # current-fight
            "NPCs": {
                "Bokor Requiem": {
                    "state": "alive",
                    "current":
                        {"fp": 11, "iq": 12, "wi": 12, "hp": 11,
                         "ht": 11, "st": 10, "dx": 12},
                    "permanent":
                        {"fp": 11, "iq": 12, "wi": 12, "hp": 11,
                         "ht": 11, "st": 10, "dx": 12},
                    "timers": []
                },
                "One More Guy": self.__one_more_guy
            },  # NPCs
            "fights": {
                "Dima's Crew": {
                    "monsters": {
                        "Bokor Fighter": self.__bokor_fighter,
                        "Tank Fighter": self.__tank_fighter,
                        "One More Guy": {"redirect": "NPCs"}
                    }
                },
                "1st Hunting Party": {
                    "monsters": {
                        "5: Amelia": self.__thief_fighter,
                    }
                }
            }  # fights
        }  # End of the world
        # WORLD: 2
        # Smaller world used by the initiative-order tests; deep copies so
        # per-test mutation can't leak into the fixture dicts above.
        self.init_world_dict = {
            # Don't need dead-monsters, equipment, names
            'templates': {
                'dudes': {
                    'a dude': copy.deepcopy(self.__bokor_fighter)
                },
            },
            'PCs': {
                # 5.25, 10, rand=1
                'Manny': copy.deepcopy(self.__bokor_fighter),
                # 5.75, 12, rand=2
                'Jack': copy.deepcopy(self.__tank_fighter),
                # 5.5, 12, rand=4
                'Moe': copy.deepcopy(self.__one_more_guy),
            },
            'NPCs': {
                # Same body for these as the PCs and horseman fights
                'Groucho': copy.deepcopy(self.__tank_fighter),
                'Zeppo': copy.deepcopy(self.__thief_fighter),
                'Chico': copy.deepcopy(self.__bokor_fighter),
            },
            'fights': {
                'horsemen': {
                    'monsters': {
                        # 5.75, 12, rand=4
                        'Famine': copy.deepcopy(self.__thief_fighter),
                        # 5.5, 11, rand=4
                        'Pestilence': copy.deepcopy(self.__vodou_priest_fighter),
                    }
                }
            },
            'current-fight': {
                # Needed
                'saved': False,
                'history': [],  # Needed (maybe)
                'index': 1,
                'fighters': [],
                'round': 2,
                'monsters': 'horsemen',
            },
        }
        # WORLD: 3
        # Second small world, with a different monster group ('marx').
        self.init_world_dict_2 = {
            # Don't need templates, dead-monsters, equipment, names
            'PCs': {
                # 5.5, 11, rand=2
                'Bob': copy.deepcopy(self.__vodou_priest_fighter),
                # 5.75, 12, rand=3
                'Ted': copy.deepcopy(self.__tank_fighter),
            },
            'fights': {
                'marx': {
                    'monsters': {
                        # 5.5, 12, rand=4
                        'Groucho': copy.deepcopy(self.__one_more_guy),
                        # 5.75, 12, rand=5
                        'Harpo': copy.deepcopy(self.__thief_fighter),
                        # 5.25, 10, rand=3
                        'Chico': copy.deepcopy(self.__bokor_fighter),
                    }
                }
            },
            'current-fight': {
                # Needed
                'saved': False,
                'history': [],  # Needed (maybe)
                # Not needed if not saved
                'index': 1,
                'fighters': [],
                'round': 2,
                'monsters': 'marx',
            },
        }  # End of world
        self.__window_manager = MockWindowManager()
        self.__ruleset = TestRuleset(self.__window_manager)
def tearDown(self):
pass
    def __are_equal(self, lhs, rhs):
        '''
        Recursively compares |lhs| and |rhs| (dicts, lists, or scalars).
        Returns True if they are deeply equal; on any mismatch, prints a
        diagnostic dump of both sides and returns False.
        '''
        if isinstance(lhs, dict):
            if not isinstance(rhs, dict):
                print '** lhs is a dict but rhs is not'
                print '\nlhs'
                PP.pprint(lhs)
                print '\nrhs'
                PP.pprint(rhs)
                return False
            # First pass: every rhs key must also appear in lhs.
            for key in rhs.iterkeys():
                if key not in lhs:
                    print '** KEY "%s" not in lhs' % key
                    print '\nlhs'
                    PP.pprint(lhs)
                    print '\nrhs'
                    PP.pprint(rhs)
                    return False
            # Second pass: every lhs key must appear in rhs and its value
            # must compare equal (recursively).  Keeps going so all
            # mismatches get reported before returning.
            are_equal = True
            for key in lhs.iterkeys():
                if key not in rhs:
                    print '** KEY "%s" not in rhs' % key
                    print '\nlhs'
                    PP.pprint(lhs)
                    print '\nrhs'
                    PP.pprint(rhs)
                    are_equal = False
                elif not self.__are_equal(lhs[key], rhs[key]):
                    print 'lhs[%r] != rhs[%r]' % (key, key)
                    print '\nlhs'
                    PP.pprint(lhs)
                    print '\nrhs'
                    PP.pprint(rhs)
                    are_equal = False
            return are_equal
        elif isinstance(lhs, list):
            if not isinstance(rhs, list):
                print '** lhs is a list but rhs is not'
                print '\nlhs'
                PP.pprint(lhs)
                print '\nrhs'
                PP.pprint(rhs)
                return False
            if len(lhs) != len(rhs):
                print '** length lhs=%d != len rhs=%d' % (len(lhs), len(rhs))
                print '\nlhs'
                PP.pprint(lhs)
                print '\nrhs'
                PP.pprint(rhs)
                return False
            # Element-wise comparison; report every mismatch, not just
            # the first one.
            are_equal = True
            for i in range(len(lhs)):
                if not self.__are_equal(lhs[i], rhs[i]):
                    print '** lhs[%d] != rhs[%d]' % (i, i)
                    print '\nlhs'
                    PP.pprint(lhs)
                    print '\nrhs'
                    PP.pprint(rhs)
                    are_equal = False
            return are_equal
        else:
            # Scalar (or anything else): plain equality.
            if lhs != rhs:
                print '** lhs=%r != rhs=%r' % (lhs, rhs)
                print '\nlhs'
                PP.pprint(lhs)
                print '\nrhs'
                PP.pprint(rhs)
                return False
            else:
                return True
def __is_in_dead_monsters(self, world_obj, fight_name):
for fight in world_obj.read_data['dead-monsters']:
if fight_name == fight['name']:
return True
return False
def __get_current_weapon(self,
fighter # Fighter object
):
# NOTE: assumes a single weapon
weapons = fighter.get_current_weapons()
weapon_indexes = fighter.get_current_weapon_indexes()
weapon = None if len(weapons) == 0 else weapons[0]
weapon_index = None if len(weapon_indexes) == 0 else weapon_indexes[0]
return weapon, weapon_index
#
# Actual Tests #
#
    def test_get_dodge_skill(self):
        '''
        GURPS-specific test.

        Verifies the ruleset's dodge calculation for each fighter fixture
        and confirms that the 'crawling' posture applies the expected
        defense penalty.
        '''
        if ARGS.verbose:
            print '\n=== test_get_dodge_skill ===\n'
        # Deepcopy so that we don't taint the original
        mock_fight_handler = MockFightHandler()
        vodou_priest = ca_fighter.Fighter(
                'Priest',
                'group',
                copy.deepcopy(self.__vodou_priest_fighter),
                self.__ruleset,
                self.__window_manager)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'change-posture',
                                  'posture': 'standing'},
                                 mock_fight_handler)
        dodge_skill, dodge_why = self.__ruleset.get_dodge_skill(vodou_priest)
        assert dodge_skill == 9
        # Crawling subtracts the defense modifier from the standing dodge.
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'change-posture',
                                  'posture': 'crawling'},
                                 mock_fight_handler)
        dodge_skill, dodge_why = self.__ruleset.get_dodge_skill(vodou_priest)
        assert dodge_skill == (9 + self.__crawling_defense_mod)
        # Next guy
        bokor_fighter = ca_fighter.Fighter(
                'Bokor',
                'group',
                copy.deepcopy(self.__bokor_fighter),
                self.__ruleset,
                self.__window_manager)
        self.__ruleset.do_action(bokor_fighter,
                                 {'action-name': 'change-posture',
                                  'posture': 'standing'},
                                 mock_fight_handler)
        dodge_skill, dodge_why = self.__ruleset.get_dodge_skill(bokor_fighter)
        assert dodge_skill == 9
        self.__ruleset.do_action(bokor_fighter,
                                 {'action-name': 'change-posture',
                                  'posture': 'crawling'},
                                 mock_fight_handler)
        dodge_skill, dodge_why = self.__ruleset.get_dodge_skill(bokor_fighter)
        assert dodge_skill == (9 + self.__crawling_defense_mod)
        tank_fighter = ca_fighter.Fighter(
                'Tank',
                'group',
                copy.deepcopy(self.__tank_fighter),
                self.__ruleset,
                self.__window_manager)
        dodge_skill, dodge_why = self.__ruleset.get_dodge_skill(tank_fighter)
        assert dodge_skill == 9
        # Thief has no Combat Reflexes, hence the lower dodge.
        thief_fighter = ca_fighter.Fighter(
                'Thief',
                'group',
                copy.deepcopy(self.__thief_fighter),
                self.__ruleset,
                self.__window_manager)
        dodge_skill, dodge_why = self.__ruleset.get_dodge_skill(thief_fighter)
        assert dodge_skill == 8
    def test_get_block_skill(self):
        '''
        GURPS-specific test.

        Verifies that, with no shield (weapon=None), get_block_skill
        returns None for every fighter fixture.
        '''
        if ARGS.verbose:
            print '\n=== test_get_block_skill ===\n'
        # TODO: need non-trivial block tests
        vodou_priest_fighter = ca_fighter.Fighter(
                'Priest',
                'group',
                copy.deepcopy(self.__vodou_priest_fighter),
                self.__ruleset,
                self.__window_manager)
        block_skill, block_why = self.__ruleset.get_block_skill(
                vodou_priest_fighter, None)
        assert block_skill is None
        bokor_fighter = ca_fighter.Fighter(
                'Bokor',
                'group',
                copy.deepcopy(self.__bokor_fighter),
                self.__ruleset,
                self.__window_manager)
        block_skill, block_why = self.__ruleset.get_block_skill(bokor_fighter,
                                                                None)
        assert block_skill is None
        tank_fighter = ca_fighter.Fighter(
                'Tank',
                'group',
                copy.deepcopy(self.__tank_fighter),
                self.__ruleset,
                self.__window_manager)
        block_skill, block_why = self.__ruleset.get_block_skill(tank_fighter,
                                                                None)
        assert block_skill is None
        thief_fighter = ca_fighter.Fighter(
                'Thief',
                'group',
                copy.deepcopy(self.__thief_fighter),
                self.__ruleset,
                self.__window_manager)
        block_skill, block_why = self.__ruleset.get_block_skill(thief_fighter,
                                                                None)
        assert block_skill is None
    def test_get_parry_skill(self):
        '''
        GURPS-specific test.

        Verifies the parry calculation: None when unarmed (hand-to-hand
        parry is handled elsewhere), weapon-specific values when armed,
        and the crawling-posture defense penalty.
        '''
        if ARGS.verbose:
            print '\n=== test_get_parry_skill ===\n'
        # Unarmed
        weapon = None
        mock_fight_handler = MockFightHandler()
        vodou_priest_fighter = ca_fighter.Fighter(
                'Vodou Priest',
                'group',
                copy.deepcopy(self.__vodou_priest_fighter),
                self.__ruleset,
                self.__window_manager)
        parry_skill, parry_why = self.__ruleset.get_parry_skill(
                vodou_priest_fighter, weapon)
        assert parry_skill is None  # None w/weapon; still OK hand-to-hand
        # Unarmed
        weapon = None
        bokor_fighter = ca_fighter.Fighter(
                'Bokor',
                'group',
                copy.deepcopy(self.__bokor_fighter),
                self.__ruleset,
                self.__window_manager)
        parry_skill, parry_why = self.__ruleset.get_parry_skill(bokor_fighter,
                                                                weapon)
        assert parry_skill is None  # None w/weapon; still OK hand-to-hand
        # Unarmed
        weapon = None
        tank_fighter = ca_fighter.Fighter(
                'Tank',
                'group',
                copy.deepcopy(self.__tank_fighter),
                self.__ruleset,
                self.__window_manager)
        parry_skill, parry_why = self.__ruleset.get_parry_skill(tank_fighter,
                                                                weapon)
        assert parry_skill is None  # None w/weapon; still OK hand-to-hand
        # Armed (sick stick)
        tank_fighter = ca_fighter.Fighter(
                'Tank',
                'group',
                copy.deepcopy(self.__tank_fighter),
                self.__ruleset,
                self.__window_manager)
        weapon_index, weapon = tank_fighter.draw_weapon_by_name('sick stick')
        #self.__ruleset.do_action(tank_fighter,
        #                         {'action-name': 'draw-weapon',
        #                          'weapon-index': weapon_index},
        #                         mock_fight_handler)
        self.__ruleset.do_action(tank_fighter,
                                 {'action-name': 'change-posture',
                                  'posture': 'standing'},
                                 mock_fight_handler)
        parry_skill, parry_why = self.__ruleset.get_parry_skill(tank_fighter,
                                                                weapon)
        assert parry_skill == 11
        # Crawling subtracts the defense modifier from the armed parry.
        self.__ruleset.do_action(tank_fighter,
                                 {'action-name': 'change-posture',
                                  'posture': 'crawling'},
                                 mock_fight_handler)
        parry_skill, parry_why = self.__ruleset.get_parry_skill(tank_fighter,
                                                                weapon)
        assert parry_skill == (11 + self.__crawling_defense_mod)
        # Unarmed
        weapon = None
        thief_fighter = ca_fighter.Fighter(
                'Thief',
                'group',
                copy.deepcopy(self.__thief_fighter),
                self.__ruleset,
                self.__window_manager)
        parry_skill, parry_why = self.__ruleset.get_parry_skill(thief_fighter,
                                                                weapon)
        assert parry_skill is None  # None w/weapon; still OK hand-to-hand
        # Armed (Knife)
        thief_fighter = ca_fighter.Fighter(
                'Thief',
                'group',
                copy.deepcopy(self.__thief_fighter),
                self.__ruleset,
                self.__window_manager)
        weapon_index, weapon = thief_fighter.draw_weapon_by_name('Large Knife')
        #self.__ruleset.do_action(tank_fighter,
        #                         {'action-name': 'draw-weapon',
        #                          'weapon-index': weapon_index},
        #                         mock_fight_handler)
        self.__ruleset.do_action(thief_fighter,
                                 {'action-name': 'change-posture',
                                  'posture': 'standing'},
                                 mock_fight_handler)
        parry_skill, parry_why = self.__ruleset.get_parry_skill(thief_fighter,
                                                                weapon)
        assert parry_skill == 9
        self.__ruleset.do_action(thief_fighter,
                                 {'action-name': 'change-posture',
                                  'posture': 'crawling'},
                                 mock_fight_handler)
        parry_skill, parry_why = self.__ruleset.get_parry_skill(thief_fighter,
                                                                weapon)
        assert parry_skill == (9 + self.__crawling_defense_mod)
    def test_get_unarmed_info(self):
        '''
        GURPS-specific test.

        Verifies punch/kick/parry numbers for every fighter fixture,
        posture modifiers to the attacker, brass knuckles (unarmed-skill
        weapon), and that an opponent's posture does NOT modify melee
        attacks.
        '''
        if ARGS.verbose:
            print '\n=== test_get_unarmed_info ===\n'
        # Vodou Priest
        mock_fight_handler = MockFightHandler()
        vodou_priest_fighter = ca_fighter.Fighter(
                'Vodou Priest',
                'group',
                copy.deepcopy(self.__vodou_priest_fighter),
                self.__ruleset,
                self.__window_manager)
        hand_to_hand_info = self.__ruleset.get_unarmed_info(
                vodou_priest_fighter,
                None,
                None)
        assert hand_to_hand_info['punch_skill'] == 12
        assert hand_to_hand_info['punch_damage'] == '1d-3 (cr=x1.0)'
        assert hand_to_hand_info['kick_skill'] == 10
        assert hand_to_hand_info['kick_damage'] == '1d-2 (cr=x1.0)'
        assert hand_to_hand_info['parry_skill'] == 10
        # Bokor
        bokor_fighter = ca_fighter.Fighter(
                'Bokor',
                'group',
                copy.deepcopy(self.__bokor_fighter),
                self.__ruleset,
                self.__window_manager)
        hand_to_hand_info = self.__ruleset.get_unarmed_info(bokor_fighter,
                                                            None,
                                                            None)
        # PP.pprint(hand_to_hand_info)
        assert hand_to_hand_info['punch_skill'] == 12
        assert hand_to_hand_info['punch_damage'] == '1d-2 (cr=x1.0)'
        assert hand_to_hand_info['kick_skill'] == 10  # thr-1, st=10
        assert hand_to_hand_info['kick_damage'] == '1d-1 (cr=x1.0)'
        assert hand_to_hand_info['parry_skill'] == 10
        # Tank
        tank_fighter = ca_fighter.Fighter(
                'Tank',
                'group',
                copy.deepcopy(self.__tank_fighter),
                self.__ruleset,
                self.__window_manager)
        hand_to_hand_info = self.__ruleset.get_unarmed_info(tank_fighter,
                                                            None,
                                                            None)
        assert hand_to_hand_info['punch_skill'] == 16
        assert hand_to_hand_info['punch_damage'] == '1d-2 (cr=x1.0)'
        assert hand_to_hand_info['kick_skill'] == 14
        assert hand_to_hand_info['kick_damage'] == '1d-1 (cr=x1.0)'
        assert hand_to_hand_info['parry_skill'] == 12
        # Thief
        thief_fighter = ca_fighter.Fighter(
                'Thief',
                'group',
                copy.deepcopy(self.__thief_fighter),
                self.__ruleset,
                self.__window_manager)
        hand_to_hand_info = self.__ruleset.get_unarmed_info(thief_fighter,
                                                            None,
                                                            None)
        assert hand_to_hand_info['punch_skill'] == 14
        assert hand_to_hand_info['punch_damage'] == '1d-2 (cr=x1.0)'
        assert hand_to_hand_info['kick_skill'] == 12
        assert hand_to_hand_info['kick_damage'] == '1d-1 (cr=x1.0)'
        assert hand_to_hand_info['parry_skill'] == 10
        # Thief with posture additions -- crawling penalizes the
        # attacker's own attacks and parry.
        self.__ruleset.do_action(thief_fighter,
                                 {'action-name': 'change-posture',
                                  'posture': 'crawling'},
                                 mock_fight_handler)
        hand_to_hand_info = self.__ruleset.get_unarmed_info(thief_fighter,
                                                            None,
                                                            None)
        assert hand_to_hand_info['punch_skill'] == (
                14 + self.__crawling_attack_mod)
        assert hand_to_hand_info['punch_damage'] == '1d-2 (cr=x1.0)'
        assert hand_to_hand_info['kick_skill'] == (
                12 + self.__crawling_attack_mod)
        assert hand_to_hand_info['kick_damage'] == '1d-1 (cr=x1.0)'
        assert hand_to_hand_info['parry_skill'] == (
                10 + self.__crawling_defense_mod)
        # Thief w/o brass knuckles
        thief_fighter = ca_fighter.Fighter(
                'Thief',
                'group',
                copy.deepcopy(self.__thief_fighter),
                self.__ruleset,
                self.__window_manager)
        hand_to_hand_info = self.__ruleset.get_unarmed_info(thief_fighter,
                                                            None,
                                                            None)
        assert hand_to_hand_info['punch_skill'] == 14
        assert hand_to_hand_info['punch_damage'] == '1d-2 (cr=x1.0)'
        assert hand_to_hand_info['kick_skill'] == 12
        assert hand_to_hand_info['kick_damage'] == '1d-1 (cr=x1.0)'
        assert hand_to_hand_info['parry_skill'] == 10
        # w/brass knuckles -- Note: that the punch damage is +1
        ignore, weapon = thief_fighter.draw_weapon_by_name('brass knuckles')
        if self.__ruleset.does_weapon_use_unarmed_skills(weapon):
            hand_to_hand_info = self.__ruleset.get_unarmed_info(thief_fighter,
                                                                None,
                                                                weapon)
        assert hand_to_hand_info['punch_skill'] == 14
        assert hand_to_hand_info['punch_damage'] == 'thr: 1d-1 (cr=x1.0)'
        assert hand_to_hand_info['kick_skill'] == 12
        assert hand_to_hand_info['kick_damage'] == '1d-1 (cr=x1.0)'
        assert hand_to_hand_info['parry_skill'] == 10
        # back to unarmed
        hand_to_hand_info = self.__ruleset.get_unarmed_info(thief_fighter,
                                                            None,
                                                            None)
        assert hand_to_hand_info['punch_skill'] == 14
        assert hand_to_hand_info['punch_damage'] == '1d-2 (cr=x1.0)'
        assert hand_to_hand_info['kick_skill'] == 12
        assert hand_to_hand_info['kick_damage'] == '1d-1 (cr=x1.0)'
        assert hand_to_hand_info['parry_skill'] == 10
        # --- Opponents w/ posture ---
        self.__ruleset.do_action(thief_fighter,
                                 {'action-name': 'change-posture',
                                  'posture': 'standing'},
                                 mock_fight_handler)
        # Picking opponent doesn't change things
        hand_to_hand_info = self.__ruleset.get_unarmed_info(tank_fighter,
                                                            thief_fighter,
                                                            None)
        assert hand_to_hand_info['punch_skill'] == 16
        assert hand_to_hand_info['punch_damage'] == '1d-2 (cr=x1.0)'
        assert hand_to_hand_info['kick_skill'] == 14
        assert hand_to_hand_info['kick_damage'] == '1d-1 (cr=x1.0)'
        assert hand_to_hand_info['parry_skill'] == 12
        # change posture of thief (opponent) -- note: posture of opponent does
        # not modify melee attacks
        self.__ruleset.do_action(thief_fighter,
                                 {'action-name': 'change-posture',
                                  'posture': 'crawling'},  # -2
                                 mock_fight_handler)
        hand_to_hand_info = self.__ruleset.get_unarmed_info(tank_fighter,
                                                            thief_fighter,
                                                            None)
        assert hand_to_hand_info['punch_skill'] == 16
        assert hand_to_hand_info['punch_damage'] == '1d-2 (cr=x1.0)'
        assert hand_to_hand_info['kick_skill'] == 14
        assert hand_to_hand_info['kick_damage'] == '1d-1 (cr=x1.0)'
        assert hand_to_hand_info['parry_skill'] == 12
        # change posture of thief (back to standing)
        self.__ruleset.do_action(thief_fighter,
                                 {'action-name': 'change-posture',
                                  'posture': 'standing'},
                                 mock_fight_handler)
        hand_to_hand_info = self.__ruleset.get_unarmed_info(tank_fighter,
                                                            thief_fighter,
                                                            None)
        assert hand_to_hand_info['punch_skill'] == 16
        assert hand_to_hand_info['punch_damage'] == '1d-2 (cr=x1.0)'
        assert hand_to_hand_info['kick_skill'] == 14
        assert hand_to_hand_info['kick_damage'] == '1d-1 (cr=x1.0)'
        assert hand_to_hand_info['parry_skill'] == 12
    def test_initiative_order(self):
        '''
        Partially GURPS-specific test

        Builds a fight and verifies that:
        - the fighters come out sorted into the expected initiative order
          (the comments next to |expected| show each fighter's basic speed,
          dexterity, and the seeded random tie-break roll),
        - modify_index() steps through the fighters in order and wraps,
        - cycling skips a dead fighter but still visits injured and
          unconscious ones, and
        - nothing about the fighters changes except the injuries/states that
          this test explicitly inflicts.
        '''
        if ARGS.verbose:
            print '\n=== test_initiative_order ===\n'

        # Build a World around the canned scenario data.
        world_data = WorldData(self.init_world_dict)
        mock_program = MockProgram()
        world = ca.World("internal_source_file",
                         world_data,
                         self.__ruleset,
                         mock_program,
                         self.__window_manager,
                         save_snapshot=False)

        # Famine and Jack have the same basic speed and dx -- it's up to rand
        # Pestilence and Moe have same basic speed but different dx
        expected = [{'name': 'Famine', 'group': 'horsemen'},  # 5.75, 12, 4
                    {'name': 'Jack', 'group': 'PCs'},  # 5.75, 12, 2
                    {'name': 'Moe', 'group': 'PCs'},  # 5.5, 12, 4
                    {'name': 'Pestilence', 'group': 'horsemen'},  # 5.5, 11, 4
                    {'name': 'Manny', 'group': 'PCs'}]  # 5.25, 10, 1

        # Do this multiple times just to verify that the random stuff works
        for i in range(10):
            # random.randint(1, 6) should generate: 1 2 4 4 4 4 5 6 4 4
            random.seed(9001)  # 9001 is an arbitrary number
            fight_handler = ca.FightHandler(self.__window_manager,
                                            world,
                                            'horsemen',
                                            None,  # Playback history
                                            save_snapshot=False)
            fighters = fight_handler.get_fighters()

            # Check the order against the one that I expect
            for fighter, expected_value in zip(fighters, expected):
                assert fighter['name'] == expected_value['name']
                assert fighter['group'] == expected_value['group']

        # test that modify index wraps
        # test that cycling a whole round goes to each fighter in order
        # (the |fight_handler| from the last loop iteration is reused below)
        expected_index = 0
        assert world_data.read_data['current-fight']['index'] == expected_index
        current_fighter = fight_handler.get_current_fighter()
        assert current_fighter.name == expected[expected_index]['name']
        assert current_fighter.group == expected[expected_index]['group']

        fight_handler.modify_index(1)
        expected_index = 1
        assert world_data.read_data['current-fight']['index'] == expected_index
        current_fighter = fight_handler.get_current_fighter()
        # Remember this fighter; it gets injured later in the test.
        injured_fighter = current_fighter
        injured_index = expected_index
        assert current_fighter.name == expected[expected_index]['name']
        assert current_fighter.group == expected[expected_index]['group']

        fight_handler.modify_index(1)
        expected_index = 2
        assert world_data.read_data['current-fight']['index'] == expected_index
        current_fighter = fight_handler.get_current_fighter()
        # Remember this fighter; it gets knocked out later in the test.
        unconscious_fighter = current_fighter
        unconscious_index = expected_index
        assert current_fighter.name == expected[expected_index]['name']
        assert current_fighter.group == expected[expected_index]['group']

        fight_handler.modify_index(1)
        expected_index = 3
        assert world_data.read_data['current-fight']['index'] == expected_index
        current_fighter = fight_handler.get_current_fighter()
        # Remember this fighter; it gets killed later in the test.
        dead_fighter = current_fighter
        dead_index = expected_index
        assert current_fighter.name == expected[expected_index]['name']
        assert current_fighter.group == expected[expected_index]['group']

        fight_handler.modify_index(1)
        expected_index = 4
        assert world_data.read_data['current-fight']['index'] == expected_index
        current_fighter = fight_handler.get_current_fighter()
        assert current_fighter.name == expected[expected_index]['name']
        assert current_fighter.group == expected[expected_index]['group']

        fight_handler.modify_index(1)
        expected_index = 0  # wraps
        assert world_data.read_data['current-fight']['index'] == expected_index
        current_fighter = fight_handler.get_current_fighter()
        assert current_fighter.name == expected[expected_index]['name']
        assert current_fighter.group == expected[expected_index]['group']

        # test that an unconscious fighter is not skipped but a dead one is
        injured_hp = 3  # arbitrary amount
        injured_fighter.details['current']['hp'] -= injured_hp
        unconscious_fighter.set_consciousness(ca_fighter.Fighter.UNCONSCIOUS,
                                              None)
        dead_fighter.set_consciousness(ca_fighter.Fighter.DEAD, None)

        assert injured_fighter.get_state() == ca_fighter.Fighter.INJURED
        assert (unconscious_fighter.get_state() ==
                ca_fighter.Fighter.UNCONSCIOUS)
        assert dead_fighter.get_state() == ca_fighter.Fighter.DEAD

        expected_index = 0
        assert world_data.read_data['current-fight']['index'] == expected_index
        current_fighter = fight_handler.get_current_fighter()
        assert current_fighter.name == expected[expected_index]['name']
        assert current_fighter.group == expected[expected_index]['group']

        # This is the injured fighter -- should still see this one
        fight_handler.modify_index(1)
        expected_index = 1
        assert world_data.read_data['current-fight']['index'] == expected_index
        current_fighter = fight_handler.get_current_fighter()
        assert current_fighter.name == expected[expected_index]['name']
        assert current_fighter.group == expected[expected_index]['group']

        # This is the unconscious fighter -- should still see this one
        fight_handler.modify_index(1)
        expected_index = 2
        assert world_data.read_data['current-fight']['index'] == expected_index
        current_fighter = fight_handler.get_current_fighter()
        assert current_fighter.name == expected[expected_index]['name']
        assert current_fighter.group == expected[expected_index]['group']

        # Should skip the dead fighter
        fight_handler.modify_index(1)
        expected_index = 4
        assert world_data.read_data['current-fight']['index'] == expected_index
        current_fighter = fight_handler.get_current_fighter()
        assert current_fighter.name == expected[expected_index]['name']
        assert current_fighter.group == expected[expected_index]['group']

        fight_handler.modify_index(1)
        expected_index = 0  # wraps
        assert world_data.read_data['current-fight']['index'] == expected_index
        current_fighter = fight_handler.get_current_fighter()
        assert current_fighter.name == expected[expected_index]['name']
        assert current_fighter.group == expected[expected_index]['group']

        # verify that the only thing that's changed among the fighters is that
        # one is injured, one is unconscious, and one is dead.
        # NOTE(review): assumes these template fighters correspond 1:1, in
        # initiative order, to the fighters built from init_world_dict --
        # confirm against the scenario data.
        expected_fighters = [
            copy.deepcopy(self.__thief_fighter),
            copy.deepcopy(self.__tank_fighter),
            copy.deepcopy(self.__one_more_guy),
            copy.deepcopy(self.__vodou_priest_fighter),
            copy.deepcopy(self.__bokor_fighter)]
        expected_fighters[injured_index]['current']['hp'] -= injured_hp
        expected_fighters[unconscious_index]['state'] = "unconscious"
        expected_fighters[dead_index]['state'] = "dead"

        fighters = fight_handler.get_fighters()
        assert len(expected_fighters) == len(fighters)
        assert self.__are_equal(expected_fighters[0], fighters[0]['details'])
        assert self.__are_equal(expected_fighters[1], fighters[1]['details'])
        assert self.__are_equal(expected_fighters[2], fighters[2]['details'])
        assert self.__are_equal(expected_fighters[3], fighters[3]['details'])
        assert self.__are_equal(expected_fighters[4], fighters[4]['details'])
def test_initiative_order_again(self):
'''
Partially GURPS-specific test
This is just like test_initiative_order except the fighters are
reordered randomly and a different random seed is used.
'''
if ARGS.verbose:
print '\n=== test_initiative_order_again ===\n'
world_data = WorldData(self.init_world_dict_2)
mock_program = MockProgram()
world = ca.World("internal_source_file",
world_data,
self.__ruleset,
mock_program,
self.__window_manager,
save_snapshot=False)
# Famine and Jack have the same basic speed and dx -- it's up to rand
# Pestilence and Moe have same basic speed but different dx
expected = [
{'name': 'Harpo', 'group': 'marx'}, # 5.75, 12, 5
{'name': 'Ted', 'group': 'PCs'}, # 5.75, 12, 3
{'name': 'Groucho', 'group': 'marx'}, # 5.5, 12, 4
{'name': 'Bob', 'group': 'PCs'}, # 5.5, 11, 2
{'name': 'Chico', 'group': 'marx'}, # 5.25, 10, 3
]
# Do this multiple times just to verify that the random stuff works
for i in range(10):
# random.randint(1, 6) should generate: 2 3 4 5 3
random.seed(8534) # 8534 is an arbitrary number
fight_handler = ca.FightHandler(self.__window_manager,
world,
'marx',
None, # Playback history
save_snapshot=False)
fighters = fight_handler.get_fighters()
# Check the order against the one that I expect
for fighter, expected_value in zip(fighters, expected):
assert fighter['name'] == expected_value['name']
assert fighter['group'] == expected_value['group']
    def test_change_opponents(self):
        '''
        Test that changing opponents from one that's damaged doesn't affect
        any of the fighters (except that the opponent was changed).  This
        mirrors a bug that I thought I saw a while ago: it looked as though
        switching away from an injured opponent transferred the damage to
        the newly selected opponent.
        '''
        if ARGS.verbose:
            print '\n=== test_change_opponents ===\n'

        # Build a World around the canned scenario data.
        world_data = WorldData(self.init_world_dict)
        mock_program = MockProgram()
        world = ca.World("internal_source_file",
                         world_data,
                         self.__ruleset,
                         mock_program,
                         self.__window_manager,
                         save_snapshot=False)

        # Famine and Jack have the same basic speed and dx -- it's up to rand
        # Pestilence and Moe have same basic speed but different dx
        expected = [{'name': 'Famine', 'group': 'horsemen'},  # 5.75, 12, 4
                    {'name': 'Jack', 'group': 'PCs'},  # 5.75, 12, 2
                    {'name': 'Moe', 'group': 'PCs'},  # 5.5, 12, 4
                    {'name': 'Pestilence', 'group': 'horsemen'},  # 5.5, 11, 4
                    {'name': 'Manny', 'group': 'PCs'}]  # 5.25, 10, 1

        injured_hp = 3  # This is arbitrary
        injured_index = 2

        random.seed(9001)  # 9001 is an arbitrary number
        fight_handler = ca.FightHandler(self.__window_manager,
                                        world,
                                        'horsemen',
                                        None,  # Playback history
                                        save_snapshot=False)
        fighters = fight_handler.get_fighters()

        expected_index = 0
        assert world_data.read_data['current-fight']['index'] == expected_index
        current_fighter = fight_handler.get_current_fighter()

        # Make fighter 0 fight fighter 2
        self.__ruleset.do_action(current_fighter,
                                 {'action-name': 'pick-opponent',
                                  'opponent': {'name': 'Moe', 'group': 'PCs'}},
                                 fight_handler)

        # Make sure pick opponent worked as advertised
        opponent = fight_handler.get_opponent_for(current_fighter)
        assert opponent is not None
        assert opponent.name == 'Moe'
        assert opponent.group == 'PCs'

        # Move ahead to fighter 1
        fight_handler.modify_index(1)
        expected_index = 1
        assert world_data.read_data['current-fight']['index'] == expected_index
        current_fighter = fight_handler.get_current_fighter()

        # Wound fighter 2
        fighters[injured_index]['details']['current']['hp'] -= injured_hp

        # Cycle around to fighter 0
        fight_handler.modify_index(1)
        expected_index = 2
        assert world_data.read_data['current-fight']['index'] == expected_index
        current_fighter = fight_handler.get_current_fighter()
        # NOTE: assigned for parity with test_initiative_order but not
        # otherwise used in this test.
        unconscious_fighter = current_fighter
        unconscious_index = expected_index

        fight_handler.modify_index(1)
        expected_index = 3
        assert world_data.read_data['current-fight']['index'] == expected_index
        current_fighter = fight_handler.get_current_fighter()
        # NOTE: assigned but, like |unconscious_fighter|, unused below.
        dead_fighter = current_fighter
        dead_index = expected_index

        fight_handler.modify_index(1)
        expected_index = 4
        assert world_data.read_data['current-fight']['index'] == expected_index
        current_fighter = fight_handler.get_current_fighter()

        fight_handler.modify_index(1)
        expected_index = 0  # wraps
        assert world_data.read_data['current-fight']['index'] == expected_index
        current_fighter = fight_handler.get_current_fighter()

        # Change opponent of fighter 0 to fighter 1 -- At one time, I saw a
        # bug where it appeared that changing an opponent from an injured one
        # (in this case, fighter 2/Moe) to a different fighter (in this case,
        # fighter 1/Jack) caused the damage to be transferred to the new
        # opponent.
        self.__ruleset.do_action(current_fighter,
                                 {'action-name': 'pick-opponent',
                                  'opponent': {'name': 'Jack',
                                               'group': 'PCs'}},
                                 fight_handler)

        # Make sure pick opponent worked as advertised
        opponent = fight_handler.get_opponent_for(current_fighter)
        assert opponent is not None
        assert opponent.name == 'Jack'
        assert opponent.group == 'PCs'

        # cycle completely around to fighter 1
        fight_handler.modify_index(1)  # index 1
        fight_handler.modify_index(1)  # index 2
        fight_handler.modify_index(1)  # index 3
        fight_handler.modify_index(1)  # index 4
        fight_handler.modify_index(1)  # index 0
        fight_handler.modify_index(1)  # index 1
        expected_index = 1
        assert world_data.read_data['current-fight']['index'] == expected_index

        # Set expectations to the final configuration.
        # NOTE(review): assumes these template fighters correspond 1:1, in
        # initiative order, to the fighters built from init_world_dict --
        # confirm against the scenario data.
        expected_fighters = [
            copy.deepcopy(self.__thief_fighter),
            copy.deepcopy(self.__tank_fighter),
            copy.deepcopy(self.__one_more_guy),
            copy.deepcopy(self.__vodou_priest_fighter),
            copy.deepcopy(self.__bokor_fighter)]
        expected_fighters[0]['opponent'] = {'group': 'PCs', 'name': 'Jack'}
        expected_fighters[0]['actions_this_turn'] = ['pick-opponent',
                                                     'pick-opponent']
        expected_fighters[injured_index]['current']['hp'] -= injured_hp

        # Check that everything is as it should be
        assert len(expected_fighters) == len(fighters)
        assert self.__are_equal(expected_fighters[0], fighters[0]['details'])
        assert self.__are_equal(expected_fighters[1], fighters[1]['details'])
        assert self.__are_equal(expected_fighters[2], fighters[2]['details'])
        assert self.__are_equal(expected_fighters[3], fighters[3]['details'])
        assert self.__are_equal(expected_fighters[4], fighters[4]['details'])
    def test_ranged_to_hit(self):
        '''
        GURPS-specific test

        Verifies the ranged to-hit number for a pistol under every
        combination exercised below: no aim, aiming (braced and unbraced)
        for 1-4 rounds, shooter posture (which should NOT matter for ranged
        attacks, per B551), and opponent posture (which should).
        '''
        if ARGS.verbose:
            print '\n=== test_ranged_to_hit ===\n'

        self.__window_manager = MockWindowManager()
        self.__ruleset = TestRuleset(self.__window_manager)
        mock_fight_handler = MockFightHandler()
        vodou_priest = ca_fighter.Fighter(
            'Priest',
            'group',
            copy.deepcopy(self.__vodou_priest_fighter),
            self.__ruleset,
            self.__window_manager)

        # Draw the pistol and double-check that it's in hand.
        requested_weapon_index = self.__vodou_pistol_index
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'draw-weapon',
                                  'weapon-index': requested_weapon_index},
                                 mock_fight_handler)
        weapon, actual_weapon_index = self.__get_current_weapon(vodou_priest)
        assert actual_weapon_index == requested_weapon_index

        # ranged to-hit should be skill + acc (if aimed) + 1 (if braced)
        # + size modifier + range/speed modifier + special conditions
        #
        # aim for 1 turn += acc, 2 turns += 1, 3+ turns += 1
        # brace += 1

        # no aim, no posture
        expected_to_hit = self.__vodou_priest_fighter_pistol_skill
        self.__ruleset.reset_aim(vodou_priest)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'change-posture',
                                  'posture': 'standing'},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit

        # aim / braced, no posture

        # 1 round
        expected_to_hit = (self.__vodou_priest_fighter_pistol_skill
                           + self.__colt_pistol_acc
                           + 1)  # braced
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'change-posture',
                                  'posture': 'standing'},
                                 mock_fight_handler)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit

        # 2 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + 1  # aiming for 2 rounds

        # 3 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + 2  # aiming for 3 rounds

        # 4 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + 2  # no further benefit

        # aim / not braced, no posture

        # 1 round
        expected_to_hit = (self.__vodou_priest_fighter_pistol_skill
                           + self.__colt_pistol_acc)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'change-posture',
                                  'posture': 'standing'},
                                 mock_fight_handler)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': False},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit

        # 2 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': False},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + 1  # aiming for 2 rounds

        # 3 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': False},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + 2  # aiming for 3 rounds

        # 4 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': False},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + 2  # no further benefit

        # no aim, posture (posture doesn't matter for ranged attacks: B551)
        expected_to_hit = self.__vodou_priest_fighter_pistol_skill
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'change-posture',
                                  'posture': 'crawling'},
                                 mock_fight_handler)
        self.__ruleset.reset_aim(vodou_priest)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit

        # aim / braced, posture (posture not counted for ranged attacks: B551)

        # 1 round
        expected_to_hit = (self.__vodou_priest_fighter_pistol_skill
                           + self.__colt_pistol_acc  # aim
                           + 1)  # braced
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'change-posture',
                                  'posture': 'crawling'},
                                 mock_fight_handler)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit

        # 2 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + 1  # aiming for 2 rounds

        # 3 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + 2  # aiming for 3 rounds

        # 4 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + 2  # no further benefit

        # aim / not braced, posture (no posture minus for ranged attacks: B551)
        self.__ruleset.reset_aim(vodou_priest)
        # NOTE(review): this result is discarded; the call looks vestigial --
        # confirm it has no needed side effect before removing it.
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)

        # 1 round
        expected_to_hit = (self.__vodou_priest_fighter_pistol_skill
                           + self.__colt_pistol_acc)  # aim
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'change-posture',
                                  'posture': 'crawling'},
                                 mock_fight_handler)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': False},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit

        # 2 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': False},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + 1  # aiming for 2 rounds

        # 3 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': False},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + 2  # aiming for 3 rounds

        # 4 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': False},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + 2  # no further benefit

        # --- Opponents w/ posture ---

        expected_to_hit = self.__vodou_priest_fighter_pistol_skill
        self.__ruleset.reset_aim(vodou_priest)
        tank = ca_fighter.Fighter(
            'Tank',
            'group',
            copy.deepcopy(self.__tank_fighter),
            self.__ruleset,
            self.__window_manager)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'change-posture',
                                  'posture': 'standing'},
                                 mock_fight_handler)

        # Picking opponent doesn't change things
        self.__ruleset.do_action(tank,
                                 {'action-name': 'change-posture',
                                  'posture': 'standing'},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, tank, weapon)
        assert to_hit == expected_to_hit

        # change posture of tank, the opponent (-2)
        self.__ruleset.do_action(tank,
                                 {'action-name': 'change-posture',
                                  'posture': 'crawling'},  # -2
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, tank, weapon)
        assert to_hit == (expected_to_hit - 2)

        # change posture of tank (back to standing)
        self.__ruleset.do_action(tank,
                                 {'action-name': 'change-posture',
                                  'posture': 'standing'},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, tank, weapon)
        assert to_hit == expected_to_hit
    def test_messed_up_aim(self):
        '''
        GURPS-specific test

        Verifies the events that spoil an accumulated aim: taking damage
        (unless the Will roll is made), moving, changing posture, and
        defending -- and that a plain shot afterwards carries nothing over.
        '''
        if ARGS.verbose:
            print '\n=== test_messed_up_aim ===\n'

        self.__window_manager = MockWindowManager()
        self.__ruleset = TestRuleset(self.__window_manager)
        mock_fight_handler = MockFightHandler()
        vodou_priest = ca_fighter.Fighter(
            'Priest',
            'group',
            copy.deepcopy(self.__vodou_priest_fighter),
            self.__ruleset,
            self.__window_manager)

        # Draw the pistol and double-check that it's in hand.
        requested_weapon_index = self.__vodou_pistol_index
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'draw-weapon',
                                  'weapon-index': requested_weapon_index},
                                 mock_fight_handler)
        weapon, actual_weapon_index = self.__get_current_weapon(vodou_priest)
        assert actual_weapon_index == requested_weapon_index

        # Regular, no aim - for a baseline
        expected_to_hit = self.__vodou_priest_fighter_pistol_skill
        self.__ruleset.reset_aim(vodou_priest)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'change-posture',
                                  'posture': 'standing'},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit

        # Damage _would_ ruin aim except for successful Will roll
        self.__ruleset.reset_aim(vodou_priest)

        # 1 round
        expected_to_hit = (self.__vodou_priest_fighter_pistol_skill
                           + self.__colt_pistol_acc
                           + 1)  # braced
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit

        # 2 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + 1  # aiming for 2 rounds

        # adjust_hp but MADE Will roll
        # action['action-name'] == 'adjust-hp':
        damage = -1
        self.__window_manager.set_menu_response(
            'roll <= WILL (13) or lose aim', True)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'adjust-hp',
                                  'adj': damage},
                                 mock_fight_handler)

        # 3 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        # aiming for 3 rounds (1st round+brace already in expected_to_hit)
        # + shock
        assert to_hit == expected_to_hit + 2 + damage

        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'end-turn'},
                                 mock_fight_handler)  # clear out shock

        # Damage ruins aim -- miss will roll
        self.__ruleset.reset_aim(vodou_priest)

        # 1 round
        expected_to_hit = (self.__vodou_priest_fighter_pistol_skill
                           + self.__colt_pistol_acc
                           + 1)  # braced
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit

        # 2 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + 1  # aiming for 2 rounds

        # adjust_hp and MISSES Will roll
        self.__window_manager.set_menu_response(
            'roll <= WILL (13) or lose aim', False)
        damage = -1
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'adjust-hp',
                                  'adj': damage},
                                 mock_fight_handler)

        # 3 rounds (well, 1 round)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + damage  # aiming for 1 round + shock

        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'end-turn'},
                                 mock_fight_handler)  # clear out shock

        # Moving ruins aim (the action issued below is 'move')
        self.__ruleset.reset_aim(vodou_priest)

        # 1 round
        expected_to_hit = (self.__vodou_priest_fighter_pistol_skill
                           + self.__colt_pistol_acc
                           + 1)  # braced
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit

        # 2 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + 1  # aiming for 2 rounds

        # Move to spoil the aim
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'move'},
                                 mock_fight_handler)

        # 3 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit  # aiming for 1 round

        # Posture ruins aim

        # 1 round
        expected_to_hit = (self.__vodou_priest_fighter_pistol_skill
                           + self.__colt_pistol_acc
                           + 1)  # braced
        self.__ruleset.reset_aim(vodou_priest)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit

        # 2 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + 1  # aiming for 2 rounds

        # Change posture to spoil the aim
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'change-posture',
                                  'posture': 'lying'},
                                 mock_fight_handler)

        # 3 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit  # aiming for 1 round

        # Defense ruins aim
        self.__ruleset.reset_aim(vodou_priest)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'change-posture',
                                  'posture': 'standing'},
                                 mock_fight_handler)

        # 1 round
        expected_to_hit = (self.__vodou_priest_fighter_pistol_skill
                           + self.__colt_pistol_acc
                           + 1)  # braced
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit

        # 2 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + 1  # aiming for 2 rounds

        # Defend to spoil the aim
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'defend'},
                                 mock_fight_handler)

        # 3 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        # aim was spoiled by the defense, so only 1 round of aim counts
        assert to_hit == expected_to_hit

        # Last One is Regular - shows nothing carries over
        expected_to_hit = self.__vodou_priest_fighter_pistol_skill
        self.__ruleset.reset_aim(vodou_priest)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'change-posture',
                                  'posture': 'standing'},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit
def test_melee_to_hit(self):
'''
GURPS-specific test
'''
if ARGS.verbose:
print '\n=== test_melee_to_hit ===\n'
self.__window_manager = MockWindowManager()
self.__ruleset = TestRuleset(self.__window_manager)
mock_fight_handler = MockFightHandler()
thief = ca_fighter.Fighter(
'Thief',
'group',
copy.deepcopy(self.__thief_fighter),
self.__ruleset,
self.__window_manager)
requested_weapon_index = 1 # Knife
self.__ruleset.do_action(thief,
{'action-name': 'draw-weapon',
'weapon-index': requested_weapon_index},
mock_fight_handler)
weapon, actual_weapon_index = self.__get_current_weapon(thief)
assert actual_weapon_index == requested_weapon_index
# melee to-hit should be skill + special conditions
# no posture
expected_to_hit = self.__thief_knife_skill
self.__ruleset.do_action(thief,
{'action-name': 'change-posture',
'posture': 'standing'},
mock_fight_handler)
to_hit, why = self.__ruleset.get_to_hit(thief, None, weapon)
assert to_hit == expected_to_hit
# posture
expected_to_hit = (self.__thief_knife_skill
+ self.__crawling_attack_mod)
self.__ruleset.do_action(thief,
{'action-name': 'change-posture',
'posture': 'crawling'},
mock_fight_handler)
to_hit, why = self.__ruleset.get_to_hit(thief, None, weapon)
assert to_hit == expected_to_hit
# --- Opponents w/ posture (shouldn't change melee attack) ---
tank_fighter = ca_fighter.Fighter(
'Tank',
'group',
copy.deepcopy(self.__tank_fighter),
self.__ruleset,
self.__window_manager)
self.__ruleset.do_action(thief,
{'action-name': 'change-posture',
'posture': 'standing'},
mock_fight_handler)
# Picking opponent doesn't change things
expected_to_hit = self.__thief_knife_skill
self.__ruleset.do_action(tank_fighter,
{'action-name': 'change-posture',
'posture': 'standing'},
mock_fight_handler)
to_hit, why = self.__ruleset.get_to_hit(thief, tank_fighter, weapon)
assert to_hit == expected_to_hit
# change posture of tank (opponent)
self.__ruleset.do_action(tank_fighter,
{'action-name': 'change-posture',
'posture': 'crawling'}, # -2
mock_fight_handler)
to_hit, why = self.__ruleset.get_to_hit(thief, tank_fighter, weapon)
assert to_hit == expected_to_hit
# change posture of thief (back to standing)
self.__ruleset.do_action(tank_fighter,
{'action-name': 'change-posture',
'posture': 'standing'},
mock_fight_handler)
to_hit, why = self.__ruleset.get_to_hit(thief, tank_fighter, weapon)
assert to_hit == expected_to_hit
# --- Aiming does not help ---
self.__ruleset.reset_aim(thief)
expected_to_hit = self.__thief_knife_skill
to_hit, why = self.__ruleset.get_to_hit(thief, None, weapon)
# 1 round
self.__ruleset.do_action(thief,
{'action-name': 'aim', 'braced': False},
mock_fight_handler)
to_hit, why = self.__ruleset.get_to_hit(thief, None, weapon)
assert to_hit == expected_to_hit
# 2 rounds
self.__ruleset.do_action(thief,
{'action-name': 'aim', 'braced': False},
mock_fight_handler)
to_hit, why = self.__ruleset.get_to_hit(thief, None, weapon)
assert to_hit == expected_to_hit
    def test_adjust_hp(self):
        '''
        GURPS-specific test.

        Exercises the 'adjust-hp' action: damage applied with and without
        armor DR, shock penalties to DX-based skills (B419), major wounds
        with their knockdown / stun / unconsciousness outcomes (B420), and
        how taking damage interacts with an in-progress aim (B324).
        '''
        if ARGS.verbose:
            print '\n=== test_adjust_hp ===\n'

        # Setup: a priest with his pistol drawn and armor donned.

        self.__window_manager = MockWindowManager()
        self.__ruleset = TestRuleset(self.__window_manager)
        mock_fight_handler = MockFightHandler()
        vodou_priest = ca_fighter.Fighter(
                'Priest',
                'group',
                copy.deepcopy(self.__vodou_priest_fighter),
                self.__ruleset,
                self.__window_manager)

        requested_weapon_index = self.__vodou_pistol_index
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'draw-weapon',
                                  'weapon-index': requested_weapon_index},
                                 mock_fight_handler)
        weapon, actual_weapon_index = self.__get_current_weapon(vodou_priest)
        assert actual_weapon_index == requested_weapon_index
        assert weapon.details['name'] == "pistol, Colt 170D"

        requested_armor_index = 2
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'don-armor',
                                  'armor-index': requested_armor_index},
                                 mock_fight_handler)
        armor_index_list = vodou_priest.get_current_armor_indexes()
        armor_list = vodou_priest.get_items_from_indexes(armor_index_list)
        assert len(armor_index_list) == 1
        assert armor_index_list[0] == requested_armor_index
        assert armor_list[0]['name'] == "Sport coat/Jeans"

        # Baseline numbers to compare against after damage is applied.
        original_to_hit, ignore = self.__ruleset.get_to_hit(vodou_priest,
                                                            None,
                                                            weapon)
        original_hand_to_hand_info = self.__ruleset.get_unarmed_info(
                vodou_priest,
                None,
                None)
        original_dodge_skill, ignore = self.__ruleset.get_dodge_skill(
                vodou_priest)

        # Test that the HP are reduced withOUT DR adjustment

        damage_1st = -3
        self.__window_manager.set_menu_response('Use Armor\'s DR?', False)
        original_hp = vodou_priest.details['current']['hp']
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'adjust-hp',
                                  'adj': damage_1st},
                                 mock_fight_handler)
        modified_hp = vodou_priest.details['current']['hp']
        assert modified_hp == original_hp + damage_1st

        # Shock (B419): -damage to DX/IQ-based skills (not defense) on the
        # next turn.
        to_hit, ignore = self.__ruleset.get_to_hit(vodou_priest,
                                                   None,
                                                   weapon)
        assert to_hit == original_to_hit + damage_1st  # damage is less than 4
        hand_to_hand_info = self.__ruleset.get_unarmed_info(vodou_priest,
                                                            None,
                                                            None)
        assert (hand_to_hand_info['punch_skill'] ==
                original_hand_to_hand_info['punch_skill'] + damage_1st)
        assert (hand_to_hand_info['kick_skill'] ==
                original_hand_to_hand_info['kick_skill'] + damage_1st)
        assert (hand_to_hand_info['parry_skill'] ==
                original_hand_to_hand_info['parry_skill'])  # no shock

        # Test that the HP are NOT reduced WITH DR adjustment

        damage_2nd = -1
        self.__window_manager.set_menu_response('Use Armor\'s DR?', True)
        original_hp = vodou_priest.details['current']['hp']
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'adjust-hp',
                                  'adj': damage_2nd},
                                 mock_fight_handler)
        modified_hp = vodou_priest.details['current']['hp']
        assert modified_hp == original_hp  # No damage because of DR

        # Shock (B419) is only from the 1st attack since this did no damage
        # -hp to DX/IQ (not defense) on your next turn
        to_hit, ignore = self.__ruleset.get_to_hit(vodou_priest,
                                                   None,
                                                   weapon)
        assert to_hit == original_to_hit + damage_1st  # damage is less than 4
        hand_to_hand_info = self.__ruleset.get_unarmed_info(vodou_priest,
                                                            None,
                                                            None)
        assert (hand_to_hand_info['punch_skill'] ==
                original_hand_to_hand_info['punch_skill'] + damage_1st)
        assert (hand_to_hand_info['kick_skill'] ==
                original_hand_to_hand_info['kick_skill'] + damage_1st)
        assert (hand_to_hand_info['parry_skill'] ==
                original_hand_to_hand_info['parry_skill'])  # no shock

        # Test that the HP ARE reduced WITH DR adjustment (damage exceeds DR)

        expected_damage = -2
        pre_armor_damage = expected_damage - self.__vodou_priest_armor_dr
        self.__window_manager.set_menu_response('Use Armor\'s DR?', True)
        original_hp = vodou_priest.details['current']['hp']
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'adjust-hp',
                                  'adj': pre_armor_damage},
                                 mock_fight_handler)
        modified_hp = vodou_priest.details['current']['hp']
        assert modified_hp == original_hp + expected_damage

        # Shock is capped at -4
        max_shock = -4
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == original_to_hit + max_shock
        hand_to_hand_info = self.__ruleset.get_unarmed_info(vodou_priest,
                                                            None,
                                                            None)
        assert (hand_to_hand_info['punch_skill'] ==
                original_hand_to_hand_info['punch_skill'] + max_shock)
        assert (hand_to_hand_info['kick_skill'] ==
                original_hand_to_hand_info['kick_skill'] + max_shock)
        assert (hand_to_hand_info['parry_skill'] ==
                original_hand_to_hand_info['parry_skill'])  # no shock

        #
        # Let's heal the guy
        #
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'end-turn'},
                                 mock_fight_handler)  # clear out shock
        # Check for death, check for unconscious
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'start-turn'},
                                 mock_fight_handler)
        vodou_priest.details['current']['hp'] = (
                vodou_priest.details['permanent']['hp'])

        # Major wound (B420) - Make HT roll (no knockdown or stun)

        # +1 to make sure that the damage is more than half
        major_damage = - ((vodou_priest.details['permanent']['hp'] / 2) + 1)
        self.__window_manager.set_menu_response('Use Armor\'s DR?', False)
        # TODO: clear the 'pass-out-immediately' flag for this.  make a
        # separate test for 'pass-out-immediately'.  Remember to put the
        # original value back into that flag.
        self.__window_manager.set_menu_response(
            ('Major Wound (B420): Roll vs HT (%d) or be Stunned and Knocked Down' %
                self.__vodou_priest_ht),
            ca_gurps_ruleset.GurpsRuleset.MAJOR_WOUND_SUCCESS)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'adjust-hp',
                                  'adj': major_damage},
                                 mock_fight_handler)
        hand_to_hand_info = self.__ruleset.get_unarmed_info(vodou_priest,
                                                            None,
                                                            None)
        assert (hand_to_hand_info['parry_skill'] ==
                original_hand_to_hand_info['parry_skill'])  # no shock
        dodge_skill, ignore = self.__ruleset.get_dodge_skill(vodou_priest)
        assert dodge_skill == original_dodge_skill  # shock
        assert vodou_priest.details['posture'] == 'standing'

        # Major wound (B420) - miss HT roll (knockdown and stunned)

        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'end-turn'},
                                 mock_fight_handler)  # clear out shock
        # Check for death, check for unconscious
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'start-turn'},
                                 mock_fight_handler)
        vodou_priest.details['current']['hp'] = (
                vodou_priest.details['permanent']['hp'])
        self.__window_manager.set_menu_response('Use Armor\'s DR?', False)
        self.__window_manager.set_menu_response(
            ('Major Wound (B420): Roll vs HT (%d) or be Stunned and Knocked Down' %
                self.__vodou_priest_ht),
            ca_gurps_ruleset.GurpsRuleset.MAJOR_WOUND_SIMPLE_FAIL)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'adjust-hp',
                                  'adj': major_damage},
                                 mock_fight_handler)
        hand_to_hand_info = self.__ruleset.get_unarmed_info(vodou_priest,
                                                            None,
                                                            None)
        # Stunned (-4 to defense) and knocked down (lying: -3 to defense).
        stun_penalty = -4
        posture_penalty = -3
        total_penalty = stun_penalty + posture_penalty
        assert (hand_to_hand_info['parry_skill'] ==
                original_hand_to_hand_info['parry_skill'] + total_penalty)
        dodge_skill, ignore = self.__ruleset.get_dodge_skill(vodou_priest)
        assert dodge_skill == original_dodge_skill + total_penalty
        assert vodou_priest.details['posture'] == 'lying'

        # End of the turn -- check for stun (B420) to be over
        self.__window_manager.set_menu_response(
            'Priest Stunned (B420): Roll <= HT to recover',
            True)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'end-turn'},
                                 mock_fight_handler)  # clear out shock
        hand_to_hand_info = self.__ruleset.get_unarmed_info(vodou_priest,
                                                            None,
                                                            None)
        # Stun should be over -- now there's only the posture penalty
        posture_penalty = -3
        total_penalty = posture_penalty
        assert (hand_to_hand_info['parry_skill'] ==
                original_hand_to_hand_info['parry_skill'] + total_penalty)
        dodge_skill, ignore = self.__ruleset.get_dodge_skill(vodou_priest)
        assert dodge_skill == original_dodge_skill + total_penalty
        assert vodou_priest.details['posture'] == 'lying'

        # Check for death, check for unconscious
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'start-turn'},
                                 mock_fight_handler)

        # Major wound (B420) - bad fail (unconscious)

        vodou_priest.details['current']['hp'] = (
                vodou_priest.details['permanent']['hp'])
        self.__window_manager.set_menu_response('Use Armor\'s DR?', False)
        self.__window_manager.set_menu_response(
            ('Major Wound (B420): Roll vs HT (%d) or be Stunned and Knocked Down' %
                self.__vodou_priest_ht),
            ca_gurps_ruleset.GurpsRuleset.MAJOR_WOUND_BAD_FAIL)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'adjust-hp',
                                  'adj': major_damage},
                                 mock_fight_handler)
        assert not vodou_priest.is_conscious()

        # # # # # # # # # # # # # # # #

        # Aim (B324): on injury, make a Will roll or lose the aim.

        # fail will roll #

        # Start by healing him up
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'end-turn'},
                                 mock_fight_handler)  # clear out shock
        # Check for death, check for unconscious
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'start-turn'},
                                 mock_fight_handler)
        vodou_priest.details['current']['hp'] = (
                vodou_priest.details['permanent']['hp'])
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'change-posture',
                                  'posture': 'standing'},
                                 mock_fight_handler)

        self.__ruleset.reset_aim(vodou_priest)

        # 1 round
        expected_to_hit = (self.__vodou_priest_fighter_pistol_skill
                           + self.__colt_pistol_acc
                           + 1)  # braced
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit

        # 2 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + 1  # aiming for 2 rounds

        # Take damage, fail will roll: accumulated aim is lost
        self.__window_manager.set_menu_response('Use Armor\'s DR?', False)
        self.__window_manager.set_menu_response(
            "roll <= WILL (13) or lose aim", False)
        damage = -1
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'adjust-hp',
                                  'adj': damage},
                                 mock_fight_handler)

        # 3 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + damage  # aiming for 1 round + shock

        # make will roll #

        # Start by healing him up
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'end-turn'},
                                 mock_fight_handler)  # clear out shock
        # Check for death, check for unconscious
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'start-turn'},
                                 mock_fight_handler)
        vodou_priest.details['current']['hp'] = (
                vodou_priest.details['permanent']['hp'])
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'change-posture',
                                  'posture': 'standing'},
                                 mock_fight_handler)
        self.__ruleset.reset_aim(vodou_priest)

        # 1 round
        expected_to_hit = (self.__vodou_priest_fighter_pistol_skill
                           + self.__colt_pistol_acc
                           + 1)  # braced
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit

        # 2 rounds
        expected_to_hit += 1  # aiming for 2 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit

        # Take damage, make will roll: aim is preserved
        expected_to_hit += 1  # aiming for 3 rounds
        self.__window_manager.set_menu_response('Use Armor\'s DR?', False)
        self.__window_manager.set_menu_response(
            "roll <= WILL (13) or lose aim", True)
        damage = -1
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'adjust-hp',
                                  'adj': damage},
                                 mock_fight_handler)

        # 3 rounds
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'aim', 'braced': True},
                                 mock_fight_handler)
        to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
        assert to_hit == expected_to_hit + damage  # aiming for 1 round + shock

        # B327
        # TODO: if adjusted_hp <= -(5 * fighter.details['permanent']['hp']):
        #           fighter.details['state'] = 'dead'

        # Start by healing him up
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'end-turn'},
                                 mock_fight_handler)  # clear out shock
        # Check for death, check for unconscious
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'start-turn'},
                                 mock_fight_handler)
        vodou_priest.details['current']['hp'] = (
                vodou_priest.details['permanent']['hp'])
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'change-posture',
                                  'posture': 'standing'},
                                 mock_fight_handler)
        self.__ruleset.reset_aim(vodou_priest)
def test_adjust_hp_2(self):
'''
GURPS-specific test
'''
if ARGS.verbose:
print '\n=== test_adjust_hp_2 ===\n'
# Setup
self.__window_manager = MockWindowManager()
self.__ruleset = TestRuleset(self.__window_manager)
mock_fight_handler = MockFightHandler()
vodou_priest = ca_fighter.Fighter(
'Priest',
'group',
copy.deepcopy(self.__vodou_priest_fighter),
self.__ruleset,
self.__window_manager)
del vodou_priest.details['advantages']['Combat Reflexes']
original_hand_to_hand_info = self.__ruleset.get_unarmed_info(
vodou_priest,
None,
None)
original_dodge_skill, ignore = self.__ruleset.get_dodge_skill(
vodou_priest)
# high pain threshold (B59)
# - no shock / +3 to HT roll for knockdown and stunning
'''
There's:
Any damage (NONE for high pain threshold):
- shock: -damage (-4 max), DX-based skills, NOT defense, 1 round
Major wound damage (over 1/2 permanent HP)
- stunning: -4 defense, do nothing, roll at end of turn
- knockdown
'''
# Test High Pain Threshold
vodou_priest.details['advantages']['High Pain Threshold'] = 10
self.__window_manager.set_menu_response('Use Armor\'s DR?', False)
high_pain_thrshold_margin = 3
stun_roll = self.__vodou_priest_ht + high_pain_thrshold_margin
self.__window_manager.set_menu_response(
('Major Wound (B420): Roll vs. HT+3 (%d) or be Stunned and Knocked Down' %
stun_roll),
ca_gurps_ruleset.GurpsRuleset.MAJOR_WOUND_SIMPLE_FAIL)
# failed the high stun roll so knockdown & stun is still in effect
# +1 to make sure that the damage is more than half
major_damage = - ((vodou_priest.details['permanent']['hp'] / 2) + 1)
self.__ruleset.do_action(vodou_priest,
{'action-name': 'adjust-hp',
'adj': major_damage},
mock_fight_handler)
hand_to_hand_info = self.__ruleset.get_unarmed_info(vodou_priest,
None,
None)
attack_lying_penalty = -4 # B551
defense_lying_penalty = -3 # B551
assert (hand_to_hand_info['punch_skill'] ==
original_hand_to_hand_info['punch_skill'] +
attack_lying_penalty)
assert (hand_to_hand_info['kick_skill'] ==
original_hand_to_hand_info['kick_skill'] +
attack_lying_penalty)
# Defense is at -4 (stun); shock is the HP stuff
stun_penalty = -4
total_penalty = stun_penalty + defense_lying_penalty
assert (hand_to_hand_info['parry_skill'] ==
original_hand_to_hand_info['parry_skill'] + total_penalty)
dodge_skill, ignore = self.__ruleset.get_dodge_skill(vodou_priest)
assert dodge_skill == original_dodge_skill + total_penalty
assert vodou_priest.details['posture'] == 'lying'
# # low pain threshold (B142)
# # - 2x shock / -4 to HT roll for knockdown and stunning
# # - according to KROMM, the max is -8 for LPT
# del vodou_priest.details['advantages']['High Pain Threshold']
# vodou_priest.details['advantages']['Low Pain Threshold'] = -10
# '''
# There's:
# Any damage (x2 for Low Pain threshold -- According to KROMM, the
# max is -8)
# - shock: -damage (-4 max), DX-based skills, NOT defense, 1 round
# Major wound damage (over 1/2 permanent HP)
# - stunning: -4 defense, do nothing, roll at end of turn
# - knockdown
# '''
# Test High Pain Threshold
# vodou_priest.details['advantages']['High Pain Threshold'] = 10
# self.__window_manager.set_menu_response('Use Armor\'s DR?', False)
# high_pain_thrshold_margin = 3
# stun_roll = self.__vodou_priest_ht + high_pain_thrshold_margin
# self.__window_manager.set_menu_response(
# ('Major Wound (B420): Roll vs. HT+3 (%d) or be Stunned and Knocked Down' %
# stun_roll),
# ca_gurps_ruleset.GurpsRuleset.MAJOR_WOUND_SIMPLE_FAIL)
# # failed the high stun roll so knockdown & stun is still in effect
# # +1 to make sure that the damage is more than half
# major_damage = - ((vodou_priest.details['permanent']['hp'] / 2) + 1)
# self.__ruleset.do_action(vodou_priest,
# {'action-name': 'adjust-hp',
# 'adj': major_damage},
# mock_fight_handler)
# hand_to_hand_info = self.__ruleset.get_unarmed_info(vodou_priest,
# None,
# None,
# unarmed_skills)
# attack_lying_penalty = -4 # B551
# defense_lying_penalty = -3 # B551
# assert (hand_to_hand_info['punch_skill'] ==
# original_hand_to_hand_info['punch_skill'] + attack_lying_penalty)
# assert (hand_to_hand_info['kick_skill'] ==
# original_hand_to_hand_info['kick_skill'] + attack_lying_penalty)
# # Defense is at -4 (stun); shock is the HP stuff
# stun_penalty = -4
# total_penalty = stun_penalty + defense_lying_penalty
# assert (hand_to_hand_info['parry_skill'] ==
# original_hand_to_hand_info['parry_skill'] + total_penalty)
# dodge_skill, ignore = self.__ruleset.get_dodge_skill(vodou_priest)
# assert dodge_skill == original_dodge_skill + total_penalty
# assert vodou_priest.details['posture'] == 'lying'
def test_spell_casting(self):
'''
GURPS-specific test
'''
if ARGS.verbose:
print '\n=== test_spell_casting ===\n'
# Setup
self.__window_manager = MockWindowManager()
self.__ruleset = TestRuleset(self.__window_manager)
mock_fight_handler = MockFightHandler()
vodou_priest = ca_fighter.Fighter(
'Priest',
'group',
copy.deepcopy(self.__vodou_priest_fighter),
self.__ruleset,
self.__window_manager)
opponent = ca_fighter.Fighter(
'Opponent',
'other_group',
copy.deepcopy(self.__one_more_guy),
self.__ruleset,
self.__window_manager)
mock_fight_handler.set_opponent_for(vodou_priest, opponent)
mock_fight_handler.set_fighter_object('Priest',
'group',
vodou_priest)
mock_fight_handler.set_fighter_object('Opponent',
'other_group',
opponent)
trials = [
{'name': "Animate Shadow",
'cost': 4,
'casting time': 2,
'skill': 16,
'skill-bonus': -1,
'duration': 5,
'notes': "M154, Subject's shadow attacks them",
'range': 'reguar',
'save': ['ht']},
# opponent must roll save
# mark opponent with spell
{'name': "Awaken",
'cost': 1,
'casting time': 1,
'skill': 18,
'skill-bonus': -1,
'duration': 0,
'notes': "M90",
'range': 'area',
'save': []},
# Radius of spell effect
# Mark opponent with spell
{'name': "Death Vision",
'cost': 2,
'casting time': 3,
'skill': 16,
'skill-bonus': -1,
'duration': 1,
'notes': "M149, until IQ roll made",
'range': 'reguar',
'save': []},
# Mark opponent with spell
{'name': "Explosive Lightning",
'cost': 2,
'casting time': 3,
'skill': 16,
'skill-bonus': -1,
'duration': 0,
'notes': "M196, cost 2-mage level, damage 1d-1 /2",
'range': 'missile',
'save': []},
# Cost to cast
# Seconds to cast
# Make a Ranged Attack
# Mark samuel - Erik with
{'name': "Itch",
'cost': 2,
'casting time': 1,
'skill': 12,
'skill-bonus': 0,
'duration': 2,
'notes': "M35",
'range': 'regular',
'save': ['ht']},
# duration
]
original_fp = vodou_priest.details['current']['fp']
assert original_fp == vodou_priest.details['permanent']['fp']
for trial in trials:
opponent.timers.clear_all()
vodou_priest.timers.clear_all()
vodou_priest.details['current']['fp'] = original_fp
if (ca_gurps_ruleset.GurpsRuleset.spells[
trial['name']]['range'] == 'area'):
self.__window_manager.set_input_box_response(
'Radius of spell effect (%s) in yards' % trial['name'],
trial['cost'])
if (ca_gurps_ruleset.GurpsRuleset.spells[
trial['name']]['cost'] is None):
self.__window_manager.set_input_box_response(
'Cost to cast (%s) - see (%s) ' % (trial['name'],
trial['notes']),
trial['cost'])
if (ca_gurps_ruleset.GurpsRuleset.spells[
trial['name']]['casting time'] is None):
self.__window_manager.set_input_box_response(
'Seconds to cast (%s) - see (%s) ' % (trial['name'],
trial['notes']),
trial['casting time'])
if (ca_gurps_ruleset.GurpsRuleset.spells[
trial['name']]['duration'] is None):
self.__window_manager.set_input_box_response(
'Duration for (%s) - see (%s) ' % (trial['name'],
trial['notes']),
trial['duration'])
if (ca_gurps_ruleset.GurpsRuleset.spells[
trial['name']]['range'] == 'missile'):
self.__window_manager.set_menu_response(
'Make a Ranged Attack', True)
# TODO: need to deal with the ONE spell (Evisceration) that has
# two saves
if (len(ca_gurps_ruleset.GurpsRuleset.spells[
trial['name']]['save']) > 0):
self.__window_manager.set_menu_response(
('%s must roll %s save against %s (skill %d)' % (
opponent.name,
trial['save'][0],
trial['name'],
trial['skill'])),
False) # False: they didn't save
self.__window_manager.set_menu_response(
'Mark %s with spell' % opponent.name, True)
action = {
'action-name': 'cast-spell',
'spell-index': self.__vodou_priest_spell_index[trial['name']]
}
self.__ruleset.do_action(vodou_priest, action, mock_fight_handler)
# Cost
expected_cost = trial['cost'] + trial['skill-bonus']
assert (vodou_priest.details['current']['fp'] ==
original_fp - expected_cost)
# Watch the casting time and the spell duration
casting_text = [('Casting (%s) @ skill (%d): %s' % (
trial['name'], trial['skill'], trial['notes'])),
' Defense: none',
' Move: none']
opponent_casting_text = ('Waiting for "%s" spell to take affect' %
trial['name'])
# Check each round of casting
for turn in range(trial['casting time']):
# For the caster, you're doing the check, end-turn, then
# start-turn because the action takes place in the middle of a
# turn.
assert len(vodou_priest.details['timers']) == 1
assert (vodou_priest.details['timers'][0]['string'] ==
casting_text)
assert vodou_priest.details['timers'][0]['busy']
self.__ruleset.do_action(vodou_priest,
{'action-name': 'end-turn'},
mock_fight_handler)
self.__ruleset.do_action(vodou_priest,
{'action-name': 'start-turn'},
mock_fight_handler)
# For the opponent, you need to do start-turn, check, then
# end-turn because they get affected after the caster does
# their thing.
self.__ruleset.do_action(opponent,
{'action-name': 'start-turn'},
mock_fight_handler)
assert len(opponent.details['timers']) == 1
assert (opponent.details['timers'][0]['string'] ==
opponent_casting_text)
self.__ruleset.do_action(opponent,
{'action-name': 'end-turn'},
mock_fight_handler)
# One extra round for the opponent so that the timers get deleted.
# Note the proper progression of start-turn / end-turn from the
# casting loop through this through the duration loop.
self.__ruleset.do_action(opponent,
{'action-name': 'start-turn'},
mock_fight_handler)
# Check each round of active spell
active_text = 'CAST SPELL (%s) ACTIVE' % trial['name']
opponent_active_text = 'SPELL "%s" AGAINST ME' % trial['name']
for turn in range(trial['duration']):
assert len(vodou_priest.details['timers']) == 1
assert (vodou_priest.details['timers'][0]['string'] ==
active_text)
if 'busy' in vodou_priest.details['timers'][0]:
assert not vodou_priest.details['timers'][0]['busy']
# else, it's OK
self.__ruleset.do_action(vodou_priest,
{'action-name': 'end-turn'},
mock_fight_handler)
self.__ruleset.do_action(vodou_priest,
{'action-name': 'start-turn'},
mock_fight_handler)
# Opponent
assert len(opponent.details['timers']) == 1
assert (opponent.details['timers'][0]['string'] ==
opponent_active_text)
self.__ruleset.do_action(opponent,
{'action-name': 'end-turn'},
mock_fight_handler)
self.__ruleset.do_action(opponent,
{'action-name': 'start-turn'},
mock_fight_handler)
# Make sure that all of the timers are dead
assert len(vodou_priest.details['timers']) == 0
assert len(opponent.details['timers']) == 0
def test_don_doff_armor(self):
'''
General test
'''
if ARGS.verbose:
print '\n=== test_don_doff_armor ===\n'
# Setup
self.__window_manager = MockWindowManager()
self.__ruleset = TestRuleset(self.__window_manager)
mock_fight_handler = MockFightHandler()
vodou_priest = ca_fighter.Fighter(
'Priest',
'group',
copy.deepcopy(self.__vodou_priest_fighter),
self.__ruleset,
self.__window_manager)
# Don armor
requested_armor_index = self.__vodou_armor_index
self.__ruleset.do_action(vodou_priest,
{'action-name': 'don-armor',
'armor-index': requested_armor_index},
mock_fight_handler)
armor_index_list = vodou_priest.get_current_armor_indexes()
armor_list = vodou_priest.get_items_from_indexes(armor_index_list)
assert len(armor_index_list) == 1
assert armor_index_list[0] == requested_armor_index
assert armor_list[0]['name'] == "Sport coat/Jeans"
# Doff armor
armor_index_list = vodou_priest.get_current_armor_indexes()
self.__ruleset.do_action(vodou_priest,
{'action-name': 'doff-armor',
'armor-index': armor_index_list[0]},
mock_fight_handler)
armor_index_list = vodou_priest.get_current_armor_indexes()
assert len(armor_index_list) == 0
# The effect of the armor is tested in 'hp'
def test_draw_sheathe_weapon(self):
'''
General test
'''
if ARGS.verbose:
print '\n=== test_draw_sheathe_weapon ===\n'
# Setup
self.__window_manager = MockWindowManager()
self.__ruleset = TestRuleset(self.__window_manager)
mock_fight_handler = MockFightHandler()
vodou_priest = ca_fighter.Fighter(
'Priest',
'group',
copy.deepcopy(self.__vodou_priest_fighter),
self.__ruleset,
self.__window_manager)
# Draw Weapon
requested_weapon_index = self.__vodou_pistol_index
self.__ruleset.do_action(vodou_priest,
{'action-name': 'draw-weapon',
'weapon-index': requested_weapon_index},
mock_fight_handler)
weapon, actual_weapon_index = self.__get_current_weapon(vodou_priest)
assert actual_weapon_index == requested_weapon_index
assert weapon.details['name'] == "pistol, Colt 170D"
# Sheathe Weapon
self.__ruleset.do_action(vodou_priest,
{'action-name': 'holster-weapon',
'weapon-index': requested_weapon_index},
mock_fight_handler)
weapon, actual_weapon_index = self.__get_current_weapon(vodou_priest)
assert actual_weapon_index == None
# The effect of the weapon is tested throughout the testing
    def test_reload(self):
        '''
        General test.

        Exercises 'attack' shot accounting and 'reload': reloading swaps
        in a fresh clip and decrements the clip count, the ejected clip is
        tracked as its own item, and reloading with nothing left leaves
        the weapon unchanged.
        '''
        if ARGS.verbose:
            print '\n=== test_reload ===\n'

        # Setup

        self.__window_manager = MockWindowManager()
        self.__ruleset = TestRuleset(self.__window_manager)
        mock_fight_handler = MockFightHandler()
        vodou_priest = ca_fighter.Fighter(
                'Priest',
                'group',
                copy.deepcopy(self.__vodou_priest_fighter),
                self.__ruleset,
                self.__window_manager)

        # Draw Weapon

        requested_weapon_index = self.__vodou_pistol_index
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'draw-weapon',
                                  'weapon-index': requested_weapon_index},
                                 mock_fight_handler)
        weapon, actual_weapon_index = self.__get_current_weapon(vodou_priest)
        assert actual_weapon_index == requested_weapon_index
        assert weapon.details['name'] == "pistol, Colt 170D"
        assert weapon.shots_left() == self.__vodou_priest_initial_shots
        clip = vodou_priest.equipment.get_item_by_index(
                self.__vodou_priest_ammo_index)

        # check the number of clips/batteries
        assert clip['count'] == self.__vodou_priest_ammo_count

        # A. Fire twice and verify that the shots left went down

        shots_taken = 2
        for shot in range(shots_taken):
            self.__ruleset.do_action(vodou_priest,
                                     {'action-name': 'attack'},
                                     mock_fight_handler)
            # To simulate the start of the round
            vodou_priest.details['current-weapon'] = 0
        assert (weapon.shots_left() ==
                (self.__vodou_priest_initial_shots - shots_taken))

        # Discard the rest of the shots in the clip
        shots_taken = weapon.shots_left()
        for shot in range(shots_taken):
            self.__ruleset.do_action(vodou_priest,
                                     {'action-name': 'attack'},
                                     mock_fight_handler)
            # To simulate the start of the round
            vodou_priest.details['current-weapon'] = 0

        # Now, reload
        self.__window_manager.set_menu_response('Reload With What', 1)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'reload'},
                                 mock_fight_handler)
        assert weapon.shots_left() == self.__vodou_priest_initial_shots
        assert clip['count'] == (self.__vodou_priest_ammo_count - 1)
        # The ejected clip shows up as exactly one separate 'C Cell' item.
        index, virtual_clip = vodou_priest.equipment.get_item_by_name('C Cell')
        assert(virtual_clip is not None)
        index, second_clip = vodou_priest.equipment.get_item_by_name('C Cell',
                                                                     index)
        assert(second_clip is None)

        # B. Fire and reload until there are no clips available

        # Note: when the last clip is ejected, a new item takes its place and
        # clip['count'] will never go to zero

        clip_count = clip['count']
        for clip_number in range(clip_count):
            # Shoot until this clip is empty (so the reload discards the empty
            # clip)
            shots_left = weapon.shots_left()
            for shot in range(shots_left):
                self.__ruleset.do_action(vodou_priest,
                                         {'action-name': 'attack'},
                                         mock_fight_handler)
                # To simulate the start of the round
                vodou_priest.details['current-weapon'] = 0
            self.__window_manager.set_menu_response('Reload With What', 1)
            self.__ruleset.do_action(vodou_priest,
                                     {'action-name': 'reload'},
                                     mock_fight_handler)

        # C. Shoot a number of times and verify that we have the number of
        #    shots left that we expect (in our last clip).

        shots_taken = 3  # Pick a number at random
        for shot in range(shots_taken):
            self.__ruleset.do_action(vodou_priest,
                                     {'action-name': 'attack'},
                                     mock_fight_handler)
            # To simulate the start of the round
            vodou_priest.details['current-weapon'] = 0
        assert (weapon.shots_left() ==
                (self.__vodou_priest_initial_shots - shots_taken))

        # D. Reload when there's nothing left with which to reload

        self.__window_manager.set_menu_response('Reload With What', None)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'reload'},
                                 mock_fight_handler)
        # Shot count is unchanged and no spare 'C Cell' remains.
        assert (weapon.details['ammo']['shots_left'] ==
                (self.__vodou_priest_initial_shots - shots_taken))
        throw_away, virtual_clip = vodou_priest.equipment.get_item_by_name(
                'C Cell')
        assert(virtual_clip is None)
    def test_reload_2(self):
        '''
        General test.

        Verifies that a reload performed with shots still in the weapon
        ejects a partial clip that is tracked separately (with its
        remaining shot count), and that either the partial or a full clip
        can subsequently be loaded.
        '''
        if ARGS.verbose:
            print '\n=== test_reload_2 ===\n'

        # Setup

        self.__window_manager = MockWindowManager()
        self.__ruleset = TestRuleset(self.__window_manager)
        mock_fight_handler = MockFightHandler()
        vodou_priest = ca_fighter.Fighter(
                'Priest',
                'group',
                copy.deepcopy(self.__vodou_priest_fighter),
                self.__ruleset,
                self.__window_manager)

        # Draw Weapon

        requested_weapon_index = self.__vodou_pistol_index
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'draw-weapon',
                                  'weapon-index': requested_weapon_index},
                                 mock_fight_handler)
        weapon, actual_weapon_index = self.__get_current_weapon(vodou_priest)
        assert actual_weapon_index == requested_weapon_index
        assert weapon.details['name'] == "pistol, Colt 170D"
        assert weapon.shots_left() == self.__vodou_priest_initial_shots
        clip = vodou_priest.equipment.get_item_by_index(
                self.__vodou_priest_ammo_index)

        # check the number of clips/batteries
        assert clip['count'] == self.__vodou_priest_ammo_count

        # A. Fire twice and verify that the shots left went down

        shots_taken = 2
        for shot in range(shots_taken):
            self.__ruleset.do_action(vodou_priest,
                                     {'action-name': 'attack'},
                                     mock_fight_handler)
            # To simulate the start of the round
            vodou_priest.details['current-weapon'] = 0
        assert (weapon.shots_left() ==
                (self.__vodou_priest_initial_shots - shots_taken))

        # Reload and verify that there are two different types of clips (a
        # bunch of full ones and one partially full one)
        self.__window_manager.set_menu_response('Reload With What', 1)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'reload'},
                                 mock_fight_handler)
        assert clip['count'] == (self.__vodou_priest_ammo_count - 1)
        first_index, virtual_clip = vodou_priest.equipment.get_item_by_name(
                'C Cell')
        assert(virtual_clip is not None)
        second_index, second_clip = vodou_priest.equipment.get_item_by_name(
                'C Cell', first_index)
        assert(second_clip is not None)
        # The ejected clip remembers how many shots it had left.
        assert(second_clip['shots_left'] == self.__vodou_priest_initial_shots
               - shots_taken)

        # Now, try to reload other clips

        # Start off by shooting off one shot so the reload will work
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'attack'},
                                 mock_fight_handler)
        # To simulate the start of the round
        vodou_priest.details['current-weapon'] = 0

        # Reload with the partial (the previously ejected one)
        self.__window_manager.set_menu_response('Reload With What',
                                                second_index)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'reload'},
                                 mock_fight_handler)
        assert (weapon.shots_left() ==
                (self.__vodou_priest_initial_shots - shots_taken))

        # Now, reload with a full one
        self.__window_manager.set_menu_response('Reload With What',
                                                first_index)
        self.__ruleset.do_action(vodou_priest,
                                 {'action-name': 'reload'},
                                 mock_fight_handler)
        assert (weapon.shots_left() == self.__vodou_priest_initial_shots)
    def test_stun_and_consciousness(self):
        '''
        MIXED test

        Verifies that a 'set-consciousness' action changes the fighter's
        state as reported by Fighter.get_fighter_state, that dropping to
        UNCONSCIOUS also clears the fighter's opponent, and that a 'stun'
        action applies a -4 penalty to both parry and dodge (GURPS stun
        penalty).
        '''
        if ARGS.verbose:
            print '\n=== test_stun_and_consciousness ===\n'
        # Setup: fresh mocks, world, and a fight so there's a current fighter
        self.__window_manager = MockWindowManager()
        self.__ruleset = TestRuleset(self.__window_manager)
        world_data = WorldData(self.init_world_dict)
        mock_program = MockProgram()
        world = ca.World('internal source file',
                         world_data,
                         self.__ruleset,
                         mock_program,
                         self.__window_manager,
                         save_snapshot=False)
        fight_handler = ca.FightHandler(self.__window_manager,
                                        world,
                                        'horsemen',
                                        None,  # Playback history
                                        save_snapshot=False)
        current_fighter = fight_handler.get_current_fighter()
        # Consciousness
        # check consciousness level -- a fresh fighter starts out ALIVE
        state_number = ca_fighter.Fighter.get_fighter_state(
                current_fighter.details)
        assert state_number == ca_fighter.Fighter.ALIVE
        self.__ruleset.do_action(current_fighter,
                                 {'action-name': 'pick-opponent',
                                  'opponent': {'name': 'Moe', 'group': 'PCs'}},
                                 fight_handler)
        # Test: setting consciousness to ALIVE keeps the opponent intact
        new_state = ca_fighter.Fighter.ALIVE
        self.__ruleset.do_action(current_fighter,
                                 {'action-name': 'set-consciousness',
                                  'level': new_state},
                                 fight_handler)
        state_number = ca_fighter.Fighter.get_fighter_state(
                current_fighter.details)
        assert state_number == new_state
        opponent = fight_handler.get_opponent_for(current_fighter)
        assert opponent.name == 'Moe'
        assert opponent.group == 'PCs'
        # ...but going UNCONSCIOUS clears the opponent
        new_state = ca_fighter.Fighter.UNCONSCIOUS
        self.__ruleset.do_action(current_fighter,
                                 {'action-name': 'set-consciousness',
                                  'level': new_state},
                                 fight_handler)
        state_number = ca_fighter.Fighter.get_fighter_state(
                current_fighter.details)
        assert state_number == new_state
        opponent = fight_handler.get_opponent_for(current_fighter)
        assert opponent is None
        # Setup Stun Test: snapshot unstunned parry and dodge numbers
        original_hand_to_hand_info = self.__ruleset.get_unarmed_info(
                current_fighter,
                None,
                None)
        original_dodge_skill, ignore = self.__ruleset.get_dodge_skill(
                current_fighter)
        # Stun
        self.__ruleset.do_action(current_fighter,
                                 {'action-name': 'stun', 'stun': True},
                                 fight_handler)
        # Check whether stunned: defenses should drop by the stun penalty
        hand_to_hand_info = self.__ruleset.get_unarmed_info(current_fighter,
                                                            None,
                                                            None)
        stun_penalty = -4  # GURPS: -4 to active defenses while stunned
        total_penalty = stun_penalty
        assert (hand_to_hand_info['parry_skill'] ==
                original_hand_to_hand_info['parry_skill'] + total_penalty)
        dodge_skill, ignore = self.__ruleset.get_dodge_skill(current_fighter)
        assert dodge_skill == original_dodge_skill + total_penalty
def test_defend(self):
'''
GURPS test
'''
if ARGS.verbose:
print '\n=== test_defend ===\n'
# Setup
self.__window_manager = MockWindowManager()
self.__ruleset = TestRuleset(self.__window_manager)
mock_fight_handler = MockFightHandler()
vodou_priest = ca_fighter.Fighter(
'Priest',
'group',
copy.deepcopy(self.__vodou_priest_fighter),
self.__ruleset,
self.__window_manager)
requested_weapon_index = self.__vodou_pistol_index
self.__ruleset.do_action(vodou_priest,
{'action-name': 'draw-weapon',
'weapon-index': requested_weapon_index},
mock_fight_handler)
weapon, actual_weapon_index = self.__get_current_weapon(vodou_priest)
assert actual_weapon_index == requested_weapon_index
# The only way you can see a 'defend' action is because aim is lost.
# 1 round
expected_to_hit = (self.__vodou_priest_fighter_pistol_skill
+ self.__colt_pistol_acc
+ 1) # braced
self.__ruleset.do_action(vodou_priest,
{'action-name': 'aim', 'braced': True},
mock_fight_handler)
to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
assert to_hit == expected_to_hit
# 2 rounds
self.__ruleset.do_action(vodou_priest,
{'action-name': 'aim', 'braced': True},
mock_fight_handler)
to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
assert to_hit == expected_to_hit + 1 # aiming for 2 rounds
# DEFEND, LOSE AIM #
self.__ruleset.do_action(vodou_priest,
{'action-name': 'defend'},
mock_fight_handler)
# 3 rounds
self.__ruleset.do_action(vodou_priest,
{'action-name': 'aim', 'braced': True},
mock_fight_handler)
to_hit, why = self.__ruleset.get_to_hit(vodou_priest, None, weapon)
assert to_hit == expected_to_hit # aiming for 1 round
    def test_timers(self):
        '''
        Basic test

        Exercises fighter timers: a standard multi-round timer, three
        concurrent timers expiring on different rounds, and the special
        0.9-round timer that is shown only through the current round.
        '''
        if ARGS.verbose:
            print '\n=== test_timers ===\n'
        fighter = ca_fighter.Fighter(
                'Tank',
                'group',
                copy.deepcopy(self.__tank_fighter),
                self.__ruleset,
                self.__window_manager)
        # Test a standard timer: visible for |round_count| rounds, then gone
        timer_id = 0
        round_count = 3
        timer_text = '%d' % timer_id
        timer_obj = ca_timers.Timer(None)
        timer_obj.from_pieces({'parent-name': fighter.name,
                               'rounds': round_count,
                               'string': timer_text})
        fighter.timers.add(timer_obj)
        for i in range(round_count):
            assert len(fighter.details['timers']) == 1
            assert fighter.details['timers'][0]['string'] == timer_text
            # At the _end_ of a fighter's turn, we remove all his expired
            # timers.  That causes the timer expiring this round to be shown.
            fighter.timers.remove_expired_kill_dying()
            fighter.timers.decrement_all()
        fighter.timers.remove_expired_kill_dying()
        assert len(fighter.details['timers']) == 0
        # Test 3 timers simultaneously, expiring on rounds 1, 2, and 3
        timer_id = 0
        round_count = [1, 2, 3]
        timer_count = 3
        for i in range(timer_count):
            timer_text = '%d' % timer_id
            timer_id += 1
            timer_obj = ca_timers.Timer(None)
            timer_obj.from_pieces({'parent-name': fighter.name,
                                   'rounds': round_count[i],
                                   'string': timer_text})
            fighter.timers.add(timer_obj)
        # round 0: all three timers still showing
        fighter.timers.remove_expired_kill_dying()
        fighter.timers.decrement_all()
        assert len(fighter.details['timers']) == 3
        expected = ['0', '1', '2']
        for timer in fighter.details['timers']:
            assert timer['string'] in expected
            expected.remove(timer['string'])
        # round 1: timer '0' has expired
        fighter.timers.remove_expired_kill_dying()
        fighter.timers.decrement_all()
        assert len(fighter.details['timers']) == 2
        expected = ['1', '2']
        for timer in fighter.details['timers']:
            assert timer['string'] in expected
            expected.remove(timer['string'])
        # round 2: only timer '2' remains
        fighter.timers.remove_expired_kill_dying()
        fighter.timers.decrement_all()
        assert len(fighter.details['timers']) == 1
        expected = ['2']
        for timer in fighter.details['timers']:
            assert timer['string'] in expected
            expected.remove(timer['string'])
        fighter.timers.remove_expired_kill_dying()
        assert len(fighter.details['timers']) == 0
        # Test a 0.9 timer.  The 0.9 round timer is supposed to show during
        # the current round but not the beginning of the next round.  The
        # normal way this works is:
        #   - start turn
        #   - set 0.9 timer (shows through the end of this round)
        #   - end turn    # kills regular timers that showed this turn
        #   - start turn  # kills 0.9 timer before stuff is shown this turn
        # Add a 1-turn timer -- a regular timer is shown through the next turn
        timer_id = 0
        round_count = 1
        timer0_text = '%d' % timer_id
        timer_obj = ca_timers.Timer(None)
        timer_obj.from_pieces({'parent-name': fighter.name,
                               'rounds': round_count,
                               'string': timer0_text})
        fighter.timers.add(timer_obj)
        # start turn -- decrement 1-turn timer, timer = 0, keep it this turn
        fighter.timers.decrement_all()
        fighter.timers.remove_expired_keep_dying()
        # assert 1 timer -- didn't kill the 1-turn timer
        assert len(fighter.details['timers']) == 1
        assert fighter.details['timers'][0]['string'] == timer0_text
        # add 0.9 timer -- shown through this turn, killed before next turn
        timer_id = 1
        round_count = 0.9
        timer1_text = '%d' % timer_id
        timer_obj = ca_timers.Timer(None)
        timer_obj.from_pieces({'parent-name': fighter.name,
                               'rounds': round_count,
                               'string': timer1_text})
        fighter.timers.add(timer_obj)
        # assert 2 timers -- right: both timers are there
        assert len(fighter.details['timers']) == 2
        expected = ['0', '1']
        for timer in fighter.details['timers']:
            assert timer['string'] in expected
            expected.remove(timer['string'])
        # end turn -- kills 1 turn timer
        fighter.timers.remove_expired_kill_dying()
        # assert 1 timer -- show that the 1-turn timer was killed
        assert len(fighter.details['timers']) == 1
        assert fighter.details['timers'][0]['string'] == timer1_text
        # start turn - kills 0.9 timer before the next turn's stuff is shown
        fighter.timers.decrement_all()
        fighter.timers.remove_expired_keep_dying()
        # assert 0 timers -- yup, 0.9 timer is now gone
        assert len(fighter.details['timers']) == 0
    def test_save(self):
        '''
        Basic test

        Three scenarios for ending a fight:
          - LEAVE: the monster group moves to the dead-monster list,
          - SAVE:  the fight stays in 'fights' and is marked saved,
          - KEEP:  the fight stays in 'fights' but is not marked saved.
        '''
        if ARGS.verbose:
            print '\n=== test_save ===\n'
        base_world_dict = copy.deepcopy(self.base_world_dict)
        self.__window_manager = MockWindowManager()
        self.__ruleset = TestRuleset(self.__window_manager)
        # Test that leaving a fight moves the bad guys to the dead monster
        # list
        if ARGS.verbose:
            print '\n----------- LEAVE FIGHT -----------\n'
        # NOTE(review): all three scenarios wrap the same |base_world_dict|;
        # this works only if WorldData copies the dict (or the later
        # scenarios don't see the first scenario's mutations) -- confirm.
        world_data = WorldData(base_world_dict)
        mock_program = MockProgram()
        world = ca.World('internal source file',
                         world_data,
                         self.__ruleset,
                         mock_program,
                         self.__window_manager,
                         save_snapshot=False)
        fight_handler = ca.FightHandler(self.__window_manager,
                                        world,
                                        "Dima's Crew",
                                        None,  # Playback history
                                        save_snapshot=False)
        assert "Dima's Crew" in world_data.read_data['fights']
        assert not self.__is_in_dead_monsters(world_data, "Dima's Crew")
        # assert not world_data.read_data['current-fight']['saved']
        self.__window_manager.set_char_response(ord('q'))
        self.__window_manager.set_menu_response('Leaving Fight', False)
        fight_handler.handle_user_input_until_done()
        assert "Dima's Crew" not in world_data.read_data['fights']
        assert self.__is_in_dead_monsters(world_data, "Dima's Crew")
        assert not world_data.read_data['current-fight']['saved']
        #
        # test that SAVING the fight works
        #
        if ARGS.verbose:
            print '\n----------- SAVE FIGHT -----------\n'
        world_data = WorldData(base_world_dict)
        mock_program = MockProgram()
        world = ca.World('internal source file',
                         world_data,
                         self.__ruleset,
                         mock_program,
                         self.__window_manager,
                         save_snapshot=False)
        assert (world_data.read_data['current-fight']['monsters'] !=
                "Dima's Crew")
        fight_handler = ca.FightHandler(self.__window_manager,
                                        world,
                                        "Dima's Crew",
                                        None,  # Playback history
                                        save_snapshot=False)
        assert "Dima's Crew" in world_data.read_data['fights']
        assert not self.__is_in_dead_monsters(world_data, "Dima's Crew")
        # assert not world_data.read_data['current-fight']['saved']
        self.__window_manager.set_char_response(ord('q'))
        self.__window_manager.set_menu_response(
                'Leaving Fight', {'doit': fight_handler.save_fight})
        self.__window_manager.set_menu_response('Leaving Fight', False)
        fight_handler.handle_user_input_until_done()
        assert "Dima's Crew" in world_data.read_data['fights']
        assert not self.__is_in_dead_monsters(world_data, "Dima's Crew")
        assert world_data.read_data['current-fight']['saved']
        assert (world_data.read_data['current-fight']['monsters'] ==
                "Dima's Crew")
        #
        # test that KEEPING the fight works
        #
        if ARGS.verbose:
            print '\n----------- KEEP FIGHT -----------\n'
        world_data = WorldData(base_world_dict)
        mock_program = MockProgram()
        world = ca.World('internal source file',
                         world_data,
                         self.__ruleset,
                         mock_program,
                         self.__window_manager,
                         save_snapshot=False)
        fight_handler = ca.FightHandler(self.__window_manager,
                                        world,
                                        "Dima's Crew",
                                        None,  # Playback history
                                        save_snapshot=False)
        assert "Dima's Crew" in world_data.read_data['fights']
        assert not self.__is_in_dead_monsters(world_data, "Dima's Crew")
        # assert not world_data.read_data['current-fight']['saved']
        self.__window_manager.set_char_response(ord('q'))
        self.__window_manager.set_menu_response(
                'Leaving Fight', {'doit': fight_handler.keep_fight})
        self.__window_manager.set_menu_response('Leaving Fight', False)
        fight_handler.handle_user_input_until_done()
        assert "Dima's Crew" in world_data.read_data['fights']
        assert not self.__is_in_dead_monsters(world_data, "Dima's Crew")
        assert not world_data.read_data['current-fight']['saved']
    def test_add_remove_equipment(self):
        '''
        Basic test

        Verifies add_equipment / remove_equipment semantics: adding an
        identical item bumps the count, a merely similar item gets its
        own slot, new items only append to the end, and removing items
        before/after/at the currently-drawn weapon's index keeps the
        current-weapon index pointing at the right item.
        '''
        if ARGS.verbose:
            print '\n=== test_add_remove_equipment ===\n'
        fighter = ca_fighter.Fighter(
                'Tank',
                'group',
                copy.deepcopy(self.__tank_fighter),
                self.__ruleset,
                self.__window_manager)
        mock_fight_handler = MockFightHandler()
        original_item = fighter.details['stuff'][
                self.__tank_fighter_pistol_index]
        current_count = len(fighter.details['stuff'])
        original_stuff = copy.deepcopy(fighter.details['stuff'])
        # Same item - verify that the count goes up
        assert original_item['count'] == 1
        same_item = copy.deepcopy(original_item)
        same_item['count'] = 2
        before_item_count = fighter.equipment.get_item_count()
        ignore = fighter.add_equipment(same_item, 'test')
        after_item_count = fighter.equipment.get_item_count()
        assert original_item['count'] == 3
        assert before_item_count == after_item_count
        # Similar item - verify that it doesn't just bump the count
        similar_item = copy.deepcopy(original_item)
        similar_item['count'] = 1
        similar_item['acc'] = original_item['acc'] + 1
        assert len(fighter.details['stuff']) == current_count
        self.__window_manager.set_menu_response(
                'Make pistol, Sig D65 the preferred weapon?',
                False)
        ignore = fighter.add_equipment(similar_item, 'test')
        current_count += 1
        assert len(fighter.details['stuff']) == current_count
        # Different item - should always get its own slot
        different_item = {"name": "pistol, Baretta DX 192",
                          "type": ["ranged weapon"],
                          "damage": {"dice": "1d+4"},
                          "acc": 2,
                          "ammo": {"name": "C Cell",
                                   "shots_left": 8,
                                   "shots": 8},
                          "clip": {"name": "C Cell",
                                   "type": ["misc"],
                                   "count": 1,
                                   "notes": "",
                                   "owners": None},
                          "reload": 3,
                          "skill": {"Guns (Pistol)": 0},
                          "count": 1,
                          "owners": None,
                          "notes": ""}
        assert len(fighter.details['stuff']) == current_count
        self.__window_manager.set_menu_response(
                'Make pistol, Baretta DX 192 the preferred weapon?',
                False)
        new_pistol_index = fighter.add_equipment(different_item, 'test')
        current_count += 1
        assert len(fighter.details['stuff']) == current_count
        # Make sure we only add to the end
        for i, original_item in enumerate(original_stuff):
            # We've changed the count on the fighter's pistol
            if i != self.__tank_fighter_pistol_index:
                assert self.__are_equal(original_item,
                                        fighter.details['stuff'][i])
        # Remove counted item: removing 1 of 3 just decrements the count
        self.__window_manager.set_input_box_response(
                'How Many Items (3 Available)?', 1)
        fighter.remove_equipment(self.__tank_fighter_pistol_index)
        weapon = fighter.equipment.get_item_by_index(
                self.__tank_fighter_pistol_index)
        assert weapon is not None
        assert weapon['count'] == 2  # one less than before
        # Remove uncounted item: count 1, so the slot disappears entirely
        fighter.remove_equipment(new_pistol_index)
        weapon = fighter.equipment.get_item_by_index(new_pistol_index)
        assert weapon is None
        # Check the whole list
        '''
        [
        0=>  {"name": "pistol, Sig D65",  # the index of this is stored
                                          # in __tank_fighter_pistol_index
              "type": ["ranged weapon"],
              "damage": {"dice": "1d+4"},
              "acc": 4,
              "ammo": {"name": "C Cell", "shots_left": 9, "shots": 9},
              "reload": 3,
              "skill": {"Guns (Pistol)": 0},
              "count": 1, <------------------------------------------- now 2
              "owners": None,
              "notes": ""
             },
        1=>  {"name": "sick stick",
              "type": ["melee weapon"],
              "damage": {"dice": "1d+1 fat"},
              "skill": {"Axe/Mace": 0},
              "count": 1,
              "owners": None,
              "notes": ""
             },
        2=>  {"name": "C Cell", "type": ["misc"], "count": 5, "notes": "",
              "owners": None,
             },
        3=>  {"name": "pistol, Sig D65",  # the index of this is stored
                                          # in __tank_fighter_pistol_index
              "type": ["ranged weapon"],
              "damage": {"dice": "1d+4"},
              "acc": 4, <---------------------- now 5 -- this is similar item
              "ammo": {"name": "C Cell", "shots_left": 9, "shots": 9},
              "reload": 3,
              "skill": {"Guns (Pistol)": 0},
              "count": 1,
              "owners": None,
              "notes": ""
             },
        4=>  {"name": "pistol, Baretta DX 192", XXXXX--different item-removed
              "type": ["ranged weapon"],
              "damage": {"dice": "1d+4"},
              "acc": 2,
              "ammo": {"name": "C Cell", "shots_left": 8, "shots": 8},
              "reload": 3,
              "skill": {"Guns (Pistol)": 0},
              "count": 1,
              "owners": None,
              "notes": ""
             }
        ]
        '''
        weapon = fighter.equipment.get_item_by_index(0)
        assert weapon['name'] == "pistol, Sig D65"
        assert weapon['acc'] == 4
        assert weapon['count'] == 2
        weapon = fighter.equipment.get_item_by_index(1)
        assert weapon['name'] == "sick stick"
        assert weapon['count'] == 1
        weapon = fighter.equipment.get_item_by_index(2)
        assert weapon['name'] == "C Cell"
        assert weapon['count'] == 5
        weapon = fighter.equipment.get_item_by_index(3)
        assert weapon['name'] == "pistol, Sig D65"
        assert weapon['acc'] == 5
        assert weapon['count'] == 1
        weapon = fighter.equipment.get_item_by_index(4)  # Removed
        assert weapon is None
        # check weapon index: draw the sick stick so we can watch the
        # current-weapon index track removals below
        sick_stick_index = 1
        self.__ruleset.do_action(fighter,
                                 {'action-name': 'draw-weapon',
                                  'weapon-index': sick_stick_index},
                                 mock_fight_handler)
        weapon, actual_weapon_index = self.__get_current_weapon(fighter)
        assert actual_weapon_index == sick_stick_index
        # remove counted item before weapon index: index shouldn't move
        sig_acc_4_index = 0
        self.__window_manager.set_input_box_response(
                'How Many Items (2 Available)?', 1)
        fighter.remove_equipment(sig_acc_4_index)  # Should just reduce the count
        weapon = fighter.equipment.get_item_by_index(0)
        assert weapon['name'] == "pistol, Sig D65"
        assert weapon['acc'] == 4
        assert weapon['count'] == 1
        weapon, actual_weapon_index = self.__get_current_weapon(fighter)
        assert actual_weapon_index == sick_stick_index
        # remove non-counted item before weapon index: index shifts down by 1
        fighter.remove_equipment(sig_acc_4_index)  # Should remove item
        sick_stick_index -= 1
        weapon = fighter.equipment.get_item_by_index(sick_stick_index)
        assert weapon['name'] == "sick stick"
        weapon, actual_weapon_index = self.__get_current_weapon(fighter)
        assert weapon.name == "sick stick"
        assert actual_weapon_index == sick_stick_index
        # remove item after weapon index: index unaffected
        sig_acc_5_index = 2
        fighter.remove_equipment(sig_acc_5_index)
        weapon = fighter.equipment.get_item_by_index(sick_stick_index)
        assert weapon['name'] == "sick stick"
        weapon, actual_weapon_index = self.__get_current_weapon(fighter)
        assert weapon.name == "sick stick"
        assert actual_weapon_index == sick_stick_index
        # remove item at weapon index: no current weapon any more
        fighter.remove_equipment(sick_stick_index)
        weapon, actual_weapon_index = self.__get_current_weapon(fighter)
        assert weapon is None
        assert actual_weapon_index is None
    def test_preferred_add_remove_weapon(self):
        '''
        Basic test

        Verifies the preferred-weapon bookkeeping: the first weapon added
        to a fighter with no preference becomes preferred automatically,
        later additions prompt the user (No / replace / add), and the
        preferred-weapon index tracks removals before/at/after it.
        '''
        if ARGS.verbose:
            print '\n=== test_preferred_add_remove_weapon ===\n'
        fighter = ca_fighter.Fighter(
                'Tank',
                'group',
                copy.deepcopy(self.__tank_fighter),
                self.__ruleset,
                self.__window_manager)
        '''
        "stuff": [
             {"name": "pistol, Sig D65",  # the index of this is stored
                                          # in __tank_fighter_pistol_index
              "type": ["ranged weapon"],
              "damage": {"dice": "1d+4"},
              "acc": 4,
              "ammo": {"name": "C Cell", "shots_left": 9, "shots": 9},
              "clip": {"name": "C Cell",
                       "type": ["misc"],
                       "count": 1,
                       "notes": "",
                       "owners": None},
              "reload": 3,
              "skill": {"Guns (Pistol)": 0},
              "count": 1,
              "owners": None,
              "notes": ""},
             {"name": "sick stick",
              "type": ["melee weapon"],
              "damage": {"dice": "1d+1 fat"},
              "skill": {"Axe/Mace": 0},
              "count": 1,
              "owners": None,
              "notes": ""},
             {"name": "C Cell", "type": ["misc"], "count": 5, "notes": "",
              "owners": None}
        ],
        '''
        mock_fight_handler = MockFightHandler()
        original_item = fighter.details['stuff'][
                self.__tank_fighter_pistol_index]
        current_count = len(fighter.details['stuff'])
        original_stuff = copy.deepcopy(fighter.details['stuff'])
        # Adds an identical weapon to an existing one.  Since there isn't a
        # preferred weapon, it should up the count and make it preferred.
        assert len(fighter.details['preferred-weapon-index']) == 0
        assert original_item['count'] == 1
        same_item = copy.deepcopy(original_item)
        same_item['count'] = 2  # 2 items to add
        before_item_count = fighter.equipment.get_item_count()
        ignore = fighter.add_equipment(same_item, 'test')
        after_item_count = fighter.equipment.get_item_count()
        assert original_item['count'] == 3  # we've added 2 new items
        assert before_item_count == after_item_count
        assert len(fighter.details['preferred-weapon-index']) == 1
        new_preferred_weapon_index = fighter.details['preferred-weapon-index'][0]
        assert new_preferred_weapon_index == self.__tank_fighter_pistol_index
        # Add the same weapon again and show that we don't get asked to make
        # the newly added weapon a preferred weapon
        before_item_count = fighter.equipment.get_item_count()
        ignore = fighter.add_equipment(same_item, 'fourth')
        after_item_count = fighter.equipment.get_item_count()
        assert original_item['count'] == 5  # we've added 2 MORE new items
        assert before_item_count == after_item_count
        assert len(fighter.details['preferred-weapon-index']) == 1
        new_preferred_weapon_index = fighter.details['preferred-weapon-index'][0]
        assert new_preferred_weapon_index == self.__tank_fighter_pistol_index
        # Add weapon to list w/preferred weapon: should ask whether to make
        # new weapon preferred - answer = No
        similar_item = copy.deepcopy(original_item)
        similar_item['count'] = 1
        similar_item['acc'] = original_item['acc'] + 1  # just so it's different
        previous_preferred_weapon = fighter.details['preferred-weapon-index'][0]
        self.__window_manager.set_menu_response(
                'Make pistol, Sig D65 the preferred weapon?', False)
        ignore = fighter.add_equipment(similar_item, 'sixth')
        new_preferred_weapon = fighter.details['preferred-weapon-index'][0]
        assert len(fighter.details['preferred-weapon-index']) == 1
        assert new_preferred_weapon == previous_preferred_weapon
        # Add weapon to list w/preferred weapon: should ask whether to make
        # new weapon preferred - answer = Yes, replace existing preference
        similar_item = copy.deepcopy(similar_item)
        similar_item['acc'] += 1
        self.__window_manager.set_menu_response(
                'Make pistol, Sig D65 the preferred weapon?',
                ca_fighter.Fighter.REPLACE_PREFERRED)
        self.__window_manager.set_menu_response('Replace which weapon?', 0)
        new_index = fighter.add_equipment(similar_item, 'eighth')
        # The current preferred weapon should be the most recently added item
        current_count = len(fighter.details['stuff'])
        new_preferred_weapon = fighter.details['preferred-weapon-index'][0]
        assert len(fighter.details['preferred-weapon-index']) == 1
        assert new_preferred_weapon == current_count - 1
        assert new_index == new_preferred_weapon
        # [ index 0: { 'name': 'pistol, Sig D65', 'acc': 4, 'count': 5},
        #   index 1: { 'name': 'sick stick', 'count': 1 }
        #   index 2: { 'name': 'C Cell' },
        #   index 3: { 'name': 'pistol, Sig D65', 'acc': 5, 'count': 1},
        #   index 4: { 'name': 'pistol, Sig D65', 'acc': 6, 'count': 1} ]
        # Remove preferred weapon, preferred weapon should be none
        old_preferred_weapon = fighter.details['preferred-weapon-index'][0]
        self.__window_manager.set_input_box_response(
                'How Many Items (5 Available)?', 5)
        fighter.remove_equipment(old_preferred_weapon)
        assert len(fighter.details['preferred-weapon-index']) == 0
        # Remove weapon before preferred weapon: preferred index should move
        # to continue pointing to preferred weapon
        # [ index 0: { 'name': 'pistol, Sig D65', 'acc': 4, 'count': 5},
        #   index 1: { 'name': 'sick stick', 'count': 1 }  <--- PREFERRED
        #   index 2: { 'name': 'C Cell' },
        #   index 3: { 'name': 'pistol, Sig D65', 'acc': 5, 'count': 1}]
        fighter.details['preferred-weapon-index'] = [self.__tank_fighter_sickstick_index]
        old_preferred_weapon = fighter.details['preferred-weapon-index'][0]
        index_to_remove = old_preferred_weapon - 1  # index 0
        self.__window_manager.set_input_box_response(
                'How Many Items (5 Available)?', 5)
        fighter.remove_equipment(index_to_remove)
        new_preferred_weapon = fighter.details['preferred-weapon-index'][0]
        assert new_preferred_weapon == old_preferred_weapon - 1
        # Remove weapon after preferred weapon: preferred index should
        # remain unchanged and point to preferred weapon
        # [ index 0: { 'name': 'sick stick', 'count': 1 }  <- PREFERRED
        #   index 1: { 'name': 'C Cell', 'count': 5 },
        #   index 2: { 'name': 'pistol, Sig D65', 'acc': 5, 'count': 1}]
        old_preferred_weapon = fighter.details['preferred-weapon-index'][0]
        index_to_remove = old_preferred_weapon + 1  # index 1
        self.__window_manager.set_input_box_response(
                'How Many Items (5 Available)?', 5)
        fighter.remove_equipment(index_to_remove)
        new_preferred_weapon = fighter.details['preferred-weapon-index'][0]
        assert new_preferred_weapon == old_preferred_weapon
        # Add weapon to empty list: should make weapon preferred
        while len(fighter.details['stuff']) > 0:
            fighter.remove_equipment(0)
        assert len(fighter.details['stuff']) == 0
        assert len(fighter.details['preferred-weapon-index']) == 0
        original_item = self.__tank_fighter['stuff'][
                self.__tank_fighter_pistol_index]
        same_item = copy.deepcopy(original_item)
        new_index = fighter.add_equipment(same_item, 'test')
        assert len(fighter.details['preferred-weapon-index']) == 1
        new_preferred_weapon = fighter.details['preferred-weapon-index'][0]
        assert new_preferred_weapon == new_index
        # [ index 0: { 'name': 'pistol, Sig D65', 'acc': 5, 'count': 1} <- PREFERRED ]
        # Add weapon to list w/preferred weapon: should ask whether to make
        # new weapon preferred - answer = No
        similar_item = copy.deepcopy(similar_item)
        similar_item['name'] = 'Ray Gun'
        similar_item['acc'] += 1
        self.__window_manager.set_menu_response(
                'Make Ray Gun the preferred weapon?',
                ca_fighter.Fighter.NOT_PREFERRED)
        old_preferred_weapon = fighter.details['preferred-weapon-index'][0]
        new_index = fighter.add_equipment(similar_item, 'eighth')
        new_preferred_weapon = fighter.details['preferred-weapon-index'][0]
        assert len(fighter.details['preferred-weapon-index']) == 1
        assert new_preferred_weapon == old_preferred_weapon
        # Add weapon to list w/preferred weapon: should ask whether to make
        # new weapon preferred - answer = Yes, add to existing list
        similar_item = copy.deepcopy(similar_item)
        similar_item['name'] = 'Ray Gun 2'
        similar_item['acc'] += 1
        self.__window_manager.set_menu_response(
                'Make Ray Gun 2 the preferred weapon?',
                ca_fighter.Fighter.ADD_PREFERRED)
        old_preferred_weapon = fighter.details['preferred-weapon-index'][0]
        new_index = fighter.add_equipment(similar_item, 'eighth')
        assert len(fighter.details['preferred-weapon-index']) == 2
        assert fighter.details['preferred-weapon-index'][0] == old_preferred_weapon
        assert fighter.details['preferred-weapon-index'][1] == new_index
    def test_give_equipment(self):
        '''
        Basic test

        A 'give-equipment' action moves one item (Tank's pistol) from the
        giver's 'stuff' list to the recipient's; both fighters' equipment
        lists are compared against explicit expected fixtures afterwards.
        '''
        if ARGS.verbose:
            print '\n=== test_give_equipment ===\n'
        tank = ca_fighter.Fighter(
                'Tank',
                'group',
                copy.deepcopy(self.__tank_fighter),
                self.__ruleset,
                self.__window_manager)
        # Tank's gear after giving away his only pistol (count was 1, so
        # the slot disappears entirely)
        tank_after_gift = [
            {"name": "sick stick",
             "type": ["melee weapon"],
             "damage": {"dice": "1d+1 fat"},
             "skill": {"Axe/Mace": 0},
             "count": 1,
             "owners": None,
             "notes": ""},
            {"name": "C Cell", "type": ["misc"], "count": 5, "notes": "",
             "owners": None}]
        priest = ca_fighter.Fighter(
                'Priest',
                'group',
                copy.deepcopy(self.__vodou_priest_fighter),
                self.__ruleset,
                self.__window_manager)
        # Priest's gear after the gift: his original three items plus
        # Tank's Sig D65 appended at the end.
        # NOTE(review): index 0's "owners": 1 / "notes": None look swapped
        # relative to every other item ("owners": None / "notes": "") --
        # confirm against the give-equipment ownership semantics.
        priest_after_gift = [
            {"name": "pistol, Colt 170D",
             "type": ["ranged weapon"],
             "damage": {"dice": "1d+4"},
             "acc": self.__colt_pistol_acc,
             "ammo": {"name": "C Cell",
                      "shots_left": self.__vodou_priest_initial_shots,
                      "shots": self.__vodou_priest_initial_shots},
             "clip": {"name": "C Cell",
                      "type": ["misc"],
                      "count": 1,
                      "notes": "",
                      "owners": None},
             "reload": 3,
             "skill": {"Guns (Pistol)": 0},
             "count": 1,
             "owners": 1,
             "notes": None},  # index 0
            {"name": "C Cell",
             "type": ["misc"],
             "count": self.__vodou_priest_ammo_count,
             "notes": "",
             "owners": None},  # index 1
            {"count": 1,
             "type": ["armor"],
             "notes": "Enchanted w/fortify spell [M66]",
             "dr": self.__vodou_priest_armor_dr,
             "name": "Sport coat/Jeans"},
            {"name": "pistol, Sig D65",  # the index of this is stored
                                         # in __tank_fighter_pistol_index
             "type": ["ranged weapon"],
             "damage": {"dice": "1d+4"},
             "acc": 4,
             "ammo": {"name": "C Cell", "shots_left": 9, "shots": 9},
             "clip": {"name": "C Cell",
                      "type": ["misc"],
                      "count": 1,
                      "notes": "",
                      "owners": None},
             "reload": 3,
             "skill": {"Guns (Pistol)": 0},
             "count": 1,
             "owners": None,
             "notes": ""},
            ]
        # Give item from tank to priest
        mock_fight_handler = MockFightHandler()
        mock_fight_handler.set_fighter_object('Priest', 'group', priest)
        self.__ruleset.do_action(
                tank,
                {'action-name': 'give-equipment',
                 'item-index': self.__tank_fighter_pistol_index,
                 'count': 1,
                 'recipient': {'name': 'Priest', 'group': 'group'},
                 'comment': '%s gave pistol to %s' % (tank.name, priest.name)
                 },
                mock_fight_handler)
        assert self.__are_equal(tank_after_gift, tank.details['stuff'])
        assert self.__are_equal(priest_after_gift, priest.details['stuff'])
def test_redirects(self):
'''
Basic test
'''
if ARGS.verbose:
print '\n=== test_redirects ===\n'
base_world_dict = copy.deepcopy(self.base_world_dict)
world_data = WorldData(base_world_dict)
mock_program = MockProgram()
world = ca.World('internal source file',
world_data,
self.__ruleset,
mock_program,
self.__window_manager,
save_snapshot=False)
# Verify that redirect that's in the World object works the way I
# expect it to.
source_char = world.get_creature_details('One More Guy', 'NPCs')
dest_char = world.get_creature_details('One More Guy', 'Dima\'s Crew')
assert self.__are_equal(source_char, dest_char)
    def test_redirects_promote_to_NPC(self):
        '''
        Basic test

        Promoting a monster to NPC should copy/redirect its details into
        the NPC list; promoting the same monster a second time should
        raise a 'already an NPC' error through the window manager.
        '''
        if ARGS.verbose:
            print '\n=== test_redirects_promote_to_NPC ===\n'
        init_world_dict = copy.deepcopy(self.init_world_dict)
        world_data = WorldData(init_world_dict)
        mock_program = MockProgram()
        world = ca.World('internal source file',
                         world_data,
                         self.__ruleset,
                         mock_program,
                         self.__window_manager,
                         save_snapshot=False)
        self.__window_manager.reset_error_state()
        # Seed the RNG so initiative order (and hence fighter indexes,
        # below) is deterministic.
        # random.randint(1, 6) should generate: 1 2 4 4 4 4 5 6 4 4
        random.seed(9001)  # 9001 is an arbitrary number
        # expected = [{'name': 'Famine', 'group': 'horsemen'},  # 5.75, 12, 4
        monster_famine_index = 0
        #             {'name': 'Jack', 'group': 'PCs'},         # 5.75, 12, 2
        pc_jack_index = 1
        #             {'name': 'Moe', 'group': 'PCs'},          # 5.5, 12, 4
        pc_moe_index = 2
        #             {'name': 'Pestilence', 'group': 'horsemen'},  # 5.5, 11, 4
        monster_pestilence_index = 3
        #             {'name': 'Manny', 'group': 'PCs'}]        # 5.25, 10, 1
        pc_manny_index = 4
        fight_handler = ca.FightHandler(self.__window_manager,
                                        world,
                                        "horsemen",
                                        None,  # Playback history
                                        save_snapshot=False)
        # FightHandler.promote_to_NPC - check good change #
        fight_handler.set_viewing_index(monster_pestilence_index)
        fight_handler.promote_to_NPC()
        # There should now be an NPC named Pestilence, identical to the
        # monster it was promoted from
        source_char = world.get_creature_details('Pestilence', 'horsemen')
        dest_char = world.get_creature_details('Pestilence', 'NPCs')
        assert self.__are_equal(source_char, dest_char)
        # FightHandler.promote_to_NPC - check destination has an NPC #
        self.__window_manager.expect_error(
                ['There\'s already an NPC named Pestilence'])
        fight_handler.set_viewing_index(monster_pestilence_index)
        fight_handler.promote_to_NPC()
        assert(self.__window_manager.error_state ==
               MockWindowManager.FOUND_EXPECTED_ERROR)
        # TODO: FightHandler.promote_to_NPC - check source already an NPC #
        # if npc_name not in self.__world.details['NPCs']:
        #     self._window_manager.error(['%s is already an NPC' %
        #                                 new_NPC.name])
def test_NPC_joins(self):
    '''
    Verifies PersonnelHandler.NPC_joins_monsters and NPC_joins_PCs:
    non-NPCs are rejected, an NPC can be copied into a fight or into the
    party, and an NPC already in the destination fight is rejected.
    '''
    if ARGS.verbose:
        print '\n=== test_NPC_joins ===\n'

    # NOTE: These indexes assume that we're NOT creating a fight.  When we
    #   create a fight, a Fight object '<< ROOM >>' will be created and
    #   added to the beginning of the fight.  The indexes, in that case,
    #   will be increased by 1 since the Fight will have index 0.  These
    #   indexes, however, are not used in the tests that create a new fight.
    # << ROOM >> -- not in the tests that use indexes

    # PC indexes (name/group, then: speed, dex, roll):
    # {'name': 'Jack', 'group': 'PCs'},   # 5.75, 12, 2
    pc_jack_index = 0
    # {'name': 'Manny', 'group': 'PCs'}]  # 5.25, 10, 1
    pc_manny_index = 1
    # {'name': 'Moe', 'group': 'PCs'},    # 5.5, 12, 4
    pc_moe_index = 2
    last_pc_index = 2

    # NPC indexes (alphabetical):
    # Chico
    chico_index = 0
    # Grouch
    groucho_index = 1
    # Zeppo
    zeppo_index = 2

    # The world's pre-existing fight, for reference:
    # 'fights': {
    #     'horsemen': {
    #         'monsters': {
    #             # 5.75, 12, rand=4
    #             'Famine': copy.deepcopy(self.__thief_fighter),
    #
    #             # 5.5, 11, rand=4
    #             'Pestilence': copy.deepcopy(self.__vodou_priest_fighter),
    #         }
    #     }
    # },
    init_world_dict = copy.deepcopy(self.init_world_dict)
    world_data = WorldData(init_world_dict)
    mock_program = MockProgram()
    world = ca.World('internal source file',
                     world_data,
                     self.__ruleset,
                     mock_program,
                     self.__window_manager,
                     save_snapshot=False)

    self.__window_manager.set_menu_response('Which Template Group',
                                            'dudes')
    npc_handler = ca.PersonnelHandler(self.__window_manager,
                                      world,
                                      ca.PersonnelHandler.NPCs)
    self.__window_manager.set_menu_response('Which Template Group',
                                            'dudes')
    pc_handler = ca.PersonnelHandler(self.__window_manager,
                                     world,
                                     ca.PersonnelHandler.PCs)

    # PersonnelHandler.NPC_joins_monsters - not an NPC #
    if ARGS.verbose:
        print '\n----------- NPC_joins_monsters - not an NPC -----------\n'
    self.__window_manager.reset_error_state()
    pc_handler.set_viewing_index(pc_manny_index)
    fighter = pc_handler.get_obj_from_index()
    self.__window_manager.expect_error(['"Manny" not an NPC'])
    self.__window_manager.set_menu_response('Join Which Fight', 'horsemen')
    pc_handler.NPC_joins_monsters()
    assert(self.__window_manager.error_state ==
           MockWindowManager.FOUND_EXPECTED_ERROR)

    # PersonnelHandler.NPC_joins_monsters - works #
    if ARGS.verbose:
        print '\n----------- NPC_joins_monsters - works -----------\n'
    self.__window_manager.reset_error_state()
    npc_handler.set_viewing_index(groucho_index)
    fighter = npc_handler.get_obj_from_index()
    assert fighter.name == 'Groucho'
    self.__window_manager.set_menu_response('Join Which Fight', 'horsemen')
    npc_handler.NPC_joins_monsters()
    # The NPC stays in 'NPCs'; the fight gets an identical copy.
    source_char = world.get_creature_details('Groucho', 'NPCs')
    dest_char = world.get_creature_details('Groucho', 'horsemen')
    assert self.__are_equal(source_char, dest_char)

    # PersonnelHandler.NPC_joins_monsters - NPC already in fight #
    if ARGS.verbose:
        print '\n--- NPC_joins_monsters - NPC already with monster ---\n'
    npc_handler.set_viewing_index(groucho_index)
    fighter = npc_handler.get_obj_from_index()
    assert fighter.name == 'Groucho'
    self.__window_manager.set_menu_response('Join Which Fight', 'horsemen')
    self.__window_manager.expect_error(
        ['"Groucho" already in fight "horsemen"'])
    npc_handler.NPC_joins_monsters()
    assert(self.__window_manager.error_state ==
           MockWindowManager.FOUND_EXPECTED_ERROR)

    # PersonnelHandler.NPC_joins_PCs -- not a PC #
    if ARGS.verbose:
        print '\n----------- NPC_joins_PCs - not a PC -----------\n'
    pc_handler.set_viewing_index(pc_manny_index)
    fighter = pc_handler.get_obj_from_index()
    assert fighter.name == 'Manny'
    self.__window_manager.expect_error(['"Manny" not an NPC'])
    pc_handler.NPC_joins_monsters()
    assert(self.__window_manager.error_state ==
           MockWindowManager.FOUND_EXPECTED_ERROR)

    # PersonnelHandler.NPC_joins_PCs -- works #
    if ARGS.verbose:
        print '\n----------- NPC_joins_PCs - works -----------\n'
    self.__window_manager.reset_error_state()
    # Doing zeppo so he gets put at the end of the alphabetized PC list
    # to make the next test work.
    npc_handler.set_viewing_index(zeppo_index)
    fighter = npc_handler.get_obj_from_index()
    assert fighter.name == 'Zeppo'
    npc_handler.NPC_joins_PCs()
    source_char = world.get_creature_details('Zeppo', 'NPCs')
    dest_char = world.get_creature_details('Zeppo', 'PCs')
    assert self.__are_equal(source_char, dest_char)

    # PersonnelHandler.NPC_joins_PCs -- already a PC #
    #
    # There isn't a case where something's already a PC where it doesn't
    # fire the 'Not an NPC' error.
    # if ARGS.verbose:
    #     print '\n-------- NPC_joins_PCs - NPC already a PC --------\n'
    # Zeppo should have been put at the end of the alphabetized PC list by
    # the last test.  Now we know the index of an NPC that is also a PC.
    # npc_handler.set_viewing_index(last_pc_index + 1)
    # fighter = npc_handler.get_obj_from_index()
    # assert fighter.name == 'Zeppo'
    # self.__window_manager.expect_error(['"Zeppo" already a PC'])
    # npc_handler.NPC_joins_monsters()
    # assert(self.__window_manager.error_state ==
    #        MockWindowManager.FOUND_EXPECTED_ERROR)
def test_new_fight_new_creatures(self):
    '''
    Verifies PersonnelHandler creature editing: creating a new fight,
    rejecting a duplicate fight name, adding and deleting a monster, and
    adding new PCs and NPCs.
    '''
    # CREATE FIGHT -- WORKING #
    if ARGS.verbose:
        print '\n=== test_new_fight_new_creatures ===\n'
    world_dict = copy.deepcopy(self.base_world_dict)
    world_data = WorldData(world_dict)
    mock_program = MockProgram()
    world = ca.World('internal source file',
                     world_data,
                     self.__ruleset,
                     mock_program,
                     self.__window_manager,
                     save_snapshot=False
                     )

    # Script the UI: new fight 'test_new_fight' holding one VodouCleric
    # monster named Horatio, then quit.  Responses are consumed in the
    # order the handler asks for them.
    self.__window_manager.clear_menu_responses()
    self.__window_manager.set_menu_response('New or Pre-Existing', 'new')
    self.__window_manager.set_menu_response('Which Template Group',
                                            'Arena Combat')
    self.__window_manager.set_input_box_response('New Fight Name',
                                                 'test_new_fight')
    self.__window_manager.set_menu_response('Monster', 'VodouCleric')
    self.__window_manager.set_input_box_response('Monster Name', 'Horatio')
    self.__window_manager.set_menu_response('What Next', 'quit')
    build_fight = TestPersonnelHandler(self.__window_manager,
                                       world,
                                       ca.PersonnelHandler.MONSTERs)
    build_fight.set_command_ribbon_input('q')
    build_fight.handle_user_input_until_done()
    fights = world.get_fights()
    assert 'test_new_fight' in fights  # verify that fight exists
    if 'test_new_fight' in fights:
        creatures = world.get_creature_details_list('test_new_fight')
        if ARGS.verbose:
            print 'Expect: Room, Horatio:'
            PP.pprint(creatures)
        # The 'creatures' should be '<< ROOM >>', '1 - Horatio'
        assert '1 - Horatio' in creatures

    # FIGHT ALREADY EXISTS #
    if ARGS.verbose:
        print '\n--- Test: Fight Already Exists ---\n'
    self.__window_manager.reset_error_state()
    self.__window_manager.clear_menu_responses()
    self.__window_manager.set_menu_response('New or Pre-Existing', 'new')
    self.__window_manager.set_menu_response('Which Template Group',
                                            'Arena Combat')
    # This one should error out
    self.__window_manager.set_input_box_response('New Fight Name',
                                                 'test_new_fight')
    # This one should work
    self.__window_manager.set_input_box_response('New Fight Name', 'foo')
    self.__window_manager.expect_error(
        ['Fight name "test_new_fight" already exists'])
    # These are just so that the test finishes.
    self.__window_manager.set_menu_response('Monster', 'VodouCleric')
    self.__window_manager.set_input_box_response('Monster Name', 'Horatio')
    self.__window_manager.set_menu_response('What Next', 'quit')
    build_fight = TestPersonnelHandler(self.__window_manager,
                                       world,
                                       ca.PersonnelHandler.MONSTERs)
    # The duplicate-name error fires during handler construction.
    assert(self.__window_manager.error_state ==
           MockWindowManager.FOUND_EXPECTED_ERROR)
    build_fight.set_command_ribbon_input('q')
    build_fight.handle_user_input_until_done()

    # ADD A CREATURE, DELETE A MONSTER -- WORKS #
    if ARGS.verbose:
        print '\n--- Test: Add and Delete Monster ---\n'
    self.__window_manager.clear_menu_responses()
    self.__window_manager.set_menu_response('New or Pre-Existing',
                                            'existing')
    self.__window_manager.set_menu_response('Which Template Group',
                                            'Arena Combat')
    self.__window_manager.set_menu_response('To Which Group',
                                            'test_new_fight')
    self.__window_manager.set_menu_response('Monster', 'VodouCleric')
    self.__window_manager.set_input_box_response('Monster Name', 'Ophelia')
    build_fight = TestPersonnelHandler(self.__window_manager,
                                       world,
                                       ca.PersonnelHandler.MONSTERs)
    build_fight.set_command_ribbon_input('a')  # Add Ophelia
    # The 'creatures' should be '<< ROOM >>', '1 - Horatio', '2 - Ophelia'
    # Delete Horatio
    build_fight.set_command_ribbon_input(curses.KEY_UP)
    build_fight.set_command_ribbon_input('d')  # Delete Horatio
    self.__window_manager.set_menu_response(
        'Delete "1 - Horatio" ARE YOU SURE?', 'yes')
    # finish up the test
    self.__window_manager.set_menu_response('What Next', 'quit')
    build_fight.set_command_ribbon_input('q')  # Quit
    build_fight.handle_user_input_until_done()
    fights = world.get_fights()
    assert 'test_new_fight' in fights  # verify that fight exists
    if 'test_new_fight' in fights:
        creatures = world.get_creature_details_list('test_new_fight')
        assert '1 - Horatio' not in creatures
        assert '2 - Ophelia' in creatures

    # ADD PCs -- WORKS #
    if ARGS.verbose:
        print '\n--- Test: Add PCs ---\n'
    group = 'PCs'
    self.__window_manager.clear_menu_responses()
    self.__window_manager.set_menu_response('Which Template Group',
                                            'Arena Combat')
    self.__window_manager.set_menu_response('Monster', 'VodouCleric')
    self.__window_manager.set_input_box_response('Monster Name', 'Skippy')
    self.__window_manager.set_menu_response('What Next', 'quit')
    build_fight = TestPersonnelHandler(self.__window_manager,
                                       world,
                                       ca.PersonnelHandler.PCs)
    build_fight.set_command_ribbon_input('a')
    build_fight.set_command_ribbon_input('q')
    build_fight.handle_user_input_until_done()
    creatures = world.get_creature_details_list(group)
    assert 'Skippy' in creatures

    # ADD NPCs #
    if ARGS.verbose:
        print '\n--- Test: Add NPCs ---\n'
    group = 'NPCs'
    self.__window_manager.clear_menu_responses()
    self.__window_manager.set_menu_response('Which Template Group',
                                            'Arena Combat')
    self.__window_manager.set_menu_response('Monster', 'VodouCleric')
    self.__window_manager.set_input_box_response('Monster Name', 'Stinky')
    self.__window_manager.set_menu_response('What Next', 'quit')
    build_fight = TestPersonnelHandler(self.__window_manager,
                                       world,
                                       ca.PersonnelHandler.NPCs)
    build_fight.set_command_ribbon_input('a')
    build_fight.set_command_ribbon_input('q')
    build_fight.handle_user_input_until_done()
    creatures = world.get_creature_details_list(group)
    assert 'Stinky' in creatures
def test_containers(self):
    '''
    Verifies the 'open-container', 'close-container', and
    'move-between-container' ruleset actions on a 3-level nested
    equipment hierarchy.  The "N, N, N" comments track the item counts
    at levels 1, 2, and 3 after each step.
    '''
    if ARGS.verbose:
        print '\n=== test_containers ===\n'
    mock_fight_handler = MockFightHandler()
    fighter_dict = copy.deepcopy(self.__thief_fighter)

    # NOTE: containers are always 1st (that way the indexes don't get
    #   messed up and I don't have to go around recalculating for the
    #   tests.
    fighter_dict['stuff'] = [
        {"name": "Container 11", "type": ["container"], "count": 1, "notes": "",
         "owners": None, "stuff": [
            {"name": "Container 22", "type": ["container"], "count": 1, "notes": "",
             "owners": None, "stuff": [
                {"name": "Random Thing 31", "type": ["misc"], "count": 1, "notes": "",
                 "owners": None},
                ]},
            {"name": "Random Thing 22", "type": ["misc"], "count": 1, "notes": "",
             "owners": None},
            ]},
        {"name": "Random Thing 12", "type": ["misc"], "count": 1, "notes": "",
         "owners": None},
        {"name": "Random Thing 13", "type": ["misc"], "count": 1, "notes": "",
         "owners": None},
        ]
    fighter = ca_fighter.Fighter(
        'Thief',
        'group',
        fighter_dict,
        self.__ruleset,
        self.__window_manager)

    # just checking starting conditions
    # verify: 3 things at top level, 2 things at 2nd level, 1 thing at 3rd
    # 3, 2, 1
    container = fighter.equipment.get_container([])
    assert len(container) == 3
    container = fighter.equipment.get_container([0])
    assert len(container) == 2
    container = fighter.equipment.get_container([0, 0])
    assert len(container) == 1

    # TEST MOVE - move something from level 1 to level 2
    # verify: top level: 2 things, 2nd level: 3 things, 3rd level: 1 thing
    # index is arbitrary but not '1' since that's a container -- nothing
    #   wrong with moving a container but that's not what we're testing.
    container = fighter.equipment.get_container([])
    index = 2
    item = container[index]
    self.__ruleset.do_action(
        fighter,
        {'action-name': 'move-between-container',
         'item-index': index,
         'item-name': item['name'],
         'destination-index': [0],
         },
        mock_fight_handler)
    container = fighter.equipment.get_container([])
    assert len(container) == 2
    container = fighter.equipment.get_container([0])
    assert len(container) == 3
    container = fighter.equipment.get_container([0, 0])
    assert len(container) == 1
    # 2, 3, 1

    # TEST OPEN - go to level 2 container
    self.__ruleset.do_action(
        fighter,
        {'action-name': 'open-container', 'container-index': 0},
        mock_fight_handler)
    # move a couple things to level 3
    # (indexes descend so the earlier move doesn't shift the later one)
    for index in [2, 1]:
        container = fighter.equipment.get_container([0])
        item = container[index]
        self.__ruleset.do_action(
            fighter,
            {'action-name': 'move-between-container',
             'item-index': index,
             'item-name': item['name'],
             'destination-index': [0, 0],
             },
            mock_fight_handler)
    # verify: 2, 1, 3
    container = fighter.equipment.get_container([])
    assert len(container) == 2
    container = fighter.equipment.get_container([0])
    assert len(container) == 1
    container = fighter.equipment.get_container([0, 0])
    assert len(container) == 3
    # 2, 1, 3

    # TEST 2 LEVELS - go to level 3 container
    self.__ruleset.do_action(
        fighter,
        {'action-name': 'open-container', 'container-index': 0},
        mock_fight_handler)
    # move one thing back to level 1
    container = fighter.equipment.get_container([0, 0])
    index = 0
    item = container[index]
    self.__ruleset.do_action(
        fighter,
        {'action-name': 'move-between-container',
         'item-index': index,
         'item-name': item['name'],
         'destination-index': [],
         },
        mock_fight_handler)
    # verify: 3, 1, 2
    container = fighter.equipment.get_container([])
    assert len(container) == 3
    container = fighter.equipment.get_container([0])
    assert len(container) == 1
    container = fighter.equipment.get_container([0, 0])
    assert len(container) == 2
    # 3, 1, 2

    # TEST CLOSE - Go back to top, move something to level 3
    # go back to level 1 by closing 2 containers
    self.__ruleset.do_action(
        fighter,
        {'action-name': 'close-container'},
        mock_fight_handler)
    self.__ruleset.do_action(
        fighter,
        {'action-name': 'close-container'},
        mock_fight_handler)
    # move one thing back to level 3
    container = fighter.equipment.get_container([0, 0])
    index = 1
    item = container[index]
    self.__ruleset.do_action(
        fighter,
        {'action-name': 'move-between-container',
         'item-index': index,
         'item-name': item['name'],
         'destination-index': [0,0],
         },
        mock_fight_handler)
    # verify: 2, 1, 3
    container = fighter.equipment.get_container([])
    assert len(container) == 2
    container = fighter.equipment.get_container([0])
    assert len(container) == 1
    container = fighter.equipment.get_container([0, 0])
    assert len(container) == 3
class MyArgumentParser(argparse.ArgumentParser):
    """ArgumentParser variant with a friendlier failure mode.

    On a bad command line it prints the error, then the full help text,
    before exiting -- instead of argparse's terse usage-only message.
    """

    def error(self, message):
        """Report *message* on stderr, show the help text, and exit(2)."""
        text = 'error: %s\n' % message
        sys.stderr.write(text)
        self.print_help()
        sys.exit(2)
if __name__ == '__main__':
    # Parse the command line; -v/--verbose makes the tests narrate their
    # steps as they run.
    parser = MyArgumentParser()
    # parser.add_argument('filename',
    #     nargs='?', # We get the filename elsewhere if you don't say here
    #     help='Input JSON file containing characters and monsters')
    parser.add_argument('-v', '--verbose', help='verbose', action='store_true',
                        default=False)
    # ARGS and PP are module-level globals read directly by the test methods.
    ARGS = parser.parse_args()
    PP = pprint.PrettyPrinter(indent=3, width=150)
    unittest.main()  # runs all tests in this file
| 40.143241 | 91 | 0.52038 |
7f1e17518075a9abc7bc3c5cbb5967f5b7bdba2a | 1,653 | py | Python | kas-structure.py | clarinsi/kas-struct | ba1083cbbb19836b8f9ff4a6a4ffeed56e62a52a | [
"Apache-2.0"
] | null | null | null | kas-structure.py | clarinsi/kas-struct | ba1083cbbb19836b8f9ff4a6a4ffeed56e62a52a | [
"Apache-2.0"
] | null | null | null | kas-structure.py | clarinsi/kas-struct | ba1083cbbb19836b8f9ff4a6a4ffeed56e62a52a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#-*-encoding:utf-8-*-
# Usage: kas-structure.py <schema: dipl|mag|dr> <input file>
#
# Tags each input page with a main structure label using a CRF model, then
# refines the pages labeled FRONT and BACK with dedicated second-stage CRF
# models, writing one "<main>\t<sub>" line per page to <input file>.anno.
#
# NOTE(review): re, pickle, numpy, train_test_split,
# classification_report/confusion_matrix and Counter are imported but never
# used below -- presumably leftovers from the training script; confirm
# before removing.
import re
import sys
import pickle
import numpy as np
import pycrfsuite
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from collections import Counter
from train_main import extract,enforce_constraints
import os
# Model files are resolved relative to this script's own directory.
reldir=os.path.dirname(os.path.abspath(__file__))
# Per-schema model triples: [main, front, back].
# NOTE(review): all three schema keys currently point at the same files --
# presumably placeholders until schema-specific models exist; confirm.
models={'dipl':['train.all.main.crfsuite','train.all.front.crfsuite','train.all.back.crfsuite'],'mag':['train.all.main.crfsuite','train.all.front.crfsuite','train.all.back.crfsuite'],'dr':['train.all.main.crfsuite','train.all.front.crfsuite','train.all.back.crfsuite']}
# Load the main structure tagger for the schema named by argv[1].
tagger=pycrfsuite.Tagger()
tagger.open(os.path.join(reldir,models[sys.argv[1]][0]))
# One input line == one page; extract() (from train_main) turns the
# space-separated tokens into the CRF feature representation for that page.
x=[]
for line in open(sys.argv[2]):
    page=line.strip().split(' ')
    x.append(extract(page))
#for i in range(10):
#    print i
#    print x[i]
#    print '###'
print 'Data read'
# Main pass: one predicted label per page; enforce_constraints() (from
# train_main) post-processes the predicted sequence in place.
pred_main=tagger.tag(x)
enforce_constraints(pred_main)
print 'Main tagged'
# Collect the features of the pages labeled FRONT or BACK, preserving their
# original order, for the second-stage taggers.
front=[]
back=[]
for idx,pred in enumerate(pred_main):
    if pred=='FRONT':
        front.append(x[idx])
    elif pred=='BACK':
        back.append(x[idx])
# Second stage: sub-classify the front matter...
tagger=pycrfsuite.Tagger()
tagger.open(os.path.join(reldir,models[sys.argv[1]][1]))
pred_front=tagger.tag(front)
print 'Front tagged'
# ...and the back matter.
tagger=pycrfsuite.Tagger()
tagger.open(os.path.join(reldir,models[sys.argv[1]][2]))
pred_back=tagger.tag(back)
print 'Back tagged'
# Emit "<main>\t<sub>" per page.  Pages that are neither FRONT nor BACK get
# the placeholder sub-label 'O'; front_idx/back_idx walk the second-stage
# predictions in the same order the pages were collected above.
f=open(sys.argv[2]+'.anno','w')
front_idx=0
back_idx=0
for tag in pred_main:
    tag2='O'
    if tag=='FRONT':
        tag2=pred_front[front_idx]
        front_idx+=1
    elif tag=='BACK':
        tag2=pred_back[back_idx]
        back_idx+=1
    f.write(tag+'\t'+tag2+'\n')
f.close()
| 26.238095 | 269 | 0.724743 |
3ee0332ffb0b0cb77ae418265f4bbf19199ae3ac | 2,242 | py | Python | dice_rolling_simulator.py | RamPrasath008/Demo | 3d4ee06249cf5697c72bae4e2732e814dde97dec | [
"MIT"
] | 2 | 2018-08-11T21:54:07.000Z | 2018-08-11T21:54:13.000Z | dice_rolling_simulator.py | rajasree-r/Python | 3d4ee06249cf5697c72bae4e2732e814dde97dec | [
"MIT"
] | null | null | null | dice_rolling_simulator.py | rajasree-r/Python | 3d4ee06249cf5697c72bae4e2732e814dde97dec | [
"MIT"
] | 1 | 2018-09-20T23:41:31.000Z | 2018-09-20T23:41:31.000Z | #Made on May 27th, 2017
#Made by SlimxShadyx
#Editted by CaptMcTavish, June 17th, 2017
#Comments edits by SlimxShadyx, August 11th, 2017
#Dice Rolling Simulator
import random
# NOTE(review): a 'global' statement at module level is a no-op -- the name
# is already global here.  The string bound below is never read before the
# user_exit_checker() *function* definition later rebinds the same name.
global user_exit_checker
user_exit_checker="exit"
#Our start function (What the user will first see when starting the program)
def start():
    """Greet the player, wait for Enter, then hand off to the die picker."""
    print "Welcome to dice rolling simulator: \nPress Enter to proceed"
    raw_input(">")
    #Starting our result function (The dice picker function)
    result()
#Our exit function (What the user will see when choosing to exit the program)
def bye():
    """Print the farewell message; the program ends as the call stack unwinds."""
    print "Thanks for using the Dice Rolling Simulator! Have a great day! =)"
#Result function which is our dice chooser function
def result():
#user_dice_chooser No idea how this got in here, thanks EroMonsterSanji.
print "\r\nGreat! Begin by choosing a die! [6] [8] [12]?\r\n"
user_dice_chooser = raw_input(">")
user_dice_chooser = int(user_dice_chooser)
#Below is the references to our dice functions (Below), when the user chooses a dice.
if user_dice_chooser == 6:
dice6()
elif user_dice_chooser == 8:
dice8()
elif user_dice_chooser == 12:
dice12()
#If the user doesn't choose an applicable option
else:
print "\r\nPlease choose one of the applicable options!\r\n"
result()
#Below are our dice functions.
def dice6():
#Getting a random number between 1 and 6 and printing it.
dice_6 = random.randint(1,6)
print "\r\nYou rolled a " + str(dice_6) + "!\r\n"
user_exit_checker()
def dice8():
dice_8 = random.randint(1,8)
print "\r\nYou rolled a " + str(dice_8) + "!"
user_exit_checker()
def dice12():
dice_12 = random.randint(1,12)
print "\r\nYou rolled a " + str(dice_12) + "!"
user_exit_checker()
def user_exit_checker():
    """Ask whether to roll again; restart on "roll", otherwise say goodbye."""
    #Checking if the user would like to roll another die, or to exit the program
    user_exit_checker_raw = raw_input("\r\nIf you want to roll another die, type [roll]. To exit, type [exit].\r\n?>")
    # NOTE(review): this local deliberately(?) shadows both the module-level
    # string and this function's own name.  Any answer other than "roll"
    # (not just "exit") ends the program.
    user_exit_checker = (user_exit_checker_raw.lower())
    if user_exit_checker=="roll":
        # Each new roll re-enters start(), so the call stack grows with
        # every round; extremely long sessions could hit the recursion limit.
        start()
    else:
        bye()
#Actually starting the program now.
start()  # import-time side effect: launches the interactive loop immediately
| 25.770115 | 118 | 0.672168 |
86d6b1696f13a9b7ce83a612a2558d6f2e4ac233 | 791 | py | Python | api-gateway/tests/test_get_scans.py | Niweera/DNSTool-Middleware-API | 0e83d9f62fb65d9223b86a7876b3f30b2771befb | [
"Apache-2.0"
] | null | null | null | api-gateway/tests/test_get_scans.py | Niweera/DNSTool-Middleware-API | 0e83d9f62fb65d9223b86a7876b3f30b2771befb | [
"Apache-2.0"
] | 9 | 2021-06-12T05:39:59.000Z | 2021-08-14T09:20:00.000Z | api-gateway/tests/test_get_scans.py | Niweera/DNSTool-Middleware-API | 0e83d9f62fb65d9223b86a7876b3f30b2771befb | [
"Apache-2.0"
] | 2 | 2021-05-22T15:33:50.000Z | 2021-08-28T08:51:25.000Z | from typing import Dict, Any
from unittest import TestCase
from server import app
from flask.testing import FlaskClient
from werkzeug.test import TestResponse
from tests.test_firebaser import get_id_token
class TestScansController(TestCase):
    """Integration test for the GET /scans endpoint."""

    def setUp(self) -> None:
        """Create a Flask test client and fix the user id used for auth."""
        self.app: FlaskClient = app.test_client()
        self.uid = "UchQlgJb9ibBoV991fqtQ5ykfHz2"

    def test_get_scans(self) -> None:
        """A valid bearer token yields a dict body with 'data' and HTTP 200."""
        bearer = f"Bearer {get_id_token(self.uid)}"
        response: TestResponse = self.app.get(
            "/scans",
            headers={"Authorization": bearer},
        )
        result: Dict[str, Any] = response.json
        code: int = response.status_code
        self.assertIsInstance(result, dict)
        self.assertIsInstance(result.get("data"), dict)
        self.assertEqual(code, 200)
| 32.958333 | 75 | 0.687737 |
5ad8a5a4c2fbb84cc9ceefaff9d9848c65e78ef7 | 2,047 | py | Python | analytical/templatetags/crazy_egg.py | likecodeingloveproblems/django-analytical | e73cc8b87565381370e9825fdfa036935512bc42 | [
"MIT"
] | null | null | null | analytical/templatetags/crazy_egg.py | likecodeingloveproblems/django-analytical | e73cc8b87565381370e9825fdfa036935512bc42 | [
"MIT"
] | null | null | null | analytical/templatetags/crazy_egg.py | likecodeingloveproblems/django-analytical | e73cc8b87565381370e9825fdfa036935512bc42 | [
"MIT"
] | null | null | null | """
Crazy Egg template tags and filters.
"""
import re
from django.template import Library, Node, TemplateSyntaxError
from analytical.utils import is_internal_ip, disable_html, get_required_setting
# A valid account number is a (string containing a) run of digits.
ACCOUNT_NUMBER_RE = re.compile(r'^\d+$')

# Loader <script> tag.  The src URL keeps %(account_nr_1)s/%(account_nr_2)s
# as %-placeholders to be filled at render time with the account number
# split into a 4-digit prefix and the remainder.
SETUP_CODE = '<script type="text/javascript" src="{placeholder_url}">' \
             '</script>'.\
             format(placeholder_url='//dnn506yrbagrg.cloudfront.net/pages/scripts/'
                    '%(account_nr_1)s/%(account_nr_2)s.js')

# One CE2.set() call per user variable slot.
USERVAR_CODE = "CE2.set(%(varnr)d, '%(value)s');"

register = Library()
@register.tag
def crazy_egg(parser, token):
    """
    Crazy Egg tracking template tag.

    Renders the Javascript snippet that tracks page clicks.  Your Crazy
    Egg account number (as a string) must be supplied in the
    ``CRAZY_EGG_ACCOUNT_NUMBER`` setting.  The tag takes no arguments.
    """
    pieces = token.split_contents()
    if len(pieces) > 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % pieces[0])
    return CrazyEggNode()
class CrazyEggNode(Node):
    """Template node emitting the Crazy Egg setup snippet.

    Up to five user variables are read from the template context under the
    names ``crazy_egg_var1`` .. ``crazy_egg_var5``.  For internal IPs the
    rendered HTML is disabled rather than omitted.
    """

    def __init__(self):
        # Fails loudly at construction time if the setting is missing or
        # is not a string of digits.
        self.account_nr = get_required_setting(
            'CRAZY_EGG_ACCOUNT_NUMBER',
            ACCOUNT_NUMBER_RE, "must be (a string containing) a number"
        )

    def render(self, context):
        # The loader URL splits the account number after its 4th digit.
        html = SETUP_CODE % {
            'account_nr_1': self.account_nr[:4],
            'account_nr_2': self.account_nr[4:],
        }

        # Collect the (slot, value) pairs actually present in the context.
        user_vars = []
        for slot in range(1, 6):
            value = context.get('crazy_egg_var%d' % slot)
            if value is not None:
                user_vars.append((slot, value))

        if user_vars:
            js = " ".join(USERVAR_CODE % {'varnr': slot, 'value': value}
                          for slot, value in user_vars)
            html = '%s\n<script type="text/javascript">%s</script>' % (html,
                                                                       js)

        if is_internal_ip(context, 'CRAZY_EGG'):
            html = disable_html(html, 'Crazy Egg')
        return html
def contribute_to_analytical(add_node):
    """Hook called by django-analytical: register this node at body-bottom."""
    CrazyEggNode()  # ensure properly configured
    add_node('body_bottom', CrazyEggNode)
| 30.552239 | 79 | 0.614558 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.