hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d5b9541d91c7f9bd602b921667a1f278331a2340 | 3,610 | py | Python | tests/python/big/test_glm_ipums.py | pnijhara/h2o4gpu | 6257112c134136471420b68241f57190a445b67d | [
"Apache-2.0"
] | 458 | 2017-09-20T08:32:10.000Z | 2022-02-28T18:40:57.000Z | tests/python/big/test_glm_ipums.py | Jun-NIBS/h2o4gpu | 9885416deb3285f5d0f33023d6c07373ac4fc0b7 | [
"Apache-2.0"
] | 461 | 2017-09-20T11:39:04.000Z | 2021-11-21T15:51:42.000Z | tests/python/big/test_glm_ipums.py | Jun-NIBS/h2o4gpu | 9885416deb3285f5d0f33023d6c07373ac4fc0b7 | [
"Apache-2.0"
] | 114 | 2017-09-20T12:08:07.000Z | 2021-11-29T14:15:40.000Z | """
:copyright: 2017-2018 H2O.ai, Inc.
:license: Apache License Version 2.0 (see LICENSE for details)
"""
import time
import sys
import os
import numpy as np
# import pandas as pd
import logging
import feather
print(sys.path)
from h2o4gpu.util.testing_utils import find_file, run_glm
logging.basicConfig(level=logging.DEBUG)
def fun(nGPUs=1, nFolds=1, nLambdas=100, nAlphas=8, validFraction=0.2):
    """Run an elastic-net GLM on the ipums dataset and check train/test RMSE.

    Args:
        nGPUs: Number of GPUs to use (0 means CPU).
        nFolds: Number of cross-validation folds.
        nLambdas: Number of lambda values on the regularization path.
        nAlphas: Number of alpha values in the elastic-net mixing grid.
        validFraction: Fraction of the data held out for validation.

    Raises:
        AssertionError: If any RMSE exceeds its expected bound.
    """
    t = time.time()
    print("cwd: %s" % (os.getcwd()))
    sys.stdout.flush()
    # Label the run with the caller's name (the individual test function).
    name = sys._getframe(1).f_code.co_name

    print("Reading Data")
    df = feather.read_dataframe("./data/ipums.feather")
    print(df.shape)
    # Last column is the target; everything before it is the feature matrix.
    X = np.array(df.iloc[:, :df.shape[1] - 1], dtype='float32', order='C')
    y = np.array(df.iloc[:, df.shape[1] - 1], dtype='float32', order='C')

    t1 = time.time()
    rmse_train, rmse_test = run_glm(
        X, y, nGPUs=nGPUs, nlambda=nLambdas, nfolds=nFolds, nalpha=nAlphas,
        validFraction=validFraction, verbose=0, name=name)

    # Check RMSE against bounds known for this dataset.
    print(rmse_train[0, 0])
    print(rmse_train[0, 1])
    print(rmse_train[0, 2])
    print(rmse_test[0, 2])
    sys.stdout.flush()

    if validFraction == 0.0:
        assert rmse_train[0, 0] < 34000
        assert rmse_train[0, 1] < 34000
        assert rmse_train[0, 2] < 34000
        assert rmse_test[0, 2] < 34000
    elif nLambdas > 20:
        # A longer lambda path is expected to reach a better solution.
        assert rmse_train[0, 0] < 30000
        assert rmse_train[0, 1] < 30000
        assert rmse_train[0, 2] < 30000
        assert rmse_test[0, 2] < 30000
    else:
        assert rmse_train[0, 0] < 34000
        assert rmse_train[0, 1] < 34000
        assert rmse_train[0, 2] < 34000
        assert rmse_test[0, 2] < 37000

    # BUG FIX: the original printed the literal '/n' instead of a newline.
    print('\n Total execution time:%d' % (time.time() - t1))
    print("TEST PASSED")
    sys.stdout.flush()
    print("Time taken: {}".format(time.time() - t))
    print("DONE.")
    sys.stdout.flush()
# One-line pytest entry points pinning (nGPUs, nFolds, nLambdas, nAlphas,
# validFraction) combinations. The "_0" suffix means no validation split.
def test_glm_ipums_gpu_fold1_quick_0(): fun(1, 1, 3, 3, validFraction=0)
def test_glm_ipums_gpu_fold1_0(): fun(1, 1, 20, 3, validFraction=0)
def test_glm_ipums_gpu_fold5_0(): fun(1, 5, 20, 3, validFraction=0)
def test_glm_ipums_gpu_fold1_quick(): fun(1, 1, 5, 3, validFraction=0.2)
def test_glm_ipums_gpu_fold1(): fun(1, 1, 20, 3, validFraction=0.2)
def test_glm_ipums_gpu_fold5(): fun(1, 5, 20, 3, validFraction=0.2)
def test_glm_ipums_gpu2_fold1_quick(): fun(2, 1, 3, 3, validFraction=0.2)
def test_glm_ipums_gpu2_fold1(): fun(2, 1, 20, 3, validFraction=0.2)
# NOTE(review): name says "gpu2" but this passes nGPUs=3 — confirm intent.
def test_glm_ipums_gpu2_fold5(): fun(3, 5, 20, 3, validFraction=0.2)
# CPU variants (nGPUs=0) are currently disabled.
#def test_glm_ipums_cpu_fold1_quick(): fun(0, 1, 3, 3, validFraction=0.2)
#def test_glm_ipums_cpu_fold1(): fun(0, 1, 20, 3, validFraction=0.2)
#def test_glm_ipums_cpu_fold5(): fun(0, 5, 20, 3, validFraction=0.2)
if __name__ == '__main__':
    # Run the whole suite sequentially when executed as a plain script
    # (i.e. outside the pytest runner).
    test_glm_ipums_gpu_fold1_quick_0()
    test_glm_ipums_gpu_fold1_0()
    test_glm_ipums_gpu_fold5_0()
    test_glm_ipums_gpu_fold1_quick()
    test_glm_ipums_gpu_fold1()
    test_glm_ipums_gpu_fold5()
    test_glm_ipums_gpu2_fold1_quick()
    test_glm_ipums_gpu2_fold1()
    test_glm_ipums_gpu2_fold5()
    # test_glm_ipums_cpu_fold1_quick()
    # test_glm_ipums_cpu_fold1()
    # test_glm_ipums_cpu_fold5()
| 26.544118 | 103 | 0.649584 |
4085240380f23ed8afb45f05bf24ad6c18ebb4a2 | 3,194 | py | Python | src/lm_core/model.py | Lawliet19189/Euller | d4b989182e3d4a4bd27707001b560e8b606396cd | [
"MIT"
] | null | null | null | src/lm_core/model.py | Lawliet19189/Euller | d4b989182e3d4a4bd27707001b560e8b606396cd | [
"MIT"
] | null | null | null | src/lm_core/model.py | Lawliet19189/Euller | d4b989182e3d4a4bd27707001b560e8b606396cd | [
"MIT"
] | null | null | null | import numpy as np
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from .utils import get_available_devices
class GPTLM():
def __init__(self, model_name_or_path='gpt2'):
self.start_token = "<|endoftext|>"
self.tokenizer = GPT2Tokenizer.from_pretrained(model_name_or_path, bos_token=self.start_token)
self.model = GPT2LMHeadModel.from_pretrained(model_name_or_path)
self.device, gpu_ids = get_available_devices()
self.model.to(self.device)
self.model.eval()
    def get_probabilities(self, raw_text, top_k=1000):
        """Score `raw_text` token-by-token under the language model.

        For every position after the first token, reports where the true next
        token ranks among the model's sorted predictions, its probability, and
        the model's own top-k candidate tokens.

        Args:
            raw_text: Input string to evaluate (truncated to 1024 BPE tokens).
            top_k: Number of candidate predictions to keep per position.

        Returns:
            Dict with keys:
                'bpe_strings': post-processed BPE strings of the input tokens.
                'true_topk': (rank, probability) pairs for each true token.
                'pred_topk': per-position list of (token, probability) candidates.
        """
        inputs = self.tokenizer(raw_text, return_tensors="pt", truncation= True, max_length = 1024).to(self.device
        )
        # add input_seq as labels to predict probabilities for each timestep
        logits = self.model(**inputs, labels=inputs['input_ids'])  # (1, input_size, vocab_size)
        # do softmax to get probabilities; the last position is dropped because
        # it has no following true token to compare against.
        pred_probs = torch.softmax(logits.logits[0, :-1], dim=-1)
        # The "next token" targets: the input sequence shifted left by one.
        y = inputs['input_ids'][0, 1:]
        # Sort the probabilities for each timestep (descending).
        sorted_preds = np.argsort(-pred_probs.data.cpu().numpy())
        # find where the true token is positioned in the predicted sorted probabilities list
        true_topk_pos = [int(np.where(sorted_preds[i] == y[i].item())[0][0]) for i in range(y.shape[0])]
        # Probabilities for our original input sequences, rounded to 5 digits.
        true_topk_probs = pred_probs[np.arange(0, y.shape[0], 1), y].data.cpu().numpy().tolist()
        true_topk_probs = list(map(lambda x: round(x, 5), true_topk_probs))
        true_topk = list(zip(true_topk_pos, true_topk_probs))
        bpe_strings = [self.tokenizer.decoder[s.item()] for s in inputs['input_ids'][0]]
        bpe_strings = [self.postprocess(s) for s in bpe_strings]
        # Get the k predicted (token, probability) pairs for each timestep.
        pred_topk = [
            list(zip([self.tokenizer.decoder[p] for p in sorted_preds[i][:top_k]],
                     list(map(lambda x: round(x, 5),
                              pred_probs[i][sorted_preds[i][
                                  :top_k]].data.cpu().numpy().tolist()))))
            for i in range(y.shape[0])]
        pred_topk = [[(self.postprocess(t[0]), t[1]) for t in pred] for pred in pred_topk]
        response = {
            'bpe_strings': bpe_strings,
            'true_topk': true_topk,
            'pred_topk': pred_topk
        }
        # Release cached GPU memory between calls.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        return response
@staticmethod
def postprocess(token):
with_space = False
with_break = False
if token.startswith('Ġ'):
with_space = True
token = token[1:]
elif token.startswith('â'):
token = ' '
elif token.startswith('Ċ'):
token = ' '
with_break = True
token = '-' if token.startswith('â') else token
token = '“' if token.startswith('ľ') else token
token = '”' if token.startswith('Ŀ') else token
token = "'" if token.startswith('Ļ') else token
return token | 38.95122 | 114 | 0.597683 |
3868585144f97a180c9ec1044c155ab33d2262e8 | 2,520 | py | Python | django_mailbox/south_migrations/0009_remove_references_table.py | JBwebkrone/django-mailbox-1 | 40263b66703332d82c179d79f5ea0d80fc1ea388 | [
"MIT"
] | 225 | 2015-01-02T14:53:59.000Z | 2022-03-04T23:07:34.000Z | django_mailbox/south_migrations/0009_remove_references_table.py | JBwebkrone/django-mailbox-1 | 40263b66703332d82c179d79f5ea0d80fc1ea388 | [
"MIT"
] | 182 | 2015-02-06T23:29:50.000Z | 2022-01-20T21:50:39.000Z | django_mailbox/south_migrations/0009_remove_references_table.py | JBwebkrone/django-mailbox-1 | 40263b66703332d82c179d79f5ea0d80fc1ea388 | [
"MIT"
] | 138 | 2015-01-18T16:57:34.000Z | 2022-03-24T19:33:38.000Z | import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Drops the M2M 'references' join table between messages (reversible)."""

    def forwards(self, orm):
        # Removing M2M table for field references on 'Message'
        db.delete_table('django_mailbox_message_references')

    def backwards(self, orm):
        # Adding M2M table for field references on 'Message'
        db.create_table('django_mailbox_message_references', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('from_message', models.ForeignKey(orm['django_mailbox.message'], null=False)),
            ('to_message', models.ForeignKey(orm['django_mailbox.message'], null=False))
        ))
        db.create_unique('django_mailbox_message_references', ['from_message_id', 'to_message_id'])

    # Frozen ORM snapshot used by South when replaying this migration.
    models = {
        'django_mailbox.mailbox': {
            'Meta': {'object_name': 'Mailbox'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'uri': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        'django_mailbox.message': {
            'Meta': {'object_name': 'Message'},
            'body': ('django.db.models.fields.TextField', [], {}),
            'from_header': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_reply_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'replies'", 'null': 'True', 'to': "orm['django_mailbox.Message']"}),
            'mailbox': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['django_mailbox.Mailbox']"}),
            'message_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'outgoing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'processed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'to_header': ('django.db.models.fields.TextField', [], {})
        }
    }

    complete_apps = ['django_mailbox']
| 52.5 | 181 | 0.600794 |
be1f8c631148113507450b9bd0e3bf7d5d9be22c | 7,675 | py | Python | examples/plot_metricframe_beyond_binary_classification.py | marymlucas/fairlearn | 73e6a8ff797d5f6e6b2f5932313b72ffb95331c8 | [
"MIT"
] | 79 | 2018-05-21T14:52:34.000Z | 2019-04-17T13:38:45.000Z | examples/plot_metricframe_beyond_binary_classification.py | marymlucas/fairlearn | 73e6a8ff797d5f6e6b2f5932313b72ffb95331c8 | [
"MIT"
] | 28 | 2019-06-09T13:17:03.000Z | 2019-10-10T19:00:19.000Z | examples/plot_metricframe_beyond_binary_classification.py | marymlucas/fairlearn | 73e6a8ff797d5f6e6b2f5932313b72ffb95331c8 | [
"MIT"
] | 10 | 2019-05-20T17:24:54.000Z | 2019-10-10T13:24:01.000Z | # Copyright (c) Fairlearn contributors.
# Licensed under the MIT License.
"""
=========================================
MetricFrame: Beyond Binary Classification
=========================================
"""
# %%
# This notebook contains examples of using :class:`~fairlearn.metrics.MetricFrame`
# for tasks which go beyond simple binary classification.
import functools
import numpy as np
import sklearn.metrics as skm
from fairlearn.metrics import MetricFrame
# %%
# Multiclass & Nonscalar Results
# ==============================
#
# Suppose we have a multiclass problem, with labels :math:`\in {0, 1, 2}`,
# and that we wish to generate confusion matrices for each subgroup
# identified by the sensitive feature :math:`\in { a, b, c, d}`.
# This is supported readily by
# :class:`~fairlearn.metrics.MetricFrame`, which does not require
# the result of a metric to be a scalar.
#
# First, let us generate some random input data:
# Fixed seed keeps the gallery output reproducible across builds.
rng = np.random.default_rng(seed=96132)

n_rows = 1000
n_classes = 3
n_sensitive_features = 4
y_true = rng.integers(n_classes, size=n_rows)
y_pred = rng.integers(n_classes, size=n_rows)
temp = rng.integers(n_sensitive_features, size=n_rows)
# Map the integers 0..3 to the sensitive-feature labels 'a'..'d'.
s_f = [chr(ord("a") + x) for x in temp]
# %%
# To use :func:`~sklearn.metrics.confusion_matrix`, we
# need to prebind the `labels` argument, since it is possible
# that some of the subgroups will not contain all of
# the possible labels
conf_mat = functools.partial(skm.confusion_matrix, labels=np.unique(y_true))
# %%
# With this now available, we can create our
# :class:`~fairlearn.metrics.MetricFrame`:
mf = MetricFrame(
metrics={"conf_mat": conf_mat}, y_true=y_true, y_pred=y_pred, sensitive_features=s_f
)
# %%
# From this, we can view the overall confusion matrix:
mf.overall
# %%
# And also the confusion matrices for each subgroup:
mf.by_group
# %%
# Obviously, the other methods such as
# :meth:`~fairlearn.metrics.MetricFrame.group_min`
# will not work, since operations such as 'less than'
# are not well defined for matrices.
# %%
# Metric functions with different return types can also
# be mixed in a single :class:`~fairlearn.metrics.MetricFrame`.
# For example:
recall = functools.partial(skm.recall_score, average="macro")
mf2 = MetricFrame(
metrics={"conf_mat": conf_mat, "recall": recall},
y_true=y_true,
y_pred=y_pred,
sensitive_features=s_f,
)
print("Overall values")
print(mf2.overall)
print("Values by group")
print(mf2.by_group)
# %%
# Non-scalar Inputs
# =================
#
# :class:`~fairlearn.metrics.MetricFrame` does not require
# its inputs to be scalars either. To demonstrate this, we
# will use an image recognition example (kindly supplied by
# Ferdane Bekmezci, Hamid Vaezi Joze and Samira Pouyanfar).
#
# Image recognition algorithms frequently construct a bounding
# box around regions where they have found their target features.
# For example, if an algorithm detects a face in an image, it
# will place a bounding box around it. These bounding boxes
# constitute `y_pred` for :class:`~fairlearn.metrics.MetricFrame`.
# The `y_true` values then come from bounding boxes marked by
# human labellers.
#
# Bounding boxes are often compared using the 'iou' metric.
# This computes the intersection and the union of the two
# bounding boxes, and returns the ratio of their areas.
# If the bounding boxes are identical, then the metric will
# be 1; if disjoint then it will be 0. A function to do this is:
def bounding_box_iou(box_A_input, box_B_input):
    """Compute intersection-over-union for two bounding boxes.

    Each box is an array-like ``[x_0, y_0, delta_x, delta_y]`` with
    non-negative deltas. Identical boxes score 1; disjoint boxes score 0.

    Raises:
        ValueError: If either box has a negative width or height.
    """
    box_A = np.array(box_A_input)
    box_B = np.array(box_B_input)

    # Reject degenerate inputs with negative extents.
    if box_A[2] < 0:
        raise ValueError("Bad delta_x for box_A")
    if box_A[3] < 0:
        raise ValueError("Bad delta y for box_A")
    if box_B[2] < 0:
        raise ValueError("Bad delta x for box_B")
    if box_B[3] < 0:
        raise ValueError("Bad delta y for box_B")

    # Turn [x0, y0, dx, dy] into corner coordinates [x0, y0, x1, y1].
    box_A[2:4] = box_A[0:2] + box_A[2:4]
    box_B[2:4] = box_B[0:2] + box_B[2:4]

    # Corners of the intersection rectangle: the larger of the lower-left
    # corners and the smaller of the upper-right corners.
    lower = np.maximum(box_A[0:2], box_B[0:2])
    upper = np.minimum(box_A[2:4], box_B[2:4])

    # No overlap at all.
    if upper[0] < lower[0] or upper[1] < lower[1]:
        return 0

    inter_area = (upper[0] - lower[0]) * (upper[1] - lower[1])

    # Union area = sum of the two box areas minus the double-counted overlap.
    area_A = (box_A[2] - box_A[0]) * (box_A[3] - box_A[1])
    area_B = (box_B[2] - box_B[0]) * (box_B[3] - box_B[1])
    return inter_area / float(area_A + area_B - inter_area)
# %%
# This is a metric for two bounding boxes, but for :class:`~fairlearn.metrics.MetricFrame`
# we need to compare two lists of bounding boxes. For the sake of
# simplicity, we will return the mean value of 'iou' for the
# two lists, but this is by no means the only choice:
def mean_iou(true_boxes, predicted_boxes):
    """Return the average pairwise IOU of two equally-long box lists."""
    if len(true_boxes) != len(predicted_boxes):
        raise ValueError("Array size mismatch")
    scores = []
    for truth, prediction in zip(true_boxes, predicted_boxes):
        scores.append(bounding_box_iou(truth, prediction))
    return np.mean(scores)
# %%
# We need to generate some input data, so first create a function to
# generate a single random bounding box:
def generate_bounding_box(max_coord, max_delta, rng):
    """Draw one random box [x0, y0, dx, dy].

    The corner is uniform in [0, max_coord), the extents uniform in
    [0, max_delta); `rng` is a numpy Generator (consumed in that order).
    """
    top_left = rng.random(size=2) * max_coord
    extent = rng.random(size=2) * max_delta
    return np.concatenate((top_left, extent))
# %%
# Now use this to create sample `y_true` and `y_pred` arrays of
# bounding boxes:
def many_bounding_boxes(n_rows, max_coord, max_delta, rng):
    """Generate a list of `n_rows` random bounding boxes from `rng`."""
    boxes = []
    for _ in range(n_rows):
        boxes.append(generate_bounding_box(max_coord, max_delta, rng))
    return boxes
true_bounding_boxes = many_bounding_boxes(n_rows, 5, 10, rng)
pred_bounding_boxes = many_bounding_boxes(n_rows, 5, 10, rng)
# %%
# Finally, we can use these in a :class:`~fairlearn.metrics.MetricFrame`:
mf_bb = MetricFrame(
metrics={"mean_iou": mean_iou},
y_true=true_bounding_boxes,
y_pred=pred_bounding_boxes,
sensitive_features=s_f,
)
print("Overall metric")
print(mf_bb.overall)
print("Metrics by group")
print(mf_bb.by_group)
# %%
# The individual entries in the `y_true` and `y_pred` arrays
# can be arbitrarily complex. It is the metric functions
# which give meaning to them. Similarly,
# :class:`~fairlearn.metrics.MetricFrame` does not impose
# restrictions on the return type. One can envisage an image
# recognition task where there are multiple detectable objects in each
# picture, and the image recognition algorithm produces
# multiple bounding boxes (not necessarily in a 1-to-1
# mapping either). The output of such a scenario might
# well be a matrix of some description.
# Another case where both the input data and the metrics
# will be complex is natural language processing,
# where each row of the input could be an entire sentence,
# possibly with complex word embeddings included.
# %%
# Conclusion
# ==========
#
# This notebook has given some taste of the flexibility
# of :class:`~fairlearn.metrics.MetricFrame` when it comes
# to inputs, outputs and metric functions.
# The input arrays can have elements of arbitrary types,
# and the return values from the metric functions can also
# be of any type (although methods such as
# :meth:`~fairlearn.metrics.MetricFrame.group_min` may not
# work).
| 29.980469 | 90 | 0.70684 |
01ff23975bf23fdde94b274262825fbd0335b668 | 12,084 | py | Python | modules/depth_and_motion_learning/training_utils.py | FloatingPoint64/depth_estimation | f14aa53db7b8b754450a5d0dab3fe1f90210cd5e | [
"Apache-2.0"
] | null | null | null | modules/depth_and_motion_learning/training_utils.py | FloatingPoint64/depth_estimation | f14aa53db7b8b754450a5d0dab3fe1f90210cd5e | [
"Apache-2.0"
] | 5 | 2021-02-23T06:21:24.000Z | 2021-02-23T13:57:10.000Z | modules/depth_and_motion_learning/training_utils.py | FloatingPoint64/depth_estimation | f14aa53db7b8b754450a5d0dab3fe1f90210cd5e | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper utils for training on TPU using the Estimator framework.
The functions in this build estimator objects and invoke training loops, on TPU
and on GPU/CPU.
Since exporting summaries is a bit involved on TPU, we use a `summarizer`
training loop instead. This loop is intended to run on CPU, load the checkpoints
written by the TPU trainer, perform a single training step and write the
summaries.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from absl import flags
from absl import logging
import six
import tensorflow.compat.v1 as tf
from depth_and_motion_learning import maybe_summary
from depth_and_motion_learning.parameter_container import ParameterContainer
from tensorflow.contrib import estimator as contrib_estimator
FLAGS = flags.FLAGS
flags.DEFINE_string('master', '', 'TensorFlow session address.')
flags.DEFINE_string('model_dir', '', 'Directory where the model is saved.')
flags.DEFINE_string(
'param_overrides',
'',
'Parameters for the trainer and the '
'model')
# Defaults for various parameters common to training configurations.
TRAINER_PARAMS = {
# Learning rate
'learning_rate': 2e-4,
# If not None, gradients will be clipped to this value.
'clip_gradients': 10.0,
# Number of iterations in the TPU internal on-device loop.
'iterations_per_loop': 20,
# If not None, the training will be initialized form this checkpoint.
'init_ckpt': None,
# A string, specifies the format of a checkpoint form which to initialize.
# The model code is expected to convert this string into a
# vars_to_restore_fn (see below),
'init_ckpt_type': None,
# Master address
'master': None,
# Directory where checkpoints will be saved.
'model_dir': None,
# Maximum number of training steps.
'max_steps': int(1e6),
# Number of hours between each checkpoint to be saved.
# The default value of 10,000 hours effectively disables the feature.
'keep_checkpoint_every_n_hours': 10000,
}
class InitFromCheckpointHook(tf.estimator.SessionRunHook):
    """A hook for initializing training from a checkpoint.

    Although the Estimator framework supports initialization from a checkpoint
    via
    https://www.tensorflow.org/api_docs/python/tf/estimator/WarmStartSettings,
    the only way to build mapping between the variables and the checkpoint
    names is via providing a regex. This class provides the same functionality,
    but the mapping can be built by a callback, which provides more flexibility
    and readability.
    """

    def __init__(self, model_dir, ckpt_to_init_from, vars_to_restore_fn=None):
        """Creates an instance.

        Args:
          model_dir: A string, path where checkpoints are saved during training.
            Used for checking whether a checkpoint already exists there, in
            which case we want to continue training from there rather than
            initialize from another checkpoint.
          ckpt_to_init_from: A string, path to a checkpoint to initialize from.
          vars_to_restore_fn: A callable that receives no arguments. When
            called, expected to provide a dictionary that maps the checkpoint
            name of each variable to the respective variable object. This
            dictionary will be used as `var_list` in a Saver object used for
            initializing from `ckpt_to_init_from`. If None, the default saver
            will be used.
        """
        # If model_dir already has a checkpoint, resume from it and skip the
        # external initialization entirely (self._ckpt stays None).
        self._ckpt = None if tf.train.latest_checkpoint(
            model_dir) else ckpt_to_init_from
        self._vars_to_restore_fn = vars_to_restore_fn

    def begin(self):
        """Builds the restore Saver and a global-step reset op, if needed."""
        if not self._ckpt:
            return
        logging.info('%s will be used for initialization.', self._ckpt)

        # Reset the global step so training restarts from step 0 after
        # initializing from an external checkpoint.
        self._reset_step = None
        if tf.train.get_global_step() is not None:
            self._reset_step = tf.train.get_global_step().assign(0)

        if not self._vars_to_restore_fn:
            # BUG FIX: log message said 'form the checkpoint'.
            logging.info('All variables will be initialized from the checkpoint.')
            self._saver = tf.get_collection(tf.GraphKeys.SAVERS)[0]
            return

        vars_to_restore = self._vars_to_restore_fn()
        restored_vars_string = (
            'The following variables are to be initialized from the checkpoint:\n')
        for ckpt_name in sorted(vars_to_restore):
            restored_vars_string += '%s --> %s\n' % (
                ckpt_name, vars_to_restore[ckpt_name].op.name)
        logging.info(restored_vars_string)
        self._saver = tf.train.Saver(vars_to_restore)

    def after_create_session(self, session, coord):
        """Restores the checkpoint and resets the global step."""
        del coord  # unused
        if not self._ckpt:
            return
        # BUG FIX: the original restored the checkpoint twice in a row;
        # a single restore is sufficient.
        self._saver.restore(session, self._ckpt)
        if self._reset_step is not None:
            session.run(self._reset_step)
def _build_estimator_spec(losses, trainer_params, mode, use_tpu=False):
    """Builds an EstimatorSpec/TPUEstimatorSpec based on trainer_params.

    Args:
      losses: A dictionary of {string: tf.Tensor} containing the various losses.
        The keys will be used as display names for the summaries, the values
        will be summed up to obtain the total loss, which is to be minimized.
      trainer_params: A ParameterContainer object with parameters relevant to
        the training.
      mode: One of tf.estimator.ModeKeys: TRAIN, PREDICT or EVAL.
      use_tpu: A boolean, if True, a TPU-compatible version of EstimatorSpec
        will be built.

    Returns:
      A EstimatorSpec or a TPUEstimatorSpec object.
    """
    if mode == tf.estimator.ModeKeys.TRAIN:
        total_loss = 0.0
        for loss_name, loss in six.iteritems(losses):
            # Per-loss scalar summaries are only written off-TPU (see module
            # docstring: summaries are handled by a separate CPU loop on TPU).
            if not use_tpu:
                tf.summary.scalar('Loss/%s' % loss_name, loss)
            total_loss += loss
        learning_rate = trainer_params.learning_rate
        maybe_summary.scalar('Learning Rate', learning_rate)
        optimizer = tf.train.AdamOptimizer(
            learning_rate=learning_rate, beta1=0.9)
        # Clip gradients by global norm to trainer_params.clip_gradients.
        optimizer = contrib_estimator.clip_gradients_by_norm(
            optimizer, trainer_params.clip_gradients)
        if use_tpu:
            optimizer = tf.tpu.CrossShardOptimizer(optimizer)
        train_op = optimizer.minimize(
            total_loss, global_step=tf.train.get_global_step())
    else:
        # PREDICT/EVAL: nothing to minimize here.
        total_loss = None
        train_op = None
    if use_tpu:
        estimator_spec = tf.estimator.tpu.TPUEstimatorSpec(
            mode=tf.estimator.ModeKeys.TRAIN, loss=total_loss, train_op=train_op)
    else:
        estimator_spec = tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.TRAIN, loss=total_loss, train_op=train_op)
    return estimator_spec
def run_local_training(losses_fn,
                       input_fn,
                       trainer_params_overrides,
                       model_params,
                       vars_to_restore_fn=None):
    """Run a simple single-machine training loop.

    Args:
      losses_fn: A callable that receives two arguments, `features` and
        `params`, both are dictionaries, and returns a dictionary whose values
        are the losses. Their sum is the total loss to be minimized.
      input_fn: A callable that complies with tf.Estimator's definition of
        input_fn.
      trainer_params_overrides: A dictionary or a ParameterContainer with
        overrides for the default values in TRAINER_PARAMS above.
      model_params: A ParameterContainer that will be passed to the model (i.e.
        to losses_fn and input_fn).
      vars_to_restore_fn: A callable that receives no arguments. When called,
        expected to provide a dictionary that maps the checkpoint name of each
        variable to the respective variable object. This dictionary will be
        used as `var_list` in a Saver object used for initializing from the
        checkpoint at trainer_params.init_ckpt. If None, the default saver will
        be used.
    """
    # Merge the caller's overrides into the defaults; is_strict rejects
    # unknown keys.
    trainer_params = ParameterContainer.from_defaults_and_overrides(
        TRAINER_PARAMS, trainer_params_overrides, is_strict=True)
    run_config_params = {
        'model_dir':
            trainer_params.model_dir,
        'save_summary_steps':
            50,
        'keep_checkpoint_every_n_hours':
            trainer_params.keep_checkpoint_every_n_hours,
        'log_step_count_steps':
            50,
    }
    logging.info(
        'Estimators run config parameters:\n%s',
        json.dumps(run_config_params, indent=2, sort_keys=True, default=str))
    run_config = tf.estimator.RunConfig(**run_config_params)

    def estimator_spec_fn(features, labels, mode, params):
        # Estimator model_fn adapter: labels are unused because losses_fn
        # derives everything it needs from `features`.
        del labels  # unused
        return _build_estimator_spec(
            losses_fn(features, mode, params),
            trainer_params=trainer_params,
            mode=mode,
            use_tpu=False)

    # Optionally initializes variables from trainer_params.init_ckpt.
    init_hook = InitFromCheckpointHook(trainer_params.model_dir,
                                       trainer_params.init_ckpt,
                                       vars_to_restore_fn)
    estimator = tf.estimator.Estimator(
        model_fn=estimator_spec_fn,
        config=run_config,
        params=model_params.as_dict())
    estimator.train(
        input_fn=input_fn,
        max_steps=trainer_params.max_steps,
        hooks=[init_hook])
def train(input_fn, loss_fn, get_vars_to_restore_fn=None):
    """Run training.

    Args:
      input_fn: A tf.Estimator compliant input_fn.
      loss_fn: a callable with the signature loss_fn(features, mode, params),
        where `features` is a dictionary mapping strings to tf.Tensors, `mode`
        is a tf.estimator.ModeKeys (TRAIN, EVAL, PREDICT), and `params` is a
        dictionary mapping strings to hyperparameters (could be nested). It
        returns a dictionary mapping strings to scalar tf.Tensor-s representing
        losses. Their sum is the total training loss.
      get_vars_to_restore_fn: A callable that receives a string argument
        (indicating the type of initialization) and returns a
        vars_to_restore_fn. The latter is a callable that receives no arguments
        and returns a dictionary that can be passed to a tf.train.Saver
        object's constructor as a `var_list` to indicate which variables to
        load from what names in the checkpoint.
    """
    # Defaults; the --param_overrides flag can override any of these keys.
    params = ParameterContainer({
        'model': {
            'batch_size': 16,
            'input': {}
        },
    }, {'trainer': {
        'master': FLAGS.master,
        'model_dir': FLAGS.model_dir
    }})
    params.override(FLAGS.param_overrides)
    init_ckpt_type = params.trainer.get('init_ckpt_type')

    # An init_ckpt_type only makes sense if the model told us how to map
    # checkpoint names to variables.
    if init_ckpt_type and not get_vars_to_restore_fn:
        raise ValueError(
            'An init_ckpt_type was specified (%s), but no get_vars_to_restore_fn '
            'was provided.' %
            init_ckpt_type)

    vars_to_restore_fn = (
        get_vars_to_restore_fn(init_ckpt_type) if init_ckpt_type else None)

    logging.info(
        'Starting training with the following parameters:\n%s',
        json.dumps(params.as_dict(), indent=2, sort_keys=True, default=str))
    run_local_training(loss_fn, input_fn, params.trainer, params.model,
                       vars_to_restore_fn)
| 38.240506 | 84 | 0.6857 |
cc86d832bb20129c8016a9f5011597104f115bfb | 1,080 | py | Python | app/api/v2/session.py | RushRay/view-oj-backend | ece96702de4fd5351f30d27bd70421c3b281e53d | [
"Apache-2.0"
] | null | null | null | app/api/v2/session.py | RushRay/view-oj-backend | ece96702de4fd5351f30d27bd70421c3b281e53d | [
"Apache-2.0"
] | null | null | null | app/api/v2/session.py | RushRay/view-oj-backend | ece96702de4fd5351f30d27bd70421c3b281e53d | [
"Apache-2.0"
] | null | null | null | from flask import jsonify
from flask_login import current_user, login_required, login_user, logout_user
from app.libs.error_code import AuthFailed, DeleteSuccess, Success
from app.libs.red_print import RedPrint
from app.models.user import User
from app.validators.session import LoginForm
api = RedPrint('session')
@api.route("", methods=['GET'])
@login_required
def get_session_api():
    """Return the logged-in user's public profile fields."""
    user = current_user
    # Restrict serialization to the public fields only.
    user.fields = ['username', 'nickname', 'group', 'permission', 'status']
    payload = {
        'code': 0,
        'data': user
    }
    return jsonify(payload)
@api.route("", methods=['POST'])
def create_session_api():
    """Validate credentials and open a persistent login session."""
    credentials = LoginForm().validate_for_api().data_
    account = User.get_by_id(credentials['username'])
    if account is None:
        raise AuthFailed('User not found')
    if not account.check_password(credentials['password']):
        raise AuthFailed('Wrong username or password')
    login_user(account, remember=True)
    # Success responses are raised as exceptions in this project's API style.
    raise Success('Login successful')
@api.route("", methods=['DELETE'])
def delete_session_api():
    """Terminate the current session; always reports success."""
    logout_user()
    # Raised as the success response (project convention from
    # app.libs.error_code, matching the other handlers in this module).
    raise DeleteSuccess('Logout successful')
| 27.692308 | 77 | 0.702778 |
3d5d0681f0222216d379e1c1c63e1e4022eff3ea | 5,889 | py | Python | homeassistant/components/geonetnz_volcano/sensor.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 7 | 2019-02-07T14:14:12.000Z | 2019-07-28T06:56:10.000Z | homeassistant/components/geonetnz_volcano/sensor.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 58 | 2020-08-03T07:33:02.000Z | 2022-03-31T06:02:05.000Z | homeassistant/components/geonetnz_volcano/sensor.py | klauern/home-assistant-core | c18ba6aec0627e6afb6442c678edb5ff2bb17db6 | [
"Apache-2.0"
] | 14 | 2018-08-19T16:28:26.000Z | 2021-09-02T18:26:53.000Z | """Feed Entity Manager Sensor support for GeoNet NZ Volcano Feeds."""
import logging
from typing import Optional
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_UNIT_SYSTEM_IMPERIAL,
LENGTH_KILOMETERS,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from homeassistant.util import dt
from homeassistant.util.unit_system import IMPERIAL_SYSTEM
from .const import (
ATTR_ACTIVITY,
ATTR_DISTANCE,
ATTR_EXTERNAL_ID,
ATTR_HAZARDS,
DEFAULT_ICON,
DOMAIN,
FEED,
)
_LOGGER = logging.getLogger(__name__)
ATTR_LAST_UPDATE = "feed_last_update"
ATTR_LAST_UPDATE_SUCCESSFUL = "feed_last_update_successful"
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up the GeoNet NZ Volcano Feed platform."""
    manager = hass.data[DOMAIN][FEED][entry.entry_id]

    @callback
    def async_add_sensor(feed_manager, external_id, unit_system):
        """Add sensor entity from feed."""
        sensor = GeonetnzVolcanoSensor(
            entry.entry_id, feed_manager, external_id, unit_system
        )
        _LOGGER.debug("Adding sensor %s", sensor)
        async_add_entities([sensor], True)

    # Keep the dispatcher unsubscribe handle so the manager can clean up.
    manager.listeners.append(
        async_dispatcher_connect(
            hass, manager.async_event_new_entity(), async_add_sensor
        )
    )
    # Kick off the first feed refresh without blocking setup.
    hass.async_create_task(manager.async_update())
    _LOGGER.debug("Sensor setup done")
class GeonetnzVolcanoSensor(Entity):
    """This represents an external event with GeoNet NZ Volcano feed data."""

    def __init__(self, config_entry_id, feed_manager, external_id, unit_system):
        """Initialize entity with data from feed entry."""
        self._config_entry_id = config_entry_id
        self._feed_manager = feed_manager
        self._external_id = external_id
        self._unit_system = unit_system
        # Feed-derived state; populated by _update_from_feed().
        self._title = None
        self._distance = None
        self._latitude = None
        self._longitude = None
        self._attribution = None
        self._alert_level = None
        self._activity = None
        self._hazards = None
        self._feed_last_update = None
        self._feed_last_update_successful = None
        # Dispatcher unsubscribe callback, held between add/remove.
        self._remove_signal_update = None

    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        # Subscribe to per-entry update signals from the feed manager.
        self._remove_signal_update = async_dispatcher_connect(
            self.hass,
            f"geonetnz_volcano_update_{self._external_id}",
            self._update_callback,
        )

    async def async_will_remove_from_hass(self) -> None:
        """Call when entity will be removed from hass."""
        if self._remove_signal_update:
            self._remove_signal_update()

    @callback
    def _update_callback(self):
        """Call update method."""
        self.async_schedule_update_ha_state(True)

    @property
    def should_poll(self):
        """No polling needed for GeoNet NZ Volcano feed location events."""
        return False

    async def async_update(self):
        """Update this entity from the data held in the feed manager."""
        _LOGGER.debug("Updating %s", self._external_id)
        feed_entry = self._feed_manager.get_entry(self._external_id)
        last_update = self._feed_manager.last_update()
        last_update_successful = self._feed_manager.last_update_successful()
        if feed_entry:
            self._update_from_feed(feed_entry, last_update, last_update_successful)

    def _update_from_feed(self, feed_entry, last_update, last_update_successful):
        """Update the internal state from the provided feed entry."""
        self._title = feed_entry.title
        # Convert distance if not metric system.
        if self._unit_system == CONF_UNIT_SYSTEM_IMPERIAL:
            self._distance = round(
                IMPERIAL_SYSTEM.length(feed_entry.distance_to_home, LENGTH_KILOMETERS),
                1,
            )
        else:
            self._distance = round(feed_entry.distance_to_home, 1)
        self._latitude = round(feed_entry.coordinates[0], 5)
        self._longitude = round(feed_entry.coordinates[1], 5)
        self._attribution = feed_entry.attribution
        self._alert_level = feed_entry.alert_level
        self._activity = feed_entry.activity
        self._hazards = feed_entry.hazards
        # Normalize feed timestamps to UTC (None when never updated).
        self._feed_last_update = dt.as_utc(last_update) if last_update else None
        self._feed_last_update_successful = (
            dt.as_utc(last_update_successful) if last_update_successful else None
        )

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._alert_level

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return DEFAULT_ICON

    @property
    def name(self) -> Optional[str]:
        """Return the name of the entity."""
        return f"Volcano {self._title}"

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return "alert level"

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        attributes = {}
        # Include only attributes that have a value; keep explicit booleans.
        for key, value in (
            (ATTR_EXTERNAL_ID, self._external_id),
            (ATTR_ATTRIBUTION, self._attribution),
            (ATTR_ACTIVITY, self._activity),
            (ATTR_HAZARDS, self._hazards),
            (ATTR_LONGITUDE, self._longitude),
            (ATTR_LATITUDE, self._latitude),
            (ATTR_DISTANCE, self._distance),
            (ATTR_LAST_UPDATE, self._feed_last_update),
            (ATTR_LAST_UPDATE_SUCCESSFUL, self._feed_last_update_successful),
        ):
            if value or isinstance(value, bool):
                attributes[key] = value
        return attributes
| 34.846154 | 87 | 0.671591 |
78039c2d3c81a0c7f37cf9b1f9b1b12081a814b9 | 1,147 | py | Python | toolkit/augment_data_conllu.py | danielhers/hit-scir-ucca-parser | 240508227c54e52ab9f1f988a1144f51bc8f25ca | [
"Apache-2.0"
] | 1 | 2020-07-18T13:40:06.000Z | 2020-07-18T13:40:06.000Z | toolkit/augment_data_conllu.py | danielhers/hit-scir-ucca-parser | 240508227c54e52ab9f1f988a1144f51bc8f25ca | [
"Apache-2.0"
] | null | null | null | toolkit/augment_data_conllu.py | danielhers/hit-scir-ucca-parser | 240508227c54e52ab9f1f988a1144f51bc8f25ca | [
"Apache-2.0"
] | 2 | 2020-05-28T13:16:39.000Z | 2022-02-15T01:58:03.000Z | import json
import collections
import argparse
from conllu.parser import parse_line, DEFAULT_FIELDS
parser = argparse.ArgumentParser(description='Augment Data')
parser.add_argument("conll", type=str, help="Augment CoNLL file")
parser.add_argument("mrp", type=str, help="Input MRP file")
parser.add_argument("output", type=str, help="Output Augmented file")
args = parser.parse_args()
conll_file = args.conll
mrp_file = args.mrp
out_file = args.output
augs = {}
with open(conll_file, 'r', encoding='utf8') as f_c:
conlls = f_c.read().split('\n\n')
for conll in conlls:
id = conll.split('\n')[0][1:]
augs[id] = [parse_line(line, DEFAULT_FIELDS) for line in conll.strip().split('\n')[1:]]
#print augs.keys()
with open(mrp_file, 'r', encoding='utf8') as f_m, open(out_file, 'w', encoding='utf8') as fo:
line = f_m.readline()
while line:
mrp = json.loads(line, object_pairs_hook=collections.OrderedDict)
id = mrp['id']
if id not in augs:
print("id:{} not in companion".format(id))
else:
mrp['companion'] = dict(sent_id=id, toks=augs[id])
fo.write((json.dumps(mrp)+'\n'))
line = f_m.readline()
| 33.735294 | 93 | 0.685266 |
6d2d0c45f9b236a7bef4e6546e2f07881f1354bb | 6,099 | py | Python | src/codebots/bots/sshbot.py | franaudo/codebots | f45ee9f665cadc4627d397ae74eefca226269656 | [
"MIT"
] | 5 | 2021-05-06T21:51:30.000Z | 2022-01-10T17:53:00.000Z | src/codebots/bots/sshbot.py | franaudo/codebots | f45ee9f665cadc4627d397ae74eefca226269656 | [
"MIT"
] | 10 | 2021-04-03T12:00:27.000Z | 2021-11-04T16:27:48.000Z | src/codebots/bots/sshbot.py | franaudo/codebots | f45ee9f665cadc4627d397ae74eefca226269656 | [
"MIT"
] | null | null | null | import paramiko
import os
import socket
from ._bot import BaseBot
__all__ = [
'sshBot'
]
class sshBot(BaseBot):
    """sshBot to help with ssh connections to a server.

    Credentials can be supplied in three mutually exclusive ways:
    a saved ``alias`` (resolved to ``<alias>.json`` in the tokens folder),
    an explicit ``config_file`` path, or keyword arguments.

    Parameters
    ----------
    alias : str, optional
        name of a saved credentials file, by default None.
    config_file : str, optional
        path to a json credentials file, by default None.
    **kwargs
        credentials given directly (``hostname``, ``username``,
        ``password``, ``pvtkey``).

    Attributes
    ----------
    hostname : str
        name or ip address of the server.
    host_address : str
        resolved ip address of the server.
    username : str
        username on the server.
    password : str
        password on the server (also used as the key passphrase).
    pvtkey : str
        path to the private RSA key file, if any.
    ssh_client : obj
        paramiko `SSHClient` object, if a connection is active, otherwise None.
    sfpt_client : obj
        paramiko `SFTPClient` object, if a connection is active, otherwise None.
    """

    def __init__(self, alias=None, config_file=None, **kwargs) -> None:
        self.__name__ = "sshbot"
        if not config_file:
            if not alias:
                if not kwargs:
                    raise ValueError("Either an existing config_file or the credentials must be passed")
                self._credentials = kwargs
                for k, v in self._credentials.items():
                    self.__setattr__(k, v)
            else:
                from .. import TOKENS
                config_file = os.path.join(TOKENS, f"{alias}.json")
                super().__init__(config_file)
        else:
            super().__init__(config_file)

        # Accept either a dotted ip directly or a resolvable host name.
        self.host_address = self.hostname if '.' in self.hostname else socket.gethostbyname(self.hostname)
        self._ssh_client = None
        self._sftp_client = None

    @property
    def ssh_client(self):
        """Active paramiko SSHClient, or None when disconnected."""
        return self._ssh_client

    @property
    def sfpt_client(self):
        """Active paramiko SFTPClient, or None when disconnected."""
        # BUG FIX: this previously returned the never-assigned
        # ``_sfpt_client`` attribute, raising AttributeError on access.
        return self._sftp_client

    def connect_ssh_client(self):
        """Establish the ssh connection.

        Returns
        -------
        obj
            paramiko SSHClient object
        """
        ssh_client = paramiko.SSHClient()
        ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        # Use key-based auth when a private key file is configured.
        k = paramiko.RSAKey.from_private_key_file(self.pvtkey, password=self.password) if self.pvtkey else None
        ssh_client.connect(hostname=self.host_address, username=self.username, password=self.password, pkey=k)
        print("\nconnected\n")
        return ssh_client

    def execute_cmds(self, commands, close_connection=True, verbose=True):
        """Execute general commands on the server side.

        Parameters
        ----------
        commands : list of str
            list of commands (str) to execute on the server.
        close_connection : bool (optional)
            if true close the ssh connection, by default True. Leave the
            connection open if you plan to run several commands.
        verbose : bool (optional)
            if true echo each command and its output, by default True.

        Returns
        -------
        dict
            ``{"stdout": [...], "stderr": [...]}`` with one entry per command.
        """
        if not self._ssh_client:
            self._ssh_client = self.connect_ssh_client()

        out_dict = {"stdout": [], "stderr": []}
        for command in commands:
            if verbose:
                print("Executing {}:\n".format(command))
            stdin, stdout, stderr = self._ssh_client.exec_command(command)
            out_dict["stdout"].append(stdout.read().rstrip().decode("utf-8"))
            out_dict["stderr"].append(stderr.read().rstrip().decode("utf-8"))
            if verbose:
                print(out_dict["stdout"][-1])
                print("\nErrors (if any):")
                print(out_dict["stderr"][-1])

        # close the connection
        if close_connection:
            self._ssh_client.close()
            self._ssh_client = None
        return out_dict

    def connect_sftp_client(self):
        """Connect to the server through SFTP on port 22.

        Returns
        -------
        obj
            paramiko sftp client object
        """
        transport = paramiko.Transport((self.host_address, 22))
        k = paramiko.RSAKey.from_private_key_file(self.pvtkey, password=self.password) if self.pvtkey else None
        transport.connect(username=self.username, password=self.password, pkey=k)
        return paramiko.SFTPClient.from_transport(transport)

    def get_folder_from_server(self, path, dest, recursive=True, close_connection=True):
        """Retrieve the content of a folder from the server.

        Parameters
        ----------
        path : str
            path to the folder on the server
        dest : str
            path to the folder on the client (local)
        recursive : bool (optional)
            if true get subfolders content, by default True.
        close_connection : bool (optional)
            if true close the sftp connection, by default True. Leave the
            connection open if you plan to run several transfers.

        Warnings
        --------
        Entries are classified by the presence of a ``.`` in the name, so
        remote files without an extension are treated as folders!
        """
        # connect to the server through SFTP, reusing an open client if any
        if not self._sftp_client:
            self._sftp_client = self.connect_sftp_client()
            print("connection enstablished")

        for item in self._sftp_client.listdir(path):
            remotefile = os.path.join(path, str(item))
            if not os.path.isdir(dest):
                os.mkdir(dest)
            localfilepath = os.path.join(dest, str(item))
            # check if it is a folder (note file w/o ext will throw an exception!)
            if '.' in item:
                self._sftp_client.get(remotefile, localfilepath)
            elif recursive:
                # BUG FIX: the recursive call used to pass the sftp client as
                # the ``path`` argument (and re-used the default
                # ``close_connection=True``). Recurse on the remote path and
                # keep the connection open until the top-level call returns.
                self.get_folder_from_server(remotefile, localfilepath,
                                            recursive=True, close_connection=False)
        print("files tranferred!")

        # close the connection
        if close_connection:
            self._sftp_client.close()
            self._sftp_client = None
            print("connection closed")
| 33.510989 | 125 | 0.596655 |
7ba76ddde113297d694a9713db8e1e4be32ac8c5 | 338 | py | Python | api/run.py | pikulak/pywdbms | 1b4f4118f7956bd6ee885a3e7908ef3f61ed6890 | [
"Apache-2.0"
] | 2 | 2018-03-24T18:54:52.000Z | 2020-03-18T20:33:16.000Z | api/run.py | pikulak/pywdbms | 1b4f4118f7956bd6ee885a3e7908ef3f61ed6890 | [
"Apache-2.0"
] | null | null | null | api/run.py | pikulak/pywdbms | 1b4f4118f7956bd6ee885a3e7908ef3f61ed6890 | [
"Apache-2.0"
] | 1 | 2020-03-18T20:33:18.000Z | 2020-03-18T20:33:18.000Z | #!c:/python34/python.exe
import sys
import os
import os
p = os.path.dirname(os.path.join(os.path.realpath(__file__), "../../../"))
sys.path.insert(0, p)
from flask import Flask
from pywdbms.api.app import blueprint
app = Flask(__name__)
app.secret_key = 'some_secret'
app.register_blueprint(blueprint)
app.config['DEBUG'] = True
app.run() | 26 | 74 | 0.739645 |
b42f8ffe99430f4f4b5c52e261b0463b44d1c79a | 7,300 | py | Python | FTSensor/pybind11-master/tests/test_stl_binders.py | yanglh14/InteractiveGrasping | b5bc1866a1847e7b0c11616fd6cbe949c64a355b | [
"MIT"
] | 3 | 2021-04-14T08:24:40.000Z | 2021-11-04T04:10:19.000Z | FTSensor/pybind11-master/tests/test_stl_binders.py | yanglh14/InteractiveGrasping | b5bc1866a1847e7b0c11616fd6cbe949c64a355b | [
"MIT"
] | null | null | null | FTSensor/pybind11-master/tests/test_stl_binders.py | yanglh14/InteractiveGrasping | b5bc1866a1847e7b0c11616fd6cbe949c64a355b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
import sys
from pybind11_tests import stl_binders as m
with pytest.suppress(ImportError):
import numpy as np
def test_vector_int():
    """Exercise the bound std::vector<int>: construction, comparison,
    insert/append/slice/delete/extend semantics, and negative indexing."""
    v_int = m.VectorInt([0, 0])
    assert len(v_int) == 2
    assert bool(v_int) is True
    # test construction from a generator
    v_int1 = m.VectorInt(x for x in range(5))
    assert v_int1 == m.VectorInt([0, 1, 2, 3, 4])
    v_int2 = m.VectorInt([0, 0])
    assert v_int == v_int2
    v_int2[1] = 1
    assert v_int != v_int2
    v_int2.append(2)
    v_int2.insert(0, 1)
    v_int2.insert(0, 2)
    v_int2.insert(0, 3)
    v_int2.insert(6, 3)
    assert str(v_int2) == "VectorInt[3, 2, 1, 0, 1, 2, 3]"
    # inserting past the end must raise, like list.insert does not but
    # the binding enforces bounds here
    with pytest.raises(IndexError):
        v_int2.insert(8, 4)
    v_int.append(99)
    v_int2[2:-2] = v_int
    assert v_int2 == m.VectorInt([3, 2, 0, 0, 99, 2, 3])
    del v_int2[1:3]
    assert v_int2 == m.VectorInt([3, 0, 99, 2, 3])
    del v_int2[0]
    assert v_int2 == m.VectorInt([0, 99, 2, 3])
    v_int2.extend(m.VectorInt([4, 5]))
    assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5])
    v_int2.extend([6, 7])
    assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5, 6, 7])
    # test error handling, and that the vector is unchanged
    with pytest.raises(RuntimeError):
        v_int2.extend([8, 'a'])
    assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5, 6, 7])
    # test extending from a generator
    v_int2.extend(x for x in range(5))
    assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4])
    # test negative indexing
    assert v_int2[-1] == 4
    # insert with negative index
    v_int2.insert(-1, 88)
    assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 88, 4])
    # delete negative index
    del v_int2[-1]
    assert v_int2 == m.VectorInt([0, 99, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 88])
    v_int2.clear()
    assert len(v_int2) == 0
# related to the PyPy's buffer protocol.
@pytest.unsupported_on_pypy
def test_vector_buffer():
    """Verify the buffer protocol on a bound vector: writes through a
    memoryview must be reflected in the vector and vice versa."""
    b = bytearray([1, 2, 3, 4])
    v = m.VectorUChar(b)
    assert v[1] == 2
    v[2] = 5
    mv = memoryview(v)  # We expose the buffer interface
    # memoryview item type differs between Python 2 and 3
    if sys.version_info.major > 2:
        assert mv[2] == 5
        mv[2] = 6
    else:
        assert mv[2] == '\x05'
        mv[2] = '\x06'
    assert v[2] == 6
    # A struct never declared to pybind11 has no NumPy format info,
    # so requesting its buffer must fail with a descriptive error.
    with pytest.raises(RuntimeError) as excinfo:
        m.create_undeclstruct()  # Undeclared struct contents, no buffer interface
    assert "NumPy type info missing for " in str(excinfo.value)
@pytest.unsupported_on_pypy
@pytest.requires_numpy
def test_vector_buffer_numpy():
    """Verify vector construction from NumPy arrays (contiguous rows and
    strided columns) and round-tripping structured arrays of VectorStruct."""
    a = np.array([1, 2, 3, 4], dtype=np.int32)
    # int32 does not match the bound element type (unsigned int) exactly
    with pytest.raises(TypeError):
        m.VectorInt(a)
    a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], dtype=np.uintc)
    v = m.VectorInt(a[0, :])
    assert len(v) == 4
    assert v[2] == 3
    ma = np.asarray(v)
    ma[2] = 5
    # the asarray view shares storage with the vector
    assert v[2] == 5
    # construction from a strided (column) slice must also work
    v = m.VectorInt(a[:, 1])
    assert len(v) == 3
    assert v[2] == 10
    v = m.get_vectorstruct()
    assert v[0].x == 5
    ma = np.asarray(v)
    ma[1]['x'] = 99
    assert v[1].x == 99
    v = m.VectorStruct(np.zeros(3, dtype=np.dtype([('w', 'bool'), ('x', 'I'),
                       ('y', 'float64'), ('z', 'bool')], align=True)))
    assert len(v) == 3
def test_vector_bool():
    """Exercise std::vector<bool> bound in a separate module (cross-module
    type sharing) with alternating values."""
    import pybind11_cross_module_tests as cm
    vv_c = cm.VectorBool()
    for i in range(10):
        vv_c.append(i % 2 == 0)
    for i in range(10):
        assert vv_c[i] == (i % 2 == 0)
    assert str(vv_c) == "VectorBool[1, 0, 1, 0, 1, 0, 1, 0, 1, 0]"
def test_vector_custom():
    """Exercise vectors of a custom element type, including nested
    vector-of-vector access."""
    v_a = m.VectorEl()
    v_a.append(m.El(1))
    v_a.append(m.El(2))
    assert str(v_a) == "VectorEl[El{1}, El{2}]"
    vv_a = m.VectorVectorEl()
    vv_a.append(v_a)
    vv_b = vv_a[0]
    assert str(vv_b) == "VectorEl[El{1}, El{2}]"
def test_map_string_double():
    """Exercise bound std::map and std::unordered_map with string keys:
    iteration order, items(), and string representation."""
    mm = m.MapStringDouble()
    mm['a'] = 1
    mm['b'] = 2.5
    # std::map iterates in key order
    assert list(mm) == ['a', 'b']
    assert list(mm.items()) == [('a', 1), ('b', 2.5)]
    assert str(mm) == "MapStringDouble{a: 1, b: 2.5}"
    um = m.UnorderedMapStringDouble()
    um['ua'] = 1.1
    um['ub'] = 2.6
    # unordered_map has no guaranteed order, so compare sorted views
    assert sorted(list(um)) == ['ua', 'ub']
    assert sorted(list(um.items())) == [('ua', 1.1), ('ub', 2.6)]
    assert "UnorderedMapStringDouble" in str(um)
def test_map_string_double_const():
    """Exercise maps with const-qualified mapped types (assignment and
    string representation must still work)."""
    mc = m.MapStringDoubleConst()
    mc['a'] = 10
    mc['b'] = 20.5
    assert str(mc) == "MapStringDoubleConst{a: 10, b: 20.5}"
    umc = m.UnorderedMapStringDoubleConst()
    umc['a'] = 11
    umc['b'] = 21.5
    str(umc)
def test_noncopyable_containers():
    """Exercise containers of non-copyable element types: indexed access
    and iteration must work by reference for vector, deque, (unordered)
    map, and nested container combinations."""
    # std::vector
    vnc = m.get_vnc(5)
    for i in range(0, 5):
        assert vnc[i].value == i + 1
    for i, j in enumerate(vnc, start=1):
        assert j.value == i
    # std::deque
    dnc = m.get_dnc(5)
    for i in range(0, 5):
        assert dnc[i].value == i + 1
    i = 1
    for j in dnc:
        assert(j.value == i)
        i += 1
    # std::map
    mnc = m.get_mnc(5)
    for i in range(1, 6):
        assert mnc[i].value == 10 * i
    vsum = 0
    for k, v in mnc.items():
        assert v.value == 10 * k
        vsum += v.value
    assert vsum == 150
    # std::unordered_map
    mnc = m.get_umnc(5)
    for i in range(1, 6):
        assert mnc[i].value == 10 * i
    vsum = 0
    for k, v in mnc.items():
        assert v.value == 10 * k
        vsum += v.value
    assert vsum == 150
    # nested std::map<std::vector>
    nvnc = m.get_nvnc(5)
    for i in range(1, 6):
        for j in range(0, 5):
            assert nvnc[i][j].value == j + 1
    # Note: maps do not have .values()
    for _, v in nvnc.items():
        for i, j in enumerate(v, start=1):
            assert j.value == i
    # nested std::map<std::map>
    nmnc = m.get_nmnc(5)
    for i in range(1, 6):
        for j in range(10, 60, 10):
            assert nmnc[i][j].value == 10 * j
    vsum = 0
    for _, v_o in nmnc.items():
        for k_i, v_i in v_o.items():
            assert v_i.value == 10 * k_i
            vsum += v_i.value
    assert vsum == 7500
    # nested std::unordered_map<std::unordered_map>
    numnc = m.get_numnc(5)
    for i in range(1, 6):
        for j in range(10, 60, 10):
            assert numnc[i][j].value == 10 * j
    vsum = 0
    for _, v_o in numnc.items():
        for k_i, v_i in v_o.items():
            assert v_i.value == 10 * k_i
            vsum += v_i.value
    assert vsum == 7500
def test_map_delitem():
    """Verify `del map[key]` removes entries from both std::map and
    std::unordered_map bindings."""
    mm = m.MapStringDouble()
    mm['a'] = 1
    mm['b'] = 2.5
    assert list(mm) == ['a', 'b']
    assert list(mm.items()) == [('a', 1), ('b', 2.5)]
    del mm['a']
    assert list(mm) == ['b']
    assert list(mm.items()) == [('b', 2.5)]
    um = m.UnorderedMapStringDouble()
    um['ua'] = 1.1
    um['ub'] = 2.6
    assert sorted(list(um)) == ['ua', 'ub']
    assert sorted(list(um.items())) == [('ua', 1.1), ('ub', 2.6)]
    del um['ua']
    assert sorted(list(um)) == ['ub']
    assert sorted(list(um.items())) == [('ub', 2.6)]
| 26.071429 | 99 | 0.518356 |
9dfccf7d037f7cb20b12ffacdf90aeb230d1cd6a | 3,695 | py | Python | dissect/protos/inet.py | AKOU0/dissect | b521153d86fe94dddc04846eb7ba3b6196917ee7 | [
"Apache-2.0"
] | 19 | 2015-07-08T18:51:40.000Z | 2020-03-08T16:06:16.000Z | dissect/protos/inet.py | AKOU0/dissect | b521153d86fe94dddc04846eb7ba3b6196917ee7 | [
"Apache-2.0"
] | 5 | 2016-02-24T15:23:13.000Z | 2019-11-09T11:23:47.000Z | dissect/protos/inet.py | AKOU0/dissect | b521153d86fe94dddc04846eb7ba3b6196917ee7 | [
"Apache-2.0"
] | 11 | 2015-10-22T00:32:20.000Z | 2017-07-14T01:45:14.000Z | import socket
from vstruct2.types import *
'''
Inet Packet Structures
'''
# EtherType values found in the Ethernet II "type" field.
ethp = venum()
ethp.ipv4 = 0x0800
ethp.ipv6 = 0x86dd
ethp.vlan = 0x8100

# IP protocol numbers (IPv4 "protocol" / IPv6 "next header" field).
ipproto = venum()
ipproto.ICMP = 1
ipproto.TCP = 6
ipproto.UDP = 17
ipproto.IPV6 = 41

# TCP header flag bits (for the TCP.flags field).
TCP_F_FIN = 0x01
TCP_F_SYN = 0x02
TCP_F_RST = 0x04
TCP_F_PUSH = 0x08
TCP_F_ACK = 0x10
TCP_F_URG = 0x20
TCP_F_ECE = 0x40
TCP_F_CWR = 0x80

# Useful combinations...
TCP_F_SYNACK = (TCP_F_SYN | TCP_F_ACK)

# ICMP message type codes (for the ICMP.type field).
icmptypes = venum()
icmptypes.ECHOREPLY = 0
icmptypes.DEST_UNREACH = 3
icmptypes.SOURCE_QUENCH = 4
icmptypes.REDIRECT = 5
icmptypes.ECHO = 8
icmptypes.TIME_EXCEEDED = 11
icmptypes.PARAMETERPROB = 12
icmptypes.TIMESTAMP = 13
icmptypes.TIMESTAMPREPLY = 14
icmptypes.INFO_REQUEST = 15
icmptypes.INFO_REPLY = 16
icmptypes.ADDRESS = 17
icmptypes.ADDRESSREPLY = 18
class IPv4Addr(uint32):
    """IPv4 address stored as a uint32; repr shows dotted-quad notation."""
    def __repr__(self):
        return socket.inet_ntop(socket.AF_INET, bytes(self))
class IPv6Addr(vbytes):
    """IPv6 address stored as 16 raw bytes; repr shows standard notation."""
    def __init__(self):
        vbytes.__init__(self, size=16)

    def __repr__(self):
        return socket.inet_ntop(socket.AF_INET6, self._vs_value)
class ETHERII(VStruct):
    """Ethernet II frame header (big-endian), with 802.1Q VLAN tag fields
    appended automatically when the EtherType indicates a tagged frame."""
    def __init__(self):
        VStruct.__init__(self)
        self._vs_endian = 'big'
        self.destmac = vbytes(size=6)
        self.srcmac = vbytes(size=6)
        self.etype = uint16(enum=ethp)
        self['etype'].vsOnset( self._onSetEtype )

    def _onSetEtype(self):
        # append vlan tags if needed
        # BUG FIX: the callback previously compared the unbound name
        # `etype` (NameError at parse time); read the field from self.
        if self.etype == ethp.vlan:
            self.vtag = uint16()
            self.vvlan = uint16()
class IPv4(VStruct):
    """IPv4 header (big-endian); IP options are appended as a raw-bytes
    field when the header length exceeds the fixed 20 bytes."""
    def __init__(self):
        VStruct.__init__(self)
        self._vs_endian = 'big'
        self.veriphl = uint8()      # version (high nibble) / header len in words (low nibble)
        self.tos = uint8()
        self.totlen = uint16()
        self.ipid = uint16()
        self.flagfrag = uint16()    # flags + fragment offset
        self.ttl = uint8()
        self.proto = uint8(enum=ipproto)
        self.cksum = uint16()
        self.srcaddr = IPv4Addr()
        self.dstaddr = IPv4Addr()
        self['veriphl'].vsOnset( self._onSetVerIphl )

    def _onSetVerIphl(self):
        # Header length is the low nibble counted in 32-bit words.
        iphl = (self.veriphl & 0xf) * 4
        if iphl > 20:
            # Anything beyond the fixed header is IP options.
            self.ipopts = vbytes( iphl - 20 )
class IPv6(VStruct):
    """IPv6 fixed header (big-endian). Extension headers are not parsed
    here; `nexthdr` identifies what follows."""
    def __init__(self):
        VStruct.__init__(self)
        self._vs_endian = 'big'
        self.verclsflowl= uint32()  # version / traffic class / flow label
        self.totlen = uint16()
        self.nexthdr = uint8()
        self.hoplimit = uint8()
        self.srcaddr = IPv6Addr()
        self.dstaddr = IPv6Addr()
class TCP(VStruct):
    """TCP header (big-endian); TCP options are appended as a raw-bytes
    field when the data offset indicates a header longer than 20 bytes."""
    def __init__(self):
        VStruct.__init__(self)
        self._vs_endian = 'big'
        self.srcport = uint16()
        self.dstport = uint16()
        self.sequence = uint32()
        self.ackseq = uint32()
        self.doff = uint8()
        self.flags = uint8()
        self.window = uint16()
        self.checksum = uint16()
        self.urgent = uint16()
        self['doff'].vsOnset( self._onSetDoff )

    def _onSetDoff(self):
        # Header length in bytes derived from the data-offset byte.
        off = (self.doff >> 2)
        # FIX: use a strict comparison so that a plain 20-byte header does
        # not gain a useless zero-length `tcpopts` field (now consistent
        # with IPv4._onSetVerIphl above).
        if off > 20:
            self.tcpopts = vbytes( off - 20 )
class UDP(VStruct):
    """UDP header (big-endian): ports, length, and checksum."""
    def __init__(self):
        VStruct.__init__(self)
        self._vs_endian = 'big'
        self.srcport = uint16()
        self.dstport = uint16()
        self.udplen = uint16()
        self.checksum = uint16()
class ICMP(VStruct):
    """ICMP header (big-endian): type/code/checksum; the message body is
    not parsed here."""
    def __init__(self):
        VStruct.__init__(self)
        self._vs_endian = 'big'
        self.type = uint8(enum=icmptypes)
        self.code = uint8()
        self.checksum = uint16()
4792aab359a1b6db125e40b5f67b8a922b7ce41a | 3,408 | py | Python | Lib/site-packages/wx-3.0-msw/wx/tools/Editra/src/syntax/_matlab.py | jickieduan/python27 | c752b552396bbed68d8555080d475718cea2edd0 | [
"bzip2-1.0.6"
] | 9 | 2018-10-15T04:57:37.000Z | 2021-12-07T07:39:35.000Z | Lib/site-packages/wx-3.0-msw/wx/tools/Editra/src/syntax/_matlab.py | jickieduan/python27 | c752b552396bbed68d8555080d475718cea2edd0 | [
"bzip2-1.0.6"
] | 13 | 2018-10-19T11:52:44.000Z | 2021-09-08T00:39:30.000Z | Lib/site-packages/wx-3.0-msw/wx/tools/Editra/src/syntax/_matlab.py | jickieduan/python27 | c752b552396bbed68d8555080d475718cea2edd0 | [
"bzip2-1.0.6"
] | 3 | 2018-10-25T11:08:04.000Z | 2021-02-23T08:28:31.000Z | ###############################################################################
# Name: matlab.py #
# Purpose: Define Matlab and Octave syntax for highlighting and other features#
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2007 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
FILE: matlab.py
AUTHOR: Cody Precord
@summary: Lexer configuration module for Matlab and Octave
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: _matlab.py 68798 2011-08-20 17:17:05Z CJP $"
__revision__ = "$Revision: 68798 $"
#-----------------------------------------------------------------------------#
# Imports
import wx.stc as stc
# Local Imports
import synglob
import syndata
#-----------------------------------------------------------------------------#
#---- Keyword Definitions ----#
# Each keyword spec is (keyword-set index, space-separated keywords).
MATLAB_KW = (0, "break case catch continue else elseif end for function "
                "global if otherwise persistent return switch try while")

# Octave extends the Matlab set with its explicit block terminators.
OCTAVE_KW = (0, "break case catch continue do else elseif end "
                "end_unwind_protect endfor endif endswitch endwhile for "
                "function endfunction global if otherwise persistent return "
                "switch try until unwind_protect unwind_protect_cleanup while")

#---- End Keyword Definitions ----#

#---- Syntax Style Specs ----#
# Maps Scintilla lexer token ids to Editra style tags.
SYNTAX_ITEMS = [(stc.STC_MATLAB_COMMAND, 'funct_style'),
                (stc.STC_MATLAB_COMMENT, 'comment_style'),
                (stc.STC_MATLAB_DEFAULT, 'default_style'),
                (stc.STC_MATLAB_DOUBLEQUOTESTRING, 'string_style'),
                (stc.STC_MATLAB_IDENTIFIER, 'default_style'),
                (stc.STC_MATLAB_KEYWORD, 'keyword_style'),
                (stc.STC_MATLAB_NUMBER, 'number_style'),
                (stc.STC_MATLAB_OPERATOR, 'operator_style'),
                (stc.STC_MATLAB_STRING, 'string_style')]

#---- Extra Properties ----#
# Enable code folding in the lexer.
FOLD = ('fold', '1')

#-----------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
    """SyntaxData object for MatLab and Octave"""

    def __init__(self, langid):
        super(SyntaxData, self).__init__(langid)
        # Matlab and Octave share styling but use distinct Scintilla lexers.
        if self.LangId == synglob.ID_LANG_MATLAB:
            self.SetLexer(stc.STC_LEX_MATLAB)
        else:
            self.SetLexer(stc.STC_LEX_OCTAVE)

    def GetKeywords(self):
        """Returns Specified Keywords List """
        keyword_sets = {
            synglob.ID_LANG_MATLAB: [MATLAB_KW],
            synglob.ID_LANG_OCTAVE: [OCTAVE_KW],
        }
        return keyword_sets.get(self.LangId, list())

    def GetSyntaxSpec(self):
        """Syntax Specifications """
        return SYNTAX_ITEMS

    def GetProperties(self):
        """Returns a list of Extra Properties to set """
        return [FOLD]

    def GetCommentPattern(self):
        """Returns a list of characters used to comment a block of code """
        comment_chars = {
            synglob.ID_LANG_MATLAB: [u'%'],
            synglob.ID_LANG_OCTAVE: [u'#'],
        }
        return comment_chars.get(self.LangId, list())
| 37.043478 | 79 | 0.528756 |
b3547ae8f1fe2b5823cf14f96f68eb9ea585614d | 86,599 | py | Python | psc/postgresql/driver/pq3.py | masterlee998/db_converter | ea43ede1e1e9654628eb0175ece2c35a7c542719 | [
"MIT"
] | 9 | 2020-04-21T01:58:32.000Z | 2021-02-03T16:31:26.000Z | psc/postgresql/driver/pq3.py | masterlee998/db_converter | ea43ede1e1e9654628eb0175ece2c35a7c542719 | [
"MIT"
] | null | null | null | psc/postgresql/driver/pq3.py | masterlee998/db_converter | ea43ede1e1e9654628eb0175ece2c35a7c542719 | [
"MIT"
] | 3 | 2021-01-25T17:36:56.000Z | 2021-05-24T12:01:05.000Z | ##
# .driver.pq3 - interface to PostgreSQL using PQ v3.0.
##
"""
PG-API interface for PostgreSQL using PQ version 3.0.
"""
import os
import weakref
import socket
from traceback import format_exception
from itertools import repeat, chain, count
from functools import partial
from abc import abstractmethod
from codecs import lookup as lookup_codecs
from operator import itemgetter
get0 = itemgetter(0)
get1 = itemgetter(1)
from .. import lib as pg_lib
from .. import versionstring as pg_version
from .. import iri as pg_iri
from .. import exceptions as pg_exc
from .. import string as pg_str
from .. import api as pg_api
from .. import message as pg_msg
from ..encodings.aliases import get_python_name
from ..string import quote_ident
from ..python.itertools import interlace, chunk
from ..python.socket import SocketFactory
from ..python.functools import process_tuple, process_chunk
from ..python.functools import Composition as compose
from ..protocol import xact3 as xact
from ..protocol import element3 as element
from ..protocol import client3 as client
from ..protocol.message_types import message_types
from ..notifyman import NotificationManager
from .. import types as pg_types
from ..types import io as pg_types_io
from ..types.io import lib as io_lib
import warnings
# Map element3.Notice field identifiers
# to names used by message.Message.
notice_field_to_name = {
	message_types[b'S'[0]] : 'severity',
	message_types[b'C'[0]] : 'code',
	message_types[b'M'[0]] : 'message',
	message_types[b'D'[0]] : 'detail',
	message_types[b'H'[0]] : 'hint',
	message_types[b'W'[0]] : 'context',
	message_types[b'P'[0]] : 'position',
	message_types[b'p'[0]] : 'internal_position',
	message_types[b'q'[0]] : 'internal_query',
	message_types[b'F'[0]] : 'file',
	message_types[b'L'[0]] : 'line',
	message_types[b'R'[0]] : 'function',
}
del message_types

# Inverse mapping: message attribute name -> protocol field identifier.
notice_field_from_name = dict(
	(v, k) for (k, v) in notice_field_to_name.items()
)

# Canonical client-side error emitted when the server is unreachable
# (SQLSTATE 08001: sqlclient_unable_to_establish_sqlconnection).
could_not_connect = element.ClientError((
	(b'S', 'FATAL'),
	(b'C', '08001'),
	(b'M', "could not establish connection to server"),
))
# generate an id for a client statement or cursor
def ID(s, title = None, IDNS = 'py:'):
	"""Build a client-side identifier for *s* from its object id.

	`title` is accepted for interface compatibility but unused.
	"""
	return '%s%s' % (IDNS, hex(id(s)))
def declare_statement_string(
	cursor_id,
	statement_string,
	insensitive = True,
	scroll = True,
	hold = True
):
	"""Build a ``DECLARE ... CURSOR ... FOR <statement>`` SQL string.

	The sensitivity/scroll/hold keywords are emitted only when the
	corresponding flag is exactly True (matching the server defaults
	otherwise).
	"""
	parts = ['DECLARE', cursor_id]
	if insensitive is True:
		parts.append('INSENSITIVE')
	if scroll is True:
		parts.append('SCROLL')
	parts.append('CURSOR')
	parts.append('WITH HOLD' if hold is True else 'WITHOUT HOLD')
	parts.append('FOR')
	parts.append(statement_string)
	return ' '.join(parts)
def direction_str_to_bool(str):
	"""Map 'FORWARD'/'BACKWARD' (any case) to True/False.

	Raises ValueError for any other string.
	"""
	mapping = {'FORWARD': True, 'BACKWARD': False}
	try:
		return mapping[str.upper()]
	except KeyError:
		raise ValueError("invalid direction " + repr(str))
def direction_to_bool(v):
	"""Normalize a direction given as a string or bool to a bool.

	Strings go through direction_str_to_bool(); exact True/False pass
	through; anything else raises TypeError.
	"""
	if isinstance(v, str):
		return direction_str_to_bool(v)
	if v is True or v is False:
		return v
	raise TypeError("invalid direction " + repr(v))
class TypeIO(pg_api.TypeIO):
"""
A class that manages I/O for a given configuration. Normally, a connection
would create an instance, and configure it based upon the version and
configuration of PostgreSQL that it is connected to.
"""
_e_factors = ('database',)
strio = (None, None, str)
def __init__(self, database):
self.database = database
self.encoding = None
strio = self.strio
self._cache = {
# Encoded character strings
pg_types.ACLITEMOID : strio, # No binary functions.
pg_types.NAMEOID : strio,
pg_types.BPCHAROID : strio,
pg_types.VARCHAROID : strio,
pg_types.CSTRINGOID : strio,
pg_types.TEXTOID : strio,
pg_types.REGTYPEOID : strio,
pg_types.REGPROCOID : strio,
pg_types.REGPROCEDUREOID : strio,
pg_types.REGOPEROID : strio,
pg_types.REGOPERATOROID : strio,
pg_types.REGCLASSOID : strio,
}
self.typinfo = {}
super().__init__()
def lookup_type_info(self, typid):
return self.database.sys.lookup_type(typid)
def lookup_composite_type_info(self, typid):
return self.database.sys.lookup_composite(typid)
def lookup_domain_basetype(self, typid):
if self.database.version_info[:2] >= (8, 4):
return self.lookup_domain_basetype_84(typid)
while typid:
r = self.database.sys.lookup_basetype(typid)
if not r[0][0]:
return typid
else:
typid = r[0][0]
def lookup_domain_basetype_84(self, typid):
r = self.database.sys.lookup_basetype_recursive(typid)
return r[0][0]
def set_encoding(self, value):
"""
Set a new client encoding.
"""
self.encoding = value.lower().strip()
enc = get_python_name(self.encoding)
ci = lookup_codecs(enc or self.encoding)
self._encode, self._decode, *_ = ci
def encode(self, string_data):
return self._encode(string_data)[0]
def decode(self, bytes_data):
return self._decode(bytes_data)[0]
def encodes(self, iter, get0 = get0):
"""
Encode the items in the iterable in the configured encoding.
"""
return map(compose((self._encode, get0)), iter)
def decodes(self, iter, get0 = get0):
"""
Decode the items in the iterable from the configured encoding.
"""
return map(compose((self._decode, get0)), iter)
def resolve_pack(self, typid):
return self.resolve(typid)[0] or self.encode
def resolve_unpack(self, typid):
return self.resolve(typid)[1] or self.decode
def attribute_map(self, pq_descriptor):
return zip(self.decodes(pq_descriptor.keys()), count())
def sql_type_from_oid(self, oid, qi = quote_ident):
    "Return the SQL spelling of *oid*: built-in name, cached composite, or None."
    known = pg_types.oid_to_sql_name
    if oid in known:
        return known[oid]
    info = self.typinfo.get(oid)
    if info is not None:
        # Catalog-resolved type: quote namespace and name.
        nsp, name = info[0], info[1]
        return qi(nsp) + '.' + qi(name)
    builtin = pg_types.oid_to_name.get(oid)
    if builtin:
        return 'pg_catalog.%s' % builtin
    return None
def type_from_oid(self, oid):
    "Return the Python type recorded for *oid* in the I/O cache, if any."
    entry = self._cache.get(oid)
    if entry is not None:
        return entry[2]
def resolve_descriptor(self, desc, index):
    'create a sequence of I/O routines from a pq descriptor'
    routines = []
    for field in desc:
        # field[3] is the type oid; index selects pack (0) or unpack (1).
        io = self.resolve(field[3]) or (None, None)
        routines.append(io[index])
    return routines
# lookup a type's IO routines from a given typid
def resolve(self,
    typid : "The Oid of the type to resolve pack and unpack routines for.",
    from_resolution_of : \
        "Sequence of typid's used to identify infinite recursion" = (),
    builtins : "types.io.resolve" = pg_types_io.resolve,
    quote_ident = quote_ident
):
    """
    Return the (pack, unpack[, type]) I/O entry for *typid*.

    Resolution order: the oid cache, the built-in I/O table, then a
    catalog lookup that builds composite/array/domain handlers on
    demand.  Unresolvable types fall back to plain string I/O (strio).
    """
    if from_resolution_of and typid in from_resolution_of:
        # Composite/domain members are resolved recursively; seeing the
        # same oid again means the catalog data is cyclic.
        raise TypeError(
            "type, %d, is already being looked up: %r" %(
                typid, from_resolution_of
            )
        )
    typid = int(typid)
    typio = None
    if typid in self._cache:
        typio = self._cache[typid]
    else:
        typio = builtins(typid)
        if typio is not None:
            # If typio is a tuple, it's a constant pair: (pack, unpack)
            # otherwise, it's an I/O pair constructor.
            if typio.__class__ is not tuple:
                typio = typio(typid, self)
            self._cache[typid] = typio
    if typio is None:
        # Lookup the type information for the typid as it's not cached.
        ##
        ti = self.lookup_type_info(typid)
        if ti is not None:
            typnamespace, typname, typtype, typlen, typelem, typrelid, \
                ae_typid, ae_hasbin_input, ae_hasbin_output = ti
            # Remember name/relid/element for sql_type_from_oid() et al.
            self.typinfo[typid] = (
                typnamespace, typname, typrelid, int(typelem) if ae_typid else None
            )
            if typrelid:
                # Row type
                #
                # The attribute name map,
                # column I/O,
                # column type Oids
                # are needed to build the packing pair.
                attmap = {}
                cio = []
                typids = []
                attnames = []
                i = 0
                for x in self.lookup_composite_type_info(typrelid):
                    attmap[x[1]] = i
                    attnames.append(x[1])
                    if x[2]:
                        # This is a domain
                        fieldtypid = self.lookup_domain_basetype(x[0])
                    else:
                        fieldtypid = x[0]
                    typids.append(x[0])
                    # Recurse with this oid on the resolution stack.
                    te = self.resolve(
                        fieldtypid, list(from_resolution_of) + [typid]
                    )
                    cio.append((te[0] or self.encode, te[1] or self.decode))
                    i += 1
                self._cache[typid] = typio = self.record_io_factory(
                    cio, typids, attmap, list(
                        map(self.sql_type_from_oid, typids)
                    ), attnames,
                    typrelid,
                    quote_ident(typnamespace) + '.' + \
                    quote_ident(typname),
                )
            elif ae_typid is not None:
                # resolve the element type and I/O pair
                te = self.resolve(
                    int(typelem),
                    from_resolution_of = list(from_resolution_of) + [typid]
                ) or (None, None)
                typio = self.array_io_factory(
                    te[0] or self.encode,
                    te[1] or self.decode,
                    typelem,
                    ae_hasbin_input,
                    ae_hasbin_output
                )
                self._cache[typid] = typio
            else:
                typio = None
                if typtype == b'd':
                    # Domain: use the base type's I/O directly.
                    basetype = self.lookup_domain_basetype(typid)
                    typio = self.resolve(
                        basetype,
                        from_resolution_of = list(from_resolution_of) + [typid]
                    )
                elif typtype == b'p' and typnamespace == 'pg_catalog' and typname == 'record':
                    # anonymous record type
                    typio = self.anon_record_io_factory()
                if not typio:
                    typio = self.strio
                self._cache[typid] = typio
        else:
            # Throw warning about type without entry in pg_type?
            typio = self.strio
    return typio
def identify(self, **identity_mappings):
    """
    Explicitly designate the I/O handler for the specified type.
    Primarily used in cases involving UDTs.
    """
    # get them ordered; we process separately, then recombine.
    id = list(identity_mappings.items())
    # Keyword names select built-in I/O entries; values are regtype
    # expressions resolved to oids by the server — TODO confirm.
    ios = [pg_types_io.resolve(x[0]) for x in id]
    oids = list(self.database.sys.regtypes([x[1] for x in id]))
    # Constant pairs are cached as-is; constructors are invoked first.
    self._cache.update([
        (oid, io if io.__class__ is tuple else io(oid, self))
        for oid, io in zip(oids, ios)
    ])
def array_parts(self, array, ArrayType = pg_types.Array):
    "Decompose *array* into its (elements, dimensions, lowerbounds) parts."
    if array.__class__ is not ArrayType:
        # Assume the data is a nested list.
        array = ArrayType(array)
    return (array.elements(), array.dimensions, array.lowerbounds)
def array_from_parts(self, parts, ArrayType = pg_types.Array):
    "Rebuild an Array from an (elements, dimensions, lowerbounds) triple."
    elements, dims, lbs = parts
    # Upper bound of each dimension is lower bound + extent - 1.
    ubs = [extent + lb - 1 for extent, lb in zip(dims, lbs)]
    return ArrayType.from_elements(
        elements,
        lowerbounds = lbs,
        upperbounds = ubs
    )
##
# array_io_factory - build I/O pair for ARRAYs
##
def array_io_factory(
    self,
    pack_element, unpack_element,
    typoid, # array element id
    hasbin_input, hasbin_output,
    array_pack = io_lib.array_pack,
    array_unpack = io_lib.array_unpack,
):
    """
    Build the (pack, unpack, type) triple for ARRAY columns whose element
    type is *typoid*.

    A pack or unpack entry of None signals that the element type has no
    binary I/O and string formatting must be used instead.
    """
    # [removed dead local: io_lib.ulong_pack(typoid) was computed and never used]
    if hasbin_input:
        def pack_an_array(data, get_parts = self.array_parts):
            elements, dimensions, lowerbounds = get_parts(data)
            return array_pack((
                0, # unused flags
                typoid, dimensions, lowerbounds,
                # None elements (SQL NULL) pass through unpacked.
                (x if x is None else pack_element(x) for x in elements),
            ))
    else:
        # signals string formatting
        pack_an_array = None
    if hasbin_output:
        def unpack_an_array(data, array_from_parts = self.array_from_parts):
            flags, typoid, dims, lbs, elements = array_unpack(data)
            return array_from_parts(((x if x is None else unpack_element(x) for x in elements), dims, lbs))
    else:
        # signals string formatting
        unpack_an_array = None
    return (pack_an_array, unpack_an_array, pg_types.Array)
def RowTypeFactory(self, attribute_map = {}, _Row = pg_types.Row.from_sequence, composite_relid = None):
    # Bind the column-name -> index map into Row.from_sequence.  The
    # shared {} default is safe: attribute_map is only read, never
    # mutated.  composite_relid is accepted for interface compatibility
    # but unused here.
    return partial(_Row, attribute_map)
##
# record_io_factory - Build an I/O pair for RECORDs
##
def record_io_factory(self,
    column_io : "sequence (pack,unpack) tuples corresponding to the columns",
    typids : "sequence of type Oids; index must correspond to the composite's",
    attmap : "mapping of column name to index number",
    typnames : "sequence of sql type names in order",
    attnames : "sequence of attribute names in order",
    composite_relid : "oid of the composite relation",
    composite_name : "the name of the composite type",
    get0 = get0,
    get1 = get1,
    fmt_errmsg = "failed to {0} attribute {1}, {2}::{3}, of composite {4} from wire data".format
):
    """
    Build the (pack, unpack, type) triple for a composite (row) type.
    """
    # Split the per-column I/O pairs into parallel pack/unpack tuples.
    fpack = tuple(map(get0, column_io))
    funpack = tuple(map(get1, column_io))
    row_constructor = self.RowTypeFactory(attribute_map = attmap, composite_relid = composite_relid)
    # Error reporters close over the column metadata so failures name
    # the attribute, its SQL type, and the composite.
    def raise_pack_tuple_error(cause, procs, tup, itemnum):
        data = repr(tup[itemnum])
        if len(data) > 80:
            # Be sure not to fill screen with noise.
            data = data[:75] + ' ...'
        self.raise_client_error(element.ClientError((
            (b'C', '--cIO',),
            (b'S', 'ERROR',),
            (b'M', fmt_errmsg('pack', itemnum, attnames[itemnum], typnames[itemnum], composite_name),),
            (b'W', data,),
            (b'P', str(itemnum),)
        )), cause = cause)
    def raise_unpack_tuple_error(cause, procs, tup, itemnum):
        data = repr(tup[itemnum])
        if len(data) > 80:
            # Be sure not to fill screen with noise.
            data = data[:75] + ' ...'
        self.raise_client_error(element.ClientError((
            (b'C', '--cIO',),
            (b'S', 'ERROR',),
            (b'M', fmt_errmsg('unpack', itemnum, attnames[itemnum], typnames[itemnum], composite_name),),
            (b'W', data,),
            (b'P', str(itemnum),),
        )), cause = cause)
    def unpack_a_record(data,
        unpack = io_lib.record_unpack,
        process_tuple = process_tuple,
        row_constructor = row_constructor
    ):
        # Drop the per-field oids; keep only the wire data for each column.
        data = tuple([x[1] for x in unpack(data)])
        return row_constructor(process_tuple(funpack, data, raise_unpack_tuple_error))
    # Attribute names in column order, used to serialize dict inputs.
    sorted_atts = sorted(attmap.items(), key = get1)
    def pack_a_record(data,
        pack = io_lib.record_pack,
        process_tuple = process_tuple,
    ):
        if isinstance(data, dict):
            # Map a dict onto the composite's column order; missing keys
            # become None (SQL NULL).
            data = [data.get(k) for k,_ in sorted_atts]
        return pack(
            tuple(zip(
                typids,
                process_tuple(fpack, tuple(data), raise_pack_tuple_error)
            ))
        )
    return (pack_a_record, unpack_a_record, tuple)
def anon_record_io_factory(self):
    """
    Build the I/O pair for the anonymous `record` pseudo-type.

    Only unpacking is supported (pack is None): the column type oids are
    carried inside the wire data itself, so each record's field I/O is
    resolved on the fly.
    """
    def raise_unpack_tuple_error(cause, procs, tup, itemnum):
        data = repr(tup[itemnum])
        if len(data) > 80:
            # Be sure not to fill screen with noise.
            data = data[:75] + ' ...'
        self.raise_client_error(element.ClientError((
            (b'C', '--cIO',),
            (b'S', 'ERROR',),
            (b'M', 'Could not unpack element {} from anonymous record'.format(itemnum)),
            (b'W', data,),
            (b'P', str(itemnum),)
        )), cause = cause)
    def _unpack_record(data, unpack = io_lib.record_unpack, process_tuple = process_tuple):
        record = list(unpack(data))
        # Each field carries its own type oid; resolve unpackers per record.
        coloids = tuple(x[0] for x in record)
        colio = map(self.resolve, coloids)
        column_unpack = tuple(c[1] or self.decode for c in colio)
        data = tuple(x[1] for x in record)
        return process_tuple(column_unpack, data, raise_unpack_tuple_error)
    return (None, _unpack_record)
def raise_client_error(self, error_message, cause = None, creator = None):
    "Convert a client-side error element into an exception and raise it."
    details = {
        notice_field_to_name[k] : v
        for k, v in error_message.items()
        # don't include unknown messages in this list.
        if k in notice_field_to_name
    }
    code = details.pop('code')
    text = details.pop('message')
    exc_type = self.lookup_exception(code)
    exc = exc_type(text, code = code, details = details, source = 'CLIENT', creator = creator or self.database)
    exc.database = self.database
    if cause is None:
        raise exc
    raise exc from cause
def lookup_exception(self, code, errorlookup = pg_exc.ErrorLookup,):
    # Map an SQLSTATE code to its exception class.
    return errorlookup(code)
def lookup_warning(self, code, warninglookup = pg_exc.WarningLookup,):
    # Map an SQLSTATE code to its warning class.
    return warninglookup(code)
def raise_server_error(self, error_message, cause = None, creator = None):
    "Convert a server error message into an exception and raise it."
    details = dict(self.decode_notice(error_message))
    code = details.pop('code')
    text = details.pop('message')
    exc_type = self.lookup_exception(code)
    exc = exc_type(text, code = code, details = details, source = 'SERVER', creator = creator or self.database)
    exc.database = self.database
    if cause is None:
        raise exc
    raise exc from cause
def raise_error(self, error_message, ClientError = element.ClientError, **kw):
    "Dispatch to the client or server error raiser based on message class."
    if 'creator' not in kw:
        # Prefer the active controller as the error's creator when set.
        controller = getattr(self.database, '_controller', self.database)
        kw['creator'] = controller or self.database
    if error_message.__class__ is ClientError:
        self.raise_client_error(error_message, **kw)
    else:
        self.raise_server_error(error_message, **kw)
##
# Used by decode_notice()
def _decode_failsafe(self, data):
    # Yield (key, decoded-value) pairs, falling back to a repr of the raw
    # bytes for any value that fails to decode.
    decode = self._decode
    i = iter(data)
    for x in i:
        try:
            # prematurely optimized for your viewing displeasure.
            #
            # The inner loop keeps the common case inside a single try
            # block; after a decode failure, control returns to the
            # outer loop which re-establishes the handler and resumes
            # consuming the same iterator.
            v = x[1]
            yield (x[0], decode(v)[0])
            for x in i:
                v = x[1]
                yield (x[0], decode(v)[0])
        except UnicodeDecodeError:
            # Fallback to the bytes representation.
            # This should be sufficiently informative in most cases,
            # and in the cases where it isn't, an element traceback should
            # ultimately yield the pertinent information
            yield (x[0], repr(x[1])[2:-1])
def decode_notice(self, notice):
    "Decode a notice/error element into a field-name -> text mapping."
    decoded = self._decode_failsafe(notice.items())
    return {
        notice_field_to_name[k] : v
        for k, v in decoded
        # drop fields without a known name mapping
        if k in notice_field_to_name
    }
def emit_server_message(self, message, creator = None,
    MessageType = pg_msg.Message
):
    "Build, emit, and return a Message for a server-issued notice."
    fields = self.decode_notice(message)
    text = fields.pop('message')
    code = fields.pop('code')
    if fields['severity'].upper() == 'WARNING':
        # Warnings get a code-specific message class.
        MessageType = self.lookup_warning(code)
    msg = MessageType(text, code = code, details = fields,
        creator = creator, source = 'SERVER')
    msg.database = self.database
    msg.emit()
    return msg
def emit_client_message(self, message, creator = None,
    MessageType = pg_msg.Message
):
    "Build, emit, and return a Message for a client-issued notice."
    fields = {
        notice_field_to_name[k] : v
        for k, v in message.items()
        # drop fields without a known name mapping
        if k in notice_field_to_name
    }
    text = fields.pop('message')
    code = fields.pop('code')
    if fields['severity'].upper() == 'WARNING':
        # Warnings get a code-specific message class.
        MessageType = self.lookup_warning(code)
    msg = MessageType(text, code = code, details = fields,
        creator = creator, source = 'CLIENT')
    msg.database = self.database
    msg.emit()
    return msg
def emit_message(self, message, ClientNotice = element.ClientNotice, **kw):
    "Route a notice element to the client or server emitter."
    if message.__class__ is ClientNotice:
        return self.emit_client_message(message, **kw)
    return self.emit_server_message(message, **kw)
##
# This class manages all the functionality used to get
# rows from a PostgreSQL portal/cursor.
class Output(object):
    """
    Base class for cursor-like objects reading rows or COPY data from a
    PostgreSQL portal.  Subclasses implement _init() to select how the
    portal is bound and how results are requested.
    """
    _output = None
    _output_io = None
    _output_formats = None
    _output_attmap = None
    closed = False
    cursor_id = None
    statement = None
    parameters = None
    _complete_message = None

    @abstractmethod
    def _init(self):
        """
        Bind a cursor based on the configured parameters.
        """
        # The local initialization for the specific cursor.

    def __init__(self, cursor_id, wref = weakref.ref, ID = ID):
        self.cursor_id = cursor_id
        if self.statement is not None:
            # Inherit the result-set description from the prepared statement.
            stmt = self.statement
            self._output = stmt._output
            self._output_io = stmt._output_io
            self._row_constructor = stmt._row_constructor
            self._output_formats = stmt._output_formats or ()
            self._output_attmap = stmt._output_attmap
        self._pq_cursor_id = self.database.typio.encode(cursor_id)
        # If the cursor's id was generated, it should be garbage collected.
        if cursor_id == ID(self):
            self.database.pq.register_cursor(self, self._pq_cursor_id)
        self._quoted_cursor_id = '"' + cursor_id.replace('"', '""') + '"'
        self._init()

    def __iter__(self):
        return self

    def close(self):
        # Idempotent: the protocol-level cursor is queued for cleanup once.
        if self.closed is False:
            self.database.pq.trash_cursor(self._pq_cursor_id)
            self.closed = True

    def _ins(self, *args):
        # Wrap protocol commands in an Instruction transaction that also
        # routes asynchronous messages to the connection.
        return xact.Instruction(*args, asynchook = self.database._receive_async)

    def _pq_xp_describe(self):
        return (element.DescribePortal(self._pq_cursor_id),)

    def _pq_xp_bind(self):
        # Bind the prepared statement to this cursor's portal.
        return (
            element.Bind(
                self._pq_cursor_id,
                self.statement._pq_statement_id,
                self.statement._input_formats,
                self.statement._pq_parameters(self.parameters),
                self._output_formats,
            ),
        )

    def _pq_xp_fetchall(self):
        # Bind to the unnamed portal and execute with no row limit.
        return (
            element.Bind(
                b'',
                self.statement._pq_statement_id,
                self.statement._input_formats,
                self.statement._pq_parameters(self.parameters),
                self._output_formats,
            ),
            element.Execute(b'', 0xFFFFFFFF),
        )

    def _pq_xp_declare(self):
        # DECLARE an SQL-level cursor wrapping the statement's source.
        return (
            element.Parse(b'', self.database.typio.encode(
                declare_statement_string(
                    str(self._quoted_cursor_id),
                    str(self.statement.string)
                )
            ), ()
            ),
            element.Bind(
                b'', b'', self.statement._input_formats,
                self.statement._pq_parameters(self.parameters), ()
            ),
            element.Execute(b'', 1),
        )

    def _pq_xp_execute(self, quantity):
        return (
            element.Execute(self._pq_cursor_id, quantity),
        )

    def _pq_xp_fetch(self, direction, quantity):
        ##
        # It's an SQL declared cursor, manually construct the fetch commands.
        qstr = "FETCH " + ("FORWARD " if direction else "BACKWARD ")
        if quantity is None:
            qstr = qstr + "ALL IN " + self._quoted_cursor_id
        else:
            qstr = qstr \
                + str(quantity) + " IN " + self._quoted_cursor_id
        return (
            element.Parse(b'', self.database.typio.encode(qstr), ()),
            element.Bind(b'', b'', (), (), self._output_formats),
            # The "limit" is defined in the fetch query.
            element.Execute(b'', 0xFFFFFFFF),
        )

    def _pq_xp_move(self, position, whence):
        # MOVE the SQL cursor; position and whence are pre-encoded bytes.
        return (
            element.Parse(b'',
                b'MOVE ' + whence + b' ' + position + b' IN ' + \
                self.database.typio.encode(self._quoted_cursor_id),
                ()
            ),
            element.Bind(b'', b'', (), (), ()),
            element.Execute(b'', 1),
        )

    def _process_copy_chunk(self, x):
        # Strip any non-bytes messages that got mixed into a COPY chunk.
        if x:
            if x[0].__class__ is not bytes or x[-1].__class__ is not bytes:
                return [
                    y for y in x if y.__class__ is bytes
                ]
        return x

    # Process the element.Tuple message in x for column()
    def _process_tuple_chunk_Column(self, x, range = range):
        unpack = self._output_io[0]
        # get the raw data for the first column
        l = [y[0] for y in x]
        try:
            return [unpack(y) for y in l]
        except Exception:
            cause = sys.exc_info()[1]
            # Re-scan to find the failing item so the error can name it.
            # [fix: the previous code called next() on a range object,
            # which raises TypeError and masked the real unpack error]
            i = 0
            for i, y in enumerate(l):
                try:
                    unpack(y)
                except Exception:
                    break
            self._raise_column_tuple_error(cause, self._output_io, (l[i],), 0)

    # Process the element.Tuple message in x for rows()
    def _process_tuple_chunk_Row(self, x,
        proc = process_chunk,
    ):
        rc = self._row_constructor
        return [
            rc(y)
            for y in proc(self._output_io, x, self._raise_column_tuple_error)
        ]

    # Process the element.Tuple messages in `x` for chunks()
    def _process_tuple_chunk(self, x, proc = process_chunk):
        return proc(self._output_io, x, self._raise_column_tuple_error)

    def _raise_column_tuple_error(self, cause, procs, tup, itemnum):
        'for column processing'
        # The element traceback will include the full list of parameters.
        data = repr(tup[itemnum])
        if len(data) > 80:
            # Be sure not to fill screen with noise.
            data = data[:75] + ' ...'
        em = element.ClientError((
            (b'S', 'ERROR'),
            (b'C', "--CIO"),
            (b'M', "failed to unpack column %r, %s::%s, from wire data" %(
                itemnum,
                self.column_names[itemnum],
                self.database.typio.sql_type_from_oid(
                    self.statement.pg_column_types[itemnum]
                ) or '<unknown>',
            )
            ),
            (b'D', data),
            (b'H', "Try casting the column to 'text'."),
            (b'P', str(itemnum)),
        ))
        self.database.typio.raise_client_error(em, creator = self, cause = cause)

    @property
    def state(self):
        # Two-state lifecycle: open until close() trashes the cursor.
        if self.closed:
            return 'closed'
        else:
            return 'open'

    @property
    def column_names(self):
        if self._output is not None:
            return list(self.database.typio.decodes(self._output.keys()))
        # `None` if _output does not exist; not row data

    @property
    def column_types(self):
        if self._output is not None:
            return [self.database.typio.type_from_oid(x[3]) for x in self._output]
        # `None` if _output does not exist; not row data

    @property
    def pg_column_types(self):
        if self._output is not None:
            return [x[3] for x in self._output]
        # `None` if _output does not exist; not row data

    @property
    def sql_column_types(self):
        return [
            self.database.typio.sql_type_from_oid(x)
            for x in self.pg_column_types
        ]

    def command(self):
        "The completion message's command identifier"
        if self._complete_message is not None:
            return self._complete_message.extract_command().decode('ascii')

    def count(self):
        "The completion message's count number"
        if self._complete_message is not None:
            return self._complete_message.extract_count()
class Chunks(Output, pg_api.Chunks):
    # Abstract join of the protocol-level Output machinery with the
    # public Chunks interface; concrete behavior comes from subclasses.
    pass
##
# FetchAll - A Chunks cursor that gets *all* the records in the cursor.
#
# It has added complexity over other variants as in order to stream results,
# chunks have to be removed from the protocol transaction's received messages.
# If this wasn't done, the entire result set would be fully buffered prior
# to processing.
class FetchAll(Chunks):
    _e_factors = ('statement', 'parameters',)
    def _e_metas(self):
        yield ('type', type(self).__name__)
    def __init__(self, statement, parameters):
        self.statement = statement
        self.parameters = parameters
        self.database = statement.database
        # Empty cursor id: the unnamed portal is used.
        Output.__init__(self, '')
    def _init(self,
        null = element.Null.type,
        complete = element.Complete.type,
        bindcomplete = element.BindComplete.type,
        parsecomplete = element.ParseComplete.type,
    ):
        # Dispatch the bind+execute and step the protocol until the
        # first data message (or completion) is observed.
        expect = self._expect
        self._xact = self._ins(
            self._pq_xp_fetchall() + (element.SynchronizeMessage,)
        )
        self.database._pq_push(self._xact, self)
        # Get more messages until the first Tuple is seen.
        STEP = self.database._pq_step
        while self._xact.state != xact.Complete:
            STEP()
            for x in self._xact.messages_received():
                if x.__class__ is tuple or expect == x.type:
                    # No need to step anymore once this is seen.
                    return
                elif x.type == null:
                    # The protocol transaction is going to be complete..
                    self.database._pq_complete()
                    self._xact = None
                    return
                elif x.type == complete:
                    self._complete_message = x
                    self.database._pq_complete()
                    # If this was a select/copy cursor,
                    # the data messages would have caused an earlier
                    # return. It's empty.
                    self._xact = None
                    return
                elif x.type in (bindcomplete, parsecomplete):
                    # Noise.
                    pass
                else:
                    # This should have been caught by the protocol transaction.
                    # "Can't happen".
                    self.database._pq_complete()
                    if self._xact.fatal is None:
                        self._xact.fatal = False
                        self._xact.error_message = element.ClientError((
                            (b'S', 'ERROR'),
                            (b'C', "--000"),
                            (b'M', "unexpected message type " + repr(x.type))
                        ))
                    self.database.typio.raise_client_error(self._xact.error_message, creator = self)
                    return
    def __next__(self,
        data_types = (tuple,bytes),
        complete = element.Complete.type,
    ):
        x = self._xact
        # self._xact = None; means that the cursor has been exhausted.
        if x is None:
            raise StopIteration
        # Finish the protocol transaction.
        STEP = self.database._pq_step
        while x.state is not xact.Complete and not x.completed:
            STEP()
        # fatal is None == no error
        # fatal is True == dead connection
        # fatal is False == dead transaction
        if x.fatal is not None:
            self.database.typio.raise_error(x.error_message, creator = getattr(self, '_controller', self) or self)
        # no messages to process?
        if not x.completed:
            # Transaction has been cleaned out of completed? iterator is done.
            self._xact = None
            self.close()
            raise StopIteration
        # Get the chunk to be processed.
        chunk = [
            y for y in x.completed[0][1]
            if y.__class__ in data_types
        ]
        r = self._process_chunk(chunk)
        # Scan for _complete_message.
        # Arguably, this can fail, but it would be a case
        # where multiple sync messages were issued. Something that's
        # not naturally occurring.
        for y in x.completed[0][1][-3:]:
            if getattr(y, 'type', None) == complete:
                self._complete_message = y
        # Remove it, it's been processed.
        del x.completed[0]
        return r
class SingleXactCopy(FetchAll):
    # COPY variant: waits for CopyToBegin and processes raw copy-data
    # chunks instead of tuples.
    _expect = element.CopyToBegin.type
    _process_chunk = FetchAll._process_copy_chunk
class SingleXactFetch(FetchAll):
    # Row variant: the first Tuple message ends initialization.
    _expect = element.Tuple.type
class MultiXactStream(Chunks):
    """
    Chunks cursor that pulls rows in fixed-size batches, one protocol
    transaction per batch.
    """
    # Number of rows requested per round trip.
    chunksize = 1024 * 16
    # only tuple streams
    _process_chunk = Output._process_tuple_chunk
    def _e_metas(self):
        yield ('chunksize', self.chunksize)
        yield ('type', self.__class__.__name__)
    def __init__(self, statement, parameters, cursor_id):
        self.statement = statement
        self.parameters = parameters
        self.database = statement.database
        Output.__init__(self, cursor_id or ID(self))
    @abstractmethod
    def _bind(self):
        """
        Generate the commands needed to bind the cursor.
        """
    @abstractmethod
    def _fetch(self):
        """
        Generate the commands needed to fetch more rows from the cursor.
        """
    def _init(self):
        # Dispatch the bind together with the first fetch request.
        self._command = self._fetch()
        self._xact = self._ins(self._bind() + self._command)
        self.database._pq_push(self._xact, self)
    def __next__(self, tuple_type = tuple):
        x = self._xact
        if x is None:
            raise StopIteration
        if self.database.pq.xact is x:
            self.database._pq_complete()
        # get all the element.Tuple messages
        chunk = [
            y for y in x.messages_received() if y.__class__ is tuple_type
        ]
        if len(chunk) == self.chunksize:
            # there may be more, dispatch the request for the next chunk
            self._xact = self._ins(self._command)
            self.database._pq_push(self._xact, self)
        else:
            # it's done.
            self._xact = None
            self.close()
        if not chunk:
            # chunk is empty, it's done *right* now.
            raise StopIteration
        chunk = self._process_chunk(chunk)
        return chunk
##
# The cursor is streamed to the client on demand *inside*
# a single SQL transaction block.
class MultiXactInsideBlock(MultiXactStream):
    # Protocol-level portal bind; no SQL DECLARE is needed inside a block.
    _bind = MultiXactStream._pq_xp_bind
    def _fetch(self):
        ##
        # Use the extended protocol's execute to fetch more.
        return self._pq_xp_execute(self.chunksize) + \
            (element.SynchronizeMessage,)
##
# The cursor is streamed to the client on demand *outside* of
# a single SQL transaction block. [DECLARE ... WITH HOLD]
class MultiXactOutsideBlock(MultiXactStream):
    _bind = MultiXactStream._pq_xp_declare
    def _fetch(self):
        ##
        # Use the extended protocol's execute to fetch more *against*
        # an SQL FETCH statement yielding the data in the proper format.
        #
        # MultiXactOutsideBlock uses DECLARE to create the cursor WITH HOLD.
        # When this is done, the cursor is configured to use StringFormat with
        # all columns. It's necessary to use FETCH to adjust the formatting.
        return self._pq_xp_fetch(True, self.chunksize) + \
            (element.SynchronizeMessage,)
##
# Cursor is used to manage scrollable cursors.
class Cursor(Output, pg_api.Cursor):
    """
    Scrollable cursor: rows can be read in either direction and the
    position can be adjusted with seek().
    """
    _process_tuple = Output._process_tuple_chunk_Row
    def _e_metas(self):
        # [fix: 'BACKWORD' typo corrected to 'BACKWARD']
        yield ('direction', 'FORWARD' if self.direction else 'BACKWARD')
        yield ('type', 'Cursor')
    def clone(self):
        "Create another cursor over the same statement and parameters."
        return type(self)(self.statement, self.parameters, self.database, None)
    def __init__(self, statement, parameters, database, cursor_id):
        self.database = database or statement.database
        self.statement = statement
        self.parameters = parameters
        # Stored in __dict__ so the property below can manage it.
        self.__dict__['direction'] = True
        if self.statement is None:
            self._e_factors = ('database', 'cursor_id')
        Output.__init__(self, cursor_id or ID(self))
    def get_direction(self):
        return self.__dict__['direction']
    def set_direction(self, value):
        self.__dict__['direction'] = direction_to_bool(value)
    direction = property(
        fget = get_direction,
        fset = set_direction,
    )
    del get_direction, set_direction
    def _which_way(self, direction):
        # Combine the cursor's base direction with a per-call override.
        if direction is not None:
            direction = direction_to_bool(direction)
            # -1 * -1 = 1, -1 * 1 = -1, 1 * 1 = 1
            return not ((not self.direction) ^ (not direction))
        else:
            return self.direction
    def _init(self,
        tupledesc = element.TupleDescriptor.type,
    ):
        """
        Based on the cursor parameters and the current transaction state,
        select a cursor strategy for managing the response from the server.
        """
        if self.statement is not None:
            # Statement-backed cursor: DECLARE it server-side.
            x = self._ins(self._pq_xp_declare() + (element.SynchronizeMessage,))
            self.database._pq_push(x, self)
            self.database._pq_complete()
        else:
            # Pre-existing cursor id: describe the portal to learn its output.
            x = self._ins(self._pq_xp_describe() + (element.SynchronizeMessage,))
            self.database._pq_push(x, self)
            self.database._pq_complete()
            for m in x.messages_received():
                if m.type == tupledesc:
                    typio = self.database.typio
                    self._output = m
                    self._output_attmap = typio.attribute_map(self._output)
                    self._row_constructor = typio.RowTypeFactory(self._output_attmap)
                    # tuple output
                    self._output_io = typio.resolve_descriptor(
                        self._output, 1 # (input, output)[1]
                    )
                    # Columns lacking binary I/O use the string format.
                    self._output_formats = [
                        element.StringFormat
                        if x is None
                        else element.BinaryFormat
                        for x in self._output_io
                    ]
                    self._output_io = tuple([
                        x or typio.decode for x in self._output_io
                    ])
    def __next__(self):
        result = self._fetch(self.direction, 1)
        if not result:
            raise StopIteration
        else:
            return result[0]
    def read(self, quantity = None, direction = None):
        "Read up to *quantity* rows (all when None) in the effective direction."
        if quantity == 0:
            return []
        dir = self._which_way(direction)
        return self._fetch(dir, quantity)
    def _fetch(self, direction, quantity):
        # Issue the SQL FETCH and convert the Tuple messages to rows.
        x = self._ins(
            self._pq_xp_fetch(direction, quantity) + \
            (element.SynchronizeMessage,)
        )
        self.database._pq_push(x, self)
        self.database._pq_complete()
        return self._process_tuple((
            y for y in x.messages_received() if y.__class__ is tuple
        ))
    def seek(self, offset, whence = 'ABSOLUTE'):
        """
        Reposition the cursor; returns the count reported by the server's
        MOVE command, when available.
        """
        rwhence = self._seek_whence_map.get(whence, whence)
        if rwhence is None or rwhence.upper() not in \
        self._seek_whence_map.values():
            raise TypeError(
                "unknown whence parameter, %r" %(whence,)
            )
        rwhence = rwhence.upper()
        if offset == 'ALL':
            if rwhence not in ('BACKWARD', 'FORWARD'):
                rwhence = 'BACKWARD' if self.direction is False else 'FORWARD'
        else:
            if offset < 0 and rwhence == 'BACKWARD':
                # Negative backward is forward.
                offset = -offset
                rwhence = 'FORWARD'
        if self.direction is False:
            # Reversed cursor: mirror the motion semantics.
            if offset == 'ALL' and rwhence != 'FORWARD':
                rwhence = 'BACKWARD'
            else:
                if rwhence == 'RELATIVE':
                    offset = -offset
                elif rwhence == 'ABSOLUTE':
                    rwhence = 'FROM_END'
                else:
                    rwhence = 'ABSOLUTE'
        if rwhence in ('RELATIVE', 'BACKWARD', 'FORWARD'):
            if offset == 'ALL':
                cmd = self._pq_xp_move(
                    str(offset).encode('ascii'), str(rwhence).encode('ascii')
                )
            else:
                if offset < 0:
                    cmd = self._pq_xp_move(
                        str(-offset).encode('ascii'), b'BACKWARD'
                    )
                else:
                    cmd = self._pq_xp_move(
                        str(offset).encode('ascii'), str(rwhence).encode('ascii')
                    )
        elif rwhence == 'ABSOLUTE':
            cmd = self._pq_xp_move(str(offset).encode('ascii'), b'ABSOLUTE')
        else:
            # move to last record, then consume it to put the position at
            # the very end of the cursor.
            cmd = self._pq_xp_move(b'', b'LAST') + \
                self._pq_xp_move(b'', b'NEXT') + \
                self._pq_xp_move(str(offset).encode('ascii'), b'BACKWARD')
        x = self._ins(cmd + (element.SynchronizeMessage,),)
        self.database._pq_push(x, self)
        self.database._pq_complete()
        count = None
        complete = element.Complete.type
        for cm in x.messages_received():
            if getattr(cm, 'type', None) == complete:
                count = cm.extract_count()
                break
        # XXX: Raise if count is None?
        return count
class SingleExecution(pg_api.Execution):
    """
    One-shot execution interface: each call prepares the query and then
    invokes the requested access method on the prepared statement.
    """
    database = None
    def __init__(self, database):
        # Only the connection's prepare entry point is retained.
        self._prepare = database.prepare
    def __call__(self, query, *parameters):
        return self._prepare(query)(*parameters)
    def load_rows(self, query, *parameters):
        stmt = self._prepare(query)
        return stmt.load_rows(*parameters)
    def load_chunks(self, query, *parameters):
        stmt = self._prepare(query)
        return stmt.load_chunks(*parameters)
    def declare(self, query, *parameters):
        stmt = self._prepare(query)
        return stmt.declare(*parameters)
    def rows(self, query, *parameters):
        stmt = self._prepare(query)
        return stmt.rows(*parameters)
    def chunks(self, query, *parameters):
        stmt = self._prepare(query)
        return stmt.chunks(*parameters)
    def column(self, query, *parameters):
        stmt = self._prepare(query)
        return stmt.column(*parameters)
    def first(self, query, *parameters):
        stmt = self._prepare(query)
        return stmt.first(*parameters)
class Statement(pg_api.Statement):
    # SQL source of the prepared statement.
    string = None
    # Owning connection.
    database = None
    # Server-side statement name; generated when not supplied.
    statement_id = None
    # Parameter type oids, set once the statement is described.
    _input = None
    # Result-column descriptor (supports .keys(); field [3] is the type oid).
    _output = None
    # Per-column unpack routines.
    _output_io = None
    # Wire format (text/binary) per result column.
    _output_formats = None
    # Column name -> index mapping.
    _output_attmap = None
def _e_metas(self):
    """
    Yield (name, value) metadata pairs describing the statement for
    error/element display: state, the source line around a reported
    error position, parameter and column types.
    """
    yield (None, '[' + self.state + ']')
    if hasattr(self._xact, 'error_message'):
        # be very careful not to trigger an exception.
        # even in the cases of effective protocol errors,
        # it is important not to bomb out.
        pos = self._xact.error_message.get(b'P')
        if pos is not None and pos.isdigit():
            try:
                pos = int(pos)
                # get the statement source
                q = str(self.string)
                # normalize position..
                pos = len('\n'.join(q[:pos].splitlines()))
                # normalize newlines
                q = '\n'.join(q.splitlines())
                line_no = q.count('\n', 0, pos) + 1
                # replace tabs with spaces because there is no way to identify
                # the tab size of the final display. (ie, marker will be wrong)
                q = q.replace('\t', ' ')
                # grab the relevant part of the query string.
                # the full source will be printed elsewhere.
                # beginning of string or the newline before the position
                bov = q.rfind('\n', 0, pos) + 1
                # end of string or the newline after the position
                eov = q.find('\n', pos)
                if eov == -1:
                    eov = len(q)
                view = q[bov:eov]
                # position relative to the beginning of the view
                pos = pos-bov
                # analyze lines prior to position
                dlines = view.splitlines()
                marker = ((pos-1) * ' ') + '^' + (
                    ' [line %d, character %d] ' %(line_no, pos)
                )
                # insert marker
                dlines.append(marker)
                yield ('LINE', os.linesep.join(dlines))
            except Exception:
                # [fix: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; narrowed to Exception]
                import traceback
                yield ('LINE', traceback.format_exc(chain=False))
    spt = self.sql_parameter_types
    if spt is not None:
        yield ('sql_parameter_types', spt)
    cn = self.column_names
    ct = self.sql_column_types
    if cn is not None:
        if ct is not None:
            yield (
                'results',
                '(' + ', '.join([
                    '{!r} {!r}'.format(n, t) for n,t in zip(cn,ct)
                ]) + ')'
            )
        else:
            yield ('sql_column_names', cn)
    elif ct is not None:
        yield ('sql_column_types', ct)
def clone(self):
    "Create a new, independently prepared statement with the same source."
    replica = self.__class__(self.database, None, self.string)
    replica._init()
    replica._fini()
    return replica
def __init__(self,
    database, statement_id, string,
    wref = weakref.ref
):
    """
    Attach the statement to *database*; a generated statement id is
    registered for automatic cleanup on the connection.
    """
    self.database = database
    self.string = string
    self.statement_id = statement_id or ID(self)
    self._xact = None
    # None marks "not yet described"; see the state/column properties.
    self.closed = None
    self._pq_statement_id = database.typio._encode(self.statement_id)[0]
    if not statement_id:
        # Register statement on a connection to close it automatically on db end
        database.pq.register_statement(self, self._pq_statement_id)
def __repr__(self):
    "Concise representation: module, class, connection IRI, and state."
    cls = self.__class__
    return '<{mod}.{name}[{ci}] {state}>'.format(
        mod = cls.__module__,
        name = cls.__name__,
        ci = self.database.connector._pq_iri,
        state = self.state,
    )
def _pq_parameters(self, parameters, proc = process_tuple):
    # Pack *parameters* into wire format with the statement's input I/O;
    # failures are reported via _raise_parameter_tuple_error.
    return proc(
        self._input_io, parameters,
        self._raise_parameter_tuple_error
    )
##
# process_tuple failed(exception). The parameters could not be packed.
# This function is called with the given information in the context
# of the original exception(to allow chaining).
def _raise_parameter_tuple_error(self, cause, procs, tup, itemnum):
    # Find the SQL type name. This should *not* hit the server.
    typ = self.database.typio.sql_type_from_oid(
        self.pg_parameter_types[itemnum]
    ) or '<unknown>'
    # Representation of the bad parameter.
    bad_data = repr(tup[itemnum])
    if len(bad_data) > 80:
        # Be sure not to fill screen with noise.
        bad_data = bad_data[:75] + ' ...'
    em = element.ClientError((
        (b'S', 'ERROR'),
        (b'C', '--PIO'),
        (b'M', "could not pack parameter %s::%s for transfer" %(
            ('$' + str(itemnum + 1)), typ,
        )
        ),
        (b'D', bad_data),
        (b'H', "Try casting the parameter to 'text', then to the target type."),
        (b'P', str(itemnum))
    ))
    self.database.typio.raise_client_error(em, creator = self, cause = cause)
##
# Similar to the parameter variant.
def _raise_column_tuple_error(self, cause, procs, tup, itemnum):
    # Find the SQL type name. This should *not* hit the server.
    typ = self.database.typio.sql_type_from_oid(
        self.pg_column_types[itemnum]
    ) or '<unknown>'
    # Representation of the bad column.
    data = repr(tup[itemnum])
    if len(data) > 80:
        # Be sure not to fill screen with noise.
        data = data[:75] + ' ...'
    em = element.ClientError((
        (b'S', 'ERROR'),
        (b'C', '--CIO'),
        (b'M', "could not unpack column %r, %s::%s, from wire data" %(
            itemnum, self.column_names[itemnum], typ
        )
        ),
        (b'D', data),
        (b'H', "Try casting the column to 'text'."),
        (b'P', str(itemnum)),
    ))
    self.database.typio.raise_client_error(em, creator = self, cause = cause)
@property
def state(self) -> str:
if self.closed:
if self._xact is not None:
if self.string is not None:
return 'parsing'
else:
return 'describing'
return 'closed'
return 'prepared'
@property
def column_names(self):
if self.closed is None:
self._fini()
if self._output is not None:
return list(self.database.typio.decodes(self._output.keys()))
@property
def parameter_types(self):
if self.closed is None:
self._fini()
if self._input is not None:
return [self.database.typio.type_from_oid(x) for x in self._input]
@property
def column_types(self):
if self.closed is None:
self._fini()
if self._output is not None:
return [
self.database.typio.type_from_oid(x[3]) for x in self._output
]
	@property
	def pg_parameter_types(self):
		# Raw PostgreSQL type OIDs of the statement's parameters
		# (None until the statement has been described).
		if self.closed is None:
			self._fini()
		return self._input
@property
def pg_column_types(self):
if self.closed is None:
self._fini()
if self._output is not None:
return [x[3] for x in self._output]
@property
def sql_column_types(self):
if self.closed is None:
self._fini()
if self._output is not None:
return [
self.database.typio.sql_type_from_oid(x)
for x in self.pg_column_types
]
@property
def sql_parameter_types(self):
if self.closed is None:
self._fini()
if self._input is not None:
return [
self.database.typio.sql_type_from_oid(x)
for x in self.pg_parameter_types
]
def close(self):
if self.closed is False:
self.database.pq.trash_statement(self._pq_statement_id)
self.closed = True
	def _init(self):
		"""
		Push initialization messages to the server, but don't wait for
		the return as there may be things that can be done while waiting
		for the return. Use the _fini() to complete.
		"""
		if self.string is not None:
			# New statement: close any previous statement registered under
			# this identifier, then Parse the SQL source.
			q = self.database.typio._encode(str(self.string))[0]
			cmd = [
				element.CloseStatement(self._pq_statement_id),
				element.Parse(self._pq_statement_id, q, ()),
			]
		else:
			# Statement already exists server-side (statement_from_id);
			# only the Describe below is needed.
			cmd = []
		cmd.extend(
			(
				element.DescribeStatement(self._pq_statement_id),
				element.SynchronizeMessage,
			)
		)
		self._xact = xact.Instruction(cmd, asynchook = self.database._receive_async)
		self.database._pq_push(self._xact, self)
	def _fini(self, strfmt = element.StringFormat, binfmt = element.BinaryFormat):
		"""
		Complete initialization that the _init() method started.

		Consumes the Describe results and builds the input/output
		pack/unpack routines and wire formats. Sets `closed` to False
		on success.
		"""
		# assume that the transaction has been primed.
		if self._xact is None:
			raise RuntimeError("_fini called prior to _init; invalid state")
		if self._xact is self.database.pq.xact:
			try:
				self.database._pq_complete()
			except Exception:
				self.closed = True
				raise
		try:
			# The trailing messages carry the typing information:
			# argument type OIDs and the row (tuple) descriptor.
			(*head, argtypes, tupdesc, last) = self._xact.messages_received()
		except ValueError as e:
			# Unexpected/short message sequence; mark closed and bail.
			self.closed = True
			self._xact = None
			return
		typio = self.database.typio
		if tupdesc is None or tupdesc is element.NoDataMessage:
			# Not typed output.
			self._output = None
			self._output_attmap = None
			self._output_io = None
			self._output_formats = None
			self._row_constructor = None
		else:
			self._output = tupdesc
			self._output_attmap = dict(
				typio.attribute_map(tupdesc)
			)
			self._row_constructor = self.database.typio.RowTypeFactory(self._output_attmap)
			# tuple output
			self._output_io = typio.resolve_descriptor(tupdesc, 1)
			# Columns without a resolved unpacker travel as text.
			self._output_formats = [
				strfmt if x is None else binfmt
				for x in self._output_io
			]
			self._output_io = tuple([
				x or typio.decode for x in self._output_io
			])
		self._input = argtypes
		packs = []
		formats = []
		for x in argtypes:
			# Fall back to the generic encoder when no packer resolves.
			pack = (typio.resolve(x) or (None,None))[0]
			packs.append(pack or typio.encode)
			formats.append(
				strfmt if x is None else binfmt
			)
		self._input_io = tuple(packs)
		self._input_formats = formats
		self.closed = False
		self._xact = None
	def __call__(self, *parameters):
		"""
		Execute the statement with *parameters* and collect all results:
		a (command-tag, count) pair for non-row commands, otherwise a
		list of rows.

		Raises TypeError when the number of parameters does not match
		the statement's declared inputs.
		"""
		if self._input is not None:
			if len(parameters) != len(self._input):
				raise TypeError("statement requires %d parameters, given %d" %(
					len(self._input), len(parameters)
				))
		##
		# get em' all!
		if self._output is None:
			# might be a copy.
			c = SingleXactCopy(self, parameters)
		else:
			c = SingleXactFetch(self, parameters)
			c._process_chunk = c._process_tuple_chunk_Row
		# iff output is None, it's not a tuple returning query.
		# however, if it's a copy, detect that fact by SingleXactCopy's
		# immediate return after finding the copy begin message(no complete).
		if self._output is None:
			cmd = c.command()
			if cmd is not None:
				return (cmd, c.count())
		# Returns rows, accumulate in a list.
		r = []
		for x in c:
			r.extend(x)
		return r
def declare(self, *parameters):
if self.closed is None:
self._fini()
if self._input is not None:
if len(parameters) != len(self._input):
raise TypeError("statement requires %d parameters, given %d" %(
len(self._input), len(parameters)
))
return Cursor(self, parameters, self.database, None)
def rows(self, *parameters, **kw):
chunks = self.chunks(*parameters, **kw)
if chunks._output_io:
chunks._process_chunk = chunks._process_tuple_chunk_Row
return chain.from_iterable(chunks)
__iter__ = rows
	def chunks(self, *parameters):
		"""
		Execute the statement and return an iterator producing chunks
		(sequences) of rows. The fetch strategy is chosen from the
		transaction state and whether the statement returns rows.
		"""
		if self.closed is None:
			self._fini()
		if self._input is not None:
			if len(parameters) != len(self._input):
				raise TypeError("statement requires %d parameters, given %d" %(
					len(self._input), len(parameters)
				))
		if self._output is None:
			# It's *probably* a COPY.
			return SingleXactCopy(self, parameters)
		if self.database.pq.state == b'I':
			# Currently, *not* in a Transaction block, so
			# DECLARE the statement WITH HOLD in order to allow
			# access across transactions.
			if self.string is not None:
				return MultiXactOutsideBlock(self, parameters, None)
			else:
				##
				# Statement source unknown, so it can't be DECLARE'd.
				# This happens when statement_from_id is used.
				return SingleXactFetch(self, parameters)
		else:
			# Likely, the best possible case. It gets to use Execute messages.
			return MultiXactInsideBlock(self, parameters, None)
def column(self, *parameters, **kw):
chunks = self.chunks(*parameters, **kw)
chunks._process_chunk = chunks._process_tuple_chunk_Column
return chain.from_iterable(chunks)
def first(self, *parameters):
if self.closed is None:
# Not fully initialized; assume interrupted.
self._fini()
if self._input is not None:
# Use a regular TypeError.
if len(parameters) != len(self._input):
raise TypeError("statement requires %d parameters, given %d" %(
len(self._input), len(parameters)
))
# Parameters? Build em'.
db = self.database
if self._input_io:
params = process_tuple(
self._input_io, parameters,
self._raise_parameter_tuple_error
)
else:
params = ()
# Run the statement
x = xact.Instruction((
element.Bind(
b'',
self._pq_statement_id,
self._input_formats,
params,
self._output_formats or (),
),
# Get all
element.Execute(b'', 0xFFFFFFFF),
element.ClosePortal(b''),
element.SynchronizeMessage
),
asynchook = db._receive_async
)
# Push and complete protocol transaction.
db._pq_push(x, self)
db._pq_complete()
if self._output_io:
##
# It returned rows, look for the first tuple.
tuple_type = element.Tuple.type
for xt in x.messages_received():
if xt.__class__ is tuple:
break
else:
return None
if len(self._output_io) > 1:
# Multiple columns, return a Row.
return self._row_constructor(
process_tuple(
self._output_io, xt,
self._raise_column_tuple_error
)
)
else:
# Single column output.
if xt[0] is None:
return None
io = self._output_io[0] or self.database.typio.decode
return io(xt[0])
else:
##
# It doesn't return rows, so return a count.
##
# This loop searches through the received messages
# for the Complete message which contains the count.
complete = element.Complete.type
for cm in x.messages_received():
# Use getattr because COPY doesn't produce
# element.Message instances.
if getattr(cm, 'type', None) == complete:
break
else:
# Probably a Null command.
return None
count = cm.extract_count()
if count is None:
command = cm.extract_command()
if command is not None:
return command.decode('ascii')
return count
	def _load_copy_chunks(self, chunks, *parameters):
		"""
		Given an chunks of COPY lines, execute the COPY ... FROM STDIN
		statement and send the copy lines produced by the iterable to
		the remote end.
		"""
		# NOTE(review): *parameters is accepted but never used in this body,
		# and load_chunks() does not forward it — confirm intent.
		x = xact.Instruction((
			element.Bind(
				b'',
				self._pq_statement_id,
				(), (), (),
			),
			element.Execute(b'', 1),
			element.SynchronizeMessage,
		),
			asynchook = self.database._receive_async
		)
		self.database._pq_push(x, self)
		# localize
		step = self.database._pq_step
		# Get the COPY started.
		while x.state is not xact.Complete:
			step()
			if hasattr(x, 'CopyFailSequence') and x.messages is x.CopyFailSequence:
				# The protocol transaction has noticed that its a COPY.
				break
		else:
			# Oh, it's not a COPY at all.
			x.fatal = x.fatal or False
			x.error_message = element.ClientError((
				(b'S', 'ERROR'),
				# OperationError
				(b'C', '--OPE'),
				(b'M', "_load_copy_chunks() used on a non-COPY FROM STDIN query"),
			))
			self.database.typio.raise_client_error(x.error_message, creator = self)
		for chunk in chunks:
			# Feed one chunk of copy lines to the protocol transaction.
			x.messages = list(chunk)
			while x.messages is not x.CopyFailSequence:
				# Continue stepping until the transaction
				# sets the CopyFailSequence again. That's
				# the signal that the transaction has sent
				# all the previously set messages.
				step()
		# Signal end-of-copy and wrap up the protocol transaction.
		x.messages = x.CopyDoneSequence
		self.database._pq_complete()
		self.database.pq.synchronize()
	def _load_tuple_chunks(self, chunks):
		"""
		Execute the statement once per parameter tuple, in batches:
		each chunk becomes a sequence of Bind/Execute message pairs
		terminated by a Synchronize.
		"""
		pte = self._raise_parameter_tuple_error
		last = (element.SynchronizeMessage,)
		try:
			for chunk in chunks:
				bindings = [
					(
						element.Bind(
							b'',
							self._pq_statement_id,
							self._input_formats,
							process_tuple(
								self._input_io, tuple(t), pte
							),
							(),
						),
						element.Execute(b'', 1),
					)
					for t in chunk
				]
				bindings.append(last)
				self.database._pq_push(
					xact.Instruction(
						chain.from_iterable(bindings),
						asynchook = self.database._receive_async
					),
					self
				)
				self.database._pq_complete()
		except:
			# Bare except is deliberate here: it re-raises after cleanup,
			# so even BaseExceptions still synchronize the protocol first.
			##
			# In cases where row packing errors or occur,
			# synchronize, finishing any pending transaction,
			# and raise the error.
			##
			# If the data sent to the remote end is invalid,
			# _complete will raise the exception and the current
			# exception being marked as the cause, so there should
			# be no [exception] information loss.
			##
			self.database.pq.synchronize()
			raise
	def load_chunks(self, chunks, *parameters):
		"""
		Execute the query for each row-parameter set in `iterable`.
		In cases of ``COPY ... FROM STDIN``, iterable must be an iterable of
		sequences of `bytes`.
		"""
		# NOTE(review): when the statement has no declared inputs — or any
		# extra *parameters are supplied — the COPY path is taken, and the
		# given parameters are NOT forwarded to _load_copy_chunks (which
		# ignores them anyway). Confirm this is the intended contract.
		if self.closed is None:
			self._fini()
		if not self._input or parameters:
			return self._load_copy_chunks(chunks)
		else:
			return self._load_tuple_chunks(chunks)
def load_rows(self, rows, chunksize = 256):
return self.load_chunks(chunk(rows, chunksize))
PreparedStatement = Statement
class StoredProcedure(pg_api.StoredProcedure):
	"""
	Callable interface to a stored procedure: the function is looked up
	in the system catalogs and invoked through a prepared
	``SELECT * FROM func(...)`` statement.
	"""
	_e_factors = ('database', 'procedure_id')
	procedure_id = None
	def _e_metas(self):
		yield ('oid', self.oid)
	def __repr__(self):
		return '<%s:%s>' %(
			self.procedure_id, self.statement.string
		)
	def __call__(self, *args, **kw):
		"""
		Invoke the procedure. Keyword arguments are mapped to parameter
		positions using pg_proc.proargnames; SRFs yield iterables, other
		functions return the (first column of the) first row.
		"""
		if kw:
			input = []
			argiter = iter(args)
			try:
				# (value, parameter-position) pairs for each keyword.
				word_idx = [(kw[k], self._input_attmap[k]) for k in kw]
			except KeyError as k:
				# BUGFIX: KeyError has no `.message` in Python 3; the
				# offending key is args[0].
				raise TypeError("%s got unexpected keyword argument %r" %(
					self.name, k.args[0]
				)
				)
			word_idx.sort(key = get1)
			# BUGFIX: the total parameter count was previously the
			# undefined name `argc`; it is positionals plus keywords.
			total = len(args) + len(word_idx)
			for x in range(total):
				if word_idx and x == word_idx[0][1]:
					# This position is filled by a keyword value.
					# (Guard also fixes pop() on an exhausted list.)
					input.append(word_idx.pop(0)[0])
				else:
					# BUGFIX: Python 3 iterator protocol — next(it),
					# not it.next().
					input.append(next(argiter))
		else:
			input = args
		if self.srf is True:
			if self.composite is True:
				return self.statement.rows(*input)
			else:
				# A generator expression is very appropriate here
				# as SRFs returning large number of rows would require
				# substantial amounts of memory.
				return map(get0, self.statement.rows(*input))
		else:
			if self.composite is True:
				return self.statement(*input)[0]
			else:
				return self.statement(*input)[0][0]
	def __init__(self, ident, database, description = ()):
		"""
		Resolve *ident* (an OID or regprocedure string) against the
		catalogs and prepare the invoking statement. *description*
		optionally supplies a column list for anonymous record returns.
		"""
		# Lookup pg_proc on database.
		if isinstance(ident, int):
			proctup = database.sys.lookup_procedure_oid(int(ident))
		else:
			proctup = database.sys.lookup_procedure_rp(str(ident))
		if proctup is None:
			raise LookupError("no function with identifier %s" %(str(ident),))
		self.procedure_id = ident
		self.oid = proctup[0]
		self.name = proctup["proname"]
		# Map argument names to their positions for keyword invocation.
		self._input_attmap = {}
		argnames = proctup.get('proargnames') or ()
		for x in range(len(argnames)):
			an = argnames[x]
			if an is not None:
				self._input_attmap[an] = x
		proargs = proctup['proargtypes']
		for x in proargs:
			# get metadata filled out.
			database.typio.resolve(x)
		self.statement = database.prepare(
			"SELECT * FROM %s(%s) AS func%s" %(
				proctup['_proid'],
				# ($1::type, $2::type, ... $n::type)
				', '.join([
					'$%d::%s' %(x + 1, database.typio.sql_type_from_oid(proargs[x]))
					for x in range(len(proargs))
				]),
				# Description for anonymous record returns
				(description and \
					'(' + ','.join(description) + ')' or '')
			)
		)
		self.srf = bool(proctup.get("proretset"))
		self.composite = proctup["composite"]
class SettingsCM(object):
	"""
	Context manager that applies a set of server settings on entry and
	restores the previously active values on exit. Single-use only.
	"""
	def __init__(self, database, settings_to_set):
		self.database = database
		self.settings_to_set = settings_to_set
	def __enter__(self):
		if hasattr(self, 'stored_settings'):
			raise RuntimeError("cannot re-use setting CMs")
		# Remember the current values so __exit__ can restore them.
		previous = self.database.settings.getset(
			self.settings_to_set.keys()
		)
		self.stored_settings = previous
		self.database.settings.update(self.settings_to_set)
	def __exit__(self, typ, val, tb):
		# Restore whatever was in effect before __enter__.
		self.database.settings.update(self.stored_settings)
class Settings(pg_api.Settings):
	"""
	Mapping interface to the server's configuration settings, backed by
	catalog queries (database.sys.setting_*) with a local cache that is
	kept current via ShowOption messages (_notify).
	"""
	_e_factors = ('database',)
	def __init__(self, database):
		self.database = database
		self.cache = {}
	def _e_metas(self):
		yield (None, str(len(self.cache)))
	def _clear_cache(self):
		self.cache.clear()
	def __getitem__(self, i):
		# Cache first; fall back to a catalog lookup.
		v = self.cache.get(i)
		if v is None:
			r = self.database.sys.setting_get(i)
			if r:
				v = r[0][0]
			else:
				raise KeyError(i)
		return v
	def __setitem__(self, i, v):
		cv = self.cache.get(i)
		if cv == v:
			# Already set to this value; avoid the round-trip.
			return
		setas = self.database.sys.setting_set(i, v)
		self.cache[i] = setas
	def __delitem__(self, k):
		# Deleting a setting issues RESET, reverting it to its default.
		self.database.execute(
			'RESET "' + k.replace('"', '""') + '"'
		)
		self.cache.pop(k, None)
	def __len__(self):
		return self.database.sys.setting_len()
	def __call__(self, **settings):
		# Produce a context manager applying `settings` for its duration.
		return SettingsCM(self.database, settings)
	def path():
		# Namespace trick: these closures become the property's
		# fget/fset/fdel/doc via property(**path()) below.
		def fget(self):
			return pg_str.split_ident(self["search_path"])
		def fset(self, value):
			self['search_path'] = ','.join([
				'"%s"' %(x.replace('"', '""'),) for x in value
			])
		def fdel(self):
			# Restore the connector-configured path if one exists.
			if self.database.connector.path is not None:
				self.path = self.database.connector.path
			else:
				self.database.execute("RESET search_path")
		doc = 'structured search_path interface'
		return locals()
	path = property(**path())
	def get(self, k, alt = None):
		# Like __getitem__, but returns `alt` instead of raising KeyError.
		if k in self.cache:
			return self.cache[k]
		db = self.database
		r = self.database.sys.setting_get(k)
		if r:
			v = r[0][0]
			self.cache[k] = v
		else:
			v = alt
		return v
	def getset(self, keys):
		# Resolve many settings at once: prefer cached values, fetch the
		# remainder in a single setting_mget() call.
		setmap = {}
		rkeys = []
		for k in keys:
			v = self.cache.get(k)
			if v is not None:
				setmap[k] = v
			else:
				rkeys.append(k)
		if rkeys:
			r = self.database.sys.setting_mget(rkeys)
			self.cache.update(r)
			setmap.update(r)
			# Any requested key the server didn't return is an error.
			rem = set(rkeys) - set([x['name'] for x in r])
			if rem:
				raise KeyError(rem)
		return setmap
	def keys(self):
		return map(get0, self.database.sys.setting_keys())
	__iter__ = keys
	def values(self):
		return map(get0, self.database.sys.setting_values())
	def items(self):
		return self.database.sys.setting_items()
	def update(self, d):
		kvl = [list(x) for x in dict(d).items()]
		self.cache.update(self.database.sys.setting_update(kvl))
	def _notify(self, msg):
		# Apply a ShowOption message: invoke subscriptions for the key
		# (and the None wildcard), then refresh the cache.
		subs = getattr(self, '_subscriptions', {})
		d = self.database.typio._decode
		key = d(msg.name)[0]
		val = d(msg.value)[0]
		for x in subs.get(key, ()):
			x(self.database, key, val)
		if None in subs:
			for x in subs[None]:
				x(self.database, key, val)
		self.cache[key] = val
	def subscribe(self, key, callback):
		"""
		Subscribe to changes of the setting using the callback. When the setting
		is changed, the callback will be invoked with the connection, the key,
		and the new value. If the old value is locally cached, its value will
		still be available for inspection, but there is no guarantee.
		If `None` is passed as the key, the callback will be called whenever any
		setting is remotely changed.
		>>> def watch(connection, key, newval):
		...
		>>> db.settings.subscribe('TimeZone', watch)
		"""
		subs = self._subscriptions = getattr(self, '_subscriptions', {})
		callbacks = subs.setdefault(key, [])
		if callback not in callbacks:
			callbacks.append(callback)
	def unsubscribe(self, key, callback):
		"""
		Stop listening for changes to a setting. The setting name(`key`), and
		the callback used to subscribe must be given again for successful
		termination of the subscription.
		>>> db.settings.unsubscribe('TimeZone', watch)
		"""
		subs = getattr(self, '_subscriptions', {})
		callbacks = subs.get(key, ())
		if callback in callbacks:
			callbacks.remove(callback)
class Transaction(pg_api.Transaction):
	"""
	Transaction context manager. Starts a transaction block
	(START TRANSACTION/COMMIT/ABORT) when the connection is idle, or a
	savepoint (SAVEPOINT/RELEASE/ROLLBACK TO) when a block is already
	in progress.
	"""
	database = None
	mode = None
	isolation = None
	_e_factors = ('database', 'isolation', 'mode')
	def _e_metas(self):
		yield (None, self.state)
	def __init__(self, database, isolation = None, mode = None):
		self.database = database
		self.isolation = isolation
		self.mode = mode
		self.state = 'initialized'
		self.type = None
	def __enter__(self):
		self.start()
		return self
	def __exit__(self, typ, value, tb):
		if typ is None:
			# No exception, but in a failed transaction?
			if self.database.pq.state == b'E':
				if not self.database.closed:
					self.rollback()
				# pg_exc.InFailedTransactionError
				em = element.ClientError((
					(b'S', 'ERROR'),
					(b'C', '25P02'),
					(b'M', 'invalid transaction block exit detected'),
					(b'H', "Database was in an error-state, but no exception was raised.")
				))
				self.database.typio.raise_client_error(em, creator = self)
			else:
				# No exception, and no error state. Everything is good.
				try:
					self.commit()
				# If an error occurs, clean up the transaction state
				# and raise as needed.
				except pg_exc.ActiveTransactionError:
					if not self.database.closed:
						# adjust the state so rollback will do the right thing and abort.
						self.state = 'open'
						self.rollback()
					raise
		elif issubclass(typ, Exception):
			# There's an exception, so only rollback if the connection
			# exists. If the rollback() was called here, it would just
			# contribute noise to the error.
			# (BaseExceptions such as KeyboardInterrupt skip the rollback.)
			if not self.database.closed:
				self.rollback()
	@staticmethod
	def _start_xact_string(isolation = None, mode = None):
		"""Build the START TRANSACTION statement for the given options."""
		q = 'START TRANSACTION'
		if isolation is not None:
			if ';' in isolation:
				# BUGFIX: report the offending isolation value (the original
				# code displayed `mode` here).
				raise ValueError("invalid transaction isolation " + repr(isolation))
			q += ' ISOLATION LEVEL ' + isolation
		if mode is not None:
			if ';' in mode:
				# BUGFIX: report the offending mode value (the original code
				# displayed `isolation` here).
				raise ValueError("invalid transaction mode " + repr(mode))
			q += ' ' + mode
		return q + ';'
	@staticmethod
	def _savepoint_xact_string(id):
		"""Build the SAVEPOINT statement for the given id string."""
		return 'SAVEPOINT "xact(' + id.replace('"', '""') + ')";'
	def start(self):
		"""Begin the transaction: a block when idle, a savepoint otherwise."""
		if self.state == 'open':
			return
		if self.state != 'initialized':
			em = element.ClientError((
				(b'S', 'ERROR'),
				(b'C', '--OPE'),
				(b'M', "transactions cannot be restarted"),
				(b'H', 'Create a new transaction object instead of re-using an old one.')
			))
			self.database.typio.raise_client_error(em, creator = self)
		if self.database.pq.state == b'I':
			self.type = 'block'
			q = self._start_xact_string(
				isolation = self.isolation,
				mode = self.mode,
			)
		else:
			self.type = 'savepoint'
			if (self.isolation, self.mode) != (None,None):
				# Isolation/mode cannot be applied to a savepoint.
				em = element.ClientError((
					(b'S', 'ERROR'),
					(b'C', '--OPE'),
					(b'M', "configured transaction used inside a transaction block"),
					(b'H', 'A transaction block was already started.'),
				))
				self.database.typio.raise_client_error(em, creator = self)
			q = self._savepoint_xact_string(hex(id(self)))
		self.database.execute(q)
		self.state = 'open'
	begin = start
	@staticmethod
	def _release_string(id):
		"""Build the RELEASE statement that commits a savepoint transaction."""
		return 'RELEASE "xact(' + id.replace('"', '""') + ')";'
	def commit(self):
		"""Commit the block, or RELEASE the savepoint. Idempotent."""
		if self.state == 'committed':
			return
		if self.state != 'open':
			em = element.ClientError((
				(b'S', 'ERROR'),
				(b'C', '--OPE'),
				(b'M', "commit attempted on transaction with unexpected state, " + repr(self.state)),
			))
			self.database.typio.raise_client_error(em, creator = self)
		if self.type == 'block':
			q = 'COMMIT'
		else:
			q = self._release_string(hex(id(self)))
		self.database.execute(q)
		self.state = 'committed'
	@staticmethod
	def _rollback_to_string(id, fmt = 'ROLLBACK TO "xact({0})"; RELEASE "xact({0})";'.format):
		return fmt(id.replace('"', '""'))
	def rollback(self):
		"""Abort the block, or roll back and release the savepoint. Idempotent."""
		if self.state == 'aborted':
			return
		if self.state not in ('prepared', 'open'):
			em = element.ClientError((
				(b'S', 'ERROR'),
				(b'C', '--OPE'),
				(b'M', "ABORT attempted on transaction with unexpected state, " + repr(self.state)),
			))
			self.database.typio.raise_client_error(em, creator = self)
		if self.type == 'block':
			q = 'ABORT;'
		elif self.type == 'savepoint':
			q = self._rollback_to_string(hex(id(self)))
		else:
			raise RuntimeError("unknown transaction type " + repr(self.type))
		self.database.execute(q)
		self.state = 'aborted'
	abort = rollback
class Connection(pg_api.Connection):
connector = None
type = None
version_info = None
version = None
security = None
backend_id = None
client_address = None
client_port = None
# Replaced with instances on connection instantiation.
settings = Settings
def _e_metas(self):
yield (None, '[' + self.state + ']')
if self.client_address is not None:
yield ('client_address', self.client_address)
if self.client_port is not None:
yield ('client_port', self.client_port)
if self.version is not None:
yield ('version', self.version)
att = getattr(self, 'failures', None)
if att:
count = 0
for x in att:
# Format each failure without their traceback.
errstr = ''.join(format_exception(type(x.error), x.error, None))
factinfo = str(x.socket_factory)
if hasattr(x, 'ssl_negotiation'):
if x.ssl_negotiation is True:
factinfo = 'SSL ' + factinfo
else:
factinfo = 'NOSSL ' + factinfo
yield (
'failures[' + str(count) + ']',
factinfo + os.linesep + errstr
)
count += 1
def __repr__(self):
return '<%s.%s[%s] %s>' %(
type(self).__module__,
type(self).__name__,
self.connector._pq_iri,
self.closed and 'closed' or '%s' %(self.pq.state,)
)
def __exit__(self, type, value, tb):
# Don't bother closing unless it's a normal exception.
if type is None or issubclass(type, Exception):
self.close()
def interrupt(self, timeout = None):
self.pq.interrupt(timeout = timeout)
def execute(self, query : str) -> None:
q = xact.Instruction((
element.Query(self.typio._encode(query)[0]),
),
asynchook = self._receive_async
)
self._pq_push(q, self)
self._pq_complete()
def do(self, language : str, source : str,
qlit = pg_str.quote_literal,
qid = pg_str.quote_ident,
) -> None:
sql = "DO " + qlit(source) + " LANGUAGE " + qid(language) + ";"
self.execute(sql)
def xact(self, isolation = None, mode = None):
x = Transaction(self, isolation = isolation, mode = mode)
return x
def prepare(self,
sql_statement_string : str,
statement_id = None,
Class = Statement
) -> Statement:
ps = Class(self, statement_id, sql_statement_string)
ps._init()
ps._fini()
return ps
@property
def query(self, Class = SingleExecution):
return Class(self)
def statement_from_id(self, statement_id : str) -> Statement:
ps = Statement(self, statement_id, None)
ps._init()
ps._fini()
return ps
def proc(self, proc_id : (str, int)) -> StoredProcedure:
sp = StoredProcedure(proc_id, self)
return sp
def cursor_from_id(self, cursor_id : str) -> Cursor:
c = Cursor(None, None, self, cursor_id)
c._init()
return c
@property
def closed(self) -> bool:
if getattr(self, 'pq', None) is None:
return True
if hasattr(self.pq, 'socket') and self.pq.xact is not None:
return self.pq.xact.fatal is True
return False
def close(self, getattr = getattr):
# Write out the disconnect message if the socket is around.
# If the connection is known to be lost, don't bother. It will
# generate an extra exception.
if getattr(self, 'pq', None) is None or getattr(self.pq, 'socket', None) is None:
# No action to take.
return
x = getattr(self.pq, 'xact', None)
if x is not None and x.fatal is not True:
# finish the existing pq transaction iff it's not Closing.
self.pq.complete()
if self.pq.xact is None:
# It completed the existing transaction.
self.pq.push(xact.Closing())
self.pq.complete()
if self.pq.socket:
self.pq.complete()
# Close the socket if there is one.
if self.pq.socket:
self.pq.socket.close()
self.pq.socket = None
@property
def state(self) -> str:
if not hasattr(self, 'pq'):
return 'initialized'
if hasattr(self, 'failures'):
return 'failed'
if self.closed:
return 'closed'
if isinstance(self.pq.xact, xact.Negotiation):
return 'negotiating'
if self.pq.xact is None:
if self.pq.state == b'E':
return 'failed block'
return 'idle' + (' in block' if self.pq.state != b'I' else '')
else:
return 'busy'
def reset(self):
"""
restore original settings, reset the transaction, drop temporary
objects.
"""
self.execute("ABORT; RESET ALL;")
def __enter__(self):
self.connect()
return self
def connect(self):
'Establish the connection to the server'
if self.closed is False:
# already connected? just return.
return
if hasattr(self, 'pq'):
# It's closed, *but* there's a PQ connection..
x = self.pq.xact
self.typio.raise_error(x.error_message, cause = getattr(x, 'exception', None), creator = self)
# It's closed.
try:
self._establish()
except Exception:
# Close it up on failure.
self.close()
raise
def _establish(self):
# guts of connect()
self.pq = None
# if any exception occurs past this point, the connection
# will not be usable.
timeout = self.connector.connect_timeout
sslmode = self.connector.sslmode or 'prefer'
failures = []
exc = None
try:
# get the list of sockets to try
socket_factories = self.connector.socket_factory_sequence()
except Exception as e:
socket_factories = ()
exc = e
# When ssl is None: SSL negotiation will not occur.
# When ssl is True: SSL negotiation will occur *and* it must succeed.
# When ssl is False: SSL negotiation will occur but it may fail(NOSSL).
if sslmode == 'allow':
# without ssl, then with. :)
socket_factories = interlace(
zip(repeat(None, len(socket_factories)), socket_factories),
zip(repeat(True, len(socket_factories)), socket_factories)
)
elif sslmode == 'prefer':
# with ssl, then without. [maybe] :)
socket_factories = interlace(
zip(repeat(False, len(socket_factories)), socket_factories),
zip(repeat(None, len(socket_factories)), socket_factories)
)
# prefer is special, because it *may* be possible to
# skip the subsequent "without" in situations where SSL is off.
elif sslmode == 'require':
socket_factories = zip(repeat(True, len(socket_factories)), socket_factories)
elif sslmode == 'disable':
# None = Do Not Attempt SSL negotiation.
socket_factories = zip(repeat(None, len(socket_factories)), socket_factories)
else:
raise ValueError("invalid sslmode: " + repr(sslmode))
# can_skip is used when 'prefer' or 'allow' is the sslmode.
# if the ssl negotiation returns 'N' (nossl), then
# ssl "failed", but the socket is still usable for nossl.
# in these cases, can_skip is set to True so that the
# subsequent non-ssl attempt is skipped if it failed with the 'N' response.
can_skip = False
startup = self.connector._startup_parameters
password = self.connector._password
Connection3 = client.Connection
for (ssl, sf) in socket_factories:
if can_skip is True:
# the last attempt failed and knows this attempt will fail too.
can_skip = False
continue
pq = Connection3(sf, startup, password = password,)
if hasattr(self, 'tracer'):
pq.tracer = self.tracer
# Grab the negotiation transaction before
# connecting as it will be needed later if successful.
neg = pq.xact
pq.connect(ssl = ssl, timeout = timeout)
didssl = getattr(pq, 'ssl_negotiation', -1)
# It successfully connected if pq.xact is None;
# The startup/negotiation xact completed.
if pq.xact is None:
self.pq = pq
if hasattr(self.pq.socket, 'fileno'):
self.fileno = self.pq.socket.fileno
self.security = 'ssl' if didssl is True else None
showoption_type = element.ShowOption.type
for x in neg.asyncs:
if x.type == showoption_type:
self._receive_async(x)
# success!
break
elif pq.socket is not None:
# In this case, an application/protocol error occurred.
# Close out the sockets ourselves.
pq.socket.close()
# Identify whether or not we can skip the attempt.
# Whether or not we can skip depends entirely on the SSL parameter.
if sslmode == 'prefer' and ssl is False and didssl is False:
# In this case, the server doesn't support SSL or it's
# turned off. Therefore, the "without_ssl" attempt need
# *not* be ran because it has already been noted to be
# a failure.
can_skip = True
elif hasattr(pq.xact, 'exception'):
# If a Python exception occurred, chances are that it is
# going to fail again iff it is going to hit the same host.
if sslmode == 'prefer' and ssl is False:
# when 'prefer', the first attempt
# is marked with ssl is "False"
can_skip = True
elif sslmode == 'allow' and ssl is None:
# when 'allow', the first attempt
# is marked with dossl is "None"
can_skip = True
try:
self.typio.raise_error(pq.xact.error_message)
except Exception as error:
pq.error = error
# Otherwise, infinite recursion in the element traceback.
error.creator = None
# The tracebacks of the specific failures aren't particularly useful..
error.__traceback__ = None
if getattr(pq.xact, 'exception', None) is not None:
pq.error.__cause__ = pq.xact.exception
failures.append(pq)
else:
# No servers available. (see the break-statement in the for-loop)
self.failures = failures or ()
# it's over.
self.typio.raise_client_error(could_not_connect, creator = self, cause = exc)
##
# connected, now initialize connection information.
self.backend_id = self.pq.backend_id
sv = self.settings.cache.get("server_version", "0.0")
self.version_info = pg_version.normalize(pg_version.split(sv))
# manual binding
self.sys = pg_lib.Binding(self, pg_lib.sys)
vi = self.version_info[:2]
if vi <= (8,1):
sd = self.sys.startup_data_only_version()
elif vi >= (9,2):
sd = self.sys.startup_data_92()
else:
sd = self.sys.startup_data()
# connection info
self.version, self.backend_start, \
self.client_address, self.client_port = sd
# First word from the version string.
self.type = self.version.split()[0]
##
# Set standard_conforming_strings
scstr = self.settings.get('standard_conforming_strings')
if scstr is None or vi == (8,1):
# There used to be a warning emitted here.
# It was noisy, and had little added value
# over a nice WARNING at the top of the driver documentation.
pass
elif scstr.lower() not in ('on','true','yes'):
self.settings['standard_conforming_strings'] = 'on'
super().connect()
def _pq_push(self, xact, controller = None):
x = self.pq.xact
if x is not None:
self.pq.complete()
if x.fatal is not None:
self.typio.raise_error(x.error_message)
if controller is not None:
self._controller = controller
self.pq.push(xact)
# Complete the current protocol transaction.
def _pq_complete(self):
pq = self.pq
x = pq.xact
if x is not None:
# There is a running transaction, finish it.
pq.complete()
# Raise an error *iff* one occurred.
if x.fatal is not None:
self.typio.raise_error(x.error_message, cause = getattr(x, 'exception', None))
del self._controller
# Process the next message.
def _pq_step(self, complete_state = globals()['xact'].Complete):
pq = self.pq
x = pq.xact
if x is not None:
pq.step()
# If the protocol transaction was completed by
# the last step, raise the error *iff* one occurred.
if x.state is complete_state:
if x.fatal is not None:
self.typio.raise_error(x.error_message, cause = getattr(x, 'exception', None))
del self._controller
def _receive_async(self,
msg, controller = None,
showoption = element.ShowOption.type,
notice = element.Notice.type,
notify = element.Notify.type,
):
c = controller or getattr(self, '_controller', self)
typ = msg.type
if typ == showoption:
if msg.name == b'client_encoding':
self.typio.set_encoding(msg.value.decode('ascii'))
self.settings._notify(msg)
elif typ == notice:
m = self.typio.emit_message(msg, creator = c)
elif typ == notify:
self._notifies.append(msg)
else:
self.typio.emit_client_message(
element.ClientNotice((
(b'C', '-1000'),
(b'S', 'WARNING'),
(b'M', 'cannot process unrecognized asynchronous message'),
(b'D', repr(msg)),
)),
creator = c
)
def clone(self, *args, **kw):
c = self.__class__(self.connector, *args, **kw)
c.connect()
return c
	def notify(self, *channels, **channel_and_payload):
		"""
		Issue NOTIFY statements for the given channels.

		Positional arguments are either channel names (str) or
		(channel, payload) pairs; keyword arguments map channel names to
		payloads.  Channel identifiers are emitted as double-quoted
		identifiers (embedded quotes doubled) and payloads as single-quoted
		string literals (embedded single quotes doubled).
		"""
		notifies = ""
		if channels:
			notifies += ';'.join((
				'NOTIFY "' + x.replace('"', '""') + '"' # str() case
				if x.__class__ is not tuple else (
					# tuple() case
					'NOTIFY "' + x[0].replace('"', '""') + """",'""" + \
					x[1].replace("'", "''") + "'"
				)
				for x in channels
			))
			notifies += ';'
		if channel_and_payload:
			notifies += ';'.join((
				'NOTIFY "' + channel.replace('"', '""') + """",'""" + \
				payload.replace("'", "''") + "'"
				for channel, payload in channel_and_payload.items()
			))
			notifies += ';'
		return self.execute(notifies)
def listening_channels(self):
if self.version_info[:2] > (8,4):
return self.sys.listening_channels()
else:
return self.sys.listening_relations()
def listen(self, *channels, len = len):
qstr = ''
for x in channels:
# XXX: hardcoded identifier length?
if len(x) > 63:
raise ValueError("channel name too long: " + x)
qstr += '; LISTEN ' + x.replace('"', '""')
return self.execute(qstr)
def unlisten(self, *channels, len = len):
qstr = ''
for x in channels:
# XXX: hardcoded identifier length?
if len(x) > 63:
raise ValueError("channel name too long: " + x)
qstr += '; UNLISTEN ' + x.replace('"', '""')
return self.execute(qstr)
def iternotifies(self, timeout = None):
nm = NotificationManager(self, timeout = timeout)
for x in nm:
if x is None:
yield None
else:
for y in x[1]:
yield y
	def __init__(self, connector, *args, **kw):
		"""
		Create a connection based on the given connector.
		"""
		self.connector = connector
		# raw notify messages
		self._notifies = []
		# No socket yet; -1 marks the connection as unestablished.
		self.fileno = -1
		# Typed I/O manager handling encoding/decoding of wire data.
		self.typio = self.connector.driver.typio(self)
		# Start conservatively; the server reports the real client_encoding
		# later (handled in _receive_async).
		self.typio.set_encoding('ascii')
		self.settings = Settings(self)
# class Connection
class Connector(pg_api.Connector):
	"""
	All arguments to Connector are keywords. At the very least, user,
	and socket, may be provided. If socket, unix, or process is not
	provided, host and port must be.
	"""
	@property
	def _pq_iri(self):
		# IRI form of the (non-private) connection parameters with the
		# password obscured; used for error context output.
		return pg_iri.serialize(
			{
				k : v for k,v in self.__dict__.items()
				if v is not None and not k.startswith('_') and k not in (
					'driver', 'category'
				)
			},
			obscure_password = True
		)
	def _e_metas(self):
		# Error metadata: identify the connector class and its IRI.
		yield (None, '[' + self.__class__.__name__ + '] ' + self._pq_iri)
	def __repr__(self):
		# One "name = value" pair per public, non-None attribute.
		keywords = (',' + os.linesep + ' ').join([
			'%s = %r' %(k, getattr(self, k, None)) for k in self.__dict__
			if not k.startswith('_') and getattr(self, k, None) is not None
		])
		return '{mod}.{name}({keywords})'.format(
			mod = type(self).__module__,
			name = type(self).__name__,
			keywords = os.linesep + ' ' + keywords if keywords else ''
		)
	@abstractmethod
	def socket_factory_sequence(self):
		"""
		Generate a list of callables that will be used to attempt to make the
		connection to the server. It is assumed that each factory will produce
		an object with a socket interface that is ready for reading and writing
		data.
		The callables in the sequence must take a timeout parameter.
		"""
	def __init__(self,
		connect_timeout : int = None,
		server_encoding : "server encoding hint for driver" = None,
		sslmode : ('allow', 'prefer', 'require', 'disable') = None,
		sslcrtfile : "filepath" = None,
		sslkeyfile : "filepath" = None,
		sslrootcrtfile : "filepath" = None,
		sslrootcrlfile : "filepath" = None,
		application_name : "" = None,
		driver = None,
		**kw
	):
		super().__init__(**kw)
		self.driver = driver
		self.server_encoding = server_encoding
		self.connect_timeout = connect_timeout
		self.sslmode = sslmode
		self.sslkeyfile = sslkeyfile
		self.sslcrtfile = sslcrtfile
		self.sslrootcrtfile = sslrootcrtfile
		self.sslrootcrlfile = sslrootcrlfile
		self.application_name = application_name
		if self.sslrootcrlfile is not None:
			# CRL checking is not implemented; warn rather than silently
			# accepting the parameter.
			pg_exc.IgnoredClientParameterWarning(
				"certificate revocation lists are *not* checked",
				creator = self,
			).emit()
		# Startup message parameters.
		tnkw = {
			'client_min_messages' : 'WARNING',
		}
		if self.settings:
			s = dict(self.settings)
			if 'search_path' in self.settings:
				sp = s.get('search_path')
				if sp is None:
					self.settings.pop('search_path')
				elif not isinstance(sp, str):
					# search_path may be a sequence of schema names; quote
					# each and join into a single setting string.
					s['search_path'] = ','.join(
						pg_str.quote_ident(x) for x in sp
					)
			tnkw.update(s)
		tnkw['user'] = self.user
		if self.database is not None:
			tnkw['database'] = self.database
		if self.application_name is not None:
			tnkw['application_name'] = self.application_name
		se = self.server_encoding or 'utf-8'
		##
		# Attempt to accommodate for literal treatment of startup data.
		##
		self._startup_parameters = tuple([
			# All keys go in utf-8. However, ascii would probably be good enough.
			(
				k.encode('utf-8'),
				# If it's a str(), encode in the hinted server_encoding.
				# Otherwise, convert the object(int, float, bool, etc) into a string
				# and treat it as utf-8.
				v.encode(se) if type(v) is str else str(v).encode('utf-8')
			)
			for k, v in tnkw.items()
		])
		self._password = (self.password or '').encode(se)
		# SSL keyword arguments forwarded to the socket-securing layer.
		self._socket_secure = {
			'keyfile' : self.sslkeyfile,
			'certfile' : self.sslcrtfile,
			'ca_certs' : self.sslrootcrtfile,
		}
	# class Connector
class SocketConnector(Connector):
	'abstract connector for using `socket` and `ssl`'
	@abstractmethod
	def socket_factory_sequence(self):
		"""
		Return a sequence of `SocketFactory`s for a connection to use to connect
		to the target host.
		"""
	def create_socket_factory(self, **params):
		# Factory hook: subclasses supply socket_create/socket_connect
		# (and optionally socket_secure) via params.
		return SocketFactory(**params)
class IPConnector(SocketConnector):
	"""
	Base class for connectors addressing a fixed IP endpoint; subclasses
	define ``ipv`` and ``address_family``.
	"""
	def socket_factory_sequence(self):
		# The endpoint is fully specified, so a single constant factory
		# is always used.
		return self._socketcreators
	def socket_factory_params(self, host, port, ipv, **kw):
		"""
		Validate the endpoint description and build the keyword parameters
		for `create_socket_factory`.
		"""
		if ipv != self.ipv:
			raise TypeError("'ipv' keyword must be '%d'" % self.ipv)
		if host is None:
			raise TypeError("'host' is a required keyword and cannot be 'None'")
		if port is None:
			raise TypeError("'port' is a required keyword and cannot be 'None'")
		return {
			'socket_create': (self.address_family, socket.SOCK_STREAM),
			'socket_connect': (host, int(port)),
		}
	def __init__(self, host, port, ipv, **kw):
		factory_params = self.socket_factory_params(host, port, ipv, **kw)
		self.host, self.port = factory_params['socket_connect']
		# constant socket connector
		self._socketcreator = self.create_socket_factory(**factory_params)
		self._socketcreators = (self._socketcreator,)
		super().__init__(**kw)
class IP4(IPConnector):
	'Connector for establishing IPv4 connections'
	# IP version and matching address family consumed by IPConnector.
	ipv = 4
	address_family = socket.AF_INET
	def __init__(self,
		host : "IPv4 Address (str)" = None,
		port : int = None,
		ipv = 4,
		**kw
	):
		super().__init__(host, port, ipv, **kw)
class IP6(IPConnector):
	'Connector for establishing IPv6 connections'
	# IP version and matching address family consumed by IPConnector.
	ipv = 6
	address_family = socket.AF_INET6
	def __init__(self,
		host : "IPv6 Address (str)" = None,
		port : int = None,
		ipv = 6,
		**kw
	):
		super().__init__(host, port, ipv, **kw)
class Unix(SocketConnector):
	'Connector for establishing unix domain socket connections'
	def socket_factory_sequence(self):
		# The socket path is fixed, so a single constant factory suffices.
		return self._socketcreators
	def socket_factory_params(self, unix):
		"""
		Build `create_socket_factory` parameters for the given socket path.
		"""
		if unix is None:
			raise TypeError("'unix' is a required keyword and cannot be 'None'")
		return {
			'socket_create': (socket.AF_UNIX, socket.SOCK_STREAM),
			'socket_connect': unix,
		}
	def __init__(self, unix = None, **kw):
		factory_params = self.socket_factory_params(unix)
		self.unix = factory_params['socket_connect']
		# constant socket connector
		self._socketcreator = self.create_socket_factory(**factory_params)
		self._socketcreators = (self._socketcreator,)
		super().__init__(**kw)
class Host(SocketConnector):
	"""
	Connector for establishing hostname based connections.
	This connector exercises socket.getaddrinfo.
	"""
	def socket_factory_sequence(self):
		"""
		Return a list of `SocketCreator`s based on the results of
		`socket.getaddrinfo`.
		"""
		return [
			# (AF, socktype, proto), (IP, Port)
			self.create_socket_factory(**(self.socket_factory_params(x[0:3], x[4][:2],
				self._socket_secure)))
			for x in socket.getaddrinfo(
				self.host, self.port, self._address_family, socket.SOCK_STREAM
			)
		]
	def socket_factory_params(self, socktype, address, sslparams):
		# Package the getaddrinfo-derived triple, the address pair, and the
		# SSL keyword parameters for create_socket_factory.
		return {'socket_create': socktype,
				'socket_connect': address,
				'socket_secure': sslparams}
	def __init__(self,
		host : str = None,
		port : (str, int) = None,
		ipv : int = None,
		address_family : "address family to use(AF_INET,AF_INET6)" = None,
		**kw
	):
		"""
		Raises `TypeError` when a required keyword is missing, when both
		'ipv' and 'address_family' are given, or when 'ipv' is not 4 or 6.
		"""
		if host is None:
			raise TypeError("'host' is a required keyword")
		if port is None:
			raise TypeError("'port' is a required keyword")
		if address_family is not None and ipv is not None:
			# Fixed error message; it previously read "on mutually exclusive".
			raise TypeError("'ipv' and 'address_family' are mutually exclusive")
		if ipv is None:
			# No explicit version: use the given family, or let getaddrinfo
			# decide via AF_UNSPEC.
			self._address_family = address_family or socket.AF_UNSPEC
		elif ipv == 4:
			self._address_family = socket.AF_INET
		elif ipv == 6:
			self._address_family = socket.AF_INET6
		else:
			raise TypeError("unknown IP version selected: 'ipv' = " + repr(ipv))
		self.host = host
		self.port = port
		super().__init__(**kw)
class Driver(pg_api.Driver):
	def _e_metas(self):
		# Error metadata: identify the driver by module and class name.
		yield (None, type(self).__module__ + '.' + type(self).__name__)
	def ip4(self, **kw):
		# Build an IPv4 connector bound to this driver.
		return IP4(driver = self, **kw)
	def ip6(self, **kw):
		# Build an IPv6 connector bound to this driver.
		return IP6(driver = self, **kw)
	def host(self, **kw):
		# Build a hostname-resolving connector bound to this driver.
		return Host(driver = self, **kw)
	def unix(self, **kw):
		# Build a unix domain socket connector bound to this driver.
		return Unix(driver = self, **kw)
	def fit(self,
		unix = None,
		host = None,
		port = None,
		**kw
	) -> Connector:
		"""
		Create the appropriate `postgresql.api.Connector` based on the
		parameters.
		This also protects against mutually exclusive parameters.
		"""
		if unix is not None:
			if host is not None:
				raise TypeError("'unix' and 'host' keywords are exclusive")
			if port is not None:
				raise TypeError("'unix' and 'port' keywords are exclusive")
			return self.unix(unix = unix, **kw)
		else:
			if host is None or port is None:
				raise TypeError("'host' and 'port', or 'unix' must be supplied")
			# We have a host and a port.
			# If it's an IP address, IP4 or IP6 should be selected.
			if ':' in host:
				# There's a ':' in host, good chance that it's IPv6.
				try:
					socket.inet_pton(socket.AF_INET6, host)
					return self.ip6(host = host, port = port, **kw)
				except (socket.error, NameError):
					# NameError covers platforms without inet_pton.
					pass
			# Not IPv6, maybe IPv4...
			try:
				socket.inet_aton(host)
				# It's IP4
				return self.ip4(host = host, port = port, **kw)
			except socket.error:
				pass
			# neither host, nor port are None, probably a hostname.
			return self.host(host = host, port = port, **kw)
	def connect(self, **kw) -> Connection:
		"""
		For information on acceptable keywords, see:
		`postgresql.documentation.driver`:Connection Keywords
		"""
		c = self.fit(**kw)()
		c.connect()
		return c
	def __init__(self, connection = Connection, typio = TypeIO):
		# Allow the Connection and TypeIO implementations to be overridden.
		self.connection = connection
		self.typio = typio
| 28.411745 | 111 | 0.67644 |
337f122938a6a3278588ff974fb1f1e8f6882910 | 2,317 | py | Python | conviz/utils/image_util.py | PhTrempe/conviz | b0d5a3a357ed013223847bcea5fa0df46e44de02 | [
"MIT"
] | null | null | null | conviz/utils/image_util.py | PhTrempe/conviz | b0d5a3a357ed013223847bcea5fa0df46e44de02 | [
"MIT"
] | null | null | null | conviz/utils/image_util.py | PhTrempe/conviz | b0d5a3a357ed013223847bcea5fa0df46e44de02 | [
"MIT"
] | null | null | null | import numpy
from scipy.misc import imsave
class ImageUtil(object):
    """
    A utility class which provides image manipulation functionality.
    """
    @staticmethod
    def build_grid_image(images, grid_dim, padding):
        """
        Builds a grid image from a list of images.
        :param images: The list of images to place on the grid image.
        :param grid_dim: A 2-element tuple for the grid's shape.
        :param padding: A 2-element tuple for vertical and horizontal padding.
        :return: The built grid image.
        """
        cell_shape = images[0].shape[:2]
        canvas = numpy.zeros(
            ImageUtil._compute_grid_image_shape(cell_shape, grid_dim, padding))
        ImageUtil._place_image_on_grid(
            canvas, images, grid_dim, cell_shape, padding)
        return canvas
    @staticmethod
    def save_image(img_data, img_path):
        """
        Saves the given image data to the specified image file path.
        :param img_data: The image data as a numpy array.
        :param img_path: The path to which the image is created.
        """
        imsave(img_path, img_data)
    @staticmethod
    def _compute_grid_image_shape(img_2d_shape, grid_dim, padding):
        # Each cell occupies its own size plus one padding gap; one extra
        # padding border is added on the leading edges.
        rows = (img_2d_shape[0] + padding[0]) * grid_dim[0] + padding[0]
        cols = (img_2d_shape[1] + padding[1]) * grid_dim[1] + padding[1]
        return (rows, cols, 3)
    @staticmethod
    def _place_image_on_grid(grid_img, images, grid_dim, img_2d_shape, padding):
        # Walk the grid row-major, copying each image into its padded cell.
        for cell_index in range(grid_dim[0] * grid_dim[1]):
            row, col = divmod(cell_index, grid_dim[1])
            i_lo, i_hi, j_lo, j_hi = ImageUtil._compute_grid_image_zone(
                img_2d_shape, padding, row, col)
            grid_img[i_lo:i_hi, j_lo:j_hi, :] = images[cell_index]
    @staticmethod
    def _compute_grid_image_zone(img_2d_shape, padding, grid_i, grid_j):
        # Top-left corner of cell (grid_i, grid_j), offset by the leading
        # padding border; returns (row_start, row_end, col_start, col_end).
        row_start = padding[0] + (img_2d_shape[0] + padding[0]) * grid_i
        col_start = padding[1] + (img_2d_shape[1] + padding[1]) * grid_j
        return (row_start, row_start + img_2d_shape[0],
                col_start, col_start + img_2d_shape[1])
| 38.616667 | 80 | 0.637894 |
d1e5c94f42f66f851b6d2cc55a075f6740cc0153 | 12,338 | py | Python | doubleml/tests/test_plr_no_cross_fit.py | SvenKlaassen/doubleml-for-py | b3cbdb572fce435c18ec67ca323645900fc901b5 | [
"MIT"
] | 103 | 2020-12-21T08:41:17.000Z | 2022-03-29T07:49:48.000Z | doubleml/tests/test_plr_no_cross_fit.py | SvenKlaassen/doubleml-for-py | b3cbdb572fce435c18ec67ca323645900fc901b5 | [
"MIT"
] | 27 | 2020-12-23T09:25:25.000Z | 2022-03-31T12:22:30.000Z | doubleml/tests/test_plr_no_cross_fit.py | SvenKlaassen/doubleml-for-py | b3cbdb572fce435c18ec67ca323645900fc901b5 | [
"MIT"
] | 17 | 2021-02-10T10:03:08.000Z | 2022-03-23T15:44:57.000Z | import numpy as np
import pytest
import math
from sklearn.base import clone
from sklearn.linear_model import Lasso
import doubleml as dml
from ._utils import draw_smpls
from ._utils_plr_manual import fit_plr, plr_dml1, fit_nuisance_plr, boot_plr, tune_nuisance_plr
@pytest.fixture(scope='module',
                params=[Lasso(alpha=0.1)])
def learner(request):
    # Estimator used for both nuisance models (ml_g and ml_m).
    return request.param
@pytest.fixture(scope='module',
                params=['IV-type', 'partialling out'])
def score(request):
    # PLR score function variant under test.
    return request.param
@pytest.fixture(scope='module',
                params=[1, 2])
def n_folds(request):
    # Number of cross-fitting folds (1 means a single full-sample fold).
    return request.param
@pytest.fixture(scope="module")
def dml_plr_no_cross_fit_fixture(generate_data1, learner, score, n_folds):
boot_methods = ['normal']
n_rep_boot = 502
dml_procedure = 'dml1'
# collect data
data = generate_data1
x_cols = data.columns[data.columns.str.startswith('X')].tolist()
# Set machine learning methods for m & g
ml_g = clone(learner)
ml_m = clone(learner)
np.random.seed(3141)
obj_dml_data = dml.DoubleMLData(data, 'y', ['d'], x_cols)
dml_plr_obj = dml.DoubleMLPLR(obj_dml_data,
ml_g, ml_m,
n_folds,
score=score,
dml_procedure=dml_procedure,
apply_cross_fitting=False)
dml_plr_obj.fit()
np.random.seed(3141)
y = data['y'].values
x = data.loc[:, x_cols].values
d = data['d'].values
if n_folds == 1:
smpls = [(np.arange(len(y)), np.arange(len(y)))]
else:
n_obs = len(y)
all_smpls = draw_smpls(n_obs, n_folds)
smpls = all_smpls[0]
smpls = [smpls[0]]
res_manual = fit_plr(y, x, d, clone(learner), clone(learner),
[smpls], dml_procedure, score)
res_dict = {'coef': dml_plr_obj.coef,
'coef_manual': res_manual['theta'],
'se': dml_plr_obj.se,
'se_manual': res_manual['se'],
'boot_methods': boot_methods}
for bootstrap in boot_methods:
np.random.seed(3141)
boot_theta, boot_t_stat = boot_plr(y, d, res_manual['thetas'], res_manual['ses'],
res_manual['all_g_hat'], res_manual['all_m_hat'],
[smpls], score, bootstrap, n_rep_boot,
apply_cross_fitting=False)
np.random.seed(3141)
dml_plr_obj.bootstrap(method=bootstrap, n_rep_boot=n_rep_boot)
res_dict['boot_coef' + bootstrap] = dml_plr_obj.boot_coef
res_dict['boot_t_stat' + bootstrap] = dml_plr_obj.boot_t_stat
res_dict['boot_coef' + bootstrap + '_manual'] = boot_theta
res_dict['boot_t_stat' + bootstrap + '_manual'] = boot_t_stat
return res_dict
@pytest.mark.ci
def test_dml_plr_no_cross_fit_coef(dml_plr_no_cross_fit_fixture):
    # Package coefficient must match the manual implementation.
    assert math.isclose(dml_plr_no_cross_fit_fixture['coef'],
                        dml_plr_no_cross_fit_fixture['coef_manual'],
                        rel_tol=1e-9, abs_tol=1e-4)
@pytest.mark.ci
def test_dml_plr_no_cross_fit_se(dml_plr_no_cross_fit_fixture):
    # Package standard error must match the manual implementation.
    assert math.isclose(dml_plr_no_cross_fit_fixture['se'],
                        dml_plr_no_cross_fit_fixture['se_manual'],
                        rel_tol=1e-9, abs_tol=1e-4)
@pytest.mark.ci
def test_dml_plr_no_cross_fit_boot(dml_plr_no_cross_fit_fixture):
    """
    Compare the bootstrap results of the package against the manual
    implementation for each bootstrap method.
    """
    for bootstrap in dml_plr_no_cross_fit_fixture['boot_methods']:
        assert np.allclose(dml_plr_no_cross_fit_fixture['boot_coef' + bootstrap],
                           dml_plr_no_cross_fit_fixture['boot_coef' + bootstrap + '_manual'],
                           rtol=1e-9, atol=1e-4)
        # Also compare the bootstrapped t-statistics; the fixture stores them
        # and the other *_boot tests in this module check both quantities,
        # but this assertion was previously missing here.
        assert np.allclose(dml_plr_no_cross_fit_fixture['boot_t_stat' + bootstrap],
                           dml_plr_no_cross_fit_fixture['boot_t_stat' + bootstrap + '_manual'],
                           rtol=1e-9, atol=1e-4)
@pytest.fixture(scope='module',
                params=[10, 13])
def n_rep(request):
    # Number of sample-splitting repetitions.
    return request.param
@pytest.fixture(scope="module")
def dml_plr_rep_no_cross_fit_fixture(generate_data1, learner, score, n_rep):
boot_methods = ['normal']
n_folds = 2
n_rep_boot = 498
dml_procedure = 'dml1'
# collect data
data = generate_data1
x_cols = data.columns[data.columns.str.startswith('X')].tolist()
# Set machine learning methods for m & g
ml_g = clone(learner)
ml_m = clone(learner)
np.random.seed(3141)
obj_dml_data = dml.DoubleMLData(data, 'y', ['d'], x_cols)
dml_plr_obj = dml.DoubleMLPLR(obj_dml_data,
ml_g, ml_m,
n_folds,
n_rep,
score,
dml_procedure,
apply_cross_fitting=False)
dml_plr_obj.fit()
np.random.seed(3141)
y = data['y'].values
x = data.loc[:, x_cols].values
d = data['d'].values
n_obs = len(y)
all_smpls = draw_smpls(n_obs, n_folds, n_rep)
# adapt to do no-cross-fitting in each repetition
all_smpls = [[xx[0]] for xx in all_smpls]
thetas = np.zeros(n_rep)
ses = np.zeros(n_rep)
all_g_hat = list()
all_m_hat = list()
for i_rep in range(n_rep):
smpls = all_smpls[i_rep]
g_hat, m_hat = fit_nuisance_plr(y, x, d,
clone(learner), clone(learner), smpls)
all_g_hat.append(g_hat)
all_m_hat.append(m_hat)
thetas[i_rep], ses[i_rep] = plr_dml1(y, x, d,
all_g_hat[i_rep], all_m_hat[i_rep],
smpls, score)
res_manual = np.median(thetas)
se_manual = np.sqrt(np.median(np.power(ses, 2)*len(smpls[0][1]) + np.power(thetas - res_manual, 2))/len(smpls[0][1]))
res_dict = {'coef': dml_plr_obj.coef,
'coef_manual': res_manual,
'se': dml_plr_obj.se,
'se_manual': se_manual,
'boot_methods': boot_methods
}
for bootstrap in boot_methods:
np.random.seed(3141)
boot_theta, boot_t_stat = boot_plr(y, d, thetas, ses,
all_g_hat, all_m_hat,
all_smpls, score, bootstrap, n_rep_boot,
n_rep=n_rep, apply_cross_fitting=False)
np.random.seed(3141)
dml_plr_obj.bootstrap(method=bootstrap, n_rep_boot=n_rep_boot)
res_dict['boot_coef' + bootstrap] = dml_plr_obj.boot_coef
res_dict['boot_t_stat' + bootstrap] = dml_plr_obj.boot_t_stat
res_dict['boot_coef' + bootstrap + '_manual'] = boot_theta
res_dict['boot_t_stat' + bootstrap + '_manual'] = boot_t_stat
return res_dict
@pytest.mark.ci
def test_dml_plr_rep_no_cross_fit_coef(dml_plr_rep_no_cross_fit_fixture):
    # Package coefficient must match the manual (median-aggregated) value.
    assert math.isclose(dml_plr_rep_no_cross_fit_fixture['coef'],
                        dml_plr_rep_no_cross_fit_fixture['coef_manual'],
                        rel_tol=1e-9, abs_tol=1e-4)
@pytest.mark.ci
def test_dml_plr_rep_no_cross_fit_se(dml_plr_rep_no_cross_fit_fixture):
    # Package standard error must match the manual value.
    assert math.isclose(dml_plr_rep_no_cross_fit_fixture['se'],
                        dml_plr_rep_no_cross_fit_fixture['se_manual'],
                        rel_tol=1e-9, abs_tol=1e-4)
@pytest.mark.ci
def test_dml_plr_rep_no_cross_fit_boot(dml_plr_rep_no_cross_fit_fixture):
    # Bootstrap coefficients and t-statistics must both match.
    for bootstrap in dml_plr_rep_no_cross_fit_fixture['boot_methods']:
        assert np.allclose(dml_plr_rep_no_cross_fit_fixture['boot_coef' + bootstrap],
                           dml_plr_rep_no_cross_fit_fixture['boot_coef' + bootstrap + '_manual'],
                           rtol=1e-9, atol=1e-4)
        assert np.allclose(dml_plr_rep_no_cross_fit_fixture['boot_t_stat' + bootstrap],
                           dml_plr_rep_no_cross_fit_fixture['boot_t_stat' + bootstrap + '_manual'],
                           rtol=1e-9, atol=1e-4)
@pytest.fixture(scope='module',
                params=[True, False])
def tune_on_folds(request):
    # Whether hyperparameters are tuned per fold or on the full sample.
    return request.param
@pytest.fixture(scope="module")
def dml_plr_no_cross_fit_tune_fixture(generate_data1, learner, score, tune_on_folds):
par_grid = {'ml_g': {'alpha': np.linspace(0.05, .95, 7)},
'ml_m': {'alpha': np.linspace(0.05, .95, 7)}}
n_folds_tune = 3
boot_methods = ['normal']
n_rep_boot = 502
dml_procedure = 'dml1'
# collect data
data = generate_data1
x_cols = data.columns[data.columns.str.startswith('X')].tolist()
# Set machine learning methods for m & g
ml_g = Lasso()
ml_m = Lasso()
np.random.seed(3141)
obj_dml_data = dml.DoubleMLData(data, 'y', ['d'], x_cols)
dml_plr_obj = dml.DoubleMLPLR(obj_dml_data,
ml_g, ml_m,
n_folds=2,
score=score,
dml_procedure=dml_procedure,
apply_cross_fitting=False)
# tune hyperparameters
_ = dml_plr_obj.tune(par_grid, tune_on_folds=tune_on_folds, n_folds_tune=n_folds_tune)
# fit with tuned parameters
dml_plr_obj.fit()
np.random.seed(3141)
y = obj_dml_data.y
x = obj_dml_data.x
d = obj_dml_data.d
n_obs = len(y)
all_smpls = draw_smpls(n_obs, 2)
smpls = all_smpls[0]
smpls = [smpls[0]]
if tune_on_folds:
g_params, m_params = tune_nuisance_plr(y, x, d,
clone(ml_g), clone(ml_m), smpls, n_folds_tune,
par_grid['ml_g'], par_grid['ml_m'])
else:
xx = [(np.arange(len(y)), np.array([]))]
g_params, m_params = tune_nuisance_plr(y, x, d,
clone(ml_g), clone(ml_m), xx, n_folds_tune,
par_grid['ml_g'], par_grid['ml_m'])
res_manual = fit_plr(y, x, d, clone(ml_m), clone(ml_g),
[smpls], dml_procedure, score, g_params=g_params, m_params=m_params)
res_dict = {'coef': dml_plr_obj.coef,
'coef_manual': res_manual['theta'],
'se': dml_plr_obj.se,
'se_manual': res_manual['se'],
'boot_methods': boot_methods}
for bootstrap in boot_methods:
np.random.seed(3141)
boot_theta, boot_t_stat = boot_plr(y, d, res_manual['thetas'], res_manual['ses'],
res_manual['all_g_hat'], res_manual['all_m_hat'],
[smpls], score, bootstrap, n_rep_boot,
apply_cross_fitting=False)
np.random.seed(3141)
dml_plr_obj.bootstrap(method=bootstrap, n_rep_boot=n_rep_boot)
res_dict['boot_coef' + bootstrap] = dml_plr_obj.boot_coef
res_dict['boot_t_stat' + bootstrap] = dml_plr_obj.boot_t_stat
res_dict['boot_coef' + bootstrap + '_manual'] = boot_theta
res_dict['boot_t_stat' + bootstrap + '_manual'] = boot_t_stat
return res_dict
@pytest.mark.ci
def test_dml_plr_no_cross_fit_tune_coef(dml_plr_no_cross_fit_tune_fixture):
    # Package coefficient must match the manual implementation.
    assert math.isclose(dml_plr_no_cross_fit_tune_fixture['coef'],
                        dml_plr_no_cross_fit_tune_fixture['coef_manual'],
                        rel_tol=1e-9, abs_tol=1e-4)
@pytest.mark.ci
def test_dml_plr_no_cross_fit_tune_se(dml_plr_no_cross_fit_tune_fixture):
    # Package standard error must match the manual implementation.
    assert math.isclose(dml_plr_no_cross_fit_tune_fixture['se'],
                        dml_plr_no_cross_fit_tune_fixture['se_manual'],
                        rel_tol=1e-9, abs_tol=1e-4)
@pytest.mark.ci
def test_dml_plr_no_cross_fit_tune_boot(dml_plr_no_cross_fit_tune_fixture):
    # Bootstrap coefficients and t-statistics must both match.
    for bootstrap in dml_plr_no_cross_fit_tune_fixture['boot_methods']:
        assert np.allclose(dml_plr_no_cross_fit_tune_fixture['boot_coef' + bootstrap],
                           dml_plr_no_cross_fit_tune_fixture['boot_coef' + bootstrap + '_manual'],
                           rtol=1e-9, atol=1e-4)
        assert np.allclose(dml_plr_no_cross_fit_tune_fixture['boot_t_stat' + bootstrap],
                           dml_plr_no_cross_fit_tune_fixture['boot_t_stat' + bootstrap + '_manual'],
                           rtol=1e-9, atol=1e-4)
3afa0c88320c74a9cf1d8668f151a94bc120c74f | 1,459 | py | Python | networkx/readwrite/tests/test_gpickle.py | tempcyc/networkx | cae83ba501c242567cb2454f97f851898276f06e | [
"BSD-3-Clause"
] | 1 | 2018-08-09T14:29:43.000Z | 2018-08-09T14:29:43.000Z | networkx/readwrite/tests/test_gpickle.py | tempcyc/networkx | cae83ba501c242567cb2454f97f851898276f06e | [
"BSD-3-Clause"
] | null | null | null | networkx/readwrite/tests/test_gpickle.py | tempcyc/networkx | cae83ba501c242567cb2454f97f851898276f06e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from nose.tools import assert_equal
import os
import tempfile
import networkx as nx
from networkx.testing.utils import *
class TestGpickle(object):
    """Round-trip tests for write_gpickle/read_gpickle across graph types."""
    def setUp(self):
        # Build one graph of each NetworkX type, plus frozen copies, all
        # carrying node, edge, and graph attributes.
        G=nx.Graph(name="test")
        e=[('a','b'),('b','c'),('c','d'),('d','e'),('e','f'),('a','f')]
        G.add_edges_from(e,width=10)
        G.add_node('g',color='green')
        G.graph['number']=1
        DG=nx.DiGraph(G)
        MG=nx.MultiGraph(G)
        MG.add_edge('a', 'a')  # parallel self-loop exercises multigraphs
        MDG=nx.MultiDiGraph(G)
        MDG.add_edge('a', 'a')
        fG = G.copy()
        fDG = DG.copy()
        fMG = MG.copy()
        fMDG = MDG.copy()
        nx.freeze(fG)
        nx.freeze(fDG)
        nx.freeze(fMG)
        nx.freeze(fMDG)
        self.G=G
        self.DG=DG
        self.MG=MG
        self.MDG=MDG
        self.fG=fG
        self.fDG=fDG
        self.fMG=fMG
        self.fMDG=fMDG
    def test_gpickle(self):
        # Each graph must survive a pickle round-trip with nodes, edges,
        # and attribute data intact.
        for G in [self.G, self.DG, self.MG, self.MDG,
                  self.fG, self.fDG, self.fMG, self.fMDG]:
            (fd,fname)=tempfile.mkstemp()
            nx.write_gpickle(G,fname)
            Gin=nx.read_gpickle(fname)
            assert_nodes_equal(G.nodes(data=True),
                               Gin.nodes(data=True))
            assert_edges_equal(G.edges(data=True),
                               Gin.edges(data=True))
            assert_graphs_equal(G, Gin)
            # Clean up the temporary pickle file.
            os.close(fd)
            os.unlink(fname)
51ff7426ac2a9fc7ed7595ccd69fecd75a7dff52 | 31,566 | py | Python | cvat/apps/engine/media_extractors.py | Fighting-Golion/mycvat | d53c1dd4568562c0b83b7f762fb98bec686804b4 | [
"Intel",
"MIT"
] | null | null | null | cvat/apps/engine/media_extractors.py | Fighting-Golion/mycvat | d53c1dd4568562c0b83b7f762fb98bec686804b4 | [
"Intel",
"MIT"
] | null | null | null | cvat/apps/engine/media_extractors.py | Fighting-Golion/mycvat | d53c1dd4568562c0b83b7f762fb98bec686804b4 | [
"Intel",
"MIT"
] | null | null | null | # Copyright (C) 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
import os
import tempfile
import shutil
import zipfile
import io
import itertools
import struct
from abc import ABC, abstractmethod
from contextlib import closing
import av
import numpy as np
from natsort import os_sorted
from pyunpack import Archive
from PIL import Image, ImageFile
from random import shuffle
import open3d as o3d
from cvat.apps.engine.utils import rotate_image
from cvat.apps.engine.models import DimensionType, SortingMethod
# fixes: "OSError:broken data stream" when executing line 72 while loading images downloaded from the web
# see: https://stackoverflow.com/questions/42462431/oserror-broken-data-stream-when-reading-image-file
ImageFile.LOAD_TRUNCATED_IMAGES = True
from cvat.apps.engine.mime_types import mimetypes
from utils.dataset_manifest import VideoManifestManager, ImageManifestManager
def get_mime(name):
    """Return the MEDIA_TYPES key whose detector claims *name*,
    or 'unknown' when no registered media type matches."""
    matches = (
        type_name
        for type_name, type_def in MEDIA_TYPES.items()
        if type_def['has_mime_type'](name)
    )
    return next(matches, 'unknown')
def create_tmp_dir():
    """Create and return a unique temporary directory named ``cvat-*.data``."""
    tmp_dir = tempfile.mkdtemp(prefix='cvat-', suffix='.data')
    return tmp_dir
def delete_tmp_dir(tmp_dir):
    """Recursively remove *tmp_dir*; a falsy value is silently ignored."""
    if not tmp_dir:
        return
    shutil.rmtree(tmp_dir)
def files_to_ignore(directory):
    """Return True when *directory* contains none of the macOS metadata
    path fragments that should be skipped during media import."""
    ignored = ('__MSOSX', '._.DS_Store', '__MACOSX', '.DS_Store')
    return not any(fragment in directory for fragment in ignored)
def sort(images, sorting_method=SortingMethod.LEXICOGRAPHICAL, func=None):
    """Order *images* according to *sorting_method*.

    ``func`` is an optional key function for the lexicographical and
    natural orderings.  RANDOM shuffles the list in place; PREDEFINED
    returns it untouched.  Raises NotImplementedError for unknown methods.
    """
    if sorting_method == SortingMethod.LEXICOGRAPHICAL:
        return sorted(images, key=func)
    if sorting_method == SortingMethod.NATURAL:
        return os_sorted(images, key=func)
    if sorting_method == SortingMethod.PREDEFINED:
        return images
    if sorting_method == SortingMethod.RANDOM:
        shuffle(images)
        return images
    raise NotImplementedError()
class IMediaReader(ABC):
    """Abstract base for media readers iterating frames in [start, stop)
    with the given step."""
    def __init__(self, source_path, step, start, stop, dimension):
        self._source_path = source_path
        self._step = step
        self._start = start
        self._stop = stop
        self._dimension = dimension
    @abstractmethod
    def __iter__(self):
        pass
    @abstractmethod
    def get_preview(self):
        pass
    @abstractmethod
    def get_progress(self, pos):
        pass
    @staticmethod
    def _get_preview(obj):
        # Produce a small RGB thumbnail from a PIL image or a binary stream.
        PREVIEW_SIZE = (256, 256)
        if isinstance(obj, io.IOBase):
            preview = Image.open(obj)
        else:
            preview = obj
        preview.thumbnail(PREVIEW_SIZE)
        return preview.convert('RGB')
    @abstractmethod
    def get_image_size(self, i):
        pass
    def __len__(self):
        # Number of frames selected by (start, stop, step).
        return len(self.frame_range)
    @property
    def frame_range(self):
        # Indices of the frames this reader yields.
        return range(self._start, self._stop, self._step)
class ImageListReader(IMediaReader):
    """Reader over an explicit, sorted list of image paths (2D or 3D)."""
    def __init__(self,
                source_path,
                step=1,
                start=0,
                stop=None,
                dimension=DimensionType.DIM_2D,
                sorting_method=SortingMethod.LEXICOGRAPHICAL):
        if not source_path:
            raise Exception('No image found')
        if stop is None:
            stop = len(source_path)
        else:
            # stop is inclusive for callers; convert to an exclusive bound.
            stop = min(len(source_path), stop + 1)
        step = max(step, 1)
        assert stop > start
        super().__init__(
            source_path=sort(source_path, sorting_method),
            step=step,
            start=start,
            stop=stop,
            dimension=dimension
        )
        self._sorting_method = sorting_method
    def __iter__(self):
        # Yield (image, path, frame index) triples.
        for i in range(self._start, self._stop, self._step):
            yield (self.get_image(i), self.get_path(i), i)
    def filter(self, callback):
        # Re-initialize in place with only the paths accepted by callback.
        source_path = list(filter(callback, self._source_path))
        ImageListReader.__init__(
            self,
            source_path,
            step=self._step,
            start=self._start,
            stop=self._stop,
            dimension=self._dimension,
            sorting_method=self._sorting_method
        )
    def get_path(self, i):
        return self._source_path[i]
    def get_image(self, i):
        return self._source_path[i]
    def get_progress(self, pos):
        # Fraction of the selected range processed up to frame `pos`.
        return (pos - self._start + 1) / (self._stop - self._start)
    def get_preview(self):
        if self._dimension == DimensionType.DIM_3D:
            # Point clouds have no renderable preview; use a static asset.
            fp = open(os.path.join(os.path.dirname(__file__), 'assets/3d_preview.jpeg'), "rb")
        else:
            fp = open(self._source_path[0], "rb")
        return self._get_preview(fp)
    def get_image_size(self, i):
        if self._dimension == DimensionType.DIM_3D:
            # Read WIDTH/HEIGHT from the PCD header.
            with open(self.get_path(i), 'rb') as f:
                properties = ValidateDimension.get_pcd_properties(f)
            return int(properties["WIDTH"]), int(properties["HEIGHT"])
        img = Image.open(self._source_path[i])
        return img.width, img.height
    def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
        # FIXME
        ImageListReader.__init__(self,
            source_path=source_files,
            step=step,
            start=start,
            stop=stop,
            sorting_method=self._sorting_method,
        )
        self._dimension = dimension
    @property
    def absolute_source_paths(self):
        # All paths resolved through get_path (subclasses may override it).
        return [self.get_path(idx) for idx, _ in enumerate(self._source_path)]
class DirectoryReader(ImageListReader):
    """Reader that recursively collects image files from directories."""
    def __init__(self,
                 source_path,
                 step=1,
                 start=0,
                 stop=None,
                 dimension=DimensionType.DIM_2D,
                 sorting_method=SortingMethod.LEXICOGRAPHICAL):
        # Recursively gather every file whose detected mime type is 'image'.
        found_images = []
        for directory in source_path:
            for root, _, filenames in os.walk(directory):
                found_images.extend(
                    path
                    for path in (os.path.join(root, name) for name in filenames)
                    if get_mime(path) == 'image'
                )
        super().__init__(
            source_path=found_images,
            step=step,
            start=start,
            stop=stop,
            dimension=dimension,
            sorting_method=sorting_method,
        )
class ArchiveReader(DirectoryReader):
    """Reader that extracts an archive and reads images from the result.

    NOTE: when no explicit ``extract_dir`` is given, the archive is
    extracted next to itself and the original archive file is removed.
    """
    def __init__(self,
                 source_path,
                 step=1,
                 start=0,
                 stop=None,
                 dimension=DimensionType.DIM_2D,
                 sorting_method=SortingMethod.LEXICOGRAPHICAL,
                 extract_dir=None):
        self._archive_source = source_path[0]
        tmp_dir = extract_dir if extract_dir else os.path.dirname(source_path[0])
        Archive(self._archive_source).extractall(tmp_dir)
        if not extract_dir:
            # In-place extraction: drop the now-redundant archive file.
            os.remove(self._archive_source)
        super().__init__(
            source_path=[tmp_dir],
            step=step,
            start=start,
            stop=stop,
            dimension=dimension,
            sorting_method=sorting_method,
        )
class PdfReader(ImageListReader):
    """Reader that converts a PDF's pages to JPEG images and reads those.

    NOTE: when no explicit ``extract_dir`` is given, pages are rendered
    next to the PDF and the original PDF file is removed.
    """
    def __init__(self,
                 source_path,
                 step=1,
                 start=0,
                 stop=None,
                 dimension=DimensionType.DIM_2D,
                 sorting_method=SortingMethod.LEXICOGRAPHICAL,
                 extract_dir=None):
        if not source_path:
            raise Exception('No PDF found')
        self._pdf_source = source_path[0]
        _basename = os.path.splitext(os.path.basename(self._pdf_source))[0]
        _counter = itertools.count()
        # Generate deterministic per-page output names: <pdf>NNNNNNNNN.jpeg
        def _make_name():
            for page_num in _counter:
                yield '{}{:09d}.jpeg'.format(_basename, page_num)
        from pdf2image import convert_from_path
        self._tmp_dir = extract_dir if extract_dir else os.path.dirname(source_path[0])
        os.makedirs(self._tmp_dir, exist_ok=True)
        # Avoid OOM: https://github.com/openvinotoolkit/cvat/issues/940
        paths = convert_from_path(self._pdf_source,
            last_page=stop, paths_only=True,
            output_folder=self._tmp_dir, fmt="jpeg", output_file=_make_name())
        if not extract_dir:
            # In-place conversion: drop the now-redundant PDF file.
            os.remove(source_path[0])
        super().__init__(
            source_path=paths,
            step=step,
            start=start,
            stop=stop,
            dimension=dimension,
            sorting_method=sorting_method,
        )
class ZipReader(ImageListReader):
    """Reader over image members of a ZIP archive (read without extraction
    for 2D data; 3D data is accessed through extracted paths)."""
    def __init__(self,
                 source_path,
                 step=1,
                 start=0,
                 stop=None,
                 dimension=DimensionType.DIM_2D,
                 sorting_method=SortingMethod.LEXICOGRAPHICAL,
                 extract_dir=None):
        self._zip_source = zipfile.ZipFile(source_path[0], mode='r')
        self.extract_dir = extract_dir
        # Keep only image members, skipping macOS metadata entries.
        file_list = [f for f in self._zip_source.namelist() if files_to_ignore(f) and get_mime(f) == 'image']
        super().__init__(file_list,
                         step=step,
                         start=start,
                         stop=stop,
                         dimension=dimension,
                         sorting_method=sorting_method)
    def __del__(self):
        self._zip_source.close()
    def get_preview(self):
        if self._dimension == DimensionType.DIM_3D:
            # TODO
            fp = open(os.path.join(os.path.dirname(__file__), 'assets/3d_preview.jpeg'), "rb")
            return self._get_preview(fp)
        # Read the first image member directly from the archive.
        io_image = io.BytesIO(self._zip_source.read(self._source_path[0]))
        return self._get_preview(io_image)
    def get_image_size(self, i):
        if self._dimension == DimensionType.DIM_3D:
            # Read WIDTH/HEIGHT from the extracted PCD header.
            with open(self.get_path(i), 'rb') as f:
                properties = ValidateDimension.get_pcd_properties(f)
            return int(properties["WIDTH"]), int(properties["HEIGHT"])
        img = Image.open(io.BytesIO(self._zip_source.read(self._source_path[i])))
        return img.width, img.height
    def get_image(self, i):
        if self._dimension == DimensionType.DIM_3D:
            # 3D data is consumed by path, not as an in-memory stream.
            return self.get_path(i)
        return io.BytesIO(self._zip_source.read(self._source_path[i]))
    def get_zip_filename(self):
        return self._zip_source.filename
    def get_path(self, i):
        if self._zip_source.filename:
            # Resolve the member relative to the extraction target.
            return os.path.join(os.path.dirname(self._zip_source.filename), self._source_path[i]) \
                if not self.extract_dir else os.path.join(self.extract_dir, self._source_path[i])
        else: # necessary for mime_type definition
            return self._source_path[i]
    def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
        super().reconcile(
            source_files=source_files,
            step=step,
            start=start,
            stop=stop,
            dimension=dimension,
        )
    def extract(self):
        # Extract all members; in-place extraction removes the archive.
        self._zip_source.extractall(self.extract_dir if self.extract_dir else os.path.dirname(self._zip_source.filename))
        if not self.extract_dir:
            os.remove(self._zip_source.filename)
class VideoReader(IMediaReader):
    """Decodes frames of a video file (via PyAV), honoring start/stop/step."""
    def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
        super().__init__(
            source_path=source_path,
            step=step,
            # The caller's stop is inclusive; store it exclusively.
            stop=stop + 1 if stop is not None else stop,
            start=start,
            dimension=dimension,
        ) if False else None
        super().__init__(
            source_path=source_path,
            step=step,
            start=start,
            stop=stop + 1 if stop is not None else stop,
            dimension=dimension,
        )
    def _has_frame(self, i):
        # True when frame i falls inside [start, stop) on the step grid.
        if i >= self._start:
            if (i - self._start) % self._step == 0:
                if self._stop is None or i < self._stop:
                    return True
        return False
    def _decode(self, container):
        frame_num = 0
        for packet in container.demux():
            if packet.stream.type == 'video':
                for image in packet.decode():
                    frame_num += 1
                    if self._has_frame(frame_num - 1):
                        # Apply the container's rotation metadata, if any,
                        # preserving the original presentation timestamp.
                        if packet.stream.metadata.get('rotate'):
                            old_image = image
                            image = av.VideoFrame().from_ndarray(
                                rotate_image(
                                    image.to_ndarray(format='bgr24'),
                                    360 - int(container.streams.video[0].metadata.get('rotate'))
                                ),
                                format ='bgr24'
                            )
                            image.pts = old_image.pts
                        yield (image, self._source_path[0], image.pts)
    def __iter__(self):
        container = self._get_av_container()
        source_video_stream = container.streams.video[0]
        source_video_stream.thread_type = 'AUTO'
        return self._decode(container)
    def get_progress(self, pos):
        """Returns pos as a fraction of total duration, or None if unknown."""
        duration = self._get_duration()
        return pos / duration if duration else None
    def _get_av_container(self):
        if isinstance(self._source_path[0], io.BytesIO):
            self._source_path[0].seek(0) # required for re-reading
        return av.open(self._source_path[0])
    def _get_duration(self):
        container = self._get_av_container()
        stream = container.streams.video[0]
        duration = None
        if stream.duration:
            duration = stream.duration
        else:
            # may have a DURATION in format like "01:16:45.935000000"
            duration_str = stream.metadata.get("DURATION", None)
            tb_denominator = stream.time_base.denominator
            if duration_str and tb_denominator:
                _hour, _min, _sec = duration_str.split(':')
                duration_sec = 60*60*float(_hour) + 60*float(_min) + float(_sec)
                duration = duration_sec * tb_denominator
        return duration
    def get_preview(self):
        """Returns a preview built from the first decodable frame."""
        container = self._get_av_container()
        stream = container.streams.video[0]
        preview = next(container.decode(stream))
        return self._get_preview(preview.to_image() if not stream.metadata.get('rotate') \
            else av.VideoFrame().from_ndarray(
                    rotate_image(
                        preview.to_ndarray(format='bgr24'),
                        360 - int(container.streams.video[0].metadata.get('rotate'))
                    ),
                    format ='bgr24'
                ).to_image()
        )
    def get_image_size(self, i):
        # NOTE(review): decodes from the start of the stream each call — costly.
        image = (next(iter(self)))[0]
        return image.width, image.height
class FragmentMediaReader:
    """Computes which absolute frame indices belong to one chunk of a stream.

    A chunk holds ``chunk_size`` frames taken every ``step`` frames starting
    at ``start``; ``stop`` is the last frame index, inclusive.
    """

    def __init__(self, chunk_number, chunk_size, start, stop, step=1):
        self._start = start
        # The caller passes an inclusive upper bound; keep it exclusive.
        self._stop = stop + 1
        self._step = step
        self._chunk_number = chunk_number
        self._chunk_size = chunk_size
        frames_per_chunk = chunk_size * step
        self._start_chunk_frame_number = start + chunk_number * frames_per_chunk
        chunk_upper_bound = self._start_chunk_frame_number + (chunk_size - 1) * step + 1
        self._end_chunk_frame_number = min(chunk_upper_bound, self._stop)
        self._frame_range = self._get_frame_range()

    @property
    def frame_range(self):
        """Absolute frame indices that fall inside this chunk."""
        return self._frame_range

    def _get_frame_range(self):
        chunk_frames = []
        for frame in range(self._start, self._stop, self._step):
            if frame < self._start_chunk_frame_number:
                # Before this chunk — keep scanning.
                continue
            offset = frame - self._start_chunk_frame_number
            if frame < self._end_chunk_frame_number and offset % self._step == 0:
                chunk_frames.append(frame)
            elif offset % self._step:
                continue
            else:
                # Past the end of the chunk.
                break
        return chunk_frames
class ImageDatasetManifestReader(FragmentMediaReader):
    """Yields manifest entries for the image frames belonging to one chunk."""
    def __init__(self, manifest_path, **kwargs):
        # kwargs carry the FragmentMediaReader chunking parameters.
        super().__init__(**kwargs)
        self._manifest = ImageManifestManager(manifest_path)
        self._manifest.init_index()
    def __iter__(self):
        for idx in self._frame_range:
            yield self._manifest[idx]
class VideoDatasetManifestReader(FragmentMediaReader):
    """Decodes only the video frames belonging to one chunk.

    Uses the manifest's key-frame index to seek close to the chunk start
    instead of decoding the whole video from the beginning.
    """
    def __init__(self, manifest_path, **kwargs):
        self.source_path = kwargs.pop('source_path')
        super().__init__(**kwargs)
        self._manifest = VideoManifestManager(manifest_path)
        self._manifest.init_index()
    def _get_nearest_left_key_frame(self):
        # Binary search for the last key frame at or before the chunk start;
        # returns its frame number and presentation timestamp.
        if self._start_chunk_frame_number >= \
                self._manifest[len(self._manifest) - 1].get('number'):
            left_border = len(self._manifest) - 1
        else:
            left_border = 0
            delta = len(self._manifest)
            while delta:
                step = delta // 2
                cur_position = left_border + step
                if self._manifest[cur_position].get('number') < self._start_chunk_frame_number:
                    cur_position += 1
                    left_border = cur_position
                    delta -= step + 1
                else:
                    delta = step
            if self._manifest[cur_position].get('number') > self._start_chunk_frame_number:
                left_border -= 1
        frame_number = self._manifest[left_border].get('number')
        timestamp = self._manifest[left_border].get('pts')
        return frame_number, timestamp
    def __iter__(self):
        start_decode_frame_number, start_decode_timestamp = self._get_nearest_left_key_frame()
        with closing(av.open(self.source_path, mode='r')) as container:
            video_stream = next(stream for stream in container.streams if stream.type == 'video')
            video_stream.thread_type = 'AUTO'
            # Seek to the key frame, then decode forward until the chunk range.
            container.seek(offset=start_decode_timestamp, stream=video_stream)
            frame_number = start_decode_frame_number - 1
            for packet in container.demux(video_stream):
                for frame in packet.decode():
                    frame_number += 1
                    if frame_number in self._frame_range:
                        # Honor the container's rotation metadata, if any.
                        if video_stream.metadata.get('rotate'):
                            frame = av.VideoFrame().from_ndarray(
                                rotate_image(
                                    frame.to_ndarray(format='bgr24'),
                                    360 - int(container.streams.video[0].metadata.get('rotate'))
                                ),
                                format ='bgr24'
                            )
                        yield frame
                    elif frame_number < self._frame_range[-1]:
                        continue
                    else:
                        # Past the last frame of the chunk — stop decoding.
                        return
class IChunkWriter(ABC):
    """Base class for writers that pack a sequence of frames into one chunk file."""
    def __init__(self, quality, dimension=DimensionType.DIM_2D):
        self._image_quality = quality
        self._dimension = dimension

    @staticmethod
    def _compress_image(image_path, quality):
        """Re-encodes a frame as JPEG at the given quality.

        Args:
            image_path: a file path, a binary file object, or an av.VideoFrame.
            quality: JPEG quality (1-100).
        Returns:
            (width, height, io.BytesIO holding the JPEG payload).
        """
        try:
            image = image_path.to_image() if isinstance(image_path, av.VideoFrame) else Image.open(image_path)
        except Exception:
            # Pillow cannot decode some TIFF flavours; fall back to tifffile.
            # Imported lazily so the extra dependency is only required here.
            import tifffile
            image = image_path.to_image() if isinstance(image_path, av.VideoFrame) else tifffile.imread(image_path)
        if isinstance(image, np.ndarray):
            # tifffile returned a raw array; wrap it into a PIL image.
            # (PIL images do not support `image * 200`, so the rescale must
            # only run on the array path.)
            # NOTE(review): the fixed *200 gain looks dataset-specific — confirm.
            image = Image.fromarray(np.uint8(image * 200))
        # Ensure image data fits into 8bit per pixel before RGB conversion as PIL clips values on conversion
        if image.mode == "I":
            # Image mode is 32bit integer pixels.
            # Autoscale pixels by factor 2**8 / im_data.max() to fit into 8bit
            im_data = np.array(image)
            im_data = im_data * (2**8 / im_data.max())
            image = Image.fromarray(im_data.astype(np.int32))
        converted_image = image.convert('RGB')
        image.close()
        buf = io.BytesIO()
        converted_image.save(buf, format='JPEG', quality=quality, optimize=True)
        buf.seek(0)
        width, height = converted_image.size
        converted_image.close()
        return width, height, buf

    @abstractmethod
    def save_as_chunk(self, images, chunk_path):
        """Writes the given frames into chunk_path; returns their sizes."""
        pass
class ZipChunkWriter(IChunkWriter):
    """Stores frames in a zip archive as-is, without re-encoding them."""

    def save_as_chunk(self, images, chunk_path):
        with zipfile.ZipFile(chunk_path, 'x') as chunk_archive:
            for frame_idx, (frame, source_name, _) in enumerate(images):
                # Keep the source extension, but name members sequentially.
                member_name = '{:06d}{}'.format(
                    frame_idx, os.path.splitext(source_name)[1])
                if isinstance(frame, io.BytesIO):
                    chunk_archive.writestr(member_name, frame.getvalue())
                else:
                    chunk_archive.write(filename=frame, arcname=member_name)
        # Files are copied verbatim (never decoded), so no sizes are known.
        return []
class ZipCompressedChunkWriter(IChunkWriter):
    """Stores frames in a zip archive, re-encoding 2D images as JPEG."""
    def save_as_chunk(self, images, chunk_path):
        image_sizes = []
        with zipfile.ZipFile(chunk_path, 'x') as zip_chunk:
            for idx, (image, _, _) in enumerate(images):
                if self._dimension == DimensionType.DIM_2D:
                    # 2D frames are recompressed to JPEG at the configured quality.
                    w, h, image_buf = self._compress_image(image, self._image_quality)
                    extension = "jpeg"
                else:
                    # 3D point clouds are stored verbatim; sizes come from the header.
                    image_buf = open(image, "rb") if isinstance(image, str) else image
                    properties = ValidateDimension.get_pcd_properties(image_buf)
                    w, h = int(properties["WIDTH"]), int(properties["HEIGHT"])
                    extension = "pcd"
                    image_buf.seek(0, 0)
                    image_buf = io.BytesIO(image_buf.read())
                image_sizes.append((w, h))
                arcname = '{:06d}.{}'.format(idx, extension)
                zip_chunk.writestr(arcname, image_buf.getvalue())
        return image_sizes
class Mpeg4ChunkWriter(IChunkWriter):
    """Encodes a chunk of frames into an H.264/MP4 file."""
    def __init__(self, quality=67):
        # translate inversed range [1:100] to [0:51]
        quality = round(51 * (100 - quality) / 99)
        super().__init__(quality)
        self._output_fps = 25
        # Prefer libopenh264 when available; otherwise fall back to libx264.
        try:
            codec = av.codec.Codec('libopenh264', 'w')
            self._codec_name = codec.name
            self._codec_opts = {
                    'profile': 'constrained_baseline',
                    'qmin': str(self._image_quality),
                    'qmax': str(self._image_quality),
                    'rc_mode': 'buffer',
                }
        except av.codec.codec.UnknownCodecError:
            codec = av.codec.Codec('libx264', 'w')
            self._codec_name = codec.name
            self._codec_opts = {
                "crf": str(self._image_quality),
                "preset": "ultrafast",
            }
    def _create_av_container(self, path, w, h, rate, options, f='mp4'):
        """Opens an output container with one video stream of the given size."""
        # x264 requires width and height must be divisible by 2 for yuv420p
        if h % 2:
            h += 1
        if w % 2:
            w += 1
        container = av.open(path, 'w',format=f)
        video_stream = container.add_stream(self._codec_name, rate=rate)
        video_stream.pix_fmt = "yuv420p"
        video_stream.width = w
        video_stream.height = h
        video_stream.options = options
        return container, video_stream
    def save_as_chunk(self, images, chunk_path):
        """Encodes frames at their source resolution; returns [(w, h)]."""
        if not images:
            raise Exception('no images to save')
        input_w = images[0][0].width
        input_h = images[0][0].height
        output_container, output_v_stream = self._create_av_container(
            path=chunk_path,
            w=input_w,
            h=input_h,
            rate=self._output_fps,
            options=self._codec_opts,
        )
        self._encode_images(images, output_container, output_v_stream)
        output_container.close()
        return [(input_w, input_h)]
    @staticmethod
    def _encode_images(images, container, stream):
        for frame, _, _ in images:
            # let libav set the correct pts and time_base
            frame.pts = None
            frame.time_base = None
            for packet in stream.encode(frame):
                container.mux(packet)
        # Flush streams
        for packet in stream.encode():
            container.mux(packet)
class Mpeg4CompressedChunkWriter(Mpeg4ChunkWriter):
    """Mpeg4 writer that additionally downscales frames taller than 1080px."""
    def __init__(self, quality):
        super().__init__(quality)
        if self._codec_name == 'libx264':
            # Tuned options for better compression on the fallback encoder.
            self._codec_opts = {
                'profile': 'baseline',
                'coder': '0',
                'crf': str(self._image_quality),
                'wpredp': '0',
                'flags': '-loop',
            }
    def save_as_chunk(self, images, chunk_path):
        """Encodes frames, halving resolution until height < 1080.

        Returns the original (not the downscaled) size as [(w, h)].
        """
        if not images:
            raise Exception('no images to save')
        input_w = images[0][0].width
        input_h = images[0][0].height
        downscale_factor = 1
        while input_h / downscale_factor >= 1080:
            downscale_factor *= 2
        output_h = input_h // downscale_factor
        output_w = input_w // downscale_factor
        output_container, output_v_stream = self._create_av_container(
            path=chunk_path,
            w=output_w,
            h=output_h,
            rate=self._output_fps,
            options=self._codec_opts,
        )
        self._encode_images(images, output_container, output_v_stream)
        output_container.close()
        return [(input_w, input_h)]
def _is_archive(path):
mime = mimetypes.guess_type(path)
mime_type = mime[0]
encoding = mime[1]
supportedArchives = ['application/x-rar-compressed',
'application/x-tar', 'application/x-7z-compressed', 'application/x-cpio',
'gzip', 'bzip2']
return mime_type in supportedArchives or encoding in supportedArchives
def _is_video(path):
mime = mimetypes.guess_type(path)
return mime[0] is not None and mime[0].startswith('video')
def _is_image(path):
mime = mimetypes.guess_type(path)
# Exclude vector graphic images because Pillow cannot work with them
return mime[0] is not None and mime[0].startswith('image') and \
not mime[0].startswith('image/svg')
def _is_dir(path):
    # Thin wrapper so directories fit the common `has_mime_type` interface.
    return os.path.isdir(path)
def _is_pdf(path):
mime = mimetypes.guess_type(path)
return mime[0] == 'application/pdf'
def _is_zip(path):
mime = mimetypes.guess_type(path)
mime_type = mime[0]
encoding = mime[1]
supportedArchives = ['application/zip']
return mime_type in supportedArchives or encoding in supportedArchives
# Registry of supported media types, keyed by a short type name.
# 'has_mime_type': function receives 1 argument - path to file.
#                  Should return True if file has specified media type.
# 'extractor': class that extracts images from specified media.
# 'mode': 'annotation' or 'interpolation' - mode of task that should be created.
# 'unique': True or False - describes how the type can be combined with other.
#           True - only one item of this type and no other is allowed
#           False - this media types can be combined with other which have unique == False
MEDIA_TYPES = {
    'image': {
        'has_mime_type': _is_image,
        'extractor': ImageListReader,
        'mode': 'annotation',
        'unique': False,
    },
    'video': {
        'has_mime_type': _is_video,
        'extractor': VideoReader,
        'mode': 'interpolation',
        'unique': True,
    },
    'archive': {
        'has_mime_type': _is_archive,
        'extractor': ArchiveReader,
        'mode': 'annotation',
        'unique': True,
    },
    'directory': {
        'has_mime_type': _is_dir,
        'extractor': DirectoryReader,
        'mode': 'annotation',
        'unique': False,
    },
    'pdf': {
        'has_mime_type': _is_pdf,
        'extractor': PdfReader,
        'mode': 'annotation',
        'unique': True,
    },
    'zip': {
        'has_mime_type': _is_zip,
        'extractor': ZipReader,
        'mode': 'annotation',
        'unique': True,
    }
}
class ValidateDimension:
    """Inspects a directory tree and decides whether it holds 2D or 3D data.

    Point-cloud files (.pcd, and .bin converted to .pcd) switch the detected
    dimension to 3D; ordinary raster images keep it 2D.
    """
    def __init__(self, path=None):
        self.dimension = DimensionType.DIM_2D
        self.path = path
        # Maps a relative pcd path to its related files (filled elsewhere).
        self.related_files = {}
        # Maps a base file name to its absolute image path.
        self.image_files = {}
        # Paths of .bin files converted to .pcd during validation.
        self.converted_files = []
    @staticmethod
    def get_pcd_properties(fp, verify_version=False):
        """Parses a PCD header from a binary file object.

        Returns a dict of header fields, True/None when verify_version is
        set, or None when the object cannot be read as a PCD header.
        """
        kv = {}
        pcd_version = ["0.7", "0.6", "0.5", "0.4", "0.3", "0.2", "0.1",
                       ".7", ".6", ".5", ".4", ".3", ".2", ".1"]
        try:
            for line in fp:
                line = line.decode("utf-8")
                if line.startswith("#"):
                    continue
                k, v = line.split(" ", maxsplit=1)
                kv[k] = v.strip()
                if "DATA" in line:
                    # Header ends at the DATA declaration.
                    break
            if verify_version:
                if "VERSION" in kv and kv["VERSION"] in pcd_version:
                    return True
                return None
            return kv
        except AttributeError:
            return None
    @staticmethod
    def convert_bin_to_pcd(path, delete_source=True):
        """Converts a KITTI-style .bin point cloud to .pcd; returns the new path."""
        list_pcd = []
        with open(path, "rb") as f:
            size_float = 4
            # Each point is four little-endian float32 values (x, y, z, intensity).
            byte = f.read(size_float * 4)
            while byte:
                x, y, z, _ = struct.unpack("ffff", byte)
                list_pcd.append([x, y, z])
                byte = f.read(size_float * 4)
        np_pcd = np.asarray(list_pcd)
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(np_pcd)
        pcd_filename = path.replace(".bin", ".pcd")
        o3d.io.write_point_cloud(pcd_filename, pcd)
        if delete_source:
            os.remove(path)
        return pcd_filename
    def set_path(self, path):
        self.path = path
    def bin_operation(self, file_path, actual_path):
        """Converts a .bin file and returns its path relative to actual_path."""
        pcd_path = ValidateDimension.convert_bin_to_pcd(file_path)
        self.converted_files.append(pcd_path)
        return pcd_path.split(actual_path)[-1][1:]
    @staticmethod
    def pcd_operation(file_path, actual_path):
        """Returns the relative path for valid PCDs, the original path otherwise."""
        with open(file_path, "rb") as file:
            is_pcd = ValidateDimension.get_pcd_properties(file, verify_version=True)
        return file_path.split(actual_path)[-1][1:] if is_pcd else file_path
    def process_files(self, root, actual_path, files):
        """Classifies files under one directory into pcd files and images."""
        pcd_files = {}
        for file in files:
            file_name, file_extension = os.path.splitext(file)
            file_path = os.path.abspath(os.path.join(root, file))
            if file_extension == ".bin":
                path = self.bin_operation(file_path, actual_path)
                pcd_files[file_name] = path
                self.related_files[path] = []
            elif file_extension == ".pcd":
                path = ValidateDimension.pcd_operation(file_path, actual_path)
                if path == file_path:
                    # Not a valid PCD — treat it as a regular image file.
                    self.image_files[file_name] = file_path
                else:
                    pcd_files[file_name] = path
                    self.related_files[path] = []
            else:
                if _is_image(file_path):
                    self.image_files[file_name] = file_path
        return pcd_files
    def validate(self):
        """
        Validate the directory structure for KITTI and point cloud format.
        """
        if not self.path:
            return
        actual_path = self.path
        for root, _, files in os.walk(actual_path):
            if not files_to_ignore(root):
                continue
            self.process_files(root, actual_path, files)
        # Any point-cloud file found means the dataset is 3D.
        if len(self.related_files.keys()):
            self.dimension = DimensionType.DIM_3D
| 35.467416 | 121 | 0.584236 |
33daa1cec549dfa532aa77c616d44a62b857b552 | 8,694 | py | Python | signal_ocean/port_expenses/port_expenses_api.py | SignalOceanSdk/SignalSDK | e21cf6026386bb46fde7582a10995cc7deff8a42 | [
"Apache-2.0"
] | 10 | 2020-09-29T06:36:45.000Z | 2022-03-14T18:15:50.000Z | signal_ocean/port_expenses/port_expenses_api.py | SignalOceanSdk/SignalSDK | e21cf6026386bb46fde7582a10995cc7deff8a42 | [
"Apache-2.0"
] | 53 | 2020-10-08T10:05:00.000Z | 2022-03-29T14:21:18.000Z | signal_ocean/port_expenses/port_expenses_api.py | SignalOceanSdk/SignalSDK | e21cf6026386bb46fde7582a10995cc7deff8a42 | [
"Apache-2.0"
] | 5 | 2020-09-25T07:48:04.000Z | 2021-11-23T07:08:56.000Z | # noqa: D100
from datetime import datetime
from typing import cast, Optional, List, Tuple
from .. import Connection
from .._internals import QueryString
from .enums import Operation, OperationStatus, EstimationStatus,\
ItalianAnchorageDues, VesselTypeEnum
from .models import PortExpenses, Port, VesselType
from .port_filter import PortFilter
from ._port_expenses_json import parse_port_expenses, parse_ports
class PortExpensesAPI:
    """Represents Signal's Port Expenses API."""

    def __init__(self, connection: Optional[Connection] = None):
        """Initializes the Port Expenses API.

        Args:
            connection: API connection configuration. If not provided, the
                default connection method is used.
        """
        self.__connection = connection or Connection()

    @staticmethod
    def _iso(value: datetime) -> str:
        """Renders a datetime as an ISO-8601 string for query parameters."""
        return value.isoformat()

    @staticmethod
    def _enum_value(value) -> str:
        """Renders an enum member by its underlying value."""
        return '{}'.format(value.value)

    @staticmethod
    def _put_if_present(query_dict, key, value, render=str) -> None:
        """Adds render(value) to query_dict under key unless value is None.

        Centralizes the repetitive optional-parameter serialization that was
        previously duplicated in every request-building method.
        """
        if value is not None:
            query_dict[key] = render(value)

    def get_port_expenses(
            self, imo: int, port_id: int, group_id: int = 1,
            vessel_type_id: Optional[int] = None,
            estimated_time_of_berth: Optional[datetime] = None,
            estimated_time_of_sail: Optional[datetime] = None,
            operation: Optional[Operation] = None,
            italian_anchorage_dues: Optional[ItalianAnchorageDues] = None,
            cargo_type: Optional[str] = None,
            operation_status: Optional[OperationStatus] = None,
            utc_date: Optional[datetime] = None,
            historical_tce: Optional[bool] = None,
            estimation_status: Optional[EstimationStatus] = None
    ) -> Optional[PortExpenses]:
        """Retrieves port expenses.

        Args:
            imo: The vessel's IMO number.
            port_id: ID of the port to retrieve the expenses for.
            group_id: Group ID.
            vessel_type_id: Vessel type ID.
            estimated_time_of_berth: Estimated time of berth.
            estimated_time_of_sail: Estimated time of sail.
            operation: Operation type.
            italian_anchorage_dues: Italian anchorage dues.
            cargo_type: Cargo type.
            operation_status: Operation status.
            utc_date: UTC date.
            historical_tce: Flag for Historical TCE.
            estimation_status: Estimation status.

        Returns:
            The port expenses or None if a port with given ID does not exist or
            a vessel with the given IMO number does not exist.
        """
        query_dict = {
            "imo": '{}'.format(imo),
            "portId": '{}'.format(port_id),
            "groupId": '{}'.format(group_id)
        }
        # Optional parameters are serialized only when provided; insertion
        # order matches the documented request layout.
        self._put_if_present(query_dict, "vesselTypeId", vessel_type_id)
        self._put_if_present(query_dict, "estimatedTimeOfBerth",
                             estimated_time_of_berth, self._iso)
        self._put_if_present(query_dict, "estimatedTimeOfSail",
                             estimated_time_of_sail, self._iso)
        self._put_if_present(query_dict, "operation", operation,
                             self._enum_value)
        self._put_if_present(query_dict, "italianAnchorageDues",
                             italian_anchorage_dues, self._enum_value)
        self._put_if_present(query_dict, "cargoType", cargo_type)
        self._put_if_present(query_dict, "operationStatus", operation_status,
                             self._enum_value)
        self._put_if_present(query_dict, "utcDate", utc_date, self._iso)
        self._put_if_present(query_dict, "historicalTce", historical_tce)
        self._put_if_present(query_dict, "estimationStatus",
                             estimation_status, self._enum_value)

        query_string: QueryString = query_dict
        response = self.__connection._make_post_request(
            "port-expenses/api/v1/Port", query_string
        )
        response.raise_for_status()
        response_json = response.json()
        return parse_port_expenses(response_json) if response_json else None

    def get_port_model_vessel_expenses(
            self, port_id: int, vessel_type_id: int,
            formula_calculation_date: datetime, vessel_class_id: int = 0,
            operation_status: OperationStatus = OperationStatus.BALLAST,
            historical_tce: bool = False,
            estimation_status: EstimationStatus =
            EstimationStatus.PRIORITY_TO_FORMULAS) -> Optional[PortExpenses]:
        """Retrieves model vessel port expenses.

        Args:
            port_id: ID of the port to retrieve the expenses for.
            vessel_type_id: Vessel type ID.
            formula_calculation_date: Formula calculation date.
            vessel_class_id: Vessel class ID.
            operation_status: Operation status.
            historical_tce: Flag for historical TCE.
            estimation_status: Estimation status.

        Returns:
            The port expenses for model vessel or None if a port with given ID
            does not exist or a vessel type with the given ID number does not
            exist.
        """
        # All parameters are mandatory here, so the query is built directly.
        query_string: QueryString = {
            "portId": '{}'.format(port_id),
            "vesselTypeId": '{}'.format(vessel_type_id),
            "formulaCalculationDate": formula_calculation_date.isoformat(),
            "vesselClassId": '{}'.format(vessel_class_id),
            "operationStatus": '{}'.format(operation_status.value),
            "historicalTce": '{}'.format(historical_tce),
            "estimationStatus": '{}'.format(estimation_status.value)
        }
        response = self.__connection._make_post_request(
            "port-expenses/api/v1/PortModelVessel", query_string
        )
        response.raise_for_status()
        response_json = response.json()
        return parse_port_expenses(response_json) if response_json else None

    def get_required_formula_parameters(
            self, port_id: int, vessel_type_id: Optional[int] = None,
            calculation_date: Optional[datetime] = None
    ) -> List[str]:
        """Retrieves required formula parameters.

        Args:
            port_id: ID of the port to retrieve the expenses for.
            vessel_type_id: Vessel type ID.
            calculation_date: Calculation date.

        Returns:
            List of required port expenses formula calculation parameters.
        """
        query_dict = {
            "portId": '{}'.format(port_id)
        }
        self._put_if_present(query_dict, "vesselTypeId", vessel_type_id)
        self._put_if_present(query_dict, "calculationDate", calculation_date,
                             self._iso)

        query_string: QueryString = query_dict
        response = self.__connection._make_post_request(
            "port-expenses/api/v1/RequiredFormulaParameters", query_string
        )
        response.raise_for_status()
        response_json = response.json()
        return cast(List[str], response_json)

    def get_vessel_types(self) -> Tuple[VesselType, ...]:
        """Retrieves all available vessel types.

        Returns:
            A tuple of all available vessel types.
        """
        return tuple(VesselType(vessel_type.value, vessel_type.name)
                     for vessel_type in VesselTypeEnum)

    def get_ports(
            self, port_filter: Optional[PortFilter] = None
    ) -> Tuple[Port, ...]:
        """Retrieves available ports.

        Args:
            port_filter: A filter used to find specific ports. If not
                specified, returns all available ports.

        Returns:
            A tuple of available ports that match the filter.
        """
        query_string: QueryString = {
            "date": datetime.now().isoformat()
        }
        available_ports: List[Port] = []
        # Ports are exposed per vessel type, so query every type and merge.
        for vessel_type in VesselTypeEnum:
            response = self.__connection._make_get_request(
                f"port-expenses/api/v1/AvailablePorts/{vessel_type.value}",
                query_string
            )
            response.raise_for_status()
            available_ports += parse_ports(response.json())

        port_filter = port_filter or PortFilter()
        return tuple(port_filter._apply(available_ports))
| 39.69863 | 80 | 0.608121 |
b81daa8c37f378e2e7a28f7d2f834e832e3e70d0 | 36,740 | py | Python | qiskit/visualization/matplotlib.py | chowington/qiskit-terra | a782c64c736fedd6a541bb45dbf89737a52b7c39 | [
"Apache-2.0"
] | null | null | null | qiskit/visualization/matplotlib.py | chowington/qiskit-terra | a782c64c736fedd6a541bb45dbf89737a52b7c39 | [
"Apache-2.0"
] | null | null | null | qiskit/visualization/matplotlib.py | chowington/qiskit-terra | a782c64c736fedd6a541bb45dbf89737a52b7c39 | [
"Apache-2.0"
] | 1 | 2019-06-13T08:07:26.000Z | 2019-06-13T08:07:26.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name,missing-docstring
"""mpl circuit visualization backend."""
import collections
import fractions
import itertools
import json
import logging
import math
import numpy as np
try:
from matplotlib import patches
from matplotlib import pyplot as plt
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
from qiskit.visualization import exceptions
from qiskit.visualization.qcstyle import DefaultStyle, BWStyle
from qiskit import user_config
logger = logging.getLogger(__name__)
# Lightweight pair of (register object, bit index) used by the drawer.
Register = collections.namedtuple('Register', 'reg index')

# Gate box geometry (in axis units) and overall figure scaling.
WID = 0.65
HIG = 0.65
DEFAULT_SCALE = 4.3
# Matplotlib z-order layers: lines below gray regions below gates below text.
PORDER_GATE = 5
PORDER_LINE = 2
PORDER_GRAY = 3
PORDER_TEXT = 6
PORDER_SUBP = 4
class Anchor:
    """Tracks which horizontal slots are occupied on one wire of the grid.

    Positions fold onto new rows every ``fold`` columns; ``yind`` is the
    wire's vertical coordinate and ``reg_num`` the number of wires per row.
    """

    def __init__(self, reg_num, yind, fold):
        self.__yind = yind
        self.__fold = fold
        self.__reg_num = reg_num
        self.__gate_placed = []
        self.gate_anchor = 0

    def plot_coord(self, index, gate_width):
        """Returns the (x, y) plot coordinates for a gate at slot ``index``."""
        column = index % self.__fold + 1
        if self.__fold > 0:
            # A gate that would overflow this fold row moves to the next one.
            if column + (gate_width - 1) > self.__fold:
                index += self.__fold - (column - 1)
            x_pos = index % self.__fold + 1 + 0.5 * (gate_width - 1)
            y_pos = self.__yind - (index // self.__fold) * (self.__reg_num + 1)
        else:
            x_pos = index + 1 + 0.5 * (gate_width - 1)
            y_pos = self.__yind
        # Remember where the gate finally landed.
        self.gate_anchor = index
        return x_pos, y_pos

    def is_locatable(self, index, gate_width):
        """True when ``gate_width`` slots starting at ``index`` are all free."""
        wanted = range(index, index + gate_width)
        return all(slot not in self.__gate_placed for slot in wanted)

    def set_index(self, index, gate_width):
        """Marks the slots of a gate as occupied, honoring fold overflow."""
        column = index % self.__fold + 1
        if column + (gate_width - 1) > self.__fold:
            anchor = index + self.__fold - (column - 1)
        else:
            anchor = index
        for offset in range(gate_width):
            slot = anchor + offset
            if slot not in self.__gate_placed:
                self.__gate_placed.append(slot)
        self.__gate_placed.sort()

    def get_index(self):
        """Returns the first slot after the last occupied one (0 if none)."""
        return self.__gate_placed[-1] + 1 if self.__gate_placed else 0
class MatplotlibDrawer:
    def __init__(self, qregs, cregs, ops,
                 scale=1.0, style=None, plot_barriers=True,
                 reverse_bits=False):
        """Sets up the figure, style and register bookkeeping for drawing.

        Args:
            qregs: iterable of (quantum register, index) pairs.
            cregs: iterable of (classical register, index) pairs.
            ops: circuit operations to be drawn.
            scale: multiplier applied to DEFAULT_SCALE.
            style: style dict, path to a style JSON file, or False for B&W.
            plot_barriers: whether barriers are rendered.
            reverse_bits: whether bit order is reversed in the output.
        """
        if not HAS_MATPLOTLIB:
            raise ImportError('The class MatplotlibDrawer needs matplotlib. '
                              'Run "pip install matplotlib" before.')
        self._ast = None
        self._scale = DEFAULT_SCALE * scale
        self._creg = []
        self._qreg = []
        self._registers(cregs, qregs)
        self._ops = ops

        self._qreg_dict = collections.OrderedDict()
        self._creg_dict = collections.OrderedDict()
        # Running layout extents, updated while drawing.
        self._cond = {
            'n_lines': 0,
            'xmax': 0,
            'ymax': 0,
        }
        config = user_config.get_config()
        # NOTE(review): when a user config exists but names an unknown style,
        # self._style is never assigned here, and an explicit style=False is
        # ignored — confirm whether this matches the intended precedence.
        if config:
            config_style = config.get('circuit_mpl_style', 'default')
            if config_style == 'default':
                self._style = DefaultStyle()
            elif config_style == 'bw':
                self._style = BWStyle()
        elif style is False:
            self._style = BWStyle()
        else:
            self._style = DefaultStyle()

        self.plot_barriers = plot_barriers
        self.reverse_bits = reverse_bits
        # A dict or JSON-file style overlays the base style chosen above.
        if style:
            if isinstance(style, dict):
                self._style.set_style(style)
            elif isinstance(style, str):
                with open(style, 'r') as infile:
                    dic = json.load(infile)
                self._style.set_style(dic)

        self.figure = plt.figure()
        self.figure.patch.set_facecolor(color=self._style.bg)
        self.ax = self.figure.add_subplot(111)
        self.ax.axis('off')
        self.ax.set_aspect('equal')
        self.ax.tick_params(labelbottom=False, labeltop=False,
                            labelleft=False, labelright=False)
def _registers(self, creg, qreg):
self._creg = []
for r in creg:
self._creg.append(Register(reg=r[0], index=r[1]))
self._qreg = []
for r in qreg:
self._qreg.append(Register(reg=r[0], index=r[1]))
    @property
    def ast(self):
        # Stored AST; initialized to None in __init__.
        return self._ast
    def _custom_multiqubit_gate(self, xy, fc=None, wide=True, text=None,
                                subtext=None):
        """Draws one box spanning several qubit wires with per-wire indices.

        Args:
            xy: list of (x, y) anchor points, one per involved qubit wire.
            fc: face color override; defaults to the style's gate color.
            wide: widen the box based on the label length.
            text: main gate label.
            subtext: secondary label (e.g. parameters) under the main one.
        """
        xpos = min([x[0] for x in xy])
        ypos = min([y[1] for y in xy])
        ypos_max = max([y[1] for y in xy])
        if wide:
            # Box width grows with the longer of label/sublabel.
            if subtext:
                boxes_length = round(max([len(text), len(subtext)]) / 8) or 1
            else:
                boxes_length = round(len(text) / 8) or 1
            wid = WID * 2.8 * boxes_length
        else:
            wid = WID
        if fc:
            _fc = fc
        else:
            _fc = self._style.gc

        # Height covers every wire between the lowest and highest anchor.
        qubit_span = abs(ypos) - abs(ypos_max) + 1
        height = HIG + (qubit_span - 1)
        box = patches.Rectangle(
            xy=(xpos - 0.5 * wid, ypos - .5 * HIG),
            width=wid, height=height, fc=_fc, ec=self._style.lc,
            linewidth=1.5, zorder=PORDER_GATE)
        self.ax.add_patch(box)
        # Annotate inputs
        for bit, y in enumerate([x[1] for x in xy]):
            self.ax.text(xpos - 0.45 * wid, y, str(bit), ha='left', va='center',
                         fontsize=self._style.fs, color=self._style.gt,
                         clip_on=True, zorder=PORDER_TEXT)

        if text:
            disp_text = text
            if subtext:
                self.ax.text(xpos, ypos + 0.5 * height, disp_text, ha='center',
                             va='center', fontsize=self._style.fs,
                             color=self._style.gt, clip_on=True,
                             zorder=PORDER_TEXT)
                self.ax.text(xpos, ypos + 0.3 * height, subtext, ha='center',
                             va='center', fontsize=self._style.sfs,
                             color=self._style.sc, clip_on=True,
                             zorder=PORDER_TEXT)
            else:
                self.ax.text(xpos, ypos + .5 * (qubit_span - 1), disp_text,
                             ha='center',
                             va='center',
                             fontsize=self._style.fs,
                             color=self._style.gt,
                             clip_on=True,
                             zorder=PORDER_TEXT)
    def _gate(self, xy, fc=None, wide=False, text=None, subtext=None):
        """Draws a single-qubit gate box centered at (x, y).

        Args:
            xy: (x, y) center of the box.
            fc: face color override; else looked up by gate name, else default.
            wide: widen the box for long labels.
            text: gate name; mapped through the style's display table if known.
            subtext: secondary label (e.g. parameters) under the main one.
        """
        xpos, ypos = xy

        if wide:
            if subtext:
                wid = WID * 2.8
            else:
                boxes_wide = round(len(text) / 10) or 1
                wid = WID * 2.8 * boxes_wide
        else:
            wid = WID
        if fc:
            _fc = fc
        elif text and text in self._style.dispcol:
            _fc = self._style.dispcol[text]
        else:
            _fc = self._style.gc

        box = patches.Rectangle(
            xy=(xpos - 0.5 * wid, ypos - 0.5 * HIG), width=wid, height=HIG,
            fc=_fc, ec=self._style.lc, linewidth=1.5, zorder=PORDER_GATE)
        self.ax.add_patch(box)

        if text:
            # Known gates render their LaTeX display name from the style table.
            if text in self._style.dispcol:
                disp_text = "${}$".format(self._style.disptex[text])
            else:
                disp_text = text
            if subtext:
                self.ax.text(xpos, ypos + 0.15 * HIG, disp_text, ha='center',
                             va='center', fontsize=self._style.fs,
                             color=self._style.gt, clip_on=True,
                             zorder=PORDER_TEXT)
                self.ax.text(xpos, ypos - 0.3 * HIG, subtext, ha='center',
                             va='center', fontsize=self._style.sfs,
                             color=self._style.sc, clip_on=True,
                             zorder=PORDER_TEXT)
            else:
                self.ax.text(xpos, ypos, disp_text, ha='center', va='center',
                             fontsize=self._style.fs,
                             color=self._style.gt,
                             clip_on=True,
                             zorder=PORDER_TEXT)
def _subtext(self, xy, text):
xpos, ypos = xy
self.ax.text(xpos, ypos - 0.3 * HIG, text, ha='center', va='top',
fontsize=self._style.sfs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
    def _sidetext(self, xy, text):
        """Draws text to the right of (x, y), offset by the label length."""
        xpos, ypos = xy

        # 0.15 = the initial gap, each char means it needs to move
        # another 0.0375 over
        xp = xpos + 0.15 + (0.0375 * len(text))
        self.ax.text(xp, ypos+HIG, text, ha='center', va='top',
                     fontsize=self._style.sfs,
                     color=self._style.tc,
                     clip_on=True,
                     zorder=PORDER_TEXT)
    def _line(self, xy0, xy1, lc=None, ls=None):
        """Draws a wire segment between two points.

        Args:
            xy0, xy1: segment endpoints.
            lc: line color; defaults to the style's line color.
            ls: line style; 'doublet' draws two parallel lines (classical
                wires), any other value is passed to matplotlib as-is.
        """
        x0, y0 = xy0
        x1, y1 = xy1
        if lc is None:
            linecolor = self._style.lc
        else:
            linecolor = lc
        if ls is None:
            linestyle = 'solid'
        else:
            linestyle = ls

        if linestyle == 'doublet':
            # Offset the two strands perpendicular to the segment direction.
            theta = np.arctan2(np.abs(x1 - x0), np.abs(y1 - y0))
            dx = 0.05 * WID * np.cos(theta)
            dy = 0.05 * WID * np.sin(theta)
            self.ax.plot([x0 + dx, x1 + dx], [y0 + dy, y1 + dy],
                         color=linecolor,
                         linewidth=1.0,
                         linestyle='solid',
                         zorder=PORDER_LINE)
            self.ax.plot([x0 - dx, x1 - dx], [y0 - dy, y1 - dy],
                         color=linecolor,
                         linewidth=1.0,
                         linestyle='solid',
                         zorder=PORDER_LINE)
        else:
            self.ax.plot([x0, x1], [y0, y1],
                         color=linecolor,
                         linewidth=1.0,
                         linestyle=linestyle,
                         zorder=PORDER_LINE)
    def _measure(self, qxy, cxy, cid):
        """Draw a measurement: a meter box on the qubit wire at *qxy*, an
        arrow down to the classical wire at *cxy*, and - when registers are
        bundled - the classical bit index *cid* beside the arrowhead."""
        qx, qy = qxy
        cx, cy = cxy
        self._gate(qxy, fc=self._style.dispcol['meas'])
        # add measure symbol (half-circle arc plus the needle stroke)
        arc = patches.Arc(xy=(qx, qy - 0.15 * HIG), width=WID * 0.7,
                          height=HIG * 0.7, theta1=0, theta2=180, fill=False,
                          ec=self._style.lc, linewidth=1.5,
                          zorder=PORDER_GATE)
        self.ax.add_patch(arc)
        self.ax.plot([qx, qx + 0.35 * WID],
                     [qy - 0.15 * HIG, qy + 0.20 * HIG],
                     color=self._style.lc, linewidth=1.5, zorder=PORDER_GATE)
        # arrow from the qubit wire down to the classical wire
        self._line(qxy, [cx, cy + 0.35 * WID], lc=self._style.cc,
                   ls=self._style.cline)
        arrowhead = patches.Polygon(((cx - 0.20 * WID, cy + 0.35 * WID),
                                     (cx + 0.20 * WID, cy + 0.35 * WID),
                                     (cx, cy)),
                                    fc=self._style.cc,
                                    ec=None)
        self.ax.add_artist(arrowhead)
        # target bit label (only drawn when classical bits are bundled)
        if self._style.bundle:
            self.ax.text(cx + .25, cy + .1, str(cid), ha='left', va='bottom',
                         fontsize=0.8 * self._style.fs,
                         color=self._style.tc,
                         clip_on=True,
                         zorder=PORDER_TEXT)
def _conds(self, xy, istrue=False):
xpos, ypos = xy
if istrue:
_fc = self._style.lc
else:
_fc = self._style.gc
box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15,
fc=_fc, ec=self._style.lc,
linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
def _ctrl_qubit(self, xy):
xpos, ypos = xy
box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15,
fc=self._style.lc, ec=self._style.lc,
linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
def _tgt_qubit(self, xy):
xpos, ypos = xy
box = patches.Circle(xy=(xpos, ypos), radius=HIG * 0.35,
fc=self._style.dispcol['target'],
ec=self._style.lc, linewidth=1.5,
zorder=PORDER_GATE)
self.ax.add_patch(box)
# add '+' symbol
self.ax.plot([xpos, xpos], [ypos - 0.35 * HIG, ypos + 0.35 * HIG],
color=self._style.lc, linewidth=1.0, zorder=PORDER_GATE)
self.ax.plot([xpos - 0.35 * HIG, xpos + 0.35 * HIG], [ypos, ypos],
color=self._style.lc, linewidth=1.0, zorder=PORDER_GATE)
def _swap(self, xy):
xpos, ypos = xy
self.ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID],
[ypos - 0.20 * WID, ypos + 0.20 * WID],
color=self._style.lc, linewidth=1.5, zorder=PORDER_LINE)
self.ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID],
[ypos + 0.20 * WID, ypos - 0.20 * WID],
color=self._style.lc, linewidth=1.5, zorder=PORDER_LINE)
    def _barrier(self, config, anc):
        """Draw a barrier: a translucent vertical band plus a dashed line
        per covered qubit wire.

        ``config['coord']`` holds the (x, y) of each covered wire,
        ``config['group']`` the registers spanned; *anc* is the anchor
        (column) index, used to shift y coordinates on folded circuits.
        """
        xys = config['coord']
        group = config['group']
        y_reg = []
        for qreg in self._qreg_dict.values():
            if qreg['group'] in group:
                y_reg.append(qreg['y'])
        x0 = xys[0][0]
        # int(anc / fold) is the fold row; each row is offset a full block of
        # n_lines + 1 downwards.
        box_y0 = min(y_reg) - int(anc / self._style.fold) * (self._cond['n_lines'] + 1) - 0.5
        box_y1 = max(y_reg) - int(anc / self._style.fold) * (self._cond['n_lines'] + 1) + 0.5
        box = patches.Rectangle(xy=(x0 - 0.3 * WID, box_y0),
                                width=0.6 * WID, height=box_y1 - box_y0,
                                fc=self._style.bc, ec=None, alpha=0.6,
                                linewidth=1.5, zorder=PORDER_GRAY)
        self.ax.add_patch(box)
        for xy in xys:
            xpos, ypos = xy
            self.ax.plot([xpos, xpos], [ypos + 0.5, ypos - 0.5],
                         linewidth=1, linestyle="dashed",
                         color=self._style.lc,
                         zorder=PORDER_TEXT)
def _linefeed_mark(self, xy):
xpos, ypos = xy
self.ax.plot([xpos - .1, xpos - .1],
[ypos, ypos - self._cond['n_lines'] + 1],
color=self._style.lc, zorder=PORDER_LINE)
self.ax.plot([xpos + .1, xpos + .1],
[ypos, ypos - self._cond['n_lines'] + 1],
color=self._style.lc, zorder=PORDER_LINE)
    def draw(self, filename=None, verbose=False):
        """Render the circuit and return the matplotlib figure.

        Draws the register wires and all operations, then sizes the axes and
        the figure from the computed extents; when *filename* is given the
        figure is also saved to disk at the style's dpi.
        """
        self._draw_regs()
        self._draw_ops(verbose)
        # canvas extents: computed drawing bounds plus the style margins
        _xl = - self._style.margin[0]
        _xr = self._cond['xmax'] + self._style.margin[1]
        _yb = - self._cond['ymax'] - self._style.margin[2] + 1 - 0.5
        _yt = self._style.margin[3] + 0.5
        self.ax.set_xlim(_xl, _xr)
        self.ax.set_ylim(_yb, _yt)
        # update figure size
        fig_w = _xr - _xl
        fig_h = _yt - _yb
        # a negative figwidth means "auto": derive it once from the extents
        # (note this mutates the shared style object)
        if self._style.figwidth < 0.0:
            self._style.figwidth = fig_w * self._scale * self._style.fs / 72 / WID
        self.figure.set_size_inches(self._style.figwidth, self._style.figwidth * fig_h / fig_w)
        if filename:
            self.figure.savefig(filename, dpi=self._style.dpi,
                                bbox_inches='tight')
        plt.close(self.figure)
        return self.figure
    def _draw_regs(self):
        """Assign a y position and label to every quantum/classical bit.

        Populates ``self._qreg_dict`` / ``self._creg_dict`` and counts the
        wire rows in ``self._cond['n_lines']``.  When the style requests
        bundling, consecutive bits of the same classical register share one
        wire and get an unindexed label.
        """
        # quantum register
        for ii, reg in enumerate(self._qreg):
            if len(self._qreg) > 1:
                label = '${}_{{{}}}$'.format(reg.reg.name, reg.index)
            else:
                label = '${}$'.format(reg.reg.name)
            pos = -ii
            self._qreg_dict[ii] = {
                'y': pos,
                'label': label,
                'index': reg.index,
                'group': reg.reg
            }
            self._cond['n_lines'] += 1
        # classical register
        if self._creg:
            # n_creg is self._creg shifted left by one, so zip_longest pairs
            # each bit with its successor (nreg is None for the last bit).
            n_creg = self._creg.copy()
            n_creg.pop(0)
            idx = 0
            y_off = -len(self._qreg)
            for ii, (reg, nreg) in enumerate(itertools.zip_longest(
                    self._creg, n_creg)):
                pos = y_off - idx
                if self._style.bundle:
                    label = '${}$'.format(reg.reg.name)
                    self._creg_dict[ii] = {
                        'y': pos,
                        'label': label,
                        'index': reg.index,
                        'group': reg.reg
                    }
                    # same register continues on the next bit: reuse this wire
                    if not (not nreg or reg.reg != nreg.reg):
                        continue
                else:
                    label = '${}_{{{}}}$'.format(reg.reg.name, reg.index)
                    self._creg_dict[ii] = {
                        'y': pos,
                        'label': label,
                        'index': reg.index,
                        'group': reg.reg
                    }
                self._cond['n_lines'] += 1
                idx += 1
    def _draw_regs_sub(self, n_fold, feedline_l=False, feedline_r=False):
        """Draw the wire rows for one fold (row *n_fold*) of the circuit.

        Writes the register labels (with initial-state decoration on the
        first fold only), draws the horizontal wires, collapses bundled
        classical bits onto shared wires with a slash-and-count marker, and
        adds the fold feed-line ticks on the requested side(s).
        """
        # quantum register
        for qreg in self._qreg_dict.values():
            if n_fold == 0:
                label = qreg['label'] + ' : $\\left|0\\right\\rangle$'
            else:
                label = qreg['label']
            y = qreg['y'] - n_fold * (self._cond['n_lines'] + 1)
            self.ax.text(-0.5, y, label, ha='right', va='center',
                         fontsize=self._style.fs,
                         color=self._style.tc,
                         clip_on=True,
                         zorder=PORDER_TEXT)
            self._line([0, y], [self._cond['xmax'], y])
        # classical register: group bits that share a y (bundled wires)
        this_creg_dict = {}
        for creg in self._creg_dict.values():
            if n_fold == 0:
                label = creg['label'] + ' :  0 '
            else:
                label = creg['label']
            y = creg['y'] - n_fold * (self._cond['n_lines'] + 1)
            if y not in this_creg_dict.keys():
                this_creg_dict[y] = {'val': 1, 'label': label}
            else:
                this_creg_dict[y]['val'] += 1
        for y, this_creg in this_creg_dict.items():
            # bundle marker: slash plus bit count on wires carrying >1 bit
            if this_creg['val'] > 1:
                self.ax.plot([.6, .7], [y - .1, y + .1],
                             color=self._style.cc,
                             zorder=PORDER_LINE)
                self.ax.text(0.5, y + .1, str(this_creg['val']), ha='left',
                             va='bottom',
                             fontsize=0.8 * self._style.fs,
                             color=self._style.tc,
                             clip_on=True,
                             zorder=PORDER_TEXT)
            self.ax.text(-0.5, y, this_creg['label'], ha='right', va='center',
                         fontsize=self._style.fs,
                         color=self._style.tc,
                         clip_on=True,
                         zorder=PORDER_TEXT)
            self._line([0, y], [self._cond['xmax'], y], lc=self._style.cc,
                       ls=self._style.cline)
        # lf line (fold continuation marks)
        if feedline_r:
            self._linefeed_mark((self._style.fold + 1 - 0.1,
                                 - n_fold * (self._cond['n_lines'] + 1)))
        if feedline_l:
            self._linefeed_mark((0.1,
                                 - n_fold * (self._cond['n_lines'] + 1)))
    def _draw_ops(self, verbose=False):
        """Lay out and draw every operation layer of ``self._ops``.

        For each layer: compute the layer's column width (wide gates and
        long custom names need more room), map every op's qubits/clbits to
        plot coordinates through the per-wire ``Anchor`` managers, draw the
        op with the matching helper (measure, barrier, 1/2/3/multi-qubit
        gates, conditionals), then size the canvas, draw the horizontal
        wires for every fold and, optionally, the per-column gate indices.
        """
        _wide_gate = ['u2', 'u3', 'cu2', 'cu3']
        _barriers = {'coord': [], 'group': []}
        #
        # generate coordinate manager
        #
        q_anchors = {}
        for key, qreg in self._qreg_dict.items():
            q_anchors[key] = Anchor(reg_num=self._cond['n_lines'],
                                    yind=qreg['y'],
                                    fold=self._style.fold)
        c_anchors = {}
        for key, creg in self._creg_dict.items():
            c_anchors[key] = Anchor(reg_num=self._cond['n_lines'],
                                    yind=creg['y'],
                                    fold=self._style.fold)
        #
        # draw gates
        #
        prev_anc = -1
        for layer in self._ops:
            layer_width = 1
            for op in layer:
                if op.name in _wide_gate:
                    if layer_width < 2:
                        layer_width = 2
                # if custom gate with a longer than standard name determine
                # width
                elif op.name not in ['barrier', 'snapshot', 'load', 'save',
                                     'noise', 'cswap', 'swap', 'measure'] and len(
                                         op.name) >= 4:
                    box_width = round(len(op.name) / 8)
                    # handle params/subtext longer than op names
                    if op.type == 'op' and hasattr(op.op, 'params'):
                        param = self.param_parse(op.op.params, self._style.pimode)
                        if len(param) > len(op.name):
                            box_width = round(len(param) / 8)
                            # If more than 4 characters min width is 2
                            if box_width <= 1:
                                box_width = 2
                            if layer_width < box_width:
                                if box_width > 2:
                                    layer_width = box_width * 2
                                else:
                                    layer_width = 2
                            continue
                    # If more than 4 characters min width is 2
                    if box_width <= 1:
                        box_width = 2
                    if layer_width < box_width:
                        if box_width > 2:
                            layer_width = box_width * 2
                        else:
                            layer_width = 2
            this_anc = prev_anc + 1
            for op in layer:
                _iswide = op.name in _wide_gate
                if op.name not in ['barrier', 'snapshot', 'load', 'save',
                                   'noise', 'cswap', 'swap', 'measure'] and len(
                                       op.name) >= 4:
                    _iswide = True
                # get qreg index
                q_idxs = []
                for qarg in op.qargs:
                    for index, reg in self._qreg_dict.items():
                        if (reg['group'] == qarg.register and
                                reg['index'] == qarg.index):
                            q_idxs.append(index)
                            break
                # get creg index
                c_idxs = []
                for carg in op.cargs:
                    for index, reg in self._creg_dict.items():
                        if (reg['group'] == carg.register and
                                reg['index'] == carg.index):
                            c_idxs.append(index)
                            break
                # Only add the gate to the anchors if it is going to be plotted.
                # This prevents additional blank wires at the end of the line if
                # the last instruction is a barrier type
                if self.plot_barriers or \
                        op.name not in ['barrier', 'snapshot', 'load', 'save',
                                        'noise']:
                    for ii in q_idxs:
                        q_anchors[ii].set_index(this_anc, layer_width)
                # qreg coordinate
                q_xy = [q_anchors[ii].plot_coord(this_anc, layer_width) for ii in q_idxs]
                # creg coordinate
                c_xy = [c_anchors[ii].plot_coord(this_anc, layer_width) for ii in c_idxs]
                # bottom and top point of qreg
                qreg_b = min(q_xy, key=lambda xy: xy[1])
                qreg_t = max(q_xy, key=lambda xy: xy[1])
                # update index based on the value from plotting
                this_anc = q_anchors[q_idxs[0]].gate_anchor
                if verbose:
                    print(op)
                if op.type == 'op' and hasattr(op.op, 'params'):
                    param = self.param_parse(op.op.params, self._style.pimode)
                else:
                    param = None
                # conditional gate
                if op.condition:
                    c_xy = [c_anchors[ii].plot_coord(this_anc, layer_width) for
                            ii in self._creg_dict]
                    # mask selects the classical bits of the conditioned register
                    mask = 0
                    for index, cbit in enumerate(self._creg):
                        if cbit.reg == op.condition[0]:
                            mask |= (1 << index)
                    val = op.condition[1]
                    # cbit list to consider
                    fmt_c = '{{:0{}b}}'.format(len(c_xy))
                    cmask = list(fmt_c.format(mask))[::-1]
                    # value
                    fmt_v = '{{:0{}b}}'.format(cmask.count('1'))
                    vlist = list(fmt_v.format(val))[::-1]
                    # plot conditionals
                    v_ind = 0
                    xy_plot = []
                    for xy, m in zip(c_xy, cmask):
                        if m == '1':
                            if xy not in xy_plot:
                                if vlist[v_ind] == '1' or self._style.bundle:
                                    self._conds(xy, istrue=True)
                                else:
                                    self._conds(xy, istrue=False)
                                xy_plot.append(xy)
                            v_ind += 1
                    creg_b = sorted(xy_plot, key=lambda xy: xy[1])[0]
                    self._subtext(creg_b, hex(val))
                    self._line(qreg_t, creg_b, lc=self._style.cc,
                               ls=self._style.cline)
                #
                # draw special gates
                #
                if op.name == 'measure':
                    vv = self._creg_dict[c_idxs[0]]['index']
                    self._measure(q_xy[0], c_xy[0], vv)
                elif op.name in ['barrier', 'snapshot', 'load', 'save',
                                 'noise']:
                    _barriers = {'coord': [], 'group': []}
                    for index, qbit in enumerate(q_idxs):
                        q_group = self._qreg_dict[qbit]['group']
                        if q_group not in _barriers['group']:
                            _barriers['group'].append(q_group)
                        _barriers['coord'].append(q_xy[index])
                    if self.plot_barriers:
                        self._barrier(_barriers, this_anc)
                    else:
                        # this stop there being blank lines plotted in place of barriers
                        this_anc -= 1
                elif op.name == 'initialize':
                    vec = '[%s]' % param
                    self._custom_multiqubit_gate(q_xy, wide=_iswide,
                                                 text="|psi>",
                                                 subtext=vec)
                elif op.name == 'unitary':
                    # TODO(mtreinish): Look into adding the unitary to the
                    # subtext
                    self._custom_multiqubit_gate(q_xy, wide=_iswide,
                                                 text="U")
                #
                # draw single qubit gates
                #
                elif len(q_xy) == 1:
                    disp = op.name
                    if param:
                        prm = '({})'.format(param)
                        if len(prm) < 20:
                            self._gate(q_xy[0], wide=_iswide, text=disp,
                                       subtext=prm)
                        else:
                            self._gate(q_xy[0], wide=_iswide, text=disp)
                    else:
                        self._gate(q_xy[0], wide=_iswide, text=disp)
                #
                # draw multi-qubit gates (n=2)
                #
                elif len(q_xy) == 2:
                    # cx
                    if op.name == 'cx':
                        self._ctrl_qubit(q_xy[0])
                        self._tgt_qubit(q_xy[1])
                        # add qubit-qubit wiring
                        self._line(qreg_b, qreg_t)
                    # cz for latexmode
                    elif op.name == 'cz':
                        if self._style.latexmode:
                            self._ctrl_qubit(q_xy[0])
                            self._ctrl_qubit(q_xy[1])
                        else:
                            disp = op.name.replace('c', '')
                            self._ctrl_qubit(q_xy[0])
                            self._gate(q_xy[1], wide=_iswide, text=disp)
                        # add qubit-qubit wiring
                        self._line(qreg_b, qreg_t)
                    # control gate
                    elif op.name in ['cy', 'ch', 'cu3', 'crz']:
                        disp = op.name.replace('c', '')
                        self._ctrl_qubit(q_xy[0])
                        if param:
                            self._gate(q_xy[1], wide=_iswide, text=disp,
                                       subtext='{}'.format(param))
                        else:
                            self._gate(q_xy[1], wide=_iswide, text=disp)
                        # add qubit-qubit wiring
                        self._line(qreg_b, qreg_t)
                    # cu1
                    elif op.name == 'cu1':
                        self._ctrl_qubit(q_xy[0])
                        self._ctrl_qubit(q_xy[1])
                        self._sidetext(qreg_b, param)
                        # add qubit-qubit wiring
                        self._line(qreg_b, qreg_t)
                    # rzz gate
                    elif op.name == 'rzz':
                        self._ctrl_qubit(q_xy[0])
                        self._ctrl_qubit(q_xy[1])
                        self._sidetext(qreg_b, text='zz({})'.format(param))
                        # add qubit-qubit wiring
                        self._line(qreg_b, qreg_t)
                    # swap gate
                    elif op.name == 'swap':
                        self._swap(q_xy[0])
                        self._swap(q_xy[1])
                        # add qubit-qubit wiring
                        self._line(qreg_b, qreg_t)
                    # Custom gate
                    else:
                        self._custom_multiqubit_gate(q_xy, wide=_iswide,
                                                     text=op.name)
                #
                # draw multi-qubit gates (n=3)
                #
                elif len(q_xy) == 3:
                    # cswap gate
                    if op.name == 'cswap':
                        self._ctrl_qubit(q_xy[0])
                        self._swap(q_xy[1])
                        self._swap(q_xy[2])
                        # add qubit-qubit wiring
                        self._line(qreg_b, qreg_t)
                    # ccx gate
                    elif op.name == 'ccx':
                        self._ctrl_qubit(q_xy[0])
                        self._ctrl_qubit(q_xy[1])
                        self._tgt_qubit(q_xy[2])
                        # add qubit-qubit wiring
                        self._line(qreg_b, qreg_t)
                    # custom gate
                    else:
                        self._custom_multiqubit_gate(q_xy, wide=_iswide,
                                                     text=op.name)
                # draw custom multi-qubit gate
                elif len(q_xy) > 3:
                    self._custom_multiqubit_gate(q_xy, wide=_iswide,
                                                 text=op.name)
                else:
                    logger.critical('Invalid gate %s', op)
                    raise exceptions.VisualizationError('invalid gate {}'.format(op))
            prev_anc = this_anc + layer_width - 1
        #
        # adjust window size and draw horizontal lines
        #
        anchors = [q_anchors[ii].get_index() for ii in self._qreg_dict]
        if anchors:
            max_anc = max(anchors)
        else:
            max_anc = 0
        n_fold = max(0, max_anc - 1) // self._style.fold
        # window size
        if max_anc > self._style.fold > 0:
            self._cond['xmax'] = self._style.fold + 1
            self._cond['ymax'] = (n_fold + 1) * (self._cond['n_lines'] + 1) - 1
        else:
            self._cond['xmax'] = max_anc + 1
            self._cond['ymax'] = self._cond['n_lines']
        # add horizontal lines
        for ii in range(n_fold + 1):
            feedline_r = (n_fold > 0 and n_fold > ii)
            feedline_l = (ii > 0)
            self._draw_regs_sub(ii, feedline_l, feedline_r)
        # draw gate number
        if self._style.index:
            for ii in range(max_anc):
                if self._style.fold > 0:
                    x_coord = ii % self._style.fold + 1
                    y_coord = - (ii // self._style.fold) * (self._cond['n_lines'] + 1) + 0.7
                else:
                    x_coord = ii + 1
                    y_coord = 0.7
                self.ax.text(x_coord, y_coord, str(ii + 1), ha='center',
                             va='center', fontsize=self._style.sfs,
                             color=self._style.tc, clip_on=True,
                             zorder=PORDER_TEXT)
@staticmethod
def param_parse(v, pimode=False):
# create an empty list to store the parameters in
param_parts = [None] * len(v)
for i, e in enumerate(v):
if pimode:
try:
param_parts[i] = MatplotlibDrawer.format_pi(e)
except TypeError:
param_parts[i] = str(e)
else:
try:
param_parts[i] = MatplotlibDrawer.format_numeric(e)
except TypeError:
param_parts[i] = str(e)
if param_parts[i].startswith('-'):
param_parts[i] = '$-$' + param_parts[i][1:]
param_parts = ', '.join(param_parts)
return param_parts
@staticmethod
def format_pi(val):
fracvals = MatplotlibDrawer.fraction(val)
buf = ''
if fracvals:
nmr, dnm = fracvals.numerator, fracvals.denominator
if nmr == 1:
buf += '$\\pi$'
elif nmr == -1:
buf += '-$\\pi$'
else:
buf += '{}$\\pi$'.format(nmr)
if dnm > 1:
buf += '/{}'.format(dnm)
return buf
else:
coef = MatplotlibDrawer.format_numeric(val / np.pi)
if coef == '0':
return '0'
return '{}$\\pi$'.format(coef)
@staticmethod
def format_numeric(val, tol=1e-5):
if isinstance(val, complex):
return str(val)
elif complex(val).imag != 0:
val = complex(val)
abs_val = abs(val)
if math.isclose(abs_val, 0.0, abs_tol=1e-100):
return '0'
if math.isclose(math.fmod(abs_val, 1.0),
0.0, abs_tol=tol) and 0.5 < abs_val < 9999.5:
return str(int(val))
if 0.1 <= abs_val < 100.0:
return '{:.2f}'.format(val)
return '{:.1e}'.format(val)
@staticmethod
def fraction(val, base=np.pi, n=100, tol=1e-5):
abs_val = abs(val)
for i in range(1, n):
for j in range(1, n):
if math.isclose(abs_val, i / j * base, rel_tol=tol):
if val < 0:
i *= -1
return fractions.Fraction(i, j)
return None
| 39.633225 | 95 | 0.445754 |
9dac86962a02e0d5d53ba68e8f0216ecfa6e8a30 | 2,808 | py | Python | algo/src/deep_n_wide.py | RunanWang/somewhere | 0ba93a3f5f91042b4910171dda981f6c863ee581 | [
"MIT"
] | null | null | null | algo/src/deep_n_wide.py | RunanWang/somewhere | 0ba93a3f5f91042b4910171dda981f6c863ee581 | [
"MIT"
] | 20 | 2019-11-20T06:47:28.000Z | 2020-01-14T02:23:22.000Z | algo/src/deep_n_wide.py | RunanWang/somewhere | 0ba93a3f5f91042b4910171dda981f6c863ee581 | [
"MIT"
] | null | null | null | from keras.models import Model
from keras.layers import Input, Dense, Lambda, multiply, concatenate
from keras import backend as K
from keras import regularizers
import pandas as pd
import utils as utils
import preprocess as preprocess
from keras.callbacks import TensorBoard
def keras_sum_layer_output_shape(input_shape):
    """Output-shape function for the summing Lambda layer.

    Given a rank-2 shape ``(batch, features)``, the summed output keeps the
    batch dimension and collapses the feature axis to 1.
    """
    dims = list(input_shape)
    assert len(dims) == 2
    return tuple(dims[:-1] + [1])
def keras_sum_layer(x):
    # Sum the tensor over axis 1 with keepdims, so the output stays 2-D with
    # shape (batch, 1); used in the Lambda layer together with
    # keras_sum_layer_output_shape.
    return K.sum(x, axis=1, keepdims=True)
# Prepare the training data: read, normalise the continuous columns, and
# merge them with the categorical columns into one feature matrix.
cont, cate, y = preprocess.read_data('process_train.csv')
cont = preprocess.preprocess_normal(cont)
X = preprocess.preprocess_merge(cont, cate)
# Prepare the test data (also used as the validation data).
test_cont, test_cate, test_y = preprocess.read_data('process_test.csv')
test_cont = preprocess.preprocess_normal(test_cont)
test_X = preprocess.preprocess_merge(test_cont, test_cate)
# Number of MLR partitions for the wide part (upstream default is 12).
wide_m = 1
# Wide part, layer 1: the input layer.
input_wide = Input(shape=(X.shape[1], ))
# Wide part, layer 2: LR and partition-weight layers, both with
# l2-regularised biases.
wide_divide = Dense(wide_m,
                    activation='softmax',
                    bias_regularizer=regularizers.l2(0.01))(input_wide)
wide_fit = Dense(wide_m,
                 activation='sigmoid',
                 bias_regularizer=regularizers.l2(0.01))(input_wide)
# Wide part, layer 3: elementwise product, summed over the partitions.
wide_ele = multiply([wide_divide, wide_fit])
wide = Lambda(keras_sum_layer,
              output_shape=keras_sum_layer_output_shape)(wide_ele)
# Deep part: a 256-128-64 relu MLP over the same input features.
input_deep = Input(shape=(X.shape[1], ))
deep_layer1 = Dense(256,
                    activation='relu',
                    bias_regularizer=regularizers.l2(0.01))(input_deep)
deep_layer2 = Dense(128,
                    activation='relu',
                    bias_regularizer=regularizers.l2(0.01))(deep_layer1)
deep = Dense(64, activation='relu',
             bias_regularizer=regularizers.l2(0.01))(deep_layer2)
# Combine deep & wide and squash to a single output probability.
# (The old pd.concat of [X, X] into X_in / test_X_in was dead code - the
# model is always fed the two-input list [X, X] directly - so it is removed.)
coned = concatenate([wide, deep])
out = Dense(1, activation='sigmoid')(coned)
model = Model(inputs=[input_wide, input_deep], outputs=out)
model.compile(optimizer='adam',
              loss='mean_squared_error',
              metrics=['accuracy', utils.rmse])
model.fit([X, X],
          y,
          epochs=100,
          batch_size=10,
          callbacks=[
              utils.roc_callback(training_data=[[X, X], y],
                                 validation_data=[[test_X, test_X], test_y]),
              TensorBoard(log_dir='final/{}'.format("deep_wide"))
          ])
loss, accuracy, rmse = model.evaluate([test_X, test_X], test_y)
print('Accuracy: %.2f %%' % (accuracy * 100))
print('RMSE: %.2f %%' % (rmse * 100))
print('Loss: %.2f %%' % (loss * 100))
369e5d0b803043f7238fc6e16515e489d472e116 | 1,252 | py | Python | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/CloneFlowJobRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/CloneFlowJobRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/CloneFlowJobRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CloneFlowJobRequest(RpcRequest):
	"""Request object for the EMR ``CloneFlowJob`` RPC API (version 2016-04-08).

	Query parameters: ``Id`` (the flow job to clone) and ``ProjectId``
	(the project the flow job belongs to).
	"""

	def __init__(self):
		RpcRequest.__init__(self, 'Emr', '2016-04-08', 'CloneFlowJob')

	def get_Id(self):
		return self.get_query_params().get('Id')

	def set_Id(self, Id):
		self.add_query_param('Id', Id)

	def get_ProjectId(self):
		return self.get_query_params().get('ProjectId')

	def set_ProjectId(self, ProjectId):
		# Fix: dataset-extraction residue ("| 34.777778 | ...") was fused onto
		# this line, making the file syntactically invalid.
		self.add_query_param('ProjectId', ProjectId)
c03eae8196ecf9a82fc0eba938e02db3f926dd85 | 9,222 | py | Python | lib/models/ganomaly.py | strelka145/skip-ganomaly | a1f9e4b2ebd248e70822acb4591b702634574ed7 | [
"MIT"
] | null | null | null | lib/models/ganomaly.py | strelka145/skip-ganomaly | a1f9e4b2ebd248e70822acb4591b702634574ed7 | [
"MIT"
] | null | null | null | lib/models/ganomaly.py | strelka145/skip-ganomaly | a1f9e4b2ebd248e70822acb4591b702634574ed7 | [
"MIT"
] | null | null | null | """GANomaly
"""
# pylint: disable=C0301,E1101,W0622,C0103,R0902,R0915
##
from collections import OrderedDict
import os
import time
import numpy as np
from tqdm import tqdm
from torch.autograd import Variable
import torch.optim as optim
import torch.nn as nn
import torch.utils.data
import torchvision.utils as vutils
from lib.models.networks import NetG, NetD, weights_init
from lib.visualizer import Visualizer
from lib.loss import l2_loss
from lib.evaluate import evaluate
from lib.models.basemodel import BaseModel
##
class Ganomaly(BaseModel):
    """GANomaly anomaly-detection model: generator (netg) + discriminator
    (netd), trained adversarially; anomaly scores come from the distance
    between the generator's input and output latent codes.
    """

    @property
    def name(self): return 'Ganomaly'

    def __init__(self, opt, data):
        super(Ganomaly, self).__init__(opt, data)
        # -- Misc attributes
        self.epoch = 0
        self.times = []
        self.total_steps = 0
        ##
        # Create and initialize networks.
        self.netg = NetG(self.opt).to(self.device)
        self.netd = NetD(self.opt).to(self.device)
        self.netg.apply(weights_init)
        self.netd.apply(weights_init)
        ##
        # Optionally resume both networks from checkpoints under opt.resume.
        if self.opt.resume != '':
            print("\nLoading pre-trained networks.")
            self.opt.iter = torch.load(os.path.join(self.opt.resume, 'netG.pth'))['epoch']
            self.netg.load_state_dict(torch.load(os.path.join(self.opt.resume, 'netG.pth'))['state_dict'])
            self.netd.load_state_dict(torch.load(os.path.join(self.opt.resume, 'netD.pth'))['state_dict'])
            print("\tDone.\n")
        # Losses: l2 for the adversarial-feature and latent terms, L1 for
        # reconstruction, BCE for the discriminator's real/fake decision.
        self.l_adv = l2_loss
        self.l_con = nn.L1Loss()
        self.l_enc = l2_loss
        self.l_bce = nn.BCELoss()
        ##
        # Initialize input tensors.
        self.input = torch.empty(size=(self.opt.batchsize, 3, self.opt.isize, self.opt.isize), dtype=torch.float32, device=self.device)
        self.label = torch.empty(size=(self.opt.batchsize,), dtype=torch.float32, device=self.device)
        self.gt = torch.empty(size=(opt.batchsize,), dtype=torch.long, device=self.device)
        self.fixed_input = torch.empty(size=(self.opt.batchsize, 3, self.opt.isize, self.opt.isize), dtype=torch.float32, device=self.device)
        self.real_label = torch.ones (size=(self.opt.batchsize,), dtype=torch.float32, device=self.device)
        self.fake_label = torch.zeros(size=(self.opt.batchsize,), dtype=torch.float32, device=self.device)
        ##
        # Setup optimizer
        if self.opt.isTrain:
            self.netg.train()
            self.netd.train()
            self.optimizer_d = optim.Adam(self.netd.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
            self.optimizer_g = optim.Adam(self.netg.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, 0.999))

    ##
    def forward_g(self):
        """ Forward propagate through netG
        """
        self.fake, self.latent_i, self.latent_o = self.netg(self.input)

    ##
    def forward_d(self):
        """ Forward propagate through netD
        """
        self.pred_real, self.feat_real = self.netd(self.input)
        self.pred_fake, self.feat_fake = self.netd(self.fake.detach())

    ##
    def backward_g(self):
        """ Backpropagate through netG
        """
        self.err_g_adv = self.opt.w_adv * self.l_adv(self.feat_fake, self.feat_real)
        self.err_g_con = self.opt.w_con * self.l_con(self.fake, self.input)
        # NOTE(review): .detach() stops gradients from the latent term, so it
        # adds to the reported loss but not to training -- confirm intended.
        self.err_g_lat = self.opt.w_lat * self.l_enc(self.latent_o, self.latent_i).detach()
        self.err_g = self.err_g_adv + self.err_g_con + self.err_g_lat
        self.err_g.backward(retain_graph=True)

    ##
    def backward_d(self):
        """ Backpropagate through netD
        """
        # Real - Fake Loss
        self.err_d_real = self.l_bce(self.pred_real, self.real_label)
        self.err_d_fake = self.l_bce(self.pred_fake, self.fake_label)
        # NetD Loss & Backward-Pass
        self.err_d = (self.err_d_real + self.err_d_fake) * 0.5
        self.err_d.backward()

    ##
    def optimize_params(self):
        """ Forwardpass, Loss Computation and Backwardpass.
        """
        # Forward-pass
        self.forward_g()
        self.forward_d()
        # Backward-pass
        # netg
        self.optimizer_g.zero_grad()
        self.backward_g()
        self.optimizer_g.step()
        # netd
        self.optimizer_d.zero_grad()
        self.backward_d()
        self.optimizer_d.step()
        # Re-initialize the discriminator when its loss collapses.
        if self.err_d.item() < 1e-5: self.reinit_d()

    ##
    def test(self):
        """ Evaluate the model on the validation split.

        Returns:
            OrderedDict with the mean per-batch runtime (ms) and the AUC of
            the min-max normalised anomaly scores.

        Raises:
            IOError: Model weights not found (when opt.load_weights is set).
        """
        with torch.no_grad():
            plot_hist = self.opt.histogram
            # Load the weights of netg and netd.
            if self.opt.load_weights:
                path = "./output/{}/{}/train/weights/netG.pth".format(self.name.lower(), self.opt.dataset)
                pretrained_dict = torch.load(path)['state_dict']
                try:
                    self.netg.load_state_dict(pretrained_dict)
                except IOError:
                    raise IOError("netG weights not found")
                print('   Loaded weights.')
            self.opt.phase = 'test'
            # Create big error tensor for the test set.
            self.an_scores = torch.zeros(size=(len(self.data.valid.dataset),), dtype=torch.float32, device=self.device)
            self.gt_labels = torch.zeros(size=(len(self.data.valid.dataset),), dtype=torch.long, device=self.device)
            self.latent_i = torch.zeros(size=(len(self.data.valid.dataset), self.opt.nz), dtype=torch.float32, device=self.device)
            self.latent_o = torch.zeros(size=(len(self.data.valid.dataset), self.opt.nz), dtype=torch.float32, device=self.device)
            self.times = []
            self.total_steps = 0
            epoch_iter = 0
            for i, data in enumerate(self.data.valid, 0):
                self.total_steps += self.opt.batchsize
                epoch_iter += self.opt.batchsize
                time_i = time.time()
                self.set_input(data)
                self.fake, latent_i, latent_o = self.netg(self.input)
                # Anomaly score: mean squared distance between the latents.
                error = torch.mean(torch.pow((latent_i-latent_o), 2), dim=1)
                time_o = time.time()
                self.an_scores[i*self.opt.batchsize : i*self.opt.batchsize+error.size(0)] = error.reshape(error.size(0))
                self.gt_labels[i*self.opt.batchsize : i*self.opt.batchsize+error.size(0)] = self.gt.reshape(error.size(0))
                self.latent_i [i*self.opt.batchsize : i*self.opt.batchsize+error.size(0), :] = latent_i.reshape(error.size(0), self.opt.nz)
                self.latent_o [i*self.opt.batchsize : i*self.opt.batchsize+error.size(0), :] = latent_o.reshape(error.size(0), self.opt.nz)
                self.times.append(time_o - time_i)
                # Save test images.
                if self.opt.save_test_images:
                    dst = os.path.join(self.opt.outf, self.opt.name, 'test', 'images')
                    if not os.path.isdir(dst):
                        os.makedirs(dst)
                    real, fake, _ = self.get_current_images()
                    vutils.save_image(real, '%s/real_%03d.eps' % (dst, i+1), normalize=True)
                    vutils.save_image(fake, '%s/fake_%03d.eps' % (dst, i+1), normalize=True)
            # Measure inference time (ms per batch, first 100 batches).
            self.times = np.array(self.times)
            self.times = np.mean(self.times[:100] * 1000)
            original_score = list(self.an_scores.cpu())
            # Scale error vector between [0, 1]
            self.an_scores = (self.an_scores - torch.min(self.an_scores)) / (torch.max(self.an_scores) - torch.min(self.an_scores))
            auc = evaluate(self.gt_labels, self.an_scores, metric=self.opt.metric)
            performance = OrderedDict([('Avg Run Time (ms/batch)', self.times), ('AUC', auc)])
            ##
            # PLOT HISTOGRAM
            if plot_hist:
                # Fix: `scores`, `plt`, `pd` and `sns` were previously unbound
                # here (no imports, no dict init), so this branch raised
                # NameError whenever opt.histogram was set.  The plotting
                # libraries are imported locally so they are only required
                # when a histogram is actually requested.
                import matplotlib.pyplot as plt
                import pandas as pd
                import seaborn as sns
                plt.ion()
                # Create data frame for scores and labels.
                scores = {}
                scores['original scores'] = original_score
                scores['normalized scores'] = self.an_scores.cpu()
                scores['labels'] = self.gt_labels.cpu()
                hist = pd.DataFrame.from_dict(scores)
                hist.to_csv("histogram.csv")
                # Filter normal and abnormal scores.
                abn_scr = hist.loc[hist.labels == 1]['normalized scores']
                nrm_scr = hist.loc[hist.labels == 0]['normalized scores']
                # Create figure and plot the distribution.
                sns.distplot(nrm_scr, label=r'Normal Scores')
                sns.distplot(abn_scr, label=r'Abnormal Scores')
                plt.legend()
                plt.yticks([])
                plt.xlabel(r'Anomaly Scores')
            if self.opt.display_id > 0 and self.opt.phase == 'test':
                counter_ratio = float(epoch_iter) / len(self.data.valid.dataset)
                self.visualizer.plot_performance(self.epoch, counter_ratio, performance)
            return performance
| 40.270742 | 141 | 0.592171 |
ef2b1eaf8f09782b598ec4e3a0e093e5187277ea | 3,068 | py | Python | python/123.best-time-to-buy-and-sell-stock-iii.py | Zhenye-Na/leetcode | 95196a45f5709ccf7b970ee5ac84a4bf8fe2301e | [
"MIT"
] | 10 | 2019-09-15T00:23:57.000Z | 2022-01-05T12:53:42.000Z | python/123.best-time-to-buy-and-sell-stock-iii.py | Zhenye-Na/leetcode | 95196a45f5709ccf7b970ee5ac84a4bf8fe2301e | [
"MIT"
] | 3 | 2021-06-30T00:39:26.000Z | 2021-08-01T07:13:59.000Z | python/123.best-time-to-buy-and-sell-stock-iii.py | Zhenye-Na/leetcode | 95196a45f5709ccf7b970ee5ac84a4bf8fe2301e | [
"MIT"
] | 6 | 2020-02-08T02:55:22.000Z | 2022-01-02T22:48:18.000Z | #
# @lc app=leetcode id=123 lang=python3
#
# [123] Best Time to Buy and Sell Stock III
#
# https://leetcode.com/problems/best-time-to-buy-and-sell-stock-iii/description/
#
# algorithms
# Hard (39.74%)
# Likes: 3278
# Dislikes: 85
# Total Accepted: 277.6K
# Total Submissions: 695.1K
# Testcase Example: '[3,3,5,0,0,3,1,4]'
#
# Say you have an array for which the i^th element is the price of a given
# stock on day i.
#
# Design an algorithm to find the maximum profit. You may complete at most two
# transactions.
#
# Note: You may not engage in multiple transactions at the same time (i.e., you
# must sell the stock before you buy again).
#
#
# Example 1:
#
#
# Input: prices = [3,3,5,0,0,3,1,4]
# Output: 6
# Explanation: Buy on day 4 (price = 0) and sell on day 6 (price = 3), profit =
# 3-0 = 3.
# Then buy on day 7 (price = 1) and sell on day 8 (price = 4), profit = 4-1 =
# 3.
#
# Example 2:
#
#
# Input: prices = [1,2,3,4,5]
# Output: 4
# Explanation: Buy on day 1 (price = 1) and sell on day 5 (price = 5), profit =
# 5-1 = 4.
# Note that you cannot buy on day 1, buy on day 2 and sell them later, as you
# are engaging multiple transactions at the same time. You must sell before
# buying again.
#
#
# Example 3:
#
#
# Input: prices = [7,6,4,3,1]
# Output: 0
# Explanation: In this case, no transaction is done, i.e. max profit = 0.
#
#
# Example 4:
#
#
# Input: prices = [1]
# Output: 0
#
#
#
# Constraints:
#
#
# 1 <= prices.length <= 10^5
# 0 <= prices[i] <= 10^5
#
#
#
# @lc code=start
# Dynamic_Programming
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Max profit with at most two stock transactions, via a 4-state DP.

        States per day i:
          0 = holding after the 1st buy,  1 = cash after the 1st sell,
          2 = holding after the 2nd buy,  3 = cash after the 2nd sell.
        Time O(n), space O(n).
        """
        # Guard: the DP below indexes prices[0] (the original crashed on []).
        if not prices:
            return 0
        dp = [[0] * 4 for _ in range(len(prices))]
        dp[0][0] = -prices[0]
        dp[0][2] = -prices[0]
        for i in range(1, len(prices)):
            dp[i][0] = max(-prices[i], dp[i - 1][0])
            dp[i][1] = max(dp[i - 1][0] + prices[i], dp[i - 1][1])
            dp[i][2] = max(dp[i - 1][1] - prices[i], dp[i - 1][2])
            dp[i][3] = max(dp[i - 1][2] + prices[i], dp[i - 1][3])
        # Selling states dominate, so this is the best achievable profit.
        return max(dp[-1])
# Forward and Backward traversal
# Time O(n)
# Space O(n)
class Solution_TwoPass:
    def maxProfit(self, prices: List[int]) -> int:
        """Max profit with at most two transactions via forward/backward scan.

        forward[i]  = best single-transaction profit within prices[0..i]
        backward[i] = best single-transaction profit within prices[i..n-1]
        Answer = max over the split points of forward[i] + backward[i].
        Time O(n), space O(n).
        """
        # Single emptiness check (the old `not prices or len(prices) == 0`
        # tested the same thing twice); also guards prices[0]/prices[-1].
        if not prices:
            return 0
        n = len(prices)
        forward = [0] * n
        min_price = prices[0]
        # Start at 1: forward[0] is 0 by definition.  (The original started
        # at 0 and read forward[-1], silently relying on list wraparound.)
        for i in range(1, n):
            min_price = min(prices[i], min_price)
            forward[i] = max(forward[i - 1], prices[i] - min_price)
        backward = [0] * n
        max_price = prices[-1]
        for j in range(n - 2, -1, -1):
            max_price = max(prices[j], max_price)
            backward[j] = max(max_price - prices[j], backward[j + 1])
        return max(forward[i] + backward[i] for i in range(n))
# @lc code=end
| 24.943089 | 85 | 0.577249 |
1b5e6b905aea358201b73eab41d6f30aefc2889d | 3,763 | py | Python | src/python/pants/backend/python/tasks/python_bundle.py | StephanErb/pants | a368267b6b4cf50138ba567f582409ed31bf5db9 | [
"Apache-2.0"
] | 94 | 2015-01-15T21:24:20.000Z | 2022-02-16T16:55:43.000Z | src/python/pants/backend/python/tasks/python_bundle.py | StephanErb/pants | a368267b6b4cf50138ba567f582409ed31bf5db9 | [
"Apache-2.0"
] | 5 | 2020-07-18T01:04:43.000Z | 2021-05-10T08:40:56.000Z | src/python/pants/backend/python/tasks/python_bundle.py | StephanErb/pants | a368267b6b4cf50138ba567f582409ed31bf5db9 | [
"Apache-2.0"
] | 47 | 2015-02-25T02:20:07.000Z | 2022-03-21T00:59:16.000Z | # coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pants.backend.python.targets.python_app import PythonApp
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.build_graph.bundle_mixin import BundleMixin
from pants.fs import archive
from pants.task.task import Task
from pants.util.dirutil import safe_mkdir
class PythonBundle(BundleMixin, Task):
  """Create an archive bundle of PythonApp targets."""

  # Product-type names used to exchange artifacts with other tasks: this task
  # consumes pex archives and produces deployable archives.
  _DEPLOYABLE_ARCHIVES = 'deployable_archives'
  _PEX_ARCHIVES = 'pex_archives'

  @classmethod
  def product_types(cls):
    """Declare the product types this task can produce."""
    return [cls._DEPLOYABLE_ARCHIVES]

  @classmethod
  def prepare(cls, options, round_manager):
    """Request the pex archives built by upstream tasks before this task runs."""
    super(PythonBundle, cls).prepare(options, round_manager)
    round_manager.require_data(cls._PEX_ARCHIVES)

  @staticmethod
  def _get_archive_path(vt, archive_format):
    """Return the archive path for a versioned target, or '' when no format is set."""
    # Fall back to the raw format string when it has no registered extension.
    ext = archive.archive_extensions.get(archive_format, archive_format)
    filename = '{}.{}'.format(vt.target.id, ext)
    return os.path.join(vt.results_dir, filename) if archive_format else ''

  @property
  def create_target_dirs(self):
    # Ask the invalidation framework to create per-target results directories.
    return True

  def __init__(self, *args, **kwargs):
    super(PythonBundle, self).__init__(*args, **kwargs)
    self._outdir = self.get_options().pants_distdir

  def execute(self):
    """Bundle every PythonApp target, archiving and publishing as configured."""
    targets_to_bundle = self.context.targets(PythonApp.is_python_app)

    with self.invalidated(targets_to_bundle, invalidate_dependents=True) as invalidation_check:
      bundle_archive_product = self.context.products.get(self._DEPLOYABLE_ARCHIVES)
      for vt in invalidation_check.all_vts:
        bundle_dir = self.get_bundle_dir(vt.target.id, vt.results_dir)
        archive_format = self.resolved_option(self.get_options(), vt.target, 'archive')
        archiver = archive.create_archiver(archive_format) if archive_format else None
        archive_path = self._get_archive_path(vt, archive_format)

        if not vt.valid:  # Only recreate the bundle/archive if it's changed
          self._bundle(vt.target, bundle_dir)
          if archiver:
            archiver.create(bundle_dir, vt.results_dir, vt.target.id)
            self.context.log.info(
              'created archive {}'.format(os.path.relpath(archive_path, get_buildroot())))

        if archiver:
          # Register the (possibly cached) archive so downstream tasks can consume it.
          bundle_archive_product.add(
            vt.target, os.path.dirname(archive_path)).append(os.path.basename(archive_path))

        if vt.target in self.context.target_roots:  # Always publish bundle/archive in dist
          self.publish_results(self.get_options().pants_distdir,
                               False,
                               vt,
                               bundle_dir,
                               archive_path,
                               vt.target.id,
                               archive_format)

  def _bundle(self, target, bundle_dir):
    """Materialize the bundle dir: symlink the built pex plus the app's bundles."""
    self.context.log.debug('creating {}'.format(os.path.relpath(bundle_dir, get_buildroot())))
    safe_mkdir(bundle_dir, clean=True)
    binary_path = self._get_binary_path(target)
    os.symlink(binary_path, os.path.join(bundle_dir, os.path.basename(binary_path)))
    self.symlink_bundles(target, bundle_dir)

  def _get_binary_path(self, target):
    """Return the single pex archive path built for the app's binary.

    :raises TaskError: if zero or more than one pex archive was produced.
    """
    pex_archives = self.context.products.get(self._PEX_ARCHIVES)

    paths = []
    for basedir, filenames in pex_archives.get(target.binary).items():
      for filename in filenames:
        paths.append(os.path.join(basedir, filename))

    if len(paths) != 1:
      raise TaskError('Expected one binary but found: {}'.format(', '.join(sorted(paths))))
    return paths[0]
| 39.610526 | 95 | 0.701834 |
89475bdf8060e85f6f4d5fe7ab44e94195b29b47 | 4,409 | py | Python | adb_android/adb_android.py | abhi-r3v0/adb_android | 6d8203a28b322f3bb2e7372397024da5a6807ba3 | [
"BSD-3-Clause"
] | 1 | 2018-09-29T09:08:10.000Z | 2018-09-29T09:08:10.000Z | adb_android/adb_android.py | abhi-r3v0/adb_android | 6d8203a28b322f3bb2e7372397024da5a6807ba3 | [
"BSD-3-Clause"
] | null | null | null | adb_android/adb_android.py | abhi-r3v0/adb_android | 6d8203a28b322f3bb2e7372397024da5a6807ba3 | [
"BSD-3-Clause"
] | null | null | null | import tempfile
from subprocess import check_output, CalledProcessError
import var as v
def push(src, dest):
    """
    Copy an object from the host onto the target device.

    :param src: host-side path of the object to copy
    :param dest: destination path on the target
    :return: (exit code, output) tuple from _exec_command()
    """
    return _exec_command([v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_PUSH, src, dest])
def pull(src, dest):
    """
    Copy an object from the target device back to the host.

    :param src: target-side path of the object to copy
    :param dest: destination path on the host
    :return: (exit code, output) tuple from _exec_command()
    """
    return _exec_command([v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_PULL, src, dest])
def devices(opts=None):
    """
    Get list of all available devices including emulators

    :param opts: optional list of command options (e.g. ["-r", "-a"])
    :return: result of _exec_command() execution
    """
    # A literal [] default is a shared mutable default argument; use None
    # as the sentinel instead (behavior for callers is unchanged).
    opts = [] if opts is None else opts
    adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_DEVICES, _convert_opts(opts)]
    return _exec_command(adb_full_cmd)
def shell(cmd):
    """
    Run a shell command on the target device.

    :param cmd: shell command string to execute
    :return: (exit code, output) tuple from _exec_command()
    """
    return _exec_command([v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_SHELL, cmd])
def install(apk, opts=None):
    """
    Install *.apk on target

    :param apk: string path to apk on host to install
    :param opts: optional list of command options (e.g. ["-r", "-a"])
    :return: result of _exec_command() execution
    """
    # Avoid the shared mutable default argument ([]); None is the sentinel.
    opts = [] if opts is None else opts
    adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_INSTALL, _convert_opts(opts), apk]
    return _exec_command(adb_full_cmd)
def uninstall(app, opts=None):
    """
    Uninstall app from target

    :param app: app name to uninstall from target (e.g. "com.example.android.valid")
    :param opts: optional list of command options (e.g. ["-r", "-a"])
    :return: result of _exec_command() execution
    """
    # Avoid the shared mutable default argument ([]); None is the sentinel.
    opts = [] if opts is None else opts
    adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_UNINSTALL, _convert_opts(opts), app]
    return _exec_command(adb_full_cmd)
def getserialno():
    """
    Report the serial number of the attached target device(s).

    :return: (exit code, output) tuple from _exec_command()
    """
    return _exec_command([v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_GETSERIALNO])
def wait_for_device():
    """
    Block until a target device comes online.

    :return: (exit code, output) tuple from _exec_command()
    """
    return _exec_command([v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_WAITFORDEVICE])
def start_server():
    """
    Start the adb server daemon on the host.

    :return: (exit code, output) tuple from _exec_command()
    """
    return _exec_command([v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_START_SERVER])
def kill_server():
    """
    Stop the adb server daemon on the host.

    :return: (exit code, output) tuple from _exec_command()
    """
    return _exec_command([v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_KILL_SERVER])
def get_state():
    """
    Query the connection state of the device attached via adb.

    :return: (exit code, output) tuple from _exec_command()
    """
    return _exec_command([v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_GET_STATE])
def _convert_opts(opts):
"""
Convert list with command options to single string value
with 'space' delimeter
:param opts: list with space-delimeted values
:return: string with space-delimeted values
"""
return ' '.join(opts)
def _exec_command(adb_cmd):
"""
Format adb command and execute it in shell
:param adb_cmd: list adb command to execute
:return: string '0' and shell command output if successful, otherwise
raise CalledProcessError exception and return error code
"""
t = tempfile.TemporaryFile()
final_adb_cmd = []
for e in adb_cmd:
if e != '': # avoid items with empty string...
final_adb_cmd.append(e) # ... so that final command doesn't
# contain extra spaces
print('\n*** Executing ' + ' '.join(adb_cmd) + ' ' + 'command')
try:
output = check_output(final_adb_cmd, stderr=t)
except CalledProcessError as e:
t.seek(0)
result = e.returncode, t.read()
else:
result = 0, output
print('\n' + result[1])
return result
| 29.198675 | 92 | 0.681334 |
5dc24b48a0633e417b089debd1592757c006d76a | 16,908 | py | Python | sdk/python/pulumi_azure/servicebus/queue.py | kenny-wealth/pulumi-azure | e57e3a81f95bf622e7429c53f0bff93e33372aa1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/servicebus/queue.py | kenny-wealth/pulumi-azure | e57e3a81f95bf622e7429c53f0bff93e33372aa1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/servicebus/queue.py | kenny-wealth/pulumi-azure | e57e3a81f95bf622e7429c53f0bff93e33372aa1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Queue(pulumi.CustomResource):
auto_delete_on_idle: pulumi.Output[str]
"""
The ISO 8601 timespan duration of the idle interval after which the
Queue is automatically deleted, minimum of 5 minutes.
"""
dead_lettering_on_message_expiration: pulumi.Output[bool]
"""
Boolean flag which controls whether the Queue has dead letter support when a message expires. Defaults to `false`.
"""
default_message_ttl: pulumi.Output[str]
"""
The ISO 8601 timespan duration of the TTL of messages sent to this
queue. This is the default value used when TTL is not set on message itself.
"""
duplicate_detection_history_time_window: pulumi.Output[str]
"""
The ISO 8601 timespan duration during which
duplicates can be detected. Default value is 10 minutes. (`PT10M`)
"""
enable_batched_operations: pulumi.Output[bool]
enable_express: pulumi.Output[bool]
"""
Boolean flag which controls whether Express Entities
are enabled. An express queue holds a message in memory temporarily before writing
it to persistent storage. Defaults to `false` for Basic and Standard. For Premium, it MUST
be set to `false`.
"""
enable_partitioning: pulumi.Output[bool]
"""
Boolean flag which controls whether to enable
the queue to be partitioned across multiple message brokers. Changing this forces
a new resource to be created. Defaults to `false` for Basic and Standard. For Premium, it MUST
be set to `true`.
"""
location: pulumi.Output[str]
"""
Specifies the supported Azure location where the resource exists.
Changing this forces a new resource to be created.
"""
lock_duration: pulumi.Output[str]
"""
The ISO 8601 timespan duration of a peek-lock; that is, the amount of time that the message is locked for other receivers. Maximum value is 5 minutes. Defaults to 1 minute. (`PT1M`)
"""
max_delivery_count: pulumi.Output[float]
"""
Integer value which controls when a message is automatically deadlettered. Defaults to `10`.
"""
max_size_in_megabytes: pulumi.Output[float]
"""
Integer value which controls the size of
memory allocated for the queue. For supported values see the "Queue/topic size"
section of [this document](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-quotas).
"""
name: pulumi.Output[str]
"""
Specifies the name of the ServiceBus Queue resource. Changing this forces a
new resource to be created.
"""
namespace_name: pulumi.Output[str]
"""
The name of the ServiceBus Namespace to create
this queue in. Changing this forces a new resource to be created.
"""
requires_duplicate_detection: pulumi.Output[bool]
"""
Boolean flag which controls whether
the Queue requires duplicate detection. Changing this forces
a new resource to be created. Defaults to `false`.
"""
requires_session: pulumi.Output[bool]
"""
Boolean flag which controls whether the Queue requires sessions.
This will allow ordered handling of unbounded sequences of related messages. With sessions enabled
a queue can guarantee first-in-first-out delivery of messages.
Changing this forces a new resource to be created. Defaults to `false`.
"""
resource_group_name: pulumi.Output[str]
"""
The name of the resource group in which to
create the namespace. Changing this forces a new resource to be created.
"""
support_ordering: pulumi.Output[bool]
def __init__(__self__, resource_name, opts=None, auto_delete_on_idle=None, dead_lettering_on_message_expiration=None, default_message_ttl=None, duplicate_detection_history_time_window=None, enable_batched_operations=None, enable_express=None, enable_partitioning=None, location=None, lock_duration=None, max_delivery_count=None, max_size_in_megabytes=None, name=None, namespace_name=None, requires_duplicate_detection=None, requires_session=None, resource_group_name=None, support_ordering=None, __props__=None, __name__=None, __opts__=None):
"""
Manages a ServiceBus Queue.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] auto_delete_on_idle: The ISO 8601 timespan duration of the idle interval after which the
Queue is automatically deleted, minimum of 5 minutes.
:param pulumi.Input[bool] dead_lettering_on_message_expiration: Boolean flag which controls whether the Queue has dead letter support when a message expires. Defaults to `false`.
:param pulumi.Input[str] default_message_ttl: The ISO 8601 timespan duration of the TTL of messages sent to this
queue. This is the default value used when TTL is not set on message itself.
:param pulumi.Input[str] duplicate_detection_history_time_window: The ISO 8601 timespan duration during which
duplicates can be detected. Default value is 10 minutes. (`PT10M`)
:param pulumi.Input[bool] enable_express: Boolean flag which controls whether Express Entities
are enabled. An express queue holds a message in memory temporarily before writing
it to persistent storage. Defaults to `false` for Basic and Standard. For Premium, it MUST
be set to `false`.
:param pulumi.Input[bool] enable_partitioning: Boolean flag which controls whether to enable
the queue to be partitioned across multiple message brokers. Changing this forces
a new resource to be created. Defaults to `false` for Basic and Standard. For Premium, it MUST
be set to `true`.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists.
Changing this forces a new resource to be created.
:param pulumi.Input[str] lock_duration: The ISO 8601 timespan duration of a peek-lock; that is, the amount of time that the message is locked for other receivers. Maximum value is 5 minutes. Defaults to 1 minute. (`PT1M`)
:param pulumi.Input[float] max_delivery_count: Integer value which controls when a message is automatically deadlettered. Defaults to `10`.
:param pulumi.Input[float] max_size_in_megabytes: Integer value which controls the size of
memory allocated for the queue. For supported values see the "Queue/topic size"
section of [this document](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-quotas).
:param pulumi.Input[str] name: Specifies the name of the ServiceBus Queue resource. Changing this forces a
new resource to be created.
:param pulumi.Input[str] namespace_name: The name of the ServiceBus Namespace to create
this queue in. Changing this forces a new resource to be created.
:param pulumi.Input[bool] requires_duplicate_detection: Boolean flag which controls whether
the Queue requires duplicate detection. Changing this forces
a new resource to be created. Defaults to `false`.
:param pulumi.Input[bool] requires_session: Boolean flag which controls whether the Queue requires sessions.
This will allow ordered handling of unbounded sequences of related messages. With sessions enabled
a queue can guarantee first-in-first-out delivery of messages.
Changing this forces a new resource to be created. Defaults to `false`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the namespace. Changing this forces a new resource to be created.
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/servicebus_queue.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['auto_delete_on_idle'] = auto_delete_on_idle
__props__['dead_lettering_on_message_expiration'] = dead_lettering_on_message_expiration
__props__['default_message_ttl'] = default_message_ttl
__props__['duplicate_detection_history_time_window'] = duplicate_detection_history_time_window
__props__['enable_batched_operations'] = enable_batched_operations
__props__['enable_express'] = enable_express
__props__['enable_partitioning'] = enable_partitioning
__props__['location'] = location
__props__['lock_duration'] = lock_duration
__props__['max_delivery_count'] = max_delivery_count
__props__['max_size_in_megabytes'] = max_size_in_megabytes
__props__['name'] = name
if namespace_name is None:
raise TypeError("Missing required property 'namespace_name'")
__props__['namespace_name'] = namespace_name
__props__['requires_duplicate_detection'] = requires_duplicate_detection
__props__['requires_session'] = requires_session
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['support_ordering'] = support_ordering
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure:eventhub/queue:Queue")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Queue, __self__).__init__(
'azure:servicebus/queue:Queue',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, auto_delete_on_idle=None, dead_lettering_on_message_expiration=None, default_message_ttl=None, duplicate_detection_history_time_window=None, enable_batched_operations=None, enable_express=None, enable_partitioning=None, location=None, lock_duration=None, max_delivery_count=None, max_size_in_megabytes=None, name=None, namespace_name=None, requires_duplicate_detection=None, requires_session=None, resource_group_name=None, support_ordering=None):
"""
Get an existing Queue resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] auto_delete_on_idle: The ISO 8601 timespan duration of the idle interval after which the
Queue is automatically deleted, minimum of 5 minutes.
:param pulumi.Input[bool] dead_lettering_on_message_expiration: Boolean flag which controls whether the Queue has dead letter support when a message expires. Defaults to `false`.
:param pulumi.Input[str] default_message_ttl: The ISO 8601 timespan duration of the TTL of messages sent to this
queue. This is the default value used when TTL is not set on message itself.
:param pulumi.Input[str] duplicate_detection_history_time_window: The ISO 8601 timespan duration during which
duplicates can be detected. Default value is 10 minutes. (`PT10M`)
:param pulumi.Input[bool] enable_express: Boolean flag which controls whether Express Entities
are enabled. An express queue holds a message in memory temporarily before writing
it to persistent storage. Defaults to `false` for Basic and Standard. For Premium, it MUST
be set to `false`.
:param pulumi.Input[bool] enable_partitioning: Boolean flag which controls whether to enable
the queue to be partitioned across multiple message brokers. Changing this forces
a new resource to be created. Defaults to `false` for Basic and Standard. For Premium, it MUST
be set to `true`.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists.
Changing this forces a new resource to be created.
:param pulumi.Input[str] lock_duration: The ISO 8601 timespan duration of a peek-lock; that is, the amount of time that the message is locked for other receivers. Maximum value is 5 minutes. Defaults to 1 minute. (`PT1M`)
:param pulumi.Input[float] max_delivery_count: Integer value which controls when a message is automatically deadlettered. Defaults to `10`.
:param pulumi.Input[float] max_size_in_megabytes: Integer value which controls the size of
memory allocated for the queue. For supported values see the "Queue/topic size"
section of [this document](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-quotas).
:param pulumi.Input[str] name: Specifies the name of the ServiceBus Queue resource. Changing this forces a
new resource to be created.
:param pulumi.Input[str] namespace_name: The name of the ServiceBus Namespace to create
this queue in. Changing this forces a new resource to be created.
:param pulumi.Input[bool] requires_duplicate_detection: Boolean flag which controls whether
the Queue requires duplicate detection. Changing this forces
a new resource to be created. Defaults to `false`.
:param pulumi.Input[bool] requires_session: Boolean flag which controls whether the Queue requires sessions.
This will allow ordered handling of unbounded sequences of related messages. With sessions enabled
a queue can guarantee first-in-first-out delivery of messages.
Changing this forces a new resource to be created. Defaults to `false`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the namespace. Changing this forces a new resource to be created.
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/servicebus_queue.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["auto_delete_on_idle"] = auto_delete_on_idle
__props__["dead_lettering_on_message_expiration"] = dead_lettering_on_message_expiration
__props__["default_message_ttl"] = default_message_ttl
__props__["duplicate_detection_history_time_window"] = duplicate_detection_history_time_window
__props__["enable_batched_operations"] = enable_batched_operations
__props__["enable_express"] = enable_express
__props__["enable_partitioning"] = enable_partitioning
__props__["location"] = location
__props__["lock_duration"] = lock_duration
__props__["max_delivery_count"] = max_delivery_count
__props__["max_size_in_megabytes"] = max_size_in_megabytes
__props__["name"] = name
__props__["namespace_name"] = namespace_name
__props__["requires_duplicate_detection"] = requires_duplicate_detection
__props__["requires_session"] = requires_session
__props__["resource_group_name"] = resource_group_name
__props__["support_ordering"] = support_ordering
return Queue(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 65.281853 | 546 | 0.71688 |
17ed6ef2dfa9be2c2c14ab1bcf983d6030f16686 | 8,596 | py | Python | detect_secrets/settings.py | sfc-gh-nsfard/detect-secrets | 1c325e8433d732b3ac7783686651c5b98abbfe63 | [
"Apache-2.0"
] | 1 | 2021-03-15T15:12:42.000Z | 2021-03-15T15:12:42.000Z | detect_secrets/settings.py | sfc-gh-nsfard/detect-secrets | 1c325e8433d732b3ac7783686651c5b98abbfe63 | [
"Apache-2.0"
] | null | null | null | detect_secrets/settings.py | sfc-gh-nsfard/detect-secrets | 1c325e8433d732b3ac7783686651c5b98abbfe63 | [
"Apache-2.0"
] | null | null | null | from contextlib import contextmanager
from copy import deepcopy
from functools import lru_cache
from importlib import import_module
from typing import Any
from typing import Dict
from typing import Generator
from typing import List
from urllib.parse import urlparse
from .exceptions import InvalidFile
from .util.importlib import import_file_as_module
@lru_cache(maxsize=1)
def get_settings() -> 'Settings':
    """
    This is essentially a singleton pattern, that allows for (controlled) global access
    to common variables.
    """
    # lru_cache on a zero-argument function lazily builds exactly one shared
    # Settings instance; cache_bust() clears it to reset the global state.
    return Settings()
def configure_settings_from_baseline(baseline: Dict[str, Any], filename: str = '') -> 'Settings':
    """
    Load plugin and filter configuration from a parsed baseline dictionary
    into the global settings singleton.

    :param baseline: parsed baseline (``plugins_used`` / ``filters_used`` keys)
    :param filename: path of the baseline file itself; when provided, that
        file is excluded from scanning via the ``is_baseline_file`` filter.
    :raises: KeyError
    """
    settings = get_settings()

    if 'plugins_used' in baseline:
        settings.configure_plugins(baseline['plugins_used'])

    if 'filters_used' in baseline:
        settings.configure_filters(baseline['filters_used'])

        # The wordlist filter requires an explicit initialize() call with its
        # configured file, so do that once the filter settings are in place.
        if 'detect_secrets.filters.wordlist.should_exclude_secret' in settings.filters:
            config = settings.filters['detect_secrets.filters.wordlist.should_exclude_secret']

            # Local import, mirroring get_plugins(), to avoid a circular dependency.
            from detect_secrets import filters
            filters.wordlist.initialize(
                wordlist_filename=config['file_name'],
                min_length=config['min_length'],
                file_hash=config['file_hash'],
            )

    if filename:
        settings.filters['detect_secrets.filters.common.is_baseline_file'] = {
            'filename': filename,
        }

    return settings
@contextmanager
def default_settings() -> Generator['Settings', None, None]:
    """Context manager that enables every known plugin with the default filters."""
    from .core.plugins.util import get_mapping_from_secret_type_to_class

    every_plugin = [
        {'name': plugin_type.__name__}
        for plugin_type in get_mapping_from_secret_type_to_class().values()
    ]
    with transient_settings({'plugins_used': every_plugin}) as settings:
        yield settings
@contextmanager
def transient_settings(config: Dict[str, Any]) -> Generator['Settings', None, None]:
    """Apply *config* for the duration of the block, then restore what was there."""
    # Snapshot the current configuration so it can be reinstated afterwards.
    snapshot = get_settings().json()

    cache_bust()
    try:
        yield configure_settings_from_baseline(config)
    finally:
        # Restore the caller's configuration even when the body raised.
        cache_bust()
        configure_settings_from_baseline(snapshot)
def cache_bust() -> None:
    """Clear every memoized global (settings, filters, plugins) so they rebuild."""
    for memoized in (get_settings, get_filters, get_plugins):
        memoized.cache_clear()
class Settings:
    """Mutable container for the plugin and filter configuration in effect.

    Accessed globally through get_settings(); baselines are loaded into and
    serialized out of this object via configure_plugins/configure_filters and
    json().
    """

    # Filters that are always enabled, and therefore omitted from json() output.
    DEFAULT_FILTERS = {
        'detect_secrets.filters.common.is_invalid_file',
        'detect_secrets.filters.heuristic.is_non_text_file',
    }

    def __init__(self) -> None:
        self.clear()

    def clear(self) -> None:
        """Reset to the out-of-the-box configuration: no plugins, stock filters."""
        # mapping of class names to initialization variables
        self.plugins: Dict[str, Dict[str, Any]] = {}

        # mapping of python import paths to configuration variables
        self.filters: Dict[str, Dict[str, Any]] = {
            path: {}
            for path in {
                *self.DEFAULT_FILTERS,
                'detect_secrets.filters.allowlist.is_line_allowlisted',
                'detect_secrets.filters.heuristic.is_sequential_string',
                'detect_secrets.filters.heuristic.is_potential_uuid',
                'detect_secrets.filters.heuristic.is_likely_id_string',
                'detect_secrets.filters.heuristic.is_templated_secret',
                'detect_secrets.filters.heuristic.is_prefixed_with_dollar_sign',
                'detect_secrets.filters.heuristic.is_indirect_reference',
            }
        }

    def set(self, other: 'Settings') -> None:
        """Adopt another instance's configuration (shared references, not copies)."""
        self.plugins = other.plugins
        self.filters = other.filters

    def configure_plugins(self, config: List[Dict[str, Any]]) -> 'Settings':
        """
        :param config: e.g.
            [
                {'name': 'AWSKeyDetector'},
                {'limit': 4.5, 'name': 'Base64HighEntropyString'}
            ]
        """
        for plugin in config:
            # Shallow-copy each entry so popping 'name' leaves the caller's
            # dictionaries untouched.
            plugin = {**plugin}
            name = plugin.pop('name')
            self.plugins[name] = plugin

        return self

    def disable_plugins(self, *plugin_names: str) -> 'Settings':
        """Remove the named plugins; unknown names are silently ignored."""
        for name in plugin_names:
            try:
                self.plugins.pop(name)
            except KeyError:
                pass

        return self

    def configure_filters(self, config: List[Dict[str, Any]]) -> 'Settings':
        """
        Replace the filter configuration (the DEFAULT_FILTERS are always kept).

        :param config: e.g.
            [
                {'path': 'detect_secrets.filters.heuristic.is_sequential_string'},
                {
                    'path': 'detect_secrets.filters.regex.should_exclude_files',
                    'pattern': '^test.*',
                }
            ]
        """
        self.filters = {
            path: {}
            for path in self.DEFAULT_FILTERS
        }

        # Make a copy, so we don't affect the original.
        filter_configs = deepcopy(config)
        for filter_config in filter_configs:
            path = filter_config['path']
            self.filters[path] = filter_config

        return self

    def disable_filters(self, *filter_paths: str) -> 'Settings':
        """Remove the named filters; unknown paths are silently ignored."""
        for filter_path in filter_paths:
            self.filters.pop(filter_path, None)

        return self

    def json(self) -> Dict[str, Any]:
        """Serialize to the baseline format (sorted; default filters omitted)."""
        plugins_used = []
        for plugin in get_plugins():
            # NOTE: We use the initialized plugin's JSON representation (rather than using
            # the configured settings) to deal with cases where plugins define their own
            # default variables, that is not necessarily carried through through the
            # settings object.
            serialized_plugin = plugin.json()
            plugins_used.append({
                # We want this to appear first.
                'name': serialized_plugin['name'],

                # NOTE: We still need to use the saved settings configuration though, since
                # there are keys specifically in the settings object that we need to carry over
                # (e.g. `path` for custom plugins).
                **self.plugins[serialized_plugin['name']],

                # Finally, this comes last so that it overrides any values that are saved in
                # the settings object.
                **serialized_plugin,
            })

        return {
            'plugins_used': sorted(
                plugins_used,
                key=lambda x: str(x['name'].lower()),
            ),
            'filters_used': sorted(
                [
                    {
                        'path': path,
                        **config,
                    }
                    for path, config in self.filters.items()
                    if path not in self.DEFAULT_FILTERS
                ],
                key=lambda x: str(x['path'].lower()),
            ),
        }
@lru_cache(maxsize=1)
def get_plugins() -> List:
    """Instantiate (and memoize) one scanner object per configured plugin class."""
    # We need to import this here, otherwise it will result in a circular dependency.
    from .core import plugins

    return [
        plugins.initialize.from_plugin_classname(classname)
        for classname in get_settings().plugins
    ]
@lru_cache(maxsize=1)
def get_filters() -> List:
    """Resolve every configured filter path to a callable (memoized).

    Two path styles are supported: dotted import paths
    ('package.module.function') and local files ('file://path.py::function').
    Entries that cannot be resolved are logged and skipped rather than
    aborting.
    """
    from .core.log import log
    from .util.inject import get_injectable_variables

    output = []
    for path, config in get_settings().filters.items():
        parts = urlparse(path)
        if not parts.scheme:
            # Dotted import path: everything up to the final '.' is the module.
            module_path, function_name = path.rsplit('.', 1)
            try:
                function = getattr(import_module(module_path), function_name)
            except (ModuleNotFoundError, AttributeError):
                log.warning(f'Invalid filter: {path}')
                continue
        elif parts.scheme == 'file':
            # Custom filter file: 'file://<path>::<function name>'.
            file_path, function_name = path[len('file://'):].split('::')
            try:
                function = getattr(import_file_as_module(file_path), function_name)
            except (FileNotFoundError, InvalidFile, AttributeError):
                log.warning(f'Invalid filter: {path}')
                continue
        else:
            log.warning(f'Invalid filter: {path}')
            continue

        # We attach this metadata to the function itself, so that we don't need to
        # compute it everytime. This will allow for dependency injection for filters.
        function.injectable_variables = set(get_injectable_variables(function))
        output.append(function)

        # This is for better logging.
        function.path = path

    return output
| 32.560606 | 97 | 0.599116 |
22a45bf5763ff5bffb8bceaae67d679167636817 | 3,000 | py | Python | learning_text_transformer/server.py | ianozsvald/learning_text_transformer | 23fa318a7c8ed0e2dbd1fc8e68e0cb7d1f15731d | [
"MIT"
] | 19 | 2015-08-28T14:41:16.000Z | 2021-03-05T17:26:42.000Z | learning_text_transformer/server.py | ianozsvald/learning_text_transformer | 23fa318a7c8ed0e2dbd1fc8e68e0cb7d1f15731d | [
"MIT"
] | null | null | null | learning_text_transformer/server.py | ianozsvald/learning_text_transformer | 23fa318a7c8ed0e2dbd1fc8e68e0cb7d1f15731d | [
"MIT"
] | null | null | null | """Flask server"""
import datetime
from flask import Flask, request, jsonify
from flask.ext import restful
from flask.ext.restful import abort
from learning_text_transformer import learner3 as learner
from learning_text_transformer import transforms
from learning_text_transformer import config
app = Flask(__name__)
api = restful.Api(app)
conf = config.get()
class Logging(object):
    """Append-only audit trail of API activity, one repr'd record per line."""

    def __init__(self, conf):
        self.conf = conf
        self.write('__init__', '')

    def log_learn_entry(self, examples_to_learn_from):
        self.write('log_learn_entry', examples_to_learn_from)

    def log_learn_exit(self, result, best_score):
        self.write('log_learn_exit', (result, best_score))

    def log_transform_entry(self, inputs):
        self.write('log_transform_entry', inputs)

    def write(self, fn_name, items):
        """Append a (UTC timestamp, event name, payload) record to the log file."""
        record = repr((datetime.datetime.utcnow().isoformat(), fn_name, items))
        with open(self.conf.log_filename, 'a') as log_file:
            log_file.write(record)
            log_file.write('\n')
logger = Logging(conf)
class HelloWorld(restful.Resource):
    """Root endpoint: static identity/liveness payload."""

    def get(self):
        # GET / -> {"learning": "server"} (route bound via api.add_resource).
        return {'learning': 'server'}
api.add_resource(HelloWorld, '/')
def make_learn_result(ts):
    """Serialise the transform sequence *ts* into a JSON-ready response dict."""
    serialiser = transforms.Serialisation()
    return {"transforms": serialiser.serialise(ts)}
class Learn(restful.Resource):
    """POST /learn: learn a transform sequence from input/output example pairs."""

    def check_inputs_or_abort(self, reqs):
        # Every input needs exactly one matching output; otherwise reject.
        if len(reqs['inputs']) != len(reqs['outputs']):
            abort(400)

    def post(self):
        payload = request.get_json()
        self.check_inputs_or_abort(payload)
        examples = list(zip(payload['inputs'], payload['outputs']))
        logger.log_learn_entry(examples)
        if examples:
            searcher = learner.get_transform_searcher()
            transformations, best_score = searcher.search_and_find_best_sequence(examples)
        else:
            # Nothing to learn from: empty transform list, no score.
            transformations, best_score = [], None
        result = make_learn_result(transformations)
        logger.log_learn_exit(result, best_score)
        return jsonify(result)
api.add_resource(Learn, '/learn')
class Transform(restful.Resource):
    """POST /transform: apply a previously learned transform sequence to inputs."""

    def post(self):
        reqs = request.get_json()
        logger.log_transform_entry(reqs)
        serialisation = transforms.Serialisation()
        # The transform list and the searcher do not depend on the input string,
        # so build them once instead of re-deserialising on every loop iteration
        # as the original did. NOTE(review): assumes apply_transforms does not
        # consume/mutate ts — consistent with /learn reusing one searcher across
        # all examples; confirm against learner's implementation.
        ts = serialisation.deserialise(reqs['transforms'])
        transform_searcher = learner.get_transform_searcher()
        outputs = []
        for s in reqs['inputs']:
            result, _always_made_changes = transform_searcher.apply_transforms(ts, s)
            outputs.append(result)
        return jsonify({'outputs': outputs})
api.add_resource(Transform, '/transform')
if __name__ == '__main__':
    # Development entry point only; run behind a WSGI server in production.
    print("Starting...")
    app.run(debug=True)
| 30.927835 | 121 | 0.683667 |
7c3216d525221a25a7811452be35f841583a716e | 4,944 | py | Python | ceilometer/tests/unit/publisher/test_prometheus.py | ionutbiru/ceilometer | f8992d40a135188a9d89c86f78868d340f3a1b96 | [
"Apache-2.0"
] | null | null | null | ceilometer/tests/unit/publisher/test_prometheus.py | ionutbiru/ceilometer | f8992d40a135188a9d89c86f78868d340f3a1b96 | [
"Apache-2.0"
] | null | null | null | ceilometer/tests/unit/publisher/test_prometheus.py | ionutbiru/ceilometer | f8992d40a135188a9d89c86f78868d340f3a1b96 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2016 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/publisher/prometheus.py"""
import datetime
from unittest import mock
import uuid
from oslotest import base
import requests
from six.moves.urllib import parse as urlparse
from ceilometer.publisher import prometheus
from ceilometer import sample
from ceilometer import service
class TestPrometheusPublisher(base.BaseTestCase):
    """Tests for the Prometheus publisher's HTTP POST behavior.

    The two test cases (plain and ``ssl=1`` URLs) exercised identical
    publish/assert logic and duplicated the expected exposition body; the
    shared steps now live in ``_expected_body``/``_check_publish`` so each
    test only states its publisher URL and the resulting POST URL.
    """

    resource_id = str(uuid.uuid4())

    sample_data = [
        sample.Sample(
            name='alpha',
            type=sample.TYPE_CUMULATIVE,
            unit='',
            volume=1,
            user_id='test',
            project_id='test',
            resource_id=resource_id,
            timestamp=datetime.datetime.utcnow().isoformat(),
            resource_metadata={'name': 'TestPublish'},
        ),
        sample.Sample(
            name='beta',
            type=sample.TYPE_DELTA,
            unit='',
            volume=3,
            user_id='test',
            project_id='test',
            resource_id=resource_id,
            timestamp=datetime.datetime.utcnow().isoformat(),
            resource_metadata={'name': 'TestPublish'},
        ),
        sample.Sample(
            name='gamma',
            type=sample.TYPE_GAUGE,
            unit='',
            volume=5,
            user_id='test',
            project_id='test',
            resource_id=resource_id,
            timestamp=datetime.datetime.now().isoformat(),
            resource_metadata={'name': 'TestPublish'},
        ),
        sample.Sample(
            name='delta.epsilon',
            type=sample.TYPE_GAUGE,
            unit='',
            volume=7,
            user_id='test',
            project_id='test',
            resource_id=resource_id,
            timestamp=datetime.datetime.now().isoformat(),
            resource_metadata={'name': 'TestPublish'},
        ),
    ]

    def setUp(self):
        super(TestPrometheusPublisher, self).setUp()
        self.CONF = service.prepare_service([], [])

    def _expected_body(self):
        """Prometheus exposition text the publisher should POST for sample_data."""
        return """# TYPE alpha counter
alpha{resource_id="%s", project_id="test"} 1
beta{resource_id="%s", project_id="test"} 3
# TYPE gamma gauge
gamma{resource_id="%s", project_id="test"} 5
# TYPE delta_epsilon gauge
delta_epsilon{resource_id="%s", project_id="test"} 7
""" % (self.resource_id, self.resource_id, self.resource_id, self.resource_id)

    def _check_publish(self, publisher_url, expected_post_url):
        """Publish sample_data via *publisher_url*; assert the exact POST made."""
        parsed_url = urlparse.urlparse(publisher_url)
        publisher = prometheus.PrometheusPublisher(self.CONF, parsed_url)

        res = requests.Response()
        res.status_code = 200
        with mock.patch.object(requests.Session, 'post',
                               return_value=res) as m_req:
            publisher.publish_samples(self.sample_data)

        expected = [
            mock.call(expected_post_url,
                      auth=None,
                      cert=None,
                      data=self._expected_body(),
                      headers={'Content-type': 'plain/text'},
                      timeout=5,
                      verify=True)
        ]
        self.assertEqual(expected, m_req.mock_calls)

    def test_post_samples(self):
        """Test publisher post."""
        self._check_publish('prometheus://localhost:90/metrics/job/os',
                            'http://localhost:90/metrics/job/os')

    def test_post_samples_ssl(self):
        """Test publisher post over HTTPS when the URL carries ssl=1."""
        self._check_publish('prometheus://localhost:90/metrics/job/os?ssl=1',
                            'https://localhost:90/metrics/job/os')
74fd594901987bb15fe5301a5ad81cd1ef217c24 | 1,671 | py | Python | mindspore/ops/_op_impl/tbe/floor_div.py | unseenme/mindspore | 4ba052f0cd9146ac0ccc4880a778706f1b2d0af8 | [
"Apache-2.0"
] | 7 | 2020-05-24T03:19:26.000Z | 2020-05-24T03:20:00.000Z | mindspore/ops/_op_impl/tbe/floor_div.py | unseenme/mindspore | 4ba052f0cd9146ac0ccc4880a778706f1b2d0af8 | [
"Apache-2.0"
] | null | null | null | mindspore/ops/_op_impl/tbe/floor_div.py | unseenme/mindspore | 4ba052f0cd9146ac0ccc4880a778706f1b2d0af8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FloorDiv op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
# TBE kernel registration for the element-wise FloorDiv op: declares the
# compiled kernel binary, its two required inputs / one output, and the
# supported dtype combinations (x1, x2 and y always share one dtype).
floordiv_op_info = TBERegOp("FloorDiv") \
    .fusion_type("ELEMWISE") \
    .async_flag(False) \
    .binfile_name("floordiv.so") \
    .compute_cost(10) \
    .kernel_name("floordiv") \
    .partial_flag(True) \
    .input(0, "x1", False, "required", "all") \
    .input(1, "x2", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.I8_Default, DataType.I8_Default, DataType.I8_Default) \
    .dtype_format(DataType.U8_Default, DataType.U8_Default, DataType.U8_Default) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()


# Registering the (otherwise empty) function attaches floordiv_op_info to the
# framework's operator registry; the function body is intentionally a no-op.
@op_info_register(floordiv_op_info)
def _floor_div_tbe():
    """FloorDiv TBE register"""
    return
| 40.756098 | 85 | 0.697187 |
1132cde89fb2c930409b846ec1d85e8b6b30452b | 4,122 | py | Python | APIs/management/tests/test_addresses.py | matteyeux/MyBookingServices | ce6ec906b3a58da16e1f066b9af290fb7e8b82d3 | [
"MIT"
] | null | null | null | APIs/management/tests/test_addresses.py | matteyeux/MyBookingServices | ce6ec906b3a58da16e1f066b9af290fb7e8b82d3 | [
"MIT"
] | 3 | 2022-02-26T16:50:12.000Z | 2022-02-26T16:50:12.000Z | APIs/management/tests/test_addresses.py | matteyeux/MyBookingServices | ce6ec906b3a58da16e1f066b9af290fb7e8b82d3 | [
"MIT"
] | null | null | null | from fastapi.testclient import TestClient
from management import app
client = TestClient(app.app)
def test_get_addresses_01():
""" Test getting all addresses. """
response = client.get("/addresses/all/")
assert response.status_code == 200
assert response.json() is not None
def test_get_last_address_01():
""" Test getting the last address. """
addresses = client.get("/addresses/all/").json()
last_address = addresses["addresses"][-1]
response = client.get("/addresses/last/")
assert response.status_code == 200
assert response.json()["address"] == last_address
assert response.json() is not None
def test_get_one_addresses_01():
""" Test getting address with id 1. """
expect = {
"address": {
"id": 1,
"hotel_id": 1,
"number": "16",
"street": "chemin Antoinette Duval",
"town": "Gros",
"postal_code": 23084,
},
}
response = client.get("/addresses/1")
assert response.status_code == 200
assert response.json() == expect
assert response.json() is not None
def test_get_unexisting_address_01():
    """Requesting an id one past the last known address yields a 404."""
    missing_id = client.get("/addresses/last/").json()["address"]["id"] + 1
    resp = client.get(f"/addresses/{missing_id}")
    assert resp.status_code == 404
    assert resp.json() == {'detail': "Address not found"}
    assert resp.json() is not None
def test_create_address_01():
    """A freshly created address becomes the new last address."""
    new_address = {
        "hotel_id": 2,
        "number": "42",
        "street": "rue de la vie",
        "town": "la terre",
        "postal_code": 77777,
    }
    resp = client.post("/addresses/", json=new_address)
    last = client.get("/addresses/last/").json()
    assert resp.status_code == 200
    assert resp.json() == last
    assert resp.json() is not None
def test_update_address_01():
    """Updating the last inserted address returns the updated row."""
    changes = {
        "hotel_id": 1,
        "number": "777",
        "street": "rue de la galaxy",
        "postal_code": 42042,
        "town": "Mars",
    }
    address_id = client.get("/addresses/last/").json()["address"]["id"]
    resp = client.put(f"/addresses/{address_id}", json=changes)
    # TODO : Formattage bug
    # updated_address = client.get(f"/addresses/{address_id}")
    expected = {
        "address": {
            "id": address_id,
            "hotel_id": 1,
            "number": "777",
            "street": "rue de la galaxy",
            "postal_code": 42042,
            "town": "Mars",
        },
    }
    assert resp.status_code == 200
    assert resp.json() == expected
    assert resp.json() is not None
def test_update_unexisting_address_01():
    """Updating an id well past the last address must 404."""
    changes = {
        "hotel_id": 2,
        "number": "123",
        "street": "rue de la la",
        "postal_code": 67890,
        "town": "null part",
    }
    missing_id = client.get("/addresses/last/").json()["address"]["id"] + 5
    resp = client.put(f"/addresses/{missing_id}", json=changes)
    assert resp.status_code == 404
    assert resp.json() == {'detail': "Address not found"}
    assert resp.json() is not None
def test_delete_address_01():
    """Deleting the last address succeeds, and a follow-up GET 404s."""
    address_id = client.get("/addresses/last/").json()["address"]["id"]
    resp = client.delete(f"/addresses/{address_id}")
    followup = client.get(f"/addresses/{address_id}")
    assert resp.status_code == 200
    assert resp.json() == {}
    assert resp.json() is not None
    assert followup.status_code == 404
    assert followup.json() == {'detail': "Address not found"}
    assert followup.json() is not None
| 25.7625 | 71 | 0.602863 |
2803517b49f2db3d6a54b4838ad9c33cca6de626 | 21,578 | py | Python | disnake/ext/commands/common_bot_base.py | tooruu/disnake | 976124a779efff16a51b8a3b294ad6c9949bbb02 | [
"MIT"
] | null | null | null | disnake/ext/commands/common_bot_base.py | tooruu/disnake | 976124a779efff16a51b8a3b294ad6c9949bbb02 | [
"MIT"
] | null | null | null | disnake/ext/commands/common_bot_base.py | tooruu/disnake | 976124a779efff16a51b8a3b294ad6c9949bbb02 | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Disnake Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import collections.abc
import importlib.util
import logging
import os
import sys
import time
import types
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generic,
List,
Mapping,
Optional,
Set,
TypeVar,
Union,
)
import disnake
import disnake.utils
from . import errors
from .cog import Cog
if TYPE_CHECKING:
import importlib.machinery
from ._types import CoroFunc
from .bot import AutoShardedBot, AutoShardedInteractionBot, Bot, InteractionBot
AnyBot = Union[Bot, AutoShardedBot, InteractionBot, AutoShardedInteractionBot]
__all__ = ("CommonBotBase",)
CogT = TypeVar("CogT", bound="Cog")
FuncT = TypeVar("FuncT", bound=Callable[..., Any])
CFT = TypeVar("CFT", bound="CoroFunc")
MISSING: Any = disnake.utils.MISSING
def _is_submodule(parent: str, child: str) -> bool:
return parent == child or child.startswith(parent + ".")
class CommonBotBase(Generic[CogT]):
    def __init__(self, *args, **kwargs):
        # Registries for loaded cogs and extensions; name-mangled so only this
        # base class mutates them.
        self.__cogs: Dict[str, Cog] = {}
        self.__extensions: Dict[str, types.ModuleType] = {}
        # Event name ("on_...") -> listeners registered via add_listener()/listen().
        self.extra_events: Dict[str, List[CoroFunc]] = {}
        self._is_closed: bool = False
        self.owner_id: Optional[int] = kwargs.get("owner_id")
        self.owner_ids: Set[int] = kwargs.get("owner_ids", set())
        # Resolved lazily by _fill_owners() from application info if unset.
        self.owner: Optional[disnake.User] = None
        self.owners: Set[disnake.TeamMember] = set()

        # owner_id and owner_ids are mutually exclusive by contract.
        if self.owner_id and self.owner_ids:
            raise TypeError("Both owner_id and owner_ids are set.")

        if self.owner_ids and not isinstance(self.owner_ids, collections.abc.Collection):
            raise TypeError(f"owner_ids must be a collection not {self.owner_ids.__class__!r}")

        self.reload: bool = kwargs.get("reload", False)

        loop = asyncio.get_event_loop()
        loop.create_task(self._fill_owners())
        if self.reload:
            # Opt-in hot-reload watchdog: reloads modified extensions at runtime.
            loop.create_task(self._watchdog())

        super().__init__(*args, **kwargs)
def dispatch(self, event_name: str, *args: Any, **kwargs: Any) -> None:
# super() will resolve to Client
super().dispatch(event_name, *args, **kwargs) # type: ignore
ev = "on_" + event_name
for event in self.extra_events.get(ev, []):
self._schedule_event(event, ev, *args, **kwargs) # type: ignore
async def _fill_owners(self) -> None:
if self.owner_id or self.owner_ids:
return
await self.wait_until_first_connect() # type: ignore
app = await self.application_info() # type: ignore
if app.team:
self.owners = set(app.team.members)
self.owner_ids = {m.id for m in app.team.members}
else:
self.owner = app.owner
self.owner_id = app.owner.id
async def close(self) -> None:
self._is_closed = True
for extension in tuple(self.__extensions):
try:
self.unload_extension(extension)
except Exception:
pass
for cog in tuple(self.__cogs):
try:
self.remove_cog(cog)
except Exception:
pass
await super().close() # type: ignore
async def is_owner(self, user: Union[disnake.User, disnake.Member]) -> bool:
"""|coro|
Checks if a :class:`~disnake.User` or :class:`~disnake.Member` is the owner of
this bot.
If an :attr:`owner_id` is not set, it is fetched automatically
through the use of :meth:`~.Bot.application_info`.
.. versionchanged:: 1.3
The function also checks if the application is team-owned if
:attr:`owner_ids` is not set.
Parameters
----------
user: :class:`.abc.User`
The user to check for.
Returns
-------
:class:`bool`
Whether the user is the owner.
"""
if self.owner_id:
return user.id == self.owner_id
elif self.owner_ids:
return user.id in self.owner_ids
else:
app = await self.application_info() # type: ignore
if app.team:
self.owners = set(app.team.members)
self.owner_ids = ids = {m.id for m in app.team.members}
return user.id in ids
else:
self.owner = app.owner
self.owner_id = owner_id = app.owner.id
return user.id == owner_id
# listener registration
def add_listener(self, func: CoroFunc, name: str = MISSING) -> None:
"""The non decorator alternative to :meth:`.listen`.
Parameters
----------
func: :ref:`coroutine <coroutine>`
The function to call.
name: :class:`str`
The name of the event to listen for. Defaults to ``func.__name__``.
Example
--------
.. code-block:: python
async def on_ready(): pass
async def my_message(message): pass
bot.add_listener(on_ready)
bot.add_listener(my_message, 'on_message')
Raises
------
TypeError
The function is not a coroutine.
"""
name = func.__name__ if name is MISSING else name
if not asyncio.iscoroutinefunction(func):
raise TypeError("Listeners must be coroutines")
if name in self.extra_events:
self.extra_events[name].append(func)
else:
self.extra_events[name] = [func]
def remove_listener(self, func: CoroFunc, name: str = MISSING) -> None:
"""Removes a listener from the pool of listeners.
Parameters
----------
func
The function that was used as a listener to remove.
name: :class:`str`
The name of the event we want to remove. Defaults to
``func.__name__``.
"""
name = func.__name__ if name is MISSING else name
if name in self.extra_events:
try:
self.extra_events[name].remove(func)
except ValueError:
pass
def listen(self, name: str = MISSING) -> Callable[[CFT], CFT]:
"""A decorator that registers another function as an external
event listener. Basically this allows you to listen to multiple
events from different places e.g. such as :func:`.on_ready`
The functions being listened to must be a :ref:`coroutine <coroutine>`.
Example
--------
.. code-block:: python3
@bot.listen()
async def on_message(message):
print('one')
# in some other file...
@bot.listen('on_message')
async def my_message(message):
print('two')
Would print one and two in an unspecified order.
Raises
------
TypeError
The function being listened to is not a coroutine.
"""
def decorator(func: CFT) -> CFT:
self.add_listener(func, name)
return func
return decorator
# cogs
def add_cog(self, cog: Cog, *, override: bool = False) -> None:
"""Adds a "cog" to the bot.
A cog is a class that has its own event listeners and commands.
.. versionchanged:: 2.0
:exc:`.ClientException` is raised when a cog with the same name
is already loaded.
Parameters
----------
cog: :class:`.Cog`
The cog to register to the bot.
override: :class:`bool`
If a previously loaded cog with the same name should be ejected
instead of raising an error.
.. versionadded:: 2.0
Raises
------
TypeError
The cog does not inherit from :class:`.Cog`.
CommandError
An error happened during loading.
ClientException
A cog with the same name is already loaded.
"""
if not isinstance(cog, Cog):
raise TypeError("cogs must derive from Cog")
cog_name = cog.__cog_name__
existing = self.__cogs.get(cog_name)
if existing is not None:
if not override:
raise disnake.ClientException(f"Cog named {cog_name!r} already loaded")
self.remove_cog(cog_name)
# NOTE: Should be covariant
cog = cog._inject(self) # type: ignore
self.__cogs[cog_name] = cog
def get_cog(self, name: str) -> Optional[Cog]:
"""Gets the cog instance requested.
If the cog is not found, ``None`` is returned instead.
Parameters
----------
name: :class:`str`
The name of the cog you are requesting.
This is equivalent to the name passed via keyword
argument in class creation or the class name if unspecified.
Returns
-------
Optional[:class:`Cog`]
The cog that was requested. If not found, returns ``None``.
"""
return self.__cogs.get(name)
def remove_cog(self, name: str) -> Optional[Cog]:
"""Removes a cog from the bot and returns it.
All registered commands and event listeners that the
cog has registered will be removed as well.
If no cog is found then this method has no effect.
Parameters
----------
name: :class:`str`
The name of the cog to remove.
Returns
-------
Optional[:class:`.Cog`]
The cog that was removed. Returns ``None`` if not found.
"""
cog = self.__cogs.pop(name, None)
if cog is None:
return
help_command = getattr(self, "_help_command", None)
if help_command and help_command.cog is cog:
help_command.cog = None
# NOTE: Should be covariant
cog._eject(self) # type: ignore
return cog
@property
def cogs(self) -> Mapping[str, Cog]:
"""Mapping[:class:`str`, :class:`Cog`]: A read-only mapping of cog name to cog."""
return types.MappingProxyType(self.__cogs)
# extensions
def _remove_module_references(self, name: str) -> None:
# find all references to the module
# remove the cogs registered from the module
for cogname, cog in self.__cogs.copy().items():
if _is_submodule(name, cog.__module__):
self.remove_cog(cogname)
# remove all the listeners from the module
for event_list in self.extra_events.copy().values():
remove = [
index
for index, event in enumerate(event_list)
if event.__module__ is not None and _is_submodule(name, event.__module__)
]
for index in reversed(remove):
del event_list[index]
    def _call_module_finalizers(self, lib: types.ModuleType, key: str) -> None:
        """Run the extension's optional ``teardown`` hook, then purge its modules.

        *lib* is the extension module loaded under *key*. The ``finally`` block
        guarantees the extension registry entry and every (sub)module are
        dropped from ``sys.modules`` whether teardown is absent or raises.
        """
        try:
            func = getattr(lib, "teardown")
        except AttributeError:
            # teardown() is optional for extensions.
            pass
        else:
            try:
                func(self)
            except Exception:
                # Best-effort: a failing teardown must never block unloading,
                # otherwise the extension could not be cleanly reloaded.
                pass
        finally:
            self.__extensions.pop(key, None)
            sys.modules.pop(key, None)
            name = lib.__name__
            for module in list(sys.modules.keys()):
                if _is_submodule(name, module):
                    del sys.modules[module]
def _load_from_module_spec(self, spec: importlib.machinery.ModuleSpec, key: str) -> None:
# precondition: key not in self.__extensions
lib = importlib.util.module_from_spec(spec)
sys.modules[key] = lib
try:
spec.loader.exec_module(lib) # type: ignore
except Exception as e:
del sys.modules[key]
raise errors.ExtensionFailed(key, e) from e
try:
setup = getattr(lib, "setup")
except AttributeError:
del sys.modules[key]
raise errors.NoEntryPointError(key)
try:
setup(self)
except Exception as e:
del sys.modules[key]
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, key)
raise errors.ExtensionFailed(key, e) from e
else:
self.__extensions[key] = lib
def _resolve_name(self, name: str, package: Optional[str]) -> str:
try:
return importlib.util.resolve_name(name, package)
except ImportError:
raise errors.ExtensionNotFound(name)
def load_extension(self, name: str, *, package: Optional[str] = None) -> None:
"""Loads an extension.
An extension is a python module that contains commands, cogs, or
listeners.
An extension must have a global function, ``setup`` defined as
the entry point on what to do when the extension is loaded. This entry
point must have a single argument, the ``bot``.
Parameters
----------
name: :class:`str`
The extension name to load. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
package: Optional[:class:`str`]
The package name to resolve relative imports with.
This is required when loading an extension using a relative path, e.g ``.foo.test``.
Defaults to ``None``.
.. versionadded:: 1.7
Raises
------
ExtensionNotFound
The extension could not be imported.
This is also raised if the name of the extension could not
be resolved using the provided ``package`` parameter.
ExtensionAlreadyLoaded
The extension is already loaded.
NoEntryPointError
The extension does not have a setup function.
ExtensionFailed
The extension or its setup function had an execution error.
"""
name = self._resolve_name(name, package)
if name in self.__extensions:
raise errors.ExtensionAlreadyLoaded(name)
spec = importlib.util.find_spec(name)
if spec is None:
raise errors.ExtensionNotFound(name)
self._load_from_module_spec(spec, name)
def unload_extension(self, name: str, *, package: Optional[str] = None) -> None:
"""Unloads an extension.
When the extension is unloaded, all commands, listeners, and cogs are
removed from the bot and the module is un-imported.
The extension can provide an optional global function, ``teardown``,
to do miscellaneous clean-up if necessary. This function takes a single
parameter, the ``bot``, similar to ``setup`` from
:meth:`~.Bot.load_extension`.
Parameters
----------
name: :class:`str`
The extension name to unload. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
package: Optional[:class:`str`]
The package name to resolve relative imports with.
This is required when unloading an extension using a relative path, e.g ``.foo.test``.
Defaults to ``None``.
.. versionadded:: 1.7
Raises
------
ExtensionNotFound
The name of the extension could not
be resolved using the provided ``package`` parameter.
ExtensionNotLoaded
The extension was not loaded.
"""
name = self._resolve_name(name, package)
lib = self.__extensions.get(name)
if lib is None:
raise errors.ExtensionNotLoaded(name)
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, name)
def reload_extension(self, name: str, *, package: Optional[str] = None) -> None:
"""Atomically reloads an extension.
This replaces the extension with the same extension, only refreshed. This is
equivalent to a :meth:`unload_extension` followed by a :meth:`load_extension`
except done in an atomic way. That is, if an operation fails mid-reload then
the bot will roll-back to the prior working state.
Parameters
----------
name: :class:`str`
The extension name to reload. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
package: Optional[:class:`str`]
The package name to resolve relative imports with.
This is required when reloading an extension using a relative path, e.g ``.foo.test``.
Defaults to ``None``.
.. versionadded:: 1.7
Raises
------
ExtensionNotLoaded
The extension was not loaded.
ExtensionNotFound
The extension could not be imported.
This is also raised if the name of the extension could not
be resolved using the provided ``package`` parameter.
NoEntryPointError
The extension does not have a setup function.
ExtensionFailed
The extension setup function had an execution error.
"""
name = self._resolve_name(name, package)
lib = self.__extensions.get(name)
if lib is None:
raise errors.ExtensionNotLoaded(name)
# get the previous module states from sys modules
modules = {
name: module
for name, module in sys.modules.items()
if _is_submodule(lib.__name__, name)
}
try:
# Unload and then load the module...
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, name)
self.load_extension(name)
except Exception:
# if the load failed, the remnants should have been
# cleaned from the load_extension function call
# so let's load it from our old compiled library.
lib.setup(self)
self.__extensions[name] = lib
# revert sys.modules back to normal and raise back to caller
sys.modules.update(modules)
raise
def load_extensions(self, path: str) -> None:
"""Loads all extensions in a directory.
.. versionadded:: 2.4
Parameters
----------
path: :class:`str`
The path to search for extensions
"""
for extension in disnake.utils.search_directory(path):
self.load_extension(extension)
@property
def extensions(self) -> Mapping[str, types.ModuleType]:
"""Mapping[:class:`str`, :class:`py:types.ModuleType`]: A read-only mapping of extension name to extension."""
return types.MappingProxyType(self.__extensions)
async def _watchdog(self):
"""|coro|
Starts the bot watchdog which will watch currently loaded extensions
and reload them when they're modified.
"""
if isinstance(self, disnake.Client):
await self.wait_until_ready()
reload_log = logging.getLogger(__name__)
# ensure the message actually shows up
if logging.root.level > logging.INFO:
logging.basicConfig()
reload_log.setLevel(logging.INFO)
if isinstance(self, disnake.Client):
is_closed = self.is_closed
else:
is_closed = lambda: False
reload_log.info(f"WATCHDOG: Watching extensions")
last = time.time()
while not is_closed():
t = time.time()
extensions = set()
for name, module in self.extensions.items():
file = module.__file__
if file and os.stat(file).st_mtime > last:
extensions.add(name)
for name in extensions:
try:
self.reload_extension(name)
except errors.ExtensionError as e:
reload_log.exception(e)
else:
reload_log.info(f"WATCHDOG: Reloaded '{name}'")
await asyncio.sleep(1)
last = t
| 33.145929 | 118 | 0.59315 |
c6c6d41f8fdf9585149e5144382b29e6fe94d7a9 | 399 | py | Python | code/List/pluck.py | jumploop/30-seconds-of-python | bfcc5a35d9bd0bba67e81de5715dba21e1ba43be | [
"CC0-1.0"
] | null | null | null | code/List/pluck.py | jumploop/30-seconds-of-python | bfcc5a35d9bd0bba67e81de5715dba21e1ba43be | [
"CC0-1.0"
] | null | null | null | code/List/pluck.py | jumploop/30-seconds-of-python | bfcc5a35d9bd0bba67e81de5715dba21e1ba43be | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
功能实现:将字典列表转换为与指定键对应的值列表。
解读:
使用列表推导式和dict.get()来获取lst中每个字典的key值。
"""
def pluck(lst, key, default=None):
    """Return the value mapped to *key* in each dict of *lst*.

    Dicts missing *key* contribute *default* (``None`` by default), mirroring
    ``dict.get``, so the result always has one entry per input dict. The new
    ``default`` parameter is backward-compatible with the original behavior.
    """
    return [x.get(key, default) for x in lst]
# Examples: pull every character's age, preserving input order.
simpsons = [
    {'name': 'lisa', 'age': 8},
    {'name': 'homer', 'age': 36},
    {'name': 'marge', 'age': 34},
    {'name': 'bart', 'age': 10}
]
print(pluck(simpsons, 'age'))
# output:
# [8, 36, 34, 10]
| 15.346154 | 36 | 0.548872 |
174d64b85f0c40907eb1a7f1e27a89a8af82f295 | 5,334 | py | Python | he_j_inference/submit_single_model.py | ynswon/MURA | 8a54d17302ca7b4a66a7a04f8d202fe1d61b4641 | [
"MIT"
] | 1 | 2019-02-21T15:28:56.000Z | 2019-02-21T15:28:56.000Z | he_j_inference/submit_single_model.py | ynswon/MURA | 8a54d17302ca7b4a66a7a04f8d202fe1d61b4641 | [
"MIT"
] | null | null | null | he_j_inference/submit_single_model.py | ynswon/MURA | 8a54d17302ca7b4a66a7a04f8d202fe1d61b4641 | [
"MIT"
] | 3 | 2019-01-28T09:19:15.000Z | 2020-06-09T07:06:34.000Z | import os
import sys
from keras import backend as K
import tensorflow as tf
from keras_model import ModelFactory
#from configparser import ConfigParser
import numpy as np
import pandas as pd
from PIL import Image
from random import shuffle
#from skimage.transform import resize
#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Inference-only TF/Keras session: learning phase 0 disables train-time ops.
sess = tf.Session()
K.set_session(sess)
K.set_learning_phase(0) # all new operations will be in test mode from now on

SIZE = 448                       # square input resolution fed to the network
batch_size=16
class_names = [u'Normal', u'Abnormal']   # index 1 = abnormal probability below
base_model_name = u'DenseNet169'
#base_model_name = u'InceptionV3'
#base_model_name = u'Xception'
#base_model_name=u"NASNetMobile"
use_base_model_weights = False
image_dimension = SIZE
# ImageNet channel statistics (also duplicated inside transform_batch_images).
imagenet_mean = np.array([0.485, 0.456, 0.406])
imagenet_std = np.array([0.229, 0.224, 0.225])
enable_batch=False               # True: shuffled, batched inference path
write_prob=True

# Evaluation CSV ("<image path>,<label>" rows) is given on the command line.
eval_csv = sys.argv[1]
df = pd.read_csv(eval_csv, names=['img','label' ], header=None)
eval_imgs = df.img.values.tolist()
print (eval_imgs[:10])
if enable_batch:shuffle(eval_imgs)
right=0                          # running count of correct predictions
patients={}                      # image path minus last 10 chars -> per-image probs
img_prob={}                      # image path -> predicted probability
# Alternative checkpoints tried during experimentation, kept for reference:
#model_weights_file = 'experiments/inceptionv3_499/best_weights.h5'
#model_weights_file = 'experiments/densenet121_448/best_weights.h5'
#model_weights_file = 'densenet169_448.h5'
#model_weights_file = 'experiments/densenet121_448_wildcat_with_224_pretrain/best_weights.h5'
#model_weights_file = 'experiments/extra_data/inceptionv3_499/best_weights.h5'
#model_weights_file = 'experiments/30/best_weights.h5448_40499'
#model_weights_file = 'experiments/31/best_weights.h5_448_4172'
model_weights_file = 'dense169_448v2.h5'
model_factory = ModelFactory()
model = model_factory.get_model(
    class_names,
    model_name=base_model_name,
    use_base_weights=use_base_model_weights,
    weights_path=model_weights_file,
    input_shape=(image_dimension, image_dimension, 3),
    model_id=7)
def load_image(image_file):
    """Load *image_file*, resize it to SIZE x SIZE and scale pixels to [0, 1]."""
    img = Image.open(image_file).resize((SIZE, SIZE), Image.ANTIALIAS)
    rgb = np.asarray(img.convert("RGB"))
    return rgb / 255.
def transform_batch_images(batch_x):
    """Standardise a batch of [0, 1] RGB images with ImageNet channel statistics."""
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    return (batch_x - mean) / std
# ---------------------------------------------------------------------------
# Inference: fill img_prob (per-image abnormal probability), patients
# (study path -> list of probabilities) and right (count of correct
# image-level predictions), either in mini-batches or one image at a time.
# ---------------------------------------------------------------------------
def _record_prediction(img_file, prob):
    """Book-keep one image prediction and return 1 when the thresholded
    prediction matches the label encoded in the file path, else 0."""
    img_prob[img_file] = prob
    label = 1 if 'positive' in img_file else 0
    # study directory = image path minus the trailing '/imageN.png' suffix
    patient = img_file[:-10]
    patients.setdefault(patient, []).append(prob)
    return int(int(prob > 0.5) == label)

if enable_batch:
    # Floor division: plain / returns a float on Python 3 and breaks range().
    num_batches = len(eval_imgs) // batch_size
    for i in range(num_batches):
        batch_x_path = eval_imgs[i * batch_size:(i + 1) * batch_size]
        batch_x = np.asarray([load_image(x_path) for x_path in batch_x_path])
        batch_x = transform_batch_images(batch_x)
        result = model.predict(batch_x)
        for j in range(batch_size):
            right += _record_prediction(eval_imgs[i * batch_size + j], result[j][1])
    # Handle the remainder that does not fill a whole batch. The offset is
    # computed from num_batches rather than the loop variable i, which is
    # unbound (NameError) when there are fewer images than one batch.
    rem = len(eval_imgs) - num_batches * batch_size
    if rem > 0:
        batch_x_path = eval_imgs[num_batches * batch_size:]
        batch_x = np.asarray([load_image(x_path) for x_path in batch_x_path])
        batch_x = transform_batch_images(batch_x)
        result = model.predict(batch_x)
        for j in range(rem):
            right += _record_prediction(eval_imgs[num_batches * batch_size + j], result[j][1])
else:
    for img_file in eval_imgs:
        # e.g. MURA-v1.1/valid/XR_WRIST/patient11185/study1_positive/image1.png
        image_array = transform_batch_images(load_image(img_file))
        x_data = np.expand_dims(np.asarray(image_array, dtype='float32'), 0)
        result = model.predict(x_data)
        right += _record_prediction(img_file, result[0][1])
'''
if write_prob:
    f1=open('pred_'+base_model_name+'.csv','w')
    for fn in img_prob:
        f1.write(fn+','+str(img_prob[fn])+'\n')
    f1.close()
'''
print('acc:{}'.format(float(right) / len(eval_imgs)))
# argv[2]: one line per study, "<study path>,<0|1>"; a study is predicted
# abnormal when the mean image probability exceeds 0.5.
with open(sys.argv[2], 'w') as f:
    for patient in patients:
        scores = patients[patient]
        label = int(sum(scores) / len(scores) > 0.5)
        f.write(patient + ',' + str(label) + '\n')
print("done!")
| 34.412903 | 93 | 0.698163 |
2a8cebd1674713dd2f4089ed395c685b0d6636ed | 1,644 | py | Python | aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/DisableThingRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/DisableThingRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/DisableThingRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DisableThingRequest(RpcRequest):
	"""RPC request wrapper for the Aliyun IoT ``DisableThing`` API (version 2018-01-20)."""

	def __init__(self):
		RpcRequest.__init__(self, 'Iot', '2018-01-20', 'DisableThing','iot')

	# The getter/setter pairs below map one-to-one onto the request's query
	# parameters; set_* stores the value, get_* reads it back.
	def get_IotId(self):
		return self.get_query_params().get('IotId')

	def set_IotId(self,IotId):
		self.add_query_param('IotId',IotId)

	def get_IotInstanceId(self):
		return self.get_query_params().get('IotInstanceId')

	def set_IotInstanceId(self,IotInstanceId):
		self.add_query_param('IotInstanceId',IotInstanceId)

	def get_DeviceName(self):
		return self.get_query_params().get('DeviceName')

	def set_DeviceName(self,DeviceName):
		self.add_query_param('DeviceName',DeviceName)

	def get_ProductKey(self):
		return self.get_query_params().get('ProductKey')

	def set_ProductKey(self,ProductKey):
		self.add_query_param('ProductKey',ProductKey)
5227207b2c57b0972b7af903568685d83f38dd7a | 10,410 | py | Python | pyscf/df/df.py | crisely09/pyscf | cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6 | [
"Apache-2.0"
] | 1 | 2020-04-07T21:12:08.000Z | 2020-04-07T21:12:08.000Z | pyscf/df/df.py | crisely09/pyscf | cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6 | [
"Apache-2.0"
] | 2 | 2019-09-16T17:58:31.000Z | 2019-09-22T17:26:01.000Z | pyscf/df/df.py | crisely09/pyscf | cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
J-metric density fitting
'''
import time
import tempfile
import numpy
import h5py
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf.df import incore
from pyscf.df import outcore
from pyscf.df import r_incore
from pyscf.df import addons
from pyscf.df import df_jk
from pyscf.ao2mo import _ao2mo
from pyscf.ao2mo.incore import _conc_mos, iden_coeffs
from pyscf import __config__
class DF(lib.StreamObject):
    r'''
    Object to hold 3-index tensor
    Attributes:
        auxbasis : str or dict
            Same input format as :attr:`Mole.basis`
        auxmol : Mole object
            Read only Mole object to hold the auxiliary basis. auxmol is
            generated automatically in the initialization step based on the
            given auxbasis. It is used in the rest part of the code to
            determine the problem size, the integral batches etc. This object
            should NOT be modified.
        _cderi_to_save : str
            If _cderi_to_save is specified, the DF integral tensor will be
            saved in this file.
        _cderi : str or numpy array
            If _cderi is specified, the DF integral tensor will be read from
            this HDF5 file (or numpy array). When the DF integral tensor is
            provided from the HDF5 file, it has to be stored under the dataset
            'j3c'.
            The DF integral tensor :math:`V_{x,ij}` should be a 2D array in C
            (row-major) convention, where x corresponds to index of auxiliary
            basis, and the combined index ij is the orbital pair index. The
            hermitian symmetry is assumed for the combined ij index, ie
            the elements of :math:`V_{x,i,j}` with :math:`i\geq j` are existed
            in the DF integral tensor. Thus the shape of DF integral tensor
            is (M,N*(N+1)/2), where M is the number of auxbasis functions and
            N is the number of basis functions of the orbital basis.
        blockdim : int
            When reading DF integrals from disk the chunk size to load. It is
            used to improve the IO performance.
    '''
    def __init__(self, mol, auxbasis=None):
        self.mol = mol
        self.stdout = mol.stdout
        self.verbose = mol.verbose
        self.max_memory = mol.max_memory
        self._auxbasis = auxbasis
##################################################
# Following are not input options
        self.auxmol = None
# If _cderi_to_save is specified, the 3C-integral tensor will be saved in this file.
        self._cderi_to_save = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
# If _cderi is specified, the 3C-integral tensor will be read from this file
        self._cderi = None
        self._call_count = getattr(__config__, 'df_df_DF_call_count', None)
        self.blockdim = getattr(__config__, 'df_df_DF_blockdim', 240)
        self._keys = set(self.__dict__.keys())
    @property
    def auxbasis(self):
        return self._auxbasis
    @auxbasis.setter
    def auxbasis(self, x):
        # Changing the auxiliary basis invalidates the cached auxmol and integrals.
        if self._auxbasis != x:
            self._auxbasis = x
            self.auxmol = None
            self._cderi = None
    def dump_flags(self, verbose=None):
        '''Log the current settings (auxbasis, memory, integral file paths).'''
        log = logger.new_logger(self, verbose)
        log.info('******** %s ********', self.__class__)
        if self.auxmol is None:
            log.info('auxbasis = %s', self.auxbasis)
        else:
            log.info('auxbasis = auxmol.basis = %s', self.auxmol.basis)
        log.info('max_memory = %s', self.max_memory)
        if isinstance(self._cderi, str):
            log.info('_cderi = %s where DF integrals are loaded (readonly).',
                     self._cderi)
        if isinstance(self._cderi_to_save, str):
            log.info('_cderi_to_save = %s', self._cderi_to_save)
        else:
            log.info('_cderi_to_save = %s', self._cderi_to_save.name)
        return self
    def build(self):
        '''Generate the Cholesky-decomposed 3-center integrals (j3c).
        Small problems are kept in memory; otherwise the tensor is computed
        out-of-core into _cderi_to_save and, if it fits, loaded back.
        '''
        t0 = (time.clock(), time.time())
        log = logger.Logger(self.stdout, self.verbose)
        self.check_sanity()
        self.dump_flags()
        mol = self.mol
        auxmol = self.auxmol = addons.make_auxmol(self.mol, self.auxbasis)
        nao = mol.nao_nr()
        naux = auxmol.nao_nr()
        nao_pair = nao*(nao+1)//2
        max_memory = (self.max_memory - lib.current_memory()[0]) * .8
        int3c = mol._add_suffix('int3c2e')
        int2c = mol._add_suffix('int2c2e')
        # factor 3*8/1e6: three float64 work arrays of nao_pair*naux, in MB
        if (nao_pair*naux*3*8/1e6 < max_memory and
            not isinstance(self._cderi_to_save, str)):
            self._cderi = incore.cholesky_eri(mol, int3c=int3c, int2c=int2c,
                                              auxmol=auxmol, verbose=log)
        else:
            if isinstance(self._cderi_to_save, str):
                cderi = self._cderi_to_save
            else:
                cderi = self._cderi_to_save.name
            if isinstance(self._cderi, str):
                log.warn('Value of _cderi is ignored. DF integrals will be '
                         'saved in file %s .', cderi)
            outcore.cholesky_eri(mol, cderi, dataname='j3c',
                                 int3c=int3c, int2c=int2c, auxmol=auxmol,
                                 max_memory=max_memory, verbose=log)
            # Load back into memory when the final tensor fits.
            if nao_pair*naux*8/1e6 < max_memory:
                with addons.load(cderi, 'j3c') as feri:
                    cderi = numpy.asarray(feri)
            self._cderi = cderi
        log.timer_debug1('Generate density fitting integrals', *t0)
        return self
    def kernel(self, *args, **kwargs):
        '''Alias of :meth:`build`.'''
        return self.build(*args, **kwargs)
    def loop(self, blksize=None):
        '''Iterate over the j3c tensor in row blocks of at most blksize aux functions.'''
        if self._cderi is None:
            self.build()
        if blksize is None:
            blksize = self.blockdim
        with addons.load(self._cderi, 'j3c') as feri:
            naoaux = feri.shape[0]
            for b0, b1 in self.prange(0, naoaux, blksize):
                eri1 = numpy.asarray(feri[b0:b1], order='C')
                yield eri1
    def prange(self, start, end, step):
        '''Yield (begin, end) block ranges. When _call_count is enabled, the
        iteration direction alternates between calls — presumably to improve
        cache/IO reuse between consecutive passes (TODO confirm).'''
        if isinstance(self._call_count, int):
            self._call_count += 1
            if self._call_count % 2 == 1:
                for i in reversed(range(start, end, step)):
                    yield i, min(i+step, end)
            else:
                for i in range(start, end, step):
                    yield i, min(i+step, end)
        else:
            for i in range(start, end, step):
                yield i, min(i+step, end)
    def get_naoaux(self):
# determine naoaux with self._cderi, because DF object may be used as CD
# object when self._cderi is provided.
        if self._cderi is None:
            self.build()
        with addons.load(self._cderi, 'j3c') as feri:
            return feri.shape[0]
    def get_jk(self, dm, hermi=1, vhfopt=None, with_j=True, with_k=True):
        '''Coulomb (J) and exchange (K) matrices via density fitting.'''
        return df_jk.get_jk(self, dm, hermi, vhfopt, with_j, with_k)
    def get_eri(self):
        '''Assemble the 4-index AO ERIs from the DF tensor (8-fold symmetry).'''
        nao = self.mol.nao_nr()
        nao_pair = nao * (nao+1) // 2
        ao_eri = numpy.zeros((nao_pair,nao_pair))
        for eri1 in self.loop():
            # ao_eri += eri1^T . eri1, accumulated block by block
            lib.dot(eri1.T, eri1, 1, ao_eri, 1)
        return ao2mo.restore(8, ao_eri, nao)
    get_ao_eri = get_eri
    def ao2mo(self, mo_coeffs,
              compact=getattr(__config__, 'df_df_DF_ao2mo_compact', True)):
        '''Transform the DF ERIs to the MO basis. mo_coeffs is a single
        coefficient matrix (used for all four indices) or a 4-tuple.'''
        if isinstance(mo_coeffs, numpy.ndarray) and mo_coeffs.ndim == 2:
            mo_coeffs = (mo_coeffs,) * 4
        ijmosym, nij_pair, moij, ijslice = _conc_mos(mo_coeffs[0], mo_coeffs[1], compact)
        klmosym, nkl_pair, mokl, klslice = _conc_mos(mo_coeffs[2], mo_coeffs[3], compact)
        mo_eri = numpy.zeros((nij_pair,nkl_pair))
        # When the bra and ket MO pairs coincide, the kl half-transform can be reused.
        sym = (iden_coeffs(mo_coeffs[0], mo_coeffs[2]) and
               iden_coeffs(mo_coeffs[1], mo_coeffs[3]))
        Lij = Lkl = None
        for eri1 in self.loop():
            Lij = _ao2mo.nr_e2(eri1, moij, ijslice, aosym='s2', mosym=ijmosym, out=Lij)
            if sym:
                Lkl = Lij
            else:
                Lkl = _ao2mo.nr_e2(eri1, mokl, klslice, aosym='s2', mosym=klmosym, out=Lkl)
            lib.dot(Lij.T, Lkl, 1, mo_eri, 1)
        return mo_eri
    get_mo_eri = ao2mo
class DF4C(DF):
    '''Relativistic 4-component'''
    def build(self):
        '''Build the (LL|LL) and (SS|LL) spinor DF tensors in memory.
        Out-of-core generation is not implemented for the 4-component case.'''
        log = logger.Logger(self.stdout, self.verbose)
        mol = self.mol
        auxmol = self.auxmol = addons.make_auxmol(self.mol, self.auxbasis)
        n2c = mol.nao_2c()
        naux = auxmol.nao_nr()
        nao_pair = n2c*(n2c+1)//2
        max_memory = (self.max_memory - lib.current_memory()[0]) * .8
        # complex128 (16 bytes), two tensors, three work arrays each (in MB)
        if nao_pair*naux*3*16/1e6*2 < max_memory:
            self._cderi =(r_incore.cholesky_eri(mol, auxmol=auxmol, aosym='s2',
                                                int3c='int3c2e_spinor', verbose=log),
                          r_incore.cholesky_eri(mol, auxmol=auxmol, aosym='s2',
                                                int3c='int3c2e_spsp1_spinor', verbose=log))
        else:
            raise NotImplementedError
        return self
    def loop(self):
        '''Iterate matching blocks of the LL and SS DF tensors in lockstep.'''
        if self._cderi is None:
            self.build()
        with addons.load(self._cderi[0], 'j3c') as ferill:
            naoaux = ferill.shape[0]
            with addons.load(self._cderi[1], 'j3c') as feriss: # python2.6 not support multiple with
                for b0, b1 in self.prange(0, naoaux, self.blockdim):
                    erill = numpy.asarray(ferill[b0:b1], order='C')
                    eriss = numpy.asarray(feriss[b0:b1], order='C')
                    yield erill, eriss
    def get_jk(self, dm, hermi=1, vhfopt=None, with_j=True, with_k=True):
        # vhfopt/with_j/with_k are accepted for interface compatibility with DF
        # but the relativistic driver only uses dm and hermi.
        return df_jk.r_get_jk(self, dm, hermi)
    def ao2mo(self, mo_coeffs):
        raise NotImplementedError
| 39.732824 | 100 | 0.595005 |
bb2b8e008e68d586f1dcca6891f1b59f0973046f | 8,057 | py | Python | GFMM/basegfmmclassifier.py | thanhtung09t2/Hyperbox-classifier | 4b4cf9dfae68902bd9a742db421cacce8daf37a4 | [
"BSD-3-Clause"
] | null | null | null | GFMM/basegfmmclassifier.py | thanhtung09t2/Hyperbox-classifier | 4b4cf9dfae68902bd9a742db421cacce8daf37a4 | [
"BSD-3-Clause"
] | 1 | 2018-09-05T11:39:56.000Z | 2018-09-09T03:28:14.000Z | GFMM/basegfmmclassifier.py | thanhtung09t2/Hyperbox-classifier | 4b4cf9dfae68902bd9a742db421cacce8daf37a4 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 10:22:19 2018
@author: Thanh Tung Khuat
Base GFMM classifier
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from GFMM.classification import predict
from functionhelper.matrixhelper import delete_const_dims, pca_transform
from functionhelper.preprocessinghelper import normalize
class BaseGFMMClassifier(object):
    """Base class for General Fuzzy Min-Max classifiers: holds the hyperbox
    bounds (V = lower, W = upper), their class labels, normalization state and
    shared plotting/preprocessing utilities."""
    def __init__(self, gamma = 1, teta = 1, isDraw = False, oper = 'min', isNorm = True, norm_range = [0, 1]):
        self.gamma = gamma          # membership-function sensitivity parameter
        self.teta = teta            # maximum hyperbox size
        self.isDraw = isDraw        # draw hyperboxes while training
        self.oper = oper            # 'min' or 'max' operation mode
        self.isNorm = isNorm        # whether inputs are to be normalized
        self.V = np.array([])       # hyperbox lower bounds
        self.W = np.array([])       # hyperbox upper bounds
        self.classId = np.array([]) # class label of each hyperbox
        # parameters for data normalization
        self.loLim = norm_range[0]
        self.hiLim = norm_range[1]
        self.mins = []
        self.maxs = []
        self.delayConstant = 0.001 # delay time period to display hyperboxes on the canvas
    def dataPreprocessing(self, X_l, X_u):
        """
        Preprocess data: delete constant dimensions, Normalize input samples if needed
        INPUT:
            X_l          Input data lower bounds (rows = objects, columns = features)
            X_u          Input data upper bounds (rows = objects, columns = features)
        OUTPUT
            X_l, X_u were preprocessed
        """
        # delete constant dimensions
        #X_l, X_u = delete_const_dims(X_l, X_u)
        # Normalize input samples if needed
        if X_l.min() < self.loLim or X_u.min() < self.loLim or X_u.max() > self.hiLim or X_l.max() > self.hiLim:
            self.mins = X_l.min(axis = 0) # get min value of each feature
            self.maxs = X_u.max(axis = 0) # get max value of each feature
            X_l = normalize(X_l, [self.loLim, self.hiLim])
            X_u = normalize(X_u, [self.loLim, self.hiLim])
        else:
            # data already inside [loLim, hiLim]: record that no scaling was applied
            self.isNorm = False
            self.mins = []
            self.maxs = []
        return (X_l, X_u)
    def pcatransform(self):
        """
        Perform PCA transform of V and W if the dimensions are larger than 3
        OUTPUT:
            V and W in the new space
        """
        yX, xX = self.V.shape
        if (xX > 3):
            Vt = pca_transform(self.V, 3)
            Wt = pca_transform(self.W, 3)
            # rescale the projected boxes back into [loLim, hiLim] for plotting
            mins = Vt.min(axis = 0)
            maxs = Wt.max(axis = 0)
            Vt = self.loLim + (self.hiLim - self.loLim) * (Vt - np.ones((yX, 1)) * mins) / (np.ones((yX, 1)) * (maxs - mins))
            Wt = self.loLim + (self.hiLim - self.loLim) * (Wt - np.ones((yX, 1)) * mins) / (np.ones((yX, 1)) * (maxs - mins))
        else:
            Vt = self.V
            Wt = self.W
        return (Vt, Wt)
    def initializeCanvasGraph(self, figureName, numDims):
        """
        Initialize canvas to draw hyperbox
        INPUT
            figureName          Title name of windows containing hyperboxes
            numDims             The number of dimensions of hyperboxes
        OUTPUT
            drawing_canvas      Plotting object of python
        """
        fig = plt.figure(figureName)
        plt.ion()
        if numDims == 2:
            drawing_canvas = fig.add_subplot(1, 1, 1)
            drawing_canvas.axis([0, 1, 0, 1])
        else:
            # 3 (or more, after PCA) dimensions: use a 3-D axis in the unit cube
            drawing_canvas = Axes3D(fig)
            drawing_canvas.set_xlim3d(0, 1)
            drawing_canvas.set_ylim3d(0, 1)
            drawing_canvas.set_zlim3d(0, 1)
        return drawing_canvas
    def delay(self):
        """
        Delay a time period to display hyperboxes
        """
        plt.pause(self.delayConstant)
    def splitSimilarityMaxtrix(self, A, asimil_type = 'max', isSort = True):
        """
        Split the similarity matrix A into the maxtrix with three columns:
            + First column is row indices of A
            + Second column is column indices of A
            + Third column is the values corresponding to the row and column
            if isSort = True, the third column is sorted in the descending order
        INPUT
            A               Degrees of membership of input patterns (each row is the output from memberG function)
            asimil_type     Use 'min' or 'max' (default) memberhsip in case of assymetric similarity measure (simil='mid')
            isSort          Sorting flag
        OUTPUT
            The output as mentioned above
        """
        # get min/max memberships from triu and tril of memberhsip matrix which might not be symmetric (simil=='mid')
        if asimil_type == 'min':
            transformedA = np.minimum(np.flipud(np.rot90(np.tril(A, -1))), np.triu(A, 1)) # rotate tril to align it with triu for min (max) operation
        else:
            transformedA = np.maximum(np.flipud(np.rot90(np.tril(A, -1))), np.triu(A, 1))
        # keep only the non-zero similarities and their (row, column) positions
        ind_rows, ind_columns = np.nonzero(transformedA)
        values = transformedA[ind_rows, ind_columns]
        if isSort == True:
            ind_SortedTransformedA = np.argsort(values)[::-1]
            sortedTransformedA = values[ind_SortedTransformedA]
            result = np.concatenate((ind_rows[ind_SortedTransformedA][:, np.newaxis], ind_columns[ind_SortedTransformedA][:, np.newaxis], sortedTransformedA[:, np.newaxis]), axis=1)
        else:
            result = np.concatenate((ind_rows[:, np.newaxis], ind_columns[:, np.newaxis], values[:, np.newaxis]), axis=1)
        return result
    def predict(self, Xl_Test, Xu_Test, patClassIdTest):
        """
        Perform classification
            result = predict(Xl_Test, Xu_Test, patClassIdTest)
        INPUT:
            Xl_Test             Test data lower bounds (rows = objects, columns = features)
            Xu_Test             Test data upper bounds (rows = objects, columns = features)
            patClassIdTest      Test data class labels (crisp)
        OUTPUT:
            result        A object with Bunch datatype containing all results as follows:
                          + summis           Number of misclassified objects
                          + misclass         Binary error map
                          + sumamb           Number of objects with maximum membership in more than one class
                          + out              Soft class memberships
                          + mem              Hyperbox memberships
        """
        #Xl_Test, Xu_Test = delete_const_dims(Xl_Test, Xu_Test)
        # Normalize testing dataset if training datasets were normalized
        if len(self.mins) > 0:
            noSamples = Xl_Test.shape[0]
            Xl_Test = self.loLim + (self.hiLim - self.loLim) * (Xl_Test - np.ones((noSamples, 1)) * self.mins) / (np.ones((noSamples, 1)) * (self.maxs - self.mins))
            Xu_Test = self.loLim + (self.hiLim - self.loLim) * (Xu_Test - np.ones((noSamples, 1)) * self.mins) / (np.ones((noSamples, 1)) * (self.maxs - self.mins))
            if Xl_Test.min() < self.loLim or Xu_Test.min() < self.loLim or Xl_Test.max() > self.hiLim or Xu_Test.max() > self.hiLim:
                print('Test sample falls outside', self.loLim, '-', self.hiLim, 'interval')
                print('Number of original samples = ', noSamples)
                # only keep samples within the interval loLim-hiLim
                indXl_good = np.where((Xl_Test >= self.loLim).all(axis = 1) & (Xl_Test <= self.hiLim).all(axis = 1))[0]
                indXu_good = np.where((Xu_Test >= self.loLim).all(axis = 1) & (Xu_Test <= self.hiLim).all(axis = 1))[0]
                indKeep = np.intersect1d(indXl_good, indXu_good)
                Xl_Test = Xl_Test[indKeep, :]
                Xu_Test = Xu_Test[indKeep, :]
                print('Number of kept samples =', Xl_Test.shape[0])
                #return
        # do classification
        result = None
        if Xl_Test.shape[0] > 0:
            result = predict(self.V, self.W, self.classId, Xl_Test, Xu_Test, patClassIdTest, self.gamma, self.oper)
        return result
| 39.495098 | 181 | 0.573166 |
9a92c0ee913478fa8ab8fb4f39516b350aef5bf5 | 308 | py | Python | bbuser/debug.py | ekivemark/BlueButtonUser | 096cf439cd0c4ccb3d16b0efebf1c34fd3fb8939 | [
"Apache-2.0"
] | 1 | 2020-10-29T07:29:49.000Z | 2020-10-29T07:29:49.000Z | bbuser/debug.py | ekivemark/BlueButtonUser | 096cf439cd0c4ccb3d16b0efebf1c34fd3fb8939 | [
"Apache-2.0"
] | 7 | 2020-02-11T23:03:55.000Z | 2021-12-13T19:42:01.000Z | bbuser/debug.py | ekivemark/BlueButtonUser | 096cf439cd0c4ccb3d16b0efebf1c34fd3fb8939 | [
"Apache-2.0"
] | 2 | 2018-10-06T21:45:51.000Z | 2020-10-10T16:10:36.000Z | """
bbuser
FILE: debug
Created: 7/12/15 5:11 PM
A callback routine for overriding Django.debug.toolbar
"""
__author__ = 'Mark Scrimshire:@ekivemark'
from django.conf import settings
def Debug_Toolbar_Display(request):
    """Callback for django-debug-toolbar: show the toolbar iff DEBUG is enabled.

    The *request* argument is required by the callback signature but unused.
    """
    return bool(settings.DEBUG)
| 15.4 | 54 | 0.717532 |
554640eb19d4324a72a52b35064c6728ce2ff8c9 | 10,037 | py | Python | scalyr_agent/tests/configuration_docker_test.py | slomo/scalyr-agent-2 | 8726d6c2483c9ceb19a37697f266261c31e00b0b | [
"Apache-2.0"
] | null | null | null | scalyr_agent/tests/configuration_docker_test.py | slomo/scalyr-agent-2 | 8726d6c2483c9ceb19a37697f266261c31e00b0b | [
"Apache-2.0"
] | null | null | null | scalyr_agent/tests/configuration_docker_test.py | slomo/scalyr-agent-2 | 8726d6c2483c9ceb19a37697f266261c31e00b0b | [
"Apache-2.0"
] | null | null | null | import os
from mock import patch, Mock
from scalyr_agent import scalyr_monitor
from scalyr_agent.builtin_monitors.docker_monitor import DockerMonitor
from scalyr_agent.copying_manager import CopyingManager
from scalyr_agent.monitors_manager import MonitorsManager
from scalyr_agent.json_lib.objects import ArrayOfStrings
from scalyr_agent.test_util import FakeAgentLogger, FakePlatform
from scalyr_agent.tests.configuration_test import TestConfiguration
class TestConfigurationDocker(TestConfiguration):
    """This test subclasses from TestConfiguration for easier exclusion in python 2.5 and below"""
    def _make_monitors_manager(self, config):
        # Build a MonitorsManager whose DockerMonitor skips its real
        # _initialize (no docker daemon needed); returns the manager plus the
        # mock logger installed as the monitor module logger.
        def fake_init(self):
            # Initialize some requisite variables so that the k8s monitor loop can run
            self._DockerMonitor__socket_file = None
            self._DockerMonitor__container_checker = None
            self._DockerMonitor__namespaces_to_ignore = []
            self._DockerMonitor__include_controller_info = None
            self._DockerMonitor__report_container_metrics = None
            self._DockerMonitor__metric_fetcher = None
        mock_logger = Mock()
        @patch.object(DockerMonitor, "_initialize", new=fake_init)
        def create_manager():
            scalyr_monitor.log = mock_logger
            return MonitorsManager(config, FakePlatform([]))
        monitors_manager = create_manager()
        return monitors_manager, mock_logger
    @patch("scalyr_agent.builtin_monitors.docker_monitor.docker")
    def test_environment_aware_module_params(self, mock_docker):
        # Define test values here for all k8s and k8s_event monitor config params that are environment aware.
        # Be sure to use non-default test values
        TEST_INT = 123456789
        TEST_STRING = "dummy string"
        TEST_ARRAY_OF_STRINGS = ["s1", "s2", "s3"]
        STANDARD_PREFIX = "_STANDARD_PREFIX_"  # env var is SCALYR_<param_name>
        # The following map contains config params to be tested
        # config_param_name: (custom_env_name, test_value)
        docker_testmap = {
            "container_check_interval": (STANDARD_PREFIX, TEST_INT, int),
            "docker_api_version": (STANDARD_PREFIX, TEST_STRING, str),
            "docker_log_prefix": (STANDARD_PREFIX, TEST_STRING, str),
            "log_mode": ("SCALYR_DOCKER_LOG_MODE", TEST_STRING, str),
            "docker_raw_logs": (
                STANDARD_PREFIX,
                False,
                bool,
            ),  # test config file is set to True
            "docker_percpu_metrics": (
                STANDARD_PREFIX,
                True,
                bool,
            ),  # test config file is set to False
            "metrics_only": ("SCALYR_DOCKER_METRICS_ONLY", True, bool),
            "container_globs": (STANDARD_PREFIX, TEST_ARRAY_OF_STRINGS, ArrayOfStrings),
            "container_globs_exclude": (
                STANDARD_PREFIX,
                TEST_ARRAY_OF_STRINGS,
                ArrayOfStrings,
            ),
            "report_container_metrics": (STANDARD_PREFIX, False, bool),
            "label_include_globs": (
                STANDARD_PREFIX,
                TEST_ARRAY_OF_STRINGS,
                ArrayOfStrings,
            ),
            "label_exclude_globs": (
                STANDARD_PREFIX,
                TEST_ARRAY_OF_STRINGS,
                ArrayOfStrings,
            ),
            "labels_as_attributes": (STANDARD_PREFIX, True, bool),
            "label_prefix": (STANDARD_PREFIX, TEST_STRING, str),
            "use_labels_for_log_config": (STANDARD_PREFIX, False, bool),
        }
        # Fake the environment varaibles
        for key, value in docker_testmap.items():
            custom_name = value[0]
            env_name = (
                ("SCALYR_%s" % key).upper()
                if custom_name == STANDARD_PREFIX
                else custom_name.upper()
            )
            envar_value = str(value[1])
            if value[2] == ArrayOfStrings:
                # Array of strings should be entered into environment in the user-preferred format
                # which is without square brackets and quotes around each element
                envar_value = envar_value[1:-1]  # strip square brackets
                envar_value = envar_value.replace("'", "")
            else:
                envar_value = (
                    envar_value.lower()
                )  # lower() needed for proper bool encoding
            os.environ[env_name] = envar_value
        # Main config defines the agent; docker_raw_logs is set in the fragment
        # so the env var above conflicts with it on purpose.
        self._write_file_with_separator_conversion(
            """ {
            logs: [ { path:"/var/log/tomcat6/$DIR_VAR.log" }],
            api_key: "abcd1234",
          }
        """
        )
        self._write_config_fragment_file_with_separator_conversion(
            "docker.json",
            """ {
            "monitors": [
                {
                    module: "scalyr_agent.builtin_monitors.docker_monitor",
                    docker_raw_logs: true
                }
            ]
          }
        """,
        )
        config = self._create_test_configuration_instance()
        config.parse()
        monitors_manager, mock_logger = self._make_monitors_manager(config)
        docker_monitor = monitors_manager.monitors[0]
        # All environment-aware params defined in the docker monitor must be gested
        self.assertEquals(
            set(docker_testmap.keys()),
            set(docker_monitor._config._environment_aware_map.keys()),
        )
        # Verify module-level conflicts between env var and config file are logged at module-creation time
        mock_logger.warn.assert_called_with(
            "Conflicting values detected between scalyr_agent.builtin_monitors.docker_monitor config file "
            "parameter `docker_raw_logs` and the environment variable `SCALYR_DOCKER_RAW_LOGS`. "
            "Ignoring environment variable.",
            limit_once_per_x_secs=300,
            limit_key="config_conflict_scalyr_agent.builtin_monitors.docker_monitor_docker_raw_logs_SCALYR_DOCKER_RAW_LOGS",
        )
        CopyingManager(config, monitors_manager.monitors)
        # Override Agent Logger to prevent writing to disk
        for monitor in monitors_manager.monitors:
            monitor._logger = FakeAgentLogger("fake_agent_logger")
        # Verify environment variable values propagate into DockerMonitor monitor MonitorConfig
        monitor_2_testmap = {
            docker_monitor: docker_testmap,
        }
        for monitor, testmap in monitor_2_testmap.items():
            for key, value in testmap.items():
                test_val, convert_to = value[1:]
                if key in ["docker_raw_logs"]:
                    # Keys were defined in config files so should not have changed
                    self.assertNotEquals(
                        test_val, monitor._config.get(key, convert_to=convert_to)
                    )
                else:
                    # Keys were empty in config files so they take on environment values
                    materialized_value = monitor._config.get(key, convert_to=convert_to)
                    if hasattr(test_val, "__iter__"):
                        self.assertEquals(
                            [x1 for x1 in test_val], [x2 for x2 in materialized_value]
                        )
                    else:
                        self.assertEquals(test_val, materialized_value)
    def test_label_include_globs_from_config(self):
        # label_include_globs set directly in the config file must surface in
        # the monitor's MonitorConfig (as a JSON array, not a Python list).
        self._write_file_with_separator_conversion(
            """ {
            api_key: "hi there",
            logs: [ { path:"/var/log/tomcat6/access.log" }],
            monitors: [
                {
                    module: "scalyr_agent.builtin_monitors.docker_monitor",
                    docker_raw_logs: true
                    label_include_globs: ["*glob1*", "*glob2*", "*glob3*"]
                }
            ]
          }
        """
        )
        config = self._create_test_configuration_instance()
        config.parse()
        test_manager, _ = self._make_monitors_manager(config)
        docker_monitor = test_manager.monitors[0]
        include_globs = docker_monitor._config.get("label_include_globs")
        elems = ["*glob1*", "*glob2*", "*glob3*"]
        self.assertNotEquals(elems, include_globs)  # list != JsonArray
        self.assertEquals(elems, [x for x in include_globs])
    def test_label_include_globs_from_environment(self):
        # Comma-separated env vars must be parsed into ArrayOfStrings values
        # when the config file leaves the params unset.
        include_elems = ["*env_glob1*", "*env_glob2*"]
        exclude_elems = [
            "*env_glob1_exclude_1",
            "*env_glob1_exclude_2",
            "*env_glob1_exclude_3",
        ]
        os.environ["SCALYR_LABEL_INCLUDE_GLOBS"] = "*env_glob1*, *env_glob2*"
        os.environ[
            "SCALYR_LABEL_EXCLUDE_GLOBS"
        ] = "*env_glob1_exclude_1, *env_glob1_exclude_2, *env_glob1_exclude_3"
        self._write_file_with_separator_conversion(
            """ {
            api_key: "hi there",
            logs: [ { path:"/var/log/tomcat6/access.log" }],
            monitors: [
                {
                    module: "scalyr_agent.builtin_monitors.docker_monitor",
                    docker_raw_logs: true
                }
            ]
          }
        """
        )
        config = self._create_test_configuration_instance()
        config.parse()
        test_manager, _ = self._make_monitors_manager(config)
        docker_monitor = test_manager.monitors[0]
        include_globs = docker_monitor._config.get("label_include_globs")
        exclude_globs = docker_monitor._config.get("label_exclude_globs")
        self.assertNotEquals(include_elems, include_globs)  # list != ArrayOfStrings
        self.assertNotEquals(exclude_elems, exclude_globs)  # list != ArrayOfStrings
        self.assertEquals(type(include_globs), ArrayOfStrings)
        self.assertEquals(type(exclude_globs), ArrayOfStrings)
        self.assertEquals(include_elems, list(include_globs))
        self.assertEquals(exclude_elems, list(exclude_globs))
| 42.172269 | 124 | 0.609644 |
569baef7e63d7e7ec08b7efe5c5394ac9ac37801 | 3,704 | py | Python | abgthe/apps/profiles/models.py | jomauricio/abgthe | 70004ed1870e3ca0ae2989a2c2be5c67ca93b342 | [
"BSD-3-Clause"
] | null | null | null | abgthe/apps/profiles/models.py | jomauricio/abgthe | 70004ed1870e3ca0ae2989a2c2be5c67ca93b342 | [
"BSD-3-Clause"
] | null | null | null | abgthe/apps/profiles/models.py | jomauricio/abgthe | 70004ed1870e3ca0ae2989a2c2be5c67ca93b342 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from model_utils.models import TimeStampedModel
from model_utils import Choices
from localflavor.br.br_states import STATE_CHOICES
from django.core.urlresolvers import reverse
from abgthe.users.models import User
from django.dispatch import receiver
from allauth.socialaccount.models import SocialAccount
from allauth.account.signals import user_signed_up
from django.core.files import File
from avatar.models import Avatar
from urllib2 import urlopen
import requests
from django.core.files.temp import NamedTemporaryFile
class Profile(TimeStampedModel):
    """User profile with personal/contact details, linked one-to-one to the
    auth user. Verbose names are in Portuguese (user-facing); do not translate."""
    class Meta():
        verbose_name = "Perfil"
        verbose_name_plural ="Perfis"
    GENDER = Choices(('masculino', _('Masculino')), ('feminino', _('Feminino')))
    user = models.OneToOneField(settings.AUTH_USER_MODEL, verbose_name="Usuario", related_name="profile", editable=False)
    first_name = models.CharField("Primeiro nome", max_length=30, blank=True)
    last_name = models.CharField("Segundo nome", max_length=30, blank=True)
    description = models.TextField("Descrição", blank=True)
    birthday = models.DateField("Nascimento", max_length=10, blank=True, null=True)
    gender = models.CharField("Sexo", choices=GENDER, max_length=20, blank=True)
    cep = models.CharField("CEP", max_length=10, blank=True)
    address = models.CharField("Endereço", max_length=200, blank=True)
    cel_phone = models.CharField("Celular", max_length=15, blank=True)
    home_phone = models.CharField("Fixo", max_length=15, blank=True)
    state = models.CharField("Estado", choices=STATE_CHOICES, max_length=30, blank=True)
    city = models.CharField("Cidade", max_length=100, blank=True)
    # Mirrors the auth username; populated elsewhere (editable=False).
    username = models.CharField("username", unique=True, max_length=30, editable=False)
    def __unicode__(self):
        # Display full name when available, otherwise fall back to the username.
        if self.first_name and self.last_name:
            return u'%s %s' % (self.first_name, self.last_name)
        else:
            return self.username
    def get_absolute_url(self):
        return reverse('profiles:profile_detail', args=[self.username])
    def save(self, *args, **kwargs):
        # Keep the linked auth user's name fields in sync with the profile.
        u = self.user
        u.first_name = self.first_name
        u.last_name = self.last_name
        u.save()
        self.user = u
        super(Profile, self).save(*args, **kwargs)
    def download_avatar(self, url):
        """Download *url* into a temporary file and return it wrapped in a
        Django ``File`` suitable for assignment to a file field."""
        r = requests.get(url)
        img_temp = NamedTemporaryFile(delete=True)
        img_temp.write(r.content)
        img_temp.flush()
        img_temp.seek(0)
        return File(img_temp)
@receiver(user_signed_up)
def user_signed_up_(request, user, sociallogin=None, **kwargs):
    '''
    When a social account is created successfully and this signal is received,
    django-allauth passes in the sociallogin param, giving access to metadata on the remote account, e.g.:

    sociallogin.account.provider  # e.g. 'twitter'
    sociallogin.account.get_avatar_url()
    sociallogin.account.get_profile_url()
    sociallogin.account.extra_data['screen_name']

    See the socialaccount_socialaccount table for more in the 'extra_data' field.
    '''
    if sociallogin:
        # Extract first / last names from social nets and store on Profile record
        if sociallogin.account.provider == 'facebook':
            user.profile.first_name = sociallogin.account.extra_data['first_name']
            user.profile.last_name = sociallogin.account.extra_data['last_name']

        if sociallogin.account.provider == 'google':
            user.profile.first_name = sociallogin.account.extra_data['given_name']
            user.profile.last_name = sociallogin.account.extra_data['family_name']

        user.profile.save()

        # Fetch the remote avatar and store it as the user's primary avatar.
        # (Bug fix: this was previously assigned to 'mage_avatar', leaving
        # 'image_avatar' undefined on the next line -> NameError at runtime.)
        image_avatar = user.profile.download_avatar(sociallogin.account.get_avatar_url())
        avatar = Avatar(user=user, primary=True, avatar=image_avatar)
        avatar.save()
e3c8a84c21c6218b21e76c14429164a517f7d756 | 241 | py | Python | setup.py | AbdelrahmanKhaled95/BScWSD | 7524f37f80dc847bff17125c677aa8f65afd9c97 | [
"Unlicense"
] | null | null | null | setup.py | AbdelrahmanKhaled95/BScWSD | 7524f37f80dc847bff17125c677aa8f65afd9c97 | [
"Unlicense"
] | null | null | null | setup.py | AbdelrahmanKhaled95/BScWSD | 7524f37f80dc847bff17125c677aa8f65afd9c97 | [
"Unlicense"
] | null | null | null | from distutils.core import setup
# Package metadata for distribution; 'wsd' is the only shipped package.
setup(
    name='BScWSD',
    version='0.1',
    packages=['wsd',],
    description='Python WSD',
    long_description='A collection of different Python algorithms of Word Sense Disambiguation Systems(WSD) ',
)
6b136d3e954c21651c0e846ce812a7cc0037a651 | 18,353 | py | Python | src/pypipegraph2/ppg1_compatibility.py | IMTMarburg/pypipegraph2 | 182f04481f0b0b6b1c05cfbe23549714af5cfbcc | [
"MIT"
] | null | null | null | src/pypipegraph2/ppg1_compatibility.py | IMTMarburg/pypipegraph2 | 182f04481f0b0b6b1c05cfbe23549714af5cfbcc | [
"MIT"
] | null | null | null | src/pypipegraph2/ppg1_compatibility.py | IMTMarburg/pypipegraph2 | 182f04481f0b0b6b1c05cfbe23549714af5cfbcc | [
"MIT"
] | null | null | null | from pathlib import Path
import types
import inspect
import os
import sys
import logging
import pypipegraph as ppg1
import pypipegraph.testing
import pypipegraph.testing.fixtures
import pypipegraph2 as ppg2
import pypipegraph2.testing
import wrapt
import importlib
from .util import log_info, log_error, log_warning, log_debug, log_job_trace
# Saved ppg1 module attributes and sys.modules entries, so unreplace_ppg1()
# can restore the genuine ppg1 package later.
old_entries = {}
old_modules = {}
# Guards replace_ppg1()/unreplace_ppg1() against double application.
patched = False

# Maps ppg1 exception class names to the ppg2.exceptions names that replace
# them when the compatibility layer is installed.
exception_map = {
    "RuntimeError": "JobsFailed",
    "JobContractError": "JobContractError",
    "PyPipeGraphError": "FatalGraphException",
    "CycleError": "NotADag",
    "JobDiedException": "JobDied",
    "RuntimeException": "RunFailedInternally",
}
def replace_ppg1():
    """Turn all ppg1 references into actual ppg2
    objects.

    Best effort, but the commonly used API should be well supported.
    Try to do this before anything imports ppg1.
    """
    global patched
    if patched:
        return
    # Stash and strip everything from the real ppg1 package so we can
    # restore it later (see unreplace_ppg1).
    for x in dir(ppg1):
        old_entries[x] = getattr(ppg1, x)
        delattr(ppg1, x)
    # Swap out the ppg1 submodules that user code commonly imports directly.
    for module_name, replacement in {
        "pypipegraph.job": job,
        "pypipegraph.testing": ppg2.testing,
        "pypipegraph.testing.fixtures": ppg2.testing.fixtures,
    }.items():
        if module_name not in sys.modules:
            importlib.import_module(module_name)
        old_modules[module_name] = sys.modules[module_name]
        sys.modules[module_name] = replacement
    # ppg1.__name__ == "pypipegraph2"
    # ppg1.__file__ == __file__
    # ppg1.__path__ == __path__
    # ppg1.__loader__ == __loader__
    # ppg1.__spec__ == __spec__
    # ppg1.__version__ == ppg2.__version__
    # Bug fix: this used '==' (a no-op comparison), so the replacement
    # docstring was never actually assigned.
    ppg1.__doc__ = """ppg1->2 compatibility layer.
    Supports the commonly used the old ppg1 API
    with ppg2 objects. Aspires to be a drop-in replacement.
    """

    for old, new in exception_map.items():
        setattr(ppg1, old, getattr(ppg2.exceptions, new))

    # invariants
    ppg1.ParameterInvariant = ParameterInvariant
    ppg1.FileInvariant = FileInvariant
    ppg1.FileTimeInvariant = FileInvariant
    ppg1.RobustFileChecksumInvariant = FileInvariant
    ppg1.FileChecksumInvariant = FileInvariant
    ppg1.FunctionInvariant = FunctionInvariant
    ppg1.MultiFileInvariant = MultiFileInvariant

    ppg1.MultiFileGeneratingJob = MultiFileGeneratingJob
    ppg1.FileGeneratingJob = FileGeneratingJob
    ppg1.CachedAttributeLoadingJob = CachedAttributeLoadingJob
    ppg1.CachedDataLoadingJob = CachedDataLoadingJob
    ppg1.TempFileGeneratingJob = TempFileGeneratingJob
    ppg1.MultiTempFileGeneratingJob = MultiTempFileGeneratingJob
    ppg1.PlotJob = PlotJob

    ppg1.Job = ppg2.Job  # don't wrap, we need the inheritance
    ppg1.DataLoadingJob = wrap_job(ppg2.DataLoadingJob)
    ppg1.AttributeLoadingJob = wrap_job(ppg2.AttributeLoadingJob)
    ppg1.JobGeneratingJob = wrap_job(ppg2.JobGeneratingJob)

    # unsupported - these raise NotImplementedError when instantiated.
    for k in (
        "NotebookJob",
        "DependencyInjectionJob",
        "TempFilePlusGeneratingJob",
        "MemMappedDataLoadingJob",
        "FinalJob",
        "CombinedPlotJob",
        "NothingChanged",  # very implementation detail...
    ):
        setattr(ppg1, k, unsupported(k))

    # misc
    ppg1.resource_coordinators = ResourceCoordinators
    ppg1.new_pipegraph = new_pipegraph
    ppg1.run_pipegraph = run_pipegraph
    ppg1.util = util
    ppg1.graph = graph
    ppg1.job = job
    ppg1.JobList = ppg2.JobList
    ppg1.ppg_exceptions = ppg_exceptions
    ppg1.inside_ppg = ppg2.inside_ppg
    ppg1.assert_uniqueness_of_object = ppg2.assert_uniqueness_of_object
    ppg1.is_ppg2 = True
    ppg1.testing = ppg2.testing
    ppg1.testing.fixtures = ppg2.testing.fixtures

    # todo: list unpatched...
    # kept for the (commented-out) porting-audit snippet below.
    new_entries = set(dir(ppg1))
    # this was used to find unported code.
    # for k in set(old_entries).difference(new_entries):  # pragma: no cover
    # if not k.startswith("__") and k != "all":
    # warnings.warn(f"not yet ppg1-compatibility layer implemented: {k}")
    patched = True
def unreplace_ppg1():
    """Turn ppg1 compatibility layer off, restoring ppg1

    not that well tested, I suppose...
    """
    global patched
    if not patched:
        return
    # Remove everything replace_ppg1() installed...
    for x in dir(ppg1):
        delattr(ppg1, x)
    # ...then restore the stashed originals (module attributes and
    # sys.modules entries alike).
    for k, v in old_entries.items():
        setattr(ppg1, k, v)
    for k, v in old_modules.items():
        sys.modules[k] = v
    ppg1.testing = old_modules["pypipegraph.testing"]
    ppg1.testing.fixtures = old_modules["pypipegraph.testing.fixtures"]
    patched = False
def wrap_job(cls):
    """Adapt for ppg1 api idiosyncracies.

    Returns a factory that builds an instance of *cls* and wraps it in a
    PPG1Adaptor proxy.
    """
    def _factory(*args, **kwargs):
        instance = cls(*args, **kwargs)
        return PPG1Adaptor(instance)

    return _factory
class ResourceCoordinators:
    # ppg1's resource coordinator factory; in ppg2 this collapses to a
    # simple (cores, interactive) tuple consumed by new_pipegraph().
    def LocalSystem(max_cores_to_use=ppg2.ALL_CORES, profile=False, interactive=True):
        # 'profile' is accepted for API compatibility but unused here.
        return (max_cores_to_use, interactive)
class Util:
    """Stand-in for ppg1's ``util`` module, backed by ppg2 state."""

    @property
    def global_pipegraph(self):
        # Re-import on every access so we always see the current graph.
        from . import global_pipegraph

        return global_pipegraph

    @global_pipegraph.setter
    def global_pipegraph(self, value):
        from . import change_global_pipegraph

        change_global_pipegraph(value)

    @staticmethod
    def checksum_file(filename):
        """was used by outside functions"""
        import stat as stat_module
        import hashlib

        file_size = os.stat(filename)[stat_module.ST_SIZE]
        if file_size > 200 * 1024 * 1024:  # pragma: no cover
            print("Taking md5 of large file", filename)
        with open(filename, "rb") as op:
            # Stream in 10 MiB chunks so huge files don't load into memory.
            block_size = 1024 ** 2 * 10
            block = op.read(block_size)
            _hash = hashlib.md5()
            while block:
                _hash.update(block)
                block = op.read(block_size)
            res = _hash.hexdigest()
        return res
def job_or_filename(job_or_filename, invariant_class=None):
    """Resolve *job_or_filename* via ppg2, defaulting the invariant class
    to the ppg1-compatible FileInvariant wrapper (we want to return the
    wrapped class)."""
    chosen = FileInvariant if invariant_class is None else invariant_class
    return ppg2.util.job_or_filename(job_or_filename, chosen)
# Module-level singleton that impersonates ppg1's util module; top up the
# functions ppg1 code expects to find on it.
util = Util()
util.job_or_filename = job_or_filename
util.inside_ppg = ppg2.inside_ppg
util.assert_uniqueness_of_object = ppg2.assert_uniqueness_of_object
util.flatten_jobs = ppg2.util.flatten_jobs
util.freeze = ppg2.jobs.ParameterInvariant.freeze
class PPGExceptions:
    """Namespace object mimicking ppg1's ppg_exceptions module."""
    pass


# Populate with the ppg2 exception classes under their ppg1 names.
ppg_exceptions = PPGExceptions()
for old, new in exception_map.items():
    setattr(ppg_exceptions, old, getattr(ppg2.exceptions, new))
# earlier on, we had a different pickling scheme,
# and that's what the files were called.
# Pick the default invariant-status directory name: keep the legacy name if
# it already exists, or when sys.argv[0] looks like a path; otherwise derive
# a per-script name.
if os.path.exists(".pypipegraph_status_robust"):  # old projects keep their filename
    invariant_status_filename_default = ".pypipegraph_status_robust"  # pragma: no cover
elif "/" in sys.argv[0]:  # no script name but an executable?
    invariant_status_filename_default = ".pypipegraph_status_robust"
else:
    # script specific pipegraphs
    invariant_status_filename_default = (
        ".ppg_status_%s" % sys.argv[0]
    )  # pragma: no cover
class Graph:
    # Stand-in for ppg1's 'graph' module attribute surface.
    invariant_status_filename_default = invariant_status_filename_default


graph = Graph()


class Job:
    # Stand-in for ppg1's 'job' module; _InvariantJob was a ppg1 base class.
    _InvariantJob = ppg2.jobs._InvariantMixin
    pass


job = Job()
job.function_to_str = ppg2.FunctionInvariant.function_to_str
class FakeRC:
    """Minimal stand-in for a ppg1 resource coordinator (exposes only
    cores_available, read from the current global pipegraph)."""

    @property
    def cores_available(self):
        return ppg2.global_pipegraph.cores
def new_pipegraph(
    resource_coordinator=None,
    quiet=False,
    invariant_status_filename=None,
    dump_graph=True,
    interactive=True,
    cache_folder="cache",
    log_file=None,
    log_level=logging.ERROR,
):
    """ppg1-compatible graph constructor, mapped onto ppg2.new().

    ``quiet``, ``dump_graph`` and ``log_file`` are accepted for signature
    compatibility but not used below.
    """
    cores = ppg2.ALL_CORES
    run_mode = ppg2.RunMode.CONSOLE
    if resource_coordinator:
        # resource_coordinator is the (cores, interactive) tuple produced
        # by ResourceCoordinators.LocalSystem.
        cores = resource_coordinator[0]
        interactive = resource_coordinator[1]  # rc overrides interactive setting
    if interactive:  # otherwise, we read the one passed into the function
        run_mode = ppg2.RunMode.CONSOLE
    else:
        run_mode = ppg2.RunMode.NONINTERACTIVE
    kwargs = {}
    if invariant_status_filename:
        # ppg1's single status file becomes a directory tree in ppg2.
        invariant_status_filename = Path(invariant_status_filename)
        kwargs["log_dir"] = invariant_status_filename / "logs"
        kwargs["error_dir"] = invariant_status_filename / "errors"
        kwargs["history_dir"] = invariant_status_filename / "history"
        kwargs["run_dir"] = invariant_status_filename / "run"
    kwargs["allow_short_filenames"] = False  # as was the default for ppg1
    kwargs["prevent_absolute_paths"] = False  # as was the default for ppg1

    res = ppg2.new(
        cores=cores,
        run_mode=run_mode,
        log_level=log_level,
        cache_dir=Path(cache_folder),
        **kwargs,
    )
    _add_graph_comp(res)
    return res
def _add_graph_comp(graph):
    """Attach ppg1-compatibility attributes to a fresh ppg2 graph and make
    it the global pipegraph."""
    graph.cache_folder = graph.cache_dir  # ppg1 compatibility
    graph.rc = FakeRC()
    util.global_pipegraph = graph
def run_pipegraph(*args, **kwargs):
    """Run the current global pipegraph.

    Positional arguments are accepted for ppg1 signature compatibility;
    keyword arguments are forwarded to ppg2.run().
    """
    current = util.global_pipegraph
    if current is None:
        raise ValueError("You need to call new_pipegraph first")
    ppg2.run(**kwargs)
def _ignore_code_changes(job):
    """Implements ppg1's ignore_code_changes(): detach the job's function
    invariant from the dependency graph (and recurse into .lfg for
    cached-job pairs)."""
    job.depend_on_function = False
    if hasattr(job, "func_invariant"):
        log_job_trace(f"ignoring changes for {job.job_id}")
        # Cut the edge from the function invariant to this job.
        util.global_pipegraph.job_dag.remove_edge(job.func_invariant.job_id, job.job_id)
        if hasattr(job.func_invariant, 'usage_counter'):
            job.func_invariant.usage_counter -= 1
        # Drop the invariant node entirely once no other job uses it.
        if not hasattr(job.func_invariant, 'usage_counter') or job.func_invariant.usage_counter == 0:
            util.global_pipegraph.job_dag.remove_node(job.func_invariant.job_id)
            for k in job.func_invariant.outputs:
                util.global_pipegraph.job_inputs[job.job_id].remove(k)
            del util.global_pipegraph.jobs[job.func_invariant.job_id]
        del job.func_invariant
    if hasattr(job, "lfg"):
        # Cached jobs carry their calculating job in .lfg - treat it too.
        _ignore_code_changes(job.lfg)
class PPG1AdaptorBase:
    """Mixin adding ppg1-era methods/properties on top of ppg2 jobs."""

    def ignore_code_changes(self):
        _ignore_code_changes(self)

    def use_cores(self, value):
        # ppg1 fluent API: set core requirement, return self for chaining.
        self.cores_needed = value
        return self

    @property
    def cores_needed(self):
        # Map ppg2's Resources enum back onto ppg1's integer convention:
        # -1 = all cores, -2 = exclusive, 1 = single core.
        res = None
        if self.resources == ppg2.Resources.AllCores:
            res = -1
        elif self.resources == ppg2.Resources.Exclusive:
            res = -2
        else:
            res = 1
        return res

    @cores_needed.setter
    def cores_needed(self, value):
        if value == -1 or value > 1:
            self.use_resources(ppg2.Resources.AllCores)
        elif value == -2:
            self.use_resources(ppg2.Resources.Exclusive)
        else:  # elif value == 1:
            self.use_resources(ppg2.Resources.SingleCore)

    def depends_on(self, *args):  # keep the wrapper
        # When used as a wrapt proxy (PPG1Adaptor), delegate to the wrapped
        # ppg2 job; otherwise fall through to the ppg2 base class.
        if hasattr(self, "__wrapped__"):
            res = self.__wrapped__.depends_on(*args)
        else:
            super().depends_on(*args)
        return self

    def depends_on_file(self, filename):
        job = FileInvariant(filename)
        self.depends_on(job)
        return ppg2.jobs.DependsOnInvariant(job, self)

    def depends_on_params(self, params):
        job = ParameterInvariant(self.job_id, params)
        self.depends_on(job)
        return ppg2.jobs.DependsOnInvariant(job, self)

    @property
    def filenames(self):
        # ppg1 name for ppg2's .files
        return self.files

    @property
    def prerequisites(self):
        # ppg1 name for ppg2's .upstreams
        return self.upstreams
class PPG1Adaptor(wrapt.ObjectProxy, PPG1AdaptorBase):
    """Transparent proxy grafting the ppg1 adaptor API onto an existing
    ppg2 job instance (via wrapt.ObjectProxy)."""
    pass
class FileInvariant(PPG1AdaptorBase, ppg2.FileInvariant):
    """ppg2 FileInvariant with the ppg1 adaptor API mixed in."""
    pass
class FunctionInvariant(PPG1AdaptorBase, ppg2.FunctionInvariant):
    """ppg2 FunctionInvariant with the ppg1 adaptor API mixed in."""
    pass
class ParameterInvariant(PPG1AdaptorBase, ppg2.ParameterInvariant):
    """ppg2 ParameterInvariant with the ppg1 adaptor API mixed in."""
    pass
def assert_ppg_created():
    """Raise ValueError unless a global pipegraph has been instantiated."""
    if util.global_pipegraph:
        return
    raise ValueError("Must instantiate a pipegraph before creating any Jobs")
def _first_param_empty(signature):
"""Check whether the first argument to this call is
empty, ie. no with a default value"""
try:
first = next((signature.parameters.items()).__iter__())
return first[1].default == inspect._empty
except StopIteration:
return True
def _wrap_func_if_no_output_file_params(function, accept_all_defaults=False):
    """Bridge ppg1 callbacks (no output-filename argument) to ppg2's
    calling convention (callback receives the output file(s)).

    If *function* takes no required first parameter, wrap it in a function
    that calls it without arguments and then verifies the declared output
    files exist. Otherwise return *function* unchanged. Raises TypeError
    for a defaulted first parameter unless accept_all_defaults is set.
    """
    sig = inspect.signature(function)
    if len(sig.parameters) == 0 or not _first_param_empty(sig):
        # no or only default parameters = do it oldstyle.
        if not accept_all_defaults and not _first_param_empty(sig):
            raise TypeError(
                f"Could not correctly wrap {function}.\n"
                f"{ppg2.FunctionInvariant.function_to_str(function)}\n"
                "It has default parameter that would have been replaced "
                "with output_filename in ppg1 already. Fix your function arguments"
            )

        def wrapper(of):  # pragma: no cover - runs in spawned process
            function()
            if not isinstance(of, list):
                of = [of]
            # ppg1 semantics: the job must have produced every output file.
            for a_filename in of:
                if not a_filename.exists():
                    raise ppg2.exceptions.JobContractError(
                        "%s did not create its file(s) %s %s\n.Cwd: %s"
                        % (
                            a_filename,
                            function.__code__.co_filename,
                            function.__code__.co_firstlineno,
                            os.path.abspath(os.getcwd()),
                        )
                    )

        # Keep the original reachable for invariants/introspection.
        wrapper.wrapped_function = function
        func = wrapper
    else:
        func = function
    return func
class FileGeneratingJob(PPG1AdaptorBase, ppg2.FileGeneratingJob):
    """ppg1-compatible FileGeneratingJob: accepts the ppg1-only
    rename_broken argument (ignored) and wraps no-argument callbacks."""

    def __new__(cls, *args, **kwargs):
        # Delegates straight to the ppg2 base __new__ - NOTE(review):
        # appears redundant; presumably kept to pin instance creation to
        # ppg2.FileGeneratingJob.
        obj = ppg2.FileGeneratingJob.__new__(cls, *args, **kwargs)
        return obj

    def __init__(self, output_filename, function, rename_broken=False, empty_ok=False):
        func = _wrap_func_if_no_output_file_params(function)
        super().__init__(output_filename, func, empty_ok=empty_ok)
class MultiFileGeneratingJob(PPG1AdaptorBase, ppg2.MultiFileGeneratingJob):
    """ppg1-compatible MultiFileGeneratingJob: accepts the ppg1-only
    rename_broken argument (ignored) and wraps no-argument callbacks."""

    def __init__(self, output_filenames, function, rename_broken=False, empty_ok=False):
        # Accept no-argument ppg1 callbacks even when they have defaults.
        func = _wrap_func_if_no_output_file_params(function, accept_all_defaults=True)
        # (Dropped a useless 'res =' binding - __init__ always returns None.)
        super().__init__(output_filenames, func, empty_ok=empty_ok)
class TempFileGeneratingJob(PPG1AdaptorBase, ppg2.TempFileGeneratingJob):
    """ppg1-compatible TempFileGeneratingJob (rename_broken is ignored)."""

    def __init__(self, output_filename, function, rename_broken=False):
        func = _wrap_func_if_no_output_file_params(function)
        super().__init__(output_filename, func)
class MultiTempFileGeneratingJob(PPG1AdaptorBase, ppg2.MultiTempFileGeneratingJob):
    """ppg1-compatible MultiTempFileGeneratingJob (rename_broken is ignored)."""

    def __init__(self, output_filenames, function, rename_broken=False):
        func = _wrap_func_if_no_output_file_params(function, accept_all_defaults=True)
        super().__init__(output_filenames, func)
def MultiFileInvariant(filenames):
    """Return one FileInvariant per file.

    ppg2 already detects when invariants are gained and lost, so no special
    MultiFileInvariant class is needed anymore.
    """
    return [ppg2.FileInvariant(fn) for fn in filenames]
# no one inherits from these, so wrapping in a function is ok, I suppose
# they should have been functions in ppg1.e..
def CachedAttributeLoadingJob(
    cache_filename, target_object, target_attribute, calculating_function
):
    """ppg1-style factory: build the ppg2 cached-attribute job pair and
    wrap it in the old single-job-with-.lfg interface.

    Translates ppg2's JobRedefinitionError into ppg1's JobContractError.
    """
    try:
        job = ppg2.CachedAttributeLoadingJob(
            cache_filename, target_object, target_attribute, calculating_function
        )
    except ppg2.JobRedefinitionError as e:
        raise ppg1.JobContractError(str(e))
    return wrap_old_style_lfg_cached_job(job)
def CachedDataLoadingJob(cache_filename, calculating_function, loading_function):
    """ppg1-style factory: build the ppg2 cached-data-loading job pair and
    wrap it in the old single-job-with-.lfg interface."""
    new_style = ppg2.CachedDataLoadingJob(
        cache_filename, calculating_function, loading_function
    )
    return wrap_old_style_lfg_cached_job(new_style)
def PlotJob(
    output_filename,
    calc_function,
    plot_function,
    render_args=None,
    skip_table=False,
    skip_caching=False,
):
    """ppg1-style PlotJob factory on top of ppg2.PlotJob.

    Returns the plot job wrapped in a PPG1Adaptor, with .cache_job and
    .table_job attached, and depends_on/ignore_code_changes rebound to the
    ppg1 semantics (dependencies go to the cache job when present).
    """
    pj = ppg2.PlotJob(
        output_filename,
        calc_function,
        plot_function,
        render_args=render_args,
        cache_calc=not skip_caching,
        create_table=not skip_table,
    )
    res = pj.plot
    if isinstance(pj.cache, ppg2.jobs.CachedJobTuple):
        res.cache_job = wrap_old_style_lfg_cached_job(pj.cache)
    else:
        res.cache_job = pj.cache
    res.table_job = pj.table

    res = PPG1Adaptor(res)

    def depends_on(
        self,
        *other_jobs,
    ):
        # FileGeneratingJob.depends_on(self, other_job) # just like the cached jobs, the plotting does not depend on the loading of prerequisites
        if res.cache_job is None:
            # No cache: the plot (and table) depend directly.
            ppg2.Job.depends_on(self, *other_jobs)
            if self.table_job is not None:
                self.table_job.depends_on(*other_jobs)
        elif (
            hasattr(self, "cache_job") and other_jobs[0] is not self.cache_job
        ):  # activate this after we have added the invariants...
            # With a cache, dependencies attach to the calculation side.
            self.cache_job.depends_on(*other_jobs)
        return self

    res.depends_on = types.MethodType(depends_on, res)

    def ignore_code_changes(self):
        _ignore_code_changes(self)
        if self.cache_job is not None:
            _ignore_code_changes(self.cache_job)
        if self.table_job is not None:
            _ignore_code_changes(self.table_job)

    res.ignore_code_changes = types.MethodType(ignore_code_changes, res)
    return res
def wrap_old_style_lfg_cached_job(job):
    """Turn a ppg2 (calc, load) cached-job pair into the ppg1 shape:
    a single 'load' job carrying the calculating job as .lfg, with
    depends_on/use_cores redirected to the calculating side."""
    # adapt new style to old style
    if hasattr(job.load, "__wrapped__"):  # pragma: no cover
        res = job.load
        res.lfg = job.calc  # just assume it's a PPG1Adaptor
    else:
        res = PPG1Adaptor(job.load)
        res.lfg = PPG1Adaptor(job.calc)

    def depends_on(self, *args, **kwargs):
        if args and args[0] == self.lfg:  # repeated definition, I suppose
            # must not call self.__wrapped__.depends_on - that's a recursion for some reason?
            ppg2.Job.depends_on(self, *args, **kwargs)
        else:
            # ppg1 semantics: prerequisites belong to the calculating job.
            self.lfg.depends_on(*args, **kwargs)
        return self

    res.depends_on = depends_on.__get__(res)

    def use_cores(self, cores):
        self.lfg.use_cores(cores)
        return self

    res.use_cores = use_cores.__get__(res)
    return res
def unsupported(name):
    """Return a stand-in callable that raises NotImplementedError for
    ppg1 features that ppg2 no longer provides."""
    def _raiser():
        raise NotImplementedError(f"ppg2 no longer offers {name}")

    return _raiser
| 31.10678 | 146 | 0.678581 |
a5df32463ecbf0b903539b9efefca46ed8319d45 | 7,349 | py | Python | acos_client/tests/unit/v30/test_bladeparam.py | dpunkturban/acos-client | cd90feeecea7fcfb833b15930e8979996f32ff1f | [
"Apache-2.0"
] | 1 | 2021-01-05T08:14:49.000Z | 2021-01-05T08:14:49.000Z | acos_client/tests/unit/v30/test_bladeparam.py | ytsai-a10/acos-client | 62b8387d25717907770e9be697db74cf0401fa09 | [
"Apache-2.0"
] | null | null | null | acos_client/tests/unit/v30/test_bladeparam.py | ytsai-a10/acos-client | 62b8387d25717907770e9be697db74cf0401fa09 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2016, A10 Networks Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
try:
import unittest
from unittest import mock
except ImportError:
import mock
import unittest2 as unittest
from acos_client.v30.vrrpa import blade_params
class TestBlade(unittest.TestCase):
    """Unit tests for vrrpa BladeParameters: verifies the URL and JSON
    payload sent for get/create/update, with priority, interface-tracking
    and IPv4/IPv6 gateway-tracking options, against a mocked HTTP client."""

    def setUp(self):
        # The HTTP layer is fully mocked; assertions inspect request calls.
        self.client = mock.MagicMock()
        self.target = blade_params.BladeParameters(self.client)
        self.url_prefix = "/axapi/v3/vrrp-a/vrid/{0}/blade-parameters"

    def _expected_payload(self, priority=None, interface=None, gateway=None):
        """Build the expected request body from the given options."""
        rv = {'blade-parameters': {}}

        if priority:
            rv['blade-parameters']['priority'] = priority
        if interface:
            rv['blade-parameters']['tracking-options'] = interface
        if gateway:
            # Merge into tracking-options when an interface is also present.
            if rv['blade-parameters'].get('tracking-options'):
                rv['blade-parameters']['tracking-options'].update(gateway)
            else:
                rv['blade-parameters']['tracking-options'] = gateway
        return rv

    def _build_interface(self, ethernet=1, priority_cost=1):
        rv = {
            'interface': [{
                'ethernet': ethernet,
                'priority-cost': priority_cost
            }]
        }
        return rv

    def _build_ipv4gateway(self, ip_address, priority_cost=1):
        rv = {
            'gateway': {
                'ipv4-gateway-list': [{
                    'ip-address': ip_address,
                    'priority-cost': priority_cost
                }],
                'ipv6-gateway-list': []
            }
        }
        return rv

    def _build_ipv6gateway(self, ip_address, priority_cost=1):
        rv = {
            'gateway': {
                'ipv6-gateway-list': [{
                    'ip-address': ip_address,
                    'priority-cost': priority_cost
                }],
                'ipv4-gateway-list': []
            }
        }
        return rv

    def test_blade_get(self):
        self.target.get(0)
        self.client.http.request.assert_called_with("GET", self.url_prefix.format(0), {}, mock.ANY,
                                                    axapi_args=None, max_retries=None, timeout=None)

    def test_blade_create(self):
        self.target.create(4)
        self.client.http.request.assert_called_with(
            "POST", self.url_prefix.format(4),
            self._expected_payload(), mock.ANY,
            axapi_args=None, max_retries=None, timeout=None)

    def test_blade_create_priority(self):
        self.target.create(4, 122)
        self.client.http.request.assert_called_with(
            "POST", self.url_prefix.format(4),
            self._expected_payload(122), mock.ANY,
            axapi_args=None, max_retries=None, timeout=None)

    def test_blade_create_interface(self):
        interface = self._build_interface()
        self.target.add_interface()
        self.target.create(4)
        self.client.http.request.assert_called_with(
            "POST", self.url_prefix.format(4),
            self._expected_payload(interface=interface), mock.ANY,
            axapi_args=None, max_retries=None, timeout=None)

    def test_blade_create_gateway(self):
        gateway = self._build_ipv4gateway('1.1.1.1')
        self.target.add_ipv4gateway('1.1.1.1')
        self.target.create(4)
        self.client.http.request.assert_called_with(
            "POST", self.url_prefix.format(4),
            self._expected_payload(gateway=gateway), mock.ANY,
            axapi_args=None, max_retries=None, timeout=None)

    def test_blade_create_gateway_ipv6(self):
        gateway = self._build_ipv6gateway('1.1.1.1')
        self.target.add_ipv6gateway('1.1.1.1')
        self.target.create(4)
        self.client.http.request.assert_called_with(
            "POST", self.url_prefix.format(4),
            self._expected_payload(gateway=gateway), mock.ANY,
            axapi_args=None, max_retries=None, timeout=None)

    def test_blade_create_interface_gateway(self):
        interface = self._build_interface()
        gateway = self._build_ipv4gateway('1.1.1.1')
        self.target.add_interface()
        self.target.add_ipv4gateway('1.1.1.1')
        self.target.create(4)
        self.client.http.request.assert_called_with(
            "POST", self.url_prefix.format(4),
            self._expected_payload(interface=interface, gateway=gateway), mock.ANY,
            axapi_args=None, max_retries=None, timeout=None)

    def test_blade_update(self):
        self.target.update(4)
        self.client.http.request.assert_called_with(
            "PUT", self.url_prefix.format(4),
            self._expected_payload(), mock.ANY,
            axapi_args=None, max_retries=None, timeout=None)

    def test_blade_update_priority(self):
        self.target.update(4, 122)
        self.client.http.request.assert_called_with(
            "PUT", self.url_prefix.format(4),
            self._expected_payload(122), mock.ANY,
            axapi_args=None, max_retries=None, timeout=None)

    def test_blade_update_interface(self):
        interface = self._build_interface()
        self.target.add_interface()
        self.target.update(4)
        self.client.http.request.assert_called_with(
            "PUT", self.url_prefix.format(4),
            self._expected_payload(interface=interface), mock.ANY,
            axapi_args=None, max_retries=None, timeout=None)

    def test_blade_update_gateway(self):
        gateway = self._build_ipv4gateway('1.1.1.1')
        self.target.add_ipv4gateway('1.1.1.1')
        self.target.update(4)
        self.client.http.request.assert_called_with(
            "PUT", self.url_prefix.format(4),
            self._expected_payload(gateway=gateway), mock.ANY,
            axapi_args=None, max_retries=None, timeout=None)

    def test_blade_update_gateway_ipv6(self):
        gateway = self._build_ipv6gateway('1.1.1.1')
        self.target.add_ipv6gateway('1.1.1.1')
        self.target.update(4)
        self.client.http.request.assert_called_with(
            "PUT", self.url_prefix.format(4),
            self._expected_payload(gateway=gateway), mock.ANY,
            axapi_args=None, max_retries=None, timeout=None)

    def test_blade_update_interface_gateway(self):
        interface = self._build_interface()
        gateway = self._build_ipv4gateway('1.1.1.1')
        self.target.add_interface()
        self.target.add_ipv4gateway('1.1.1.1')
        self.target.update(4)
        self.client.http.request.assert_called_with(
            "PUT", self.url_prefix.format(4),
            self._expected_payload(interface=interface, gateway=gateway), mock.ANY,
            axapi_args=None, max_retries=None, timeout=None)
4e95ce368fd1b148c9a2c0539026c8ed0103d28c | 3,585 | py | Python | complaint_search/tests/test_views_document.py | DalavanCloud/ccdb5-api | 9e0c4bc6e0d33bff153c1a205c979b7ecaa847da | [
"CC0-1.0"
] | 1 | 2019-02-25T00:49:18.000Z | 2019-02-25T00:49:18.000Z | complaint_search/tests/test_views_document.py | DalavanCloud/ccdb5-api | 9e0c4bc6e0d33bff153c1a205c979b7ecaa847da | [
"CC0-1.0"
] | null | null | null | complaint_search/tests/test_views_document.py | DalavanCloud/ccdb5-api | 9e0c4bc6e0d33bff153c1a205c979b7ecaa847da | [
"CC0-1.0"
] | null | null | null | from django.core.urlresolvers import reverse
from django.core.cache import cache
from rest_framework import status
from rest_framework.test import APITestCase
from unittest import skip
from elasticsearch import TransportError
import mock
from complaint_search.es_interface import document
from complaint_search.throttling import (
DocumentAnonRateThrottle,
_CCDB_UI_URL,
)
class DocumentTests(APITestCase):
    """Tests for the complaint document endpoint: happy path, anonymous
    rate throttling (with the UI referer bypassing the anon limit), and
    Elasticsearch transport-error handling."""

    def setUp(self):
        self.orig_document_anon_rate = DocumentAnonRateThrottle.rate
        # Setting rates to something really big so it doesn't affect testing
        DocumentAnonRateThrottle.rate = '2000/min'

    def tearDown(self):
        # Clear the throttle cache and restore the production rate.
        cache.clear()
        DocumentAnonRateThrottle.rate = self.orig_document_anon_rate

    @mock.patch('complaint_search.es_interface.document')
    def test_document__valid(self, mock_esdocument):
        """
        documenting with an ID
        """
        url = reverse('complaint_search:complaint', kwargs={"id": "123456"})
        mock_esdocument.return_value = 'OK'
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        mock_esdocument.assert_called_once_with("123456")
        self.assertEqual('OK', response.data)

    @mock.patch('complaint_search.es_interface.document')
    def test_document_with_document_anon_rate_throttle(self, mock_esdocument):
        url = reverse('complaint_search:complaint', kwargs={"id": "123456"})
        mock_esdocument.return_value = 'OK'
        # Restore the real (small) rate so the throttle actually triggers.
        DocumentAnonRateThrottle.rate = self.orig_document_anon_rate
        limit = int(self.orig_document_anon_rate.split('/')[0])
        for i in range(limit):
            response = self.client.get(url)
            self.assertEqual(response.status_code, status.HTTP_200_OK)
            self.assertEqual('OK', response.data)

        # The (limit+1)-th anonymous request must be throttled.
        response = self.client.get(url)
        self.assertEqual(
            response.status_code, status.HTTP_429_TOO_MANY_REQUESTS
        )
        self.assertIsNotNone(response.data.get('detail'))
        self.assertIn("Request was throttled", response.data.get('detail'))
        self.assertEqual(limit, mock_esdocument.call_count)
        self.assertEqual(5, limit)

    @mock.patch('complaint_search.es_interface.document')
    def test_document_with_document_ui_rate_throttle(self, mock_esdocument):
        url = reverse('complaint_search:complaint', kwargs={"id": "123456"})
        mock_esdocument.return_value = 'OK'
        DocumentAnonRateThrottle.rate = self.orig_document_anon_rate
        limit = int(self.orig_document_anon_rate.split('/')[0])
        for _ in range(limit):
            response = self.client.get(url, HTTP_REFERER=_CCDB_UI_URL)
            self.assertEqual(response.status_code, status.HTTP_200_OK)
            self.assertEqual('OK', response.data)

        # Requests coming from the CCDB UI referer are not anon-throttled.
        response = self.client.get(url, HTTP_REFERER=_CCDB_UI_URL)
        self.assertEqual(response.status_code, 200)
        self.assertEqual('OK', response.data)
        self.assertEqual(limit + 1, mock_esdocument.call_count)
        self.assertEqual(5, limit)

    @mock.patch('complaint_search.es_interface.document')
    def test_document__transport_error(self, mock_esdocument):
        # Elasticsearch failures surface as a 424 with a fixed error body.
        mock_esdocument.side_effect = TransportError('N/A', "Error")
        url = reverse('complaint_search:complaint', kwargs={"id": "123456"})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 424)
        self.assertDictEqual(
            {"error": "There was an error calling Elasticsearch"},
            response.data
        )
| 41.686047 | 78 | 0.700976 |
c82f700b34d832fe92a69ee8cab5e800061e823d | 744 | py | Python | levelworks/levelweb/urls.py | benNthen/levelworks-site | 9570bcfd4cbaa3719102d21927f709ce9e0f865c | [
"Apache-2.0"
] | null | null | null | levelworks/levelweb/urls.py | benNthen/levelworks-site | 9570bcfd4cbaa3719102d21927f709ce9e0f865c | [
"Apache-2.0"
] | null | null | null | levelworks/levelweb/urls.py | benNthen/levelworks-site | 9570bcfd4cbaa3719102d21927f709ce9e0f865c | [
"Apache-2.0"
] | null | null | null | """levelworks URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from . import views
# The app's root URL maps to the index view.
urlpatterns = [
    path('', views.index, name='index'),
]
| 32.347826 | 77 | 0.701613 |
342623b254fdecc53cc5a13ad5ec75075efe2c4c | 7,064 | py | Python | qa/rpc-tests/test_framework/test_framework.py | WestonReed/bitcoinxt | ac5b611797cb83226b7abce53cc50d48cb2a0c16 | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/test_framework.py | WestonReed/bitcoinxt | ac5b611797cb83226b7abce53cc50d48cb2a0c16 | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/test_framework.py | WestonReed/bitcoinxt | ac5b611797cb83226b7abce53cc50d48cb2a0c16 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
import logging
import optparse
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
stop_node,
enable_coverage,
check_json_precision,
initialize_chain_clean,
PortSeed,
)
from .authproxy import JSONRPCException
class BitcoinTestFramework(object):
    """Base class for bitcoind RPC regression tests.

    Subclasses override run_test() (and optionally add_options, setup_chain
    or setup_network) and call main() to drive the full test lifecycle:
    option parsing, chain/datadir setup, node startup, test body, shutdown
    and cleanup.
    """
    def __init__(self):
        # Defaults; subclasses may override these before main() runs.
        self.num_nodes = 4
        self.setup_clean_chain = False
        self.nodes = None
    def run_test(self):
        """The actual test body; must be overridden by subclasses."""
        raise NotImplementedError
    def add_options(self, parser):
        """Hook for subclasses to register extra command-line options."""
        pass
    def setup_chain(self):
        """Create per-node datadirs, from cache or from a clean chain."""
        print("Initializing test directory "+self.options.tmpdir \
            + " (clean: %s)" % self.setup_clean_chain)
        if self.setup_clean_chain:
            initialize_chain_clean(self.options.tmpdir, self.num_nodes)
        else:
            initialize_chain(self.options.tmpdir, self.num_nodes)
    def setup_nodes(self):
        """Start the bitcoind nodes and return their RPC proxies."""
        return start_nodes(self.num_nodes, self.options.tmpdir)
    def setup_network(self, split = False):
        """Start nodes and wire their P2P connections.

        With split=False the four nodes form one connected chain; with
        split=True the 1<->2 link is omitted, yielding halves 0/1 and 2/3.
        """
        self.nodes = self.setup_nodes()
        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.
        # If we joined network halves, connect the nodes from the joint
        # on outward. This ensures that chains are properly reorganised.
        if not split:
            connect_nodes_bi(self.nodes, 1, 2)
            sync_blocks(self.nodes[1:3])
            sync_mempools(self.nodes[1:3])
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 2, 3)
        self.is_network_split = split
        self.sync_all()
    def split_network(self):
        """
        Split the network of four nodes into nodes 0/1 and 2/3.
        """
        assert not self.is_network_split
        stop_nodes(self.nodes)
        self.setup_network(True)
    def sync_all(self):
        """Wait until blocks and mempools agree within each connected half."""
        if self.is_network_split:
            sync_blocks(self.nodes[:2])
            sync_blocks(self.nodes[2:])
            sync_mempools(self.nodes[:2])
            sync_mempools(self.nodes[2:])
        else:
            sync_blocks(self.nodes)
            sync_mempools(self.nodes)
    def join_network(self):
        """
        Join the (previously split) network halves together.
        """
        assert self.is_network_split
        stop_nodes(self.nodes)
        self.setup_network(False)
    def main(self):
        """Parse options, run the test, then shut down and clean up."""
        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                          help="Leave bitcoinds and test.* datadir on exit or error")
        parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                          help="Don't stop bitcoinds after the test execution")
        parser.add_option("--srcdir", dest="srcdir", default="../../src",
                          help="Source directory containing bitcoind/bitcoin-cli (default: %default)")
        # NOTE(review): mkdtemp runs when the option is declared, so a temp
        # directory is created even when --tmpdir is supplied — confirm.
        parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
                          help="Root directory for datadirs")
        parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
                          help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_option("--coveragedir", dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()
        # Keep parallel runs with different port seeds in separate datadirs.
        self.options.tmpdir += '/' + str(self.options.port_seed)
        if self.options.trace_rpc:
            logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
        if self.options.coveragedir:
            enable_coverage(self.options.coveragedir)
        PortSeed.n = self.options.port_seed
        # Make the bitcoind/bitcoin-cli binaries under srcdir take priority.
        os.environ['PATH'] = self.options.srcdir+":"+os.environ['PATH']
        check_json_precision()
        success = False
        try:
            if not os.path.isdir(self.options.tmpdir):
                os.makedirs(self.options.tmpdir)
            self.setup_chain()
            self.setup_network()
            self.run_test()
            success = True
        except JSONRPCException as e:
            print("JSONRPC error: "+e.error['message'])
            traceback.print_tb(sys.exc_info()[2])
        except AssertionError as e:
            print("Assertion failed: " + str(e))
            traceback.print_tb(sys.exc_info()[2])
        except Exception as e:
            print("Unexpected exception caught during testing: "+str(e))
            traceback.print_tb(sys.exc_info()[2])
        except KeyboardInterrupt as e:
            print("Exiting after " + repr(e))
        if not self.options.noshutdown:
            print("Stopping nodes")
            stop_nodes(self.nodes)
        else:
            print("Note: bitcoinds were not stopped and may still be running")
        # Only remove the datadirs on a fully successful, shut-down run.
        if not self.options.nocleanup and not self.options.noshutdown and success:
            print("Cleaning up")
            shutil.rmtree(self.options.tmpdir)
        else:
            print("Not cleaning up dir %s" % self.options.tmpdir)
        if success:
            print("Tests successful")
            sys.exit(0)
        else:
            print("Failed")
            sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
    """Comparison-test variant: node 0 runs the binary under test, all
    remaining nodes run the reference binary."""

    def __init__(self):
        super().__init__()
        self.num_nodes = 2
        self.setup_clean_chain = True

    def add_options(self, parser):
        """Register the test/reference binary options (default: $BITCOIND)."""
        default_bitcoind = os.getenv("BITCOIND", "bitcoind")
        parser.add_option("--testbinary", dest="testbinary",
                          default=default_bitcoind,
                          help="bitcoind binary to test")
        parser.add_option("--refbinary", dest="refbinary",
                          default=default_bitcoind,
                          help="bitcoind binary to use for reference nodes (if any)")

    def setup_network(self):
        """Start one test-binary node plus num_nodes-1 reference nodes."""
        extra_args = [['-debug', '-whitelist=127.0.0.1']] * self.num_nodes
        binaries = [self.options.testbinary]
        binaries.extend([self.options.refbinary] * (self.num_nodes - 1))
        self.nodes = start_nodes(
            self.num_nodes, self.options.tmpdir,
            extra_args=extra_args,
            binary=binaries)
| 34.125604 | 106 | 0.612401 |
d58ca6ffdaaa70fe93247ff6c3dc8309d5ab7c0c | 2,160 | py | Python | tempest/services/compute/json/services_client.py | rcbops-qe/tempest | 88960aa32c473b64072671541a136dbae41b1d4c | [
"Apache-2.0"
] | null | null | null | tempest/services/compute/json/services_client.py | rcbops-qe/tempest | 88960aa32c473b64072671541a136dbae41b1d4c | [
"Apache-2.0"
] | null | null | null | tempest/services/compute/json/services_client.py | rcbops-qe/tempest | 88960aa32c473b64072671541a136dbae41b1d4c | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 NEC Corporation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import urllib
from tempest.api_schema.compute import services as schema
from tempest.common import rest_client
from tempest import config
CONF = config.CONF
class ServicesClientJSON(rest_client.RestClient):
    """JSON client for the Nova os-services admin API."""

    def __init__(self, auth_provider):
        super(ServicesClientJSON, self).__init__(auth_provider)
        self.service = CONF.compute.catalog_type

    def list_services(self, params=None):
        """List compute services.

        params: optional dict of query filters, urlencoded onto the request.
        """
        url = 'os-services'
        if params:
            url += '?%s' % urllib.urlencode(params)

        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(schema.list_services, resp, body)
        return resp, body['services']

    def enable_service(self, host_name, binary):
        """
        Enable service on a host
        host_name: Name of host
        binary: Service binary
        """
        post_body = json.dumps({'binary': binary, 'host': host_name})
        resp, body = self.put('os-services/enable', post_body)
        body = json.loads(body)
        self.validate_response(schema.enable_service, resp, body)
        return resp, body['service']

    def disable_service(self, host_name, binary):
        """
        Disable service on a host
        host_name: Name of host
        binary: Service binary
        """
        post_body = json.dumps({'binary': binary, 'host': host_name})
        resp, body = self.put('os-services/disable', post_body)
        body = json.loads(body)
        # Consistency fix: validate the response like enable_service does.
        # The enable/disable responses share the same body structure, so the
        # enable schema is reused here.
        self.validate_response(schema.enable_service, resp, body)
        return resp, body['service']
| 33.230769 | 78 | 0.666667 |
20454cf901019d89d8d52d26ea022b64d5fa774f | 398 | py | Python | telescope_shop/telescopes/admin.py | iskrenivanov87/AstroTrader | 82230577aab10260bc9b478c4974a66c1bf1adb8 | [
"MIT"
] | null | null | null | telescope_shop/telescopes/admin.py | iskrenivanov87/AstroTrader | 82230577aab10260bc9b478c4974a66c1bf1adb8 | [
"MIT"
] | null | null | null | telescope_shop/telescopes/admin.py | iskrenivanov87/AstroTrader | 82230577aab10260bc9b478c4974a66c1bf1adb8 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from telescope_shop.telescopes.models import Telescope, Comment
from mptt.admin import MPTTModelAdmin
@admin.register(Telescope)
class TelescopeAdmin(admin.ModelAdmin):
    """Admin change-list configuration for telescope listings."""
    # Columns shown in the change list and the sidebar filters.
    list_display = ('make', 'model', 'location', 'user',)
    list_filter = ('make', 'model', 'location', 'created',)
# Comments form a tree (MPTT), so register them with the tree-aware admin.
admin.site.register(Comment, MPTTModelAdmin)
48f5d28a6cdc2496134b3b8824ce48259b13d41f | 356 | py | Python | python/8kyu/loenardo_dicaprio_and_oscars.py | Sigmanificient/codewars | b34df4bf55460d312b7ddf121b46a707b549387a | [
"MIT"
] | 3 | 2021-06-08T01:57:13.000Z | 2021-06-26T10:52:47.000Z | python/8kyu/loenardo_dicaprio_and_oscars.py | Sigmanificient/codewars | b34df4bf55460d312b7ddf121b46a707b549387a | [
"MIT"
] | null | null | null | python/8kyu/loenardo_dicaprio_and_oscars.py | Sigmanificient/codewars | b34df4bf55460d312b7ddf121b46a707b549387a | [
"MIT"
] | 2 | 2021-06-10T21:20:13.000Z | 2021-06-30T10:13:26.000Z | """Kata url: https://www.codewars.com/kata/56d49587df52101de70011e4."""
def leo(oscar: int) -> str:
    """Return Leo's reaction for the given Oscar ceremony number."""
    # The two milestone ceremonies get fixed responses.
    special_years = {
        88: "Leo finally won the oscar! Leo is happy",
        86: "Not even for Wolf of wallstreet?!",
    }
    if oscar in special_years:
        return special_years[oscar]
    # Before the 88th ceremony he is still waiting; afterwards he has won.
    return "When will you give Leo an Oscar?" if oscar < 88 else "Leo got one already!"
| 23.733333 | 71 | 0.617978 |
278c02bd6802f1d4b5fef97c39a398e1bb92b272 | 987 | py | Python | tests/models.py | wtforms/wtforms-django | d35ac883d0060cc3257fc74a3a8bc5363f86de0b | [
"BSD-3-Clause"
] | 8 | 2016-02-17T11:12:52.000Z | 2022-02-25T14:21:32.000Z | tests/models.py | wtforms/wtforms-django | d35ac883d0060cc3257fc74a3a8bc5363f86de0b | [
"BSD-3-Clause"
] | 10 | 2015-10-26T14:17:55.000Z | 2018-06-08T17:08:10.000Z | tests/models.py | wtforms/wtforms-django | d35ac883d0060cc3257fc74a3a8bc5363f86de0b | [
"BSD-3-Clause"
] | 3 | 2015-10-27T12:42:04.000Z | 2018-06-11T19:18:06.000Z | from __future__ import unicode_literals
from django.db import models
try:
from localflavor.us.models import USStateField
except ImportError:
from django.contrib.localflavor.us.models import USStateField
class Group(models.Model):
    """Simple FK target for User; string form is "name(pk)"."""
    name = models.CharField(max_length=20)
    def __unicode__(self):
        # e.g. "admins(3)"
        return "%s(%d)" % (self.name, self.pk)
    __str__ = __unicode__
class User(models.Model):
    """Test model exercising a wide spread of Django field types
    (including py2-era fields such as IPAddressField/NullBooleanField)."""
    username = models.CharField(max_length=40)
    # NOTE(review): SET_NULL normally requires null=True on the FK — confirm.
    group = models.ForeignKey(Group, on_delete=models.SET_NULL)
    birthday = models.DateField(help_text="Teh Birthday")
    email = models.EmailField(blank=True)
    posts = models.PositiveSmallIntegerField()
    state = USStateField()
    reg_ip = models.IPAddressField("IP Addy")
    url = models.URLField()
    file = models.FilePathField()
    file2 = models.FileField(upload_to=".")
    # Field names below deliberately include builtin-shadowing names
    # ('bool', 'file') — presumably to exercise form generation; confirm.
    bool = models.BooleanField()
    time1 = models.TimeField()
    slug = models.SlugField()
    nullbool = models.NullBooleanField()
| 28.2 | 65 | 0.716312 |
d8ccc4c1117519c60c9d4a3591ef41028e1a2e3c | 18,925 | py | Python | official/vision/beta/configs/semantic_segmentation.py | kia-ctw/models | 007070820109ec57cfb048ff505be090d4e88d10 | [
"Apache-2.0"
] | 2 | 2021-08-23T13:42:23.000Z | 2021-11-09T11:48:44.000Z | official/vision/beta/configs/semantic_segmentation.py | kia-ctw/models | 007070820109ec57cfb048ff505be090d4e88d10 | [
"Apache-2.0"
] | 2 | 2021-03-31T22:22:34.000Z | 2021-08-06T18:38:30.000Z | official/vision/beta/configs/semantic_segmentation.py | kia-ctw/models | 007070820109ec57cfb048ff505be090d4e88d10 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Semantic segmentation configuration definition."""
import dataclasses
import math
import os
from typing import List, Optional, Union

import numpy as np

from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.modeling.hyperparams import config_definitions as cfg
from official.vision.beta.configs import backbones
from official.vision.beta.configs import common
from official.vision.beta.configs import decoders
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
  """Input config for training and evaluation."""
  output_size: List[int] = dataclasses.field(default_factory=list)
  # If crop_size is specified, image will be resized first to
  # output_size, then crop of size crop_size will be cropped.
  crop_size: List[int] = dataclasses.field(default_factory=list)
  input_path: str = ''
  global_batch_size: int = 0
  is_training: bool = True
  dtype: str = 'float32'
  shuffle_buffer_size: int = 1000
  cycle_length: int = 10
  # If resize_eval_groundtruth is set to False, original image sizes are used
  # for eval. In that case, groundtruth_padded_size has to be specified too to
  # allow for batching the variable input sizes of images.
  resize_eval_groundtruth: bool = True
  groundtruth_padded_size: List[int] = dataclasses.field(default_factory=list)
  # Random scale-jitter range used during training augmentation.
  aug_scale_min: float = 1.0
  aug_scale_max: float = 1.0
  aug_rand_hflip: bool = True
  drop_remainder: bool = True
  file_type: str = 'tfrecord'
@dataclasses.dataclass
class SegmentationHead(hyperparams.Config):
  """Segmentation head config."""
  # Feature level the head reads its input from.
  level: int = 3
  num_convs: int = 2
  num_filters: int = 256
  use_depthwise_convolution: bool = False
  prediction_kernel_size: int = 1
  # Factor by which logits are upsampled before the final prediction.
  upsample_factor: int = 1
  feature_fusion: Optional[str] = None  # None, deeplabv3plus, or pyramid_fusion
  # deeplabv3plus feature fusion params
  low_level: int = 2
  low_level_num_filters: int = 48
@dataclasses.dataclass
class SemanticSegmentationModel(hyperparams.Config):
  """Semantic segmentation model config."""
  num_classes: int = 0
  input_size: List[int] = dataclasses.field(default_factory=list)
  # Feature pyramid range produced by the backbone/decoder.
  min_level: int = 3
  max_level: int = 6
  head: SegmentationHead = SegmentationHead()
  backbone: backbones.Backbone = backbones.Backbone(
      type='resnet', resnet=backbones.ResNet())
  # 'identity' passes backbone features straight through to the head.
  decoder: decoders.Decoder = decoders.Decoder(type='identity')
  norm_activation: common.NormActivation = common.NormActivation()
@dataclasses.dataclass
class Losses(hyperparams.Config):
  """Loss config for the semantic segmentation task."""
  label_smoothing: float = 0.0
  # Pixels carrying this label value are excluded from the loss.
  ignore_label: int = 255
  class_weights: List[float] = dataclasses.field(default_factory=list)
  l2_weight_decay: float = 0.0
  use_groundtruth_dimension: bool = True
  # presumably keeps only the hardest top-k fraction of pixels when < 1.0 —
  # confirm against the loss implementation.
  top_k_percent_pixels: float = 1.0
@dataclasses.dataclass
class Evaluation(hyperparams.Config):
  """Metric-reporting config."""
  report_per_class_iou: bool = True
  report_train_mean_iou: bool = True  # Turning this off can speed up training.
@dataclasses.dataclass
class SemanticSegmentationTask(cfg.TaskConfig):
  """The semantic segmentation task config."""
  model: SemanticSegmentationModel = SemanticSegmentationModel()
  train_data: DataConfig = DataConfig(is_training=True)
  validation_data: DataConfig = DataConfig(is_training=False)
  losses: Losses = Losses()
  evaluation: Evaluation = Evaluation()
  # Optional input partition dims — presumably for spatial partitioning on
  # TPU; confirm intended use.
  train_input_partition_dims: List[int] = dataclasses.field(
      default_factory=list)
  eval_input_partition_dims: List[int] = dataclasses.field(
      default_factory=list)
  init_checkpoint: Optional[str] = None
  init_checkpoint_modules: Union[
      str, List[str]] = 'all'  # all, backbone, and/or decoder
@exp_factory.register_config_factory('semantic_segmentation')
def semantic_segmentation() -> cfg.ExperimentConfig:
  """Semantic segmentation general experiment with default task/trainer."""
  return cfg.ExperimentConfig(
      # Bug fix: `task` must be a TaskConfig. The model config was being
      # passed here, which has no train_data/validation_data fields, so the
      # restrictions below could never resolve.
      task=SemanticSegmentationTask(),
      trainer=cfg.TrainerConfig(),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
# PASCAL VOC 2012 Dataset
# Example counts for the augmented train split and the standard val split.
PASCAL_TRAIN_EXAMPLES = 10582
PASCAL_VAL_EXAMPLES = 1449
PASCAL_INPUT_PATH_BASE = 'pascal_voc_seg'
@exp_factory.register_config_factory('seg_deeplabv3_pascal')
def seg_deeplabv3_pascal() -> cfg.ExperimentConfig:
  """Image segmentation on Pascal VOC with resnet deeplabv3."""
  train_batch_size = 16
  eval_batch_size = 8
  steps_per_epoch = PASCAL_TRAIN_EXAMPLES // train_batch_size
  output_stride = 16
  aspp_dilation_rates = [12, 24, 36]  # [6, 12, 18] if output_stride = 16
  multigrid = [1, 2, 4]
  stem_type = 'v1'
  # Use stdlib math: `np.math` was a never-public alias removed in NumPy 2.0.
  level = int(math.log2(output_stride))  # e.g. stride 16 -> level 4
  config = cfg.ExperimentConfig(
      task=SemanticSegmentationTask(
          model=SemanticSegmentationModel(
              num_classes=21,
              input_size=[None, None, 3],
              backbone=backbones.Backbone(
                  type='dilated_resnet', dilated_resnet=backbones.DilatedResNet(
                      model_id=101, output_stride=output_stride,
                      multigrid=multigrid, stem_type=stem_type)),
              decoder=decoders.Decoder(
                  type='aspp', aspp=decoders.ASPP(
                      level=level, dilation_rates=aspp_dilation_rates)),
              head=SegmentationHead(level=level, num_convs=0),
              norm_activation=common.NormActivation(
                  activation='swish',
                  norm_momentum=0.9997,
                  norm_epsilon=1e-3,
                  use_sync_bn=True)),
          losses=Losses(l2_weight_decay=1e-4),
          train_data=DataConfig(
              input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'train_aug*'),
              # TODO(arashwan): test changing size to 513 to match deeplab.
              output_size=[512, 512],
              is_training=True,
              global_batch_size=train_batch_size,
              aug_scale_min=0.5,
              aug_scale_max=2.0),
          validation_data=DataConfig(
              input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'val*'),
              output_size=[512, 512],
              is_training=False,
              global_batch_size=eval_batch_size,
              resize_eval_groundtruth=False,
              groundtruth_padded_size=[512, 512],
              drop_remainder=False),
          # resnet101
          init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/deeplab/deeplab_resnet101_imagenet/ckpt-62400',
          init_checkpoint_modules='backbone'),
      trainer=cfg.TrainerConfig(
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          train_steps=45 * steps_per_epoch,
          validation_steps=PASCAL_VAL_EXAMPLES // eval_batch_size,
          validation_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
                  'sgd': {
                      'momentum': 0.9
                  }
              },
              'learning_rate': {
                  'type': 'polynomial',
                  'polynomial': {
                      'initial_learning_rate': 0.007,
                      'decay_steps': 45 * steps_per_epoch,
                      'end_learning_rate': 0.0,
                      'power': 0.9
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 5 * steps_per_epoch,
                      'warmup_learning_rate': 0
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
  return config
@exp_factory.register_config_factory('seg_deeplabv3plus_pascal')
def seg_deeplabv3plus_pascal() -> cfg.ExperimentConfig:
  """Image segmentation on Pascal VOC with resnet deeplabv3+."""
  train_batch_size = 16
  eval_batch_size = 8
  steps_per_epoch = PASCAL_TRAIN_EXAMPLES // train_batch_size
  output_stride = 16
  aspp_dilation_rates = [6, 12, 18]
  multigrid = [1, 2, 4]
  stem_type = 'v1'
  # Use stdlib math: `np.math` was a never-public alias removed in NumPy 2.0.
  level = int(math.log2(output_stride))  # e.g. stride 16 -> level 4
  config = cfg.ExperimentConfig(
      task=SemanticSegmentationTask(
          model=SemanticSegmentationModel(
              num_classes=21,
              input_size=[None, None, 3],
              backbone=backbones.Backbone(
                  type='dilated_resnet', dilated_resnet=backbones.DilatedResNet(
                      model_id=101, output_stride=output_stride,
                      stem_type=stem_type, multigrid=multigrid)),
              decoder=decoders.Decoder(
                  type='aspp',
                  aspp=decoders.ASPP(
                      level=level, dilation_rates=aspp_dilation_rates)),
              # deeplabv3+ fuses a low-level (stride-4) feature into the head.
              head=SegmentationHead(
                  level=level,
                  num_convs=2,
                  feature_fusion='deeplabv3plus',
                  low_level=2,
                  low_level_num_filters=48),
              norm_activation=common.NormActivation(
                  activation='swish',
                  norm_momentum=0.9997,
                  norm_epsilon=1e-3,
                  use_sync_bn=True)),
          losses=Losses(l2_weight_decay=1e-4),
          train_data=DataConfig(
              input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'train_aug*'),
              output_size=[512, 512],
              is_training=True,
              global_batch_size=train_batch_size,
              aug_scale_min=0.5,
              aug_scale_max=2.0),
          validation_data=DataConfig(
              input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'val*'),
              output_size=[512, 512],
              is_training=False,
              global_batch_size=eval_batch_size,
              resize_eval_groundtruth=False,
              groundtruth_padded_size=[512, 512],
              drop_remainder=False),
          # resnet101
          init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/deeplab/deeplab_resnet101_imagenet/ckpt-62400',
          init_checkpoint_modules='backbone'),
      trainer=cfg.TrainerConfig(
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          train_steps=45 * steps_per_epoch,
          validation_steps=PASCAL_VAL_EXAMPLES // eval_batch_size,
          validation_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
                  'sgd': {
                      'momentum': 0.9
                  }
              },
              'learning_rate': {
                  'type': 'polynomial',
                  'polynomial': {
                      'initial_learning_rate': 0.007,
                      'decay_steps': 45 * steps_per_epoch,
                      'end_learning_rate': 0.0,
                      'power': 0.9
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 5 * steps_per_epoch,
                      'warmup_learning_rate': 0
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
  return config
@exp_factory.register_config_factory('seg_resnetfpn_pascal')
def seg_resnetfpn_pascal() -> cfg.ExperimentConfig:
  """Image segmentation on Pascal VOC with resnet-fpn."""
  train_batch_size = 256
  eval_batch_size = 32
  steps_per_epoch = PASCAL_TRAIN_EXAMPLES // train_batch_size
  config = cfg.ExperimentConfig(
      task=SemanticSegmentationTask(
          model=SemanticSegmentationModel(
              # 21 = 20 Pascal VOC classes + background.
              num_classes=21,
              input_size=[512, 512, 3],
              min_level=3,
              max_level=7,
              backbone=backbones.Backbone(
                  type='resnet', resnet=backbones.ResNet(model_id=50)),
              decoder=decoders.Decoder(type='fpn', fpn=decoders.FPN()),
              head=SegmentationHead(level=3, num_convs=3),
              norm_activation=common.NormActivation(
                  activation='swish',
                  use_sync_bn=True)),
          losses=Losses(l2_weight_decay=1e-4),
          train_data=DataConfig(
              input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'train_aug*'),
              is_training=True,
              global_batch_size=train_batch_size,
              aug_scale_min=0.2,
              aug_scale_max=1.5),
          validation_data=DataConfig(
              input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'val*'),
              is_training=False,
              global_batch_size=eval_batch_size,
              resize_eval_groundtruth=False,
              groundtruth_padded_size=[512, 512],
              drop_remainder=False),
      ),
      trainer=cfg.TrainerConfig(
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          train_steps=450 * steps_per_epoch,
          validation_steps=PASCAL_VAL_EXAMPLES // eval_batch_size,
          validation_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
                  'sgd': {
                      'momentum': 0.9
                  }
              },
              'learning_rate': {
                  'type': 'polynomial',
                  'polynomial': {
                      'initial_learning_rate': 0.007,
                      'decay_steps': 450 * steps_per_epoch,
                      'end_learning_rate': 0.0,
                      'power': 0.9
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 5 * steps_per_epoch,
                      'warmup_learning_rate': 0
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
  return config
# Cityscapes Dataset (Download and process the dataset yourself)
# Example counts for the fine-annotation train/val splits.
CITYSCAPES_TRAIN_EXAMPLES = 2975
CITYSCAPES_VAL_EXAMPLES = 500
CITYSCAPES_INPUT_PATH_BASE = 'cityscapes'
@exp_factory.register_config_factory('seg_deeplabv3plus_cityscapes')
def seg_deeplabv3plus_cityscapes() -> cfg.ExperimentConfig:
  """Image segmentation on Cityscapes with resnet deeplabv3+."""
  train_batch_size = 16
  eval_batch_size = 16
  steps_per_epoch = CITYSCAPES_TRAIN_EXAMPLES // train_batch_size
  output_stride = 16
  aspp_dilation_rates = [6, 12, 18]
  multigrid = [1, 2, 4]
  stem_type = 'v1'
  # Use stdlib math: `np.math` was a never-public alias removed in NumPy 2.0.
  level = int(math.log2(output_stride))  # e.g. stride 16 -> level 4
  config = cfg.ExperimentConfig(
      task=SemanticSegmentationTask(
          model=SemanticSegmentationModel(
              # Cityscapes uses only 19 semantic classes for train/evaluation.
              # The void (background) class is ignored in train and evaluation.
              num_classes=19,
              input_size=[None, None, 3],
              backbone=backbones.Backbone(
                  type='dilated_resnet', dilated_resnet=backbones.DilatedResNet(
                      model_id=101, output_stride=output_stride,
                      stem_type=stem_type, multigrid=multigrid)),
              decoder=decoders.Decoder(
                  type='aspp',
                  aspp=decoders.ASPP(
                      level=level, dilation_rates=aspp_dilation_rates,
                      pool_kernel_size=[512, 1024])),
              head=SegmentationHead(
                  level=level,
                  num_convs=2,
                  feature_fusion='deeplabv3plus',
                  low_level=2,
                  low_level_num_filters=48),
              norm_activation=common.NormActivation(
                  activation='swish',
                  norm_momentum=0.99,
                  norm_epsilon=1e-3,
                  use_sync_bn=True)),
          losses=Losses(l2_weight_decay=1e-4),
          train_data=DataConfig(
              input_path=os.path.join(CITYSCAPES_INPUT_PATH_BASE,
                                      'train_fine**'),
              crop_size=[512, 1024],
              output_size=[1024, 2048],
              is_training=True,
              global_batch_size=train_batch_size,
              aug_scale_min=0.5,
              aug_scale_max=2.0),
          validation_data=DataConfig(
              input_path=os.path.join(CITYSCAPES_INPUT_PATH_BASE, 'val_fine*'),
              output_size=[1024, 2048],
              is_training=False,
              global_batch_size=eval_batch_size,
              resize_eval_groundtruth=True,
              drop_remainder=False),
          # resnet101
          init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/deeplab/deeplab_resnet101_imagenet/ckpt-62400',
          init_checkpoint_modules='backbone'),
      trainer=cfg.TrainerConfig(
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          train_steps=500 * steps_per_epoch,
          validation_steps=CITYSCAPES_VAL_EXAMPLES // eval_batch_size,
          validation_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
                  'sgd': {
                      'momentum': 0.9
                  }
              },
              'learning_rate': {
                  'type': 'polynomial',
                  'polynomial': {
                      'initial_learning_rate': 0.01,
                      'decay_steps': 500 * steps_per_epoch,
                      'end_learning_rate': 0.0,
                      'power': 0.9
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 5 * steps_per_epoch,
                      'warmup_learning_rate': 0
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
  return config
| 38.465447 | 112 | 0.60354 |
12a5168f934d2f294b24bae91f89db1b7654b325 | 445 | py | Python | test_bot/cogs/message_commands.py | Enegg/disnake | 1d48cbf4e0dfec82fdfb65d7f58396767ce7c009 | [
"MIT"
] | 290 | 2021-11-03T12:33:16.000Z | 2022-03-31T19:30:19.000Z | test_bot/cogs/message_commands.py | Enegg/disnake | 1d48cbf4e0dfec82fdfb65d7f58396767ce7c009 | [
"MIT"
] | 200 | 2021-11-03T10:41:41.000Z | 2022-03-31T08:13:11.000Z | test_bot/cogs/message_commands.py | Enegg/disnake | 1d48cbf4e0dfec82fdfb65d7f58396767ce7c009 | [
"MIT"
] | 118 | 2021-11-03T18:27:09.000Z | 2022-03-25T22:00:45.000Z | import disnake
from disnake.ext import commands
class MessageCommands(commands.Cog):
    """Cog exposing message context-menu (right-click) commands."""
    def __init__(self, bot):
        self.bot: commands.Bot = bot
    @commands.message_command(name="Reverse")
    async def reverse(self, inter: disnake.MessageCommandInteraction):
        # Reply with the target message's content reversed.
        await inter.response.send_message(inter.target.content[::-1])
def setup(bot):
    """Extension entry point: register the cog on the bot."""
    bot.add_cog(MessageCommands(bot))
    print(f"> Extension {__name__} is ready\n")
| 26.176471 | 70 | 0.719101 |
f50b5d9b34273be730edbd756e7099724ac09856 | 1,223 | py | Python | marklogic/models/utilities/files.py | paul-hoehne/MarkLogic_Python | 3d00c3e7bb9459de879a4a08b3c7e1b3530d59f9 | [
"Apache-2.0"
] | 7 | 2015-02-24T00:09:12.000Z | 2021-04-01T18:05:16.000Z | marklogic/models/utilities/files.py | paul-hoehne/MarkLogic_Python | 3d00c3e7bb9459de879a4a08b3c7e1b3530d59f9 | [
"Apache-2.0"
] | 19 | 2015-02-28T15:40:58.000Z | 2015-05-13T15:38:12.000Z | marklogic/models/utilities/files.py | paul-hoehne/MarkLogic_Python | 3d00c3e7bb9459de879a4a08b3c7e1b3530d59f9 | [
"Apache-2.0"
] | 12 | 2015-02-27T15:25:12.000Z | 2021-04-01T18:05:31.000Z | #
# Copyright 2015 MarkLogic Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File History
# ------------
#
# Paul Hoehne 03/01/2015 Initial development
#
import os, sys, stat
"""
MarkLogic file classes
"""
def walk_directories(current_directory):
    """
    Recursively collect every non-directory entry under ``current_directory``.
    Each result is a dict holding the bare ``filename`` and the joined path
    under ``partial-directory``.
    """
    found = []
    for entry in os.listdir(current_directory):
        full_path = os.path.join(current_directory, entry)
        if stat.S_ISDIR(os.stat(full_path).st_mode):
            # Descend into sub-directories and merge their results.
            found.extend(walk_directories(full_path))
        else:
            found.append({u'filename': entry, u'partial-directory': full_path})
    return found
| 29.119048 | 80 | 0.696648 |
1f2aa0c6877297bf22abe04eb903d02c3018c215 | 2,864 | py | Python | envisage/ui/single_project/editor/project_editor.py | janvonrickenbach/Envisage_wxPhoenix_py3 | cf79e5b2a0c3b46898a60b5fe5a2fb580604808b | [
"BSD-3-Clause"
] | null | null | null | envisage/ui/single_project/editor/project_editor.py | janvonrickenbach/Envisage_wxPhoenix_py3 | cf79e5b2a0c3b46898a60b5fe5a2fb580604808b | [
"BSD-3-Clause"
] | 1 | 2017-05-22T21:15:22.000Z | 2017-05-22T21:15:22.000Z | envisage/ui/single_project/editor/project_editor.py | janvonrickenbach/Envisage_wxPhoenix_py3 | cf79e5b2a0c3b46898a60b5fe5a2fb580604808b | [
"BSD-3-Clause"
] | 1 | 2019-10-01T07:03:58.000Z | 2019-10-01T07:03:58.000Z | #-----------------------------------------------------------------------------
#
# Copyright (c) 2005, 2006 by Enthought, Inc.
# All rights reserved.
#
#-----------------------------------------------------------------------------
"""
A base class for editors that can be tracked by single project plugin projects.
"""
# Standard library imports.
import logging
# Enthought library imports
from envisage.workbench import DecoratedEditor
from traits.api import Instance
# Application specific imports.
from envisage.single_project.services import IPROJECT_MODEL
# Setup a logger for this module.
logger = logging.getLogger(__name__)
class ProjectEditor(DecoratedEditor):
    """
    A base class for editors that can be tracked by single project plugin
    projects.  Registers itself with the current project on construction and
    unregisters when its control is destroyed.
    """
    #########################################################################
    # Attributes
    #########################################################################
    ### public 'ProjectEditor' interface ####################################
    # The project containing the resource we're editing
    project = Instance('envisage.single_project.project.Project')
    #########################################################################
    # `object` interface
    #########################################################################
    #### operator methods ###################################################
    def __init__(self, **traits):
        """
        Constructor.
        Extended to associate ourself with the current project.
        """
        super(ProjectEditor, self).__init__(**traits)
        # Make sure the current project knows this editor is associated with
        # its resources (the project comes from the project-model service).
        model_service = self.window.application.get_service(IPROJECT_MODEL)
        self.project = model_service.project
        self.project.register_editor(self.resource, self)
        return
    #########################################################################
    # 'Editor' interface.
    #########################################################################
    ### public 'Editor' interface ###########################################
    def destroy_control(self):
        """
        Destroys the toolkit-specific control that represents the editor.
        Extended to ensure that the current project stops associating us
        with its resources.
        """
        # Only do something if the editor is still open
        if self.control:
            logger.debug('Destroying control in ProjectEditor [%s]', self)
            # Unregister from the associated project immediately.
            self.project.register_editor(self.resource, self, remove=True)
            super(ProjectEditor, self).destroy_control()
        return
#### EOF ####################################################################
| 30.468085 | 79 | 0.482542 |
8be010cc8024c555c47ba5c68655fedb684a2926 | 2,336 | py | Python | tests/components/netatmo/test_select.py | Andrew55529/core | c440a30aff0d8f573d8aa0d949068702dd36c386 | [
"Apache-2.0"
] | 1 | 2021-07-31T21:08:49.000Z | 2021-07-31T21:08:49.000Z | tests/components/netatmo/test_select.py | flexy2dd/core | 1019ee22ff13e5f542e868179d791e6a0d87369a | [
"Apache-2.0"
] | 70 | 2020-07-16T02:07:46.000Z | 2022-03-31T06:01:48.000Z | tests/components/netatmo/test_select.py | Vaarlion/core | f3de8b9f28de01abf72c0f5bb0b457eb1841f201 | [
"Apache-2.0"
] | 1 | 2020-10-25T12:21:56.000Z | 2020-10-25T12:21:56.000Z | """The tests for the Netatmo climate platform."""
from unittest.mock import patch
from homeassistant.components.select import DOMAIN as SELECT_DOMAIN
from homeassistant.components.select.const import ATTR_OPTION, ATTR_OPTIONS
from homeassistant.const import ATTR_ENTITY_ID, CONF_WEBHOOK_ID, SERVICE_SELECT_OPTION
from .common import selected_platforms, simulate_webhook
async def test_select_schedule_thermostats(hass, config_entry, caplog, netatmo_auth):
    """Test service for selecting Netatmo schedule with thermostats."""
    # Set up only the platforms this test needs.
    with selected_platforms(["climate", "select"]):
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
    webhook_id = config_entry.data[CONF_WEBHOOK_ID]
    select_entity = "select.netatmo_myhome"
    # Initial state: the "Default" schedule is active and both schedules
    # are offered as options.
    assert hass.states.get(select_entity).state == "Default"
    assert hass.states.get(select_entity).attributes[ATTR_OPTIONS] == [
        "Default",
        "Winter",
    ]
    # Fake backend response changing schedule
    response = {
        "event_type": "schedule",
        "schedule_id": "b1b54a2f45795764f59d50d8",
        "previous_schedule_id": "59d32176d183948b05ab4dce",
        "push_type": "home_event_changed",
    }
    await simulate_webhook(hass, webhook_id, response)
    # The webhook event should flip the entity to the "Winter" schedule.
    assert hass.states.get(select_entity).state == "Winter"
    # Test setting a different schedule via the select.select_option service;
    # the pyatmo call is patched so no real request is made.
    with patch(
        "pyatmo.thermostat.AsyncHomeData.async_switch_home_schedule"
    ) as mock_switch_home_schedule:
        await hass.services.async_call(
            SELECT_DOMAIN,
            SERVICE_SELECT_OPTION,
            {
                ATTR_ENTITY_ID: select_entity,
                ATTR_OPTION: "Default",
            },
            blocking=True,
        )
        await hass.async_block_till_done()
        # The service call must be forwarded to pyatmo with the home id and
        # the schedule id matching the "Default" option.
        mock_switch_home_schedule.assert_called_once_with(
            home_id="91763b24c43d3e344f424e8b", schedule_id="591b54a2764ff4d50d8b5795"
        )
    # Fake backend response changing schedule
    response = {
        "event_type": "schedule",
        "schedule_id": "591b54a2764ff4d50d8b5795",
        "previous_schedule_id": "b1b54a2f45795764f59d50d8",
        "push_type": "home_event_changed",
    }
    await simulate_webhook(hass, webhook_id, response)
    # State only updates once the backend confirms via webhook.
    assert hass.states.get(select_entity).state == "Default"
| 35.393939 | 86 | 0.701199 |
8591ae3f43971790d3ce6d9b82d5f0db16ccfa3d | 3,332 | py | Python | word_embedding_models/sentence_similarities.py | acoli-repo/book-gen | ef93116d2d6d4f10ef5f29c296c1f55448793a4a | [
"MIT"
] | 5 | 2019-06-15T14:41:46.000Z | 2021-06-05T19:04:07.000Z | word_embedding_models/sentence_similarities.py | acoli-repo/book-gen | ef93116d2d6d4f10ef5f29c296c1f55448793a4a | [
"MIT"
] | null | null | null | word_embedding_models/sentence_similarities.py | acoli-repo/book-gen | ef93116d2d6d4f10ef5f29c296c1f55448793a4a | [
"MIT"
] | 3 | 2019-04-28T19:45:46.000Z | 2020-10-19T08:35:47.000Z | '''
Takes as input one-line separated, whitespace-tokenized lemmata(!)
and uses wordvector model (trained on lemmata)
to produce sentence vectors for each line by vector average over tokens.
Call this script with: python sentence_similarities.py aggregated-sentences.txt > sentence-similarities.txt
where senti-intro-sents.txt is the line-separated list of sentences
and sentence_similarities is the output file with pairwise cosine similarity scores between all sentences.
The output serves to reorder the sentences. (not part of this implementation).
'''
import gensim
import re
import random
import csv
import logging
import numpy
import sys
import numpy as np
import json
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics.pairwise import cosine_similarity

### Main program
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

# Load the lemma-based word vector model trained beforehand.
# NOTE(review): direct `token in vectors` / `vectors[token]` access is
# deprecated in newer gensim (use model.wv); kept for compatibility with the
# model format this script was written against.
vectors = gensim.models.Word2Vec.load("vectors-chem-bio-cs.d2v")
# Take the embedding dimensionality from the model instead of hard-coding it,
# so the zero-vector fallback below always matches the real vector size.
vector_dim = vectors.vector_size

# Build one vector per input line by averaging the vectors of its tokens.
sentence_vectors = []
with open(sys.argv[1], 'r') as f:
    for sentence in f:
        tokens = sentence.split()
        found_toks = 0
        average_vector = 0
        for token in tokens:
            token = token.lower()
            # Skip out-of-vocabulary tokens; they contribute nothing.
            if token in vectors:
                average_vector = average_vector + vectors[token]
                found_toks += 1
        if found_toks == 0:
            # No token had a vector: fall back to an all-zero sentence vector
            # of the model's dimensionality (was hard-coded to 100 before,
            # which broke for models of any other size).
            average_vector = np.zeros(vector_dim)
        else:
            average_vector = average_vector / found_toks
        # cosine_similarity expects 2-D input, hence the (1, dim) reshape.
        sentence_vectors.append(average_vector.reshape(1, -1))

print(len(sentence_vectors))

# Emit the upper triangle of the pairwise similarity matrix, 1-indexed:
#   <sentence i> <sentence j> <cosine similarity>
# This output serves to reorder the sentences downstream.
for first_idx in range(len(sentence_vectors)):
    first_sent = sentence_vectors[first_idx]
    for second_idx in range(first_idx + 1, len(sentence_vectors)):
        sim = cosine_similarity(first_sent, sentence_vectors[second_idx])[0][0]
        print(first_idx + 1, second_idx + 1, sim)
| 32.666667 | 107 | 0.681573 |
999f7cacb7ed5e55c9bbecbeec6ad7b006df030b | 1,033 | py | Python | backend/admingym/users/admin/customAdminUsers.py | ManuelRivera98/AdminGym | caf2b6f5e9a0ed9e98567a036bec9a34b44ecf13 | [
"MIT"
] | 1 | 2020-09-14T04:23:07.000Z | 2020-09-14T04:23:07.000Z | backend/admingym/users/admin/customAdminUsers.py | ManuelRivera98/AdminGym | caf2b6f5e9a0ed9e98567a036bec9a34b44ecf13 | [
"MIT"
] | null | null | null | backend/admingym/users/admin/customAdminUsers.py | ManuelRivera98/AdminGym | caf2b6f5e9a0ed9e98567a036bec9a34b44ecf13 | [
"MIT"
] | null | null | null | """User models admin"""
# Django
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
# Models
from admingym.users.models import User
@admin.register(User)
class CustomUserAdmin(UserAdmin):
    """User model admin.

    Customizes Django's stock ``UserAdmin`` for the gym ``User`` model:
    adds gym-specific columns (cc, already_owns, is_verified) to the change
    list and regroups the edit form into Gym / Password / Metadata sections.
    """
    # Columns shown in the change-list view.
    list_display = ('id', 'email', 'username', 'first_name', 'last_name', 'cc', 'is_verified', 'already_owns', 'is_active')
    # Sidebar filters.
    list_filter = ('is_verified', 'username')
    # Columns that link to the change form.
    list_display_links = ('username', 'email')
    # Edit-form layout, grouped by concern.
    fieldsets = (
        ('Gym', {
            'fields': (
                ('first_name', 'last_name', 'username', 'email'), ('cc', 'already_owns')
            ),
        }),
        ('Password', {
            'fields': (
                'password',
            )
        }),
        ('Metadata', {
            'fields': (
                ('is_verified', 'is_active')
            ),
        }),
        (None, {
            'fields': (
                ('created_at', 'updated_at')
            ),
        })
    )
    # Audit timestamps are managed by the model, never hand-edited.
    readonly_fields = 'created_at', 'updated_at'
| 24.023256 | 123 | 0.498548 |
957c392902a0fc01bb5780b5f8d8cec1b14e97d7 | 3,624 | py | Python | gym_cellular_automata/forest_fire/bulldozer/utils/svg_paths.py | BrunoGupa/gym-cellular-automata | 6b5d96f2ccf13d1c1da9e2dc4d5273553cfbc89f | [
"MIT"
] | 13 | 2021-02-22T20:26:11.000Z | 2022-02-03T02:33:33.000Z | gym_cellular_automata/forest_fire/bulldozer/utils/svg_paths.py | BrunoGupa/gym-cellular-automata | 6b5d96f2ccf13d1c1da9e2dc4d5273553cfbc89f | [
"MIT"
] | 1 | 2021-10-18T00:50:01.000Z | 2021-10-18T00:50:01.000Z | gym_cellular_automata/forest_fire/bulldozer/utils/svg_paths.py | BrunoGupa/gym-cellular-automata | 6b5d96f2ccf13d1c1da9e2dc4d5273553cfbc89f | [
"MIT"
] | 2 | 2021-03-30T23:35:09.000Z | 2021-09-03T16:37:40.000Z | BULLDOZER = "m 385.10334,444.02706 -7.42197,-164.24713 c -0.19278,-3.95196 -3.47001,-7.1328 -7.42196,-7.1328 H 217.00063 c -3.56639,0 -6.65085,2.60251 -7.32557,6.07252 l -16.19338,86.84663 -50.31515,-45.39929 c -5.68696,-6.2653 -13.78365,-10.21726 -22.94062,-10.21726 h -12.62698 c 4.81946,-59.37572 0,-82.79829 -56.965994,-90.12386 -2.506118,-0.28917 -5.108625,0.67472 -6.650852,2.69889 -1.638616,2.02417 -2.120562,4.62668 -1.349448,7.1328 3.373621,10.02447 5.012236,20.53089 5.012236,31.1337 0,49.35125 -36.724271,91.18415 -85.497189,97.35306 -3.759177,0.48194 -6.554463,3.66279 -6.554463,7.42196 0,6.65086 2.216951,12.24143 6.747242,16.67533 9.831694,9.63891 28.2420254,11.08475 47.6162473,11.08475 3.8555667,0 7.7111327,-0.0964 11.5666997,-0.0964 3.662788,-0.0964 7.132798,-0.0964 10.506419,-0.0964 h 8.289468 c 22.844232,0.0964 37.977331,0.19278 48.869306,-10.98836 2.698897,-2.79529 5.012237,-6.07252 7.036409,-10.31364 h 20.241729 l 54.74904,73.06299 c -29.10953,10.31364 -50.02597,38.1701 -50.02597,70.84603 0,41.44734 33.63981,75.18355 74.99077,75.18355 26.50702,0 49.83319,-13.88004 63.1349,-34.7001 h 40.0015 c 13.30171,20.82006 36.62788,34.7001 63.1349,34.7001 41.35096,0 74.99077,-33.73621 74.99077,-75.18355 0.0964,-33.63982 -21.97672,-62.17101 -52.33931,-71.71354 z M 196.18058,575.88744 c -33.15788,0 -60.05045,-26.98897 -60.05045,-60.24323 0,-33.25426 26.98896,-60.24323 60.05045,-60.24323 33.06148,0 60.05044,26.98897 60.05044,60.24323 0,33.25426 -26.89257,60.24323 -60.05044,60.24323 z m 70.55686,-34.7001 c 2.89168,-8.0003 4.43391,-16.57894 4.43391,-25.54313 0,-41.44734 -33.63982,-75.18355 -74.99077,-75.18355 h -1.63862 l 28.62758,-153.06599 h 139.95707 l 6.94002,153.35516 c -2.50612,-0.28917 -4.91585,-0.38556 -7.51836,-0.38556 -41.35095,0 -74.99077,33.73621 -74.99077,75.18355 0,8.9642 1.54223,17.54283 4.43391,25.54313 z m 95.81083,34.7001 c -33.15787,0 -60.05045,-26.98897 -60.05045,-60.24323 0,-33.25426 
26.98897,-60.24323 60.05045,-60.24323 33.06149,0 60.05045,26.98897 60.05045,60.24323 0,33.25426 -26.89257,60.24323 -60.05045,60.24323 z"
LOCATION = "m 14.229184,6.5393187 c -1.650932,0 -3,1.3490681 -3,3 0,1.6509323 1.349068,3.0000003 3,3.0000003 1.650932,0 3,-1.349068 3,-3.0000003 0,-1.6509319 -1.349068,-3 -3,-3 z m 0,1 c 1.110492,0 2,0.8895082 2,2 0,1.1104923 -0.889508,2.0000003 -2,2.0000003 -1.110492,0 -2,-0.889508 -2,-2.0000003 0,-1.1104918 0.889508,-2 2,-2 z m 0,-5.5 c -4.139595,0 -7.4999999,3.3604048 -7.4999999,7.5 0,2.8472223 1.8140676,6.1218833 3.5859379,8.7480473 1.771869,2.626164 3.542968,4.587891 3.542968,4.587891 l 0.371094,0.410156 0.371094,-0.410156 c 0,0 1.771099,-1.961727 3.542968,-4.587891 1.77187,-2.626164 3.585938,-5.900825 3.585938,-8.7480473 0,-4.1395952 -3.360405,-7.5 -7.5,-7.5 z m 0,1 c 3.600405,0 6.5,2.8995952 6.5,6.5 0,2.4027783 -1.685932,5.6281173 -3.414062,8.1894533 -1.54259,2.286339 -2.772427,3.640236 -3.085938,3.994141 -0.313511,-0.353905 -1.543348,-1.707802 -3.085937,-3.994141 -1.7281305,-2.561336 -3.4140629,-5.786675 -3.4140629,-8.1894533 0,-3.6004048 2.8995949,-6.5 6.4999999,-6.5 z"
FIRE = "M 48.284332,229.46764 C -5.424574,337.73798 29.528841,409.34985 118.19116,457.0911 c -39.216025,-91.21989 0.85252,-149.19141 48.59378,-202.90031 -34.1009,131.28844 28.13323,148.33888 49.44629,76.72701 40.06855,34.10089 33.24837,78.43205 6.82018,126.1733 83.54719,-51.15134 117.64808,-120.20565 56.26647,-244.6739 -12.78783,15.3454 -19.60801,38.3635 -41.77359,39.21602 C 181.27782,253.33827 288.69563,92.211555 153.14458,51.290479 182.98286,121.19731 55.957033,167.23351 76.417568,282.32403 55.957033,275.50385 48.284332,255.89583 48.284332,229.46764 Z"
| 906 | 2,068 | 0.721578 |
f0bfb9647e6b7955fd724106a0b68238930edca3 | 1,216 | py | Python | unintended_ml_bias/new_madlibber/runner.py | IlanPrice/unintended-ml-bias-analysis | 0cad5dc35ea5c03470001bfe9f2214e4be686dde | [
"Apache-2.0"
] | 314 | 2017-09-13T19:51:34.000Z | 2022-03-22T20:41:43.000Z | unintended_ml_bias/new_madlibber/runner.py | IlanPrice/unintended-ml-bias-analysis | 0cad5dc35ea5c03470001bfe9f2214e4be686dde | [
"Apache-2.0"
] | 41 | 2017-09-13T21:01:30.000Z | 2021-11-30T20:21:59.000Z | unintended_ml_bias/new_madlibber/runner.py | IlanPrice/unintended-ml-bias-analysis | 0cad5dc35ea5c03470001bfe9f2214e4be686dde | [
"Apache-2.0"
] | 105 | 2017-10-11T18:20:36.000Z | 2022-03-24T16:19:44.000Z | import argparse
import format_helper
import madlibber
import path_helper
import word_helper
def parse_args():
  """Parse and return the command-line arguments.

  All three flags are required:
    -input_words: the input words to substitute into templates.
    -input_sentence_templates: the input sentence templates.
    -output_file: the output file of filled in templates.
  """
  arg_parser = argparse.ArgumentParser()
  required_flags = (
      ('-input_words', 'The input words to substitute into templates.'),
      ('-input_sentence_templates', 'The input sentence templates.'),
      ('-output_file', 'The output file of filled in templates.'),
  )
  for flag, help_text in required_flags:
    arg_parser.add_argument(flag, type=str, required=True, help=help_text)
  return arg_parser.parse_args()
def main():
  """Wire the madlibber pipeline together and optionally fill templates.

  Loads templates and words (with sanity checks), prints statistics, then
  asks the user for confirmation before generating sentences.
  """
  args = parse_args()
  paths = path_helper.PathHelper(args.input_words, args.input_sentence_templates,
                                 args.output_file)
  words = word_helper.WordHelper(format_helper.FormatHelper)
  lib = madlibber.Madlibber(paths, format_helper.FormatHelper, words)
  lib.load_sanity_check_templates_and_infer_word_categories()
  lib.load_and_sanity_check_words()
  lib.display_statistics()
  # Only generate when the user explicitly confirms with 'y'.
  if input('Do you wish to generate the sentences? [y/N]') == 'y':
    lib.fill_templates()
  print('Done. Exiting...')
if __name__ == '__main__':
main()
| 25.87234 | 78 | 0.695724 |
0d5e6c5137383420c2a7149b3f2a1c693bd09fc3 | 8,837 | py | Python | tests/python/gaia-ui-tests/gaiatest/apps/contacts/regions/contact_form.py | DouglasSherk/gaia | ca471df16080c97d0b7c0b830fdbfab3ab065e56 | [
"Apache-2.0"
] | null | null | null | tests/python/gaia-ui-tests/gaiatest/apps/contacts/regions/contact_form.py | DouglasSherk/gaia | ca471df16080c97d0b7c0b830fdbfab3ab065e56 | [
"Apache-2.0"
] | null | null | null | tests/python/gaia-ui-tests/gaiatest/apps/contacts/regions/contact_form.py | DouglasSherk/gaia | ca471df16080c97d0b7c0b830fdbfab3ab065e56 | [
"Apache-2.0"
] | null | null | null | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette_driver import expected, By, Wait
from gaiatest.apps.base import Base
class ContactForm(Base):
    """Page object for the Gaia Contacts add/edit form.

    Exposes the form fields as properties (read) and ``type_*`` methods
    (write). Fields that are hidden behind an "add new ..." button go
    through ``_type_in_field``, which taps the button first and waits for
    the input to appear.
    """
    name = 'Contacts'
    # Locators for the always-visible name inputs.
    _given_name_locator = (By.ID, 'givenName')
    _family_name_locator = (By.ID, 'familyName')
    # Locators for the first entry ("_0") of each repeatable field group.
    _phone_locator = (By.ID, 'number_0')
    _email_locator = (By.ID, 'email_0')
    _street_locator = (By.ID, 'streetAddress_0')
    _zip_code_locator = (By.ID, 'postalCode_0')
    _city_locator = (By.ID, 'locality_0')
    _country_locator = (By.ID, 'countryName_0')
    _comment_locator = (By.ID, 'note_0')
    # Buttons that reveal the corresponding field group.
    _add_new_email_locator = (By.ID, 'add-new-email')
    _add_new_address_locator = (By.ID, 'add-new-address')
    _add_new_note_locator = (By.ID, 'add-new-note')
    _add_new_phone_locator = (By.ID, 'add-new-phone')
    _thumbnail_photo_locator = (By.ID, 'thumbnail-photo')
    @property
    def given_name(self):
        return self.marionette.find_element(*self._given_name_locator).text
    def type_given_name(self, value):
        element = self.marionette.find_element(*self._given_name_locator)
        element.clear()
        element.send_keys(value)
    @property
    def family_name(self):
        return self.marionette.find_element(*self._family_name_locator).text
    def type_family_name(self, value):
        element = self.marionette.find_element(*self._family_name_locator)
        element.clear()
        element.send_keys(value)
    @property
    def phone(self):
        return self.marionette.find_element(*self._phone_locator).text
    def _type_in_field(self, add_locator, field_locator, value):
        # Tap the "add new" button, then wait for the revealed input to be
        # present and displayed before clearing and typing into it.
        Wait(self.marionette).until(expected.element_present(*add_locator)).tap()
        element = Wait(self.marionette).until(expected.element_present(*field_locator))
        Wait(self.marionette).until(expected.element_displayed(element))
        element.clear()
        element.send_keys(value)
    def type_phone(self, value):
        self._type_in_field(self._add_new_phone_locator, self._phone_locator, value)
    @property
    def email(self):
        return self.marionette.find_element(*self._email_locator).text
    def type_email(self, value):
        self._type_in_field(self._add_new_email_locator, self._email_locator, value)
    @property
    def street(self):
        return self.marionette.find_element(*self._street_locator).text
    def type_street(self, value):
        self._type_in_field(self._add_new_address_locator, self._street_locator, value)
    @property
    def zip_code(self):
        return self.marionette.find_element(*self._zip_code_locator).text
    def type_zip_code(self, value):
        # Zip/city/country share the address group revealed by type_street,
        # so no "add new" tap is needed here.
        element = self.marionette.find_element(*self._zip_code_locator)
        element.clear()
        element.send_keys(value)
    @property
    def city(self):
        return self.marionette.find_element(*self._city_locator).text
    def type_city(self, value):
        element = self.marionette.find_element(*self._city_locator)
        element.clear()
        element.send_keys(value)
    @property
    def country(self):
        return self.marionette.find_element(*self._country_locator).text
    def type_country(self, value):
        element = self.marionette.find_element(*self._country_locator)
        element.clear()
        element.send_keys(value)
    @property
    def comment(self):
        return self.marionette.find_element(*self._comment_locator).text
    def type_comment(self, value):
        self._type_in_field(self._add_new_note_locator, self._comment_locator, value)
    def tap_comment(self):
        # Reveal the note field, scroll both the button and the field into
        # view, then focus the field with a tap.
        element = self.marionette.find_element(*self._add_new_note_locator)
        element.tap()
        self.marionette.execute_script(
            'arguments[0].scrollIntoView(true);', [element])
        element = self.marionette.find_element(*self._comment_locator)
        self.marionette.execute_script(
            'arguments[0].scrollIntoView(true);', [element])
        element.tap()
    @property
    def picture_style(self):
        # The contact photo is set as a CSS background-image on this element.
        return self.marionette.find_element(*self._thumbnail_photo_locator).get_attribute('style')
    def tap_picture(self):
        # Opens the system activity chooser (gallery/camera) for the photo.
        self.marionette.find_element(*self._thumbnail_photo_locator).tap()
        from gaiatest.apps.system.regions.activities import Activities
        return Activities(self.marionette)
    def wait_for_image_to_load(self):
        # The photo appears as a background-image once loaded.
        el = self.marionette.find_element(*self._thumbnail_photo_locator)
        Wait(self.marionette).until(lambda m: 'background-image' in el.get_attribute('style'))
class EditContact(ContactForm):
    """Page object for editing an existing contact.

    Extends ContactForm with update/cancel/delete actions; construction
    waits until the form has fully slid into view.
    """
    _update_locator = (By.ID, 'save-button')
    _cancel_locator = (By.ID, 'cancel-edit')
    _delete_locator = (By.ID, 'delete-contact')
    _delete_form_locator = (By.ID, 'confirmation-message')
    _cancel_delete_locator = (By.CSS_SELECTOR, 'form#confirmation-message button:not(.danger)')
    _confirm_delete_locator = (By.CSS_SELECTOR, 'form#confirmation-message button.danger')
    def __init__(self, marionette):
        ContactForm.__init__(self, marionette)
        # Wait for the slide-in animation: the update button reaches y == 0
        # only when the form is fully positioned.
        update = Wait(self.marionette).until(expected.element_present(*self._update_locator))
        Wait(self.marionette).until(lambda m: update.location['y'] == 0 and update.is_displayed())
    def tap_update(self, return_details=True):
        """Save the edits; returns ContactDetails unless return_details is False."""
        self.wait_for_update_button_enabled()
        update = self.marionette.find_element(*self._update_locator)
        update.tap()
        if return_details:
            from gaiatest.apps.contacts.regions.contact_details import ContactDetails
            return ContactDetails(self.marionette)
        else:
            # else we drop back to the underlying app
            from gaiatest.apps.contacts.app import Contacts
            Contacts(self.marionette).wait_to_not_be_displayed()
            self.apps.switch_to_displayed_app()
    def tap_cancel(self):
        """Discard the edits and return to the contact details view."""
        self.marionette.find_element(*self._cancel_locator).tap()
        from gaiatest.apps.contacts.regions.contact_details import ContactDetails
        return ContactDetails(self.marionette)
    def tap_delete(self):
        """Tap Delete; the confirmation dialog handling is separate."""
        delete_item = self.marionette.find_element(*self._delete_locator)
        # The delete button sits at the bottom of the form; scroll first.
        self.marionette.execute_script(
            'arguments[0].scrollIntoView(true);', [delete_item])
        delete_item.tap()
    def tap_cancel_delete(self):
        # Dismiss the delete confirmation dialog without deleting.
        Wait(self.marionette).until(expected.element_displayed(*self._delete_form_locator))
        self.marionette.find_element(*self._cancel_delete_locator).tap()
    def tap_confirm_delete(self):
        # Confirm deletion in the confirmation dialog.
        Wait(self.marionette).until(expected.element_displayed(*self._delete_form_locator))
        self.marionette.find_element(*self._confirm_delete_locator).tap()
    def wait_for_update_button_enabled(self):
        update = self.marionette.find_element(*self._update_locator)
        Wait(self.marionette).until(expected.element_enabled(update))
class NewContact(ContactForm):
    """Page object for creating a new contact.

    Can be opened from the Contacts app directly or as an activity window
    launched by another app (see switch_to_new_contact_form / wait_for_done).
    """
    _src = 'app://communications.gaiamobile.org/contacts/views/form/form.html'
    _done_button_locator = (By.ID, 'save-button')
    def __init__(self, marionette):
        ContactForm.__init__(self, marionette)
    def switch_to_new_contact_form(self):
        # When NewContact form is called as an ActivityWindow
        Wait(self.marionette).until(lambda m: self.apps.displayed_app.src == self._src)
        self.apps.switch_to_displayed_app()
        self.wait_for_new_contact_form_to_load()
    def wait_for_new_contact_form_to_load(self):
        # The form has slid fully into place when the done button is at y == 0.
        done = self.marionette.find_element(*self._done_button_locator)
        Wait(self.marionette).until(lambda m: done.location['y'] == 0)
    def tap_done(self, return_contacts=True):
        """Save the new contact; see wait_for_done for the return value."""
        element = self.marionette.find_element(*self._done_button_locator)
        self.tap_element_from_system_app(element, add_statusbar_height=True)
        return self.wait_for_done(return_contacts)
    def a11y_click_done(self, return_contacts=True):
        """Save the new contact using the accessibility (screen-reader) click."""
        element = self.marionette.find_element(*self._done_button_locator)
        self.accessibility.click(element)
        Wait(self.marionette).until(expected.element_not_displayed(element))
        return self.wait_for_done(return_contacts)
    def wait_for_done(self, return_contacts=True):
        # NewContact can be opened as an Activity from other apps. In this scenario we don't return Contacts
        if return_contacts:
            self.apps.switch_to_displayed_app()
            from gaiatest.apps.contacts.app import Contacts
            return Contacts(self.marionette)
        else:
            Wait(self.marionette).until(lambda m: self.apps.displayed_app.name != 'Communications')
            # Fall back to the underlying app
            self.apps.switch_to_displayed_app()
| 39.275556 | 108 | 0.708272 |
6b62dae6252345752a38a3ea9471c9cdad5931a5 | 8,393 | py | Python | pythonFiles/jedi/parser_utils.py | geecodecommunity/GeeCode | 18a4441f19ad4f61e760811e4b637d08c4b7ba37 | [
"MIT"
] | null | null | null | pythonFiles/jedi/parser_utils.py | geecodecommunity/GeeCode | 18a4441f19ad4f61e760811e4b637d08c4b7ba37 | [
"MIT"
] | 6 | 2020-07-17T08:45:58.000Z | 2022-03-25T18:44:47.000Z | pythonFiles/jedi/parser_utils.py | geecodecommunity/GeeCode | 18a4441f19ad4f61e760811e4b637d08c4b7ba37 | [
"MIT"
] | null | null | null | import re
import textwrap
from inspect import cleandoc
from parso.python import tree
from parso.cache import parser_cache
from jedi._compatibility import literal_eval, force_unicode
# Parso node types whose evaluation matters for static analysis; consulted
# by get_executable_nodes() below.
_EXECUTE_NODES = {'funcdef', 'classdef', 'import_from', 'import_name', 'test',
                  'or_test', 'and_test', 'not_test', 'comparison', 'expr',
                  'xor_expr', 'and_expr', 'shift_expr', 'arith_expr',
                  'atom_expr', 'term', 'factor', 'power', 'atom'}
# Keywords that open a branch of a flow statement; consulted by
# get_flow_branch_keyword() below.
_FLOW_KEYWORDS = (
    'try', 'except', 'finally', 'else', 'if', 'elif', 'with', 'for', 'while'
)
def get_executable_nodes(node, last_added=False):
    """
    For static analysis: recursively collect nodes worth evaluating.

    Gathers name reads (excluding parameters and assignment targets),
    expression statements (plus their sub-expressions), decorator call
    targets and other node types listed in ``_EXECUTE_NODES``.

    ``last_added`` tracks whether an ancestor of the current node was
    already collected, so statements and their sub-expressions are not
    both added.
    """
    result = []
    typ = node.type
    if typ == 'name':
        next_leaf = node.get_next_leaf()
        # Only collect name *reads*: skip parameters and the left-hand
        # side of simple assignments (name followed by '=').
        if last_added is False and node.parent.type != 'param' and next_leaf != '=':
            result.append(node)
    elif typ == 'expr_stmt':
        # I think evaluating the statement (and possibly returned arrays),
        # should be enough for static analysis.
        result.append(node)
        for child in node.children:
            result += get_executable_nodes(child, last_added=True)
    elif typ == 'decorator':
        # decorator
        if node.children[-2] == ')':
            # Called decorator: descend into the callee expression
            # (children[-3] is the part before the '(' of the call).
            node = node.children[-3]
            if node != '(':
                result += get_executable_nodes(node)
    else:
        try:
            children = node.children
        except AttributeError:
            pass
        else:
            if node.type in _EXECUTE_NODES and not last_added:
                result.append(node)
            for child in children:
                result += get_executable_nodes(child, last_added)
    return result
def get_comp_fors(comp_for):
    """Yield *comp_for* and every ``comp_for`` nested in its trailing chain.

    A comprehension's extra ``for``/``if`` clauses hang off the last child
    of the previous clause; walk that chain, yielding only the ``comp_for``
    nodes and stopping at the first node that is neither ``comp_for`` nor
    ``comp_if``.
    """
    yield comp_for
    current = comp_for.children[-1]
    while current.type in ('comp_for', 'comp_if'):
        if current.type == 'comp_for':
            yield current
        current = current.children[-1]
def for_stmt_defines_one_name(for_stmt):
    """
    Returns True if only one name is returned: ``for x in y``.
    Returns False if the for loop is more complicated: ``for x, z in y``.

    :returns: bool
    """
    # children[1] is the loop target; a single name has type 'name',
    # while tuple targets are wrapped in a different node type.
    target = for_stmt.children[1]
    return target.type == 'name'
def get_flow_branch_keyword(flow_node, node):
    """
    Return the branch keyword leaf (``if``/``elif``/``else``/``except``/...)
    of *flow_node* whose branch contains *node*.

    Raises ValueError if *node* does not lie within *flow_node*. Returns
    None when *node* precedes the first branch keyword.
    """
    start_pos = node.start_pos
    if not (flow_node.start_pos < start_pos <= flow_node.end_pos):
        raise ValueError('The node is not part of the flow.')
    keyword = None
    for i, child in enumerate(flow_node.children):
        if start_pos < child.start_pos:
            # We walked past the node: the last keyword seen governs it.
            return keyword
        first_leaf = child.get_first_leaf()
        if first_leaf in _FLOW_KEYWORDS:
            keyword = first_leaf
    # NOTE(review): returns the integer 0 (not None) when the node sits at
    # the very end of the flow node; callers appear to treat this as
    # falsy/"no keyword" — confirm before changing.
    return 0
def get_statement_of_position(node, pos):
    """
    Return the deepest statement node covering position *pos* (a
    (line, column) tuple), descending past wrapper nodes, suites, flows
    and class/function definitions. Returns None when no child covers
    the position.
    """
    for c in node.children:
        if c.start_pos <= pos <= c.end_pos:
            # Wrapper/scope nodes are not statements themselves; recurse
            # into them to find the actual statement.
            if c.type not in ('decorated', 'simple_stmt', 'suite',
                              'async_stmt', 'async_funcdef') \
                    and not isinstance(c, (tree.Flow, tree.ClassOrFunc)):
                return c
            else:
                try:
                    return get_statement_of_position(c, pos)
                except AttributeError:
                    pass  # Must be a non-scope
    return None
def clean_scope_docstring(scope_node):
    """Return the scope's docstring as a cleaned-up unicode string.

    The docstring token is literal-evaluated, de-indented with
    ``inspect.cleandoc`` and forced to unicode. Returns ``''`` when the
    scope has no docstring.
    """
    doc_node = scope_node.get_doc_node()
    if doc_node is None:
        return ''
    # TODO: adjacent string literals (``'foo' 'bar'``) that together form
    # the docstring are not merged here; only the first token is used.
    literal = safe_literal_eval(doc_node.value)
    # force_unicode keeps the docstring output unicode on Python 2 as well.
    return force_unicode(cleandoc(literal))
def safe_literal_eval(value):
    """Evaluate a string-literal token, returning ``''`` when impossible.

    f-string literals cannot be resolved by ``literal_eval`` and are not
    handled manually yet, so they yield an empty string; so do literals
    whose prefix combination raises SyntaxError (e.g. ``rb''`` on old
    Python 2 grammars).
    """
    prefix = value[:2].lower()
    if prefix[0] == 'f' or prefix in ('fr', 'rf'):
        # literal_eval cannot resolve f-literals; resolving them manually
        # is not implemented, so fall back to an empty string.
        return ''
    try:
        return literal_eval(value)
    except SyntaxError:
        # Some prefix orders/combinations were only legal (or illegal) on
        # certain Python versions; treat them as empty instead of crashing.
        return ''
def get_call_signature(funcdef, width=72, call_string=None):
    """
    Generate the call signature of this function as a string.

    :param width: Fold lines if a line is longer than this value.
    :type width: int
    :param call_string: Override the function name when given; defaults to
        the funcdef's own name (or ``<lambda>`` for lambdas).
    :type call_string: str
    :rtype: str
    """
    # Lambdas have no name.
    if call_string is None:
        if funcdef.type == 'lambdef':
            call_string = '<lambda>'
        else:
            call_string = funcdef.name.value
    if funcdef.type == 'lambdef':
        # Lambdas have no parenthesized parameter list; synthesize one.
        p = '(' + ''.join(param.get_code() for param in funcdef.get_params()).strip() + ')'
    else:
        # children[2] is the parenthesized parameter list of a funcdef.
        p = funcdef.children[2].get_code()
    # Collapse all whitespace runs so the signature fits on logical lines.
    p = re.sub(r'\s+', ' ', p)
    if funcdef.annotation:
        rtype = " ->" + funcdef.annotation.get_code()
    else:
        rtype = ""
    code = call_string + p + rtype
    return '\n'.join(textwrap.wrap(code, width))
def get_doc_with_call_signature(scope_node):
    """Return the docstring of *scope_node*, prefixed by its call signature.

    Classes use the ``__init__`` signature rendered under the class name;
    functions and lambdas use their own. Whichever of signature/docstring
    is missing, the other is returned alone.
    """
    signature = None
    node_type = scope_node.type
    if node_type == 'classdef':
        # A class is "called" via __init__, displayed under the class name.
        for funcdef in scope_node.iter_funcdefs():
            if funcdef.name.value == '__init__':
                signature = get_call_signature(
                    funcdef, call_string=scope_node.name.value)
    elif node_type in ('funcdef', 'lambdef'):
        signature = get_call_signature(scope_node)
    doc = clean_scope_docstring(scope_node)
    if signature is None:
        return doc
    if not doc:
        return signature
    return '%s\n\n%s' % (signature, doc)
def move(node, line_offset):
    """Shift the line number of *node* and all nodes below it by *line_offset*.

    Inner nodes are recursed into; leaves (which have no ``children``)
    get their ``line`` attribute adjusted in place.
    """
    if hasattr(node, 'children'):
        for child in node.children:
            move(child, line_offset)
    else:
        node.line += line_offset
def get_following_comment_same_line(node):
    """
    returns (as string) any comment that appears on the same line,
    after the node, including the #

    Returns None when there is no such comment or the tree cannot be
    navigated from this node.
    """
    try:
        # For compound statements the trailing comment lives in the prefix
        # of the first leaf of the suite (the child index depends on the
        # statement's grammar shape).
        if node.type == 'for_stmt':
            whitespace = node.children[5].get_first_leaf().prefix
        elif node.type == 'with_stmt':
            whitespace = node.children[3].get_first_leaf().prefix
        elif node.type == 'funcdef':
            # actually on the next line
            whitespace = node.children[4].get_first_leaf().get_next_leaf().prefix
        else:
            whitespace = node.get_last_leaf().get_next_leaf().prefix
    except AttributeError:
        return None
    except ValueError:
        # TODO in some particular cases, the tree doesn't seem to be linked
        # correctly
        return None
    if "#" not in whitespace:
        return None
    # Cut the prefix down to just the comment: from '#' up to the first
    # line terminator (either \r or \n).
    comment = whitespace[whitespace.index("#"):]
    if "\r" in comment:
        comment = comment[:comment.index("\r")]
    if "\n" in comment:
        comment = comment[:comment.index("\n")]
    return comment
def is_scope(node):
    """Return True when *node* opens a new scope (module, class, function,
    lambda or comprehension)."""
    scope_types = ('file_input', 'classdef', 'funcdef', 'lambdef', 'comp_for')
    return node.type in scope_types
def get_parent_scope(node, include_flows=False):
    """
    Returns the underlying scope: the nearest enclosing scope node of
    *node* (see ``is_scope``), or None for nodes outside any scope.

    With ``include_flows=True``, an enclosing flow node (if/for/try/...)
    is returned instead when it is encountered first.
    """
    scope = node.parent
    while scope is not None:
        if include_flows and isinstance(scope, tree.Flow):
            return scope
        if is_scope(scope):
            break
        scope = scope.parent
    return scope
def get_cached_code_lines(grammar, path):
    """
    Basically access the cached code lines in parso. This is not the nicest way
    to do this, but we avoid splitting all the lines again.

    Raises KeyError if *path* was never parsed with this grammar (i.e. it
    has no entry in parso's parser_cache).
    """
    return parser_cache[grammar._hashed][path].lines
b7d82876024b11295cfa5ac9e8fd9a63441bd9da | 5,771 | py | Python | wagtail/core/templatetags/wagtailcore_tags.py | samgans/wagtail | 48a8af71e5333fb701476702bd784fa407567e25 | [
"BSD-3-Clause"
] | 2 | 2019-05-23T01:31:18.000Z | 2020-06-27T21:19:10.000Z | wagtail/core/templatetags/wagtailcore_tags.py | samgans/wagtail | 48a8af71e5333fb701476702bd784fa407567e25 | [
"BSD-3-Clause"
] | 6 | 2020-08-26T03:00:03.000Z | 2020-09-24T02:59:14.000Z | wagtail/core/templatetags/wagtailcore_tags.py | samgans/wagtail | 48a8af71e5333fb701476702bd784fa407567e25 | [
"BSD-3-Clause"
] | 1 | 2021-02-15T18:59:53.000Z | 2021-02-15T18:59:53.000Z | from django import template
from django.shortcuts import reverse
from django.template.defaulttags import token_kwargs
from django.template.loader import render_to_string
from django.utils.encoding import force_str
from wagtail import VERSION, __version__
from wagtail.core.models import Page, Site
from wagtail.core.rich_text import RichText, expand_db_html
from wagtail.utils.version import get_main_version
register = template.Library()
@register.simple_tag(takes_context=True)
def pageurl(context, page, fallback=None):
    """
    Output a page's URL: relative (/foo/bar/) when the page belongs to the
    same site as the current request, absolute (http://example.com/foo/bar/)
    otherwise.

    When *page* is None and *fallback* names a view, that view's URL is
    returned instead.
    """
    if page is None and fallback:
        return reverse(fallback)
    if not hasattr(page, 'relative_url'):
        raise ValueError("pageurl tag expected a Page object, got %r" % page)
    try:
        current_site = Site.find_for_request(context['request'])
    except KeyError:
        # No request in the template context; fall back on page.url.
        return page.url
    if current_site is None:
        # The request does not match any recognised site; fall back on
        # page.url.
        return page.url
    # Hand the request through so relative_url can reuse a cached copy of
    # Site.get_site_root_paths() instead of re-fetching it from the
    # database/cache on every call.
    return page.relative_url(current_site, request=context.get('request'))
@register.simple_tag(takes_context=True)
def slugurl(context, slug):
    """
    Return the URL for the page that has the given slug.

    The current request's site is searched first; when that fails (or no
    request is available in the context) the first page matching the slug
    on any site is used. Returns None when nothing matches.
    """
    page = None
    try:
        current_site = Site.find_for_request(context['request'])
    except KeyError:
        # No request / site object found - fall through to the global lookup.
        current_site = None
    if current_site is not None:
        page = Page.objects.in_site(current_site).filter(slug=slug).first()
    if page is None:
        # Nothing on the current site: search the whole tree.
        page = Page.objects.filter(slug=slug).first()
    if page:
        # Go through pageurl() so its relative/absolute handling applies.
        return pageurl(context, page)
@register.simple_tag
def wagtail_version():
    # Expose the full Wagtail version string (e.g. "2.11.3") to templates.
    return __version__
@register.simple_tag
def wagtail_documentation_path():
    """
    Return the base URL of the Wagtail docs matching this build: the
    version-pinned docs for a final release, the latest docs otherwise.
    """
    _major, _minor, _patch, release, _num = VERSION
    if release != 'final':
        return 'https://docs.wagtail.io/en/latest'
    return 'https://docs.wagtail.io/en/v%s' % __version__
@register.simple_tag
def wagtail_release_notes_path():
    # Release notes are published per main version, e.g. "2.11.html".
    return "%s.html" % get_main_version(VERSION)
@register.filter
def richtext(value):
    """
    Template filter that renders a rich-text database representation
    (a string with embedded entity references) as front-end HTML.

    RichText values pass through untouched, None renders as empty, and
    any other non-string value raises TypeError.
    """
    if isinstance(value, RichText):
        # passing a RichText value through the |richtext filter should have no effect
        return value
    elif value is None:
        html = ''
    else:
        if isinstance(value, str):
            # Rewrite internal entity references (page links, embeds) into
            # front-end HTML.
            html = expand_db_html(value)
        else:
            raise TypeError("'richtext' template filter received an invalid value; expected string, got {}.".format(type(value)))
    return render_to_string('wagtailcore/shared/richtext.html', {'html': html})
class IncludeBlockNode(template.Node):
    """Template node backing the ``include_block`` tag."""

    def __init__(self, block_var, extra_context, use_parent_context):
        self.block_var = block_var
        self.extra_context = extra_context
        self.use_parent_context = use_parent_context

    def render(self, context):
        try:
            value = self.block_var.resolve(context)
        except template.VariableDoesNotExist:
            # Mirror Django's {% include %}: an unresolvable variable
            # renders as the empty string rather than raising.
            return ''
        if not hasattr(value, 'render_as_block'):
            # Plain values are coerced to text, like ordinary {{ var }} output.
            return force_str(value)
        render_context = context.flatten() if self.use_parent_context else {}
        if self.extra_context:
            # Resolve each ``with`` keyword argument against the outer context.
            for name, filter_expr in self.extra_context.items():
                render_context[name] = filter_expr.resolve(context)
        return value.render_as_block(context=render_context)
@register.tag
def include_block(parser, token):
    """
    Render the passed item of StreamField content, passing the current template context
    if there's an identifiable way of doing so (i.e. if it has a `render_as_block` method).

    Syntax: {% include_block value [with k=v ...] [only] %}
    """
    bits = token.split_contents()
    try:
        tag_name = bits.pop(0)
        target_token = bits.pop(0)
    except IndexError:
        raise template.TemplateSyntaxError("%r tag requires at least one argument" % tag_name)
    block_var = parser.compile_filter(target_token)

    # Optional "with k=v ..." clause supplying extra context variables.
    extra_context = None
    if bits and bits[0] == 'with':
        bits.pop(0)
        extra_context = token_kwargs(bits, parser)

    # Optional trailing "only" suppresses inheritance of the parent context.
    if bits and bits[0] == 'only':
        bits.pop(0)
        use_parent_context = False
    else:
        use_parent_context = True

    if bits:
        raise template.TemplateSyntaxError("Unexpected argument to %r tag: %r" % (tag_name, bits[0]))
    return IncludeBlockNode(block_var, extra_context, use_parent_context)
@register.simple_tag(takes_context=True)
def wagtail_site(context):
    """
    Returns the Site object for the given request
    """
    try:
        request = context['request']
    except KeyError:
        # Rendered outside a request/response cycle (e.g. management command
        # or email template); there is no site to resolve.
        return None
    return Site.find_for_request(request=request)
| 31.883978 | 129 | 0.678045 |
e77ab1372b81027c5baf21b5ea7a72a0d2d7573d | 12,105 | py | Python | model.py | rafaie/cifar-100 | e61e1e4265b4d4e3f3978d8522bcb81fc4ac0fbf | [
"Apache-2.0"
] | 1 | 2020-08-26T09:56:54.000Z | 2020-08-26T09:56:54.000Z | model.py | rafaie/cifar-100 | e61e1e4265b4d4e3f3978d8522bcb81fc4ac0fbf | [
"Apache-2.0"
] | null | null | null | model.py | rafaie/cifar-100 | e61e1e4265b4d4e3f3978d8522bcb81fc4ac0fbf | [
"Apache-2.0"
] | 1 | 2019-11-06T10:32:58.000Z | 2019-11-06T10:32:58.000Z | """
model.py: it includes the base model and other models
"""
import tensorflow as tf
import numpy as np
import os
import functools
# import hooks
import util
tf.logging.set_verbosity(tf.logging.INFO)  # emit INFO-level training/eval progress logs
class BaseConfig(object):
    """Shared hyper-parameter/configuration bundle for the models.

    The model directory name encodes the model name, whether image
    augmentation and a dynamic learning rate are enabled, the batch size
    and a sanitised form of the data path, so different experiment setups
    never overwrite each other's checkpoints.
    """

    def __init__(self, name, data_path, img_augmentation, dynamic_learning_rate, batch_size):
        self.name = name
        self.learning_rate = 0.001

        # Build a unique checkpoint directory per experiment configuration.
        dir_parts = ['./' + name]
        if img_augmentation is True:
            dir_parts.append('aug')
        if dynamic_learning_rate is True:
            dir_parts.append('dyn')
        dir_parts.append(str(batch_size))
        dir_parts.append(data_path.replace('.', '').replace('\\', '').replace('/', ''))
        self.model_dir = '_'.join(dir_parts)
        os.makedirs(self.model_dir, exist_ok=True)

        # `tf.data.Dataset` pipeline settings.
        self.shuffle_and_repeat = True
        self.shuffle_buffer_size = 10000
        self.batch_size = batch_size
        self.decay_rate = 0.1
        self.decay_steps = 10000
        self.constant_steps = 20000
        self.img_augmentation = img_augmentation

        # Training-loop / estimator settings.
        self.keep_checkpoint_max = 10
        self.save_checkpoints_steps = 500
        self.stop_if_no_increase_hook_max_steps_without_increase = 5000
        self.stop_if_no_increase_hook_min_steps = 50000
        self.stop_if_no_increase_hook_run_every_secs = 120
        self.save_summary_steps = 100
        self.num_epochs = 20000
        self.throttle_secs = 0
        self.wit_hook = True
        self.dynamic_learning_rate = dynamic_learning_rate
        self.data_path = data_path
        self.learning_rate_warm_up_step = 10000
        self.max_steps = 0
class BaseModel(object):
    """Base class wiring a concrete model into the tf.estimator loop.

    Subclasses must implement:
      * init_config: build a BaseConfig and assign it to self.config.
      * model_fn: build the network and return (predictions, loss, eval_metrics).
    The input pipeline, hooks, optimizer and train/evaluate loop live here.
    """
    def __init__(self, data_path=None,
                 img_augmentation=False,
                 dynamic_learning_rate=False,
                 batch_size=120,
                 params=None):
        # Populated by the subclass hook init_config() below.
        self.config = None
        self.init_config(data_path, img_augmentation, dynamic_learning_rate,
                         batch_size, params)
    def do_augmentation(self, image, lable):
        # Pad to 37x37, take a random 32x32 crop, then random horizontal
        # flip. The label ('lable' [sic]) passes through unchanged.
        image = tf.image.resize_image_with_crop_or_pad(image, 32 + 5, 32 + 5)
        image = tf.random_crop(image, [32, 32, 3])
        image = tf.image.random_flip_left_right(image)
        return image, lable
    def load_dataset(self, dataset, mode):
        """Apply mode-specific shuffle/augment/batch steps and return the
        next-element op of a one-shot iterator over the dataset."""
        # with tf.device(tf.DeviceSpec(device_type="CPU", device_index=0)):
        if mode == tf.estimator.ModeKeys.TRAIN:
            if self.config.shuffle_and_repeat is True:
                dataset = dataset.shuffle(self.config.shuffle_buffer_size).repeat(
                    self.config.num_epochs)
            if self.config.img_augmentation == True:
                print('img_augmentation is activated')
                dataset = dataset.map(self.do_augmentation, num_parallel_calls=4)
            dataset = dataset.batch(self.config.batch_size)
        elif mode == tf.estimator.ModeKeys.EVAL:
            dataset = dataset.batch(self.config.batch_size)
        ds_iter = dataset.make_one_shot_iterator()
        return ds_iter.get_next()
    def train_and_evaluate(self, ds_train, ds_eval):
        """Run tf.estimator.train_and_evaluate over the two datasets."""
        # Prepare dataset
        it_train = functools.partial(self.load_dataset, ds_train, tf.estimator.ModeKeys.TRAIN)
        it_eval = functools.partial(self.load_dataset, ds_eval, tf.estimator.ModeKeys.EVAL)
        # Session Cconfiguration
        session_config = tf.ConfigProto()
        session_config.allow_soft_placement = True
        session_config.gpu_options.allow_growth = True
        cfg = tf.estimator.RunConfig(model_dir=self.config.model_dir,
                                     save_summary_steps=self.config.save_summary_steps,
                                     save_checkpoints_steps=self.config.save_checkpoints_steps,
                                     save_checkpoints_secs=None,
                                     session_config=session_config,
                                     keep_checkpoint_max=self.config.keep_checkpoint_max)
        estimator = tf.estimator.Estimator(model_fn = self._model_fn,
                                           config=cfg)
        train_hooks, eval_hooks = self.get_hooks(estimator)
        train_spec = tf.estimator.TrainSpec(input_fn=it_train, hooks=train_hooks, max_steps=self.config.max_steps)
        eval_spec = tf.estimator.EvalSpec(input_fn=it_eval, hooks=eval_hooks, throttle_secs=self.config.throttle_secs)
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
    def get_hooks(self, estimator):
        """Build the (train_hooks, eval_hooks) pair used by the specs above."""
        train_hooks =[
            util.ExamplesPerSecondHook(
                batch_size=self.config.batch_size,
                every_n_iter=self.config.save_summary_steps),
            util.LoggingTensorHook(
                collection="batch_logging",
                every_n_iter=self.config.save_summary_steps,
                batch=True),
            util.LoggingTensorHook(
                collection="logging",
                every_n_iter=self.config.save_summary_steps,
                batch=False),
            # Early-stop when eval "accuracy" stalls for too long.
            tf.contrib.estimator.stop_if_no_increase_hook(
                estimator, "accuracy",
                max_steps_without_increase=self.config.stop_if_no_increase_hook_max_steps_without_increase,
                min_steps = self.config.stop_if_no_increase_hook_min_steps)]
        eval_hooks = [
            util.SummarySaverHook(every_n_iter=self.config.save_summary_steps,
                                  output_dir=os.path.join(self.config.model_dir, "eval"))]
        return (train_hooks, eval_hooks)
    def get_learning_rate(self):
        """Return the learning rate (constant, or step-decayed after warm-up)."""
        if self.config.dynamic_learning_rate is False:
            return self.config.learning_rate
        # Exponenetial decay
        step = tf.to_float(tf.train.get_or_create_global_step())
        learning_rate = self.config.learning_rate
        # NOTE(review): `step` is a graph Tensor here, so `step > ...` yields
        # a Tensor and using it in a Python `if` raises in TF1 graph mode.
        # The dynamic_learning_rate path likely never ran as intended —
        # confirm, and consider tf.cond / tf.train.exponential_decay.
        if step > self.config.learning_rate_warm_up_step:
            learning_rate *= 0.35 ** (step // 10000)
        return learning_rate
    def _model_fn(self, features, labels, mode, params={}):
        """Estimator model_fn wrapper: adds optimizer, regularization and
        update-op handling around the subclass's model_fn."""
        global_step = tf.train.get_or_create_global_step()
        learning_rate = self.get_learning_rate()
        opt = tf.train.AdamOptimizer(learning_rate)
        predictions, loss, eval_metrics = self.model_fn(features, labels, mode, params)
        losses = [loss]
        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        loss = None
        # `losses` always has one element; the guard mirrors the reg_losses one.
        if losses:
            loss = tf.add_n(losses) / len(losses)
            tf.summary.scalar("loss/main", tf.add_n(losses))
        if reg_losses:
            loss += tf.add_n(reg_losses)
            tf.summary.scalar("loss/regularization", tf.add_n(reg_losses))
        if mode == tf.estimator.ModeKeys.TRAIN:
            # Run UPDATE_OPS (e.g. batch-norm moving averages) before the step.
            with tf.control_dependencies(update_ops):
                train_op = opt.minimize(loss,
                                        global_step=global_step,
                                        colocate_gradients_with_ops=True)
            opts = tf.profiler.ProfileOptionBuilder().trainable_variables_parameter()
            stats = tf.profiler.profile(tf.get_default_graph(), options=opts)
            print("Total parameters:", stats.total_parameters)
        else:
            train_op = None
        return tf.estimator.EstimatorSpec(mode=mode,
                                          predictions=predictions,
                                          loss=loss,
                                          train_op=train_op,
                                          eval_metric_ops=eval_metrics)
    def init_config(self, data_path, img_augmentation, dynamic_learning_rate,
                    batch_size, params=None):
        # Subclass hook: must assign a BaseConfig to self.config.
        raise NotImplementedError
    def model_fn(self, features, labels, mode, params={}):
        # Subclass hook: return (predictions_dict, loss, eval_metric_ops).
        raise NotImplementedError
# Implementation of ResNet-32
class Resnet(BaseModel):
    """ResNet-32 classifier (three stages of five residual blocks) over 100 classes."""

    def init_config(self, data_path, img_augmentation, dynamic_learning_rate, batch_size, params=None):
        config = BaseConfig('Resnet', data_path,
                            img_augmentation, dynamic_learning_rate, batch_size)
        config.weight_decay = 0.0002
        config.drop_rate = 0.3
        config.normalization_val = 1
        self.config = config

    def model_fn(self, images, labels, mode, params):
        """Build the ResNet graph; return (predictions, loss, eval_metrics)."""
        training = mode == tf.estimator.ModeKeys.TRAIN
        dropout = self.config.drop_rate if training else 0.0
        decay = self.config.weight_decay

        # Scale inputs and name the node so a frozen graph can be fed here.
        net = tf.divide(images,
                        tf.constant(self.config.normalization_val, tf.float32),
                        name='input_placeholder')
        net = util.conv_layers(net, [16], [3], linear_top_layer=True,
                               weight_decay=decay)
        net = util.resnet_blocks(net, [16, 32, 64], [1, 2, 2], 5,
                                 training=training,
                                 weight_decay=decay,
                                 drop_rates=dropout)
        net = util.batch_normalization(net, training=training)
        net = tf.nn.relu(net)

        logits = util.dense_layers(net, [100],
                                   linear_top_layer=False,
                                   weight_decay=decay)
        # Average over the remaining spatial axes, then name the output node.
        logits = tf.reduce_mean(logits, axis=[1, 2])
        logits = tf.identity(logits, name='output')

        predictions = tf.argmax(logits, axis=-1)
        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
        eval_metrics = {
            "accuracy": tf.metrics.accuracy(labels, predictions),
            "top_1_error": tf.metrics.mean(util.top_k_error(labels, logits, 1)),
        }
        return {"predictions": predictions}, loss, eval_metrics
class AlexNet(BaseModel):
    """AlexNet-style convolutional classifier over 100 classes."""

    def init_config(self, data_path, img_augmentation, dynamic_learning_rate, batch_size, params=None):
        config = BaseConfig('AlexNet', data_path,
                            img_augmentation, dynamic_learning_rate, batch_size)
        config.weight_decay = 0.002
        config.drop_rate = 0.5
        config.normalization_val = 1
        self.config = config

    def model_fn(self, images, labels, mode, params):
        """Build the AlexNet graph; return (predictions, loss, eval_metrics)."""
        training = mode == tf.estimator.ModeKeys.TRAIN
        dropout = self.config.drop_rate if training else 0.0

        # Scale inputs and name the node so a frozen graph can be fed here.
        net = tf.divide(images,
                        tf.constant(self.config.normalization_val, tf.float32),
                        name='input_placeholder')
        net = util.conv_layers(net,
                               filters=[64, 192, 384, 256, 256],
                               kernels=[3, 3, 3, 3, 3],
                               pool_sizes=[2, 2, 2, 2, 2])
        net = tf.contrib.layers.flatten(net)
        logits = util.dense_layers(net, [512, 100],
                                   drop_rates=dropout,
                                   linear_top_layer=True)
        logits = tf.identity(logits, name='output')

        predictions = tf.argmax(logits, axis=-1)
        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
        tf.summary.image("images", images)
        eval_metrics = {
            "accuracy": tf.metrics.accuracy(labels, predictions),
            "top_1_error": tf.metrics.mean(util.top_k_error(labels, logits, 1)),
        }
        # Expose labels/predictions to the "batch_logging" hook collection.
        tf.add_to_collection(
            "batch_logging", tf.identity(labels, name="labels"))
        tf.add_to_collection(
            "batch_logging", tf.identity(predictions, name="predictions"))
        return {"predictions": predictions}, loss, eval_metrics
| 41.033898 | 118 | 0.606526 |
735b7c35f51ac23df7dff99004bf606b88b801d1 | 2,389 | py | Python | common/test_factors.py | plilja/project-euler | 646d1989cf15e903ef7e3c6e487284847d522ec9 | [
"Apache-2.0"
] | null | null | null | common/test_factors.py | plilja/project-euler | 646d1989cf15e903ef7e3c6e487284847d522ec9 | [
"Apache-2.0"
] | null | null | null | common/test_factors.py | plilja/project-euler | 646d1989cf15e903ef7e3c6e487284847d522ec9 | [
"Apache-2.0"
] | null | null | null | import unittest
from factors import *
class TestPrimeFactors(unittest.TestCase):
def test_prime_factors_of_1_is_1(self):
self.assertEqual(prime_factors(1), [1])
def test_prime_factors_of_prime_number_is_the_prime_number(self):
self.assertEqual(prime_factors(2), [2])
self.assertEqual(prime_factors(7), [7])
self.assertEqual(prime_factors(17), [17])
def test_prime_factors_of_4_is_2_and_2(self):
self.assertEqual(prime_factors(4), [2, 2])
def test_prime_factors_of_10_is_2_and_5(self):
self.assertEqual(sorted(prime_factors(10)), [2, 5])
def test_prime_factors_of_300_is_2_3_and_5(self):
self.assertEqual(sorted(prime_factors(30)), [2, 3, 5])
class TestPrimeFactorization(unittest.TestCase):
def test_prime_factorization(self):
self.assertEqual(prime_factorization(1), {1: 1})
self.assertEqual(prime_factorization(2), {2: 1})
self.assertEqual(prime_factorization(3), {3: 1})
self.assertEqual(prime_factorization(4), {2: 2})
self.assertEqual(prime_factorization(5), {5: 1})
self.assertEqual(prime_factorization(6), {2: 1, 3: 1})
self.assertEqual(prime_factorization(8), {2: 3})
self.assertEqual(prime_factorization(24), {2: 3, 3: 1})
class TestAllDivisors(unittest.TestCase):
def test_all_divisors(self):
self.assertEqual(all_divisors(1), {1})
self.assertEqual(all_divisors(2), {1, 2})
self.assertEqual(all_divisors(3), {1, 3})
self.assertEqual(all_divisors(4), {1, 2, 4})
self.assertEqual(all_divisors(6), {1, 2, 3, 6})
self.assertEqual(all_divisors(8), {1, 2, 4, 8})
self.assertEqual(all_divisors(24), {1, 2, 3, 4, 6, 8, 12, 24})
def test_all_factors_big_number(self):
self.assertEqual(len(all_divisors(76576500)), 576)
self.assertEqual(len(all_divisors(76576500000)), 2016)
class TestAllProperDivisors(unittest.TestCase):
def test_all_divisors(self):
self.assertEqual(all_proper_divisors(1), {1})
self.assertEqual(all_proper_divisors(2), {1})
self.assertEqual(all_proper_divisors(3), {1})
self.assertEqual(all_proper_divisors(4), {1, 2})
self.assertEqual(all_proper_divisors(8), {1, 2, 4})
self.assertEqual(all_proper_divisors(12), {1, 2, 3, 4, 6})
if __name__ == '__main__':
    # Allow running this module directly: `python test_factors.py`.
    unittest.main()
| 37.328125 | 70 | 0.678108 |
db9b3c37a101a9357e224d219a6c5a22ad2c6827 | 9,159 | py | Python | elroee/ReportsScreenResearcher.py | vladigr1/soft-project | 9d33d4a9a894b85742474c6bdc9accf38ff396f3 | [
"MIT"
] | null | null | null | elroee/ReportsScreenResearcher.py | vladigr1/soft-project | 9d33d4a9a894b85742474c6bdc9accf38ff396f3 | [
"MIT"
] | null | null | null | elroee/ReportsScreenResearcher.py | vladigr1/soft-project | 9d33d4a9a894b85742474c6bdc9accf38ff396f3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ReportsScreenResearcher.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMainWindow,QInputDialog, QLineEdit, QDialog, QLabel, QComboBox, QPushButton
class Ui_MainWindow(object):
    """Report screen UI: a title label, an 11-row results table and an
    "add Row" button.

    NOTE(review): the setupUi/retranslateUi bodies were generated by pyuic5
    (see the header warning) and appear hand-edited afterwards (the add-row
    button and addRowToTable slot are not typical pyuic output) — confirm
    before regenerating from the .ui file, as that would drop the edits.
    """
    def setupUi(self, MainWindow):
        # Build the widget tree and set static (non-translatable) properties.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(477, 476)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # Title label.
        self.label = QtWidgets.QLabel(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
        self.label.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setPointSize(16)
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.verticalLayout_2.addWidget(self.label)
        # Results table: 1 column x 11 rows; texts are set in retranslateUi.
        self.cur_report_display_table = QtWidgets.QTableWidget(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.cur_report_display_table.sizePolicy().hasHeightForWidth())
        self.cur_report_display_table.setSizePolicy(sizePolicy)
        self.cur_report_display_table.setLineWidth(1)
        self.cur_report_display_table.setObjectName("cur_report_display_table")
        self.cur_report_display_table.setColumnCount(1)
        self.cur_report_display_table.setRowCount(11)
        # Vertical header items, rows 0-10.
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setVerticalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setVerticalHeaderItem(1, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setVerticalHeaderItem(2, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setVerticalHeaderItem(3, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setVerticalHeaderItem(4, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setVerticalHeaderItem(5, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setVerticalHeaderItem(6, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setVerticalHeaderItem(7, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setVerticalHeaderItem(8, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setVerticalHeaderItem(9, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setVerticalHeaderItem(10, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setHorizontalHeaderItem(0, item)
        # Cell items for column 0, rows 0-10.
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setItem(0, 0, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setItem(1, 0, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setItem(2, 0, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setItem(3, 0, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setItem(4, 0, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setItem(5, 0, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setItem(6, 0, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setItem(7, 0, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setItem(8, 0, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setItem(9, 0, item)
        item = QtWidgets.QTableWidgetItem()
        self.cur_report_display_table.setItem(10, 0, item)
        self.cur_report_display_table.horizontalHeader().setDefaultSectionSize(300)
        self.verticalLayout_2.addWidget(self.cur_report_display_table)
        # Hand-added button: appends a row via the addRowToTable slot below.
        self.addRowBtn = QtWidgets.QPushButton(self.centralwidget, clicked = self.addRowToTable)
        self.addRowBtn.setText("add Row")
        self.verticalLayout_2.addWidget(self.addRowBtn)
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Assign all user-visible (translatable) strings.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label.setText(_translate("MainWindow", "Report screen:"))
        # Row labels.
        item = self.cur_report_display_table.verticalHeaderItem(0)
        item.setText(_translate("MainWindow", "beta1,0"))
        item = self.cur_report_display_table.verticalHeaderItem(1)
        item.setText(_translate("MainWindow", "beta2,0"))
        item = self.cur_report_display_table.verticalHeaderItem(2)
        item.setText(_translate("MainWindow", "beta3,0"))
        item = self.cur_report_display_table.verticalHeaderItem(3)
        item.setText(_translate("MainWindow", "beta4,0"))
        item = self.cur_report_display_table.verticalHeaderItem(4)
        item.setText(_translate("MainWindow", "beta5,0"))
        item = self.cur_report_display_table.verticalHeaderItem(5)
        item.setText(_translate("MainWindow", "amount of sleep"))
        item = self.cur_report_display_table.verticalHeaderItem(6)
        item.setText(_translate("MainWindow", "cognitive load"))
        item = self.cur_report_display_table.verticalHeaderItem(7)
        item.setText(_translate("MainWindow", "physical activity"))
        item = self.cur_report_display_table.verticalHeaderItem(8)
        item.setText(_translate("MainWindow", "t-value"))
        item = self.cur_report_display_table.verticalHeaderItem(9)
        item.setText(_translate("MainWindow", "critical t-value"))
        item = self.cur_report_display_table.verticalHeaderItem(10)
        item.setText(_translate("MainWindow", "accuracy"))
        item = self.cur_report_display_table.horizontalHeaderItem(0)
        item.setText(_translate("MainWindow", "value"))
        # Placeholder cell values; sorting is disabled while filling.
        __sortingEnabled = self.cur_report_display_table.isSortingEnabled()
        self.cur_report_display_table.setSortingEnabled(False)
        item = self.cur_report_display_table.item(0, 0)
        item.setText(_translate("MainWindow", "2.7"))
        item = self.cur_report_display_table.item(1, 0)
        item.setText(_translate("MainWindow", "2.9"))
        item = self.cur_report_display_table.item(2, 0)
        item.setText(_translate("MainWindow", "3.4"))
        item = self.cur_report_display_table.item(3, 0)
        item.setText(_translate("MainWindow", "4.2"))
        item = self.cur_report_display_table.item(4, 0)
        item.setText(_translate("MainWindow", "4.4"))
        item = self.cur_report_display_table.item(5, 0)
        item.setText(_translate("MainWindow", "(5,6,7,8)"))
        item = self.cur_report_display_table.item(6, 0)
        item.setText(_translate("MainWindow", "-0.2"))
        item = self.cur_report_display_table.item(7, 0)
        item.setText(_translate("MainWindow", "-0.4"))
        item = self.cur_report_display_table.item(8, 0)
        item.setText(_translate("MainWindow", "1.12"))
        item = self.cur_report_display_table.item(9, 0)
        item.setText(_translate("MainWindow", "1.74"))
        item = self.cur_report_display_table.item(10, 0)
        item.setText(_translate("MainWindow", "12"))
        self.cur_report_display_table.setSortingEnabled(__sortingEnabled)
    def addRowToTable(self):
        """Append one row to the table with a placeholder header and cell."""
        rowPosition = self.cur_report_display_table.rowCount()
        self.cur_report_display_table.insertRow(rowPosition)
        item = QtWidgets.QTableWidgetItem()
        item.setText("Elro")
        self.cur_report_display_table.setVerticalHeaderItem(rowPosition, item)
        # The cell is a QLabel widget, not a QTableWidgetItem.
        label = QLabel("Elro2")
        self.cur_report_display_table.setCellWidget(rowPosition, 0, label)
if __name__ == "__main__":
    # Manual smoke test: build the window and run the Qt event loop.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| 49.508108 | 108 | 0.70892 |
9e4e851139dc4380a642ddee51d302b3200c2175 | 4,085 | py | Python | lib/surface/compute/instances/set_min_cpu_platform.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/compute/instances/set_min_cpu_platform.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/surface/compute/instances/set_min_cpu_platform.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for setting minimum CPU platform for virtual machine instances."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute.operations import poller
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.instances import flags
from googlecloudsdk.core import log
@base.Deprecate(
    is_removed=False,
    warning='This command is deprecated. Use '
    '$ gcloud alpha compute instances update --set-min-cpu-platform instead.')
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class SetMinCpuPlatform(base.UpdateCommand):
  # pylint: disable=line-too-long
  """Set minimum CPU platform for Compute Engine virtual machine instance."""
  # pylint: enable=line-too-long

  @staticmethod
  def Args(parser):
    """Registers the command's flags on the argparse parser."""
    flags.INSTANCE_ARG.AddArgument(parser)
    flags.AddMinCpuPlatformArgs(
        parser, base.ReleaseTrack.ALPHA, required=True)
    base.ASYNC_FLAG.AddToParser(parser)

  def Run(self, args):
    """Issues the SetMinCpuPlatform request; polls unless --async is given."""
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client
    instance_ref = flags.INSTANCE_ARG.ResolveAsResource(
        args,
        holder.resources,
        scope_lister=flags.GetInstanceZoneScopeLister(client))
    # An empty --min-cpu-platform value is normalized to None, which clears
    # the minimum CPU platform requirement on the instance.
    embedded_request = client.messages.InstancesSetMinCpuPlatformRequest(
        minCpuPlatform=args.min_cpu_platform or None)
    request = client.messages.ComputeInstancesSetMinCpuPlatformRequest(
        instance=instance_ref.instance,
        project=instance_ref.project,
        instancesSetMinCpuPlatformRequest=embedded_request,
        zone=instance_ref.zone)
    operation = client.apitools_client.instances.SetMinCpuPlatform(request)
    operation_ref = holder.resources.Parse(
        operation.selfLink, collection='compute.zoneOperations')
    if args.async_:
      # Fire-and-forget: report the operation and return without polling.
      log.UpdatedResource(
          operation_ref,
          kind='gce instance [{0}]'.format(instance_ref.Name()),
          is_async=True,
          details='Use [gcloud compute operations describe] command '
          'to check the status of this operation.'
      )
      return operation
    operation_poller = poller.Poller(client.apitools_client.instances)
    return waiter.WaitFor(
        operation_poller, operation_ref,
        'Changing minimum CPU platform of instance [{0}]'.format(
            instance_ref.Name()))
SetMinCpuPlatform.detailed_help = {
'brief': ('Set minimum CPU platform for Compute Engine virtual '
'machines'),
'DESCRIPTION':
"""\
`{command}` changes the minimum CPU platform of a virtual
machine with the *TERMINATED* status (a virtual machine instance that
has been stopped).
For example, running the command on example-instance virtual machine
which has a status of TERMINATED
$ {command} example-instance --zone us-central1-a\
--min-cpu-platform "Intel Broadwell"
will set the minimum CPU platform to `Intel Broadwell`. When
you start `example-instance` later, it will be provisioned using at
least `Intel Broadwell` CPU platform.
To get a list of available CPU platforms in us-central1-a zone, run:
$ gcloud alpha compute zones describe us-central1-a\
--format="value(availableCpuPlatforms)"
""",
}
| 37.136364 | 78 | 0.722399 |
ef04626e1ecf4359d6fe160d4e99a2cbe6e820cf | 1,427 | py | Python | myapp/forms.py | percevalm/aumyproject | b24b38005188ce9dd41ed663cf54dad5464afef3 | [
"bzip2-1.0.6"
] | null | null | null | myapp/forms.py | percevalm/aumyproject | b24b38005188ce9dd41ed663cf54dad5464afef3 | [
"bzip2-1.0.6"
] | 16 | 2020-03-24T17:30:37.000Z | 2022-03-11T23:57:41.000Z | myapp/forms.py | percevalm/aumyproject | b24b38005188ce9dd41ed663cf54dad5464afef3 | [
"bzip2-1.0.6"
] | null | null | null | from django import forms
from .models import Intbursary
from django.contrib import admin
##aded for file size limit
from django.template.defaultfilters import filesizeformat
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
class IntbursaryForm(forms.ModelForm):
    """ModelForm for Intbursary applications, exposing every model field."""

    class Meta:
        model = Intbursary
        fields = '__all__'

    def clean_surname(self):
        """Reject missing or blank surnames.

        Previously only the exact empty string was rejected; ``cleaned_data.get``
        can also return ``None``, and whitespace-only input is equally empty,
        so both are now rejected too.
        """
        surname = self.cleaned_data.get('surname')
        if not surname or not surname.strip():
            raise forms.ValidationError("This field cannot be left empty!")
        return surname

    # NOTE: a broken, commented-out clean_content() file-size validator
    # (dated 2019-02-06) was removed here; reimplement it against
    # settings.CONTENT_TYPES / settings.MAX_UPLOAD_SIZE if upload limits
    # are needed for the personal_statement field.
# All fields that we have: ["surname","first_name","id_number","nationality","race","gender","telephone_number","email","employed","employed_at","full_part_time","employed_study", "employed_study_details","proposed_degree","other_funding","referee_details","degree_1","f_o_study_1","major_sub_1","institution_1","year_obtained_1","personal_statement"]
03c5143c95025ac31c8c73183705e2cb5ed3a453 | 584 | py | Python | qiskit/test/mock/backends/kolkata/__init__.py | QAMP-Spring-2022-Transpiler-Hackathon/qiskit-terra | aee0dc4d538991560f212411db92cde5f511f65b | [
"Apache-2.0"
] | 1,456 | 2017-08-05T16:33:05.000Z | 2018-06-05T04:15:35.000Z | qiskit/test/mock/backends/kolkata/__init__.py | QAMP-Spring-2022-Transpiler-Hackathon/qiskit-terra | aee0dc4d538991560f212411db92cde5f511f65b | [
"Apache-2.0"
] | 365 | 2017-08-04T06:09:16.000Z | 2018-06-05T08:33:37.000Z | qiskit/test/mock/backends/kolkata/__init__.py | QAMP-Spring-2022-Transpiler-Hackathon/qiskit-terra | aee0dc4d538991560f212411db92cde5f511f65b | [
"Apache-2.0"
] | 463 | 2017-08-05T04:10:01.000Z | 2018-06-05T06:43:21.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Mock kolkata backend"""
from .fake_kolkata import FakeKolkataV2
from .fake_kolkata import FakeKolkata
| 34.352941 | 77 | 0.767123 |
b338afc1974d48bcdf1c428bd547e4b20ade2af6 | 9,788 | py | Python | python/ccxt/bter.py | GoChartingAdmin/ccxt | 56cd9caec8c570bcb3038e0e18544d25a4bd14c3 | [
"MIT"
] | 2 | 2020-03-23T07:31:05.000Z | 2021-05-17T10:34:31.000Z | python/ccxt/bter.py | GoChartingAdmin/ccxt | 56cd9caec8c570bcb3038e0e18544d25a4bd14c3 | [
"MIT"
] | null | null | null | python/ccxt/bter.py | GoChartingAdmin/ccxt | 56cd9caec8c570bcb3038e0e18544d25a4bd14c3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
class bter (Exchange):
    def describe(self):
        """Return the static exchange description (endpoints, capabilities),
        merged over the base Exchange defaults via deep_extend."""
        return self.deep_extend(super(bter, self).describe(), {
            'id': 'bter',
            'name': 'Bter',
            'countries': ['VG', 'CN'],  # British Virgin Islands, China
            'version': '2',
            'hasCORS': False,
            'hasFetchTickers': True,
            'hasWithdraw': True,
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27980479-cfa3188c-6387-11e7-8191-93fc4184ba5c.jpg',
                # Public (market data) and private (trading) API hosts differ.
                'api': {
                    'public': 'https://data.bter.com/api',
                    'private': 'https://api.bter.com/api',
                },
                'www': 'https://bter.com',
                'doc': 'https://bter.com/api2',
            },
            # REST endpoint templates; {id}/{tid} are substituted per request.
            'api': {
                'public': {
                    'get': [
                        'pairs',
                        'marketinfo',
                        'marketlist',
                        'tickers',
                        'ticker/{id}',
                        'orderBook/{id}',
                        'trade/{id}',
                        'tradeHistory/{id}',
                        'tradeHistory/{id}/{tid}',
                    ],
                },
                'private': {
                    'post': [
                        'balances',
                        'depositAddress',
                        'newAddress',
                        'depositsWithdrawals',
                        'buy',
                        'sell',
                        'cancelOrder',
                        'cancelAllOrders',
                        'getOrder',
                        'openOrders',
                        'tradeHistory',
                        'withdraw',
                    ],
                },
            },
        })
def fetch_markets(self):
response = self.publicGetMarketinfo()
markets = response['pairs']
result = []
for i in range(0, len(markets)):
market = markets[i]
keys = list(market.keys())
id = keys[0]
details = market[id]
base, quote = id.split('_')
base = base.upper()
quote = quote.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': details['decimal_places'],
'price': details['decimal_places'],
}
amountLimits = {
'min': details['min_amount'],
'max': None,
}
priceLimits = {
'min': None,
'max': None,
}
limits = {
'amount': amountLimits,
'price': priceLimits,
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
'maker': details['fee'] / 100,
'taker': details['fee'] / 100,
'precision': precision,
'limits': limits,
})
return result
def fetch_balance(self, params={}):
self.load_markets()
balance = self.privatePostBalances()
result = {'info': balance}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
code = self.common_currency_code(currency)
account = self.account()
if 'available' in balance:
if currency in balance['available']:
account['free'] = float(balance['available'][currency])
if 'locked' in balance:
if currency in balance['locked']:
account['used'] = float(balance['locked'][currency])
account['total'] = self.sum(account['free'], account['used'])
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, params={}):
self.load_markets()
orderbook = self.publicGetOrderBookId(self.extend({
'id': self.market_id(symbol),
}, params))
result = self.parse_order_book(orderbook)
result['asks'] = self.sort_by(result['asks'], 0)
return result
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high24hr']),
'low': float(ticker['low24hr']),
'bid': float(ticker['highestBid']),
'ask': float(ticker['lowestAsk']),
'vwap': None,
'open': None,
'close': None,
'first': None,
'last': float(ticker['last']),
'change': float(ticker['percentChange']),
'percentage': None,
'average': None,
'baseVolume': float(ticker['quoteVolume']),
'quoteVolume': float(ticker['baseVolume']),
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
tickers = self.publicGetTickers(params)
result = {}
ids = list(tickers.keys())
for i in range(0, len(ids)):
id = ids[i]
baseId, quoteId = id.split('_')
base = baseId.upper()
quote = quoteId.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
ticker = tickers[id]
market = None
if symbol in self.markets:
market = self.markets[symbol]
if id in self.markets_by_id:
market = self.markets_by_id[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
ticker = self.publicGetTickerId(self.extend({
'id': market['id'],
}, params))
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market):
timestamp = self.parse8601(trade['date'])
return {
'id': trade['tradeID'],
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': trade['type'],
'price': trade['rate'],
'amount': self.safe_float(trade, 'amount'),
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetTradeHistoryId(self.extend({
'id': market['id'],
}, params))
return self.parse_trades(response['data'], market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
if type == 'market':
raise ExchangeError(self.id + ' allows limit orders only')
self.load_markets()
method = 'privatePost' + self.capitalize(side)
order = {
'currencyPair': self.market_id(symbol),
'rate': price,
'amount': amount,
}
response = getattr(self, method)(self.extend(order, params))
return {
'info': response,
'id': response['orderNumber'],
}
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
return self.privatePostCancelOrder({'orderNumber': id})
def withdraw(self, currency, amount, address, params={}):
self.load_markets()
response = self.privatePostWithdraw(self.extend({
'currency': currency.lower(),
'amount': amount,
'address': address, # Address must exist in you AddressBook in security settings
}, params))
return {
'info': response,
'id': None,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
prefix = (api + '/') if (api == 'private') else ''
url = self.urls['api'][api] + self.version + '/1/' + prefix + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = self.nonce()
request = {'nonce': nonce}
body = self.urlencode(self.extend(request, query))
signature = self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512)
headers = {
'Key': self.apiKey,
'Sign': signature,
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if 'result' in response:
if response['result'] != 'true':
raise ExchangeError(self.id + ' ' + self.json(response))
return response
| 36.386617 | 126 | 0.482734 |
49251dcd87ed9cd037cb6072b6a08f175d1c045a | 3,368 | py | Python | tflib/vision/dataset/mnist.py | tonyshao5/Tensorflow-up | f8f8fce9436c40cad298f6211db2be3a18480bad | [
"MIT"
] | 10 | 2018-05-21T12:56:23.000Z | 2020-12-08T17:23:50.000Z | tflib/vision/dataset/mnist.py | tonyshao5/Tensorflow-up | f8f8fce9436c40cad298f6211db2be3a18480bad | [
"MIT"
] | null | null | null | tflib/vision/dataset/mnist.py | tonyshao5/Tensorflow-up | f8f8fce9436c40cad298f6211db2be3a18480bad | [
"MIT"
] | 9 | 2018-07-20T00:04:35.000Z | 2021-07-10T14:26:01.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import multiprocessing
import os
import struct
import subprocess
import numpy as np
import tensorflow as tf
from tflib.data.memory_data import MemoryData
_N_CPU = multiprocessing.cpu_count()
def unzip_gz(file_name):
unzip_name = file_name.replace('.gz', '')
gz_file = gzip.GzipFile(file_name)
open(unzip_name, 'w+').write(gz_file.read())
gz_file.close()
def mnist_download(download_dir):
url_base = 'http://yann.lecun.com/exdb/mnist/'
file_names = ['train-images-idx3-ubyte.gz',
'train-labels-idx1-ubyte.gz',
't10k-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte.gz']
for file_name in file_names:
url = url_base + file_name
save_path = os.path.join(download_dir, file_name)
cmd = ['curl', url, '-o', save_path]
print('Downloading ', file_name)
if not os.path.exists(save_path):
subprocess.call(cmd)
else:
print('%s exists, skip!' % file_name)
def mnist_load(data_dir, split='train'):
"""Load MNIST dataset, modified from https://gist.github.com/akesling/5358964.
Returns:
A tuple as (`imgs`, `lbls`, `num`).
`imgs`: [-1.0, 1.0] float64 images of shape (N * H * W).
`lbls`: Int labels of shape (N,).
`num`: # of datas.
"""
mnist_download(data_dir)
if split == 'train':
fname_img = os.path.join(data_dir, 'train-images-idx3-ubyte')
fname_lbl = os.path.join(data_dir, 'train-labels-idx1-ubyte')
elif split == 'test':
fname_img = os.path.join(data_dir, 't10k-images-idx3-ubyte')
fname_lbl = os.path.join(data_dir, 't10k-labels-idx1-ubyte')
else:
raise ValueError("split must be 'test' or 'train'")
if not os.path.exists(fname_img):
unzip_gz(fname_img + '.gz')
if not os.path.exists(fname_lbl):
unzip_gz(fname_lbl + '.gz')
# Load everything in some numpy arrays
with open(fname_lbl, 'rb') as flbl:
struct.unpack('>II', flbl.read(8))
lbls = np.fromfile(flbl, dtype=np.int8)
with open(fname_img, 'rb') as fimg:
_, _, rows, cols = struct.unpack('>IIII', fimg.read(16))
imgs = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbls), rows, cols)
imgs = imgs / 127.5 - 1
return imgs, lbls, len(lbls)
class Mnist(MemoryData):
def __init__(self, data_dir, batch_size, split='train', prefetch_batch=_N_CPU + 1, drop_remainder=True, filter=None,
map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=-1, sess=None):
imgs, lbls, self.n_data = mnist_load(data_dir, split)
imgs.shape = imgs.shape + (1,)
imgs_pl = tf.placeholder(tf.float32, imgs.shape)
lbls_pl = tf.placeholder(tf.int64, lbls.shape)
memory_data_dict = {'img': imgs_pl, 'lbl': lbls_pl}
self.feed_dict = {imgs_pl: imgs, lbls_pl: lbls}
super(Mnist, self).__init__(memory_data_dict, batch_size, prefetch_batch, drop_remainder, filter,
map_func, num_threads, shuffle, buffer_size, repeat, sess)
def __len__(self):
return self.n_data
def reset(self):
super(Mnist, self).reset(self.feed_dict)
| 32.699029 | 120 | 0.634204 |
9cd6406309ae44f226813f5824347ed11321d0e9 | 2,373 | py | Python | src/pkgcore/repository/configured.py | mgorny/pkgcore | ab4a718aa1626f4edeb385383f5595a1e262b0dc | [
"BSD-3-Clause"
] | null | null | null | src/pkgcore/repository/configured.py | mgorny/pkgcore | ab4a718aa1626f4edeb385383f5595a1e262b0dc | [
"BSD-3-Clause"
] | null | null | null | src/pkgcore/repository/configured.py | mgorny/pkgcore | ab4a718aa1626f4edeb385383f5595a1e262b0dc | [
"BSD-3-Clause"
] | null | null | null | # Copyright: 2006-2008 Brian Harring <ferringb@gmail.com>
# License: GPL2/BSD
"""
wrap a repository, binding configuration to pkgs returned from the repository
"""
__all__ = ("tree",)
from functools import partial
from snakeoil.klass import GetAttrProxy, DirProxy
from pkgcore.operations.repo import operations_proxy
from pkgcore.package.conditionals import make_wrapper
from pkgcore.repository import prototype
class tree(prototype.tree):
    """Repository wrapper that binds configuration onto returned packages.

    Every package coming out of ``raw_repo`` is wrapped via a generated
    package class (see :func:`make_wrapper`), so attribute access on the
    wrapped attrs reflects the bound configuration.  Unknown attributes
    and ``dir()`` are proxied straight through to ``raw_repo``.
    """
    configured = True
    operations_kls = operations_proxy
    def __init__(self, raw_repo, wrapped_attrs, pkg_kls_injections=()):
        """
        :param raw_repo: repo to wrap
        :type raw_repo: :obj:`pkgcore.repository.prototype.tree`
        :param wrapped_attrs: sequence of attrs to wrap for each pkg
        :param pkg_kls_injections: extra classes injected into the
            generated package wrapper class
        """
        # yes, we're intentionally not using tree's init.
        # not perfect I know.
        self.raw_repo = raw_repo
        self.wrapped_attrs = wrapped_attrs
        self._pkg_klass = self._mk_kls(pkg_kls_injections)
    def _mk_kls(self, pkg_kls_injections):
        # NOTE(review): ``self.configurable`` is not defined on this base
        # class — presumably supplied by subclasses; confirm before reuse.
        return make_wrapper(
            self, self.configurable, self.wrapped_attrs,
            kls_injections=pkg_kls_injections)
    def _get_pkg_kwds(self, pkg):
        # subclass hook: kwargs passed to the wrapper class per package
        raise NotImplementedError
    def package_class(self, pkg):
        # wrap a raw package instance in the configured package class
        return self._pkg_klass(pkg, **self._get_pkg_kwds(pkg))
    # transparently forward unknown attribute/dir lookups to the raw repo
    __getattr__ = GetAttrProxy("raw_repo")
    __dir__ = DirProxy("raw_repo")
    def itermatch(self, restrict, **kwds):
        # force evaluation by default and make sure every yielded package
        # goes through package_class (composing with any caller override)
        kwds.setdefault("force", True)
        o = kwds.get("pkg_klass_override")
        if o is not None:
            kwds["pkg_klass_override"] = partial(self.package_class, o)
        else:
            kwds["pkg_klass_override"] = self.package_class
        return self.raw_repo.itermatch(restrict, **kwds)
    # reuse the prototype docstring, adjusting the param markup
    itermatch.__doc__ = prototype.tree.itermatch.__doc__.replace(
        "@param", "@keyword").replace(":keyword restrict:", ":param restrict:")
    def __getitem__(self, key):
        obj = self.package_class(self.raw_repo[key])
        if not obj.is_supported:
            # unsupported under the bound configuration -> treat as missing
            raise KeyError(key)
        return obj
    def __repr__(self):
        return '<%s.%s raw_repo=%r wrapped=%r @%#8x>' % (
            self.__class__.__module__, self.__class__.__name__,
            getattr(self, 'raw_repo', 'unset'),
            list(getattr(self, 'wrapped_attrs', {}).keys()),
            id(self))
e5ececcbf83ac62d16e79589186edac979d9211d | 7,808 | py | Python | client.py | Bo0gieMan-VP/PFD | 3f3cc6809e275c7f11231d4c9c2766481f1b6d5b | [
"Unlicense"
] | null | null | null | client.py | Bo0gieMan-VP/PFD | 3f3cc6809e275c7f11231d4c9c2766481f1b6d5b | [
"Unlicense"
] | null | null | null | client.py | Bo0gieMan-VP/PFD | 3f3cc6809e275c7f11231d4c9c2766481f1b6d5b | [
"Unlicense"
] | null | null | null | import os
import socket
import time
import keyboard
from getmac import get_mac_address as gma
from sys import argv
# Set to True by translate_repsonse() when the server sends an 'ADMIN'
# message granting elevated rights; switches the prompt color/sigil.
is_admin = False
# ANSI escape sequences used to colorize terminal output.
COLOR = {
    'RED' : '\033[1;91m',
    'UNDERLINE_PURPLE' : '\033[4;34m',
    'GREEN' : '\033[1;92m',
    'YELLOW' : '\033[1;33m',
    'CYAN' : '\033[0;36m',
    'PURPLE' : '\033[0;34m',
    'MAGENTA' : '\033[0;35m',
    'DEFAULT' : '\033[0m',
    'TWITTER_BLUE' : '\033[38;5;33m',
}
def get_ip():
    """Return this machine's local (LAN) IP address.

    Falls back to the loopback address when no route is available.

    :return: IP address
    :rtype: str
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # Connecting a UDP socket sends no packets; it only selects the
        # outgoing interface, whose address we then read back.
        probe.connect(('10.255.255.255', 1))
        return probe.getsockname()[0]
    except Exception:
        return '127.0.0.1'
    finally:
        probe.close()
def translate_repsonse(server_response):
    """Decode one message of the server-to-client protocol.

    The first six characters carry the (padded) message type and the
    payload starts at index 7.  "Special" types carry a bare payload;
    all other types carry ``"<msg>|<content>"``.

    Returns a value for INT/BOOL/PLAY/SHT/QUIT/CNFG messages and None
    for the purely display-oriented ones.
    (Function name typo is kept — callers use it.)
    """
    special_types = ['STR', 'CLEAR', 'QUIT', 'SHT', 'PLAY', 'CNFG']
    msg_type = server_response[0:6].strip()
    if msg_type in special_types:
        content = server_response[7:]
    else:
        msg, content = server_response[7:].split("|")
    if msg_type == 'INT':
        return int(content)
    elif msg_type == 'BOOL':
        return bool(int(content))
    elif msg_type == 'LIST':
        print(msg)
        print_list(content.split(","))
    elif msg_type == 'LYRICS':
        print_lyrics(msg, content)
    elif msg_type == 'STR':
        print(content)
    elif msg_type == 'ADMIN':
        global is_admin
        is_admin = bool(int(msg))
        print(content)
    elif msg_type == 'PLAY':
        return content
    elif msg_type == 'CLEAR':
        os.system('cls')
        print(content)
    elif msg_type in ('SHT', 'QUIT', 'CNFG'):
        return content
def print_list(client_list):
    """Print a numbered list of items (albums, songs, etc.), one per line.

    :param client_list: items to display
    :type client_list: list
    :return: None
    """
    for index, item in enumerate(client_list, start=1):
        print(" %2d) %s" % (index, item))
    print()
def print_lyrics(songname, lyrics):
    """Print a colored title banner for *songname* followed by its lyrics.

    Section markers such as "[Verse 1]" are preceded by a blank line;
    an empty lyrics string is reported as an instrumental track.
    """
    print("\n " + COLOR['TWITTER_BLUE'] + songname.title() + " - Pink Floyd" + COLOR['DEFAULT'])
    if not lyrics:
        print("This song is instrumental")
        return
    for line in lyrics.split("\n"):
        if line.startswith("[") and line.endswith("]"):
            print()
        print(" " + line)
def client_to_server(my_socket):
    """
    Manages the Client-to-Server dialog

    Reads commands from the user, sends them in the client protocol
    ("%10s %4d %s": padded command, payload length, payload), then
    dispatches on the 6-character type prefix of the server's reply.
    Loops until the server shuts down (SHT), the user quits (QUIT), or
    the connection drops.

    :param my_socket: Client-to-Server socket
    :type my_socket: socket obj
    :return: None
    """
    while True:
        # prompt color/sigil reflects admin status ('PFD>' vs 'PFD#')
        msg = input((COLOR['TWITTER_BLUE'] + 'PFD> ' if not is_admin else COLOR['GREEN'] + 'PFD# ') + COLOR['DEFAULT'])
        space = msg.find(" ")
        if space != -1:
            # command plus argument: encode the argument length explicitly
            command = msg[0:space]
            length = len(msg[space+1:])
            content = msg[space+1:]
            msg = "%10s %4d %s" % (command, length, content)
        else:
            # bare command, zero-length payload
            command = msg
            msg = "%10s %4d" % (command, 0)
        try:
            my_socket.send(msg.encode())
        except:
            print(COLOR['RED'] + "Server is no longer available" + COLOR['DEFAULT'])
            break
        data = my_socket.recv(4096).decode()
        data_type = data[0:6].strip()
        # CNFG replies are handled incrementally below, so skip eager decoding
        if data_type != "CNFG":
            data_content = translate_repsonse(data)
        match (data_type):
            case 'SHT':
                # server shutdown: show the farewell and stop the dialog
                print(data_content)
                my_socket.close()
                break
            case 'PLAY':
                # playback countdown: three intermediate updates on the same
                # line (carriage return), then the final message
                print(data_content, end='\r')
                for i in range(2, 0, -1):
                    data = my_socket.recv(1024).decode()
                    data_content = translate_repsonse(data)
                    print(data_content, end='\r')
                data = my_socket.recv(1024).decode()
                data_content = translate_repsonse(data)
                print(data_content)
            # case 'UPDATE':
            #     print(data_content)
            #     if is_admin:
            #         prompt = (COLOR['TWITTER_BLUE'] + 'PFD> ' if not is_admin else COLOR['GREEN'] + 'PFD# ') + COLOR['DEFAULT']
            #         user_input = input( prompt + "Choose method: ")
            #         msg = "%10s %4d" % ("UPDATE", int(user_input))
            #         my_socket.send(msg.encode())
            #         data = my_socket.recv(4096).decode()
            #         data_content = translate_repsonse(data)
            #         print(data_content)
            #         data = my_socket.recv(4096).decode()
            #         data_content = translate_repsonse(data)
            #         print(data_content)
            #         continue
            case 'CNFG':
                # interactive configuration: keep prompting the user and
                # relaying answers until the server sends a non-CNFG reply
                config = translate_repsonse(data)
                while(data_type == 'CNFG'):
                    if config == "":
                        data = my_socket.recv(4096).decode()
                        data_type = data[0:6].strip()
                        if data_type != 'CNFG':
                            config = translate_repsonse(data)
                            break
                        config = translate_repsonse(data)
                    user_input = input(config + " ")
                    my_socket.send(user_input.encode())
                    config = ""
            case 'QUIT':
                print(data_content)
                time.sleep(3)
                break
def main():
    """Entry point: connect to a PFD server and run the interactive session."""
    os.system('cls')
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Keep asking for an address until a connection succeeds.
    while True:
        ip_add = input("Enter server's IP address: ")
        # 'my_ip' is a shortcut meaning "the server runs on this machine".
        if ip_add == "my_ip":
            ip_add = get_ip()
        try:
            client_socket.connect((ip_add, 9595))
            break
        except:
            print(COLOR['RED'] + 'Wrong IP Address' + COLOR['DEFAULT'])
    # Identify ourselves (computer name + MAC) so the server log can tell
    # clients apart.
    details = os.environ['COMPUTERNAME'] + "|" + gma()
    client_socket.send(details.encode())
    greeting = client_socket.recv(1024).decode()
    os.system('cls')
    # Header message from the server, then the interactive dialog.
    print(greeting)
    client_to_server(client_socket)
    client_socket.close()
    print("Disconnected from server\nPress " + COLOR['GREEN'] + "[SPACE]" + COLOR['DEFAULT'] + " to exit")
    # Keep the window open until the user presses SPACE.
    keyboard.wait('space')
if __name__ == "__main__":
    # Fix: dataset metrics were fused onto this line ("main() | 35.65..."),
    # turning the call into a bitwise-or expression that raised TypeError.
    main()
f98a46878662e6e2c6b9b4004aa7749d6a955f7d | 1,600 | py | Python | lib/boost_1.66.0/tools/build/test/custom_generator.py | jrmie/math | 2850ec262181075a5843968e805dc9ad1654e069 | [
"BSD-3-Clause"
] | 918 | 2016-12-22T02:53:08.000Z | 2022-03-22T06:21:35.000Z | lib/boost_1.66.0/tools/build/test/custom_generator.py | jrmie/math | 2850ec262181075a5843968e805dc9ad1654e069 | [
"BSD-3-Clause"
] | 203 | 2016-12-27T12:09:03.000Z | 2022-03-30T20:46:55.000Z | lib/boost_1.66.0/tools/build/test/custom_generator.py | jrmie/math | 2850ec262181075a5843968e805dc9ad1654e069 | [
"BSD-3-Clause"
] | 122 | 2016-12-22T17:38:09.000Z | 2022-02-22T14:25:49.000Z | #!/usr/bin/python
# Copyright 2003, 2004, 2005 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Attempt to declare a generator for creating OBJ from RC files. That generator
# should be considered together with standard CPP->OBJ generators and
# successfully create the target. Since we do not have a RC compiler everywhere,
# we fake the action. The resulting OBJ will be unusable, but it must be
# created.
import BoostBuild
t = BoostBuild.Tester()
t.write("jamroot.jam", """
import rcc ;
""")
t.write("rcc.jam", """
import type ;
import generators ;
import print ;
# Use 'RCC' to avoid conflicts with definitions in the standard rc.jam and
# msvc.jam
type.register RCC : rcc ;
rule resource-compile ( targets * : sources * : properties * )
{
print.output $(targets[1]) ;
print.text "rc-object" ;
}
generators.register-standard rcc.resource-compile : RCC : OBJ ;
""")
t.write("rcc.py", """
import b2.build.type as type
import b2.build.generators as generators
from b2.manager import get_manager
# Use 'RCC' to avoid conflicts with definitions in the standard rc.jam and
# msvc.jam
type.register('RCC', ['rcc'])
generators.register_standard("rcc.resource-compile", ["RCC"], ["OBJ"])
get_manager().engine().register_action(
"rcc.resource-compile",
'@($(STDOUT):E=rc-object) > "$(<)"')
""")
t.write("jamfile.jam", """
obj r : r.rcc ;
""")
t.write("r.rcc", """
""")
t.run_build_system()
t.expect_content("bin/$toolset/debug*/r.obj", "rc-object")
t.cleanup()
| 23.880597 | 81 | 0.70125 |
d7b75d135c47b8e7902005b957d945169f26b42f | 99 | py | Python | src/main.py | guo40020/xterm-parser | 4b306cc59265e48aef748be6c868a1553ee1fd28 | [
"MIT"
] | 3 | 2021-10-08T02:28:34.000Z | 2021-12-21T07:37:56.000Z | src/main.py | guo40020/xterm-parser | 4b306cc59265e48aef748be6c868a1553ee1fd28 | [
"MIT"
] | null | null | null | src/main.py | guo40020/xterm-parser | 4b306cc59265e48aef748be6c868a1553ee1fd28 | [
"MIT"
] | null | null | null | from src import xterm_parser
if __name__ == '__main__':
xterm_parser.TermEventTracker().run()
| 19.8 | 41 | 0.747475 |
e79c5deab5840f439cde00956992d4db8947c8fc | 6,947 | py | Python | cryptography/lab2/server/crypto/aes.py | tonykozlovsky/bsu | 6ea290ec69bf4ac1a2bfc4c800065ae1c68039a3 | [
"MIT"
] | 113 | 2017-02-02T13:09:21.000Z | 2022-03-14T08:54:41.000Z | cryptography/lab2/server/crypto/aes.py | tonykozlovsky/bsu | 6ea290ec69bf4ac1a2bfc4c800065ae1c68039a3 | [
"MIT"
] | 12 | 2017-04-14T12:01:50.000Z | 2022-03-08T22:47:22.000Z | cryptography/lab2/server/crypto/aes.py | Drapegnik/bsu | 769ddac58eddd5877e40949227998575fd4dec77 | [
"MIT"
] | 78 | 2017-03-18T22:36:13.000Z | 2022-02-20T14:42:34.000Z | """
Advanced Encryption Standard aka Rijndael algorithm
http://en.wikipedia.org/wiki/Advanced_Encryption_Standard
"""
from crypto.constants import SBOX, INV_SBOX, RCON
from crypto.utils import mul_by_02, mul_by_03, mul_by_09, mul_by_0b, mul_by_0d, mul_by_0e, state_to_list
ROWS_NUM = 4
COLS_NUM = 4
KEY_LENGTH = ROWS_NUM * COLS_NUM
ROUNDS_NUM = 10
def sub_bytes(state, inv=False):
    """
    Replace every state element through the S-box (SubBytes step).

    Simplification: the original split each byte into nibbles and indexed
    the flat table with ``16 * (el // 0x10) + (el % 0x10)`` — which is just
    ``el`` itself for byte values — so the box is now indexed directly.

    NOTE: ``state[:]`` is a shallow copy, so the caller's row lists are
    updated in place too; every caller rebinds its state to the return
    value, which makes the aliasing harmless here.

    :param state: array of [ROWS_NUMxCOLS_NUM] size
    :param inv: True - decrypt (use INV_SBOX), False - encrypt (use SBOX)
    :return: new state
    """
    new_state = state[:]
    box = INV_SBOX if inv else SBOX
    for row in new_state:
        for j, el in enumerate(row):
            row[j] = box[el]
    return new_state
def shift_rows(state, inv=False):
    """
    Cyclically rotate row *i* of the state by *i* positions (ShiftRows).

    Row 0 is untouched; rows 1-3 rotate left by 1-3 bytes for encryption
    and right for decryption.

    :param state: array of [ROWS_NUMxCOLS_NUM] size
    :param inv: True - decrypt (right shift), False - encrypt (left shift)
    :return: new state
    """
    shifted = state[:]
    direction = -1 if inv else 1
    for i in range(1, ROWS_NUM):
        cut = direction * i
        shifted[i] = shifted[i][cut:] + shifted[i][:cut]
    return shifted
def mix_columns(state, inv=False):
    """
    Multiply every column of state by polynomial:
    - for decrypt by a'(x) = {0b}x**3 + {0d}x**2 + {09}x + {0e}
    - for encrypt by a(x) = {03}x**3 + {01}x**2 + {01}x + {02}

    Each column is treated as a polynomial over GF(2**8); the mul_by_*
    helpers implement the fixed-constant field multiplications and ^ is
    GF addition (XOR).

    :param state: array of [ROWS_NUMxCOLS_NUM] size
    :param inv: True - decrypt, False - encrypt
    :return: new state
    """
    # NOTE: state[:] is a shallow copy — the row lists are shared with the
    # caller's state; here all four entries of column i are read before any
    # is overwritten, so the in-place column update is still correct.
    new_state = state[:]
    for i in range(ROWS_NUM):
        if inv:  # decryption: multiply by the inverse MixColumns matrix
            s0 = mul_by_0e(state[0][i]) ^ mul_by_0b(state[1][i]) ^ mul_by_0d(state[2][i]) ^ mul_by_09(state[3][i])
            s1 = mul_by_09(state[0][i]) ^ mul_by_0e(state[1][i]) ^ mul_by_0b(state[2][i]) ^ mul_by_0d(state[3][i])
            s2 = mul_by_0d(state[0][i]) ^ mul_by_09(state[1][i]) ^ mul_by_0e(state[2][i]) ^ mul_by_0b(state[3][i])
            s3 = mul_by_0b(state[0][i]) ^ mul_by_0d(state[1][i]) ^ mul_by_09(state[2][i]) ^ mul_by_0e(state[3][i])
        else:  # encryption: multiply by the forward MixColumns matrix
            s0 = mul_by_02(state[0][i]) ^ mul_by_03(state[1][i]) ^ state[2][i] ^ state[3][i]
            s1 = state[0][i] ^ mul_by_02(state[1][i]) ^ mul_by_03(state[2][i]) ^ state[3][i]
            s2 = state[0][i] ^ state[1][i] ^ mul_by_02(state[2][i]) ^ mul_by_03(state[3][i])
            s3 = mul_by_03(state[0][i]) ^ state[1][i] ^ state[2][i] ^ mul_by_02(state[3][i])
        new_state[0][i] = s0
        new_state[1][i] = s1
        new_state[2][i] = s2
        new_state[3][i] = s3
    return new_state
def get_round_key(key):
    """
    Generate the expanded key schedule for all rounds.

    :param key: algorithm key, string of KEY_LENGTH size
    :return: array of ROWS_NUM rows by COLS_NUM * (ROUNDS_NUM + 1)
        columns; round *r* uses columns [COLS_NUM*r, COLS_NUM*(r+1)).
    """
    key_symbols = [ord(symbol) for symbol in key]
    # lay the raw key out column-major into the first COLS_NUM columns
    key_schedule = [
        [key_symbols[i + ROWS_NUM * j] for j in range(COLS_NUM)]
        for i in range(ROWS_NUM)
    ]
    for j in range(ROWS_NUM, COLS_NUM * (ROUNDS_NUM + 1)):
        if j % ROWS_NUM == 0:
            # first word of a round key: RotWord (rotate previous column up
            # by one byte) ...
            temp = [key_schedule[i][j - 1] for i in range(1, ROWS_NUM)]
            temp.append(key_schedule[0][j - 1])
            # ... then SubWord (push each byte through the S-box) ...
            for i in range(ROWS_NUM):
                sbox_i = temp[i] // 0x10
                sbox_j = temp[i] % 0x10
                temp[i] = SBOX[16 * sbox_i + sbox_j]
            # ... then XOR with the column COLS_NUM back and the round
            # constant from RCON
            for i in range(ROWS_NUM):
                s = (key_schedule[i][j - COLS_NUM]) ^ (temp[i]) ^ (RCON[i][int(j / ROWS_NUM - 1)])
                key_schedule[i].append(s)
        else:
            # remaining words: XOR of the previous column and the column
            # COLS_NUM back
            for i in range(ROWS_NUM):
                s = key_schedule[i][j - COLS_NUM] ^ key_schedule[i][j - 1]
                key_schedule[i].append(s)
    return key_schedule
def add_round_key(state, round_key, round_num=0):
    """
    XOR the state with the round key columns of the given round.

    :param state: array of [ROWS_NUMxCOLS_NUM] size
    :param round_key: expanded key schedule, see get_round_key()
    :param round_num: which round's key columns to apply
    :return: new state (rows are shared with the input state)
    """
    result = state[:]
    offset = COLS_NUM * round_num
    for r in range(ROWS_NUM):
        for c in range(COLS_NUM):
            result[r][c] ^= round_key[r][offset + c]
    return result
def process(input_bytes, key, method):
    """
    Validate the key, cut *input_bytes* into KEY_LENGTH-sized chunks
    (padding the final short chunk), and run *method* on each chunk.

    :param input_bytes: data as list of byte values
    :param key: key string of exactly KEY_LENGTH single-byte characters
    :param method: per-chunk transform (encrypt_chunk / decrypt_chunk)
    :return: concatenated transformed bytes as a list of int's
    :raises ValueError: on a key of wrong length or with non-byte chars
    """
    if len(key) != KEY_LENGTH or any(ord(symbol) > 255 for symbol in key):
        raise ValueError('Key should be length {} and contain only latin alphabet and numbers'.format(KEY_LENGTH))
    round_key = get_round_key(key)
    output = []
    for start in range(0, len(input_bytes), KEY_LENGTH):
        chunk = input_bytes[start:start + KEY_LENGTH]
        if len(chunk) < KEY_LENGTH:
            # pad a short tail chunk with zeros plus a trailing 1 marker
            chunk += [0] * (KEY_LENGTH - len(chunk) - 1) + [1]
        output.extend(method(chunk, round_key))
    return output
def encrypt(input_bytes, key):
    """
    Encrypt *input_bytes* under *key* (AES, chunk by chunk).

    :param input_bytes: data as list of bytes
    :param key: string on KEY_LENGTH size
    :return: encrypted data as list of int's
    """
    return process(input_bytes, key, method=encrypt_chunk)
def encrypt_chunk(input_chunk, round_key):
    """
    Apply the full ROUNDS_NUM-round AES transform to one chunk.

    :param input_chunk: list of bytes of [ROWS_NUM * COLS_NUM] size
    :param round_key: expanded key schedule, see get_round_key()
    :return: encrypted chunk as a flat list of int's
    """
    # lay the flat chunk out column-major into a 4x4 state matrix
    state = [[input_chunk[r + ROWS_NUM * c] for c in range(COLS_NUM)] for r in range(ROWS_NUM)]
    state = add_round_key(state, round_key)
    for round_num in range(1, ROUNDS_NUM):
        state = sub_bytes(state)
        state = shift_rows(state)
        state = mix_columns(state)
        state = add_round_key(state, round_key, round_num)
    # the final round omits MixColumns
    state = add_round_key(shift_rows(sub_bytes(state)), round_key, ROUNDS_NUM)
    return state_to_list(state)
def decrypt(input_bytes, key):
    """
    Decrypt *input_bytes* under *key* (AES, chunk by chunk).

    :param input_bytes: data as list of bytes
    :param key: string on KEY_LENGTH size
    :return: decrypted data as list of int's
    """
    return process(input_bytes, key, method=decrypt_chunk)
def decrypt_chunk(input_chunk, round_key):
    """
    Invert the AES transform on one chunk (inverse rounds in reverse order).

    :param input_chunk: list of bytes of [ROWS_NUM * COLS_NUM] size
    :param round_key: expanded key schedule, see get_round_key()
    :return: decrypted chunk as a flat list of int's
    """
    # lay the flat chunk out column-major into a 4x4 state matrix
    state = [[input_chunk[r + ROWS_NUM * c] for c in range(COLS_NUM)] for r in range(ROWS_NUM)]
    state = add_round_key(state, round_key, ROUNDS_NUM)
    for round_num in range(ROUNDS_NUM - 1, 0, -1):
        state = shift_rows(state, True)
        state = sub_bytes(state, True)
        state = add_round_key(state, round_key, round_num)
        state = mix_columns(state, True)
    # final inverse round omits InvMixColumns
    state = add_round_key(sub_bytes(shift_rows(state, True), True), round_key, 0)
    return state_to_list(state)
8b70dce1d1b199842c4a599b0d23d0bb7c4fa64b | 3,436 | py | Python | pylith/topology/Subfield.py | Grant-Block/pylith | f6338261b17551eba879da998a5aaf2d91f5f658 | [
"MIT"
] | null | null | null | pylith/topology/Subfield.py | Grant-Block/pylith | f6338261b17551eba879da998a5aaf2d91f5f658 | [
"MIT"
] | null | null | null | pylith/topology/Subfield.py | Grant-Block/pylith | f6338261b17551eba879da998a5aaf2d91f5f658 | [
"MIT"
] | null | null | null | # ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2015 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
# @file pylith/topology/Subfield.py
#
# @brief Python object for defining attributes of a subfield within a
# field.
#
# Factory: subfield.
from pythia.pyre.components.Component import Component
class Subfield(Component):
    """Python object for defining the discretization of a subfield
    within a field (basis order, quadrature order, basis type, and
    finite-element space), exposed as Pyre inventory properties.

    FACTORY: subfield
    """

    import pythia.pyre.inventory

    basisOrder = pythia.pyre.inventory.int("basis_order", default=1)
    basisOrder.meta['tip'] = "Order of basis functions."

    quadOrder = pythia.pyre.inventory.int("quadrature_order", default=-1)
    quadOrder.meta['tip'] = "Order of numerical quadrature."

    dimension = pythia.pyre.inventory.int("dimension", default=-1)
    dimension.meta["tip"] = "Topological dimension associated with subfield (=-1 will use dimension of domain)."

    cellBasisStr = pythia.pyre.inventory.str("cell_basis", default="default",
                                             validator=pythia.pyre.inventory.choice(["simplex", "tensor", "default"]))
    cellBasisStr.meta['tip'] = "Type of cell basis functions (simplex, tensor, or default). Default is to use type matching cell type."

    isBasisContinuous = pythia.pyre.inventory.bool("is_basis_continous", default=True)
    isBasisContinuous.meta['tip'] = "Is basis continuous?"

    feSpaceStr = pythia.pyre.inventory.str("finite_element_space", default="polynomial",
                                           validator=pythia.pyre.inventory.choice(["polynomial", "point"]))
    feSpaceStr.meta['tip'] = "Finite-element space (polynomial or point). Point space corresponds to delta functions at quadrature points."

    # PUBLIC METHODS /////////////////////////////////////////////////////

    def __init__(self, name="subfield"):
        """Constructor.

        :param name: Pyre component name for this subfield.
        """
        Component.__init__(self, name, facility="subfield")
        return

    # PRIVATE METHODS ////////////////////////////////////////////////////

    def _configure(self):
        """Set members based on inventory: map the string traits onto the
        FieldBase enums (cellBasis, feSpace).
        """
        from .topology import FieldBase
        Component._configure(self)
        # map cell_basis string to the FieldBase basis constant
        mapBasis = {
            "simplex": FieldBase.SIMPLEX_BASIS,
            "tensor": FieldBase.TENSOR_BASIS,
            "default": FieldBase.DEFAULT_BASIS,
        }
        self.cellBasis = mapBasis[self.inventory.cellBasisStr]
        # map finite_element_space string to the FieldBase space constant
        mapSpace = {
            "polynomial": FieldBase.POLYNOMIAL_SPACE,
            "point": FieldBase.POINT_SPACE,
        }
        self.feSpace = mapSpace[self.inventory.feSpaceStr]
        return
# ITEM FACTORIES ///////////////////////////////////////////////////////
def subfieldFactory(name):
    """Pyre facility factory producing Subfield components (one per item)."""
    from pythia.pyre.inventory import facility
    return facility(name, factory=Subfield, family="subfield")
# FACTORIES ////////////////////////////////////////////////////////////
def subfield():
    """Default component factory associated with Subfield."""
    return Subfield()
# End of file
| 32.72381 | 139 | 0.598952 |
b86ed33eeaa76f47f00af26fbd4d48fb7d887a9e | 580 | py | Python | app/models/source.py | koyoo-maxwel/news | 656166c47a5dc79b9f4b8516153c90a57f808cc9 | [
"MIT"
] | 2 | 2019-01-21T09:04:16.000Z | 2019-01-21T09:04:17.000Z | app/models/source.py | koyoo-maxwel/news | 656166c47a5dc79b9f4b8516153c90a57f808cc9 | [
"MIT"
] | null | null | null | app/models/source.py | koyoo-maxwel/news | 656166c47a5dc79b9f4b8516153c90a57f808cc9 | [
"MIT"
] | 2 | 2019-02-17T11:33:28.000Z | 2019-06-24T06:36:43.000Z | class Source:
'''
Movie class to define Movie Objects
'''
def __init__(self, id, name, description):
self.id =id
self.name = name
self.description = description
class Articles:
    '''
    Article model for news items returned by the API.
    '''
    def __init__(self, blue, id, title, author, description, urlToImage, publishedAt, url):
        """Store the article fields.

        :param blue: source/category tag as passed by the caller
        :param id: article identifier (was accepted but never stored — fixed)
        :param title: headline
        :param author: author name
        :param description: short summary
        :param urlToImage: cover image URL
        :param publishedAt: publication timestamp
        :param url: link to the full article
        """
        self.blue = blue
        self.id = id  # bug fix: the parameter was previously dropped
        self.title = title
        self.author = author
        self.description = description
        self.urlToImage = urlToImage
        self.publishedAt = publishedAt
        self.url = url
cdb5285c2d64ccaea6fc3dca4916765e513e93f9 | 1,808 | py | Python | tables/tests/test_garbage.py | crs4/PyTables | 02d23d41f714122fd5fd4f7e1063c0b31d1a774b | [
"BSD-3-Clause"
] | 1 | 2020-12-27T13:53:00.000Z | 2020-12-27T13:53:00.000Z | tables/tests/test_garbage.py | mrgloom/PyTables | c30c6f40cd3d5996ee711d5685328085f3569cfc | [
"BSD-3-Clause"
] | null | null | null | tables/tests/test_garbage.py | mrgloom/PyTables | c30c6f40cd3d5996ee711d5685328085f3569cfc | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: 2005-09-20
# Author: Ivan Vilata i Balaguer - ivan@selidor.net
#
# $Id$
#
########################################################################
"""Test module for detecting uncollectable garbage in PyTables
This test module *must* be loaded in the last place. It just checks for
the existence of uncollectable garbage in ``gc.garbage`` after running
all the tests.
"""
import unittest
import gc
from tables.tests import common
class GarbageTestCase(common.PyTablesTestCase):
    """Test for uncollectable garbage.

    Inspects ``gc.garbage``, where the collector parks objects it found
    unreachable but could not free; a non-empty list after the whole
    suite has run indicates a reference leak somewhere in the suite.

    NOTE: this module is Python 2 (print statement, dict.iteritems).
    """

    def test00(self):
        """Checking for uncollectable garbage."""
        garbageLen = len(gc.garbage)
        if garbageLen == 0:
            return # success
        if common.verbose:
            classCount = {}
            # Count uncollected objects for each class.
            for obj in gc.garbage:
                objClass = obj.__class__.__name__
                if objClass in classCount:
                    classCount[objClass] += 1
                else:
                    classCount[objClass] = 1
            # Report how many leaked instances each class contributed.
            incidence = ['``%s``: %d' % (cls, cnt)
                         for (cls, cnt) in classCount.iteritems()]
            print "Class incidence:", ', '.join(incidence)
        self.fail("Possible leak: %d uncollected objects." % garbageLen)
def suite():
    """Return a test suite consisting of all the test cases in the module.

    Per the module docstring, this suite must be loaded last so that
    ``gc.garbage`` reflects the whole test run.
    """
    theSuite = unittest.TestSuite()
    theSuite.addTest(unittest.makeSuite(GarbageTestCase))
    return theSuite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
| 25.111111 | 77 | 0.561394 |
869f96901c1d0bf34d7df63c82034a77158f00c6 | 2,373 | py | Python | homeassistant/components/geo_location/__init__.py | AlbertoLopSie/home-assistant | d89e8ead61344c3e3c0420c1174e0e1077ba0410 | [
"Apache-2.0"
] | 1 | 2021-05-31T18:39:08.000Z | 2021-05-31T18:39:08.000Z | homeassistant/components/geo_location/__init__.py | AlbertoLopSie/home-assistant | d89e8ead61344c3e3c0420c1174e0e1077ba0410 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/geo_location/__init__.py | AlbertoLopSie/home-assistant | d89e8ead61344c3e3c0420c1174e0e1077ba0410 | [
"Apache-2.0"
] | 2 | 2019-01-21T05:49:23.000Z | 2019-02-19T16:30:48.000Z | """Support for Geolocation."""
from datetime import timedelta
import logging
from typing import Optional
from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE
from homeassistant.helpers.config_validation import ( # noqa
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
_LOGGER = logging.getLogger(__name__)
ATTR_DISTANCE = "distance"
ATTR_SOURCE = "source"
DOMAIN = "geo_location"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
SCAN_INTERVAL = timedelta(seconds=60)
async def async_setup(hass, config):
    """Set up the Geolocation component.

    Creates the EntityComponent that manages all geo_location entities
    and stores it in ``hass.data[DOMAIN]`` so the config-entry hooks
    below can reach it.
    """
    component = hass.data[DOMAIN] = EntityComponent(
        _LOGGER, DOMAIN, hass, SCAN_INTERVAL
    )
    await component.async_setup(config)
    return True
async def async_setup_entry(hass, entry):
    """Set up a config entry.

    Delegates to the EntityComponent created in async_setup.
    """
    return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
    """Unload a config entry.

    Delegates to the EntityComponent created in async_setup.
    """
    return await hass.data[DOMAIN].async_unload_entry(entry)
class GeolocationEvent(Entity):
    """Base entity for an external event tied to a geographic location.

    Subclasses override ``source``/``distance``/``latitude``/``longitude``;
    the entity state is the rounded distance.
    """

    @property
    def state(self):
        """State of the sensor: the distance rounded to one decimal."""
        if self.distance is None:
            return None
        return round(self.distance, 1)

    @property
    def source(self) -> str:
        """Source value of this external event; must be overridden."""
        raise NotImplementedError

    @property
    def distance(self) -> Optional[float]:
        """Distance value of this external event (None in the base class)."""
        return None

    @property
    def latitude(self) -> Optional[float]:
        """Latitude value of this external event (None in the base class)."""
        return None

    @property
    def longitude(self) -> Optional[float]:
        """Longitude value of this external event (None in the base class)."""
        return None

    @property
    def state_attributes(self):
        """State attributes: lat/long rounded to 5 decimals, plus source."""
        attributes = {}
        if self.latitude is not None:
            attributes[ATTR_LATITUDE] = round(self.latitude, 5)
        if self.longitude is not None:
            attributes[ATTR_LONGITUDE] = round(self.longitude, 5)
        if self.source is not None:
            attributes[ATTR_SOURCE] = self.source
        return attributes
| 27.593023 | 75 | 0.678466 |
2f4a158e0ad7cf5b36c514ccb0534a770685d5ba | 3,259 | py | Python | kitsune/messages/tests/test_templates.py | theresnotime/kitsune | 0757b267b0d332264167d31ce84e342263e1c635 | [
"BSD-3-Clause"
] | null | null | null | kitsune/messages/tests/test_templates.py | theresnotime/kitsune | 0757b267b0d332264167d31ce84e342263e1c635 | [
"BSD-3-Clause"
] | null | null | null | kitsune/messages/tests/test_templates.py | theresnotime/kitsune | 0757b267b0d332264167d31ce84e342263e1c635 | [
"BSD-3-Clause"
] | null | null | null | from pyquery import PyQuery as pq
from kitsune.messages.models import OutboxMessage
from kitsune.sumo.templatetags.jinja_helpers import urlparams
from kitsune.sumo.tests import TestCase
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.tests import UserFactory
class SendMessageTestCase(TestCase):
    """Tests for composing and sending private messages."""

    def setUp(self):
        # Three users; user1 is logged in and acts as the sender throughout.
        super(SendMessageTestCase, self).setUp()
        self.user1 = UserFactory()
        self.user2 = UserFactory()
        self.user3 = UserFactory()
        self.client.login(username=self.user1.username, password="testpass")

    def test_send_message_page(self):
        # Make sure page loads.
        response = self.client.get(reverse("messages.new"), follow=True)
        self.assertEqual(200, response.status_code)
        assert len(pq(response.content)("#id_message"))

    def _test_send_message_to(self, to):
        """Post a message to ``to`` and verify exactly one outbox entry."""
        # Post a new message and verify it was sent.
        data = {"to": to, "message": "hi there"}
        response = self.client.post(reverse("messages.new", locale="en-US"), data, follow=True)
        self.assertEqual(200, response.status_code)
        self.assertEqual("Your message was sent!", pq(response.content)("ul.user-messages").text())
        self.assertEqual(1, OutboxMessage.objects.filter(sender=self.user1).count())
        return response

    def test_send_message_to_one(self):
        self._test_send_message_to(self.user2.username)

    def test_send_message_to_two(self):
        # Comma-separated recipient list.
        to = ", ".join([self.user2.username, self.user3.username])
        self._test_send_message_to(to)

    def test_send_message_trailing_comma(self):
        # A trailing comma in the recipient field must be tolerated.
        self._test_send_message_to(self.user2.username + ",")

    def test_send_message_two_commas(self):
        # Doubled commas (empty entries) must be tolerated.
        self._test_send_message_to(self.user2.username + ",," + self.user3.username)

    def test_send_message_to_prefilled(self):
        # ?to=<username> should pre-fill the recipient input.
        url = urlparams(reverse("messages.new"), to=self.user2.username)
        response = self.client.get(url, follow=True)
        self.assertEqual(200, response.status_code)
        self.assertEqual(self.user2.username, pq(response.content)("#id_to")[0].attrib["value"])

    def test_send_message_ratelimited(self):
        """Verify that after 50 messages, no more are sent."""
        # Try to send 53 messages.
        for i in range(53):
            self.client.post(
                reverse("messages.new", locale="en-US"),
                {"to": self.user2.username, "message": "hi there %s" % i},
            )
        # Verify only 50 are sent.
        self.assertEqual(50, OutboxMessage.objects.filter(sender=self.user1).count())
class MessagePreviewTests(TestCase):
    """Tests for preview."""

    def setUp(self):
        super(MessagePreviewTests, self).setUp()
        self.user = UserFactory()
        self.client.login(username=self.user.username, password="testpass")

    def test_preview(self):
        """Preview the wiki syntax content."""
        # "=Test Content=" is wiki markup; the preview endpoint should
        # render it as an <h1> heading inside div.message.
        response = self.client.post(
            reverse("messages.preview_async", locale="en-US"),
            {"content": "=Test Content="},
            follow=True,
        )
        self.assertEqual(200, response.status_code)
        doc = pq(response.content)
        self.assertEqual("Test Content", doc("div.message h1").text())
| 39.26506 | 99 | 0.664621 |
9560d684a418376ae7ec1fab49e266002ec3f87f | 440 | py | Python | mundo 2/ex052.py | jorgeduartejr/Ex-PYTHON | 266b656ad94065e77ece7cdbc9e09062c5933100 | [
"MIT"
] | null | null | null | mundo 2/ex052.py | jorgeduartejr/Ex-PYTHON | 266b656ad94065e77ece7cdbc9e09062c5933100 | [
"MIT"
] | null | null | null | mundo 2/ex052.py | jorgeduartejr/Ex-PYTHON | 266b656ad94065e77ece7cdbc9e09062c5933100 | [
"MIT"
] | null | null | null | print('Condição de existência de um número primo')
tot = 0
num = int(input('Digite aqui um número: '))
for c in range(1, num + 1):
if num % c == 0:
print('\033[33m', end='')
tot += 1
else:
print('\033[31m', end='')
print('{} '.format(c), end='')
print('\n\033[m0 O número {}, foi divisível {} vezes'.format(num, tot))
if tot == 2:
print('E por isso ele é PRIMO.')
else:
print('Ele NÃO É PRIMO.')
| 27.5 | 71 | 0.552273 |
6105d829a63d62da46917be574ff9d4347be35fe | 3,523 | py | Python | data/transcoder_evaluation_gfg/python/LONGEST_COMMON_SUBSTRING.py | mxl1n/CodeGen | e5101dd5c5e9c3720c70c80f78b18f13e118335a | [
"MIT"
] | 241 | 2021-07-20T08:35:20.000Z | 2022-03-31T02:39:08.000Z | data/transcoder_evaluation_gfg/python/LONGEST_COMMON_SUBSTRING.py | mxl1n/CodeGen | e5101dd5c5e9c3720c70c80f78b18f13e118335a | [
"MIT"
] | 49 | 2021-07-22T23:18:42.000Z | 2022-03-24T09:15:26.000Z | data/transcoder_evaluation_gfg/python/LONGEST_COMMON_SUBSTRING.py | mxl1n/CodeGen | e5101dd5c5e9c3720c70c80f78b18f13e118335a | [
"MIT"
] | 71 | 2021-07-21T05:17:52.000Z | 2022-03-29T23:49:28.000Z | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold(X, Y, m, n):
    """Return the length of the longest common substring of X[:m] and Y[:n].

    Classic O(m*n) dynamic programme: suffix[i][j] is the length of the
    longest common suffix of X[:i] and Y[:j]; the answer is the largest
    entry of that table.
    """
    suffix = [[0] * (n + 1) for _ in range(m + 1)]
    best = 0
    # Row/column 0 stay zero (empty prefix); fill the rest.
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if X[i - 1] == Y[j - 1]:
                length = suffix[i - 1][j - 1] + 1
                suffix[i][j] = length
                if length > best:
                    best = length
    return best
#TOFILL
if __name__ == '__main__':
param = [
(['A', 'D', 'E', 'E', 'L', 'L', 'T', 'r', 'x'],['D', 'F', 'H', 'O', 'g', 'o', 'u', 'v', 'w'],4,4,),
(['9', '3', '4', '8', '7', '6', '3', '8', '3', '3', '5', '3', '5', '4', '2', '5', '5', '3', '6', '2', '1', '7', '4', '2', '7', '3', '2', '1', '3', '7', '6', '5', '0', '6', '3', '8', '5', '1', '7', '9', '2', '7'],['5', '5', '3', '7', '8', '0', '9', '8', '5', '8', '5', '1', '4', '4', '0', '2', '9', '2', '3', '1', '1', '3', '6', '1', '2', '0', '5', '4', '3', '7', '5', '5', '8', '1', '1', '4', '8', '1', '7', '5', '5', '4'],41,37,),
(['0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],['0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],35,29,),
(['W', 'X', 'P', 'u', 's', 'k', 'O', 'y', 'Q', 'i', 't', 'z', 'F', 'f', 's', 'N', 'K', 'm', 'I', 'M', 'g', 'e', 'E', 'P', 'b', 'Y', 'c', 'O', ' ', 'G', 'F', 'x'],['e', 'R', 'P', 'W', 'd', 'a', 'A', 'j', 'H', 'v', 'T', 'w', 'x', 'I', 'd', 'o', 'z', 'K', 'B', 'M', 'J', 'L', 'a', ' ', 'T', 'L', 'V', 't', 'M', 'U', 'z', 'R'],31,18,),
(['0', '1', '2', '4', '5', '7', '7', '7', '8', '8', '9', '9', '9'],['0', '0', '2', '2', '2', '3', '4', '6', '6', '7', '8', '9', '9'],12,8,),
(['0', '0', '1'],['0', '0', '1'],1,1,),
(['A', 'C', 'F', 'G', 'G', 'H', 'I', 'K', 'K', 'N', 'O', 'Q', 'R', 'V', 'V', 'W', 'Y', 'a', 'a', 'c', 'd', 'k', 'k', 'm', 'o', 'p', 't', 'u', 'y', 'y', 'y', 'z'],[' ', ' ', 'B', 'C', 'C', 'C', 'D', 'E', 'I', 'J', 'M', 'N', 'P', 'T', 'U', 'U', 'V', 'V', 'W', 'W', 'Y', 'b', 'c', 'e', 'i', 'o', 'p', 'r', 't', 'y', 'y', 'z'],21,23,),
(['0', '0', '0', '2', '8', '3', '5', '1', '0', '7', '7', '9', '9', '4', '8', '9', '5'],['8', '5', '8', '7', '1', '4', '0', '2', '2', '7', '2', '4', '0', '8', '3', '8', '7'],13,12,),
(['0', '0', '0', '0', '0', '0', '0', '0', '1', '1', '1'],['0', '0', '0', '0', '0', '0', '1', '1', '1', '1', '1'],9,9,),
(['B', 'o', 'R', 'k', 'Y', 'M', 'g', 'b', 'h', 'A', 'i', 'X', 'p', 'i', 'j', 'f', 'V', 'n', 'd', 'P', 'T', 'U', 'f', 'G', 'M', 'W', 'g', 'a', 'C', 'E', 'v', 'C', ' '],['F', 'h', 'G', 'H', 'Q', 'Q', 'K', 'g', 'k', 'u', 'l', 'c', 'c', 'o', 'n', 'G', 'i', 'Z', 'd', 'b', 'c', 'b', 'v', 't', 'S', 't', 'P', 'A', 'K', 'g', 'G', 'i', 'm'],19,32,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) | 85.926829 | 485 | 0.278172 |
22c9ae5ea1505fe49acda641b3184e33a5110121 | 4,968 | py | Python | test/functional/p2p_time_offset.py | INFINCOIN/INFINCOINCASH | 9d58549f28752019dbc7bb4227b99bebb3fdcc49 | [
"MIT"
] | 1 | 2021-12-30T23:57:35.000Z | 2021-12-30T23:57:35.000Z | test/functional/p2p_time_offset.py | INFINCOIN/INFINCOINCASH | 9d58549f28752019dbc7bb4227b99bebb3fdcc49 | [
"MIT"
] | null | null | null | test/functional/p2p_time_offset.py | INFINCOIN/INFINCOINCASH | 9d58549f28752019dbc7bb4227b99bebb3fdcc49 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The PIVX developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import time
from test_framework.test_framework import infincoincashTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
set_node_times,
)
def connect_nodes_bi(nodes, a, b):
    """Connect nodes[a] and nodes[b] to each other (both directions)."""
    for src, dst in ((nodes[a], b), (nodes[b], a)):
        connect_nodes(src, dst)
class TimeOffsetTest(infincoincashTestFramework):
    """Functional test for peer time-offset handling.

    Starts 8 nodes with mocked clocks, connects peers with increasing
    clock skews to node 0, and checks the median ``timeoffset`` node 0
    reports after each connection, including that a peer skewed by +30s
    is rejected.
    """

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 8
        self.enable_mocktime()

    def setup_network(self):
        # don't connect nodes yet
        self.setup_nodes()

    def check_connected_nodes(self):
        """Assert every tracked peer sees 2 connections and offset 0."""
        ni = [node.getnetworkinfo() for node in self.connected_nodes]
        assert_equal([x['connections'] for x in ni], [2] * len(ni))
        assert_equal([x['timeoffset'] for x in ni], [0] * len(ni))

    def run_test(self):
        # Nodes synced but not connected
        self.mocktime = int(time.time())
        set_node_times(self.nodes, self.mocktime)
        ni = [node.getnetworkinfo() for node in self.nodes]
        assert_equal([x['connections'] for x in ni], [0] * self.num_nodes)
        self.log.info("Nodes disconnected from each other. Time: %d" % self.mocktime)
        assert_equal([x['timeoffset'] for x in ni], [0] * self.num_nodes)
        self.log.info("Nodes have nTimeOffset 0")
        # Set node times.
        # nodes [1, 5]: set times to +10, +15, ..., +30 secs
        for i in range(1, 6):
            self.nodes[i].setmocktime(self.mocktime + 5 * (i + 1))
        # nodes [6, 7]: set time to -5, -10 secs
        for i in range(6, 8):
            self.nodes[i].setmocktime(self.mocktime - 5 * (i - 5))
        # connect nodes 1 and 2
        self.log.info("Connecting with node-1 (+10 s) and node-2 (+15 s)...")
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 0, 2)
        self.log.info("--> samples = [+0, +10, (+10), +15, +15]")
        ni = self.nodes[0].getnetworkinfo()
        assert_equal(ni['connections'], 4)
        # The reported offset is the median of the samples listed above.
        assert_equal(ni['timeoffset'], 10)
        self.connected_nodes = [self.nodes[1], self.nodes[2]]
        self.check_connected_nodes()
        self.log.info("Node-0 nTimeOffset: +%d seconds" % ni['timeoffset'])
        # connect node 3
        self.log.info("Connecting with node-3 (+20 s). This will print the warning...")
        connect_nodes_bi(self.nodes, 0, 3)
        self.log.info("--> samples = [+0, +10, +10, (+15), +15, +20, +20]")
        ni = self.nodes[0].getnetworkinfo()
        assert_equal(ni['connections'], 6)
        assert_equal(ni['timeoffset'], 15)
        self.connected_nodes.append(self.nodes[3])
        self.check_connected_nodes()
        self.log.info("Node-0 nTimeOffset: +%d seconds" % ni['timeoffset'])
        # connect node 6
        self.log.info("Connecting with node-6 (-5 s)...")
        connect_nodes_bi(self.nodes, 0, 6)
        self.log.info("--> samples = [-5, -5, +0, +10, (+10), +15, +15, +20, +20]")
        ni = self.nodes[0].getnetworkinfo()
        assert_equal(ni['connections'], 8)
        assert_equal(ni['timeoffset'], 10)
        self.connected_nodes.append(self.nodes[6])
        self.check_connected_nodes()
        self.log.info("Node-0 nTimeOffset: +%d seconds" % ni['timeoffset'])
        # connect node 4
        self.log.info("Connecting with node-4 (+25 s). This will print the warning...")
        connect_nodes_bi(self.nodes, 0, 4)
        self.log.info("--> samples = [-5, -5, +0, +10, +10, (+15), +15, +20, +20, +25, +25]")
        ni = self.nodes[0].getnetworkinfo()
        assert_equal(ni['connections'], 10)
        assert_equal(ni['timeoffset'], 15)
        self.connected_nodes.append(self.nodes[4])
        self.check_connected_nodes()
        self.log.info("Node-0 nTimeOffset: +%d seconds" % ni['timeoffset'])
        # try to connect node 5 and check that it can't
        self.log.info("Trying to connect with node-5 (+30 s)...")
        connect_nodes_bi(self.nodes, 0, 5)
        ni = self.nodes[0].getnetworkinfo()
        # Connection count unchanged: the +30s peer was rejected.
        assert_equal(ni['connections'], 10)
        assert_equal(ni['timeoffset'], 15)
        self.log.info("Not connected.")
        self.log.info("Node-0 nTimeOffset: +%d seconds" % ni['timeoffset'])
        # connect node 7
        self.log.info("Connecting with node-7 (-10 s)...")
        connect_nodes_bi(self.nodes, 0, 7)
        self.log.info("--> samples = [-10, -10, -5, -5, +0, +10, (+10), +15, +15, +20, +20, +25, +25]")
        ni = self.nodes[0].getnetworkinfo()
        assert_equal(ni['connections'], 12)
        assert_equal(ni['timeoffset'], 10)
        self.connected_nodes.append(self.nodes[6])
        self.check_connected_nodes()
        self.log.info("Node-0 nTimeOffset: +%d seconds" % ni['timeoffset'])
if __name__ == '__main__':
TimeOffsetTest().main() | 41.4 | 103 | 0.605475 |
35bd5b9b18e9444f252b6010f7e3aad657f0687d | 1,524 | py | Python | nwbwidgets/test/test_image.py | alejoe91/nwb-jupyter-widgets | 5703f235c5c1a1bf8b32c9af6de2a6907788ce1a | [
"BSD-3-Clause-LBNL"
] | null | null | null | nwbwidgets/test/test_image.py | alejoe91/nwb-jupyter-widgets | 5703f235c5c1a1bf8b32c9af6de2a6907788ce1a | [
"BSD-3-Clause-LBNL"
] | null | null | null | nwbwidgets/test/test_image.py | alejoe91/nwb-jupyter-widgets | 5703f235c5c1a1bf8b32c9af6de2a6907788ce1a | [
"BSD-3-Clause-LBNL"
] | null | null | null | import ipywidgets as widgets
import matplotlib.pyplot as plt
import numpy as np
from nwbwidgets.image import show_rbga_image, show_grayscale_image, show_index_series, show_image_series
from nwbwidgets.view import default_neurodata_vis_spec
from pynwb.base import TimeSeries
from pynwb.image import RGBImage, GrayscaleImage, IndexSeries, ImageSeries
def test_show_rbg_image():
    """show_rbga_image should render a 30x30x3 RGB array as a Figure."""
    data = np.random.rand(2700).reshape((30, 30, 3))
    rgb_image = RGBImage(name='test_image', data=data)
    assert isinstance(show_rbga_image(rgb_image), plt.Figure)
def test_show_grayscale_image():
    """show_grayscale_image should render a 30x30 array as a Figure."""
    data = np.random.rand(900).reshape((30, 30))
    grayscale_image = GrayscaleImage(name='test_image', data=data)
    assert isinstance(show_grayscale_image(grayscale_image), plt.Figure)
def test_show_index_series():
    """show_index_series should return an ipywidgets widget for an
    IndexSeries pointing into an 8-frame TimeSeries."""
    data = np.array([12, 14, 16, 18, 20, 22, 24, 26])
    indexed_timeseries = TimeSeries(name='Index Series time data',
                                    data=np.random.rand(800).reshape((8, 10, 10)), rate=1.)
    index_series = IndexSeries(name='Sample Index Series', data=data,
                               indexed_timeseries=indexed_timeseries, rate=1.)
    assert isinstance(show_index_series(index_series, default_neurodata_vis_spec), widgets.Widget)
def test_show_image_series():
    """show_image_series should return an ipywidgets widget for an
    8-frame 10x10 ImageSeries."""
    data = np.random.rand(800).reshape((8, 10, 10))
    image_series = ImageSeries(name='Image Series', data=data, rate=1.)
    assert isinstance(show_image_series(image_series, default_neurodata_vis_spec), widgets.Widget)
| 39.076923 | 104 | 0.73622 |
652e4587387b70122cf3af504bbed98f725cd810 | 6,028 | py | Python | apps/rgb_colourspace_chromatically_adapted_primaries.py | colour-science/colour-dash | 4f8713b8c324e3395849265713d3ab8f918129be | [
"BSD-3-Clause"
] | 2 | 2018-06-06T02:29:29.000Z | 2021-12-15T03:38:30.000Z | apps/rgb_colourspace_chromatically_adapted_primaries.py | colour-science/colour-dash | 4f8713b8c324e3395849265713d3ab8f918129be | [
"BSD-3-Clause"
] | 1 | 2021-06-08T20:33:00.000Z | 2021-06-08T20:33:00.000Z | apps/rgb_colourspace_chromatically_adapted_primaries.py | colour-science/colour-dash | 4f8713b8c324e3395849265713d3ab8f918129be | [
"BSD-3-Clause"
] | 1 | 2018-06-19T11:19:13.000Z | 2018-06-19T11:19:13.000Z | # -*- coding: utf-8 -*-
"""
RGB Colourspace Chromatically Adapted Primaries Application
===========================================================
"""
import sys
import urllib.parse
from dash.dependencies import Input, Output
from dash_core_components import Dropdown, Link, Markdown, Slider
from dash_html_components import A, Code, Div, H3, H5, Li, Pre, Ul
import colour
from app import APP, SERVER_URL
from apps.common import (CHROMATIC_ADAPTATION_TRANSFORM_OPTIONS,
ILLUMINANTS_OPTIONS, RGB_COLOURSPACE_OPTIONS)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2018-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'APP_NAME', 'APP_PATH', 'APP_DESCRIPTION', 'APP_UID', 'LAYOUT',
'set_primaries_output'
]
APP_NAME = 'RGB Colourspace Chromatically Adapted Primaries'
"""
App name.
APP_NAME : unicode
"""
APP_PATH = '/apps/{0}'.format(__name__.split('.')[-1])
"""
App path, i.e. app url.
APP_PATH : unicode
"""
APP_DESCRIPTION = ('This app computes the '
'*Chromatically Adapted Primaries* of the given '
'*RGB Colourspace* to the given *Illuminant* using the '
'given *Chromatic Adaptation Transform*.')
"""
App description.
APP_DESCRIPTION : unicode
"""
APP_UID = hash(APP_NAME)
"""
App unique id.
APP_UID : unicode
"""
LAYOUT = Div([
H3([Link(APP_NAME, href=APP_PATH)], className='text-center'),
Div([
Markdown(APP_DESCRIPTION),
H5(children='Colourspace'),
Dropdown(
id='colourspace-{0}'.format(APP_UID),
options=RGB_COLOURSPACE_OPTIONS,
value=RGB_COLOURSPACE_OPTIONS[0]['value'],
clearable=False,
className='app-widget'),
H5(children='Illuminant'),
Dropdown(
id='illuminant-{0}'.format(APP_UID),
options=ILLUMINANTS_OPTIONS,
value=ILLUMINANTS_OPTIONS[0]['value'],
clearable=False,
className='app-widget'),
H5(children='Chromatic Adaptation Transform'),
Dropdown(
id='chromatic-adaptation-transform-{0}'.format(APP_UID),
options=CHROMATIC_ADAPTATION_TRANSFORM_OPTIONS,
value=CHROMATIC_ADAPTATION_TRANSFORM_OPTIONS[0]['value'],
clearable=False,
className='app-widget'),
H5(children='Formatter'),
Dropdown(
id='formatter-{0}'.format(APP_UID),
options=[{
'label': 'str',
'value': 'str'
}, {
'label': 'repr',
'value': 'repr'
}],
value='str',
clearable=False,
className='app-widget'),
H5(children='Decimals'),
Slider(
id='decimals-{0}'.format(APP_UID),
min=1,
max=15,
step=1,
value=10,
marks={i + 1: str(i + 1)
for i in range(15)},
className='app-widget'),
Pre([Code(id='primaries-{0}'.format(APP_UID), className='code shell')],
className='app-widget app-output'),
Ul([
Li([Link('Back to index...', href='/', className='app-link')],
className='list-inline-item'),
Li([
A('Permalink',
href=urllib.parse.urljoin(SERVER_URL, APP_PATH),
target='_blank')
],
className='list-inline-item'),
Li([
A('colour-science.org',
href='https://www.colour-science.org',
target='_blank')
],
className='list-inline-item'),
],
className='list-inline text-center'),
],
className='col-6 mx-auto')
])
"""
App layout, i.e. :class:`Div` class instance.
LAYOUT : Div
"""
# Dash callback: recompute the output whenever any of the five widget
# values (colourspace, illuminant, CAT, formatter, decimals) changes.
@APP.callback(
    Output(
        component_id='primaries-{0}'.format(APP_UID),
        component_property='children'),
    [
        Input('colourspace-{0}'.format(APP_UID), 'value'),
        Input('illuminant-{0}'.format(APP_UID), 'value'),
        Input('chromatic-adaptation-transform-{0}'.format(APP_UID), 'value'),
        Input('formatter-{0}'.format(APP_UID), 'value'),
        Input('decimals-{0}'.format(APP_UID), 'value')
    ])
def set_primaries_output(colourspace, illuminant,
                         chromatic_adaptation_transform, formatter, decimals):
    """
    Computes and writes the chromatically adapted *primaries *of the given
    *RGB* colourspace to the given *illuminant* using the given
    *chromatic adaptation transform*to into the output :class:`Pre` class
    instance.

    Parameters
    ----------
    colourspace : unicode
        *RGB* colourspace to chromatically adapt the *primaries*.
    illuminant : unicode
        *CIE 1931 2 Degree Standard Observer* illuminant to adapt the
        *primaries* to.
    chromatic_adaptation_transform : unicode
        *Chromatic adaptation transform* to use.
    formatter : unicode
        Formatter to use, :func:`str` or :func:`repr`.
    decimals : int
        Decimals to use when formatting the chromatically adapted *primaries*.

    Returns
    -------
    unicode
        Chromatically adapted *primaries*.
    """

    P = colour.chromatically_adapted_primaries(
        colour.RGB_COLOURSPACES[colourspace].primaries,
        colour.RGB_COLOURSPACES[colourspace].whitepoint,
        colour.CCS_ILLUMINANTS['CIE 1931 2 Degree Standard Observer'][
            illuminant], chromatic_adaptation_transform)

    # Print without truncation, at the requested precision, using either
    # str() or repr() semantics for the numpy array.
    with colour.utilities.numpy_print_options(
            formatter={'float': ('{{: 0.{0}f}}'.format(decimals)).format},
            threshold=sys.maxsize):
        if formatter == 'str':
            P = str(P)
        elif formatter == 'repr':
            P = repr(P)

    return P
| 31.233161 | 79 | 0.584605 |
63771c584faf382c1203f3f9114700120d14fcef | 428 | py | Python | src/test.py | szlatyka/SemilabFunStuffRoomA | a0767313ae11285b3338e96b8b5d8c3ec5a20222 | [
"MIT"
] | null | null | null | src/test.py | szlatyka/SemilabFunStuffRoomA | a0767313ae11285b3338e96b8b5d8c3ec5a20222 | [
"MIT"
] | null | null | null | src/test.py | szlatyka/SemilabFunStuffRoomA | a0767313ae11285b3338e96b8b5d8c3ec5a20222 | [
"MIT"
] | 1 | 2021-01-05T13:46:22.000Z | 2021-01-05T13:46:22.000Z | import Api
#Api.User.Create("gábor", "absc")
usr = Api.User.ByName("gábor")
isr2 = Api.User(1);
print("#ID: " + str(isr2.name()))
#Api.Group.Create("Test csopi")
grp = Api.Group(1);
grp.assign(usr)
print();
grp.remove(usr)
print("#ID: " + str(usr.group()));
print("#ID: " + str(usr.group()));
print(Api.Group.All())
Api.Group.Create('Sorter')
print(Api.Group.All())
Api.Group.Create('Analysis')
print(Api.Group.All())
| 15.285714 | 34 | 0.635514 |
04e4a019d61c4ff49483883faa4f4026b83b0303 | 236 | py | Python | py3/lambda_function/source/taskcat_lambda/lambda_function.py | tonynv/lambda_layer | a39abe217a307a6ba03c36a252d69c59416db95a | [
"Apache-2.0"
] | null | null | null | py3/lambda_function/source/taskcat_lambda/lambda_function.py | tonynv/lambda_layer | a39abe217a307a6ba03c36a252d69c59416db95a | [
"Apache-2.0"
] | null | null | null | py3/lambda_function/source/taskcat_lambda/lambda_function.py | tonynv/lambda_layer | a39abe217a307a6ba03c36a252d69c59416db95a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
taskat lambda stub
"""
from taskcat._cli import get_installed_version as taskcat_version
def lambda_handler(event, context):
    """
    Return taskcat version in layer

    AWS Lambda entry point; ``event`` and ``context`` are unused -- the
    handler only reports the taskcat version bundled in this layer.
    """
    return taskcat_version()
| 15.733333 | 65 | 0.707627 |
6c0f0336eaeb41baf7cf08f03b06ad0d2c6afbba | 11,180 | py | Python | electrum/gui/kivy/uix/dialogs/tx_dialog.py | Yun118/electrum | 780a5404fb0b3ee7c406faf70a6e79e5df872a03 | [
"MIT"
] | 2 | 2021-06-24T15:49:31.000Z | 2021-06-24T15:52:41.000Z | electrum/gui/kivy/uix/dialogs/tx_dialog.py | Yun118/electrum | 780a5404fb0b3ee7c406faf70a6e79e5df872a03 | [
"MIT"
] | null | null | null | electrum/gui/kivy/uix/dialogs/tx_dialog.py | Yun118/electrum | 780a5404fb0b3ee7c406faf70a6e79e5df872a03 | [
"MIT"
] | null | null | null | import copy
from datetime import datetime
from typing import NamedTuple, Callable, TYPE_CHECKING
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.uix.label import Label
from kivy.uix.dropdown import DropDown
from kivy.uix.button import Button
from .question import Question
from electrum.gui.kivy.i18n import _
from electrum.util import InvalidPassword
from electrum.address_synchronizer import TX_HEIGHT_LOCAL
from electrum.wallet import CannotBumpFee
from electrum.transaction import Transaction, PartialTransaction
from ...util import address_colors
if TYPE_CHECKING:
from ...main_window import ElectrumWindow
Builder.load_string('''
<TxDialog>
id: popup
title: _('Transaction')
is_mine: True
can_sign: False
can_broadcast: False
can_rbf: False
fee_str: ''
feerate_str: ''
date_str: ''
date_label:''
amount_str: ''
tx_hash: ''
status_str: ''
description: ''
outputs_str: ''
BoxLayout:
orientation: 'vertical'
ScrollView:
scroll_type: ['bars', 'content']
bar_width: '25dp'
GridLayout:
height: self.minimum_height
size_hint_y: None
cols: 1
spacing: '10dp'
padding: '10dp'
GridLayout:
height: self.minimum_height
size_hint_y: None
cols: 1
spacing: '10dp'
BoxLabel:
text: _('Status')
value: root.status_str
BoxLabel:
text: _('Description') if root.description else ''
value: root.description
BoxLabel:
text: root.date_label
value: root.date_str
BoxLabel:
text: _('Amount sent') if root.is_mine else _('Amount received')
value: root.amount_str
BoxLabel:
text: _('Transaction fee') if root.fee_str else ''
value: root.fee_str
BoxLabel:
text: _('Transaction fee rate') if root.feerate_str else ''
value: root.feerate_str
TopLabel:
text: _('Transaction ID') + ':' if root.tx_hash else ''
TxHashLabel:
data: root.tx_hash
name: _('Transaction ID')
TopLabel:
text: _('Outputs') + ':'
OutputList:
id: output_list
Widget:
size_hint: 1, 0.1
BoxLayout:
size_hint: 1, None
height: '48dp'
Button:
id: action_button
size_hint: 0.5, None
height: '48dp'
text: ''
disabled: True
opacity: 0
on_release: root.on_action_button_clicked()
IconButton:
size_hint: 0.5, None
height: '48dp'
icon: 'atlas://electrum/gui/kivy/theming/light/qrcode'
on_release: root.show_qr()
Button:
size_hint: 0.5, None
height: '48dp'
text: _('Label')
on_release: root.label_dialog()
Button:
size_hint: 0.5, None
height: '48dp'
text: _('Close')
on_release: root.dismiss()
''')
class ActionButtonOption(NamedTuple):
    # One candidate action for the dialog's action button:
    # the button label, its click handler, and whether it applies
    # to the currently displayed transaction.
    text: str
    func: Callable
    enabled: bool
class TxDialog(Factory.Popup):
    def __init__(self, app, tx):
        """Build the popup showing details and actions for ``tx``."""
        Factory.Popup.__init__(self)
        self.app = app  # type: ElectrumWindow
        self.wallet = self.app.wallet
        self.tx = tx  # type: Transaction
        # Placeholder no-op handler; replaced by update_action_button().
        self._action_button_fn = lambda btn: None

        # If the wallet can populate the inputs with more info, do it now.
        # As a result, e.g. we might learn an imported address tx is segwit,
        # or that a beyond-gap-limit address is is_mine.
        # note: this might fetch prev txs over the network.
        tx.add_info_from_wallet(self.wallet)
    def on_open(self):
        # Kivy popup hook: refresh the view once the dialog is shown.
        self.update()
    def update(self):
        """Refresh all displayed fields from the wallet's view of the tx."""
        format_amount = self.app.format_amount_and_units
        tx_details = self.wallet.get_tx_info(self.tx)
        tx_mined_status = tx_details.tx_mined_status
        exp_n = tx_details.mempool_depth_bytes
        amount, fee = tx_details.amount, tx_details.fee
        self.status_str = tx_details.status
        self.description = tx_details.label
        self.can_broadcast = tx_details.can_broadcast
        self.can_rbf = tx_details.can_bump
        self.tx_hash = tx_details.txid or ''
        if tx_mined_status.timestamp:
            self.date_label = _('Date')
            self.date_str = datetime.fromtimestamp(tx_mined_status.timestamp).isoformat(' ')[:-3]
        elif exp_n:
            # Unconfirmed: show mempool depth from the tip instead of a date.
            self.date_label = _('Mempool depth')
            self.date_str = _('{} from tip').format('%.2f MB'%(exp_n/1000000))
        else:
            self.date_label = ''
            self.date_str = ''
        self.can_sign = self.wallet.can_sign(self.tx)
        if amount is None:
            self.amount_str = _("Transaction unrelated to your wallet")
        elif amount > 0:
            self.is_mine = False
            self.amount_str = format_amount(amount)
        else:
            self.is_mine = True
            self.amount_str = format_amount(-amount)
        # Hide the fee if the wallet warns the apparent fee of this partial
        # tx may be wrong (risk of displaying a coin-burning fee as normal).
        risk_of_burning_coins = (isinstance(self.tx, PartialTransaction)
                                 and self.can_sign
                                 and fee is not None
                                 and bool(self.wallet.get_warning_for_risk_of_burning_coins_as_fees(self.tx)))
        if fee is not None and not risk_of_burning_coins:
            self.fee_str = format_amount(fee)
            fee_per_kb = fee / self.tx.estimated_size() * 1000
            self.feerate_str = self.app.format_fee_rate(fee_per_kb)
        else:
            self.fee_str = _('unknown')
            self.feerate_str = _('unknown')
        self.ids.output_list.update(self.tx.outputs())
        # Apply per-address colours from util.address_colors to each output.
        for dict_entry in self.ids.output_list.data:
            dict_entry['color'], dict_entry['background_color'] = address_colors(self.wallet, dict_entry['address'])

        self.can_remove_tx = tx_details.can_remove
        self.update_action_button()
def update_action_button(self):
    """Configure the bottom action button: hidden when no action applies,
    a direct action when exactly one applies, and a dropdown otherwise."""
    action_button = self.ids.action_button
    options = (
        ActionButtonOption(text=_('Sign'), func=lambda btn: self.do_sign(), enabled=self.can_sign),
        ActionButtonOption(text=_('Broadcast'), func=lambda btn: self.do_broadcast(), enabled=self.can_broadcast),
        ActionButtonOption(text=_('Bump fee'), func=lambda btn: self.do_rbf(), enabled=self.can_rbf),
        ActionButtonOption(text=_('Remove'), func=lambda btn: self.remove_local_tx(), enabled=self.can_remove_tx),
    )
    num_options = sum(map(lambda o: bool(o.enabled), options))
    # if no options available, hide button
    if num_options == 0:
        action_button.disabled = True
        action_button.opacity = 0
        return
    action_button.disabled = False
    action_button.opacity = 1
    if num_options == 1:
        # only one option, button will correspond to that
        for option in options:
            if option.enabled:
                action_button.text = option.text
                self._action_button_fn = option.func
    else:
        # multiple options. button opens dropdown which has one sub-button for each
        dropdown = DropDown()
        action_button.text = _('Options')
        self._action_button_fn = dropdown.open
        for option in options:
            if option.enabled:
                btn = Button(text=option.text, size_hint_y=None, height='48dp')
                btn.bind(on_release=option.func)
                dropdown.add_widget(btn)
def on_action_button_clicked(self):
    """Dispatch to whatever update_action_button() bound: either the single
    enabled action, or DropDown.open when several actions are available."""
    action_button = self.ids.action_button
    self._action_button_fn(action_button)
def do_rbf(self):
    """Open the fee-bump (replace-by-fee) dialog for this transaction."""
    from .bump_fee_dialog import BumpFeeDialog
    is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(self.tx)
    if fee is None:
        # A replacement fee cannot be chosen without knowing the original fee.
        self.app.show_error(_("Can't bump fee: unknown fee for original transaction."))
        return
    size = self.tx.estimated_size()
    d = BumpFeeDialog(self.app, fee, size, self._do_rbf)
    d.open()
def _do_rbf(self, new_fee_rate, is_final):
    """Callback from BumpFeeDialog: build the replacement tx and start signing.

    new_fee_rate -- fee rate chosen by the user, or None if cancelled.
    is_final -- if True, mark the replacement as final (no further RBF).
    """
    if new_fee_rate is None:
        return
    try:
        new_tx = self.wallet.bump_fee(tx=self.tx,
                                      new_fee_rate=new_fee_rate)
    except CannotBumpFee as e:
        self.app.show_error(str(e))
        return
    if is_final:
        new_tx.set_rbf(False)
    # Swap the dialog over to the replacement tx and refresh the display.
    self.tx = new_tx
    self.update()
    self.do_sign()
def do_sign(self):
    """Ask the user for the PIN, then sign this transaction."""
    self.app.protected(_("Sign this transaction?"), self._do_sign, ())
def _do_sign(self, password):
    """Show a 'Signing...' status, then sign slightly later so the UI can redraw."""
    self.status_str = _('Signing') + '...'
    # Defer the actual (blocking) signing so the status label updates first.
    Clock.schedule_once(lambda dt: self.__do_sign(password), 0.1)
def __do_sign(self, password):
    """Sign the transaction with the given password; report a wrong PIN."""
    try:
        self.app.wallet.sign_transaction(self.tx, password)
    except InvalidPassword:
        self.app.show_error(_("Invalid PIN"))
    # Refresh the dialog whether signing succeeded or not.
    self.update()
def do_broadcast(self):
    """Broadcast the transaction to the network via the application."""
    self.app.broadcast(self.tx)
def show_qr(self):
    """Display the transaction as a QR code; the clipboard gets the raw tx."""
    original_raw_tx = str(self.tx)
    qr_data = self.tx.to_qr_data()
    self.app.qr_dialog(_("Raw Transaction"), qr_data, text_for_clipboard=original_raw_tx)
def remove_local_tx(self):
    """Ask for confirmation, then delete this local tx and all transactions
    that depend on it from the wallet."""
    txid = self.tx.txid()
    to_delete = {txid}
    # Child transactions spending our outputs must be removed as well,
    # otherwise they would reference a tx the wallet no longer knows.
    to_delete |= self.wallet.get_depending_transactions(txid)
    question = _("Are you sure you want to remove this transaction?")
    if len(to_delete) > 1:
        question = (_("Are you sure you want to remove this transaction and {} child transactions?")
                    .format(len(to_delete) - 1))
    def on_prompt(b):
        if b:
            for tx in to_delete:
                self.wallet.remove_transaction(tx)
            self.wallet.save_db()
            self.app._trigger_update_wallet()  # FIXME private...
            self.dismiss()
    d = Question(question, on_prompt)
    d.open()
def label_dialog(self):
    """Let the user edit the wallet label attached to this transaction."""
    from .label_dialog import LabelDialog
    key = self.tx.txid()
    text = self.app.wallet.get_label(key)
    def callback(text):
        self.app.wallet.set_label(key, text)
        self.update()
        # The history screen shows labels too, so refresh it as well.
        self.app.history_screen.update()
    d = LabelDialog(_('Enter Transaction Label'), text, callback)
    d.open()
| 36.416938 | 118 | 0.570394 |
57fb811bd25d7f4f416fbc3fa415443765a2a46d | 36,757 | py | Python | madgraph/various/q_polynomial.py | jlrainbolt/MG5_v2_6_1 | 241f0c6f309342d6e8b813284467b2edd393c7d6 | [
"NCSA"
] | 5 | 2018-10-23T14:37:18.000Z | 2021-11-22T20:59:02.000Z | madgraph/various/q_polynomial.py | jlrainbolt/MG5_v2_6_1 | 241f0c6f309342d6e8b813284467b2edd393c7d6 | [
"NCSA"
] | 26 | 2018-10-08T15:49:32.000Z | 2020-05-15T13:33:36.000Z | madgraph/various/q_polynomial.py | jlrainbolt/MG5_v2_6_1 | 241f0c6f309342d6e8b813284467b2edd393c7d6 | [
"NCSA"
] | 4 | 2019-02-18T11:42:18.000Z | 2021-11-11T20:46:08.000Z | import array
import copy
import math
class PolynomialError(Exception):
    """Raised for invalid operations on q-polynomial objects."""
def get_number_of_coefs_for_rank(r):
    """Return the number of independent coefficients of a fully symmetric
    tensor of rank r in 4 dimensions, i.e. sum_{i=0..r} binomial(i+3, 3).

    Uses floor division '//' so the result stays an exact int under both
    Python 2 and Python 3 (the product of three consecutive integers is
    always divisible by 6, so the division is exact).
    """
    return sum((3 + ri) * (2 + ri) * (1 + ri) // 6 for ri in range(0, r + 1))
class Polynomial(object):
    """A polynomial in the loop momentum (4-vector) q with symmetrized
    coefficients ordered according to Eq. C.15 of arXiv:hep-ph/1405.0301.

    The canonical position of each coefficient is computed analytically by
    get_coef_position(); coef_list provides the inverse lookup.
    """

    def __init__(self, rank):
        assert rank > -1, "The rank of a q-polynomial should be 0 or positive"
        self.rank = rank
        self.init_coef_list()

    def init_coef_list(self):
        """Create self.coef_list, whose elements are arrays of coefficient
        indices, ordered per get_coef_position().  It can then be used with
        get_coef_at_position() as the inverse of get_coef_position().
        """
        self.coef_list = [None, ] * get_number_of_coefs_for_rank(self.rank)
        # Enumerate all index combinations via the naive ordering, then
        # place each at its canonical position.
        PNO = Polynomial_naive_ordering(self.rank)
        for coef in PNO.coef_list:
            self.coef_list[self.get_coef_position(list(coef))] = coef

    def get_coef_position(self, indices_list):
        """Return the canonical position of the coefficient characterized by
        the loop-momentum indices it multiplies, e.g. C_01032 multiplying
        q_0*q_1*q_0*q_3*q_2.  The explicit construction below is assumed to
        be faster than a table lookup.
        """
        fact = math.factorial
        if len(indices_list) == 0:
            return 0
        # All coefficients of lower rank come first.
        res = get_number_of_coefs_for_rank(len(indices_list) - 1)
        new_indices_list = copy.copy(indices_list)
        new_indices_list.sort()
        for i, ind in enumerate(new_indices_list):
            if ind > 0:
                # Exact combinatorial count; '//' keeps the position an int
                # under both Python 2 and Python 3 (the division is exact).
                res = res + fact(ind + i) // (fact(i + 1) * fact(ind - 1))
        return res

    def get_coef_at_position(self, pos):
        """Return the list of indices of the coefficient stored at position
        pos of the one-dimensional coefficient vector."""
        return list(self.coef_list[pos])
class Polynomial_naive_ordering(object):
    """A polynomial in the loop momentum (4-vector) q whose coefficients are
    kept in the 'naive' ordering: rank by rank, lexicographic within each
    rank over the sorted index tuples."""

    def __init__(self, rank):
        assert rank > -1, "The rank of a q-polynomial should be 0 or positive"
        self.rank = rank
        self.init_coef_list()

    def init_coef_list(self):
        """Create self.coef_list, whose elements are arrays of coefficient
        indices sorted in growing order; their index in the list is their
        position in the one-dimensional coefficient vector.  For example the
        coefficient C_01032 is stored as array.array('i', (0,0,1,2,3)).
        """
        self.coef_list = []
        self.coef_list.append(array.array('i', ()))
        if self.rank == 0:
            return
        tmp_coef_list = [array.array('i', (0,)), array.array('i', (1,)),
                         array.array('i', (2,)), array.array('i', (3,))]
        self.coef_list.extend(tmp_coef_list)
        for i in range(1, self.rank):
            new_tmp_coef_list = []
            for coef in tmp_coef_list:
                # Only append indices >= the last one so that every sorted
                # combination is generated exactly once.
                for val in range(coef[-1], 4):
                    new_coef = copy.copy(coef)
                    new_coef.append(val)
                    new_tmp_coef_list.append(new_coef)
            tmp_coef_list = new_tmp_coef_list
            self.coef_list.extend(tmp_coef_list)

    def get_coef_position(self, indices_list):
        """Return the canonical position of the coefficient characterized by
        the loop-momentum indices it multiplies, e.g. C_01032 multiplying
        q_0*q_1*q_0*q_3*q_2.

        Raises PolynomialError if the index combination is unknown.
        """
        new_indices_list = copy.copy(indices_list)
        new_indices_list.sort()
        try:
            return self.coef_list.index(array.array('i', new_indices_list))
        except ValueError:
            # Function-call raise syntax: valid in both Python 2 and 3
            # (the original 'raise E, msg' form is Python-2 only).
            raise PolynomialError(
                "The index %s looked for could not be found" % str(indices_list))

    def get_coef_at_position(self, pos):
        """Return the list of indices of the coefficient stored at position
        pos of the one-dimensional coefficient vector."""
        return list(self.coef_list[pos])
class PolynomialRoutines(object):
    """Mother class of the writers that output the polynomial helper
    subroutines (format strings, prefixes and zero constants are set up here).

    Raises PolynomialError for an invalid rank configuration.
    """

    def __init__(self, max_rank, updater_max_rank=None,
                 coef_format='complex*16', sub_prefix='',
                 proc_prefix='', mp_prefix='',
                 line_split=30):
        self.coef_format = coef_format
        self.sub_prefix = sub_prefix
        self.proc_prefix = proc_prefix
        self.mp_prefix = mp_prefix
        if updater_max_rank is None:
            # By default the updater can carry the same rank as the overall max.
            self.updater_max_rank = max_rank
        else:
            if updater_max_rank > max_rank:
                # Py2/Py3-compatible raise (original used 'raise E, msg').
                raise PolynomialError("The updater max rank must be at most"
                                      " equal to the overall max rank")
            else:
                self.updater_max_rank = updater_max_rank
        # Literal zero constants matching the chosen Fortran kind.
        if coef_format == 'complex*16':
            self.rzero = '0.0d0'
            self.czero = '(0.0d0,0.0d0)'
        elif coef_format == 'complex*32':
            self.rzero = '0.0e0_16'
            self.czero = 'CMPLX(0.0e0_16,0.0e0_16,KIND=16)'
        else:
            self.rzero = '0.0e0'
            self.czero = '(0.0e0,0.0e0)'
        self.line_split = line_split
        if max_rank < 0:
            raise PolynomialError(
                "The rank of a q-polynomial should be 0 or positive")
        self.max_rank = max_rank
        self.pq = Polynomial(max_rank)
        # A useful replacement dictionary
        self.rep_dict = {'sub_prefix': self.sub_prefix,
                         'proc_prefix': self.proc_prefix,
                         'mp_prefix': self.mp_prefix,
                         'coef_format': self.coef_format}
class FortranPolynomialRoutines(PolynomialRoutines):
""" A daughter class to output the subroutine in the fortran format"""
def write_polynomial_constant_module(self):
    """Write a fortran90 module defining the polynomial constant objects:
    the coefficient->rank map, the rank->#coefficients map and the table
    giving the coefficient position resulting from multiplying two
    lower-rank coefficients."""
    # Start with the polynomial constants module header
    polynomial_constant_lines = []
    polynomial_constant_lines.append(
        """MODULE %sPOLYNOMIAL_CONSTANTS
implicit none
include 'coef_specs.inc'
include 'loop_max_coefs.inc'
""" % self.sub_prefix)
    # Add the N coef for rank
    polynomial_constant_lines.append(
        'C Map associating a rank to each coefficient position')
    polynomial_constant_lines.append(
        'INTEGER COEFTORANK_MAP(0:LOOPMAXCOEFS-1)')
    for rank in range(self.max_rank + 1):
        start = get_number_of_coefs_for_rank(rank - 1)
        end = get_number_of_coefs_for_rank(rank) - 1
        polynomial_constant_lines.append(
            'DATA COEFTORANK_MAP(%(start)d:%(end)d)/%(n_entries)d*%(rank)d/' %
            {'start': start, 'end': end, 'n_entries': end - start + 1, 'rank': rank})
    polynomial_constant_lines.append(
        '\nC Map defining the number of coefficients for a symmetric tensor of a given rank')
    polynomial_constant_lines.append(
        """INTEGER NCOEF_R(0:%(max_rank)d)
DATA NCOEF_R/%(ranks)s/""" % {'max_rank': self.max_rank, 'ranks': ','.join([
        str(get_number_of_coefs_for_rank(r)) for r in range(0, self.max_rank + 1)])})
    polynomial_constant_lines.append(
        '\nC Map defining the coef position resulting from the multiplication of two lower rank coefs.')
    # Entry [a][b] is the position of the coefficient obtained by merging
    # the index lists of coefficients a and b.
    mult_matrix = [[
        self.pq.get_coef_position(self.pq.get_coef_at_position(coef_a) +
                                  self.pq.get_coef_at_position(coef_b))
        for coef_b in range(0, get_number_of_coefs_for_rank(self.updater_max_rank))]
        for coef_a in range(0, get_number_of_coefs_for_rank(self.max_rank))]
    polynomial_constant_lines.append(
        'INTEGER COMB_COEF_POS(0:LOOPMAXCOEFS-1,0:%(max_updater_rank)d)'
        % {'max_updater_rank': (get_number_of_coefs_for_rank(self.updater_max_rank) - 1)})
    for j, line in enumerate(mult_matrix):
        # Split each row in chunks of 20 entries to keep DATA lines short.
        chunk_size = 20
        # 'range' instead of the Python-2-only 'xrange' (identical behaviour).
        for k in range(0, len(line), chunk_size):
            polynomial_constant_lines.append(
                "DATA COMB_COEF_POS(%3r,%3r:%3r) /%s/" %
                (j, k, min(k + chunk_size, len(line)) - 1,
                 ','.join(["%3r" % i for i in line[k:k + chunk_size]])))
    polynomial_constant_lines.append(
        "\nEND MODULE %sPOLYNOMIAL_CONSTANTS\n" % self.sub_prefix)
    return '\n'.join(polynomial_constant_lines)
def write_pjfry_mapping(self):
""" Returns a fortran subroutine which fills in the array of integral reduction
coefficients following MadLoop standards using pjfry++ coefficients."""
# THE OUTPUT OF COEFS FROM PJFRY++ IS
# RANK=0: (,)
# RANK=1: (0,),(1,),(2,),(3,)
# RANK=2: (0,0),(0,1),(1,1),(0,2),(1,2),(2,2),(0,3),(1,3),(2,3),(3,3)
# ...
# THE OUTPUT OF COEFS FROM MADLOOP IS
# RANK=0: (,)
# RANK=1: (0,),(1,),(2,),(3,)
# RANK=2: (0,0),(0,1),(0,2),(0,3),(1,1),(2,1),(3,1),(2,2),(2,3),(3,3)
# ...
# Helper function
def format_power(pow):
b, e = pow
if e == 1:
return str(b)
else:
return "%s^%d" % (b, e)
def get_coef_position(indices_list):
new_indices_list=copy.copy(indices_list)
new_indices_list.sort()
r=len(new_indices_list)
if r == 0:
pos=0
else:
pos=get_number_of_coefs_for_rank(r-1)
for i,mu in enumerate(new_indices_list):
num = mu
den = 1
if mu > 0 and i > 0:
for j in range(2,i+2):
num *= (mu+j-1)
den *= j
pos += num/den
return pos
lines = []
lines.append(
"""SUBROUTINE %(sub_prefix)sCONVERT_PJFRY_COEFFS(RANK,PJCOEFS,TIRCOEFS)
C GLOABLE VARIABLES
include 'coef_specs.inc'
include 'loop_max_coefs.inc'
C ARGUMENTS
INTEGER RANK
%(coef_format)s PJCOEFS(0:LOOPMAXCOEFS-1,3)
%(coef_format)s TIRCOEFS(0:LOOPMAXCOEFS-1,3)"""
%{'sub_prefix':self.sub_prefix,'coef_format':self.coef_format})
for R in range(self.max_rank+1):
Ncoeff=((3+R)*(2+R)*(1+R))/6
if R == 0:
offset=0
else:
offset=get_number_of_coefs_for_rank(R-1)
for i in range(offset,Ncoeff+offset):
indices_list=self.pq.get_coef_at_position(i)
sindices = map(lambda i: "q(%d)" % i, indices_list)
coeff_list = []
for j in range(4):
qvalue = "q(%d)"%j
qpow = sindices.count(qvalue)
if qpow > 0:
coeff_list.append(format_power([qvalue,qpow]))
if not coeff_list:
coeff_str = "1"
else:
coeff_str = "*".join(coeff_list)
pjpos = get_coef_position(indices_list)
lines.append("c Reduction Coefficient %s"%coeff_str)
lines.append('TIRCOEFS(%d,1:3)=PJCOEFS(%d,1:3)'%(i,pjpos))
lines.append('IF(RANK.LE.%d)RETURN'%R)
lines.append('end')
return '\n'.join(lines)
def write_iregi_mapping(self):
""" Returns a fortran subroutine which fills in the array of integral reduction
coefficients following MadLoop standards using IREGI coefficients."""
# THE OUTPUT OF COEFS FROM IREGI IS
# RANK=0: (,)
# RANK=1: (0,),(1,),(2,),(3,)
# RANK=2: (0,0),(0,1),(0,2),(0,3),(1,1),(2,1),(3,1),(2,2),(2,3),(3,3)
# ...
# Helper function
def format_power(pow):
b, e = pow
if e == 1:
return str(b)
else:
return "%s^%d" % (b, e)
lines = []
lines.append(
"""SUBROUTINE %(sub_prefix)sCONVERT_IREGI_COEFFS(RANK,IREGICOEFS,TIRCOEFS)
C GLOABLE VARIABLES
include 'coef_specs.inc'
include 'loop_max_coefs.inc'
C ARGUMENTS
INTEGER RANK
%(coef_format)s IREGICOEFS(0:LOOPMAXCOEFS-1,3)
%(coef_format)s TIRCOEFS(0:LOOPMAXCOEFS-1,3)"""
%{'sub_prefix':self.sub_prefix,'coef_format':self.coef_format})
iregi_gen = FromIREGIFortranCodeGenerator(self.max_rank)
for R in range(self.max_rank+1):
Ncoeff=((3+R)*(2+R)*(1+R))/6
if R == 0:
offset=0
else:
offset=get_number_of_coefs_for_rank(R-1)
for i in range(offset,Ncoeff+offset):
indices_list=self.pq.get_coef_at_position(i)
sindices = map(lambda i: "q(%d)" % i, indices_list)
coeff_list = []
for j in range(4):
qvalue = "q(%d)"%j
qpow = sindices.count(qvalue)
if qpow > 0:
coeff_list.append(format_power([qvalue,qpow]))
if not coeff_list:
coeff_str = "1"
else:
coeff_str = "*".join(coeff_list)
iregipos = iregi_gen.get_coef_position(indices_list)
lines.append("c Reduction Coefficient %s"%coeff_str)
lines.append('TIRCOEFS(%d,1:3)=IREGICOEFS(%d,1:3)'%(i,iregipos))
lines.append('IF(RANK.LE.%d)RETURN'%R)
lines.append('end')
return '\n'.join(lines)
def get_COLLIER_mapping(self):
    """ Returns a list of tuples of the form:
            [ (COLLIER_ind0, COLLIER_ind1, COLLIER_ind2, COLLIER_ind3), ]
        where the position in the list is the coef_ID in MadLoop ordering.
    """
    res = []
    for coef_pos in range(0, get_number_of_coefs_for_rank(self.pq.rank)):
        indices_list = self.pq.get_coef_at_position(coef_pos)
        # COLLIER characterizes each coefficient by the multiplicity of
        # every Lorentz index, i.e. how many times 0,1,2,3 each appear.
        res.append((indices_list.count(0),
                    indices_list.count(1),
                    indices_list.count(2),
                    indices_list.count(3)))
    return res
def write_golem95_mapping(self):
""" Returns a fortran subroutine which fills in the array of tensorial
coefficients following golem95 standards using MadLoop coefficients."""
subroutines = []
# Set number of space-time dimensions to 4 here
d = 4
golem_max_rank = 6
# First generate the block_info which contains information about the
# about the block structure of the system
block_info = {}
for R in range(1,self.max_rank+1):
for k in range(1,min(R,d)+1):
LHS, RHS, lst, dic = \
FromGolem95FortranCodeGenerator.generate_equations(R, k)
block_info[(R,k)] = (lst, dic)
# Helper function
def format_power(pow):
b, e = pow
if e == 1:
return str(b)
else:
return "%s^%d" % (b, e)
# Write out one subroutine per rank
for R in range(golem_max_rank+1):
lines=[]
if R==0:
lines.append(
"""SUBROUTINE %(sub_prefix)sFILL_GOLEM_COEFFS_0(ML_COEFS,GOLEM_COEFS)
use precision_golem, only: ki
include 'coef_specs.inc'
include 'loop_max_coefs.inc'
%(coef_format)s ML_COEFS(0:LOOPMAXCOEFS-1)
complex(ki) GOLEM_COEFS"""
%{'sub_prefix':self.sub_prefix,'coef_format':self.coef_format})
lines.append("GOLEM_COEFS=ML_COEFS(0)")
lines.append("end")
subroutines.append('\n'.join(lines))
continue
# Start by writing out the header:
lines.append(
"""SUBROUTINE %(sub_prefix)sFILL_GOLEM_COEFFS_%(rank)d(ML_COEFS,GOLEM_COEFS)
use tens_rec, only: coeff_type_%(rank)d
include 'coef_specs.inc'
include 'loop_max_coefs.inc'
%(coef_format)s ML_COEFS(0:LOOPMAXCOEFS-1)
type(coeff_type_%(rank)d) GOLEM_COEFS"""
%{'sub_prefix':self.sub_prefix,'rank':R,
'coef_format':self.coef_format})
if R > self.max_rank:
lines.append('C Dummy routine for %(sub_prefix)sFILL_GOLEM_COEFS_%(rank)d'\
%{'sub_prefix':self.sub_prefix,'rank':R,
'coef_format':self.coef_format})
lines.append("STOP 'ERROR: %d > %d'"%(R,self.max_rank))
lines.append('end')
subroutines.append('\n'.join(lines))
continue
# The constant coefficient is treated separately
lines.append("c Constant coefficient ")
lines.append("GOLEM_COEFS%%c0=ML_COEFS(%d)"\
%self.pq.get_coef_position([]))
# Now write out the explicit mapping
for k in range(1,min(R,d)+1):
lst, dic = block_info[(R,k)]
dim = len(lst)
lab = 0
for indices in FromGolem95FortranCodeGenerator.select(range(d), k):
lab += 1
sindices = map(lambda i: "q(%d)" % i, indices)
for i in range(dim):
coeff_str = "*".join(map(format_power,zip(sindices, lst[i])))
ML_indices = sum(
[[ind]*lst[i][j] for j, ind in enumerate(indices)],[])
ML_coef_pos = self.pq.get_coef_position(ML_indices)
ML_sign_convention = ' ' if len(ML_indices)%2==0 else '-'
lines.append("c Coefficient %s"%coeff_str)
lines.append("GOLEM_COEFS%%c%d(%d,%d)=%sML_COEFS(%d)"\
% (k, lab, i+1, ML_sign_convention, ML_coef_pos))
subroutines.append('\n'.join(lines+['end']))
return '\n\n'.join(subroutines)
def write_compact_wl_updater(self,r_1,r_2,loop_over_vertex_coefs_first=True):
""" Give out the subroutine to update a polynomial of rank r_1 with
one of rank r_2 """
# The update is basically given by
# OUT(j,coef,i) = A(k,*,i) x B(j,*,k)
# with k a summed index and the 'x' operation is equivalent to
# putting together two regular polynomial in q with scalar coefficients
# The complexity of this subroutine is therefore
# MAXLWFSIZE**3 * NCoef(r_1) * NCoef(r_2)
# Which is for example 22'400 when updating a rank 4 loop wavefunction
# with a rank 1 updater.
# The situation is slightly improved by a smarter handling of the
# coefficients equal to zero
lines=[]
# Start by writing out the header:
lines.append(
"""SUBROUTINE %(sub_prefix)sUPDATE_WL_%(r_1)d_%(r_2)d(A,LCUT_SIZE,B,IN_SIZE,OUT_SIZE,OUT)
USE %(proc_prefix)sPOLYNOMIAL_CONSTANTS
implicit none
INTEGER I,J,K,L,M
%(coef_format)s A(MAXLWFSIZE,0:LOOPMAXCOEFS-1,MAXLWFSIZE)
%(coef_format)s B(MAXLWFSIZE,0:VERTEXMAXCOEFS-1,MAXLWFSIZE)
%(coef_format)s OUT(MAXLWFSIZE,0:LOOPMAXCOEFS-1,MAXLWFSIZE)
INTEGER LCUT_SIZE,IN_SIZE,OUT_SIZE
INTEGER NEW_POSITION
%(coef_format)s UPDATER_COEF
"""%{'sub_prefix':self.sub_prefix,'proc_prefix':self.proc_prefix,
'r_1':r_1,'r_2':r_2,'coef_format':self.coef_format})
# Start the loop on the elements i,j of the vector OUT(i,coef,j)
lines.append("C Welcome to the computational heart of MadLoop...")
if loop_over_vertex_coefs_first:
lines.append("OUT(:,:,:)=%s"%self.czero)
lines.append(
"""DO J=1,OUT_SIZE
DO M=0,%d
DO K=1,IN_SIZE
UPDATER_COEF = B(J,M,K)
IF (UPDATER_COEF.EQ.%s) CYCLE
DO L=0,%d
NEW_POSITION = COMB_COEF_POS(L,M)
DO I=1,LCUT_SIZE
OUT(J,NEW_POSITION,I)=OUT(J,NEW_POSITION,I) + A(K,L,I)*UPDATER_COEF
ENDDO
ENDDO
ENDDO
ENDDO
ENDDO
"""%(get_number_of_coefs_for_rank(r_2)-1,
self.czero,
get_number_of_coefs_for_rank(r_1)-1))
else:
lines.append("OUT(:,:,:)=%s"%self.czero)
lines.append(
"""DO I=1,LCUT_SIZE
DO L=0,%d
DO K=1,IN_SIZE
UPDATER_COEF = A(K,L,I)
IF (UPDATER_COEF.EQ.%s) CYCLE
DO M=0,%d
NEW_POSITION = COMB_COEF_POS(L,M)
DO J=1,OUT_SIZE
OUT(J,NEW_POSITION,I)=OUT(J,NEW_POSITION,I) + UPDATER_COEF*B(J,M,K)
ENDDO
ENDDO
ENDDO
ENDDO
ENDDO
"""%(get_number_of_coefs_for_rank(r_1)-1,
self.czero,
get_number_of_coefs_for_rank(r_2)-1))
lines.append("END")
# return the subroutine
return '\n'.join(lines)
def write_expanded_wl_updater(self,r_1,r_2):
""" Give out the subroutine to update a polynomial of rank r_1 with
one of rank r_2 """
# The update is basically given by
# OUT(j,coef,i) = A(k,*,i) x B(j,*,k)
# with k a summed index and the 'x' operation is equivalent to
# putting together two regular polynomial in q with scalar coefficients
# The complexity of this subroutine is therefore
# MAXLWFSIZE**3 * NCoef(r_1) * NCoef(r_2)
# Which is for example 22'400 when updating a rank 4 loop wavefunction
# with a rank 1 updater.
lines=[]
# Start by writing out the header:
lines.append(
"""SUBROUTINE %(sub_prefix)sUPDATE_WL_%(r_1)d_%(r_2)d(A,LCUT_SIZE,B,IN_SIZE,OUT_SIZE,OUT)
USE %(proc_prefix)sPOLYNOMIAL_CONSTANTS
INTEGER I,J,K
%(coef_format)s A(MAXLWFSIZE,0:LOOPMAXCOEFS-1,MAXLWFSIZE)
%(coef_format)s B(MAXLWFSIZE,0:VERTEXMAXCOEFS-1,MAXLWFSIZE)
%(coef_format)s OUT(MAXLWFSIZE,0:LOOPMAXCOEFS-1,MAXLWFSIZE)
INTEGER LCUT_SIZE,IN_SIZE,OUT_SIZE
"""%{'sub_prefix':self.sub_prefix,'proc_prefix':self.proc_prefix,
'r_1':r_1,'r_2':r_2,'coef_format':self.coef_format})
# Start the loop on the elements i,j of the vector OUT(i,coef,j)
lines.append("DO I=1,LCUT_SIZE")
lines.append(" DO J=1,OUT_SIZE")
lines.append(" DO K=0,%d"%(get_number_of_coefs_for_rank(r_2+r_1)-1))
lines.append(" OUT(J,K,I)=%s"%self.czero)
lines.append(" ENDDO")
lines.append(" DO K=1,IN_SIZE")
# Now we write the lines defining the coefs of OUT(j,*,i) from those
# of A(k,*,i) and B(j,*,k)
# The dictionary below stores the position of the new coefficient
# derived as keys and the list of the buidling blocks expressing
# them as values
coef_expressions={}
for coef_a in range(0,get_number_of_coefs_for_rank(r_1)):
for coef_b in range(0,get_number_of_coefs_for_rank(r_2)):
ind_list=self.pq.get_coef_at_position(coef_a)+\
self.pq.get_coef_at_position(coef_b)
new_term="A(K,%d,I)*B(J,%d,K)"%(coef_a,coef_b)
new_position=self.pq.get_coef_position(ind_list)
try:
coef_expressions[new_position].append(new_term)
except KeyError:
coef_expressions[new_position]=[new_term,]
for coef, value in coef_expressions.items():
split=0
while split<len(value):
lines.append("OUT(J,%d,I)=OUT(J,%d,I)+"%(coef,coef)+\
'+'.join(value[split:split+self.line_split]))
split=split+self.line_split
# And now we simply close the enddo.
lines.append(" ENDDO")
lines.append(" ENDDO")
lines.append("ENDDO")
lines.append("END")
# return the subroutine
return '\n'.join(lines)
def write_polynomial_evaluator(self):
    """ Give out the subroutine to evaluate a polynomial of a rank up to
    the maximal one specified when initializing the FortranPolynomialRoutines
    object. """
    lines=[]
    # Start by writing out the header:
    lines.append("""SUBROUTINE %(sub_prefix)sEVAL_POLY(C,R,Q,OUT)
USE %(proc_prefix)sPOLYNOMIAL_CONSTANTS
%(coef_format)s C(0:LOOPMAXCOEFS-1)
INTEGER R
%(coef_format)s Q(0:3)
%(coef_format)s OUT
"""%self.rep_dict)
    # Start by the trivial coefficient of order 0.
    lines.append("OUT=C(0)")
    # Now scan them all progressively
    for r in range(1,self.max_rank+1):
        # Guard each rank's contribution so only ranks <= R are evaluated.
        lines.append("IF (R.GE.%d) then"%r)
        terms=[]
        for coef_num in range(get_number_of_coefs_for_rank(r-1)
                    ,get_number_of_coefs_for_rank(r)):
            coef_inds=self.pq.get_coef_at_position(coef_num)
            # Each term is C(n) times the corresponding powers of Q.
            terms.append('*'.join(['C(%d)'%coef_num,]+
                ['Q(%d)'%ind for ind in coef_inds]))
        split=0
        # Emit the sum in chunks of self.line_split terms to respect
        # Fortran line-length limits.
        while split<len(terms):
            lines.append("OUT=OUT+"+\
                '+'.join(terms[split:split+self.line_split]))
            split=split+self.line_split
        lines.append("ENDIF")
    lines.append("END")
    return '\n'.join(lines)
def write_wl_merger(self):
    """ Give out the subroutine to merge the components of a final loop
    wavefunction of a loop to create the coefficients of the polynomial
    representing the numerator, while multiplying each of them by 'const'."""
    lines=[]
    # Start by writing out the header:
    lines.append(
        """SUBROUTINE %(sub_prefix)sMERGE_WL(WL,R,LCUT_SIZE,CONST,OUT)
USE %(proc_prefix)sPOLYNOMIAL_CONSTANTS
INTEGER I,J
%(coef_format)s WL(MAXLWFSIZE,0:LOOPMAXCOEFS-1,MAXLWFSIZE)
INTEGER R,LCUT_SIZE
%(coef_format)s CONST
%(coef_format)s OUT(0:LOOPMAXCOEFS-1)
"""%self.rep_dict)
    # Now scan them all progressively
    # NOTE(review): OUT is accumulated into, not zeroed here; presumably the
    # caller initializes it -- confirm at the Fortran call sites.
    lines.append("DO I=1,LCUT_SIZE")
    lines.append("  DO J=0,NCOEF_R(R)-1")
    lines.append("    OUT(J)=OUT(J)+WL(I,J,I)*CONST")
    lines.append("  ENDDO")
    lines.append("ENDDO")
    lines.append("END")
    return '\n'.join(lines)
def write_add_coefs(self):
    """ Give out the subroutine to simply add together the coefficients
    of two loop polynomials of rank R1 and R2 storing the result in the
    first polynomial given in the arguments."""
    lines=[]
    # Start by writing out the header:
    lines.append("""SUBROUTINE %(sub_prefix)sADD_COEFS(A,RA,B,RB)
USE %(proc_prefix)sPOLYNOMIAL_CONSTANTS
INTEGER I
%(coef_format)s A(0:LOOPMAXCOEFS-1),B(0:LOOPMAXCOEFS-1)
INTEGER RA,RB
"""%self.rep_dict)
    # Accumulate B's coefficients (rank RB) into A in place.
    # NOTE(review): assumes RB <= RA so that A has room -- confirm at callers.
    lines.append("DO I=0,NCOEF_R(RB)-1")
    lines.append("  A(I)=A(I)+B(I)")
    lines.append("ENDDO")
    lines.append("END")
    return '\n'.join(lines)
class FromIREGIFortranCodeGenerator():
    """ Back up of the class Polynomial, which uses the same coefficient
    ordering as IREGI.  It is useful in case the order of MadLoop
    coefficients changes in the future."""

    def __init__(self, rank):
        assert rank > -1, "The rank of a q-polynomial should be 0 or positive"
        self.rank = rank
        self.init_coef_list()

    def init_coef_list(self):
        """Create self.coef_list, whose elements are arrays of coefficient
        indices sorted in growing order; their index in the list is their
        position in the one-dimensional coefficient vector.  For example the
        coefficient C_01032 is stored as array.array('i', (0,0,1,2,3)).
        """
        self.coef_list = []
        self.coef_list.append(array.array('i', ()))
        if self.rank == 0:
            return
        tmp_coef_list = [array.array('i', (0,)), array.array('i', (1,)),
                         array.array('i', (2,)), array.array('i', (3,))]
        self.coef_list.extend(tmp_coef_list)
        for i in range(1, self.rank):
            new_tmp_coef_list = []
            for coef in tmp_coef_list:
                # Only append indices >= the last one so that each sorted
                # combination appears exactly once.
                for val in range(coef[-1], 4):
                    new_coef = copy.copy(coef)
                    new_coef.append(val)
                    new_tmp_coef_list.append(new_coef)
            tmp_coef_list = new_tmp_coef_list
            self.coef_list.extend(tmp_coef_list)

    def get_coef_position(self, indices_list):
        """Return the canonical position of the coefficient characterized by
        the loop-momentum indices it multiplies, e.g. C_01032 multiplying
        q_0*q_1*q_0*q_3*q_2.

        Raises PolynomialError if the index combination is unknown.
        """
        new_indices_list = copy.copy(indices_list)
        new_indices_list.sort()
        try:
            return self.coef_list.index(array.array('i', new_indices_list))
        except ValueError:
            # Py2/Py3-compatible raise (original used 'raise E, msg').
            raise PolynomialError(
                "The index %s looked for could not be found" % str(indices_list))

    def get_coef_at_position(self, pos):
        """Return the list of indices of the coefficient stored at position
        pos of the one-dimensional coefficient vector."""
        return list(self.coef_list[pos])
class FromGolem95FortranCodeGenerator():
    """ Just a container class with helper functions taken from the script
    tens.py of golem which generates most of the golem95 tens_rec.f fortran
    code."""

    # Table of primes used to build linearly independent probe vectors q.
    PRIMES = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
              31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
              73, 79, 83, 89, 97, 101, 103, 107, 109, 113,
              127, 131, 137, 139, 149, 151, 157, 163, 167, 173,
              179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
              233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
              283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
              353, 359, 367, 373, 379, 383, 389, 397, 401, 409,
              419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
              467, 479, 487, 491, 499, 503, 509, 521, 523, 541,
              547, 557, 563, 569, 571, 577, 587, 593, 599, 601,
              607, 613, 617, 619, 631, 641, 643, 647, 653, 659,
              661, 673, 677, 683, 691, 701, 709, 719, 727, 733,
              739, 743, 751, 757, 761, 769, 773, 787, 797, 809,
              811, 821, 823, 827, 829, 839, 853, 857, 859, 863,
              877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
              947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013,
              1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069,
              1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151,
              1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223,
              1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291,
              1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373]

    @classmethod
    def combinat(cls, n, k):
        """
        Calculates the binomial coefficient (n atop k).
        """
        if k < 0 or k > n:
            return 0
        else:
            num = 1
            den = 1
            for i in range(1, k + 1):
                num *= n - i + 1
                den *= i
            # '//' keeps the result an exact int under Python 2 and 3 alike
            # (binomial coefficients are always integers).
            return num // den

    @classmethod
    def generate_mapping(cls, R, k):
        """
        Generates a mapping from tensor components \\hat{C}(a_1, ..., a_k)
        into a one dimensional array.

        PARAMETER
           R -- rank
           k -- number of non-zero components of q

        RETURN
           (lst, dic)
           lst -- list of (a_1, ..., a_k)
           dic -- mapping from (a_1, ..., a_k) -> int
           lst[dic[X]] = X if X in dic
        """
        def rec_generator(k, R):
            # Yield all k-tuples of positive exponents summing to at most R.
            if k == 0:
                yield []
            elif k <= R:
                for a_1 in range(1, R - (k - 1) + 1):
                    if k > 1:
                        for tail in rec_generator(k - 1, R - a_1):
                            yield [a_1] + tail
                    else:
                        yield [a_1]

        lst = []
        dic = {}
        i = 0
        for indices in rec_generator(k, R):
            t = tuple(indices)
            lst.append(t)
            dic[t] = i
            i += 1
        assert i == cls.combinat(R, k), \
            "len(%s) != %d, R=%d,k=%d" % (lst, cls.combinat(R, k), R, k)
        return lst, dic

    @classmethod
    def generate_equations(cls, R, k):
        """
        Generates a set of equations for a given number of non-zero
        components and fixed maximum rank.

        PARAMETER
           R -- rank
           k -- number of non-zero components of q

        RETURN
           (LHS, RHS)
           LHS -- a matrix (i.e. list of lists) of coefficients
           RHS -- a list of values of q
        """
        lst, dic = cls.generate_mapping(R, k)
        l = len(lst)
        LHS = []
        RHS = []
        for num_eq in range(l):
            # Materialized as a list: in Python 3 a bare 'map' iterator would
            # be exhausted after the first use below (Python 2 gave a list).
            q = [cls.PRIMES[i] for i in lst[num_eq]]
            coeffs = []
            for term in lst:
                # Product of q_i**e_i, written as an explicit loop instead of
                # reduce(..., map(lambda (b,e): b**e, ...)) whose
                # tuple-unpacking lambda is Python-2-only syntax.
                prod = 1
                for base, expo in zip(q, term):
                    prod *= base ** expo
                coeffs.append(prod)
            LHS.append(coeffs)
            RHS.append(q)
        return LHS, RHS, lst, dic

    @classmethod
    def select(cls, items, k):
        """
        Iterator over all selections of k elements from a given list.

        PARAMETER
           items -- list of elements to choose from (no repetitions)
           k -- number of elements to select.
        """
        n = len(items)
        # We use the fact that
        # (n choose k) = (1 choose 1)(n-1 choose k-1)+(1 choose 0)(n-1 choose k)
        if k == n:
            yield items[:]
        elif k == 0:
            yield []
        elif 0 < k and k < n:
            head = items[0:1]
            tail = items[1:]
            for result in cls.select(tail, k - 1):
                yield head + result
            for result in cls.select(tail, k):
                yield result
if __name__ == '__main__':
    # Quick manual test of the coefficient positioning and of the
    # write_golem95_mapping function.
    # BUGFIX: imports must come first -- the original called sys.exit(0)
    # *before* 'import sys', which raised a NameError at runtime.
    import os
    import sys
    P = Polynomial(7)
    # print(...) with a single argument behaves identically under
    # Python 2 (prints the parenthesized string) and Python 3.
    print("Coef (6,0,0,0) is at pos %s" % P.get_coef_position([0, 0, 0, 0, 0, 0]))
    print("Coef (1,1,2,2) is at pos %s" % P.get_coef_position([0, 1, 2, 2, 3, 3]))
    print("Coef (7,0,0,0) is at pos %s" % P.get_coef_position([0, 0, 0, 0, 0, 0, 0]))
    print("Coef (1,2,2,2) is at pos %s" % P.get_coef_position([0, 1, 1, 2, 2, 3, 3]))
    sys.exit(0)
    # NOTE(review): everything below is unreachable after sys.exit(0);
    # kept from the original for when the exit line is commented out.
    max_rank = 6
    FPR = FortranPolynomialRoutines(max_rank)
    print("Output of write_golem95_mapping function for max_rank=%d:\n\n" % max_rank)
    root_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
    sys.path.insert(0, os.path.join(root_path, os.path.pardir))
    import madgraph.iolibs.file_writers as writers
    FWriter = writers.FortranWriter("GOLEM95_interface.f")
    FWriter.writelines(FPR.write_golem95_mapping())
| 39.608836 | 99 | 0.543216 |
c735ebc57e704f5448afbf68d070a01454572f41 | 12,321 | py | Python | homeassistant/components/apple_tv/__init__.py | ekos2001/core | 6019bcf9d140458dba3b7eb39d99e9e2d4f2354a | [
"Apache-2.0"
] | 1 | 2017-05-30T22:21:05.000Z | 2017-05-30T22:21:05.000Z | homeassistant/components/apple_tv/__init__.py | ekos2001/core | 6019bcf9d140458dba3b7eb39d99e9e2d4f2354a | [
"Apache-2.0"
] | 51 | 2020-10-14T01:19:07.000Z | 2022-03-31T06:02:48.000Z | homeassistant/components/apple_tv/__init__.py | foxy82/home-assistant | 79ebe930e31a91967928a5642c98e58b522109b0 | [
"Apache-2.0"
] | 1 | 2021-08-16T02:53:15.000Z | 2021-08-16T02:53:15.000Z | """The Apple TV integration."""
import asyncio
import logging
from random import randrange
from pyatv import connect, exceptions, scan
from pyatv.const import Protocol
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.components.remote import DOMAIN as REMOTE_DOMAIN
from homeassistant.const import (
CONF_ADDRESS,
CONF_NAME,
CONF_PROTOCOL,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from .const import CONF_CREDENTIALS, CONF_IDENTIFIER, CONF_START_OFF, DOMAIN
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Apple TV"
BACKOFF_TIME_UPPER_LIMIT = 300 # Five minutes
NOTIFICATION_TITLE = "Apple TV Notification"
NOTIFICATION_ID = "apple_tv_notification"
SOURCE_REAUTH = "reauth"
SIGNAL_CONNECTED = "apple_tv_connected"
SIGNAL_DISCONNECTED = "apple_tv_disconnected"
PLATFORMS = [MP_DOMAIN, REMOTE_DOMAIN]
async def async_setup(hass, config):
    """Set up the Apple TV integration.

    Nothing is configured via YAML; real setup happens per config entry
    in ``async_setup_entry``, so this always reports success.
    """
    return True
async def async_setup_entry(hass, entry):
    """Set up a config entry for Apple TV."""
    manager = AppleTVManager(hass, entry)
    hass.data.setdefault(DOMAIN, {})[entry.unique_id] = manager

    async def _async_on_stop(event):
        """Stop push updates when Home Assistant shuts down."""
        await manager.disconnect()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _async_on_stop)

    async def _async_setup_platforms():
        """Forward the entry to every platform, then start the connection."""
        forwards = (
            hass.config_entries.async_forward_entry_setup(entry, platform)
            for platform in PLATFORMS
        )
        await asyncio.gather(*forwards)
        await manager.init()

    hass.async_create_task(_async_setup_platforms())
    return True
async def async_unload_entry(hass, entry):
    """Unload an Apple TV config entry."""
    # Unload every platform; the entry is only torn down if all succeed.
    results = await asyncio.gather(
        *(
            hass.config_entries.async_forward_entry_unload(entry, platform)
            for platform in PLATFORMS
        )
    )
    unload_ok = all(results)
    if unload_ok:
        manager = hass.data[DOMAIN].pop(entry.unique_id)
        await manager.disconnect()
    return unload_ok
class AppleTVEntity(Entity):
    """Base entity for a device that sends commands to an Apple TV."""

    def __init__(self, name, identifier, manager):
        """Initialize device."""
        self.atv = None
        self.manager = manager
        self._name = name
        self._identifier = identifier

    async def async_added_to_hass(self):
        """Subscribe to connect/disconnect signals once added to hass."""

        @callback
        def _on_connect(atv):
            """Store the new connection and let the subclass react."""
            self.atv = atv
            self.async_device_connected(atv)
            self.async_write_ha_state()

        @callback
        def _on_disconnect():
            """Let the subclass react, then drop the stale connection."""
            self.async_device_disconnected()
            self.atv = None
            self.async_write_ha_state()

        self.async_on_remove(
            async_dispatcher_connect(
                self.hass, f"{SIGNAL_CONNECTED}_{self._identifier}", _on_connect
            )
        )
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                f"{SIGNAL_DISCONNECTED}_{self._identifier}",
                _on_disconnect,
            )
        )

    def async_device_connected(self, atv):
        """Handle when connection is made to device (override in subclass)."""

    def async_device_disconnected(self):
        """Handle when connection was lost to device (override in subclass)."""

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._identifier

    @property
    def should_poll(self):
        """No polling needed for Apple TV."""
        return False
class AppleTVManager:
    """Connection and power manager for an Apple TV.

    An instance is used per device to share the same power state between
    several platforms. It also manages scanning and connection establishment
    in case of problems.
    """

    def __init__(self, hass, config_entry):
        """Initialize power manager."""
        self.config_entry = config_entry
        self.hass = hass
        # Active pyatv connection, or None while disconnected.
        self.atv = None
        # Whether the device should currently be connected ("powered on").
        self._is_on = not config_entry.options.get(CONF_START_OFF, False)
        # Consecutive failed attempts; drives the reconnect backoff.
        self._connection_attempts = 0
        self._connection_was_lost = False
        # Background reconnect task, or None when no loop is running.
        self._task = None

    async def init(self):
        """Initialize power management."""
        if self._is_on:
            await self.connect()

    def connection_lost(self, _):
        """Device was unexpectedly disconnected.

        This is a callback function from pyatv.interface.DeviceListener.
        """
        _LOGGER.warning(
            'Connection lost to Apple TV "%s"', self.config_entry.data.get(CONF_NAME)
        )
        self._connection_was_lost = True
        self._handle_disconnect()

    def connection_closed(self):
        """Device connection was (intentionally) closed.

        This is a callback function from pyatv.interface.DeviceListener.
        """
        self._handle_disconnect()

    def _handle_disconnect(self):
        """Handle that the device disconnected and restart connect loop."""
        if self.atv:
            self.atv.listener = None
            self.atv.close()
            self.atv = None
        self._dispatch_send(SIGNAL_DISCONNECTED)
        self._start_connect_loop()

    async def connect(self):
        """Connect to device."""
        self._is_on = True
        self._start_connect_loop()

    async def disconnect(self):
        """Disconnect from device."""
        _LOGGER.debug("Disconnecting from device")
        self._is_on = False
        try:
            if self.atv:
                self.atv.push_updater.listener = None
                self.atv.push_updater.stop()
                self.atv.close()
                self.atv = None
            if self._task:
                self._task.cancel()
                self._task = None
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("An error occurred while disconnecting")

    def _start_connect_loop(self):
        """Start background connect loop to device."""
        # Only one loop at a time, and only while "on" and disconnected.
        if not self._task and self.atv is None and self._is_on:
            self._task = asyncio.create_task(self._connect_loop())
        else:
            _LOGGER.debug(
                "Not starting connect loop (%s, %s)", self.atv is None, self._is_on
            )

    async def _connect_loop(self):
        """Connect loop background task function."""
        _LOGGER.debug("Starting connect loop")

        # Try to find device and connect as long as the user has said that
        # we are allowed to connect and we are not already connected.
        while self._is_on and self.atv is None:
            try:
                conf = await self._scan()
                if conf:
                    await self._connect(conf)
            except exceptions.AuthenticationError:
                # Credentials are bad; a reconnect loop cannot fix that.
                self._auth_problem()
                break
            except asyncio.CancelledError:
                pass
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Failed to connect")
                self.atv = None

            if self.atv is None:
                # Randomized exponential backoff, capped at five minutes.
                self._connection_attempts += 1
                backoff = min(
                    randrange(2 ** self._connection_attempts), BACKOFF_TIME_UPPER_LIMIT
                )
                _LOGGER.debug("Reconnecting in %d seconds", backoff)
                await asyncio.sleep(backoff)

        _LOGGER.debug("Connect loop ended")
        self._task = None

    def _auth_problem(self):
        """Problem to authenticate occurred that needs intervention."""
        _LOGGER.debug("Authentication error, reconfigure integration")

        name = self.config_entry.data.get(CONF_NAME)
        identifier = self.config_entry.unique_id

        self.hass.components.persistent_notification.create(
            "An irrecoverable connection problem occurred when connecting to "
            # Fixed: this f-string used to read f"`f{name}`", which rendered a
            # stray "f" before the device name in the user-facing notification.
            f"`{name}`. Please go to the Integrations page and reconfigure it",
            title=NOTIFICATION_TITLE,
            notification_id=NOTIFICATION_ID,
        )

        # Add to event queue as this function is called from a task being
        # cancelled from disconnect
        asyncio.create_task(self.disconnect())

        self.hass.async_create_task(
            self.hass.config_entries.flow.async_init(
                DOMAIN,
                context={"source": SOURCE_REAUTH},
                data={CONF_NAME: name, CONF_IDENTIFIER: identifier},
            )
        )

    async def _scan(self):
        """Try to find device by scanning for it."""
        identifier = self.config_entry.unique_id
        address = self.config_entry.data[CONF_ADDRESS]
        protocol = Protocol(self.config_entry.data[CONF_PROTOCOL])

        _LOGGER.debug("Discovering device %s", identifier)
        # First try a targeted scan of the cached address.
        atvs = await scan(
            self.hass.loop, identifier=identifier, protocol=protocol, hosts=[address]
        )
        if atvs:
            return atvs[0]

        _LOGGER.debug(
            "Failed to find device %s with address %s, trying to scan",
            identifier,
            address,
        )

        # Fall back to a full network scan (the device may have a new IP).
        atvs = await scan(self.hass.loop, identifier=identifier, protocol=protocol)
        if atvs:
            return atvs[0]

        _LOGGER.debug("Failed to find device %s, trying later", identifier)
        return None

    async def _connect(self, conf):
        """Connect to device."""
        credentials = self.config_entry.data[CONF_CREDENTIALS]
        session = async_get_clientsession(self.hass)

        # Credentials are stored keyed by protocol number (as strings).
        for protocol, creds in credentials.items():
            conf.set_credentials(Protocol(int(protocol)), creds)

        _LOGGER.debug("Connecting to device %s", self.config_entry.data[CONF_NAME])
        self.atv = await connect(conf, self.hass.loop, session=session)
        self.atv.listener = self

        self._dispatch_send(SIGNAL_CONNECTED, self.atv)
        self._address_updated(str(conf.address))

        await self._async_setup_device_registry()

        # Connection succeeded: reset the backoff counter.
        self._connection_attempts = 0
        if self._connection_was_lost:
            _LOGGER.info(
                'Connection was re-established to Apple TV "%s"', self.atv.service.name
            )
            self._connection_was_lost = False

    async def _async_setup_device_registry(self):
        """Register (or update) this device in the device registry."""
        attrs = {
            "identifiers": {(DOMAIN, self.config_entry.unique_id)},
            "manufacturer": "Apple",
            "name": self.config_entry.data[CONF_NAME],
        }

        if self.atv:
            dev_info = self.atv.device_info

            attrs["model"] = "Apple TV " + dev_info.model.name.replace("Gen", "")
            attrs["sw_version"] = dev_info.version

            if dev_info.mac:
                attrs["connections"] = {(dr.CONNECTION_NETWORK_MAC, dev_info.mac)}

        device_registry = await dr.async_get_registry(self.hass)
        device_registry.async_get_or_create(
            config_entry_id=self.config_entry.entry_id, **attrs
        )

    @property
    def is_connecting(self):
        """Return true if connection is in progress."""
        return self._task is not None

    def _address_updated(self, address):
        """Update cached address in config entry."""
        _LOGGER.debug("Changing address to %s", address)
        self.hass.config_entries.async_update_entry(
            self.config_entry, data={**self.config_entry.data, CONF_ADDRESS: address}
        )

    def _dispatch_send(self, signal, *args):
        """Dispatch a signal to all entities managed by this manager."""
        async_dispatcher_send(
            self.hass, f"{signal}_{self.config_entry.unique_id}", *args
        )
| 32.253927 | 87 | 0.625355 |
a96eea54036e159e2a129d3d5c3ca3f59bd59c1a | 8,051 | py | Python | visualizer/show3d_balls.py | KaidongLi/pytorch-LatticePointClassifier | 5c00bb0f808a928ea57acb8a79364d62eb955cee | [
"MIT"
] | null | null | null | visualizer/show3d_balls.py | KaidongLi/pytorch-LatticePointClassifier | 5c00bb0f808a928ea57acb8a79364d62eb955cee | [
"MIT"
] | null | null | null | visualizer/show3d_balls.py | KaidongLi/pytorch-LatticePointClassifier | 5c00bb0f808a928ea57acb8a79364d62eb955cee | [
"MIT"
] | 1 | 2022-03-29T21:03:38.000Z | 2022-03-29T21:03:38.000Z | import numpy as np
import ctypes as ct
import cv2
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
showsz = 800
mousex, mousey = 0.5, 0.5
zoom = 1.0
changed = True
def onmouse(*args):
    """OpenCV mouse callback: track the cursor and flag a redraw."""
    global mousex, mousey, changed
    # NOTE(review): cv2.setMouseCallback invokes callbacks as
    # (event, x, y, flags, param), so reading args[1] as y and args[2] as x
    # looks swapped -- preserved as-is, verify against the cv2 docs.
    y, x = args[1], args[2]
    mousex = x / float(showsz)
    mousey = y / float(showsz)
    changed = True
# Create the display window, pin it to the top-left corner, and register the
# mouse handler before any rendering happens.
cv2.namedWindow('show3d')
cv2.moveWindow('show3d', 0, 0)
cv2.setMouseCallback('show3d', onmouse)
# Load the compiled C ball-rendering library located next to this script.
dll = np.ctypeslib.load_library(os.path.join(BASE_DIR, 'render_balls_so'), '.')
def showpoints(xyz, c_gt=None, c_pred=None, waittime=0, showrot=False, magnifyBlue=0, freezerot=False,
               background=(0, 0, 0), normalizecolor=True, ballradius=10):
    """Interactive 3D point-cloud viewer.

    Renders `xyz` (N x 3 array) as colored balls in an OpenCV window and
    runs a key/mouse event loop.  `c_gt`/`c_pred` are optional N x 3 color
    arrays toggled with the 't'/'p' keys; missing colors default to white.
    Returns the last pressed key code.
    Keys: q quit, Q exit process, n/m zoom in/out, r reset zoom, s screenshot.
    """
    global showsz, mousex, mousey, zoom, changed
    # Center the cloud and scale it so it fits the window.
    xyz = xyz - xyz.mean(axis=0)
    radius = ((xyz ** 2).sum(axis=-1) ** 0.5).max()
    xyz /= (radius * 2.2) / showsz
    # Split the active color array into per-channel, C-contiguous float32
    # buffers as required by the C renderer.
    if c_gt is None:
        c0 = np.zeros((len(xyz),), dtype='float32') + 255
        c1 = np.zeros((len(xyz),), dtype='float32') + 255
        c2 = np.zeros((len(xyz),), dtype='float32') + 255
    else:
        c0 = c_gt[:, 0]
        c1 = c_gt[:, 1]
        c2 = c_gt[:, 2]
    if normalizecolor:
        c0 /= (c0.max() + 1e-14) / 255.0
        c1 /= (c1.max() + 1e-14) / 255.0
        c2 /= (c2.max() + 1e-14) / 255.0
    c0 = np.require(c0, 'float32', 'C')
    c1 = np.require(c1, 'float32', 'C')
    c2 = np.require(c2, 'float32', 'C')
    show = np.zeros((showsz, showsz, 3), dtype='uint8')
    def render():
        # Build a rotation matrix from the current mouse position
        # (unless rotation is frozen) and apply the zoom factor.
        rotmat = np.eye(3)
        if not freezerot:
            xangle = (mousey - 0.5) * np.pi * 1.2
        else:
            xangle = 0
        rotmat = rotmat.dot(np.array([
            [1.0, 0.0, 0.0],
            [0.0, np.cos(xangle), -np.sin(xangle)],
            [0.0, np.sin(xangle), np.cos(xangle)],
        ]))
        if not freezerot:
            yangle = (mousex - 0.5) * np.pi * 1.2
        else:
            yangle = 0
        rotmat = rotmat.dot(np.array([
            [np.cos(yangle), 0.0, -np.sin(yangle)],
            [0.0, 1.0, 0.0],
            [np.sin(yangle), 0.0, np.cos(yangle)],
        ]))
        rotmat *= zoom
        nxyz = xyz.dot(rotmat) + [showsz / 2, showsz / 2, 0]
        ixyz = nxyz.astype('int32')
        show[:] = background
        # Hand the image and point/color buffers to the C renderer.
        dll.render_ball(
            ct.c_int(show.shape[0]),
            ct.c_int(show.shape[1]),
            show.ctypes.data_as(ct.c_void_p),
            ct.c_int(ixyz.shape[0]),
            ixyz.ctypes.data_as(ct.c_void_p),
            c0.ctypes.data_as(ct.c_void_p),
            c1.ctypes.data_as(ct.c_void_p),
            c2.ctypes.data_as(ct.c_void_p),
            ct.c_int(ballradius)
        )
        # Dilate the blue channel by rolling and taking the max.
        if magnifyBlue > 0:
            show[:, :, 0] = np.maximum(show[:, :, 0], np.roll(show[:, :, 0], 1, axis=0))
            if magnifyBlue >= 2:
                show[:, :, 0] = np.maximum(show[:, :, 0], np.roll(show[:, :, 0], -1, axis=0))
            show[:, :, 0] = np.maximum(show[:, :, 0], np.roll(show[:, :, 0], 1, axis=1))
            if magnifyBlue >= 2:
                show[:, :, 0] = np.maximum(show[:, :, 0], np.roll(show[:, :, 0], -1, axis=1))
        # NOTE(review): cv2.cv.CV_RGB is the legacy OpenCV 1.x API; this
        # branch only works on old cv2 builds -- verify before enabling showrot.
        if showrot:
            cv2.putText(show, 'xangle %d' % (int(xangle / np.pi * 180)), (30, showsz - 30), 0, 0.5,
                        cv2.cv.CV_RGB(255, 0, 0))
            cv2.putText(show, 'yangle %d' % (int(yangle / np.pi * 180)), (30, showsz - 50), 0, 0.5,
                        cv2.cv.CV_RGB(255, 0, 0))
            cv2.putText(show, 'zoom %d%%' % (int(zoom * 100)), (30, showsz - 70), 0, 0.5, cv2.cv.CV_RGB(255, 0, 0))
    changed = True
    # Event loop: re-render only when state changed, then handle one key.
    while True:
        if changed:
            render()
            changed = False
        cv2.imshow('show3d', show)
        if waittime == 0:
            cmd = cv2.waitKey(10) % 256
        else:
            cmd = cv2.waitKey(waittime) % 256
        if cmd == ord('q'):
            break
        elif cmd == ord('Q'):
            sys.exit(0)
        # 't' switches to ground-truth colors, 'p' to predicted colors.
        if cmd == ord('t') or cmd == ord('p'):
            if cmd == ord('t'):
                if c_gt is None:
                    c0 = np.zeros((len(xyz),), dtype='float32') + 255
                    c1 = np.zeros((len(xyz),), dtype='float32') + 255
                    c2 = np.zeros((len(xyz),), dtype='float32') + 255
                else:
                    c0 = c_gt[:, 0]
                    c1 = c_gt[:, 1]
                    c2 = c_gt[:, 2]
            else:
                if c_pred is None:
                    c0 = np.zeros((len(xyz),), dtype='float32') + 255
                    c1 = np.zeros((len(xyz),), dtype='float32') + 255
                    c2 = np.zeros((len(xyz),), dtype='float32') + 255
                else:
                    c0 = c_pred[:, 0]
                    c1 = c_pred[:, 1]
                    c2 = c_pred[:, 2]
            if normalizecolor:
                c0 /= (c0.max() + 1e-14) / 255.0
                c1 /= (c1.max() + 1e-14) / 255.0
                c2 /= (c2.max() + 1e-14) / 255.0
            c0 = np.require(c0, 'float32', 'C')
            c1 = np.require(c1, 'float32', 'C')
            c2 = np.require(c2, 'float32', 'C')
            changed = True
        if cmd == ord('n'):
            zoom *= 1.1
            changed = True
        elif cmd == ord('m'):
            zoom /= 1.1
            changed = True
        elif cmd == ord('r'):
            zoom = 1.0
            changed = True
        elif cmd == ord('s'):
            cv2.imwrite('show3d.png', show)
        # In non-blocking mode show a single frame and return immediately.
        if waittime != 0:
            break
    return cmd
# Demo driver: load a random ShapeNet part-segmentation sample and view it.
if __name__ == '__main__':
    import os
    import numpy as np
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default='../data/shapenet', help='dataset path')
    parser.add_argument('--category', type=str, default='Airplane', help='select category')
    parser.add_argument('--npoints', type=int, default=2500, help='resample points number')
    parser.add_argument('--ballradius', type=int, default=10, help='ballradius')
    opt = parser.parse_args()
    # Reference table: ShapeNet category name -> synset id (kept as a bare
    # string literal, it has no runtime effect).
    '''
    Airplane	02691156
    Bag	02773838
    Cap	02954340
    Car	02958343
    Chair	03001627
    Earphone	03261776
    Guitar	03467517
    Knife	03624134
    Lamp	03636649
    Laptop	03642806
    Motorbike	03790512
    Mug	03797390
    Pistol	03948459
    Rocket	04099429
    Skateboard	04225987
    Table	04379243'''
    # Per-part-label RGB colormap (rows 4..10 are intentionally identical).
    cmap = np.array([[1.00000000e+00, 0.00000000e+00, 0.00000000e+00],
                     [3.12493437e-02, 1.00000000e+00, 1.31250131e-06],
                     [0.00000000e+00, 6.25019688e-02, 1.00000000e+00],
                     [1.00000000e+00, 0.00000000e+00, 9.37500000e-02],
                     [1.00000000e+00, 0.00000000e+00, 9.37500000e-02],
                     [1.00000000e+00, 0.00000000e+00, 9.37500000e-02],
                     [1.00000000e+00, 0.00000000e+00, 9.37500000e-02],
                     [1.00000000e+00, 0.00000000e+00, 9.37500000e-02],
                     [1.00000000e+00, 0.00000000e+00, 9.37500000e-02],
                     [1.00000000e+00, 0.00000000e+00, 9.37500000e-02]])
    # Make the repo's data_utils package importable, then load the dataset.
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    ROOT_DIR = os.path.dirname(BASE_DIR)
    sys.path.append(BASE_DIR)
    sys.path.append(os.path.join(ROOT_DIR, 'data_utils'))
    from ShapeNetDataLoader import PartNormalDataset
    root = '../data/shapenetcore_partanno_segmentation_benchmark_v0_normal/'
    dataset = PartNormalDataset(root = root, npoints=2048, split='test', normal_channel=False)
    idx = np.random.randint(0, len(dataset))
    data = dataset[idx]
    point_set, _, seg = data
    # Resample to --npoints points (with replacement) and shift the part
    # labels so they start at zero before indexing into the colormap.
    choice = np.random.choice(point_set.shape[0], opt.npoints, replace=True)
    point_set, seg = point_set[choice, :], seg[choice]
    seg = seg - seg.min()
    gt = cmap[seg, :]
    pred = cmap[seg, :]
    showpoints(point_set, gt, c_pred=pred, waittime=0, showrot=False, magnifyBlue=0, freezerot=False,
               background=(255, 255, 255), normalizecolor=True, ballradius=opt.ballradius)
ae3174f1d33b7a3d7536ca313e3f58e194dfba21 | 3,005 | py | Python | tests/lib/git_submodule_helpers.py | merwok-forks/pip | b99065a6363a85e206218be24be02fe592d39d64 | [
"MIT"
] | 1 | 2019-06-27T11:57:35.000Z | 2019-06-27T11:57:35.000Z | tests/lib/git_submodule_helpers.py | merwok-forks/pip | b99065a6363a85e206218be24be02fe592d39d64 | [
"MIT"
] | null | null | null | tests/lib/git_submodule_helpers.py | merwok-forks/pip | b99065a6363a85e206218be24be02fe592d39d64 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import textwrap
def _create_test_package_submodule(env):
    """Create a minimal git repo to use as a submodule; return its path."""
    env.scratch_path.join("version_pkg_submodule").mkdir()
    repo = env.scratch_path / 'version_pkg_submodule'
    # Seed the repo with a single tracked file and an initial commit.
    env.run('touch', 'testfile', cwd=repo)
    env.run('git', 'init', cwd=repo)
    env.run('git', 'add', '.', cwd=repo)
    env.run(
        'git', 'commit', '-q',
        '--author', 'Pip <python-virtualenv@googlegroups.com>',
        '-am', 'initial version / submodule', cwd=repo,
    )
    return repo
def _change_test_package_submodule(env, submodule_path):
    """Modify the submodule repo (change + add a file) and commit."""
    submodule_path.join("testfile").write("this is a changed file")
    submodule_path.join("testfile2").write("this is an added file")
    env.run('git', 'add', '.', cwd=submodule_path)
    env.run(
        'git', 'commit', '-q',
        '--author', 'Pip <python-virtualenv@googlegroups.com>',
        '-am', 'submodule change', cwd=submodule_path,
    )
def _pull_in_submodule_changes_to_module(env, module_path):
    """Pull upstream submodule changes into *module_path* and commit them."""
    # Update the checked-out submodule working tree first.
    env.run(
        'git', 'pull', '-q', 'origin', 'master',
        cwd=module_path / 'testpkg/static/',
    )
    # Then record the new submodule revision in the superproject.
    env.run(
        'git', 'commit', '-q',
        '--author', 'Pip <python-virtualenv@googlegroups.com>',
        '-am', 'submodule change', cwd=module_path,
    )
def _create_test_package_with_submodule(env):
    """Create a git-tracked package whose testpkg/static dir is a submodule.

    Returns (version_pkg_path, submodule_path).
    """
    author = 'Pip <python-virtualenv@googlegroups.com>'
    env.scratch_path.join("version_pkg").mkdir()
    version_pkg_path = env.scratch_path / 'version_pkg'
    version_pkg_path.join("testpkg").mkdir()
    pkg_path = version_pkg_path / 'testpkg'
    pkg_path.join("__init__.py").write("# hello there")
    pkg_path.join("version_pkg.py").write(textwrap.dedent('''\
        def main():
            print('0.1')
        '''))
    version_pkg_path.join("setup.py").write(textwrap.dedent('''\
        from setuptools import setup, find_packages
        setup(name='version_pkg',
              version='0.1',
              packages=find_packages(),
        )
        '''))
    # Commit the package itself.
    env.run('git', 'init', cwd=version_pkg_path, expect_error=True)
    env.run('git', 'add', '.', cwd=version_pkg_path, expect_error=True)
    env.run(
        'git', 'commit', '-q', '--author', author,
        '-am', 'initial version', cwd=version_pkg_path,
        expect_error=True,
    )
    # Attach the helper repo as a submodule and commit the link.
    submodule_path = _create_test_package_submodule(env)
    env.run(
        'git', 'submodule', 'add', submodule_path, 'testpkg/static',
        cwd=version_pkg_path, expect_error=True,
    )
    env.run(
        'git', 'commit', '-q', '--author', author,
        '-am', 'initial version w submodule', cwd=version_pkg_path,
        expect_error=True,
    )
    return version_pkg_path, submodule_path
| 36.204819 | 71 | 0.581697 |
b6f7958b57ccdd1da7fbb06506f1f421fc48bdbc | 7,358 | py | Python | utils/py27/Lib/ctypes/test/test_pointers.py | xahmol/8bit-Unity | b4f3bee00e012ca1755afba550a5270dce0a1054 | [
"BSD-2-Clause"
] | 42 | 2018-12-12T01:00:59.000Z | 2022-03-27T07:32:29.000Z | utils/py27/Lib/ctypes/test/test_pointers.py | xahmol/8bit-Unity | b4f3bee00e012ca1755afba550a5270dce0a1054 | [
"BSD-2-Clause"
] | 13 | 2020-11-06T13:50:45.000Z | 2022-01-25T07:17:37.000Z | utils/py27/Lib/ctypes/test/test_pointers.py | xahmol/8bit-Unity | b4f3bee00e012ca1755afba550a5270dce0a1054 | [
"BSD-2-Clause"
] | 8 | 2020-11-14T04:30:26.000Z | 2021-01-16T17:55:19.000Z | import unittest, sys
from ctypes import *
import _ctypes_test
# Parallel lists: each ctypes scalar type alongside the Python type its
# .value yields (this file is Python 2 -- note the use of ``long``).
ctype_types = [c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint,
               c_long, c_ulong, c_longlong, c_ulonglong, c_double, c_float]
python_types = [int, int, int, int, int, long,
                int, long, long, long, float, float]
class PointersTestCase(unittest.TestCase):
    """Tests for ctypes pointer behaviour: creation, dereference, mutation,
    callbacks, and the pointer-type cache. Uses the _ctypes_test helper DLL
    shipped with CPython. (Python 2 code: note the 0x...L long literals.)"""
    def test_pointer_crash(self):
        # Subclassing a concrete POINTER type must not crash the interpreter.
        class A(POINTER(c_ulong)):
            pass
        POINTER(c_ulong)(c_ulong(22))
        # Pointer can't set contents: has no _type_
        self.assertRaises(TypeError, A, c_ulong(33))
    def test_pass_pointers(self):
        # _testfunc_p_p returns the pointer it receives; check both as a raw
        # address (c_long restype) and as a typed POINTER(c_int).
        dll = CDLL(_ctypes_test.__file__)
        func = dll._testfunc_p_p
        func.restype = c_long
        i = c_int(12345678)
        ## func.argtypes = (POINTER(c_int),)
        address = func(byref(i))
        self.assertEqual(c_int.from_address(address).value, 12345678)
        func.restype = POINTER(c_int)
        res = func(pointer(i))
        self.assertEqual(res.contents.value, 12345678)
        self.assertEqual(res[0], 12345678)
    def test_change_pointers(self):
        # Writing through a returned pointer must mutate the original object;
        # reassigning .contents must retarget the pointer without touching it.
        dll = CDLL(_ctypes_test.__file__)
        func = dll._testfunc_p_p
        i = c_int(87654)
        func.restype = POINTER(c_int)
        func.argtypes = (POINTER(c_int),)
        res = func(pointer(i))
        self.assertEqual(res[0], 87654)
        self.assertEqual(res.contents.value, 87654)
        # C code: *res = 54345
        res[0] = 54345
        self.assertEqual(i.value, 54345)
        # C code:
        # int x = 12321;
        # res = &x
        x = c_int(12321)
        res.contents = x
        self.assertEqual(i.value, 54345)
        x.value = -99
        self.assertEqual(res.contents.value, -99)
    def test_callbacks_with_pointers(self):
        # a function type receiving a pointer
        PROTOTYPE = CFUNCTYPE(c_int, POINTER(c_int))
        self.result = []
        def func(arg):
            for i in range(10):
                ## print arg[i],
                self.result.append(arg[i])
            ## print
            return 0
        callback = PROTOTYPE(func)
        dll = CDLL(_ctypes_test.__file__)
        # This function expects a function pointer,
        # and calls this with an integer pointer as parameter.
        # The int pointer points to a table containing the numbers 1..10
        doit = dll._testfunc_callback_with_pointer
        ## i = c_int(42)
        ## callback(byref(i))
        ## self.assertEqual(i.value, 84)
        doit(callback)
        ## print self.result
        doit(callback)
        ## print self.result
    def test_basics(self):
        # pointer(obj).contents is an instance of obj's exact ctypes type,
        # and pointer items cannot be deleted.
        from operator import delitem
        for ct, pt in zip(ctype_types, python_types):
            i = ct(42)
            p = pointer(i)
            ## print type(p.contents), ct
            self.assertIs(type(p.contents), ct)
            # p.contents is the same as p[0]
            ## print p.contents
            ## self.assertEqual(p.contents, 42)
            ## self.assertEqual(p[0], 42)
            self.assertRaises(TypeError, delitem, p, 0)
    def test_from_address(self):
        # Smoke test: building a pointer-to-pointer type from a raw address.
        from array import array
        a = array('i', [100, 200, 300, 400, 500])
        addr = a.buffer_info()[0]
        p = POINTER(POINTER(c_int))
        ## print dir(p)
        ## print p.from_address
        ## print p.from_address(addr)[0][0]
    def test_other(self):
        # Pointer to a Structure: field access and mutation via .contents.
        class Table(Structure):
            _fields_ = [("a", c_int),
                        ("b", c_int),
                        ("c", c_int)]
        pt = pointer(Table(1, 2, 3))
        self.assertEqual(pt.contents.a, 1)
        self.assertEqual(pt.contents.b, 2)
        self.assertEqual(pt.contents.c, 3)
        pt.contents.c = 33
        from ctypes import _pointer_type_cache
        del _pointer_type_cache[Table]
    def test_basic(self):
        p = pointer(c_int(42))
        # Although a pointer can be indexed, it has no length
        self.assertRaises(TypeError, len, p)
        self.assertEqual(p[0], 42)
        self.assertEqual(p[0:1], [42])
        self.assertEqual(p.contents.value, 42)
    def test_charpp(self):
        """Test that a character pointer-to-pointer is correctly passed"""
        dll = CDLL(_ctypes_test.__file__)
        func = dll._testfunc_c_p_p
        func.restype = c_char_p
        argv = (c_char_p * 2)()
        argc = c_int( 2 )
        argv[0] = 'hello'
        argv[1] = 'world'
        result = func( byref(argc), argv )
        assert result == 'world', result
    def test_bug_1467852(self):
        # http://sourceforge.net/tracker/?func=detail&atid=532154&aid=1467852&group_id=71702
        # Regression test: assigning through a pointer-to-pointer after heavy
        # allocation must still update the inner pointer correctly.
        x = c_int(5)
        dummy = []
        for i in range(32000):
            dummy.append(c_int(i))
        y = c_int(6)
        p = pointer(x)
        pp = pointer(p)
        q = pointer(y)
        pp[0] = q # <==
        self.assertEqual(p[0], 6)
    def test_c_void_p(self):
        # http://sourceforge.net/tracker/?func=detail&aid=1518190&group_id=5470&atid=105470
        # c_void_p wraps out-of-range integers modulo the pointer width.
        if sizeof(c_void_p) == 4:
            self.assertEqual(c_void_p(0xFFFFFFFFL).value,
                             c_void_p(-1).value)
            self.assertEqual(c_void_p(0xFFFFFFFFFFFFFFFFL).value,
                             c_void_p(-1).value)
        elif sizeof(c_void_p) == 8:
            self.assertEqual(c_void_p(0xFFFFFFFFL).value,
                             0xFFFFFFFFL)
            self.assertEqual(c_void_p(0xFFFFFFFFFFFFFFFFL).value,
                             c_void_p(-1).value)
            self.assertEqual(c_void_p(0xFFFFFFFFFFFFFFFFFFFFFFFFL).value,
                             c_void_p(-1).value)
        self.assertRaises(TypeError, c_void_p, 3.14) # make sure floats are NOT accepted
        self.assertRaises(TypeError, c_void_p, object()) # nor other objects
    def test_pointers_bool(self):
        # NULL pointers have a boolean False value, non-NULL pointers True.
        self.assertEqual(bool(POINTER(c_int)()), False)
        self.assertEqual(bool(pointer(c_int())), True)
        self.assertEqual(bool(CFUNCTYPE(None)(0)), False)
        self.assertEqual(bool(CFUNCTYPE(None)(42)), True)
        # COM methods are boolean True:
        if sys.platform == "win32":
            mth = WINFUNCTYPE(None)(42, "name", (), None)
            self.assertEqual(bool(mth), True)
    def test_pointer_type_name(self):
        # Very long type names must not break POINTER() creation.
        LargeNamedType = type('T' * 2 ** 25, (Structure,), {})
        self.assertTrue(POINTER(LargeNamedType))
        # to not leak references, we must clean _pointer_type_cache
        from ctypes import _pointer_type_cache
        del _pointer_type_cache[LargeNamedType]
    def test_pointer_type_str_name(self):
        # POINTER() also accepts a (very long) string as the type name.
        large_string = 'T' * 2 ** 25
        P = POINTER(large_string)
        self.assertTrue(P)
        # to not leak references, we must clean _pointer_type_cache
        from ctypes import _pointer_type_cache
        del _pointer_type_cache[id(P)]
    def test_abstract(self):
        # The abstract _Pointer base type must reject set_type.
        from ctypes import _Pointer
        self.assertRaises(TypeError, _Pointer.set_type, 42)
if __name__ == '__main__':
unittest.main()
| 33.294118 | 93 | 0.56374 |
8b0c1171fb90aabb892aa298324906eb6ba7cee6 | 33,736 | py | Python | test/functional/rpc_fundrawtransaction.py | XziimP/bitgesell | cdf1295f44e840e5603b22f2c2cdfec9572c7bcf | [
"MIT"
] | 1 | 2020-10-13T15:59:46.000Z | 2020-10-13T15:59:46.000Z | test/functional/rpc_fundrawtransaction.py | XziimP/bitgesell | cdf1295f44e840e5603b22f2c2cdfec9572c7bcf | [
"MIT"
] | 2 | 2020-12-03T12:20:22.000Z | 2021-01-27T07:26:34.000Z | test/functional/rpc_fundrawtransaction.py | XziimP/bitgesell | cdf1295f44e840e5603b22f2c2cdfec9572c7bcf | [
"MIT"
] | 1 | 2020-11-10T21:05:32.000Z | 2020-11-10T21:05:32.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from decimal import Decimal
from test_framework.test_framework import BGLTestFramework
from test_framework.util import (
assert_equal,
assert_fee_amount,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises_rpc_error,
connect_nodes,
count_bytes,
find_vout_for_address,
)
def get_unspent(listunspent, amount):
    """Return the first unspent output whose 'amount' equals *amount*.

    Raises AssertionError when no matching UTXO exists, so a missing coin
    surfaces as a test failure.
    """
    match = next((utxo for utxo in listunspent if utxo['amount'] == amount), None)
    if match is None:
        raise AssertionError('Could not find unspent with amount={}'.format(amount))
    return match
class RawTransactionsTest(BGLTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
# This test isn't testing tx relay. Set whitelist on the peers for
# instant tx relay.
self.extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
    def run_test(self):
        """Top-level driver: pin fees, fund wallets, then run sub-tests in order."""
        self.log.info("Connect nodes, set fees, generate blocks, and sync")
        self.min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
        # This test is not meant to test fee estimation and we'd like
        # to be sure all txs are sent at a consistent desired feerate
        for node in self.nodes:
            node.settxfee(self.min_relay_tx_fee)
        # if the fee's positive delta is higher than this value tests will fail,
        # neg. delta always fail the tests.
        # The size of the signature of every input may be at most 2 bytes larger
        # than a minimum sized signature.
        # = 2 bytes * minRelayTxFeePerByte
        self.fee_tolerance = 2 * self.min_relay_tx_fee / 1000
        # Mine blocks so the wallets hold mature, spendable coinbase outputs.
        self.nodes[2].generate(1)
        self.sync_all()
        self.nodes[0].generate(121)
        self.sync_all()
        # NOTE: sub-tests share wallet/UTXO state, so their order matters
        # (e.g. test_change stores a fee that test_no_change reuses).
        self.test_change_position()
        self.test_simple()
        self.test_simple_two_coins()
        self.test_simple_two_outputs()
        self.test_change()
        self.test_no_change()
        self.test_invalid_option()
        self.test_invalid_change_address()
        self.test_valid_change_address()
        self.test_change_type()
        self.test_coin_selection()
        self.test_two_vin()
        self.test_two_vin_two_vout()
        self.test_invalid_input()
        self.test_fee_p2pkh()
        self.test_fee_p2pkh_multi_out()
        self.test_fee_p2sh()
        self.test_fee_4of5()
        self.test_spend_2of2()
        self.test_locked_wallet()
        self.test_many_inputs_fee()
        self.test_many_inputs_send()
        self.test_op_return()
        self.test_watchonly()
        self.test_all_watched_funds()
        self.test_option_feerate()
        self.test_address_reuse()
        self.test_option_subtract_fee_from_outputs()
        self.test_subtract_fee_with_presets()
    def test_change_position(self):
        """Ensure setting changePosition in fundraw with an exact match is handled properly."""
        self.log.info("Test fundrawtxn changePosition option")
        # When the fee is subtracted from the sole output there is nothing
        # left over, so no change output is created (changepos == -1).
        rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
        rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
        assert_equal(rawmatch["changepos"], -1)
        # Set up a watch-only key on node 3 and fund it; later sub-tests
        # (test_watchonly and friends) rely on these self.watchonly_* values.
        watchonly_address = self.nodes[0].getnewaddress()
        watchonly_pubkey = self.nodes[0].getaddressinfo(watchonly_address)["pubkey"]
        self.watchonly_amount = Decimal(200)
        self.nodes[3].importpubkey(watchonly_pubkey, "", True)
        self.watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, self.watchonly_amount)
        # Lock UTXO so nodes[0] doesn't accidentally spend it
        self.watchonly_vout = find_vout_for_address(self.nodes[0], self.watchonly_txid, watchonly_address)
        self.nodes[0].lockunspent(False, [{"txid": self.watchonly_txid, "vout": self.watchonly_vout}])
        self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), self.watchonly_amount / 10)
        # Seed node 2 with UTXOs of known sizes for the coin-selection tests.
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
        self.nodes[0].generate(1)
        self.sync_all()
def test_simple(self):
self.log.info("Test fundrawtxn")
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert len(dec_tx['vin']) > 0 #test that we have enough inputs
def test_simple_two_coins(self):
self.log.info("Test fundrawtxn with 2 coins")
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert len(dec_tx['vin']) > 0 #test if we have enough inputs
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
def test_simple_two_outputs(self):
self.log.info("Test fundrawtxn with 2 outputs")
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert len(dec_tx['vin']) > 0
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
    def test_change(self):
        """Fund a tx whose explicit 5-coin input exceeds the 1-coin output;
        the funded result must balance exactly: inputs == outputs + fee."""
        self.log.info("Test fundrawtxn with a vin > required amount")
        utx = get_unspent(self.nodes[2].listunspent(), 5)
        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
        outputs = { self.nodes[0].getnewaddress() : 1.0 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        # Cross-test coupling: test_no_change() reads this attribute.
        self.test_no_change_fee = fee # Use the same fee for the next tx
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
        assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
def test_no_change(self):
    """Fund a tx whose output leaves no room for change (changepos == -1)."""
    self.log.info("Test fundrawtxn not having a change output")
    utx = get_unspent(self.nodes[2].listunspent(), 5)
    inputs = [{'txid': utx['txid'], 'vout': utx['vout']}]
    # Spend (almost) everything: input amount minus the fee observed in
    # test_change minus a small tolerance, so no change output is created.
    outputs = {self.nodes[0].getnewaddress(): Decimal(5.0) - self.test_no_change_fee - self.fee_tolerance}
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
    rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
    fee = rawtxfund['fee']
    dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
    totalOut = 0
    for out in dec_tx['vout']:
        totalOut += out['value']
    assert_equal(rawtxfund['changepos'], -1)
    assert_equal(fee + totalOut, utx['amount'])  # compare vin total and totalout+fee
def test_invalid_option(self):
    """Unknown option keys must be rejected with RPC error -3."""
    self.log.info("Test fundrawtxn with an invalid option")
    utx = get_unspent(self.nodes[2].listunspent(), 5)
    inputs = [{'txid': utx['txid'], 'vout': utx['vout']}]
    outputs = {self.nodes[0].getnewaddress(): Decimal(4.0)}
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
    assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo': 'bar'})
    # reserveChangeKey was deprecated and is now removed
    assert_raises_rpc_error(-3, "Unexpected key reserveChangeKey", lambda: self.nodes[2].fundrawtransaction(hexstring=rawtx, options={'reserveChangeKey': True}))
def test_invalid_change_address(self):
    """A malformed changeAddress must be rejected with RPC error -5."""
    self.log.info("Test fundrawtxn with an invalid change address")
    utx = get_unspent(self.nodes[2].listunspent(), 5)
    inputs = [{'txid': utx['txid'], 'vout': utx['vout']}]
    outputs = {self.nodes[0].getnewaddress(): Decimal(4.0)}
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
    assert_raises_rpc_error(-5, "changeAddress must be a valid BGL address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress': 'foobar'})
def test_valid_change_address(self):
    """A provided changeAddress/changePosition must be honored."""
    self.log.info("Test fundrawtxn with a provided change address")
    utx = get_unspent(self.nodes[2].listunspent(), 5)
    inputs = [{'txid': utx['txid'], 'vout': utx['vout']}]
    outputs = {self.nodes[0].getnewaddress(): Decimal(4.0)}
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
    change = self.nodes[2].getnewaddress()
    # Only positions 0 and 1 exist for a 1-output tx plus change.
    assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress': change, 'changePosition': 2})
    rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
    dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
    out = dec_tx['vout'][0]
    assert_equal(change, out['scriptPubKey']['addresses'][0])
def test_change_type(self):
    """The change_type option must control the change output's script type."""
    self.log.info("Test fundrawtxn with a provided change type")
    utx = get_unspent(self.nodes[2].listunspent(), 5)
    inputs = [{'txid': utx['txid'], 'vout': utx['vout']}]
    outputs = {self.nodes[0].getnewaddress(): Decimal(4.0)}
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None})
    assert_raises_rpc_error(-5, "Unknown change type ''", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''})
    rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'})
    dec_tx = self.nodes[2].decoderawtransaction(rawtx['hex'])
    # bech32 change must decode as a v0 witness keyhash output.
    assert_equal('witness_v0_keyhash', dec_tx['vout'][rawtx['changepos']]['scriptPubKey']['type'])
def test_coin_selection(self):
    """Preset vin smaller than the payment: funding must add coins and
    keep the preset input (with its dummy scriptSig) untouched."""
    self.log.info("Test fundrawtxn with a vin < required amount")
    utx = get_unspent(self.nodes[2].listunspent(), 1)
    inputs = [{'txid': utx['txid'], 'vout': utx['vout']}]
    outputs = {self.nodes[0].getnewaddress(): 1.0}
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    # 4-byte version + 1-byte vin count + 36-byte prevout then script_len
    # Patch in a 1-byte dummy scriptSig (0x00) by raw hex surgery.
    rawtx = rawtx[:82] + "0100" + rawtx[84:]
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
    assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
    rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
    dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
    totalOut = 0
    matchingOuts = 0
    for i, out in enumerate(dec_tx['vout']):
        totalOut += out['value']
        if out['scriptPubKey']['addresses'][0] in outputs:
            matchingOuts += 1
        else:
            # Any non-matching output must be the change output.
            assert_equal(i, rawtxfund['changepos'])
    assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
    assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
    assert_equal(matchingOuts, 1)
    assert_equal(len(dec_tx['vout']), 2)
def test_two_vin(self):
    """Both preset vins must survive funding unchanged."""
    self.log.info("Test fundrawtxn with 2 vins")
    utx = get_unspent(self.nodes[2].listunspent(), 1)
    utx2 = get_unspent(self.nodes[2].listunspent(), 5)
    inputs = [{'txid': utx['txid'], 'vout': utx['vout']}, {'txid': utx2['txid'], 'vout': utx2['vout']}]
    outputs = {self.nodes[0].getnewaddress(): 6.0}
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
    rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
    dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
    totalOut = 0
    matchingOuts = 0
    for out in dec_tx['vout']:
        totalOut += out['value']
        if out['scriptPubKey']['addresses'][0] in outputs:
            matchingOuts += 1
    assert_equal(matchingOuts, 1)
    assert_equal(len(dec_tx['vout']), 2)
    matchingIns = 0
    for vinOut in dec_tx['vin']:
        for vinIn in inputs:
            if vinIn['txid'] == vinOut['txid']:
                matchingIns += 1
    assert_equal(matchingIns, 2)  # we now must see two vins identical to vins given as params
def test_two_vin_two_vout(self):
    """Two preset vins and two vouts: funding adds only a change output."""
    self.log.info("Test fundrawtxn with 2 vins and 2 vouts")
    utx = get_unspent(self.nodes[2].listunspent(), 1)
    utx2 = get_unspent(self.nodes[2].listunspent(), 5)
    inputs = [{'txid': utx['txid'], 'vout': utx['vout']}, {'txid': utx2['txid'], 'vout': utx2['vout']}]
    outputs = {self.nodes[0].getnewaddress(): 6.0, self.nodes[0].getnewaddress(): 1.0}
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
    rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
    dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
    totalOut = 0
    matchingOuts = 0
    for out in dec_tx['vout']:
        totalOut += out['value']
        if out['scriptPubKey']['addresses'][0] in outputs:
            matchingOuts += 1
    assert_equal(matchingOuts, 2)
    assert_equal(len(dec_tx['vout']), 3)
def test_invalid_input(self):
    """A raw tx that spends a nonexistent UTXO cannot be funded."""
    self.log.info("Test fundrawtxn with an invalid vin")
    # This txid does not exist on-chain, so it contributes no funds.
    bogus_input = {'txid': "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout': 0}  # invalid vin!
    payment = {self.nodes[0].getnewaddress(): 1.0}
    rawtx = self.nodes[2].createrawtransaction([bogus_input], payment)
    assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
def test_fee_p2pkh(self):
    """Compare fee of a standard pubkeyhash transaction."""
    self.log.info("Test fundrawtxn p2pkh fee")
    inputs = []
    outputs = {self.nodes[1].getnewaddress(): 1.1}
    rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
    fundedTx = self.nodes[0].fundrawtransaction(rawtx)
    # Create same transaction over sendtoaddress.
    txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
    signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
    # Compare fee.
    feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
    # Funded fee may be slightly larger than the wallet's, within tolerance.
    assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
def test_fee_p2pkh_multi_out(self):
    """Compare fee of a standard pubkeyhash transaction with multiple outputs."""
    self.log.info("Test fundrawtxn p2pkh fee with multiple outputs")
    inputs = []
    outputs = {
        self.nodes[1].getnewaddress(): 1.1,
        self.nodes[1].getnewaddress(): 1.2,
        self.nodes[1].getnewaddress(): 0.1,
        self.nodes[1].getnewaddress(): 1.3,
        self.nodes[1].getnewaddress(): 0.2,
        self.nodes[1].getnewaddress(): 0.3,
    }
    rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
    fundedTx = self.nodes[0].fundrawtransaction(rawtx)
    # Create same transaction over sendtoaddress.
    txId = self.nodes[0].sendmany("", outputs)
    signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
    # Compare fee.
    feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
    assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
def test_fee_p2sh(self):
    """Compare fee of a 2-of-2 multisig p2sh transaction."""
    # Create 2-of-2 addr.
    addr1 = self.nodes[1].getnewaddress()
    addr2 = self.nodes[1].getnewaddress()
    addr1Obj = self.nodes[1].getaddressinfo(addr1)
    addr2Obj = self.nodes[1].getaddressinfo(addr2)
    mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
    inputs = []
    outputs = {mSigObj: 1.1}
    rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
    fundedTx = self.nodes[0].fundrawtransaction(rawtx)
    # Create same transaction over sendtoaddress.
    txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
    signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
    # Compare fee.
    feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
    assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
def test_fee_4of5(self):
    """Compare fee of a standard pubkeyhash transaction."""
    self.log.info("Test fundrawtxn fee with 4-of-5 addresses")
    # Create 4-of-5 addr.
    addr1 = self.nodes[1].getnewaddress()
    addr2 = self.nodes[1].getnewaddress()
    addr3 = self.nodes[1].getnewaddress()
    addr4 = self.nodes[1].getnewaddress()
    addr5 = self.nodes[1].getnewaddress()
    addr1Obj = self.nodes[1].getaddressinfo(addr1)
    addr2Obj = self.nodes[1].getaddressinfo(addr2)
    addr3Obj = self.nodes[1].getaddressinfo(addr3)
    addr4Obj = self.nodes[1].getaddressinfo(addr4)
    addr5Obj = self.nodes[1].getaddressinfo(addr5)
    mSigObj = self.nodes[1].addmultisigaddress(
        4,
        [
            addr1Obj['pubkey'],
            addr2Obj['pubkey'],
            addr3Obj['pubkey'],
            addr4Obj['pubkey'],
            addr5Obj['pubkey'],
        ]
    )['address']
    inputs = []
    outputs = {mSigObj: 1.1}
    rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
    fundedTx = self.nodes[0].fundrawtransaction(rawtx)
    # Create same transaction over sendtoaddress.
    txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
    signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
    # Compare fee.
    feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
    assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
def test_spend_2of2(self):
    """Spend a 2-of-2 multisig transaction over fundraw."""
    self.log.info("Test fundrawtxn spending 2-of-2 multisig")
    # Create 2-of-2 addr.
    addr1 = self.nodes[2].getnewaddress()
    addr2 = self.nodes[2].getnewaddress()
    addr1Obj = self.nodes[2].getaddressinfo(addr1)
    addr2Obj = self.nodes[2].getaddressinfo(addr2)
    mSigObj = self.nodes[2].addmultisigaddress(
        2,
        [
            addr1Obj['pubkey'],
            addr2Obj['pubkey'],
        ]
    )['address']
    # Send 1.2 BTC to msig addr.
    self.nodes[0].sendtoaddress(mSigObj, 1.2)
    self.nodes[0].generate(1)
    self.sync_all()
    oldBalance = self.nodes[1].getbalance()
    inputs = []
    outputs = {self.nodes[1].getnewaddress(): 1.1}
    rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
    fundedTx = self.nodes[2].fundrawtransaction(rawtx)
    signedTx = self.nodes[2].signrawtransactionwithwallet(fundedTx['hex'])
    self.nodes[2].sendrawtransaction(signedTx['hex'])
    self.nodes[2].generate(1)
    self.sync_all()
    # Make sure funds are received at node1.
    assert_equal(oldBalance + Decimal('1.10000000'), self.nodes[1].getbalance())
def test_locked_wallet(self):
    """Funding with an encrypted, locked wallet and a drained keypool."""
    self.log.info("Test fundrawtxn with locked wallet")
    self.nodes[1].encryptwallet("test")
    # Drain the keypool.
    self.nodes[1].getnewaddress()
    self.nodes[1].getrawchangeaddress()
    inputs = []
    outputs = {self.nodes[0].getnewaddress(): 1.1}
    rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
    # fund a transaction that requires a new key for the change output
    # creating the key must be impossible because the wallet is locked
    assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
    # Refill the keypool.
    self.nodes[1].walletpassphrase("test", 100)
    self.nodes[1].keypoolrefill(8)  # need to refill the keypool to get an internal change address
    self.nodes[1].walletlock()
    # Spending while locked must still be refused.
    assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
    oldBalance = self.nodes[0].getbalance()
    inputs = []
    outputs = {self.nodes[0].getnewaddress(): 1.1}
    rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
    # Funding works while locked now that keys are available...
    fundedTx = self.nodes[1].fundrawtransaction(rawtx)
    # Now we need to unlock.
    self.nodes[1].walletpassphrase("test", 600)
    signedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
    self.nodes[1].sendrawtransaction(signedTx['hex'])
    self.nodes[1].generate(1)
    self.sync_all()
    # Make sure funds are received at node1.
    assert_equal(oldBalance + Decimal('51.10000000'), self.nodes[0].getbalance())
def test_many_inputs_fee(self):
    """Multiple (~19) inputs tx test | Compare fee."""
    self.log.info("Test fundrawtxn fee with many inputs")
    # Empty node1, send some small coins from node0 to node1.
    self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
    self.nodes[1].generate(1)
    self.sync_all()
    for i in range(0, 20):
        self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
    self.nodes[0].generate(1)
    self.sync_all()
    # Fund a tx with ~20 small inputs.
    inputs = []
    outputs = {self.nodes[0].getnewaddress(): 0.15, self.nodes[0].getnewaddress(): 0.04}
    rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
    fundedTx = self.nodes[1].fundrawtransaction(rawtx)
    # Create same transaction over sendtoaddress.
    txId = self.nodes[1].sendmany("", outputs)
    signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
    # Compare fee.
    feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
    assert feeDelta >= 0 and feeDelta <= self.fee_tolerance * 19  # ~19 inputs
def test_many_inputs_send(self):
    """Multiple (~19) inputs tx test | sign/send."""
    self.log.info("Test fundrawtxn sign+send with many inputs")
    # Again, empty node1, send some small coins from node0 to node1.
    self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
    self.nodes[1].generate(1)
    self.sync_all()
    for i in range(0, 20):
        self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
    self.nodes[0].generate(1)
    self.sync_all()
    # Fund a tx with ~20 small inputs.
    oldBalance = self.nodes[0].getbalance()
    inputs = []
    outputs = {self.nodes[0].getnewaddress(): 0.15, self.nodes[0].getnewaddress(): 0.04}
    rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
    fundedTx = self.nodes[1].fundrawtransaction(rawtx)
    fundedAndSignedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
    self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
    self.nodes[1].generate(1)
    self.sync_all()
    assert_equal(oldBalance + Decimal('50.19000000'), self.nodes[0].getbalance())  # 0.19+block reward
def test_op_return(self):
    """Fund a tx whose only output is an OP_RETURN and which has no vin."""
    self.log.info("Test fundrawtxn with OP_RETURN and no vin")
    # Hand-crafted raw tx: no inputs, one OP_RETURN "test" output.
    rawtx = "0100000000010000000000000000066a047465737400000000"
    dec_tx = self.nodes[2].decoderawtransaction(rawtx)
    assert_equal(len(dec_tx['vin']), 0)
    assert_equal(len(dec_tx['vout']), 1)
    rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
    dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
    assert_greater_than(len(dec_tx['vin']), 0)  # at least one vin
    assert_equal(len(dec_tx['vout']), 2)  # one change output added
def test_watchonly(self):
    """Funding must be able to select watch-only coins when asked to."""
    self.log.info("Test fundrawtxn using only watchonly")
    inputs = []
    outputs = {self.nodes[2].getnewaddress(): self.watchonly_amount / 2}
    rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
    result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True})
    res_dec = self.nodes[0].decoderawtransaction(result["hex"])
    # Exactly the watch-only UTXO must have been selected.
    assert_equal(len(res_dec["vin"]), 1)
    assert_equal(res_dec["vin"][0]["txid"], self.watchonly_txid)
    assert "fee" in result.keys()
    assert_greater_than(result["changepos"], -1)
def test_all_watched_funds(self):
    """Spend the full watch-only amount; signing completes on the owner node."""
    self.log.info("Test fundrawtxn using entirety of watched funds")
    inputs = []
    outputs = {self.nodes[2].getnewaddress(): self.watchonly_amount}
    rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
    # Backward compatibility test (2nd param is includeWatching).
    result = self.nodes[3].fundrawtransaction(rawtx, True)
    res_dec = self.nodes[0].decoderawtransaction(result["hex"])
    assert_equal(len(res_dec["vin"]), 2)
    assert res_dec["vin"][0]["txid"] == self.watchonly_txid or res_dec["vin"][1]["txid"] == self.watchonly_txid
    assert_greater_than(result["fee"], 0)
    assert_greater_than(result["changepos"], -1)
    assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], self.watchonly_amount / 10)
    # node3 only watches the key, so its signature is incomplete...
    signedtx = self.nodes[3].signrawtransactionwithwallet(result["hex"])
    assert not signedtx["complete"]
    # ...and node0, which owns the key, completes it.
    signedtx = self.nodes[0].signrawtransactionwithwallet(signedtx["hex"])
    assert signedtx["complete"]
    self.nodes[0].sendrawtransaction(signedtx["hex"])
    self.nodes[0].generate(1)
    self.sync_all()
def test_option_feerate(self):
    """The feeRate option must scale the paid fee proportionally."""
    self.log.info("Test fundrawtxn feeRate option")
    # Make sure there is exactly one input so coin selection can't skew the result.
    assert_equal(len(self.nodes[3].listunspent(1)), 1)
    inputs = []
    outputs = {self.nodes[3].getnewaddress(): 1}
    rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
    result = self.nodes[3].fundrawtransaction(rawtx)  # uses self.min_relay_tx_fee (set by settxfee)
    result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2 * self.min_relay_tx_fee})
    result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10 * self.min_relay_tx_fee})
    assert_raises_rpc_error(-4, "Fee exceeds maximum configured by -maxtxfee", self.nodes[3].fundrawtransaction, rawtx, {"feeRate": 1})
    result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
    assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
    assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
def test_address_reuse(self):
    """Test no address reuse occurs."""
    self.log.info("Test fundrawtxn does not reuse addresses")
    rawtx = self.nodes[3].createrawtransaction(inputs=[], outputs={self.nodes[3].getnewaddress(): 1})
    result3 = self.nodes[3].fundrawtransaction(rawtx)
    res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
    changeaddress = ""
    for out in res_dec['vout']:
        if out['value'] > 1.0:
            # The output larger than the 1-coin payment is the change.
            changeaddress += out['scriptPubKey']['addresses'][0]
    assert changeaddress != ""
    nextaddr = self.nodes[3].getnewaddress()
    # Now the change address key should be removed from the keypool.
    assert changeaddress != nextaddr
def test_option_subtract_fee_from_outputs(self):
    """subtractFeeFromOutputs must take the fee from the listed outputs
    while leaving the fee itself and the other outputs consistent."""
    self.log.info("Test fundrawtxn subtractFeeFromOutputs option")
    # Make sure there is exactly one input so coin selection can't skew the result.
    assert_equal(len(self.nodes[3].listunspent(1)), 1)
    inputs = []
    outputs = {self.nodes[2].getnewaddress(): 1}
    rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
    result = [self.nodes[3].fundrawtransaction(rawtx),  # uses self.min_relay_tx_fee (set by settxfee)
              self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}),  # empty subtraction list
              self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}),  # uses self.min_relay_tx_fee (set by settxfee)
              self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2 * self.min_relay_tx_fee}),
              self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2 * self.min_relay_tx_fee, "subtractFeeFromOutputs": [0]}),]
    dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result]
    # With 2 vouts, the non-change output sits at index (1 - changepos).
    output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
    change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
    assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
    assert_equal(result[3]['fee'], result[4]['fee'])
    assert_equal(change[0], change[1])
    assert_equal(output[0], output[1])
    assert_equal(output[0], output[2] + result[2]['fee'])
    assert_equal(change[0] + result[0]['fee'], change[2])
    assert_equal(output[3], output[4] + result[4]['fee'])
    assert_equal(change[3] + result[3]['fee'], change[4])
    inputs = []
    outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
    rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
    result = [self.nodes[3].fundrawtransaction(rawtx),
              # Split the fee between outputs 0, 2, and 3, but not output 1.
              self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
    dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
              self.nodes[3].decoderawtransaction(result[1]['hex'])]
    # Nested list of non-change output amounts for each transaction.
    output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
              for d, r in zip(dec_tx, result)]
    # List of differences in output amounts between normal and subtractFee transactions.
    share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
    # Output 1 is the same in both transactions.
    assert_equal(share[1], 0)
    # The other 3 outputs are smaller as a result of subtractFeeFromOutputs.
    assert_greater_than(share[0], 0)
    assert_greater_than(share[2], 0)
    assert_greater_than(share[3], 0)
    # Outputs 2 and 3 take the same share of the fee.
    assert_equal(share[2], share[3])
    # Output 0 takes at least as much share of the fee, and no more than 2
    # satoshis more, than outputs 2 and 3.
    assert_greater_than_or_equal(share[0], share[2])
    assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
    # The fee is the same in both transactions.
    assert_equal(result[0]['fee'], result[1]['fee'])
    # The total subtracted from the outputs is equal to the fee.
    assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
def test_subtract_fee_with_presets(self):
    """subtractFeeFromOutputs must also work with sufficient preset inputs."""
    self.log.info("Test fundrawtxn subtract fee from outputs with preset inputs that are sufficient")
    addr = self.nodes[0].getnewaddress()
    txid = self.nodes[0].sendtoaddress(addr, 10)
    vout = find_vout_for_address(self.nodes[0], txid, addr)
    rawtx = self.nodes[0].createrawtransaction([{'txid': txid, 'vout': vout}], [{self.nodes[0].getnewaddress(): 5}])
    fundedtx = self.nodes[0].fundrawtransaction(rawtx, {'subtractFeeFromOutputs': [0]})
    signedtx = self.nodes[0].signrawtransactionwithwallet(fundedtx['hex'])
    self.nodes[0].sendrawtransaction(signedtx['hex'])
# Script entry point: run the functional test via the framework's main().
if __name__ == '__main__':
    RawTransactionsTest().main()
| 44.447958 | 165 | 0.635226 |
965e9b3499c2ff518d493b6fecc004e8295d2120 | 7,809 | py | Python | tests/scripts/thread-cert/Cert_7_1_04_BorderRouterAsRouter.py | sarah-iot/openthread | 8210a02400c145eb862b65e32040ed0415ba36c2 | [
"BSD-3-Clause"
] | 69 | 2021-12-16T01:34:09.000Z | 2022-03-31T08:27:39.000Z | tests/scripts/thread-cert/Cert_7_1_04_BorderRouterAsRouter.py | xuyirio/openthread | 349041bc61e899acdccefea455c3401c05e1318b | [
"BSD-3-Clause"
] | 6 | 2022-01-12T18:22:08.000Z | 2022-03-25T10:19:27.000Z | tests/scripts/thread-cert/Cert_7_1_04_BorderRouterAsRouter.py | xuyirio/openthread | 349041bc61e899acdccefea455c3401c05e1318b | [
"BSD-3-Clause"
] | 21 | 2021-12-20T09:05:45.000Z | 2022-03-28T02:52:28.000Z | #!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import config
import thread_cert
from pktverify.consts import MLE_ADVERTISEMENT, MLE_CHILD_ID_REQUEST, MLE_DATA_RESPONSE, MLE_CHILD_ID_RESPONSE, MLE_CHILD_UPDATE_RESPONSE, MLE_CHILD_UPDATE_REQUEST, SVR_DATA_URI, SOURCE_ADDRESS_TLV, MODE_TLV, ADDRESS_REGISTRATION_TLV, LEADER_DATA_TLV, NETWORK_DATA_TLV, ACTIVE_TIMESTAMP_TLV, ROUTE64_TLV
from pktverify.packet_verifier import PacketVerifier
from pktverify.addrs import Ipv6Addr
# Node ids used as keys in the class TOPOLOGY below.
LEADER = 1
ROUTER = 2
ED2 = 3
SED2 = 4
# Minimal Thread devices (sleepy + minimal end device).
MTDS = [SED2, ED2]
class Cert_7_1_4_BorderRouterAsRouter(thread_cert.TestCase):
    """Thread certification 7.1.4: border router (DUT) acting as a router.

    Topology: LEADER -- ROUTER (DUT) -- {MED, SED}.
    """

    TOPOLOGY = {
        LEADER: {
            'name': 'LEADER',
            'mode': 'rdn',
            'allowlist': [ROUTER]
        },
        ROUTER: {
            'name': 'ROUTER',
            'mode': 'rdn',
            'allowlist': [LEADER, ED2, SED2]
        },
        ED2: {
            'name': 'MED',
            'is_mtd': True,
            'mode': 'rn',
            'allowlist': [ROUTER]
        },
        SED2: {
            'name': 'SED',
            'is_mtd': True,
            'mode': '-',
            'timeout': config.DEFAULT_CHILD_TIMEOUT,
            'allowlist': [ROUTER]
        },
    }

    def test(self):
        """Bring up the topology, register two prefixes on the DUT, and
        verify derived addresses are pingable from the leader."""
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')

        self.nodes[ROUTER].start()
        self.simulator.go(config.ROUTER_STARTUP_DELAY)
        self.assertEqual(self.nodes[ROUTER].get_state(), 'router')

        self.nodes[ED2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ED2].get_state(), 'child')

        self.nodes[SED2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[SED2].get_state(), 'child')

        self.collect_rloc16s()

        # Prefix 1 is stable ('s' flag); prefix 2 is not.
        self.nodes[ROUTER].add_prefix('2001:2:0:1::/64', 'paros')
        self.nodes[ROUTER].add_prefix('2001:2:0:2::/64', 'paro')
        self.nodes[ROUTER].register_netdata()
        self.simulator.go(5)

        # Set lowpan context of sniffer
        self.simulator.set_lowpan_context(1, '2001:2:0:1::/64')
        self.simulator.set_lowpan_context(2, '2001:2:0:2::/64')

        # MED should configure addresses from both prefixes.
        addrs = self.nodes[ED2].get_addrs()
        self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
        self.assertTrue(any('2001:2:0:2' in addr[0:10] for addr in addrs))
        for addr in addrs:
            if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
                self.assertTrue(self.nodes[LEADER].ping(addr))

        # SED only receives the stable prefix, so only prefix 1 addresses.
        addrs = self.nodes[SED2].get_addrs()
        self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
        self.assertFalse(any('2001:2:0:2' in addr[0:10] for addr in addrs))
        for addr in addrs:
            if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
                self.assertTrue(self.nodes[LEADER].ping(addr))

    def verify(self, pv):
        """Verify the sniffed packet sequence against the cert test steps."""
        pkts = pv.pkts
        pv.summary.show()

        ROUTER = pv.vars['ROUTER']
        MED = pv.vars['MED']
        SED = pv.vars['SED']
        _rpkts = pkts.filter_wpan_src64(ROUTER)

        # Step 3: The DUT MUST send properly formatted MLE Advertisements
        # The DUT MUST send a CoAP Server Data Notification message
        # with the server’s information (Prefix, Border Router) to the Leader
        _rpkts.filter_mle_cmd(MLE_CHILD_ID_REQUEST).must_next()
        _rpkts.filter_mle_cmd(MLE_ADVERTISEMENT).must_next().must_verify(
            lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ROUTE64_TLV} == set(p.mle.tlv.type))
        _pkt = _rpkts.filter_coap_request(SVR_DATA_URI).must_next()
        _pkt.must_verify(lambda p: p.wpan.dst16 == pv.vars['LEADER_RLOC16'] and {
            Ipv6Addr('2001:2:0:1::'), Ipv6Addr('2001:2:0:2::')
        } == set(p.thread_nwd.tlv.prefix) and p.thread_nwd.tlv.border_router.flag.p == [1, 1] and p.thread_nwd.tlv.
                         border_router.flag.s == [1, 1] and p.thread_nwd.tlv.border_router.flag.r == [1, 1] and p.
                         thread_nwd.tlv.border_router.flag.o == [1, 1] and p.thread_nwd.tlv.stable == [1, 1, 0, 0])
        # Independent cursors so MED/SED checks start from the same point.
        _rpkts_med = _rpkts.copy()
        _rpkts_sed = _rpkts.copy()

        # Step 4: Automatically transmits a 2.04 Changed CoAP response to the DUT
        # Step 5: The DUT MUST send a multicast MLE Data Response
        _rpkts.filter_mle_cmd(MLE_DATA_RESPONSE).must_next().must_verify(
            lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, NETWORK_DATA_TLV, ACTIVE_TIMESTAMP_TLV} == set(
                p.mle.tlv.type) and {Ipv6Addr('2001:2:0:1::'), Ipv6Addr('2001:2:0:2::')} == set(
                    p.thread_nwd.tlv.prefix) and p.thread_nwd.tlv.border_router.flag.p == [1, 1] and p.thread_nwd.tlv.
            border_router.flag.s == [1, 1] and p.thread_nwd.tlv.border_router.flag.r == [1, 1] and p.thread_nwd.tlv.
            border_router.flag.o == [1, 1] and p.thread_nwd.tlv.stable == [0, 1, 1, 1, 0, 0, 0])

        # Step 6: The DUT MUST send a Child Update Response to MED_1
        _rpkts_med.filter_wpan_dst64(MED).filter_mle_cmd(MLE_CHILD_UPDATE_RESPONSE).must_next().must_verify(
            lambda p: {SOURCE_ADDRESS_TLV, MODE_TLV, ADDRESS_REGISTRATION_TLV} <= set(p.mle.tlv.type))

        # Step 7: The DUT MUST send an MLE Child Update Request to SED_1
        _rpkts_sed.filter_wpan_dst64(SED).filter_mle_cmd(MLE_CHILD_UPDATE_REQUEST).must_next().must_verify(
            lambda p: {Ipv6Addr('2001:2:0:1::')} == set(p.thread_nwd.tlv.prefix
                                                       ) and p.thread_nwd.tlv.border_router_16 == [0xFFFE])

        # Step 8: SED_1 send its configured global address to the DUT
        # Step 9: The DUT MUST send a Child Update Response to SED_1
        _sed_pkt = pkts.range(
            _rpkts_sed.index).filter_wpan_src64(SED).filter_mle_cmd(MLE_CHILD_UPDATE_REQUEST).must_next()
        _rpkts_sed.filter_wpan_dst64(SED).filter_mle_cmd(MLE_CHILD_UPDATE_RESPONSE).must_next().must_verify(
            lambda p: {SOURCE_ADDRESS_TLV, MODE_TLV, ADDRESS_REGISTRATION_TLV} <= set(p.mle.tlv.type) and set(
                p.mle.tlv.addr_reg_iid) < set(_sed_pkt.mle.tlv.addr_reg_iid))
# Script entry point: run under the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
| 46.760479 | 303 | 0.652068 |
74e6f9d7e0561f34b295e7b42ddfc8e0c84a6df6 | 212 | py | Python | perception/mil_vision/object_classification/HOG_descriptor.py | marcgabe15/mil_common | 75fd308365f4b2754874add917e0a0151b39b924 | [
"MIT"
] | 27 | 2020-02-17T21:54:09.000Z | 2022-03-18T17:49:23.000Z | perception/mil_vision/object_classification/HOG_descriptor.py | marcgabe15/mil_common | 75fd308365f4b2754874add917e0a0151b39b924 | [
"MIT"
] | 325 | 2019-09-11T14:13:56.000Z | 2022-03-31T00:38:30.000Z | perception/mil_vision/object_classification/HOG_descriptor.py | marcgabe15/mil_common | 75fd308365f4b2754874add917e0a0151b39b924 | [
"MIT"
] | 24 | 2019-09-16T00:29:45.000Z | 2022-03-06T10:56:38.000Z | import cv2
class HOGDescriptor(object):
    """Thin wrapper around an OpenCV HOG descriptor with fixed parameters."""

    def __init__(self):
        # Parameters: winSize, blockSize, blockStride, cellSize, nbins.
        win_size = (8, 8)
        block_size = (8, 8)
        block_stride = (4, 4)
        cell_size = (8, 8)
        nbins = 9
        self.hog = cv2.HOGDescriptor(win_size, block_size, block_stride, cell_size, nbins)

    def get_descriptor(self, img):
        """Return the HOG feature vector computed for *img*."""
        return self.hog.compute(img)
| 19.272727 | 71 | 0.599057 |
bc93dccfa20f32fa4c9cd01f54bf93dee9710710 | 1,380 | py | Python | medium/109_convert_sorted_linked_list_to_binary_search_tree.py | Sukhrobjon/leetcode | 547c200b627c774535bc22880b16d5390183aeba | [
"MIT"
] | null | null | null | medium/109_convert_sorted_linked_list_to_binary_search_tree.py | Sukhrobjon/leetcode | 547c200b627c774535bc22880b16d5390183aeba | [
"MIT"
] | null | null | null | medium/109_convert_sorted_linked_list_to_binary_search_tree.py | Sukhrobjon/leetcode | 547c200b627c774535bc22880b16d5390183aeba | [
"MIT"
] | null | null | null | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def sortedListToBST(self, head):
        """Convert a sorted singly linked list into a height-balanced BST.

        :type head: ListNode
        :rtype: TreeNode
        """
        if not head:
            return head
        return self.build_tree(head, tail=None)

    def find_middle(self, head, tail):
        """Return the middle node of the half-open sublist [head, tail).

        Classic slow/fast pointer walk: fast advances two nodes per step,
        so slow stops at the middle when fast reaches the tail sentinel.
        """
        slow = fast = head
        while fast is not tail and fast.next is not tail:
            slow = slow.next
            fast = fast.next.next
        # slow is a ListNode here whenever the sublist is non-empty.
        return slow if slow else None

    def build_tree(self, head, tail=None):
        """Recursively build a balanced BST from the sublist [head, tail)."""
        # Empty range: nothing to build.
        if head == tail:
            return None
        mid = self.find_middle(head, tail)
        # The middle value becomes the subtree root, keeping it balanced.
        root = TreeNode(mid.val)
        root.left = self.build_tree(head, mid)      # left half: [head, mid)
        root.right = self.build_tree(mid.next, tail)  # right half: (mid, tail)
        return root
c41121d43088a6a41cadb457f4081bc2d2db10d6 | 6,862 | py | Python | bin/dm_redcap_scan_completed.py | kyjimmy/datman | 262ead8ee6b6357a07f9142c1203e9e607f0a3ea | [
"Apache-2.0"
] | 17 | 2015-09-08T13:56:40.000Z | 2022-01-20T19:09:33.000Z | bin/dm_redcap_scan_completed.py | kyjimmy/datman | 262ead8ee6b6357a07f9142c1203e9e607f0a3ea | [
"Apache-2.0"
] | 169 | 2015-02-23T23:11:15.000Z | 2022-03-28T20:32:22.000Z | bin/dm_redcap_scan_completed.py | kyjimmy/datman | 262ead8ee6b6357a07f9142c1203e9e607f0a3ea | [
"Apache-2.0"
] | 21 | 2015-09-15T16:22:44.000Z | 2021-11-05T19:03:02.000Z | #!/usr/bin/env python
"""
Add MR comments from the Scan Completed instrument on REDCap to the database.
Usage:
dm_redcap_scan_completed.py [options] <study>
Arguments:
<study> Name of the study to process
Options:
-q --quiet Less logging
-v --verbose Verbose logging
-d --debug Debug logging
"""
import os
import sys
import requests
import logging
from docopt import docopt
import datman.config
import datman.scanid
import datman.dashboard as dashboard
logger = logging.getLogger(os.path.basename(__file__))
cfg = None
redcap_url = None
redcap_version = None
redcap_project = None
instrument = None
def read_token(token_file):
    """Read and return the REDCap API token stored in ``token_file``.

    Args:
        token_file (str): Path to a file whose first line is the token.

    Returns:
        str: The token with surrounding whitespace stripped.

    Raises:
        IOError: If ``token_file`` does not exist. (Message added; callers
            catching IOError are unaffected.)
    """
    if not os.path.isfile(token_file):
        logger.error('REDCap token file: {} not found'.format(token_file))
        raise IOError('REDCap token file: {} not found'.format(token_file))
    # Use a distinct name for the handle; the original rebound 'token_file'
    # itself, shadowing the path argument.
    with open(token_file, 'r') as token_fh:
        token = token_fh.readline().strip()
    return token
def get_records(api_url, token, instrument, record_key):
    """POST a record-export request to the REDCap API and return the JSON.

    Args:
        api_url (str): The REDCap API endpoint.
        token (str): API token for the project.
        instrument (str): Name of the form whose records are wanted.
        record_key (str): Field name of the record id to include.

    Returns:
        list: The decoded JSON records.

    Raises:
        Exception: If the API responds with a non-200 status code.
    """
    query = dict(token=token,
                 content='record',
                 forms=instrument,
                 format='json',
                 type='flat',
                 rawOrLabel='raw',
                 fields=record_key)
    reply = requests.post(api_url, data=query)
    if reply.status_code != 200:
        raise Exception('API request failed. HTTP status code: {}. Reason: '
                        '{}'.format(reply.status_code, reply.text))
    return reply.json()
def get_version(api_url, token):
    """Ask the REDCap server for its version string.

    Args:
        api_url (str): The REDCap API endpoint.
        token (str): API token for the project.

    Returns:
        str: The server version string.
    """
    reply = requests.post(api_url, data={'token': token,
                                         'content': 'version'})
    version = reply.content
    try:
        version = version.decode('UTF-8')
    except AttributeError:
        # Already a str; nothing to decode.
        pass
    return version
def add_session_redcap(record, record_key):
    """Add one REDCap 'Scan Completed' record to the dashboard database.

    Looks up (creating if needed) the matching dashboard session and
    attaches the REDCap record id, date, comment and event id. All
    failures are logged and cause the record to be skipped; nothing is
    raised to the caller.

    Args:
        record (dict): One REDCap record, as returned by get_records().
        record_key (str): Field name that holds the REDCap record id.
    """
    record_id = record[record_key]
    # IDs typed into the form may be lowercase; datman IDs are uppercase.
    subject_id = record[cfg.get_key('RedcapSubj')].upper()
    if not datman.scanid.is_scanid(subject_id):
        # Assume session 01 when the ID lacks a session suffix.
        subject_id = subject_id + '_01'
    try:
        # NOTE(review): is_scanid() reads like a boolean check — confirm it
        # can actually raise ParseException, otherwise this try is a no-op.
        datman.scanid.is_scanid(subject_id)
    except datman.scanid.ParseException:
        logger.error('Invalid session: {}, skipping'.format(subject_id))
        return
    try:
        # Convert KCNI-style IDs to the datman convention where required.
        ident = parse_id(subject_id)
    except datman.scanid.ParseException:
        logger.error('Invalid session: {}, skipping'.format(subject_id))
        return
    session_date = record[cfg.get_key('RedcapDate')]
    try:
        # create=True: make the session entry if it doesn't exist yet.
        session = dashboard.get_session(ident, date=session_date, create=True)
    except datman.exceptions.DashboardException as e:
        logger.error('Failed adding session {} to dashboard. Reason: '
                     '{}'.format(ident, e))
        return
    try:
        record_comment = record[cfg.get_key('RedcapComments')]
        event_id = cfg.get_key('RedcapEventId')[record['redcap_event_name']]
    except (datman.config.UndefinedSetting, datman.config.ConfigException):
        logger.error("Can't add REDCap session info. Verify that "
                     "values 'RedcapComments' and 'RedcapEventId' are "
                     "correctly defined in the config file")
        return
    except KeyError:
        # Record lacks the comment / event-name fields; store without them.
        record_comment = None
        event_id = None
    try:
        session.add_redcap(
            record_id, session_date, project=redcap_project, url=redcap_url,
            instrument=instrument, comment=record_comment, event_id=event_id,
            redcap_version=redcap_version
        )
    except Exception:
        logger.error('Failed adding REDCap info for session {} to '
                     'dashboard'.format(ident))
def parse_id(subject_id):
    """Parse the ID from the redcap form into datman convention.

    Args:
        subject_id (:obj:`str`): A string subject ID

    Raises:
        datman.scanid.ParseException: When an ID can't be converted to a
            valid datman ID.

    Returns:
        datman.scanid.Identifier
    """
    parsed = datman.scanid.parse(subject_id)

    # Already a datman-style ID: no field remapping needed.
    if isinstance(parsed, datman.scanid.DatmanIdentifier):
        return parsed

    # A KCNI ID may need its study/site fields mapped onto datman names.
    try:
        id_map = cfg.get_key('IdMap')
    except datman.config.UndefinedSetting:
        # No map configured: KCNI fields already match the datman fields.
        return parsed

    # If the study already matches a destination study, leave the ID alone,
    # otherwise duplicate records may be created.
    if 'Study' in id_map and parsed.study in id_map['Study'].values():
        return parsed

    return datman.scanid.parse(subject_id, settings=id_map)
def main():
    """Entry point: copy completed-scan REDCap records into the dashboard.

    Parses command-line arguments, configures logging and the study config
    (populating the module-level globals used by the helpers above), pulls
    all records for the configured instrument from the REDCap API, and adds
    each completed record to the dashboard database.
    """
    global cfg
    global redcap_url
    global redcap_version
    global redcap_project
    global instrument

    arguments = docopt(__doc__)
    study = arguments['<study>']
    quiet = arguments['--quiet']
    verbose = arguments['--verbose']
    debug = arguments['--debug']

    # setup logging; the most verbose flag given wins (debug > verbose)
    ch = logging.StreamHandler(sys.stdout)
    log_level = logging.WARN
    if quiet:
        log_level = logging.ERROR
    if verbose:
        log_level = logging.INFO
    if debug:
        log_level = logging.DEBUG
    logger.setLevel(log_level)
    ch.setLevel(log_level)
    formatter = logging.Formatter('%(asctime)s - %(name)s - {study} - '
                                  '%(levelname)s - %(message)s'.format(
                                      study=study))
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    logging.getLogger('datman.utils').addHandler(ch)
    logging.getLogger('datman.dashboard').addHandler(ch)

    # setup the config object for this study
    cfg = datman.config.config(study=study)

    # get paths
    dir_meta = cfg.get_path('meta')

    # configure redcap variables; the web URL is the API URL minus '/api/'
    api_url = cfg.get_key('RedcapUrl')
    redcap_url = api_url.replace('/api/', '/')

    token_path = os.path.join(dir_meta, cfg.get_key('RedcapToken'))
    token = read_token(token_path)

    redcap_project = cfg.get_key('RedcapProjectId')
    instrument = cfg.get_key('RedcapInstrument')
    date_field = cfg.get_key('RedcapDate')
    status_field = cfg.get_key('RedcapStatus')
    status_val = cfg.get_key('RedcapStatusValue')
    record_key = cfg.get_key('RedcapRecordKey')

    # make status_val into a list so membership tests below always work
    if not (isinstance(status_val, list)):
        status_val = [status_val]

    redcap_version = get_version(api_url, token)

    response_json = get_records(api_url, token, instrument, record_key)

    project_records = []
    for item in response_json:
        # only grab records where instrument has been marked complete
        # (a date is present AND the status field matches a completed value)
        if not (item[date_field] and item[status_field] in status_val):
            continue
        project_records.append(item)

    for record in project_records:
        add_session_redcap(record, record_key)
if __name__ == '__main__':
main()
| 28.953586 | 78 | 0.645293 |
db07e044b95c0a836ec0465d00f0696e0cd11b48 | 2,082 | py | Python | custom_components/hacs/__init__.py | Swampen/home-assistant-config | c9cd4027df1d460fd27d469a53804320a0d1aaa1 | [
"MIT"
] | 1,383 | 2018-06-23T20:16:57.000Z | 2022-03-30T09:10:06.000Z | config/custom_components/hacs/__init__.py | jclark2019/home-assistant-config | a1354a8889e12b961fd16f4800c452b4fd0124f0 | [
"MIT"
] | 303 | 2020-10-16T18:38:06.000Z | 2022-03-31T07:16:38.000Z | config/custom_components/hacs/__init__.py | jclark2019/home-assistant-config | a1354a8889e12b961fd16f4800c452b4fd0124f0 | [
"MIT"
] | 270 | 2018-12-17T05:54:10.000Z | 2022-03-23T20:28:54.000Z | """
HACS gives you a powerful UI to handle downloads of all your custom needs.
For more details about this integration, please refer to the documentation at
https://hacs.xyz/
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
import voluptuous as vol
from .const import DOMAIN, PLATFORMS
from .enums import HacsDisabledReason
from .helpers.functions.configuration_schema import hacs_config_combined
from .operational.setup import (
async_setup as hacs_yaml_setup,
async_setup_entry as hacs_ui_setup,
)
if TYPE_CHECKING:
from .base import HacsBase
CONFIG_SCHEMA = vol.Schema({DOMAIN: hacs_config_combined()}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass: HomeAssistant, config: dict[str, Any]) -> bool:
    """Set up this integration using yaml.

    Delegates to the YAML setup helper and reports whether it succeeded.
    """
    setup_ok = await hacs_yaml_setup(hass, config)
    return setup_ok
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
    """Set up this integration using UI.

    Registers a listener so the entry is reloaded whenever its options
    change, then delegates to the UI setup helper.
    """
    config_entry.add_update_listener(async_reload_entry)
    setup_ok = await hacs_ui_setup(hass, config_entry)
    return setup_ok
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
    """Handle removal of an entry.

    Cancels HACS's recurring tasks, removes the sidepanel (if registered),
    unloads all platforms, disables HACS, and clears its state from
    hass.data.

    Returns:
        bool: True if every platform unloaded cleanly.
    """
    hacs: HacsBase = hass.data[DOMAIN]

    for task in hacs.recuring_tasks:
        # Cancel all pending tasks — each entry appears to be a
        # cancel/unsubscribe callback; confirm against HacsBase.
        task()

    try:
        # Remove the HACS sidepanel only if the frontend registered it.
        if hass.data.get("frontend_panels", {}).get("hacs"):
            hacs.log.info("Removing sidepanel")
            hass.components.frontend.async_remove_panel("hacs")
    except AttributeError:
        # Frontend component not loaded; nothing to remove.
        pass

    unload_ok = await hass.config_entries.async_unload_platforms(config_entry, PLATFORMS)

    hacs.disable_hacs(HacsDisabledReason.REMOVED)
    hass.data.pop(DOMAIN, None)

    return unload_ok
async def async_reload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
    """Reload the HACS config entry.

    Performs a full unload followed by a fresh setup so configuration
    changes take effect; registered by async_setup_entry as the entry's
    update listener.
    """
    await async_unload_entry(hass, config_entry)
    await async_setup_entry(hass, config_entry)
| 30.173913 | 89 | 0.74976 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.