hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
01659930e839b18fafb2d3804ee333ff0d0a2b6d | 335 | gyp | Python | binding.gyp | nomagick/tea-napi | 1946f8db63ed2c85e59f7a181b0b87e203654c23 | [
"MIT"
] | null | null | null | binding.gyp | nomagick/tea-napi | 1946f8db63ed2c85e59f7a181b0b87e203654c23 | [
"MIT"
] | null | null | null | binding.gyp | nomagick/tea-napi | 1946f8db63ed2c85e59f7a181b0b87e203654c23 | [
"MIT"
] | null | null | null | {
"targets": [
{
"target_name": "tea_napi",
"sources": [ "src/tea-napi.cc" ],
"cflags!": [ "-fno-exceptions" ],
"cflags_cc!": [ "-fno-exceptions" ],
"include_dirs": [
"<!@(node -p \"require('node-addon-api').include\")"
],
      "defines": [ "NAPI_DISABLE_CPP_EXCEPTIONS" ]
}
]
} | 23.928571 | 60 | 0.486567 |
67130cf0a00b0c129902c154d7de78acda5c56e8 | 1,052 | py | Python | GetRabbitPairs_20210318A.py | LukeHebert/Rosalind | 1c6e1b937056f3c92c21df3d691b6b59be619125 | [
"MIT"
] | null | null | null | GetRabbitPairs_20210318A.py | LukeHebert/Rosalind | 1c6e1b937056f3c92c21df3d691b6b59be619125 | [
"MIT"
] | null | null | null | GetRabbitPairs_20210318A.py | LukeHebert/Rosalind | 1c6e1b937056f3c92c21df3d691b6b59be619125 | [
"MIT"
] | null | null | null | '''
Author: Luke Hebert
Date begun: March 18th, 2021
Description: solves rabbit Fibonacci sequence from Rosalind
input is number of generations and offspring-per-parent rate
output is number of total rabbit units (pairs, in this case) at the
given generation
'''
# Interactive inputs: number of generations to simulate and the number of
# offspring pairs each adult pair produces per generation.
print('Enter a positive integer for the desired number of generations:')
gen = int(input())
print('Enter a positive integer for the "pairs-per-adult pair" offspring rate:')
off_rate = int(input())
def get_total_rabbits(generation, offspring_rate):
    """Return the number of rabbit pairs alive at the given generation.

    Fibonacci-style recurrence F(n) = F(n-1) + k*F(n-2): every adult pair
    produces `offspring_rate` child pairs each generation, and every child
    pair matures into an adult pair the following generation. Generation 1
    starts with a single child pair.
    """
    children, adults = 1, 0
    # Advance the population one generation at a time (no-op for gen <= 1).
    for _ in range(max(generation - 1, 0)):
        children, adults = adults * offspring_rate, adults + children
    return children + adults
# Compute and report the pair count for the requested generation.
rabbit_count = get_total_rabbits(gen, off_rate)
print('\n\nTotal number of rabbits after ' + str(gen) + ' generations:\t' + str(rabbit_count))
| 37.571429 | 95 | 0.686312 |
63d4e9a1cad23d2f4687ef92e42045976ffc7f50 | 39,674 | py | Python | run_classifier_transfer_learning.py | junhahyung/bert_transfer | c59f8598e3203400b094f48896127fefe7aa28b4 | [
"Apache-2.0"
] | null | null | null | run_classifier_transfer_learning.py | junhahyung/bert_transfer | c59f8598e3203400b094f48896127fefe7aa28b4 | [
"Apache-2.0"
] | null | null | null | run_classifier_transfer_learning.py | junhahyung/bert_transfer | c59f8598e3203400b094f48896127fefe7aa28b4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
    "data_dir", None,
    "The input data dir. Should contain the .tsv files (or other data files) "
    "for the task.")
flags.DEFINE_string(
    "bert_config_file", None,
    "The config json file corresponding to the pre-trained BERT model. "
    "This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
    "output_dir", None,
    "The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")
flags.DEFINE_integer(
    "max_seq_length", 128,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
    "do_predict", False,
    "Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
                   "Total number of training epochs to perform.")
flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
                     "How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")
# The flags below configure Cloud TPU execution; they are ignored when
# running on GPU/CPU (use_tpu=False).
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")
tf.flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
tf.flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
  """A single training/test example for simple sequence classification.

  Plain data holder; attributes mirror the constructor arguments.
  """

  def __init__(self, guid, text_a, text_b=None, label=None):
    """Constructs a InputExample.

    Args:
      guid: Unique id for the example.
      text_a: string. The untokenized text of the first sequence. For single
        sequence tasks, only this sequence must be specified.
      text_b: (Optional) string. The untokenized text of the second sequence.
        Only must be specified for sequence pair tasks.
      label: (Optional) string. The label of the example. This should be
        specified for train and dev examples, but not for test examples.
    """
    self.guid = guid
    self.text_a = text_a
    self.text_b = text_b
    self.label = label
class PaddingInputExample(object):
  """Fake example so the num input examples is a multiple of the batch size.

  When running eval/predict on the TPU, we need to pad the number of examples
  to be a multiple of the batch size, because the TPU requires a fixed batch
  size. The alternative is to drop the last batch, which is bad because it means
  the entire output data won't be generated.

  We use this class instead of `None` because treating `None` as padding
  batches could cause silent errors.
  """
class InputFeatures(object):
  """A single set of features of data.

  Holds the model-ready representation of one example: token ids, attention
  mask and segment ids (all padded to max_seq_length), the integer label id,
  and `is_real_example`, which is False only for TPU batch-padding entries.
  """

  def __init__(self,
               input_ids,
               input_mask,
               segment_ids,
               label_id,
               is_real_example=True):
    self.input_ids = input_ids
    self.input_mask = input_mask
    self.segment_ids = segment_ids
    self.label_id = label_id
    self.is_real_example = is_real_example
class DataProcessor(object):
  """Base class for data converters for sequence classification data sets.

  Subclasses implement the four `get_*` methods; `_read_tsv` is a shared
  helper for the GLUE-style tab-separated files.
  """

  def get_train_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the train set."""
    raise NotImplementedError()

  def get_dev_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the dev set."""
    raise NotImplementedError()

  def get_test_examples(self, data_dir):
    """Gets a collection of `InputExample`s for prediction."""
    raise NotImplementedError()

  def get_labels(self):
    """Gets the list of labels for this data set."""
    raise NotImplementedError()

  @classmethod
  def _read_tsv(cls, input_file, quotechar=None):
    """Reads a tab separated value file and returns its rows as a list."""
    with tf.gfile.Open(input_file, "r") as f:
      return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
class XnliProcessor(DataProcessor):
  """Processor for the XNLI data set."""

  def __init__(self):
    # Only this language's rows are used from the multilingual files.
    self.language = "zh"

  def get_train_examples(self, data_dir):
    """See base class."""
    path = os.path.join(data_dir, "multinli",
                        "multinli.train.%s.tsv" % self.language)
    examples = []
    for idx, row in enumerate(self._read_tsv(path)):
      if idx == 0:  # header row
        continue
      label = tokenization.convert_to_unicode(row[2])
      # The raw file uses "contradictory"; normalize to the canonical name.
      if label == tokenization.convert_to_unicode("contradictory"):
        label = tokenization.convert_to_unicode("contradiction")
      examples.append(
          InputExample(
              guid="train-%d" % idx,
              text_a=tokenization.convert_to_unicode(row[0]),
              text_b=tokenization.convert_to_unicode(row[1]),
              label=label))
    return examples

  def get_dev_examples(self, data_dir):
    """See base class."""
    rows = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
    wanted_language = tokenization.convert_to_unicode(self.language)
    examples = []
    for idx, row in enumerate(rows):
      if idx == 0:  # header row
        continue
      # The dev file mixes all languages; keep only the configured one.
      if tokenization.convert_to_unicode(row[0]) != wanted_language:
        continue
      examples.append(
          InputExample(
              guid="dev-%d" % idx,
              text_a=tokenization.convert_to_unicode(row[6]),
              text_b=tokenization.convert_to_unicode(row[7]),
              label=tokenization.convert_to_unicode(row[1])))
    return examples

  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
  """Processor for the MultiNLI data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    rows = self._read_tsv(os.path.join(data_dir, "train.tsv"))
    return self._create_examples(rows, "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    rows = self._read_tsv(os.path.join(data_dir, "dev_matched.tsv"))
    return self._create_examples(rows, "dev_matched")

  def get_test_examples(self, data_dir):
    """See base class."""
    rows = self._read_tsv(os.path.join(data_dir, "test_matched.tsv"))
    return self._create_examples(rows, "test")

  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for idx, row in enumerate(lines):
      if idx == 0:  # header row
        continue
      guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(row[0]))
      # The test split has no gold labels, so a dummy label is used.
      if set_type == "test":
        label = "contradiction"
      else:
        label = tokenization.convert_to_unicode(row[-1])
      examples.append(
          InputExample(
              guid=guid,
              text_a=tokenization.convert_to_unicode(row[8]),
              text_b=tokenization.convert_to_unicode(row[9]),
              label=label))
    return examples
class MrpcProcessor(DataProcessor):
  """Processor for the MRPC data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    rows = self._read_tsv(os.path.join(data_dir, "train.tsv"))
    return self._create_examples(rows, "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    rows = self._read_tsv(os.path.join(data_dir, "dev.tsv"))
    return self._create_examples(rows, "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    rows = self._read_tsv(os.path.join(data_dir, "test.tsv"))
    return self._create_examples(rows, "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for idx, row in enumerate(lines):
      if idx == 0:  # header row
        continue
      # The test split has no gold labels, so a dummy label is used.
      if set_type == "test":
        label = "0"
      else:
        label = tokenization.convert_to_unicode(row[0])
      examples.append(
          InputExample(
              guid="%s-%s" % (set_type, idx),
              text_a=tokenization.convert_to_unicode(row[3]),
              text_b=tokenization.convert_to_unicode(row[4]),
              label=label))
    return examples
class ColaProcessor(DataProcessor):
  """Processor for the CoLA data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    rows = self._read_tsv(os.path.join(data_dir, "train.tsv"))
    return self._create_examples(rows, "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    rows = self._read_tsv(os.path.join(data_dir, "dev.tsv"))
    return self._create_examples(rows, "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    rows = self._read_tsv(os.path.join(data_dir, "test.tsv"))
    return self._create_examples(rows, "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for idx, row in enumerate(lines):
      # Only the test set has a header
      if set_type == "test" and idx == 0:
        continue
      # Column layout differs between splits; the test split also lacks
      # gold labels, so a dummy label is used there.
      if set_type == "test":
        text_a = tokenization.convert_to_unicode(row[1])
        label = "0"
      else:
        text_a = tokenization.convert_to_unicode(row[3])
        label = tokenization.convert_to_unicode(row[1])
      examples.append(
          InputExample(
              guid="%s-%s" % (set_type, idx),
              text_a=text_a,
              text_b=None,
              label=label))
    return examples
class KsaProcessor(DataProcessor):
  """Processor for the fine-grained emotion analysis (Korean) data set (modified by Junha Hyung)."""

  # CSV file name for each split, relative to data_dir.
  _SPLIT_FILES = {
      "train": "korean_train.csv",
      "dev": "korean_dev.csv",
      "test": "korean_test.csv",
  }

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(data_dir, "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(data_dir, "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(data_dir, "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1", "2", "3"]

  def _read_csv(self, input_file):
    """Reads an excel-dialect CSV file and returns its rows as a list."""
    with tf.gfile.Open(input_file, "r") as f:
      return list(csv.reader(f, dialect='excel'))

  def _create_examples(self, data_dir, set_type):
    """Builds `InputExample`s for one split, skipping the header row.

    BUG FIX: the original per-split loops wrapped only `text_a = line[2]`
    in try/except, printed the bad row, and then fell through to
    `examples.append(...)` -- appending the *previous* row's text (or
    raising NameError on the first data row). Malformed rows are now
    skipped explicitly.
    """
    path = os.path.join(data_dir, self._SPLIT_FILES[set_type])
    examples = []
    for (i, line) in enumerate(self._read_csv(path)):
      if i == 0:  # header row
        continue
      if len(line) < 3:
        tf.logging.warning("Skipping malformed %s line %d: %s",
                           set_type, i, line)
        continue
      guid = "%s-%d" % (set_type, i)
      text_a = tokenization.convert_to_unicode(line[2])  # sentence text
      label = tokenization.convert_to_unicode(line[1])   # emotion label id
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    return examples
class EmoProcessor(DataProcessor):
  """Processor for the fine-grained emotion analysis (Korean) data set (modified by Junha Hyung)."""

  # CSV file name for each split, relative to data_dir.
  _SPLIT_FILES = {
      "train": "train.csv",
      "dev": "dev.csv",
      "test": "test.csv",
  }

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(data_dir, "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(data_dir, "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(data_dir, "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1", "2", "3"]

  def _read_csv(self, input_file):
    """Reads an excel-dialect CSV file and returns its rows as a list."""
    with tf.gfile.Open(input_file, "r") as f:
      return list(csv.reader(f, dialect='excel'))

  def _create_examples(self, data_dir, set_type):
    """Builds `InputExample`s for one split, skipping the header row.

    BUG FIX: the original per-split loops wrapped only `text_a = line[2]`
    in try/except, printed the bad row, and then fell through to
    `examples.append(...)` -- appending the *previous* row's text (or
    raising NameError on the first data row). Malformed rows are now
    skipped explicitly.
    """
    path = os.path.join(data_dir, self._SPLIT_FILES[set_type])
    examples = []
    for (i, line) in enumerate(self._read_csv(path)):
      if i == 0:  # header row
        continue
      if len(line) < 3:
        tf.logging.warning("Skipping malformed %s line %d: %s",
                           set_type, i, line)
        continue
      guid = "%s-%d" % (set_type, i)
      text_a = tokenization.convert_to_unicode(line[2])  # sentence text
      label = tokenization.convert_to_unicode(line[1])   # emotion label id
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer):
  """Converts a single `InputExample` into a single `InputFeatures`.

  Args:
    ex_index: int. Index of the example; the first 5 examples are logged.
    example: an `InputExample`, or a `PaddingInputExample` used to pad a
      batch up to a fixed size on TPU.
    label_list: list of all label strings for the task; label ids are the
      positions in this list.
    max_seq_length: int. Output id lists are padded/truncated to exactly
      this length.
    tokenizer: tokenizer providing `tokenize` and `convert_tokens_to_ids`.

  Returns:
    An `InputFeatures` instance.
  """
  # TPU padding examples get all-zero features; is_real_example=False lets
  # metrics mask them out downstream.
  if isinstance(example, PaddingInputExample):
    return InputFeatures(
        input_ids=[0] * max_seq_length,
        input_mask=[0] * max_seq_length,
        segment_ids=[0] * max_seq_length,
        label_id=0,
        is_real_example=False)

  # Map each label string to its index in label_list.
  label_map = {}
  for (i, label) in enumerate(label_list):
    label_map[label] = i

  tokens_a = tokenizer.tokenize(example.text_a)
  tokens_b = None
  if example.text_b:
    tokens_b = tokenizer.tokenize(example.text_b)

  if tokens_b:
    # Modifies `tokens_a` and `tokens_b` in place so that the total
    # length is less than the specified length.
    # Account for [CLS], [SEP], [SEP] with "- 3"
    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
  else:
    # Account for [CLS] and [SEP] with "- 2"
    if len(tokens_a) > max_seq_length - 2:
      tokens_a = tokens_a[0:(max_seq_length - 2)]

  # The convention in BERT is:
  # (a) For sequence pairs:
  #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
  #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
  # (b) For single sequences:
  #  tokens:   [CLS] the dog is hairy . [SEP]
  #  type_ids: 0     0   0   0  0     0 0
  #
  # Where "type_ids" are used to indicate whether this is the first
  # sequence or the second sequence. The embedding vectors for `type=0` and
  # `type=1` were learned during pre-training and are added to the wordpiece
  # embedding vector (and position vector). This is not *strictly* necessary
  # since the [SEP] token unambiguously separates the sequences, but it makes
  # it easier for the model to learn the concept of sequences.
  #
  # For classification tasks, the first vector (corresponding to [CLS]) is
  # used as the "sentence vector". Note that this only makes sense because
  # the entire model is fine-tuned.
  tokens = []
  segment_ids = []
  tokens.append("[CLS]")
  segment_ids.append(0)
  for token in tokens_a:
    tokens.append(token)
    segment_ids.append(0)
  tokens.append("[SEP]")
  segment_ids.append(0)

  if tokens_b:
    for token in tokens_b:
      tokens.append(token)
      segment_ids.append(1)
    tokens.append("[SEP]")
    segment_ids.append(1)

  input_ids = tokenizer.convert_tokens_to_ids(tokens)

  # The mask has 1 for real tokens and 0 for padding tokens. Only real
  # tokens are attended to.
  input_mask = [1] * len(input_ids)

  # Zero-pad up to the sequence length.
  while len(input_ids) < max_seq_length:
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)

  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length

  label_id = label_map[example.label]
  # Log the first few fully-expanded examples for manual sanity checking.
  if ex_index < 5:
    tf.logging.info("*** Example ***")
    tf.logging.info("guid: %s" % (example.guid))
    tf.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in tokens]))
    tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
    tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
    tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
    tf.logging.info("label: %s (id = %d)" % (example.label, label_id))

  feature = InputFeatures(
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      label_id=label_id,
      is_real_example=True)
  return feature
def file_based_convert_examples_to_features(
    examples, label_list, max_seq_length, tokenizer, output_file):
  """Convert a set of `InputExample`s to a TFRecord file.

  Each example is serialized as a tf.train.Example with int64 features
  (input_ids, input_mask, segment_ids, label_ids, is_real_example) and
  appended to `output_file`.

  Improvements over the original: `create_int_feature` is defined once
  instead of being re-created on every loop iteration, and the writer is
  managed with a `with` block so it is closed even if conversion raises.
  """

  def create_int_feature(values):
    """Wraps an iterable of ints in an int64 tf.train.Feature."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

  with tf.python_io.TFRecordWriter(output_file) as writer:
    for (ex_index, example) in enumerate(examples):
      if ex_index % 10000 == 0:
        tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

      feature = convert_single_example(ex_index, example, label_list,
                                       max_seq_length, tokenizer)

      features = collections.OrderedDict()
      features["input_ids"] = create_int_feature(feature.input_ids)
      features["input_mask"] = create_int_feature(feature.input_mask)
      features["segment_ids"] = create_int_feature(feature.segment_ids)
      features["label_ids"] = create_int_feature([feature.label_id])
      features["is_real_example"] = create_int_feature(
          [int(feature.is_real_example)])

      tf_example = tf.train.Example(features=tf.train.Features(feature=features))
      writer.write(tf_example.SerializeToString())
def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator.

  Args:
    input_file: path to a TFRecord file produced by
      `file_based_convert_examples_to_features`.
    seq_length: int. Fixed length of the id features in each record.
    is_training: bool. When True the dataset repeats and shuffles.
    drop_remainder: bool. Whether to drop the final partial batch.

  Returns:
    An `input_fn(params)` that reads `params["batch_size"]` and returns a
    batched `tf.data.Dataset` of decoded features.
  """

  # Schema of the serialized tf.train.Examples in the TFRecord file.
  name_to_features = {
      "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "label_ids": tf.FixedLenFeature([], tf.int64),
      "is_real_example": tf.FixedLenFeature([], tf.int64),
  }

  def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)

    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
      t = example[name]
      if t.dtype == tf.int64:
        t = tf.to_int32(t)
      example[name] = t

    return example

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]

    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    d = tf.data.TFRecordDataset(input_file)
    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)

    d = d.apply(
        tf.contrib.data.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))

    return d

  return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 labels, num_labels, use_one_hot_embeddings):
  """Creates a classification model.

  Builds a BERT encoder plus a single dense softmax layer over the pooled
  [CLS] output.

  Args:
    bert_config: `modeling.BertConfig` for the encoder.
    is_training: bool. Enables dropout on the pooled output when True.
    input_ids: int32 Tensor [batch_size, seq_length] of wordpiece ids.
    input_mask: int32 Tensor [batch_size, seq_length]; 1 = real token.
    segment_ids: int32 Tensor [batch_size, seq_length] of type ids.
    labels: int32 Tensor [batch_size] of label ids.
    num_labels: int. Number of target classes.
    use_one_hot_embeddings: bool, forwarded to `modeling.BertModel`.

  Returns:
    (loss, per_example_loss, logits, probabilities): mean cross-entropy
    loss, per-example loss [batch_size], raw logits and softmax
    probabilities [batch_size, num_labels].
  """
  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings)

  # In the demo, we are doing a simple classification task on the entire
  # segment.
  #
  # If you want to use the token-level output, use model.get_sequence_output()
  # instead.
  output_layer = model.get_pooled_output()

  hidden_size = output_layer.shape[-1].value

  # Classification head: these variable names ("output_weights" /
  # "output_bias") are matched by name in model_fn_builder to exclude the
  # head from checkpoint restoration during transfer learning.
  output_weights = tf.get_variable(
      "output_weights", [num_labels, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))

  output_bias = tf.get_variable(
      "output_bias", [num_labels], initializer=tf.zeros_initializer())

  with tf.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout
      output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)

    return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
  """Returns `model_fn` closure for TPUEstimator.

  The closure builds the BERT classifier, restores weights from
  `init_checkpoint` while excluding the final classification layer (so that
  layer is trained from scratch for transfer learning), and returns the
  appropriate `TPUEstimatorSpec` for train / eval / predict modes.
  """

  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""

    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]
    # Padding examples (TPU) carry is_real_example=0 so eval metrics can
    # mask them out; default to all-ones when the feature is absent.
    if "is_real_example" in features:
      is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
    else:
      is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)

    (total_loss, per_example_loss, logits, probabilities) = create_model(
        bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
        num_labels, use_one_hot_embeddings)

    tvars = tf.trainable_variables()
    tf.logging.info("trainable variables : %s" % tvars)

    # Transfer learning: exclude the final classification layer from
    # checkpoint restoration so it starts from fresh initialization.
    # BUG FIX: the original code called `tvars.remove(var)` while iterating
    # over `tvars`, which skips the element following each removal; since
    # "output_bias" is created immediately after "output_weights", it was
    # never removed and was still mapped from the checkpoint. Building a
    # filtered list avoids that. This only affects which variables are
    # initialized from the checkpoint; the optimizer still trains all
    # trainable variables, including the classification layer.
    tvars = [
        var for var in tvars
        if "output_weights" not in var.name and "output_bias" not in var.name
    ]
    tf.logging.info("trainable variables except final layer : %s" % tvars)

    initialized_variable_names = {}
    scaffold_fn = None
    if init_checkpoint:
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:

        def tpu_scaffold():
          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
      else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.EVAL:

      def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        """Computes accuracy and mean loss, weighted to ignore padding."""
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        accuracy = tf.metrics.accuracy(
            labels=label_ids, predictions=predictions, weights=is_real_example)
        loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }

      eval_metrics = (metric_fn,
                      [per_example_loss, label_ids, logits, is_real_example])
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={"probabilities": probabilities},
          scaffold_fn=scaffold_fn)
    return output_spec

  return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator.

  In-memory variant of `file_based_input_fn_builder`: the already-converted
  `InputFeatures` are held in Python lists and turned into constant tensors.

  Args:
    features: list of `InputFeatures`.
    seq_length: int. Length of each id list in the features.
    is_training: bool. When True the dataset repeats and shuffles.
    drop_remainder: bool. Whether to drop the final partial batch.

  Returns:
    An `input_fn(params)` returning a batched `tf.data.Dataset`.
  """

  # Transpose the list of feature objects into parallel per-field lists.
  all_input_ids = []
  all_input_mask = []
  all_segment_ids = []
  all_label_ids = []

  for feature in features:
    all_input_ids.append(feature.input_ids)
    all_input_mask.append(feature.input_mask)
    all_segment_ids.append(feature.segment_ids)
    all_label_ids.append(feature.label_id)

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]

    num_examples = len(features)

    # This is for demo purposes and does NOT scale to large data sets. We do
    # not use Dataset.from_generator() because that uses tf.py_func which is
    # not TPU compatible. The right way to load data is with TFRecordReader.
    d = tf.data.Dataset.from_tensor_slices({
        "input_ids":
            tf.constant(
                all_input_ids, shape=[num_examples, seq_length],
                dtype=tf.int32),
        "input_mask":
            tf.constant(
                all_input_mask,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "segment_ids":
            tf.constant(
                all_segment_ids,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "label_ids":
            tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
    })

    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)

    d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
    return d

  return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer):
  """Convert a set of `InputExample`s to a list of `InputFeatures`."""

  features = []
  total = len(examples)
  for ex_index, example in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, total))
    features.append(
        convert_single_example(ex_index, example, label_list, max_seq_length,
                               tokenizer))
  return features
def main(_):
  """Entry point: fine-tunes/evaluates/predicts a BERT classifier per FLAGS."""
  tf.logging.set_verbosity(tf.logging.INFO)

  # Registry of supported GLUE-style tasks -> their data processors.
  processors = {
      "cola": ColaProcessor,
      "mnli": MnliProcessor,
      "mrpc": MrpcProcessor,
      "xnli": XnliProcessor,
  }

  # Fail early if the casing flag contradicts the pretrained checkpoint.
  tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                FLAGS.init_checkpoint)

  if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
    raise ValueError(
        "At least one of `do_train`, `do_eval` or `do_predict' must be True.")

  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

  # BERT's learned position embeddings cap the usable sequence length.
  if FLAGS.max_seq_length > bert_config.max_position_embeddings:
    raise ValueError(
        "Cannot use sequence length %d because the BERT model "
        "was only trained up to sequence length %d" %
        (FLAGS.max_seq_length, bert_config.max_position_embeddings))

  tf.gfile.MakeDirs(FLAGS.output_dir)

  task_name = FLAGS.task_name.lower()

  if task_name not in processors:
    raise ValueError("Task not found: %s" % (task_name))

  processor = processors[task_name]()

  label_list = processor.get_labels()

  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

  # Resolve the TPU cluster only when a TPU run was requested by name.
  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))

  # Training step counts are derived from epochs over the training set.
  train_examples = None
  num_train_steps = None
  num_warmup_steps = None
  if FLAGS.do_train:
    train_examples = processor.get_train_examples(FLAGS.data_dir)
    num_train_steps = int(
        len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
    num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)

  model_fn = model_fn_builder(
      bert_config=bert_config,
      num_labels=len(label_list),
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=num_train_steps,
      num_warmup_steps=num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu)

  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = tf.contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size)

  if FLAGS.do_train:
    # Features are serialized to a TFRecord file and streamed back in, which
    # is the TPU-compatible input path.
    train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
    file_based_convert_examples_to_features(
        train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
    tf.logging.info("***** Running training *****")
    tf.logging.info(" Num examples = %d", len(train_examples))
    tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
    tf.logging.info(" Num steps = %d", num_train_steps)
    train_input_fn = file_based_input_fn_builder(
        input_file=train_file,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

  if FLAGS.do_eval:
    eval_examples = processor.get_dev_examples(FLAGS.data_dir)
    num_actual_eval_examples = len(eval_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on. These do NOT count towards the metric (all tf.metrics
      # support a per-instance weight, and these get a weight of 0.0).
      while len(eval_examples) % FLAGS.eval_batch_size != 0:
        eval_examples.append(PaddingInputExample())

    eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
    file_based_convert_examples_to_features(
        eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)

    tf.logging.info("***** Running evaluation *****")
    tf.logging.info(" Num examples = %d (%d actual, %d padding)",
                    len(eval_examples), num_actual_eval_examples,
                    len(eval_examples) - num_actual_eval_examples)
    tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)

    # This tells the estimator to run through the entire set.
    eval_steps = None
    # However, if running eval on the TPU, you will need to specify the
    # number of steps.
    if FLAGS.use_tpu:
      assert len(eval_examples) % FLAGS.eval_batch_size == 0
      eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)

    eval_drop_remainder = True if FLAGS.use_tpu else False
    eval_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=eval_drop_remainder)

    result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)

    # Metrics are both logged and written to eval_results.txt.
    output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
    with tf.gfile.GFile(output_eval_file, "w") as writer:
      tf.logging.info("***** Eval results *****")
      for key in sorted(result.keys()):
        tf.logging.info(" %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))

  if FLAGS.do_predict:
    predict_examples = processor.get_test_examples(FLAGS.data_dir)
    num_actual_predict_examples = len(predict_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on.
      while len(predict_examples) % FLAGS.predict_batch_size != 0:
        predict_examples.append(PaddingInputExample())

    predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
    file_based_convert_examples_to_features(predict_examples, label_list,
                                            FLAGS.max_seq_length, tokenizer,
                                            predict_file)

    tf.logging.info("***** Running prediction*****")
    tf.logging.info(" Num examples = %d (%d actual, %d padding)",
                    len(predict_examples), num_actual_predict_examples,
                    len(predict_examples) - num_actual_predict_examples)
    tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)

    predict_drop_remainder = True if FLAGS.use_tpu else False
    predict_input_fn = file_based_input_fn_builder(
        input_file=predict_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=predict_drop_remainder)

    result = estimator.predict(input_fn=predict_input_fn)

    # One tab-separated row of class probabilities per *actual* example;
    # the loop stops before writing rows produced by TPU padding examples.
    output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
    with tf.gfile.GFile(output_predict_file, "w") as writer:
      num_written_lines = 0
      tf.logging.info("***** Predict results *****")
      for (i, prediction) in enumerate(result):
        probabilities = prediction["probabilities"]
        if i >= num_actual_predict_examples:
          break
        output_line = "\t".join(
            str(class_probability)
            for class_probability in probabilities) + "\n"
        writer.write(output_line)
        num_written_lines += 1
    assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
  # These flags have no safe defaults; fail fast if the caller omits them.
  flags.mark_flag_as_required("data_dir")
  flags.mark_flag_as_required("task_name")
  flags.mark_flag_as_required("vocab_file")
  flags.mark_flag_as_required("bert_config_file")
  flags.mark_flag_as_required("output_dir")
  # tf.app.run parses the command-line flags and then calls main(_).
  tf.app.run()
| 35.328584 | 100 | 0.670691 |
bb1c35fbd98fce9b19d22d91b23dac5b50da241d | 2,539 | py | Python | soda/core/soda/execution/change_over_time_metric_check.py | sodadata/soda-core | d9b98d4f6f3364c5eb8210e8288c4c861bcf8f8a | [
"Apache-2.0"
] | 4 | 2022-03-23T02:43:42.000Z | 2022-03-31T15:20:54.000Z | soda/core/soda/execution/change_over_time_metric_check.py | sodadata/soda-core | d9b98d4f6f3364c5eb8210e8288c4c861bcf8f8a | [
"Apache-2.0"
] | 543 | 2022-03-22T09:02:17.000Z | 2022-03-31T16:29:41.000Z | soda/core/soda/execution/change_over_time_metric_check.py | sodadata/soda-core | d9b98d4f6f3364c5eb8210e8288c4c861bcf8f8a | [
"Apache-2.0"
] | 1 | 2022-03-27T03:37:55.000Z | 2022-03-27T03:37:55.000Z | from typing import Dict, Optional
from soda.execution.metric import Metric
from soda.execution.metric_check import MetricCheck
from soda.soda_cloud.historic_descriptor import HistoricChangeOverTimeDescriptor
from soda.sodacl.metric_check_cfg import MetricCheckCfg
# Key under which the historic metric aggregate is registered in
# `historic_descriptors` and later looked up in the `historic_values`
# dict passed to `evaluate`.
KEY_HISTORIC_METRIC_AGGREGATE = "historic_metric_aggregate"
class ChangeOverTimeMetricCheck(MetricCheck):
    """Metric check that compares the current metric value against a
    historic aggregate of the same metric and evaluates the difference."""

    def __init__(
        self,
        check_cfg: "MetricCheckCfg",
        data_source_scan: "DataSourceScan",
        partition: Optional["Partition"] = None,
        column: Optional["Column"] = None,
    ):
        super().__init__(
            check_cfg=check_cfg,
            data_source_scan=data_source_scan,
            partition=partition,
            column=column,
        )
        # Register a descriptor so the historic aggregate for this metric is
        # fetched before evaluate() runs.
        cfg: MetricCheckCfg = self.check_cfg
        tracked_metric = self.metrics[cfg.metric_name]
        self.historic_descriptors[KEY_HISTORIC_METRIC_AGGREGATE] = HistoricChangeOverTimeDescriptor(
            metric_identity=tracked_metric.identity,
            change_over_time_cfg=cfg.change_over_time_cfg,
        )

    def evaluate(self, metrics: Dict[str, Metric], historic_values: Dict[str, object]):
        """Set check_value to (current - historic) and derive the outcome.

        Skips evaluation (with an info log) when no historic value exists yet.
        """
        current = self.get_metric_value()
        previous = (
            historic_values.get(KEY_HISTORIC_METRIC_AGGREGATE).get("measurements").get("results")[0].get("value")
        )

        if previous is None:
            self.logs.info("Skipping metric check eval because there is not enough historic data yet")
            return

        # Keep integer arithmetic exact; fall back to float otherwise.
        both_ints = isinstance(current, int) and isinstance(previous, int)
        self.check_value = (current - previous) if both_ints else float(current) - float(previous)
        self.historic_diff_values = {
            "historic_value": previous,
            "metric_value": current,
        }
        self.set_outcome_based_on_check_value()

    def get_cloud_diagnostics_dict(self) -> dict:
        """Extend the base cloud diagnostics with the historic/current pair."""
        diagnostics = super().get_cloud_diagnostics_dict()
        if self.historic_diff_values:
            diagnostics["diagnostics"] = self.historic_diff_values
        return diagnostics

    def get_log_diagnostic_dict(self) -> dict:
        """Extend the base log diagnostics with the historic/current pair."""
        diagnostics = super().get_log_diagnostic_dict()
        if self.historic_diff_values:
            diagnostics.update(self.historic_diff_values)
        return diagnostics
| 36.797101 | 113 | 0.68334 |
4d2e06182497ffc7d924413d86b42f3287f1ab30 | 4,943 | py | Python | tests/evaluation_test.py | boschresearch/blackboxopt | 85abea86f01a4a9d50f05d15e7d850e3288baafd | [
"ECL-2.0",
"Apache-2.0"
] | 8 | 2021-07-05T13:37:22.000Z | 2022-03-11T12:23:27.000Z | tests/evaluation_test.py | boschresearch/blackboxopt | 85abea86f01a4a9d50f05d15e7d850e3288baafd | [
"ECL-2.0",
"Apache-2.0"
] | 14 | 2021-07-07T13:55:23.000Z | 2022-02-07T13:09:01.000Z | tests/evaluation_test.py | boschresearch/blackboxopt | 85abea86f01a4a9d50f05d15e7d850e3288baafd | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 - for information on the respective copyright owner
# see the NOTICE file and/or the repository https://github.com/boschresearch/blackboxopt
#
# SPDX-License-Identifier: Apache-2.0
import pytest
from blackboxopt.evaluation import Evaluation, EvaluationSpecification
def test_evaluation_with_inf_values():
    """Infinite objective values are legal and must not raise."""
    for inf_value in (float("Inf"), float("-Inf")):
        Evaluation({"mse": inf_value, "r²": 0.2}, {}, {})
def test_evaluation_with_optional_objective_values():
    """`None` is accepted for some or even all objective values."""
    for objectives in ({"mse": None, "r²": 0.2}, {"mse": None, "r²": None}):
        Evaluation(objectives, {}, {})
def test_evaluation_with_nan_objective_value():
    """NaN objective values are rejected with a ValueError."""
    nan_objectives = {"mse": float("NaN"), "r²": 0.2}
    with pytest.raises(ValueError):
        Evaluation(nan_objectives, {}, {})
def test_unpack_specification_into_result():
    """An EvaluationSpecification can be **-unpacked into an Evaluation."""
    spec = EvaluationSpecification(
        configuration={"p1": 1.2},
        settings={"fidelity": 1.0},
        optimizer_info={"id": 123},
    )
    result = Evaluation({"mse": 0.0, "r²": 1.0}, **spec)

    assert "p1" in result.configuration
    assert "id" in result.optimizer_info
    assert "fidelity" in result.settings
def test_evaluate_specification_into_result():
    """create_evaluation carries all spec fields over into the Evaluation."""
    evaluation_spec = EvaluationSpecification(
        configuration={"p1": 1.2},
        settings={"fidelity": 1.0},
        optimizer_info={"id": 123},
    )
    evaluation = evaluation_spec.create_evaluation(
        objectives={"mse": 0.0, "r²": 1.0}
    )

    assert "p1" in evaluation.configuration
    assert "id" in evaluation.optimizer_info
    assert "fidelity" in evaluation.settings
def test_evaluation_result_independent_from_specification():
    """Mutating the Evaluation's configuration must not affect the spec."""
    spec = EvaluationSpecification(
        configuration={"p1": 1.2},
        settings={"fidelity": 1.0},
        optimizer_info={"id": 123},
    )
    evaluation = Evaluation(objectives={"mse": 0.0, "r²": 1.0}, **spec)
    evaluation.configuration["p1"] = -1.0

    assert spec.configuration["p1"] == 1.2
    assert evaluation.configuration["p1"] == -1.0
def test_get_specification_from_result_is_independent():
    """get_specification returns a copy detached from both the result and
    the original specification."""
    spec = EvaluationSpecification(
        configuration={"p1": 1.2},
        settings={"fidelity": 1.0},
        optimizer_info={"id": 123},
    )
    evaluation = Evaluation(objectives={"mse": 0.0, "r²": 1.0}, **spec)

    derived = evaluation.get_specification()
    derived.settings["fidelity"] = 2.0

    assert derived.settings["fidelity"] == 2.0
    assert evaluation.settings["fidelity"] == 1.0
    assert spec.settings["fidelity"] == 1.0
def test_to_json():
    """to_json produces the documented, stable JSON layout."""
    spec = EvaluationSpecification(
        configuration={"p1": 1.2},
        settings={"fidelity": 1.0},
        optimizer_info={"id": 123},
        created_unixtime=1.0,
    )
    expected_spec_json = (
        '{"configuration": {"p1": 1.2}, "settings": {"fidelity": 1.0}, '
        '"optimizer_info": {"id": 123}, "created_unixtime": 1.0, "context": null}'
    )
    assert spec.to_json() == expected_spec_json

    evaluation = Evaluation(
        objectives={"mse": 0.0, "r²": 1.0}, finished_unixtime=2.0, **spec
    )
    # Note the non-ASCII "r²" key is escaped as \u00b2 in the output.
    expected_evaluation_json = (
        '{"objectives": {"mse": 0.0, "r\\u00b2": 1.0}, "configuration": {"p1": 1.2}, '
        '"settings": {"fidelity": 1.0}, "optimizer_info": {"id": 123}, '
        '"created_unixtime": 1.0, "context": null, "constraints": null, '
        '"finished_unixtime": 2.0, "stacktrace": null, "user_info": null}'
    )
    assert evaluation.to_json() == expected_evaluation_json
def test_to_dict():
    """to_dict round-trips every field of the spec and the evaluation."""
    spec = EvaluationSpecification(
        configuration={"p1": 1.2},
        settings={"fidelity": 1.0},
        optimizer_info={"id": 123},
        created_unixtime=1.0,
    )
    spec_as_dict = spec.to_dict()
    assert spec_as_dict["configuration"] == spec.configuration
    assert spec_as_dict["settings"] == spec.settings
    assert spec_as_dict["optimizer_info"] == spec.optimizer_info
    assert spec_as_dict["created_unixtime"] == spec.created_unixtime
    assert spec_as_dict["context"] is None

    evaluation = Evaluation(
        objectives={"mse": 0.0, "r²": 1.0}, finished_unixtime=2.0, **spec
    )
    evaluation_as_dict = evaluation.to_dict()
    assert evaluation_as_dict["objectives"] == evaluation.objectives
    assert evaluation_as_dict["finished_unixtime"] == evaluation.finished_unixtime
    assert evaluation_as_dict["user_info"] is None
    assert evaluation_as_dict["stacktrace"] is None
def test_get_specification_from_evaluation():
    """get_specification reproduces the original spec exactly, and
    reset_created_unixtime refreshes only the timestamp."""
    eval_spec = EvaluationSpecification(
        configuration={"p1": 1.2},
        settings={"fidelity": 1.0},
        optimizer_info={"id": 123},
        context={"temperature": 25.3},
    )
    evaluation = eval_spec.create_evaluation({"mse": 0.0, "r²": 1.0})
    assert evaluation.get_specification() == eval_spec

    refreshed = evaluation.get_specification(reset_created_unixtime=True)
    assert refreshed.created_unixtime != evaluation.created_unixtime
    assert refreshed.configuration == evaluation.configuration
    assert refreshed.settings == evaluation.settings
    assert refreshed.optimizer_info == evaluation.optimizer_info
dd8f90160e3e11a79ba726cf0eab8f647ccad956 | 2,990 | py | Python | instagram/models.py | sharon002/insta-gram | dcce96464a4e51485f7a077299dcd3adf34da078 | [
"MIT"
] | 1 | 2021-07-24T15:29:35.000Z | 2021-07-24T15:29:35.000Z | instagram/models.py | sharon002/insta-gram | dcce96464a4e51485f7a077299dcd3adf34da078 | [
"MIT"
] | null | null | null | instagram/models.py | sharon002/insta-gram | dcce96464a4e51485f7a077299dcd3adf34da078 | [
"MIT"
] | 1 | 2021-07-24T15:29:36.000Z | 2021-07-24T15:29:36.000Z | from django.db import models
from django.contrib.auth.models import User
from PIL import Image as pil_img
import datetime
from django.utils import timezone
from tinymce.models import HTMLField
from django.db.models.signals import post_save
from django.dispatch import receiver
import cloudinary
from cloudinary.models import CloudinaryField
class Profile(models.Model):
    """Per-user profile data, linked one-to-one to Django's built-in User."""

    profile_photo = models.ImageField(default='default.jpg', upload_to='profile_pics/')
    bio = models.TextField(blank=True)
    user = models.OneToOneField(User,on_delete=models.CASCADE)

    def __str__(self):
        return f'{self.user.username} Profile'

    # NOTE(review): these post_save receivers are defined inside the class
    # body and take `sender` rather than `self`. @receiver registers the
    # plain functions at class-creation time so they still fire, but they are
    # not usable as instance methods — confirm this nesting is intentional.
    @receiver(post_save, sender=User)
    def create_user_profile(sender, instance, created, **kwargs):
        # Auto-create an empty Profile whenever a new User row is inserted.
        if created:
            Profile.objects.create(user=instance)

    @receiver(post_save, sender=User)
    def save_user_profile(sender, instance, **kwargs):
        # Keep the related Profile persisted every time the User is saved.
        instance.profile.save()

    def delete_profile(self):
        # Remove this profile row.
        self.delete()

    @classmethod
    def search_profile(cls, name):
        # Case-insensitive username substring search; returns a queryset.
        profile = Profile.objects.filter(user__username__icontains = name)
        return profile

    @classmethod
    def get_by_id(cls, id):
        # Raises Profile.DoesNotExist when no row matches the user id.
        profile = Profile.objects.get(user = id)
        return profile

    @classmethod
    def filter_by_id(cls, id):
        # First match or None (unlike get_by_id, never raises).
        profile = Profile.objects.filter(user = id).first()
        return profile
class Image(models.Model):
    """A posted image plus its caption, poster and timestamp."""

    # image = models.ImageField(upload_to='images/')
    # Stored on Cloudinary rather than local media storage.
    image= cloudinary.models.CloudinaryField('image',null=True, blank=True)
    image_name = models.CharField(max_length=20, blank=True)
    image_caption = models.CharField(max_length=100)
    # NOTE(review): a single boolean cannot count likes or record who liked;
    # presumably a placeholder — confirm the intended semantics.
    likes = models.BooleanField(default=False)
    date_posted = models.DateTimeField(default=timezone.now)
    # Poster; default='1' targets the user with pk 1 for pre-existing rows.
    profile = models.ForeignKey(User, on_delete=models.CASCADE, default='1')

    def __str__(self):
        return self.image_caption

    def save_image(self):
        # Persist this image row.
        self.save()

    def delete_image(self):
        # Remove this image row.
        self.delete()

    @classmethod
    def update_caption(cls, update):
        # Not implemented yet.
        pass

    @classmethod
    def get_image_id(cls, id):
        # Raises Image.DoesNotExist when no row matches the primary key.
        image = Image.objects.get(pk=id)
        return image

    @classmethod
    def get_profile_images(cls, profile):
        # All images posted by the user with the given primary key.
        images = Image.objects.filter(profile__pk = profile)
        return images

    @classmethod
    def get_all_images(cls):
        images = Image.objects.all()
        return images

    class Meta:
        # Newest posts first.
        ordering = ['-date_posted']
class Comment(models.Model):
    """A rich-text (TinyMCE HTML) comment a user leaves on an Image.

    ``posted_on`` auto-updates on every save (auto_now=True).
    """

    comment = HTMLField()
    posted_on = models.DateTimeField(auto_now=True)
    image = models.ForeignKey(Image, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)

    def save_comment(self):
        # Persist this comment. The original file defined this method twice
        # with identical bodies; the duplicate (which silently shadowed the
        # first definition) has been removed.
        self.save()

    @classmethod
    def get_comments_by_images(cls, id):
        # All comments attached to the image with the given primary key.
        comments = Comment.objects.filter(image__pk = id)
        return comments
f89aa46a1e668e697b7d93180f661cdd7dfc9942 | 1,673 | py | Python | topi/tests/python/test_topi_shortcut.py | weberlo/tvm | e4b9f986dab8c48ba109a52106565fc4be6b67c4 | [
"Apache-2.0"
] | 4 | 2018-09-11T05:50:03.000Z | 2022-01-23T03:43:22.000Z | topi/tests/python/test_topi_shortcut.py | ganzhiliang/tvm | b076cad542524cb3744149d953c341b5815f6474 | [
"Apache-2.0"
] | 4 | 2020-12-04T21:00:38.000Z | 2022-01-22T12:49:30.000Z | topi/tests/python/test_topi_shortcut.py | ganzhiliang/tvm | b076cad542524cb3744149d953c341b5815f6474 | [
"Apache-2.0"
] | 4 | 2018-09-10T23:43:51.000Z | 2019-06-14T16:27:23.000Z | """Example code to do shortcut."""
import numpy as np
import topi
from topi.util import get_const_tuple
import tvm
def verify_shortcut(batch, in_size, in_channel):
    '''Verify the shortcut operator by comparing TVM output against the
    numpy reference implementation.

    Args:
        batch: batch dimension of the NCHW input tensors.
        in_size: spatial size (height == width) of the inputs.
        in_channel: channel dimension of the inputs.
    '''
    in_height = in_width = in_size

    # Two identically-shaped NCHW placeholders feed the shortcut (elementwise
    # residual add) operator.
    A1 = tvm.placeholder((batch, in_channel, in_height, in_width), name='A1')
    A2 = tvm.placeholder((batch, in_channel, in_height, in_width), name='A2')
    B = topi.vision.shortcut(A1, A2)
    a_shape = get_const_tuple(A1.shape)
    dtype = A1.dtype

    def get_ref_data_shortcut():
        # Random inputs plus the numpy reference result.
        a_np1 = np.random.uniform(size=a_shape).astype(dtype)
        a_np2 = np.random.uniform(size=a_shape).astype(dtype)
        b_np = topi.testing.shortcut_python(a_np1, a_np2)
        return a_np1, a_np2, b_np

    a_np1, a_np2, b_np = get_ref_data_shortcut()

    def check_device(device):
        '''Build, run and check on one device; skip if it is not enabled.'''
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        # The schedule must be created under the target context so it is
        # specialized for the chosen device.
        with tvm.target.create(device):
            s = topi.generic.schedule_injective([B])

        a1 = tvm.nd.array(a_np1, ctx)
        a2 = tvm.nd.array(a_np2, ctx)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
        func = tvm.build(s, [A1, A2, B], device)
        func(a1, a2, b)
        tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)

    for device in ['llvm', 'cuda']:
        check_device(device)
def test_shortcut():
    """Smoke-test the shortcut operator on one representative shape."""
    verify_shortcut(batch=1, in_size=144, in_channel=32)
if __name__ == "__main__":
    # Allow running this test file directly, without pytest.
    test_shortcut()
| 34.142857 | 89 | 0.645547 |
283483698992a38f9ddb54a439f469e3a7861d17 | 8,554 | py | Python | distributed/comm/tests/test_ucx.py | gforsyth/distributed | 6fe62774aa7ad585cf2231ca6475f70fdc1cec24 | [
"BSD-3-Clause"
] | null | null | null | distributed/comm/tests/test_ucx.py | gforsyth/distributed | 6fe62774aa7ad585cf2231ca6475f70fdc1cec24 | [
"BSD-3-Clause"
] | null | null | null | distributed/comm/tests/test_ucx.py | gforsyth/distributed | 6fe62774aa7ad585cf2231ca6475f70fdc1cec24 | [
"BSD-3-Clause"
] | null | null | null | import asyncio
import pytest
ucp = pytest.importorskip("ucp")
from distributed import Client
from distributed.comm import ucx, listen, connect
from distributed.comm.registry import backends, get_backend
from distributed.comm import ucx, parse_address
from distributed.protocol import to_serialize
from distributed.deploy.local import LocalCluster
from dask.dataframe.utils import assert_eq
from distributed.utils_test import gen_test, loop, inc # noqa: 401
from .test_comms import check_deserialize
# Address of a local UCX-capable interface; used as the default listen host.
HOST = ucp.get_address()
def test_registered():
    """The UCX backend is registered under the "ucx" scheme."""
    assert "ucx" in backends
    assert isinstance(get_backend("ucx"), ucx.UCXBackend)
async def get_comm_pair(
    listen_addr="ucx://" + HOST, listen_args=None, connect_args=None, **kwargs
):
    """Open a connected (client, server) UCX comm pair.

    The server-side comm is captured by the listener's handler and handed
    back through a queue so that both ends can be returned to the caller.
    """
    q = asyncio.queues.Queue()

    async def handle_comm(comm):
        # Runs once per incoming connection; just hand the comm back.
        await q.put(comm)

    listener = listen(listen_addr, handle_comm, connection_args=listen_args, **kwargs)
    with listener:
        comm = await connect(
            listener.contact_address, connection_args=connect_args, **kwargs
        )
        serv_com = await q.get()
    return comm, serv_com
@pytest.mark.asyncio
async def test_ping_pong():
    """A message written on one end arrives intact on the other, both ways."""
    client, server = await get_comm_pair()

    ping = {"op": "ping"}
    await client.write(ping)
    received = await server.read()
    assert received == ping

    received["op"] = "pong"
    await server.write(received)
    reply = await client.read()
    assert reply == {"op": "pong"}

    await client.close()
    await server.close()
@pytest.mark.asyncio
async def test_comm_objs():
    """Both comm objects expose ucx:// addresses that line up with each other."""
    client, server = await get_comm_pair()

    for endpoint in (client, server):
        scheme, _ = parse_address(endpoint.peer_address)
        assert scheme == "ucx"

    assert client.peer_address == server.local_address
def test_ucx_specific():
    """
    Test concrete UCX API (UCXListener / UCXConnector directly, rather than
    the generic listen()/connect() helpers).
    """
    # TODO:
    # 1. ensure exceptions in handle_comm fail the test
    # 2. Use dict in read / write, put seralization there.
    # 3. Test peer_address
    # 4. Test cleanup
    async def f():
        # Port 0 asks the listener to pick a free port.
        address = "ucx://{}:{}".format(HOST, 0)

        async def handle_comm(comm):
            # Server side: echo a pong, wait for the client's final message,
            # then close and verify the closed state transitions.
            msg = await comm.read()
            msg["op"] = "pong"
            await comm.write(msg)
            await comm.read()
            assert comm.closed() is False
            await comm.close()
            assert comm.closed

        listener = ucx.UCXListener(address, handle_comm)
        listener.start()
        host, port = listener.get_host_port()
        # Sanity-check a dotted-quad IPv4 host and a real (non-zero) port.
        assert host.count(".") == 3
        assert port > 0

        connector = ucx.UCXConnector()
        l = []

        async def client_communicate(key, delay=0):
            addr = "%s:%d" % (host, port)
            comm = await connector.connect(addr)
            # TODO: peer_address
            # assert comm.peer_address == 'ucx://' + addr
            assert comm.extra_info == {}
            msg = {"op": "ping", "data": key}
            await comm.write(msg)
            if delay:
                await asyncio.sleep(delay)
            msg = await comm.read()
            assert msg == {"op": "pong", "data": key}
            await comm.write({"op": "client closed"})
            l.append(key)
            return comm

        # One client on its own first...
        comm = await client_communicate(key=1234, delay=0.5)

        # Many clients at once
        N = 2
        futures = [client_communicate(key=i, delay=0.05) for i in range(N)]
        await asyncio.gather(*futures)
        # Every client (the lone one plus the concurrent batch) completed.
        assert set(l) == {1234} | set(range(N))

    asyncio.run(f())
@pytest.mark.asyncio
async def test_ping_pong_data():
    """A numpy payload survives the UCX round trip unchanged."""
    np = pytest.importorskip("numpy")

    payload = np.ones((10, 10))

    client, server = await get_comm_pair()
    await client.write({"op": "ping", "data": to_serialize(payload)})

    received = await server.read()
    received["op"] = "pong"
    echoed = received.pop("data")
    np.testing.assert_array_equal(echoed, payload)

    await server.write(received)
    reply = await client.read()
    assert reply == {"op": "pong"}

    await client.close()
    await server.close()
@gen_test()
def test_ucx_deserialize():
    # NOTE(review): this exercises deserialization over "tcp://" even though
    # the test is named for UCX — it reuses the generic check from
    # test_comms; confirm whether "ucx://" was intended here instead.
    yield check_deserialize("tcp://")
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "g",
    [
        lambda cudf: cudf.Series([1, 2, 3]),
        lambda cudf: cudf.Series([]),
        lambda cudf: cudf.DataFrame([]),
        lambda cudf: cudf.DataFrame([1]).head(0),
        lambda cudf: cudf.DataFrame([1.0]).head(0),
        lambda cudf: cudf.DataFrame({"a": []}),
        lambda cudf: cudf.DataFrame({"a": ["a"]}).head(0),
        lambda cudf: cudf.DataFrame({"a": [1.0]}).head(0),
        lambda cudf: cudf.DataFrame({"a": [1]}).head(0),
        lambda cudf: cudf.DataFrame({"a": [1, 2, None], "b": [1.0, 2.0, None]}),
        lambda cudf: cudf.DataFrame({"a": ["Check", "str"], "b": ["Sup", "port"]}),
    ],
)
async def test_ping_pong_cudf(g):
    """Various cudf objects (including empty frames) survive the round trip."""
    # if this test appears after cupy an import error arises
    # *** ImportError: /usr/lib/x86_64-linux-gnu/libstdc++.so.6: version `CXXABI_1.3.11'
    # not found (required by python3.7/site-packages/pyarrow/../../../libarrow.so.12)
    cudf = pytest.importorskip("cudf")

    original = g(cudf)
    client, server = await get_comm_pair()

    await client.write({"op": "ping", "data": to_serialize(original)})
    received = await server.read()
    roundtripped = received.pop("data")
    assert received["op"] == "ping"
    assert_eq(original, roundtripped)

    await client.close()
    await server.close()
@pytest.mark.asyncio
@pytest.mark.parametrize("shape", [(100,), (10, 10), (4947,)])
async def test_ping_pong_cupy(shape):
    """cupy arrays of assorted shapes survive the round trip."""
    cupy = pytest.importorskip("cupy")
    client, server = await get_comm_pair()

    payload = cupy.random.random(shape)
    # Write and read concurrently so neither end blocks the other.
    _, received = await asyncio.gather(
        client.write({"op": "ping", "data": to_serialize(payload)}),
        server.read(),
    )

    echoed = received.pop("data")
    assert received["op"] == "ping"
    cupy.testing.assert_array_equal(payload, echoed)

    await client.close()
    await server.close()
@pytest.mark.slow
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "n",
    [
        int(1e9),
        pytest.param(
            int(2.5e9), marks=[pytest.mark.xfail(reason="integer type in ucx-py")]
        ),
    ],
)
async def test_large_cupy(n):
    """Multi-gigabyte cupy buffers can be written and read back whole."""
    cupy = pytest.importorskip("cupy")
    client, server = await get_comm_pair()

    payload = cupy.ones(n, dtype="u1")
    # Write and read concurrently so neither end blocks the other.
    _, received = await asyncio.gather(
        client.write({"op": "ping", "data": to_serialize(payload)}),
        server.read(),
    )

    echoed = received.pop("data")
    assert received["op"] == "ping"
    assert len(echoed) == len(payload)

    await client.close()
    await server.close()
@pytest.mark.asyncio
async def test_ping_pong_numba():
    """Send a numba CUDA device array through a UCX comm pair."""
    np = pytest.importorskip("numpy")
    numba = pytest.importorskip("numba")
    import numba.cuda

    arr = np.arange(10)
    # Move the host array onto the GPU before serialization.
    arr = numba.cuda.to_device(arr)

    com, serv_com = await get_comm_pair()
    msg = {"op": "ping", "data": to_serialize(arr)}

    await com.write(msg)
    result = await serv_com.read()
    data2 = result.pop("data")
    assert result["op"] == "ping"
    # NOTE(review): unlike the cupy/cudf variants, the received payload
    # (data2) is never compared against `arr` and the comms are never
    # closed — confirm whether an equality check and close() were intended.
@pytest.mark.parametrize("processes", [True, False])
def test_ucx_localcluster(loop, processes):
    """A LocalCluster speaking the ucx:// protocol can run a trivial task."""
    if processes:
        # The process-based variant is skipped outright rather than xfailed.
        pytest.skip("Known bug, processes=True doesn't work currently")

    with LocalCluster(
        protocol="ucx",
        dashboard_address=None,
        n_workers=2,
        threads_per_worker=1,
        processes=processes,
        loop=loop,
    ) as cluster:
        with Client(cluster) as client:
            x = client.submit(inc, 1)
            x.result()
            assert x.key in cluster.scheduler.tasks
            if not processes:
                # In-process workers share memory with the test process, so
                # worker-held data can be inspected directly.
                assert any(w.data == {x.key: 2} for w in cluster.workers.values())
            assert len(cluster.scheduler.workers) == 2
@pytest.mark.slow
@pytest.mark.asyncio
async def test_stress():
    """Repeatedly rechunk a large array over UCX to stress the transport."""
    import dask.array as da
    from distributed import wait

    chunksize = "10 MB"

    async with LocalCluster(
        protocol="ucx", dashboard_address=None, asynchronous=True, processes=False
    ) as cluster:
        async with Client(cluster, asynchronous=True) as client:
            rs = da.random.RandomState()
            x = rs.random((10000, 10000), chunks=(-1, chunksize))
            x = x.persist()
            await wait(x)

            for i in range(10):
                # Flip the chunking axis back and forth; each flip forces an
                # all-to-all transfer of the data between workers over UCX.
                x = x.rechunk((chunksize, -1))
                x = x.rechunk((-1, chunksize))
                x = x.persist()
                await wait(x)
| 27.954248 | 88 | 0.60989 |
100e3bd05fbe16ce588f02dcdba539440c6a9238 | 161 | py | Python | exercicios/exe003/exe003.py | tiagolsouza/exercicios-Curso-em-video-PYTHON | e4e6975fac7e4883aeab58b970c6ca72895564e4 | [
"MIT"
] | null | null | null | exercicios/exe003/exe003.py | tiagolsouza/exercicios-Curso-em-video-PYTHON | e4e6975fac7e4883aeab58b970c6ca72895564e4 | [
"MIT"
] | null | null | null | exercicios/exe003/exe003.py | tiagolsouza/exercicios-Curso-em-video-PYTHON | e4e6975fac7e4883aeab58b970c6ca72895564e4 | [
"MIT"
] | null | null | null | n1 = int(input('digite um numero: '))
# `n1` was read on the preceding line: n1 = int(input('digite um numero: ')).
n2 = int(input('digite outro numero:'))  # second integer from the user
s = n1+n2  # sum of the two inputs
# Print the sum; the \033[31m ... \033[m escapes render the first number in red.
print('a soma entre \033[31m{}\033[m e {} sera: {}.' .format(n1, n2, s))
| 32.2 | 72 | 0.602484 |
8541ef68514929a6e57d3bea53b58b3080f31a46 | 8,467 | py | Python | unimport/main.py | abdulniyaspm/unimport | 372a144a0c5b5c334ff042451a4436c88e1f0dab | [
"MIT"
] | 1 | 2020-11-18T22:45:38.000Z | 2020-11-18T22:45:38.000Z | unimport/main.py | abdulniyaspm/unimport | 372a144a0c5b5c334ff042451a4436c88e1f0dab | [
"MIT"
] | 5 | 2020-11-18T22:32:21.000Z | 2020-11-19T02:05:24.000Z | unimport/main.py | abdulniyaspm/unimport | 372a144a0c5b5c334ff042451a4436c88e1f0dab | [
"MIT"
] | null | null | null | import argparse
import difflib
import re
import sys
from pathlib import Path
from typing import List, Optional, Sequence, Set, Tuple
from unimport import color
from unimport import constants as C
from unimport.session import Session
from unimport.statement import ImportFrom
from unimport.utils import (
actiontobool,
get_exclude_list_from_gitignore,
get_used_packages,
package_name_from_metadata,
)
def print_if_exists(sequence: Tuple[str, ...]) -> bool:
    """Print the colored diff for *sequence* when it is non-empty.

    Returns True when something was printed, False otherwise.
    """
    if not sequence:
        return False
    print(color.difference(sequence))
    return True
def show(unused_import: List[C.ImportT], py_path: Path) -> None:
    """Print one line per unused import: what it is and where it lives."""
    # The " at path:" fragment is identical for every line, so build it once.
    location_prefix = " at " + color.paint(py_path.as_posix(), color.GREEN) + ":"
    for imp in unused_import:
        star_with_suggestions = (
            isinstance(imp, ImportFrom) and imp.star and imp.suggestions
        )
        if star_with_suggestions:
            # Star imports are shown as a rewrite suggestion: the original
            # in red, the explicit replacement in green.
            before = color.paint(f"from {imp.name} import *", color.RED)
            after = color.paint(
                f"from {imp.name} import {', '.join(imp.suggestions)}",
                color.GREEN,
            )
            context = before + " -> " + after
        else:
            context = color.paint(imp.name, color.YELLOW)
        print(context + location_prefix + color.paint(str(imp.lineno), color.GREEN))
def main(argv: Optional[Sequence[str]] = None) -> int:
    """Command line entry point for unimport.

    Parses the argument vector, scans the given sources for unused imports
    and, depending on the flags, reports them (``--check``), shows diffs
    (``--diff`` / ``--permission``), refactors files (``--remove``) and
    prunes unused entries from ``requirements*.txt`` (``--requirements``).

    :param argv: argument vector; defaults to ``sys.argv[1:]``.
    :return: 1 when unused imports were found or the user quit an
        interactive prompt, 0 otherwise.
    """
    parser = argparse.ArgumentParser(
        prog="unimport",
        description=C.DESCRIPTION,
        epilog="Get rid of all unused imports 🥳",
    )
    exclusive_group = parser.add_mutually_exclusive_group(required=False)
    parser.add_argument(
        "sources",
        default=[Path(".")],
        nargs="*",
        help="files and folders to find the unused imports.",
        action="store",
        type=Path,
    )
    parser.add_argument(
        "-c",
        "--config",
        default=".",
        help="read configuration from PATH.",
        metavar="PATH",
        action="store",
        type=Path,
    )
    parser.add_argument(
        "--include",
        help="file include pattern.",
        metavar="include",
        action="store",
        default=[],
        type=lambda value: [value],
    )
    parser.add_argument(
        "--exclude",
        help="file exclude pattern.",
        metavar="exclude",
        action="store",
        default=[],
        type=lambda value: [value],
    )
    parser.add_argument(
        "--gitignore",
        action="store_true",
        help="exclude .gitignore patterns. if present.",
    )
    parser.add_argument(
        "--include-star-import",
        action="store_true",
        help="Include star imports during scanning and refactor.",
    )
    parser.add_argument(
        "--show-error",
        action="store_true",
        help="Show or don't show errors captured during static analysis.",
    )
    parser.add_argument(
        "-d",
        "--diff",
        action="store_true",
        help="Prints a diff of all the changes unimport would make to a file.",
    )
    exclusive_group.add_argument(
        "-r",
        "--remove",
        action="store_true",
        help="remove unused imports automatically.",
    )
    exclusive_group.add_argument(
        "-p",
        "--permission",
        action="store_true",
        help="Refactor permission after see diff.",
    )
    parser.add_argument(
        "--requirements",
        action="store_true",
        help="Include requirements.txt file, You can use it with all other arguments",
    )
    parser.add_argument(
        "--check",
        action="store_true",
        help="Prints which file the unused imports are in.",
    )
    parser.add_argument(
        "-v",
        "--version",
        action="version",
        version=f"Unimport {C.VERSION}",
        help="Prints version of unimport",
    )
    argv = argv if argv is not None else sys.argv[1:]
    args = parser.parse_args(argv)
    session = Session(
        config_file=args.config,
        include_star_import=args.include_star_import,
        show_error=args.show_error,
    )
    # Merge CLI flags with the configuration-file settings; CLI wins when set.
    args.remove = args.remove or session.config.remove  # type: ignore
    args.diff = any((args.diff, args.permission, session.config.diff))  # type: ignore
    # --check is the default action when neither --diff nor --remove is requested.
    args.check = args.check or not any((args.diff, args.remove))
    args.requirements = args.requirements or session.config.requirements  # type: ignore
    args.gitignore = args.gitignore or session.config.gitignore  # type: ignore
    args.sources.extend(session.config.sources)  # type: ignore
    args.include.extend(session.config.include)  # type: ignore
    args.exclude.extend(session.config.exclude)  # type: ignore
    if args.gitignore:
        args.exclude.extend(get_exclude_list_from_gitignore())
    # Compiling validates the combined pattern; only the pattern text is kept.
    include = re.compile("|".join(args.include)).pattern
    exclude = re.compile("|".join(args.exclude)).pattern
    unused_modules = set()
    packages: Set[str] = set()
    for source_path in args.sources:
        for py_path in session.list_paths(source_path, include, exclude):
            session.scanner.scan(source=session.read(py_path)[0])
            unused_imports = session.scanner.unused_imports
            unused_modules.update({imp.name for imp in unused_imports})
            packages.update(
                get_used_packages(
                    session.scanner.imports, session.scanner.unused_imports
                )
            )
            if args.check:
                show(unused_imports, py_path)
            session.scanner.clear()
            if args.diff:
                exists_diff = print_if_exists(session.diff_file(py_path))
                if args.permission and exists_diff:
                    action = input(
                        f"Apply suggested changes to '{color.paint(str(py_path), color.YELLOW)}' [Y/n/q] ? >"
                    ).lower()
                    if action == "q":
                        return 1
                    elif actiontobool(action):
                        args.remove = True
            if args.remove and session.refactor_file(py_path, apply=True)[1]:
                print(
                    f"Refactoring '{color.paint(str(py_path), color.GREEN)}'"
                )
    if not unused_modules and args.check:
        print(
            color.paint(
                "✨ Congratulations there is no unused import in your project. ✨",
                color.GREEN,
            )
        )
    if args.requirements and packages:
        for requirements in Path(".").glob("requirements*.txt"):
            splitlines_requirements = requirements.read_text().splitlines()
            result = splitlines_requirements.copy()
            for index, requirement in enumerate(splitlines_requirements):
                module_name = package_name_from_metadata(
                    requirement.split("==")[0]
                )
                if module_name is None:
                    if args.show_error:
                        print(
                            color.paint(requirement + " not found", color.RED)
                        )
                    continue
                if module_name not in packages:
                    result.remove(requirement)
                    if args.check:
                        print(
                            f"{color.paint(requirement, color.CYAN)} at "
                            f"{color.paint(requirements.as_posix(), color.CYAN)}:{color.paint(str(index + 1), color.CYAN)}"
                        )
            if args.diff:
                exists_diff = print_if_exists(
                    tuple(
                        difflib.unified_diff(
                            splitlines_requirements,
                            result,
                            fromfile=requirements.as_posix(),
                        )
                    )
                )
                if args.permission and exists_diff:
                    action = input(
                        f"Apply suggested changes to '{color.paint(requirements.as_posix(), color.CYAN)}' [Y/n/q] ? >"
                    ).lower()
                    if action == "q":
                        return 1
                    if actiontobool(action):
                        args.remove = True
            if args.remove:
                # BUG FIX: splitlines() strips the newline characters, so the
                # surviving lines must be re-joined with "\n". The original
                # "".join(result) collapsed the whole requirements file onto
                # a single unreadable line.
                requirements.write_text("\n".join(result))
                print(
                    f"Refactoring '{color.paint(requirements.as_posix(), color.CYAN)}'"
                )
    if unused_modules:
        return 1
    else:
        return 0
if __name__ == "__main__":
    sys.exit(main())
| 34.559184 | 123 | 0.549782 |
146a95c535e382b506d2e1c6f40eb49824ddae14 | 4,231 | py | Python | sdk/python/pulumi_kubernetes/policy/v1beta1/PodDisruptionBudgetList.py | Carlangueitor/pulumi-kubernetes | 859ccaaeb8291de49128dbc202fbac1358b2a25a | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_kubernetes/policy/v1beta1/PodDisruptionBudgetList.py | Carlangueitor/pulumi-kubernetes | 859ccaaeb8291de49128dbc202fbac1358b2a25a | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_kubernetes/policy/v1beta1/PodDisruptionBudgetList.py | Carlangueitor/pulumi-kubernetes | 859ccaaeb8291de49128dbc202fbac1358b2a25a | [
"Apache-2.0"
] | null | null | null | # *** WARNING: this file was generated by the Pulumi Kubernetes codegen tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
from typing import Optional
import pulumi
import pulumi.runtime
from pulumi import Input, ResourceOptions
from ... import tables, version
class PodDisruptionBudgetList(pulumi.CustomResource):
    """
    PodDisruptionBudgetList is a collection of PodDisruptionBudgets.
    """
    apiVersion: pulumi.Output[str]
    """
    APIVersion defines the versioned schema of this representation of an object. Servers should
    convert recognized schemas to the latest internal value, and may reject unrecognized values.
    More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
    """
    kind: pulumi.Output[str]
    """
    Kind is a string value representing the REST resource this object represents. Servers may infer
    this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More
    info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
    """
    # items: the list of PodDisruptionBudget objects; metadata: standard list metadata.
    items: pulumi.Output[list]
    metadata: pulumi.Output[dict]
    def __init__(self, resource_name, opts=None, items=None, metadata=None, __name__=None, __opts__=None):
        """
        Create a PodDisruptionBudgetList resource with the given unique name, arguments, and options.

        :param str resource_name: The _unique_ name of the resource.
        :param pulumi.ResourceOptions opts: A bag of options that control this resource's behavior.
        :param pulumi.Input[list] items:
        :param pulumi.Input[dict] metadata:
        """
        # Legacy keyword shims: __name__ / __opts__ were the pre-"opts" spelling
        # of these arguments; translate them and warn.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if not resource_name:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(resource_name, str):
            raise TypeError('Expected resource name to be a string')
        if opts and not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')

        __props__ = dict()

        # apiVersion/kind are fixed by this resource type and never user-supplied.
        __props__['apiVersion'] = 'policy/v1beta1'
        __props__['kind'] = 'PodDisruptionBudgetList'
        if items is None:
            raise TypeError('Missing required property items')
        __props__['items'] = items
        __props__['metadata'] = metadata
        # Output-only property: left None here, filled in by the engine.
        __props__['status'] = None

        # Pin the provider version so the engine selects a matching plugin.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(
            version=version.get_version(),
        ))

        super(PodDisruptionBudgetList, self).__init__(
            "kubernetes:policy/v1beta1:PodDisruptionBudgetList",
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None):
        """
        Get the state of an existing `PodDisruptionBudgetList` resource, as identified by `id`.
        The ID is of the form `[namespace]/[name]`; if `[namespace]` is omitted,
        then (per Kubernetes convention) the ID becomes `default/[name]`.

        Pulumi will keep track of this resource using `resource_name` as the Pulumi ID.

        :param str resource_name: _Unique_ name used to register this resource with Pulumi.
        :param pulumi.Input[str] id: An ID for the Kubernetes resource to retrieve.
               Takes the form `[namespace]/[name]` or `[name]`.
        :param Optional[pulumi.ResourceOptions] opts: A bag of options that control this
               resource's behavior.
        """
        # Passing an `id` option makes the constructor a read of existing state.
        # NOTE(review): the constructor raises TypeError when `items` is None,
        # which this call path does not supply — presumably unreachable in
        # practice or a codegen defect; confirm against the generator version.
        opts = ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        return PodDisruptionBudgetList(resource_name, opts)

    # Map between Python snake_case and the provider's camelCase property names.
    def translate_output_property(self, prop: str) -> str:
        return tables._CASING_FORWARD_TABLE.get(prop) or prop

    def translate_input_property(self, prop: str) -> str:
        return tables._CASING_BACKWARD_TABLE.get(prop) or prop
2b7f36816beb1119aa43d99bef44ca8cc00f2f35 | 2,579 | py | Python | tests/test_simple_sbml.py | ModelEngineering/Kinetics-Validator | 9350da492fd9c1482b50332f386632e6db0e7ed2 | [
"MIT"
] | null | null | null | tests/test_simple_sbml.py | ModelEngineering/Kinetics-Validator | 9350da492fd9c1482b50332f386632e6db0e7ed2 | [
"MIT"
] | null | null | null | tests/test_simple_sbml.py | ModelEngineering/Kinetics-Validator | 9350da492fd9c1482b50332f386632e6db0e7ed2 | [
"MIT"
] | null | null | null | """
Tests for simple_sbml
"""
from SBMLKinetics.common import constants as cn
from SBMLKinetics.common import simple_sbml
from SBMLKinetics.common.simple_sbml import SimpleSBML
from SBMLKinetics.common.reaction import Reaction
from SBMLKinetics.common import util
from tests.common import helpers
import copy
import numpy as np
import os
import libsbml
import unittest
import tellurium as te
import zipfile
IGNORE_TEST = False
IS_PLOT = False
NO_NAME = "dummy"
#############################
# Tests
#############################
class TestSimpleSBML(unittest.TestCase):
    """Unit tests for the SimpleSBML wrapper object."""

    def setUp(self):
        self.simple = helpers.getSimple()

    def testConstructor(self):
        if IGNORE_TEST:
            return

        def assert_nonempty_of_type(items, expected_type):
            # The collection must be populated and hold the expected type.
            self.assertGreater(len(items), 0)
            self.assertTrue(isinstance(items[0], expected_type))

        assert_nonempty_of_type(self.simple.reactions, Reaction)
        assert_nonempty_of_type(self.simple.species, libsbml.Species)
        assert_nonempty_of_type(self.simple.parameters, libsbml.Parameter)
        self.assertTrue(isinstance(self.simple.model, libsbml.Model))
        simple = helpers.getSimple_BIOMD56()
        self.assertGreater(len(simple.function_definitions), 0)

    def testGet(self):
        if IGNORE_TEST:
            return

        def assert_lookup_roundtrip(lookup, items):
            # Fetching the first element by its id must return that element.
            first = items[0]
            self.assertEqual(lookup(first.getId()), first)

        assert_lookup_roundtrip(self.simple.getReaction, self.simple.reactions)
        assert_lookup_roundtrip(self.simple.getSpecies, self.simple.species)
        assert_lookup_roundtrip(self.simple.getParameter, self.simple.parameters)

    def testConstructWithRoadrunner(self):
        if IGNORE_TEST:
            return
        # Loading through tellurium must still yield a usable SimpleSBML.
        model = te.loadSBMLModel(helpers.TEST_PATH)
        simple = helpers.getSimple()
        self.assertGreater(len(simple.reactions), 0)
class TestFunctions(unittest.TestCase):
    """Tests for module-level helper functions of simple_sbml."""

    def testReadURL(self):
        pass

    def _testIterator(self, itr):
        """Exercise a model iterator: items wrap SimpleSBML models and are numbered."""
        # Every item yielded by the supplied iterator wraps a SimpleSBML model.
        for item in itr:
            self.assertTrue(isinstance(item.model,
                SimpleSBML))
        # Exhaust a fresh iterator and verify items carry filename + number,
        # with numbering ending at COUNT - 1.
        COUNT = 5
        itr = simple_sbml.modelIterator(final=COUNT)
        item_number = -1
        for item in itr:
            self.assertTrue(isinstance(item.filename, str))
            item_number = item.number
        self.assertEqual(item_number, COUNT - 1)

    def testModelIterator1(self):
        if IGNORE_TEST:
            return
        self._testIterator(simple_sbml.modelIterator(final=1))

    def testGetZipfilePath(self):
        if IGNORE_TEST:
            return
        ffiles, zipper = simple_sbml.getZipfilePaths()
        # Every listed member must be openable from the zip archive.
        for ffile in ffiles:
            try:
                fid = zipper.open(ffile)
                fid.close()
            except Exception:
                # BUG FIX: the original called the undefined name
                # ``assertTrue(False)`` (missing ``self.``) inside a bare
                # ``except:``, which crashed with NameError instead of
                # failing the test cleanly.
                self.fail("could not open zip member: %s" % ffile)


if __name__ == '__main__':
    unittest.main()
| 24.561905 | 59 | 0.69019 |
9365b34f71a5848221cfd2ad9619bdfcc7465382 | 27,738 | py | Python | test/input_gen/genInput.py | suehdn/nntrainer | f8c4c6f168be83b76f724d68f50f9f7c6d0543de | [
"Apache-2.0"
] | null | null | null | test/input_gen/genInput.py | suehdn/nntrainer | f8c4c6f168be83b76f724d68f50f9f7c6d0543de | [
"Apache-2.0"
] | null | null | null | test/input_gen/genInput.py | suehdn/nntrainer | f8c4c6f168be83b76f724d68f50f9f7c6d0543de | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0-only
##
# Copyright (C) 2020 Jijoong Moon <jijoong.moon@samsung.com>
# Copyright (C) 2020 Parichay Kapoor <pk.kapoor@samsung.com>
#
# @file genTestInput.py
# @brief Generate test input
# @author Jijoong Moon <jijoong.moon@samsung.com>
# @author Parichay Kapoor <pk.kapoor@samsung.com>
import sys
import os
os.environ['TF_CUDNN_DETERMINISTIC'] = '1'
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import dtypes
import struct
import random
DEBUG = True
# Fix the seeds across frameworks
SEED = 1234
tf.compat.v1.reset_default_graph()
random.seed(SEED)
tf.compat.v1.set_random_seed(SEED)
np.random.seed(SEED)
##
# @brief save data into file with filename
# @param[in] data The data to be saved
def save(filename, *data):
    """Serialize *data* to *filename* as raw float32 values.

    Each item is converted to a float32 numpy array and written to the file
    in order. Any pre-existing file is overwritten.

    @param[in] filename destination file path
    @param[in] data     one or more array-like items to serialize
    """
    # Opening with 'wb' truncates, replacing the original
    # remove-then-append-in-'ab'-mode dance with a single step.
    with open(filename, 'wb') as outfile:
        for item in data:
            np.array(item, dtype=np.float32).tofile(outfile)
            # Debug trace: items without a .shape attribute (plain lists,
            # scalars) are skipped, matching the original best-effort print
            # but without a bare ``except`` that could hide real errors.
            if hasattr(item, "shape"):
                print(item.shape, " data is generated")
                print(item)
##
# @brief generate random tensor
def gen_tensor(shape):
    """Return a float32 tensor of *shape* filled with random integers in [1, 10)."""
    values = np.random.randint(1, 10, size=shape)
    return values.astype(np.float32)
##
# @brief generate random data and save
# @param[in] outfile_name outfile_name
# @param[in] input_shape shape of input
# @param[in] savefile boolean save file
# @return data generted data
def gen_input(outfile_name, input_shape, savefile=True):
    """Generate a random float32 tensor and optionally persist it.

    @param[in] outfile_name destination file (used only when savefile is True)
    @param[in] input_shape  shape of the tensor to generate
    @param[in] savefile     when True, write the tensor via save()
    @return the generated tensor
    """
    tensor = gen_tensor(input_shape)
    if savefile:
        save(outfile_name, tensor)
    return tensor
##
# @brief conv2d layer forwarding with tensorflow
# @param[in] x input data
# @param[in] kerenl weight data
# @param[in] batch batch size of x
# @param[in] channel channel size of x
# @param[in] height height size of x
# @param[in] width width size of x
# @param[in] k_width width size of kernel
# @param[in] k_height height size of kernel
# @param[in] k_num batch size of kernel
# @param[in] stride stride size
# @param[in] pad padding : SAME VALID FULL
# @param[in] bias bias data
# @return tf_o calculated result
def conv2d_tf(x, kernel, batch, width, height, channel, k_width, k_height, k_num, stride, pad, bias, num_loop):
    """Run a Conv2D forward pass plus SGD(lr=1) updates and return goldens.

    Executes one initial train step, then ``num_loop`` further steps on the
    same input, returning the forward output and the (dx, dKernel, dBias)
    gradients of both the first and the last iteration.
    Assumes num_loop >= 1 — otherwise the *2 results are never assigned.
    """
    # Inputs arrive in NCHW (nntrainer layout); TF expects NHWC.
    x = np.transpose(x,[0,2,3,1])
    # Kernel arrives as (out_ch, in_ch, h, w); TF expects (h, w, in_ch, out_ch).
    kernel = np.transpose(kernel, [2,3,1,0])
    tf.compat.v1.reset_default_graph()
    input_shape = (batch, height, width, channel)
    tf_input = tf.compat.v1.placeholder(
        dtype=dtypes.float32, shape=input_shape, name='input')
    # Initialize the layer with the caller-provided weights so results are reproducible.
    kernel_w = tf.constant_initializer(kernel)
    bias_w = tf.constant_initializer(bias)
    conv2d_layer = tf.keras.layers.Conv2D(k_num, k_width, strides = stride, padding=pad, kernel_initializer=kernel_w, bias_initializer=bias_w)(tf_input)
    optimizer = tf.keras.optimizers.SGD(learning_rate = 1)
    trainable_variables = tf.compat.v1.trainable_variables()
    # Prepend the input so grad[0] is dx; grad[1:] are the weight gradients.
    all_variables = [tf_input] + trainable_variables
    grad = tf.gradients(conv2d_layer, all_variables)
    train_op = optimizer.apply_gradients(list(zip(grad[1:], trainable_variables)))
    with tf.compat.v1.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # First iteration: forward, gradients, and a weight update.
        conv2d_result, grad_result, _ = sess.run([conv2d_layer, grad, train_op], feed_dict={tf_input: x})
        # Additional iterations; only the last one's results are kept.
        for i in range(0,num_loop):
            conv2d_result2, grad_result2, _ = sess.run([conv2d_layer, grad, train_op], feed_dict={tf_input: x})
        if DEBUG:
            for item, input_variable in zip(grad_result, all_variables):
                print(input_variable.name)
                print(item.shape)
            for item, input_variable in zip(grad_result2, all_variables):
                print(input_variable.name)
                print(item.shape)
    # (forward, dx, dKernel, dBias) for the first and the last iteration.
    return conv2d_result, grad_result[0], grad_result[1], grad_result[2], \
           conv2d_result2, grad_result2[0], grad_result2[1], grad_result2[2]
##
# @brief conv2d layer forwarding with tensorflow
# @param[in] x input data
# @param[in] kernel weight data
# @param[in] kernel2 weight data
# @return tf_o calculated result
def conv2d_tf_2(x, kernel, bias, kernel2, bias2):
    """Run a fixed two-layer conv network (5x5x6 then 1x1x12) on a 1x3x28x28 input.

    Returns the forward outputs of both layers and the gradient list
    [dx, dKernel1, dBias1, dKernel2, dBias2] after one SGD(lr=1) step.
    """
    # Inputs arrive in NCHW; TF expects NHWC.
    x = np.transpose(x,[0,2,3,1])
    # Kernels arrive as (out_ch, in_ch, h, w); TF expects (h, w, in_ch, out_ch).
    kernel = np.transpose(kernel, [2,3,1,0])
    kernel2 = np.transpose(kernel2, [2,3,1,0])
    tf.compat.v1.reset_default_graph()
    # Network geometry is hard-coded to match the nntrainer test model.
    input_shape = (1, 28, 28, 3)
    tf_input = tf.compat.v1.placeholder(
        dtype=dtypes.float32, shape=input_shape, name='input')
    kernel_w = tf.constant_initializer(kernel)
    kernel_w2 = tf.constant_initializer(kernel2)
    bias_w = tf.constant_initializer(bias)
    bias_w2 = tf.constant_initializer(bias2)
    conv2d_layer = tf.keras.layers.Conv2D(6, 5, kernel_initializer=kernel_w, bias_initializer=bias_w)(tf_input)
    conv2d_layer2 = tf.keras.layers.Conv2D(12, 1, kernel_initializer=kernel_w2, bias_initializer=bias_w2)(conv2d_layer)
    optimizer = tf.keras.optimizers.SGD(learning_rate = 1)
    trainable_variables = tf.compat.v1.trainable_variables()
    # Prepend the input so grad[0] is dx; grad[1:] pair up with the variables.
    all_variables = [tf_input] + trainable_variables
    grad = tf.gradients(conv2d_layer2, all_variables)
    train_op = optimizer.apply_gradients(list(zip(grad[1:], trainable_variables)))
    with tf.compat.v1.Session() as sess:
        sess.run(tf.global_variables_initializer())
        conv2d_result, conv2d_result2, grad_result, _ = sess.run([conv2d_layer, conv2d_layer2, grad, train_op], feed_dict={tf_input: x})
        if DEBUG:
            for item, input_variable in zip(grad_result, all_variables):
                print(input_variable.name)
                print(item.shape)
    return conv2d_result, conv2d_result2, grad_result
def pooling2d_tf(x, pool_size, stride, padding, pooling):
    """Run a pooling layer forward pass and return (output, dInput).

    *pooling* selects the variant: "max", "average", "global_max" or
    "global_average".
    NOTE(review): the *padding* parameter is currently ignored — the local
    pooling variants hard-code padding="valid"; an unknown *pooling* string
    leaves pooling2d_layer unbound and raises NameError below. Confirm this
    is intentional before relying on the signature.
    """
    # Input arrives in NCHW; TF expects NHWC.
    x = np.transpose(x, [0,2,3,1])
    tf.compat.v1.reset_default_graph()
    input_shape = x.shape
    tf_input=tf.compat.v1.placeholder(dtype=dtypes.float32, shape=input_shape, name='input')
    if (pooling == "max"):
        pooling2d_layer=tf.keras.layers.MaxPooling2D(pool_size=pool_size, strides =stride, padding = "valid")(tf_input)
    elif (pooling == "average"):
        pooling2d_layer=tf.keras.layers.AveragePooling2D(pool_size=pool_size, strides =stride, padding = "valid")(tf_input)
    elif (pooling == "global_max"):
        pooling2d_layer=tf.keras.layers.GlobalMaxPooling2D()(tf_input)
    elif (pooling == "global_average"):
        pooling2d_layer=tf.keras.layers.GlobalAveragePooling2D()(tf_input)
    # Pooling layers have no trainable variables; this collection is expected
    # to be empty, so grad[0] below is the gradient w.r.t. the input.
    pooling2d_variables = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
    input_variables = [tf_input] + pooling2d_variables
    grad = tf.gradients(pooling2d_layer, input_variables)
    with tf.compat.v1.Session() as sess:
        sess.run(tf.global_variables_initializer())
        pooling2d_result = sess.run(pooling2d_layer, feed_dict={tf_input:x})
        grad_result = sess.run(grad, feed_dict={tf_input:x})
        if DEBUG:
            for item, input_variable in zip(grad_result, input_variables):
                print(input_variable.name)
                print(item)
    return pooling2d_result, grad_result[0]
##
# Tested with tensorflow 1.x (1.14.0 and above)
# @brief fc layer forwarding with tensorflow
# @param[in] x input data
# @param[in] kernel weight data
# @param[in] bias bias data
# @param[in] activation activation after the operation
# @return tf_o calculated result
def fc_tf_simplified_backward(x, kernel, label, bias, activation, opt):
    """Run one optimizer step on a single Dense layer and return its goldens.

    @param[in] x          input tensor
    @param[in] kernel     initial weight matrix (in_dim, out_dim)
    @param[in] label      unused; kept for signature compatibility with fc_tf
    @param[in] bias       initial bias vector
    @param[in] activation activation applied by the Dense layer (may be None)
    @param[in] opt        'sgd' or 'adam' (learning rate fixed at 1)
    @return [forward_output, [dx, dW, dB], [updated_W, updated_B]]
    @raise ValueError for an unknown optimizer name
    """
    tf.compat.v1.reset_default_graph()
    tf_input = tf.placeholder(dtype = dtypes.float32, shape=x.shape)
    # Initialize the layer with the caller-provided weights for reproducibility.
    fc_out = tf.keras.layers.Dense(kernel.shape[-1],
        activation=activation,
        use_bias=True,
        kernel_initializer=tf.constant_initializer(kernel),
        bias_initializer=tf.constant_initializer(bias),
        input_shape=tf_input.shape)(tf_input)
    trainable_variables = tf.compat.v1.trainable_variables()
    # Prepend the input so tf_grad[0] is dx; tf_grad[1:] pair with the variables.
    all_variables = [tf_input] + trainable_variables
    if opt == 'sgd':
        optimizer = tf.keras.optimizers.SGD(learning_rate = 1)
    elif opt == 'adam':
        optimizer = tf.keras.optimizers.Adam(learning_rate = 1,beta_1=0.9, beta_2=0.999, epsilon=1.0e-7)
    else:
        # BUG FIX: raising a plain string is a TypeError in Python 3;
        # raise a proper exception instead.
        raise ValueError('unknown optimizer')
    tf_grad = tf.gradients(fc_out, all_variables)
    train_op = optimizer.apply_gradients(list(zip(tf_grad[1:], trainable_variables)))
    with tf.compat.v1.Session() as sess:
        with tf.compat.v1.variable_scope('fc'):
            sess.run(tf.compat.v1.global_variables_initializer())
            tf_outs = sess.run([fc_out, tf_grad, train_op], feed_dict={tf_input: x})
            new_w = sess.run(trainable_variables)
            # Replace the (None) train_op entry with the updated weights.
            tf_outs = tf_outs[:-1] + [new_w]
    if DEBUG:
        print("FC simplified backward with activation.")
        print(tf_outs[0].shape)
        print(tf_outs[1][0].shape)
        print(tf_outs[1][1].shape)
        print(tf_outs[1][2].shape)
        print(tf_outs[2][0].shape)
        print(tf_outs[2][1].shape)
        print("-------------------")
        print(tf_outs[0])
        print(tf_outs[1][0])
        print(tf_outs[1][1])
        print(tf_outs[1][2])
        print(tf_outs[2][0])
        print(tf_outs[2][1])
        print("-------------------")
    return tf_outs
##
# Tested with tensorflow 1.x (1.14.0 and above)
# @brief fc layer forwarding and training with tensorflow
# @param[in] x input data
# @param[in] kernel weight data
# @param[in] bias bias data
# @param[in] activation activation after the operation
# @param[in] train train a few steps
# @return tf_o calculated result
def fc_tf(x, kernel, label, bias, activation, train=False, loss='mse', opt='sgd'):
    """Run a single Dense layer with a loss, optionally one training step.

    @param[in] x          input tensor
    @param[in] kernel     initial weight matrix (in_dim, out_dim)
    @param[in] label      target tensor fed to the loss
    @param[in] bias       initial bias vector
    @param[in] activation activation (for loss='cross' it is folded into the
                          fused cross-entropy op and re-applied to the logits)
    @param[in] train      when True, apply one optimizer step
    @param[in] loss       'mse' or 'cross'
    @param[in] opt        'sgd' or 'adam' (only used when train is True)
    @return when train: [output, loss, [dx, dW, dB], [updated_W, updated_B]];
            otherwise: [output, loss]
    @raise ValueError for an unknown loss, optimizer, or activation/loss combo
    """
    lr = 1
    tf.compat.v1.reset_default_graph()
    tf_input = tf.placeholder(dtype = dtypes.float32, shape=x.shape)
    # For cross entropy the activation is applied inside the fused loss op,
    # so strip it from the Dense layer and remember it for later.
    if (loss == 'cross'):
        stored_act = activation
        activation = None
    with tf.compat.v1.Session() as sess:
        with tf.compat.v1.variable_scope('fc'):
            model = tf.keras.Sequential([tf.keras.layers.Dense(kernel.shape[-1],
                activation=activation,
                use_bias=True,
                kernel_initializer=tf.constant_initializer(kernel),
                bias_initializer=tf.constant_initializer(bias),
                input_shape=tf_input.shape)])
            tf_logit = model(tf_input, training=train)
            tf_label = tf.placeholder(dtype = dtypes.float32, shape=label.shape)
            if loss == 'mse':
                tf_loss = tf.reduce_mean(tf.keras.losses.MSE(tf_label, tf_logit))
            elif loss == 'cross':
                if stored_act == tf.nn.sigmoid:
                    tf_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                        labels=tf_label, logits=tf_logit))
                elif stored_act == tf.nn.softmax:
                    tf_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
                        labels=tf_label, logits=tf_logit))
                else:
                    # BUG FIX: raising plain strings is a TypeError in
                    # Python 3; raise proper exceptions (three sites below).
                    raise ValueError('unknown activation with cross entropy')
                # Re-apply the stripped activation so the returned output
                # matches what the full activated layer would produce.
                tf_logit = stored_act(tf_logit)
            else:
                raise ValueError('unknown loss')
            if train:
                if opt == 'sgd':
                    optimizer = tf.keras.optimizers.SGD(learning_rate=lr)
                elif opt == 'adam':
                    optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
                else:
                    raise ValueError('unknown optimizer')
                trainable_variables = tf.compat.v1.trainable_variables()
                if DEBUG:
                    print([x.name for x in trainable_variables])
                # Prepend the input so tf_grad[0] is dx.
                tf_grad = optimizer.get_gradients(tf_loss, params=[tf_input] + trainable_variables)
                train_op = optimizer.apply_gradients(list(zip(tf_grad[1:], trainable_variables)))
                var_to_run = [tf_logit, tf_loss, tf_grad, train_op]
                feed_dict = {tf_input: x, tf_label: label}
            else:
                var_to_run = [tf_logit, tf_loss]
                feed_dict = {tf_input: x, tf_label: label}
            sess.run(tf.compat.v1.global_variables_initializer())
            if DEBUG:
                old_w = sess.run(tf.compat.v1.trainable_variables())
            tf_outs = sess.run(var_to_run, feed_dict = feed_dict)
            if DEBUG:
                new_w = sess.run(tf.compat.v1.trainable_variables())
            if (train):
                # Replace the train_op value with updated weights
                tf_outs = tf_outs[:-1]
                tf_outs.append(sess.run(tf.compat.v1.trainable_variables()))
            # tf outs contain :
            # 1. forward output numpy array
            # 2. final loss value
            # 3. gradient for weights in list form
            # 4. updated weights in list form
            if DEBUG:
                print(tf_outs[0].shape)
                print(tf_outs[1].shape)
                print(tf_outs[2][0].shape)
                print(tf_outs[2][1].shape)
                print(tf_outs[3][0].shape)
                print(tf_outs[3][1].shape)
            if DEBUG:
                if opt == 'sgd':
                    # NOTE(review): this assert compares the truthiness of
                    # .all() on both sides (and uses 0.1 although lr is 1),
                    # so it is effectively vacuous — confirm and tighten if
                    # it is meant to verify the SGD update.
                    assert(np.isclose(new_w[1].all(), (old_w[1] - (tf_outs[2][1] * 0.1)).all()))
                    print(old_w[1])
                    print(new_w[1])
                    print(tf_outs[2][1])
            return tf_outs
##
# tested with tf 1.14.0
# @param[in] x input
# @param[in] trainable
# @return input_variables, bn output, output_variables, grad_result (0. dx / 1. gamma / 2. beta / 3. mean / 4. variance)
# for updated_gamma, updated_beta, x <- x - grad is used for easier calculation
def bn_tf(x, *, trainable=True, init_beta=gen_tensor, init_gamma=gen_tensor, axis=[1, 2, 3]):
    """Run a BatchNormalization forward pass and return its goldens.

    Returns (input_variables, [output, updated_gamma, updated_beta],
    grad_result) where grad_result is ordered [dx, dgamma, dbeta, dmean,
    dvariance] as described in the header comment above.
    NOTE(review): the *init_beta* and *init_gamma* parameters are accepted
    but unused — the layer always initializes from gen_tensor directly; the
    mutable default ``axis=[1, 2, 3]`` is never mutated but is an
    anti-pattern worth cleaning up.
    """
    tf.compat.v1.reset_default_graph()
    tf_input = tf.compat.v1.placeholder(
        dtype=dtypes.float32, shape=x.shape, name='input')
    # momentum=1.0 keeps the moving mean/variance at their initial values.
    bnlayer = tf.keras.layers.BatchNormalization(
        axis=axis,
        trainable=trainable,
        momentum=1.0,
        gamma_initializer=gen_tensor,
        beta_initializer=gen_tensor)(tf_input)
    bn_variables = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
                                               scope='batch_normalization')
    # Prepend the input so grad[0] is dx; the rest follow the variable order.
    input_variables = [tf_input] + bn_variables
    grad = tf.gradients(bnlayer, input_variables)
    f_dict = {tf_input: x, tf.keras.backend.learning_phase(): trainable}
    with tf.compat.v1.Session() as sess:
        with tf.compat.v1.variable_scope('bn'):
            sess.run(tf.compat.v1.global_variables_initializer())
            old_var = sess.run(input_variables, feed_dict=f_dict)
            bn_result = sess.run(bnlayer, feed_dict=f_dict)
            grad_result = sess.run(grad, feed_dict=f_dict)
            # Plain x <- x - grad update (see header comment) for easy
            # comparison on the nntrainer side.
            updated_gamma = sess.run(input_variables[1] - grad_result[1])
            updated_beta = sess.run(input_variables[2] - grad_result[2])
            output_variables = [bn_result, updated_gamma, updated_beta]
            if DEBUG:
                print("======================================")
                print("Input:\n %s\n Output:\n %s" % (x[0], bn_result[0]))
                print("dx: %s" % grad_result[0][0][0])
                print("gradient of gamma: %s" % grad_result[1][0][0], grad_result[1].shape)
                print("gradient of beta: %s" % grad_result[2][0][0], grad_result[2].shape)
                print("======================================")
    return old_var, output_variables, grad_result
def gen_test_case_conv(i_b, i_c, i_h, i_w, k_c, k_h, k_w, padding, stride, bias, base_name, num_loop):
    """Generate conv2d golden data: input, kernel+bias, and forward/gradient results."""
    x = gen_input(base_name + "_conv2DLayer.in", [i_b, i_c, i_h, i_w])
    kernel = gen_input(base_name + "_conv2DKernel.in", [k_c, i_c, k_h, k_w])
    # Append the bias values to the kernel file.
    with open(base_name + "_conv2DKernel.in", 'ab') as outfile:
        np.array(bias, dtype=np.float32).tofile(outfile)
    results = conv2d_tf(x, kernel, i_b, i_h, i_w, i_c, k_h, k_w, k_c, stride, padding, bias, num_loop)
    (golden_conv, golden_grad_input, golden_grad_kernel, golden_grad_bias,
     golden_conv2, golden_grad_input2, golden_grad_kernel2, golden_grad_bias2) = results
    # First-iteration goldens (activations NHWC->NCHW, kernel back to nntrainer layout).
    save(base_name + "_goldenConv2DResult.out", np.transpose(golden_conv, (0, 3, 1, 2)))
    save(base_name + "_goldenInputGrad.out", np.transpose(golden_grad_input, (0, 3, 1, 2)))
    save(base_name + "_goldenKernelGrad.out", np.transpose(golden_grad_kernel, (3, 2, 0, 1)))
    save(base_name + "_goldenBiasGrad.out", golden_grad_bias)
    # Goldens after the extra training iterations.
    save(base_name + "_goldenConv2DResult2.out", np.transpose(golden_conv2, (0, 3, 1, 2)))
    save(base_name + "_goldenInputGrad2.out", np.transpose(golden_grad_input2, (0, 3, 1, 2)))
    save(base_name + "_goldenKernelGrad2.out", np.transpose(golden_grad_kernel2, (3, 2, 0, 1)))
    save(base_name + "_goldenBiasGrad2.out", golden_grad_bias2)
def gen_test_case_conv_2layers(base_name):
    """Generate golden data for a fixed two-layer conv network (5x5x6 then 1x1x12)."""
    layer_input = gen_input(base_name + "_conv2DLayer.in", [1, 3, 28, 28])
    first_kernel = gen_input(base_name + "_conv2DKernel.in", [6, 3, 5, 5])
    first_bias = np.ones(6)
    # Append each layer's bias to its kernel file.
    with open(base_name + "_conv2DKernel.in", 'ab') as outfile:
        np.array(first_bias, dtype=np.float32).tofile(outfile)
    second_kernel = gen_input(base_name + "_conv2DKernel2.in", [12, 6, 1, 1])
    second_bias = np.ones(12)
    with open(base_name + "_conv2DKernel2.in", 'ab') as outfile:
        np.array(second_bias, dtype=np.float32).tofile(outfile)
    golden_conv, golden_conv2, golden_grads = conv2d_tf_2(
        layer_input, first_kernel, first_bias, second_kernel, second_bias)
    # Convert activations NHWC->NCHW and kernels back to nntrainer layout.
    save(base_name + "_goldenConv2DResult.out", np.transpose(golden_conv, (0, 3, 1, 2)))
    save(base_name + "_goldenConv2DResult2.out", np.transpose(golden_conv2, (0, 3, 1, 2)))
    save(base_name + "_goldenInputGrad.out", np.transpose(golden_grads[0], (0, 3, 1, 2)))
    save(base_name + "_goldenKernelGrad.out", np.transpose(golden_grads[1], (3, 2, 0, 1)))
    save(base_name + "_goldenBiasGrad.out", golden_grads[2])
    save(base_name + "_goldenKernel2Grad.out", np.transpose(golden_grads[3], (3, 2, 0, 1)))
    save(base_name + "_goldenBias2Grad.out", golden_grads[4])
def gen_test_case_pooling(input_shape, pool_size, stride, padding, pooling, base_name, gen_in):
    """Generate golden forward/gradient data for the given pooling variant."""
    if gen_in:
        input_data = gen_input(base_name + ".in", input_shape)
    else:
        # Reuse a previously generated input file.
        with open(base_name + ".in", 'rb') as f:
            input_data = np.fromfile(f, dtype=np.float32)
        input_data = np.reshape(input_data, input_shape)
    golden_pooling, golden_grad_input = pooling2d_tf(input_data, pool_size, stride, padding, pooling)
    if pooling in ("global_average", "global_max"):
        # Global pooling collapses the spatial dims; no layout conversion needed.
        save(base_name + "_goldenPooling2D" + pooling + ".out", golden_pooling)
    else:
        save(base_name + "_goldenPooling2D" + pooling + ".out", np.transpose(golden_pooling, (0, 3, 1, 2)))
    save(base_name + "_goldenPooling2D" + pooling + "Grad.out", np.transpose(golden_grad_input, (0, 3, 1, 2)))
##
# @brief generate fc test case data for forward and backward pass with loss
def gen_test_case_fc(input_shape, kernel_shape, base_name):
    """Generate FC-layer golden data for every activation/loss/optimizer combo.

    Writes the input, label and kernel+bias files, then one set of golden
    forward/loss/gradient/updated-weight files per scenario.
    """
    input_data = gen_input(base_name + "_FCLayer.in", input_shape)
    label = gen_input(base_name + "_FCLabel.in", input_shape[:-1] + [kernel_shape[-1]])
    kernel = gen_input(base_name + "_FCKernel.in", kernel_shape)
    # Bias is appended to the kernel file rather than saved separately.
    bias = gen_input(base_name + "_FCKernel.in", kernel_shape[-1:], savefile=False)
    with open(base_name+"_FCKernel.in", 'ab') as outfile:
        np.array(bias, dtype=np.float32).tofile(outfile)
    # Case: no activation, Adam (simplified backward, no loss attached).
    golden_fc_simplified = fc_tf_simplified_backward(input_data, kernel, label, bias, activation=None, opt='adam')
    save(base_name + "_goldenFCAdam.out", golden_fc_simplified[0])
    save(base_name + "_goldenFCGradientAdam.out", golden_fc_simplified[1][0])
    save(base_name + "_goldenFCUpdatedWeightAdam.out", golden_fc_simplified[2][0])
    save(base_name + "_goldenFCUpdatedBiasAdam.out", golden_fc_simplified[2][1])
    # Case: no activation, MSE loss, SGD.
    golden_fc = fc_tf(input_data, kernel, label, bias, activation=None, train=True, loss='mse', opt='sgd')
    save(base_name + "_goldenFCResultActNone.out", golden_fc[0])
    save(base_name + "_goldenFCLossActNoneMse.out", golden_fc[1])
    save(base_name + "_goldenFCGradientDxActNoneMse.out", golden_fc[2][0])
    save(base_name + "_goldenFCGradientsActNoneMse.out", golden_fc[2][1], golden_fc[2][2])
    save(base_name + "_goldenFCUpdatedWeightsActNoneMse.out", golden_fc[3][0], golden_fc[3][1])
    # Cross-check: simplified backward must match the loss-attached forward.
    # NOTE(review): these asserts compare the truthiness of .all() on both
    # sides and are effectively vacuous — confirm intent.
    golden_fc_simplified = fc_tf_simplified_backward(input_data, kernel, label, bias, activation=None, opt='sgd' )
    assert(golden_fc_simplified[0].all() == golden_fc[0].all())
    save(base_name + "_goldenFCGradientDxActNone.out", golden_fc_simplified[1][0])
    save(base_name + "_goldenFCGradientsActNone.out", golden_fc_simplified[1][1], golden_fc_simplified[1][2])
    save(base_name + "_goldenFCUpdatedWeightsActNone.out", golden_fc_simplified[2][0], golden_fc_simplified[2][1])
    # Case: sigmoid activation, MSE loss, SGD.
    golden_fc = fc_tf(input_data, kernel, label, bias, activation=tf.nn.sigmoid, train=True, loss='mse', opt='sgd')
    save(base_name + "_goldenFCResultSigmoidMse.out", golden_fc[0])
    save(base_name + "_goldenFCLossSigmoidMse.out", golden_fc[1])
    save(base_name + "_goldenFCGradientDxSigmoidMse.out", golden_fc[2][0])
    save(base_name + "_goldenFCGradientsSigmoidMse.out", golden_fc[2][1], golden_fc[2][2])
    save(base_name + "_goldenFCUpdatedWeightsSigmoidMse.out", golden_fc[3][0], golden_fc[3][1])
    golden_fc_simplified = fc_tf_simplified_backward(input_data, kernel, label, bias, activation=tf.nn.sigmoid, opt='sgd')
    assert(golden_fc_simplified[0].all() == golden_fc[0].all())
    save(base_name + "_goldenFCGradientDxSigmoid.out", golden_fc_simplified[1][0])
    save(base_name + "_goldenFCGradientsSigmoid.out", golden_fc_simplified[1][1], golden_fc_simplified[1][2])
    save(base_name + "_goldenFCUpdatedWeightsSigmoid.out", golden_fc_simplified[2][0], golden_fc_simplified[2][1])
    # Case: softmax activation, MSE loss, SGD.
    golden_fc = fc_tf(input_data, kernel, label, bias, activation=tf.nn.softmax, train=True, loss='mse', opt='sgd')
    save(base_name + "_goldenFCResultSoftmaxMse.out", golden_fc[0])
    save(base_name + "_goldenFCLossSoftmaxMse.out", golden_fc[1])
    save(base_name + "_goldenFCGradientDxSoftmaxMse.out", golden_fc[2][0])
    save(base_name + "_goldenFCGradientsSoftmaxMse.out", golden_fc[2][1], golden_fc[2][2])
    save(base_name + "_goldenFCUpdatedWeightsSoftmaxMse.out", golden_fc[3][0], golden_fc[3][1])
    golden_fc_simplified = fc_tf_simplified_backward(input_data, kernel, label, bias, activation=tf.nn.softmax, opt='sgd')
    assert(golden_fc_simplified[0].all() == golden_fc[0].all())
    save(base_name + "_goldenFCGradientDxSoftmax.out", golden_fc_simplified[1][0])
    save(base_name + "_goldenFCGradientsSoftmax.out", golden_fc_simplified[1][1], golden_fc_simplified[1][2])
    save(base_name + "_goldenFCUpdatedWeightsSoftmax.out", golden_fc_simplified[2][0], golden_fc_simplified[2][1])
    # Case: sigmoid activation, cross entropy, SGD.
    golden_fc = fc_tf(input_data, kernel, label, bias, activation=tf.nn.sigmoid, train=True, loss='cross', opt='sgd')
    save(base_name + "_goldenFCResultSigmoidCross.out", golden_fc[0])
    save(base_name + "_goldenFCLossSigmoidCross.out", golden_fc[1])
    save(base_name + "_goldenFCGradientDxSigmoidCross.out", golden_fc[2][0])
    save(base_name + "_goldenFCGradientsSigmoidCross.out", golden_fc[2][1], golden_fc[2][2])
    save(base_name + "_goldenFCUpdatedWeightsSigmoidCross.out", golden_fc[3][0], golden_fc[3][1])
    # Case: softmax activation, cross entropy, SGD.
    golden_fc = fc_tf(input_data, kernel, label, bias, activation=tf.nn.softmax, train=True, loss='cross', opt='sgd')
    save(base_name + "_goldenFCResultSoftmaxCross.out", golden_fc[0])
    save(base_name + "_goldenFCLossSoftmaxCross.out", golden_fc[1])
    save(base_name + "_goldenFCGradientDxSoftmaxCross.out", golden_fc[2][0])
    save(base_name + "_goldenFCGradientsSoftmaxCross.out", golden_fc[2][1], golden_fc[2][2])
    save(base_name + "_goldenFCUpdatedWeightsSoftmaxCross.out", golden_fc[3][0], golden_fc[3][1])
    # Case: softmax activation, cross entropy, Adam.
    golden_fc = fc_tf(input_data, kernel, label, bias, activation=tf.nn.softmax, train=True, loss='cross', opt='adam')
    save(base_name + "_goldenFCResultSoftmaxCrossAdam.out", golden_fc[0])
    save(base_name + "_goldenFCLossSoftmaxCrossAdam.out", golden_fc[1])
    save(base_name + "_goldenFCGradientDxSoftmaxCrossAdam.out", golden_fc[2][0])
    save(base_name + "_goldenFCGradientsSoftmaxCrossAdam.out", golden_fc[2][1], golden_fc[2][2])
    save(base_name + "_goldenFCUpdatedWeightsSoftmaxCrossAdam.out", golden_fc[3][0], golden_fc[3][1])
def gen_test_case_bn(input_shape, base_name, training=True):
input_data = gen_input(base_name + "_BNLayerInput.in", input_shape)
input_variables, output_variables, grad = bn_tf(input_data)
# mu / var / gamma / beta
save(base_name + "_BNLayerWeights.in", input_variables[3], input_variables[4], input_variables[1], input_variables[2])
save(base_name + "_goldenBNResultForward.out", output_variables[0])
# todo: change 0 to initial moving avg / std in case of training
save(base_name + "_goldenBNLayerAfterUpdate.out", 0, 0, output_variables[1], output_variables[2])
save(base_name + "_goldenBNLayerBackwardDx.out", grad[0])
if __name__ == "__main__":
target = sys.argv[1]
# Input File Generation with given info
if target == "gen_tensor":
if len(sys.argv) != 7 :
print('wrong argument : 1 filename, batch, channel, height, width')
exit()
gen_input(sys.argv[2], [int(sys.argv[3]), int(sys.argv[4]), int(sys.argv[5]), int(sys.argv[6])])
# Convolution Test Case : Generate input & kernel & golden data
# first unit test case : 1, 3, 7, 7, 2, 3, 3, VALID, 1 test_1_
# : Input Dimension (1, 3, 7, 7)
# : Kernel (2, 3, 3, 3)
# : output (1,2,5,5)
# : stride 1, 1
# : padding 0, 0 (VALID)
if target == "conv2d_1":
bias1 = [1.0, 1.0]
gen_test_case_conv(1, 3, 7, 7, 2, 3, 3, "VALID", 1, bias1, "tc_conv2d_1", 4)
# second unit test case : 2, 3, 7, 7, 3, 3, 3, VALID, 1 test_2_
# : Input Dimension (2, 3, 7, 7)
# : Kernel (3, 3, 3, 3)
# : output (1,3,5,5)
# : stride 1, 1
# : padding 0, 0 (VALID)
if target == "conv2d_2":
bias2 = [1.0, 1.0, 1.0]
gen_test_case_conv(2, 3, 7, 7, 3, 3, 3, "VALID", 1, bias2, "tc_conv2d_2", 4)
if target == "conv2d_2layers":
gen_test_case_conv_2layers("tc_conv2d_int")
# FC layer unit test case:
if target == "fc_1":
gen_test_case_fc(input_shape = [3, 1, 1, 12],
kernel_shape = [12, 15],
base_name = "tc_fc_1")
# Bn layer unit test case:
if target == "bn_1":
gen_test_case_bn(input_shape = [3, 1, 4, 5], base_name = "tc_bn_1")
if target == "pooling2d_1":
gen_test_case_pooling(input_shape = [1,2,5,5], pool_size=[2,2], stride=[1,1], padding=[0,0], pooling="max", base_name="tc_pooling2d_1", gen_in=True)
gen_test_case_pooling(input_shape = [1,2,5,5], pool_size=[2,2], stride=[1,1], padding=[0,0], pooling="average", base_name="tc_pooling2d_1", gen_in=False)
gen_test_case_pooling(input_shape = [1,2,5,5], pool_size=[2,2], stride=[1,1], padding=[0,0], pooling="global_max", base_name="tc_pooling2d_1", gen_in=False)
gen_test_case_pooling(input_shape = [1,2,5,5], pool_size=[2,2], stride=[1,1], padding=[0,0], pooling="global_average", base_name="tc_pooling2d_1", gen_in=False)
if target == "pooling2d_2":
gen_test_case_pooling(input_shape = [2,2,5,5], pool_size=[2,2], stride=[1,1], padding=[0,0], pooling="max", base_name="tc_pooling2d_2", gen_in=True)
gen_test_case_pooling(input_shape = [2,2,5,5], pool_size=[2,2], stride=[1,1], padding=[0,0], pooling="average", base_name="tc_pooling2d_2", gen_in=False)
gen_test_case_pooling(input_shape = [2,2,5,5], pool_size=[2,2], stride=[1,1], padding=[0,0], pooling="global_max", base_name="tc_pooling2d_2", gen_in=False)
gen_test_case_pooling(input_shape = [2,2,5,5], pool_size=[2,2], stride=[1,1], padding=[0,0], pooling="global_average", base_name="tc_pooling2d_2", gen_in=False)
| 46.69697 | 168 | 0.670416 |
bae2247b11d702441967ff83b531a915fef772f8 | 27,758 | py | Python | sdk/digitaltwins/azure-digitaltwins-core/azure/digitaltwins/core/_digitaltwins_client.py | adewaleo/azure-sdk-for-python | 169457edbea5e3c5557246cfcf8bd635d528bae4 | [
"MIT"
] | null | null | null | sdk/digitaltwins/azure-digitaltwins-core/azure/digitaltwins/core/_digitaltwins_client.py | adewaleo/azure-sdk-for-python | 169457edbea5e3c5557246cfcf8bd635d528bae4 | [
"MIT"
] | null | null | null | sdk/digitaltwins/azure-digitaltwins-core/azure/digitaltwins/core/_digitaltwins_client.py | adewaleo/azure-sdk-for-python | 169457edbea5e3c5557246cfcf8bd635d528bae4 | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import uuid
from datetime import datetime
from typing import Dict, List, Any, TYPE_CHECKING
from azure.core.paging import ItemPaged
from azure.core.tracing.decorator import distributed_trace
from azure.core import MatchConditions
from ._utils import (
prep_if_match,
prep_if_none_match
)
from ._generated.models import (
QuerySpecification,
DigitalTwinsAddOptions,
DigitalTwinsDeleteOptions,
DigitalTwinsUpdateOptions,
DigitalTwinsUpdateComponentOptions,
DigitalTwinsDeleteRelationshipOptions,
DigitalTwinsUpdateRelationshipOptions,
DigitalTwinsAddRelationshipOptions
)
from ._generated import AzureDigitalTwinsAPI
if TYPE_CHECKING:
from azure.core.credentials import TokenCredential
from ._generated.models import DigitalTwinsModelData
class DigitalTwinsClient(object): # pylint: disable=too-many-public-methods
"""Creates an instance of the Digital Twins client.
:param str endpoint: The URL endpoint of an Azure search service
:param ~azure.core.credentials.TokenCredential credential:
A credential to authenticate requests to the service
"""
def __init__(self, endpoint, credential, **kwargs):
# type: (str, TokenCredential, **Any) -> None
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
self._client = AzureDigitalTwinsAPI(
credential=credential,
base_url=endpoint,
**kwargs
)
@distributed_trace
def get_digital_twin(self, digital_twin_id, **kwargs):
# type: (str, **Any) -> Dict[str, object]
"""Get a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:return: Dictionary containing the twin.
:rtype: Dict[str, object]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError:
If the digital twin doesn't exist.
"""
return self._client.digital_twins.get_by_id(
digital_twin_id,
**kwargs
)
@distributed_trace
def upsert_digital_twin(self, digital_twin_id, digital_twin, **kwargs):
# type: (str, Dict[str, object], **Any) -> Dict[str, object]
"""Create or update a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:param Dict[str, object] digital_twin:
Dictionary containing the twin to create or update.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: Dictionary containing the created or updated twin.
:rtype: Dict[str, object]
:raises ~azure.core.exceptions.HttpResponseError
:raises ~azure.core.exceptions.ResourceExistsError: If the digital twin already exists.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_none_match, error_map = prep_if_none_match(etag, match_condition)
if if_none_match:
options = DigitalTwinsAddOptions(if_none_match=if_none_match)
return self._client.digital_twins.add(
digital_twin_id,
digital_twin,
digital_twins_add_options=options,
error_map=error_map,
**kwargs
)
@distributed_trace
def update_digital_twin(self, digital_twin_id, json_patch, **kwargs):
# type: (str, List[Dict[str, object]], **Any) -> None
"""Update a digital twin using a json patch.
:param str digital_twin_id: The ID of the digital twin.
:param List[Dict[str, object]] json_patch: An update specification described by JSON Patch.
Updates to property values and $model elements may happen in the same request.
Operations are limited to `add`, `replace` and `remove`.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError:
If there is no digital twin with the provided ID.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_match, error_map = prep_if_match(etag, match_condition)
if if_match:
options = DigitalTwinsUpdateOptions(if_match=if_match)
return self._client.digital_twins.update(
digital_twin_id,
json_patch,
digital_twins_update_options=options,
error_map=error_map,
**kwargs
)
@distributed_trace
def delete_digital_twin(self, digital_twin_id, **kwargs):
# type: (str, **Any) -> None
"""Delete a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError:
If there is no digital twin with the provided ID.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_match, error_map = prep_if_match(etag, match_condition)
if if_match:
options = DigitalTwinsDeleteOptions(if_match=if_match)
return self._client.digital_twins.delete(
digital_twin_id,
digital_twins_delete_options=options,
error_map=error_map,
**kwargs
)
@distributed_trace
def get_component(self, digital_twin_id, component_name, **kwargs):
# type: (str, str, **Any) -> Dict[str, object]
"""Get a component on a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:param str component_name: The component being retrieved.
:return: Dictionary containing the component.
:rtype: Dict[str, object]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is either no
digital twin with the provided ID or the component path is invalid.
"""
return self._client.digital_twins.get_component(
digital_twin_id,
component_name,
**kwargs
)
@distributed_trace
def update_component(
self,
digital_twin_id,
component_name,
json_patch,
**kwargs
):
# type: (str, str, List[Dict[str, object]], **Any) -> None
"""Update properties of a component on a digital twin using a JSON patch.
:param str digital_twin_id: The ID of the digital twin.
:param str component_name: The component being updated.
:param List[Dict[str, object]] json_patch: An update specification described by JSON Patch.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is either no
digital twin with the provided ID or the component path is invalid.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_match, error_map = prep_if_match(etag, match_condition)
if if_match:
options = DigitalTwinsUpdateComponentOptions(if_match=if_match)
return self._client.digital_twins.update_component(
digital_twin_id,
component_name,
patch_document=json_patch,
digital_twins_update_component_options=options,
error_map=error_map,
**kwargs
)
@distributed_trace
def get_relationship(self, digital_twin_id, relationship_id, **kwargs):
# type: (str, str, **Any) -> Dict[str, object]
"""Get a relationship on a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:param str relationship_id: The ID of the relationship to retrieve.
:return: Dictionary containing the relationship.
:rtype: Dict[str, object]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is either no
digital twin or relationship with the provided ID.
"""
return self._client.digital_twins.get_relationship_by_id(
digital_twin_id,
relationship_id,
**kwargs
)
@distributed_trace
def upsert_relationship(self, digital_twin_id, relationship_id, relationship, **kwargs):
# type: (str, str, Dict[str, object], **Any) -> Dict[str, object]
"""Create or update a relationship on a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:param str relationship_id: The ID of the relationship to retrieve.
:param Dict[str, object] relationship: Dictionary containing the relationship.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: The created or updated relationship.
:rtype: Dict[str, object]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is either no
digital twin, target digital twin or relationship with the provided ID.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_none_match, error_map = prep_if_none_match(etag, match_condition)
if if_none_match:
options = DigitalTwinsAddRelationshipOptions(if_none_match=if_none_match)
return self._client.digital_twins.add_relationship(
id=digital_twin_id,
relationship_id=relationship_id,
relationship=relationship,
digital_twins_add_relationship_options=options,
error_map=error_map,
**kwargs
)
@distributed_trace
def update_relationship(
self,
digital_twin_id,
relationship_id,
json_patch,
**kwargs
):
# type: (str, str, List[Dict[str, object]], **Any) -> None
"""Updates the properties of a relationship on a digital twin using a JSON patch.
:param str digital_twin_id: The ID of the digital twin.
:param str relationship_id: The ID of the relationship to retrieve.
:param List[Dict[str, object]] json_patch: JSON Patch description of the update
to the relationship properties.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is either no
digital twin or relationship with the provided ID.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_match, error_map = prep_if_match(etag, match_condition)
if if_match:
options = DigitalTwinsUpdateRelationshipOptions(if_match=if_match)
return self._client.digital_twins.update_relationship(
id=digital_twin_id,
relationship_id=relationship_id,
patch_document=json_patch,
digital_twins_update_relationship_options=options,
error_map=error_map,
**kwargs
)
@distributed_trace
def delete_relationship(
self,
digital_twin_id,
relationship_id,
**kwargs
):
# type: (str, str, **Any) -> None
"""Delete a relationship on a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:param str relationship_id: The ID of the relationship to delete.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is either no
digital twin or relationship with the provided ID.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_match, error_map = prep_if_match(etag, match_condition)
if if_match:
options = DigitalTwinsDeleteRelationshipOptions(if_match=if_match)
return self._client.digital_twins.delete_relationship(
digital_twin_id,
relationship_id,
digital_twins_delete_relationship_options=options,
error_map=error_map,
**kwargs
)
@distributed_trace
def list_relationships(self, digital_twin_id, relationship_id=None, **kwargs):
# type: (str, Optional[str], **Any) -> ~azure.core.paging.ItemPaged[Dict[str, object]]
"""Retrieve relationships for a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:param str relationship_id: The ID of the relationship to
get (if None all the relationship will be retrieved).
:return: An iterator instance of list of relationships
:rtype: ~azure.core.paging.ItemPaged[Dict[str,object]]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is no
digital twin with the provided ID.
"""
return self._client.digital_twins.list_relationships(
digital_twin_id,
relationship_name=relationship_id,
**kwargs
)
@distributed_trace
def list_incoming_relationships(self, digital_twin_id, **kwargs):
# type: (str, **Any) -> ~azure.core.paging.ItemPaged[IncomingRelationship]
"""Retrieve all incoming relationships for a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:return: An iterator like instance of either Relationship.
:rtype: ~azure.core.paging.ItemPaged[~azure.digitaltwins.IncomingRelationship]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is no
digital twin with the provided ID.
"""
return self._client.digital_twins.list_incoming_relationships(
digital_twin_id,
**kwargs
)
@distributed_trace
def publish_telemetry(self, digital_twin_id, payload, **kwargs):
# type: (str, object, **Any) -> None
"""Publish telemetry from a digital twin, which is then consumed by
one or many destination endpoints (subscribers) defined under.
:param str digital_twin_id: The Id of the digital twin
:param object payload: The telemetry payload to be sent
:keyword str message_id: The message ID. If not specified, a UUID will be generated.
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
:raises :class: `~azure.core.exceptions.ServiceRequestError`: If the request is invalid.
:raises :class: `~azure.core.exceptions.ResourceNotFoundError`: If there is no
digital twin with the provided id.
"""
message_id = kwargs.pop('message_id', None) or str(uuid.uuid4())
timestamp = datetime.now()
return self._client.digital_twins.send_telemetry(
digital_twin_id,
message_id=message_id,
telemetry=payload,
telemetry_source_time=timestamp,
**kwargs
)
@distributed_trace
def publish_component_telemetry(
self,
digital_twin_id,
component_name,
payload,
**kwargs
):
# type: (str, str, object, **Any) -> None
"""Publish telemetry from a digital twin's component, which is then consumed by
one or many destination endpoints (subscribers) defined under.
:param str digital_twin_id: The Id of the digital twin.
:param str component_name: The name of the DTDL component.
:param object payload: The telemetry payload to be sent.
:keyword str message_id: The message ID. If not specified, a UUID will be generated.
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is no
digital twin with the provided ID or the component path is invalid.
"""
message_id = kwargs.pop('message_id', None) or str(uuid.uuid4())
timestamp = datetime.now()
return self._client.digital_twins.send_component_telemetry(
digital_twin_id,
component_name,
message_id=message_id,
telemetry=payload,
telemetry_source_time=timestamp,
**kwargs
)
@distributed_trace
def get_model(self, model_id, **kwargs):
# type: (str, **Any) -> DigitalTwinsModelData
"""Get a model, including the model metadata and the model definition.
:param str model_id: The ID of the model.
:keyword bool include_model_definition: Include the model definition
as part of the result. The default value is False.
:return: The model data.
:rtype: ~azure.digitaltwins.DigitalTwinsModelData
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is no
model with the provided ID.
"""
include_model_definition = kwargs.pop("include_model_definition", False)
return self._client.digital_twin_models.get_by_id(
model_id,
include_model_definition=include_model_definition,
**kwargs
)
@distributed_trace
def list_models(self, dependencies_for=None, **kwargs):
# type: (List[str], **Any) -> ItemPaged[DigitalTwinsModelData]
"""Get the list of models.
:param List[str] dependencies_for: The model IDs to have dependencies retrieved.
If omitted, all models are retrieved.
:keyword bool include_model_definition: Include the model definition
as part of the results. The default value is False.
:keyword int results_per_page: The maximum number of items to retrieve per request.
The server may choose to return less than the requested max.
:return: An iterator instance of list of model data.
:rtype: ~azure.core.paging.ItemPaged[~azure.digitaltwins.DigitalTwinsModelData]
:raises ~azure.core.exceptions.HttpResponseError:
"""
include_model_definition = kwargs.pop('include_model_definition', False)
results_per_page = kwargs.pop('results_per_page', None)
digital_twin_models_list_options = None
if results_per_page is not None:
digital_twin_models_list_options = {'max_item_count': results_per_page}
return self._client.digital_twin_models.list(
dependencies_for=dependencies_for,
include_model_definition=include_model_definition,
digital_twin_models_list_options=digital_twin_models_list_options,
**kwargs
)
@distributed_trace
def create_models(self, dtdl_models, **kwargs):
# type: (List[Dict[str, object]], **Any) -> List[DigitalTwinsModelData]
"""Create one or more models. When any error occurs, no models are uploaded.
:param List[Dict[str,object]] model_list: The set of models to create.
Each dict corresponds to exactly one model.
:return: The list of created models.
:rtype: List[~azure.digitaltwins.DigitalTwinsModelData]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceExistsError: One or more of
the provided models already exist.
"""
return self._client.digital_twin_models.add(
dtdl_models,
**kwargs
)
@distributed_trace
def decommission_model(self, model_id, **kwargs):
# type: (str, **Any) -> None
"""Decommissions a model.
:param str model_id: The ID for the model. The ID is globally unique and case sensitive.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: There is no model
with the provided id.
"""
json_patch = [{'op': 'replace', 'path': '/decommissioned', 'value': True}]
return self._client.digital_twin_models.update(
model_id,
json_patch,
**kwargs
)
@distributed_trace
def delete_model(self, model_id, **kwargs):
# type: (str, **Any) -> None
"""Delete a model.
:param str model_id: The ID of the model to delete.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: There is no model
with the provided id.
:raises ~azure.core.exceptions.ResourceExistsError: There are dependencies
on the model that prevent it from being deleted.
"""
return self._client.digital_twin_models.delete(
model_id,
**kwargs
)
@distributed_trace
def get_event_route(self, event_route_id, **kwargs):
# type: (str, **Any) -> DigitalTwinsEventRoute
"""Get an event route.
:param str event_route_id: The ID of the event route.
:return: The event route object.
:rtype: ~azure.digitaltwins.core.DigitalTwinsEventRoute
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: There is no
event route with the provided ID.
"""
return self._client.event_routes.get_by_id(
event_route_id,
**kwargs
)
@distributed_trace
def list_event_routes(self, **kwargs):
# type: (**Any) -> ItemPaged[DigitalTwinsEventRoute]
"""Retrieves all event routes.
:keyword int results_per_page: The maximum number of items to retrieve per request.
The server may choose to return less than the requested max.
:return: An iterator instance of event routes.
:rtype: ~azure.core.paging.ItemPaged[~azure.digitaltwins.core.DigitalTwinsEventRoute]
:raises ~azure.core.exceptions.HttpResponseError:
"""
event_routes_list_options = None
results_per_page = kwargs.pop('results_per_page', None)
if results_per_page is not None:
event_routes_list_options = {'max_item_count': results_per_page}
return self._client.event_routes.list(
event_routes_list_options=event_routes_list_options,
**kwargs
)
@distributed_trace
def upsert_event_route(self, event_route_id, event_route, **kwargs):
# type: (str, DigitalTwinsEventRoute, **Any) -> None
"""Create or update an event route.
:param str event_route_id: The ID of the event route to create or update.
:param DigitalTwinsEventRoute event_route: The event route data.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ServiceRequestError: The request is invalid.
"""
return self._client.event_routes.add(
event_route_id,
event_route=event_route,
**kwargs
)
@distributed_trace
def delete_event_route(self, event_route_id, **kwargs):
# type: (str, **Any) -> None
"""Delete an event route.
:param str event_route_id: The ID of the event route to delete.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: There is no
event route with the provided ID.
"""
return self._client.event_routes.delete(
event_route_id,
**kwargs
)
@distributed_trace
def query_twins(self, query_expression, **kwargs):
# type: (str, **Any) -> ItemPaged[Dict[str, object]]
"""Query for digital twins.
Note: that there may be a delay between before changes in your instance are reflected in queries.
For more details on query limitations, see
https://docs.microsoft.com/en-us/azure/digital-twins/how-to-query-graph#query-limitations
:param str query_expression: The query expression to execute.
:return: An iterable of query results.
:rtype: ~azure.core.ItemPaged[Dict[str, object]]
:raises ~azure.core.exceptions.HttpResponseError:
"""
def extract_data(deserialized):
list_of_elem = deserialized.value
return deserialized.continuation_token or None, iter(list_of_elem)
def get_next(continuation_token=None):
query_spec = QuerySpecification(
query=query_expression,
continuation_token=continuation_token)
return self._client.query.query_twins(query_spec, **kwargs)
return ItemPaged(
get_next,
extract_data
)
| 42.18541 | 105 | 0.65156 |
2aca06dd3d179046896903be249587b581ff0812 | 96 | py | Python | 7_kyu/vowel_count.py | nik4nd/codewars | efae95f1f9fbd5f31fc62b1b4f5a7d1ee511ced0 | [
"MIT"
] | null | null | null | 7_kyu/vowel_count.py | nik4nd/codewars | efae95f1f9fbd5f31fc62b1b4f5a7d1ee511ced0 | [
"MIT"
] | null | null | null | 7_kyu/vowel_count.py | nik4nd/codewars | efae95f1f9fbd5f31fc62b1b4f5a7d1ee511ced0 | [
"MIT"
] | null | null | null | def get_count(sentence):
return sum([sentence.count(x) for x in ['a', 'e', 'i', 'o', 'u']])
| 32 | 70 | 0.5625 |
85c58ed3986d0b17e1da2a259829392cebe7bbfc | 538 | py | Python | devpotato_bot/commands/daily_titles/models/base.py | cl0ne/cryptopotato-bot | af62d794adffe186a4f6a4b0aa7ecd4f7e8700a1 | [
"MIT"
] | 1 | 2021-05-15T23:41:29.000Z | 2021-05-15T23:41:29.000Z | devpotato_bot/commands/daily_titles/models/base.py | cl0ne/cryptopotato-bot | af62d794adffe186a4f6a4b0aa7ecd4f7e8700a1 | [
"MIT"
] | 1 | 2022-02-19T20:38:33.000Z | 2022-02-19T23:53:39.000Z | devpotato_bot/commands/daily_titles/models/base.py | cl0ne/cryptopotato-bot | af62d794adffe186a4f6a4b0aa7ecd4f7e8700a1 | [
"MIT"
] | 1 | 2021-05-15T23:42:21.000Z | 2021-05-15T23:42:21.000Z | from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import as_declarative
naming_convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_N_name)s",
"ck": "ck_%(table_name)s_%(column_0_N_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
@as_declarative(metadata=MetaData(naming_convention=naming_convention))
class Base:
"""Base model class that provides common prefix for table names."""
TABLENAME_PREFIX = 'daily_titles_'
| 31.647059 | 71 | 0.743494 |
864c1767ed0c9c36a0ec421f2b8f3955acfbc8b7 | 274 | py | Python | raissyon/raissyon/doctype/results_of_evaluation/results_of_evaluation.py | mhbu50/raissyon | 73d5d7498e3e7f74b07e4c0a1c979ad10f9c37ce | [
"MIT"
] | null | null | null | raissyon/raissyon/doctype/results_of_evaluation/results_of_evaluation.py | mhbu50/raissyon | 73d5d7498e3e7f74b07e4c0a1c979ad10f9c37ce | [
"MIT"
] | null | null | null | raissyon/raissyon/doctype/results_of_evaluation/results_of_evaluation.py | mhbu50/raissyon | 73d5d7498e3e7f74b07e4c0a1c979ad10f9c37ce | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Accurate Systems and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class ResultsOfEvaluation(Document):
pass
| 24.909091 | 55 | 0.784672 |
a88898e14aaabe74c987e92fa9c4b2b2f7e05dd0 | 638 | py | Python | tests/ui/test_login.py | andreip/addons-server | 9189e9be0c0d9f3df9c9dfbc26e9b16ae934d19e | [
"BSD-3-Clause"
] | null | null | null | tests/ui/test_login.py | andreip/addons-server | 9189e9be0c0d9f3df9c9dfbc26e9b16ae934d19e | [
"BSD-3-Clause"
] | null | null | null | tests/ui/test_login.py | andreip/addons-server | 9189e9be0c0d9f3df9c9dfbc26e9b16ae934d19e | [
"BSD-3-Clause"
] | null | null | null | import pytest
from pages.desktop.home import Home
@pytest.mark.withoutresponses
def test_login(base_url, selenium, fxa_account):
"""User can login"""
page = Home(selenium, base_url).open()
assert not page.logged_in
page.login(fxa_account.email, fxa_account.password)
assert page.logged_in
@pytest.mark.skip(
reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1453779')
@pytest.mark.withoutresponses
def test_logout(base_url, selenium, user):
"""User can logout"""
page = Home(selenium, base_url).open()
page.login(user['email'], user['password'])
page.logout()
assert not page.logged_in
| 26.583333 | 66 | 0.719436 |
843cb421fec5f4247274780620fdfd535bcb243f | 42,096 | py | Python | frappe/model/document.py | kwiesmueller/frappe | 6a748661c2140b15fd43437477f2ea6eef6b5de0 | [
"MIT"
] | null | null | null | frappe/model/document.py | kwiesmueller/frappe | 6a748661c2140b15fd43437477f2ea6eef6b5de0 | [
"MIT"
] | null | null | null | frappe/model/document.py | kwiesmueller/frappe | 6a748661c2140b15fd43437477f2ea6eef6b5de0 | [
"MIT"
] | null | null | null | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
import frappe
import time
from frappe import _, msgprint
from frappe.utils import flt, cstr, now, get_datetime_str, file_lock, date_diff
from frappe.utils.background_jobs import enqueue
from frappe.model.base_document import BaseDocument, get_controller
from frappe.model.naming import set_new_name
from six import iteritems, string_types
from werkzeug.exceptions import NotFound, Forbidden
import hashlib, json
from frappe.model import optional_fields, table_fields
from frappe.model.workflow import validate_workflow
from frappe.model.workflow import set_workflow_state_on_action
from frappe.utils.global_search import update_global_search
from frappe.integrations.doctype.webhook import run_webhooks
from frappe.desk.form.document_follow import follow_document
from frappe.core.doctype.server_script.server_script_utils import run_server_script_for_doc_event
# once_only validation
# methods
def get_doc(*args, **kwargs):
	"""Return a `frappe.model.Document` object.

	:param arg1: Document dict, an existing Document instance, or a DocType name.
	:param arg2: [optional] document name, if `arg1` is a DocType name.
	:raises ValueError: if no DocType can be determined from the arguments.
	:raises ImportError: if no controller class exists for the DocType.

	There are multiple ways to call `get_doc`

		# will fetch the latest user object (with child table) from the database
		user = get_doc("User", "test@example.com")

		# create a new object
		user = get_doc({
			"doctype":"User"
			"email_id": "test@example.com",
			"roles: [
				{"role": "System Manager"}
			]
		})

		# create new object with keyword arguments
		user = get_doc(doctype='User', email_id='test@example.com')
	"""
	doctype = None

	if args:
		if isinstance(args[0], BaseDocument):
			# already a document, return as-is
			return args[0]
		elif isinstance(args[0], string_types):
			doctype = args[0]
		elif isinstance(args[0], dict):
			# a document dict was passed positionally; treat it as kwargs
			kwargs = args[0]
		else:
			raise ValueError('First non keyword argument must be a string or dict')

	if kwargs:
		if 'doctype' in kwargs:
			doctype = kwargs['doctype']
		else:
			raise ValueError('"doctype" is a required key')

	if not doctype:
		# previously fell through to a NameError on `doctype`; fail clearly instead
		raise ValueError('"doctype" is a required key')

	controller = get_controller(doctype)
	if controller:
		return controller(*args, **kwargs)

	raise ImportError(doctype)
class Document(BaseDocument):
"""All controllers inherit from `Document`."""
	def __init__(self, *args, **kwargs):
		"""Constructor.

		:param arg1: DocType name as string or document **dict**
		:param arg2: Document name (or a filters dict), if `arg1` is a DocType name.

		If DocType name and document name are passed, the object will load
		all values (including child documents) from the database.
		:raises frappe.DoesNotExistError: if a filters dict matches no document.
		:raises ValueError: if called without a usable doctype/dict.
		"""
		self.doctype = self.name = None
		self._default_new_docs = {}
		self.flags = frappe._dict()

		if args and args[0] and isinstance(args[0], string_types):
			# first argument is the DocType name
			if len(args)==1:
				# single doctype: document name equals the doctype name
				self.doctype = self.name = args[0]
			else:
				self.doctype = args[0]
				if isinstance(args[1], dict):
					# second argument is a filters dict; resolve it to a name
					self.name = frappe.db.get_value(args[0], args[1], "name")
					if self.name is None:
						frappe.throw(_("{0} {1} not found").format(_(args[0]), args[1]),
							frappe.DoesNotExistError)
				else:
					self.name = args[1]

			self.load_from_db()
			return

		if args and args[0] and isinstance(args[0], dict):
			# first argument is a document dict; treat like keyword arguments
			kwargs = args[0]

		if kwargs:
			# init base document from the supplied field values
			super(Document, self).__init__(kwargs)
			self.init_valid_columns()

		else:
			# incorrect arguments. let's not proceed.
			raise ValueError('Illegal arguments')
@staticmethod
def whitelist(f):
"""Decorator: Whitelist method to be called remotely via REST API."""
f.whitelisted = True
return f
	def reload(self):
		"""Reload this document (and its children) from the database,
		discarding any unsaved in-memory changes."""
		self.load_from_db()
	def load_from_db(self):
		"""Load document and children from database and create properties
		from fields.

		Single doctypes are read from `tabSingles` (falling back to the
		defaults of a new doc when nothing is stored yet); regular documents
		are read from their own table, plus one query per child table."""
		if not getattr(self, "_metaclass", False) and self.meta.issingle:
			single_doc = frappe.db.get_singles_dict(self.doctype)
			if not single_doc:
				# nothing stored yet: use the defaults of a freshly created doc
				single_doc = frappe.new_doc(self.doctype).as_dict()
				single_doc["name"] = self.doctype
				del single_doc["__islocal"]

			super(Document, self).__init__(single_doc)
			self.init_valid_columns()
			self._fix_numeric_types()

		else:
			d = frappe.db.get_value(self.doctype, self.name, "*", as_dict=1)
			if not d:
				frappe.throw(_("{0} {1} not found").format(_(self.doctype), self.name), frappe.DoesNotExistError)

			super(Document, self).__init__(d)

		if self.name=="DocType" and self.doctype=="DocType":
			# bootstrapping: table fields of DocType itself are hard-coded
			from frappe.model.meta import DOCTYPE_TABLE_FIELDS
			table_fields = DOCTYPE_TABLE_FIELDS
		else:
			table_fields = self.meta.get_table_fields()

		for df in table_fields:
			children = frappe.db.get_values(df.options,
				{"parent": self.name, "parenttype": self.doctype, "parentfield": df.fieldname},
				"*", as_dict=True, order_by="idx asc")
			if children:
				self.set(df.fieldname, children)
			else:
				self.set(df.fieldname, [])

		# sometimes __setup__ can depend on child values, hence calling again at the end
		if hasattr(self, "__setup__"):
			self.__setup__()
def get_latest(self):
if not getattr(self, "latest", None):
self.latest = frappe.get_doc(self.doctype, self.name)
return self.latest
def check_permission(self, permtype='read', permlevel=None):
"""Raise `frappe.PermissionError` if not permitted"""
if not self.has_permission(permtype):
self.raise_no_permission_to(permlevel or permtype)
def has_permission(self, permtype="read", verbose=False):
"""Call `frappe.has_permission` if `self.flags.ignore_permissions`
is not set.
:param permtype: one of `read`, `write`, `submit`, `cancel`, `delete`"""
if self.flags.ignore_permissions:
return True
return frappe.has_permission(self.doctype, permtype, self, verbose=verbose)
	def raise_no_permission_to(self, perm_type):
		"""Raise `frappe.PermissionError` after recording a user-facing
		message. NOTE(review): `perm_type` is not used in the message text."""
		frappe.flags.error_message = _('Insufficient Permission for {0}').format(self.doctype)
		raise frappe.PermissionError
	def insert(self, ignore_permissions=None, ignore_links=None, ignore_if_duplicate=False,
			ignore_mandatory=None, set_name=None, set_child_names=True):
		"""Insert the document in the database (as a new document).
		This will check for user permissions and execute `before_insert`,
		`validate`, `on_update`, `after_insert` methods if they are written.

		:param ignore_permissions: Do not check permissions if True.
		:param ignore_links: Do not validate Link field values if True.
		:param ignore_if_duplicate: Swallow `DuplicateEntryError` if True.
		:param ignore_mandatory: Skip mandatory-field validation if True.
		:param set_name: Use this name instead of running autoname.
		:param set_child_names: Also assign names to child rows (default True)."""
		if self.flags.in_print:
			return

		self.flags.notifications_executed = []

		if ignore_permissions!=None:
			self.flags.ignore_permissions = ignore_permissions

		if ignore_links!=None:
			self.flags.ignore_links = ignore_links

		if ignore_mandatory!=None:
			self.flags.ignore_mandatory = ignore_mandatory

		self.set("__islocal", True)

		# permissions, defaults and naming must happen before validation
		self.check_permission("create")
		self._set_defaults()
		self.set_user_and_timestamp()
		self.set_docstatus()
		self.check_if_latest()
		self.run_method("before_insert")
		self._validate_links()
		self.set_new_name(set_name=set_name, set_child_names=set_child_names)
		self.set_parent_in_children()
		self.validate_higher_perm_levels()

		self.flags.in_insert = True
		self.run_before_save_methods()
		self._validate()
		self.set_docstatus()
		self.flags.in_insert = False

		# write parent row
		if getattr(self.meta, "issingle", 0):
			self.update_single(self.get_valid_dict())
		else:
			try:
				self.db_insert()
			except frappe.DuplicateEntryError as e:
				if not ignore_if_duplicate:
					raise e

		# write children
		for d in self.get_all_children():
			d.db_insert()

		self.run_method("after_insert")

		self.flags.in_insert = True

		if self.get("amended_from"):
			self.copy_attachments_from_amended_from()

		# flag to prevent creation of event update log for create and update both
		# during document creation
		self.flags.update_log_for_doc_creation = True

		self.run_post_save_methods()
		self.flags.in_insert = False

		# delete __islocal
		if hasattr(self, "__islocal"):
			delattr(self, "__islocal")

		# clear unsaved flag
		if hasattr(self, "__unsaved"):
			delattr(self, "__unsaved")

		# follow document on document creation
		if not (frappe.flags.in_migrate or frappe.local.flags.in_install or frappe.flags.in_setup_wizard):
			follow_document(self.doctype, self.name, frappe.session.user)

		return self
	def save(self, *args, **kwargs):
		"""Wrapper for `_save`; see `_save` for the supported keyword
		arguments (`ignore_permissions`, `ignore_version`)."""
		return self._save(*args, **kwargs)
	def _save(self, ignore_permissions=None, ignore_version=None):
		"""Save the current document in the database in the **DocType**'s table or
		`tabSingles` (for single types).

		This will check for user permissions and execute
		`validate` before updating, `on_update` after updating triggers.

		:param ignore_permissions: Do not check permissions if True.
		:param ignore_version: Do not save version if True."""
		if self.flags.in_print:
			return

		self.flags.notifications_executed = []

		if ignore_permissions!=None:
			self.flags.ignore_permissions = ignore_permissions

		# versions are skipped in tests unless explicitly requested
		self.flags.ignore_version = frappe.flags.in_test if ignore_version is None else ignore_version

		if self.get("__islocal") or not self.get("name"):
			# unsaved document: route through insert instead
			self.insert()
			return

		self.check_permission("write", "save")

		self.set_user_and_timestamp()
		self.set_docstatus()
		self.check_if_latest()
		self.set_parent_in_children()
		self.set_name_in_children()

		self.validate_higher_perm_levels()
		self._validate_links()
		self.run_before_save_methods()

		if self._action != "cancel":
			self._validate()

		if self._action == "update_after_submit":
			self.validate_update_after_submit()

		self.set_docstatus()

		# write parent row
		if self.meta.issingle:
			self.update_single(self.get_valid_dict())
		else:
			self.db_update()

		self.update_children()
		self.run_post_save_methods()

		# clear unsaved flag
		if hasattr(self, "__unsaved"):
			delattr(self, "__unsaved")

		return self
def copy_attachments_from_amended_from(self):
"""Copy attachments from `amended_from`"""
from frappe.desk.form.load import get_attachments
#loop through attachments
for attach_item in get_attachments(self.doctype, self.amended_from):
#save attachments to new doc
_file = frappe.get_doc({
"doctype": "File",
"file_url": attach_item.file_url,
"file_name": attach_item.file_name,
"attached_to_name": self.name,
"attached_to_doctype": self.doctype,
"folder": "Home/Attachments"})
_file.save()
def update_children(self):
"""update child tables"""
for df in self.meta.get_table_fields():
self.update_child_table(df.fieldname, df)
	def update_child_table(self, fieldname, df=None):
		"""sync child table for given fieldname

		Updates each in-memory row, then deletes any database rows for this
		parent/parentfield that are no longer present in the document
		(unless the child type is listed in `flags.ignore_children_type`)."""
		rows = []

		if not df:
			df = self.meta.get_field(fieldname)

		for d in self.get(df.fieldname):
			d.db_update()
			rows.append(d.name)

		if df.options in (self.flags.ignore_children_type or []):
			# do not delete rows for this because of flags
			# hack for docperm :(
			return

		if rows:
			# select rows that do not match the ones in the document
			deleted_rows = frappe.db.sql("""select name from `tab{0}` where parent=%s
				and parenttype=%s and parentfield=%s
				and name not in ({1})""".format(df.options, ','.join(['%s'] * len(rows))),
					[self.name, self.doctype, fieldname] + rows)

			if len(deleted_rows) > 0:
				# delete rows that do not match the ones in the document
				frappe.db.sql("""delete from `tab{0}` where name in ({1})""".format(df.options,
					','.join(['%s'] * len(deleted_rows))), tuple(row[0] for row in deleted_rows))

		else:
			# no rows found, delete all rows
			frappe.db.sql("""delete from `tab{0}` where parent=%s
				and parenttype=%s and parentfield=%s""".format(df.options),
				(self.name, self.doctype, fieldname))
	def get_doc_before_save(self):
		# snapshot loaded by `load_doc_before_save`; None for new documents
		return getattr(self, '_doc_before_save', None)
def set_new_name(self, force=False, set_name=None, set_child_names=True):
"""Calls `frappe.naming.set_new_name` for parent and child docs."""
if self.flags.name_set and not force:
return
if set_name:
self.name = set_name
else:
set_new_name(self)
if set_child_names:
# set name for children
for d in self.get_all_children():
set_new_name(d)
self.flags.name_set = True
	def get_title(self):
		"""Return the document title: the value of the meta's title field,
		falling back to `title` or `name` (per `meta.get_title_field`)."""
		return self.get(self.meta.get_title_field())
	def set_title_field(self):
		"""Set the title field from its format template (stored in the
		field's `options`), e.g. `{customer} ({order_type})`."""
		def get_values():
			values = self.as_dict()
			# str.format chokes on None values, so blank them out first
			for key, value in iteritems(values):
				if value==None:
					values[key] = ""
			return values

		if self.meta.get("title_field")=="title":
			df = self.meta.get_field(self.meta.title_field)

			if df.options:
				# options holds the format template
				self.set(df.fieldname, df.options.format(**get_values()))
			elif self.is_new() and not self.get(df.fieldname) and df.default:
				# set default title for new transactions (if default)
				self.set(df.fieldname, df.default.format(**get_values()))
	def update_single(self, d):
		"""Updates values for Single type Document in `tabSingles`.

		Replaces all rows for this doctype with one (doctype, field, value)
		row per entry of `d`, then invalidates the value cache."""
		frappe.db.sql("""delete from `tabSingles` where doctype=%s""", self.doctype)
		for field, value in iteritems(d):
			if field != "doctype":
				frappe.db.sql("""insert into `tabSingles` (doctype, field, value)
					values (%s, %s, %s)""", (self.doctype, field, value))

		if self.doctype in frappe.db.value_cache:
			del frappe.db.value_cache[self.doctype]
def set_user_and_timestamp(self):
self._original_modified = self.modified
self.modified = now()
self.modified_by = frappe.session.user
if not self.creation:
self.creation = self.modified
if not self.owner:
self.owner = self.modified_by
for d in self.get_all_children():
d.modified = self.modified
d.modified_by = self.modified_by
if not d.owner:
d.owner = self.owner
if not d.creation:
d.creation = self.creation
frappe.flags.currently_saving.append((self.doctype, self.name))
def set_docstatus(self):
if self.docstatus==None:
self.docstatus=0
for d in self.get_all_children():
d.docstatus = self.docstatus
	def _validate(self):
		"""Run field-level validations on the parent and every child row,
		plus mandatory, workflow and set-only-once rules."""
		self._validate_mandatory()
		self._validate_data_fields()
		self._validate_selects()
		self._validate_length()
		self._extract_images_from_text_editor()
		self._sanitize_content()
		self._save_passwords()
		self.validate_workflow()

		children = self.get_all_children()
		for d in children:
			d._validate_data_fields()
			d._validate_selects()
			d._validate_length()
			d._extract_images_from_text_editor()
			d._sanitize_content()
			d._save_passwords()

		if self.is_new():
			# don't set fields like _assign, _comments for new doc
			for fieldname in optional_fields:
				self.set(fieldname, None)
		else:
			self.validate_set_only_once()
def validate_workflow(self):
"""Validate if the workflow transition is valid"""
if frappe.flags.in_install == 'frappe': return
workflow = self.meta.get_workflow()
if workflow:
validate_workflow(self)
if not self._action == 'save':
set_workflow_state_on_action(self, workflow, self._action)
	def validate_set_only_once(self):
		"""Validate that fields are not changed if not in insert.

		NOTE(review): the trailing `return False` runs unconditionally
		(failures throw earlier); the return value appears unused."""
		set_only_once_fields = self.meta.get_set_only_once_fields()

		if set_only_once_fields and self._doc_before_save:
			# document exists before saving
			for field in set_only_once_fields:
				fail = False
				value = self.get(field.fieldname)
				original_value = self._doc_before_save.get(field.fieldname)

				if field.fieldtype in table_fields:
					fail = not self.is_child_table_same(field.fieldname)
				elif field.fieldtype in ('Date', 'Datetime', 'Time'):
					# compare as strings to sidestep type/precision differences
					fail = str(value) != str(original_value)
				else:
					fail = value != original_value

				if fail:
					frappe.throw(_("Value cannot be changed for {0}").format(self.meta.get_label(field.fieldname)),
						frappe.CannotChangeConstantError)

		return False
	def is_child_table_same(self, fieldname):
		"""Validate child table is same as original table before saving.

		Returns True when row count and every field (other than the
		modified/creation audit fields) match the pre-save snapshot."""
		value = self.get(fieldname)
		original_value = self._doc_before_save.get(fieldname)
		same = True

		if len(original_value) != len(value):
			same = False
		else:
			# check all child entries
			for i, d in enumerate(original_value):
				new_child = value[i].as_dict(convert_dates_to_str = True)
				original_child = d.as_dict(convert_dates_to_str = True)

				# all fields must be same other than modified and modified_by
				for key in ('modified', 'modified_by', 'creation'):
					del new_child[key]
					del original_child[key]

				if original_child != new_child:
					same = False
					break

		return same
def apply_fieldlevel_read_permissions(self):
"""Remove values the user is not allowed to read (called when loading in desk)"""
has_higher_permlevel = False
for p in self.get_permissions():
if p.permlevel > 0:
has_higher_permlevel = True
break
if not has_higher_permlevel:
return
has_access_to = self.get_permlevel_access('read')
for df in self.meta.fields:
if df.permlevel and not df.permlevel in has_access_to:
self.set(df.fieldname, None)
for table_field in self.meta.get_table_fields():
for df in frappe.get_meta(table_field.options).fields or []:
if df.permlevel and not df.permlevel in has_access_to:
for child in self.get(table_field.fieldname) or []:
child.set(df.fieldname, None)
	def validate_higher_perm_levels(self):
		"""If the user does not have permissions at permlevel > 0, then reset the values to original / default"""
		if self.flags.ignore_permissions or frappe.flags.in_install:
			return

		# permlevels the user's roles may write to
		has_access_to = self.get_permlevel_access()
		high_permlevel_fields = self.meta.get_high_permlevel_fields()

		if high_permlevel_fields:
			self.reset_values_if_no_permlevel_access(has_access_to, high_permlevel_fields)

		# If new record then don't reset the values for child table
		if self.is_new(): return

		# check for child tables
		for df in self.meta.get_table_fields():
			high_permlevel_fields = frappe.get_meta(df.options).get_high_permlevel_fields()
			if high_permlevel_fields:
				for d in self.get(df.fieldname):
					d.reset_values_if_no_permlevel_access(has_access_to, high_permlevel_fields)
def get_permlevel_access(self, permission_type='write'):
if not hasattr(self, "_has_access_to"):
self._has_access_to = {}
if not self._has_access_to.get(permission_type):
self._has_access_to[permission_type] = []
roles = frappe.get_roles()
for perm in self.get_permissions():
if perm.role in roles and perm.permlevel > 0 and perm.get(permission_type):
if perm.permlevel not in self._has_access_to[permission_type]:
self._has_access_to[permission_type].append(perm.permlevel)
return self._has_access_to[permission_type]
def has_permlevel_access_to(self, fieldname, df=None, permission_type='read'):
if not df:
df = self.meta.get_field(fieldname)
return df.permlevel in self.get_permlevel_access(permission_type)
def get_permissions(self):
if self.meta.istable:
# use parent permissions
permissions = frappe.get_meta(self.parenttype).permissions
else:
permissions = self.meta.permissions
return permissions
def _set_defaults(self):
if frappe.flags.in_import:
return
new_doc = frappe.new_doc(self.doctype, as_dict=True)
self.update_if_missing(new_doc)
# children
for df in self.meta.get_table_fields():
new_doc = frappe.new_doc(df.options, as_dict=True)
value = self.get(df.fieldname)
if isinstance(value, list):
for d in value:
d.update_if_missing(new_doc)
	def check_if_latest(self):
		"""Checks if `modified` timestamp provided by document being updated is same as the
		`modified` timestamp in the database. If there is a different, the document has been
		updated in the database after the current copy was read. Will throw an error if
		timestamps don't match.

		Will also validate document transitions (Save > Submit > Cancel) calling
		`self.check_docstatus_transition`.

		:raises frappe.TimestampMismatchError: on a stale in-memory copy."""
		conflict = False
		self._action = "save"

		if not self.get('__islocal'):
			if self.meta.issingle:
				# singles store `modified` as a row in tabSingles; lock it
				modified = frappe.db.sql("""select value from tabSingles
					where doctype=%s and field='modified' for update""", self.doctype)
				modified = modified and modified[0][0]
				if modified and modified != cstr(self._original_modified):
					conflict = True
			else:
				# row-lock the document while comparing timestamps
				tmp = frappe.db.sql("""select modified, docstatus from `tab{0}`
					where name = %s for update""".format(self.doctype), self.name, as_dict=True)

				if not tmp:
					frappe.throw(_("Record does not exist"))
				else:
					tmp = tmp[0]

				modified = cstr(tmp.modified)

				if modified and modified != cstr(self._original_modified):
					conflict = True

				self.check_docstatus_transition(tmp.docstatus)

			if conflict:
				frappe.msgprint(_("Error: Document has been modified after you have opened it") \
				+ (" (%s, %s). " % (modified, self.modified)) \
				+ _("Please refresh to get the latest document."),
					raise_exception=frappe.TimestampMismatchError)
		else:
			# new document: validate transition from draft
			self.check_docstatus_transition(0)
	def check_docstatus_transition(self, docstatus):
		"""Ensures valid `docstatus` transition.
		Valid transitions are (number in brackets is `docstatus`):

		- Save (0) > Save (0)
		- Save (0) > Submit (1)
		- Submit (1) > Submit (1)
		- Submit (1) > Cancel (2)

		Also sets `self._action` and checks submit/cancel permissions.

		:param docstatus: the docstatus currently stored in the database.
		:raises frappe.DocstatusTransitionError: on an invalid transition.
		:raises frappe.ValidationError: when editing a cancelled document."""
		if not self.docstatus:
			self.docstatus = 0
		if docstatus==0:
			if self.docstatus==0:
				self._action = "save"
			elif self.docstatus==1:
				self._action = "submit"
				self.check_permission("submit")
			else:
				raise frappe.DocstatusTransitionError(_("Cannot change docstatus from 0 to 2"))

		elif docstatus==1:
			if self.docstatus==1:
				self._action = "update_after_submit"
				self.check_permission("submit")
			elif self.docstatus==2:
				self._action = "cancel"
				self.check_permission("cancel")
			else:
				raise frappe.DocstatusTransitionError(_("Cannot change docstatus from 1 to 0"))

		elif docstatus==2:
			raise frappe.ValidationError(_("Cannot edit cancelled document"))
def set_parent_in_children(self):
"""Updates `parent` and `parenttype` property in all children."""
for d in self.get_all_children():
d.parent = self.name
d.parenttype = self.doctype
def set_name_in_children(self):
# Set name for any new children
for d in self.get_all_children():
if not d.name:
set_new_name(d)
	def validate_update_after_submit(self):
		"""Ensure only `allow_on_submit` fields changed on a submitted document,
		for the parent and each existing child row."""
		if self.flags.ignore_validate_update_after_submit:
			return

		self._validate_update_after_submit()
		for d in self.get_all_children():
			if d.is_new() and self.meta.get_field(d.parentfield).allow_on_submit:
				# in case of a new row, don't validate allow on submit, if table is allow on submit
				continue
			d._validate_update_after_submit()

		# TODO check only allowed values are updated
	def _validate_mandatory(self):
		"""Raise `frappe.MandatoryError` listing every required field (parent
		and children) that is still empty; respects `flags.ignore_mandatory`."""
		if self.flags.ignore_mandatory:
			return

		missing = self._get_missing_mandatory_fields()
		for d in self.get_all_children():
			missing.extend(d._get_missing_mandatory_fields())

		if not missing:
			return

		for fieldname, msg in missing:
			msgprint(msg)

		if frappe.flags.print_messages:
			print(self.as_json().encode("utf-8"))

		raise frappe.MandatoryError('[{doctype}, {name}]: {fields}'.format(
			fields=", ".join((each[0] for each in missing)),
			doctype=self.doctype,
			name=self.name))
	def _validate_links(self):
		"""Throw if any Link field (parent or child) points to a missing or
		cancelled document; skipped on cancel or when `flags.ignore_links`."""
		if self.flags.ignore_links or self._action == "cancel":
			return

		invalid_links, cancelled_links = self.get_invalid_links()

		for d in self.get_all_children():
			result = d.get_invalid_links(is_submittable=self.meta.is_submittable)
			invalid_links.extend(result[0])
			cancelled_links.extend(result[1])

		if invalid_links:
			msg = ", ".join((each[2] for each in invalid_links))
			frappe.throw(_("Could not find {0}").format(msg),
				frappe.LinkValidationError)

		if cancelled_links:
			msg = ", ".join((each[2] for each in cancelled_links))
			frappe.throw(_("Cannot link cancelled document: {0}").format(msg),
				frappe.CancelledLinkError)
def get_all_children(self, parenttype=None):
"""Returns all children documents from **Table** type field in a list."""
ret = []
for df in self.meta.get("fields", {"fieldtype": ['in', table_fields]}):
if parenttype:
if df.options==parenttype:
return self.get(df.fieldname)
value = self.get(df.fieldname)
if isinstance(value, list):
ret.extend(value)
return ret
	def run_method(self, method, *args, **kwargs):
		"""run standard triggers, plus those in hooks

		Wraps the controller method (or a no-op if it does not exist) with
		`Document.hook` so doc_events handlers run too, then fires
		notifications, webhooks and server scripts for the event."""
		if "flags" in kwargs:
			del kwargs["flags"]

		if hasattr(self, method) and hasattr(getattr(self, method), "__call__"):
			fn = lambda self, *args, **kwargs: getattr(self, method)(*args, **kwargs)
		else:
			# hack! to run hooks even if method does not exist
			fn = lambda self, *args, **kwargs: None

		fn.__name__ = str(method)
		out = Document.hook(fn)(self, *args, **kwargs)

		self.run_notifications(method)
		run_webhooks(self, method)
		run_server_script_for_doc_event(self, method)

		return out
	def run_trigger(self, method, *args, **kwargs):
		# alias of `run_method`, kept for backward compatibility
		return self.run_method(method, *args, **kwargs)
	def run_notifications(self, method):
		"""Run notifications for this method

		Loads enabled Notification definitions for the doctype (cached),
		maps the controller method to a notification event, and evaluates
		each matching alert at most once per save."""
		if frappe.flags.in_import or frappe.flags.in_patch or frappe.flags.in_install:
			return

		if self.flags.notifications_executed==None:
			self.flags.notifications_executed = []

		from frappe.email.doctype.notification.notification import evaluate_alert

		if self.flags.notifications == None:
			alerts = frappe.cache().hget('notifications', self.doctype)
			if alerts==None:
				alerts = frappe.get_all('Notification', fields=['name', 'event', 'method'],
					filters={'enabled': 1, 'document_type': self.doctype})
				frappe.cache().hset('notifications', self.doctype, alerts)
			self.flags.notifications = alerts

		if not self.flags.notifications:
			return

		def _evaluate_alert(alert):
			# each alert fires at most once per document save
			if not alert.name in self.flags.notifications_executed:
				evaluate_alert(self, alert.name, alert.event)
				self.flags.notifications_executed.append(alert.name)

		event_map = {
			"on_update": "Save",
			"after_insert": "New",
			"on_submit": "Submit",
			"on_cancel": "Cancel"
		}

		if not self.flags.in_insert:
			# value change is not applicable in insert
			event_map['on_change'] = 'Value Change'

		for alert in self.flags.notifications:
			event = event_map.get(method, None)
			if event and alert.event == event:
				_evaluate_alert(alert)
			elif alert.event=='Method' and method == alert.method:
				_evaluate_alert(alert)
	@whitelist.__func__
	def _submit(self):
		"""Submit the document. Sets `docstatus` = 1 (Submitted), then saves."""
		self.docstatus = 1
		self.save()
	@whitelist.__func__
	def _cancel(self):
		"""Cancel the document. Sets `docstatus` = 2 (Cancelled), then saves."""
		self.docstatus = 2
		self.save()
	@whitelist.__func__
	def submit(self):
		"""Submit the document. Public wrapper around `_submit`."""
		self._submit()
	@whitelist.__func__
	def cancel(self):
		"""Cancel the document. Public wrapper around `_cancel`."""
		self._cancel()
	def delete(self):
		"""Delete this document from the database (via `frappe.delete_doc`)."""
		frappe.delete_doc(self.doctype, self.name, flags=self.flags)
	def run_before_save_methods(self):
		"""Run standard methods before `INSERT` or `UPDATE`. Standard Methods are:

		- `validate`, `before_save` for **Save**.
		- `validate`, `before_submit` for **Submit**.
		- `before_cancel` for **Cancel**
		- `before_update_after_submit` for **Update after Submit**

		Will also update title_field if set"""
		self.load_doc_before_save()
		self.reset_seen()

		if self.flags.ignore_validate:
			return

		# dispatch on the action determined by check_docstatus_transition
		if self._action=="save":
			self.run_method("before_validate")
			self.run_method("validate")
			self.run_method("before_save")
		elif self._action=="submit":
			self.run_method("before_validate")
			self.run_method("validate")
			self.run_method("before_submit")
		elif self._action=="cancel":
			self.run_method("before_cancel")
		elif self._action=="update_after_submit":
			self.run_method("before_update_after_submit")

		self.set_title_field()
def load_doc_before_save(self):
"""Save load document from db before saving"""
self._doc_before_save = None
if not self.is_new():
try:
self._doc_before_save = frappe.get_doc(self.doctype, self.name)
except frappe.DoesNotExistError:
self._doc_before_save = None
frappe.clear_last_message()
	def run_post_save_methods(self):
		"""Run standard methods after `INSERT` or `UPDATE`. Standard Methods are:

		- `on_update` for **Save**.
		- `on_update`, `on_submit` for **Submit**.
		- `on_cancel` for **Cancel**
		- `update_after_submit` for **Update after Submit**

		Also clears caches, notifies the desk, updates global search, saves a
		Version, fires `on_change` and writes event-streaming update logs."""
		doc_before_save = self.get_doc_before_save()

		if self._action=="save":
			self.run_method("on_update")
		elif self._action=="submit":
			self.run_method("on_update")
			self.run_method("on_submit")
		elif self._action=="cancel":
			self.run_method("on_cancel")
			self.check_no_back_links_exist()
		elif self._action=="update_after_submit":
			self.run_method("on_update_after_submit")

		self.clear_cache()
		self.notify_update()

		update_global_search(self)

		if getattr(self.meta, 'track_changes', False) and self._doc_before_save and not self.flags.ignore_version:
			self.save_version()

		self.run_method('on_change')

		if (self.doctype, self.name) in frappe.flags.currently_saving:
			frappe.flags.currently_saving.remove((self.doctype, self.name))

		# make event update log for doctypes having event consumers
		if not frappe.flags.in_install and not frappe.flags.in_migrate and check_doctype_has_consumers(self.doctype):
			if self.flags.update_log_for_doc_creation:
				# creation already logged; avoid a duplicate Update log
				make_event_update_log(self, update_type='Create')
				self.flags.update_log_for_doc_creation = False
			else:
				from frappe.event_streaming.doctype.event_update_log.event_update_log import get_update
				diff = get_update(doc_before_save, self)
				if diff:
					doc = self
					doc.diff = diff
					make_event_update_log(doc, update_type='Update')

		self.latest = None
	def clear_cache(self):
		# drop the cached copy of this document so the next read hits the db
		frappe.clear_document_cache(self.doctype, self.name)
def reset_seen(self):
"""Clear _seen property and set current user as seen"""
if getattr(self.meta, 'track_seen', False):
frappe.db.set_value(self.doctype, self.name, "_seen", json.dumps([frappe.session.user]), update_modified=False)
	def notify_update(self):
		"""Publish realtime that the current document is modified"""
		frappe.publish_realtime("doc_update", {"modified": self.modified, "doctype": self.doctype, "name": self.name},
			doctype=self.doctype, docname=self.name, after_commit=True)

		# also refresh list views for regular (non-single, non-child) doctypes
		if not self.meta.get("read_only") and not self.meta.get("issingle") and \
			not self.meta.get("istable"):
			data = {
				"doctype": self.doctype,
				"name": self.name,
				"user": frappe.session.user
			}
			frappe.publish_realtime("list_update", data, after_commit=True)
	def db_set(self, fieldname, value=None, update_modified=True, notify=False, commit=False):
		"""Set a value in the document object, update the timestamp and update the database.

		WARNING: This method does not trigger controller validations and should
		be used very carefully.

		:param fieldname: fieldname of the property to be updated, or a {"field":"value"} dictionary
		:param value: value of the property to be updated
		:param update_modified: default True. updates the `modified` and `modified_by` properties
		:param notify: default False. run doc.notify_updated() to send updates via socketio
		:param commit: default False. run frappe.db.commit()
		"""
		if isinstance(fieldname, dict):
			self.update(fieldname)
		else:
			self.set(fieldname, value)

		if update_modified and (self.doctype, self.name) not in frappe.flags.currently_saving:
			# don't update modified timestamp if called from post save methods
			# like on_update or on_submit
			self.set("modified", now())
			self.set("modified_by", frappe.session.user)

		self.load_doc_before_save()
		# to trigger notification on value change
		self.run_method('before_change')

		frappe.db.set_value(self.doctype, self.name, fieldname, value,
			self.modified, self.modified_by, update_modified=update_modified)

		self.run_method('on_change')

		if notify:
			self.notify_update()

		self.clear_cache()
		if commit:
			frappe.db.commit()
	def db_get(self, fieldname):
		"""Return the current database value of `fieldname` for this document
		(bypasses the in-memory copy)."""
		return frappe.db.get_value(self.doctype, self.name, fieldname)
def check_no_back_links_exist(self):
"""Check if document links to any active document before Cancel."""
from frappe.model.delete_doc import check_if_doc_is_linked, check_if_doc_is_dynamically_linked
if not self.flags.ignore_links:
check_if_doc_is_linked(self, method="Cancel")
check_if_doc_is_dynamically_linked(self, method="Cancel")
def save_version(self):
"""Save version info"""
version = frappe.new_doc('Version')
if version.set_diff(self._doc_before_save, self):
version.insert(ignore_permissions=True)
if not frappe.flags.in_migrate:
follow_document(self.doctype, self.name, frappe.session.user)
	@staticmethod
	def hook(f):
		"""Decorator: Make method `hookable` (i.e. extensible by another app).

		Note: If each hooked method returns a value (dict), then all returns are
		collated in one dict and returned. Ideally, don't return values in hookable
		methods, set properties in the document."""
		def add_to_return_value(self, new_return_value):
			# dict returns are merged; any other value replaces the previous one
			if isinstance(new_return_value, dict):
				if not self.get("_return_value"):
					self._return_value = {}
				self._return_value.update(new_return_value)
			else:
				self._return_value = new_return_value or self.get("_return_value")

		def compose(fn, *hooks):
			# run the original method first, then each hook in declaration order
			def runner(self, method, *args, **kwargs):
				add_to_return_value(self, fn(self, *args, **kwargs))
				for f in hooks:
					add_to_return_value(self, f(self, method, *args, **kwargs))

				return self._return_value

			return runner

		def composer(self, *args, **kwargs):
			hooks = []
			method = f.__name__
			doc_events = frappe.get_doc_hooks()
			# doctype-specific handlers plus wildcard ("*") handlers
			for handler in doc_events.get(self.doctype, {}).get(method, []) \
				+ doc_events.get("*", {}).get(method, []):
				hooks.append(frappe.get_attr(handler))

			composed = compose(f, *hooks)
			return composed(self, method, *args, **kwargs)

		return composer
def is_whitelisted(self, method):
fn = getattr(self, method, None)
if not fn:
raise NotFound("Method {0} not found".format(method))
elif not getattr(fn, "whitelisted", False):
raise Forbidden("Method {0} not whitelisted".format(method))
	def validate_value(self, fieldname, condition, val2, doc=None, raise_exception=None):
		"""Check that value of fieldname should be 'condition' val2
		else throw Exception.

		:param fieldname: field to read from `doc` (defaults to self)
		:param condition: comparison operator understood by `frappe.compare`
		:param val2: expected value (cast to the field's type before comparing)
		:param raise_exception: optional exception class for the failure"""
		error_condition_map = {
			"in": _("one of"),
			"not in": _("none of"),
			"^": _("beginning with"),
		}

		if not doc:
			doc = self

		val1 = doc.get_value(fieldname)

		df = doc.meta.get_field(fieldname)
		val2 = doc.cast(val2, df)

		if not frappe.compare(val1, condition, val2):
			label = doc.meta.get_label(fieldname)
			condition_str = error_condition_map.get(condition, condition)
			if doc.parentfield:
				msg = _("Incorrect value in row {0}: {1} must be {2} {3}").format(doc.idx, label, condition_str, val2)
			else:
				msg = _("Incorrect value: {0} must be {1} {2}").format(label, condition_str, val2)

			# raise passed exception or True
			msgprint(msg, raise_exception=raise_exception or True)
def validate_table_has_rows(self, parentfield, raise_exception=None):
"""Raise exception if Table field is empty."""
if not (isinstance(self.get(parentfield), list) and len(self.get(parentfield)) > 0):
label = self.meta.get_label(parentfield)
frappe.throw(_("Table {0} cannot be empty").format(label), raise_exception or frappe.EmptyTableError)
def round_floats_in(self, doc, fieldnames=None):
    """Round floats for all `Currency`, `Float`, `Percent` fields for the given doc.

    :param doc: Document whose numeric properties are to be rounded.
    :param fieldnames: [Optional] List of fields to be rounded."""
    if not fieldnames:
        # default to every numeric field declared in the doc's meta
        numeric_fields = doc.meta.get("fields", {"fieldtype": ["in", ["Currency", "Float", "Percent"]]})
        fieldnames = (df.fieldname for df in numeric_fields)
    for fieldname in fieldnames:
        rounded = flt(doc.get(fieldname), self.precision(fieldname, doc.parentfield))
        doc.set(fieldname, rounded)
def get_url(self):
    """Return the Desk URL for this document: `/desk#Form/{doctype}/{name}`."""
    return "/desk#Form/{0}/{1}".format(self.doctype, self.name)
def add_comment(self, comment_type='Comment', text=None, comment_email=None, link_doctype=None, link_name=None, comment_by=None):
    """Add a comment to this document.

    :param comment_type: e.g. `Comment`. See Communication for more info.
    :param text: comment body; defaults to the comment_type string itself
    :param comment_email: author email; defaults to the session user
    :param link_doctype: optional doctype to link the comment against
    :param link_name: optional docname to link the comment against
    :param comment_by: optional display name of the comment author
    :returns: the inserted Comment document"""
    out = frappe.get_doc({
        "doctype":"Comment",
        'comment_type': comment_type,
        "comment_email": comment_email or frappe.session.user,
        "comment_by": comment_by,
        "reference_doctype": self.doctype,
        "reference_name": self.name,
        "content": text or comment_type,
        "link_doctype": link_doctype,
        "link_name": link_name
    }).insert(ignore_permissions=True)
    return out
def add_seen(self, user=None):
    """add the given/current user to list of users who have seen this document (_seen)"""
    if not user:
        user = frappe.session.user
    if not self.meta.track_seen:
        return
    seen_users = frappe.parse_json(self.get('_seen') or [])
    if user in seen_users:
        return
    seen_users.append(user)
    # write directly to the DB without bumping the modified timestamp
    frappe.db.set_value(self.doctype, self.name, '_seen', json.dumps(seen_users), update_modified=False)
    frappe.local.flags.commit = True
def add_viewed(self, user=None):
    """add log to communication when a user views a document

    :param user: user who viewed the document (defaults to the session user)
    """
    if not user:
        user = frappe.session.user
    if hasattr(self.meta, 'track_views') and self.meta.track_views:
        frappe.get_doc({
            "doctype": "View Log",
            # fix: honour the `user` argument; previously this hard-coded
            # frappe.session.user, silently ignoring the parameter
            "viewed_by": user,
            "reference_doctype": self.doctype,
            "reference_name": self.name,
        }).insert(ignore_permissions=True)
        frappe.local.flags.commit = True
def get_signature(self):
    """Returns signature (hash) for private URL."""
    creation_stamp = get_datetime_str(self.creation).encode()
    return hashlib.sha224(creation_stamp).hexdigest()
def get_liked_by(self):
    """Return the list of users who liked this document, parsed from the
    JSON string stored in `_liked_by` (empty list when unset)."""
    raw = getattr(self, "_liked_by", None)
    return json.loads(raw) if raw else []
def set_onload(self, key, value):
    """Store *value* under *key* in the transient `__onload` dict,
    creating the dict on first use."""
    onload = self.get("__onload")
    if not onload:
        onload = frappe._dict()
        self.set("__onload", onload)
    onload[key] = value
def get_onload(self, key=None):
    """Return the whole `__onload` dict, or one entry when *key* is given."""
    if key:
        return self.get('__onload')[key]
    return self.get("__onload", frappe._dict())
def queue_action(self, action, **kwargs):
    """Run an action in background. If the action has an inner function,
    like _submit for submit, it will call that instead

    :param action: name of the method to run on this document
    :param kwargs: forwarded to the enqueued method"""
    # call _submit instead of submit, so you can override submit to call
    # run_delayed based on some action
    # See: Stock Reconciliation
    if hasattr(self, '_' + action):
        action = '_' + action
    # an existing lock file means a run is already queued for this doc
    if file_lock.lock_exists(self.get_signature()):
        frappe.throw(_('This document is currently queued for execution. Please try again'),
                     title=_('Document Queued'))
    self.lock()
    enqueue('frappe.model.document.execute_action', doctype=self.doctype, name=self.name,
            action=action, **kwargs)
def lock(self, timeout=None):
    """Creates a lock file for the given document. If timeout is set,
    it will retry every 1 second for acquiring the lock again

    :param timeout: Timeout in seconds, default 0
    :raises frappe.DocumentLockedError: if the lock is still held after
        the (optional) retry window"""
    signature = self.get_signature()
    if file_lock.lock_exists(signature):
        lock_exists = True
        if timeout:
            # poll once per second until the lock disappears or we time out
            for i in range(timeout):
                time.sleep(1)
                if not file_lock.lock_exists(signature):
                    lock_exists = False
                    break
        if lock_exists:
            raise frappe.DocumentLockedError
    file_lock.create_lock(signature)
def unlock(self):
    """Delete the lock file for this document (created by :meth:`lock`)."""
    file_lock.delete_lock(self.get_signature())
# validation helpers
def validate_from_to_dates(self, from_date_field, to_date_field):
    """
    Generic validation to verify date sequence
    """
    from_date = self.get(from_date_field)
    to_date = self.get(to_date_field)
    # negative diff means the "to" date precedes the "from" date
    if date_diff(to_date, from_date) < 0:
        frappe.throw(_('{0} must be after {1}').format(
            frappe.bold(self.meta.get_label(to_date_field)),
            frappe.bold(self.meta.get_label(from_date_field)),
        ), frappe.exceptions.InvalidDates)
def get_assigned_users(self):
    """Return the set of users with a non-cancelled ToDo assignment
    referencing this document."""
    assignments = frappe.get_all(
        'ToDo',
        fields=['owner'],
        filters={
            'reference_type': self.doctype,
            'reference_name': self.name,
            'status': ('!=', 'Cancelled'),
        },
    )
    return {assignment.owner for assignment in assignments}
def execute_action(doctype, name, action, **kwargs):
    """Execute an action on a document (called by background worker)

    :param doctype: doctype of the target document
    :param name: name of the target document
    :param action: method name to invoke on the document
    :param kwargs: forwarded to the invoked method"""
    doc = frappe.get_doc(doctype, name)
    # release the queue lock taken by queue_action before running
    doc.unlock()
    try:
        getattr(doc, action)(**kwargs)
    except Exception:
        frappe.db.rollback()
        # add a comment (?)
        if frappe.local.message_log:
            # reuse the last message thrown during the failed action
            msg = json.loads(frappe.local.message_log[-1]).get('message')
        else:
            msg = '<pre><code>' + frappe.get_traceback() + '</pre></code>'
        doc.add_comment('Comment', _('Action Failed') + '<br><br>' + msg)
    doc.notify_update()
def make_event_update_log(doc, update_type):
    """Save update info for doctypes that have event consumers"""
    data = None
    if update_type != 'Delete':
        # diff for update type, doc for create type
        data = frappe.as_json(doc.diff) if doc.get('diff') else frappe.as_json(doc)
    frappe.get_doc({
        'doctype': 'Event Update Log',
        'update_type': update_type,
        'ref_doctype': doc.doctype,
        'docname': doc.name,
        'data': data
    }).insert(ignore_permissions=True)
    frappe.db.commit()
def check_doctype_has_consumers(doctype):
    """Check if doctype has event consumers for event streaming"""
    # no Event Consumer doctype installed -> streaming is unavailable
    if not frappe.db.exists('DocType', 'Event Consumer'):
        return False
    consumers = frappe.get_all('Event Consumer Document Type', {
        'ref_doctype': doctype,
        'status': 'Approved'
    }, limit=1)
    return bool(len(consumers) and consumers[0])
| 31.368107 | 130 | 0.718477 |
ed9f079a5b65950eaaab642a82fa9d435eff8db3 | 529 | py | Python | blogApp/utils/hash62.py | amitra/BikeMaps | eb80eed2e3159ad9c4e46427a9f488e1221794fa | [
"MIT"
] | null | null | null | blogApp/utils/hash62.py | amitra/BikeMaps | eb80eed2e3159ad9c4e46427a9f488e1221794fa | [
"MIT"
] | null | null | null | blogApp/utils/hash62.py | amitra/BikeMaps | eb80eed2e3159ad9c4e46427a9f488e1221794fa | [
"MIT"
ALPHA62 = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

def hash(i10):  # noqa: A001 - keeps the module's public name (shadows builtin hash)
    """Convert a non-negative base-10 integer into a base-62 string
    (least-significant digit first) and return it.

    Returns "" for 0, which dehash() maps back to 0.
    """
    digits = []
    while i10 > 0:
        digits.append(i10 % 62)
        # fix: floor division -- `i10 /= 62` yields a float (and a TypeError
        # on indexing) under Python 3
        i10 //= 62
    return "".join(ALPHA62[d] for d in digits)
def dehash(s62):
    """ Convert a base-62 integer (as type:string) into a base-10 integer and return the integer """
    # digits are least-significant first, so scale the multiplier as we go
    total = 0
    multiplier = 1
    for ch in s62:
        total += ALPHA62.index(ch) * multiplier
        multiplier *= 62
    return total
| 31.117647 | 100 | 0.642722 |
deaf300f23e0e02f5e44d6f622339a10d72c8428 | 1,343 | py | Python | contact/models.py | Dimstella/blockchain-contact-tracing-app-hospitals | e0b2bf2b3b8c06e58032faed99900d1c7b7d300d | [
"MIT"
] | null | null | null | contact/models.py | Dimstella/blockchain-contact-tracing-app-hospitals | e0b2bf2b3b8c06e58032faed99900d1c7b7d300d | [
"MIT"
] | null | null | null | contact/models.py | Dimstella/blockchain-contact-tracing-app-hospitals | e0b2bf2b3b8c06e58032faed99900d1c7b7d300d | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
class Patient(models.Model):
    """
    Patient information in hospital database
    """
    # NOTE(review): True/False keys on a CharField are stored as the strings
    # "True"/"False" -- confirm this matches values written elsewhere before
    # changing these choices.
    CHOICES = [(True, 'Infected'), (False, 'Cured'), ('Suspected', 'Suspected')]
    uid = models.AutoField(primary_key=True)
    # account (presumably hospital staff) that registered this patient
    user = models.ForeignKey(User, on_delete=models.CASCADE, default=1)
    name = models.CharField(max_length=100, blank=False, default='')
    surname = models.CharField(max_length=70, blank=False, default='')
    address = models.CharField(max_length=256, blank=False, default='')
    email = models.CharField(max_length=256, blank=False, default='')
    city = models.CharField(max_length=70, blank=False, default='')
    region = models.CharField(max_length=70, blank=False, default='')
    postal = models.CharField(max_length=70, blank=False, default='')
    country = models.CharField(max_length=70, blank=False, default='')
    phone = models.CharField(max_length=70, blank=False, default='')
    # infection status; stored as text, limited to CHOICES above
    status = models.CharField(max_length=20, choices=CHOICES, null=True, default='')
    notes = models.TextField(null=True, blank=True)
    # creation timestamp kept as text -- assumes callers format it; TODO confirm
    created_at = models.CharField(max_length=70, blank=False, default='')
    # anonymized identifier; presumably a hash of patient data -- verify against writer
    hashing = models.CharField(max_length=100, blank=False, default='')
21acec5bdaab40e05be82f855ea0b3797695ed5b | 34,091 | py | Python | animal/quadruped_spine.py | Sookhaal/auri_rigging_scripts | d6705b90a75ac9ff28da6c5674d7bdf864349073 | [
"MIT"
] | 5 | 2018-11-16T09:21:14.000Z | 2022-02-05T08:06:00.000Z | animal/quadruped_spine.py | Sookhaal/auri_rigging_scripts | d6705b90a75ac9ff28da6c5674d7bdf864349073 | [
"MIT"
] | 44 | 2017-09-04T10:48:18.000Z | 2018-01-08T10:31:05.000Z | animal/quadruped_spine.py | Sookhaal/auri_maya_rigging_scripts | d6705b90a75ac9ff28da6c5674d7bdf864349073 | [
"MIT"
] | 2 | 2018-01-29T14:04:32.000Z | 2019-08-14T08:15:45.000Z | """
:created: 2017-12
:author: Alex BROSSARD <abrossard@artfx.fr>
"""
from PySide2 import QtWidgets, QtCore, QtGui
from pymel import core as pmc
from auri.auri_lib import AuriScriptView, AuriScriptController, AuriScriptModel, is_checked, grpbox
from auri.scripts.Maya_Scripts import rig_lib
from auri.scripts.Maya_Scripts.rig_lib import RigController
reload(rig_lib)
class View(AuriScriptView):
    """Qt settings panel for the quadruped spine module.

    Exposes parent-output pickers, joint/ctrl counts, IK and stretch
    toggles and the local-space list, and forwards every change to the
    Controller.
    """

    def __init__(self, *args, **kwargs):
        # Widgets are created before the base __init__ runs because the
        # base class drives setup_ui(), which wires them up.
        self.modules_cbbox = QtWidgets.QComboBox()
        self.outputs_cbbox = QtWidgets.QComboBox()
        self.refresh_btn = QtWidgets.QPushButton("Refresh")
        self.prebuild_btn = QtWidgets.QPushButton("Prebuild")
        self.how_many_jnts = QtWidgets.QSpinBox()
        self.how_many_ctrls = QtWidgets.QSpinBox()
        self.ik_creation_switch = QtWidgets.QCheckBox()
        self.stretch_creation_switch = QtWidgets.QCheckBox()
        self.refresh_spaces_btn = QtWidgets.QPushButton("Refresh")
        self.add_space_btn = QtWidgets.QPushButton("Add")
        self.remove_space_btn = QtWidgets.QPushButton("Remove")
        self.space_modules_cbbox = QtWidgets.QComboBox()
        self.spaces_cbbox = QtWidgets.QComboBox()
        # placeholder selections shown before any space is picked
        self.selected_space_module = "No_space_module"
        self.selected_space = "no_space"
        self.space_list_view = QtWidgets.QListView()
        self.space_list = QtGui.QStringListModel()
        super(View, self).__init__(*args, **kwargs)

    def set_controller(self):
        # called by the base class to bind this view to its controller
        self.ctrl = Controller(self.model, self)

    def set_model(self):
        self.model = Model()

    def refresh_view(self):
        """Push the persisted model values back into the widgets."""
        self.ik_creation_switch.setChecked(self.model.ik_creation_switch)
        self.stretch_creation_switch.setChecked(self.model.stretch_creation_switch)
        self.how_many_ctrls.setValue(self.model.how_many_ctrls)
        self.how_many_jnts.setValue(self.model.how_many_jnts)
        self.ctrl.look_for_parent()
        self.space_list.setStringList(self.model.space_list)
        # refresh the local-space pickers (left = module, right = output)
        self.ctrl.look_for_parent(l_cbbox_stringlist=self.ctrl.modules_with_spaces,
                                  l_cbbox_selection=self.selected_space_module,
                                  l_cbbox=self.space_modules_cbbox, r_cbbox_stringlist=self.ctrl.spaces_model,
                                  r_cbbox_selection=self.selected_space, r_cbbox=self.spaces_cbbox)

    def setup_ui(self):
        """Wire widget signals to controller callbacks and build the layout."""
        self.modules_cbbox.setModel(self.ctrl.modules_with_output)
        self.modules_cbbox.currentTextChanged.connect(self.ctrl.on_modules_cbbox_changed)
        self.outputs_cbbox.setModel(self.ctrl.outputs_model)
        self.outputs_cbbox.currentTextChanged.connect(self.ctrl.on_outputs_cbbox_changed)
        # at least 1 joint and 2 ctrls are required to build a spline spine
        self.how_many_jnts.setMinimum(1)
        self.how_many_jnts.valueChanged.connect(self.ctrl.on_how_many_jnts_changed)
        self.how_many_ctrls.setMinimum(2)
        self.how_many_ctrls.valueChanged.connect(self.ctrl.on_how_many_ctrls_changed)
        self.ik_creation_switch.stateChanged.connect(self.ctrl.on_ik_creation_switch_changed)
        self.stretch_creation_switch.stateChanged.connect(self.ctrl.on_stretch_creation_switch_changed)
        self.refresh_btn.clicked.connect(self.ctrl.look_for_parent)
        self.prebuild_btn.clicked.connect(self.ctrl.prebuild)
        self.space_modules_cbbox.setModel(self.ctrl.modules_with_spaces)
        self.space_modules_cbbox.currentTextChanged.connect(self.ctrl.on_space_modules_cbbox_changed)
        self.spaces_cbbox.setModel(self.ctrl.spaces_model)
        self.spaces_cbbox.currentTextChanged.connect(self.ctrl.on_spaces_cbbox_changed)
        self.space_list_view.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
        self.space_list.setStringList(self.model.space_list)
        self.space_list_view.setModel(self.space_list)
        self.add_space_btn.clicked.connect(self.ctrl.add_space_to_list)
        self.remove_space_btn.clicked.connect(self.ctrl.remove_space_from_list)
        self.refresh_spaces_btn.clicked.connect(self.ctrl.look_for_spaces)
        # --- layout -------------------------------------------------------
        main_layout = QtWidgets.QVBoxLayout()
        select_parent_layout = QtWidgets.QVBoxLayout()
        select_parent_grp = grpbox("Select parent", select_parent_layout)
        cbbox_layout = QtWidgets.QHBoxLayout()
        cbbox_layout.addWidget(self.modules_cbbox)
        cbbox_layout.addWidget(self.outputs_cbbox)
        select_parent_layout.addLayout(cbbox_layout)
        select_parent_layout.addWidget(self.refresh_btn)
        select_spaces_layout = QtWidgets.QVBoxLayout()
        select_spaces_grp = grpbox("Select local spaces :", select_spaces_layout)
        spaces_cbbox_layout = QtWidgets.QHBoxLayout()
        spaces_cbbox_layout.addWidget(self.space_modules_cbbox)
        spaces_cbbox_layout.addWidget(self.spaces_cbbox)
        btn_layout = QtWidgets.QVBoxLayout()
        btn_layout.addWidget(self.refresh_spaces_btn)
        btn_layout.addWidget(self.add_space_btn)
        select_spaces_layout.addLayout(spaces_cbbox_layout)
        select_spaces_layout.addLayout(btn_layout)
        space_list_layout = QtWidgets.QVBoxLayout()
        space_list_grp = grpbox("local spaces :", space_list_layout)
        space_list_layout.addWidget(self.space_list_view)
        space_list_layout.addWidget(self.remove_space_btn)
        options_layout = QtWidgets.QVBoxLayout()
        options_grp = grpbox("Options", options_layout)
        how_many_layout = QtWidgets.QVBoxLayout()
        jnts_layout = QtWidgets.QVBoxLayout()
        jnts_text = QtWidgets.QLabel("How many jnts :")
        jnts_layout.addWidget(jnts_text)
        jnts_layout.addWidget(self.how_many_jnts)
        ctrls_layout = QtWidgets.QVBoxLayout()
        ctrls_text = QtWidgets.QLabel("How many ctrls :")
        ctrls_layout.addWidget(ctrls_text)
        ctrls_layout.addWidget(self.how_many_ctrls)
        how_many_layout.addLayout(jnts_layout)
        how_many_layout.addLayout(ctrls_layout)
        checkbox_layout = QtWidgets.QVBoxLayout()
        ik_layout = QtWidgets.QHBoxLayout()
        ik_text = QtWidgets.QLabel("IK ctrls :")
        ik_layout.addWidget(ik_text)
        ik_layout.addWidget(self.ik_creation_switch)
        stretch_layout = QtWidgets.QHBoxLayout()
        stretch_text = QtWidgets.QLabel("stretch/squash :")
        stretch_layout.addWidget(stretch_text)
        stretch_layout.addWidget(self.stretch_creation_switch)
        checkbox_layout.addLayout(ik_layout)
        checkbox_layout.addLayout(stretch_layout)
        options_layout.addLayout(how_many_layout)
        options_layout.addLayout(checkbox_layout)
        main_layout.addWidget(select_parent_grp)
        main_layout.addWidget(options_grp)
        main_layout.addWidget(select_spaces_grp)
        main_layout.addWidget(space_list_grp)
        main_layout.addWidget(self.prebuild_btn)
        self.setLayout(main_layout)
class Controller(RigController):
def __init__(self, model, view):
    """
    Args:
        model (Model):
        view (View):
    """
    # scene objects created/cached during the build
    self.guides_grp = None
    self.guides = []
    self.guide_names = []
    self.created_spine_jnts = []
    self.created_pelvis_jnt = None
    self.ik_spline = None
    self.created_locs = []
    # self.tangent_locs = []
    self.created_fk_ctrls = []
    self.created_inv_fk_ctrls = []
    self.created_pelvis_ctrl = None
    self.created_ik_ctrls = []
    # joints registered into the shared "jnts_to_SKN_SET" at cleanup
    self.jnts_to_skin = []
    self.bend_ctrls = []
    RigController.__init__(self, model, view)
def prebuild(self):
    """Create or refresh the guides and temporary outputs before a build.

    Rebuilds the spine guide curve so its spans/degree match the requested
    ctrl count, or creates fresh pelvis/spine guides when none exist yet.
    """
    temp_outputs = ["pelvis_OUTPUT", "start_OUTPUT", "end_OUTPUT"]
    for i in xrange(self.model.how_many_jnts):
        temp_output = "jnt_{0}_OUTPUT".format(i)
        temp_outputs.append(temp_output)
    self.create_temporary_outputs(temp_outputs)
    self.guide_names = ["{0}_pelvis_GUIDE".format(self.model.module_name),
                        "{0}_spine_GUIDE".format(self.model.module_name)]
    # degree-3 curve by default; with fewer than 4 ctrls the degree is
    # lowered so the curve still has enough CVs
    d = 3
    nb_points = self.model.how_many_ctrls - 2
    if self.model.how_many_ctrls < 4:
        d = 3 + self.model.how_many_ctrls - 4
        nb_points = 2
    if self.guide_check(self.guide_names):
        # guides already exist: rebuild the spine curve only if its
        # topology no longer matches the requested ctrl count
        self.guides = pmc.ls(self.guide_names)
        if d != 2 and (self.guides[1].getShape().getAttr("spans") != nb_points - 1 or
                       self.guides[1].getShape().getAttr("degree") != d):
            self.guides[1] = pmc.rebuildCurve(self.guide_names[1], rpo=0, rt=0, end=1, kr=0, kep=1, kt=0,
                                              s=(nb_points - 1), d=d, ch=0, replaceOriginal=1)[0]
        elif self.guides[1].getShape().getAttr("spans") != nb_points - 1 or self.guides[1].getShape().getAttr("degree") != d:
            # degree-2 case: rebuild with extra spans then drop the two
            # in-between CVs to reach the expected point count
            self.guides[1] = pmc.rebuildCurve(self.guide_names[1], rpo=0, rt=0, end=1, kr=0, kep=1, kt=0,
                                              s=3, d=d, ch=0, replaceOriginal=1)[0]
            pmc.delete(self.guides[1].cv[-2])
            pmc.delete(self.guides[1].cv[1])
        self.guides_grp = pmc.ls("{0}_guides".format(self.model.module_name))[0]
        self.guides_grp.setAttr("visibility", 1)
        self.view.refresh_view()
        pmc.select(d=1)
        return
    # first run: create fresh guides at default positions
    pelvis_guides = pmc.spaceLocator(p=(0, 0, 0), n=self.guide_names[0])
    spine_guide = rig_lib.create_curve_guide(d=d, number_of_points=nb_points, name=self.guide_names[1],
                                             hauteur_curve=5, front_axe="z")
    self.guides = [pelvis_guides, spine_guide]
    self.guides_grp = self.group_guides(self.guides)
    self.guides[0].setAttr("translate", (0, 7, -1))
    self.guides[1].setAttr("translate", (0, 8, 0))
    self.view.refresh_view()
    pmc.select(d=1)
def execute(self):
    """Build the rig: joints, ik spline, fk/ik ctrls, twist, optional
    stretch, outputs and local spaces, then clean the scene up."""
    # reset per-build caches so a re-execute starts from a clean state
    self.created_locs = []
    self.created_fk_ctrls = []
    self.created_inv_fk_ctrls = []
    self.bend_ctrls = []
    self.prebuild()
    self.delete_existing_objects()
    self.connect_to_parent()
    self.create_jnts()
    self.create_ikspline()
    self.create_fk()
    self.activate_twist()
    # "accurate" stretch measures each span; "average" is used for the
    # minimal 2-ctrl setup
    if self.model.stretch_creation_switch == 1 and self.model.how_many_ctrls > 2:
        self.connect_z_ik_spline_stretch(self.ik_spline, self.created_spine_jnts, measure_type="accurate")
    elif self.model.stretch_creation_switch == 1:
        self.connect_z_ik_spline_stretch(self.ik_spline, self.created_spine_jnts, measure_type="average")
    if self.model.ik_creation_switch == 1:
        self.create_ik()
    self.create_outputs()
    self.create_local_spaces()
    self.clean_rig()
    pmc.select(cl=1)
def create_jnts(self):
    """Create the spine joint chain from a linear rebuild of the spine
    guide, plus a separate pelvis joint; collect them for skinning."""
    # degree-1 rebuild gives one CV per wanted joint position
    guide_rebuilded = pmc.rebuildCurve(self.guides[1], rpo=0, rt=0, end=1, kr=0, kep=1, kt=0,
                                       s=self.model.how_many_jnts, d=1, ch=0, replaceOriginal=0)[0]
    if self.model.how_many_jnts == 2:
        # drop the two in-between CVs the rebuild adds in the 2-joint case
        pmc.delete(guide_rebuilded.cv[-2])
        pmc.delete(guide_rebuilded.cv[1])
    guide_rebuilded.rename("{0}_temp_rebuilded_GUIDE".format(self.model.module_name))
    vertex_list = guide_rebuilded.cv[:]
    self.created_spine_jnts = rig_lib.create_jnts_from_cv_list_and_return_jnts_list(vertex_list,
                                                                                    self.model.module_name,
                                                                                    forward_axis="z")
    pmc.parent(self.created_spine_jnts[0], self.jnt_input_grp, r=0)
    rig_lib.change_jnt_chain_suffix(self.created_spine_jnts, new_suffix="SKN")
    pmc.delete(guide_rebuilded)
    pmc.select(cl=1)
    # pelvis joint sits at the pelvis guide, outside the spine chain
    self.created_pelvis_jnt = pmc.joint(p=(pmc.xform(self.guides[0], q=1, ws=1, translation=1)),
                                        n="{0}_pelvis_SKN".format(self.model.module_name))
    self.created_pelvis_jnt.setAttr("rotateOrder", 2)
    pmc.parent(self.created_pelvis_jnt, self.jnt_input_grp)
    self.jnts_to_skin = self.created_spine_jnts[:]
    self.jnts_to_skin.append(self.created_pelvis_jnt)
def create_ikspline(self):
    """Duplicate the spine guide as the ik curve and attach an
    ikSplineSolver handle spanning the whole spine joint chain."""
    self.ik_spline = pmc.duplicate(self.guides[1], n="{0}_ik_CRV".format(self.model.module_name))[0]
    ik_handle = pmc.ikHandle(n=("{0}_ik_HDL".format(self.model.module_name)), startJoint=self.created_spine_jnts[0],
                             endEffector=self.created_spine_jnts[-1], solver="ikSplineSolver", curve=self.ik_spline,
                             createCurve=False, parentCurve=False)[0]
    pmc.parent(self.ik_spline, self.parts_grp, r=1)
    pmc.parent(ik_handle, self.parts_grp, r=1)
    # rename the auto-created effector (second child of the next-to-last joint)
    ik_effector = pmc.listRelatives(self.created_spine_jnts[-2], children=1)[1]
    ik_effector.rename("{0}_ik_EFF".format(self.model.module_name))
def create_fk(self):
    """Create the cv locators that drive the ik curve, the fk / inverse-fk
    ctrl chains, the in-between bend ctrls and the pelvis ctrl."""
    ik_spline_cv_list = []
    ik_spline_controlpoints_list = []
    for i, cv in enumerate(self.guides[1].cv):
        ik_spline_cv_list.append(cv)
    for i, cp in enumerate(self.ik_spline.controlPoints):
        ik_spline_controlpoints_list.append(cp)
    self.created_locs = []
    # one locator + one fk/inv-fk ctrl pair per guide CV
    for i, cv in enumerate(ik_spline_cv_list):
        self.created_locs.append(self.create_locators(i, cv, ik_spline_controlpoints_list))
        self.create_ctrls(i, self.created_locs[i])
    pmc.parent(self.created_locs[0], self.created_inv_fk_ctrls[0])
    pmc.parent(self.created_locs[-1], self.created_fk_ctrls[-1])
    # index of the middle bend ctrl, used to balance the constraint weights
    center_ctrl = (self.model.how_many_ctrls / 2.0) - 1.5
    for i, loc in enumerate(self.created_locs[1:-1]):
        pmc.select(cl=1)
        ctrl_ofs = pmc.joint(p=(0, 0, 0), n="{0}".format(str(loc).replace("_pos", "_bend_ctrl_OFS")))
        ctrl_ofs.setAttr("drawStyle", 2)
        ctrl_shape = pmc.circle(c=(0, 0, 0), nr=(0, 0, 1), sw=360, r=1.5, d=3, s=8,
                                n="{0}".format(str(loc).replace("_pos", "_bend_CTRL_Shape")), ch=0)[0]
        ctrl = rig_lib.create_jnttype_ctrl(name=str(loc).replace("_pos", "_bend_CTRL"), shape=ctrl_shape)
        pmc.parent(ctrl, ctrl_ofs)
        # snap the offset onto the guide curve at the locator's position
        nearest_point_on_curve = pmc.createNode("nearestPointOnCurve", n="temp_NPOC")
        self.guides[1].worldSpace >> nearest_point_on_curve.inputCurve
        loc.getShape().worldPosition >> nearest_point_on_curve.inPosition
        ctrl_ofs.setAttr("translate", nearest_point_on_curve.getAttr("position"))
        pmc.delete(nearest_point_on_curve)
        pmc.parent(ctrl_ofs, self.ctrl_input_grp)
        pmc.parent(loc, ctrl)
        # constrain each bend offset between the inv-fk root and the fk tip;
        # weights fall off with the distance from the chain middle
        if i == center_ctrl:
            const = pmc.parentConstraint(self.created_inv_fk_ctrls[0], self.created_fk_ctrls[-1], ctrl_ofs,
                                         maintainOffset=1, skipRotate=["x", "y", "z"])
            const.setAttr("{0}W0".format(self.created_inv_fk_ctrls[0]), 1)
            const.setAttr("{0}W1".format(self.created_fk_ctrls[-1]), 1)
        elif i < center_ctrl:
            const = pmc.parentConstraint(self.created_inv_fk_ctrls[0], self.created_fk_ctrls[-1], ctrl_ofs,
                                         maintainOffset=1, skipRotate=["x", "y", "z"])
            const.setAttr("{0}W0".format(self.created_inv_fk_ctrls[0]), 1)
            const.setAttr("{0}W1".format(self.created_fk_ctrls[-1]), ((1 / (self.model.how_many_ctrls / 2.0)) *
                                                                      ((i+1) / 2.0)))
        elif i > center_ctrl:
            const = pmc.parentConstraint(self.created_inv_fk_ctrls[0], self.created_fk_ctrls[-1], ctrl_ofs,
                                         maintainOffset=1, skipRotate=["x", "y", "z"])
            const.setAttr("{0}W0".format(self.created_inv_fk_ctrls[0]), ((1 / (self.model.how_many_ctrls / 2.0)) *
                                                                         (((len(self.created_locs) - 1) - (i+1)) / 2.0)))
            const.setAttr("{0}W1".format(self.created_fk_ctrls[-1]), 1)
        self.bend_ctrls.append(ctrl)
    # the locators now drive the curve; zero its own transform
    self.ik_spline.setAttr("translate", (0, 0, 0))
    self.ik_spline.setAttr("rotate", (0, 0, 0))
    self.ik_spline.setAttr("scale", (1, 1, 1))
    pmc.select(d=1)
    pelvis_ctrl_shape = pmc.circle(c=(0, 0, 0), nr=(0, 1, 0), sw=360, r=2.5, d=3, s=8,
                                   n="{0}_pelvis_CTRL_shape".format(self.model.module_name), ch=0)[0]
    self.created_pelvis_ctrl = rig_lib.create_jnttype_ctrl(name="{0}_pelvis_CTRL".format(self.model.module_name),
                                                           shape=pelvis_ctrl_shape, drawstyle=2, rotateorder=2)
    self.created_pelvis_ctrl.setAttr("translate", pmc.xform(self.created_pelvis_jnt, q=1, ws=1, translation=1))
    pmc.parent(self.created_pelvis_ctrl, self.ctrl_input_grp)
    pmc.parentConstraint(self.created_pelvis_ctrl, self.created_pelvis_jnt, maintainOffset=1)
    self.created_pelvis_ctrl.scale >> self.created_pelvis_jnt.scale
def create_locators(self, i, cv, ik_spline_controlpoints):
    """Create a locator snapped onto guide cv *i* and drive the matching
    ik-spline control point with its worldPosition; return the locator."""
    loc = pmc.spaceLocator(p=(0, 0, 0), n="{0}_{1}_pos".format(self.model.module_name, (i + 1)))
    loc.setAttr("translate", pmc.xform(cv, q=1, ws=1, translation=1))
    loc.getShape().worldPosition >> ik_spline_controlpoints[i]
    return loc
def create_ctrls(self, i, cv_loc):
    """Create the fk / inverse-fk circle ctrl pair snapped onto the guide
    curve at *cv_loc*; fk ctrls chain top-down, inv-fk ctrls bottom-up."""
    pmc.select(cl=1)
    ctrl_shape = pmc.circle(c=(0, 0, 0), nr=(0, 0, 1), sw=360, r=3, d=3, s=8,
                            n="{0}_{1}_fk_CTRL_shape".format(self.model.module_name, (i + 1)), ch=0)[0]
    ctrl = rig_lib.create_jnttype_ctrl(name="{0}_{1}_fk_CTRL".format(self.model.module_name, (i + 1)),
                                       shape=ctrl_shape, drawstyle=2, rotateorder=2)
    inv_ctrl_shape = pmc.circle(c=(0, 0, 0), nr=(0, 0, 1), sw=360, r=2.5, d=3, s=8,
                                n="{0}_{1}_inv_fk_CTRL_shape".format(self.model.module_name, (i + 1)), ch=0)[0]
    inv_ctrl = rig_lib.create_jnttype_ctrl(name="{0}_{1}_inv_fk_CTRL".format(self.model.module_name, (i + 1)),
                                           shape=inv_ctrl_shape, drawstyle=2, rotateorder=2)
    # place both ctrls on the guide curve at the locator's closest point
    nearest_point_on_curve = pmc.createNode("nearestPointOnCurve", n="temp_NPOC")
    self.guides[1].worldSpace >> nearest_point_on_curve.inputCurve
    cv_loc.getShape().worldPosition >> nearest_point_on_curve.inPosition
    ctrl.setAttr("translate", nearest_point_on_curve.getAttr("position"))
    inv_ctrl.setAttr("translate", nearest_point_on_curve.getAttr("position"))
    pmc.delete(nearest_point_on_curve)
    pmc.parent(inv_ctrl, self.ctrl_input_grp, r=0)
    if i == 0:
        pmc.parent(ctrl, self.ctrl_input_grp, r=0)
    else:
        # fk: parent under the previous fk ctrl; inv-fk: reparent the
        # previous inv ctrl under this one (reverse hierarchy)
        pmc.parent(ctrl, "{0}_{1}_fk_CTRL".format(self.model.module_name, i), r=0)
        pmc.reorder(ctrl, front=1)
        pmc.parent(self.created_inv_fk_ctrls[i-1], inv_ctrl)
        pmc.reorder(self.created_inv_fk_ctrls[i-1], front=1)
    self.created_fk_ctrls.append(ctrl)
    self.created_inv_fk_ctrls.append(inv_ctrl)
def create_ik(self):
    """Create start/end ik box ctrls snapped to the inv-fk root and fk tip,
    reparent the chain ends under them and hide the now-driven fk ends."""
    start_shape = rig_lib.z_box_curve("{0}_start_ik_CTRL_shape".format(self.model.module_name))
    start_ctrl = rig_lib.create_jnttype_ctrl(name="{0}_start_ik_CTRL".format(self.model.module_name), shape=start_shape,
                                             drawstyle=2, rotateorder=2)
    end_shape = rig_lib.z_box_curve("{0}_end_ik_CTRL_shape".format(self.model.module_name))
    end_ctrl = rig_lib.create_jnttype_ctrl(name="{0}_end_ik_CTRL".format(self.model.module_name), shape=end_shape,
                                           drawstyle=2, rotateorder=2)
    # offset groups aligned onto the inv-fk root and the fk tip
    start_ofs = pmc.group(start_ctrl, n="{0}_start_ik_ctrl_OFS".format(self.model.module_name))
    start_ofs.setAttr("rotateOrder", 2)
    end_ofs = pmc.group(end_ctrl, n="{0}_end_ik_ctrl_OFS".format(self.model.module_name))
    end_ofs.setAttr("rotateOrder", 2)
    start_ofs.setAttr("translate", pmc.xform(self.created_inv_fk_ctrls[0], q=1, ws=1, translation=1))
    start_ofs.setAttr("rotate", pmc.xform(self.created_inv_fk_ctrls[0], q=1, ws=1, rotation=1))
    end_ofs.setAttr("translate", pmc.xform(self.created_fk_ctrls[-1], q=1, ws=1, translation=1))
    end_ofs.setAttr("rotate", pmc.xform(self.created_fk_ctrls[-1], q=1, ws=1, rotation=1))
    # insert the ik ctrls into the existing fk hierarchies
    pmc.parent(start_ofs, self.created_inv_fk_ctrls[1], r=0)
    pmc.parent(self.created_inv_fk_ctrls[0], start_ctrl, r=0)
    pmc.parent(end_ofs, self.created_fk_ctrls[-2], r=0)
    pmc.parent(self.created_fk_ctrls[-1], end_ctrl, r=0)
    pmc.parent(self.created_locs[0], start_ctrl, r=0)
    pmc.parent(self.created_locs[-1], end_ctrl, r=0)
    # the end fk ctrls are now driven through the ik ctrls, so hide them
    self.created_fk_ctrls[-1].setAttr("visibility", 0)
    self.created_inv_fk_ctrls[0].setAttr("visibility", 0)
    self.created_ik_ctrls = [start_ctrl, end_ctrl]
    pmc.parent(self.created_pelvis_ctrl, start_ctrl)
    pmc.parentConstraint(end_ctrl, self.created_spine_jnts[-1], maintainOffset=1, skipTranslate=["x", "y", "z"])
def activate_twist(self):
    """Enable advanced twist on the ik-spline handle, driven by the start
    and end locators' world matrices."""
    ik_handle = pmc.ls("{0}_ik_HDL".format(self.model.module_name))[0]
    # same attribute/value pairs as before, applied in the same order
    twist_settings = [
        ("dTwistControlEnable", 1),
        ("dWorldUpType", 4),
        ("dForwardAxis", 4),
        ("dWorldUpAxis", 0),
        ("dWorldUpVectorX", 0),
        ("dWorldUpVectorY", 1),
        ("dWorldUpVectorZ", 0),
        ("dWorldUpVectorEndX", 0),
        ("dWorldUpVectorEndY", 1),
        ("dWorldUpVectorEndZ", 0),
    ]
    for attr_name, attr_value in twist_settings:
        ik_handle.setAttr(attr_name, attr_value)
    self.created_locs[0].worldMatrix[0] >> ik_handle.dWorldUpMatrix
    self.created_locs[-1].worldMatrix[0] >> ik_handle.dWorldUpMatrixEnd
def clean_rig(self):
    """Hide build internals, lock/color the ctrls, store the module
    parameters on a hidden INFO curve and register the deform joints."""
    self.jnt_input_grp.setAttr("visibility", 0)
    self.parts_grp.setAttr("visibility", 0)
    self.guides_grp.setAttr("visibility", 0)
    for loc in self.created_locs:
        loc_shape = loc.getShape()
        loc_shape.setAttr("visibility", 0)
    # lock channels and set override colors per ctrl family
    for ctrl in self.created_fk_ctrls:
        rig_lib.clean_ctrl(ctrl, 14, trs="ts")
    for ctrl in self.created_inv_fk_ctrls:
        rig_lib.clean_ctrl(ctrl, 18, trs="ts")
    for ctrl in self.created_ik_ctrls:
        rig_lib.clean_ctrl(ctrl, 17, trs="s")
    rig_lib.clean_ctrl(self.created_pelvis_ctrl, 14, trs="t")
    for ctrl in self.bend_ctrls:
        rig_lib.clean_ctrl(ctrl, 4, trs="rs")
        rig_lib.clean_ctrl(ctrl.getParent(), 4, trs="trs")
    # default the space switch to the last enum entry
    if len(self.model.space_list) > 0:
        if self.model.ik_creation_switch == 0:
            self.created_fk_ctrls[-1].setAttr("space", len(self.model.space_list))
            self.created_inv_fk_ctrls[0].setAttr("space", len(self.model.space_list))
        else:
            self.created_ik_ctrls[-1].setAttr("space", len(self.model.space_list))
            self.created_ik_ctrls[0].setAttr("space", len(self.model.space_list))
    # hidden, locked INFO curve stores the module parameters for rebuilds
    info_crv = rig_lib.signature_shape_curve("{0}_INFO".format(self.model.module_name))
    info_crv.getShape().setAttr("visibility", 0)
    info_crv.setAttr("hiddenInOutliner", 1)
    info_crv.setAttr("translateX", lock=True, keyable=False, channelBox=False)
    info_crv.setAttr("translateY", lock=True, keyable=False, channelBox=False)
    info_crv.setAttr("translateZ", lock=True, keyable=False, channelBox=False)
    info_crv.setAttr("rotateX", lock=True, keyable=False, channelBox=False)
    info_crv.setAttr("rotateY", lock=True, keyable=False, channelBox=False)
    info_crv.setAttr("rotateZ", lock=True, keyable=False, channelBox=False)
    info_crv.setAttr("scaleX", lock=True, keyable=False, channelBox=False)
    info_crv.setAttr("scaleY", lock=True, keyable=False, channelBox=False)
    info_crv.setAttr("scaleZ", lock=True, keyable=False, channelBox=False)
    info_crv.setAttr("visibility", lock=True, keyable=False, channelBox=False)
    info_crv.setAttr("overrideEnabled", 1)
    info_crv.setAttr("overrideDisplayType", 2)
    pmc.parent(info_crv, self.parts_grp)
    rig_lib.add_parameter_as_extra_attr(info_crv, "Module", "quadruped_spine")
    rig_lib.add_parameter_as_extra_attr(info_crv, "parent_Module", self.model.selected_module)
    rig_lib.add_parameter_as_extra_attr(info_crv, "parent_output", self.model.selected_output)
    rig_lib.add_parameter_as_extra_attr(info_crv, "how_many_jnts", self.model.how_many_jnts)
    rig_lib.add_parameter_as_extra_attr(info_crv, "how_many_ctrls", self.model.how_many_ctrls)
    rig_lib.add_parameter_as_extra_attr(info_crv, "ik_creation", self.model.ik_creation_switch)
    rig_lib.add_parameter_as_extra_attr(info_crv, "stretch_creation", self.model.stretch_creation_switch)
    rig_lib.add_parameter_as_extra_attr(info_crv, "local_spaces", self.model.space_list)
    # register all deform joints in the shared skin set
    if not pmc.objExists("jnts_to_SKN_SET"):
        skn_set = pmc.createNode("objectSet", n="jnts_to_SKN_SET")
    else:
        skn_set = pmc.ls("jnts_to_SKN_SET", type="objectSet")[0]
    for jnt in self.jnts_to_skin:
        if type(jnt) == list:
            for obj in jnt:
                skn_set.add(obj)
        else:
            skn_set.add(jnt)
def create_outputs(self):
    """Create the OUTPUT transforms other modules can plug into.

    One output is made for the pelvis joint, one for the ik start locator,
    one for the last spine joint, plus one per intermediate spine joint.
    """
    module = self.model.module_name
    rig_lib.create_output(name="{0}_pelvis_OUTPUT".format(module), parent=self.created_pelvis_jnt)
    rig_lib.create_output(name="{0}_start_OUTPUT".format(module), parent=self.created_locs[0])
    rig_lib.create_output(name="{0}_end_OUTPUT".format(module), parent=self.created_spine_jnts[-1])

    last_jnt = self.created_spine_jnts[-1]
    for idx, spine_jnt in enumerate(self.created_spine_jnts):
        if spine_jnt == last_jnt:
            # The end joint already got its own "_end_OUTPUT" above.
            continue
        rig_lib.create_output(name="{0}_jnt_{1}_OUTPUT".format(module, idx), parent=spine_jnt)
def create_local_spaces(self):
    """Build the optional orient-space switch for the spine extremity ctrls.

    For every output listed in ``self.model.space_list`` a SPACELOC locator
    is created for both the start and the end controller and parented under
    that output; an enum attribute ``space`` on the controller then drives
    the orientConstraint weights through condition nodes, letting the
    controller follow any of those spaces.

    NOTE(review): ``enumName`` is handed a Python list here — pymel seems to
    tolerate it, but the documented form is a ":"-joined string; confirm.
    """
    spaces_names = []
    end_space_locs = []
    start_space_locs = []
    for space in self.model.space_list:
        # "<node>_OUTPUT" -> "<node>"; the main local_ctrl acts as "world".
        name = str(space).replace("_OUTPUT", "")
        if "local_ctrl" in name:
            name = "world"
        spaces_names.append(name)

        # Rebuild the end-ctrl space locator from scratch if it already exists.
        if pmc.objExists("{0}_{1}_SPACELOC".format(self.created_ik_ctrls[-1], name)):
            pmc.delete("{0}_{1}_SPACELOC".format(self.created_ik_ctrls[-1], name))
        end_space_loc = pmc.spaceLocator(p=(0, 0, 0), n="{0}_{1}_SPACELOC".format(self.created_ik_ctrls[-1], name))
        end_space_locs.append(end_space_loc)

        # Same rebuild for the start-ctrl space locator.
        if pmc.objExists("{0}_{1}_SPACELOC".format(self.created_ik_ctrls[0], name)):
            pmc.delete("{0}_{1}_SPACELOC".format(self.created_ik_ctrls[0], name))
        start_space_loc = pmc.spaceLocator(p=(0, 0, 0), n="{0}_{1}_SPACELOC".format(self.created_ik_ctrls[0], name))
        start_space_locs.append(start_space_loc)

    # Last enum entry: stay in the rig's own (local) space.
    spaces_names.append("local")

    if len(self.model.space_list) > 0:
        # Without IK the switch lives on the FK extremity ctrls; with IK it
        # lives on the IK start/end ctrls.
        if self.model.ik_creation_switch == 0:
            self.created_fk_ctrls[-1].addAttr("space", attributeType="enum", enumName=spaces_names, hidden=0, keyable=1)
            pmc.group(self.created_fk_ctrls[-1], p=self.created_fk_ctrls[-2],
                      n="{0}_CONSTGRP".format(self.created_fk_ctrls[-1]))
            self.created_inv_fk_ctrls[0].addAttr("space", attributeType="enum", enumName=spaces_names, hidden=0, keyable=1)
            pmc.group(self.created_inv_fk_ctrls[0], p=self.created_inv_fk_ctrls[1],
                      n="{0}_CONSTGRP".format(self.created_inv_fk_ctrls[0]))
        else:
            self.created_ik_ctrls[-1].addAttr("space", attributeType="enum", enumName=spaces_names, hidden=0, keyable=1)
            self.created_ik_ctrls[0].addAttr("space", attributeType="enum", enumName=spaces_names, hidden=0, keyable=1)

        for i, space in enumerate(self.model.space_list):
            # Snap each locator onto the spine extremity it mirrors, then
            # parent it under the followed output.
            end_space_locs[i].setAttr("translate", pmc.xform(self.created_spine_jnts[-1], q=1, ws=1, translation=1))
            pmc.parent(end_space_locs[i], space)
            start_space_locs[i].setAttr("translate", pmc.xform(self.created_spine_jnts[0], q=1, ws=1, translation=1))
            pmc.parent(start_space_locs[i], space)

            if self.model.ik_creation_switch == 0:
                end_fk_space_const = pmc.orientConstraint(end_space_locs[i], self.created_fk_ctrls[-1].getParent(),
                                                          maintainOffset=1)
                rig_lib.connect_condition_to_constraint("{0}.{1}W{2}".format(end_fk_space_const, end_space_locs[i], i),
                                                        self.created_fk_ctrls[-1].space, i,
                                                        "{0}_{1}Space_COND".format(self.created_fk_ctrls[-1],
                                                                                   spaces_names[i]))
                start_fk_space_const = pmc.orientConstraint(start_space_locs[i], self.created_inv_fk_ctrls[0].getParent(),
                                                            maintainOffset=1)
                rig_lib.connect_condition_to_constraint("{0}.{1}W{2}".format(start_fk_space_const, start_space_locs[i], i),
                                                        self.created_inv_fk_ctrls[0].space, i,
                                                        "{0}_{1}Space_COND".format(self.created_inv_fk_ctrls[0],
                                                                                   spaces_names[i]))
            else:
                end_ik_space_const = pmc.orientConstraint(end_space_locs[i], self.created_ik_ctrls[-1].getParent(),
                                                          maintainOffset=1)
                rig_lib.connect_condition_to_constraint("{0}.{1}W{2}".format(end_ik_space_const, end_space_locs[i], i),
                                                        self.created_ik_ctrls[-1].space, i,
                                                        "{0}_{1}Space_COND".format(self.created_ik_ctrls[-1],
                                                                                   spaces_names[i]))
                start_ik_space_const = pmc.orientConstraint(start_space_locs[i], self.created_ik_ctrls[0].getParent(),
                                                            maintainOffset=1)
                rig_lib.connect_condition_to_constraint("{0}.{1}W{2}".format(start_ik_space_const, start_space_locs[i], i),
                                                        self.created_ik_ctrls[0].space, i,
                                                        "{0}_{1}Space_COND".format(self.created_ik_ctrls[0],
                                                                                   spaces_names[i]))
class Model(AuriScriptModel):
    """Settings container for the quadruped-spine module UI."""

    def __init__(self):
        AuriScriptModel.__init__(self)
        # Parent module/output this spine attaches to (filled in by the UI).
        self.selected_module = None
        self.selected_output = None
        # Joint/controller counts along the spine curve.
        self.how_many_jnts = 10
        self.how_many_ctrls = 4
        # Optional rig features.
        self.ik_creation_switch = True
        self.stretch_creation_switch = True
        # Outputs usable as local orient spaces for the extremity ctrls.
        self.space_list = []
| 52.936335 | 135 | 0.616908 |
24e39debb4a48384ec16ac1ecde1742e7b7ad8b7 | 4,112 | py | Python | odoo-13.0/addons/point_of_sale/models/pos_payment_method.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/point_of_sale/models/pos_payment_method.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/point_of_sale/models/pos_payment_method.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | from odoo import api, fields, models, _
from odoo.exceptions import UserError
class PosPaymentMethod(models.Model):
""" Used to classify pos.payment.
Generic characteristics of a pos.payment is described in this model.
E.g. A cash payment can be described by a pos.payment.method with
fields: is_cash_count = True and a cash_journal_id set to an
`account.journal` (type='cash') record.
When a pos.payment.method is cash, cash_journal_id is required as
it will be the journal where the account.bank.statement.line records
will be created.
"""
_name = "pos.payment.method"
_description = "Point of Sale Payment Methods"
_order = "id asc"
def _get_payment_terminal_selection(self):
return []
name = fields.Char(string="Payment Method", required=True, translate=True)
receivable_account_id = fields.Many2one('account.account',
string='Intermediary Account',
required=True,
domain=[('reconcile', '=', True), ('user_type_id.type', '=', 'receivable')],
default=lambda self: self.env.company.account_default_pos_receivable_account_id,
ondelete='restrict',
help='Account used as counterpart of the income account in the accounting entry representing the pos sales.')
is_cash_count = fields.Boolean(string='Cash')
cash_journal_id = fields.Many2one('account.journal',
string='Cash Journal',
domain=[('type', '=', 'cash')],
ondelete='restrict',
help='The payment method is of type cash. A cash statement will be automatically generated.')
split_transactions = fields.Boolean(
string='Split Transactions',
default=False,
help='If ticked, each payment will generate a separated journal item. Ticking that option will slow the closing of the PoS.')
open_session_ids = fields.Many2many('pos.session', string='Pos Sessions', compute='_compute_open_session_ids', help='Open PoS sessions that are using this payment method.')
config_ids = fields.Many2many('pos.config', string='Point of Sale Configurations')
company_id = fields.Many2one('res.company', string='Company', default=lambda self: self.env.company)
use_payment_terminal = fields.Selection(selection=lambda self: self._get_payment_terminal_selection(), string='Use a Payment Terminal', help='Record payments with a terminal on this journal.')
hide_use_payment_terminal = fields.Boolean(compute='_compute_hide_use_payment_terminal', help='Technical field which is used to '
'hide use_payment_terminal when no payment interfaces are installed.')
@api.depends('is_cash_count')
def _compute_hide_use_payment_terminal(self):
no_terminals = not bool(self._fields['use_payment_terminal'].selection(self))
for payment_method in self:
payment_method.hide_use_payment_terminal = no_terminals or payment_method.is_cash_count
@api.onchange('use_payment_terminal')
def _onchange_use_payment_terminal(self):
"""Used by inheriting model to unset the value of the field related to the unselected payment terminal."""
pass
@api.depends('config_ids')
def _compute_open_session_ids(self):
for payment_method in self:
payment_method.open_session_ids = self.env['pos.session'].search([('config_id', 'in', payment_method.config_ids.ids), ('state', '!=', 'closed')])
@api.onchange('is_cash_count')
def _onchange_is_cash_count(self):
if not self.is_cash_count:
self.cash_journal_id = False
else:
self.use_payment_terminal = False
def _is_write_forbidden(self, fields):
return bool(fields and self.open_session_ids)
def write(self, vals):
if self._is_write_forbidden(set(vals.keys())):
raise UserError('Kindly close and validate the following open PoS Sessions before modifying this payment method.\n'
'Open sessions: %s' % (' '.join(self.open_session_ids.mapped('name')),))
return super(PosPaymentMethod, self).write(vals)
| 50.765432 | 196 | 0.700632 |
97099ce6d9cae1370af39d71e235937ef8ab5997 | 97,938 | py | Python | sympy/core/function.py | mgeier/sympy | 3d0e2ec4dcb7653f7fac039cc585ae81dc7251c2 | [
"BSD-3-Clause"
] | null | null | null | sympy/core/function.py | mgeier/sympy | 3d0e2ec4dcb7653f7fac039cc585ae81dc7251c2 | [
"BSD-3-Clause"
] | null | null | null | sympy/core/function.py | mgeier/sympy | 3d0e2ec4dcb7653f7fac039cc585ae81dc7251c2 | [
"BSD-3-Clause"
] | null | null | null | """
There are three types of functions implemented in SymPy:
1) defined functions (in the sense that they can be evaluated) like
exp or sin; they have a name and a body:
f = exp
2) undefined function which have a name but no body. Undefined
functions can be defined using a Function class as follows:
f = Function('f')
(the result will be a Function instance)
3) anonymous function (or lambda function) which have a body (defined
with dummy variables) but have no name:
f = Lambda(x, exp(x)*x)
f = Lambda((x, y), exp(x)*y)
The fourth type of functions are composites, like (sin + cos)(x); these work in
SymPy core, but are not yet part of SymPy.
Examples
========
>>> import sympy
>>> f = sympy.Function("f")
>>> from sympy.abc import x
>>> f(x)
f(x)
>>> print(sympy.srepr(f(x).func))
Function('f')
>>> f(x).args
(x,)
"""
from __future__ import print_function, division
from .add import Add
from .assumptions import ManagedProperties, _assume_defined
from .basic import Basic
from .cache import cacheit
from .compatibility import iterable, is_sequence, as_int, ordered, Iterable
from .decorators import _sympifyit
from .expr import Expr, AtomicExpr
from .numbers import Rational, Float
from .operations import LatticeOp
from .rules import Transform
from .singleton import S
from .sympify import sympify
from sympy.core.containers import Tuple, Dict
from sympy.core.logic import fuzzy_and
from sympy.core.compatibility import string_types, with_metaclass, range
from sympy.utilities import default_sort_key
from sympy.utilities.misc import filldedent
from sympy.utilities.iterables import has_dups
from sympy.core.evaluate import global_evaluate
import sys
import mpmath
import mpmath.libmp as mlib
import inspect
from collections import Counter
def _coeff_isneg(a):
"""Return True if the leading Number is negative.
Examples
========
>>> from sympy.core.function import _coeff_isneg
>>> from sympy import S, Symbol, oo, pi
>>> _coeff_isneg(-3*pi)
True
>>> _coeff_isneg(S(3))
False
>>> _coeff_isneg(-oo)
True
>>> _coeff_isneg(Symbol('n', negative=True)) # coeff is 1
False
"""
if a.is_Mul:
a = a.args[0]
return a.is_Number and a.is_negative
class PoleError(Exception):
    """Raised when a series expansion is requested about a pole."""
class ArgumentIndexError(ValueError):
    """Raised (e.g. by ``Function.fdiff``) when a derivative is requested
    with respect to an argument index the function does not have.

    ``self.args`` holds ``(function, argindex)``.
    """

    def __str__(self):
        return ("Invalid operation with argument number {0} "
                "for Function {1}".format(self.args[1], self.args[0]))
def _getnargs(cls):
if hasattr(cls, 'eval'):
if sys.version_info < (3, ):
return _getnargs_old(cls.eval)
else:
return _getnargs_new(cls.eval)
else:
return None
def _getnargs_old(eval_):
    """Python-2 variant of the ``eval`` signature inspection.

    NOTE: relies on ``inspect.getargspec``, which was removed in
    Python 3.11; this branch is only reached under Python 2
    (see ``_getnargs``).
    """
    evalargspec = inspect.getargspec(eval_)
    if evalargspec.varargs:
        # *args means any number of arguments is acceptable.
        return None
    else:
        evalargs = len(evalargspec.args) - 1  # subtract 1 for cls
        if evalargspec.defaults:
            # if there are default args then they are optional; the
            # fewest args will occur when all defaults are used and
            # the most when none are used (i.e. all args are given)
            return tuple(range(
                evalargs - len(evalargspec.defaults), evalargs + 1))
        return evalargs
def _getnargs_new(eval_):
parameters = inspect.signature(eval_).parameters.items()
if [p for n,p in parameters if p.kind == p.VAR_POSITIONAL]:
return None
else:
p_or_k = [p for n,p in parameters if p.kind == p.POSITIONAL_OR_KEYWORD]
num_no_default = len(list(filter(lambda p:p.default == p.empty, p_or_k)))
num_with_default = len(list(filter(lambda p:p.default != p.empty, p_or_k)))
if not num_with_default:
return num_no_default
return tuple(range(num_no_default, num_no_default+num_with_default+1))
class FunctionClass(ManagedProperties):
    """
    Base class for function classes. FunctionClass is a subclass of type.

    Use Function('<function name>' [ , signature ]) to create
    undefined function classes.
    """
    _new = type.__new__

    def __init__(cls, *args, **kwargs):
        # honor kwarg value or class-defined value before using
        # the number of arguments in the eval function (if present)
        nargs = kwargs.pop('nargs', cls.__dict__.get('nargs', _getnargs(cls)))

        # Canonicalize nargs here; change to set in nargs.
        if is_sequence(nargs):
            if not nargs:
                raise ValueError(filldedent('''
                    Incorrectly specified nargs as %s:
                    if there are no arguments, it should be
                    `nargs = 0`;
                    if there are any number of arguments,
                    it should be
                    `nargs = None`''' % str(nargs)))
            nargs = tuple(ordered(set(nargs)))
        elif nargs is not None:
            nargs = (as_int(nargs),)
        # The raw tuple is kept on the class; the public ``nargs`` property
        # below converts it to a FiniteSet.
        cls._nargs = nargs

        super(FunctionClass, cls).__init__(*args, **kwargs)

    @property
    def __signature__(self):
        """
        Allow Python 3's inspect.signature to give a useful signature for
        Function subclasses.
        """
        # Python 3 only, but backports (like the one in IPython) still might
        # call this.
        try:
            from inspect import signature
        except ImportError:
            return None

        # TODO: Look at nargs
        return signature(self.eval)

    @property
    def nargs(self):
        """Return a set of the allowed number of arguments for the function.

        Examples
        ========

        >>> from sympy.core.function import Function
        >>> from sympy.abc import x, y
        >>> f = Function('f')

        If the function can take any number of arguments, the set of whole
        numbers is returned:

        >>> Function('f').nargs
        Naturals0

        If the function was initialized to accept one or more arguments, a
        corresponding set will be returned:

        >>> Function('f', nargs=1).nargs
        {1}
        >>> Function('f', nargs=(2, 1)).nargs
        {1, 2}

        The undefined function, after application, also has the nargs
        attribute; the actual number of arguments is always available by
        checking the ``args`` attribute:

        >>> f = Function('f')
        >>> f(1).nargs
        Naturals0
        >>> len(f(1).args)
        1
        """
        from sympy.sets.sets import FiniteSet
        # XXX it would be nice to handle this in __init__ but there are import
        # problems with trying to import FiniteSet there
        return FiniteSet(*self._nargs) if self._nargs else S.Naturals0

    def __repr__(cls):
        return cls.__name__
class Application(with_metaclass(FunctionClass, Basic)):
    """
    Base class for applied functions.

    Instances of Application represent the result of applying an application of
    any type to any object.
    """

    is_Function = True

    @cacheit
    def __new__(cls, *args, **options):
        from sympy.sets.fancysets import Naturals0
        from sympy.sets.sets import FiniteSet

        args = list(map(sympify, args))
        evaluate = options.pop('evaluate', global_evaluate[0])
        # WildFunction (and anything else like it) may have nargs defined
        # and we throw that value away here
        options.pop('nargs', None)

        if options:
            raise ValueError("Unknown options: %s" % options)

        if evaluate:
            # Give the subclass a chance to return a simplified/canonical
            # result; None means "build the unevaluated application".
            evaluated = cls.eval(*args)
            if evaluated is not None:
                return evaluated

        obj = super(Application, cls).__new__(cls, *args, **options)

        # make nargs uniform here
        try:
            # things passing through here:
            #  - functions subclassed from Function (e.g. myfunc(1).nargs)
            #  - functions like cos(1).nargs
            #  - AppliedUndef with given nargs like Function('f', nargs=1)(1).nargs
            # Canonicalize nargs here
            if is_sequence(obj.nargs):
                nargs = tuple(ordered(set(obj.nargs)))
            elif obj.nargs is not None:
                nargs = (as_int(obj.nargs),)
            else:
                nargs = None
        except AttributeError:
            # things passing through here:
            #  - WildFunction('f').nargs
            #  - AppliedUndef with no nargs like Function('f')(1).nargs
            nargs = obj._nargs  # note the underscore here
        # convert to FiniteSet
        obj.nargs = FiniteSet(*nargs) if nargs else Naturals0()
        return obj

    @classmethod
    def eval(cls, *args):
        """
        Returns a canonical form of cls applied to arguments args.

        The eval() method is called when the class cls is about to be
        instantiated and it should return either some simplified instance
        (possible of some other class), or if the class cls should be
        unmodified, return None.

        Examples of eval() for the function "sign"
        ---------------------------------------------

        .. code-block:: python

            @classmethod
            def eval(cls, arg):
                if arg is S.NaN:
                    return S.NaN
                if arg is S.Zero: return S.Zero
                if arg.is_positive: return S.One
                if arg.is_negative: return S.NegativeOne
                if isinstance(arg, Mul):
                    coeff, terms = arg.as_coeff_Mul(rational=True)
                    if coeff is not S.One:
                        return cls(coeff) * cls(terms)

        """
        return

    @property
    def func(self):
        return self.__class__

    def _eval_subs(self, old, new):
        # f -> g substitution: rebuild with g when the replacement accepts
        # the same number of arguments; otherwise fall through (None).
        if (old.is_Function and new.is_Function and
            callable(old) and callable(new) and
            old == self.func and len(self.args) in new.nargs):
            return new(*[i._subs(old, new) for i in self.args])
class Function(Application, Expr):
"""
Base class for applied mathematical functions.
It also serves as a constructor for undefined function classes.
Examples
========
First example shows how to use Function as a constructor for undefined
function classes:
>>> from sympy import Function, Symbol
>>> x = Symbol('x')
>>> f = Function('f')
>>> g = Function('g')(x)
>>> f
f
>>> f(x)
f(x)
>>> g
g(x)
>>> f(x).diff(x)
Derivative(f(x), x)
>>> g.diff(x)
Derivative(g(x), x)
Assumptions can be passed to Function.
>>> f_real = Function('f', real=True)
>>> f_real(x).is_real
True
Note that assumptions on a function are unrelated to the assumptions on
the variable it is called on. If you want to add a relationship, subclass
Function and define the appropriate ``_eval_is_assumption`` methods.
In the following example Function is used as a base class for
``my_func`` that represents a mathematical function *my_func*. Suppose
that it is well known, that *my_func(0)* is *1* and *my_func* at infinity
goes to *0*, so we want those two simplifications to occur automatically.
Suppose also that *my_func(x)* is real exactly when *x* is real. Here is
an implementation that honours those requirements:
>>> from sympy import Function, S, oo, I, sin
>>> class my_func(Function):
...
... @classmethod
... def eval(cls, x):
... if x.is_Number:
... if x is S.Zero:
... return S.One
... elif x is S.Infinity:
... return S.Zero
...
... def _eval_is_real(self):
... return self.args[0].is_real
...
>>> x = S('x')
>>> my_func(0) + sin(0)
1
>>> my_func(oo)
0
>>> my_func(3.54).n() # Not yet implemented for my_func.
my_func(3.54)
>>> my_func(I).is_real
False
In order for ``my_func`` to become useful, several other methods would
need to be implemented. See source code of some of the already
implemented functions for more complete examples.
Also, if the function can take more than one argument, then ``nargs``
must be defined, e.g. if ``my_func`` can take one or two arguments
then,
>>> class my_func(Function):
... nargs = (1, 2)
...
>>>
"""
@property
def _diff_wrt(self):
    """Allow derivatives wrt functions.

    Examples
    ========

    >>> from sympy import Function, Symbol
    >>> f = Function('f')
    >>> x = Symbol('x')
    >>> f(x)._diff_wrt
    True
    """
    return True

@cacheit
def __new__(cls, *args, **options):
    # Handle calls like Function('f')
    if cls is Function:
        return UndefinedFunction(*args, **options)

    n = len(args)
    if n not in cls.nargs:
        # XXX: exception message must be in exactly this format to
        # make it work with NumPy's functions like vectorize(). See,
        # for example, https://github.com/numpy/numpy/issues/1697.
        # The ideal solution would be just to attach metadata to
        # the exception and change NumPy to take advantage of this.
        temp = ('%(name)s takes %(qual)s %(args)s '
                'argument%(plural)s (%(given)s given)')
        raise TypeError(temp % {
            'name': cls,
            'qual': 'exactly' if len(cls.nargs) == 1 else 'at least',
            'args': min(cls.nargs),
            'plural': 's'*(min(cls.nargs) != 1),
            'given': n})

    evaluate = options.get('evaluate', global_evaluate[0])
    result = super(Function, cls).__new__(cls, *args, **options)
    if evaluate and isinstance(result, cls) and result.args:
        # If any argument is a Float, evaluate the whole application
        # numerically at the precision of the least precise argument.
        pr2 = min(cls._should_evalf(a) for a in result.args)
        if pr2 > 0:
            pr = max(cls._should_evalf(a) for a in result.args)
            result = result.evalf(mlib.libmpf.prec_to_dps(pr))
    return result

@classmethod
def _should_evalf(cls, arg):
    """
    Decide if the function should automatically evalf().

    By default (in this implementation), this happens if (and only if) the
    ARG is a floating point number.
    This function is used by __new__.

    Returns the precision to evalf to, or -1 if it shouldn't evalf.
    """
    from sympy.core.evalf import pure_complex
    if arg.is_Float:
        return arg._prec
    if not arg.is_Add:
        return -1
    # x + y*I with a Float real or imaginary part also triggers evalf.
    m = pure_complex(arg)
    if m is None or not (m[0].is_Float or m[1].is_Float):
        return -1
    l = [i._prec for i in m if i.is_Float]
    l.append(-1)
    return max(l)

@classmethod
def class_key(cls):
    # Sort key used for canonical ordering of expressions: well-known
    # functions get fixed ranks, anything else sorts by name.
    from sympy.sets.fancysets import Naturals0
    funcs = {
        'exp': 10,
        'log': 11,
        'sin': 20,
        'cos': 21,
        'tan': 22,
        'cot': 23,
        'sinh': 30,
        'cosh': 31,
        'tanh': 32,
        'coth': 33,
        'conjugate': 40,
        're': 41,
        'im': 42,
        'arg': 43,
    }
    name = cls.__name__
    try:
        i = funcs[name]
    except KeyError:
        i = 0 if isinstance(cls.nargs, Naturals0) else 10000

    return 4, i, name

@property
def is_commutative(self):
    """
    Returns whether the function is commutative.
    """
    # Commutative iff every argument is commutative.
    if all(getattr(t, 'is_commutative') for t in self.args):
        return True
    else:
        return False
def _eval_evalf(self, prec):
    # Lookup mpmath function based on name
    try:
        if isinstance(self, AppliedUndef):
            # Shouldn't lookup in mpmath but might have ._imp_
            raise AttributeError
        fname = self.func.__name__
        if not hasattr(mpmath, fname):
            from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
            fname = MPMATH_TRANSLATIONS[fname]
        func = getattr(mpmath, fname)
    except (AttributeError, KeyError):
        # No mpmath counterpart: fall back to a user-supplied numeric
        # implementation (._imp_), if any; otherwise give up (None).
        try:
            return Float(self._imp_(*[i.evalf(prec) for i in self.args]), prec)
        except (AttributeError, TypeError, ValueError):
            return

    # Convert all args to mpf or mpc
    # Convert the arguments to *higher* precision than requested for the
    # final result.
    # XXX + 5 is a guess, it is similar to what is used in evalf.py. Should
    # we be more intelligent about it?
    try:
        args = [arg._to_mpmath(prec + 5) for arg in self.args]
        def bad(m):
            from mpmath import mpf, mpc
            # the precision of an mpf value is the last element
            # if that is 1 (and m[1] is not 1 which would indicate a
            # power of 2), then the eval failed; so check that none of
            # the arguments failed to compute to a finite precision.
            # Note: An mpc value has two parts, the re and imag tuple;
            # check each of those parts, too. Anything else is allowed to
            # pass
            if isinstance(m, mpf):
                m = m._mpf_
                return m[1] !=1 and m[-1] == 1
            elif isinstance(m, mpc):
                m, n = m._mpc_
                return m[1] !=1 and m[-1] == 1 and \
                    n[1] !=1 and n[-1] == 1
            else:
                return False
        if any(bad(a) for a in args):
            raise ValueError  # one or more args failed to compute with significance
    except ValueError:
        return

    with mpmath.workprec(prec):
        v = func(*args)

    return Expr._from_mpmath(v, prec)

def _eval_derivative(self, s):
    # f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s)  (chain rule over all args)
    i = 0
    l = []
    for a in self.args:
        i += 1
        da = a.diff(s)
        if da is S.Zero:
            continue
        try:
            df = self.fdiff(i)
        except ArgumentIndexError:
            df = Function.fdiff(self, i)
        l.append(df * da)
    return Add(*l)

def _eval_is_commutative(self):
    return fuzzy_and(a.is_commutative for a in self.args)

def _eval_is_complex(self):
    return fuzzy_and(a.is_complex for a in self.args)

def as_base_exp(self):
    """
    Returns the method as the 2-tuple (base, exponent).
    """
    return self, S.One

def _eval_aseries(self, n, args0, x, logx):
    """
    Compute an asymptotic expansion around args0, in terms of self.args.
    This function is only used internally by _eval_nseries and should not
    be called directly; derived classes can overwrite this to implement
    asymptotic expansions.
    """
    from sympy.utilities.misc import filldedent
    raise PoleError(filldedent('''
        Asymptotic expansion of %s around %s is
        not implemented.''' % (type(self), args0)))
def _eval_nseries(self, x, n, logx):
    """
    This function does compute series for multivariate functions,
    but the expansion is always in terms of *one* variable.

    Examples
    ========

    >>> from sympy import atan2
    >>> from sympy.abc import x, y
    >>> atan2(x, y).series(x, n=2)
    atan2(0, y) + x/y + O(x**2)
    >>> atan2(x, y).series(y, n=2)
    -y/x + atan2(x, 0) + O(y**2)

    This function also computes asymptotic expansions, if necessary
    and possible:

    >>> from sympy import loggamma
    >>> loggamma(1/x)._eval_nseries(x,0,None)
    -1/x - log(x)/x + log(x)/2 + O(1)

    """
    from sympy import Order
    from sympy.sets.sets import FiniteSet
    args = self.args
    args0 = [t.limit(x, 0) for t in args]
    if any(t.is_finite is False for t in args0):
        # Some argument diverges at x=0: try an asymptotic expansion.
        from sympy import oo, zoo, nan
        # XXX could use t.as_leading_term(x) here but it's a little
        # slower
        a = [t.compute_leading_term(x, logx=logx) for t in args]
        a0 = [t.limit(x, 0) for t in a]
        if any([t.has(oo, -oo, zoo, nan) for t in a0]):
            return self._eval_aseries(n, args0, x, logx)
        # Careful: the argument goes to oo, but only logarithmically so. We
        # are supposed to do a power series expansion "around the
        # logarithmic term". e.g.
        #      f(1+x+log(x))
        #          -> f(1+logx) + x*f'(1+logx) + O(x**2)
        # where 'logx' is given in the argument
        a = [t._eval_nseries(x, n, logx) for t in args]
        z = [r - r0 for (r, r0) in zip(a, a0)]
        p = [Dummy() for t in z]
        q = []
        v = None
        # At most one argument may carry the x-dependence; it is replaced
        # by the dummy v so the function can be expanded in v.
        for ai, zi, pi in zip(a0, z, p):
            if zi.has(x):
                if v is not None:
                    raise NotImplementedError
                q.append(ai + pi)
                v = pi
            else:
                q.append(ai)
        e1 = self.func(*q)
        if v is None:
            return e1
        s = e1._eval_nseries(v, n, logx)
        o = s.getO()
        s = s.removeO()
        s = s.subs(v, zi).expand() + Order(o.expr.subs(v, zi), x)
        return s
    if (self.func.nargs is S.Naturals0
            or (self.func.nargs == FiniteSet(1) and args0[0])
            or any(c > 1 for c in self.func.nargs)):
        e = self
        e1 = e.expand()
        if e == e1:
            #for example when e = sin(x+1) or e = sin(cos(x))
            #let's try the general algorithm
            term = e.subs(x, S.Zero)
            if term.is_finite is False or term is S.NaN:
                raise PoleError("Cannot expand %s around 0" % (self))
            series = term
            fact = S.One
            _x = Dummy('x')
            e = e.subs(x, _x)
            # Build the Taylor series term by term via repeated
            # differentiation at 0.
            for i in range(n - 1):
                i += 1
                fact *= Rational(i)
                e = e.diff(_x)
                subs = e.subs(_x, S.Zero)
                if subs is S.NaN:
                    # try to evaluate a limit if we have to
                    subs = e.limit(_x, S.Zero)
                if subs.is_finite is False:
                    raise PoleError("Cannot expand %s around 0" % (self))
                term = subs*(x**i)/fact
                term = term.expand()
                series += term
            return series + Order(x**n, x)
        return e1.nseries(x, n=n, logx=logx)
    # Single-argument function vanishing at 0: use the class's taylor_term.
    arg = self.args[0]
    l = []
    g = None
    # try to predict a number of terms needed
    nterms = n + 2
    cf = Order(arg.as_leading_term(x), x).getn()
    if cf != 0:
        nterms = int(nterms / cf)
    for i in range(nterms):
        g = self.taylor_term(i, arg, g)
        g = g.nseries(x, n=n, logx=logx)
        l.append(g)
    return Add(*l) + Order(x**n, x)
def fdiff(self, argindex=1):
    """
    Returns the first derivative of the function.
    """
    if not (1 <= argindex <= len(self.args)):
        raise ArgumentIndexError(self, argindex)

    if self.args[argindex - 1].is_Symbol:
        for i in range(len(self.args)):
            if i == argindex - 1:
                continue
            # See issue 8510
            if self.args[argindex - 1] in self.args[i].free_symbols:
                break
        else:
            return Derivative(self, self.args[argindex - 1], evaluate=False)
    # See issue 4624 and issue 4719 and issue 5600
    # Differentiating wrt a non-Symbol (or a symbol shared between args):
    # substitute a Dummy, differentiate, then substitute back via Subs.
    arg_dummy = Dummy('xi_%i' % argindex, dummy_index=hash(self.args[argindex - 1]))
    new_args = [arg for arg in self.args]
    new_args[argindex-1] = arg_dummy
    return Subs(Derivative(self.func(*new_args), arg_dummy),
                arg_dummy, self.args[argindex - 1])

def _eval_as_leading_term(self, x):
    """Stub that should be overridden by new Functions to return
    the first non-zero term in a series if ever an x-dependent
    argument whose leading term vanishes as x -> 0 might be encountered.
    See, for example, cos._eval_as_leading_term.
    """
    from sympy import Order
    args = [a.as_leading_term(x) for a in self.args]
    o = Order(1, x)
    if any(x in a.free_symbols and o.contains(a) for a in args):
        # Whereas x and any finite number are contained in O(1, x),
        # expressions like 1/x are not. If any arg simplified to a
        # vanishing expression as x -> 0 (like x or x**2, but not
        # 3, 1/x, etc...) then the _eval_as_leading_term is needed
        # to supply the first non-zero term of the series,
        #
        # e.g. expression    leading term
        #      ----------    ------------
        #      cos(1/x)      cos(1/x)
        #      cos(cos(x))   cos(1)
        #      cos(x)        1 <- _eval_as_leading_term needed
        #      sin(x)        x <- _eval_as_leading_term needed
        #
        raise NotImplementedError(
            '%s has no _eval_as_leading_term routine' % self.func)
    else:
        return self.func(*args)

def _sage_(self):
    import sage.all as sage
    fname = self.func.__name__
    func = getattr(sage, fname, None)
    args = [arg._sage_() for arg in self.args]

    # In the case the function is not known in sage:
    if func is None:
        import sympy
        if getattr(sympy, fname, None) is None:
            # abstract function
            return sage.function(fname)(*args)
        else:
            # the function defined in sympy is not known in sage
            # this exception is caught in sage
            raise AttributeError
    return func(*args)
class AppliedUndef(Function):
    """
    Base class for expressions resulting from the application of an undefined
    function.
    """

    is_number = False

    def __new__(cls, *args, **options):
        args = list(map(sympify, args))
        obj = super(AppliedUndef, cls).__new__(cls, *args, **options)
        return obj

    def _eval_as_leading_term(self, x):
        # An undefined function is its own leading term.
        return self

    def _sage_(self):
        import sage.all as sage
        fname = str(self.func)
        args = [arg._sage_() for arg in self.args]
        func = sage.function(fname)(*args)
        return func
class UndefinedFunction(FunctionClass):
    """
    The (meta)class of undefined functions.
    """
    def __new__(mcl, name, bases=(AppliedUndef,), __dict__=None, **kwargs):
        __dict__ = __dict__ or {}
        # Allow Function('f', real=True)
        __dict__.update({'is_' + arg: val for arg, val in kwargs.items() if arg in _assume_defined})
        # You can add other attributes, although they do have to be hashable
        # (but seriously, if you want to add anything other than assumptions,
        # just subclass Function)
        __dict__.update(kwargs)
        # Save these for __eq__
        __dict__.update({'_extra_kwargs': kwargs})
        __dict__['__module__'] = None  # For pickling
        ret = super(UndefinedFunction, mcl).__new__(mcl, name, bases, __dict__)
        return ret

    def __instancecheck__(cls, instance):
        return cls in type(instance).__mro__

    _extra_kwargs = {}

    def __hash__(self):
        # Hash must agree with __eq__ below: name/key plus the extra
        # (assumption) kwargs the function was created with.
        return hash((self.class_key(), frozenset(self._extra_kwargs.items())))

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
            self.class_key() == other.class_key() and
            self._extra_kwargs == other._extra_kwargs)

    def __ne__(self, other):
        return not self == other
class WildFunction(Function, AtomicExpr):
    """
    A WildFunction function matches any function (with its arguments).

    Examples
    ========

    >>> from sympy import WildFunction, Function, cos
    >>> from sympy.abc import x, y
    >>> F = WildFunction('F')
    >>> f = Function('f')
    >>> F.nargs
    Naturals0
    >>> x.match(F)
    >>> F.match(F)
    {F_: F_}
    >>> f(x).match(F)
    {F_: f(x)}
    >>> cos(x).match(F)
    {F_: cos(x)}
    >>> f(x, y).match(F)
    {F_: f(x, y)}

    To match functions with a given number of arguments, set ``nargs`` to the
    desired value at instantiation:

    >>> F = WildFunction('F', nargs=2)
    >>> F.nargs
    {2}
    >>> f(x).match(F)
    >>> f(x, y).match(F)
    {F_: f(x, y)}

    To match functions with a range of arguments, set ``nargs`` to a tuple
    containing the desired number of arguments, e.g. if ``nargs = (1, 2)``
    then functions with 1 or 2 arguments will be matched.

    >>> F = WildFunction('F', nargs=(1, 2))
    >>> F.nargs
    {1, 2}
    >>> f(x).match(F)
    {F_: f(x)}
    >>> f(x, y).match(F)
    {F_: f(x, y)}
    >>> f(x, y, 1).match(F)

    """

    include = set()

    def __init__(cls, name, **assumptions):
        from sympy.sets.sets import Set, FiniteSet
        cls.name = name
        nargs = assumptions.pop('nargs', S.Naturals0)
        if not isinstance(nargs, Set):
            # Canonicalize nargs here.  See also FunctionClass.
            if is_sequence(nargs):
                nargs = tuple(ordered(set(nargs)))
            elif nargs is not None:
                nargs = (as_int(nargs),)
            nargs = FiniteSet(*nargs)
        cls.nargs = nargs

    def matches(self, expr, repl_dict={}, old=False):
        # Match any applied function whose argument count is in self.nargs.
        if not isinstance(expr, (AppliedUndef, Function)):
            return None
        if len(expr.args) not in self.nargs:
            return None

        # copy before mutating, so the (shared) default dict stays empty
        repl_dict = repl_dict.copy()
        repl_dict[self] = expr
        return repl_dict
class Derivative(Expr):
    """
    Carries out differentiation of the given expression with respect to symbols.
    expr must define ._eval_derivative(symbol) method that returns
    the differentiation result. This function only needs to consider the
    non-trivial case where expr contains symbol and it should call the diff()
    method internally (not _eval_derivative); Derivative should be the only
    one to call _eval_derivative.
    Simplification of high-order derivatives:
    Because there can be a significant amount of simplification that can be
    done when multiple differentiations are performed, results will be
    automatically simplified in a fairly conservative fashion unless the
    keyword ``simplify`` is set to False.
    >>> from sympy import sqrt, diff
    >>> from sympy.abc import x
    >>> e = sqrt((x + 1)**2 + x)
    >>> diff(e, (x, 5), simplify=False).count_ops()
    136
    >>> diff(e, (x, 5)).count_ops()
    30
    Ordering of variables:
    If evaluate is set to True and the expression can not be evaluated, the
    list of differentiation symbols will be sorted, that is, the expression is
    assumed to have continuous derivatives up to the order asked. This sorting
    assumes that derivatives wrt Symbols commute, derivatives wrt non-Symbols
    commute, but Symbol and non-Symbol derivatives don't commute with each
    other.
    Derivative wrt non-Symbols:
    This class also allows derivatives wrt non-Symbols that have _diff_wrt
    set to True, such as Function and Derivative. When a derivative wrt a non-
    Symbol is attempted, the non-Symbol is temporarily converted to a Symbol
    while the differentiation is performed.
    Note that this may seem strange, that Derivative allows things like
    f(g(x)).diff(g(x)), or even f(cos(x)).diff(cos(x)). The motivation for
    allowing this syntax is to make it easier to work with variational calculus
    (i.e., the Euler-Lagrange method). The best way to understand this is that
    the action of derivative with respect to a non-Symbol is defined by the
    above description: the object is substituted for a Symbol and the
    derivative is taken with respect to that. This action is only allowed for
    objects for which this can be done unambiguously, for example Function and
    Derivative objects. Note that this leads to what may appear to be
    mathematically inconsistent results. For example::
    >>> from sympy import cos, sin, sqrt
    >>> from sympy.abc import x
    >>> (2*cos(x)).diff(cos(x))
    2
    >>> (2*sqrt(1 - sin(x)**2)).diff(cos(x))
    0
    This appears wrong because in fact 2*cos(x) and 2*sqrt(1 - sin(x)**2) are
    identically equal. However this is the wrong way to think of this. Think
    of it instead as if we have something like this::
    >>> from sympy.abc import c, s, u, x
    >>> def F(u):
    ...     return 2*u
    ...
    >>> def G(u):
    ...     return 2*sqrt(1 - u**2)
    ...
    >>> F(cos(x))
    2*cos(x)
    >>> G(sin(x))
    2*sqrt(-sin(x)**2 + 1)
    >>> F(c).diff(c)
    2
    >>> F(cos(x)).diff(cos(x))
    2
    >>> G(s).diff(c)
    0
    >>> G(sin(x)).diff(cos(x))
    0
    Here, the Symbols c and s act just like the functions cos(x) and sin(x),
    respectively. Think of 2*cos(x) as f(c).subs(c, cos(x)) (or f(c) *at*
    c = cos(x)) and 2*sqrt(1 - sin(x)**2) as g(s).subs(s, sin(x)) (or g(s) *at*
    s = sin(x)), where f(u) == 2*u and g(u) == 2*sqrt(1 - u**2). Here, we
    define the function first and evaluate it at the function, but we can
    actually unambiguously do this in reverse in SymPy, because
    expr.subs(Function, Symbol) is well-defined: just structurally replace the
    function everywhere it appears in the expression.
    This is the same notational convenience used in the Euler-Lagrange method
    when one says F(t, f(t), f'(t)).diff(f(t)). What is actually meant is
    that the expression in question is represented by some F(t, u, v) at u =
    f(t) and v = f'(t), and F(t, f(t), f'(t)).diff(f(t)) simply means F(t, u,
    v).diff(u) at u = f(t).
    We do not allow derivatives to be taken with respect to expressions where this
    is not so well defined. For example, we do not allow expr.diff(x*y)
    because there are multiple ways of structurally defining where x*y appears
    in an expression, some of which may surprise the reader (for example, a
    very strict definition would have that (x*y*z).diff(x*y) == 0).
    >>> from sympy.abc import x, y, z
    >>> (x*y*z).diff(x*y)
    Traceback (most recent call last):
    ...
    ValueError: Can't differentiate wrt the variable: x*y, 1
    Note that this definition also fits in nicely with the definition of the
    chain rule. Note how the chain rule in SymPy is defined using unevaluated
    Subs objects::
    >>> from sympy import symbols, Function
    >>> f, g = symbols('f g', cls=Function)
    >>> f(2*g(x)).diff(x)
    2*Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), _xi_1, 2*g(x))
    >>> f(g(x)).diff(x)
    Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), _xi_1, g(x))
    Finally, note that, to be consistent with variational calculus, and to
    ensure that the definition of substituting a Function for a Symbol in an
    expression is well-defined, derivatives of functions are assumed to not be
    related to the function. In other words, we have::
    >>> from sympy import diff
    >>> diff(f(x), x).diff(f(x))
    0
    The same is true for derivatives of different orders::
    >>> diff(f(x), x, 2).diff(diff(f(x), x, 1))
    0
    >>> diff(f(x), x, 1).diff(diff(f(x), x, 2))
    0
    Note, any class can allow derivatives to be taken with respect to itself.
    See the docstring of Expr._diff_wrt.
    Examples
    ========
    Some basic examples:
    >>> from sympy import Derivative, Symbol, Function
    >>> f = Function('f')
    >>> g = Function('g')
    >>> x = Symbol('x')
    >>> y = Symbol('y')
    >>> Derivative(x**2, x, evaluate=True)
    2*x
    >>> Derivative(Derivative(f(x,y), x), y)
    Derivative(f(x, y), x, y)
    >>> Derivative(f(x), x, 3)
    Derivative(f(x), (x, 3))
    >>> Derivative(f(x, y), y, x, evaluate=True)
    Derivative(f(x, y), x, y)
    Now some derivatives wrt functions:
    >>> Derivative(f(x)**2, f(x), evaluate=True)
    2*f(x)
    >>> Derivative(f(g(x)), x, evaluate=True)
    Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), _xi_1, g(x))
    """

    # Flag tested elsewhere (e.g. in _eval_subs) to cheaply recognize
    # Derivative nodes.
    is_Derivative = True

    @property
    def _diff_wrt(self):
        """Allow derivatives wrt Derivatives if it contains a function.
        Examples
        ========
        >>> from sympy import Function, Symbol, Derivative
        >>> f = Function('f')
        >>> x = Symbol('x')
        >>> Derivative(f(x),x)._diff_wrt
        True
        >>> Derivative(x**2,x)._diff_wrt
        False
        """
        if self.expr.is_Function:
            return True
        else:
            return False

    def __new__(cls, expr, *variables, **kwargs):
        from sympy.matrices.common import MatrixCommon
        from sympy import Integer
        from sympy.tensor.array import Array, NDimArray, derive_by_array
        from sympy.utilities.misc import filldedent

        expr = sympify(expr)

        # An object whose free_symbols is not a set has no notion of
        # variables and therefore cannot be differentiated.
        try:
            has_symbol_set = isinstance(expr.free_symbols, set)
        except AttributeError:
            has_symbol_set = False
        if not has_symbol_set:
            raise ValueError(filldedent('''
                Since there are no variables in the expression %s,
                it cannot be differentiated.''' % expr))

        # There are no variables, we differentiate wrt all of the free symbols
        # in expr.
        if not variables:
            variables = expr.free_symbols
            if len(variables) != 1:
                if expr.is_number:
                    return S.Zero
                if len(variables) == 0:
                    raise ValueError(filldedent('''
                        Since there are no variables in the expression,
                        the variable(s) of differentiation must be supplied
                        to differentiate %s''' % expr))
                else:
                    raise ValueError(filldedent('''
                        Since there is more than one variable in the
                        expression, the variable(s) of differentiation
                        must be supplied to differentiate %s''' % expr))

        # Standardize the variables by sympifying them:
        variables = list(sympify(variables))

        # Split the list of variables into a list of the variables we are diff
        # wrt, where each element of the list has the form (s, count) where
        # s is the entity to diff wrt and count is the order of the
        # derivative.
        variable_count = []
        # j counts how many (variable, count) pairs have been committed to
        # variable_count so far.
        j = 0
        # Containers that trigger differentiation by array.
        array_likes = (tuple, list, Tuple)

        for i, v in enumerate(variables):
            if isinstance(v, Integer):
                # A bare integer raises the order of the previous variable.
                count = v
                if i == 0:
                    raise ValueError("First variable cannot be a number: %i" % v)
                prev, prevcount = variable_count[j-1]
                if prevcount != 1:
                    raise TypeError("tuple {0} followed by number {1}".format((prev, prevcount), v))
                if count == 0:
                    j -= 1
                    variable_count.pop()
                else:
                    variable_count[j-1] = Tuple(prev, count)
            else:
                if isinstance(v, array_likes):
                    if len(v) == 0:
                        # Ignore empty tuples: Derivative(expr, ... , (), ... )
                        continue
                    if isinstance(v[0], array_likes):
                        # Derive by array: Derivative(expr, ... , [[x, y, z]], ... )
                        if len(v) == 1:
                            v = Array(v[0])
                            count = 1
                        else:
                            v, count = v
                            v = Array(v)
                    else:
                        v, count = v
                else:
                    count = S(1)
                if count == 0:
                    continue
                if not v._diff_wrt:
                    last_digit = int(str(count)[-1])
                    ordinal = 'st' if last_digit == 1 else 'nd' if last_digit == 2 else 'rd' if last_digit == 3 else 'th'
                    raise ValueError(filldedent('''
                        Can\'t calculate %s%s derivative wrt %s.''' % (count, ordinal, v)))
                if j != 0 and v == variable_count[-1][0]:
                    # Same variable as the previous entry: merge the counts.
                    prev, prevcount = variable_count[j-1]
                    variable_count[-1] = Tuple(prev, prevcount + count)
                else:
                    variable_count.append(Tuple(v, count))
                    j += 1

        # We make a special case for 0th derivative, because there is no
        # good way to unambiguously print this.
        if len(variable_count) == 0:
            return expr

        evaluate = kwargs.get('evaluate', False)

        # Look for a quick exit if there are symbols that don't appear in
        # expression at all. Note, this cannot check non-symbols like
        # functions and Derivatives as those can be created by intermediate
        # derivatives.
        if evaluate and all(isinstance(sc[0], Symbol) for sc in variable_count):
            symbol_set = set(sc[0] for sc in variable_count if sc[1].is_positive)
            if symbol_set.difference(expr.free_symbols):
                if isinstance(expr, (MatrixCommon, NDimArray)):
                    return expr.zeros(*expr.shape)
                else:
                    return S.Zero

        # If we can't compute the derivative of expr (but we wanted to) and
        # expr is itself not a Derivative, finish building an unevaluated
        # derivative class by calling Expr.__new__.
        if (not (hasattr(expr, '_eval_derivative') and evaluate) and
                (not isinstance(expr, Derivative))):
            # If we wanted to evaluate, we sort the variables into standard
            # order for later comparisons. This is too aggressive if evaluate
            # is False, so we don't do it in that case.
            if evaluate:
                #TODO: check if assumption of discontinuous derivatives exist
                variable_count = cls._sort_variable_count(variable_count)
            obj = Expr.__new__(cls, expr, *variable_count)
            return obj

        # Compute the derivative now by repeatedly calling the
        # _eval_derivative method of expr for each variable. When this method
        # returns None, the derivative couldn't be computed wrt that variable
        # and we save the variable for later.
        unhandled_variable_count = []

        # Once we encounter a non_symbol that is unhandled, we stop taking
        # derivatives entirely. This is because derivatives wrt functions
        # don't commute with derivatives wrt symbols and we can't safely
        # continue.
        unhandled_non_symbol = False
        nderivs = 0  # how many derivatives were performed
        for v, count in variable_count:
            is_symbol = v.is_symbol
            if unhandled_non_symbol:
                obj = None
            elif (count < 0) == True:
                obj = None
            else:
                if isinstance(v, (Iterable, Tuple, MatrixCommon, NDimArray)):
                    # Treat derivatives by arrays/matrices as much as symbols.
                    is_symbol = True
                if not is_symbol:
                    # NOTE(review): 'i' is the leftover index from the
                    # canonicalization loop above, so the dummy's *name*
                    # depends on that loop's last iteration; uniqueness is
                    # guaranteed by dummy_index=hash(v), not by the name.
                    new_v = Dummy('xi_%i' % i, dummy_index=hash(v))
                    expr = expr.xreplace({v: new_v})
                    old_v = v
                    v = new_v
                # Evaluate the derivative `n` times. If
                # `_eval_derivative_n_times` is not overridden by the current
                # object, the default in `Basic` will call a loop over
                # `_eval_derivative`:
                obj = expr._eval_derivative_n_times(v, count)
                nderivs += count
                if not is_symbol:
                    if obj is not None:
                        if not old_v.is_symbol and obj.is_Derivative:
                            # Derivative evaluated at a point that is not a
                            # symbol, let subs check if this is okay to replace
                            obj = obj.subs(v, old_v)
                        else:
                            obj = obj.xreplace({v: old_v})
                    v = old_v
            if obj is None:
                unhandled_variable_count.append(Tuple(v, count))
                if not is_symbol:
                    unhandled_non_symbol = True
            elif obj is S.Zero:
                return S.Zero
            else:
                expr = obj

        if unhandled_variable_count:
            unhandled_variable_count = cls._sort_variable_count(unhandled_variable_count)
            expr = Expr.__new__(cls, expr, *unhandled_variable_count)
        else:
            # We got a Derivative at the end of it all, and we rebuild it by
            # sorting its variables.
            if isinstance(expr, Derivative):
                expr = cls(
                    expr.args[0], *cls._sort_variable_count(expr.args[1:])
                )

        if (nderivs > 1) == True and kwargs.get('simplify', True):
            from sympy.core.exprtools import factor_terms
            from sympy.simplify.simplify import signsimp
            expr = factor_terms(signsimp(expr))
        return expr

    @classmethod
    def _remove_derived_once(cls, v):
        # Collapse (variable, 1) pairs to the bare variable; leave higher
        # counts as (variable, count) pairs.
        return [i[0] if i[1] == 1 else i for i in v]

    @classmethod
    def _sort_variable_count(cls, varcounts):
        """
        Sort (variable, count) pairs by variable, but disallow sorting of non-symbols.
        The count is not sorted. It is kept in the same order as the input
        after sorting by variable.
        When taking derivatives, the following rules usually hold:
        * Derivative wrt different symbols commute.
        * Derivative wrt different non-symbols commute.
        * Derivatives wrt symbols and non-symbols don't commute.
        Examples
        ========
        >>> from sympy import Derivative, Function, symbols
        >>> vsort = Derivative._sort_variable_count
        >>> x, y, z = symbols('x y z')
        >>> f, g, h = symbols('f g h', cls=Function)
        >>> vsort([(x, 3), (y, 2), (z, 1)])
        [(x, 3), (y, 2), (z, 1)]
        >>> vsort([(h(x), 1), (g(x), 1), (f(x), 1)])
        [(f(x), 1), (g(x), 1), (h(x), 1)]
        >>> vsort([(z, 1), (y, 2), (x, 3), (h(x), 1), (g(x), 1), (f(x), 1)])
        [(x, 3), (y, 2), (z, 1), (f(x), 1), (g(x), 1), (h(x), 1)]
        >>> vsort([(x, 1), (f(x), 1), (y, 1), (f(y), 1)])
        [(x, 1), (f(x), 1), (y, 1), (f(y), 1)]
        >>> vsort([(y, 1), (x, 2), (g(x), 1), (f(x), 1), (z, 1), (h(x), 1), (y, 2), (x, 1)])
        [(x, 2), (y, 1), (f(x), 1), (g(x), 1), (z, 1), (h(x), 1), (x, 1), (y, 2)]
        >>> vsort([(z, 1), (y, 1), (f(x), 1), (x, 1), (f(x), 1), (g(x), 1)])
        [(y, 1), (z, 1), (f(x), 1), (x, 1), (f(x), 1), (g(x), 1)]
        >>> vsort([(z, 1), (y, 2), (f(x), 1), (x, 2), (f(x), 2), (g(x), 1), (z, 2), (z, 1), (y, 1), (x, 1)])
        [(y, 2), (z, 1), (f(x), 1), (x, 2), (f(x), 2), (g(x), 1), (x, 1), (y, 1), (z, 2), (z, 1)]
        """
        # Sort each maximal run of symbols (resp. non-symbols), flushing the
        # pending run whenever the kind of entry switches; runs of different
        # kinds are never interleaved because they don't commute.
        sorted_vars = []
        symbol_part = []
        non_symbol_part = []
        for (v, c) in varcounts:
            if not v.is_symbol:
                if len(symbol_part) > 0:
                    sorted_vars.extend(sorted(symbol_part,
                                              key=lambda i: default_sort_key(i[0])))
                    symbol_part = []
                non_symbol_part.append((v, c))
            else:
                if len(non_symbol_part) > 0:
                    sorted_vars.extend(sorted(non_symbol_part,
                                              key=lambda i: default_sort_key(i[0])))
                    non_symbol_part = []
                symbol_part.append((v, c))
        if len(non_symbol_part) > 0:
            sorted_vars.extend(sorted(non_symbol_part,
                                      key=lambda i: default_sort_key(i[0])))
        if len(symbol_part) > 0:
            sorted_vars.extend(sorted(symbol_part,
                                      key=lambda i: default_sort_key(i[0])))
        return [Tuple(*i) for i in sorted_vars]

    def _eval_is_commutative(self):
        # A Derivative is commutative iff the differentiated expression is.
        return self.expr.is_commutative

    def _eval_derivative_n_times(self, s, n):
        from sympy import Integer
        if isinstance(n, (int, Integer)):
            # TODO: it would be desirable to squash `_eval_derivative` into
            # this code.
            return super(Derivative, self)._eval_derivative_n_times(s, n)
        # Symbolic order n: merge it into any existing count for s.
        dict_var_count = dict(self.variable_count)
        if s in dict_var_count:
            dict_var_count[s] += n
        else:
            dict_var_count[s] = n
        return Derivative(self.expr, *dict_var_count.items())

    def _eval_derivative(self, v):
        # If the variable s we are diff wrt is not in self.variables, we
        # assume that we might be able to take the derivative.
        if v not in self.variables:
            obj = self.expr.diff(v)
            if obj is S.Zero:
                return S.Zero
            if isinstance(obj, Derivative):
                return obj.func(obj.expr, *(self.variable_count + obj.variable_count))
            # The derivative wrt s could have simplified things such that the
            # derivative wrt things in self.variables can now be done. Thus,
            # we set evaluate=True to see if there are any other derivatives
            # that can be done. The most common case is when obj is a simple
            # number so that the derivative wrt anything else will vanish.
            return self.func(obj, *self.variables, evaluate=True)
        # In this case s was in self.variables so the derivative wrt s has
        # already been attempted and was not computed, either because it
        # couldn't be or evaluate=False originally.
        variable_count = list(self.variable_count)
        if variable_count[-1][0] == v:
            variable_count[-1] = Tuple(v, variable_count[-1][1] + 1)
        else:
            variable_count.append(Tuple(v, S(1)))
        return self.func(self.expr, *variable_count, evaluate=False)

    def doit(self, **hints):
        # Re-run construction with evaluate=True, optionally evaluating the
        # inner expression first (deep=True, the default).
        expr = self.expr
        if hints.get('deep', True):
            expr = expr.doit(**hints)
        hints['evaluate'] = True
        return self.func(expr, *self.variable_count, **hints)

    @_sympifyit('z0', NotImplementedError)
    def doit_numerically(self, z0):
        """
        Evaluate the derivative at z numerically.
        When we can represent derivatives at a point, this should be folded
        into the normal evalf. For now, we need a special method.
        """
        import mpmath
        from sympy.core.expr import Expr
        if len(self.free_symbols) != 1 or len(self.variables) != 1:
            raise NotImplementedError('partials and higher order derivatives')
        z = list(self.free_symbols)[0]

        def eval(x):
            # Round-trip through mpmath's working precision so mpmath.diff
            # can drive the evaluation.
            f0 = self.expr.subs(z, Expr._from_mpmath(x, prec=mpmath.mp.prec))
            f0 = f0.evalf(mlib.libmpf.prec_to_dps(mpmath.mp.prec))
            return f0._to_mpmath(mpmath.mp.prec)

        return Expr._from_mpmath(mpmath.diff(eval,
                                             z0._to_mpmath(mpmath.mp.prec)),
                                 mpmath.mp.prec)

    @property
    def expr(self):
        # The expression being differentiated.
        return self._args[0]

    @property
    def variables(self):
        # TODO: deprecate?
        # TODO: support for `d^n`?
        # Flattened differentiation variables, one entry per order.
        # NOTE(review): the trailing ``else [1]`` can never be taken -- the
        # ``if count.is_Integer`` filter already excludes non-integer counts.
        return tuple(v for v, count in self.variable_count if count.is_Integer for i in (range(count) if count.is_Integer else [1]))

    @property
    def variable_count(self):
        # The canonical (variable, count) pairs stored in the args.
        return self._args[1:]

    @property
    def derivative_count(self):
        # Total order of differentiation, summed over all variables.
        return sum([count for var, count in self.variable_count], 0)

    @property
    def free_symbols(self):
        return self.expr.free_symbols

    def _eval_subs(self, old, new):
        if old in self.variables and not new._diff_wrt:
            # issue 4719
            return Subs(self, old, new)
        # If both are Derivatives with the same expr, check if old is
        # equivalent to self or if old is a subderivative of self.
        if old.is_Derivative and old.expr == self.expr:
            # Check if canonical order of variables is equal.
            old_vars = Counter(dict(reversed(old.variable_count)))
            self_vars = Counter(dict(reversed(self.variable_count)))
            if old_vars == self_vars:
                return new

            # collections.Counter doesn't have __le__
            def _subset(a, b):
                return all((a[i] <= b[i]) == True for i in a)

            if _subset(old_vars, self_vars):
                # old is a subderivative: differentiate new wrt the excess.
                return Derivative(new, *(self_vars - old_vars).items())
        # Check whether the substitution (old, new) cannot be done inside
        # Derivative(expr, vars). Disallowed:
        # (1) changing expr by introducing a variable among vars
        # (2) changing vars by introducing a variable contained in expr
        old_symbols = (old.free_symbols if isinstance(old.free_symbols, set)
                       else set())
        new_symbols = (new.free_symbols if isinstance(new.free_symbols, set)
                       else set())
        introduced_symbols = new_symbols - old_symbols
        args_subbed = tuple(x._subs(old, new) for x in self.args)
        if ((self.args[0] != args_subbed[0] and
                len(set(self.variables) & introduced_symbols) > 0
             ) or
                (self.args[1:] != args_subbed[1:] and
                 len(self.free_symbols & introduced_symbols) > 0
                 )):
            return Subs(self, old, new)
        else:
            return Derivative(*args_subbed)

    def _eval_lseries(self, x, logx):
        # Differentiate the series term by term.
        dx = self.variables
        for term in self.expr.lseries(x, logx=logx):
            yield self.func(term, *dx)

    def _eval_nseries(self, x, n, logx):
        arg = self.expr.nseries(x, n=n, logx=logx)
        o = arg.getO()
        dx = self.variables
        rv = [self.func(a, *dx) for a in Add.make_args(arg.removeO())]
        if o:
            # presumably the order term loses one power of x under
            # differentiation -- TODO confirm
            rv.append(o/x)
        return Add(*rv)

    def _eval_as_leading_term(self, x):
        # First term of the series whose derivative does not vanish.
        series_gen = self.expr.lseries(x)
        d = S.Zero
        for leading_term in series_gen:
            d = diff(leading_term, *self.variables)
            if d != 0:
                break
        return d

    def _sage_(self):
        # Conversion hook used by SageMath.
        import sage.all as sage
        args = [arg._sage_() for arg in self.args]
        return sage.derivative(*args)

    def as_finite_difference(self, points=1, x0=None, wrt=None):
        """ Expresses a Derivative instance as a finite difference.
        Parameters
        ==========
        points : sequence or coefficient, optional
            If sequence: discrete values (length >= order+1) of the
            independent variable used for generating the finite
            difference weights.
            If it is a coefficient, it will be used as the step-size
            for generating an equidistant sequence of length order+1
            centered around ``x0``. Default: 1 (step-size 1)
        x0 : number or Symbol, optional
            the value of the independent variable (``wrt``) at which the
            derivative is to be approximated. Default: same as ``wrt``.
        wrt : Symbol, optional
            "with respect to" the variable for which the (partial)
            derivative is to be approximated for. If not provided it
            is required that the derivative is ordinary. Default: ``None``.
        Examples
        ========
        >>> from sympy import symbols, Function, exp, sqrt, Symbol
        >>> x, h = symbols('x h')
        >>> f = Function('f')
        >>> f(x).diff(x).as_finite_difference()
        -f(x - 1/2) + f(x + 1/2)
        The default step size and number of points are 1 and
        ``order + 1`` respectively. We can change the step size by
        passing a symbol as a parameter:
        >>> f(x).diff(x).as_finite_difference(h)
        -f(-h/2 + x)/h + f(h/2 + x)/h
        We can also specify the discretized values to be used in a
        sequence:
        >>> f(x).diff(x).as_finite_difference([x, x+h, x+2*h])
        -3*f(x)/(2*h) + 2*f(h + x)/h - f(2*h + x)/(2*h)
        The algorithm is not restricted to use equidistant spacing, nor
        do we need to make the approximation around ``x0``, but we can get
        an expression estimating the derivative at an offset:
        >>> e, sq2 = exp(1), sqrt(2)
        >>> xl = [x-h, x+h, x+e*h]
        >>> f(x).diff(x, 1).as_finite_difference(xl, x+h*sq2) # doctest: +ELLIPSIS
        2*h*((h + sqrt(2)*h)/(2*h) - (-sqrt(2)*h + h)/(2*h))*f(E*h + x)/...
        Partial derivatives are also supported:
        >>> y = Symbol('y')
        >>> d2fdxdy=f(x,y).diff(x,y)
        >>> d2fdxdy.as_finite_difference(wrt=x)
        -Derivative(f(x - 1/2, y), y) + Derivative(f(x + 1/2, y), y)
        We can apply ``as_finite_difference`` to ``Derivative`` instances in
        compound expressions using ``replace``:
        >>> (1 + 42**f(x).diff(x)).replace(lambda arg: arg.is_Derivative,
        ...     lambda arg: arg.as_finite_difference())
        42**(-f(x - 1/2) + f(x + 1/2)) + 1
        See also
        ========
        sympy.calculus.finite_diff.apply_finite_diff
        sympy.calculus.finite_diff.differentiate_finite
        sympy.calculus.finite_diff.finite_diff_weights
        """
        from ..calculus.finite_diff import _as_finite_diff
        return _as_finite_diff(self, points, x0, wrt)
class Lambda(Expr):
    """
    Lambda(x, expr) represents a lambda function similar to Python's
    'lambda x: expr'. A function of several variables is written as
    Lambda((x, y, ...), expr).
    A simple example:
    >>> from sympy import Lambda
    >>> from sympy.abc import x
    >>> f = Lambda(x, x**2)
    >>> f(4)
    16
    For multivariate functions, use:
    >>> from sympy.abc import y, z, t
    >>> f2 = Lambda((x, y, z, t), x + y**z + t**z)
    >>> f2(1, 2, 3, 4)
    73
    A handy shortcut for lots of arguments:
    >>> p = x, y, z
    >>> f = Lambda(p, x + y*z)
    >>> f(*p)
    x + y*z
    """

    is_Function = True

    def __new__(cls, variables, expr):
        from sympy.sets.sets import FiniteSet
        # Normalize the signature into a list of symbols.
        sig = list(variables) if iterable(variables) else [variables]
        for sym in sig:
            if not getattr(sym, 'is_symbol', False):
                raise TypeError('variable is not a symbol: %s' % sym)
        # Lambda(x, x) is the identity map.
        if len(sig) == 1 and sig[0] == expr:
            return S.IdentityFunction
        obj = Expr.__new__(cls, Tuple(*sig), sympify(expr))
        obj.nargs = FiniteSet(len(sig))
        return obj

    @property
    def variables(self):
        """The variables used in the internal representation of the function"""
        return self._args[0]

    @property
    def expr(self):
        """The return value of the function"""
        return self._args[1]

    @property
    def free_symbols(self):
        # Bound variables are not free.
        return self.expr.free_symbols - set(self.variables)

    def __call__(self, *args):
        n = len(args)
        if n not in self.nargs:  # Lambda only ever has 1 value in nargs
            # XXX: exception message must be in exactly this format to
            # make it work with NumPy's functions like vectorize(). See,
            # for example, https://github.com/numpy/numpy/issues/1697.
            # The ideal solution would be just to attach metadata to
            # the exception and change NumPy to take advantage of this.
            ## XXX does this apply to Lambda? If not, remove this comment.
            expected = list(self.nargs)[0]
            temp = ('%(name)s takes exactly %(args)s '
                    'argument%(plural)s (%(given)s given)')
            raise TypeError(temp % {
                'name': self,
                'args': expected,
                'plural': 's'*(expected != 1),
                'given': n})
        return self.expr.xreplace(dict(zip(self.variables, args)))

    def __eq__(self, other):
        if not isinstance(other, Lambda):
            return False
        if self.nargs != other.nargs:
            return False
        # Compare bodies after renaming other's bound variables to ours.
        renaming = dict(zip(other.args[0], self.args[0]))
        return self.args[1] == other.args[1].xreplace(renaming)

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return super(Lambda, self).__hash__()

    def _hashable_content(self):
        # Canonicalize bound variables so alpha-equivalent Lambdas agree.
        return (self.expr.xreplace(self.canonical_variables),)

    @property
    def is_identity(self):
        """Return ``True`` if this ``Lambda`` is an identity function. """
        if len(self.args) != 2:
            return None
        return self.args[0] == self.args[1]
class Subs(Expr):
    """
    Represents unevaluated substitutions of an expression.
    ``Subs(expr, x, x0)`` receives 3 arguments: an expression, a variable or
    list of distinct variables and a point or list of evaluation points
    corresponding to those variables.
    ``Subs`` objects are generally useful to represent unevaluated derivatives
    calculated at a point.
    The variables may be expressions, but they are subjected to the limitations
    of subs(), so it is usually a good practice to use only symbols for
    variables, since in that case there can be no ambiguity.
    There's no automatic expansion - use the method .doit() to effect all
    possible substitutions of the object and also of objects inside the
    expression.
    When evaluating derivatives at a point that is not a symbol, a Subs object
    is returned. One is also able to calculate derivatives of Subs objects - in
    this case the expression is always expanded (for the unevaluated form, use
    Derivative()).
    A simple example:
    >>> from sympy import Subs, Function, sin
    >>> from sympy.abc import x, y, z
    >>> f = Function('f')
    >>> e = Subs(f(x).diff(x), x, y)
    >>> e.subs(y, 0)
    Subs(Derivative(f(x), x), x, 0)
    >>> e.subs(f, sin).doit()
    cos(y)
    An example with several variables:
    >>> Subs(f(x)*sin(y) + z, (x, y), (0, 1))
    Subs(z + f(x)*sin(y), (x, y), (0, 1))
    >>> _.doit()
    z + f(0)*sin(1)
    """

    def __new__(cls, expr, variables, point, **assumptions):
        from sympy import Symbol
        # Canonicalize variables/point into Tuples of equal length.
        if not is_sequence(variables, Tuple):
            variables = [variables]
        variables = Tuple(*variables)

        if has_dups(variables):
            repeated = [str(v) for v, i in Counter(variables).items() if i > 1]
            __ = ', '.join(repeated)
            raise ValueError(filldedent('''
                The following expressions appear more than once: %s
                ''' % __))

        point = Tuple(*(point if is_sequence(point, Tuple) else [point]))

        if len(point) != len(variables):
            raise ValueError('Number of point values must be the same as '
                             'the number of variables.')

        # An empty substitution is just the expression itself.
        if not point:
            return sympify(expr)

        # denest
        if isinstance(expr, Subs):
            variables = expr.variables + variables
            point = expr.point + point
            expr = expr.expr
        else:
            expr = sympify(expr)

        # use symbols with names equal to the point value (with prepended _)
        # to give a variable-independent expression
        pre = "_"
        pts = sorted(set(point), key=default_sort_key)
        from sympy.printing import StrPrinter

        class CustomStrPrinter(StrPrinter):
            # Append the dummy_index so distinct Dummies never print alike.
            def _print_Dummy(self, expr):
                return str(expr) + str(expr.dummy_index)

        def mystr(expr, **settings):
            p = CustomStrPrinter(settings)
            return p.doprint(expr)

        while 1:
            s_pts = {p: Symbol(pre + mystr(p)) for p in pts}
            reps = [(v, s_pts[p])
                    for v, p in zip(variables, point)]
            # if any underscore-prepended symbol is already a free symbol
            # and is a variable with a different point value, then there
            # is a clash, e.g. _0 clashes in Subs(_0 + _1, (_0, _1), (1, 0))
            # because the new symbol that would be created is _1 but _1
            # is already mapped to 0 so __0 and __1 are used for the new
            # symbols
            if any(r in expr.free_symbols and
                   r in variables and
                   Symbol(pre + mystr(point[variables.index(r)])) != r
                   for _, r in reps):
                pre += "_"
                continue
            break

        obj = Expr.__new__(cls, expr, Tuple(*variables), point)
        # _expr: the variable-independent form used for hashing/comparison.
        obj._expr = expr.subs(reps)
        return obj

    def _eval_is_commutative(self):
        # Commutativity follows the underlying expression.
        return self.expr.is_commutative

    def doit(self):
        # Actually perform the stored substitutions (after evaluating the
        # inner expression).
        return self.expr.doit().subs(list(zip(self.variables, self.point)))

    def evalf(self, prec=None, **options):
        # Substitute first, then evaluate numerically.
        return self.doit().evalf(prec, **options)

    n = evalf  # short alias for evalf

    @property
    def variables(self):
        """The variables to be evaluated"""
        return self._args[1]

    @property
    def expr(self):
        """The expression on which the substitution operates"""
        return self._args[0]

    @property
    def point(self):
        """The values for which the variables are to be substituted"""
        return self._args[2]

    @property
    def free_symbols(self):
        return (self.expr.free_symbols - set(self.variables) |
                set(self.point.free_symbols))

    @property
    def expr_free_symbols(self):
        return (self.expr.expr_free_symbols - set(self.variables) |
                set(self.point.expr_free_symbols))

    def __eq__(self, other):
        if not isinstance(other, Subs):
            return False
        return self._hashable_content() == other._hashable_content()

    def __ne__(self, other):
        return not(self == other)

    def __hash__(self):
        return super(Subs, self).__hash__()

    def _hashable_content(self):
        # Compare/hash on the variable-independent form (_expr); (v, p)
        # pairs are only included for variables that do not occur in expr.
        return (self._expr.xreplace(self.canonical_variables),
                ) + tuple(ordered([(v, p) for v, p in
                                   zip(self.variables, self.point) if not self.expr.has(v)]))

    def _eval_subs(self, old, new):
        # Subs doit will do the variables in order; the semantics
        # of subs for Subs is have the following invariant for
        # Subs object foo:
        # foo.doit().subs(reps) == foo.subs(reps).doit()
        pt = list(self.point)
        if old in self.variables:
            i = self.variables.index(old)
            # any occurrence of old before this point will get
            # handled by replacements from here on
            for j in range(i, len(self.variables)):
                pt[j] = pt[j]._subs(old, new)
            return self.func(self.expr, self.variables, pt)
        v = [i._subs(old, new) for i in self.variables]
        if v != list(self.variables):
            # A variable changed: append (old -> new) as one more layer.
            return self.func(self.expr, self.variables + (old,), pt + [new])
        expr = self.expr._subs(old, new)
        pt = [i._subs(old, new) for i in self.point]
        return self.func(expr, v, pt)

    def _eval_derivative(self, s):
        # Apply the chain rule of the derivative on the substitution variables:
        val = Add.fromiter(p.diff(s) * Subs(self.expr.diff(v), self.variables, self.point).doit() for v, p in zip(self.variables, self.point))
        # Check if there are free symbols in `self.expr`:
        # First get the `expr_free_symbols`, which returns the free symbols
        # that are directly contained in an expression node (i.e. stop
        # searching if the node isn't an expression). At this point turn the
        # expressions into `free_symbols` and check if there are common free
        # symbols in `self.expr` and the deriving factor.
        fs1 = {j for i in self.expr_free_symbols for j in i.free_symbols}
        if len(fs1 & s.free_symbols) > 0:
            val += Subs(self.expr.diff(s), self.variables, self.point).doit()
        return val

    def _eval_nseries(self, x, n, logx):
        if x in self.point:
            # x is the variable being substituted into
            apos = self.point.index(x)
            other = self.variables[apos]
            arg = self.expr.nseries(other, n=n, logx=logx)
            o = arg.getO()
            subs_args = [self.func(a, *self.args[1:]) for a in arg.removeO().args]
            return Add(*subs_args) + o.subs(other, x)
        arg = self.expr.nseries(x, n=n, logx=logx)
        o = arg.getO()
        subs_args = [self.func(a, *self.args[1:]) for a in arg.removeO().args]
        return Add(*subs_args) + o

    def _eval_as_leading_term(self, x):
        if x in self.point:
            # Expand in the corresponding substitution variable instead.
            ipos = self.point.index(x)
            xvar = self.variables[ipos]
            return self.expr.as_leading_term(xvar)
        if x in self.variables:
            # if `x` is a dummy variable, it means it won't exist after the
            # substitution has been performed:
            return self
        # The variable is independent of the substitution:
        return self.expr.as_leading_term(x)
def diff(f, *symbols, **kwargs):
    """
    Differentiate f with respect to symbols.
    This is just a wrapper to unify .diff() and the Derivative class; its
    interface is similar to that of integrate(). You can use the same
    shortcuts for multiple variables as with Derivative. For example,
    diff(f(x), x, x, x) and diff(f(x), x, 3) both return the third derivative
    of f(x).
    You can pass evaluate=False to get an unevaluated Derivative class. Note
    that if there are 0 symbols (such as diff(f(x), x, 0), then the result will
    be the function (the zeroth derivative), even if evaluate=False.
    Examples
    ========
    >>> from sympy import sin, cos, Function, diff
    >>> from sympy.abc import x, y
    >>> f = Function('f')
    >>> diff(sin(x), x)
    cos(x)
    >>> diff(f(x), x, x, x)
    Derivative(f(x), (x, 3))
    >>> diff(f(x), x, 3)
    Derivative(f(x), (x, 3))
    >>> diff(sin(x)*cos(y), x, 2, y, 2)
    sin(x)*cos(y)
    >>> type(diff(sin(x), x))
    cos
    >>> type(diff(sin(x), x, evaluate=False))
    <class 'sympy.core.function.Derivative'>
    >>> type(diff(sin(x), x, 0))
    sin
    >>> type(diff(sin(x), x, 0, evaluate=False))
    sin
    >>> diff(sin(x))
    cos(x)
    >>> diff(sin(x*y))
    Traceback (most recent call last):
    ...
    ValueError: specify differentiation variables to differentiate sin(x*y)
    Note that ``diff(sin(x))`` syntax is meant only for convenience
    in interactive sessions and should be avoided in library code.
    References
    ==========
    http://reference.wolfram.com/legacy/v5_2/Built-inFunctions/AlgebraicComputation/Calculus/D.html
    See Also
    ========
    Derivative
    sympy.geometry.util.idiff: computes the derivative implicitly
    """
    kwargs.setdefault('evaluate', True)
    # Prefer an object's own ``_eval_diff`` hook, but look it up with
    # getattr instead of calling inside try/except AttributeError: the old
    # form also swallowed AttributeErrors raised *inside* a hook
    # implementation, silently falling back to Derivative and masking bugs.
    hook = getattr(f, '_eval_diff', None)
    if hook is not None:
        return hook(*symbols, **kwargs)
    return Derivative(f, *symbols, **kwargs)
def expand(e, deep=True, modulus=None, power_base=True, power_exp=True,
        mul=True, log=True, multinomial=True, basic=True, **hints):
    r"""
    Expand an expression using methods given as hints.

    Hints evaluated unless explicitly set to False are:  ``basic``, ``log``,
    ``multinomial``, ``mul``, ``power_base``, and ``power_exp`` The following
    hints are supported but not applied unless set to True:  ``complex``,
    ``func``, and ``trig``.  In addition, the following meta-hints are
    supported by some or all of the other hints:  ``frac``, ``numer``,
    ``denom``, ``modulus``, and ``force``.  ``deep`` is supported by all
    hints.  Additionally, subclasses of Expr may define their own hints or
    meta-hints.

    The ``basic`` hint is used for any special rewriting of an object that
    should be done automatically (along with the other hints like ``mul``)
    when expand is called. This is a catch-all hint to handle any sort of
    expansion that may not be described by the existing hint names. To use
    this hint an object should override the ``_eval_expand_basic`` method.
    Objects may also define their own expand methods, which are not run by
    default.  See the API section below.

    If ``deep`` is set to ``True`` (the default), things like arguments of
    functions are recursively expanded.  Use ``deep=False`` to only expand on
    the top level.

    If the ``force`` hint is used, assumptions about variables will be ignored
    in making the expansion.

    Hints
    =====

    These hints are run by default

    mul
    ---

    Distributes multiplication over addition:

    >>> from sympy import cos, exp, sin
    >>> from sympy.abc import x, y, z
    >>> (y*(x + z)).expand(mul=True)
    x*y + y*z

    multinomial
    -----------

    Expand (x + y + ...)**n where n is a positive integer.

    >>> ((x + y + z)**2).expand(multinomial=True)
    x**2 + 2*x*y + 2*x*z + y**2 + 2*y*z + z**2

    power_exp
    ---------

    Expand addition in exponents into multiplied bases.

    >>> exp(x + y).expand(power_exp=True)
    exp(x)*exp(y)
    >>> (2**(x + y)).expand(power_exp=True)
    2**x*2**y

    power_base
    ----------

    Split powers of multiplied bases.

    This only happens by default if assumptions allow, or if the
    ``force`` meta-hint is used:

    >>> ((x*y)**z).expand(power_base=True)
    (x*y)**z
    >>> ((x*y)**z).expand(power_base=True, force=True)
    x**z*y**z
    >>> ((2*y)**z).expand(power_base=True)
    2**z*y**z

    Note that in some cases where this expansion always holds, SymPy performs
    it automatically:

    >>> (x*y)**2
    x**2*y**2

    log
    ---

    Pull out power of an argument as a coefficient and split logs products
    into sums of logs.

    Note that these only work if the arguments of the log function have the
    proper assumptions--the arguments must be positive and the exponents must
    be real--or else the ``force`` hint must be True:

    >>> from sympy import log, symbols
    >>> log(x**2*y).expand(log=True)
    log(x**2*y)
    >>> log(x**2*y).expand(log=True, force=True)
    2*log(x) + log(y)
    >>> x, y = symbols('x,y', positive=True)
    >>> log(x**2*y).expand(log=True)
    2*log(x) + log(y)

    basic
    -----

    This hint is intended primarily as a way for custom subclasses to enable
    expansion by default.

    These hints are not run by default:

    complex
    -------

    Split an expression into real and imaginary parts.

    >>> x, y = symbols('x,y')
    >>> (x + y).expand(complex=True)
    re(x) + re(y) + I*im(x) + I*im(y)
    >>> cos(x).expand(complex=True)
    -I*sin(re(x))*sinh(im(x)) + cos(re(x))*cosh(im(x))

    Note that this is just a wrapper around ``as_real_imag()``.  Most objects
    that wish to redefine ``_eval_expand_complex()`` should consider
    redefining ``as_real_imag()`` instead.

    func
    ----

    Expand other functions.

    >>> from sympy import gamma
    >>> gamma(x + 1).expand(func=True)
    x*gamma(x)

    trig
    ----

    Do trigonometric expansions.

    >>> cos(x + y).expand(trig=True)
    -sin(x)*sin(y) + cos(x)*cos(y)
    >>> sin(2*x).expand(trig=True)
    2*sin(x)*cos(x)

    Note that the forms of ``sin(n*x)`` and ``cos(n*x)`` in terms of ``sin(x)``
    and ``cos(x)`` are not unique, due to the identity `\sin^2(x) + \cos^2(x)
    = 1`.  The current implementation uses the form obtained from Chebyshev
    polynomials, but this may change.  See `this MathWorld article
    <http://mathworld.wolfram.com/Multiple-AngleFormulas.html>`_ for more
    information.

    Notes
    =====

    - You can shut off unwanted methods::

        >>> (exp(x + y)*(x + y)).expand()
        x*exp(x)*exp(y) + y*exp(x)*exp(y)
        >>> (exp(x + y)*(x + y)).expand(power_exp=False)
        x*exp(x + y) + y*exp(x + y)
        >>> (exp(x + y)*(x + y)).expand(mul=False)
        (x + y)*exp(x)*exp(y)

    - Use deep=False to only expand on the top level::

        >>> exp(x + exp(x + y)).expand()
        exp(x)*exp(exp(x)*exp(y))
        >>> exp(x + exp(x + y)).expand(deep=False)
        exp(x)*exp(exp(x + y))

    - Hints are applied in an arbitrary, but consistent order (in the current
      implementation, they are applied in alphabetical order, except
      multinomial comes before mul, but this may change).  Because of this,
      some hints may prevent expansion by other hints if they are applied
      first. For example, ``mul`` may distribute multiplications and prevent
      ``log`` and ``power_base`` from expanding them. Also, if ``mul`` is
      applied before ``multinomial`, the expression might not be fully
      distributed. The solution is to use the various ``expand_hint`` helper
      functions or to use ``hint=False`` to this function to finely control
      which hints are applied. Here are some examples::

        >>> from sympy import expand, expand_mul, expand_power_base
        >>> x, y, z = symbols('x,y,z', positive=True)

        >>> expand(log(x*(y + z)))
        log(x) + log(y + z)

      Here, we see that ``log`` was applied before ``mul``.  To get the mul
      expanded form, either of the following will work::

        >>> expand_mul(log(x*(y + z)))
        log(x*y + x*z)
        >>> expand(log(x*(y + z)), log=False)
        log(x*y + x*z)

      A similar thing can happen with the ``power_base`` hint::

        >>> expand((x*(y + z))**x)
        (x*y + x*z)**x

      To get the ``power_base`` expanded form, either of the following will
      work::

        >>> expand((x*(y + z))**x, mul=False)
        x**x*(y + z)**x
        >>> expand_power_base((x*(y + z))**x)
        x**x*(y + z)**x

        >>> expand((x + y)*y/x)
        y + y**2/x

      The parts of a rational expression can be targeted::

        >>> expand((x + y)*y/x/(x + 1), frac=True)
        (x*y + y**2)/(x**2 + x)
        >>> expand((x + y)*y/x/(x + 1), numer=True)
        (x*y + y**2)/(x*(x + 1))
        >>> expand((x + y)*y/x/(x + 1), denom=True)
        y*(x + y)/(x**2 + x)

    - The ``modulus`` meta-hint can be used to reduce the coefficients of an
      expression post-expansion::

        >>> expand((3*x + 1)**2)
        9*x**2 + 6*x + 1
        >>> expand((3*x + 1)**2, modulus=5)
        4*x**2 + x + 1

    - Either ``expand()`` the function or ``.expand()`` the method can be
      used.  Both are equivalent::

        >>> expand((x + 1)**2)
        x**2 + 2*x + 1
        >>> ((x + 1)**2).expand()
        x**2 + 2*x + 1

    API
    ===

    Objects can define their own expand hints by defining
    ``_eval_expand_hint()``.  The function should take the form::

        def _eval_expand_hint(self, **hints):
            # Only apply the method to the top-level expression
            ...

    See also the example below.  Objects should define ``_eval_expand_hint()``
    methods only if ``hint`` applies to that specific object.  The generic
    ``_eval_expand_hint()`` method defined in Expr will handle the no-op case.

    Each hint should be responsible for expanding that hint only.
    Furthermore, the expansion should be applied to the top-level expression
    only.  ``expand()`` takes care of the recursion that happens when
    ``deep=True``.

    You should only call ``_eval_expand_hint()`` methods directly if you are
    100% sure that the object has the method, as otherwise you are liable to
    get unexpected ``AttributeError``s.  Note, again, that you do not need to
    recursively apply the hint to args of your object: this is handled
    automatically by ``expand()``.  ``_eval_expand_hint()`` should
    generally not be used at all outside of an ``_eval_expand_hint()`` method.
    If you want to apply a specific expansion from within another method, use
    the public ``expand()`` function, method, or ``expand_hint()`` functions.

    In order for expand to work, objects must be rebuildable by their args,
    i.e., ``obj.func(*obj.args) == obj`` must hold.

    Expand methods are passed ``**hints`` so that expand hints may use
    'metahints'--hints that control how different expand methods are applied.
    For example, the ``force=True`` hint described above that causes
    ``expand(log=True)`` to ignore assumptions is such a metahint.  The
    ``deep`` meta-hint is handled exclusively by ``expand()`` and is not
    passed to ``_eval_expand_hint()`` methods.

    Note that expansion hints should generally be methods that perform some
    kind of 'expansion'.  For hints that simply rewrite an expression, use the
    .rewrite() API.

    Examples
    ========

    >>> from sympy import Expr, sympify
    >>> class MyClass(Expr):
    ...     def __new__(cls, *args):
    ...         args = sympify(args)
    ...         return Expr.__new__(cls, *args)
    ...
    ...     def _eval_expand_double(self, **hints):
    ...         '''
    ...         Doubles the args of MyClass.
    ...
    ...         If there more than four args, doubling is not performed,
    ...         unless force=True is also used (False by default).
    ...         '''
    ...         force = hints.pop('force', False)
    ...         if not force and len(self.args) > 4:
    ...             return self
    ...         return self.func(*(self.args + self.args))
    ...
    >>> a = MyClass(1, 2, MyClass(3, 4))
    >>> a
    MyClass(1, 2, MyClass(3, 4))
    >>> a.expand(double=True)
    MyClass(1, 2, MyClass(3, 4, 3, 4), 1, 2, MyClass(3, 4, 3, 4))
    >>> a.expand(double=True, deep=False)
    MyClass(1, 2, MyClass(3, 4), 1, 2, MyClass(3, 4))

    >>> b = MyClass(1, 2, 3, 4, 5)
    >>> b.expand(double=True)
    MyClass(1, 2, 3, 4, 5)
    >>> b.expand(double=True, force=True)
    MyClass(1, 2, 3, 4, 5, 1, 2, 3, 4, 5)

    See Also
    ========

    expand_log, expand_mul, expand_multinomial, expand_complex, expand_trig,
    expand_power_base, expand_power_exp, expand_func, hyperexpand

    """
    # don't modify this; modify the Expr.expand method
    # Fold the explicit keyword parameters back into the hints mapping so a
    # single **hints expansion drives Expr.expand.
    hints.update(power_base=power_base, power_exp=power_exp, mul=mul,
                 log=log, multinomial=multinomial, basic=basic)
    return sympify(e).expand(deep=deep, modulus=modulus, **hints)
# This is a special application of two hints

def _mexpand(expr, recursive=False):
    """Expand multinomials and then products.

    Applies ``expand_multinomial`` followed by ``expand_mul``.  With
    ``recursive=True`` the pair of expansions is repeated until a fixed
    point is reached; a single pass may not always give a fully expanded
    expression (see test_issue_8247_8354 in test_arit).
    """
    if expr is None:
        return None
    while True:
        expanded = expand_mul(expand_multinomial(expr))
        if not recursive or expanded == expr:
            return expanded
        expr = expanded
# These are simple wrappers around single hints.

def expand_mul(expr, deep=True):
    """
    Wrapper around expand that only uses the mul hint.  See the expand
    docstring for more information.

    Examples
    ========

    >>> from sympy import symbols, expand_mul, exp, log
    >>> x, y = symbols('x,y', positive=True)
    >>> expand_mul(exp(x+y)*(x+y)*log(x*y**2))
    x*exp(x + y)*log(x*y**2) + y*exp(x + y)*log(x*y**2)
    """
    # Turn on only the ``mul`` hint; all other default hints are disabled.
    only_mul = dict(mul=True, power_exp=False, power_base=False,
                    basic=False, multinomial=False, log=False)
    return sympify(expr).expand(deep=deep, **only_mul)
def expand_multinomial(expr, deep=True):
    """
    Wrapper around expand that only uses the multinomial hint.  See the expand
    docstring for more information.

    Examples
    ========

    >>> from sympy import symbols, expand_multinomial, exp
    >>> x, y = symbols('x y', positive=True)
    >>> expand_multinomial((x + exp(x + 1))**2)
    x**2 + 2*x*exp(x + 1) + exp(2*x + 2)
    """
    # Turn on only the ``multinomial`` hint; everything else is disabled.
    only_multinomial = dict(multinomial=True, mul=False, power_exp=False,
                            power_base=False, basic=False, log=False)
    return sympify(expr).expand(deep=deep, **only_multinomial)
def expand_log(expr, deep=True, force=False):
    """
    Wrapper around expand that only uses the log hint.  See the expand
    docstring for more information.

    Examples
    ========

    >>> from sympy import symbols, expand_log, exp, log
    >>> x, y = symbols('x,y', positive=True)
    >>> expand_log(exp(x+y)*(x+y)*log(x*y**2))
    (x + y)*(log(x) + 2*log(y))*exp(x + y)
    """
    # Turn on only the ``log`` hint, forwarding the ``force`` meta-hint.
    only_log = dict(log=True, mul=False, power_exp=False,
                    power_base=False, multinomial=False, basic=False)
    return sympify(expr).expand(deep=deep, force=force, **only_log)
def expand_func(expr, deep=True):
    """
    Wrapper around expand that only uses the func hint.  See the expand
    docstring for more information.

    Examples
    ========

    >>> from sympy import expand_func, gamma
    >>> from sympy.abc import x
    >>> expand_func(gamma(x + 2))
    x*(x + 1)*gamma(x)
    """
    # Enable the non-default ``func`` hint and disable all default hints.
    only_func = dict(func=True, basic=False, log=False, mul=False,
                     power_exp=False, power_base=False, multinomial=False)
    return sympify(expr).expand(deep=deep, **only_func)
def expand_trig(expr, deep=True):
    """
    Wrapper around expand that only uses the trig hint.  See the expand
    docstring for more information.

    Examples
    ========

    >>> from sympy import expand_trig, sin
    >>> from sympy.abc import x, y
    >>> expand_trig(sin(x+y)*(x+y))
    (x + y)*(sin(x)*cos(y) + sin(y)*cos(x))
    """
    # Enable the non-default ``trig`` hint and disable all default hints.
    only_trig = dict(trig=True, basic=False, log=False, mul=False,
                     power_exp=False, power_base=False, multinomial=False)
    return sympify(expr).expand(deep=deep, **only_trig)
def expand_complex(expr, deep=True):
    """
    Wrapper around expand that only uses the complex hint.  See the expand
    docstring for more information.

    Examples
    ========

    >>> from sympy import expand_complex, exp, sqrt, I
    >>> from sympy.abc import z
    >>> expand_complex(exp(z))
    I*exp(re(z))*sin(im(z)) + exp(re(z))*cos(im(z))
    >>> expand_complex(sqrt(I))
    sqrt(2)/2 + sqrt(2)*I/2

    See Also
    ========

    Expr.as_real_imag
    """
    # Enable the non-default ``complex`` hint and disable all default hints.
    only_complex = dict(complex=True, basic=False, log=False, mul=False,
                        power_exp=False, power_base=False, multinomial=False)
    return sympify(expr).expand(deep=deep, **only_complex)
def expand_power_base(expr, deep=True, force=False):
    """
    Wrapper around expand that only uses the power_base hint.

    See the expand docstring for more information.

    A wrapper to expand(power_base=True) which separates a power with a base
    that is a Mul into a product of powers, without performing any other
    expansions, provided that assumptions about the power's base and exponent
    allow.

    deep=False (default is True) will only apply to the top-level expression.

    force=True (default is False) will cause the expansion to ignore
    assumptions about the base and exponent. When False, the expansion will
    only happen if the base is non-negative or the exponent is an integer.

    >>> from sympy.abc import x, y, z
    >>> from sympy import expand_power_base, sin, cos, exp

    >>> (x*y)**2
    x**2*y**2

    >>> (2*x)**y
    (2*x)**y
    >>> expand_power_base(_)
    2**y*x**y

    >>> expand_power_base((x*y)**z)
    (x*y)**z
    >>> expand_power_base((x*y)**z, force=True)
    x**z*y**z
    >>> expand_power_base(sin((x*y)**z), deep=False)
    sin((x*y)**z)
    >>> expand_power_base(sin((x*y)**z), force=True)
    sin(x**z*y**z)

    >>> expand_power_base((2*sin(x))**y + (2*cos(x))**y)
    2**y*sin(x)**y + 2**y*cos(x)**y

    >>> expand_power_base((2*exp(y))**x)
    2**x*exp(y)**x

    >>> expand_power_base((2*cos(x))**y)
    2**y*cos(x)**y

    Notice that sums are left untouched. If this is not the desired behavior,
    apply full ``expand()`` to the expression:

    >>> expand_power_base(((x+y)*z)**2)
    z**2*(x + y)**2
    >>> (((x+y)*z)**2).expand()
    x**2*z**2 + 2*x*y*z**2 + y**2*z**2

    >>> expand_power_base((2*y)**(1+z))
    2**(z + 1)*y**(z + 1)
    >>> ((2*y)**(1+z)).expand()
    2*2**z*y*y**z
    """
    # Turn on only ``power_base``, forwarding the ``force`` meta-hint.
    only_power_base = dict(power_base=True, log=False, mul=False,
                           power_exp=False, multinomial=False, basic=False)
    return sympify(expr).expand(deep=deep, force=force, **only_power_base)
def expand_power_exp(expr, deep=True):
    """
    Wrapper around expand that only uses the power_exp hint.

    See the expand docstring for more information.

    Examples
    ========

    >>> from sympy import expand_power_exp
    >>> from sympy.abc import x, y
    >>> expand_power_exp(x**(y + 2))
    x**2*x**y
    """
    # Turn on only the ``power_exp`` hint; all other hints are disabled.
    only_power_exp = dict(power_exp=True, complex=False, basic=False,
                          log=False, mul=False, power_base=False,
                          multinomial=False)
    return sympify(expr).expand(deep=deep, **only_power_exp)
def count_ops(expr, visual=False):
    """
    Return a representation (integer or expression) of the operations in expr.

    If ``visual`` is ``False`` (default) then the sum of the coefficients of the
    visual expression will be returned.

    If ``visual`` is ``True`` then the number of each type of operation is shown
    with the core class types (or their virtual equivalent) multiplied by the
    number of times they occur.

    If expr is an iterable, the sum of the op counts of the
    items will be returned.

    Examples
    ========

    >>> from sympy.abc import a, b, x, y
    >>> from sympy import sin, count_ops

    Although there isn't a SUB object, minus signs are interpreted as
    either negations or subtractions:

    >>> (x - y).count_ops(visual=True)
    SUB
    >>> (-x).count_ops(visual=True)
    NEG

    Here, there are two Adds and a Pow:

    >>> (1 + a + b**2).count_ops(visual=True)
    2*ADD + POW

    In the following, an Add, Mul, Pow and two functions:

    >>> (sin(x)*x + sin(x)**2).count_ops(visual=True)
    ADD + MUL + POW + 2*SIN

    for a total of 5:

    >>> (sin(x)*x + sin(x)**2).count_ops(visual=False)
    5

    Note that "what you type" is not always what you get. The expression
    1/x/y is translated by sympy into 1/(x*y) so it gives a DIV and MUL rather
    than two DIVs:

    >>> (1/x/y).count_ops(visual=True)
    DIV + MUL

    The visual option can be used to demonstrate the difference in
    operations for expressions in different forms. Here, the Horner
    representation is compared with the expanded form of a polynomial:

    >>> eq=x*(1 + x*(2 + x*(3 + x)))
    >>> count_ops(eq.expand(), visual=True) - count_ops(eq, visual=True)
    -MUL + 3*POW

    The count_ops function also handles iterables:

    >>> count_ops([x, sin(x), None, True, x + 2], visual=False)
    2
    >>> count_ops([x, sin(x), None, True, x + 2], visual=True)
    ADD + SIN
    >>> count_ops({x: sin(x), x + 2: y + 1}, visual=True)
    2*ADD + SIN
    """
    from sympy import Integral, Symbol
    from sympy.core.relational import Relational
    from sympy.simplify.radsimp import fraction
    from sympy.logic.boolalg import BooleanFunction
    from sympy.utilities.misc import func_name

    expr = sympify(expr)
    if isinstance(expr, Expr) and not expr.is_Relational:

        # Walk the expression tree iteratively, appending one symbolic
        # operation marker per op encountered.
        ops = []
        args = [expr]
        NEG = Symbol('NEG')
        DIV = Symbol('DIV')
        SUB = Symbol('SUB')
        ADD = Symbol('ADD')
        while args:
            a = args.pop()

            # XXX: This is a hack to support non-Basic args
            if isinstance(a, string_types):
                continue

            if a.is_Rational:
                #-1/3 = NEG + DIV
                if a is not S.One:
                    if a.p < 0:
                        ops.append(NEG)
                    if a.q != 1:
                        ops.append(DIV)
                    continue
            elif a.is_Mul or a.is_MatMul:
                if _coeff_isneg(a):
                    ops.append(NEG)
                    if a.args[0] is S.NegativeOne:
                        a = a.as_two_terms()[1]
                    else:
                        a = -a
                n, d = fraction(a)
                if n.is_Integer:
                    ops.append(DIV)
                    if n < 0:
                        ops.append(NEG)
                    args.append(d)
                    continue  # won't be -Mul but could be Add
                elif d is not S.One:
                    if not d.is_Integer:
                        args.append(d)
                    ops.append(DIV)
                    args.append(n)
                    continue  # could be -Mul
            elif a.is_Add or a.is_MatAdd:
                # Count negated terms as subtractions; the first term never
                # contributes an ADD/SUB of its own.
                aargs = list(a.args)
                negs = 0
                for i, ai in enumerate(aargs):
                    if _coeff_isneg(ai):
                        negs += 1
                        args.append(-ai)
                        if i > 0:
                            ops.append(SUB)
                    else:
                        args.append(ai)
                        if i > 0:
                            ops.append(ADD)
                if negs == len(aargs):  # -x - y = NEG + SUB
                    ops.append(NEG)
                elif _coeff_isneg(aargs[0]):  # -x + y = SUB, but already recorded ADD
                    ops.append(SUB - ADD)
                continue

            if a.is_Pow and a.exp is S.NegativeOne:
                ops.append(DIV)
                args.append(a.base)  # won't be -Mul but could be Add
                continue
            if (a.is_Mul or
                    a.is_Pow or
                    a.is_Function or
                    isinstance(a, Derivative) or
                    isinstance(a, Integral)):
                o = Symbol(a.func.__name__.upper())
                # count the args
                if (a.is_Mul or isinstance(a, LatticeOp)):
                    ops.append(o*(len(a.args) - 1))
                else:
                    ops.append(o)
            if not a.is_Symbol:
                args.extend(a.args)

    elif type(expr) is dict:
        ops = [count_ops(k, visual=visual) +
               count_ops(v, visual=visual) for k, v in expr.items()]
    elif iterable(expr):
        ops = [count_ops(i, visual=visual) for i in expr]
    elif isinstance(expr, (Relational, BooleanFunction)):
        ops = []
        for arg in expr.args:
            ops.append(count_ops(arg, visual=True))
        o = Symbol(func_name(expr, short=True).upper())
        ops.append(o)
    elif not isinstance(expr, Basic):
        ops = []
    else:
        # ``expr`` is a Basic that is not an Expr.  NOTE: the preceding
        # ``elif not isinstance(expr, Basic)`` branch guarantees ``expr`` is
        # a Basic here, so the former inner
        # ``if not isinstance(expr, Basic): raise TypeError`` guard was
        # unreachable dead code and has been removed.
        ops = []
        args = [expr]
        while args:
            a = args.pop()

            # XXX: This is a hack to support non-Basic args
            if isinstance(a, string_types):
                continue

            if a.args:
                o = Symbol(a.func.__name__.upper())
                if a.is_Boolean:
                    ops.append(o*(len(a.args)-1))
                else:
                    ops.append(o)
                args.extend(a.args)

    if not ops:
        if visual:
            return S.Zero
        return 0

    ops = Add(*ops)

    if visual:
        return ops

    if ops.is_Number:
        return int(ops)

    # Non-numeric visual sum: add up the coefficients of the op markers.
    return sum(int((a.args or [1])[0]) for a in Add.make_args(ops))
def nfloat(expr, n=15, exponent=False):
    """Make all Rationals in expr Floats except those in exponents
    (unless the exponents flag is set to True).

    Examples
    ========

    >>> from sympy.core.function import nfloat
    >>> from sympy.abc import x, y
    >>> from sympy import cos, pi, sqrt
    >>> nfloat(x**4 + x/2 + cos(pi/3) + 1 + sqrt(y))
    x**4 + 0.5*x + sqrt(y) + 1.5
    >>> nfloat(x**4 + sqrt(y), exponent=True)
    x**4.0 + y**0.5
    """
    from sympy.core.power import Pow
    from sympy.polys.rootoftools import RootOf

    # Containers (lists, dicts, ...) are processed element-wise, rebuilding
    # the same container type.
    if iterable(expr, exclude=string_types):
        if isinstance(expr, (dict, Dict)):
            return type(expr)([(k, nfloat(v, n, exponent)) for k, v in
                               list(expr.items())])
        return type(expr)([nfloat(a, n, exponent) for a in expr])
    rv = sympify(expr)

    if rv.is_Number:
        return Float(rv, n)
    elif rv.is_number:
        # evalf doesn't always set the precision
        rv = rv.n(n)
        if rv.is_Number:
            rv = Float(rv.n(n), n)
        else:
            pass  # pure_complex(rv) is likely True
        return rv

    # watch out for RootOf instances that don't like to have
    # their exponents replaced with Dummies and also sometimes have
    # problems with evaluating at low precision (issue 6393)
    rv = rv.xreplace({ro: ro.n(n) for ro in rv.atoms(RootOf)})

    if not exponent:
        # Mask every exponent with a fresh Dummy so the .n(n) call below
        # does not float them; the original exponents are restored after.
        reps = [(p, Pow(p.base, Dummy())) for p in rv.atoms(Pow)]
        rv = rv.xreplace(dict(reps))
    rv = rv.n(n)
    if not exponent:
        rv = rv.xreplace({d.exp: p.exp for p, d in reps})
    else:
        # Pow._eval_evalf special cases Integer exponents so if
        # exponent is suppose to be handled we have to do so here
        rv = rv.xreplace(Transform(
            lambda x: Pow(x.base, Float(x.exp, n)),
            lambda x: x.is_Pow and x.exp.is_Integer))

    # Finally, recurse into function arguments so Rationals inside
    # unevaluated functions are floated as well.
    return rv.xreplace(Transform(
        lambda x: x.func(*nfloat(x.args, n, exponent)),
        lambda x: isinstance(x, Function)))
from sympy.core.symbol import Dummy, Symbol
| 34.828592 | 142 | 0.565787 |
bba7f35da2cf575bf3eba9871321e201653dbff5 | 2,460 | py | Python | supportingModules/top.py | smithchristian/arcpy-create-base-dataset | 6dccb8e9ef474453219738952723fb890e3c332d | [
"MIT"
] | null | null | null | supportingModules/top.py | smithchristian/arcpy-create-base-dataset | 6dccb8e9ef474453219738952723fb890e3c332d | [
"MIT"
] | null | null | null | supportingModules/top.py | smithchristian/arcpy-create-base-dataset | 6dccb8e9ef474453219738952723fb890e3c332d | [
"MIT"
] | null | null | null | # ----------------------------------------------------------------------------
# Name: top.py (Topography.py)
# Purpose: This module contains variables for the construction
# of a topography dataset. This module is to be used in
# conjunction with create-Base-DataSet/main.py.
# Description
# and Examples: Physical landform: DEM grids, Contour data, LiDAR, Slope,
# Bathymetry
#
# Author: Christian Fletcher Smith
#
# Created: 10/02/2015
# Copyright: (c) smithc5 2015
# Version: 2
# ---------------------------------------------------------------------------
# This is the name for the topography dataset.
TOP_GDB_NAME = "Topography.gdb"
'''
The following information outlines the variable structure for each feature
in order to be used correctly within main.py.
NOTE: The * used in the information below is to indicate a user defined
name.
Feature variable structure:
# Layer Name ----------------------------------------------------------
* -- This is the source location of the layer to be clipped.
*_FC_NAME -- This is the .gdb name and feature class name for the layer to
be used. The user only needs to populate text after the '{}\', as
'{}\' is formatted to use the variable ADM_GDB_NAME.
*_ALIAS -- This is the alias name to be displayed within ArcGIS.
*_DIC -- The dictionary is used to store all the features variables which
will be imported into main.py as required.
example:
# 10m Contours -----------------------------------------------------------
CONT10M = r"D:\Elevation\Contours_10m.shp"
CONT10M_FC_NAME = "{}\Contours_10m".format(TOP_GDB_NAME)
CONT10M_ALIAS = "10m Contours"
CONT10M_DIC = {"source_location": CON10M,
"output_name": CON10M_FC_NAME,
"alias": CON10M_ALIAS}
'''
# TODO: need to add in layer variables
# -----------------------------------------------------------------------------
# DO NOT ADD LAYER VARIABLES BELOW THIS LINE!
#
# The following list comprehension is designed to compile all the dictionaries
# from the above layers into a single list. This list is imported into main.py
# when required.
# -----------------------------------------------------------------------------
TOP_DIC_LIST = [val for name, val in globals().items() if name.endswith('_DIC')]
| 35.652174 | 81 | 0.543496 |
53ddeb9188cdb34883a5c68a195fe4c0b4d5c371 | 3,598 | py | Python | twiggy/levels.py | tirkarthi/twiggy | 054a07e71f2ceae231270fc62cb7f0a59cf72958 | [
"BSD-3-Clause"
] | 36 | 2015-02-06T06:19:27.000Z | 2022-01-31T18:36:22.000Z | twiggy/levels.py | tirkarthi/twiggy | 054a07e71f2ceae231270fc62cb7f0a59cf72958 | [
"BSD-3-Clause"
] | 51 | 2015-01-13T18:15:06.000Z | 2021-05-16T06:03:07.000Z | twiggy/levels.py | tirkarthi/twiggy | 054a07e71f2ceae231270fc62cb7f0a59cf72958 | [
"BSD-3-Clause"
] | 6 | 2016-05-31T20:11:12.000Z | 2021-09-08T16:37:26.000Z | """
Levels include (increasing severity): ``DEBUG``, ``INFO``, ``NOTICE``, ``WARNING``, ``ERROR``,
``CRITICAL``, ``DISABLED``
"""
from six import PY3, with_metaclass
class LogLevelMeta(type):
    """
    Metaclass that aids in making comparisons work the same in Python2 and Python3

    Python3 raises TypeError when unorderable types are compared via lt, gt, le, ge.
    Python2 picks an order but it doesn't always make much sense.

    In Python3, we only need the rich comparison operators to get this behaviour.
    In Python2, we use the __cmp__ function to raise TypeError for lt, gt, le, and ge.
    We define __eq__ and __ne__ on their own since those should just say that a LogLevel is never
    equal to a non-LogLevel.
    """
    def __new__(meta, name, bases, dct):
        # Build the class first, then prune whichever comparison helpers do
        # not apply on the running interpreter.
        cls = super(LogLevelMeta, meta).__new__(meta, name, bases, dct)
        if PY3:  # pragma: no py2 cover
            # Promote the private helpers to the rich-comparison dunder
            # slots and drop the Python-2-only __cmp__.
            cls.__lt__ = cls._lt
            cls.__gt__ = cls._gt
            cls.__le__ = cls._le
            cls.__ge__ = cls._ge
            del cls.__cmp__
        else:  # pragma: no py3 cover
            # Python 2 relies on __cmp__; remove the unused helpers.
            del cls._lt
            del cls._gt
            del cls._le
            del cls._ge
        return cls
class LogLevel(with_metaclass(LogLevelMeta, object)):
    """A log level. Users should *not* create new instances.

    Levels are opaque; they may be compared to each other, but nothing else.
    """

    # __slots__ avoids per-instance __dict__; the double underscore triggers
    # name mangling, keeping the attributes effectively private.
    __slots__ = ['__name', '__value']

    # Registry of all constructed levels keyed by name; populated in
    # __init__ and consulted by name2level().
    _name2levels = {}

    def __init__(self, name, value):
        self.__name = name
        self.__value = value
        self._name2levels[name] = self

    def __str__(self):
        return self.__name

    def __repr__(self):
        return "<LogLevel %s>" % self.__name

    # The _lt/_le/_gt/_ge helpers below are installed as the real dunder
    # comparison methods by LogLevelMeta on Python 3 and deleted on
    # Python 2 (which uses __cmp__ instead).
    def _lt(self, other):  # pragma: no py2 cover
        if not isinstance(other, LogLevel):
            return NotImplemented
        else:
            return self.__value < other.__value

    def _le(self, other):  # pragma: no py2 cover
        if not isinstance(other, LogLevel):
            return NotImplemented
        else:
            return self.__value <= other.__value

    def _gt(self, other):  # pragma: no py2 cover
        if not isinstance(other, LogLevel):
            return NotImplemented
        else:
            return self.__value > other.__value

    def _ge(self, other):  # pragma: no py2 cover
        if not isinstance(other, LogLevel):
            return NotImplemented
        else:
            return self.__value >= other.__value

    def __eq__(self, other):
        # A LogLevel is never equal to a non-LogLevel.
        if not isinstance(other, LogLevel):
            return False
        else:
            return self.__value == other.__value

    def __ne__(self, other):
        if not isinstance(other, LogLevel):
            return True
        else:
            return self.__value != other.__value

    def __cmp__(self, other):  # pragma: no py3 cover
        # Python 2 only
        if not isinstance(other, LogLevel):
            raise TypeError('Unorderable types LogLevel() and %s' % type(other))
        elif self.__value < other.__value:
            return -1
        elif self.__value > other.__value:
            return 1
        else:
            return 0

    def __hash__(self):
        # Hash by numeric value, consistent with __eq__.
        return hash(self.__value)
def name2level(name):
    """Return the `LogLevel` registered under the case-insensitive ``name``.

    Raises ``KeyError`` if no level with that name exists.
    """
    return LogLevel._name2levels[name.upper()]
# The canonical levels, in increasing order of severity.  Constructing them
# here also registers each one in LogLevel._name2levels for name2level().
DEBUG = LogLevel('DEBUG', 1)
INFO = LogLevel('INFO', 2)
NOTICE = LogLevel('NOTICE', 3)
WARNING = LogLevel('WARNING', 4)
ERROR = LogLevel('ERROR', 5)
CRITICAL = LogLevel('CRITICAL', 6)
DISABLED = LogLevel('DISABLED', 7)
| 29.252033 | 97 | 0.608394 |
13f2ddf6ee00477290d7e66f662c4ab8d2862f5f | 10,121 | py | Python | paddlespeech/server/util.py | SmileGoat/PaddleSpeech | 67994cb8a3f0d3c65446ef9560c69025d6d9a0ef | [
"Apache-2.0"
] | 2 | 2021-11-29T09:02:20.000Z | 2022-02-10T09:30:00.000Z | paddlespeech/server/util.py | SmileGoat/PaddleSpeech | 67994cb8a3f0d3c65446ef9560c69025d6d9a0ef | [
"Apache-2.0"
] | null | null | null | paddlespeech/server/util.py | SmileGoat/PaddleSpeech | 67994cb8a3f0d3c65446ef9560c69025d6d9a0ef | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import inspect
import json
import os
import tarfile
import threading
import time
import uuid
import zipfile
from typing import Any
from typing import Dict
import paddle
import paddleaudio
import requests
import yaml
from paddle.framework import load
from .entry import client_commands
from .entry import server_commands
from paddlespeech.cli import download
try:
from .. import __version__
except ImportError:
__version__ = "0.0.0" # for develop branch
requests.adapters.DEFAULT_RETRIES = 3
__all__ = [
'cli_server_register',
'get_server_command',
'cli_client_register',
'get_client_command',
'download_and_decompress',
'load_state_dict_from_url',
'stats_wrapper',
]
def cli_server_register(name: str, description: str='') -> Any:
    """Decorator factory that registers a server command under the dotted
    ``name`` in the ``server_commands`` tree, optionally with a description.
    """

    def _wrapper(command):
        # Walk (and implicitly create via the tree's mapping) the nested
        # command nodes addressed by the dotted name.
        node = server_commands
        for part in name.split('.'):
            node = node[part]
        node['_entry'] = command
        if description:
            node['_description'] = description
        return command

    return _wrapper
def get_server_command(name: str) -> Any:
    """Resolve the dotted ``name`` in the ``server_commands`` tree and
    return the registered entry class."""
    node = server_commands
    for part in name.split('.'):
        node = node[part]
    return node['_entry']
def cli_client_register(name: str, description: str='') -> Any:
    """Decorator factory that registers a client command under the dotted
    ``name`` in the ``client_commands`` tree, optionally with a description.
    """

    def _wrapper(command):
        # Walk the nested command nodes addressed by the dotted name.
        node = client_commands
        for part in name.split('.'):
            node = node[part]
        node['_entry'] = command
        if description:
            node['_description'] = description
        return command

    return _wrapper
def get_client_command(name: str) -> Any:
    """Resolve the dotted ``name`` in the ``client_commands`` tree and
    return the registered entry class."""
    node = client_commands
    for part in name.split('.'):
        node = node[part]
    return node['_entry']
def _get_uncompress_path(filepath: os.PathLike) -> os.PathLike:
    """Predict the directory an archive at *filepath* uncompresses to.

    Supports tar and zip archives; anything else returns the archive's own
    directory. The layout heuristics (single file / single dir / flat) come
    from ``paddlespeech.cli.download``.
    """
    file_dir = os.path.dirname(filepath)
    is_zip_file = False
    if tarfile.is_tarfile(filepath):
        files = tarfile.open(filepath, "r:*")
        file_list = files.getnames()
    elif zipfile.is_zipfile(filepath):
        files = zipfile.ZipFile(filepath, 'r')
        file_list = files.namelist()
        is_zip_file = True
    else:
        # Not an archive: assume contents already live next to the file.
        return file_dir
    if download._is_a_single_file(file_list):
        rootpath = file_list[0]
        uncompressed_path = os.path.join(file_dir, rootpath)
    elif download._is_a_single_dir(file_list):
        # Zip entries use the first path component; tar entries the last.
        if is_zip_file:
            rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[0]
        else:
            rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[-1]
        uncompressed_path = os.path.join(file_dir, rootpath)
    else:
        # Flat archive: named after the archive itself, minus extension.
        rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1]
        uncompressed_path = os.path.join(file_dir, rootpath)
    # NOTE: the early 'return file_dir' path never opens a handle, so only
    # the archive branches need this close().
    files.close()
    return uncompressed_path
def download_and_decompress(archive: Dict[str, str], path: str) -> os.PathLike:
    """
    Download archieves and decompress to specific path.

    *archive* must provide 'url' and 'md5'. If a file with a matching md5 is
    already cached under *path*, the download is skipped; otherwise a usage
    statistic is reported and the archive is fetched and decompressed.
    Returns the path of the decompressed directory.
    """
    if not os.path.isdir(path):
        os.makedirs(path)
    assert 'url' in archive and 'md5' in archive, \
        'Dictionary keys of "url" and "md5" are required in the archive, but got: {}'.format(list(archive.keys()))
    filepath = os.path.join(path, os.path.basename(archive['url']))
    if os.path.isfile(filepath) and download._md5check(filepath,
                                                       archive['md5']):
        # Cached archive is intact; only decompress when not done already.
        uncompress_path = _get_uncompress_path(filepath)
        if not os.path.isdir(uncompress_path):
            download._decompress(filepath)
    else:
        # Fire-and-forget download statistic (failures are swallowed by the
        # worker thread).
        StatsWorker(
            task='download',
            version=__version__,
            extra_info={
                'download_url': archive['url'],
                'paddle_version': paddle.__version__
            }).start()
        uncompress_path = download.get_path_from_url(archive['url'], path,
                                                     archive['md5'])
    return uncompress_path
def load_state_dict_from_url(url: str, path: str, md5: str=None) -> os.PathLike:
    """
    Download a checkpoint file from *url* into *path* and deserialize it.

    The optional *md5* is forwarded to the downloader for verification.
    """
    os.makedirs(path, exist_ok=True)
    download.get_path_from_url(url, path, md5)
    checkpoint = os.path.join(path, os.path.basename(url))
    return load(checkpoint)
def _get_user_home():
return os.path.expanduser('~')
def _get_paddlespcceh_home():
if 'PPSPEECH_HOME' in os.environ:
home_path = os.environ['PPSPEECH_HOME']
if os.path.exists(home_path):
if os.path.isdir(home_path):
return home_path
else:
raise RuntimeError(
'The environment variable PPSPEECH_HOME {} is not a directory.'.
format(home_path))
else:
return home_path
return os.path.join(_get_user_home(), '.paddlespeech')
def _get_sub_home(directory):
    """Return (creating it if missing) a subdirectory of the PaddleSpeech home."""
    sub_home = os.path.join(_get_paddlespcceh_home(), directory)
    if not os.path.exists(sub_home):
        os.makedirs(sub_home)
    return sub_home
# Resolved once at import time so every caller shares the same locations.
PPSPEECH_HOME = _get_paddlespcceh_home()
MODEL_HOME = _get_sub_home('models')
CONF_HOME = _get_sub_home('conf')
def _md5(text: str):
'''Calculate the md5 value of the input text.'''
md5code = hashlib.md5(text.encode())
return md5code.hexdigest()
class ConfigCache:
    """Small on-disk cache holding a per-installation anonymous id.

    The cache lives at ``CONF_HOME/cache.yaml``; a fresh id is generated and
    flushed on first use or whenever the file cannot be parsed.
    """

    def __init__(self):
        self._data = {}
        self._initialize()
        self.file = os.path.join(CONF_HOME, 'cache.yaml')
        if not os.path.exists(self.file):
            self.flush()
            return
        with open(self.file, 'r') as file:
            try:
                cfg = yaml.load(file, Loader=yaml.FullLoader)
                self._data.update(cfg)
            except Exception:
                # A corrupt cache file is not fatal: regenerate it. Narrowed
                # from BaseException so KeyboardInterrupt/SystemExit still
                # propagate instead of being swallowed here.
                self.flush()

    @property
    def cache_info(self):
        """The cached anonymous installation id string."""
        return self._data['cache_info']

    def _initialize(self):
        # Set default configuration values: md5 of a uuid fragment plus a
        # creation timestamp.
        cache_info = _md5(str(uuid.uuid1())[-12:]) + "-" + str(int(time.time()))
        self._data['cache_info'] = cache_info

    def flush(self):
        '''Flush the current configuration into the configuration file.'''
        with open(self.file, 'w') as file:
            # JSON round-trip normalizes the data to plain built-in types
            # before handing it to yaml.dump.
            cfg = json.loads(json.dumps(self._data))
            yaml.dump(cfg, file)
stats_api = "http://paddlepaddle.org.cn/paddlehub/stat"
cache_info = ConfigCache().cache_info
class StatsWorker(threading.Thread):
    """Background thread that fires one anonymous usage-statistics ping.

    Any network failure is swallowed so statistics reporting can never break
    the caller.
    """

    def __init__(self,
                 task="asr",
                 model=None,
                 version=__version__,
                 extra_info=None):
        """
        Args:
            task: task name reported to the stats endpoint.
            model: optional model identifier.
            version: package version string.
            extra_info: optional dict of extra fields. Copied defensively:
                the previous mutable ``{}`` default was shared across
                instances and mutated in ``run()``.
        """
        threading.Thread.__init__(self)
        self._task = task
        self._model = model
        self._version = version
        self._extra_info = dict(extra_info) if extra_info else {}

    def run(self):
        params = {
            'task': self._task,
            'version': self._version,
            'from': 'ppspeech'
        }
        if self._model:
            params['model'] = self._model
        self._extra_info.update({
            'cache_info': cache_info,
        })
        params.update({"extra": json.dumps(self._extra_info)})
        # Best-effort ping; never raise into the caller's thread machinery.
        try:
            requests.get(stats_api, params)
        except Exception:
            pass
        return
def _note_one_stat(cls_name, params=None):
    """Report one anonymous usage record for an executor invocation.

    Args:
        cls_name: executor class name, e.g. ``ASRExecutor``.
        params: dict of the call's keyword arguments (optional). Unknown
            tasks are silently ignored.
    """
    params = params or {}  # avoid the mutable-default pitfall
    task = cls_name.replace('Executor', '').lower()  # XXExecutor
    extra_info = {
        'paddle_version': paddle.__version__,
    }

    model = params.get('model')

    # Default sample rate when no audio file is supplied or it cannot be
    # loaded. Previously ``sr`` was left undefined in the no-audio case and
    # raised NameError for the asr/st/cls branches below.
    sr = -1
    if 'audio_file' in params:
        try:
            _, sr = paddleaudio.load(params['audio_file'])
        except Exception:
            sr = -1

    if task == 'asr':
        extra_info.update({
            'lang': params['lang'],
            'inp_sr': sr,
            'model_sr': params['sample_rate'],
        })
    elif task == 'st':
        extra_info.update({
            'lang':
            params['src_lang'] + '-' + params['tgt_lang'],
            'inp_sr':
            sr,
            'model_sr':
            params['sample_rate'],
        })
    elif task == 'tts':
        model = params['am']
        extra_info.update({
            'lang': params['lang'],
            'vocoder': params['voc'],
        })
    elif task == 'cls':
        extra_info.update({
            'inp_sr': sr,
        })
    elif task == 'text':
        extra_info.update({
            'sub_task': params['task'],
            'lang': params['lang'],
        })
    else:
        # Unknown task: nothing to report.
        return

    StatsWorker(
        task=task,
        model=model,
        version=__version__,
        extra_info=extra_info, ).start()
def _parse_args(func, *args, **kwargs):
# FullArgSpec(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations)
argspec = inspect.getfullargspec(func)
keys = argspec[0]
if keys[0] == 'self': # Remove self pointer.
keys = keys[1:]
default_values = argspec[3]
values = [None] * (len(keys) - len(default_values))
values.extend(list(default_values))
params = dict(zip(keys, values))
for idx, v in enumerate(args):
params[keys[idx]] = v
for k, v in kwargs.items():
params[k] = v
return params
def stats_wrapper(executor_func):
    """Wrap an executor method so usage stats are reported best-effort."""
    def _wrapped(self, *args, **kwargs):
        # Statistics collection must never interfere with the real call.
        try:
            call_params = _parse_args(executor_func, *args, **kwargs)
            _note_one_stat(type(self).__name__, call_params)
        except Exception:
            pass
        return executor_func(self, *args, **kwargs)
    return _wrapped
| 27.502717 | 114 | 0.595593 |
daf0a7d2ace906b41240d100e1ec79b017695721 | 368 | py | Python | app/admin.py | sp35/PlantDBBrowser | 641f541278f5dffe4aed1eac0f963d8da88a265e | [
"MIT"
] | null | null | null | app/admin.py | sp35/PlantDBBrowser | 641f541278f5dffe4aed1eac0f963d8da88a265e | [
"MIT"
] | null | null | null | app/admin.py | sp35/PlantDBBrowser | 641f541278f5dffe4aed1eac0f963d8da88a265e | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Category, DataBase, SubCategory
@admin.register(DataBase)
class DataBaseAdmin(admin.ModelAdmin):
    """Django admin configuration for DataBase entries."""
    # Columns shown in the admin change list.
    list_display = ["name", "url", "category", "sub_category"]
    # Sidebar filters: moderation by approval state plus taxonomy.
    list_filter = ["approved", "category","sub_category"]
    search_fields = ["name"]
# Category and SubCategory use the default ModelAdmin.
admin.site.register(Category)
admin.site.register(SubCategory)
| 24.533333 | 62 | 0.741848 |
c2dab75d35818e97d2eb323b33e580ace542eefb | 2,033 | py | Python | tests/unit/test_boot.py | madeiramadeirabr/template-serverless-service-python | 17a57e19906f44978dad0c09cff3a16c299ff0c0 | [
"MIT"
] | null | null | null | tests/unit/test_boot.py | madeiramadeirabr/template-serverless-service-python | 17a57e19906f44978dad0c09cff3a16c299ff0c0 | [
"MIT"
] | 1 | 2022-03-31T17:55:16.000Z | 2022-03-31T17:59:00.000Z | tests/unit/test_boot.py | madeiramadeirabr/template-serverless-service-python | 17a57e19906f44978dad0c09cff3a16c299ff0c0 | [
"MIT"
] | null | null | null | """
Boot Unit Test for Flambda APP
Version: 1.0.0
"""
import os
import unittest
from unittest.mock import patch
from boot import load_dot_env, reset, is_loaded, load_env
from tests.unit.mocks.flambda_app_mocks.aws.secrets_mock import secrets_mock_caller
from tests.unit.testutils import BaseUnitTestCase, get_function_name
from unittest_data_provider import data_provider
def get_env():
    """Data provider: (environment name, expected is_loaded result) pairs."""
    environments = (None, 'dev', 'development', 'integration', 'staging',
                    'production')
    return tuple((env, True) for env in environments)
def get_load_dot_env():
    """Data provider: (environment name, expected is_loaded result) pairs."""
    environments = (None, 'dev', 'development', 'integration', 'staging',
                    'production')
    return tuple((env, True) for env in environments)
class BootTestCase(BaseUnitTestCase):
    """Exercises boot-time environment loading for both app flavors.

    Each test is skipped unless APP_TYPE matches the flavor it targets
    (Chalice uses load_env, Flask uses load_dot_env).
    """
    @data_provider(get_env)
    def test_load_env(self, env, expected):
        """load_env should leave the app marked as loaded (Chalice only)."""
        self.logger.info('Running test: %s - %s', get_function_name(__name__), env)
        APP_TYPE = os.environ['APP_TYPE']
        self.logger.info("APP_TYPE: {}".format(APP_TYPE))
        if APP_TYPE == 'Chalice':
            reset()
            load_env(env)
            self.assertEqual(expected, is_loaded())
        else:
            self.skipTest('test_load_env - Ignored because the APP_TYPE {}'.format(APP_TYPE))
    @patch('flambda_app.aws.secrets.Secrets', secrets_mock_caller)
    @data_provider(get_load_dot_env)
    def test_load_dot_env(self, env, expected):
        """load_dot_env should leave the app marked as loaded (Flask only)."""
        self.logger.info('Running test: %s - %s', get_function_name(__name__), env)
        APP_TYPE = os.environ['APP_TYPE']
        self.logger.info("APP_TYPE: {}".format(APP_TYPE))
        if APP_TYPE == 'Flask':
            # AWS Image condition
            if 'ENVIRONMENT_NAME' in os.environ:
                if env == os.environ['ENVIRONMENT_NAME']:
                    expected = True
            reset()
            load_dot_env(env)
            self.assertEqual(expected, is_loaded())
        else:
            self.skipTest('test_load_dot_env - Ignored because the APP_TYPE {}'.format(APP_TYPE))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 33.883333 | 106 | 0.647319 |
7621d2912a3e497762f1a8c402f38453bf6560db | 14,291 | py | Python | nayan/forms.py | patilnayan92/etonlinetest | 42b57cb6f10e518be99faa47e3f9f57a1a54b413 | [
"Python-2.0"
] | 2 | 2019-03-06T02:17:25.000Z | 2019-10-03T17:43:26.000Z | nayan/forms.py | patilnayan92/etonlinetest | 42b57cb6f10e518be99faa47e3f9f57a1a54b413 | [
"Python-2.0"
] | null | null | null | nayan/forms.py | patilnayan92/etonlinetest | 42b57cb6f10e518be99faa47e3f9f57a1a54b413 | [
"Python-2.0"
] | 4 | 2019-02-01T16:10:40.000Z | 2020-08-30T02:44:39.000Z | from django import forms
from nayan.models import get_model_class, Profile, Quiz, Question, TestCase, Course,\
QuestionPaper, StandardTestCase, StdIOBasedTestCase, \
HookTestCase, IntegerTestCase, StringTestCase
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from taggit.managers import TaggableManager
from taggit.forms import TagField
from django.forms.models import inlineformset_factory
from django.db.models import Q
from django.utils import timezone
from textwrap import dedent
try:
from string import letters
except ImportError:
from string import ascii_letters as letters
from string import punctuation, digits
import datetime
import pytz
from .send_emails import generate_activation_key
# (value, label) choice tuples used by the <select> widgets below.
languages = (
    ("select", "Select Language"),
    ("python", "Python"),
    ("bash", "Bash"),
    ("c", "C Language"),
    ("cpp", "C++ Language"),
    ("java", "Java Language"),
    ("scilab", "Scilab"),
    )

question_types = (
    ("select", "Select Question Type"),
    ("mcq", "Multiple Choice"),
    ("mcc", "Multiple Correct Choices"),
    ("code", "Code"),
    ("upload", "Assignment Upload"),
    ("integer", "Answer in Integer"),
    ("string", "Answer in String"),
    ("float", "Answer in Float"),
    )

test_case_types = (
    ("standardtestcase", "Standard Testcase"),
    ("stdiobasedtestcase", "StdIO Based Testcase"),
    ("mcqtestcase", "MCQ Testcase"),
    ("hooktestcase", "Hook Testcase"),
    ("integertestcase", "Integer Testcase"),
    ("stringtestcase", "String Testcase"),
    ("floattestcase", "Float Testcase"),
    )

# Character sets permitted in usernames and passwords respectively.
UNAME_CHARS = letters + "._" + digits
PWD_CHARS = letters + punctuation + digits

# Allowed quiz attempt counts; -1 encodes unlimited attempts.
attempts = [(i, i) for i in range(1, 6)]
attempts.append((-1, 'Infinite'))
# NOTE(review): this is a generator, not a tuple, so it is exhausted after
# the first form that consumes it — confirm whether that is intended.
days_between_attempts = ((j, j) for j in range(401))
def get_object_form(model, exclude_fields=None):
    """Build a ModelForm class for the model named *model*.

    Args:
        model: model name understood by ``get_model_class``.
        exclude_fields: optional iterable of field names left off the form.
    """
    target_class = get_model_class(model)

    class _ObjectForm(forms.ModelForm):
        class Meta:
            model = target_class
            exclude = exclude_fields

    return _ObjectForm
from django import forms
class UserRegisterForm(forms.Form):
    """A Class to create new form for User's Registration.
    It has the various fields and functions required to register
    a new user to the system"""

    username = forms.CharField(max_length=30, help_text='Letters, digits,\
               period and underscores only.')
    email = forms.EmailField()
    password = forms.CharField(max_length=30, widget=forms.PasswordInput())
    confirm_password = forms.CharField\
                       (max_length=30, widget=forms.PasswordInput())
    first_name = forms.CharField(max_length=30)
    last_name = forms.CharField(max_length=30)
    roll_number = forms.CharField\
                (max_length=30, help_text="Use a dummy if you don't have one.")
    institute = forms.CharField\
                (max_length=128, help_text='Institute/Organization')
    department = forms.CharField\
                (max_length=64, help_text='Department you work/study at')
    position = forms.CharField\
                (max_length=64, help_text='Student/Faculty/Researcher/Industry/etc.')
    timezone = forms.ChoiceField(choices=[(tz, tz) for tz in pytz.common_timezones],
                                 initial=pytz.utc)

    def clean_username(self):
        # Restrict to the UNAME_CHARS charset and enforce uniqueness.
        u_name = self.cleaned_data["username"]
        if u_name.strip(UNAME_CHARS):
            msg = "Only letters, digits, period and underscore characters are"\
                  " allowed in username"
            raise forms.ValidationError(msg)
        try:
            User.objects.get(username__exact=u_name)
            raise forms.ValidationError("Username already exists.")
        except User.DoesNotExist:
            return u_name

    def clean_password(self):
        # Restrict to the PWD_CHARS charset.
        pwd = self.cleaned_data['password']
        if pwd.strip(PWD_CHARS):
            raise forms.ValidationError("Only letters, digits and punctuation\
                                        are allowed in password")
        return pwd

    def clean_confirm_password(self):
        # Compare against the raw submitted password (self.data) so this
        # runs even when clean_password rejected the value.
        c_pwd = self.cleaned_data['confirm_password']
        pwd = self.data['password']
        if c_pwd != pwd:
            raise forms.ValidationError("Passwords do not match")
        return c_pwd

    def clean_email(self):
        user_email = self.cleaned_data['email']
        if User.objects.filter(email=user_email).exists():
            raise forms.ValidationError("This email already exists")
        return user_email

    def save(self):
        """Create the User plus its Profile; returns
        (username, password, email, activation_key).

        NOTE(review): when settings.IS_DEVELOPMENT is true no activation key
        is generated, so the returned activation_key is whatever the Profile
        field defaults to — confirm callers handle that.
        """
        u_name = self.cleaned_data["username"]
        u_name = u_name.lower()
        pwd = self.cleaned_data["password"]
        email = self.cleaned_data['email']
        new_user = User.objects.create_user(u_name, email, pwd)
        new_user.first_name = self.cleaned_data["first_name"]
        new_user.last_name = self.cleaned_data["last_name"]
        new_user.save()
        cleaned_data = self.cleaned_data
        new_profile = Profile(user=new_user)
        new_profile.roll_number = cleaned_data["roll_number"]
        new_profile.institute = cleaned_data["institute"]
        new_profile.department = cleaned_data["department"]
        new_profile.position = cleaned_data["position"]
        new_profile.timezone = cleaned_data["timezone"]
        if settings.IS_DEVELOPMENT:
            # Skip the email round-trip in development.
            new_profile.is_email_verified = True
        else:
            new_profile.activation_key = generate_activation_key(new_user.username)
            new_profile.key_expiry_time = timezone.now() + \
                                          timezone.timedelta(minutes=20)
        new_profile.save()
        return u_name, pwd, new_user.email, new_profile.activation_key
class UserLoginForm(forms.Form):
    """Creates a form which will allow the user to log into the system."""
    username = forms.CharField(max_length=30)
    password = forms.CharField(max_length=30, widget=forms.PasswordInput())

    def clean(self):
        # NOTE(review): unconventionally returns the authenticated User
        # instead of cleaned_data — callers appear to rely on this.
        super(UserLoginForm, self).clean()
        try:
            u_name, pwd = self.cleaned_data["username"],\
                          self.cleaned_data["password"]
            user = authenticate(username=u_name, password=pwd)
        except Exception:
            # KeyError from cleaned_data when either field failed validation.
            raise forms.ValidationError\
                ("Username and/or Password is not entered")
        if not user:
            raise forms.ValidationError("Invalid username/password")
        return user
class QuizForm(forms.ModelForm):
    """Creates a form to add or edit a Quiz.
    It has the related fields and functions required."""

    def __init__(self, *args, **kwargs):
        # Pops the extra 'user'/'course' kwargs injected by the view before
        # ModelForm sees them.
        user = kwargs.pop('user')
        course_id = kwargs.pop('course')
        super(QuizForm, self).__init__(*args, **kwargs)
        # A quiz can only require another (non-trial) quiz of the same
        # course, and never itself.
        prerequisite_list = Quiz.objects.filter(
            course__id=course_id,
            is_trial=False
        ).exclude(id=self.instance.id)
        self.fields['prerequisite'] = forms.ModelChoiceField(prerequisite_list)
        self.fields['prerequisite'].required = False
        self.fields['course'] = forms.ModelChoiceField(
            queryset=Course.objects.filter(id=course_id), empty_label=None)
        # Default exam instructions shown to students (HTML).
        self.fields["instructions"].initial = dedent("""\
            <p>
            This examination system has been
            developed with the intention of
            making you learn programming and
            be assessed in an interactive and
            fun manner.
            You will be presented with a
            series of programming questions
            and problems that you will answer
            online and get immediate
            feedback for.
            </p>
            <p>
            Here are some important
            instructions and rules that you
            should understand carefully.</p>
            <ul>
            <li>For any programming questions,
            you can submit solutions as many
            times as you want without a
            penalty. You may skip questions
            and solve them later.</li>
            <li> You <strong>may</strong>
            use your computer's Python/IPython
            shell or an editor to solve the
            problem and cut/paste the
            solution to the web interface.
            </li>
            <li> <strong>You are not allowed
            to use any internet resources,
            i.e. no google etc.</strong>
            </li>
            <li> Do not copy or share the
            questions or answers with anyone
            until the exam is complete
            <strong>for everyone</strong>.
            </li>
            <li> <strong>All</strong> your
            attempts at the questions are
            logged. Do not try to outsmart
            and break the testing system.
            If you do, we know who you are
            and we will expel you from the
            course. You have been warned.
            </li>
            </ul>
            <p>
            We hope you enjoy taking this
            exam !!!
            </p>
            """)

    def clean_prerequisite(self):
        # Reject a direct two-quiz prerequisite cycle (A requires B requires A).
        prereq = self.cleaned_data['prerequisite']
        if prereq and prereq.prerequisite:
            if prereq.prerequisite.id == self.instance.id:
                raise forms.ValidationError("Please set another prerequisite quiz")
        return prereq

    class Meta:
        model = Quiz
        exclude = ["is_trial"]
class QuestionForm(forms.ModelForm):
    """Creates a form to add or edit a Question.
    It has the related fields and functions required."""

    class Meta:
        model = Question
        # 'user' and 'active' are presumably set by the views — TODO confirm.
        exclude = ['user', 'active']
class FileForm(forms.Form):
    """Form with a single optional multi-file upload field."""
    file_field = forms.FileField(widget=forms.ClearableFileInput(attrs={'multiple': True}),
                                 required=False)
class RandomQuestionForm(forms.Form):
    """Filter used when adding randomly picked questions to a paper."""
    question_type = forms.CharField(max_length=8, widget=forms.Select\
                                    (choices=question_types))
    # Marks choices are populated dynamically by the view; the placeholder
    # is the only static option here.
    marks = forms.CharField(max_length=8, widget=forms.Select\
                            (choices=(('select', 'Select Marks'),)))
    shuffle_questions = forms.BooleanField(required=False)
class QuestionFilterForm(forms.Form):
    """Question-bank filter; the marks choices are the distinct point values
    of the requesting user's own questions."""

    def __init__(self, *args, **kwargs):
        user = kwargs.pop("user")
        super(QuestionFilterForm, self).__init__(*args, **kwargs)
        questions = Question.objects.filter(user_id=user.id)
        points_list = questions.values_list('points', flat=True).distinct()
        points_options = [(None, 'Select Marks')]
        points_options.extend([(point, point) for point in points_list])
        self.fields['marks'] = forms.FloatField(widget=forms.Select\
                                                (choices=points_options))

    language = forms.CharField(max_length=8, widget=forms.Select\
                               (choices=languages))
    question_type = forms.CharField(max_length=8, widget=forms.Select\
                                    (choices=question_types))
class CourseForm(forms.ModelForm):
    """ course form for moderators """

    def save(self, commit=True, *args, **kwargs):
        # A course with an enrollment code is hidden from public listings.
        instance = super(CourseForm, self).save(commit=False)
        if instance.code:
            instance.hidden = True
        else:
            instance.hidden = False
        if commit:
            instance.save()
        return instance

    class Meta:
        model = Course
        exclude = ['creator', 'requests', 'students', 'rejected',
                   'created_on', 'is_trial', 'hidden', 'teachers']
class ProfileForm(forms.ModelForm):
    """ profile form for students and moderators """

    class Meta:
        model = Profile
        fields = ['first_name', 'last_name', 'institute',
                  'department', 'roll_number', 'position', 'timezone']

    # Declared on the form (not the Profile model); seeded from the related
    # User in __init__ below.
    first_name = forms.CharField(max_length=30)
    last_name = forms.CharField(max_length=30)

    def __init__(self, *args, **kwargs):
        if 'user' in kwargs:
            user = kwargs.pop('user')
        super(ProfileForm, self).__init__(*args, **kwargs)
        self.fields['first_name'].initial = user.first_name
        self.fields['last_name'].initial = user.last_name
class UploadFileForm(forms.Form):
    """Single required file upload (e.g. question import)."""
    file = forms.FileField()
class QuestionPaperForm(forms.ModelForm):
    """Edit form exposing only the shuffle flag of a question paper."""
    class Meta:
        model = QuestionPaper
        fields = ['shuffle_questions']
| 41.303468 | 91 | 0.542089 |
ef87ded4dfdf6eb615c1acd523a25f06b717f3fd | 23,064 | py | Python | hg-fast-export.py | rajatguptarg/hg2git | 47d330de83055efc5d4eeec6d0593d7aa3640555 | [
"MIT"
] | null | null | null | hg-fast-export.py | rajatguptarg/hg2git | 47d330de83055efc5d4eeec6d0593d7aa3640555 | [
"MIT"
] | null | null | null | hg-fast-export.py | rajatguptarg/hg2git | 47d330de83055efc5d4eeec6d0593d7aa3640555 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Copyright (c) 2007, 2008 Rocco Rutte <pdmef@gmx.net> and others.
# License: MIT <http://www.opensource.org/licenses/mit-license.php>
from mercurial import node
from mercurial.scmutil import revsymbol
from hg2git import setup_repo,fixup_user,get_branch,get_changeset
from hg2git import load_cache,save_cache,get_git_sha1,set_default_branch,set_origin_name
from optparse import OptionParser
import re
import sys
import os
import pluginloader
if sys.platform == "win32":
  # On Windows, sys.stdout is initially opened in text mode, which means that
  # when a LF (\n) character is written to sys.stdout, it will be converted
  # into CRLF (\r\n). That makes git blow up, so use this platform-specific
  # code to change the mode of sys.stdout to binary.
  import msvcrt
  msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)

# silly regex to catch Signed-off-by lines in log message
sob_re=re.compile('^Signed-[Oo]ff-[Bb]y: (.+)$')
# insert 'checkpoint' command after this many commits or none at all if 0
cfg_checkpoint_count=0
# write some progress message every this many file contents written
cfg_export_boundary=1000
# per-subrepo (mapping, marks) caches, keyed by submodule name
subrepo_cache={}
# submodule name -> git repo path mapping; populated from CLI options elsewhere
submodule_mappings=None
def gitmode(flags):
  """Map hg manifest flags to the corresponding git file mode string."""
  if 'l' in flags:
    return '120000'  # symlink
  if 'x' in flags:
    return '100755'  # executable
  return '100644'    # regular file
def wr_no_nl(msg=''):
  """Write *msg* to stdout without a newline; empty msg is a no-op."""
  if not msg:
    return
  sys.stdout.write(msg)

def wr(msg=''):
  """Write *msg* to stdout followed by a newline."""
  wr_no_nl(msg)
  sys.stdout.write('\n')
#map(lambda x: sys.stderr.write('\t[%s]\n' % x),msg.split('\n'))
def checkpoint(count):
  """Advance the commit counter, emitting a fast-import 'checkpoint'
  command every cfg_checkpoint_count commits (disabled when it is 0)."""
  count += 1
  if cfg_checkpoint_count > 0 and count % cfg_checkpoint_count == 0:
    sys.stderr.write("Checkpoint after %d commits\n" % count)
    wr('checkpoint')
    wr()
  return count
def revnum_to_revref(rev, old_marks):
  """Convert an hg revnum to a git-fast-import rev reference (an SHA1
  or a mark)"""
  mark = old_marks.get(rev)
  if mark:
    return mark
  return ':%d' % (rev + 1)
def file_mismatch(f1, f2):
  """See if two revisions of a file are not equal (by hex node id)."""
  left, right = node.hex(f1), node.hex(f2)
  return left != right
def split_dict(dleft, dright, l=None, c=None, r=None, match=file_mismatch):
  """Loop over our repository and find all changed and missing files.

  Names only in *dleft* go to *l*, names in both that differ (per *match*
  or file mode) go to *c*, names only in *dright* go to *r*. The three
  accumulators previously defaulted to shared mutable ``[]`` objects; they
  now default to fresh lists per call while still accepting caller lists.
  """
  l = [] if l is None else l
  c = [] if c is None else c
  r = [] if r is None else r
  for left in dleft.keys():
    right = dright.get(left, None)
    if right == None:
      # we have the file but our parent hasn't: add to left set
      l.append(left)
    elif match(dleft[left], right) or gitmode(dleft.flags(left)) != gitmode(dright.flags(left)):
      # we have it but checksums mismatch: add to center set
      c.append(left)
  for right in dright.keys():
    left = dleft.get(right, None)
    if left == None:
      # if parent has file but we don't: add to right set
      r.append(right)
  # change is already handled when comparing child against parent
  return l, c, r
def get_filechanges(repo,revision,parents,mleft):
  """Given some repository and revision, find all changed/deleted files.

  Compares *mleft* (the revision's manifest) against each parent's manifest,
  accumulating into the same three lists, and returns them sorted as
  (left-only, changed, right-only).
  """
  l,c,r=[],[],[]
  for p in parents:
    # Negative revnums mark null parents; nothing to compare against.
    if p<0: continue
    mright=revsymbol(repo,str(p)).manifest()
    l,c,r=split_dict(mleft,mright,l,c,r)
  l.sort()
  c.sort()
  r.sort()
  return l,c,r
def get_author(logmessage,committer,authors):
  """As git distincts between author and committer of a patch, try to
  extract author by detecting Signed-off-by lines.

  This walks from the end of the log message towards the top skipping
  empty lines. Upon the first non-empty line, it walks all Signed-off-by
  lines upwards to find the first one. For that (if found), it extracts
  authorship information the usual way (authors table, cleaning, etc.)

  If no Signed-off-by line is found, this defaults to the committer.

  This may sound stupid (and it somehow is), but in log messages we
  accidentially may have lines in the middle starting with
  "Signed-off-by: foo" and thus matching our detection regex. Prevent
  that."""

  loglines=logmessage.split('\n')
  i=len(loglines)
  # from tail walk to top skipping empty lines
  while i>=0:
    i-=1
    if len(loglines[i].strip())==0: continue
    break
  if i>=0:
    # walk further upwards to find first sob line, store in 'first'
    first=None
    while i>=0:
      m=sob_re.match(loglines[i])
      if m==None: break
      first=m
      i-=1
    # if the last non-empty line matches our Signed-Off-by regex: extract username
    if first!=None:
      r=fixup_user(first.group(1),authors)
      return r
  return committer
def export_file_contents(ctx,manifest,files,hgtags,encoding='',plugins={}):
  """Emit fast-import 'M' commands for every file in *files* at *ctx*.

  Handles .hgsubstate specially (translating hg subrepos into git
  submodules via submodule_mappings), optionally skips .hgtags, re-encodes
  file names, and lets registered file_data_filters plugins rewrite the
  content before it is streamed.
  """
  count=0
  max=len(files)
  for file in files:
    if submodule_mappings and ctx.substate and file==".hgsubstate":
      # Remove all submodules as we don't detect deleted submodules properly
      # in any other way. We will add the ones not deleted back again below.
      for module in submodule_mappings.keys():
        wr('D %s' % module)
      # Read .hgsubstate file in order to find the revision of each subrepo
      data=ctx.filectx(file).data()
      subHashes={}
      for line in data.split('\n'):
        if line.strip()=="":
          continue
        cols=line.split(' ')
        subHashes[cols[1]]=cols[0]
      gitmodules=""
      # Create the .gitmodules file and all submodules
      for name in ctx.substate:
        gitRepoLocation=submodule_mappings[name] + "/.git"
        # Populate the cache to map mercurial revision to git revision
        if not name in subrepo_cache:
          subrepo_cache[name]=(load_cache(gitRepoLocation+"/hg2git-mapping"),
                               load_cache(gitRepoLocation+"/hg2git-marks",
                                          lambda s: int(s)-1))
        (mapping_cache, marks_cache)=subrepo_cache[name]
        if subHashes[name] in mapping_cache:
          revnum=mapping_cache[subHashes[name]]
          gitSha=marks_cache[int(revnum)]
          # Gitlink entry pointing at the already-converted subrepo commit.
          wr('M 160000 %s %s' % (gitSha, name))
          sys.stderr.write("Adding submodule %s, revision %s->%s\n"
                           % (name,subHashes[name],gitSha))
          gitmodules+='[submodule "%s"]\n\tpath = %s\n\turl = %s\n' % (name, name, submodule_mappings[name])
        else:
          sys.stderr.write("Warning: Could not find hg revision %s for %s in git %s\n" % (subHashes[name],name,gitRepoLocation))
      if len(gitmodules):
        wr('M 100644 inline .gitmodules')
        wr('data %d' % (len(gitmodules)+1))
        wr(gitmodules)
    # Skip .hgtags files. They only get us in trouble.
    if not hgtags and file == ".hgtags":
      sys.stderr.write('Skip %s\n' % (file))
      continue
    if encoding:
      filename=file.decode(encoding).encode('utf8')
    else:
      filename=file
    file_ctx=ctx.filectx(file)
    d=file_ctx.data()
    if plugins and plugins['file_data_filters']:
      # Plugins may rewrite filename and/or content in place.
      file_data = {'filename':filename,'file_ctx':file_ctx,'data':d}
      for filter in plugins['file_data_filters']:
        filter(file_data)
      d=file_data['data']
      filename=file_data['filename']
      file_ctx=file_data['file_ctx']
    wr('M %s inline %s' % (gitmode(manifest.flags(file)),
                           strip_leading_slash(filename)))
    wr('data %d' % len(d)) # had some trouble with size()
    wr(d)
    count+=1
    if count%cfg_export_boundary==0:
      sys.stderr.write('Exported %d/%d files\n' % (count,max))
  if max>cfg_export_boundary:
    sys.stderr.write('Exported %d/%d files\n' % (count,max))
def sanitize_name(name, what="branch", mapping={}):
  """Sanitize input roughly according to git-check-ref-format(1)"""
  # NOTE: This transform is deliberately frozen. Changing it would alter
  # names produced by earlier incremental imports and break preexisting
  # setups. Naming policy belongs in the -B/-T mapping options, not here;
  # write an external tool if manual mapping is too much work.
  def _fix_leading_dot(segment):
    if not segment:
      return segment
    if segment[0] == '.':
      return '_' + segment[1:]
    return segment

  cleaned = mapping.get(name, name)
  # Replace characters git refuses, plus '..' sequences.
  cleaned = re.sub('([[ ~^:?\\\\*]|\.\.)', '_', cleaned)
  if cleaned[-1] in ('/', '.'):
    cleaned = cleaned[:-1] + '_'
  # A ref component may not start with a dot.
  cleaned = '/'.join(_fix_leading_dot(seg) for seg in cleaned.split('/'))
  # Collapse runs of underscores produced above.
  cleaned = re.sub('_+', '_', cleaned)

  if cleaned != name:
    sys.stderr.write('Warning: sanitized %s [%s] to [%s]\n' % (what, name, cleaned))
  return cleaned
def strip_leading_slash(filename):
  """Return *filename* without a single leading '/', if present.

  Uses startswith() so the empty string is handled; the previous
  ``filename[0]`` check raised IndexError on ''.
  """
  if filename.startswith('/'):
    return filename[1:]
  return filename
def export_commit(ui,repo,revision,old_marks,max,count,authors,
                  branchesmap,sob,brmap,hgtags,encoding='',fn_encoding='',
                  plugins={}):
  """Emit one fast-import 'commit' for hg *revision* and return the updated
  checkpoint counter.

  Depending on parent count, file changes are taken from a full manifest,
  a simple status delta, or a thorough manifest comparison (merges).
  commit_message_filters plugins may rewrite branch/parents/author/desc.
  """
  def get_branchname(name):
    # Memoized branch-name sanitization (brmap doubles as the cache).
    if brmap.has_key(name):
      return brmap[name]
    n=sanitize_name(name, "branch", branchesmap)
    brmap[name]=n
    return n

  (revnode,_,user,(time,timezone),files,desc,branch,_)=get_changeset(ui,repo,revision,authors,encoding)

  branch=get_branchname(branch)

  parents = [p for p in repo.changelog.parentrevs(revision) if p >= 0]
  author = get_author(desc,user,authors)

  if plugins and plugins['commit_message_filters']:
    commit_data = {'branch': branch, 'parents': parents, 'author': author, 'desc': desc}
    for filter in plugins['commit_message_filters']:
      filter(commit_data)
    branch = commit_data['branch']
    parents = commit_data['parents']
    author = commit_data['author']
    desc = commit_data['desc']

  # A later parentless commit starts a fresh (orphan) branch head.
  if len(parents)==0 and revision != 0:
    wr('reset refs/heads/%s' % branch)

  wr('commit refs/heads/%s' % branch)
  wr('mark :%d' % (revision+1))
  if sob:
    wr('author %s %d %s' % (author,time,timezone))
  wr('committer %s %d %s' % (user,time,timezone))
  wr('data %d' % (len(desc)+1)) # wtf?
  wr(desc)
  wr()

  ctx=revsymbol(repo,str(revision))
  man=ctx.manifest()
  added,changed,removed,type=[],[],[],''

  if len(parents) == 0:
    # first revision: feed in full manifest
    added=man.keys()
    added.sort()
    type='full'
  else:
    wr('from %s' % revnum_to_revref(parents[0], old_marks))
    if len(parents) == 1:
      # later non-merge revision: feed in changed manifest
      # if we have exactly one parent, just take the changes from the
      # manifest without expensively comparing checksums
      f=repo.status(parents[0],revnode)[:3]
      added,changed,removed=f[1],f[0],f[2]
      type='simple delta'
    else: # a merge with two parents
      wr('merge %s' % revnum_to_revref(parents[1], old_marks))
      # later merge revision: feed in changed manifest
      # for many files comparing checksums is expensive so only do it for
      # merges where we really need it due to hg's revlog logic
      added,changed,removed=get_filechanges(repo,revision,parents,man)
      type='thorough delta'

  sys.stderr.write('%s: Exporting %s revision %d/%d with %d/%d/%d added/changed/removed files\n' %
      (branch,type,revision+1,max,len(added),len(changed),len(removed)))

  if fn_encoding:
    removed=[r.decode(fn_encoding).encode('utf8') for r in removed]

  removed=[strip_leading_slash(x) for x in removed]

  # Deletions first, then added and changed file contents.
  map(lambda r: wr('D %s' % r),removed)
  export_file_contents(ctx,man,added,hgtags,fn_encoding,plugins)
  export_file_contents(ctx,man,changed,hgtags,fn_encoding,plugins)
  wr()

  return checkpoint(count)
def export_note(ui,repo,revision,count,authors,encoding,is_first):
  """Attach the hg changeset hash of *revision* as a git note on
  refs/notes/hg and return the updated checkpoint counter.

  Removed: three unreachable statements that followed the return and
  referenced an undefined name ('desc').
  """
  (revnode,_,user,(time,timezone),_,_,_,_)=get_changeset(ui,repo,revision,authors,encoding)

  parents = [p for p in repo.changelog.parentrevs(revision) if p >= 0]

  wr('commit refs/notes/hg')
  wr('committer %s %d %s' % (user,time,timezone))
  wr('data 0')
  # NOTE(review): 'from' is emitted when is_first is true — confirm against
  # the callers; upstream variants emit it for subsequent notes commits.
  if is_first:
    wr('from refs/notes/hg^0')
  wr('N inline :%d' % (revision+1))
  hg_hash=revsymbol(repo,str(revision)).hex()
  wr('data %d' % (len(hg_hash)))
  wr_no_nl(hg_hash)
  wr()
  return checkpoint(count)
def export_tags(ui,repo,old_marks,mapping_cache,count,authors,tagsmap):
  """Emit fast-import 'reset refs/tags/...' commands for all hg tags whose
  target revision has already been exported; returns the updated counter."""
  l=repo.tagslist()
  for tag,node in l:
    # Remap the branch name
    tag=sanitize_name(tag,"tag",tagsmap)
    # ignore latest revision
    if tag=='tip': continue
    # ignore tags to nodes that are missing (ie, 'in the future')
    if node.encode('hex_codec') not in mapping_cache:
      sys.stderr.write('Tag %s refers to unseen node %s\n' % (tag, node.encode('hex_codec')))
      continue
    rev=int(mapping_cache[node.encode('hex_codec')])

    ref=revnum_to_revref(rev, old_marks)
    if ref==None:
      sys.stderr.write('Failed to find reference for creating tag'
          ' %s at r%d\n' % (tag,rev))
      continue
    sys.stderr.write('Exporting tag [%s] at [hg r%d] [git %s]\n' % (tag,rev,ref))
    wr('reset refs/tags/%s' % tag)
    wr('from %s' % ref)
    wr()
    count=checkpoint(count)
  return count
def load_mapping(name, filename, mapping_is_raw):
  """Load a key=value mapping file (author/branch/tag maps) into a dict.

  Two formats are supported: raw 'lhs = rhs' lines, or quoted
  backslash-escaped strings (selected by *mapping_is_raw*). Lines starting
  with '#' and blank lines are skipped; a missing file yields an empty map.
  NOTE: uses the Python-2-only 'string_escape' codec for the quoted format.
  """
  raw_regexp=re.compile('^([^=]+)[ ]*=[ ]*(.+)$')
  string_regexp='"(((\\.)|(\\")|[^"])*)"'
  quoted_regexp=re.compile('^'+string_regexp+'[ ]*=[ ]*'+string_regexp+'$')

  def parse_raw_line(line):
    m=raw_regexp.match(line)
    if m==None:
      return None
    return (m.group(1).strip(), m.group(2).strip())

  def parse_quoted_line(line):
    m=quoted_regexp.match(line)
    if m==None:
      return None
    return (m.group(1).decode('string_escape'),
            m.group(5).decode('string_escape'))

  cache={}
  if not os.path.exists(filename):
    sys.stderr.write('Could not open mapping file [%s]\n' % (filename))
    return cache
  f=open(filename,'r')
  l=0
  a=0
  for line in f.readlines():
    l+=1
    line=line.strip()
    # A first-line marker may declare the quoted format explicitly.
    if l==1 and line[0]=='#' and line=='# quoted-escaped-strings':
      continue
    elif line=='' or line[0]=='#':
      continue
    m=parse_raw_line(line) if mapping_is_raw else parse_quoted_line(line)
    if m==None:
      sys.stderr.write('Invalid file format in [%s], line %d\n' % (filename,l))
      continue
    # put key:value in cache, key without ^:
    cache[m[0]]=m[1]
    a+=1
  f.close()
  sys.stderr.write('Loaded %d %s\n' % (a, name))
  return cache
def branchtip(repo, heads):
  '''Return the tipmost open head from *heads*, falling back to the very
  last (tipmost) entry when every head is closed.'''
  changelog = repo.changelog
  for candidate in reversed(heads):
    # field 5 of the changelog entry (presumably the 'extra' mapping)
    # carries a 'close' key for closed branch heads
    if 'close' not in changelog.read(candidate)[5]:
      return candidate
  return heads[-1]
def verify_heads(ui,repo,cache,force,branchesmap):
  '''Check that the git refs recorded by the previous run still match the
  current hg branch heads and that every hg head belongs to a distinct
  named branch.  Returns False on the first problem unless *force* is
  set, in which case problems are only reported.'''
  # tipmost open head per named branch
  tips={}
  for name, heads in repo.branchmap().iteritems():
    tips[name]=branchtip(repo, heads)
  # order branches newest-first; only branches hg knows about are
  # verified, not everything git may have
  ordered=sorted([(-repo.changelog.rev(node), node, name) for name, node in tips.items()])
  for _,_,name in ordered:
    name=get_branch(name)
    sanitized_name=sanitize_name(name,"branch",branchesmap)
    sha1=get_git_sha1(sanitized_name)
    cached_sha1=cache.get(sanitized_name)
    if sha1!=cached_sha1:
      sys.stderr.write('Error: Branch [%s] modified outside hg-fast-export:\n%s (repo) != %s (cache)\n' % (name,sha1,cached_sha1))
      if not force: return False
  # verify that each branch has exactly one head
  seen=set()
  for head in repo.heads():
    (_,_,_,_,_,_,branch,_)=get_changeset(ui,repo,head)
    if branch in seen:
      sys.stderr.write('Error: repository has at least one unnamed head: hg r%s\n' %
                       repo.changelog.rev(head))
      if not force: return False
    seen.add(branch)
  return True
def hg2git(repourl,m,marksfile,mappingfile,headsfile,tipfile,
           authors={},branchesmap={},tagsmap={},
           sob=False,force=False,hgtags=False,notes=False,encoding='',fn_encoding='',
           plugins={}):
  '''Drive a (possibly incremental) hg -> git fast-import conversion.

  :param repourl: URL/path of the Mercurial repository to export
  :param m: maximum hg revision to import; negative means everything
  :param marksfile/mappingfile/headsfile/tipfile: state files from the
      previous run, used to resume an incremental import
  :param authors/branchesmap/tagsmap: name remapping dictionaries
  :returns: 0 on success, 1 when a sanity check fails
  NOTE(review): the mutable default arguments ({}) are only read here,
  never mutated, so they are safe.
  '''
  def check_cache(filename, contents):
    # an empty state file usually means a previous run was interrupted
    if len(contents) == 0:
      sys.stderr.write('Warning: %s does not contain any data, this will probably make an incremental import fail\n' % filename)
  _max=int(m)
  # state from the previous run: mark numbers, hg->git hash mapping,
  # exported branch heads and the last exported revision ("tip")
  old_marks=load_cache(marksfile,lambda s: int(s)-1)
  mapping_cache=load_cache(mappingfile)
  heads_cache=load_cache(headsfile)
  state_cache=load_cache(tipfile)
  if len(state_cache) != 0:
    # incremental run: warn about any state file that came back empty
    for (name, data) in [(marksfile, old_marks),
                         (mappingfile, mapping_cache),
                         (headsfile, state_cache)]:
      check_cache(name, data)
  ui,repo=setup_repo(repourl)
  if not verify_heads(ui,repo,heads_cache,force,branchesmap):
    return 1
  try:
    tip=repo.changelog.count()
  except AttributeError:
    # newer Mercurial versions dropped changelog.count()
    tip=len(repo)
  # NOTE(review): these two names shadow the min()/max() builtins for the
  # rest of the function body.
  min=int(state_cache.get('tip',0))
  max=_max
  if _max<0 or max>tip:
    max=tip
  # refresh the hg-hash -> revision-number mapping for all revisions
  for rev in range(0,max):
    (revnode,_,_,_,_,_,_,_)=get_changeset(ui,repo,rev,authors)
    mapping_cache[revnode.encode('hex_codec')] = str(rev)
  # NOTE(review): submodule_mappings is a module-level name assigned in
  # the __main__ block; if hg2git() runs without it having been bound,
  # the truth test below raises NameError -- confirm callers.
  if submodule_mappings:
    # Make sure that all submodules are registered in the submodule-mappings file
    for rev in range(0,max):
      ctx=revsymbol(repo,str(rev))
      if ctx.substate:
        for key in ctx.substate:
          if key not in submodule_mappings:
            sys.stderr.write("Error: %s not found in submodule-mappings\n" % (key))
            return 1
  c=0
  brmap={}
  # export the new revisions (min..max) as git commits
  for rev in range(min,max):
    c=export_commit(ui,repo,rev,old_marks,max,c,authors,branchesmap,
                    sob,brmap,hgtags,encoding,fn_encoding,
                    plugins)
  if notes:
    # optionally attach each commit's hg hash as a git note
    for rev in range(min,max):
      c=export_note(ui,repo,rev,c,authors, encoding, rev == min and min != 0)
  # persist the new state for the next incremental run
  state_cache['tip']=max
  state_cache['repo']=repourl
  save_cache(tipfile,state_cache)
  save_cache(mappingfile,mapping_cache)
  c=export_tags(ui,repo,old_marks,mapping_cache,c,authors,tagsmap)
  sys.stderr.write('Issued %d commands\n' % c)
  return 0
if __name__=='__main__':
  def bail(parser,opt):
    '''Print a missing-option error plus usage and exit with status 2.'''
    sys.stderr.write('Error: No %s option given\n' % opt)
    parser.print_help()
    sys.exit(2)
  parser=OptionParser()
  parser.add_option("-m","--max",type="int",dest="max",
      help="Maximum hg revision to import")
  parser.add_option("--mapping",dest="mappingfile",
      help="File to read last run's hg-to-git SHA1 mapping")
  parser.add_option("--marks",dest="marksfile",
      help="File to read git-fast-import's marks from")
  parser.add_option("--heads",dest="headsfile",
      help="File to read last run's git heads from")
  parser.add_option("--status",dest="statusfile",
      help="File to read status from")
  parser.add_option("-r","--repo",dest="repourl",
      help="URL of repo to import")
  parser.add_option("-s",action="store_true",dest="sob",
      default=False,help="Enable parsing Signed-off-by lines")
  parser.add_option("--hgtags",action="store_true",dest="hgtags",
      default=False,help="Enable exporting .hgtags files")
  parser.add_option("-A","--authors",dest="authorfile",
      help="Read authormap from AUTHORFILE")
  parser.add_option("-B","--branches",dest="branchesfile",
      help="Read branch map from BRANCHESFILE")
  parser.add_option("-T","--tags",dest="tagsfile",
      help="Read tags map from TAGSFILE")
  parser.add_option("-f","--force",action="store_true",dest="force",
      default=False,help="Ignore validation errors by force")
  parser.add_option("-M","--default-branch",dest="default_branch",
      help="Set the default branch")
  parser.add_option("-o","--origin",dest="origin_name",
      help="use <name> as namespace to track upstream")
  parser.add_option("--hg-hash",action="store_true",dest="notes",
      default=False,help="Annotate commits with the hg hash as git notes in the hg namespace")
  parser.add_option("-e",dest="encoding",
      help="Assume commit and author strings retrieved from Mercurial are encoded in <encoding>")
  parser.add_option("--fe",dest="fn_encoding",
      help="Assume file names from Mercurial are encoded in <filename_encoding>")
  # NOTE(review): this option lacks action="store_true", so as written it
  # consumes a value argument rather than acting as a flag -- confirm the
  # intended CLI before changing it.
  parser.add_option("--mappings-are-raw",dest="raw_mappings", default=False,
      help="Assume mappings are raw <key>=<value> lines")
  parser.add_option("--filter-contents",dest="filter_contents",
      help="Pipe contents of each exported file through FILTER_CONTENTS <file-path> <hg-hash> <is-binary>")
  parser.add_option("--plugin-path", type="string", dest="pluginpath",
      help="Additional search path for plugins ")
  parser.add_option("--plugin", action="append", type="string", dest="plugins",
      help="Add a plugin with the given init string <name=init>")
  parser.add_option("--subrepo-map", type="string", dest="subrepo_map",
      help="Provide a mapping file between the subrepository name and the submodule name")
  (options,args)=parser.parse_args()
  m=-1
  if options.max!=None: m=options.max
  # the four state files and the repo URL are mandatory
  if options.marksfile==None: bail(parser,'--marks')
  if options.mappingfile==None: bail(parser,'--mapping')
  if options.headsfile==None: bail(parser,'--heads')
  if options.statusfile==None: bail(parser,'--status')
  if options.repourl==None: bail(parser,'--repo')
  # Always bind the module-level submodule mapping so hg2git() can
  # reference it; previously it was only assigned when --subrepo-map was
  # given, which made hg2git() raise NameError without that option.
  submodule_mappings={}
  if options.subrepo_map:
    if not os.path.exists(options.subrepo_map):
      sys.stderr.write('Subrepo mapping file not found %s\n'
                       % options.subrepo_map)
      sys.exit(1)
    submodule_mappings=load_mapping('subrepo mappings',
                                    options.subrepo_map,False)
  a={}
  if options.authorfile!=None:
    a=load_mapping('authors', options.authorfile, options.raw_mappings)
  b={}
  if options.branchesfile!=None:
    b=load_mapping('branches', options.branchesfile, options.raw_mappings)
  t={}
  if options.tagsfile!=None:
    t=load_mapping('tags', options.tagsfile, True)
  if options.default_branch!=None:
    set_default_branch(options.default_branch)
  if options.origin_name!=None:
    set_origin_name(options.origin_name)
  encoding=''
  if options.encoding!=None:
    encoding=options.encoding
  fn_encoding=encoding
  if options.fn_encoding!=None:
    fn_encoding=options.fn_encoding
  # collect plugin init strings; --filter-contents is sugar for the
  # shell_filter_file_contents plugin
  plugins=[]
  if options.plugins!=None:
    plugins+=options.plugins
  if options.filter_contents!=None:
    plugins+=['shell_filter_file_contents='+options.filter_contents]
  plugins_dict={}
  plugins_dict['commit_message_filters']=[]
  plugins_dict['file_data_filters']=[]
  if plugins and options.pluginpath:
    sys.stderr.write('Using additional plugin path: ' + options.pluginpath + '\n')
  for plugin in plugins:
    split = plugin.split('=')
    name, opts = split[0], '='.join(split[1:])
    i = pluginloader.get_plugin(name,options.pluginpath)
    sys.stderr.write('Loaded plugin ' + i['name'] + ' from path: ' + i['path'] +' with opts: ' + opts + '\n')
    plugin = pluginloader.load_plugin(i).build_filter(opts)
    if hasattr(plugin,'file_data_filter') and callable(plugin.file_data_filter):
      plugins_dict['file_data_filters'].append(plugin.file_data_filter)
    if hasattr(plugin, 'commit_message_filter') and callable(plugin.commit_message_filter):
      plugins_dict['commit_message_filters'].append(plugin.commit_message_filter)
  sys.exit(hg2git(options.repourl,m,options.marksfile,options.mappingfile,
                  options.headsfile, options.statusfile,
                  authors=a,branchesmap=b,tagsmap=t,
                  sob=options.sob,force=options.force,hgtags=options.hgtags,
                  notes=options.notes,encoding=encoding,fn_encoding=fn_encoding,
                  plugins=plugins_dict))
| 35.428571 | 128 | 0.66645 |
34ddb058d1c7eba39b868915bb007f584e143bac | 595 | py | Python | rdsctrl.py | migueltorroja/rds-sdr | a8553d2c620881734b1b90b3a089455e21b2004d | [
"MIT"
] | null | null | null | rdsctrl.py | migueltorroja/rds-sdr | a8553d2c620881734b1b90b3a089455e21b2004d | [
"MIT"
] | 1 | 2015-12-15T07:06:12.000Z | 2015-12-15T07:07:19.000Z | rdsctrl.py | migueltorroja/rds-sdr | a8553d2c620881734b1b90b3a089455e21b2004d | [
"MIT"
] | null | null | null | #! /usr/bin/python
import argparse
import sys
from sourcefile import sourcefile
from plotsamples import plotsamples
if __name__=='__main__':
    # Parse the capture-file argument and dump basic stream information.
    parser = argparse.ArgumentParser(description='rds control options')
    parser.add_argument('-f', dest='filename',help="file name with the sampled data")
    parsed_args=parser.parse_args(sys.argv[1:])
    if parsed_args.filename is None:
        # a capture file is mandatory
        sys.exit(1)
    src=sourcefile(parsed_args.filename)
    pltsamp=plotsamples(src)
    src.read_samples()
    # print() is valid on both Python 2 and 3; the previous bare
    # 'print expr' statements were Python 2 only syntax.
    print(src.get_samples_types())
    print(src.get_sampling_rate())
    print(src.get_osc_freq())
| 31.315789 | 85 | 0.739496 |
7b8885911f4f98e5e942cc785d04d7b8f30461ac | 868 | py | Python | tutorial/colour_grey.py | LloydTao/generativepy | 8bf6afed57200cbebd3163e4fdc730fc8761e753 | [
"MIT"
] | 58 | 2019-06-15T16:09:28.000Z | 2022-03-25T03:24:26.000Z | tutorial/colour_grey.py | LloydTao/generativepy | 8bf6afed57200cbebd3163e4fdc730fc8761e753 | [
"MIT"
] | 1 | 2021-09-09T16:12:18.000Z | 2021-09-09T18:13:05.000Z | tutorial/colour_grey.py | LloydTao/generativepy | 8bf6afed57200cbebd3163e4fdc730fc8761e753 | [
"MIT"
] | 4 | 2020-07-26T10:54:19.000Z | 2021-11-17T17:24:13.000Z | # Author: Martin McBride
# Created: 2021-04-19
# Copyright (C) 2021, Martin McBride
# License: MIT
from generativepy.drawing import make_image, setup
from generativepy.color import Color
from generativepy.geometry import Rectangle
def draw_grey(ctx, pixel_width, pixel_height, frame_no, frame_count):
    """Draw five grey swatches (black to white in 0.25 steps) in a row on
    a cornflower-blue background; matches the generativepy draw-callback
    signature."""
    setup(ctx, pixel_width, pixel_height, background=Color('cornflowerblue'))
    swatch_w = 100
    swatch_h = 100
    x = 10
    y = 10
    # a single numeric argument to Color selects a grey level
    for grey in (0, 0.25, 0.5, 0.75, 1):
        Rectangle(ctx).of_corner_size((x, y), swatch_w, swatch_h).fill(Color(grey))
        x += swatch_w
make_image("colour-grey.png", draw_grey, 520, 120) | 28.933333 | 77 | 0.680876 |
e44b9513a199f28c6def42a529fb79a5f255429c | 270 | py | Python | topCoder/srms/200s/srm251/div2/elections.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | [
"WTFPL"
] | 25 | 2015-01-21T16:39:18.000Z | 2021-05-24T07:01:24.000Z | topCoder/srms/200s/srm251/div2/elections.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | [
"WTFPL"
] | 2 | 2020-09-30T19:39:36.000Z | 2020-10-01T17:15:16.000Z | topCoder/srms/200s/srm251/div2/elections.py | ferhatelmas/algo | a7149c7a605708bc01a5cd30bf5455644cefd04d | [
"WTFPL"
] | 15 | 2015-01-21T16:39:27.000Z | 2020-10-01T17:00:22.000Z | from operator import itemgetter
class Elections:
    """TopCoder SRM 251 div 2 'Elections' solver."""

    def visit(self, likelihoods):
        """Return the index of the entry with the smallest fraction of
        '1' characters (the earliest index wins ties).

        :param likelihoods: sequence of non-empty '0'/'1' strings
        """
        # The original used a Python-2-only tuple-unpacking lambda
        # (SyntaxError on Python 3); this comprehension is equivalent.
        # min() is stable, so the first minimal ratio is returned, just
        # like the original map/min pipeline.
        ratios = [
            (i, s.count("1") / float(len(s))) for i, s in enumerate(likelihoods)
        ]
        return min(ratios, key=itemgetter(1))[0]
| 22.5 | 88 | 0.507407 |
bd580938f7b6ffcf73d341223a96dd67959bb59a | 4,591 | py | Python | notebooks/knn_model_with_grouping.py | TuomoKareoja/wifi-locationing | a40878bd1dd26a2b09b0f32bdfdd7e929ad9fc86 | [
"MIT"
] | null | null | null | notebooks/knn_model_with_grouping.py | TuomoKareoja/wifi-locationing | a40878bd1dd26a2b09b0f32bdfdd7e929ad9fc86 | [
"MIT"
] | null | null | null | notebooks/knn_model_with_grouping.py | TuomoKareoja/wifi-locationing | a40878bd1dd26a2b09b0f32bdfdd7e929ad9fc86 | [
"MIT"
] | null | null | null | # %%
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from IPython.core.interactiveshell import InteractiveShell
from sklearn.metrics import make_scorer
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.neighbors import KNeighborsRegressor
from src.models.scoring import distance75
# Setting styles
# Echo every expression result in a Jupyter cell, not just the last one.
InteractiveShell.ast_node_interactivity = "all"
# Seaborn theme and default figure size for all plots below.
sns.set(style="whitegrid", color_codes=True, rc={"figure.figsize": (12.7, 9.27)})
# Single seed reused for every stochastic step so runs are reproducible.
random_state = 123
# %%
# Load the processed training and validation sets; the 'train' indicator
# column is no longer needed once the files are split.
df = pd.read_csv(os.path.join("data", "processed", "train.csv"))
df = df.drop(columns=["train"])
df_valid = pd.read_csv(os.path.join("data", "processed", "test.csv"))
df_valid = df_valid.drop(columns=["train", "spaceid", "relativeposition"])
# %% grouping the training data by location
# this drops the amount of datapoints by 95 %
# Average all observations taken at the same (building, floor, space,
# relative position) into a single fingerprint row.
df = df.groupby(
    ["buildingid", "floor", "spaceid", "relativeposition"], as_index=False
).mean()
df.drop(columns=["spaceid", "relativeposition"], inplace=True)
# %%
# Targets are the four location fields; the remaining columns
# (presumably WiFi signal strengths -- confirm against the data files)
# are the features.
X = df.drop(columns=["longitude", "latitude", "floor", "buildingid"])
y = pd.DataFrame(
    {
        "lon": df.longitude,
        "lat": df.latitude,
        "floor": df.floor,
        "building": df.buildingid,
    }
)
X_valid = df_valid.drop(columns=["longitude", "latitude", "floor", "buildingid"])
y_valid = pd.DataFrame(
    {
        "lon": df_valid.longitude,
        "lat": df_valid.latitude,
        "floor": df_valid.floor,
        "building": df_valid.buildingid,
    }
)
# %%
def calculate_distance(y, y_pred):
    """Mean distance75 error between true targets and predictions.

    Wraps the project's distance75 metric so it reduces to the single
    scalar that sklearn's make_scorer expects.
    """
    return np.mean(distance75(y, y_pred))
# Wrap as an sklearn scorer; greater_is_better=False because the distance
# error should be minimized (sklearn negates the score internally).
distance_scorer = make_scorer(calculate_distance, greater_is_better=False)
# %% Optimizing hyperparameters
def squared_distance(weights):
    """Turn KNN neighbor distances into normalized inverse-square weights.

    :param weights: array of neighbor distances, one row per observation
        (the shape sklearn passes to a KNeighborsRegressor ``weights``
        callable -- confirm against the sklearn docs)
    :returns: list with one weight array per row, each row summing to 1
    """
    weights = np.asarray(weights, dtype=float)
    # Replace exact zeros with machine epsilon so 1/d**2 stays finite.
    # np.where builds a new array: the previous in-place assignment
    # mutated the caller's distance array as a side effect.
    weights = np.where(weights == 0, np.finfo(float).eps, weights)
    return [
        (1 / weights_obs ** 2) / np.sum(1 / weights_obs ** 2)
        for weights_obs in weights
    ]
# Hyperparameter search space: neighbour count, weighting scheme
# (including the custom inverse-square weights above) and distance metric.
param_grid = {
    "n_neighbors": [1, 2, 3],
    "weights": ["uniform", "distance", squared_distance],
    "metric": ["euclidean", "manhattan"],
}
# there might be some inherent order in the dataset
# so shuffling to get rid of this
folds = KFold(n_splits=10, shuffle=True, random_state=random_state)
knn_model = KNeighborsRegressor()
# Exhaustive grid search scored with the custom distance75 scorer,
# parallelised on all but one core (n_jobs=-2).
param_search = GridSearchCV(
    knn_model, param_grid, scoring=distance_scorer, n_jobs=-2, cv=folds, verbose=2
)
param_search.fit(X, y)
print("Best Params:")
print(param_search.best_params_)
print("Best CV Score:")
# the scorer is negated (greater_is_better=False); flip the sign back
print(-param_search.best_score_)
best_params = param_search.best_params_
# %% Training the model with full data and optimized hyperparameters
knn_model = KNeighborsRegressor(**best_params)
knn_model.fit(X, y)
pred = knn_model.predict(X_valid)
# Prediction columns follow the y frame order: lon, lat, floor, building.
pred_lon = pred[:, 0]
pred_lat = pred[:, 1]
# Floor and building are categorical; round the regression output.
pred_floor = np.round(pred[:, 2], decimals=0)
pred_building = np.round(pred[:, 3], decimals=0)
distance = distance75(y_valid, pred)
score = np.mean(distance)
# Mean absolute coordinate errors plus floor/building accuracies (in %).
lon_score = np.mean(np.absolute(pred_lon - y_valid.lon))
lat_score = np.mean(np.absolute(pred_lat - y_valid.lat))
right_floor = np.round(np.mean(pred_floor == y_valid.floor) * 100, 2)
right_building = np.round(np.mean(pred_building == y_valid.building) * 100, 2)
predictions = pd.DataFrame(
    {
        "LATITUDE": pred_lat,
        "LONGITUDE": pred_lon,
        "FLOOR": pred_floor,
        "distance": distance,
    }
)
true_values = pd.DataFrame(
    {
        "LATITUDE": y_valid.lat,
        "LONGITUDE": y_valid.lon,
        "FLOOR": y_valid.floor,
        "distance": distance,
    }
)
# %%
print(f"Mean error in distance75: {score}")
print(f"Latitude error: {lat_score} %")
print(f"Longitude error: {lon_score} %")
print(f"Floors correct: {right_floor} %")
print(f"Building correct: {right_building} %")
# One predicted-vs-true scatter-plot pair per floor, coloured by error.
for floor in sorted(predictions.FLOOR.unique()):
    fig, ax = plt.subplots()
    sns.scatterplot(
        x="LONGITUDE",
        y="LATITUDE",
        hue="distance",
        ax=ax,
        s=100,
        data=predictions[predictions["FLOOR"] == int(floor)],
    )
    ax.set_aspect(aspect="equal")
    plt.title(f"Predictions Floor {int(floor)}")
    plt.show()
    fig, ax = plt.subplots()
    sns.scatterplot(
        x="LONGITUDE",
        y="LATITUDE",
        hue="distance",
        s=100,
        data=true_values[true_values["FLOOR"] == int(floor)],
        ax=ax,
    )
    ax.set_aspect(aspect="equal")
    plt.title(f"Real Values Floor {int(floor)}")
    plt.show()
# %% distribution of the errors
predictions.distance.hist(bins=100)
# %%
| 24.550802 | 88 | 0.669353 |
6841dcee25309c101e985aac07dc8a81b63554bc | 1,189 | py | Python | src/python/dart/engine/redshift/command/unload.py | RetailMeNotSandbox/dart | 58a05f56c04fadd6741501262d92aeb143cd2f2e | [
"MIT"
] | 18 | 2016-03-03T19:10:21.000Z | 2021-07-14T22:37:35.000Z | src/python/dart/engine/redshift/command/unload.py | RetailMeNotSandbox/dart | 58a05f56c04fadd6741501262d92aeb143cd2f2e | [
"MIT"
] | 62 | 2016-04-11T15:17:23.000Z | 2017-09-08T17:18:53.000Z | src/python/dart/engine/redshift/command/unload.py | RetailMeNotSandbox/dart | 58a05f56c04fadd6741501262d92aeb143cd2f2e | [
"MIT"
] | 15 | 2016-03-03T15:38:34.000Z | 2019-03-27T19:33:08.000Z | from datetime import datetime
from dart.engine.redshift.admin.utils import lookup_credentials, sanitized_query
from dart.util.strings import substitute_date_tokens
def unload_to_s3(action, conn):
""" :type action: dart.model.action.Action """
args = action.data.args
aws_access_key_id, aws_secret_access_key, security_token = lookup_credentials(action)
sql = """
UNLOAD ('{statement}') TO '{s3_path}'
CREDENTIALS 'aws_access_key_id={aws_access_key_id};aws_secret_access_key={aws_secret_access_key}{token}'
ALLOWOVERWRITE
NULL AS 'NULL'
ESCAPE
DELIMITER '{delimiter}'
PARALLEL {parallel}
GZIP;
""".format(
statement=sanitized_query(args['source_sql_statement'].replace("'", "''")),
s3_path=substitute_date_tokens(args['destination_s3_path'], datetime.utcnow()),
delimiter=args['delimiter'] if args.get('delimiter') else '\t',
parallel='ON' if args['parallel'] else 'OFF',
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
token=';token=%s' % security_token if security_token else '',
)
conn.execute(sql)
| 41 | 112 | 0.688814 |
54006dc0454c028228ee5293f72d045f40e704ad | 595 | py | Python | S11/S11L65_LOTTO.py | Tooopik/Kurs_Python | 5c8f040467dae5f9b90ff6a85550708ac4d88549 | [
"Unlicense"
] | null | null | null | S11/S11L65_LOTTO.py | Tooopik/Kurs_Python | 5c8f040467dae5f9b90ff6a85550708ac4d88549 | [
"Unlicense"
] | null | null | null | S11/S11L65_LOTTO.py | Tooopik/Kurs_Python | 5c8f040467dae5f9b90ff6a85550708ac4d88549 | [
"Unlicense"
] | null | null | null | import random
def chose_random_numbers(amount=1, total_amount=1):
if amount <= total_amount:
numbers = []
x = 0
while x < amount:
number = random.randint(1, total_amount)
if number not in numbers:
numbers.append(number)
x = x + 1
return numbers
else:
return False
def chose_random_numbers_2(amount=1, total_amount=1):
if amount <= total_amount:
return random.sample(range(1, total_amount + 1), amount)
else:
return False
print(chose_random_numbers_2(6, 49))
| 20.517241 | 64 | 0.589916 |
f0dd6e312899c1ac89756cac9ca4f307c9545cda | 8,226 | py | Python | pymodbus/bit_read_message.py | exmachina-dev/pymodbus | 60a7266d32db75786b9489513c85a373cf950a3e | [
"W3C"
] | 6 | 2017-11-02T19:40:07.000Z | 2021-06-19T17:19:08.000Z | pymodbus/bit_read_message.py | sjhilt/pymodbus | 5e57d003c01e6eff44cd354c03a686ff95eaf8a9 | [
"W3C"
] | null | null | null | pymodbus/bit_read_message.py | sjhilt/pymodbus | 5e57d003c01e6eff44cd354c03a686ff95eaf8a9 | [
"W3C"
] | 3 | 2019-12-04T14:38:49.000Z | 2020-11-19T02:41:38.000Z | """
Bit Reading Request/Response messages
--------------------------------------
"""
import struct
from pymodbus.pdu import ModbusRequest
from pymodbus.pdu import ModbusResponse
from pymodbus.pdu import ModbusExceptions as merror
from pymodbus.utilities import pack_bitstring, unpack_bitstring
class ReadBitsRequestBase(ModbusRequest):
    ''' Shared implementation for requests that read a run of bits. '''

    _rtu_frame_size = 8

    def __init__(self, address, count, **kwargs):
        ''' Create a new bit-read request.

        :param address: The start address to read from
        :param count: The number of bits after 'address' to read
        '''
        ModbusRequest.__init__(self, **kwargs)
        self.address = address
        self.count = count

    def encode(self):
        ''' Pack this request into its wire representation.

        :returns: The encoded pdu
        '''
        return struct.pack('>HH', self.address, self.count)

    def decode(self, data):
        ''' Populate this request from a packed pdu.

        :param data: The packet data to decode
        '''
        address, count = struct.unpack('>HH', data)
        self.address = address
        self.count = count

    def __str__(self):
        ''' Human readable summary of this request.

        :returns: A string representation of the instance
        '''
        return "ReadBitRequest(%d,%d)" % (self.address, self.count)
class ReadBitsResponseBase(ModbusResponse):
    ''' Shared implementation for responses that carry a run of bits. '''

    _rtu_byte_count_pos = 2

    def __init__(self, values, **kwargs):
        ''' Create a new bit-read response.

        :param values: The requested values to be returned
        '''
        ModbusResponse.__init__(self, **kwargs)
        # default to an empty bit list when no values were supplied
        self.bits = values if values else []

    def encode(self):
        ''' Pack this response into its wire representation.

        :returns: The encoded packet message
        '''
        payload = pack_bitstring(self.bits)
        return struct.pack(">B", len(payload)) + payload

    def decode(self, data):
        ''' Populate this response from a packed pdu.

        :param data: The packet data to decode
        '''
        self.byte_count = struct.unpack(">B", data[0])[0]
        self.bits = unpack_bitstring(data[1:])

    def setBit(self, address, value=1):
        ''' Set the bit at the given position.

        :param address: The bit to set
        :param value: The value to set the bit to (non-zero means on)
        '''
        self.bits[address] = (value != 0)

    def resetBit(self, address):
        ''' Clear the bit at the given position.

        :param address: The bit to reset
        '''
        self.setBit(address, 0)

    def getBit(self, address):
        ''' Read back the bit at the given position.

        :param address: The bit to query
        :returns: The value of the requested bit
        '''
        return self.bits[address]

    def __str__(self):
        ''' Human readable summary of this response.

        :returns: A string representation of the instance
        '''
        return "ReadBitResponse(%d)" % len(self.bits)
class ReadCoilsRequest(ReadBitsRequestBase):
    '''
    Request to read from 1 to 2000 (0x7d0) contiguous coil states from a
    remote device.  The PDU holds the zero-based starting address of the
    first coil and the number of coils to read, so coils numbered 1-16
    appear on the wire as addresses 0-15.
    '''
    function_code = 1

    def __init__(self, address=None, count=None, **kwargs):
        ''' Create a new read-coils request.

        :param address: The address to start reading from
        :param count: The number of bits to read
        '''
        ReadBitsRequestBase.__init__(self, address, count, **kwargs)

    def execute(self, context):
        ''' Run this read-coils request against a datastore.

        The count is range-checked (0x001-0x7d0) and the address span
        validated against the datastore before any values are read.

        :param context: The datastore to request from
        :returns: A ReadCoilsResponse on success, an exception response
            otherwise
        '''
        count = self.count
        if count < 1 or count > 0x7d0:
            return self.doException(merror.IllegalValue)
        if not context.validate(self.function_code, self.address, count):
            return self.doException(merror.IllegalAddress)
        return ReadCoilsResponse(
            context.getValues(self.function_code, self.address, count))
class ReadCoilsResponse(ReadBitsResponseBase):
    '''
    Response carrying coil states packed one coil per bit (1 = ON,
    0 = OFF).  The LSB of the first data byte holds the coil addressed in
    the query, with the remaining coils following toward the high-order
    bits and into subsequent bytes; when the quantity is not a multiple
    of eight the final byte is padded with zeros toward its high-order
    end.  The byte-count field gives the number of complete data bytes.
    '''
    function_code = 1

    def __init__(self, values=None, **kwargs):
        ''' Create a new read-coils response.

        :param values: The request values to respond with
        '''
        ReadBitsResponseBase.__init__(self, values, **kwargs)
class ReadDiscreteInputsRequest(ReadBitsRequestBase):
    '''
    Request to read from 1 to 2000 (0x7d0) contiguous discrete-input
    states from a remote device.  The PDU holds the zero-based starting
    address of the first input and the number of inputs to read, so
    inputs numbered 1-16 appear on the wire as addresses 0-15.
    '''
    function_code = 2

    def __init__(self, address=None, count=None, **kwargs):
        ''' Create a new read-discrete-inputs request.

        :param address: The address to start reading from
        :param count: The number of bits to read
        '''
        ReadBitsRequestBase.__init__(self, address, count, **kwargs)

    def execute(self, context):
        ''' Run this read-discrete-inputs request against a datastore.

        The count is range-checked (0x001-0x7d0) and the address span
        validated against the datastore before any values are read.

        :param context: The datastore to request from
        :returns: A ReadDiscreteInputsResponse on success, an exception
            response otherwise
        '''
        count = self.count
        if count < 1 or count > 0x7d0:
            return self.doException(merror.IllegalValue)
        if not context.validate(self.function_code, self.address, count):
            return self.doException(merror.IllegalAddress)
        return ReadDiscreteInputsResponse(
            context.getValues(self.function_code, self.address, count))
class ReadDiscreteInputsResponse(ReadBitsResponseBase):
    '''
    Response carrying discrete-input states packed one input per bit
    (1 = ON, 0 = OFF).  The LSB of the first data byte holds the input
    addressed in the query, with the remaining inputs following toward
    the high-order bits and into subsequent bytes; when the quantity is
    not a multiple of eight the final byte is padded with zeros toward
    its high-order end.  The byte-count field gives the number of
    complete data bytes.
    '''
    function_code = 2

    def __init__(self, values=None, **kwargs):
        ''' Create a new read-discrete-inputs response.

        :param values: The request values to respond with
        '''
        ReadBitsResponseBase.__init__(self, values, **kwargs)
#---------------------------------------------------------------------------#
# Exported symbols
#---------------------------------------------------------------------------#
# Public API of this module; the two base classes are not listed here
# (presumably internal -- confirm before exporting them).
__all__ = [
    "ReadCoilsRequest", "ReadCoilsResponse",
    "ReadDiscreteInputsRequest", "ReadDiscreteInputsResponse",
]
| 35.004255 | 80 | 0.651593 |
9fe9698880d23d6fee6ec935cd11e0010709c646 | 9,574 | py | Python | pdfextractor/models/article_model.py | snook9/pdf_extractor | 7a4c37db5026bb1789c5df5a76bc50718288bc1f | [
"MIT"
] | 1 | 2021-11-05T07:55:56.000Z | 2021-11-05T07:55:56.000Z | pdfextractor/models/article_model.py | snook9/pdf_extractor | 7a4c37db5026bb1789c5df5a76bc50718288bc1f | [
"MIT"
] | 8 | 2021-11-05T20:19:22.000Z | 2021-12-22T15:15:37.000Z | pdfextractor/models/article_model.py | snook9/pdf_extractor | 7a4c37db5026bb1789c5df5a76bc50718288bc1f | [
"MIT"
] | null | null | null | """
Name: PdfExporter
Authors: Jonathan CASSAING
Tool for parsing and extracting PDF file content
"""
from datetime import datetime
from pathlib import Path
import json
from multiprocessing import Process
# pdftotext is used to extract PDF content (text body)
import pdftotext
# PyPDF2 is used to extract PDF meta data
from PyPDF2 import PdfFileReader
from flask import current_app as app
from sqlalchemy import Column, Integer, String
from pdfextractor.common.base import Base
from pdfextractor.common.base import session_factory
class ArticleModel(Base):
"""Class for representing Article entity and his Data Access Object
This class can be used to persist the object in the database AND
to save the Article in a basic text file.
"""
# Table name in the database
__tablename__ = "file"
# Internal ID is used to store the real ID (in database) after the session close
internal_id = None
# ID primary key in the database
# Nota: this id is wiped after a session.close()
id = Column("id", Integer, primary_key=True)
# Status column in the database
status = Column("status", String(255))
# Date and time column in the database
date = Column("date", String(255))
# Author PDF meta data
author = Column("author", String(255))
# Creator PDF meta data
creator = Column("creator", String(255))
# Producer PDF meta data
producer = Column("producer", String(255))
# Subjet PDF meta data
subject = Column("subject", String(255))
# Title PDF meta data
title = Column("title", String(255))
# Pages count PDF meta data
number_of_pages = Column("number_of_pages", Integer)
# Raw informations PDF meta data
raw_info = Column("raw_info", String())
# Content column in the database
content = Column("content", String)
def __init__(
self: object,
status: str = None,
date: str = None,
author: str = None,
creator: str = None,
producer: str = None,
subject: str = None,
title: str = None,
number_of_pages: int = None,
raw_info: str = None,
content: str = None,
):
"""Initialize the object
Args:
status (str, optional): to force status. Defaults to None.
date (str, optional): to force date and time. Defaults to None.
author (str, optional): to force author. Defaults to None.
creator (str, optional): to force creator. Defaults to None.
producer (str, optional): to force producer. Defaults to None.
subject (str, optional): to force subject. Defaults to None.
title (str, optional): to force title. Defaults to None.
number_of_pages (int, optional): to force number_of_pages. Defaults to None.
raw_info (str, optional): to force raw_info. Defaults to None.
content (str, optional): to force content. Defaults to None.
"""
self.status = str(status)
self.date = str(date)
self.author = str(author)
self.creator = str(creator)
self.producer = str(producer)
self.subject = str(subject)
self.title = str(title)
self.number_of_pages = number_of_pages
self.raw_info = str(raw_info)
self.content = str(content)
# Configure the folder where text files will be saved
self._output_folder = Path(app.config["DATA_FOLDER"])
if False is self._output_folder.exists():
# If the folder doesn't exist, we create it
self._output_folder.mkdir()
def _persist(
self,
date: str = None,
author: str = None,
creator: str = None,
producer: str = None,
subject: str = None,
title: str = None,
number_of_pages: int = None,
raw_info: str = None,
content: str = None,
object_id: int = None,
):
"""Private method to persist/update the object in the database
Warning: this method is not thread safe and a lock could be appropriate...
Args:
date (str): date field
author (str): author field
creator (str): creator field
producer (str): producer field
subject (str): subject field
title (str): title field
number_of_pages (int): number_of_pages field
raw_info (str): raw_info field
content (str): content field
object_id (int): none for inserting a new object, otherwise - id of the object to update
"""
session = session_factory()
if object_id is None:
self.status = "PENDING"
self.date = str(date)
self.author = str(author)
self.creator = str(creator)
self.producer = str(producer)
self.subject = str(subject)
self.title = str(title)
self.number_of_pages = number_of_pages
self.raw_info = str(raw_info)
self.content = str(content)
session.add(self)
else:
article_model = session.query(ArticleModel).get(object_id)
article_model.status = "SUCCESS"
article_model.date = str(date)
article_model.author = str(author)
article_model.creator = str(creator)
article_model.producer = str(producer)
article_model.subject = str(subject)
article_model.title = str(title)
article_model.number_of_pages = number_of_pages
article_model.raw_info = str(raw_info)
article_model.content = str(content)
session.commit()
# We save the ID cause it will wiped after the session.close()
self.internal_id = self.id
session.close()
return self.internal_id
    def _async_extract_and_persist(self, filename: Path, object_id: int):
        """Private method to extract then update a PDF object in the database

        You must use persist() without parameter before,
        to get the id of your future line in the database.

        Args:
            filename (str): filename of the target file
            object_id (int): id of the database line to update

        Returns:
            int: ID of the persisted object in the database.
        """
        # This timestamp doubles as the unique output filename suffix and as
        # the "date" value persisted for the article below.
        today = datetime.today().strftime("%Y-%m-%d-%H-%M-%S.%f")
        # Create a unique filename
        output_filepath = self._output_folder / Path("file_" + today + ".txt")
        with open(filename, "rb") as file:
            # Extracting the text (content); the pages are joined below, so
            # `data` is presumably an iterable of per-page strings.
            data = pdftotext.PDF(file)
            # Extracting meta data
            pdf = PdfFileReader(file)
            info = pdf.getDocumentInfo()
            number_of_pages = pdf.getNumPages()
            author = info.author
            creator = info.creator
            producer = info.producer
            subject = info.subject
            title = info.title
        with open(output_filepath, "w", encoding="utf-8") as file:
            # Saving content to a text file
            file.write("\n".join(data))
        # Saving content AND meta data to the database.
        # NOTE(review): the DB content is joined with "" while the text file
        # above uses "\n" as a separator -- confirm this is intentional.
        # NOTE(review): `info` (a metadata object) is passed as raw_info and
        # stringified downstream by _persist().
        self._persist(
            today,
            author,
            creator,
            producer,
            subject,
            title,
            number_of_pages,
            info,
            "".join(data),
            object_id
        )
        return self.internal_id
def extract_and_persist(self, filename: Path):
"""Public method to extract then persist a PDF object in the database
First, this method ask an ID for the futur line in the database, then,
this method create a process for extracting data and
persisting the object in the database.
This method returns the ID of the object in the database
which will be inserted when the process will finish.
Args:
filename (str): filename of the target file
Returns:
int: ID of the persisted object in the database,
otherwise - returns None if the file's type is not supported.
"""
if str(filename).rsplit(".", 1)[1].lower() == "pdf":
# We persist an empty object just to get the ID of the line in the database
object_id = self._persist()
process = Process(target=self._async_extract_and_persist, args=(filename, object_id))
process.start()
return object_id
return None
class ArticleEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize full ArticleModel objects."""

    # Model attributes copied verbatim into the JSON payload, in output order.
    _PLAIN_FIELDS = (
        "status",
        "date",
        "author",
        "creator",
        "producer",
        "subject",
        "title",
        "number_of_pages",
        "raw_info",
        "content",
    )

    def default(self, o):
        if not isinstance(o, ArticleModel):
            # Base class will raise the TypeError.
            return super().default(o)
        # When id is None the object was created after an INSERT query, so
        # the saved internal_id holds the table id.
        doc_id = o.internal_id if o.id is None else o.id
        payload = {"id": doc_id}
        for field_name in self._PLAIN_FIELDS:
            payload[field_name] = getattr(o, field_name)
        return payload
| 36.823077 | 100 | 0.587424 |
21f7f481571904ed760662bf114556deb4294b35 | 2,345 | py | Python | core/domain/dependency_registry.py | VictoriaRoux/oppia | 5ae2a7f0b5c85d6e28222844d22ebdbfb81923c6 | [
"Apache-2.0"
] | 3 | 2015-03-17T01:34:14.000Z | 2015-04-11T10:35:53.000Z | core/domain/dependency_registry.py | VictoriaRoux/oppia | 5ae2a7f0b5c85d6e28222844d22ebdbfb81923c6 | [
"Apache-2.0"
] | null | null | null | core/domain/dependency_registry.py | VictoriaRoux/oppia | 5ae2a7f0b5c85d6e28222844d22ebdbfb81923c6 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Registry for JavaScript library dependencies."""
__author__ = 'Sean Lip'
import itertools
import os
from extensions.dependencies import dependencies_config
import feconf
import utils
class Registry(object):
    """Registry of all JS/CSS library dependencies."""

    @classmethod
    def get_dependency_html(cls, dependency_id):
        """Returns the HTML template needed to inject this dependency in the
        client webpage.
        """
        template_path = os.path.join(
            feconf.DEPENDENCIES_TEMPLATES_DIR, '%s.html' % dependency_id)
        return utils.get_file_contents(template_path)

    @classmethod
    def get_angular_modules(cls, dependency_id):
        """Returns a list of additional modules that the main Angular module
        in the client webpage needs to depend on.
        """
        modules_lookup = (
            dependencies_config.DEPENDENCIES_TO_ANGULAR_MODULES_DICT)
        return modules_lookup.get(dependency_id, [])

    @classmethod
    def get_deps_html_and_angular_modules(cls, dependency_ids):
        """Returns data needed to load the given dependencies.

        The return value is a 2-tuple. The first element of the tuple is the
        additional HTML to insert on the page. The second element of the tuple
        is a de-duplicated list of strings, each representing an additional
        angular module that should be loaded.
        """
        unique_ids = set(dependency_ids)
        html_snippets = [
            cls.get_dependency_html(dep_id) for dep_id in unique_ids]
        module_lists = [
            cls.get_angular_modules(dep_id) for dep_id in unique_ids]
        deduplicated_modules = list(set(
            itertools.chain.from_iterable(module_lists)))
        return '\n'.join(html_snippets), deduplicated_modules
3a405cad3a894586b1be24dd676ec10a0df6fe1e | 190 | py | Python | trpp/__init__.py | DmitryBogomolov/training-plan-parser | 4193ee1ce0c880147e7d25eb83ead8e431251c72 | [
"MIT"
] | null | null | null | trpp/__init__.py | DmitryBogomolov/training-plan-parser | 4193ee1ce0c880147e7d25eb83ead8e431251c72 | [
"MIT"
] | null | null | null | trpp/__init__.py | DmitryBogomolov/training-plan-parser | 4193ee1ce0c880147e7d25eb83ead8e431251c72 | [
"MIT"
] | null | null | null | from .text_parser import parse
from .plan_renderer import render
from .processor import process, process_file
from .cli import run, DESC
# Reuse the CLI description string as the package-level docstring.
__doc__ = DESC  # pylint: disable=redefined-builtin
| 27.142857 | 51 | 0.805263 |
fc8a8c2137b28a49a695e71c6f8771506241671c | 7,196 | py | Python | chapter3/Readercoin_/test/functional/signrawtransactions.py | MyawBug/Blockchain-By-Example | 2d0495a130d1a9f91b7fb99359cbb8e9f7b9763d | [
"MIT"
] | 51 | 2018-12-14T09:09:20.000Z | 2022-03-28T03:25:45.000Z | chapter3/Readercoin_/test/functional/signrawtransactions.py | MyawBug/Blockchain-By-Example | 2d0495a130d1a9f91b7fb99359cbb8e9f7b9763d | [
"MIT"
] | 4 | 2019-08-02T18:23:17.000Z | 2022-02-12T04:33:25.000Z | chapter3/Readercoin_/test/functional/signrawtransactions.py | xiaqingdoc/--- | b15448739983b0787ffc963811294bcf44487303 | [
"MIT"
] | 42 | 2018-12-14T09:09:24.000Z | 2022-03-31T01:49:35.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Readercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction RPC."""
from test_framework.test_framework import ReadercoinTestFramework
from test_framework.util import *
class SignRawTransactionsTest(ReadercoinTestFramework):
    """Functional test for the signrawtransaction RPC.

    Exercises both a fully successful signing run and several failure modes
    (invalid script, missing scriptPubKey, already-present witnesses) against
    a single node. The txids, scripts and keys below are fixed test vectors;
    the keys are presumably regtest WIF private keys -- confirm against the
    upstream test suite before changing them.
    """
    def set_test_params(self):
        # Single fresh node; no pre-mined chain needed for raw-tx signing.
        self.setup_clean_chain = True
        self.num_nodes = 1
    def successful_signing_test(self):
        """Create and sign a valid raw transaction with one input.
        Expected results:
        1) The transaction has a complete set of signatures
        2) No script verification error occurred"""
        privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N', 'cVKpPfVKSJxKqVpE9awvXNWuLHCa5j5tiE7K6zbUSptFpTEtiFrA']
        inputs = [
            # Valid pay-to-pubkey scripts
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
             'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
            {'txid': '83a4f6a6b73660e13ee6cb3c6063fa3759c50c9b7521d0536022961898f4fb02', 'vout': 0,
             'scriptPubKey': '76a914669b857c03a5ed269d5d85a1ffac9ed5d663072788ac'},
        ]
        outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)
        # 1) The transaction has a complete set of signatures
        assert 'complete' in rawTxSigned
        assert_equal(rawTxSigned['complete'], True)
        # 2) No script verification error occurred
        assert 'errors' not in rawTxSigned
    def script_verification_error_test(self):
        """Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
        Expected results:
        3) The transaction has no complete set of signatures
        4) Two script verification errors occurred
        5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
        6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
        privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
        inputs = [
            # Valid pay-to-pubkey script
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
            # Invalid script
            {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
            # Missing scriptPubKey
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
        ]
        scripts = [
            # Valid pay-to-pubkey script
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
             'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
            # Invalid script
            {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
             'scriptPubKey': 'badbadbadbad'}
        ]
        outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        # Make sure decoderawtransaction is at least marginally sane
        decodedRawTx = self.nodes[0].decoderawtransaction(rawTx)
        for i, inp in enumerate(inputs):
            assert_equal(decodedRawTx["vin"][i]["txid"], inp["txid"])
            assert_equal(decodedRawTx["vin"][i]["vout"], inp["vout"])
        # Make sure decoderawtransaction throws if there is extra data
        assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, rawTx + "00")
        rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
        # 3) The transaction has no complete set of signatures
        assert 'complete' in rawTxSigned
        assert_equal(rawTxSigned['complete'], False)
        # 4) Two script verification errors occurred
        assert 'errors' in rawTxSigned
        assert_equal(len(rawTxSigned['errors']), 2)
        # 5) Script verification errors have certain properties
        assert 'txid' in rawTxSigned['errors'][0]
        assert 'vout' in rawTxSigned['errors'][0]
        assert 'witness' in rawTxSigned['errors'][0]
        assert 'scriptSig' in rawTxSigned['errors'][0]
        assert 'sequence' in rawTxSigned['errors'][0]
        assert 'error' in rawTxSigned['errors'][0]
        # 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
        assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
        assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
        assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
        assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
        assert not rawTxSigned['errors'][0]['witness']
        # Now test signing failure for transaction with input witnesses
        # (hex below is a fixed serialized segwit transaction test vector)
        p2wpkh_raw_tx = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000"
        rawTxSigned = self.nodes[0].signrawtransaction(p2wpkh_raw_tx)
        # 7) The transaction has no complete set of signatures
        assert 'complete' in rawTxSigned
        assert_equal(rawTxSigned['complete'], False)
        # 8) Two script verification errors occurred
        assert 'errors' in rawTxSigned
        assert_equal(len(rawTxSigned['errors']), 2)
        # 9) Script verification errors have certain properties
        assert 'txid' in rawTxSigned['errors'][0]
        assert 'vout' in rawTxSigned['errors'][0]
        assert 'witness' in rawTxSigned['errors'][0]
        assert 'scriptSig' in rawTxSigned['errors'][0]
        assert 'sequence' in rawTxSigned['errors'][0]
        assert 'error' in rawTxSigned['errors'][0]
        # Non-empty witness checked here
        assert_equal(rawTxSigned['errors'][1]['witness'], ["304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee01", "025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357"])
        assert not rawTxSigned['errors'][0]['witness']
    def run_test(self):
        self.successful_signing_test()
        self.script_verification_error_test()
if __name__ == '__main__':
SignRawTransactionsTest().main()
| 49.972222 | 712 | 0.714008 |
f7217746e68b217cef673ded6405c62a5976ac18 | 5,365 | py | Python | Benchmarking/CM_Benchmark/basic_benchmark/rde.py | CipiOrhei/eecvf | 759fb2127c8d65a570ba2df536ff8429ccf5bdf2 | [
"MIT"
] | 1 | 2021-04-02T15:33:12.000Z | 2021-04-02T15:33:12.000Z | Benchmarking/CM_Benchmark/basic_benchmark/rde.py | CipiOrhei/eecvf | 759fb2127c8d65a570ba2df536ff8429ccf5bdf2 | [
"MIT"
] | null | null | null | Benchmarking/CM_Benchmark/basic_benchmark/rde.py | CipiOrhei/eecvf | 759fb2127c8d65a570ba2df536ff8429ccf5bdf2 | [
"MIT"
] | 1 | 2021-08-14T09:07:22.000Z | 2021-08-14T09:07:22.000Z | import math
import os
from math import log10
# noinspection PyPackageRequirements
import cv2
import numpy as np
from scipy.ndimage import distance_transform_edt
import config_main
from Utils.log_handler import log_setup_info_to_console, log_error_to_console, log_benchmark_info_to_console
from Benchmarking.Util.image_parsing import find_img_extension
from Benchmarking.Config.create_benchmark_job import set_gt_location, set_image_set, set_input_location, job_set
def rde_calc(img, img_gt, k_value):
    """
    Relative Distance Error between an edge map and its ground truth, based on
    the modified Hausdorff distance.
    Dubuisson, M.P.; Jain, A.K. A modified Hausdorff distance for object matching. IEEE ICPR 1994, 1, 566-568
    http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.1.8155&rep=rep1&type=pdf
    :param img: edge map resulting of algorithm (non-zero pixels are edges)
    :param img_gt: ground truth image (non-zero pixels are edges)
    :param k_value: order k of the distance statistic
    :return: RDE value for the image (sentinel 1000 when the edge map is empty)
    """
    # Distance of every pixel to the nearest edge pixel. Edges are non-zero,
    # so the maps are bit-inverted before the Euclidean distance transform.
    dist_gt = distance_transform_edt(np.invert(img_gt))
    dist_dc = distance_transform_edt(np.invert(img))

    cn_cd = int(np.count_nonzero(img))
    cn_gt = int(np.count_nonzero(img_gt))

    # An empty detection makes the first term undefined; keep the historical
    # sentinel value used by the benchmark.
    if cn_cd == 0:
        return 1000

    # Sum of d^k from detected pixels to the ground truth, and from
    # ground-truth pixels to the detection. Boolean-mask indexing replaces the
    # original O(H*W) Python double loops with vectorized numpy operations.
    sum_gt = float(np.sum(dist_gt[img != 0] ** k_value))
    sum_dc = float(np.sum(dist_dc[img_gt != 0] ** k_value))

    left = math.pow(sum_gt / cn_cd, 1.0 / k_value)
    # With an empty ground truth the second term stays 0, as before.
    right = math.pow(sum_dc / cn_gt, 1.0 / k_value) if cn_gt else 0.0
    return left + right
# noinspection PyPep8Naming
def run_RDE_benchmark(input_location: str, gt_location: str,
                      raw_image: str, jobs_set: list,
                      k: int):
    """
    Configure the benchmark globals and run the RDE evaluation on them.
    :param input_location: location of algorithm images
    :param gt_location: location of gt images
    :param raw_image: location of raw images
    :param jobs_set: algo sets to evaluate
    :param k: order k of the RDE metric (forwarded to rde_calc)
    :return: None
    """
    # The benchmark framework is driven by module-level configuration; set it
    # all up before triggering the actual evaluation loop.
    set_gt_location(gt_location)
    set_input_location(input_location)
    set_image_set(raw_image)
    job_set(jobs_set)
    run_CM_benchmark_RDE(k)
def run_CM_benchmark_RDE(k_value):
    """
    Compute the RDE metric for every benchmark set/image configured in
    config_main and write per-image plus per-set averages to
    <cwd>/<BENCHMARK_RESULTS>/RDEK<k>/<set>.log.
    :param k_value: order k of the RDE metric (forwarded to rde_calc)
    :return: None
    """
    log_setup_info_to_console("BENCHMARKING CM RDEK" + int(k_value).__str__())
    idx = 0
    for set in config_main.BENCHMARK_SETS:
        log_benchmark_info_to_console('Current set: {number}\{total} : {set}'.format(number=idx, total=len(config_main.BENCHMARK_SETS), set=set))
        idx += 1
        # try:
        # `if True:` preserves the indentation of the disabled try/except
        # (kept commented out above and at the bottom of this function).
        if True:
            # Write results to disk
            results_path = os.path.join(os.getcwd(), config_main.BENCHMARK_RESULTS, "RDEK" + int(k_value).__str__())
            if not os.path.exists(results_path):
                os.makedirs(results_path)
            csv = open(os.path.join(results_path, set + '.log'), "w+")
            csv.write('Per image (#, RDEK' + int(k_value).__str__() + ':\n')
            # log_benchmark_info_to_console('Per image (#, RDE):\n')
            avg = 0
            count = 0
            for file in config_main.BENCHMARK_SAMPLE_NAMES:
                # find extension of images and gt_images
                if config_main.APPL_SAVE_JOB_NAME is True:
                    img_extension = find_img_extension(os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, set + '_' + file))
                else:
                    img_extension = find_img_extension(os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, file))
                gt_extension = find_img_extension(os.path.join(config_main.BENCHMARK_GT_LOCATION, file))
                path_img_gt = os.path.join(config_main.BENCHMARK_GT_LOCATION, file + gt_extension)
                if config_main.APPL_SAVE_JOB_NAME is True:
                    path_img_al = os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, set + '_' + file + img_extension)
                else:
                    path_img_al = os.path.join(config_main.BENCHMARK_INPUT_LOCATION, set, file + img_extension)
                # Both images are loaded as grayscale edge maps.
                img_gt = cv2.cvtColor(cv2.imread(path_img_gt), cv2.COLOR_BGR2GRAY)
                img_al = cv2.cvtColor(cv2.imread(path_img_al), cv2.COLOR_BGR2GRAY)
                try:
                    val = rde_calc(img_al, img_gt, k_value)
                    avg += val
                    count += 1
                    csv.write('{:<10s} {:<10.6f}\n'.format(file, val))
                    # log_benchmark_info_to_console('{:<10s} {:<10.6f}\n'.format(file, val))
                except Exception as ex:
                    log_error_to_console("BENCHMARK CM RDEK{val}: {file}".format(val=int(k_value).__str__(), file=file), ex.__str__())
            # NOTE(review): if every image in a set fails, count stays 0 and
            # `avg / count` below raises ZeroDivisionError -- confirm whether
            # that case should be guarded.
            log_benchmark_info_to_console('RDEK{val}: {set:<10s} {cnt:<10.6f}\n'.format(val=int(k_value).__str__(), set=set, cnt=avg / count))
            csv.write('RDEK{val}: {set:<10s} {cnt:<10.6f}\n'.format(val=int(k_value).__str__(), set=set, cnt=avg / count))
        # except Exception as ex:
        #     log_error_to_console('BENCHMARK CM RDEK' + int(k_value).__str__() + 'NOK', ex.__str__())
if __name__ == "__main__":
pass
| 37.517483 | 146 | 0.608574 |
77adda9152d65dfa71a0273a7f963ecde34d291e | 1,743 | py | Python | src/003_cov_data_load.py | sebastian-konicz/covid-dashboard | 49f5baea5081afdd4af86597ed662c2be3122658 | [
"MIT"
] | null | null | null | src/003_cov_data_load.py | sebastian-konicz/covid-dashboard | 49f5baea5081afdd4af86597ed662c2be3122658 | [
"MIT"
] | null | null | null | src/003_cov_data_load.py | sebastian-konicz/covid-dashboard | 49f5baea5081afdd4af86597ed662c2be3122658 | [
"MIT"
] | null | null | null | from pathlib import Path
import pandas as pd
import time
import glob
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
def main():
    """Load all raw covid CSV files, merge and reshape them, and save the
    result as a single Excel workbook under data/interim.

    NOTE(review): the paths use raw-string backslashes, so this script
    presumably only works on Windows -- confirm before running elsewhere.
    """
    # start time of program
    start_time = time.time()
    # project directory
    project_dir = str(Path(__file__).resolve().parents[1])
    file_path = project_dir + r'\data\raw\covid_data'
    # empty file list
    cov_files = []
    # reading all files in folder
    print('reading all covid data from folder')
    for each_file in glob.glob(file_path + r'\*.csv'):
        df = pd.read_csv(each_file, encoding_errors='ignore', sep=';')
        cov_files.append(df)
    # concatenating all data
    print('file concatenation')
    data = pd.concat(cov_files, ignore_index=True)
    # data transformation
    # restricting dataframe to necessary columns
    data_transf = data[['teryt', 'powiat_miasto', 'liczba_przypadkow', 'zgony', 'stan_rekordu_na']].copy()
    # renaming columns
    data_transf.rename(columns={'powiat_miasto': 'powiat', 'liczba_przypadkow': "zarazenia", 'stan_rekordu_na': 'data'}, inplace=True)
    # changing teryt code: drop the leading character of the stringified code.
    # NOTE(review): this runs before fillna, so a NaN teryt becomes the string
    # 'an' (str(nan)[1:]) instead of 0 -- confirm this is intended.
    data_transf['teryt'] = data_transf['teryt'].apply(lambda x: str(x)[1:])
    # filling nan values
    data_transf.fillna(value=0, inplace=True)
    print('saving covid data - all')
    data_save_path = r'\data\interim\covid_data\covid_county_all'
    data_transf.to_excel(project_dir + data_save_path + '.xlsx', index=False)
    # data_transf.to_csv(project_dir + data_save_path + '.csv', index=False)
    # end time of program + duration
    end_time = time.time()
    execution_time = int(end_time - start_time)
    print('\n', 'exectution time = ', execution_time, 'sec')
main() | 32.277778 | 134 | 0.686747 |
bba3322fa50f4cca83a610494d310396b1a7a3e2 | 2,368 | py | Python | language/boolq/utils/ops_test.py | naveenjafer/language | efc5183855a7aeecac3e81fe12ce60fc824f8ca7 | [
"Apache-2.0"
] | 2 | 2020-09-30T11:52:51.000Z | 2020-09-30T12:07:41.000Z | language/boolq/utils/ops_test.py | naveenjafer/language | efc5183855a7aeecac3e81fe12ce60fc824f8ca7 | [
"Apache-2.0"
] | 2 | 2021-08-21T03:46:28.000Z | 2022-02-10T08:16:56.000Z | language/boolq/utils/ops_test.py | naveenjafer/language | efc5183855a7aeecac3e81fe12ce60fc824f8ca7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from language.boolq.utils import ops
import tensorflow.compat.v1 as tf
class OpsTest(tf.test.TestCase):
  """Tests for the ops helpers (lowercase_op and bucket_by_quantiles),
  written against the TF1 session API via tf.compat.v1."""

  def test_lowercase(self):
    # ASCII strings (including symbols and the empty string) must match
    # Python's str.lower element-wise.
    with self.test_session() as sess:
      test_str = [["Abc%@||", "DZ dzD", ""]]
      self.assertEqual(
          sess.run(ops.lowercase_op(tf.convert_to_tensor(test_str))).tolist(),
          [[x.lower() for x in test_str[0]]])

  def test_lowercase_unicode(self):
    # Non-ASCII (Latin Extended / Cyrillic) characters must also be lowered.
    with self.test_session() as sess:
      test_str = ["ŠČŽɬЩЮɦ"]
      self.assertEqual(
          sess.run(ops.lowercase_op(tf.convert_to_tensor(test_str))).tolist(),
          [test_str[0].lower()])

  def test_bucket_by_quantiles(self):
    with self.test_session() as sess:
      # Elements 0..9 repeated forever; their value doubles as their "length".
      data = tf.data.Dataset.from_tensor_slices(list(range(10))).repeat()
      data = data.apply(ops.bucket_by_quantiles(
          len_fn=lambda x: x, batch_size=4, n_buckets=2,
          hist_bounds=[2, 4, 6, 8]))
      it = data.make_initializable_iterator()
      sess.run(it.initializer)
      sess.run(tf.local_variables_initializer())
      next_op = it.get_next()

      # Let the model gather statistics, it sees 4*5=20 = 2 epochs,
      # so each bin should have a count of 4
      for _ in range(5):
        sess.run(next_op)
      # The histogram is stored as a cumulative-count local variable.
      counts = sess.run(tf.local_variables()[0])
      self.assertEqual(counts.tolist(), [4, 8, 12, 16, 20])

      # At this point the model should perfectly quantize the input
      for _ in range(4):
        out = sess.run(next_op)
        if out[0] < 5:
          self.assertAllInRange(out, 0, 5)
        else:
          self.assertAllInRange(out, 5, 10)
if __name__ == "__main__":
tf.test.main()
| 33.828571 | 78 | 0.67652 |
c90d4a3848fcfbc7849084f4f7ca42ec6be87ee9 | 2,523 | py | Python | examples/mnist_logreg.py | Duane321/pyprobml | 6d0ba29f22dc7fec9dfc73788bc5520e97663bdb | [
"MIT"
] | null | null | null | examples/mnist_logreg.py | Duane321/pyprobml | 6d0ba29f22dc7fec9dfc73788bc5520e97663bdb | [
"MIT"
] | null | null | null | examples/mnist_logreg.py | Duane321/pyprobml | 6d0ba29f22dc7fec9dfc73788bc5520e97663bdb | [
"MIT"
] | null | null | null | '''Logistic regression classifier on mnist.
Borrows some code from
# http://scikit-learn.org/stable/auto_examples/neural_networks/plot_mnist_filters.html
# http://scikit-learn.org/stable/auto_examples/classification/plot_digits_classification.html
'''
import matplotlib.pyplot as plt
import numpy as np
import os
from sklearn import linear_model
from sklearn import metrics
from timeit import default_timer as timer
from examples import get_mnist
from utils.util import save_fig
# Load MNIST via the project helper (arrays of shape (n, 28, 28) uint8).
(x_train, y_train, x_test, y_test) = get_mnist.get_mnist()
#(x_train, y_train, x_test, y_test) = get_mnist()
'''
# Sanity check
import keras
from keras.datasets import mnist
(x_train_k, y_train_k), (x_test_k, y_test_k) = mnist.load_data()
np.array_equal(x_train, x_train_k)
# x_train[0,-4]
'''
ntrain = x_train.shape[0] #60k
ntest = x_test.shape[0] # 10k
num_classes = len(np.unique(y_train)) # 10
ndims = x_train.shape[1] * x_train.shape[2] # 28*28=784
# Preprocess data: flatten each image to a 784-vector and scale to [0, 1].
x_train = x_train.reshape(ntrain, ndims)
x_test = x_test.reshape(ntest, ndims)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# Multinomial (softmax) logistic regression with weak regularization (C=1e5);
# SAG with max_iter=10 is deliberately cheap and may not converge (see the
# captured log below).
print('starring training')
classifier = linear_model.LogisticRegression(C=1e5, multi_class='multinomial',
                                             solver='sag', max_iter=10, verbose=1)
start = timer()
classifier.fit(x_train, y_train)
end = timer()
print('Training took {:f} seconds'.format(end - start))
'''
starring training
max_iter reached after 25 seconds
Training took 25.566608 seconds
Accuracy on test set 0.924600
/Users/kpmurphy/Library/Enthought/Canopy/edm/envs/User/lib/python3.5/site-packages/sklearn/linear_model/sag.py:286: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
 "the coef_ did not converge", ConvergenceWarning)
[Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 25.2s finished
'''
# Evaluate on the held-out test set.
predicted = classifier.predict(x_test)
expected = y_test
acc = metrics.accuracy_score(expected, predicted)
misclassified_ndx = np.argwhere(predicted != expected)
nerrors = len(misclassified_ndx)
print("Performance on test set. Accuracy {:f}, nerrors {:d}".format(acc, nerrors))
# Show first 9 images (switch j to the commented line to show errors instead).
for i in range(9):
    plt.subplot(3, 3, i+1)
    plt.axis('off')
    j = i
    #j = int(misclassified_ndx[i])
    image = np.reshape(x_test[j], [28, 28])
    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
    ttl = 'True {:d}, Pred: {:d}'.format(expected[j], predicted[j])
    plt.title(ttl)
plt.tight_layout()
save_fig('mnist_logreg.pdf')
plt.show()
9fe5accdf9e30b207982e0cc85f53185e5953600 | 630 | py | Python | bigpicture/manage.py | dchaplinsky/big_picture | e8b9a318bfadf535dc410dbbd3d5a2cf3fcec4e7 | [
"MIT"
] | 1 | 2020-03-06T02:32:08.000Z | 2020-03-06T02:32:08.000Z | bigpicture/manage.py | dchaplinsky/big_picture | e8b9a318bfadf535dc410dbbd3d5a2cf3fcec4e7 | [
"MIT"
] | 6 | 2021-03-19T00:23:45.000Z | 2022-03-12T00:17:27.000Z | bigpicture/manage.py | dchaplinsky/big_picture | e8b9a318bfadf535dc410dbbd3d5a2cf3fcec4e7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bigpicture.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.636364 | 74 | 0.684127 |
047092c64cf8b6cd38d11aa58e55a2d0eb81c385 | 6,764 | py | Python | Detection_and_control/ASUM-GUI-with-detection-and-control/data/voc0712.py | kebijuelun/ASUM | 125a0d5db76e6adce360ee897f2a3c3445f7d138 | [
"MIT"
] | 2 | 2021-12-12T09:35:43.000Z | 2021-12-21T15:38:02.000Z | Detection_and_control/ASUM-GUI-with-detection-and-control/data/voc0712.py | kebijuelun/ASUM | 125a0d5db76e6adce360ee897f2a3c3445f7d138 | [
"MIT"
] | null | null | null | Detection_and_control/ASUM-GUI-with-detection-and-control/data/voc0712.py | kebijuelun/ASUM | 125a0d5db76e6adce360ee897f2a3c3445f7d138 | [
"MIT"
] | null | null | null | """VOC Dataset Classes
Original author: Francisco Massa
https://github.com/fmassa/vision/blob/voc_dataset/torchvision/datasets/voc.py
Updated by: Ellis Brown, Max deGroot
"""
from .config import HOME
import os.path as osp
import sys
import torch
import torch.utils.data as data
import cv2
import numpy as np
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
# VOC_CLASSES = ( # always index 0
# 'aeroplane', 'bicycle', 'bird', 'boat',
# 'bottle', 'bus', 'car', 'cat', 'chair',
# 'cow', 'diningtable', 'dog', 'horse',
# 'motorbike', 'person', 'pottedplant',
# 'sheep', 'sofa', 'train', 'tvmonitor')
VOC_CLASSES = ("ultrathin_section", "left_baffle", "right_baffle") # always index 0
# note: if you used our download scripts, this should be right
VOC_ROOT = osp.join(HOME, "data/VOCdevkit/")
class VOCAnnotationTransform(object):
    """Converts a VOC XML annotation into a list of normalized bounding
    boxes with class indices.

    Arguments:
        class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
            (default: alphabetic indexing of the VOC_CLASSES tuple)
        keep_difficult (bool, optional): keep difficult instances or not
            (default: False)
    """

    # Bounding-box corner tags in the order they are emitted; even positions
    # are x coordinates (scaled by width), odd ones y (scaled by height).
    _CORNERS = ("xmin", "ymin", "xmax", "ymax")

    def __init__(self, class_to_ind=None, keep_difficult=False):
        self.class_to_ind = class_to_ind or dict(
            zip(VOC_CLASSES, range(len(VOC_CLASSES)))
        )
        self.keep_difficult = keep_difficult

    def __call__(self, target, width, height):
        """
        Arguments:
            target (annotation): the parsed XML root of one annotation
            width (int): image width used to normalize x coordinates
            height (int): image height used to normalize y coordinates
        Returns:
            a list of [xmin, ymin, xmax, ymax, label_ind] per kept object,
            with coordinates scaled into [0, 1]
        """
        boxes = []
        for obj in target.iter("object"):
            is_difficult = int(obj.find("difficult").text) == 1
            if is_difficult and not self.keep_difficult:
                continue
            label = obj.find("name").text.lower().strip()
            bbox_node = obj.find("bndbox")
            entry = []
            for axis, corner in enumerate(self._CORNERS):
                pixel = int(bbox_node.find(corner).text) - 1
                scale = width if axis % 2 == 0 else height
                entry.append(pixel / scale)
            entry.append(self.class_to_ind[label])
            boxes.append(entry)
        return boxes
class VOCDetection(data.Dataset):
    """VOC Detection Dataset Object

    input is image, target is annotation

    Arguments:
        root (string): filepath to VOCdevkit folder.
        image_set (string): imageset to use (eg. 'train', 'val', 'test')
        transform (callable, optional): transformation to perform on the
            input image
        target_transform (callable, optional): transformation to perform on the
            target `annotation`
            (eg: take in caption string, return tensor of word indices)
        dataset_name (string, optional): which dataset to load
            (default: 'VOC2007')
    """
    # NOTE(review): both `image_sets` (a mutable list default) and the shared
    # `VOCAnnotationTransform()` instance are evaluated once at definition
    # time; this is safe only as long as they are never mutated -- confirm.
    def __init__(
        self,
        root,
        # image_sets=[('2007', 'trainval'), ('2012', 'trainval')],
        image_sets=[("2007", "trainval")],
        transform=None,
        target_transform=VOCAnnotationTransform(),
        dataset_name="VOC0712",
    ):
        self.root = root
        self.image_set = image_sets
        self.transform = transform
        self.target_transform = target_transform
        self.name = dataset_name
        # %-style path templates filled with (rootpath, image_id) tuples.
        self._annopath = osp.join("%s", "Annotations", "%s.xml")
        self._imgpath = osp.join("%s", "JPEGImages", "%s.jpg")
        self.ids = list()
        # Collect (rootpath, image_id) pairs from every requested image set.
        for (year, name) in image_sets:
            rootpath = osp.join(self.root, "VOC" + year)
            for line in open(osp.join(rootpath, "ImageSets", "Main", name + ".txt")):
                self.ids.append((rootpath, line.strip()))
    def __getitem__(self, index):
        im, gt, h, w = self.pull_item(index)
        return im, gt
    def __len__(self):
        return len(self.ids)
    def pull_item(self, index):
        """Returns (image tensor CHW, target boxes, height, width) for the
        sample at `index`, applying both transforms when configured."""
        img_id = self.ids[index]
        target = ET.parse(self._annopath % img_id).getroot()
        img = cv2.imread(self._imgpath % img_id)
        height, width, channels = img.shape
        if self.target_transform is not None:
            target = self.target_transform(target, width, height)
        if self.transform is not None:
            target = np.array(target)
            # The transform receives boxes and labels separately and may
            # alter all three (e.g. for data augmentation).
            img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
            # to rgbMultiBoxLoss (channel reorder from cv2's BGR)
            img = img[:, :, (2, 1, 0)]
            # img = img.transpose(2, 0, 1)
            target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
        # HWC numpy image -> CHW torch tensor.
        return torch.from_numpy(img).permute(2, 0, 1), target, height, width
        # return torch.from_numpy(img), target, height, width
    def pull_image(self, index):
        """Returns the original image object at index in PIL form

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to show
        Return:
            PIL img
        """
        img_id = self.ids[index]
        return cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
    def pull_anno(self, index):
        """Returns the original annotation of image at index

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to get annotation of
        Return:
            list: [img_id, [(label, bbox coords),...]]
                eg: ('001718', [('dog', (96, 13, 438, 332))])
        """
        img_id = self.ids[index]
        anno = ET.parse(self._annopath % img_id).getroot()
        # width=height=1 keeps the pixel coordinates unnormalized.
        gt = self.target_transform(anno, 1, 1)
        return img_id[1], gt
    def pull_tensor(self, index):
        """Returns the original image at an index in tensor form

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to show
        Return:
            tensorized version of img, squeezed
        """
        return torch.Tensor(self.pull_image(index)).unsqueeze_(0)
| 35.046632 | 85 | 0.597723 |
ea3a04565e178f7ae9cd80a512f7820c910ab137 | 8,480 | py | Python | py/load_firmware.py | pengguoguo/bitbox02-firmware | d40df7b05b9998313a9e6e4222680519afa7f630 | [
"Apache-2.0"
] | null | null | null | py/load_firmware.py | pengguoguo/bitbox02-firmware | d40df7b05b9998313a9e6e4222680519afa7f630 | [
"Apache-2.0"
] | 1 | 2020-10-11T10:39:37.000Z | 2020-10-11T10:59:34.000Z | py/load_firmware.py | pengguoguo/bitbox02-firmware | d40df7b05b9998313a9e6e4222680519afa7f630 | [
"Apache-2.0"
] | 1 | 2019-09-11T13:45:15.000Z | 2019-09-11T13:45:15.000Z | #!/usr/bin/env python3
# Copyright 2019 Shift Cryptosecurity AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: document"""
import argparse
import enum
import sys
import pprint
from typing import Any, Tuple
from time import sleep
import hid
from bitboxbase import BitBoxBase, get_bitboxbase_default_device
from communication import devices, TransportLayer, u2fhid, usart
from communication.devices import TooManyFoundException, NoneFoundException
from bitbox02 import Bootloader, BitBox02
def eprint(*args: Any, **kwargs: Any) -> None:
    """
    Print to stderr by default; otherwise identical to print().

    An explicit file= keyword passed by the caller wins.
    """
    if "file" not in kwargs:
        kwargs["file"] = sys.stderr
    print(*args, **kwargs)
def _get_bitbox_and_reboot() -> devices.DeviceInfo:
    """Locate a BitBox02 running firmware, reboot it into the bootloader,
    and block until the bootloader device enumerates."""
    device = devices.get_any_bitbox02()

    # bitbox02 detected -> send command to reboot into bootloader to upgrade.
    def _show_pairing(code: str) -> bool:
        print("Please compare and confirm the pairing code on your BitBox02:")
        print(code)
        return True

    hid_device = hid.device()
    hid_device.open_path(device["path"])
    bitbox = BitBox02(
        transport=u2fhid.U2FHid(hid_device), device_info=device, show_pairing_callback=_show_pairing
    )
    bitbox.reboot()

    # Poll until the bootloader shows up after the reboot.
    while True:
        try:
            return devices.get_any_bitbox02_bootloader()
        except NoneFoundException:
            sys.stdout.write(".")
            sys.stdout.flush()
            sleep(1)
def _find_and_open_usb_bitbox02() -> Tuple[devices.DeviceInfo, TransportLayer]:
    """
    Connect to a BitBox02 bootloader over USB.

    If the BitBox02 is currently running a firmware, it is rebooted and
    this function connects to the bootloader once it shows up.
    """
    try:
        bootloader_device = devices.get_any_bitbox02_bootloader()
    except TooManyFoundException:
        eprint("Found multiple bb02 bootloader standard editions. Only one supported.")
        sys.exit(1)
    except NoneFoundException:
        # No bootloader visible yet; look for a running firmware and reboot it.
        try:
            bootloader_device = _get_bitbox_and_reboot()
        except TooManyFoundException:
            eprint("Found multiple bitboxes. Only one supported.")
            sys.exit(1)
        except NoneFoundException:
            eprint("Neither bootloader nor bitbox found.")
            sys.exit(1)

    pprint.pprint(bootloader_device)
    hid_device = hid.device()
    hid_device.open_path(bootloader_device["path"])
    return bootloader_device, u2fhid.U2FHid(hid_device)
class UsartBootloaderProbeResult(enum.Enum):
    """Result of probing the connection to a BitBoxBase bootloader.

    Returned by _try_usart_bootloader_connection() so callers can decide
    between retrying, rebooting the firmware, or aborting.
    """
    # We received a response from the device informing us that the bootloader is not running.
    NOT_AVAILABLE = "NotAvailable"
    # We successfully connected to the bootloader.
    SUCCESS = "Success"
    # We didn't receive anything from the device: something's wrong with the system.
    TIMEOUT = "Timeout"
def _try_usart_bootloader_connection(
    serial_port: usart.SerialPort, bootloader_device: devices.DeviceInfo
) -> UsartBootloaderProbeResult:
    """
    Probe the connection to a BitBoxBase bootloader over the specified
    UART port.

    Returns:
        UsartBootloaderProbeResult: SUCCESS if the bootloader answered,
        NOT_AVAILABLE if the device reported the endpoint missing,
        TIMEOUT if nothing came back at all.
    Raises:
        usart.U2FUsartErrorResponse: for any error other than
        "endpoint unavailable".
    """
    transport = usart.U2FUsart(serial_port)
    # Pre-bind so the finally-block is safe even if the constructor raises
    # (the original unconditionally called close(), which turned a
    # constructor failure into an UnboundLocalError).
    bootloader_attempt = None
    try:
        bootloader_attempt = Bootloader(transport, bootloader_device)
        bootloader_attempt.versions()
        success = UsartBootloaderProbeResult.SUCCESS
    except usart.U2FUsartErrorResponse as err:
        if err.error_code != usart.U2FUsartErrorResponse.ENDPOINT_UNAVAILABLE:
            raise
        success = UsartBootloaderProbeResult.NOT_AVAILABLE
    except usart.U2FUsartTimeoutError:
        success = UsartBootloaderProbeResult.TIMEOUT
    finally:
        if bootloader_attempt is not None:
            bootloader_attempt.close()
    return success
def _find_and_open_usart_bitbox(serial_port: usart.SerialPort) -> devices.DeviceInfo:
    """
    Connect to a BitBoxBase bootloader over UART.

    If the BitBoxBase is currently running a firmware, it will be rebooted
    and this function will connect to the bootloader when it shows up.

    Args:
        serial_port: serial port the BitBoxBase is attached to.
    Returns:
        Device info describing the connected bootloader.
    """
    print("Connecting to BitBox bootloader over UART.")
    bootloader_device: devices.DeviceInfo = get_bitboxbase_default_device(serial_port.port)
    # First, try to connect to the bootloader directly.
    bootloader_status = _try_usart_bootloader_connection(serial_port, bootloader_device)
    if bootloader_status == UsartBootloaderProbeResult.SUCCESS:
        return bootloader_device
    if bootloader_status == UsartBootloaderProbeResult.TIMEOUT:
        print("No reponse from BitBox. Maybe it's not connected properly?")
        sys.exit(1)

    # The bootloader wasn't valid, try to connect to the firmware instead.
    print("BitBox bootloader not available.")
    print("Trying to connect to BitBox firmware instead...")

    def _show_pairing(code: str) -> bool:
        print("(Pairing should be automatic) Pairing code:")
        print(code)
        return True

    # Pre-bind so the finally-block is safe even if the transport or the
    # BitBoxBase constructor raises (the original unconditionally called
    # close(), which turned such a failure into an UnboundLocalError).
    bitbox_attempt = None
    try:
        transport = usart.U2FUsart(serial_port)
        bitbox_attempt = BitBoxBase(
            transport, bootloader_device, show_pairing_callback=_show_pairing
        )
        print("Connected. Rebooting.")
        bitbox_attempt.reboot()
    except usart.U2FUsartTimeoutError:
        pass
    finally:
        if bitbox_attempt is not None:
            bitbox_attempt.close()
    print("Reboot completed.")

    # Wait for the bootloader to come back after the reboot.
    while True:
        bootloader_status = _try_usart_bootloader_connection(serial_port, bootloader_device)
        if bootloader_status == UsartBootloaderProbeResult.SUCCESS:
            return bootloader_device
        if bootloader_status == UsartBootloaderProbeResult.TIMEOUT:
            print("Waiting for the BitBox bootloader to show up...")
            sleep(1)
        else:
            print("Stuck in bitbox mode - didn't reboot properly!")
def main() -> int:
    """Parse arguments, find a BitBox bootloader, and flash the firmware.

    Returns:
        0 on success, 1 on argument errors (also exits via sys.exit on
        device-discovery failures).
    """
    parser = argparse.ArgumentParser(
        description="Tool for flashing a new firmware on BitBox devices."
    )
    parser.add_argument("--debug", action="store_true", help="Flash a debug (unsigned) firmware.")
    parser.add_argument(
        "--usart",
        action="store",
        help="Flash firmware using U2F-over-UART (BitBoxBase), with the specified serial port.",
    )
    parser.add_argument("firmware", nargs=1, help="Firmware to flash.")
    args = parser.parse_args()

    firmware_path = args.firmware[0]
    if not args.debug and ".signed.bin" not in firmware_path:
        eprint("Expecting firmware to end with '.signed.bin'")
        return 1

    if args.usart is not None:
        serial_port = usart.SerialPort(args.usart)
        bootloader_device = _find_and_open_usart_bitbox(serial_port)
        transport: TransportLayer = usart.U2FUsart(serial_port)
    else:
        bootloader_device, transport = _find_and_open_usb_bitbox02()
    bootloader = Bootloader(transport, bootloader_device)

    with open(firmware_path, "rb") as firmware_file:
        firmware = firmware_file.read()

    def progress(perc: float) -> None:
        # Rewrite the same console line with the current percentage.
        sys.stdout.write(f"{perc*100:.02f}%\r")

    if bootloader.erased():
        print("device contains NO firmware")
    else:
        print("firmware version: %d\nsigning pubkeys version: %d" % bootloader.versions())

    firmware_hash, signing_keydata_hash = bootloader.get_hashes()
    print("firmware hash:", firmware_hash.hex())
    print("signing keydata hash:", signing_keydata_hash.hex())

    if args.debug:
        bootloader.flash_unsigned_firmware(firmware, progress)
    else:
        bootloader.flash_signed_firmware(firmware, progress)
    print()  # Terminate the progress line.
    sleep(1)  # Pause to show the upgrade finished at 100%.
    bootloader.reboot()
    return 0
if __name__ == "__main__":
sys.exit(main())
| 34.897119 | 100 | 0.697759 |
a26a3f8841042b3fe1fb3e66fd31cd0a0353480c | 12,982 | py | Python | sasmodels/kernelpy.py | pkienzle/sasmodels | de3d42cb3621294b8706e55928035477790cd0ac | [
"BSD-3-Clause"
] | 11 | 2016-07-24T01:29:01.000Z | 2021-12-12T13:41:00.000Z | sasmodels/kernelpy.py | pkienzle/sasmodels | de3d42cb3621294b8706e55928035477790cd0ac | [
"BSD-3-Clause"
] | 426 | 2016-03-16T21:37:11.000Z | 2022-03-31T13:48:28.000Z | sasmodels/kernelpy.py | pkienzle/sasmodels | de3d42cb3621294b8706e55928035477790cd0ac | [
"BSD-3-Clause"
] | 28 | 2016-03-16T10:26:50.000Z | 2021-03-17T10:29:48.000Z | """
Python driver for python kernels
Calls the kernel with a vector of $q$ values for a single parameter set.
Polydispersity is supported by looping over different parameter sets and
summing the results. The interface to :class:`PyModel` matches those for
:class:`.kernelcl.GpuModel` and :class:`.kerneldll.DllModel`.
"""
from __future__ import division, print_function
import logging
import numpy as np # type: ignore
from numpy import pi
try:
from numpy import cbrt
except ImportError:
def cbrt(x):
"""Return cubed root of x."""
return x ** (1.0/3.0)
from .generate import F64
from .kernel import KernelModel, Kernel
# pylint: disable=unused-import
try:
from typing import Union, Callable, List
from .details import CallDetails
from .modelinfo import ModelInfo
except ImportError:
pass
# pylint: enable=unused-import
logger = logging.getLogger(__name__)
class PyModel(KernelModel):
    """
    Wrapper exposing a pure python model through the KernelModel API.
    """
    def __init__(self, model_info):
        # Fill in Iq/Iqxy defaults and vectorize them before first use.
        _create_default_functions(model_info)
        self.info = model_info
        self.dtype = np.dtype('d')
        logger.info("make python model %s", self.info.name)

    def make_kernel(self, q_vectors):
        """Instantiate the python kernel with input *q_vectors*"""
        return PyKernel(self.info, PyInput(q_vectors, dtype=F64))

    def release(self):
        """
        Free resources associated with the model.
        """
        pass
class PyInput(object):
    """
    Holds the q values a kernel will be evaluated at.

    *q_vectors* is a list of q vectors: *[q]* for 1-D data and *[qx, qy]*
    for 2-D data.  The vectors are copied into a freshly allocated array
    of the requested *dtype*; for 2-D data the array has shape (nq, 2).

    *dtype* is the data type for the q vectors and should match that of
    the kernel (an attribute of :class:`PyModel`).

    Call :meth:`release` when complete.  Even if not called directly, the
    buffer will be released when the data object is freed.
    """
    def __init__(self, q_vectors, dtype):
        self.nq = q_vectors[0].size
        self.dtype = dtype
        self.is_2d = (len(q_vectors) == 2)
        if self.is_2d:
            self.q = np.empty((self.nq, 2), dtype=dtype)
            self.q[:, 0] = q_vectors[0]
            self.q[:, 1] = q_vectors[1]
        else:
            self.q = np.empty(self.nq, dtype=dtype)
            self.q[:] = q_vectors[0]

    def release(self):
        """
        Free resources associated with the model inputs.
        """
        self.q = None
class PyKernel(Kernel):
    """
    Callable SAS kernel.
    *kernel* is the kernel object to call.
    *model_info* is the module information
    *q_input* is the DllInput q vectors at which the kernel should be
    evaluated.
    The resulting call method takes the *pars*, a list of values for
    the fixed parameters to the kernel, and *pd_pars*, a list of (value,weight)
    vectors for the polydisperse parameters. *cutoff* determines the
    integration limits: any points with combined weight less than *cutoff*
    will not be calculated.
    Call :meth:`release` when done with the kernel instance.
    """
    def __init__(self, model_info, q_input):
        # type: (ModelInfo, List[np.ndarray]) -> None
        self.dtype = np.dtype('d')
        self.info = model_info
        self.q_input = q_input
        self.res = np.empty(q_input.nq, q_input.dtype)
        self.dim = '2d' if q_input.is_2d else '1d'
        partable = model_info.parameters
        #kernel_parameters = (partable.iqxy_parameters if q_input.is_2d
        #                     else partable.iq_parameters)
        kernel_parameters = partable.iq_parameters
        volume_parameters = partable.form_volume_parameters
        # Create an array to hold the parameter values. There will be a
        # single array whose values are updated as the calculator goes
        # through the loop. Arguments to the kernel and volume functions
        # will use views into this vector, relying on the fact that a
        # an array of no dimensions acts like a scalar.
        parameter_vector = np.empty(len(partable.call_parameters)-2, 'd')
        # Create views into the array to hold the arguments.
        offset = 0
        kernel_args, volume_args = [], []
        for p in partable.kernel_parameters:
            if p.length == 1:
                # Scalar values are length 1 vectors with no dimensions.
                v = parameter_vector[offset:offset+1].reshape(())
            else:
                # Vector values are simple views.
                v = parameter_vector[offset:offset+p.length]
            offset += p.length
            # A parameter may feed the kernel, the volume functions, or both.
            if p in kernel_parameters:
                kernel_args.append(v)
            if p in volume_parameters:
                volume_args.append(v)
        # Hold on to the parameter vector so we can use it to call kernel later.
        # This may also be required to preserve the views into the vector.
        self._parameter_vector = parameter_vector
        # Generate a closure which calls the kernel with the views into the
        # parameter array.  Because the args are views, updating the vector
        # in _loops() updates what these closures see.
        if q_input.is_2d:
            form = model_info.Iqxy
            qx, qy = q_input.q[:, 0], q_input.q[:, 1]
            self._form = lambda: form(qx, qy, *kernel_args)
        else:
            form = model_info.Iq
            q = q_input.q
            self._form = lambda: form(q, *kernel_args)
        # Generate a closure which calls the form_volume if it exists.
        # Fallbacks: with no shell volume, shell == form volume; with no
        # volume at all, both default to 1.0.
        self._volume_args = volume_args
        volume = model_info.form_volume
        shell = model_info.shell_volume
        radius = model_info.radius_effective
        self._volume = ((lambda: (shell(*volume_args), volume(*volume_args))) if shell and volume
                        else (lambda: [volume(*volume_args)]*2) if volume
                        else (lambda: (1.0, 1.0)))
        # Effective radius: model-provided if available, otherwise derived
        # from the form volume assuming a sphere, otherwise 1.0.
        self._radius = ((lambda mode: radius(mode, *volume_args)) if radius
                        else (lambda mode: cbrt(0.75/pi*volume(*volume_args))) if volume
                        else (lambda mode: 1.0))
    def _call_kernel(self, call_details, values, cutoff, magnetic, radius_effective_mode):
        # type: (CallDetails, np.ndarray, np.ndarray, float, bool) -> None
        if magnetic:
            raise NotImplementedError("Magnetism not implemented for pure python models")
        #print("Calling python kernel")
        #call_details.show(values)
        # Only compute the effective radius when a mode is requested.
        radius = ((lambda: 0.0) if radius_effective_mode == 0
                  else (lambda: self._radius(radius_effective_mode)))
        self.result = _loops(
            self._parameter_vector, self._form, self._volume, radius,
            self.q_input.nq, call_details, values, cutoff)
    def release(self):
        # type: () -> None
        """
        Free resources associated with the kernel.
        """
        self.q_input.release()
        self.q_input = None
def _loops(parameters, form, form_volume, form_radius, nq, call_details,
values, cutoff):
# type: (np.ndarray, Callable[[], np.ndarray], Callable[[], float], Callable[[], float], int, CallDetails, np.ndarray, float) -> None
################################################################
# #
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #
# !! !! #
# !! KEEP THIS CODE CONSISTENT WITH KERNEL_TEMPLATE.C !! #
# !! !! #
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #
# #
################################################################
# WARNING: Trickery ahead
# The parameters[] vector is embedded in the closures for form(),
# form_volume() and form_radius(). We set the initial vector from
# the values for the model parameters. As we loop through the polydispesity
# mesh, we update the components with the polydispersity values before
# calling the respective functions.
n_pars = len(parameters)
parameters[:] = values[2:n_pars+2]
if call_details.num_active == 0:
total = form()
weight_norm = 1.0
weighted_shell, weighted_form = form_volume()
weighted_radius = form_radius()
else:
pd_value = values[2+n_pars:2+n_pars + call_details.num_weights]
pd_weight = values[2+n_pars + call_details.num_weights:]
weight_norm = 0.0
weighted_form = 0.0
weighted_shell = 0.0
weighted_radius = 0.0
partial_weight = np.NaN
weight = np.NaN
p0_par = call_details.pd_par[0]
p0_length = call_details.pd_length[0]
p0_index = p0_length
p0_offset = call_details.pd_offset[0]
pd_par = call_details.pd_par[:call_details.num_active]
pd_offset = call_details.pd_offset[:call_details.num_active]
pd_stride = call_details.pd_stride[:call_details.num_active]
pd_length = call_details.pd_length[:call_details.num_active]
total = np.zeros(nq, 'd')
for loop_index in range(call_details.num_eval):
# Update polydispersity parameter values.
if p0_index == p0_length:
pd_index = (loop_index//pd_stride)%pd_length
parameters[pd_par] = pd_value[pd_offset+pd_index]
partial_weight = np.prod(pd_weight[pd_offset+pd_index][1:])
p0_index = loop_index%p0_length
weight = partial_weight * pd_weight[p0_offset + p0_index]
parameters[p0_par] = pd_value[p0_offset + p0_index]
p0_index += 1
if weight > cutoff:
# Call the scattering function.
# Assume that NaNs are only generated if the parameters are bad;
# exclude all q for that NaN. Even better would be to have an
# INVALID expression like the C models, but that is expensive.
Iq = np.asarray(form(), 'd')
if np.isnan(Iq).any():
continue
# Update value and norm.
total += weight * Iq
weight_norm += weight
unweighted_shell, unweighted_form = form_volume()
weighted_shell += weight * unweighted_shell
weighted_form += weight * unweighted_form
weighted_radius += weight * form_radius()
result = np.hstack((total, weight_norm, weighted_form, weighted_shell, weighted_radius))
return result
def _create_default_functions(model_info):
    """
    Autogenerate missing functions, such as Iqxy from Iq.

    This only works for Iqxy when Iq is written in python. :func:`make_source`
    performs a similar role for Iq written in C.  This also vectorizes
    any functions that are not already marked as vectorized.
    """
    # Order matters: Iq must be vectorized before Iqxy can default to it.
    _create_vector_Iq(model_info)
    _create_vector_Iqxy(model_info)
def _create_vector_Iq(model_info):
"""
Define Iq as a vector function if it exists.
"""
Iq = model_info.Iq
if callable(Iq) and not getattr(Iq, 'vectorized', False):
#print("vectorizing Iq")
def vector_Iq(q, *args):
"""
Vectorized 1D kernel.
"""
return np.array([Iq(qi, *args) for qi in q])
vector_Iq.vectorized = True
model_info.Iq = vector_Iq
def _create_vector_Iqxy(model_info):
"""
Define Iqxy as a vector function if it exists, or default it from Iq().
"""
Iqxy = getattr(model_info, 'Iqxy', None)
if callable(Iqxy):
if not getattr(Iqxy, 'vectorized', False):
#print("vectorizing Iqxy")
def vector_Iqxy(qx, qy, *args):
"""
Vectorized 2D kernel.
"""
return np.array([Iqxy(qxi, qyi, *args) for qxi, qyi in zip(qx, qy)])
vector_Iqxy.vectorized = True
model_info.Iqxy = vector_Iqxy
else:
#print("defaulting Iqxy")
# Iq is vectorized because create_vector_Iq was already called.
Iq = model_info.Iq
def default_Iqxy(qx, qy, *args):
"""
Default 2D kernel.
"""
return Iq(np.sqrt(qx**2 + qy**2), *args)
default_Iqxy.vectorized = True
model_info.Iqxy = default_Iqxy
| 38.294985 | 137 | 0.598444 |
328b90e2c174bed024e6954f6bba107aa7a846cc | 748 | py | Python | oscar/lib/python2.7/site-packages/phonenumbers/data/region_TD.py | AMuratTuran/mkn | 557086426773ced10d82c969304bd349414a601e | [
"BSD-3-Clause"
] | 4 | 2018-10-19T04:36:20.000Z | 2020-02-13T16:14:09.000Z | oscar/lib/python2.7/site-packages/phonenumbers/data/region_TD.py | AMuratTuran/mkn | 557086426773ced10d82c969304bd349414a601e | [
"BSD-3-Clause"
] | 5 | 2020-03-24T16:37:25.000Z | 2021-06-10T21:24:54.000Z | upibo-venv/Lib/site-packages/phonenumbers/data/region_TD.py | smbpgroup/upibo | 625dcda9f9692c62aeb9fe8f7123a5d407c610ae | [
"BSD-3-Clause"
] | null | null | null | """Auto-generated file, do not edit by hand. TD metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_TD = PhoneMetadata(id='TD', country_code=235, international_prefix='00|16',
general_desc=PhoneNumberDesc(national_number_pattern='[2679]\\d{7}', possible_length=(8,)),
fixed_line=PhoneNumberDesc(national_number_pattern='22(?:[3789]0|5[0-5]|6[89])\\d{4}', example_number='22501234', possible_length=(8,)),
mobile=PhoneNumberDesc(national_number_pattern='(?:6[023568]\\d|77\\d|9\\d{2})\\d{5}', example_number='63012345', possible_length=(8,)),
preferred_international_prefix='00',
number_format=[NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4')])
| 74.8 | 140 | 0.709893 |
8d779eead5132dc37e994d970faadfcfdf6d5032 | 33,054 | py | Python | test_replays/test_replays.py | AdamSchunk/sc2reader | 5070cd035ab596506a3c9041fce249ebab4712c7 | [
"MIT"
] | 1 | 2021-11-27T01:56:13.000Z | 2021-11-27T01:56:13.000Z | test_replays/test_replays.py | AdamSchunk/sc2reader | 5070cd035ab596506a3c9041fce249ebab4712c7 | [
"MIT"
] | null | null | null | test_replays/test_replays.py | AdamSchunk/sc2reader | 5070cd035ab596506a3c9041fce249ebab4712c7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import json
from xml.dom import minidom
# Newer unittest features aren't built in for python 2.6
import sys
if sys.version_info[:2] < (2, 7):
import unittest2 as unittest
else:
import unittest
# StringIO was changed in python 3
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import sc2reader
from sc2reader.exceptions import CorruptTrackerFileError
from sc2reader.events.game import GameEvent
from sc2reader.objects import Player
sc2reader.log_utils.log_to_console("INFO")
class TestReplays(unittest.TestCase):
def test_teams(self):
replay = sc2reader.load_replay("test_replays/1.2.2.17811/13.SC2Replay")
self.assertNotEqual(replay.player[1].team.number, replay.player[2].team.number)
replay = sc2reader.load_replay("test_replays/2.0.8.25604/mlg1.SC2Replay")
self.assertNotEqual(replay.player[1].team.number, replay.player[2].team.number)
def test_private_category(self):
replay = sc2reader.load_replay("test_replays/1.2.2.17811/2.SC2Replay")
self.assertEqual(replay.expansion, "WoL")
self.assertTrue(replay.is_private, True)
self.assertFalse(replay.is_ladder, False)
def test_standard_1v1(self):
replay = sc2reader.load_replay("test_replays/1.2.2.17811/1.SC2Replay")
self.assertEqual(replay.expansion, "WoL")
self.assertEqual(str(replay.length), "32.47")
self.assertEqual(str(replay.game_length), "32.47")
self.assertEqual(str(replay.real_length), "23.25")
self.assertEqual(replay.map_name, "Lost Temple")
self.assertEqual(replay.build, 17811)
self.assertEqual(replay.release_string, "1.2.2.17811")
self.assertEqual(replay.speed, "Faster")
self.assertEqual(replay.type, "1v1")
self.assertTrue(replay.is_ladder)
self.assertFalse(replay.is_private)
self.assertEqual(len(replay.players), 2)
self.assertEqual(replay.person[1].name, "Emperor")
self.assertEqual(replay.person[2].name, "Boom")
emperor = replay.person[1]
self.assertEqual(emperor.team.number, 1)
self.assertEqual(emperor.pick_race, "Protoss")
self.assertEqual(emperor.play_race, "Protoss")
# self.assertFalse(emperor.recorder)
boom = replay.person[2]
self.assertEqual(boom.team.number, 2)
self.assertEqual(boom.pick_race, "Terran")
self.assertEqual(boom.play_race, "Terran")
# self.assertTrue(boom.recorder)
for player in replay.players:
self.assertTrue(player.is_human)
# Because it is a 1v1 and the recording player quit, we should know the winner.
self.assertEqual(emperor.result, "Win")
self.assertEqual(boom.result, "Loss")
self.assertEqual(
emperor.url, "http://eu.battle.net/sc2/en/profile/520049/1/Emperor/"
)
self.assertEqual(
boom.url, "http://eu.battle.net/sc2/en/profile/1694745/1/Boom/"
)
self.assertEqual(len(replay.messages), 12)
self.assertEqual(replay.messages[0].text, "hf")
self.assertEqual(replay.messages[0].player.name, "Emperor")
self.assertEqual(replay.messages[1].text, "HEYA")
self.assertEqual(replay.messages[1].player.name, "Boom")
self.assertEqual(replay.messages[2].text, "gl hf")
self.assertEqual(replay.messages[2].player.name, "Boom")
self.assertEqual(replay.messages[3].text, "sry for caps")
self.assertEqual(replay.messages[3].player.name, "Boom")
self.assertEqual(replay.messages[4].text, "^^")
self.assertEqual(replay.messages[4].player.name, "Emperor")
self.assertEqual(replay.messages[5].text, "noppe")
self.assertEqual(replay.messages[5].player.name, "Emperor")
self.assertEqual(replay.messages[6].text, "you greedy bastard")
self.assertEqual(replay.messages[6].player.name, "Boom")
self.assertEqual(replay.messages[7].text, "ggg")
self.assertEqual(replay.messages[7].player.name, "Boom")
self.assertEqual(replay.messages[8].text, "WG")
self.assertEqual(replay.messages[8].player.name, "Emperor")
self.assertEqual(replay.messages[9].text, "wg? :)")
self.assertEqual(replay.messages[9].player.name, "Boom")
self.assertEqual(replay.messages[10].text, "wipe")
self.assertEqual(replay.messages[10].player.name, "Emperor")
self.assertEqual(replay.messages[11].text, "huh?")
self.assertEqual(replay.messages[11].player.name, "Boom")
for msg in replay.messages:
self.assertTrue(msg.to_all)
def test_2v2(self):
replay = sc2reader.load_replay("test_replays/1.2.2.17811/7.SC2Replay")
self.assertEqual(replay.type, "2v2")
def test_3v3(self):
replay = sc2reader.load_replay("test_replays/1.2.2.17811/3.SC2Replay")
self.assertEqual(replay.type, "3v3")
# Because it"s a 3v3 and all of the members of Team 2 quit, we should know the winner.
self.assertEqual(replay.team[1].result, "Win")
self.assertEqual(replay.team[2].result, "Loss")
def test_4v4(self):
replay = sc2reader.load_replay("test_replays/1.2.0.17326/9.SC2Replay")
self.assertEqual(replay.type, "4v4")
def test_ffa(self):
replay = sc2reader.load_replay("test_replays/1.2.2.17811/8.SC2Replay")
self.assertEqual(replay.type, "FFA")
self.assertEqual(replay.winner.players[0].name, "Boom")
def test_unknown_winner(self):
replay = sc2reader.load_replay("test_replays/1.2.2.17811/10.SC2Replay")
# Recording player (Boom) left second in a 4v4, so the winner shouldn"t be known
self.assertEqual(replay.winner, None)
def test_random_player(self):
replay = sc2reader.load_replay("test_replays/1.2.2.17811/3.SC2Replay")
gogeta = next(player for player in replay.players if player.name == "Gogeta")
self.assertEqual(gogeta.pick_race, "Random")
self.assertEqual(gogeta.play_race, "Terran")
replay = sc2reader.load_replay("test_replays/1.2.2.17811/6.SC2Replay")
permafrost = next(
player for player in replay.players if player.name == "Permafrost"
)
self.assertEqual(permafrost.pick_race, "Random")
self.assertEqual(permafrost.play_race, "Protoss")
def test_us_realm(self):
replay = sc2reader.load_replay("test_replays/1.2.2.17811/5.SC2Replay")
shadesofgray = [
player for player in replay.players if player.name == "ShadesofGray"
][0]
reddawn = [player for player in replay.players if player.name == "reddawn"][0]
self.assertEqual(
shadesofgray.url,
"http://us.battle.net/sc2/en/profile/2358439/1/ShadesofGray/",
)
self.assertEqual(
reddawn.url, "http://us.battle.net/sc2/en/profile/2198663/1/reddawn/"
)
def test_kr_realm_and_tampered_messages(self):
"""
# TODO: Current problem.. both players are set as the recording players
# Waiting for response https://github.com/arkx/mpyq/issues/closed#issue/7
"""
replay = sc2reader.load_replay("test_replays/1.1.3.16939/11.SC2Replay")
self.assertEqual(replay.expansion, "WoL")
first = [player for player in replay.players if player.name == "명지대학교"][0]
second = [player for player in replay.players if player.name == "티에스엘사기수"][0]
self.assertEqual(
first.url, "http://kr.battle.net/sc2/en/profile/258945/1/명지대학교/"
)
self.assertEqual(
second.url, "http://kr.battle.net/sc2/en/profile/102472/1/티에스엘사기수/"
)
self.assertEqual(replay.messages[0].text, "sc2.replays.net")
self.assertEqual(replay.messages[5].text, "sc2.replays.net")
def test_referee(self):
replay = sc2reader.load_replay("test_replays/1.2.2.17811/14.SC2Replay")
def test_encrypted(self):
replay = sc2reader.load_replay("test_replays/1.2.2.17811/4.SC2Replay")
def test_observers(self):
replay = sc2reader.load_replay("test_replays/1.2.2.17811/13.SC2Replay")
def test_datetimes(self):
# Ignore seconds in comparisons, because they are off by one what is reported by Windows.
# This might be a little nuance worth investigating at some point.
# Played at 20 Feb 2011 22:44:48 UTC+2
replay = sc2reader.load_replay("test_replays/1.2.2.17811/1.SC2Replay")
self.assertEqual(replay.end_time, datetime.datetime(2011, 2, 20, 20, 44, 47))
# Played at 21 Feb 2011 00:42:13 UTC+2
replay = sc2reader.load_replay("test_replays/1.2.2.17811/2.SC2Replay")
self.assertEqual(replay.end_time, datetime.datetime(2011, 2, 20, 22, 42, 12))
# Played at 25 Feb 2011 16:36:28 UTC+2
replay = sc2reader.load_replay("test_replays/1.2.2.17811/3.SC2Replay")
self.assertEqual(replay.end_time, datetime.datetime(2011, 2, 25, 14, 36, 26))
def test_hots_pids(self):
for replayfilename in [
"test_replays/2.0.3.24764/Akilon Wastes (10).SC2Replay",
"test_replays/2.0.3.24764/Antiga Shipyard (3).SC2Replay",
"test_replays/2.0.0.24247/molten.SC2Replay",
"test_replays/2.0.0.23925/Akilon Wastes.SC2Replay",
]:
replay = sc2reader.load_replay(replayfilename)
self.assertEqual(replay.expansion, "HotS")
player_pids = set(
[player.pid for player in replay.players if player.is_human]
)
ability_pids = set(
[
event.player.pid
for event in replay.events
if "CommandEvent" in event.name
]
)
self.assertEqual(ability_pids, player_pids)
def test_wol_pids(self):
replay = sc2reader.load_replay(
"test_replays/1.5.4.24540/ggtracker_1471849.SC2Replay"
)
self.assertEqual(replay.expansion, "WoL")
ability_pids = set(
[
event.player.pid
for event in replay.events
if "CommandEvent" in event.name
]
)
player_pids = set([player.pid for player in replay.players])
self.assertEqual(ability_pids, player_pids)
def test_hots_hatchfun(self):
replay = sc2reader.load_replay("test_replays/2.0.0.24247/molten.SC2Replay")
player_pids = set([player.pid for player in replay.players])
spawner_pids = set(
[
event.player.pid
for event in replay.events
if "TargetUnitCommandEvent" in event.name
and event.ability.name == "SpawnLarva"
]
)
self.assertTrue(spawner_pids.issubset(player_pids))
def test_hots_vs_ai(self):
replay = sc2reader.load_replay(
"test_replays/2.0.0.24247/Cloud Kingdom LE (13).SC2Replay"
)
self.assertEqual(replay.expansion, "HotS")
replay = sc2reader.load_replay(
"test_replays/2.0.0.24247/Korhal City (19).SC2Replay"
)
self.assertEqual(replay.expansion, "HotS")
def test_oracle_parsing(self):
replay = sc2reader.load_replay(
"test_replays/2.0.3.24764/ggtracker_1571740.SC2Replay"
)
self.assertEqual(replay.expansion, "HotS")
oracles = [unit for unit in replay.objects.values() if unit.name == "Oracle"]
self.assertEqual(len(oracles), 2)
def test_resume_from_replay(self):
replay = sc2reader.load_replay(
"test_replays/2.0.3.24764/resume_from_replay.SC2Replay"
)
self.assertTrue(replay.resume_from_replay)
self.assertEqual(replay.resume_method, 0)
def test_clan_players(self):
replay = sc2reader.load_replay(
"test_replays/2.0.4.24944/Lunar Colony V.SC2Replay"
)
self.assertEqual(replay.expansion, "WoL")
self.assertEqual(len(replay.people), 4)
def test_WoL_204(self):
replay = sc2reader.load_replay(
"test_replays/2.0.4.24944/ggtracker_1789768.SC2Replay"
)
self.assertEqual(replay.expansion, "WoL")
self.assertEqual(len(replay.people), 2)
def test_send_resources(self):
replay = sc2reader.load_replay(
"test_replays/2.0.4.24944/Backwater Complex (15).SC2Replay"
)
def test_cn_replays(self):
replay = sc2reader.load_replay("test_replays/2.0.5.25092/cn1.SC2Replay")
self.assertEqual(replay.region, "cn")
self.assertEqual(replay.expansion, "WoL")
def test_unit_types(self):
""" sc2reader#136 regression test """
replay = sc2reader.load_replay("test_replays/2.0.8.25604/issue136.SC2Replay")
hellion_times = [
u.started_at for u in replay.players[0].units if u.name == "Hellion"
]
hellbat_times = [
u.started_at for u in replay.players[0].units if u.name == "BattleHellion"
]
self.assertEqual(hellion_times, [5180, 5183])
self.assertEqual(hellbat_times, [6736, 6741, 7215, 7220, 12004, 12038])
@unittest.expectedFailure
def test_outmatched_pids(self):
replay = sc2reader.load_replay(
"test_replays/2.0.8.25604/issue131_arid_wastes.SC2Replay"
)
self.assertEqual(replay.players[0].pid, 1)
self.assertEqual(replay.players[1].pid, 3)
self.assertEqual(replay.players[2].pid, 4)
replay = sc2reader.load_replay("test_replays/2.0.8.25604/issue135.SC2Replay")
self.assertEqual(replay.players[0].pid, 1)
self.assertEqual(replay.players[1].pid, 2)
self.assertEqual(replay.players[2].pid, 4)
replay = sc2reader.load_replay("test_replays/2.0.8.25604/mlg1.SC2Replay")
self.assertEqual(replay.players[0].pid, 1)
self.assertEqual(replay.players[1].pid, 2)
self.assertEqual(len(replay.players), 2)
self.assertEqual(len(replay.people), 3)
@unittest.expectedFailure
def test_map_info(self):
replay = sc2reader.load_replay(
"test_replays/1.5.3.23260/ggtracker_109233.SC2Replay", load_map=True
)
self.assertEqual(replay.map.map_info.tile_set, "Avernus")
self.assertEqual(replay.map.map_info.fog_type, "Dark")
self.assertEqual(replay.map.map_info.width, 176)
self.assertEqual(replay.map.map_info.height, 160)
self.assertEqual(replay.map.map_info.camera_top, 134)
self.assertEqual(replay.map.map_info.camera_left, 14)
self.assertEqual(replay.map.map_info.camera_right, 162)
self.assertEqual(replay.map.map_info.camera_bottom, 14)
controllers = [(p.pid, p.control) for p in replay.map.map_info.players]
self.assertEqual(controllers, [(0, 3), (1, 1), (2, 1), (15, 4)])
def test_engine_plugins(self):
from sc2reader.engine.plugins import ContextLoader, APMTracker, SelectionTracker
replay = sc2reader.load_replay(
"test_replays/2.0.5.25092/cn1.SC2Replay",
engine=sc2reader.engine.GameEngine(
plugins=[ContextLoader(), APMTracker(), SelectionTracker()]
),
)
code, details = replay.plugins["ContextLoader"]
self.assertEqual(code, 0)
self.assertEqual(details, dict())
@unittest.expectedFailure
def test_factory_plugins(self):
from sc2reader.factories.plugins.replay import (
APMTracker,
SelectionTracker,
toJSON,
)
factory = sc2reader.factories.SC2Factory()
factory.register_plugin("Replay", APMTracker())
factory.register_plugin("Replay", SelectionTracker())
factory.register_plugin("Replay", toJSON())
replay = factory.load_replay("test_replays/2.0.5.25092/cn1.SC2Replay")
# Load and quickly check the JSON output consistency
result = json.loads(replay)
self.assertEqual(result["map_name"], "生化实验区")
self.assertEqual(result["players"][2]["name"], "ImYoonA")
self.assertEqual(result["players"][2]["avg_apm"], 84.52332657200812)
self.assertEqual(result["release"], "2.0.5.25092")
self.assertEqual(result["game_length"], 986)
self.assertEqual(result["real_length"], 704)
self.assertEqual(result["region"], "cn")
self.assertEqual(result["game_fps"], 16.0)
self.assertTrue(result["is_ladder"])
def test_gameheartnormalizer_plugin(self):
from sc2reader.engine.plugins import GameHeartNormalizer
sc2reader.engine.register_plugin(GameHeartNormalizer())
# Not a GameHeart game!
replay = sc2reader.load_replay("test_replays/2.0.0.24247/molten.SC2Replay")
player_pids = set([player.pid for player in replay.players])
spawner_pids = set(
[
event.player.pid
for event in replay.events
if "TargetUnitCommandEvent" in event.name
and event.ability.name == "SpawnLarva"
]
)
self.assertTrue(spawner_pids.issubset(player_pids))
replay = sc2reader.load_replay("test_replays/gameheart/gameheart.SC2Replay")
self.assertEqual(replay.events[0].frame, 0)
self.assertEqual(replay.game_length.seconds, 636)
self.assertEqual(len(replay.observers), 5)
self.assertEqual(replay.players[0].name, "SjoWBBII")
self.assertEqual(replay.players[0].play_race, "Terran")
self.assertEqual(replay.players[1].name, "Stardust")
self.assertEqual(replay.players[1].play_race, "Protoss")
self.assertEqual(len(replay.teams), 2)
self.assertEqual(replay.teams[0].players[0].name, "SjoWBBII")
self.assertEqual(replay.teams[1].players[0].name, "Stardust")
self.assertEqual(replay.winner, replay.teams[1])
replay = sc2reader.load_replay("test_replays/gameheart/gh_sameteam.SC2Replay")
self.assertEqual(replay.events[0].frame, 0)
self.assertEqual(replay.game_length.seconds, 424)
self.assertEqual(len(replay.observers), 5)
self.assertEqual(replay.players[0].name, "EGJDRC")
self.assertEqual(replay.players[0].play_race, "Zerg")
self.assertEqual(replay.players[1].name, "LiquidTaeJa")
self.assertEqual(replay.players[1].play_race, "Terran")
self.assertEqual(len(replay.teams), 2)
self.assertEqual(replay.teams[0].players[0].name, "EGJDRC")
self.assertEqual(replay.teams[1].players[0].name, "LiquidTaeJa")
self.assertEqual(replay.winner, replay.teams[0])
def test_replay_event_order(self):
replay = sc2reader.load_replay("test_replays/event_order.SC2Replay")
def test_creepTracker(self):
from sc2reader.engine.plugins import CreepTracker
for replayfilename in [
"test_replays/2.0.8.25605/ggtracker_3621322.SC2Replay",
"test_replays/2.0.8.25605/ggtracker_3621402.SC2Replay",
"test_replays/2.0.8.25605/ggtracker_3663861.SC2Replay",
"test_replays/2.0.8.25605/ggtracker_3695400.SC2Replay",
"test_replays/3.1.2/6494799.SC2Replay",
]:
factory = sc2reader.factories.SC2Factory()
pluginEngine = sc2reader.engine.GameEngine(plugins=[CreepTracker()])
replay = factory.load_replay(
replayfilename, engine=pluginEngine, load_map=True, load_level=4
)
for player_id in replay.player:
if replay.player[player_id].play_race == "Zerg":
assert replay.player[player_id].max_creep_spread[1] > 0
assert replay.player[player_id].creep_spread_by_minute[0] > 0
# print("MCS", replay.player[player_id].max_creep_spread)
# print("CSBM", replay.player[player_id].creep_spread_by_minute)
replay = factory.load_replay(
"test_replays/2.0.8.25605/ggtracker_3621402.SC2Replay",
load_map=True,
engine=pluginEngine,
load_level=4,
)
assert replay.player[2].max_creep_spread == (840, 24.83)
assert replay.player[2].creep_spread_by_minute[420] == 9.4
assert replay.player[2].creep_spread_by_minute[780] == 22.42
def test_bad_unit_ids(self):
with self.assertRaises(CorruptTrackerFileError):
replay = sc2reader.load_replay(
"test_replays/2.0.11.26825/bad_unit_ids_1.SC2Replay", load_level=4
)
with self.assertRaises(CorruptTrackerFileError):
replay = sc2reader.load_replay(
"test_replays/2.0.9.26147/bad_unit_ids_2.SC2Replay", load_level=4
)
def test_daedalus_point(self):
replay = sc2reader.load_replay(
"test_replays/2.0.11.26825/DaedalusPoint.SC2Replay"
)
def test_reloaded(self):
replay = sc2reader.load_replay(
"test_replays/2.1.3.28667/Habitation Station LE (54).SC2Replay"
)
def test_214(self):
replay = sc2reader.load_replay(
"test_replays/2.1.4/Catallena LE.SC2Replay", load_level=4
)
def test_lotv1(self):
replay = sc2reader.load_replay("test_replays/lotv/lotv1.SC2Replay")
self.assertEqual(replay.expansion, "LotV")
replay = sc2reader.load_replay("test_replays/lotv/lotv2.SC2Replay")
self.assertEqual(replay.expansion, "LotV")
def test_lotv_creepTracker(self):
from sc2reader.engine.plugins import CreepTracker
for replayfilename in ["test_replays/4.0.0.59587/1.SC2Replay"]:
factory = sc2reader.factories.SC2Factory()
pluginEngine = sc2reader.engine.GameEngine(plugins=[CreepTracker()])
replay = factory.load_replay(
replayfilename, engine=pluginEngine, load_map=True
)
is_at_least_one_zerg_in_game = False
for player_id in replay.player:
if replay.player[player_id].play_race == "Zerg":
is_at_least_one_zerg_in_game = True
assert replay.player[player_id].max_creep_spread != 0
assert replay.player[player_id].creep_spread_by_minute
assert is_at_least_one_zerg_in_game
def test_lotv_map(self):
for replayfilename in ["test_replays/4.0.0.59587/1.SC2Replay"]:
factory = sc2reader.factories.SC2Factory()
replay = factory.load_replay(replayfilename, load_level=1, load_map=True)
def test_30(self):
replay = sc2reader.load_replay("test_replays/3.0.0.38215/first.SC2Replay")
replay = sc2reader.load_replay("test_replays/3.0.0.38215/second.SC2Replay")
replay = sc2reader.load_replay("test_replays/3.0.0.38215/third.SC2Replay")
def test_31(self):
for i in range(1, 5):
print("DOING {}".format(i))
replay = sc2reader.load_replay("test_replays/3.1.0/{}.SC2Replay".format(i))
def test_30_map(self):
for replayfilename in ["test_replays/3.0.0.38215/third.SC2Replay"]:
factory = sc2reader.factories.SC2Factory()
replay = factory.load_replay(replayfilename, load_level=1, load_map=True)
def test_30_apms(self):
from sc2reader.factories.plugins.replay import (
APMTracker,
SelectionTracker,
toJSON,
)
factory = sc2reader.factories.SC2Factory()
factory.register_plugin("Replay", APMTracker())
replay = factory.load_replay("test_replays/3.0.0.38215/fourth.SC2Replay")
for player in replay.players:
if player.name == "Owl":
print(player.name, player.avg_apm)
self.assertTrue(player.avg_apm > 110)
def test_38749(self):
replay = sc2reader.load_replay("test_replays/3.0.0.38749/1.SC2Replay")
self.assertEqual(replay.expansion, "HotS")
replay = sc2reader.load_replay("test_replays/3.0.0.38749/2.SC2Replay")
self.assertEqual(replay.expansion, "HotS")
def test_38996(self):
replay = sc2reader.load_replay("test_replays/3.0.0.38996/1.SC2Replay")
self.assertEqual(replay.expansion, "LotV")
replay = sc2reader.load_replay("test_replays/3.0.0.38996/2.SC2Replay")
self.assertEqual(replay.expansion, "LotV")
def test_funny_minerals(self):
replay = sc2reader.load_replay("test_replays/3.1.0/centralprotocol.SC2Replay")
replay.load_map()
xmldoc = minidom.parseString(replay.map.archive.read_file("Objects"))
itemlist = xmldoc.getElementsByTagName("ObjectUnit")
mineralPosStrs = [
ou.attributes["Position"].value
for ou in itemlist
if "MineralField" in ou.attributes["UnitType"].value
]
mineralFieldNames = list(
set(
[
ou.attributes["UnitType"].value
for ou in itemlist
if "MineralField" in ou.attributes["UnitType"].value
]
)
)
# print(mineralFieldNames)
self.assertTrue(len(mineralPosStrs) > 0)
def test_dusk(self):
replay = sc2reader.load_replay("test_replays/3.1.0/dusktowers.SC2Replay")
self.assertEqual(replay.expansion, "LotV")
def test_32(self):
replay = sc2reader.load_replay("test_replays/3.2.0/1.SC2Replay")
self.assertTrue(replay is not None)
def test_33(self):
for replaynum in range(1, 4):
replay = sc2reader.load_replay(
"test_replays/3.3.0/{}.SC2Replay".format(replaynum)
)
self.assertTrue(replay is not None)
def test_33_shift_click_calldown_mule(self):
replay = sc2reader.load_replay("test_replays/3.3.0/ggissue48.SC2Replay")
def efilter(e):
return hasattr(e, "ability") and e.ability_name == "CalldownMULE"
self.assertEqual(len(list(filter(efilter, replay.events))), 29)
def test_33_shift_click_spawn_larva(self):
replay = sc2reader.load_replay("test_replays/3.3.0/ggissue49.SC2Replay")
def efilter(e):
return hasattr(e, "ability") and e.ability_name == "SpawnLarva"
self.assertEqual(len(list(filter(efilter, replay.events))), 23)
def test_34(self):
replay = sc2reader.load_replay("test_replays/3.4.0/issueYY.SC2Replay")
self.assertEqual(replay.expansion, "LotV")
def test_lotv_time(self):
replay = sc2reader.load_replay("test_replays/lotv/lotv1.SC2Replay")
self.assertEqual(replay.length.seconds, 1002)
self.assertEqual(replay.real_length.seconds, 1002)
def test_37(self):
replay = sc2reader.load_replay("test_replays/3.7.0/1.SC2Replay")
replay = sc2reader.load_replay("test_replays/3.7.0/2.SC2Replay")
def test_312(self):
for replayfilename in ["test_replays/3.12/Honorgrounds.SC2Replay"]:
factory = sc2reader.factories.SC2Factory()
replay = factory.load_replay(replayfilename, load_level=0)
replay = factory.load_replay(replayfilename, load_level=1)
def test_316(self):
for replayfilename in ["test_replays/3.16/AbyssalReef.SC2Replay"]:
factory = sc2reader.factories.SC2Factory()
replay = factory.load_replay(replayfilename)
def test_54518(self):
for replayfilename in [
"test_replays/3.14.0.54518/1.SC2Replay",
"test_replays/3.14.0.54518/2.SC2Replay",
"test_replays/3.14.0.54518/3.SC2Replay",
]:
factory = sc2reader.factories.SC2Factory()
replay = factory.load_replay(replayfilename)
def test_59587(self):
for replayfilename in ["test_replays/4.0.0.59587/1.SC2Replay"]:
factory = sc2reader.factories.SC2Factory()
replay = factory.load_replay(replayfilename)
def test_64469(self):
for replayfilename in ["test_replays/4.3.0.64469/1.SC2Replay"]:
factory = sc2reader.factories.SC2Factory()
replay = factory.load_replay(replayfilename)
def test_coop(self):
for replayfilename in ["test_replays/coop/CoA.SC2Replay"]:
factory = sc2reader.factories.SC2Factory()
replay = factory.load_replay(replayfilename)
def test_65895(self):
for replayfilename in ["test_replays/4.4.0.65895/1.SC2Replay"]:
factory = sc2reader.factories.SC2Factory()
replay = factory.load_replay(replayfilename)
def test_event_print(self):
replay = sc2reader.load_replay("test_replays/lotv/lotv1.SC2Replay")
sys.stdout = capturedOutput = StringIO()
for event in replay.events:
print(event)
self.assertIn("PlayerLeaveEvent", capturedOutput.getvalue())
sys.stdout = sys.__stdout__
capturedOutput.close()
def test_70154(self):
for replayfilename in ["test_replays/4.7.0.70154/1.SC2Replay"]:
factory = sc2reader.factories.SC2Factory()
replay = factory.load_replay(replayfilename)
def test_75689(self):
for replayfilename in ["test_replays/4.10.0.75689/trophy_id_13.SC2Replay"]:
factory = sc2reader.factories.SC2Factory()
replay = factory.load_replay(replayfilename)
self.assertEqual(replay.players[0].trophy_id, 13)
def test_anonymous_replay(self):
replayfilename = "test_replays/4.1.2.60604/1.SC2Replay"
factory = sc2reader.factories.SC2Factory()
replay = factory.load_replay(replayfilename)
    def test_game_event_string(self):
        """Check GameEvent._str_prefix() for global, named, missing and
        nameless players."""
        # Expected timestamp for frame 16 — presumably one in-game second
        # formatted as minutes.seconds; TODO confirm against GameEvent.
        time = "00.01"
        # Global
        # GameEvent(frame, pid): pid 16 appears to mark a global event, so
        # the prefix reads "Global" even though a player is attached.
        player = MockPlayer()
        player.name = "TestPlayer"
        player.play_race = "TestRace"
        event = GameEvent(16, 16)
        event.player = player
        self.assertEqual("{0}\t{1:<15} ".format(time, "Global"), event._str_prefix())
        # Player with name
        # With a normal pid, the prefix shows the player's name.
        player = MockPlayer()
        player.name = "TestPlayer"
        player.play_race = "TestRace"
        event = GameEvent(16, 1)
        event.player = player
        self.assertEqual("{0}\t{1:<15} ".format(time, player.name), event._str_prefix())
        # No Player
        # event.player is never assigned here, so the prefix falls back
        # to the literal "no name".
        player = MockPlayer()
        event = GameEvent(16, 1)
        self.assertEqual("{0}\t{1:<15} ".format(time, "no name"), event._str_prefix())
        # Player without name
        # A player with pid/race but name=None yields the
        # "Player <pid> - (<race>)" form instead.
        player = MockPlayer()
        player.play_race = "TestRace"
        player.pid = 1
        event = GameEvent(16, 1)
        event.player = player
        self.assertEqual(
            "{0}\tPlayer {1} - ({2}) ".format(time, player.pid, player.play_race),
            event._str_prefix(),
        )
class TestGameEngine(unittest.TestCase):
    """Exercises sc2reader's GameEngine plugin dispatch and failure paths."""

    class TestEvent(object):
        # Minimal event: the engine dispatches on ``name``; ``value`` is a
        # one-character payload used below to record dispatch order.
        name = "TestEvent"

        def __init__(self, value):
            self.value = value

        def __str__(self):
            return self.value

    class TestPlugin1(object):
        # Emits events, then aborts itself via PluginExit when it sees "d".
        name = "TestPlugin1"

        def handleInitGame(self, event, replay):
            yield TestGameEngine.TestEvent("b")
            yield TestGameEngine.TestEvent("c")

        def handleTestEvent(self, event, replay):
            if event.value == "d":
                # Simulated plugin failure: non-zero exit code with details.
                yield sc2reader.engine.PluginExit(
                    self, code=1, details=dict(msg="Fail!")
                )
            else:
                yield TestGameEngine.TestEvent("d")

        def handleEndGame(self, event, replay):
            # Never reached in test_plugin1 — the plugin exits before EndGame.
            yield TestGameEngine.TestEvent("g")

    class TestPlugin2(object):
        # Passive recorder: appends every TestEvent it sees to the replay.
        name = "TestPlugin2"

        def handleInitGame(self, event, replay):
            replay.engine_events = list()

        def handleTestEvent(self, event, replay):
            replay.engine_events.append(event)

        def handlePluginExit(self, event, replay):
            yield TestGameEngine.TestEvent("e")

        def handleEndGame(self, event, replay):
            yield TestGameEngine.TestEvent("f")

    class MockReplay(object):
        # Bare replay stand-in: the engine only needs an ``events`` list.
        def __init__(self, events):
            self.events = events

    def test_plugin1(self):
        engine = sc2reader.engine.GameEngine()
        engine.register_plugin(self.TestPlugin1())
        engine.register_plugin(self.TestPlugin2())
        replay = self.MockReplay([self.TestEvent("a")])
        engine.run(replay)
        # "bdecaf": Plugin1 dies after emitting "d"; Plugin2 records the
        # PluginExit follow-up "e", the original "a", and EndGame's "f",
        # while Plugin1's "g" is never produced.
        self.assertEqual("".join(str(e) for e in replay.engine_events), "bdecaf")
        self.assertEqual(replay.plugin_failures, ["TestPlugin1"])
        self.assertEqual(replay.plugin_result["TestPlugin1"], (1, dict(msg="Fail!")))
        self.assertEqual(replay.plugin_result["TestPlugin2"], (0, dict()))
class MockPlayer(object):
    """Minimal stand-in for a player object.

    Exposes the three attributes the event-string tests poke at —
    ``name``, ``play_race`` and ``pid`` — all initially None.
    """

    def __init__(self):
        self.name = self.play_race = self.pid = None
# Run the whole suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| 40.706897 | 97 | 0.641768 |
7b23e6220ffc680815ffc6b3eb59f493ddfe9e3c | 5,502 | py | Python | deploy/python/predict_det.py | TxT1212/PaddleClas | 5a24c8700f738f036bf27f80ca12dbe8471a11b0 | [
"Apache-2.0"
] | 3,763 | 2020-04-10T04:48:11.000Z | 2022-03-31T13:24:37.000Z | deploy/python/predict_det.py | TxT1212/PaddleClas | 5a24c8700f738f036bf27f80ca12dbe8471a11b0 | [
"Apache-2.0"
] | 633 | 2020-04-08T18:27:31.000Z | 2022-03-31T01:09:43.000Z | deploy/python/predict_det.py | TxT1212/PaddleClas | 5a24c8700f738f036bf27f80ca12dbe8471a11b0 | [
"Apache-2.0"
] | 846 | 2020-04-08T08:13:18.000Z | 2022-03-31T12:28:37.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
from utils import logger
from utils import config
from utils.predictor import Predictor
from utils.get_image_list import get_image_list
from det_preprocess import det_preprocess
from preprocess import create_operators
import os
import argparse
import time
import yaml
import ast
from functools import reduce
import cv2
import numpy as np
import paddle
class DetPredictor(Predictor):
    """Paddle-inference wrapper for the object-detection stage.

    Loads the detector from ``Global.det_inference_model_dir`` and exposes
    :meth:`predict`, which returns the top-scoring boxes as result dicts.
    """

    def __init__(self, config):
        super().__init__(config["Global"],
                         config["Global"]["det_inference_model_dir"])

        # Preprocessing pipeline assembled from the YAML transform list.
        self.preprocess_ops = create_operators(config["DetPreProcess"][
            "transform_ops"])
        self.config = config

    def preprocess(self, img):
        """Run the preprocessing ops on ``img`` and build the feed dict.

        Args:
            img (np.ndarray): decoded image (H, W, C).
        Returns:
            dict: model inputs as produced by :meth:`create_inputs`.
        """
        im_info = {
            # No resize has happened yet, so scale starts at identity.
            # Fix: the original dict literal assigned 'scale_factor' twice
            # with the same value; the redundant duplicate key was removed.
            'scale_factor': np.array(
                [1., 1.], dtype=np.float32),
            'im_shape': np.array(
                img.shape[:2], dtype=np.float32),
            'input_shape': self.config["Global"]["image_shape"],
        }
        im, im_info = det_preprocess(img, im_info, self.preprocess_ops)
        inputs = self.create_inputs(im, im_info)
        return inputs

    def create_inputs(self, im, im_info):
        """generate input for different model type
        Args:
            im (np.ndarray): image (np.ndarray)
            im_info (dict): info of image
        Returns:
            inputs (dict): input of model
        """
        inputs = {}
        # Wrap in a 1-element tuple to add the batch dimension.
        inputs['image'] = np.array((im, )).astype('float32')
        inputs['im_shape'] = np.array(
            (im_info['im_shape'], )).astype('float32')
        inputs['scale_factor'] = np.array(
            (im_info['scale_factor'], )).astype('float32')
        return inputs

    def parse_det_results(self, pred, threshold, label_list):
        """Convert raw [class, score, x0, y0, x1, y1] rows into result dicts.

        Keeps at most ``Global.max_det_results`` rows ordered by descending
        score and drops any row whose score is below ``threshold``.
        """
        max_det_results = self.config["Global"]["max_det_results"]
        # argsort on the score column, reversed => highest scores first.
        keep_indexes = pred[:, 1].argsort()[::-1][:max_det_results]
        results = []
        for idx in keep_indexes:
            single_res = pred[idx]
            class_id = int(single_res[0])
            score = single_res[1]
            bbox = single_res[2:]
            if score < threshold:
                continue
            label_name = label_list[class_id]
            results.append({
                "class_id": class_id,
                "score": score,
                "bbox": bbox,
                "label_name": label_name,
            })
        return results

    def predict(self, image, threshold=0.5, run_benchmark=False):
        '''
        Args:
            image (str/np.ndarray): path of image/ np.ndarray read by cv2
            threshold (float): threshold of predicted box' score
        Returns:
            results (dict): include 'boxes': np.ndarray: shape:[N,6], N: number of box,
                            matix element:[class, score, x_min, y_min, x_max, y_max]
                            MaskRCNN's results include 'masks': np.ndarray:
                            shape: [N, im_h, im_w]

        NOTE(review): the ``threshold`` and ``run_benchmark`` parameters are
        currently unused — filtering uses ``Global.threshold`` from the
        config instead. Kept for interface compatibility.
        '''
        inputs = self.preprocess(image)
        np_boxes = None
        input_names = self.paddle_predictor.get_input_names()

        for i in range(len(input_names)):
            input_tensor = self.paddle_predictor.get_input_handle(input_names[
                i])
            input_tensor.copy_from_cpu(inputs[input_names[i]])

        t1 = time.time()
        self.paddle_predictor.run()
        output_names = self.paddle_predictor.get_output_names()
        boxes_tensor = self.paddle_predictor.get_output_handle(output_names[0])
        np_boxes = boxes_tensor.copy_to_cpu()
        t2 = time.time()
        print("Inference: {} ms per batch image".format((t2 - t1) * 1000.0))

        # do not perform postprocess in benchmark mode
        results = []
        # Fewer than 6 elements cannot hold a single [class, score, bbox] row.
        if reduce(lambda x, y: x * y, np_boxes.shape) < 6:
            print('[WARNNING] No object detected.')
            results = np.array([])
        else:
            results = np_boxes
        # NOTE: "labe_list" is the (misspelled) key actually present in the
        # shipped config files, so it must stay spelled this way.
        results = self.parse_det_results(results,
                                         self.config["Global"]["threshold"],
                                         self.config["Global"]["labe_list"])
        return results
def main(config):
    """Run the detector over every image referenced by ``Global.infer_imgs``."""
    predictor = DetPredictor(config)
    image_files = get_image_list(config["Global"]["infer_imgs"])
    assert config["Global"]["batch_size"] == 1
    for image_file in image_files:
        # cv2 decodes BGR; reverse the channel axis to feed RGB.
        bgr = cv2.imread(image_file)
        output = predictor.predict(bgr[:, :, ::-1])
        print(output)
    return
# CLI entry point: parse -c/--config plus -o overrides, then run inference.
if __name__ == "__main__":
    args = config.parse_args()
    # Note: rebinds the module name `config` to the loaded config dict.
    config = config.get_config(args.config, overrides=args.override, show=True)
    main(config)
| 34.603774 | 87 | 0.602145 |
67d72499e0f8415270282871e5df9d29cdc10c89 | 25,356 | py | Python | selfdrive/controls/controlsd.py | robbo600/openpilot | 02717fbb823a0af98bff50f552d0ba54496ab42f | [
"MIT"
] | 65 | 2019-07-27T11:27:02.000Z | 2022-02-03T09:10:38.000Z | selfdrive/controls/controlsd.py | robbo600/openpilot | 02717fbb823a0af98bff50f552d0ba54496ab42f | [
"MIT"
] | 41 | 2018-08-01T17:36:08.000Z | 2020-12-16T02:42:57.000Z | selfdrive/controls/controlsd.py | robbo600/openpilot | 02717fbb823a0af98bff50f552d0ba54496ab42f | [
"MIT"
] | 229 | 2019-07-27T20:31:02.000Z | 2021-09-21T11:02:49.000Z | #!/usr/bin/env python3
import os
from cereal import car, log
from common.numpy_fast import clip
from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import cereal.messaging as messaging
from selfdrive.config import Conversions as CV
from selfdrive.swaglog import cloudlog
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can
from selfdrive.controls.lib.lane_planner import CAMERA_OFFSET
from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise
from selfdrive.controls.lib.longcontrol import LongControl, STARTING_TARGET_SPEED
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.events import Events, ET
from selfdrive.controls.lib.alertmanager import AlertManager
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.controls.lib.longitudinal_planner import LON_MPC_STEP
from selfdrive.locationd.calibrationd import Calibration
from selfdrive.hardware import HARDWARE, TICI
LDW_MIN_SPEED = 31 * CV.MPH_TO_MS
LANE_DEPARTURE_THRESHOLD = 0.1
STEER_ANGLE_SATURATION_TIMEOUT = 1.0 / DT_CTRL
STEER_ANGLE_SATURATION_THRESHOLD = 2.5 # Degrees
SIMULATION = "SIMULATION" in os.environ
NOSENSOR = "NOSENSOR" in os.environ
IGNORE_PROCESSES = set(["rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned", "logcatd", "proclogd", "clocksd", "updated", "timezoned"])
ThermalStatus = log.DeviceState.ThermalStatus
State = log.ControlsState.OpenpilotState
PandaType = log.PandaState.PandaType
LongitudinalPlanSource = log.LongitudinalPlan.LongitudinalPlanSource
Desire = log.LateralPlan.Desire
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
EventName = car.CarEvent.EventName
class Controls:
def __init__(self, sm=None, pm=None, can_sock=None):
config_realtime_process(3, Priority.CTRL_HIGH)
# Setup sockets
self.pm = pm
if self.pm is None:
self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
'carControl', 'carEvents', 'carParams'])
self.sm = sm
if self.sm is None:
ignore = ['ubloxRaw', 'driverCameraState', 'managerState'] if SIMULATION else None
self.sm = messaging.SubMaster(['deviceState', 'pandaState', 'modelV2', 'liveCalibration', 'ubloxRaw',
'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
'roadCameraState', 'driverCameraState', 'managerState', 'liveParameters', 'radarState'], ignore_alive=ignore)
self.can_sock = can_sock
if can_sock is None:
can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
self.can_sock = messaging.sub_sock('can', timeout=can_timeout)
# wait for one pandaState and one CAN packet
print("Waiting for CAN messages...")
get_one_can(self.can_sock)
self.CI, self.CP = get_car(self.can_sock, self.pm.sock['sendcan'])
# read params
params = Params()
self.is_metric = params.get("IsMetric", encoding='utf8') == "1"
self.is_ldw_enabled = params.get("IsLdwEnabled", encoding='utf8') == "1"
community_feature_toggle = params.get("CommunityFeaturesToggle", encoding='utf8') == "1"
openpilot_enabled_toggle = params.get("OpenpilotEnabledToggle", encoding='utf8') == "1"
passive = params.get("Passive", encoding='utf8') == "1" or not openpilot_enabled_toggle
# detect sound card presence and ensure successful init
sounds_available = HARDWARE.get_sound_card_online()
car_recognized = self.CP.carName != 'mock'
# If stock camera is disconnected, we loaded car controls and it's not dashcam mode
controller_available = self.CP.enableCamera and self.CI.CC is not None and not passive and not self.CP.dashcamOnly
community_feature_disallowed = self.CP.communityFeature and not community_feature_toggle
self.read_only = not car_recognized or not controller_available or \
self.CP.dashcamOnly or community_feature_disallowed
if self.read_only:
self.CP.safetyModel = car.CarParams.SafetyModel.noOutput
# Write CarParams for radard and boardd safety mode
cp_bytes = self.CP.to_bytes()
params.put("CarParams", cp_bytes)
put_nonblocking("CarParamsCache", cp_bytes)
self.CC = car.CarControl.new_message()
self.AM = AlertManager()
self.events = Events()
self.LoC = LongControl(self.CP, self.CI.compute_gb)
self.VM = VehicleModel(self.CP)
if self.CP.lateralTuning.which() == 'pid':
self.LaC = LatControlPID(self.CP)
elif self.CP.lateralTuning.which() == 'indi':
self.LaC = LatControlINDI(self.CP)
elif self.CP.lateralTuning.which() == 'lqr':
self.LaC = LatControlLQR(self.CP)
self.state = State.disabled
self.enabled = False
self.active = False
self.can_rcv_error = False
self.soft_disable_timer = 0
self.v_cruise_kph = 255
self.v_cruise_kph_last = 0
self.mismatch_counter = 0
self.can_error_counter = 0
self.last_blinker_frame = 0
self.saturated_count = 0
self.distance_traveled = 0
self.last_functional_fan_frame = 0
self.events_prev = []
self.current_alert_types = [ET.PERMANENT]
self.logged_comm_issue = False
self.sm['liveCalibration'].calStatus = Calibration.CALIBRATED
self.sm['deviceState'].freeSpacePercent = 100
self.sm['driverMonitoringState'].events = []
self.sm['driverMonitoringState'].awarenessStatus = 1.
self.sm['driverMonitoringState'].faceDetected = False
self.startup_event = get_startup_event(car_recognized, controller_available)
if not sounds_available:
self.events.add(EventName.soundsUnavailable, static=True)
if community_feature_disallowed:
self.events.add(EventName.communityFeatureDisallowed, static=True)
if not car_recognized:
self.events.add(EventName.carUnrecognized, static=True)
# controlsd is driven by can recv, expected at 100Hz
self.rk = Ratekeeper(100, print_delay_threshold=None)
self.prof = Profiler(False) # off by default
def update_events(self, CS):
"""Compute carEvents from carState"""
self.events.clear()
self.events.add_from_msg(CS.events)
self.events.add_from_msg(self.sm['driverMonitoringState'].events)
# Handle startup event
if self.startup_event is not None:
self.events.add(self.startup_event)
self.startup_event = None
# Create events for battery, temperature, disk space, and memory
if self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError:
# at zero percent battery, while discharging, OP should not allowed
self.events.add(EventName.lowBattery)
if self.sm['deviceState'].thermalStatus >= ThermalStatus.red:
self.events.add(EventName.overheat)
if self.sm['deviceState'].freeSpacePercent < 7:
# under 7% of space free no enable allowed
self.events.add(EventName.outOfSpace)
if self.sm['deviceState'].memoryUsagePercent > 90:
self.events.add(EventName.lowMemory)
# Alert if fan isn't spinning for 5 seconds
if self.sm['pandaState'].pandaType in [PandaType.uno, PandaType.dos]:
if self.sm['pandaState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50:
if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0:
self.events.add(EventName.fanMalfunction)
else:
self.last_functional_fan_frame = self.sm.frame
# Handle calibration status
cal_status = self.sm['liveCalibration'].calStatus
if cal_status != Calibration.CALIBRATED:
if cal_status == Calibration.UNCALIBRATED:
self.events.add(EventName.calibrationIncomplete)
else:
self.events.add(EventName.calibrationInvalid)
# Handle lane change
if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange:
direction = self.sm['lateralPlan'].laneChangeDirection
if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \
(CS.rightBlindspot and direction == LaneChangeDirection.right):
self.events.add(EventName.laneChangeBlocked)
elif self.sm['lateralPlan'].autoLaneChangeEnabled and self.sm['lateralPlan'].autoLaneChangeTimer > 0:
self.events.add(EventName.autoLaneChange)
else:
if direction == LaneChangeDirection.left:
self.events.add(EventName.preLaneChangeLeft)
else:
self.events.add(EventName.preLaneChangeRight)
elif self.sm['lateralPlan'].laneChangeState in [LaneChangeState.laneChangeStarting,
LaneChangeState.laneChangeFinishing]:
self.events.add(EventName.laneChange)
if self.can_rcv_error or (not CS.canValid and self.sm.frame > 5 / DT_CTRL):
self.events.add(EventName.canError)
if (self.sm['pandaState'].safetyModel != self.CP.safetyModel and self.sm.frame > 2 / DT_CTRL) or \
self.mismatch_counter >= 200:
self.events.add(EventName.controlsMismatch)
if len(self.sm['radarState'].radarErrors):
self.events.add(EventName.radarFault)
elif not self.sm.valid['liveParameters']:
self.events.add(EventName.vehicleModelInvalid)
elif not self.sm.all_alive_and_valid() and \
[i for i in self.sm.alive if self.sm.alive[i] is False] != ['ubloxRaw']:
self.events.add(EventName.commIssue)
if not self.logged_comm_issue:
cloudlog.error(f"commIssue - valid: {self.sm.valid} - alive: {self.sm.alive}")
self.logged_comm_issue = True
else:
self.logged_comm_issue = False
if not self.sm['lateralPlan'].mpcSolutionValid:
self.events.add(EventName.plannerError)
if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR:
if self.sm.frame > 5 / DT_CTRL: # Give locationd some time to receive all the inputs
self.events.add(EventName.sensorDataInvalid)
if not self.sm['liveLocationKalman'].posenetOK:
self.events.add(EventName.posenetInvalid)
if not self.sm['liveLocationKalman'].deviceStable:
self.events.add(EventName.deviceFalling)
if log.PandaState.FaultType.relayMalfunction in self.sm['pandaState'].faults:
self.events.add(EventName.relayMalfunction)
if self.sm['longitudinalPlan'].fcw:
self.events.add(EventName.fcw)
# TODO: fix simulator
if not SIMULATION:
if not NOSENSOR or True:
if not self.sm.alive['ubloxRaw'] and (self.sm.frame > 10. / DT_CTRL):
self.events.add(EventName.gpsMalfunction)
elif not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000) and not TICI:
# Not show in first 1 km to allow for driving out of garage. This event shows after 5 minutes
self.events.add(EventName.noGps)
if not self.sm.all_alive(['roadCameraState', 'driverCameraState']) and (self.sm.frame > 5 / DT_CTRL):
self.events.add(EventName.cameraMalfunction)
if self.sm['modelV2'].frameDropPerc > 20:
self.events.add(EventName.modeldLagging)
# Check if all manager processes are running
not_running = set(p.name for p in self.sm['managerState'].processes if not p.running)
if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES):
self.events.add(EventName.processNotRunning)
# Only allow engagement with brake pressed when stopped behind another stopped car
if CS.brakePressed and self.sm['longitudinalPlan'].vTargetFuture >= STARTING_TARGET_SPEED \
and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3:
self.events.add(EventName.noTarget)
def data_sample(self):
"""Receive data from sockets and update carState"""
# Update carState from CAN
can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True)
CS = self.CI.update(self.CC, can_strs)
self.sm.update(0)
# Check for CAN timeout
if not can_strs:
self.can_error_counter += 1
self.can_rcv_error = True
else:
self.can_rcv_error = False
# When the panda and controlsd do not agree on controls_allowed
# we want to disengage openpilot. However the status from the panda goes through
# another socket other than the CAN messages and one can arrive earlier than the other.
# Therefore we allow a mismatch for two samples, then we trigger the disengagement.
if not self.enabled:
self.mismatch_counter = 0
if not self.sm['pandaState'].controlsAllowed and self.enabled:
self.mismatch_counter += 1
self.distance_traveled += CS.vEgo * DT_CTRL
return CS
def state_transition(self, CS):
"""Compute conditional state transitions and execute actions on state transitions"""
self.v_cruise_kph_last = self.v_cruise_kph
# if stock cruise is completely disabled, then we can use our own set speed logic
self.CP.enableCruise = self.CI.CP.enableCruise
if not self.CP.enableCruise:
self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.enabled, self.is_metric)
elif self.CP.enableCruise and CS.cruiseState.enabled:
self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH
# decrease the soft disable timer at every step, as it's reset on
# entrance in SOFT_DISABLING state
self.soft_disable_timer = max(0, self.soft_disable_timer - 1)
self.current_alert_types = [ET.PERMANENT]
# ENABLED, PRE ENABLING, SOFT DISABLING
if self.state != State.disabled:
# user and immediate disable always have priority in a non-disabled state
if self.events.any(ET.USER_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.USER_DISABLE)
elif self.events.any(ET.IMMEDIATE_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.IMMEDIATE_DISABLE)
else:
# ENABLED
if self.state == State.enabled:
if self.events.any(ET.SOFT_DISABLE):
self.state = State.softDisabling
self.soft_disable_timer = 300 # 3s
self.current_alert_types.append(ET.SOFT_DISABLE)
# SOFT DISABLING
elif self.state == State.softDisabling:
if not self.events.any(ET.SOFT_DISABLE):
# no more soft disabling condition, so go back to ENABLED
self.state = State.enabled
elif self.events.any(ET.SOFT_DISABLE) and self.soft_disable_timer > 0:
self.current_alert_types.append(ET.SOFT_DISABLE)
elif self.soft_disable_timer <= 0:
self.state = State.disabled
# PRE ENABLING
elif self.state == State.preEnabled:
if not self.events.any(ET.PRE_ENABLE):
self.state = State.enabled
else:
self.current_alert_types.append(ET.PRE_ENABLE)
# DISABLED
elif self.state == State.disabled:
if self.events.any(ET.ENABLE):
if self.events.any(ET.NO_ENTRY):
self.current_alert_types.append(ET.NO_ENTRY)
else:
if self.events.any(ET.PRE_ENABLE):
self.state = State.preEnabled
else:
self.state = State.enabled
self.current_alert_types.append(ET.ENABLE)
self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last)
# Check if actuators are enabled
self.active = self.state == State.enabled or self.state == State.softDisabling
if self.active:
self.current_alert_types.append(ET.WARNING)
# Check if openpilot is engaged
self.enabled = self.active or self.state == State.preEnabled
def state_control(self, CS):
"""Given the state, this function returns an actuators packet"""
lat_plan = self.sm['lateralPlan']
long_plan = self.sm['longitudinalPlan']
actuators = car.CarControl.Actuators.new_message()
if CS.leftBlinker or CS.rightBlinker:
self.last_blinker_frame = self.sm.frame
# State specific actions
if not self.active:
self.LaC.reset()
self.LoC.reset(v_pid=CS.vEgo)
long_plan_age = DT_CTRL * (self.sm.frame - self.sm.rcv_frame['longitudinalPlan'])
# no greater than dt mpc + dt, to prevent too high extraps
dt = min(long_plan_age, LON_MPC_STEP + DT_CTRL) + DT_CTRL
a_acc_sol = long_plan.aStart + (dt / LON_MPC_STEP) * (long_plan.aTarget - long_plan.aStart)
v_acc_sol = long_plan.vStart + dt * (a_acc_sol + long_plan.aStart) / 2.0
# Gas/Brake PID loop
actuators.gas, actuators.brake = self.LoC.update(self.active, CS, v_acc_sol, long_plan.vTargetFuture, a_acc_sol, self.CP)
# Steering PID loop and lateral MPC
actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(self.active, CS, self.CP, lat_plan)
# Check for difference between desired angle and angle for angle based control
angle_control_saturated = self.CP.steerControlType == car.CarParams.SteerControlType.angle and \
abs(actuators.steeringAngleDeg - CS.steeringAngleDeg) > STEER_ANGLE_SATURATION_THRESHOLD
if angle_control_saturated and not CS.steeringPressed and self.active:
self.saturated_count += 1
else:
self.saturated_count = 0
# Send a "steering required alert" if saturation count has reached the limit
if (lac_log.saturated and not CS.steeringPressed) or \
(self.saturated_count > STEER_ANGLE_SATURATION_TIMEOUT):
# Check if we deviated from the path
left_deviation = actuators.steer > 0 and lat_plan.dPathPoints[0] < -0.1
right_deviation = actuators.steer < 0 and lat_plan.dPathPoints[0] > 0.1
if left_deviation or right_deviation:
self.events.add(EventName.steerSaturated)
return actuators, v_acc_sol, a_acc_sol, lac_log
def publish_logs(self, CS, start_time, actuators, v_acc, a_acc, lac_log):
"""Send actuators and hud commands to the car, send controlsstate and MPC logging"""
CC = car.CarControl.new_message()
CC.enabled = self.enabled
CC.actuators = actuators
CC.cruiseControl.override = True
CC.cruiseControl.cancel = self.CP.enableCruise and not self.enabled and CS.cruiseState.enabled
# Some override values for Honda
# brake discount removes a sharp nonlinearity
brake_discount = (1.0 - clip(actuators.brake * 3., 0.0, 1.0))
speed_override = max(0.0, (self.LoC.v_pid + CS.cruiseState.speedOffset) * brake_discount)
CC.cruiseControl.speedOverride = float(speed_override if self.CP.enableCruise else 0.0)
CC.cruiseControl.accelOverride = self.CI.calc_accel_override(CS.aEgo, self.sm['longitudinalPlan'].aTarget, CS.vEgo, self.sm['longitudinalPlan'].vTarget)
CC.hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS)
CC.hudControl.speedVisible = self.enabled
CC.hudControl.lanesVisible = self.enabled
CC.hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead
right_lane_visible = self.sm['lateralPlan'].rProb > 0.5
left_lane_visible = self.sm['lateralPlan'].lProb > 0.5
CC.hudControl.rightLaneVisible = bool(right_lane_visible)
CC.hudControl.leftLaneVisible = bool(left_lane_visible)
recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0 # 5s blinker cooldown
ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \
and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED
meta = self.sm['modelV2'].meta
if len(meta.desirePrediction) and ldw_allowed:
l_lane_change_prob = meta.desirePrediction[Desire.laneChangeLeft - 1]
r_lane_change_prob = meta.desirePrediction[Desire.laneChangeRight - 1]
l_lane_close = left_lane_visible and (self.sm['modelV2'].laneLines[1].y[0] > -(1.08 + CAMERA_OFFSET))
r_lane_close = right_lane_visible and (self.sm['modelV2'].laneLines[2].y[0] < (1.08 - CAMERA_OFFSET))
CC.hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close)
CC.hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close)
if CC.hudControl.rightLaneDepart or CC.hudControl.leftLaneDepart:
self.events.add(EventName.ldw)
clear_event = ET.WARNING if ET.WARNING not in self.current_alert_types else None
alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric])
self.AM.add_many(self.sm.frame, alerts, self.enabled)
self.AM.process_alerts(self.sm.frame, clear_event)
CC.hudControl.visualAlert = self.AM.visual_alert
if not self.read_only:
# send car controls over can
can_sends = self.CI.apply(CC)
self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))
force_decel = (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \
(self.state == State.softDisabling)
steer_angle_rad = (CS.steeringAngleDeg - self.sm['lateralPlan'].angleOffsetDeg) * CV.DEG_TO_RAD
# controlsState
dat = messaging.new_message('controlsState')
dat.valid = CS.canValid
controlsState = dat.controlsState
controlsState.alertText1 = self.AM.alert_text_1
controlsState.alertText2 = self.AM.alert_text_2
controlsState.alertSize = self.AM.alert_size
controlsState.alertStatus = self.AM.alert_status
controlsState.alertBlinkingRate = self.AM.alert_rate
controlsState.alertType = self.AM.alert_type
controlsState.alertSound = self.AM.audible_alert
controlsState.canMonoTimes = list(CS.canMonoTimes)
controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan']
controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan']
controlsState.enabled = self.enabled
controlsState.active = self.active
controlsState.curvature = self.VM.calc_curvature(steer_angle_rad, CS.vEgo)
controlsState.state = self.state
controlsState.engageable = not self.events.any(ET.NO_ENTRY)
controlsState.longControlState = self.LoC.long_control_state
controlsState.vPid = float(self.LoC.v_pid)
controlsState.vCruise = float(self.v_cruise_kph)
controlsState.upAccelCmd = float(self.LoC.pid.p)
controlsState.uiAccelCmd = float(self.LoC.pid.i)
controlsState.ufAccelCmd = float(self.LoC.pid.f)
controlsState.steeringAngleDesiredDeg = float(self.LaC.angle_steers_des)
controlsState.vTargetLead = float(v_acc)
controlsState.aTarget = float(a_acc)
controlsState.cumLagMs = -self.rk.remaining * 1000.
controlsState.startMonoTime = int(start_time * 1e9)
controlsState.forceDecel = bool(force_decel)
controlsState.canErrorCounter = self.can_error_counter
if self.CP.lateralTuning.which() == 'pid':
controlsState.lateralControlState.pidState = lac_log
elif self.CP.lateralTuning.which() == 'lqr':
controlsState.lateralControlState.lqrState = lac_log
elif self.CP.lateralTuning.which() == 'indi':
controlsState.lateralControlState.indiState = lac_log
self.pm.send('controlsState', dat)
# carState
car_events = self.events.to_msg()
cs_send = messaging.new_message('carState')
cs_send.valid = CS.canValid
cs_send.carState = CS
cs_send.carState.events = car_events
self.pm.send('carState', cs_send)
# carEvents - logged every second or on change
if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev):
ce_send = messaging.new_message('carEvents', len(self.events))
ce_send.carEvents = car_events
self.pm.send('carEvents', ce_send)
self.events_prev = self.events.names.copy()
# carParams - logged every 50 seconds (> 1 per segment)
if (self.sm.frame % int(50. / DT_CTRL) == 0):
cp_send = messaging.new_message('carParams')
cp_send.carParams = self.CP
self.pm.send('carParams', cp_send)
# carControl
cc_send = messaging.new_message('carControl')
cc_send.valid = CS.canValid
cc_send.carControl = CC
self.pm.send('carControl', cc_send)
# copy CarControl to pass to CarInterface on the next iteration
self.CC = CC
def step(self):
start_time = sec_since_boot()
self.prof.checkpoint("Ratekeeper", ignore=True)
# Sample data from sockets and get a carState
CS = self.data_sample()
self.prof.checkpoint("Sample")
self.update_events(CS)
if not self.read_only:
# Update control state
self.state_transition(CS)
self.prof.checkpoint("State transition")
# Compute actuators (runs PID loops and lateral MPC)
actuators, v_acc, a_acc, lac_log = self.state_control(CS)
self.prof.checkpoint("State Control")
# Publish data
self.publish_logs(CS, start_time, actuators, v_acc, a_acc, lac_log)
self.prof.checkpoint("Sent")
def controlsd_thread(self):
while True:
self.step()
self.rk.monitor_time()
self.prof.display()
def main(sm=None, pm=None, logcan=None):
controls = Controls(sm, pm, logcan)
controls.controlsd_thread()
if __name__ == "__main__":
main()
| 43.195911 | 157 | 0.718015 |
6e1bd5b3e436c8dc8f42e923278a00e89398b042 | 7,695 | py | Python | src/utils/apps/msrtsimul.py | Fran89/seiscomp3 | a25d29966949769d2bce9c0d28db0a2128e00649 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2 | 2015-09-17T22:43:50.000Z | 2017-11-29T20:27:11.000Z | src/utils/apps/msrtsimul.py | Fran89/seiscomp3 | a25d29966949769d2bce9c0d28db0a2128e00649 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2 | 2016-04-26T00:03:09.000Z | 2017-12-05T02:24:50.000Z | src/utils/apps/msrtsimul.py | salichon/seiscomp3 | 4f7715f9ff9a35e7912c379ebf10446d0bceaeb2 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2022-01-13T02:49:31.000Z | 2022-01-13T02:49:31.000Z | #!/usr/bin/env python
import sys, os, time, datetime, calendar, stat
from getopt import getopt, GetoptError
from seiscomp import mseedlite as mseed
tstart = datetime.datetime.utcnow()
ifile = sys.stdin
verbosity = 0
speed = 1.
jump = 0.
test = False
mode = 'realtime'
def read_mseed_with_delays(delaydict,reciterable):
"""
Create an iterator which takes into account configurable realistic delays.
This function creates an iterator which returns one miniseed record at a time. Artificial delays can be introduced by using delaydict.
This function can be used to make simulations in real time more realistic when e.g. some stations have a much higher delay than others
due to narrow bandwidth communication channels etc.
A delaydict has the following data structure:
keys: XX.ABC (XX: network code, ABC: station code). The key "default" is a special value for the default delay.
values: Delay to be introduced in seconds
This function will rearrange the iterable object which has been used as input for rt_simul() so that it can again be used by rt_simul
but taking artificial delays into account.
"""
import time
import heapq
import itertools
heap = []
min_delay = 0
default_delay = 0
if 'default' in delaydict:
default_delay = delaydict['default']
reciterator = itertools.chain(reciterable)
rec = reciterator.next()
while rec:
rec_time = calendar.timegm(rec.end_time.timetuple())
delay_time = rec_time
stationname = "%s.%s" % (rec.net,rec.sta)
if stationname in delaydict:
delay_time = rec_time + delaydict[stationname]
else:
delay_time = rec_time + default_delay
heapq.heappush(heap,(delay_time,rec))
toprectime = heap[0][0]
if toprectime - min_delay < rec_time:
topelement = heapq.heappop(heap)
yield topelement
rec = reciterator.next()
while h:
topelement = heapq.heappop(heap)
yield topelement
def rt_simul(f, speed=1., jump=0., delaydict = None):
"""
Iterator to simulate "real-time" MSeed input
At startup, the first MSeed record is read. The following records are
read in pseudo-real-time relative to the time of the first record,
resulting in data flowing at realistic speed. This is useful e.g. for
demonstrating real-time processing using real data of past events.
The data in the input file may be multiplexed, but *must* be sorted by
time, e.g. using 'mssort'.
"""
import time
rtime = time.time()
etime = None
skipping = True
record_iterable = mseed.Input(f)
if delaydict:
record_iterable = read_mseed_with_delays(delaydict,record_iterable)
for rec in record_iterable:
rec_time = None
if delaydict:
rec_time = rec[0]
rec = rec[1]
else:
rec_time = calendar.timegm(rec.end_time.timetuple())
if etime is None:
etime = rec_time
if skipping:
if (rec_time - etime) / 60.0 < jump:
continue
etime = rec_time
skipping = False
tmax = etime + speed * (time.time() - rtime)
last_sample_time = rec.begin_time + datetime.timedelta(microseconds = 1000000.0 * (rec.nsamp / rec.fsamp))
last_sample_time = calendar.timegm(last_sample_time.timetuple())
if last_sample_time > tmax:
time.sleep((last_sample_time - tmax + 0.001) / speed)
yield rec
usage_info = """
msrtsimul - read sorted (and possibly multiplexed) MiniSEED files and
write the individual records in pseudo-real-time. This is useful
e.g. for testing and simulating data acquisition. Output
is $SEISCOMP_ROOT/var/run/seedlink/mseedfifo unless -c is used.
Usage: msrtsimul.py [options] [file]
Options:
-c, --stdout write on standard output
-d, --delays add artificial delays
-s, --speed speed factor (float)
-j, --jump minutes to skip (float)
--test test mode
-m --mode choose between 'realtime' and 'historic'
-v, --verbose verbose mode
-h, --help display this help message
"""
def usage(exitcode=0):
sys.stderr.write(usage_info)
sys.exit(exitcode)
try:
opts, args = getopt(sys.argv[1:], "cd:s:j:hvm:",
[ "stdout","delays=", "speed=", "jump=", "test", "verbose", "help","mode=" ])
except GetoptError:
usage(exitcode=1)
out_channel = None
delays = None
for flag, arg in opts:
if flag in ("-c", "--stdout"): out_channel = sys.stdout
elif flag in ("-d","--delays"): delays = arg
elif flag in ("-s", "--speed"): speed = float(arg)
elif flag in ("-j", "--jump"): jump = float(arg)
elif flag in ("-h", "--help"): usage(exitcode=0)
elif flag in ("-m", "--mode"): mode = arg
elif flag in ("-v", "--verbose"): verbosity += 1
elif flag in ("--test"): test = True
else: usage(exitcode=1)
if len(args) == 0:
pass
elif len(args) == 1:
fname = args[0]
if fname != "-":
ifile = file(fname)
else: usage(exitcode=1)
if out_channel is None:
try: sc_root = os.environ["SEISCOMP_ROOT"]
except:
sys.stderr.write("SEISCOMP_ROOT environment variable is not set\n")
sys.exit(1)
mseed_fifo = os.path.join(sc_root, "var", "run", "seedlink", "mseedfifo")
if not os.path.exists(mseed_fifo):
sys.stderr.write("""\
ERROR: %s does not exist.
In order to push the records to SeedLink, it needs to run and must be configured for real-time playback.
""" % mseed_fifo)
sys.exit(1)
if not stat.S_ISFIFO(os.stat(mseed_fifo).st_mode):
sys.stderr.write("""\
ERROR: %s is not a named pipe
Check if SeedLink is running and configured for real-time playback.
""" % mseed_fifo)
sys.exit(1)
try: out_channel = open(mseed_fifo, "w")
except Exception, e:
sys.stderr.write("%s\n" % str(e))
sys.exit(1)
try:
stime = time.time()
delaydict = None
if delays:
delaydict = dict()
try:
f = open(delays, 'r')
for line in f:
content = line.split(':')
if len(content) != 2:
raise Exception("Could not parse a line in file %s: %s\n" % (delays, line))
delaydict[content[0].strip()] = float(content[1].strip())
except: pass
input = rt_simul(ifile, speed=speed, jump=jump, delaydict=delaydict)
#input = rt_simul(ifile, speed=speed, jump=jump)
time_diff = None
sys.stderr.write("Starting msrtsimul at %s\n" % datetime.datetime.utcnow())
for rec in input:
if time_diff is None:
time_diff = datetime.datetime.utcnow() - rec.begin_time - \
datetime.timedelta(microseconds = 1000000.0 * (rec.nsamp / rec.fsamp))
if mode == 'realtime':
rec.begin_time += time_diff
if verbosity:
sys.stderr.write("%s_%s %7.2f %s %7.2f\n" % (rec.net, rec.sta, (time.time() - stime), str(rec.begin_time),
time.time() - calendar.timegm(rec.begin_time.timetuple())))
#sys.stderr.write("%s_%s %7.2f %s\n" % (rec.net, rec.sta, (time.time()-stime), str(rec.begin_time)))
if not test:
rec.write(out_channel, 9)
out_channel.flush()
except KeyboardInterrupt:
pass
except Exception, e:
sys.stderr.write("Exception: %s\n" % str(e))
sys.exit(1)
| 34.352679 | 142 | 0.610136 |
538375239185a89c33391cd442a2b9e11aad757c | 1,073 | py | Python | src/olympia/amo/urls.py | jpetto/olympia | f4e9badac9634657068dfbd4733ab5d17798e3f6 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/amo/urls.py | jpetto/olympia | f4e9badac9634657068dfbd4733ab5d17798e3f6 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/amo/urls.py | jpetto/olympia | f4e9badac9634657068dfbd4733ab5d17798e3f6 | [
"BSD-3-Clause"
] | null | null | null | from waffle.views import wafflejs
from django.conf.urls import include, patterns, url
from django.views.decorators.cache import never_cache
from . import install, views
services_patterns = patterns(
'',
url('^monitor(.json)?$', never_cache(views.monitor),
name='amo.monitor'),
url('^loaded$', never_cache(views.loaded), name='amo.loaded'),
url('^csp/report$', views.cspreport, name='amo.csp.report'),
url('^pfs.php$', views.plugin_check_redirect, name='api.plugincheck'),
url('^install.php$', install.install, name='api.install'),
)
urlpatterns = patterns(
'',
url('^robots.txt$', views.robots, name='robots.txt'),
url('^contribute.json$', views.contribute, name='contribute.json'),
url(r'^wafflejs$', wafflejs, name='wafflejs'),
('^services/', include(services_patterns)),
url('^__version__$', views.version, name='version.json'),
url('^opensearch.xml$', 'olympia.api.views.render_xml',
{'template': 'amo/opensearch.xml'},
name='amo.opensearch'),
)
| 33.53125 | 74 | 0.641193 |
4aba8007291eab50ccc81943a69e70ca93338e23 | 439 | py | Python | module/execute.py | NORMA-Company/atear-beta | 245ec8d1d13aea2d0acfa0481e5814b80b84e333 | [
"Apache-2.0"
] | 254 | 2015-10-06T04:26:19.000Z | 2022-03-05T00:56:01.000Z | module/execute.py | NORMA-Company/atear-beta | 245ec8d1d13aea2d0acfa0481e5814b80b84e333 | [
"Apache-2.0"
] | 14 | 2015-09-16T02:59:55.000Z | 2021-11-18T14:13:43.000Z | module/execute.py | NORMA-Company/atear-beta | 245ec8d1d13aea2d0acfa0481e5814b80b84e333 | [
"Apache-2.0"
] | 104 | 2015-09-30T12:46:38.000Z | 2022-01-20T23:56:57.000Z | from subprocess import Popen, PIPE
def execute(command, wait=True):
if wait == True:
proc = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
retval = proc.returncode
else: # wait == False
_dn = open('/dev/null','wb')
proc = Popen(command, stdout=_dn, stderr=_dn)
retval = 1
out = None
err = None
return proc, retval, out, err
| 25.823529 | 67 | 0.583144 |
4ccd18598d524039f48d3767d7b4b9ef38a0b44a | 13,318 | py | Python | backend/api/python_http_client/kfp_server_api/configuration.py | Strasser-Pablo/pipelines | a1d513eb412f3ffd44edf82af2fa7edb05c3b952 | [
"Apache-2.0"
] | null | null | null | backend/api/python_http_client/kfp_server_api/configuration.py | Strasser-Pablo/pipelines | a1d513eb412f3ffd44edf82af2fa7edb05c3b952 | [
"Apache-2.0"
] | null | null | null | backend/api/python_http_client/kfp_server_api/configuration.py | Strasser-Pablo/pipelines | a1d513eb412f3ffd44edf82af2fa7edb05c3b952 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubeflow Pipelines API
This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition.
Contact: kubeflow-pipelines@google.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class Configuration(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param host: Base url
:param api_key: Dict to store API key(s).
Each entry in the dict specifies an API key.
The dict key is the name of the security scheme in the OAS specification.
The dict value is the API key secret.
:param api_key_prefix: Dict to store API prefix (e.g. Bearer)
The dict key is the name of the security scheme in the OAS specification.
The dict value is an API key prefix when generating the auth data.
:param username: Username for HTTP basic authentication
:param password: Password for HTTP basic authentication
:param discard_unknown_keys: Boolean value indicating whether to discard
unknown properties. A server may send a response that includes additional
properties that are not known by the client in the following scenarios:
1. The OpenAPI document is incomplete, i.e. it does not match the server
implementation.
2. The client was generated using an older version of the OpenAPI document
and the server has been upgraded since then.
If a schema in the OpenAPI document defines the additionalProperties attribute,
then all undeclared properties received by the server are injected into the
additional properties map. In that case, there are undeclared properties, and
nothing to discard.
:Example:
API Key Authentication Example.
Given the following security scheme in the OpenAPI specification:
components:
securitySchemes:
cookieAuth: # name for the security scheme
type: apiKey
in: cookie
name: JSESSIONID # cookie name
You can programmatically set the cookie:
conf = kfp_server_api.Configuration(
api_key={'cookieAuth': 'abc123'}
api_key_prefix={'cookieAuth': 'JSESSIONID'}
)
The following cookie will be added to the HTTP request:
Cookie: JSESSIONID abc123
"""
_default = None
def __init__(self, host="http://localhost",
api_key=None, api_key_prefix=None,
username=None, password=None,
discard_unknown_keys=False,
):
"""Constructor
"""
self.host = host
"""Default Base url
"""
self.temp_folder_path = None
"""Temp file folder for downloading files
"""
# Authentication Settings
self.api_key = {}
if api_key:
self.api_key = api_key
"""dict to store API key(s)
"""
self.api_key_prefix = {}
if api_key_prefix:
self.api_key_prefix = api_key_prefix
"""dict to store API prefix (e.g. Bearer)
"""
self.refresh_api_key_hook = None
"""function hook to refresh API key if expired
"""
self.username = username
"""Username for HTTP basic authentication
"""
self.password = password
"""Password for HTTP basic authentication
"""
self.discard_unknown_keys = discard_unknown_keys
self.logger = {}
"""Logging Settings
"""
self.logger["package_logger"] = logging.getLogger("kfp_server_api")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
"""Log format
"""
self.logger_stream_handler = None
"""Log stream handler
"""
self.logger_file_handler = None
"""Log file handler
"""
self.logger_file = None
"""Debug file location
"""
self.debug = False
"""Debug switch
"""
self.verify_ssl = True
"""SSL/TLS verification
Set this to false to skip verifying SSL certificate when calling API
from https server.
"""
self.ssl_ca_cert = None
"""Set this to customize the certificate file to verify the peer.
"""
self.cert_file = None
"""client certificate file
"""
self.key_file = None
"""client key file
"""
self.assert_hostname = None
"""Set this to True/False to enable/disable SSL hostname verification.
"""
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
"""urllib3 connection pool's maximum number of connections saved
per pool. urllib3 uses 1 connection as default value, but this is
not the best value when you are making a lot of possibly parallel
requests to the same host, which is often the case here.
cpu_count * 5 is used as default value to increase performance.
"""
self.proxy = None
"""Proxy URL
"""
self.proxy_headers = None
"""Proxy headers
"""
self.safe_chars_for_path_param = ''
"""Safe chars for path_param
"""
self.retries = None
"""Adding retries to override urllib3 default value 3
"""
# Disable client side validation
self.client_side_validation = True
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k not in ('logger', 'logger_file_handler'):
setattr(result, k, copy.deepcopy(v, memo))
# shallow copy of loggers
result.logger = copy.copy(self.logger)
# use setters to configure loggers
result.logger_file = self.logger_file
result.debug = self.debug
return result
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
@classmethod
def set_default(cls, default):
"""Set default instance of configuration.
It stores default configuration, which can be
returned by get_default_copy method.
:param default: object of Configuration
"""
cls._default = copy.deepcopy(default)
@classmethod
def get_default_copy(cls):
"""Return new instance of configuration.
This method returns newly created, based on default constructor,
object of Configuration class or returns a copy of default
configuration passed by the set_default method.
:return: The configuration object.
"""
if cls._default is not None:
return copy.deepcopy(cls._default)
return Configuration()
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if self.refresh_api_key_hook is not None:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
username = ""
if self.username is not None:
username = self.username
password = ""
if self.password is not None:
password = self.password
return urllib3.util.make_headers(
basic_auth=username + ':' + password
).get('authorization')
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
auth = {}
if 'authorization' in self.api_key:
auth['Bearer'] = {
'type': 'api_key',
'in': 'header',
'key': 'authorization',
'value': self.get_api_key_with_prefix('authorization')
}
return auth
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: 1.7.0-alpha.1\n"\
"SDK Package Version: 1.7.0-alpha.1".\
format(env=sys.platform, pyversion=sys.version)
def get_host_settings(self):
"""Gets an array of host settings
:return: An array of host settings
"""
return [
{
'url': "/",
'description': "No description provided",
}
]
def get_host_from_settings(self, index, variables=None):
"""Gets host URL based on the index and variables
:param index: array index of the host settings
:param variables: hash of variable and the corresponding value
:return: URL based on host settings
"""
variables = {} if variables is None else variables
servers = self.get_host_settings()
try:
server = servers[index]
except IndexError:
raise ValueError(
"Invalid index {0} when selecting the host settings. "
"Must be less than {1}".format(index, len(servers)))
url = server['url']
# go through variables and replace placeholders
for variable_name, variable in server['variables'].items():
used_value = variables.get(
variable_name, variable['default_value'])
if 'enum_values' in variable \
and used_value not in variable['enum_values']:
raise ValueError(
"The variable `{0}` in the host URL has invalid value "
"{1}. Must be {2}.".format(
variable_name, variables[variable_name],
variable['enum_values']))
url = url.replace("{" + variable_name + "}", used_value)
return url
| 32.965347 | 124 | 0.602343 |
bf2159b6397b6bd471f00e6631b81cba3d516ee0 | 1,510 | py | Python | src/purei9_unofficial/util.py | natoopotato/purei9_unofficial | db2f764cbb40ee876f1e87d7e9b93a1ea8ed949b | [
"MIT"
] | 14 | 2021-06-30T23:37:55.000Z | 2022-03-18T16:21:38.000Z | src/purei9_unofficial/util.py | natoopotato/purei9_unofficial | db2f764cbb40ee876f1e87d7e9b93a1ea8ed949b | [
"MIT"
] | 14 | 2021-04-04T20:49:34.000Z | 2022-03-18T16:30:02.000Z | src/purei9_unofficial/util.py | natoopotato/purei9_unofficial | db2f764cbb40ee876f1e87d7e9b93a1ea8ed949b | [
"MIT"
] | 3 | 2021-08-31T07:50:44.000Z | 2021-11-13T22:21:45.000Z | import logging
import time
import requests
import requests.auth
logger = logging.getLogger(__name__)
def do_http(method, url, retries=2, **kwargs):
    """Perform an HTTP request, retrying on any failure.

    The request is attempted up to ``retries + 1`` times in total; any
    exception (connection error or non-2xx status from ``raise_for_status``)
    triggers another attempt.  The last exception is re-raised once no
    retries are left.  Response bodies containing "accessToken" are hidden
    from the debug log.

    :param method: HTTP verb, e.g. "GET".
    :param url: URL to request.
    :param retries: number of additional attempts after the first failure.
    :param kwargs: forwarded to ``requests.request``.
    :return: the successful ``requests.Response``.
    """
    last_error = None
    for _ in range(retries + 1):
        try:
            logger.debug("HTTP " + method + " " + url)
            response = requests.request(method, url, timeout=10, **kwargs)

            # Hide access tokens from log
            if response.text:
                if "accessToken" in response.text:
                    logger.debug("HTTP " + str(response.status_code) + " " + str(response) + " " + "(sensitive data not shown)")
                else:
                    logger.debug("HTTP " + str(response.status_code) + " " + str(response) + " " + response.text)
            else:
                logger.debug("HTTP " + str(response.status_code) + " " + str(response) + " " + "-")

            response.raise_for_status()
            return response
        except Exception as exc:
            # Previously the exception was bound to the same name ("r") as
            # the response, and retries were implemented by recursion, which
            # nested the tracebacks; a plain loop keeps them flat.
            last_error = exc

    logger.error("Giving up due to no left retries. Wrong credentials?")
    raise last_error
class CachedData():
    """Base class that caches the result of an expensive lookup.

    Subclasses implement ``_getinfo_inner``; ``_getinfo`` then returns a
    cached value until it is older than ``maxage`` seconds or
    ``_mark_changed`` is called.
    """
    def __init__(self, maxage=5):
        # Start with an invalidated cache so the first _getinfo() computes.
        self._mark_changed()
        self._cache_maxage = maxage

    def _mark_changed(self):
        """Invalidate the cached value so the next _getinfo() recomputes."""
        self._cache_data = None
        self._cache_time = time.time()

    def _getinfo(self):
        """Return cached data, refreshing via _getinfo_inner() when stale."""
        # Use an identity check: "!= None" would invoke __eq__ on the cached
        # object and could misbehave for types with custom equality.
        if self._cache_data is not None and time.time() - self._cache_time < self._cache_maxage:
            return self._cache_data
        self._cache_data = self._getinfo_inner()
        self._cache_time = time.time()
        return self._cache_data
48aa65da4bfa66088fe9ba9fdbb455946bd86bb2 | 2,846 | py | Python | mysite/settings.py | stevenpi/Link.Python.Django.OfficialTutorial | 06be10b8ef7d93c4aab7a0ba21abb307aa7d504a | [
"MIT"
] | null | null | null | mysite/settings.py | stevenpi/Link.Python.Django.OfficialTutorial | 06be10b8ef7d93c4aab7a0ba21abb307aa7d504a | [
"MIT"
] | null | null | null | mysite/settings.py | stevenpi/Link.Python.Django.OfficialTutorial | 06be10b8ef7d93c4aab7a0ba21abb307aa7d504a | [
"MIT"
] | null | null | null | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-x$2f5x96451nrryjawa+*4%#i1x=^bz*gebs*2ib(f%1e*2e!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is fine while DEBUG is on; must list served hosts in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'polls.apps.PollsConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
4a361fbf6ac955c8c5650d4d5f93e532d62e0057 | 274 | py | Python | lib_ddos_simulator/ddos_simulators/__init__.py | jfuruness/lib_ddos_simulator | 2d860fd3f35f4c25262f5269251eed89975f95e8 | [
"BSD-4-Clause"
] | 1 | 2020-04-01T22:42:36.000Z | 2020-04-01T22:42:36.000Z | lib_ddos_simulator/ddos_simulators/__init__.py | jfuruness/lib_ddos_simulator | 2d860fd3f35f4c25262f5269251eed89975f95e8 | [
"BSD-4-Clause"
] | null | null | null | lib_ddos_simulator/ddos_simulators/__init__.py | jfuruness/lib_ddos_simulator | 2d860fd3f35f4c25262f5269251eed89975f95e8 | [
"BSD-4-Clause"
] | 1 | 2020-02-16T17:55:46.000Z | 2020-02-16T17:55:46.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This package runs a DDOS simulation"""
__Lisence__ = "BSD"
__maintainer__ = "Justin Furuness"
__email__ = "jfuruness@gmail.com, agorbenko97@gmail.com"
__status__ = "Development"
from .ddos_simulator import DDOS_Simulator
| 22.833333 | 56 | 0.737226 |
a28a4fba7ec624a5471f35cf346f726d71b5e038 | 2,424 | py | Python | src/WebServer/part3/webserver3d.py | rvats/SystemDesign | 425e71c02a9f107f84ed019d9f84f97c638026b8 | [
"MIT"
] | null | null | null | src/WebServer/part3/webserver3d.py | rvats/SystemDesign | 425e71c02a9f107f84ed019d9f84f97c638026b8 | [
"MIT"
] | null | null | null | src/WebServer/part3/webserver3d.py | rvats/SystemDesign | 425e71c02a9f107f84ed019d9f84f97c638026b8 | [
"MIT"
] | null | null | null | ###########################################################################
# Concurrent server - webserver3d.py #
# #
# Tested with Python 2.7.9 & Python 3.4 on Ubuntu 14.04 & Mac OS X #
# #
# - Parent and child processes DO NOT close duplicate descriptors #
# - Client connections are not terminated #
# - Server might run out of descriptors #
# * Set a limit of open files to 256 ($ ulimit -n 256) #
# * Use $ python client3.py to simulate the behavior #
# - OMG, Zombies!!! Server might run out of processes #
# * $ curl and $ ps to see zombies #
# * Set max user processes to 400 ($ ulimit -u 400) #
# * Use $ python client3.py to simulate the behavior #
# #
###########################################################################
import os
import socket
# Bind to all interfaces ('') on port 8679; HOST/PORT kept for log messages.
SERVER_ADDRESS = (HOST, PORT) = '', 8679
# Backlog size passed to listen().
REQUEST_QUEUE_SIZE = 5
def handle_request(client_connection):
    """Read one request from the client and answer with a fixed HTTP response."""
    # Up to 1 KiB of the request is read but its contents are ignored.
    client_connection.recv(1024)
    response_lines = [b"HTTP/1.1 200 OK", b"Hello, World!", b""]
    client_connection.sendall(b"\n".join(response_lines))
def serve_forever():
    """Forking concurrent server.

    NOTE: per the file header, this version deliberately demonstrates
    resource bugs: the parent never closes its duplicate of the client
    socket (descriptor leak, connections never terminate) and never reaps
    its children (zombie processes accumulate).
    """
    listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts without "address already in use".
    listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listen_socket.bind(SERVER_ADDRESS)
    listen_socket.listen(REQUEST_QUEUE_SIZE)
    print('Serving HTTP on port {port} ...'.format(port=PORT))
    clients = []
    while True:
        client_connection, client_address = listen_socket.accept()
        # store the reference otherwise it's garbage collected
        # on the next loop run
        clients.append(client_connection)
        # Fork one child per connection; no os.wait() anywhere, so exited
        # children remain as zombies (the demonstrated defect).
        pid = os.fork()
        if pid == 0:  # child
            listen_socket.close()  # close child copy
            handle_request(client_connection)
            client_connection.close()
            os._exit(0)  # child exits here
        else:  # parent
            # client_connection.close()
            print(len(clients))
if __name__ == '__main__':
    serve_forever()
| 41.084746 | 75 | 0.494224 |
0e9dedabfc85d729510abe7dc08fdbd5240f708d | 16,179 | py | Python | src/wordpress.py | nunogrl/sceptre-wordpress-example | c8b3e2b2a11c9a6de57f3b0b700de304eddbcad3 | [
"Apache-2.0"
] | 1 | 2018-10-16T10:04:51.000Z | 2018-10-16T10:04:51.000Z | src/wordpress.py | DalavanCloud/sceptre-wordpress-example | c8b3e2b2a11c9a6de57f3b0b700de304eddbcad3 | [
"Apache-2.0"
] | null | null | null | src/wordpress.py | DalavanCloud/sceptre-wordpress-example | c8b3e2b2a11c9a6de57f3b0b700de304eddbcad3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from troposphere import Base64, FindInMap, GetAtt, Join, Output
from troposphere import Parameter, Ref, Tags
from constants import *
import troposphere.ec2 as ec2
import troposphere.route53 as route53
import troposphere.elasticloadbalancing as elb
import troposphere.cloudwatch as cloudwatch
import troposphere.autoscaling as autoscaling
import troposphere.cloudformation as cloudformation
from base import CloudformationAbstractBaseClass
class WordpressASG(CloudformationAbstractBaseClass):
    """Troposphere template for a Wordpress web tier: parameters, an ELB
    with a Route53 CNAME, an auto-scaling group of EFS-backed Wordpress
    instances, scaling policies and CPU alarms."""

    def __init__(self, sceptre_user_data):
        # super(self.__class__, ...) would recurse infinitely if this class
        # were ever subclassed; name the class explicitly (Python 2 style,
        # matching the rest of this file).
        super(WordpressASG, self).__init__()
        self.template.add_description("""Wordpress Web ASG""")
        self.add_parameters()
        self.add_mapping()
        self.add_elb()
        self.add_resources()
        self.add_outputs()

    def add_mapping(self):
        """Region -> Ubuntu 16.04 AMI mapping used by the launch config."""
        self.template.add_mapping("AWSRegion2AMI", UBUNTU_16_AMI)

    def add_parameters(self):
        """Declare all template parameters (network, DB, EFS, ASG sizing)."""
        t = self.template

        self.VpcId = t.add_parameter(Parameter(
            "VpcId",
            Description="VpcId",
            Type="AWS::EC2::VPC::Id",
        ))

        self.Hostname = t.add_parameter(Parameter(
            "Hostname",
            Type="String",
            Default="wordpress",
            AllowedPattern="[\\x20-\\x7E]*",
            ConstraintDescription="can contain only ASCII characters.",
        ))

        self.Domain = t.add_parameter(Parameter(
            "Domain",
            Type="String",
            Default="lab.cloudreach.com",
            AllowedPattern="[\\x20-\\x7E]*",
            ConstraintDescription="can contain only ASCII characters.",
        ))

        self.RDSEndpoint = t.add_parameter(Parameter(
            "RDSEndpoint",
            Type="String",
            AllowedPattern="[\\x20-\\x7E]*",
            ConstraintDescription="can contain only ASCII characters.",
        ))

        self.DBName = t.add_parameter(Parameter(
            "DBName",
            Type="String",
            Description="DB Name",
            Default="mydb",
            MinLength="1",
            AllowedPattern="[a-zA-Z0-9]*",
            MaxLength="64",
            ConstraintDescription="Must be alphanumeric string",
        ))

        self.DBPass = t.add_parameter(Parameter(
            "DBPass",
            MinLength="8",
            Type="String",
            NoEcho=True,
            Description="The database admin account password",
            MaxLength="41",
        ))

        self.FileSystemID = t.add_parameter(Parameter(
            "FileSystemID",
            Type="String",
            Description="EFS Id fs-xxxxxxx",
            MinLength="1",
            MaxLength="64",
            ConstraintDescription="Must be a valid EFS FileSystemID",
        ))

        self.DBUser = t.add_parameter(Parameter(
            "DBUser",
            ConstraintDescription=(
                "must begin with a letter and contain only alphanumeric "
                "characters."),
            Description="Username for MySQL database access",
            MinLength="1",
            AllowedPattern="[a-zA-Z][a-zA-Z0-9]*",
            NoEcho=True,
            MaxLength="80",
            Type="String",
        ))

        self.KeyName = t.add_parameter(Parameter(
            "KeyName",
            ConstraintDescription=(
                "must be the name of an existing EC2 KeyPair."),
            Type="AWS::EC2::KeyPair::KeyName",
            Description=(
                "Name of an existing EC2 KeyPair to enable SSH access to the "
                "instances"),
        ))

        self.Subnet1 = self.template.add_parameter(Parameter(
            "Subnet1",
            Type="AWS::EC2::Subnet::Id",
            Description="Subnet1 ID",
        ))

        self.Subnet2 = self.template.add_parameter(Parameter(
            "Subnet2",
            Type="AWS::EC2::Subnet::Id",
            Description="Subnet2 ID",
        ))

        self.AvailabilityZone1 = t.add_parameter(Parameter(
            "AvailabilityZone1",
            Default="eu-west-1a",
            Type="String",
            Description="First AZ to use for PublicSubnet1/PrivateSubnet1.",
        ))

        self.AvailabilityZone2 = t.add_parameter(Parameter(
            "AvailabilityZone2",
            Default="eu-west-1b",
            Type="String",
            Description="Second AZ to use for PublicSubnet2/PrivateSubnet2.",
        ))

        self.InstanceType = t.add_parameter(Parameter(
            "InstanceType",
            Default="t2.micro",
            ConstraintDescription="must be a valid EC2 instance type.",
            Type="String",
            Description="Instance type",
        ))

        self.WebServerCapacity = t.add_parameter(Parameter(
            "WebServerCapacity",
            # Typo fix: "nuber" -> "number" in the user-facing description.
            Description="The initial number of WebServer instances",
            Default="2",
            Type="Number",
            MaxValue="20",
            MinValue="1",
            ConstraintDescription="must be between 1 and 20 EC2 instances.",
        ))

        self.WebSecurityGroup = t.add_parameter(Parameter(
            "WebSecurityGroup",
            Description="Web SG",
            Type="AWS::EC2::SecurityGroup::Id",
        ))

        self.ElbSecurityGroup = t.add_parameter(Parameter(
            "ElbSecurityGroup",
            Description="ELB SG",
            Type="AWS::EC2::SecurityGroup::Id",
        ))

    def add_elb(self):
        """Classic ELB across both subnets plus a Route53 CNAME to it."""
        self.ElasticLoadBalancer = self.template.add_resource(elb.LoadBalancer(
            "ElbWeb",
            Subnets=[Ref(self.Subnet1), Ref(self.Subnet2)],
            Listeners=[{"InstancePort": "80",
                        "LoadBalancerPort": "80", "Protocol": "HTTP"}],
            CrossZone="true",
            LoadBalancerName=Join("-", ["elb", Ref(self.Project)]),
            SecurityGroups=[Ref(self.ElbSecurityGroup)],
            ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
                Enabled=True,
                Timeout=300,
            ),
            HealthCheck=elb.HealthCheck(
                HealthyThreshold="3",
                Interval="30",
                Target="HTTP:80/",
                Timeout="5",
                UnhealthyThreshold="5",
            ),
            Tags=Tags(
                Name=Join("-", ["ELB", Ref(self.Project)]),
                Environment=Ref(self.Environment),
            ),
        ))

        self.ELBcname = self.template.add_resource(route53.RecordSetType(
            "ELBcname",
            HostedZoneName=Join("", [Ref(self.Domain), "."]),
            Comment="CNAME to Web ELB",
            Name=Join(".", [Ref(self.Hostname), Ref(self.Domain)]),
            Type="CNAME",
            TTL="60",
            ResourceRecords=[GetAtt(self.ElasticLoadBalancer, "DNSName")]
        ))

    def add_resources(self):
        """Launch configuration (cfn-init Wordpress install), ASG, scaling
        policies and the CPU alarms that drive them."""
        # cfn-init metadata: install Apache/PHP, unpack Wordpress, and write
        # wp-config.php with the DB parameters substituted in.
        metadata = {
            "AWS::CloudFormation::Init": {
                "configSets": {
                    "wordpress_install": [
                        "install_wordpress"]
                },
                "install_wordpress": {
                    "packages": {
                        "apt": {
                            "apache2": [],
                            "php": [],
                            "php-mysql": [],
                            "php7.0": [],
                            "php7.0-mysql": [],
                            "libapache2-mod-php7.0": [],
                            "php7.0-cli": [],
                            "php7.0-cgi": [],
                            "php7.0-gd": [],
                            "mysql-client": [],
                            "sendmail": []
                        }
                    },
                    "sources": {
                        "/var/www/html": "http://wordpress.org/latest.tar.gz"
                    },
                    "files": {
                        "/tmp/create-wp-config": {
                            "content": {
                                "Fn::Join": ["", [
                                    "#!/bin/bash\n",
                                    "cp /var/www/html/wordpress/wp-config-sample.php /var/www/html/wordpress/wp-config.php\n",
                                    "sed -i \"s/'database_name_here'/'", Ref(
                                        self.DBName), "'/g\" wp-config.php\n",
                                    "sed -i \"s/'username_here'/'", Ref(
                                        self.DBUser), "'/g\" wp-config.php\n",
                                    "sed -i \"s/'password_here'/'", Ref(
                                        self.DBPass), "'/g\" wp-config.php\n",
                                    "sed -i \"s/'localhost'/'", Ref(
                                        self.RDSEndpoint), "'/g\" wp-config.php\n"
                                ]]
                            },
                            "mode": "000500",
                            "owner": "root",
                            "group": "root"
                        }
                    },
                    "commands": {
                        "01_configure_wordpress": {
                            "command": "/tmp/create-wp-config",
                            "cwd": "/var/www/html/wordpress"
                        }
                    }
                }
            }
        }

        # Wait handle/condition so the stack only completes once an instance
        # signals that the Wordpress bootstrap finished.
        self.WaitHandle = self.template.add_resource(cloudformation.WaitConditionHandle(
            "WaitHandle",
        ))

        self.WaitCondition = self.template.add_resource(cloudformation.WaitCondition(
            "WaitCondition",
            Handle=Ref(self.WaitHandle),
            Timeout="600",
            DependsOn="WebServerAutoScalingGroup",
        ))

        self.WebServerLaunchConfiguration = self.template.add_resource(autoscaling.LaunchConfiguration(
            "WebServerLaunchConfiguration",
            Metadata=metadata,
            # UserData: mount EFS at /var/www/html, run cfn-init, then use
            # wp-cli to install Wordpress on first boot and signal the wait
            # condition when done.
            UserData=Base64(Join("", [
                "#!/bin/bash -x\n",
                "apt-get update\n",
                "apt-get install python-pip nfs-common -y \n",
                "mkdir -p /var/www/html/\n",
                "EC2_AZ=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone)\n",
                "echo \"$EC2_AZ.", Ref(self.FileSystemID), ".efs.", Ref(
                    "AWS::Region"), ".amazonaws.com:/ /var/www/html/ nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 0 0\" >> /etc/fstab\n"
                "mount -a\n",
                "pip install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n",
                # "exec > /tmp/userdata.log 2>&1\n",
                "/usr/local/bin/cfn-init -v --stack ", Ref("AWS::StackName"),
                " --resource WebServerLaunchConfiguration ",
                " --configsets wordpress_install ",
                " --region ", Ref("AWS::Region"),
                "\n",
                "/bin/mv /var/www/html/wordpress/* /var/www/html/\n",
                "/bin/rm -f /var/www/html/index.html\n",
                "/bin/rm -rf /var/www/html/wordpress/\n",
                "chown www-data:www-data /var/www/html/* -R\n",
                "/usr/sbin/service apache2 restart\n",
                "/usr/bin/curl -O https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar\n",
                "/bin/chmod +x wp-cli.phar\n",
                "/bin/mv wp-cli.phar /usr/local/bin/wp\n",
                "cd /var/www/html/\n",
                "if ! $(sudo -u www-data /usr/local/bin/wp core is-installed); then\n",
                "sudo -u www-data /usr/local/bin/wp core install ",
                "--url='", Ref(self.Hostname), ".", Ref(self.Domain), "' ",
                "--title='Cloudreach Meetup - ", Ref(
                    self.Environment), "' ",
                "--admin_user='root' ",
                "--admin_password='wordpress' ",
                "--admin_email='meetup@cloudreach.com'\n",
                "wget https://s3-eu-west-1.amazonaws.com/sceptre-meetup-munich/header.jpg -O /var/www/html/wp-content/themes/twentyseventeen/assets/images/header.jpg\n",
                "chown www-data:www-data /var/www/html/wp-content/themes/twentyseventeen/assets/images/header.jpg\n",
                "fi\n",
                "/usr/local/bin/cfn-signal -e $? --stack ", Ref(
                    "AWS::StackName"), " -r \"Webserver setup complete\" '", Ref(self.WaitHandle), "'\n"
            ]
            )),
            ImageId=FindInMap("AWSRegion2AMI", Ref("AWS::Region"), "AMI"),
            KeyName=Ref(self.KeyName),
            SecurityGroups=[Ref(self.WebSecurityGroup)],
            InstanceType=Ref(self.InstanceType),
            AssociatePublicIpAddress=True,
        ))

        self.WebServerAutoScalingGroup = self.template.add_resource(autoscaling.AutoScalingGroup(
            "WebServerAutoScalingGroup",
            MinSize=Ref(self.WebServerCapacity),
            DesiredCapacity=Ref(self.WebServerCapacity),
            MaxSize=Ref(self.WebServerCapacity),
            VPCZoneIdentifier=[Ref(self.Subnet1), Ref(self.Subnet2)],
            AvailabilityZones=[Ref(self.AvailabilityZone1),
                               Ref(self.AvailabilityZone2)],
            Tags=autoscaling.Tags(
                Name=Join("-", [Ref(self.Project), "web", "asg"]),
                Environment=Ref(self.Environment),
                Project=Ref(self.Project),
            ),
            LoadBalancerNames=[Ref(self.ElasticLoadBalancer)],
            LaunchConfigurationName=Ref(self.WebServerLaunchConfiguration),
        ))

        self.WebServerScaleUpPolicy = self.template.add_resource(autoscaling.ScalingPolicy(
            "WebServerScaleUpPolicy",
            ScalingAdjustment="1",
            Cooldown="60",
            AutoScalingGroupName=Ref(self.WebServerAutoScalingGroup),
            AdjustmentType="ChangeInCapacity",
        ))

        self.WebServerScaleDownPolicy = self.template.add_resource(autoscaling.ScalingPolicy(
            "WebServerScaleDownPolicy",
            ScalingAdjustment="-1",
            Cooldown="60",
            AutoScalingGroupName=Ref(self.WebServerAutoScalingGroup),
            AdjustmentType="ChangeInCapacity",
        ))

        self.CPUAlarmLow = self.template.add_resource(cloudwatch.Alarm(
            "CPUAlarmLow",
            EvaluationPeriods="2",
            Dimensions=[
                cloudwatch.MetricDimension(
                    Name="AutoScalingGroupName",
                    Value=Ref(self.WebServerAutoScalingGroup)
                ),
            ],
            AlarmActions=[Ref(self.WebServerScaleDownPolicy)],
            AlarmDescription="Scale-down if CPU < 70% for 1 minute",
            Namespace="AWS/EC2",
            Period="60",
            ComparisonOperator="LessThanThreshold",
            Statistic="Average",
            Threshold="70",
            MetricName="CPUUtilization",
        ))

        self.CPUAlarmHigh = self.template.add_resource(cloudwatch.Alarm(
            "CPUAlarmHigh",
            EvaluationPeriods="2",
            Dimensions=[
                cloudwatch.MetricDimension(
                    Name="AutoScalingGroupName",
                    # Made consistent with CPUAlarmLow: reference the
                    # resource object rather than a bare logical-id string
                    # (troposphere emits the same Ref either way).
                    Value=Ref(self.WebServerAutoScalingGroup)
                ),
            ],
            AlarmActions=[Ref(self.WebServerScaleUpPolicy)],
            AlarmDescription="Scale-up if CPU > 50% for 1 minute",
            Namespace="AWS/EC2",
            Period="60",
            ComparisonOperator="GreaterThanThreshold",
            Statistic="Average",
            Threshold="50",
            MetricName="CPUUtilization",
        ))

    def add_outputs(self):
        """Expose the web FQDN and the security group ids as stack outputs."""
        self.out = self.template.add_output([
            Output("FQDN", Value=Join(
                ".", [Ref(self.Hostname), Ref(self.Domain)])),
            Output("WebSecurityGroup", Value=Ref(self.WebSecurityGroup)),
            Output("ElbSecurityGroup", Value=Ref(self.ElbSecurityGroup)),
        ])
def sceptre_handler(sceptre_user_data):
    """Sceptre entry point: build the Wordpress ASG stack and return its JSON."""
    stack = WordpressASG(sceptre_user_data)
    return stack.template.to_json()
if __name__ == '__main__':
    # print() is valid in both Python 2 and 3; the old "print sceptre_handler()"
    # was Python-2-only syntax and also omitted the required
    # sceptre_user_data argument (TypeError at runtime).
    print(sceptre_handler(None))
| 38.891827 | 170 | 0.507942 |
3291944c33659339ba70613d8e6fa1861279d57f | 1,473 | py | Python | twitter_apod.py | GereksizPosta/APODTwitterBot | 6c3a77c9a03d4d416cd1f47af30b61bae3ec956c | [
"MIT"
] | null | null | null | twitter_apod.py | GereksizPosta/APODTwitterBot | 6c3a77c9a03d4d416cd1f47af30b61bae3ec956c | [
"MIT"
] | null | null | null | twitter_apod.py | GereksizPosta/APODTwitterBot | 6c3a77c9a03d4d416cd1f47af30b61bae3ec956c | [
"MIT"
] | null | null | null | from twython import Twython
from auth import (
consumer_key,
consumer_secret,
access_token,
access_token_secret
)
import urllib.request
from bs4 import BeautifulSoup
import random
from datetime import datetime, date, time
from time import sleep, strftime, time
# Authenticated Twitter API client; credentials are imported from auth.py.
twitter = Twython(
    consumer_key,
    consumer_secret,
    access_token,
    access_token_secret
)
def apod():
    """Pick a random Astronomy Picture of the Day and tweet it.

    Scrapes the APOD archive index, picks a random entry, and tweets it:
    pages with an <img> are tweeted with the image attached, pages with an
    <iframe> (videos) are tweeted as text + link.  If a page has neither, a
    new random entry is tried.  The previous implementation retried by
    calling itself recursively (unbounded stack growth) and re-downloaded
    the archive index on every retry; a loop avoids both.
    """
    totalapod = abs(date.today() - date(1995, 6, 16)).days
    urlapod = "https://apod.nasa.gov/apod/archivepix.html"
    htmlapod = urllib.request.urlopen(urlapod).read()
    soupapod = BeautifulSoup(htmlapod, "lxml")
    while True:
        num = random.randrange(3, totalapod)
        choseapod = soupapod.find_all('a')[num]
        link = choseapod.get('href')
        text = choseapod.getText()
        apodlink = "https://apod.nasa.gov/apod/%s" % link
        apodhtml = urllib.request.urlopen(apodlink).read()
        apodsoup = BeautifulSoup(apodhtml, "lxml")
        message = "%s %s vai @apod" % (text, apodlink)
        if apodsoup.find('img') is not None:
            imgsrc = apodsoup.img.get('src')
            img_link = "https://apod.nasa.gov/apod/%s" % imgsrc
            img = urllib.request.urlopen(img_link)
            response = twitter.upload_media(media=img)
            print(message)
            twitter.update_status(status=message, media_ids=[response['media_id']])
            return
        if apodsoup.find('iframe') is not None:
            print(message)
            twitter.update_status(status=message)
            return
        # Neither an image nor a video: try another random entry.
# Tweet one random APOD per hour, forever.
while True:
    apod()
    sleep(3600)
| 27.277778 | 74 | 0.710794 |
b1ddc71e8e85db08bcb7f1404abdc87caf90ad4c | 3,114 | py | Python | huaweicloud-sdk-servicestage/huaweicloudsdkservicestage/v2/model/list_flavors_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-servicestage/huaweicloudsdkservicestage/v2/model/list_flavors_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-servicestage/huaweicloudsdkservicestage/v2/model/list_flavors_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListFlavorsResponse(SdkResponse):
    """Response model for the ListFlavors API (auto-generated SDK code).

    Attributes:
      openapi_types (dict): The key is attribute name
        and the value is attribute type.
      attribute_map (dict): The key is attribute name
        and the value is json key in definition.
    """
    # Attributes whose values are masked as "****" in to_dict().
    sensitive_list = []
    openapi_types = {
        'flavors': 'list[FlavorView]'
    }
    attribute_map = {
        'flavors': 'flavors'
    }
    def __init__(self, flavors=None):
        """ListFlavorsResponse - a model defined in huaweicloud sdk"""
        super(ListFlavorsResponse, self).__init__()
        self._flavors = None
        self.discriminator = None
        if flavors is not None:
            self.flavors = flavors
    @property
    def flavors(self):
        """Gets the flavors of this ListFlavorsResponse.

        List of resource flavors.

        :return: The flavors of this ListFlavorsResponse.
        :rtype: list[FlavorView]
        """
        return self._flavors
    @flavors.setter
    def flavors(self, flavors):
        """Sets the flavors of this ListFlavorsResponse.

        List of resource flavors.

        :param flavors: The flavors of this ListFlavorsResponse.
        :type: list[FlavorView]
        """
        self._flavors = flavors
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes instead of exposing their value.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListFlavorsResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.078261 | 79 | 0.55684 |
297eae7b9e637fd01afa6e089ccaa393bf8bd9e9 | 1,031 | py | Python | sources/tiff/large_image_source_tiff/girder_source.py | naglepuff/large_image | 4e928166f228fe894c38e4b01af5370e72f7229c | [
"Apache-2.0"
] | 85 | 2017-03-10T09:48:17.000Z | 2022-03-31T18:55:58.000Z | sources/tiff/large_image_source_tiff/girder_source.py | naglepuff/large_image | 4e928166f228fe894c38e4b01af5370e72f7229c | [
"Apache-2.0"
] | 248 | 2017-01-27T16:11:13.000Z | 2022-03-31T14:05:18.000Z | sources/tiff/large_image_source_tiff/girder_source.py | naglepuff/large_image | 4e928166f228fe894c38e4b01af5370e72f7229c | [
"Apache-2.0"
] | 33 | 2017-03-10T14:06:35.000Z | 2022-03-19T08:32:06.000Z | ##############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from girder_large_image.girder_tilesource import GirderTileSource
from . import TiffFileTileSource
class TiffGirderTileSource(TiffFileTileSource, GirderTileSource):
    """
    Provides tile access to Girder items with a TIFF file.
    """
    # NOTE(review): presumably the cache bucket name used by the tile-source
    # caching layer — confirm against GirderTileSource.
    cacheName = 'tilesource'
    # Identifier under which this source is registered/selected.
    name = 'tiff'
| 35.551724 | 78 | 0.634336 |
d7e3a686fa7c4f1747bdd08944f01475987afe21 | 5,226 | py | Python | synapse/python_dependencies.py | rzr/synapse | 16026e60c5381abcfea12f55b57f8d0ce474c402 | [
"Apache-2.0"
] | null | null | null | synapse/python_dependencies.py | rzr/synapse | 16026e60c5381abcfea12f55b57f8d0ce474c402 | [
"Apache-2.0"
] | null | null | null | synapse/python_dependencies.py | rzr/synapse | 16026e60c5381abcfea12f55b57f8d0ce474c402 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from distutils.version import LooseVersion
logger = logging.getLogger(__name__)
# Mapping of pip requirement string -> list of importable module specs used
# by check_requirements() to verify each dependency at runtime.
REQUIREMENTS = {
    "syutil>=0.0.7": ["syutil>=0.0.7"],
    "Twisted>=15.1.0": ["twisted>=15.1.0"],
    "service_identity>=1.0.0": ["service_identity>=1.0.0"],
    "pyopenssl>=0.14": ["OpenSSL>=0.14"],
    "pyyaml": ["yaml"],
    "pyasn1": ["pyasn1"],
    "pynacl>=0.0.3": ["nacl>=0.0.3"],
    "daemonize": ["daemonize"],
    "py-bcrypt": ["bcrypt"],
    "frozendict>=0.4": ["frozendict"],
    "pillow": ["PIL"],
    "pydenticon": ["pydenticon"],
    "ujson": ["ujson"],
    "blist": ["blist"],
    "pysaml2": ["saml2"],
}
# Optional-feature requirements, keyed by feature name.
CONDITIONAL_REQUIREMENTS = {
    "web_client": {
        "matrix_angular_sdk>=0.6.6": ["syweb>=0.6.6"],
    }
}
def requirements(config=None, include_conditional=False):
    """Return the requirement-string -> module-spec mapping for synapse.

    :param config: unused; kept for backward compatibility with callers.
    :param include_conditional: also merge in optional-feature requirements.
    """
    reqs = REQUIREMENTS.copy()
    if include_conditional:
        # Only the per-feature dicts are needed; the feature names are not.
        for req in CONDITIONAL_REQUIREMENTS.values():
            reqs.update(req)
    return reqs
def github_link(project, version, egg):
    """Build a pip dependency link to a GitHub tarball of *project* at *version*."""
    return "https://github.com/{0}/tarball/{1}/#egg={2}".format(
        project, version, egg)
# Dependencies installed from GitHub tarballs rather than PyPI; the keys are
# the distribution names, matched against requirement strings in
# list_requirements().
DEPENDENCY_LINKS = {
    "syutil": github_link(
        project="matrix-org/syutil",
        version="v0.0.7",
        egg="syutil-0.0.7",
    ),
    "matrix-angular-sdk": github_link(
        project="matrix-org/matrix-angular-sdk",
        version="v0.6.6",
        egg="matrix_angular_sdk-0.6.6",
    ),
}
class MissingRequirementError(Exception):
    """Raised when a required dependency cannot be imported or is too old."""
    pass
def check_requirements(config=None):
    """Checks that all the modules needed by synapse have been correctly
    installed and are at the correct version.

    :param config: forwarded to requirements().
    :raises MissingRequirementError: if a module is missing, lacks a
        __version__, or fails its version comparison.
    """
    for dependency, module_requirements in (
            requirements(config, include_conditional=False).items()):
        for module_requirement in module_requirements:
            # Split "name>=ver" / "name==ver" specs into name + comparison.
            if ">=" in module_requirement:
                module_name, required_version = module_requirement.split(">=")
                version_test = ">="
            elif "==" in module_requirement:
                module_name, required_version = module_requirement.split("==")
                version_test = "=="
            else:
                module_name = module_requirement
                version_test = None

            try:
                module = __import__(module_name)
            except ImportError:
                # Use the module-level logger (the original called the root
                # logging.exception, bypassing this module's logger config).
                logger.exception(
                    "Can't import %r which is part of %r",
                    module_name, dependency
                )
                raise MissingRequirementError(
                    "Can't import %r which is part of %r"
                    % (module_name, dependency)
                )

            version = getattr(module, "__version__", None)
            file_path = getattr(module, "__file__", None)
            logger.info(
                "Using %r version %r from %r to satisfy %r",
                module_name, version, file_path, dependency
            )

            if version_test is None:
                continue

            # Both comparison styles need a __version__ attribute; the check
            # was previously duplicated in each branch.
            if version is None:
                raise MissingRequirementError(
                    "Version of %r isn't set as __version__ of module %r"
                    % (dependency, module_name)
                )

            if version_test == ">=":
                if LooseVersion(version) < LooseVersion(required_version):
                    raise MissingRequirementError(
                        "Version of %r in %r is too old. %r < %r"
                        % (dependency, file_path, version, required_version)
                    )
            elif version_test == "==":
                if LooseVersion(version) != LooseVersion(required_version):
                    raise MissingRequirementError(
                        "Unexpected version of %r in %r. %r != %r"
                        % (dependency, file_path, version, required_version)
                    )
def list_requirements():
    """Return pip-style requirement strings, preferring dependency links.

    Requirements satisfied by an entry in DEPENDENCY_LINKS are emitted as
    the link URL itself instead of a plain version specifier.
    """
    result = []
    linked = []
    for link in DEPENDENCY_LINKS.values():
        egg = link.split("#egg=")[1]
        linked.append(egg.split('-')[0])
        result.append(link)
    for requirement in requirements(include_conditional=True):
        # Normalise once (hoisted out of the membership scan) and replace
        # the hand-rolled boolean-flag loop with any().
        normalised = requirement.replace('-', '_')
        if not any(normalised.startswith(link) for link in linked):
            result.append(requirement)
    return result
if __name__ == "__main__":
    # Print one requirement (or dependency link) per line, e.g. for piping
    # into pip.
    import sys
    sys.stdout.writelines(req + "\n" for req in list_requirements())
| 34.84 | 79 | 0.56946 |
52eefd4995449d04a3cb61140793a944099d2969 | 341 | py | Python | src/manage.py | mrsbelo/message_scheduler | e4a84967407a107cfd49eefd65f948167f80f583 | [
"MIT"
] | null | null | null | src/manage.py | mrsbelo/message_scheduler | e4a84967407a107cfd49eefd65f948167f80f583 | [
"MIT"
] | null | null | null | src/manage.py | mrsbelo/message_scheduler | e4a84967407a107cfd49eefd65f948167f80f583 | [
"MIT"
] | 1 | 2020-12-18T15:40:47.000Z | 2020-12-18T15:40:47.000Z | from flask.cli import FlaskGroup
from app import app, session
from app.models import BaseModel
from app.db import ENGINE
cli = FlaskGroup(app)
@cli.command("create_db")
def create_db():
    """CLI command: drop and recreate every table known to BaseModel.

    Dropping first makes the command idempotent — repeated runs always
    start from an empty schema.
    """
    BaseModel.metadata.drop_all(bind=ENGINE)
    BaseModel.metadata.create_all(bind=ENGINE)
    session.commit()
if __name__ == "__main__":
    # Dispatch to the Flask CLI group, e.g. `python manage.py create_db`.
    cli()
| 18.944444 | 46 | 0.741935 |
b303945858724807f1f3c95b417ed18b8b8779fe | 307 | py | Python | submissions/arc067/b.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 1 | 2021-05-10T01:16:28.000Z | 2021-05-10T01:16:28.000Z | submissions/arc067/b.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 3 | 2021-05-11T06:14:15.000Z | 2021-06-19T08:18:36.000Z | submissions/arc067/b.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | null | null | null | import sys
# Fast binary stdin helpers (competitive-programming boilerplate).
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
# For each adjacent pair of positions, pay either the per-unit walking
# cost (gap * a) or the flat teleport cost b — whichever is cheaper.
n, a, b = map(int, readline().split())
xs = list(map(int, readline().split()))
total = sum(min((nxt - cur) * a, b) for cur, nxt in zip(xs, xs[1:]))
print(total)
| 23.615385 | 38 | 0.654723 |
e820b6a648441bf54e1b9728398498a9791f4f05 | 3,142 | py | Python | tests/parse/test_parse_omim.py | gmc-norr/scout | ea8eaaa079c63e4033af6216ec08da4a314f9b5c | [
"BSD-3-Clause"
] | null | null | null | tests/parse/test_parse_omim.py | gmc-norr/scout | ea8eaaa079c63e4033af6216ec08da4a314f9b5c | [
"BSD-3-Clause"
] | null | null | null | tests/parse/test_parse_omim.py | gmc-norr/scout | ea8eaaa079c63e4033af6216ec08da4a314f9b5c | [
"BSD-3-Clause"
] | null | null | null | from scout.parse.omim import (
get_mim_phenotypes,
parse_genemap2,
parse_genemap2_phenotypes,
parse_mim2gene,
parse_mim_titles,
parse_omim_line,
)
def test_parse_omim_line():
    """parse_omim_line zips a header with the fields of a tab-separated line."""
    ## GIVEN a three-column header and a matching tab-separated line
    header = ["a", "b", "c"]
    line = "1\t2\t3"
    ## WHEN parsing the omim line
    result = parse_omim_line(line, header)
    ## THEN each header key maps to the corresponding field
    for key, expected in zip(header, ["1", "2", "3"]):
        assert result[key] == expected
def test_parse_genemap2_phenotype_entry_single():
    """A single genemap2 phenotype entry yields mim number, inheritance and status."""
    # GIVEN a phenotype description with one entry
    entry = "Ehlers-Danlos syndrome, progeroid type, 2, 615349 (3), Autosomal recessive"
    # WHEN parsing the entry and taking the first result
    first_parsed = parse_genemap2_phenotypes(entry)[0]
    # THEN the information was parsed correctly
    assert first_parsed["mim_number"] == 615349
    assert first_parsed["inheritance"] == {"AR"}
    assert first_parsed["status"] == "established"
def test_parse_genemap(genemap_lines):
    """Every record parsed from the fixture lines describes gene B3GALT6."""
    for res in parse_genemap2(genemap_lines):
        assert res["Chromosome"] == "chr1"
        assert res["mim_number"] == 615291
        assert res["hgnc_symbol"] == "B3GALT6"
        assert res["inheritance"] == set(["AR"])
        # Each associated phenotype carries its own mim number and inheritance
        for phenotype in res["phenotypes"]:
            assert phenotype["mim_number"]
            assert phenotype["inheritance"]
def test_parse_genemap_file(genemap_handle):
    """Smoke test: a real genemap2 file yields multiple records."""
    for i, res in enumerate(parse_genemap2(genemap_handle)):
        assert "mim_number" in res
    # enumerate leaves i at the last index, so >0 means at least two records
    assert i > 0
def test_parse_mim2gene(mim2gene_lines):
    """The first mim2gene fixture line is parsed as a gene entry for B3GALT6."""
    ## GIVEN some lines from a mim2gene file
    parsed_entries = parse_mim2gene(mim2gene_lines)
    ## WHEN taking the first parsed entry
    first_entry = next(parsed_entries)
    ## THEN it is a gene, so it also carries an hgnc symbol
    assert first_entry["mim_number"] == 615291
    assert first_entry["entry_type"] == "gene"
    assert first_entry["hgnc_symbol"] == "B3GALT6"
def test_parse_mim2gene_file(mim2gene_handle):
    """Smoke test: a real mim2gene file yields multiple entries."""
    # Just check that the file exists and that some result is given
    for i, res in enumerate(parse_mim2gene(mim2gene_handle)):
        assert "mim_number" in res
    # more than one entry was produced
    assert i > 0
def test_get_mim_phenotypes(genemap_lines):
    """get_mim_phenotypes indexes phenotype terms by their mim number."""
    ## GIVEN a small testdata set
    # This will return a dictionary with mim number as keys and
    # phenotypes as values
    ## WHEN parsing the phenotypes
    phenotypes = get_mim_phenotypes(genemap_lines=genemap_lines)
    ## THEN assert they were parsed in a correct way
    # There was only one line in genemap_lines that has two phenotypes
    # so we expect that there should be two phenotypes
    assert len(phenotypes) == 2
    term = phenotypes[615349]
    assert term["inheritance"] == set(["AR"])
    assert term["hgnc_symbols"] == set(["B3GALT6"])
def test_get_mim_phenotypes_file(genemap_handle):
    """Smoke test: a real genemap file yields multiple phenotype terms."""
    phenotypes = get_mim_phenotypes(genemap_lines=genemap_handle)
    for i, mim_nr in enumerate(phenotypes):
        assert phenotypes[mim_nr]["mim_number"]
    # more than one phenotype term was produced
    assert i > 0
| 28.825688 | 87 | 0.687142 |
f8f8a56260b33d160f2eeaa798c19866134d394f | 771 | py | Python | Sensor_python_code/homework_and_develop/a2_confirm.py | Eric-IoT-2019-summer/QI_D_team | c317c9302ee843223c5d9387db3b3ad67dc78ce2 | [
"MIT"
] | 1 | 2021-03-28T15:54:13.000Z | 2021-03-28T15:54:13.000Z | Sensor_python_code/homework_and_develop/a2_confirm.py | Eric-IoT-2019-summer/QI_D_team | c317c9302ee843223c5d9387db3b3ad67dc78ce2 | [
"MIT"
] | 2 | 2021-03-28T15:49:07.000Z | 2021-05-10T03:37:02.000Z | Sensor_python_code/homework_and_develop/a2_confirm.py | Eric-IoT-2019-summer/QI_D_team | c317c9302ee843223c5d9387db3b3ad67dc78ce2 | [
"MIT"
] | null | null | null | import sqlite3
conn = sqlite3.connect("aqi.db")
c = conn.cursor()
c.execute("SELECT * FROM history")
row = c.fetchall()
for r in row:
print "{0}\n".format(r)
#print '\t'
#JSON_string = "{0} {1} {2} {3} {4} {5} {6}".format(r[0], r[1], r[2], r[3], r[4], r[5], r[6])
#print (JSON_string)
conn.commit()
conn.close()
'''
import sqlite3
from datetime import datetime
conn = sqlite3.connect("aqi.db")
c = conn.cursor()
c.execute("CREATE TABLE IF NOT EXISTS history (time REAL PRIMARY KEY NOT NULL, temp REAL, sn1 REAL, sn2 REAL, sn3 REAL, sn4 REAL, pm25 REAL)")
c.execute("INSERT INTO history VALUES (1532, 24.523, 1.8764, 65.7817, 21.6671, 41.5976, 24.554)")
conn.commit()
conn.close()
#print "{0}".format(datetime.now())
'''
| 25.7 | 143 | 0.616083 |
fc6095449879b30991ddcc42734910a79f96988f | 5,770 | py | Python | lite/tests/unittest_py/op/test_hard_activation_op.py | Danielmic/Paddle-Lite | 8bf08425035cfae077754ac72629292fac7bb996 | [
"Apache-2.0"
] | 808 | 2018-04-17T17:43:12.000Z | 2019-08-18T07:39:13.000Z | lite/tests/unittest_py/op/test_hard_activation_op.py | Danielmic/Paddle-Lite | 8bf08425035cfae077754ac72629292fac7bb996 | [
"Apache-2.0"
] | 728 | 2018-04-18T08:15:25.000Z | 2019-08-16T07:14:43.000Z | lite/tests/unittest_py/op/test_hard_activation_op.py | Danielmic/Paddle-Lite | 8bf08425035cfae077754ac72629292fac7bb996 | [
"Apache-2.0"
] | 364 | 2018-04-18T17:05:02.000Z | 2019-08-18T03:25:38.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
import argparse
import numpy as np
from functools import partial
class TestHardActivationOp(AutoScanTest):
    """Auto-scan test for the hard_swish / hard_sigmoid activation ops."""
    def __init__(self, *args, **kwargs):
        """Register every target/precision/layout combination to test on."""
        AutoScanTest.__init__(self, *args, **kwargs)
        # CPU targets: x86 / ARM / generic host, FP32 plus ARM FP16.
        self.enable_testing_on_place(
            TargetType.X86,
            PrecisionType.FP32,
            DataLayoutType.NCHW,
            thread=[1, 2])
        self.enable_testing_on_place(
            TargetType.ARM,
            PrecisionType.FP32,
            DataLayoutType.NCHW,
            thread=[1, 2, 4])
        self.enable_testing_on_place(
            TargetType.ARM,
            PrecisionType.FP16,
            DataLayoutType.NCHW,
            thread=[1, 2, 4])
        self.enable_testing_on_place(
            TargetType.Host,
            PrecisionType.FP32,
            DataLayoutType.NCHW,
            thread=[1, 2])
        # OpenCL: image and buffer layouts across FP16/FP32/Any precisions.
        opencl_places = [
            Place(TargetType.OpenCL, PrecisionType.FP16,
                  DataLayoutType.ImageDefault), Place(
                      TargetType.OpenCL, PrecisionType.FP16,
                      DataLayoutType.ImageFolder),
            Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
            Place(TargetType.OpenCL, PrecisionType.Any,
                  DataLayoutType.ImageDefault), Place(
                      TargetType.OpenCL, PrecisionType.Any,
                      DataLayoutType.ImageFolder),
            Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
            Place(TargetType.Host, PrecisionType.FP32)
        ]
        self.enable_testing_on_place(places=opencl_places)
        # Metal (with ARM/Host fallbacks).
        metal_places = [
            Place(TargetType.Metal, PrecisionType.FP32,
                  DataLayoutType.MetalTexture2DArray),
            Place(TargetType.Metal, PrecisionType.FP16,
                  DataLayoutType.MetalTexture2DArray),
            Place(TargetType.ARM, PrecisionType.FP32),
            Place(TargetType.Host, PrecisionType.FP32)
        ]
        self.enable_testing_on_place(places=metal_places)
        # NNAdapter backends (TensorRT / OpenVINO).
        self.enable_testing_on_place(TargetType.NNAdapter, PrecisionType.FP32)
        self.enable_devices_on_nnadapter(
            device_names=["nvidia_tensorrt", "intel_openvino"])
    def is_program_valid(self,
                         program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:
        """All generated programs are valid for these ops."""
        return True
    def sample_program_configs(self, draw):
        """Draw a random 4-D input shape, op attributes and op type."""
        in_shape = draw(
            st.lists(
                st.integers(
                    min_value=1, max_value=64), min_size=4, max_size=4))
        # NOTE(review): alpha_data is drawn but never used below — confirm
        # whether it was meant for another attribute.
        alpha_data = draw(st.floats(min_value=0.1, max_value=0.9))
        threshold_data = draw(st.floats(min_value=0.5, max_value=0.9))
        scale_data = draw(st.floats(min_value=0.7, max_value=0.9))
        offset_data = draw(st.floats(min_value=0.01, max_value=0.1))
        op_type_str = draw(st.sampled_from(["hard_swish", "hard_sigmoid"]))
        def generate_input(*args, **kwargs):
            # Uniform values in [-1, 1) so both activation branches are hit.
            return 2 * np.random.random(in_shape).astype(np.float32) - 1
        def get_attr_np(op_type):
            # hard_swish and hard_sigmoid take different attribute sets.
            attr = {}
            if op_type == "hard_swish":
                attr = {
                    "threshold": threshold_data,
                    "scale": scale_data,
                    "offset": offset_data
                }
            else:
                attr = {"slope": scale_data, "offset": offset_data}
            return attr
        build_ops = OpConfig(
            type=op_type_str,
            inputs={"X": ["input_data"], },
            outputs={"Out": ["output_data"], },
            attrs=get_attr_np(op_type_str))
        program_config = ProgramConfig(
            ops=[build_ops],
            weights={},
            inputs={
                "input_data": TensorConfig(data_gen=partial(generate_input)),
            },
            outputs=["output_data"])
        return program_config
    def sample_predictor_configs(self):
        """Return predictor configs and tolerances (looser on Metal)."""
        atol, rtol = 1e-5, 1e-5
        target_str = self.get_target()
        if target_str == "Metal":
            atol, rtol = 1e-3, 1e-3
        return self.get_predictor_configs(), ["hard_swish_and_sigmoid"], (atol,
                                                                          rtol)
    def add_ignore_pass_case(self):
        """Skip hard_sigmoid on the nvidia_tensorrt NNAdapter backend."""
        def teller1(program_config, predictor_config):
            if "nvidia_tensorrt" in self.get_nnadapter_device_name():
                if program_config.ops[0].type == "hard_sigmoid":
                    return True
        self.add_ignore_check_case(
            teller1, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
            "NNAdapter tensorrt will support hard_sigmoid later.")
    def test(self, *args, **kwargs):
        """Entry point: run the auto-scan with 25 generated examples."""
        self.run_and_statis(quant=False, max_examples=25)
if __name__ == "__main__":
    # Clear argv so unittest does not try to parse this script's own
    # command-line arguments.
    unittest.main(argv=[''])
| 38.466667 | 125 | 0.609879 |
c898c50e25fcec3bcb94959ca3a3dc932b07d117 | 2,541 | py | Python | da_concept_extractor.py | kent0304/dsbook | 0faf21ea54fbce52fa84d13733b622f173a047a9 | [
"MIT"
] | 34 | 2020-03-02T05:00:27.000Z | 2022-03-17T05:00:26.000Z | da_concept_extractor.py | kent0304/dsbook | 0faf21ea54fbce52fa84d13733b622f173a047a9 | [
"MIT"
] | 35 | 2020-03-07T04:12:26.000Z | 2022-03-31T11:04:03.000Z | da_concept_extractor.py | kent0304/dsbook | 0faf21ea54fbce52fa84d13733b622f173a047a9 | [
"MIT"
] | 22 | 2020-03-29T00:38:06.000Z | 2022-02-14T02:38:26.000Z | import MeCab
import json
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import SVC
from sklearn.preprocessing import LabelEncoder
import dill
import sklearn_crfsuite
from crf_util import word2features, sent2features, sent2labels
import re
# Class that extracts the dialogue-act type and concepts from an utterance.
class DA_Concept:
    def __init__(self):
        """Load MeCab plus the pre-trained SVM (dialogue act) and CRF
        (concept tagging) models from disk."""
        # Initialize MeCab
        self.mecab = MeCab.Tagger()
        self.mecab.parse('')
        # Load the SVM model (vectorizer, label encoder and classifier are
        # pickled sequentially into the same file)
        with open("svc.model","rb") as f:
            self.vectorizer = dill.load(f)
            self.label_encoder = dill.load(f)
            self.svc = dill.load(f)
        # Load the CRF model
        with open("crf.model","rb") as f:
            self.crf = dill.load(f)
    # Extract the dialogue-act type and concepts from an utterance
    def process(self,utt):
        """Return (dialogue_act, concept_dict) for the utterance `utt`.

        concept_dict maps a BIO label (without its B-/I- prefix) to the
        concatenated words of the matching span.
        """
        # Tokenise with MeCab into [word, POS tag, "O"] triples
        lis = []
        for line in self.mecab.parse(utt).splitlines():
            if line == "EOS":
                break
            else:
                word, feature_str = line.split("\t")
                features = feature_str.split(',')
                postag = features[0]
                lis.append([word, postag, "O"])
        words = [x[0] for x in lis]
        tokens_str = " ".join(words)
        # Predict the dialogue act with the SVM
        X = self.vectorizer.transform([tokens_str])
        Y = self.svc.predict(X)
        # Map the numeric prediction back to its label
        da = self.label_encoder.inverse_transform(Y)[0]
        X = [sent2features(s) for s in [lis]]
        # Label sequence corresponding to each word
        labels = self.crf.predict(X)[0]
        # Align words with their labels and convert the spans to a dict
        conceptdic = {}
        buf = ""
        last_label = ""
        for word, label in zip(words, labels):
            if re.search(r'^B-',label):
                # A new span starts: flush the previous one, if any
                if buf != "":
                    _label = last_label.replace('B-','').replace('I-','')
                    conceptdic[_label] = buf
                buf = word
            elif re.search(r'^I-',label):
                # Continuation of the current span
                buf += word
            elif label == "O":
                # Outside any span: flush the current span, if any
                if buf != "":
                    _label = last_label.replace('B-','').replace('I-','')
                    conceptdic[_label] = buf
                    buf = ""
            last_label = label
        # Flush a span that runs to the end of the utterance
        if buf != "":
            _label = last_label.replace('B-','').replace('I-','')
            conceptdic[_label] = buf
        return da, conceptdic
if __name__ == '__main__':
    # Quick manual check: extract dialogue act and concepts from a sample
    # utterance ("What is the weather in Tokyo?").
    da_concept = DA_Concept()
    da, conceptdic = da_concept.process("東京の天気は?")
    print(da, conceptdic)
| 31.37037 | 74 | 0.502952 |
fdfb5e7a1da66b82e4c865798a8554535ddf9285 | 5,394 | py | Python | semisup/tools/cifar100.py | jplapp/associative_clustering | 4fbda72143b9e9efe3b3eb5b911381679b1716ba | [
"Apache-2.0"
] | 11 | 2018-10-03T22:16:32.000Z | 2021-10-20T13:09:23.000Z | semisup/tools/cifar100.py | jplapp/associative_clustering | 4fbda72143b9e9efe3b3eb5b911381679b1716ba | [
"Apache-2.0"
] | 1 | 2018-10-11T15:08:18.000Z | 2018-10-11T15:08:18.000Z | semisup/tools/cifar100.py | jplapp/associative_clustering | 4fbda72143b9e9efe3b3eb5b911381679b1716ba | [
"Apache-2.0"
] | 1 | 2021-11-02T12:00:18.000Z | 2021-11-02T12:00:18.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CIFAR dataset input module.
"""
import tensorflow as tf
def build_input(dataset, data_path, batch_size, mode, subset_factor=1, seed=47, num_threads=8):
    """Build CIFAR image and labels.

    Args:
      dataset: Either 'cifar10' or 'cifar100'.
      data_path: Filename for data.
      batch_size: Input batch size.
      mode: Either 'train' or 'eval'.
      subset_factor: take every nth image.
      seed: shuffle seed for the filename queue.
      num_threads: enqueue threads (forced to 1 in eval mode for determinism).

    Returns:
      images: Batches of images. [batch_size, image_size, image_size, 3]
      labels: Batches of labels. [batch_size]

    Raises:
      ValueError: when the specified dataset is not supported.
    """
    image_size = 32
    if dataset == 'cifar10':
        label_bytes = 1
        label_offset = 0
        num_classes = 10
    elif dataset == 'cifar100':
        label_bytes = 1
        label_offset = 1
        num_classes = 100
    else:
        # BUG FIX: the message was passed as a tuple argument and never
        # formatted; use %-formatting so the dataset name appears in the error.
        raise ValueError('Not supported dataset %s' % dataset)
    depth = 3
    image_bytes = image_size * image_size * depth
    # Each record: (coarse label for cifar100) + label + raw image bytes.
    record_bytes = label_bytes + label_offset + image_bytes
    data_files = tf.gfile.Glob(data_path)
    file_queue = tf.train.string_input_producer(data_files, shuffle=True, seed=seed)
    # Read examples from files in the filename queue; hop_bytes skips
    # (subset_factor - 1) records between reads, i.e. takes every nth image.
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes, hop_bytes=record_bytes * (subset_factor - 1))
    _, value = reader.read(file_queue)
    # Convert these examples to dense labels and processed images.
    record = tf.reshape(tf.decode_raw(value, tf.uint8), [record_bytes])
    label = tf.cast(tf.slice(record, [label_offset], [label_bytes]), tf.int32)
    # Convert from string to [depth * height * width] to [depth, height, width].
    depth_major = tf.reshape(tf.slice(record, [label_offset + label_bytes], [image_bytes]),
                             [depth, image_size, image_size])
    # Convert from [depth, height, width] to [height, width, depth].
    image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
    if mode == 'train':
        # Standard CIFAR augmentation: pad+random crop and horizontal flip.
        image = tf.image.resize_image_with_crop_or_pad(
            image, image_size + 4, image_size + 4)
        image = tf.random_crop(image, [image_size, image_size, 3])
        image = tf.image.random_flip_left_right(image)
        # Brightness/saturation/contrast provides small gains .2%~.5% on cifar.
        # image = tf.image.random_brightness(image, max_delta=63. / 255.)
        # image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
        # image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
        image = tf.image.per_image_standardization(image)
        example_queue = tf.RandomShuffleQueue(
            capacity=16 * batch_size,
            min_after_dequeue=8 * batch_size,
            dtypes=[tf.float32, tf.int32],
            shapes=[[image_size, image_size, depth], [1]])
    else:
        image = tf.image.resize_image_with_crop_or_pad(
            image, image_size, image_size)
        image = tf.image.per_image_standardization(image)
        # FIFO + single thread keeps evaluation order deterministic.
        example_queue = tf.FIFOQueue(
            3 * batch_size,
            dtypes=[tf.float32, tf.int32],
            shapes=[[image_size, image_size, depth], [1]])
        num_threads = 1
    example_enqueue_op = example_queue.enqueue([image, label])
    tf.train.add_queue_runner(tf.train.queue_runner.QueueRunner(
        example_queue, [example_enqueue_op] * num_threads))
    # Read 'batch' labels + images from the example queue.
    images, labels = example_queue.dequeue_many(batch_size)
    labels = tf.reshape(labels, [batch_size])
    # uncomment this for one-hot encoding
    # labels = tf.reshape(labels, [batch_size, 1])
    # indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1])
    # labels = tf.sparse_to_dense(
    #     tf.concat(values=[indices, labels], axis=1),
    #     [batch_size, num_classes], 1.0, 0.0)
    assert len(images.get_shape()) == 4
    assert images.get_shape()[0] == batch_size
    assert images.get_shape()[-1] == 3
    assert len(labels.get_shape()) == 1
    assert labels.get_shape()[0] == batch_size
    # Display the training images in the visualizer.
    tf.summary.image('images', images)
    return images, labels
if __name__ == '__main__':
    print('tf version = ', tf.__version__)
    import numpy as np
    # Manual smoke test: build the input pipeline on a local cifar10 copy
    # and print the labels of one training batch.
    images, labels = build_input('cifar10', '/usr/stud/plapp/data/cifar-10-batches-bin/train/*',
                                 batch_size=200, mode='train', subset_factor=1)
    sess = tf.Session()
    tf.train.start_queue_runners(sess)
    res = sess.run(labels)
    print('labels of first batch:', res)
    print('labels of first batch:', np.min(res), np.max(res))
| 39.955556 | 112 | 0.650352 |
0f1e7f45a10807ec3dc169900161c27e66bc53cd | 12,718 | py | Python | clustering_tools/euclidean_em.py | IRT-SystemX/Poincare_Embedding | 1d047c15deb8b2b236498b0f063ecc347fe6f349 | [
"Unlicense"
] | null | null | null | clustering_tools/euclidean_em.py | IRT-SystemX/Poincare_Embedding | 1d047c15deb8b2b236498b0f063ecc347fe6f349 | [
"Unlicense"
] | null | null | null | clustering_tools/euclidean_em.py | IRT-SystemX/Poincare_Embedding | 1d047c15deb8b2b236498b0f063ecc347fe6f349 | [
"Unlicense"
] | null | null | null | import math
import cmath
import torch
import numpy as np
import tqdm
import sklearn.cluster as skc
import sklearn.mixture as skm
from function_tools import distribution_function as df
from function_tools import euclidean_function as ef
class GaussianMixtureSKLearn(skm.GaussianMixture):
    """Thin torch-friendly wrapper around sklearn's GaussianMixture.

    Accepts torch tensors, optionally warm-starts the mixture from soft
    assignments Y, and returns torch tensors from the predict methods.
    """
    def __init__(self, n_gaussian, init_mod="rand", verbose=True):
        self.n_gaussian = n_gaussian
        self._distance = ef.distance
        super(GaussianMixtureSKLearn, self).__init__(n_components=n_gaussian, max_iter=3000)
    def norm_ff(self, sigma):
        """Euclidean normalisation factor for a gaussian with variance sigma."""
        return df.euclidean_norm_factor(sigma)
    def fit(self, X, Y=None):
        """Fit the mixture on X (N x D torch tensor).

        If soft assignments Y (N x M) are given, weights/means/variances are
        estimated from them and used to warm-start a short (5-iteration)
        sklearn EM run; otherwise a plain sklearn fit is performed.
        """
        if(Y is not None):
            N, M, D = X.size(0), Y.size(-1), X.size(-1)
            # Normalise Y rows into soft responsibilities
            Y = Y.float()/(Y.float().sum(-1, keepdim=True).expand_as(Y))
            self.weights_ = Y.mean(0).numpy()
            # print("w -> ",self.weights_)
            # Responsibility-weighted means
            means = ((X.unsqueeze(1).expand(N,M,D) * Y.unsqueeze(-1).expand(N,M,D)).sum(0)/Y.sum(0).unsqueeze(-1).expand(M,D))
            self.means_2 = means.numpy()
            # print("mean -> ", self.means_2.shape)
            # print("mean[0] -> ", self.means_2[:,0])
            self.N_k = Y.sum(0)
            XmM = X.unsqueeze(1).expand(N,M,D)-means.unsqueeze(0).expand(N,M,D)
            # print((XmM * XmM).sum(-1))
            # NOTE(review): the trailing /30 is an unexplained shrink factor
            # on the initial variances — confirm its origin.
            self.covariances_2 = (((XmM * XmM).sum(-1)) * Y).sum(0)/Y.sum(0)/30
            # print((XmM * XmM).sum(-1).mean())
            # print("cov ", self.covariances_2)
            # Re-run __init__ so sklearn picks up the warm-start parameters
            super(GaussianMixtureSKLearn, self).__init__(n_components=self.n_gaussian, covariance_type="spherical", precisions_init=(1/self.covariances_2).numpy(),weights_init=self.weights_/self.weights_.sum(), means_init=self.means_2, max_iter=5)
            super(GaussianMixtureSKLearn, self).fit(X.numpy())
        else:
            super(GaussianMixtureSKLearn, self).fit(X.numpy())
        # self._w = torch.Tensor(self.weights_)
        # self._mu = torch.Tensor(self.means_2)
        # self._sigma = torch.Tensor(self.covariances_)
        # self._sigma = self.covariances_2/X.size(-1)
        # print("sigma ", self._sigma)
        # print("w ", self._w)
        # print("mean[0] ", self.means_[:,0])
    def get_pik(self, z):
        """Posterior responsibilities for each point in z (torch tensor)."""
        return torch.Tensor(super(GaussianMixtureSKLearn, self).predict_proba(z.numpy()))
    def probs(self, z):
        """Alias of get_pik: per-component membership probabilities."""
        return torch.Tensor(super(GaussianMixtureSKLearn, self).predict_proba(z.numpy()))
    def predict(self, z):
        """Hard cluster assignment for each point in z (torch tensor)."""
        return torch.Tensor(super(GaussianMixtureSKLearn, self).predict(z.numpy()))
class GMM(object):
    """Hand-rolled EM for a Euclidean Gaussian mixture (torch tensors).

    `mod` selects the covariance structure: "full", "diag" or "spherical".
    Parameters are stored as _w (M,), _mu (M, D) and _sigma (M, D, D).
    """
    def norm_ff(self, sigma):
        """Euclidean normalisation factor for a gaussian with variance sigma."""
        return df.euclidean_norm_factor(sigma)
    def __init__(self, n_gaussian, init_mod="rand", verbose=False, mod="full"):
        self._n_g = n_gaussian
        self._distance = ef.distance
        self._verbose = verbose
        self._init_mod = init_mod
        # parameters are lazily initialised on the first call to fit()
        self._started = False
        self.mod = mod
    def update_w(self, z, wik, g_index=-1):
        """M-step for the mixture weights (single component if g_index >= 0)."""
        # get omega mu
        if(g_index > 0):
            self._w[g_index] = wik[:, g_index].mean()
        else:
            self._w = wik.mean(0)
    def update_mu(self, z, wik, lr_mu, tau_mu, g_index=-1, max_iter=50):
        """M-step for the means: responsibility-weighted average of z.

        lr_mu/tau_mu/max_iter are unused here (kept for interface parity
        with the hyperbolic variant).
        """
        N, D, M = z.shape + (wik.shape[-1],)
        if(g_index>0):
            self._mu[g_index] = (wik[:, g_index].unsqueeze(-1).expand(N, D) * z).sum(0)/wik[:, g_index].sum()
        else:
            self._mu = (wik.unsqueeze(-1).expand(N, M, D) * z.unsqueeze(1).expand(N, M, D)).sum(0)/wik.sum(0).unsqueeze(-1).expand(M,D)
        # print("sqdqsdf", self._mu.size())
    def update_sigma(self, z, wik, g_index=-1):
        """M-step for the covariances, depending on self.mod.

        full: per-component weighted outer products (skipping zero-weight
        points); diag: per-dimension variances on the diagonal; spherical:
        a single variance replicated on the diagonal.
        """
        N, D, M = z.shape + (self._mu.shape[0],)
        N_k = wik.sum(0)
        if(g_index>0):
            self._sigma[:, g_index] = ((self._distance(z, self._mu[:,g_index].expand(N))**2) * wik[:, g_index]).sum()/wik[:, g_index].sum()
        else:
            dtm = self._distance(z.unsqueeze(1).expand(N,M,D), self._mu.unsqueeze(0).expand(N,M,D))
            ZmMU = z.unsqueeze(1).expand(N,M,D) - self._mu.unsqueeze(0).expand(N,M,D)
            sigma = []
            # print("M size", self._mu.shape)
            if(self.mod=="full"):
                for i in range(M):
                    ZmMU_k = ZmMU[:,i,:]
                    wik_k = wik[:, i]
                    #.unsqueeze(-1).unsqueeze(-1).expand(N, D, D).double()
                    # .unsqueeze(1).double()
                    # only points with non-zero responsibility contribute
                    index_nz = wik_k>0
                    n_nz = index_nz.sum().item()
                    wik_k = wik_k[index_nz].unsqueeze(-1).unsqueeze(-1).expand(n_nz, D, D).double()
                    ZmMU_k = ZmMU_k[index_nz].unsqueeze(1).double()
                    # print(ZmMU_k.size())
                    ZmMU_k_dot = (ZmMU_k.transpose(-1,1).bmm(ZmMU_k) * wik_k).sum(0)
                    sigma.append((ZmMU_k_dot/(wik[:, i].sum().double())).unsqueeze(0))
                self._sigma = torch.cat(sigma, 0)
            elif(self.mod=="diag"):
                g = (ZmMU**2 * wik.unsqueeze(-1).expand(N,M,D)).sum(0)/wik.unsqueeze(-1).expand(N,M,D).sum(0)
                self._sigma = g.unsqueeze(-1).expand(M,D,D) * torch.eye(D).unsqueeze(0).expand(M,D,D)
            elif(self.mod=="spherical"):
                g = ((ZmMU**2).sum(-1) * wik.expand(N,M)).sum(0)/wik.expand(N,M).sum(0)
                self._sigma = g.unsqueeze(-1).unsqueeze(-1).expand(M,D,D) * torch.eye(D).unsqueeze(0).expand(M,D,D)
        # print(torch.symeig(self._sigma)[0])
        # print(self._sigma.sum(-1).sum(-1),"\n")
        # print(wik.mean(0))
        # print(wik.mean(0).sum())
        # self._sigma =((self._distance(z.unsqueeze(1).expand(N,M,D), self._mu.unsqueeze(0).expand(N,M,D)))**2 * wik).sum(0)/wik.sum(0)
        # ((((X.unsqueeze(1).expand(N,M,D)-means.unsqueeze(0).expand(N,M,D))**2).sum(-1)) * Y).sum(0)/Y.sum(0)
    def _expectation(self, z):
        """E-step: responsibilities via exp of the weighted log densities."""
        # N, M, D = z.size(0), self._mu.size(0), z.size(-1)
        # z = z.unsqueeze(1).expand(N, M, D)
        # mu = self._mu.unsqueeze(0).expand(N,)
        # ZmM =
        # pdf = df.gaussianPDF(z, self._mu, self._sigma, norm_func=self.norm_ff, distance=self._distance)
        # p_pdf = pdf * self._w.unsqueeze(0).expand_as(pdf)
        # wik = p_pdf/p_pdf.sum(1, keepdim=True).expand_as(pdf)
        # return wik
        # NOTE(review): this returns exp(log-weighted-densities), i.e. the
        # values are not normalised over components — verify downstream usage.
        return torch.exp(self.log_probs_cholesky(z))
    def _maximization(self, z, wik, lr_mu=5e-1, tau_mu=5e-3, max_iter_bar=50):
        """M-step: update weights, means and covariances in that order."""
        self.update_w(z, wik)
        self.update_mu(z, wik, lr_mu=lr_mu, tau_mu=tau_mu, max_iter=max_iter_bar)
        self.update_sigma(z, wik)
    def fit(self, z, max_iter=5, lr_mu=5e-3, tau_mu=5e-3, Y=None):
        """Run EM on z (N x D). If soft labels Y are given, perform a single
        supervised M-step and return; otherwise iterate EM for max_iter."""
        progress_bar = tqdm.trange(max_iter) if(self._verbose) else range(max_iter)
        # if it is the first time function fit is called
        if(not self._started):
            self._d = z.size(-1)
            self._mu = (torch.rand(self._n_g, self._d) -0.5)/self._d
            self._sigma = torch.rand(self._n_g)/10 +0.2
            self._w = torch.ones(self._n_g)/self._n_g
        if(Y is not None):
            # print("Y size ", Y.size())
            # supervised case: responsibilities come from the labels
            wik = Y.float()/(Y.float().sum(-1, keepdim=True).expand_as(Y))
            # print("wik size ", wik.size())
            # print("wik", wik[0])
            self._maximization(z, wik, lr_mu=lr_mu, tau_mu=1e-5)
            # print("_sig ", self._sigma)
            # print("_w", self._w)
            return
        else:
            if(not self._started):
                # using kmeans for initializing means
                if(self._init_mod == "kmeans"):
                    if(self._verbose):
                        print("Initialize means using kmeans algorithm")
                    km = skc.KMeans(self._n_g)
                    km.fit(z.numpy())
                    self._mu = torch.Tensor(km.cluster_centers_)
            if(self._verbose):
                print("\t mu -> ", self._mu)
                print("\t sigma -> ", self._sigma)
            self._started = True
            for epoch in progress_bar:
                wik = self._expectation(z)
                self._maximization(z, wik)
    def get_parameters(self):
        """Return (weights, means, covariances)."""
        return self._w, self._mu, self._sigma
    def get_pik(self, z):
        """Normalised responsibilities computed from df.gaussianPDF."""
        N, D, M = z.shape + (self._mu.shape[0],)
        pdf = df.gaussianPDF(z, self._mu, self._sigma, norm_func=self.norm_ff, distance=self._distance)
        print("pdf mean", pdf[20])
        p_pdf = pdf * self._w.unsqueeze(0).expand_as(pdf)
        print("ppd",p_pdf.size())
        # guard against rows where every component underflowed to zero
        if(p_pdf.sum(-1).min() == 0):
            print("EXPECTATION : pdf.sum(-1) contain zero -> ",(p_pdf.sum(-1) == 0).sum())
            #same if we set = 1
            p_pdf[p_pdf.sum(-1) == 0] = 1e-8
        wik = p_pdf/p_pdf.sum(-1, keepdim=True).expand_as(pdf)
        # print("wik 1", wik.mean(1))
        # print("wik 2", wik.mean(0))
        # wik[torch.arange(len(wik)), wik.max(1)[1]] = 1
        # wik = wik.long().float()
        # print("wik 2", wik.mean(0))
        return wik
    def probs(self, z):
        """Weighted log densities for each point/component pair."""
        # # by default log probs since probs can be easily untracktable
        # N, M, D = z.size(0), self._mu.size(0), z.size(-1)
        # z = z.unsqueeze(1).expand(N, M, D)
        # mu = self._mu.unsqueeze(0).expand(N,M,D)
        # ZmM = ((mu - z)**2).sum(-1)
        # ZmMmS = -(1/2) * ZmM * 1/self._sigma.unsqueeze(0).expand(N,M)
        # nor = -(z.size(-1)/2) * (math.log(2 * math.pi) + torch.log(self._sigma))
        # nor = nor.unsqueeze(0).expand(N,M)
        # log_pdf = nor + ZmMmS
        # log_prob = torch.log(self._w.unsqueeze(0).expand(N,M)) + log_pdf
        # print("log prob ", log_prob[0])
        # return log_prob
        return self.log_probs_cholesky(z)
    def log_probs_cholesky(self, z):
        """log(w_k) + log N(z | mu_k, sigma_k) via Cholesky factorisation.

        Non-PSD covariances are repaired by clamping small negative
        eigenvalues; a genuinely negative spectrum aborts the process.
        """
        # by default log probs since probs can be easily untracktable
        N, M, D = z.size(0), self._mu.size(0), z.size(-1)
        inv = []
        log_det_l = []
        # cholesky_root = torch.cholesky(self._sigma)
        # computing log det using cholesky decomposition
        for i in range(self._mu.size(0)):
            try:
                cholesky_root = torch.cholesky(self._sigma[i])
            # NOTE(review): bare except also hides non-numerical errors
            except:
                print("There is negative eigen value for cov mat "+str(i))
                eigen_value, eigen_vector = torch.symeig(self._sigma[i], eigenvectors=True)
                print("Rule if min(eig) > -1e-5 replacing by 0")
                if(eigen_value.min()>-1e-5):
                    print("Negative minimum eigen value is ", eigen_value.min().item(), " replace by 0")
                    eigen_value[eigen_value<1e-15] = 1e-10
                    self._sigma[i] = eigen_vector.mm(torch.diag(eigen_value).mm(eigen_vector.t()))
                    eigen_value, eigen_vector = torch.symeig(self._sigma[i], eigenvectors=True)
                    cholesky_root = torch.cholesky(self._sigma[i])
                else:
                    print("Negative minimum eigen value is ", eigen_value.min().item(), " exiting ")
                    quit()
            log_det = cholesky_root.diag().log().sum()
            inv.append(torch.cholesky_inverse(cholesky_root).unsqueeze(0))
            log_det_l.append(log_det.unsqueeze(0))
        # M (one half-log-determinant per component)
        log_det = torch.cat(log_det_l, 0).float()
        # MxDxD (one inverse covariance per component)
        inv_sig = torch.cat(inv, 0).float()
        dtm = z.unsqueeze(1).expand(N,M,D) - self._mu.unsqueeze(0).expand(N,M,D)
        dtm_mm = []
        # Mahalanobis terms, one component at a time
        for i in range(M):
            dtm_k = dtm[:,i,:].unsqueeze(1)
            inv_sig_k = inv_sig[i,:,:].unsqueeze(0).expand(N, D, D)
            exp_dist = dtm_k.bmm(inv_sig_k).bmm(dtm_k.transpose(-1,1)).squeeze(-1)
            dtm_mm.append(exp_dist)
        dtm_mm = torch.cat(dtm_mm, -1)
        # NOTE(review): the gaussian normaliser is usually D*log(2*pi)
        # (dimension), not N*log(2*pi) (batch size) — verify this factor.
        log_norm = N*math.log(2*math.pi) + log_det
        log_pdf = -0.5 * (log_norm.unsqueeze(0).expand(N, M) + dtm_mm)
        weighted_pdf = torch.log(self._w.unsqueeze(0).expand(N,M)) + log_pdf
        return weighted_pdf
# we must compute the
def GMMFull(n_g):
    # Factory: GMM with full (unconstrained) covariance matrices.
    return GMM(n_g, mod="full")
def GMMDiag(n_g):
    # Factory: GMM with diagonal covariance matrices.
    return GMM(n_g, mod="diag")
def GMMSpherical(n_g):
    # Factory: GMM with spherical (isotropic) covariance matrices.
    return GMM(n_g, mod="spherical")
def test():
    """Smoke test: sample three random 2-D clusters and fit a mixture.

    Bug fix: this module defines GMM (plus the GMMFull/GMMDiag/GMMSpherical
    factories) but no EuclideanEM, so the original call raised NameError.
    """
    # three clusters of 100 points each around random centres
    mu, sigma = torch.rand(2) - 0.5, torch.rand(1) / 5
    x1 = torch.rand(100, 2) * sigma + mu.unsqueeze(0).expand(100, 2)
    mu, sigma = torch.rand(2) - 0.5, torch.rand(1) / 5
    x2 = torch.rand(100, 2) * sigma + mu.unsqueeze(0).expand(100, 2)
    mu, sigma = torch.rand(2) - 0.5, torch.rand(1) / 5
    x3 = torch.rand(100, 2) * sigma + mu.unsqueeze(0).expand(100, 2)
    X = torch.cat((x1, x2, x3), 0)
    # three mixture components (one per cluster), kmeans initialisation
    EM = GMM(3, init_mod="kmeans")
    EM.fit(X)
38f0ef6c8155251ea901ce10648ccca9881fbd62 | 2,405 | py | Python | utils/coco_utils.py | seb5666/image_captions | 27148935f39441eef71d20a9dc5b4ac3b5283d02 | [
"MIT"
] | null | null | null | utils/coco_utils.py | seb5666/image_captions | 27148935f39441eef71d20a9dc5b4ac3b5283d02 | [
"MIT"
] | null | null | null | utils/coco_utils.py | seb5666/image_captions | 27148935f39441eef71d20a9dc5b4ac3b5283d02 | [
"MIT"
] | null | null | null |
"""Util functions for handling caption data"""
import os, json
import numpy as np
import h5py
def load_coco_data(base_dir='/home/ubuntu/COCO/dataset/COCO_captioning/',
                   max_train=None):
    """Load preprocessed COCO captioning data.

    Args:
        base_dir: directory holding the preprocessed h5/npy/json/txt files.
        max_train: if given, randomly subsample this many training captions
            (with replacement).

    Returns:
        dict with captions, image indices, CNN features, vocab mappings and
        image URLs for the train and val splits.
    """
    data = {}
    # loading train&val captions, and train&val image index
    caption_file = os.path.join(base_dir, 'coco2014_captions.h5')
    with h5py.File(caption_file, 'r') as f: # keys are: train_captions, val_captions, train_image_idxs, val_image_idxs
        for k, v in f.items():
            data[k] = np.asarray(v)
    train_feat_file = os.path.join(base_dir, 'train2014_v3_pool_3.npy')
    data['train_features'] = np.load(train_feat_file)
    val_feat_file = os.path.join(base_dir, 'val2014_v3_pool_3.npy')
    data['val_features'] = np.load(val_feat_file)
    dict_file = os.path.join(base_dir, 'coco2014_vocab.json')
    with open(dict_file, 'r') as f:
        dict_data = json.load(f)
        for k, v in dict_data.items():
            data[k] = v
    # convert string to int for the keys
    data['idx_to_word'] = {int(k):v for k, v in data['idx_to_word'].items()}
    train_url_file = os.path.join(base_dir, 'train2014_urls.txt')
    with open(train_url_file, 'r') as f:
        train_urls = np.asarray([line.strip() for line in f])
    data['train_urls'] = train_urls
    val_url_file = os.path.join(base_dir, 'val2014_urls.txt')
    with open(val_url_file, 'r') as f:
        val_urls = np.asarray([line.strip() for line in f])
    data['val_urls'] = val_urls
    # Maybe subsample the training data
    if max_train is not None:
        num_train = data['train_captions'].shape[0]
        mask = np.random.randint(num_train, size=max_train)
        data['train_captions'] = data['train_captions'][mask]
        # BUG FIX: the h5 file stores this array under 'train_image_idxs'
        # (see key list above); 'train_image_idx' raised a KeyError here.
        data['train_image_idxs'] = data['train_image_idxs'][mask]
    return data
def decode_captions(captions, idx_to_word):
    """Convert a sequence of word indices into a list of words.

    Special tokens ('<NULL>', '<START>', '<END>') are dropped from the
    output, and decoding stops after the first '<END>' token.

    Args:
        captions: iterable of integer word indices for a single caption.
        idx_to_word: dict mapping integer index -> word string.

    Returns:
        List of decoded word strings, excluding special tokens.
    """
    # frozenset gives O(1) membership tests instead of a linear list scan.
    stop_words = frozenset(('<NULL>', '<START>', '<END>'))
    words = []
    # Renamed loop variable: `id` shadowed the builtin of the same name.
    for idx in captions:
        word = idx_to_word[idx]
        if word not in stop_words:
            words.append(word)
        if word == "<END>":
            break
    return words
def sample_coco_minibatch(data, batch_size=100, split='train'):
    """Sample a random minibatch of captions with matching features and URLs.

    Args:
        data: dict as returned by load_coco_data.
        batch_size: number of captions to sample (with replacement).
        split: either 'train' or 'val'.

    Returns:
        Tuple (captions, image_features, urls) aligned along axis 0.
    """
    split_size = data['%s_captions' % split].shape[0]
    # np.random.choice samples with replacement by default.
    mask = np.random.choice(split_size, batch_size)
    captions = data['%s_captions' % split][mask]
    # BUG FIX: the dataset stores '<split>_image_idxs' (plural) — the
    # singular key does not exist and raised KeyError.
    image_idxs = data['%s_image_idxs' % split][mask]
    image_features = data['%s_features' % split][image_idxs]
    urls = data['%s_urls' % split][image_idxs]
    return captions, image_features, urls
| 32.5 | 116 | 0.68815 |
c49c451048af11c27e55d74bec3137b0ffa46795 | 4,383 | py | Python | contrib/seeds/generate-seeds.py | Hallabois/hallacoin | 0132696c14a16150ef6aaa96b657ac73888bb51d | [
"MIT"
] | 4 | 2021-05-22T18:03:56.000Z | 2022-02-08T20:51:30.000Z | contrib/seeds/generate-seeds.py | Hallabois/hallacoin | 0132696c14a16150ef6aaa96b657ac73888bb51d | [
"MIT"
] | 2 | 2021-06-02T15:51:12.000Z | 2021-07-13T15:14:45.000Z | contrib/seeds/generate-seeds.py | Hallabois/hallacoin | 0132696c14a16150ef6aaa96b657ac73888bb51d | [
"MIT"
] | 1 | 2021-07-07T21:38:49.000Z | 2021-07-07T21:38:49.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys
import os
import re
# IPv4-mapped IPv6 prefix (::ffff:0:0/96): an IPv4 address is embedded in
# the last 4 bytes of a 16-byte IPv6 address.
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# Tor-specific OnionCat IPv6 prefix (fd87:d87e:eb43::/48) used to embed
# .onion addresses inside a 16-byte IPv6 address.
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
    """Convert an address string into a 16-byte IPv6-style bytearray.

    Supported forms: '<name>.onion' (base32, OnionCat-prefixed), dotted
    IPv4 (mapped into ::ffff:0:0/96), colon-separated IPv6, and
    '0xDDBBCCAA' little-endian IPv4 hex (legacy pnSeed format).

    Raises ValueError for unparseable input or an onion payload of the
    wrong length.
    """
    if len(addr)>6 and addr.endswith('.onion'):
        # Second arg True enables case-insensitive (casefolded) base32.
        vchAddr = b32decode(addr[0:-6], True)
        # Onion payload must fill exactly the bytes after the OnionCat prefix.
        if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % vchAddr)
        return pchOnionCat + vchAddr
    elif '.' in addr: # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr: # IPv6
        sub = [[], []] # prefix, suffix (bytes before and after a '::')
        x = 0  # index into sub: 0 until a '::' is seen, then 1
        addr = addr.split(':')
        for i,comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
                    continue
                x += 1 # :: skips to suffix
                # NOTE: assert-based validation is stripped under python -O.
                assert(x < 2)
            else: # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        # '::' expands to enough zero bytes to pad the address to 16 bytes.
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'): # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Parse a host spec into (16-byte address bytearray, port int).

    Accepts '[ipv6]', '[ipv6]:port', bare ipv6, 'host', and 'host:port';
    falls back to `defaultport` when no port is given. Address conversion
    is delegated to name_to_ipv6.
    """
    # Raw string: '\[' in a normal string literal is an invalid escape
    # sequence (SyntaxWarning in modern Python, slated to become an error).
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if match: # bracketed ipv6, optionally with port
        host = match.group(1)
        port = match.group(2)
    elif s.count(':') > 1: # bare ipv6, no port
        host = s
        port = ''
    else:
        (host,_,port) = s.partition(':')
    if not port:
        port = defaultport
    else:
        port = int(port)
    host = name_to_ipv6(host)
    return (host,port)
def process_nodes(g, f, structname, defaultport):
    """Write one C array of SeedSpec6 entries, read from the lines of `f`.

    Each non-comment, non-blank line is parsed with parse_spec and emitted
    as a '{{bytes}, port}' initializer; entries are comma-separated.
    """
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    entries = []
    for raw_line in f:
        # Strip an optional trailing '#' comment, then surrounding space.
        spec = raw_line.partition('#')[0].strip()
        if not spec:
            continue
        host, port = parse_spec(spec, defaultport)
        host_bytes = ','.join('0x%02x' % b for b in host)
        entries.append(' {{%s}, %i}' % (host_bytes, port))
    g.write(',\n'.join(entries))
    g.write('\n};\n')
def main():
    """Emit src/chainparamsseeds.h content to stdout.

    Expects one argument: the directory holding nodes_main.txt and
    nodes_test.txt.
    """
    if len(sys.argv) < 2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        sys.exit(1)
    out = sys.stdout
    indir = sys.argv[1]
    header = (
        '#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n'
        '#define BITCOIN_CHAINPARAMSSEEDS_H\n'
        '/**\n'
        ' * List of fixed seed nodes for the Hallacoin network\n'
        ' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n'
        ' *\n'
        ' * Each line contains a 16-byte IPv6 address and a port.\n'
        ' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n'
        ' */\n'
    )
    out.write(header)
    with open(os.path.join(indir, 'nodes_main.txt'), 'r', encoding="utf8") as f:
        process_nodes(out, f, 'pnSeed6_main', 9666)
    out.write('\n')
    with open(os.path.join(indir, 'nodes_test.txt'), 'r', encoding="utf8") as f:
        process_nodes(out, f, 'pnSeed6_test', 19335)
    out.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')

if __name__ == '__main__':
    main()
| 31.307143 | 99 | 0.583619 |
ee507003b082fd026b87d786dded5f1205ae6966 | 3,264 | py | Python | large_cohort/data_utils.py | HanGuo97/federated | 7e64bfe86bb606fad2ea7bc2a0f8ebdb565546f9 | [
"BSD-3-Clause"
] | 330 | 2020-09-14T23:10:16.000Z | 2022-03-30T19:49:19.000Z | large_cohort/data_utils.py | HanGuo97/federated | 7e64bfe86bb606fad2ea7bc2a0f8ebdb565546f9 | [
"BSD-3-Clause"
] | 52 | 2020-09-30T06:10:51.000Z | 2022-03-31T19:25:16.000Z | large_cohort/data_utils.py | HanGuo97/federated | 7e64bfe86bb606fad2ea7bc2a0f8ebdb565546f9 | [
"BSD-3-Clause"
] | 119 | 2020-09-24T04:54:46.000Z | 2022-03-31T21:46:57.000Z | # Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data utilities for large cohort experiments."""
import functools
import math
from typing import Callable, List, Optional, Tuple
import numpy as np
import tensorflow_federated as tff
ClientDataType = tff.simulation.datasets.ClientData
def create_train_validation_split(
    client_data: ClientDataType,
    seed: int = 1) -> Tuple[ClientDataType, ClientDataType]:
    """Partitions client data into training and validation data.

    Client ids are sorted (fixed ordering) then shuffled with a seeded RNG,
    so the 80/20 split is reproducible for a given seed.
    """
    shuffled_ids = sorted(client_data.client_ids)
    np.random.RandomState(seed=seed).shuffle(shuffled_ids)

    # First 80% (rounded up) of the shuffled ids train; the rest validate.
    split_point = int(np.ceil(0.8 * len(client_data.client_ids)))
    train_ids = shuffled_ids[:split_point]
    validation_ids = shuffled_ids[split_point:]

    # Rebuild ClientData objects restricted to each id subset.
    build = client_data.from_clients_and_tf_fn
    train_data = build(train_ids, client_data.serializable_dataset_fn)
    validation_data = build(validation_ids, client_data.serializable_dataset_fn)
    return train_data, validation_data
def create_sampling_fn(
    *,
    seed: int,
    client_ids: List[str],
    clients_per_round: int,
    rounds_to_double_cohort: Optional[int] = None
) -> Callable[[int], List[str]]:
    """Creates deterministic, uniform sampling function of client ids.

    When `rounds_to_double_cohort` is set, the sampled cohort size doubles
    every that many rounds, capped at the full population.
    """
    base_sampler = tff.simulation.build_uniform_sampling_fn(
        sample_range=client_ids, replace=False, random_seed=seed)

    if rounds_to_double_cohort is None:
        # Constant cohort size for every round.
        return functools.partial(base_sampler, size=clients_per_round)

    if not isinstance(rounds_to_double_cohort, int) or rounds_to_double_cohort <= 0:
        raise ValueError('rounds_to_double_cohort must be `None` or a positive '
                         f'integer. Got {rounds_to_double_cohort}')

    def doubling_train_client_sampling_fn(round_num) -> List[str]:
        # Cohort doubles once per `rounds_to_double_cohort` completed rounds.
        num_doublings = math.floor(round_num / rounds_to_double_cohort)
        cohort_size = clients_per_round * int(math.pow(2, num_doublings))
        if cohort_size > len(client_ids):
            # Doubled past the population size: return every client.
            return client_ids
        return base_sampler(round_num, size=cohort_size)  # pytype: disable=wrong-keyword-args  # gen-stub-imports

    return doubling_train_client_sampling_fn
| 41.316456 | 122 | 0.765012 |
97f2e17ced158a32932124900e926c4e86d8201f | 1,738 | py | Python | chrome/test/kasko/kasko_integration_test.py | google-ar/chromium | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 777 | 2017-08-29T15:15:32.000Z | 2022-03-21T05:29:41.000Z | chrome/test/kasko/kasko_integration_test.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 66 | 2017-08-30T18:31:18.000Z | 2021-08-02T10:59:35.000Z | chrome/test/kasko/kasko_integration_test.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 123 | 2017-08-30T01:19:34.000Z | 2022-03-17T22:55:31.000Z | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A Windows-only end-to-end integration test for Kasko, Chrome and Crashpad.
This test ensures that the interface between Kasko and Chrome and Crashpad works
as expected. The test causes Kasko to set certain crash keys and invoke a crash
report, which is in turn delivered to a locally hosted test crash server. If the
crash report is received intact with the expected crash keys then all is well.
Note that this test only works against non-component Release and Official builds
of Chrome with Chrome branding, and attempting to use it with anything else will
most likely lead to constant failures.
Typical usage (assuming in root 'src' directory):
- generate project files with the following GYP variables:
branding=Chrome syzyasan=1 win_z7=0 chromium_win_pch=0
- build the release Chrome binaries:
ninja -C out\Release chrome.exe chromedriver.exe
- run the test:
python chrome/test/kasko/kasko_integration_test.py
"""
import logging
import os
import sys
# Bring in the Kasko helper package from the sibling 'py' directory by
# extending sys.path (the package is not installed system-wide).
KASKO_DIR = os.path.join(os.path.dirname(__file__), 'py')
sys.path.append(KASKO_DIR)

import kasko

# Module logger named after this script file.
_LOGGER = logging.getLogger(os.path.basename(__file__))
def Main():
    """Run the Kasko/Chrome/Crashpad integration test.

    Returns 1 on failure; returns None (exit code 0) on success.
    """
    try:
        options = kasko.config.ParseCommandLine()
        # Navigating to chrome://kasko/send-report triggers a crash report
        # which the locally hosted test server is expected to receive
        # (see module docstring). The integer argument is presumably a
        # timeout or retry count — confirm in kasko.integration_test.
        kasko.integration_test.RunTest(
            options,
            'chrome://kasko/send-report',
            10,
            {'kasko-set-crash-key-value-impl': 'SetCrashKeyValueImpl'})
        _LOGGER.info('Test passed successfully!')
    except Exception as e:
        _LOGGER.error(e)
        return 1

if __name__ == '__main__':
    sys.exit(Main())
| 29.965517 | 80 | 0.748562 |
0071ded629d4fa96eccbbed7dbe304c7cfa6fe8f | 2,620 | py | Python | core/tests/test_polypod/test_specs/test_contexts_spec.py | gregmbi/polyaxon | 8f24089fa9cb5df28fc7b70aec27d6d23ee81e8d | [
"Apache-2.0"
] | null | null | null | core/tests/test_polypod/test_specs/test_contexts_spec.py | gregmbi/polyaxon | 8f24089fa9cb5df28fc7b70aec27d6d23ee81e8d | [
"Apache-2.0"
] | null | null | null | core/tests/test_polypod/test_specs/test_contexts_spec.py | gregmbi/polyaxon | 8f24089fa9cb5df28fc7b70aec27d6d23ee81e8d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from tests.utils import BaseTestCase
from polyaxon.polyaxonfile.specs import kinds
from polyaxon.polyflow import V1CompiledOperation, V1Plugins, V1RunKind
from polyaxon.polypod.specs.contexts import PluginsContextsSpec
@pytest.mark.polypod_mark
class TestPluginsContextsSpec(BaseTestCase):
    """Checks PluginsContextsSpec construction from compiled ops and V1Plugins."""

    # Flag attributes shared by all three tests, in a fixed order.
    _FLAGS = ("auth", "docker", "shm", "collect_artifacts",
              "collect_logs", "sync_statuses")

    def test_get_from_spec(self):
        compiled_operation = V1CompiledOperation.read(
            {
                "version": 1.05,
                "kind": kinds.COMPILED_OPERATION,
                "plugins": {
                    "auth": False,
                    "shm": False,
                    "collectLogs": False,
                    "collectArtifacts": False,
                    "syncStatuses": False,
                },
                "run": {"kind": V1RunKind.JOB, "container": {"image": "test"},},
            }
        )
        spec = PluginsContextsSpec.from_config(compiled_operation.plugins)
        # Every flag was disabled in the plugins section.
        for flag in self._FLAGS:
            assert getattr(spec, flag) is False

    def test_get_from_env(self):
        config = V1Plugins(
            auth=True,
            shm=True,
            docker=True,
            collect_artifacts=True,
            collect_logs=True,
            sync_statuses=True,
        )
        spec = PluginsContextsSpec.from_config(config)
        # Every flag was explicitly enabled.
        for flag in self._FLAGS:
            assert getattr(spec, flag) is True

    def test_get_from_empty_env(self):
        # Defaults: auth/collect_artifacts/collect_logs/sync_statuses on,
        # docker/shm off.
        spec = PluginsContextsSpec.from_config(V1Plugins())
        assert spec.auth is True
        assert spec.docker is False
        assert spec.shm is False
        assert spec.collect_artifacts is True
        assert spec.collect_logs is True
        assert spec.sync_statuses is True
| 34.025974 | 80 | 0.64542 |
081c915a3d4857ca323ee4bbd2b9457dce902fe3 | 4,327 | py | Python | canopy/openapi/models/get_study_download_url_query_result.py | CanopySimulations/canopy-python | 9ec37e674e65d6fbef0402ac0c612c163d55631e | [
"MIT"
] | null | null | null | canopy/openapi/models/get_study_download_url_query_result.py | CanopySimulations/canopy-python | 9ec37e674e65d6fbef0402ac0c612c163d55631e | [
"MIT"
] | 1 | 2022-01-31T10:18:08.000Z | 2022-01-31T10:18:08.000Z | canopy/openapi/models/get_study_download_url_query_result.py | CanopySimulations/canopy-python | 9ec37e674e65d6fbef0402ac0c612c163d55631e | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Canopy.Api
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from canopy.openapi.configuration import Configuration
class GetStudyDownloadUrlQueryResult(object):
    """OpenAPI model holding an access signature and its expiry.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Attribute name -> OpenAPI type name.
    openapi_types = {
        'access_signature': 'str',
        'expiry': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'access_signature': 'accessSignature',
        'expiry': 'expiry'
    }

    def __init__(self, access_signature=None, expiry=None, local_vars_configuration=None):  # noqa: E501
        """GetStudyDownloadUrlQueryResult - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            Configuration() if local_vars_configuration is None
            else local_vars_configuration)
        self._access_signature = None
        self._expiry = None
        self.discriminator = None
        if access_signature is not None:
            self.access_signature = access_signature
        if expiry is not None:
            self.expiry = expiry

    @property
    def access_signature(self):
        """Gets the access_signature of this GetStudyDownloadUrlQueryResult.  # noqa: E501

        :return: The access_signature of this GetStudyDownloadUrlQueryResult.  # noqa: E501
        :rtype: str
        """
        return self._access_signature

    @access_signature.setter
    def access_signature(self, access_signature):
        """Sets the access_signature of this GetStudyDownloadUrlQueryResult.

        :param access_signature: The access_signature of this GetStudyDownloadUrlQueryResult.  # noqa: E501
        :type: str
        """
        self._access_signature = access_signature

    @property
    def expiry(self):
        """Gets the expiry of this GetStudyDownloadUrlQueryResult.  # noqa: E501

        :return: The expiry of this GetStudyDownloadUrlQueryResult.  # noqa: E501
        :rtype: str
        """
        return self._expiry

    @expiry.setter
    def expiry(self, expiry):
        """Sets the expiry of this GetStudyDownloadUrlQueryResult.

        :param expiry: The expiry of this GetStudyDownloadUrlQueryResult.  # noqa: E501
        :type: str
        """
        self._expiry = expiry

    def to_dict(self):
        """Returns the model properties as a dict"""
        def convert(value):
            # One level of conversion only, mirroring generator semantics:
            # nested containers inside lists/dicts are left untouched.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {attr: convert(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, GetStudyDownloadUrlQueryResult)
                and self.to_dict() == other.to_dict())

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self.__eq__(other)
da2fa09dbd350721a909daba2a10d4a24546fb1c | 26,577 | py | Python | example/myshop/migrations/i18n_commodity/0001_initial.py | andyzsf/django-shop | 42a2463c39fe8e6136e104ee017c77f2222a6746 | [
"BSD-3-Clause"
] | 1 | 2019-03-22T15:20:18.000Z | 2019-03-22T15:20:18.000Z | example/myshop/migrations/i18n_commodity/0001_initial.py | andyzsf/django-shop | 42a2463c39fe8e6136e104ee017c77f2222a6746 | [
"BSD-3-Clause"
] | 13 | 2020-06-05T18:37:42.000Z | 2022-03-11T23:22:46.000Z | example/myshop/migrations/i18n_commodity/0001_initial.py | andyzsf/django-shop | 42a2463c39fe8e6136e104ee017c77f2222a6746 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-28 13:20
from __future__ import unicode_literals
import cms.models.fields
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import django_fsm
import djangocms_text_ckeditor.fields
import filer.fields.image
import jsonfield.fields
import shop.money.fields
import shop.payment.defaults
import shop.shipping.defaults
import shop_stripe.payment
class Migration(migrations.Migration):
initial = True
dependencies = [
('cms', '0013_urlconfrevision'),
('contenttypes', '0002_remove_content_type_name'),
('email_auth', '0002_auto_20160327_1119'),
('filer', '0004_auto_20160328_1434'),
]
operations = [
migrations.CreateModel(
name='BillingAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('priority', models.SmallIntegerField(help_text='Priority for using this address')),
('addressee', models.CharField(max_length=50, verbose_name='Addressee')),
('supplement', models.CharField(blank=True, max_length=50, null=True, verbose_name='Supplement')),
('street', models.CharField(max_length=50, verbose_name='Street')),
('zip_code', models.CharField(max_length=10, verbose_name='ZIP')),
('location', models.CharField(max_length=50, verbose_name='Location')),
('country', models.CharField(choices=[('AF', 'Afghanistan'), ('AX', 'Aland Islands'), ('AL', 'Albania'), ('DZ', 'Algeria'), ('AS', 'American Samoa'), ('AD', 'Andorra'), ('AO', 'Angola'), ('AI', 'Anguilla'), ('AQ', 'Antarctica'), ('AG', 'Antigua And Barbuda'), ('AR', 'Argentina'), ('AM', 'Armenia'), ('AW', 'Aruba'), ('AU', 'Australia'), ('AT', 'Austria'), ('AZ', 'Azerbaijan'), ('BS', 'Bahamas'), ('BH', 'Bahrain'), ('BD', 'Bangladesh'), ('BB', 'Barbados'), ('BY', 'Belarus'), ('BE', 'Belgium'), ('BZ', 'Belize'), ('BJ', 'Benin'), ('BM', 'Bermuda'), ('BT', 'Bhutan'), ('BO', 'Bolivia, Plurinational State Of'), ('BQ', 'Bonaire, Saint Eustatius And Saba'), ('BA', 'Bosnia And Herzegovina'), ('BW', 'Botswana'), ('BV', 'Bouvet Island'), ('BR', 'Brazil'), ('IO', 'British Indian Ocean Territory'), ('BN', 'Brunei Darussalam'), ('BG', 'Bulgaria'), ('BF', 'Burkina Faso'), ('BI', 'Burundi'), ('KH', 'Cambodia'), ('CM', 'Cameroon'), ('CA', 'Canada'), ('CV', 'Cape Verde'), ('KY', 'Cayman Islands'), ('CF', 'Central African Republic'), ('TD', 'Chad'), ('CL', 'Chile'), ('CN', 'China'), ('CX', 'Christmas Island'), ('CC', 'Cocos (Keeling) Islands'), ('CO', 'Colombia'), ('KM', 'Comoros'), ('CG', 'Congo'), ('CD', 'Congo, The Democratic Republic Of The'), ('CK', 'Cook Islands'), ('CR', 'Costa Rica'), ('HR', 'Croatia'), ('CU', 'Cuba'), ('CW', 'Curacao'), ('CY', 'Cyprus'), ('CZ', 'Czech Republic'), ('DK', 'Denmark'), ('DJ', 'Djibouti'), ('DM', 'Dominica'), ('DO', 'Dominican Republic'), ('EC', 'Ecuador'), ('EG', 'Egypt'), ('SV', 'El Salvador'), ('GQ', 'Equatorial Guinea'), ('ER', 'Eritrea'), ('EE', 'Estonia'), ('ET', 'Ethiopia'), ('FK', 'Falkland Islands (Malvinas)'), ('FO', 'Faroe Islands'), ('FJ', 'Fiji'), ('FI', 'Finland'), ('FR', 'France'), ('GF', 'French Guiana'), ('PF', 'French Polynesia'), ('TF', 'French Southern Territories'), ('GA', 'Gabon'), ('GM', 'Gambia'), ('DE', 'Germany'), ('GH', 'Ghana'), ('GI', 'Gibraltar'), ('GR', 'Greece'), ('GL', 'Greenland'), ('GD', 'Grenada'), ('GP', 
'Guadeloupe'), ('GU', 'Guam'), ('GT', 'Guatemala'), ('GG', 'Guernsey'), ('GN', 'Guinea'), ('GW', 'Guinea-Bissau'), ('GY', 'Guyana'), ('HT', 'Haiti'), ('HM', 'Heard Island and McDonald Islands'), ('VA', 'Holy See (Vatican City State)'), ('HN', 'Honduras'), ('HK', 'Hong Kong'), ('HU', 'Hungary'), ('IS', 'Iceland'), ('IN', 'India'), ('ID', 'Indonesia'), ('IR', 'Iran, Islamic Republic Of'), ('IQ', 'Iraq'), ('IE', 'Ireland'), ('IL', 'Israel'), ('IT', 'Italy'), ('CI', 'Ivory Coast'), ('JM', 'Jamaica'), ('JP', 'Japan'), ('JE', 'Jersey'), ('JO', 'Jordan'), ('KZ', 'Kazakhstan'), ('KE', 'Kenya'), ('KI', 'Kiribati'), ('KP', "Korea, Democratic People's Republic Of"), ('KR', 'Korea, Republic Of'), ('KS', 'Kosovo'), ('KW', 'Kuwait'), ('KG', 'Kyrgyzstan'), ('LA', "Lao People's Democratic Republic"), ('LV', 'Latvia'), ('LB', 'Lebanon'), ('LS', 'Lesotho'), ('LR', 'Liberia'), ('LY', 'Libyan Arab Jamahiriya'), ('LI', 'Liechtenstein'), ('LT', 'Lithuania'), ('LU', 'Luxembourg'), ('MO', 'Macao'), ('MK', 'Macedonia'), ('MG', 'Madagascar'), ('MW', 'Malawi'), ('MY', 'Malaysia'), ('MV', 'Maldives'), ('ML', 'Mali'), ('ML', 'Malta'), ('MH', 'Marshall Islands'), ('MQ', 'Martinique'), ('MR', 'Mauritania'), ('MU', 'Mauritius'), ('YT', 'Mayotte'), ('MX', 'Mexico'), ('FM', 'Micronesia'), ('MD', 'Moldova'), ('MC', 'Monaco'), ('MN', 'Mongolia'), ('ME', 'Montenegro'), ('MS', 'Montserrat'), ('MA', 'Morocco'), ('MZ', 'Mozambique'), ('MM', 'Myanmar'), ('NA', 'Namibia'), ('NR', 'Nauru'), ('NP', 'Nepal'), ('NL', 'Netherlands'), ('AN', 'Netherlands Antilles'), ('NC', 'New Caledonia'), ('NZ', 'New Zealand'), ('NI', 'Nicaragua'), ('NE', 'Niger'), ('NG', 'Nigeria'), ('NU', 'Niue'), ('NF', 'Norfolk Island'), ('MP', 'Northern Mariana Islands'), ('NO', 'Norway'), ('OM', 'Oman'), ('PK', 'Pakistan'), ('PW', 'Palau'), ('PS', 'Palestinian Territory, Occupied'), ('PA', 'Panama'), ('PG', 'Papua New Guinea'), ('PY', 'Paraguay'), ('PE', 'Peru'), ('PH', 'Philippines'), ('PN', 'Pitcairn'), ('PL', 'Poland'), ('PT', 
'Portugal'), ('PR', 'Puerto Rico'), ('QA', 'Qatar'), ('RE', 'Reunion'), ('RO', 'Romania'), ('RU', 'Russian Federation'), ('RW', 'Rwanda'), ('BL', 'Saint Barthelemy'), ('SH', 'Saint Helena, Ascension & Tristan Da Cunha'), ('KN', 'Saint Kitts and Nevis'), ('LC', 'Saint Lucia'), ('MF', 'Saint Martin (French Part)'), ('PM', 'Saint Pierre and Miquelon'), ('VC', 'Saint Vincent And The Grenadines'), ('WS', 'Samoa'), ('SM', 'San Marino'), ('ST', 'Sao Tome And Principe'), ('SA', 'Saudi Arabia'), ('SN', 'Senegal'), ('RS', 'Serbia'), ('SC', 'Seychelles'), ('SL', 'Sierra Leone'), ('SG', 'Singapore'), ('SX', 'Sint Maarten (Dutch Part)'), ('SK', 'Slovakia'), ('SI', 'Slovenia'), ('SB', 'Solomon Islands'), ('SO', 'Somalia'), ('ZA', 'South Africa'), ('GS', 'South Georgia And The South Sandwich Islands'), ('ES', 'Spain'), ('LK', 'Sri Lanka'), ('SD', 'Sudan'), ('SR', 'Suriname'), ('SJ', 'Svalbard And Jan Mayen'), ('SZ', 'Swaziland'), ('SE', 'Sweden'), ('CH', 'Switzerland'), ('SY', 'Syrian Arab Republic'), ('TW', 'Taiwan'), ('TJ', 'Tajikistan'), ('TZ', 'Tanzania'), ('TH', 'Thailand'), ('TL', 'Timor-Leste'), ('TG', 'Togo'), ('TK', 'Tokelau'), ('TO', 'Tonga'), ('TT', 'Trinidad and Tobago'), ('TN', 'Tunisia'), ('TR', 'Turkey'), ('TM', 'Turkmenistan'), ('TC', 'Turks And Caicos Islands'), ('TV', 'Tuvalu'), ('UG', 'Uganda'), ('UA', 'Ukraine'), ('AE', 'United Arab Emirates'), ('GB', 'United Kingdom'), ('US', 'United States'), ('UM', 'United States Minor Outlying Islands'), ('UY', 'Uruguay'), ('UZ', 'Uzbekistan'), ('VU', 'Vanuatu'), ('VE', 'Venezuela, Bolivarian Republic Of'), ('VN', 'Viet Nam'), ('VG', 'Virgin Islands, British'), ('VI', 'Virgin Islands, U.S.'), ('WF', 'Wallis and Futuna'), ('EH', 'Western Sahara'), ('YE', 'Yemen'), ('ZM', 'Zambia'), ('ZW', 'Zimbabwe')], max_length=3, verbose_name='Country')),
],
options={
'verbose_name': 'Billing Address',
'verbose_name_plural': 'Billing Addresses',
},
),
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated at')),
('extra', jsonfield.fields.JSONField(default={}, verbose_name='Arbitrary information for this cart')),
('billing_address', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='myshop.BillingAddress')),
],
),
migrations.CreateModel(
name='CartItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('extra', jsonfield.fields.JSONField(default={}, verbose_name='Arbitrary information for this cart item')),
('quantity', models.IntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='myshop.Cart')),
],
),
migrations.CreateModel(
name='Commodity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated at')),
('active', models.BooleanField(default=True, help_text='Is this product publicly visible.', verbose_name='Active')),
('product_code', models.CharField(max_length=255, unique=True, verbose_name='Product code')),
('unit_price', shop.money.fields.MoneyField(decimal_places=3, help_text='Net price for this product', verbose_name='Unit price')),
('order', models.PositiveIntegerField(db_index=True, verbose_name='Sort by')),
],
options={
'ordering': ('order',),
'verbose_name': 'Commodity',
'verbose_name_plural': 'Commodities',
},
),
migrations.CreateModel(
name='CommodityTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('translated_product_name', models.CharField(max_length=255, verbose_name='Product Name')),
('slug', models.SlugField(verbose_name='Slug')),
('description', djangocms_text_ckeditor.fields.HTMLField(help_text='Description for the list view of products.', verbose_name='Description')),
('master', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='myshop.Commodity')),
],
),
migrations.CreateModel(
name='Customer',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('recognized', models.PositiveSmallIntegerField(choices=[(0, 'Unrecognized'), (1, 'Guest'), (2, 'Registered')], default=0, help_text='Designates the state the customer is recognized as.', verbose_name='Recognized as')),
('salutation', models.CharField(choices=[('mrs', 'Mrs.'), ('mr', 'Mr.'), ('na', '(n/a)')], max_length=5, verbose_name='Salutation')),
('last_access', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Last accessed')),
('extra', jsonfield.fields.JSONField(default={}, editable=False, verbose_name='Extra information about this customer')),
('number', models.PositiveIntegerField(default=None, null=True, unique=True, verbose_name='Customer Number')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', django_fsm.FSMField(default='new', max_length=50, protected=True, verbose_name='Status')),
('currency', models.CharField(editable=False, help_text='Currency in which this order was concluded', max_length=7)),
('_subtotal', models.DecimalField(decimal_places=2, max_digits=30, verbose_name='Subtotal')),
('_total', models.DecimalField(decimal_places=2, max_digits=30, verbose_name='Total')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated at')),
('extra', jsonfield.fields.JSONField(default={}, help_text='Arbitrary information for this order object on the moment of purchase.', verbose_name='Extra fields')),
('stored_request', jsonfield.fields.JSONField(default={}, help_text='Parts of the Request objects on the moment of purchase.')),
('number', models.PositiveIntegerField(default=None, null=True, unique=True, verbose_name='Order Number')),
('shipping_address_text', models.TextField(blank=True, editable=False, help_text='Shipping address at the moment of purchase.', null=True, verbose_name='Shipping Address')),
('billing_address_text', models.TextField(blank=True, editable=False, help_text='Billing address at the moment of purchase.', null=True, verbose_name='Billing Address')),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to='myshop.Customer', verbose_name='Customer')),
],
options={
'verbose_name': 'Order',
'verbose_name_plural': 'Orders',
},
bases=(shop.payment.defaults.PayInAdvanceWorkflowMixin, shop.shipping.defaults.CommissionGoodsWorkflowMixin, shop_stripe.payment.OrderWorkflowMixin, models.Model),
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product_name', models.CharField(blank=True, help_text='Product name at the moment of purchase.', max_length=255, null=True, verbose_name='Product name')),
('product_code', models.CharField(blank=True, help_text='Product code at the moment of purchase.', max_length=255, null=True, verbose_name='Product code')),
('_unit_price', models.DecimalField(decimal_places=2, help_text='Products unit price at the moment of purchase.', max_digits=30, null=True, verbose_name='Unit price')),
('_line_total', models.DecimalField(decimal_places=2, help_text='Line total on the invoice at the moment of purchase.', max_digits=30, null=True, verbose_name='Line Total')),
('extra', jsonfield.fields.JSONField(default={}, help_text='Arbitrary information for this order item', verbose_name='Extra fields')),
('quantity', models.IntegerField(verbose_name='Ordered quantity')),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='myshop.Order', verbose_name='Order')),
('product', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='myshop.Commodity', verbose_name='Product')),
],
),
migrations.CreateModel(
name='OrderPayment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', shop.money.fields.MoneyField(help_text='How much was paid with this particular transfer.', verbose_name='Amount paid')),
('transaction_id', models.CharField(help_text="The transaction processor's reference", max_length=255, verbose_name='Transaction ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Received at')),
('payment_method', models.CharField(help_text='The payment backend used to process the purchase', max_length=50, verbose_name='Payment method')),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myshop.Order', verbose_name='Order')),
],
options={
'verbose_name': 'Order payment',
'verbose_name_plural': 'Order payments',
},
),
migrations.CreateModel(
name='ProductImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.SmallIntegerField(default=0)),
('image', filer.fields.image.FilerImageField(on_delete=django.db.models.deletion.CASCADE, to='filer.Image')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myshop.Commodity')),
],
),
migrations.CreateModel(
name='ProductPage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('page', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cms.Page')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myshop.Commodity')),
],
),
migrations.CreateModel(
name='ShippingAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('priority', models.SmallIntegerField(help_text='Priority for using this address')),
('addressee', models.CharField(max_length=50, verbose_name='Addressee')),
('supplement', models.CharField(blank=True, max_length=50, null=True, verbose_name='Supplement')),
('street', models.CharField(max_length=50, verbose_name='Street')),
('zip_code', models.CharField(max_length=10, verbose_name='ZIP')),
('location', models.CharField(max_length=50, verbose_name='Location')),
('country', models.CharField(choices=[('AF', 'Afghanistan'), ('AX', 'Aland Islands'), ('AL', 'Albania'), ('DZ', 'Algeria'), ('AS', 'American Samoa'), ('AD', 'Andorra'), ('AO', 'Angola'), ('AI', 'Anguilla'), ('AQ', 'Antarctica'), ('AG', 'Antigua And Barbuda'), ('AR', 'Argentina'), ('AM', 'Armenia'), ('AW', 'Aruba'), ('AU', 'Australia'), ('AT', 'Austria'), ('AZ', 'Azerbaijan'), ('BS', 'Bahamas'), ('BH', 'Bahrain'), ('BD', 'Bangladesh'), ('BB', 'Barbados'), ('BY', 'Belarus'), ('BE', 'Belgium'), ('BZ', 'Belize'), ('BJ', 'Benin'), ('BM', 'Bermuda'), ('BT', 'Bhutan'), ('BO', 'Bolivia, Plurinational State Of'), ('BQ', 'Bonaire, Saint Eustatius And Saba'), ('BA', 'Bosnia And Herzegovina'), ('BW', 'Botswana'), ('BV', 'Bouvet Island'), ('BR', 'Brazil'), ('IO', 'British Indian Ocean Territory'), ('BN', 'Brunei Darussalam'), ('BG', 'Bulgaria'), ('BF', 'Burkina Faso'), ('BI', 'Burundi'), ('KH', 'Cambodia'), ('CM', 'Cameroon'), ('CA', 'Canada'), ('CV', 'Cape Verde'), ('KY', 'Cayman Islands'), ('CF', 'Central African Republic'), ('TD', 'Chad'), ('CL', 'Chile'), ('CN', 'China'), ('CX', 'Christmas Island'), ('CC', 'Cocos (Keeling) Islands'), ('CO', 'Colombia'), ('KM', 'Comoros'), ('CG', 'Congo'), ('CD', 'Congo, The Democratic Republic Of The'), ('CK', 'Cook Islands'), ('CR', 'Costa Rica'), ('HR', 'Croatia'), ('CU', 'Cuba'), ('CW', 'Curacao'), ('CY', 'Cyprus'), ('CZ', 'Czech Republic'), ('DK', 'Denmark'), ('DJ', 'Djibouti'), ('DM', 'Dominica'), ('DO', 'Dominican Republic'), ('EC', 'Ecuador'), ('EG', 'Egypt'), ('SV', 'El Salvador'), ('GQ', 'Equatorial Guinea'), ('ER', 'Eritrea'), ('EE', 'Estonia'), ('ET', 'Ethiopia'), ('FK', 'Falkland Islands (Malvinas)'), ('FO', 'Faroe Islands'), ('FJ', 'Fiji'), ('FI', 'Finland'), ('FR', 'France'), ('GF', 'French Guiana'), ('PF', 'French Polynesia'), ('TF', 'French Southern Territories'), ('GA', 'Gabon'), ('GM', 'Gambia'), ('DE', 'Germany'), ('GH', 'Ghana'), ('GI', 'Gibraltar'), ('GR', 'Greece'), ('GL', 'Greenland'), ('GD', 'Grenada'), ('GP', 
'Guadeloupe'), ('GU', 'Guam'), ('GT', 'Guatemala'), ('GG', 'Guernsey'), ('GN', 'Guinea'), ('GW', 'Guinea-Bissau'), ('GY', 'Guyana'), ('HT', 'Haiti'), ('HM', 'Heard Island and McDonald Islands'), ('VA', 'Holy See (Vatican City State)'), ('HN', 'Honduras'), ('HK', 'Hong Kong'), ('HU', 'Hungary'), ('IS', 'Iceland'), ('IN', 'India'), ('ID', 'Indonesia'), ('IR', 'Iran, Islamic Republic Of'), ('IQ', 'Iraq'), ('IE', 'Ireland'), ('IL', 'Israel'), ('IT', 'Italy'), ('CI', 'Ivory Coast'), ('JM', 'Jamaica'), ('JP', 'Japan'), ('JE', 'Jersey'), ('JO', 'Jordan'), ('KZ', 'Kazakhstan'), ('KE', 'Kenya'), ('KI', 'Kiribati'), ('KP', "Korea, Democratic People's Republic Of"), ('KR', 'Korea, Republic Of'), ('KS', 'Kosovo'), ('KW', 'Kuwait'), ('KG', 'Kyrgyzstan'), ('LA', "Lao People's Democratic Republic"), ('LV', 'Latvia'), ('LB', 'Lebanon'), ('LS', 'Lesotho'), ('LR', 'Liberia'), ('LY', 'Libyan Arab Jamahiriya'), ('LI', 'Liechtenstein'), ('LT', 'Lithuania'), ('LU', 'Luxembourg'), ('MO', 'Macao'), ('MK', 'Macedonia'), ('MG', 'Madagascar'), ('MW', 'Malawi'), ('MY', 'Malaysia'), ('MV', 'Maldives'), ('ML', 'Mali'), ('ML', 'Malta'), ('MH', 'Marshall Islands'), ('MQ', 'Martinique'), ('MR', 'Mauritania'), ('MU', 'Mauritius'), ('YT', 'Mayotte'), ('MX', 'Mexico'), ('FM', 'Micronesia'), ('MD', 'Moldova'), ('MC', 'Monaco'), ('MN', 'Mongolia'), ('ME', 'Montenegro'), ('MS', 'Montserrat'), ('MA', 'Morocco'), ('MZ', 'Mozambique'), ('MM', 'Myanmar'), ('NA', 'Namibia'), ('NR', 'Nauru'), ('NP', 'Nepal'), ('NL', 'Netherlands'), ('AN', 'Netherlands Antilles'), ('NC', 'New Caledonia'), ('NZ', 'New Zealand'), ('NI', 'Nicaragua'), ('NE', 'Niger'), ('NG', 'Nigeria'), ('NU', 'Niue'), ('NF', 'Norfolk Island'), ('MP', 'Northern Mariana Islands'), ('NO', 'Norway'), ('OM', 'Oman'), ('PK', 'Pakistan'), ('PW', 'Palau'), ('PS', 'Palestinian Territory, Occupied'), ('PA', 'Panama'), ('PG', 'Papua New Guinea'), ('PY', 'Paraguay'), ('PE', 'Peru'), ('PH', 'Philippines'), ('PN', 'Pitcairn'), ('PL', 'Poland'), ('PT', 
'Portugal'), ('PR', 'Puerto Rico'), ('QA', 'Qatar'), ('RE', 'Reunion'), ('RO', 'Romania'), ('RU', 'Russian Federation'), ('RW', 'Rwanda'), ('BL', 'Saint Barthelemy'), ('SH', 'Saint Helena, Ascension & Tristan Da Cunha'), ('KN', 'Saint Kitts and Nevis'), ('LC', 'Saint Lucia'), ('MF', 'Saint Martin (French Part)'), ('PM', 'Saint Pierre and Miquelon'), ('VC', 'Saint Vincent And The Grenadines'), ('WS', 'Samoa'), ('SM', 'San Marino'), ('ST', 'Sao Tome And Principe'), ('SA', 'Saudi Arabia'), ('SN', 'Senegal'), ('RS', 'Serbia'), ('SC', 'Seychelles'), ('SL', 'Sierra Leone'), ('SG', 'Singapore'), ('SX', 'Sint Maarten (Dutch Part)'), ('SK', 'Slovakia'), ('SI', 'Slovenia'), ('SB', 'Solomon Islands'), ('SO', 'Somalia'), ('ZA', 'South Africa'), ('GS', 'South Georgia And The South Sandwich Islands'), ('ES', 'Spain'), ('LK', 'Sri Lanka'), ('SD', 'Sudan'), ('SR', 'Suriname'), ('SJ', 'Svalbard And Jan Mayen'), ('SZ', 'Swaziland'), ('SE', 'Sweden'), ('CH', 'Switzerland'), ('SY', 'Syrian Arab Republic'), ('TW', 'Taiwan'), ('TJ', 'Tajikistan'), ('TZ', 'Tanzania'), ('TH', 'Thailand'), ('TL', 'Timor-Leste'), ('TG', 'Togo'), ('TK', 'Tokelau'), ('TO', 'Tonga'), ('TT', 'Trinidad and Tobago'), ('TN', 'Tunisia'), ('TR', 'Turkey'), ('TM', 'Turkmenistan'), ('TC', 'Turks And Caicos Islands'), ('TV', 'Tuvalu'), ('UG', 'Uganda'), ('UA', 'Ukraine'), ('AE', 'United Arab Emirates'), ('GB', 'United Kingdom'), ('US', 'United States'), ('UM', 'United States Minor Outlying Islands'), ('UY', 'Uruguay'), ('UZ', 'Uzbekistan'), ('VU', 'Vanuatu'), ('VE', 'Venezuela, Bolivarian Republic Of'), ('VN', 'Viet Nam'), ('VG', 'Virgin Islands, British'), ('VI', 'Virgin Islands, U.S.'), ('WF', 'Wallis and Futuna'), ('EH', 'Western Sahara'), ('YE', 'Yemen'), ('ZM', 'Zambia'), ('ZW', 'Zimbabwe')], max_length=3, verbose_name='Country')),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myshop.Customer')),
],
options={
'verbose_name': 'Shipping Address',
'verbose_name_plural': 'Shipping Addresses',
},
),
migrations.AddField(
model_name='commodity',
name='cms_pages',
field=models.ManyToManyField(help_text='Choose list view this product shall appear on.', through='myshop.ProductPage', to='cms.Page'),
),
migrations.AddField(
model_name='commodity',
name='placeholder',
field=cms.models.fields.PlaceholderField(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, slotname='Commodity Details', to='cms.Placeholder'),
),
migrations.AddField(
model_name='commodity',
name='polymorphic_ctype',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_myshop.commodity_set+', to='contenttypes.ContentType'),
),
migrations.AddField(
model_name='commodity',
name='sample_image',
field=filer.fields.image.FilerImageField(on_delete=django.db.models.deletion.CASCADE, to='filer.Image'),
),
migrations.AddField(
model_name='cartitem',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myshop.Commodity'),
),
migrations.AddField(
model_name='cart',
name='customer',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='cart', to='myshop.Customer', verbose_name='Customer'),
),
migrations.AddField(
model_name='cart',
name='shipping_address',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='myshop.ShippingAddress'),
),
migrations.AddField(
model_name='billingaddress',
name='customer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myshop.Customer'),
),
migrations.AlterUniqueTogether(
name='commoditytranslation',
unique_together=set([('language_code', 'master')]),
),
]
| 110.7375 | 5,814 | 0.591376 |
fc229cf3bd3f850e85b9cd7c7280d934501e7c44 | 25,091 | py | Python | tests/admin_views/models.py | sergeykolosov/django | 03049fb8d96ccd1f1ed0285486103542de42faba | [
"PSF-2.0",
"BSD-3-Clause"
] | 12 | 2018-06-30T15:20:10.000Z | 2020-10-20T02:15:00.000Z | tests/admin_views/models.py | sergeykolosov/django | 03049fb8d96ccd1f1ed0285486103542de42faba | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2018-01-08T08:13:33.000Z | 2018-01-08T08:13:33.000Z | tests/admin_views/models.py | sergeykolosov/django | 03049fb8d96ccd1f1ed0285486103542de42faba | [
"PSF-2.0",
"BSD-3-Clause"
] | 5 | 2018-07-17T05:41:04.000Z | 2020-07-31T12:30:46.000Z | import datetime
import os
import tempfile
import uuid
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.db import models
class Section(models.Model):
"""
A simple section that links to articles, to test linking to related items
in admin views.
"""
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@property
def name_property(self):
"""
A property that simply returns the name. Used to test #24461
"""
return self.name
class Article(models.Model):
"""
A simple article to test admin views. Test backwards compatibility.
"""
title = models.CharField(max_length=100)
content = models.TextField()
date = models.DateTimeField()
section = models.ForeignKey(Section, models.CASCADE, null=True, blank=True)
another_section = models.ForeignKey(Section, models.CASCADE, null=True, blank=True, related_name='+')
sub_section = models.ForeignKey(Section, models.SET_NULL, null=True, blank=True, related_name='+')
def __str__(self):
return self.title
def model_year(self):
return self.date.year
model_year.admin_order_field = 'date'
model_year.short_description = ''
def model_year_reversed(self):
return self.date.year
model_year_reversed.admin_order_field = '-date'
model_year_reversed.short_description = ''
class Book(models.Model):
"""
A simple book that has chapters.
"""
name = models.CharField(max_length=100, verbose_name='¿Name?')
def __str__(self):
return self.name
class Promo(models.Model):
name = models.CharField(max_length=100, verbose_name='¿Name?')
book = models.ForeignKey(Book, models.CASCADE)
author = models.ForeignKey(User, models.SET_NULL, blank=True, null=True)
def __str__(self):
return self.name
class Chapter(models.Model):
title = models.CharField(max_length=100, verbose_name='¿Title?')
content = models.TextField()
book = models.ForeignKey(Book, models.CASCADE)
def __str__(self):
return self.title
class Meta:
# Use a utf-8 bytestring to ensure it works (see #11710)
verbose_name = '¿Chapter?'
class ChapterXtra1(models.Model):
chap = models.OneToOneField(Chapter, models.CASCADE, verbose_name='¿Chap?')
xtra = models.CharField(max_length=100, verbose_name='¿Xtra?')
guest_author = models.ForeignKey(User, models.SET_NULL, blank=True, null=True)
def __str__(self):
return '¿Xtra1: %s' % self.xtra
class ChapterXtra2(models.Model):
chap = models.OneToOneField(Chapter, models.CASCADE, verbose_name='¿Chap?')
xtra = models.CharField(max_length=100, verbose_name='¿Xtra?')
def __str__(self):
return '¿Xtra2: %s' % self.xtra
class RowLevelChangePermissionModel(models.Model):
name = models.CharField(max_length=100, blank=True)
class CustomArticle(models.Model):
content = models.TextField()
date = models.DateTimeField()
class ModelWithStringPrimaryKey(models.Model):
string_pk = models.CharField(max_length=255, primary_key=True)
def __str__(self):
return self.string_pk
def get_absolute_url(self):
return '/dummy/%s/' % self.string_pk
class Color(models.Model):
value = models.CharField(max_length=10)
warm = models.BooleanField(default=False)
def __str__(self):
return self.value
# we replicate Color to register with another ModelAdmin
class Color2(Color):
class Meta:
proxy = True
class Thing(models.Model):
title = models.CharField(max_length=20)
color = models.ForeignKey(Color, models.CASCADE, limit_choices_to={'warm': True})
pub_date = models.DateField(blank=True, null=True)
def __str__(self):
return self.title
class Actor(models.Model):
name = models.CharField(max_length=50)
age = models.IntegerField()
title = models.CharField(max_length=50, null=True, blank=True)
def __str__(self):
return self.name
class Inquisition(models.Model):
expected = models.BooleanField(default=False)
leader = models.ForeignKey(Actor, models.CASCADE)
country = models.CharField(max_length=20)
def __str__(self):
return "by %s from %s" % (self.leader, self.country)
class Sketch(models.Model):
title = models.CharField(max_length=100)
inquisition = models.ForeignKey(
Inquisition,
models.CASCADE,
limit_choices_to={
'leader__name': 'Palin',
'leader__age': 27,
'expected': False,
},
)
defendant0 = models.ForeignKey(
Actor,
models.CASCADE,
limit_choices_to={'title__isnull': False},
related_name='as_defendant0',
)
defendant1 = models.ForeignKey(
Actor,
models.CASCADE,
limit_choices_to={'title__isnull': True},
related_name='as_defendant1',
)
def __str__(self):
return self.title
def today_callable_dict():
return {"last_action__gte": datetime.datetime.today()}
def today_callable_q():
return models.Q(last_action__gte=datetime.datetime.today())
class Character(models.Model):
username = models.CharField(max_length=100)
last_action = models.DateTimeField()
def __str__(self):
return self.username
class StumpJoke(models.Model):
variation = models.CharField(max_length=100)
most_recently_fooled = models.ForeignKey(
Character,
models.CASCADE,
limit_choices_to=today_callable_dict,
related_name="+",
)
has_fooled_today = models.ManyToManyField(Character, limit_choices_to=today_callable_q, related_name="+")
def __str__(self):
return self.variation
class Fabric(models.Model):
NG_CHOICES = (
('Textured', (
('x', 'Horizontal'),
('y', 'Vertical'),
)),
('plain', 'Smooth'),
)
surface = models.CharField(max_length=20, choices=NG_CHOICES)
class Person(models.Model):
GENDER_CHOICES = (
(1, "Male"),
(2, "Female"),
)
name = models.CharField(max_length=100)
gender = models.IntegerField(choices=GENDER_CHOICES)
age = models.IntegerField(default=21)
alive = models.BooleanField(default=True)
def __str__(self):
return self.name
class Persona(models.Model):
"""
A simple persona associated with accounts, to test inlining of related
accounts which inherit from a common accounts class.
"""
name = models.CharField(blank=False, max_length=80)
def __str__(self):
return self.name
class Account(models.Model):
"""
A simple, generic account encapsulating the information shared by all
types of accounts.
"""
username = models.CharField(blank=False, max_length=80)
persona = models.ForeignKey(Persona, models.CASCADE, related_name="accounts")
servicename = 'generic service'
def __str__(self):
return "%s: %s" % (self.servicename, self.username)
class FooAccount(Account):
"""A service-specific account of type Foo."""
servicename = 'foo'
class BarAccount(Account):
"""A service-specific account of type Bar."""
servicename = 'bar'
class Subscriber(models.Model):
name = models.CharField(blank=False, max_length=80)
email = models.EmailField(blank=False, max_length=175)
def __str__(self):
return "%s (%s)" % (self.name, self.email)
class ExternalSubscriber(Subscriber):
pass
class OldSubscriber(Subscriber):
pass
class Media(models.Model):
name = models.CharField(max_length=60)
class Podcast(Media):
release_date = models.DateField()
class Meta:
ordering = ('release_date',) # overridden in PodcastAdmin
class Vodcast(Media):
media = models.OneToOneField(Media, models.CASCADE, primary_key=True, parent_link=True)
released = models.BooleanField(default=False)
class Parent(models.Model):
name = models.CharField(max_length=128)
def clean(self):
if self.name == '_invalid':
raise ValidationError('invalid')
class Child(models.Model):
parent = models.ForeignKey(Parent, models.CASCADE, editable=False)
name = models.CharField(max_length=30, blank=True)
def clean(self):
if self.name == '_invalid':
raise ValidationError('invalid')
class EmptyModel(models.Model):
def __str__(self):
return "Primary key = %s" % self.id
temp_storage = FileSystemStorage(tempfile.mkdtemp())
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class Gallery(models.Model):
name = models.CharField(max_length=100)
class Picture(models.Model):
name = models.CharField(max_length=100)
image = models.FileField(storage=temp_storage, upload_to='test_upload')
gallery = models.ForeignKey(Gallery, models.CASCADE, related_name="pictures")
class Language(models.Model):
iso = models.CharField(max_length=5, primary_key=True)
name = models.CharField(max_length=50)
english_name = models.CharField(max_length=50)
shortlist = models.BooleanField(default=False)
class Meta:
ordering = ('iso',)
# a base class for Recommender and Recommendation
class Title(models.Model):
pass
class TitleTranslation(models.Model):
title = models.ForeignKey(Title, models.CASCADE)
text = models.CharField(max_length=100)
class Recommender(Title):
pass
class Recommendation(Title):
the_recommender = models.ForeignKey(Recommender, models.CASCADE)
class Collector(models.Model):
name = models.CharField(max_length=100)
class Widget(models.Model):
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
class DooHickey(models.Model):
code = models.CharField(max_length=10, primary_key=True)
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
class Grommet(models.Model):
code = models.AutoField(primary_key=True)
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
class Whatsit(models.Model):
index = models.IntegerField(primary_key=True)
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
class Doodad(models.Model):
name = models.CharField(max_length=100)
class FancyDoodad(Doodad):
owner = models.ForeignKey(Collector, models.CASCADE)
expensive = models.BooleanField(default=True)
class Category(models.Model):
collector = models.ForeignKey(Collector, models.CASCADE)
order = models.PositiveIntegerField()
class Meta:
ordering = ('order',)
def __str__(self):
return '%s:o%s' % (self.id, self.order)
def link_posted_default():
return datetime.date.today() - datetime.timedelta(days=7)
class Link(models.Model):
posted = models.DateField(default=link_posted_default)
url = models.URLField()
post = models.ForeignKey("Post", models.CASCADE)
readonly_link_content = models.TextField()
class PrePopulatedPost(models.Model):
title = models.CharField(max_length=100)
published = models.BooleanField(default=False)
slug = models.SlugField()
class PrePopulatedSubPost(models.Model):
post = models.ForeignKey(PrePopulatedPost, models.CASCADE)
subtitle = models.CharField(max_length=100)
subslug = models.SlugField()
class Post(models.Model):
title = models.CharField(max_length=100, help_text="Some help text for the title (with unicode ŠĐĆŽćžšđ)")
content = models.TextField(help_text="Some help text for the content (with unicode ŠĐĆŽćžšđ)")
readonly_content = models.TextField()
posted = models.DateField(
default=datetime.date.today,
help_text="Some help text for the date (with unicode ŠĐĆŽćžšđ)"
)
public = models.NullBooleanField()
def awesomeness_level(self):
return "Very awesome."
# Proxy model to test overridden fields attrs on Post model so as not to
# interfere with other tests.
class FieldOverridePost(Post):
class Meta:
proxy = True
class Gadget(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Villain(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class SuperVillain(Villain):
pass
class FunkyTag(models.Model):
"Because we all know there's only one real use case for GFKs."
name = models.CharField(max_length=25)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
def __str__(self):
return self.name
class Plot(models.Model):
name = models.CharField(max_length=100)
team_leader = models.ForeignKey(Villain, models.CASCADE, related_name='lead_plots')
contact = models.ForeignKey(Villain, models.CASCADE, related_name='contact_plots')
tags = GenericRelation(FunkyTag)
def __str__(self):
return self.name
class PlotDetails(models.Model):
details = models.CharField(max_length=100)
plot = models.OneToOneField(Plot, models.CASCADE, null=True, blank=True)
def __str__(self):
return self.details
class PlotProxy(Plot):
class Meta:
proxy = True
class SecretHideout(models.Model):
""" Secret! Not registered with the admin! """
location = models.CharField(max_length=100)
villain = models.ForeignKey(Villain, models.CASCADE)
def __str__(self):
return self.location
class SuperSecretHideout(models.Model):
""" Secret! Not registered with the admin! """
location = models.CharField(max_length=100)
supervillain = models.ForeignKey(SuperVillain, models.CASCADE)
def __str__(self):
return self.location
class Bookmark(models.Model):
name = models.CharField(max_length=60)
tag = GenericRelation(FunkyTag, related_query_name='bookmark')
def __str__(self):
return self.name
class CyclicOne(models.Model):
name = models.CharField(max_length=25)
two = models.ForeignKey('CyclicTwo', models.CASCADE)
def __str__(self):
return self.name
class CyclicTwo(models.Model):
name = models.CharField(max_length=25)
one = models.ForeignKey(CyclicOne, models.CASCADE)
def __str__(self):
return self.name
class Topping(models.Model):
name = models.CharField(max_length=20)
def __str__(self):
return self.name
class Pizza(models.Model):
name = models.CharField(max_length=20)
toppings = models.ManyToManyField('Topping', related_name='pizzas')
# Pizza's ModelAdmin has readonly_fields = ['toppings'].
# toppings is editable for this model's admin.
class ReadablePizza(Pizza):
class Meta:
proxy = True
class Album(models.Model):
owner = models.ForeignKey(User, models.SET_NULL, null=True, blank=True)
title = models.CharField(max_length=30)
class Employee(Person):
code = models.CharField(max_length=20)
class WorkHour(models.Model):
datum = models.DateField()
employee = models.ForeignKey(Employee, models.CASCADE)
class Question(models.Model):
question = models.CharField(max_length=20)
posted = models.DateField(default=datetime.date.today)
expires = models.DateTimeField(null=True, blank=True)
related_questions = models.ManyToManyField('self')
def __str__(self):
return self.question
class Answer(models.Model):
question = models.ForeignKey(Question, models.PROTECT)
answer = models.CharField(max_length=20)
def __str__(self):
return self.answer
class Answer2(Answer):
class Meta:
proxy = True
class Reservation(models.Model):
start_date = models.DateTimeField()
price = models.IntegerField()
DRIVER_CHOICES = (
('bill', 'Bill G'),
('steve', 'Steve J'),
)
RESTAURANT_CHOICES = (
('indian', 'A Taste of India'),
('thai', 'Thai Pography'),
('pizza', 'Pizza Mama'),
)
class FoodDelivery(models.Model):
reference = models.CharField(max_length=100)
driver = models.CharField(max_length=100, choices=DRIVER_CHOICES, blank=True)
restaurant = models.CharField(max_length=100, choices=RESTAURANT_CHOICES, blank=True)
class Meta:
unique_together = (("driver", "restaurant"),)
class CoverLetter(models.Model):
author = models.CharField(max_length=30)
date_written = models.DateField(null=True, blank=True)
def __str__(self):
return self.author
class Paper(models.Model):
title = models.CharField(max_length=30)
author = models.CharField(max_length=30, blank=True, null=True)
class ShortMessage(models.Model):
content = models.CharField(max_length=140)
timestamp = models.DateTimeField(null=True, blank=True)
class Telegram(models.Model):
title = models.CharField(max_length=30)
date_sent = models.DateField(null=True, blank=True)
def __str__(self):
return self.title
class Story(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
class OtherStory(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
class ComplexSortedPerson(models.Model):
name = models.CharField(max_length=100)
age = models.PositiveIntegerField()
is_employee = models.NullBooleanField()
class PluggableSearchPerson(models.Model):
name = models.CharField(max_length=100)
age = models.PositiveIntegerField()
class PrePopulatedPostLargeSlug(models.Model):
"""
Regression test for #15938: a large max_length for the slugfield must not
be localized in prepopulated_fields_js.html or it might end up breaking
the javascript (ie, using THOUSAND_SEPARATOR ends up with maxLength=1,000)
"""
title = models.CharField(max_length=100)
published = models.BooleanField(default=False)
# `db_index=False` because MySQL cannot index large CharField (#21196).
slug = models.SlugField(max_length=1000, db_index=False)
class AdminOrderedField(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class AdminOrderedModelMethod(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
def some_order(self):
return self.order
some_order.admin_order_field = 'order'
class AdminOrderedAdminMethod(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class AdminOrderedCallable(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class Report(models.Model):
title = models.CharField(max_length=100)
def __str__(self):
return self.title
class MainPrepopulated(models.Model):
name = models.CharField(max_length=100)
pubdate = models.DateField()
status = models.CharField(
max_length=20,
choices=(('option one', 'Option One'),
('option two', 'Option Two')))
slug1 = models.SlugField(blank=True)
slug2 = models.SlugField(blank=True)
slug3 = models.SlugField(blank=True, allow_unicode=True)
class RelatedPrepopulated(models.Model):
parent = models.ForeignKey(MainPrepopulated, models.CASCADE)
name = models.CharField(max_length=75)
fk = models.ForeignKey('self', models.CASCADE, blank=True, null=True)
m2m = models.ManyToManyField('self', blank=True)
pubdate = models.DateField()
status = models.CharField(
max_length=20,
choices=(('option one', 'Option One'),
('option two', 'Option Two')))
slug1 = models.SlugField(max_length=50)
slug2 = models.SlugField(max_length=60)
class UnorderedObject(models.Model):
"""
Model without any defined `Meta.ordering`.
Refs #16819.
"""
name = models.CharField(max_length=255)
bool = models.BooleanField(default=True)
class UndeletableObject(models.Model):
"""
Model whose show_delete in admin change_view has been disabled
Refs #10057.
"""
name = models.CharField(max_length=255)
class UnchangeableObject(models.Model):
"""
Model whose change_view is disabled in admin
Refs #20640.
"""
class UserMessenger(models.Model):
"""
Dummy class for testing message_user functions on ModelAdmin
"""
class Simple(models.Model):
"""
Simple model with nothing on it for use in testing
"""
class Choice(models.Model):
choice = models.IntegerField(
blank=True, null=True,
choices=((1, 'Yes'), (0, 'No'), (None, 'No opinion')),
)
class ParentWithDependentChildren(models.Model):
"""
Issue #20522
Model where the validation of child foreign-key relationships depends
on validation of the parent
"""
some_required_info = models.PositiveIntegerField()
family_name = models.CharField(max_length=255, blank=False)
class DependentChild(models.Model):
"""
Issue #20522
Model that depends on validation of the parent class for one of its
fields to validate during clean
"""
parent = models.ForeignKey(ParentWithDependentChildren, models.CASCADE)
family_name = models.CharField(max_length=255)
class _Manager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(pk__gt=1)
class FilteredManager(models.Model):
def __str__(self):
return "PK=%d" % self.pk
pk_gt_1 = _Manager()
objects = models.Manager()
class EmptyModelVisible(models.Model):
""" See ticket #11277. """
class EmptyModelHidden(models.Model):
""" See ticket #11277. """
class EmptyModelMixin(models.Model):
""" See ticket #11277. """
class State(models.Model):
name = models.CharField(max_length=100)
class City(models.Model):
state = models.ForeignKey(State, models.CASCADE)
name = models.CharField(max_length=100)
def get_absolute_url(self):
return '/dummy/%s/' % self.pk
class Restaurant(models.Model):
city = models.ForeignKey(City, models.CASCADE)
name = models.CharField(max_length=100)
def get_absolute_url(self):
return '/dummy/%s/' % self.pk
class Worker(models.Model):
work_at = models.ForeignKey(Restaurant, models.CASCADE)
name = models.CharField(max_length=50)
surname = models.CharField(max_length=50)
# Models for #23329
class ReferencedByParent(models.Model):
name = models.CharField(max_length=20, unique=True)
class ParentWithFK(models.Model):
fk = models.ForeignKey(
ReferencedByParent,
models.CASCADE,
to_field='name',
related_name='hidden+',
)
class ChildOfReferer(ParentWithFK):
pass
# Models for #23431
class InlineReferer(models.Model):
pass
class ReferencedByInline(models.Model):
name = models.CharField(max_length=20, unique=True)
class InlineReference(models.Model):
referer = models.ForeignKey(InlineReferer, models.CASCADE)
fk = models.ForeignKey(
ReferencedByInline,
models.CASCADE,
to_field='name',
related_name='hidden+',
)
class Recipe(models.Model):
rname = models.CharField(max_length=20, unique=True)
class Ingredient(models.Model):
iname = models.CharField(max_length=20, unique=True)
recipes = models.ManyToManyField(Recipe, through='RecipeIngredient')
class RecipeIngredient(models.Model):
    # Through model for Ingredient.recipes; both FKs reference unique
    # character columns (to_field) rather than the default pk.
    ingredient = models.ForeignKey(Ingredient, models.CASCADE, to_field='iname')
    recipe = models.ForeignKey(Recipe, models.CASCADE, to_field='rname')
# Model for #23839
class NotReferenced(models.Model):
# Don't point any FK at this model.
pass
# Models for #23934
class ExplicitlyProvidedPK(models.Model):
name = models.IntegerField(primary_key=True)
class ImplicitlyGeneratedPK(models.Model):
name = models.IntegerField(unique=True)
# Models for #25622
class ReferencedByGenRel(models.Model):
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
class GenRelReference(models.Model):
references = GenericRelation(ReferencedByGenRel)
class ParentWithUUIDPK(models.Model):
    # UUID primary key generated client-side on save; editable=False keeps
    # it out of forms/admin.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    title = models.CharField(max_length=100)
    def __str__(self):
        return str(self.id)
class RelatedWithUUIDPKModel(models.Model):
    # SET_NULL on delete requires null=True; blank=True also allows the
    # field to be left empty in forms.
    parent = models.ForeignKey(ParentWithUUIDPK, on_delete=models.SET_NULL, null=True, blank=True)
class Author(models.Model):
pass
class Authorship(models.Model):
book = models.ForeignKey(Book, models.CASCADE)
author = models.ForeignKey(Author, models.CASCADE)
| 25.76078 | 110 | 0.699135 |
8026fea7bae4741b8f34c1cbadd7de2de3c00549 | 733 | py | Python | setup.py | luxunator/menupy | 83094b3d8dd30dcde4927399121e901346cfee3c | [
"MIT"
] | 16 | 2019-04-30T23:36:01.000Z | 2021-03-12T11:23:35.000Z | setup.py | luxunator/menupy | 83094b3d8dd30dcde4927399121e901346cfee3c | [
"MIT"
] | 2 | 2019-05-13T20:51:54.000Z | 2019-05-17T04:53:18.000Z | setup.py | luxunator/menupy | 83094b3d8dd30dcde4927399121e901346cfee3c | [
"MIT"
] | 1 | 2019-05-13T21:14:45.000Z | 2019-05-13T21:14:45.000Z | import setuptools
# Read the long description from the README so PyPI can render the
# project page.  The encoding is pinned to UTF-8: without it, open()
# uses the platform default codec (e.g. cp1252 on Windows), which can
# raise UnicodeDecodeError on non-ASCII README content.
with open("README.md", "r", encoding="utf-8") as f:
    long_description = f.read()
setuptools.setup(
    name="menupy",
    version="1.0.2",
    author="luxunator",
    author_email="luxunator@pm.me",
    url="https://github.com/luxunator/menupy",
    description="Interactive Python Menu",
    packages=['menupy'],
    keywords='menu menupy curses',
    long_description=long_description,
    long_description_content_type="text/markdown",
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent"
    ]
)
| 28.192308 | 54 | 0.641201 |
eb61feac2fc8aa03cffd587e385db5a0466afb57 | 8,166 | py | Python | sso/verification/tests/test_view.py | uktrade/sso | f4fb527cfe12955c079251031261f2407956bad3 | [
"MIT"
] | null | null | null | sso/verification/tests/test_view.py | uktrade/sso | f4fb527cfe12955c079251031261f2407956bad3 | [
"MIT"
] | null | null | null | sso/verification/tests/test_view.py | uktrade/sso | f4fb527cfe12955c079251031261f2407956bad3 | [
"MIT"
] | null | null | null | from datetime import date, datetime, timedelta
import dateutil.parser
import pytest
from allauth.account.models import EmailAddress
from django.core.cache import cache
from django.urls import reverse
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.utils.timezone import now
from freezegun import freeze_time
from pytz import UTC
from rest_framework.test import APIClient
from sso.user.tests.factories import UserFactory
from sso.verification import helpers, models
from sso.verification.tests.factories import VerificationCodeFactory
@pytest.fixture
def api_client():
    """Fresh DRF test client used by every test in this module."""
    return APIClient()
@freeze_time("2018-01-14 12:00:01")
@pytest.mark.django_db
def test_regenerate_code(api_client):
    """Regenerating replaces the code, resets `created` and returns the new code."""
    verification_code = VerificationCodeFactory()
    old_code = verification_code.code
    response = api_client.post(
        reverse('api:verification-code-regenerate'),
        {
            'email': verification_code.user.email,
        },
        format='json',
    )
    assert response.status_code == 200
    verification_code.refresh_from_db()
    # Creation timestamp is reset to "now" (frozen above) and a new code issued.
    assert verification_code.created == datetime(2018, 1, 14, 12, 0, 1, tzinfo=UTC)
    assert verification_code.code != old_code
    # The response body echoes the fresh code and its expiration date.
    new_code = response.json()
    assert new_code['code'] == verification_code.code
    expiration_date = dateutil.parser.parse(new_code['expiration_date'])
    assert expiration_date == verification_code.expiration_date
@pytest.mark.django_db
def test_regenerate_code_verified_code(api_client):
verification_code = VerificationCodeFactory()
original_code = verification_code.code
original_date_verified = date(2018, 1, 14)
verification_code.date_verified = original_date_verified
verification_code.save()
response = api_client.post(
reverse('api:verification-code-regenerate'),
{
'email': verification_code.user.email,
},
format='json',
)
assert response.status_code == 400
verification_code.refresh_from_db()
assert verification_code.date_verified == original_date_verified
assert verification_code.code == original_code
@pytest.mark.django_db
def test_regenerate_code_no_user(api_client):
    """Regenerating for an unknown email returns 404 and creates nothing."""
    assert models.VerificationCode.objects.count() == 0
    response = api_client.post(
        reverse('api:verification-code-regenerate'),
        {
            'email': 'donot@exist.com',
        },
        format='json',
    )
    assert response.status_code == 404
    # No VerificationCode row may appear as a side effect of the failed call.
    assert models.VerificationCode.objects.count() == 0
@freeze_time("2018-01-14 12:00:01")
@pytest.mark.django_db
def test_verify_verification_code(api_client):
    """A correct code verifies the account, sets session cookies and marks the email."""
    verification_code = VerificationCodeFactory()
    assert verification_code.code
    url = reverse('api:verification-code-verify')
    response = api_client.post(
        url,
        {
            'code': verification_code.code,
            'email': verification_code.user.email,
        },
        format='json',
    )
    verification_code.refresh_from_db()
    assert response.status_code == 200
    # Successful verification logs the user in: session + display cookies are set.
    assert response.cookies['debug_sso_session_cookie']
    assert response.cookies['sso_display_logged_in'].value == 'true'
    # date_verified is stamped with "today" (frozen above).
    assert verification_code.date_verified == date(2018, 1, 14)
    # The allauth email address record is created verified and primary.
    assert (
        EmailAddress.objects.filter(
            user=verification_code.user,
            verified=True,
            email=verification_code.user.email,
            primary=True,
        ).count()
        == 1
    )
@pytest.mark.django_db
def test_verify_verification_code_invalid(api_client):
verification_code = VerificationCodeFactory()
assert verification_code.code
url = reverse('api:verification-code-verify')
response = api_client.post(
url,
{
'code': '12345',
'email': verification_code.user.email,
},
format='json',
)
assert response.status_code == 400
assert verification_code.date_verified is None
@pytest.mark.django_db
def test_verify_verification_code_expired(api_client):
    """A code created 100 days ago is expired and verification fails with 400."""
    with freeze_time(now() - timedelta(days=100)):
        verification_code = VerificationCodeFactory()
    url = reverse('api:verification-code-verify')
    response = api_client.post(url, {'code': '12345', 'email': verification_code.user.email}, format='json')
    assert response.status_code == 400
    assert verification_code.date_verified is None
@pytest.mark.django_db
def test_verify_verification_code_verified(api_client):
    """An already-verified code cannot be verified again; date_verified is unchanged."""
    with freeze_time(now() - timedelta(days=100)):
        verification_code = VerificationCodeFactory(date_verified=date(2021, 12, 29))
    url = reverse('api:verification-code-verify')
    response = api_client.post(url, {'code': '12345', 'email': verification_code.user.email}, format='json')
    assert response.status_code == 400
    assert verification_code.date_verified == date(2021, 12, 29)
@pytest.mark.django_db
def test_verify_no_verification_code(api_client):
user = UserFactory()
api_client.force_authenticate(user=user)
url = reverse('api:verification-code-verify')
response = api_client.post(
url,
{
'code': 'my-name-is-jeff',
'email': user.email,
},
format='json',
)
assert response.status_code == 404
@pytest.mark.django_db
def test_verify_verification_code_limit_exceeded(api_client):
    """After 12 failed attempts the endpoint locks out further tries with 403."""
    cache.clear()
    verification_code = VerificationCodeFactory()
    url = reverse('api:verification-code-verify')
    invalid_code = '1234'
    assert verification_code.code
    for i in range(13):
        response = api_client.post(
            url,
            {
                'code': invalid_code,
                'email': verification_code.user.email,
            },
            format='json',
        )
        # First 12 wrong codes fail validation (400); the 13th is throttled (403).
        if i < 12:
            assert response.status_code == 400
        else:
            assert response.status_code == 403
@pytest.mark.django_db
def test_verify_verification_code_with_uidb64_and_token(api_client):
    """The email can be replaced by a base64 user id plus a signed token."""
    cache.clear()
    verification_code = VerificationCodeFactory()
    uidb64 = urlsafe_base64_encode(force_bytes(verification_code.user.pk))
    token = helpers.verification_token.make_token(verification_code.user)
    url = reverse('api:verification-code-verify')
    response = api_client.post(
        url,
        {
            'code': verification_code.code,
            'uidb64': uidb64,
            'token': token,
        },
        format='json',
    )
    # The resolved user's email is echoed back on success.
    assert response.json()['email'] == verification_code.user.email
    assert response.status_code == 200
@pytest.mark.django_db
def test_verify_verification_code_with_token_missing(api_client):
cache.clear()
verification_code = VerificationCodeFactory()
uidb64 = urlsafe_base64_encode(force_bytes(verification_code.user.pk))
url = reverse('api:verification-code-verify')
response = api_client.post(
url,
{
'code': verification_code.code,
'uidb64': uidb64,
},
format='json',
)
assert response.status_code == 404
@pytest.mark.django_db
def test_verify_verification_code_with_wrong_uidb64(api_client):
cache.clear()
verification_code = VerificationCodeFactory()
uidb64 = 'aBcDe'
token = helpers.verification_token.make_token(verification_code.user)
url = reverse('api:verification-code-verify')
response = api_client.post(
url,
{
'code': verification_code.code,
'uidb64': uidb64,
'token': token,
},
format='json',
)
assert response.status_code == 404
@pytest.mark.django_db
def test_verify_verification_code_with_wrong_token(api_client):
cache.clear()
verification_code = VerificationCodeFactory()
uidb64 = urlsafe_base64_encode(force_bytes(verification_code.user.pk))
token = '12345'
url = reverse('api:verification-code-verify')
response = api_client.post(
url,
{
'code': verification_code.code,
'uidb64': uidb64,
'token': token,
},
format='json',
)
assert response.status_code == 404
| 28.256055 | 108 | 0.679035 |
c80d59ef061c8986c4c2aabe1b4e816f84d74850 | 1,679 | py | Python | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/common/types/value.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/common/types/value.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/common/types/value.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.common',
marshal='google.ads.googleads.v8',
manifest={
'Value',
},
)
class Value(proto.Message):
    r"""A generic data container.

    All five fields belong to the same ``oneof`` group (``value``), so at
    most one of them is populated on any given instance.

    Attributes:
        boolean_value (bool):
            A boolean.
        int64_value (int):
            An int64.
        float_value (float):
            A float.
        double_value (float):
            A double.
        string_value (str):
            A string.
    """
    boolean_value = proto.Field(
        proto.BOOL,
        number=1,
        oneof='value',
    )
    int64_value = proto.Field(
        proto.INT64,
        number=2,
        oneof='value',
    )
    float_value = proto.Field(
        proto.FLOAT,
        number=3,
        oneof='value',
    )
    double_value = proto.Field(
        proto.DOUBLE,
        number=4,
        oneof='value',
    )
    string_value = proto.Field(
        proto.STRING,
        number=5,
        oneof='value',
    )
__all__ = tuple(sorted(__protobuf__.manifest))
| 23.319444 | 74 | 0.605122 |
974073117af14a224c8f5b7fbd41f265a20b2003 | 5,575 | py | Python | free_subnets.py | NixM0nk3y/net.subnet.ipv4.analyzer | a8b23b3261392f76e423cb4fbcebd3338969e842 | [
"Apache-2.0"
] | null | null | null | free_subnets.py | NixM0nk3y/net.subnet.ipv4.analyzer | a8b23b3261392f76e423cb4fbcebd3338969e842 | [
"Apache-2.0"
] | null | null | null | free_subnets.py | NixM0nk3y/net.subnet.ipv4.analyzer | a8b23b3261392f76e423cb4fbcebd3338969e842 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
__description__ = 'ipv4 network address analyzer. prints all unassigned ip blocks'
__author__ = 'Paul Komurka pawlex@gmail.com'
__version__ = '0.1'
__date__ = '2017/09/27'
import ipaddress
import itertools
import operator
import bisect
_subnets_config="subnets.txt" ## list of all the defined subnets in CIDR format (127.0.0.1/8)
_networks_config="networks.txt" ## list of all define networks in CIDR format delimited by newline.
# biggest subnet to chunk free space into
# script breaks if free subnets are larger than this
MASK_SIZE=8
def File2Strings(filename):
    """Read *filename* and return its lines with trailing newlines stripped.

    Returns None when the file cannot be opened or read, preserving the
    original best-effort contract.  The context manager guarantees the
    file handle is closed, and the bare ``except:`` clauses are narrowed
    to ``Exception`` so e.g. KeyboardInterrupt is no longer swallowed.
    """
    try:
        with open(filename, 'r') as f:
            return [line.rstrip('\n') for line in f.readlines()]
    except Exception:
        return None
#
#
def ipstr2int(ip):
    """Return the integer form of a dotted-quad address string."""
    parsed = ipaddress.ip_address(str(ip))
    return int(parsed)
def ipint2str(ip):
    """Return the dotted-quad string form of an integer address."""
    parsed = ipaddress.ip_address(ip)
    return str(parsed)
def expandNetwork(network):
    """Expand a CIDR string into the list of all its addresses as ints.

    Parsing is non-strict, so set host bits in *network* are tolerated.
    Iterating the network yields every address, including the network
    and broadcast addresses.
    """
    net = ipaddress.ip_network(str(network), False)
    return [int(addr) for addr in net]
#
def Main():
    """Report unassigned IPv4 space.

    Reads the configured networks and subnets files, expands both into
    per-host integer lists, collects addresses that fall inside a network
    but inside no subnet, groups those orphans into consecutive runs, and
    prints each run broken into the largest aligned subnet blocks found.
    """
    _defined_networks = File2Strings(_networks_config);
    _defined_subnets = File2Strings(_subnets_config);
    _ip_aggregate = []
    _subnet_ip_aggregate = []
    # expand the list of hosts within all defined networks.
    # Make 2 lists: all ips and all ip's within defined subnets.
    for net in _defined_networks:
        _ip_aggregate += expandNetwork(net)
    for subnet in _defined_subnets:
        _subnet_ip_aggregate += expandNetwork(subnet)
    # Make a list of the orphaned IP addresses. (not belonging to any assigned
    # subnet)
    # NOTE(review): linear membership scan per address — O(n*m); fine for
    # small configs but slow for large ranges.
    _ip_orphans = []
    for ip_address in _ip_aggregate:
        if ip_address not in _subnet_ip_aggregate:
            #print "%s" % ip_address
            _ip_orphans.append(ip_address)
    #
    #
    ## Make sequential lists of the leftovers.
    ## groupby key (index - value) is constant across a run of consecutive ints.
    _ = []
    for k, g in itertools.groupby(enumerate(_ip_orphans), lambda i_x: i_x[0]-i_x[1]):
        #print map(operator.itemgetter(1), g)
        _.append([ipint2str(y) for y in map(operator.itemgetter(1), g)])
    _ip_orphans = _
    ##
    ##
    ## Now comes the fun part. Make the largest aligned subnets out of the left overs.
    _unaligned = []
    _aligned = []
    print()
    for i in _ip_orphans:
        if(checkSubnetListForAlignment(i)):
            _aligned.append(i)
        else:
            _unaligned.append(i)
    #
    #
    print("Available IP subnets")
    list(map(printSubnetSummary, _aligned))
    ## Handle the unaligned subnets. Keep iterating throught the list until we
    ## have made the largest subnets possible.
    for u in _unaligned:
        _largest = getInterval(len(u))
        while(len(u)):
            # check TOP down
            if( checkSubnetListForAlignment(u[0:_largest]) ):
                _sub = u[0:_largest]
                printSubnetSummary( _sub )
                u = [x for x in u if x not in _sub]
                # reset largest to max size based on length
                _largest = getInterval(len(u))
            # check bottom up
            elif( checkSubnetListForAlignment(u[(len(u)-_largest):]) ):
                _sub = u[(len(u)-_largest):]
                printSubnetSummary(_sub)
                u = [x for x in u if x not in _sub]
                _largest = getInterval(len(u))
            else:
                # did not find a solution.
                # check the next smaller subnet block.
                _largest = getInterval(_largest-1)
#
def getLastOctet(x):
    """Return the fourth dotted octet of address string *x*, as a string."""
    octets = x.split(".")
    return octets[3]
def checkSubnetListForAlignment(i):
    """Check whether a run of consecutive hosts forms an aligned block.

    *i* is a list of dotted-quad strings.  Returns True when the first
    host's last octet is a multiple of the run length, i.e. the run
    starts on a len(i)-aligned boundary.

    An empty run can never form a subnet, so it returns False instead of
    raising ZeroDivisionError (which the previous version did).
    """
    if not i:
        return False
    return int(getLastOctet(i[0])) % len(i) == 0
#
def checkHostForAlignment(ip, hosts=None, bits=None):
    """Return True when *ip* starts an aligned block of the given size.

    The block size may be given either as a host count (*hosts*) or a
    prefix length (*bits*); when neither is supplied the answer is False.
    """
    lut = getSubnetLutDict(MASK_SIZE)
    if hosts is None and bits is None:
        return False
    if hosts is None:
        # Reverse lookup: the host count whose prefix length equals *bits*.
        num_hosts = list(lut.keys())[list(lut.values()).index(bits)]
    elif hosts in lut:
        num_hosts = hosts
    else:
        return False
    return int(getLastOctet(ip)) % num_hosts == 0
#
#
def hosts2bits(hosts):
    """Map a host count to its prefix length; unknown counts fall back to /32."""
    return getSubnetLutDict(MASK_SIZE).get(hosts, 32)
#
def bits2hosts(bits):
    """Map a prefix length back to its host count; unknown lengths yield 32."""
    lut = getSubnetLutDict(MASK_SIZE)
    for num_hosts, prefix in lut.items():
        if prefix == bits:
            return num_hosts
    return 32
#
def printSubnetSummary(i):
    """Print one line describing a run of hosts: start, end, size and prefix."""
    first, last, size = i[0], i[-1], len(i)
    print("START:%16s\tEND:%16s\tSIZE:%3d (/%d)" % (first, last, size, hosts2bits(size)))
def getSubnetLutDict(downto):
    """Build a lookup dict mapping host counts to prefix lengths.

    Keys are powers of two (1, 2, 4, ...) and values the matching prefix
    length, from /32 for a single host down to /downto.
    """
    return {2 ** exp: 32 - exp for exp in range(33 - downto)}
#
def getInterval(n):
    """Return the largest LUT block size that is <= n (rounded down).

    The LUT keys are sorted and bisected; values smaller than the
    smallest key wrap around to the largest one.
    """
    sizes = sorted(getSubnetLutDict(MASK_SIZE))
    return sizes[bisect.bisect(sizes, n) - 1]
if __name__ == '__main__':
Main()
#
| 28.156566 | 99 | 0.618655 |
2ea4508eda8b273ac6156a32d7e221f5685b57f1 | 608 | py | Python | my_env/Lib/site-packages/sklearn/ensemble/weight_boosting.py | obulrdy6881/Drowsinss | 61cb9281d7dd22aee282b517e2fbf500f0ff9935 | [
"MIT"
] | 2 | 2021-05-02T07:59:56.000Z | 2021-12-14T19:53:13.000Z | Web application/env/Lib/site-packages/sklearn/ensemble/weight_boosting.py | arpit0891/Covid-19-and-Pneumonia-detection-from-X-Ray | 6b2756e4672ab25083a0a50f44f36bec1833e789 | [
"MIT"
] | 7 | 2021-06-08T21:46:24.000Z | 2022-03-12T00:35:31.000Z | my_env/Lib/site-packages/sklearn/ensemble/weight_boosting.py | obulrdy6881/Drowsinss | 61cb9281d7dd22aee282b517e2fbf500f0ff9935 | [
"MIT"
] | 1 | 2021-05-02T07:59:59.000Z | 2021-05-02T07:59:59.000Z |
# THIS FILE WAS AUTOMATICALLY GENERATED BY deprecated_modules.py
import sys
# mypy error: Module X has no attribute y (typically for C extensions)
from . import _weight_boosting  # type: ignore
from ..externals._pep562 import Pep562
from ..utils.deprecation import _raise_dep_warning_if_not_pytest
# Old (deprecated) import location, and where users should import from now.
deprecated_path = 'sklearn.ensemble.weight_boosting'
correct_import_path = 'sklearn.ensemble'
# Warn anyone importing this shim module (suppressed when running pytest).
_raise_dep_warning_if_not_pytest(deprecated_path, correct_import_path)
# Module-level __getattr__ (PEP 562) forwards attribute access to the
# real implementation module.
def __getattr__(name):
    return getattr(_weight_boosting, name)
# Interpreters older than 3.7 lack PEP 562 support, so emulate it.
if not sys.version_info >= (3, 7):
    Pep562(__name__)
| 32 | 71 | 0.782895 |
b7b2946703341c0763810d636c9aeb6d8c3771cc | 1,632 | py | Python | ff/ff_api_translator.py | gillespiejim78/ffl-webapp | 942edc46efaacc85a33092ffb1511eda021caaab | [
"Apache-2.0"
] | null | null | null | ff/ff_api_translator.py | gillespiejim78/ffl-webapp | 942edc46efaacc85a33092ffb1511eda021caaab | [
"Apache-2.0"
] | null | null | null | ff/ff_api_translator.py | gillespiejim78/ffl-webapp | 942edc46efaacc85a33092ffb1511eda021caaab | [
"Apache-2.0"
] | null | null | null | import ff.ff_api as ff_api
import json
def get_teams():
    """Return a JSON array of team summaries ordered by overall standing (rank 1 first)."""
    teams = ff_api.fetch_teams()
    standings = [None] * len(teams)
    for team in teams:
        record = team['record']
        summary = {
            'teamName': team['teamLocation'] + ' ' + team['teamNickname'],
            'teamId': team['teamId'],
            'record': '{}-{}-{}'.format(record['overallWins'],
                                        record['overallLosses'],
                                        record['overallTies']),
        }
        # overallStanding is 1-based; slot each team at its rank.
        standings[team['overallStanding'] - 1] = summary
    return json.dumps(standings)
# [
# [{'teamName': 'Ertz', 'score': 31, 'logoUrl': 'logo.jpg'}, {'teamName': 'Hav', 'score': 0, 'logoUrl': 'logo.jpg'}],
# [{}, {}],
# [{}, {}]
# ]
def get_current_scoreboard():
    """Return a JSON list of matchups, each a two-entry list of team summaries."""
    scoreboard = ff_api.fetch_scoreboard()
    pairs = [
        [get_team_score(matchup['teams'][0]), get_team_score(matchup['teams'][1])]
        for matchup in scoreboard['matchups']
    ]
    return json.dumps(pairs)
def get_team_score(team_scoreboard):
    """Flatten one side of a matchup into a {teamName, score, logoUrl} dict."""
    team = team_scoreboard['team']
    return {
        'teamName': team['teamLocation'] + ' ' + team['teamNickname'],
        'score': team_scoreboard['score'],
        'logoUrl': team['logoUrl'],
    }
| 32.64 | 119 | 0.598652 |
ce9b1c885a8c65fa785030723ec87ed8e7886fc1 | 7,814 | py | Python | docs/conf.py | ShipJ/TargetingModel | 34358e6a74a1ae922b45cf609ee3a2bbcf385adb | [
"MIT"
] | null | null | null | docs/conf.py | ShipJ/TargetingModel | 34358e6a74a1ae922b45cf609ee3a2bbcf385adb | [
"MIT"
] | null | null | null | docs/conf.py | ShipJ/TargetingModel | 34358e6a74a1ae922b45cf609ee3a2bbcf385adb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# ChinaDelegates documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ChinaDelegates'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DelegateTargetingdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'DelegateTargeting.tex',
u'ChinaDelegates Documentation',
u"Jack S", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'DelegateTargeting', u'ChinaDelegates Documentation',
[u"Jack S"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'DelegateTargeting', u'ChinaDelegates Documentation',
u"Jack S", 'ChinaDelegates',
'China Delegate Targeting Model', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.893878 | 80 | 0.7086 |
ff972ec5a8d459c1d77caba49ea09d467fb2ad37 | 1,108 | py | Python | hy/inspect.py | josephwillard/hy | a91d7dd3f5045cd92384901f6c5e1ecc1aed962a | [
"MIT"
] | null | null | null | hy/inspect.py | josephwillard/hy | a91d7dd3f5045cd92384901f6c5e1ecc1aed962a | [
"MIT"
] | 1 | 2017-06-15T02:11:48.000Z | 2017-06-15T16:03:47.000Z | hy/inspect.py | josephwillard/hy | a91d7dd3f5045cd92384901f6c5e1ecc1aed962a | [
"MIT"
] | null | null | null | # Copyright 2018 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
from __future__ import absolute_import
import inspect
try:
    # Probe for inspect.signature (the modern introspection API).  When it
    # is missing we fall back to the legacy getargspec-based helpers below.
    inspect.signature # noqa
except AttributeError:
    def get_arity(fn):
        # Number of named positional arguments reported by getargspec.
        return len(inspect.getargspec(fn)[0])
    def has_kwargs(fn):
        # True when fn declares a **kwargs catch-all ("keywords" slot).
        argspec = inspect.getargspec(fn)
        return argspec.keywords is not None
    def format_args(fn):
        # Render fn's argument spec as a string, e.g. "(a, b=1, **kw)".
        argspec = inspect.getargspec(fn)
        return inspect.formatargspec(*argspec)
else:
    def get_arity(fn):
        # Count only plain positional-or-keyword parameters.
        parameters = inspect.signature(fn).parameters
        return sum(1 for param in parameters.values()
                   if param.kind == param.POSITIONAL_OR_KEYWORD)
    def has_kwargs(fn):
        # True when fn declares a **kwargs catch-all.
        parameters = inspect.signature(fn).parameters
        return any(param.kind == param.VAR_KEYWORD
                   for param in parameters.values())
    def format_args(fn):
        # str() of a Signature is the parenthesised parameter list.
        return str(inspect.signature(fn))
| 29.157895 | 74 | 0.676895 |
9d20f9d7b8c473a5718771bccb8a541d823958b4 | 1,973 | py | Python | datasetsnx/bamboo/tag.py | ckxy/part-of-hitogata | 76402d48a336fcd964d0e64bb01d959e8f07f296 | [
"MIT"
] | null | null | null | datasetsnx/bamboo/tag.py | ckxy/part-of-hitogata | 76402d48a336fcd964d0e64bb01d959e8f07f296 | [
"MIT"
] | null | null | null | datasetsnx/bamboo/tag.py | ckxy/part-of-hitogata | 76402d48a336fcd964d0e64bb01d959e8f07f296 | [
"MIT"
] | null | null | null | import copy
from collections import Iterable
from .base_internode import BaseInternode
__all__ = ['EraseTags', 'RenameTag', 'CopyTag']
class EraseTags(BaseInternode):
    """Internode that removes the given keys from the data dict."""

    def __init__(self, tags):
        # Accept a single tag name or any iterable of tag names.
        if isinstance(tags, str):
            self.tags = [tags]
        elif isinstance(tags, Iterable):
            self.tags = list(tags)
        else:
            raise ValueError

    def __call__(self, data_dict):
        # Drop every configured key; a missing key raises KeyError, as before.
        for name in self.tags:
            data_dict.pop(name)
        return data_dict

    def __repr__(self):
        return 'EraseTags(tags={})'.format(tuple(self.tags))

    def rper(self):
        return 'EraseTags(not available)'
class RenameTag(BaseInternode):
    """Internode that moves a value from one key to another."""

    def __init__(self, old_name, new_name):
        self.old_name = old_name
        self.new_name = new_name

    def __call__(self, data_dict):
        # Pop from the old key and store under the new one.
        moved = data_dict.pop(self.old_name)
        data_dict[self.new_name] = moved
        return data_dict

    def reverse(self, **kwargs):
        # Undo the rename when the new key is present.
        if self.new_name in kwargs:
            kwargs[self.old_name] = kwargs.pop(self.new_name)
        return kwargs

    def __repr__(self):
        return 'RenameTag(old_name={}, new_name={})'.format(self.old_name, self.new_name)

    def rper(self):
        # Reverse representation swaps the two names.
        return 'RenameTag(old_name={}, new_name={})'.format(self.new_name, self.old_name)
class CopyTag(BaseInternode):
    """Internode that deep-copies the value of one key onto another key."""

    def __init__(self, src_tag, dst_tag):
        self.src_tag = src_tag
        self.dst_tag = dst_tag

    def __call__(self, data_dict):
        # Deep copy so later mutations of one entry do not affect the other.
        duplicate = copy.deepcopy(data_dict[self.src_tag])
        data_dict[self.dst_tag] = duplicate
        return data_dict

    def __repr__(self):
        return 'CopyTag(src_tag={}, dst_tag={})'.format(self.src_tag, self.dst_tag)

    def rper(self):
        return 'CopyTag(not available)'
| 28.594203 | 89 | 0.635073 |
c9636b18237628fff88565b91780a3c335d1b2e9 | 14,177 | py | Python | .history/src/Simulador_20200711121915.py | eduardodut/Trabalho_final_estatistica_cd | fbedbbea6bdd7a79e1d62030cde0fab4e93fc338 | [
"MIT"
] | null | null | null | .history/src/Simulador_20200711121915.py | eduardodut/Trabalho_final_estatistica_cd | fbedbbea6bdd7a79e1d62030cde0fab4e93fc338 | [
"MIT"
] | null | null | null | .history/src/Simulador_20200711121915.py | eduardodut/Trabalho_final_estatistica_cd | fbedbbea6bdd7a79e1d62030cde0fab4e93fc338 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from Matriz_esferica import Matriz_esferica
from Individuo import Individuo, Fabrica_individuo
import random
from itertools import permutations
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from scipy.sparse import csr_matrix, lil_matrix
class Simulador():
    """Cellular epidemic simulator on a spherical (wrap-around) grid.

    Every cell of an ``n x n`` grid holds one ``Individuo``.  Each call to
    :meth:`iterar` performs one simulation step (infection of neighbours,
    death/cure checks) and appends the aggregated counters as a new row of
    :attr:`dataframe`.
    """

    def __init__(self,
                 tamanho_matriz,            # rows/columns of the spherical grid
                 percentual_inicial_tipo1,  # initial fraction infected as type 1
                 percentual_inicial_tipo2,  # initial fraction infected as type 2
                 chance_infeccao,           # chance of infecting a healthy neighbour
                 chance_infeccao_tipo2,     # chance a new infection is of type 2
                 chance_morte,              # chance a type-2 individual dies per update
                 atualizacoes_cura):        # updates required to cure an individual
        self.num_atualizacoes = 0
        self.lista_infectados_tipo_2 = []
        self.lista_infectados_tipo_1 = []
        self.num_curados = 0
        self.num_mortos = 0
        self.chance_infeccao = chance_infeccao
        self.chance_infeccao_tipo2 = chance_infeccao_tipo2
        self.chance_morte = chance_morte
        self.atualizacoes_cura = atualizacoes_cura

        self.populacao_inicial = int(tamanho_matriz ** 2)
        self.num_inicial_tipo2 = int(self.populacao_inicial * percentual_inicial_tipo2)
        # At least one type-1 infected individual is always created.
        self.num_inicial_tipo1 = 1 + int(self.populacao_inicial * percentual_inicial_tipo1)
        self.num_inicial_sadios = self.populacao_inicial - (
            self.num_inicial_tipo2 + self.num_inicial_tipo1)

        # df_individuos stores the Individuo objects; matriz_status mirrors
        # their integer status codes for cheap snapshots/plotting.
        self.df_individuos = pd.DataFrame(index=range(tamanho_matriz),
                                          columns=range(tamanho_matriz))
        self.matriz_status = lil_matrix((tamanho_matriz, tamanho_matriz),
                                        dtype=np.uint8)

        self.popular(tamanho_matriz)

        self.lista_matrizes_posicionamento = []
        # Helper that validates positions/neighbourhoods on the wrap-around grid.
        self.matriz_esferica = Matriz_esferica(tamanho_matriz)

        # Counters of the initial state; one row is appended per update.
        # (Renamed from ``dict`` to avoid shadowing the builtin.)
        contadores_iniciais = {
            'num_sadios': self.num_inicial_sadios,
            'num_infect_t1': self.num_inicial_tipo1,
            'num_infect_t2': self.num_inicial_tipo2,
            'num_curados': 0,
            'num_mortos': 0}
        self.dataframe = pd.DataFrame(contadores_iniciais, index=[0])
        self.salvar_posicionamento()

    def criar_individuo(self, status, posicao):
        """Build a new ``Individuo`` with this simulation's cure time."""
        return Individuo(status, self.atualizacoes_cura, posicao)

    def salvar_posicionamento(self):
        """Snapshot the current grid for later inspection/plotting.

        NOTE(review): ``to_numpy()`` yields an object array of Individuo
        instances (not their integer status codes) and rebinds
        ``self.matriz_status`` -- verify before plotting these snapshots
        with a numeric colormap.
        """
        self.matriz_status = self.df_individuos.to_numpy()
        self.lista_matrizes_posicionamento.append(self.matriz_status)

    def verificar_infeccao(self, lista_infectantes):
        """Return the (type-1, type-2) positions newly infected by the
        individuals at the positions in ``lista_infectantes``."""
        lista_novos_infectados_tipo1 = []
        lista_novos_infectados_tipo2 = []

        for X, Y in lista_infectantes:
            # Every healthy neighbour of an infectious cell may be infected.
            for x, y in self.matriz_esferica.get_vizinhos(X, Y):
                if self.df_individuos.iloc[x, y].status == Individuo.SADIO:
                    # Fix: use the instance parameters instead of relying on
                    # module-level globals of the same name.
                    novo_status = self.infectar(self.chance_infeccao,
                                                self.chance_infeccao_tipo2)
                    if novo_status == Individuo.INFECTADO_TIPO_1:
                        lista_novos_infectados_tipo1.append((x, y))
                        self.df_individuos.iloc[x, y] = self.criar_individuo(
                            Individuo.INFECTADO_TIPO_1, (x, y))
                        self.matriz_status[x, y] = Individuo.INFECTADO_TIPO_1
                    if novo_status == Individuo.INFECTADO_TIPO_2:
                        lista_novos_infectados_tipo2.append((x, y))
                        self.df_individuos.iloc[x, y] = self.criar_individuo(
                            Individuo.INFECTADO_TIPO_2, (x, y))
                        self.matriz_status[x, y] = Individuo.INFECTADO_TIPO_2

        return lista_novos_infectados_tipo1, lista_novos_infectados_tipo2

    def verificar_morte(self, lista_infectantes_tipo2):
        """Run the death/cure check for each type-2 infected position.

        Returns:
            (lista_mortos, lista_curados): positions that died / were cured.
        """
        lista_curados = []
        lista_mortos = []
        for x, y in lista_infectantes_tipo2:
            novo_status = self.df_individuos.iloc[x, y].checagem_morte(self.chance_morte)
            if novo_status == Individuo.MORTO:
                self.matriz_status[x, y] = Individuo.MORTO
                lista_mortos.append((x, y))
            if novo_status == Individuo.CURADO:
                self.matriz_status[x, y] = Individuo.CURADO
                lista_curados.append((x, y))
        return lista_mortos, lista_curados

    def verificar_cura(self, lista_infectantes):
        """Run the cure check for each type-1 infected position."""
        lista_curados = []
        for x, y in lista_infectantes:
            novo_status = self.df_individuos.iloc[x, y].checagem_cura()
            if novo_status == Individuo.CURADO:
                self.matriz_status[x, y] = Individuo.CURADO
                lista_curados.append((x, y))
        return lista_curados

    def iterar(self):
        """Advance the simulation by one update step."""
        # Infections caused by both infected groups.  Fix: the original
        # discarded the result of the first call by overwriting it with the
        # second.  (If only type-2 individuals are meant to be contagious,
        # drop the first call instead -- TODO confirm the intended model.)
        novos_t1_a, novos_t2_a = self.verificar_infeccao(self.lista_infectados_tipo_1)
        novos_t1_b, novos_t2_b = self.verificar_infeccao(self.lista_infectados_tipo_2)
        lista_novos_infectados_tipo1 = novos_t1_a + novos_t1_b
        lista_novos_infectados_tipo2 = novos_t2_a + novos_t2_b

        # Death/cure checks for type-2 individuals; drop the dead/cured.
        lista_mortos_atualizacao, lista_curados_t2_atualizacao = \
            self.verificar_morte(self.lista_infectados_tipo_2)
        self.lista_infectados_tipo_2 = [
            indice for indice in self.lista_infectados_tipo_2
            if indice not in lista_mortos_atualizacao
            and indice not in lista_curados_t2_atualizacao]
        self.num_mortos += len(lista_mortos_atualizacao)

        # Cure checks for type-1 individuals; drop the cured.
        lista_curados_t1_atualizacao = self.verificar_cura(self.lista_infectados_tipo_1)
        self.lista_infectados_tipo_1 = [
            indice for indice in self.lista_infectados_tipo_1
            if indice not in lista_curados_t1_atualizacao]
        self.num_curados = (self.num_curados
                            + len(lista_curados_t1_atualizacao)
                            + len(lista_curados_t2_atualizacao))

        # Movement of infectious individuals is currently disabled; see
        # mover_infectante for the implementation.

        # Register the newly infected individuals.
        self.lista_infectados_tipo_2 = self.lista_infectados_tipo_2 + lista_novos_infectados_tipo2
        self.lista_infectados_tipo_1 = self.lista_infectados_tipo_1 + lista_novos_infectados_tipo1

        contadores = {
            'num_sadios': (self.populacao_inicial - self.num_mortos
                           - self.num_curados
                           - len(self.lista_infectados_tipo_1)
                           - len(self.lista_infectados_tipo_2)),
            'num_infect_t1': len(self.lista_infectados_tipo_1),
            'num_infect_t2': len(self.lista_infectados_tipo_2),
            'num_curados': self.num_curados,
            'num_mortos': self.num_mortos}
        # Fix: DataFrame.append was deprecated and removed in pandas 2.0;
        # pd.concat works on both old and new pandas.
        self.dataframe = pd.concat(
            [self.dataframe, pd.DataFrame([contadores])], ignore_index=True)

        # Save the new status snapshot and count this update.
        self.salvar_posicionamento()
        self.num_atualizacoes += 1

    def infectar(self, chance_infeccao, chance_infeccao_tipo2):
        """Draw the status of a possibly newly-infected individual.

        Returns ``Individuo.SADIO`` when no infection happens, otherwise
        ``INFECTADO_TIPO_1`` or ``INFECTADO_TIPO_2``.
        """
        # First draw: does the infection happen at all?
        if random.random() > chance_infeccao:
            return Individuo.SADIO
        # Second draw: type 1 or type 2?
        if random.random() <= chance_infeccao_tipo2:
            return Individuo.INFECTADO_TIPO_2
        return Individuo.INFECTADO_TIPO_1

    def popular(self, tamanho_matriz):
        """Fill the grid with individuals and seed the initial infections."""
        # NOTE(review): this assigns one shared SADIO instance to every cell;
        # benign as long as healthy individuals are replaced, never mutated.
        self.df_individuos.iloc[:, :] = self.criar_individuo(Individuo.SADIO, (0, 0))

        # Shuffled list of candidate positions for the initial infected.
        # NOTE(review): permutations(range(n), 2) never yields diagonal
        # positions (x == y), so diagonal cells are never seeded -- confirm
        # whether that is intended.
        lista_indices = list(permutations(range(tamanho_matriz), 2))
        random.shuffle(lista_indices)

        # Seed the type-1 infected individuals (always at least one).
        for _ in range(self.num_inicial_tipo1):
            ind_x, ind_y = lista_indices.pop()
            self.lista_infectados_tipo_1.append((ind_x, ind_y))
            self.df_individuos.iloc[ind_x, ind_y] = self.criar_individuo(
                Individuo.INFECTADO_TIPO_1, (ind_x, ind_y))
            self.matriz_status[ind_x, ind_y] = Individuo.INFECTADO_TIPO_1

        # Seed the type-2 infected individuals.
        for _ in range(self.num_inicial_tipo2):
            ind_x, ind_y = lista_indices.pop()
            self.lista_infectados_tipo_2.append((ind_x, ind_y))
            # Fix: the original called self.fabrica_individuo.criar_individuo,
            # an attribute that does not exist (AttributeError at runtime).
            self.df_individuos.iloc[ind_x, ind_y] = self.criar_individuo(
                Individuo.INFECTADO_TIPO_2, (ind_x, ind_y))
            self.matriz_status[ind_x, ind_y] = Individuo.INFECTADO_TIPO_2

    def trocar_status_localizacao(self, ponto_ini, ponto_final):
        """Swap the individuals at the two grid positions and refresh both
        status-matrix entries."""
        x_ini, y_ini = ponto_ini
        x_fin, y_fin = ponto_final

        # Tuple swap; the right-hand side is fully evaluated first.
        self.df_individuos.iloc[x_fin, y_fin], self.df_individuos.iloc[x_ini, y_ini] = \
            self.df_individuos.iloc[x_ini, y_ini], self.df_individuos.iloc[x_fin, y_fin]

        self.matriz_status[x_fin, y_fin] = self.df_individuos.iloc[x_fin, y_fin].status
        self.matriz_status[x_ini, y_ini] = self.df_individuos.iloc[x_ini, y_ini].status

    def mover_infectante(self, posicao_inicial):
        """Move an infectious individual one step in a uniformly random
        direction (up/down/left/right), wrapping around the grid."""
        pos_x, pos_y = posicao_inicial
        rng_posicao = random.random()
        if rng_posicao <= 0.25:
            pos_x -= 1   # up
        elif rng_posicao <= 0.5:
            pos_x += 1   # down
        elif rng_posicao <= 0.75:
            pos_y -= 1   # left
        else:
            pos_y += 1   # right

        # Wrap the candidate position back onto the spherical grid.
        posicao_final = self.matriz_esferica.valida_ponto_matriz(pos_x, pos_y)
        self.trocar_status_localizacao(posicao_inicial, posicao_final)
# ---------------------------------------------------------------------------
# Demo script: build a tiny 2x2 simulation and inspect its state.
# ---------------------------------------------------------------------------

# Simulation parameters.
chance_infeccao = 1            # an infectious neighbour always infects
chance_infeccao_tipo2 = 0.2    # 20% of new infections are type 2
chance_morte = 0.1             # per-update death chance for type-2 individuals
atualizacoes_cura = 10         # updates required for an individual to be cured
percentual_inicial_tipo1 = 0.0
percentual_inicial_tipo2 = 0.00

sim = Simulador(
                2,
                percentual_inicial_tipo1,
                percentual_inicial_tipo2,
                chance_infeccao,
                chance_infeccao_tipo2,
                chance_morte,atualizacoes_cura)

#print(sim.lista_matrizes_posicionamento[0])
#print(sim.lista_infectados_tipo_2)
#print(sim.lista_infectados_tipo_1)

# Colormap for plotting status snapshots:
# white=healthy, yellow=type-1, red=type-2, blue=cured, black=dead.
cmap = ListedColormap(['w', 'y', 'r', 'blue', 'black'])

# Main simulation loop (currently disabled): iterate until no infected remain.
# while sim.dataframe.iloc[-1]['num_infect_t1']+sim.dataframe.iloc[-1]['num_infect_t2'] > 0:
# print(sim.df_individuos)
# #print("xxxxxxxxxxxxxxxxxTipo: ",type(sim.lista_matrizes_posicionamento[len(sim.lista_matrizes_posicionamento)-1].toarray()))
# #plt.matshow(sim.lista_matrizes_posicionamento[0], cmap = cmap, vmin= 0, vmax = 4)
# #plt.show()
# sim.iterar()

# Dump the final state of the simulation.
print(sim.dataframe)
print(sim.df_individuos)
print(sim.df_individuos.iloc[1,0].status)
print("Novos infectados: ", sim.verificar_infeccao(sim.lista_infectados_tipo_1))

plt.show()
| 41.820059 | 179 | 0.644283 |
5a9dfb1e378c67144d1b42ab5f533304a3a4a514 | 1,984 | py | Python | nsot/validators.py | jathanism/nsot | 9369c86189a64c682e28fd5663762e4d3205b058 | [
"Apache-2.0"
] | 1 | 2015-11-26T07:36:12.000Z | 2015-11-26T07:36:12.000Z | nsot/validators.py | jathanism/nsot | 9369c86189a64c682e28fd5663762e4d3205b058 | [
"Apache-2.0"
] | null | null | null | nsot/validators.py | jathanism/nsot | 9369c86189a64c682e28fd5663762e4d3205b058 | [
"Apache-2.0"
] | null | null | null | """
Validators for validating object fields.
"""
from __future__ import absolute_import
from django.conf import settings
from django.core.validators import EmailValidator
import ipaddress
import netaddr
import six
from . import exc
def validate_mac_address(value):
    """Validate whether ``value`` is a valid MAC address.

    ``None`` passes through unchanged; a purely numeric string is first
    coerced to an int, then the value is parsed as a 48-bit EUI.
    """
    if value is None:
        return None

    # Numeric strings are treated as the integer form of the address.
    if isinstance(value, six.string_types) and value.isdigit():
        value = int(value)

    # Parse with the EUI object directly instead of MACAddressField.
    try:
        return netaddr.EUI(value, version=48)
    except (ValueError, TypeError, netaddr.AddrFormatError):
        raise exc.ValidationError(
            {"mac_address": "Enter a valid MAC Address."}
        )
def validate_name(value):
    """Validate whether ``value`` is a valid name."""
    if value:
        return value
    # Empty/falsy names are rejected.
    raise exc.ValidationError({"name": "This is a required field."})
def validate_cidr(value):
    """Validate whether ``value`` is a valid IPv4/IPv6 CIDR.

    Returns the parsed ``ipaddress`` network object on success.

    Raises:
        exc.ValidationError: if ``value`` cannot be parsed as a network.
    """
    # (Fixed docstring typo: "validr" -> "valid".)
    try:
        cidr = ipaddress.ip_network(six.text_type(value))
    except ValueError:
        raise exc.ValidationError(
            {
                "cidr": "%r does not appear to be an IPv4 or IPv6 network"
                % value
            }
        )
    else:
        return cidr
def validate_host_address(value):
    """Validate whether ``value`` is a host IP address."""
    # A host address must parse as a CIDR whose prefix length is one of
    # the configured host prefixes (e.g. /32 or /128).
    cidr = validate_cidr(value)
    if cidr.prefixlen in settings.HOST_PREFIXES:
        return value
    raise exc.ValidationError(
        {"address": "%r is not a valid host address!" % value}
    )
def validate_email(value):
    """Validate whether ``value`` is an email address."""
    # Delegate to Django's validator and translate its error type.
    try:
        EmailValidator()(value)
    except exc.DjangoValidationError as err:
        raise exc.ValidationError({"email": str(err)})
    return value
| 26.453333 | 74 | 0.644657 |
52570956dd262003f0acd8be162bf95736e75104 | 28,202 | py | Python | build/gen.py | frida/gn | 75194c124f158d7fabdc94048f1a3f850a5f0701 | [
"BSD-3-Clause"
] | null | null | null | build/gen.py | frida/gn | 75194c124f158d7fabdc94048f1a3f850a5f0701 | [
"BSD-3-Clause"
] | 1 | 2021-03-20T21:31:46.000Z | 2021-03-20T21:41:04.000Z | build/gen.py | frida/gn | 75194c124f158d7fabdc94048f1a3f850a5f0701 | [
"BSD-3-Clause"
] | 2 | 2020-01-21T05:07:30.000Z | 2022-03-11T16:20:16.000Z | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates build.ninja that will build GN."""
import contextlib
import errno
import optparse
import os
import platform
import re
import subprocess
import sys
import tempfile
try: # py3
from shlex import quote as shell_quote
except ImportError: # py2
from pipes import quote as shell_quote
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
REPO_ROOT = os.path.dirname(SCRIPT_DIR)
class Platform(object):
  """Represents a host/target platform."""

  # (sys.platform prefix, canonical platform name), checked in order.
  # Order matters: 'mingw'/'msys' must match before the bare 'win' prefix.
  _SYS_PLATFORM_PREFIXES = [
      ('linux', 'linux'),
      ('darwin', 'darwin'),
      ('mingw', 'mingw'),
      ('msys', 'msys'),
      ('win', 'msvc'),
      ('aix', 'aix'),
      ('fuchsia', 'fuchsia'),
      ('freebsd', 'freebsd'),
      ('netbsd', 'netbsd'),
      ('openbsd', 'openbsd'),
      ('haiku', 'haiku'),
      ('sunos', 'solaris'),
  ]

  def __init__(self, platform):
    """Use the given platform name, or normalize sys.platform when None."""
    if platform is not None:
      self._platform = platform
      return
    self._platform = sys.platform
    for prefix, canonical in self._SYS_PLATFORM_PREFIXES:
      if self._platform.startswith(prefix):
        self._platform = canonical
        break

  @staticmethod
  def known_platforms():
    return ['linux', 'darwin', 'mingw', 'msys', 'msvc', 'aix', 'fuchsia',
            'freebsd', 'netbsd', 'openbsd', 'haiku', 'solaris']

  def platform(self):
    return self._platform

  def is_linux(self):
    return self._platform == 'linux'

  def is_mingw(self):
    return self._platform == 'mingw'

  def is_msys(self):
    return self._platform == 'msys'

  def is_msvc(self):
    return self._platform == 'msvc'

  def is_windows(self):
    # Both the MSVC and MinGW toolchains target Windows (msys is treated
    # as POSIX, matching the original predicate).
    return self._platform in ('mingw', 'msvc')

  def is_darwin(self):
    return self._platform == 'darwin'

  def is_aix(self):
    return self._platform == 'aix'

  def is_haiku(self):
    return self._platform == 'haiku'

  def is_solaris(self):
    return self._platform == 'solaris'

  def is_posix(self):
    return self._platform in ('linux', 'freebsd', 'darwin', 'aix', 'openbsd',
                              'haiku', 'solaris', 'msys', 'netbsd')
def main(argv):
  """Entry point: parse the command line and emit out/build.ninja."""
  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option('-d', '--debug', action='store_true',
                    help='Do a debug build. Defaults to release build.')
  parser.add_option('--platform',
                    help='target platform (' +
                         '/'.join(Platform.known_platforms()) + ')',
                    choices=Platform.known_platforms())
  parser.add_option('--host',
                    help='host platform (' +
                         '/'.join(Platform.known_platforms()) + ')',
                    choices=Platform.known_platforms())
  parser.add_option('--use-lto', action='store_true',
                    help='Enable the use of LTO')
  parser.add_option('--use-icf', action='store_true',
                    help='Enable the use of Identical Code Folding')
  parser.add_option('--no-last-commit-position', action='store_true',
                    help='Do not generate last_commit_position.h.')
  parser.add_option('--out-path',
                    help='The path to generate the build files in.')
  parser.add_option('--no-strip', action='store_true',
                    help='Don\'t strip release build. Useful for profiling.')
  parser.add_option('--no-static-libstdc++', action='store_true',
                    default=False, dest='no_static_libstdcpp',
                    help='Don\'t link libstdc++ statically')
  parser.add_option('--link-lib',
                    action='append',
                    metavar='LINK_LIB',
                    default=[],
                    dest='link_libs',
                    help=('Add a library to the final executable link. ' +
                          'LINK_LIB must be the path to a static or shared ' +
                          'library, or \'-l<name>\' on POSIX systems. Can be ' +
                          'used multiple times. Useful to link custom malloc ' +
                          'or cpu profiling libraries.'))
  opts, leftover = parser.parse_args(argv)
  if leftover:
    parser.error('Unrecognized command line arguments: %s.' % ', '.join(leftover))

  target = Platform(opts.platform)
  # The host defaults to the target platform unless overridden.
  host = Platform(opts.host) if opts.host else target

  out_dir = opts.out_path or os.path.join(REPO_ROOT, 'out')
  if not os.path.isdir(out_dir):
    os.makedirs(out_dir)

  if not opts.no_last_commit_position:
    GenerateLastCommitPosition(host,
                               os.path.join(out_dir, 'last_commit_position.h'))
  WriteGNNinja(os.path.join(out_dir, 'build.ninja'), target, host, opts)
  return 0
def GenerateLastCommitPosition(host, header):
  """Write ``header`` defining the LAST_COMMIT_POSITION* macros.

  Derives the commit count and abbreviated hash from ``git describe``
  relative to the 'initial-commit' tag, and only touches the file when
  the values actually changed so ninja does not rebuild unnecessarily.

  Raises:
    ValueError: if the ``git describe`` output has an unexpected format.
  """
  ROOT_TAG = 'initial-commit'
  describe_output = subprocess.check_output(
      ['git', 'describe', 'HEAD', '--match', ROOT_TAG], shell=host.is_windows(),
      cwd=REPO_ROOT)
  # Fix: raw string for the regex -- '\d' in a plain literal is an invalid
  # escape sequence (SyntaxWarning/DeprecationWarning on modern Python).
  mo = re.match(ROOT_TAG + r'-(\d+)-g([0-9a-f]+)', describe_output.decode())
  if not mo:
    raise ValueError(
        'Unexpected output from git describe when generating version header')

  contents = '''// Generated by build/gen.py.

#ifndef OUT_LAST_COMMIT_POSITION_H_
#define OUT_LAST_COMMIT_POSITION_H_

#define LAST_COMMIT_POSITION_NUM %s
#define LAST_COMMIT_POSITION "%s (%s)"

#endif  // OUT_LAST_COMMIT_POSITION_H_
''' % (mo.group(1), mo.group(1), mo.group(2))

  # Only write/touch this file if the commit position has changed.
  old_contents = ''
  if os.path.isfile(header):
    with open(header, 'r') as f:
      old_contents = f.read()

  if old_contents != contents:
    with open(header, 'w') as f:
      f.write(contents)
def WriteGenericNinja(path, static_libraries, executables,
                      cxx, ar, ld, platform, host, options,
                      cflags=[], ldflags=[], libflags=[],
                      include_dirs=[], solibs=[]):
  """Write a build.ninja at ``path`` (plus its ``.d`` depfile) describing
  how to build the given static libraries and executables with the supplied
  toolchain, based on a per-platform ninja template.

  NOTE(review): the list defaults (cflags=[], ...) are the classic
  mutable-default-argument smell, but they are benign here because this
  function only reads them, never mutates them.
  """
  # Arguments to forward to gen.py when ninja regenerates the build files.
  args = ' -d' if options.debug else ''
  for link_lib in options.link_libs:
    args += ' --link-lib=' + shell_quote(link_lib)
  # Preamble: toolchain variables plus the self-regeneration rule.
  ninja_header_lines = [
    'cxx = ' + cxx,
    'ar = ' + ar,
    'ld = ' + ld,
    '',
    'rule regen',
    '  command = %s ../build/gen.py%s' % (sys.executable, args),
    '  description = Regenerating ninja files',
    '',
    'build build.ninja: regen',
    '  generator = 1',
    '  depfile = build.ninja.d',
    '',
  ]

  # Pick the ninja rule template for the target platform.
  template_filename = os.path.join(SCRIPT_DIR, {
      'msvc': 'build_win.ninja.template',
      'mingw': 'build_linux.ninja.template',
      'msys': 'build_linux.ninja.template',
      'darwin': 'build_mac.ninja.template',
      'linux': 'build_linux.ninja.template',
      'freebsd': 'build_linux.ninja.template',
      'aix': 'build_aix.ninja.template',
      'openbsd': 'build_openbsd.ninja.template',
      'haiku': 'build_haiku.ninja.template',
      'solaris': 'build_linux.ninja.template',
      'netbsd': 'build_linux.ninja.template',
  }[platform.platform()])

  with open(template_filename) as f:
    ninja_template = f.read()

  # File extensions differ between Windows and POSIX toolchains.
  if platform.is_windows():
    executable_ext = '.exe'
    library_ext = '.lib'
    object_ext = '.obj'
  else:
    executable_ext = ''
    library_ext = '.a'
    object_ext = '.o'

  # Escape characters that are special in ninja paths ('$ ', ' ', ':').
  def escape_path_ninja(path):
    return path.replace('$ ', '$$ ').replace(' ', '$ ').replace(':', '$:')

  # Map a source path to its object-file path (escaped for ninja).
  def src_to_obj(path):
    return escape_path_ninja('%s' % os.path.splitext(path)[0] + object_ext)

  # Map a library name to its archive filename.
  def library_to_a(library):
    return '%s%s' % (library, library_ext)

  ninja_lines = []
  # Emit the cxx build statement for one source file.
  # NOTE(review): the ``settings`` argument is unused by build_source.
  def build_source(src_file, settings):
    ninja_lines.extend([
        'build %s: cxx %s' % (src_to_obj(src_file),
                              escape_path_ninja(
                                  os.path.relpath(
                                      os.path.join(REPO_ROOT, src_file),
                                      os.path.dirname(path)))),
        '  includes = %s' % ' '.join(
            ['-I' + escape_path_ninja(dirname) for dirname in include_dirs]),
        '  cflags = %s' % ' '.join(cflags),
    ])

  # One compile statement per source, then one archive statement per library.
  for library, settings in static_libraries.items():
    for src_file in settings['sources']:
      build_source(src_file, settings)

    ninja_lines.append('build %s: alink_thin %s' % (
        library_to_a(library),
        ' '.join([src_to_obj(src_file) for src_file in settings['sources']])))
    ninja_lines.append('  libflags = %s' % ' '.join(libflags))

  # One compile statement per source, then one link statement per executable,
  # with the static libraries as implicit dependencies.
  for executable, settings in executables.items():
    for src_file in settings['sources']:
      build_source(src_file, settings)

    ninja_lines.extend([
        'build %s%s: link %s | %s' % (
            executable, executable_ext,
            ' '.join([src_to_obj(src_file) for src_file in settings['sources']]),
            ' '.join([library_to_a(library) for library in settings['libs']])),
        '  ldflags = %s' % ' '.join(ldflags),
        '  solibs = %s' % ' '.join(solibs),
        '  libs = %s' % ' '.join(
            [library_to_a(library) for library in settings['libs']]),
    ])

  ninja_lines.append('')  # Make sure the file ends with a newline.

  # Assemble the final build.ninja: header + template rules + build edges.
  with open(path, 'w') as f:
    f.write('\n'.join(ninja_header_lines))
    f.write(ninja_template)
    f.write('\n'.join(ninja_lines))

  # Depfile so that editing gen.py or the template re-triggers regen.
  with open(path + '.d', 'w') as f:
    f.write('build.ninja: ' +
            os.path.relpath(os.path.join(SCRIPT_DIR, 'gen.py'),
                            os.path.dirname(path)) + ' ' +
            os.path.relpath(template_filename, os.path.dirname(path)) + '\n')
def WriteGNNinja(path, platform, host, options):
if platform.is_msvc():
cxx = os.environ.get('CXX', 'cl.exe')
ld = os.environ.get('LD', 'link.exe')
ar = os.environ.get('AR', 'lib.exe')
elif platform.is_aix():
cxx = os.environ.get('CXX', 'g++')
ld = os.environ.get('LD', 'g++')
ar = os.environ.get('AR', 'ar -X64')
elif platform.is_msys() or platform.is_mingw():
cxx = os.environ.get('CXX', 'g++')
ld = os.environ.get('LD', 'g++')
ar = os.environ.get('AR', 'ar')
else:
cxx = os.environ.get('CXX', 'clang++')
ld = cxx
ar = os.environ.get('AR', 'ar')
cflags = os.environ.get('CFLAGS', '').split()
cflags += os.environ.get('CXXFLAGS', '').split()
ldflags = os.environ.get('LDFLAGS', '').split()
libflags = os.environ.get('LIBFLAGS', '').split()
include_dirs = [
os.path.relpath(os.path.join(REPO_ROOT, 'src'), os.path.dirname(path)),
'.',
]
libs = []
if not platform.is_msvc():
if options.debug:
cflags.extend(['-O0', '-g'])
else:
cflags.append('-DNDEBUG')
cflags.append('-O3')
if options.no_strip:
cflags.append('-g')
ldflags.append('-O3')
# Use -fdata-sections and -ffunction-sections to place each function
# or data item into its own section so --gc-sections can eliminate any
# unused functions and data items.
cflags.extend(['-fdata-sections', '-ffunction-sections'])
ldflags.extend(['-fdata-sections', '-ffunction-sections'])
if platform.is_darwin():
ldflags.append('-Wl,-dead_strip')
elif not platform.is_aix() and not platform.is_solaris():
# Garbage collection is done by default on aix.
ldflags.append('-Wl,--gc-sections')
# Omit all symbol information from the output file.
if options.no_strip is None:
if platform.is_darwin():
ldflags.append('-Wl,-S')
elif platform.is_aix():
ldflags.append('-Wl,-s')
elif platform.is_solaris():
ldflags.append('-Wl,--strip-all')
else:
ldflags.append('-Wl,-strip-all')
# Enable identical code-folding.
if options.use_icf and not platform.is_darwin():
ldflags.append('-Wl,--icf=all')
if options.use_lto:
cflags.extend(['-flto', '-fwhole-program-vtables'])
ldflags.extend(['-flto', '-fwhole-program-vtables'])
cflags.extend([
'-D_FILE_OFFSET_BITS=64',
'-D__STDC_CONSTANT_MACROS', '-D__STDC_FORMAT_MACROS',
'-pthread',
'-pipe',
'-fno-exceptions',
'-fno-rtti',
'-fdiagnostics-color',
'-Wall',
'-Wextra',
'-Wno-unused-parameter',
'-std=c++17'
])
if platform.is_linux() or platform.is_mingw() or platform.is_msys():
ldflags.append('-Wl,--as-needed')
if not options.no_static_libstdcpp:
ldflags.append('-static-libstdc++')
if platform.is_mingw() or platform.is_msys():
cflags.remove('-std=c++17')
cflags.extend([
'-Wno-deprecated-copy',
'-Wno-implicit-fallthrough',
'-Wno-redundant-move',
'-Wno-unused-variable',
'-Wno-format', # Use of %llx, which is supported by _UCRT, false positive
'-Wno-strict-aliasing', # Dereferencing punned pointer
'-Wno-cast-function-type', # Casting FARPROC to RegDeleteKeyExPtr
'-std=gnu++17',
])
else:
# This is needed by libc++.
libs.extend(['-ldl', '-lrt'])
elif platform.is_darwin():
min_mac_version_flag = '-mmacosx-version-min=10.9'
cflags.append(min_mac_version_flag)
ldflags.append(min_mac_version_flag)
elif platform.is_aix():
cflags.append('-maix64')
ldflags.append('-maix64')
elif platform.is_haiku():
cflags.append('-fPIC')
cflags.extend(['-D_BSD_SOURCE'])
if platform.is_posix() and not platform.is_haiku():
ldflags.append('-pthread')
if platform.is_mingw() or platform.is_msys():
cflags.extend(['-DUNICODE',
'-DNOMINMAX',
'-DWIN32_LEAN_AND_MEAN',
'-DWINVER=0x0A00',
'-D_CRT_SECURE_NO_DEPRECATE',
'-D_SCL_SECURE_NO_DEPRECATE',
'-D_UNICODE',
'-D_WIN32_WINNT=0x0A00',
'-D_HAS_EXCEPTIONS=0'
])
elif platform.is_msvc():
if not options.debug:
cflags.extend(['/O2', '/DNDEBUG', '/Zc:inline'])
ldflags.extend(['/OPT:REF'])
if options.use_icf:
libflags.extend(['/OPT:ICF'])
if options.use_lto:
cflags.extend(['/GL'])
libflags.extend(['/LTCG'])
ldflags.extend(['/LTCG'])
cflags.extend([
'/DNOMINMAX',
'/DUNICODE',
'/DWIN32_LEAN_AND_MEAN',
'/DWINVER=0x0A00',
'/D_CRT_SECURE_NO_DEPRECATE',
'/D_SCL_SECURE_NO_DEPRECATE',
'/D_UNICODE',
'/D_WIN32_WINNT=0x0A00',
'/FS',
'/W4',
'/WX',
'/Zi',
'/wd4099',
'/wd4100',
'/wd4127',
'/wd4244',
'/wd4267',
'/wd4505',
'/wd4838',
'/wd4996',
'/std:c++17',
'/GR-',
'/D_HAS_EXCEPTIONS=0',
])
ldflags.extend(['/DEBUG', '/MACHINE:x64'])
static_libraries = {
'base': {'sources': [
'src/base/command_line.cc',
'src/base/environment.cc',
'src/base/files/file.cc',
'src/base/files/file_enumerator.cc',
'src/base/files/file_path.cc',
'src/base/files/file_path_constants.cc',
'src/base/files/file_util.cc',
'src/base/files/scoped_file.cc',
'src/base/files/scoped_temp_dir.cc',
'src/base/json/json_parser.cc',
'src/base/json/json_reader.cc',
'src/base/json/json_writer.cc',
'src/base/json/string_escape.cc',
'src/base/logging.cc',
'src/base/md5.cc',
'src/base/memory/ref_counted.cc',
'src/base/memory/weak_ptr.cc',
'src/base/sha1.cc',
'src/base/strings/string_number_conversions.cc',
'src/base/strings/string_split.cc',
'src/base/strings/string_util.cc',
'src/base/strings/string_util_constants.cc',
'src/base/strings/stringprintf.cc',
'src/base/strings/utf_string_conversion_utils.cc',
'src/base/strings/utf_string_conversions.cc',
'src/base/third_party/icu/icu_utf.cc',
'src/base/timer/elapsed_timer.cc',
'src/base/value_iterators.cc',
'src/base/values.cc',
]},
'gn_lib': {'sources': [
'src/gn/action_target_generator.cc',
'src/gn/action_values.cc',
'src/gn/analyzer.cc',
'src/gn/args.cc',
'src/gn/binary_target_generator.cc',
'src/gn/build_settings.cc',
'src/gn/builder.cc',
'src/gn/builder_record.cc',
'src/gn/bundle_data.cc',
'src/gn/bundle_data_target_generator.cc',
'src/gn/bundle_file_rule.cc',
'src/gn/c_include_iterator.cc',
'src/gn/c_substitution_type.cc',
'src/gn/c_tool.cc',
'src/gn/command_analyze.cc',
'src/gn/command_args.cc',
'src/gn/command_check.cc',
'src/gn/command_clean.cc',
'src/gn/command_desc.cc',
'src/gn/command_format.cc',
'src/gn/command_gen.cc',
'src/gn/command_help.cc',
'src/gn/command_ls.cc',
'src/gn/command_meta.cc',
'src/gn/command_outputs.cc',
'src/gn/command_path.cc',
'src/gn/command_refs.cc',
'src/gn/commands.cc',
'src/gn/compile_commands_writer.cc',
'src/gn/rust_project_writer.cc',
'src/gn/config.cc',
'src/gn/config_values.cc',
'src/gn/config_values_extractors.cc',
'src/gn/config_values_generator.cc',
'src/gn/copy_target_generator.cc',
'src/gn/create_bundle_target_generator.cc',
'src/gn/deps_iterator.cc',
'src/gn/desc_builder.cc',
'src/gn/eclipse_writer.cc',
'src/gn/err.cc',
'src/gn/escape.cc',
'src/gn/exec_process.cc',
'src/gn/filesystem_utils.cc',
'src/gn/file_writer.cc',
'src/gn/frameworks_utils.cc',
'src/gn/function_exec_script.cc',
'src/gn/function_filter.cc',
'src/gn/function_foreach.cc',
'src/gn/function_forward_variables_from.cc',
'src/gn/function_get_label_info.cc',
'src/gn/function_get_path_info.cc',
'src/gn/function_get_target_outputs.cc',
'src/gn/function_process_file_template.cc',
'src/gn/function_read_file.cc',
'src/gn/function_rebase_path.cc',
'src/gn/function_set_default_toolchain.cc',
'src/gn/function_set_defaults.cc',
'src/gn/function_template.cc',
'src/gn/function_toolchain.cc',
'src/gn/function_write_file.cc',
'src/gn/functions.cc',
'src/gn/functions_target.cc',
'src/gn/general_tool.cc',
'src/gn/generated_file_target_generator.cc',
'src/gn/group_target_generator.cc',
'src/gn/header_checker.cc',
'src/gn/import_manager.cc',
'src/gn/inherited_libraries.cc',
'src/gn/input_conversion.cc',
'src/gn/input_file.cc',
'src/gn/input_file_manager.cc',
'src/gn/item.cc',
'src/gn/json_project_writer.cc',
'src/gn/label.cc',
'src/gn/label_pattern.cc',
'src/gn/lib_file.cc',
'src/gn/loader.cc',
'src/gn/location.cc',
'src/gn/metadata.cc',
'src/gn/metadata_walk.cc',
'src/gn/ninja_action_target_writer.cc',
'src/gn/ninja_binary_target_writer.cc',
'src/gn/ninja_build_writer.cc',
'src/gn/ninja_bundle_data_target_writer.cc',
'src/gn/ninja_c_binary_target_writer.cc',
'src/gn/ninja_copy_target_writer.cc',
'src/gn/ninja_create_bundle_target_writer.cc',
'src/gn/ninja_generated_file_target_writer.cc',
'src/gn/ninja_group_target_writer.cc',
'src/gn/ninja_rust_binary_target_writer.cc',
'src/gn/ninja_target_command_util.cc',
'src/gn/ninja_target_writer.cc',
'src/gn/ninja_toolchain_writer.cc',
'src/gn/ninja_utils.cc',
'src/gn/ninja_writer.cc',
'src/gn/operators.cc',
'src/gn/output_conversion.cc',
'src/gn/output_file.cc',
'src/gn/parse_node_value_adapter.cc',
'src/gn/parse_tree.cc',
'src/gn/parser.cc',
'src/gn/path_output.cc',
'src/gn/pattern.cc',
'src/gn/pool.cc',
'src/gn/qt_creator_writer.cc',
'src/gn/runtime_deps.cc',
'src/gn/rust_substitution_type.cc',
'src/gn/rust_tool.cc',
'src/gn/rust_values.cc',
'src/gn/rust_values_generator.cc',
'src/gn/rust_variables.cc',
'src/gn/scheduler.cc',
'src/gn/scope.cc',
'src/gn/scope_per_file_provider.cc',
'src/gn/settings.cc',
'src/gn/setup.cc',
'src/gn/source_dir.cc',
'src/gn/source_file.cc',
'src/gn/standard_out.cc',
'src/gn/string_atom.cc',
'src/gn/string_output_buffer.cc',
'src/gn/string_utils.cc',
'src/gn/substitution_list.cc',
'src/gn/substitution_pattern.cc',
'src/gn/substitution_type.cc',
'src/gn/substitution_writer.cc',
'src/gn/swift_values.cc',
'src/gn/swift_values_generator.cc',
'src/gn/swift_variables.cc',
'src/gn/switches.cc',
'src/gn/target.cc',
'src/gn/target_generator.cc',
'src/gn/template.cc',
'src/gn/token.cc',
'src/gn/tokenizer.cc',
'src/gn/tool.cc',
'src/gn/toolchain.cc',
'src/gn/trace.cc',
'src/gn/value.cc',
'src/gn/value_extractors.cc',
'src/gn/variables.cc',
'src/gn/version.cc',
'src/gn/visibility.cc',
'src/gn/visual_studio_utils.cc',
'src/gn/visual_studio_writer.cc',
'src/gn/xcode_object.cc',
'src/gn/xcode_writer.cc',
'src/gn/xml_element_writer.cc',
'src/util/exe_path.cc',
'src/util/msg_loop.cc',
'src/util/semaphore.cc',
'src/util/sys_info.cc',
'src/util/ticks.cc',
'src/util/worker_pool.cc',
]},
}
executables = {
'gn': {'sources': [ 'src/gn/gn_main.cc' ], 'libs': []},
'gn_unittests': { 'sources': [
'src/gn/action_target_generator_unittest.cc',
'src/gn/analyzer_unittest.cc',
'src/gn/args_unittest.cc',
'src/gn/builder_unittest.cc',
'src/gn/c_include_iterator_unittest.cc',
'src/gn/command_format_unittest.cc',
'src/gn/commands_unittest.cc',
'src/gn/compile_commands_writer_unittest.cc',
'src/gn/config_unittest.cc',
'src/gn/config_values_extractors_unittest.cc',
'src/gn/escape_unittest.cc',
'src/gn/exec_process_unittest.cc',
'src/gn/filesystem_utils_unittest.cc',
'src/gn/file_writer_unittest.cc',
'src/gn/frameworks_utils_unittest.cc',
'src/gn/function_filter_unittest.cc',
'src/gn/function_foreach_unittest.cc',
'src/gn/function_forward_variables_from_unittest.cc',
'src/gn/function_get_label_info_unittest.cc',
'src/gn/function_get_path_info_unittest.cc',
'src/gn/function_get_target_outputs_unittest.cc',
'src/gn/function_process_file_template_unittest.cc',
'src/gn/function_rebase_path_unittest.cc',
'src/gn/function_template_unittest.cc',
'src/gn/function_toolchain_unittest.cc',
'src/gn/function_write_file_unittest.cc',
'src/gn/functions_target_rust_unittest.cc',
'src/gn/functions_target_unittest.cc',
'src/gn/functions_unittest.cc',
'src/gn/hash_table_base_unittest.cc',
'src/gn/header_checker_unittest.cc',
'src/gn/inherited_libraries_unittest.cc',
'src/gn/input_conversion_unittest.cc',
'src/gn/json_project_writer_unittest.cc',
'src/gn/rust_project_writer_unittest.cc',
'src/gn/rust_project_writer_helpers_unittest.cc',
'src/gn/label_pattern_unittest.cc',
'src/gn/label_unittest.cc',
'src/gn/loader_unittest.cc',
'src/gn/metadata_unittest.cc',
'src/gn/metadata_walk_unittest.cc',
'src/gn/ninja_action_target_writer_unittest.cc',
'src/gn/ninja_binary_target_writer_unittest.cc',
'src/gn/ninja_build_writer_unittest.cc',
'src/gn/ninja_bundle_data_target_writer_unittest.cc',
'src/gn/ninja_c_binary_target_writer_unittest.cc',
'src/gn/ninja_copy_target_writer_unittest.cc',
'src/gn/ninja_create_bundle_target_writer_unittest.cc',
'src/gn/ninja_generated_file_target_writer_unittest.cc',
'src/gn/ninja_group_target_writer_unittest.cc',
'src/gn/ninja_rust_binary_target_writer_unittest.cc',
'src/gn/ninja_target_command_util_unittest.cc',
'src/gn/ninja_target_writer_unittest.cc',
'src/gn/ninja_toolchain_writer_unittest.cc',
'src/gn/operators_unittest.cc',
'src/gn/output_conversion_unittest.cc',
'src/gn/parse_tree_unittest.cc',
'src/gn/parser_unittest.cc',
'src/gn/path_output_unittest.cc',
'src/gn/pattern_unittest.cc',
'src/gn/runtime_deps_unittest.cc',
'src/gn/scope_per_file_provider_unittest.cc',
'src/gn/scope_unittest.cc',
'src/gn/setup_unittest.cc',
'src/gn/source_dir_unittest.cc',
'src/gn/source_file_unittest.cc',
'src/gn/string_atom_unittest.cc',
'src/gn/string_output_buffer_unittest.cc',
'src/gn/string_utils_unittest.cc',
'src/gn/substitution_pattern_unittest.cc',
'src/gn/substitution_writer_unittest.cc',
'src/gn/target_unittest.cc',
'src/gn/template_unittest.cc',
'src/gn/test_with_scheduler.cc',
'src/gn/test_with_scope.cc',
'src/gn/tokenizer_unittest.cc',
'src/gn/unique_vector_unittest.cc',
'src/gn/value_unittest.cc',
'src/gn/vector_utils_unittest.cc',
'src/gn/version_unittest.cc',
'src/gn/visibility_unittest.cc',
'src/gn/visual_studio_utils_unittest.cc',
'src/gn/visual_studio_writer_unittest.cc',
'src/gn/xcode_object_unittest.cc',
'src/gn/xml_element_writer_unittest.cc',
'src/util/test/gn_test.cc',
], 'libs': []},
}
if platform.is_posix():
static_libraries['base']['sources'].extend([
'src/base/files/file_enumerator_posix.cc',
'src/base/files/file_posix.cc',
'src/base/files/file_util_posix.cc',
'src/base/posix/file_descriptor_shuffle.cc',
'src/base/posix/safe_strerror.cc',
])
if platform.is_windows():
static_libraries['base']['sources'].extend([
'src/base/files/file_enumerator_win.cc',
'src/base/files/file_util_win.cc',
'src/base/files/file_win.cc',
'src/base/win/registry.cc',
'src/base/win/scoped_handle.cc',
'src/base/win/scoped_process_information.cc',
])
if platform.is_msvc():
libs.extend([
'advapi32.lib',
'dbghelp.lib',
'kernel32.lib',
'ole32.lib',
'shell32.lib',
'user32.lib',
'userenv.lib',
'version.lib',
'winmm.lib',
'ws2_32.lib',
'Shlwapi.lib',
])
else:
libs.extend([
'-ladvapi32',
'-ldbghelp',
'-lkernel32',
'-lole32',
'-lshell32',
'-luser32',
'-luserenv',
'-lversion',
'-lwinmm',
'-lws2_32',
'-lshlwapi',
])
libs.extend(options.link_libs)
# we just build static libraries that GN needs
executables['gn']['libs'].extend(static_libraries.keys())
executables['gn_unittests']['libs'].extend(static_libraries.keys())
WriteGenericNinja(path, static_libraries, executables, cxx, ar, ld,
platform, host, options, cflags, ldflags,
libflags, include_dirs, libs)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 34.903465 | 125 | 0.601553 |
c40b1628442c808f732ee5fe88066dd31f9bb0f4 | 13,501 | py | Python | duviz.py | hugovk/duviz | fc3af406ce697243781606c36df3743490733104 | [
"MIT"
] | null | null | null | duviz.py | hugovk/duviz | fc3af406ce697243781606c36df3743490733104 | [
"MIT"
] | null | null | null | duviz.py | hugovk/duviz | fc3af406ce697243781606c36df3743490733104 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
"""
Command line tool for visualization of the disk space usage of a directory
and its subdirectories.
Copyright: 2009-2019 Stefaan Lippens
License: MIT
Website: http://soxofaan.github.io/duviz/
"""
import os
import re
import subprocess
import sys
import time
import unicodedata
# TODO: catch absence/failure of du/ls subprocesses
# TODO: how to handle unreadable subdirs in du/ls?
# TODO: option to sort alphabetically (instead of on size)
def terminal_size():
    """
    Best effort guess of terminal size.

    Falls back to the LINES/COLUMNS environment variables, and finally to
    (25, 80), when the ioctl probe is unavailable or fails.

    @return (height, width)
    """
    try:
        # Try to get size from ioctl system call (Unix only).
        import struct, fcntl, termios
        # Dummy string, determining answer buffer size
        # (for struct of two unsigned short ints) for ioctl call.
        dummy_string = struct.pack('HH', 0, 0)
        # File descriptor of standard output.
        file_descriptor = sys.stdout.fileno()
        # The ioctl call to get terminal size.
        answer = fcntl.ioctl(file_descriptor, termios.TIOCGWINSZ, dummy_string)
        # Unpack answer to height and width values.
        height, width = struct.unpack('HH', answer)
    except (ImportError, IOError, OSError, ValueError, AttributeError):
        # Also catch ValueError/AttributeError: when stdout is replaced by an
        # in-memory file object (pipes, test harnesses), fileno() raises
        # io.UnsupportedOperation (a ValueError/OSError subclass) or may not
        # exist at all; the original narrow except crashed in that case.
        try:
            # Try to get size from environment variables.
            height, width = int(os.environ['LINES']), int(os.environ['COLUMNS'])
        except KeyError:
            # No info found: just use some sensible defaults.
            height, width = (25, 80)
    return height, width
def bar(width, label, fill='-', left='[', right=']', one='|'):
"""
Helper function to render bar strings of certain width with a label.
@param width the desired total width
@param label the label to be rendered (will be clipped if too long).
@param fill the fill character to fill empty space
@param left the symbol to use at the left of the bar
@param right the symbol to use at the right of the bar
@param one the character to use when the bar should be only one character wide
@return rendered string
"""
if width >= 2:
label_width = width - len(left) - len(right)
# Normalize unicode so that unicode code point count corresponds to character count as much as possible
# (note the u'%s' trick to convert to unicode in python 2/3 compatible way)
label = unicodedata.normalize('NFC', u'%s' % label)
b = left + label[:label_width].center(label_width, fill) + right
elif width == 1:
b = one
else:
b = u''
return b
def _human_readable_size(size, base, formats):
"""Helper function to render counts and sizes in a easily readable format."""
for f in formats[:-1]:
if round(size, 2) < base:
return f % size
size = float(size) / base
return formats[-1] % size
def human_readable_byte_size(size, binary=False):
    """Return byte size as 11B, 12.34KB or 345.24MB (or binary: 12.34KiB, 345.24MiB)."""
    if binary:
        scale_base, unit_formats = 1024, ['%dB', '%.2fKiB', '%.2fMiB', '%.2fGiB', '%.2fTiB']
    else:
        scale_base, unit_formats = 1000, ['%dB', '%.2fKB', '%.2fMB', '%.2fGB', '%.2fTB']
    return _human_readable_size(size, scale_base, unit_formats)
def human_readable_count(count):
    """Return inode count as 11, 12.34k or 345.24M."""
    count_formats = ['%d', '%.2fk', '%.2fM', '%.2fG', '%.2fT']
    return _human_readable_size(count, 1000, count_formats)
def path_split(path, base=''):
    """
    Split a file system path in a list of path components (as a recursive os.path.split()),
    optionally only up to a given base path.
    """
    # Normalize the base so a trailing separator does not prevent the
    # equality check inside the loop from matching.
    if base.endswith(os.path.sep):
        base = base.rstrip(os.path.sep)
    items = []
    while True:
        # Reached the requested base: keep it as the first component and stop.
        if path == base:
            items.insert(0, path)
            break
        # Peel off the last path component.
        path, tail = os.path.split(path)
        if tail != '':
            items.insert(0, tail)
            # Relative path fully consumed (e.g. 'a/b' -> '' after 'a').
            if path == '':
                break
        # Absolute path fully consumed: keep the root as first component.
        if path == '/':
            items.insert(0, path)
            break
    return items
class DirectoryTreeNode(object):
    """
    Recursive data structure corresponding with node in a directory tree.

    Holds the name of the node, its size (including subdirectories) and the subdirectories.
    """

    def __init__(self, path):
        # Name of the node. For root node: path up to root node as given, for subnodes: just the folder name
        self.name = path
        # Total size of node.
        # By default this is assumed to be total node size, inclusive sub nodes,
        # otherwise recalculate_own_sizes_to_total_sizes() should be called.
        self.size = None
        # Dictionary of subnodes, keyed by folder name.
        self._subnodes = {}

    def import_path(self, path, size):
        """
        Import directory tree data

        @param path: Path object list of path directory components.
        @param size: total size of the path in bytes.
        """
        # Get relative path (drop the component corresponding to this root).
        path = path_split(path, base=self.name)[1:]
        # Walk down path and create subnodes if required.
        cursor = self
        for component in path:
            if component not in cursor._subnodes:
                cursor._subnodes[component] = DirectoryTreeNode(component)
            cursor = cursor._subnodes[component]
        # Set size at cursor. Each path is expected to be imported only once.
        assert cursor.size is None
        cursor.size = size

    def recalculate_own_sizes_to_total_sizes(self):
        """
        If provided sizes were own sizes instead of total node sizes.

        Recursively adds the subtree sizes to each node's own size.
        @return (recalculated) total size of node
        """
        self.size = self.size + sum([n.recalculate_own_sizes_to_total_sizes() for n in self._subnodes.values()])
        return self.size

    def __lt__(self, other):
        # We only implement rich comparison method __lt__ so make sorting work.
        # Ties on size are broken alphabetically by name.
        return (self.size, self.name) < (other.size, other.name)

    def __repr__(self):
        return '[%s(%d):%s]' % (self.name, self.size, repr(self._subnodes))

    def block_display(self, width, max_depth=5, top=True, size_renderer=human_readable_byte_size):
        """
        Render this subtree as a multi-line ASCII block diagram of the
        given total width; subdirectory widths are proportional to size.
        """
        # Nothing can be drawn in zero columns or past the depth limit.
        if width < 1 or max_depth < 0:
            return ''

        lines = []
        if top:
            # Top border of the whole diagram (only for the root call).
            lines.append('_' * width)

        # Display of current dir: name line and size line.
        lines.append(bar(width, self.name, fill=' '))
        lines.append(bar(width, size_renderer(self.size), fill='_'))

        # Display of subdirectories, largest first.
        subdirs = sorted(self._subnodes.values(), reverse=True)
        if len(subdirs) > 0:
            # Generate block display per subdirectory.
            subdir_blocks = []
            cumsize = 0
            currpos = 0
            lastpos = 0
            for sd in subdirs:
                # Column boundaries follow the cumulative size fraction so
                # rounding errors do not accumulate across siblings.
                cumsize += sd.size
                currpos = int(float(width * cumsize) / self.size)
                subdir_blocks.append(sd.block_display(
                    currpos - lastpos, max_depth - 1, top=False, size_renderer=size_renderer
                ).split('\n'))
                lastpos = currpos
            # Assemble blocks: concatenate sibling blocks row by row,
            # padding shorter blocks with spaces of their own width.
            height = max([len(lns) for lns in subdir_blocks])
            for i in range(height):
                line = ''
                for sdb in subdir_blocks:
                    if i < len(sdb):
                        line += sdb[i]
                    elif len(sdb) > 0:
                        line += ' ' * len(sdb[0])
                lines.append(line.ljust(width))

        return '\n'.join(lines)
class SubprocessException(Exception):
    """Raised when an external helper process ('du' or 'ls') cannot be launched."""
    pass
def build_du_tree(directory, progress=None, one_filesystem=False, dereference=False):
    """
    Build a tree of DirectoryTreeNodes, starting at the given directory.

    @param directory: root directory to scan.
    @param progress: optional callback taking a status string.
    @param one_filesystem: skip directories on other file systems (du -x).
    @param dereference: follow all symbolic links (du -L).
    @return DirectoryTreeNode tree with byte sizes.
    @raise SubprocessException when the 'du' utility cannot be launched.
    """
    # Measure size in 1024 byte blocks. The GNU-du option -b enables counting
    # in bytes directly, but it is not available in BSD-du.
    duargs = ['-k']
    # Handling of symbolic links.
    if one_filesystem:
        duargs.append('-x')
    if dereference:
        duargs.append('-L')
    try:
        du_pipe = subprocess.Popen(['du'] + duargs + [directory], stdout=subprocess.PIPE)
    except OSError:
        raise SubprocessException('Failed to launch "du" utility subprocess. Is it installed and in your PATH?')
    try:
        dir_tree = _build_du_tree(directory, du_pipe.stdout, progress=progress)
    finally:
        # Always close the pipe and reap the child process: the original code
        # leaked the file descriptor on parse errors and left a zombie process
        # because the child was never waited on.
        du_pipe.stdout.close()
        du_pipe.wait()
    return dir_tree
def _build_du_tree(directory, du_pipe, progress=None):
    """
    Helper function: parse 'du -k' output lines from du_pipe into a
    DirectoryTreeNode tree rooted at directory.
    """
    # Each du line is "<blocks><whitespace><path>".
    line_pattern = re.compile(r'([0-9]*)\s*(.*)')
    tree = DirectoryTreeNode(directory)
    for raw_line in du_pipe:
        match = line_pattern.match(raw_line.decode('utf-8'))
        # du reported 1024-byte blocks (-k option): convert to bytes.
        entry_size = int(match.group(1)) * 1024
        entry_path = match.group(2)
        if progress:
            progress('scanning %s' % entry_path)
        tree.import_path(entry_path, entry_size)
    if progress:
        # Clear the progress/feedback line.
        progress('')
    return tree
def build_inode_count_tree(directory, progress=None):
    """
    Build tree of DirectoryTreeNodes with inode counts.

    @param directory: root directory to scan (via 'ls -aiR').
    @param progress: optional callback taking a status string.
    @return DirectoryTreeNode tree with inode counts as sizes.
    @raise SubprocessException when the 'ls' utility cannot be launched.
    """
    try:
        process = subprocess.Popen(['ls', '-aiR'] + [directory], stdout=subprocess.PIPE)
    except OSError:
        raise SubprocessException('Failed to launch "ls" subprocess.')
    try:
        tree = _build_inode_count_tree(directory, process.stdout, progress=progress)
    finally:
        # Always close the pipe and reap the child: avoids a descriptor leak
        # on parse errors and a zombie process (the child was never waited on).
        process.stdout.close()
        process.wait()
    return tree
def _build_inode_count_tree(directory, ls_pipe, progress=None):
    """
    Helper: parse 'ls -aiR' output from ls_pipe into a DirectoryTreeNode
    tree of per-directory inode counts (hard links counted once globally).
    """
    tree = DirectoryTreeNode(directory)
    # Path of current directory.
    path = directory
    count = 0
    # Inodes already counted anywhere in the tree: hard links and '.'
    # entries must not be counted twice.
    all_inodes = set()

    # Process data per directory block (separated by two newlines)
    blocks = ls_pipe.read().decode('utf-8').rstrip('\n').split('\n\n')
    for i, dir_ls in enumerate(blocks):
        items = dir_ls.split('\n')

        # Get current path in directory tree
        if i == 0 and not items[0].endswith(':'):
            # BSD compatibility: in first block the root directory can be omitted
            path = directory
        else:
            # GNU ls prefixes each block with "<path>:".
            path = items.pop(0).rstrip(':')
        if progress:
            progress('scanning %s' % path)

        # Collect inodes for current directory
        count = 0
        for item in items:
            # Each entry line is "<inode> <name>" (lstrip handles the
            # right-aligned inode column).
            inode, name = item.lstrip().split(' ', 1)
            # Skip parent entry
            if name == '..':
                continue
            # Get and process inode
            inode = int(inode)
            if inode not in all_inodes:
                count += 1
                all_inodes.add(inode)
        # Store count.
        tree.import_path(path, count)

    # Clear feedback output.
    if progress:
        progress('')

    # Counts were per-directory ("own" sizes): roll them up to totals.
    tree.recalculate_own_sizes_to_total_sizes()

    return tree
def get_progress_callback(stream=sys.stdout, interval=.2, terminal_width=80):
    """
    Create a rate-limited progress reporter that rewrites a single line
    (carriage return, no newline) on the given stream, at most once per
    `interval` seconds.
    """
    # One-element list as a mutable cell the closure can update
    # (python 2 compatible alternative to 'nonlocal').
    next_emit_time = [0]

    def progress(message):
        now = time.time()
        if now > next_emit_time[0]:
            # Pad/clip to the terminal width so previous output is overwritten.
            stream.write(message.ljust(terminal_width)[:terminal_width] + '\r')
            next_emit_time[0] = now + interval

    return progress
def main():
    """Command line entry point: parse options and render one diagram per directory."""
    # Default diagram width: the current terminal width.
    terminal_width = terminal_size()[1]

    # Handle commandline interface.
    import optparse
    cliparser = optparse.OptionParser(
        """usage: %prog [options] [DIRS]
%prog gives a graphic representation of the disk space
usage of the folder trees under DIRS.""",
        version='%prog 1.0')
    cliparser.add_option(
        '-w', '--width',
        action='store', type='int', dest='display_width', default=terminal_width,
        help='total width of all bars', metavar='WIDTH'
    )
    cliparser.add_option(
        '-x', '--one-file-system',
        action='store_true', dest='onefilesystem', default=False,
        help='skip directories on different filesystems'
    )
    cliparser.add_option(
        '-L', '--dereference',
        action='store_true', dest='dereference', default=False,
        help='dereference all symbolic links'
    )
    cliparser.add_option(
        '--max-depth',
        action='store', type='int', dest='max_depth', default=5,
        help='maximum recursion depth', metavar='N'
    )
    cliparser.add_option(
        '-i', '--inodes',
        action='store_true', dest='inode_count', default=False,
        help='count inodes instead of file size'
    )
    cliparser.add_option(
        '--no-progress',
        action='store_false', dest='show_progress', default=True,
        help='disable progress reporting'
    )

    (opts, args) = cliparser.parse_args()

    # Make sure we have a valid list of paths; invalid ones are warned about
    # on stderr and skipped.
    if len(args) > 0:
        paths = []
        for path in args:
            if os.path.exists(path):
                paths.append(path)
            else:
                sys.stderr.write('Warning: not a valid path: "%s"\n' % path)
    else:
        # Do current dir if no dirs are given.
        paths = ['.']

    if opts.show_progress:
        feedback = get_progress_callback(stream=sys.stdout, terminal_width=opts.display_width)
    else:
        feedback = None

    # Render one diagram per requested directory: inode counts or byte sizes.
    if opts.inode_count:
        for directory in paths:
            tree = build_inode_count_tree(directory, progress=feedback)
            print(tree.block_display(opts.display_width, max_depth=opts.max_depth, size_renderer=human_readable_count))
    else:
        for directory in paths:
            tree = build_du_tree(directory, progress=feedback, one_filesystem=opts.onefilesystem,
                                 dereference=opts.dereference)
            print(tree.block_display(opts.display_width, max_depth=opts.max_depth))


if __name__ == '__main__':
    main()
| 32.221957 | 119 | 0.606177 |
f954b5ef634a42e17d642cab87ede69a665f860b | 1,420 | py | Python | garnet/lib/magma/scripts/diff_json.py | opensource-assist/fuschia | 66646c55b3d0b36aae90a4b6706b87f1a6261935 | [
"BSD-3-Clause"
] | 10 | 2020-12-28T17:04:44.000Z | 2022-03-12T03:20:43.000Z | garnet/lib/magma/scripts/diff_json.py | opensource-assist/fuschia | 66646c55b3d0b36aae90a4b6706b87f1a6261935 | [
"BSD-3-Clause"
] | 1 | 2022-01-14T23:38:40.000Z | 2022-01-14T23:38:40.000Z | garnet/lib/magma/scripts/diff_json.py | opensource-assist/fuschia | 66646c55b3d0b36aae90a4b6706b87f1a6261935 | [
"BSD-3-Clause"
] | 4 | 2020-12-28T17:04:45.000Z | 2022-03-12T03:20:44.000Z | #!/usr/bin/env python2.7
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import difflib
import json
import sys
def usage():
    """Print command-line usage information to stdout."""
    # Function-call form of print: works identically under Python 2.7 (a single
    # parenthesized argument) and Python 3; the original print statements made
    # the module a SyntaxError under Python 3.
    print('Usage:')
    print('  diff_json.py FILE1 FILE2 OUTPUT')
    print('  Generates canonical json text for FILE1 and FILE2, then does a normal text comparison,')
    print('  writing the diff output to OUTPUT.')
def main():
    """
    Compare two JSON files via their canonical text form and write the
    unified diff to a third file.

    Exit codes: -1 bad arguments, -2 file access error, -3 invalid JSON,
    -4 non-empty diff. Returns None (exit status 0) when the files are
    canonically equal.
    """
    if len(sys.argv) != 4:
        usage()
        # sys.exit instead of the bare exit() builtin: exit() is only
        # installed by the optional 'site' module and breaks under python -S.
        sys.exit(-1)
    try:
        # Single with-statement for all three files (supported since 2.7).
        with open(sys.argv[1], 'r') as file1, \
             open(sys.argv[2], 'r') as file2, \
             open(sys.argv[3], 'w') as result:
            json1 = json.load(file1)
            json2 = json.load(file2)
            # Canonical form: sorted keys and fixed indentation, so
            # semantically equal documents render to identical text.
            canon1 = json.dumps(json1, sort_keys=True, indent=2).splitlines()
            canon2 = json.dumps(json2, sort_keys=True, indent=2).splitlines()
            diff = difflib.unified_diff(canon1, canon2, sys.argv[1], sys.argv[2], lineterm="")
            diffstr = '\n'.join(diff)
            result.write(diffstr)
            if len(diffstr) != 0:
                print('Error: non-empty diff between canonical json representations:')
                print(diffstr)
                sys.exit(-4)
    except IOError as e:
        print('Error accessing files: ' + str(e))
        usage()
        sys.exit(-2)
    except ValueError as e:
        # json.load raises ValueError (JSONDecodeError subclasses it).
        print('Error decoding json: ' + str(e))
        sys.exit(-3)


if __name__ == '__main__':
    sys.exit(main())
| 31.555556 | 98 | 0.624648 |
2e467c96550c26314a8eea94f08741a884138210 | 7,465 | py | Python | desktop/libs/indexer/src/indexer/indexers/envelope.py | HSunboy/hue | caccd8c058eabb8f5899006a6566be46e3af871b | [
"Apache-2.0"
] | null | null | null | desktop/libs/indexer/src/indexer/indexers/envelope.py | HSunboy/hue | caccd8c058eabb8f5899006a6566be46e3af871b | [
"Apache-2.0"
] | null | null | null | desktop/libs/indexer/src/indexer/indexers/envelope.py | HSunboy/hue | caccd8c058eabb8f5899006a6566be46e3af871b | [
"Apache-2.0"
] | 1 | 2020-07-25T12:44:18.000Z | 2020-07-25T12:44:18.000Z | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from desktop.lib.exceptions_renderable import PopupException
from desktop.conf import DISABLE_HUE_3
from hadoop.fs.hadoopfs import Hdfs
from notebook.models import make_notebook
from indexer.conf import CONFIG_JARS_LIBS_PATH
LOG = logging.getLogger(__name__)
class EnvelopeIndexer(object):
  """
  Generates Cloudera Envelope pipeline configurations and submits them as
  Hue notebook tasks (shell/spark snippets) for indexing data into a target
  (Hive/Kudu table, Solr collection, Kafka topic or filesystem).
  """

  def __init__(self, username, fs=None, jt=None, solr_client=None):
    # fs: HDFS filesystem client used to stage the workspace; jt and
    # solr_client are accepted for interface parity but unused here.
    self.fs = fs
    self.jt = jt
    self.username = username

  def _upload_workspace(self, envelope):
    """Stage the rendered envelope.conf in the user's HDFS workspace and return the workspace path."""
    from oozie.models2 import Job

    hdfs_workspace_path = Job.get_workspace(self.username)
    hdfs_envelope_path = os.path.join(hdfs_workspace_path, "envelope.conf")

    # Create workspace on hdfs and write the config as the acting user.
    self.fs.do_as_user(self.username, self.fs.mkdir, hdfs_workspace_path)
    self.fs.do_as_user(self.username, self.fs.create, hdfs_envelope_path, data=envelope)

    return hdfs_workspace_path

  def run(self, request, collection_name, envelope, input_path, start_time=None, lib_path=None):
    """
    Submit the envelope pipeline as a background notebook task.

    @param request: HTTP request of the acting user (for task execution).
    @param collection_name: display name of the indexing target.
    @param envelope: rendered envelope.conf content (see generate_config()).
    @param input_path: unused here; the input location is baked into the config.
    @param start_time: optional timestamp recorded as last execution time.
    @param lib_path: path to the envelope jar; defaults to CONFIG_JARS_LIBS_PATH.
    @return result of task.execute() (task handle/response).
    """
    workspace_path = self._upload_workspace(envelope)

    if lib_path is None:
      lib_path = CONFIG_JARS_LIBS_PATH.get()

    task = make_notebook(
        name=_('Indexing into %s') % collection_name,
        editor_type='notebook',
        #on_success_url=reverse('search:browse', kwargs={'name': collection_name}),
        #pub_sub_url='assist.collections.refresh',
        is_task=True,
        is_notebook=True,
        last_executed=start_time
    )

    # NOTE(review): the 'or True' makes this condition always true, so the
    # shell-snippet path is always taken (CDH5 behavior); the spark-snippet
    # branch below is currently dead code. Kept as-is to preserve behavior.
    if not DISABLE_HUE_3.config.default_value or True:  # CDH5
      shell_command_name = "pipeline.sh"
      shell_command = """#!/bin/bash
export SPARK_DIST_CLASSPATH=`hadoop classpath`
export SPARK_DIST_CLASSPATH=/etc/hive/conf:`hadoop classpath`
export JAVA_HOME=/usr/java/jdk1.8.0_162
SPARK_KAFKA_VERSION=0.10 spark2-submit envelope.jar envelope.conf"""
      hdfs_shell_cmd_path = os.path.join(workspace_path, shell_command_name)
      self.fs.do_as_user(self.username, self.fs.create, hdfs_shell_cmd_path, data=shell_command)
      task.add_shell_snippet(
          shell_command=shell_command_name,
          files=[
              {u'value': u'%s/envelope.conf' % workspace_path},
              {u'value': hdfs_shell_cmd_path},
              {u'value': lib_path}
          ]
      )
    else:
      task.add_spark_snippet(
          clazz='com.cloudera.labs.envelope.EnvelopeMain',
          jars=Hdfs.basename(lib_path),
          arguments=[
              u'envelope.conf'
          ],
          files=[
              {u'path': u'%s/envelope.conf' % workspace_path, u'type': u'file'},
              {u'path': lib_path, u'type': u'file'},
          ]
      )

    return task.execute(request, batch=True)

  def generate_config(self, properties):
    """
    Render an Envelope HOCON configuration from a properties dict.

    Supported inputFormat values: 'stream' (streamSelection 'kafka' or
    'sfdc') and 'file'. Supported 'ouputFormat' (key spelling is part of the
    caller contract) values: 'file', 'table', 'index', 'stream'.
    @raise PopupException for unsupported format combinations.
    """
    # Local names input_conf/output_conf avoid shadowing the input() builtin.
    if properties['inputFormat'] == 'stream':
      if properties['streamSelection'] == 'kafka':
        input_conf = """type = kafka
                brokers = "%(brokers)s"
                topics = [%(topics)s]
                encoding = string
                translator {
                    type = %(kafkaFieldType)s
                    delimiter = "%(kafkaFieldDelimiter)s"
                    field.names = [%(kafkaFieldNames)s]
                    field.types = [%(kafkaFieldTypes)s]
                }
                %(window)s
        """ % properties
      elif properties['streamSelection'] == 'sfdc':
        input_conf = """type = sfdc
                mode = fetch-all
                sobject = %(streamObject)s
                sfdc: {
                  partner: {
                    username = "%(streamUsername)s"
                    password = "%(streamPassword)s"
                    token = "%(streamToken)s"
                    auth-endpoint = "%(streamEndpointUrl)s"
                  }
                }
        """ % properties
      else:
        raise PopupException(_('Stream format of %(inputFormat)s not recognized: %(streamSelection)s') % properties)
    elif properties['inputFormat'] == 'file':
      input_conf = """type = filesystem
        path = %(input_path)s
        format = %(format)s
      """ % properties
    else:
      raise PopupException(_('Input format not recognized: %(inputFormat)s') % properties)

    if properties['ouputFormat'] == 'file':
      output_conf = """
        planner = {
          type = overwrite
        }
        output = {
          type = filesystem
          path = %(path)s
          format = %(format)s
          header = true
        }""" % properties
    elif properties['ouputFormat'] == 'table':
      if properties['inputFormat'] == 'stream' and properties['streamSelection'] == 'kafka':
        output_conf = """
        deriver {
          type = sql
          query.literal = \"\"\"
              SELECT * FROM inputdata\"\"\"
        }
        planner {
          type = upsert
        }
        output {
          type = kudu
          connection = "%(kudu_master)s"
          table.name = "%(output_table)s"
        }""" % properties
      else:
        output_conf = """
        planner {
          type = append
        }
        output {
          type = hive
          table.name = "%(output_table)s"
        }""" % properties
    elif properties['ouputFormat'] == 'index':
      if properties['inputFormat'] == 'stream':
        if properties['topics'] == 'NavigatorAuditEvents':
          # Navigator audit events are handled by the default deriver only.
          output_conf = ''
        else:
          output_conf = """
        planner {
          type = upsert
        }
        output {
          type = solr
          connection = "%(connection)s"
          collection.name = "%(collectionName)s"
        }""" % properties
          # Bug fix: planner type was misspelled 'upstert', which Envelope
          # rejects as an unknown planner type.
      else:
        # Bug fix: previously this combination fell through without assigning
        # the output config and crashed with a NameError below; fail with an
        # explicit, user-facing error instead.
        raise PopupException(_('Output format of index requires a stream input, got: %(inputFormat)s') % properties)
    elif properties['ouputFormat'] == 'stream':
      output_conf = """
        planner {
          type = append
        }
        output {
          type = kafka
          brokers = "%(brokers)s"
          topic = %(topics)s
          serializer.type = delimited
          serializer.field.delimiter = ","
        }""" % properties
    else:
      raise PopupException(_('Output format not recognized: %(ouputFormat)s') % properties)

    return """
application {
    name = %(app_name)s
    %(batch)s
    executors = 1
    executor.cores = 1
    executor.memory = 1G
}

steps {
    inputdata {
        input {
            %(input)s
        }
    }

    outputdata {
        dependencies = [inputdata]

        deriver {
          type = sql
          query.literal = \"\"\"SELECT * from inputdata\"\"\"
        }

        %(output)s
    }
}
""" % {
      'input': input_conf,
      'output': output_conf,
      'app_name': properties['app_name'],
      # Streaming pipelines run in micro-batches; batch pipelines omit this.
      'batch': 'batch.milliseconds = 5000' if properties['inputFormat'] == 'stream' else ''
    }
| 30.469388 | 116 | 0.590221 |
f36f5eda4666df238112342db5a90b46131d5840 | 166 | py | Python | tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_PolyTrend_Seasonal_DayOfWeek_AR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_PolyTrend_Seasonal_DayOfWeek_AR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_PolyTrend_Seasonal_DayOfWeek_AR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Difference'] , ['PolyTrend'] , ['Seasonal_DayOfWeek'] , ['AR'] ); | 41.5 | 88 | 0.759036 |
b8d99adcfabad3dced8a2c9fafe6d3b3fb001e4f | 7,008 | py | Python | sewer/dns_providers/cloudflare.py | g-rich/sewer | ef1af91183bea65d6ebb3a39089249e7c4054b0a | [
"MIT"
] | null | null | null | sewer/dns_providers/cloudflare.py | g-rich/sewer | ef1af91183bea65d6ebb3a39089249e7c4054b0a | [
"MIT"
] | null | null | null | sewer/dns_providers/cloudflare.py | g-rich/sewer | ef1af91183bea65d6ebb3a39089249e7c4054b0a | [
"MIT"
] | null | null | null | import urllib.parse
import requests
import tldextract
from . import common
class CloudFlareDns(common.BaseDns):
    """
    DNS-01 challenge provider backed by the CloudFlare v4 API: creates and
    deletes the _acme-challenge TXT records for a domain.
    """

    dns_provider_name = "cloudflare"

    def __init__(
        self,
        CLOUDFLARE_EMAIL,
        CLOUDFLARE_API_KEY,
        CLOUDFLARE_API_BASE_URL="https://api.cloudflare.com/client/v4/",
    ):
        # Zone id is resolved lazily by find_dns_zone().
        self.CLOUDFLARE_DNS_ZONE_ID = None
        self.CLOUDFLARE_EMAIL = CLOUDFLARE_EMAIL
        self.CLOUDFLARE_API_KEY = CLOUDFLARE_API_KEY
        self.HTTP_TIMEOUT = 65  # seconds

        # Normalize the base URL to end with "/": urllib.parse.urljoin drops
        # the last path segment of a base that lacks the trailing slash.
        # (A redundant unconditional assignment before this check was removed.)
        if CLOUDFLARE_API_BASE_URL[-1] != "/":
            self.CLOUDFLARE_API_BASE_URL = CLOUDFLARE_API_BASE_URL + "/"
        else:
            self.CLOUDFLARE_API_BASE_URL = CLOUDFLARE_API_BASE_URL
        super(CloudFlareDns, self).__init__()

    def find_dns_zone(self, domain_name):
        """
        Resolve the active CloudFlare zone for domain_name and store its id
        in self.CLOUDFLARE_DNS_ZONE_ID.

        @raise ValueError when the domain cannot be parsed, the API call
        fails, or no matching zone is found.
        """
        self.logger.debug("find_dns_zone")
        parsed_domain_name = tldextract.extract(domain_name)
        if parsed_domain_name.domain and parsed_domain_name.suffix:
            # Registered domain: second-level domain + public suffix.
            dns_name = "{sld}.{tld}".format(
                sld=parsed_domain_name.domain, tld=parsed_domain_name.suffix
            )
        else:
            raise ValueError(
                "Error parsing domain name: {domain_name}, {parsed_domain_name}".format(
                    domain_name=domain_name,
                    parsed_domain_name=parsed_domain_name
                )
            )
        url = urllib.parse.urljoin(
            self.CLOUDFLARE_API_BASE_URL, "zones?status=active&name={}".format(dns_name)
        )
        headers = {"X-Auth-Email": self.CLOUDFLARE_EMAIL, "X-Auth-Key": self.CLOUDFLARE_API_KEY}
        find_dns_zone_response = requests.get(url, headers=headers, timeout=self.HTTP_TIMEOUT)
        self.logger.debug(
            "find_dns_zone_response. status_code={0}".format(find_dns_zone_response.status_code)
        )
        if find_dns_zone_response.status_code != 200:
            raise ValueError(
                "Error creating cloudflare dns record: status_code={status_code} response={response}".format(
                    status_code=find_dns_zone_response.status_code,
                    response=self.log_response(find_dns_zone_response),
                )
            )

        result = find_dns_zone_response.json()["result"]
        for zone in result:
            # NOTE(review): substring containment, not an exact suffix match —
            # relies on the API only returning zones related to dns_name; a
            # later match overwrites an earlier one. Confirm intended.
            if zone["name"] in domain_name:
                self.CLOUDFLARE_DNS_ZONE_ID = zone["id"]
        if self.CLOUDFLARE_DNS_ZONE_ID is None:
            raise ValueError(
                "Error unable to get DNS zone for domain_name={domain_name}: status_code={status_code} response={response}".format(
                    domain_name=domain_name,
                    status_code=find_dns_zone_response.status_code,
                    response=self.log_response(find_dns_zone_response),
                )
            )
        self.logger.debug("find_dns_zone_success")

    def create_dns_record(self, domain_name, domain_dns_value):
        """
        Create the _acme-challenge TXT record for domain_name with the given
        challenge value.

        @raise ValueError when the CloudFlare API call does not return 200.
        """
        self.logger.info("create_dns_record")

        # if we have been given a wildcard name, strip wildcard
        # (lstrip removes leading '*' and '.' characters).
        domain_name = domain_name.lstrip("*.")
        self.find_dns_zone(domain_name)

        url = urllib.parse.urljoin(
            self.CLOUDFLARE_API_BASE_URL,
            "zones/{0}/dns_records".format(self.CLOUDFLARE_DNS_ZONE_ID),
        )
        headers = {"X-Auth-Email": self.CLOUDFLARE_EMAIL, "X-Auth-Key": self.CLOUDFLARE_API_KEY}
        body = {
            "type": "TXT",
            "name": "_acme-challenge" + "." + domain_name + ".",
            "content": "{0}".format(domain_dns_value),
        }
        create_cloudflare_dns_record_response = requests.post(
            url, headers=headers, json=body, timeout=self.HTTP_TIMEOUT
        )
        self.logger.debug(
            "create_cloudflare_dns_record_response. status_code={0}. response={1}".format(
                create_cloudflare_dns_record_response.status_code,
                self.log_response(create_cloudflare_dns_record_response),
            )
        )
        if create_cloudflare_dns_record_response.status_code != 200:
            # raise error so that we do not continue to make calls to ACME
            # server
            raise ValueError(
                "Error creating cloudflare dns record: status_code={status_code} response={response}".format(
                    status_code=create_cloudflare_dns_record_response.status_code,
                    response=self.log_response(create_cloudflare_dns_record_response),
                )
            )
        self.logger.info("create_dns_record_end")

    def delete_dns_record(self, domain_name, domain_dns_value):
        """
        Delete all _acme-challenge TXT records for domain_name in the cached
        zone. Deletion failures are logged but never raised (best effort:
        leftover records do not break issuance).
        """
        self.logger.info("delete_dns_record")

        class MockResponse(object):
            # Placeholder so delete_dns_record_response is defined even when
            # the zone holds no matching records (loop body never runs).
            def __init__(self, status_code=200, content="mock-response"):
                self.status_code = status_code
                self.content = content
                super(MockResponse, self).__init__()

            def json(self):
                return {}

        delete_dns_record_response = MockResponse()

        headers = {"X-Auth-Email": self.CLOUDFLARE_EMAIL, "X-Auth-Key": self.CLOUDFLARE_API_KEY}
        dns_name = "_acme-challenge" + "." + domain_name
        list_dns_payload = {"type": "TXT", "name": dns_name}
        list_dns_url = urllib.parse.urljoin(
            self.CLOUDFLARE_API_BASE_URL,
            "zones/{0}/dns_records".format(self.CLOUDFLARE_DNS_ZONE_ID),
        )

        # Look up matching record ids, then delete them one by one.
        list_dns_response = requests.get(
            list_dns_url, params=list_dns_payload, headers=headers, timeout=self.HTTP_TIMEOUT
        )

        for i in range(0, len(list_dns_response.json()["result"])):
            dns_record_id = list_dns_response.json()["result"][i]["id"]
            url = urllib.parse.urljoin(
                self.CLOUDFLARE_API_BASE_URL,
                "zones/{0}/dns_records/{1}".format(self.CLOUDFLARE_DNS_ZONE_ID, dns_record_id),
            )
            headers = {"X-Auth-Email": self.CLOUDFLARE_EMAIL, "X-Auth-Key": self.CLOUDFLARE_API_KEY}
            delete_dns_record_response = requests.delete(
                url, headers=headers, timeout=self.HTTP_TIMEOUT
            )
            self.logger.debug(
                "delete_dns_record_response. status_code={0}. response={1}".format(
                    delete_dns_record_response.status_code,
                    self.log_response(delete_dns_record_response),
                )
            )
            if delete_dns_record_response.status_code != 200:
                # extended logging for debugging
                # we do not need to raise exception
                self.logger.error(
                    "delete_dns_record_response. status_code={0}. response={1}".format(
                        delete_dns_record_response.status_code,
                        self.log_response(delete_dns_record_response),
                    )
                )
        self.logger.info("delete_dns_record_success")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.