hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0ae7ece3c69418050369e97c97c69c668d4452
| 3,751
|
py
|
Python
|
venv/Lib/site-packages/fsspec/implementations/jupyter.py
|
ZhangQingsen/CISC849Proj
|
ae89693648ead79d97805d663c1db58dfc0786a0
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/fsspec/implementations/jupyter.py
|
ZhangQingsen/CISC849Proj
|
ae89693648ead79d97805d663c1db58dfc0786a0
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/fsspec/implementations/jupyter.py
|
ZhangQingsen/CISC849Proj
|
ae89693648ead79d97805d663c1db58dfc0786a0
|
[
"MIT"
] | null | null | null |
import base64
import io
import fsspec
import re
import requests
class JupyterFileSystem(fsspec.AbstractFileSystem):
    """View of the files as seen by a Jupyter server (notebook or lab)"""

    protocol = ("jupyter", "jlab")

    def __init__(self, url, tok=None, **kwargs):
        """
        Parameters
        ----------
        url : str
            Base URL of the server, like "http://127.0.0.1:8888". May include
            token in the string, which is given by the process when starting up
        tok : str
            If the token is obtained separately, can be given here
        kwargs
        """
        if "?" in url:
            if tok is None:
                try:
                    tok = re.findall("token=([a-f0-9]+)", url)[0]
                except IndexError as e:
                    raise ValueError("Could not determine token") from e
            url = url.split("?", 1)[0]
        self.url = url.rstrip("/") + "/api/contents"
        self.session = requests.Session()
        if tok:
            self.session.headers["Authorization"] = f"token {tok}"
        super().__init__(**kwargs)

    def ls(self, path, detail=True, **kwargs):
        """List contents of `path`; dicts when detail=True, else names only."""
        path = self._strip_protocol(path)
        r = self.session.get(self.url + "/" + path)
        if r.status_code == 404:
            # BUGFIX: was `return FileNotFoundError(path)`, which handed the
            # exception instance back to the caller instead of raising it.
            raise FileNotFoundError(path)
        r.raise_for_status()
        out = r.json()
        if out["type"] == "directory":
            out = out["content"]
        else:
            out = [out]
        for o in out:
            # fsspec convention: full path lives under "name".
            o["name"] = o.pop("path")
            o.pop("content")
            if o["type"] == "notebook":
                o["type"] = "file"
        if detail:
            return out
        return [o["name"] for o in out]

    def cat_file(self, path):
        """Return the byte content of the file at `path`."""
        path = self._strip_protocol(path)
        r = self.session.get(self.url + "/" + path)
        if r.status_code == 404:
            # BUGFIX: raise instead of returning the exception instance.
            raise FileNotFoundError(path)
        r.raise_for_status()
        out = r.json()
        if out["format"] == "text":
            # data should be binary
            return out["content"].encode()
        else:
            return base64.b64decode(out["content"])

    def pipe_file(self, path, value, **_):
        """Upload bytes `value` as the full content of `path`."""
        path = self._strip_protocol(path)
        json = {
            "name": path.rsplit("/", 1)[-1],
            "path": path,
            "size": len(value),
            # BUGFIX: b64encode returns bytes, which is not JSON-serializable;
            # decode to ASCII text for the request body.
            "content": base64.b64encode(value).decode(),
            "format": "base64",
            "type": "file",
        }
        r = self.session.put(self.url + "/" + path, json=json)
        # Surface server-side failures instead of silently dropping the write.
        r.raise_for_status()

    def mkdir(self, path, create_parents=True, **kwargs):
        """Create directory `path`, optionally creating parents recursively."""
        path = self._strip_protocol(path)
        if create_parents and "/" in path:
            self.mkdir(path.rsplit("/", 1)[0], True)
        json = {
            "name": path.rsplit("/", 1)[-1],
            "path": path,
            "size": None,
            "content": None,
            "type": "directory",
        }
        self.session.put(self.url + "/" + path, json=json)

    def _rm(self, path):
        # Delete a single path on the server; errors are not checked here,
        # matching the original best-effort behavior.
        path = self._strip_protocol(path)
        self.session.delete(self.url + "/" + path)

    def _open(self, path, mode="rb", **kwargs):
        path = self._strip_protocol(path)
        if mode == "rb":
            # Reads are fully buffered in memory.
            data = self.cat_file(path)
            return io.BytesIO(data)
        else:
            return SimpleFileWriter(self, path, mode="wb")
class SimpleFileWriter(fsspec.spec.AbstractBufferedFile):
    def _upload_chunk(self, final=False):
        """Defer the upload until the file is closed, then send it whole.

        Not suitable for large files.
        """
        if not final:
            # Nothing is sent for intermediate flushes.
            return False
        self.buffer.seek(0)
        payload = self.buffer.read()
        self.fs.pipe_file(self.path, payload)
| 30.745902
| 79
| 0.517729
|
4a0ae8494d6b3840a8df85ed1119a46faa5c24f2
| 1,475
|
py
|
Python
|
progressivis/table/loc.py
|
jdfekete/progressivis
|
3bc79ce229cd628ef0aa4663136a674743697b47
|
[
"BSD-2-Clause"
] | 51
|
2015-09-14T16:31:02.000Z
|
2022-01-12T17:56:53.000Z
|
progressivis/table/loc.py
|
jdfekete/progressivis
|
3bc79ce229cd628ef0aa4663136a674743697b47
|
[
"BSD-2-Clause"
] | 10
|
2017-11-15T15:10:05.000Z
|
2022-01-19T07:36:43.000Z
|
progressivis/table/loc.py
|
jdfekete/progressivis
|
3bc79ce229cd628ef0aa4663136a674743697b47
|
[
"BSD-2-Clause"
] | 5
|
2017-11-14T20:20:56.000Z
|
2020-01-22T06:26:51.000Z
|
import numpy as np
from collections import Iterable
from progressivis.core.utils import integer_types
from progressivis.core.bitmap import bitmap
class Loc(object):
    """Classify index arguments (int, slice, ndarray, bitmap, iterable)
    and normalise them to an iterable of positions."""
    ITERABLE = 8  # bit flag: the category can be iterated directly
    INT = 1
    LIST = 2 | ITERABLE
    SLICE = 3
    NDARRAY = 4 | ITERABLE
    NDBOOLARRAY = 6
    BITMAP = 5 | ITERABLE

    @staticmethod
    def isiterable(l):
        """True when category code `l` carries the ITERABLE bit."""
        return (l & Loc.ITERABLE) != 0

    @staticmethod
    def dispatch(locs):
        """Return the Loc category code for the index object `locs`.

        Raises ValueError for unsupported types (including ndarrays of
        non-integer, non-boolean dtype).
        """
        if isinstance(locs, integer_types):
            return Loc.INT
        elif isinstance(locs, slice):
            return Loc.SLICE
        elif isinstance(locs, np.ndarray):
            # Generalized: accept any integer dtype, not only int32/int64.
            # BUGFIX: np.bool was removed in NumPy 1.24; use np.bool_.
            if np.issubdtype(locs.dtype, np.integer):
                return Loc.NDARRAY
            elif np.issubdtype(locs.dtype, np.bool_):
                return Loc.NDBOOLARRAY
        elif isinstance(locs, bitmap):
            return Loc.BITMAP
        elif isinstance(locs, Iterable):
            return Loc.ITERABLE
        # BUGFIX: the message was passed as a second ValueError argument and
        # never interpolated; format it explicitly.
        raise ValueError('Unhandled type for %s' % (locs,))

    @staticmethod
    def to_iterable(locs, size):
        """Convert `locs` into an iterable of positions, resolving slices
        against `size` and boolean masks into index arrays."""
        l = Loc.dispatch(locs)
        if Loc.isiterable(l):
            # BUGFIX: previously returned the category code `l` instead of
            # the index object itself. This branch also covers NDARRAY,
            # whose separate (unreachable) case was removed.
            return locs
        elif l == Loc.INT:
            return [locs]
        elif l == Loc.SLICE:
            # BUGFIX: slice objects expose .indices(), not .index().
            return range(*locs.indices(size))
        elif l == Loc.NDBOOLARRAY:
            return np.where(locs)[0]
        raise ValueError('Cannot convert %s into an iterable' % (locs,))
| 27.830189
| 68
| 0.570169
|
4a0ae8bf1dfbaceaa22a0f549451a8d084f3d513
| 6,288
|
py
|
Python
|
ADVENT/advent/domain_adaptation/eval_UDA.py
|
BinBrent/IntraDA
|
8acba3671462808cc831bc9da842513b9f742540
|
[
"MIT"
] | 261
|
2020-04-16T15:34:10.000Z
|
2022-03-30T08:56:31.000Z
|
ADVENT/advent/domain_adaptation/eval_UDA.py
|
I-Hope-Peace/IntraDA
|
65a118754b063285f2d93cc66e15b3bb4166328d
|
[
"MIT"
] | 14
|
2020-05-14T09:03:58.000Z
|
2021-11-02T05:53:47.000Z
|
ADVENT/advent/domain_adaptation/eval_UDA.py
|
I-Hope-Peace/IntraDA
|
65a118754b063285f2d93cc66e15b3bb4166328d
|
[
"MIT"
] | 32
|
2020-04-16T15:35:02.000Z
|
2021-05-27T07:18:05.000Z
|
# --------------------------------------------------------
# Domain adpatation evaluation
# Copyright (c) 2019 valeo.ai
#
# Written by Tuan-Hung Vu
# --------------------------------------------------------
import os.path as osp
import time
import numpy as np
import torch
from torch import nn
from tqdm import tqdm
from advent.utils.func import per_class_iu, fast_hist
from advent.utils.serialization import pickle_dump, pickle_load
def evaluate_domain_adaptation(models, test_loader, cfg,
                               fixed_test_size=True,
                               verbose=True):
    """Run UDA evaluation in the mode selected by cfg.TEST.MODE."""
    device = cfg.GPU_ID
    # With a fixed output size a single upsampler can be shared across batches;
    # otherwise the eval functions rebuild it per sample.
    if fixed_test_size:
        interp = nn.Upsample(
            size=(cfg.TEST.OUTPUT_SIZE_TARGET[1], cfg.TEST.OUTPUT_SIZE_TARGET[0]),
            mode='bilinear', align_corners=True)
    else:
        interp = None
    # eval
    if cfg.TEST.MODE == 'single':
        eval_single(cfg, models, device, test_loader, interp,
                    fixed_test_size, verbose)
    elif cfg.TEST.MODE == 'best':
        eval_best(cfg, models, device, test_loader, interp,
                  fixed_test_size, verbose)
    else:
        raise NotImplementedError(f"Not yet supported test mode {cfg.TEST.MODE}")
def eval_single(cfg, models,
                device, test_loader, interp,
                fixed_test_size, verbose):
    """Evaluate a (weighted) ensemble of checkpoints on the whole loader.

    Restores one checkpoint per model from cfg.TEST.RESTORE_FROM, accumulates
    a confusion matrix over all batches and prints the resulting mIoU.
    """
    assert len(cfg.TEST.RESTORE_FROM) == len(models), 'Number of models are not matched'
    for checkpoint, model in zip(cfg.TEST.RESTORE_FROM, models):
        load_checkpoint_for_evaluation(model, checkpoint, device)
    # eval
    hist = np.zeros((cfg.NUM_CLASSES, cfg.NUM_CLASSES))
    for index, batch in tqdm(enumerate(test_loader)):
        image, label, _, name = batch
        if not fixed_test_size:
            # Rebuild the upsampler so predictions match this label's size.
            interp = nn.Upsample(size=(label.shape[1], label.shape[2]), mode='bilinear', align_corners=True)
        with torch.no_grad():
            output = None
            for model, model_weight in zip(models, cfg.TEST.MODEL_WEIGHT):
                # Index [1] of the model output — presumably the main
                # classifier head; confirm against the model definition.
                pred_main = model(image.cuda(device))[1]
                output_ = interp(pred_main).cpu().data[0].numpy()
                # Weighted accumulation across the ensemble.
                if output is None:
                    output = model_weight * output_
                else:
                    output += model_weight * output_
            assert output is not None, 'Output is None'
            # (C, H, W) -> (H, W, C), then argmax over classes.
            output = output.transpose(1, 2, 0)
            output = np.argmax(output, axis=2)
        label = label.numpy()[0]
        hist += fast_hist(label.flatten(), output.flatten(), cfg.NUM_CLASSES)
    inters_over_union_classes = per_class_iu(hist)
    print(f'mIoU = \t{round(np.nanmean(inters_over_union_classes) * 100, 2)}')
    if verbose:
        display_stats(cfg, test_loader.dataset.class_names, inters_over_union_classes)
def eval_best(cfg, models,
              device, test_loader, interp,
              fixed_test_size, verbose):
    """Evaluate every snapshot in SNAPSHOT_DIR and track the best mIoU.

    Iterates checkpoints `model_{i}.pth` for i in [SNAPSHOT_STEP,
    SNAPSHOT_MAXITER] with stride SNAPSHOT_STEP. Per-checkpoint per-class IoU
    is cached in `all_res.pkl`, so re-runs skip already-evaluated snapshots.
    """
    assert len(models) == 1, 'Not yet supported multi models in this mode'
    assert osp.exists(cfg.TEST.SNAPSHOT_DIR[0]), 'SNAPSHOT_DIR is not found'
    start_iter = cfg.TEST.SNAPSHOT_STEP
    step = cfg.TEST.SNAPSHOT_STEP
    max_iter = cfg.TEST.SNAPSHOT_MAXITER
    cache_path = osp.join(cfg.TEST.SNAPSHOT_DIR[0], 'all_res.pkl')
    if osp.exists(cache_path):
        all_res = pickle_load(cache_path)
    else:
        all_res = {}
    cur_best_miou = -1
    cur_best_model = ''
    for i_iter in range(start_iter, max_iter + 1, step):
        restore_from = osp.join(cfg.TEST.SNAPSHOT_DIR[0], f'model_{i_iter}.pth')
        if not osp.exists(restore_from):
            if cfg.TEST.WAIT_MODEL:
                print('Waiting for model..!')
                while not osp.exists(restore_from):
                    time.sleep(5)
            else:
                # BUGFIX: the original fell through here (the `continue` was
                # commented out) and crashed in torch.load on the missing
                # checkpoint; skip it instead.
                continue
        print("Evaluating model", restore_from)
        if i_iter not in all_res:
            load_checkpoint_for_evaluation(models[0], restore_from, device)
            # eval
            hist = np.zeros((cfg.NUM_CLASSES, cfg.NUM_CLASSES))
            test_iter = iter(test_loader)
            for index in tqdm(range(len(test_loader))):
                image, label, _, name = next(test_iter)
                if not fixed_test_size:
                    # Per-sample upsampler matching this label's spatial size.
                    interp = nn.Upsample(size=(label.shape[1], label.shape[2]),
                                         mode='bilinear', align_corners=True)
                with torch.no_grad():
                    pred_main = models[0](image.cuda(device))[1]
                    output = interp(pred_main).cpu().data[0].numpy()
                    output = output.transpose(1, 2, 0)
                    output = np.argmax(output, axis=2)
                label = label.numpy()[0]
                hist += fast_hist(label.flatten(), output.flatten(), cfg.NUM_CLASSES)
                if verbose and index > 0 and index % 100 == 0:
                    print('{:d} / {:d}: {:0.2f}'.format(
                        index, len(test_loader), 100 * np.nanmean(per_class_iu(hist))))
            inters_over_union_classes = per_class_iu(hist)
            all_res[i_iter] = inters_over_union_classes
            pickle_dump(all_res, cache_path)
        else:
            inters_over_union_classes = all_res[i_iter]
        computed_miou = round(np.nanmean(inters_over_union_classes) * 100, 2)
        if cur_best_miou < computed_miou:
            cur_best_miou = computed_miou
            cur_best_model = restore_from
        print('\tCurrent mIoU:', computed_miou)
        print('\tCurrent best model:', cur_best_model)
        print('\tCurrent best mIoU:', cur_best_miou)
        if verbose:
            display_stats(cfg, test_loader.dataset.class_names, inters_over_union_classes)
def load_checkpoint_for_evaluation(model, checkpoint, device):
    """Restore `checkpoint` weights into `model`, set eval mode, move to GPU."""
    model.load_state_dict(torch.load(checkpoint))
    # .eval() returns the module itself, so the device move can be chained.
    model.eval().cuda(device)
def display_stats(cfg, name_classes, inters_over_union_classes):
    """Print one line per class: its name and its IoU as a percentage."""
    for idx in range(cfg.NUM_CLASSES):
        pct = round(inters_over_union_classes[idx] * 100, 2)
        print(f'{name_classes[idx]}\t{pct}')
| 43.666667
| 137
| 0.591126
|
4a0ae9d4c9e2defa4f123d45689bbc8136144c0f
| 81,928
|
py
|
Python
|
tensorflow/lite/python/lite.py
|
EricLi404/tensorflow
|
23759800d89f7b5362c338d9a3fd72a6810c3e22
|
[
"Apache-2.0"
] | 2
|
2020-09-08T15:04:52.000Z
|
2020-09-08T15:04:54.000Z
|
tensorflow/lite/python/lite.py
|
EricLi404/tensorflow
|
23759800d89f7b5362c338d9a3fd72a6810c3e22
|
[
"Apache-2.0"
] | 2
|
2021-08-25T16:16:36.000Z
|
2022-02-10T04:51:33.000Z
|
tensorflow/lite/python/lite.py
|
EricLi404/tensorflow
|
23759800d89f7b5362c338d9a3fd72a6810c3e22
|
[
"Apache-2.0"
] | 2
|
2019-03-07T05:54:18.000Z
|
2019-05-16T20:31:25.000Z
|
# Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite tooling helper functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import shutil
import tempfile
import warnings
from absl import logging
import six
from six import PY2
from google.protobuf import text_format as _text_format
from google.protobuf.message import DecodeError
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.lite.experimental.examples.lstm.rnn import dynamic_rnn # pylint: disable=unused-import
from tensorflow.lite.experimental.examples.lstm.rnn_cell import TFLiteLSTMCell # pylint: disable=unused-import
from tensorflow.lite.experimental.examples.lstm.rnn_cell import TfLiteRNNCell # pylint: disable=unused-import
from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op # pylint: disable=unused-import
from tensorflow.lite.experimental.tensorboard.ops_util import get_potentially_supported_ops # pylint: disable=unused-import
from tensorflow.lite.python import lite_constants as constants
from tensorflow.lite.python.convert import build_toco_convert_protos # pylint: disable=unused-import
from tensorflow.lite.python.convert import ConverterError # pylint: disable=unused-import
from tensorflow.lite.python.convert import mlir_quantize as _mlir_quantize
from tensorflow.lite.python.convert import mlir_sparsify as _mlir_sparsify
from tensorflow.lite.python.convert import OpsSet
from tensorflow.lite.python.convert import toco_convert # pylint: disable=unused-import
from tensorflow.lite.python.convert import toco_convert_graph_def as _toco_convert_graph_def
from tensorflow.lite.python.convert import toco_convert_impl as _toco_convert_impl
from tensorflow.lite.python.convert import toco_convert_protos # pylint: disable=unused-import
from tensorflow.lite.python.convert_saved_model import freeze_saved_model as _freeze_saved_model
from tensorflow.lite.python.interpreter import Interpreter # pylint: disable=unused-import
from tensorflow.lite.python.interpreter import load_delegate # pylint: disable=unused-import
from tensorflow.lite.python.op_hint import convert_op_hints_to_stubs # pylint: disable=unused-import
from tensorflow.lite.python.op_hint import is_ophint_converted as _is_ophint_converted
from tensorflow.lite.python.op_hint import OpHint # pylint: disable=unused-import
from tensorflow.lite.python.optimize import calibrator as _calibrator
from tensorflow.lite.python.util import build_debug_info_func as _build_debug_info_func
from tensorflow.lite.python.util import convert_debug_info_func as _convert_debug_info_func
from tensorflow.lite.python.util import freeze_graph as _freeze_graph
from tensorflow.lite.python.util import get_debug_info as _get_debug_info
from tensorflow.lite.python.util import get_grappler_config as _get_grappler_config
from tensorflow.lite.python.util import get_tensor_name as _get_tensor_name
from tensorflow.lite.python.util import get_tensors_from_tensor_names as _get_tensors_from_tensor_names
from tensorflow.lite.python.util import is_frozen_graph as _is_frozen_graph
from tensorflow.lite.python.util import modify_integer_quantized_model_io_type as _modify_integer_quantized_model_io_type
from tensorflow.lite.python.util import run_graph_optimizations as _run_graph_optimizations
from tensorflow.lite.python.util import set_tensor_shapes as _set_tensor_shapes
from tensorflow.python import keras as _keras
from tensorflow.python.client import session as _session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function as _def_function
from tensorflow.python.eager import function as _function
from tensorflow.python.framework import convert_to_constants as _convert_to_constants
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework.errors_impl import NotFoundError as _NotFoundError
from tensorflow.python.framework.importer import import_graph_def as _import_graph_def
from tensorflow.python.keras.saving import saving_utils as _saving_utils
from tensorflow.python.lib.io import file_io as _file_io
from tensorflow.python.saved_model import loader_impl as _loader_impl
from tensorflow.python.saved_model import signature_constants as _signature_constants
from tensorflow.python.saved_model import tag_constants as _tag_constants
from tensorflow.python.saved_model.load import load as _load
from tensorflow.python.saved_model.loader_impl import parse_saved_model_with_debug_info as _parse_saved_model_with_debug_info
from tensorflow.python.util import deprecation as _deprecation
from tensorflow.python.util.tf_export import tf_export as _tf_export
@_tf_export("lite.Optimize")
class Optimize(enum.Enum):
"""Enum defining the optimizations to apply when generating tflite graphs.
Some optimizations may come at the cost of accuracy.
DEFAULT
Default optimization strategy.
Converter will do its best to improve size and latency based on the
information provided.
Enhanced optimizations are gained by providing a representative_dataset.
This is recommended, and is currently equivalent to the modes below.
Currently, weights will be quantized and if representative_dataset is
provided, activations for quantizable operations will also be quantized.
OPTIMIZE_FOR_SIZE
Deprecated. Does the same as DEFAULT.
OPTIMIZE_FOR_LATENCY
Deprecated. Does the same as DEFAULT.
"""
# Default optimization strategy.
#
# Converter will do its best to improve size and latency based on the
# information provided.
# Enhanced optimizations can be gained by providing a representative_dataset.
# This is recommended, and is currently equivalent to the modes below.
# Currently, weights will be quantized and if representative_dataset is
# provided, activations for quantizable operations will also be quantized.
DEFAULT = "DEFAULT"
# Deprecated. Does the same as DEFAULT.
OPTIMIZE_FOR_SIZE = "OPTIMIZE_FOR_SIZE"
# Deprecated. Does the same as DEFAULT.
OPTIMIZE_FOR_LATENCY = "OPTIMIZE_FOR_LATENCY"
def __str__(self):
return str(self.value)
@_tf_export("lite.RepresentativeDataset")
class RepresentativeDataset(object):
"""Representative dataset to evaluate optimizations.
A representative dataset that can be used to evaluate optimizations by the
converter. E.g. converter can use these examples to estimate (min, max) ranges
by calibrating the model on inputs. This can allow converter to quantize a
converted floating point model.
"""
def __init__(self, input_gen):
"""Creates a representative dataset.
Args:
input_gen: an input generator that can be used to generate input samples
for the model. This must be a callable object that returns an object
that supports the `iter()` protocol (e.g. a generator function). The
elements generated must have same type and shape as inputs to the model.
"""
self.input_gen = input_gen
@_tf_export("lite.TargetSpec")
class TargetSpec(object):
"""Specification of target device.
Details about target device. Converter optimizes the generated model for
specific device.
Attributes:
supported_ops: Experimental flag, subject to change. Set of OpsSet options
supported by the device. (default set([OpsSet.TFLITE_BUILTINS]))
supported_types: List of types for constant values on the target device.
Supported values are types exported by lite.constants. Frequently, an
optimization choice is driven by the most compact (i.e. smallest) type in
this list (default [constants.FLOAT])
"""
def __init__(self, supported_ops=None, supported_types=None):
if supported_ops is None:
supported_ops = set([OpsSet.TFLITE_BUILTINS])
self.supported_ops = supported_ops
if supported_types is None:
supported_types = []
self.supported_types = supported_types
class QuantizationMode(object):
  """QuantizationMode determines the quantized conversion from user options.

  Maps the user-facing flags (optimizations, target_spec,
  representative_dataset) plus the graph itself onto one of several mutually
  exclusive quantization strategies: post-training int8 / int16x8 /
  dynamic-range / fp16, training-time int8, or plain fp32 execution.
  """

  def __init__(self, optimizations, target_spec, representative_dataset,
               graph_def):
    # List of Optimize enum values requested by the user.
    self._optimizations = optimizations
    # TargetSpec carrying supported_ops / supported_types.
    self._target_spec = target_spec
    # Calibration data generator; may be None.
    self._representative_dataset = representative_dataset
    # GraphDef to convert; scanned for training-time quantization ops.
    self._graph_def = graph_def

    # Fail fast when int8-only targeting is combined with incompatible flags.
    self._validate_int8_required()

  def post_training_int8_no_float(self):
    """Post training int8 quantize, disallow float fallback."""
    return (self._is_int8_target_required() and
            not self._is_int16x8_target_required() and
            self._representative_dataset is not None)

  def post_training_int8_allow_float(self):
    """Post training int8 quantize, allow float fallback."""
    return (self._any_optimization_enabled() and
            not self._is_int16x8_target_required() and
            self._representative_dataset is not None and
            self._smallest_supported_type() == constants.INT8)

  def is_post_training_integer_quantize_8(self):
    """Post training integer 8 quantization."""
    return (self.post_training_int8_no_float() or
            self.post_training_int8_allow_float())

  def is_post_training_integer_quantize_16x8(self):
    """Post training integer 16x8 quantization."""
    return (self.post_training_int16x8_no_float() or
            self.post_training_int16x8_allow_float())

  def is_post_training_integer_quantize(self):
    """Post training integer quantization."""
    return (self.is_post_training_integer_quantize_8() or
            self.is_post_training_integer_quantize_16x8())

  def training_time_int8_allow_float(self):
    """Training-time int8 quantize, allow float fallback."""
    # Any optimization request that is not one of the post-training paths
    # below is treated as training-time (quantization-aware) conversion.
    return (self._any_optimization_enabled() and
            not self.post_training_dynamic_range_int8() and
            not self.post_training_fp16())

  def post_training_int16x8_no_float(self):
    """Post training int16x8 quantize, disallow float fallback."""
    return (not self._is_int8_target_required() and
            self._is_int16x8_target_required() and
            not self._is_allow_float() and
            self._representative_dataset is not None)

  def post_training_int16x8_allow_float(self):
    """Post training int16x8 quantize, allow float fallback."""
    return self._is_int16x8_target_required() and self._is_allow_float()

  def post_training_dynamic_range_int8(self):
    """Post training int8 const, on-the-fly int8 quantize of dynamic tensors."""
    # Post-training dynamic range quantization is only enabled if post-training
    # int8 quantization and training time quantization was not done.
    return (self._any_optimization_enabled() and
            self._representative_dataset is None and
            not self.contains_training_quant_op() and
            self._smallest_supported_type() == constants.INT8)

  def post_training_fp16(self):
    """Post training fp16 quantize."""
    return (self._any_optimization_enabled() and
            self._smallest_supported_type() == constants.FLOAT16)

  def fp32_execution(self):
    """If none of the above are true."""
    return not (self.post_training_int8_no_float() or
                self.post_training_int8_allow_float() or
                self.training_time_int8_allow_float() or
                self.post_training_int16x8_no_float() or
                self.post_training_int16x8_allow_float() or
                self.post_training_dynamic_range_int8() or
                self.post_training_fp16())

  def activations_type(self):
    # Activation dtype follows the targeted scheme: int16 for 16x8 mode,
    # int8 otherwise.
    return constants.INT16 if self._is_int16x8_target_required() \
        else constants.INT8

  def converter_flags(self, inference_ty=None, inference_input_ty=None):
    """Flags to the converter."""
    if self.is_post_training_integer_quantize():
      # The inference_input_type is for the quantizer, then we need to keep the
      # converter inference_input_type to float.
      inference_input_ty = constants.FLOAT

    if self.training_time_int8_allow_float():
      return {
          "inference_type": inference_ty if inference_ty else \
            self.activations_type(),
          "inference_input_type":
              inference_input_ty if inference_input_ty else constants.FLOAT,
          "post_training_quantize": False,  # disable dynamic range quantization
          "quantize_to_float16": False  # disable float16 quantization
      }
    elif self.post_training_dynamic_range_int8():
      return {
          "inference_type": constants.FLOAT,
          "inference_input_type": constants.FLOAT,
          "post_training_quantize": True,  # enable dynamic range quantization
          "quantize_to_float16": False  # disable float16 quantization
      }
    elif self.post_training_fp16():
      return {
          "inference_type": constants.FLOAT,
          "inference_input_type": constants.FLOAT,
          "post_training_quantize": True,
          "quantize_to_float16": True  # enable float16 quantization
      }
    else:
      # Note this might still trigger (uint8) quantization to be compatible with
      # TOCO.
      return {
          "inference_type": inference_ty if inference_ty else constants.FLOAT,
          "inference_input_type": inference_input_ty,
          # NOTE(review): value is False, so dynamic range quantization is
          # disabled on this path (the original comment said "enable").
          "post_training_quantize": False,
          "quantize_to_float16": False  # disable float16 quantization
      }

  def quantizer_flags(self, input_ty=None, output_ty=None):
    """Default flags to the TFMOT quantizer.

    Returns a (should_quantize, flags_or_None) pair: the bool says whether
    the post-training quantizer should run at all.
    """
    inference_input_type = input_ty if input_ty else constants.FLOAT
    inference_output_type = output_ty if output_ty else constants.FLOAT

    if self.post_training_int8_no_float() \
        or self.post_training_int16x8_no_float():
      return True, {
          "inference_input_type": inference_input_type,
          "inference_output_type": inference_output_type,
          "activations_type": self.activations_type(),
          "allow_float": False
      }
    elif self.post_training_int8_allow_float() \
        or self.post_training_int16x8_allow_float():
      return True, {
          "inference_input_type": inference_input_type,
          "inference_output_type": inference_output_type,
          "activations_type": self.activations_type(),
          "allow_float": True
      }
    else:
      return False, None

  def flags_modify_model_io_type(
      self, input_type=constants.FLOAT, output_type=constants.FLOAT):
    """Flags for modifying the input and output type of a tflite model."""
    is_post_training_quantize = self.quantizer_flags(input_type, output_type)[0]
    is_training_time_only_quantize = self.training_time_int8_allow_float() and \
      not is_post_training_quantize

    # TODO(b/153576658): Consolidate post/during training quantization workflows
    # to modify model input/output type after MLIR conversion.
    if is_training_time_only_quantize:
      return {
          "inference_input_type": input_type,
          "inference_output_type": output_type,
      }
    else:
      return None

  # Below are helpers for the above functions.

  def _validate_int8_required(self):
    """Int8 mode requires certain parameters to exist and be compatible."""
    if not self._is_int8_target_required():
      return
    # Int8-only targeting conflicts with any larger smallest supported type.
    if self._target_spec.supported_types and (self._smallest_supported_type() !=
                                              constants.INT8):
      raise ValueError("TFLITE_BUILTINS_INT8 requires smallest supported "
                       "type to be INT8.")
    if self._representative_dataset:
      if not isinstance(self._representative_dataset, RepresentativeDataset):
        # Wrap a bare generator callable for uniform downstream access.
        self._representative_dataset = RepresentativeDataset(
            self._representative_dataset)
      if self._representative_dataset.input_gen is None:
        raise ValueError(
            "Provide an input generator for representative_dataset")
    else:
      # TODO(b/150661651): Relax this check for QAT.
      raise ValueError("representative_dataset is required when specifying "
                       "TFLITE_BUILTINS_INT8 or INT8 supported types.")

  def _is_int8_target_required(self):
    # True when the user targets int8 exclusively, either via the ops set or
    # via the supported types list.
    return (set([OpsSet.TFLITE_BUILTINS_INT8]) == set(
        self._target_spec.supported_ops) or
            set(self._target_spec.supported_types) == set([constants.INT8]))

  def _is_int16x8_target_required(self):
    return bool(
        set(self._target_spec.supported_ops).intersection([
            OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
        ]))

  def _is_allow_float(self):
    # Float fallback is allowed iff the plain builtin ops set is also targeted.
    return bool(
        set(self._target_spec.supported_ops).intersection(
            [OpsSet.TFLITE_BUILTINS]))

  def _any_optimization_enabled(self):
    return bool(
        set(self._optimizations).intersection([
            Optimize.OPTIMIZE_FOR_LATENCY, Optimize.OPTIMIZE_FOR_SIZE,
            Optimize.DEFAULT
        ]))

  def _smallest_supported_type(self):
    if self._target_spec.supported_types:
      # Smallest by byte size of the dtype.
      return min(self._target_spec.supported_types, key=lambda x: x.size)
    else:
      # The default smallest supported type is INT8.
      return constants.INT8

  def contains_training_quant_op(self):
    """Checks if the graph contains any training-time quantization ops."""
    training_quant_ops = frozenset({
        "FakeQuantWithMinMaxVars", "FakeQuantWithMinMaxVarsPerChannel",
        "QuantizeAndDequantizeV2", "QuantizeAndDequantizeV3"
    })

    for node_def in self._graph_def.node:
      if node_def.op in training_quant_ops:
        return True
    return False
class TFLiteConverterBase(object):
"""Converter subclass to share functionality between V1 and V2 converters."""
def __init__(self):
self.allow_custom_ops = False
self.target_spec = TargetSpec()
self.optimizations = []
self.representative_dataset = None
self.experimental_new_converter = True
self._experimental_new_quantizer = False
self._experimental_calibrate_only = False
# The 'GraphDebugInfo' contains the stack traces of all the original nodes
# in the `GraphDef` to the converter.
self._debug_info = None
self.saved_model_dir = None
self._saved_model_tags = None
self._saved_model_version = 0
self._saved_model_exported_names = []
self._experimental_sparsify_model = False
def _grappler_config(self, optimizers=None):
"""Creates a tf.compat.v1.ConfigProto for configuring Grappler.
Args:
optimizers: List of strings that represents the list of optimizers.
Returns:
tf.ConfigProto.
"""
if not optimizers:
optimizers = []
# MLIR converter will take care of constant folding instead of grappler.
if not self.experimental_new_converter:
optimizers.append("constfold")
is_only_flex_enabled = (
set([OpsSet.SELECT_TF_OPS]) == set(self.target_spec.supported_ops))
if is_only_flex_enabled:
# The layout optimizer turns NHCW to NCHW. This provides performance
# optimizations when Flex mode is enabled. However, this is not compatible
# with builtin ops.
optimizers.append("layout")
return _get_grappler_config(optimizers)
def _calibrate_quantize_model(self, result, inference_input_type,
inference_output_type, activations_type,
allow_float):
"""Calibrate and quantize the model."""
if not isinstance(self.representative_dataset, RepresentativeDataset):
self.representative_dataset = RepresentativeDataset(
self.representative_dataset)
calibrate_quantize = _calibrator.Calibrator(result)
if self._experimental_calibrate_only or self._experimental_new_quantizer:
calibrated = calibrate_quantize.calibrate(
self.representative_dataset.input_gen)
if self._experimental_calibrate_only:
return calibrated
elif self._experimental_new_quantizer:
return _mlir_quantize(calibrated)
else:
return calibrate_quantize.calibrate_and_quantize(
self.representative_dataset.input_gen, inference_input_type,
inference_output_type, allow_float, activations_type)
def _is_unknown_shapes_allowed(self):
# Unknown dimensions are only allowed with the new converter.
return self.experimental_new_converter
def _get_base_converter_args(self):
"""Returns the base converter args.
Returns:
{key str: val}
"""
args = {
"input_format": constants.TENSORFLOW_GRAPHDEF,
"allow_custom_ops": self.allow_custom_ops,
"debug_info": self._debug_info,
"target_ops": self.target_spec.supported_ops,
"enable_mlir_converter": self.experimental_new_converter,
}
if self.saved_model_dir:
args.update({
"saved_model_dir": self.saved_model_dir,
"saved_model_version": self._saved_model_version,
"saved_model_tags": self._saved_model_tags,
"saved_model_exported_names": self._saved_model_exported_names,
})
return args
def _contains_function_with_implements_attr(self, saved_model_proto):
meta_graph = saved_model_proto.meta_graphs[0]
for function in meta_graph.graph_def.library.function:
if function.attr.get("_implements", None) or function.attr.get(
"api_implements", None):
return True
return False
  def _parse_saved_model_args(self, always_enable_saved_model_import=False):
    """Parses SavedModel arguments from the given Keras/RNN SavedModel.

    Decides whether the MLIR saved-model import path can be used; if any
    precondition fails, `self.saved_model_dir` is reset to None so conversion
    falls back to the frozen-graph path.

    Args:
      always_enable_saved_model_import: Bool. When the value is true, it enables
        MLIR saved model import path regardless of checking the conditions.

    Raises:
      ValueError: If the SavedModel schema version is not 1 or 2.
    """
    # The saved-model import path is only available in the MLIR converter.
    if not self.experimental_new_converter:
      self.saved_model_dir = None
      return
    if self.saved_model_dir:
      try:
        saved_model_proto, _ = (
            _parse_saved_model_with_debug_info(self.saved_model_dir))
      except OSError:
        # If it fails to read the given saved model, it will fall back to the
        # frozen graph def path.
        self.saved_model_dir = None
        return
      # Unless forced, only use saved-model import when the model contains
      # composite-op implementation functions the MLIR converter understands.
      if (not always_enable_saved_model_import and
          not self._contains_function_with_implements_attr(saved_model_proto)):
        self.saved_model_dir = None
        return
      # Normalize "export everything" to an empty list for downstream flags.
      if not self._saved_model_exported_names:
        self._saved_model_exported_names = []
      self._saved_model_version = saved_model_proto.saved_model_schema_version
      if self._saved_model_version == 0:
        # Schema version zero is unsupported; fall back to frozen graph.
        self.saved_model_dir = None
        logging.warning("SavedModel schema version is zero.")
        return
      if self._saved_model_version not in [1, 2]:
        raise ValueError("SavedModel file format({0}) is not supported".format(
            self._saved_model_version))
class TFLiteConverterBaseV2(TFLiteConverterBase):
  """Converter subclass to share functionality between V2 converters."""

  def __init__(self):
    """Constructor for TFLiteConverter."""
    super(TFLiteConverterBaseV2, self).__init__()
    # Default to float I/O; integer I/O is only valid for quantized models
    # (validated in _validate_inference_input_output_types).
    self.inference_input_type = constants.FLOAT
    self.inference_output_type = constants.FLOAT

  def _validate_inference_input_output_types(self, quant_mode):
    """Validate inference_input_type and inference_output_type flags.

    Args:
      quant_mode: QuantizationMode describing the requested quantization.

    Raises:
      ValueError: If the requested I/O types are not compatible with the
        quantization mode.
    """
    default_types = [constants.FLOAT]
    # We support integer input/output for integer quantized models only.
    if quant_mode.training_time_int8_allow_float():
      if quant_mode.is_post_training_integer_quantize_16x8():
        # 16x8 quantization supports int16 I/O in addition to float.
        all_types = default_types + [constants.INT16]
      else:
        all_types = default_types + [constants.INT8, constants.QUANTIZED_UINT8]
      if self.inference_input_type not in all_types or \
          self.inference_output_type not in all_types:
        all_types_names = ["tf." + t.name for t in all_types]
        raise ValueError("The inference_input_type and inference_output_type "
                         "must be in {}.".format(all_types_names))
    elif self.inference_input_type not in default_types or \
        self.inference_output_type not in default_types:
      raise ValueError("The inference_input_type and inference_output_type "
                       "must be tf.float32.")

  def convert(self, graph_def, input_tensors, output_tensors):
    """Converts a TensorFlow GraphDef based on instance variables.

    Args:
      graph_def: Frozen TensorFlow GraphDef.
      input_tensors: List of input tensors. Type and shape are computed using
        `foo.shape` and `foo.dtype`.
      output_tensors: List of output tensors (only .name is used from this).

    Returns:
      The converted data in serialized format.

    Raises:
      ValueError:
        No concrete functions is specified.
        Multiple concrete functions are specified.
        Input shape is not specified.
        Invalid quantization parameters.
    """
    quant_mode = QuantizationMode(self.optimizations, self.target_spec,
                                  self.representative_dataset, graph_def)

    self._validate_inference_input_output_types(quant_mode)

    if not self._is_unknown_shapes_allowed():
      # Checks dimensions in input tensor.
      for tensor in input_tensors:
        # Note that shape_list might be empty for scalar shapes.
        shape_list = tensor.shape.as_list()
        if None in shape_list[1:]:
          raise ValueError(
              "None is only supported in the 1st dimension. Tensor '{0}' has "
              "invalid shape '{1}'.".format(
                  _get_tensor_name(tensor), shape_list))
        elif shape_list and shape_list[0] is None:
          # Set the batch size to 1 if undefined.
          shape = tensor.shape.as_list()
          shape[0] = 1
          tensor.set_shape(shape)

    # Build debug info either from the traced function graph or, when a
    # trackable object is available, from its saved graph debug info.
    if self._trackable_obj is None:
      self._debug_info = _get_debug_info(
          _build_debug_info_func(self._funcs[0].graph), graph_def)
    else:
      self._debug_info = _get_debug_info(
          _convert_debug_info_func(self._trackable_obj.graph_debug_info),
          graph_def)

    converter_kwargs = self._get_base_converter_args()
    converter_kwargs.update(quant_mode.converter_flags())

    if not self.experimental_new_converter:
      logging.warning(
          "Please consider switching to the new converter by setting "
          "experimental_new_converter=True. "
          "The old converter (TOCO) is deprecated.")
    else:
      logging.info("Using new converter: If you encounter a problem "
                   "please file a bug. You can opt-out "
                   "by setting experimental_new_converter=False")

    # Converts model.
    result = _toco_convert_impl(
        input_data=graph_def,
        input_tensors=input_tensors,
        output_tensors=output_tensors,
        **converter_kwargs)

    # Post-conversion steps: calibration/quantization, optional I/O type
    # rewriting, and optional sparsification — each applied only if requested.
    calibrate_and_quantize, flags = quant_mode.quantizer_flags(
        self.inference_input_type, self.inference_output_type)
    if calibrate_and_quantize:
      result = self._calibrate_quantize_model(result, **flags)

    flags_modify_model_io_type = quant_mode.flags_modify_model_io_type(
        self.inference_input_type, self.inference_output_type)
    if flags_modify_model_io_type:
      result = _modify_integer_quantized_model_io_type(
          result, **flags_modify_model_io_type)

    if self._experimental_sparsify_model:
      result = _mlir_sparsify(result)

    return result
class TFLiteSavedModelConverterV2(TFLiteConverterBaseV2):
  """Converts the given SavedModel into TensorFlow Lite model.

  Attributes:
    saved_model_dir: Directory of the SavedModel.
  """

  def __init__(self,
               saved_model_dir,
               saved_model_tags=None,
               saved_model_exported_names=None,
               trackable_obj=None):
    """Constructor for TFLiteConverter.

    Args:
      saved_model_dir: Directory of the SavedModel.
      saved_model_tags: Set of tags identifying the MetaGraphDef within the
        SavedModel to analyze. All tags in the tag set must be present. (default
        set(SERVING)).
      saved_model_exported_names: Names to be exported (default: export all)
        when the saved model import path is on.
      trackable_obj: tf.AutoTrackable object associated with `funcs`. A
        reference to this object needs to be maintained so that Variables do not
        get garbage collected since functions have a weak reference to
        Variables. This is only required when the tf.AutoTrackable object is not
        maintained by the user (e.g. `from_saved_model`).
    """
    super(TFLiteSavedModelConverterV2, self).__init__()
    self.saved_model_dir = saved_model_dir
    self._saved_model_tags = saved_model_tags
    self._saved_model_exported_names = saved_model_exported_names
    self._trackable_obj = trackable_obj
    # Force the MLIR saved-model import path since a SavedModel is provided.
    self._parse_saved_model_args(always_enable_saved_model_import=True)

  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format.

    Raises:
      ValueError:
        No concrete functions is specified.
        Multiple concrete functions are specified.
        Input shape is not specified.
        Invalid quantization parameters.
    """
    # Load the MetaGraphDef and resolve the serving signature's input/output
    # tensors from the loaded graph before delegating to the base converter.
    graph = _ops.Graph()
    saved_model = _loader_impl.SavedModelLoader(self.saved_model_dir)
    saved_model.load_graph(graph, tags=self._saved_model_tags)
    meta_graph = saved_model.get_meta_graph_def_from_tags(
        self._saved_model_tags)
    signature_def = meta_graph.signature_def[
        _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    input_tensors = [
        graph.get_tensor_by_name(signature_def.inputs[key].name)
        for key in signature_def.inputs
    ]
    output_tensors = [
        graph.get_tensor_by_name(signature_def.outputs[key].name)
        for key in signature_def.outputs
    ]
    return super(TFLiteSavedModelConverterV2,
                 self).convert(meta_graph.graph_def, input_tensors,
                               output_tensors)
class TFLiteKerasModelConverterV2(TFLiteConverterBaseV2):
  """Converts the given Keras model into TensorFlow Lite model."""

  def __init__(self, keras_model, trackable_obj=None):
    """Constructor for TFLiteConverter.

    Args:
      keras_model: tf.Keras.Model.
      trackable_obj: tf.AutoTrackable object associated with `funcs`. A
        reference to this object needs to be maintained so that Variables do not
        get garbage collected since functions have a weak reference to
        Variables. This is only required when the tf.AutoTrackable object is not
        maintained by the user (e.g. `from_saved_model`).
    """
    super(TFLiteKerasModelConverterV2, self).__init__()
    self._keras_model = keras_model
    self._trackable_obj = trackable_obj

  def _convert_as_saved_model(self):
    """Converts a Keras model as a saved model.

    Saves the model to a temporary SavedModel directory so that the MLIR
    saved-model import path can be used; returns None to signal that the
    caller should fall back to the traced-function pipeline.

    Returns:
      The converted data in serialized format, or None if the saved-model
      path could not be used.
    """
    temp_dir = tempfile.mkdtemp()
    try:
      try:
        self._keras_model.save(temp_dir, save_format="tf")
      except Exception:  # pylint: disable=broad-except
        # When storing the given keras model to a saved model is failed, let's
        # use original keras model conversion pipeline.
        return None
      self.saved_model_dir = temp_dir
      self._saved_model_tags = set([_tag_constants.SERVING])
      self._saved_model_exported_names = [
          _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
      ]
      self._parse_saved_model_args()
      # _parse_saved_model_args resets saved_model_dir to None when the
      # saved-model import path cannot be used.
      if self.saved_model_dir:
        graph = _ops.Graph()
        saved_model = _loader_impl.SavedModelLoader(self.saved_model_dir)
        saved_model.load_graph(graph, tags=self._saved_model_tags)
        meta_graph = saved_model.get_meta_graph_def_from_tags(
            self._saved_model_tags)
        signature_def = meta_graph.signature_def[
            _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
        input_tensors = [
            graph.get_tensor_by_name(signature_def.inputs[key].name)
            for key in signature_def.inputs
        ]
        output_tensors = [
            graph.get_tensor_by_name(signature_def.outputs[key].name)
            for key in signature_def.outputs
        ]
        # Keep the loaded SavedModel alive so its Variables are not garbage
        # collected during conversion.
        self._trackable_obj = _load(self.saved_model_dir,
                                    self._saved_model_tags)
        return super(TFLiteKerasModelConverterV2,
                     self).convert(meta_graph.graph_def, input_tensors,
                                   output_tensors)
    finally:
      # Always clean up the temporary SavedModel directory.
      shutil.rmtree(temp_dir, True)

  def convert(self):
    """Converts a keras model based on instance variables.

    Returns:
      The converted data in serialized format.

    Raises:
      ValueError:
        Multiple concrete functions are specified.
        Input shape is not specified.
        Invalid quantization parameters.
    """
    # Prefer the SavedModel-based path; fall back to tracing the model call.
    saved_model_convert_result = self._convert_as_saved_model()
    if saved_model_convert_result:
      return saved_model_convert_result

    input_signature = None
    # If the model's call is not a `tf.function`, then we need to first get its
    # input signature from `model_input_signature` method. We can't directly
    # call `trace_model_call` because otherwise the batch dimension is set
    # to None.
    # Once we have better support for dynamic shapes, we can remove this.
    if not isinstance(self._keras_model.call, _def_function.Function):
      # Pass `keep_original_batch_size=True` will ensure that we get an input
      # signature including the batch dimension specified by the user.
      input_signature = _saving_utils.model_input_signature(
          self._keras_model, keep_original_batch_size=True)

    func = _saving_utils.trace_model_call(self._keras_model, input_signature)
    concrete_func = func.get_concrete_function()
    self._funcs = [concrete_func]

    # Freeze variables into constants and drop resource-typed inputs, which
    # correspond to the (now inlined) variables.
    frozen_func, graph_def = (
        _convert_to_constants.convert_variables_to_constants_v2_as_graph(
            self._funcs[0], lower_control_flow=False))

    input_tensors = [
        tensor for tensor in frozen_func.inputs
        if tensor.dtype != _dtypes.resource
    ]
    output_tensors = frozen_func.outputs

    # Run a Grappler pass.
    grappler_config = self._grappler_config()
    # Skip running grappler when there are no optimizers to run. If not,
    # grappler will run with the default optimizer set and it will lead to
    # causing an unexpected behavior.
    if grappler_config.graph_options.rewrite_options.optimizers:
      graph_def = _run_graph_optimizations(
          graph_def,
          input_tensors,
          output_tensors,
          config=grappler_config,
          graph=frozen_func.graph)

    return super(TFLiteKerasModelConverterV2,
                 self).convert(graph_def, input_tensors, output_tensors)
class TFLiteFrozenGraphConverterV2(TFLiteConverterBaseV2):
  """Converts the given frozen graph into TensorFlow Lite model."""

  def __init__(self, funcs, trackable_obj=None):
    """Constructor for TFLiteConverter.

    Args:
      funcs: List of TensorFlow ConcreteFunctions. The list should not contain
        duplicate elements.
      trackable_obj: tf.AutoTrackable object associated with `funcs`. A
        reference to this object needs to be maintained so that Variables do not
        get garbage collected since functions have a weak reference to
        Variables. This is only required when the tf.AutoTrackable object is not
        maintained by the user (e.g. `from_saved_model`).
    """
    super(TFLiteFrozenGraphConverterV2, self).__init__()
    self._funcs = funcs
    self._trackable_obj = trackable_obj

  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format.

    Raises:
      ValueError:
        No concrete functions is specified.
        Multiple concrete functions are specified.
        Input shape is not specified.
        Invalid quantization parameters.
    """
    # TODO(b/130297984): Add support for converting multiple function.
    num_funcs = len(self._funcs)
    if num_funcs == 0:
      raise ValueError("No ConcreteFunction is specified.")
    if num_funcs > 1:
      raise ValueError("This converter can only convert a single "
                       "ConcreteFunction. Converting multiple functions is "
                       "under development.")

    # Inline variables as constants; resource-typed inputs are dropped since
    # they correspond to the now-inlined variables.
    frozen, graph = (
        _convert_to_constants.convert_variables_to_constants_v2_as_graph(
            self._funcs[0], lower_control_flow=False))

    in_tensors = [
        tensor for tensor in frozen.inputs if tensor.dtype != _dtypes.resource
    ]
    out_tensors = frozen.outputs

    # Only invoke Grappler when optimizers are explicitly requested; an empty
    # optimizer list would otherwise trigger the default optimizer set, which
    # behaves unexpectedly here.
    config = self._grappler_config()
    if config.graph_options.rewrite_options.optimizers:
      graph = _run_graph_optimizations(
          graph,
          in_tensors,
          out_tensors,
          config=config,
          graph=frozen.graph)

    return super(TFLiteFrozenGraphConverterV2,
                 self).convert(graph, in_tensors, out_tensors)
@_tf_export("lite.TFLiteConverter", v1=[])
class TFLiteConverterV2(TFLiteFrozenGraphConverterV2):
  """Converts a TensorFlow model into TensorFlow Lite model.

  Attributes:
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When False, any unknown operation is an error. When True, custom ops are
      created for any op that is unknown. The developer needs to provide these
      to the TensorFlow Lite runtime with a custom resolver. (default False)
    optimizations: Experimental flag, subject to change. A list of optimizations
      to apply when converting the model. E.g. `[Optimize.DEFAULT]`
    representative_dataset: A representative dataset that can be used to
      generate input and output samples for the model. The converter can use the
      dataset to evaluate different optimizations. Note that this is an optional
      attribute but it is necessary if INT8 is the only support builtin ops in
      target ops.
    target_spec: Experimental flag, subject to change. Specification of target
      device.
    inference_input_type: Data type of the input layer. Note that integer types
      (tf.int8 and tf.uint8) are currently only supported for post training
      integer quantization and quantization aware training. (default tf.float32,
      must be in {tf.float32, tf.int8, tf.uint8})
    inference_output_type: Data type of the output layer. Note that integer
      types (tf.int8 and tf.uint8) are currently only supported for post
      training integer quantization and quantization aware training. (default
      tf.float32, must be in {tf.float32, tf.int8, tf.uint8})
    experimental_new_converter: Experimental flag, subject to change. Enables
      MLIR-based conversion instead of TOCO conversion. (default True)

  Example usage:

  ```python
  # Converting a SavedModel to a TensorFlow Lite model.
  converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
  tflite_model = converter.convert()

  # Converting a tf.Keras model to a TensorFlow Lite model.
  converter = tf.lite.TFLiteConverter.from_keras_model(model)
  tflite_model = converter.convert()

  # Converting ConcreteFunctions to a TensorFlow Lite model.
  converter = tf.lite.TFLiteConverter.from_concrete_functions([func])
  tflite_model = converter.convert()
  ```
  """

  # pylint: disable=useless-super-delegation
  def __init__(self, funcs, trackable_obj=None):
    """Constructor for TFLiteConverter.

    Args:
      funcs: List of TensorFlow ConcreteFunctions. The list should not contain
        duplicate elements.
      trackable_obj: tf.AutoTrackable object associated with `funcs`. A
        reference to this object needs to be maintained so that Variables do not
        get garbage collected since functions have a weak reference to
        Variables. This is only required when the tf.AutoTrackable object is not
        maintained by the user (e.g. `from_saved_model`).
    """
    super(TFLiteConverterV2, self).__init__(funcs, trackable_obj)

  @classmethod
  def from_concrete_functions(cls, funcs):
    """Creates a TFLiteConverter object from ConcreteFunctions.

    Args:
      funcs: List of TensorFlow ConcreteFunctions. The list should not contain
        duplicate elements. Currently converter can only convert a single
        ConcreteFunction. Converting multiple functions is under development.

    Returns:
      TFLiteConverter object.

    Raises:
      Invalid input type.
    """
    # Validate up front so the caller gets an actionable message rather than a
    # failure deep inside conversion.
    for func in funcs:
      if not isinstance(func, _function.ConcreteFunction):
        message = "This function takes in a list of ConcreteFunction."
        if isinstance(func, _def_function.Function):
          message += (" To get the ConcreteFunction from a Function,"
                      " call get_concrete_function.")
        raise ValueError(message)
    return cls(funcs)

  @classmethod
  def from_saved_model(cls, saved_model_dir, signature_keys=None, tags=None):
    """Creates a TFLiteConverter object from a SavedModel directory.

    Args:
      saved_model_dir: SavedModel directory to convert.
      signature_keys: List of keys identifying SignatureDef containing inputs
        and outputs. Elements should not be duplicated. By default the
        `signatures` attribute of the MetaGraphdef is used. (default
        saved_model.signatures)
      tags: Set of tags identifying the MetaGraphDef within the SavedModel to
        analyze. All tags in the tag set must be present. (default set(SERVING))

    Returns:
      TFLiteConverter object.

    Raises:
      Invalid signature keys.
    """
    # When run without eager enabled, this will return the legacy
    # TFLiteConverter.
    if not context.executing_eagerly():
      signature_key = None
      if signature_keys:
        if len(signature_keys) != 1:
          raise ValueError("Only support a single signature key.")
        else:
          signature_key = signature_keys[0]
      logging.warning("Invoking the TF1 implementation of TFLiteConverter "
                      "because eager is disabled. Consider enabling eager.")
      return TFLiteConverter.from_saved_model(saved_model_dir,
                                              signature_key=signature_key,
                                              tag_set=tags)

    # Ensures any graphs created in Eager mode are able to run. This is required
    # in order to create a tf.estimator.Exporter that exports a TFLite model.
    if tags is None:
      tags = set([_tag_constants.SERVING])

    with context.eager_mode():
      saved_model = _load(saved_model_dir, tags)
    if not signature_keys:
      signature_keys = saved_model.signatures

    if len(signature_keys) != 1:
      raise ValueError("Only support a single signature key.")

    funcs = []
    for key in signature_keys:
      if key not in saved_model.signatures:
        raise ValueError("Invalid signature key '{}' found. Valid keys are "
                         "'{}'.".format(key, ",".join(saved_model.signatures)))
      funcs.append(saved_model.signatures[key])

    # Prefer the dedicated saved-model converter when its import path applies
    # (its constructor clears saved_model_dir when it does not).
    saved_model_converter = TFLiteSavedModelConverterV2(saved_model_dir, tags,
                                                        signature_keys,
                                                        saved_model)
    if saved_model_converter.saved_model_dir:
      return saved_model_converter

    return cls(funcs, saved_model)

  @classmethod
  def from_keras_model(cls, model):
    """Creates a TFLiteConverter object from a Keras model.

    Args:
      model: tf.Keras.Model

    Returns:
      TFLiteConverter object.
    """
    return TFLiteKerasModelConverterV2(model)

  # pylint: disable=useless-super-delegation
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format.

    Raises:
      ValueError:
        No concrete functions is specified.
        Multiple concrete functions are specified.
        Input shape is not specified.
        Invalid quantization parameters.
    """
    return super(TFLiteConverterV2, self).convert()
class TFLiteConverterBaseV1(TFLiteConverterBase):
  """Converter subclass to share functionality between V1 converters."""

  def __init__(self, experimental_debug_info_func):
    """Constructor for TFLiteConverter.

    Args:
      experimental_debug_info_func: An experimental function to retrieve the
        graph debug info for a set of nodes from the `graph_def`.
    """
    super(TFLiteConverterBaseV1, self).__init__()
    # V1-only flags: TOCO inference types, quantization stats, graph dumping.
    self.inference_type = constants.FLOAT
    self.inference_input_type = None
    self.inference_output_type = None
    self.output_format = constants.TFLITE
    self.quantized_input_stats = {}
    self.default_ranges_stats = None
    self.drop_control_dependency = True
    self.reorder_across_fake_quant = False
    self.change_concat_input_ranges = False
    self.dump_graphviz_dir = None
    self.dump_graphviz_video = False
    self.conversion_summary_dir = None
    self._debug_info_func = experimental_debug_info_func
    self._custom_opdefs = None

  def __setattr__(self, name, value):
    # Deprecation shims: writes to the legacy `post_training_quantize` and
    # `target_ops` attributes are redirected to their replacements.
    if name == "post_training_quantize":
      warnings.warn("Property %s is deprecated, "
                    "please use optimizations=[Optimize.DEFAULT]"
                    " instead." % name)
      if value:
        self.optimizations = [Optimize.DEFAULT]
      else:
        self.optimizations = []
      return
    if name == "target_ops":
      warnings.warn("Property %s is deprecated, please use "
                    "target_spec.supported_ops instead." % name)
      self.target_spec.supported_ops = value
      return
    object.__setattr__(self, name, value)

  def __getattribute__(self, name):
    # Deprecation shims mirroring __setattr__: reads of the legacy attributes
    # are served from their replacements.
    if name == "post_training_quantize":
      warnings.warn("Property %s is deprecated, "
                    "please use optimizations=[Optimize.DEFAULT]"
                    " instead." % name)
      return Optimize.DEFAULT in set(self.optimizations)
    if name == "target_ops":
      warnings.warn("Property %s is deprecated, please use "
                    "target_spec.supported_ops instead." % name)
      return self.target_spec.supported_ops
    return object.__getattribute__(self, name)

  def _validate_quantized_input_stats(self, converter_kwargs, calibrate):
    """Ensure the `quantized_input_stats` flag is provided if required."""
    quantized_types = frozenset({constants.INT8, constants.QUANTIZED_UINT8})

    # Stats are only needed for quantized I/O when no calibration data is
    # available to derive the ranges.
    requires_quantized_input_stats = (
        (converter_kwargs["inference_type"] in quantized_types or
         converter_kwargs["inference_input_type"] in quantized_types) and
        not calibrate)

    if (requires_quantized_input_stats and
        not converter_kwargs["quantized_input_stats"]):
      raise ValueError("The `quantized_input_stats` flag must be defined when "
                       "either `inference_type` flag or `inference_input_type` "
                       "flag is set to tf.uint8 or tf.int8.")

  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format. Either a TFLite Flatbuffer or a
      Graphviz graph depending on value in `output_format`.

    Raises:
      ValueError:
        Input shape is not specified.
        None value for dimension in input_tensor.
    """
    quant_mode = QuantizationMode(self.optimizations, self.target_spec,
                                  self.representative_dataset, self._graph_def)

    if (not self._is_unknown_shapes_allowed() and self._has_valid_tensors()):
      # Checks dimensions in input tensor.
      for tensor in self._input_tensors:
        shape = tensor.shape
        if not shape:
          raise ValueError("Provide an input shape for input array "
                           "'{0}'.".format(_get_tensor_name(tensor)))
        # Note that shape_list might be empty for scalar shapes.
        shape_list = shape.as_list()
        if None in shape_list[1:]:
          raise ValueError(
              "None is only supported in the 1st dimension. Tensor '{0}' has "
              "invalid shape '{1}'.".format(
                  _get_tensor_name(tensor), shape_list))
        elif shape_list and shape_list[0] is None:
          self._set_batch_size(batch_size=1)

    # Get quantization stats. Ensures there is one stat per name if the stats
    # are specified.
    if self.quantized_input_stats:
      quantized_stats = []
      invalid_stats = []
      for name in self.get_input_arrays():
        if name in self.quantized_input_stats:
          quantized_stats.append(self.quantized_input_stats[name])
        else:
          invalid_stats.append(name)

      if invalid_stats:
        raise ValueError("Quantization input stats are not available for input "
                         "tensors '{0}'.".format(",".join(invalid_stats)))
    else:
      quantized_stats = None

    optimized_graph = self._graph_def
    if not self.saved_model_dir:
      # Disable grappler constant folding if there are training quant ops.
      if not quant_mode.contains_training_quant_op():
        try:
          # TODO(b/150163103): Merge `disabling lower using switch merge' calls.
          # Grappler will also try to lower while loop into switch merge
          # representation which is undesired for Ophints, so we simply remove
          # those attributes to prevent Grappler from doing so.
          graph_def = _convert_to_constants.disable_lower_using_switch_merge(
              optimized_graph)
          # Run function inlining optimization to ensure any models generated
          # through the from_frozen_graph path have been inlined.
          optimized_graph = _run_graph_optimizations(
              graph_def,
              self._input_tensors,
              self._output_tensors,
              config=self._grappler_config(["function"]))
        except Exception:  # pylint: disable=broad-except
          # Best-effort optimization: fall back to the unoptimized graph.
          optimized_graph = self._graph_def

    self._debug_info = _get_debug_info(self._debug_info_func, optimized_graph)

    converter_kwargs = self._get_base_converter_args()
    converter_kwargs.update(
        quant_mode.converter_flags(self.inference_type,
                                   self.inference_input_type))
    converter_kwargs.update({
        "output_format": self.output_format,
        "quantized_input_stats": quantized_stats,
        "default_ranges_stats": self.default_ranges_stats,
        "drop_control_dependency": self.drop_control_dependency,
        "reorder_across_fake_quant": self.reorder_across_fake_quant,
        "change_concat_input_ranges": self.change_concat_input_ranges,
        "dump_graphviz_dir": self.dump_graphviz_dir,
        "dump_graphviz_video": self.dump_graphviz_video,
        "conversion_summary_dir": self.conversion_summary_dir,
        "custom_opdefs": self._custom_opdefs,
    })

    if not self.experimental_new_converter:
      logging.warning(
          "Please consider switching to the new converter by setting "
          "experimental_new_converter=True. "
          "The old converter (TOCO) is deprecated.")
    else:
      logging.info("Using experimental converter: If you encountered a problem "
                   "please file a bug. You can opt-out "
                   "by setting experimental_new_converter=False")

    calibrate_quantize, flags = quant_mode.quantizer_flags(
        self.inference_input_type, self.inference_output_type)

    self._validate_quantized_input_stats(converter_kwargs, calibrate_quantize)

    # Converts model.
    if self._has_valid_tensors():
      result = _toco_convert_impl(
          input_data=optimized_graph,
          input_tensors=self._input_tensors,
          output_tensors=self._output_tensors,
          **converter_kwargs)
    else:
      # No tensor objects available (from_frozen_graph path); convert from
      # array names and shapes instead.
      result = _toco_convert_graph_def(
          input_data=optimized_graph,
          input_arrays_with_shape=self._input_arrays_with_shape,
          output_arrays=self._output_arrays,
          **converter_kwargs)

    if calibrate_quantize:
      result = self._calibrate_quantize_model(result, **flags)

    if self._experimental_sparsify_model:
      result = _mlir_sparsify(result)

    return result

  def get_input_arrays(self):
    """Returns a list of the names of the input tensors.

    Returns:
      List of strings.
    """
    if self._has_valid_tensors():
      return [_get_tensor_name(tensor) for tensor in self._input_tensors]
    else:
      return [name for name, _ in self._input_arrays_with_shape]

  def _has_valid_tensors(self):
    """Checks if the input and output tensors have been initialized.

    Returns:
      Bool.
    """
    return self._input_tensors and self._output_tensors

  def _set_batch_size(self, batch_size):
    """Sets the first dimension of the input tensor to `batch_size`.

    Args:
      batch_size: Batch size for the model. Replaces the first dimension of an
        input size array if undefined. (default 1)

    Raises:
      ValueError: input_tensor is not defined.
    """
    if not self._has_valid_tensors():
      raise ValueError("The batch size cannot be set for this model. Please "
                       "use input_shapes parameter.")

    for tensor in self._input_tensors:
      shape = tensor.shape.as_list()
      if shape[0] is None:
        shape[0] = batch_size
        tensor.set_shape(shape)

  def _is_unknown_shapes_allowed(self):
    # Ophint Converted nodes will need the shapes to be known.
    if _is_ophint_converted(self._graph_def):
      return False

    if not super(TFLiteConverterBaseV1, self)._is_unknown_shapes_allowed():
      return False

    # `conversion_summary_dir` calls TOCO. Unknown shapes are only supported by
    # the MLIR converter.
    if self.conversion_summary_dir:
      logging.warning(
          "`conversion_summary_dir` does not work with unknown shapes. "
          "Graphs with unknown shapes might be different than when this flag "
          "is disabled.")
      return False
    return True
class TFLiteSavedModelConverter(TFLiteConverterBaseV1):
  """Converts the given SavedModel into TensorFlow Lite model.

  Attributes:
    saved_model_dir: Directory of the SavedModel.
  """

  def __init__(self,
               saved_model_dir,
               saved_model_tags,
               saved_model_exported_names,
               experimental_debug_info_func=None):
    """Constructor for TFLiteConverter.

    Freezes the SavedModel's single serving signature into a GraphDef and
    captures its input/output tensors for conversion.

    Args:
      saved_model_dir: Directory of the SavedModel.
      saved_model_tags: Set of tags identifying the MetaGraphDef within the
        SavedModel to analyze. All tags in the tag set must be present. (default
        set(SERVING)).
      saved_model_exported_names: Names to be exported (default: export all)
        when the saved model import path is on.
      experimental_debug_info_func: An experimental function to retrieve the
        graph debug info for a set of nodes from the `graph_def`.

    Raises:
      ValueError: Invalid arguments.
    """
    super(TFLiteSavedModelConverter,
          self).__init__(experimental_debug_info_func)
    self.saved_model_dir = saved_model_dir
    self._saved_model_tags = saved_model_tags
    self._saved_model_exported_names = saved_model_exported_names

    # Exactly one signature key is supported; use it directly. (The previous
    # unconditional default-key assignment was dead code: it was always either
    # followed by this raise or overwritten below.)
    if len(self._saved_model_exported_names) != 1:
      raise ValueError("Only support a single signature key.")
    signature_key = self._saved_model_exported_names[0]

    # Freeze the SavedModel into (graph_def, input_tensors, output_tensors).
    result = _freeze_saved_model(self.saved_model_dir, None, None, None,
                                 self._saved_model_tags, signature_key)
    self._graph_def = result[0]
    self._input_tensors = result[1]
    self._output_tensors = result[2]
    self._parse_saved_model_args()
class TFLiteKerasModelConverter(TFLiteConverterBaseV1):
"""Converts the given SavedModel into TensorFlow Lite model."""
def __init__(self,
model_file,
input_arrays=None,
input_shapes=None,
output_arrays=None,
custom_objects=None):
"""Constructor for TFLiteConverter.
Args:
model_file: Full filepath of HDF5 file containing the tf.keras model.
input_arrays: List of input tensors to freeze graph with. Uses input
arrays from SignatureDef when none are provided. (default None)
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" :
None}). (default None)
output_arrays: List of output tensors to freeze graph with. Uses output
arrays from SignatureDef when none are provided. (default None)
custom_objects: Dict mapping names (strings) to custom classes or
functions to be considered during model deserialization. (default None)
Raises:
ValueError: Invalid arguments.
"""
super(TFLiteKerasModelConverter,
self).__init__(experimental_debug_info_func=None)
# Handles Keras when Eager mode is enabled.
if context.executing_eagerly():
if input_arrays or output_arrays:
raise ValueError("`input_arrays` and `output_arrays` are unsupported "
"with Eager mode. If your model requires any of these "
"parameters, please use disable_eager_execution().")
_keras.backend.set_learning_phase(False)
keras_model = _keras.models.load_model(model_file, custom_objects)
function = _saving_utils.trace_model_call(keras_model)
concrete_func = function.get_concrete_function()
frozen_func = _convert_to_constants.convert_variables_to_constants_v2(
concrete_func, lower_control_flow=False)
_set_tensor_shapes(frozen_func.inputs, input_shapes)
self._keras_model = keras_model
self._graph_def = frozen_func.graph.as_graph_def()
self._input_tensors = frozen_func.inputs
self._output_tensors = frozen_func.outputs
self._debug_info_func = _build_debug_info_func(frozen_func.graph)
return
# Handles Keras when Eager mode is disabled.
_keras.backend.clear_session()
_keras.backend.set_learning_phase(False)
keras_model = _keras.models.load_model(model_file, custom_objects)
sess = _keras.backend.get_session()
# Get input and output tensors.
if input_arrays:
input_tensors = _get_tensors_from_tensor_names(sess.graph, input_arrays)
else:
input_tensors = keras_model.inputs
if output_arrays:
output_tensors = _get_tensors_from_tensor_names(sess.graph, output_arrays)
else:
output_tensors = keras_model.outputs
_set_tensor_shapes(input_tensors, input_shapes)
graph_def = _freeze_graph(sess, input_tensors, output_tensors)
self._keras_model = keras_model
self._graph_def = graph_def
self._input_tensors = input_tensors
self._output_tensors = output_tensors
self._debug_info_func = _build_debug_info_func(sess.graph)
  def _convert_as_saved_model(self):
    """Converts a Keras model as a saved model.

    Returns:
      The converted data in serialized format, or None when the Keras model
      cannot be stored as a SavedModel (the caller then falls back to the
      regular Keras conversion pipeline).
    """
    # Export the Keras model into a temporary SavedModel directory; the
    # directory is always cleaned up in the `finally` block below.
    temp_dir = tempfile.mkdtemp()
    try:
      try:
        self._keras_model.save(temp_dir, save_format="tf")
      except Exception:  # pylint: disable=broad-except
        # If saving the Keras model as a SavedModel fails, fall back to the
        # original Keras model conversion pipeline (signalled by None).
        return None
      tag_set = set([_tag_constants.SERVING])
      signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
      # `result` holds (graph_def, input_tensors, output_tensors, graph)
      # extracted from the frozen SavedModel, as consumed below.
      result = _freeze_saved_model(temp_dir, None, None, None, tag_set,
                                   signature_key)
      self.saved_model_dir = temp_dir
      self._saved_model_tags = tag_set
      self._saved_model_exported_names = [signature_key]
      self._parse_saved_model_args()
      # NOTE(review): `_parse_saved_model_args()` (base class, not visible
      # here) appears able to clear `saved_model_dir`; the conversion only
      # proceeds when it is still set — confirm against the base class.
      if self.saved_model_dir:
        self._graph_def = result[0]
        self._input_tensors = result[1]
        self._output_tensors = result[2]
        self._debug_info_func = _build_debug_info_func(result[3])
        return super(TFLiteKerasModelConverter, self).convert()
    finally:
      # Remove the temporary SavedModel directory regardless of outcome;
      # ignore_errors=True avoids masking the real result with cleanup errors.
      shutil.rmtree(temp_dir, True)
def convert(self):
"""Converts a Keras model based on instance variables.
Returns:
The converted data in serialized format. Either a TFLite Flatbuffer or a
Graphviz graph depending on value in `output_format`.
Raises:
ValueError:
Input shape is not specified.
None value for dimension in input_tensor.
"""
saved_model_convert_result = self._convert_as_saved_model()
if saved_model_convert_result:
return saved_model_convert_result
return super(TFLiteKerasModelConverter, self).convert()
class TFLiteFrozenGraphConverter(TFLiteConverterBaseV1):
  """Converts the given frozen graph def into TensorFlow Lite model."""

  def __init__(self,
               graph_def,
               input_tensors,
               output_tensors,
               input_arrays_with_shape=None,
               output_arrays=None,
               experimental_debug_info_func=None):
    """Constructor for TFLiteConverter.

    Args:
      graph_def: Frozen TensorFlow GraphDef.
      input_tensors: List of input tensors. Type and shape are computed using
        `foo.shape` and `foo.dtype`.
      output_tensors: List of output tensors (only .name is used from this).
      input_arrays_with_shape: Tuple of strings representing input tensor names
        and list of integers representing input shapes
        (e.g., [("foo" : [1, 16, 16, 3])]). Use only when graph cannot be
        loaded into TensorFlow and when `input_tensors` and `output_tensors`
        are None. (default None)
      output_arrays: List of output tensors to freeze graph with. Use only when
        graph cannot be loaded into TensorFlow and when `input_tensors` and
        `output_tensors` are None. (default None)
      experimental_debug_info_func: An experimental function to retrieve the
        graph debug info for a set of nodes from the `graph_def`.

    Raises:
      ValueError: Invalid arguments.
    """
    super(TFLiteFrozenGraphConverter,
          self).__init__(experimental_debug_info_func)
    self._graph_def = graph_def
    self._input_tensors = input_tensors
    self._output_tensors = output_tensors
    if self._has_valid_tensors():
      return
    # Name/shape based configuration is only needed for models that cannot be
    # loaded into TensorFlow; both pieces must then be supplied by the caller.
    if not (input_arrays_with_shape and output_arrays):
      raise ValueError("If input_tensors and output_tensors are None, both "
                       "input_arrays_with_shape and output_arrays must be "
                       "defined.")
    self._input_arrays_with_shape = input_arrays_with_shape
    self._output_arrays = output_arrays
@_tf_export(v1=["lite.TFLiteConverter"])
class TFLiteConverter(TFLiteFrozenGraphConverter):
  """Convert a TensorFlow model into `output_format`.

  This is used to convert from a TensorFlow GraphDef, SavedModel or tf.keras
  model into either a TFLite FlatBuffer or graph visualization.

  Attributes:
    inference_type: Target data type of real-number arrays in the output file.
      Must be `{tf.float32, tf.uint8}`. If `optimzations` are provided, this
      parameter is ignored. (default tf.float32)
    inference_input_type: Target data type of real-number input arrays. Allows
      for a different type for input arrays.
      If an integer type is provided and `optimizations` are not used,
      `quantized_input_stats` must be provided.
      If `inference_type` is tf.uint8, signaling conversion to a fully quantized
      model from a quantization-aware trained input model, then
      `inference_input_type` defaults to tf.uint8.
      In all other cases, `inference_input_type` defaults to tf.float32.
      Must be `{tf.float32, tf.uint8, tf.int8}`
    inference_output_type: Target data type of real-number output arrays. Allows
      for a different type for output arrays.
      If `inference_type` is tf.uint8, signaling conversion to a fully quantized
      model from a quantization-aware trained output model, then
      `inference_output_type` defaults to tf.uint8.
      In all other cases, `inference_output_type` must be tf.float32, an error
      will be thrown otherwise.
      Must be `{tf.float32, tf.uint8, tf.int8}`
    output_format: Output file format. Currently must be `{TFLITE,
      GRAPHVIZ_DOT}`. (default TFLITE)
    quantized_input_stats: Dict of strings representing input tensor names
      mapped to tuple of floats representing the mean and standard deviation
      of the training data (e.g., {"foo" : (0., 1.)}). Only need if
      `inference_input_type` is `QUANTIZED_UINT8`.
      real_input_value = (quantized_input_value - mean_value) / std_dev_value.
      (default {})
    default_ranges_stats: Tuple of integers representing (min, max) range values
      for all arrays without a specified range. Intended for experimenting with
      quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    change_concat_input_ranges: Boolean to change behavior of min/max ranges for
      inputs and outputs of the concat operator for quantized models. Changes
      the ranges of concat operator overlap when true. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver.
      (default False)
    post_training_quantize: Deprecated. Please specify `[Optimize.DEFAULT]` for
      `optimizations` instead. Boolean indicating whether to quantize the
      weights of the converted float model. Model size will be reduced and
      there will be latency improvements (at the cost of accuracy).
      (default False)
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing GraphViz .dot files. Preferred over
      --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
      output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the graph after
      every graph transformation. (default False)
    conversion_summary_dir: A string indicating the path to the generated
      conversion logs.
    target_ops: Deprecated. Please specify `target_spec.supported_ops` instead.
      Set of OpsSet options indicating which converter to use.
      (default set([OpsSet.TFLITE_BUILTINS]))
    target_spec: Experimental flag, subject to change. Specification of target
      device.
    optimizations: Experimental flag, subject to change. A list of optimizations
      to apply when converting the model. E.g. `[Optimize.DEFAULT]`
    representative_dataset: A representative dataset that can be used to
      generate input and output samples for the model. The converter can use
      the dataset to evaluate different optimizations.
    experimental_new_converter: Experimental flag, subject to change.
      Enables MLIR-based conversion instead of TOCO conversion. (default True)

  Example usage:

  ```python
  # Converting a GraphDef from session.
  converter = tf.compat.v1.lite.TFLiteConverter.from_session(
    sess, in_tensors, out_tensors)
  tflite_model = converter.convert()
  open("converted_model.tflite", "wb").write(tflite_model)

  # Converting a GraphDef from file.
  converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(
    graph_def_file, input_arrays, output_arrays)
  tflite_model = converter.convert()
  open("converted_model.tflite", "wb").write(tflite_model)

  # Converting a SavedModel.
  converter = tf.compat.v1.lite.TFLiteConverter.from_saved_model(
      saved_model_dir)
  tflite_model = converter.convert()
  open("converted_model.tflite", "wb").write(tflite_model)

  # Converting a tf.keras model.
  converter = tf.compat.v1.lite.TFLiteConverter.from_keras_model_file(
    keras_model)
  tflite_model = converter.convert()
  open("converted_model.tflite", "wb").write(tflite_model)
  ```
  """

  # pylint: disable=useless-super-delegation
  def __init__(self,
               graph_def,
               input_tensors,
               output_tensors,
               input_arrays_with_shape=None,
               output_arrays=None,
               experimental_debug_info_func=None):
    """Constructor for TFLiteConverter.

    Args:
      graph_def: Frozen TensorFlow GraphDef.
      input_tensors: List of input tensors. Type and shape are computed using
        `foo.shape` and `foo.dtype`.
      output_tensors: List of output tensors (only .name is used from this).
      input_arrays_with_shape: Tuple of strings representing input tensor names
        and list of integers representing input shapes
        (e.g., [("foo" : [1, 16, 16, 3])]). Use only when graph cannot be loaded
        into TensorFlow and when `input_tensors` and `output_tensors` are
        None. (default None)
      output_arrays: List of output tensors to freeze graph with. Use only when
        graph cannot be loaded into TensorFlow and when `input_tensors` and
        `output_tensors` are None. (default None)
      experimental_debug_info_func: An experimental function to retrieve the
        graph debug info for a set of nodes from the `graph_def`.

    Raises:
      ValueError: Invalid arguments.
    """
    # Pure delegation to TFLiteFrozenGraphConverter; kept so this public class
    # carries its own docstring and pylint suppression.
    super(TFLiteConverter,
          self).__init__(graph_def, input_tensors, output_tensors,
                         input_arrays_with_shape, output_arrays,
                         experimental_debug_info_func)

  @classmethod
  def from_session(cls, sess, input_tensors, output_tensors):
    """Creates a TFLiteConverter class from a TensorFlow Session.

    Args:
      sess: TensorFlow Session.
      input_tensors: List of input tensors. Type and shape are computed using
        `foo.shape` and `foo.dtype`.
      output_tensors: List of output tensors (only .name is used from this).

    Returns:
      TFLiteConverter class.
    """
    # Freeze the session's graph so the converter works on constants only.
    graph_def = _freeze_graph(sess, input_tensors, output_tensors)
    return cls(
        graph_def,
        input_tensors,
        output_tensors,
        experimental_debug_info_func=_build_debug_info_func(sess.graph))

  @classmethod
  def from_frozen_graph(cls,
                        graph_def_file,
                        input_arrays,
                        output_arrays,
                        input_shapes=None):
    """Creates a TFLiteConverter class from a file containing a frozen GraphDef.

    Args:
      graph_def_file: Full filepath of file containing frozen GraphDef.
      input_arrays: List of input tensors to freeze graph with.
      output_arrays: List of output tensors to freeze graph with.
      input_shapes: Dict of strings representing input tensor names to list of
        integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
        Automatically determined when input shapes is None (e.g., {"foo" :
        None}). (default None)

    Returns:
      TFLiteConverter class.

    Raises:
      IOError:
        File not found.
        Unable to parse input file.
      ValueError:
        The graph is not frozen.
        input_arrays or output_arrays contains an invalid tensor name.
        input_shapes is not correctly defined when required
    """
    with _ops.Graph().as_default():
      with _session.Session() as sess:
        # Read GraphDef from file.
        if not _file_io.file_exists(graph_def_file):
          raise IOError("File '{0}' does not exist.".format(graph_def_file))
        with _file_io.FileIO(graph_def_file, "rb") as f:
          file_content = f.read()
        # First try to parse the file as a binary proto; fall back to the
        # human-readable text format on failure.
        try:
          graph_def = _graph_pb2.GraphDef()
          graph_def.ParseFromString(file_content)
        except (_text_format.ParseError, DecodeError):
          try:
            print("Ignore 'tcmalloc: large alloc' warnings.")
            # Text-format parsing needs `str` on PY3 / bytes on PY2.
            if not isinstance(file_content, str):
              if PY2:
                file_content = six.ensure_binary(file_content, "utf-8")
              else:
                file_content = six.ensure_text(file_content, "utf-8")
            graph_def = _graph_pb2.GraphDef()
            _text_format.Merge(file_content, graph_def)
          except (_text_format.ParseError, DecodeError):
            raise IOError(
                "Unable to parse input file '{}'.".format(graph_def_file))
        # Handles models with custom TFLite ops that cannot be resolved in
        # TensorFlow.
        load_model_in_session = True
        try:
          _import_graph_def(graph_def, name="")
        except _NotFoundError:
          load_model_in_session = False
        if load_model_in_session:
          # Check if graph is frozen.
          if not _is_frozen_graph(sess):
            raise ValueError("Please freeze the graph using freeze_graph.py.")
          # Get input and output tensors.
          input_tensors = _get_tensors_from_tensor_names(
              sess.graph, input_arrays)
          output_tensors = _get_tensors_from_tensor_names(
              sess.graph, output_arrays)
          _set_tensor_shapes(input_tensors, input_shapes)
          return cls(sess.graph_def, input_tensors, output_tensors)
        else:
          # The graph could not be imported (e.g. custom TFLite ops): fall
          # back to the name/shape-based constructor path, which requires an
          # explicit shape for every input.
          if not input_shapes:
            raise ValueError("input_shapes must be defined for this model.")
          if set(input_arrays) != set(input_shapes.keys()):
            raise ValueError("input_shapes must contain a value for each item "
                             "in input_array.")
          input_arrays_with_shape = [
              (name, input_shapes[name]) for name in input_arrays
          ]
          return cls(
              graph_def,
              input_tensors=None,
              output_tensors=None,
              input_arrays_with_shape=input_arrays_with_shape,
              output_arrays=output_arrays)

  @classmethod
  def from_saved_model(cls,
                       saved_model_dir,
                       input_arrays=None,
                       input_shapes=None,
                       output_arrays=None,
                       tag_set=None,
                       signature_key=None):
    """Creates a TFLiteConverter class from a SavedModel.

    Args:
      saved_model_dir: SavedModel directory to convert.
      input_arrays: List of input tensors to freeze graph with. Uses input
        arrays from SignatureDef when none are provided. (default None)
      input_shapes: Dict of strings representing input tensor names to list of
        integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
        Automatically determined when input shapes is None (e.g., {"foo" :
        None}). (default None)
      output_arrays: List of output tensors to freeze graph with. Uses output
        arrays from SignatureDef when none are provided. (default None)
      tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
        analyze. All tags in the tag set must be present. (default set("serve"))
      signature_key: Key identifying SignatureDef containing inputs and outputs.
        (default DEFAULT_SERVING_SIGNATURE_DEF_KEY)

    Returns:
      TFLiteConverter class.
    """
    if tag_set is None:
      tag_set = set([_tag_constants.SERVING])
    if signature_key is None:
      signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    # Prefer the dedicated SavedModel converter; it signals suitability by
    # keeping `saved_model_dir` set. Otherwise freeze the graph here.
    saved_model_converter = TFLiteSavedModelConverter(saved_model_dir, tag_set,
                                                      [signature_key])
    if saved_model_converter.saved_model_dir:
      return saved_model_converter
    # `result` holds (graph_def, input_tensors, output_tensors, graph).
    result = _freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
                                 output_arrays, tag_set, signature_key)
    return cls(
        graph_def=result[0],
        input_tensors=result[1],
        output_tensors=result[2],
        experimental_debug_info_func=_build_debug_info_func(result[3]))

  @classmethod
  def from_keras_model_file(cls,
                            model_file,
                            input_arrays=None,
                            input_shapes=None,
                            output_arrays=None,
                            custom_objects=None):
    """Creates a TFLiteConverter class from a tf.keras model file.

    Args:
      model_file: Full filepath of HDF5 file containing the tf.keras model.
      input_arrays: List of input tensors to freeze graph with. Uses input
        arrays from SignatureDef when none are provided. (default None)
      input_shapes: Dict of strings representing input tensor names to list of
        integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
        Automatically determined when input shapes is None (e.g., {"foo" :
        None}). (default None)
      output_arrays: List of output tensors to freeze graph with. Uses output
        arrays from SignatureDef when none are provided. (default None)
      custom_objects: Dict mapping names (strings) to custom classes or
        functions to be considered during model deserialization. (default None)

    Returns:
      TFLiteConverter class.
    """
    return TFLiteKerasModelConverter(model_file, input_arrays, input_shapes,
                                     output_arrays, custom_objects)

  # pylint: disable=useless-super-delegation
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format. Either a TFLite Flatbuffer or a
      Graphviz graph depending on value in `output_format`.

    Raises:
      ValueError:
        Input shape is not specified.
        None value for dimension in input_tensor.
    """
    return super(TFLiteConverter, self).convert()
@_tf_export(v1=["lite.TocoConverter"])
class TocoConverter(object):
  """Convert a TensorFlow model into `output_format` using TOCO.

  This class has been deprecated. Please use `lite.TFLiteConverter` instead.
  """

  @classmethod
  @_deprecation.deprecated(None,
                           "Use `lite.TFLiteConverter.from_session` instead.")
  def from_session(cls, sess, input_tensors, output_tensors):
    """Deprecated: creates a TocoConverter from a TensorFlow Session."""
    return TFLiteConverter.from_session(sess, input_tensors, output_tensors)

  @classmethod
  @_deprecation.deprecated(
      None, "Use `lite.TFLiteConverter.from_frozen_graph` instead.")
  def from_frozen_graph(cls,
                        graph_def_file,
                        input_arrays,
                        output_arrays,
                        input_shapes=None):
    """Deprecated: creates a TocoConverter from a frozen-GraphDef file."""
    return TFLiteConverter.from_frozen_graph(
        graph_def_file, input_arrays, output_arrays, input_shapes=input_shapes)

  @classmethod
  @_deprecation.deprecated(
      None, "Use `lite.TFLiteConverter.from_saved_model` instead.")
  def from_saved_model(cls,
                       saved_model_dir,
                       input_arrays=None,
                       input_shapes=None,
                       output_arrays=None,
                       tag_set=None,
                       signature_key=None):
    """Deprecated: creates a TocoConverter from a SavedModel."""
    return TFLiteConverter.from_saved_model(
        saved_model_dir,
        input_arrays=input_arrays,
        input_shapes=input_shapes,
        output_arrays=output_arrays,
        tag_set=tag_set,
        signature_key=signature_key)

  @classmethod
  @_deprecation.deprecated(
      None, "Use `lite.TFLiteConverter.from_keras_model_file` instead.")
  def from_keras_model_file(cls,
                            model_file,
                            input_arrays=None,
                            input_shapes=None,
                            output_arrays=None):
    """Deprecated: creates a TocoConverter from a tf.keras model file."""
    return TFLiteConverter.from_keras_model_file(
        model_file,
        input_arrays=input_arrays,
        input_shapes=input_shapes,
        output_arrays=output_arrays)
| 41.524582
| 125
| 0.702507
|
4a0aeb2e679256a5a4266bc02c98f3983a046524
| 11,087
|
py
|
Python
|
kedro/extras/datasets/spark/spark_hive_dataset.py
|
PaulEmmanuelSotir/kedro
|
0b0d095bfc11324dc5a0fcf6e8dec891426b3a01
|
[
"Apache-2.0"
] | 2
|
2020-07-27T12:20:55.000Z
|
2020-08-15T17:06:15.000Z
|
kedro/extras/datasets/spark/spark_hive_dataset.py
|
PaulEmmanuelSotir/kedro
|
0b0d095bfc11324dc5a0fcf6e8dec891426b3a01
|
[
"Apache-2.0"
] | 1
|
2021-05-11T19:22:42.000Z
|
2021-05-11T19:22:42.000Z
|
kedro/extras/datasets/spark/spark_hive_dataset.py
|
PaulEmmanuelSotir/kedro
|
0b0d095bfc11324dc5a0fcf6e8dec891426b3a01
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""``AbstractDataSet`` implementation to access Spark dataframes using
``pyspark`` on Apache Hive.
"""
import pickle
import uuid
from typing import Any, Dict, List
from pyspark.sql import DataFrame, SparkSession # pylint: disable=import-error
from pyspark.sql.functions import ( # pylint: disable=import-error,no-name-in-module
coalesce,
col,
lit,
)
from kedro.io.core import AbstractDataSet, DataSetError
class StagedHiveDataSet:
    """Context manager that snapshots a dataframe into a temporary Hive table.

    Useful when the contents of a Hive table must be replaced with data that
    is derived from the data currently stored in that same table: once the
    context is entered, ``staged_data`` can be queried freely while the
    original source tables are modified, because it is backed by its own
    staging table.

    The redundant staging table is dropped again when the context is exited.
    """

    def __init__(
        self, data: DataFrame, stage_table_name: str, stage_database_name: str
    ):
        """Creates a new instance of ``StagedHiveDataSet``.

        Args:
            data: The spark dataframe to be staged.
            stage_table_name: The table destination for the staged data.
            stage_database_name: The database destination for the staged data.
        """
        self.staged_data = None
        self._data = data
        self._stage_table_name = stage_table_name
        self._stage_database_name = stage_database_name
        self._spark_session = SparkSession.builder.getOrCreate()

    def __enter__(self):
        # Materialise the dataframe into a staging table so that it no longer
        # depends on its source tables; take(1) forces execution eagerly.
        self._data.createOrReplaceTempView("tmp")
        qualified_name = f"{self._stage_database_name}.{self._stage_table_name}"
        create_stmt = f"create table {qualified_name} as select * from tmp"  # nosec
        self._spark_session.sql(create_stmt).take(1)
        self.staged_data = self._spark_session.sql(
            f"select * from {qualified_name}"  # nosec
        )
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        drop_stmt = (
            f"drop table {self._stage_database_name}.{self._stage_table_name}"
        )
        self._spark_session.sql(drop_stmt)  # nosec
class SparkHiveDataSet(AbstractDataSet):
    """``SparkHiveDataSet`` loads and saves Spark dataframes stored on Hive.

    This data set also handles some incompatible file types such as using
    partitioned parquet on hive which will not normally allow upserts to
    existing data without a complete replacement of the existing
    file/partition.

    This DataSet has some key assumptions:

    - Schemas do not change during the pipeline run (defined PKs must be
      present for the duration of the pipeline)
    - Tables are not being externally modified during upserts. The upsert
      method is NOT ATOMIC to external changes to the target table while
      executing.

    Example:
    ::

        >>> from pyspark.sql import SparkSession
        >>> from pyspark.sql.types import (StructField, StringType,
        >>>                                IntegerType, StructType)
        >>>
        >>> from kedro.extras.datasets.spark import SparkHiveDataSet
        >>>
        >>> schema = StructType([StructField("name", StringType(), True),
        >>>                      StructField("age", IntegerType(), True)])
        >>>
        >>> data = [('Alex', 31), ('Bob', 12), ('Clarke', 65), ('Dave', 29)]
        >>>
        >>> spark_df = SparkSession.builder.getOrCreate().createDataFrame(data, schema)
        >>>
        >>> data_set = SparkHiveDataSet(database="test_database", table="test_table",
        >>>                             write_mode="overwrite")
        >>> data_set.save(spark_df)
        >>> reloaded = data_set.load()
        >>>
        >>> reloaded.take(4)
    """

    # pylint: disable=too-many-arguments
    def __init__(
        self, database: str, table: str, write_mode: str, table_pk: List[str] = None,
    ) -> None:
        """Creates a new instance of ``SparkHiveDataSet``.

        Args:
            database: The name of the hive database.
            table: The name of the table within the database.
            write_mode: ``insert``, ``upsert`` or ``overwrite`` are supported.
            table_pk: If performing an upsert, this identifies the primary key
                columns used to resolve preexisting data. Is required for
                ``write_mode="upsert"``.

        Raises:
            DataSetError: Invalid configuration supplied
        """
        valid_write_modes = ["insert", "upsert", "overwrite"]
        if write_mode not in valid_write_modes:
            valid_modes = ", ".join(valid_write_modes)
            raise DataSetError(
                f"Invalid `write_mode` provided: {write_mode}. "
                f"`write_mode` must be one of: {valid_modes}"
            )
        if write_mode == "upsert" and not table_pk:
            raise DataSetError("`table_pk` must be set to utilise `upsert` read mode")
        self._write_mode = write_mode
        self._table_pk = table_pk or []
        self._database = database
        self._table = table
        # Name of the intermediate staging table used during upserts.
        self._stage_table = "_temp_" + table
        # self._table_columns is set up in _save() to speed up initialization
        self._table_columns = []  # type: List[str]

    def _describe(self) -> Dict[str, Any]:
        # Configuration snapshot of this dataset instance.
        return dict(
            database=self._database,
            table=self._table,
            write_mode=self._write_mode,
            table_pk=self._table_pk,
        )

    @staticmethod
    def _get_spark() -> SparkSession:
        """Return the active (or a new) Spark session."""
        return SparkSession.builder.getOrCreate()

    def _create_empty_hive_table(self, data):
        """Create the target table with ``data``'s schema but no rows."""
        data.createOrReplaceTempView("tmp")
        # CTAS with `limit 1` creates the table with the right schema cheaply;
        # the single row is then removed by the truncate below.
        self._get_spark().sql(
            f"create table {self._database}.{self._table} select * from tmp limit 1"  # nosec
        )
        self._get_spark().sql(f"truncate table {self._database}.{self._table}")  # nosec

    def _load(self) -> DataFrame:
        """Return the full contents of the target table.

        Raises:
            DataSetError: If the database or table does not exist.
        """
        if not self._exists():
            raise DataSetError(
                f"Requested table not found: {self._database}.{self._table}"
            )
        return self._get_spark().sql(
            f"select * from {self._database}.{self._table}"  # nosec
        )

    def _save(self, data: DataFrame) -> None:
        # Lazily create the table on first save; cache its column list so the
        # write helpers below can build column-explicit statements.
        if not self._exists():
            self._create_empty_hive_table(data)
            self._table_columns = data.columns
        else:
            self._table_columns = self._load().columns
            if self._write_mode == "upsert":
                # Primary-key columns must exist in the target table.
                non_existent_columns = set(self._table_pk) - set(self._table_columns)
                if non_existent_columns:
                    colnames = ", ".join(sorted(non_existent_columns))
                    raise DataSetError(
                        f"Columns [{colnames}] selected as primary key(s) not found in "
                        f"table {self._database}.{self._table}"
                    )
        self._validate_save(data)
        # Dispatch to the write strategy selected at construction time.
        write_methods = {
            "insert": self._insert_save,
            "upsert": self._upsert_save,
            "overwrite": self._overwrite_save,
        }
        write_methods[self._write_mode](data)

    def _insert_save(self, data: DataFrame) -> None:
        """Append ``data`` to the target table, column by name."""
        data.createOrReplaceTempView("tmp")
        columns = ", ".join(self._table_columns)
        self._get_spark().sql(
            f"insert into {self._database}.{self._table} select {columns} from tmp"  # nosec
        )

    def _upsert_save(self, data: DataFrame) -> None:
        """Merge ``data`` into the table, new rows winning on PK collisions."""
        if self._load().rdd.isEmpty():
            # Nothing to merge against; a plain insert suffices.
            self._insert_save(data)
        else:
            # Full outer join on the PK, then prefer the incoming ("new")
            # value for every non-key column, falling back to the old one.
            joined_data = data.alias("new").join(
                self._load().alias("old"), self._table_pk, "outer"
            )
            upsert_dataset = joined_data.select(
                [  # type: ignore
                    coalesce(f"new.{col_name}", f"old.{col_name}").alias(col_name)
                    for col_name in set(data.columns)
                    - set(self._table_pk)  # type: ignore
                ]
                + self._table_pk
            )
            temporary_persisted_tbl_name = f"temp_{uuid.uuid4().int}"
            # Stage the merged result first: it depends on the target table,
            # which is about to be truncated by the overwrite below.
            with StagedHiveDataSet(
                upsert_dataset,
                stage_database_name=self._database,
                stage_table_name=temporary_persisted_tbl_name,
            ) as temp_table:
                self._overwrite_save(temp_table.staged_data)

    def _overwrite_save(self, data: DataFrame) -> None:
        """Replace the table's contents with ``data``."""
        self._get_spark().sql(f"truncate table {self._database}.{self._table}")  # nosec
        self._insert_save(data)

    def _validate_save(self, data: DataFrame):
        """Raise if ``data``'s column names/dtypes differ from the table's."""
        hive_dtypes = set(self._load().dtypes)
        data_dtypes = set(data.dtypes)
        if data_dtypes != hive_dtypes:
            new_cols = data_dtypes - hive_dtypes
            missing_cols = hive_dtypes - data_dtypes
            raise DataSetError(
                f"Dataset does not match hive table schema.\n"
                f"Present on insert only: {sorted(new_cols)}\n"
                f"Present on schema only: {sorted(missing_cols)}"
            )

    def _exists(self) -> bool:
        """Return True when both the database and the table exist.

        NOTE: switches the session's current database via ``use`` as a side
        effect when the database exists.
        """
        if (
            self._get_spark()
            .sql("show databases")
            .filter(col("databaseName") == lit(self._database))
            .take(1)
        ):
            self._get_spark().sql(f"use {self._database}")
            if (
                self._get_spark()
                .sql("show tables")
                .filter(col("tableName") == lit(self._table))
                .take(1)
            ):
                return True
        return False

    def __getstate__(self) -> None:
        # Spark sessions hold live JVM handles and cannot cross process
        # boundaries, so pickling is explicitly forbidden.
        raise pickle.PicklingError("PySpark datasets can't be serialized")
| 39.738351
| 100
| 0.622441
|
4a0aebbd4b723d0beaef3d583dd374ef8105930f
| 5,940
|
py
|
Python
|
oliver/subcommands/inspect.py
|
stjudecloud/csub
|
d68476f825eed359fda00e89c9ffa3eef21c7bd9
|
[
"MIT"
] | null | null | null |
oliver/subcommands/inspect.py
|
stjudecloud/csub
|
d68476f825eed359fda00e89c9ffa3eef21c7bd9
|
[
"MIT"
] | null | null | null |
oliver/subcommands/inspect.py
|
stjudecloud/csub
|
d68476f825eed359fda00e89c9ffa3eef21c7bd9
|
[
"MIT"
] | null | null | null |
import argparse
from typing import Any, Dict
import pendulum
from ..lib import api, constants, errors, reporting
def report_failure(
    failure: Dict[str, Any], indent: int, step: int = 2, offset: int = 2
) -> None:
    """Print a Cromwell failure and its nested causes as an indented tree.

    Args:
        failure: Failure entry containing a ``message`` string and a
            ``causedBy`` list of nested failures of the same shape.
        indent: Current indentation (in spaces) applied after the ``| `` gutter.
        step: Extra spaces of indentation added per nesting level.
        offset: Left margin (in spaces) printed before the ``| `` gutter.
    """
    print((" " * offset) + "| " + (" " * indent) + failure["message"])
    for cause in failure["causedBy"]:
        # Bug fix: propagate `step` and `offset` into the recursion.
        # Previously they were omitted, so non-default values silently
        # reset to 2 for every nested cause.
        report_failure(cause, indent + step, step=step, offset=offset)
async def call(args: Dict[str, Any], cromwell: api.CromwellAPI) -> None:
    """Execute the subcommand.

    Args:
        args (Dict): Arguments parsed from the command line.
        cromwell (CromwellAPI): Client used to query the Cromwell server.
    """
    metadata = await cromwell.get_workflows_metadata(args["workflow-id"])
    oliver_job_name = metadata.get("labels", {}).get(constants.OLIVER_JOB_NAME_KEY, "")
    oliver_group_name = metadata.get("labels", {}).get(
        constants.OLIVER_JOB_GROUP_KEY, ""
    )

    workflow_name = metadata.get("workflowName", "")
    workflow_id = metadata.get("id", "")
    workflow_language = metadata.get("actualWorkflowLanguage", "")
    if workflow_language:
        workflow_language += " " + metadata.get("actualWorkflowLanguageVersion", "")

    workflow_submission_date = metadata.get("submission")
    if workflow_submission_date is None:
        errors.report(
            message="Workflow submission date cannot be empty.",
            fatal=True,
            exitcode=errors.ERROR_INVALID_INPUT,
            suggest_report=True,
        )
        return

    workflow_start_date = metadata.get("start")
    workflow_end_date = metadata.get("end")
    workflow_start_to_report = ""
    workflow_duration_to_report = ""
    if workflow_start_date:
        workflow_start_to_report = reporting.localize_date(workflow_start_date)
        # Started but possibly still running: report elapsed time so far.
        workflow_duration_to_report = (
            reporting.duration_to_text(
                pendulum.now() - pendulum.parse(workflow_start_date)
            )
            + " (In progress)"
        )
    if workflow_start_date and workflow_end_date:
        # Completed: report the actual wall-clock duration instead.
        workflow_duration_to_report = reporting.duration_to_text(
            pendulum.parse(workflow_end_date) - pendulum.parse(workflow_start_date)
        )

    calls = []
    for name, processes in metadata["calls"].items():
        for process in processes:
            attempt = process.get("attempt")
            shard = process.get("shardIndex")

            # TODO: experimental, this code can be removed in the future if no
            # runtime errors are raised. If they are raised, we'll need to
            # further flesh out how Cromwell is reporting results.
            if not attempt:
                errors.report(
                    "Expected key is missing! The code needs to be updated, please contact the author!",
                    fatal=True,
                    exitcode=errors.ERROR_UNEXPECTED_RESPONSE,
                )

            call_start_date = process.get("start")
            call_end_date = process.get("end")
            call_duration_to_report = ""
            if call_start_date:
                call_duration_to_report = (
                    reporting.duration_to_text(
                        pendulum.now() - pendulum.parse(call_start_date)
                    )
                    + " (In progress)"
                )
            if call_start_date and call_end_date:
                call_duration_to_report = reporting.duration_to_text(
                    pendulum.parse(call_end_date) - pendulum.parse(call_start_date)
                )

            calls.append(
                {
                    "Call Name": name,
                    "Attempt": attempt,
                    "Shard": shard,
                    "Status": process.get("executionStatus", ""),
                    "Start": call_start_date,
                    "Duration": call_duration_to_report,
                }
            )

    # NOTE(review): calls with no "start" key carry None here, which raises a
    # TypeError when sorted against strings in Python 3 — presumably Cromwell
    # always includes a start date for listed processes; confirm.
    calls = sorted(calls, key=lambda k: k["Start"])  # type: ignore
    for cur_call in calls:
        # Bug fix: localize each call's OWN start date. This previously used
        # `call_start_date`, a variable leaked from the loop above, which
        # stamped every row with the last process's start time.
        cur_call["Start"] = reporting.localize_date(cur_call["Start"])

    if oliver_job_name:
        print(f"Job Name: {oliver_job_name}")
    if oliver_group_name:
        print(f"Group Name: {oliver_group_name}")
    print(f"Workflow Name: {workflow_name}")
    print(f"Workflow ID: {workflow_id}")
    print(f"Workflow Version: {workflow_language}")
    print(f"Submission: {reporting.localize_date(workflow_submission_date)}")
    print(f"Start: {workflow_start_to_report}")
    print(f"Duration: {workflow_duration_to_report}")

    # Show labels if they exist
    if args["show_labels"]:
        if not metadata.get("labels"):
            print("Labels: None")
        else:
            print("Labels:")
            print()
            for k, v in metadata["labels"].items():
                print(f"  {k} = {v}")
            print()

    # Show failures if they exist
    if metadata.get("failures", []):
        print("Failures:")
        print()
        for i, failure in enumerate(metadata["failures"]):
            print(f"  == Failure {i + 1} ==")
            report_failure(failure, 0, offset=2)
            print()

    if calls:
        print()
        reporting.print_dicts_as_table(calls)
def register_subparser(
subparser: argparse._SubParsersAction, # pylint: disable=protected-access
) -> argparse.ArgumentParser:
"""Registers a subparser for the current command.
Args:
subparser (argparse._SubParsersAction): Subparsers action.
"""
subcommand = subparser.add_parser(
"inspect", aliases=["i"], help="Describe the state of a Cromwell workflow."
)
subcommand.add_argument("workflow-id", help="Cromwell workflow ID.")
subcommand.add_argument(
"-l",
"--show-labels",
help="Show labels associated with the workflow",
default=False,
action="store_true",
)
subcommand.add_argument(
"--grid-style",
help="Any valid `tablefmt` for python-tabulate.",
default="fancy_grid",
)
subcommand.set_defaults(func=call)
return subcommand
| 33
| 104
| 0.601684
|
4a0aebc98e9556758d388cfa729c77411b4643a9
| 144
|
py
|
Python
|
StringOps.py
|
AnupamaDhekane/Python-Data-Structures
|
c742f9833cbbe4842b6b6ef71b4051155142efba
|
[
"MIT"
] | null | null | null |
StringOps.py
|
AnupamaDhekane/Python-Data-Structures
|
c742f9833cbbe4842b6b6ef71b4051155142efba
|
[
"MIT"
] | null | null | null |
StringOps.py
|
AnupamaDhekane/Python-Data-Structures
|
c742f9833cbbe4842b6b6ef71b4051155142efba
|
[
"MIT"
] | 1
|
2020-08-01T20:50:37.000Z
|
2020-08-01T20:50:37.000Z
|
f = open("alice.txt", "r")
text = f.read()
print(text.count('Alice'))
print(text.split("Alice", 12))
print(len(text.split("Alice", 12)))
| 20.571429
| 36
| 0.604167
|
4a0aed1bc5953d0bfe189b7495930e0a0fd101da
| 3,438
|
py
|
Python
|
dashboard/dashboard/__init__.py
|
ravitejavalluri/catapult
|
246a39a82c2213d913a96fff020a263838dc76e6
|
[
"BSD-3-Clause"
] | null | null | null |
dashboard/dashboard/__init__.py
|
ravitejavalluri/catapult
|
246a39a82c2213d913a96fff020a263838dc76e6
|
[
"BSD-3-Clause"
] | null | null | null |
dashboard/dashboard/__init__.py
|
ravitejavalluri/catapult
|
246a39a82c2213d913a96fff020a263838dc76e6
|
[
"BSD-3-Clause"
] | 1
|
2020-07-24T05:13:01.000Z
|
2020-07-24T05:13:01.000Z
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
_CATAPULT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
# Directories in catapult/third_party required by dashboard.
THIRD_PARTY_LIBRARIES = [
'apiclient',
'beautifulsoup4',
'graphy',
'httplib2',
'mapreduce',
'mock',
'oauth2client',
'pipeline',
'uritemplate',
'webtest',
'flot',
'jquery',
'polymer',
'six',
]
# Files and directories in catapult/dashboard.
DASHBOARD_FILES = [
'appengine_config.py',
'app.yaml',
'dashboard',
'index.yaml',
'mapreduce.yaml',
'pinpoint.yaml',
'queue.yaml',
]
TRACING_PATHS = [
'tracing/tracing',
'tracing/third_party/gl-matrix/dist/gl-matrix-min.js'
]
def PathsForDeployment():
"""Returns a list of paths to things required for deployment.
This includes both Python libraries that are required, and also
other files, such as config files.
This list is used when building a temporary deployment directory;
each of the items in this list will have a corresponding file or
directory with the same basename in the deployment directory.
"""
paths = []
paths.extend(_CatapultThirdPartyLibraryPaths())
for name in DASHBOARD_FILES:
paths.append(os.path.join(_CATAPULT_PATH, 'dashboard', name))
paths.extend(_TracingPaths())
return paths
def PathsForTesting():
"""Returns a list of Python library paths required for dashboard tests."""
paths = []
paths.append(os.path.join(_CATAPULT_PATH, 'dashboard'))
paths += _CatapultThirdPartyLibraryPaths()
paths += _AllSdkThirdPartyLibraryPaths()
return paths
def _AllSdkThirdPartyLibraryPaths():
"""Returns a list of all third party library paths from the SDK.
The AppEngine documentation directs us to add App Engine libraries from the
SDK to our Python path for local unit tests.
https://cloud.google.com/appengine/docs/python/tools/localunittesting
"""
for sdk_bin_path in os.environ['PATH'].split(os.pathsep):
if 'google-cloud-sdk' not in sdk_bin_path:
continue
appengine_path = os.path.join(
os.path.dirname(sdk_bin_path), 'platform', 'google_appengine')
sys.path.insert(0, appengine_path)
break
try:
import dev_appserver
except ImportError:
# TODO: Put the Cloud SDK in the path with the binary dependency manager.
# https://github.com/catapult-project/catapult/issues/2135
print 'This script requires the Google Cloud SDK to be in PATH.'
print 'Install at https://cloud.google.com/sdk/ and then run'
print '`gcloud components install app-engine-python`'
sys.exit(1)
return dev_appserver.EXTRA_PATHS
def _CatapultThirdPartyLibraryPaths():
"""Returns a list of required third-party libraries in catapult."""
paths = []
for library in THIRD_PARTY_LIBRARIES:
paths.append(os.path.join(_CATAPULT_PATH, 'third_party', library))
return paths
def _TracingPaths():
"""Returns a list of paths that may be imported from tracing."""
# TODO(sullivan): This should either pull from tracing_project or be generated
# via gypi. See https://github.com/catapult-project/catapult/issues/3048.
paths = []
for path in TRACING_PATHS:
paths.append(os.path.join(_CATAPULT_PATH, os.path.normpath(path)))
return paths
| 29.135593
| 80
| 0.715241
|
4a0aed53280c067cf262a3702910eac32a6cb1c0
| 883
|
py
|
Python
|
Scripts/county_pops_5yr_0816.py
|
hassenmorad/Home-to-Income-Ratio
|
777754693150ed6f777d084dbace7b8189317c55
|
[
"MIT"
] | null | null | null |
Scripts/county_pops_5yr_0816.py
|
hassenmorad/Home-to-Income-Ratio
|
777754693150ed6f777d084dbace7b8189317c55
|
[
"MIT"
] | null | null | null |
Scripts/county_pops_5yr_0816.py
|
hassenmorad/Home-to-Income-Ratio
|
777754693150ed6f777d084dbace7b8189317c55
|
[
"MIT"
] | null | null | null |
# Percentage of homeowners & renters devoting 30+% of household income to housing
# Source: Census (census.data.gov) advanced search (Topics: 'Housing' & 'Income and Poverty'; Geography: All US Counties; Years: ACS 1-Yr. Estimates)
import pandas as pd
import numpy as np
import os
master_df = pd.DataFrame()
for file in os.listdir('County Population 5-yr')[::3]:
year = int(file[7:11])
print(year)
df = pd.read_csv('County Population 5-yr/' + file)
df = df[['GEO_ID', 'NAME', 'S0101_C01_001E']][1:]
df.columns = ['FIPS', 'County', 'Population']
df.Population = df.Population.astype(int)
df.FIPS = df.FIPS.apply(lambda x:x[-5:]).astype(int)
df['Year'] = list(np.full(len(df), year-2))
df = df.sort_values(['Year', 'FIPS'])
master_df = pd.concat([master_df, df])
master_df.to_csv('county_pops_5yr_0816.csv', index=False)
| 44.15
| 150
| 0.657984
|
4a0aede82df99d8461b505496f79f80f53e0c29b
| 95
|
py
|
Python
|
pertanyaan/apps.py
|
rafimuhammad01/donasi-covid-19-2
|
eee8fb28ce492639d900923c985bbb5e0a8ec07f
|
[
"Unlicense"
] | null | null | null |
pertanyaan/apps.py
|
rafimuhammad01/donasi-covid-19-2
|
eee8fb28ce492639d900923c985bbb5e0a8ec07f
|
[
"Unlicense"
] | null | null | null |
pertanyaan/apps.py
|
rafimuhammad01/donasi-covid-19-2
|
eee8fb28ce492639d900923c985bbb5e0a8ec07f
|
[
"Unlicense"
] | null | null | null |
from django.apps import AppConfig
class PertanyaanConfig(AppConfig):
name = 'pertanyaan'
| 15.833333
| 34
| 0.768421
|
4a0aee3030221e118e83e79858bb1932dde020b0
| 803
|
py
|
Python
|
challenges/left_join/conftest.py
|
jayadams011/data-structures-and-algorithms
|
b9a49c65ca769c82b2a34d840bd1e4dd626be025
|
[
"MIT"
] | null | null | null |
challenges/left_join/conftest.py
|
jayadams011/data-structures-and-algorithms
|
b9a49c65ca769c82b2a34d840bd1e4dd626be025
|
[
"MIT"
] | 4
|
2018-03-22T16:56:06.000Z
|
2018-03-28T23:30:29.000Z
|
challenges/left_join/conftest.py
|
jayadams011/data-structures-and-algorithms
|
b9a49c65ca769c82b2a34d840bd1e4dd626be025
|
[
"MIT"
] | null | null | null |
"""Imports."""
import pytest
from hash_table import HashTable
from left_join import left_join
@pytest.fixture
def empty_hash_table():
"""Make empty table."""
return HashTable()
@pytest.fixture
def small_table():
"""Small table."""
small_table = HashTable(10)
small_table.set('John', 1234)
small_table.set('Jane', 123456)
small_table.set('John', '123')
small_table.set('Jane', 'abc')
return small_table
@pytest.fixture
def large_table():
"""Large table."""
large_table = HashTable()
large_table.set('John', 1234)
large_table.set('Jane', 123456)
large_table.set('John', '123')
large_table.set('Jim', 'abc')
large_table.set('Jane', 123456)
large_table.set('Jane', '123')
large_table.set('Jane', 'abc')
return large_table
| 20.589744
| 35
| 0.657534
|
4a0aeeb23ba9e3cf38fe9eb85194d03f7a59e580
| 3,071
|
py
|
Python
|
automationtools/models.py
|
mark-bondo/moondance
|
3347c3fb8ac3e40a5c66b61a21cfb562841531ba
|
[
"MIT"
] | null | null | null |
automationtools/models.py
|
mark-bondo/moondance
|
3347c3fb8ac3e40a5c66b61a21cfb562841531ba
|
[
"MIT"
] | null | null | null |
automationtools/models.py
|
mark-bondo/moondance
|
3347c3fb8ac3e40a5c66b61a21cfb562841531ba
|
[
"MIT"
] | null | null | null |
from django.db import models
from moondance.meta_models import MetaModel
class Chart(MetaModel):
type_choices = (
("pie", "pie"),
("donut", "donut"),
("line", "line"),
("spline", "spline"),
("area", "area"),
("column", "column"),
("bar", "bar"),
)
table = models.CharField(max_length=200)
title = models.CharField(max_length=200, unique=True)
type = models.CharField(max_length=200, choices=type_choices)
ordering = models.IntegerField(null=True, blank=True)
show_legend = models.BooleanField(default=True)
show_title = models.BooleanField(default=True)
def __str__(self):
return self.title
class Meta:
verbose_name = "Chart"
verbose_name_plural = "Charts"
class Dashboard(MetaModel):
type_choices = (
("Operations", "Operations"),
("Sales", "Sales"),
("Marketing", "Marketing"),
("Finance", "Finance"),
)
name = models.CharField(max_length=200, unique=True)
type = models.CharField(max_length=200, choices=type_choices)
charts = models.ManyToManyField(Chart, blank=True)
def __str__(self):
return self.name
class Meta:
verbose_name = "Dashboard"
verbose_name_plural = "Dashboards"
class Chart_Options(MetaModel):
yaxis_prefix_choices = (
("$", "$"),
(None, "----"),
)
xaxis_type_list = (
("datetime", "datetime"),
("category", "category"),
)
type_list = {
("grouping", "grouping"),
("xaxis", "xaxis"),
("yaxis", "yaxis"),
}
chart = models.ForeignKey(Chart, on_delete=models.CASCADE)
is_default = models.BooleanField(default=False)
is_visible = models.BooleanField(default=True)
field = models.CharField(max_length=200)
name = models.CharField(max_length=200, blank=True, null=True)
filter = models.CharField(max_length=200, blank=True, null=True)
type = models.CharField(max_length=200)
yaxis_prefix = models.CharField(
max_length=200, choices=yaxis_prefix_choices, null=True, blank=True
)
yaxis_decimals = models.IntegerField(default=0)
xaxis_type = models.CharField(
max_length=200, choices=xaxis_type_list, null=True, blank=True
)
def __str__(self):
return self.field
class Meta:
verbose_name = "Chart Option"
verbose_name_plural = "Chart Options"
unique_together = (
(
"chart",
"name",
"type",
),
)
class Chart_XAxis(Chart_Options):
class Meta:
verbose_name = "X-Axis Option"
verbose_name_plural = "X-Axis Options"
proxy = True
class Chart_YAxis(Chart_Options):
class Meta:
verbose_name = "Y-Axis Option"
verbose_name_plural = "Y-Axis Options"
proxy = True
class Chart_Grouping(Chart_Options):
class Meta:
verbose_name = "Grouping Option"
verbose_name_plural = "Grouping Options"
proxy = True
| 26.704348
| 75
| 0.609248
|
4a0aeee0dc1902a892056887faee708e7eef5b50
| 7,541
|
py
|
Python
|
tensorflow_datasets/rl_unplugged/atari_utils.py
|
yukimasano/datasets
|
6b2c79be6b27991e3d7ecba6aa5edc8d13d19b6d
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/rl_unplugged/atari_utils.py
|
yukimasano/datasets
|
6b2c79be6b27991e3d7ecba6aa5edc8d13d19b6d
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/rl_unplugged/atari_utils.py
|
yukimasano/datasets
|
6b2c79be6b27991e3d7ecba6aa5edc8d13d19b6d
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils common to Atari datasets."""
import dataclasses
from typing import Any, Dict
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_ATARI_DESCRIPTION = """
We are releasing a large and diverse dataset of gameplay following the protocol
described by [Agarwal et al., 2020](https://arxiv.org/abs/1907.04543), which can
be used to evaluate several discrete offline RL algorithms. The dataset is
generated by running an online DQN agent and recording transitions from its
replay during training with sticky actions
[Machado et al., 2018](https://arxiv.org/abs/1709.06009). As stated in
[Agarwal et al., 2020](https://arxiv.org/abs/1907.04543), for each game we use
data from five runs with 50 million transitions each. We release datasets for 46
Atari games. For details on how the dataset was generated, please refer to the
paper.
Atari is a standard RL benchmark. We recommend you to try offline RL methods on
Atari if you are interested in comparing your approach to other state of the art
offline RL methods with discrete actions.
The reward of each step is clipped (obtained with [-1, 1] clipping) and the
episode includes the sum of the clipped reward per episode.
"""
_CITATION = """
@misc{gulcehre2020rl,
title={RL Unplugged: Benchmarks for Offline Reinforcement Learning},
author={Caglar Gulcehre and Ziyu Wang and Alexander Novikov and Tom Le Paine
and Sergio Gómez Colmenarejo and Konrad Zolna and Rishabh Agarwal and
Josh Merel and Daniel Mankowitz and Cosmin Paduraru and Gabriel
Dulac-Arnold and Jerry Li and Mohammad Norouzi and Matt Hoffman and
Ofir Nachum and George Tucker and Nicolas Heess and Nando deFreitas},
year={2020},
eprint={2006.13888},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
def description():
return _ATARI_DESCRIPTION
def citation():
return _CITATION
@dataclasses.dataclass
class BuilderConfig(tfds.core.BuilderConfig):
"""Configuration of the task.
Attributes:
game: name of the Atari game
run: name of the game run
"""
game: str = 'Asterix'
run: int = 1
_GAMES = [
'Alien',
'Amidar',
'Assault',
'Asterix',
'Atlantis',
'BankHeist',
'BattleZone',
'BeamRider',
'Boxing',
'Breakout',
'Carnival',
'Centipede',
'ChopperCommand',
'CrazyClimber',
'DemonAttack',
'DoubleDunk',
'Enduro',
'FishingDerby',
'Freeway',
'Frostbite',
'Gopher',
'Gravitar',
'Hero',
'IceHockey',
'Jamesbond',
'Kangaroo',
'Krull',
'KungFuMaster',
'MsPacman',
'NameThisGame',
'Phoenix',
'Pong',
'Pooyan',
'Qbert',
'Riverraid',
'RoadRunner',
'Robotank',
'Seaquest',
'SpaceInvaders',
'StarGunner',
'TimePilot',
'UpNDown',
'VideoPinball',
'WizardOfWor',
'YarsRevenge',
'Zaxxon',
]
_SHORT_GAMES = [
'Carnival',
'Gravitar',
'StarGunner',
]
# Note that rewards and episode_return are actually also clipped.
_FEATURE_DESCRIPTION = {
'checkpoint_idx':
tf.io.FixedLenFeature([], tf.int64),
'episode_idx':
tf.io.FixedLenFeature([], tf.int64),
'episode_return':
tf.io.FixedLenFeature([], tf.float32),
'clipped_episode_return':
tf.io.FixedLenFeature([], tf.float32),
'observations':
tf.io.FixedLenSequenceFeature([], tf.string, allow_missing=True),
'actions':
tf.io.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
'unclipped_rewards':
tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'clipped_rewards':
tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'discounts':
tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
}
def num_shards(game: str, shards: int) -> int:
if game in _SHORT_GAMES:
return shards - 1
else:
return shards
def builder_configs():
configs = []
for game in _GAMES:
for run in range(1, 6):
# pytype: disable=wrong-keyword-args
configs.append(
BuilderConfig(name=f'{game}_run_{run}', game=game, run=run))
# pytype: enable=wrong-keyword-args
return configs
def atari_example_to_rlds(tf_example: tf.train.Example) -> Dict[str, Any]:
"""Generates an RLDS episode from an Atari TF Example.
Args:
tf_example: example from an Atari dataset.
Returns:
RLDS episode.
"""
data = tf.io.parse_single_example(tf_example, _FEATURE_DESCRIPTION)
episode_length = tf.size(data['actions'])
is_first = tf.concat([[True], [False] * tf.ones(episode_length - 1)], axis=0)
is_last = tf.concat([[False] * tf.ones(episode_length - 1), [True]], axis=0)
is_terminal = [False] * tf.ones_like(data['actions'])
discounts = data['discounts']
if discounts[-1] == 0.:
is_terminal = tf.concat(
[[False] * tf.ones(episode_length - 1, tf.int64), [True]], axis=0)
# If the episode ends in a terminal state, in the last step only the
# observation has valid information (the terminal state).
discounts = tf.concat([discounts[1:], [0.]], axis=0)
episode = {
# Episode Metadata
'episode_id': data['episode_idx'],
'checkpoint_id': data['checkpoint_idx'],
'episode_return': data['episode_return'],
'steps': {
'observation': data['observations'],
'action': data['actions'],
'reward': data['unclipped_rewards'],
'discount': discounts,
'is_first': is_first,
'is_last': is_last,
'is_terminal': is_terminal,
}
}
return episode
def file_prefix(prefix, run, game):
return f'{prefix}/{game}/run_{run}'
def features_dict():
return tfds.features.FeaturesDict({
'steps':
tfds.features.Dataset({
'observation':
tfds.features.Image(
shape=(
84,
84,
1,
), dtype=tf.uint8, encoding_format='png'),
'action':
tf.int64,
'reward':
tfds.features.Scalar(
dtype=tf.float32,
doc=tfds.features.Documentation(
desc='Clipped reward.', value_range='[-1, 1]')),
'is_terminal':
tf.bool,
'is_first':
tf.bool,
'is_last':
tf.bool,
'discount':
tf.float32,
}),
'checkpoint_id':
tf.int64,
'episode_id':
tf.int64,
'episode_return':
tfds.features.Scalar(
dtype=tf.float32,
doc=tfds.features.Documentation(
desc='Sum of the clipped rewards.')),
})
def episode_id(episode):
return f'{episode["checkpoint_id"]}_{episode["episode_id"]}'
| 28.564394
| 80
| 0.631349
|
4a0af082605f4abc70d8e096aeb0b6369f3f9dcb
| 2,406
|
py
|
Python
|
www/app.py
|
mcwall/miot
|
3ccb8730023085be52ff866f1ccbba88b201d834
|
[
"MIT"
] | null | null | null |
www/app.py
|
mcwall/miot
|
3ccb8730023085be52ff866f1ccbba88b201d834
|
[
"MIT"
] | 1
|
2021-03-25T23:30:18.000Z
|
2021-03-25T23:30:18.000Z
|
www/app.py
|
mcwall/miot
|
3ccb8730023085be52ff866f1ccbba88b201d834
|
[
"MIT"
] | null | null | null |
import logging
import os
from flask import Flask, request, render_template
from flask_ask import Ask, session, question, statement
import RPi.GPIO as GPIO
from rpi_rf import RFDevice
app = Flask(__name__)
ask = Ask(app, "/ask")
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
tx_gpio = 17
n_repeat = 1
n_devices = 1
protocols = [1]
pulselengths = [355]
on_codes = [5768088]
off_codes = [5768084]
STATUSON = ['on']
STATUSOFF = ['off']
# Should we do mode setting here?
GPIO.setmode(GPIO.BCM)
GPIO.setup(tx_gpio, GPIO.OUT)
rfdevice = RFDevice(tx_gpio)
rfdevice.enable_tx()
@ask.launch
def launch():
speech_text = 'Welcome to Raspberry Pi Automation.'
return question(speech_text).reprompt(speech_text).simple_card(speech_text)
@ask.intent('GpioIntent', mapping = {'status':'status'})
def Gpio_Intent(status,room):
print("GOT INTENT CALL!!!")
# We should do multiple attempts here to mitigate chance of RF interference
if status in STATUSON:
toggle(True)
return statement('turning {} lights'.format(status))
elif status in STATUSOFF:
toggle(False)
return statement('turning {} lights'.format(status))
else:
return statement('Sorry not possible.')
@ask.intent('AMAZON.HelpIntent')
def help():
speech_text = 'You can say hello to me!'
return question(speech_text).reprompt(speech_text).simple_card('HelloWorld', speech_text)
@ask.session_ended
def session_ended():
return "{}", 200
@app.route('/ui')
def index():
return render_template('index.html')
# Send signal a few times for robustness
def toggle(state):
codes = on_codes if state else off_codes
for _ in range(n_repeat):
for i in range(n_devices):
rfdevice.tx_code(codes[i], protocols[i], pulselengths[i])
@app.route('/ui/light')
def light():
state = request.args.get('state')
on = state == 'true'
print('Turning ' + ('on' if on else 'off') + '...' )
toggle(on)
return render_template('index.html')
if __name__ == '__main__':
app.config['ASK_VERIFY_REQUESTS'] = False
if 'ASK_VERIFY_REQUESTS' in os.environ:
verify = str(os.environ.get('ASK_VERIFY_REQUESTS', '')).lower()
if verify == 'false':
app.config['ASK_VERIFY_REQUESTS'] = False
#app.run(debug=True, host='0.0.0.0', ssl_context=('../ssl/cert.pem', '../ssl/key.pem'))
app.run(debug=True, host='0.0.0.0')
| 27.340909
| 93
| 0.680798
|
4a0af1a29ae69ecb8419f1fc5cbfdcc4ce61e315
| 5,737
|
py
|
Python
|
pyclustering/cluster/tests/unit/ut_elbow.py
|
JosephChataignon/pyclustering
|
bf4f51a472622292627ec8c294eb205585e50f52
|
[
"BSD-3-Clause"
] | 1,013
|
2015-01-26T19:50:14.000Z
|
2022-03-31T07:38:48.000Z
|
pyclustering/cluster/tests/unit/ut_elbow.py
|
peterlau0626/pyclustering
|
bf4f51a472622292627ec8c294eb205585e50f52
|
[
"BSD-3-Clause"
] | 542
|
2015-01-20T16:44:32.000Z
|
2022-01-29T14:57:20.000Z
|
pyclustering/cluster/tests/unit/ut_elbow.py
|
peterlau0626/pyclustering
|
bf4f51a472622292627ec8c294eb205585e50f52
|
[
"BSD-3-Clause"
] | 262
|
2015-03-19T07:28:12.000Z
|
2022-03-30T07:28:24.000Z
|
"""!
@brief Unit-tests for Elbow method.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
import unittest
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.cluster.center_initializer import random_center_initializer
from pyclustering.cluster.elbow import elbow
from pyclustering.cluster.tests.elbow_template import elbow_test_template
from pyclustering.samples.definitions import SIMPLE_SAMPLES, SIMPLE_ANSWERS
class elbow_unit_test(unittest.TestCase):
def test_elbow_simple_01(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, SIMPLE_ANSWERS.ANSWER_SIMPLE1, 1, 10, False)
def test_elbow_simple_01_random_initializer(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, SIMPLE_ANSWERS.ANSWER_SIMPLE1, 1, 10, False, initializer=random_center_initializer)
def test_elbow_simple_01_step_2(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 3, 1, 10, False, kstep=2)
def test_elbow_simple_01_step_3(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 4, 1, 10, False, kstep=3)
def test_elbow_simple_01_step_4(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 5, 1, 10, False, kstep=4)
def test_elbow_simple_02(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, SIMPLE_ANSWERS.ANSWER_SIMPLE2, 1, 10, False)
def test_elbow_simple_02_step_2(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, SIMPLE_ANSWERS.ANSWER_SIMPLE2, 1, 10, False, kstep=2)
def test_elbow_simple_02_step_3(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, None, 1, 10, False, kstep=3)
def test_elbow_simple_02_step_4(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, None, 1, 10, False, kstep=4)
def test_elbow_simple_02_random_initializer(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, SIMPLE_ANSWERS.ANSWER_SIMPLE2, 1, 10, False, initializer=random_center_initializer)
def test_elbow_simple_03(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, SIMPLE_ANSWERS.ANSWER_SIMPLE3, 1, 10, False)
def test_elbow_simple_03_random_initializer(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, SIMPLE_ANSWERS.ANSWER_SIMPLE3, 1, 10, False, initializer=random_center_initializer)
def test_elbow_simple_05(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, SIMPLE_ANSWERS.ANSWER_SIMPLE5, 1, 10, False)
def test_elbow_simple_06(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE6, SIMPLE_ANSWERS.ANSWER_SIMPLE6, 1, 10, False)
def test_elbow_simple_10(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE10, SIMPLE_ANSWERS.ANSWER_SIMPLE10, 1, 10, False)
def test_elbow_simple_12(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, SIMPLE_ANSWERS.ANSWER_SIMPLE12, 1, 10, False)
def test_elbow_simple_15(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE15, 5, 1, 20, False, kstep=2)
def test_elbow_one_dimensional_simple_07(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, SIMPLE_ANSWERS.ANSWER_SIMPLE7, 1, 10, False)
def test_elbow_one_dimensional_simple_09(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, SIMPLE_ANSWERS.ANSWER_SIMPLE9, 1, 10, False)
def test_elbow_three_dimensional_simple_11(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, SIMPLE_ANSWERS.ANSWER_SIMPLE11, 1, 10, False)
def test_elbow_random_state(self):
elbow_test_template.random_state_fixed(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 10, False, random_state=5)
def test_elbow_random_state_random_initializer(self):
elbow_test_template.random_state_fixed(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 10, False, random_state=5, initializer=random_center_initializer)
def test_elbow_random_state_continuous(self):
elbow_test_template.random_state_fixed(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 10, False, random_state=5, repeat=10)
def test_incorrect_data(self):
self.assertRaises(ValueError, elbow, [], 1, 5)
def test_incorrect_kmin(self):
self.assertRaises(ValueError, elbow, [[0], [1], [2]], 0, 2)
def test_incorrect_difference(self):
self.assertRaises(ValueError, elbow, [[0], [1], [2]], 1, 2)
def test_border_step_1(self):
elbow_test_template.calculate_elbow(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, SIMPLE_ANSWERS.ANSWER_SIMPLE1, 1, 3, False, kstep=1, random_state=1000)
def test_border_exception_step_2(self):
self.assertRaises(ValueError, elbow, [[0], [1], [2]], 1, 3, kstep=2, random_state=1000)
def test_border_step_4(self):
elbow_test_template.random_state_fixed(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 9, False, kstep=4, random_state=1000)
def test_incorrect_difference_with_kstep(self):
self.assertRaises(ValueError, elbow, [[0], [1], [2]], 1, 10, kstep=5)
self.assertRaises(ValueError, elbow, [[0], [1], [2]], 1, 10, kstep=6)
def test_incorrect_kmax(self):
self.assertRaises(ValueError, elbow, [[0], [1], [2]], 1, 10)
def test_incorrect_kstep(self):
self.assertRaises(ValueError, elbow, [[0], [1], [2]], 1, 3, kstep=0)
| 46.266129
| 159
| 0.757364
|
4a0af1f9494c3a2eb8183e3a119cad5a5d1a9b51
| 315
|
py
|
Python
|
context/context.py
|
Joejiong/joelang
|
2eef19c427fa9559dece8705e8c6868e75280068
|
[
"WTFPL"
] | 1
|
2019-11-25T14:04:55.000Z
|
2019-11-25T14:04:55.000Z
|
context/context.py
|
Joejiong/joelang
|
2eef19c427fa9559dece8705e8c6868e75280068
|
[
"WTFPL"
] | null | null | null |
context/context.py
|
Joejiong/joelang
|
2eef19c427fa9559dece8705e8c6868e75280068
|
[
"WTFPL"
] | null | null | null |
#######################################
# CONTEXT
#######################################
class Context:
def __init__(self, display_name, parent=None, parent_entry_pos=None):
self.display_name = display_name
self.parent = parent
self.parent_entry_pos = parent_entry_pos
self.symbol_table = None
| 26.25
| 71
| 0.55873
|
4a0af2735c3346bff73746dec6252a96a83db931
| 1,132
|
py
|
Python
|
project/apps/django_backend_template/models/student.py
|
adosaa/Backend-django-app
|
3a7eb746ebc703e2cdbf1e4b2ac5703b3fedcd85
|
[
"MIT"
] | 2
|
2020-11-04T21:47:48.000Z
|
2020-11-04T21:47:50.000Z
|
project/apps/django_backend_template/models/student.py
|
adosaa/Backend-Django-App
|
3a7eb746ebc703e2cdbf1e4b2ac5703b3fedcd85
|
[
"MIT"
] | null | null | null |
project/apps/django_backend_template/models/student.py
|
adosaa/Backend-Django-App
|
3a7eb746ebc703e2cdbf1e4b2ac5703b3fedcd85
|
[
"MIT"
] | null | null | null |
"""
Student Model.
Part of models module.
"""
__license__ = "MIT"
__version__ = "0.0.1"
__author__ = "Ariel Saavedra D"
__email__ = "ariel@gmail.com"
__copyright__ = "Copyright 2020"
from django.db import models
from django_backend_template.models.core import BaseModel
class Student(BaseModel):
"""
Student Model.
A model that has the responsibility to save and maintain student's information.
it's assumed that by the lack of a real way to identify the uniqueness between
each student, the name will be not-repeatable.
:@attr {UUID} id - unique identification for the instance/record (inherited from BaseModel).
:@attr {String} name - name of the instance.
:@attr {datetime} created_at - date and time of the record creation (inherited from BaseModel).
:@attr {datetime} updated_at - date and time of the record edition (inherited from BaseModel).
"""
name = models.CharField(max_length=20, unique=True, blank=False, null=False)
class Meta:
"""Metadata options for Student."""
verbose_name_plural = 'students'
app_label = 'django_backend_template'
| 29.789474
| 99
| 0.713781
|
4a0af2bde9867ad39e81e0b3ff35f5a48538b1f6
| 3,066
|
py
|
Python
|
bauh/view/util/disk.py
|
leoneii/bauh
|
ceef6c30851552ec37e21ef6335a4cbdd126622f
|
[
"Zlib"
] | 1
|
2020-06-16T17:08:32.000Z
|
2020-06-16T17:08:32.000Z
|
bauh/view/util/disk.py
|
octopusSD/bauh
|
c1f210fef87ddb4614b201ec2030330b71e43fe4
|
[
"Zlib"
] | null | null | null |
bauh/view/util/disk.py
|
octopusSD/bauh
|
c1f210fef87ddb4614b201ec2030330b71e43fe4
|
[
"Zlib"
] | null | null | null |
import json
import logging
import os
import time
from threading import Thread, Lock
from typing import Type, Dict
import yaml
from bauh.api.abstract.cache import MemoryCache
from bauh.api.abstract.disk import DiskCacheLoader, DiskCacheLoaderFactory
from bauh.api.abstract.model import SoftwarePackage
class AsyncDiskCacheLoader(Thread, DiskCacheLoader):
def __init__(self, enabled: bool, cache_map: Dict[Type[SoftwarePackage], MemoryCache], logger: logging.Logger):
super(AsyncDiskCacheLoader, self).__init__(daemon=True)
self.pkgs = []
self._work = True
self.lock = Lock()
self.cache_map = cache_map
self.enabled = enabled
self.logger = logger
self.processed = 0
def fill(self, pkg: SoftwarePackage):
"""
Adds a package which data must be read from the disk to a queue.
:param pkg:
:return:
"""
if self.enabled and pkg and pkg.supports_disk_cache():
self.pkgs.append(pkg)
def stop_working(self):
self._work = False
def run(self):
if self.enabled:
last = 0
while True:
time.sleep(0.00001)
if len(self.pkgs) > self.processed:
pkg = self.pkgs[last]
self._fill_cached_data(pkg)
self.processed += 1
last += 1
elif not self._work:
break
def _fill_cached_data(self, pkg: SoftwarePackage) -> bool:
if self.enabled:
if os.path.exists(pkg.get_disk_data_path()):
disk_path = pkg.get_disk_data_path()
ext = disk_path.split('.')[-1]
with open(disk_path) as f:
if ext == 'json':
cached_data = json.loads(f.read())
elif ext in {'yml', 'yaml'}:
cached_data = yaml.load(f.read())
else:
raise Exception('The cached data file {} has an unsupported format'.format(disk_path))
if cached_data:
pkg.fill_cached_data(cached_data)
cache = self.cache_map.get(pkg.__class__)
if cache:
cache.add_non_existing(pkg.id, cached_data)
return True
return False
class DefaultDiskCacheLoaderFactory(DiskCacheLoaderFactory):
    """Factory producing AsyncDiskCacheLoader instances that share one cache map."""

    def __init__(self, disk_cache_enabled: bool, logger: logging.Logger):
        super(DefaultDiskCacheLoaderFactory, self).__init__()
        self.disk_cache_enabled = disk_cache_enabled
        self.logger = logger
        self.cache_map = {}

    def map(self, pkg_type: Type[SoftwarePackage], cache: MemoryCache):
        # register the cache only once per package type
        if pkg_type and pkg_type not in self.cache_map:
            self.cache_map[pkg_type] = cache

    def new(self) -> AsyncDiskCacheLoader:
        return AsyncDiskCacheLoader(enabled=self.disk_cache_enabled, cache_map=self.cache_map, logger=self.logger)
| 32.617021
| 115
| 0.587084
|
4a0af4519dfd919a25b7484520628165bf93f1e5
| 188
|
py
|
Python
|
api/app/utilities/extensions/bcrypt.py
|
wafer-bw/generic-flask-api
|
b4ad6ac1ef3cab2d4e91f13a1fd1664f9de1fe6c
|
[
"MIT"
] | 2
|
2019-11-08T16:57:44.000Z
|
2020-01-30T18:24:24.000Z
|
api/app/utilities/extensions/bcrypt.py
|
wafer-bw/generic-flask-api
|
b4ad6ac1ef3cab2d4e91f13a1fd1664f9de1fe6c
|
[
"MIT"
] | null | null | null |
api/app/utilities/extensions/bcrypt.py
|
wafer-bw/generic-flask-api
|
b4ad6ac1ef3cab2d4e91f13a1fd1664f9de1fe6c
|
[
"MIT"
] | 1
|
2019-11-04T13:30:03.000Z
|
2019-11-04T13:30:03.000Z
|
"""
Flask bcrypt Extension
https://flask-bcrypt.readthedocs.io/en/latest/
This must be registered to the app within app/factory.py
"""
from flask_bcrypt import Bcrypt
bcrypt = Bcrypt()
| 17.090909
| 56
| 0.760638
|
4a0af467aedd0132c85fcf3831c29299d2e6a22f
| 1,422
|
py
|
Python
|
processing/osm/utils.py
|
fieldmaps/styles
|
c2a5b63a5117d9e46f775dd98876e040186c1de7
|
[
"MIT"
] | null | null | null |
processing/osm/utils.py
|
fieldmaps/styles
|
c2a5b63a5117d9e46f775dd98876e040186c1de7
|
[
"MIT"
] | null | null | null |
processing/osm/utils.py
|
fieldmaps/styles
|
c2a5b63a5117d9e46f775dd98876e040186c1de7
|
[
"MIT"
] | null | null | null |
import logging

# Configure logging once, at import time, for the OSM processing scripts.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(name)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S')

# MAX_ZOOM = 13
MAX_ZOOM = 10  # highest zoom level produced

# (zoom, threshold) pairs for polygon layers — presumably a minimum feature
# area per zoom level; TODO confirm against the consuming processing code.
polygon_steps = [
    (0, 65536e5),
    (3, 16384e5),
    (4, 4096e5),
    (5, 1024e5),
    (6, 256e5),
    (7, 64e5),
    (8, 16e5),
    (9, 4e5),
    (10, 1e5),
]

# (zoom, threshold) pairs for point layers; 9e99 at zoom 0 is effectively
# "filter everything" at that level.
point_steps = [
    (0, 9e99),
    (1, 1048576e2),
    (2, 262144e2),
    (3, 65536e2),
    (4, 16384e2),
    (5, 4096e2),
    (6, 1024e2),
    (7, 256e2),
    (8, 64e2),
    (9, 16e2),
    (10, 4e2)
]

# Layer names grouped by geometry type.
polygons = ['water', 'wetlands']
lines = ['rivers', 'rails']
points = ['health', 'education', 'markets', 'airports', 'seaports']

# (sub-layer, SQL filter) pairs splitting roads by importance class.
roads = [
    ('trunk', "type IN ('motorway','motorway_link','trunk','trunk_link')"),
    ('primary', "type IN ('primary','primary_link')"),
    ('secondary', "type IN ('secondary','secondary_link')"),
    ('tertiary', "type IN ('tertiary','tertiary_link')")
]

# (sub-layer, SQL filter) pairs splitting populated places by type.
places = [
    ('city', "type = 'city'"),
    ('town', "type = 'town'"),
    ('village', "type NOT IN ('city','town')"),
]

# Zoom levels at which each geometry group is generated.
polygon_zooms = [0, 3, 4, 5, 6, 7, 8, 9, 10]
point_zooms = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

# (layer, [(sub-layer, minimum zoom), ...]) for line layers.
lines_zooms = [
    ('rivers', [('all', 10)]),
    ('rails', [('all', 5)]),
    ('roads',
     [('trunk', 4), ('primary', 5), ('secondary', 7), ('tertiary', 9)]),
]

# Minimum zoom per place type.
places_zooms = [
    ('city', 6),
    ('town', 8),
    ('village', 10)
]
| 21.223881
| 75
| 0.491561
|
4a0af4f36b989a8dbc261eb8d0a3b7a0c0025b94
| 10,242
|
py
|
Python
|
integration_tests/test_full_ingestion.py
|
Kirill888/datacube-core
|
996b395e15f975decb77c0ca9fa0555177674b2f
|
[
"Apache-2.0"
] | 1
|
2020-04-15T16:10:27.000Z
|
2020-04-15T16:10:27.000Z
|
integration_tests/test_full_ingestion.py
|
Kirill888/datacube-core
|
996b395e15f975decb77c0ca9fa0555177674b2f
|
[
"Apache-2.0"
] | null | null | null |
integration_tests/test_full_ingestion.py
|
Kirill888/datacube-core
|
996b395e15f975decb77c0ca9fa0555177674b2f
|
[
"Apache-2.0"
] | null | null | null |
import hashlib
import warnings
from uuid import UUID
import netCDF4
import pytest
import yaml
import rasterio
from affine import Affine
from datacube.api.query import query_group_by
from datacube.utils import geometry, read_documents, netcdf_extract_string
from integration_tests.utils import prepare_test_ingestion_configuration, GEOTIFF
from integration_tests.test_end_to_end import INGESTER_CONFIGS
EXPECTED_STORAGE_UNIT_DATA_SHAPE = (1, 40, 40)
COMPLIANCE_CHECKER_NORMAL_LIMIT = 2
@pytest.mark.timeout(20)
@pytest.mark.parametrize('datacube_env_name', ('datacube',), indirect=True)
@pytest.mark.usefixtures('default_metadata_type',
                         'indexed_ls5_scene_products')
def test_full_ingestion(clirunner, index, tmpdir, example_ls5_dataset_paths, ingest_configs):
    """End-to-end ingestion: index the sample LS5 scenes, ingest them into
    ls5_nbar_albers, then validate both the index contents and the produced
    NetCDF storage unit.
    """
    config = INGESTER_CONFIGS/ingest_configs['ls5_nbar_albers']
    config_path, config = prepare_test_ingestion_configuration(tmpdir, None, config, mode='fast_ingest')

    # Index every sample scene through the CLI, remembering its UUID.
    valid_uuids = []
    for uuid, example_ls5_dataset_path in example_ls5_dataset_paths.items():
        valid_uuids.append(uuid)
        clirunner([
            'dataset',
            'add',
            str(example_ls5_dataset_path)
        ])

    ensure_datasets_are_indexed(index, valid_uuids)

    # TODO(csiro) Set time dimension when testing
    # config['storage']['tile_size']['time'] = 2

    clirunner([
        'ingest',
        '--config-file',
        str(config_path)
    ])

    datasets = index.datasets.search_eager(product='ls5_nbar_albers')
    assert len(datasets) > 0
    assert datasets[0].managed

    check_open_with_api(index, len(valid_uuids))
    check_data_with_api(index, len(valid_uuids))

    # NetCDF specific checks, based on the saved NetCDF file
    ds_path = str(datasets[0].local_path)
    with netCDF4.Dataset(ds_path) as nco:
        check_data_shape(nco)
        check_grid_mapping(nco)
        check_cf_compliance(nco)
        check_dataset_metadata_in_storage_unit(nco, example_ls5_dataset_paths)
        check_attributes(nco, config['global_attributes'])

        name = config['measurements'][0]['name']
        check_attributes(nco[name], config['measurements'][0]['attrs'])
    check_open_with_xarray(ds_path)
@pytest.mark.timeout(20)
@pytest.mark.parametrize('datacube_env_name', ('datacube',), indirect=True)
@pytest.mark.usefixtures('default_metadata_type',
                         'indexed_ls5_scene_products')
def test_process_all_ingest_jobs(clirunner, index, tmpdir, example_ls5_dataset_paths, ingest_configs):
    """
    Test for the case where ingestor processes upto `--queue-size` number of tasks and not all the available scenes
    """
    # Make a test ingestor configuration
    config = INGESTER_CONFIGS / ingest_configs['ls5_nbar_albers']
    config_path, config = prepare_test_ingestion_configuration(tmpdir, None,
                                                               config, mode='fast_ingest')

    def index_dataset(path):
        # index a single dataset through the CLI
        return clirunner(['dataset', 'add', str(path)])

    # Number of scenes generated is 3 (as per NUM_TIME_SLICES const from conftest.py)
    # Set the queue size to process 2 tiles
    queue_size = 2
    valid_uuids = []
    for uuid, ls5_dataset_path in example_ls5_dataset_paths.items():
        valid_uuids.append(uuid)
        index_dataset(ls5_dataset_path)

    # Ensure that datasets are actually indexed
    ensure_datasets_are_indexed(index, valid_uuids)

    # Ingest all scenes (Though the queue size is 2, all 3 tiles will be ingested)
    clirunner([
        'ingest',
        '--config-file',
        str(config_path),
        '--queue-size',
        queue_size,
        '--allow-product-changes',
    ])

    # Validate that the ingestion is working as expected
    datasets = index.datasets.search_eager(product='ls5_nbar_albers')
    assert len(datasets) > 0
    assert datasets[0].managed

    check_open_with_api(index, len(valid_uuids))

    # NetCDF specific checks, based on the saved NetCDF file
    ds_path = str(datasets[0].local_path)
    with netCDF4.Dataset(ds_path) as nco:
        check_data_shape(nco)
        check_grid_mapping(nco)
        check_cf_compliance(nco)
        check_dataset_metadata_in_storage_unit(nco, example_ls5_dataset_paths)
        check_attributes(nco, config['global_attributes'])

        name = config['measurements'][0]['name']
        check_attributes(nco[name], config['measurements'][0]['attrs'])
    check_open_with_xarray(ds_path)
def ensure_datasets_are_indexed(index, valid_uuids):
    """Assert that exactly the datasets with the given UUIDs are indexed."""
    found = index.datasets.search_eager(product='ls5_nbar_scene')
    assert len(found) == len(valid_uuids)
    assert all(ds.id in valid_uuids for ds in found)
def check_grid_mapping(nco):
    """Verify the 'blue' variable references a CRS variable with georeferencing attrs."""
    blue = nco.variables['blue']
    assert 'grid_mapping' in blue.ncattrs()

    crs_var_name = blue.grid_mapping
    assert crs_var_name in nco.variables

    crs_var = nco.variables[crs_var_name]
    assert 'GeoTransform' in crs_var.ncattrs()
    assert 'spatial_ref' in crs_var.ncattrs()
def check_data_shape(nco):
    # One time slice of 40x40 pixels, per EXPECTED_STORAGE_UNIT_DATA_SHAPE.
    assert nco.variables['blue'].shape == EXPECTED_STORAGE_UNIT_DATA_SHAPE
def check_cf_compliance(dataset):
    """Run the optional IOOS compliance-checker CF checks on an open netCDF4
    Dataset; silently skip when the checker is missing or too old.
    """
    try:
        from compliance_checker.runner import CheckSuite, ComplianceChecker
        import compliance_checker
    except ImportError:
        warnings.warn('compliance_checker unavailable, skipping NetCDF-CF Compliance Checks')
        return

    # NOTE(review): lexicographic string comparison of versions — e.g.
    # '10.0.0' < '4.0.0' would wrongly skip; consider packaging.version.
    if compliance_checker.__version__ < '4.0.0':
        warnings.warn('Please upgrade compliance-checker to 4+ version')
        warnings.warn('compliance_checker version is too old, skipping NetCDF-CF Compliance Checks')
        return

    # checks deliberately excluded from the CF suite
    skip = ['check_dimension_order',
            'check_all_features_are_same_type',
            'check_conventions_version',
            'check_appendix_a']
    cs = CheckSuite()
    cs.load_all_available_checkers()
    score_groups = cs.run(dataset, skip, 'cf')
    score_dict = {dataset.filepath(): score_groups}

    groups = ComplianceChecker.stdout_output(cs, score_dict, verbose=1, limit=COMPLIANCE_CHECKER_NORMAL_LIMIT)
    assert cs.passtree(groups, limit=COMPLIANCE_CHECKER_NORMAL_LIMIT)
def check_attributes(obj, attrs):
    """Assert each expected attribute exists on the netCDF object with the right value."""
    present = obj.ncattrs()
    for name, expected in attrs.items():
        assert name in present
        assert obj.getncattr(name) == expected
def check_dataset_metadata_in_storage_unit(nco, dataset_dirs):
    """Check one of the NetCDF files metadata against the original
    metadata."""
    assert len(nco.variables['dataset']) == 1  # 1 time slice
    stored_metadata = netcdf_extract_string(nco.variables['dataset'][0])
    stored = yaml.safe_load(stored_metadata)

    # the stored lineage must reference exactly one source dataset, keyed '0'
    assert 'lineage' in stored
    assert 'source_datasets' in stored['lineage']
    assert '0' in stored['lineage']['source_datasets']
    assert 'id' in stored['lineage']['source_datasets']['0']
    source_uuid = UUID(stored['lineage']['source_datasets']['0']['id'])
    assert source_uuid in dataset_dirs

    # the embedded source metadata must match the on-disk agdc-metadata.yaml
    ds_filename = dataset_dirs[source_uuid] / 'agdc-metadata.yaml'
    [(_, original)] = read_documents(ds_filename)
    assert len(stored['lineage']['source_datasets']) == 1
    assert next(iter(stored['lineage']['source_datasets'].values())) == original
def check_open_with_xarray(file_path):
    # Smoke test: the produced NetCDF file opens cleanly via xarray.
    import xarray
    xarray.open_dataset(str(file_path))
def check_open_with_api(index, time_slices):
    """Load the ingested product through the Datacube API, verify the array
    shape, and confirm lazy (dask) loading matches the eager result.

    :param index: datacube index fixture.
    :param time_slices: expected length of the time dimension.
    """
    with rasterio.Env():
        from datacube import Datacube
        dc = Datacube(index=index)

        input_type_name = 'ls5_nbar_albers'
        input_type = dc.index.products.get_by_name(input_type_name)
        geobox = geometry.GeoBox(200, 200, Affine(25, 0.0, 638000, 0.0, -25, 6276000), geometry.CRS('EPSG:28355'))
        observations = dc.find_datasets(product='ls5_nbar_albers', geopolygon=geobox.extent)
        group_by = query_group_by('time')
        sources = dc.group_datasets(observations, group_by)
        data = dc.load_data(sources, geobox, input_type.measurements.values())
        assert data.blue.shape == (time_slices, 200, 200)

        # same load, but lazy through dask chunks
        chunk_profile = {'time': 1, 'x': 100, 'y': 100}
        lazy_data = dc.load_data(sources, geobox, input_type.measurements.values(), dask_chunks=chunk_profile)
        assert lazy_data.blue.shape == (time_slices, 200, 200)
        # lazy result must be identical to the eagerly-loaded one
        assert (lazy_data.blue.load() == data.blue).all()
def check_data_with_api(index, time_slices):
    """Check retrieved data for specific values.

    We scale down by 100 and check for predefined values in the
    corners.
    """
    from datacube import Datacube
    dc = Datacube(index=index)

    # TODO: this test needs to change, it tests that results are exactly the
    #       same as some time before, but with the current zoom out factor it's
    #       hard to verify that results are as expected even with human
    #       judgement. What it should test is that reading native from the
    #       ingested product gives exactly the same results as reading into the
    #       same GeoBox from the original product. Separate to that there
    #       should be a read test that confirms that what you read from native
    #       product while changing projection is of expected value

    # Make the retrieved data lower res
    ss = 100
    shape_x = int(GEOTIFF['shape']['x'] / ss)
    shape_y = int(GEOTIFF['shape']['y'] / ss)
    pixel_x = int(GEOTIFF['pixel_size']['x'] * ss)
    pixel_y = int(GEOTIFF['pixel_size']['y'] * ss)

    input_type_name = 'ls5_nbar_albers'
    input_type = dc.index.products.get_by_name(input_type_name)
    geobox = geometry.GeoBox(shape_x + 2, shape_y + 2,
                             Affine(pixel_x, 0.0, GEOTIFF['ul']['x'], 0.0, pixel_y, GEOTIFF['ul']['y']),
                             geometry.CRS(GEOTIFF['crs']))
    observations = dc.find_datasets(product='ls5_nbar_albers', geopolygon=geobox.extent)
    group_by = query_group_by('time')
    sources = dc.group_datasets(observations, group_by)
    data = dc.load_data(sources, geobox, input_type.measurements.values())

    # md5 of the raw arrays pins the exact expected pixel values (regression check)
    assert hashlib.md5(data.green.data).hexdigest() == '0f64647bad54db4389fb065b2128025e'
    assert hashlib.md5(data.blue.data).hexdigest() == '41a7b50dfe5c4c1a1befbc378225beeb'
    for time_slice in range(time_slices):
        # -999 is the nodata value expected at the padded corner
        assert data.blue.values[time_slice][-1, -1] == -999
| 39.392308
| 115
| 0.699668
|
4a0af6e9bf851c31a68f46e16143097bd281eb6a
| 2,223
|
py
|
Python
|
prjxray/state_gen.py
|
rw1nkler/prjxray
|
aff076b47dcf6d653eb3ce791b41fd6cf4343edd
|
[
"ISC"
] | 583
|
2017-12-21T11:06:13.000Z
|
2022-02-20T21:27:33.000Z
|
prjxray/state_gen.py
|
rw1nkler/prjxray
|
aff076b47dcf6d653eb3ce791b41fd6cf4343edd
|
[
"ISC"
] | 1,212
|
2017-12-22T15:05:06.000Z
|
2022-02-19T13:04:59.000Z
|
prjxray/state_gen.py
|
mfkiwl/prjxray-xilinx-7-bitstream-fortmat
|
5349556bc2c230801d6df0cf11bccb9cfd171639
|
[
"ISC"
] | 134
|
2017-12-21T10:16:50.000Z
|
2022-02-16T06:42:04.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
from prjxray import util
class StateGen(object):
    """ Manages fuzzer state generation across multiple sites.

    sites - List of sites.
    states_per_site - Maximum number of states used per site.

    If states_per_site is too small, next_state may throw AssertionError.

    StateGen should be used as a iterator for the sites given. Call next_state
    within each site output loop. Once 'next' is called on StateGen, StateGen
    will advance state output to the correct position, even if next_state was
    called less than states_per_site.
    """

    def __init__(self, sites, states_per_site):
        self.sites = sites
        self.states_per_site = states_per_site
        self.curr_site_idx = 0
        self.curr_state = None
        self.states = None
        self.curr_site = None

    def __iter__(self):
        # A StateGen is single-use: iteration must not have started yet.
        # (The original repeated the curr_state assertion twice.)
        assert self.curr_state is None
        assert self.states is None

        self.curr_site_idx = 0
        self.curr_state = None
        self.states = util.gen_fuzz_states(
            len(self.sites) * self.states_per_site)
        self.curr_site = iter(self.sites)

        return self

    def __next__(self):
        next_site = next(self.curr_site)
        self.curr_site_idx += 1

        if self.curr_state is not None:
            # Drain any states the previous site did not consume, so each
            # site starts at a fixed offset into the fuzz state stream.
            while self.curr_state < self.states_per_site:
                self.next_state()

            assert self.curr_state == self.states_per_site, self.curr_state

        self.curr_state = 0

        return next_site

    def next_state(self):
        """ Returns next state within site.

        Should only be called states_per_site times for each site.

        :raises AssertionError: when the state budget is exhausted.
        """
        self.curr_state += 1
        try:
            state = next(self.states)
        except StopIteration:
            # Explicit raise instead of `assert False`, so the guard still
            # fires when run under `python -O` (which strips asserts).
            raise AssertionError(
                "Insufficent states, at state {} for site {}".format(
                    self.curr_state, self.curr_site_idx))

        return state
| 28.5
| 79
| 0.647773
|
4a0af704336d2f97ea96469140fc9a295d97c5c1
| 4,248
|
py
|
Python
|
src/oci/mysql/models/update_analytics_cluster_details.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 249
|
2017-09-11T22:06:05.000Z
|
2022-03-04T17:09:29.000Z
|
src/oci/mysql/models/update_analytics_cluster_details.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 228
|
2017-09-11T23:07:26.000Z
|
2022-03-23T10:58:50.000Z
|
src/oci/mysql/models/update_analytics_cluster_details.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 224
|
2017-09-27T07:32:43.000Z
|
2022-03-25T16:55:42.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateAnalyticsClusterDetails(object):
    """
    DEPRECATED -- please use HeatWave API instead.
    Details about the Analytics Cluster properties to be updated.
    """

    def __init__(self, **kwargs):
        """
        Initializes a new UpdateAnalyticsClusterDetails object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param shape_name:
            The value to assign to the shape_name property of this UpdateAnalyticsClusterDetails.
        :type shape_name: str

        :param cluster_size:
            The value to assign to the cluster_size property of this UpdateAnalyticsClusterDetails.
        :type cluster_size: int
        """
        # Swagger metadata used by the OCI SDK (de)serialization machinery.
        self.swagger_types = {'shape_name': 'str', 'cluster_size': 'int'}
        self.attribute_map = {'shape_name': 'shapeName', 'cluster_size': 'clusterSize'}

        self._shape_name = None
        self._cluster_size = None

    @property
    def shape_name(self):
        """
        Gets the shape_name of this UpdateAnalyticsClusterDetails.

        Changing the node shape tears the whole cluster down and re-creates
        it on Compute instances of the new shape; this may cause significant
        downtime for the analytics capability while the Analytics Cluster is
        re-provisioned.

        :return: The shape_name of this UpdateAnalyticsClusterDetails.
        :rtype: str
        """
        return self._shape_name

    @shape_name.setter
    def shape_name(self, shape_name):
        """
        Sets the shape_name of this UpdateAnalyticsClusterDetails.

        See the property docstring for the re-provisioning implications.

        :param shape_name: The shape_name of this UpdateAnalyticsClusterDetails.
        :type: str
        """
        self._shape_name = shape_name

    @property
    def cluster_size(self):
        """
        Gets the cluster_size of this UpdateAnalyticsClusterDetails.

        Changing the node count tears the whole cluster down and re-creates
        it with the new set of nodes; this may cause significant downtime for
        the analytics capability while the Analytics Cluster is
        re-provisioned.

        :return: The cluster_size of this UpdateAnalyticsClusterDetails.
        :rtype: int
        """
        return self._cluster_size

    @cluster_size.setter
    def cluster_size(self, cluster_size):
        """
        Sets the cluster_size of this UpdateAnalyticsClusterDetails.

        See the property docstring for the re-provisioning implications.

        :param cluster_size: The cluster_size of this UpdateAnalyticsClusterDetails.
        :type: int
        """
        self._cluster_size = cluster_size

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # Value equality over the full attribute dict; None never compares equal.
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| 35.697479
| 245
| 0.680556
|
4a0af7198704a240a11049a75e709b07d96499e4
| 529
|
py
|
Python
|
config/re_token.py
|
Four-sun/Requests_Load
|
472f3f6d9bd407f1c4ed30a5557ec141e2434188
|
[
"Apache-2.0"
] | null | null | null |
config/re_token.py
|
Four-sun/Requests_Load
|
472f3f6d9bd407f1c4ed30a5557ec141e2434188
|
[
"Apache-2.0"
] | null | null | null |
config/re_token.py
|
Four-sun/Requests_Load
|
472f3f6d9bd407f1c4ed30a5557ec141e2434188
|
[
"Apache-2.0"
] | null | null | null |
# coding:utf-8
"""
Created: on 2018-07-11
@author: Four
Project: config\re_token.py
Description:token值读取
"""
import yaml
import os
current_path = os.path.dirname(os.path.realpath(__file__))
def get_token(yamlName="token.yaml"):
    """
    Read the token value from a YAML configuration file next to this module.

    :param yamlName: configuration file name (resolved relative to this package)
    :return: the parsed token value
    """
    path = os.path.join(current_path, yamlName)
    # Context manager guarantees the handle is closed even if parsing raises
    # (the original leaked the handle on a yaml error).
    with open(path) as file:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # can execute arbitrary constructors on untrusted input.
        return yaml.safe_load(file.read())


if __name__ == "__main__":
    print(get_token())
| 18.241379
| 58
| 0.663516
|
4a0af8d26d1e6e8a0b0bf7f5a4a5559a622d0f99
| 870
|
py
|
Python
|
etc/pip_install/toree/_version.py
|
hmost1/incubator-toree
|
f58a86f2a9466c122d52ed5abd8122c8ce40b2c5
|
[
"Apache-2.0"
] | 762
|
2016-01-07T18:33:03.000Z
|
2022-03-30T13:15:42.000Z
|
etc/pip_install/toree/_version.py
|
hmost1/incubator-toree
|
f58a86f2a9466c122d52ed5abd8122c8ce40b2c5
|
[
"Apache-2.0"
] | 157
|
2016-01-16T15:19:16.000Z
|
2022-02-10T03:21:52.000Z
|
etc/pip_install/toree/_version.py
|
hmost1/incubator-toree
|
f58a86f2a9466c122d52ed5abd8122c8ce40b2c5
|
[
"Apache-2.0"
] | 276
|
2016-01-11T21:39:30.000Z
|
2022-03-13T14:50:04.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is populated when making the pip package. It should be empty by default.
| 45.789474
| 84
| 0.773563
|
4a0afbe20399dd96c3a4b154eb5c30b349267b05
| 28,304
|
py
|
Python
|
ludwig/data/preprocessing.py
|
istefano82/alabala
|
025e5796a1ce3feab8d783782cb57f5be3136dec
|
[
"Apache-2.0"
] | null | null | null |
ludwig/data/preprocessing.py
|
istefano82/alabala
|
025e5796a1ce3feab8d783782cb57f5be3136dec
|
[
"Apache-2.0"
] | 6
|
2020-01-28T22:42:31.000Z
|
2022-02-10T00:16:09.000Z
|
ludwig/data/preprocessing.py
|
istefano82/ludwig_clone
|
025e5796a1ce3feab8d783782cb57f5be3136dec
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import logging
import os
import h5py
import numpy as np
import yaml
from ludwig.constants import *
from ludwig.constants import TEXT
from ludwig.data.concatenate_datasets import concatenate_csv
from ludwig.data.concatenate_datasets import concatenate_df
from ludwig.data.dataset import Dataset
from ludwig.features.feature_registries import base_type_registry
from ludwig.globals import MODEL_HYPERPARAMETERS_FILE_NAME
from ludwig.utils import data_utils
from ludwig.utils.data_utils import collapse_rare_labels
from ludwig.utils.data_utils import load_json
from ludwig.utils.data_utils import read_csv
from ludwig.utils.data_utils import replace_file_extension
from ludwig.utils.data_utils import split_dataset_tvt
from ludwig.utils.data_utils import text_feature_data_field
from ludwig.utils.defaults import default_preprocessing_parameters
from ludwig.utils.defaults import default_random_seed
from ludwig.utils.misc import get_from_registry
from ludwig.utils.misc import merge_dict
from ludwig.utils.misc import set_random_seed
def build_dataset(
        dataset_csv,
        features,
        global_preprocessing_parameters,
        train_set_metadata=None,
        random_seed=default_random_seed,
        **kwargs
):
    """Load a CSV file and build the processed dataset from it.

    Thin wrapper around build_dataset_df: reads the CSV into a dataframe,
    records the source path on it, and delegates the actual work.
    """
    df = read_csv(dataset_csv)
    df.csv = dataset_csv
    return build_dataset_df(
        df,
        features,
        global_preprocessing_parameters,
        train_set_metadata,
        random_seed,
        **kwargs
    )
def build_dataset_df(
        dataset_df,
        features,
        global_preprocessing_parameters,
        train_set_metadata=None,
        random_seed=default_random_seed,
        **kwargs
):
    """Build the processed data dict and training metadata from a dataframe.

    :param dataset_df: pandas DataFrame holding the raw data (one column per
        feature).
    :param features: list of feature definition dicts (input and output).
    :param global_preprocessing_parameters: user preprocessing settings,
        merged over the library-wide defaults.
    :param train_set_metadata: precomputed metadata; built from the dataframe
        when None.
    :param random_seed: seed for the random train/validation/test split.
    :return: (data dict, train_set_metadata); the data dict also contains a
        'split' array.
    """
    global_preprocessing_parameters = merge_dict(
        default_preprocessing_parameters,
        global_preprocessing_parameters
    )

    if train_set_metadata is None:
        train_set_metadata = build_metadata(
            dataset_df,
            features,
            global_preprocessing_parameters
        )

    data_val = build_data(
        dataset_df,
        features,
        train_set_metadata,
        global_preprocessing_parameters
    )

    data_val['split'] = get_split(
        dataset_df,
        force_split=global_preprocessing_parameters['force_split'],
        split_probabilities=global_preprocessing_parameters[
            'split_probabilities'
        ],
        stratify=global_preprocessing_parameters['stratify'],
        random_seed=random_seed
    )

    return data_val, train_set_metadata
def build_metadata(dataset_df, features, global_preprocessing_parameters):
    """Compute per-feature training metadata from the raw dataframe columns."""
    train_set_metadata = {}
    for feature in features:
        feature_type = feature['type']
        # each feature type registers its own metadata-extraction routine
        get_feature_meta = get_from_registry(
            feature_type,
            base_type_registry
        ).get_feature_meta

        type_defaults = global_preprocessing_parameters[feature_type]
        if 'preprocessing' in feature:
            preprocessing_parameters = merge_dict(
                type_defaults,
                feature['preprocessing']
            )
        else:
            preprocessing_parameters = type_defaults

        train_set_metadata[feature['name']] = get_feature_meta(
            dataset_df[feature['name']].astype(str),
            preprocessing_parameters
        )
    return train_set_metadata
def build_data(
        dataset_df,
        features,
        train_set_metadata,
        global_preprocessing_parameters
):
    """Transform the raw dataframe columns into model-ready data arrays.

    For each feature: resolve its preprocessing parameters (feature-level
    overrides merged over the type-level globals), fill missing values,
    record the parameters into the metadata, then delegate to the feature
    type's add_feature_data to populate the output dict.

    :param dataset_df: pandas DataFrame with the raw data.
    :param features: list of feature definition dicts.
    :param train_set_metadata: per-feature metadata dict; mutated in place to
        record each feature's preprocessing parameters.
    :param global_preprocessing_parameters: per-type preprocessing defaults.
    :return: dict mapping data field names to processed data.
    """
    data = {}
    for feature in features:
        # each feature type registers its own data-building routine
        add_feature_data = get_from_registry(
            feature['type'],
            base_type_registry
        ).add_feature_data

        if 'preprocessing' in feature:
            preprocessing_parameters = merge_dict(
                global_preprocessing_parameters[feature['type']],
                feature['preprocessing']
            )
        else:
            preprocessing_parameters = global_preprocessing_parameters[
                feature['type']
            ]

        # fill NaNs before any further processing
        handle_missing_values(
            dataset_df,
            feature,
            preprocessing_parameters
        )

        if feature['name'] not in train_set_metadata:
            train_set_metadata[feature['name']] = {}
        train_set_metadata[
            feature['name']
        ]['preprocessing'] = preprocessing_parameters

        add_feature_data(
            feature,
            dataset_df,
            data,
            train_set_metadata,
            preprocessing_parameters
        )
    return data
def handle_missing_values(dataset_df, feature, preprocessing_parameters):
    """Fill NaNs in the feature's column according to the configured strategy."""
    name = feature['name']
    column = dataset_df[name]
    strategy = preprocessing_parameters['missing_value_strategy']

    if strategy == FILL_WITH_CONST:
        dataset_df[name] = column.fillna(
            preprocessing_parameters['fill_value'],
        )
    elif strategy == FILL_WITH_MODE:
        # most frequent value of the column
        dataset_df[name] = column.fillna(
            column.value_counts().index[0],
        )
    elif strategy == FILL_WITH_MEAN:
        if feature['type'] != NUMERICAL:
            raise ValueError(
                'Filling missing values with mean is supported '
                'only for numerical types',
            )
        dataset_df[name] = column.fillna(column.mean())
    elif strategy in ('backfill', 'bfill', 'pad', 'ffill'):
        # delegate directly to the pandas fill methods of the same name
        dataset_df[name] = column.fillna(method=strategy)
    else:
        raise ValueError('Invalid missing value strategy')
def get_split(
        dataset_df,
        force_split=False,
        split_probabilities=(0.7, 0.1, 0.2),
        stratify=None,
        random_seed=default_random_seed,
):
    """Return a per-row train/validation/test split array (values 0/1/2).

    An existing 'split' column is reused unless force_split is set; otherwise
    rows are sampled randomly, optionally stratified by a column's values.
    """
    if 'split' in dataset_df and not force_split:
        return dataset_df['split']

    set_random_seed(random_seed)

    if stratify is None or stratify not in dataset_df:
        # unstratified: one draw per row
        return np.random.choice(
            3,
            len(dataset_df),
            p=split_probabilities,
        ).astype(np.int8)

    # stratified: draw the split independently within each stratum
    split = np.zeros(len(dataset_df))
    for value in dataset_df[stratify].unique():
        indices = (
            dataset_df.index[dataset_df[stratify] == value].tolist()
        )
        split[indices] = np.random.choice(
            3,
            len(indices),
            p=split_probabilities,
        ).astype(np.int8)
    return split
def load_data(
        hdf5_file_path,
        input_features,
        output_features,
        split_data=True,
        shuffle_training=False
):
    """Load a preprocessed dataset back from its HDF5 cache file.

    :param hdf5_file_path: path of the HDF5 file written during preprocessing.
    :param input_features: input feature definitions to read.
    :param output_features: output feature definitions to read.
    :param split_data: when True, partition rows by the stored 'split' column
        and return (training_set, test_set, validation_set); otherwise return
        the whole dataset dict.
    :param shuffle_training: when True, shuffle the training set in unison.
    """
    logging.info('Loading data from: {0}'.format(hdf5_file_path))
    # Load data from file
    # NOTE(review): h5py's `.value` accessor is deprecated (removed in h5py 3);
    # `dataset[()]` is the modern equivalent — confirm the pinned h5py version.
    hdf5_data = h5py.File(hdf5_file_path, 'r')
    dataset = {}
    for input_feature in input_features:
        if input_feature['type'] == TEXT:
            # text features are stored under a derived data-field name
            text_data_field = text_feature_data_field(input_feature)
            dataset[text_data_field] = hdf5_data[text_data_field].value
        else:
            dataset[input_feature['name']] = hdf5_data[
                input_feature['name']
            ].value
    for output_feature in output_features:
        if output_feature['type'] == TEXT:
            dataset[text_feature_data_field(output_feature)] = hdf5_data[
                text_feature_data_field(output_feature)
            ].value
        else:
            dataset[output_feature['name']] = hdf5_data[
                output_feature['name']].value
        if 'limit' in output_feature:
            # collapse labels rarer than the configured limit
            dataset[output_feature['name']] = collapse_rare_labels(
                dataset[output_feature['name']],
                output_feature['limit']
            )

    if not split_data:
        hdf5_data.close()
        return dataset

    split = hdf5_data['split'].value
    hdf5_data.close()
    training_set, test_set, validation_set = split_dataset_tvt(dataset, split)

    # shuffle up
    if shuffle_training:
        training_set = data_utils.shuffle_dict_unison_inplace(training_set)

    return training_set, test_set, validation_set
def load_metadata(metadata_file_path):
    """Load the train-set metadata JSON produced during preprocessing."""
    logging.info('Loading metadata from: {0}'.format(metadata_file_path))
    metadata = data_utils.load_json(metadata_file_path)
    return metadata
def preprocess_for_training(
        model_definition,
        data_df=None,
        data_train_df=None,
        data_validation_df=None,
        data_test_df=None,
        data_csv=None,
        data_train_csv=None,
        data_validation_csv=None,
        data_test_csv=None,
        data_hdf5=None,
        data_train_hdf5=None,
        data_validation_hdf5=None,
        data_test_hdf5=None,
        train_set_metadata_json=None,
        skip_save_processed_input=False,
        preprocessing_params=default_preprocessing_parameters,
        random_seed=default_random_seed
):
    """Resolve the provided data sources and return ready-to-use datasets.

    Exactly one family of sources must be provided: in-memory dataframes
    (``data_df`` or ``data_train_df``/...), raw csv files (``data_csv`` or
    ``data_train_csv``/...), or already preprocessed hdf5 files together
    with a train set metadata json. When a csv is given but a cached
    hdf5/json pair with the same base name exists on disk, the cached
    files are used instead of reprocessing.

    :param model_definition: model definition with input/output features
    :param skip_save_processed_input: if False, preprocessed csv data is
        cached next to the csvs as hdf5/json files
    :param preprocessing_params: preprocessing parameters
    :param random_seed: seed used for splitting and shuffling
    :returns: (training_dataset, validation_dataset, test_dataset,
        train_set_metadata); validation/test datasets may be None
    :raises ValueError: when no data source at all is provided
    :raises RuntimeError: when the provided combination is insufficient
    """
    # Sanity Check to make sure some data source is provided
    data_sources_provided = [data_df, data_train_df, data_csv, data_train_csv,
                             data_hdf5, data_train_hdf5]
    data_sources_not_none = [x is not None for x in data_sources_provided]
    if not any(data_sources_not_none):
        raise ValueError('No training data is provided!')

    # Check if hdf5 and json already exist. If they do, use the hdf5 data,
    # instead of the csvs
    data_hdf5_fp = None
    if data_csv is not None:
        data_hdf5_fp = replace_file_extension(data_csv, 'hdf5')
        train_set_metadata_json_fp = replace_file_extension(data_csv, 'json')
        if os.path.isfile(data_hdf5_fp) and os.path.isfile(
                train_set_metadata_json_fp):
            logging.info(
                'Found hdf5 and json with the same filename '
                'of the csv, using them instead'
            )
            data_csv = None
            data_hdf5 = data_hdf5_fp
            train_set_metadata_json = train_set_metadata_json_fp

    if data_train_csv is not None:
        data_train_hdf5_fp = replace_file_extension(data_train_csv, 'hdf5')
        train_set_metadata_json_fp = replace_file_extension(
            data_train_csv,
            'json',
        )
        if os.path.isfile(data_train_hdf5_fp) and os.path.isfile(
                train_set_metadata_json_fp):
            logging.info(
                'Found hdf5 and json with the same filename of '
                'the train csv, using them instead'
            )
            data_train_csv = None
            data_train_hdf5 = data_train_hdf5_fp
            train_set_metadata_json = train_set_metadata_json_fp

    if data_validation_csv is not None:
        data_validation_hdf5_fp = replace_file_extension(
            data_validation_csv,
            'hdf5'
        )
        if os.path.isfile(data_validation_hdf5_fp):
            logging.info(
                'Found hdf5 with the same filename of '
                'the validation csv, using it instead'
            )
            data_validation_csv = None
            data_validation_hdf5 = data_validation_hdf5_fp

    if data_test_csv is not None:
        data_test_hdf5_fp = replace_file_extension(data_test_csv, 'hdf5')
        if os.path.isfile(data_test_hdf5_fp):
            logging.info(
                'Found hdf5 with the same filename of '
                'the test csv, using it instead'
            )
            data_test_csv = None
            data_test_hdf5 = data_test_hdf5_fp

    # stash the cached hdf5 path (or None) on the model definition so
    # downstream code can locate the preprocessed data
    model_definition['data_hdf5_fp'] = data_hdf5_fp

    # Decide if to preprocess or just load
    features = (model_definition['input_features'] +
                model_definition['output_features'])

    if data_df is not None or data_train_df is not None:
        # Preprocess data frames
        (
            training_set,
            test_set,
            validation_set,
            train_set_metadata
        ) = _preprocess_df_for_training(
            features,
            data_df,
            data_train_df,
            data_validation_df,
            data_test_df,
            preprocessing_params,
            random_seed
        )
    elif data_csv is not None or data_train_csv is not None:
        # Preprocess csv data
        (
            training_set,
            test_set,
            validation_set,
            train_set_metadata
        ) = _preprocess_csv_for_training(
            features,
            data_csv,
            data_train_csv,
            data_validation_csv,
            data_test_csv,
            skip_save_processed_input,
            preprocessing_params,
            random_seed
        )
    elif data_hdf5 is not None and train_set_metadata_json is not None:
        # use data and train set metadata
        # does not need preprocessing, just load
        logging.info('Using full hdf5 and json')
        training_set, test_set, validation_set = load_data(
            data_hdf5,
            model_definition['input_features'],
            model_definition['output_features'],
            shuffle_training=True
        )
        train_set_metadata = load_metadata(train_set_metadata_json)
    elif data_train_hdf5 is not None and train_set_metadata_json is not None:
        # use data and train set metadata
        # does not need preprocessing, just load
        logging.info('Using hdf5 and json')
        training_set = load_data(
            data_train_hdf5,
            model_definition['input_features'],
            model_definition['output_features'],
            split_data=False
        )
        train_set_metadata = load_metadata(train_set_metadata_json)
        if data_validation_hdf5 is not None:
            validation_set = load_data(
                data_validation_hdf5,
                model_definition['input_features'],
                model_definition['output_features'],
                split_data=False
            )
        else:
            validation_set = None
        if data_test_hdf5 is not None:
            test_set = load_data(
                data_test_hdf5,
                model_definition['input_features'],
                model_definition['output_features'],
                split_data=False
            )
        else:
            test_set = None
    else:
        raise RuntimeError('Insufficient input parameters')

    # pick the word- or char-level column for every text feature
    replace_text_feature_level(
        model_definition['input_features'] +
        model_definition['output_features'],
        [training_set, validation_set, test_set]
    )

    training_dataset = Dataset(
        training_set,
        model_definition['input_features'],
        model_definition['output_features'],
        data_hdf5_fp
    )

    validation_dataset = None
    if validation_set is not None:
        validation_dataset = Dataset(
            validation_set,
            model_definition['input_features'],
            model_definition['output_features'],
            data_hdf5_fp
        )

    test_dataset = None
    if test_set is not None:
        test_dataset = Dataset(
            test_set,
            model_definition['input_features'],
            model_definition['output_features'],
            data_hdf5_fp
        )

    return (
        training_dataset,
        validation_dataset,
        test_dataset,
        train_set_metadata
    )
def _preprocess_csv_for_training(
        features,
        data_csv=None,
        data_train_csv=None,
        data_validation_csv=None,
        data_test_csv=None,
        skip_save_processed_input=False,
        preprocessing_params=default_preprocessing_parameters,
        random_seed=default_random_seed
):
    """
    Method to pre-process csv data

    :param features: list of all features (input + output)
    :param data_csv: path to the csv data
    :param data_train_csv: training csv data
    :param data_validation_csv: validation csv data
    :param data_test_csv: test csv data
    :param skip_save_processed_input: if False, the pre-processed data is saved
    as .hdf5 files in the same location as the csvs with the same names.
    :param preprocessing_params: preprocessing parameters
    :param random_seed: random seed
    :return: training, test, validation datasets, training metadata
    """
    if data_csv is not None:
        # Use data and ignore _train, _validation and _test.
        # Also ignore data and train set metadata needs preprocessing
        logging.info(
            'Using full raw csv, no hdf5 and json file '
            'with the same name have been found'
        )
        logging.info('Building dataset (it may take a while)')
        data, train_set_metadata = build_dataset(
            data_csv,
            features,
            preprocessing_params,
            random_seed=random_seed
        )
        if not skip_save_processed_input:
            # cache the preprocessed data/metadata next to the source csv
            logging.info('Writing dataset')
            data_hdf5_fp = replace_file_extension(data_csv, 'hdf5')
            data_utils.save_hdf5(data_hdf5_fp, data, train_set_metadata)
            logging.info('Writing train set metadata with vocabulary')
            train_set_metadata_json_fp = replace_file_extension(
                data_csv,
                'json'
            )
            data_utils.save_json(
                train_set_metadata_json_fp, train_set_metadata)
        training_set, test_set, validation_set = split_dataset_tvt(
            data,
            data['split']
        )
    elif data_train_csv is not None:
        # use data_train (including _validation and _test if they are present)
        # and ignore data and train set metadata
        # needs preprocessing
        logging.info(
            'Using training raw csv, no hdf5 and json '
            'file with the same name have been found'
        )
        logging.info('Building dataset (it may take a while)')
        # concatenate so vocabulary/metadata are built over all the splits
        concatenated_df = concatenate_csv(
            data_train_csv,
            data_validation_csv,
            data_test_csv
        )
        # NOTE(review): stashes the source path as an ad-hoc attribute on
        # the DataFrame — presumably read downstream; confirm before removal
        concatenated_df.csv = data_train_csv
        data, train_set_metadata = build_dataset_df(
            concatenated_df,
            features,
            preprocessing_params,
            random_seed=random_seed
        )
        training_set, test_set, validation_set = split_dataset_tvt(
            data,
            data['split']
        )
        if not skip_save_processed_input:
            logging.info('Writing dataset')
            data_train_hdf5_fp = replace_file_extension(data_train_csv, 'hdf5')
            data_utils.save_hdf5(
                data_train_hdf5_fp,
                training_set,
                train_set_metadata
            )
            if validation_set is not None:
                data_validation_hdf5_fp = replace_file_extension(
                    data_validation_csv,
                    'hdf5'
                )
                data_utils.save_hdf5(
                    data_validation_hdf5_fp,
                    validation_set,
                    train_set_metadata
                )
            if test_set is not None:
                data_test_hdf5_fp = replace_file_extension(data_test_csv,
                                                           'hdf5')
                data_utils.save_hdf5(
                    data_test_hdf5_fp,
                    test_set,
                    train_set_metadata
                )
        logging.info('Writing train set metadata with vocabulary')
        train_set_metadata_json_fp = replace_file_extension(data_train_csv,
                                                            'json')
        data_utils.save_json(train_set_metadata_json_fp, train_set_metadata)

    return training_set, test_set, validation_set, train_set_metadata
def _preprocess_df_for_training(
        features,
        data_df=None,
        data_train_df=None,
        data_validation_df=None,
        data_test_df=None,
        preprocessing_params=default_preprocessing_parameters,
        random_seed=default_random_seed
):
    """Pre-process in-memory dataframes.

    Unlike the csv variant, this does not have the option to save the
    processed data as hdf5, since dataframe users are expected to keep
    everything in memory.

    :param features: list of all features (input + output)
    :param data_df: full dataframe (takes precedence when provided)
    :param data_train_df: training dataframe, optionally accompanied by
        validation/test dataframes which get concatenated before building
    :param preprocessing_params: preprocessing parameters
    :param random_seed: random seed
    :return: training, test, validation datasets, training metadata
    """
    if data_df is not None:
        # needs preprocessing
        logging.info('Using full dataframe')
        logging.info('Building dataset (it may take a while)')
    elif data_train_df is not None:
        # needs preprocessing; merge the splits so metadata covers them all
        logging.info('Using training dataframe')
        logging.info('Building dataset (it may take a while)')
        data_df = concatenate_df(
            data_train_df, data_validation_df, data_test_df
        )

    data, train_set_metadata = build_dataset_df(
        data_df, features, preprocessing_params, random_seed=random_seed
    )
    training_set, test_set, validation_set = split_dataset_tvt(
        data, data['split']
    )
    return training_set, test_set, validation_set, train_set_metadata
def preprocess_for_prediction(
        model_path,
        split,
        data_csv=None,
        data_hdf5=None,
        train_set_metadata=None,
        evaluate_performance=True
):
    """Preprocesses the dataset to parse it into a format that is usable by the
    Ludwig core
        :param model_path: The input data that is joined with the model
               hyperparameter file to create the model definition file
        :type model_path: Str
        :param split: Splits the data into the train and test sets
               ('full', 'training', 'validation' or 'test')
        :param data_csv: The CSV input data file
        :param data_hdf5: The hdf5 data file if there is no csv data file
        :param train_set_metadata: Path to the train set metadata JSON for
               the input features (despite the name, this is a file path
               that gets loaded below)
        :param evaluate_performance: If False does not load output features
        :returns: Dataset, Train set metadata
    """
    model_definition = load_json(
        os.path.join(model_path, MODEL_HYPERPARAMETERS_FILE_NAME)
    )
    # prediction always needs the data in memory; force in_memory on
    for input_feature in model_definition['input_features']:
        if 'preprocessing' in input_feature:
            if 'in_memory' in input_feature['preprocessing']:
                if not input_feature['preprocessing']['in_memory']:
                    logging.warning(
                        'WARNING: When running predict in_memory flag should '
                        'be true. Overriding and setting it to true for '
                        'feature <{}>'.format(input_feature['name'])
                    )
                    input_feature['preprocessing']['in_memory'] = True
    preprocessing_params = merge_dict(
        default_preprocessing_parameters,
        model_definition['preprocessing']
    )
    output_features = model_definition[
        'output_features'] if evaluate_performance else []
    features = model_definition['input_features'] + output_features

    # Check if hdf5 file already exists; prefer the cached hdf5 over the csv
    if data_csv is not None:
        data_hdf5_fp = replace_file_extension(data_csv, 'hdf5')
        if os.path.isfile(data_hdf5_fp):
            logging.info('Found hdf5 with the same filename of the csv, '
                         'using it instead')
            data_csv = None
            data_hdf5 = data_hdf5_fp
    else:
        data_hdf5_fp = None

    # Load data
    train_set_metadata = load_metadata(train_set_metadata)
    if split == 'full':
        if data_hdf5 is not None:
            dataset = load_data(
                data_hdf5,
                model_definition['input_features'],
                output_features,
                split_data=False, shuffle_training=False
            )
        else:
            dataset, train_set_metadata = build_dataset(
                data_csv,
                features,
                preprocessing_params,
                train_set_metadata=train_set_metadata
            )
    else:
        if data_hdf5 is not None:
            # load all splits, then keep only the requested one
            training, test, validation = load_data(
                data_hdf5,
                model_definition['input_features'],
                output_features,
                shuffle_training=False
            )
            if split == 'training':
                dataset = training
            elif split == 'validation':
                dataset = validation
            else:  # if split == 'test':
                dataset = test
        else:
            # csv input has no stored split; build the full dataset
            dataset, train_set_metadata = build_dataset(
                data_csv,
                features,
                preprocessing_params,
                train_set_metadata=train_set_metadata
            )

    # pick the word- or char-level column for every text feature
    replace_text_feature_level(
        features,
        [dataset]
    )

    dataset = Dataset(
        dataset,
        model_definition['input_features'],
        output_features,
        data_hdf5_fp,
    )

    return dataset, train_set_metadata
def replace_text_feature_level(features, datasets):
    """Collapse per-level text columns into a single column per feature.

    For every text feature, copies the column named
    '<name>_<level>' (where level is the feature's configured 'level')
    into '<name>', then removes both the '<name>_word' and '<name>_char'
    columns from each dataset. Datasets that are None are skipped.
    """
    for feature in features:
        if feature['type'] != TEXT:
            continue
        selected = '{}_{}'.format(feature['name'], feature['level'])
        for dataset in datasets:
            if dataset is None:
                continue
            dataset[feature['name']] = dataset[selected]
            for level in ('word', 'char'):
                leveled_name = '{}_{}'.format(feature['name'], level)
                if leveled_name in dataset:
                    del dataset[leveled_name]
if __name__ == '__main__':
    # Standalone CLI: preprocess a csv into an hdf5 dataset plus a json
    # train-set-metadata file.
    parser = argparse.ArgumentParser(
        description='This script takes csv files as input and outputs a HDF5 '
                    'and JSON file containing a dataset and the train set '
                    'metadata associated with it'
    )

    parser.add_argument(
        '-id',
        '--dataset_csv',
        help='CSV containing contacts',
        required=True
    )
    parser.add_argument(
        '-ime',
        '--train_set_metadata_json',
        help='Input JSON containing metadata'
    )
    parser.add_argument(
        '-od',
        '--output_dataset_h5',
        help='HDF5 containing output data',
        required=True
    )
    parser.add_argument(
        '-ome',
        '--output_metadata_json',
        help='JSON containing metadata',
        required=True
    )

    parser.add_argument(
        '-f',
        '--features',
        type=yaml.safe_load,
        help='list of features in the CSV to map to hdf5 and JSON files'
    )

    parser.add_argument(
        '-p',
        '--preprocessing_parameters',
        type=yaml.safe_load,
        default='{}',
        help='the parameters for preprocessing the different features'
    )

    parser.add_argument(
        '-rs',
        '--random_seed',
        type=int,
        default=42,
        help='a random seed that is going to be used anywhere there is a call '
             'to a random number generator: data splitting, parameter '
             'initialization and training set shuffling'
    )

    args = parser.parse_args()

    # NOTE(review): build_dataset is called here with
    # (csv, metadata_json, features, preprocessing_params, seed) while the
    # call sites above pass (csv, features, preprocessing_params, ...) —
    # confirm these positional arguments match build_dataset's signature.
    data, train_set_metadata = build_dataset(
        args.dataset_csv,
        args.train_set_metadata_json,
        args.features,
        args.preprocessing_parameters,
        args.random_seed
    )

    # write train set metadata, dataset
    logging.info('Writing train set metadata with vocabulary')
    data_utils.save_json(args.output_metadata_json, train_set_metadata)
    logging.info('Writing dataset')
    data_utils.save_hdf5(args.output_dataset_h5, data, train_set_metadata)
| 33.575326
| 82
| 0.610479
|
4a0afbf30f151822a86a33949589612e38d46908
| 406
|
py
|
Python
|
Project Euler Problems/Problem39.py
|
rishusingh022/My-Journey-of-Data-Structures-and-Algorithms
|
28a70fdf10366fc97ddb9f6a69852b3478b564e6
|
[
"MIT"
] | null | null | null |
Project Euler Problems/Problem39.py
|
rishusingh022/My-Journey-of-Data-Structures-and-Algorithms
|
28a70fdf10366fc97ddb9f6a69852b3478b564e6
|
[
"MIT"
] | 1
|
2021-10-01T18:26:34.000Z
|
2021-10-01T18:26:34.000Z
|
Project Euler Problems/Problem39.py
|
rishusingh022/My-Journey-of-Data-Structures-and-Algorithms
|
28a70fdf10366fc97ddb9f6a69852b3478b564e6
|
[
"MIT"
] | 7
|
2021-10-01T16:07:29.000Z
|
2021-10-04T13:23:48.000Z
|
def count_right_angle(p):
    """Count integer right triangles (a < b < c) with perimeter p.

    Instead of the O(p^2) scan over all (a, b) pairs, solve the
    Pythagorean condition algebraically: from a + b + c = p and
    a^2 + b^2 = c^2 it follows that

        b = p * (p - 2a) / (2 * (p - a))

    so for each candidate shortest side a we only need to check whether
    b comes out integral and strictly larger than a (c > b is then
    automatic, since c^2 = a^2 + b^2 > b^2). This makes the function O(p).

    :param p: perimeter (positive int)
    :return: number of right triangles with integral sides and perimeter p
    """
    ans = 0
    # a < p/3 because a is the strictly smallest of three sides summing to p
    for a in range(1, p // 3):
        num = p * (p - 2 * a)
        den = 2 * (p - a)
        if num % den == 0:
            b = num // den
            if b > a:
                ans += 1
    return ans
# Project Euler 39: among all perimeters p <= 1000, print the one that
# admits the largest number of integer-sided right triangles.
best_perimeter = 0
best_solutions = 0
for perimeter in range(1, 1001):
    solutions = count_right_angle(perimeter)
    if solutions > best_solutions:
        best_perimeter = perimeter
        best_solutions = solutions
print(best_perimeter)
| 21.368421
| 47
| 0.426108
|
4a0afc10462c85709ec5b2a7cb48e9e8479f3716
| 1,167
|
py
|
Python
|
S4/S4 Library/simulation/vet/vet_clinic_utils.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | 1
|
2021-05-20T19:33:37.000Z
|
2021-05-20T19:33:37.000Z
|
S4/S4 Library/simulation/vet/vet_clinic_utils.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | null | null | null |
S4/S4 Library/simulation/vet/vet_clinic_utils.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | null | null | null |
from event_testing.resolver import SingleSimResolver
from vet.vet_clinic_tuning import VetClinicTuning, logger
import services
def get_vet_clinic_zone_director():
    """Return the active zone director when the current venue is a vet
    clinic; otherwise return None."""
    venue_service = services.venue_service()
    if venue_service is None:
        return
    if not venue_service.venue_is_type(VetClinicTuning.VET_CLINIC_VENUE):
        return
    return venue_service.get_zone_director()
def get_bonus_payment(difficulty):
    """Return the bonus amount of the highest difficulty threshold that the
    given difficulty satisfies, or 0 when none matches."""
    return next(
        (
            bonus_item.bonus_amount
            for bonus_item in reversed(VetClinicTuning.DIFFICULTY_BONUS_PAYMENT)
            if bonus_item.threshold.compare(difficulty)
        ),
        0,
    )
def get_value_of_service_buff(markup, vet_sim_info):
    """Pick the value-of-service buff for the clinic markup and vet sim.

    Walks VALUE_OF_SERVICE_AWARDS from the highest markup threshold down,
    and within the first matching threshold walks the skill ranges from
    the highest down, returning the buff of the first range the sim passes.

    NOTE(review): implicitly returns None after logging when no
    threshold/skill range matches — callers must tolerate a None buff.
    """
    resolver = SingleSimResolver(vet_sim_info)
    for markup_tests in reversed(VetClinicTuning.VALUE_OF_SERVICE_AWARDS):
        if markup_tests.markup_threshold.compare(markup):
            for skill_tests in reversed(markup_tests.skill_to_buffs):
                # resolver(...) runs the skill-range test against the sim
                if resolver(skill_tests.skill_range):
                    return skill_tests.value_of_service_buff
    logger.error('Could not find an appropriate value of service buff for {}. Please verify there are no holes in VALUE_OF_SERVICE_AWARDS tuning', vet_sim_info)
| 46.68
| 160
| 0.774636
|
4a0afc76ae465f6bc6a857f54370e9f8e8e18e4d
| 29,697
|
py
|
Python
|
tests/package/test_manifest.py
|
World-Enterprise-Collision/platformio-core
|
c6e0c4d89d8aeaf6e733e3a668cd500fc7078e15
|
[
"Apache-2.0"
] | null | null | null |
tests/package/test_manifest.py
|
World-Enterprise-Collision/platformio-core
|
c6e0c4d89d8aeaf6e733e3a668cd500fc7078e15
|
[
"Apache-2.0"
] | null | null | null |
tests/package/test_manifest.py
|
World-Enterprise-Collision/platformio-core
|
c6e0c4d89d8aeaf6e733e3a668cd500fc7078e15
|
[
"Apache-2.0"
] | 1
|
2021-01-27T21:40:48.000Z
|
2021-01-27T21:40:48.000Z
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import tarfile
import jsondiff
import pytest
from platformio.compat import WINDOWS
from platformio.package.manifest import parser
from platformio.package.manifest.schema import ManifestSchema, ManifestValidationError
def test_library_json_parser():
    """library.json parsing: normalization of keywords, platform aliases,
    repository URLs, legacy url/exclude/include fields, and all three
    dependency declaration styles (dict, list of dicts, list of strings)."""
    contents = """
{
    "name": "TestPackage",
    "keywords": "kw1, KW2, kw3",
    "platforms": ["atmelavr", "espressif"],
    "repository": {
        "type": "git",
        "url": "http://github.com/username/repo/"
    },
    "url": "http://old.url.format",
    "exclude": [".gitignore", "tests"],
    "include": "mylib",
    "build": {
        "flags": ["-DHELLO"]
    },
    "examples": ["examples/*/*.pde"],
    "dependencies": {
        "deps1": "1.2.0",
        "deps2": "https://github.com/username/package.git",
        "owner/deps3": "^2.1.3"
    },
    "customField": "Custom Value"
}
"""
    raw_data = parser.LibraryJsonManifestParser(contents).as_dict()
    # dependency order is not guaranteed by the parser; sort for comparison
    raw_data["dependencies"] = sorted(raw_data["dependencies"], key=lambda a: a["name"])
    assert not jsondiff.diff(
        raw_data,
        {
            "name": "TestPackage",
            "platforms": ["atmelavr", "espressif8266"],
            "repository": {
                "type": "git",
                "url": "https://github.com/username/repo.git",
            },
            "export": {"exclude": [".gitignore", "tests"], "include": ["mylib"]},
            "keywords": ["kw1", "kw2", "kw3"],
            "homepage": "http://old.url.format",
            "build": {"flags": ["-DHELLO"]},
            "dependencies": [
                {"name": "deps1", "version": "1.2.0"},
                {"name": "deps2", "version": "https://github.com/username/package.git"},
                {"owner": "owner", "name": "deps3", "version": "^2.1.3"},
            ],
            "customField": "Custom Value",
        },
    )

    contents = """
{
    "keywords": ["sound", "audio", "music", "SD", "card", "playback"],
    "frameworks": "arduino",
    "platforms": "atmelavr",
    "export": {
        "exclude": "audio_samples"
    },
    "dependencies": [
        {"name": "deps1", "version": "1.0.0"},
        {"owner": "owner", "name": "deps2", "version": "1.0.0", "platforms": "*", "frameworks": "arduino, espidf"},
        {"name": "deps3", "version": "1.0.0", "platforms": ["ststm32", "sifive"]}
    ]
}
"""
    raw_data = parser.LibraryJsonManifestParser(contents).as_dict()
    raw_data["dependencies"] = sorted(raw_data["dependencies"], key=lambda a: a["name"])
    assert not jsondiff.diff(
        raw_data,
        {
            "keywords": ["sound", "audio", "music", "sd", "card", "playback"],
            "frameworks": ["arduino"],
            "export": {"exclude": ["audio_samples"]},
            "platforms": ["atmelavr"],
            "dependencies": [
                {"name": "deps1", "version": "1.0.0"},
                {
                    "owner": "owner",
                    "name": "deps2",
                    "version": "1.0.0",
                    "platforms": ["*"],
                    "frameworks": ["arduino", "espidf"],
                },
                {
                    "name": "deps3",
                    "version": "1.0.0",
                    "platforms": ["ststm32", "sifive"],
                },
            ],
        },
    )

    # bare-string dependencies are kept as plain names
    raw_data = parser.LibraryJsonManifestParser(
        '{"dependencies": ["dep1", "dep2", "owner/dep3@1.2.3"]}'
    ).as_dict()
    raw_data["dependencies"] = sorted(raw_data["dependencies"], key=lambda a: a["name"])
    assert not jsondiff.diff(
        raw_data,
        {
            "dependencies": [
                {"name": "dep1"},
                {"name": "dep2"},
                {"name": "owner/dep3@1.2.3"},
            ],
        },
    )

    # broken dependencies
    with pytest.raises(parser.ManifestParserError):
        parser.LibraryJsonManifestParser({"dependencies": ["deps1", "deps2"]})
def test_module_json_parser():
    """module.json (yotta) parsing: licenses flattened to a single license,
    author string split into name/email, and implicit mbed framework and
    export defaults applied."""
    contents = """
{
    "author": "Name Surname <name@surname.com>",
    "description": "This is Yotta library",
    "homepage": "https://yottabuild.org",
    "keywords": [
        "mbed",
        "Yotta"
    ],
    "licenses": [
        {
            "type": "Apache-2.0",
            "url": "https://spdx.org/licenses/Apache-2.0"
        }
    ],
    "name": "YottaLibrary",
    "repository": {
        "type": "git",
        "url": "git@github.com:username/repo.git"
    },
    "version": "1.2.3",
    "dependencies": {
        "usefulmodule": "^1.2.3",
        "simplelog": "ARMmbed/simplelog#~0.0.1"
    },
    "customField": "Custom Value"
}
"""
    raw_data = parser.ModuleJsonManifestParser(contents).as_dict()
    # dependency order is not guaranteed by the parser; sort for comparison
    raw_data["dependencies"] = sorted(raw_data["dependencies"], key=lambda a: a["name"])
    assert not jsondiff.diff(
        raw_data,
        {
            "name": "YottaLibrary",
            "description": "This is Yotta library",
            "homepage": "https://yottabuild.org",
            "keywords": ["mbed", "yotta"],
            "license": "Apache-2.0",
            "platforms": ["*"],
            "frameworks": ["mbed"],
            "export": {"exclude": ["tests", "test", "*.doxyfile", "*.pdf"]},
            "authors": [{"email": "name@surname.com", "name": "Name Surname"}],
            "version": "1.2.3",
            "repository": {"type": "git", "url": "git@github.com:username/repo.git"},
            "dependencies": [
                {
                    "name": "simplelog",
                    "version": "ARMmbed/simplelog#~0.0.1",
                    "frameworks": ["mbed"],
                },
                {"name": "usefulmodule", "version": "^1.2.3", "frameworks": ["mbed"]},
            ],
            "customField": "Custom Value",
        },
    )
def test_library_properties_parser():
    """Arduino library.properties parsing: author/maintainer merging,
    architecture -> platform mapping, repository inference from a remote
    URL or the url field, and depends= version constraints."""
    # Base
    contents = """
name=TestPackage
version=1.2.3
author=SomeAuthor <info AT author.com>, Maintainer Author (nickname) <www.example.com>
maintainer=Maintainer Author (nickname) <www.example.com>
sentence=This is Arduino library
customField=Custom Value
depends=First Library (=2.0.0), Second Library (>=1.2.0), Third
ignore_empty_field=
"""
    raw_data = parser.LibraryPropertiesManifestParser(contents).as_dict()
    # dependency order is not guaranteed by the parser; sort for comparison
    raw_data["dependencies"] = sorted(raw_data["dependencies"], key=lambda a: a["name"])
    assert not jsondiff.diff(
        raw_data,
        {
            "name": "TestPackage",
            "version": "1.2.3",
            "description": "This is Arduino library",
            "sentence": "This is Arduino library",
            "platforms": ["*"],
            "frameworks": ["arduino"],
            "export": {
                "exclude": ["extras", "docs", "tests", "test", "*.doxyfile", "*.pdf"]
            },
            "authors": [
                {"name": "SomeAuthor", "email": "info@author.com"},
                {"name": "Maintainer Author", "maintainer": True},
            ],
            "keywords": ["uncategorized"],
            "customField": "Custom Value",
            "depends": "First Library (=2.0.0), Second Library (>=1.2.0), Third",
            "dependencies": [
                {
                    "name": "First Library",
                    "version": "=2.0.0",
                    "frameworks": ["arduino"],
                },
                {
                    "name": "Second Library",
                    "version": ">=1.2.0",
                    "frameworks": ["arduino"],
                },
                {"name": "Third", "frameworks": ["arduino"]},
            ],
        },
    )

    # Platforms ALL
    data = parser.LibraryPropertiesManifestParser(
        "architectures=*\n" + contents
    ).as_dict()
    assert data["platforms"] == ["*"]
    # Platforms specific
    data = parser.LibraryPropertiesManifestParser(
        "architectures=avr, esp32\n" + contents
    ).as_dict()
    assert data["platforms"] == ["atmelavr", "espressif32"]

    # Remote URL: repository and include dir are inferred from the raw URL
    data = parser.LibraryPropertiesManifestParser(
        contents,
        remote_url=(
            "https://raw.githubusercontent.com/username/reponame/master/"
            "libraries/TestPackage/library.properties"
        ),
    ).as_dict()
    assert data["export"] == {
        "exclude": ["extras", "docs", "tests", "test", "*.doxyfile", "*.pdf"],
        "include": ["libraries/TestPackage"],
    }
    assert data["repository"] == {
        "url": "https://github.com/username/reponame.git",
        "type": "git",
    }

    # Home page
    data = parser.LibraryPropertiesManifestParser(
        "url=https://github.com/username/reponame.git\n" + contents
    ).as_dict()
    assert data["repository"] == {
        "type": "git",
        "url": "https://github.com/username/reponame.git",
    }

    # Author + Maintainer: same person with a broken email collapses to one
    data = parser.LibraryPropertiesManifestParser(
        """
author=Rocket Scream Electronics <broken-email.com>
maintainer=Rocket Scream Electronics
"""
    ).as_dict()
    assert data["authors"] == [
        {"name": "Rocket Scream Electronics", "maintainer": True}
    ]
def test_library_json_schema():
    """ManifestSchema validation of parsed library.json data: modern field
    normalization (keywords, authors, examples, dependencies) plus the
    legacy single-dict dependencies format."""
    contents = """
{
    "name": "ArduinoJson",
    "keywords": "JSON, rest, http, web",
    "description": "An elegant and efficient JSON library for embedded systems",
    "homepage": "https://arduinojson.org",
    "repository": {
        "type": "git",
        "url": "https://github.com/bblanchon/ArduinoJson.git"
    },
    "version": "6.12.0",
    "authors": {
        "name": "Benoit Blanchon",
        "url": "https://blog.benoitblanchon.fr"
    },
    "downloadUrl": "https://example.com/package.tar.gz",
    "exclude": [
        "fuzzing",
        "scripts",
        "test",
        "third-party"
    ],
    "frameworks": "arduino",
    "platforms": "*",
    "license": "MIT",
    "examples": [
        {
            "name": "JsonConfigFile",
            "base": "examples/JsonConfigFile",
            "files": ["JsonConfigFile.ino"]
        },
        {
            "name": "JsonHttpClient",
            "base": "examples/JsonHttpClient",
            "files": ["JsonHttpClient.ino"]
        }
    ],
    "dependencies": [
        {"name": "deps1", "version": "1.0.0"},
        {"name": "@owner/deps2", "version": "1.0.0", "frameworks": "arduino"},
        {"name": "deps3", "version": "1.0.0", "platforms": ["ststm32", "sifive"]}
    ]
}
"""
    raw_data = parser.ManifestParserFactory.new(
        contents, parser.ManifestFileType.LIBRARY_JSON
    ).as_dict()
    # dependency order is not guaranteed by the parser; sort for comparison
    raw_data["dependencies"] = sorted(raw_data["dependencies"], key=lambda a: a["name"])
    data = ManifestSchema().load_manifest(raw_data)
    assert data["repository"]["url"] == "https://github.com/bblanchon/ArduinoJson.git"
    assert data["examples"][1]["base"] == "examples/JsonHttpClient"
    assert data["examples"][1]["files"] == ["JsonHttpClient.ino"]
    assert not jsondiff.diff(
        data,
        {
            "name": "ArduinoJson",
            "keywords": ["json", "rest", "http", "web"],
            "description": "An elegant and efficient JSON library for embedded systems",
            "homepage": "https://arduinojson.org",
            "repository": {
                "url": "https://github.com/bblanchon/ArduinoJson.git",
                "type": "git",
            },
            "version": "6.12.0",
            "authors": [
                {"name": "Benoit Blanchon", "url": "https://blog.benoitblanchon.fr"}
            ],
            "downloadUrl": "https://example.com/package.tar.gz",
            "export": {"exclude": ["fuzzing", "scripts", "test", "third-party"]},
            "frameworks": ["arduino"],
            "platforms": ["*"],
            "license": "MIT",
            "examples": [
                {
                    "name": "JsonConfigFile",
                    "base": "examples/JsonConfigFile",
                    "files": ["JsonConfigFile.ino"],
                },
                {
                    "name": "JsonHttpClient",
                    "base": "examples/JsonHttpClient",
                    "files": ["JsonHttpClient.ino"],
                },
            ],
            "dependencies": [
                {"name": "@owner/deps2", "version": "1.0.0", "frameworks": ["arduino"]},
                {"name": "deps1", "version": "1.0.0"},
                {
                    "name": "deps3",
                    "version": "1.0.0",
                    "platforms": ["ststm32", "sifive"],
                },
            ],
        },
    )

    # legacy dependencies format: a single dict instead of a list
    contents = """
{
    "name": "DallasTemperature",
    "version": "3.8.0",
    "dependencies":
    {
        "name": "OneWire",
        "authors": "Paul Stoffregen",
        "frameworks": "arduino"
    }
}
"""
    raw_data = parser.LibraryJsonManifestParser(contents).as_dict()
    data = ManifestSchema().load_manifest(raw_data)
    assert not jsondiff.diff(
        data,
        {
            "name": "DallasTemperature",
            "version": "3.8.0",
            "dependencies": [
                {
                    "name": "OneWire",
                    "authors": ["Paul Stoffregen"],
                    "frameworks": ["arduino"],
                }
            ],
        },
    )
def test_library_properties_schema():
    """ManifestSchema validation of parsed library.properties data: the
    happy path (U8glib) and a manifest with a broken author field (Mozzi)
    which must raise ManifestValidationError while still yielding the
    valid part of the data."""
    contents = """
name=U8glib
version=1.19.1
author=oliver <olikraus@gmail.com>
maintainer=oliver <olikraus@gmail.com>
sentence=A library for monochrome TFTs and OLEDs
paragraph=Supported display controller: SSD1306, SSD1309, SSD1322, SSD1325
category=Display
url=https://github.com/olikraus/u8glib
architectures=avr,sam
depends=First Library (=2.0.0), Second Library (>=1.2.0), Third
"""
    raw_data = parser.ManifestParserFactory.new(
        contents, parser.ManifestFileType.LIBRARY_PROPERTIES
    ).as_dict()
    # dependency order is not guaranteed by the parser; sort for comparison
    raw_data["dependencies"] = sorted(raw_data["dependencies"], key=lambda a: a["name"])
    data = ManifestSchema().load_manifest(raw_data)
    assert not jsondiff.diff(
        data,
        {
            "description": (
                "A library for monochrome TFTs and OLEDs. Supported display "
                "controller: SSD1306, SSD1309, SSD1322, SSD1325"
            ),
            "repository": {
                "url": "https://github.com/olikraus/u8glib.git",
                "type": "git",
            },
            "frameworks": ["arduino"],
            "platforms": ["atmelavr", "atmelsam"],
            "version": "1.19.1",
            "export": {
                "exclude": ["extras", "docs", "tests", "test", "*.doxyfile", "*.pdf"]
            },
            "authors": [
                {"maintainer": True, "email": "olikraus@gmail.com", "name": "oliver"}
            ],
            "keywords": ["display"],
            "name": "U8glib",
            "dependencies": [
                {
                    "name": "First Library",
                    "version": "=2.0.0",
                    "frameworks": ["arduino"],
                },
                {
                    "name": "Second Library",
                    "version": ">=1.2.0",
                    "frameworks": ["arduino"],
                },
                {"name": "Third", "frameworks": ["arduino"]},
            ],
        },
    )

    # Broken fields (author is an over-long non-name string)
    contents = """
name=Mozzi
version=1.0.3
author=Lorem Ipsum is simply dummy text of the printing and typesetting industry Lorem Ipsum has been the industry's standard dummy text ever since the 1500s when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries but also the leap into electronic typesetting remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.
maintainer=Tim Barrass <faveflave@gmail.com>
sentence=Sound synthesis library for Arduino
paragraph=With Mozzi, you can construct sounds using familiar synthesis units like oscillators, delays, filters and envelopes.
category=Signal Input/Output
url=https://sensorium.github.io/Mozzi/
architectures=*
dot_a_linkage=false
includes=MozziGuts.h
"""
    raw_data = parser.ManifestParserFactory.new(
        contents,
        parser.ManifestFileType.LIBRARY_PROPERTIES,
        remote_url=(
            "https://raw.githubusercontent.com/sensorium/Mozzi/"
            "master/library.properties"
        ),
    ).as_dict()
    errors = None
    try:
        ManifestSchema().load_manifest(raw_data)
    except ManifestValidationError as e:
        # the exception carries both the error messages and the subset of
        # the data that validated successfully
        data = e.valid_data
        errors = e.messages
    # validation must have failed on the bogus author field
    assert errors["authors"]
    assert not jsondiff.diff(
        data,
        {
            "name": "Mozzi",
            "version": "1.0.3",
            "description": (
                "Sound synthesis library for Arduino. With Mozzi, you can construct "
                "sounds using familiar synthesis units like oscillators, delays, "
                "filters and envelopes."
            ),
            "repository": {
                "url": "https://github.com/sensorium/Mozzi.git",
                "type": "git",
            },
            "platforms": ["*"],
            "frameworks": ["arduino"],
            "export": {
                "exclude": ["extras", "docs", "tests", "test", "*.doxyfile", "*.pdf"]
            },
            "authors": [
                {
                    "maintainer": True,
                    "email": "faveflave@gmail.com",
                    "name": "Tim Barrass",
                }
            ],
            "keywords": ["signal", "input", "output"],
            "homepage": "https://sensorium.github.io/Mozzi/",
        },
    )
def test_platform_json_schema():
    """platform.json is parsed and normalized into the unified manifest model.

    Checks that ``packages`` entries become ``dependencies`` (name/version
    pairs), that the comma-separated ``keywords`` string is split into a
    list, and that ``frameworks`` collapses to a sorted list of names.
    """
    contents = """
{
  "name": "atmelavr",
  "title": "Atmel AVR",
  "description": "Atmel AVR 8- and 32-bit MCUs deliver a unique combination of performance, power efficiency and design flexibility. Optimized to speed time to market-and easily adapt to new ones-they are based on the industrys most code-efficient architecture for C and assembly programming.",
  "keywords": "arduino, atmel, avr",
  "homepage": "http://www.atmel.com/products/microcontrollers/avr/default.aspx",
  "license": "Apache-2.0",
  "engines": {
    "platformio": "<5"
  },
  "repository": {
    "type": "git",
    "url": "https://github.com/platformio/platform-atmelavr.git"
  },
  "version": "1.15.0",
  "frameworks": {
    "arduino": {
      "package": "framework-arduinoavr",
      "script": "builder/frameworks/arduino.py"
    },
    "simba": {
      "package": "framework-simba",
      "script": "builder/frameworks/simba.py"
    }
  },
  "packages": {
    "toolchain-atmelavr": {
      "type": "toolchain",
      "version": "~1.50400.0"
    },
    "framework-arduinoavr": {
      "type": "framework",
      "optional": true,
      "version": "~4.2.0"
    },
    "tool-avrdude": {
      "type": "uploader",
      "optional": true,
      "version": "~1.60300.0"
    }
  }
}
"""
    raw_data = parser.ManifestParserFactory.new(
        contents, parser.ManifestFileType.PLATFORM_JSON
    ).as_dict()
    # Sort both collections so the jsondiff comparison is order-insensitive.
    raw_data["frameworks"] = sorted(raw_data["frameworks"])
    raw_data["dependencies"] = sorted(raw_data["dependencies"], key=lambda a: a["name"])
    data = ManifestSchema().load_manifest(raw_data)
    assert not jsondiff.diff(
        data,
        {
            "name": "atmelavr",
            "title": "Atmel AVR",
            "description": (
                "Atmel AVR 8- and 32-bit MCUs deliver a unique combination of "
                "performance, power efficiency and design flexibility. Optimized to "
                "speed time to market-and easily adapt to new ones-they are based "
                "on the industrys most code-efficient architecture for C and "
                "assembly programming."
            ),
            "keywords": ["arduino", "atmel", "avr"],
            "homepage": "http://www.atmel.com/products/microcontrollers/avr/default.aspx",
            "license": "Apache-2.0",
            "repository": {
                "url": "https://github.com/platformio/platform-atmelavr.git",
                "type": "git",
            },
            "frameworks": sorted(["arduino", "simba"]),
            "version": "1.15.0",
            # Derived from "packages"; the "engines" block is dropped entirely.
            "dependencies": [
                {"name": "framework-arduinoavr", "version": "~4.2.0"},
                {"name": "tool-avrdude", "version": "~1.60300.0"},
                {"name": "toolchain-atmelavr", "version": "~1.50400.0"},
            ],
        },
    )
def test_package_json_schema():
    """package.json parsing: keyword splitting, `system` normalization and
    the npm-style shortcut repository syntax."""
    contents = """
{
  "name": "tool-scons",
  "description": "SCons software construction tool",
  "keywords": "SCons, build",
  "homepage": "http://www.scons.org",
  "system": ["linux_armv6l", "linux_armv7l", "linux_armv8l"],
  "version": "3.30101.0"
}
"""
    raw_data = parser.ManifestParserFactory.new(
        contents, parser.ManifestFileType.PACKAGE_JSON
    ).as_dict()
    data = ManifestSchema().load_manifest(raw_data)
    assert not jsondiff.diff(
        data,
        {
            "name": "tool-scons",
            "description": "SCons software construction tool",
            "keywords": ["scons", "build"],  # split on comma and lower-cased
            "homepage": "http://www.scons.org",
            "system": ["linux_armv6l", "linux_armv7l", "linux_armv8l"],
            "version": "3.30101.0",
        },
    )
    # "system": "*" / "all" mean "any system" and the field is dropped.
    mp = parser.ManifestParserFactory.new(
        '{"system": "*"}', parser.ManifestFileType.PACKAGE_JSON
    )
    assert "system" not in mp.as_dict()
    mp = parser.ManifestParserFactory.new(
        '{"system": "all"}', parser.ManifestFileType.PACKAGE_JSON
    )
    assert "system" not in mp.as_dict()
    # A single concrete system string is normalized to a one-element list.
    mp = parser.ManifestParserFactory.new(
        '{"system": "darwin_x86_64"}', parser.ManifestFileType.PACKAGE_JSON
    )
    assert mp.as_dict()["system"] == ["darwin_x86_64"]
    # shortcut repository syntax (npm-style)
    contents = """
{
  "name": "tool-github",
  "version": "1.2.0",
  "repository": "github:user/repo"
}
"""
    raw_data = parser.ManifestParserFactory.new(
        contents, parser.ManifestFileType.PACKAGE_JSON
    ).as_dict()
    data = ManifestSchema().load_manifest(raw_data)
    assert data["repository"]["url"] == "https://github.com/user/repo.git"
def test_parser_from_dir(tmpdir_factory):
    """library.json wins by default; a remote URL can select another manifest."""
    package_dir = tmpdir_factory.mktemp("package")
    package_dir.join("package.json").write('{"name": "package.json"}')
    package_dir.join("library.json").write('{"name": "library.json"}')
    package_dir.join("library.properties").write("name=library.properties")

    # With no hint, library.json has the highest priority of the three.
    manifest = parser.ManifestParserFactory.new_from_dir(str(package_dir)).as_dict()
    assert manifest["name"] == "library.json"

    # A remote URL pointing at library.properties overrides that priority.
    manifest = parser.ManifestParserFactory.new_from_dir(
        str(package_dir), remote_url="http://localhost/library.properties"
    ).as_dict()
    assert manifest["name"] == "library.properties"
def test_examples_from_dir(tmpdir_factory):
    """Example discovery: manifest globs plus auto-detected example layouts.

    Builds a package containing PlatformIO-project examples, Arduino-style
    sketches, a plain source-file example, loose files directly under
    ``examples/`` and one folder with no recognizable example files (which
    must be ignored), then checks the normalized ``examples`` list.
    """
    package_dir = tmpdir_factory.mktemp("project")
    package_dir.join("library.json").write(
        '{"name": "pkg", "version": "1.0.0", "examples": ["examples/*/*.pde"]}'
    )
    examples_dir = package_dir.mkdir("examples")
    # PlatformIO project #1
    pio_dir = examples_dir.mkdir("PlatformIO").mkdir("hello")
    pio_dir.join(".vimrc").write("")  # hidden file: must not be listed
    pio_ini = pio_dir.join("platformio.ini")
    pio_ini.write("")
    if not WINDOWS:
        # symlinked duplicate: must not be listed either
        pio_dir.join("platformio.ini.copy").mksymlinkto(pio_ini)
    pio_dir.mkdir("include").join("main.h").write("")
    pio_dir.mkdir("src").join("main.cpp").write("")
    # wiring examples
    arduino_dir = examples_dir.mkdir("1. General")
    arduino_dir.mkdir("SomeSketchIno").join("SomeSketchIno.ino").write("")
    arduino_dir.mkdir("SomeSketchPde").join("SomeSketchPde.pde").write("")
    # custom examples
    demo_dir = examples_dir.mkdir("demo")
    demo_dir.join("demo.cpp").write("")
    demo_dir.join("demo.h").write("")
    demo_dir.join("util.h").write("")
    # PlatformIO project #2
    pio_dir = examples_dir.mkdir("world")
    pio_dir.join("platformio.ini").write("")
    pio_dir.join("README").write("")
    pio_dir.join("extra.py").write("")
    pio_dir.mkdir("include").join("world.h").write("")
    pio_dir.mkdir("src").join("world.c").write("")
    # example files in root
    examples_dir.join("root.c").write("")
    examples_dir.join("root.h").write("")
    # invalid example
    examples_dir.mkdir("invalid-example").join("hello.json")
    # Do testing
    raw_data = parser.ManifestParserFactory.new_from_dir(str(package_dir)).as_dict()
    assert isinstance(raw_data["examples"], list)
    assert len(raw_data["examples"]) == 6

    def _to_unix_path(path):
        # Normalize separators so expected values also compare on Windows.
        return re.sub(r"[\\/]+", "/", path)

    def _sort_examples(items):
        # Canonical order: files sorted within each example, examples by name.
        for i, _ in enumerate(items):
            items[i]["base"] = _to_unix_path(items[i]["base"])
            items[i]["files"] = [_to_unix_path(f) for f in sorted(items[i]["files"])]
        return sorted(items, key=lambda item: item["name"])

    raw_data["examples"] = _sort_examples(raw_data["examples"])
    data = ManifestSchema().load_manifest(raw_data)
    assert not jsondiff.diff(
        data,
        {
            "version": "1.0.0",
            "name": "pkg",
            "examples": _sort_examples(
                [
                    {
                        "name": "PlatformIO/hello",
                        "base": os.path.join("examples", "PlatformIO", "hello"),
                        "files": [
                            "platformio.ini",
                            os.path.join("include", "main.h"),
                            os.path.join("src", "main.cpp"),
                        ],
                    },
                    {
                        "name": "1_General/SomeSketchIno",
                        "base": os.path.join("examples", "1. General", "SomeSketchIno"),
                        "files": ["SomeSketchIno.ino"],
                    },
                    {
                        "name": "1_General/SomeSketchPde",
                        "base": os.path.join("examples", "1. General", "SomeSketchPde"),
                        "files": ["SomeSketchPde.pde"],
                    },
                    {
                        "name": "demo",
                        "base": os.path.join("examples", "demo"),
                        "files": ["demo.h", "util.h", "demo.cpp"],
                    },
                    {
                        "name": "world",
                        "base": "examples/world",
                        "files": [
                            "platformio.ini",
                            os.path.join("include", "world.h"),
                            os.path.join("src", "world.c"),
                            "README",
                            "extra.py",
                        ],
                    },
                    {
                        "name": "Examples",
                        "base": "examples",
                        "files": ["root.c", "root.h"],
                    },
                ]
            ),
        },
    )
def test_parser_from_archive(tmpdir_factory):
    """Manifest priority (library.json first) also holds for tarballs."""
    src_dir = tmpdir_factory.mktemp("package")
    src_dir.join("package.json").write('{"name": "package.json"}')
    src_dir.join("library.json").write('{"name": "library.json"}')
    src_dir.join("library.properties").write("name=library.properties")

    archive_path = os.path.join(str(src_dir), "package.tar.gz")
    with tarfile.open(archive_path, mode="w|gz") as archive:
        for entry in os.listdir(str(src_dir)):
            archive.add(os.path.join(str(src_dir), entry), entry)

    manifest = parser.ManifestParserFactory.new_from_archive(archive_path).as_dict()
    assert manifest["name"] == "library.json"
def test_broken_schemas():
    """Validation failures still expose the subset of fields that were valid."""
    # A malformed version string is rejected; the valid fields survive.
    with pytest.raises(
        ManifestValidationError, match=("Invalid semantic versioning format")
    ) as excinfo:
        ManifestSchema().load_manifest({"name": "MyPackage", "version": "broken_version"})
    assert excinfo.value.valid_data == {"name": "MyPackage"}

    # StrictList: the offending keyword is reported, the good one is kept.
    with pytest.raises(
        ManifestValidationError, match=("Invalid manifest fields.+keywords")
    ) as excinfo:
        ManifestSchema().load_manifest(
            {"name": "MyPackage", "version": "1.0.0", "keywords": ["kw1", "*^[]"]}
        )
    assert list(excinfo.value.messages.keys()) == ["keywords"]
    assert excinfo.value.valid_data["keywords"] == ["kw1"]

    # broken SemVer
    with pytest.raises(
        ManifestValidationError, match=("Invalid semantic versioning format")
    ):
        ManifestSchema().load_manifest({"name": "MyPackage", "version": "broken_version"})

    # A scalar where a nested mapping is expected is rejected.
    with pytest.raises(ManifestValidationError, match=r"authors.*Invalid input type"):
        ManifestSchema().load_manifest(
            {
                "name": "MyPackage",
                "description": "MyDescription",
                "keywords": ["a", "b"],
                "authors": ["should be dict here"],
                "version": "1.2.3",
            }
        )

    # Forbidden characters in the package name.
    with pytest.raises(ManifestValidationError, match=("are not allowed")):
        ManifestSchema().load_manifest({"name": "C/C++ :library", "version": "1.2.3"})
| 33.939429
| 580
| 0.538809
|
4a0afc9f3ad18b1cd94d4e9a3c2abd54975aea38
| 9,020
|
py
|
Python
|
examples/plot_optimization.py
|
wmvanvliet/posthoc
|
a011a4219cee1e80cf77895543597438f71cd299
|
[
"BSD-3-Clause"
] | 6
|
2019-01-13T04:18:07.000Z
|
2020-10-10T22:22:09.000Z
|
examples/plot_optimization.py
|
wmvanvliet/posthoc
|
a011a4219cee1e80cf77895543597438f71cd299
|
[
"BSD-3-Clause"
] | null | null | null |
examples/plot_optimization.py
|
wmvanvliet/posthoc
|
a011a4219cee1e80cf77895543597438f71cd299
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Automatic post-hoc optimization of linear models
=================================================
This example will demonstrate how to define custom modifications to a linear
model that introduce new hyperparameters. We will then use post-hoc's optimizer
to find the optimal values for these hyperparameters.
We will start with ordinary linear regression as a base model. Then, we will
modify the covariance matrix by applying shrinkage, modify the pattern with a
Gaussian kernel and modify the normalizer to be "unit noise gain", meaning the
weights all sum to 1.
Author: Marijn van Vliet <w.m.vanvliet@gmail.com>
"""
# Required imports
from matplotlib import pyplot as plt
from posthoc import Workbench, WorkbenchOptimizer, cov_estimators, normalizers
from scipy.stats import norm, pearsonr
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_predict
from sklearn.preprocessing import normalize
from functools import partial
import mne
import numpy as np
###############################################################################
# We will use some data from the original publication [1]_. A participant was
# silently reading word-pairs. In these pairs, the two words had a varying
# forward association strength between them. For example: ``locomotiv ->
# train`` has a high association strength, and ``dog -> submarine`` has not. In
# the case of word-pairs with high association strength, the brain processes
# the second word faster, since it has been semantically primed by the first
# word.
#
# We are going to deduce the memory priming effect from epochs of EEG data and
# use that to predict what the forward association strength was for a given
# word-pair.
#
# Let's first load the data and plot a contrast between word-pairs with a high
# versus low association strength, so we can observe how the memory priming
# effect manifests in the EEG data.
# Load the preprocessed single-subject epochs (one epoch per word pair); the
# attached metadata carries the forward association strength ('FAS') column
# used throughout this example.
epochs = mne.read_epochs('subject04-epo.fif')
# Contrast strongly vs. weakly associated word pairs to visualize the N400.
related = epochs['FAS > 0.2'].average()
related.comment = 'related'
unrelated = epochs['FAS < 0.2'].average()
unrelated.comment = 'unrelated'
mne.viz.plot_evoked_topo([related, unrelated])
###############################################################################
# Around 400ms after the presentation of the second word, there is a negative
# peak named the N400 potential. We can clearly observe the semantic priming
# effect as the N400 is more prominent in cases where the words have a low
# forward associative strength.
#
# A naive approach to deduce the forward association strength from a word pair
# is to take the average signal around 400ms at some sensors that show the N400
# well:
# Naive baseline: the mean signal over three parietal sensors in a window
# around the N400 serves directly as the FAS prediction.
ROI = epochs.copy()
ROI.pick_channels(['P3', 'Pz', 'P4'])
ROI.crop(0.3, 0.47)
FAS_pred = ROI.get_data().mean(axis=(1, 2))  # average over channels and samples
perf_naive, _ = pearsonr(epochs.metadata['FAS'], FAS_pred)  # evaluation metric
print(f'Performance: {perf_naive:.2f}')
###############################################################################
# Let's try ordinary linear regression next, using 10-fold cross-validation.
# Linear-regression baseline on the full, flattened data matrix:
# X is 200 epochs x (32 channels * 60 samples) — epoch count assumed to
# match the data file; TODO confirm.
X = normalize(epochs.get_data().reshape(200, 32 * 60))
y = epochs.metadata['FAS'].values
ols = LinearRegression()
FAS_pred = cross_val_predict(ols, X, y, cv=10)  # 10-fold cross-validated predictions
perf_ols, _ = pearsonr(epochs.metadata['FAS'], FAS_pred)
print(f'Performance: {perf_ols:.2f} (to beat: {perf_naive:.2f})')
###############################################################################
# Feeding all data into a linear regression model performs worse than taking
# the average signal in a well chosen sensors. That is because the model is
# overfitting. We could restrict the data going into the model to the same
# sensors and time window as we did when averaging the signal, but we can do so
# much better.
#
# Let's use the post-hoc framework to modify the linear regression model and
# incorporate some information about the nature of the data and the N400
# potential.
#
# First, let's try to reduce overfitting by applying some shrinkage to the
# covariance matrix. The data consists of 32 EEG electrodes, each recording 60
# samples of data. This causes a clear pattern to appear in the covariance
# matrix:
plt.figure()
plt.matshow(np.cov(X.T), cmap='magma')  # feature covariance: 32x32 grid of 60x60 tiles
###############################################################################
# The covariance matrix is built up from 32x32 squares, each square being
# 60x60. The ``KroneckerShrinkage`` class can make use of this information and
# apply different amounts of shrinkage to the diagonal of each square and the
# covariance matrix overall.
cov = cov_estimators.KroneckerKernel(outer_size=32, inner_size=60)
###############################################################################
# To use the Kronecker shrinkage determine the optimal amount of shrinkage to
# apply, we can wrap our linear regression model in the ``WorkbenchOptimizer``
# class. By default, this uses heavily optimized leave-one-out cross-validation
# with a gradient descent algorithm to find the best values.
# We're optimizing for correlation between model prediction and true FAS
def scorer(model, X, y):
    """Score a fitted model by the Pearson correlation between its
    predictions for ``X`` and the true targets ``y`` (higher is better)."""
    correlation, _pvalue = pearsonr(model.predict(X), y)
    return correlation
# Construct the post-hoc workbench, tell it to modify the model by applying
# Kronecker shrinkage.
model = WorkbenchOptimizer(ols, cov=cov, scoring=scorer).fit(X, y)
shrinkage_params = model.cov_params_  # shrinkage values chosen by the optimizer
print('Optimal shrinkage parameters:', shrinkage_params)
###############################################################################
# Let's inspect the pattern that the model has learned:
plt.figure()
# ``pattern_`` is flat (channels * samples); reshape and plot one trace per channel.
plt.plot(epochs.times, model.pattern_.reshape(32, 60).T, color='black', alpha=0.2)
plt.xlabel('Time (s)')
plt.ylabel('Signal (normalized units)')
plt.title('Pattern learned by the model using Kronecker shrinkage')
###############################################################################
# We can clearly see that the model is picking up on the N400. Let's fine-tune
# the pattern a bit by multiplying it with a Guassian kernel, centered around
# 400 ms.
def pattern_modifier(pattern, X, y, mean, std, n_channels=32, n_samples=60):
    """Multiply the pattern with a Gaussian kernel along the time axis.

    Parameters
    ----------
    pattern : ndarray, shape (n_channels * n_samples,)
        The flattened spatio-temporal pattern to modify.
    X, y : unused
        Present because the Workbench API passes the training data to every
        pattern modifier.
    mean : float
        Center of the Gaussian kernel, in samples.
    std : float
        Width (standard deviation) of the Gaussian kernel, in samples.
    n_channels : int, optional
        Number of channels in the pattern. Defaults to 32, the value that
        was previously hard-coded, so existing callers are unaffected.
    n_samples : int, optional
        Number of time samples per channel. Defaults to 60 (previously
        hard-coded).

    Returns
    -------
    ndarray
        The modified pattern, reshaped back to the input's shape.
    """
    # Gaussian bump over the sample axis, scaled so its peak equals 1 — the
    # pattern keeps its full amplitude at the kernel's center.
    kernel = norm(mean, std).pdf(np.arange(n_samples))
    kernel /= kernel.max()
    mod_pattern = pattern.reshape(n_channels, n_samples)
    # Apply the same temporal window to every channel.
    mod_pattern = mod_pattern * kernel[np.newaxis, :]
    return mod_pattern.reshape(pattern.shape)
###############################################################################
# Now the optimizer has four hyperparameters to tune: two shrinkage values and
# two values dictating the shape of the Gaussian kernel.
model_opt = WorkbenchOptimizer(
    ols,
    cov=cov,
    pattern_modifier=pattern_modifier,
    pattern_param_x0=[30, 5],  # Initial guess for decent kernel shape
    pattern_param_bounds=[(0, 60), (2, None)],  # Boundaries for what values to try
    normalizer_modifier=normalizers.unit_gain,
    scoring=scorer,
).fit(X, y)
###############################################################################
# Let's take a look at the optimal parameters:
shrinkage_params = model_opt.cov_params_
pattern_params = model_opt.pattern_modifier_params_  # optimized (mean, std) of the kernel
print('Optimal shrinkage parameters:', shrinkage_params)
print('Optimal pattern parameters:', pattern_params)
###############################################################################
# To evaluate the performance of the new model, you can pass the
# :class:`WorkbenchOptimizer` object into :func:`cross_val_predict`. This would
# cause the optimization procedure to be run during every iteration of the
# cross-validation loop. To save time in this example, we are going to do
# freeze the parameters before entering the model into the cross-validation
# loop. So take this result with a grain of salt, as the hyperparameters have
# been tuned using all data, not just the training set!
# Frozen model: hyperparameters fixed to the values found above.
# NOTE(review): the optimizer above used a KroneckerKernel, while here a
# ShrinkageKernel is rebuilt from shrinkage_params[0] only — confirm intended.
model = Workbench(
    ols,
    cov=cov_estimators.ShrinkageKernel(alpha=shrinkage_params[0]),
    pattern_modifier=partial(pattern_modifier, mean=pattern_params[0], std=pattern_params[1]),
    normalizer_modifier=normalizers.unit_gain,
)
FAS_pred = cross_val_predict(model, X, y, cv=10)
perf_opt, _ = pearsonr(epochs.metadata['FAS'], FAS_pred)
print(f'Performance: {perf_opt:.2f} (to beat: {perf_naive:.2f})')
###############################################################################
# Here is the final pattern:
model.fit(X, y)
plt.figure()
plt.plot(epochs.times, model.pattern_.reshape(32, 60).T, color='black', alpha=0.2)
plt.xlabel('Time (s)')
plt.ylabel('Signal (normalized units)')
plt.title('Pattern learned by the post-hoc model')
###############################################################################
# References
# ----------
# .. [1] Marijn van Vliet and Riitta Salmelin (2020). Post-hoc modification
# of linear models: combining machine learning with domain information
# to make solid inferences from noisy data. Neuroimage, 204, 116221.
# https://doi.org/10.1016/j.neuroimage.2019.116221
#
# sphinx_gallery_thumbnail_number = 5
| 46.020408
| 94
| 0.67051
|
4a0afcce4dbc9c484f81c7e02709f906ead56d88
| 814
|
py
|
Python
|
manage.py
|
njcx/django_blog_njcx
|
66c7ff949cb933bb72da91a8f31c12734b380737
|
[
"MIT"
] | null | null | null |
manage.py
|
njcx/django_blog_njcx
|
66c7ff949cb933bb72da91a8f31c12734b380737
|
[
"MIT"
] | null | null | null |
manage.py
|
njcx/django_blog_njcx
|
66c7ff949cb933bb72da91a8f31c12734b380737
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    """Run Django's command-line utility with this project's settings."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_blog_nicx.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # Distinguish "Django itself is missing" from an ImportError raised
        # deeper inside Django, so the real failure is not masked.
        try:
            import django  # noqa: F401
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
| 35.391304
| 80
| 0.646192
|
4a0afd0b88cf2f5c21d5bc08d87666b67bfaef99
| 816
|
py
|
Python
|
pythonforandroid/recipes/openssl/__init__.py
|
micahjohnson150/python-for-android
|
3faf827ad6dff77b8e8d6923637e4b8e2cb771f7
|
[
"MIT"
] | null | null | null |
pythonforandroid/recipes/openssl/__init__.py
|
micahjohnson150/python-for-android
|
3faf827ad6dff77b8e8d6923637e4b8e2cb771f7
|
[
"MIT"
] | null | null | null |
pythonforandroid/recipes/openssl/__init__.py
|
micahjohnson150/python-for-android
|
3faf827ad6dff77b8e8d6923637e4b8e2cb771f7
|
[
"MIT"
] | 1
|
2021-02-18T05:23:20.000Z
|
2021-02-18T05:23:20.000Z
|
from pythonforandroid.toolchain import Recipe, shprint, current_directory
from os.path import exists, join
import sh
class OpenSSLRecipe(Recipe):
    """python-for-android recipe that cross-compiles OpenSSL's static libs."""

    version = '1.0.2d'
    # {version} is interpolated when the source tarball is downloaded.
    url = 'https://www.openssl.org/source/openssl-{version}.tar.gz'

    def should_build(self):
        # Build only if the static SSL library is not present yet.
        # NOTE(review): the 'armeabi' build dir is hard-coded here even though
        # build_arch() receives an arch argument — confirm this is intended.
        return not exists(join(self.get_build_dir('armeabi'), 'libssl.a'))

    def build_arch(self, arch):
        # Configure and build libcrypto/libssl inside the arch's build dir.
        env = self.get_recipe_env(arch)
        with current_directory(self.get_build_dir(arch.arch)):
            # sh fails with code 255 trying to execute ./Configure
            # so instead we manually run perl passing in Configure
            perl = sh.Command('perl')
            shprint(perl, 'Configure', 'no-dso', 'no-krb5', 'linux-armv4', _env=env)
            shprint(sh.make, 'build_libs', _env=env)


recipe = OpenSSLRecipe()
| 34
| 84
| 0.66299
|
4a0afd95dc44f190ebdfe47da59bf04532812dff
| 23,161
|
py
|
Python
|
osx/devkit/plug-ins/scripted/pyPointOnMeshInfo.py
|
leegoonz/Maya-devkit
|
b81fe799b58e854e4ef16435426d60446e975871
|
[
"ADSL"
] | 10
|
2018-03-30T16:09:02.000Z
|
2021-12-07T07:29:19.000Z
|
osx/devkit/plug-ins/scripted/pyPointOnMeshInfo.py
|
leegoonz/Maya-devkit
|
b81fe799b58e854e4ef16435426d60446e975871
|
[
"ADSL"
] | null | null | null |
osx/devkit/plug-ins/scripted/pyPointOnMeshInfo.py
|
leegoonz/Maya-devkit
|
b81fe799b58e854e4ef16435426d60446e975871
|
[
"ADSL"
] | 9
|
2018-06-02T09:18:49.000Z
|
2021-12-20T09:24:35.000Z
|
#
# ==========================================================================
# Copyright 2015 Autodesk, Inc. All rights reserved.
#
# Use of this software is subject to the terms of the Autodesk
# license agreement provided at the time of installation or download,
# or which otherwise accompanies this software in either electronic
# or hard copy form.
# ==========================================================================
#
import sys
import maya.api.OpenMaya as om
def maya_useNewAPI():
    """Signal to Maya that this plug-in uses the Python API 2.0.

    Maya only checks for the *presence* of this function; its body is
    intentionally empty.
    """
# FUNCTION THAT FINDS THE POINT AND NORMAL OF A POLY AT A SPECIFIED FACE UV COORD ABOUT A SPECIFIED FACE:
def getPointAndNormal(meshDagPath, faceIndex, relative, parameterU, parameterV, point, normal, theMesh=None):
    """Compute the world-space point and normal on a poly face at a UV coord.

    Results are written in place into ``point`` (MPoint) and ``normal``
    (MVector).

    Parameters
    ----------
    meshDagPath : om.MDagPath
        DAG path to the mesh shape (or a path extended to it).
    faceIndex : int
        Index of the face to evaluate.
    relative : bool
        If True, (parameterU, parameterV) are relative to the face's own UV
        bounding box; otherwise they are taken as absolute UV values.
    parameterU, parameterV : float
        The UV coordinate.
    point : om.MPoint
        Output: receives the world-space position.
    normal : om.MVector
        Output: receives the world-space normal.
    theMesh : om.MObject, optional
        Mesh object overriding ``meshDagPath``'s node. BUGFIX: this is now
        optional — redoIt() called this function without it, which raised a
        TypeError; omitting it (or passing a null MObject) uses meshDagPath.
    """
    polyObj = meshDagPath
    if theMesh is not None and not theMesh.isNull():
        polyObj = theMesh

    # CREATE FACE ITERATOR, AND SET ITS INDEX TO THAT OF THE SPECIFIED FACE:
    faceIter = om.MItMeshPolygon(polyObj)
    faceIter.setIndex(faceIndex)

    # WHEN "RELATIVE" MODE IS SPECIFIED, CALCULATE THE *ABSOLUTE* UV'S FROM THE
    # SPECIFIED FACE AND "RELATIVE" UV'S; OTHERWISE TAKE THE GIVEN UV'S AS ABSOLUTE:
    u = parameterU
    v = parameterV
    if relative:
        uvs = faceIter.getUVs()
        uArray = uvs[0]
        vArray = uvs[1]
        # min()/max() replace the original sentinel scan (999999/0), whose
        # maxU/maxV start value of 0 produced wrong bounds for all-negative UVs.
        minU, maxU = min(uArray), max(uArray)
        minV, maxV = min(vArray), max(vArray)
        u = minU + parameterU * (maxU - minU)
        v = minV + parameterV * (maxV - minV)

    # FIND THE WORLDSPACE COORDINATE AT THE SPECIFIED UV:
    UV = [u, v]
    try:
        newPoint = faceIter.getPointAtUV(UV, om.MSpace.kWorld)
        point.x = newPoint.x
        point.y = newPoint.y
        point.z = newPoint.z
        point.w = newPoint.w
    except Exception:
        # getPointAtUV raises when the UV does not lie on the face; keep the
        # original best-effort behavior (leave ``point`` unchanged), but no
        # longer swallow SystemExit/KeyboardInterrupt via a bare except.
        pass

    # FIND THE NORMAL AT THE SPECIFIED UV:
    meshFn = om.MFnMesh(meshDagPath)
    if theMesh is not None and not theMesh.isNull():
        meshFn.setObject(theMesh)
    # getClosestNormal returns (normal, faceId); only the normal is needed.
    newNormal = meshFn.getClosestNormal(point, om.MSpace.kWorld)
    normal.x = newNormal[0].x
    normal.y = newNormal[0].y
    normal.z = newNormal[0].z
#
# MAIN CLASS DECLARATION FOR THE MEL COMMAND:
#
class pointOnMeshCommand(om.MPxCommand):
    """Implements the ``pointOnMesh`` command.

    Without ``-position``/``-normal`` the command creates a ``pointOnMeshInfo``
    node connected to the specified (or first selected) mesh and returns the
    node's name.  With ``-position`` and/or ``-normal`` it returns the
    world-space point and/or normal on the requested face at the requested
    (optionally face-relative) UV coordinate, as an array of doubles.

    Bug fixes relative to the original:
      * ``doIt`` parsed flags with ``for i in range(...)`` while bumping ``i``
        manually — in Python that does not skip the consumed value token, so
        every flag's value was re-parsed (and usually raised "Invalid flag").
        Replaced with a ``while`` loop.
      * ``-faceIndex`` and the default ``parameterU`` were assigned to dead
        locals instead of ``self`` attributes, silently losing the values.
      * ``getPointAndNormal`` was called without its ``theMesh`` argument
        (TypeError); a null MObject is now passed explicitly.
      * After creating the info node, execution fell through and overwrote
        the command result with an empty MDoubleArray; it now returns.
    """

    # Class-level defaults; parsing in doIt() shadows them with instance
    # attributes for the current invocation.
    nodeCreated = False
    positionSpecified = False
    normalSpecified = False
    faceIndexSpecified = False
    relativeSpecified = False
    parameterUSpecified = False
    parameterVSpecified = False
    meshNodeName = ""
    pointOnMeshInfoName = ""
    faceIndex = -1
    relative = False
    parameterU = 0.0
    parameterV = 0.0

    def __init__(self):
        om.MPxCommand.__init__(self)

    # METHOD FOR CREATING AN INSTANCE OF THIS COMMAND:
    @staticmethod
    def cmdCreator():
        return pointOnMeshCommand()

    # MAKE THIS COMMAND UNDOABLE:
    def isUndoable(self):
        return True

    def doIt(self, args):
        """Parse the command arguments, fill in defaults, then call redoIt()."""
        i = 0
        numArgs = len(args)
        while i < numArgs:
            arg = args.asString(i)
            if arg in ("-name", "-na"):
                i += 1
                self.pointOnMeshInfoName = args.asString(i)
            elif arg in ("-position", "-p"):
                self.positionSpecified = True
            elif arg in ("-normal", "-nr"):
                self.normalSpecified = True
            elif arg in ("-faceIndex", "-f"):
                self.faceIndexSpecified = True
                i += 1
                temp = args.asInt(i)
                if temp < 0:
                    raise ValueError("Invalid faceIndex!")
                self.faceIndex = temp  # BUGFIX: was a dead local assignment
            elif arg in ("-relative", "-r"):
                self.relativeSpecified = True
                i += 1
                self.relative = args.asBool(i)
            elif arg in ("-parameterU", "-u"):
                self.parameterUSpecified = True
                i += 1
                temp = args.asDouble(i)
                if temp < 0 or temp > 1:
                    raise ValueError("Invalid parameterU!")
                self.parameterU = temp
            elif arg in ("-parameterV", "-v"):
                self.parameterVSpecified = True
                i += 1
                temp = args.asDouble(i)
                if temp < 0 or temp > 1:
                    raise ValueError("Invalid parameterV!")
                self.parameterV = temp
            elif i == numArgs - 1:
                # The trailing free argument names the mesh (shape or transform).
                self.meshNodeName = arg
            else:
                raise ValueError("Invalid flag:" + arg)
            i += 1

        # MAKE SURE UNSPECIFIED INPUT PARAMETER FLAGS GET DEFAULT VALUES:
        if not self.faceIndexSpecified:
            self.faceIndex = 0
        if not self.relativeSpecified:
            self.relative = True
        if not self.parameterUSpecified:
            self.parameterU = 0.5  # BUGFIX: was a dead local assignment
        if not self.parameterVSpecified:
            self.parameterV = 0.5

        # DO THE WORK:
        self.redoIt()

    def _createAndConnectInfoNode(self, meshShapePath, worldMeshIndex):
        """Create a pointOnMeshInfo node, copy the specified flag values onto
        it and connect mesh.worldMesh[worldMeshIndex] -> node.inMesh.

        Returns the name of the created node.  (This de-duplicates the two
        near-identical node-creation branches of the original redoIt.)
        """
        depNodeFn = om.MFnDependencyNode()
        if self.pointOnMeshInfoName == "":
            depNodeFn.create("pointOnMeshInfo")
        else:
            depNodeFn.create("pointOnMeshInfo", self.pointOnMeshInfoName)
        self.pointOnMeshInfoName = depNodeFn.name()

        # Forward only the flags the caller actually specified:
        if self.faceIndexSpecified:
            depNodeFn.findPlug("faceIndex", True).setValue(self.faceIndex)
        if self.relativeSpecified:
            depNodeFn.findPlug("relative", True).setValue(self.relative)
        if self.parameterUSpecified:
            depNodeFn.findPlug("parameterU", True).setValue(self.parameterU)
        if self.parameterVSpecified:
            depNodeFn.findPlug("parameterV", True).setValue(self.parameterV)

        # CONNECT THE NODES (grab inMesh before re-targeting the fn set at the
        # mesh shape to locate its worldMesh element):
        inMeshPlug = depNodeFn.findPlug("inMesh", True)
        depNodeFn.setObject(meshShapePath.node())
        worldMeshPlug = depNodeFn.findPlug("worldMesh", True)
        worldMeshPlug = worldMeshPlug.elementByLogicalIndex(worldMeshIndex)
        dgModifier = om.MDGModifier()
        dgModifier.connect(worldMeshPlug, inMeshPlug)
        dgModifier.doIt()
        return self.pointOnMeshInfoName

    def redoIt(self):
        """Create the info node, or compute the point/normal result."""
        # WHEN NO MESH IS SPECIFIED IN THE COMMAND, USE THE CURRENT SELECTION:
        sList = om.MSelectionList()
        if self.meshNodeName == "":
            sList = om.MGlobal.getActiveSelectionList()
            if sList.length() == 0:
                raise ValueError("No mesh or mesh transform specified!")
        # OTHERWISE, USE THE NODE NAME SPECIFIED IN THE LAST COMMAND ARGUMENT:
        else:
            sList.add(self.meshNodeName)

        meshDagPath = sList.getDagPath(0)

        point = om.MPoint()
        normal = om.MVector()

        # WHEN THE SPECIFIED NODE IS THE MESH "SHAPE":
        if meshDagPath.node().hasFn(om.MFn.kMesh):
            if not self.positionSpecified and not self.normalSpecified:
                self.nodeCreated = True
                # ASSUME THE *FIRST* INSTANCE OF THE MESH FOR MESH SHAPES.
                om.MPxCommand.setResult(
                    self._createAndConnectInfoNode(meshDagPath, 0))
                return  # BUGFIX: don't fall through and clobber the result
            getPointAndNormal(meshDagPath, self.faceIndex, self.relative,
                              self.parameterU, self.parameterV, point, normal,
                              om.MObject())  # BUGFIX: null theMesh was missing
        # WHEN THE SPECIFIED NODE IS A "TRANSFORM" OF A MESH SHAPE:
        elif meshDagPath.node().hasFn(om.MFn.kTransform) and meshDagPath.hasFn(om.MFn.kMesh):
            if not self.positionSpecified and not self.normalSpecified:
                self.nodeCreated = True
                meshDagPath.extendToShape()
                om.MPxCommand.setResult(
                    self._createAndConnectInfoNode(
                        meshDagPath, meshDagPath.instanceNumber()))
                return  # BUGFIX: don't fall through and clobber the result
            getPointAndNormal(meshDagPath, self.faceIndex, self.relative,
                              self.parameterU, self.parameterV, point, normal,
                              om.MObject())  # BUGFIX: null theMesh was missing
        # INVALID INPUT WHEN SPECIFIED/SELECTED NODE IS NOT A MESH NOR TRANSFORM:
        else:
            raise ValueError("Invalid type! Only a mesh or its transform can be specified!")

        # RETURN THE POSITION AND/OR NORMAL AS AN ARRAY OF DOUBLES:
        result = om.MDoubleArray()
        if self.positionSpecified:
            result.append(point.x)
            result.append(point.y)
            result.append(point.z)
        if self.normalSpecified:
            result.append(normal.x)
            result.append(normal.y)
            result.append(normal.z)
        om.MPxCommand.setResult(result)

    def undoIt(self):
        """Undo by deleting the pointOnMeshInfo node, if one was created."""
        if self.nodeCreated:
            deleteCmd = "delete " + self.pointOnMeshInfoName
            om.MGlobal.executeCommand(deleteCmd)
#
# MAIN CLASS DECLARATION FOR THE CUSTOM NODE:
#
class pointOnMeshInfoNode(om.MPxNode):
id = om.MTypeId(0x00105480)
aInMesh = None
aFaceIndex = None
aRelative = None
aParameterU = None
aParameterV = None
aPosition = None
aPositionX = None
aPositionY = None
aPositionZ = None
aNormal = None
aNormalX = None
aNormalY = None
aNormalZ = None
aNurbsCurve = None
    def __init__(self):
        # Initialize the MPxNode base; the node keeps no extra Python state.
        om.MPxNode.__init__(self)
# FOR CREATING AN INSTANCE OF THIS NODE:
@staticmethod
def cmdCreator():
return pointOnMeshInfoNode()
# INITIALIZES THE NODE BY CREATING ITS ATTRIBUTES:
@staticmethod
def initialize():
# CREATE AND ADD ".inMesh" ATTRIBUTE:
inMeshAttrFn = om.MFnTypedAttribute()
pointOnMeshInfoNode.aInMesh = inMeshAttrFn.create("inMesh", "im", om.MFnData.kMesh)
inMeshAttrFn.storable = True
inMeshAttrFn.keyable = False
inMeshAttrFn.readable = True
inMeshAttrFn.writable = True
inMeshAttrFn.cached = False
om.MPxNode.addAttribute(pointOnMeshInfoNode.aInMesh)
# CREATE AND ADD ".faceIndex" ATTRIBUTE:
faceIndexAttrFn = om.MFnNumericAttribute()
pointOnMeshInfoNode.aFaceIndex = faceIndexAttrFn.create("faceIndex", "f", om.MFnNumericData.kLong, 0)
faceIndexAttrFn.storable = True
faceIndexAttrFn.keyable = True
faceIndexAttrFn.readable = True
faceIndexAttrFn.writable = True
faceIndexAttrFn.setMin(0)
om.MPxNode.addAttribute(pointOnMeshInfoNode.aFaceIndex)
# CREATE AND ADD ".relative" ATTRIBUTE:
relativeAttrFn = om.MFnNumericAttribute()
pointOnMeshInfoNode.aRelative = relativeAttrFn.create("relative", "r", om.MFnNumericData.kBoolean, 1)
relativeAttrFn.storable = True
relativeAttrFn.keyable = True
relativeAttrFn.readable = True
relativeAttrFn.writable = True
om.MPxNode.addAttribute(pointOnMeshInfoNode.aRelative)
# CREATE AND ADD ".parameterU" ATTRIBUTE:
parameterUAttrFn = om.MFnNumericAttribute()
pointOnMeshInfoNode.aParameterU = parameterUAttrFn.create("parameterU", "u", om.MFnNumericData.kDouble, 0.5)
parameterUAttrFn.storable = True
parameterUAttrFn.keyable = True
parameterUAttrFn.readable = True
parameterUAttrFn.writable = True
om.MPxNode.addAttribute(pointOnMeshInfoNode.aParameterU)
# CREATE AND ADD ".parameterV" ATTRIBUTE:
parameterVAttrFn = om.MFnNumericAttribute()
pointOnMeshInfoNode.aParameterV = parameterVAttrFn.create("parameterV", "v", om.MFnNumericData.kDouble, 0.5)
parameterVAttrFn.storable = True
parameterVAttrFn.keyable = True
parameterVAttrFn.readable = True
parameterVAttrFn.writable = True
om.MPxNode.addAttribute(pointOnMeshInfoNode.aParameterV)
# CREATE AND ADD ".positionX" ATTRIBUTE:
pointXAttrFn = om.MFnNumericAttribute()
pointOnMeshInfoNode.aPositionX = pointXAttrFn.create("positionX", "px", om.MFnNumericData.kDouble, 0.0)
pointXAttrFn.storable = False
pointXAttrFn.keyable = False
pointXAttrFn.readable = True
pointXAttrFn.writable = False
om.MPxNode.addAttribute(pointOnMeshInfoNode.aPositionX)
# CREATE AND ADD ".positionY" ATTRIBUTE:
pointYAttrFn = om.MFnNumericAttribute()
pointOnMeshInfoNode.aPositionY = pointYAttrFn.create("positionY", "py", om.MFnNumericData.kDouble, 0.0)
pointYAttrFn.storable = False
pointYAttrFn.keyable = False
pointYAttrFn.readable = True
pointYAttrFn.writable = False
om.MPxNode.addAttribute(pointOnMeshInfoNode.aPositionY)
# CREATE AND ADD ".positionZ" ATTRIBUTE:
pointZAttrFn = om.MFnNumericAttribute()
pointOnMeshInfoNode.aPositionZ = pointZAttrFn.create("positionZ", "pz", om.MFnNumericData.kDouble, 0.0)
pointZAttrFn.storable = False
pointZAttrFn.keyable = False
pointZAttrFn.readable = True
pointZAttrFn.writable = False
om.MPxNode.addAttribute(pointOnMeshInfoNode.aPositionZ)
# CREATE AND ADD ".position" ATTRIBUTE:
pointAttrFn = om.MFnNumericAttribute()
pointOnMeshInfoNode.aPosition = pointAttrFn.create("position", "p", pointOnMeshInfoNode.aPositionX, pointOnMeshInfoNode.aPositionY, pointOnMeshInfoNode.aPositionZ)
pointAttrFn.storable = False
pointAttrFn.keyable = False
pointAttrFn.readable = True
pointAttrFn.writable = False
om.MPxNode.addAttribute(pointOnMeshInfoNode.aPosition)
# CREATE AND ADD ".normalX" ATTRIBUTE:
normalXAttrFn = om.MFnNumericAttribute()
pointOnMeshInfoNode.aNormalX = normalXAttrFn.create("normalX", "nx", om.MFnNumericData.kDouble, 0.0)
normalXAttrFn.storable = False
normalXAttrFn.keyable = False
normalXAttrFn.readable = True
normalXAttrFn.writable = False
om.MPxNode.addAttribute(pointOnMeshInfoNode.aNormalX)
# CREATE AND ADD ".normalY" ATTRIBUTE:
normalYAttrFn = om.MFnNumericAttribute()
pointOnMeshInfoNode.aNormalY = normalYAttrFn.create("normalY", "ny", om.MFnNumericData.kDouble, 0.0)
normalYAttrFn.storable = False
normalYAttrFn.keyable = False
normalYAttrFn.readable = True
normalYAttrFn.writable = False
om.MPxNode.addAttribute(pointOnMeshInfoNode.aNormalY)
# CREATE AND ADD ".normalZ" ATTRIBUTE:
normalZAttrFn = om.MFnNumericAttribute()
pointOnMeshInfoNode.aNormalZ = normalZAttrFn.create("normalZ", "nz", om.MFnNumericData.kDouble, 0.0)
normalZAttrFn.storable = False
normalZAttrFn.keyable = False
normalZAttrFn.readable = True
normalZAttrFn.writable = False
om.MPxNode.addAttribute(pointOnMeshInfoNode.aNormalZ)
# CREATE AND ADD ".normal" ATTRIBUTE:
normalAttrFn = om.MFnNumericAttribute()
pointOnMeshInfoNode.aNormal = normalAttrFn.create("normal", "n", pointOnMeshInfoNode.aNormalX, pointOnMeshInfoNode.aNormalY, pointOnMeshInfoNode.aNormalZ)
normalAttrFn.storable = False
normalAttrFn.keyable = False
normalAttrFn.readable = True
normalAttrFn.writable = False
om.MPxNode.addAttribute(pointOnMeshInfoNode.aNormal)
# DEPENDENCY RELATIONS FOR ".inMesh":
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aInMesh, pointOnMeshInfoNode.aPosition)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aInMesh, pointOnMeshInfoNode.aPositionX)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aInMesh, pointOnMeshInfoNode.aPositionY)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aInMesh, pointOnMeshInfoNode.aPositionZ)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aInMesh, pointOnMeshInfoNode.aNormal)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aInMesh, pointOnMeshInfoNode.aNormalX)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aInMesh, pointOnMeshInfoNode.aNormalY)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aInMesh, pointOnMeshInfoNode.aNormalZ)
# DEPENDENCY RELATIONS FOR ".faceIndex":
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aFaceIndex, pointOnMeshInfoNode.aPosition)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aFaceIndex, pointOnMeshInfoNode.aPositionX)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aFaceIndex, pointOnMeshInfoNode.aPositionY)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aFaceIndex, pointOnMeshInfoNode.aPositionZ)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aFaceIndex, pointOnMeshInfoNode.aNormal)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aFaceIndex, pointOnMeshInfoNode.aNormalX)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aFaceIndex, pointOnMeshInfoNode.aNormalY)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aFaceIndex, pointOnMeshInfoNode.aNormalZ)
# DEPENDENCY RELATIONS FOR ".relative":
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aRelative, pointOnMeshInfoNode.aPosition)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aRelative, pointOnMeshInfoNode.aPositionX)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aRelative, pointOnMeshInfoNode.aPositionY)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aRelative, pointOnMeshInfoNode.aPositionZ)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aRelative, pointOnMeshInfoNode.aNormal)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aRelative, pointOnMeshInfoNode.aNormalX)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aRelative, pointOnMeshInfoNode.aNormalY)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aRelative, pointOnMeshInfoNode.aNormalZ)
# DEPENDENCY RELATIONS FOR ".parameterU":
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aParameterU, pointOnMeshInfoNode.aPosition)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aParameterU, pointOnMeshInfoNode.aPositionX)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aParameterU, pointOnMeshInfoNode.aPositionY)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aParameterU, pointOnMeshInfoNode.aPositionZ)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aParameterU, pointOnMeshInfoNode.aNormal)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aParameterU, pointOnMeshInfoNode.aNormalX)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aParameterU, pointOnMeshInfoNode.aNormalY)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aParameterU, pointOnMeshInfoNode.aNormalZ)
# DEPENDENCY RELATIONS FOR ".parameterV":
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aParameterV, pointOnMeshInfoNode.aPosition)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aParameterV, pointOnMeshInfoNode.aPositionX)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aParameterV, pointOnMeshInfoNode.aPositionY)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aParameterV, pointOnMeshInfoNode.aPositionZ)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aParameterV, pointOnMeshInfoNode.aNormal)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aParameterV, pointOnMeshInfoNode.aNormalX)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aParameterV, pointOnMeshInfoNode.aNormalY)
om.MPxNode.attributeAffects(pointOnMeshInfoNode.aParameterV, pointOnMeshInfoNode.aNormalZ)
# COMPUTE METHOD'S DEFINITION:
def compute(self, plug, data):
assert(isinstance(data.context(), om.MDGContext))
assert(data.setContext(data.context()) == data)
# DO THE COMPUTE ONLY FOR THE *OUTPUT* PLUGS THAT ARE DIRTIED:
if plug == pointOnMeshInfoNode.aPosition or plug == pointOnMeshInfoNode.aPositionX or plug == pointOnMeshInfoNode.aPositionY or plug == pointOnMeshInfoNode.aPositionZ or plug == pointOnMeshInfoNode.aNormal or plug == pointOnMeshInfoNode.aNormalX or plug == pointOnMeshInfoNode.aNormalY or plug == pointOnMeshInfoNode.aNormalZ:
# READ IN ".inMesh" DATA:
inMeshDataHandle = data.inputValue(pointOnMeshInfoNode.aInMesh)
inMesh = inMeshDataHandle.asMesh()
# READ IN ".faceIndex" DATA:
faceIndexDataHandle = data.inputValue(pointOnMeshInfoNode.aFaceIndex)
faceIndex = faceIndexDataHandle.asInt()
# READ IN ".relative" DATA:
relativeDataHandle = data.inputValue(pointOnMeshInfoNode.aRelative)
relative = relativeDataHandle.asBool()
# READ IN ".parameterU" DATA:
parameterUDataHandle = data.inputValue(pointOnMeshInfoNode.aParameterU)
parameterU = parameterUDataHandle.asDouble()
# READ IN ".parameterV" DATA:
parameterVDataHandle = data.inputValue(pointOnMeshInfoNode.aParameterV)
parameterV = parameterVDataHandle.asDouble()
# GET THE POINT AND NORMAL:
point = om.MPoint()
normal = om.MVector()
dummyDagPath = om.MDagPath()
getPointAndNormal(dummyDagPath, faceIndex, relative, parameterU, parameterV, point, normal, inMesh)
# WRITE OUT ".position" DATA:
pointDataHandle = data.outputValue(pointOnMeshInfoNode.aPosition)
pointDataHandle.set3Double(point.x, point.y, point.z)
data.setClean(plug)
# WRITE OUT ".normal" DATA:
normalDataHandle = data.outputValue(pointOnMeshInfoNode.aNormal)
normalDataHandle.set3Double(normal.x, normal.y, normal.z)
data.setClean(plug)
else:
return None # let Maya handle this attribute
# INITIALIZES THE PLUGIN BY REGISTERING THE COMMAND AND NODE:
#
def initializePlugin(obj):
    """Register the 'pointOnMesh' command and 'pointOnMeshInfo' node with Maya.

    Called by Maya when the plug-in is loaded. Re-raises on failure so Maya
    reports the plug-in as failed to load.
    """
    plugin = om.MFnPlugin(obj)
    try:
        plugin.registerCommand("pointOnMesh", pointOnMeshCommand.cmdCreator)
    except Exception:  # was a bare except; narrowed so Ctrl-C etc. are not swallowed
        sys.stderr.write("Failed to register command\n")
        raise
    try:
        plugin.registerNode("pointOnMeshInfo", pointOnMeshInfoNode.id, pointOnMeshInfoNode.cmdCreator, pointOnMeshInfoNode.initialize)
    except Exception:
        sys.stderr.write("Failed to register node\n")
        raise
#
# UNINITIALIZES THE PLUGIN BY DEREGISTERING THE COMMAND AND NODE:
#
def uninitializePlugin(obj):
    """Deregister the 'pointOnMesh' command and 'pointOnMeshInfo' node.

    Called by Maya when the plug-in is unloaded. Re-raises on failure so Maya
    reports the plug-in as failed to unload.
    """
    plugin = om.MFnPlugin(obj)
    try:
        plugin.deregisterCommand("pointOnMesh")
    except Exception:  # was a bare except; narrowed so Ctrl-C etc. are not swallowed
        sys.stderr.write("Failed to deregister command\n")
        raise
    try:
        plugin.deregisterNode(pointOnMeshInfoNode.id)
    except Exception:
        sys.stderr.write("Failed to deregister node\n")
        raise
| 38.860738
| 328
| 0.758128
|
4a0afd9b913e46cf13d1787e71894c428845e3cb
| 1,917
|
py
|
Python
|
test/test_iam_permission_all_of.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 21
|
2018-03-29T14:20:35.000Z
|
2021-10-13T05:11:41.000Z
|
test/test_iam_permission_all_of.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 14
|
2018-01-30T15:45:46.000Z
|
2022-02-23T14:23:21.000Z
|
test/test_iam_permission_all_of.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 18
|
2018-01-03T15:09:56.000Z
|
2021-07-16T02:21:54.000Z
|
# coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import intersight
from intersight.models.iam_permission_all_of import IamPermissionAllOf # noqa: E501
from intersight.rest import ApiException
class TestIamPermissionAllOf(unittest.TestCase):
    """Unit-test stubs for the generated IamPermissionAllOf model."""

    def setUp(self):
        """No fixtures are required for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testIamPermissionAllOf(self):
        """Test IamPermissionAllOf construction (stub, not yet implemented)."""
        # FIXME: construct object with mandatory attributes with example values
        # model = intersight.models.iam_permission_all_of.IamPermissionAllOf()  # noqa: E501
        pass
# Allow running this test module directly via unittest's CLI runner.
if __name__ == '__main__':
    unittest.main()
| 50.447368
| 1,052
| 0.780386
|
4a0afe23588c11d714ba325bb43646923a664102
| 45
|
py
|
Python
|
coursera/python_programming_basics/1_week_17_clock's_time.py
|
anklav24/Python-Education
|
49ebcfabda1376390ee71e1fe321a51e33831f9e
|
[
"Apache-2.0"
] | null | null | null |
coursera/python_programming_basics/1_week_17_clock's_time.py
|
anklav24/Python-Education
|
49ebcfabda1376390ee71e1fe321a51e33831f9e
|
[
"Apache-2.0"
] | null | null | null |
coursera/python_programming_basics/1_week_17_clock's_time.py
|
anklav24/Python-Education
|
49ebcfabda1376390ee71e1fe321a51e33831f9e
|
[
"Apache-2.0"
] | null | null | null |
# Read a total minute count from stdin and print the clock time it corresponds
# to: hours (wrapped at 24) and remaining minutes, space-separated.
total_minutes = int(input())
hours, minutes = divmod(total_minutes, 60)
print(hours % 24, minutes)
| 15
| 27
| 0.488889
|
4a0afe25338f6869d406bcc38a4a2e58410bfc10
| 3,883
|
py
|
Python
|
model/Resnet.py
|
zhyhan/RDA
|
8407c01630e09565199a1bad823f3e69e6193c43
|
[
"Apache-2.0"
] | 15
|
2020-04-23T01:14:50.000Z
|
2022-03-26T12:51:16.000Z
|
model/Resnet.py
|
zhyhan/RDA
|
8407c01630e09565199a1bad823f3e69e6193c43
|
[
"Apache-2.0"
] | 3
|
2020-07-18T01:13:23.000Z
|
2021-12-24T00:34:52.000Z
|
model/Resnet.py
|
zhyhan/RDA
|
8407c01630e09565199a1bad823f3e69e6193c43
|
[
"Apache-2.0"
] | 5
|
2020-08-18T12:41:41.000Z
|
2022-03-26T12:51:09.000Z
|
import torch.nn as nn
import model.backbone as backbone
import torch.nn.functional as F
import torch
import numpy as np
import random
class GradientReverseLayer(torch.autograd.Function):
    """Gradient reversal layer (GRL) for adversarial domain adaptation.

    Forward is the identity; backward negates the incoming gradient, scaled by
    a coefficient that ramps from ``low_value`` toward ``high_value`` following
    a sigmoid schedule over ``max_iter`` forward passes.

    NOTE(review): this uses the legacy instance-method autograd.Function API;
    it only works when forward()/backward() are invoked directly, not via
    Function.apply() in modern PyTorch — confirm how callers use it.
    """

    def __init__(self, iter_num=0, alpha=1.0, low_value=0.0, high_value=0.1, max_iter=1000.0):
        self.iter_num = iter_num      # forward passes seen so far (drives the schedule)
        self.alpha = alpha            # steepness of the sigmoid ramp
        self.low_value = low_value    # coefficient at iteration 0
        self.high_value = high_value  # coefficient as iter_num -> infinity
        self.max_iter = max_iter      # normalization constant for the schedule

    def forward(self, input):
        """Identity mapping; also advances the iteration counter."""
        self.iter_num += 1
        output = input * 1.0
        return output

    def backward(self, grad_output):
        """Return the reversed, schedule-scaled gradient.

        Bug fix: ``np.float`` was removed in NumPy 1.20; use the builtin
        ``float`` instead (same value, no deprecation/AttributeError).
        """
        self.coeff = float(
            2.0 * (self.high_value - self.low_value) / (1.0 + np.exp(-self.alpha * self.iter_num / self.max_iter))
            - (self.high_value - self.low_value) + self.low_value)
        return -self.coeff * grad_output
class ResNetPlus(nn.Module):
    """ResNet backbone followed by an optional bottleneck and an MLP classifier."""

    def __init__(self, base_net='ResNet50', use_bottleneck=True, bottleneck_dim=1024, width=1024, class_num=31):
        super(ResNetPlus, self).__init__()
        # Feature-extractor backbone, looked up by name in the project registry.
        self.base_network = backbone.network_dict[base_net]()
        self.use_bottleneck = use_bottleneck
        self.bottleneck_layer_list = [
            nn.Linear(self.base_network.output_num(), bottleneck_dim),
            nn.BatchNorm1d(bottleneck_dim),
            nn.ReLU(),
            nn.Dropout(0.5),
        ]
        self.bottleneck_layer = nn.Sequential(*self.bottleneck_layer_list)
        self.classifier_layer_list = [
            nn.Linear(bottleneck_dim, width),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(width, class_num),
        ]
        self.classifier_layer = nn.Sequential(*self.classifier_layer_list)
        self.softmax = nn.Softmax(dim=1)
        # Weight initialization: small gaussians, constant biases.
        self.bottleneck_layer[0].weight.data.normal_(0, 0.005)
        self.bottleneck_layer[0].bias.data.fill_(0.1)
        for linear_idx in (0, 3):  # the two Linear layers of the classifier
            self.classifier_layer[linear_idx].weight.data.normal_(0, 0.01)
            self.classifier_layer[linear_idx].bias.data.fill_(0.0)
        # Optimizer parameter groups: backbone trains 10x slower than the bottleneck.
        self.parameter_list = [
            {"params": self.base_network.parameters(), "lr": 0.1},
            {"params": self.bottleneck_layer.parameters(), "lr": 1},
        ]

    def T_scaling(self, logits, temperature):
        """Divide *logits* row-wise by *temperature* (one value per row)."""
        expanded = temperature.unsqueeze(1).expand(logits.size(0), logits.size(1))
        return logits / expanded

    def forward(self, inputs):
        """Return (features, raw logits, softmax probabilities)."""
        features = self.base_network(inputs)
        if self.use_bottleneck:
            features = self.bottleneck_layer(features)
        outputs = self.classifier_layer(features)
        softmax_outputs = self.softmax(outputs)
        return features, outputs, softmax_outputs
class ResNetModel(object):
    """Convenience wrapper bundling a ResNetPlus network with training helpers."""

    def __init__(self, base_net='ResNet50', width=1024, class_num=31, use_bottleneck=True, use_gpu=True, srcweight=3):
        self.c_net = ResNetPlus(base_net, use_bottleneck, width, width, class_num)
        self.use_gpu = use_gpu
        self.is_train = False
        self.iter_num = 0
        self.class_num = class_num
        if self.use_gpu:
            self.c_net = self.c_net.cuda()
        self.srcweight = srcweight

    def get_loss(self, inputs, labels_source):
        """Cross-entropy loss of the network's logits against the source labels."""
        criterion = nn.CrossEntropyLoss()
        _, logits, _ = self.c_net(inputs)
        return criterion(logits, labels_source)

    def predict(self, inputs):
        """Return the network's (features, logits, softmax outputs) for *inputs*."""
        return self.c_net(inputs)

    def get_parameter_list(self):
        """Optimizer parameter groups exposed by the wrapped network."""
        return self.c_net.parameter_list

    def set_train(self, mode):
        """Switch the network between train/eval mode and record the flag."""
        self.c_net.train(mode)
        self.is_train = mode
| 41.308511
| 169
| 0.661344
|
4a0afeab5517a9d37a8f83e0bfbb0ba69f5742a8
| 147
|
py
|
Python
|
starter_code/api_keys.py
|
AmrAlwakeal/Python-API
|
24a27c704d550358384c0bfda9c4c893ce4dde3c
|
[
"ADSL"
] | null | null | null |
starter_code/api_keys.py
|
AmrAlwakeal/Python-API
|
24a27c704d550358384c0bfda9c4c893ce4dde3c
|
[
"ADSL"
] | null | null | null |
starter_code/api_keys.py
|
AmrAlwakeal/Python-API
|
24a27c704d550358384c0bfda9c4c893ce4dde3c
|
[
"ADSL"
] | null | null | null |
# OpenWeatherMap API Key
# SECURITY NOTE(review): real-looking credentials are committed to source control
# here; they should be rotated and loaded from environment variables or an
# untracked local config file instead.
weather_api_key = "3452081acc23c00e2fde75d756503f8e"
# Google API Key (same concern as above)
g_key = "AIzaSyCkBVdD7f-qTJQcvEdqXXA3t2BX0EEbnMA"
| 21
| 52
| 0.829932
|
4a0afee07352db79d9c2fe236d07495ba16f979f
| 483
|
py
|
Python
|
palindrome_check.py
|
jmmL/misc
|
6bdbcd977fd29ea9a73f99364ff21caccd30d3d0
|
[
"MIT"
] | null | null | null |
palindrome_check.py
|
jmmL/misc
|
6bdbcd977fd29ea9a73f99364ff21caccd30d3d0
|
[
"MIT"
] | null | null | null |
palindrome_check.py
|
jmmL/misc
|
6bdbcd977fd29ea9a73f99364ff21caccd30d3d0
|
[
"MIT"
] | null | null | null |
def is_palindrome(string):
    """Return True if *string* reads the same forwards and backwards.

    Compares mirrored character pairs; empty and single-character strings are
    palindromes by definition. (Extracted to module level so it is reusable
    and testable; the original nested version also had an unreachable ``break``
    after ``return False``, removed here.)
    """
    for i in range(len(string) // 2):
        if string[i] != string[len(string) - i - 1]:
            return False
    return True


def main():
    """Prompt the user for a string and print whether it is a palindrome."""
    palindrome_string = input("Please enter a string:\n")
    print(is_palindrome(palindrome_string))


# Guarding the entry point prevents the interactive prompt from running when
# this module is imported (the original called main() unconditionally).
if __name__ == "__main__":
    main()
| 28.411765
| 73
| 0.57764
|
4a0affbb18e8c0ad3d69b21fea172b575434e9d9
| 8,543
|
py
|
Python
|
osbot_aws/apis/SQS.py
|
pbx-gs/OSBot-AWS
|
202f9347c861508a4780224384202c971fb54a45
|
[
"Apache-2.0"
] | 2
|
2019-04-19T07:42:08.000Z
|
2019-06-23T11:46:18.000Z
|
osbot_aws/apis/SQS.py
|
pbx-gs/OSBot-AWS
|
202f9347c861508a4780224384202c971fb54a45
|
[
"Apache-2.0"
] | 8
|
2020-02-16T23:43:07.000Z
|
2021-02-26T01:58:20.000Z
|
osbot_aws/apis/SQS.py
|
owasp-sbot/OSBot-AWS
|
202f9347c861508a4780224384202c971fb54a45
|
[
"Apache-2.0"
] | 3
|
2020-02-16T15:45:58.000Z
|
2021-02-11T01:04:58.000Z
|
from osbot_utils.decorators.lists.index_by import index_by
from osbot_utils.decorators.methods.cache_on_self import cache_on_self
from osbot_utils.utils.Json import json_loads, json_to_str
from osbot_utils.utils.Misc import array_pop, array_get, list_set
from osbot_utils.decorators.methods.catch import catch
from osbot_utils.decorators.methods.cache import cache
from osbot_aws.apis.Session import Session
class SQS:
    """Helper around the boto3 SQS client: queue management, message
    send/receive, and queue-policy permissions.

    Methods decorated with @catch return an {'error': ...} dict instead of
    raising (NOTE(review): inferred from how queue_exists()/queues() consume
    the result — confirm against osbot_utils.decorators.methods.catch).
    """

    @cache_on_self
    def client(self):
        """Boto3 SQS client, created once per SQS instance."""
        return Session().client('sqs')

    @catch
    def queue_attributes(self, queue_url, attribute_names='All'):
        """Return the queue's attributes plus synthesized QueueUrl/QueueName keys."""
        queue_data = self.client().get_queue_attributes(QueueUrl=queue_url, AttributeNames=[attribute_names]).get('Attributes')
        queue_data['QueueUrl' ] = queue_url # add this important value (which is missing from the current data returned from AWS)
        queue_data['QueueName'] = queue_data['QueueUrl' ].split('/').pop()
        return queue_data

    def queue_attributes_update(self, queue_url, new_attributes):
        """Apply *new_attributes* to the queue."""
        return self.client().set_queue_attributes(QueueUrl=queue_url, Attributes=new_attributes)

    def queue_create(self, queue_name, attributes=None):
        """Create a queue and return its URL."""
        kwargs = { "QueueName": queue_name,
                   "Attributes": attributes or {} }
        return self.client().create_queue(**kwargs).get('QueueUrl')

    def queue_create_fifo(self, queue_name, attributes=None):
        """Create a FIFO queue with content-based deduplication enabled."""
        if attributes is None:
            attributes = {}
        attributes.update({'FifoQueue' : 'True' ,
                           'ContentBasedDeduplication': 'True' })
        return self.queue_create(queue_name=queue_name, attributes=attributes)

    def queue_delete(self, queue_url):
        """Delete the queue; returns True once it no longer exists, False if it never did."""
        if self.queue_exists(queue_url=queue_url) is False: return False
        self.client().delete_queue(QueueUrl=queue_url)
        return self.queue_exists(queue_url=queue_url) is False

    def queue_exists(self, queue_url):
        """True when fetching the queue's info produced no 'error' entry."""
        return self.queue_info(queue_url=queue_url).get('error') is None

    def queue_info(self, queue_url):                # consistent method with similar classes like Lambda
        return self.queue_attributes(queue_url=queue_url)

    def queue_message_delete(self, queue_url, receipt_handle):
        """Delete a received message using its receipt handle."""
        return self.client().delete_message(QueueUrl=queue_url, ReceiptHandle=receipt_handle)

    def queue_message_get(self, queue_url, delete_message=True):
        """Return the body of one message (and delete it by default); None when the queue is empty."""
        message = self.queue_message_get_raw(queue_url)
        if message:
            body            = message.get('Body')
            receipt_handle  = message.get('ReceiptHandle')
            if delete_message:
                self.queue_message_delete(queue_url, receipt_handle)
            return body

    def queue_message_get_raw(self, queue_url, message_attribute_names=None):
        """Receive one raw message dict (or None) without deleting it."""
        kwargs = {"QueueUrl"             : queue_url,
                  "MessageAttributeNames": message_attribute_names or ['All'],
                  "MaxNumberOfMessages"  : 1 }
        messages = self.client().receive_message(**kwargs).get('Messages')
        return array_get(messages,0)            # there will only be a max of one message in queue

    def queue_message_get_with_attributes(self, queue_url, delete_message=True):
        """Return (body, {attr: string-value}) of one message; deletes it by default."""
        message = self.queue_message_get_raw(queue_url=queue_url)
        if message:
            body            = message.get('Body')
            attributes      = message.get('MessageAttributes')
            receipt_handle  = message.get('ReceiptHandle')
            attributes_data = {}
            if attributes:
                for key,value in attributes.items():
                    attributes_data[key] = value.get('StringValue')
            if delete_message:
                self.queue_message_delete(queue_url=queue_url,receipt_handle=receipt_handle)
            return body, attributes_data

    def queue_messages_get_n(self, queue_url, n=1, delete_message=True):        # todo see if we can use client().receive_message with a higher value of MaxNumberOfMessages
        """Fetch up to *n* message bodies, stopping early when the queue is empty."""
        messages = []
        for i in range(0,n):
            message = self.queue_message_get(queue_url=queue_url, delete_message=delete_message)
            if message:
                messages.append(message)
            else:
                break
        return messages

    def queue_message_send(self, queue_url, body, message_group_id=None, attributes_data=None):
        """Send *body* (optionally with string attributes / FIFO group id); returns the MessageId."""
        kwargs = {"QueueUrl"         : queue_url,
                  "MessageBody"      : body ,
                  "MessageAttributes": {}}
        if attributes_data:
            for key,value in attributes_data.items():
                kwargs['MessageAttributes'][key] = { 'StringValue': value , 'DataType': 'String'}
        if message_group_id:
            kwargs["MessageGroupId"] = message_group_id
        return self.client().send_message(**kwargs).get('MessageId')

    def queue_message_send_fifo(self, queue_url, message_group_id, body, attributes_data=None):
        """Send to a FIFO queue (message_group_id is mandatory there)."""
        return self.queue_message_send(queue_url=queue_url, message_group_id=message_group_id, body=body, attributes_data=attributes_data)

    def queue_messages_count(self, queue_url):
        """Approximate number of visible messages."""
        return int(self.queue_attributes(queue_url=queue_url).get('ApproximateNumberOfMessages'))

    def queue_messages_count_not_visible(self, queue_url):
        """Approximate number of in-flight (not visible) messages."""
        return int(self.queue_attributes(queue_url=queue_url).get('ApproximateNumberOfMessagesNotVisible'))

    def queue_url(self, queue_name):
        """Resolve a queue name to its URL."""
        return self.client().get_queue_url(QueueName=queue_name).get('QueueUrl')

    def queue_url_from_queue_arn(self, queue_arn):
        """Resolve a queue ARN to its URL (None when not found)."""
        result = self.queues(index_by='QueueArn')
        return result.get(queue_arn, {}).get('QueueUrl')

    @index_by
    def queues(self):           # todo: see if there is a better way to get this info about existing queues
        """Note: there could be missing entries since the list of queues doesn't update as fast as it should
           (i.e. SQS.queues_urls(): might have more entries that SQS.queues() )
        """
        data = []
        for queue_url in self.queues_urls():
            queue_info = self.queue_info(queue_url=queue_url)
            if queue_info.get('error') is None:
                data.append(queue_info)
        return data

    def queues_arns(self):
        """List of ARNs of all known queues."""
        return list_set(self.queues(index_by="QueueArn"))

    def queues_urls(self):
        """List of URLs of all queues in the account/region."""
        return self.client().list_queues().get('QueueUrls')

    @catch
    def permission_add(self, queue_url, label, aws_account_ids, actions):
        """Grant *actions* to *aws_account_ids* under *label*, replacing any existing grant with that label."""
        self.permission_delete(queue_url, label)        # delete if already exists
        params = {'QueueUrl'     : queue_url ,
                  'Label'        : label ,
                  'AWSAccountIds': aws_account_ids ,
                  'Actions'      : actions }
        return self.client().add_permission(**params)

    # todo: see if there is a better way to do this. It shouldn't be needed to write this method, but boto3's add_permission doesn't seem to have the ability to control the Condition and Pricipal values
    def permission_add_for_service(self, queue_url, source_arn, service, resource, action='sqs:SendMessage', effect='Allow'):
        """Append a policy statement letting *service* (scoped to *source_arn*) perform *action* on *resource*."""
        policy_statement_id = f'{action}-rule-{source_arn}'
        statement = { 'Action'   : action,
                      'Condition': {'ArnEquals': {'aws:SourceArn':source_arn}},
                      'Effect'   : effect,
                      'Principal': {'Service': service},
                      'Resource' : resource,
                      'Sid'      : policy_statement_id }
        policy = self.policy(queue_url=queue_url)
        if policy == {}:
            # No policy yet: start a fresh document.
            policy = {'Statement' : [] ,
                      'Version'   : '2008-10-17' }
        policy.get('Statement').append(statement)
        policy_str = json_to_str(policy)
        self.queue_attributes_update(queue_url=queue_url, new_attributes={"Policy": policy_str})
        return self.policy(queue_url=queue_url)

    @catch
    def permission_delete(self, queue_url, label):
        """Remove the permission registered under *label*."""
        params = { 'QueueUrl' : queue_url ,
                   'Label'    : label ,}
        return self.client().remove_permission(**params)

    def permissions(self, queue_url):
        """Policy statements currently attached to the queue (empty list when none)."""
        return self.policy(queue_url).get('Statement', [])

    def policy(self, queue_url):
        """Parsed queue policy document, or {} when the queue has no policy."""
        value = self.queue_attributes(queue_url=queue_url).get('Policy')
        if value:
            return json_loads(value)
        return {}
| 45.684492
| 202
| 0.640876
|
4a0b0071aa1def60423d78a0f86e1de1dda99537
| 1,524
|
py
|
Python
|
test/runtime/frontend_test/chainer_test/functions_test/normalization_test/normalize_test.py
|
urantialife/webdnn
|
dedc5da424288594cdfa605a015ddc7a3afcf2b7
|
[
"MIT"
] | 1
|
2021-04-09T15:55:35.000Z
|
2021-04-09T15:55:35.000Z
|
test/runtime/frontend_test/chainer_test/functions_test/normalization_test/normalize_test.py
|
urantialife/webdnn
|
dedc5da424288594cdfa605a015ddc7a3afcf2b7
|
[
"MIT"
] | null | null | null |
test/runtime/frontend_test/chainer_test/functions_test/normalization_test/normalize_test.py
|
urantialife/webdnn
|
dedc5da424288594cdfa605a015ddc7a3afcf2b7
|
[
"MIT"
] | null | null | null |
import chainer
import numpy as np
from test.util import generate_kernel_test_case, wrap_template
from webdnn.graph.placeholder import Placeholder
from webdnn.frontend.chainer.converter import ChainerConverter
from webdnn.frontend.chainer.placeholder_variable import PlaceholderVariable
@wrap_template
def template(eps=1e-5, axis=1, description=""):
    """Convert chainer's F.normalize on a random (2, 4) input through WebDNN
    and emit a kernel test case comparing against chainer's reference output."""
    source = chainer.Variable(np.random.rand(2, 4).astype(np.float32))
    normalized = chainer.functions.normalize(source, eps=eps, axis=axis)

    graph = ChainerConverter().convert([source], [normalized])
    graph_in, graph_out = graph.inputs[0], graph.outputs[0]

    generate_kernel_test_case(
        description=f"[chainer] F.normalize {description}",
        graph=graph,
        backend=["webgpu", "webassembly", "fallback"],
        inputs={graph_in: source.data},
        expected={graph_out: normalized.data}
    )
def test():
    """F.normalize with the default eps/axis settings."""
    template()
def test_eps():
    """F.normalize with a large eps (1e-1) to exercise the epsilon term."""
    template(eps=1e-1)
def test_with_placeholder():
    """F.normalize converted through symbolic (placeholder) input shapes."""
    concrete = chainer.Variable(np.random.rand(1, 8).astype(np.float32))
    reference = chainer.functions.normalize(concrete)

    N = Placeholder(label="N")
    C = Placeholder(label="C")
    symbolic_in = PlaceholderVariable([N, C])
    symbolic_out = chainer.functions.normalize(symbolic_in)

    graph = ChainerConverter().convert([symbolic_in], [symbolic_out])
    graph_in, graph_out = graph.inputs[0], graph.outputs[0]

    # Resolve the placeholders to the concrete shape before generating the case.
    N.value = 1
    C.value = 8
    generate_kernel_test_case(
        description=f"[chainer] F.normalize with placeholder",
        graph=graph,
        backend=["webgpu", "webassembly"],
        inputs={graph_in: concrete.data},
        expected={graph_out: reference.data},
    )
| 25.4
| 76
| 0.664698
|
4a0b00da7d7c00738d045321fcfbd4faeb0e57d8
| 10,415
|
py
|
Python
|
config/settings/production.py
|
SamwelOpiyo/simple_company_employee_management_backend
|
75723dd6aed002a1bc30f2d78d077e498eb25226
|
[
"MIT"
] | null | null | null |
config/settings/production.py
|
SamwelOpiyo/simple_company_employee_management_backend
|
75723dd6aed002a1bc30f2d78d077e498eb25226
|
[
"MIT"
] | null | null | null |
config/settings/production.py
|
SamwelOpiyo/simple_company_employee_management_backend
|
75723dd6aed002a1bc30f2d78d077e498eb25226
|
[
"MIT"
] | null | null | null |
import logging
from google.oauth2 import service_account
from .base import * # noqa
from .base import env
# Get an instance of a logger
logger = logging.getLogger(__name__)
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["example.com"])
# Optionally extend ALLOWED_HOSTS with the IPs of every kubernetes node,
# queried from the in-cluster API (only works when running inside a pod).
if env.bool("GET_ALLOWED_HOSTS_FROM_KUBERNETES_API", default=True):
    from kubernetes import client, config
    try:
        # In-cluster configuration; fails outside a pod and falls to the except below.
        config.incluster_config.load_incluster_config()
        v1 = client.CoreV1Api()
        # Flatten the per-node lists of Internal/External IPs into one address list.
        ALLOWED_HOSTS += sum(
            [
                [
                    addr.address
                    for addr in node.status.addresses
                    if addr.type == "InternalIP" or addr.type == "ExternalIP"
                ]
                for node in v1.list_node().items
            ],
            [],
        )
    except Exception:
        # Best-effort: keep the statically configured hosts if the API is unreachable.
        logger.exception("Could not load ALLOWED_HOSTS from kubernetes.")
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int(
"CONN_MAX_AGE", default=60
) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": env("REDIS_URL"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
# Mimicing memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
"IGNORE_EXCEPTIONS": True,
},
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = env.bool("DJANGO_SESSION_COOKIE_SECURE", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = env.bool("DJANGO_CSRF_COOKIE_SECURE", default=True)
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = env.bool("DJANGO_SECURE_HSTS_SECONDS", default=60)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ["storages"] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
"CacheControl": f"max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate"
}
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_DEFAULT_ACL = None
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.io/en/latest/index.html
INSTALLED_APPS += ["storages"]
# optional
GS_PROJECT_ID = env("GCP_PROJECT_ID")
GCS_CREDENTIALS_FILE = env("GCS_CREDENTIALS_FILE", default=None)
if GCS_CREDENTIALS_FILE:
GS_CREDENTIALS = service_account.Credentials.from_service_account_file(
GCS_CREDENTIALS_FILE
)
# or GOOGLE_APPLICATION_CREDENTIALS = ""
GS_AUTO_CREATE_BUCKET = True
GS_AUTO_CREATE_ACL = "projectPrivate"
GS_FILE_CHARSET = "utf-8"
# GS_FILE_OVERWRITE = False
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
GS_MEDIA_BUCKET_NAME = env("GS_MEDIA_BUCKET_NAME", default="")
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "https://storage.googleapis.com/%s/" % GS_MEDIA_BUCKET_NAME
DEFAULT_FILE_STORAGE = "config.storage_backends.GoogleCloudMediaStorage"
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
GS_STATIC_BUCKET_NAME = env("GS_STATIC_BUCKET_NAME", default="")
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "https://storage.googleapis.com/%s/" % GS_STATIC_BUCKET_NAME
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_STORAGE = "config.storage_backends.GoogleCloudStaticStorage"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL",
default="Employee Management Backend <noreply@example.com>",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
"DJANGO_EMAIL_SUBJECT_PREFIX", default="[Employee Management Backend]"
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
"MAILGUN_API_KEY": env("MAILGUN_API_KEY"),
"MAILGUN_SENDER_DOMAIN": env("MAILGUN_DOMAIN"),
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ["gunicorn"] # noqa F405
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
INSTALLED_APPS = ["collectfast"] + INSTALLED_APPS # noqa F405
AWS_PRELOAD_METADATA = True
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {
"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}
},
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
},
"file": {
"level": "ERROR",
"class": "logging.FileHandler",
"filename": "/var/log/app/billbored-django",
},
"file_spam": {
"level": "ERROR",
"class": "logging.FileHandler",
"filename": "/var/log/app/disallowed_host",
},
},
"loggers": {
"django.request": {
"handlers": ["mail_admins", "file"],
"level": "ERROR",
"propagate": True,
},
"django": {
"handlers": ["console"],
"level": "WARNING",
"propagate": True,
},
"django.db.backends": {
"level": "ERROR",
"handlers": ["mail_admins", "file"],
"propagate": True,
},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["file_spam"],
"propagate": True,
},
},
}
# Your stuff...
# ------------------------------------------------------------------------------
| 37.599278
| 98
| 0.600768
|
4a0b01251d39af931efceda9cdb0ea9c8b6bf378
| 15,848
|
py
|
Python
|
frille-lang/lib/python3.6/site-packages/typer/completion.py
|
frillecode/CDS-spring-2021-language
|
a0b2116044cd20d4a34b98f23bd2663256c90c5d
|
[
"MIT"
] | 1
|
2020-12-01T07:02:53.000Z
|
2020-12-01T07:02:53.000Z
|
frille-lang/lib/python3.6/site-packages/typer/completion.py
|
frillecode/CDS-spring-2021-language
|
a0b2116044cd20d4a34b98f23bd2663256c90c5d
|
[
"MIT"
] | 3
|
2021-07-17T17:00:28.000Z
|
2021-07-18T14:22:12.000Z
|
frille-lang/lib/python3.6/site-packages/typer/completion.py
|
frillecode/CDS-spring-2021-language
|
a0b2116044cd20d4a34b98f23bd2663256c90c5d
|
[
"MIT"
] | 1
|
2021-07-07T16:07:13.000Z
|
2021-07-07T16:07:13.000Z
|
import os
import re
import subprocess
import sys
from enum import Enum
from pathlib import Path
from typing import Any, Optional, Tuple
import click
import click._bashcomplete
from .models import ParamMeta
from .params import Option
from .utils import get_params_from_function
try:
import shellingham
except ImportError: # pragma: nocover
shellingham = None
_click_patched = False
def get_completion_inspect_parameters() -> Tuple[ParamMeta, ParamMeta]:
completion_init()
test_disable_detection = os.getenv("_TYPER_COMPLETE_TEST_DISABLE_SHELL_DETECTION")
if shellingham and not test_disable_detection:
parameters = get_params_from_function(_install_completion_placeholder_function)
else:
parameters = get_params_from_function(
_install_completion_no_auto_placeholder_function
)
install_param, show_param = parameters.values()
return install_param, show_param
def install_callback(ctx: click.Context, param: click.Parameter, value: Any) -> Any:
if not value or ctx.resilient_parsing:
return value # pragma no cover
if isinstance(value, str):
shell, path = install(shell=value)
else:
shell, path = install()
click.secho(f"{shell} completion installed in {path}", fg="green")
click.echo("Completion will take effect once you restart the terminal")
sys.exit(0)
def show_callback(ctx: click.Context, param: click.Parameter, value: Any) -> Any:
    """Click callback for --show-completion: print the completion script and exit.

    ``value`` is either a truthy flag (shell auto-detected via shellingham)
    or an explicit shell-name string.
    """
    if not value or ctx.resilient_parsing:
        return value  # pragma no cover
    prog_name = ctx.find_root().info_name
    assert prog_name
    complete_var = "_{}_COMPLETE".format(prog_name.replace("-", "_").upper())
    if isinstance(value, str):
        shell = value
    elif shellingham:
        shell, _ = shellingham.detect_shell()
    else:
        # BUGFIX: previously `shell` was left unbound on this path (no
        # shellingham, flag-style value), producing a NameError below.
        # Fail with an actionable message instead.
        click.echo(
            "Shell detection is unavailable (shellingham not installed); "
            "pass the shell name explicitly.",
            err=True,
        )
        sys.exit(1)
    script_content = get_completion_script(
        prog_name=prog_name, complete_var=complete_var, shell=shell
    )
    click.echo(script_content)
    sys.exit(0)
class Shells(str, Enum):
bash = "bash"
zsh = "zsh"
fish = "fish"
powershell = "powershell"
pwsh = "pwsh"
# Create a fake command function to extract the completion parameters
def _install_completion_placeholder_function(
install_completion: bool = Option(
None,
"--install-completion",
is_flag=True,
callback=install_callback,
expose_value=False,
help="Install completion for the current shell.",
),
show_completion: bool = Option(
None,
"--show-completion",
is_flag=True,
callback=show_callback,
expose_value=False,
help="Show completion for the current shell, to copy it or customize the installation.",
),
) -> Any:
pass # pragma no cover
def _install_completion_no_auto_placeholder_function(
install_completion: Shells = Option(
None,
callback=install_callback,
expose_value=False,
help="Install completion for the specified shell.",
),
show_completion: Shells = Option(
None,
callback=show_callback,
expose_value=False,
help="Show completion for the specified shell, to copy it or customize the installation.",
),
) -> Any:
pass # pragma no cover
COMPLETION_SCRIPT_BASH = """
%(complete_func)s() {
local IFS=$'\n'
COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\
COMP_CWORD=$COMP_CWORD \\
%(autocomplete_var)s=complete_bash $1 ) )
return 0
}
complete -o default -F %(complete_func)s %(prog_name)s
"""
COMPLETION_SCRIPT_ZSH = """
#compdef %(prog_name)s
%(complete_func)s() {
eval $(env _TYPER_COMPLETE_ARGS="${words[1,$CURRENT]}" %(autocomplete_var)s=complete_zsh %(prog_name)s)
}
compdef %(complete_func)s %(prog_name)s
"""
COMPLETION_SCRIPT_FISH = 'complete --command %(prog_name)s --no-files --arguments "(env %(autocomplete_var)s=complete_fish _TYPER_COMPLETE_FISH_ACTION=get-args _TYPER_COMPLETE_ARGS=(commandline -cp) %(prog_name)s)" --condition "env %(autocomplete_var)s=complete_fish _TYPER_COMPLETE_FISH_ACTION=is-args _TYPER_COMPLETE_ARGS=(commandline -cp) %(prog_name)s"'
COMPLETION_SCRIPT_POWER_SHELL = """
Import-Module PSReadLine
Set-PSReadLineKeyHandler -Chord Tab -Function MenuComplete
$scriptblock = {
param($wordToComplete, $commandAst, $cursorPosition)
$Env:%(autocomplete_var)s = "complete_powershell"
$Env:_TYPER_COMPLETE_ARGS = $commandAst.ToString()
$Env:_TYPER_COMPLETE_WORD_TO_COMPLETE = $wordToComplete
%(prog_name)s | ForEach-Object {
$commandArray = $_ -Split ":::"
$command = $commandArray[0]
$helpString = $commandArray[1]
[System.Management.Automation.CompletionResult]::new(
$command, $command, 'ParameterValue', $helpString)
}
$Env:%(autocomplete_var)s = ""
$Env:_TYPER_COMPLETE_ARGS = ""
$Env:_TYPER_COMPLETE_WORD_TO_COMPLETE = ""
}
Register-ArgumentCompleter -Native -CommandName %(prog_name)s -ScriptBlock $scriptblock
"""
def install(
    shell: Optional[str] = None,
    prog_name: Optional[str] = None,
    complete_var: Optional[str] = None,
) -> Tuple[str, Path]:
    """Install shell completion for *prog_name* and return ``(shell, path)``.

    The shell is auto-detected via shellingham when not given; unsupported
    shells print a message and exit with status 1.
    """
    prog_name = prog_name or click.get_current_context().find_root().info_name
    assert prog_name
    if complete_var is None:
        complete_var = "_{}_COMPLETE".format(prog_name.replace("-", "_").upper())
    if shell is None and shellingham is not None:
        shell, _ = shellingham.detect_shell()

    # Dispatch table replaces the original if/elif ladder; "powershell" and
    # "pwsh" intentionally share one installer.
    installers = {
        "bash": install_bash,
        "zsh": install_zsh,
        "fish": install_fish,
        "powershell": install_powershell,
        "pwsh": install_powershell,
    }
    installer = installers.get(shell)
    if installer is None:
        click.echo(f"Shell {shell} is not supported.")
        raise click.exceptions.Exit(1)
    installed_path = installer(
        prog_name=prog_name, complete_var=complete_var, shell=shell
    )
    return shell, installed_path
def install_bash(*, prog_name: str, complete_var: str, shell: str) -> Path:
# Ref: https://github.com/scop/bash-completion#faq
# It seems bash-completion is the official completion system for bash:
# Ref: https://www.gnu.org/software/bash/manual/html_node/A-Programmable-Completion-Example.html
# But installing in the locations from the docs doesn't seem to have effect
completion_path = Path.home() / f".bash_completions/{prog_name}.sh"
rc_path = Path.home() / ".bashrc"
rc_path.parent.mkdir(parents=True, exist_ok=True)
rc_content = ""
if rc_path.is_file():
rc_content = rc_path.read_text()
completion_init_lines = [f"source {completion_path}"]
for line in completion_init_lines:
if line not in rc_content: # pragma: nocover
rc_content += f"\n{line}"
rc_content += "\n"
rc_path.write_text(rc_content)
# Install completion
completion_path.parent.mkdir(parents=True, exist_ok=True)
script_content = get_completion_script(
prog_name=prog_name, complete_var=complete_var, shell=shell
)
completion_path.write_text(script_content)
return completion_path
def install_zsh(*, prog_name: str, complete_var: str, shell: str) -> Path:
# Setup Zsh and load ~/.zfunc
zshrc_path = Path.home() / ".zshrc"
zshrc_path.parent.mkdir(parents=True, exist_ok=True)
zshrc_content = ""
if zshrc_path.is_file():
zshrc_content = zshrc_path.read_text()
completion_init_lines = [
"autoload -Uz compinit",
"compinit",
"zstyle ':completion:*' menu select",
"fpath+=~/.zfunc",
]
for line in completion_init_lines:
if line not in zshrc_content: # pragma: nocover
zshrc_content += f"\n{line}"
zshrc_content += "\n"
zshrc_path.write_text(zshrc_content)
# Install completion under ~/.zfunc/
path_obj = Path.home() / f".zfunc/_{prog_name}"
path_obj.parent.mkdir(parents=True, exist_ok=True)
script_content = get_completion_script(
prog_name=prog_name, complete_var=complete_var, shell=shell
)
path_obj.write_text(script_content)
return path_obj
def install_fish(*, prog_name: str, complete_var: str, shell: str) -> Path:
path_obj = Path.home() / f".config/fish/completions/{prog_name}.fish"
parent_dir: Path = path_obj.parent
parent_dir.mkdir(parents=True, exist_ok=True)
script_content = get_completion_script(
prog_name=prog_name, complete_var=complete_var, shell=shell
)
path_obj.write_text(f"{script_content}\n")
return path_obj
def install_powershell(*, prog_name: str, complete_var: str, shell: str) -> Path:
    """Append the completion script to the user's PowerShell profile.

    Returns the profile path written to. Exits with the subprocess's return
    code if the profile path cannot be determined.
    """
    # Best effort: allow profile scripts to execute; failures are ignored
    # (the user may already have a sufficient execution policy).
    subprocess.run(
        [
            shell,
            "-Command",
            "Set-ExecutionPolicy",
            "Unrestricted",
            "-Scope",
            "CurrentUser",
        ]
    )
    # BUGFIX: `check=True` raised CalledProcessError before the explicit
    # returncode handling below could run, making the friendly error branch
    # unreachable. Drop check= and let the manual check report the failure.
    result = subprocess.run(
        [shell, "-NoProfile", "-Command", "echo", "$profile"],
        stdout=subprocess.PIPE,
    )
    if result.returncode != 0:  # pragma: nocover
        click.echo("Couldn't get PowerShell user profile", err=True)
        raise click.exceptions.Exit(result.returncode)
    path_str = ""
    if isinstance(result.stdout, str):  # pragma: nocover
        path_str = result.stdout
    if isinstance(result.stdout, bytes):
        try:
            # PowerShell would be predominant in Windows
            path_str = result.stdout.decode("windows-1252")
        except UnicodeDecodeError:  # pragma: nocover
            try:
                path_str = result.stdout.decode("utf8")
            except UnicodeDecodeError:
                click.echo("Couldn't decode the path automatically", err=True)
                raise click.exceptions.Exit(1)
    path_obj = Path(path_str.strip())
    parent_dir: Path = path_obj.parent
    parent_dir.mkdir(parents=True, exist_ok=True)
    script_content = get_completion_script(
        prog_name=prog_name, complete_var=complete_var, shell=shell
    )
    # Append (not overwrite): the profile may contain unrelated user content.
    with path_obj.open(mode="a") as f:
        f.write(f"{script_content}\n")
    return path_obj
def do_bash_complete(cli: click.Command, prog_name: str) -> bool:
cwords = click.parser.split_arg_string(os.getenv("COMP_WORDS", ""))
cword = int(os.getenv("COMP_CWORD", 0))
args = cwords[1:cword]
try:
incomplete = cwords[cword]
except IndexError:
incomplete = ""
for item in click._bashcomplete.get_choices(cli, prog_name, args, incomplete):
click.echo(item[0])
return True
def do_zsh_complete(cli: click.Command, prog_name: str) -> bool:
completion_args = os.getenv("_TYPER_COMPLETE_ARGS", "")
cwords = click.parser.split_arg_string(completion_args)
args = cwords[1:]
if args and not completion_args.endswith(" "):
incomplete = args[-1]
args = args[:-1]
else:
incomplete = ""
def escape(s: str) -> str:
return (
s.replace('"', '""')
.replace("'", "''")
.replace("$", "\\$")
.replace("`", "\\`")
)
res = []
for item, help in click._bashcomplete.get_choices(cli, prog_name, args, incomplete):
if help:
res.append(f'"{escape(item)}":"{escape(help)}"')
else:
res.append(f'"{escape(item)}"')
if res:
args_str = "\n".join(res)
click.echo(f"_arguments '*: :(({args_str}))'")
else:
click.echo("_files")
return True
def do_fish_complete(cli: click.Command, prog_name: str) -> bool:
completion_args = os.getenv("_TYPER_COMPLETE_ARGS", "")
complete_action = os.getenv("_TYPER_COMPLETE_FISH_ACTION", "")
cwords = click.parser.split_arg_string(completion_args)
args = cwords[1:]
if args and not completion_args.endswith(" "):
incomplete = args[-1]
args = args[:-1]
else:
incomplete = ""
show_args = []
for item, help in click._bashcomplete.get_choices(cli, prog_name, args, incomplete):
if help:
formatted_help = re.sub(r"\s", " ", help)
show_args.append(f"{item}\t{formatted_help}")
else:
show_args.append(item)
if complete_action == "get-args":
if show_args:
for arg in show_args:
click.echo(arg)
elif complete_action == "is-args":
if show_args:
# Activate complete args (no files)
sys.exit(0)
else:
# Deactivate complete args (allow files)
sys.exit(1)
return True
def do_powershell_complete(cli: click.Command, prog_name: str) -> bool:
completion_args = os.getenv("_TYPER_COMPLETE_ARGS", "")
incomplete = os.getenv("_TYPER_COMPLETE_WORD_TO_COMPLETE", "")
cwords = click.parser.split_arg_string(completion_args)
args = cwords[1:]
for item, help in click._bashcomplete.get_choices(cli, prog_name, args, incomplete):
click.echo(f"{item}:::{help or ' '}")
return True
def do_shell_complete(*, cli: click.Command, prog_name: str, shell: str) -> bool:
    """Dispatch completion generation to the handler for *shell*.

    Returns False for unknown shells, otherwise the handler's result.
    """
    handlers = {
        "bash": do_bash_complete,
        "zsh": do_zsh_complete,
        "fish": do_fish_complete,
        "powershell": do_powershell_complete,
        "pwsh": do_powershell_complete,
    }
    handler = handlers.get(shell)
    if handler is None:
        return False
    return handler(cli, prog_name)
_completion_scripts = {
"bash": COMPLETION_SCRIPT_BASH,
"zsh": COMPLETION_SCRIPT_ZSH,
"fish": COMPLETION_SCRIPT_FISH,
"powershell": COMPLETION_SCRIPT_POWER_SHELL,
"pwsh": COMPLETION_SCRIPT_POWER_SHELL,
}
def get_completion_script(*, prog_name: str, complete_var: str, shell: str) -> str:
    """Render the completion-script template for *shell*.

    Exits with status 1 (after printing to stderr) for unsupported shells.
    """
    template = _completion_scripts.get(shell)
    if template is None:
        click.echo(f"Shell {shell} not supported.", err=True)
        sys.exit(1)
    # Strip characters that are invalid in a shell function identifier.
    func_name = click._bashcomplete._invalid_ident_char_re.sub(
        "", prog_name.replace("-", "_")
    )
    rendered = template % dict(
        complete_func="_{}_completion".format(func_name),
        prog_name=prog_name,
        autocomplete_var=complete_var,
    )
    return rendered.strip()
def handle_shell_complete(
    cli: click.Command, prog_name: str, complete_var: str, complete_instr: str
) -> bool:
    """Handle a ``<command>_<shell>`` completion instruction.

    ``source_<shell>`` prints the completion script; ``complete_<shell>``
    emits completion candidates. Returns True when the instruction was
    handled, False otherwise; exits(1) on a malformed instruction.
    """
    if "_" not in complete_instr:
        click.echo("Invalid completion instruction.", err=True)
        sys.exit(1)
    command, shell = complete_instr.split("_", 1)
    if command == "source":
        script = get_completion_script(
            prog_name=prog_name, complete_var=complete_var, shell=shell
        )
        click.echo(script)
        return True
    if command == "complete":
        return do_shell_complete(cli=cli, prog_name=prog_name, shell=shell)
    return False
def completion_init() -> None:
global _click_patched
if not _click_patched:
testing = os.getenv("_TYPER_COMPLETE_TESTING")
def testing_handle_shell_complete(
cli: click.Command, prog_name: str, complete_var: str, complete_instr: str
) -> bool:
result = handle_shell_complete(cli, prog_name, complete_var, complete_instr)
if result:
# Avoid fast_exit(1) in Click so Coverage can finish
sys.exit(1)
return result
if testing:
click._bashcomplete.bashcomplete = testing_handle_shell_complete
else:
click._bashcomplete.bashcomplete = handle_shell_complete
_click_patched = True
| 33.719149
| 357
| 0.653584
|
4a0b0244e8162a8b197c6dd12ec001a803372313
| 6,214
|
py
|
Python
|
app/systems/commands/messages.py
|
criticallycode/zima
|
cd38cac1c0c33b362d110ae28deba3828daa3f4a
|
[
"Apache-2.0"
] | null | null | null |
app/systems/commands/messages.py
|
criticallycode/zima
|
cd38cac1c0c33b362d110ae28deba3828daa3f4a
|
[
"Apache-2.0"
] | null | null | null |
app/systems/commands/messages.py
|
criticallycode/zima
|
cd38cac1c0c33b362d110ae28deba3828daa3f4a
|
[
"Apache-2.0"
] | null | null | null |
from django.utils.module_loading import import_string
from utility.runtime import Runtime
from utility.terminal import TerminalMixin
from utility.encryption import Cipher
from utility.display import format_data
import sys
import json
import logging
logger = logging.getLogger(__name__)
class AppMessage(TerminalMixin):
    """Base envelope for command messages exchanged as encrypted JSON packages.

    Subclasses override ``format``/``display`` to control terminal rendering;
    the wire format is produced by ``render``/``to_package`` and consumed by
    the ``get`` classmethod.
    """

    # Shared cipher used to encrypt/decrypt message packages.
    cipher = Cipher.get('message')

    @classmethod
    def get(cls, data, decrypt = True):
        """Reconstruct a message object from a (possibly encrypted) payload."""
        if decrypt:
            payload = cls.cipher.decrypt(data['package'], False)
            data = json.loads(payload)
        try:
            # NOTE(review): import_string returns the class *uncalled* here,
            # while the module-level fallback instantiates — confirm intended.
            msg = import_string(data['type'])
        except Exception:
            msg = getattr(sys.modules[__name__], data['type'])()
        msg.load(data)
        return msg

    def __init__(self, message = '', name = None, prefix = None, silent = False):
        super().__init__()
        # Type name is serialized so the receiver can rebuild the subclass.
        self.type = self.__class__.__name__
        self.name = name
        self.prefix = prefix
        self.message = message
        self.silent = silent

    def load(self, data):
        """Copy every payload field except 'type' onto this instance."""
        for field, value in data.items():
            if field != 'type':
                setattr(self, field, value)

    def render(self):
        """Serialize to a plain dict; falsy optional fields are omitted."""
        data = {
            'type': self.type,
            'message': self.message
        }
        if self.name:
            data['name'] = self.name
        if self.prefix:
            data['prefix'] = self.prefix
        if self.silent:
            data['silent'] = self.silent
        return data

    def to_json(self):
        """Return the JSON rendering of this message."""
        return json.dumps(self.render())

    def to_package(self):
        """Encrypt the JSON rendering into a newline-terminated package."""
        encrypted = self.__class__.cipher.encrypt(self.to_json()).decode('utf-8')
        return json.dumps({ 'package': encrypted }) + "\n"

    def format(self, debug = False, disable_color = False, width = None):
        """Render for terminal display (plain message after optional prefix)."""
        return "{}{}".format(self._format_prefix(disable_color), self.message)

    def _format_prefix(self, disable_color):
        # Prefix (colored unless disabled) followed by a separating space.
        if not self.prefix:
            return ''
        rendered = self.prefix if disable_color else self.prefix_color(self.prefix)
        return rendered + ' '

    def display(self, debug = False, disable_color = False, width = None):
        """Write the formatted message to stdout unless silenced."""
        if self.silent:
            return
        self.print(self.format(
            debug = debug,
            disable_color = disable_color,
            width = width
        ), sys.stdout)
        sys.stdout.flush()
class DataMessage(AppMessage):
    """Message carrying a labelled data value alongside its text."""

    def __init__(self, message = '', data = None, name = None, prefix = None, silent = False):
        super().__init__(message,
            name = name,
            prefix = prefix,
            silent = silent
        )
        self.data = data

    def render(self):
        """Extend the base rendering with the data payload."""
        rendered = super().render()
        rendered['data'] = self.data
        return rendered

    def format(self, debug = False, disable_color = False, width = None):
        """Render as '<prefix><message>: <data>' with the data colorized."""
        value = self.data if disable_color else self.value_color(self.data)
        return "{}{}: {}".format(
            self._format_prefix(disable_color),
            self.message,
            value
        )
class InfoMessage(AppMessage):
    """Plain informational message; inherits the base formatting unchanged."""
class NoticeMessage(AppMessage):
    """Message rendered in the terminal's notice color."""

    def format(self, debug = False, disable_color = False, width = None):
        text = self.message if disable_color else self.notice_color(self.message)
        return "{}{}".format(self._format_prefix(disable_color), text)
class SuccessMessage(AppMessage):
    """Message rendered in the terminal's success color."""

    def format(self, debug = False, disable_color = False, width = None):
        text = self.message if disable_color else self.success_color(self.message)
        return "{}{}".format(self._format_prefix(disable_color), text)
class WarningMessage(AppMessage):
    """Message rendered in the warning color and written to stderr."""

    def format(self, debug = False, disable_color = False, width = None):
        text = self.message if disable_color else self.warning_color(self.message)
        return "{}{}".format(self._format_prefix(disable_color), text)

    def display(self, debug = False, disable_color = False, width = None):
        """Write the formatted warning to stderr unless silenced."""
        if not self.silent:
            # BUGFIX: previously called self.format(debug) positionally,
            # silently dropping the disable_color and width options; pass all
            # three through, consistent with AppMessage/ErrorMessage.display.
            self.print(self.format(
                debug = debug,
                disable_color = disable_color,
                width = width
            ), sys.stderr)
            sys.stderr.flush()
class ErrorMessage(AppMessage):
    """Error message carrying an optional traceback, written to stderr."""

    def __init__(self, message = '', traceback = None, name = None, prefix = None, silent = False):
        super().__init__(message,
            name = name,
            prefix = prefix,
            silent = silent
        )
        self.traceback = traceback

    def render(self):
        """Extend the base rendering with the captured traceback."""
        rendered = super().render()
        rendered['traceback'] = self.traceback
        return rendered

    def format(self, debug = False, disable_color = False, width = None):
        """Render '** <message>'; in debug mode append the traceback block."""
        text = self.message if disable_color else self.error_color(self.message)
        if Runtime.debug() or debug:
            cleaned = "\n".join([ entry.strip() for entry in self.traceback ])
            trace_text = cleaned if disable_color else self.traceback_color(cleaned)
            return "\n{}** {}\n\n> {}\n".format(
                self._format_prefix(disable_color),
                text,
                trace_text
            )
        return "{}** {}".format(self._format_prefix(disable_color), text)

    def display(self, debug = False, disable_color = False, width = None):
        """Write the formatted error to stderr unless silenced or empty."""
        if self.silent or not self.message:
            return
        self.print(self.format(
            debug = debug,
            disable_color = disable_color,
            width = width
        ), sys.stderr)
        sys.stderr.flush()
class TableMessage(AppMessage):
    """Message whose payload is tabular data rendered via format_data."""

    def __init__(self, message = '', name = None, prefix = None, silent = False, row_labels = False):
        super().__init__(message,
            name = name,
            prefix = prefix,
            silent = silent
        )
        # Whether the first column holds row labels.
        self.row_labels = row_labels

    def render(self):
        """Extend the base rendering with the row-label flag."""
        rendered = super().render()
        rendered['row_labels'] = self.row_labels
        return rendered

    def format(self, debug = False, disable_color = False, width = None):
        return format_data(
            self.message,
            self._format_prefix(disable_color),
            row_labels = self.row_labels,
            width = width
        )
| 30.019324
| 117
| 0.594947
|
4a0b02604ca5aae87e9469f3b1cd07f253aa71e5
| 285
|
py
|
Python
|
huf/generic_utils.py
|
jackd/huf
|
2ca50eb24dcb907c66d6caad3d0a680c3efcae86
|
[
"Apache-2.0"
] | null | null | null |
huf/generic_utils.py
|
jackd/huf
|
2ca50eb24dcb907c66d6caad3d0a680c3efcae86
|
[
"Apache-2.0"
] | null | null | null |
huf/generic_utils.py
|
jackd/huf
|
2ca50eb24dcb907c66d6caad3d0a680c3efcae86
|
[
"Apache-2.0"
] | null | null | null |
import typing as tp
from collections import defaultdict
V = tp.TypeVar("V")
K = tp.TypeVar("K")


def group_by(objs: tp.Iterable[V], key: tp.Callable[[V], K]) -> tp.Dict[K, tp.List[V]]:
    """Partition *objs* into lists keyed by ``key(obj)``, preserving order.

    Returns a ``defaultdict(list)``, so looking up an unseen key yields an
    empty list rather than raising ``KeyError``.
    """
    grouped: tp.DefaultDict[K, tp.List[V]] = defaultdict(list)
    for item in objs:
        grouped[key(item)].append(item)
    return grouped
| 21.923077
| 87
| 0.638596
|
4a0b0265b2808a5c7eb20932d44275881b03d893
| 1,118
|
py
|
Python
|
lrschedule.py
|
seo3650/wavenet_vocoder
|
106ee674b1b19d4d20606231198552a8a574249d
|
[
"MIT"
] | 2,068
|
2017-12-31T06:16:58.000Z
|
2022-03-29T08:16:22.000Z
|
lrschedule.py
|
seo3650/wavenet_vocoder
|
106ee674b1b19d4d20606231198552a8a574249d
|
[
"MIT"
] | 202
|
2017-12-31T05:33:24.000Z
|
2022-03-15T19:20:22.000Z
|
lrschedule.py
|
seo3650/wavenet_vocoder
|
106ee674b1b19d4d20606231198552a8a574249d
|
[
"MIT"
] | 486
|
2017-12-31T10:06:06.000Z
|
2022-03-31T05:26:30.000Z
|
import numpy as np
# https://github.com/tensorflow/tensor2tensor/issues/280#issuecomment-339110329
def noam_learning_rate_decay(init_lr, global_step, warmup_steps=4000):
    """Noam LR schedule (tensor2tensor): linear warmup, then inverse-sqrt decay.

    The two branches of the min() cross exactly at ``warmup_steps``, where the
    returned rate equals ``init_lr``.
    """
    warmup = float(warmup_steps)
    step = global_step + 1.0
    scale = np.minimum(step * warmup ** -1.5, step ** -0.5)
    return init_lr * warmup ** 0.5 * scale
def step_learning_rate_decay(init_lr, global_step,
                             anneal_rate=0.98,
                             anneal_interval=30000):
    """Piecewise-constant decay: multiply by anneal_rate every anneal_interval steps."""
    completed_intervals = global_step // anneal_interval
    return init_lr * anneal_rate ** completed_intervals
def cyclic_cosine_annealing(init_lr, global_step, T, M):
    """Cyclic cosine annealing (Snapshot Ensembles).

    https://arxiv.org/pdf/1704.00109.pdf

    Args:
        init_lr (float): Initial (maximum) learning rate.
        global_step (int): Current iteration number (1-based).
        T (int): Total iteration count (e.g. number of epochs).
        M (int): Number of snapshots/ensembles desired.

    Returns:
        float: Annealed learning rate for this step.
    """
    cycle_len = T // M
    # Phase runs from 0 to pi over each cycle, restarting the rate at init_lr.
    phase = np.pi * ((global_step - 1) % cycle_len) / cycle_len
    return init_lr / 2.0 * (np.cos(phase) + 1.0)
| 31.055556
| 86
| 0.644007
|
4a0b039a396689360dc56ab19e0a907abe011b49
| 107
|
py
|
Python
|
numpyro/version.py
|
ibab/numpyro
|
076bb2a15026734a41abf4eb39ce384ab68c297f
|
[
"Apache-2.0"
] | 1
|
2020-11-30T23:51:45.000Z
|
2020-11-30T23:51:45.000Z
|
numpyro/version.py
|
ibab/numpyro
|
076bb2a15026734a41abf4eb39ce384ab68c297f
|
[
"Apache-2.0"
] | null | null | null |
numpyro/version.py
|
ibab/numpyro
|
076bb2a15026734a41abf4eb39ce384ab68c297f
|
[
"Apache-2.0"
] | 1
|
2020-11-30T23:52:33.000Z
|
2020-11-30T23:52:33.000Z
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0

# Package version string (PEP 440 format); read by packaging tooling and
# exposed as the package's __version__ attribute.
__version__ = '0.2.4'
| 21.4
| 45
| 0.747664
|
4a0b058da68acdb2dd3c36435fd997713249fdde
| 2,915
|
py
|
Python
|
adobe_tools/adobe_tools.py
|
1dustindavis/IT-CPE
|
bf7c097cb0c3749a533799e1ceeb60eebc658f10
|
[
"BSD-3-Clause"
] | 1
|
2020-08-13T07:56:12.000Z
|
2020-08-13T07:56:12.000Z
|
adobe_tools/adobe_tools.py
|
1dustindavis/IT-CPE
|
bf7c097cb0c3749a533799e1ceeb60eebc658f10
|
[
"BSD-3-Clause"
] | null | null | null |
adobe_tools/adobe_tools.py
|
1dustindavis/IT-CPE
|
bf7c097cb0c3749a533799e1ceeb60eebc658f10
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
"""Adobe API tools."""
import adobe_api
# These are the most common actions that one would use the Adobe UM API for.
def user_exists(username):
"""Return True if the username exists, or False if it doesn't."""
try:
adobe_api.AdobeAPIObject(username)
except adobe_api.AdobeAPINoUserException:
return False
return True
def user_is_federated(username):
"""
Return if the username exists and is federated.
If the username does not exist, the result will be False.
"""
try:
instance = adobe_api.AdobeAPIObject(username)
except adobe_api.AdobeAPINoUserException:
return False
return instance.is_federated()
def does_user_have_product(product, username):
"""Return True/False if a user has the specified product."""
try:
instance = adobe_api.AdobeAPIObject(username)
except adobe_api.AdobeAPINoUserException:
return False
return instance.has_product(product)
def list_user_products(username):
"""Return a list of the user's product configs."""
instance = adobe_api.AdobeAPIObject(username)
return instance.list_products()
def does_product_exist(productname):
"""Return True if a product config exists."""
instance = adobe_api.AdobeAPIObject(
"fake@fake.com",
allow_nonexistent_user=True
)
return instance.product_exists(productname)
def get_product_list():
"""Return a list of product configs available."""
instance = adobe_api.AdobeAPIObject(
"fake@fake.com",
allow_nonexistent_user=True
)
productlist = instance.gather_product_list()
return [x['groupName'] for x in productlist]
def add_federated_user(username, email, firstname, lastname, country='US'):
"""Add federated user account."""
instance = adobe_api.AdobeAPIObject(
username,
allow_nonexistent_user=True
)
return instance.add_federated_user(email, country, firstname, lastname)
def remove_user(username):
"""Remove user account from organization."""
instance = adobe_api.AdobeAPIObject(username)
return instance.remove_user_from_org(username)
def add_products(desired_products, username):
"""Add products to specific user."""
instance = adobe_api.AdobeAPIObject(username)
return instance.add_products_to_user(desired_products)
def remove_products(removed_products, username):
"""Remove products from specific user."""
instance = adobe_api.AdobeAPIObject(username)
return instance.remove_product_from_user(removed_products)
def api_reachable():
"""Return True if the API is reachable."""
try:
adobe_api.AdobeAPIObject(
"fake@fake.com",
allow_nonexistent_user=True
)
except (adobe_api.AdobeAPIBadStatusException,
adobe_api.AdobeAPIMissingRequirementsException):
return False
return True
| 28.300971
| 76
| 0.709777
|
4a0b05ab108f6372801d46878e43cedc3beccd98
| 11,650
|
py
|
Python
|
pptx/shapes/connector.py
|
auxi-ai/python_pptx_auxi
|
d55ff63a808f7acb52354f0b0cb8d3176def34cd
|
[
"MIT"
] | null | null | null |
pptx/shapes/connector.py
|
auxi-ai/python_pptx_auxi
|
d55ff63a808f7acb52354f0b0cb8d3176def34cd
|
[
"MIT"
] | null | null | null |
pptx/shapes/connector.py
|
auxi-ai/python_pptx_auxi
|
d55ff63a808f7acb52354f0b0cb8d3176def34cd
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
"""Connector (line) shape and related objects.
A connector is a line shape having end-points that can be connected to other
objects (but not to other connectors). A connector can be straight, have
elbows, or can be curved.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from pptx.dml.line import LineFormat
from pptx.shapes.base import BaseShape
from pptx.util import Emu, lazyproperty
class Connector(BaseShape):
"""Connector (line) shape.
A connector is a linear shape having end-points that can be connected to
other objects (but not to other connectors). A connector can be straight,
have elbows, or can be curved.
"""
def begin_connect(self, shape, cxn_pt_idx):
"""
**EXPERIMENTAL** - *The current implementation only works properly
with rectangular shapes, such as pictures and rectangles. Use with
other shape types may cause unexpected visual alignment of the
connected end-point and could lead to a load error if cxn_pt_idx
exceeds the connection point count available on the connected shape.
That said, a quick test should reveal what to expect when using this
method with other shape types.*
Connect the beginning of this connector to *shape* at the connection
point specified by *cxn_pt_idx*. Each shape has zero or more
connection points and they are identified by index, starting with 0.
Generally, the first connection point of a shape is at the top center
of its bounding box and numbering proceeds counter-clockwise from
there. However this is only a convention and may vary, especially
with non built-in shapes.
"""
self._connect_begin_to(shape, cxn_pt_idx)
self._move_begin_to_cxn(shape, cxn_pt_idx)
@property
def begin_x(self):
"""
Return the X-position of the begin point of this connector, in
English Metric Units (as a |Length| object).
"""
cxnSp = self._element
x, cx, flipH = cxnSp.x, cxnSp.cx, cxnSp.flipH
begin_x = x + cx if flipH else x
return Emu(begin_x)
@begin_x.setter
def begin_x(self, value):
cxnSp = self._element
x, cx, flipH, new_x = cxnSp.x, cxnSp.cx, cxnSp.flipH, int(value)
if flipH:
old_x = x + cx
dx = abs(new_x - old_x)
if new_x >= old_x:
cxnSp.cx = cx + dx
elif dx <= cx:
cxnSp.cx = cx - dx
else:
cxnSp.flipH = False
cxnSp.x = new_x
cxnSp.cx = dx - cx
else:
dx = abs(new_x - x)
if new_x <= x:
cxnSp.x = new_x
cxnSp.cx = cx + dx
elif dx <= cx:
cxnSp.x = new_x
cxnSp.cx = cx - dx
else:
cxnSp.flipH = True
cxnSp.x = x + cx
cxnSp.cx = dx - cx
@property
def begin_y(self):
"""
Return the Y-position of the begin point of this connector, in
English Metric Units (as a |Length| object).
"""
cxnSp = self._element
y, cy, flipV = cxnSp.y, cxnSp.cy, cxnSp.flipV
begin_y = y + cy if flipV else y
return Emu(begin_y)
@begin_y.setter
def begin_y(self, value):
cxnSp = self._element
y, cy, flipV, new_y = cxnSp.y, cxnSp.cy, cxnSp.flipV, int(value)
if flipV:
old_y = y + cy
dy = abs(new_y - old_y)
if new_y >= old_y:
cxnSp.cy = cy + dy
elif dy <= cy:
cxnSp.cy = cy - dy
else:
cxnSp.flipV = False
cxnSp.y = new_y
cxnSp.cy = dy - cy
else:
dy = abs(new_y - y)
if new_y <= y:
cxnSp.y = new_y
cxnSp.cy = cy + dy
elif dy <= cy:
cxnSp.y = new_y
cxnSp.cy = cy - dy
else:
cxnSp.flipV = True
cxnSp.y = y + cy
cxnSp.cy = dy - cy
def end_connect(self, shape, cxn_pt_idx):
"""
**EXPERIMENTAL** - *The current implementation only works properly
with rectangular shapes, such as pictures and rectangles. Use with
other shape types may cause unexpected visual alignment of the
connected end-point and could lead to a load error if cxn_pt_idx
exceeds the connection point count available on the connected shape.
That said, a quick test should reveal what to expect when using this
method with other shape types.*
Connect the ending of this connector to *shape* at the connection
point specified by *cxn_pt_idx*.
"""
self._connect_end_to(shape, cxn_pt_idx)
self._move_end_to_cxn(shape, cxn_pt_idx)
@property
def end_x(self):
"""
Return the X-position of the end point of this connector, in English
Metric Units (as a |Length| object).
"""
cxnSp = self._element
x, cx, flipH = cxnSp.x, cxnSp.cx, cxnSp.flipH
end_x = x if flipH else x + cx
return Emu(end_x)
@end_x.setter
def end_x(self, value):
cxnSp = self._element
x, cx, flipH, new_x = cxnSp.x, cxnSp.cx, cxnSp.flipH, int(value)
if flipH:
dx = abs(new_x - x)
if new_x <= x:
cxnSp.x = new_x
cxnSp.cx = cx + dx
elif dx <= cx:
cxnSp.x = new_x
cxnSp.cx = cx - dx
else:
cxnSp.flipH = False
cxnSp.x = x + cx
cxnSp.cx = dx - cx
else:
old_x = x + cx
dx = abs(new_x - old_x)
if new_x >= old_x:
cxnSp.cx = cx + dx
elif dx <= cx:
cxnSp.cx = cx - dx
else:
cxnSp.flipH = True
cxnSp.x = new_x
cxnSp.cx = dx - cx
@property
def end_y(self):
"""
Return the Y-position of the end point of this connector, in English
Metric Units (as a |Length| object).
"""
cxnSp = self._element
y, cy, flipV = cxnSp.y, cxnSp.cy, cxnSp.flipV
end_y = y if flipV else y + cy
return Emu(end_y)
@end_y.setter
def end_y(self, value):
cxnSp = self._element
y, cy, flipV, new_y = cxnSp.y, cxnSp.cy, cxnSp.flipV, int(value)
if flipV:
dy = abs(new_y - y)
if new_y <= y:
cxnSp.y = new_y
cxnSp.cy = cy + dy
elif dy <= cy:
cxnSp.y = new_y
cxnSp.cy = cy - dy
else:
cxnSp.flipV = False
cxnSp.y = y + cy
cxnSp.cy = dy - cy
else:
old_y = y + cy
dy = abs(new_y - old_y)
if new_y >= old_y:
cxnSp.cy = cy + dy
elif dy <= cy:
cxnSp.cy = cy - dy
else:
cxnSp.flipV = True
cxnSp.y = new_y
cxnSp.cy = dy - cy
def get_or_add_ln(self):
"""
Return the ``<a:ln>`` element containing the line format properties
XML for this connector.
"""
return self._element.spPr.get_or_add_ln()
@lazyproperty
def line(self):
"""
|LineFormat| instance for this connector, providing access to line
properties such as line color.
"""
return LineFormat(self)
@property
def ln(self):
"""
The ``<a:ln>`` element containing the line format properties such as
line color and width. |None| if no ``<a:ln>`` element is present.
"""
return self._element.spPr.ln
def _connect_begin_to(self, shape, cxn_pt_idx):
"""
Add or update a stCxn element for this connector that connects its
begin point to the connection point of *shape* specified by
*cxn_pt_idx*.
"""
cNvCxnSpPr = self._element.nvCxnSpPr.cNvCxnSpPr
stCxn = cNvCxnSpPr.get_or_add_stCxn()
stCxn.id = shape.shape_id
stCxn.idx = cxn_pt_idx
def _connect_end_to(self, shape, cxn_pt_idx):
"""
Add or update an endCxn element for this connector that connects its
end point to the connection point of *shape* specified by
*cxn_pt_idx*.
"""
cNvCxnSpPr = self._element.nvCxnSpPr.cNvCxnSpPr
endCxn = cNvCxnSpPr.get_or_add_endCxn()
endCxn.id = shape.shape_id
endCxn.idx = cxn_pt_idx
def _move_begin_to_cxn(self, shape, cxn_pt_idx):
"""
Move the begin point of this connector to coordinates of the
connection point of *shape* specified by *cxn_pt_idx*.
"""
self.begin_x, self.begin_y = self._get_shp_cxn_pt(shape, cxn_pt_idx)
def _move_end_to_cxn(self, shape, cxn_pt_idx):
"""
Move the end point of this connector to the coordinates of the
connection point of *shape* specified by *cxn_pt_idx*.
"""
self.end_x, self.end_y = self._get_shp_cxn_pt(shape, cxn_pt_idx)
def _get_shp_cxn_pt(self, shape, cxn_pt_idx):
"""
Function to compute approximate location of the connection points
for a given shape.
"""
name, x, y, cx, cy = shape.name, shape.left, shape.top, shape.width, shape.height
name = name.lower()
if 'parallelogram' in name or 'data' in name:
buffer_x = 0.1477732794
connection_points = [
((x + ((1 - buffer_x) * cx / 2) + +buffer_x * cx), y), # Top parallelogram-midpoint
((x + cx / 2), y), # Top shape-midpoint
((x + (buffer_x * cx) / 2), (y + cy / 2)), # Left
((x + ((1 - buffer_x) * cx / 2)), y + cy), # Bottom parallelogram-midpoint
((x + cx / 2), y + cy), # Bottom shape-midpoint
(((x + cx) - buffer_x * cx / 2), (y + cy / 2)) # Right
]
elif 'oval' in name or 'connector' in name:
buffer_x = 0.1398963731
buffer_y = 0.1509433962
connection_points = [
((x + cx / 2), y), # Top
((x + buffer_x * cx), (y + buffer_y * cy)), # Top-left
(x, (y + cy / 2)), # Left
((x + buffer_x * cx), ((y + cy) - buffer_y * cy)), # Bottom-left
((x + cx / 2), y + cy), # Bottom
(((x + cx) - buffer_x * cx), ((y + cy) - buffer_y * cy)), # Bottom-right
(x + cx, (y + cy / 2)), # Right
(((x + cx) - buffer_x * cx), (y + buffer_y * cy)) # Top-right
]
else:
connection_points = [
((x + cx / 2), y), # Top
(x, (y + cy / 2)), # Left
((x + cx / 2), y + cy), # Bottom
(x + cx, (y + cy / 2)) # Right
]
return connection_points[cxn_pt_idx]
| 36.520376
| 105
| 0.511502
|
4a0b0673e68217390407994f6c23ac5277d212ea
| 1,995
|
py
|
Python
|
App/migrations/0003_mainshow.py
|
luomantic/AiXianFeng
|
52723fb764889f583c010f9889313b6f50cbc3bd
|
[
"MIT"
] | null | null | null |
App/migrations/0003_mainshow.py
|
luomantic/AiXianFeng
|
52723fb764889f583c010f9889313b6f50cbc3bd
|
[
"MIT"
] | 1
|
2019-03-01T12:00:36.000Z
|
2019-03-01T12:01:12.000Z
|
App/migrations/0003_mainshow.py
|
luomantic/AiXianFeng
|
52723fb764889f583c010f9889313b6f50cbc3bd
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.7 on 2019-02-15 08:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('App', '0002_mainmustbuy_mainnav_mainshop'),
]
operations = [
migrations.CreateModel(
name='MainShow',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img', models.CharField(max_length=100)),
('name', models.CharField(max_length=30)),
('trackid', models.CharField(max_length=20)),
('categoryid', models.CharField(max_length=20)),
('brandname', models.CharField(max_length=20)),
('img1', models.CharField(max_length=200)),
('childcid1', models.CharField(max_length=20)),
('productid1', models.CharField(max_length=20)),
('longname1', models.CharField(max_length=50)),
('price1', models.CharField(max_length=20)),
('marketprice1', models.CharField(max_length=20)),
('img2', models.CharField(max_length=200)),
('childcid2', models.CharField(max_length=20)),
('productid2', models.CharField(max_length=20)),
('longname2', models.CharField(max_length=50)),
('price2', models.CharField(max_length=20)),
('marketprice2', models.CharField(max_length=20)),
('img3', models.CharField(max_length=200)),
('childcid3', models.CharField(max_length=20)),
('productid3', models.CharField(max_length=20)),
('longname3', models.CharField(max_length=50)),
('price3', models.CharField(max_length=20)),
('marketprice3', models.CharField(max_length=20)),
],
options={
'db_table': 'axf_mainshow',
},
),
]
| 43.369565
| 114
| 0.55589
|
4a0b06d8271ac2605cf92db54eec08b29859c61c
| 807
|
py
|
Python
|
tasks/apscheduler_tasks.py
|
kfrime/explore
|
98e498e244900f81591f8a190dac87813404cf61
|
[
"MIT"
] | null | null | null |
tasks/apscheduler_tasks.py
|
kfrime/explore
|
98e498e244900f81591f8a190dac87813404cf61
|
[
"MIT"
] | null | null | null |
tasks/apscheduler_tasks.py
|
kfrime/explore
|
98e498e244900f81591f8a190dac87813404cf61
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function
from models.test_models import (db, Role, User, Post)
def create_data():
print('start')
db.create_all()
admin = Role(name='Admin')
guest = Role(name='Guest')
user = Role(name='User')
u1 = User(username='u1', role=admin)
u2 = User(username='u2', role=guest)
u3 = User(username='u3', role=user)
db.session.add_all([admin, guest, user])
db.session.add_all([u1, u2, u3])
db.session.commit()
db.session.close()
print('success')
def task_1(a, b):
print(str(a) + ' ' + str(b))
def task_2():
print('Hello World!')
with db.app.app_context():
print(User.query.all())
if __name__ == '__main__':
print('tasks')
| 20.175
| 72
| 0.61834
|
4a0b0874a3691970c615f666519bab7b218e1cf5
| 953
|
py
|
Python
|
python/perspective/perspective/core/data/__init__.py
|
sebinsua/perspective
|
2c19c5fa0046597e30ec780ae08655767c5253d4
|
[
"Apache-2.0"
] | null | null | null |
python/perspective/perspective/core/data/__init__.py
|
sebinsua/perspective
|
2c19c5fa0046597e30ec780ae08655767c5253d4
|
[
"Apache-2.0"
] | null | null | null |
python/perspective/perspective/core/data/__init__.py
|
sebinsua/perspective
|
2c19c5fa0046597e30ec780ae08655767c5253d4
|
[
"Apache-2.0"
] | null | null | null |
# *****************************************************************************
#
# Copyright (c) 2019, the Perspective Authors.
#
# This file is part of the Perspective library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from .base import _is_dict, _is_list, Data
from .pd import _is_pandas
from .pa import _is_pyarrow
EXPORTERS = [_is_dict, _is_list, _is_pandas, _is_pyarrow]
def type_detect(data, schema=None, columns=None, transfer_as_arrow=False):
schema = schema or {}
for foo in EXPORTERS:
data_object = foo(data, schema=schema, columns=columns, transfer_as_arrow=transfer_as_arrow)
if data_object.type:
if transfer_as_arrow and foo != _is_pyarrow:
return _is_pyarrow(data_object.data, data_object.schema, data_object.columns)
else:
return data_object
# throw error?
return Data.Empty()
| 36.653846
| 100
| 0.647429
|
4a0b0a1b21de65298943e3e399e79be52387a1d4
| 10,825
|
py
|
Python
|
tools/ParameterSpaceConversion/visualize_poses_parameter_space_trajectory.py
|
murilovarges/HARBoP
|
326bb20d073b97a8bca451509c5ce38083b7d415
|
[
"Apache-2.0"
] | 4
|
2020-06-25T20:19:54.000Z
|
2020-11-11T07:05:04.000Z
|
tools/ParameterSpaceConversion/visualize_poses_parameter_space_trajectory.py
|
murilovarges/HARBoP
|
326bb20d073b97a8bca451509c5ce38083b7d415
|
[
"Apache-2.0"
] | 1
|
2020-11-17T09:09:38.000Z
|
2020-11-18T07:27:00.000Z
|
tools/ParameterSpaceConversion/visualize_poses_parameter_space_trajectory.py
|
murilovarges/HARBoP
|
326bb20d073b97a8bca451509c5ce38083b7d415
|
[
"Apache-2.0"
] | 1
|
2021-08-19T07:27:18.000Z
|
2021-08-19T07:27:18.000Z
|
import glob
import json
import os
import argparse
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from sklearn.preprocessing import MinMaxScaler
POSE_BODY_25_PAIRS_RENDER_GPU = \
[1, 8, 1, 2, 1, 5, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 10, 11,
8, 12, 12, 13, 13, 14, 1, 0, 0, 15, 15, 17, 0, 16, 16, 18, 14,
19, 19, 20, 14, 21, 11, 22, 22, 23, 11, 24]
POSE_BODY_25_COLORS_RENDER_GPU = \
[255, 0, 85,
255, 0, 0,
255, 85, 0,
255, 170, 0,
255, 255, 0,
170, 255, 0,
85, 255, 0,
0, 255, 0,
255, 0, 0,
0, 255, 85,
0, 255, 170,
0, 255, 255,
0, 170, 255,
0, 85, 255,
0, 0, 255,
255, 0, 170,
170, 0, 255,
255, 0, 255,
85, 0, 255,
0, 0, 255,
0, 0, 255,
0, 0, 255,
0, 255, 255,
0, 255, 255,
0, 255, 255]
def main():
parser = argparse.ArgumentParser(
description="Convert poses to Parameter Space to Human Action Recognition"
)
parser.add_argument("--poses_base_dir", type=str,
default='/home/murilo/dataset/KTH',
help="Name of directory where input points are located.")
parser.add_argument("--input_dir", type=str,
default='2DPoses',
help="Name of directory to output computed features.")
parser.add_argument("--output_images_dir", type=str,
default='2DPoses_SpaceParam_Images',
help="Name of directory to output Parameter Space images.")
parser.add_argument("--image_height", type=int,
default='240',
help="(Frame Size)Image height to compute max distance in Parameter Space.")
parser.add_argument("--image_width", type=int,
default='320',
help="(Frame Size)Image width to compute max distance in Parameter Space.")
parser.add_argument("--draw_body_ids", type=int,
default='1',
help="Whether draw body joint ids in image with points in Parameter Space.")
parser.add_argument("--number_frames", type=int,
default=20,
help="Number of frames to extract features.")
parser.add_argument("--stride", type=int,
default=1,
help="Stride to compute features from the frames.")
args = parser.parse_args()
convert_parameter_space(args)
def convert_parameter_space(args):
# here compute image diagonal = max distance in Parameter Space
max_distance = int(((args.image_height ** 2) + (args.image_width ** 2)) ** (1 / 2))
print(max_distance)
thetas = np.linspace(-np.pi / 2, np.pi / 2, 180)
#poses_dir = os.path.join(args.poses_base_dir, args.input_dir)
points = 14
for root, directories, filenames in os.walk(os.path.join(args.poses_base_dir, args.input_dir)):
for directory in directories:
video_dir = os.path.join(root, directory)
print(video_dir)
frames = sorted(glob.glob(video_dir + '/*.json'))
if len(frames) > 0:
for x in range(0, len(frames), args.stride):
if x + args.number_frames < len(frames):
img_parameter_traj = {}
draw = {}
for u in range(14):
img_parameter_traj[u] = Image.new('RGB', (180 + 20, int(max_distance)), color='black')
draw[u] = ImageDraw.Draw(img_parameter_traj[u])
prev_points_parameter_space = None
for y in range(x, x + args.number_frames + 1):
body_parts = read_body_parts_file(frames[y])
if len(body_parts) > 0:
# compute parameter space points and draw image with points
points_parameter_space = \
compute_parameter_space(body_parts, max_distance, thetas)
if prev_points_parameter_space is None:
prev_points_parameter_space = points_parameter_space
else:
for a in range(len(points_parameter_space)):
#for a in [2,3,4,5]:
#if 1 == 1:
#a = 4
x1 = prev_points_parameter_space[a][0]
y1 = prev_points_parameter_space[a][1]
x2 = points_parameter_space[a][0]
y2 = points_parameter_space[a][1]
color_id = points_parameter_space[a][2]
shape = (x1, y1, x2, y2)
draw[a].line(shape, fill=get_color(color_id))
e_size = 2
draw[a].ellipse((x1 - e_size, abs(y1) - e_size, x1 + e_size, abs(y1) + e_size),
fill=get_color(color_id))
draw[a].ellipse((x2 - e_size, abs(y2) - e_size, x2 + e_size, abs(y2) + e_size),
fill=get_color(color_id))
prev_points_parameter_space = points_parameter_space
images_dir = video_dir.replace(args.input_dir, args.output_images_dir)
#images_dir, video_name = os.path.split(images_dir)
if not os.path.exists(images_dir):
os.makedirs(images_dir)
for i in range(14):
file = os.path.join(images_dir, str(i) + '_'+ str(x) + '_trajectories.png')
img_parameter_traj[i].save(file)
def read_body_parts_file(key_points_file):
body_parts_int = {}
# Read json pose points
with open(key_points_file) as f:
data = json.load(f)
body_parts = data['part_candidates'][0]
if len(body_parts) > 0:
for key, value in body_parts.items():
body_parts_int[int(key)] = [item for item in value]
return body_parts_int
def compute_parameter_space(body_parts, max_distance, thetas, draw_body_ids=True):
# Create image degrees x max_distance
points_parameter_space = {}
for i in range(0, 14, 1):
degree = degree_disc = theta = rho1 = rho2 = 0
x1, y1, x2, y2, color_id, id1, id2 = return_body_points_coord(i, body_parts)
if x1 > 0 and y1 > 0 and x2 > 0 and y2 > 0:
#print(i)
# print('x1:\t%i\ty1:\t%i\t\tx2:\t%i\ty2:\t%i' % (x1, y1, x2, y2))
if y1 - y2 != 0:
theta = np.arctan((x2 - x1) / (y1 - y2))
else:
theta = 0
# here convert theta from radians to degrees
degree = round(theta * (180 / np.pi))
# here find theta in thetas discrete list (only for image plot)
degree_disc = min(range(len(thetas)), key=lambda x: abs(thetas[x] - theta))
# position_min_degree = min(thetas, key=lambda x: abs(x - theta))
# compute rho from theta
rho1 = x1 * np.cos(theta) + y1 * np.sin(theta)
rho2 = x2 * np.cos(theta) + y2 * np.sin(theta)
#print(rho1, rho2)
#print(int(rho1), int(degree), x1, y1)
points_parameter_space[i] = (degree_disc, rho1, color_id)
# points_hough[i] = (degree, degree_disc, theta, int(rho))
return points_parameter_space
def return_body_points_coord(i, body_parts):
x1 = y1 = x2 = y2 = x = color_id = id1 = id2 = 0
if i == 0: # 1 => 0 Neck
x = 13
elif i == 1: # 1 => 8 Upper body
x = 0
elif i == 2: # 2 => 3 Right Arm
x = 3
elif i == 3: # 3 => 4 Right Forearm
x = 4
elif i == 4: # 5 => 6 Left Arm
x = 5
elif i == 5: # 6 => 7 Left Forearm
x = 6
elif i == 6: # 9 => 10 Right Thigh
x = 8
elif i == 7: # 10 => 11 Right Leg
x = 9
elif i == 8: # 12 => 13 Left Thigh
x = 11
elif i == 9: # 13 => 14 Left Leg
x = 12
elif i == 10: # 8 => 9 Right Hip
x = 7
elif i == 11: # 8 => 12 Left Hip
x = 10
elif i == 12: # 1 => 2 Right Shoulder
x = 1
elif i == 13: # 1 => 5 Left Shoulder
x = 2
x = x * 2
if (len(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x]]) > 0 and len(
body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]]) > 0):
x1, y1 = get_max_prob(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x]])
x2, y2 = get_max_prob(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]])
color_id = POSE_BODY_25_PAIRS_RENDER_GPU[x + 1] * 3
id1 = POSE_BODY_25_PAIRS_RENDER_GPU[x]
id2 = POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]
return x1, y1, x2, y2, color_id, id1, id2
def draw_body(body_parts, height, width):
img = Image.new('RGB', (width, height), color='black')
draw = ImageDraw.Draw(img)
for k in sorted(body_parts):
if len(body_parts[k]) > 0:
x, y = get_max_prob(body_parts[k])
draw.point((x, y), fill=get_color(k * 3))
ctd = 0
for x in range(0, len(POSE_BODY_25_PAIRS_RENDER_GPU), 2):
print(x, x + 1)
print(POSE_BODY_25_PAIRS_RENDER_GPU[x], POSE_BODY_25_PAIRS_RENDER_GPU[x + 1])
print(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x]], body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]])
print('\n')
if (len(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x]]) > 0 and len(
body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]]) > 0):
x1, y1 = get_max_prob(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x]])
x2, y2 = get_max_prob(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]])
draw.line((x1, y1, x2, y2), fill=get_color(POSE_BODY_25_PAIRS_RENDER_GPU[x + 1] * 3), width=1)
ctd = ctd + 1
print(ctd)
img.show()
img.save('pil_red.png')
def get_max_prob(body_part):
m = 0
x = 0
y = 0
for p in range(0, len(body_part), 3):
if body_part[p + 2] > m:
m = float(body_part[p + 2])
x = int(body_part[p])
y = int(body_part[p + 1])
return x, y
def get_color(k):
return POSE_BODY_25_COLORS_RENDER_GPU[k], \
POSE_BODY_25_COLORS_RENDER_GPU[k + 1], \
POSE_BODY_25_COLORS_RENDER_GPU[k + 2]
if __name__ == "__main__":
main()
| 37.982456
| 119
| 0.52194
|
4a0b0a4836b010ca4d72995c8857a8bb0ddd7aa2
| 2,041
|
py
|
Python
|
modules/image/Image_gan/gan/photopen/model.py
|
AK391/PaddleHub
|
a51ab7447e089776766becb3297e560dfed98573
|
[
"Apache-2.0"
] | 1
|
2022-03-23T14:33:07.000Z
|
2022-03-23T14:33:07.000Z
|
modules/image/Image_gan/gan/photopen/model.py
|
AK391/PaddleHub
|
a51ab7447e089776766becb3297e560dfed98573
|
[
"Apache-2.0"
] | null | null | null |
modules/image/Image_gan/gan/photopen/model.py
|
AK391/PaddleHub
|
a51ab7447e089776766becb3297e560dfed98573
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import numpy as np
import paddle
from PIL import Image
from PIL import ImageOps
from ppgan.models.generators import SPADEGenerator
from ppgan.utils.filesystem import load
from ppgan.utils.photopen import data_onehot_pro
class PhotoPenPredictor:
def __init__(self, weight_path, gen_cfg):
# 初始化模型
gen = SPADEGenerator(
gen_cfg.ngf,
gen_cfg.num_upsampling_layers,
gen_cfg.crop_size,
gen_cfg.aspect_ratio,
gen_cfg.norm_G,
gen_cfg.semantic_nc,
gen_cfg.use_vae,
gen_cfg.nef,
)
gen.eval()
para = load(weight_path)
if 'net_gen' in para:
gen.set_state_dict(para['net_gen'])
else:
gen.set_state_dict(para)
self.gen = gen
self.gen_cfg = gen_cfg
def run(self, image):
sem = Image.fromarray(image).convert('L')
sem = sem.resize((self.gen_cfg.crop_size, self.gen_cfg.crop_size), Image.NEAREST)
sem = np.array(sem).astype('float32')
sem = paddle.to_tensor(sem)
sem = sem.reshape([1, 1, self.gen_cfg.crop_size, self.gen_cfg.crop_size])
one_hot = data_onehot_pro(sem, self.gen_cfg)
predicted = self.gen(one_hot)
pic = predicted.numpy()[0].reshape((3, 256, 256)).transpose((1, 2, 0))
pic = ((pic + 1.) / 2. * 255).astype('uint8')
return pic
| 32.396825
| 89
| 0.657521
|
4a0b0ac79e331b2c0e5d42be9a19e2efc9c6357c
| 11,931
|
py
|
Python
|
otherCodeTaskSnippets/27.11.2021.py
|
s2812135/Data_Challenges_WiSe2122
|
a55372f444e7344af4e2e1f04e4244fb8cefeefe
|
[
"MIT"
] | null | null | null |
otherCodeTaskSnippets/27.11.2021.py
|
s2812135/Data_Challenges_WiSe2122
|
a55372f444e7344af4e2e1f04e4244fb8cefeefe
|
[
"MIT"
] | null | null | null |
otherCodeTaskSnippets/27.11.2021.py
|
s2812135/Data_Challenges_WiSe2122
|
a55372f444e7344af4e2e1f04e4244fb8cefeefe
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 27 16:28:00 2021
@author: dariu
"""
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
import pacmap
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import umap
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
#import sklearn.cluster
from sklearn.decomposition import PCA
from sklearn import metrics
from sklearn.cluster import OPTICS, cluster_optics_dbscan
import matplotlib.gridspec as gridspec
path = "C:\\Users\dariu\\Documents\\Master Wirtschaftsinformatik\\Data Challenges\Data\\"
directorys = [
['training_setA/training/', 'p0'],
['training_setB/training_setB/', 'p1']
]
#%%
dfs = []
for z, (directory, file_head) in enumerate(directorys):
for i, filename in enumerate(tqdm(os.listdir(path + directory))):
df_temp = pd.read_csv(path + directory + filename, skiprows=0, sep='|')
# patient_gender = df_temp["Gender"][1]
# if df_temp["Age"][1] >= 40:
dfs.append(df_temp)
df = pd.concat(dfs)
labels_true = df["SepsisLabel"].tolist()
#%%
df = df[["HR", "O2Sat", "Temp", "SBP", "MAP", "DBP", "Resp", "EtCO2"]]
#############################################
'''
imputation_dims = [
'DBP',
'HR',
'O2Sat',
'Temp',
'SBP',
'MAP',
'Resp',
]
for d in imputation_dims:
mean = round(df[d].sum()/df.shape[0], 2)
df.loc[df[d].isna(), d] = mean
####################################################
'''
df_current = df.fillna(df.mean())
#f_current = df.fillna(0)
###########################################################
#df_current = df
##############################
#85 labels_pred?
def calc_scores(X, labels_true, labels_pred):
rand_score = metrics.rand_score(labels_true, labels_pred)
adjusted_rand_score = metrics.adjusted_rand_score(labels_true, labels_pred)
adjusted_mutual_info_score = metrics.cluster.adjusted_mutual_info_score(labels_true, labels_pred)
silhouette_score = metrics.silhouette_score(X, labels_pred, metric='euclidean', sample_size=None, random_state=None)
print("Rand Score: " , str(rand_score) + "\n" +
"Adjusted Rand Score: " , str(adjusted_rand_score) + "\n"
"Adjusted Mutual Information Score: " + str(adjusted_mutual_info_score) + "\n"
"Silhouette Score: " , str(silhouette_score) + "\n"
)
############################################################
# initializing the pacmap instance
# Setting n_neighbors to "None" leads to a default choice shown below in "parameter" section
embedding = pacmap.PaCMAP(n_dims=2, n_neighbors=None, MN_ratio=0.5, FP_ratio=2.0)
# fit the data (The index of transformed data corresponds to the index of the original data)
X_transformed = embedding.fit_transform(df_current.values, init="pca")
####################################################################
#reduced_data = PCA(n_components=2).fit_transform(df_current)
#reduced_data = np.double(X_transformed)
X_transformed = np.double(X_transformed)
kmeans = KMeans(n_clusters=5, random_state=0).fit(X_transformed)
###################################################################
#sample_size = 100000
#db = DBSCAN(eps=1, min_samples=10).fit(X_transformed[0:sample_size])
#%%
#Plot kmeans (https://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_digits.html#sphx-glr-auto-examples-cluster-plot-kmeans-digits-py)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = 0.4 # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
x_min, x_max = X_transformed[:, 0].min() - 1, X_transformed[:, 0].max() + 1
y_min, y_max = X_transformed[:, 1].min() - 1, X_transformed[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(
Z,
interpolation="nearest",
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect="auto",
origin="lower",
)
plt.plot(X_transformed[:, 0], X_transformed[:, 1], "k.", markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(
centroids[:, 0],
centroids[:, 1],
marker="x",
s=169,
linewidths=3,
color="w",
zorder=10,
)
#plt.title(
# "K-means clustering on the whole dataset (PCA-reduced data)\n"
# "Centroids are marked with white cross"
#)
#plt.xlim(x_min, x_max)
#plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
sample_size = 100000
calc_scores(X_transformed[0:sample_size], labels_true[0:sample_size], kmeans.labels_[0:sample_size])
#%%
################################################################################################
# Disabled exploratory code kept for reference: DBSCAN cluster evaluation and
# plotting (requires the commented-out `db = DBSCAN(...)` fit above).
'''
#Plot dbscan https://scikit-learn.org/stable/auto_examples/cluster/plot_dbscan.html#sphx-glr-auto-examples-cluster-plot-dbscan-py
#####################
#reduced_sepsis = []
#reduced_no_sepsis = []
#labels_true = df_current["SepsisLabel"].tolist()
#labels_true = labels_true[0:sample_size]
#X = reduced_data[0:sample_size]
#labels_true = df_current["SepsisLabel"].tolist()
labels_true = labels_true[0:sample_size]
#X = reduced_data[0:sample_size]
#Dumme Zeile!!!!!!!!!!!!!!!
X_transformed = X_transformed[0:sample_size]
#%%
#for j in range(len(lables)):
#    if lables_true[j] == 1:
#        reduced_sepsis.append(X_transformed[j])
#    else:
#        reduced_no_sepsis.append(X_transformed[j])
#%%
##########################
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
print("Estimated number of clusters: %d" % n_clusters_)
print("Estimated number of noise points: %d" % n_noise_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f" % metrics.adjusted_rand_score(labels_true, labels))
print(
    "Adjusted Mutual Information: %0.3f"
    % metrics.adjusted_mutual_info_score(labels_true, labels)
)
print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(X_transformed, labels))
print("-------------------------")
calc_scores(X_transformed, labels_true, labels)
# #############################################################################
# Plot result
#import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]
for k, col in zip(unique_labels, colors):
    if k == -1:
        # Black used for noise.
        col = [0, 0, 0, 1]
    class_member_mask = labels == k
    xy = X_transformed[class_member_mask & core_samples_mask]
    plt.plot(
        xy[:, 0],
        xy[:, 1],
        "o",
        markerfacecolor=tuple(col),
        markeredgecolor="k",
        markersize=14,
    )
    xy = X_transformed[class_member_mask & ~core_samples_mask]
    plt.plot(
        xy[:, 0],
        xy[:, 1],
        "o",
        markerfacecolor=tuple(col),
        markeredgecolor="k",
        markersize=6,
    )
plt.title("Estimated number of clusters: %d" % n_clusters_)
plt.show()
'''
# Disabled exploratory code kept for reference: OPTICS clustering with
# reachability plot and DBSCAN cuts at eps=0.5 and eps=0.7.
'''
##############################################################################
clust = OPTICS(min_samples=5, xi=0.6, min_cluster_size=5)
sample_size = 100000
# Run the fit
clust.fit(X_transformed[0:sample_size])
##############################################################
space = np.arange(len(X_transformed[0:sample_size]))
reachability = clust.reachability_[clust.ordering_]
labels = clust.labels_[clust.ordering_]
#print(min(clust.labels_))
#print(max(clust.labels_))
unique_labels = set(clust.labels_)
#print(len(unique_labels)-1)
#print(len(unique_labels)-1)
###############################################################
#%%Optics Plot https://scikit-learn.org/stable/auto_examples/cluster/plot_optics.html#sphx-glr-auto-examples-cluster-plot-optics-py
labels_050 = cluster_optics_dbscan(
    reachability=clust.reachability_,
    core_distances=clust.core_distances_,
    ordering=clust.ordering_,
    eps=0.5,
)
labels_200 = cluster_optics_dbscan(
    reachability=clust.reachability_,
    core_distances=clust.core_distances_,
    ordering=clust.ordering_,
    eps=0.7,
)
plt.figure(figsize=(10, 7))
G = gridspec.GridSpec(2, 3)
ax1 = plt.subplot(G[0, :])
ax2 = plt.subplot(G[1, 0])
ax3 = plt.subplot(G[1, 1])
ax4 = plt.subplot(G[1, 2])
# Reachability plot
#colors = ["g.", "r.", "b.", "y.", "c."]
colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]
#for klass, color in zip(range(0, 5), colors):
#for klass, color in zip(range(len(unique_labels)), colors):
for klass, color in zip(range(-1,len(unique_labels)-1), colors):
    Xk = space[labels == klass]
    Rk = reachability[labels == klass]
    ax1.plot(Xk, Rk, color, alpha=0.3)
ax1.plot(space[labels == -1], reachability[labels == -1], "k.", alpha=0.3)
ax1.plot(space, np.full_like(space, 2.0, dtype=float), "k-", alpha=0.5)
ax1.plot(space, np.full_like(space, 0.5, dtype=float), "k-.", alpha=0.5)
ax1.set_ylabel("Reachability (epsilon distance)")
ax1.set_title("Reachability Plot")
# OPTICS
#colors = ["g.", "r.", "b.", "y.", "c."]
colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]
#for klass, color in zip(range(0, 5), colors):
for klass, color in zip(range(-1,len(unique_labels)-1), colors):
    Xk = X_transformed[0:sample_size][clust.labels_ == klass]
    ax2.plot(Xk[:, 0], Xk[:, 1], color, alpha=0.3)
ax2.plot(X_transformed[0:sample_size][clust.labels_ == -1, 0], X_transformed[0:sample_size][clust.labels_ == -1, 1], "k+", alpha=0.1)
ax2.set_title("Automatic Clustering\nOPTICS")
# DBSCAN at 0.5
#colors = ["g", "greenyellow", "olive", "r", "b", "c"]
colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]
#for klass, color in zip(range(0, 6), colors):
for klass, color in zip(range(-1,len(unique_labels)-1), colors):
    Xk = X_transformed[0:sample_size][labels_050 == klass]
    ax3.plot(Xk[:, 0], Xk[:, 1], color, alpha=0.3, marker=".")
ax3.plot(X_transformed[0:sample_size][labels_050 == -1, 0], X_transformed[0:sample_size][labels_050 == -1, 1], "k+", alpha=0.1)
ax3.set_title("Clustering at 0.5 epsilon cut\nDBSCAN")
# DBSCAN at 2.
#colors = ["g.", "m.", "y.", "c."]
colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]
#for klass, color in zip(range(0, 4), colors):
for klass, color in zip(range(-1,len(unique_labels)-1), colors):
    Xk = X_transformed[0:sample_size][labels_200 == klass]
    ax4.plot(Xk[:, 0], Xk[:, 1], color, alpha=0.3)
ax4.plot(X_transformed[0:sample_size][labels_200 == -1, 0], X_transformed[0:sample_size][labels_200 == -1, 1], "k+", alpha=0.1)
ax4.set_title("Clustering at 0.7 epsilon cut\nDBSCAN")
plt.tight_layout()
plt.show()
calc_scores(X_transformed, labels_true, labels)
'''
##########################################
# Disabled exploratory code kept for reference: scatter plot separating
# sepsis vs. non-sepsis points in the 2-D embedding.
'''
##########################################
#%%
reduced_sepsis = []
reduced_no_sepsis = []
for j in range(0,sample_size):
    if labels_true[j] == 1:
        reduced_sepsis.append(X_transformed[j])
    else:
        reduced_no_sepsis.append(X_transformed[j])
plt.scatter(*zip(*reduced_no_sepsis), cmap="Spectral")
plt.scatter(*zip(*reduced_sepsis), c="r")
plt.show()
#%%
'''
#########################################
| 28.958738
| 145
| 0.626519
|
4a0b0acd89eb769d07920f9fa5f1b0ea5a50508f
| 39,317
|
py
|
Python
|
Fig_9-13.py
|
pcampeti/SGWBProbe
|
eaa33cefba9bdcba88ab4032543f5370b7f4925b
|
[
"MIT"
] | 2
|
2021-04-25T06:22:21.000Z
|
2022-02-07T18:10:52.000Z
|
Fig_9-13.py
|
pcampeti/SGWBProbe
|
eaa33cefba9bdcba88ab4032543f5370b7f4925b
|
[
"MIT"
] | null | null | null |
Fig_9-13.py
|
pcampeti/SGWBProbe
|
eaa33cefba9bdcba88ab4032543f5370b7f4925b
|
[
"MIT"
] | 1
|
2022-02-22T00:15:16.000Z
|
2022-02-22T00:15:16.000Z
|
"""
@author: Paolo Campeti
This script reproduces Figures 9 - 13 in the paper.
Uses methods imported from module sgwbprobecomb/SGWB_Signal.py,
sgwbprobecomb/Binned_errors.py. and sgwbprobecomb/error_boxes.py.
"""
import os.path as op
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import seaborn as sns
# import our classes and methods
from sgwbprobe.SGWB_Signal import Signal_GW
from sgwbprobe.Binned_errors import Binned_GW
from sgwbprobe.error_boxes import make_error_boxes
# --- Global plotting configuration and shared constants ---
#seaborn settings
sns.set()
sns.set(style='whitegrid')
#matplotlib settings
mpl.rcParams['figure.dpi'] = 300
mpl.rcParams['figure.figsize'] = [5,3]
mpl.rcParams['text.usetex'] = True
mpl.rc('font',**{'family':'serif','serif':['Times New Roman'],'size':14})
# Font sizes used by the figures below.
axissize = 6
labelsize = 8
legendsize = 10
# NOTE(review): legendsize, colornorm and linesize are not referenced in the
# visible portion of this script — possibly used further down or leftovers.
colornorm = colors.Normalize(vmin=0.0, vmax=5.0)
linesize = 2
# Useful constant: seconds in a year
year_sec = 60*60*24*365
# Load and unpack PTA and Interferometers instrumental strains
# Each .npz holds 'x' (frequency grid) and 'y' (strain / characteristic strain).
# T_obs values are mission duration in seconds, scaled by an efficiency factor.
# SKA
SKA_file = np.load(op.join(op.dirname(__file__),'files/hc_SKA.npz'))
SKA_freq = SKA_file['x']
SKA_hc = SKA_file['y']
# Convert characteristic strain h_c to strain PSD: S_h = h_c^2 / f.
SKA_strain = SKA_hc**2/SKA_freq
eff_SKA = 1. # mission efficiency factor
SKA_T_obs = 10 * year_sec * eff_SKA
# Einstein Telescope
ET = np.load(op.join(op.dirname(__file__),'files/S_h_ET.npz'))
ET_freq = ET['x']
ET_strain = ET['y']
eff_ET = 1. # mission efficiency factor
ET_T_obs = 1 * year_sec * eff_ET
# LISA for Cosmologists
LISA_xcosmo = np.load(op.join(op.dirname(__file__),'files/S_h_LISA_xcosmo.npz'))
LISA_xcosmo_freq = LISA_xcosmo['x']
LISA_xcosmo_strain = LISA_xcosmo['y']
eff_LISA = 0.75
LISA_xcosmo_T_obs = 4 * year_sec * eff_LISA
# muAres without fgs
Ares_nofgs = np.load(op.join(op.dirname(__file__),'files/S_h_muAres_nofgs.npz'))
Ares_nofgs_freq = Ares_nofgs['x']
Ares_nofgs_strain = Ares_nofgs['y']
eff_Ares = 1.
Ares_nofgs_T_obs = 10 * year_sec * eff_Ares
# Precomputed detector response functions for muAres.
Ares_R12 = np.load(op.join(op.dirname(__file__),'files/Responses/Resp_muAres.npy'))
Ares_f_R = np.load(op.join(op.dirname(__file__),'files/Responses/f_R_Ares.npy'))
# BBO
BBO = np.load(op.join(op.dirname(__file__),'files/S_h_BBO_STAR.npz'))
BBO_freq = BBO['x']
BBO_strain = BBO['y']
eff_BBO = 1.
BBO_T_obs = 10 * year_sec * eff_BBO
# DECIGO
DECIGO = np.load(op.join(op.dirname(__file__),'files/S_h_DECIGO.npz'))
DECIGO_freq = DECIGO['x']
# NOTE(review): factor 1/3 presumably accounts for DECIGO's multiple channels
# or a normalization convention in the stored file — confirm against the paper.
DECIGO_strain = DECIGO['y']/3
eff_DECIGO = 1.
DECIGO_T_obs = 10 * year_sec * eff_DECIGO
# DO Optimal
DO = np.load(op.join(op.dirname(__file__),'files/S_h_DO_Optimal.npz'))
DO_freq = DO['x']
DO_strain = DO['y']
eff_DO = 0.75
DO_T_obs = 4 * year_sec * eff_DO
# Precomputed detector response functions for DO (shared by both DO variants).
DO_R12 = np.load(op.join(op.dirname(__file__),'files/Responses/Resp_DO.npy'))
DO_f_R = np.load(op.join(op.dirname(__file__),'files/Responses/f_R_DO.npy'))
# DO Conservative
DO_cons = np.load(op.join(op.dirname(__file__),'files/S_h_DO_Conservative.npz'))
DO_cons_freq = DO_cons['x']
DO_cons_strain = DO_cons['y']
# eff_DO re-assigned with the same value (harmless; kept for symmetry).
eff_DO = 0.75
DO_cons_T_obs = 4 * year_sec * eff_DO
# AEDGE
AEDGE = np.load(op.join(op.dirname(__file__),'files/S_h_AEDGE.npz'))
AEDGE_freq = AEDGE['x']
AEDGE_strain = AEDGE['y']
eff_AEDGE = 0.6
AEDGE_T_obs = 5 * year_sec * eff_AEDGE
###############################################################################
# Generate primordial signals using our class Signal_GW
# Axion-inflation models (r_vac = vacuum tensor-to-scalar ratio, r_star/k_p/sigma
# parameterize the sourced bump) and power-law models at several r values.
class_axion_SKA = Signal_GW(r_vac=1e-5, r_star=800, k_p=1e8, sigma=5.1, axion=True, running=True)
class_axion1 = Signal_GW(r_vac=1e-5, r_star=400, k_p=1e13, sigma=8.1, axion=True, running=True)
class_axion2 = Signal_GW(r_vac=1e-5, r_star=0.15 , k_p=1e11, sigma=8, axion=True, running=True)
class_no_axion = Signal_GW(r_vac=0.01, axion=None, running=True)
class_no_axion_r0001 = Signal_GW(r_vac=0.001, axion=None, running=True)
class_no_axion_rBICEP = Signal_GW(r_vac=0.06, axion=None, running=True)
# wavenumber array
# Dense log-spaced comoving wavenumber grid spanning CMB to interferometer scales.
k = np.logspace(np.log10(1e-5), np.log10(1e20), 100000)
###############################################################################
# class for SKA
# NOTE(review): the recurring factor 6.5e14 appears to convert frequency [Hz]
# to comoving wavenumber [Mpc^-1] — confirm against Signal_GW.freq_k_conv.
sens_curve_SKA = np.array(SKA_strain)
# Omega_GW of the axion1 model, evaluated on the wavenumber grid; reused by
# several instrument classes below.
omega_gw = class_axion1.analytic_omega_WK(k)
k_SKA = np.array(SKA_freq) * 6.5e14
class_binned_SKA = Binned_GW(
    name_exp='SKA',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    sens_curve=sens_curve_SKA,
    omega_gw=omega_gw,
    k_sens=k_SKA,
    kmin_sens=k_SKA[0],
    N_bins_sens=5,
    T_obs=SKA_T_obs,
    n_det=1.,
    interp=True,
    sigma_L=1.0
)
# Binned error boxes (x/y extents), bin centers, binned signal and sensitivity.
xerr_SKA, yerr_SKA, bins_mean_point_SKA, binned_signal_SKA, binned_curve_SKA = class_binned_SKA.sens_curve_binning()
###############################################################################
# class for AEDGE
sens_curve_AEDGE = np.array(AEDGE_strain)
omega_gw = class_axion1.analytic_omega_WK(k)
k_AEDGE = np.array(AEDGE_freq) * 6.5e14
class_binned_AEDGE = Binned_GW(
    name_exp='AEDGE',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    sens_curve=sens_curve_AEDGE,
    omega_gw=omega_gw,
    k_sens=k_AEDGE,
    kmin_sens=k_AEDGE[0],
    N_bins_sens=4,
    T_obs=AEDGE_T_obs,
    n_det = 1.,
    interp=True,
    sigma_L=0.1
)
xerr_AEDGE, yerr_AEDGE, bins_mean_point_AEDGE, binned_signal_AEDGE, binned_curve_AEDGE = class_binned_AEDGE.sens_curve_binning()
###############################################################################
#class for ET
sens_curve_ET = np.array(ET_strain)
omega_gw = class_axion1.analytic_omega_WK(k)
k_ET = np.array(ET_freq) * 6.5e14
class_binned_ET = Binned_GW(
    name_exp='ET',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    sens_curve=sens_curve_ET,
    omega_gw=omega_gw,
    k_sens=k_ET,
    # ET sensitivity band starts at 1.5 Hz (converted to wavenumber).
    kmin_sens=1.5*6.5e14 ,
    N_bins_sens=5,
    T_obs=ET_T_obs,
    # ET is a triangle of three nested detectors.
    n_det = 3.,
    interp=True,
    sigma_L=1.0
)
xerr_ET, yerr_ET, bins_mean_point_ET, binned_signal_ET, binned_curve_ET = class_binned_ET.sens_curve_binning()
###############################################################################
#class for LISA
sens_curve_LISA = np.array(LISA_xcosmo_strain)
omega_gw = class_axion1.analytic_omega_WK(k)
k_LISA = np.array(LISA_xcosmo_freq) * 6.5e14
class_binned = Binned_GW(
    name_exp = 'LISA',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    sens_curve=sens_curve_LISA,
    omega_gw=omega_gw,
    k_sens=k_LISA,
    kmin_sens=1.21303790e+10,
    N_bins_sens=7,
    T_obs=LISA_xcosmo_T_obs,
    interp=True,
    n_det = 1.,
    sigma_L=1.0
)
# Full binned axion1 signal over the whole k range (the blue curve in the figures).
binned_signal_whole, bins_mean_point_whole = class_binned.Omega_GW_binning()
xerr, yerr, bins_mean_point, binned_signal, binned_curve = class_binned.sens_curve_binning()
################################################################################
# BBO
# BBO error boxes are computed against the axion2 model (the orange dashed curve).
omega_gw_axion2 = class_axion2.analytic_omega_WK(k)
k_BBO = np.array(BBO_freq) * 6.5e14
sens_curve_BBO = np.array(BBO_strain)
class_BBO = Binned_GW(
    name_exp = 'BBO',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    sens_curve=sens_curve_BBO,
    omega_gw=omega_gw_axion2,
    k_sens=k_BBO,
    kmin_sens=k_BBO[0],
    N_bins_sens=10,
    T_obs=BBO_T_obs,
    interp=True,
    n_det = 2.,
    sigma_L=1.0
)
binned_signal_axion2, bins_mean_point_axion2 = class_BBO.Omega_GW_binning()
xerr_BBO, yerr_BBO, bins_mean_point_BBO, binned_signal_BBO, binned_curve_BBO = class_BBO.sens_curve_binning()
###############################################################################
#class for LiteBIRD and r=0.01
# CMB experiments use a Fisher matrix instead of a strain sensitivity curve.
Fisher = np.load(op.join(op.dirname(__file__),'files/LiteBIRD_Fisher_matrices/Fisher_1.2_r001.npy'))
omega_gw_flat = class_no_axion.analytic_omega_WK(k)
power_spectrum = class_no_axion.tensor_spect(k)
class_binned_flat_CMB = Binned_GW(
    name_exp = 'LiteBIRD',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    omega_gw=omega_gw_flat,
    kmin_sens=1e-4,
    N_bins_sens=5,
    CMB=True,
    F=Fisher,
    tensor_spect=power_spectrum,
    sigma_L=1.0
)
binned_signal_whole_flat, bins_mean_point_whole_flat = class_binned_flat_CMB.Omega_GW_binning()
xerr_flat, yerr_flat, bins_mean_point_flat, binned_signal_flat, binned_curve_flat = class_binned_flat_CMB.sens_curve_binning()
###############################################################################
# class for LiteBIRD and Axion model r_vac=1e-5, r_star=835, k_p=1e13, sigma=9
# NOTE(review): the comment above quotes r_star=835/sigma=9, but class_axion1
# was built with r_star=400/sigma=8.1 — confirm which parameter set is current.
Fisher_axion = np.load(op.join(op.dirname(__file__),'files/LiteBIRD_Fisher_matrices/Fisher_1.2_AX1.npy'))
power_spectrum_axion = class_axion1.total_spect(k)
class_binned_axion_CMB = Binned_GW(
    name_exp = 'LiteBIRD',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    omega_gw=omega_gw,
    kmin_sens=1e-4,
    N_bins_sens=6,
    CMB=True,
    F=Fisher_axion,
    tensor_spect=power_spectrum_axion,
    sigma_L=1.0
)
binned_signal_whole_axion, bins_mean_point_whole_axion = class_binned_axion_CMB.Omega_GW_binning()
xerr_axion, yerr_axion, bins_mean_point_axion, binned_signal_axion, binned_curve_axion = class_binned_axion_CMB.sens_curve_binning()
################################################################################
#class for DECIGO
sens_curve_DECIGO = np.array(DECIGO_strain)
# Here the frequency->wavenumber conversion uses the model's own helper
# (equivalent role to the explicit 6.5e14 factor used above).
k_decigo = class_axion1.freq_k_conv(DECIGO_freq)
class_DECIGO = Binned_GW(
    name_exp = 'DECIGO',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    sens_curve=sens_curve_DECIGO,
    omega_gw=omega_gw_axion2,
    k_sens=k_decigo,
    kmin_sens=k_decigo[0],
    N_bins_sens=9,
    T_obs = DECIGO_T_obs,
    interp=True,
    n_det = 2.,
    sigma_L=1.0
)
xerr_decigo, yerr_decigo, bins_mean_point_decigo, binned_signal_decigo, binned_curve_decigo = class_DECIGO.sens_curve_binning()
###############################################################################
#class for muAres without foregrounds
sens_curve_MUARES_nofgs = np.array(Ares_nofgs_strain)
k_muares_nofgs = class_axion1.freq_k_conv(Ares_nofgs_freq)
class_MUARES_nofgs = Binned_GW(
    name_exp = 'muAres',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    sens_curve=sens_curve_MUARES_nofgs,
    omega_gw=omega_gw,
    k_sens=k_muares_nofgs,
    kmin_sens=k_muares_nofgs[0],
    N_bins_sens=12,
    T_obs=Ares_nofgs_T_obs,
    interp=True,
    n_det = 2.,
    sigma_L=1.0,
    # muAres includes the detector response and a cosmic-variance term.
    R_auto=Ares_R12[0],
    R_12=Ares_R12,
    f_R=Ares_f_R,
    cosmic_var=True
)
xerr_muares_nofgs, yerr_muares_nofgs, bins_mean_point_muares_nofgs, binned_signal_muares_nofgs, binned_curve_muares_nofgs = class_MUARES_nofgs.sens_curve_binning()
###############################################################################
#class for DO Optimal
sens_curve_DO = np.array(DO_strain)
k_DO = class_axion1.freq_k_conv(DO_freq)
class_DO = Binned_GW(
    name_exp = 'DO_Optimal',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    sens_curve=sens_curve_DO,
    omega_gw=omega_gw,
    k_sens=k_DO,
    kmin_sens=k_DO[0],
    N_bins_sens=7,
    T_obs=DO_T_obs,
    interp=True,
    n_det = 1.,
    sigma_L=1.0,
    R_auto=DO_R12[0],
    R_12=DO_R12,
    f_R=DO_f_R,
    cosmic_var=None
)
xerr_DO, yerr_DO, bins_mean_point_DO, binned_signal_DO, binned_curve_DO = class_DO.sens_curve_binning()
###############################################################################
#class for DO Conservative
sens_curve_DO_cons = np.array(DO_cons_strain)
k_DO_cons = class_axion1.freq_k_conv(DO_cons_freq)
class_DO_cons = Binned_GW(
    name_exp = 'DO_Conservative',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    sens_curve=sens_curve_DO_cons,
    omega_gw=omega_gw,
    k_sens=k_DO_cons,
    kmin_sens=k_DO_cons[0],
    N_bins_sens=7,
    T_obs=DO_cons_T_obs,
    interp=True,
    n_det = 1.,
    sigma_L=1.0,
    R_auto=DO_R12[0],
    R_12=DO_R12,
    f_R=DO_f_R,
    cosmic_var=None
)
xerr_DO_cons, yerr_DO_cons, bins_mean_point_DO_cons, binned_signal_DO_cons, binned_curve_DO_cons = class_DO_cons.sens_curve_binning()
###############################################################################
#class for LiteBIRD and r=0.001 (I use it only for the r=1e-3 signal)
Fisher = np.load(op.join(op.dirname(__file__),'files/LiteBIRD_Fisher_matrices/Fisher_1.2_r001.npy'))
omega_gw_flat_r0001 = class_no_axion_r0001.analytic_omega_WK(k)
power_spectrum_r0001 = class_no_axion_r0001.tensor_spect(k)
# NOTE(review): omega_gw_BICEP is also computed in the next section; this
# early assignment is redundant but harmless.
omega_gw_BICEP = class_no_axion_rBICEP.analytic_omega_WK(k)
class_binned_flat_CMB_r0001 = Binned_GW(
    name_exp = 'LiteBIRD',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    omega_gw=omega_gw_flat_r0001,
    kmin_sens=1e-4,
    N_bins_sens=8,
    CMB=True,
    F=Fisher,
    tensor_spect=power_spectrum_r0001,
    sigma_L=1.0
)
binned_signal_whole_flat_r0001, bins_mean_point_whole_flat_r0001 = class_binned_flat_CMB_r0001.Omega_GW_binning()
###############################################################################
#class for LiteBIRD and r=0.06
Fisher = np.load(op.join(op.dirname(__file__),'files/LiteBIRD_Fisher_matrices/Fisher_1.2_r001.npy'))
omega_gw_BICEP = class_no_axion_rBICEP.analytic_omega_WK(k)
power_spectrum_rBICEP = class_no_axion_rBICEP.tensor_spect(k)
class_binned_flat_CMB_rBICEP = Binned_GW(
    name_exp = 'LiteBIRD',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    omega_gw=omega_gw_BICEP,
    kmin_sens=1e-4,
    N_bins_sens=8,
    CMB=True,
    F=Fisher,
    tensor_spect=power_spectrum_rBICEP,
    sigma_L=1.0
)
binned_signal_whole_BICEP, bins_mean_point_whole_BICEP = class_binned_flat_CMB_rBICEP.Omega_GW_binning()
###############################################################################
# FOREGROUNDS BELOW
# Same instrument classes as above, but with astrophysical foregrounds enabled
# (fgs=True) and, for most, a tighter foreground-residual parameter sigma_L.
#class for SKA for fgs
class_binned_SKA_fgs = Binned_GW(
    name_exp='SKA_with_fgs',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    sens_curve=sens_curve_SKA,
    omega_gw=omega_gw,
    k_sens=k_SKA,
    kmin_sens=k_SKA[0],
    N_bins_sens=5,
    T_obs=SKA_T_obs,
    n_det=1.,
    fgs=True ,
    interp=True,
    sigma_L=0.1
)
xerr_SKA_fgs, yerr_SKA_fgs, bins_mean_point_SKA_fgs, binned_signal_SKA_fgs, binned_curve_SKA_fgs = class_binned_SKA_fgs.sens_curve_binning()
###############################################################################
## Class for ET with fgs
class_binned_ET_fgs = Binned_GW(
    name_exp='ET_with_fgs',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    sens_curve=sens_curve_ET,
    omega_gw=omega_gw,
    k_sens=k_ET,
    kmin_sens=1.5*6.5e14 ,
    N_bins_sens=5,
    T_obs=ET_T_obs,
    n_det = 3.,
    fgs=True,
    interp=True,
    sigma_L=1.
)
xerr_ET_fgs, yerr_ET_fgs, bins_mean_point_ET_fgs, binned_signal_ET_fgs, binned_curve_ET_fgs = class_binned_ET_fgs.sens_curve_binning()
###############################################################################
#class for AEDGE with fgs
omega_gw = class_axion1.analytic_omega_WK(k)
class_binned_AEDGE_fgs = Binned_GW(
    name_exp = 'AEDGE_with_fgs',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    sens_curve=sens_curve_AEDGE,
    omega_gw=omega_gw,
    k_sens=k_AEDGE,
    kmin_sens=k_AEDGE[0],
    N_bins_sens=4,
    T_obs=AEDGE_T_obs,
    n_det = 1.,
    fgs=True,
    interp=True,
    sigma_L=0.1
)
xerr_AEDGE_fgs, yerr_AEDGE_fgs, bins_mean_point_AEDGE_fgs, binned_signal_AEDGE_fgs, binned_curve_AEDGE_fgs = class_binned_AEDGE_fgs.sens_curve_binning()
###############################################################################
#class for LISA with fgs
omega_gw = class_axion1.analytic_omega_WK(k)
class_binned_fgs = Binned_GW(
    name_exp='LISA_with_fgs',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    sens_curve=sens_curve_LISA,
    omega_gw=omega_gw,
    k_sens=k_LISA,
    kmin_sens=1.21303790e+10,
    N_bins_sens=7,
    T_obs=LISA_xcosmo_T_obs,
    n_det = 1.,
    fgs=True,
    interp=True,
    sigma_L=0.1
)
xerr_fgs, yerr_fgs, bins_mean_point_fgs, binned_signal_fgs, binned_curve_fgs = class_binned_fgs.sens_curve_binning()
################################################################################
# class for BBO with fgs
# NOTE(review): unlike the no-fgs BBO class (which uses omega_gw_axion2), this
# one uses the axion1 omega_gw — confirm this asymmetry is intentional.
class_BBO_fgs = Binned_GW(
    name_exp='BBO_with_fgs',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    sens_curve=sens_curve_BBO,
    omega_gw=omega_gw,
    k_sens=k_BBO,
    kmin_sens=k_BBO[0],
    N_bins_sens=10,
    T_obs=BBO_T_obs,
    n_det = 2.,
    fgs=True,
    interp=True,
    sigma_L=1.0
)
xerr_BBO_fgs, yerr_BBO_fgs, bins_mean_point_BBO_fgs, binned_signal_BBO_fgs, binned_curve_BBO_fgs = class_BBO_fgs.sens_curve_binning()
################################################################################
#class for DECIGO with fgs
class_DECIGO_fgs = Binned_GW(
    name_exp = 'DECIGO_with_fgs',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    sens_curve=sens_curve_DECIGO,
    omega_gw=omega_gw,
    k_sens=k_decigo,
    kmin_sens=k_decigo[0],
    N_bins_sens=9,
    T_obs = DECIGO_T_obs,
    interp=True,
    n_det = 2.,
    fgs=True,
    sigma_L=1.
)
xerr_decigo_fgs, yerr_decigo_fgs, bins_mean_point_decigo_fgs, binned_signal_decigo_fgs, binned_curve_decigo_fgs = class_DECIGO_fgs.sens_curve_binning()
###############################################################################
#class for DO Optimal with fgs
class_DO_fgs = Binned_GW(
    name_exp = 'DO_Optimal_with_fgs',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    sens_curve=sens_curve_DO,
    omega_gw=omega_gw,
    k_sens=k_DO,
    kmin_sens=k_DO[0],
    N_bins_sens=7,
    T_obs=DO_T_obs,
    interp=True,
    n_det = 1.,
    fgs=True,
    sigma_L=0.1
)
xerr_DO_fgs, yerr_DO_fgs, bins_mean_point_DO_fgs, binned_signal_DO_fgs, binned_curve_DO_fgs = class_DO_fgs.sens_curve_binning()
###############################################################################
#class for DO Conservative with fgs
class_DO_cons_fgs = Binned_GW(
    name_exp = 'DO_Conservative_with_fgs',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    sens_curve=sens_curve_DO_cons,
    omega_gw=omega_gw,
    k_sens=k_DO_cons,
    kmin_sens=k_DO_cons[0],
    N_bins_sens=7,
    T_obs=DO_cons_T_obs,
    interp=True,
    n_det = 1.,
    fgs=True,
    sigma_L=0.1
)
xerr_DO_cons_fgs, yerr_DO_cons_fgs, bins_mean_point_DO_cons_fgs, binned_signal_DO_cons_fgs, binned_curve_DO_cons_fgs = class_DO_cons_fgs.sens_curve_binning()
###############################################################################
#class for muAres with foregrounds
class_MUARES = Binned_GW(
    name_exp='muAres_two_fgs_spectral',
    kmin=1e-4,
    k=k,
    N_bins=80,
    delta_log_k=1.2,
    sens_curve=sens_curve_MUARES_nofgs,
    omega_gw=omega_gw,
    k_sens=k_muares_nofgs,
    kmin_sens=k_muares_nofgs[0],
    N_bins_sens=12,
    T_obs=Ares_nofgs_T_obs,
    interp=True,
    n_det = 2.,
    fgs=True,
    sigma_L=0.1,
    cosmic_var=True,
    R_auto=Ares_R12[0],
    R_12=Ares_R12,
    f_R=Ares_f_R
)
xerr_muares, yerr_muares, bins_mean_point_muares, binned_signal_muares, binned_curve_muares = class_MUARES.sens_curve_binning()
###############################################################################
#PLOT
# Figure 9: LISA + ET + SKA + LiteBIRD error boxes over the model curves.
# Opaque boxes = without foregrounds, translucent boxes = with foregrounds.
fig = plt.figure()
ax = plt.gca()
#plot for LISA axion
# NOTE(review): this legend quotes k_p=10^15, sigma=9.1, but the plotted curve
# (binned_signal_whole) derives from class_axion1 with k_p=1e13, sigma=8.1 —
# verify the intended parameter labels before reusing this figure.
plt.loglog(np.array(bins_mean_point_whole)/6.5e14, binned_signal_whole, color='blue',label=r'Axion Signal $r_{\star}=400$, $k_{p}=10^{15}$ $Mpc^{-1}$, $\sigma=9.1$',linewidth=1.0, zorder=18)
_ = make_error_boxes(ax, np.array(bins_mean_point)/6.5e14, binned_signal, xerr/6.5e14, yerr, facecolor='b', alpha=0.7, zorder=10)
_ = make_error_boxes(ax, np.array(bins_mean_point_fgs)/6.5e14, binned_signal_fgs, xerr_fgs/6.5e14, yerr_fgs, facecolor='b', alpha=0.4, zorder=9)
## ET axion
_ = make_error_boxes(ax, np.array(bins_mean_point_ET)/6.5e14, binned_signal_ET, xerr_ET/6.5e14, yerr_ET, facecolor='purple', alpha=0.7, zorder=10)
_ = make_error_boxes(ax, np.array(bins_mean_point_ET_fgs)/6.5e14, binned_signal_ET_fgs, xerr_ET_fgs/6.5e14, yerr_ET_fgs, facecolor='purple', alpha=0.4, zorder=9)
# r=0.06 nT=-r/8 signal
plt.loglog(np.array(bins_mean_point_whole_BICEP)/6.5e14, binned_signal_whole_BICEP, color='violet',label=r'Primordial Signal $r=0.06$',linewidth=1.0, zorder=1, linestyle='--')
# r=0.001 signal
plt.loglog(np.array(bins_mean_point_whole_flat_r0001)/6.5e14, binned_signal_whole_flat_r0001, color='green',label=r'Primordial Signal $r=0.001$',linewidth=1.0, zorder=1, linestyle='--')
#plot for LiteBIRD flat spectrum r=0.01
plt.loglog(np.array(bins_mean_point_whole_flat)/6.5e14, binned_signal_whole_flat, color='red',label=r'Primordial Signal $r=0.01$',linewidth=1.0, zorder=1,
           alpha=0.55, linestyle='--')
#plot for LiteBIRD axion spectrum
_ = make_error_boxes(ax, np.array(bins_mean_point_axion)/6.5e14, binned_signal_axion, xerr_axion/6.5e14, yerr_axion, facecolor='g', alpha=0.55, zorder=4)
#plot for BBO axion spectrum
plt.loglog(np.array(bins_mean_point_axion2)/6.5e14, binned_signal_axion2, color='orange', linewidth=1.0, zorder=2,
           label='Axion Signal $r_{\star}=0.15$, $k_{p}=10^{11}$ $Mpc^{-1}$, $\sigma=8$', alpha=0.55, linestyle='--')
#plot for SKA
_ = make_error_boxes(ax, np.array(bins_mean_point_SKA)/6.5e14, binned_signal_SKA, xerr_SKA/6.5e14, yerr_SKA, facecolor='orange', alpha=0.7, zorder=1)
_ = make_error_boxes(ax, np.array(bins_mean_point_SKA_fgs)/6.5e14, binned_signal_SKA_fgs, xerr_SKA_fgs/6.5e14, yerr_SKA_fgs, facecolor='orange', alpha=0.3, zorder=1)
# In-plot experiment labels.
plt.text(5e-19, 3e-11, r'$\bf LiteBIRD$', fontsize=10, color='green')
plt.text(2e-12, 5e-12, r'$\bf SKA$', fontsize=10, color='orange')
plt.text(3e-4, 2e-9, r'$\bf LISA$', fontsize=10, color='blue')
plt.text(3e0, 1.5e-11, r'$\bf ET$', fontsize=10, color='purple')
plt.xlabel(r'f $[Hz]$',fontsize = labelsize)
plt.ylabel(r'$h^{2} \Omega_{GW}$',fontsize = labelsize)
plt.tick_params(axis = 'both',which = 'major', labelsize = axissize)
plt.legend(fontsize=6, loc='upper left')#, bbox_to_anchor=(1, 0.5))
axes = plt.gca()
axes.set_ylim([1e-25,1e-2])
# NOTE(review): `ax` is rebound here from the Axes object to a float x-limit;
# works, but a distinct name (e.g. xlo) would be clearer.
ax = 1e-20
bx = 1e3
ay = 1e-21
by = 1e-4
plot1 = plt.subplot(111)
plt.xscale('log')
plt.yscale('log')
plt.xlim(ax, bx)
plt.ylim(ay, by)
plt.savefig(op.join(op.dirname(__file__),'figures/Fig_9.pdf'), format='pdf', dpi=1000, bbox_inches='tight')
plt.show()
###############################################################################
#PLOT
# Figure 13: muAres (with/without foregrounds) + LiteBIRD error boxes.
fig = plt.figure()
ax = plt.gca()
#plot for LISA axion
# NOTE(review): legend parameters (k_p=10^15, sigma=9.1) do not match the
# class_axion1 construction (k_p=1e13, sigma=8.1) — verify before reuse.
plt.loglog(np.array(bins_mean_point_whole)/6.5e14, binned_signal_whole, color='blue',label=r'Axion Signal $r_{\star}=400$, $k_{p}=10^{15}$ $Mpc^{-1}$, $\sigma=9.1$',linewidth=1.0, zorder=18)
# r=0.001 signal
plt.loglog(np.array(bins_mean_point_whole_flat_r0001)/6.5e14, binned_signal_whole_flat_r0001, color='green',label=r'Primordial Signal $r=0.001$',linewidth=1.0, zorder=1, linestyle='--')
#plot for LiteBIRD flat spectrum r=0.01
plt.loglog(np.array(bins_mean_point_whole_flat)/6.5e14, binned_signal_whole_flat, color='red',label=r'Primordial Signal $r=0.01$',linewidth=1.0, zorder=1,
           alpha=0.55, linestyle='--')
# r=0.06 nT=-r/8 signal
plt.loglog(np.array(bins_mean_point_whole_BICEP)/6.5e14, binned_signal_whole_BICEP, color='violet',label=r'Primordial Signal $r=0.06$',linewidth=1.0, zorder=1, linestyle='--')
#plot for LiteBIRD axion spectrum
_ = make_error_boxes(ax, np.array(bins_mean_point_axion)/6.5e14, binned_signal_axion, xerr_axion/6.5e14, yerr_axion, facecolor='g', alpha=0.55, zorder=4)
#plot for muAres without fgs axion spectrum
# Opaque = no foregrounds, translucent = with foregrounds.
_ = make_error_boxes(ax, np.array(bins_mean_point_muares_nofgs)/6.5e14, binned_signal_muares_nofgs, xerr_muares_nofgs/6.5e14, yerr_muares_nofgs, facecolor=sns.xkcd_rgb["amber"], alpha=0.7, zorder=9)
_ = make_error_boxes(ax, np.array(bins_mean_point_muares)/6.5e14, binned_signal_muares, xerr_muares/6.5e14, yerr_muares, facecolor=sns.xkcd_rgb["amber"], alpha=0.3, zorder=8)
#plot for BBO axion spectrum
plt.loglog(np.array(bins_mean_point_axion2)/6.5e14, binned_signal_axion2, color='orange', linewidth=1.0, zorder=2,
           label='Axion Signal $r_{\star}=0.15$, $k_{p}=10^{11}$ $Mpc^{-1}$, $\sigma=8$', alpha=0.55, linestyle='--')
plt.text(1e-18, 4e-12, r'$\bf LiteBIRD$', fontsize=10, color='green')
plt.text(5e-6, 3e-11, r'$\bf \mu Ares$', fontsize=10, color=sns.xkcd_rgb["amber"])
plt.xlabel(r'f $[Hz]$',fontsize = labelsize)
plt.ylabel(r'$h^{2} \Omega_{GW}$',fontsize = labelsize)
plt.tick_params(axis = 'both',which = 'major', labelsize = axissize)
plt.legend(fontsize=6, loc='upper left')#, bbox_to_anchor=(1, 0.5))
axes = plt.gca()
axes.set_ylim([1e-25,1e-2])
# Axis limits (ax/bx = x range, ay/by = y range); note `ax` is rebound here.
ax = 1e-20
bx = 5e1
ay = 1e-18
by = 1e-7
plot1 = plt.subplot(111)
plt.xscale('log')
plt.yscale('log')
plt.xlim(ax, bx)
plt.ylim(ay, by)
plt.savefig(op.join(op.dirname(__file__),'figures/Fig_13.pdf'), format='pdf', dpi=1000, bbox_inches='tight')
plt.show()
###############################################################################
#PLOT
# Figure 11: DO Optimal (with/without foregrounds) + LiteBIRD error boxes.
fig = plt.figure()
ax = plt.gca()
#plot for LISA axion
# NOTE(review): legend parameters (k_p=10^15, sigma=9.1) do not match the
# class_axion1 construction (k_p=1e13, sigma=8.1) — verify before reuse.
plt.loglog(np.array(bins_mean_point_whole)/6.5e14, binned_signal_whole, color='blue',label=r'Axion Signal $r_{\star}=400$, $k_{p}=10^{15}$ $Mpc^{-1}$, $\sigma=9.1$',linewidth=1.0, zorder=18)
# r=0.001 signal
plt.loglog(np.array(bins_mean_point_whole_flat_r0001)/6.5e14, binned_signal_whole_flat_r0001, color='green',label=r'Primordial Signal $r=0.001$',linewidth=1.0, zorder=1, linestyle='--')
#plot for LiteBIRD flat spectrum r=0.01
plt.loglog(np.array(bins_mean_point_whole_flat)/6.5e14, binned_signal_whole_flat, color='red',label=r'Primordial Signal $r=0.01$',linewidth=1.0, zorder=1,
           alpha=0.55, linestyle='--')
#plot for LiteBIRD axion spectrum
_ = make_error_boxes(ax, np.array(bins_mean_point_axion)/6.5e14, binned_signal_axion, xerr_axion/6.5e14, yerr_axion, facecolor='g', alpha=0.55, zorder=4)
# r=0.06 nT=-r/8 signal
plt.loglog(np.array(bins_mean_point_whole_BICEP)/6.5e14, binned_signal_whole_BICEP, color='violet',label=r'Primordial Signal $r=0.06$',linewidth=1.0, zorder=1, linestyle='--')
#plot for BBO axion spectrum
plt.loglog(np.array(bins_mean_point_axion2)/6.5e14, binned_signal_axion2, color='orange', linewidth=1.0, zorder=2,
           label='Axion Signal $r_{\star}=0.15$, $k_{p}=10^{11}$ $Mpc^{-1}$, $\sigma=8$', alpha=0.55, linestyle='--')
#plot for DO Optimal axion spectrum
# Opaque = no foregrounds, translucent = with foregrounds.
_ = make_error_boxes(ax, np.array(bins_mean_point_DO)/6.5e14, binned_signal_DO, xerr_DO/6.5e14, yerr_DO, facecolor='black', alpha=0.7, zorder=30)
_ = make_error_boxes(ax, np.array(bins_mean_point_DO_fgs)/6.5e14, binned_signal_DO_fgs, xerr_DO_fgs/6.5e14, yerr_DO_fgs, facecolor='black', alpha=0.4, zorder=29)
plt.text(5e-19, 3e-11, r'$\bf LiteBIRD$', fontsize=10, color='green')
plt.text(2e-2, 1e-10, r'$\bf DO$', fontsize=10, color='black')
plt.text(2e-3, 1e-11, r'$\bf Optimal$', fontsize=10, color='black')
plt.xlabel(r'f $[Hz]$',fontsize = labelsize)
plt.ylabel(r'$h^{2} \Omega_{GW}$',fontsize = labelsize)
plt.tick_params(axis = 'both',which = 'major', labelsize = axissize)
plt.legend(fontsize=6, loc='upper left')#, bbox_to_anchor=(1, 0.5))
axes = plt.gca()
axes.set_ylim([1e-25,1e-2])
# Axis limits (ax/bx = x range, ay/by = y range); note `ax` is rebound here.
ax = 1e-20
bx = 5e1
ay = 1e-21
by = 1e-4
plot1 = plt.subplot(111)
plt.xscale('log')
plt.yscale('log')
plt.xlim(ax, bx)
plt.ylim(ay, by)
plt.savefig(op.join(op.dirname(__file__),'figures/Fig_11.pdf'), format='pdf', dpi=1000, bbox_inches='tight')
plt.show()
###############################################################################
#PLOT: Fig. 10 -- axion vs. primordial GW signals with DO Conservative boxes.
fig = plt.figure()
ax = plt.gca()
#plot for LISA axion
plt.loglog(np.array(bins_mean_point_whole)/6.5e14, binned_signal_whole, color='blue',label=r'Axion Signal $r_{\star}=400$, $k_{p}=10^{15}$ $Mpc^{-1}$, $\sigma=9.1$',linewidth=1.0, zorder=18)
# r=0.001 signal
plt.loglog(np.array(bins_mean_point_whole_flat_r0001)/6.5e14, binned_signal_whole_flat_r0001, color='green',label=r'Primordial Signal $r=0.001$',linewidth=1.0, zorder=1, linestyle='--')
#plot for LiteBIRD flat spectrum r=0.01
plt.loglog(np.array(bins_mean_point_whole_flat)/6.5e14, binned_signal_whole_flat, color='red',label=r'Primordial Signal $r=0.01$',linewidth=1.0, zorder=1,
           alpha=0.55, linestyle='--')
#plot for LiteBIRD axion spectrum
_ = make_error_boxes(ax, np.array(bins_mean_point_axion)/6.5e14, binned_signal_axion, xerr_axion/6.5e14, yerr_axion, facecolor='g', alpha=0.55, zorder=4)
#plot for BBO axion spectrum
# FIX: raw-string prefix added so '\star' and '\sigma' are not parsed as
# (invalid) escape sequences; the rendered label text is unchanged.
plt.loglog(np.array(bins_mean_point_axion2)/6.5e14, binned_signal_axion2, color='orange', linewidth=1.0, zorder=2,
           label=r'Axion Signal $r_{\star}=0.15$, $k_{p}=10^{11}$ $Mpc^{-1}$, $\sigma=8$', alpha=0.55, linestyle='--')
# r=0.06 nT=-r/8 signal
plt.loglog(np.array(bins_mean_point_whole_BICEP)/6.5e14, binned_signal_whole_BICEP, color='violet',label=r'Primordial Signal $r=0.06$',linewidth=1.0, zorder=1, linestyle='--')
#plot for DO Conservative axion spectrum
_ = make_error_boxes(ax, np.array(bins_mean_point_DO_cons)/6.5e14, binned_signal_DO_cons, xerr_DO_cons/6.5e14, yerr_DO_cons, facecolor='grey', alpha=0.7, zorder=29)
_ = make_error_boxes(ax, np.array(bins_mean_point_DO_cons_fgs)/6.5e14, binned_signal_DO_cons_fgs, xerr_DO_cons_fgs/6.5e14, yerr_DO_cons_fgs, facecolor='grey', alpha=0.4, zorder=29)
plt.text(5e-19, 3e-11, r'$\bf LiteBIRD$', fontsize=10, color='green')
plt.text(2e-3, 1e-9, r'$\bf DO$', fontsize=10, color='grey')
plt.text(2e-5, 1e-10, r'$\bf Conservative$', fontsize=10, color='grey')
plt.xlabel(r'f $[Hz]$',fontsize = labelsize)
plt.ylabel(r'$h^{2} \Omega_{GW}$',fontsize = labelsize)
plt.tick_params(axis = 'both',which = 'major', labelsize = axissize)
plt.legend(fontsize=6, loc='upper left')#, bbox_to_anchor=(1, 0.5))
axes = plt.gca()
axes.set_ylim([1e-25,1e-2])
# FIX: axis limits renamed (was ax/bx/ay/by) so the float no longer shadows
# the Axes handle `ax` passed to make_error_boxes above.
x_lo = 1e-20
x_hi = 5e1
y_lo = 1e-21
y_hi = 1e-4
plot1 = plt.subplot(111)
plt.xscale('log')
plt.yscale('log')
plt.xlim(x_lo, x_hi)
plt.ylim(y_lo, y_hi)
plt.savefig(op.join(op.dirname(__file__),'figures/Fig_10.pdf'), format='pdf', dpi=1000, bbox_inches='tight')
plt.show()
###############################################################################
#PLOT: Fig. 12 -- axion vs. primordial GW signals with AEDGE error boxes.
fig = plt.figure()
ax = plt.gca()
#plot for LISA axion
plt.loglog(np.array(bins_mean_point_whole)/6.5e14, binned_signal_whole, color='blue',label=r'Axion Signal $r_{\star}=400$, $k_{p}=10^{15}$ $Mpc^{-1}$, $\sigma=9.1$',linewidth=1.0, zorder=18)
# AEDGE axion: with and without (fgs) foreground residuals.
_ = make_error_boxes(ax, np.array(bins_mean_point_AEDGE)/6.5e14, binned_signal_AEDGE, xerr_AEDGE/6.5e14, yerr_AEDGE, facecolor='#17becf', alpha=0.7, zorder=10)
_ = make_error_boxes(ax, np.array(bins_mean_point_AEDGE_fgs)/6.5e14, binned_signal_AEDGE_fgs, xerr_AEDGE_fgs/6.5e14, yerr_AEDGE_fgs, facecolor='#17becf', alpha=0.4, zorder=9)
# r=0.001 signal
plt.loglog(np.array(bins_mean_point_whole_flat_r0001)/6.5e14, binned_signal_whole_flat_r0001, color='green',label=r'Primordial Signal $r=0.001$',linewidth=1.0, zorder=1, linestyle='--')
#plot for LiteBIRD flat spectrum r=0.01
plt.loglog(np.array(bins_mean_point_whole_flat)/6.5e14, binned_signal_whole_flat, color='red',label=r'Primordial Signal $r=0.01$',linewidth=1.0, zorder=1,
           alpha=0.55, linestyle='--')
#plot for LiteBIRD axion spectrum
_ = make_error_boxes(ax, np.array(bins_mean_point_axion)/6.5e14, binned_signal_axion, xerr_axion/6.5e14, yerr_axion, facecolor='g', alpha=0.55, zorder=4)
# r=0.06 nT=-r/8 signal
plt.loglog(np.array(bins_mean_point_whole_BICEP)/6.5e14, binned_signal_whole_BICEP, color='violet',label=r'Primordial Signal $r=0.06$',linewidth=1.0, zorder=1, linestyle='--')
plt.text(1e-14, 1e-13, r'$\bf LiteBIRD$', fontsize=10, color='green')
plt.text(1e-3, 5e-13, r'$\bf AEDGE$', fontsize=10, color='#17becf')
plt.xlabel(r'f $[Hz]$',fontsize = labelsize)
plt.ylabel(r'$h^{2} \Omega_{GW}$',fontsize = labelsize)
plt.tick_params(axis = 'both',which = 'major', labelsize = axissize)
plt.legend(fontsize=6, loc='upper left')#, bbox_to_anchor=(1, 0.5))
axes = plt.gca()
axes.set_ylim([1e-25,1e-2])
# NOTE(review): ax/bx/ay/by here are plain float axis limits; `ax` shadows the
# Axes handle created above (already consumed by make_error_boxes at that point).
ax = 1e-20
bx = 5e2
ay = 5e-17
by = 1e-11
plot1 = plt.subplot(111)
plt.xscale('log')
plt.yscale('log')
plt.xlim(ax, bx)
plt.ylim(ay, by)
plt.savefig(op.join(op.dirname(__file__),'figures/Fig_12.pdf'), format='pdf', dpi=1000, bbox_inches='tight')
plt.show()
| 40.201431
| 198
| 0.545159
|
4a0b0aef6e2b8be1dd939a2074810206f111ac59
| 6,325
|
py
|
Python
|
getchanges.py
|
jrbudda/minecrift
|
081c39921be750f7d9ca27fd4c31f8a4231b754f
|
[
"BSD-3-Clause"
] | 353
|
2016-05-22T19:15:21.000Z
|
2022-03-24T17:49:31.000Z
|
getchanges.py
|
jrbudda/minecrift
|
081c39921be750f7d9ca27fd4c31f8a4231b754f
|
[
"BSD-3-Clause"
] | 332
|
2016-05-23T15:00:11.000Z
|
2022-03-14T17:41:34.000Z
|
getchanges.py
|
jrbudda/minecrift
|
081c39921be750f7d9ca27fd4c31f8a4231b754f
|
[
"BSD-3-Clause"
] | 46
|
2016-05-24T19:25:18.000Z
|
2020-03-25T21:02:11.000Z
|
#!/usr/bin/env python
import os, os.path, sys
import shutil, glob, fnmatch
import subprocess, logging, shlex, re
from optparse import OptionParser
from minecriftversion import mcp_version, minecrift_version_num, minecrift_build
from build import replacelineinfile
base_dir = os.path.dirname(os.path.abspath(__file__))
def cmdsplit(args):
    """Split a command line into argv tokens.

    On Windows (where os.sep is a backslash) backslashes are doubled first so
    that shlex does not consume them as escape characters.
    """
    escaped = args.replace('\\', '\\\\') if os.sep == '\\' else args
    return shlex.split(escaped)
def create_patch( target_dir, src_file, mod_file, label, patch_file ):
    """Diff src_file against mod_file and, if they differ, write a unified
    patch (labelled a/<label> vs b/<label>) with LF line endings to patch_file.

    NOTE: Python 2 source (print statements); keep running under Python 2.
    """
    print "Checking patch status for %s..." % src_file
    # Use the bundled diff.exe on Windows; rely on the system diff elsewhere.
    if os.name == 'nt':
        diff = os.path.abspath(os.path.join(base_dir, 'bin', 'diff.exe'))
    else:
        diff = "diff"
    cmd = cmdsplit('"%s" -u --label "a/%s" "%s" --label "b/%s" "%s"' % (diff, label, src_file, label, mod_file ))
    process = subprocess.Popen(cmd, cwd=target_dir, bufsize=-1, stdout=subprocess.PIPE)
    stdout, stderr = process.communicate()
    # diff emits no output when the files are identical -> no patch written.
    if stdout:
        with open( patch_file, 'wb') as out:
            # Normalise CRLF/CR to LF so patches are platform independent.
            out.write( stdout.replace('\r\n','\n').replace('\r','\n') )
def pythonisdumb(func, path, excinfo):
    """shutil.rmtree onerror callback: report the failing path and error, then
    carry on (deletion errors are non-fatal here)."""
    print path + str(excinfo)
def main(mcp_dir, patch_dir = "patches", orig_dir = ".minecraft_orig"):
    """Regenerate patches/, patchsrc/, src/ and assets/vivecraft/ from the
    modified MCP sources under mcp_dir, diffing against the pristine copy in
    src/<orig_dir>. Existing output directories are wiped first."""
    new_src_dir = os.path.join( base_dir , "src" )
    patch_base_dir = os.path.join( base_dir , patch_dir )
    patchsrc_base_dir = os.path.join( base_dir , "patchsrc" )
    assets_base_dir = os.path.join(base_dir, "assets", "vivecraft" )
    try:
        # Start from a clean slate; failures inside rmtree are reported by
        # the pythonisdumb onerror callback.
        shutil.rmtree( new_src_dir, onerror=pythonisdumb, ignore_errors=True)
        shutil.rmtree( patch_base_dir, onerror=pythonisdumb, ignore_errors=True)
        shutil.rmtree( patchsrc_base_dir, onerror=pythonisdumb, ignore_errors=True)
        shutil.rmtree( assets_base_dir, onerror=pythonisdumb, ignore_errors=True)
        if not os.path.exists( new_src_dir ):
            os.mkdir( new_src_dir )
        if not os.path.exists( patch_base_dir ):
            os.mkdir( patch_base_dir )
        if not os.path.exists( patchsrc_base_dir ):
            os.mkdir( patchsrc_base_dir )
        if not os.path.exists( assets_base_dir ):
            os.makedirs( assets_base_dir )
    except OSError as e:
        # NOTE(review): this names the builtin `quit` without calling it, so
        # the OSError is effectively swallowed; presumably quit() was intended.
        quit
    mod_src_dir = os.path.join( mcp_dir , "src", "minecraft" )
    assets_src_dir = os.path.join( mcp_dir , "src", "assets" )
    org_src_dir = os.path.join( mcp_dir , "src", orig_dir )
    # Pass 1: walk the modified java sources, diffing each file against the
    # pristine copy (patch) or copying it verbatim when it is new.
    for src_dir, dirs, files in os.walk(mod_src_dir):
        pkg = os.path.relpath(src_dir,mod_src_dir)
        new_dir = os.path.join( new_src_dir, pkg )
        mod_dir = os.path.join( org_src_dir, pkg )
        patch_dir = os.path.join( patch_base_dir, pkg )
        patchsrc_dir = os.path.join( patchsrc_base_dir, pkg )
        if not os.path.exists(new_dir):
            os.mkdir(new_dir)
        if not os.path.exists(patch_dir):
            os.mkdir(patch_dir)
        if not os.path.exists(patchsrc_dir):
            os.mkdir(patchsrc_dir)
        for file_ in files:
            mod_file = os.path.join(src_dir, file_)
            org_file = os.path.join(mod_dir, file_)
            # Only .java sources are diffed/copied.
            if mod_file[-4:]!="java":
                continue
            if file_ == "Minecraft.java":
                # Update Minecrift version
                print "Updating Minecraft.java Minecrift version: [Minecrift %s %s] %s" % ( minecrift_version_num, minecrift_build, org_file )
                replacelineinfile( mod_file, "public final String minecriftVerString", " public final String minecriftVerString = \"Vivecraft %s %s\";\n" % (minecrift_version_num, minecrift_build) );
            if os.path.exists(org_file):
                patch_file = os.path.join(patch_dir,file_+".patch")
                label = pkg.replace("\\","/") + "/" + file_ #patch label always has "/"
                create_patch( mcp_dir, org_file, mod_file, label, patch_file )
                # A patch is only written when the files differ; keep the full
                # modified source alongside it for reference.
                if os.path.exists( patch_file ):
                    shutil.copy(mod_file, patchsrc_dir)
            else:
                new_file = os.path.join(new_dir, file_)
                #new class file, just replace
                if os.path.exists( new_file ):
                    os.remove( new_file )
                shutil.copy(mod_file, new_dir)
    # Pass 2: mirror all asset files verbatim.
    for asset_dir, dirs, files in os.walk(assets_src_dir):
        pkg = os.path.relpath(asset_dir,assets_src_dir)
        new_dir = os.path.join( assets_base_dir, pkg )
        if not os.path.exists(new_dir):
            os.mkdir(new_dir)
        for file_ in files:
            new_file = os.path.join(new_dir, file_)
            mod_file = os.path.join(asset_dir, file_)
            print "Copy asset %s" % (mod_file)
            #new class file, just replace
            if os.path.exists( new_file ):
                os.remove( new_file )
            shutil.copy(mod_file, new_dir)
    # Prune any directories that ended up empty.
    removeEmptyFolders(patch_base_dir)
    removeEmptyFolders(new_src_dir)
    removeEmptyFolders(patchsrc_base_dir)
    removeEmptyFolders(assets_base_dir)
def removeEmptyFolders(path):
    """Recursively delete every empty directory under *path*, and *path*
    itself if it ends up empty. Non-directories are ignored."""
    if not os.path.isdir(path):
        return
    # Depth-first: prune children before deciding about this directory.
    for entry in os.listdir(path):
        child = os.path.join(path, entry)
        if os.path.isdir(child):
            removeEmptyFolders(child)
    # Re-list: children may have been removed above.
    if not os.listdir(path):
        os.rmdir(path)
if __name__ == '__main__':
    # Command line entry point: locate the MCP directory and regenerate the
    # patch/source/asset trees via main().
    parser = OptionParser()
    parser.add_option('-m', '--mcp-dir', action='store', dest='mcp_dir', help='Path to MCP to use', default=None)
    parser.add_option('-o', '--orig-dir', action='store', dest='orig_dir', help='Name of original source dir', default=".minecraft_orig")
    parser.add_option('-p', '--patch-dir', action='store', dest='patch_dir', help='Patch dest dir base name to use', default='patches')
    options, _ = parser.parse_args()
    if not options.mcp_dir is None:
        # Explicit MCP path given on the command line.
        main(os.path.abspath(options.mcp_dir), options.patch_dir, options.orig_dir)
    elif os.path.isfile(os.path.join('..', 'runtime', 'commands.py')):
        # Running from inside an MCP checkout: use the parent directory.
        main(os.path.abspath('..'), options.patch_dir, options.orig_dir)
    else:
        # Fall back to the directory named by minecriftversion.mcp_version.
        main(os.path.abspath(mcp_version), options.patch_dir, options.orig_dir)
| 40.544872
| 206
| 0.614545
|
4a0b0d150d90b23f3aa753c1cfd571856049bdfa
| 24
|
py
|
Python
|
geanno/__init__.py
|
HiDiHlabs/geanno
|
625d815e88d6f3c10a232697a707f2c8b28d899e
|
[
"BSD-3-Clause"
] | null | null | null |
geanno/__init__.py
|
HiDiHlabs/geanno
|
625d815e88d6f3c10a232697a707f2c8b28d899e
|
[
"BSD-3-Clause"
] | null | null | null |
geanno/__init__.py
|
HiDiHlabs/geanno
|
625d815e88d6f3c10a232697a707f2c8b28d899e
|
[
"BSD-3-Clause"
] | null | null | null |
from . import Annotator
| 12
| 23
| 0.791667
|
4a0b0d2674d8eb48893225aa4e496ba34db92d87
| 1,641
|
py
|
Python
|
test/test_workspaces.py
|
delftrobotics-forks/catkin_pkg
|
122eae0971f13a6080b72af6bb0eb52656c00bea
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_workspaces.py
|
delftrobotics-forks/catkin_pkg
|
122eae0971f13a6080b72af6bb0eb52656c00bea
|
[
"BSD-3-Clause"
] | 1
|
2020-08-25T11:24:44.000Z
|
2020-09-22T14:01:26.000Z
|
test/test_workspaces.py
|
plusone-robotics/catkin_pkg
|
9d68332b97db07f77a8b56bb5afaf89ec2536dfa
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function
import os
import shutil
import tempfile
import unittest
try:
from catkin_pkg.workspaces import ensure_workspace_marker, get_spaces, order_paths,\
CATKIN_WORKSPACE_MARKER_FILE
except ImportError as e:
raise ImportError('Please adjust your PYTHONPATH before running this test: %s' % str(e))
class WorkspacesTest(unittest.TestCase):
    """Unit tests for the catkin_pkg.workspaces helpers."""

    def test_ensure_workspace_marker(self):
        # FIX: create the temp dir *before* the try block; previously a
        # failing mkdtemp() left root_dir unbound and the finally clause
        # raised NameError, masking the real error.
        root_dir = tempfile.mkdtemp()
        try:
            ensure_workspace_marker(root_dir)
            self.assertTrue(os.path.exists(os.path.join(root_dir, CATKIN_WORKSPACE_MARKER_FILE)))
            # assert no exception on revisit
            ensure_workspace_marker(root_dir)
        finally:
            shutil.rmtree(root_dir)

    def test_get_spaces(self):
        self.assertEqual([], get_spaces([]))
        root_dir = tempfile.mkdtemp()  # created outside try; see note above
        try:
            self.assertEqual([], get_spaces([root_dir]))
            # A directory counts as a space once it contains a .catkin marker.
            with open(os.path.join(root_dir, '.catkin'), 'a') as fhand:
                fhand.write('')
            self.assertEqual([root_dir], get_spaces([root_dir]))
        finally:
            shutil.rmtree(root_dir)

    def test_order_paths(self):
        self.assertEqual([], order_paths([], []))
        self.assertEqual(['bar', 'baz'], order_paths(['bar', 'baz'], ['foo']))
        self.assertEqual(['foo', 'bar'], order_paths(['bar', 'foo'], ['foo']))
        self.assertEqual(['baz', 'foo', 'bar'], order_paths(['bar', 'foo', 'baz'], ['baz', 'foo']))
        # Prefix matching must respect path-component boundaries.
        self.assertEqual(['foo' + os.sep + 'bim', 'bar'], order_paths(['bar', 'foo' + os.sep + 'bim'], ['foo']))
| 37.295455
| 112
| 0.621572
|
4a0b0d5bb01c3bdf3b3cdc0f9fbca77f61822b8c
| 34,695
|
py
|
Python
|
desktop/core/src/desktop/middleware.py
|
10088/hue
|
802811941dabd015a4fd7a640d349f9d26ac5572
|
[
"Apache-2.0"
] | null | null | null |
desktop/core/src/desktop/middleware.py
|
10088/hue
|
802811941dabd015a4fd7a640d349f9d26ac5572
|
[
"Apache-2.0"
] | null | null | null |
desktop/core/src/desktop/middleware.py
|
10088/hue
|
802811941dabd015a4fd7a640d349f9d26ac5572
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from builtins import object
import inspect
import json
import logging
import mimetypes
import os.path
import re
import socket
import sys
import tempfile
import time
import traceback
import kerberos
import django.db
import django.views.static
import django_prometheus
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME, BACKEND_SESSION_KEY, authenticate, load_backend, login
from django.contrib.auth.middleware import RemoteUserMiddleware
from django.core import exceptions
from django.http import HttpResponseNotAllowed, HttpResponseForbidden
from django.urls import resolve
from django.http import HttpResponseRedirect, HttpResponse
from django.utils.deprecation import MiddlewareMixin
from hadoop import cluster
from dashboard.conf import IS_ENABLED as DASHBOARD_ENABLED
from useradmin.models import User
import desktop.views
from desktop import appmanager, metrics
from desktop.auth.backend import is_admin, find_or_create_user, ensure_has_a_group, rewrite_user
from desktop.conf import AUTH, HTTP_ALLOWED_METHODS, ENABLE_PROMETHEUS, KNOX, DJANGO_DEBUG_MODE, AUDIT_EVENT_LOG_DIR, \
SERVER_USER, REDIRECT_WHITELIST, SECURE_CONTENT_SECURITY_POLICY, has_connectors
from desktop.context_processors import get_app_name
from desktop.lib import apputil, i18n, fsmanager
from desktop.lib.django_util import JsonResponse, render, render_json
from desktop.lib.exceptions import StructuredException
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.view_util import is_ajax
from desktop.log import get_audit_logger
from desktop.log.access import access_log, log_page_hit, access_warn
from libsaml.conf import CDP_LOGOUT_URL
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
from django.utils.http import url_has_allowed_host_and_scheme
from urllib.parse import quote
else:
from django.utils.translation import ugettext as _
from django.utils.http import is_safe_url as url_has_allowed_host_and_scheme, urlquote as quote
LOG = logging.getLogger(__name__)

# Response header used by middleware-generated replies (login-required or
# structured-exception payloads) so the JS layer can recognize them.
MIDDLEWARE_HEADER = "X-Hue-Middleware-Response"

# Views inside Django that don't require login
# (see LoginAndPermissionMiddleware)
DJANGO_VIEW_AUTH_WHITELIST = [
    django.views.static.serve,
    desktop.views.is_alive,
]

if ENABLE_PROMETHEUS.get():
    # The metrics endpoint must be scrapable without a Hue session.
    DJANGO_VIEW_AUTH_WHITELIST.append(django_prometheus.exports.ExportToDjangoView)
class AjaxMiddleware(MiddlewareMixin):
    """
    Flag AJAX-style requests.

    Sets ``request.ajax`` to True when either the request looks like an
    XMLHttpRequest (``is_ajax``, which inspects HTTP headers) or the caller
    explicitly asked for JSON output via the ``?format=json`` GET parameter.
    """
    def process_request(self, request):
        wants_json = request.GET.get("format", "") == "json"
        request.ajax = is_ajax(request) or wants_json
        return None
class ExceptionMiddleware(MiddlewareMixin):
    """
    If exceptions know how to render themselves, use that.
    """
    def process_exception(self, request, exception):
        stack = traceback.format_exc()
        logging.info("Processing exception: %s: %s" % (
            i18n.smart_unicode(exception), i18n.smart_unicode(stack))
        )

        # PopupException renders itself completely.
        if isinstance(exception, PopupException):
            return exception.response(request)

        # Anything else we only know how to render when it is structured.
        if not isinstance(exception, StructuredException):
            return None

        status = getattr(exception, 'error_code', 500)
        if request.ajax:
            response = render_json(exception.response_data)
            response[MIDDLEWARE_HEADER] = 'EXCEPTION'
        else:
            response = render("error.mako", request, {
                'error': exception.response_data.get("message"),
                'is_embeddable': request.GET.get('is_embeddable', False),
            })
        response.status_code = status
        return response
class ClusterMiddleware(MiddlewareMixin):
    """
    Manages setting request.fs and request.jt
    """
    def process_view(self, request, view_func, view_args, view_kwargs):
        """
        Sets request.fs and request.jt on every request to point to the configured filesystem.
        """
        # The ?fs= query parameter wins over a captured URL kwarg; either way
        # the 'fs' kwarg is consumed so the view never receives it.
        request.fs_ref = request.GET.get('fs', view_kwargs.pop('fs', 'default'))

        request.fs = None
        if not request.user.is_authenticated:
            LOG.warning("request.fs was not set for anonymous user")
        else:
            request.fs = fsmanager.get_filesystem(request.fs_ref)
            if request.fs is None:
                LOG.warning("request.fs user was not set")
            else:
                request.fs.setuser(request.user.username)

        # Deprecated
        request.jt = None
class NotificationMiddleware(MiddlewareMixin):
    """
    Manages setting request.info and request.error
    """
    def process_view(self, request, view_func, view_args, view_kwargs):
        def _format(title, detail=None):
            # A detail, when supplied, is appended on its own line.
            suffix = '' if detail is None else '<br/>%s' % detail
            return '%s %s' % (title, suffix)

        def info(title, detail=None):
            messages.info(request, _format(title, detail))

        def error(title, detail=None):
            messages.error(request, _format(title, detail))

        def warn(title, detail=None):
            messages.warning(request, _format(title, detail))

        # Views call these to queue user-visible notifications.
        request.info = info
        request.error = error
        request.warn = warn
class AppSpecificMiddleware(object):
    """Dispatch per-app middleware hooks.

    Apps may declare MIDDLEWARE_CLASSES in their settings module; instances
    are loaded once at construction and their process_view/process_response/
    process_exception hooks run only for requests routed to that app.
    """

    @classmethod
    def augment_request_with_app(cls, request, view_func):
        """Inject the app name into the request for use in later-stage middleware"""
        if not hasattr(request, "_desktop_app"):
            module = inspect.getmodule(view_func)
            request._desktop_app = apputil.get_app_for_module(module)
            if not request._desktop_app and not module.__name__.startswith('django.'):
                logging.debug("no app for view func: %s in %s" % (view_func, module))

    def __init__(self):
        # Maps app name -> {'view': [...], 'response': [...], 'exception': [...]}
        self.middlewares_by_app = {}
        for app in appmanager.DESKTOP_APPS:
            self.middlewares_by_app[app.name] = self._load_app_middleware(app)

    def _get_middlewares(self, app, type):
        # Hooks of the given type ('view'/'response'/'exception') for an app.
        return self.middlewares_by_app.get(app, {}).get(type, [])

    def process_view(self, request, view_func, view_args, view_kwargs):
        self.augment_request_with_app(request, view_func)
        if not request._desktop_app:
            return None

        # Run the middlewares
        ret = None
        for middleware in self._get_middlewares(request._desktop_app, 'view'):
            ret = middleware(request, view_func, view_args, view_kwargs)
            if ret: return ret # Short circuit
        return ret

    def process_response(self, request, response):
        # We have the app that we stuffed in there
        if not hasattr(request, '_desktop_app'):
            logging.debug("No desktop_app known for request.")
            return response

        # Response hooks run in reverse registration order (Django semantics).
        for middleware in reversed(self._get_middlewares(request._desktop_app, 'response')):
            response = middleware(request, response)
        return response

    def process_exception(self, request, exception):
        # We have the app that we stuffed in there
        if not hasattr(request, '_desktop_app'):
            logging.debug("No desktop_app known for exception.")
            return None

        # Run the middlewares
        ret = None
        for middleware in self._get_middlewares(request._desktop_app, 'exception'):
            ret = middleware(request, exception)
            if ret: return ret # short circuit
        return ret

    def _load_app_middleware(cls, app):
        # NOTE(review): despite the `cls` parameter name this is an instance
        # method (called via self._load_app_middleware(app) in __init__).
        app_settings = app.settings
        if not app_settings:
            return
        mw_classes = app_settings.__dict__.get('MIDDLEWARE_CLASSES', [])

        result = {'view': [], 'response': [], 'exception': []}
        for middleware_path in mw_classes:
            # This code brutally lifted from django.core.handlers
            try:
                dot = middleware_path.rindex('.')
            except ValueError:
                raise exceptions.ImproperlyConfigured(_('%(module)s isn\'t a middleware module.') % {'module': middleware_path})
            mw_module, mw_classname = middleware_path[:dot], middleware_path[dot+1:]
            try:
                mod = __import__(mw_module, {}, {}, [''])
            except ImportError as e:
                raise exceptions.ImproperlyConfigured(
                    _('Error importing middleware %(module)s: "%(error)s".') % {'module': mw_module, 'error': e}
                )
            try:
                mw_class = getattr(mod, mw_classname)
            except AttributeError:
                raise exceptions.ImproperlyConfigured(
                    _('Middleware module "%(module)s" does not define a "%(class)s" class.') % {'module': mw_module, 'class': mw_classname}
                )
            try:
                mw_instance = mw_class()
            except exceptions.MiddlewareNotUsed:
                continue
            # End brutal code lift

            # We need to make sure we don't have a process_request function because we don't know what
            # application will handle the request at the point process_request is called
            if hasattr(mw_instance, 'process_request'):
                raise exceptions.ImproperlyConfigured(_('AppSpecificMiddleware module "%(module)s" has a process_request function' + \
                    ' which is impossible.') % {'module': middleware_path})
            if hasattr(mw_instance, 'process_view'):
                result['view'].append(mw_instance.process_view)
            if hasattr(mw_instance, 'process_response'):
                result['response'].insert(0, mw_instance.process_response)
            if hasattr(mw_instance, 'process_exception'):
                result['exception'].insert(0, mw_instance.process_exception)
        return result
class LoginAndPermissionMiddleware(MiddlewareMixin):
    """
    Middleware that forces all views (except those that opt out) through authentication.
    """

    def process_request(self, request):
        # When local user login, oidc middleware refresh token if oidc_id_token_expiration doesn't exists!
        if request.session.get('_auth_user_backend', '') == 'desktop.auth.backend.AllowFirstUserDjangoBackend' \
                and 'desktop.auth.backend.OIDCBackend' in AUTH.BACKEND.get():
            request.session['oidc_id_token_expiration'] = time.time() + 300

    def process_view(self, request, view_func, view_args, view_kwargs):
        """
        We also perform access logging in ``process_view()`` since we have the view function,
        which tells us the log level. The downside is that we don't have the status code,
        which isn't useful for status logging anyways.
        """
        request.ts = time.time()  # consumed by process_response for timing
        request.view_func = view_func
        access_log_level = getattr(view_func, 'access_log_level', None)

        # Skip loop for oidc
        if request.path in ['/oidc/authenticate/', '/oidc/callback/', '/oidc/logout/', '/hue/oidc_failed/']:
            return None

        if AUTH.AUTO_LOGIN_ENABLED.get() and request.path.startswith('/api/token/auth'):
            pass # allow /api/token/auth can create user or make it active
        elif request.path.startswith('/api/'):
            return None

        # Skip views not requiring login
        # If the view has "opted out" of login required, skip
        if hasattr(view_func, "login_notrequired"):
            log_page_hit(request, view_func, level=access_log_level or logging.DEBUG)
            return None

        # There are certain django views which are also opt-out, but
        # it would be evil to go add attributes to them
        if view_func in DJANGO_VIEW_AUTH_WHITELIST:
            log_page_hit(request, view_func, level=access_log_level or logging.DEBUG)
            return None

        # If user is logged in, check that he has permissions to access the app
        if request.user.is_active and request.user.is_authenticated:
            AppSpecificMiddleware.augment_request_with_app(request, view_func)

            # Until Django 1.3 which resolves returning the URL name, just do a match of the name of the view
            try:
                access_view = 'access_view:%s:%s' % (request._desktop_app, resolve(request.path)[0].__name__)
            except Exception as e:
                access_log(request, 'error checking view perm: %s' % e, level=access_log_level)
                access_view = ''

            app_accessed = request._desktop_app
            app_libs_whitelist = ["desktop", "home", "home2", "about", "hue", "editor", "notebook", "indexer", "404", "500", "403"]
            if has_connectors():
                app_libs_whitelist.append('metadata')
            if DASHBOARD_ENABLED.get():
                app_libs_whitelist.append('dashboard')

            # Accessing an app can access an underlying other app.
            # e.g. impala or spark uses code from beeswax and so accessing impala shows up as beeswax here.
            # Here we trust the URL to be the real app we need to check the perms.
            ui_app_accessed = get_app_name(request)
            if app_accessed != ui_app_accessed and ui_app_accessed not in ('logs', 'accounts', 'login'):
                app_accessed = ui_app_accessed

            if app_accessed and \
                    app_accessed not in app_libs_whitelist and \
                    not (
                        is_admin(request.user) or
                        request.user.has_hue_permission(action="access", app=app_accessed) or
                        request.user.has_hue_permission(action=access_view, app=app_accessed)):
                access_log(request, 'permission denied', level=access_log_level)
                return PopupException(
                    _("You do not have permission to access the %(app_name)s application.") % {'app_name': app_accessed.capitalize()},
                    error_code=401
                ).response(request)
            else:
                if not hasattr(request, 'view_func'):
                    log_page_hit(request, view_func, level=access_log_level)
                return None

        # Unauthenticated from here on.
        if AUTH.AUTO_LOGIN_ENABLED.get():
            # Auto-create the hue/hue user if not already present
            user = find_or_create_user(username='hue', password='hue')
            ensure_has_a_group(user)
            user = rewrite_user(user)
            user.is_active = True
            user.save()

            user = authenticate(request, username='hue', password='hue')
            if user is not None:
                login(request, user)
                return None

        logging.info("Redirecting to login page: %s", request.get_full_path())
        access_log(request, 'login redirection', level=access_log_level)

        no_idle_backends = [
            "desktop.auth.backend.SpnegoDjangoBackend",
            "desktop.auth.backend.KnoxSpnegoDjangoBackend"
        ]
        if CDP_LOGOUT_URL.get() == "":
            no_idle_backends.append("libsaml.backend.SAML2Backend")

        if request.ajax and all(no_idle_backend not in AUTH.BACKEND.get() for no_idle_backend in no_idle_backends):
            # Send back a magic header which causes Hue.Request to interpose itself
            # in the ajax request and make the user login before resubmitting the
            # request.
            response = HttpResponse("/* login required */", content_type="text/javascript")
            response[MIDDLEWARE_HEADER] = 'LOGIN_REQUIRED'
            return response
        else:
            if request.GET.get('is_embeddable'):
                return JsonResponse({
                    'url': "%s?%s=%s" % (
                        settings.LOGIN_URL,
                        REDIRECT_FIELD_NAME,
                        quote('/hue' + request.get_full_path().replace('is_embeddable=true', '').replace('&&', '&'))
                    )
                }) # Remove embeddable so redirect from & to login works. Login page is not embeddable
            else:
                return HttpResponseRedirect("%s?%s=%s" % (settings.LOGIN_URL, REDIRECT_FIELD_NAME, quote(request.get_full_path())))

    def process_response(self, request, response):
        # Log the page hit with elapsed time once a response is available.
        if hasattr(request, 'ts') and hasattr(request, 'view_func'):
            log_page_hit(request, request.view_func, level=logging.INFO, start_time=request.ts, response=response)
        return response
class JsonMessage(object):
    """Log-message holder that serializes its keyword arguments to JSON
    lazily, when the logger coerces it to a string."""

    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def __str__(self):
        return json.dumps(self.kwargs)
class AuditLoggingMiddleware(MiddlewareMixin):
    """Emit a JSON audit record for every request flagged with ``request.audit``.

    The middleware unloads itself entirely when no audit log directory is
    configured.
    """

    def __init__(self, get_response):
        self.get_response = get_response
        # Audited operations are recorded as performed via the server user.
        self.impersonator = SERVER_USER.get()

        if not AUDIT_EVENT_LOG_DIR.get():
            LOG.info('Unloading AuditLoggingMiddleware')
            raise exceptions.MiddlewareNotUsed

    def process_response(self, request, response):
        # The 'audited' response header reports whether a record was written;
        # audit failures are logged but never break the response.
        response['audited'] = False
        try:
            if hasattr(request, 'audit') and request.audit is not None:
                self._log_message(request, response)
                response['audited'] = True
        except Exception as e:
            LOG.error('Could not audit the request: %s' % e)
        return response

    def _log_message(self, request, response=None):
        audit_logger = get_audit_logger()

        audit_logger.debug(JsonMessage(**{
            'username': self._get_username(request),
            'impersonator': self.impersonator,
            'ipAddress': self._get_client_ip(request),
            'operation': request.audit['operation'],
            'operationText': request.audit.get('operationText', ''),
            'eventTime': self._milliseconds_since_epoch(),
            'allowed': self._get_allowed(request, response),
            'service': get_app_name(request),
            'url': request.path
        }))

    def _get_client_ip(self, request):
        # Prefer the first proxy-forwarded address when one is present.
        x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
        if x_forwarded_for:
            x_forwarded_for = x_forwarded_for.split(',')[0]
        return request.META.get('HTTP_CLIENT_IP') or x_forwarded_for or request.META.get('REMOTE_ADDR')

    def _get_username(self, request):
        # Explicit username in the audit dict wins over the session user.
        username = 'anonymous'
        if request.audit.get('username', None):
            username = request.audit.get('username')
        elif hasattr(request, 'user') and not request.user.is_anonymous:
            username = request.user.get_username()
        return username

    def _milliseconds_since_epoch(self):
        return int(time.time() * 1000)

    def _get_allowed(self, request, response=None):
        # An explicit verdict recorded in request.audit overrides the default
        # "anything but HTTP 401 counts as allowed".
        allowed = response.status_code != 401
        if 'allowed' in request.audit:
            return request.audit['allowed']
        return allowed
try:
import tidylib
_has_tidylib = True
except Exception as ex:
# The exception type is not ImportError. It's actually an OSError.
logging.warn("Failed to import tidylib (for debugging). Is libtidy installed?")
_has_tidylib = False
class HtmlValidationMiddleware(MiddlewareMixin):
    """
    If configured, validate output html for every response.

    Each HTML response is run through libtidy; warnings that survive the
    ignore filters are logged, and the original/tidied documents are written
    to a temp directory for offline inspection. The response itself is
    returned unmodified.
    """

    def __init__(self, get_response):
        self.get_response = get_response
        self._logger = logging.getLogger('HtmlValidationMiddleware')

        if not _has_tidylib:
            logging.error("HtmlValidationMiddleware not activatived: Failed to import tidylib.")
            return

        # Things that we don't care about
        self._to_ignore = (
            re.compile('- Warning: <.*> proprietary attribute "data-'),
            re.compile('- Warning: trimming empty'),
            re.compile('- Info:'),
        )

        # Find the directory to write tidy html output
        try:
            self._outdir = os.path.join(tempfile.gettempdir(), 'hue_html_validation')
            if not os.path.isdir(self._outdir):
                os.mkdir(self._outdir, 0o755)
        except Exception as ex:
            self._logger.exception('Failed to get temp directory: %s', (ex,))
            self._outdir = tempfile.mkdtemp(prefix='hue_html_validation-')

        # Options to pass to libtidy. See
        # http://tidy.sourceforge.net/docs/quickref.html
        self._options = {
            'show-warnings': 1,
            'output-html': 0,
            'output-xhtml': 1,
            'char-encoding': 'utf8',
            'output-encoding': 'utf8',
            'indent': 1,
            'wrap': 0,
        }

    def process_response(self, request, response):
        if not _has_tidylib or not self._is_html(request, response):
            return response

        html, errors = tidylib.tidy_document(response.content,
                                             self._options,
                                             keep_doc=True)
        if not errors:
            return response

        # Filter out what we care about
        err_list = errors.rstrip().split('\n')
        err_list = self._filter_warnings(err_list)
        if not err_list:
            return response

        try:
            fn = resolve(request.path)[0]
            fn_name = '%s.%s' % (fn.__module__, fn.__name__)
        except Exception:
            # FIX: was a bare except; narrowed so SystemExit/KeyboardInterrupt
            # still propagate.
            LOG.exception('failed to resolve url')
            fn_name = '<unresolved_url>'

        # Write the two versions of html out for offline debugging
        filename = os.path.join(self._outdir, fn_name)

        result = "HTML tidy result: %s [%s]:" \
                 "\n\t%s" \
                 "\nPlease see %s.orig %s.tidy\n-------" % \
                 (request.path, fn_name, '\n\t'.join(err_list), filename, filename)

        # FIX: the Python 2 builtin file() no longer exists in Python 3; use
        # open() in context managers so handles are also closed promptly.
        with open(filename + '.orig', 'w') as f:
            f.write(i18n.smart_str(response.content))
        with open(filename + '.tidy', 'w') as f:
            f.write(i18n.smart_str(html))
        with open(filename + '.info', 'w') as f:
            f.write(i18n.smart_str(result))

        self._logger.error(result)
        return response

    def _filter_warnings(self, err_list):
        """A hacky way to filter out things that we don't care about."""
        res = []
        for err in err_list:
            for ignore in self._to_ignore:
                if ignore.search(err):
                    break
            else:
                res.append(err)
        return res

    def _is_html(self, request, response):
        # Only validate successful, non-AJAX HTML responses.
        return not is_ajax(request) and \
            'html' in response['Content-Type'] and \
            200 <= response.status_code < 300
class ProxyMiddleware(MiddlewareMixin):
    """Auto-login middleware for the AllowAll backend.

    Authenticates the user named in the ``user.name`` query parameter.
    Unloads itself unless AllowAllBackend is configured.
    """

    def __init__(self, get_response):
        self.get_response = get_response
        if 'desktop.auth.backend.AllowAllBackend' not in AUTH.BACKEND.get():
            LOG.info('Unloading ProxyMiddleware')
            raise exceptions.MiddlewareNotUsed

    def process_response(self, request, response):
        return response

    def process_request(self, request):
        view_func = resolve(request.path)[0]
        if view_func in DJANGO_VIEW_AUTH_WHITELIST:
            return

        # AuthenticationMiddleware is required so that request.user exists.
        if not hasattr(request, 'user'):
            raise exceptions.ImproperlyConfigured(
                "The Django remote user auth middleware requires the"
                " authentication middleware to be installed. Edit your"
                " MIDDLEWARE_CLASSES setting to insert"
                " 'django.contrib.auth.middleware.AuthenticationMiddleware'"
                " before the SpnegoUserMiddleware class.")

        if request.GET.get('user.name'):
            try:
                username = request.GET.get('user.name')
                user = authenticate(username=username, password='')
                if user:
                    request.user = user
                    login(request, user)
                    msg = 'Successful login for user: %s' % request.user.username
                    audit_username = request.user.username
                else:
                    # BUG FIX: on a failed login request.user is never
                    # reassigned, so request.user.username named the wrong
                    # (pre-existing/anonymous) user. Report the attempted name.
                    msg = 'Failed login for user: %s' % username
                    audit_username = username
                request.audit = {
                    'operation': 'USER_LOGIN',
                    'username': audit_username,
                    'operationText': msg
                }
                return
            except Exception:
                # Narrowed from a bare except; behavior (log + give up) kept.
                LOG.exception('Unexpected error when authenticating')
                return

    def clean_username(self, username, request):
        """
        Allows the backend to clean the username, if the backend defines a
        clean_username method.
        """
        backend_str = request.session[BACKEND_SESSION_KEY]
        backend = load_backend(backend_str)
        try:
            username = backend.clean_username(username)
        except AttributeError:
            pass
        return username
class SpnegoMiddleware(MiddlewareMixin):
    """
    Based on the WSGI SPNEGO middlware class posted here:
    http://code.activestate.com/recipes/576992/
    """

    def __init__(self, get_response):
        self.get_response = get_response
        # Only stay installed when a SPNEGO-capable auth backend is
        # configured; otherwise Django drops this middleware entirely.
        if not set(AUTH.BACKEND.get()).intersection(
            set(['desktop.auth.backend.SpnegoDjangoBackend', 'desktop.auth.backend.KnoxSpnegoDjangoBackend'])
        ):
            LOG.info('Unloading SpnegoMiddleware')
            raise exceptions.MiddlewareNotUsed

    def process_request(self, request):
        """
        The process_request() method needs to communicate some state to the
        process_response() method. The two options for this are to return an
        HttpResponse object or to modify the META headers in the request object. In
        order to ensure that all of the middleware is properly invoked, this code
        currently uses the later approach. The following headers are currently used:

        GSS-String:
          This means that GSS authentication was successful and that we need to pass
          this value for the WWW-Authenticate header in the response.

        Return-401:
          This means that the SPNEGO backend is in use, but we didn't get an
          AUTHORIZATION header from the client. The way that the protocol works
          (http://tools.ietf.org/html/rfc4559) is by having the first response to an
          un-authenticated request be a 401 with the WWW-Authenticate header set to
          Negotiate. This will cause the browser to re-try the request with the
          AUTHORIZATION header set.
        """
        view_func = resolve(request.path)[0]
        if view_func in DJANGO_VIEW_AUTH_WHITELIST:
            return

        # AuthenticationMiddleware is required so that request.user exists.
        if not hasattr(request, 'user'):
            raise exceptions.ImproperlyConfigured(
                "The Django remote user auth middleware requires the"
                " authentication middleware to be installed. Edit your"
                " MIDDLEWARE_CLASSES setting to insert"
                " 'django.contrib.auth.middleware.AuthenticationMiddleware'"
                " before the SpnegoUserMiddleware class.")

        if 'HTTP_AUTHORIZATION' in request.META:
            # NOTE: `type` shadows the builtin here; it is the auth scheme
            # ("Negotiate ...") from the Authorization header.
            type, authstr = request.META['HTTP_AUTHORIZATION'].split(' ', 1)

            if type == 'Negotiate':
                try:
                    # GSSAPI handshake: init server context, run one step
                    # with the client token, and capture the reply token.
                    result, context = kerberos.authGSSServerInit('HTTP')
                    if result != 1:
                        return

                    gssstring = ''
                    r = kerberos.authGSSServerStep(context, authstr)
                    if r == 1:
                        gssstring = kerberos.authGSSServerResponse(context)
                        request.META['GSS-String'] = 'Negotiate %s' % gssstring
                    else:
                        kerberos.authGSSServerClean(context)
                        return

                    username = kerberos.authGSSServerUserName(context)
                    kerberos.authGSSServerClean(context)

                    # In Trusted knox proxy, Hue must expect following:
                    #   Trusted knox user: KNOX_PRINCIPAL
                    #   Trusted knox proxy host: KNOX_PROXYHOSTS
                    if 'desktop.auth.backend.KnoxSpnegoDjangoBackend' in AUTH.BACKEND.get():
                        knox_verification = False
                        principals = self.clean_principal(KNOX.KNOX_PRINCIPAL.get())
                        principal = self.clean_principal(username)
                        if principal.intersection(principals):
                            # This may contain chain of reverse proxies, e.g. knox proxy, hue load balancer
                            # Compare hostname on both HTTP_X_FORWARDED_HOST & KNOX_PROXYHOSTS.
                            # Both of these can be configured to use either hostname or IPs and we have to normalize to one or the other
                            req_hosts = self.clean_host(request.META['HTTP_X_FORWARDED_HOST'])
                            knox_proxy = self.clean_host(KNOX.KNOX_PROXYHOSTS.get())
                            if req_hosts.intersection(knox_proxy):
                                knox_verification = True
                            else:
                                access_warn(request, 'Failed to verify provided host %s with %s ' % (req_hosts, knox_proxy))
                        else:
                            access_warn(request, 'Failed to verify provided username %s with %s ' % (principal, principals))
                        # If knox authentication failed then generate 401 (Unauthorized error)
                        if not knox_verification:
                            request.META['Return-401'] = ''
                            return

                    # Already logged in as the same (cleaned) principal:
                    # nothing to do.
                    if request.user.is_authenticated:
                        if request.user.username == self.clean_username(username, request):
                            return

                    user = authenticate(username=username, request=request)
                    if user:
                        request.user = user
                        login(request, user)
                        msg = 'Successful login for user: %s' % request.user.username
                    else:
                        # NOTE(review): on failure request.user is the
                        # pre-existing user, not the attempted principal --
                        # the audited name may be misleading; confirm intended.
                        msg = 'Failed login for user: %s' % request.user.username
                    request.audit = {
                        'operation': 'USER_LOGIN',
                        'username': request.user.username,
                        'operationText': msg
                    }
                    access_warn(request, msg)
                    return
                except:
                    LOG.exception('Unexpected error when authenticating against KDC')
                    return
            else:
                # Non-Negotiate scheme: challenge the client to retry with
                # SPNEGO (see the Return-401 contract in the docstring).
                request.META['Return-401'] = ''
                return
        else:
            if not request.user.is_authenticated:
                request.META['Return-401'] = ''
            return

    def process_response(self, request, response):
        # Honor the state left behind by process_request (see its docstring).
        if 'GSS-String' in request.META:
            response['WWW-Authenticate'] = request.META['GSS-String']
        elif 'Return-401' in request.META:
            response = HttpResponse("401 Unauthorized", content_type="text/plain",
                                    status=401)
            response['WWW-Authenticate'] = 'Negotiate'
            response.status = 401
        return response

    def clean_host(self, pattern):
        """Normalize a host list/CSV to a set of resolved hostnames
        (falling back to the raw host when reverse lookup fails)."""
        hosts = []
        if pattern:
            pattern_list = pattern if isinstance(pattern, list) else pattern.split(',')
            for hostport in pattern_list:
                host = hostport.split(':')[0].strip()
                try:
                    hosts.append(socket.gethostbyaddr(host)[0])
                except Exception:
                    LOG.exception('Could not resolve host addr %s' % host)
                    hosts.append(host)
        return set(hosts)

    def clean_principal(self, pattern):
        """Strip the host part from kerberos principals ('user/host@REALM'
        style) and return the set of bare principal names."""
        principals = []
        if pattern:
            pattern_list = pattern if isinstance(pattern, list) else pattern.split(',')
            for principal_host in pattern_list:
                principal = principal_host.split('/')[0].strip()
                principals.append(principal)
        return set(principals)

    def clean_username(self, username, request):
        """
        Allows the backend to clean the username, if the backend defines a
        clean_username method.
        """
        backend_str = request.session[BACKEND_SESSION_KEY]
        backend = load_backend(backend_str)
        try:
            username = backend.clean_username(username, request)
        except AttributeError:
            pass
        return username
class HueRemoteUserMiddleware(RemoteUserMiddleware):
    """
    Middleware to delegate authentication to a proxy server. The proxy server
    will set an HTTP header (defaults to Remote-User) with the name of the
    authenticated user. This class extends the RemoteUserMiddleware class
    built into Django with the ability to configure the HTTP header and to
    unload the middleware if the RemoteUserDjangoBackend is not currently
    in use.
    """

    def __init__(self, get_response):
        configured_backends = AUTH.BACKEND.get()
        if 'desktop.auth.backend.RemoteUserDjangoBackend' not in configured_backends:
            LOG.info('Unloading HueRemoteUserMiddleware')
            raise exceptions.MiddlewareNotUsed
        super().__init__(get_response)
        # Let the header name come from configuration rather than Django's default.
        self.header = AUTH.REMOTE_USER_HEADER.get()
class EnsureSafeMethodMiddleware(MiddlewareMixin):
    """
    Middleware to white list configured HTTP request methods.
    """

    def process_request(self, request):
        """Reject the request with 405 if its method is not whitelisted."""
        # Fetch the configured whitelist once instead of twice per request.
        allowed_methods = HTTP_ALLOWED_METHODS.get()
        if request.method not in allowed_methods:
            return HttpResponseNotAllowed(allowed_methods)
class EnsureSafeRedirectURLMiddleware(MiddlewareMixin):
    """
    Middleware to white list configured redirect URLs.
    """

    def process_response(self, request, response):
        # Only vet real redirects; a response can opt out by carrying the
        # ad-hoc `redirect_override` attribute.
        if response.status_code in (301, 302, 303, 305, 307, 308) and response.get('Location') and not hasattr(response, 'redirect_override'):
            redirection_patterns = REDIRECT_WHITELIST.get()
            location = response['Location']

            # Allowed when it matches a configured whitelist regex...
            if any(regexp.match(location) for regexp in redirection_patterns):
                return response

            # ...or stays on this host with a safe scheme...
            if url_has_allowed_host_and_scheme(location, allowed_hosts={request.get_host()}):
                return response

            # ...or is part of the OIDC login/logout flow.
            if request.path in ['/oidc/authenticate/', '/oidc/callback/', '/oidc/logout/', '/hue/oidc_failed/']:
                return response

            # Otherwise replace the redirect with a 403 error page.
            response = render("error.mako", request, {
                'error': _('Redirect to %s is not allowed.') % response['Location'],
                'is_embeddable': request.GET.get('is_embeddable', False),
            })
            response.status_code = 403
            return response
        else:
            return response
class MetricsMiddleware(MiddlewareMixin):
    """
    Middleware to track the number of active requests.
    """

    def process_request(self, request):
        # BUG FIX: the timer was stored on the middleware instance, which is
        # shared across requests, so concurrent requests clobbered each
        # other's timer and corrupted the response-time metric. Store it on
        # the request instead.
        request._response_timer = metrics.response_time.time()
        metrics.active_requests.inc()

    def process_exception(self, request, exception):
        timer = getattr(request, '_response_timer', None)
        if timer is not None:
            timer.stop()
            # Drop it so process_response does not stop the same timer twice.
            del request._response_timer
        metrics.request_exceptions.inc()

    def process_response(self, request, response):
        # Guard: process_response can run without a prior process_request
        # (e.g. when an earlier middleware short-circuits).
        timer = getattr(request, '_response_timer', None)
        if timer is not None:
            timer.stop()
        metrics.active_requests.dec()
        return response
class ContentSecurityPolicyMiddleware(MiddlewareMixin):
    """Attach the configured Content-Security-Policy header to responses
    that do not already carry one; unloads itself when no policy is set."""

    def __init__(self, get_response):
        self.get_response = get_response
        self.secure_content_security_policy = SECURE_CONTENT_SECURITY_POLICY.get()
        if not self.secure_content_security_policy:
            LOG.info('Unloading ContentSecurityPolicyMiddleware')
            raise exceptions.MiddlewareNotUsed

    def process_response(self, request, response):
        policy = self.secure_content_security_policy
        if policy and 'Content-Security-Policy' not in response:
            response["Content-Security-Policy"] = policy
        return response
class MimeTypeJSFileFixStreamingMiddleware(MiddlewareMixin):
    """
    Middleware to detect and fix ".js" mimetype. SLES 11SP4 as example OS which detect js file
    as "text/x-js" and if strict X-Content-Type-Options=nosniff is set then browser fails to
    execute javascript file.
    """

    def __init__(self, get_response):
        self.get_response = get_response
        # If the platform already maps .js to a JavaScript mimetype there is
        # nothing to fix, so this middleware unloads itself.
        acceptable = ('application/javascript', 'application/ecmascript')
        guessed, _encoding = mimetypes.guess_type("dummy.js")
        if guessed in acceptable:
            LOG.info('Unloading MimeTypeJSFileFixStreamingMiddleware')
            raise exceptions.MiddlewareNotUsed

    def process_response(self, request, response):
        # Force the correct mimetype for any .js path.
        if request.path_info.endswith('.js'):
            response['Content-Type'] = "application/javascript"
        return response
class MultipleProxyMiddleware:
    FORWARDED_FOR_FIELDS = [
        'HTTP_X_FORWARDED_FOR',
        'HTTP_X_FORWARDED_HOST',
        'HTTP_X_FORWARDED_SERVER',
    ]

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        """
        Rewrites the proxy headers so that only the most
        recent proxy is used.
        """
        meta = request.META
        for header in self.FORWARDED_FOR_FIELDS:
            value = meta.get(header)
            if value is not None and ',' in value:
                # Keep only the last (most recent) entry of the proxy chain.
                meta[header] = value.rsplit(',', 1)[-1].strip()
        return self.get_response(request)
| 36.675476
| 138
| 0.691252
|
4a0b0da5eeb937f2e1cfd584e38ae487a41c08f6
| 267
|
py
|
Python
|
starlette_cbge/endpoints/__init__.py
|
gvbgduh/starlette-cbge
|
4c18a1cf1cfa088d67a61b89e64217e2e4dac809
|
[
"MIT"
] | 7
|
2019-09-01T21:37:23.000Z
|
2020-05-12T19:36:04.000Z
|
starlette_cbge/endpoints/__init__.py
|
gvbgduh/starlette-cbge
|
4c18a1cf1cfa088d67a61b89e64217e2e4dac809
|
[
"MIT"
] | null | null | null |
starlette_cbge/endpoints/__init__.py
|
gvbgduh/starlette-cbge
|
4c18a1cf1cfa088d67a61b89e64217e2e4dac809
|
[
"MIT"
] | null | null | null |
from starlette_cbge.endpoints.base import BaseEndpoint
from starlette_cbge.endpoints.list_endpoint import ListEndpoint
from starlette_cbge.endpoints.pydantic_base import PydanticBaseEndpoint
from starlette_cbge.endpoints.typesystem_base import TypesystemBaseEndpoint
| 53.4
| 75
| 0.910112
|
4a0b0db3690c48784cfc8d99f64ab8c1f317686a
| 4,107
|
py
|
Python
|
venv/lib/python3.8/site-packages/matplotlib/testing/jpl_units/StrConverter.py
|
willBear/willBear-Fundamental_Analysis
|
bc67eb1e69dcf6765c0b77314d37f7f165a7318f
|
[
"MIT"
] | 15
|
2020-06-29T08:33:39.000Z
|
2022-02-12T00:28:51.000Z
|
venv/lib/python3.8/site-packages/matplotlib/testing/jpl_units/StrConverter.py
|
willBear/willBear-Fundamental_Analysis
|
bc67eb1e69dcf6765c0b77314d37f7f165a7318f
|
[
"MIT"
] | 30
|
2020-04-15T19:37:40.000Z
|
2020-04-22T21:19:35.000Z
|
venv/lib/python3.8/site-packages/matplotlib/testing/jpl_units/StrConverter.py
|
willBear/willBear-Fundamental_Analysis
|
bc67eb1e69dcf6765c0b77314d37f7f165a7318f
|
[
"MIT"
] | 11
|
2020-06-29T08:40:24.000Z
|
2022-02-24T17:39:16.000Z
|
"""StrConverter module containing class StrConverter."""
import numpy as np
import matplotlib.units as units
__all__ = ['StrConverter']
class StrConverter(units.ConversionInterface):
    """: A matplotlib converter class. Provides matplotlib conversion
    functionality for string data values.

    Valid units for string are:
    - 'indexed' : Values are indexed as they are specified for plotting.
    - 'sorted' : Values are sorted alphanumerically.
    - 'inverted' : Values are inverted so that the first value is on top.
    - 'sorted-inverted' : A combination of 'sorted' and 'inverted'
    """

    @staticmethod
    def axisinfo(unit, axis):
        """: Returns information on how to handle an axis that has string data.

        = INPUT VARIABLES
        - axis    The axis using this converter.
        - unit    The units to use for a axis with string data.

        = RETURN VALUE
        - Returns a matplotlib AxisInfo data structure that contains
          minor/major formatters, major/minor locators, and default
          label information.
        """
        # No special locators/formatters: convert() configures the ticks
        # directly on the axis.
        return None

    @staticmethod
    def convert(value, unit, axis):
        """: Convert value using unit to a float. If value is a sequence, return
        the converted sequence.

        = INPUT VARIABLES
        - axis    The axis using this converter.
        - value   The value or list of values that need to be converted.
        - unit    The units to use for a axis with Epoch data.

        = RETURN VALUE
        - Returns the value parameter converted to floats.

        NOTE: this method mutates the axis (tick locations/labels and view
        limits) as a side effect of converting the strings.
        """
        # Already numeric: nothing to convert.
        if units.ConversionInterface.is_numlike(value):
            return value

        if value == []:
            return []

        # we delay loading to make matplotlib happy
        ax = axis.axes
        if axis is ax.get_xaxis():
            isXAxis = True
        else:
            isXAxis = False

        # Collect the labels already present on the axis so repeated calls
        # extend the category list instead of resetting it.
        axis.get_major_ticks()
        ticks = axis.get_ticklocs()
        labels = axis.get_ticklabels()

        labels = [l.get_text() for l in labels if l.get_text()]

        if not labels:
            ticks = []
            labels = []

        if not np.iterable(value):
            value = [value]

        # Append any labels not yet on the axis, preserving first-seen order
        # and dropping duplicates within `value` itself.
        newValues = []
        for v in value:
            if v not in labels and v not in newValues:
                newValues.append(v)

        labels.extend(newValues)

        # DISABLED: This is disabled because matplotlib bar plots do not
        # DISABLED: recalculate the unit conversion of the data values
        # DISABLED: this is due to design and is not really a bug.
        # DISABLED: If this gets changed, then we can activate the following
        # DISABLED: block of code.  Note that this works for line plots.
        # DISABLED if unit:
        # DISABLED     if unit.find("sorted") > -1:
        # DISABLED         labels.sort()
        # DISABLED     if unit.find("inverted") > -1:
        # DISABLED         labels = labels[::-1]

        # add padding (so they do not appear on the axes themselves)
        labels = [''] + labels + ['']
        ticks = list(range(len(labels)))
        # Pull the padding ticks half a slot inward so the sentinel empty
        # labels sit off the data positions.
        ticks[0] = 0.5
        ticks[-1] = ticks[-1] - 0.5

        axis.set_ticks(ticks)
        axis.set_ticklabels(labels)

        # we have to do the following lines to make ax.autoscale_view work
        loc = axis.get_major_locator()
        loc.set_bounds(ticks[0], ticks[-1])

        if isXAxis:
            ax.set_xlim(ticks[0], ticks[-1])
        else:
            ax.set_ylim(ticks[0], ticks[-1])

        # Map each input string to its integer tick position.
        result = [ticks[labels.index(v)] for v in value]

        ax.viewLim.ignore(-1)

        return result

    @staticmethod
    def default_units(value, axis):
        """: Return the default unit for value, or None.

        = INPUT VARIABLES
        - axis    The axis using this converter.
        - value   The value or list of values that need units.

        = RETURN VALUE
        - Returns the default units to use for value.
        Return the default unit for value, or None.
        """
        # The default behavior for string indexing.
        return "indexed"
| 31.113636
| 81
| 0.59216
|
4a0b0e7111f982e547040c4422c213e76cb24eab
| 948
|
py
|
Python
|
makepost.py
|
sdlyyxy/sdlyyxy.github.io
|
2e8c00f5cbb56ad65e60aa82dc6f6d7569244d91
|
[
"MIT"
] | null | null | null |
makepost.py
|
sdlyyxy/sdlyyxy.github.io
|
2e8c00f5cbb56ad65e60aa82dc6f6d7569244d91
|
[
"MIT"
] | 3
|
2020-02-25T07:56:10.000Z
|
2022-02-26T03:30:52.000Z
|
makepost.py
|
sdlyyxy/sdlyyxy.github.io
|
2e8c00f5cbb56ad65e60aa82dc6f6d7569244d91
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# BUG FIX: the shebang must be the very first line to take effect, and the
# previous coding cookie ("#-*_coding:utf8-*-") was malformed; both fixed.
"""Turn a markdown draft into a dated Jekyll post under _posts/ and remove the draft."""
import sys
import os
import time
import platform

fileName = ''
if len(sys.argv) > 1:
    fileName = sys.argv[1]
if fileName == '':
    print('Usage: ./makepost.py <filename>')
    sys.exit()

# Build the Jekyll front matter.
output = ''
output += '---\n'
output += 'layout: post\n'
# Title = file name minus its extension (assumes a 3-char suffix like ".md").
output += 'title: %s\n' % (os.path.basename(fileName)[:-3])
formatTime = time.strftime("%Y-%m-%d %H:%M:%S +0800", time.localtime())
output += 'date: ' + formatTime + '\n'

# raw_input() only exists on Python 2.
# NOTE(review): the open(..., encoding=...) calls below are Python 3 only,
# so the Python 2 branch is likely dead code -- confirm before removing.
ver = platform.python_version()
if ver[0] == '2':
    postTags = raw_input("Please input the tags of this article:")
else:
    postTags = input("Please input the tags of this article:")
output += 'tag: [' + postTags + ']\n'
output += '---\n\n'

# Append the draft body. BUG FIX: files are now closed via context managers
# (they were previously left open).
with open(fileName, encoding="utf8") as f:
    output += f.read()

outFileName = '_posts/' + \
    time.strftime("%Y-%m-%d-", time.localtime()) + os.path.basename(fileName)
with open(outFileName, 'w', encoding='utf8') as f:
    f.write(output)

# Delete the draft only after the post was written successfully.
os.remove(fileName)
| 27.085714
| 77
| 0.627637
|
4a0b0eaaad15bd5e40bd490813ec8a76371daf97
| 799
|
py
|
Python
|
rackio_AI/preprocessing/synthetic_data_base.py
|
JesusDBS/RackioAI
|
01bcb0c06e73ae6f3ed0bdcf25ce3328456d6786
|
[
"MIT"
] | null | null | null |
rackio_AI/preprocessing/synthetic_data_base.py
|
JesusDBS/RackioAI
|
01bcb0c06e73ae6f3ed0bdcf25ce3328456d6786
|
[
"MIT"
] | null | null | null |
rackio_AI/preprocessing/synthetic_data_base.py
|
JesusDBS/RackioAI
|
01bcb0c06e73ae6f3ed0bdcf25ce3328456d6786
|
[
"MIT"
] | 1
|
2021-05-19T22:32:44.000Z
|
2021-05-19T22:32:44.000Z
|
from rackio_AI.decorators import typeCheckedAttribute
import numpy as np
import pandas as pd
import functools
from abc import abstractmethod, ABCMeta
@typeCheckedAttribute.typeassert(data=[pd.Series, pd.DataFrame, np.ndarray])
class PrepareData(metaclass=ABCMeta):
    """Abstract base for synthetic-data preparation pipelines.

    Subclasses must implement done(); pipeline stages can be marked with
    the step() decorator (usable bare or with arguments).
    """

    def __init__(self):
        pass

    def __str__(self):
        return '{}'.format(self.__dict__)

    @staticmethod
    def step(function=None, *args, **kwargs):
        """Decorator marking a pipeline step.

        Supports both ``@PrepareData.step`` and ``@PrepareData.step(...)``
        invocation styles.
        """
        def decorator(function):
            @functools.wraps(function)
            def wrap(*args, **options):
                # BUG FIX: the wrapper previously discarded the wrapped
                # function's return value; propagate it to the caller.
                return function(*args, **options)
            return wrap

        # Called bare (@step): `function` is the target; called with
        # arguments (@step(...)): return the decorator itself.
        if function is None:
            return decorator
        else:
            return decorator(function)

    @abstractmethod
    def done(self, *args, **kwargs):
        """Finalize the preparation; must be implemented by subclasses."""
        pass
| 23.5
| 76
| 0.634543
|
4a0b0f2679fe1b8c534f34e3301540d2a3e9c212
| 1,398
|
py
|
Python
|
face_recognition.py
|
lotus-biswas/Real-Time-Face-Recognition
|
e205eb9fc402d45046e14dd5429a90d9e2dfe334
|
[
"MIT"
] | 1
|
2019-09-14T18:11:52.000Z
|
2019-09-14T18:11:52.000Z
|
face_recognition.py
|
lotus-biswas/Real-Time-Face-Recognition
|
e205eb9fc402d45046e14dd5429a90d9e2dfe334
|
[
"MIT"
] | null | null | null |
face_recognition.py
|
lotus-biswas/Real-Time-Face-Recognition
|
e205eb9fc402d45046e14dd5429a90d9e2dfe334
|
[
"MIT"
] | null | null | null |
# Real-time face recognition demo: reads frames from the default webcam,
# detects faces with a Haar cascade, and labels them with a pre-trained
# LBPH recognizer. Press 'q' to quit.
import cv2
import numpy as np
import os

def assure_path_exists(path):
    # Create the parent directory of `path` if it does not exist yet.
    dir = os.path.dirname(path)
    if not os.path.exists(dir):
        os.makedirs(dir)

# Load the trained LBPH model (produced by a separate training script).
recognizer = cv2.face.LBPHFaceRecognizer_create()
assure_path_exists("trainer/")
recognizer.read('trainer/trainer.yml')

# Haar cascade for frontal-face detection.
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath);
font = cv2.FONT_HERSHEY_SIMPLEX

# Default webcam.
cam = cv2.VideoCapture(0)

while True:
    ret, im =cam.read()
    gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
    # scaleFactor=1.2, minNeighbors=5
    faces = faceCascade.detectMultiScale(gray, 1.2,5)
    for(x,y,w,h) in faces:
        # Green box with 20px padding around the detected face.
        cv2.rectangle(im, (x-20,y-20), (x+w+20,y+h+20), (0,255,0), 4)
        # NOTE(review): for LBPH a LOWER `confidence` value means a BETTER
        # match, so "100 - confidence" is only a rough score and the
        # confidence>35 "Face Found" branch looks inverted -- confirm.
        Id, confidence = recognizer.predict(gray[y:y+h,x:x+w])
        if(Id == 1):
            Id = "Lotus {0:.2f}%".format(round(100 - confidence, 2))
        # Filled label banner above the face box.
        cv2.rectangle(im, (x-22,y-90), (x+w+22, y-22), (0,255,0), -1)
        cv2.putText(im, str(Id), (x,y-40), font, 1, (255,255,255), 3)
        if(confidence>35):
            cv2.putText(im, "Face Found", (x,y), font, 1, (255,255,255), 3)
        if(confidence<35):
            cv2.putText(im, "Face Not Found", (x,y), font, 1, (255,255,255), 3)
    cv2.imshow('im',im)
    # Quit on 'q'.
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

cam.release()
cv2.destroyAllWindows()
| 17.696203
| 79
| 0.577969
|
4a0b10d327b93ad41d3335a69afdb1a6b77c1523
| 3,265
|
py
|
Python
|
detectron2/utils/testing.py
|
FunkyKoki/SparseR-CNN
|
84ccd42e974b7c7a7a4d05942b6e8b120146d5b3
|
[
"MIT"
] | 1,158
|
2020-11-24T04:44:06.000Z
|
2022-03-31T07:24:11.000Z
|
detectron2/utils/testing.py
|
FunkyKoki/SparseR-CNN
|
84ccd42e974b7c7a7a4d05942b6e8b120146d5b3
|
[
"MIT"
] | 94
|
2020-11-25T08:29:07.000Z
|
2022-03-30T09:18:09.000Z
|
detectron2/utils/testing.py
|
FunkyKoki/SparseR-CNN
|
84ccd42e974b7c7a7a4d05942b6e8b120146d5b3
|
[
"MIT"
] | 189
|
2020-11-24T07:32:03.000Z
|
2022-03-28T06:16:30.000Z
|
import numpy as np
import torch
from detectron2 import model_zoo
from detectron2.data import DatasetCatalog
from detectron2.data.detection_utils import read_image
from detectron2.modeling import build_model
from detectron2.structures import Boxes
from detectron2.utils.file_io import PathManager
"""
Internal utilities for tests. Don't use except for writing tests.
"""
def get_model_no_weights(config_path):
    """Build the model described by a model-zoo config without loading any
    weights (not even pretrained ones) -- unlike model_zoo.get."""
    config = model_zoo.get_config(config_path)
    # Fall back to CPU when no GPU is present so the model builds anywhere.
    cuda_available = torch.cuda.is_available()
    if not cuda_available:
        config.MODEL.DEVICE = "cpu"
    return build_model(config)
def random_boxes(num_boxes, max_coord=100, device="cpu"):
    """
    Create a random Nx4 (XYXY) boxes tensor with coordinates < max_coord.
    """
    # Sample both corners' offsets in [0, max_coord/2), then clamp away
    # near-zero values: tiny boxes cause numerical instability in box
    # regression.
    half_range = max_coord * 0.5
    boxes = torch.rand(num_boxes, 4, device=device) * half_range
    boxes.clamp_(min=1.0)
    # torchvision instead does boxes[:, 2:] += torch.rand(N, 2) * 100, which
    # does not guarantee x2 >= x1 and y2 >= y1; adding the min corner to the
    # max corner does.
    boxes[:, 2:] += boxes[:, :2]
    return boxes
def get_sample_coco_image(tensor=True):
    """Return one sample COCO image in BGR color.

    Args:
        tensor (bool): if True, return a 3xHxW torch tensor;
            otherwise an HxWx3 numpy array.
    """
    # Prefer a locally registered COCO image; fall back to downloading one
    # so public CI (with no datasets) can still run.
    try:
        file_name = DatasetCatalog.get("coco_2017_train")[0]["file_name"]
        if not PathManager.exists(file_name):
            raise FileNotFoundError()
    except IOError:  # FileNotFoundError is a subclass of IOError/OSError
        file_name = "http://images.cocodataset.org/train2017/000000000009.jpg"
    image = read_image(file_name, format="BGR")
    if not tensor:
        return image
    # HWC -> CHW; ascontiguousarray because transpose yields a strided view.
    chw = np.ascontiguousarray(image.transpose(2, 0, 1))
    return torch.from_numpy(chw)
def assert_instances_allclose(input, other, rtol=1e-5, msg=""):
    """Assert that two Instances objects are (approximately) equal:
    same image_size, same field names, and per-field values equal within
    a magnitude-scaled tolerance.

    Args:
        input, other (Instances):
        rtol (float): relative tolerance for floating-point fields.
        msg (str): prefix for assertion messages.

    NOTE: `input` shadows the builtin; kept for interface compatibility.
    """
    if not msg:
        msg = "Two Instances are different! "
    else:
        msg = msg.rstrip() + " "

    assert input.image_size == other.image_size, (
        msg + f"image_size is {input.image_size} vs. {other.image_size}!"
    )
    # Field sets must match exactly (order-insensitive).
    fields = sorted(input.get_fields().keys())
    fields_other = sorted(other.get_fields().keys())
    assert fields == fields_other, msg + f"Fields are {fields} vs {fields_other}!"

    for f in fields:
        val1, val2 = input.get(f), other.get(f)
        if isinstance(val1, Boxes):
            # boxes in the range of O(100) and can have a larger tolerance
            assert torch.allclose(val1.tensor, val2.tensor, atol=100 * rtol), (
                msg + f"Field {f} differs too much!"
            )
        elif isinstance(val1, torch.Tensor):
            if val1.dtype.is_floating_point:
                # Scale the absolute tolerance by the field's magnitude so
                # rtol behaves like a relative tolerance.
                mag = torch.abs(val1).max().cpu().item()
                assert torch.allclose(val1, val2, atol=mag * rtol), (
                    msg + f"Field {f} differs too much!"
                )
            else:
                # Integral/bool tensors must match exactly.
                assert torch.equal(val1, val2), msg + f"Field {f} is different!"
        else:
            raise ValueError(f"Don't know how to compare type {type(val1)}")
| 34.010417
| 85
| 0.627871
|
4a0b11dd6043ea4acffc58db58153bb81cb06d58
| 1,756
|
py
|
Python
|
2021/day4.py
|
blin00/advent-of-code
|
0a8cafb1eb6c2ec0b35af2af1fdbf6498ca0a83f
|
[
"Unlicense"
] | 7
|
2021-12-11T00:04:11.000Z
|
2021-12-30T11:13:36.000Z
|
2021/day4.py
|
blin00/advent-of-code
|
0a8cafb1eb6c2ec0b35af2af1fdbf6498ca0a83f
|
[
"Unlicense"
] | null | null | null |
2021/day4.py
|
blin00/advent-of-code
|
0a8cafb1eb6c2ec0b35af2af1fdbf6498ca0a83f
|
[
"Unlicense"
] | 2
|
2021-12-18T10:15:43.000Z
|
2021-12-22T05:11:32.000Z
|
from sortedcontainers import *
from bisect import *
from collections import *
from functools import *
from heapq import *
import itertools
from string import whitespace, ascii_lowercase, ascii_uppercase, ascii_letters, digits, hexdigits, octdigits, punctuation, printable
from util import *
"""
echo "=== sample ===" ; py day4.py < sample.in
echo "=== real ===" ; py day4.py < day4.in
echo "=== sample ===" ; py day4.py < sample.in ; echo "=== real ===" ; py day4.py < day4.in
"""
A = read_input('/dev/stdin')
N = len(A)
R = N
order = map(int, A[0].split(','))
A = A[2:]
idx = 0
boards = []
marked = []
def gen_empty_mark():
return [[False] * 5 for _ in range(5)]
def is_win(idx):
mark = marked[idx]
# row
for row in mark:
if all(row):
return True
# col
for j in range(5):
if all(mark[i][j] for i in range(5)):
return True
return False
while True:
if idx >= len(A):
break
board = []
for i in range(5):
line = A[idx]
board.append(map(maybe_int, line.split()))
idx += 1
idx += 1
boards.append(board)
marked.append(gen_empty_mark())
# print(boards)
N = len(boards)
def sum_unmarked(idx):
res = 0
for i in range(5):
for j in range(5):
if not marked[idx][i][j]:
res += boards[idx][i][j]
return res
already_won = [False] * N
for num in order:
for idx in range(N):
for i in range(5):
for j in range(5):
if boards[idx][i][j] == num:
marked[idx][i][j] = True
for idx in range(N):
if is_win(idx) and not already_won[idx]:
already_won[idx] = True
print(num * sum_unmarked(idx))
| 21.156627
| 132
| 0.556948
|
4a0b1278861dad6e88e015739aa3a5b376e8d695
| 1,580
|
py
|
Python
|
django_fsm_log/managers.py
|
lananelson/django-fsm-log
|
3e82a2461d452691c3fb8b539fb8b950bafdd4f1
|
[
"MIT"
] | null | null | null |
django_fsm_log/managers.py
|
lananelson/django-fsm-log
|
3e82a2461d452691c3fb8b539fb8b950bafdd4f1
|
[
"MIT"
] | null | null | null |
django_fsm_log/managers.py
|
lananelson/django-fsm-log
|
3e82a2461d452691c3fb8b539fb8b950bafdd4f1
|
[
"MIT"
] | null | null | null |
import django
from django.db import models
from django.db.models.query import QuerySet
from django.contrib.contenttypes.models import ContentType
from django_fsm_log.backends import cache
class StateLogQuerySet(QuerySet):
    """QuerySet with helpers for filtering state logs by their target object."""

    def _get_content_type(self, obj):
        # Resolved via Django's ContentType framework (internally cached).
        return ContentType.objects.get_for_model(obj)

    def for_(self, obj):
        """Filter to log entries attached to ``obj`` (content type + pk)."""
        content_type = self._get_content_type(obj)
        return self.filter(content_type=content_type, object_id=obj.pk)
class StateLogManager(models.Manager):
    """Default manager that exposes StateLogQuerySet's extra methods."""

    def get_queryset(self):
        # NOTE(review): the stock pattern is
        # StateLogQuerySet(self.model, using=self._db); omitting `using`
        # bypasses database routing -- confirm this is intended.
        return StateLogQuerySet(self.model)

    if django.VERSION < (1, 7):
        # Django < 1.7 used the old get_query_set spelling.
        get_query_set = get_queryset

    def __getattr__(self, attr, *args):
        # Proxy unknown public attributes to the queryset so custom queryset
        # methods (e.g. for_()) are reachable directly from the manager.
        # see https://code.djangoproject.com/ticket/15062 for details
        if attr.startswith("_"):
            raise AttributeError
        return getattr(self.get_queryset(), attr, *args)
class PendingStateLogManager(models.Manager):
    """Manager that parks not-yet-committed state log entries in the cache
    backend until they are committed (saved) or discarded."""

    def _get_cache_key_for_object(self, obj):
        # Key derived from the object's class name and primary key.
        return 'StateLog:{}:{}'.format(obj.__class__.__name__, obj.pk)

    def create(self, *args, **kwargs):
        """Build a pending (unsaved) log entry and stash it in the cache."""
        pending = self.model(**kwargs)
        cache_key = self._get_cache_key_for_object(kwargs['content_object'])
        cache.set(cache_key, pending)
        return pending

    def commit_for_object(self, obj):
        """Persist the pending entry for ``obj`` and evict it from the cache."""
        cache_key = self._get_cache_key_for_object(obj)
        pending = self.get_for_object(obj)
        pending.save()
        cache.delete(cache_key)
        return pending

    def get_for_object(self, obj):
        """Return the cached pending entry for ``obj`` (None on a miss)."""
        return cache.get(self._get_cache_key_for_object(obj))
| 28.214286
| 70
| 0.657595
|
4a0b143a95c1bda1d634802426daeb2adccb6ac9
| 514
|
py
|
Python
|
tests/unit_tests/cx_core/color_helper_test.py
|
xaviml/z2m_ikea_controller
|
e612af5a913e8b4784dcaa23ea5319115427d083
|
[
"MIT"
] | 19
|
2019-11-21T19:51:40.000Z
|
2020-01-14T09:24:33.000Z
|
tests/unit_tests/cx_core/color_helper_test.py
|
xaviml/z2m_ikea_controller
|
e612af5a913e8b4784dcaa23ea5319115427d083
|
[
"MIT"
] | 11
|
2019-11-20T16:43:35.000Z
|
2020-01-17T16:23:06.000Z
|
tests/unit_tests/cx_core/color_helper_test.py
|
xaviml/z2m_ikea_controller
|
e612af5a913e8b4784dcaa23ea5319115427d083
|
[
"MIT"
] | 5
|
2019-12-20T21:31:07.000Z
|
2020-01-06T18:49:52.000Z
|
import pytest
from cx_core.color_helper import Colors, get_color_wheel
from tests.test_utils import wrap_execution
@pytest.mark.parametrize(
    "colors, error_expected",
    [
        ("default_color_wheel", False),
        ("non_existing", True),
        ([(0.2, 0.3), (0.4, 0.5)], False),
        (0, True),
    ],
)
def test_get_color_wheel(colors: Colors, error_expected: bool) -> None:
    # wrap_execution asserts that a ValueError is raised exactly when expected.
    with wrap_execution(error_expected=error_expected, exception=ValueError):
        wheel = get_color_wheel(colors)  # the call itself is what's under test
| 27.052632
| 77
| 0.678988
|
4a0b14f2216dceef82f732cde6938e736d7064d7
| 409
|
py
|
Python
|
mypackage/janken.py
|
ofl/kuku2
|
7247fb1862d917d23258ebe7a93dca5939433225
|
[
"MIT"
] | null | null | null |
mypackage/janken.py
|
ofl/kuku2
|
7247fb1862d917d23258ebe7a93dca5939433225
|
[
"MIT"
] | 1
|
2021-11-13T08:03:04.000Z
|
2021-11-13T08:03:04.000Z
|
mypackage/janken.py
|
ofl/kuku2
|
7247fb1862d917d23258ebe7a93dca5939433225
|
[
"MIT"
] | null | null | null |
import random
HANDS = ['グー', 'チョキ', 'パー']  # rock, scissors, paper


class Janken():
    """A rock-paper-scissors hand, drawn at random on construction."""

    def __init__(self):
        self.hand = random.choice(HANDS)

    def __eq__(self, other):
        """Two hands are equal when they show the same symbol."""
        return self.hand == other.hand

    def __gt__(self, other):
        """self > other means self beats other.

        In HANDS order (rock, scissors, paper) the winner sits exactly one
        position before the loser, or wraps around (paper beats rock).
        """
        diff = HANDS.index(self.hand) - HANDS.index(other.hand)
        return diff in (-1, 2)
| 25.5625
| 69
| 0.613692
|
4a0b151eb902d3ce87cea9650c5518dc7f63cace
| 402
|
py
|
Python
|
tests/data/expected/openapi/default_template/body_and_parameters/models.py
|
lakkay/fastapi-code-generator
|
9f562b2d2ce18c4d037e36b7d3f229793f1eaed5
|
[
"MIT"
] | null | null | null |
tests/data/expected/openapi/default_template/body_and_parameters/models.py
|
lakkay/fastapi-code-generator
|
9f562b2d2ce18c4d037e36b7d3f229793f1eaed5
|
[
"MIT"
] | null | null | null |
tests/data/expected/openapi/default_template/body_and_parameters/models.py
|
lakkay/fastapi-code-generator
|
9f562b2d2ce18c4d037e36b7d3f229793f1eaed5
|
[
"MIT"
] | null | null | null |
# generated by datamodel-codegen:
# filename: body_and_parameters.yaml
# timestamp: 2020-06-19T00:00:00+00:00
from typing import Optional
from pydantic import BaseModel
class Pet(BaseModel):
    """A pet record (generated by datamodel-codegen; do not hand-edit logic)."""
    id: int  # unique identifier
    name: str
    tag: Optional[str] = None  # optional free-form grouping label
class Error(BaseModel):
    """An API error payload (generated by datamodel-codegen)."""
    code: int  # machine-readable error code
    message: str  # human-readable description
class PetForm(BaseModel):
name: Optional[str] = None
age: Optional[int] = None
| 16.75
| 40
| 0.691542
|
4a0b15ad702664b08ddf8fd8d24d6504cd0c102a
| 531
|
py
|
Python
|
styczen/generator.py
|
angelm1974/Bydgoszcz_2_spotkanie
|
377eab44cd7d39aa23c83762c2dc2514d8a1edd8
|
[
"MIT"
] | null | null | null |
styczen/generator.py
|
angelm1974/Bydgoszcz_2_spotkanie
|
377eab44cd7d39aa23c83762c2dc2514d8a1edd8
|
[
"MIT"
] | null | null | null |
styczen/generator.py
|
angelm1974/Bydgoszcz_2_spotkanie
|
377eab44cd7d39aa23c83762c2dc2514d8a1edd8
|
[
"MIT"
] | null | null | null |
class Fib:
def __init__(self,nn):
print("inicjujemy")
self.__n=nn
self.__i=0
self.__p1=self.__p2=1
def __iter__(self):
print('iter')
return self
def __next__(self):
print('next')
self.__i+=1
if self.__i>self.__n:
raise StopIteration
if self.__i in[1,2]:
return 1
ret = self.__p1 + self.__p2
self.__p1,self.__p2 = self.__p2,ret
return ret
for i in Fib(10):
print(i)
| 22.125
| 43
| 0.500942
|
4a0b16e3462e87c204b6e5acb0c599d3bda74043
| 12,802
|
py
|
Python
|
pytorch_probgraph/hm.py
|
kpoeppel/pytorch_probgraph
|
b78595ab03bbe92595ad2f6b35f5dd8bf84d6da0
|
[
"BSD-3-Clause"
] | 47
|
2020-08-10T02:04:26.000Z
|
2022-03-23T22:20:56.000Z
|
pytorch_probgraph/hm.py
|
kpoeppel/pytorch_probgraph
|
b78595ab03bbe92595ad2f6b35f5dd8bf84d6da0
|
[
"BSD-3-Clause"
] | null | null | null |
pytorch_probgraph/hm.py
|
kpoeppel/pytorch_probgraph
|
b78595ab03bbe92595ad2f6b35f5dd8bf84d6da0
|
[
"BSD-3-Clause"
] | 4
|
2020-08-10T15:32:06.000Z
|
2021-12-29T15:04:20.000Z
|
'''
A library implementing a generic sigmoid belief network aka Helmholtz Machine.
'''
from typing import List, Tuple, Union
from .interaction import Interaction
from .unitlayer import UnitLayer
from itertools import chain
import torch
import numpy as np
from .utils import ListModule
def logsumexp(x, dim=0, keepdim=False):
maxval = torch.max(x, dim=dim, keepdim=True).values
return torch.log(torch.sum(torch.exp(x - maxval), dim=dim, keepdim=keepdim))\
+ torch.sum(maxval, dim, keepdim=keepdim)
class HelmholtzMachine(torch.nn.Module):
'''
A multilayer sigmoid belief network with (reweighted) wake-sleep learning.
Using asymmetric conditional probabilities (interaction weights).
From:
[1] G.Hinton et al. "The wake-sleep algorithm for unsupervised
neural networks"
[2] Peter Dayan: Helmholtz Machines and Wake-Sleep Learning
http://www.gatsby.ucl.ac.uk/~dayan/papers/d2000a.pdf
Note that this implementation uses tied biases for generative and
reconstructed probabilities.
[3] https://arxiv.org/pdf/1406.2751.pdf
[4] https://github.com/jbornschein/reweighted-ws
'''
def __init__(self,
layers: List[UnitLayer],
interactionsUp: List[Interaction],
interactionsDown: List[Interaction],
optimizer: torch.optim.Optimizer):
'''
:param layers: UnitLayers of Random Units
:param interactionsUp: List of Interactions upwards
:param interactionsDown: List of Interactions downwards
:param optimizer: Optimizer for training
'''
super().__init__()
if len(interactionsUp) != len(interactionsDown) or \
len(layers)-1 != len(interactionsUp):
raise ValueError('Non fitting layers')
self.layers = ListModule(*layers)
self.intsUp = ListModule(*interactionsUp)
self.intsDown = ListModule(*interactionsDown)
self.optim = optimizer
def sampleQ(self,
data: torch.Tensor
) -> Tuple[List[torch.Tensor],
List[torch.Tensor],
List[torch.Tensor],
torch.Tensor]:
'''
:param data: Data to sample Q (reconstruction model) from.
:return: Samples/Means/Logprobs from reconstruction distribution (for all layers) + Total LogProb
'''
samplesUp = [data]
meansUp = [None]
logprobsUp = [0.]
logprobsUp_total = 0.
nlayers = len(self.layers)
for i in range(nlayers-1):
intterm = self.intsUp[i].gradOutput(self.layers[i].transform(samplesUp[i]))
mean = self.layers[i+1].mean_cond(interaction=intterm)
samp = self.layers[i+1].sample_cond(interaction=intterm)
logprob = self.layers[i+1].logprob_cond(samp, intterm)
samplesUp.append(samp)
meansUp.append(mean)
logprobsUp.append(logprob)
logprobsUp_total += logprob
return samplesUp, meansUp, logprobsUp, logprobsUp_total
def logprobP(self,
total_samples: List[torch.Tensor]
) -> Tuple[List[torch.Tensor], torch.Tensor]:
'''
:param total_samples: Samples from all layers
:return: logprob P of generative model of these samples
'''
logprob = [self.layers[-1].logprob_cond(total_samples[-1], interaction=0.)]
logprob_total = logprob[0]
for n in reversed(range(len(self.layers)-1)):
interaction = self.intsDown[n].gradOutput(self.layers[n+1].transform(total_samples[n+1]))
logprobn = self.layers[n].logprob_cond(total_samples[n], interaction=interaction)
logprob = [logprobn] + logprob
logprob_total += logprobn
return logprob, logprob_total
def wakePhaseReweighted(self,
data: torch.Tensor,
ksamples: int=1,
kmax_parallel: int=1000,
train: bool=True,
wakePhaseQ: bool=True
) -> torch.Tensor:
'''
According to https://github.com/jbornschein/reweighted-ws/blob/master/learning/models/rws.py
So k samples are drawn with each data point in batch.
:param data: training batch
:param ksamples: number of samples for reweighting
:param kmax_parallel: max number of samples to run in parallel (for lower memory footprint)
:param train: actually modifiy weights / apply gradients (as this function also returns likelihood)
:param wakePhaseQ: use also wake phase for learning reconstruction model Q
:return: log likelihood of data in the generative model
'''
nthrun = 0
left = ksamples
logprobP_total = None
logprobQ_total = None
while left > 0:
take = min(kmax_parallel, left)
left -= take
shape = list(data.shape)
shape_exp = [take] + shape
shape[0] *= take # data is expanded to ksamples*batchsize in dim 0
# print("Nth Run {}, Take: {}".format(nthrun, take))
nthrun+=1
# sample upward pass q(h | x)
dataExp = data.expand(shape_exp).transpose(0,1).reshape(shape)
samplesUp, meansUp, logprobQ, logprobQ_total_take = self.sampleQ(dataExp)
#
logprobP, logprobP_total_take = self.logprobP(samplesUp)
logprobP_total_take = logprobP_total_take.reshape((-1, take))
logprobQ_total_take = logprobQ_total_take.reshape((-1, take))
if logprobP_total is None:
logprobP_total = logprobP_total_take.detach()
logprobQ_total = logprobQ_total_take.detach()
else:
logprobP_total = torch.cat([logprobP_total, logprobP_total_take.detach()], dim=1)
logprobQ_total = torch.cat([logprobQ_total, logprobQ_total_take.detach()], dim=1)
# loglikelihood
# calculate sampling weights
if train:
nlayers = len(self.layers)-1
logPQ = (logprobP_total_take - logprobQ_total_take - np.log(take))
wnorm = logsumexp(logPQ, dim=1)
logw = logPQ - wnorm.reshape(-1, 1)
w = torch.exp(logw).flatten().reshape(-1,1)
# downward pass, taking same batch size
samplesDown = [None]*nlayers + [self.layers[nlayers].sample_cond(N=data.shape[0])]
meansDown = [None]*nlayers + [self.layers[nlayers].mean_cond(N=data.shape[0])]
for i in reversed(range(nlayers)):
intterm = self.intsDown[i].gradOutput(self.layers[i].transform(samplesUp[i+1]))
mean = self.layers[i].mean_cond(interaction=intterm)
samp = self.layers[i].sample_cond(interaction=intterm)
samplesDown[i] = samp
meansDown[i] = mean
# add stochastic batch gradients, ksamples needed because of internal normalziation
for i in range(len(self.layers)-1):
self.layers[i].backward(samplesUp[i] - meansDown[i], factor=-w.view(-1, *([1]*(len(meansDown[i].shape)-1)))*take)
for i in range(len(self.layers)-1):
self.intsDown[i].backward(self.layers[i+1].transform(samplesUp[i+1]),
self.layers[i].transform(samplesUp[i]),
factor=-w*take)
self.intsDown[i].backward(self.layers[i+1].transform(samplesUp[i+1]),
self.layers[i].transform(meansDown[i]),
factor=w*take)
logPX = logsumexp(logprobP_total - logprobQ_total, dim=1) - np.log(ksamples)
return logPX
def sleepPhase(self,
N: int=1,
train: bool=False
) -> torch.Tensor:
'''
Learning Q in the sleep phase, generating samples.
:param N: number of samples to generate
:param train: actually train weights
:return: (samples, means) N samples and their means generating downwards
'''
nlayers = len(self.layers)-1
samplesDown = [None]*nlayers + [self.layers[nlayers].sample_cond(N=N)]
meansDown = [None]*nlayers + [self.layers[nlayers].mean_cond(N=N)]
# downward pass
for i in reversed(range(nlayers)):
intterm = self.intsDown[i].gradOutput(self.layers[i+1].transform(samplesDown[i+1]))
mean = self.layers[i].mean_cond(interaction=intterm)
samp = self.layers[i].sample_cond(interaction=intterm)
samplesDown[i] = samp
meansDown[i] = mean
# upward pass
samplesUp = [None]*(nlayers+1)
meansUp = [None]*(nlayers+1)
for i in range(nlayers):
intterm = self.intsUp[i].gradOutput(self.layers[i].transform(samplesDown[i]))
mean = self.layers[i+1].mean_cond(interaction=intterm)
samp = self.layers[i+1].sample_cond(interaction=intterm)
samplesUp[i+1] = samp
meansUp[i+1] = mean
# add stochastic batch gradients
if train:
for i in range(1, len(self.layers)):
self.layers[i].backward(samplesDown[i] - meansUp[i], factor=-1)
for i in range(len(self.layers)-1):
self.intsUp[i].backward(self.layers[i].transform(samplesDown[i]),
self.layers[i+1].transform(samplesDown[i+1]),
factor=-1)
self.intsUp[i].backward(self.layers[i].transform(samplesDown[i]),
self.layers[i+1].transform(meansUp[i+1]),
factor=1)
# self.interaction.backward()
return samplesDown, meansDown
def trainReweightedWS(self,
data: torch.Tensor,
ksamples: int = 1,
sleepPhaseQ: bool = True,
wakePhaseQ: bool = False
) -> torch.Tensor:
'''
Reweighted Wake-Sleep following https://arxiv.org/pdf/1406.2751.pdf
:param data: training batch
:param ksamples: number of samples for reweighting
:param sleepPhaseQ: use sleep phase for learning Q
:param wakePhaseQ: use wake phase for learning Q
:return: (estimated) loglikelihood of data
'''
self.zero_grad()
loglik = self.wakePhaseReweighted(data, ksamples=ksamples, train=True, wakePhaseQ=wakePhaseQ)
if sleepPhaseQ:
self.sleepPhase(N=data.shape[0], train=True)
self.optim.step()
return loglik
def trainWS(self,
data: torch.Tensor
) -> torch.Tensor:
'''
Traditional wake sleep-algorithm, using only one sample (no reweighting)
and no wake phase Q learning.
:param data: training data batch
'''
return self.trainReweightedWS(data, ksamples=1, sleepPhaseQ=True, wakePhaseQ=False)
def loglikelihood(self,
data: torch.Tensor,
ksamples: int=1,
kmax_parallel: int=1000
) -> torch.Tensor:
'''
Estimate log likelihood as a byproduct of reweighting.
:param data: data batch
:param ksamples: number of reweighting samples
:param kmax_parallel: maximal number of parallel samples (memory footprint)
:return: loglikelihood of each batch sample
'''
return self.wakePhaseReweighted(data, ksamples=ksamples, kmax_parallel=kmax_parallel, train=False)
def sampleAll(self,
N: int=1
)-> Tuple[List[torch.Tensor],
List[torch.Tensor],
List[torch.Tensor],
torch.Tensor]:
'''
Sample all layers from generative P, (list of samples).
:param N: number of samples
:return: batch of N generated data samples and their means for each layer
'''
return self.sleepPhase(N=N, train=False)
def sample(self,
N: int = 1
) -> torch.Tensor:
'''
Sample only visible layer from generative P.
:param N: number of samples
:return: batch of N generated data samples
'''
return self.sleepPhase(N=N, train=False)[0][0]
| 42.959732
| 133
| 0.574754
|
4a0b174aabf43ee9910d2ad4c9d7508a6bbd62e8
| 3,847
|
py
|
Python
|
maskrcnn_benchmark/modeling/roi_heads/box_head/box_head.py
|
CityU-AIM-Group/NLTE
|
a111390bcc38bd0c759a3a9d971a7d9defce88fb
|
[
"MIT"
] | 1
|
2022-03-30T14:04:55.000Z
|
2022-03-30T14:04:55.000Z
|
maskrcnn_benchmark/modeling/roi_heads/box_head/box_head.py
|
CityU-AIM-Group/NLTE
|
a111390bcc38bd0c759a3a9d971a7d9defce88fb
|
[
"MIT"
] | null | null | null |
maskrcnn_benchmark/modeling/roi_heads/box_head/box_head.py
|
CityU-AIM-Group/NLTE
|
a111390bcc38bd0c759a3a9d971a7d9defce88fb
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
import torch.nn.functional as F
from .roi_box_feature_extractors import make_roi_box_feature_extractor
from .roi_box_predictors import make_roi_box_predictor
from .inference import make_roi_box_post_processor
from .loss import make_roi_box_loss_evaluator
from maskrcnn_benchmark.modeling.utils import cat
class ROIBoxHead(torch.nn.Module):
"""
Generic Box Head class.
"""
def __init__(self, cfg):
super(ROIBoxHead, self).__init__()
self.feature_extractor = make_roi_box_feature_extractor(cfg)
self.predictor = make_roi_box_predictor(cfg)
self.post_processor = make_roi_box_post_processor(cfg)
self.loss_evaluator = make_roi_box_loss_evaluator(cfg)
self.cls_loss_weight = cfg.MODEL.CLS_LOSS_WEIGHT
def forward(self, features, proposals, targets=None):
"""
Arguments:
features (list[Tensor]): feature-maps from possibly several levels
proposals (list[BoxList]): proposal boxes
targets (list[BoxList], optional): the ground-truth targets.
Returns:
x (Tensor): the result of the feature extractor
proposals (list[BoxList]): during training, the subsampled proposals
are returned. During testing, the predicted boxlists are returned
losses (dict[Tensor]): During training, returns the losses for the
head. During testing, returns an empty dict.
"""
if self.training:
# Faster R-CNN subsamples during training the proposals with a fixed
# positive / negative ratio
with torch.no_grad():
proposals = self.loss_evaluator.subsample(proposals, targets)
# extract features that will be fed to the final classifier. The
# feature_extractor generally corresponds to the pooler + heads
x = self.feature_extractor(features, proposals)
if self.training:
domain_masks = cat([proposal.get_field("domain_labels") for proposal in proposals], dim=0)
labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0)
else:
domain_masks = None
labels = None
# final classifier that converts the features into predictions
class_logits, box_regression = self.predictor(x, domain_masks, labels)
if not self.training:
result = self.post_processor((class_logits, box_regression), proposals)
return x, result, {}, None, None
loss_classifier, loss_box_reg, da_ins_labels, labels = self.loss_evaluator(
[class_logits], [box_regression]
)
# if self.training:
# with torch.no_grad():
# da_proposals = self.loss_evaluator.subsample_for_da(proposals, targets)
# da_ins_feas = self.feature_extractor(features, da_proposals)
# class_logits, box_regression = self.predictor(da_ins_feas)
# _, _, da_ins_labels, labels = self.loss_evaluator(
# [class_logits], [box_regression]
# )
others = {}
others["labels"] = labels
others["class_logits"] = class_logits
others["domain_labels"] = da_ins_labels
others["box_regression"] = box_regression
return (
x,
proposals,
dict(loss_classifier=self.cls_loss_weight * loss_classifier, loss_box_reg=loss_box_reg),
da_ins_labels,
others
)
def build_roi_box_head(cfg):
"""
Constructs a new box head.
By default, uses ROIBoxHead, but if it turns out not to be enough, just register a new class
and make it a parameter in the config
"""
return ROIBoxHead(cfg)
| 37.715686
| 102
| 0.658695
|
4a0b17d72595acaa580c50726add31772ff59249
| 856
|
py
|
Python
|
Chapter06/programs/prog14.py
|
gits00/raspberry-pi-computer-vision-programming
|
dfd5588c5d3e410945f862427c0f987536b04d9f
|
[
"MIT"
] | 17
|
2020-08-08T20:47:29.000Z
|
2022-03-12T03:08:21.000Z
|
Chapter06/programs/prog14.py
|
gits00/raspberry-pi-computer-vision-programming
|
dfd5588c5d3e410945f862427c0f987536b04d9f
|
[
"MIT"
] | 1
|
2020-07-27T09:57:19.000Z
|
2020-08-18T10:57:31.000Z
|
Chapter06/programs/prog14.py
|
gits00/raspberry-pi-computer-vision-programming
|
dfd5588c5d3e410945f862427c0f987536b04d9f
|
[
"MIT"
] | 15
|
2020-06-30T01:52:06.000Z
|
2022-02-08T08:28:48.000Z
|
import cv2
import matplotlib.pyplot as plt
import numpy as np
img = cv2.imread('/home/pi/book/dataset/gray21.512.tiff', 1)
th = 127
max_val = 255
ret, o1 = cv2.threshold(img, th, max_val,
cv2.THRESH_BINARY)
print(o1)
ret, o2 = cv2.threshold(img, th, max_val,
cv2.THRESH_BINARY_INV)
ret, o3 = cv2.threshold(img, th, max_val,
cv2.THRESH_TOZERO)
ret, o4 = cv2.threshold(img, th, max_val,
cv2.THRESH_TOZERO_INV)
ret, o5 = cv2.threshold(img, th, max_val,
cv2.THRESH_TRUNC)
titles = ['Input Image', 'BINARY', 'BINARY_INV',
'TOZERO', 'TOZERO_INV', 'TRUNC']
output = [img, o1, o2, o3, o4, o5]
for i in range(6):
plt.subplot(2, 3, i+1)
plt.imshow(output[i], cmap='gray')
plt.title(titles[i])
plt.axis('off')
plt.show()
| 31.703704
| 60
| 0.586449
|
4a0b18886525a3b356f1c1ff4e29d124495dd812
| 12,319
|
py
|
Python
|
examples/deprecated/bench_conv2d_sigmoid.py
|
wqruan/tf-encrypted
|
50ee4ae3ba76b7c1f70a90e18f875191adea0a07
|
[
"Apache-2.0"
] | 825
|
2019-04-18T09:21:32.000Z
|
2022-03-30T05:55:26.000Z
|
examples/deprecated/bench_conv2d_sigmoid.py
|
wqruan/tf-encrypted
|
50ee4ae3ba76b7c1f70a90e18f875191adea0a07
|
[
"Apache-2.0"
] | 354
|
2019-04-18T08:42:40.000Z
|
2022-03-31T18:06:31.000Z
|
examples/deprecated/bench_conv2d_sigmoid.py
|
wqruan/tf-encrypted
|
50ee4ae3ba76b7c1f70a90e18f875191adea0a07
|
[
"Apache-2.0"
] | 161
|
2019-05-02T16:43:31.000Z
|
2022-03-31T01:35:03.000Z
|
import sys
import tensorflow as tf
import tf_encrypted as tfe
from tf_encrypted.layers import Conv2D
from tf_encrypted.layers import Dense
from tf_encrypted.layers import Reshape
from tf_encrypted.layers import Sigmoid
config = tfe.LocalConfig(
["server0", "server1", "crypto-producer", "weights-provider", "prediction-client"]
)
# config = tfe.RemoteConfig([
# ('server0', 'localhost:4440'),
# ('server1', 'localhost:4441'),
# ('crypto-producer', 'localhost:4442'),
# ('weights-provider', 'localhost:4443'),
# ('prediction-client', 'localhost:4444')
# ])
if len(sys.argv) > 1:
if isinstance(config, tfe.LocalConfig):
raise Exception(
"You can launch a configured server only with a remote configuration"
)
#
# assume we're running as a server
#
player_name = str(sys.argv[1])
server = config.server(player_name)
server.start()
server.join()
else:
#
# assume we're running as master
#
input_shape = [1, 3, 192, 192]
conv11_fshape = [3, 3, 3, 64]
conv12_fshape = [3, 3, 64, 64]
pool1_shape = [1, 1, 64, 64]
conv21_fshape = [3, 3, 64, 128]
conv22_fshape = [3, 3, 128, 128]
pool2_shape = [1, 1, 128, 128]
conv31_fshape = [3, 3, 128, 256]
conv32_fshape = [3, 3, 256, 256]
conv33_fshape = [3, 3, 256, 256]
pool3_shape = [1, 1, 256, 256]
conv41_fshape = [3, 3, 256, 512]
conv42_fshape = [3, 3, 512, 512]
conv43_fshape = [3, 3, 512, 512]
pool4_shape = [1, 1, 512, 512]
conv51_fshape = [3, 3, 512, 512]
conv52_fshape = [3, 3, 512, 512]
conv53_fshape = [3, 3, 512, 512]
pool5_shape = [1, 1, 512, 512]
def provide_input_conv11weights() -> tf.Tensor:
w = tf.random_normal(shape=conv11_fshape, dtype=tf.float32)
tf.print(w, [w], message="w11:")
return w
def provide_input_conv12weights() -> tf.Tensor:
w = tf.random_normal(shape=conv12_fshape, dtype=tf.float32)
tf.print(w, [w], message="w12:")
return w
def provide_input_pool1weights() -> tf.Tensor:
w = tf.random_normal(shape=pool1_shape, dtype=tf.float32)
tf.print(w, [w], message="p1:")
return w
def provide_input_conv21weights() -> tf.Tensor:
w = tf.random_normal(shape=conv21_fshape, dtype=tf.float32)
tf.print(w, [w], message="w21:")
return w
def provide_input_conv22weights() -> tf.Tensor:
w = tf.random_normal(shape=conv22_fshape, dtype=tf.float32)
tf.print(w, [w], message="w22:")
return w
def provide_input_pool2weights() -> tf.Tensor:
w = tf.random_normal(shape=pool2_shape, dtype=tf.float32)
tf.print(w, [w], message="p2:")
return w
def provide_input_conv31weights() -> tf.Tensor:
w = tf.random_normal(shape=conv31_fshape, dtype=tf.float32)
tf.print(w, [w], message="w31:")
return w
def provide_input_conv32weights() -> tf.Tensor:
w = tf.random_normal(shape=conv32_fshape, dtype=tf.float32)
tf.print(w, [w], message="w32:")
return w
def provide_input_conv33weights() -> tf.Tensor:
w = tf.random_normal(shape=conv33_fshape, dtype=tf.float32)
tf.print(w, [w], message="w33:")
return w
def provide_input_pool3weights() -> tf.Tensor:
w = tf.random_normal(shape=pool3_shape, dtype=tf.float32)
tf.print(w, [w], message="p3:")
return w
def provide_input_conv41weights() -> tf.Tensor:
w = tf.random_normal(shape=conv41_fshape, dtype=tf.float32)
tf.print(w, [w], message="w41:")
return w
def provide_input_conv42weights() -> tf.Tensor:
w = tf.random_normal(shape=conv42_fshape, dtype=tf.float32)
tf.print(w, [w], message="w42:")
return w
def provide_input_conv43weights() -> tf.Tensor:
w = tf.random_normal(shape=conv43_fshape, dtype=tf.float32)
tf.print(w, [w], message="w43:")
return w
def provide_input_pool4weights() -> tf.Tensor:
w = tf.random_normal(shape=pool4_shape, dtype=tf.float32)
tf.print(w, [w], message="p4:")
return w
def provide_input_conv51weights() -> tf.Tensor:
w = tf.random_normal(shape=conv51_fshape, dtype=tf.float32)
tf.print(w, [w], message="w51:")
return w
def provide_input_conv52weights() -> tf.Tensor:
w = tf.random_normal(shape=conv52_fshape, dtype=tf.float32)
tf.print(w, [w], message="w52:")
return w
def provide_input_conv53weights() -> tf.Tensor:
w = tf.random_normal(shape=conv53_fshape, dtype=tf.float32)
tf.print(w, [w], message="w53:")
return w
def provide_input_pool5weights() -> tf.Tensor:
w = tf.random_normal(shape=pool5_shape, dtype=tf.float32)
tf.print(w, [w], message="p5:")
return w
def provide_input_prediction() -> tf.Tensor:
x = tf.random_normal(shape=input_shape, dtype=tf.float32)
tf.print(x, [x], message="x:")
return x
def receive_output(tensor: tf.Tensor) -> tf.Operation:
tf.print(tensor, [tensor, tf.shape(tensor)], message="output:")
return tensor
with tfe.protocol.Pond(
*config.get_players("server0, server1, crypto-producer")
) as prot:
print("Define the distributed graph")
print("5 blocks of convolutions and a 2-layer FC")
# load input for prediction
x = prot.define_private_input("prediction-client", provide_input_prediction)
print("Define Block 1")
# Block 1
conv11 = Conv2D(input_shape, conv11_fshape, 1, "SAME")
initial_w_conv11 = prot.define_private_input(
"weights-provider", provide_input_conv11weights
)
conv11.initialize(initial_w_conv11)
x = conv11.forward(x)
x = Sigmoid(conv11.get_output_shape()).forward(x)
conv12 = Conv2D(conv11.get_output_shape(), conv12_fshape, 1, "SAME")
initial_w_conv12 = prot.define_private_input(
"weights-provider", provide_input_conv12weights
)
conv12.initialize(initial_w_conv12)
x = conv12.forward(x)
x = Sigmoid(conv12.get_output_shape()).forward(x)
fake_pool1 = Conv2D(conv12.get_output_shape(), pool1_shape, 2, "SAME")
initial_w_pool1 = prot.define_private_input(
"weights-provider", provide_input_pool1weights
)
fake_pool1.initialize(initial_w_pool1)
x = fake_pool1.forward(x)
print("Define Block 2")
# Block 2
conv21 = Conv2D(fake_pool1.get_output_shape(), conv21_fshape, 1, "SAME")
initial_w_conv21 = prot.define_private_input(
"weights-provider", provide_input_conv21weights
)
conv21.initialize(initial_w_conv21)
x = conv21.forward(x)
x = Sigmoid(conv21.get_output_shape()).forward(x)
conv22 = Conv2D(conv21.get_output_shape(), conv22_fshape, 1, "SAME")
initial_w_conv22 = prot.define_private_input(
"weights-provider", provide_input_conv22weights
)
conv22.initialize(initial_w_conv22)
x = conv22.forward(x)
x = Sigmoid(conv22.get_output_shape()).forward(x)
fake_pool2 = Conv2D(conv22.get_output_shape(), pool2_shape, 2, "SAME")
initial_w_pool2 = prot.define_private_input(
"weights-provider", provide_input_pool2weights
)
fake_pool2.initialize(initial_w_pool2)
x = fake_pool2.forward(x)
print("Define Block 3")
# Block 3
conv31 = Conv2D(fake_pool2.get_output_shape(), conv31_fshape, 1, "SAME")
initial_w_conv31 = prot.define_private_input(
"weights-provider", provide_input_conv31weights
)
conv31.initialize(initial_w_conv31)
x = conv31.forward(x)
x = Sigmoid(conv31.get_output_shape()).forward(x)
conv32 = Conv2D(conv31.get_output_shape(), conv32_fshape, 1, "SAME")
initial_w_conv32 = prot.define_private_input(
"weights-provider", provide_input_conv32weights
)
conv32.initialize(initial_w_conv32)
x = conv32.forward(x)
x = Sigmoid(conv32.get_output_shape()).forward(x)
conv33 = Conv2D(conv32.get_output_shape(), conv33_fshape, 1, "SAME")
initial_w_conv33 = prot.define_private_input(
"weights-provider", provide_input_conv33weights
)
conv33.initialize(initial_w_conv33)
x = conv33.forward(x)
x = Sigmoid(conv33.get_output_shape()).forward(x)
fake_pool3 = Conv2D(conv33.get_output_shape(), pool3_shape, 2, "SAME")
initial_w_pool3 = prot.define_private_input(
"weights-provider", provide_input_pool3weights
)
fake_pool3.initialize(initial_w_pool3)
x = fake_pool3.forward(x)
print("Define Block 4")
# Block 4
conv41 = Conv2D(fake_pool3.get_output_shape(), conv41_fshape, 1, "SAME")
initial_w_conv41 = prot.define_private_input(
"weights-provider", provide_input_conv41weights
)
conv41.initialize(initial_w_conv41)
x = conv41.forward(x)
x = Sigmoid(conv41.get_output_shape()).forward(x)
conv42 = Conv2D(conv41.get_output_shape(), conv42_fshape, 1, "SAME")
initial_w_conv42 = prot.define_private_input(
"weights-provider", provide_input_conv42weights
)
conv42.initialize(initial_w_conv42)
x = conv42.forward(x)
x = Sigmoid(conv42.get_output_shape()).forward(x)
conv43 = Conv2D(conv42.get_output_shape(), conv43_fshape, 1, "SAME")
initial_w_conv43 = prot.define_private_input(
"weights-provider", provide_input_conv43weights
)
conv43.initialize(initial_w_conv43)
x = conv43.forward(x)
x = Sigmoid(conv43.get_output_shape()).forward(x)
fake_pool4 = Conv2D(conv43.get_output_shape(), pool4_shape, 2, "SAME")
initial_w_pool4 = prot.define_private_input(
"weights-provider", provide_input_pool4weights
)
fake_pool4.initialize(initial_w_pool4)
x = fake_pool4.forward(x)
print("Define Block 5")
# Block 5
conv51 = Conv2D(fake_pool4.get_output_shape(), conv51_fshape, 1, "SAME")
initial_w_conv51 = prot.define_private_input(
"weights-provider", provide_input_conv51weights
)
conv51.initialize(initial_w_conv51)
x = conv51.forward(x)
x = Sigmoid(conv51.get_output_shape()).forward(x)
conv52 = Conv2D(conv51.get_output_shape(), conv52_fshape, 1, "SAME")
initial_w_conv52 = prot.define_private_input(
"weights-provider", provide_input_conv52weights
)
conv52.initialize(initial_w_conv52)
x = conv52.forward(x)
x = Sigmoid(conv52.get_output_shape()).forward(x)
conv53 = Conv2D(conv52.get_output_shape(), conv53_fshape, 1, "SAME")
initial_w_conv53 = prot.define_private_input(
"weights-provider", provide_input_conv53weights
)
conv53.initialize(initial_w_conv53)
x = conv53.forward(x)
x = Sigmoid(conv53.get_output_shape()).forward(x)
fake_pool5 = Conv2D(conv53.get_output_shape(), pool5_shape, 2, "SAME")
initial_w_pool5 = prot.define_private_input(
"weights-provider", provide_input_pool5weights
)
fake_pool5.initialize(initial_w_pool5)
x = fake_pool5.forward(x)
print("Define Reshape")
reshape1 = Reshape(fake_pool5.get_output_shape(), [1, -1])
x = reshape1.forward(x)
print("Define 2-layer FC")
dense1 = Dense(reshape1.get_output_shape(), 512)
dense1.initialize()
x = dense1.forward(x)
x = Sigmoid(dense1.get_output_shape()).forward(x)
dense2 = Dense(dense1.get_output_shape(), 2)
dense2.initialize()
y = dense2.forward(x)
# send output
prediction_op = prot.define_output(y, receive_output)
with tfe.Session(config=config) as sess:
print("Initialize tensors")
sess.run(tfe.global_variables_initializer(), tag="init")
print("Predict")
sess.run(prediction_op, tag="prediction")
| 36.773134
| 86
| 0.636009
|
4a0b1bba1e34cc3bbf31779159f458c7581e474e
| 1,337
|
py
|
Python
|
pybb/middleware.py
|
magatz/pybbm
|
6d80889386df22547395ca67af8184957a6d3ada
|
[
"BSD-2-Clause"
] | null | null | null |
pybb/middleware.py
|
magatz/pybbm
|
6d80889386df22547395ca67af8184957a6d3ada
|
[
"BSD-2-Clause"
] | null | null | null |
pybb/middleware.py
|
magatz/pybbm
|
6d80889386df22547395ca67af8184957a6d3ada
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils import translation
from django.db.models import ObjectDoesNotExist
from pybb import util
class PybbMiddleware(object):
def process_request(self, request):
if request.user.is_authenticated():
try:
# Here we try to load profile, but can get error
# if user created during syncdb but profile model
# under south control. (Like pybb.Profile).
profile = util.get_pybb_profile(request.user)
except ObjectDoesNotExist:
# Ok, we should create new profile for this user
# and grant permissions for add posts
# It should be caused rarely, so we move import signal here
# to prevent circular import
from pybb.signals import user_saved
user_saved(request.user, created=True)
profile = util.get_pybb_profile(request.user)
if not profile.language:
profile.language = translation.get_language_from_request(request)
profile.save()
request.session['django_language'] = profile.language
translation.activate(profile.language)
request.LANGUAGE_CODE = translation.get_language()
| 41.78125
| 81
| 0.62902
|
4a0b1be994971cb9b72cb98661b27c9f9ee51214
| 3,255
|
py
|
Python
|
setup.py
|
JulesGrd/google-calendar-simple-api
|
c4033807d0f35a0ad149e311bb51d128cd19cc4a
|
[
"MIT"
] | null | null | null |
setup.py
|
JulesGrd/google-calendar-simple-api
|
c4033807d0f35a0ad149e311bb51d128cd19cc4a
|
[
"MIT"
] | null | null | null |
setup.py
|
JulesGrd/google-calendar-simple-api
|
c4033807d0f35a0ad149e311bb51d128cd19cc4a
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages, Command
from sphinx.setup_command import BuildDoc
from shutil import rmtree
import os
import sys
here = os.path.abspath(os.path.dirname(__file__))
VERSION = '1.2.0'
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds...')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution...')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine...')
os.system('twine upload dist/*')
self.status('Pushing git tags...')
os.system('git tag v{0}'.format(VERSION))
os.system('git push --tags')
sys.exit()
class Doctest(Command):
description = 'Run doctests with Sphinx'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
from sphinx.application import Sphinx
sph = Sphinx('./docs/source', # source directory
'./docs/source', # directory containing conf.py
'./docs/build', # output directory
'./docs/build/doctrees', # doctree directory
'doctest') # finally, specify the doctest builder
sph.build()
with open('README.rst') as f:
long_description = ''.join(f.readlines())
setup(
name='gcsa',
version=VERSION,
keywords='google calendar simple api recurrence',
description='Simple API for Google Calendar management',
long_description=long_description,
author='Yevhen Kuzmovych',
author_email='kuzmovych.yevhen@gmail.com',
license='MIT',
url='https://github.com/kuzmoyev/google-calendar-simple-api',
zip_safe=False,
packages=find_packages(exclude=("tests", "tests.*")),
install_requires=[
"tzlocal>=2,<3",
"google-api-python-client>=1.8",
"google-auth-httplib2>=0.0.4",
"google-auth-oauthlib>=0.4,<0.5",
"python-dateutil>=2.7",
"beautiful_date>=2.0.0",
],
tests_require=[
"pytest>=5.4",
"pytest-cov>=2.10",
"flake8>3.8.3",
"pep8-naming>=0.11.1",
"pyfakefs>=4.3.1,<5.0",
],
classifiers=[
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
cmdclass={
'upload': UploadCommand,
'build_sphinx': BuildDoc,
'doctest': Doctest
}
)
| 28.060345
| 86
| 0.590476
|
4a0b1d277efd289b4ff0008a19020d4f614f1ae5
| 303
|
py
|
Python
|
data/multilingual/Latn.AME/Sans_16/pdf_to_json_test_Latn.AME_Sans_16.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | 1
|
2021-09-19T19:47:35.000Z
|
2021-09-19T19:47:35.000Z
|
data/multilingual/Latn.AME/Sans_16/pdf_to_json_test_Latn.AME_Sans_16.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
data/multilingual/Latn.AME/Sans_16/pdf_to_json_test_Latn.AME_Sans_16.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
# Smoke test: convert one sample PDF to JSON and dump the result to stdout.
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.AME/Sans_16/udhr_Latn.AME_Sans_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
# Only hash embedded images instead of emitting full image data.
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
# Sorted keys make the output stable for diffing between runs.
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.3
| 73
| 0.811881
|
4a0b1e6788501989b359ab08e758a5f33b3fa879
| 2,667
|
py
|
Python
|
renamePackage.py
|
stuartsoft/Incubator
|
b33c237647d811d4878b51ccf4613c8541f43c7e
|
[
"Apache-2.0"
] | null | null | null |
renamePackage.py
|
stuartsoft/Incubator
|
b33c237647d811d4878b51ccf4613c8541f43c7e
|
[
"Apache-2.0"
] | null | null | null |
renamePackage.py
|
stuartsoft/Incubator
|
b33c237647d811d4878b51ccf4613c8541f43c7e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Usage renamePackage.py package
# Ex: renamePackage.py com.demo.mobile
import os, sys
import shutil
import platform
from functools import reduce
# Build artifacts and IDE/VCS state to wipe before the rename.
stuffToRemove = [".gradle", ".git", ".idea", "build", "app/build", ".iml", "local.properties"]
dirChar = os.sep
args = sys.argv
# Exactly one argument is expected: the new package name.
if (len(args) != 2):
    print("please enter a new package name")
    exit()
new_package = args[1]
# The template project's placeholder package that gets replaced everywhere.
original_package = "com.mycompany.myapp"
# Directory fragments (e.g. "/com/demo/mobile/") used for path rewriting.
new_package_directory = dirChar + new_package.lower().replace('.', dirChar) + dirChar
original_package_directory = dirChar + original_package.lower().replace('.', dirChar) + dirChar
#deletes files and folders
def nuke(folders):
    """Delete the given files and directories, ignoring missing entries.

    Replaces the previous shell-based implementation (``rm -rf`` /
    ``rmdir /s /q``): on Windows ``rmdir`` cannot delete plain files such
    as ``local.properties``, so files and directories are now handled
    separately with the standard library, which also avoids spawning a
    shell entirely.
    """
    for f in folders:
        f = f.replace('/', os.sep)  # normalize to the platform separator
        print('Removing ' + f)
        if os.path.isdir(f):
            # ignore_errors mirrors the old silent best-effort behavior
            shutil.rmtree(f, ignore_errors=True)
        elif os.path.exists(f):
            try:
                os.remove(f)
            except OSError:
                # best-effort cleanup, as before
                pass
def refactorPackagenameInFile(file, oldPackageName, newPackageName):
    """Replace every occurrence of *oldPackageName* with *newPackageName*
    in *file*, rewriting it in place.

    Only source/config files are touched (.java, .kt, .xml, .properties,
    .txt, .gradle); any other file is left unchanged. The original
    implementation opened the file for writing without ever closing it —
    both handles are now managed with ``with`` so they are closed
    deterministically.
    """
    # endswith accepts a tuple: one call instead of a chain of `or`s.
    if not file.endswith(('.java', '.kt', '.xml', '.properties', '.txt', '.gradle')):
        return
    with open(file, 'r') as f:
        contents = f.read()
    with open(file, 'w') as f:
        f.write(contents.replace(oldPackageName, newPackageName))
def refactorAllFolders():
    """Walk the ``app`` tree, moving sources from the template package
    directory to the new package directory and rewriting package names
    inside every eligible file, then remove the old package folders.

    Relies on the module-level globals ``original_package_directory``,
    ``new_package_directory``, ``original_package``, ``new_package`` and
    ``dirChar`` being set up before it is called.
    """
    for root, dir, files in os.walk('app'):
        for f in files:
            fpath = os.path.join(root, f)
            if original_package_directory in fpath:
                # File lives under the old package path: copy it to the
                # mirrored new package path, then rewrite its contents.
                oldPath = fpath
                newPath = fpath.replace(original_package_directory,new_package_directory)
                try:#attempt to make the new package directory in case it doesn't exist
                    os.makedirs((root+dirChar).replace(original_package_directory, new_package_directory))
                except:
                    None
                shutil.copy(oldPath, newPath)#copy the file to the new path
                refactorPackagenameInFile(newPath, original_package, new_package)
            else:
                refactorPackagenameInFile(fpath, original_package, new_package)
    # Remove the now-stale old package folders under each source set.
    for root, dir, files in os.walk('app/src'):
        #only use the first iteration, we just want the immediate children of this folder
        for folder in dir:
            folderpath = 'app' + dirChar + 'src' + dirChar + folder + dirChar + 'java' + dirChar + 'com' + dirChar + 'mycompany'
            shutil.rmtree(folderpath)
        break
# Run the rename: clean build/IDE state, then rewrite the package tree.
nuke(stuffToRemove)
refactorAllFolders()
# Read the celebratory ASCII art, then delete its source file.
f = open('complete.txt', 'r')
artwork = f.read()
f.close()
os.remove('complete.txt')
# Re-initialize git history with a single fresh import commit.
os.system('git init')
os.system('git add .')
os.system('git commit -q -m "Initial import from github.com/madebyatomicrobot/android-starter-project"')
#print artwork :)
print(artwork)
os.system('git log --oneline')
| 30.306818
| 163
| 0.710911
|
4a0b1eb1f82363d90a5f09305fcbc4c27aa10911
| 2,372
|
py
|
Python
|
datasette_auth_github/utils.py
|
simonw/datasette-auth-github
|
9564e25d756e328bf60e5d01f63c688ea2e44ca0
|
[
"Apache-2.0"
] | 39
|
2019-07-08T04:29:08.000Z
|
2022-02-22T22:38:58.000Z
|
datasette_auth_github/utils.py
|
simonw/datasette-auth-github
|
9564e25d756e328bf60e5d01f63c688ea2e44ca0
|
[
"Apache-2.0"
] | 66
|
2019-07-03T16:18:14.000Z
|
2021-03-12T17:47:53.000Z
|
datasette_auth_github/utils.py
|
simonw/datasette-auth-github
|
9564e25d756e328bf60e5d01f63c688ea2e44ca0
|
[
"Apache-2.0"
] | 6
|
2019-08-27T22:44:37.000Z
|
2020-11-03T22:37:21.000Z
|
import httpx
import time
async def load_orgs_and_teams(config, profile, access_token):
    """Query the GitHub API for the user's active org and team memberships.

    config: dict with optional "load_orgs" / "load_teams" entries (string or
    list of strings). profile: GitHub profile dict; only "login" is read.
    access_token: GitHub OAuth token used for the API calls.
    Returns a dict with "gh_orgs" and/or "gh_teams" lists, plus a "gh_ts"
    timestamp when anything was looked up.
    """
    store_timestamp = False
    extras = {}
    if config.get("load_orgs"):
        load_orgs = config["load_orgs"]
        gh_orgs = []
        for org in force_list(load_orgs):
            # Membership endpoint: 200 + state=="active" means member.
            url = "https://api.github.com/orgs/{}/memberships/{}".format(
                org, profile["login"]
            )
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    url, headers={"Authorization": "token {}".format(access_token)}
                )
            if response.status_code == 200 and response.json()["state"] == "active":
                gh_orgs.append(org)
        extras["gh_orgs"] = gh_orgs
        store_timestamp = True
    if config.get("load_teams"):
        load_teams = config["load_teams"]
        gh_teams = []
        for team in force_list(load_teams):
            # Teams are configured as "org/team-slug".
            org_slug, _, team_slug = team.partition("/")
            # Figure out the team_id
            lookup_url = "https://api.github.com/orgs/{}/teams/{}".format(
                org_slug, team_slug
            )
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    lookup_url,
                    headers={"Authorization": "token {}".format(access_token)},
                )
            if response.status_code == 200:
                team_id = response.json()["id"]
            else:
                # Unknown team (or no access): skip it silently.
                continue
            # Now check if user is an active member of the team:
            team_membership_url = (
                "https://api.github.com/teams/{}/memberships/{}".format(
                    team_id, profile["login"]
                )
            )
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    team_membership_url,
                    headers={"Authorization": "token {}".format(access_token)},
                )
            if response.status_code == 200 and response.json()["state"] == "active":
                gh_teams.append(team)
        extras["gh_teams"] = gh_teams
        store_timestamp = True
    if store_timestamp:
        # Record when the lookups were made so callers can expire them.
        extras["gh_ts"] = int(time.time())
    return extras
def force_list(value):
    """Wrap a bare string in a single-element list; pass anything else
    through unchanged (lists, None, ...)."""
    return [value] if isinstance(value, str) else value
| 35.402985
| 84
| 0.526981
|
4a0b20512f95ce66368571c264c96d7a3822e481
| 1,009
|
py
|
Python
|
sdk/python/kfp/__init__.py
|
mbaijal/pipelines
|
78200c8e0317c28e05642363296efba427993547
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/kfp/__init__.py
|
mbaijal/pipelines
|
78200c8e0317c28e05642363296efba427993547
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/kfp/__init__.py
|
mbaijal/pipelines
|
78200c8e0317c28e05642363296efba427993547
|
[
"Apache-2.0"
] | 1
|
2022-03-04T14:26:55.000Z
|
2022-03-04T14:26:55.000Z
|
# Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# `kfp` is a namespace package.
# https://packaging.python.org/guides/packaging-namespace-packages/#pkgutil-style-namespace-packages
__path__ = __import__("pkgutil").extend_path(__path__, __name__)
__version__ = '1.7.0'
from . import components
from . import containers
from . import dsl
from . import auth
from ._client import Client
from ._config import *
from ._local_client import LocalClient
from ._runners import *
| 34.793103
| 100
| 0.77106
|
4a0b2165c225696b95d09e55fb3e76a4bf1cbdd9
| 1,030
|
py
|
Python
|
fn_qradar_advisor/tools/offense_insights.py
|
nickpartner-goahead/resilient-community-apps
|
097c0dbefddbd221b31149d82af9809420498134
|
[
"MIT"
] | 65
|
2017-12-04T13:58:32.000Z
|
2022-03-24T18:33:17.000Z
|
fn_qradar_advisor/tools/offense_insights.py
|
nickpartner-goahead/resilient-community-apps
|
097c0dbefddbd221b31149d82af9809420498134
|
[
"MIT"
] | 48
|
2018-03-02T19:17:14.000Z
|
2022-03-09T22:00:38.000Z
|
fn_qradar_advisor/tools/offense_insights.py
|
nickpartner-goahead/resilient-community-apps
|
097c0dbefddbd221b31149d82af9809420498134
|
[
"MIT"
] | 95
|
2018-01-11T16:23:39.000Z
|
2022-03-21T11:34:29.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from ToolCommand import ToolCommand
import sys
from fn_qradar_advisor.lib.qradar_advisor_client import QRadarAdvisorClient
import logging
# Log everything to a local file while exercising the client.
logging.basicConfig(filename="testing.log", level=logging.DEBUG)
# Usage text shown by the base ToolCommand for -h/--help.
HELP_STR = """
Usage:\n
\t offense_insights.py -i app_id -f offense_id
"""
# getopt short/long option specs consumed by ToolCommand.run_command.
arg_str = "hi:f:"
arg_list = ["help", "app_id", "offense"]
class SampleCmd(ToolCommand):
    """Command-line driver that fetches QRadar Advisor insights for one offense."""
    def do_command(self):
        # Host/token come from the ToolCommand base; app_id/offense from CLI opts.
        client = QRadarAdvisorClient(qradar_host=self.system_host,
                                     qradar_token=self.system_token,
                                     advisor_app_id=self.opts_dict["app_id"],
                                     cafile=False, log=logging,
                                     opts={}, function_opts=self.opts_dict)
        resp = client.offense_insights(self.opts_dict["offense"])
        print("Return: {}".format(str(resp)))
# Script entry point: parse CLI options and run the sample command.
if __name__ == "__main__":
    sample_cmd = SampleCmd(HELP_STR)
    sample_cmd.run_command(sys.argv[1:], arg_str, arg_list)
| 34.333333
| 77
| 0.63301
|
4a0b21dab78744772d784ba6542836a971a26486
| 410
|
py
|
Python
|
lets_party/migrations/0007_letspartyredflag_dt.py
|
dchaplinsky/ragoogle
|
dccb3d29334c3220ea12c46c725c443c8bd725c0
|
[
"MIT"
] | 3
|
2018-06-10T21:20:56.000Z
|
2021-04-04T11:21:06.000Z
|
lets_party/migrations/0007_letspartyredflag_dt.py
|
dchaplinsky/ragoogle
|
dccb3d29334c3220ea12c46c725c443c8bd725c0
|
[
"MIT"
] | 7
|
2018-08-14T20:54:49.000Z
|
2020-06-05T18:17:30.000Z
|
lets_party/migrations/0007_letspartyredflag_dt.py
|
dchaplinsky/ragoogle
|
dccb3d29334c3220ea12c46c725c443c8bd725c0
|
[
"MIT"
] | 3
|
2018-06-27T12:53:13.000Z
|
2020-09-25T19:41:46.000Z
|
# Generated by Django 2.2.4 on 2019-09-12 18:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable auto-now-add ``dt`` timestamp to LetsPartyRedFlag."""
    dependencies = [
        ('lets_party', '0006_letspartyredflag'),
    ]
    operations = [
        migrations.AddField(
            model_name='letspartyredflag',
            name='dt',
            # null=True so existing rows don't need a backfilled value.
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
    ]
| 21.578947
| 69
| 0.614634
|
4a0b2267786c3ed40f4d1e4bd0b8c4a329eebcf6
| 623
|
py
|
Python
|
LeetCodeSolutions/LeetCode_1169.py
|
lih627/python-algorithm-templates
|
a61fd583e33a769b44ab758990625d3381793768
|
[
"MIT"
] | 24
|
2020-03-28T06:10:25.000Z
|
2021-11-23T05:01:29.000Z
|
LeetCodeSolutions/LeetCode_1169.py
|
lih627/python-algorithm-templates
|
a61fd583e33a769b44ab758990625d3381793768
|
[
"MIT"
] | null | null | null |
LeetCodeSolutions/LeetCode_1169.py
|
lih627/python-algorithm-templates
|
a61fd583e33a769b44ab758990625d3381793768
|
[
"MIT"
] | 8
|
2020-05-18T02:43:16.000Z
|
2021-05-24T18:11:38.000Z
|
class Solution:
    def invalidTransactions(self, transactions: List[str]) -> List[str]:
        """Return the transactions that are invalid: amount over 1000, or a
        same-name transaction in a different city within 60 minutes.
        Output preserves the input order; each transaction appears at most once.
        """
        parsed = [t.split(',') for t in transactions]
        flagged = []
        for i, (name, time, amount, city) in enumerate(parsed):
            if int(amount) > 1000:
                flagged.append(transactions[i])
                continue
            for j, (other_name, other_time, _other_amount, other_city) in enumerate(parsed):
                if j == i:
                    continue
                same_person = name == other_name
                different_city = city != other_city
                within_hour = abs(int(time) - int(other_time)) <= 60
                if same_person and different_city and within_hour:
                    flagged.append(transactions[i])
                    break
        return flagged
| 38.9375
| 101
| 0.47512
|
4a0b23dffa318b2c52542937334c5a4f960ab07a
| 8,445
|
py
|
Python
|
specutils/io/parsing_utils.py
|
hamogu/specutils
|
b873f2ac9b3c207c9e670246d102f46a9606d6ed
|
[
"BSD-3-Clause"
] | null | null | null |
specutils/io/parsing_utils.py
|
hamogu/specutils
|
b873f2ac9b3c207c9e670246d102f46a9606d6ed
|
[
"BSD-3-Clause"
] | null | null | null |
specutils/io/parsing_utils.py
|
hamogu/specutils
|
b873f2ac9b3c207c9e670246d102f46a9606d6ed
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from astropy.table import Table
import astropy.units as u
from astropy.nddata import StdDevUncertainty
from astropy.utils.exceptions import AstropyUserWarning
import warnings
import logging
from specutils.spectra import Spectrum1D
def spectrum_from_column_mapping(table, column_mapping, wcs=None):
    """
    Given a table and a mapping of the table column names to attributes
    on the Spectrum1D object, parse the information into a Spectrum1D.

    Parameters
    ----------
    table : :class:`~astropy.table.Table`
        The table object returned from parsing the data file.
    column_mapping : dict
        A dictionary describing the relation between the file columns
        and the arguments of the `Spectrum1D` class, along with unit
        information. The dictionary keys should be the file column names
        while the values should be a two-tuple where the first element is the
        associated `Spectrum1D` keyword argument, and the second element is the
        unit for the file column::

            column_mapping = {'FLUX': ('flux', 'Jy')}

    wcs : :class:`~astropy.wcs.WCS` or :class:`gwcs.WCS`
        WCS object passed to the Spectrum1D initializer.

    Returns
    -------
    :class:`~specutils.Spectrum1D`
        The spectrum built from the mapped columns.
    """
    spec_kwargs = {}
    # Associate columns of the file with the appropriate spectrum1d arguments
    for col_name, (kwarg_name, cm_unit) in column_mapping.items():
        # If the table object couldn't parse any unit information,
        # fallback to the column mapper defined unit
        tab_unit = table[col_name].unit
        if tab_unit and cm_unit is not None:
            # If the table unit is defined, retrieve the quantity array for
            # the column
            kwarg_val = u.Quantity(table[col_name], tab_unit)
            # Attempt to convert the table unit to the user-defined unit.
            logging.debug("Attempting auto-convert of table unit '%s' to "
                          "user-provided unit '%s'.", tab_unit, cm_unit)
            if cm_unit.physical_type in ('length', 'frequency'):
                # Spectral axis column information.
                # FIX: `Quantity.to` takes `equivalencies=`, not
                # `equivalence=` — the old keyword raised a TypeError.
                kwarg_val = kwarg_val.to(cm_unit, equivalencies=u.spectral())
            elif 'spectral flux' in cm_unit.physical_type:
                # Flux/error column information
                kwarg_val = kwarg_val.to(
                    cm_unit, equivalencies=u.spectral_density(1 * u.AA))
        elif cm_unit is not None:
            # In this case, the user has defined a unit in the column mapping
            # but no unit has been defined in the table object.
            kwarg_val = u.Quantity(table[col_name], cm_unit)
        else:
            # Neither the column mapping nor the table contain unit information.
            # This may be desired e.g. for the mask or bit flag arrays.
            kwarg_val = table[col_name]
        spec_kwargs.setdefault(kwarg_name, kwarg_val)
    # Ensure that the uncertainties are a subclass of NDUncertainty
    if spec_kwargs.get('uncertainty') is not None:
        spec_kwargs['uncertainty'] = StdDevUncertainty(
            spec_kwargs.get('uncertainty'))
    return Spectrum1D(**spec_kwargs, wcs=wcs, meta=table.meta)
def generic_spectrum_from_table(table, wcs=None, **kwargs):
    """
    Load spectrum from an Astropy table into a Spectrum1D object.

    Uses the following logic to figure out which column is which:
    * Spectral axis (dispersion) is the first column with units
      compatible with u.spectral() or with length units such as 'pix'.
    * Flux is taken from the first column with units compatible with
      u.spectral_density(), or with other likely culprits such as
      'adu' or 'cts/s'.
    * Uncertainty comes from the next column with the same units as flux.

    Parameters
    ----------
    table : :class:`~astropy.table.Table`
        The parsed table holding the spectral data.
    wcs : :class:`~astropy.wcs.WCS`
        A FITS WCS object. If this is present, the machinery will fall back
        to using the wcs to find the dispersion information.

    Returns
    -------
    data: Spectrum1D
        The spectrum that is represented by the data in this table.

    Raises
    ------
    Warns if uncertainty has zeros or negative numbers.
    Raises IOError if it can't figure out the columns.
    """
    # Local function to find the wavelength or frequency column
    def _find_spectral_axis_column(table,columns_to_search):
        """
        Figure out which column in a table holds the spectral axis (dispersion).
        Take the first column that has units compatible with u.spectral()
        equivalencies. If none meet that criterion, look for other likely
        length units such as 'pix'.
        """
        additional_valid_units = [u.Unit('pix')]
        found_column = None
        # First, search for a column with units compatible with Angstroms
        for c in columns_to_search:
            try:
                table[c].to("AA",equivalencies=u.spectral())
                found_column = c
                break
            # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
            # kept as-is to preserve behavior, but narrowing it is advisable.
            except:
                continue
        # If no success there, check for other possible length units
        if found_column is None:
            for c in columns_to_search:
                if table[c].unit in additional_valid_units:
                    found_column = c
                    break
        return found_column
    # Local function to find the flux column
    def _find_spectral_column(table,columns_to_search,spectral_axis):
        """
        Figure out which column in a table holds the fluxes or uncertainties.
        Take the first column that has units compatible with
        u.spectral_density() equivalencies. If none meet that criterion,
        look for other likely length units such as 'adu' or 'cts/s'.
        """
        additional_valid_units = [u.Unit('adu'),u.Unit('ct/s')]
        found_column = None
        # First, search for a column with units compatible with Janskies
        for c in columns_to_search:
            try:
                table[c].to("Jy",equivalencies=u.spectral_density(spectral_axis))
                found_column = c
                break
            # NOTE(review): same bare-except caveat as above.
            except:
                continue
        # If no success there, check for other possible flux units
        if found_column is None:
            for c in columns_to_search:
                if table[c].unit in additional_valid_units:
                    found_column = c
                    break
        return found_column
    # Make a copy of the column names so we can remove them as they are found
    colnames = table.colnames.copy()
    # Use the first column that has spectral unit as the dispersion axis
    spectral_axis_column = _find_spectral_axis_column(table, colnames)
    if spectral_axis_column is None and wcs is None:
        raise IOError("Could not identify column containing the wavelength, frequency or energy")
    elif wcs is not None:
        spectral_axis = None
    else:
        spectral_axis = table[spectral_axis_column].to(table[spectral_axis_column].unit)
        colnames.remove(spectral_axis_column)
    # Use the first column that has a spectral_density equivalence as the flux
    flux_column = _find_spectral_column(table,colnames,spectral_axis)
    if flux_column is None:
        raise IOError("Could not identify column containing the flux")
    flux = table[flux_column].to(table[flux_column].unit)
    colnames.remove(flux_column)
    # Use the next column with the same units as flux as the uncertainty
    # Interpret it as a standard deviation and check if it has zeros or negative values
    err_column = None
    for c in colnames:
        if table[c].unit == table[flux_column].unit:
            err_column = c
            break
    if err_column is not None:
        err = StdDevUncertainty(table[err_column].to(table[err_column].unit))
        if np.min(table[err_column]) <= 0.:
            warnings.warn("Standard Deviation has values of 0 or less", AstropyUserWarning)
    # Create the Spectrum1D object and return it
    # NOTE(review): `or` binds looser than `and`, so this reads as
    # wcs is not None OR (spectral_axis_column is not None AND flux_column
    # is not None); given the raises above it is effectively always true.
    if wcs is not None or spectral_axis_column is not None and flux_column is not None:
        if err_column is not None:
            spectrum = Spectrum1D(flux=flux, spectral_axis=spectral_axis,
                                  uncertainty=err, meta=table.meta, wcs=wcs)
        else:
            spectrum = Spectrum1D(flux=flux, spectral_axis=spectral_axis,
                                  meta=table.meta, wcs=wcs)
    return spectrum
| 40.023697
| 97
| 0.65151
|
4a0b245333a68feb280645a25b8862654eed837d
| 248
|
py
|
Python
|
qa/reputations.py
|
SMMousaviSP/gapbug
|
464469f712df5ec02ebbc80ef0c500bffd096eb9
|
[
"MIT"
] | null | null | null |
qa/reputations.py
|
SMMousaviSP/gapbug
|
464469f712df5ec02ebbc80ef0c500bffd096eb9
|
[
"MIT"
] | null | null | null |
qa/reputations.py
|
SMMousaviSP/gapbug
|
464469f712df5ec02ebbc80ef0c500bffd096eb9
|
[
"MIT"
] | null | null | null |
from enum import Enum
class Reputation(Enum):
    """Reputation point deltas awarded (or deducted) for Q&A events."""
    QUESTION_VOTE_UP = 10
    ANSWER_VOTE_UP = 10
    # Points for the answer's author when it is accepted.
    ANSWER_MARKED_ACCEPTED = 15
    # Points for the question's author who accepts an answer.
    ANSWER_MARKED_ACCEPTED_ACCEPTOR = 2
    QUESTION_VOTE_DOWN = -2
    ANSWER_VOTE_DOWN = -2
    # Cost charged to the user who casts a downvote.
    USER_VOTE_DOWN = -1
| 20.666667
| 39
| 0.717742
|
4a0b247122c3657894f61ff9b789dbdc98357253
| 7,693
|
py
|
Python
|
python/mxnet/recordio.py
|
bill-teng/mxnet-test
|
39a2c0cff1be8b8277b2e0a8c55214acc186a49c
|
[
"Apache-2.0"
] | 5
|
2017-01-21T08:53:55.000Z
|
2021-08-20T13:06:43.000Z
|
python/mxnet/recordio.py
|
dmmiller612/mxnet
|
3f410c23cb02df64625d7c8f9f299b580236f6a5
|
[
"Apache-2.0"
] | null | null | null |
python/mxnet/recordio.py
|
dmmiller612/mxnet
|
3f410c23cb02df64625d7c8f9f299b580236f6a5
|
[
"Apache-2.0"
] | 5
|
2017-02-20T18:55:16.000Z
|
2020-04-17T21:34:22.000Z
|
# coding: utf-8
# pylint: disable=invalid-name, protected-access, fixme, too-many-arguments, no-member
"""Python interface for DLMC RecrodIO data format"""
from __future__ import absolute_import
from collections import namedtuple
import ctypes
import struct
import numbers
import numpy as np
from .base import _LIB
from .base import RecordIOHandle
from .base import check_call
from .base import c_str
try:
import cv2
except ImportError:
cv2 = None
class MXRecordIO(object):
    """Python interface for the read/write RecordIO data format.

    Thin ctypes wrapper over the MXNet C library record reader/writer.

    Parameters
    ----------
    uri : string
        uri path to recordIO file.
    flag : string
        "r" for reading or "w" for writing.
    """
    def __init__(self, uri, flag):
        self.uri = c_str(uri)
        self.handle = RecordIOHandle()  # opaque handle owned by the C library
        self.flag = flag
        self.is_open = False
        self.open()
    def open(self):
        """Open record file"""
        if self.flag == "w":
            check_call(_LIB.MXRecordIOWriterCreate(self.uri, ctypes.byref(self.handle)))
            self.writable = True
        elif self.flag == "r":
            check_call(_LIB.MXRecordIOReaderCreate(self.uri, ctypes.byref(self.handle)))
            self.writable = False
        else:
            raise ValueError("Invalid flag %s"%self.flag)
        self.is_open = True
    def __del__(self):
        # Release the C-side handle when the object is garbage collected.
        self.close()
    def close(self):
        """close record file"""
        if not self.is_open:
            return
        if self.writable:
            check_call(_LIB.MXRecordIOWriterFree(self.handle))
        else:
            check_call(_LIB.MXRecordIOReaderFree(self.handle))
        self.is_open = False
    def reset(self):
        """Reset pointer to first item. If record is opened with 'w',
        this will truncate the file to empty"""
        self.close()
        self.open()
    def write(self, buf):
        """Write a string buffer as a record

        Parameters
        ----------
        buf : string (python2), bytes (python3)
            buffer to write.
        """
        assert self.writable
        check_call(_LIB.MXRecordIOWriterWriteRecord(self.handle,
                                                    ctypes.c_char_p(buf),
                                                    ctypes.c_size_t(len(buf))))
    def read(self):
        """Read a record as string

        Returns
        -------
        buf : string
            buffer read, or None at end of file.
        """
        assert not self.writable
        buf = ctypes.c_char_p()
        size = ctypes.c_size_t()
        check_call(_LIB.MXRecordIOReaderReadRecord(self.handle,
                                                   ctypes.byref(buf),
                                                   ctypes.byref(size)))
        if buf:
            # Reinterpret the returned pointer as a fixed-size char array
            # so the raw bytes can be copied out.
            buf = ctypes.cast(buf, ctypes.POINTER(ctypes.c_char*size.value))
            return buf.contents.raw
        else:
            return None
class MXIndexedRecordIO(MXRecordIO):
    """Python interface for the read/write RecordIO data format with an index.
    Supports random access by key.

    Parameters
    ----------
    idx_path : str
        Path to index file
    uri : str
        Path to record file. Only support file types that are seekable.
    flag : str
        'w' for write or 'r' for read
    key_type : type
        data type for keys
    """
    def __init__(self, idx_path, uri, flag, key_type=int):
        self.idx_path = idx_path
        self.idx = {}     # key -> byte offset in the record file
        self.keys = []    # insertion/read order of keys
        self.key_type = key_type
        self.fidx = None
        super(MXIndexedRecordIO, self).__init__(uri, flag)
    def open(self):
        super(MXIndexedRecordIO, self).open()
        self.idx = {}
        self.keys = []
        self.fidx = open(self.idx_path, self.flag)
        if not self.writable:
            # Index file format: one "key<TAB>offset" line per record.
            for line in iter(self.fidx.readline, ''):
                line = line.strip().split('\t')
                key = self.key_type(line[0])
                self.idx[key] = int(line[1])
                self.keys.append(key)
    def close(self):
        if not self.is_open:
            return
        super(MXIndexedRecordIO, self).close()
        self.fidx.close()
    def seek(self, idx):
        """Set the read head to the record stored under index key *idx*."""
        assert not self.writable
        pos = ctypes.c_size_t(self.idx[idx])
        check_call(_LIB.MXRecordIOReaderSeek(self.handle, pos))
    def tell(self):
        """Query current write head position"""
        assert self.writable
        pos = ctypes.c_size_t()
        check_call(_LIB.MXRecordIOWriterTell(self.handle, ctypes.byref(pos)))
        return pos.value
    def read_idx(self, idx):
        """Read record with index"""
        self.seek(idx)
        return self.read()
    def write_idx(self, idx, buf):
        """Write record with index"""
        key = self.key_type(idx)
        pos = self.tell()
        self.write(buf)
        # Persist the key -> offset mapping alongside the record data.
        self.fidx.write('%s\t%d\n'%(str(key), pos))
        self.idx[key] = pos
        self.keys.append(key)
# Per-record image header: flag (0 or label length), scalar label, and two ids.
IRHeader = namedtuple('HEADER', ['flag', 'label', 'id', 'id2'])
# Packed binary layout: uint32 flag, float32 label, two uint64 ids.
_IRFormat = 'IfQQ'
_IRSize = struct.calcsize(_IRFormat)
def pack(header, s):
    """Pack a string/bytes payload into the MXImageRecord wire format.

    Parameters
    ----------
    header : IRHeader
        header of the image record.
        header.label can be a number or an array.
    s : str
        string to pack

    Returns
    -------
    The packed record: binary header (+ optional label array) + payload.
    """
    header = IRHeader(*header)
    if isinstance(header.label, numbers.Number):
        # Scalar label fits directly in the fixed-size header; flag=0.
        header = header._replace(flag=0)
    else:
        # Array label: stash its length in `flag` and prepend the raw
        # float32 bytes to the payload.
        label = np.asarray(header.label, dtype=np.float32)
        header = header._replace(flag=label.size, label=0)
        # FIX: ndarray.tostring() was deprecated and removed in modern
        # NumPy; tobytes() is the byte-identical replacement.
        s = label.tobytes() + s
    s = struct.pack(_IRFormat, *header) + s
    return s
def unpack(s):
    """Unpack an MXImageRecord buffer into (header, payload).

    Parameters
    ----------
    s : str
        string buffer from MXRecordIO.read

    Returns
    -------
    header : IRHeader
        header of the image record
    s : str
        unpacked string
    """
    header = IRHeader(*struct.unpack(_IRFormat, s[:_IRSize]))
    s = s[_IRSize:]
    if header.flag > 0:
        # flag > 0 means a float32 label array of length `flag` precedes
        # the payload.
        # FIX: np.fromstring is deprecated/removed; np.frombuffer reads
        # the same bytes (the resulting array is read-only).
        header = header._replace(label=np.frombuffer(s, np.float32, header.flag))
        s = s[header.flag*4:]
    return header, s
def unpack_img(s, iscolor=-1):
    """Unpack an MXImageRecord buffer into (header, decoded image).

    Parameters
    ----------
    s : str
        string buffer from MXRecordIO.read
    iscolor : int
        image format option for cv2.imdecode

    Returns
    -------
    header : IRHeader
        header of the image record
    img : numpy.ndarray
        unpacked image
    """
    header, s = unpack(s)
    # FIX: np.fromstring is deprecated/removed; frombuffer views the same
    # bytes without copying.
    img = np.frombuffer(s, dtype=np.uint8)
    assert cv2 is not None
    img = cv2.imdecode(img, iscolor)
    return header, img
def pack_img(header, img, quality=95, img_fmt='.jpg'):
    """Encode an image and pack it into the MXImageRecord wire format.

    Parameters
    ----------
    header : IRHeader
        header of the image record
        header.label can be a number or an array.
    img : numpy.ndarray
        image to pack
    quality : int
        quality for JPEG encoding. 1-100, or compression for PNG encoding. 1-9.
    img_fmt : str
        Encoding of the image. .jpg for JPEG, .png for PNG.

    Returns
    -------
    s : str
        The packed string
    """
    assert cv2 is not None
    jpg_formats = ['.JPG', '.JPEG']
    png_formats = ['.PNG']
    encode_params = None
    if img_fmt.upper() in jpg_formats:
        encode_params = [cv2.IMWRITE_JPEG_QUALITY, quality]
    elif img_fmt.upper() in png_formats:
        encode_params = [cv2.IMWRITE_PNG_COMPRESSION, quality]
    ret, buf = cv2.imencode(img_fmt, img, encode_params)
    assert ret, 'failed encoding image'
    # FIX: ndarray.tostring() was deprecated/removed; tobytes() is the
    # byte-identical replacement.
    return pack(header, buf.tobytes())
| 28.076642
| 88
| 0.578318
|
4a0b251ae97f8a1f366c336d2cb6a8cf9a520233
| 1,946
|
py
|
Python
|
flask_tinyclients/nessus.py
|
certeu/Flask-Tinyclients
|
317f52b943bde1170ede89cccb53e37fa5bc6170
|
[
"MIT"
] | 2
|
2017-12-14T12:35:12.000Z
|
2018-02-23T15:54:10.000Z
|
flask_tinyclients/nessus.py
|
certeu/Flask-Tinyclients
|
317f52b943bde1170ede89cccb53e37fa5bc6170
|
[
"MIT"
] | null | null | null |
flask_tinyclients/nessus.py
|
certeu/Flask-Tinyclients
|
317f52b943bde1170ede89cccb53e37fa5bc6170
|
[
"MIT"
] | null | null | null |
"""
flask_tinyclients.nessus
~~~~~~~~~~~~~~~~~~~~~~~~
Nessus API client
"""
from urllib.parse import urljoin
import requests
from . import RESTAPIClient
__all__ = ['Nessus']
class NessusAPIClient(RESTAPIClient):
    """REST client for the Nessus scanner API.

    ``base_url``, ``accesskey`` and ``secretkey`` are populated by the
    ``RESTAPIClient`` base from the keyword arguments passed at
    construction time (see ``Nessus.init_app``).
    """
    base_url = None
    accesskey = None
    secretkey = None
    def request(self, *args, **kwargs):
        """Issue an HTTP request against the Nessus API.

        args is (method, path, ...); kwargs are merged over the default
        request options (note: a caller-supplied ``headers`` replaces the
        default header set wholesale, matching the previous behavior).
        Raises ``requests.HTTPError`` for 4xx/5xx responses. Returns the
        parsed JSON body when JSON was requested, raw bytes otherwise.
        """
        method, path, *rest = args
        url = urljoin(self.base_url, path)
        defaults = {
            'headers': {
                'Content-type': 'application/json',
                'Accept': 'application/json',
                'X-ApiKeys': 'accessKey={0}; secretKey={1}'.
                format(self.accesskey, self.secretkey),
                'User-Agent': 'Flask Tinyclients ({0})'.
                format(self.__class__.__name__)
            },
        }
        defaults.update(kwargs)
        # The previous try/except blocks only re-raised the caught
        # exception unchanged, so they have been removed.
        response = requests.request(method, url, **defaults)
        # raise_for_status() raises on error and returns None on success,
        # so the old `if ... is not None: return ...` branch was dead code.
        response.raise_for_status()
        accepts = defaults.get('headers', {}).get('Accept', None)
        if accepts == 'application/json':
            return response.json()
        return response.content
class Nessus(object):
    """Flask extension wrapper that lazily binds a NessusAPIClient.

    The client is configured from the app's ``REST_CLIENT_NESSUS_*``
    settings either at construction time or via ``init_app``.
    """
    api = None
    def __init__(self, app=None):
        if app is None:
            return
        self.init_app(app)
    def init_app(self, app):
        """Build the underlying REST client from the Flask app config."""
        cfg = app.config
        self.api = NessusAPIClient(
            base_url=cfg['REST_CLIENT_NESSUS_BASE_URL'],
            accesskey=cfg['REST_CLIENT_NESSUS_API_KEY'],
            secretkey=cfg['REST_CLIENT_NESSUS_API_SECRET'])
    def submit(self, data, **kwargs):
        """POST a new scan definition to the Nessus API."""
        return self.api.post('scans', data=data, **kwargs)
    def submiturl(self, data, **kwargs):
        """POST a URL-based scan; same endpoint as ``submit``."""
        return self.api.post('scans', data=data, **kwargs)
| 28.202899
| 74
| 0.568859
|
4a0b25be51cfcf111a891a76a07f2d1ced7cd301
| 1,223
|
py
|
Python
|
expensiveoptimbenchmark/problems/ESP2/evaluateSimulation4.py
|
AlgTUDelft/ExpensiveOptimBenchmark
|
642056f8d94c7f953e50c3cd05bbbf9f39ad5c3d
|
[
"MIT"
] | 9
|
2021-03-03T15:17:04.000Z
|
2022-02-11T14:30:29.000Z
|
expensiveoptimbenchmark/problems/ESP2/evaluateSimulation4.py
|
WholeG/ExpensiveOptimBenchmark
|
642056f8d94c7f953e50c3cd05bbbf9f39ad5c3d
|
[
"MIT"
] | 6
|
2021-06-08T22:24:31.000Z
|
2022-03-12T00:49:58.000Z
|
expensiveoptimbenchmark/problems/ESP2/evaluateSimulation4.py
|
WholeG/ExpensiveOptimBenchmark
|
642056f8d94c7f953e50c3cd05bbbf9f39ad5c3d
|
[
"MIT"
] | 3
|
2021-03-22T12:12:03.000Z
|
2022-02-25T15:24:07.000Z
|
#!/usr/bin/env python
# coding: utf8
# ver 0.2 TBB modified system calls to ensure bash shell
import os
import sys
import csv
import ntpath
import time
from subprocess import call
## Accept decimal information string via command line from R
baffleStr = sys.argv[1]
# Modification: Do not replace `,` with ''. It is no longer the case that every element has their own index.
# baffleStr = baffleStr.replace(',', '')
## Create a fresh working copy of the base OpenFOAM case for this run
os.system('/bin/bash -c "cp -r Exeter_CFD_Problems/ESP/baseCase Exeter_CFD_Problems/ESP/foamWorkingDir"')
# Parse the baffle configuration vector into an OpenFOAM readable format
os.system("python2.7 Exeter_CFD_Problems/ESP/createBafflesDict4.py " + baffleStr)
# Run the OpenFOAM pipeline (baffles -> solver -> sampling), discarding stdout.
call('/bin/bash -c "cd Exeter_CFD_Problems/ESP/foamWorkingDir\ncreateBaffles -overwrite"',shell=True,stdout = open(os.devnull,'wb'))
call('/bin/bash -c "cd Exeter_CFD_Problems/ESP/foamWorkingDir\nsimpleFoam"',shell=True,stdout = open(os.devnull,'wb'))
call('/bin/bash -c "cd Exeter_CFD_Problems/ESP/foamWorkingDir\npostProcess -func sampleDict -latestTime"',shell=True,stdout = open(os.devnull,'wb'))
# Exit with the post-processing script's return code so callers see failures.
sys.exit(call("python2.7 Exeter_CFD_Problems/ESP/postProcessConsole.py",shell=True))
| 42.172414
| 148
| 0.774325
|
4a0b261388cc7421f1ea5fe93c0bbebb6cccfd82
| 4,007
|
py
|
Python
|
tests/test_float.py
|
prakritichauhan07/python-rapidjson
|
936cecc2f95215a55f1956066e3a0e4b2d262eb7
|
[
"MIT"
] | 341
|
2017-01-23T09:40:08.000Z
|
2022-03-25T07:10:41.000Z
|
tests/test_float.py
|
prakritichauhan07/python-rapidjson
|
936cecc2f95215a55f1956066e3a0e4b2d262eb7
|
[
"MIT"
] | 109
|
2017-01-20T20:15:40.000Z
|
2022-02-19T08:51:40.000Z
|
tests/test_float.py
|
prakritichauhan07/python-rapidjson
|
936cecc2f95215a55f1956066e3a0e4b2d262eb7
|
[
"MIT"
] | 40
|
2017-02-28T11:49:26.000Z
|
2022-03-07T23:00:52.000Z
|
# -*- coding: utf-8 -*-
# :Project: python-rapidjson -- Tests on floats
# :Author: John Anderson <sontek@gmail.com>
# :License: MIT License
# :Copyright: © 2015 John Anderson
# :Copyright: © 2016, 2017, 2020 Lele Gaifax
#
from decimal import Decimal
import math
import pytest
import rapidjson as rj
def test_infinity_f():
    """+inf round-trips via module-level dumps/loads; NaN/inf modes enforced."""
    inf = float("inf")
    dumped = rj.dumps(inf)
    loaded = rj.loads(dumped)
    assert loaded == inf
    dumped = rj.dumps(inf, allow_nan=True)
    loaded = rj.loads(dumped, allow_nan=True)
    assert loaded == inf
    # Disallowing non-numbers must reject infinity at dump time.
    with pytest.raises(ValueError):
        rj.dumps(inf, number_mode=None)
    with pytest.raises(ValueError):
        rj.dumps(inf, allow_nan=False)
    d = Decimal(inf)
    assert d.is_infinite()
    with pytest.raises(ValueError):
        rj.dumps(d, number_mode=rj.NM_DECIMAL)
    dumped = rj.dumps(d, number_mode=rj.NM_DECIMAL, allow_nan=True)
    loaded = rj.loads(dumped, number_mode=rj.NM_DECIMAL|rj.NM_NAN)
    assert loaded == inf
    assert loaded.is_infinite()
def test_infinity_c():
    """Same +inf round-trip checks via the Encoder/Decoder class API."""
    inf = float("inf")
    dumped = rj.Encoder()(inf)
    loaded = rj.Decoder()(dumped)
    assert loaded == inf
    with pytest.raises(ValueError):
        rj.Encoder(number_mode=None)(inf)
    d = Decimal(inf)
    assert d.is_infinite()
    with pytest.raises(ValueError):
        rj.Encoder(number_mode=rj.NM_DECIMAL)(d)
    dumped = rj.Encoder(number_mode=rj.NM_DECIMAL|rj.NM_NAN)(d)
    loaded = rj.Decoder(number_mode=rj.NM_DECIMAL|rj.NM_NAN)(dumped)
    assert loaded == inf
    assert loaded.is_infinite()
def test_negative_infinity_f():
    """-inf round-trips via module-level dumps/loads; NaN/inf modes enforced."""
    inf = float("-infinity")
    dumped = rj.dumps(inf)
    loaded = rj.loads(dumped)
    assert loaded == inf
    dumped = rj.dumps(inf, allow_nan=True)
    loaded = rj.loads(dumped, allow_nan=True)
    assert loaded == inf
    with pytest.raises(ValueError):
        rj.dumps(inf, number_mode=None)
    with pytest.raises(ValueError):
        rj.dumps(inf, allow_nan=False)
    d = Decimal(inf)
    assert d.is_infinite()
    with pytest.raises(ValueError):
        rj.dumps(d, number_mode=rj.NM_DECIMAL)
    dumped = rj.dumps(d, number_mode=rj.NM_DECIMAL|rj.NM_NAN)
    loaded = rj.loads(dumped, number_mode=rj.NM_DECIMAL, allow_nan=True)
    assert loaded == inf
    assert loaded.is_infinite()
def test_negative_infinity_c():
    """Negative infinity round-trips through Encoder/Decoder instances."""
    negative_inf = float("-infinity")
    assert rj.Decoder()(rj.Encoder()(negative_inf)) == negative_inf
    with pytest.raises(ValueError):
        rj.Encoder(number_mode=None)(negative_inf)
    # Decimal -Infinity needs NM_NAN combined with NM_DECIMAL.
    dec_inf = Decimal(negative_inf)
    assert dec_inf.is_infinite()
    with pytest.raises(ValueError):
        rj.Encoder(number_mode=rj.NM_DECIMAL)(dec_inf)
    mode = rj.NM_DECIMAL | rj.NM_NAN
    restored = rj.Decoder(number_mode=mode)(rj.Encoder(number_mode=mode)(dec_inf))
    assert restored == negative_inf
    assert restored.is_infinite()
def test_nan_f():
    """NaN survives a dumps()/loads() round trip by default."""
    not_a_number = float("nan")
    assert math.isnan(not_a_number)
    assert math.isnan(rj.loads(rj.dumps(not_a_number)))
    # Disabling number handling (or NaN support) must reject NaN.
    with pytest.raises(ValueError):
        rj.dumps(not_a_number, number_mode=None)
    with pytest.raises(ValueError):
        rj.dumps(not_a_number, allow_nan=False)
    # Decimal NaN: rejected under plain NM_DECIMAL, accepted with NM_NAN.
    dec_nan = Decimal(not_a_number)
    assert dec_nan.is_nan()
    with pytest.raises(ValueError):
        rj.dumps(dec_nan, number_mode=rj.NM_DECIMAL)
    mode = rj.NM_DECIMAL | rj.NM_NAN
    assert rj.loads(rj.dumps(dec_nan, number_mode=mode), number_mode=mode).is_nan()
def test_nan_c():
    """NaN survives an Encoder/Decoder round trip by default."""
    not_a_number = float("nan")
    assert math.isnan(not_a_number)
    assert math.isnan(rj.Decoder()(rj.Encoder()(not_a_number)))
    # With number handling disabled, NaN must be rejected.
    with pytest.raises(ValueError):
        rj.Encoder(number_mode=None)(not_a_number)
    # Decimal NaN needs NM_NAN combined with NM_DECIMAL.
    dec_nan = Decimal(not_a_number)
    assert dec_nan.is_nan()
    with pytest.raises(ValueError):
        rj.Encoder(number_mode=rj.NM_DECIMAL)(dec_nan)
    mode = rj.NM_DECIMAL | rj.NM_NAN
    assert rj.Decoder(number_mode=mode)(rj.Encoder(number_mode=mode)(dec_nan)).is_nan()
| 24.734568
| 72
| 0.667083
|
4a0b26302e48033749e7b72fa82a13c4d135eda6
| 1,037
|
py
|
Python
|
tests/test_merge.py
|
parlaylabs/model
|
d6e9e5bede6b6d31bdf3f29a9873b5faa69d7ec5
|
[
"Apache-2.0"
] | 4
|
2020-05-03T05:27:51.000Z
|
2021-10-19T22:45:36.000Z
|
tests/test_merge.py
|
parlaylabs/model
|
d6e9e5bede6b6d31bdf3f29a9873b5faa69d7ec5
|
[
"Apache-2.0"
] | null | null | null |
tests/test_merge.py
|
parlaylabs/model
|
d6e9e5bede6b6d31bdf3f29a9873b5faa69d7ec5
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from model import utils
import jsonmerge
# Base document: the merge target used by test_merge_dict.
a = dict(
    this="that",
    nest=dict(a=1, b=2, c=3),
    lst=[1, 2, 3],
    d=[{"name": "foo", "val": 9}, {"name": "bar", "val": 11}],
)
# Overlay document merged on top of ``a``; overlaps on nest/lst/d.
b = dict(
    flubber="blubber",
    nest=dict(a=99, b=2),
    lst=[3, 4, 4],
    d=[{"name": "alpha", "val": 1}, {"name": "bar", "val": 1}],
)
def test_merge_dict():
    """Merge ``b`` on top of ``a`` with jsonmerge per-key strategies."""
    # 'd' merges element-wise keyed on "name"; 'lst' is concatenated;
    # everything else uses jsonmerge's default (objectMerge) strategy.
    merge_schema = {
        "properties": {
            "d": {
                "mergeStrategy": "arrayMergeById",
                "mergeOptions": {"idRef": "name"},
            },
            "lst": {"mergeStrategy": "append"},
        }
    }
    merged = jsonmerge.Merger(merge_schema).merge(a, b)
    expected = {
        "d": [
            {"name": "foo", "val": 9},
            {"name": "bar", "val": 1},
            {"name": "alpha", "val": 1},
        ],
        "flubber": "blubber",
        "lst": [1, 2, 3, 3, 4, 4],
        "nest": {"a": 99, "b": 2, "c": 3},
        "this": "that",
    }
    assert merged == expected
| 22.543478
| 63
| 0.419479
|
4a0b26b26a77dbf8e2b591455da23d884d0967b6
| 95
|
py
|
Python
|
app/apps/versioning/apps.py
|
lawi21/escriptorium
|
6043b0cb3894bb308a37ed97a26114dcea883834
|
[
"MIT"
] | 4
|
2021-09-21T09:15:24.000Z
|
2022-02-12T13:36:33.000Z
|
app/apps/versioning/apps.py
|
lawi21/escriptorium
|
6043b0cb3894bb308a37ed97a26114dcea883834
|
[
"MIT"
] | 1
|
2021-11-30T12:04:11.000Z
|
2021-11-30T12:04:11.000Z
|
app/apps/versioning/apps.py
|
stweil/escriptorium
|
63a063f2dbecebe9f79aa6376e99030f49a02502
|
[
"MIT"
] | 2
|
2021-11-10T09:39:52.000Z
|
2022-01-10T08:52:40.000Z
|
from django.apps import AppConfig
class VersioningConfig(AppConfig):
    """Django AppConfig for the ``versioning`` application."""
    name = 'versioning'
| 15.833333
| 34
| 0.768421
|
4a0b27d2aab9ac545d42712bece78ef05bef10ec
| 11,638
|
py
|
Python
|
src/oci/data_integration/models/connection_validation.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 249
|
2017-09-11T22:06:05.000Z
|
2022-03-04T17:09:29.000Z
|
src/oci/data_integration/models/connection_validation.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 228
|
2017-09-11T23:07:26.000Z
|
2022-03-23T10:58:50.000Z
|
src/oci/data_integration/models/connection_validation.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 224
|
2017-09-27T07:32:43.000Z
|
2022-03-25T16:55:42.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ConnectionValidation(object):
    """
    The information about connection validation.

    Auto-generated OCI SDK model: plain attribute storage behind trivial
    properties, plus swagger metadata driving (de)serialization.
    """
    def __init__(self, **kwargs):
        """
        Initializes a new ConnectionValidation object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param validation_message:
            The value to assign to the validation_message property of this ConnectionValidation.
        :type validation_message: oci.data_integration.models.Message
        :param key:
            The value to assign to the key property of this ConnectionValidation.
        :type key: str
        :param model_type:
            The value to assign to the model_type property of this ConnectionValidation.
        :type model_type: str
        :param model_version:
            The value to assign to the model_version property of this ConnectionValidation.
        :type model_version: str
        :param parent_ref:
            The value to assign to the parent_ref property of this ConnectionValidation.
        :type parent_ref: oci.data_integration.models.ParentReference
        :param name:
            The value to assign to the name property of this ConnectionValidation.
        :type name: str
        :param description:
            The value to assign to the description property of this ConnectionValidation.
        :type description: str
        :param object_version:
            The value to assign to the object_version property of this ConnectionValidation.
        :type object_version: int
        :param object_status:
            The value to assign to the object_status property of this ConnectionValidation.
        :type object_status: int
        :param identifier:
            The value to assign to the identifier property of this ConnectionValidation.
        :type identifier: str
        :param metadata:
            The value to assign to the metadata property of this ConnectionValidation.
        :type metadata: oci.data_integration.models.ObjectMetadata
        """
        # Attribute name -> swagger type, consumed by the SDK (de)serializer.
        self.swagger_types = {
            'validation_message': 'Message',
            'key': 'str',
            'model_type': 'str',
            'model_version': 'str',
            'parent_ref': 'ParentReference',
            'name': 'str',
            'description': 'str',
            'object_version': 'int',
            'object_status': 'int',
            'identifier': 'str',
            'metadata': 'ObjectMetadata'
        }
        # Attribute name -> wire (JSON) field name.
        self.attribute_map = {
            'validation_message': 'validationMessage',
            'key': 'key',
            'model_type': 'modelType',
            'model_version': 'modelVersion',
            'parent_ref': 'parentRef',
            'name': 'name',
            'description': 'description',
            'object_version': 'objectVersion',
            'object_status': 'objectStatus',
            'identifier': 'identifier',
            'metadata': 'metadata'
        }
        # Backing fields; the decorator populates them from **kwargs.
        self._validation_message = None
        self._key = None
        self._model_type = None
        self._model_version = None
        self._parent_ref = None
        self._name = None
        self._description = None
        self._object_version = None
        self._object_status = None
        self._identifier = None
        self._metadata = None
    @property
    def validation_message(self):
        """
        Gets the validation_message of this ConnectionValidation.
        :return: The validation_message of this ConnectionValidation.
        :rtype: oci.data_integration.models.Message
        """
        return self._validation_message
    @validation_message.setter
    def validation_message(self, validation_message):
        """
        Sets the validation_message of this ConnectionValidation.
        :param validation_message: The validation_message of this ConnectionValidation.
        :type: oci.data_integration.models.Message
        """
        self._validation_message = validation_message
    @property
    def key(self):
        """
        Gets the key of this ConnectionValidation.
        Objects will use a 36 character key as unique ID. It is system generated and cannot be modified.
        :return: The key of this ConnectionValidation.
        :rtype: str
        """
        return self._key
    @key.setter
    def key(self, key):
        """
        Sets the key of this ConnectionValidation.
        Objects will use a 36 character key as unique ID. It is system generated and cannot be modified.
        :param key: The key of this ConnectionValidation.
        :type: str
        """
        self._key = key
    @property
    def model_type(self):
        """
        Gets the model_type of this ConnectionValidation.
        The type of the object.
        :return: The model_type of this ConnectionValidation.
        :rtype: str
        """
        return self._model_type
    @model_type.setter
    def model_type(self, model_type):
        """
        Sets the model_type of this ConnectionValidation.
        The type of the object.
        :param model_type: The model_type of this ConnectionValidation.
        :type: str
        """
        self._model_type = model_type
    @property
    def model_version(self):
        """
        Gets the model_version of this ConnectionValidation.
        The model version of an object.
        :return: The model_version of this ConnectionValidation.
        :rtype: str
        """
        return self._model_version
    @model_version.setter
    def model_version(self, model_version):
        """
        Sets the model_version of this ConnectionValidation.
        The model version of an object.
        :param model_version: The model_version of this ConnectionValidation.
        :type: str
        """
        self._model_version = model_version
    @property
    def parent_ref(self):
        """
        Gets the parent_ref of this ConnectionValidation.
        :return: The parent_ref of this ConnectionValidation.
        :rtype: oci.data_integration.models.ParentReference
        """
        return self._parent_ref
    @parent_ref.setter
    def parent_ref(self, parent_ref):
        """
        Sets the parent_ref of this ConnectionValidation.
        :param parent_ref: The parent_ref of this ConnectionValidation.
        :type: oci.data_integration.models.ParentReference
        """
        self._parent_ref = parent_ref
    @property
    def name(self):
        """
        Gets the name of this ConnectionValidation.
        Free form text without any restriction on permitted characters. Name can have letters, numbers, and special characters. The value is editable and is restricted to 1000 characters.
        :return: The name of this ConnectionValidation.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """
        Sets the name of this ConnectionValidation.
        Free form text without any restriction on permitted characters. Name can have letters, numbers, and special characters. The value is editable and is restricted to 1000 characters.
        :param name: The name of this ConnectionValidation.
        :type: str
        """
        self._name = name
    @property
    def description(self):
        """
        Gets the description of this ConnectionValidation.
        Detailed description for the object.
        :return: The description of this ConnectionValidation.
        :rtype: str
        """
        return self._description
    @description.setter
    def description(self, description):
        """
        Sets the description of this ConnectionValidation.
        Detailed description for the object.
        :param description: The description of this ConnectionValidation.
        :type: str
        """
        self._description = description
    @property
    def object_version(self):
        """
        Gets the object_version of this ConnectionValidation.
        The version of the object that is used to track changes in the object instance.
        :return: The object_version of this ConnectionValidation.
        :rtype: int
        """
        return self._object_version
    @object_version.setter
    def object_version(self, object_version):
        """
        Sets the object_version of this ConnectionValidation.
        The version of the object that is used to track changes in the object instance.
        :param object_version: The object_version of this ConnectionValidation.
        :type: int
        """
        self._object_version = object_version
    @property
    def object_status(self):
        """
        Gets the object_status of this ConnectionValidation.
        The status of an object that can be set to value 1 for shallow references across objects, other values reserved.
        :return: The object_status of this ConnectionValidation.
        :rtype: int
        """
        return self._object_status
    @object_status.setter
    def object_status(self, object_status):
        """
        Sets the object_status of this ConnectionValidation.
        The status of an object that can be set to value 1 for shallow references across objects, other values reserved.
        :param object_status: The object_status of this ConnectionValidation.
        :type: int
        """
        self._object_status = object_status
    @property
    def identifier(self):
        """
        Gets the identifier of this ConnectionValidation.
        Value can only contain upper case letters, underscore and numbers. It should begin with upper case letter or underscore. The value can be modified.
        :return: The identifier of this ConnectionValidation.
        :rtype: str
        """
        return self._identifier
    @identifier.setter
    def identifier(self, identifier):
        """
        Sets the identifier of this ConnectionValidation.
        Value can only contain upper case letters, underscore and numbers. It should begin with upper case letter or underscore. The value can be modified.
        :param identifier: The identifier of this ConnectionValidation.
        :type: str
        """
        self._identifier = identifier
    @property
    def metadata(self):
        """
        Gets the metadata of this ConnectionValidation.
        :return: The metadata of this ConnectionValidation.
        :rtype: oci.data_integration.models.ObjectMetadata
        """
        return self._metadata
    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this ConnectionValidation.
        :param metadata: The metadata of this ConnectionValidation.
        :type: oci.data_integration.models.ObjectMetadata
        """
        self._metadata = metadata
    def __repr__(self):
        return formatted_flat_dict(self)
    def __eq__(self, other):
        # Structural equality over all backing attributes.
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
| 31.539295
| 245
| 0.645042
|
4a0b28107e754c10085f32a8ff425f68238986ec
| 2,314
|
py
|
Python
|
p7_ex5_difference_calculator_orig.py
|
eduardo-jh/HW02_Binary_Fission
|
22a3cc619946ec875b13f26fbe788e23511844b9
|
[
"MIT"
] | null | null | null |
p7_ex5_difference_calculator_orig.py
|
eduardo-jh/HW02_Binary_Fission
|
22a3cc619946ec875b13f26fbe788e23511844b9
|
[
"MIT"
] | null | null | null |
p7_ex5_difference_calculator_orig.py
|
eduardo-jh/HW02_Binary_Fission
|
22a3cc619946ec875b13f26fbe788e23511844b9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
BE523 Biosystems Analysis & Design
HW2 - Problem 7. Bacteria growth, difference calculator
https://mathinsight.org/bacteria_growth_initial_model_exercises Exercise 5

Fits the discrete model dB = slope * B to measured densities, then compares
the numerical solution B[0]*(1+slope)^(t/dt) with the exact exponential
B[0]*exp(K*t).

Created on Thu Jan 21 12:11:46 2021
@author: eduardo
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats

# Bacteria density data
# B = np.array([0.028, 0.047, 0.082, 0.141, 0.240, 0.381]) # Exercise 5
B = np.array([0.022, 0.036, 0.060, 0.101, 0.169, 0.266])  # From problem 2

steps = len(B)  # Adjust the length of vectors to the number of steps
dB = np.zeros(steps)      # increments B[i] - B[i-1]; index 0 stays 0
Bgraph = np.zeros(steps)  # density at the *start* of each interval
dt = 16  # time interval (minutes between measurements)
t = np.linspace(0, (steps-1)*dt, steps)  # actual time vector

for i in range(1, steps):
    dB[i] = B[i] - B[i-1]  # compute the increment between time steps
    Bgraph[i] = B[i-1]
print(B, Bgraph)

# Perform a linear regression with the data.
# NOTE(review): index 0 of (Bgraph, dB) is the (0, 0) placeholder left by
# np.zeros, not a measurement — confirm it is meant to be part of the fit.
slope, intercept, r_value, p_value, std_err = stats.linregress(Bgraph, dB)

# Figure 1, plotting dB vs B with the zero-intercept fit line
plt.figure(1)
plt.plot(Bgraph, dB, 'bx', Bgraph, slope*Bgraph, 'r-')  # plot and linear eq.
plt.legend(['data', 'linear regression $R^2$=%.3f' % r_value**2], loc='best')
plt.xlabel('B')
plt.ylabel('dB')
plt.savefig('p7_bacteria_linear.png', dpi=300, bbox_inches='tight')
print('slope =', slope, 'intercept =', intercept)

# Generate an exponential equation ('exact solution')
tdouble = np.log(2)/np.log(1+slope)*dt  # doubling time of the discrete model
K = np.log(2)/tdouble                   # continuous growth-rate constant
Bexp = B[0] * np.exp(K*t)
print('tdouble =', tdouble, 'K =', K)

# Make 'predictions' using the analytical solution to the linear dynamical
# system (also an exponential equation) in the form B(t) = B[0]*R^t with R>1;
# each value depends only on the time 't', not on the previous value.
Bmodel = B[0]*pow(slope+1, t/dt)
print("The population after %d steps is: %.3f" % (steps, Bmodel[-1]))

# Figure 2, plotting B vs t.
# The legend entries are raw strings: '\c' in '$\cdot$' is an invalid escape
# sequence (SyntaxWarning on modern Python); the rendered text is unchanged.
plt.figure(2)
plt.plot(t, B, 'bx', t, Bmodel, 'r-', t, Bexp, 'k+')  # data vs exponential eqs.
plt.legend(['data',
            r'numerical B=%g$\cdot$(1+%.4f)$^t$' % (B[0], slope),
            r'exact B=%g$\cdot$exp(%.4f$\cdot$t)' % (B[0], K)],
           loc='best')
plt.xlabel('Time (minutes)')
plt.ylabel('Bacteria population')
plt.savefig('p7_bacteria_%dsteps.png' % steps, dpi=300, bbox_inches='tight')
| 35.6
| 94
| 0.668539
|
4a0b28eded1289dd1eca944d6365e2f3f8b69925
| 2,058
|
py
|
Python
|
restorm/registry.py
|
gt3389b/restorm
|
7901ffb8d05589508604e84352257486f350ac79
|
[
"MIT"
] | 3
|
2017-01-03T16:41:35.000Z
|
2020-09-17T09:39:02.000Z
|
restorm/registry.py
|
gt3389b/restorm
|
7901ffb8d05589508604e84352257486f350ac79
|
[
"MIT"
] | null | null | null |
restorm/registry.py
|
gt3389b/restorm
|
7901ffb8d05589508604e84352257486f350ac79
|
[
"MIT"
] | 3
|
2017-01-02T18:59:02.000Z
|
2020-09-17T09:53:31.000Z
|
import warnings
from collections import OrderedDict, defaultdict
class Registry(object):
    """
    Registry class for Restorm Resources.
    Adapted from django.apps.registry.Apps.
    """
    def __init__(self):
        # Two-level mapping: app label -> resource name -> resource class.
        # Every imported resource registers itself here via
        # ResourceBase.__new__, whether or not its app is "installed";
        # the mapping is never reset because modules cannot be safely
        # re-imported (their initialization code would run again).
        self.all_resources = defaultdict(OrderedDict)

    def register(self, app_label, resource):
        """
        Register *resource* under *app_label*.

        Re-registering the same class only warns; registering a different
        class under an existing resource name raises RuntimeError.
        Taken from django.apps.registry.Apps#register_model
        """
        resource_name = getattr(resource._meta, 'resource_name', None)
        if not resource_name:
            raise RuntimeError(
                "Resource '%s.%s' does not define a resource_name and is not "
                "marked abstract." % (app_label, resource.__name__))
        bucket = self.all_resources[app_label]
        existing = bucket.get(resource_name)
        if existing is not None:
            same_class = (resource.__name__ == existing.__name__
                          and resource.__module__ == existing.__module__)
            if same_class:
                warnings.warn(
                    "Resource '%s.%s' was already registered." % (app_label, resource_name),
                    RuntimeWarning, stacklevel=2)
            else:
                raise RuntimeError(
                    "Conflicting '%s' resources in application '%s':"
                    "%s and %s." % (resource_name, app_label, existing, resource))
        bucket[resource_name] = resource
registry = Registry()
| 38.830189
| 78
| 0.622449
|
4a0b295fe27259d1f29b3c16d40f307bc1330aeb
| 50,492
|
py
|
Python
|
overwrite_inventory/models/models.py
|
xpheragroup/Preproductivo2
|
b176d3e0500ae0055b61d1370bff4eb40a129724
|
[
"MIT"
] | null | null | null |
overwrite_inventory/models/models.py
|
xpheragroup/Preproductivo2
|
b176d3e0500ae0055b61d1370bff4eb40a129724
|
[
"MIT"
] | null | null | null |
overwrite_inventory/models/models.py
|
xpheragroup/Preproductivo2
|
b176d3e0500ae0055b61d1370bff4eb40a129724
|
[
"MIT"
] | null | null | null |
from copy import copy
import datetime
from odoo import api, fields, models, _
from odoo.exceptions import UserError, ValidationError
from odoo.osv import expression
from odoo.tools import float_compare, float_is_zero
from odoo.addons.base.models.ir_model import MODULE_UNINSTALL_FLAG
from odoo.tools.float_utils import float_round, float_compare, float_is_zero
class Inventory(models.Model):
    """Enable chatter tracking on the core ``stock.inventory`` fields.

    NOTE(review): the field redefinitions below pass only ``tracking=1`` and
    no comodel/positional arguments; this relies on Odoo merging them with
    the inherited field definitions — confirm against the Odoo version used.
    """
    _name = 'stock.inventory'
    _inherit = ['stock.inventory', 'mail.thread']
    name = fields.Char(tracking=1)
    date = fields.Datetime(tracking=1)
    line_ids = fields.One2many(tracking=1)
    move_ids = fields.One2many(tracking=1)
    state = fields.Selection(tracking=1)
    company_id = fields.Many2one(tracking=1)
    location_ids = fields.Many2many(tracking=1)
    product_ids = fields.Many2many(tracking=1)
    start_empty = fields.Boolean(tracking=1)
    prefill_counted_quantity = fields.Selection(tracking=1)
class Inventory(models.Model):
    """Adjustment-type workflow on ``stock.inventory``.

    Adds an adjustment kind (count / difference / write-off), audit fields
    for who created/validated the inventory, and overrides line generation
    and validation accordingly.

    NOTE(review): this second ``Inventory`` class shadows the tracking-only
    class above at the Python level; both still take effect because each is
    registered with the ORM through the Odoo metaclass.
    """
    _inherit = "stock.inventory"
    # Selection values for the kind of adjustment being performed.
    AJUSTES = [('conteo', 'Por conteo'), ('diferencia',
                'Por diferencia'), ('baja', 'Baja de inventario')]
    # Scrap destination used when the adjustment is a write-off ('baja').
    location_dest_id = fields.Many2one('stock.location', 'Location destiny', check_company=True,
                                       domain="[['scrap_location', '=', True]]",
                                       index=True)
    ajuste = fields.Selection(AJUSTES,
                              string='Tipo de ajuste',
                              readonly=True,
                              states={'draft': [('readonly', False)]},
                              help="Tipo de ajuste del inventario.")
    # Audit trail: who created / validated the inventory, and when.
    user_cre = fields.Many2one('res.users', string='Creó', required=False, copy=False)
    date_cre = fields.Datetime(string='Fecha creación', copy=False)
    user_val = fields.Many2one('res.users', string='Validó', required=False, copy=False)
    # NOTE(review): label 'Fecha validació' looks truncated — confirm wording.
    date_val = fields.Datetime(string='Fecha validació', copy=False)
    notes = fields.Char('Notas')
    def action_open_inventory_lines(self):
        """Open the inventory-line list view matching ``self.ajuste``."""
        self.ensure_one()
        if self.ajuste == 'conteo':
            action = {
                'type': 'ir.actions.act_window',
                'views': [(self.env.ref('overwrite_inventory.stock_inventory_line_tree3').id, 'tree')],
                'view_mode': 'tree',
                'name': _('Por conteo'),
                'res_model': 'stock.inventory.line',
            }
        elif self.ajuste == 'baja':
            action = {
                'type': 'ir.actions.act_window',
                'views': [(self.env.ref('overwrite_inventory.stock_inventory_line_tree5').id, 'tree')],
                'view_mode': 'tree',
                'name': _('Baja de inventario'),
                'res_model': 'stock.inventory.line',
            }
        else:
            action = {
                'type': 'ir.actions.act_window',
                'views': [(self.env.ref('overwrite_inventory.stock_inventory_line_tree4').id, 'tree')],
                'view_mode': 'tree',
                'name': _('por Diferencia'),
                'res_model': 'stock.inventory.line',
            }
        context = {
            'default_is_editable': True,
            'default_inventory_id': self.id,
            'default_company_id': self.company_id.id,
        }
        # Define domains and context
        domain = [
            ('inventory_id', '=', self.id),
            ('location_id.usage', 'in', ['internal', 'transit'])
        ]
        if self.location_ids:
            context['default_location_id'] = self.location_ids[0].id
            if len(self.location_ids) == 1:
                if not self.location_ids[0].child_ids:
                    context['readonly_location_id'] = True
        if self.product_ids:
            if len(self.product_ids) == 1:
                context['default_product_id'] = self.product_ids[0].id
        action['context'] = context
        action['domain'] = domain
        return action
    def _get_inventory_lines_values(self):
        """Build the value dicts used to prefill inventory lines.

        Gathers quants for the selected locations (optionally restricted by
        ``product_ids`` and ``company_id``) straight from SQL and returns one
        dict per (product, location, lot, package, owner) combination.
        """
        # TDE CLEANME: is sql really necessary ? I don't think so
        locations = self.env['stock.location']
        if self.location_ids:
            locations = self.env['stock.location'].search(
                [('id', 'child_of', self.location_ids.ids)])
        else:
            locations = self.env['stock.location'].search(
                [('company_id', '=', self.company_id.id), ('usage', 'in', ['internal', 'transit'])])
        domain = ' sq.location_id in %s AND pp.active'
        args = (tuple(locations.ids),)
        vals = []
        Product = self.env['product.product']
        # Empty recordset of products available in stock_quants
        quant_products = self.env['product.product']
        # If inventory by company
        if self.company_id:
            domain += ' AND sq.company_id = %s'
            args += (self.company_id.id,)
        if self.product_ids:
            domain += ' AND sq.product_id in %s'
            args += (tuple(self.product_ids.ids),)
            # Perishable products without any quant get a zero quant created
            # so they still appear as inventory lines.
            for product in self.product_ids:
                stock_quants = self.env['stock.quant'].search(
                    ['&', ['product_id', '=', product.id], ['location_id', 'in', locations.ids]])
                if len(stock_quants) < 1 and product.x_studio_perecedero:
                    for location in locations.ids:
                        self.env['stock.quant'].create({
                            'product_id': product.id,
                            'location_id': location,
                            'company_id': self.company_id.id
                        })
        # Flush pending ORM writes so the raw SQL below sees current data.
        self.env['stock.quant'].flush(
            ['company_id', 'product_id', 'quantity', 'location_id', 'lot_id', 'package_id', 'owner_id'])
        self.env['product.product'].flush(['active'])
        self.env.cr.execute("""SELECT sq.product_id, sum(sq.quantity) as product_qty, sq.location_id, sq.lot_id as prod_lot_id, sq.package_id, sq.owner_id as partner_id
            FROM stock_quant sq
            LEFT JOIN product_product pp
            ON pp.id = sq.product_id
            WHERE %s
            GROUP BY sq.product_id, sq.location_id, sq.lot_id, sq.package_id, sq.owner_id """ % domain, args)
        for product_data in self.env.cr.dictfetchall():
            product_data['company_id'] = self.company_id.id
            product_data['inventory_id'] = self.id
            product_data['revisado'] = False
            # replace the None the dictionary by False, because falsy values are tested later on
            for void_field in [item[0] for item in product_data.items() if item[1] is None]:
                product_data[void_field] = False
            product_data['theoretical_qty'] = product_data['product_qty']
            if self.prefill_counted_quantity == 'zero':
                if 'difference_qty_2' in product_data.keys():
                    product_data['product_qty'] = 0 + \
                        product_data['difference_qty_2']
                else:
                    product_data['product_qty'] = 0
            if product_data['product_id']:
                product_data['product_uom_id'] = Product.browse(
                    product_data['product_id']).uom_id.id
                quant_products |= Product.browse(product_data['product_id'])
            vals.append(product_data)
        return vals
    def action_validate(self):
        """Validate the adjustment, then stamp the validating user/date."""
        if not self.exists():
            return
        self.ensure_one()
        if not (self.user_has_groups('overwrite_inventory.inventory_adjustment_user') or self.user_has_groups('stock.group_stock_manager')):
            raise UserError(_("Only a stock manager can validate an inventory adjustment."))
        if self.state != 'confirm':
            raise UserError(_(
                "You can't validate the inventory '%s', maybe this inventory " +
                "has been already validated or isn't ready.") % (self.name))
        # Lot/serial-tracked lines counted without a lot need an explicit
        # confirmation wizard before the inventory can be closed.
        inventory_lines = self.line_ids.filtered(lambda l: l.product_id.tracking in ['lot', 'serial'] and not l.prod_lot_id and l.theoretical_qty != l.product_qty)
        lines = self.line_ids.filtered(lambda l: float_compare(l.product_qty, 1, precision_rounding=l.product_uom_id.rounding) > 0 and l.product_id.tracking == 'serial' and l.prod_lot_id)
        if inventory_lines and not lines:
            wiz_lines = [(0, 0, {'product_id': product.id, 'tracking': product.tracking}) for product in inventory_lines.mapped('product_id')]
            wiz = self.env['stock.track.confirmation'].create({'inventory_id': self.id, 'tracking_line_ids': wiz_lines})
            return {
                'name': _('Tracked Products in Inventory Adjustment'),
                'type': 'ir.actions.act_window',
                'view_mode': 'form',
                'views': [(False, 'form')],
                'res_model': 'stock.track.confirmation',
                'target': 'new',
                'res_id': wiz.id,
            }
        self._action_done()
        self.line_ids._check_company()
        self._check_company()
        for inventory in self:
            inventory.write({'user_val': self.env.uid})
            inventory.write({'date_val': datetime.datetime.now()})
        return True
    def _action_done(self):
        """Guard checks, then post the inventory moves and mark it done."""
        negative = next((line for line in self.mapped(
            'line_ids') if line.product_qty < 0 and line.product_qty != line.theoretical_qty), False)
        not_checked = next((line for line in self.mapped(
            'line_ids') if not line.revisado), False)
        negative_lost = next((line for line in self.mapped(
            'line_ids') if line.perdida < 0), False)
        # NOTE(review): debug print left in — consider logging instead.
        print(not_checked)
        if negative:
            raise UserError(_('You cannot set a negative product quantity in an inventory line:\n\t%s - qty: %s') %
                            (negative.product_id.name, negative.product_qty))
        if not_checked:
            raise UserError(_('No se ha revisado algún producto.'))
        if negative_lost:
            raise UserError(_('Algún producto tiene pérdida negativa.'))
        self.action_check()
        self.write({'state': 'done'})
        self.post_inventory()
        return True
    @api.model
    def create(self, vals):
        """Stamp the creating user and timestamp before creation."""
        vals['user_cre'] = self.env.uid
        vals['date_cre'] = datetime.datetime.now()
        res = super(Inventory, self).create(vals)
        return res
class InventoryLine(models.Model):
    """Review/loss fields and derived quantities on inventory lines."""
    _inherit = "stock.inventory.line"
    revisado = fields.Boolean('Revisado', required=True)
    motivo_de_baja = fields.Selection([
        ('obs', 'Obsolecencia de Bien'),
        ('da', 'Daño'),
        ('fec', 'Fecha de Vencimiento'),
        ('hur', 'Hurto')],
        string='Motivo de Baja')
    showed_qty = fields.Float('Contado',
                              help="Campo que muestra la cantidad contada.",
                              compute="update_showed_quantity",
                              digits='Product Unit of Measure', default=0)
    difference_qty_2 = fields.Float('Diferencia',
                                    help="Diferencia ingresada para el cálculo de la cantidad contada.",
                                    digits='Product Unit of Measure', default=0)
    perdida = fields.Float('Pérdida',
                           help="Productos perdidos.",
                           digits='Product Unit of Measure', default=0)
    prueba = fields.Image('Evidencia')
    costo = fields.Float(related='product_id.standard_price')
    total_perdida = fields.Float(compute='_compute_lost')
    disposicion_final = fields.Char()
    fecha_disposicion_final = fields.Date()
    @ api.depends('costo', 'perdida')
    def _compute_lost(self):
        # Monetary value of the lost quantity.
        for line in self:
            line.total_perdida = line.costo * line.perdida
    @ api.onchange('perdida')
    def update_quantity_by_perdida(self):
        # Counted qty = theoretical minus declared loss.
        for line in self:
            line.product_qty = line.theoretical_qty - line.perdida
    @ api.onchange('difference_qty_2')
    def update_quantity_by_difference(self):
        # Counted qty = theoretical plus entered difference.
        for line in self:
            line.product_qty = line.theoretical_qty + line.difference_qty_2
    @ api.onchange('product_qty')
    def update_showed_quantity(self):
        # Mirror product_qty into the computed "Contado" field.
        for line in self:
            line.showed_qty = line.product_qty
    @ api.onchange('product_id', 'location_id', 'product_uom_id', 'prod_lot_id', 'partner_id', 'package_id')
    def _onchange_quantity_context(self):
        """Recompute the theoretical quantity when the line context changes."""
        # NOTE(review): this local is never read below.
        product_qty = False
        if self.product_id:
            self.product_uom_id = self.product_id.uom_id
        # TDE FIXME: last part added because crash
        if self.product_id and self.location_id and self.product_id.uom_id.category_id == self.product_uom_id.category_id:
            theoretical_qty = self.product_id.get_theoretical_quantity(
                self.product_id.id,
                self.location_id.id,
                lot_id=self.prod_lot_id.id,
                package_id=self.package_id.id,
                owner_id=self.partner_id.id,
                to_uom=self.product_uom_id.id,
            )
        else:
            theoretical_qty = 0
        # Sanity check on the lot.
        if self.prod_lot_id:
            if self.product_id.tracking == 'none' or self.product_id != self.prod_lot_id.product_id:
                self.prod_lot_id = False
        if self.prod_lot_id and self.product_id.tracking == 'serial':
            # We force `product_qty` to 1 for SN tracked product because it's
            # the only relevant value aside 0 for this kind of product.
            self.product_qty = 1
        elif self.product_id and float_compare(self.product_qty, self.theoretical_qty, precision_rounding=self.product_uom_id.rounding) == 0:
            # We update `product_qty` only if it equals to `theoretical_qty` to
            # avoid to reset quantity when user manually set it.
            self.product_qty = theoretical_qty + self.difference_qty_2
        self.theoretical_qty = theoretical_qty
    def _get_virtual_location(self):
        """Scrap destination for 'baja' adjustments, else the loss location."""
        if self.inventory_id.ajuste == 'baja':
            return self.inventory_id.location_dest_id
        else:
            return self.product_id.with_context(force_company=self.company_id.id).property_stock_inventory
class StockScrap(models.Model):
    """Approval workflow on ``stock.scrap``.

    Adds review/authorize/approve steps before 'done', per-step user/date
    audit fields, and a scrap reason selection.
    """
    _inherit = 'stock.scrap'
    state = fields.Selection([
        ('draft', 'Elaboración'),
        ('review', 'Revisión'),
        ('auth', 'Autorización'),
        ('approv', 'Aprobación'),
        ('done', 'Done')],
        string='Status', default="draft", readonly=True, tracking=True)
    # Shared ``states`` attrs: every field below becomes read-only once the
    # document has left the draft state.
    rule = {
        'review': [('readonly', True)],
        'auth': [('readonly', True)],
        'approv': [('readonly', True)],
        'done': [('readonly', True)],
    }
    company_id = fields.Many2one(states=rule, tracking=1)
    product_id = fields.Many2one(states=rule, tracking=1)
    origin = fields.Char(states=rule)
    product_uom_id = fields.Many2one(states=rule, tracking=1)
    lot_id = fields.Many2one(states=rule, tracking=1)
    package_id = fields.Many2one(states=rule, tracking=1)
    owner_id = fields.Many2one(states=rule, tracking=1)
    picking_id = fields.Many2one(states=rule, tracking=1)
    location_id = fields.Many2one(states=rule, tracking=1)
    scrap_location_id = fields.Many2one(states=rule, tracking=1)
    scrap_qty = fields.Float(states=rule, tracking=1)
    # Audit trail: who performed each workflow step, and when.
    user_cre = fields.Many2one('res.users', string='Creó', required=False, copy=False)
    user_rev = fields.Many2one('res.users', string='Revisó', required=False, copy=False)
    user_aut = fields.Many2one('res.users', string='Autorizó', required=False, copy=False)
    user_apr = fields.Many2one('res.users', string='Aprobó', required=False, copy=False)
    user_ter = fields.Many2one('res.users', string='Terminó', required=False, copy=False)
    date_cre = fields.Datetime(string='Fecha creó', copy=False)
    date_rev = fields.Datetime(string='Fecha revisó', copy=False)
    date_aut = fields.Datetime(string='Fecha autorizó', copy=False)
    date_apr = fields.Datetime(string='Fecha aprobó', copy=False)
    date_ter = fields.Datetime(string='Fecha terminó', copy=False)
    motivo_de_baja = fields.Selection([
        ('obs', 'Obsolecencia de Bien'),
        ('da', 'Daño'),
        ('fec', 'Fecha de Vencimiento'),
        ('hur', 'Hurto')],
        string='Motivo de Baja', states=rule, tracking=1)
def to_review(self):
self._check_company()
for scrap in self:
scrap.name = self.env['ir.sequence'].next_by_code(
'stock.scrap') or _('New')
scrap.date_done = fields.Datetime.now()
scrap.write({'state': 'review'})
if self.product_id.type != 'product':
return True
precision = self.env['decimal.precision'].precision_get(
'Product Unit of Measure')
location_id = self.location_id
if self.picking_id and self.picking_id.picking_type_code == 'incoming':
location_id = self.picking_id.location_dest_id
available_qty = sum(self.env['stock.quant']._gather(self.product_id,
location_id,
self.lot_id,
self.package_id,
self.owner_id,
strict=True).mapped('quantity'))
scrap_qty = self.product_uom_id._compute_quantity(
self.scrap_qty, self.product_id.uom_id)
if float_compare(available_qty, scrap_qty, precision_digits=precision) >= 0:
return True
else:
ctx = dict(self.env.context)
ctx.update({
'default_product_id': self.product_id.id,
'default_location_id': self.location_id.id,
'default_scrap_id': self.id
})
return {
'name': _('Insufficient Quantity'),
'view_mode': 'form',
'res_model': 'stock.warn.insufficient.qty.scrap',
'view_id': self.env.ref('stock.stock_warn_insufficient_qty_scrap_form_view').id,
'type': 'ir.actions.act_window',
'context': ctx,
'target': 'new'
}
def to_auth(self):
self._check_company()
for scrap in self:
scrap.write({'state': 'auth'})
scrap.write({'user_rev': self.env.uid})
scrap.write({'date_rev': datetime.datetime.today()})
return True
def to_approv(self):
self._check_company()
for scrap in self:
scrap.write({'state': 'approv'})
scrap.write({'user_aut': self.env.uid})
scrap.write({'date_aut': datetime.datetime.today()})
return True
def to_draft(self):
self._check_company()
for scrap in self:
scrap.write({'state': 'draft'})
scrap.write({'user_rev': False})
scrap.write({'user_aut': False})
scrap.write({'user_apr': False})
scrap.write({'date_rev': False})
scrap.write({'date_aut': False})
scrap.write({'date_apr': False})
return True
def do_scrap(self):
self._check_company()
for scrap in self:
move = self.env['stock.move'].create(scrap._prepare_move_values())
# master: replace context by cancel_backorder
move.with_context(is_scrap=True)._action_done()
scrap.write({'move_id': move.id, 'state': 'done'})
scrap.write({'user_apr': self.env.uid})
scrap.write({'date_apr': datetime.datetime.today()})
return True
def _prepare_move_values(self):
self.ensure_one()
location_id = self.location_id.id
if self.picking_id and self.picking_id.picking_type_code == 'incoming':
location_id = self.picking_id.location_dest_id.id
return {
'name': self.name,
'origin': self.origin or self.picking_id.name or self.name,
'company_id': self.company_id.id,
'product_id': self.product_id.id,
'product_uom': self.product_uom_id.id,
'state': 'draft',
'product_uom_qty': self.scrap_qty,
'location_id': location_id,
'scrapped': True,
'location_dest_id': self.scrap_location_id.id,
'move_line_ids': [(0, 0, {'product_id': self.product_id.id,
'product_uom_id': self.product_uom_id.id,
'qty_done': self.scrap_qty,
'location_id': location_id,
'location_dest_id': self.scrap_location_id.id,
'package_id': self.package_id.id,
'owner_id': self.owner_id.id,
'lot_id': self.lot_id.id, })],
# 'restrict_partner_id': self.owner_id.id,
'picking_id': self.picking_id.id
}
def action_validate(self):
self.ensure_one()
if self.location_id.id == self._get_default_location_id():
view = self.env.ref('overwrite_inventory.button_confirm_form')
return {
'type': 'ir.actions.act_window',
'name': "Confirmar 'Ubicación Origen'",
'res_model': 'overwrite_inventory.button.confirm',
'views': [(view.id, 'form')],
'target': 'new',
'context': {'scrap': self.id}
}
else:
return self.do_scrap()
def action_validate_second_confirm(self):
self.ensure_one()
return self.do_scrap()
@api.model
def create(self, vals):
vals['user_cre'] = self.env.uid
vals['date_cre'] = datetime.datetime.now()
res = super(StockScrap, self).create(vals)
return res
class StockWarnInsufficientQtyScrapOver(models.TransientModel):
    """Override of the 'insufficient quantity' scrap warning wizard.

    Confirming the wizard is a deliberate no-op (the scrap flow continues
    elsewhere), while cancelling returns the related scrap order to draft.
    """
    _inherit = 'stock.warn.insufficient.qty.scrap'

    def action_done(self):
        """Acknowledge the warning without scrapping anything here."""
        return True

    def action_cancel(self):
        """Send the linked scrap order back to the 'draft' state."""
        return self.scrap_id.to_draft()
class Picking(models.Model):
    """Extend ``stock.picking`` with an extra 'approved' state, parent/child
    picking links (for returns), origin/destination warehouse tracking, an
    intra-warehouse permission check, and audit users/dates for the approve
    and validate steps."""
    _inherit = 'stock.picking'
    name = fields.Char(
        'Reference', default='/',
        copy=False, index=True, readonly=True)
    # Adds 'approved' between 'assigned' and 'done'.
    state = fields.Selection([
        ('draft', 'Draft'),
        ('waiting', 'Waiting Another Operation'),
        ('confirmed', 'Waiting'),
        ('assigned', 'Ready'),
        ('approved', 'Aprobado'),
        ('done', 'Done'),
        ('cancel', 'Cancelled')])
    # Convenience related fields exposing the usage of both endpoints.
    location_id_usage = fields.Selection(related='location_id.usage')
    location_dest_id_usage = fields.Selection(related='location_dest_id.usage')
    picking_type_id = fields.Many2one(
        'stock.picking.type', 'Operation Type',
        required=True, readonly=True,
        states={'draft': [('readonly', False)]})
    # Return pickings are linked to the picking they return (see set_parent).
    parent_id = fields.Many2one(comodel_name='stock.picking')
    children_ids = fields.One2many(
        comodel_name='stock.picking', inverse_name='parent_id')
    # Root warehouses resolved from source/destination locations (set_warehouse).
    warehouse_orig = fields.Many2one(comodel_name='stock.warehouse')
    warehouse_dest = fields.Many2one(comodel_name='stock.warehouse')
    # Audit: approval and validation user/date stamps.
    user_apprv = fields.Many2one('res.users', string='Aprobó', required=False)
    date_apprv = fields.Datetime(string='Fecha aprobó')
    user_val = fields.Many2one('res.users', string='Validó', required=False)
    date_val = fields.Datetime(string='Fecha validó')
    location_id = fields.Many2one(
        'stock.location', "Source Location",
        default=lambda self: self.env['stock.picking.type'].browse(self._context.get('default_picking_type_id')).default_location_src_id,
        check_company=True, readonly=True, required=True,
        states={'draft': [('readonly', False)]})
    product_id = fields.Many2one('product.product', 'Product', related='move_lines.product_id', readonly=True)
    move_ids_without_package = fields.One2many('stock.move', 'picking_id', string="Stock moves not in package", compute='_compute_move_without_package', inverse='_set_move_without_package')
    move_line_ids_without_package = fields.One2many('stock.move.line', 'picking_id', 'Operations without package', domain=['|',('package_level_id', '=', False), ('picking_type_entire_packs', '=', False)])
    origin = fields.Char(
        'Source Document', index=True,
        states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
        help="Reference of the document")
    # Purchase-order code resolved from the origin document (see set_parent).
    origin_order = fields.Char(
        string='Codigo Orden de Compra', index=True)
    n_bill = fields.Char(
        string='Número de Factura')
    partner_id = fields.Many2one(
        'res.partner', 'Contact',
        check_company=True,
        states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
    # Partner's VAT/identification number, mirrored on partner change.
    identification_partner = fields.Char('N° Documento')
    return_reason_id = fields.Many2one("stock.return.reason", string="Motivo de Devolución", copy=False, tracking=True)
    @api.onchange('partner_id')
    def set_identification_dv(self):
        """Copy the selected partner's VAT into ``identification_partner``."""
        if self.partner_id:
            numero_id = self.env['res.partner'].search([('id', '=', self.partner_id.id)], limit=1).vat
            self.identification_partner = numero_id
    @ api.onchange('move_ids_without_package','move_line_ids_without_package')
    def variante_producto(self):
        """Reject moves whose product's warehouse does not match the warehouse
        of the picking's source location."""
        if self.location_id:
            for product in self.move_ids_without_package:
                location = self.location_id.location_id.name
                # NOTE(review): both variables below read the same chain, so
                # the two comparisons are redundant — confirm intent.
                product_operaciones = product.product_id.warehouse_id.lot_stock_id.location_id.name
                product_operaciones_detalladas = product.product_id.warehouse_id.lot_stock_id.location_id.name
                if (product.product_id.warehouse_id or product.product_id.warehouse_id):
                    if (location != product_operaciones) or (location != product_operaciones_detalladas):
                        raise UserError(_('El almacén del producto o variante de producto no corresponde con el almacén de la ubicación origen. '
                                          'Seleccione la variante de producto correspondiente o asigne el almacén adecuado al producto o variante del producto.'))
    def get_root_warehouse(self, location_id):
        """Walk up the location tree from ``location_id`` and return the
        warehouse whose ``code`` equals the root location's complete name."""
        stock_location = self.env['stock.location']
        #En sale.order estaba tomando dos warehouse, se estaba duplicando el warehouse
        #para varias compañias, por lo tanto se establece the current company para que
        #tome unicamente un warehouse .
        company_ids = self.env.user.company_id.id
        # NOTE(review): the domain below evaluates the Python expression
        # ``location_id and 'company_id' == company_ids`` (a constant), not a
        # two-term Odoo domain — confirm this behaves as intended.
        current = stock_location.search([['id', '=', location_id and 'company_id' == company_ids]])
        while current.location_id and current.location_id.location_id:
            current = current.location_id
        warehouse = self.env['stock.warehouse'].search(
            [['code', '=', current.complete_name]])
        return warehouse
    @api.depends('state', 'is_locked')
    def _compute_show_validate(self):
        """Show the Validate button in ready-like states (incl. 'approved')
        on locked, non-immediate pickings."""
        for picking in self:
            if not (picking.immediate_transfer) and picking.state == 'draft':
                picking.show_validate = False
            elif picking.state not in ('draft', 'approved', 'waiting', 'confirmed', 'assigned') or not picking.is_locked:
                picking.show_validate = False
            else:
                picking.show_validate = True
    def set_warehouse(self, vals):
        """Resolve and inject ``warehouse_orig``/``warehouse_dest`` into ``vals``
        from the source/destination locations, when present."""
        if vals.get('location_id', False):
            warehouse_orig = self.get_root_warehouse(vals['location_id'])
            if warehouse_orig:
                vals['warehouse_orig'] = warehouse_orig.id
        if vals.get('location_dest_id', False):
            warehouse_dest = self.get_root_warehouse(vals['location_dest_id'])
            if warehouse_dest:
                vals['warehouse_dest'] = warehouse_dest.id
        return vals
    def set_parent(self, vals):
        """For return pickings ('Retorno de X'), link the parent picking; for
        purchase-related origins, resolve ``origin_order`` from the PO code.

        Mutates ``vals`` in place.
        """
        if vals.get('origin', False):
            parent = self.env['stock.picking'].search(['&', ['name', '=', vals['origin'].split(
                'Retorno de ')[-1]], ['company_id', '=', self.env.company.id]])
            if parent:
                vals['parent_id'] = parent.id
                vals['company_id'] = parent.company_id.id
        if not vals.get('origin', False):
            if self.origin:
                code_sc = self.origin
                # Try to match the origin against a quotation-request code or
                # an internal-requisition code to recover the PO name.
                code_oc_sc = self.env['purchase.order'].search([('codigo_solicitud_cotizacion','=',code_sc),],limit=1).name
                code_oc_rint = self.env['purchase.order'].search([('code_requisition','=',code_sc),],limit=1).name
                if code_oc_sc:
                    vals['origin_order'] = code_oc_sc
                if 'OC' in code_sc:
                    vals['origin_order'] = code_sc
                if code_oc_rint:
                    vals['origin_order'] = code_oc_rint
            vals['parent_id'] = False
    @ api.model
    def write(self, vals):
        """Keep warehouse/parent fields in sync and enforce the
        intra-warehouse responsible check before writing."""
        # NOTE(review): ``@api.model`` on write() is unusual — write normally
        # operates on a recordset; confirm decorator is intentional.
        vals = self.set_warehouse(vals)
        self.set_parent(vals)
        self._check_intrawarehouse_moves(vals)
        return super(Picking, self).write(vals)
    @ api.model
    def create(self, vals):
        """Same synchronization/validation as write(), at creation time."""
        vals = self.set_warehouse(vals)
        self.set_parent(vals)
        self._check_intrawarehouse_moves(vals)
        return super(Picking, self).create(vals)
    def _check_different_lot_stock_moves(self):
        """Within a procurement group, forbid moves that introduce lots not
        present in already-done pickings, or quantities exceeding them."""
        if self.group_id:
            pickings_on_group = self.env['stock.picking'].search(
                [['group_id', '=', self.group_id.id], ['state', '=', 'done']])
            if len(pickings_on_group) > 0 and self.backorder_id == False:
                move_lot_ids = []
                # Minimum done qty per (product, lot) across prior pickings.
                move_lot_ids_qty = {}
                for picking in pickings_on_group:
                    if 'Retorno' in picking.origin:
                        pass
                    for move in picking.move_line_ids_without_package:
                        move_lot_ids.append(move.lot_id.id)
                        key = str(move.product_id)+str(move.lot_id)
                        if move_lot_ids_qty.get(key, False):
                            if move.qty_done * move.product_uom_id.factor_inv < move_lot_ids_qty.get(key, False):
                                move_lot_ids_qty[key] = move.qty_done * \
                                    move.product_uom_id.factor_inv
                        else:
                            move_lot_ids_qty[key] = move.qty_done * \
                                move.product_uom_id.factor_inv
                for move in self.move_line_ids_without_package:
                    key = str(move.product_id)+str(move.lot_id)
                    if move.lot_id.id not in move_lot_ids:
                        raise UserError(_('No se puede agregar lotes no existentes en movimientos terminados anteriores. {}'.format(
                            move.product_id.name)))
                    # NOTE(review): debug prints left in production code.
                    print(move.qty_done * move.product_uom_id.factor_inv)
                    print()
                    if move_lot_ids_qty.get(key, False):
                        if move.qty_done * move.product_uom_id.factor_inv > move_lot_ids_qty.get(key, False):
                            raise UserError(_('No se puede realizar un movimiento con mayor cantidad de producto terminado que en los anteriores movimientos. {}'.format(
                                move.product_id.name)))
    def _check_intrawarehouse_moves(self, vals):
        """Only a responsible user of the origin warehouse may perform
        intra-warehouse moves; raise otherwise."""
        if vals.get('warehouse_orig', False):
            current_user = self.env['res.users'].browse(self.env.uid)
            warehouse = self.env['stock.warehouse'].search(
                [['id', '=', vals.get('warehouse_orig')]])
            responsables = warehouse.user_ids
            if current_user not in responsables:
                raise UserError(
                    _('Los movimientos intraalmacen solo la puede realizar un usuario responsable del almacen destino'))
    def button_validate(self):
        """Pre-validation availability checks, then either block (delivery /
        internal shortage), ask for confirmation, or delegate to
        :meth:`button_validate_confirm`."""
        # First pass: per-line availability against quants at the source location.
        for line in self.move_line_ids_without_package:
            quantity_stock_picking=line.qty_done
            lot_line=line.lot_id.id
            id_product=line.product_id.id
            quantity_warehouse=0
            if lot_line:
                for qty_warehouse in self.env['stock.quant'].search([('product_id.id','=',id_product),('location_id.id','=',self.location_id.id),('lot_id.id','=',lot_line)]):
                    quantity_warehouse += qty_warehouse.quantity
            else:
                for qty_warehouse in self.env['stock.quant'].search([('product_id.id','=',id_product),('location_id.id','=',self.location_id.id)]):
                    quantity_warehouse += qty_warehouse.quantity
            if quantity_warehouse < quantity_stock_picking:
                if self.picking_type_id.name == 'Órdenes de Entrega':
                    raise UserError(_('La cantidad disponible en el almacén no es suficiente para cumplir la demanda.'))
        # Second pass (no partner): aggregate done qty per product/lot/location
        # and compare against quants; internal shortages are blocking.
        if not self.partner_id:
            products = {}
            for line in self.move_line_ids:
                key = str(line.product_id.id) + '-' + \
                    str(line.lot_id.id) + '-' + str(line.location_id.id)
                if products.get(key, False):
                    products[key] += line.qty_done * \
                        line.product_uom_id.factor_inv
                else:
                    products[key] = line.qty_done * \
                        line.product_uom_id.factor_inv
            for key, qty_done in products.items():
                product, lot, dest = key.split('-')
                product = int(product)
                dest = int(dest)
                if lot == 'False':
                    lot = False
                else:
                    lot = int(lot)
                quant = self.env['stock.quant'].search(
                    [['product_id', '=', product], ['lot_id', '=', lot], ['location_id', '=', dest]])
                quant_sum = sum(map(lambda q: q.quantity *
                                    q.product_uom_id.factor_inv, quant))
                if quant_sum < qty_done:
                    if self.picking_type_id.code == 'internal':
                        raise UserError(
                            _('Las cantidades en la ubicación origen no son suficientes para cubrir la demanda.'))
        # Ask for a generic confirmation before proceeding with validation.
        view = self.env.ref(
            'overwrite_inventory.button_confirm_form_generic')
        wiz = self.env['overwrite_inventory.button.confirm.generic'].create(
            #{'message': 'Las cantidades en la ubicación origen no son suficientes para cubrir la demanda. Confirme para ignorar inventario negativo.'})
            {'message': '¿Seguro que quieres validar?.'})
        return {
            'type': 'ir.actions.act_window',
            'name': "Confirmar",
            'res_model': 'overwrite_inventory.button.confirm.generic',
            'views': [(view.id, 'form')],
            'target': 'new',
            'res_id': wiz.id,
            'context': {'model': 'stock.picking', 'id': self.id}
        }
        # NOTE(review): unreachable — the action above always returns first.
        return self.button_validate_confirm()
    def button_unapprove(self):
        """Revert an approved picking to 'assigned' and clear the approval stamp."""
        for picking in self:
            picking.write({'state': 'assigned',
                           'user_apprv': False,
                           'date_apprv': False})
        return True
    def button_approve(self):
        '''if self.picking_type_id.name == 'Recibos':
            if not self.move_line_ids_without_package.lot_id:
                raise UserError(_('Para aprobar debes colocar un Lote / N° de Serie.'))
            for stock_pickings in self.env['stock.picking'].search([]):
                if stock_pickings.picking_type_id.name == 'Recibos':
                    name_receipts = stock_pickings.name
                    for operaciones_detalladas in stock_pickings.move_line_ids_without_package:
                        self_lot = self.move_line_ids_without_package.lot_id.name
                        lots = operaciones_detalladas.lot_id.name
                        if (lots == self_lot) and (self.name != name_receipts):
                            raise UserError(_('Para aprobar debes colocar un Lote / N° de Serie nuevo.'))
        '''
        # Mark the picking 'approved' and stamp the approving user/date.
        for picking in self:
            picking.write({'state': 'approved',
                           'user_apprv': self.env.uid,
                           'date_apprv': datetime.datetime.now()})
        return True
    def button_validate_confirm(self):
        """Actual validation: standard Odoo checks (lots, quantities, SMS,
        immediate/overprocessed/backorder wizards) plus the group lot check,
        then ``action_done`` and the validation audit stamp."""
        self.ensure_one()
        # Skip the lot consistency check for manufacturing-order pickings.
        if not self.env['mrp.production'].search([['name', '=', self.origin]]):
            self._check_different_lot_stock_moves()
        if self.state == 'waiting':
            raise UserError(
                _('Por favor completar las operaciones precondiciones'))
        if not self.move_lines and not self.move_line_ids:
            raise UserError(_('Please add some items to move.'))
        # Clean-up the context key at validation to avoid forcing the creation of immediate
        # transfers.
        ctx = dict(self.env.context)
        ctx.pop('default_immediate_transfer', None)
        self = self.with_context(ctx)
        # add user as a follower
        self.message_subscribe([self.env.user.partner_id.id])
        # If no lots when needed, raise error
        picking_type = self.picking_type_id
        precision_digits = self.env['decimal.precision'].precision_get(
            'Product Unit of Measure')
        no_quantities_done = all(float_is_zero(move_line.qty_done, precision_digits=precision_digits)
                                 for move_line in self.move_line_ids.filtered(lambda m: m.state not in ('done', 'cancel')))
        no_reserved_quantities = all(float_is_zero(
            move_line.product_qty, precision_rounding=move_line.product_uom_id.rounding) for move_line in self.move_line_ids)
        if no_reserved_quantities and no_quantities_done:
            raise UserError(
                _('You cannot validate a transfer if no quantites are reserved nor done. To force the transfer, switch in edit more and encode the done quantities.'))
        if picking_type.use_create_lots or picking_type.use_existing_lots:
            lines_to_check = self.move_line_ids
            if not no_quantities_done:
                lines_to_check = lines_to_check.filtered(
                    lambda line: float_compare(line.qty_done, 0,
                                               precision_rounding=line.product_uom_id.rounding)
                )
            for line in lines_to_check:
                product = line.product_id
                if product and product.tracking != 'none':
                    if not line.lot_name and not line.lot_id:
                        raise UserError(
                            _('You need to supply a Lot/Serial number for product %s.') % product.display_name)
        # Propose to use the sms mechanism the first time a delivery
        # picking is validated. Whatever the user's decision (use it or not),
        # the method button_validate is called again (except if it's cancel),
        # so the checks are made twice in that case, but the flow is not broken
        sms_confirmation = self._check_sms_confirmation_popup()
        if sms_confirmation:
            return sms_confirmation
        if no_quantities_done:
            view = self.env.ref('stock.view_immediate_transfer')
            wiz = self.env['stock.immediate.transfer'].create(
                {'pick_ids': [(4, self.id)]})
            return {
                'name': _('Immediate Transfer?'),
                'type': 'ir.actions.act_window',
                'view_mode': 'form',
                'res_model': 'stock.immediate.transfer',
                'views': [(view.id, 'form')],
                'view_id': view.id,
                'target': 'new',
                'res_id': wiz.id,
                'context': self.env.context,
            }
        if self._get_overprocessed_stock_moves() and not self._context.get('skip_overprocessed_check'):
            view = self.env.ref('stock.view_overprocessed_transfer')
            wiz = self.env['stock.overprocessed.transfer'].create(
                {'picking_id': self.id})
            return {
                'type': 'ir.actions.act_window',
                'view_mode': 'form',
                'res_model': 'stock.overprocessed.transfer',
                'views': [(view.id, 'form')],
                'view_id': view.id,
                'target': 'new',
                'res_id': wiz.id,
                'context': self.env.context,
            }
        # Check backorder should check for other barcodes
        if self._check_backorder():
            return self.action_generate_backorder_wizard()
        self.action_done()
        # Stamp the validating user and date for the audit trail.
        self.write({'user_val': self.env.uid,
                    'date_val': datetime.datetime.now()})
        return
class Warehouse(models.Model):
    """Extend ``stock.warehouse`` with a length-limited code and responsibles."""
    _inherit = "stock.warehouse"
    # Short code, capped at 10 characters (matched against a root location's
    # complete_name when resolving warehouses from locations).
    code = fields.Char(size=10)
    # Users allowed to perform intra-warehouse moves for this warehouse.
    user_ids = fields.Many2many('res.users', string='Responsables')
class ProductCategory(models.Model):
    """Extend ``product.category`` with chatter support and a company field."""
    _name = "product.category"
    # Re-declare _name alongside _inherit to mix in mail.thread (chatter).
    _inherit = ["product.category","mail.thread"]
    # Optional owning company; deleting the company deletes its categories.
    company_id = fields.Many2one(
        'res.company',
        'Company',
        ondelete='cascade',
    )
class StockValuationLayer(models.Model):
    """Extend ``stock.valuation.layer`` to persist the product category."""
    _inherit = 'stock.valuation.layer'
    # Store the (normally non-stored) category so views/reports can group on it.
    categ_id = fields.Many2one(store=True)
class StockLocation(models.Model):
    """Extend ``stock.location`` with an extra 'interwarehouse' usage option.

    Fix: the original class was (mis)named ``StockValuationLayer``, which
    shadowed the real ``StockValuationLayer`` defined just above in this
    module at the Python level.  Renamed to match the inherited model; the
    Odoo registry binding (``_inherit``) and the field are unchanged.
    """
    _inherit = 'stock.location'
    # Same selection as core, plus 'interwarehouse' for inter-warehouse transit.
    usage = fields.Selection([
        ('supplier', 'Vendor Location'),
        ('view', 'View'),
        ('internal', 'Internal Location'),
        ('customer', 'Customer Location'),
        ('inventory', 'Inventory Loss'),
        ('production', 'Production'),
        ('transit', 'Transit Location'),
        ('interwarehouse', 'Interalmacen')])
class StockMoveLine(models.Model):
    """Extend ``stock.move.line`` with a signed 'exists' quantity and a
    per-lot unit cost derived from the originating purchase order."""
    _inherit = 'stock.move.line'
    exists_qty = fields.Float(
        'Exists Quantity', compute='_compute_exists_qty',
        store=True, help='Cantidad existente en inventario')
    # Costo promedio por lotes en el modelo de las lineas de movimiento de inventario
    cost_unit_lot = fields.Float(string='Costo por Lote', default=0, compute='_compute_cost_unit_lot')
    product_id = fields.Many2one('product.product', 'Product', ondelete="cascade", check_company=True, domain="[('type', '!=', 'service'), '|', ('company_id', '=', False), ('company_id', '=', company_id)]")
    picking_id = fields.Many2one(
        'stock.picking', 'Stock Picking', auto_join=True,
        check_company=True,
        index=True,
        help='The stock operation where the packing has been made')
    origin = fields.Char(related='move_id.origin', string='Source')
    lot_id = fields.Many2one(
        'stock.production.lot', 'Lot/Serial Number',
        domain="[('product_id', '=', product_id), ('company_id', '=', company_id)]", check_company=True)
    def _compute_cost_unit_lot(self):
        """Set ``cost_unit_lot`` from the purchase order the picking came from.

        The picking's ``origin`` (or, failing that, ``origin_order``) is
        matched to a purchase order either by name (codes starting with 'OC')
        or by quotation-request code (codes starting with 'SC'); the matching
        order line's unit price is used, defaulting to 0.

        Fix over the original implementation: the field is now assigned for
        *every* record in ``self`` on every path (Odoo requires compute
        methods to do so), and multi-record recordsets are handled per line
        instead of reading ``self.picking_id`` on the whole set.
        """
        for line in self:
            line.cost_unit_lot = 0
            # Prefer the direct origin; fall back to the resolved PO code.
            origin = line.picking_id.origin or line.picking_id.origin_order
            if not origin:
                continue
            if origin[0:2] == 'OC':
                order = self.env['purchase.order'].search(
                    [('name', '=', origin)], limit=1)
            elif origin[0:2] == 'SC':
                order = self.env['purchase.order'].search(
                    [('codigo_solicitud_cotizacion', '=', origin)], limit=1)
            else:
                continue
            for order_line in order.order_line:
                if order_line.product_id == line.product_id:
                    line.cost_unit_lot = order_line.price_unit
    @api.depends('qty_done')
    def _compute_exists_qty(self):
        """Signed done quantity: positive into internal locations, negative out."""
        for move in self:
            if move.location_dest_id.usage == 'internal':
                move.exists_qty = move.qty_done
            else:
                move.exists_qty = -1 * move.qty_done
# Costo promedio en los lugares
class StockQuant(models.Model):
    """Extend ``stock.quant`` with lot/product average-cost fields and the
    inventory-mode domains for lot and product selection."""
    _inherit = 'stock.quant'
    def _domain_lot_id(self):
        """Return a *string* domain for ``lot_id`` in inventory mode, scoped to
        the active product (or product template's variants) and the company."""
        if not self._is_inventory_mode():
            return
        domain = [
            "'|'",
            "('company_id', '=', company_id)",
            "('company_id', '=', False)"
        ]
        if self.env.context.get('active_model') == 'product.product':
            domain.insert(0, "('product_id', '=', %s)" % self.env.context.get('active_id'))
        elif self.env.context.get('active_model') == 'product.template':
            product_template = self.env['product.template'].browse(self.env.context.get('active_id'))
            if product_template.exists():
                domain.insert(0, "('product_id', 'in', %s)" % product_template.product_variant_ids.ids)
        else:
            domain.insert(0, "('product_id', '=', product_id)")
        return '[' + ', '.join(domain) + ']'
    def _domain_product_id(self):
        """Return the ``product_id`` domain in inventory mode: storable
        products, optionally restricted to templates passed in context."""
        if not self._is_inventory_mode():
            return
        domain = [('type', '=', 'product')]
        if self.env.context.get('product_tmpl_ids') or self.env.context.get('product_tmpl_id'):
            products = self.env.context.get('product_tmpl_ids', []) + [self.env.context.get('product_tmpl_id', 0)]
            domain = expression.AND([domain, [('product_tmpl_id', 'in', products)]])
        return domain
    # Average costs: per lot, per product per warehouse, and total per quant.
    cost_unit = fields.Monetary(string='Costo Promedio del Lote', compute='_compute_cost_unit')
    cost_unit_average = fields.Monetary(string='Costo Promedio del Producto por Almacén', compute='_compute_cost_unit_average')
    value_average = fields.Monetary(string='Costo Total del Almacén', compute='_value_average')
    lot_id = fields.Many2one(
        'stock.production.lot', 'Lot/Serial Number', index=True,
        ondelete='restrict', readonly=True, check_company=True,
        domain=lambda self: self._domain_lot_id())
    product_id = fields.Many2one(
        'product.product', 'Product',
        domain=lambda self: self._domain_product_id(),
        ondelete='restrict', readonly=True, required=True, index=True, check_company=True)
    quantity = fields.Float(
        'Quantity',
        help='Quantity of products in this quant, in the default unit of measure of the product',
        readonly=True)
    @api.model
    def _is_inventory_mode(self):
        """ Used to control whether a quant was written on or created during an
        "inventory session", meaning a mode where we need to create the stock.move
        record necessary to be consistent with the `inventory_quantity` field.
        """
        return self.env.context.get('inventory_mode') is True and self.user_has_groups('stock.group_stock_manager')
    def _compute_cost_unit(self):
        """Weighted-average cost of the quant's lot across the purchase orders
        linked to that lot (sum(price*qty) / sum(qty))."""
        for line in self:
            cost_lots = 0
            qty_lots = 0
            # NOTE(review): when this search returns no lot, cost_unit is never
            # assigned for the record — compute methods must assign every
            # record; confirm this can't happen in practice.
            if self.env['stock.production.lot'].search([('name', '=', line.lot_id.name)]):
                # NOTE(review): the loop variable is a stock.production.lot
                # record despite the 'purchase_order' name.
                for purchase_order in self.env['stock.production.lot'].search([('name', '=', line.lot_id.name)]):
                    for purchase_order_line in purchase_order.purchase_order_ids:
                        for product in purchase_order_line.order_line:
                            if product.product_id == line.product_id:
                                cost_lots += product.price_unit*product.product_qty
                                qty_lots += product.product_qty
                if qty_lots > 0:
                    line.cost_unit = cost_lots / qty_lots
                else:
                    line.cost_unit = 0
    def _compute_cost_unit_average(self):
        """Average cost per product across all quants in ``self``.

        First pass accumulates [product, total_cost, total_qty] triples;
        second pass assigns each line its product's weighted average.
        """
        product_cost_average=[[]]
        quantity_products=0
        for line in self:
            exist=False
            for products_lines in product_cost_average:
                if products_lines:
                    if products_lines[0] == line.product_id:
                        products_lines[1] += (line.cost_unit*line.quantity)
                        products_lines[2] += line.quantity
                        exist=True
            if not exist:
                product_cost_average[quantity_products].append(line.product_id)
                product_cost_average[quantity_products].append(line.cost_unit*line.quantity)
                product_cost_average[quantity_products].append(line.quantity)
                product_cost_average.append([])
                quantity_products+=1
        # Drop the trailing empty accumulator slot.
        product_cost_average.pop()
        for line in self:
            for products_lines in product_cost_average:
                if products_lines[0] == line.product_id:
                    if products_lines[2] == 0:
                        line.cost_unit_average = 0
                    else:
                        line.cost_unit_average = products_lines[1]/products_lines[2]
    def _value_average(self):
        """Total value of the quant: quantity times the product's average cost."""
        for line in self:
            line.value_average = line.quantity*line.cost_unit_average
class StockReturnReason(models.Model):
    """Simple catalog of return reasons used by ``stock.picking``.

    Fix: the original class name, ``SaleSubscriptionCloseReason``, was a
    copy-paste leftover from the sale-subscription module and did not match
    the model it defines; renamed to match ``_name``.  Registry name and
    fields are unchanged.
    """
    _name = "stock.return.reason"
    name = fields.Char('Razón', required=True)
    # Default ordering weight within selection lists.
    sequence = fields.Integer(default=11)
| 46.450782
| 206
| 0.587301
|
4a0b2a710569f8b91695095ac42b4602ed363155
| 2,024
|
py
|
Python
|
scraper.py
|
vyacheslav-polyakov/vocabmap
|
32ffa7fd98f73bbdb0f49d937f3a98e41c7dc9e8
|
[
"MIT"
] | null | null | null |
scraper.py
|
vyacheslav-polyakov/vocabmap
|
32ffa7fd98f73bbdb0f49d937f3a98e41c7dc9e8
|
[
"MIT"
] | null | null | null |
scraper.py
|
vyacheslav-polyakov/vocabmap
|
32ffa7fd98f73bbdb0f49d937f3a98e41c7dc9e8
|
[
"MIT"
] | null | null | null |
from urllib.request import Request, urlopen
from urllib.parse import quote
from bs4 import BeautifulSoup as bs
import io
import re
# Browser-like request headers: some sites reject the default urllib
# User-Agent, so we impersonate a desktop Chrome browser.
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
           'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
           'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
           'Accept-Encoding': 'none',
           'Accept-Language': 'en-US,en;q=0.8',
           'Connection': 'keep-alive'}
def findExamples(word):
    """Scrape Chinese example sentences (and English translations) for *word*
    from purpleculture.net.

    Returns ``[examples_cn, examples_en]`` — two parallel lists, each
    terminated by an empty string sentinel so consumers know where they end.

    Fixes over the original implementation:
    - removed the pointless round-trip through a hard-coded Windows-only
      temp file (``vocab\\temp.txt``) that also crashed when the directory
      was missing;
    - ``translations[examples.index(example)]`` returned the *first*
      occurrence's translation for duplicate sentences and could raise
      ``IndexError``; replaced with ``enumerate`` plus a bounds guard.
    """
    # Fetch and parse the dictionary page for the word.
    url = 'https://www.purpleculture.net/dictionary-details/?word=' + quote(word)
    request = Request(url, headers=headers)
    response = urlopen(request)
    soup = bs(response, 'html.parser')
    examples = soup.findAll('span', {'class': 'samplesen'})
    meanings = soup.findAll('div', {'class': 'sample_en'})
    translations = [m.text for m in meanings]

    # Strip pinyin (latin letters with tone marks) and quote characters,
    # leaving only the Chinese text, then split into sentences.
    text = ''.join(example.text for example in examples)
    text = ''.join(re.split("[a-zāáǎàōóǒòēéěèīíìǐūúǔùǖǘǚǜü “”]", text))
    sentences = re.split('[。?]', text)

    examples_cn = []
    examples_en = []
    for idx, sentence in enumerate(sentences):
        # Keep reasonably short, non-empty sentences only.
        if len(sentence) <= 30 and sentence.replace(' ', '') != '':
            examples_cn.append(sentence)
            # Align the translation by position; guard against ragged scrapes
            # where fewer translations than sentences were found.
            examples_en.append(translations[idx] if idx < len(translations) else '')
    examples_cn.append('')  # sentinel marking the end of the list
    examples_en.append('')  # sentinel marking the end of the list
    return [examples_cn, examples_en]
if __name__ == '__main__':
    # Demo invocation, guarded so importing this module does not hit the
    # network.  The word is passed as ``str``: ``urllib.parse.quote`` encodes
    # UTF-8 itself, so the original ``.encode('utf-8')`` was unnecessary.
    findExamples('果然')
| 38.188679
| 132
| 0.635375
|
4a0b2a96a66d116aad8b765e9a7fd3b846ba1df6
| 87,976
|
py
|
Python
|
discord/ext/commands/core.py
|
curiositIy/discord.py
|
f6a74f74a7aed0879fc086805eae8873e745d0ea
|
[
"MIT"
] | null | null | null |
discord/ext/commands/core.py
|
curiositIy/discord.py
|
f6a74f74a7aed0879fc086805eae8873e745d0ea
|
[
"MIT"
] | null | null | null |
discord/ext/commands/core.py
|
curiositIy/discord.py
|
f6a74f74a7aed0879fc086805eae8873e745d0ea
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import datetime
import functools
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Generic,
List,
Literal,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import discord
from ._types import _BaseCommand, CogT
from .cog import Cog
from .context import Context
from .converter import Greedy, run_converters
from .cooldowns import BucketType, Cooldown, CooldownMapping, DynamicCooldownMapping, MaxConcurrency
from .errors import *
from .parameters import Parameter, Signature
if TYPE_CHECKING:
from typing_extensions import Concatenate, ParamSpec, Self
from discord.message import Message
from ._types import BotT, Check, ContextT, Coro, CoroFunc, Error, Hook, UserCheck
# Public re-export surface of this module: the Command/Group hierarchy,
# the command/group factory decorators, and all the built-in checks.
__all__ = (
    'Command',
    'Group',
    'GroupMixin',
    'command',
    'group',
    'has_role',
    'has_permissions',
    'has_any_role',
    'check',
    'check_any',
    'before_invoke',
    'after_invoke',
    'bot_has_role',
    'bot_has_permissions',
    'bot_has_any_role',
    'cooldown',
    'dynamic_cooldown',
    'max_concurrency',
    'dm_only',
    'guild_only',
    'is_owner',
    'is_nsfw',
    'has_guild_permissions',
    'bot_has_guild_permissions',
)
# Sentinel for "argument not supplied" defaults used throughout this module.
MISSING: Any = discord.utils.MISSING
# Generic type variables for command/group factories and callbacks.
T = TypeVar('T')
CommandT = TypeVar('CommandT', bound='Command')
# CHT = TypeVar('CHT', bound='Check')
GroupT = TypeVar('GroupT', bound='Group')
# ParamSpec only exists for type checkers; use a plain TypeVar at runtime.
if TYPE_CHECKING:
    P = ParamSpec('P')
else:
    P = TypeVar('P')
def unwrap_function(function: Callable[..., Any], /) -> Callable[..., Any]:
    """Peel decorator and partial layers off *function*.

    Repeatedly follows ``__wrapped__`` (set by :func:`functools.wraps`) and
    unwraps :class:`functools.partial` objects until the innermost plain
    callable is reached, which is then returned.
    """
    while True:
        try:
            # EAFP: most wrapped callables expose __wrapped__ directly.
            function = function.__wrapped__
        except AttributeError:
            if isinstance(function, functools.partial):
                function = function.func
            else:
                return function
def get_signature_parameters(
    function: Callable[..., Any],
    globalns: Dict[str, Any],
    /,
    *,
    skip_parameters: Optional[int] = None,
) -> Dict[str, Parameter]:
    """Build the parameter map used for argument parsing of a command callback.

    Skips the implicit leading parameters (``self`` and/or ``ctx``), resolves
    string annotations against ``globalns``, and merges information from
    :class:`Parameter` default sentinels into the returned parameters.

    Raises :exc:`TypeError` if the callback has fewer parameters than required
    or uses an unparameterized ``Greedy`` annotation.
    """
    signature = Signature.from_callable(function)
    params: Dict[str, Parameter] = {}
    cache: Dict[str, Any] = {}
    eval_annotation = discord.utils.evaluate_annotation
    # Methods defined inside a class get an extra implicit parameter (self)
    # in addition to ctx, unless the caller overrides the count explicitly.
    required_params = discord.utils.is_inside_class(function) + 1 if skip_parameters is None else skip_parameters
    if len(signature.parameters) < required_params:
        raise TypeError(f'Command signature requires at least {required_params - 1} parameter(s)')
    iterator = iter(signature.parameters.items())
    for _ in range(0, required_params):
        next(iterator)
    for name, parameter in iterator:
        default = parameter.default
        if isinstance(default, Parameter):  # update from the default
            if default.annotation is not Parameter.empty:
                # There are a few cases to care about here.
                # x: TextChannel = commands.CurrentChannel
                # x = commands.CurrentChannel
                # In both of these cases, the default parameter has an explicit annotation
                # but in the second case it's only used as the fallback.
                if default._fallback:
                    if parameter.annotation is Parameter.empty:
                        parameter._annotation = default.annotation
                else:
                    parameter._annotation = default.annotation
            parameter._default = default.default
            parameter._displayed_default = default._displayed_default
        annotation = parameter.annotation
        if annotation is None:
            # ``x: None`` is normalised to NoneType so converters can treat it
            # uniformly as a type.
            params[name] = parameter.replace(annotation=type(None))
            continue
        annotation = eval_annotation(annotation, globalns, globalns, cache)
        if annotation is Greedy:
            raise TypeError('Unparameterized Greedy[...] is disallowed in signature.')
        if hasattr(annotation, '__metadata__'):
            # Annotated[X, Y] can access Y via __metadata__
            metadata = annotation.__metadata__
            if len(metadata) >= 1:
                annotation = metadata[0]
                # NOTE(review): the metadata entry may be an app-commands
                # transformer marker; its wrapped metadata is used instead.
                if isinstance(annotation, discord.app_commands.transformers._TransformMetadata):
                    annotation = annotation.metadata
        params[name] = parameter.replace(annotation=annotation)
    return params
def wrap_callback(coro: Callable[P, Coro[T]], /) -> Callable[P, Coro[Optional[T]]]:
    """Wrap *coro* so that unexpected exceptions surface as :exc:`CommandInvokeError`.

    :exc:`CommandError` propagates untouched, a cancelled task returns ``None``
    quietly, and any other exception is chained into a
    :exc:`CommandInvokeError`.
    """

    @functools.wraps(coro)
    async def wrapped(*args: P.args, **kwargs: P.kwargs) -> Optional[T]:
        try:
            result = await coro(*args, **kwargs)
        except CommandError:
            raise
        except asyncio.CancelledError:
            return None
        except Exception as exc:
            raise CommandInvokeError(exc) from exc
        else:
            return result

    return wrapped
def hooked_wrapped_callback(
    command: Command[Any, ..., Any], ctx: Context[BotT], coro: Callable[P, Coro[T]], /
) -> Callable[P, Coro[Optional[T]]]:
    """Wrap a command callback for a full invocation.

    Like :func:`wrap_callback`, but additionally flags ``ctx.command_failed``
    on any failure and — no matter how the call ends — releases the command's
    max-concurrency slot (if any) and runs the post-invoke hooks.
    """

    @functools.wraps(coro)
    async def wrapped(*args: P.args, **kwargs: P.kwargs) -> Optional[T]:
        try:
            result = await coro(*args, **kwargs)
        except CommandError:
            ctx.command_failed = True
            raise
        except asyncio.CancelledError:
            ctx.command_failed = True
            return None
        except Exception as exc:
            ctx.command_failed = True
            raise CommandInvokeError(exc) from exc
        finally:
            # Cleanup runs on success, error, and cancellation alike.
            if command._max_concurrency is not None:
                await command._max_concurrency.release(ctx.message)
            await command.call_after_hooks(ctx)
        return result

    return wrapped
class _CaseInsensitiveDict(dict):
def __contains__(self, k):
return super().__contains__(k.casefold())
def __delitem__(self, k):
return super().__delitem__(k.casefold())
def __getitem__(self, k):
return super().__getitem__(k.casefold())
def get(self, k, default=None):
return super().get(k.casefold(), default)
def pop(self, k, default=None):
return super().pop(k.casefold(), default)
def __setitem__(self, k, v):
super().__setitem__(k.casefold(), v)
class _AttachmentIterator:
def __init__(self, data: List[discord.Attachment]):
self.data: List[discord.Attachment] = data
self.index: int = 0
def __iter__(self) -> Self:
return self
def __next__(self) -> discord.Attachment:
try:
value = self.data[self.index]
except IndexError:
raise StopIteration
else:
self.index += 1
return value
def is_empty(self) -> bool:
return self.index >= len(self.data)
class Command(_BaseCommand, Generic[CogT, P, T]):
r"""A class that implements the protocol for a bot text command.
These are not created manually, instead they are created via the
decorator or functional interface.
Attributes
-----------
name: :class:`str`
The name of the command.
callback: :ref:`coroutine <coroutine>`
The coroutine that is executed when the command is called.
help: Optional[:class:`str`]
The long help text for the command.
brief: Optional[:class:`str`]
The short help text for the command.
usage: Optional[:class:`str`]
A replacement for arguments in the default help text.
aliases: Union[List[:class:`str`], Tuple[:class:`str`]]
The list of aliases the command can be invoked under.
enabled: :class:`bool`
A boolean that indicates if the command is currently enabled.
If the command is invoked while it is disabled, then
:exc:`.DisabledCommand` is raised to the :func:`.on_command_error`
event. Defaults to ``True``.
parent: Optional[:class:`Group`]
The parent group that this command belongs to. ``None`` if there
isn't one.
cog: Optional[:class:`Cog`]
The cog that this command belongs to. ``None`` if there isn't one.
checks: List[Callable[[:class:`.Context`], :class:`bool`]]
A list of predicates that verifies if the command could be executed
with the given :class:`.Context` as the sole parameter. If an exception
is necessary to be thrown to signal failure, then one inherited from
:exc:`.CommandError` should be used. Note that if the checks fail then
:exc:`.CheckFailure` exception is raised to the :func:`.on_command_error`
event.
description: :class:`str`
The message prefixed into the default help command.
hidden: :class:`bool`
If ``True``\, the default help command does not show this in the
help output.
rest_is_raw: :class:`bool`
If ``False`` and a keyword-only argument is provided then the keyword
only argument is stripped and handled as if it was a regular argument
that handles :exc:`.MissingRequiredArgument` and default values in a
regular matter rather than passing the rest completely raw. If ``True``
then the keyword-only argument will pass in the rest of the arguments
in a completely raw matter. Defaults to ``False``.
invoked_subcommand: Optional[:class:`Command`]
The subcommand that was invoked, if any.
require_var_positional: :class:`bool`
If ``True`` and a variadic positional argument is specified, requires
the user to specify at least one argument. Defaults to ``False``.
.. versionadded:: 1.5
ignore_extra: :class:`bool`
If ``True``\, ignores extraneous strings passed to a command if all its
requirements are met (e.g. ``?foo a b c`` when only expecting ``a``
and ``b``). Otherwise :func:`.on_command_error` and local error handlers
are called with :exc:`.TooManyArguments`. Defaults to ``True``.
cooldown_after_parsing: :class:`bool`
If ``True``\, cooldown processing is done after argument parsing,
which calls converters. If ``False`` then cooldown processing is done
first and then the converters are called second. Defaults to ``False``.
extras: :class:`dict`
A dict of user provided extras to attach to the Command.
.. note::
This object may be copied by the library.
.. versionadded:: 2.0
"""
__original_kwargs__: Dict[str, Any]
def __new__(cls, *args: Any, **kwargs: Any) -> Self:
# if you're wondering why this is done, it's because we need to ensure
# we have a complete original copy of **kwargs even for classes that
# mess with it by popping before delegating to the subclass __init__.
# In order to do this, we need to control the instance creation and
# inject the original kwargs through __new__ rather than doing it
# inside __init__.
self = super().__new__(cls)
# we do a shallow copy because it's probably the most common use case.
# this could potentially break if someone modifies a list or something
# while it's in movement, but for now this is the cheapest and
# fastest way to do what we want.
self.__original_kwargs__ = kwargs.copy()
return self
def __init__(
self,
func: Union[
Callable[Concatenate[CogT, ContextT, P], Coro[T]],
Callable[Concatenate[ContextT, P], Coro[T]],
],
/,
**kwargs: Any,
) -> None:
if not asyncio.iscoroutinefunction(func):
raise TypeError('Callback must be a coroutine.')
name = kwargs.get('name') or func.__name__
if not isinstance(name, str):
raise TypeError('Name of a command must be a string.')
self.name: str = name
self.callback = func
self.enabled: bool = kwargs.get('enabled', True)
help_doc = kwargs.get('help')
if help_doc is not None:
help_doc = inspect.cleandoc(help_doc)
else:
help_doc = inspect.getdoc(func)
if isinstance(help_doc, bytes):
help_doc = help_doc.decode('utf-8')
self.help: Optional[str] = help_doc
self.brief: Optional[str] = kwargs.get('brief')
self.usage: Optional[str] = kwargs.get('usage')
self.rest_is_raw: bool = kwargs.get('rest_is_raw', False)
self.aliases: Union[List[str], Tuple[str]] = kwargs.get('aliases', [])
self.extras: Dict[str, Any] = kwargs.get('extras', {})
if not isinstance(self.aliases, (list, tuple)):
raise TypeError("Aliases of a command must be a list or a tuple of strings.")
self.description: str = inspect.cleandoc(kwargs.get('description', ''))
self.hidden: bool = kwargs.get('hidden', False)
try:
checks = func.__commands_checks__
checks.reverse()
except AttributeError:
checks = kwargs.get('checks', [])
self.checks: List[UserCheck[ContextT]] = checks
try:
cooldown = func.__commands_cooldown__
except AttributeError:
cooldown = kwargs.get('cooldown')
if cooldown is None:
buckets = CooldownMapping(cooldown, BucketType.default)
elif isinstance(cooldown, CooldownMapping):
buckets = cooldown
else:
raise TypeError("Cooldown must be a an instance of CooldownMapping or None.")
self._buckets: CooldownMapping = buckets
try:
max_concurrency = func.__commands_max_concurrency__
except AttributeError:
max_concurrency = kwargs.get('max_concurrency')
self._max_concurrency: Optional[MaxConcurrency] = max_concurrency
self.require_var_positional: bool = kwargs.get('require_var_positional', False)
self.ignore_extra: bool = kwargs.get('ignore_extra', True)
self.cooldown_after_parsing: bool = kwargs.get('cooldown_after_parsing', False)
self._cog: CogT = None
# bandaid for the fact that sometimes parent can be the bot instance
parent: Optional[GroupMixin[Any]] = kwargs.get('parent')
self.parent: Optional[GroupMixin[Any]] = parent if isinstance(parent, _BaseCommand) else None
self._before_invoke: Optional[Hook] = None
try:
before_invoke = func.__before_invoke__
except AttributeError:
pass
else:
self.before_invoke(before_invoke)
self._after_invoke: Optional[Hook] = None
try:
after_invoke = func.__after_invoke__
except AttributeError:
pass
else:
self.after_invoke(after_invoke)
    @property
    def cog(self) -> CogT:
        """The cog this command is bound to, or ``None`` when standalone."""
        return self._cog

    @cog.setter
    def cog(self, value: CogT) -> None:
        self._cog = value
    @property
    def callback(
        self,
    ) -> Union[Callable[Concatenate[CogT, Context, P], Coro[T]], Callable[Concatenate[Context, P], Coro[T]],]:
        """The coroutine executed when the command is invoked."""
        return self._callback

    @callback.setter
    def callback(
        self,
        function: Union[
            Callable[Concatenate[CogT, Context, P], Coro[T]],
            Callable[Concatenate[Context, P], Coro[T]],
        ],
    ) -> None:
        # Replacing the callback re-derives the module and the parsed
        # parameter map used for argument conversion.
        self._callback = function
        unwrap = unwrap_function(function)
        self.module: str = unwrap.__module__

        try:
            globalns = unwrap.__globals__
        except AttributeError:
            # Builtins / partials may have no __globals__; annotations are then
            # resolved against an empty namespace.
            globalns = {}

        self.params: Dict[str, Parameter] = get_signature_parameters(function, globalns)
    def add_check(self, func: UserCheck[ContextT], /) -> None:
        """Adds a check to the command.

        This is the non-decorator interface to :func:`.check`.

        .. versionadded:: 1.3

        .. versionchanged:: 2.0

            ``func`` parameter is now positional-only.

        .. seealso:: The :func:`~discord.ext.commands.check` decorator

        Parameters
        -----------
        func
            The function that will be used as a check.
        """
        # Checks run in list order during can_run().
        self.checks.append(func)
    def remove_check(self, func: UserCheck[ContextT], /) -> None:
        """Removes a check from the command.

        This function is idempotent and will not raise an exception
        if the function is not in the command's checks.

        .. versionadded:: 1.3

        .. versionchanged:: 2.0

            ``func`` parameter is now positional-only.

        Parameters
        -----------
        func
            The function to remove from the checks.
        """
        try:
            self.checks.remove(func)
        except ValueError:
            # Not registered: silently succeed (documented idempotence).
            pass
    def update(self, **kwargs: Any) -> None:
        """Updates :class:`Command` instance with updated attribute.

        This works similarly to the :func:`~discord.ext.commands.command` decorator in terms
        of parameters in that they are passed to the :class:`Command` or
        subclass constructors, sans the name and callback.
        """
        cog = self.cog
        # Re-run __init__ with the original kwargs overlaid by the new ones;
        # the cog binding is preserved across the re-initialisation.
        self.__init__(self.callback, **dict(self.__original_kwargs__, **kwargs))
        self.cog = cog
    async def __call__(self, context: Context[BotT], /, *args: P.args, **kwargs: P.kwargs) -> T:
        """|coro|

        Calls the internal callback that the command holds.

        .. note::

            This bypasses all mechanisms -- including checks, converters,
            invoke hooks, cooldowns, etc. You must take care to pass
            the proper arguments and types to this function.

        .. versionadded:: 1.3

        .. versionchanged:: 2.0

            ``context`` parameter is now positional-only.
        """
        if self.cog is not None:
            # Bound to a cog: the callback expects (cog, ctx, ...).
            return await self.callback(self.cog, context, *args, **kwargs)  # type: ignore
        else:
            return await self.callback(context, *args, **kwargs)  # type: ignore
    def _ensure_assignment_on_copy(self, other: Self) -> Self:
        """Carry over mutable per-command state onto a freshly constructed copy.

        Copies invoke hooks, checks, cooldown buckets, max-concurrency and the
        local error handler so the copy behaves like the original.
        """
        other._before_invoke = self._before_invoke
        other._after_invoke = self._after_invoke
        if self.checks != other.checks:
            other.checks = self.checks.copy()
        if self._buckets.valid and not other._buckets.valid:
            other._buckets = self._buckets.copy()
        if self._max_concurrency and self._max_concurrency != other._max_concurrency:
            # _max_concurrency won't be None at this point
            other._max_concurrency = self._max_concurrency.copy()

        try:
            other.on_error = self.on_error
        except AttributeError:
            # No local error handler registered on the original.
            pass
        return other
    def copy(self) -> Self:
        """Creates a copy of this command.

        Returns
        --------
        :class:`Command`
            A new instance of this command.
        """
        # Rebuild from the constructor kwargs, then port over mutable state.
        ret = self.__class__(self.callback, **self.__original_kwargs__)
        return self._ensure_assignment_on_copy(ret)
    def _update_copy(self, kwargs: Dict[str, Any]) -> Self:
        """Copy this command, letting the original construction kwargs win
        over the supplied overrides (used when re-registering under a cog)."""
        if kwargs:
            kw = kwargs.copy()
            kw.update(self.__original_kwargs__)
            copy = self.__class__(self.callback, **kw)
            return self._ensure_assignment_on_copy(copy)
        else:
            return self.copy()
    async def dispatch_error(self, ctx: Context[BotT], error: CommandError, /) -> None:
        """|coro|

        Route *error* through the command-local handler, then the cog-local
        handler, and finally dispatch the global ``command_error`` event.
        """
        ctx.command_failed = True
        cog = self.cog
        try:
            coro = self.on_error
        except AttributeError:
            pass
        else:
            injected = wrap_callback(coro)  # type: ignore
            if cog is not None:
                await injected(cog, ctx, error)
            else:
                await injected(ctx, error)  # type: ignore

        try:
            if cog is not None:
                local = Cog._get_overridden_method(cog.cog_command_error)
                if local is not None:
                    wrapped = wrap_callback(local)
                    await wrapped(ctx, error)
        finally:
            # The global event fires even if a local handler raised.
            ctx.bot.dispatch('command_error', ctx, error)
    async def transform(self, ctx: Context[BotT], param: Parameter, attachments: _AttachmentIterator, /) -> Any:
        """|coro|

        Convert the next piece of message input into a value for *param*.

        Handles ``Greedy`` converters, attachment parameters, optional and
        default fallbacks, and the consume-rest semantics of keyword-only
        parameters.
        """
        converter = param.converter
        consume_rest_is_special = param.kind == param.KEYWORD_ONLY and not self.rest_is_raw
        view = ctx.view
        view.skip_ws()

        # The greedy converter is simple -- it keeps going until it fails in which case,
        # it undos the view ready for the next parameter to use instead
        if isinstance(converter, Greedy):
            # Special case for Greedy[discord.Attachment] to consume the attachments iterator
            if converter.converter is discord.Attachment:
                return list(attachments)

            if param.kind in (param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY):
                return await self._transform_greedy_pos(ctx, param, param.required, converter.converter)
            elif param.kind == param.VAR_POSITIONAL:
                return await self._transform_greedy_var_pos(ctx, param, converter.converter)
            else:
                # if we're here, then it's a KEYWORD_ONLY param type
                # since this is mostly useless, we'll helpfully transform Greedy[X]
                # into just X and do the parsing that way.
                converter = converter.converter

        # Try to detect Optional[discord.Attachment] or discord.Attachment special converter
        if converter is discord.Attachment:
            try:
                return next(attachments)
            except StopIteration:
                raise MissingRequiredAttachment(param)

        if self._is_typing_optional(param.annotation) and param.annotation.__args__[0] is discord.Attachment:
            if attachments.is_empty():
                # I have no idea who would be doing Optional[discord.Attachment] = 1
                # but for those cases then 1 should be returned instead of None
                return None if param.default is param.empty else param.default
            return next(attachments)

        if view.eof:
            if param.kind == param.VAR_POSITIONAL:
                raise RuntimeError()  # break the loop
            if param.required:
                if self._is_typing_optional(param.annotation):
                    return None
                if hasattr(converter, '__commands_is_flag__') and converter._can_be_constructible():
                    return await converter._construct_default(ctx)
                raise MissingRequiredArgument(param)
            return await param.get_default(ctx)

        previous = view.index
        if consume_rest_is_special:
            # Keyword-only parameter consumes everything that remains.
            ctx.current_argument = argument = view.read_rest().strip()
        else:
            try:
                ctx.current_argument = argument = view.get_quoted_word()
            except ArgumentParsingError as exc:
                if self._is_typing_optional(param.annotation):
                    view.index = previous
                    return None
                else:
                    raise exc
        view.previous = previous

        # type-checker fails to narrow argument
        return await run_converters(ctx, converter, argument, param)  # type: ignore
    async def _transform_greedy_pos(self, ctx: Context[BotT], param: Parameter, required: bool, converter: Any) -> Any:
        """|coro|

        Greedily convert words until one fails, rewinding the view so the
        failing word is available to the next parameter.
        """
        view = ctx.view
        result = []
        while not view.eof:
            # for use with a manual undo
            previous = view.index

            view.skip_ws()
            try:
                ctx.current_argument = argument = view.get_quoted_word()
                value = await run_converters(ctx, converter, argument, param)  # type: ignore
            except (CommandError, ArgumentParsingError):
                view.index = previous
                break
            else:
                result.append(value)

        if not result and not required:
            return await param.get_default(ctx)
        return result
    async def _transform_greedy_var_pos(self, ctx: Context[BotT], param: Parameter, converter: Any) -> Any:
        """|coro|

        Convert one word for a ``*args``-style Greedy parameter; a conversion
        failure rewinds the view and raises ``RuntimeError`` to end the
        caller's consumption loop.
        """
        view = ctx.view
        previous = view.index
        try:
            ctx.current_argument = argument = view.get_quoted_word()
            value = await run_converters(ctx, converter, argument, param)  # type: ignore
        except (CommandError, ArgumentParsingError):
            view.index = previous
            raise RuntimeError() from None  # break loop
        else:
            return value
    @property
    def clean_params(self) -> Dict[str, Parameter]:
        """Dict[:class:`str`, :class:`Parameter`]:
        Retrieves the parameter dictionary without the context or self parameters.

        Useful for inspecting signature.
        """
        # self.params already excludes the implicit leading parameters; a copy
        # is returned so callers cannot mutate parsing state.
        return self.params.copy()
    @property
    def cooldown(self) -> Optional[Cooldown]:
        """Optional[:class:`~discord.app_commands.Cooldown`]: The cooldown of a command when invoked
        or ``None`` if the command doesn't have a registered cooldown.

        .. versionadded:: 2.0
        """
        return self._buckets._cooldown
@property
def full_parent_name(self) -> str:
""":class:`str`: Retrieves the fully qualified parent command name.
This the base command name required to execute it. For example,
in ``?one two three`` the parent name would be ``one two``.
"""
entries = []
command = self
# command.parent is type-hinted as GroupMixin some attributes are resolved via MRO
while command.parent is not None: # type: ignore
command = command.parent # type: ignore
entries.append(command.name) # type: ignore
return ' '.join(reversed(entries))
@property
def parents(self) -> List[Group[Any, ..., Any]]:
"""List[:class:`Group`]: Retrieves the parents of this command.
If the command has no parents then it returns an empty :class:`list`.
For example in commands ``?a b c test``, the parents are ``[c, b, a]``.
.. versionadded:: 1.1
"""
entries = []
command = self
while command.parent is not None: # type: ignore
command = command.parent # type: ignore
entries.append(command)
return entries
@property
def root_parent(self) -> Optional[Group[Any, ..., Any]]:
"""Optional[:class:`Group`]: Retrieves the root parent of this command.
If the command has no parents then it returns ``None``.
For example in commands ``?a b c test``, the root parent is ``a``.
"""
if not self.parent:
return None
return self.parents[-1]
@property
def qualified_name(self) -> str:
""":class:`str`: Retrieves the fully qualified command name.
This is the full parent name with the command name as well.
For example, in ``?one two three`` the qualified name would be
``one two three``.
"""
parent = self.full_parent_name
if parent:
return parent + ' ' + self.name
else:
return self.name
    def __str__(self) -> str:
        # A command's string form is its fully qualified name.
        return self.qualified_name
    async def _parse_arguments(self, ctx: Context[BotT]) -> None:
        """|coro|

        Parse the message content into ``ctx.args`` / ``ctx.kwargs`` using
        this command's parameter map and converters.
        """
        ctx.args = [ctx] if self.cog is None else [self.cog, ctx]
        ctx.kwargs = {}
        args = ctx.args
        kwargs = ctx.kwargs
        attachments = _AttachmentIterator(ctx.message.attachments)

        view = ctx.view
        iterator = iter(self.params.items())

        for name, param in iterator:
            ctx.current_parameter = param
            if param.kind in (param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY):
                transformed = await self.transform(ctx, param, attachments)
                args.append(transformed)
            elif param.kind == param.KEYWORD_ONLY:
                # kwarg only param denotes "consume rest" semantics
                if self.rest_is_raw:
                    ctx.current_argument = argument = view.read_rest()
                    kwargs[name] = await run_converters(ctx, param.converter, argument, param)
                else:
                    kwargs[name] = await self.transform(ctx, param, attachments)
                # A keyword-only parameter is always the last one parsed.
                break
            elif param.kind == param.VAR_POSITIONAL:
                if view.eof and self.require_var_positional:
                    raise MissingRequiredArgument(param)
                while not view.eof:
                    try:
                        transformed = await self.transform(ctx, param, attachments)
                        args.append(transformed)
                    except RuntimeError:
                        # transform() signals exhaustion of *args input this way.
                        break

        if not self.ignore_extra and not view.eof:
            raise TooManyArguments('Too many arguments passed to ' + self.qualified_name)
    async def call_before_hooks(self, ctx: Context[BotT], /) -> None:
        """|coro|

        Run the pre-invoke hooks in order: command-local, cog-local, then the
        bot's global hook.
        """
        # now that we're done preparing we can call the pre-command hooks
        # first, call the command local hook:
        cog = self.cog
        if self._before_invoke is not None:
            # should be cog if @commands.before_invoke is used
            instance = getattr(self._before_invoke, '__self__', cog)
            # __self__ only exists for methods, not functions
            # however, if @command.before_invoke is used, it will be a function
            if instance:
                await self._before_invoke(instance, ctx)  # type: ignore
            else:
                await self._before_invoke(ctx)  # type: ignore

        # call the cog local hook if applicable:
        if cog is not None:
            hook = Cog._get_overridden_method(cog.cog_before_invoke)
            if hook is not None:
                await hook(ctx)

        # call the bot global hook if necessary
        hook = ctx.bot._before_invoke
        if hook is not None:
            await hook(ctx)
    async def call_after_hooks(self, ctx: Context[BotT], /) -> None:
        """|coro|

        Run the post-invoke hooks in order: command-local, cog-local, then the
        bot's global hook (mirrors :meth:`call_before_hooks`).
        """
        cog = self.cog
        if self._after_invoke is not None:
            instance = getattr(self._after_invoke, '__self__', cog)
            if instance:
                await self._after_invoke(instance, ctx)  # type: ignore
            else:
                await self._after_invoke(ctx)  # type: ignore

        # call the cog local hook if applicable:
        if cog is not None:
            hook = Cog._get_overridden_method(cog.cog_after_invoke)
            if hook is not None:
                await hook(ctx)

        hook = ctx.bot._after_invoke
        if hook is not None:
            await hook(ctx)
    def _prepare_cooldowns(self, ctx: Context[BotT]) -> None:
        """Charge this invocation against the rate-limit bucket, raising
        :exc:`CommandOnCooldown` when the bucket is exhausted."""
        if self._buckets.valid:
            # An edit re-triggering a command uses the edit timestamp.
            dt = ctx.message.edited_at or ctx.message.created_at
            current = dt.replace(tzinfo=datetime.timezone.utc).timestamp()
            bucket = self._buckets.get_bucket(ctx.message, current)
            if bucket is not None:
                retry_after = bucket.update_rate_limit(current)
                if retry_after:
                    raise CommandOnCooldown(bucket, retry_after, self._buckets.type)  # type: ignore
    async def prepare(self, ctx: Context[BotT], /) -> None:
        """|coro|

        Run the full pre-invocation pipeline: checks, max-concurrency
        acquisition, cooldowns, argument parsing and pre-invoke hooks.
        The acquired concurrency slot is released if anything fails.
        """
        ctx.command = self

        if not await self.can_run(ctx):
            raise CheckFailure(f'The check functions for command {self.qualified_name} failed.')

        if self._max_concurrency is not None:
            # For this application, context can be duck-typed as a Message
            await self._max_concurrency.acquire(ctx)  # type: ignore

        try:
            if self.cooldown_after_parsing:
                await self._parse_arguments(ctx)
                self._prepare_cooldowns(ctx)
            else:
                self._prepare_cooldowns(ctx)
                await self._parse_arguments(ctx)

            await self.call_before_hooks(ctx)
        except:
            # Bare except on purpose: the slot must be released even on
            # BaseException (e.g. cancellation); the error is re-raised.
            if self._max_concurrency is not None:
                await self._max_concurrency.release(ctx)  # type: ignore
            raise
    def is_on_cooldown(self, ctx: Context[BotT], /) -> bool:
        """Checks whether the command is currently on cooldown.

        .. versionchanged:: 2.0

            ``ctx`` parameter is now positional-only.

        Parameters
        -----------
        ctx: :class:`.Context`
            The invocation context to use when checking the commands cooldown status.

        Returns
        --------
        :class:`bool`
            A boolean indicating if the command is on cooldown.
        """
        if not self._buckets.valid:
            return False

        bucket = self._buckets.get_bucket(ctx.message)
        if bucket is None:
            return False
        dt = ctx.message.edited_at or ctx.message.created_at
        current = dt.replace(tzinfo=datetime.timezone.utc).timestamp()
        # No remaining tokens means the next invocation would be rejected.
        return bucket.get_tokens(current) == 0
    def reset_cooldown(self, ctx: Context[BotT], /) -> None:
        """Resets the cooldown on this command.

        .. versionchanged:: 2.0

            ``ctx`` parameter is now positional-only.

        Parameters
        -----------
        ctx: :class:`.Context`
            The invocation context to reset the cooldown under.
        """
        if self._buckets.valid:
            bucket = self._buckets.get_bucket(ctx.message)
            if bucket is not None:
                bucket.reset()
    def get_cooldown_retry_after(self, ctx: Context[BotT], /) -> float:
        """Retrieves the amount of seconds before this command can be tried again.

        .. versionadded:: 1.4

        .. versionchanged:: 2.0

            ``ctx`` parameter is now positional-only.

        Parameters
        -----------
        ctx: :class:`.Context`
            The invocation context to retrieve the cooldown from.

        Returns
        --------
        :class:`float`
            The amount of time left on this command's cooldown in seconds.
            If this is ``0.0`` then the command isn't on cooldown.
        """
        if self._buckets.valid:
            bucket = self._buckets.get_bucket(ctx.message)
            if bucket is None:
                return 0.0
            dt = ctx.message.edited_at or ctx.message.created_at
            current = dt.replace(tzinfo=datetime.timezone.utc).timestamp()
            return bucket.get_retry_after(current)

        # No cooldown configured at all.
        return 0.0
    async def invoke(self, ctx: Context[BotT], /) -> None:
        """|coro|

        Prepare the context (checks, cooldowns, parsing, hooks) and run the
        command callback with post-invoke cleanup handled by the wrapper.
        """
        await self.prepare(ctx)

        # terminate the invoked_subcommand chain.
        # since we're in a regular command (and not a group) then
        # the invoked subcommand is None.
        ctx.invoked_subcommand = None
        ctx.subcommand_passed = None
        injected = hooked_wrapped_callback(self, ctx, self.callback)  # type: ignore
        await injected(*ctx.args, **ctx.kwargs)  # type: ignore
    async def reinvoke(self, ctx: Context[BotT], /, *, call_hooks: bool = False) -> None:
        """|coro|

        Re-run the command, bypassing checks, cooldowns and concurrency
        limits; invoke hooks are run only when ``call_hooks`` is ``True``.
        """
        ctx.command = self
        await self._parse_arguments(ctx)

        if call_hooks:
            await self.call_before_hooks(ctx)

        ctx.invoked_subcommand = None
        try:
            await self.callback(*ctx.args, **ctx.kwargs)  # type: ignore
        except:
            ctx.command_failed = True
            raise
        finally:
            if call_hooks:
                await self.call_after_hooks(ctx)
    def error(self, coro: Error[CogT, ContextT], /) -> Error[CogT, ContextT]:
        """A decorator that registers a coroutine as a local error handler.

        A local error handler is an :func:`.on_command_error` event limited to
        a single command. However, the :func:`.on_command_error` is still
        invoked afterwards as the catch-all.

        .. versionchanged:: 2.0

            ``coro`` parameter is now positional-only.

        Parameters
        -----------
        coro: :ref:`coroutine <coroutine>`
            The coroutine to register as the local error handler.

        Raises
        -------
        TypeError
            The coroutine passed is not actually a coroutine.
        """
        if not asyncio.iscoroutinefunction(coro):
            raise TypeError('The error handler must be a coroutine.')

        # Stored as an attribute; presence is what has_error_handler() checks.
        self.on_error: Error[CogT, Any] = coro
        return coro
    def has_error_handler(self) -> bool:
        """:class:`bool`: Checks whether the command has an error handler registered.

        .. versionadded:: 1.7
        """
        # The attribute only exists after the @command.error decorator ran.
        return hasattr(self, 'on_error')
    def before_invoke(self, coro: Hook[CogT, ContextT], /) -> Hook[CogT, ContextT]:
        """A decorator that registers a coroutine as a pre-invoke hook.

        A pre-invoke hook is called directly before the command is
        called. This makes it a useful function to set up database
        connections or any type of set up required.

        This pre-invoke hook takes a sole parameter, a :class:`.Context`.

        See :meth:`.Bot.before_invoke` for more info.

        .. versionchanged:: 2.0

            ``coro`` parameter is now positional-only.

        Parameters
        -----------
        coro: :ref:`coroutine <coroutine>`
            The coroutine to register as the pre-invoke hook.

        Raises
        -------
        TypeError
            The coroutine passed is not actually a coroutine.
        """
        if not asyncio.iscoroutinefunction(coro):
            raise TypeError('The pre-invoke hook must be a coroutine.')

        self._before_invoke = coro
        return coro
    def after_invoke(self, coro: Hook[CogT, ContextT], /) -> Hook[CogT, ContextT]:
        """A decorator that registers a coroutine as a post-invoke hook.

        A post-invoke hook is called directly after the command is
        called. This makes it a useful function to clean-up database
        connections or any type of clean up required.

        This post-invoke hook takes a sole parameter, a :class:`.Context`.

        See :meth:`.Bot.after_invoke` for more info.

        .. versionchanged:: 2.0

            ``coro`` parameter is now positional-only.

        Parameters
        -----------
        coro: :ref:`coroutine <coroutine>`
            The coroutine to register as the post-invoke hook.

        Raises
        -------
        TypeError
            The coroutine passed is not actually a coroutine.
        """
        if not asyncio.iscoroutinefunction(coro):
            raise TypeError('The post-invoke hook must be a coroutine.')

        self._after_invoke = coro
        return coro
    @property
    def cog_name(self) -> Optional[str]:
        """Optional[:class:`str`]: The name of the cog this command belongs to, if any."""
        # __cog_name__ is set by the Cog metaclass on the cog's type.
        return type(self.cog).__cog_name__ if self.cog is not None else None
@property
def short_doc(self) -> str:
""":class:`str`: Gets the "short" documentation of a command.
By default, this is the :attr:`.brief` attribute.
If that lookup leads to an empty string then the first line of the
:attr:`.help` attribute is used instead.
"""
if self.brief is not None:
return self.brief
if self.help is not None:
return self.help.split('\n', 1)[0]
return ''
    def _is_typing_optional(self, annotation: Union[T, Optional[T]]) -> bool:
        # True for Optional[X] / Union[..., None] annotations.
        return getattr(annotation, '__origin__', None) is Union and type(None) in annotation.__args__  # type: ignore
    @property
    def signature(self) -> str:
        """:class:`str`: Returns a POSIX-like signature useful for help command output."""
        if self.usage is not None:
            return self.usage

        params = self.clean_params
        if not params:
            return ''

        result = []
        for name, param in params.items():
            greedy = isinstance(param.converter, Greedy)
            optional = False  # postpone evaluation of if it's an optional argument

            annotation: Any = param.converter.converter if greedy else param.converter
            origin = getattr(annotation, '__origin__', None)
            if not greedy and origin is Union:
                none_cls = type(None)
                union_args = annotation.__args__
                optional = union_args[-1] is none_cls
                if len(union_args) == 2 and optional:
                    # Optional[X] collapses to X for display purposes.
                    annotation = union_args[0]
                    origin = getattr(annotation, '__origin__', None)

            if annotation is discord.Attachment:
                # For discord.Attachment we need to signal to the user that it's an attachment
                # It's not exactly pretty but it's enough to differentiate
                if optional:
                    result.append(f'[{name} (upload a file)]')
                elif greedy:
                    result.append(f'[{name} (upload files)]...')
                else:
                    result.append(f'<{name} (upload a file)>')
                continue

            # for typing.Literal[...], typing.Optional[typing.Literal[...]], and Greedy[typing.Literal[...]], the
            # parameter signature is a literal list of it's values
            if origin is Literal:
                name = '|'.join(f'"{v}"' if isinstance(v, str) else str(v) for v in annotation.__args__)
            if not param.required:
                # We don't want None or '' to trigger the [name=value] case and instead it should
                # do [name] since [name=None] or [name=] are not exactly useful for the user.
                if param.displayed_default:
                    result.append(
                        f'[{name}={param.displayed_default}]' if not greedy else f'[{name}={param.displayed_default}]...'
                    )
                    continue
                else:
                    result.append(f'[{name}]')
            elif param.kind == param.VAR_POSITIONAL:
                if self.require_var_positional:
                    result.append(f'<{name}...>')
                else:
                    result.append(f'[{name}...]')
            elif greedy:
                result.append(f'[{name}]...')
            elif optional:
                result.append(f'[{name}]')
            else:
                result.append(f'<{name}>')

        return ' '.join(result)
async def can_run(self, ctx: Context[BotT], /) -> bool:
"""|coro|
Checks if the command can be executed by checking all the predicates
inside the :attr:`~Command.checks` attribute. This also checks whether the
command is disabled.
.. versionchanged:: 1.3
Checks whether the command is disabled or not
.. versionchanged:: 2.0
``ctx`` parameter is now positional-only.
Parameters
-----------
ctx: :class:`.Context`
The ctx of the command currently being invoked.
Raises
-------
:class:`CommandError`
Any command error that was raised during a check call will be propagated
by this function.
Returns
--------
:class:`bool`
A boolean indicating if the command can be invoked.
"""
if not self.enabled:
raise DisabledCommand(f'{self.name} command is disabled')
original = ctx.command
ctx.command = self
try:
if not await ctx.bot.can_run(ctx):
raise CheckFailure(f'The global check functions for command {self.qualified_name} failed.')
cog = self.cog
if cog is not None:
local_check = Cog._get_overridden_method(cog.cog_check)
if local_check is not None:
ret = await discord.utils.maybe_coroutine(local_check, ctx)
if not ret:
return False
predicates = self.checks
if not predicates:
# since we have no checks, then we just return True.
return True
return await discord.utils.async_all(predicate(ctx) for predicate in predicates) # type: ignore
finally:
ctx.command = original
class GroupMixin(Generic[CogT]):
    """A mixin that implements common functionality for classes that behave
    similar to :class:`.Group` and are allowed to register commands.
    Attributes
    -----------
    all_commands: :class:`dict`
        A mapping of command name to :class:`.Command`
        objects.
    case_insensitive: :class:`bool`
        Whether the commands should be case insensitive. Defaults to ``False``.
    """
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        case_insensitive = kwargs.get('case_insensitive', False)
        # Both primary names and aliases live in this single mapping; a
        # case-insensitive dict is substituted when requested.
        self.all_commands: Dict[str, Command[CogT, ..., Any]] = _CaseInsensitiveDict() if case_insensitive else {}
        self.case_insensitive: bool = case_insensitive
        super().__init__(*args, **kwargs)
    @property
    def commands(self) -> Set[Command[CogT, ..., Any]]:
        """Set[:class:`.Command`]: A unique set of commands without aliases that are registered."""
        # The set collapses alias entries that reference the same command object.
        return set(self.all_commands.values())
    def recursively_remove_all_commands(self) -> None:
        # Iterate over a copy because remove_command mutates all_commands.
        for command in self.all_commands.copy().values():
            if isinstance(command, GroupMixin):
                command.recursively_remove_all_commands()
            self.remove_command(command.name)
    def add_command(self, command: Command[CogT, ..., Any], /) -> None:
        """Adds a :class:`.Command` into the internal list of commands.
        This is usually not called, instead the :meth:`~.GroupMixin.command` or
        :meth:`~.GroupMixin.group` shortcut decorators are used instead.
        .. versionchanged:: 1.4
            Raise :exc:`.CommandRegistrationError` instead of generic :exc:`.ClientException`
        .. versionchanged:: 2.0
            ``command`` parameter is now positional-only.
        Parameters
        -----------
        command: :class:`Command`
            The command to add.
        Raises
        -------
        CommandRegistrationError
            If the command or its alias is already registered by different command.
        TypeError
            If the command passed is not a subclass of :class:`.Command`.
        """
        if not isinstance(command, Command):
            raise TypeError('The command passed must be a subclass of Command')
        if isinstance(self, Command):
            command.parent = self
        if command.name in self.all_commands:
            raise CommandRegistrationError(command.name)
        self.all_commands[command.name] = command
        for alias in command.aliases:
            if alias in self.all_commands:
                # Undo the primary-name registration before reporting the conflict.
                self.remove_command(command.name)
                raise CommandRegistrationError(alias, alias_conflict=True)
            self.all_commands[alias] = command
    def remove_command(self, name: str, /) -> Optional[Command[CogT, ..., Any]]:
        """Remove a :class:`.Command` from the internal list
        of commands.
        This could also be used as a way to remove aliases.
        .. versionchanged:: 2.0
            ``name`` parameter is now positional-only.
        Parameters
        -----------
        name: :class:`str`
            The name of the command to remove.
        Returns
        --------
        Optional[:class:`.Command`]
            The command that was removed. If the name is not valid then
            ``None`` is returned instead.
        """
        command = self.all_commands.pop(name, None)
        # does not exist
        if command is None:
            return None
        if name in command.aliases:
            # we're removing an alias so we don't want to remove the rest
            return command
        # we're not removing the alias so let's delete the rest of them.
        for alias in command.aliases:
            cmd = self.all_commands.pop(alias, None)
            # in the case of a CommandRegistrationError, an alias might conflict
            # with an already existing command. If this is the case, we want to
            # make sure the pre-existing command is not removed.
            if cmd is not None and cmd != command:
                self.all_commands[alias] = cmd
        return command
    def walk_commands(self) -> Generator[Command[CogT, ..., Any], None, None]:
        """An iterator that recursively walks through all commands and subcommands.
        .. versionchanged:: 1.4
            Duplicates due to aliases are no longer returned
        Yields
        ------
        Union[:class:`.Command`, :class:`.Group`]
            A command or group from the internal list of commands.
        """
        for command in self.commands:
            yield command
            if isinstance(command, GroupMixin):
                yield from command.walk_commands()
    def get_command(self, name: str, /) -> Optional[Command[CogT, ..., Any]]:
        """Get a :class:`.Command` from the internal list
        of commands.
        This could also be used as a way to get aliases.
        The name could be fully qualified (e.g. ``'foo bar'``) will get
        the subcommand ``bar`` of the group command ``foo``. If a
        subcommand is not found then ``None`` is returned just as usual.
        .. versionchanged:: 2.0
            ``name`` parameter is now positional-only.
        Parameters
        -----------
        name: :class:`str`
            The name of the command to get.
        Returns
        --------
        Optional[:class:`Command`]
            The command that was requested. If not found, returns ``None``.
        """
        # fast path, no space in name.
        if ' ' not in name:
            return self.all_commands.get(name)
        names = name.split()
        if not names:
            return None
        obj = self.all_commands.get(names[0])
        if not isinstance(obj, GroupMixin):
            return obj
        # Walk the remaining qualified-name parts down through nested groups.
        for name in names[1:]:
            try:
                obj = obj.all_commands[name]  # type: ignore
            except (AttributeError, KeyError):
                return None
        return obj
    @overload
    def command(
        self: GroupMixin[CogT],
        name: str = ...,
        *args: Any,
        **kwargs: Any,
    ) -> Callable[
        [
            Union[
                Callable[Concatenate[CogT, ContextT, P], Coro[T]],
                Callable[Concatenate[ContextT, P], Coro[T]],
            ]
        ],
        Command[CogT, P, T],
    ]:
        ...
    @overload
    def command(
        self: GroupMixin[CogT],
        name: str = ...,
        cls: Type[CommandT] = ...,
        *args: Any,
        **kwargs: Any,
    ) -> Callable[
        [
            Union[
                Callable[Concatenate[CogT, ContextT, P], Coro[T]],
                Callable[Concatenate[ContextT, P], Coro[T]],
            ]
        ],
        CommandT,
    ]:
        ...
    def command(
        self,
        name: str = MISSING,
        cls: Type[Command[Any, ..., Any]] = MISSING,
        *args: Any,
        **kwargs: Any,
    ) -> Any:
        """A shortcut decorator that invokes :func:`~discord.ext.commands.command` and adds it to
        the internal command list via :meth:`~.GroupMixin.add_command`.
        Returns
        --------
        Callable[..., :class:`Command`]
            A decorator that converts the provided method into a Command, adds it to the bot, then returns it.
        """
        def decorator(func):
            # Register this mixin as the parent unless the caller overrode it.
            kwargs.setdefault('parent', self)
            result = command(name=name, cls=cls, *args, **kwargs)(func)
            self.add_command(result)
            return result
        return decorator
    @overload
    def group(
        self: GroupMixin[CogT],
        name: str = ...,
        *args: Any,
        **kwargs: Any,
    ) -> Callable[
        [
            Union[
                Callable[Concatenate[CogT, ContextT, P], Coro[T]],
                Callable[Concatenate[ContextT, P], Coro[T]],
            ]
        ],
        Group[CogT, P, T],
    ]:
        ...
    @overload
    def group(
        self: GroupMixin[CogT],
        name: str = ...,
        cls: Type[GroupT] = ...,
        *args: Any,
        **kwargs: Any,
    ) -> Callable[
        [
            Union[
                Callable[Concatenate[CogT, ContextT, P], Coro[T]],
                Callable[Concatenate[ContextT, P], Coro[T]],
            ]
        ],
        GroupT,
    ]:
        ...
    def group(
        self,
        name: str = MISSING,
        cls: Type[Group[Any, ..., Any]] = MISSING,
        *args: Any,
        **kwargs: Any,
    ) -> Any:
        """A shortcut decorator that invokes :func:`.group` and adds it to
        the internal command list via :meth:`~.GroupMixin.add_command`.
        Returns
        --------
        Callable[..., :class:`Group`]
            A decorator that converts the provided method into a Group, adds it to the bot, then returns it.
        """
        def decorator(func):
            # Register this mixin as the parent unless the caller overrode it.
            kwargs.setdefault('parent', self)
            result = group(name=name, cls=cls, *args, **kwargs)(func)
            self.add_command(result)
            return result
        return decorator
class Group(GroupMixin[CogT], Command[CogT, P, T]):
    """A class that implements a grouping protocol for commands to be
    executed as subcommands.
    This class is a subclass of :class:`.Command` and thus all options
    valid in :class:`.Command` are valid in here as well.
    Attributes
    -----------
    invoke_without_command: :class:`bool`
        Indicates if the group callback should begin parsing and
        invocation only if no subcommand was found. Useful for
        making it an error handling function to tell the user that
        no subcommand was found or to have different functionality
        in case no subcommand was found. If this is ``False``, then
        the group callback will always be invoked first. This means
        that the checks and the parsing dictated by its parameters
        will be executed. Defaults to ``False``.
    case_insensitive: :class:`bool`
        Indicates if the group's commands should be case insensitive.
        Defaults to ``False``.
    """
    def __init__(self, *args: Any, **attrs: Any) -> None:
        self.invoke_without_command: bool = attrs.pop('invoke_without_command', False)
        super().__init__(*args, **attrs)
    def copy(self) -> Self:
        """Creates a copy of this :class:`Group`.
        Returns
        --------
        :class:`Group`
            A new instance of this group.
        """
        ret = super().copy()
        # Subcommands are copied too so the clone is fully independent.
        for cmd in self.commands:
            ret.add_command(cmd.copy())
        return ret
    async def invoke(self, ctx: Context[BotT], /) -> None:
        ctx.invoked_subcommand = None
        ctx.subcommand_passed = None
        early_invoke = not self.invoke_without_command
        if early_invoke:
            await self.prepare(ctx)
        # Peek at the next word of the message to see if it names a subcommand.
        view = ctx.view
        previous = view.index
        view.skip_ws()
        trigger = view.get_word()
        if trigger:
            ctx.subcommand_passed = trigger
            ctx.invoked_subcommand = self.all_commands.get(trigger, None)
        if early_invoke:
            injected = hooked_wrapped_callback(self, ctx, self.callback)  # type: ignore
            await injected(*ctx.args, **ctx.kwargs)  # type: ignore
        ctx.invoked_parents.append(ctx.invoked_with)  # type: ignore
        if trigger and ctx.invoked_subcommand:
            ctx.invoked_with = trigger
            await ctx.invoked_subcommand.invoke(ctx)
        elif not early_invoke:
            # undo the trigger parsing
            view.index = previous
            view.previous = previous
            await super().invoke(ctx)
    async def reinvoke(self, ctx: Context[BotT], /, *, call_hooks: bool = False) -> None:
        ctx.invoked_subcommand = None
        early_invoke = not self.invoke_without_command
        if early_invoke:
            ctx.command = self
            await self._parse_arguments(ctx)
            if call_hooks:
                await self.call_before_hooks(ctx)
        # Peek at the next word of the message to see if it names a subcommand.
        view = ctx.view
        previous = view.index
        view.skip_ws()
        trigger = view.get_word()
        if trigger:
            ctx.subcommand_passed = trigger
            ctx.invoked_subcommand = self.all_commands.get(trigger, None)
        if early_invoke:
            try:
                await self.callback(*ctx.args, **ctx.kwargs)  # type: ignore
            except:
                ctx.command_failed = True
                raise
            finally:
                if call_hooks:
                    await self.call_after_hooks(ctx)
        ctx.invoked_parents.append(ctx.invoked_with)  # type: ignore
        if trigger and ctx.invoked_subcommand:
            ctx.invoked_with = trigger
            await ctx.invoked_subcommand.reinvoke(ctx, call_hooks=call_hooks)
        elif not early_invoke:
            # undo the trigger parsing
            view.index = previous
            view.previous = previous
            await super().reinvoke(ctx, call_hooks=call_hooks)
# Decorators
if TYPE_CHECKING:
    # Using a class to emulate a function allows for overloading the inner function in the decorator.
    class _CommandDecorator:
        @overload
        def __call__(self, func: Callable[Concatenate[CogT, ContextT, P], Coro[T]], /) -> Command[CogT, P, T]:
            ...
        @overload
        def __call__(self, func: Callable[Concatenate[ContextT, P], Coro[T]], /) -> Command[None, P, T]:
            ...
        def __call__(self, func: Callable[..., Coro[T]], /) -> Any:
            ...
    # Same trick for the :func:`group` decorator.
    class _GroupDecorator:
        @overload
        def __call__(self, func: Callable[Concatenate[CogT, ContextT, P], Coro[T]], /) -> Group[CogT, P, T]:
            ...
        @overload
        def __call__(self, func: Callable[Concatenate[ContextT, P], Coro[T]], /) -> Group[None, P, T]:
            ...
        def __call__(self, func: Callable[..., Coro[T]], /) -> Any:
            ...
# Typing-only overloads: without ``cls`` the decorator produces a plain
# :class:`Command`; with ``cls`` it produces an instance of that class.
@overload
def command(
    name: str = ...,
    **attrs: Any,
) -> _CommandDecorator:
    ...
@overload
def command(
    name: str = ...,
    cls: Type[CommandT] = ...,
    **attrs: Any,
) -> Callable[
    [
        Union[
            Callable[Concatenate[ContextT, P], Coro[Any]],
            Callable[Concatenate[CogT, ContextT, P], Coro[Any]],  # type: ignore # CogT is used here to allow covariance
        ]
    ],
    CommandT,
]:
    ...
def command(
    name: str = MISSING,
    cls: Type[Command[Any, ..., Any]] = MISSING,
    **attrs: Any,
) -> Any:
    """A decorator that transforms a function into a :class:`.Command`
    (or, when invoked via :func:`.group`, a :class:`.Group`).
    The ``help`` attribute is received automatically from the docstring of the
    function and is cleaned up with ``inspect.cleandoc``; a ``bytes`` docstring
    is decoded into :class:`str` using utf-8 encoding.
    All checks added using the :func:`.check` & co. decorators are added into
    the function. There is no way to supply your own checks through this
    decorator.
    Parameters
    -----------
    name: :class:`str`
        The name to create the command with. By default this uses the
        function name unchanged.
    cls
        The class to construct with. By default this is :class:`.Command`.
        You usually do not change this.
    attrs
        Keyword arguments to pass into the construction of the class denoted
        by ``cls``.
    Raises
    -------
    TypeError
        If the function is not a coroutine or is already a command.
    """
    command_cls = Command if cls is MISSING else cls
    def decorator(func):
        # Refuse to wrap something that is already a Command instance.
        if isinstance(func, Command):
            raise TypeError('Callback is already a command.')
        return command_cls(func, name=name, **attrs)
    return decorator
# Typing-only overloads mirroring :func:`command` but producing groups.
@overload
def group(
    name: str = ...,
    **attrs: Any,
) -> _GroupDecorator:
    ...
@overload
def group(
    name: str = ...,
    cls: Type[GroupT] = ...,
    **attrs: Any,
) -> Callable[
    [
        Union[
            Callable[Concatenate[CogT, ContextT, P], Coro[Any]],  # type: ignore # CogT is used here to allow covariance
            Callable[Concatenate[ContextT, P], Coro[Any]],
        ]
    ],
    GroupT,
]:
    ...
def group(
    name: str = MISSING,
    cls: Type[Group[Any, ..., Any]] = MISSING,
    **attrs: Any,
) -> Any:
    """A decorator that transforms a function into a :class:`.Group`.
    Identical to the :func:`~discord.ext.commands.command` decorator except
    that the ``cls`` parameter defaults to :class:`Group`.
    .. versionchanged:: 1.1
        The ``cls`` parameter can now be passed.
    """
    # Delegate to :func:`command`, substituting the Group class when no
    # explicit class was supplied.
    return command(name=name, cls=Group if cls is MISSING else cls, **attrs)
def check(predicate: UserCheck[ContextT], /) -> Check[ContextT]:
    r"""A decorator that adds a check to the :class:`.Command` or its
    subclasses. These checks could be accessed via :attr:`.Command.checks`.
    A check is a predicate taking a single :class:`.Context` parameter. If the
    predicate returns a ``False``\-like value, invocation raises
    :exc:`.CheckFailure`, which is sent to the :func:`.on_command_error` event.
    Exceptions raised inside a predicate should subclass :exc:`.CommandError`;
    those are sent to :func:`.on_command_error` while any other exception is
    propagated unchanged.
    The returned decorator carries a ``predicate`` attribute holding the
    predicate it was given — always wrapped as a coroutine — which allows
    introspection and chaining of checks, e.g.:
    .. code-block:: python3
        def owner_or_permissions(**perms):
            original = commands.has_permissions(**perms).predicate
            async def extended_check(ctx):
                if ctx.guild is None:
                    return False
                return ctx.guild.owner_id == ctx.author.id or await original(ctx)
            return commands.check(extended_check)
    .. note::
        The function returned by ``predicate`` is **always** a coroutine,
        even if the original function was not a coroutine.
    .. versionchanged:: 1.3
        The ``predicate`` attribute was added.
    .. versionchanged:: 2.0
        ``predicate`` parameter is now positional-only.
    Parameters
    -----------
    predicate: Callable[[:class:`Context`], :class:`bool`]
        The predicate to check if the command should be invoked.
    """
    def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:
        # Commands store checks directly; bare callbacks accumulate them in an
        # attribute that the Command constructor later picks up.
        if isinstance(func, Command):
            func.checks.append(predicate)
        else:
            try:
                func.__commands_checks__.append(predicate)
            except AttributeError:
                func.__commands_checks__ = [predicate]
        return func
    # Expose the predicate for introspection, normalizing sync predicates
    # into coroutines.
    if inspect.iscoroutinefunction(predicate):
        decorator.predicate = predicate
    else:
        @functools.wraps(predicate)
        async def async_predicate(ctx: ContextT):
            return predicate(ctx)
        decorator.predicate = async_predicate
    return decorator  # type: ignore
def check_any(*checks: Check[ContextT]) -> Check[ContextT]:
    r"""A :func:`check` that passes if **any** of the given checks pass,
    i.e. logical OR.
    If every check fails, :exc:`.CheckAnyFailure` — a subclass of
    :exc:`.CheckFailure` — is raised carrying the collected errors.
    .. note::
        The ``predicate`` attribute for this function **is** a coroutine.
    .. versionadded:: 1.3
    Parameters
    ------------
    \*checks: Callable[[:class:`Context`], :class:`bool`]
        An argument list of checks that have been decorated with
        the :func:`check` decorator.
    Raises
    -------
    TypeError
        A check passed has not been decorated with the :func:`check`
        decorator.
    Examples
    ---------
    Allowing either the bot owner or the server owner:
    .. code-block:: python3
        def is_guild_owner():
            def predicate(ctx):
                return ctx.guild is not None and ctx.guild.owner_id == ctx.author.id
            return commands.check(predicate)
        @bot.command()
        @commands.check_any(commands.is_owner(), is_guild_owner())
        async def only_for_owners(ctx):
            await ctx.send('Hello mister owner!')
    """
    # Collect the raw predicates; anything without one was not produced by
    # the :func:`check` decorator.
    unwrapped = []
    for wrapped in checks:
        try:
            unwrapped.append(wrapped.predicate)
        except AttributeError:
            raise TypeError(f'{wrapped!r} must be wrapped by commands.check decorator') from None
    async def predicate(ctx: Context[BotT]) -> bool:
        errors = []
        for pred in unwrapped:
            try:
                if await pred(ctx):
                    return True
            except CheckFailure as exc:
                errors.append(exc)
        # if we're here, all checks failed
        raise CheckAnyFailure(unwrapped, errors)
    return check(predicate)  # type: ignore
def has_role(item: Union[int, str], /) -> Check[Any]:
    """A :func:`.check` that verifies the member invoking the command has a
    role matching the given exact name (including caps and spelling) or exact
    snowflake ID.
    In a private message context the check raises :exc:`.NoPrivateMessage`;
    when the role is missing it raises :exc:`.MissingRole`. Both inherit from
    :exc:`.CheckFailure`.
    .. versionchanged:: 1.1
        Raise :exc:`.MissingRole` or :exc:`.NoPrivateMessage`
        instead of generic :exc:`.CheckFailure`
    .. versionchanged:: 2.0
        ``item`` parameter is now positional-only.
    Parameters
    -----------
    item: Union[:class:`int`, :class:`str`]
        The name or ID of the role to check.
    """
    def predicate(ctx: Context[BotT]) -> bool:
        if ctx.guild is None:
            raise NoPrivateMessage()
        # ctx.author is a Member here, though the type checker can't narrow it.
        lookup = {'id': item} if isinstance(item, int) else {'name': item}
        if discord.utils.get(ctx.author.roles, **lookup) is None:  # type: ignore
            raise MissingRole(item)
        return True
    return check(predicate)
def has_any_role(*items: Union[int, str]) -> Callable[[T], T]:
    r"""A :func:`.check` that passes when the invoking member has **any** of
    the listed roles — one out of three specified roles is enough. As with
    :func:`.has_role`\, names or IDs must be exact.
    In a private message context the check raises :exc:`.NoPrivateMessage`;
    when none of the roles are present it raises :exc:`.MissingAnyRole`. Both
    inherit from :exc:`.CheckFailure`.
    .. versionchanged:: 1.1
        Raise :exc:`.MissingAnyRole` or :exc:`.NoPrivateMessage`
        instead of generic :exc:`.CheckFailure`
    Parameters
    -----------
    items: List[Union[:class:`str`, :class:`int`]]
        An argument list of names or IDs to check that the member has roles wise.
    Example
    --------
    .. code-block:: python3
        @bot.command()
        @commands.has_any_role('Library Devs', 'Moderators', 492212595072434186)
        async def cool(ctx):
            await ctx.send('You are cool indeed')
    """
    def predicate(ctx):
        if ctx.guild is None:
            raise NoPrivateMessage()
        # ctx.author is a Member here, though the type checker can't narrow it.
        roles = ctx.author.roles
        for item in items:
            # Integers are role IDs, strings are exact role names.
            found = discord.utils.get(roles, id=item) if isinstance(item, int) else discord.utils.get(roles, name=item)
            if found is not None:
                return True
        raise MissingAnyRole(list(items))
    return check(predicate)
def bot_has_role(item: Union[int, str], /) -> Callable[[T], T]:
    """Similar to :func:`.has_role` except checks if the bot itself has the
    role.
    This check raises one of two special exceptions, :exc:`.BotMissingRole` if the bot
    is missing the role, or :exc:`.NoPrivateMessage` if it is used in a private message.
    Both inherit from :exc:`.CheckFailure`.
    .. versionchanged:: 1.1
        Raise :exc:`.BotMissingRole` or :exc:`.NoPrivateMessage`
        instead of generic :exc:`.CheckFailure`
    .. versionchanged:: 2.0
        ``item`` parameter is now positional-only.
    Parameters
    -----------
    item: Union[:class:`int`, :class:`str`]
        The name or ID of the role to check.
    """
    def predicate(ctx):
        if ctx.guild is None:
            raise NoPrivateMessage()
        me = ctx.me
        # Integers are treated as role IDs and strings as exact role names,
        # mirroring :func:`.has_role`.
        if isinstance(item, int):
            role = discord.utils.get(me.roles, id=item)
        else:
            role = discord.utils.get(me.roles, name=item)
        if role is None:
            raise BotMissingRole(item)
        return True
    return check(predicate)
def bot_has_any_role(*items: Union[int, str]) -> Callable[[T], T]:
    """Similar to :func:`.has_any_role` except checks if the bot itself has
    any of the roles listed.
    This check raises one of two special exceptions, :exc:`.BotMissingAnyRole` if the bot
    is missing all roles, or :exc:`.NoPrivateMessage` if it is used in a private message.
    Both inherit from :exc:`.CheckFailure`.
    .. versionchanged:: 1.1
        Raise :exc:`.BotMissingAnyRole` or :exc:`.NoPrivateMessage`
        instead of generic :exc:`.CheckFailure`
    Parameters
    -----------
    items: List[Union[:class:`str`, :class:`int`]]
        An argument list of names or IDs to check that the bot has roles wise.
    """
    def predicate(ctx):
        if ctx.guild is None:
            raise NoPrivateMessage()
        me = ctx.me
        # Integers are treated as role IDs and strings as exact role names,
        # mirroring :func:`.has_any_role`.
        getter = functools.partial(discord.utils.get, me.roles)
        if any(getter(id=item) is not None if isinstance(item, int) else getter(name=item) is not None for item in items):
            return True
        raise BotMissingAnyRole(list(items))
    return check(predicate)
def has_permissions(**perms: bool) -> Check[Any]:
    """A :func:`.check` requiring the invoking member to match every one of
    the given permission flags in the current channel (not guild-wide).
    The keyword names must exactly match the properties shown under
    :class:`.discord.Permissions`. On failure the check raises
    :exc:`.MissingPermissions`, which inherits from :exc:`.CheckFailure`,
    listing the flags that did not match.
    Parameters
    ------------
    perms
        An argument list of permissions to check for.
    Example
    ---------
    .. code-block:: python3
        @bot.command()
        @commands.has_permissions(manage_messages=True)
        async def test(ctx):
            await ctx.send('You can manage messages.')
    """
    # Reject unknown permission names up front, at decoration time.
    invalid = set(perms) - set(discord.Permissions.VALID_FLAGS)
    if invalid:
        raise TypeError(f"Invalid permission(s): {', '.join(invalid)}")
    def predicate(ctx: Context[BotT]) -> bool:
        resolved = ctx.channel.permissions_for(ctx.author)  # type: ignore
        missing = [flag for flag, expected in perms.items() if getattr(resolved, flag) != expected]
        if missing:
            raise MissingPermissions(missing)
        return True
    return check(predicate)
def bot_has_permissions(**perms: bool) -> Check[Any]:
    """Similar to :func:`.has_permissions` except checks if the bot itself has
    the permissions listed.
    This check raises a special exception, :exc:`.BotMissingPermissions`
    that is inherited from :exc:`.CheckFailure`.
    """
    # Reject unknown permission names up front, at decoration time.
    invalid = set(perms) - set(discord.Permissions.VALID_FLAGS)
    if invalid:
        raise TypeError(f"Invalid permission(s): {', '.join(invalid)}")
    def predicate(ctx: Context[BotT]) -> bool:
        # Outside a guild the bot user itself is the relevant member.
        guild = ctx.guild
        bot_member = guild.me if guild is not None else ctx.bot.user
        resolved = ctx.channel.permissions_for(bot_member)  # type: ignore
        missing = [flag for flag, expected in perms.items() if getattr(resolved, flag) != expected]
        if missing:
            raise BotMissingPermissions(missing)
        return True
    return check(predicate)
def has_guild_permissions(**perms: bool) -> Check[Any]:
    """Similar to :func:`.has_permissions`, but operates on guild wide
    permissions instead of the current channel permissions.
    If this check is called in a DM context, it will raise an
    exception, :exc:`.NoPrivateMessage`.
    .. versionadded:: 1.3
    """
    # Reject unknown permission names up front, at decoration time.
    invalid = set(perms) - set(discord.Permissions.VALID_FLAGS)
    if invalid:
        raise TypeError(f"Invalid permission(s): {', '.join(invalid)}")
    def predicate(ctx: Context[BotT]) -> bool:
        if not ctx.guild:
            raise NoPrivateMessage
        resolved = ctx.author.guild_permissions  # type: ignore
        missing = [flag for flag, expected in perms.items() if getattr(resolved, flag) != expected]
        if missing:
            raise MissingPermissions(missing)
        return True
    return check(predicate)
def bot_has_guild_permissions(**perms: bool) -> Check[Any]:
    """Similar to :func:`.has_guild_permissions`, but checks the bot
    members guild permissions.
    .. versionadded:: 1.3
    """
    # Reject unknown permission names up front, at decoration time.
    invalid = set(perms) - set(discord.Permissions.VALID_FLAGS)
    if invalid:
        raise TypeError(f"Invalid permission(s): {', '.join(invalid)}")
    def predicate(ctx: Context[BotT]) -> bool:
        if not ctx.guild:
            raise NoPrivateMessage
        resolved = ctx.me.guild_permissions  # type: ignore
        missing = [flag for flag, expected in perms.items() if getattr(resolved, flag) != expected]
        if missing:
            raise BotMissingPermissions(missing)
        return True
    return check(predicate)
def dm_only() -> Check[Any]:
    """A :func:`.check` restricting the command to private message (DM)
    contexts; invoking it from a guild raises :exc:`.PrivateMessageOnly`,
    which inherits from :exc:`.CheckFailure`.
    .. versionadded:: 1.1
    """
    def predicate(ctx: Context[BotT]) -> bool:
        if ctx.guild is None:
            return True
        raise PrivateMessageOnly()
    return check(predicate)
def guild_only() -> Check[Any]:
    """A :func:`.check` restricting the command to guild contexts only —
    private messages raise :exc:`.NoPrivateMessage`, which inherits from
    :exc:`.CheckFailure`.
    On hybrid commands this is equivalent to the
    :func:`discord.app_commands.guild_only` decorator. In an unsupported
    context, such as a subcommand, it still falls back to applying the check.
    """
    # Re-implemented instead of delegating to :func:`check` so the matching
    # app-command metadata can be attached alongside the classic check.
    def predicate(ctx: Context[BotT]) -> bool:
        if ctx.guild is None:
            raise NoPrivateMessage()
        return True
    def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:
        if isinstance(func, Command):
            func.checks.append(predicate)
            if hasattr(func, '__commands_is_hybrid__'):
                app_command = getattr(func, 'app_command', None)
                if app_command:
                    app_command.guild_only = True
        else:
            try:
                func.__commands_checks__.append(predicate)
            except AttributeError:
                func.__commands_checks__ = [predicate]
            func.__discord_app_commands_guild_only__ = True
        return func
    # Expose the predicate, normalized to a coroutine, like :func:`check` does.
    if inspect.iscoroutinefunction(predicate):
        decorator.predicate = predicate
    else:
        @functools.wraps(predicate)
        async def async_predicate(ctx: Context[BotT]):
            return predicate(ctx)
        decorator.predicate = async_predicate
    return decorator  # type: ignore
def is_owner() -> Check[Any]:
    """A :func:`.check` that passes only for the owner of the bot, as decided
    by :meth:`.Bot.is_owner`; anyone else triggers :exc:`.NotOwner`, which is
    derived from :exc:`.CheckFailure`.
    """
    async def predicate(ctx: Context[BotT]) -> bool:
        if await ctx.bot.is_owner(ctx.author):
            return True
        raise NotOwner('You do not own this bot.')
    return check(predicate)
def is_nsfw() -> Check[Any]:
    """A :func:`.check` that checks if the channel is a NSFW channel, raising
    :exc:`.NSFWChannelRequired` — derived from :exc:`.CheckFailure` —
    otherwise. DM channels pass this check.
    On hybrid commands this is equivalent to setting the application
    command's ``nsfw`` attribute to ``True``. In an unsupported context, such
    as a subcommand, it still falls back to applying the check.
    .. versionchanged:: 1.1
        Raise :exc:`.NSFWChannelRequired` instead of generic :exc:`.CheckFailure`.
        DM channels will also now pass this check.
    """
    # Re-implemented instead of delegating to :func:`check` so the matching
    # app-command metadata can be attached alongside the classic check.
    def predicate(ctx: Context[BotT]) -> bool:
        channel = ctx.channel
        if ctx.guild is None:
            return True
        if isinstance(channel, (discord.TextChannel, discord.Thread, discord.VoiceChannel)) and channel.is_nsfw():
            return True
        raise NSFWChannelRequired(channel)  # type: ignore
    def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:
        if isinstance(func, Command):
            func.checks.append(predicate)
            if hasattr(func, '__commands_is_hybrid__'):
                app_command = getattr(func, 'app_command', None)
                if app_command:
                    app_command.nsfw = True
        else:
            try:
                func.__commands_checks__.append(predicate)
            except AttributeError:
                func.__commands_checks__ = [predicate]
            func.__discord_app_commands_is_nsfw__ = True
        return func
    # Expose the predicate, normalized to a coroutine, like :func:`check` does.
    if inspect.iscoroutinefunction(predicate):
        decorator.predicate = predicate
    else:
        @functools.wraps(predicate)
        async def async_predicate(ctx: Context[BotT]):
            return predicate(ctx)
        decorator.predicate = async_predicate
    return decorator  # type: ignore
def cooldown(
    rate: int,
    per: float,
    type: Union[BucketType, Callable[[Message], Any]] = BucketType.default,
) -> Callable[[T], T]:
    """A decorator that attaches a cooldown to a :class:`.Command`, limiting
    it to ``rate`` uses every ``per`` seconds per bucket.
    Buckets can be per-guild, per-channel, per-user, per-role or global,
    selected via ``type`` (a :class:`.BucketType` or a callable mapping a
    :class:`.Message` to a bucket key). A command can only have a single
    cooldown; when one triggers, :exc:`.CommandOnCooldown` is dispatched to
    :func:`.on_command_error` and the local error handler.
    Parameters
    ------------
    rate: :class:`int`
        The number of times a command can be used before triggering a cooldown.
    per: :class:`float`
        The amount of seconds to wait for a cooldown when it's been triggered.
    type: Union[:class:`.BucketType`, Callable[[:class:`.Message`], Any]]
        The type of cooldown to have. If callable, should return a key for the mapping.
        .. versionchanged:: 1.7
            Callables are now supported for custom bucket types.
    """
    def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:
        buckets = CooldownMapping(Cooldown(rate, per), type)
        # Commands store the mapping directly; bare callbacks stash it in an
        # attribute picked up later by the Command constructor.
        if isinstance(func, Command):
            func._buckets = buckets
        else:
            func.__commands_cooldown__ = buckets
        return func
    return decorator  # type: ignore
def dynamic_cooldown(
    cooldown: Union[BucketType, Callable[[Message], Any]],
    type: BucketType,
) -> Callable[[T], T]:
    """A decorator that attaches a dynamic cooldown to a :class:`.Command`.
    Unlike :func:`.cooldown`, the first argument is a function that receives
    a :class:`.discord.Message` and returns a
    :class:`~discord.app_commands.Cooldown` to apply for that invocation, or
    ``None`` to bypass the cooldown entirely.
    Buckets can be per-guild, per-channel, per-user, per-role or global,
    selected via ``type``. A command can only have a single cooldown; when
    one triggers, :exc:`.CommandOnCooldown` is dispatched to
    :func:`.on_command_error` and the local error handler.
    .. versionadded:: 2.0
    Parameters
    ------------
    cooldown: Callable[[:class:`.discord.Message`], Optional[:class:`~discord.app_commands.Cooldown`]]
        A function that takes a message and returns a cooldown that will
        apply to this invocation or ``None`` if the cooldown should be bypassed.
    type: :class:`.BucketType`
        The type of cooldown to have.
    Raises
    -------
    TypeError
        ``cooldown`` is not callable.
    ValueError
        ``type`` is :attr:`.BucketType.default`.
    """
    if not callable(cooldown):
        raise TypeError("A callable must be provided")
    if type is BucketType.default:
        raise ValueError('BucketType.default cannot be used in dynamic cooldowns')
    def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:
        buckets = DynamicCooldownMapping(cooldown, type)
        # Commands store the mapping directly; bare callbacks stash it in an
        # attribute picked up later by the Command constructor.
        if isinstance(func, Command):
            func._buckets = buckets
        else:
            func.__commands_cooldown__ = buckets
        return func
    return decorator  # type: ignore
def max_concurrency(number: int, per: BucketType = BucketType.default, *, wait: bool = False) -> Callable[[T], T]:
    """A decorator that limits how many invocations of a :class:`.Command` or its
    subclasses may run at the same time.

    Unlike a cooldown there is no waiting period or token bucket involved --
    only a fixed number of concurrent invocations is allowed, which is useful
    for long-running commands or commands that must be serialized.

    .. versionadded:: 1.3

    Parameters
    -------------
    number: :class:`int`
        The maximum number of invocations of this command that can be running at the same time.
    per: :class:`.BucketType`
        The bucket this concurrency is scoped to, e.g. ``BucketType.guild``
        permits up to ``number`` simultaneous uses per guild.
    wait: :class:`bool`
        Whether the command should wait for the queue to be over. When
        ``False``, :exc:`.MaxConcurrencyReached` is raised to the error
        handler instead of waiting; when ``True``, the command waits until it
        can be executed.
    """
    def wrapper(target: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:
        limiter = MaxConcurrency(number, per=per, wait=wait)
        if isinstance(target, Command):
            target._max_concurrency = limiter
        else:
            # Stored for later collection when the Command object is created.
            target.__commands_max_concurrency__ = limiter
        return target

    return wrapper  # type: ignore
def before_invoke(coro: Hook[CogT, ContextT], /) -> Callable[[T], T]:
    """A decorator that registers a coroutine as a pre-invoke hook.

    Useful to share one before-invoke hook among several commands that do not
    live in the same cog.

    .. versionadded:: 1.4

    .. versionchanged:: 2.0
        ``coro`` parameter is now positional-only.

    Example
    ---------
    .. code-block:: python3

        async def record_usage(ctx):
            print(ctx.author, 'used', ctx.command, 'at', ctx.message.created_at)

        @bot.command()
        @commands.before_invoke(record_usage)
        async def who(ctx): # Output: <User> used who at <Time>
            await ctx.send('i am a bot')

        class What(commands.Cog):
            @commands.before_invoke(record_usage)
            @commands.command()
            async def when(self, ctx): # Output: <User> used when at <Time>
                await ctx.send(f'and i have existed since {ctx.bot.user.created_at}')

            @commands.command()
            async def where(self, ctx): # Output: <Nothing>
                await ctx.send('on Discord')

            @commands.command()
            async def why(self, ctx): # Output: <Nothing>
                await ctx.send('because someone made me')
    """
    def wrapper(target: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:
        if isinstance(target, Command):
            # Command already knows how to register the hook itself.
            target.before_invoke(coro)
        else:
            # Plain coroutine: stash the hook for the Command constructor.
            target.__before_invoke__ = coro
        return target

    return wrapper  # type: ignore
def after_invoke(coro: Hook[CogT, ContextT], /) -> Callable[[T], T]:
    """A decorator that registers a coroutine as a post-invoke hook.

    Useful to share one after-invoke hook among several commands that do not
    live in the same cog.

    .. versionadded:: 1.4

    .. versionchanged:: 2.0
        ``coro`` parameter is now positional-only.
    """
    def wrapper(target: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:
        if isinstance(target, Command):
            # Command already knows how to register the hook itself.
            target.after_invoke(coro)
        else:
            # Plain coroutine: stash the hook for the Command constructor.
            target.__after_invoke__ = coro
        return target

    return wrapper  # type: ignore
| 33.849942
| 122
| 0.608166
|
4a0b2bd6dbee3e911529ab45b6d33c312e5d5747
| 4,966
|
py
|
Python
|
xnu-2782.1.97/tools/lldbmacros/core/caching.py
|
LeeWongSnail/SourceCode_ReadingNote
|
b6bbf99a5fef6a087e053f6dfcc7f12691961ba6
|
[
"MIT"
] | null | null | null |
xnu-2782.1.97/tools/lldbmacros/core/caching.py
|
LeeWongSnail/SourceCode_ReadingNote
|
b6bbf99a5fef6a087e053f6dfcc7f12691961ba6
|
[
"MIT"
] | null | null | null |
xnu-2782.1.97/tools/lldbmacros/core/caching.py
|
LeeWongSnail/SourceCode_ReadingNote
|
b6bbf99a5fef6a087e053f6dfcc7f12691961ba6
|
[
"MIT"
] | 1
|
2021-03-28T02:56:16.000Z
|
2021-03-28T02:56:16.000Z
|
"""
A basic caching module for xnu debug macros to use.
It is recommended to use [Get|Save][Static|Dynamic]CacheData() apis for
your caching needs. These APIs will handle the case of clearing caches when
a debugger continues and stops or hit a breakpoint.
Use Static caches for data that will not change if the program is run and stopped again. e.g. typedata, version numbers etc.
An example invocation could be like
def getDSYMPathForUUID(uuid):
# Get the data from cache
cached_data = caching.GetStaticCacheData('dsym.for.uuid', {})
if uuid in cached_data:
return cached_data[uuid]
else:
path = #get info for uuid
cached_data[uuid] = path
# save the cached_data object to cache.
caching.SaveStaticCacheData('dsym.for.uuid', cached_data)
return cached_data[uuid]
And use Dynamic caches for things like thread data, zones information etc.
These will automatically be dropped when debugger continues the target
An example use of Dynamic cache could be as follows
def GetExecutablePathForPid(pid):
# Get the data from cache
cached_data = caching.GetDynamicCacheData('exec_for_path', {})
if pid in cached_data:
return cached_data[pid]
else:
exec_path = "/path/to/exec" #get exec path for pid
        cached_data[pid] = exec_path
# save the cached_data object to cache.
caching.SaveDynamicCacheData('exec_for_path', cached_data)
return cached_data[pid]
"""
#Private Routines and objects
from configuration import *
import sys
"""
The format for the saved data dictionaries is
{
'key' : (valueobj, versno),
...
}
The versno is an int defining the version of obj. In case of version mismatch it will set valueobj to default upon access.
"""
# Module-level cache stores.  Each maps str key -> (valueobj, versno) as
# described in the docstring above.  Static data is expected to survive a
# debugger continue/stop cycle; dynamic data is invalidated via session ids.
_static_data = {}
_dynamic_data = {}
def _GetDebuggerSessionID():
""" A default callable function that _GetCurrentSessionID uses to
identify a stopped session.
"""
return 0
def _GetCurrentSessionID():
    """ Get the current session id. This will update whenever the
        system is continued, or when new information appears that should
        invalidate the dynamic cache.
        returns: int - session id number.
    """
    # Delegates to the (replaceable) debugger-session hook.
    return _GetDebuggerSessionID()
#Public APIs
def GetSizeOfCache():
    """ Returns number of bytes held in cache.
        returns:
            int - shallow size of both the static and dynamic cache dicts.
    """
    # Read-only access to the module-level dicts; no `global` needed.
    # Note sys.getsizeof is shallow: it does not count the cached values.
    return sys.getsizeof(_static_data) + sys.getsizeof(_dynamic_data)
def GetStaticCacheData(key, default_value = None):
    """ Get cached object based on key from the cache of static information.
        params:
            key: str - a unique string identifying your data.
            default_value : obj - object returned when key is not cached.
        returns:
            default_value - if the static cache does not have your data.
            obj - the data obj saved with SaveStaticCacheData()
    """
    # Entries are (valueobj, session_id) tuples, so a .get() miss (None)
    # cannot be confused with a stored value.
    entry = _static_data.get(str(key))
    if entry is None:
        return default_value
    return entry[0]
def SaveStaticCacheData(key, value):
    """ Save data into the static cache identified by key.
        Overwrites any data previously associated with key.
        params:
            key : str - a unique string identifying your data
            value: obj - any object that is to be cached.
        returns:
            Nothing
    """
    # Caching may be disabled wholesale through the configuration module.
    if config['CacheStaticData']:
        # Tag with the current session id, mirroring the dynamic cache format.
        _static_data[str(key)] = (value, _GetCurrentSessionID())
def GetDynamicCacheData(key, default_value=None):
    """ Get cached object based on key from the cache of dynamic information.
        params:
            key: str - a unique string identifying cached object
            default_value : obj - object returned when key is not cached.
        returns:
            default_value - if the cache has no data, or the saved session id
                            does not match the current session.
            obj - the data obj saved with SaveDynamicCacheData()
    """
    key = str(key)
    entry = _dynamic_data.get(key)
    if entry is not None:
        if entry[1] == _GetCurrentSessionID():
            return entry[0]
        # Stale entry from a previous session: evict it before falling back.
        del _dynamic_data[key]
    return default_value
def SaveDynamicCacheData(key, value):
    """ Save data into the dynamic cache identified by key.
        Overwrites any data previously associated with key.
        params:
            key : str - a unique string identifying your data
            value: obj - any object that is to be cached.
        returns:
            Nothing
    """
    # Caching may be disabled wholesale through the configuration module.
    if config['CacheDynamicData']:
        # The stored session id lets GetDynamicCacheData detect staleness.
        _dynamic_data[str(key)] = (value, _GetCurrentSessionID())
| 29.915663
| 124
| 0.671365
|
4a0b2bd7fb7143c581d937365612c2be751a7ddb
| 5,226
|
py
|
Python
|
tests/test_types.py
|
madebr/conan-templates-script
|
7830d74e69c5a62d3606b7d33a800ae9290b6502
|
[
"MIT"
] | null | null | null |
tests/test_types.py
|
madebr/conan-templates-script
|
7830d74e69c5a62d3606b7d33a800ae9290b6502
|
[
"MIT"
] | null | null | null |
tests/test_types.py
|
madebr/conan-templates-script
|
7830d74e69c5a62d3606b7d33a800ae9290b6502
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from . import create_configuration, create_remote_conan_templates, conan
from bincrafters_templates import PROJECT_TYPES
from bincrafters_templates.bincrafters_templates import BincraftersTemplatesError, Configuration, generate_project, \
template_geturl, template_seturl, template_update
import git
from pathlib import Path
import pytest
from pytest_cases import pytest_fixture_plus
@pytest_fixture_plus
@pytest.mark.parametrize("template_type", PROJECT_TYPES, ids=lambda t: f"template_type:{t}")
def valid_template_type(template_type):
    # Parametrized fixture yielding every supported project template type in
    # turn.  NOTE: the parameter name must stay "template_type" to match the
    # name bound by @pytest.mark.parametrize above.
    return template_type
def _test_valid_template_type(configuration, template_type, outputpath, conan):
    """Generate a project of *template_type* under *outputpath* and check its layout.

    Asserts the expected directories/files exist and that conan can inspect
    the recipe, then returns the path to the generated ``conanfile.py``.
    """
    name = "conan-testname"
    generate_project(configuration, template_type=template_type, output=outputpath, name=name)
    project_path = outputpath / name
    assert project_path.is_dir()
    for subdir in ("test_package", ".ci", ".github"):
        assert (project_path / subdir).is_dir()
    for filename in (".gitattributes", ".gitignore", ".travis.yml", "appveyor.yml"):
        assert (project_path / filename).is_file()
    conanfile_path = project_path / "conanfile.py"
    assert conanfile_path.is_file()
    # Inspecting name/version raises if the generated recipe is malformed.
    conan.inspect(str(conanfile_path), ("name", "version", ))
    return conanfile_path
def test_valid_template_type(valid_template_type, tmpdir_factory, conan):
    """ Generate valid project for every supported template type. """
    cfg = create_configuration(tmpdir_factory)
    out_dir = Path(tmpdir_factory.mktemp("output"))
    _test_valid_template_type(cfg, valid_template_type, out_dir, conan)
def test_invalid_template_type(tmpdir_factory):
    """Generating a project with an unknown template type raises
    BincraftersTemplatesError."""
    output_path = Path(tmpdir_factory.mktemp("output"))
    configuration = create_configuration(tmpdir_factory)
    name = "conan-testname"
    invalid_template_type = "nonsense"
    # pytest.raises replaces the try/except/assert-False pattern: it fails the
    # test cleanly when no exception is raised and only passes for the exact
    # expected exception type.
    with pytest.raises(BincraftersTemplatesError):
        generate_project(configuration, invalid_template_type, output_path, name)
def test_default_template(tmpdir_factory, conan):
    """The "default" template declares the standard build settings and options."""
    out_dir = Path(tmpdir_factory.mktemp("output"))
    cfg = create_configuration(tmpdir_factory)
    conanfile = _test_valid_template_type(cfg, "default", out_dir, conan)
    answer = conan.inspect(str(conanfile), ("settings", "options", ))
    settings = answer["settings"]
    options = answer["options"]
    assert settings is not None
    assert options is not None
    # A regular library recipe targets the host platform...
    for expected in ("os", "arch", "compiler", "build_type"):
        assert expected in settings
    # ...and must not use the installer-style build-platform settings.
    for absent in ("os_build", "arch_build"):
        assert absent not in settings
    for option in ("shared", "fPIC"):
        assert option in options
def test_header_only_template(tmpdir_factory, conan):
    """The "header_only" template declares neither settings nor options."""
    cfg = create_configuration(tmpdir_factory)
    out_dir = Path(tmpdir_factory.mktemp("output"))
    conanfile = _test_valid_template_type(cfg, "header_only", out_dir, conan)
    answer = conan.inspect(str(conanfile), ("settings", "options", ))
    # Header-only packages are settings/options agnostic by construction.
    assert answer["settings"] is None
    assert answer["options"] is None
def test_install_only_template(tmpdir_factory, conan):
    """The "installer_only" template uses build-platform settings only."""
    cfg = create_configuration(tmpdir_factory)
    out_dir = Path(tmpdir_factory.mktemp("output"))
    conanfile = _test_valid_template_type(cfg, "installer_only", out_dir, conan)
    answer = conan.inspect(str(conanfile), ("settings", "options", ))
    settings = answer["settings"]
    assert settings is not None
    assert answer["options"] is None
    # Installer packages key on the build platform, not the host platform,
    # and do not carry a build_type.
    for absent in ("os", "arch", "build_type"):
        assert absent not in settings
    for expected in ("os_build", "arch_build"):
        assert expected in settings
def test_change_remote(tmpdir_factory, conan):
    """Retargeting a configuration at a different template remote and running
    template_update() pulls that remote's content into the local checkout."""

    def _commit_marker(repo_path, filename):
        # Create an empty marker file and commit it, so the remote's head is
        # distinguishable from the other remote's.
        repo = git.Repo(repo_path)
        (repo_path / filename).touch()
        repo.index.add([filename])
        repo.index.commit(f"added {filename}")

    # Two independent configurations, each backed by its own template repo.
    # (The original also created an unused "output" tmpdir; dropped.)
    c0 = create_configuration(tmpdir_factory)
    c1 = create_configuration(tmpdir_factory)
    u0 = Path(next(template_geturl(c0)))
    u1 = Path(next(template_geturl(c1)))
    _commit_marker(u0, "dummy_c0")
    _commit_marker(u1, "dummy_c1")
    # Point c0 at c1's remote; after updating, c0's checkout must contain the
    # file committed to c1's repository.
    template_seturl(c0, u1)
    template_update(c0)
    assert (c0.template_path / "dummy_c1").is_file()
def test_updated_remote(tmpdir_factory, conan):
    """template_update() pulls new commits from the existing remote into the
    local template checkout."""
    output_path = Path(tmpdir_factory.mktemp("output"))
    configuration = create_configuration(tmpdir_factory)
    # Generate once so the template checkout exists; the returned conanfile
    # path is not needed here (the original bound it to an unused variable).
    _test_valid_template_type(configuration, "default", output_path, conan)
    remote_path = Path(next(template_geturl(configuration)))
    # Sanity check: the remote must live inside pytest's temp area so the test
    # never mutates a real repository.
    assert Path(tmpdir_factory.getbasetemp()) in remote_path.parents
    repo = git.Repo(remote_path)
    (remote_path / "dummy_file").touch()
    repo.index.add(["dummy_file"])
    repo.index.commit("added dummy_file")
    template_update(configuration)
    assert (configuration.template_path / "dummy_file").is_file()
| 32.6625
| 117
| 0.730578
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.