Schema (column and dtype, reconstructed from the interleaved header cells):

| column | dtype |
| --- | --- |
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
| ab3a2dd3958eeed0683e138275086ab9243b7a2e | 1,525 | py | Python | burl/core/api/views.py | wryfi/burl | 664878ce9a31695456be89c8e10e8bb612074ef6 | ["MIT"] | 1 | 2021-02-07T21:48:59.000Z | 2021-02-07T21:48:59.000Z | burl/core/api/views.py | wryfi/burl | 664878ce9a31695456be89c8e10e8bb612074ef6 | ["MIT"] | 16 | 2020-03-24T16:53:30.000Z | 2022-03-15T17:46:59.000Z | burl/core/api/views.py | wryfi/burl | 664878ce9a31695456be89c8e10e8bb612074ef6 | ["MIT"] | null | null | null |
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework.reverse import reverse
from rest_framework_simplejwt.tokens import RefreshToken


@api_view(['GET'])
def root(request, fmt=None):
    return Response({
        'v1': reverse('api_v1:root', request=request, format=fmt),
    })


@api_view(['GET'])
def v1_root(request, fmt=None):
    root_navigation = {
        'redirects': reverse('api_v1:redirects:redirect-list', request=request, format=fmt),
        'token': reverse('api_v1:token_root', request=request, format=fmt)
    }
    return Response(root_navigation)


@api_view(['GET'])
def token_root(request, fmt=None):
    token_navigation = {
        'auth': reverse('api_v1:token_auth', request=request, format=fmt),
        'refresh': reverse('api_v1:token_refresh', request=request, format=fmt),
        'verify': reverse('api_v1:token_verify', request=request, format=fmt),
    }
    return Response(token_navigation)


@api_view(['POST'])
def token_refresh(request):
    token = request.COOKIES.get("burl_refresh_token")
    if token:
        refresh = RefreshToken(str(token))
        access = str(refresh.access_token)
        if access:
            return Response({"access": access}, 200)
        else:
            # fixed: was Response({"unauthorized"}, 401) -- a set literal, which DRF cannot serialize
            return Response("unauthorized", 401)
    return Response("unauthorized", 401)


@api_view(['POST'])
def token_refresh_revoke(_request):
    response = Response("ok")
    response.delete_cookie("burl_refresh_token")
    return response
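For context, the `token_refresh` view above expects the refresh token in a `burl_refresh_token` HttpOnly cookie rather than the request body. A minimal sketch of a login view that would set such a cookie, using simplejwt's standard `RefreshToken.for_user` helper; the view itself is hypothetical and not part of this file:

```python
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework_simplejwt.tokens import RefreshToken


@api_view(['POST'])
def login_sketch(request):
    user = request.user  # assumes an already-authenticated user, for brevity
    refresh = RefreshToken.for_user(user)
    response = Response({"access": str(refresh.access_token)})
    # httponly keeps the refresh token out of reach of page JavaScript
    response.set_cookie("burl_refresh_token", str(refresh), httponly=True)
    return response
```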
| 29.901961 | 92 | 0.688525 | 186 | 1,525 | 5.446237 | 0.231183 | 0.096742 | 0.071076 | 0.136229 | 0.154985 | 0.124383 | 0 | 0 | 0 | 0 | 0 | 0.013578 | 0.179016 | 1,525 | 50 | 93 | 30.5 | 0.795527 | 0 | 0 | 0.125 | 0 | 0 | 0.152131 | 0.019672 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.1 | 0.025 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| ab3e250f158b4ed0173fe7715ee2559fe186d522 | 1,879 | py | Python | qurator/sbb_ned/embeddings/bert.py | qurator-spk/sbb_ned | d4cfe249f72e48913f254a58fbe0dbe6e47bd168 | ["Apache-2.0"] | 6 | 2020-09-05T16:08:59.000Z | 2022-03-05T00:54:47.000Z | qurator/sbb_ned/embeddings/bert.py | qurator-spk/sbb_ned | d4cfe249f72e48913f254a58fbe0dbe6e47bd168 | ["Apache-2.0"] | 6 | 2020-09-23T17:58:37.000Z | 2022-03-10T14:02:09.000Z | qurator/sbb_ned/embeddings/bert.py | qurator-spk/sbb_ned | d4cfe249f72e48913f254a58fbe0dbe6e47bd168 | ["Apache-2.0"] | 2 | 2021-03-22T00:12:51.000Z | 2022-01-31T10:04:08.000Z |
from ..embeddings.base import Embeddings
from flair.data import Sentence


class BertEmbeddings(Embeddings):

    def __init__(self, model_path,
                 layers="-1, -2, -3, -4", pooling_operation='first', use_scalar_mix=True,
                 no_cuda=False, *args, **kwargs):
        super(BertEmbeddings, self).__init__(*args, **kwargs)
        self._path = model_path
        self._embeddings = None
        self._layers = layers
        self._pooling_operation = pooling_operation
        self._use_scalar_mix = use_scalar_mix
        self._no_cuda = no_cuda

    def get(self, keys):
        if self._embeddings is None:
            if self._no_cuda:
                import flair
                import torch
                flair.device = torch.device('cpu')

            from .flair_bert import BertEmbeddings

            self._embeddings = BertEmbeddings(bert_model_or_path=self._path,
                                              layers=self._layers,
                                              pooling_operation=self._pooling_operation,
                                              use_scalar_mix=self._use_scalar_mix)

        sentences = [Sentence(key) for key in keys]

        # noinspection PyUnresolvedReferences
        self._embeddings.embed(sentences)

        for s_idx, sentence in enumerate(sentences):
            for t_idx, token in enumerate(sentence):
                emb = token.embedding.cpu().numpy()
                yield token.text, emb
                del token
            del sentence

    def config(self):
        return {'description': self.description()}

    def description(self):
        layer_str = self._layers
        layer_str = layer_str.replace(' ', '')
        layer_str = layer_str.replace(',', '_')
        return "bert-layers_{}-pooling_{}-scalarmix_{}".format(
            layer_str, self._pooling_operation, self._use_scalar_mix)
| 29.359375 | 122 | 0.585418 | 198 | 1,879 | 5.227273 | 0.343434 | 0.092754 | 0.069565 | 0.046377 | 0.10628 | 0.061836 | 0 | 0 | 0 | 0 | 0 | 0.003152 | 0.324641 | 1,879 | 63 | 123 | 29.825397 | 0.812451 | 0.018627 | 0 | 0 | 0 | 0 | 0.040174 | 0.02063 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.131579 | 0.026316 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| ab41670d17acae57b54990c3a25815a2ee40eb19 | 9,225 | py | Python | onnxmltools/convert/keras/_parse.py | gpminsuk/onnxmltools | 4e88929a79a1018183f58e2d5e032dd639839dd2 | ["MIT"] | 1 | 2018-04-10T02:30:47.000Z | 2018-04-10T02:30:47.000Z | onnxmltools/convert/keras/_parse.py | gpminsuk/onnxmltools | 4e88929a79a1018183f58e2d5e032dd639839dd2 | ["MIT"] | null | null | null | onnxmltools/convert/keras/_parse.py | gpminsuk/onnxmltools | 4e88929a79a1018183f58e2d5e032dd639839dd2 | ["MIT"] | 1 | 2018-06-27T18:16:20.000Z | 2018-06-27T18:16:20.000Z |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------

import numbers  # used below for Integral checks (added explicitly rather than relying on the star import)

import tensorflow as tf
from keras.models import Model
from keras.layers import Layer, InputLayer

from ...proto import onnx
from ..common._container import KerasModelContainer
from ..common._topology import Topology
from ..common.data_types import *


def _extract_inbound_nodes(model):
    if hasattr(model, 'inbound_nodes'):
        return model.inbound_nodes
    elif hasattr(model, '_inbound_nodes'):
        return model._inbound_nodes
    else:
        raise ValueError('Failed to find inbound_nodes and _inbound_nodes when parsing Keras model')


def extract_model_input_and_output_shapes(model, default_batch_size):
    if hasattr(model, 'input_shape'):
        if not isinstance(model.input_shape, list):
            input_shapes = [list(model.input_shape)]
        else:
            input_shapes = [list(shape) for shape in model.input_shape]
    elif hasattr(model, 'input_shapes'):
        input_shapes = [list(shape) for shape in model.input_shapes]
    else:
        raise ValueError('Fail to extract model input shape(s)')

    for shape in input_shapes:
        if not isinstance(shape[0], numbers.Integral):
            shape[0] = default_batch_size

    if hasattr(model, 'output_shape'):
        if not isinstance(model.output_shape, list):
            output_shapes = [list(model.output_shape)]
        else:
            output_shapes = [list(shape) for shape in model.output_shape]
    elif hasattr(model, 'output_shapes'):
        output_shapes = [list(shape) for shape in model.output_shapes]
    else:
        raise ValueError('Fail to extract model output shape(s)')

    for shape in output_shapes:
        if not isinstance(shape[0], numbers.Integral):
            shape[0] = default_batch_size

    return input_shapes, output_shapes


def determine_tensor_type(tensor, default_batch_size, keras_shape=None):
    # keras_shape can overwrite the shape defined in the Tensorflow tensor
    if keras_shape is None:
        tensor_shape = [d.value if d.value is not None else 'None' for d in tensor.shape]
    else:
        tensor_shape = [d if d is not None else 'None' for d in keras_shape]

    # Adjust batch size if needed
    if tensor_shape[0] == 'None':
        tensor_shape[0] = default_batch_size

    # Determine the tensor's element type
    tensor_type = tensor.dtype
    if tensor_type in [tf.int8, tf.int16, tf.int32, tf.int64]:
        return Int64TensorType(shape=tensor_shape)
    elif tensor_type in [tf.float16, tf.float32, tf.float64]:
        return FloatTensorType(shape=tensor_shape)
    else:
        raise ValueError('Unable to find out a correct type for tensor %s' % tensor)


def parse_keras(model, initial_types=None, targeted_onnx=onnx.__version__):
    '''
    The main parsing function of Keras Model and Sequential objects.
    :param model: A Keras Model or Sequential object
    :param initial_types: A list providing some types for some root variables. Each element is a tuple of a variable
    name and a type defined in data_types.py.
    :param targeted_onnx: a version string such as `1.1.2` or `1.2.1` for specifying the ONNX version used to produce
    the output model.
    :return: a Topology object. It's an intermediate representation of the input Keras model
    '''
    raw_model_container = KerasModelContainer(model)
    topology = Topology(raw_model_container, default_batch_size=1, initial_types=initial_types,
                        targeted_onnx=targeted_onnx)
    scope = topology.declare_scope('__root__')

    # Each inbound node defines an evaluation of the underlying model (if the model is called multiple times, it may
    # contain several inbound nodes). According to the tensors specified in those inbound nodes, we declare the roots
    # and leaves of the computational graph described by the Keras input model.
    for node in _extract_inbound_nodes(model):
        input_shapes, output_shapes = extract_model_input_and_output_shapes(model, topology.default_batch_size)

        # Declare inputs for a specific model execution
        for tensor, shape in zip(node.input_tensors, input_shapes):
            raw_model_container.add_input_name(tensor.name)
            tensor_type = determine_tensor_type(tensor, topology.default_batch_size, list(shape))
            scope.get_local_variable_or_declare_one(tensor.name, tensor_type)

        # Declare outputs for a specific model execution
        for tensor, shape in zip(node.output_tensors, output_shapes):
            raw_model_container.add_output_name(tensor.name)
            tensor_type = determine_tensor_type(tensor, topology.default_batch_size, list(shape))
            scope.get_local_variable_or_declare_one(tensor.name, tensor_type)

    # For each model execution, we call a parsing function to create a computational (sub-)graph because ONNX has no
    # model/layer sharing.
    for node in _extract_inbound_nodes(model):
        _parse_keras(topology, scope, model, node)

    topology.root_names = [variable.onnx_name for variable in scope.variables.values()]
    return topology


def _parse_keras(topology, parent_scope, model, inbound_node):
    if isinstance(model, Model):
        scope = topology.declare_scope('scope')
        # Declare output variables so that they can be connected with the variables produced in layers and sub-models
        for layer in model.layers:
            for node in _extract_inbound_nodes(layer):
                for tensor in node.output_tensors:
                    tensor_type = determine_tensor_type(tensor, topology.default_batch_size)
                    scope.declare_local_variable(tensor.name, tensor_type)

        # Recursively call the parsing function
        for layer in model.layers:
            for node in _extract_inbound_nodes(layer):
                _parse_keras(topology, scope, layer, node)

        # Connect the variables declared when parsing the input model and the actual model inputs. inbound_node has
        # the actual inputs, while the whole graph is declared only via the first inbound node of the input model.
        # That is, for a shared (sub-)model, we may declare it several times and each time we may connect its I/O with
        # the I/O specified in an inbound node.
        for parent_tensor, local_tensor in zip(inbound_node.input_tensors, _extract_inbound_nodes(model)[0].input_tensors):
            parent_tensor_type = determine_tensor_type(parent_tensor, topology.default_batch_size)
            local_tensor_type = determine_tensor_type(local_tensor, topology.default_batch_size)
            parent_variable = parent_scope.get_local_variable_or_declare_one(parent_tensor.name, parent_tensor_type)
            local_variable = scope.get_local_variable_or_declare_one(local_tensor.name, local_tensor_type)
            operator = scope.declare_local_operator('identity')
            operator.inputs.append(parent_variable)
            operator.outputs.append(local_variable)

        # Connect the variables declared when parsing the input model and the actual model outputs. inbound_node has
        # the actual outputs, while the whole graph is declared via the first inbound node of the input model.
        for parent_tensor, local_tensor in zip(inbound_node.output_tensors, _extract_inbound_nodes(model)[0].output_tensors):
            parent_tensor_type = determine_tensor_type(parent_tensor, topology.default_batch_size)
            local_tensor_type = determine_tensor_type(local_tensor, topology.default_batch_size)
            parent_variable = parent_scope.get_local_variable_or_declare_one(parent_tensor.name, parent_tensor_type)
            local_variable = scope.get_local_variable_or_declare_one(local_tensor.name, local_tensor_type)
            operator = scope.declare_local_operator('identity')
            operator.inputs.append(local_variable)
            operator.outputs.append(parent_variable)
    elif isinstance(model, Layer):
        if isinstance(model, InputLayer):
            return
        operator = parent_scope.declare_local_operator(type(model), raw_model=model)
        # Simply connect the layer's I/O with variables declared in the parent scope. Note that it may create input
        # variables in the parent scope because we only declare output variables in the beginning of _parse_keras(...)
        for parent_tensor in inbound_node.input_tensors:
            tensor_type = determine_tensor_type(parent_tensor, topology.default_batch_size)
            operator.inputs.append(parent_scope.get_local_variable_or_declare_one(parent_tensor.name, tensor_type))
        for parent_tensor in inbound_node.output_tensors:
            tensor_type = determine_tensor_type(parent_tensor, topology.default_batch_size)
            operator.outputs.append(parent_scope.get_local_variable_or_declare_one(parent_tensor.name, tensor_type))
    else:
        raise RuntimeError('Unsupported Keras component %s' % type(model))
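A minimal sketch of driving `parse_keras`, assuming the old-style Keras/TF 1.x stack the imports above target:

```python
# Hypothetical example: convert a tiny Sequential model into a Topology.
from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(4, input_shape=(8,), activation='relu')])
topology = parse_keras(model)   # declares roots/leaves, then parses sub-graphs
print(topology.root_names)      # ONNX-side names of the graph's root variables
```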
| 51.825843 | 125 | 0.714363 | 1,236 | 9,225 | 5.089806 | 0.16343 | 0.049277 | 0.040693 | 0.03815 | 0.473216 | 0.440629 | 0.415196 | 0.382133 | 0.331744 | 0.292322 | 0 | 0.004088 | 0.204444 | 9,225 | 177 | 126 | 52.118644 | 0.853114 | 0.258211 | 0 | 0.303571 | 0 | 0 | 0.04986 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044643 | false | 0 | 0.0625 | 0 | 0.169643 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| ab41d11daca6d1b31e59637bf18b9f99a383f86f | 24,575 | py | Python | src/scenic/core/regions.py | cahartsell/Scenic | 2e7979011aef426108687947668d9ba6f5439136 | ["BSD-3-Clause"] | null | null | null | src/scenic/core/regions.py | cahartsell/Scenic | 2e7979011aef426108687947668d9ba6f5439136 | ["BSD-3-Clause"] | null | null | null | src/scenic/core/regions.py | cahartsell/Scenic | 2e7979011aef426108687947668d9ba6f5439136 | ["BSD-3-Clause"] | null | null | null |
"""Objects representing regions in space."""
import math
import random
import itertools
import numpy
import scipy.spatial
import shapely.geometry
import shapely.ops
from scenic.core.distributions import Samplable, RejectionException, needsSampling
from scenic.core.lazy_eval import valueInContext
from scenic.core.vectors import Vector, OrientedVector, VectorDistribution
from scenic.core.geometry import RotatedRectangle
from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors
from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion
from scenic.core.type_support import toVector
from scenic.core.utils import cached, areEquivalent
def toPolygon(thing):
if needsSampling(thing):
return None
if hasattr(thing, 'polygon'):
return thing.polygon
if hasattr(thing, 'polygons'):
return thing.polygons
if hasattr(thing, 'lineString'):
return thing.lineString
return None
def regionFromShapelyObject(obj, orientation=None):
"""Build a 'Region' from Shapely geometry."""
assert obj.is_valid, obj
if obj.is_empty:
return nowhere
elif isinstance(obj, (shapely.geometry.Polygon, shapely.geometry.MultiPolygon)):
return PolygonalRegion(polygon=obj, orientation=orientation)
elif isinstance(obj, (shapely.geometry.LineString, shapely.geometry.MultiLineString)):
return PolylineRegion(polyline=obj, orientation=orientation)
else:
raise RuntimeError(f'unhandled type of Shapely geometry: {obj}')
class PointInRegionDistribution(VectorDistribution):
"""Uniform distribution over points in a Region"""
def __init__(self, region):
super().__init__(region)
self.region = region
def sampleGiven(self, value):
return value[self.region].uniformPointInner()
def __str__(self):
return f'PointIn({self.region})'
class Region(Samplable):
"""Abstract class for regions."""
def __init__(self, name, *dependencies, orientation=None):
super().__init__(dependencies)
self.name = name
self.orientation = orientation
def sampleGiven(self, value):
return self
def intersect(self, other, triedReversed=False):
"""Get a `Region` representing the intersection of this one with another."""
if triedReversed:
return IntersectionRegion(self, other)
else:
return other.intersect(self, triedReversed=True)
@staticmethod
def uniformPointIn(region):
"""Get a uniform `Distribution` over points in a `Region`."""
return PointInRegionDistribution(region)
def uniformPoint(self):
"""Sample a uniformly-random point in this `Region`.
Can only be called on fixed Regions with no random parameters.
"""
assert not needsSampling(self)
return self.uniformPointInner()
def uniformPointInner(self):
"""Do the actual random sampling. Implemented by subclasses."""
raise NotImplementedError()
def containsPoint(self, point):
"""Check if the `Region` contains a point. Implemented by subclasses."""
raise NotImplementedError()
def containsObject(self, obj):
"""Check if the `Region` contains an :obj:`~scenic.core.object_types.Object`.
The default implementation assumes the `Region` is convex; subclasses must
override the method if this is not the case.
"""
for corner in obj.corners:
if not self.containsPoint(corner):
return False
return True
def __contains__(self, thing):
"""Check if this `Region` contains an object or vector."""
from scenic.core.object_types import Object
if isinstance(thing, Object):
return self.containsObject(thing)
vec = toVector(thing, '"X in Y" with X not an Object or a vector')
return self.containsPoint(vec)
def getAABB(self):
"""Axis-aligned bounding box for this `Region`. Implemented by some subclasses."""
raise NotImplementedError()
def orient(self, vec):
"""Orient the given vector along the region's orientation, if any."""
if self.orientation is None:
return vec
else:
return OrientedVector(vec.x, vec.y, self.orientation[vec])
def __str__(self):
return f'<Region {self.name}>'
class AllRegion(Region):
"""Region consisting of all space."""
def intersect(self, other, triedReversed=False):
return other
def containsPoint(self, point):
return True
def containsObject(self, obj):
return True
def __eq__(self, other):
return type(other) is AllRegion
def __hash__(self):
return hash(AllRegion)
class EmptyRegion(Region):
"""Region containing no points."""
def intersect(self, other, triedReversed=False):
return self
def uniformPointInner(self):
raise RejectionException(f'sampling empty Region')
def containsPoint(self, point):
return False
def containsObject(self, obj):
return False
def show(self, plt, style=None):
pass
def __eq__(self, other):
return type(other) is EmptyRegion
def __hash__(self):
return hash(EmptyRegion)
everywhere = AllRegion('everywhere')
nowhere = EmptyRegion('nowhere')
class CircularRegion(Region):
def __init__(self, center, radius, resolution=32):
super().__init__('Circle', center, radius)
self.center = center.toVector()
self.radius = radius
self.circumcircle = (self.center, self.radius)
if not (needsSampling(self.center) or needsSampling(self.radius)):
ctr = shapely.geometry.Point(self.center)
self.polygon = ctr.buffer(self.radius, resolution=resolution)
def sampleGiven(self, value):
return CircularRegion(value[self.center], value[self.radius])
def evaluateInner(self, context):
center = valueInContext(self.center, context)
radius = valueInContext(self.radius, context)
return CircularRegion(center, radius)
def containsPoint(self, point):
point = point.toVector()
return point.distanceTo(self.center) <= self.radius
def uniformPointInner(self):
x, y = self.center
r = random.triangular(0, self.radius, self.radius)
t = random.uniform(-math.pi, math.pi)
pt = Vector(x + (r * cos(t)), y + (r * sin(t)))
return self.orient(pt)
def getAABB(self):
x, y = self.center
r = self.radius
return ((x - r, y - r), (x + r, y + r))
def isEquivalentTo(self, other):
if type(other) is not CircularRegion:
return False
return (areEquivalent(other.center, self.center)
and areEquivalent(other.radius, self.radius))
def __str__(self):
return f'CircularRegion({self.center}, {self.radius})'
class SectorRegion(Region):
def __init__(self, center, radius, heading, angle, resolution=32):
super().__init__('Sector', center, radius, heading, angle)
self.center = center.toVector()
self.radius = radius
self.heading = heading
self.angle = angle
r = (radius / 2) * cos(angle / 2)
self.circumcircle = (self.center.offsetRadially(r, heading), r)
if not any(needsSampling(x) for x in (self.center, radius, heading, angle)):
ctr = shapely.geometry.Point(self.center)
circle = ctr.buffer(self.radius, resolution=resolution)
if angle >= math.tau - 0.001:
self.polygon = circle
else:
mask = shapely.geometry.Polygon([
self.center,
self.center.offsetRadially(radius, heading + angle/2),
self.center.offsetRadially(2*radius, heading),
self.center.offsetRadially(radius, heading - angle/2)
])
self.polygon = circle & mask
def sampleGiven(self, value):
return SectorRegion(value[self.center], value[self.radius],
value[self.heading], value[self.angle])
def evaluateInner(self, context):
center = valueInContext(self.center, context)
radius = valueInContext(self.radius, context)
heading = valueInContext(self.heading, context)
angle = valueInContext(self.angle, context)
return SectorRegion(center, radius, heading, angle)
def containsPoint(self, point):
point = point.toVector()
if not pointIsInCone(tuple(point), tuple(self.center), self.heading, self.angle):
return False
return point.distanceTo(self.center) <= self.radius
def uniformPointInner(self):
x, y = self.center
heading, angle, maxDist = self.heading, self.angle, self.radius
r = random.triangular(0, maxDist, maxDist)
ha = angle / 2.0
t = random.uniform(-ha, ha) + (heading + (math.pi / 2))
pt = Vector(x + (r * cos(t)), y + (r * sin(t)))
return self.orient(pt)
def isEquivalentTo(self, other):
if type(other) is not SectorRegion:
return False
return (areEquivalent(other.center, self.center)
and areEquivalent(other.radius, self.radius)
and areEquivalent(other.heading, self.heading)
and areEquivalent(other.angle, self.angle))
def __str__(self):
return f'SectorRegion({self.center},{self.radius},{self.heading},{self.angle})'
class RectangularRegion(RotatedRectangle, Region):
def __init__(self, position, heading, width, height):
super().__init__('Rectangle', position, heading, width, height)
self.position = position.toVector()
self.heading = heading
self.width = width
self.height = height
self.hw = hw = width / 2
self.hh = hh = height / 2
self.radius = hypot(hw, hh) # circumcircle; for collision detection
self.corners = tuple(position.offsetRotated(heading, Vector(*offset))
for offset in ((hw, hh), (-hw, hh), (-hw, -hh), (hw, -hh)))
self.circumcircle = (self.position, self.radius)
def sampleGiven(self, value):
return RectangularRegion(value[self.position], value[self.heading],
value[self.width], value[self.height])
def evaluateInner(self, context):
position = valueInContext(self.position, context)
heading = valueInContext(self.heading, context)
width = valueInContext(self.width, context)
height = valueInContext(self.height, context)
return RectangularRegion(position, heading, width, height)
def uniformPointInner(self):
hw, hh = self.hw, self.hh
rx = random.uniform(-hw, hw)
ry = random.uniform(-hh, hh)
pt = self.position.offsetRotated(self.heading, Vector(rx, ry))
return self.orient(pt)
def getAABB(self):
x, y = zip(*self.corners)
minx, maxx = findMinMax(x)
miny, maxy = findMinMax(y)
return ((minx, miny), (maxx, maxy))
def isEquivalentTo(self, other):
if type(other) is not RectangularRegion:
return False
return (areEquivalent(other.position, self.position)
and areEquivalent(other.heading, self.heading)
and areEquivalent(other.width, self.width)
and areEquivalent(other.height, self.height))
def __str__(self):
return f'RectangularRegion({self.position},{self.heading},{self.width},{self.height})'
class PolylineRegion(Region):
"""Region given by one or more polylines (chain of line segments)"""
def __init__(self, points=None, polyline=None, orientation=True):
super().__init__('Polyline', orientation=orientation)
if points is not None:
points = tuple(points)
if len(points) < 2:
raise RuntimeError('tried to create PolylineRegion with < 2 points')
self.points = points
self.lineString = shapely.geometry.LineString(points)
elif polyline is not None:
if isinstance(polyline, shapely.geometry.LineString):
if len(polyline.coords) < 2:
raise RuntimeError('tried to create PolylineRegion with <2-point LineString')
elif isinstance(polyline, shapely.geometry.MultiLineString):
if len(polyline) == 0:
raise RuntimeError('tried to create PolylineRegion from empty MultiLineString')
for line in polyline:
assert len(line.coords) >= 2
else:
raise RuntimeError('tried to create PolylineRegion from non-LineString')
self.lineString = polyline
else:
raise RuntimeError('must specify points or polyline for PolylineRegion')
if not self.lineString.is_valid:
raise RuntimeError('tried to create PolylineRegion with '
f'invalid LineString {self.lineString}')
self.segments = self.segmentsOf(self.lineString)
cumulativeLengths = []
total = 0
for p, q in self.segments:
dx, dy = p[0] - q[0], p[1] - q[1]
total += math.hypot(dx, dy)
cumulativeLengths.append(total)
self.cumulativeLengths = cumulativeLengths
@classmethod
def segmentsOf(cls, lineString):
if isinstance(lineString, shapely.geometry.LineString):
segments = []
points = list(lineString.coords)
if len(points) < 2:
raise RuntimeError('LineString has fewer than 2 points')
last = points[0]
for point in points[1:]:
segments.append((last, point))
last = point
return segments
elif isinstance(lineString, shapely.geometry.MultiLineString):
allSegments = []
for line in lineString:
allSegments.extend(cls.segmentsOf(line))
return allSegments
else:
raise RuntimeError('called segmentsOf on non-linestring')
def uniformPointInner(self):
pointA, pointB = random.choices(self.segments,
cum_weights=self.cumulativeLengths)[0]
interpolation = random.random()
x, y = averageVectors(pointA, pointB, weight=interpolation)
if self.orientation is True:
return OrientedVector(x, y, headingOfSegment(pointA, pointB))
else:
return self.orient(Vector(x, y))
def intersect(self, other, triedReversed=False):
poly = toPolygon(other)
if poly is not None:
intersection = self.lineString & poly
if (intersection.is_empty or
not isinstance(intersection, (shapely.geometry.LineString,
shapely.geometry.MultiLineString))):
# TODO handle points!
return nowhere
return PolylineRegion(polyline=intersection)
return super().intersect(other, triedReversed)
def containsPoint(self, point):
return self.lineString.intersects(shapely.geometry.Point(point))
def containsObject(self, obj):
return False
def getAABB(self):
xmin, ymin, xmax, ymax = self.lineString.bounds
return ((xmin, ymin), (xmax, ymax))
def show(self, plt, style='r-'):
for pointA, pointB in self.segments:
plt.plot([pointA[0], pointB[0]], [pointA[1], pointB[1]], style)
def __str__(self):
return f'PolylineRegion({self.lineString})'
def __eq__(self, other):
if type(other) is not PolylineRegion:
return NotImplemented
return (other.lineString == self.lineString)
@cached
def __hash__(self):
return hash(str(self.lineString))
class PolygonalRegion(Region):
"""Region given by one or more polygons (possibly with holes)"""
def __init__(self, points=None, polygon=None, orientation=None):
super().__init__('Polygon', orientation=orientation)
if polygon is None and points is None:
raise RuntimeError('must specify points or polygon for PolygonalRegion')
if polygon is None:
points = tuple(points)
if len(points) == 0:
raise RuntimeError('tried to create PolygonalRegion from empty point list!')
for point in points:
if needsSampling(point):
raise RuntimeError('only fixed PolygonalRegions are supported')
self.points = points
polygon = shapely.geometry.Polygon(points)
if isinstance(polygon, shapely.geometry.Polygon):
self.polygons = shapely.geometry.MultiPolygon([polygon])
elif isinstance(polygon, shapely.geometry.MultiPolygon):
self.polygons = polygon
else:
raise RuntimeError(f'tried to create PolygonalRegion from non-polygon {polygon}')
if not self.polygons.is_valid:
raise RuntimeError('tried to create PolygonalRegion with '
f'invalid polygon {self.polygons}')
if points is None and len(self.polygons) == 1 and len(self.polygons[0].interiors) == 0:
self.points = tuple(self.polygons[0].exterior.coords[:-1])
if self.polygons.is_empty:
raise RuntimeError('tried to create empty PolygonalRegion')
triangles = []
for polygon in self.polygons:
triangles.extend(triangulatePolygon(polygon))
assert len(triangles) > 0, self.polygons
self.trianglesAndBounds = tuple((tri, tri.bounds) for tri in triangles)
areas = (triangle.area for triangle in triangles)
self.cumulativeTriangleAreas = tuple(itertools.accumulate(areas))
def uniformPointInner(self):
triangle, bounds = random.choices(
self.trianglesAndBounds,
cum_weights=self.cumulativeTriangleAreas)[0]
minx, miny, maxx, maxy = bounds
# TODO improve?
while True:
x, y = random.uniform(minx, maxx), random.uniform(miny, maxy)
if triangle.intersects(shapely.geometry.Point(x, y)):
return self.orient(Vector(x, y))
def intersect(self, other, triedReversed=False):
poly = toPolygon(other)
orientation = other.orientation if self.orientation is None else self.orientation
if poly is not None:
intersection = self.polygons & poly
if intersection.is_empty:
return nowhere
elif isinstance(intersection, (shapely.geometry.Polygon,
shapely.geometry.MultiPolygon)):
return PolygonalRegion(polygon=intersection, orientation=orientation)
elif isinstance(intersection, shapely.geometry.GeometryCollection):
polys = []
for geom in intersection:
if isinstance(geom, shapely.geometry.Polygon):
polys.append(geom)
if len(polys) == 0:
# TODO handle points, lines
raise RuntimeError('unhandled type of polygon intersection')
intersection = shapely.geometry.MultiPolygon(polys)
return PolygonalRegion(polygon=intersection, orientation=orientation)
else:
# TODO handle points, lines
raise RuntimeError('unhandled type of polygon intersection')
return super().intersect(other, triedReversed)
def union(self, other):
poly = toPolygon(other)
if not poly:
raise RuntimeError(f'cannot take union of PolygonalRegion with {other}')
union = polygonUnion((self.polygons, poly))
return PolygonalRegion(polygon=union)
def containsPoint(self, point):
return self.polygons.intersects(shapely.geometry.Point(point))
def containsObject(self, obj):
objPoly = obj.polygon
if objPoly is None:
raise RuntimeError('tried to test containment of symbolic Object!')
# TODO improve boundary handling?
return self.polygons.contains(objPoly)
def getAABB(self):
xmin, xmax, ymin, ymax = self.polygons.bounds
return ((xmin, ymin), (xmax, ymax))
def show(self, plt, style='r-'):
plotPolygon(self.polygons, plt, style=style)
def __str__(self):
return '<PolygonalRegion>'
def __eq__(self, other):
if type(other) is not PolygonalRegion:
return NotImplemented
return (other.polygons == self.polygons
and other.orientation == self.orientation)
@cached
def __hash__(self):
# TODO better way to hash mutable Shapely geometries? (also for PolylineRegion)
return hash((str(self.polygons), self.orientation))
class PointSetRegion(Region):
"""Region consisting of a set of discrete points.
No :obj:`~scenic.core.object_types.Object` can be contained in a `PointSetRegion`,
since the latter is discrete. (This may not be true for subclasses, e.g.
`GridRegion`.)
Args:
name (str): name for debugging
points (iterable): set of points comprising the region
kdtree (:obj:`scipy.spatial.KDTree`, optional): k-D tree for the points (one will
be computed if none is provided)
orientation (:obj:`~scenic.core.vectors.VectorField`, optional): orientation for
the region
tolerance (float, optional): distance tolerance for checking whether a point lies
in the region
"""
def __init__(self, name, points, kdTree=None, orientation=None, tolerance=1e-6):
super().__init__(name, orientation=orientation)
self.points = tuple(points)
for point in self.points:
if needsSampling(point):
raise RuntimeError('only fixed PointSetRegions are supported')
self.kdTree = scipy.spatial.cKDTree(self.points) if kdTree is None else kdTree
self.orientation = orientation
self.tolerance = tolerance
def uniformPointInner(self):
return self.orient(Vector(*random.choice(self.points)))
def intersect(self, other, triedReversed=False):
def sampler(intRegion):
o = intRegion.regions[1]
center, radius = o.circumcircle
possibles = (Vector(*self.kdTree.data[i])
for i in self.kdTree.query_ball_point(center, radius))
intersection = [p for p in possibles if o.containsPoint(p)]
if len(intersection) == 0:
raise RejectionException(f'empty intersection of Regions {self} and {o}')
return self.orient(random.choice(intersection))
return IntersectionRegion(self, other, sampler=sampler, orientation=self.orientation)
def containsPoint(self, point):
distance, location = self.kdTree.query(point)
return (distance <= self.tolerance)
def containsObject(self, obj):
raise NotImplementedError()
def __eq__(self, other):
if type(other) is not PointSetRegion:
return NotImplemented
return (other.name == self.name
and other.points == self.points
and other.orientation == self.orientation)
def __hash__(self):
return hash((self.name, self.points, self.orientation))
class GridRegion(PointSetRegion):
"""A Region given by an obstacle grid.
A point is considered to be in a `GridRegion` if the nearest grid point is
not an obstacle.
Args:
name (str): name for debugging
grid: 2D list, tuple, or NumPy array of 0s and 1s, where 1 indicates an obstacle
and 0 indicates free space
Ax (float): spacing between grid points along X axis
Ay (float): spacing between grid points along Y axis
Bx (float): X coordinate of leftmost grid column
By (float): Y coordinate of lowest grid row
orientation (:obj:`~scenic.core.vectors.VectorField`, optional): orientation of region
"""
def __init__(self, name, grid, Ax, Ay, Bx, By, orientation=None):
self.grid = numpy.array(grid)
self.sizeY, self.sizeX = self.grid.shape
self.Ax, self.Ay = Ax, Ay
self.Bx, self.By = Bx, By
y, x = numpy.where(self.grid == 0)
points = [self.gridToPoint(point) for point in zip(x, y)]
super().__init__(name, points, orientation=orientation)
def gridToPoint(self, gp):
x, y = gp
return ((self.Ax * x) + self.Bx, (self.Ay * y) + self.By)
def pointToGrid(self, point):
x, y = point
x = (x - self.Bx) / self.Ax
y = (y - self.By) / self.Ay
nx = int(round(x))
if nx < 0 or nx >= self.sizeX:
return None
ny = int(round(y))
if ny < 0 or ny >= self.sizeY:
return None
return (nx, ny)
def containsPoint(self, point):
gp = self.pointToGrid(point)
if gp is None:
return False
x, y = gp
return (self.grid[y, x] == 0)
def containsObject(self, obj):
# TODO improve this procedure!
# Fast check
for c in obj.corners:
if not self.containsPoint(c):
return False
# Slow check
gps = [self.pointToGrid(corner) for corner in obj.corners]
x, y = zip(*gps)
minx, maxx = findMinMax(x)
miny, maxy = findMinMax(y)
for x in range(minx, maxx+1):
for y in range(miny, maxy+1):
p = self.gridToPoint((x, y))
if self.grid[y, x] == 1 and obj.containsPoint(p):
return False
return True
class IntersectionRegion(Region):
def __init__(self, *regions, orientation=None, sampler=None):
self.regions = tuple(regions)
if len(self.regions) < 2:
raise RuntimeError('tried to take intersection of fewer than 2 regions')
super().__init__('Intersection', *self.regions, orientation=orientation)
if sampler is None:
sampler = self.genericSampler
self.sampler = sampler
def sampleGiven(self, value):
regs = [value[reg] for reg in self.regions]
# Now that regions have been sampled, attempt intersection again in the hopes
# there is a specialized sampler to handle it (unless we already have one)
if self.sampler is self.genericSampler:
failed = False
intersection = regs[0]
for region in regs[1:]:
intersection = intersection.intersect(region)
if isinstance(intersection, IntersectionRegion):
failed = True
break
if not failed:
intersection.orientation = value[self.orientation]
return intersection
return IntersectionRegion(*regs, orientation=value[self.orientation],
sampler=self.sampler)
def evaluateInner(self, context):
regs = (valueInContext(reg, context) for reg in self.regions)
orientation = valueInContext(self.orientation, context)
return IntersectionRegion(*regs, orientation=orientation, sampler=self.sampler)
def containsPoint(self, point):
return all(region.containsPoint(point) for region in self.regions)
def uniformPointInner(self):
return self.orient(self.sampler(self))
@staticmethod
def genericSampler(intersection):
regs = intersection.regions
point = regs[0].uniformPointInner()
for region in regs[1:]:
if not region.containsPoint(point):
raise RejectionException(
f'sampling intersection of Regions {regs[0]} and {region}')
return point
def isEquivalentTo(self, other):
if type(other) is not IntersectionRegion:
return False
return (areEquivalent(set(other.regions), set(self.regions))
and other.orientation == self.orientation)
def __str__(self):
return f'IntersectionRegion({self.regions})'
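A small sketch of the sampling API above, using this module's own `Vector`; the values are arbitrary:

```python
# CircularRegion samples uniformly over the disk: the triangular density in r
# compensates for circumference growing proportionally to r.
circle = CircularRegion(Vector(0, 0), 10)
pt = circle.uniformPoint()
assert circle.containsPoint(pt)
print(circle.getAABB())  # ((-10, -10), (10, 10))
```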
| 33.896552 | 96 | 0.722238 | 3,127 | 24,575 | 5.621043 | 0.131116 | 0.024748 | 0.006258 | 0.014223 | 0.336861 | 0.261535 | 0.184502 | 0.134039 | 0.102975 | 0.057803 | 0 | 0.003512 | 0.16586 | 24,575 | 724 | 97 | 33.94337 | 0.853944 | 0.115443 | 0 | 0.330325 | 0 | 0 | 0.072705 | 0.012118 | 0 | 0 | 0 | 0.002762 | 0.00722 | 1 | 0.169675 | false | 0.001805 | 0.028881 | 0.057762 | 0.413357 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| ab42204ebfa5ee7790165df748eb621656c602f4 | 6,525 | py | Python | orangery/cli/cutfill.py | mrahnis/orangery | 69afe0057bd61163eb8e026e58d648dfa1e73b94 | ["BSD-3-Clause"] | 2 | 2015-11-30T02:46:28.000Z | 2021-06-26T15:01:45.000Z | orangery/cli/cutfill.py | mrahnis/orangery | 69afe0057bd61163eb8e026e58d648dfa1e73b94 | ["BSD-3-Clause"] | 18 | 2017-06-18T03:23:05.000Z | 2022-03-18T00:14:05.000Z | orangery/cli/cutfill.py | mrahnis/orangery | 69afe0057bd61163eb8e026e58d648dfa1e73b94 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import sys
import logging
import time
import json

import click
import matplotlib.pyplot as plt

import orangery as o
from orangery.cli import defaults, util
from orangery.tools.plotting import get_scale_factor


@click.command(options_metavar='<options>')
@click.argument('file1', nargs=1, type=click.Path(exists=True), metavar='<file_t0>')  # help="survey representing the initial condition"
@click.argument('file2', nargs=1, type=click.Path(exists=True), metavar='<file_t1>')  # help="survey representing the final condition"
@click.argument('fields', nargs=1, metavar='<fields>')  # help="character string identifying the columns"
@click.argument('xs_name', nargs=1, metavar='<name>')  # help="name of the cross-section to plot"
@click.option('--codes', 'codes_f', nargs=1, type=click.Path(exists=True), metavar='<codes_file>', help="JSON file representing the usage intent of a set of survey codes")
@click.option('--show/--save', is_flag=True, default=True, help="Show the plot or save to files; --show is the default")
@click.option('--summary/--no-summary', default=True, help="Print summary information; --summary is the default")
@click.option('--units', type=click.Choice(['m', 'sft', 'ft']), default='m', help="Unit to show in axis labels")
@click.option('--labels', nargs=2, metavar='<text text>', help="Labels to display in the legend")
@click.option('--exaggeration', metavar='<int>', default=3, help="Vertical exaggeration of plot")
@click.option('--scale', nargs=2, metavar='<float int>', type=click.Tuple([float, int]), default=(10, 300), help="Scale where first argument is units per-inch on the horizontal axis and second argument is output DPI")
@click.option('--close/--no-close', default=True, help="Close the line ends; --close is the default")
@click.option('--reverse', type=click.Choice(['t0', 't1', 'tx']), help="Reverse a line or lines of section (t0=initial, t1=final, tx=both)")
@click.option('--exclude', nargs=2, type=click.Tuple([str, click.Choice(['t0', 't1', 'tx'])]), multiple=True, metavar='<str choice>', help="Exclude a survey code from a line or lines of section (t0=initial, t1=final, tx=both)")
@click.option('--overlay', nargs=1, type=click.Path(exists=True))
@click.option('-v', '--verbose', is_flag=True, help="Enables verbose mode")
def cutfill(file1, file2, fields, xs_name, codes_f, show, summary, units, labels, exaggeration, scale, close, reverse, exclude, overlay, verbose):
    """Displays a plot of a repeat survey with cut and fill.

    \b
    The cutfill subcommand takes four arguments:
    <file_t0> : survey data representing the initial condition in csv format
    <file_t1> : survey data representing the final condition in csv format
    <fields> : series of characters describing the data columns
    <name> : name of cross-section to plot

    Options allow to set various properties of the plot. The default is to --show the plot.
    With the --save option the plot will be saved as an image along with a csv file containing
    data about cross-sectional cut-and-fill areas along the line of section.

    \b
    Example:
    orangery cutfill file_2004.csv file_2010.csv pxyzctr XS-7 --reverse t0

    """
    if verbose is True:
        loglevel = 2
    else:
        loglevel = 0
    logging.basicConfig(stream=sys.stderr, level=loglevel or logging.INFO)

    # load the configuration
    codes = defaults.codes.copy()
    if codes_f:
        user_codes = util.load_config(codes_f)
        codes.update(user_codes)

    # load the survey data
    s1 = o.Survey(file1, fields, codes, 0)
    s2 = o.Survey(file2, fields, codes, 0)
    if overlay:
        s3 = o.Survey(overlay, fields, codes, 0)

    exclude_t0 = []
    exclude_t1 = []
    for code in exclude:
        if code[1] in ('t0', 'tx'):
            exclude_t0.append(code[0])
        if code[1] in ('t1', 'tx'):
            exclude_t1.append(code[0])

    # select a group of points, in this case a cross section
    xs_pts1 = o.group(s1.data, s1.code_table, group=xs_name, exclude=exclude_t0)
    xs_pts2 = o.group(s2.data, s2.code_table, group=xs_name, exclude=exclude_t1)
    # xs_pts_overlay = o.group(s3.data, s3.code_table, group=xs_name)

    # get the endpoints of the group
    p1, p2 = o.endpoints(xs_pts1, reverse=reverse in ('t0', 'tx'))

    # make the sections
    xs1 = o.Section(xs_pts1, p1, p2, reverse=reverse in ('t0', 'tx'))
    xs2 = o.Section(xs_pts2, p1, p2, reverse=reverse in ('t1', 'tx'))
    # xs_overlay = o.Section(xs_pts_overlay, p1, p2)

    if labels:
        label_t0 = labels[0]
        label_t1 = labels[1]
        # label_overlay = labels[2]  # fixed: was labels[3], out of range since --labels takes two values;
        # only needed when overlay plotting below is re-enabled
    elif 't' in fields:
        label_t0 = (xs1.data.iloc[0]['t']).split('T')[0]
        label_t1 = (xs2.data.iloc[0]['t']).split('T')[0]
        # label_overlay = (xs_overlay.data.iloc[0]['t']).split('T')[0]
    else:
        label_t0 = 't0'
        label_t1 = 't1'
        # label_overlay = 'pre-restoration'

    # calculate the change
    chg = o.Change(xs1, xs2, close_ends=close)
    if summary:
        chg.summarize()

    import matplotlib
    font = {'family': 'normal', 'weight': 'normal', 'size': 16}
    matplotlib.rc('font', **font)

    # plot the change between two cross-sections
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_aspect(exaggeration)
    # xs_overlay.plot(ax=ax, marker='None', linestyle='-', linewidth=3, color='tab:red', label=label_overlay)
    xs1.plot(ax=ax, marker='o', markersize=4, markerfacecolor='white', markeredgecolor='black', linestyle='-', color='gray', label=label_t0)
    xs2.plot(ax=ax, marker='o', markersize=4, markerfacecolor='black', markeredgecolor='black', linestyle='-', color='black', label=label_t1)
    chg.polygon_plot(ax=ax, fill_label='Fill', cut_label='Cut')
    chg.annotate_plot(ax=ax)
    ax.set_xlabel('Distance ({0})'.format(units))
    ax.set_ylabel('Elevation ({0}), {1}x exaggeration'.format(units, exaggeration))
    plt.legend(loc='best')
    plt.title('Cross-section {0}'.format(xs_name))

    if show:
        plt.show()
    else:
        fname = xs_name + '-' + label_t0.replace('-', '') + '-' + label_t1.replace('-', '')

        scale_factor = get_scale_factor(fig, ax, scale[0])
        dims = fig.get_size_inches()
        fig.set_size_inches(dims[0]*scale_factor, dims[1]*scale_factor)

        fig.savefig(fname+'.png', dpi=scale[1])
        click.echo('Figure saved to: {}'.format(fname+'.png'))

        chg.save(fname+'.csv')
        click.echo('Data saved to: {}'.format(fname+'.csv'))
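The command can also be exercised without a shell via click's standard test runner; the csv names below come from the docstring example and must exist on disk for `click.Path(exists=True)` to pass:

```python
from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(cutfill, ['file_2004.csv', 'file_2010.csv', 'pxyzctr', 'XS-7', '--reverse', 't0'])
print(result.exit_code, result.output)
```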
| 45 | 225 | 0.66728 | 966 | 6,525 | 4.425466 | 0.267081 | 0.030877 | 0.009357 | 0.014035 | 0.152281 | 0.108538 | 0.108538 | 0.081871 | 0.043977 | 0.025263 | 0 | 0.024309 | 0.1741 | 6,525 | 144 | 226 | 45.3125 | 0.768974 | 0.219004 | 0 | 0.033333 | 0 | 0.011111 | 0.213474 | 0.004385 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011111 | false | 0 | 0.111111 | 0 | 0.122222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| ab434ed80b4187ec898e721a32ff14b67e16c48a | 1,669 | py | Python | py/solns/wordSearch/wordSearch.py | zcemycl/algoTest | 9518fb2b60fd83c85aeb2ab809ff647aaf643f0a | ["MIT"] | 1 | 2022-01-26T16:33:45.000Z | 2022-01-26T16:33:45.000Z | py/solns/wordSearch/wordSearch.py | zcemycl/algoTest | 9518fb2b60fd83c85aeb2ab809ff647aaf643f0a | ["MIT"] | null | null | null | py/solns/wordSearch/wordSearch.py | zcemycl/algoTest | 9518fb2b60fd83c85aeb2ab809ff647aaf643f0a | ["MIT"] | 1 | 2022-01-26T16:35:44.000Z | 2022-01-26T16:35:44.000Z |
class Solution:
    @staticmethod
    def naive(board, word):
        rows, cols, n = len(board), len(board[0]), len(word)
        visited = set()

        def dfs(i, j, k):
            idf = str(i)+','+str(j)
            if i < 0 or j < 0 or i > cols-1 or j > rows-1 or \
                    board[j][i] != word[k] or idf in visited:
                return False
            if k == n-1 and word[k] == board[j][i]:
                return True
            visited.add(idf)
            # board[j][i] == word[k] is guaranteed here by the guard above
            ret = dfs(i+1, j, k+1) or dfs(i-1, j, k+1) or \
                dfs(i, j+1, k+1) or dfs(i, j-1, k+1)
            visited.remove(idf)  # fixed: backtrack so other paths may revisit this cell
            return ret

        for j in range(rows):
            for i in range(cols):
                if board[j][i] == word[0]:
                    if dfs(i, j, 0):
                        return True
        return False

    @staticmethod
    def quick(board, word):
        ''' Improve by,
        1. Exclude set which stores visited coordinates, and use #.
        2. No indexing in original word.
        3. Quick exit for 4 directions.
        '''
        rows, cols, n = len(board), len(board[0]), len(word)

        def dfs(i, j, remain):
            if len(remain) == 0:
                return True
            if i < 0 or j < 0 or i > cols-1 or j > rows-1 or \
                    board[j][i] != remain[0]:
                return False
            board[j][i] = "#"
            ret = False
            for rowOff, colOff in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
                ret = dfs(i+colOff, j+rowOff, remain[1:])
                if ret:
                    break
            board[j][i] = remain[0]
            return ret

        for j in range(rows):
            for i in range(cols):
                if board[j][i] == word[0]:
                    if dfs(i, j, word):
                        return True
        return False
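A quick smoke test for both strategies on the classic word-search example grid; `quick` mutates the board in place, so each call gets a fresh copy:

```python
board = [["A", "B", "C", "E"],
         ["S", "F", "C", "S"],
         ["A", "D", "E", "E"]]
print(Solution.naive([row[:] for row in board], "ABCCED"))  # True
print(Solution.quick([row[:] for row in board], "ABCCED"))  # True
print(Solution.quick([row[:] for row in board], "ABCB"))    # False
```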
| 37.088889 | 67 | 0.461953 | 260 | 1,669 | 2.965385 | 0.2 | 0.046693 | 0.072633 | 0.042802 | 0.442283 | 0.442283 | 0.352789 | 0.352789 | 0.352789 | 0.303502 | 0 | 0.037255 | 0.388856 | 1,669 | 45 | 68 | 37.088889 | 0.718627 | 0.081486 | 0 | 0.375 | 0 | 0 | 0.001339 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.275 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| ab453c7b64fdd47b4cf51bb569f233871fe2b337 | 4,118 | py | Python | harbor/tests/test_unit.py | tdimnet/integrations-core | a78133a3b71a1b8377fa214d121a98647031ab06 | ["BSD-3-Clause"] | 663 | 2016-08-23T05:23:45.000Z | 2022-03-29T00:37:23.000Z | harbor/tests/test_unit.py | tdimnet/integrations-core | a78133a3b71a1b8377fa214d121a98647031ab06 | ["BSD-3-Clause"] | 6,642 | 2016-06-09T16:29:20.000Z | 2022-03-31T22:24:09.000Z | harbor/tests/test_unit.py | tdimnet/integrations-core | a78133a3b71a1b8377fa214d121a98647031ab06 | ["BSD-3-Clause"] | 1,222 | 2017-01-27T15:51:38.000Z | 2022-03-31T18:17:51.000Z |
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from mock import MagicMock
from requests import HTTPError
from datadog_checks.base import AgentCheck
from datadog_checks.dev.http import MockResponse
from .common import HARBOR_COMPONENTS, HARBOR_VERSION, VERSION_1_5, VERSION_1_6, VERSION_1_8
@pytest.mark.usefixtures("patch_requests")
def test_check_health(aggregator, harbor_check, harbor_api):
base_tags = ['tag1:val1', 'tag2']
harbor_check._check_health(harbor_api, base_tags)
if harbor_api.harbor_version >= VERSION_1_8:
components = HARBOR_COMPONENTS
for c in components:
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags + ['component:{}'.format(c)])
elif harbor_api.harbor_version >= VERSION_1_6:
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags + ['component:chartmuseum'])
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags)
elif harbor_api.harbor_version >= VERSION_1_5:
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags)
else:
aggregator.assert_service_check('harbor.status', AgentCheck.UNKNOWN, tags=base_tags)
@pytest.mark.usefixtures("patch_requests")
def test_check_registries_health(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._check_registries_health(harbor_api, tags)
tags.append('registry:demo')
aggregator.assert_service_check('harbor.registry.status', AgentCheck.OK, tags=tags)
@pytest.mark.usefixtures("patch_requests")
def test_submit_project_metrics(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._submit_project_metrics(harbor_api, tags)
aggregator.assert_metric('harbor.projects.count', 2, tags=tags)
@pytest.mark.usefixtures("patch_requests")
def test_submit_disk_metrics(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._submit_disk_metrics(harbor_api, tags)
aggregator.assert_metric('harbor.disk.free', 5e5, tags=tags)
aggregator.assert_metric('harbor.disk.total', 1e6, tags=tags)
@pytest.mark.usefixtures("patch_requests")
@pytest.mark.skipif(HARBOR_VERSION < VERSION_1_5, reason="The registry.read_only metric is submitted for Harbor 1.5+")
def test_submit_read_only_status(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._submit_read_only_status(harbor_api, tags)
aggregator.assert_metric('harbor.registry.read_only', 0, tags=tags)
def test_api__make_get_request(harbor_api):
harbor_api.http = MagicMock()
harbor_api.http.get = MagicMock(return_value=MockResponse(json_data={'json': True}))
assert harbor_api._make_get_request('{base_url}/api/path') == {"json": True}
harbor_api.http.get = MagicMock(return_value=MockResponse(status_code=500))
with pytest.raises(HTTPError):
harbor_api._make_get_request('{base_url}/api/path')
def test_api__make_paginated_get_request(harbor_api):
expected_result = [{'item': i} for i in range(20)]
paginated_result = [[expected_result[i], expected_result[i + 1]] for i in range(0, len(expected_result) - 1, 2)]
values = []
for r in paginated_result:
values.append(MockResponse(json_data=r, headers={'link': 'Link: <unused_url>; rel=next; type="text/plain"'}))
values[-1].headers.pop('link')
harbor_api.http = MagicMock()
harbor_api.http.get = MagicMock(side_effect=values)
assert harbor_api._make_paginated_get_request('{base_url}/api/path') == expected_result
def test_api__make_post_request(harbor_api):
harbor_api.http = MagicMock()
harbor_api.http.post = MagicMock(return_value=MockResponse(json_data={'json': True}))
assert harbor_api._make_post_request('{base_url}/api/path') == {"json": True}
harbor_api.http.post = MagicMock(return_value=MockResponse(status_code=500))
with pytest.raises(HTTPError):
harbor_api._make_post_request('{base_url}/api/path')
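The pagination test above encodes the contract that `_make_paginated_get_request` keeps fetching while the response carries a `link` header and stops at the first response without one. A minimal, self-contained sketch of that accumulation loop (`FakeResponse` and `fetch_all` are hypothetical stand-ins, not part of the Harbor check):

```
class FakeResponse:
    """Hypothetical stand-in for an HTTP response with a JSON body and headers."""

    def __init__(self, json_data, headers=None):
        self._json = json_data
        self.headers = headers or {}

    def json(self):
        return self._json


def fetch_all(pages):
    """Accumulate items page by page, stopping at the first page without a 'link' header."""
    results = []
    for page in pages:
        results.extend(page.json())
        if 'link' not in page.headers:
            break
    return results


pages = [
    FakeResponse([{'item': 0}, {'item': 1}], headers={'link': '<unused_url>; rel=next'}),
    FakeResponse([{'item': 2}, {'item': 3}]),  # final page: no 'link' header
]
assert fetch_all(pages) == [{'item': i} for i in range(4)]
```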
| avg_line_length: 43.347368 | max_line_length: 120 | alphanum_fraction: 0.753035 | quality signals: … |

| hexsha: ab47866bdd7f779d52254d019f551b3dccc349a3 | size: 2,649 | ext: py | lang: Python |
| path: M-SPRING/template/adapter.py | repo: CN-UPB/SPRING | head: 1cb74919689e832987cb2c9b490eec7f09a64f52 | licenses: ["Apache-2.0"] |
| stars: 3 (2019-09-27T08:07:11.000Z to 2021-11-19T11:27:39.000Z) | issues: null | forks: null |
# module for adapting templates on the fly if components are reused
# check that all reused components are defined consistently -> else: exception
def check_consistency(components):
for j1 in components:
for j2 in components: # compare all components
if j1 == j2 and j1.__dict__ != j2.__dict__: # same name and reuseID but different other attributes
raise ValueError("Inconsistent definition of reused component {}.".format(j1))
# check and return number of reuses
def reuses(component, arcs):
# count number of reuses for each port
times = set() # set => no duplicates
for k in range(component.inputs):
times.add(len([a for a in arcs if a.ends_in(k, component)]))
for k in range(component.outputs):
times.add(len([a for a in arcs if a.starts_at(k, component)]))
# check if each port was reused the same number of times (requirement/assumption)
if len(times) != 1:
raise ValueError("Not all ports of {} are (re-)used the same number of times (required).".format(component))
return times.pop()
# return adapted templates with adapted reused components and exactly one arc per port (allows proportional output)
def adapt_for_reuse(templates):
# create set of components and arcs
arcs = []
for t in templates:
arcs += t.arcs
# find reused components and adapt them
component_reuses = {} # dictionary with components-#reuses
reused_components = [] # list of all reused components (contains duplicates) for consistency check
for t in templates:
for j in t.components:
uses = reuses(j, arcs)
if uses > 1: # used by >1 => reuse
if j.source:
raise ValueError("Source component {} cannot be reused".format(j))
j.adapt(uses) # add ports and functions on the fly
component_reuses[j] = uses
reused_components.append(j)
check_consistency(reused_components) # check consistent def of reused components
# adjust arcs to use new ports
for j in component_reuses:
uses = component_reuses[j]
port_offset = 0
for t in templates:
# adjust/shift ingoing arcs by offset to correct port
arc_shifted = False
for a in t.arcs:
if a.dest == j:
a.dest_in += port_offset
arc_shifted = True
if a.source == j:
a.src_out += port_offset
arc_shifted = True
# increase the offset for the next template if an arc was shifted
if arc_shifted:
if port_offset >= uses: # arc was shifted too often: something went wrong
raise ValueError("Port offset {} too high. Should be < {} (#reuses).".format(port_offset, uses))
port_offset += 1
return templates
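To make the port-counting contract of `reuses` concrete, here is a minimal sketch with hypothetical `Component` and `Arc` stubs (the real template classes are defined elsewhere in the project); every port of a reused component must carry the same number of arcs, and that shared count is what `reuses` returns:

```
class Component:
    """Hypothetical stub: a component with fixed numbers of input/output ports."""
    def __init__(self, name, inputs, outputs):
        self.name, self.inputs, self.outputs = name, inputs, outputs


class Arc:
    """Hypothetical stub: an arc from (component, output port) to (component, input port)."""
    def __init__(self, source, src_out, dest, dest_in):
        self.source, self.src_out, self.dest, self.dest_in = source, src_out, dest, dest_in

    def ends_in(self, port, component):
        return self.dest is component and self.dest_in == port

    def starts_at(self, port, component):
        return self.source is component and self.src_out == port


a = Component('a', inputs=1, outputs=1)
b = Component('b', inputs=1, outputs=1)
c = Component('c', inputs=1, outputs=1)
# b is used by two templates: each of its ports carries exactly two arcs
arcs = [Arc(a, 0, b, 0), Arc(c, 0, b, 0), Arc(b, 0, a, 0), Arc(b, 0, c, 0)]
print(reuses(b, arcs))  # -> 2
```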
| avg_line_length: 36.791667 | max_line_length: 116 | alphanum_fraction: 0.690449 | quality signals: … |

| hexsha: ab4821f304dd37c05424c887f3d045a73e2c92fa | size: 4,369 | ext: py | lang: Python |
| path: column_completer.py | repo: AllanLRH/column_completer | head: c1a0e1915256a4e3825c5c3b9863d78fdaf50be1 | licenses: ["Unlicense"] |
| stars: null | issues: null | forks: null |
import warnings


class ColumnCompleter(object):
"""Complete Pandas DataFrame column names"""
def __init__(self, df, space_filler='_', silence_warnings=False):
"""
Once instantiated with a Pandas DataFrame, it will expose the column
names as attributes which maps to their string counterparts.
Autocompletion is supported.
Spaces in the column names are by default replaced with underscores, though
it still maps to the original column names — the replacement is necessary to
conform to a valid Python syntax.
Parameters
----------
df : pd.DataFrame
DataFrame whose column names to expose.
space_filler : str, optional
            String to replace spaces in column names, by default '_'.
        silence_warnings : bool, optional
            Set to True to disable warnings concerning column names which start or
            end with spaces, which are hard to detect by visual inspection, by default False.
"""
super(ColumnCompleter, self).__init__()
# We copy the columns to avoid keeping old references to a DataFrame which
# would otherwise be garbage collected.
self.columns = df.columns.copy()
self.space_filler = space_filler
self.silence_warnings = silence_warnings
if not self.silence_warnings:
self._warn_about_column_names_edge_spaces()
self._set_columns()
    def _warn_about_column_names_edge_spaces(self):
        if not hasattr(self.columns, 'str'):  # the column names are not strings
            return None
        if self.columns.str.startswith(' ').any():
            warnings.warn("The following columns start with one or more spaces: " +
                          ", ".join(self.columns[self.columns.str.startswith(' ')]))
        if self.columns.str.endswith(' ').any():
            warnings.warn("The following columns end with one or more spaces: " +
                          ", ".join(self.columns[self.columns.str.endswith(' ')]))
def _set_columns(self):
if not hasattr(self.columns, 'str'): # the column names are not strings
self.mapping = {col: col for col in self.columns}
elif self.space_filler is None:
self.mapping = {col: col for col in self.columns if ' ' not in col}
else:
self.mapping = {col.replace(
' ', self.space_filler): col for col in self.columns}
if len(self.mapping) < len(self.columns):
raise ValueError("Using {} as a replacemnt for".format(repr(self.space_filler)) +
" spaces causes a collision of column names, please chose another.")
self.keys = self.mapping.keys()
if len(self.keys) < len(self.columns) and not self.silence_warnings:
raise Warning("Without a space_filler specified, you're only able to autocomplete " +
"{} of {} column names.".format(len(self.keys), len(self.columns)))
@staticmethod
def replace_df_column_spaces(df, rep, capatilize_first_letter=False):
"""
Return a DataFrame with the spaces in the column names replaced with a custom string.
Parameters
----------
df : pd.DataFrame
            DataFrame whose columns to rename.
rep : str
String to replace spaces with.
capatilize_first_letter : bool, optional
            If True, the first letter of the renamed columns will be capitalized, by default False.
Returns
-------
pd.DataFrame
DataFrame with renamed columns.
Raises
------
ValueError
If the renaming of the columns causes one or more column names to be identical.
"""
rename_dict = {col: col.replace(' ', rep) for col in df.columns}
if len(set(rename_dict.values())) < len(df.columns.unique()):
raise ValueError("Renaming the columns in such a way would cause a " +
"collision of column names.")
if capatilize_first_letter:
rename_dict = {k: v[0].upper() + v[1:]
for k, v in rename_dict.items()}
return df.rename(columns=rename_dict)
def __dir__(self):
return self.keys
def __getattr__(self, key):
return self.mapping[key]
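A brief usage sketch, assuming pandas is installed and the class above is in scope; attribute access returns the original column name, so the completer plugs straight into normal indexing:

```
import pandas as pd

df = pd.DataFrame({'mean speed': [1.0], 'max speed': [2.0]})
cols = ColumnCompleter(df)
print(cols.mean_speed)     # -> 'mean speed' (the original column name)
print(df[cols.max_speed])  # usable anywhere the real column name is expected
```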
| avg_line_length: 44.581633 | max_line_length: 101 | alphanum_fraction: 0.610895 | quality signals: … |

| hexsha: ab4824ab4c800c1d309f147567a8700135e66f6b | size: 1,483 | ext: py | lang: Python |
| path: source/vsm-dashboard/vsm_dashboard/test/test_data/swift_data.py | repo: ramkrsna/virtual-storage-manager | head: 78125bfb4dd4d78ff96bc3274c8919003769c545 | licenses: ["Apache-2.0"] |
| stars: 172 (2015-01-07T08:40:17.000Z to 2019-02-18T07:01:11.000Z) | issues: 83 (2015-03-06T07:47:03.000Z to 2018-07-05T15:10:19.000Z) | forks: 125 (2015-01-05T12:22:15.000Z to 2019-02-18T07:01:39.000Z) |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vsm_dashboard.api import swift
from .utils import TestDataContainer
def data(TEST):
TEST.containers = TestDataContainer()
TEST.objects = TestDataContainer()
container_1 = swift.Container(dict(name=u"container_one\u6346"))
container_2 = swift.Container(dict(name=u"container_two\u6346"))
TEST.containers.add(container_1, container_2)
object_dict = {"name": u"test_object\u6346",
"content_type": u"text/plain",
"bytes": 128,
"last_modified": None,
"hash": u"object_hash"}
obj_dicts = [object_dict]
obj_data = "Fake Data"
for obj_dict in obj_dicts:
swift_object = swift.StorageObject(obj_dict,
container_1.name,
data=obj_data)
TEST.objects.add(swift_object)
| avg_line_length: 38.025641 | max_line_length: 78 | alphanum_fraction: 0.645988 | quality signals: … |

| hexsha: ab48693089ba2d51e690249090e8808f1456a30c | size: 17,971 | ext: py | lang: Python |
| path: cinder/backup/driver.py | repo: liangintel/stx-cinder | head: f4c43797a3f8c0caebfd8fb67244c084d26d9741 | licenses: ["Apache-2.0"] |
| stars: null | issues: 2 (2018-10-25T13:04:01.000Z to 2019-08-17T13:15:24.000Z) | forks: 2 (2018-10-17T13:32:50.000Z to 2018-11-08T08:39:39.000Z) |
# Copyright (C) 2013 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for all backup drivers."""
import abc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from cinder.db import base
from cinder import exception
from cinder.i18n import _
from cinder import keymgr as key_manager
service_opts = [
cfg.IntOpt('backup_metadata_version', default=2,
help='Backup metadata version to be used when backing up '
'volume metadata. If this number is bumped, make sure the '
'service doing the restore supports the new version.'),
cfg.IntOpt('backup_object_number_per_notification',
default=10,
help='The number of chunks or objects, for which one '
'Ceilometer notification will be sent'),
cfg.IntOpt('backup_timer_interval',
default=120,
help='Interval, in seconds, between two progress notifications '
'reporting the backup status'),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
LOG = logging.getLogger(__name__)
class BackupMetadataAPI(base.Base):
TYPE_TAG_VOL_BASE_META = 'volume-base-metadata'
TYPE_TAG_VOL_META = 'volume-metadata'
TYPE_TAG_VOL_GLANCE_META = 'volume-glance-metadata'
def __init__(self, context, db=None):
super(BackupMetadataAPI, self).__init__(db)
self.context = context
@staticmethod
def _is_serializable(value):
"""Returns True if value is serializable."""
try:
jsonutils.dumps(value)
except TypeError:
LOG.info("Value with type=%s is not serializable",
type(value))
return False
return True
def _save_vol_base_meta(self, container, volume_id):
"""Save base volume metadata to container.
This will fetch all fields from the db Volume object for volume_id and
save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_BASE_META
LOG.debug("Getting metadata type '%s'", type_tag)
meta = self.db.volume_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for key, value in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(value):
LOG.info("Unable to serialize field '%s' - excluding "
"from backup", key)
continue
# Copy the encryption key UUID for backup
                if key == 'encryption_key_id' and value is not None:
km = key_manager.API(CONF)
value = km.store(self.context, km.get(self.context, value))
LOG.debug("Copying encryption key UUID for backup.")
container[type_tag][key] = value
LOG.debug("Completed fetching metadata type '%s'", type_tag)
else:
LOG.debug("No metadata type '%s' available", type_tag)
def _save_vol_meta(self, container, volume_id):
"""Save volume metadata to container.
This will fetch all fields from the db VolumeMetadata object for
volume_id and save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_META
LOG.debug("Getting metadata type '%s'", type_tag)
meta = self.db.volume_metadata_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(meta[entry]):
LOG.info("Unable to serialize field '%s' - excluding "
"from backup", entry)
continue
container[type_tag][entry] = meta[entry]
LOG.debug("Completed fetching metadata type '%s'", type_tag)
else:
LOG.debug("No metadata type '%s' available", type_tag)
def _save_vol_glance_meta(self, container, volume_id):
"""Save volume Glance metadata to container.
This will fetch all fields from the db VolumeGlanceMetadata object for
volume_id and save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_GLANCE_META
LOG.debug("Getting metadata type '%s'", type_tag)
try:
meta = self.db.volume_glance_metadata_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(entry.value):
LOG.info("Unable to serialize field '%s' - "
"excluding from backup", entry)
continue
container[type_tag][entry.key] = entry.value
LOG.debug("Completed fetching metadata type '%s'", type_tag)
except exception.GlanceMetadataNotFound:
LOG.debug("No metadata type '%s' available", type_tag)
@staticmethod
def _filter(metadata, fields, excludes=None):
"""Returns set of metadata restricted to required fields.
If fields is empty list, the full set is returned.
:param metadata: master set of metadata
:param fields: list of fields we want to extract
:param excludes: fields to be excluded
:returns: filtered metadata
"""
if not fields:
return metadata
if not excludes:
excludes = []
subset = {}
for field in fields:
if field in metadata and field not in excludes:
subset[field] = metadata[field]
else:
LOG.debug("Excluding field '%s'", field)
return subset
def _restore_vol_base_meta(self, metadata, volume_id, fields):
"""Restore values to Volume object for provided fields."""
LOG.debug("Restoring volume base metadata")
excludes = []
# Ignore unencrypted backups.
key = 'encryption_key_id'
if key in fields and key in metadata and metadata[key] is not None:
self._restore_vol_encryption_meta(volume_id,
metadata['volume_type_id'])
# NOTE(dosaboy): if the target volume looks like it was auto-created
# as part of this restore operation and we have a name to restore
# then apply the name to the target volume. However, if that target
# volume already existed and it has a name or we do not have a name to
# restore, then ignore this key. This is intended to be a less drastic
# solution than commit 7ee80f7.
key = 'display_name'
if key in fields and key in metadata:
target_vol = self.db.volume_get(self.context, volume_id)
name = target_vol.get(key, '')
if (not metadata.get(key) or name and
not name.startswith('restore_backup_')):
excludes.append(key)
excludes.append('display_description')
metadata = self._filter(metadata, fields, excludes=excludes)
self.db.volume_update(self.context, volume_id, metadata)
def _restore_vol_encryption_meta(self, volume_id, src_volume_type_id):
"""Restores the volume_type_id for encryption if needed.
Only allow restoration of an encrypted backup if the destination
volume has the same volume type as the source volume. Otherwise
encryption will not work. If volume types are already the same,
no action is needed.
"""
dest_vol = self.db.volume_get(self.context, volume_id)
if dest_vol['volume_type_id'] != src_volume_type_id:
LOG.debug("Volume type id's do not match.")
# If the volume types do not match, and the destination volume
# does not have a volume type, force the destination volume
# to have the encrypted volume type, provided it still exists.
if dest_vol['volume_type_id'] is None:
try:
self.db.volume_type_get(
self.context, src_volume_type_id)
except exception.VolumeTypeNotFound:
LOG.debug("Volume type of source volume has been "
"deleted. Encrypted backup restore has "
"failed.")
msg = _("The source volume type '%s' is not "
"available.") % (src_volume_type_id)
raise exception.EncryptedBackupOperationFailed(msg)
# Update dest volume with src volume's volume_type_id.
LOG.debug("The volume type of the destination volume "
"will become the volume type of the source "
"volume.")
self.db.volume_update(self.context, volume_id,
{'volume_type_id': src_volume_type_id})
else:
# Volume type id's do not match, and destination volume
# has a volume type. Throw exception.
LOG.warning("Destination volume type is different from "
"source volume type for an encrypted volume. "
"Encrypted backup restore has failed.")
msg = (_("The source volume type '%(src)s' is different "
"than the destination volume type '%(dest)s'.") %
{'src': src_volume_type_id,
'dest': dest_vol['volume_type_id']})
raise exception.EncryptedBackupOperationFailed(msg)
def _restore_vol_meta(self, metadata, volume_id, fields):
"""Restore values to VolumeMetadata object for provided fields."""
LOG.debug("Restoring volume metadata")
metadata = self._filter(metadata, fields)
self.db.volume_metadata_update(self.context, volume_id, metadata, True)
def _restore_vol_glance_meta(self, metadata, volume_id, fields):
"""Restore values to VolumeGlanceMetadata object for provided fields.
First delete any existing metadata then save new values.
"""
LOG.debug("Restoring volume glance metadata")
metadata = self._filter(metadata, fields)
self.db.volume_glance_metadata_delete_by_volume(self.context,
volume_id)
for key, value in metadata.items():
self.db.volume_glance_metadata_create(self.context,
volume_id,
key, value)
# Now mark the volume as bootable
self.db.volume_update(self.context, volume_id,
{'bootable': True})
def _v1_restore_factory(self):
"""All metadata is backed up but we selectively restore.
Returns a dictionary of the form:
{<type tag>: (<restore function>, <fields list>)}
Empty field list indicates that all backed up fields should be
restored.
"""
return {self.TYPE_TAG_VOL_BASE_META:
(self._restore_vol_base_meta,
['display_name', 'display_description']),
self.TYPE_TAG_VOL_META:
(self._restore_vol_meta, []),
self.TYPE_TAG_VOL_GLANCE_META:
(self._restore_vol_glance_meta, [])}
def _v2_restore_factory(self):
"""All metadata is backed up but we selectively restore.
Returns a dictionary of the form:
{<type tag>: (<restore function>, <fields list>)}
Empty field list indicates that all backed up fields should be
restored.
"""
return {self.TYPE_TAG_VOL_BASE_META:
(self._restore_vol_base_meta,
['display_name', 'display_description', 'encryption_key_id']),
self.TYPE_TAG_VOL_META:
(self._restore_vol_meta, []),
self.TYPE_TAG_VOL_GLANCE_META:
(self._restore_vol_glance_meta, [])}
def get(self, volume_id):
"""Get volume metadata.
Returns a json-encoded dict containing all metadata and the restore
version i.e. the version used to decide what actually gets restored
from this container when doing a backup restore.
"""
container = {'version': CONF.backup_metadata_version}
self._save_vol_base_meta(container, volume_id)
self._save_vol_meta(container, volume_id)
self._save_vol_glance_meta(container, volume_id)
if container:
return jsonutils.dumps(container)
else:
return None
def put(self, volume_id, json_metadata):
"""Restore volume metadata to a volume.
The json container should contain a version that is supported here.
"""
meta_container = jsonutils.loads(json_metadata)
version = meta_container['version']
if version == 1:
factory = self._v1_restore_factory()
elif version == 2:
factory = self._v2_restore_factory()
else:
msg = (_("Unsupported backup metadata version (%s)") % (version))
raise exception.BackupMetadataUnsupportedVersion(msg)
        for meta_type in factory:
            func, fields = factory[meta_type]
            if meta_type in meta_container:
                func(meta_container[meta_type], volume_id, fields)
            else:
                LOG.debug("No metadata of type '%s' to restore", meta_type)
@six.add_metaclass(abc.ABCMeta)
class BackupDriver(base.Base):
def __init__(self, context, db=None):
super(BackupDriver, self).__init__(db)
self.context = context
self.backup_meta_api = BackupMetadataAPI(context, db)
# This flag indicates if backup driver supports force
# deletion. So it should be set to True if the driver that inherits
# from BackupDriver supports the force deletion function.
self.support_force_delete = False
def get_metadata(self, volume_id):
return self.backup_meta_api.get(volume_id)
def put_metadata(self, volume_id, json_metadata):
self.backup_meta_api.put(volume_id, json_metadata)
@abc.abstractmethod
def backup(self, backup, volume_file, backup_metadata=False):
"""Start a backup of a specified volume.
Some I/O operations may block greenthreads, so in order to prevent
starvation parameter volume_file will be a proxy that will execute all
methods in native threads, so the method implementation doesn't need to
        worry about that.
"""
return
@abc.abstractmethod
def restore(self, backup, volume_id, volume_file):
"""Restore a saved backup.
Some I/O operations may block greenthreads, so in order to prevent
starvation parameter volume_file will be a proxy that will execute all
methods in native threads, so the method implementation doesn't need to
        worry about that.
"""
return
@abc.abstractmethod
def delete_backup(self, backup):
"""Delete a saved backup."""
return
def export_record(self, backup):
"""Export driver specific backup record information.
If backup backend needs additional driver specific information to
import backup record back into the system it must overwrite this method
and return it here as a dictionary so it can be serialized into a
string.
Default backup driver implementation has no extra information.
:param backup: backup object to export
:returns: driver_info - dictionary with extra information
"""
return {}
def import_record(self, backup, driver_info):
"""Import driver specific backup record information.
If backup backend needs additional driver specific information to
import backup record back into the system it must overwrite this method
since it will be called with the extra information that was provided by
export_record when exporting the backup.
Default backup driver implementation does nothing since it didn't
export any specific data in export_record.
:param backup: backup object to export
:param driver_info: dictionary with driver specific backup record
information
:returns: nothing
"""
return
def check_for_setup_error(self):
"""Method for checking if backup backend is successfully installed."""
return
@six.add_metaclass(abc.ABCMeta)
class BackupDriverWithVerify(BackupDriver):
@abc.abstractmethod
def verify(self, backup):
"""Verify that the backup exists on the backend.
Verify that the backup is OK, possibly following an import record
operation.
:param backup: backup id of the backup to verify
:raises InvalidBackup, NotImplementedError:
"""
return
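The version handling in `put()` above is a small dispatch table: each supported metadata version maps to a factory of (restore function, field whitelist) pairs. A self-contained sketch of the same pattern, with hypothetical names, independent of the Cinder codebase:

```
import json


def restore_base(meta, fields):
    """Keep only whitelisted fields; an empty whitelist keeps everything."""
    return {k: v for k, v in meta.items() if not fields or k in fields}


FACTORIES = {
    1: {'volume-base-metadata': (restore_base, ['display_name'])},
    2: {'volume-base-metadata': (restore_base, ['display_name', 'encryption_key_id'])},
}


def put(json_metadata):
    container = json.loads(json_metadata)
    version = container['version']
    if version not in FACTORIES:
        raise ValueError("Unsupported backup metadata version (%s)" % version)
    restored = {}
    for type_tag, (func, fields) in FACTORIES[version].items():
        if type_tag in container:
            restored[type_tag] = func(container[type_tag], fields)
    return restored


payload = json.dumps({'version': 2,
                      'volume-base-metadata': {'display_name': 'vol1', 'size': 10}})
print(put(payload))  # {'volume-base-metadata': {'display_name': 'vol1'}}
```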
| avg_line_length: 40.566591 | max_line_length: 79 | alphanum_fraction: 0.615492 | quality signals: … |

| hexsha: ab4a651d98707257763d7fecd97ef8404192f74c | size: 1,146 | ext: py | lang: Python |
| path: code/reasoningtool/tests/QuerySciGraphTests.py | repo: andrewsu/RTX | head: dd1de262d0817f7e6d2f64e5bec7d5009a3a2740 | licenses: ["MIT"] |
| stars: 31 (2018-03-05T20:01:10.000Z to 2022-02-01T03:31:22.000Z) | issues: 1,774 (2018-03-06T01:55:03.000Z to 2022-03-31T03:09:04.000Z) | forks: 19 (2018-05-10T00:43:19.000Z to 2022-03-08T19:26:16.000Z) |
import unittest
from QuerySciGraph import QuerySciGraph
class QuerySciGraphTestCase(unittest.TestCase):
def test_get_disont_ids_for_mesh_id(self):
disont_ids = QuerySciGraph.get_disont_ids_for_mesh_id('MESH:D005199')
known_ids = {'DOID:13636'}
self.assertSetEqual(disont_ids, known_ids)
def test_query_sub_phenotypes_for_phenotype(self):
sub_phenotypes = QuerySciGraph.query_sub_phenotypes_for_phenotype("HP:0000107") # Renal cyst
known_phenotypes = {'HP:0100877': 'Renal diverticulum',
'HP:0000108': 'Renal corticomedullary cysts',
'HP:0000803': 'Renal cortical cysts',
'HP:0000003': 'Multicystic kidney dysplasia',
'HP:0008659': 'Multiple small medullary renal cysts',
'HP:0005562': 'Multiple renal cysts',
'HP:0000800': 'Cystic renal dysplasia',
'HP:0012581': 'Solitary renal cyst'}
self.assertDictEqual(sub_phenotypes, known_phenotypes)
if __name__ == '__main__':
unittest.main()
| avg_line_length: 44.076923 | max_line_length: 101 | alphanum_fraction: 0.620419 | quality signals: … |

| hexsha: ab4c81650d8bacd66796cfc5a7b9384015825cae | size: 1,342 | ext: py | lang: Python |
| path: ledis/cli.py | repo: gianghta/Ledis | head: a6b31617621746344408ee411cf510ef3cfb2e7b | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
from typing import Any
from ledis import Ledis
from ledis.exceptions import InvalidUsage
class CLI:
__slots__ = {"ledis", "commands"}
def __init__(self):
self.ledis = Ledis()
self.commands = {
"set": self.ledis.set,
"get": self.ledis.get,
"sadd": self.ledis.sadd,
"srem": self.ledis.srem,
"smembers": self.ledis.smembers,
"sinter": self.ledis.sinter,
"keys": self.ledis.keys,
"del": self.ledis.delete,
"expire": self.ledis.expire,
"ttl": self.ledis.ttl,
"save": self.ledis.save,
"restore": self.ledis.restore,
}
def call(self, query: str) -> Any:
if " " in query:
command, data = query.split(" ", 1)
data = data.split()
else:
command = query
data = []
if command.lower() not in self.commands:
allowed_commands = ", ".join(key.upper() for key in self.commands)
raise InvalidUsage(
f"Command '{command}' is invalid. "
f"Allowed commands are {allowed_commands}."
)
try:
return self.commands[command.lower()](*data)
except TypeError:
raise InvalidUsage("Invalid command format")
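The parser in `call` splits the query on the first space only, then splits the remainder into positional arguments. A self-contained sketch of the same dispatch shape, using a plain dict of hypothetical handler functions instead of a real `Ledis` instance:

```
commands = {
    "set": lambda key, value: "OK {}={}".format(key, value),
    "get": lambda key: "value of {}".format(key),
}


def call(query):
    if " " in query:
        command, data = query.split(" ", 1)
        data = data.split()
    else:
        command, data = query, []
    return commands[command.lower()](*data)


print(call("SET colour blue"))  # -> OK colour=blue
print(call("get colour"))       # -> value of colour
```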
| avg_line_length: 29.173913 | max_line_length: 78 | alphanum_fraction: 0.520119 | quality signals: … |

| hexsha: ab4cd7dafdbec4a4f671b37357e68833614883fc | size: 866 | ext: py | lang: Python |
| path: ClosedLoopTF.py | repo: nazhanshaberi/miniature-octo-barnacle | head: eb1a8b5366003bf2d0f7e89af9d9dea120965f4f | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
#group 1: Question 1(b)
# A control system for positioning the head of a laser printer has the closed loop transfer function:
# !pip install control
import matplotlib.pyplot as plt
import control
a=10 #Value for a
b=50 #value for b
sys1 = control.tf(20*b,[1,20+a,b+20*a,20*b])
print('3rd order system transfer function T1(s)=',sys1)
sys2=control.tf(b,[1,a,b])
print('2nd order system transfer function T2(s)', sys2)
value = sys1.pole()
list_of_poles = [pole.round(2) for pole in value]
print('poles',list_of_poles)
y1=control.step_response(sys1)
y2=control.step_response(sys2)
plt.plot(y1[0],y1[1],'r--', label='3rd order actual system')
plt.plot(y2[0],y2[1],'g', label='2nd order approximation system')
plt.legend()
plt.grid()
plt.xlabel('time (s)')
plt.ylabel('step response y(t)')
plt.title('step response comparison of 3rd and 2nd order system')
plt.show()
| avg_line_length: 29.862069 | max_line_length: 101 | alphanum_fraction: 0.725173 | quality signals: … |

| hexsha: ab4de19d0181da877a10411de0bdd3a02265b4f5 | size: 1,567 | ext: py | lang: Python |
| path: tests/test_vmtkScripts/test_vmtksurfacescaling.py | repo: ramtingh/vmtk | head: 4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3 | licenses: ["Apache-2.0"] |
| stars: null | issues: null | forks: 1 (2019-06-18T23:41:11.000Z to 2019-06-18T23:41:11.000Z) |
## Program: VMTK
## Language: Python
## Date: January 10, 2018
## Version: 1.4
## Copyright (c) Richard Izzo, Luca Antiga, All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this code was contributed by
## Richard Izzo (Github @rlizzo)
## University at Buffalo
import pytest
import vmtk.vmtksurfacescaling as scaling
def test_isotropic_scale(aorta_surface, compare_surfaces):
name = __name__ + '_test_isotropic_scale.vtp'
scaler = scaling.vmtkSurfaceScaling()
scaler.Surface = aorta_surface
scaler.ScaleFactor = 2
scaler.Execute()
    assert compare_surfaces(scaler.Surface, name, tolerance=1.0)
@pytest.mark.parametrize('xfactor,yfactor,zfactor,paramid', [
(2, None, None, '0'),
(None, 2, None, '1'),
(None, None, 2, '2'),
(2, 2, None, '3'),
(2, None, 2, '4'),
(None, 2, 2, '5'),
])
def test_xyz_scale_factors(aorta_surface, compare_surfaces, xfactor,
yfactor, zfactor, paramid):
name = __name__ + '_test_xyz_scale_factors_' + paramid + '.vtp'
scaler = scaling.vmtkSurfaceScaling()
scaler.Surface = aorta_surface
scaler.ScaleFactorX = xfactor
scaler.ScaleFactorY = yfactor
scaler.ScaleFactorZ = zfactor
scaler.Execute()
    assert compare_surfaces(scaler.Surface, name, tolerance=1.0)
| avg_line_length: 31.34 | max_line_length: 75 | alphanum_fraction: 0.678366 | quality signals: … |

| hexsha: ab4e7cdf2bacce34a3021f862bd5b5457c0c010e | size: 3,677 | ext: py | lang: Python |
| path: mlcsim/dist.py | repo: nobodywasishere/MLCSim | head: a3eb3d39b6970a4e706e292c6a283531fb44350c | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
#!/usr/bin/env python
"""Distribution functions
This module provides functions for dealing with normal distributions
and generating error maps.
When called directly as main, it allows for converting a threshold map
into an error map.
```
$ python -m mlcsim.dist --help
usage: dist.py [-h] [-b {1,2,3,4}] -f F [-o O]
options:
-h, --help show this help message and exit
-b {1,2,3,4} bits per cell
-f F Threshold map json to convert
-o O output to file
```
"""
import argparse
import json
from pprint import pprint
from typing import Dict, List
import numpy as np
from scipy import stats as ss # type: ignore
# https://stackoverflow.com/a/32574638/9047818
# https://stackoverflow.com/a/13072714/9047818
def normalMidpoint(mean_a: float, mean_b: float, std_a: float, std_b: float) -> float:
"""Find the midpoint between two normal distributions
Args:
mean_a (float): Mean of first distribution
mean_b (float): Mean of second distribution
std_a (float): Std dev of first distribution
std_b (float): Std dev of second distribution
Returns:
float: Midpoint between distributions
"""
a = 1 / (2 * std_a**2) - 1 / (2 * std_b**2)
b = mean_b / (std_b**2) - mean_a / (std_a**2)
c = (
mean_a**2 / (2 * std_a**2)
- mean_b**2 / (2 * std_b**2)
- np.log(std_b / std_a)
)
roots = np.roots([a, b, c])
masked = np.ma.masked_outside(roots, mean_a, mean_b)
return float(masked[~masked.mask][0][0])
# https://www.askpython.com/python/normal-distribution
def normalChance(mean: float, stdev: float, thr: float) -> float:
"""Find the chance of a normal distribution above/below a given value
Args:
mean (float): Mean of the distribution
stdev (float): Std dev of the distribution
thr (float): Threshold to check above/below
Returns:
float: Chance for threshold to end up above/below the given point in the distribution
"""
chance = ss.norm(loc=mean, scale=stdev).cdf(thr)
return float(chance if mean > thr else 1 - chance)
def genErrorMap(thr_maps: Dict[str, List[List[float]]], bpc: int) -> List[List[float]]:
"""Generate an error map from a threshold map
Args:
thr_maps (dict): Threshold map
bpc (int): Bits per cell
Raises:
ValueError: if the given bpc is not in the threshold map
Returns:
list: Error map from the threshold map
"""
if str(bpc) not in thr_maps.keys():
raise ValueError(f"Threshold map does not have values for {bpc} levels")
thr_map: List[List[float]] = thr_maps[str(bpc)]
err_map = [[0.0]]
for i in range(len(thr_map) - 1):
mid = normalMidpoint(
thr_map[i][0], thr_map[i + 1][0], thr_map[i][1], thr_map[i + 1][1]
)
up = normalChance(thr_map[i][0], thr_map[i][1], mid)
dn = normalChance(thr_map[i + 1][0], thr_map[i + 1][1], mid)
err_map[i].append(up)
err_map.append([dn])
err_map[-1].append(0.0)
return err_map
def _main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-b", type=int, default=2, choices=range(1, 5), help="bits per cell"
)
parser.add_argument("-f", required=True, help="Threshold map json to convert")
parser.add_argument("-o", type=str, help="output to file")
args = parser.parse_args()
with open(args.f) as f:
thr_map = json.load(f)
err_map = genErrorMap(thr_map, args.b)
if args.o:
with open(args.o, "w") as f:
json.dump(err_map, f)
else:
pprint(err_map)
if __name__ == "__main__":
_main()
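A short usage sketch for `genErrorMap`, assuming numpy and scipy are installed and the functions above are in scope; the threshold map here is a toy two-level example, not real device data:

```
# Toy threshold map: one (mean, std dev) pair per level, keyed by bits per cell.
thr_maps = {"1": [[0.0, 0.1], [1.0, 0.1]]}

err_map = genErrorMap(thr_maps, bpc=1)
# err_map[0][1] is the chance level 0 reads above the level-0/level-1 midpoint;
# err_map[1][0] is the chance level 1 reads below it.
print(err_map)
```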
| avg_line_length: 27.856061 | max_line_length: 93 | alphanum_fraction: 0.627142 | quality signals: … |

| hexsha: ab4ecc2d3d04743c00cc721399cf77a91c741662 | size: 2,063 | ext: py | lang: Python |
| path: Pr-Lab5/lab5.py | repo: JackShen1/pr-labs | head: c84df379d8f7b26ccff30248dfb23ae38e0ce7c2 | licenses: ["MIT"] |
| stars: 2 (2021-02-25T11:42:06.000Z to 2021-03-08T20:43:44.000Z) | issues: null | forks: null |
earth = {
"Asia":
{'Japan': ("Tokyo", 377975, 125620000)},
"Europe":
{'Austria': ("Vienna", 83800, 8404000),
'Germany': ("Berlin", 357000, 81751000),
'Great Britain': ("London", 244800, 62700000),
'Iceland': ("Reykjavík", 103000, 317630),
'Italy': ("Rome", 301400, 60605000),
'Spain': ("Madrid", 506000, 46162000),
'Ukraine': ("Kyiv", 603700, 45562000)}
}
class Earth:
def __init__(self, continent):
self.dictionary = earth
self.continent = continent
def continent_out(self, a):
print(
" Country " + " " * 20 + " Capital " + " " * 15 + " Area (km²) " + " " * 7 + " Population " + "\n" +
"-----------" + " " * 20 + "-----------" + " " * 15 + "-------------------" + " " * 7 + "--------------")
for x in self.dictionary.get(a.title()):
print("{:30}".format(x),
"{:<30}{:<25}{:<25}".format(self.dictionary.get(a.title())[x][0],
str(self.dictionary.get(a.title())[x][1]) + " km²",
str(self.dictionary.get(a.title())[x][2])))
def country_out(self, a):
a.insert(0, ('Continent', ('Capital', 'Area (km²)', 'Population')))
b = []
for i in a:
b.extend((i[0], i[1][0], str(i[1][1]), str(i[1][2])))
return ("{:<20}{:<20}{:<25}{:<25}\n" * len(a)).format(*b)
def print_continent(self):
return self.continent_out(self.continent)
def print_country(self, a):
for i in self.dictionary.keys():
continent = i
country_describe = self.dictionary.get(continent).get(a.title())
if country_describe is None: continue
return self.country_out([(continent, country_describe)])
input_str = input("Enter the name of the continent or country: ")
if input_str.title() in earth.keys():
Earth(input_str).print_continent()
else:
print(Earth(continent=None).print_country(input_str))
| avg_line_length: 38.203704 | max_line_length: 125 | alphanum_fraction: 0.49491 | quality signals: … |

| hexsha: ab4f57f26ec8e3a5f4f9c3add8fa33115729abc3 | size: 3,956 | ext: py | lang: Python |
| path: endpoints/api/test/test_tag.py | repo: kwestpharedhat/quay | head: a0df895005bcd3e53847046f69f6a7add87c88fd | licenses: ["Apache-2.0"] |
| stars: null | issues: null | forks: null |
import pytest
from playhouse.test_utils import assert_query_count
from data.registry_model import registry_model
from data.database import Manifest
from endpoints.api.test.shared import conduct_api_call
from endpoints.test.shared import client_with_identity
from endpoints.api.tag import RepositoryTag, RestoreTag, ListRepositoryTags
from test.fixtures import *
@pytest.mark.parametrize(
"expiration_time, expected_status",
[
(None, 201),
("aksdjhasd", 400),
],
)
def test_change_tag_expiration_default(expiration_time, expected_status, client, app):
with client_with_identity("devtable", client) as cl:
params = {
"repository": "devtable/simple",
"tag": "latest",
}
request_body = {
"expiration": expiration_time,
}
conduct_api_call(cl, RepositoryTag, "put", params, request_body, expected_status)
def test_change_tag_expiration(client, app):
with client_with_identity("devtable", client) as cl:
params = {
"repository": "devtable/simple",
"tag": "latest",
}
repo_ref = registry_model.lookup_repository("devtable", "simple")
tag = registry_model.get_repo_tag(repo_ref, "latest")
updated_expiration = tag.lifetime_start_ts + 60 * 60 * 24
request_body = {
"expiration": updated_expiration,
}
conduct_api_call(cl, RepositoryTag, "put", params, request_body, 201)
tag = registry_model.get_repo_tag(repo_ref, "latest")
assert tag.lifetime_end_ts == updated_expiration
@pytest.mark.parametrize(
"manifest_exists,test_tag,expected_status",
[
(True, "-INVALID-TAG-NAME", 400),
(True, ".INVALID-TAG-NAME", 400),
(
True,
"INVALID-TAG_NAME-BECAUSE-THIS-IS-WAY-WAY-TOO-LOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOONG",
400,
),
(False, "newtag", 404),
(True, "generatemanifestfail", None),
(True, "latest", 201),
(True, "newtag", 201),
],
)
def test_move_tag(manifest_exists, test_tag, expected_status, client, app):
with client_with_identity("devtable", client) as cl:
test_image = "unknown"
if manifest_exists:
repo_ref = registry_model.lookup_repository("devtable", "simple")
tag_ref = registry_model.get_repo_tag(repo_ref, "latest")
assert tag_ref
test_image = tag_ref.manifest.digest
params = {"repository": "devtable/simple", "tag": test_tag}
request_body = {"manifest_digest": test_image}
if expected_status is None:
with pytest.raises(Exception):
conduct_api_call(cl, RepositoryTag, "put", params, request_body, expected_status)
else:
conduct_api_call(cl, RepositoryTag, "put", params, request_body, expected_status)
@pytest.mark.parametrize(
"repo_namespace, repo_name, query_count",
[
("devtable", "simple", 4),
("devtable", "history", 4),
("devtable", "complex", 4),
("devtable", "gargantuan", 4),
("buynlarge", "orgrepo", 6), # +2 for permissions checks.
("buynlarge", "anotherorgrepo", 6), # +2 for permissions checks.
],
)
def test_list_repo_tags(repo_namespace, repo_name, client, query_count, app):
# Pre-cache media type loads to ensure consistent query count.
Manifest.media_type.get_name(1)
params = {"repository": repo_namespace + "/" + repo_name}
with client_with_identity("devtable", client) as cl:
with assert_query_count(query_count):
tags = conduct_api_call(cl, ListRepositoryTags, "get", params).json["tags"]
repo_ref = registry_model.lookup_repository(repo_namespace, repo_name)
history, _ = registry_model.list_repository_tag_history(repo_ref)
assert len(tags) == len(history)
| avg_line_length: 34.4 | max_line_length: 146 | alphanum_fraction: 0.654954 | quality signals: … |

| hexsha: ab4f80a6a89a2ba8ed869ff8442a2bb12f645322 | size: 8,541 | ext: py | lang: Python |
| path: inventory.py | repo: Jongerr/vendor_receiving | head: f69f09a5b41d38b45e9ea0bf82590bb27ce913f6 | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
import json
import os
import random
import requests
from passlib.hash import pbkdf2_sha256 as pbk
from PyQt5.QtSql import QSqlDatabase, QSqlQuery
from pprint import pprint
ENCODING = 'utf-8'
DB_PATH = os.path.join(os.path.curdir, 'inventory.db')
def scrambleWord(word):
"""Randomize the letters in word and return the resulting string."""
word_list = list(word)
random.shuffle(word_list)
word = ''.join(word_list)
return word
def generateItems():
"""Generate a dictionary of retail products and store the data in items.json.
Pulls a list of items and artificially doubles it with scrambled item names.
Each item is given a random PLU, UPC, and department number.
Each dictionary key is the item's PLU.
"""
response = requests.get('https://www.randomlists.com/data/things.json')
json_data = response.json()
items = json_data['RandL']['items']
#double sample size by scrambling item names
scrambled_list = []
for item in items:
scrambled_item = scrambleWord(item)
scrambled_list.append(scrambled_item)
items = items + scrambled_list
data = {}
for item in items:
random.seed(item)
upc = random.randint(100000000000, 999999999999)
plu = random.randint(1000, 9999999)
department = (plu % 7) + 1
print('UPC:{0} | PLU:{1} | Item:{2} | D{3}'.format(upc, plu, item, department))
if plu in data:
print('Duplicate found: {}'.format(plu))
continue
data[plu] = {'upc':upc, 'department':department, 'model':item}
with open('items.json', 'w') as f:
json.dump(data, f)
def generatePO():
"""Create dumby Purchase Orders and store them in pos.json.
Each PO is asigned one random vendor and department number,
along with a random length list of items belonging to said department.
Returns: True if items.json successfully opens, False otherwise.
"""
try:
with open('items.json', 'r') as f:
items_dict = json.load(f)
except FileNotFoundError:
return False
vendors = ['Dyson', 'Ingrammicro', 'LKG', 'Inland', 'Sandisk', 'Seagate', 'Hasbro', 'Mattel',\
'Gear Head', 'Logitech', 'NTE', 'Dell', 'Microsoft', 'Right Stuff', 'Alliance', 'Energizer']
po_dict = {}
for i in range(50):
po_num = 24000000 + random.randint(1, 999999)
if po_num in po_dict:
continue
po_dict[po_num] = {'department': (po_num % 7) + 1, 'items': {}, 'vendor': random.choice(vendors)}
for key in items_dict:
match_found = False
loops = 0
while not match_found:
loops += 1
if loops > 200:
print('\n\nToo many loops.\n\n')
break
po, department = random.choice(list(po_dict.items()))
department = department['department']
print('PO department: {}'.format(department))
print('item plu: {} department: {}'.format(key, items_dict[key]['department']))
if items_dict[key]['department'] == department:
max_count = random.randint(1, 20)
po_dict[po]['items'][key] = max_count
match_found = True
with open('pos.json', 'w') as f:
json.dump(po_dict, f)
return True
def fillDB():
"""Create a database and populate two tables(named items and purchase_order).
The 'items' and 'purchase_order' tables are populated with the data from items.json
and pos.json respectively.
"""
with open('items.json') as f:
data = json.load(f)
db = QSqlDatabase.addDatabase('QSQLITE')
db.setDatabaseName(DB_PATH)
if not db.open():
print('DB could not be opened')
error = QSqlDatabase.lastError()
print(error.text())
return False
query = QSqlQuery()
if query.exec_("drop table items"):
print('successfully dropped table')
else:
print('unsuccessfully dropped table')
print(query.lastError().text())
if query.exec_("create table items(plu int primary key, upc varchar(12) unique, "
"model varchar(20), department int)"):
print('success')
else:
print('failure')
print(query.lastError().text())
for key in data:
if query.exec_("insert into items values({}, '{}', '{}', {})".format(key, data[key]['upc'],
data[key]['model'], data[key]['department'])):
print("values({}, {}, {}, {}) successfully inserted.".format(key, data[key]['upc'], data[key]['model'], data[key]['department']))
else:
print("values({}, {}, {}, {}) unsuccessfully inserted.".format(key, data[key]['upc'], data[key]['model'], data[key]['department']))
print(query.lastError().text())
with open('pos.json') as f:
po_dict = json.load(f)
if query.exec_("drop table purchase_order"):
print('successfully dropped table')
else:
print('unsuccessfully dropped table')
print(query.lastError().text())
if query.exec_("create table purchase_order(po int primary key, vendor varchar(30), "
"department int, items blob)"):
print('success')
else:
print('failure')
print(query.lastError().text())
for key in po_dict:
item_string = json.dumps(po_dict[key]['items'])
item_blob = item_string.encode(ENCODING)
if query.exec_("insert into purchase_order values({}, '{}', {}, '{}')"\
.format(key, po_dict[key]['vendor'], po_dict[key]['department'], item_string)):
print("values({}, {}, {}, {}) successfully inserted."\
.format(key, po_dict[key]['vendor'], po_dict[key]['department'], item_string))
else:
print("values({}, {}, {}, {}) unsuccessfully inserted."\
.format(key, po_dict[key]['vendor'], po_dict[key]['department'], item_blob))
print(query.lastError().text())
def createEmployeeTable():
db = QSqlDatabase.addDatabase('QSQLITE')
db.setDatabaseName(DB_PATH)
if not db.open():
print('DB could not be opened')
error = QSqlDatabase.lastError()
print(error.text())
return False
query = QSqlQuery()
if not query.exec_("drop table employee"):
print(query.lastError().text())
if not query.exec_("create table employee(id int primary key, first_name varchar(10), "\
"last_name varchar(10), posistion int, pass_hash varchar(200))"):
print(query.lastError().text())
if not query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(162973, 'Jon', 'Michie', 2, pbk.hash('Michie'))):
print(query.lastError().text())
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(131901, 'Ben', 'Terry', 3, pbk.hash('Terry')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(150697, 'Daniel', 'Silva', 2, pbk.hash('Silva')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(68412, 'James', 'Hutchetson', 2, pbk.hash('Hutchetson')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(161844, 'MacKenly', 'Gamble', 1, pbk.hash('Gamble')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(141047, 'George', 'Huston', 1, pbk.hash('Huston')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(46045, 'Arthur', 'Art', 1, pbk.hash('Art')))
def testHashVerification(name):
db = QSqlDatabase.addDatabase('QSQLITE')
db.setDatabaseName(DB_PATH)
if not db.open():
print('DB could not be opened')
error = QSqlDatabase.lastError()
print(error.text())
return False
query = QSqlQuery()
if not query.exec_("select pass_hash from employee where last_name = '{}'".format(name)):
print(query.lastError().text())
elif not query.next():
print('Table values not found')
else:
pass_hash = query.value(0)
if pbk.verify(name, pass_hash):
print('It\'s a match!')
else:
print('Match not found.')
if __name__ == '__main__':
generateItems()
generatePO()
fillDB()
createEmployeeTable()
testHashVerification('Terry')
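The insert statements above interpolate values straight into the SQL text, which breaks on item names containing quotes. A sketch of the same insert using Qt's bound values instead (sample row values are hypothetical; assumes an open connection as in `fillDB()`):

```
query = QSqlQuery()
query.prepare("insert into items values(?, ?, ?, ?)")
query.addBindValue(4011)                  # plu
query.addBindValue('123456789012')        # upc
query.addBindValue("banana 'cavendish'")  # model: quotes are safe when bound
query.addBindValue(3)                     # department
if not query.exec_():
    print(query.lastError().text())
```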
| avg_line_length: 37.296943 | max_line_length: 143 | alphanum_fraction: 0.583772 | quality signals: … |

| hexsha: ab4fc96f582ec2e7dbdc6b88ad13480fe26a5ca3 | size: 1,749 | ext: py | lang: Python |
| path: lnbits/core/views/lnurl.py | repo: frennkie/lnbits | head: 5fe64d324dc7ac05d1d0fc25eb5ad6a5a414ea8a | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
import requests
from flask import abort, redirect, request, url_for
from lnurl import LnurlWithdrawResponse, handle as handle_lnurl
from lnurl.exceptions import LnurlException
from time import sleep
from lnbits.core import core_app
from lnbits.helpers import Status
from lnbits.settings import WALLET
from ..crud import create_account, get_user, create_wallet, create_payment
@core_app.route("/lnurlwallet")
def lnurlwallet():
memo = "LNbits LNURL funding"
try:
withdraw_res = handle_lnurl(request.args.get("lightning"), response_class=LnurlWithdrawResponse)
except LnurlException:
abort(Status.INTERNAL_SERVER_ERROR, "Could not process withdraw LNURL.")
try:
ok, checking_id, payment_request, error_message = WALLET.create_invoice(withdraw_res.max_sats, memo)
except Exception as e:
ok, error_message = False, str(e)
if not ok:
abort(Status.INTERNAL_SERVER_ERROR, error_message)
r = requests.get(
withdraw_res.callback.base,
params={**withdraw_res.callback.query_params, **{"k1": withdraw_res.k1, "pr": payment_request}},
)
if not r.ok:
abort(Status.INTERNAL_SERVER_ERROR, "Could not process withdraw LNURL.")
for i in range(10):
invoice_status = WALLET.get_invoice_status(checking_id)
sleep(i)
if not invoice_status.paid:
continue
break
user = get_user(create_account().id)
wallet = create_wallet(user_id=user.id)
create_payment(
wallet_id=wallet.id,
checking_id=checking_id,
amount=withdraw_res.max_sats * 1000,
memo=memo,
pending=invoice_status.pending,
)
return redirect(url_for("core.wallet", usr=user.id, wal=wallet.id))
| avg_line_length: 30.155172 | max_line_length: 108 | alphanum_fraction: 0.70669 | quality signals: … |

| hexsha: ab5224dab7764f41af318140ad4ebc3291d1cf50 | size: 1,507 | ext: py | lang: Python |
| path: driver_training/driver_training.py | repo: munishm/MLOpsPython | head: e3ee31f6a0cac645a2b3ad945b8263e07d3085e4 | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
# Import libraries
import argparse
from azureml.core import Run
import joblib
import json
import os
import pandas as pd
import shutil
# Import functions from train.py
from train import split_data, train_model, get_model_metrics
# Get the output folder for the model from the '--output_folder' parameter
parser = argparse.ArgumentParser()
parser.add_argument('--output_folder', type=str, dest='output_folder', default="outputs")
args = parser.parse_args()
print(args)
output_folder = args.output_folder
# Get the experiment run context
run = Run.get_context()
# load the safe driver prediction dataset
train_df = pd.read_csv('porto_seguro_safe_driver_prediction_input.csv')
# Load the parameters for training the model from the file
with open("parameters.json") as f:
pars = json.load(f)
parameters = pars["training"]
# Log each of the parameters to the run
for param_name, param_value in parameters.items():
run.log(param_name, param_value)
# Call the functions defined in this file
train_data, valid_data = split_data(train_df)
data = [train_data, valid_data]
model = train_model(data, parameters)
# Print the resulting metrics for the model
model_metrics = get_model_metrics(model, data)
print(model_metrics)
for k, v in model_metrics.items():
run.log(k, v)
# Save the trained model to the output folder
os.makedirs(output_folder, exist_ok=True)
output_path = output_folder + "/porto_seguro_safe_driver_model.pkl"
joblib.dump(value=model, filename=output_path)
run.complete()
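A minimal sketch of the parameters.json file this script reads; the key names under "training" are hypothetical placeholders, not the repository's actual values:

import json

# Writes a parameters.json of the shape consumed above: a top-level "training"
# object whose key/value pairs are logged to the run and passed to train_model().
params = {"training": {"learning_rate": 0.02, "num_leaves": 32}}  # hypothetical values
with open("parameters.json", "w") as f:
    json.dump(params, f, indent=2)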
| 27.4
| 89
| 0.780358
| 233
| 1,507
| 4.858369
| 0.369099
| 0.095406
| 0.039753
| 0.026502
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134041
| 1,507
| 54
| 90
| 27.907407
| 0.867433
| 0.273391
| 0
| 0
| 0
| 0
| 0.127306
| 0.073801
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.258065
| 0
| 0.258065
| 0.064516
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab57b93b87294fbdfc94236ad38a6b407c2435a8
| 7,463
|
py
|
Python
|
katana/utils/directory_traversal_utils.py
|
warriorframework/Katanaframework
|
9dc78df9d0c8f19ef5eaaa8690fbfa1ad885b323
|
[
"Apache-2.0"
] | 1
|
2020-09-30T11:14:14.000Z
|
2020-09-30T11:14:14.000Z
|
katana/utils/directory_traversal_utils.py
|
warriorframework/Katanaframework
|
9dc78df9d0c8f19ef5eaaa8690fbfa1ad885b323
|
[
"Apache-2.0"
] | 4
|
2020-06-06T01:55:04.000Z
|
2021-06-10T22:57:50.000Z
|
katana/utils/directory_traversal_utils.py
|
warriorframework/Katanaframework
|
9dc78df9d0c8f19ef5eaaa8690fbfa1ad885b323
|
[
"Apache-2.0"
] | 1
|
2020-09-17T08:20:09.000Z
|
2020-09-17T08:20:09.000Z
|
import glob
import os
import re
import errno
import shutil
def get_sub_dirs_and_files(path, abs_path=False):
"""
Gets the direct child sub-files and sub-folders of the given directory
Args:
path: Absolute path to the directory
abs_path: If set to True, it returns a list of absolute paths to the sub-directories and
sub-files instead of directory names only
Returns:
dict: {"folders": [list of (if abs_path is True, then path to) sub-folders],
"files": [list of (if abs_path is True, then path to) sub-files]}
"""
folders = get_sub_folders(path, abs_path=abs_path)
files = get_sub_files(path, abs_path=abs_path)
return {"folders": folders, "files": files}
def get_sub_folders(path, abs_path=False):
"""
Gets the direct child sub-folders of the given directory
Args:
path: Absolute path to the directory
abs_path: If set to True, it returns a list of absolute paths to the sub-directories
instead of directory names only
Returns:
only_folders: [list of sub-folders]
"""
folders = []
temp = glob.glob(path + os.sep + "*")
for folder in temp:
if os.path.isdir(folder) and not folder.endswith('__pycache__'):
folders.append(folder)
only_folders = [f.replace("\\", '/') for f in folders]
if not abs_path:
only_folders = [f.rpartition('/')[2] for f in only_folders]
return only_folders
def get_sub_files(path, abs_path=False):
"""
Gets the direct child sub-files of the given directory
Args:
path: Absolute path to the directory
abs_path: If set to True, it returns a list of absolute paths to the sub-files instead of
file names only
Returns:
only_files: [list of sub-files]
"""
files = glob.glob(path + os.sep + "*.*")
only_files = [f.replace("\\", '/') for f in files]
if not abs_path:
only_files = [f.rpartition('/')[2] for f in only_files]
return only_files
def get_abs_path(relative_path, base_path=None, silence_error=False):
"""
Gets the absolute path from the given relative_path and base_path
Args:
relative_path: relative path to the file/directory
base_path: absolute path from where the relative path should be traced. If not provided, the
current working directory path will be used.
silence_error: Setting this to True would not verify if the directory exists
Returns:
path: absolute path derived from relative_path and base_path
"""
if base_path is None:
base_path = os.getcwd()
path = os.path.join(base_path.strip(), relative_path.strip())
if not silence_error and not os.path.exists(path):
print("An Error Occurred: {0} does not exist".format(path))
path = None
return path
def get_parent_directory(directory_path, level=1):
"""
Gets the parent directory
Args:
directory_path: Absolute path to the file/dir who's parent needs to be returned
level: Indicates how many levels up to go to find the parent
eg: default of 1 goes one level up (to the parent directory)
level=2 would get the grandparent directory
    Returns:
        directory_path: the ancestor directory path, `level` levels above the input path
    """
if directory_path.endswith(os.sep):
directory_path = directory_path[:-1]
for i in range(0, level):
directory_path = os.path.dirname(directory_path)
return directory_path
def get_paths_of_subfiles(parent_dir, extension=re.compile(r"\..*")):
    """
    This function returns a list of all the sub-files inside the given directory
    Args:
        parent_dir: Absolute path to the directory
        extension: Regular expression that would match a file extension. If not provided, file
                   paths of all extensions will be returned
    Returns:
        file_paths: Returns a list of paths to sub-files inside the parent_dir
    """
file_paths = []
sub_files_and_folders = get_sub_dirs_and_files(parent_dir, abs_path=True)
for sub_file in sub_files_and_folders["files"]:
if extension.match(os.path.splitext(sub_file)[1]):
file_paths.append(sub_file)
for sub_folder in sub_files_and_folders["folders"]:
file_paths.extend(get_paths_of_subfiles(sub_folder, extension=extension))
return file_paths
def get_dir_from_path(path):
"""
This function is wrapper function for os.path.basename.
Args:
path: a file path [Eg: /home/user/Documents/GitHub/warriorframework]
Returns:
The base directory name: [Eg: warriorframework]
"""
return os.path.basename(path)
def get_parent_dir_path(path):
"""
This function is wrapper function for os.path.dirname(os.path.normpath(<path>)).
Args:
path: a file path [Eg: /home/user/Documents/GitHub/warriorframework]
Returns:
The parent directory path: [Eg: /home/user/Documents/GitHub]
"""
return os.path.dirname(os.path.normpath(path))
def join_path(path, *paths):
"""
This function is wrapper function for os.path.join.
Args:
path: a file path
*paths: paths to be joined to the file path above
Returns:
Joined path
"""
return os.path.join(path, *paths)
def get_relative_path(path, start_directory):
"""
This is a wrapper function for the os.path.relpath
Args:
        path: Absolute path to the file/dir to which the relative path needs to be calculated.
start_directory: The absolute path to the starting directory
Returns:
rel_path: A relative path from start_directory
"""
if start_directory == "":
print("-- Error -- start_directory is empty.")
relpath = path
else:
try:
relpath = os.path.relpath(path, start_directory)
except Exception as e:
print("-- Error -- {0}".format(e))
relpath = None
else:
if not relpath.startswith(".") and not relpath.startswith(os.sep):
relpath = os.sep + relpath
return relpath
def create_dir(path):
output = path
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
output = False
print("-- A Error Occurred -- {0}".format(exception))
return output
def delete_dir(src):
output = True
try:
shutil.rmtree(src)
except Exception as e:
print(e)
output = False
return output
def file_or_dir_exists(filepath):
output = False
if os.path.exists(filepath):
output = True
return output
def get_direct_sub_files(path, abs_path=False, extension=re.compile(r"\..*")):
"""
Gets the direct child sub-files of the given directory
Args:
path: Absolute path to the directory
abs_path: If set to True, it returns a list of absolute paths to the sub-files instead of
file names only
Returns:
only_files: [list of sub-files]
"""
files = glob.glob(path + os.sep + "*.*")
only_files = [f.replace("\\", '/') for f in files]
if not abs_path:
only_files = [f.rpartition('/')[2] for f in only_files]
final_files = []
for sub_file in only_files:
if extension.match(os.path.splitext(sub_file)[1]):
final_files.append(sub_file)
return final_files
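A minimal usage sketch for the traversal helpers above, assuming the functions are in scope (the module needs only the standard library); it builds a throwaway tree and lists it:

import os
import tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "pkg"))
open(os.path.join(root, "pkg", "mod.py"), "w").close()

print(get_sub_dirs_and_files(root))   # {'folders': ['pkg'], 'files': []}
print(get_paths_of_subfiles(root))    # ['<root>/pkg/mod.py'] with forward slashes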
| 29.498024
| 100
| 0.643307
| 1,045
| 7,463
| 4.461244
| 0.15311
| 0.028529
| 0.017375
| 0.029172
| 0.418061
| 0.360789
| 0.320034
| 0.291291
| 0.28314
| 0.275847
| 0
| 0.002375
| 0.266649
| 7,463
| 252
| 101
| 29.615079
| 0.849443
| 0.436822
| 0
| 0.26
| 0
| 0
| 0.047077
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.14
| false
| 0
| 0.05
| 0
| 0.33
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab58cf0f15e7f5253d551ed9c21fd93da300dfec
| 8,381
|
py
|
Python
|
alignment.py
|
LucaOnline/theanine-synthetase
|
75a9d1f6d853409e12bf9f3b6e5948b594a03217
|
[
"MIT"
] | null | null | null |
alignment.py
|
LucaOnline/theanine-synthetase
|
75a9d1f6d853409e12bf9f3b6e5948b594a03217
|
[
"MIT"
] | 1
|
2021-04-28T21:34:45.000Z
|
2021-05-11T23:29:59.000Z
|
alignment.py
|
LucaOnline/theanine-synthetase
|
75a9d1f6d853409e12bf9f3b6e5948b594a03217
|
[
"MIT"
] | null | null | null |
"""The `alignment` module provides an implementation of the Needleman-Wunsch alignment algorithm."""
from typing import Tuple, Literal, List
from math import floor
import numpy as np
from stats import variance
MOVE_DIAGONAL = 0
MOVE_RIGHT = 1
MOVE_DOWN = 2
EditMove = Literal[MOVE_DIAGONAL, MOVE_RIGHT, MOVE_DOWN]
CHEMICAL_CLASS = {
"A": "Purine",
"G": "Purine",
"T": "Pyrimidine",
"C": "Pyrimidine",
}
class AlignmentResult:
"""
AlignmentResult represents the result of performing an alignment on two sequences.
"""
def __init__(self, alignment_1: str, alignment_2: str):
"""
Produces a new AlignmentResult representing the result of performing an alignment on
two sequences.
"""
if len(alignment_1) != len(alignment_2):
raise ValueError("input strings have differing lengths")
self.alignment_1 = alignment_1
self.alignment_2 = alignment_2
def get_alignment_length(self) -> int:
"""Returns the length of the alignment."""
return len(self.alignment_1)
def get_alignment_1(self) -> str:
"""Returns the first alignment string."""
return self.alignment_1
def get_alignment_2(self) -> str:
"""Returns the second alignment string."""
return self.alignment_2
def get_match_string(self) -> str:
"""Returns the match string for the alignment."""
return "".join(
[
"|" if self.alignment_1[i] == self.alignment_2[i] else " "
for i in range(len(self.alignment_1))
]
)
def clustered_mismatches(self, cluster_count: int) -> List[int]:
"""
Breaks the alignment into `cluster_count` clusters and
returns the number of mismatches in each cluster. If the
alignment cannot be equally divided into the number of
clusters, this leaves the last cluster with the remainder
of the mismatches.
"""
if cluster_count < 1:
raise ValueError("cluster count must be greater than or equal to 1")
match_string = self.get_match_string()
cluster_size = floor(len(match_string) / cluster_count)
return [
match_string[i * cluster_size : i * cluster_size + cluster_size].count(" ")
for i in range(0, len(match_string) // cluster_size)
]
def clustered_mismatch_variance(self, cluster_count: int) -> float:
"""
Returns the variance between the mismatch clusters. The
raw cluster mismatches can be retrieved with the
`clustered_mismatches` method. `cluster_count` controls
the number of clusters used.
"""
return variance(
np.array(self.clustered_mismatches(cluster_count=cluster_count)),
sample=False,
)
def matches(self) -> int:
"""Returns the number of matching elements for the alignment."""
return self.get_match_string().count("|")
def hamming_distance(self) -> int:
"""Returns the Hamming distance of the alignment."""
return len(self.alignment_1) - self.matches()
def largest_mismatch(self) -> Tuple[int, int]:
"""Returns the position and size of the largest mismatch in the alignment."""
matches = self.get_match_string()
found_mismatch = False
largest_mismatch = 0
largest_mismatch_pos = 0
current_mismatch = 0
for i, c in enumerate(matches):
if c == " ":
found_mismatch = True
current_mismatch += 1
if current_mismatch > largest_mismatch:
largest_mismatch = current_mismatch
largest_mismatch_pos = i - largest_mismatch + 1
else:
current_mismatch = 0
if found_mismatch:
return (largest_mismatch_pos, largest_mismatch)
return (-1, 0)
def format_result(self, line_length: int = 80):
"""
Formats the found alignment with pipes between
matching elements. The optional `line_length` parameter
allows for adjusting the number of elements on each set of
lines.
"""
matches = self.get_match_string()
# Chunk lines
alignment_1_lines = [
self.alignment_1[i : i + line_length]
for i in range(0, len(self.alignment_1), line_length)
]
alignment_2_lines = [
self.alignment_2[i : i + line_length]
for i in range(0, len(self.alignment_2), line_length)
]
match_lines = [
matches[i : i + line_length] for i in range(0, len(matches), line_length)
]
# Output line chunks in order
return "\n".join(
[
"\n".join(
[alignment_1_lines[i], match_lines[i], alignment_2_lines[i], ""]
)
for i in range(len(match_lines))
]
)
def examine(self, line_length: int = 80):
"""
Formats and prints the found alignment with pipes between
matching elements. The optional `line_length` parameter
allows for adjusting the number of elements on each set of
lines.
"""
print(self.format_result(line_length=line_length))
def backtrack(quad: np.ndarray) -> EditMove:
"""Trace one step back through an edit matrix."""
if quad.shape == (0, 2):
return MOVE_DOWN
elif quad.shape == (2, 0):
return MOVE_RIGHT
# numpy's argmax doesn't allow for prioritizing non-indels
next_pos = (0, 0)
if quad[0, 1] > quad[next_pos]:
next_pos = (0, 1)
if quad[1, 0] > quad[next_pos]:
next_pos = (1, 0)
if next_pos == (0, 0):
return MOVE_DIAGONAL
elif next_pos == (0, 1):
return MOVE_RIGHT
else:
return MOVE_DOWN
def score_cell(
quad: np.ndarray,
top_char: str,
left_char: str,
nucleotides: bool,
chemical_classes: dict,
) -> int:
"""Calculate the Needleman-Wunsch score for a cell."""
down_score = quad[0, 1] - 1
right_score = quad[1, 0] - 1
# Penalize transversions more heavily
if nucleotides and chemical_classes[top_char] != chemical_classes[left_char]:
down_score -= 1
right_score -= 1
diag_score = quad[0, 0] - 1
if top_char == left_char:
diag_score += 2
return max([down_score, right_score, diag_score])
def align_sequences(
top_seq: str, left_seq: str, nucleotides: bool = True
) -> AlignmentResult:
"""
This function aligns the two provided sequences using Needleman-Wunsch
alignment. It uses a scoring scheme with a gap penalty of -1, a match
bonus of 1, and a mismatch penalty of -1. If the two sequences are
`nucleotides`, then an additional -1 penalty is applied to transversions.
"""
size1 = len(top_seq) + 1
size2 = len(left_seq) + 1
chemical_classes = CHEMICAL_CLASS # Copy this into the local scope so it can be accessed more quickly
# Build search matrix
    search = np.zeros((size2, size1), dtype=int)  # np.int was removed in NumPy 1.24; the builtin int is equivalent
search[0] = [i for i in range(0, -size1, -1)]
search[:, 0] = [i for i in range(0, -size2, -1)]
# Do scoring
for x in range(1, size2):
for y in range(1, size1):
search[x, y] = score_cell(
search[x - 1 : x + 1, y - 1 : y + 1],
top_seq[y - 1],
left_seq[x - 1],
nucleotides,
chemical_classes,
)
search = search.T
# Unwind result
final_top = ""
final_left = ""
bt_x, bt_y = (size1 - 1, size2 - 1)
while bt_x != 0 or bt_y != 0:
next_move = backtrack(search[bt_x - 1 : bt_x + 1, bt_y - 1 : bt_y + 1])
if next_move == MOVE_DIAGONAL:
final_top = top_seq[bt_x - 1] + final_top
final_left = left_seq[bt_y - 1] + final_left
bt_x -= 1
bt_y -= 1
elif next_move == MOVE_DOWN:
final_top = "-" + final_top
final_left = left_seq[bt_y - 1] + final_left
bt_y -= 1
elif next_move == MOVE_RIGHT:
final_top = top_seq[bt_x - 1] + final_top
final_left = "-" + final_left
bt_x -= 1
return AlignmentResult(final_top, final_left)
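A short usage sketch, assuming the module above (and its local stats dependency) imports cleanly; it aligns two toy nucleotide sequences and prints the pieces of the result:

result = align_sequences("GATTACA", "GCATGCT")
print(result.get_alignment_1())
print(result.get_match_string())      # pipes mark matching positions
print(result.get_alignment_2())
print("Hamming distance:", result.hamming_distance())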
| 32.111111
| 106
| 0.595752
| 1,081
| 8,381
| 4.434783
| 0.20259
| 0.037964
| 0.026283
| 0.018356
| 0.22257
| 0.180851
| 0.155611
| 0.147685
| 0.123905
| 0.104714
| 0
| 0.021115
| 0.310583
| 8,381
| 260
| 107
| 32.234615
| 0.808584
| 0.240067
| 0
| 0.111111
| 0
| 0
| 0.021682
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092593
| false
| 0
| 0.024691
| 0
| 0.234568
| 0.006173
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab58dbf2a732c20f8c5b6f7ff7869c6f7c00ca41
| 2,348
|
py
|
Python
|
examples/the-feeling-of-success/mock_grasp_object_op.py
|
yujialuo/erdos
|
7a631b55895f1a473b0f4d38a0d6053851e65b5d
|
[
"Apache-2.0"
] | null | null | null |
examples/the-feeling-of-success/mock_grasp_object_op.py
|
yujialuo/erdos
|
7a631b55895f1a473b0f4d38a0d6053851e65b5d
|
[
"Apache-2.0"
] | null | null | null |
examples/the-feeling-of-success/mock_grasp_object_op.py
|
yujialuo/erdos
|
7a631b55895f1a473b0f4d38a0d6053851e65b5d
|
[
"Apache-2.0"
] | null | null | null |
from mock_gripper_op import MockGripType
from std_msgs.msg import Bool
from erdos.op import Op
from erdos.data_stream import DataStream
from erdos.message import Message
class MockGraspObjectOperator(Op):
"""
Sends a "close" action to the gripper.
"""
gripper_stream = "gripper-output-stream"
action_complete_stream_name = "grasp-action-complete-stream"
def __init__(self, name):
"""
Initializes a lock which blocks future actions to be sent until the
past actions are completed.
"""
super(MockGraspObjectOperator, self).__init__(name)
self.move_ahead_lock = True
@staticmethod
def setup_streams(input_streams, trigger_stream_name, gripper_stream_name):
"""
Registers callbacks on the given streams and returns two streams, one
of which sends the action to the gripper and the other returns a
message upon the completion of the action.
"""
input_streams.filter_name(trigger_stream_name)\
.add_callback(MockGraspObjectOperator.grasp_object)
input_streams.filter_name(gripper_stream_name)\
.add_callback(MockGraspObjectOperator.release_lock)
return [
DataStream(
data_type=MockGripType,
name=MockGraspObjectOperator.gripper_stream),
DataStream(
data_type=Bool,
name=MockGraspObjectOperator.action_complete_stream_name)
]
def grasp_object(self, msg):
"""
Sends a close action to the gripper and waits for its completion.
"""
mock_grasp_object = MockGripType("close")
mock_grasp_msg = Message(mock_grasp_object, msg.timestamp)
self.move_ahead_lock = False
self.get_output_stream(
MockGraspObjectOperator.gripper_stream).send(mock_grasp_msg)
while not self.move_ahead_lock:
pass
action_complete_msg = Message(True, msg.timestamp)
self.get_output_stream(
MockGraspObjectOperator.action_complete_stream_name).send(
action_complete_msg)
def release_lock(self, msg):
"""
Releases the lock so that new actions can be sent to the gripper.
"""
self.move_ahead_lock = True
def execute(self):
self.spin()
| 34.028986
| 79
| 0.663969
| 269
| 2,348
| 5.535316
| 0.32342
| 0.047011
| 0.032236
| 0.045668
| 0.246474
| 0.038952
| 0.038952
| 0
| 0
| 0
| 0
| 0
| 0.270443
| 2,348
| 68
| 80
| 34.529412
| 0.869235
| 0.189097
| 0
| 0.146341
| 0
| 0
| 0.030474
| 0.027652
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121951
| false
| 0.02439
| 0.121951
| 0
| 0.341463
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab58e931b2be617a8f028a51f43ae40e92333614
| 3,683
|
py
|
Python
|
src/pyfsa/lib/fsa.py
|
taliamax/pyfsa
|
d92faa96c1e17e4016df7b367c7d405a07f1253b
|
[
"Apache-2.0"
] | 1
|
2021-01-21T21:48:26.000Z
|
2021-01-21T21:48:26.000Z
|
src/pyfsa/lib/fsa.py
|
taliamax/pyfsa
|
d92faa96c1e17e4016df7b367c7d405a07f1253b
|
[
"Apache-2.0"
] | null | null | null |
src/pyfsa/lib/fsa.py
|
taliamax/pyfsa
|
d92faa96c1e17e4016df7b367c7d405a07f1253b
|
[
"Apache-2.0"
] | 4
|
2021-01-22T04:04:22.000Z
|
2021-11-01T14:43:09.000Z
|
# -*- coding: utf-8 -*-
import pygraphviz as gv # type: ignore
import itertools as it
from typing import (
List,
Optional,
)
from pyfsa.lib.types import TransitionsTable
def get_state_graph(
transitions: TransitionsTable,
start: Optional[str] = None,
end: Optional[str] = None,
nodes: Optional[List[str]] = None,
name: str = 'output.png',
draw: bool = True,
engine: str = 'circo',
) -> gv.AGraph:
'''
From a transition dictionary, creates a pygraphviz graph
of all the possible states and how to reach the given state.
Returns the resulting graph.
'''
graph = gv.AGraph(directed=True, strict=False, ranksep='1')
key_num = it.count()
if nodes is not None:
graph.add_nodes_from(nodes)
else:
graph.add_nodes_from(transitions.keys())
for node, transition_row in transitions.items():
for label, targets in transition_row.items():
for target in targets:
graph.add_edge(
node,
target,
key=f'{next(key_num)}',
label=label,
weight=1,
)
if start:
n: gv.Node = graph.get_node(start)
n.attr['color'] = '#0000FF'
n.attr['style'] = 'filled'
if end:
n = graph.get_node(end)
n.attr['color'] = '#00FF00'
n.attr['style'] = 'filled'
if draw:
graph.layout(prog=engine)
graph.draw(name)
return graph
def verify_string(
string: str,
starting_state: str,
final_state: str,
transitions: TransitionsTable,
) -> bool:
'''
Given a transitions table, a start and end state, and
some string, verifies that executing the finite state machine
on the given string produces the desired final state.
'''
current_state = starting_state
for letter in string:
transition = transitions[current_state]
current_state = transition[letter][0]
return current_state == final_state
def render_string_graph(
string: str,
start: str,
end: str,
transitions: TransitionsTable,
name: str = 'output.png',
draw: bool = True,
engine: str = 'circo'
) -> gv.AGraph:
    '''
    Given a string, a start state, an end state, and a
    transitions table, produces the graph resulting from
    the traversal of the string through the states defined
    in the transitions table. By default, it will
    output a png file of the result, but that can be
    suppressed.
    '''
graph = gv.AGraph(directed=True)
graph.graph_attr['label'] = f'Evaluating {string}'
node_names = it.count()
current_state = start
node_name = next(node_names)
graph.add_node(node_name)
current_node = gv.Node(graph, node_name)
current_node.attr['label'] = current_state
current_node.attr['fillcolor'] = '#0000FF'
current_node.attr['style'] = 'filled'
for letter in string:
node_name = next(node_names)
graph.add_node(node_name)
next_node = gv.Node(graph, node_name)
        # TODO: The algorithm prioritizes just the first
        # found state, which may not produce a correct
        # answer. This needs to be fixed.
next_state = transitions[current_state][letter][0]
next_node.attr['label'] = next_state
graph.add_edge(current_node, next_node, label=letter)
current_node = next_node
current_state = next_state
if current_state == end:
current_node.attr['style'] = 'filled'
current_node.attr['fillcolor'] = '#00FF00'
if draw:
graph.layout(prog=engine)
graph.draw(name)
return graph
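A minimal sketch of the transitions-table shape these functions consume; verify_string needs only the dictionary (importing the module itself assumes pygraphviz is installed):

# Each state maps edge labels to a list of target states.
transitions = {
    "s0": {"a": ["s1"], "b": ["s0"]},
    "s1": {"a": ["s1"], "b": ["s0"]},
}
# Reading "aab" from s0 goes s0 -a-> s1 -a-> s1 -b-> s0, so this prints True.
print(verify_string("aab", "s0", "s0", transitions))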
| 27.485075
| 65
| 0.620961
| 467
| 3,683
| 4.775161
| 0.295503
| 0.04843
| 0.033632
| 0.021525
| 0.208969
| 0.147982
| 0.127354
| 0.127354
| 0.127354
| 0.127354
| 0
| 0.007904
| 0.278577
| 3,683
| 133
| 66
| 27.691729
| 0.831389
| 0.199566
| 0
| 0.322222
| 0
| 0
| 0.062893
| 0
| 0
| 0
| 0
| 0.007519
| 0
| 1
| 0.033333
| false
| 0
| 0.044444
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab5be15bbc59ecc18cf93a6170b0dd272f33cfd6
| 833
|
py
|
Python
|
tests/test_slison.py
|
Habidatum/slisonner
|
488be30a199a5d29271e24377c37a7ad83d52e3e
|
[
"MIT"
] | 2
|
2017-02-06T17:15:11.000Z
|
2017-04-17T13:18:18.000Z
|
tests/test_slison.py
|
Habidatum/slisonner
|
488be30a199a5d29271e24377c37a7ad83d52e3e
|
[
"MIT"
] | null | null | null |
tests/test_slison.py
|
Habidatum/slisonner
|
488be30a199a5d29271e24377c37a7ad83d52e3e
|
[
"MIT"
] | null | null | null |
from slisonner import decoder, encoder
from tests import mocker
from tempfile import mkdtemp
from shutil import rmtree
def test_full_encode_decode_cycle():
temp_out_dir = mkdtemp()
slice_id = '2015-01-02 00:00:00'
x_size, y_size = 10, 16
temp_slice_path = mocker.generate_slice(x_size, y_size, 'float32')
slice_meta_encoded, slison_filepath = encoder.encode_slice_file(
filepath=temp_slice_path,
slice_duration=300,
timestamp=slice_id,
layer_id='london',
x_size=x_size,
y_size=y_size,
value_type='float32',
out_dir=temp_out_dir)
slice_data, slice_meta_decoded = decoder.decode_slison(slison_filepath)
for key, encoded_value in slice_meta_encoded.items():
assert encoded_value == slice_meta_decoded[key]
rmtree(temp_out_dir)
| 29.75
| 75
| 0.716687
| 120
| 833
| 4.6
| 0.441667
| 0.043478
| 0.065217
| 0.054348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037764
| 0.205282
| 833
| 27
| 76
| 30.851852
| 0.796073
| 0
| 0
| 0
| 0
| 0
| 0.046819
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 1
| 0.045455
| false
| 0
| 0.181818
| 0
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab5e4dfefa5bc8fdcbff9af5c74dd0475612065f
| 372
|
py
|
Python
|
exercises/pyfiles/ex812_polarsincos.py
|
TUDelft-AE-Python/ae1205-exercises
|
342d1d567b64d3ccb3371ce9826c02a87a155fa8
|
[
"MIT"
] | 1
|
2021-10-05T04:49:54.000Z
|
2021-10-05T04:49:54.000Z
|
exercises/pyfiles/ex812_polarsincos.py
|
TUDelft-AE1205/ae1205-exercises
|
342d1d567b64d3ccb3371ce9826c02a87a155fa8
|
[
"MIT"
] | null | null | null |
exercises/pyfiles/ex812_polarsincos.py
|
TUDelft-AE1205/ae1205-exercises
|
342d1d567b64d3ccb3371ce9826c02a87a155fa8
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import math
xtab = []
ytab = []
for i in range(0, 628):
# Calculate polar coordinates for provided equation
phi = float(i) / 100.0
r = 4 * math.cos(2 * phi)
# Convert to Cartesian and store in lists
x = r * math.cos(phi)
y = r * math.sin(phi)
xtab.append(x)
ytab.append(y)
plt.plot(xtab, ytab)
plt.show()
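The same rose curve r = 4*cos(2*phi) can be computed without an explicit loop; a vectorized numpy sketch equivalent to the script above:

import numpy as np
import matplotlib.pyplot as plt

phi = np.arange(628) / 100.0          # same 0.00 .. 6.27 sweep as the loop
r = 4 * np.cos(2 * phi)
plt.plot(r * np.cos(phi), r * np.sin(phi))
plt.show()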
| 19.578947
| 55
| 0.620968
| 61
| 372
| 3.786885
| 0.622951
| 0.069264
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035971
| 0.252688
| 372
| 19
| 56
| 19.578947
| 0.794964
| 0.239247
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab60f9944d5fde6a7550fbbfc9b1e8fd43e10b50
| 1,518
|
py
|
Python
|
W-DCGAN/model.py
|
lmyybh/pytorch-networks
|
8da055f5042c3803b275734afc89d33d239d7585
|
[
"MulanPSL-1.0"
] | null | null | null |
W-DCGAN/model.py
|
lmyybh/pytorch-networks
|
8da055f5042c3803b275734afc89d33d239d7585
|
[
"MulanPSL-1.0"
] | null | null | null |
W-DCGAN/model.py
|
lmyybh/pytorch-networks
|
8da055f5042c3803b275734afc89d33d239d7585
|
[
"MulanPSL-1.0"
] | null | null | null |
import torch
import torch.nn as nn
class Generator(nn.Module):
def __init__(self, signal_size, out_channels=3):
super(Generator, self).__init__()
self.linear = nn.Linear(signal_size, 1024*4*4)
convs = []
channels = [1024, 512, 256, 128]
for i in range(1, len(channels)):
convs.append(nn.ConvTranspose2d(channels[i-1], channels[i], 2, stride=2))
convs.append(nn.BatchNorm2d(channels[i]))
convs.append(nn.LeakyReLU(0.2, inplace=True))
convs.append(nn.ConvTranspose2d(channels[-1], out_channels, 2, stride=2))
convs.append(nn.Tanh())
self.convs = nn.Sequential(*convs)
def forward(self, x):
x = self.linear(x)
x = x.view(x.size(0), 1024, 4, 4)
x = self.convs(x)
return x
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
channels = [3, 32, 64, 128, 256, 512, 1024]
convs = []
for i in range(1, len(channels)):
convs.append(nn.Conv2d(channels[i-1], channels[i], 3, padding=1, stride=2))
convs.append(nn.BatchNorm2d(channels[i]))
convs.append(nn.LeakyReLU(0.2, inplace=True))
self.convs = nn.Sequential(*convs)
self.linear = nn.Linear(1024*1*1, 1)
def forward(self, x):
x = self.convs(x)
x = x.view(x.size(0), -1)
x = self.linear(x)
# x = torch.sigmoid(x)
return x
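A quick shape check for the two networks above; a minimal sketch assuming a latent size of 100 (any positive signal_size works, since the first linear layer maps it to 1024*4*4):

import torch

G = Generator(signal_size=100)
D = Discriminator()
z = torch.randn(4, 100)
fake = G(z)                         # four stride-2 upsamplings take 4x4 to 64x64
print(fake.shape, D(fake).shape)    # torch.Size([4, 3, 64, 64]) torch.Size([4, 1])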
| 33
| 87
| 0.563241
| 207
| 1,518
| 4.033816
| 0.241546
| 0.105389
| 0.124551
| 0.064671
| 0.608383
| 0.371257
| 0.297006
| 0.265868
| 0.265868
| 0.265868
| 0
| 0.069509
| 0.289196
| 1,518
| 45
| 88
| 33.733333
| 0.704356
| 0.013175
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.055556
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab61967196abc0b2e677bfd1d2c054cef2f1f32b
| 792
|
py
|
Python
|
rational.py
|
navel0810/chibi
|
d2e9a791492352c3c1b76c841a3ad30df2f444fd
|
[
"MIT"
] | null | null | null |
rational.py
|
navel0810/chibi
|
d2e9a791492352c3c1b76c841a3ad30df2f444fd
|
[
"MIT"
] | null | null | null |
rational.py
|
navel0810/chibi
|
d2e9a791492352c3c1b76c841a3ad30df2f444fd
|
[
"MIT"
] | null | null | null |
import math
class Q(object):
def __init__(self,a,b=1):
gcd=math.gcd(a,b)
self.a=a//gcd
self.b=b//gcd
def __repr__(self):
if self.b==1:
return str(self.a)
return f'{self.a}/{self.b}'
def __add__(self,q):
a=self.a
b=self.b
c=q.a
d=q.b
return Q(a*d+b*c,b*d)
def __sub__(self,q):
a=self.a
b=self.b
c=q.a
d=q.b
return Q(a*d-b*c,b*d)
def __mul__(self,q):
a=self.a
b=self.b
c=q.a
d=q.b
return Q(a*c,b*d)
def __truediv__(self,q):
a=self.a
b=self.b
c=q.a
d=q.b
return Q(a*d,b*c)
q1=Q(1,2)
q2=Q(1,3)
print(q1/q2)
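A few more operations on Q, assuming the class above is in scope; every result is reduced through math.gcd in __init__:

print(Q(1, 2) + Q(1, 3))   # 5/6
print(Q(2, 4))             # 1/2 after reduction
print(Q(3, 4) * Q(2, 3))   # 1/2, since 6/12 reduces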
| 17.217391
| 36
| 0.412879
| 144
| 792
| 2.104167
| 0.194444
| 0.079208
| 0.069307
| 0.132013
| 0.471947
| 0.471947
| 0.471947
| 0.471947
| 0.471947
| 0.471947
| 0
| 0.022173
| 0.430556
| 792
| 46
| 37
| 17.217391
| 0.649667
| 0
| 0
| 0.432432
| 0
| 0
| 0.022727
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.162162
| false
| 0
| 0.027027
| 0
| 0.378378
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db3b69e3c4b3003f7bdcfee2a7ee4c426c44a37d
| 5,738
|
py
|
Python
|
views/auth.py
|
bluebibi/flask_rest
|
9b1ee876060bca5d97459bb894c73530f66c4c15
|
[
"MIT"
] | null | null | null |
views/auth.py
|
bluebibi/flask_rest
|
9b1ee876060bca5d97459bb894c73530f66c4c15
|
[
"MIT"
] | 1
|
2022-02-11T03:43:51.000Z
|
2022-02-11T03:43:51.000Z
|
views/auth.py
|
bluebibi/flask_rest
|
9b1ee876060bca5d97459bb894c73530f66c4c15
|
[
"MIT"
] | 2
|
2019-11-19T02:09:03.000Z
|
2020-04-04T06:55:14.000Z
|
from flask import Blueprint, redirect, render_template, request, flash, session
from database import base
from database.base import User
from forms import UserForm, LoginForm, MyPageUserForm
from flask_login import login_required, login_user, logout_user, current_user
import requests
auth_blueprint = Blueprint('auth', __name__)
kakao_oauth = {}
@auth_blueprint.route('/my_page', methods=['GET', 'POST'])
@login_required
def _user():
form = MyPageUserForm()
q = base.db_session.query(User).filter(User.email == current_user.email)
user = q.first()
if request.method == 'POST':
if form.validate_on_submit():
user.email = request.form['email']
user.name = request.form['name']
user.set_password(request.form['password'])
user.affiliation = request.form['affiliation']
base.db_session.commit()
            flash('Your membership information has been updated.')
return redirect('/auth/my_page')
return render_template("my_page.html", user=user, form=form, kakao_oauth=kakao_oauth)
def login_process(email, password):
q = base.db_session.query(User).filter(User.email == email)
user = q.first()
if user:
if user.authenticate(password):
login_result = login_user(user)
if login_result:
print("사용자(사용자 이메일:{0})의 로그인 성공!".format(current_user.email))
return '/'
else:
            flash('Please re-check your password and try again.')
return '/auth/login'
else:
        flash('Please re-check your email and password.')
return '/auth/login'
@auth_blueprint.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect('/')
form = LoginForm()
if request.method == 'POST':
if form.validate_on_submit():
redirect_url = login_process(form.data['email'], form.data['password'])
return redirect(redirect_url)
return render_template('login.html', form=form, current_user=current_user)
@auth_blueprint.route('kakao_oauth_redirect')
def kakao_oauth_redirect():
code = str(request.args.get('code'))
url = "https://kauth.kakao.com/oauth/token"
data = "grant_type=authorization_code" \
"&client_id=0eb67d9cd0372c01d3915bbd934b4f6d" \
"&redirect_uri=http://localhost:8080/auth/kakao_oauth_redirect" \
"&code={0}".format(code)
headers = {
"Content-Type": "application/x-www-form-urlencoded;charset=utf-8",
"Cache-Control": "no-cache"
}
response = requests.post(
url=url,
data=data,
headers=headers
)
#print("kakao_oauth_redirect", response.json())
kakao_oauth["access_token"] = response.json()["access_token"]
kakao_oauth["expires_in"] = response.json()["expires_in"]
kakao_oauth["refresh_token"] = response.json()["refresh_token"]
kakao_oauth["refresh_token_expires_in"] = response.json()["refresh_token_expires_in"]
kakao_oauth["scope"] = response.json()["scope"]
kakao_oauth["token_type"] = response.json()["token_type"]
if "kaccount_email" not in kakao_oauth or kakao_oauth['kaccount_email'] is None:
kakao_me_and_signup()
redirect_url = login_process(kakao_oauth["kaccount_email"], "1234")
return redirect(redirect_url)
def kakao_me_and_signup():
url = "https://kapi.kakao.com/v1/user/me"
headers = {
"Authorization": "Bearer {0}".format(kakao_oauth["access_token"]),
"Content-Type": "application/x-www-form-urlencoded;charset=utf-8"
}
response = requests.post(
url=url,
headers=headers
)
#print("kakao_me_and_signup", response.json())
kakao_oauth["kaccount_email"] = response.json()["kaccount_email"]
kakao_oauth["id"] = response.json()["id"]
kakao_oauth["kakao_profile_image"] = response.json()["properties"]["profile_image"]
kakao_oauth["nickname"] = response.json()["properties"]["nickname"]
kakao_oauth["kakao_thumbnail_image"] = response.json()["properties"]["thumbnail_image"]
c = base.db_session.query(User).filter(User.email == kakao_oauth["kaccount_email"]).count()
if c == 0:
user = User(name=kakao_oauth["nickname"], email=kakao_oauth["kaccount_email"], affiliation=None)
user.set_password("1234")
base.db_session.add(user)
base.db_session.commit()
def kakao_logout():
url = "https://kapi.kakao.com/v1/user/logout"
headers = {
"Authorization": "Bearer {0}".format(kakao_oauth["access_token"])
}
response = requests.post(
url=url,
headers=headers
)
if response.status_code == 200:
kakao_oauth["kaccount_email"] = None
kakao_oauth["id"] = None
kakao_oauth["kakao_profile_image"] = None
kakao_oauth["nickname"] = None
kakao_oauth["kakao_thumbnail_image"] = None
@auth_blueprint.route("/logout")
@login_required
def logout():
logout_user()
if kakao_oauth and "kaccount_email" in kakao_oauth:
kakao_logout()
return redirect('/')
@auth_blueprint.route('/signup', methods=['GET', 'POST'])
def signup():
form = UserForm()
if request.method == 'POST':
if form.validate_on_submit():
new_user = User()
new_user.email = request.form['email']
new_user.name = request.form['name']
new_user.set_password(request.form['password'])
new_user.affiliation = request.form['affiliation']
base.db_session.add(new_user)
base.db_session.commit()
            flash('Registration completed successfully. Please log in again with the information you signed up with.')
return redirect('/auth/login')
return render_template("signup.html", form=form)
| 32.977011
| 104
| 0.648832
| 695
| 5,738
| 5.136691
| 0.205755
| 0.092437
| 0.029132
| 0.038655
| 0.341176
| 0.226891
| 0.207843
| 0.152941
| 0.114566
| 0.028571
| 0
| 0.009486
| 0.210003
| 5,738
| 174
| 105
| 32.977011
| 0.778072
| 0.015859
| 0
| 0.246269
| 0
| 0
| 0.22972
| 0.045342
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059701
| false
| 0.044776
| 0.044776
| 0
| 0.19403
| 0.059701
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db3d773e3532da7f969a86616e27db866a72624c
| 3,500
|
py
|
Python
|
doc/.src/book/exer/cable_sin.py
|
hplgit/fem-book
|
c23099715dc3cb72e7f4d37625e6f9614ee5fc4e
|
[
"MIT"
] | 86
|
2015-12-17T12:57:11.000Z
|
2022-03-26T01:53:47.000Z
|
doc/.src/book/exer/cable_sin.py
|
hplgit/fem-book
|
c23099715dc3cb72e7f4d37625e6f9614ee5fc4e
|
[
"MIT"
] | 9
|
2017-04-16T21:57:29.000Z
|
2021-04-17T08:09:30.000Z
|
doc/.src/book/exer/cable_sin.py
|
hplgit/fem-book
|
c23099715dc3cb72e7f4d37625e6f9614ee5fc4e
|
[
"MIT"
] | 43
|
2016-03-11T19:33:14.000Z
|
2022-03-05T00:21:57.000Z
|
import matplotlib.pyplot as plt
def model():
"""Solve u'' = -1, u(0)=0, u'(1)=0."""
import sympy as sym
x, c_0, c_1, = sym.symbols('x c_0 c_1')
u_x = sym.integrate(1, (x, 0, x)) + c_0
u = sym.integrate(u_x, (x, 0, x)) + c_1
r = sym.solve([u.subs(x,0) - 0,
sym.diff(u,x).subs(x, 1) - 0],
[c_0, c_1])
u = u.subs(c_0, r[c_0]).subs(c_1, r[c_1])
u = sym.simplify(sym.expand(u))
return u
def midpoint_rule(f, M=100000):
"""Integrate f(x) over [0,1] using M intervals."""
from numpy import sum, linspace
dx = 1.0/M # interval length
x = linspace(dx/2, 1-dx/2, M) # integration points
return dx*sum(f(x))
def check_integral_b():
from numpy import pi, sin
for i in range(12):
exact = 2/(pi*(2*i+1))
numerical = midpoint_rule(
f=lambda x: sin((2*i+1)*pi*x/2))
print(i, abs(exact - numerical))
def sine_sum(x, N):
s = 0
from numpy import pi, sin, zeros
u = [] # u[k] is the sum i=0,...,k
k = 0
for i in range(N+1):
s += - 16.0/((2*i+1)**3*pi**3)*sin((2*i+1)*pi*x/2)
u.append(s.copy()) # important with copy!
return u
def plot_sine_sum():
from numpy import linspace
x = linspace(0, 1, 501) # coordinates for plot
u = sine_sum(x, N=10)
u_e = 0.5*x*(x-2)
N_values = 0, 1, 10
for k in N_values:
plt.plot(x, u[k])
plt.plot(x, u_e)
plt.legend(['N=%d' % k for k in N_values] + ['exact'],
loc='upper right')
plt.xlabel('$x$'); plt.ylabel('$u$')
plt.savefig('tmpc.png'); plt.savefig('tmpc.pdf')
def check_integral_d():
from numpy import pi, sin
for i in range(24):
if i % 2 == 0:
exact = 2/(pi*(i+1))
elif (i-1) % 4 == 0:
exact = 2*2/(pi*(i+1))
else:
exact = 0
numerical = midpoint_rule(
f=lambda x: sin((i+1)*pi*x/2))
print(i, abs(exact - numerical))
def check_integral_d_sympy_answer():
from numpy import pi, sin
for i in range(12):
exact = 2/(pi*(i+1))
numerical = midpoint_rule(
f=lambda x: sin((i+1)*pi*x/2))
print(i, abs(exact - numerical))
def sine_sum_d(x, N):
s = 0
from numpy import pi, sin, zeros
u = [] # u[k] is the sum i=0,...,k
k = 0
for i in range(N+1):
if i % 2 == 0: # even i
s += - 16.0/((i+1)**3*pi**3)*sin((i+1)*pi*x/2)
elif (i-1) % 4 == 0: # 1, 5, 9, 13, 17
s += - 2*16.0/((i+1)**3*pi**3)*sin((i+1)*pi*x/2)
else:
s += 0
u.append(s.copy())
return u
def plot_sine_sum_d():
from numpy import linspace
x = linspace(0, 1, 501) # coordinates for plot
u = sine_sum_d(x, N=20)
u_e = 0.5*x*(x-2)
N_values = 0, 1, 2, 3, 20
for k in N_values:
plt.plot(x, u[k])
plt.plot(x, u_e)
plt.legend(['N=%d' % k for k in N_values] + ['exact'],
loc='upper right')
plt.xlabel('$x$'); plt.ylabel('$u$')
#plt.axis([0.9, 1, -0.52, -0.49])
plt.savefig('tmpd.png'); plt.savefig('tmpd.pdf')
if __name__ == '__main__':
import sys
print(model())
print('sine 2*i+1 integral:')
check_integral_b()
print('sine i+1 integral, sympy answer:')
check_integral_d_sympy_answer()
print('sine i+1 integral:')
check_integral_d()
#sys.exit(0)
plot_sine_sum()
plt.figure()
plot_sine_sum_d()
plt.show()
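The boundary-value problem and the sine expansion checked numerically above can be stated compactly; in the notation the script implements (u_e = 0.5*x*(x-2) and the coefficients in sine_sum):

\[ u''(x) = 1,\quad u(0) = 0,\quad u'(1) = 0 \;\Longrightarrow\; u(x) = \tfrac{1}{2}\,x\,(x-2), \]
\[ u(x) = -\sum_{i=0}^{\infty} \frac{16}{(2i+1)^{3}\pi^{3}} \, \sin\!\left(\frac{(2i+1)\pi x}{2}\right). \]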
| 28.92562
| 60
| 0.512571
| 622
| 3,500
| 2.77492
| 0.173633
| 0.020857
| 0.069525
| 0.017381
| 0.618772
| 0.520278
| 0.516802
| 0.48146
| 0.48146
| 0.442642
| 0
| 0.064754
| 0.302857
| 3,500
| 120
| 61
| 29.166667
| 0.642623
| 0.084
| 0
| 0.514286
| 0
| 0
| 0.053689
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.104762
| 0
| 0.228571
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db3df0c689b2c97c43cbf7385cac4c429ebf9bf0
| 2,440
|
py
|
Python
|
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/12_features/numtrees_20/rule_6.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/12_features/numtrees_20/rule_6.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/12_features/numtrees_20/rule_6.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
def findDecision(obj): #obj[0]: Passanger, obj[1]: Time, obj[2]: Coupon, obj[3]: Gender, obj[4]: Age, obj[5]: Education, obj[6]: Occupation, obj[7]: Bar, obj[8]: Coffeehouse, obj[9]: Restaurant20to50, obj[10]: Direction_same, obj[11]: Distance
# {"feature": "Age", "instances": 51, "metric_value": 0.9662, "depth": 1}
if obj[4]>0:
# {"feature": "Occupation", "instances": 44, "metric_value": 0.9024, "depth": 2}
if obj[6]>1:
# {"feature": "Bar", "instances": 33, "metric_value": 0.9834, "depth": 3}
if obj[7]<=1.0:
# {"feature": "Education", "instances": 22, "metric_value": 0.994, "depth": 4}
if obj[5]>0:
# {"feature": "Passanger", "instances": 17, "metric_value": 0.9774, "depth": 5}
if obj[0]<=2:
# {"feature": "Time", "instances": 11, "metric_value": 0.994, "depth": 6}
if obj[1]<=2:
# {"feature": "Restaurant20to50", "instances": 8, "metric_value": 0.9544, "depth": 7}
if obj[9]>0.0:
# {"feature": "Coffeehouse", "instances": 6, "metric_value": 0.65, "depth": 8}
if obj[8]<=2.0:
return 'True'
elif obj[8]>2.0:
return 'False'
else: return 'False'
elif obj[9]<=0.0:
return 'False'
else: return 'False'
elif obj[1]>2:
return 'False'
else: return 'False'
elif obj[0]>2:
# {"feature": "Gender", "instances": 6, "metric_value": 0.65, "depth": 6}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
# {"feature": "Time", "instances": 2, "metric_value": 1.0, "depth": 7}
if obj[1]<=2:
return 'True'
elif obj[1]>2:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[5]<=0:
return 'False'
else: return 'False'
elif obj[7]>1.0:
# {"feature": "Coupon", "instances": 11, "metric_value": 0.684, "depth": 4}
if obj[2]>2:
return 'True'
elif obj[2]<=2:
# {"feature": "Direction_same", "instances": 4, "metric_value": 1.0, "depth": 5}
if obj[10]>0:
return 'True'
elif obj[10]<=0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[6]<=1:
return 'True'
else: return 'True'
elif obj[4]<=0:
# {"feature": "Passanger", "instances": 7, "metric_value": 0.5917, "depth": 2}
if obj[0]>0:
return 'False'
elif obj[0]<=0:
return 'True'
else: return 'True'
else: return 'False'
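A minimal call sketch; the 12-element feature vector below is hypothetical and only illustrates the obj[...] indexing documented in the comment on the def line:

# [Passanger, Time, Coupon, Gender, Age, Education, Occupation,
#  Bar, Coffeehouse, Restaurant20to50, Direction_same, Distance]
sample = [1, 2, 3, 1, 2, 1, 4, 1.0, 1.0, 1.0, 0, 1]
print(findDecision(sample))   # 'True' for this particular sample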
| 35.882353
| 243
| 0.547951
| 348
| 2,440
| 3.798851
| 0.152299
| 0.11649
| 0.099849
| 0.102874
| 0.480333
| 0.270802
| 0.270045
| 0.197428
| 0.125567
| 0.092284
| 0
| 0.090222
| 0.241393
| 2,440
| 67
| 244
| 36.41791
| 0.623987
| 0.492213
| 0
| 0.574074
| 0
| 0
| 0.099592
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018519
| false
| 0
| 0
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db40a9951b31f74580005898f3f6b78a4f2c461b
| 1,080
|
py
|
Python
|
eth2/beacon/types/historical_batch.py
|
AndrewBezold/trinity
|
bc656da4dece431a0c929a99349d45faf75decf8
|
[
"MIT"
] | null | null | null |
eth2/beacon/types/historical_batch.py
|
AndrewBezold/trinity
|
bc656da4dece431a0c929a99349d45faf75decf8
|
[
"MIT"
] | null | null | null |
eth2/beacon/types/historical_batch.py
|
AndrewBezold/trinity
|
bc656da4dece431a0c929a99349d45faf75decf8
|
[
"MIT"
] | null | null | null |
from typing import Sequence
from eth.constants import ZERO_HASH32
from eth_typing import Hash32
import ssz
from ssz.sedes import Vector, bytes32
from eth2.configs import Eth2Config
from .defaults import default_tuple, default_tuple_of_size
class HistoricalBatch(ssz.Serializable):
fields = [("block_roots", Vector(bytes32, 1)), ("state_roots", Vector(bytes32, 1))]
def __init__(
self,
*,
block_roots: Sequence[Hash32] = default_tuple,
state_roots: Sequence[Hash32] = default_tuple,
config: Eth2Config = None
) -> None:
if config:
# try to provide sane defaults
if block_roots == default_tuple:
block_roots = default_tuple_of_size(
config.SLOTS_PER_HISTORICAL_ROOT, ZERO_HASH32
)
if state_roots == default_tuple:
state_roots = default_tuple_of_size(
config.SLOTS_PER_HISTORICAL_ROOT, ZERO_HASH32
)
super().__init__(block_roots=block_roots, state_roots=state_roots)
| 30
| 87
| 0.649074
| 124
| 1,080
| 5.298387
| 0.346774
| 0.146119
| 0.103501
| 0.082192
| 0.280061
| 0.185693
| 0.185693
| 0.185693
| 0.185693
| 0.185693
| 0
| 0.029601
| 0.280556
| 1,080
| 35
| 88
| 30.857143
| 0.815959
| 0.025926
| 0
| 0.076923
| 0
| 0
| 0.020952
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.269231
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db434cf2f9b45ff5f3690a75cc12bde4fd9cb6aa
| 1,354
|
py
|
Python
|
ADVECTOR/io_tools/create_bathymetry.py
|
john-science/ADVECTOR
|
5c5ca7595c2c051f1a088b1f0e694936c3da3610
|
[
"MIT"
] | 7
|
2021-09-07T02:32:00.000Z
|
2022-01-15T11:35:02.000Z
|
ADVECTOR/io_tools/create_bathymetry.py
|
TheOceanCleanupAlgorithms/ADVECT
|
e27ce15da6a2fcbccbe363f8c2415b0122696d1f
|
[
"MIT"
] | 1
|
2021-12-24T15:16:26.000Z
|
2021-12-24T15:16:26.000Z
|
ADVECTOR/io_tools/create_bathymetry.py
|
TheOceanCleanupAlgorithms/ADVECT
|
e27ce15da6a2fcbccbe363f8c2415b0122696d1f
|
[
"MIT"
] | 1
|
2021-12-12T15:13:52.000Z
|
2021-12-12T15:13:52.000Z
|
import numpy as np
import xarray as xr
def create_bathymetry_from_land_mask(land_mask: xr.DataArray) -> xr.DataArray:
"""Method: identifies the lower depth bound of the shallowest
ocean cell (non-null) in each vertical grid column.
    :param land_mask: dimensions {time, depth, lat, lon}, boolean array, True where cell is land"""
assert np.all(land_mask.depth <= 0), "depth coordinate must be positive up"
assert np.all(
np.diff(land_mask.depth) > 0
), "depth coordinate must be sorted ascending"
# In the kernel, particles look up data based on the nearest cell-center.
    # Thus cell bounds are the midpoints between adjacent cell centers.
# Very top cell bound is surface, and bottom cell bounds are
# assumed to be symmetric about bottom cell center.
depth_diff = np.diff(land_mask.depth)
depth_bnds = np.concatenate(
[
land_mask.depth.values[:1] - depth_diff[0] / 2,
land_mask.depth.values[:-1] + depth_diff / 2,
[0],
]
)
bathy = (
(~land_mask)
.assign_coords({"depth": depth_bnds[:-1]})
.idxmax(dim="depth")
.where(~land_mask.isel(depth=-1), depth_bnds[-1])
)
bathy = bathy.drop(["time", "depth"])
bathy.name = "bathymetry"
bathy.attrs = {"units": "m", "positive": "up"}
return bathy
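A tiny smoke test, assuming numpy and xarray are available; the one-column grid is hypothetical and just satisfies the documented contract (depth negative-up and sorted ascending):

import numpy as np
import xarray as xr

land = xr.DataArray(
    np.array([[[True]], [[False]], [[False]]]),   # deepest cell is land, upper two are ocean
    dims=("depth", "lat", "lon"),
    coords={"depth": [-100.0, -50.0, -10.0], "lat": [0.0], "lon": [0.0], "time": 0},
)
print(create_bathymetry_from_land_mask(land))     # bathymetry value -75.0 for this single column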
| 34.717949
| 99
| 0.637371
| 188
| 1,354
| 4.484043
| 0.484043
| 0.094899
| 0.077106
| 0.033215
| 0.181495
| 0.151839
| 0.151839
| 0.083037
| 0
| 0
| 0
| 0.010774
| 0.245938
| 1,354
| 38
| 100
| 35.631579
| 0.814887
| 0.329394
| 0
| 0
| 0
| 0
| 0.136925
| 0
| 0
| 0
| 0
| 0
| 0.08
| 1
| 0.04
| false
| 0
| 0.08
| 0
| 0.16
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db44a6bdcd485ae98f0bcb11e91bddb17022662f
| 1,456
|
py
|
Python
|
contacts/migrations_old/0006_data_status.py
|
I-TECH-UW/mwachx
|
e191755c3369208d678fceec68dbb4f5f51c453a
|
[
"Apache-2.0"
] | 3
|
2015-05-27T14:35:49.000Z
|
2016-02-26T21:04:32.000Z
|
contacts/migrations/0006_data_status.py
|
tperrier/mwachx
|
94616659dc29843e661b2ecc9a2e7f1d4e81b5a4
|
[
"Apache-2.0"
] | 375
|
2015-01-31T10:08:34.000Z
|
2021-06-10T19:44:21.000Z
|
contacts/migrations_old/0006_data_status.py
|
I-TECH-UW/mwachx
|
e191755c3369208d678fceec68dbb4f5f51c453a
|
[
"Apache-2.0"
] | 6
|
2016-01-10T19:52:41.000Z
|
2020-06-15T22:07:24.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import itertools as it
from django.db import models, migrations
def convert_status(apps, schema_editor):
''' Migrate Visit.skipped and ScheduledPhoneCall.skipped -> status
(pending,missed,deleted,attended)
'''
Visit = apps.get_model("contacts","Visit")
ScheduledPhoneCall = apps.get_model("contacts","ScheduledPhoneCall")
for obj in it.chain(Visit.objects.all(), ScheduledPhoneCall.objects.all()):
if obj.skipped is None:
obj.status = 'pending'
elif obj.skipped == False:
obj.status = 'attended'
elif obj.skipped == True:
obj.status = 'missed'
obj.save()
def unconvert_status(apps, schema_editor):
''' Reverse function sets skipped based on status'''
Visit = apps.get_model("contacts","Visit")
ScheduledPhoneCall = apps.get_model("contacts","ScheduledPhoneCall")
for obj in it.chain(Visit.objects.all(), ScheduledPhoneCall.objects.all()):
if obj.status == 'pending':
obj.skipped = None
elif obj.status == 'attended':
obj.skipped = False
elif obj.status == 'missed':
obj.skipped = True
obj.save()
class Migration(migrations.Migration):
dependencies = [
('contacts', '0005_auto_add_visit_status'),
]
operations = [
migrations.RunPython(convert_status,unconvert_status),
]
| 30.333333
| 79
| 0.643544
| 159
| 1,456
| 5.773585
| 0.377358
| 0.065359
| 0.052288
| 0.087146
| 0.324619
| 0.324619
| 0.324619
| 0.324619
| 0.324619
| 0.324619
| 0
| 0.004488
| 0.23489
| 1,456
| 47
| 80
| 30.978723
| 0.819569
| 0.113324
| 0
| 0.25
| 0
| 0
| 0.121739
| 0.020553
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.09375
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db49fa274fd584b7dd27d84ca85b94655d65a8a2
| 6,946
|
py
|
Python
|
scripts/make_VFS.py
|
nvoron23/brython
|
b1ce5fa39b5d38c0dde138b4e75723fbb3e574ab
|
[
"BSD-3-Clause"
] | 1
|
2015-11-06T09:32:34.000Z
|
2015-11-06T09:32:34.000Z
|
scripts/make_VFS.py
|
nvoron23/brython
|
b1ce5fa39b5d38c0dde138b4e75723fbb3e574ab
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/make_VFS.py
|
nvoron23/brython
|
b1ce5fa39b5d38c0dde138b4e75723fbb3e574ab
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import os
import pyminifier
try:
import io as StringIO
except ImportError:
import cStringIO as StringIO # lint:ok
# Check to see if slimit or some other minification library is installed and
# Set minify equal to slimit's minify function.
try:
import slimit
js_minify = slimit.minify
except ImportError as error:
print(error)
js_minify = slimit = None
###############################################################################
def process_unittest(filename):
"""Process a VFS filename for Brython."""
print("Generating {}".format(filename))
nb = 0
nb_err = 0
_main_root = os.path.dirname(filename)
_VFS = {}
for _mydir in ("Lib",):
for _root, _dir, _files in os.walk(os.path.join(_main_root, _mydir)):
if 'unittest' not in _root:
continue
if '__pycache__' in _root:
continue
for _file in _files:
_ext = os.path.splitext(_file)[1]
                if _ext not in ('.py',):  # note the trailing comma: ('.py') is just a string, not a tuple
continue
nb += 1
file_name = os.path.join(_root, _file)
try: # python 3
with open(file_name, encoding="utf-8") as file_with_data:
_data = file_with_data.read()
except Exception as reason: # python 2
with open(file_name, "r") as file_with_data:
_data = str(file_with_data.read()).decode("utf-8")
if not len(_data):
print("No data for {} ({}).".format(_file, type(_data)))
if _ext.lower() == '.py' and _data:
try:
_data = pyminifier.remove_comments_and_docstrings(
_data)
_data = pyminifier.dedent(_data)
except Exception as error:
print(error)
nb_err += 1
_vfs_filename = os.path.join(
_root, _file).replace(_main_root, '')
_vfs_filename = _vfs_filename.replace("\\", "/")
mod_name = _vfs_filename[len(_mydir) + 2:].replace('/', '.')
mod_name, ext = os.path.splitext(mod_name)
is_package = mod_name.endswith('__init__')
if is_package:
mod_name = mod_name[:-9]
_VFS[mod_name] = [_data, 1]
else:
_VFS[mod_name] = [_data]
print(("Adding %s %s" % (mod_name, _vfs_filename)))
print('%s files, %s errors' % (nb, nb_err))
with open(filename, "w") as file_to_write_VFS:
file_to_write_VFS.write('__BRYTHON__.libs = __BRYTHON__.libs || {};\n')
file_to_write_VFS.write("__BRYTHON__.=libs['unittest']=%s;\n\n" % json.dumps(_VFS))
file_to_write_VFS.write("""
__BRYTHON__.import_from_unittest = function(mod_name){
var stored = __BRYTHON__.libs['unittest'][mod_name]
if(stored!==undefined){
var module_contents = stored[0]
var is_package = stored[1]
var path = 'py_unittest'
var module = {name:mod_name,__class__:$B.$ModuleDict,is_package:is_package}
if(is_package){var package=mod_name}
else{
var elts = mod_name.split('.')
elts.pop()
var package = elts.join('.')
}
$B.modules[mod_name].$package = is_package
$B.modules[mod_name].__package__ = package
run_py(module,path,module_contents)
return true
}
return null
}
// add this import function to brython by doing the following:
// <body onload="brython({custom_import_funcs:[__BRYTHON__.import_from_unittest]})">
// this will allow us to import unittest modules.
""")
def process(filename, exclude_dirs=['unittest',]):
"""Process a VFS filename for Brython."""
print("Generating {}".format(filename))
nb = 0
nb_err = 0
_main_root = os.path.dirname(filename)
_VFS = {}
for _mydir in ("libs", "Lib"):
for _root, _dir, _files in os.walk(os.path.join(_main_root, _mydir)):
#if _root.endswith('lib_migration'):
_flag=False
for _exclude in exclude_dirs:
if _exclude in _root: #_root.endswith(_exclude):
_flag=True
continue
if _flag:
continue # skip these modules
if '__pycache__' in _root:
continue
nb += 1
for _file in _files:
_ext = os.path.splitext(_file)[1]
if _ext not in ('.js', '.py'):
continue
nb += 1
with open(os.path.join(_root, _file), "r") as file_with_data:
_data = file_with_data.read()
                if len(_data) == 0:
                    print('no data for %s' % _file)
                    _data = ''
                elif isinstance(_data, bytes):
                    # only bytes need decoding; Python 3's open(..., "r") already yields str
                    _data = _data.decode('utf-8')
if _ext in '.js':
if js_minify is not None:
try:
_data = js_minify(_data)
except Exception as error:
print(error)
elif _ext == '.py' and len(_data) > 0:
try:
_data = pyminifier.remove_comments_and_docstrings(_data)
_data = pyminifier.dedent(_data)
except Exception as error:
print(error)
nb_err += 1
_vfs_filename = os.path.join(_root, _file).replace(_main_root, '')
_vfs_filename = _vfs_filename.replace("\\", "/")
if _vfs_filename.startswith('/libs/crypto_js/rollups/'):
if _file not in ('md5.js', 'sha1.js', 'sha3.js',
'sha224.js', 'sha384.js', 'sha512.js'):
continue
mod_name = _vfs_filename[len(_mydir) + 2:].replace('/', '.')
mod_name, ext = os.path.splitext(mod_name)
is_package = mod_name.endswith('__init__')
if is_package:
mod_name = mod_name[:-9]
_VFS[mod_name] = [ext, _data, 1]
else:
_VFS[mod_name] = [ext, _data]
print(("adding %s %s" % (mod_name, _vfs_filename)))
print('%s files, %s errors' % (nb, nb_err))
with open(filename, "w") as file_to_write_VFS:
file_to_write_VFS.write('__BRYTHON__.use_VFS = true;\n')
file_to_write_VFS.write('__BRYTHON__.VFS=%s;\n\n' % json.dumps(_VFS))
###############################################################################
if __name__ == '__main__':
_main_root = os.path.join(os.getcwd(), '../src')
process(os.path.join(_main_root, "py_VFS.js"))
| 36.177083
| 91
| 0.512093
| 781
| 6,946
| 4.185659
| 0.201024
| 0.053533
| 0.024472
| 0.029979
| 0.52126
| 0.475375
| 0.449373
| 0.410523
| 0.410523
| 0.389722
| 0
| 0.008659
| 0.351569
| 6,946
| 191
| 92
| 36.366492
| 0.71714
| 0.04607
| 0
| 0.435065
| 0
| 0
| 0.199566
| 0.063576
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012987
| false
| 0
| 0.077922
| 0
| 0.103896
| 0.084416
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db4a6abf2a3e16936115e864f7caf11878e6ba2c
| 9,659
|
py
|
Python
|
main.py
|
rcox771/spectrum_scanner
|
71559d62ca9dc9f66d66b7ada4491de42c6cdd52
|
[
"MIT"
] | null | null | null |
main.py
|
rcox771/spectrum_scanner
|
71559d62ca9dc9f66d66b7ada4491de42c6cdd52
|
[
"MIT"
] | null | null | null |
main.py
|
rcox771/spectrum_scanner
|
71559d62ca9dc9f66d66b7ada4491de42c6cdd52
|
[
"MIT"
] | null | null | null |
from rtlsdr import RtlSdr
from contextlib import closing
from matplotlib import pyplot as plt
import numpy as np
from scipy.signal import spectrogram, windows
from scipy import signal
from skimage.io import imsave, imread
from datetime import datetime
import json
import os
from tqdm import tqdm
import time
from queue import Queue
import asyncio
from pathlib import Path
import warnings
for cat in [RuntimeWarning, UserWarning, FutureWarning]:
warnings.filterwarnings("ignore", category=cat)
def split_images(dir="sdr_captures/specs_raw"):
jpgs = list(Path(dir).rglob('*.jpg'))
pngs = list(Path(dir).rglob('*.png'))
img_files = pngs + jpgs
img_files = list(filter(lambda x: 'chk' not in str(x), img_files))
for img_file in tqdm(img_files, desc="splitting images"):
im = imread(img_file)
shp = list(im.shape)
shp = list(filter(lambda x: x != 1, shp))
shp = np.array(shp)
dim_to_slide_over = shp.argmax()
win_size = shp[shp.argmin()]
im_size = shp[dim_to_slide_over]
for start in range(0, im_size, win_size):
stop = start + win_size
if stop >= im_size:
break
if dim_to_slide_over == 0:
chunk = im[start:stop, :]
elif dim_to_slide_over == 1:
chunk = im[:, start:stop]
file_out = str(
Path(img_file).with_suffix(f".chk_{start}_{stop}.png"))
imsave(file_out, chunk)
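# Illustrative behaviour (hypothetical sizes): a 512x4096 spectrogram image is
# cut into square 512x512 chunks along its long axis and saved as
# <name>.chk_0_512.png, <name>.chk_512_1024.png, ...; a trailing partial
# window is dropped by the `stop >= im_size` check above.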
# y -- spectrogram, nf by nt array
# dbf -- Dynamic range of the spectrum
def adjust_dyn_range(x, mx=3, mn=10, rel_to=np.median):
r = rel_to(x)
zmax = r+mx
zmin = r-mn
x[x<zmin] = zmin
x[x>zmax] = zmax
return x
def to_spec(y, fs, fc, NFFT=1024, dbf=60, nperseg=128, normalize=True):
#w = windows.hamming(nperseg)
#window = signal.kaiser(nperseg, beta=14)
# Call SciPy's spectrogram explicitly via the `signal` module: the custom
# `spectrogram` defined below would otherwise shadow the bare import.
f, t, y = signal.spectrogram(y, detrend=None, noverlap=int(nperseg/2), nfft=NFFT, fs=fs)
y = np.fft.fftshift(y, axes=0)
if normalize:
#y = norm_spectrum(y)
y = np.sqrt(np.power(y.real, 2) + np.power(y.imag, 2))
y = 20 * np.log10(np.abs(y)/ np.abs(y).max())
y = np.abs(y)
y = y / y.max()
return y
from sklearn.preprocessing import MinMaxScaler, StandardScaler
def spectrogram(x, fs, fc, m=None, dbf=60):
if not m:
m = 1024
isreal_bool = np.isreal(x).all()
lx = len(x)
nt = (lx + m - 1) // m
x = np.append(x,np.zeros(-lx+nt*m))
x = x.reshape((int(m/2),nt*2), order='F')
x = np.concatenate((x,x),axis=0)
x = x.reshape((m*nt*2,1),order='F')
x = x[np.r_[m//2:len(x),np.ones(m//2)*(len(x)-1)].astype(int)].reshape((m,nt*2),order='F')
xmw = x * windows.hamming(m)[:,None]
t_range = [0.0, lx / fs]
if isreal_bool:
f_range = [ fc, fs / 2.0 + fc]
xmf = np.fft.fft(xmw,len(xmw),axis=0)
xmf = xmf[0:m//2, :]
else:
f_range = [-fs / 2.0 + fc, fs / 2.0 + fc]
xmf = np.fft.fftshift( np.fft.fft( xmw ,len(xmw),axis=0), axes=0 )
f_range = np.linspace(*f_range, xmf.shape[0])
t_range = np.linspace(*t_range, xmf.shape[1])
# trim the band edges: drop the top and bottom 10% of frequency bins
h = xmf.shape[0]
each = int(h*.10)
xmf = xmf[each:-each, :]
xmf = np.sqrt(np.power(xmf.real, 2) + np.power(xmf.imag, 2))
xmf = np.abs(xmf)
xmf /= xmf.max()
xmf = 20 * np.log10(xmf)
xmf = np.clip(xmf, -dbf, 0)
xmf = MinMaxScaler().fit_transform(StandardScaler(with_mean=True, with_std=True).fit_transform(xmf))
xmf = np.abs(xmf)
#xmf-=np.median(xmf)
xmf/=xmf.max()
print(xmf.min(), xmf.max())
return f_range, t_range, xmf
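# Note (reader aid, not part of the original file): the reshape/concatenate
# gymnastics above build length-m frames with 50% overlap before the FFT,
# i.e. a hand-rolled equivalent of an STFT with hop size m/2 and a Hamming
# window.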
def append_json(data, path):
with open(path, 'a') as f:
f.write(json.dumps(data) + '\n')
async def stream(sdr, N):
samples_buffer = Queue()
total = 0
with tqdm(total=N, desc='sampling') as pbar:
#for i in range(10):
# time.sleep(0.1)
async for samples in sdr.stream():
# do something with samples
# ...
samples_buffer.put(samples)
#print(f'put {len(samples)} into buffer')
total += len(samples)
pbar.update(len(samples))
if total >= N:
break
# to stop streaming:
await sdr.stop()
# done
sdr.close()
return samples_buffer
def capture(fc=94.3e6,
fs=int(1e6),
gain='auto',
seconds_dwell=.4
#offset_dc=5e4
):
N = int(seconds_dwell * fs)
with closing(RtlSdr()) as sdr:
sdr.sample_rate = fs
sdr.center_freq = fc# + int(offset_dc)
sdr.gain = gain
t = datetime.now()
stamp = datetime.timestamp(t)
loop = asyncio.get_event_loop()
samples_buffer = loop.run_until_complete(stream(sdr, N))
iq_samples = np.hstack(np.array(list(samples_buffer.queue)))[:N].astype("complex64")
#iq_samples = shift_mix(iq_samples, -offset_dc, fs)
#path = os.path.join(out_dir, f'{stamp}.png')
meta = dict(
fs=fs,
fc=fc,
gain=gain,
seconds_dwell=seconds_dwell,
dt_start=stamp
)
return iq_samples, meta
def shift_mix(x, hz, fs):
return x*np.exp(1j*2*np.pi*hz/fs*np.arange(len(x)))
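# Hedged usage sketch: shift_mix multiplies by a complex exponential, which
# translates the spectrum by `hz`. For example, the commented-out offset_dc
# idea in capture() would undo a 50 kHz tuning offset with:
#   iq_samples = shift_mix(iq_samples, -5e4, fs)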
def save_capture(path, spec_img, meta, meta_path):
imsave(path, spec_img.T)
append_json(meta, meta_path)
def scan(
low=80e6,
high=1000e6,
repeats=10,
target_hpb=300,
):
out_dir="sdr_captures/specs_raw"
meta_path="sdr_captures/dataset.json"
os.makedirs(out_dir, exist_ok=True)
for repeat in tqdm(range(repeats), desc='repeats'):
for fs in [int(3.2e6)]:  # alternatives: list(map(int, (3.2e6, 2e6, 1e6)))
#for NFFT in [1024, 2048, 2048 * 2]:
fcs = []
fc = low
while fc < high:
fc += int((fs * (1/3.)))
fcs.append(fc)
fcs = np.array(fcs)
print(f'scanning {len(fcs)} total frequencies...')
for fc in tqdm(fcs, desc='fcs'):
try:
iq, meta = capture(fc=fc, fs=fs)
meta['NFFT'] = closest_power_of_two(fs / target_hpb)
meta['hpb'] = fs/meta['NFFT']
ff, tt, spec_img = spectrogram(iq, fs, fc, m=meta['NFFT'])
img_path = os.path.join(out_dir, f"{meta['dt_start']}.png")
save_capture(img_path, spec_img, meta, meta_path)
except Exception as e:
print(e)
time.sleep(1)
pass
def get_optimal_fs(max_fs=3e6):
fss = np.array([np.power(2,i) for i in range(30)])
fss = fss[fss<=max_fs][-1]
return fss
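# Illustrative behaviour (derived from the code above): get_optimal_fs returns
# the largest power of two not exceeding max_fs, e.g.
#   get_optimal_fs(3e6) -> 2097152 (2**21)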
def optimal_scan(
min_freq=80e6,
max_freq=107e6,
fs=3e6,
hpb_target=4096
):
fs2 = get_optimal_fs(fs)
if fs2!=fs:
print(f'optimal fs found: {fs2}, original: {fs}')
fs = fs2
del fs2
n_bins = closest_power_of_two(fs / hpb_target)
print(f'given hz per bin target: {hpb_target} -> nfft bins per sweep: {n_bins}')
assert fs == hpb_target * n_bins
print(f'{fs} = {hpb_target} * {n_bins}')
diff_bw = max_freq-min_freq
sweeps = np.ceil(diff_bw/fs) + 1
sweep_bw = sweeps * fs
delta_bw = sweep_bw - diff_bw
adjusted_min_freq = min_freq - int(delta_bw//2)
adjusted_max_freq = max_freq + int(delta_bw//2)
assert (adjusted_max_freq-adjusted_min_freq) == sweep_bw
print(f'optimal min/max frequencies: {adjusted_min_freq}/{adjusted_max_freq}')
min_freq = adjusted_min_freq
max_freq = adjusted_max_freq
freq_bins = np.arange(n_bins*sweeps)
fz = np.arange(min_freq, max_freq, hpb_target).astype(int)
return freq_bins, fz
def closest_power_of_two(number):
# Returns the power of two closest to 'number'
n = np.ceil(np.log2(number))
a = np.array([np.power(2, n - 1), np.power(2, n), np.power(2, n + 1)])
return int(a[np.argmin(np.abs(a - number))])
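# Illustrative check (not part of the original file): for fs = 2**21 and a
# ~300 Hz/bin target, fs / 300 ~= 6990, whose closest power of two is 8192,
# giving an actual resolution of 2**21 / 8192 = 256 Hz per bin.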
def norm_spectrum(spec_img):
spec_img = 20 * np.log10(np.abs(spec_img) / np.max(np.abs(spec_img)))
mid = np.median(spec_img)
# high = mid + 30
# low = mid - 30
# spec_img[spec_img < low] = low
# spec_img[spec_img > high] = high
spec_img = np.abs(spec_img)
spec_img /= spec_img.max()
print('spec max:', spec_img.max(), 'spec min:', spec_img.min())
return spec_img
def parse_measure(s):
s = s.lower()
if s[-1].isalpha():
h, mod = float(s[:-1]), s[-1]
if mod == 'm':
h*=1e6
elif mod == 'k':
h*=1e3
else:
h = int(s)
return h
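# Illustrative behaviour (assumed from the parsing rules above):
#   parse_measure('94.3m') -> 94300000.0   # 'm' scales by 1e6
#   parse_measure('24k')   -> 24000.0      # 'k' scales by 1e3
#   parse_measure('1000')  -> 1000         # bare numbers become ints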
def string_to_linspace(s, delim=':'):
return np.arange(*list(map(parse_measure, s.split(delim))))
#string_to_linspace('24M:28M:3M')
def plot_one(fc=94.3 * 1e6, fs=3e6, target_hpb=300, seconds_dwell=.2):
NFFT = closest_power_of_two(fs / target_hpb)
iq_samples, meta = capture(fc=fc, fs=fs, seconds_dwell=seconds_dwell)
spec_img = to_spec(iq_samples, fs, fc, NFFT=NFFT)
#spec_img = norm_spectrum(spec_img)
#spec_img = np.abs(spec_img)
#spec_img /= spec_img.max()
#spec_img = 1 - spec_img
print('img shape:', spec_img.shape)
fig, ax = plt.subplots(1, 1, figsize=(14, 4))
ax.matshow(spec_img.T[:NFFT], cmap=plt.get_cmap('viridis'))
print(spec_img.T.shape)
# plt.plot(spec_img.T[0, :])
plt.show()
if __name__ == "__main__":
#split_images()
#plot_one()
scan(repeats=3, target_hpb=1500)
split_images()
#plot_one()
| 27.997101
| 104
| 0.576871
| 1,471
| 9,659
| 3.630184
| 0.222978
| 0.043258
| 0.016479
| 0.020974
| 0.118914
| 0.070787
| 0.046629
| 0.038764
| 0.013483
| 0.013483
| 0
| 0.02869
| 0.27829
| 9,659
| 345
| 105
| 27.997101
| 0.73734
| 0.089036
| 0
| 0.04661
| 0
| 0
| 0.056246
| 0.017456
| 0
| 0
| 0
| 0
| 0.008475
| 1
| 0.067797
| false
| 0.004237
| 0.072034
| 0.008475
| 0.190678
| 0.042373
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db4a91969cbd0645d892f196740aa9b468c864c7
| 7,333
|
py
|
Python
|
examples/mnist1.py
|
braingineer/pyromancy
|
7a7ab1a6835fd63b9153463dd08bb53630f15c62
|
[
"MIT"
] | null | null | null |
examples/mnist1.py
|
braingineer/pyromancy
|
7a7ab1a6835fd63b9153463dd08bb53630f15c62
|
[
"MIT"
] | 1
|
2021-03-25T22:13:53.000Z
|
2021-03-25T22:13:53.000Z
|
examples/mnist1.py
|
braingineer/pyromancy
|
7a7ab1a6835fd63b9153463dd08bb53630f15c62
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import argparse
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
from tqdm import tqdm
from pyromancy import pyromq
from pyromancy.losses import LossGroup, NegativeLogLikelihood
from pyromancy.metrics import MetricGroup, Accuracy
from pyromancy.subscribers import LogSubscriber
def parse_args():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--weight-decay', default=1e-4, type=float)
parser.add_argument('--grad-clip-norm', default=10.0, type=float)
parser.add_argument('--disable-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# Name the experiment
parser.add_argument('--experiment-name', required=True)
parser.add_argument("--experimentdb", default=None)
parser.add_argument('--log-to-console', default=False, action='store_true')
args = parser.parse_args()
if args.experimentdb is None:
args.experimentdb = args.experiment_name + '.db'
return args
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
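# Shape walk-through (illustrative, assuming a 1x28x28 MNIST input):
#   conv1 (k=5) -> 10x24x24, max_pool2d(2) -> 10x12x12
#   conv2 (k=5) -> 20x8x8,   max_pool2d(2) -> 20x4x4
#   flatten     -> 320, which matches fc1's input size above.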
# noinspection PyCallingNonCallable,PyCallingNonCallable
def run_once(args, train_loader, test_loader):
broker = pyromq.Broker()
model = Net()
if args.cuda:
model.cuda()
training_events = pyromq.TrainingEventPublisher(broker=broker)
broker.add_subscriber(LogSubscriber(experiment_uid=args.experiment_name,
log_file=os.path.join('logs', args.experiment_name),
to_console=args.log_to_console))
opt = torch.optim.SGD(params=model.parameters(),
lr=args.lr,
weight_decay=args.weight_decay,
momentum=args.momentum)
losses = LossGroup(optimizer=opt,
grad_clip_norm=args.grad_clip_norm,
name='losses',
channel_name=pyromq.channels.METRIC_EVENTS,
broker=broker)
losses.add(NegativeLogLikelihood(name='nll',
target_name='y_target',
output_name='y_pred'),
data_target='train')
# Metrics
metrics = MetricGroup(name='metrics',
channel_name=pyromq.channels.METRIC_EVENTS,
broker=broker)
metrics.add(Accuracy(name='acc',
target_name='y_target',
output_name='y_pred'),
data_target='*')
metrics.add(NegativeLogLikelihood(name='nll',
target_name='y_target',
output_name='y_pred'),
data_target='val')
training_events.training_start()
for _ in tqdm(range(args.epochs), total=args.epochs):
training_events.epoch_start()
model.train(True)
for data, target in train_loader:
# From the original example
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
# put the incoming batch data into a dictionary
batch_dict = {'x_data': data, 'y_target': target}
# Training Event
training_events.batch_start()
# Get model outputs
predictions = {'y_pred': model(batch_dict['x_data'])}
# Compute Metrics
metrics.compute(in_dict=batch_dict, out_dict=predictions,
data_type='train')
# Compute Losses
losses.compute(in_dict=batch_dict, out_dict=predictions,
data_type='train')
losses.step()
# Training Event
training_events.batch_end()
model.train(False)
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
batch_dict = {'x_data': data, 'y_target': target}
# Training Event
training_events.batch_start()
predictions = {'y_pred': model(batch_dict['x_data'])}
metrics.compute(in_dict=batch_dict,
out_dict=predictions,
data_type='val')
training_events.batch_end()
training_events.epoch_end()
def main():
args = parse_args()
args.cuda = not args.disable_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
dataload_kwargs = {}
if args.cuda:
dataload_kwargs = {'num_workers': 1, 'pin_memory': True}
train_dataset = datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
# noinspection PyUnresolvedReferences
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=args.batch_size,
shuffle=True, **dataload_kwargs)
test_dataset = datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
# noinspection PyUnresolvedReferences
test_loader = torch.utils.data.DataLoader(test_dataset,
batch_size=args.batch_size,
shuffle=True, **dataload_kwargs)
run_once(args, train_loader, test_loader)
if __name__ == "__main__":
main()
| 33.949074
| 92
| 0.571799
| 796
| 7,333
| 5.084171
| 0.252513
| 0.024463
| 0.046207
| 0.013837
| 0.357796
| 0.297999
| 0.297999
| 0.2723
| 0.230788
| 0.230788
| 0
| 0.017034
| 0.319515
| 7,333
| 215
| 93
| 34.106977
| 0.793988
| 0.046093
| 0
| 0.260563
| 0
| 0
| 0.078797
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035211
| false
| 0
| 0.091549
| 0
| 0.147887
| 0.007042
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db4b58f91ffeef6d5055943e105969fe3018f79e
| 24,453
|
py
|
Python
|
hunter/main.py
|
datastax-labs/hunter
|
3631cc3fa529991297a8b631bbae15b138cce307
|
[
"Apache-2.0"
] | 17
|
2021-09-03T07:32:40.000Z
|
2022-03-24T21:56:22.000Z
|
hunter/main.py
|
datastax-labs/hunter
|
3631cc3fa529991297a8b631bbae15b138cce307
|
[
"Apache-2.0"
] | 1
|
2021-12-02T14:05:07.000Z
|
2021-12-02T14:05:07.000Z
|
hunter/main.py
|
datastax-labs/hunter
|
3631cc3fa529991297a8b631bbae15b138cce307
|
[
"Apache-2.0"
] | 2
|
2022-01-18T18:40:41.000Z
|
2022-03-11T15:33:25.000Z
|
import argparse
import copy
import logging
import sys
from dataclasses import dataclass
from datetime import datetime, timedelta
from slack_sdk import WebClient
from typing import Dict, Optional, List
import pytz
from hunter import config
from hunter.attributes import get_back_links
from hunter.config import ConfigError, Config
from hunter.data_selector import DataSelector
from hunter.grafana import GrafanaError, Grafana, Annotation
from hunter.graphite import GraphiteError
from hunter.importer import DataImportError, Importers
from hunter.report import Report
from hunter.series import (
AnalysisOptions,
ChangePointGroup,
SeriesComparison,
compare,
AnalyzedSeries,
)
from hunter.slack import SlackNotifier, NotificationError
from hunter.test_config import TestConfigError, TestConfig, GraphiteTestConfig
from hunter.util import parse_datetime, DateFormatError, interpolate
@dataclass
class HunterError(Exception):
message: str
class Hunter:
__conf: Config
__importers: Importers
__grafana: Optional[Grafana]
__slack: Optional[SlackNotifier]
def __init__(self, conf: Config):
self.__conf = conf
self.__importers = Importers(conf)
self.__grafana = None
self.__slack = self.__maybe_create_slack_notifier()
def list_tests(self, group_names: Optional[List[str]]):
if group_names is not None:
test_names = []
for group_name in group_names:
group = self.__conf.test_groups.get(group_name)
if group is None:
raise HunterError(f"Test group not found: {group_name}")
test_names += (t.name for t in group)
else:
test_names = self.__conf.tests
for test_name in sorted(test_names):
print(test_name)
def list_test_groups(self):
for group_name in sorted(self.__conf.test_groups):
print(group_name)
def get_test(self, test_name: str) -> TestConfig:
test = self.__conf.tests.get(test_name)
if test is None:
raise HunterError(f"Test not found {test_name}")
return test
def get_tests(self, *names: str) -> List[TestConfig]:
tests = []
for name in names:
group = self.__conf.test_groups.get(name)
if group is not None:
tests += group
else:
test = self.__conf.tests.get(name)
if test is not None:
tests.append(test)
else:
raise HunterError(f"Test or group not found: {name}")
return tests
def list_metrics(self, test: TestConfig):
importer = self.__importers.get(test)
for metric_name in importer.fetch_all_metric_names(test):
print(metric_name)
def analyze(
self, test: TestConfig, selector: DataSelector, options: AnalysisOptions
) -> AnalyzedSeries:
importer = self.__importers.get(test)
series = importer.fetch_data(test, selector)
analyzed_series = series.analyze(options)
change_points = analyzed_series.change_points_by_time
report = Report(series, change_points)
print(test.name + ":")
print(report.format_log_annotated())
return analyzed_series
def __get_grafana(self) -> Grafana:
if self.__grafana is None:
self.__grafana = Grafana(self.__conf.grafana)
return self.__grafana
def update_grafana_annotations(self, test: GraphiteTestConfig, series: AnalyzedSeries):
grafana = self.__get_grafana()
begin = datetime.fromtimestamp(series.time()[0], tz=pytz.UTC)
end = datetime.fromtimestamp(series.time()[len(series.time()) - 1], tz=pytz.UTC)
logging.info(f"Fetching Grafana annotations for test {test.name}...")
tags_to_query = ["hunter", "change-point", "test:" + test.name]
old_annotations_for_test = grafana.fetch_annotations(begin, end, list(tags_to_query))
logging.info(f"Found {len(old_annotations_for_test)} annotations")
created_count = 0
for metric_name, change_points in series.change_points.items():
path = test.get_path(series.branch_name(), metric_name)
metric_tag = f"metric:{metric_name}"
tags_to_create = (
tags_to_query
+ [metric_tag]
+ test.tags
+ test.annotate
+ test.metrics[metric_name].annotate
)
substitutions = {
"TEST_NAME": test.name,
"METRIC_NAME": metric_name,
"GRAPHITE_PATH": [path],
"GRAPHITE_PATH_COMPONENTS": path.split("."),
"GRAPHITE_PREFIX": [test.prefix],
"GRAPHITE_PREFIX_COMPONENTS": test.prefix.split("."),
}
tmp_tags_to_create = []
for t in tags_to_create:
tmp_tags_to_create += interpolate(t, substitutions)
tags_to_create = tmp_tags_to_create
old_annotations = [a for a in old_annotations_for_test if metric_tag in a.tags]
old_annotation_times = set((a.time for a in old_annotations if a.tags))
target_annotations = []
for cp in change_points:
attributes = series.attributes_at(cp.index)
annotation_text = get_back_links(attributes)
target_annotations.append(
Annotation(
id=None,
time=datetime.fromtimestamp(cp.time, tz=pytz.UTC),
text=annotation_text,
tags=tags_to_create,
)
)
target_annotation_times = set((a.time for a in target_annotations))
to_delete = [a for a in old_annotations if a.time not in target_annotation_times]
if to_delete:
logging.info(
f"Removing {len(to_delete)} annotations "
f"for test {test.name} and metric {metric_name}..."
)
grafana.delete_annotations(*(a.id for a in to_delete))
to_create = [a for a in target_annotations if a.time not in old_annotation_times]
if to_create:
logging.info(
f"Creating {len(to_create)} annotations "
f"for test {test.name} and metric {metric_name}..."
)
grafana.create_annotations(*to_create)
created_count += len(to_create)
if created_count == 0:
logging.info("All annotations up-to-date. No new annotations needed.")
else:
logging.info(f"Created {created_count} annotations.")
def remove_grafana_annotations(self, test: Optional[TestConfig], force: bool):
"""Removes all Hunter annotations (optionally for a given test) in Grafana"""
grafana = self.__get_grafana()
if test:
logging.info(f"Fetching Grafana annotations for test {test.name}...")
else:
logging.info(f"Fetching Grafana annotations...")
tags_to_query = {"hunter", "change-point"}
if test:
tags_to_query.add("test:" + test.name)
annotations = grafana.fetch_annotations(None, None, list(tags_to_query))
if not annotations:
logging.info("No annotations found.")
return
if not force:
print(
f"Are you sure to remove {len(annotations)} annotations from {grafana.url}? [y/N]"
)
decision = input().strip()
if decision.lower() != "y" and decision.lower() != "yes":
return
logging.info(f"Removing {len(annotations)} annotations...")
grafana.delete_annotations(*(a.id for a in annotations))
def regressions(
self, test: TestConfig, selector: DataSelector, options: AnalysisOptions
) -> bool:
importer = self.__importers.get(test)
# Even if user is interested only in performance difference since some point X,
# we really need to fetch some earlier points than X.
# Otherwise, if performance went down very early after X, e.g. at X + 1, we'd have
# insufficient number of data points to compute the baseline performance.
# Instead of using `since-` selector, we're fetching everything from the
# beginning and then we find the baseline performance around the time pointed by
# the original selector.
since_version = selector.since_version
since_commit = selector.since_commit
since_time = selector.since_time
baseline_selector = copy.deepcopy(selector)
baseline_selector.last_n_points = sys.maxsize
baseline_selector.branch = None
baseline_selector.since_version = None
baseline_selector.since_commit = None
baseline_selector.since_time = since_time - timedelta(days=30)
baseline_series = importer.fetch_data(test, baseline_selector)
if since_version:
baseline_index = baseline_series.find_by_attribute("version", since_version)
if not baseline_index:
raise HunterError(f"No runs of test {test.name} with version {since_version}")
baseline_index = max(baseline_index)
elif since_commit:
baseline_index = baseline_series.find_by_attribute("commit", since_commit)
if not baseline_index:
raise HunterError(f"No runs of test {test.name} with commit {since_commit}")
baseline_index = max(baseline_index)
else:
baseline_index = baseline_series.find_first_not_earlier_than(since_time)
baseline_series = baseline_series.analyze()
if selector.branch:
target_series = importer.fetch_data(test, selector).analyze()
else:
target_series = baseline_series
cmp = compare(baseline_series, baseline_index, target_series, target_series.len())
regressions = []
for metric_name, stats in cmp.stats.items():
direction = baseline_series.metric(metric_name).direction
m1 = stats.mean_1
m2 = stats.mean_2
change_percent = stats.forward_rel_change() * 100.0
if m2 * direction < m1 * direction and stats.pvalue < options.max_pvalue:
regressions.append(
" {:16}: {:#8.3g} --> {:#8.3g} ({:+6.1f}%)".format(
metric_name, m1, m2, change_percent
)
)
if regressions:
print(f"{test.name}:")
for r in regressions:
print(r)
else:
print(f"{test.name}: OK")
return len(regressions) > 0
def __maybe_create_slack_notifier(self):
if not self.__conf.slack:
return None
return SlackNotifier(WebClient(token=self.__conf.slack.bot_token))
def notify_slack(
self,
test_change_points: Dict[str, AnalyzedSeries],
selector: DataSelector,
channels: List[str],
since: datetime,
):
if not self.__slack:
logging.error(
"Slack definition is missing from the configuration, cannot send notification"
)
return
self.__slack.notify(test_change_points, selector=selector, channels=channels, since=since)
def validate(self):
valid = True
unique_metrics = set()
for name, test in self.__conf.tests.items():
logging.info("Checking {}".format(name))
test_metrics = test.fully_qualified_metric_names()
for test_metric in test_metrics:
if test_metric not in unique_metrics:
unique_metrics.add(test_metric)
else:
valid = False
logging.error(f"Found duplicated metric: {test_metric}")
try:
importer = self.__importers.get(test)
series = importer.fetch_data(test)
for metric, metric_data in series.data.items():
if not metric_data:
logging.warning(f"Test's metric does not have data: {name} {metric}")
except Exception as err:
logging.error(f"Invalid test definition: {name}\n{repr(err)}\n")
valid = False
logging.info(f"Validation finished: {'VALID' if valid else 'INVALID'}")
if not valid:
exit(1)
def setup_data_selector_parser(parser: argparse.ArgumentParser):
parser.add_argument(
"--branch", metavar="STRING", dest="branch", help="name of the branch", nargs="?"
)
parser.add_argument(
"--metrics",
metavar="LIST",
dest="metrics",
help="a comma-separated list of metrics to analyze",
)
parser.add_argument(
"--attrs",
metavar="LIST",
dest="attributes",
help="a comma-separated list of attribute names associated with the runs "
"(e.g. commit, branch, version); "
"if not specified, it will be automatically filled based on available information",
)
since_group = parser.add_mutually_exclusive_group()
since_group.add_argument(
"--since-commit",
metavar="STRING",
dest="since_commit",
help="the commit at the start of the time span to analyze",
)
since_group.add_argument(
"--since-version",
metavar="STRING",
dest="since_version",
help="the version at the start of the time span to analyze",
)
since_group.add_argument(
"--since",
metavar="DATE",
dest="since_time",
help="the start of the time span to analyze; "
"accepts ISO, and human-readable dates like '10 weeks ago'",
)
until_group = parser.add_mutually_exclusive_group()
until_group.add_argument(
"--until-commit",
metavar="STRING",
dest="until_commit",
help="the commit at the end of the time span to analyze",
)
until_group.add_argument(
"--until-version",
metavar="STRING",
dest="until_version",
help="the version at the end of the time span to analyze",
)
until_group.add_argument(
"--until",
metavar="DATE",
dest="until_time",
help="the end of the time span to analyze; same syntax as --since",
)
parser.add_argument(
"--last",
type=int,
metavar="COUNT",
dest="last_n_points",
help="the number of data points to take from the end of the series"
)
def data_selector_from_args(args: argparse.Namespace) -> DataSelector:
data_selector = DataSelector()
if args.branch:
data_selector.branch = args.branch
if args.metrics is not None:
data_selector.metrics = list(args.metrics.split(","))
if args.attributes is not None:
data_selector.attributes = list(args.attributes.split(","))
if args.since_commit is not None:
data_selector.since_commit = args.since_commit
if args.since_version is not None:
data_selector.since_version = args.since_version
if args.since_time is not None:
data_selector.since_time = parse_datetime(args.since_time)
if args.until_commit is not None:
data_selector.until_commit = args.until_commit
if args.until_version is not None:
data_selector.until_version = args.until_version
if args.until_time is not None:
data_selector.until_time = parse_datetime(args.until_time)
if args.last_n_points is not None:
data_selector.last_n_points = args.last_n_points
return data_selector
def setup_analysis_options_parser(parser: argparse.ArgumentParser):
parser.add_argument(
"-P, --p-value",
dest="pvalue",
type=float,
default=0.001,
help="maximum accepted P-value of a change-point; "
"P denotes the probability that the change-point has "
"been found by a random coincidence, rather than a real "
"difference between the data distributions",
)
parser.add_argument(
"-M",
"--magnitude",
dest="magnitude",
type=float,
default=0.0,
help="minimum accepted magnitude of a change-point "
"computed as abs(new_mean / old_mean - 1.0); use it "
"to filter out stupidly small changes like < 0.01",
)
parser.add_argument(
"--window",
default=50,
type=int,
dest="window",
help="the number of data points analyzed at once; "
"the window size affects the discriminative "
"power of the change point detection algorithm; "
"large windows are less susceptible to noise; "
"however, a very large window may cause dismissing short regressions "
"as noise so it is best to keep it short enough to include not more "
"than a few change points (optimally at most 1)",
)
def analysis_options_from_args(args: argparse.Namespace) -> AnalysisOptions:
conf = AnalysisOptions()
if args.pvalue is not None:
conf.max_pvalue = args.pvalue
if args.magnitude is not None:
conf.min_magnitude = args.magnitude
if args.window is not None:
conf.window_len = args.window
return conf
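# Illustrative invocations of the CLI wired up in main() below (the test and
# group names are hypothetical):
#   hunter analyze my-test --since '10 weeks ago' --update-grafana
#   hunter regressions my-group --since-version 1.2.3
#   hunter list-metrics my-test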
def main():
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
parser = argparse.ArgumentParser(description="Hunts performance regressions in Fallout results")
subparsers = parser.add_subparsers(dest="command")
list_tests_parser = subparsers.add_parser("list-tests", help="list available tests")
list_tests_parser.add_argument("group", help="name of the group of the tests", nargs="*")
list_metrics_parser = subparsers.add_parser(
"list-metrics", help="list available metrics for a test"
)
list_metrics_parser.add_argument("test", help="name of the test")
subparsers.add_parser("list-groups", help="list available groups of tests")
analyze_parser = subparsers.add_parser(
"analyze",
help="analyze performance test results",
formatter_class=argparse.RawTextHelpFormatter,
)
analyze_parser.add_argument("tests", help="name of the test or group of the tests", nargs="+")
analyze_parser.add_argument(
"--update-grafana",
help="Update Grafana dashboards with appropriate annotations of change points",
action="store_true",
)
analyze_parser.add_argument(
"--notify-slack",
help="Send notification containing a summary of change points to given Slack channels",
nargs="+",
)
analyze_parser.add_argument(
"--cph-report-since",
help="Sets a limit on the date range of the Change Point History reported to Slack. Same syntax as --since.",
metavar="DATE",
dest="cph_report_since",
)
setup_data_selector_parser(analyze_parser)
setup_analysis_options_parser(analyze_parser)
regressions_parser = subparsers.add_parser("regressions", help="find performance regressions")
regressions_parser.add_argument(
"tests", help="name of the test or group of the tests", nargs="+"
)
setup_data_selector_parser(regressions_parser)
setup_analysis_options_parser(regressions_parser)
remove_annotations_parser = subparsers.add_parser("remove-annotations")
remove_annotations_parser.add_argument(
"tests", help="name of the test or test group", nargs="*"
)
remove_annotations_parser.add_argument(
"--force", help="don't ask questions, just do it", dest="force", action="store_true"
)
validate_parser = subparsers.add_parser("validate",
help="validates the tests and metrics defined in the configuration")
try:
args = parser.parse_args()
conf = config.load_config()
hunter = Hunter(conf)
if args.command == "list-groups":
hunter.list_test_groups()
if args.command == "list-tests":
group_names = args.group if args.group else None
hunter.list_tests(group_names)
if args.command == "list-metrics":
test = hunter.get_test(args.test)
hunter.list_metrics(test)
if args.command == "analyze":
update_grafana_flag = args.update_grafana
slack_notification_channels = args.notify_slack
slack_cph_since = parse_datetime(args.cph_report_since)
data_selector = data_selector_from_args(args)
options = analysis_options_from_args(args)
tests = hunter.get_tests(*args.tests)
tests_analyzed_series = {test.name: None for test in tests}
for test in tests:
try:
analyzed_series = hunter.analyze(test, selector=data_selector, options=options)
if update_grafana_flag:
if not isinstance(test, GraphiteTestConfig):
raise GrafanaError(f"Not a Graphite test")
hunter.update_grafana_annotations(test, analyzed_series)
if slack_notification_channels:
tests_analyzed_series[test.name] = analyzed_series
except DataImportError as err:
logging.error(err.message)
except GrafanaError as err:
logging.error(
f"Failed to update grafana dashboards for {test.name}: {err.message}"
)
if slack_notification_channels:
hunter.notify_slack(
tests_analyzed_series,
selector=data_selector,
channels=slack_notification_channels,
since=slack_cph_since,
)
if args.command == "regressions":
data_selector = data_selector_from_args(args)
options = analysis_options_from_args(args)
tests = hunter.get_tests(*args.tests)
regressing_test_count = 0
errors = 0
for test in tests:
try:
regressions = hunter.regressions(
test, selector=data_selector, options=options
)
if regressions:
regressing_test_count += 1
except HunterError as err:
logging.error(err.message)
errors += 1
except DataImportError as err:
logging.error(err.message)
errors += 1
if regressing_test_count == 0:
print("No regressions found!")
elif regressing_test_count == 1:
print("Regressions in 1 test found")
else:
print(f"Regressions in {regressing_test_count} tests found")
if errors > 0:
print(f"Some tests were skipped due to import / analyze errors. Consult error log.")
if args.command == "remove-annotations":
if args.tests:
tests = hunter.get_tests(*args.tests)
for test in tests:
hunter.remove_grafana_annotations(test, args.force)
else:
hunter.remove_grafana_annotations(None, args.force)
if args.command == "validate":
hunter.validate()
if args.command is None:
parser.print_usage()
except ConfigError as err:
logging.error(err.message)
exit(1)
except TestConfigError as err:
logging.error(err.message)
exit(1)
except GraphiteError as err:
logging.error(err.message)
exit(1)
except GrafanaError as err:
logging.error(err.message)
exit(1)
except DataImportError as err:
logging.error(err.message)
exit(1)
except HunterError as err:
logging.error(err.message)
exit(1)
except DateFormatError as err:
logging.error(err.message)
exit(1)
except NotificationError as err:
logging.error(err.message)
exit(1)
if __name__ == "__main__":
main()
| 38.630332
| 117
| 0.613953
| 2,818
| 24,453
| 5.131299
| 0.147622
| 0.019917
| 0.018811
| 0.015284
| 0.268534
| 0.206432
| 0.153734
| 0.116252
| 0.097718
| 0.071024
| 0
| 0.003619
| 0.29935
| 24,453
| 632
| 118
| 38.691456
| 0.840367
| 0.021592
| 0
| 0.208259
| 0
| 0.003591
| 0.177937
| 0.005227
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034111
| false
| 0
| 0.064632
| 0
| 0.132855
| 0.025135
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db4c9fb7ae81031adc5833740cfc20ab17a83afb
| 3,036
|
py
|
Python
|
docs/python/conf.py
|
jun-yoon/onnxruntime
|
806e24d5c69693533ed4b6fa56b84095efa5df70
|
[
"MIT"
] | 2
|
2019-01-29T03:48:42.000Z
|
2019-01-29T07:51:31.000Z
|
docs/python/conf.py
|
jun-yoon/onnxruntime
|
806e24d5c69693533ed4b6fa56b84095efa5df70
|
[
"MIT"
] | 2
|
2019-01-09T16:03:17.000Z
|
2019-02-13T13:58:28.000Z
|
docs/python/conf.py
|
jun-yoon/onnxruntime
|
806e24d5c69693533ed4b6fa56b84095efa5df70
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
import os
import sys
import shutil
# Check that these extensions are installed.
import sphinx_gallery.gen_gallery
# The package should be installed in a virtual environment.
import onnxruntime
# The documentation requires two extensions available at:
# https://github.com/xadupre/sphinx-docfx-yaml
# https://github.com/xadupre/sphinx-docfx-markdown
import sphinx_modern_theme
# -- Project information -----------------------------------------------------
project = 'ONNX Runtime'
copyright = '2018, Microsoft'
author = 'Microsoft'
version = onnxruntime.__version__
release = version
# -- General configuration ---------------------------------------------------
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
"sphinx.ext.autodoc",
'sphinx.ext.githubpages',
"sphinx_gallery.gen_gallery",
"docfx_yaml.extension",
"docfx_markdown",
"pyquickhelper.sphinxext.sphinx_runpython_extension",
]
templates_path = ['_templates']
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
source_suffix = ['.rst', '.md']
master_doc = 'intro'
language = "en"
exclude_patterns = []
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
html_theme = "sphinx_modern_theme"
html_theme_path = [sphinx_modern_theme.get_html_theme_path()]
html_logo = "../MSFT-Onnx-Runtime-11282019-Logo.png"
html_static_path = ['_static']
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for Sphinx Gallery ----------------------------------------------
sphinx_gallery_conf = {
'examples_dirs': 'examples',
'gallery_dirs': 'auto_examples',
}
# -- markdown options -----------------------------------------------------------
md_image_dest = "media"
md_link_replace = {
'#onnxruntimesessionoptionsenable-profiling)': '#class-onnxruntimesessionoptions)',
}
# -- Setup actions -----------------------------------------------------------
def setup(app):
# Placeholder to initialize the folder before
# generating the documentation.
app.add_stylesheet('_static/gallery.css')
# download examples for the documentation
this = os.path.abspath(os.path.dirname(__file__))
dest = os.path.join(this, "model.onnx")
if not os.path.exists(dest):
import urllib.request
url = 'https://raw.githubusercontent.com/onnx/onnx/master/onnx/backend/test/data/node/test_sigmoid/model.onnx'
urllib.request.urlretrieve(url, dest)
loc = os.path.split(dest)[-1]
if not os.path.exists(loc):
shutil.copy(dest, loc)
return app
| 29.192308
| 118
| 0.635705
| 320
| 3,036
| 5.86875
| 0.503125
| 0.033546
| 0.027157
| 0.024494
| 0.052183
| 0.034079
| 0
| 0
| 0
| 0
| 0
| 0.005366
| 0.140646
| 3,036
| 103
| 119
| 29.475728
| 0.71445
| 0.378129
| 0
| 0.033898
| 0
| 0.016949
| 0.373727
| 0.144772
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016949
| false
| 0
| 0.135593
| 0
| 0.169492
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db4cd73478117d82a5229f15076b8071351fd162
| 586
|
py
|
Python
|
traffic_sim/__main__.py
|
ngngardner/toc_project
|
15a111a2731b583f82e65c622d16d32af4fe3ae0
|
[
"MIT"
] | null | null | null |
traffic_sim/__main__.py
|
ngngardner/toc_project
|
15a111a2731b583f82e65c622d16d32af4fe3ae0
|
[
"MIT"
] | null | null | null |
traffic_sim/__main__.py
|
ngngardner/toc_project
|
15a111a2731b583f82e65c622d16d32af4fe3ae0
|
[
"MIT"
] | null | null | null |
"""Traffic simulator code."""
import sys
from os import path
from traffic_sim.analysis import TrafficExperiment
from traffic_sim.console import console
if not __package__:
_path = path.realpath(path.abspath(__file__))
sys.path.insert(0, path.dirname(path.dirname(_path)))
def main():
"""Run code from CLI."""
console.log('traffic sim')
num_trials = 30
ex = TrafficExperiment(
experiments=100,
trials=num_trials,
rows=10,
cols=10,
epochs=10,
)
ex.run()
ex.analyze()
if __name__ == '__main__':
main()
| 18.903226
| 57
| 0.643345
| 73
| 586
| 4.863014
| 0.520548
| 0.084507
| 0.078873
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026846
| 0.237201
| 586
| 30
| 58
| 19.533333
| 0.767338
| 0.071672
| 0
| 0
| 0
| 0
| 0.035647
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.190476
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db4e4eef4ddc738259fac8554c6c1cde5bc457e8
| 1,873
|
py
|
Python
|
demo.py
|
williamfzc/pyat
|
4e9792d4bfdc119d910eb88cf8a13a0ab7848518
|
[
"MIT"
] | 20
|
2018-11-01T03:49:56.000Z
|
2020-07-23T12:19:20.000Z
|
demo.py
|
williamfzc/pyat
|
4e9792d4bfdc119d910eb88cf8a13a0ab7848518
|
[
"MIT"
] | 2
|
2018-12-28T05:40:47.000Z
|
2019-05-20T02:23:29.000Z
|
demo.py
|
williamfzc/pyat
|
4e9792d4bfdc119d910eb88cf8a13a0ab7848518
|
[
"MIT"
] | 14
|
2018-11-01T09:01:38.000Z
|
2021-06-09T07:40:45.000Z
|
from pyatool import PYAToolkit
# Custom functions need a `toolkit` parameter, even if it goes unused
def test_b(toolkit):
return 'i am test_b, running on {}'.format(toolkit.device_id)
# Wrap an adb command as a method
PYAToolkit.bind_cmd(func_name='test_a', command='shell pm list package | grep google')
# Or bind a custom function
PYAToolkit.bind_func(real_func=test_b)
# Enable logging if desired
PYAToolkit.switch_logger(True)
# Initialization
d = PYAToolkit('123456F')
assert d.is_connected()
# Remote control is also supported (not yet stable, not recommended for now)
# d = PYAToolkit('123456F', mode='remote')
# Methods that have been bound can be called directly
result = d.test_a()
# Possible output:
# package:com.google.android.webview
# Custom functions work the same way
result = d.test_b()
# i am test_b, running on 123456F
# Functions can also be called via `std` or `standard_func` (code completion makes this convenient)
# Standard-library functions only; user extensions support direct calls only
d.std.get_current_activity(toolkit=d)
# Get all registered functions
all_functions = d.current_function()
print(all_functions)
# Usage of every standard function is listed below; feedback and changes are welcome
# Print the device id (for testing only)
d.hello_world()
# Show all installed packages
installed_package = d.show_package()
# Name of the activity at the top of the stack
current_activity_name = d.get_current_activity()
# Install a given apk (both url and path are supported); the example may take a
# while because it downloads from GitHub; change it as needed
d.install_from(url=r'https://github.com/williamfzc/simhand2/releases/download/v0.1.2/app-debug.apk')
# d.install_from(path=r'/Users/admin/some_path/some_apk.apk')
# Check whether a package is installed
target_package_name = 'com.github.williamfzc.simhand2'
is_installed = d.is_installed(package_name=target_package_name)
# Clear the cache
d.clean_cache(target_package_name)
if is_installed:
d.uninstall(target_package_name)
# Get the phone's IP address
local_address = d.get_ip_address()
print(local_address)
# Toggle wifi state
d.switch_wifi(False)
# Toggle airplane mode
d.switch_airplane(True)
d.switch_airplane(False)
d.switch_wifi(True)
# Switch the input method
d.set_ime('com.sohu.inputmethod.sogouoem/.SogouIME')
# push and pull
d.push('./README.md', '/sdcard/')
d.pull('/sdcard/README.md', './haha.md')
# send keyevent
d.input_key_event(26)
d.input_key_event(26)
# swipe
d.swipe(500, 1200, 500, 200)
# click
d.click(200, 200)
| 20.811111
| 100
| 0.767218
| 278
| 1,873
| 4.964029
| 0.52518
| 0.018116
| 0.049275
| 0.011594
| 0.047826
| 0.024638
| 0
| 0
| 0
| 0
| 0
| 0.027267
| 0.099306
| 1,873
| 89
| 101
| 21.044944
| 0.790753
| 0.293647
| 0
| 0.057143
| 0
| 0.028571
| 0.205267
| 0.053447
| 0
| 0
| 0
| 0
| 0.028571
| 1
| 0.028571
| false
| 0
| 0.028571
| 0.028571
| 0.085714
| 0.057143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db4f84187c639afbc8e53e791899d9a207e520b3
| 1,791
|
py
|
Python
|
nnlab/nn/graph.py
|
nlab-mpg/nnlab
|
56aabb53fa7b86601b35c7b8c9e890d50e19d9af
|
[
"MIT"
] | null | null | null |
nnlab/nn/graph.py
|
nlab-mpg/nnlab
|
56aabb53fa7b86601b35c7b8c9e890d50e19d9af
|
[
"MIT"
] | null | null | null |
nnlab/nn/graph.py
|
nlab-mpg/nnlab
|
56aabb53fa7b86601b35c7b8c9e890d50e19d9af
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
from six.moves import xrange, zip
import tensorflow as tf
from .tensor import Tensor
class Graph(object):
"""The class for defining computational graph."""
def __init__(self, loss=None, modules=None, inputs=None, outputs=None, monitors=None):
self._loss = loss
self._modules = modules if modules is not None else []
self._inputs = inputs
self._outputs = outputs
self._monitors = monitors
self._check_arguments(loss, modules, inputs, outputs, monitors)
def _check_arguments(self, loss, modules, inputs, outputs, monitors):
"""Verify the arguments."""
if loss is not None and not isinstance(loss, Tensor):
raise Exception("loss must be a tensor")
if modules is not None and not isinstance(modules, list):
raise Exception("modules must be a list")
if inputs is not None and not self._check_type(inputs):
raise Exception("input must be a tensor/list/dict")
if outputs is not None and not self._check_type(outputs):
raise Exception("output must be a tensor/list/dict")
if monitors is not None and not isinstance(monitors, dict):
raise Exception("monitors must be a dict")
def _check_type(self, obj):
"""Check whether the type is either a tensor or list or dict"""
return isinstance(obj, Tensor) or isinstance(obj, list) or isinstance(obj, dict)
@property
def loss(self):
return self._loss
@property
def modules(self):
return self._modules
@property
def inputs(self):
return self._inputs
| 33.166667
| 90
| 0.641541
| 233
| 1,791
| 4.811159
| 0.270386
| 0.026762
| 0.048171
| 0.053524
| 0.239072
| 0.157895
| 0.09099
| 0.049955
| 0
| 0
| 0
| 0.000767
| 0.272473
| 1,791
| 53
| 91
| 33.792453
| 0.859555
| 0.092686
| 0
| 0.088235
| 0
| 0
| 0.081468
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0.117647
| 0.088235
| 0.441176
| 0.029412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db5061768015e77516d7fdac7ebe34947ba071f8
| 18,798
|
py
|
Python
|
local-rotations.py
|
katiekruzan/masters-thesis
|
c9b89a0995957b5b50442b86ae8a38388f1fb720
|
[
"MIT"
] | null | null | null |
local-rotations.py
|
katiekruzan/masters-thesis
|
c9b89a0995957b5b50442b86ae8a38388f1fb720
|
[
"MIT"
] | null | null | null |
local-rotations.py
|
katiekruzan/masters-thesis
|
c9b89a0995957b5b50442b86ae8a38388f1fb720
|
[
"MIT"
] | null | null | null |
"""
Here we're going to code for the local rotations. We're doing an object-oriented approach
Left and right are in reference to the origin
"""
__version__ = 1.0
__author__ = 'Katie Kruzan'
import string # just to get the alphabet easily iterable
import sys # This just helps us in our printing
from typing import Dict # This helps us in our documentation
# Getting the structure for the classes we're putting together
class Segment:
"""
These are going to represent the outer segments and the mysteries they hold.
The segments will be adjacent to 2 outer nodes
"""
def __init__(self, name: str):
"""
Initialize the segment, keeping a place for the right and left outer vertices to which it is adjacent
:param name: How we will reference this segment. In this implementation, it is expected to be a negative integer
"""
self.leftOuter = None
self.rightOuter = None
self.name = name
def getName(self) -> str:
"""
Return the name we gave to this segment.
:return: name
"""
return self.name
def getLeftOuter(self):
"""
Return the outer node to the left of this segment with respect to the origin
:return: leftOuter
"""
return self.leftOuter
def getRightOuter(self):
"""
Return the outer node to the right of this segment with respect to the origin
:return: rightOuter
"""
return self.rightOuter
def setLeftOuter(self, left):
"""
Set the outer node to the left of this segment with respect to the origin
Also, set left's right segment to this segment.
:param left: An outer node object to be referenced as this segment's left outer node
:return: None
"""
self.leftOuter = left
if left.getRightSegment() is None:
left.setRightSegment(self)
def setRightOuter(self, right):
"""
Set the outer node to the right of this segment with respect to the origin
Also, set right's left segment to this segment.
:param right: An outer node object to be referenced as this segment's right outer node
:return: None
"""
self.rightOuter = right
if right.getLeftSegment() is None:
right.setLeftSegment(self)
def isValidObject(self) -> bool:
"""
Checks to see if this segment has been fully initialized.
:return: valid returns true if it has both the left and right outer nodes set
"""
if (self.leftOuter is None) or (self.rightOuter is None):
return False
return True
def toString(self) -> str:
"""
Returns a formatted string of the left and right outer nodes this is associated with
:return: Description string
"""
return 'left Outer: ' + self.leftOuter.getName() + '\nright Outer: ' + self.rightOuter.getName()
class Outer:
"""
Class to represent the outer vertices that are adjacent to an inner vertex and 2 outer segments
"""
def __init__(self, name: str):
"""
Initialize the outer node
Keeping a place for the inner vertex and right and left outer segments to which it is adjacent.
:param name: How we will reference this outer node. In this implementation, it is expected to be a positive integer
"""
self.adjInner = None
self.leftSegment = None
self.rightSegment = None
self.name = name
def getName(self) -> str:
"""
Return the name we gave to this outer node.
:return: name
"""
return self.name
def getLeftSegment(self) -> Segment:
"""
Return the segment object to the left of this outer node with respect to the origin
:return: leftSegment
"""
return self.leftSegment
def getRightSegment(self) -> Segment:
"""
Return the segment object to the right of this outer node with respect to the origin
:return: rightSegment
"""
return self.rightSegment
def getAdjInner(self):
"""
Return the inner node object adjacent to this outer note object
:return: adjInner
"""
return self.adjInner
def setLeftSegment(self, left: Segment):
"""
Set the segment to the left of this outer node with respect to the origin
Also, set left's right outer node to self.
:param left: A segment object to be referenced as this node's left outer segment
:return: None
"""
self.leftSegment = left
if left.getRightOuter() is None:
left.setRightOuter(self)
def setRightSegment(self, right: Segment):
"""
Set the segment to the right of this outer node with respect to the origin
Also, set right's left outer node to self.
:param right: A segment object to be referenced as this node's right outer segment
:return: None
"""
self.rightSegment = right
if right.getLeftOuter() is None:
right.setLeftOuter(self)
def setAdjInner(self, inner):
"""
Set the inner node adjacent to this outer node
Also, set inner's adjacent outer node to self.
:param inner: An inner node object to be referenced as this node's adjacent inner node
:return: None
"""
self.adjInner = inner
if inner.getAdjOuter() is None:
inner.setAdjOuter(self)
def isValidObject(self) -> bool:
"""
Checks to see if this outer node has been fully initialized.
:return: valid returns true if it has the left segment, right segment, and inner node set
"""
if (self.leftSegment is None) or (self.rightSegment is None) or (self.adjInner is None):
return False
return True
def toString(self) -> str:
"""
Returns a formatted string of the left segment, right segment, and inner node this outer node is associated with
:return: Description string
"""
return 'left Segment: ' + self.leftSegment.getName() + '\nright Segment: ' + self.rightSegment.getName() \
+ '\nadj Inner: ' + self.adjInner.getName()
class Inner:
"""
Class to represent the inner vertices that are adjacent to an outer vertex and 2 neighboring inner vertices
"""
def __init__(self, name: str):
"""
Initialize the inner node object
Keeping a place for the outer vertex and right and left adjacent inner nodes.
:param name: How we will reference this inner node. In this implementation, it is expected to be a lowercase letter
"""
self.adjOuter = None
self.leftInner = None
self.rightInner = None
self.name = name
def getName(self) -> str:
"""
Return the name we gave to this inner node.
:return: name
"""
return self.name
def getLeftInner(self):
"""
Return the inner node object to the left of this inner node with respect to the origin
:return: leftInner
"""
return self.leftInner
def getRightInner(self):
"""
Return the inner node object to the right of this inner node with respect to the origin
:return: rightInner
"""
return self.rightInner
def getAdjOuter(self) -> Outer:
"""
Return the outer node object adjacent to this inner node
:return: adjOuter
"""
return self.adjOuter
def setLeftInner(self, left):
"""
Set the inner node to the left of this inner node with respect to the origin
Also, set left's right inner node to self.
:param left: An inner node object to be referenced as this node's left inner node
:return: None
"""
self.leftInner = left
if left.getRightInner() is None:
left.setRightInner(self)
def setRightInner(self, right):
"""
Set the inner node to the right of this inner node with respect to the origin
Also, set right's left inner node to self.
:param right: An inner node object to be referenced as this node's right inner node
:return: None
"""
self.rightInner = right
if right.getLeftInner() is None:
right.setLeftInner(self)
def setAdjOuter(self, outer: Outer):
"""
Set the outer node adjacent to this inner node
Also, set outer's adjacent inner node to self.
:param outer: An outer node object to be referenced as this node's adjacent outer node
:return: None
"""
self.adjOuter = outer
if outer.getAdjInner() is None:
outer.setAdjInner(self)
def isValidObject(self) -> bool:
"""
Checks to see if this inner node has been fully initialized.
:return: valid returns true if it has the left inner node, right inner node, and adjacent outer node set
"""
if (self.leftInner is None) or (self.rightInner is None) or (self.adjOuter is None):
return False
return True
def toString(self) -> str:
"""
Returns a formatted string of the left inner node, right inner node, and adjacent outer node this inner node
is associated with
:return: Description string
"""
return 'left Inner: ' + self.leftInner.getName() + '\nright Inner: ' + self.rightInner.getName() \
+ '\nadj Outer: ' + self.adjOuter.getName()
def standardCircle(num_verts: int) -> (Dict[str, Segment], Dict[str, Outer], Dict[str, Inner]):
"""
This will go through and initialize our standard starting circle
:param num_verts: the number of outer nodes we will have
:returns: tuple(segs, outs, inns)
-segs - dictionary of str: Segment objects in the circle \\
-outs - dictionary of str: Outer objects in the circle \\
-inns - dictionary of str: Inner objects in the circle
"""
# Initializing our dictionaries
segs = dict()
outs = dict()
inns = dict()
# Running through the number of vertices we will be ending up with
for i in range(num_verts):
# start with an inner node - labeling with lowercase letters
inn = Inner(string.ascii_letters[i])
# If we aren't on the first one, connect it to the previous one.
if i != 0:
inn.setLeftInner(inns[string.ascii_letters[i - 1]])
# If we've hit the end of the line, go ahead and close up the circle.
if i == num_verts - 1:
inn.setRightInner(inns[string.ascii_letters[0]])
# then make the outer
out = Outer(str(i + 1))
# Go ahead and connect the inner we just made with this outer node
out.setAdjInner(inn)
# If we aren't on the first one, go ahead and connect it to the previous segment
if i != 0:
out.setLeftSegment(segs[str(-i)])
# Now time to make the segment
seg = Segment(str(-i - 1))
# Go ahead and connect the outer node we just made with this segment
seg.setLeftOuter(out)
# If we're at the end of the circle, then we close it up. Otherwise, move on
if i == num_verts - 1:
seg.setRightOuter(outs[str(1)])
# add them to our dictionaries
segs[seg.getName()] = seg
outs[out.getName()] = out
inns[inn.getName()] = inn
# If we've made it here, then we've made the full circle and are ready to return it
return segs, outs, inns
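# Illustrative result (derived from the loop above): standardCircle(3) yields
# inner nodes 'a','b','c', outer nodes '1','2','3', and segments '-1','-2','-3',
# with e.g. outer '1' adjacent to inner 'a' and sitting between segments
# '-3' (left) and '-1' (right).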
def findTheFace(source_in: Inner) -> list:
"""
This will take an inner node and use the algorithm to walk the face that it is on.
The order of the face will be i, o, s, o, i repeat
:param source_in: Inner node object we are starting from.
:return: face: a list representing the face. This list is of inner, outer, and segment objects in the
order i, o, s, o, i, repeat.
"""
# initialize the list
face = list()
# starting the face with the source inner node.
face.append(source_in)
# initialize the ending inner node we will be using for comparison
end_in = None
# As long as we haven't looped back around, go through the following process.
while source_in != end_in:
# inner: find adjacent outer
face.append(face[-1].getAdjOuter())
# outer: go to right seg
face.append(face[-1].getRightSegment())
# segment: go to right outer
face.append(face[-1].getRightOuter())
# outer: then adj inner
face.append(face[-1].getAdjInner())
# then left inner and repeat.
# set this inner node as our node to compare to our starting node.
end_in = face[-1].getLeftInner()
face.append(end_in)
return face
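# Illustrative walk (derived by hand from standardCircle(3); treat as a sketch):
# starting from inner 'a', the appended elements are a, 1, -1, 2, b, and then
# b's left inner is 'a' again, so the face closes as [a, 1, -1, 2, b, a].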
def faceCannonOrder(face: list) -> list:
"""
Just list the face with the face elements in order.
We will find the lowest-numbered element of the face and start just before it, which gives a consistent order.
:param face: a list representing the face. This list is of inner, outer, and segment objects in the
order i, o, s, o, i, repeat.
:return: ordered face in canonical order
"""
# find the first numerical face then go right before it
# initialize the lowest face number seen so far with a sentinel above any real face number
facenum = float('inf')
# initialize the int for where we will split the list
start_ind = 0
# loop through and find the face we want to find
for i in range(len(face)):
try:
if int(face[i].getName()) < facenum:
# To get here, we must have found a lower face
# keep track of where this is located in the list
start_ind = i - 1
# make our current lowest face the new lowest face to keep comparing to.
facenum = int(face[i].getName())
# casting a letter to int raises a ValueError, but that just means we're looking at
# an inner node, which we don't want for this comparison anyway.
except ValueError:
continue
# make our ordered face getting from the starting index to the end, then wrapping around and getting the rest of
# the face
ord_face = face[start_ind:] + face[:start_ind]
# go through and make sure we don't have any duplicate elements right by each other. If we do, then drop them.
for i in range(len(ord_face) - 1):
if ord_face[i].toString() == ord_face[i + 1].toString():
ord_face.pop(i)
break
# return the ordered face
return ord_face
def grabAllTheFaces(inns: Dict[str, Inner]) -> list:
"""
Function to get the list of unique faces for our circle.
:param inns: dictionary of Inner objects. We will loop through these to get the faces
:return: faces: List of distinct faces in canonical order.
"""
# initialize the list of faces
faces = list()
# a set of all the elements we have covered by the faces. Will use this for a completeness check
covered = set()
# run through every inner node we've been given
for inn in inns:
# Generate the face that inner node lies on
face = findTheFace(inns[inn])
# put the face we've gotten in canonical order
face = faceCannonOrder(face)
# Check if we've already captured it.
if face not in faces:
# If not, then add it to our list of faces
faces.append(face)
# Go ahead and add the elements in this face to our covered set
covered.update(face)
# check we've gotten all the elements
if len(covered) == (3 * len(inns)):
print('We got em!!!')
# Now return a list of all the faces we have.
return faces
def printCircleStatus(segs: Dict[str, Segment], outs: Dict[str, Outer], inns: Dict[str, Inner]):
"""
Helper function that prints the status of the circle to the console
:param segs: dictionary of str: Segment objects in the circle
:param outs: dictionary of str: Outer objects in the circle
:param inns: dictionary of str: Inner objects in the circle
:return: None
"""
# Run through the segments
print('\nSegments:')
for k in segs:
print()
print(k)
print(segs[k].toString())
# Run through the Outer nodes
print('\nOuters:')
for k in outs:
print()
print(k)
print(outs[k].toString())
# Run through the Inner nodes
print('\nInners:')
for k in inns:
print()
print(k)
print(inns[k].toString())
if __name__ == '__main__':
# This is where you change the variables.
# must be a positive integer > 2
verts = 12
# Must be a string with spaces between each element. If you want to denote multiple cycles, you must add a |
switch_txt = '2 3 4 5 | 12 7'
# we're going to make a list of all the switches and all the cycles
switches = list()
# first, we get the cycles, split by '|'
cycles = switch_txt.split('|')
for c in cycles:
# We're going to split the switch into a list split by the whitespace
s = c.strip().split()
# Then we're going to append the switches in the cycle to the new list
switches.append(s)
# Go ahead and make the standard circle given the number of vertices we want to use.
segments, outers, inners = standardCircle(verts)
# Go through and grab the faces for our standard circle
facs = grabAllTheFaces(inners)
print('\nPrinting the faces')
for f in facs:
print()
for p in f:
sys.stdout.write(p.getName() + ' ')
# Go through and do the switches for each cycle
for switch in switches:
for num in range(len(switch)):
# store the current part of the switch we're working on
cs = switch[num]
# store the next part of the switch we're working on, looping to the beginning if we're at the end
ns = switch[(num + 1) % len(switch)]
# Do the actual switch
# Getting the new inner and outer validly switched up
inners[string.ascii_letters[int(cs) - 1]].setAdjOuter(outers[ns])
outers[ns].setAdjInner(inners[string.ascii_letters[int(cs) - 1]])
# print how the final rotation sits
printCircleStatus(segments, outers, inners)
# Go through and generate and print the new faces
new_facs = grabAllTheFaces(inners)
print('\nPrinting the new faces')
for f in new_facs:
print()
for p in f:
sys.stdout.write(p.getName() + ' ')
| 36.500971
| 123
| 0.6223
| 2,620
| 18,798
| 4.441985
| 0.138931
| 0.031706
| 0.012287
| 0.016498
| 0.399639
| 0.320846
| 0.290686
| 0.26783
| 0.23449
| 0.197628
| 0
| 0.003053
| 0.303117
| 18,798
| 514
| 124
| 36.571984
| 0.885344
| 0.514895
| 0
| 0.19802
| 0
| 0
| 0.029792
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.163366
| false
| 0
| 0.014851
| 0
| 0.311881
| 0.094059
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db51ec07d8e04f942c3e7a0e0c331ea715cd23c8
| 19,075
|
py
|
Python
|
PT-FROST/frost.py
|
EtienneDavid/FROST
|
1cea124d69f07e3ac7e3ad074059d29c0849254c
|
[
"MIT"
] | 2
|
2020-12-21T12:46:06.000Z
|
2021-03-02T08:28:15.000Z
|
PT-FROST/frost.py
|
yogsin/FROST
|
1cea124d69f07e3ac7e3ad074059d29c0849254c
|
[
"MIT"
] | null | null | null |
PT-FROST/frost.py
|
yogsin/FROST
|
1cea124d69f07e3ac7e3ad074059d29c0849254c
|
[
"MIT"
] | 2
|
2020-12-20T15:04:24.000Z
|
2021-11-21T12:29:02.000Z
|
import random
import argparse
import numpy as np
import pandas as pd
import os
import time
import string
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import WideResnet
from cifar import get_train_loader, get_val_loader
from label_guessor import LabelGuessor
from lr_scheduler import WarmupCosineLrScheduler
from ema import EMA
import utils
## args
parser = argparse.ArgumentParser(description='FixMatch Training')
parser.add_argument('--wresnet-k', default=2, type=int, help='width factor of wide resnet')
parser.add_argument('--wresnet-n', default=28, type=int, help='depth of wide resnet')
parser.add_argument('--n-classes', type=int, default=10, help='number of classes in dataset')
parser.add_argument('--n-labeled', type=int, default=10, help='number of labeled samples for training')
parser.add_argument('--n-epochs', type=int, default=256, help='number of training epochs')
parser.add_argument('--batchsize', type=int, default=64, help='train batch size of labeled samples')
parser.add_argument('--mu', type=int, default=7, help='factor of train batch size of unlabeled samples')
parser.add_argument('--mu-c', type=int, default=1, help='factor of train batch size of contrastive learning samples')
parser.add_argument('--thr', type=float, default=0.95, help='pseudo label threshold')
parser.add_argument('--n-imgs-per-epoch', type=int, default=50000, help='number of training images for each epoch')
parser.add_argument('--lam-x', type=float, default=1., help='coefficient of labeled loss')
parser.add_argument('--lam-u', type=float, default=1., help='coefficient of unlabeled loss')
parser.add_argument('--lam-clr', type=float, default=1., help='coefficient of contrastive loss')
parser.add_argument('--ema-alpha', type=float, default=0.999, help='decay rate for ema module')
parser.add_argument('--lr', type=float, default=0.03, help='learning rate for training')
parser.add_argument('--weight-decay', type=float, default=5e-4, help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum for optimizer')
parser.add_argument('--seed', type=int, default=-1, help='seed for random behaviors, no seed if negative')
parser.add_argument('--feature_dim', default=128, type=int, help='Feature dim for latent vector')
parser.add_argument('--temperature', default=0.5, type=float, help='Temperature used in softmax')
parser.add_argument('--k', default=200, type=int, help='Top k most similar images used to predict the label')
parser.add_argument('--test', default=0, type=int, help='0 is softmax test function, 1 is similarity test function')
parser.add_argument('--bootstrap', type=int, default=16, help='Bootstrapping factor (default=16)')
parser.add_argument('--boot-schedule', type=int, default=1, help='Bootstrapping schedule (default=1)')
parser.add_argument('--balance', type=int, default=0, help='Balance class methods to use (default=0 None)')
parser.add_argument('--delT', type=float, default=0.2, help='Class balance threshold delta (default=0.2)')
args = parser.parse_args()
print(args)
# save results
save_name_pre = '{}_E{}_B{}_LX{}_LU{}_LCLR{}_THR{}_LR{}_WD{}'.format(args.n_labeled, args.n_epochs, args.batchsize,
args.lam_x, args.lam_u, args.lam_clr, args.thr, args.lr, args.weight_decay)
ticks = time.time()
result_dir = 'results/' + save_name_pre + '.' + str(ticks)
if not os.path.exists(result_dir):
os.mkdir(result_dir)
def set_model():
model = WideResnet(args.n_classes, k=args.wresnet_k, n=args.wresnet_n, feature_dim=args.feature_dim) # wresnet-28-2
model.train()
model.cuda()
criteria_x = nn.CrossEntropyLoss().cuda()
criteria_u = nn.CrossEntropyLoss().cuda()
return model, criteria_x, criteria_u
def train_one_epoch(
model,
criteria_x,
criteria_u,
optim,
lr_schdlr,
ema,
dltrain_x,
dltrain_u,
dltrain_all,
lb_guessor,
):
loss_avg, loss_x_avg, loss_u_avg, loss_clr_avg = [], [], [], []
epsilon = 0.000001
dl_u, dl_all = iter(dltrain_u), iter(dltrain_all)
for _, _, ims_all_1, ims_all_2, _ in tqdm(dl_all, desc='Training ...'):
ims_u_weak, ims_u_strong, _, _, lbs_u = next(dl_u)
loss_x, loss_u, loss_clr = torch.tensor(0).cuda(), torch.tensor(0).cuda(), torch.tensor(0).cuda()
fv_1, fv_2 = torch.tensor(0).cuda(), torch.tensor(0).cuda()
ims_u_weak = ims_u_weak.cuda()
ims_u_strong = ims_u_strong.cuda()
ims_all_1 = ims_all_1.cuda(non_blocking=True)
ims_all_2 = ims_all_2.cuda(non_blocking=True)
dl_x = iter(dltrain_x)
ims_x_weak, _, _, _, lbs_x = next(dl_x)
ims_x_weak = ims_x_weak.cuda()
lbs_x = lbs_x.cuda()
n_x, n_u, n_all = 0, 0, 0
if args.lam_u >= epsilon and args.lam_clr >= epsilon: # pseudo-labeling and contrastive learning
lbs_u, valid_u, mask_u = lb_guessor(model, ims_u_weak, args.balance, args.delT)
ims_u_strong = ims_u_strong[valid_u]
n_x, n_u, n_all = ims_x_weak.size(0), ims_u_strong.size(0), ims_all_1.size(0)
if n_u != 0:
ims_x_u_all_1 = torch.cat([ims_x_weak, ims_u_strong, ims_all_1], dim=0).detach()
ims_x_u_all_2 = torch.cat([ims_x_weak, ims_u_strong, ims_all_2], dim=0).detach()
logits_x_u_all_1, fv_1, z_1 = model(ims_x_u_all_1)
logits_x_u_all_2, fv_2, z_2 = model(ims_x_u_all_2)
logits_x_u_all = (logits_x_u_all_1 + logits_x_u_all_2) / 2
logits_x, logits_u = logits_x_u_all[:n_x], logits_x_u_all[n_x:(n_x + n_u)]
loss_x = criteria_x(logits_x, lbs_x)
if args.balance == 2 or args.balance == 3:
loss_u = (F.cross_entropy(logits_u, lbs_u, reduction='none') * mask_u).mean()
else:
loss_u = criteria_u(logits_u, lbs_u)
else: # n_u == 0
ims_x_all_1 = torch.cat([ims_x_weak, ims_all_1], dim=0).detach()
ims_x_all_2 = torch.cat([ims_x_weak, ims_all_2], dim=0).detach()
logits_x_all_1, fv_1, z_1 = model(ims_x_all_1)
logits_x_all_2, fv_2, z_2 = model(ims_x_all_2)
logits_x_all = (logits_x_all_1 + logits_x_all_2) / 2
logits_x = logits_x_all[:n_x]
loss_x = criteria_x(logits_x, lbs_x)
loss_u = torch.tensor(0)
elif args.lam_u >= epsilon: #lam_clr == 0: pseudo-labeling only
lbs_u, valid_u, mask_u = lb_guessor(model, ims_u_weak, args.balance, args.delT)
ims_u_strong = ims_u_strong[valid_u]
n_x, n_u = ims_x_weak.size(0), ims_u_strong.size(0)
if n_u != 0:
ims_x_u = torch.cat([ims_x_weak, ims_u_strong], dim=0).detach()
logits_x_u, _, _ = model(ims_x_u)
logits_x, logits_u = logits_x_u[:n_x], logits_x_u[n_x:]
loss_x = criteria_x(logits_x, lbs_x)
if args.balance == 2 or args.balance == 3:
loss_u = (F.cross_entropy(logits_u, lbs_u, reduction='none') * mask_u).mean()
else:
loss_u = criteria_u(logits_u, lbs_u)
else: # n_u == 0
logits_x, _, _ = model(ims_x_weak)
loss_x = criteria_x(logits_x, lbs_x)
loss_u = torch.tensor(0)
else: #lam_u == 0: contrastive learning only
n_x, n_all = ims_x_weak.size(0), ims_all_1.size(0)
ims_x_all_1 = torch.cat([ims_x_weak, ims_all_1], dim=0).detach()
ims_x_all_2 = torch.cat([ims_x_weak, ims_all_2], dim=0).detach()
logits_x_all_1, fv_1, z_1 = model(ims_x_all_1)
logits_x_all_2, fv_2, z_2 = model(ims_x_all_2)
logits_x_all = (logits_x_all_1 + logits_x_all_2) / 2
logits_x = logits_x_all[:n_x]
loss_x = criteria_x(logits_x, lbs_x)
loss_u = torch.tensor(0)
if args.lam_clr >= epsilon:
#compute l_clr
fv_1 = fv_1[(n_x + n_u):]
fv_2 = fv_2[(n_x + n_u):]
z_1 = z_1[(n_x + n_u):]
z_2 = z_2[(n_x + n_u):]
#[2*muc*B, D]
z = torch.cat([z_1, z_2], dim=0)
#[2*muc*B, 2*muc*B]
sim_matrix = torch.exp(torch.mm(z, z.t().contiguous()) / args.temperature) #denominator
#[2*muc*B, 2*muc*B]
# mask = (torch.ones_like(sim_matrix) - torch.eye(2 * args.mu_c * args.batchsize, device=sim_matrix.device)).bool()
mask = (torch.ones_like(sim_matrix) - torch.eye(2 * args.mu_c * args.batchsize, device=sim_matrix.device))
mask = mask > 0
#[2*muc*B, 2*muc*B - 1]
sim_matrix = sim_matrix.masked_select(mask).view(2 * args.mu_c * args.batchsize, -1)
#[muc*B]
pos_sim = torch.exp(torch.sum(z_1 * z_2, dim=-1) / args.temperature) #numerator
#[2*muc*B]
pos_sim = torch.cat([pos_sim, pos_sim], dim=0)
loss_clr = (- torch.log(pos_sim / sim_matrix.sum(dim=-1))).mean()
#compute loss
loss = args.lam_x * loss_x + args.lam_u * loss_u + args.lam_clr * loss_clr
optim.zero_grad()
loss.backward()
optim.step()
ema.update_params()
lr_schdlr.step()
loss_x_avg.append(loss_x.item())
loss_u_avg.append(loss_u.item())
loss_clr_avg.append(loss_clr.item())
loss_avg.append(loss.item())
ema.update_buffer()
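# A minimal self-contained sketch (hypothetical helper, not part of the original training
# loop) of the NT-Xent contrastive loss computed above, for two batches of projections
# z_1, z_2 of shape [B, D]. It mirrors the masked-similarity computation in
# train_one_epoch under the assumption that z_1 and z_2 are L2-normalized.
def _ntxent_sketch(z_1, z_2, temperature=0.5):
    B = z_1.size(0)
    z = torch.cat([z_1, z_2], dim=0)                                  # [2B, D]
    sim = torch.exp(torch.mm(z, z.t()) / temperature)                 # [2B, 2B] denominator terms
    mask = ~torch.eye(2 * B, dtype=torch.bool, device=sim.device)     # drop self-similarities
    denom = sim.masked_select(mask).view(2 * B, -1).sum(dim=-1)       # [2B]
    pos = torch.exp(torch.sum(z_1 * z_2, dim=-1) / temperature)       # [B] numerator terms
    pos = torch.cat([pos, pos], dim=0)                                # [2B]
    return (-torch.log(pos / denom)).mean()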
def evaluate(ema):
ema.apply_shadow()
ema.model.eval()
ema.model.cuda()
dlval = get_val_loader(batch_size=128, num_workers=0)
matches = []
for ims, lbs in dlval:
ims = ims.cuda()
lbs = lbs.cuda()
with torch.no_grad():
logits, _, _ = ema.model(ims)
scores = torch.softmax(logits, dim=1)
_, preds = torch.max(scores, dim=1)
match = lbs == preds
matches.append(match)
matches = torch.cat(matches, dim=0).float()
acc = torch.mean(matches)
ema.restore()
return acc
def test(model, memory_data_loader, test_data_loader, c, epoch):
model.eval()
total_top1, total_top5, total_num, feature_bank, feature_labels = 0.0, 0.0, 0, [], []
with torch.no_grad():
# generate feature bank
for data, _, _ in tqdm(memory_data_loader, desc='Feature extracting'):
logits, feature, _ = model(data.cuda(non_blocking=True))
feature_bank.append(feature)
feature_labels.append(torch.argmax(logits, dim=1).to(torch.int64))  # avoid torch.tensor(tensor) copy warning
# [D, N]
feature_bank = torch.cat(feature_bank, dim=0).t().contiguous()
# [N]
feature_labels = torch.cat(feature_labels, dim=0).contiguous().cpu()
# loop test data to predict the label by weighted knn search
test_bar = tqdm(test_data_loader)
for data, _, target in test_bar:
# data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)
data = data.cuda(non_blocking=True)
_, feature, _ = model(data)
total_num += data.size(0)
# compute cos similarity between each feature vector and feature bank ---> [B, N]
sim_matrix = torch.mm(feature, feature_bank)
# [B, K]
sim_weight, sim_indices = sim_matrix.topk(k=args.k, dim=-1)
# [B, K]
# sim_labels = torch.gather(feature_labels.expand(data.size(0), -1), dim=-1, index=sim_indices)
sim_labels = torch.gather(feature_labels.expand(data.size(0), -1), dim=-1, index=sim_indices.cpu())
sim_weight = (sim_weight / args.temperature).exp()
# counts for each class
one_hot_label = torch.zeros(data.size(0) * args.k, c, device=sim_labels.device)
# [B*K, C]
one_hot_label = one_hot_label.scatter(-1, sim_labels.view(-1, 1), 1.0)
# weighted score ---> [B, C]
pred_scores = torch.sum(one_hot_label.view(data.size(0), -1, c) * sim_weight.cpu().unsqueeze(dim=-1), dim=1)
pred_labels = pred_scores.argsort(dim=-1, descending=True)
total_top1 += torch.sum((pred_labels[:, :1] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()
test_bar.set_description('Test Epoch: [{}/{}] Acc@1:{:.2f}%'
.format(epoch, args.n_epochs, total_top1 / total_num * 100))
return total_top1 / total_num * 100
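# A toy sketch (hypothetical helper; shapes are assumptions) of the weighted kNN vote used
# in test() above: given a [B, D] query batch, a [D, N] feature bank, and [N] int64 bank
# labels, score each of c classes by temperature-scaled similarity of the top-k neighbors.
def _weighted_knn_sketch(feature, feature_bank, feature_labels, c, k=3, temperature=0.5):
    sim_matrix = torch.mm(feature, feature_bank)                           # [B, N]
    sim_weight, sim_indices = sim_matrix.topk(k=k, dim=-1)                 # [B, K]
    sim_labels = torch.gather(feature_labels.expand(feature.size(0), -1),
                              dim=-1, index=sim_indices)                   # [B, K]
    sim_weight = (sim_weight / temperature).exp()
    one_hot = torch.zeros(feature.size(0) * k, c, device=sim_labels.device)
    one_hot = one_hot.scatter(-1, sim_labels.view(-1, 1), 1.0)             # [B*K, C]
    scores = torch.sum(one_hot.view(feature.size(0), -1, c)
                       * sim_weight.unsqueeze(dim=-1), dim=1)              # [B, C]
    return scores.argsort(dim=-1, descending=True)                         # ranked class ids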
def get_random_string(length):
letters = string.ascii_lowercase
result_str = ''.join(random.choice(letters) for i in range(length))
return result_str
def sort_unlabeled(ema,numPerClass):
ema.apply_shadow()
ema.model.eval()
ema.model.cuda()
n_iters_per_epoch = args.n_imgs_per_epoch // args.batchsize
_, _, dltrain_all = get_train_loader(args.batchsize, 1, 1, n_iters_per_epoch, L=args.n_classes*numPerClass, seed=args.seed)
predicted = []
labels = []
for ims_w, _, _, _, lbs in dltrain_all:
ims = ims_w.cuda()
labels.append(lbs)
with torch.no_grad():
logits, _, _ = ema.model(ims)
scores = torch.softmax(logits, dim=1)
predicted.append(scores.cpu())
print( "labels ",len(labels))
labels = np.concatenate(labels, axis=0)
print( "labels ",len(labels))
predicted = np.concatenate( predicted, axis=0)
preds = predicted.argmax(1)
probs = predicted.max(1)
top = np.argsort(-probs,axis=0)
del dltrain_all, logits
labeledSize =args.n_classes * numPerClass
unique_train_pseudo_labels, unique_train_counts = np.unique(preds, return_counts=True)
print("Number of training pseudo-labels in each class: ", unique_train_counts," for classes: ", unique_train_pseudo_labels)
sortByClass = np.random.randint(0,high=len(top), size=(args.n_classes, numPerClass), dtype=int)
indx = np.zeros([args.n_classes], dtype=int)
matches = np.zeros([args.n_classes, numPerClass], dtype=int)
labls = preds[top]
samples = top
for i in range(len(top)):
if indx[labls[i]] < numPerClass:
sortByClass[labls[i], indx[labls[i]]] = samples[i]
if labls[i] == labels[top[i]]:
matches[labls[i], indx[labls[i]]] = 1
indx[labls[i]] += 1
if min(indx) < numPerClass:
print("Counts of at least one class ", indx, " is lower than ", numPerClass)
name = "dataset/seeds/size"+str(labeledSize)+"." + get_random_string(8) + ".npy"
np.save(name, sortByClass[0:args.n_classes, :numPerClass])
classAcc = 100*np.sum(matches, axis=1)/numPerClass
print("Accuracy of the predicted pseudo-labels: top ", labeledSize, ", ", np.mean(classAcc), classAcc )
ema.restore()
return name
def train():
n_iters_per_epoch = args.n_imgs_per_epoch // args.batchsize
n_iters_all = n_iters_per_epoch * args.n_epochs #/ args.mu_c
epsilon = 0.000001
model, criteria_x, criteria_u = set_model()
lb_guessor = LabelGuessor(thresh=args.thr)
ema = EMA(model, args.ema_alpha)
wd_params, non_wd_params = [], []
for param in model.parameters():
if len(param.size()) == 1:
non_wd_params.append(param)
else:
wd_params.append(param)
param_list = [{'params': wd_params}, {'params': non_wd_params, 'weight_decay': 0}]
optim = torch.optim.SGD(param_list, lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum, nesterov=True)
lr_schdlr = WarmupCosineLrScheduler(optim, max_iter=n_iters_all, warmup_iter=0)
dltrain_x, dltrain_u, dltrain_all = get_train_loader(args.batchsize, args.mu, args.mu_c, n_iters_per_epoch,
L=args.n_labeled, seed=args.seed)
train_args = dict(
model=model,
criteria_x=criteria_x,
criteria_u=criteria_u,
optim=optim,
lr_schdlr=lr_schdlr,
ema=ema,
dltrain_x=dltrain_x,
dltrain_u=dltrain_u,
dltrain_all=dltrain_all,
lb_guessor=lb_guessor,
)
n_labeled = int(args.n_labeled / args.n_classes)
best_acc, top1 = -1, -1
results = {'top 1 acc': [], 'best_acc': []}
b_schedule = [args.n_epochs/2, 3*args.n_epochs/4]
if args.boot_schedule == 1:
step = int(args.n_epochs/3)
b_schedule = [step, 2*step]
elif args.boot_schedule == 2:
step = int(args.n_epochs/4)
b_schedule = [step, 2*step, 3*step]
for e in range(args.n_epochs):
if args.bootstrap > 1 and (e in b_schedule):
seed = 99
n_labeled *= args.bootstrap
name = sort_unlabeled(ema, n_labeled)
print("Bootstrap at epoch ", e," Name = ",name)
dltrain_x, dltrain_u, dltrain_all = get_train_loader(args.batchsize, args.mu, args.mu_c, n_iters_per_epoch,
L=10*n_labeled, seed=seed, name=name)
train_args = dict(
model=model,
criteria_x=criteria_x,
criteria_u=criteria_u,
optim=optim,
lr_schdlr=lr_schdlr,
ema=ema,
dltrain_x=dltrain_x,
dltrain_u=dltrain_u,
dltrain_all=dltrain_all,
lb_guessor=lb_guessor,
)
model.train()
train_one_epoch(**train_args)
torch.cuda.empty_cache()
if args.test == 0 or args.lam_clr < epsilon:
top1 = evaluate(ema) * 100
elif args.test == 1:
memory_data = utils.CIFAR10Pair(root='dataset', train=True, transform=utils.test_transform, download=False)
memory_data_loader = DataLoader(memory_data, batch_size=args.batchsize, shuffle=False, num_workers=16, pin_memory=True)
test_data = utils.CIFAR10Pair(root='dataset', train=False, transform=utils.test_transform, download=False)
test_data_loader = DataLoader(test_data, batch_size=args.batchsize, shuffle=False, num_workers=16, pin_memory=True)
c = len(memory_data.classes) #10
top1 = test(model, memory_data_loader, test_data_loader, c, e)
best_acc = top1 if best_acc < top1 else best_acc
results['top 1 acc'].append('{:.4f}'.format(top1))
results['best_acc'].append('{:.4f}'.format(best_acc))
data_frame = pd.DataFrame(data=results)
data_frame.to_csv(result_dir + '/' + save_name_pre + '.accuracy.csv', index_label='epoch')
log_msg = [
'epoch: {}'.format(e + 1),
'top 1 acc: {:.4f}'.format(top1),
'best_acc: {:.4f}'.format(best_acc)]
print(', '.join(log_msg))
if __name__ == '__main__':
train()
| 46.074879
| 131
| 0.618244
| 2,794
| 19,075
| 3.951324
| 0.128132
| 0.02029
| 0.040036
| 0.007971
| 0.369928
| 0.311685
| 0.26875
| 0.236413
| 0.220924
| 0.19846
| 0
| 0.022134
| 0.251533
| 19,075
| 413
| 132
| 46.186441
| 0.751138
| 0.045242
| 0
| 0.247813
| 0
| 0
| 0.089674
| 0.002366
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020408
| false
| 0
| 0.052478
| 0
| 0.087464
| 0.023324
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db52db9f4875bf2abe871f56389adc2f255c93ca
| 8,456
|
py
|
Python
|
Logistic Regression/main.py
|
Frightera/LR-and-NN-for-Cancer-Data
|
54f8c9455af529c512efe012d8b3ed3f6b594a57
|
[
"MIT"
] | 4
|
2021-03-10T22:18:35.000Z
|
2022-03-06T15:37:23.000Z
|
Logistic Regression/main.py
|
Frightera/LR-From-Scratch
|
54f8c9455af529c512efe012d8b3ed3f6b594a57
|
[
"MIT"
] | null | null | null |
Logistic Regression/main.py
|
Frightera/LR-From-Scratch
|
54f8c9455af529c512efe012d8b3ed3f6b594a57
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
data = pd.read_csv("data.csv")
data.info()
"""
Data columns (total 33 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 id 569 non-null int64
.
.
.
32 Unnamed: 32 0 non-null float64
"""
data.drop(["Unnamed: 32", "id"], axis = 1, inplace = True)
# data.head(10)
data.diagnosis = [1 if each == "M" else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(["diagnosis"], axis = 1)
# %% Normalization
x_normalized = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data)).values
x_data.head()
"""
x_data.head()
Out[9]:
radius_mean texture_mean ... symmetry_worst fractal_dimension_worst
0 17.99 10.38 ... 0.4601 0.11890
1 20.57 17.77 ... 0.2750 0.08902
2 19.69 21.25 ... 0.3613 0.08758
3 11.42 20.38 ... 0.6638 0.17300
4 20.29 14.34 ... 0.2364 0.07678
"""
x_normalized.head()
"""
x_normalized.head()
Out[10]:
radius_mean texture_mean ... symmetry_worst fractal_dimension_worst
0 0.521037 0.022658 ... 0.598462 0.418864
1 0.643144 0.272574 ... 0.233590 0.222878
2 0.601496 0.390260 ... 0.403706 0.213433
3 0.210090 0.360839 ... 1.000000 0.773711
4 0.629893 0.156578 ... 0.157500 0.142595
"""
# %% train test split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_normalized,y,test_size = 0.25, random_state = 42)
# test size & random state can be changed, test size can be choosen as 0.2 or 0.18
# sklearn randomly splits, with given state data will be splitted with same random pattern.
# rows as features
x_train = x_train.T
x_test = x_test.T
y_train = y_train.T
y_test = y_test.T
# %% Parameter Initialize
"""
If all the weights were initialized to zero,
backpropagation will not work as expected because the gradient for the intermediate neurons
and starting neurons will die out(become zero) and will not update ever.
"""
def initialize_weights_and_bias(dimension):
w = np.full((dimension,1), 0.01) # init 0.01
b = np.zeros(1)
return w,b
def sigmoid(n):
y_hat = 1 / (1 + np.exp(-n))
return y_hat
# %%
def forward_backward_propagation(w,b,x_train,y_train):
# forward propagation
z = np.dot(w.T,x_train) + b
#y_train = y_train.T.reshape(-1,1)
y_hat = sigmoid(z)
loss = -(y_train*np.log(y_hat)+(1-y_train)*np.log(1-y_hat))
cost = (np.sum(loss))/x_train.shape[1] # x_train.shape[1] is for scaling
# Once cost is calculated, forward prop. is completed.
# backward propagation
derivative_weight = (np.dot(x_train,((y_hat-y_train).T)))/x_train.shape[1] # x_train.shape[1] is for scaling
derivative_bias = np.sum(y_hat-y_train)/x_train.shape[1] # x_train.shape[1] is for scaling
# x_train.shape[1] = 426
gradients = {"derivative_weight": derivative_weight,"derivative_bias": derivative_bias}
return cost,gradients
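# A quick numerical gradient check (hypothetical helper; sizes and the perturbed index are
# arbitrary) that the analytic weight gradient above matches a central finite difference
# on a tiny random problem.
def _gradient_check(eps=1e-5):
    rng = np.random.RandomState(0)
    w, b = initialize_weights_and_bias(4)
    x = rng.rand(4, 7)                       # 4 features, 7 samples
    y = rng.randint(0, 2, size=(1, 7))       # binary labels
    cost, grads = forward_backward_propagation(w, b, x, y)
    w_plus = w.copy(); w_plus[0, 0] += eps
    w_minus = w.copy(); w_minus[0, 0] -= eps
    num_grad = (forward_backward_propagation(w_plus, b, x, y)[0]
                - forward_backward_propagation(w_minus, b, x, y)[0]) / (2 * eps)
    print("analytic:", grads["derivative_weight"][0, 0], "numeric:", num_grad)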
# Updating(learning) parameters
def update(w, b, x_train, y_train, learning_rate,number_of_iteration):
cost_list = []
cost_list2 = []
index = []
# update (learn) the parameters number_of_iteration times
for i in range(number_of_iteration):
# make forward and backward propagation and find cost and gradients
cost,gradients = forward_backward_propagation(w,b,x_train,y_train)
cost_list.append(cost)
# lets update
w = w - learning_rate * gradients["derivative_weight"]
b = b - learning_rate * gradients["derivative_bias"]
if i % 100 == 0: # that's arbitrary, you can set it differently
cost_list2.append(cost)
index.append(i)
print ("Cost after iteration %i: %f" %(i, cost))
# we update(learn) parameters weights and bias
parameters = {"weight": w,"bias": b}
plt.plot(index,cost_list2)
plt.xticks(index,rotation='vertical')
plt.xlabel("Number of Iteration")
plt.ylabel("Cost")
plt.legend()  # no labeled artists, hence the "No handles with labels" warnings in the outputs below
plt.show()
return parameters, gradients, cost_list
# prediction
def predict(w,b,x_test):
# x_test is a input for forward propagation
z = sigmoid(np.dot(w.T,x_test)+b)
Y_prediction = np.zeros((1,x_test.shape[1]))
# if z is greater than 0.5, our prediction is one - true (y_hat=1),
# if z is less than or equal to 0.5, our prediction is zero - false (y_hat=0)
for i in range(z.shape[1]):
if z[0,i]<= 0.5:
Y_prediction[0,i] = 0
else:
Y_prediction[0,i] = 1
return Y_prediction
#implementing logistic regression
def logistic_regression(x_train, y_train, x_test, y_test, learning_rate , num_iterations):
# initialize
dimension = x_train.shape[0]
w,b = initialize_weights_and_bias(dimension)
# do not change learning rate
parameters, gradients, cost_list = update(w, b, x_train, y_train, learning_rate,num_iterations)
y_prediction_test = predict(parameters["weight"],parameters["bias"],x_test)
y_pred_train = predict(parameters["weight"],parameters["bias"],x_train)
# Print accuracy
print("test accuracy: {} %".format(100 - np.mean(np.abs(y_prediction_test - y_test)) * 100))
print("train accuracy: {} %".format(100 - np.mean(np.abs(y_pred_train - y_train)) * 100))
# %% Hyperparameter tuning
logistic_regression(x_train, y_train, x_test, y_test,learning_rate = 3, num_iterations = 1500)
"""
Cost after iteration 0: 0.693035
Cost after iteration 100: 0.153169
Cost after iteration 200: 0.121662
Cost after iteration 300: 0.107146
Cost after iteration 400: 0.098404
Cost after iteration 500: 0.092401
Cost after iteration 600: 0.087937
Cost after iteration 700: 0.084435
Cost after iteration 800: 0.081582
Cost after iteration 900: 0.079191
Cost after iteration 1000: 0.077143
Cost after iteration 1100: 0.075359
Cost after iteration 1200: 0.073784
Cost after iteration 1300: 0.072378
Cost after iteration 1400: 0.071111
No handles with labels found to put in legend.
test accuracy: 98.6013986013986 %
train accuracy: 98.35680751173709 %
"""
logistic_regression(x_train, y_train, x_test, y_test,learning_rate = 1, num_iterations = 1500)
"""
Cost after iteration 0: 0.693035
Cost after iteration 100: 0.226383
Cost after iteration 200: 0.176670
Cost after iteration 300: 0.153585
Cost after iteration 400: 0.139306
Cost after iteration 500: 0.129319
Cost after iteration 600: 0.121835
Cost after iteration 700: 0.115963
Cost after iteration 800: 0.111204
Cost after iteration 900: 0.107248
No handles with labels found to put in legend.
Cost after iteration 1000: 0.103893
Cost after iteration 1100: 0.101001
Cost after iteration 1200: 0.098474
Cost after iteration 1300: 0.096240
Cost after iteration 1400: 0.094247
test accuracy: 97.9020979020979 %
train accuracy: 98.12206572769954 %
"""
logistic_regression(x_train, y_train, x_test, y_test,learning_rate = 0.3, num_iterations = 1500)
"""
Cost after iteration 0: 0.693035
Cost after iteration 100: 0.357455
Cost after iteration 200: 0.274917
Cost after iteration 300: 0.235865
Cost after iteration 400: 0.212165
Cost after iteration 500: 0.195780
Cost after iteration 600: 0.183524
Cost after iteration 700: 0.173868
Cost after iteration 800: 0.165980
Cost after iteration 900: 0.159363
Cost after iteration 1000: 0.153700
Cost after iteration 1100: 0.148775
Cost after iteration 1200: 0.144439
Cost after iteration 1300: 0.140581
Cost after iteration 1400: 0.137119
No handles with labels found to put in legend.
test accuracy: 97.9020979020979 %
train accuracy: 96.94835680751174 %
"""
# %% Sklearn
from sklearn.linear_model import LogisticRegression
x_train = x_train.T
x_test = x_test.T
y_train = y_train.T
y_test = y_test.T
logreg = LogisticRegression(random_state = 42,max_iter= 1500)
print("test accuracy: {} ".format(logreg.fit(x_train, y_train).score(x_test, y_test)))
print("train accuracy: {} ".format(logreg.fit(x_train, y_train).score(x_train, y_train)))
"""
test accuracy: 0.986013986013986
train accuracy: 0.9671361502347418
"""
# %%
| 35.830508
| 113
| 0.674078
| 1,297
| 8,456
| 4.258288
| 0.257517
| 0.074959
| 0.149919
| 0.0239
| 0.420786
| 0.245519
| 0.20641
| 0.20641
| 0.195908
| 0.163136
| 0
| 0.143202
| 0.217124
| 8,456
| 235
| 114
| 35.982979
| 0.691088
| 0.12973
| 0
| 0.095238
| 0
| 0
| 0.067378
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.202381
| 0.059524
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db5470b1f6ebd8cb49e975c2e7b8774a4d607820
| 2,446
|
py
|
Python
|
fine-tune/inference_embedding.py
|
LinHuiqing/nonparaSeq2seqVC_code
|
d40a0cb9dc11c77b8af56b8510e4ab041f2f2b25
|
[
"MIT"
] | 199
|
2019-12-13T03:11:21.000Z
|
2022-03-29T15:44:49.000Z
|
fine-tune/inference_embedding.py
|
LinHuiqing/nonparaSeq2seqVC_code
|
d40a0cb9dc11c77b8af56b8510e4ab041f2f2b25
|
[
"MIT"
] | 39
|
2019-12-16T20:08:45.000Z
|
2022-02-10T00:36:40.000Z
|
fine-tune/inference_embedding.py
|
LinHuiqing/nonparaSeq2seqVC_code
|
d40a0cb9dc11c77b8af56b8510e4ab041f2f2b25
|
[
"MIT"
] | 57
|
2019-12-16T23:25:25.000Z
|
2022-03-28T18:04:16.000Z
|
import os
import numpy as np
import torch
import argparse
from hparams import create_hparams
from model import lcm
from train import load_model
from torch.utils.data import DataLoader
from reader import TextMelIDLoader, TextMelIDCollate, id2sp
from inference_utils import plot_data
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--checkpoint_path', type=str,
help='directory to save checkpoints')
parser.add_argument('--hparams', type=str,
required=False, help='comma separated name=value pairs')
args = parser.parse_args()
checkpoint_path=args.checkpoint_path
hparams = create_hparams(args.hparams)
model = load_model(hparams)
model.load_state_dict(torch.load(checkpoint_path)['state_dict'], strict=False)
_ = model.eval()
def gen_embedding(speaker):
training_list = hparams.training_list
train_set_A = TextMelIDLoader(training_list, hparams.mel_mean_std, hparams.speaker_A,
hparams.speaker_B,
shuffle=False,pids=[speaker])
collate_fn = TextMelIDCollate(lcm(hparams.n_frames_per_step_encoder,
hparams.n_frames_per_step_decoder))
train_loader_A = DataLoader(train_set_A, num_workers=1, shuffle=False,
sampler=None,
batch_size=1, pin_memory=False,
drop_last=True, collate_fn=collate_fn)
with torch.no_grad():
speaker_embeddings = []
for i,batch in enumerate(train_loader_A):
#print i
x, y = model.parse_batch(batch)
text_input_padded, mel_padded, text_lengths, mel_lengths, speaker_id = x
speaker_id, speaker_embedding = model.speaker_encoder.inference(mel_padded)
speaker_embedding = speaker_embedding.data.cpu().numpy()
speaker_embeddings.append(speaker_embedding)
speaker_embeddings = np.vstack(speaker_embeddings)
print(speaker_embeddings.shape)
if not os.path.exists('outdir/embeddings'):
os.makedirs('outdir/embeddings')
np.save('outdir/embeddings/%s.npy'%speaker, speaker_embeddings)
plot_data([speaker_embeddings],
'outdir/embeddings/%s.pdf'%speaker)
print('Generating embedding of %s ...'%hparams.speaker_A)
gen_embedding(hparams.speaker_A)
print('Generating embedding of %s ...'%hparams.speaker_B)
gen_embedding(hparams.speaker_B)
| 33.054054
| 89
| 0.688062
| 297
| 2,446
| 5.407407
| 0.387205
| 0.074097
| 0.02802
| 0.021171
| 0.07721
| 0.051059
| 0.051059
| 0
| 0
| 0
| 0
| 0.001573
| 0.22036
| 2,446
| 73
| 90
| 33.506849
| 0.840587
| 0.002862
| 0
| 0
| 0
| 0
| 0.098852
| 0.019688
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.196078
| 0
| 0.215686
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db569a6325c560b769cb648e074b4a8fea4a1b00
| 3,954
|
py
|
Python
|
bombgame/recursive_bt_maze.py
|
JeFaProductions/bombgame2
|
fc2ca7c6606aecd2bec013ed307aa344a0adffc7
|
[
"MIT"
] | null | null | null |
bombgame/recursive_bt_maze.py
|
JeFaProductions/bombgame2
|
fc2ca7c6606aecd2bec013ed307aa344a0adffc7
|
[
"MIT"
] | 2
|
2019-04-04T13:53:11.000Z
|
2019-11-28T17:02:00.000Z
|
bombgame/recursive_bt_maze.py
|
JeFaProductions/bombgame2
|
fc2ca7c6606aecd2bec013ed307aa344a0adffc7
|
[
"MIT"
] | null | null | null |
# recursive_bt_maze.py
#
# Author: Jens Gansloser
# Created On: 16 Feb 2019
import os
import random
import numpy as np
class RecursiveBTMaze:
def __init__(self, width, height):
if width % 2 == 0 or height % 2 == 0:
raise ValueError("Width and height need to be odd.")
self.width = width
self.height = height
self.go = {'N': np.array([0, 2]),
'E': np.array([2, 0]),
'S': np.array([0, -2]),
'W': np.array([-2, 0])}
self.go_half = {key: (0.5 * value).astype(int) for key, value in self.go.items()}
self.opposite = {'N': 'S', 'E': 'W', 'S': 'N', 'W': 'E'}
# 0: path, 1: wall.
self.data = np.ones((height, width), dtype=int)
self.stack = []
index = np.array([random.randint(0, self.height - 1),
random.randint(0, self.width - 1)])
index[index % 2 == 0] += 1
self.stack.append([index, self.shuffle_directions()])
def generate(self):
while self.next():
pass
def next(self, borders=False):
if self.stack:
index, directions = self.stack.pop()
stack_size = len(self.stack)
directions_size = len(directions)
while directions:
direction = directions.pop()
new_index = index + self.go[direction]
# Special case at the borders.
if borders:
if self.cell_valid(index + self.go_half[direction]) and not self.cell_valid(new_index):
if random.choice([0, 1]):
y, x = index + self.go_half[direction]
self.data[y, x] = 0
if self.cell_valid(new_index) and not self.cell_visited(new_index):
self.stack.append([index, directions])
self.cell_move(index, new_index)
self.stack.append([new_index, self.shuffle_directions()])
break
if directions_size == 4 and not directions and len(self.stack) == stack_size:
self.random_break(index)
return True
else:
return False
def random_break(self, index):
for direction in self.shuffle_directions():
new_index = index + self.go[direction]
if self.cell_valid(new_index) and self.cell_value(index + self.go_half[direction]) == 1:
self.cell_move(index, new_index)
break
def cell_value(self, index):
y, x = index
return self.data[y, x]
def cell_visited(self, index):
return self.cell_value(index) != 1
def cell_valid(self, index):
y, x = index
if y < 0 or y >= self.height or x < 0 or x >= self.width:
return False
return True
def cell_move(self, index, new_index):
y, x = new_index
self.data[y, x] = 0
y, x = (index + 0.5 * (new_index - index)).astype(int)
self.data[y, x] = 0
def shuffle_directions(self):
# sample from a list of keys; random.sample() no longer accepts dict views
return random.sample(list(self.go), len(self.go))
def itermaze(self):
return self.__iter2d__(self.data)
@staticmethod
def __iter2d__(data):
for i in range(data.shape[0]):
for j in range(data.shape[1]):
yield np.array([i, j]), data[i, j]
def __str__(self):
data = -1 * np.ones((self.height + 2, self.width + 2))
out = ''
wall = '#'
path = '0'
border = '+'
data[1:-1, 1:-1] = self.data
for index, value in self.__iter2d__(data):
if index[1] == 0:
out += os.linesep
if value == -1:
out += border
elif value == 0:
out += path
elif value == 1:
out += wall
return out
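# Minimal usage sketch (dimensions are arbitrary odd numbers, per the constructor check):
#
#     maze = RecursiveBTMaze(21, 11)
#     maze.generate()
#     print(maze)
#
# This carves a perfect maze and renders it with '#' walls, '0' paths, and a '+' border.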
| 28.861314
| 107
| 0.508346
| 502
| 3,954
| 3.88247
| 0.213147
| 0.049256
| 0.02822
| 0.020523
| 0.181632
| 0.081067
| 0.02668
| 0
| 0
| 0
| 0
| 0.023534
| 0.365959
| 3,954
| 136
| 108
| 29.073529
| 0.753889
| 0.028832
| 0
| 0.159574
| 0
| 0
| 0.012262
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12766
| false
| 0.010638
| 0.031915
| 0.031915
| 0.265957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db579a2c18ea2f40634d5108f68e0bca010002d0
| 5,608
|
py
|
Python
|
KV_Reader.py
|
Nibuja05/KVConverter
|
74f810df4ac82358f405eac9c2f56dce13b69302
|
[
"MIT"
] | 2
|
2020-07-06T00:24:27.000Z
|
2021-09-20T20:16:36.000Z
|
KV_Reader.py
|
Nibuja05/KVConverter
|
74f810df4ac82358f405eac9c2f56dce13b69302
|
[
"MIT"
] | null | null | null |
KV_Reader.py
|
Nibuja05/KVConverter
|
74f810df4ac82358f405eac9c2f56dce13b69302
|
[
"MIT"
] | null | null | null |
import re
import math
class KVPart():
"""docstring for KVPart"""
def __init__(self, name, tab_count = 0):
#super(KVPart, self).__init__()
self.name = name
self.values = []
self.tab_count = tab_count
self.parent = None
self.master = False
def add_simple_value(self, value):
self.values.append(value)
def add_KVPart(self, name):
if self.master == False:
new_KVPart = KVPart(name, self.tab_count + 1)
else:
new_KVPart = KVPart(name, self.tab_count)
new_KVPart.set_parent(self)
self.values.append(new_KVPart)
return new_KVPart
def add_KVPart_finished(self, part):
if not part is None:
part.set_tab_count(self.tab_count + 1)
self.values.append(part)
def add_KVComment(self, text):
new_KVComment = KVComment(text)
self.values.append(new_KVComment)
def is_empty(self):
if len(self.values) == 0:
return True
return False
def set_parent(self, parent):
self.parent = parent
def get_parent(self):
return self.parent
def has_parent(self):
if self.parent is not None:
return True
return False
def get_name(self):
return self.name
def set_master(self, boolean):
self.master = boolean
def get_values(self):
return self.values
def has_KV_child(self):
return any(isinstance(x, KVPart) for x in self.values)
def set_tab_count(self, count):
self.tab_count = count
def items(self):
return self.name, self.values[0]
def __str__(self):
if self.master == False:
string = self.fTab(self.tab_count) + "\"" + self.name + "\""
if any(isinstance(x, KVPart) for x in self.values):
string += "\n" + self.fTab(self.tab_count) + "{\n"
else:
string += self.get_normal_space(string)
for x in self.values:
if type(x) is KVPart:
string += str(x)
elif type(x) is KVComment:
string += self.fTab(self.tab_count + 1) + str(x) + "\n"
else:
string += "\"" + str(x) + "\"\n"
if any(isinstance(x, KVPart) for x in self.values):
string += self.fTab(self.tab_count) + "}\n"
return string
else:
if len(self.values) > 1:
string = ""
for x in self.values:
string += str(x) + "\n"
return string
else:
return ""
def __repr__(self):
return "<|" + self.name + "|>"
def fTab(self, count):
string = ""
for x in range(count):
string += "\t"
return string
def get_normal_space(self, text):
lines = text.splitlines()
last_line = lines[len(lines) - 1]
new_position = last_line.rfind("\"")
tab_count = math.floor((40 - new_position) / 5)
space_count = ((40 - new_position) % 5) + 1
string = ""
for x in range(space_count):
string += " "
string += self.fTab(tab_count)
return string
class KVComment():
"""docstring for KVComment"""
def __init__(self, text):
#super(KVComment, self).__init__()
self.text = text
def __str__(self):
return self.text
def read_file(path):
	#path = input("Please enter the path of the KV File:")
	#path = "C:\\Steam\\steamapps\\common\\dota 2 beta\\game\\dota_addons\\heataria\\scripts\\npc\\abilities\\heataria_blaze_path.txt"
	try:
		with open(path, "r") as file:
			text = file.read()
	except FileNotFoundError:
		# the original recursed with no argument, which would raise a TypeError;
		# prompt for a new path instead (assumed intent)
		path = input("File not found. Please enter the path of the KV File: ")
		return read_file(path)
	master = KVPart("master")
	master.set_master(True)
	progress_text(text, master)
	return master
#processes a KV textfile into a KV_Part structure
def progress_text(text, last_KVPart = None):
if last_KVPart is not None:
#search patterns to check structure
quote_pattern = r'\"(.*?)\"'
open_pattern = r'.*{'
close_pattern = r'.*}'
comment_pattern = r'//.*'
quote_match = re.search(quote_pattern, text)
open_match = re.search(open_pattern, text)
close_match = re.search(close_pattern, text)
comment_match = re.search(comment_pattern, text)
#cancel if there are no more quotes left
if quote_match is not None:
quote_start = quote_match.start()
else:
return
#if there are no brackets left, give them a placeholder value
if open_match is not None:
open_start = open_match.start()
else:
open_start = len(text)
if close_match is not None:
close_start = close_match.start()
else:
close_start = len(text)
if comment_match is not None:
comment_start = comment_match.start()
else:
comment_start = len(text)
string = quote_match.group(1)
#print("SEACH: q." + str(quote_start) + " o." + str(open_start) + " cl." + str(close_start) + " co." + str(comment_start))
if comment_start < quote_start and comment_start < open_start and comment_start < close_start:
string = comment_match.group()
text = text[comment_match.end() + 1:]
last_KVPart.add_KVComment(string)
progress_text(text, last_KVPart)
#no brackets before next quote -> simply add to current KV_Part
elif quote_start < open_start and quote_start < close_start:
	#check if it's a value or a key
if last_KVPart.is_empty() and not last_KVPart.get_name() == "master":
last_KVPart.add_simple_value(string)
new_KVPart = last_KVPart.get_parent()
else:
new_KVPart = last_KVPart.add_KVPart(string)
text = text[quote_match.end() + 1:]
progress_text(text, new_KVPart)
#closing bracket -> remove bracket and move to parent KV_Part
elif close_start < quote_start:
text = text[close_match.end() + 1:]
if last_KVPart.has_parent():
temp_KVPart = last_KVPart.get_parent()
else:
temp_KVPart = last_KVPart
progress_text(text, temp_KVPart)
#opening bracket -> creates a new child KV_Part
elif open_start < quote_start:
new_KVPart = last_KVPart.add_KVPart(string)
text = text[quote_match.end() + 1:]
progress_text(text, new_KVPart)
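# Usage sketch (the path is a placeholder, not from the original project):
#
#     master = read_file("heataria_blaze_path.txt")
#     print(master)
#
# read_file() returns the master KVPart; printing it re-serializes the parsed key-value tree.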
| 26.704762
| 131
| 0.684736
| 838
| 5,608
| 4.368735
| 0.177804
| 0.032778
| 0.0295
| 0.013657
| 0.210325
| 0.150232
| 0.095056
| 0.078121
| 0.078121
| 0.068287
| 0
| 0.00462
| 0.189551
| 5,608
| 209
| 132
| 26.832536
| 0.80088
| 0.140335
| 0
| 0.223602
| 0
| 0
| 0.017302
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.012422
| 0.043478
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db587b6771666fcfb06093ced1689bf5fcf21ace
| 3,476
|
py
|
Python
|
scripts/updatetestsuiterefimages.py
|
PaulDoessel/appleseed
|
142908e05609cd802b3ab937ff27ef2b73dd3088
|
[
"MIT"
] | null | null | null |
scripts/updatetestsuiterefimages.py
|
PaulDoessel/appleseed
|
142908e05609cd802b3ab937ff27ef2b73dd3088
|
[
"MIT"
] | null | null | null |
scripts/updatetestsuiterefimages.py
|
PaulDoessel/appleseed
|
142908e05609cd802b3ab937ff27ef2b73dd3088
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2014-2016 Francois Beaune, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import print_function
import argparse
import os
import shutil
#--------------------------------------------------------------------------------------------------
# Utility functions.
#--------------------------------------------------------------------------------------------------
def safe_mkdir(dir):
if not os.path.exists(dir):
os.mkdir(dir)
def walk(directory, recursive):
if recursive:
for dirpath, dirnames, filenames in os.walk(directory):
yield dirpath, dirnames, filenames
else:
yield os.walk(directory).next()
#--------------------------------------------------------------------------------------------------
# Update reference images in a given test suite directory.
#--------------------------------------------------------------------------------------------------
def update_ref_images(parent_dir):
renders_dir = os.path.join(parent_dir, "renders")
ref_dir = os.path.join(parent_dir, "ref")
safe_mkdir(ref_dir)
for filename in os.listdir(renders_dir):
if os.path.splitext(filename)[1] == ".png":
src_path = os.path.join(renders_dir, filename)
dst_path = os.path.join(ref_dir, filename)
print(" copying {0} to {1}...".format(src_path, dst_path))
shutil.copyfile(src_path, dst_path)
#--------------------------------------------------------------------------------------------------
# Entry point.
#--------------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="update functional test suite reference images.")
parser.add_argument("-r", "--recursive", action='store_true', dest="recursive",
help="scan the specified directory and all its subdirectories")
parser.add_argument("directory", nargs='?', default=".", help="directory to scan")
args = parser.parse_args()
for dirpath, dirnames, filenames in walk(args.directory, args.recursive):
if "renders" in dirnames:
update_ref_images(dirpath)
if __name__ == '__main__':
main()
| 39.954023
| 99
| 0.592923
| 399
| 3,476
| 5.070175
| 0.461153
| 0.0435
| 0.019773
| 0.026693
| 0.05042
| 0.02175
| 0
| 0
| 0
| 0
| 0
| 0.003776
| 0.161968
| 3,476
| 86
| 100
| 40.418605
| 0.690697
| 0.559264
| 0
| 0
| 0
| 0
| 0.14238
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.117647
| 0
| 0.235294
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db58e1a129781006da344d7eb154b8ae346ffb44
| 4,244
|
py
|
Python
|
raidquaza/poll/polls.py
|
Breee/raidquaza
|
308d643e71eddf6f6dc432c01322a02d604ac70e
|
[
"MIT"
] | 2
|
2019-03-12T16:44:24.000Z
|
2020-04-13T21:06:20.000Z
|
raidquaza/poll/polls.py
|
Breee/raidquaza
|
308d643e71eddf6f6dc432c01322a02d604ac70e
|
[
"MIT"
] | 5
|
2019-07-13T00:11:42.000Z
|
2021-07-29T11:55:39.000Z
|
raidquaza/poll/polls.py
|
Breee/raidquaza
|
308d643e71eddf6f6dc432c01322a02d604ac70e
|
[
"MIT"
] | null | null | null |
from typing import List, Any
import time
from discord import Embed, Reaction
from utils import uniquify
# EMOJIS regional_indicator_A to regional_indicator_T
reaction_emojies = ['\U0001F1E6',
'\U0001F1E7',
'\U0001F1E8',
'\U0001F1E9',
'\U0001F1EA',
'\U0001F1EB',
'\U0001F1EC',
'\U0001F1ED',
'\U0001F1EE',
'\U0001F1EF',
'\U0001F1F0',
'\U0001F1F1',
'\U0001F1F2',
'\U0001F1F3',
'\U0001F1F4',
'\U0001F1F5',
'\U0001F1F6',
'\U0001F1F7',
'\U0001F1F8',
'\U0001F1F9']
number_emojies = {'rq_plus_one': 1, 'rq_plus_two': 2, 'rq_plus_three': 3, 'rq_plus_four': 4}
class PollCreationException(Exception):
pass
class Poll(object):
"""
A Poll object.
"""
def __init__(self, poll_id: str, poll_title: str, options: List[Any], is_immortal=False, updated_since_start=True):
if options is None:
options = []
self.poll_id = poll_id
self.creation_time = time.time()
self.last_update = time.time()
self.poll_title = poll_title
self.options = uniquify(options)
self.reaction_to_option = {reaction_emojies[k]: options[k] for k in range(len(options))}
self.option_to_reaction = {options[k]: reaction_emojies[k] for k in range(len(options))}
self.participants = dict()
self.option_to_participants = {key: [] for key in options}
self.sent_message = None
self.received_message = None
self.is_immortal = is_immortal
self.is_enabled = True
self.updated_since_start = updated_since_start
async def full_update(self, reactions: List[Reaction], bot_user_id: int):
if self.updated_since_start:
return
self.reaction_to_option = {reaction_emojies[k]: self.options[k] for k in range(len(self.options))}
self.option_to_reaction = {self.options[k]: reaction_emojies[k] for k in range(len(self.options))}
self.participants = dict()
self.option_to_participants = {key: [] for key in self.options}
for reaction in reactions:
async for user in reaction.users():
if bot_user_id != user.id:
self.process_reaction(reaction=reaction, user=user, add=True)
self.updated_since_start = True
def process_reaction(self, reaction, user, add):
# get users + reaction emoji
if hasattr(user, 'nick') and user.nick is not None:
nick = user.nick
else:
nick = user.display_name
if reaction.emoji in self.reaction_to_option:
# set list of users for the option the reaction belongs to.
option = self.reaction_to_option[reaction.emoji]
if add and nick not in self.option_to_participants[option]:
self.option_to_participants[option].append(nick)
elif not add:
self.option_to_participants[option].remove(nick)
if nick not in self.participants:
self.participants[nick] = 1
if hasattr(reaction.emoji, 'name') and reaction.emoji.name in number_emojies:
amount = number_emojies[reaction.emoji.name]
self.participants[nick] += (amount if add else -1 * amount)
def to_discord(self):
msg = f'Poll for **{self.poll_title}**'
embed = Embed(color=0xbb1c1c)
for option, participants in self.option_to_participants.items():
reaction = self.option_to_reaction[option]
name = f'{reaction} {option}'
value = ', '.join(
sorted([f'{x} [{self.participants[x]}]' for x in participants])) if participants else '-'
field_counters = [self.participants[x] for x in participants]
total = sum(field_counters)
embed.add_field(name=f'{name} [{total}]', value=value, inline=False)
embed.set_footer(text=f'ID: {self.poll_id}')
return msg, embed
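# Construction sketch (illustrative values; normally driven by discord commands):
#
#     poll = Poll(poll_id="abc123", poll_title="Raid at 18:00", options=["Gym A", "Gym B"])
#     msg, embed = poll.to_discord()
#
# Reaction emojis A/B map to the options, and rq_plus_* emoji reactions bump a user's headcount.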
| 41.203883
| 119
| 0.583176
| 488
| 4,244
| 4.889344
| 0.266393
| 0.03772
| 0.045264
| 0.060352
| 0.269908
| 0.17435
| 0.17435
| 0.108969
| 0.098072
| 0.083822
| 0
| 0.042152
| 0.312441
| 4,244
| 102
| 120
| 41.607843
| 0.775531
| 0.035815
| 0
| 0.023256
| 0
| 0
| 0.090574
| 0.011046
| 0
| 0
| 0.001964
| 0
| 0
| 1
| 0.034884
| false
| 0.011628
| 0.046512
| 0
| 0.127907
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db59947574fede70d491b2341a72a67a1fae3994
| 387
|
py
|
Python
|
Python/Regex and Parsing/Validating and Parsing Email Addresses.py
|
pavstar619/HackerRank
|
697ee46b6e621ad884a064047461d7707b1413cd
|
[
"MIT"
] | 61
|
2017-04-27T13:45:12.000Z
|
2022-01-27T11:40:15.000Z
|
Python/Regex and Parsing/Validating and Parsing Email Addresses.py
|
fahad0193/HackerRank
|
eb6c95e16688c02921c1df6b6ea613667a251457
|
[
"MIT"
] | 1
|
2017-06-24T14:16:06.000Z
|
2017-06-24T14:16:28.000Z
|
Python/Regex and Parsing/Validating and Parsing Email Addresses.py
|
fahad0193/HackerRank
|
eb6c95e16688c02921c1df6b6ea613667a251457
|
[
"MIT"
] | 78
|
2017-07-05T11:48:20.000Z
|
2022-02-08T08:04:22.000Z
|
import email.utils as em
import re
class Main():
def __init__(self):
self.n = int(input())
for i in range(self.n):
self.s = em.parseaddr(input())
# require 1-3 letters in the extension ({0,3} would also accept a bare trailing dot)
if re.match(r'^[a-zA-Z](\w|-|\.|_)+@[a-zA-Z]+\.[a-zA-Z]{1,3}$', self.s[1]):
print(em.formataddr(self.s))
if __name__ == '__main__':
obj = Main()
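# Example session (format from the HackerRank problem statement; values illustrative):
#   input:  2
#           DEXTER <dexter@hotmail.com>
#           VIRUS <virus!@variable.:p>
#   output: DEXTER <dexter@hotmail.com>
# Only the first address matches the name@domain.extension pattern, so only it is echoed.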
| 24.1875
| 87
| 0.4677
| 56
| 387
| 3
| 0.607143
| 0.089286
| 0.071429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011538
| 0.328165
| 387
| 15
| 88
| 25.8
| 0.634615
| 0
| 0
| 0
| 0
| 0.090909
| 0.142119
| 0.121447
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.363636
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db64c7127d561a8ba836f248730b0617bfb376eb
| 368
|
py
|
Python
|
chap7/heapq_merge.py
|
marble-git/python-laoqi
|
74c4bb5459113e54ce64443e5da5a9c6a3052d6a
|
[
"MIT"
] | null | null | null |
chap7/heapq_merge.py
|
marble-git/python-laoqi
|
74c4bb5459113e54ce64443e5da5a9c6a3052d6a
|
[
"MIT"
] | null | null | null |
chap7/heapq_merge.py
|
marble-git/python-laoqi
|
74c4bb5459113e54ce64443e5da5a9c6a3052d6a
|
[
"MIT"
] | null | null | null |
#coding:utf-8
'''
filename:heapq_merge.py
chap:7
subject:4-2
conditions:heapq.merge,sorted_list:lst1,lst2
lst3=merged_list(lst1,lst2) is sorted
solution:heapq.merge
'''
import heapq
lst1 = [1,3,5,7,9]
lst2 = [2,4,6,8]
if __name__ == '__main__':
lst3 = heapq.merge(lst1, lst2)
# heapq.merge returns a lazy iterator, so the first print shows a generator object
print('lst3', lst3)
print(list(lst3))
| 14.72
| 49
| 0.616848
| 56
| 368
| 3.857143
| 0.553571
| 0.185185
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091549
| 0.228261
| 368
| 24
| 50
| 15.333333
| 0.669014
| 0.464674
| 0
| 0
| 0
| 0
| 0.072727
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db654a453fae8398e895160a150ba86dbbcc20b1
| 1,966
|
py
|
Python
|
bindings/python/examples/feature_example.py
|
lithathampan/wav2letter
|
8abf8431d99da147cc4aefc289ad33626e13de6f
|
[
"BSD-3-Clause"
] | 1
|
2020-07-27T20:51:32.000Z
|
2020-07-27T20:51:32.000Z
|
bindings/python/examples/feature_example.py
|
lithathampan/wav2letter
|
8abf8431d99da147cc4aefc289ad33626e13de6f
|
[
"BSD-3-Clause"
] | null | null | null |
bindings/python/examples/feature_example.py
|
lithathampan/wav2letter
|
8abf8431d99da147cc4aefc289ad33626e13de6f
|
[
"BSD-3-Clause"
] | 1
|
2021-09-27T16:18:20.000Z
|
2021-09-27T16:18:20.000Z
|
#!/usr/bin/env python3
# adapted from wav2letter/src/feature/test/MfccTest.cpp
import itertools as it
import os
import sys
from wav2letter.feature import FeatureParams, Mfcc
def load_data(filename):
path = os.path.join(data_path, filename)
path = os.path.abspath(path)
with open(path) as f:
return [float(x) for x in it.chain.from_iterable(line.split() for line in f)]
if __name__ == "__main__":
if len(sys.argv) != 2:
print(f"usage: {sys.argv[0]} feature_test_data_path", file=sys.stderr)
print(" (usually: <wav2letter_root>/src/feature/test/data)", file=sys.stderr)
sys.exit(1)
data_path = sys.argv[1]
wavinput = load_data("sa1.dat")
# golden features to compare
htkfeatures = load_data("sa1-mfcc.htk")
assert len(wavinput) > 0
assert len(htkfeatures) > 0
params = FeatureParams()
# define parameters of the featurization
params.sampling_freq = 16000
params.low_freq_filterbank = 0
params.high_freq_filterbank = 8000
params.num_filterbank_chans = 20
params.num_cepstral_coeffs = 13
params.use_energy = False
params.zero_mean_frame = False
params.use_power = False
# apply MFCC featurization
mfcc = Mfcc(params)
features = mfcc.apply(wavinput)
# check that obtained features are the same as golden one
assert len(features) == len(htkfeatures)
assert len(features) % 39 == 0
numframes = len(features) // 39
featurescopy = features.copy()
    # Rotate each 13-coefficient block (cepstra, deltas, accelerations) so its
    # 0th coefficient moves to the end of the block, matching the HTK-style
    # ordering of the golden features (C0/energy placed last).
    for f in range(numframes):
        for i in range(1, 39):
            features[f * 39 + i - 1] = features[f * 39 + i]
        features[f * 39 + 12] = featurescopy[f * 39 + 0]
        features[f * 39 + 25] = featurescopy[f * 39 + 13]
        features[f * 39 + 38] = featurescopy[f * 39 + 26]
differences = [abs(x[0] - x[1]) for x in zip(features, htkfeatures)]
print(f"max_diff={max(differences)}")
print(f"avg_diff={sum(differences)/len(differences)}")
| 30.71875
| 86
| 0.657172
| 277
| 1,966
| 4.545126
| 0.411552
| 0.019063
| 0.043685
| 0.028594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041912
| 0.223296
| 1,966
| 63
| 87
| 31.206349
| 0.78258
| 0.11292
| 0
| 0
| 0
| 0
| 0.111047
| 0.076525
| 0
| 0
| 0
| 0
| 0.093023
| 1
| 0.023256
| false
| 0
| 0.093023
| 0
| 0.139535
| 0.093023
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db65bd23cd7117025faa3493e9ff0bcdc4419ed0
| 3,227
|
py
|
Python
|
app.py
|
shreyashack/PY_Message_Decryption
|
251a82ee26c529ff63668328230c9d494f4c9cfa
|
[
"MIT"
] | 1
|
2020-11-18T10:01:13.000Z
|
2020-11-18T10:01:13.000Z
|
app.py
|
shreyashack/PY_Message_Decryption
|
251a82ee26c529ff63668328230c9d494f4c9cfa
|
[
"MIT"
] | null | null | null |
app.py
|
shreyashack/PY_Message_Decryption
|
251a82ee26c529ff63668328230c9d494f4c9cfa
|
[
"MIT"
] | null | null | null |
from tkinter import *
import onetimepad
class Message_Decrypt:
def __init__(self,root):
self.root=root
self.root.title("Message Decryption")
self.root.geometry("400x475")
self.root.iconbitmap("logo368.ico")
self.root.resizable(0,0)
def on_enter1(e):
but_decrypt['background']="black"
but_decrypt['foreground']="cyan"
def on_leave1(e):
but_decrypt['background']="SystemButtonFace"
but_decrypt['foreground']="SystemButtonText"
def on_enter2(e):
but_clear['background']="black"
but_clear['foreground']="cyan"
def on_leave2(e):
but_clear['background']="SystemButtonFace"
but_clear['foreground']="SystemButtonText"
def clear():
text_decrypt.delete('1.0',"end")
text_decrypt_output.delete('1.0',"end")
def decrypt():
try:
s=text_decrypt.get('1.0','end')
b=s.strip()
x=onetimepad.decrypt(b,'random')
text_decrypt_output.insert('end',x)
except Exception as e:
print(e)
#===========frame==================================#
mainframe=Frame(self.root,width=400,height=475,relief="ridge",bd=4)
mainframe.place(x=0,y=0)
firstframe=Frame(mainframe,width=393,height=207,relief="ridge",bd=4)
firstframe.place(x=0,y=0)
secondframe=Frame(mainframe,width=393,height=207,relief="ridge",bd=4)
secondframe.place(x=0,y=207)
thirdframe=Frame(mainframe,width=393,height=52,relief="ridge",bd=4,bg="gray77")
thirdframe.place(x=0,y=415)
#===================firstframe==============================#
scol=Scrollbar(firstframe,orient="vertical")
scol.place(relx=1, rely=0, relheight=1, anchor='ne')
text_decrypt=Text(firstframe,height=10,width=45,font=('times new roman',12),yscrollcommand=scol.set,relief="sunken",bd=3,fg="black")
text_decrypt.place(x=0,y=0)
scol.config(command=text_decrypt.yview)
#====================secondframe============================#
scol=Scrollbar(secondframe,orient="vertical")
scol.place(relx=1, rely=0, relheight=1, anchor='ne')
text_decrypt_output=Text(secondframe,height=10,width=45,font=('times new roman',12),yscrollcommand=scol.set,relief="sunken",bd=3,fg="black")
text_decrypt_output.place(x=0,y=0)
scol.config(command=text_decrypt_output.yview)
#==================third====================================#
but_decrypt=Button(thirdframe,text="Decrypt",width=13,font=('times new roman',14),cursor="hand2",command=decrypt)
but_decrypt.place(x=20,y=3)
but_decrypt.bind("<Enter>",on_enter1)
but_decrypt.bind("<Leave>",on_leave1)
but_clear=Button(thirdframe,text="Clear",width=13,font=('times new roman',14),cursor="hand2",command=clear)
but_clear.place(x=235,y=3)
but_clear.bind("<Enter>",on_enter2)
but_clear.bind("<Leave>",on_leave2)
if __name__ == "__main__":
root=Tk()
Message_Decrypt(root)
root.mainloop()
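# A minimal sketch of the onetimepad dependency this app relies on: a message
# decrypts correctly only with the key it was encrypted under ('random' in the
# decrypt() call above). Assumes the pip package's encrypt/decrypt API:
# cipher = onetimepad.encrypt('hello', 'random')
# plain = onetimepad.decrypt(cipher, 'random')  # 'hello'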
| 34.329787
| 154
| 0.577007
| 387
| 3,227
| 4.674419
| 0.289406
| 0.066888
| 0.023217
| 0.026534
| 0.330569
| 0.305141
| 0.305141
| 0.305141
| 0.305141
| 0.305141
| 0
| 0.042511
| 0.220019
| 3,227
| 93
| 155
| 34.698925
| 0.676202
| 0.070344
| 0
| 0.032258
| 0
| 0
| 0.137366
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.112903
| false
| 0
| 0.032258
| 0
| 0.16129
| 0.016129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db682583f2b418b3755329c159971a743aab45f6
| 589
|
py
|
Python
|
backend/tests/test_api/test_api_auth.py
|
abodacs/fastapi-ml-skeleton
|
fa9a013d06e70cbaff9b9469db32246e41ce7e0f
|
[
"Apache-2.0"
] | null | null | null |
backend/tests/test_api/test_api_auth.py
|
abodacs/fastapi-ml-skeleton
|
fa9a013d06e70cbaff9b9469db32246e41ce7e0f
|
[
"Apache-2.0"
] | 3
|
2020-03-16T22:07:31.000Z
|
2021-06-25T15:33:38.000Z
|
backend/tests/test_api/test_api_auth.py
|
abodacs/fastapi-ml-skeleton
|
fa9a013d06e70cbaff9b9469db32246e41ce7e0f
|
[
"Apache-2.0"
] | null | null | null |
# Skeleton
from fastapi_skeleton.core import messages
def test_auth_using_prediction_api_no_apikey_header(test_client) -> None:
response = test_client.post("/api/model/predict")
assert response.status_code == 400
assert response.json() == {"detail": messages.NO_API_KEY}
def test_auth_using_prediction_api_wrong_apikey_header(test_client) -> None:
response = test_client.post(
"/api/model/predict", json={"image": "test"}, headers={"token": "WRONG_TOKEN"}
)
assert response.status_code == 401
assert response.json() == {"detail": messages.AUTH_REQ}
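# A hedged sketch of the complementary happy-path test. The valid token value
# is deployment configuration, so "CORRECT_TOKEN" below is a placeholder, not
# the project's real key:
# def test_auth_using_prediction_api_correct_apikey_header(test_client) -> None:
#     response = test_client.post(
#         "/api/model/predict", json={"image": "test"},
#         headers={"token": "CORRECT_TOKEN"},
#     )
#     assert response.status_code != 401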
| 34.647059
| 86
| 0.728353
| 77
| 589
| 5.246753
| 0.441558
| 0.09901
| 0.054455
| 0.079208
| 0.613861
| 0.455446
| 0.311881
| 0.311881
| 0.311881
| 0.311881
| 0
| 0.011881
| 0.142615
| 589
| 16
| 87
| 36.8125
| 0.788119
| 0.013582
| 0
| 0
| 0
| 0
| 0.126079
| 0
| 0
| 0
| 0
| 0
| 0.363636
| 1
| 0.181818
| false
| 0
| 0.090909
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db693358ac60e6cb090422f46492eb2fca4b02bf
| 2,434
|
py
|
Python
|
object_detection/box_coders/mean_stddev_box_coder.py
|
ophirSarusi/TF_Object_Detection
|
e08ccd18c6f14586e048048a445cf5a10dbc7c4d
|
[
"MIT"
] | 59
|
2018-09-23T09:34:24.000Z
|
2020-03-10T04:31:27.000Z
|
object_detection/box_coders/mean_stddev_box_coder.py
|
ophirSarusi/TF_Object_Detection
|
e08ccd18c6f14586e048048a445cf5a10dbc7c4d
|
[
"MIT"
] | 46
|
2018-07-10T23:53:15.000Z
|
2022-02-06T03:31:47.000Z
|
object_detection/box_coders/mean_stddev_box_coder.py
|
ophirSarusi/TF_Object_Detection
|
e08ccd18c6f14586e048048a445cf5a10dbc7c4d
|
[
"MIT"
] | 58
|
2018-09-23T10:31:47.000Z
|
2021-11-08T11:34:40.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mean stddev box coder.
This box coder use the following coding schema to encode boxes:
rel_code = (box_corner - anchor_corner_mean) / anchor_corner_stddev.
"""
from object_detection.core import box_coder
from object_detection.core import box_list
class MeanStddevBoxCoder(box_coder.BoxCoder):
"""Mean stddev box coder."""
@property
def code_size(self):
return 4
def _encode(self, boxes, anchors):
"""Encode a box collection with respect to anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of N anchors. We assume that anchors has an associated
stddev field.
Returns:
a tensor representing N anchor-encoded boxes
Raises:
ValueError: if the anchors BoxList does not have a stddev field
"""
if not anchors.has_field('stddev'):
raise ValueError('anchors must have a stddev field')
box_corners = boxes.get()
means = anchors.get()
stddev = anchors.get_field('stddev')
return (box_corners - means) / stddev
def _decode(self, rel_codes, anchors):
"""Decode.
Args:
rel_codes: a tensor representing N anchor-encoded boxes.
anchors: BoxList of anchors. We assume that anchors has an associated
stddev field.
Returns:
boxes: BoxList holding N bounding boxes
Raises:
ValueError: if the anchors BoxList does not have a stddev field
"""
if not anchors.has_field('stddev'):
raise ValueError('anchors must have a stddev field')
means = anchors.get()
stddevs = anchors.get_field('stddev')
box_corners = rel_codes * stddevs + means
return box_list.BoxList(box_corners)
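# Worked example of the coding schema documented above (illustrative values):
# with anchor mean m = 10.0, stddev s = 0.1, and box corner b = 10.5,
#   _encode: rel = (b - m) / s = (10.5 - 10.0) / 0.1 = 5.0
#   _decode: b = rel * s + m = 5.0 * 0.1 + 10.0 = 10.5  (exact round trip)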
| 34.28169
| 81
| 0.671323
| 320
| 2,434
| 5.025
| 0.39375
| 0.037313
| 0.027363
| 0.039801
| 0.320274
| 0.320274
| 0.280473
| 0.236318
| 0.236318
| 0.236318
| 0
| 0.004782
| 0.226787
| 2,434
| 70
| 82
| 34.771429
| 0.849628
| 0.603944
| 0
| 0.3
| 0
| 0
| 0.112102
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.1
| 0.05
| 0.45
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db6aa256e7b60e45c5a9fbde4a14ff7a63101137
| 3,544
|
py
|
Python
|
hlrl/torch/agents/wrappers/agent.py
|
Chainso/HLRL
|
584f4ed2fa4d8b311a21dbd862ec9434833dd7cd
|
[
"MIT"
] | null | null | null |
hlrl/torch/agents/wrappers/agent.py
|
Chainso/HLRL
|
584f4ed2fa4d8b311a21dbd862ec9434833dd7cd
|
[
"MIT"
] | null | null | null |
hlrl/torch/agents/wrappers/agent.py
|
Chainso/HLRL
|
584f4ed2fa4d8b311a21dbd862ec9434833dd7cd
|
[
"MIT"
] | null | null | null |
import torch
from typing import Any, Dict, List, OrderedDict, Tuple
from hlrl.core.agents import RLAgent
from hlrl.core.common.wrappers import MethodWrapper
class TorchRLAgent(MethodWrapper):
"""
A torch agent that wraps its experiences as torch tensors.
"""
def __init__(self,
agent: RLAgent,
batch_state: bool = True):
"""
Creates torch agent that can wrap experiences as tensors.
Args:
agent: The agent to wrap.
batch_state: If the state should be batched with a batch size of 1
when transformed.
"""
super().__init__(agent)
self.batch_state = batch_state
def make_tensor(self, data):
"""
Creates a float tensor of the data of batch size 1.
"""
if self.batch_state:
data = [data]
return torch.FloatTensor(data).to(self.algo.device)
def transform_state(self, state):
state_dict = self.om.transform_state(state)
state_dict["state"] = self.make_tensor(state_dict["state"])
return state_dict
def transform_reward(
self,
state: Any,
algo_step: OrderedDict[str, Any],
reward: Any,
terminal: Any,
next_state: Any
) -> Any:
"""
Creates a tensor from the reward.
Args:
state: The state of the environment.
algo_step: The transformed algorithm step of the state.
reward: The reward from the environment.
terminal: If the next state is a terminal state.
next_state: The new state of the environment.
Returns:
The reward as a tensor.
"""
reward = self.om.transform_reward(
state, algo_step, reward, terminal, next_state
)
if self.batch_state:
reward = [reward]
return self.make_tensor(reward)
def transform_terminal(self, terminal: Any, info: Any) -> Any:
"""
Transforms the terminal of an environment step.
Args:
terminal: The terminal value to transform.
info: Additional environment information for the step.
Returns:
The transformed terminal.
"""
terminal = self.om.transform_terminal(terminal, info)
if self.batch_state:
terminal = [terminal]
return self.make_tensor(terminal)
def transform_action(self, action):
return self.om.transform_action(action).squeeze().cpu().numpy()
def reward_to_float(self, reward: torch.Tensor) -> float:
"""
Converts the reward to a single float value.
Args:
reward: The reward to turn into a float.
Returns:
The float value of the reward tensor.
"""
reward = reward[0].detach().cpu()
reward = reward.item()
return reward
def create_batch(
self,
ready_experiences: Dict[str, List[Any]],
) -> Dict[str, torch.Tensor]:
"""
Creates a batch of experiences to be trained on from the ready
experiences.
Args:
ready_experiences: The experiences to be trained on.
Returns:
A dictionary of each field necessary for training.
"""
batch = {
key: torch.cat(ready_experiences[key]) for key in ready_experiences
}
return self.om.create_batch(batch)
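# A minimal sketch of the batching behavior, assuming a wrapped agent whose
# algo exposes a .device attribute (the names here are illustrative):
# agent = TorchRLAgent(agent=some_rl_agent, batch_state=True)
# agent.make_tensor([0.0, 1.0]).shape  # torch.Size([1, 2]): batch of size 1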
| 27.905512
| 79
| 0.577596
| 404
| 3,544
| 4.955446
| 0.247525
| 0.034965
| 0.027972
| 0.023976
| 0.023976
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001292
| 0.344808
| 3,544
| 126
| 80
| 28.126984
| 0.860896
| 0.334368
| 0
| 0.098039
| 0
| 0
| 0.005028
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.156863
| false
| 0
| 0.078431
| 0.019608
| 0.392157
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db6b5bcc7b8379dc6e51f6670d5ff0c0d562417c
| 649
|
py
|
Python
|
PixivConstant.py
|
NHOrus/PixivUtil2
|
facd6b1a21e4adf5edf1de4d4809e94e834246b6
|
[
"BSD-2-Clause"
] | null | null | null |
PixivConstant.py
|
NHOrus/PixivUtil2
|
facd6b1a21e4adf5edf1de4d4809e94e834246b6
|
[
"BSD-2-Clause"
] | null | null | null |
PixivConstant.py
|
NHOrus/PixivUtil2
|
facd6b1a21e4adf5edf1de4d4809e94e834246b6
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
PIXIVUTIL_VERSION = '20191220-beta1'
PIXIVUTIL_LINK = 'https://github.com/Nandaka/PixivUtil2/releases'
PIXIVUTIL_DONATE = 'https://bit.ly/PixivUtilDonation'
# Log Settings
PIXIVUTIL_LOG_FILE = 'pixivutil.log'
PIXIVUTIL_LOG_SIZE = 10485760
PIXIVUTIL_LOG_COUNT = 10
PIXIVUTIL_LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
# Download Results
PIXIVUTIL_NOT_OK = -1
PIXIVUTIL_OK = 0
PIXIVUTIL_SKIP_OLDER = 1
PIXIVUTIL_SKIP_BLACKLIST = 2
PIXIVUTIL_KEYBOARD_INTERRUPT = 3
PIXIVUTIL_SKIP_DUPLICATE = 4
PIXIVUTIL_SKIP_LOCAL_LARGER = 5
PIXIVUTIL_CHECK_DOWNLOAD = 6
PIXIVUTIL_ABORTED = 9999
BUFFER_SIZE = 8192
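# A hedged sketch of how the log constants above are typically consumed; the
# handler wiring is an assumption, not part of this module:
# import logging.handlers
# handler = logging.handlers.RotatingFileHandler(
#     PIXIVUTIL_LOG_FILE, maxBytes=PIXIVUTIL_LOG_SIZE,
#     backupCount=PIXIVUTIL_LOG_COUNT)
# handler.setFormatter(logging.Formatter(PIXIVUTIL_LOG_FORMAT))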
| 25.96
| 77
| 0.784284
| 87
| 649
| 5.517241
| 0.632184
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064125
| 0.11094
| 649
| 24
| 78
| 27.041667
| 0.767764
| 0.078582
| 0
| 0
| 0
| 0
| 0.26431
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db6b74f1fcb56888f5ba09963ca5bb5ed146122f
| 8,906
|
py
|
Python
|
dynamic_schemas/views.py
|
Threemusketeerz/DSystems
|
cd03ad2fa6b55872d57bfd01a4ac781aa5cbed8c
|
[
"BSD-2-Clause"
] | 1
|
2018-01-23T12:23:48.000Z
|
2018-01-23T12:23:48.000Z
|
dynamic_schemas/views.py
|
Threemusketeerz/DSystems
|
cd03ad2fa6b55872d57bfd01a4ac781aa5cbed8c
|
[
"BSD-2-Clause"
] | 1
|
2018-01-19T08:43:59.000Z
|
2018-01-23T12:20:43.000Z
|
dynamic_schemas/views.py
|
Threemusketeerz/DSystems
|
cd03ad2fa6b55872d57bfd01a4ac781aa5cbed8c
|
[
"BSD-2-Clause"
] | null | null | null |
from django.http import Http404
from django.shortcuts import render, redirect, reverse
from django.views.generic import ListView
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.renderers import TemplateHTMLRenderer
from .models import Schema, SchemaColumn, SchemaResponse, SchemaUrl
from .forms import SchemaResponseForm, ResponseUpdateForm
from .serializers import SchemaResponseSerializer
from .prepare_data import getcolumns
import pytz
class SchemaIndexView(LoginRequiredMixin, ListView):
# login_url = '/accounts/login.html/'
template_name = 'dynamic_schemas/index.html'
context_object_name = 'all_schemas'
def get_queryset(self):
return Schema.objects.all()
@login_required
def form_view(request, pk):
schema = Schema.objects.get(pk=pk)
urls = schema.help_field.all()
if request.method == 'POST':
form = SchemaResponseForm(schema, request.POST)
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
return redirect(reverse('dynamic_schemas:schema_view',
kwargs={'pk': pk}))
else:
form = SchemaResponseForm(schema)
    return render(request, 'dynamic_schemas/create-form.html',
{
'form': form,
'schema': schema,
'help_urls': urls,
})
@login_required
def form_update_view(request, pk, r_pk):
schema = Schema.objects.get(pk=pk)
instance = SchemaResponse.objects.get(schema=schema, pk=r_pk)
columns = SchemaColumn.objects.filter(schema=schema)
###################################################
    # This little snippet checks whether the responses can be edited. If they
    # can, the submit button will be provided. There is no restriction on
    # has_been_edited, but since the data can't be saved we're good for now.
load_button = False
aggr_editables = [c.is_editable_once for c in columns]
if True in aggr_editables:
load_button = True
###################################################
form = ResponseUpdateForm(instance, pk)
if request.method == 'POST':
form = ResponseUpdateForm(instance, pk, request.POST or None)
if form.is_valid():
form.update()
return redirect(reverse('dynamic_schemas:schema_view',
kwargs={'pk': pk}))
    return render(request, 'dynamic_schemas/update-form.html',
{'form_update': form,
'load_button': load_button}
)
""" API Views """
class MakeDataPrettyMixin:
def _make_date_tz(self, instance=None, tz=None):
""" Takes an instance, and sets its timezone.
TODO:
Should this be a classmethod? Will a classmethod complicate the
view in its context?
"""
# Can this be moved to SETTINGS instead? Same for _make_date_readable.
# Problem is probably that the UTC format gets overridden.
if instance:
if tz:
tz = pytz.timezone(tz)
return instance.pub_date.astimezone(tz)
return
def _make_date_readable(self, instances):
"""
Helper function to change the dates to a format pleasing to the
eyes, takes a bundle of instances and converts their time.
How extensible do we want this?
Function is kept private for now, since in Denmark the timezone is CET.
"""
for instance in instances:
inst_as_cet = self._make_date_tz(
instance=instance
# tz='Europe/Copenhagen'
)
instance.pub_date = inst_as_cet \
.strftime('%d-%m/%Y %H:%M:%S')
return instances
def _make_user_readable(self, serializer):
""" Helper to return the correct attributes to the front-end
"""
for data in serializer.data:
# import ipdb; ipdb.set_trace()
user = data['user']
instance = User.objects.get(id=user)
user = instance.first_name + instance.last_name
if instance.first_name == '':
user = instance.username
data['user'] = user
# __import__('ipdb').set_trace()
# import ipdb; ipdb.set_trace()
return serializer
    def _make_instruction_links_readable(self, serializer):
for data in serializer.data:
instr = data['instruction']
instance = SchemaUrl.objects.get(id=instr)
instr = '<a href="'+ instance.url +'">'+ instance.name +'</a>'
data['instruction'] = instr
return serializer
class ResponseList(MakeDataPrettyMixin, APIView):
"""
Lists responses according to schema.
Purely for APIView for now. Not being used in the actual rendering af the
tables.
"""
default_order = [
('desc', '-'),
('asc', ''),
]
def get_orderprefix(self, order):
for tup in self.default_order:
if order in tup:
return tup[1]
def get(self, request, pk, format=None, *args):
req = request.GET
# Amount of data to fetch each pull
start = int(req.get('start', 0))
length = int(req.get('length', 30))
        end = start + length
order = req.get('order[0][dir]')
order_column = req.get('order[0][column]')
order_by_pre = self.get_orderprefix(order)
order_column_name = req.get('columns['+order_column+'][data]')
# __import__('ipdb').set_trace()
order_str = order_by_pre + order_column_name
draw = req.get('draw')
# TODO Gonna require some thinking. Also need to user recordsFiltered.
# search = req.get('search[value]')
schema = Schema.objects.get(pk=pk)
responses_count = SchemaResponse.objects.filter(schema=schema).count()
responses = SchemaResponse \
.objects \
.filter(schema=schema) \
.order_by(order_str)[start:end]
# __import__('ipdb').set_trace()
responses = self._make_date_readable(responses)
serializer = SchemaResponseSerializer(responses, many=True)
serializer = self._make_user_readable(serializer)
        serializer = self._make_instruction_links_readable(serializer)
return_data = {
'draw': int(draw),
'recordsTotal': responses_count,
'recordsFiltered': responses_count,
'data': serializer.data,
}
# __import__('ipdb').set_trace()
return Response(return_data)
class ResponseColumns(APIView):
def get(self, request, pk, format=None, *args):
req = request.GET
schema = Schema.objects.get(pk=pk)
sr = SchemaResponse.objects.filter(schema=schema).first()
columns = getcolumns(sr).getvalue()
return Response(columns)
class SchemaView(LoginRequiredMixin, APIView):
"""
Fetches the FIRST object from ResponseList. Makes it availabe for
as a template for the table in main.html
Excludes schema.id, and the placeholder qa_set in the template.
"""
renderer_classes = [TemplateHTMLRenderer]
template_name = 'dynamic_schemas/table_dev.html'
def get_object(self, pk):
try:
schema = Schema.objects.get(pk=pk)
if SchemaColumn.objects.filter(schema=schema).count() != 0:
all_responses = SchemaResponse.objects.filter(schema=schema)
single_response = all_responses.first()
serializer = SchemaResponseSerializer(single_response)
return serializer.data
        except SchemaResponse.DoesNotExist:
raise Http404
def get(self, request, pk):
schema = Schema.objects.get(pk=pk)
schema_help_urls = schema.help_field.all()
schema_obsolete = schema.obsolete.all()
schema_new = schema.new.all()
all_responses = SchemaResponse.objects.filter(schema=schema)
# self._make_date_readable(all_responses)
serializer = SchemaResponseSerializer(all_responses, many=True)
data = {'single_response': self.get_object(pk),
'all_responses': serializer.data,
'pk': pk,
'schema': schema,
'help_urls': schema_help_urls,
'schema_obsolete': schema_obsolete,
'schema_new': schema_new,
}
# __import__('ipdb').set_trace()
return Response(data)
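# Note on ResponseList.get above: start, length, order[0][dir],
# order[0][column], and draw are the DataTables server-side processing request
# parameters, and the response must echo draw alongside recordsTotal,
# recordsFiltered, and data so the client can discard stale responses.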
| 32.50365
| 79
| 0.613631
| 991
| 8,906
| 5.353179
| 0.273461
| 0.036192
| 0.025071
| 0.032988
| 0.202074
| 0.124411
| 0.075778
| 0.051272
| 0.038077
| 0.038077
| 0
| 0.002032
| 0.281608
| 8,906
| 273
| 80
| 32.622711
| 0.827133
| 0.168538
| 0
| 0.170732
| 0
| 0
| 0.066413
| 0.024483
| 0
| 0
| 0
| 0.007326
| 0
| 1
| 0.073171
| false
| 0
| 0.091463
| 0.006098
| 0.317073
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db6ec26c39a9f24fdd4d35e11407f85831432a46
| 24,215
|
py
|
Python
|
api/views.py
|
conscience99/lyriko
|
0ecc9e4d5ec8e3d746fcb286209a1e7993548a66
|
[
"MIT"
] | null | null | null |
api/views.py
|
conscience99/lyriko
|
0ecc9e4d5ec8e3d746fcb286209a1e7993548a66
|
[
"MIT"
] | null | null | null |
api/views.py
|
conscience99/lyriko
|
0ecc9e4d5ec8e3d746fcb286209a1e7993548a66
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from rest_framework import response
from rest_framework.serializers import Serializer
from . import serializers
from rest_framework.response import Response
from rest_framework.views import APIView
from django.views import View
from rest_framework import status
from . models import SaveList, User, Lyrics, SearchHistory, VerificationCode, SubmitLyrics
from rest_framework.permissions import BasePermission, IsAuthenticated, SAFE_METHODS, IsAdminUser
from rest_framework.authtoken.models import Token
from django.contrib.auth.hashers import make_password, check_password
from django.contrib.auth import login, authenticate
import requests
from django.db.models import Q
from bs4 import BeautifulSoup
import json
from datetime import datetime
import random
from django.core.mail import EmailMessage, EmailMultiAlternatives
from django.conf import settings
from django.template.loader import get_template
from django.urls import reverse
import jwt
from django.utils.encoding import force_bytes, force_text, DjangoUnicodeDecodeError
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.contrib.sites.shortcuts import get_current_site
from .utils import Util
from rest_framework_simplejwt.tokens import RefreshToken
from django.template import Context
from django.http import HttpResponse, HttpResponseNotFound
import os
import re
import urllib
import time
now = datetime.now()
class SignupView(APIView):
now = datetime.now()
def post(self, request, *args,**kwargs):
user=User()
try:
User.objects.get(email=request.data['email'])
return Response({"email":"already taken"})
except:
serializer=serializers.UserSerializer(data=request.data)
if serializer.is_valid():
password=make_password(request.data['password'])
username=request.data['username']
user.username=username
user.first_name=request.data['first_name']
user.last_name=request.data['last_name']
user.email=request.data['email']
user.email_username=request.data['email']
user.password=password
user.is_verified = False
user.save()
new_user=User.objects.get(id=user.id)
token=Token.objects.create(user=new_user)
verification = VerificationCode()
code = random.randint(199999,999999)
verification.code=code
verification.user_id=new_user.id
verification._year = now.year
verification._month = now.month
verification._day = now.day
verification._hour = now.hour
verification._minute = now.minute
verification.save()
from_e = settings.EMAIL_HOST_USER
to=request.data['email']
html = get_template('api/code.html')
html_content = html.render({'username':new_user.username, 'code':code})
                text = f'Hi {username}, \n Please use {code} to continue with Lyriko.'
subject = 'Confirm your email'
email = EmailMultiAlternatives(
subject,
text,
from_e,
[to]
)
email.attach_alternative(html_content, 'text/html')
try:
email.send()
except:
pass
token=Token.objects.get(user=user)
response={'token':token.key, 'user':serializer.data}
return Response(response)
else:
return Response(serializer.errors)
class SendCode(APIView):
def post(self, request, *args, **kwargs):
try:
user = User.objects.get(email=request.data['email'])
except:
return Response({"error":"User not found."})
try:
v = VerificationCode.objects.get(user_id=user.id)
v.delete()
except:
pass
verification = VerificationCode()
code = random.randint(199999,999999)
verification.code=code
verification.user_id=user.id
verification._year = now.year
verification._month = now.month
verification._day = now.day
verification._hour = now.hour
verification._minute = now.minute
verification.save()
from_e = settings.EMAIL_HOST_USER
to=request.data['email']
html = get_template('api/code.html')
html_content = html.render({'username':user.username, 'code':code})
        text = f'Hi {user.username}, \n Please use {code} to continue with Lyriko.'
subject = 'Action Required'
email = EmailMultiAlternatives(
subject,
text,
from_e,
[to]
)
email.attach_alternative(html_content, 'text/html')
try:
email.send()
except:
return Response({"error":"Error occured"})
return Response({"success":"Success"})
class AccountActivation(APIView):
permission_classes=[IsAuthenticated]
def post(self, request, *args, **kwargs):
user=User.objects.get(username=request.user.username)
code=request.data['code']
try:
verification = VerificationCode.objects.get(user_id=user.id, code=int(code))
user.is_verified=True
user.save()
verification.delete()
return Response({'msg':'success'})
except:
return Response({'error':'Invalid code.'})
class VerifyUser(APIView):
def post(self, request, *args, **kwargs):
user = User.objects.get(email=request.data['email'])
code = request.data['code']
try:
_code = VerificationCode.objects.get(code=int(code), user_id=user.id)
_code.delete()
return Response({"msg":"success"})
except:
return Response({"error":"invalid code"})
class CheckSaveList(APIView):
permission_classes=[IsAuthenticated]
def post(self, request, *args, **kwargs):
try:
if SaveList.objects.get(owner_username=request.user.username, lyrics_id=request.data['lyrics_id']):
return Response({"watchlisted":'true'})
except:
return Response({"watchlisted":'false'})
class LyricsView(APIView):
def get(self, request, *args, **kwargs):
if request.method=='GET':
lyrics_items=Lyrics.objects.all()
serializer = serializers.LyricsSerializer(lyrics_items,many=True)
response={'lyrics':serializer.data}
return Response(response, status=status.HTTP_200_OK)
else:
response={'error':'Forbidden'}
return Response(response, status=status.HTTP_400_BAD_REQUEST)
class AddLyricsView(APIView):
permission_classes=[IsAuthenticated]
def post(self, request, *args, **kwargs):
if request.method=='POST':
data=request.data
lyrics=Lyrics()
serializer=serializers.LyricsSerializer(data=data)
if serializer.is_valid():
lyrics.title=request.POST['title']
lyrics.artist=request.POST['artist']
lyrics.body=request.POST['body']
lyrics.title_slug=request.POST['title'].replace(' ', '-').lower()
lyrics.artist_slug=request.POST['artist'].replace(' ', '-').lower()
response={'lyrics':serializer.data}
return Response(response,status=status.HTTP_200_OK )
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class SingleLyricsView(APIView):
def post(self, request, *args, **kwargs ):
artist = request.data['artist'].strip().replace("-"," ").title()
title=request.data['title'].strip().replace("-"," ").title()
search_history=SearchHistory()
title_clean1=request.data['title'].strip().replace("ain-t", "aint")
title_clean2=title_clean1.replace('you-re', 'youre')
        title_clean3 = title_clean2.replace('isn-t', 'isnt')
        title_clean4 = title_clean3.replace('aren-t', 'arent')
title_clean_5= title_clean4.replace("weren-t","werent")
title_clean6 = title_clean_5.replace("can-t", "cant")
title_clean7 = title_clean6.replace('don-t', 'dont')
title_clean8 = title_clean7.replace('i-d', 'id').replace('i-ve', 'ive').replace('we-ve','weve',).replace('you-ve', 'youve').replace('he-s', 'hes').replace('she-s', 'shes').replace('it-s', 'its',).replace('you-d', 'youd').replace('i-ll', 'ill').replace("you-ll", "youll").replace('let-s', "lets").replace("amn't", "amnt").replace("haven-t","havent")
try:
lyrics_item=Lyrics.objects.get(artist_slug=request.data['artist'], title_slug__icontains=title_clean8)
views = lyrics_item.views
updt_views=views+1
lyrics_item.views = updt_views
lyrics_item.save()
serializer=serializers.LyricsSerializer(lyrics_item, many=False)
response={'lyrics':serializer.data}
### Record activities ###
search_history.searcher_username = request.data['username']
search_history.artist=artist.replace('-',' ')
search_history.title=title.replace('-',' ')
search_history.save()
return Response(response,status=status.HTTP_200_OK)
except Lyrics.DoesNotExist:
return Response({"error":"Not Found"})
class SearchHistoryView(APIView):
permission_classes=[IsAuthenticated]
def get(self, request, *args, **kwargs ):
search_history_items=SearchHistory.objects.filter(searcher_username=request.user.username).order_by('-moment').all()
serializer=serializers.SearchHistorySerializer(search_history_items, many=True)
response={"search_history":serializer.data}
return Response(response,status=status.HTTP_200_OK)
class DeleteHistory(APIView):
permission_classes=[IsAuthenticated]
def post(self, request, *args, **kwargs):
searcher_username = request.user.username
history_item_id = request.data['id']
try:
SearchHistory.objects.get(searcher_username=searcher_username, id=history_item_id).delete()
return Response({"msg":"OK"})
except:
return Response({"msg":"Something went wrong"})
class TrendingView(APIView):
def get(self, request, *args, **kwargs):
lyrics=Lyrics.objects.order_by('-views')[0:35]
serializer=serializers.LyricsSerializer(lyrics, many=True)
response={"top":serializer.data}
return Response(response)
class RandomView(APIView):
    def get(self, request, *args, **kwargs):
lyrics=Lyrics.objects.all()
lyrics_items=[]
for lyric in lyrics:
lyrics_items.append(lyric)
random_lyrics=random.choice(lyrics_items)
serializer=serializers.LyricsSerializer(random_lyrics)
resp={"lyrics":serializer.data}
return Response(resp)
class RecentView(APIView):
def get(self, request, *args, **kwargs):
recent_items=SearchHistory.objects.order_by('-moment').all()[:20]
recent = []
for i in recent_items:
recent.append(i)
serializer=serializers.SearchHistorySerializer(recent, many=True)
resp={"recent":serializer.data}
return Response(resp)
class SuggestionView(APIView):
def post(self, request, *args, **kwargs):
_type=request.data['type']
if _type=="title":
lyrics=Lyrics.objects.filter(title__contains=request.data['title'])
serializer=serializers.LyricsSerializer(lyrics, many=True)
resp={'suggestions':serializer.data}
return Response(resp)
else:
lyrics=Lyrics.objects.filter(artist__contains=request.data['artist'])
serializer=serializers.LyricsSerializer(lyrics, many=True)
resp={'suggestions':serializer.data}
return Response(resp)
class ChangePassword(APIView):
def post(self, request, *args, **kwargs):
if request.data['access'] == "code":
try:
user = User.objects.get(email=request.data['email'])
            except:
                return Response({"error": "User not found."})
user.password = make_password(request.data['new_password'])
user.save()
return Response({"msg":"success"})
else:
user = User.objects.get(username=request.user.username)
current_password = request.data['current_password']
if check_password(current_password, user.password):
user.password = make_password(request.data['new_password'])
user.save()
return Response({"success":"Password changed"})
else:
return Response({"error":"Incorrect password"})
class modifyUser(APIView):
permission_classes=[IsAuthenticated]
def post(self, request, *args, **kwargs):
user = User.objects.get(pk=request.user.id)
new_email = request.data['email']
old_email = user.email
if new_email != old_email:
user.is_verified = False
user.username = request.data['username']
user.email = new_email
user.first_name = request.data['first_name']
user.last_name = request.data['last_name']
user.save()
n_user = User.objects.get(id=request.user.id)
serializer=serializers.UserSerializer(user, many=False)
response={'user':serializer.data}
return Response(response)
''' class EditLyricsView(APIView):
def post(self, request, pk, *args, **kwargs ):
data=request.data
lyrics=Lyrics.objects.get(pk=pk)
lyrics.title=request.POST['title']
lyrics.artist=request.POST['artist']
lyrics.body=request.POST['body']
Lyrics.objects.get(pk=pk)
lyrics.save()
lyrics_item=Lyrics.objects.get(pk=pk)
serializer=serializers.LyricsSerializer(lyrics_item,many=False)
response={'lyrics':serializer.data}
return Response(response,status=status.HTTP_200_OK ) '''
class SaveListView(APIView):
permission_classes=[IsAuthenticated]
def get(self, request, *args, **kwargs):
save_list_items=SaveList.objects.filter(owner_username=request.user.username)
save_list=[]
for i in save_list_items:
lyrics = Lyrics.objects.get(pk=i.lyrics_id)
save_list.append(lyrics)
serializer = serializers.LyricsSerializer(save_list, many=True)
return Response({'lyrics':serializer.data}, status=status.HTTP_200_OK)
class AddSaveListView(APIView):
permission_classes=[IsAuthenticated]
def post(self, request, *args, **kwargs):
items=SaveList.objects.filter(owner_username=request.user.username)
data=request.data
username=request.user.username
savelist=SaveList()
try:
if SaveList.objects.get(owner_username=request.user.username, lyrics_id=request.data['lyrics_id']):
return Response({"Error":"Cannot add lyrics to Save List twice or more."})
except:
savelist.lyrics_id=request.data['lyrics_id']
savelist.owner_username=username
savelist.save()
save_list_items=SaveList.objects.filter(owner_username=request.user.username)
save_list = []
for save_list_item in save_list_items:
sl = Lyrics.objects.get(pk=save_list_item.lyrics_id)
save_list.append(sl)
serializer = serializers.LyricsSerializer(save_list, many=True)
response={'save_list':serializer.data}
return Response(response, status=status.HTTP_200_OK)
class RemoveSaveListView(APIView):
permission_classes=[IsAuthenticated]
def post(self, request, *args, **kwargs):
owner_username=request.user.username
lyrics_id=request.data['lyrics_id']
save_list_item=SaveList.objects.get(owner_username=owner_username, lyrics_id=lyrics_id)
save_list_item.delete()
save_list_items=SaveList.objects.filter(owner_username=request.user.username)
save_list = []
for save_list_item in save_list_items:
sl = Lyrics.objects.get(pk=save_list_item.lyrics_id)
save_list.append(sl)
serializer = serializers.LyricsSerializer(save_list, many=True)
response={'save_list':serializer.data}
return Response(response, status=status.HTTP_200_OK)
class CheckUserView(APIView):
def post(self, request, *args, **kwargs):
try:
User.objects.get(username=request.data['username'])
return Response({'true'}, status=status.HTTP_200_OK)
except User.DoesNotExist:
return Response({'false'})
""" class SignupView(APIView):
def post(self, request, *args, **kwargs):
user=User()
serializer=serializers.UserSerializer(data=request.data)
print(request.data)
if serializer.is_valid():
password=make_password(request.data['password'])
username=request.data['username']
user.username=username
user.first_name=request.data['first_name']
user.last_name=request.data['last_name']
user.email=request.data['email']
user.email_username=request.data['email']
user.password=password
user.save()
new_user=User.objects.get(username=username)
print(new_user)
token=Token.objects.create(user=new_user)
response={'token':token.key, 'user':serializer.data}
return Response(response, status=status.HTTP_200_OK)
else:
return Response(serializer.errors) """
class UserDataView(APIView):
permission_classes=[IsAuthenticated]
def get(self, request, *args, **kwargs):
user=User.objects.get(username=request.user.username)
serializer=serializers.UserSerializer(user, many=False)
response={'user':serializer.data}
return Response(response, status=status.HTTP_200_OK)
class SigninView(APIView):
def post(self, request, *args, **kwargs):
password=request.data['password']
username=request.data['username']
try:
if '@' not in username:
user=User.objects.get(username=username)
elif '@' in username:
user=User.objects.get(email_username=username)
except:
return Response({'error':'User not found.'})
if check_password(password, user.password):
login(self.request, user)
token=Token.objects.get(user=user)
serializer=serializers.UserSerializer(user, many=False)
response={'user':serializer.data, 'token':token.key}
return Response(response, status=status.HTTP_200_OK)
else:
return Response({'error':'Incorrect password'})
class SubmitLyricsv(APIView):
def post(self, request, *args, **kwargs):
serializer = serializers.SubmitLyricsSerializer(data=request.data)
        if serializer.is_valid():
sl=SubmitLyrics()
sl.title=request.data['title']
sl.artist=request.data['artist']
sl.body=request.data['body']
sl.save()
response = {"msg":"OK"}
return Response(response)
else:
            return Response(serializer.errors)
class ApproveSubmitLyrics(APIView):
permission_classes=[IsAuthenticated]
def post(self, request, *args, **kwargs):
user = request.user
if user.is_lyrics_admin != True:
return Response({"Error":"Forbidden"})
else:
lyrics = Lyrics()
lyrics.artist = request.data['artist']
lyrics.artist_slug = request.data['artist'].strip().replace(" ","-").lower()
lyrics.title = request.data['title']
lyrics.title_slug=request.data['title'].strip().replace(" ","-").lower()
lyrics.body = request.data['body']
lyrics.save()
sl = SubmitLyrics.objects.get(id=request.data['id']).delete()
return Response({"msg":"OK"})
class SubmitLyricsListView(APIView):
permission_classes=[IsAuthenticated]
def get(self, request, *args, **kwargs):
user=request.user
if user.is_lyrics_admin != True:
return Response({"Error":"Forbidden"})
else:
sub = SubmitLyrics.objects.all()
serializer = serializers.SubmitLyricsSerializer(sub, many=True)
res = {"submit_lyrics_view":serializer.data}
return Response(res)
class SubmitLyricsView(APIView):
permission_classes=[IsAuthenticated]
def post(self, request, *args, **kwargs):
user = request.user
if user.is_lyrics_admin != True:
return Response({"Error":"Forbidden"})
else:
item = SubmitLyrics.objects.get(id=request.data['id'])
serializer = serializers.SubmitLyricsSerializer(item, many=False)
res = {"submit_lyrics_item":serializer.data}
return Response(res)
class DeclineSubmitLyrics(APIView):
def post(self, request, *args, **kwargs):
user = request.user
if user.is_lyrics_admin != True:
return Response({"Error":"Forbidden"})
else:
item = SubmitLyrics.objects.get(id=request.data['id'])
item.delete()
return Response({"msg":"OK"})
class RelatedView(APIView):
def post(self, request, *args, **kwargs):
lyrics = Lyrics.objects.filter(artist_slug=request.data['artist'])[0:10]
serializer=serializers.LyricsSerializer(lyrics, many=True)
response={"top":serializer.data}
return Response(response)
class SearchViewv(APIView):
def post(self, request, *args, **kwargs):
if request.data['term']:
term=request.data['term']
terms = term.split()
results =[]
for i in terms:
if i!="by":
for j in Lyrics.objects.filter(title__icontains=i):
results.append(j)
for k in Lyrics.objects.filter(artist__icontains=i):
results.append(k)
search_res = [i for j, i in enumerate(results) if i not in results[:j]]
serializer=serializers.LyricsSerializer(search_res, many=True)
response={"result":serializer.data}
return Response(response)
else:
return Response({"error":"Unavailable"})
""" data = requests.get(f"https://api.lyrics.ovh/v1/{artistSlug}/{titleSlug}/")
lyric = data.json()
if data.status_code == 200:
lyrics.title=title
lyrics.artist=artist
lyrics.title_slug=titleSlug
lyrics.artist_slug=artistSlug
lyrics.body=lyric['lyrics']
lyrics.save()
lyrics_item=Lyrics.objects.get(title_slug=title_slug, artist_slug=artist_slug)
searchHistory.lyrics_id = lyrics_item.id
searchHistory.searcher_username = request.user.username
searchHistory.moment=now.strftime('%Y-%m-%d %H:%M:%S')
searchHistory.save()
serializer=serializers.LyricsSerializer(lyrics_item, many=False)
response={'lyrics':serializer.data}
return Response(response,status=status.HTTP_200_OK ) """
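# A hedged alternative to the long replace() chain in SingleLyricsView, which
# strips the "-" standing in for apostrophes inside contractions. A sketch,
# not the app's actual logic:
# import re
# def clean_title_slug(slug):
#     # "don-t" -> "dont", "you-re" -> "youre", "i-ll" -> "ill"
#     return re.sub(r"(\w)-(t|s|d|re|ve|ll)\b", r"\1\2", slug)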
| 38.436508
| 356
| 0.617097
| 2,569
| 24,215
| 5.697548
| 0.127676
| 0.048849
| 0.030744
| 0.043042
| 0.632097
| 0.571702
| 0.520325
| 0.457471
| 0.437863
| 0.425224
| 0
| 0.005792
| 0.265662
| 24,215
| 629
| 357
| 38.497615
| 0.817343
| 0.000702
| 0
| 0.498943
| 0
| 0
| 0.066374
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061311
| false
| 0.033827
| 0.080338
| 0
| 0.344609
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db705bf281d4e51af41d8edd5763fe3fe1cf7124
| 3,936
|
py
|
Python
|
lab6.py
|
jschmidtnj/CS115
|
fa2374f1ae9c9b63e572850a97af6086112d7a36
|
[
"MIT"
] | null | null | null |
lab6.py
|
jschmidtnj/CS115
|
fa2374f1ae9c9b63e572850a97af6086112d7a36
|
[
"MIT"
] | null | null | null |
lab6.py
|
jschmidtnj/CS115
|
fa2374f1ae9c9b63e572850a97af6086112d7a36
|
[
"MIT"
] | 1
|
2022-01-03T01:44:39.000Z
|
2022-01-03T01:44:39.000Z
|
'''
Created on 10/11/2017
@author: jschmid3@stevens.edu
Pledge: I pledge my honor that I have abided by the Stevens Honor System -Joshua Schmidt
CS115 - Lab 6
'''
def isOdd(n):
'''Returns whether or not the integer argument is odd.'''
#question 1: base_2 of 42: 101010
if n == 0:
return False
if n % 2 != 0:
return True
return False
#question 2: if given an odd base-10 number, the least-significant bit of its base-2 representation will be a 1.
#question 3: if given an even base-10 number, the least-significant bit of its base-2 representation will be a 0.
#This is because 2^0 = 1, and that is the only way to make an odd number, by having a 1 in the least significant bit.
#question 4: By eliminating the least significant bit, the original number decreases by a factor of 2, if the bit is a 0.
#if the least significant bit is a 1, the original number is decreased by a factor of 2, - 1.
#question 5: If N is odd, the base-2 of N is Y + "1". If N is even, the base-2 of N is Y + "0".
#This is because to get from N base-10 to N base-2 you do successive division by 2, keeping the remainder, so given
#the base-2 of all of the division except for the first, one must put that remainder in front, hence the answer given.
def numToBinary(n):
'''Precondition: integer argument is non-negative.
Returns the string with the binary representation of non-negative integer n.
If n is 0, the empty string is returned.'''
if n == 0:
return ""
elif isOdd(n):
return numToBinary(n // 2) + "1"
else: return numToBinary(n // 2) + "0"
#print(numToBinary(15))
def binaryToNum(s):
'''Precondition: s is a string of 0s and 1s.
Returns the integer corresponding to the binary representation in s.
Note: the empty string represents 0.'''
if s == "":
return 0
return int(s[0])*(2**(len(s)-1)) + binaryToNum(s[1:])
#print(binaryToNum("1111"))
def addBin(s, numAdd, carry = 0):
"""adds 2 binary numbers"""
if s == "" or numAdd == "":
if carry == 0:
return s + numAdd
place = carry
carry = 0
if s != "" and s[-1] == "1":
carry = place
place = 1 - place
if numAdd != "" and numAdd[-1] == "1":
carry += place
place = 1 - place
return addBin(s[:-1], numAdd[:-1], carry) + str(place)
#print(addBin("100", "001", 0))
def makeEightBit(a):
    """Pads or truncates a binary string to exactly 8 bits and returns it.
    When the input is longer than 8 bits, the least-significant 8 are kept."""
    if len(a) == 8:
        return str(a)
    elif len(a) > 8:
        return makeEightBit(a[(len(a)-8):])
    return makeEightBit("0" + a)
def increment(s):
'''Precondition: s is a string of 8 bits.
Returns the binary representation of binaryToNum(s) + 1.'''
#numAdd = "00000001"
dec = binaryToNum(s)
dec += 1
answer = numToBinary(dec)
#print(answer)
if len(answer) > 8:
return answer[(len(answer)-8):]
answer = (8-len(answer))*"0" + answer
return answer
#print(increment("1110100000"))
def count(s, n):
'''Precondition: s is an 8-bit string and n >= 0.
Prints s and its n successors.'''
if n == 0:
print(s)
return ""
print(s)
return count(increment(s), n-1)
#print(count("11111110", 5))
#print("a")
def numToTernary(n):
'''Precondition: integer argument is non-negative.
Returns the string with the ternary representation of non-negative integer
n. If n is 0, the empty string is returned.'''
if n == 0:
return ""
return numToTernary(n // 3) + str(n % 3)
#print(numToTernary(42))
def ternaryToNum(s):
'''Precondition: s is a string of 0s, 1s, and 2s.
Returns the integer corresponding to the ternary representation in s.
Note: the empty string represents 0.'''
if s == "":
return 0
return int(s[0])*(3**(len(s)-1)) + ternaryToNum(s[1:])
#print(ternaryToNum('12211010'))
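# Worked examples for the converters above (values checked by hand):
# numToBinary(42)      -> '101010'  (matches the question-1 comment)
# binaryToNum('1111')  -> 15
# numToTernary(42)     -> '1120'    since 42 = 1*27 + 1*9 + 2*3 + 0
# ternaryToNum('1120') -> 42        (round trip)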
| 33.641026
| 121
| 0.621697
| 622
| 3,936
| 3.932476
| 0.233119
| 0.011038
| 0.038839
| 0.044971
| 0.322567
| 0.312756
| 0.284137
| 0.243663
| 0.221586
| 0.221586
| 0
| 0.054953
| 0.251016
| 3,936
| 116
| 122
| 33.931034
| 0.774763
| 0.567073
| 0
| 0.3
| 0
| 0
| 0.003769
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0
| 0
| 0.466667
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db73a20804b8cf971455500dd1ae60cb3137e6bf
| 4,321
|
py
|
Python
|
src/processing/augmentation.py
|
sdcubber/kaggle_carvana
|
44f6c7f1e80be2caa3c7ad4c7fb69067af45fe8f
|
[
"MIT"
] | null | null | null |
src/processing/augmentation.py
|
sdcubber/kaggle_carvana
|
44f6c7f1e80be2caa3c7ad4c7fb69067af45fe8f
|
[
"MIT"
] | null | null | null |
src/processing/augmentation.py
|
sdcubber/kaggle_carvana
|
44f6c7f1e80be2caa3c7ad4c7fb69067af45fe8f
|
[
"MIT"
] | null | null | null |
# Script for data augmentation functions
import numpy as np
from collections import deque
from PIL import Image
import cv2
import torch  # needed by image_to_tensor below
from data.config import *
def imread_cv2(image_path):
"""
Read image_path with cv2 format (H, W, C)
if image is '.gif' outputs is a numpy array of {0,1}
"""
image_format = image_path[-3:]
if image_format == 'jpg':
image = cv2.imread(image_path)
else:
image = np.array(Image.open(image_path))
return image
def resize_cv2(image, heigh=1280, width=1918):
return cv2.resize(image, (width, heigh), cv2.INTER_LINEAR)
def image_to_tensor(image, mean=0, std=1.):
"""Transform image (input is numpy array, read in by cv2) """
if len(image.shape) == 2:
image = image.reshape(image.shape[0], image.shape[1], 1)
image = image.astype(np.float32)
image = (image-mean)/std
image = image.transpose((2,0,1))
tensor = torch.from_numpy(image)
return tensor
# --- Data Augmentation functions --- #
# A lot of functions can be found here:
# https://github.com/fchollet/keras/blob/master/keras/preprocessing/image.py#L223
# transform image and label
def randomHorizontalFlip(image, mask, p=0.5):
"""Do a random horizontal flip with probability p"""
if np.random.random() < p:
image = np.fliplr(image)
mask = np.fliplr(mask)
return image, mask
def randomVerticalFlip(image, mask, p=0.5):
"""Do a random vertical flip with probability p"""
if np.random.random() < p:
image = np.flipud(image)
mask = np.flipud(mask)
return image, mask
def randomHorizontalShift(image, mask, max_shift=0.05, p=0.5):
"""Do random horizontal shift with max proportion shift and with probability p
Elements that roll beyond the last position are re-introduced at the first."""
max_shift_pixels = int(max_shift*image.shape[1])
shift = np.random.choice(np.arange(-max_shift_pixels, max_shift_pixels+1))
if np.random.random() < p:
image = np.roll(image, shift, axis=1)
mask = np.roll(mask, shift, axis=1)
return image, mask
def randomVerticalShift(image, mask, max_shift=0.05, p=0.5):
"""Do random vertical shift with max proportion shift and probability p
Elements that roll beyond the last position are re-introduced at the first."""
max_shift_pixels = int(max_shift*image.shape[0])
shift = np.random.choice(np.arange(-max_shift_pixels, max_shift_pixels+1))
if np.random.random() < p:
image = np.roll(image, shift, axis=0)
mask = np.roll(mask, shift, axis=0)
return image, mask
def randomInvert(image, mask, p=0.5):
"""Randomly invert image with probability p"""
if np.random.random() < p:
image = 255 - image
mask = mask
return image, mask
def randomBrightness(image, mask, p=0.75):
"""With probability p, randomly increase or decrease brightness.
See https://stackoverflow.com/questions/37822375/python-opencv-increasing-image-brightness-without-overflowing-uint8-array"""
if np.random.random() < p:
max_value = np.percentile(255-image, q=25) # avoid burning out white cars, so take image-specific maximum
value = np.random.choice(np.arange(-max_value, max_value))
if value > 0:
image = np.where((255 - image) < value,255,image+value).astype(np.uint8)
else:
image = np.where(image < -value,0,image+value).astype(np.uint8)
return image, mask
def randomHue(image, mask, p=0.25, max_value=75):
"""With probability p, randomly increase or decrease hue.
See https://stackoverflow.com/questions/32609098/how-to-fast-change-image-brightness-with-python-opencv"""
if np.random.random() < p:
value = np.random.choice(np.arange(-max_value, max_value))
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
hsv[:,:,0] = hsv[:,:,0] + value
hsv = np.clip(hsv, a_min=0, a_max=255).astype(np.uint8)
image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return image, mask
def GaussianBlur(image, mask, kernel=(1, 1),sigma=1, p=0.5):
"""With probability p, apply Gaussian blur"""
# TODO
return image, mask
def randomRotate(image, mask, max_angle, p=0.5):
"""Perform random rotation with max_angle and probability p"""
# TODO
return(image, mask)
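# Hedged sketches for the two TODO stubs above; kernel/angle handling and
# border behavior are assumptions, not the authors' final implementation:
# def GaussianBlur(image, mask, kernel=(3, 3), sigma=1, p=0.5):
#     """With probability p, apply Gaussian blur to the image only."""
#     if np.random.random() < p:
#         image = cv2.GaussianBlur(image, kernel, sigma)
#     return image, mask
#
# def randomRotate(image, mask, max_angle, p=0.5):
#     """Rotate image and mask together by a random angle up to max_angle."""
#     if np.random.random() < p:
#         angle = np.random.uniform(-max_angle, max_angle)
#         h, w = image.shape[:2]
#         M = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1.0)
#         image = cv2.warpAffine(image, M, (w, h))
#         mask = cv2.warpAffine(mask, M, (w, h))
#     return image, mask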
| 36.931624
| 129
| 0.669058
| 640
| 4,321
| 4.453125
| 0.270313
| 0.066316
| 0.047368
| 0.050526
| 0.402807
| 0.324211
| 0.287018
| 0.287018
| 0.241404
| 0.22807
| 0
| 0.034863
| 0.203425
| 4,321
| 116
| 130
| 37.25
| 0.793144
| 0.303633
| 0
| 0.291667
| 0
| 0
| 0.00103
| 0
| 0
| 0
| 0
| 0.008621
| 0
| 1
| 0.166667
| false
| 0
| 0.069444
| 0.013889
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db74905cc0d77c3c1aff987d3c4f57d66e26cc16
| 1,905
|
py
|
Python
|
terrafirma/core/views/env.py
|
AlexandraAlter/django-terrafirma
|
afce5946f173aded2b4bfea78cf1b1034ec32272
|
[
"MIT"
] | null | null | null |
terrafirma/core/views/env.py
|
AlexandraAlter/django-terrafirma
|
afce5946f173aded2b4bfea78cf1b1034ec32272
|
[
"MIT"
] | null | null | null |
terrafirma/core/views/env.py
|
AlexandraAlter/django-terrafirma
|
afce5946f173aded2b4bfea78cf1b1034ec32272
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse_lazy
from django import views
from django.views import generic as g_views
from django.views.generic import base as b_views, edit as e_views
from .. import forms, models
class NewEnvView(e_views.CreateView):
model = models.Environment
fields = ['name', 'abbrev']
success_url = reverse_lazy('home')
class EnvMixin(b_views.ContextMixin):
def setup(self, request, *args, **kwargs):
super().setup(request, *args, **kwargs)
self.env = get_object_or_404(models.Environment, abbrev=kwargs['env_abbrev'])
def url_vars(self):
return {'env_abbrev': self.env.abbrev}
def get_context_data(self, **kwargs):
return super().get_context_data(env=self.env, **kwargs)
class MaybeEnvMixin(b_views.ContextMixin):
def setup(self, request, *args, **kwargs):
super().setup(request, *args, **kwargs)
self.env = models.Environment.objects.get(abbrev=request.GET['env'])
def url_vars(self):
return {'env_abbrev': self.env.abbrev if self.env else None}
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.env:
context.update(env=self.env)
return context
class EnvView(EnvMixin, g_views.DetailView):
model = models.Environment
slug_field = 'abbrev'
slug_url_kwarg = 'env_abbrev'
class EditEnvView(EnvMixin, e_views.UpdateView):
model = models.Environment
fields = ['name', 'abbrev']
slug_field = 'abbrev'
slug_url_kwarg = 'env_abbrev'
def setup(self, request, *args, **kwargs):
super().setup(request, *args, **kwargs)
self.object = self.env
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.save()
return redirect('env', env_abbrev=self.env.abbrev)
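# A hedged sketch of the URLconf these views assume; route names and paths are
# illustrative, but EnvMixin.setup does require an `env_abbrev` kwarg and
# EditEnvView redirects to a route named 'env':
# urlpatterns = [
#     path('envs/new/', NewEnvView.as_view(), name='new-env'),
#     path('envs/<slug:env_abbrev>/', EnvView.as_view(), name='env'),
#     path('envs/<slug:env_abbrev>/edit/', EditEnvView.as_view(), name='edit-env'),
# ]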
| 30.238095
| 85
| 0.67979
| 251
| 1,905
| 5
| 0.262948
| 0.055777
| 0.081275
| 0.045418
| 0.422311
| 0.404781
| 0.301195
| 0.301195
| 0.243825
| 0.243825
| 0
| 0.003919
| 0.196325
| 1,905
| 62
| 86
| 30.725806
| 0.815807
| 0
| 0
| 0.422222
| 0
| 0
| 0.048294
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.177778
| false
| 0
| 0.133333
| 0.066667
| 0.755556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db777f4b56a68caa06eca0c2b86f08c668527cb4
| 2,717
|
py
|
Python
|
Archive/train_cnn.py
|
Yeok-c/Urban-Sound-Classification
|
98c46eb54266ef7b859d192e9bebe8a5d48e1708
|
[
"Apache-2.0"
] | null | null | null |
Archive/train_cnn.py
|
Yeok-c/Urban-Sound-Classification
|
98c46eb54266ef7b859d192e9bebe8a5d48e1708
|
[
"Apache-2.0"
] | null | null | null |
Archive/train_cnn.py
|
Yeok-c/Urban-Sound-Classification
|
98c46eb54266ef7b859d192e9bebe8a5d48e1708
|
[
"Apache-2.0"
] | null | null | null |
### Load necessary libraries ###
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
import tensorflow as tf
from tensorflow import keras
from sklearn.metrics import ConfusionMatrixDisplay
model = get_network()
model.summary()
### Train and evaluate via 10-Folds cross-validation ###
accuracies = []
folds = np.array(['fold1','fold2','fold3','fold4',
'fold5','fold6','fold7','fold8',
'fold9','fold10'])
load_dir = "UrbanSounds8K/processed/"
kf = KFold(n_splits=10)
for train_index, test_index in kf.split(folds):
x_train, y_train = [], []
for ind in train_index:
# read features or segments of an audio file
train_data = np.load("{0}/{1}.npz".format(load_dir,folds[ind]),
allow_pickle=True)
# for training stack all the segments so that they are treated as an example/instance
features = np.concatenate(train_data["features"], axis=0)
labels = np.concatenate(train_data["labels"], axis=0)
x_train.append(features)
y_train.append(labels)
# stack x,y pairs of all training folds
x_train = np.concatenate(x_train, axis = 0).astype(np.float32)
y_train = np.concatenate(y_train, axis = 0).astype(np.float32)
# for testing we will make predictions on each segment and average them to
# produce single label for an entire sound clip.
test_data = np.load("{0}/{1}.npz".format(load_dir,
folds[test_index][0]), allow_pickle=True)
x_test = test_data["features"]
y_test = test_data["labels"]
log_dir="logs/fit/" + folds[test_index][0]
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model = get_network()
model.fit(x_train, y_train, epochs = 20, batch_size = 64, verbose = 1, validation_split=0.2,
use_multiprocessing=True, workers=8, callbacks=[tensorboard_callback])
# evaluate on test set/fold
y_true, y_pred = [], []
for x, y in zip(x_test, y_test):
# average predictions over segments of a sound clip
avg_p = np.argmax(np.mean(model.predict(x), axis = 0))
y_pred.append(avg_p)
# pick single label via np.unique for a sound clip
y_true.append(np.unique(y)[0])
accuracies.append(accuracy_score(y_true, y_pred))
print("Fold n accuracy: {0}".format(accuracy_score(y_true, y_pred)))
cm = ConfusionMatrixDisplay.from_predictions(y_true, y_pred)
cm.figure_.savefig('conf_mat_' + str(test_index) + '_acc_' + str(accuracy_score(y_true, y_pred)) + '.png',dpi=1000)
print("Average 10 Folds Accuracy: {0}".format(np.mean(accuracies)))
| 40.552239
| 123
| 0.670225
| 391
| 2,717
| 4.483376
| 0.383632
| 0.017114
| 0.017114
| 0.028523
| 0.11352
| 0.105533
| 0.03765
| 0.03765
| 0.03765
| 0.03765
| 0
| 0.022748
| 0.207214
| 2,717
| 66
| 124
| 41.166667
| 0.791086
| 0.179242
| 0
| 0.045455
| 0
| 0
| 0.091444
| 0.010865
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.136364
| 0
| 0.136364
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db77a4da0f8fc3044cf961cfd37d6efdc53cb0ed
| 274
|
py
|
Python
|
Python3/1436-Destination-City/soln.py
|
wyaadarsh/LeetCode-Solutions
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
[
"MIT"
] | 5
|
2020-07-24T17:48:59.000Z
|
2020-12-21T05:56:00.000Z
|
Python3/1436-Destination-City/soln.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | null | null | null |
Python3/1436-Destination-City/soln.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | 2
|
2020-07-24T17:49:01.000Z
|
2020-08-31T19:57:35.000Z
|
from typing import List  # required for the List[...] annotation below

class Solution:
    def destCity(self, paths: List[List[str]]) -> str:
bads = set()
cities = set()
        for u, v in paths:
            cities.add(u)
            cities.add(v)
            bads.add(u)  # a city with an outgoing path cannot be the destination
        ans = cities - bads
        return ans.pop()  # exactly one city is left: the destination
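# --- Illustrative usage (not part of the original file) ---
# paths = [["London", "New York"], ["New York", "Lima"], ["Lima", "Sao Paulo"]]
# Solution().destCity(paths)  # -> "Sao Paulo"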
| 24.909091
| 54
| 0.474453
| 35
| 274
| 3.714286
| 0.542857
| 0.138462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006061
| 0.39781
| 274
| 10
| 55
| 27.4
| 0.781818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db77b07e8a875d39eb972f8b432c0f0db96a2c4f
| 6,105
|
py
|
Python
|
metaflow/plugins/kfp/tests/flows/resources_flow.py
|
zillow/metaflow
|
a42dc9eab04695f2b0a429874e607ed67d5a2b45
|
[
"Apache-2.0"
] | 7
|
2020-07-24T17:07:58.000Z
|
2021-05-19T21:47:12.000Z
|
metaflow/plugins/kfp/tests/flows/resources_flow.py
|
zillow/metaflow
|
a42dc9eab04695f2b0a429874e607ed67d5a2b45
|
[
"Apache-2.0"
] | 55
|
2020-07-20T16:56:27.000Z
|
2022-03-28T12:51:15.000Z
|
metaflow/plugins/kfp/tests/flows/resources_flow.py
|
zillow/metaflow
|
a42dc9eab04695f2b0a429874e607ed67d5a2b45
|
[
"Apache-2.0"
] | 6
|
2020-10-15T18:38:35.000Z
|
2021-06-20T03:05:43.000Z
|
import os
import pprint
import subprocess
import time
from typing import Dict, List
from kubernetes.client import (
V1EnvVar,
V1EnvVarSource,
V1ObjectFieldSelector,
V1ResourceFieldSelector,
)
from metaflow import FlowSpec, step, environment, resources, current
def get_env_vars(env_resources: Dict[str, str]) -> List[V1EnvVar]:
res = []
for name, resource in env_resources.items():
res.append(
V1EnvVar(
                # operator-sdk reads environment variables like these to
                # discover the pod's resource requests and limits
name=name,
value_from=V1EnvVarSource(
resource_field_ref=V1ResourceFieldSelector(
container_name="main",
resource=resource,
divisor="1m" if "cpu" in resource else "1",
)
),
)
)
return res
kubernetes_vars = get_env_vars(
{
"LOCAL_STORAGE": "requests.ephemeral-storage",
"LOCAL_STORAGE_LIMIT": "limits.ephemeral-storage",
"CPU": "requests.cpu",
"CPU_LIMIT": "limits.cpu",
"MEMORY": "requests.memory",
"MEMORY_LIMIT": "limits.memory",
}
)
kubernetes_vars.append(
V1EnvVar(
name="MY_POD_NAME",
value_from=V1EnvVarSource(
field_ref=V1ObjectFieldSelector(field_path="metadata.name")
),
)
)
annotations = {
"metaflow.org/flow_name": "MF_NAME",
"metaflow.org/step": "MF_STEP",
"metaflow.org/run_id": "MF_RUN_ID",
"metaflow.org/experiment": "MF_EXPERIMENT",
"metaflow.org/tag_metaflow_test": "MF_TAG_METAFLOW_TEST",
"metaflow.org/tag_test_t1": "MF_TAG_TEST_T1",
}
for annotation, env_name in annotations.items():
kubernetes_vars.append(
V1EnvVar(
name=env_name,
value_from=V1EnvVarSource(
field_ref=V1ObjectFieldSelector(
field_path=f"metadata.annotations['{annotation}']"
)
),
)
)
labels = {
"aip.zillowgroup.net/kfp-pod-default": "KF_POD_DEFAULT",
"tags.ledger.zgtools.net/ai-flow-name": "AI_FLOW_NAME",
"tags.ledger.zgtools.net/ai-step-name": "AI_STEP_NAME",
"tags.ledger.zgtools.net/ai-experiment-name": "AI_EXPERIMENT_NAME",
}
for label, env_name in labels.items():
kubernetes_vars.append(
V1EnvVar(
name=env_name,
value_from=V1EnvVarSource(
field_ref=V1ObjectFieldSelector(
field_path=f"metadata.labels['{label}']"
)
),
)
)
class ResourcesFlow(FlowSpec):
@resources(
local_storage="242",
cpu="0.6",
memory="1G",
)
@environment( # pylint: disable=E1102
vars={"MY_ENV": "value"}, kubernetes_vars=kubernetes_vars
)
@step
def start(self):
pprint.pprint(dict(os.environ))
print("=====")
# test simple environment var
assert os.environ.get("MY_ENV") == "value"
# test kubernetes_vars
assert "resourcesflow" in os.environ.get("MY_POD_NAME")
assert os.environ.get("CPU") == "600"
assert os.environ.get("CPU_LIMIT") == "600"
assert os.environ.get("LOCAL_STORAGE") == "242000000"
assert os.environ.get("LOCAL_STORAGE_LIMIT") == "242000000"
assert os.environ.get("MEMORY") == "1000000000"
assert os.environ.get("MEMORY_LIMIT") == "1000000000"
assert os.environ.get("MF_NAME") == current.flow_name
assert os.environ.get("MF_STEP") == current.step_name
assert os.environ.get("MF_RUN_ID") == current.run_id
assert os.environ.get("MF_EXPERIMENT") == "metaflow_test"
assert os.environ.get("MF_TAG_METAFLOW_TEST") == "true"
assert os.environ.get("MF_TAG_TEST_T1") == "true"
assert os.environ.get("KF_POD_DEFAULT") == "true"
assert os.environ.get("AI_FLOW_NAME") == current.flow_name
assert os.environ.get("AI_STEP_NAME") == current.step_name
assert os.environ.get("AI_EXPERIMENT_NAME") == "metaflow_test"
self.items = [1, 2]
self.next(self.foreach_step, foreach="items")
@environment(vars={"MY_ENV": "value"}) # pylint: disable=E1102
@resources(volume="11G")
@step
def foreach_step(self):
# test simple environment var
assert os.environ.get("MY_ENV") == "value"
output = subprocess.check_output(
"df -h | grep /opt/metaflow_volume", shell=True
)
assert "11G" in str(output)
self.next(self.join_step)
@resources(volume="12G")
@step
def join_step(self, inputs):
output = subprocess.check_output(
"df -h | grep /opt/metaflow_volume", shell=True
)
assert "12G" in str(output)
self.next(self.split_step)
@step
def split_step(self):
self.items = [1, 2]
self.next(self.shared_volume_foreach_step, foreach="items")
@resources(volume="13G", volume_mode="ReadWriteMany")
@step
def shared_volume_foreach_step(self):
output = subprocess.check_output(
"df -h | grep /opt/metaflow_volume", shell=True
)
assert "13G" in str(output)
file_path = "/opt/metaflow_volume/test.txt"
message = "hello world!"
# validate the volume is shared across the foreach splits
if self.input == 1:
with open(file_path, "w") as f:
f.write(message)
else:
while not os.path.exists(file_path):
time.sleep(1)
print(".")
with open(file_path, "r") as f:
read_lines = f.readlines()
print("read_lines", read_lines)
assert message == read_lines[0]
self.next(self.shared_volume_join_step)
@step
def shared_volume_join_step(self, inputs):
self.next(self.end)
@step
def end(self):
print("All done.")
if __name__ == "__main__":
ResourcesFlow()
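# --- Illustrative usage (not part of the original file) ---
# As a Metaflow flow this is started through the standard CLI, e.g.:
#   python resources_flow.py run
# Note the assertions above check Kubernetes/KFP-injected values, so the flow
# is meant to pass only inside the Kubeflow Pipelines test environment.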
| 30.073892
| 71
| 0.591646
| 699
| 6,105
| 4.964235
| 0.228898
| 0.051873
| 0.065706
| 0.093372
| 0.37781
| 0.286167
| 0.225072
| 0.192795
| 0.17147
| 0.152738
| 0
| 0.022686
| 0.285176
| 6,105
| 202
| 72
| 30.222772
| 0.772456
| 0.044717
| 0
| 0.216867
| 0
| 0
| 0.211231
| 0.066804
| 0
| 0
| 0
| 0
| 0.138554
| 1
| 0.048193
| false
| 0
| 0.042169
| 0
| 0.10241
| 0.036145
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db79520622b9fcae917edbc819e1d1c2cae17bf8
| 5,951
|
py
|
Python
|
src/nb_utils/general.py
|
redfrexx/osm_association_rules
|
33975ce25047f9ab3b21e890bc5ed9bab59a0a2f
|
[
"BSD-3-Clause"
] | null | null | null |
src/nb_utils/general.py
|
redfrexx/osm_association_rules
|
33975ce25047f9ab3b21e890bc5ed9bab59a0a2f
|
[
"BSD-3-Clause"
] | null | null | null |
src/nb_utils/general.py
|
redfrexx/osm_association_rules
|
33975ce25047f9ab3b21e890bc5ed9bab59a0a2f
|
[
"BSD-3-Clause"
] | 2
|
2021-05-10T10:19:13.000Z
|
2021-09-15T10:32:10.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Functions used for data handling
"""
__author__ = "Christina Ludwig, GIScience Research Group, Heidelberg University"
__email__ = "christina.ludwig@uni-heidelberg.de"
import os
import yaml
from shapely.geometry import box
import numpy as np
import pandas as pd
import geopandas as gpd
import json
from nb_utils.utils import create_bbox, reproject_to_utm
CONTEXT_NAMES = {"area": "Area", "building_density": "Building density", "age": "Days since creation",
"n_tags": "Number of tags", "changes": "Number of changes", "max_version": "Version number",
"user_count_inner": "Inner user count", "user_density_inner": "Inner user density",
"user_count_outer": "Outer user count", "user_density_outer": "Outer user density",
"feature_count": "Feature count", "random": "Random"}
rules_colnames = ['antecedents', 'consequents', 'antecedent support',
'consequent support', 'support', 'confidence', 'lift', 'leverage',
'conviction', "context", "context_min", "context_max", "context_p_min", "context_p_max", "nfeatures", "rule"]
pretty_names_units = {"area": "Area [ha]", "building_density": "Building density", "feature_count": "Feature count", "age": "Days since creation", "n_tags": "Number of tags", "changes": "Number of changes", "max_version": "Version number", "user_count_inner": "Inner user count", "user_density_inner": "Inner user density", "user_count_outer": "Outer user count",
"user_density_outer": "Outer user density", "random": "Random"}
def load_config(config_file, cities):
"""
Load config parameters from file
:param config_file:
:param cities:
:return:
"""
    if not os.path.exists(config_file):
        raise FileNotFoundError("Config file {} does not exist.".format(config_file))
    with open(config_file, 'r') as src:
        config = yaml.load(src, Loader=yaml.FullLoader)
    config_cities = config["locations"]
    config_cities = {city: config_cities[city] for city in cities}
    return config_cities
def load_data(cities, data_dir):
"""
Load data into notebook from file
:return:
"""
loaded_tags_dfs = []
loaded_context_dfs = []
for city in cities:
print("Loading {}...".format(city))
# Check paths
tags_file = os.path.join(data_dir, city, "{}_tags.json".format(city))
context_file = os.path.join(data_dir, city, "{}_context.geojson".format(city))
        if (not os.path.exists(tags_file)) or (not os.path.exists(context_file)):
            print("{}: Input files not found.".format(city))
            return None
# Read data and set index
tags_df = pd.read_json(tags_file).set_index("@osmId")
context_df = gpd.read_file(context_file).set_index("@osmId")
# Calculate area (should be moved to data_extraction)
context_df["area"] = reproject_to_utm(context_df).area #/ 10000. # conversion to ha
# Add column holding the city name
context_df["city"] = city
loaded_tags_dfs.append(tags_df)
loaded_context_dfs.append(context_df)
# Convert list of dataframes to dataframe
all_tags_df = pd.concat(loaded_tags_dfs, axis=0)
all_tags_df = all_tags_df.fillna(False)
all_context_df = pd.concat(loaded_context_dfs, axis=0)
all_features = all_context_df.join(all_tags_df, sort=False)
# Add dummy columns for "no antecedent" and random context variable
all_features["none"] = True
all_features["random"] = np.random.rand(len(all_features))
    # The park itself is always counted as an object inside itself. Therefore, subtract 1.
all_features["feature_count"] = all_features["feature_count"] - 1
# Delete unnecessary columns
unnecessary_cols = list(filter(lambda x: x.startswith("gt:"), all_features.columns)) + ["leisure=park"]
all_features.drop(unnecessary_cols, axis=1, inplace=True)
return all_features
def create_city_bboxes(config_cities):
"""
    Create bounding boxes of cities
:return:
"""
bboxes = {c: box(*create_bbox(config_cities[c]["center"], config_cities[c]["width"])) for c in config_cities.keys()}
bbox_df = pd.DataFrame().from_dict(bboxes, orient="index", columns=["geometry"])
return gpd.GeoDataFrame(bbox_df)
def dump_city_rules(city_rules, interim_dir):
"""
Write results from context based association rule analysis to file
:param city_rules:
:param interim_dir:
:return:
"""
city_rules_dir = os.path.join(interim_dir, "city_rules")
if not os.path.exists(city_rules_dir):
os.mkdir(city_rules_dir)
for k, v in city_rules.items():
print(k)
v["heatmap"].to_json(os.path.join(city_rules_dir, "{}_heatmap.json".format(k)))
v["valid_rules"].reset_index().to_json(os.path.join(city_rules_dir, "{}_valid_rules.json".format(k)))
with open(os.path.join(city_rules_dir, "{}_sel_features.json".format(k)), "w") as dst:
json.dump(list(v["sel_features"].index), dst)
def load_city_rules(cities, interim_dir, all_features):
"""
Load results from context based association rule analysis to file
:param cities:
:param interim_dir:
:param all_features:
:return:
"""
city_rules = {}
for city in cities:
with open(os.path.join(interim_dir, "city_rules", "{}_sel_features.json".format(city))) as dst:
selected_ids = json.load(dst)
sel_features = all_features.loc[selected_ids]
city_rules[city] = {
"heatmap": pd.read_json(os.path.join(interim_dir, "city_rules", "{}_heatmap.json".format(city))),
"valid_rules": pd.read_json(
os.path.join(interim_dir, "city_rules", "{}_valid_rules.json".format(city))).set_index("index"),
"sel_features": sel_features}
return city_rules
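# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of how these helpers chain together; the file name and city
# names are assumptions for illustration only.
# config_cities = load_config("config.yaml", ["heidelberg", "dresden"])
# all_features = load_data(config_cities.keys(), "data/")
# bboxes = create_city_bboxes(config_cities)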
| 40.482993
| 363
| 0.668627
| 799
| 5,951
| 4.740926
| 0.272841
| 0.042767
| 0.023759
| 0.021119
| 0.238648
| 0.206441
| 0.200634
| 0.172122
| 0.157339
| 0.157339
| 0
| 0.002307
| 0.198622
| 5,951
| 146
| 364
| 40.760274
| 0.79199
| 0.140985
| 0
| 0.02439
| 0
| 0
| 0.262321
| 0.00684
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060976
| false
| 0
| 0.097561
| 0
| 0.219512
| 0.04878
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db7ad2b92a14b73e461a5d252d3a7ab245920c9f
| 3,922
|
py
|
Python
|
keystoneclient/auth/identity/v3/federated.py
|
darren-wang/ksc
|
fd096540e8e57b6bd7c923f4cb4ad6616d103cc8
|
[
"Apache-1.1"
] | 1
|
2019-09-11T11:56:19.000Z
|
2019-09-11T11:56:19.000Z
|
tools/dockerize/webportal/usr/lib/python2.7/site-packages/keystoneclient/auth/identity/v3/federated.py
|
foruy/openflow-multiopenstack
|
74140b041ac25ed83898ff3998e8dcbed35572bb
|
[
"Apache-2.0"
] | null | null | null |
tools/dockerize/webportal/usr/lib/python2.7/site-packages/keystoneclient/auth/identity/v3/federated.py
|
foruy/openflow-multiopenstack
|
74140b041ac25ed83898ff3998e8dcbed35572bb
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config import cfg
import six
from keystoneclient.auth.identity.v3 import base
from keystoneclient.auth.identity.v3 import token
__all__ = ['FederatedBaseAuth']
@six.add_metaclass(abc.ABCMeta)
class FederatedBaseAuth(base.BaseAuth):
rescoping_plugin = token.Token
def __init__(self, auth_url, identity_provider, protocol, **kwargs):
"""Class constructor accepting following parameters:
:param auth_url: URL of the Identity Service
:type auth_url: string
:param identity_provider: name of the Identity Provider the client
will authenticate against. This parameter
will be used to build a dynamic URL used to
obtain unscoped OpenStack token.
:type identity_provider: string
"""
super(FederatedBaseAuth, self).__init__(auth_url=auth_url, **kwargs)
self.identity_provider = identity_provider
self.protocol = protocol
@classmethod
def get_options(cls):
options = super(FederatedBaseAuth, cls).get_options()
options.extend([
cfg.StrOpt('identity-provider',
help="Identity Provider's name"),
cfg.StrOpt('protocol',
help='Protocol for federated plugin'),
])
return options
@property
def federated_token_url(self):
"""Full URL where authorization data is sent."""
values = {
'host': self.auth_url.rstrip('/'),
'identity_provider': self.identity_provider,
'protocol': self.protocol
}
url = ("%(host)s/OS-FEDERATION/identity_providers/"
"%(identity_provider)s/protocols/%(protocol)s/auth")
url = url % values
return url
def _get_scoping_data(self):
return {'trust_id': self.trust_id,
'domain_id': self.domain_id,
'domain_name': self.domain_name,
'project_id': self.project_id,
'project_name': self.project_name,
'project_domain_id': self.project_domain_id,
'project_domain_name': self.project_domain_name}
def get_auth_ref(self, session, **kwargs):
"""Authenticate retrieve token information.
This is a multi-step process where a client does federated authn
receives an unscoped token.
If an unscoped token is successfully received and scoping information
is present then the token is rescoped to that target.
:param session: a session object to send out HTTP requests.
:type session: keystoneclient.session.Session
:returns: a token data representation
:rtype: :py:class:`keystoneclient.access.AccessInfo`
"""
auth_ref = self.get_unscoped_auth_ref(session)
scoping = self._get_scoping_data()
if any(scoping.values()):
token_plugin = self.rescoping_plugin(self.auth_url,
token=auth_ref.auth_token,
**scoping)
auth_ref = token_plugin.get_auth_ref(session)
return auth_ref
@abc.abstractmethod
def get_unscoped_auth_ref(self, session, **kwargs):
"""Fetch unscoped federated token."""
| 35.017857
| 77
| 0.63284
| 452
| 3,922
| 5.320796
| 0.369469
| 0.073181
| 0.013721
| 0.013306
| 0.051559
| 0.031601
| 0
| 0
| 0
| 0
| 0
| 0.002147
| 0.287353
| 3,922
| 111
| 78
| 35.333333
| 0.858318
| 0.375319
| 0
| 0
| 0
| 0
| 0.132514
| 0.03993
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113208
| false
| 0
| 0.09434
| 0.018868
| 0.320755
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db7cbd4afe84d62fa37ba5ff4602788af4116b50
| 802
|
py
|
Python
|
config.py
|
iDevHank/i18n
|
ec731b5d6fab330a868ebb9f9e11ff1caef629ef
|
[
"MIT"
] | null | null | null |
config.py
|
iDevHank/i18n
|
ec731b5d6fab330a868ebb9f9e11ff1caef629ef
|
[
"MIT"
] | null | null | null |
config.py
|
iDevHank/i18n
|
ec731b5d6fab330a868ebb9f9e11ff1caef629ef
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# The format of your own localizable method.
# For example: '"string".localized'
SUFFIX = '.localized'
KEY = r'"(?:\\.|[^"\\])*"'
LOCALIZABLE_RE = r'%s%s' % (KEY, SUFFIX)
# Specify the path of localizable files in project.
LOCALIZABLE_FILE_PATH = ''
LOCALIZABLE_FILE_NAMES = ['Localizable']
LOCALIZABLE_FILE_TYPES = ['strings']
# File types of source file.
SEARCH_TYPES = ['swift', 'm', 'json']
SOURCE_FILE_EXCLUSIVE_PATHS = [
'Assets.xcassets', 'Carthage', 'ThirdParty',
'Pods', 'Media.xcassets', 'Framework', 'bin']
LOCALIZABLE_FILE_EXCLUSIVE_PATHS = ['Carthage', 'ThirdParty',
'Pods', 'Framework', 'bin']
LOCALIZABLE_FORMAT_RE = r'"(?:\\.|[^"\\])*"\s*=\s*"(?:\\.|[^"\\])*";\n'
DEFAULT_TARGET_PATH = 'generated.strings'
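# --- Illustrative check (not part of the original file) ---
# A quick sanity test of the pattern above:
# import re
# assert re.search(LOCALIZABLE_RE, 'label.text = "greeting_key".localized')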
| 33.416667
| 71
| 0.634663
| 92
| 802
| 5.336957
| 0.532609
| 0.1222
| 0.016293
| 0.020367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001486
| 0.160848
| 802
| 23
| 72
| 34.869565
| 0.728083
| 0.229426
| 0
| 0
| 0
| 0
| 0.353997
| 0.071778
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db7dbf749958b5f62cb5ff7deb97ed3b8e66afdf
| 1,771
|
py
|
Python
|
MuonGun/resources/scripts/histreduce.py
|
hschwane/offline_production
|
e14a6493782f613b8bbe64217559765d5213dc1e
|
[
"MIT"
] | 1
|
2020-12-24T22:00:01.000Z
|
2020-12-24T22:00:01.000Z
|
MuonGun/resources/scripts/histreduce.py
|
hschwane/offline_production
|
e14a6493782f613b8bbe64217559765d5213dc1e
|
[
"MIT"
] | null | null | null |
MuonGun/resources/scripts/histreduce.py
|
hschwane/offline_production
|
e14a6493782f613b8bbe64217559765d5213dc1e
|
[
"MIT"
] | 3
|
2020-07-17T09:20:29.000Z
|
2021-03-30T16:44:18.000Z
|
#!/usr/bin/env python
"""
Add all (potentially gigantic) histograms in a group of files.
"""
import dashi
import tables
import os, sys, operator, shutil
from functools import reduce  # reduce is not a builtin in Python 3
from optparse import OptionParser
parser = OptionParser(usage="%prog [OPTIONS] infiles outfile", description=__doc__)
parser.add_option("--blocksize", dest="blocksize", type=int, default=2048)
opts, args = parser.parse_args()
if len(args) < 2:
parser.error("You must specify at least one output and one input file")
infiles, outfile = args[:-1], args[-1]
if os.path.exists(outfile):
parser.error("%s already exists!" % outfile)
shutil.copy(infiles[0], outfile)
from collections import defaultdict
paths = defaultdict(list)
for fname in infiles[1:]:
    with tables.open_file(fname) as hdf:
        for group in hdf.walk_nodes(where='/', classname='Group'):
            if 'ndim' in group._v_attrs:  # a dashi histogram
                path = group._v_pathname
                paths[path].append(fname)
def histadd(sourceGroup, destGroup, blocksize=1):
"""
Add dashi histograms stored in HDF5 groups
:param blocksize: operate on blocksize I/O chunks at a time
"""
for arr in '_h_bincontent', '_h_squaredweights':
source = sourceGroup._v_children[arr]
dest = destGroup._v_children[arr]
chunksize = blocksize*reduce(operator.mul, dest.chunkshape)
size = reduce(operator.mul, dest.shape)
for i in range(0, size, chunksize):
dest[i:i+chunksize] += source[i:i+chunksize]
for prop in 'nentries', 'nans', 'nans_wgt', 'nans_sqwgt':
destGroup._v_attrs[prop] += sourceGroup._v_attrs[prop]
with tables.open_file(outfile, 'a') as ohdf:
    for path, fnames in paths.items():  # iteritems() was Python 2 only
        print(path)
        destGroup = ohdf.get_node(path)
        for fname in fnames:
            with tables.open_file(fname) as hdf:
                histadd(hdf.get_node(path), destGroup, opts.blocksize)
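# --- Illustrative usage (not part of the original file) ---
# Sum the histograms of several input files into a new output file:
#   python histreduce.py hists1.hdf5 hists2.hdf5 hists3.hdf5 combined.hdf5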
| 31.070175
| 83
| 0.728967
| 255
| 1,771
| 4.968627
| 0.466667
| 0.023678
| 0.04262
| 0.036306
| 0.044199
| 0.044199
| 0
| 0
| 0
| 0
| 0
| 0.007895
| 0.141728
| 1,771
| 56
| 84
| 31.625
| 0.825658
| 0.115754
| 0
| 0.052632
| 0
| 0
| 0.126132
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0
| 0.131579
| 0
| 0.157895
| 0.026316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db7e13c9886abafe9915d05b01539badc566a636
| 2,108
|
py
|
Python
|
procrastinate/exceptions.py
|
ignaciocabeza/procrastinate
|
95ba8c7acdf39aa7a1216c19903802b4f65b65d1
|
[
"MIT"
] | null | null | null |
procrastinate/exceptions.py
|
ignaciocabeza/procrastinate
|
95ba8c7acdf39aa7a1216c19903802b4f65b65d1
|
[
"MIT"
] | null | null | null |
procrastinate/exceptions.py
|
ignaciocabeza/procrastinate
|
95ba8c7acdf39aa7a1216c19903802b4f65b65d1
|
[
"MIT"
] | null | null | null |
import datetime
class ProcrastinateException(Exception):
"""
Unexpected Procrastinate error.
"""
def __init__(self, message=None):
if not message:
message = self.__doc__
super().__init__(message)
class TaskNotFound(ProcrastinateException):
"""
Task cannot be imported.
"""
class JobError(ProcrastinateException):
"""
Job ended with an exception.
"""
class LoadFromPathError(ImportError, ProcrastinateException):
"""
App was not found at the provided path, or the loaded object is not an App.
"""
class JobRetry(ProcrastinateException):
"""
Job should be retried.
"""
def __init__(self, scheduled_at: datetime.datetime):
self.scheduled_at = scheduled_at
super().__init__()
class AppNotOpen(ProcrastinateException):
"""
App was not open. Procrastinate App needs to be opened using:
- ``app.open()``,
- ``await app.open_async()``,
- ``with app.open():``,
- ``async with app.open_async():``.
"""
class ConnectorException(ProcrastinateException):
"""
Database error.
"""
# The precise error can be seen with ``exception.__cause__``.
class AlreadyEnqueued(ProcrastinateException):
"""
There is already a job waiting in the queue with the same queueing lock.
"""
class UniqueViolation(ConnectorException):
"""
A unique constraint is violated. The constraint name is available in
``exception.constraint_name``.
"""
def __init__(self, *args, constraint_name: str):
super().__init__(*args)
self.constraint_name = constraint_name
class MissingApp(ProcrastinateException):
"""
Missing app. This most probably happened because procrastinate needs an
app via --app or the PROCRASTINATE_APP environment variable.
"""
class SyncConnectorConfigurationError(ProcrastinateException):
"""
A synchronous connector (probably Psycopg2Connector) was used, but the operation
needs an asynchronous connector (AiopgConnector). Please check your App
configuration.
"""
| 22.913043
| 84
| 0.675047
| 214
| 2,108
| 6.453271
| 0.462617
| 0.050688
| 0.023896
| 0.044895
| 0.031861
| 0.031861
| 0.031861
| 0
| 0
| 0
| 0
| 0.000611
| 0.223435
| 2,108
| 91
| 85
| 23.164835
| 0.843006
| 0.429791
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.090909
| 0
| 0.727273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db7efcd3ba8afeab68792a36832e16d7660931cd
| 1,097
|
py
|
Python
|
question3.py
|
haojunsng/foodpanda-dataeng
|
b1b9a5c615113a1b8727c9c7dfe7ad3e50059428
|
[
"MIT"
] | null | null | null |
question3.py
|
haojunsng/foodpanda-dataeng
|
b1b9a5c615113a1b8727c9c7dfe7ad3e50059428
|
[
"MIT"
] | null | null | null |
question3.py
|
haojunsng/foodpanda-dataeng
|
b1b9a5c615113a1b8727c9c7dfe7ad3e50059428
|
[
"MIT"
] | null | null | null |
from functions import get_df, write_df
import geopy
from geopy import distance
"""
The function question3 takes in the latitude and longitude of potential distress locations,
and returns the nearest port with essential provisions such as water, fuel_oil and diesel.
"""
def question3(dataset_name, latitude, longitude):
df = get_df()
distress_location = (latitude, longitude)
    # .copy() prevents pandas' SettingWithCopyWarning when the 'dist' column is added below
    ports_with_provisions = df[(df['provisions'] == True) & (df['water'] == True) & (df['fuel_oil'] == True) & (df['diesel'] == True)].copy()
results = []
for each in ports_with_provisions.itertuples(index=False):
each_coords = (float(each[4]), float(each[5]))
dist = geopy.distance.geodesic(distress_location, each_coords)
results.append(dist.km)
ports_with_provisions['dist'] = results
answer3 = ports_with_provisions.sort_values(by='dist', ascending=True)[['country', 'port_name', 'port_latitude', 'port_longitude']].head(1)
write_df(answer3, dataset_name, 'Table for Question 3')
if __name__ == "__main__":
question3("foodpanda_tables", 32.610982, -38.706256)
| 36.566667
| 143
| 0.71103
| 144
| 1,097
| 5.1875
| 0.493056
| 0.048193
| 0.10174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027115
| 0.159526
| 1,097
| 29
| 144
| 37.827586
| 0.78308
| 0
| 0
| 0
| 0
| 0
| 0.136714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.176471
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db808d5da5102b2f6086cfb47bc515cc8e85e1ce
| 6,587
|
py
|
Python
|
plugins/aea-cli-benchmark/aea_cli_benchmark/case_acn_communication/case.py
|
valory-xyz/agents-aea
|
8f38efa96041b0156ed1ae328178e395dbabf2fc
|
[
"Apache-2.0"
] | null | null | null |
plugins/aea-cli-benchmark/aea_cli_benchmark/case_acn_communication/case.py
|
valory-xyz/agents-aea
|
8f38efa96041b0156ed1ae328178e395dbabf2fc
|
[
"Apache-2.0"
] | null | null | null |
plugins/aea-cli-benchmark/aea_cli_benchmark/case_acn_communication/case.py
|
valory-xyz/agents-aea
|
8f38efa96041b0156ed1ae328178e395dbabf2fc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Check amount of time for acn connection communications."""
import asyncio
import logging
import os
import time
from contextlib import contextmanager
from tempfile import TemporaryDirectory
from typing import Callable, List, Tuple, Union
from aea_cli_benchmark.case_acn_communication.utils import (
DEFAULT_DELEGATE_PORT,
DEFAULT_MAILBOX_PORT,
DEFAULT_NODE_PORT,
_make_libp2p_client_connection,
_make_libp2p_connection,
_make_libp2p_mailbox_connection,
)
from aea.connections.base import Connection
from aea.mail.base import Envelope
from packages.fetchai.protocols.default.message import DefaultMessage
class TimeMeasure:
"""Time measure data class."""
def __init__(self):
"""Init data class instance."""
self.time = -1
@contextmanager
def time_measure():
"""Get time measure context."""
start = time.time()
m = TimeMeasure()
try:
yield m
finally:
m.time = time.time() - start
def make_envelope(from_addr: str, to_addr: str) -> Envelope:
"""Construct an envelope."""
msg = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"hello",
)
envelope = Envelope(
to=to_addr,
sender=from_addr,
message=msg,
)
return envelope
async def _run(con_maker: Callable[..., Connection]) -> Tuple[float, float]:
"""Run test case and return times for the first and the second messages sent over ACN."""
try:
connections = []
genesis_node = _make_libp2p_connection(".", relay=True)
await genesis_node.connect()
connections.append(genesis_node)
genesis_multiaddr = genesis_node.node.multiaddrs[0]
relay_node1 = _make_libp2p_connection(
".",
relay=True,
entry_peers=[genesis_multiaddr],
port=DEFAULT_NODE_PORT + 1,
mailbox=True,
delegate=True,
mailbox_port=DEFAULT_MAILBOX_PORT + 1,
delegate_port=DEFAULT_DELEGATE_PORT + 1,
)
await relay_node1.connect()
connections.append(relay_node1)
relay_node2 = _make_libp2p_connection(
".",
relay=True,
entry_peers=[genesis_multiaddr],
port=DEFAULT_NODE_PORT + 2,
mailbox=True,
delegate=True,
mailbox_port=DEFAULT_MAILBOX_PORT + 2,
delegate_port=DEFAULT_DELEGATE_PORT + 2,
)
await relay_node2.connect()
connections.append(relay_node2)
relay_node1_multiaddr = relay_node1.node.multiaddrs[0]
relay_node2_multiaddr = relay_node2.node.multiaddrs[0]
await asyncio.sleep(1)
con1 = con_maker(
port=DEFAULT_NODE_PORT + 10,
entry_peer=relay_node1_multiaddr,
mailbox_port=DEFAULT_MAILBOX_PORT + 1,
delegate_port=DEFAULT_DELEGATE_PORT + 1,
pub_key=relay_node1.node.pub,
)
await con1.connect()
connections.append(con1)
con2 = con_maker(
port=DEFAULT_NODE_PORT + 20,
entry_peer=relay_node2_multiaddr,
mailbox_port=DEFAULT_MAILBOX_PORT + 2,
delegate_port=DEFAULT_DELEGATE_PORT + 2,
pub_key=relay_node2.node.pub,
)
await con2.connect()
connections.append(con2)
envelope = make_envelope(con1.address, con2.address)
with time_measure() as tm:
await con1.send(envelope)
envelope = await con2.receive()
first_time = tm.time
with time_measure() as tm:
await con1.send(envelope)
envelope = await con2.receive()
second_time = tm.time
return first_time, second_time
finally:
for con in reversed(connections):
await con.disconnect()
def run(connection: str, run_times: int = 10) -> List[Tuple[str, Union[int, float]]]:
"""Check construction time and memory usage."""
logging.basicConfig(level=logging.CRITICAL)
cwd = os.getcwd()
try:
if connection == "p2pnode":
def con_maker(
port: int,
entry_peer: str,
mailbox_port: int,
delegate_port: int,
pub_key: str,
):
return _make_libp2p_connection(".", port=port, entry_peers=[entry_peer])
elif connection == "client":
def con_maker(
port: int,
entry_peer: str,
mailbox_port: int,
delegate_port: int,
pub_key: str,
):
return _make_libp2p_client_connection(
peer_public_key=pub_key, data_dir=".", node_port=delegate_port
)
elif connection == "mailbox":
def con_maker(
port: int,
entry_peer: str,
mailbox_port: int,
delegate_port: int,
pub_key: str,
):
return _make_libp2p_mailbox_connection(
peer_public_key=pub_key, data_dir=".", node_port=mailbox_port
)
else:
raise ValueError(f"Unsupported connection: {connection}")
with TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
coro = _run(con_maker)
first_time, second_time = asyncio.get_event_loop().run_until_complete(coro)
return [
("first time (seconds)", first_time),
("second time (seconds)", second_time),
]
finally:
os.chdir(cwd)
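# --- Illustrative usage (not part of the original file) ---
# run() accepts one of the connection types handled above:
#   run("p2pnode")  # -> [("first time (seconds)", ...), ("second time (seconds)", ...)]
#   run("client")
#   run("mailbox")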
| 30.780374
| 93
| 0.591316
| 719
| 6,587
| 5.182198
| 0.297636
| 0.041331
| 0.025497
| 0.029522
| 0.271337
| 0.263553
| 0.24423
| 0.24423
| 0.24423
| 0.231884
| 0
| 0.016717
| 0.300744
| 6,587
| 213
| 94
| 30.924883
| 0.792228
| 0.152573
| 0
| 0.329032
| 0
| 0
| 0.019842
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045161
| false
| 0
| 0.070968
| 0.019355
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db83659c6d0ac1aa7ea69a87f18b5fd2867e5ddc
| 3,651
|
py
|
Python
|
genomics_algo/utilities/string_cmp.py
|
SvoONs/genomics_algo
|
3174c1e9e685db12c5849ce5c7e3411f1922a4be
|
[
"MIT"
] | null | null | null |
genomics_algo/utilities/string_cmp.py
|
SvoONs/genomics_algo
|
3174c1e9e685db12c5849ce5c7e3411f1922a4be
|
[
"MIT"
] | 38
|
2020-11-11T21:26:56.000Z
|
2021-03-20T23:25:49.000Z
|
genomics_algo/utilities/string_cmp.py
|
SvoONs/genomics_algo
|
3174c1e9e685db12c5849ce5c7e3411f1922a4be
|
[
"MIT"
] | 1
|
2020-11-13T21:38:43.000Z
|
2020-11-13T21:38:43.000Z
|
def longest_common_prefix(s1: str, s2: str) -> str:
"""
Finds the longest common prefix (substring) given two strings
s1: First string to compare
s2: Second string to compare
Returns:
Longest common prefix between s1 and s2
>>> longest_common_prefix("ACTA", "GCCT")
''
>>> longest_common_prefix("ACTA", "ACT")
'ACT'
>>> longest_common_prefix("ACT", "ACTA")
'ACT'
>>> longest_common_prefix("GATA", "GAAT")
'GA'
>>> longest_common_prefix("ATGA", "")
''
>>> longest_common_prefix("", "GCCT")
''
>>> longest_common_prefix("GCCT", "GCCT")
'GCCT'
"""
i = 0
while i < min(len(s1), len(s2)):
if s1[i] != s2[i]:
break
i += 1
return s1[:i]
def longest_common_suffix(s1: str, s2: str) -> str:
"""
Finds the longest common suffix (substring) given two strings
s1: First string to compare
s2: Second string to compare
Returns:
Longest common suffix between s1 and s2
>>> longest_common_suffix("ACTA", "GCCT")
''
>>> longest_common_suffix("ACTA", "CTA")
'CTA'
>>> longest_common_suffix("CTA", "ACTA")
'CTA'
>>> longest_common_suffix("GATAT", "GAATAT")
'ATAT'
>>> longest_common_suffix("ACTA", "")
''
>>> longest_common_suffix("", "GCCT")
''
>>> longest_common_suffix("GCCT", "GCCT")
'GCCT'
"""
return longest_common_prefix(s1[::-1], s2[::-1])[::-1]
def find_hamming_distance(s1: str, s2: str) -> int:
"""Compute the Hamming distance between two strings of equal length
>>> find_hamming_distance("ATG", "ATC")
1
>>> find_hamming_distance("ATG", "TGA")
3
>>> find_hamming_distance("A", "A")
0
>>> find_hamming_distance("ATG", "ATG")
0
>>> find_hamming_distance("", "")
0
>>> find_hamming_distance("GAGGTAGCGGCGTTTAAC", "GTGGTAACGGGGTTTAAC")
3
"""
assert len(s1) == len(s2)
return sum(1 for i in range(len(s1)) if s1[i] != s2[i])
def find_levenshtein_distance(s1: str, s2: str) -> int:
"""Compute the Levenshtein distance between two strings (i.e., minimum number
of edits including substitution, insertion and deletion needed in a string to
turn it into another)
>>> find_levenshtein_distance("AT", "")
2
>>> find_levenshtein_distance("AT", "ATC")
1
>>> find_levenshtein_distance("ATG", "ATC")
1
>>> find_levenshtein_distance("ATG", "TGA")
2
>>> find_levenshtein_distance("ATG", "ATG")
0
>>> find_levenshtein_distance("", "")
0
>>> find_levenshtein_distance("GAGGTAGCGGCGTTTAAC", "GTGGTAACGGGGTTTAAC")
3
>>> find_levenshtein_distance("TGGCCGCGCAAAAACAGC", "TGACCGCGCAAAACAGC")
2
>>> find_levenshtein_distance("GCGTATGCGGCTAACGC", "GCTATGCGGCTATACGC")
2
"""
# initializing a matrix for with `len(s1) + 1` rows and `len(s2) + 1` columns
D = [[0 for x in range(len(s2) + 1)] for y in range(len(s1) + 1)]
# fill first column
for i in range(len(s1) + 1):
D[i][0] = i
# fill first row
for j in range(len(s2) + 1):
D[0][j] = j
# fill rest of the matrix
for i in range(1, len(s1) + 1):
for j in range(1, len(s2) + 1):
distance_left = D[i][j - 1] + 1 # deletion in pattern
distance_above = D[i - 1][j] + 1 # insertion in pattern
distance_diagonal = D[i - 1][j - 1] + (
s1[i - 1] != s2[j - 1]
) # substitution
D[i][j] = min(distance_left, distance_above, distance_diagonal)
# return the last value (i.e., right most bottom value)
return D[-1][-1]
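# --- Illustrative test runner (not part of the original file) ---
# The docstrings above double as doctests; a minimal way to execute them:
# if __name__ == "__main__":
#     import doctest
#     doctest.testmod()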
| 28.97619
| 81
| 0.586962
| 471
| 3,651
| 4.392781
| 0.220807
| 0.131948
| 0.101015
| 0.019333
| 0.278879
| 0.21943
| 0.148864
| 0.148864
| 0.118898
| 0.086032
| 0
| 0.030236
| 0.25719
| 3,651
| 125
| 82
| 29.208
| 0.73267
| 0.611339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 1
| 0.148148
| false
| 0
| 0
| 0
| 0.296296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db836b59bf5fd8d655aefd6e4020d61dca742b2c
| 11,906
|
py
|
Python
|
whyqd/parsers/wrangling_parser.py
|
whythawk/whyqd
|
8ee41768d6788318458d41831200594b61777ccc
|
[
"BSD-3-Clause"
] | 17
|
2020-02-21T14:41:24.000Z
|
2022-01-31T20:25:53.000Z
|
whyqd/parsers/wrangling_parser.py
|
whythawk/whyqd
|
8ee41768d6788318458d41831200594b61777ccc
|
[
"BSD-3-Clause"
] | null | null | null |
whyqd/parsers/wrangling_parser.py
|
whythawk/whyqd
|
8ee41768d6788318458d41831200594b61777ccc
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import annotations
from typing import Optional, Dict, List, Union, Type, TYPE_CHECKING
from datetime import date, datetime
import pandas as pd
import numpy as np
import re
import locale
try:
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
except locale.Error:
# Readthedocs has a problem, but difficult to replicate
locale.setlocale(locale.LC_ALL, "")
from . import CoreScript
from ..models import ColumnModel
from ..types import MimeType
if TYPE_CHECKING:
from ..schema import Schema
from ..models import DataSourceModel
class WranglingScript:
"""Get, review and restructure tabular data."""
def __init__(self):
self.check_source = CoreScript().check_source
self.core = CoreScript()
self.DATE_FORMATS = {
"date": {"fmt": ["%Y-%m-%d"], "txt": ["YYYY-MM-DD"]},
"datetime": {
"fmt": ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M:%S %Z%z"],
"txt": ["YYYY-MM-DD hh:mm:ss", "YYYY-MM-DD hh:mm:ss UTC+0000"],
},
"year": {"fmt": ["%Y"], "txt": ["YYYY"]},
}
def get_dataframe(
self,
source: str,
preserve: Union[str, List[str]] = None,
filetype: MimeType = MimeType.CSV,
names: Optional[List[str]] = None,
nrows: Optional[int] = None,
) -> Union[Dict[str, pd.DataFrame], pd.DataFrame]:
"""Return a Pandas dataframe from a given source.
        Accepts default pandas parameters for Excel and CSV, but the objective is to preserve the source data with
        little data conversion outside of the data wrangling process. With this in mind, preserved columns are read
        with a string dtype so their original representation is kept untouched.
Parameters
----------
source: str
Source filename.
preserve: str or list of str, default None
Column names where variable type guessing must be prevented and the original data preserved.
Critical for foreign key references with weird formats, like integers with leading `0`.
filetype: MimeType, default MimeType.CSV
Pandas can read a diversity of filetypes, but whyqd has only been tested on `xls`, `xlsx` and `csv`.
names: list of str, default None
If the source data has no header row, explicitly pass a list of names - in the correct order - to address
the data.
nrows: int, default None
A specified number of rows to return. For review, it is faster to load only a small number.
Returns
-------
DataFrame or dict of DataFrame
"""
self.check_source(source)
# If the dtypes have not been set, then ensure that any provided preserved columns remain untouched
# i.e. no forcing of text to numbers
# defaulting to `dtype = object` ...
kwargs = {}
if preserve:
if not isinstance(preserve, list):
preserve = [preserve]
# kwargs["dtype"] = {k: object for k in preserve}
kwargs["dtype"] = {k: pd.StringDtype() for k in preserve}
if names:
kwargs["header"] = None
kwargs["names"] = names
if nrows:
kwargs["nrows"] = nrows
# Check filetype
if filetype in [MimeType.XLS, MimeType.XLSX]:
# This will default to returning a dictionary of dataframes for each sheet
kwargs["sheet_name"] = None
df = pd.read_excel(source, **kwargs)
keys = list(df.keys())
for k in keys:
if df[k].empty:
del df[k]
if len(df.keys()) == 1:
df = df[keys[0]]
if filetype == MimeType.CSV:
# New in pandas 1.3: will ignore encoding errors - perfect for this initial wrangling process
kwargs["encoding_errors"] = "ignore"
            # Supposed to help with fruity separator guessing
kwargs["engine"] = "python"
if not nrows:
df = pd.read_csv(source, **kwargs)
else:
kwargs["iterator"] = True
kwargs["chunksize"] = 10000
df_iterator = pd.read_csv(source, **kwargs)
df = pd.concat(df_iterator, ignore_index=True)
return df
def get_dataframe_from_datasource(self, data: DataSourceModel) -> pd.DataFrame:
"""Return the dataframe for a data source.
Parameters
----------
data: DataSourceModel
Returns
-------
pd.DataFrame
"""
path = data.path
try:
self.core.check_source(path)
except FileNotFoundError:
            # `self.directory` is expected to be set on this instance before use
            path = str(self.directory / data.source)
self.core.check_source(path)
df_columns = [d.name for d in data.columns]
names = [d.name for d in data.names] if data.names else None
df = self.get_dataframe(
source=path,
filetype=data.mime,
names=names,
preserve=[d.name for d in data.preserve if d.name in df_columns],
)
if isinstance(df, dict):
if df:
df = df[data.sheet_name]
else:
# It's an empty df for some reason. Maybe excessive filtering.
df = pd.DataFrame()
if df.empty:
raise ValueError(
f"Data source contains no data ({data.path}). Review actions to see if any were more destructive than expected."
)
return df
    def get_dataframe_columns(self, df: pd.DataFrame) -> List[ColumnModel]:
"""Returns a list of ColumnModels from a source DataFrame.
Parameters
----------
df: pd.DataFrame
Should be derived from `get_dataframe` with a sensible default for `nrows` being 50.
Returns
-------
List of ColumnModel
"""
# Prepare summary
columns = [
{"name": k, "type": "number"}
if v in ["float64", "int64"]
else {"name": k, "type": "date"}
if v in ["datetime64[ns]"]
else {"name": k, "type": "string"}
for k, v in df.dtypes.apply(lambda x: x.name).to_dict().items()
]
return [ColumnModel(**c) for c in columns]
def deduplicate_columns(self, df: pd.DataFrame, schema: Type[Schema]) -> pd.Index:
"""
Source: https://stackoverflow.com/a/65254771/295606
Source: https://stackoverflow.com/a/55405151
Returns a new column list permitting deduplication of dataframes which may result from merge.
Parameters
----------
df: pd.DataFrame
fields: list of FieldModel
Destination Schema fields
Returns
-------
pd.Index
Updated column names
"""
column_index = pd.Series(df.columns.tolist())
if df.columns.has_duplicates:
duplicates = column_index[column_index.duplicated()].unique()
for name in duplicates:
dups = column_index == name
replacements = [f"{name}{i}" if i != 0 else name for i in range(dups.sum())]
column_index.loc[dups] = replacements
# Fix any fields with the same name as any of the target fields
# Do this to 'force' schema assignment
for name in [f.name for f in schema.get.fields]:
dups = column_index == name
replacements = [f"{name}{i}__dd" if i != 0 else f"{name}__dd" for i in range(dups.sum())]
column_index.loc[dups] = replacements
return pd.Index(column_index)
# def check_column_unique(self, source: str, key: str) -> bool:
# """
# Test a column in a dataframe to ensure all values are unique.
# Parameters
# ----------
# source: Source filename
# key: Column name of field where data are to be tested for uniqueness
# Raises
# ------
# ValueError if not unique
# Returns
# -------
# bool, True if unique
# """
# df = self.get_dataframe(source, key)
# if len(df[key]) != len(df[key].unique()):
# import warnings
# filename = source.split("/")[-1] # Obfuscate the path
# e = "'{}' contains non-unique rows in column `{}`".format(filename, key)
# # raise ValueError(e)
# warnings.warn(e)
# return True
# def check_date_format(self, date_type: str, date_value: str) -> bool:
# # https://stackoverflow.com/a/37045601
# # https://www.saltycrane.com/blog/2009/05/converting-time-zones-datetime-objects-python/
# for fmt in self.DATE_FORMATS[date_type]["fmt"]:
# try:
# if date_value == datetime.strptime(date_value, fmt).strftime(fmt):
# return True
# except ValueError:
# continue
# raise ValueError(f"Incorrect date format, should be: `{self.DATE_FORMATS[date_type]['txt']}`")
###################################################################################################
### Pandas type parsers
###################################################################################################
def parse_dates(self, x: Union[None, str]) -> Union[pd.NaT, date.isoformat]:
"""
This is the hard-won 'trust nobody', certainly not Americans, date parser.
TODO: Replace with https://github.com/scrapinghub/dateparser
The only concern is that dateparser.parse(x).date().isoformat() will coerce *any* string to a date,
no matter *what* it is.
"""
if pd.isnull(x):
return pd.NaT
# Check if to_datetime can handle things
if not pd.isnull(pd.to_datetime(x, errors="coerce", dayfirst=True)):
return date.isoformat(pd.to_datetime(x, errors="coerce", dayfirst=True))
# Manually see if coersion will work
x = str(x).strip()[:10]
x = re.sub(r"[\\/,\.]", "-", x)
try:
y, m, d = x.split("-")
except ValueError:
return pd.NaT
if len(y) < 4:
# Swap the day and year positions
# Ignore US dates
d, m, y = x.split("-")
# Fat finger on 1999 ... not going to check for other date errors as no way to figure out
if y[0] == "9":
y = "1" + y[1:]
x = "{}-{}-{}".format(y, m, d)
try:
x = datetime.strptime(x, "%Y-%m-%d")
except ValueError:
return pd.NaT
x = date.isoformat(x)
try:
pd.Timestamp(x)
return x
except pd.errors.OutOfBoundsDatetime:
return pd.NaT
def parse_float(self, x: Union[str, int, float]) -> Union[np.nan, float]:
"""
Regex to extract wrecked floats: https://stackoverflow.com/a/385597
Checked against: https://regex101.com/
"""
try:
return float(x)
except ValueError:
re_float = re.compile(
r"""(?x)
^
\D* # first, match an optional sign *and space*
( # then match integers or f.p. mantissas:
\d+ # start out with a ...
(
\.\d* # mantissa of the form a.b or a.
)? # ? takes care of integers of the form a
|\.\d+ # mantissa of the form .b
)
([eE][+-]?\d+)? # finally, optionally match an exponent
$"""
)
try:
x = re_float.match(x).group(1)
x = re.sub(r"[^e0-9,-\.]", "", str(x))
return locale.atof(x)
except (ValueError, AttributeError):
return np.nan
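# --- Illustrative usage (not part of the original file) ---
# A hedged sketch; the file name and column name are assumptions.
# ws = WranglingScript()
# df = ws.get_dataframe("survey.csv", preserve="reference_id", nrows=50)
# columns = ws.get_dataframe_columns(df)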
| 38.160256
| 128
| 0.534184
| 1,405
| 11,906
| 4.474733
| 0.27758
| 0.015747
| 0.002863
| 0.013997
| 0.134404
| 0.058374
| 0.03881
| 0.03881
| 0.01527
| 0.01527
| 0
| 0.010775
| 0.337393
| 11,906
| 311
| 129
| 38.282958
| 0.786158
| 0.354611
| 0
| 0.154839
| 0
| 0.006452
| 0.077332
| 0
| 0
| 0
| 0
| 0.003215
| 0
| 1
| 0.045161
| false
| 0
| 0.077419
| 0
| 0.212903
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db83c7d51feb9c6d2d6569094bc6e9a0eb64b2ce
| 432
|
py
|
Python
|
0x02-python-import_modules/2-args.py
|
FatChicken277/holbertonschool-higher_level_programming
|
520d6310a5e2a874f8c5f5185d0fb769b6412e7c
|
[
"CNRI-Python"
] | null | null | null |
0x02-python-import_modules/2-args.py
|
FatChicken277/holbertonschool-higher_level_programming
|
520d6310a5e2a874f8c5f5185d0fb769b6412e7c
|
[
"CNRI-Python"
] | null | null | null |
0x02-python-import_modules/2-args.py
|
FatChicken277/holbertonschool-higher_level_programming
|
520d6310a5e2a874f8c5f5185d0fb769b6412e7c
|
[
"CNRI-Python"
] | null | null | null |
#!/usr/bin/python3
def args(args):
lenn = len(args) - 1
if lenn == 0:
print("0 arguments.")
elif lenn == 1:
print("{0} argument:".format(lenn))
print("{0}: {1}".format(lenn, args[lenn]))
elif lenn > 1:
print("{0} arguments:".format(lenn))
for i in range(lenn):
print("{0}: {1}".format(i+1, args[i+1]))
if __name__ == "__main__":
import sys
args(sys.argv)
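# --- Illustrative usage (not part of the original file) ---
# $ ./2-args.py Hello Holberton
# 2 arguments:
# 1: Hello
# 2: Holberton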
| 25.411765
| 52
| 0.518519
| 61
| 432
| 3.540984
| 0.393443
| 0.138889
| 0.138889
| 0.12963
| 0.296296
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044872
| 0.277778
| 432
| 16
| 53
| 27
| 0.647436
| 0.039352
| 0
| 0
| 0
| 0
| 0.152174
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.142857
| 0.357143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db8779ff5f2f1e236cb5f3cfe96c63ab0de64f28
| 5,766
|
py
|
Python
|
keystone/common/sql/migrate_repo/versions/001_add_initial_tables.py
|
sanket4373/keystone
|
7cf7e7497729803f0470167315af9349b88fe0ec
|
[
"Apache-2.0"
] | null | null | null |
keystone/common/sql/migrate_repo/versions/001_add_initial_tables.py
|
sanket4373/keystone
|
7cf7e7497729803f0470167315af9349b88fe0ec
|
[
"Apache-2.0"
] | null | null | null |
keystone/common/sql/migrate_repo/versions/001_add_initial_tables.py
|
sanket4373/keystone
|
7cf7e7497729803f0470167315af9349b88fe0ec
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
meta = sql.MetaData()
meta.bind = migrate_engine
# catalog
service_table = sql.Table(
'service',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('type', sql.String(255)),
sql.Column('extra', sql.Text()))
service_table.create(migrate_engine, checkfirst=True)
endpoint_table = sql.Table(
'endpoint',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('region', sql.String(255)),
sql.Column('service_id',
sql.String(64),
sql.ForeignKey('service.id'),
nullable=False),
sql.Column('extra', sql.Text()))
endpoint_table.create(migrate_engine, checkfirst=True)
# identity
role_table = sql.Table(
'role',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(255), unique=True, nullable=False))
role_table.create(migrate_engine, checkfirst=True)
if migrate_engine.name == 'ibm_db_sa':
# NOTE(blk-u): SQLAlchemy for PostgreSQL picks the name tenant_name_key
# for the unique constraint, but for DB2 doesn't give the UC a name
# unless we tell it to and there is no DDL to alter a column to drop
# an unnamed unique constraint, so this code creates a named unique
# constraint on the name column rather than an unnamed one.
# (This is used in migration 16.)
tenant_table = sql.Table(
'tenant',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), nullable=False),
sql.Column('extra', sql.Text()),
sql.UniqueConstraint('name', name='tenant_name_key'))
else:
tenant_table = sql.Table(
'tenant',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()))
tenant_table.create(migrate_engine, checkfirst=True)
metadata_table = sql.Table(
'metadata',
meta,
sql.Column('user_id', sql.String(64), primary_key=True),
sql.Column('tenant_id', sql.String(64), primary_key=True),
sql.Column('data', sql.Text()))
metadata_table.create(migrate_engine, checkfirst=True)
ec2_credential_table = sql.Table(
'ec2_credential',
meta,
sql.Column('access', sql.String(64), primary_key=True),
sql.Column('secret', sql.String(64)),
sql.Column('user_id', sql.String(64)),
sql.Column('tenant_id', sql.String(64)))
ec2_credential_table.create(migrate_engine, checkfirst=True)
if migrate_engine.name == 'ibm_db_sa':
# NOTE(blk-u): SQLAlchemy for PostgreSQL picks the name user_name_key
# for the unique constraint, but for DB2 doesn't give the UC a name
# unless we tell it to and there is no DDL to alter a column to drop
# an unnamed unique constraint, so this code creates a named unique
# constraint on the name column rather than an unnamed one.
# (This is used in migration 16.)
user_table = sql.Table(
'user',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), nullable=False),
sql.Column('extra', sql.Text()),
sql.UniqueConstraint('name', name='user_name_key'))
else:
user_table = sql.Table(
'user',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()))
user_table.create(migrate_engine, checkfirst=True)
user_tenant_membership_table = sql.Table(
'user_tenant_membership',
meta,
sql.Column(
'user_id',
sql.String(64),
sql.ForeignKey('user.id'),
primary_key=True),
sql.Column(
'tenant_id',
sql.String(64),
sql.ForeignKey('tenant.id'),
primary_key=True))
user_tenant_membership_table.create(migrate_engine, checkfirst=True)
# token
token_table = sql.Table(
'token',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('expires', sql.DateTime()),
sql.Column('extra', sql.Text()))
token_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = sql.MetaData()
meta.bind = migrate_engine
tables = ['user_tenant_membership', 'token', 'user', 'tenant', 'role',
'metadata', 'ec2_credential', 'endpoint', 'service']
for t in tables:
table = sql.Table(t, meta, autoload=True)
table.drop(migrate_engine, checkfirst=True)
| 36.961538
| 79
| 0.620534
| 744
| 5,766
| 4.700269
| 0.211022
| 0.08493
| 0.066057
| 0.055762
| 0.646554
| 0.60795
| 0.517872
| 0.480412
| 0.448098
| 0.437804
| 0
| 0.016159
| 0.259452
| 5,766
| 155
| 80
| 37.2
| 0.80281
| 0.249913
| 0
| 0.490385
| 0
| 0
| 0.095038
| 0.010249
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019231
| false
| 0
| 0.009615
| 0
| 0.028846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db885085ce16df342f9eaff7d4d323eb7dc1a85c
| 15,984
|
py
|
Python
|
boa3_test/examples/ico.py
|
DanPopa46/neo3-boa
|
e4ef340744b5bd25ade26f847eac50789b97f3e9
|
[
"Apache-2.0"
] | null | null | null |
boa3_test/examples/ico.py
|
DanPopa46/neo3-boa
|
e4ef340744b5bd25ade26f847eac50789b97f3e9
|
[
"Apache-2.0"
] | null | null | null |
boa3_test/examples/ico.py
|
DanPopa46/neo3-boa
|
e4ef340744b5bd25ade26f847eac50789b97f3e9
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, List, Union
from boa3.builtin import NeoMetadata, metadata, public
from boa3.builtin.contract import Nep17TransferEvent
from boa3.builtin.interop.blockchain import get_contract
from boa3.builtin.interop.contract import GAS, NEO, call_contract
from boa3.builtin.interop.runtime import calling_script_hash, check_witness
from boa3.builtin.interop.storage import delete, get, put
from boa3.builtin.type import UInt160
# -------------------------------------------
# METADATA
# -------------------------------------------
@metadata
def manifest_metadata() -> NeoMetadata:
"""
Defines this smart contract's metadata information
"""
meta = NeoMetadata()
meta.author = "Mirella Medeiros, Ricardo Prado and Lucas Uezu. COZ in partnership with Simpli"
meta.description = "ICO Example"
meta.email = "contact@coz.io"
return meta
# -------------------------------------------
# Storage Key Prefixes
# -------------------------------------------
KYC_WHITELIST_PREFIX = b'KYCWhitelistApproved'
TOKEN_TOTAL_SUPPLY_PREFIX = b'TokenTotalSupply'
TRANSFER_ALLOWANCE_PREFIX = b'TransferAllowancePrefix_'
# -------------------------------------------
# TOKEN SETTINGS
# -------------------------------------------
# Script hash of the contract owner
TOKEN_OWNER = UInt160()
# Symbol of the Token
TOKEN_SYMBOL = 'ICO'
# Number of decimal places
TOKEN_DECIMALS = 8
# Initial Supply of tokens in the system
TOKEN_INITIAL_SUPPLY = 10_000_000 * 100_000_000 # 10m total supply * 10^8 (decimals)
# -------------------------------------------
# Events
# -------------------------------------------
on_transfer = Nep17TransferEvent
# -------------------------------------------
# Methods
# -------------------------------------------
@public
def verify() -> bool:
"""
When this contract address is included in the transaction signature,
this method will be triggered as a VerificationTrigger to verify that the signature is correct.
For example, this method needs to be called when withdrawing tokens from the contract.
:return: whether the transaction signature is correct
"""
return is_administrator()
def is_administrator() -> bool:
"""
Validates if the invoker has administrative rights
:return: whether the contract's invoker is an administrator
"""
return check_witness(TOKEN_OWNER)
def is_valid_address(address: UInt160) -> bool:
"""
Validates whether the given address has passed the kyc verification.
:return: whether the given address is validated by kyc
"""
return get(KYC_WHITELIST_PREFIX + address).to_int() > 0
@public
def deploy() -> bool:
"""
Initializes the storage when the smart contract is deployed.
:return: whether the deploy was successful. This method must return True only during the smart contract's deploy.
"""
if not check_witness(TOKEN_OWNER):
return False
if get(TOKEN_TOTAL_SUPPLY_PREFIX).to_int() > 0:
return False
put(TOKEN_TOTAL_SUPPLY_PREFIX, TOKEN_INITIAL_SUPPLY)
put(TOKEN_OWNER, TOKEN_INITIAL_SUPPLY)
on_transfer(None, TOKEN_OWNER, TOKEN_INITIAL_SUPPLY)
return True
@public
def mint(amount: int) -> bool:
"""
Mints new tokens
:param amount: the amount of tokens to mint
:type amount: int
:return: whether the minting was successful
"""
assert amount >= 0
if not is_administrator():
return False
if amount > 0:
current_total_supply = totalSupply()
owner_balance = balanceOf(TOKEN_OWNER)
put(TOKEN_TOTAL_SUPPLY_PREFIX, current_total_supply + amount)
put(TOKEN_OWNER, owner_balance + amount)
on_transfer(None, TOKEN_OWNER, amount)
post_transfer(None, TOKEN_OWNER, amount, None)
return True
@public
def refund(address: UInt160, neo_amount: int, gas_amount: int) -> bool:
"""
Refunds an address with given Neo and Gas
:param address: the address that will receive the refunded NEO and GAS
:type address: UInt160
:param neo_amount: the amount of neo to be refunded
:type neo_amount: int
:param gas_amount: the amount of gas to be refunded
:type gas_amount: int
:return: whether the refund was successful
"""
assert len(address) == 20
assert neo_amount > 0 or gas_amount > 0
if not is_administrator():
return False
if neo_amount > 0:
result = call_contract(NEO, 'transfer', [calling_script_hash, address, neo_amount, None])
if result != True:
# due to a current limitation in the neo3-boa, changing the condition to `not result`
# will result in a compiler error
return False
if gas_amount > 0:
result = call_contract(GAS, 'transfer', [calling_script_hash, address, gas_amount, None])
if result != True:
# due to a current limitation in the neo3-boa, changing the condition to `not result`
# will result in a compiler error
return False
return True
# -------------------------------------------
# Public methods from NEP5.1
# -------------------------------------------
@public
def symbol() -> str:
"""
Gets the symbol of the token.
This symbol should be short (3-8 characters is recommended), with no whitespace characters or new-lines and should
be limited to the uppercase latin alphabet (i.e. the 26 letters used in English).
This method must always return the same value every time it is invoked.
:return: a short string symbol of the token managed in this contract.
"""
return TOKEN_SYMBOL
@public
def decimals() -> int:
"""
Gets the amount of decimals used by the token.
E.g. 8, means to divide the token amount by 100,000,000 (10 ^ 8) to get its user representation.
This method must always return the same value every time it is invoked.
:return: the number of decimals used by the token.
"""
return TOKEN_DECIMALS
@public
def totalSupply() -> int:
"""
Gets the total token supply deployed in the system.
This number mustn't be in its user representation. E.g. if the total supply is 10,000,000 tokens, this method
must return 10,000,000 * 10 ^ decimals.
:return: the total token supply deployed in the system.
"""
return get(TOKEN_TOTAL_SUPPLY_PREFIX).to_int()
@public
def balanceOf(account: UInt160) -> int:
"""
Get the current balance of an address
The parameter account should be a 20-byte address.
:param account: the account address to retrieve the balance for
:type account: UInt160
:return: the token balance of the `account`
:raise AssertionError: raised if `account` length is not 20.
"""
assert len(account) == 20
return get(account).to_int()
@public
def transfer(from_address: UInt160, to_address: UInt160, amount: int, data: Any) -> bool:
"""
Transfers a specified amount of NEP17 tokens from one account to another
If the method succeeds, it must fire the `transfer` event and must return true, even if the amount is 0,
or from and to are the same address.
:param from_address: the address to transfer from
:type from_address: UInt160
:param to_address: the address to transfer to
:type to_address: UInt160
:param amount: the amount of NEP17 tokens to transfer
:type amount: int
:param data: whatever data is pertinent to the onPayment method
:type data: Any
:return: whether the transfer was successful
:raise AssertionError: raised if `from_address` or `to_address` length is not 20 or if `amount` is less than zero.
"""
# the parameters from and to should be 20-byte addresses. If not, this method should throw an exception.
assert len(from_address) == 20 and len(to_address) == 20
# the parameter amount must be greater than or equal to 0. If not, this method should throw an exception.
assert amount >= 0
# The function MUST return false if the from account balance does not have enough tokens to spend.
from_balance = get(from_address).to_int()
if from_balance < amount:
return False
# The function should check whether the from address equals the caller contract hash.
# If so, the transfer should be processed;
# If not, the function should use the check_witness to verify the transfer.
if from_address != calling_script_hash:
if not check_witness(from_address):
return False
# skip balance changes if transferring to yourself or transferring 0 cryptocurrency
if from_address != to_address and amount != 0:
if from_balance == amount:
delete(from_address)
else:
put(from_address, from_balance - amount)
to_balance = get(to_address).to_int()
put(to_address, to_balance + amount)
# if the method succeeds, it must fire the transfer event
on_transfer(from_address, to_address, amount)
# if the to_address is a smart contract, it must call the contracts onPayment
post_transfer(from_address, to_address, amount, data)
# and then it must return true
return True
def post_transfer(from_address: Union[UInt160, None], to_address: Union[UInt160, None], amount: int, data: Any):
"""
Checks whether the receiver of the NEP17 tokens is a smart contract; if so, its onPayment method is called
:param from_address: the address of the sender
:type from_address: UInt160
:param to_address: the address of the receiver
:type to_address: UInt160
:param amount: the amount of cryptocurrency that is being sent
:type amount: int
:param data: any pertinent data that might validate the transaction
:type data: Any
"""
if not isinstance(to_address, None): # TODO: change to 'is not None' when `is` semantic is implemented
contract = get_contract(to_address)
if not isinstance(contract, None): # TODO: change to 'is not None' when `is` semantic is implemented
call_contract(to_address, 'onPayment', [from_address, amount, data])
@public
def allowance(from_address: UInt160, to_address: UInt160) -> int:
"""
Returns the amount of tokens that the `to` account can transfer from the `from` account.
:param from_address: the address that have the tokens
:type from_address: UInt160
:param to_address: the address that is authorized to use the tokens
:type to_address: UInt160
:return: the amount of tokens that the `to` account can transfer from the `from` account
:raise AssertionError: raised if `from_address` or `to_address` length is not 20.
"""
# the parameters from and to should be 20-byte addresses. If not, this method should throw an exception.
assert len(from_address) == 20 and len(to_address) == 20
return get(TRANSFER_ALLOWANCE_PREFIX + from_address + to_address).to_int()
@public
def transferFrom(originator: UInt160, from_address: UInt160, to_address: UInt160, amount: int, data: Any) -> bool:
"""
Transfers an amount from the `from` account to the `to` account if the `originator` has been approved to transfer
the requested amount.
:param originator: the address where the tokens actually are
:type originator: UInt160
:param from_address: the address to transfer from with originator's approval
:type from_address: UInt160
:param to_address: the address to transfer to
:type to_address: UInt160
:param amount: the amount of NEP17 tokens to transfer
:type amount: int
:param data: any pertinent data that might validate the transaction
:type data: Any
:return: whether the transfer was successful
:raise AssertionError: raised if `from_address` or `to_address` length is not 20 or if `amount` is less than zero.
"""
# the parameters from and to should be 20-byte addresses. If not, this method should throw an exception.
assert len(originator) == 20 and len(from_address) == 20 and len(to_address) == 20
# the parameter amount must be greater than or equal to 0. If not, this method should throw an exception.
assert amount >= 0
# The function should check whether the from address equals the caller contract hash.
# If so, the transfer should be processed;
# If not, the function should use the check_witness to verify the transfer.
if from_address != calling_script_hash:
if not check_witness(from_address):
return False
approved_transfer_amount = allowance(originator, from_address)
if approved_transfer_amount < amount:
return False
originator_balance = balanceOf(originator)
if originator_balance < amount:
return False
# update allowance between originator and from
if approved_transfer_amount == amount:
delete(TRANSFER_ALLOWANCE_PREFIX + originator + from_address)
else:
put(TRANSFER_ALLOWANCE_PREFIX + originator + from_address, approved_transfer_amount - amount)
# skip balance changes if transferring to yourself or transferring 0 cryptocurrency
if amount != 0 and from_address != to_address:
# update originator's balance
if originator_balance == amount:
delete(originator)
else:
put(originator, originator_balance - amount)
# updates to's balance
to_balance = get(to_address).to_int()
put(to_address, to_balance + amount)
# if the method succeeds, it must fire the transfer event
on_transfer(from_address, to_address, amount)
# if the to_address is a smart contract, it must call the contracts onPayment
post_transfer(from_address, to_address, amount, data)
# and then it must return true
return True
@public
def approve(originator: UInt160, to_address: UInt160, amount: int) -> bool:
"""
Approves the `to` account to transfer `amount` tokens from the originator account.
:param originator: the address that has the tokens
:type originator: UInt160
:param to_address: the address that is authorized to use the tokens
:type to_address: UInt160
:param amount: the amount of NEP17 tokens to transfer
:type amount: int
:return: whether the approval was successful
:raise AssertionError: raised if `originator` or `to_address` length is not 20 or if `amount` is less than zero.
"""
assert len(originator) == 20 and len(to_address) == 20
assert amount >= 0
if not check_witness(originator):
return False
if originator == to_address:
return False
if not is_valid_address(originator) or not is_valid_address(to_address):
# one of the addresses hasn't passed the kyc yet
return False
if balanceOf(originator) < amount:
return False
put(TRANSFER_ALLOWANCE_PREFIX + originator + to_address, amount)
return True
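# Illustrative allowance flow (a sketch with hypothetical UInt160 addresses,
# not contract code):
#   approve(alice, bob, 100)                   # alice authorizes bob for 100
#   allowance(alice, bob)                      # -> 100
#   transferFrom(alice, bob, carol, 40, None)  # bob moves 40 of alice's tokens to carol
#   allowance(alice, bob)                      # -> 60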
# -------------------------------------------
# Public methods from KYC
# -------------------------------------------
@public
def kyc_register(addresses: List[UInt160]) -> int:
"""
Includes the given addresses to the kyc whitelist
:param addresses: a list with the addresses to be included
:return: the number of included addresses
"""
included_addresses = 0
if is_administrator():
for address in addresses:
if len(address) == 20:
kyc_key = KYC_WHITELIST_PREFIX + address
put(kyc_key, True)
included_addresses += 1
return included_addresses
@public
def kyc_remove(addresses: List[UInt160]) -> int:
"""
Removes the given addresses from the kyc whitelist
:param addresses: a list with the addresses to be removed
:return: the number of removed addresses
"""
removed_addresses = 0
if is_administrator():
for address in addresses:
if len(address) == 20:
kyc_key = KYC_WHITELIST_PREFIX + address
delete(kyc_key)
removed_addresses += 1
return removed_addresses
| 33.509434
| 118
| 0.673486
| 2,129
| 15,984
| 4.943166
| 0.138093
| 0.035918
| 0.010452
| 0.011307
| 0.511688
| 0.45401
| 0.424458
| 0.415811
| 0.394147
| 0.360414
| 0
| 0.01951
| 0.223974
| 15,984
| 476
| 119
| 33.579832
| 0.828926
| 0.518456
| 0
| 0.397661
| 0
| 0
| 0.027119
| 0.003408
| 0
| 0
| 0
| 0.002101
| 0.064327
| 1
| 0.105263
| false
| 0
| 0.046784
| 0
| 0.339181
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db8859ce66203d2b7d494162105376778915c59d
| 20,640
|
py
|
Python
|
emotion_recognition.py
|
Partaourides/SERN
|
e6cc0a9a0cc3ac4b9a87e3ccdf5781792f85d718
|
[
"MIT"
] | 10
|
2019-05-07T02:20:02.000Z
|
2020-10-09T02:20:31.000Z
|
emotion_recognition.py
|
Partaourides/SERN
|
e6cc0a9a0cc3ac4b9a87e3ccdf5781792f85d718
|
[
"MIT"
] | 2
|
2020-06-27T13:09:03.000Z
|
2021-07-28T04:55:38.000Z
|
emotion_recognition.py
|
Partaourides/SERN
|
e6cc0a9a0cc3ac4b9a87e3ccdf5781792f85d718
|
[
"MIT"
] | 1
|
2019-07-18T00:28:13.000Z
|
2019-07-18T00:28:13.000Z
|
import os
# Restrict the script to run on CPU
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Import Keras TensorFlow backend
# from keras import backend as K
import tensorflow as tf
# Configure it to use only specific CPU Cores
config = tf.ConfigProto(intra_op_parallelism_threads=4,
inter_op_parallelism_threads=4,
device_count={"CPU": 1, "GPU": 0},
allow_soft_placement=True)
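# NOTE: tf.ConfigProto is TensorFlow 1.x API; combined with the environment
# variables above it pins the whole run to the CPU with a fixed thread budget.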
# import tensorflow as tf
import numpy as np
from IEOMAP_dataset_AC import dataset, IeomapSentenceIterator
from sklearn.metrics import confusion_matrix
from models_AC import SentenceModel
import json
import os
def emotion_recognition(n_run, epochs, batch_size, embedding_size, first_rnn_size, dropout, embedding, num_speakers):
########################################################################################################################
# Hyper-parameters
########################################################################################################################
split_size = 0.8 # Split proportion of train and test data
#log_dir = './logs_AC/RNN_without_ID/1'
log_dir = './logs_AC/RNN_' \
+ str(num_speakers) + '/' + str(n_run) + '/'
#log_dir = './logs_AC/RNN_' + embedding + 'Emb' + str(embedding_size) + '_1layer' + str(2*first_rnn_size) + '/' + str(n_run)
train_log_dir = log_dir + 'train'
val_log_dir = log_dir + 'val'
########################################################################################################################
# Initialize the Data set
########################################################################################################################
sentences, targets, data_info, speakers = dataset(mode='sentences', embedding=embedding, embedding_size=embedding_size)
train_data = IeomapSentenceIterator(sentences[0], targets[0], data_info['sentences_length'][0], speakers[0])
val_data = IeomapSentenceIterator(sentences[1], targets[1], data_info['sentences_length'][1], speakers[1])
test_data = IeomapSentenceIterator(sentences[2], targets[2], data_info['sentences_length'][2], speakers[2])
########################################################################################################################
# Initialize the model
########################################################################################################################
g = SentenceModel(vocab_size=(data_info['vocabulary_size'] + 1),
embedding_size=embedding_size,
first_rnn_size=first_rnn_size,
num_classes=data_info['num_classes'],
dropout=dropout,
embedding=embedding,
num_speakers=num_speakers)
# Store model setup
model_setup = {'vocab_size': (data_info['vocabulary_size'] + 1),
'embedding_size': embedding_size,
'first_rnn_size': first_rnn_size,
'num_classes': data_info['num_classes'],
'dropout': dropout,
'embedding': embedding,
'num_speakers': num_speakers}
dirname = os.path.dirname(log_dir)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(log_dir + 'model_setup.p', 'w') as file:
json.dump(model_setup, file, indent=4)
########################################################################################################################
# Initialize the parameters
########################################################################################################################
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
saver = tf.train.Saver()
epoch = 0
best_epoch = 0
train_conf_matrix = 0
val_conf_matrix = 0
test_conf_matrix = 0
best_acc = 0
########################################################################################################################
# Performance Indicators
########################################################################################################################
writer_train = tf.summary.FileWriter(train_log_dir, sess.graph)
writer_val = tf.summary.FileWriter(val_log_dir)
accuracy_tf = tf.placeholder(tf.float32, [])
precision_tf = tf.placeholder(tf.float32, [])
recall_tf = tf.placeholder(tf.float32, [])
summary_op = tf.summary.scalar('accuracy', accuracy_tf)
summary_op = tf.summary.scalar('precision', precision_tf)
summary_op = tf.summary.scalar('recall', recall_tf)
########################################################################################################################
# Model training procedure
########################################################################################################################
while train_data.epoch < epochs: # and train_data.epoch < best_epoch + 20:
sentences_batch, sentences_length_batch, targets_batch, speakers_batch = train_data.next_batch(batch_size)
preds, _ = sess.run([g['preds'],
g['ts']],
feed_dict={g['x']: np.array(sentences_batch),
g['y']: np.array(targets_batch).reshape(len(targets_batch)),
g['speaker']: np.array(speakers_batch),
g['seqlen']: np.array(sentences_length_batch).reshape(len(targets_batch))})
####################################################################################################################
# Calculate the Train data Confusion Matrix
####################################################################################################################
train_conf_matrix += confusion_matrix(targets_batch, preds, labels=range(data_info['num_classes']))
####################################################################################################################
# At the end of each training epoch, compute the validation results and store the relevant information
####################################################################################################################
if train_data.epoch != epoch:
while val_data.epoch == epoch:
sentences_batch, sentences_length_batch, targets_batch, speakers_batch = val_data.next_batch(batch_size)
preds = sess.run([g['preds']],
feed_dict={g['x']: np.array(sentences_batch),
g['y']: np.array(targets_batch).reshape(len(targets_batch)),
g['speaker']: np.array(speakers_batch),
g['seqlen']: np.array(sentences_length_batch).reshape(
len(targets_batch))})
############################################################################################################
# Calculate the Validation data Confusion Matrix
############################################################################################################
val_conf_matrix += confusion_matrix(targets_batch, preds[0], labels=range(data_info['num_classes']))
################################################################################################################
# Compute Accuracy, Precision and Recall
################################################################################################################
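# The per-class counts are read straight off the confusion matrix: for class
# i, TP_i is the diagonal entry, FP_i the rest of column i and FN_i the rest
# of row i. The totals below are macro-averages over the classes:
#   precision_i = TP_i / (TP_i + FP_i),  recall_i = TP_i / (TP_i + FN_i),
#   accuracy    = sum_i TP_i / total samples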
train_CM_size = len(train_conf_matrix)
total_train = sum(sum(train_conf_matrix))
train_TP = np.diagonal(train_conf_matrix)
train_FP = [sum(train_conf_matrix[:, i]) - train_TP[i] for i in range(train_CM_size)]
train_FN = [sum(train_conf_matrix[i, :]) - train_TP[i] for i in range(train_CM_size)]
train_TN = total_train - train_TP - train_FP - train_FN  # per-class true negatives
train_precision = train_TP / (train_TP + train_FP)  # aka Positive Predictive Value
train_recall = train_TP / (train_TP + train_FN)
total_train_correct = sum(train_TP)
total_train_accuracy = total_train_correct / total_train
total_train_precision = sum(train_precision) / train_CM_size
total_train_recall = sum(train_recall) / train_CM_size
val_CM_size = len(val_conf_matrix)
total_val = sum(sum(val_conf_matrix))
val_TP = np.diagonal(val_conf_matrix)
val_FP = [sum(val_conf_matrix[:, i]) - val_TP[i] for i in range(val_CM_size)]
val_FN = [sum(val_conf_matrix[i, :]) - val_TP[i] for i in range(val_CM_size)]
val_TN = total_val - val_TP - val_FP - val_FN  # per-class true negatives
val_precision = val_TP / (val_TP + val_FP)
val_recall = val_TP / (val_TP + val_FN)
total_val_correct = sum(val_TP)
total_val_accuracy = total_val_correct / total_val
total_val_precision = sum(val_precision) / val_CM_size
total_val_recall = sum(val_recall) / val_CM_size
################################################################################################################
# Store Accuracy Precision Recall
################################################################################################################
train_acc_summary = tf.Summary(
value=[tf.Summary.Value(tag="accuracy", simple_value=total_train_accuracy), ])
train_prec_summary = tf.Summary(
value=[tf.Summary.Value(tag="precision", simple_value=total_train_precision), ])
train_rec_summary = tf.Summary(value=[tf.Summary.Value(tag="recall", simple_value=total_train_recall), ])
val_acc_summary = tf.Summary(value=[tf.Summary.Value(tag="accuracy", simple_value=total_val_accuracy), ])
val_prec_summary = tf.Summary(
value=[tf.Summary.Value(tag="precision", simple_value=total_val_precision), ])
val_rec_summary = tf.Summary(value=[tf.Summary.Value(tag="recall", simple_value=total_val_recall), ])
writer_train.add_summary(train_acc_summary, epoch)
writer_train.add_summary(train_prec_summary, epoch)
writer_train.add_summary(train_rec_summary, epoch)
writer_val.add_summary(val_acc_summary, epoch)
writer_val.add_summary(val_prec_summary, epoch)
writer_val.add_summary(val_rec_summary, epoch)
writer_train.flush()
writer_val.flush()
################################################################################################################
# Print the confusion matrix and store important information
################################################################################################################
print(train_conf_matrix)
print(val_conf_matrix)
if best_acc < total_val_accuracy:
saver.save(sess, log_dir + "acc_best_validation_model.ckpt")
best_acc = total_val_accuracy
best_epoch = epoch
store_info = {'epoch': best_epoch,
'train_conf_matrix': list([list(x) for x in train_conf_matrix]),
'train_accuracy': total_train_accuracy,
'train_precision': list(train_precision),
'total_train_precision': total_train_precision,
'train_recall': list(train_recall),
'total_train_recall': total_train_recall,
'val_conf_matrix': list([list(x) for x in val_conf_matrix]),
'val_accuracy': total_val_accuracy,
'val_precision': list(val_precision),
'total_val_precision': total_val_precision,
'val_recall': list(val_recall),
'total_val_recall': total_val_recall}
store_convergence_info = {'epoch': train_data.epoch,
'train_conf_matrix': list([list(x) for x in train_conf_matrix]),
'train_accuracy': total_train_accuracy,
'train_precision': list(train_precision),
'total_train_precision': total_train_precision,
'train_recall': list(train_recall),
'total_train_recall': total_train_recall,
'val_conf_matrix': list([list(x) for x in val_conf_matrix]),
'val_accuracy': total_val_accuracy,
'val_precision': list(val_precision),
'total_val_precision': total_val_precision,
'val_recall': list(val_recall),
'total_val_recall': total_val_recall}
################################################################################################################
# Get ready for the next epoch
################################################################################################################
epoch += 1
train_conf_matrix = 0
val_conf_matrix = 0
################################################################################################################
####################################################################################################################
# At the end of training, compute the test results and store the relevant information
####################################################################################################################
while test_data.epoch == 0:
sentences_batch, sentences_length_batch, targets_batch, speakers_batch = test_data.next_batch(batch_size)
preds = sess.run([g['preds']],
feed_dict={g['x']: np.array(sentences_batch),
g['y']: np.array(targets_batch).reshape(len(targets_batch)),
g['speaker']: np.array(speakers_batch),
g['seqlen']: np.array(sentences_length_batch).reshape(
len(targets_batch))})
############################################################################################################
# Calculate the Test data Confusion Matrix
############################################################################################################
test_conf_matrix += confusion_matrix(targets_batch, preds[0], labels=range(data_info['num_classes']))
################################################################################################################
# Compute Accuracy, Precision and Recall
################################################################################################################
test_CM_size = len(test_conf_matrix)
total_test = sum(sum(test_conf_matrix))
test_TP = np.diagonal(test_conf_matrix)
test_FP = [sum(test_conf_matrix[:, i]) - test_TP[i] for i in range(test_CM_size)]
test_FN = [sum(test_conf_matrix[i, :]) - test_TP[i] for i in range(test_CM_size)]
test_TN = total_test - test_TP - test_FP - test_FN  # per-class true negatives
test_precision = test_TP / (test_TP + test_FP)
test_recall = test_TP / (test_TP + test_FN)
total_test_correct = sum(test_TP)
total_test_accuracy = total_test_correct / total_test
total_test_precision = sum(test_precision) / test_CM_size
total_test_recall = sum(test_recall) / test_CM_size
################################################################################################################
# Print the confusion matrix and store important information
################################################################################################################
print(test_conf_matrix)
store_convergence_info['test_conf_matrix'] = list([list(x) for x in test_conf_matrix])
store_convergence_info['test_accuracy'] = total_test_accuracy
store_convergence_info['test_precision'] = list(test_precision)
store_convergence_info['total_test_precision'] = total_test_precision
store_convergence_info['test_recall'] = list(test_recall)
store_convergence_info['total_test_recall'] = total_test_recall
# trick to be able to save numpy.int64 into json
def default(o):
if isinstance(o, np.int64): return int(o)
raise TypeError
with open(log_dir + 'convergence_results.p', 'w') as file:
json.dump(store_convergence_info, file, default=default, indent=4)
saver.save(sess, log_dir + "convergence_model.ckpt")
####################################################################################################################
# At the end of training, compute the test results of the best validation model and store the relevant information
####################################################################################################################
saver.restore(sess, log_dir + "acc_best_validation_model.ckpt")
test_conf_matrix = 0
while test_data.epoch == 1:
sentences_batch, sentences_length_batch, targets_batch, speakers_batch = test_data.next_batch(batch_size)
preds = sess.run([g['preds']],
feed_dict={g['x']: np.array(sentences_batch),
g['y']: np.array(targets_batch).reshape(len(targets_batch)),
g['speaker']: np.array(speakers_batch),
g['seqlen']: np.array(sentences_length_batch).reshape(
len(targets_batch))})
############################################################################################################
# Calculate the Test data Confusion Matrix
############################################################################################################
test_conf_matrix += confusion_matrix(targets_batch, preds[0], labels=range(data_info['num_classes']))
################################################################################################################
# Compute Accuracy, Precision and Recall
################################################################################################################
test_CM_size = len(test_conf_matrix)
total_test = sum(sum(test_conf_matrix))
test_TP = np.diagonal(test_conf_matrix)
test_FP = [sum(test_conf_matrix[:, i]) - test_TP[i] for i in range(test_CM_size)]
test_FN = [sum(test_conf_matrix[i, :]) - test_TP[i] for i in range(test_CM_size)]
test_TN = total_test - test_TP - test_FP - test_FN  # per-class true negatives
test_precision = test_TP / (test_TP + test_FP)
test_recall = test_TP / (test_TP + test_FN)
total_test_correct = sum(test_TP)
total_test_accuracy = total_test_correct / total_test
total_test_precision = sum(test_precision) / test_CM_size
total_test_recall = sum(test_recall) / test_CM_size
################################################################################################################
# Print the confusion matrix and store important information
################################################################################################################
print(test_conf_matrix)
store_info['test_conf_matrix'] = list([list(x) for x in test_conf_matrix])
store_info['test_accuracy'] = total_test_accuracy
store_info['test_precision'] = list(test_precision)
store_info['total_test_precision'] = total_test_precision
store_info['test_recall'] = list(test_recall)
store_info['total_test_recall'] = total_test_recall
with open(log_dir + 'acc_best_validation_results.p', 'w') as file:
json.dump(store_info, file, default=default, indent=4)
| 58.971429
| 128
| 0.463275
| 1,907
| 20,640
| 4.664394
| 0.111169
| 0.051714
| 0.031478
| 0.019786
| 0.662619
| 0.616639
| 0.58842
| 0.528836
| 0.497583
| 0.469028
| 0
| 0.003322
| 0.227132
| 20,640
| 349
| 129
| 59.140401
| 0.554288
| 0.070058
| 0
| 0.373874
| 0
| 0
| 0.081601
| 0.012219
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009009
| false
| 0
| 0.036036
| 0
| 0.045045
| 0.018018
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db889090b0a80e5b1926c1a844e99f3562167374
| 1,779
|
py
|
Python
|
dashboard/rpc/alias.py
|
flaree/Toxic-Cogs
|
e33c3fe3a81c86ef3c89928b0a977fae13b916a9
|
[
"MIT"
] | null | null | null |
dashboard/rpc/alias.py
|
flaree/Toxic-Cogs
|
e33c3fe3a81c86ef3c89928b0a977fae13b916a9
|
[
"MIT"
] | null | null | null |
dashboard/rpc/alias.py
|
flaree/Toxic-Cogs
|
e33c3fe3a81c86ef3c89928b0a977fae13b916a9
|
[
"MIT"
] | null | null | null |
import discord
from redbot.core.bot import Red
from redbot.core.commands import commands
from redbot.core.utils.chat_formatting import humanize_list
from .utils import permcheck, rpccheck
class DashboardRPC_AliasCC:
def __init__(self, cog: commands.Cog):
self.bot: Red = cog.bot
self.cog: commands.Cog = cog
# Initialize RPC handlers
self.bot.register_rpc_handler(self.fetch_aliases)
def unload(self):
self.bot.unregister_rpc_handler(self.fetch_aliases)
@staticmethod
def safe(string):
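# Escape HTML-sensitive characters so alias names render safely in the dashboard.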
return (
string.replace("&", "&")
.replace("<", "<")
.replace(">", ">")
.replace('"', """)
)
@rpccheck()
@permcheck("Alias", ["aliascc"])
async def fetch_aliases(self, guild: discord.Guild, member: discord.Member):
aliascog = self.bot.get_cog("Alias")
aliases = await aliascog._aliases.get_guild_aliases(guild)
ida = {}
for alias in aliases:
if len(alias.command) > 50:
command = alias.command[:47] + "..."
else:
command = alias.command
if alias.command not in ida:
ida[alias.command] = {"aliases": [], "shortened": command}
ida[alias.command]["aliases"].append(f"{self.safe(alias.name)}")
data = {}
for command, aliases in ida.items():
data[command] = {
"humanized": humanize_list(
list(map(lambda x: f"<code>{x}</code>", aliases["aliases"]))
),
"raw": aliases["aliases"],
"shortened": aliases["shortened"],
}
return data
| 32.345455
| 81
| 0.540191
| 182
| 1,779
| 5.175824
| 0.379121
| 0.076433
| 0.044586
| 0.038217
| 0.055202
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003347
| 0.328274
| 1,779
| 54
| 82
| 32.944444
| 0.784937
| 0.012929
| 0
| 0
| 0
| 0
| 0.087647
| 0.013529
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068182
| false
| 0
| 0.113636
| 0.022727
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db88f0e02537c3b3ec61c4fbd738d9a4605bd04a
| 6,939
|
py
|
Python
|
train.py
|
hafezgh/music_classification
|
68fa398b7d4455475d07ae17c3b6b94459a96ac7
|
[
"MIT"
] | 1
|
2021-07-15T18:47:02.000Z
|
2021-07-15T18:47:02.000Z
|
train.py
|
hafezgh/music_classification
|
68fa398b7d4455475d07ae17c3b6b94459a96ac7
|
[
"MIT"
] | null | null | null |
train.py
|
hafezgh/music_classification
|
68fa398b7d4455475d07ae17c3b6b94459a96ac7
|
[
"MIT"
] | null | null | null |
import torch
DEVICE = 'cuda'
import math
import torch.optim as optim
from model import *
import os
import copy, gzip, pickle, time
import numpy as np  # used below for np.inf
import torch.nn as nn  # used below for nn.CrossEntropyLoss
data_dir = './drive/MyDrive/music_classification/Data'
classes = os.listdir(data_dir+'/images_original')
def fit(model, train_loader, train_len, optimizer, criterion):
model.train()
batch_size = train_loader.batch_size
n_batches = math.ceil(train_len/batch_size)
#print('Batch Size:', batch_size,'Number of Batches:', n_batches)
model.train()
train_running_loss = 0.0
train_running_correct = 0
counter = 0
total = 0
#prog_bar = tqdm(enumerate(train_loader), total=int(train_len/batch_size))
for i, data in enumerate(train_loader):
counter += 1
data, target = data[0].to(DEVICE), data[1].to(DEVICE)
total += target.size(0)
optimizer.zero_grad()
outputs = model(data)
loss = criterion(outputs, target)
train_running_loss += loss.item()
_, preds = torch.max(outputs.data, 1)
train_running_correct += (preds == target).sum().item()
loss.backward()
optimizer.step()
train_loss = train_running_loss / counter
train_accuracy = 100. * train_running_correct / total
return train_loss, train_accuracy
def validate(model, val_loader, val_len, criterion):
model.eval()
val_running_loss = 0.0
val_running_correct = 0
counter = 0
total = 0
batch_size = val_len
#prog_bar = tqdm(enumerate(val_loader), total=int(val_len/batch_size))
with torch.no_grad():
for i, data in enumerate(val_loader):
counter += 1
data, target = data[0].to(DEVICE), data[1].to(DEVICE)
total += target.size(0)
outputs = model(data)
loss = criterion(outputs, target)
val_running_loss += loss.item()
_, preds = torch.max(outputs.data, 1)
val_running_correct += (preds == target).sum().item()
val_loss = val_running_loss / counter
val_accuracy = 100. * val_running_correct / total
return val_loss, val_accuracy
def train(hparams, train_loader, val_loader, train_len, val_len, checkpoint_path=None, **kwargs):
model = CRNN_Base(len(classes), hparams['c'], hparams['h'], hparams['w'], hparams['k'], hparams['filters'],\
hparams['poolings'], hparams['dropout_rate'], gru_units=hparams['gru_units'])
model.to(DEVICE)
optimizer = optim.Adam(model.parameters(), lr=hparams['lr'])
try:
path = kwargs['path']
stream = gzip.open(path, "rb")
checkpoint = pickle.load(stream)
stream.close()
train_loss = checkpoint['train_loss']
train_accuracy = checkpoint['train_accuracy']
val_loss = checkpoint['val_loss']
val_accuracy = checkpoint['val_accuracy']
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch_load = checkpoint['epoch']
print(f'Checkpoint found! Training will resume from epoch {epoch_load+1}')
print('Last epoch results: ')
print(f"Train Loss: {train_loss[-1]:.4f}, Train Acc: {train_accuracy[-1]:.2f}")
print(f'Val Loss: {val_loss[-1]:.4f}, Val Acc: {val_accuracy[-1]:.2f}')
if 'lr_scheduler' in kwargs.keys() and 'scheduler_state_dict' in checkpoint.keys():
if kwargs['lr_scheduler'] == True:
print('Learning rate scheduler is active.\n')
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1, last_epoch=-1, verbose=True)
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
else:
scheduler = False
else:
scheduler = False
except Exception:
print('No checkpoints found! Training will start from the beginning.\n')
train_loss, train_accuracy = [], []
val_loss, val_accuracy = [], []
epoch_load = 0
scheduler = None
es = False
if 'lr_scheduler' in kwargs.keys():
if kwargs['lr_scheduler'] == True:
print('Learning rate scheduler is active.\n')
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1, last_epoch=-1, verbose=True)
else:
scheduler = False
else:
scheduler = False
es = False
if 'early_stopping' in kwargs.keys():
print('Early stopping is active.')
print()
es = True
min_val_loss = np.inf
patience = 30
epochs_no_improve = 0
best_model = None
criterion = nn.CrossEntropyLoss()
start = time.time()
for epoch in range(hparams['epochs']-epoch_load):
print(f"Epoch {epoch+epoch_load+1} of {hparams['epochs']}")
train_epoch_loss, train_epoch_accuracy = fit(
model, train_loader, train_len, optimizer, criterion
)
val_epoch_loss, val_epoch_accuracy = validate(
model, val_loader, val_len, criterion
)
if scheduler:
scheduler.step()
train_loss.append(train_epoch_loss)
train_accuracy.append(train_epoch_accuracy)
val_loss.append(val_epoch_loss)
val_accuracy.append(val_epoch_accuracy)
if es:
if val_epoch_loss < min_val_loss:
#Saving the model
min_val_loss = val_epoch_loss
best_model = copy.deepcopy(model.state_dict())
epochs_no_improve = 0
else:
epochs_no_improve += 1
# Check early stopping condition
if epochs_no_improve == patience:
print(f'Early stopping after {epoch+epoch_load+1} epochs!')
model.load_state_dict(best_model)
break
print(f"Train Loss: {train_epoch_loss:.4f}, Train Acc: {train_epoch_accuracy:.2f}")
print(f'Val Loss: {val_epoch_loss:.4f}, Val Acc: {val_epoch_accuracy:.2f}')
checkpoint_to_save = {'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'epoch': epoch+epoch_load,
'train_loss': train_loss,
'val_loss': val_loss,
'train_accuracy': train_accuracy,
'val_accuracy': val_accuracy
}
if scheduler:
checkpoint_to_save['scheduler_state_dict'] = scheduler.state_dict()
## Saving the model
if checkpoint_path is not None:
stream = gzip.open(checkpoint_path, "wb")
pickle.dump(checkpoint_to_save, stream)
stream.close()
end = time.time()
print(f"Training time: {(end-start)/60:.3f} minutes")
return model, train_loss, train_accuracy, val_loss, val_accuracy
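# Minimal usage sketch (assumed, not from this repo; the hparams keys mirror
# what train() reads above):
#   hparams = {'c': 1, 'h': 128, 'w': 128, 'k': 3,
#              'filters': [64, 128], 'poolings': [(2, 2), (2, 2)],
#              'dropout_rate': 0.3, 'gru_units': 32,
#              'lr': 1e-3, 'epochs': 100}
#   model, tr_loss, tr_acc, va_loss, va_acc = train(
#       hparams, train_loader, val_loader, train_len, val_len,
#       checkpoint_path='./checkpoint.p.gz',
#       lr_scheduler=True, early_stopping=True)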
| 40.343023
| 118
| 0.607292
| 839
| 6,939
| 4.777116
| 0.190703
| 0.026198
| 0.022455
| 0.021956
| 0.312625
| 0.27495
| 0.220559
| 0.166667
| 0.124252
| 0.124252
| 0
| 0.012048
| 0.282317
| 6,939
| 172
| 119
| 40.343023
| 0.792771
| 0.038911
| 0
| 0.267974
| 0
| 0
| 0.159088
| 0.030317
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.039216
| 0
| 0.078431
| 0.091503
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db8bf109b16daade88fa4febce0557b98851466c
| 476
|
py
|
Python
|
doc/samples/pos.py
|
m4ta1l/doit
|
d1a1b7b3abc7641d977d3b78b580d97aea4e27ea
|
[
"MIT"
] | 1,390
|
2015-01-01T21:11:47.000Z
|
2022-03-31T11:35:44.000Z
|
doc/samples/pos.py
|
m4ta1l/doit
|
d1a1b7b3abc7641d977d3b78b580d97aea4e27ea
|
[
"MIT"
] | 393
|
2015-01-05T11:18:29.000Z
|
2022-03-20T11:46:46.000Z
|
doc/samples/pos.py
|
m4ta1l/doit
|
d1a1b7b3abc7641d977d3b78b580d97aea4e27ea
|
[
"MIT"
] | 176
|
2015-01-07T16:58:56.000Z
|
2022-03-28T12:12:11.000Z
|
def task_pos_args():
def show_params(param1, pos):
print('param1 is: {0}'.format(param1))
for index, pos_arg in enumerate(pos):
print('positional-{0}: {1}'.format(index, pos_arg))
return {'actions':[(show_params,)],
'params':[{'name':'param1',
'short':'p',
'default':'default value'},
],
'pos_arg': 'pos',
'verbosity': 2,
}
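# Usage sketch (hypothetical invocation; the flag and output format follow
# the task definition above):
#   $ doit pos_args -p 4 foo bar
#   param1 is: 4
#   positional-0: foo
#   positional-1: bar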
| 34
| 63
| 0.44958
| 47
| 476
| 4.404255
| 0.574468
| 0.086957
| 0.10628
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 0.378151
| 476
| 13
| 64
| 36.615385
| 0.672297
| 0
| 0
| 0
| 0
| 0
| 0.212185
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0
| 0
| 0.230769
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
db8c048cea31b2b7400108b7a16a198179252811
| 24,553
|
py
|
Python
|
projectq/backends/_qracksim/_simulator_test.py
|
vm6502q/ProjectQ
|
1eac4b1f529551dfc1668443eba0c68dee54120b
|
[
"Apache-2.0"
] | 1
|
2019-08-29T19:04:27.000Z
|
2019-08-29T19:04:27.000Z
|
projectq/backends/_qracksim/_simulator_test.py
|
vm6502q/ProjectQ
|
1eac4b1f529551dfc1668443eba0c68dee54120b
|
[
"Apache-2.0"
] | 6
|
2019-01-27T17:05:25.000Z
|
2020-02-24T00:15:59.000Z
|
projectq/backends/_qracksim/_simulator_test.py
|
vm6502q/ProjectQ
|
1eac4b1f529551dfc1668443eba0c68dee54120b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for projectq.backends._sim._simulator.py, using both the Python
and the C++ simulator as backends.
"""
import copy
import math
import cmath
import numpy
import pytest
import random
import scipy
import scipy.sparse
import scipy.sparse.linalg
from projectq import MainEngine
from projectq.cengines import (BasicEngine, BasicMapperEngine, DummyEngine,
LocalOptimizer, NotYetMeasuredError)
from projectq.ops import (All, Allocate, BasicGate, BasicMathGate, CNOT, C,
Command, H, Measure, QubitOperator, Rx, Ry, Rz, S,
TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap,
UniformlyControlledRy, UniformlyControlledRz)
from projectq.libs.math import (AddConstant,
AddConstantModN,
SubConstant,
SubConstantModN,
MultiplyByConstantModN)
from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag
from projectq.types import WeakQubitRef
from projectq.backends import Simulator
tolerance = 1e-6
def test_is_qrack_simulator_present():
_qracksim = pytest.importorskip("projectq.backends._qracksim._qracksim")
import projectq.backends._qracksim._qracksim as _
def get_available_simulators():
result = []
try:
test_is_qrack_simulator_present()
result.append("qrack_simulator_qengine")
result.append("qrack_simulator_qunit")
except:
pass
return result
@pytest.fixture(params=get_available_simulators())
def sim(request):
if request.param == "qrack_simulator_qengine":
from projectq.backends._qracksim._qracksim import QrackSimulator as QrackSim
sim = Simulator()
sim._simulator = QrackSim(1, -1, 1)
elif request.param == "qrack_simulator_qunit":
from projectq.backends._qracksim._qracksim import QrackSimulator as QrackSim
sim = Simulator()
sim._simulator = QrackSim(1, -1, 2)
return sim
@pytest.fixture(params=["mapper", "no_mapper"])
def mapper(request):
"""
Adds a mapper which changes qubit ids by adding 1
"""
if request.param == "mapper":
class TrivialMapper(BasicMapperEngine):
def __init__(self):
BasicEngine.__init__(self)
self.current_mapping = dict()
def receive(self, command_list):
for cmd in command_list:
for qureg in cmd.all_qubits:
for qubit in qureg:
if qubit.id == -1:
continue
elif qubit.id not in self.current_mapping:
previous_map = self.current_mapping
previous_map[qubit.id] = qubit.id + 1
self.current_mapping = previous_map
self._send_cmd_with_mapped_ids(cmd)
return TrivialMapper()
if request.param == "no_mapper":
return None
class Mock1QubitGate(BasicGate):
def __init__(self):
BasicGate.__init__(self)
self.cnt = 0
@property
def matrix(self):
self.cnt += 1
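# The matrix below is the Pauli-X (NOT) gate; cnt counts matrix accesses.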
return numpy.matrix([[0, 1],
[1, 0]])
class Mock6QubitGate(BasicGate):
def __init__(self):
BasicGate.__init__(self)
self.cnt = 0
@property
def matrix(self):
self.cnt += 1
return numpy.eye(2 ** 6)
class MockNoMatrixGate(BasicGate):
def __init__(self):
BasicGate.__init__(self)
self.cnt = 0
@property
def matrix(self):
self.cnt += 1
raise AttributeError
def test_simulator_is_available(sim):
backend = DummyEngine(save_commands=True)
eng = MainEngine(backend, [])
qubit = eng.allocate_qubit()
Measure | qubit
qubit[0].__del__()
assert len(backend.received_commands) == 3
# Test that allocate, measure, basic math, and deallocate are available.
for cmd in backend.received_commands:
assert sim.is_available(cmd)
new_cmd = backend.received_commands[-1]
new_cmd.gate = Mock6QubitGate()
assert not sim.is_available(new_cmd)
new_cmd.gate = MockNoMatrixGate()
assert not sim.is_available(new_cmd)
new_cmd.gate = Mock1QubitGate()
assert sim.is_available(new_cmd)
new_cmd = backend.received_commands[-2]
assert len(new_cmd.qubits) == 1
new_cmd.gate = AddConstantModN(1, 2)
assert sim.is_available(new_cmd)
new_cmd.gate = MultiplyByConstantModN(1, 2)
assert sim.is_available(new_cmd)
#new_cmd.gate = DivideByConstantModN(1, 2)
#assert sim.is_available(new_cmd)
def test_simulator_cheat(sim):
# cheat function should return a tuple
assert isinstance(sim.cheat(), tuple)
# first entry is the qubit mapping.
# should be empty:
assert len(sim.cheat()[0]) == 0
# state vector should only have 1 entry:
assert len(sim.cheat()[1]) == 1
eng = MainEngine(sim, [])
qubit = eng.allocate_qubit()
# one qubit has been allocated
assert len(sim.cheat()[0]) == 1
assert sim.cheat()[0][0] == 0
assert len(sim.cheat()[1]) == 2
assert 1. == pytest.approx(abs(sim.cheat()[1][0]))
qubit[0].__del__()
# should be empty:
assert len(sim.cheat()[0]) == 0
# state vector should only have 1 entry:
assert len(sim.cheat()[1]) == 1
def test_simulator_functional_measurement(sim):
eng = MainEngine(sim, [])
qubits = eng.allocate_qureg(5)
# entangle all qubits:
H | qubits[0]
for qb in qubits[1:]:
CNOT | (qubits[0], qb)
All(Measure) | qubits
bit_value_sum = sum([int(qubit) for qubit in qubits])
assert bit_value_sum == 0 or bit_value_sum == 5
def test_simulator_measure_mapped_qubit(sim):
eng = MainEngine(sim, [])
qb1 = WeakQubitRef(engine=eng, idx=1)
qb2 = WeakQubitRef(engine=eng, idx=2)
cmd0 = Command(engine=eng, gate=Allocate, qubits=([qb1],))
cmd1 = Command(engine=eng, gate=X, qubits=([qb1],))
cmd2 = Command(engine=eng, gate=Measure, qubits=([qb1],), controls=[],
tags=[LogicalQubitIDTag(2)])
with pytest.raises(NotYetMeasuredError):
int(qb1)
with pytest.raises(NotYetMeasuredError):
int(qb2)
eng.send([cmd0, cmd1, cmd2])
eng.flush()
with pytest.raises(NotYetMeasuredError):
int(qb1)
assert int(qb2) == 1
def test_simulator_kqubit_exception(sim):
m1 = Rx(0.3).matrix
m2 = Rx(0.8).matrix
m3 = Ry(0.1).matrix
m4 = Rz(0.9).matrix.dot(Ry(-0.1).matrix)
m = numpy.kron(m4, numpy.kron(m3, numpy.kron(m2, m1)))
class KQubitGate(BasicGate):
@property
def matrix(self):
return m
eng = MainEngine(sim, [])
qureg = eng.allocate_qureg(3)
with pytest.raises(Exception):
KQubitGate() | qureg
with pytest.raises(Exception):
H | qureg
def test_simulator_swap(sim):
eng = MainEngine(sim, [])
qubits1 = eng.allocate_qureg(1)
qubits2 = eng.allocate_qureg(1)
X | qubits1
Swap | (qubits1, qubits2)
All(Measure) | qubits1
All(Measure) | qubits2
assert (int(qubits1[0]) == 0) and (int(qubits2[0]) == 1)
SqrtSwap | (qubits1, qubits2)
SqrtSwap | (qubits1, qubits2)
All(Measure) | qubits1
All(Measure) | qubits2
assert (int(qubits1[0]) == 1) and (int(qubits2[0]) == 0)
def test_simulator_math(sim):
eng = MainEngine(sim, [])
qubits = eng.allocate_qureg(8)
AddConstant(1) | qubits
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 1
AddConstantModN(10, 256) | qubits
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 11
controls = eng.allocate_qureg(1)
# Control is off
C(AddConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 11
# Turn control on
X | controls
C(AddConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 21
SubConstant(5) | qubits
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 16
C(SubConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 6
# Turn control off
X | controls
C(SubConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 6
MultiplyByConstantModN(2, 256) | qubits
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 12
# Control is off
C(MultiplyByConstantModN(2, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 12
# Turn control on
X | controls
C(MultiplyByConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 120
def test_simulator_probability(sim, mapper):
engine_list = [LocalOptimizer()]
if mapper is not None:
engine_list.append(mapper)
eng = MainEngine(sim, engine_list=engine_list)
qubits = eng.allocate_qureg(6)
All(H) | qubits
eng.flush()
bits = [0, 0, 1, 0, 1, 0]
for i in range(6):
assert (eng.backend.get_probability(bits[:i], qubits[:i]) ==
pytest.approx(0.5**i))
extra_qubit = eng.allocate_qubit()
with pytest.raises(RuntimeError):
eng.backend.get_probability([0], extra_qubit)
del extra_qubit
All(H) | qubits
Ry(2 * math.acos(math.sqrt(0.3))) | qubits[0]
eng.flush()
assert eng.backend.get_probability([0], [qubits[0]]) == pytest.approx(0.3)
Ry(2 * math.acos(math.sqrt(0.4))) | qubits[2]
eng.flush()
assert eng.backend.get_probability([0], [qubits[2]]) == pytest.approx(0.4)
assert (numpy.isclose(0.12, eng.backend.get_probability([0, 0], qubits[:3:2]), rtol=tolerance, atol=tolerance))
assert (numpy.isclose(0.18, eng.backend.get_probability([0, 1], qubits[:3:2]), rtol=tolerance, atol=tolerance))
assert (numpy.isclose(0.28, eng.backend.get_probability([1, 0], qubits[:3:2]), rtol=tolerance, atol=tolerance))
All(Measure) | qubits
def test_simulator_amplitude(sim, mapper):
engine_list = [LocalOptimizer()]
if mapper is not None:
engine_list.append(mapper)
eng = MainEngine(sim, engine_list=engine_list)
qubits = eng.allocate_qureg(6)
All(X) | qubits
All(H) | qubits
eng.flush()
bits = [0, 0, 1, 0, 1, 0]
polR, polPhi = cmath.polar(eng.backend.get_amplitude(bits, qubits))
while polPhi < 0:
polPhi += 2 * math.pi
assert polR == pytest.approx(1. / 8.)
bits = [0, 0, 0, 0, 1, 0]
polR2, polPhi2 = cmath.polar(eng.backend.get_amplitude(bits, qubits))
while polPhi2 < math.pi:
polPhi2 += 2 * math.pi
assert polR2 == pytest.approx(polR)
assert (polPhi2 - math.pi) == pytest.approx(polPhi)
bits = [0, 1, 1, 0, 1, 0]
polR3, polPhi3 = cmath.polar(eng.backend.get_amplitude(bits, qubits))
while polPhi3 < math.pi:
polPhi3 += 2 * math.pi
assert polR3 == pytest.approx(polR)
assert (polPhi3 - math.pi) == pytest.approx(polPhi)
All(H) | qubits
All(X) | qubits
Ry(2 * math.acos(0.3)) | qubits[0]
eng.flush()
bits = [0] * 6
polR, polPhi = cmath.polar(eng.backend.get_amplitude(bits, qubits))
assert polR == pytest.approx(0.3)
bits[0] = 1
polR, polPhi = cmath.polar(eng.backend.get_amplitude(bits, qubits))
assert (polR ==
pytest.approx(math.sqrt(0.91)))
All(Measure) | qubits
# raises if not all qubits are in the list:
with pytest.raises(RuntimeError):
eng.backend.get_amplitude(bits, qubits[:-1])
# doesn't just check for length:
with pytest.raises(RuntimeError):
eng.backend.get_amplitude(bits, qubits[:-1] + [qubits[0]])
extra_qubit = eng.allocate_qubit()
eng.flush()
# there is a new qubit now!
with pytest.raises(RuntimeError):
eng.backend.get_amplitude(bits, qubits)
def test_simulator_set_wavefunction(sim, mapper):
engine_list = [LocalOptimizer()]
if mapper is not None:
engine_list.append(mapper)
eng = MainEngine(sim, engine_list=engine_list)
qubits = eng.allocate_qureg(2)
wf = [0., 0., math.sqrt(0.2), math.sqrt(0.8)]
with pytest.raises(RuntimeError):
eng.backend.set_wavefunction(wf, qubits)
eng.flush()
eng.backend.set_wavefunction(wf, qubits)
assert pytest.approx(eng.backend.get_probability('1', [qubits[0]])) == .8
assert pytest.approx(eng.backend.get_probability('01', qubits)) == .2
assert pytest.approx(eng.backend.get_probability('1', [qubits[1]])) == 1.
All(Measure) | qubits
def test_simulator_set_wavefunction_always_complex(sim):
""" Checks that wavefunction is always complex """
eng = MainEngine(sim)
qubit = eng.allocate_qubit()
eng.flush()
wf = [1., 0]
eng.backend.set_wavefunction(wf, qubit)
Y | qubit
eng.flush()
amplitude = eng.backend.get_amplitude('1', qubit)
assert amplitude == pytest.approx(1j) or amplitude == pytest.approx(-1j)
def test_simulator_collapse_wavefunction(sim, mapper):
engine_list = [LocalOptimizer()]
if mapper is not None:
engine_list.append(mapper)
eng = MainEngine(sim, engine_list=engine_list)
qubits = eng.allocate_qureg(4)
# unknown qubits: raises
with pytest.raises(RuntimeError):
eng.backend.collapse_wavefunction(qubits, [0] * 4)
eng.flush()
eng.backend.collapse_wavefunction(qubits, [0] * 4)
assert pytest.approx(eng.backend.get_probability([0] * 4, qubits)) == 1.
All(H) | qubits[1:]
eng.flush()
assert pytest.approx(eng.backend.get_probability([0] * 4, qubits)) == .125
# impossible outcome: raises
with pytest.raises(RuntimeError):
eng.backend.collapse_wavefunction(qubits, [1] + [0] * 3)
eng.backend.collapse_wavefunction(qubits[:-1], [0, 1, 0])
probability = eng.backend.get_probability([0, 1, 0, 1], qubits)
assert probability == pytest.approx(.5)
eng.backend.set_wavefunction([1.] + [0.] * 15, qubits)
H | qubits[0]
CNOT | (qubits[0], qubits[1])
eng.flush()
eng.backend.collapse_wavefunction([qubits[0]], [1])
probability = eng.backend.get_probability([1, 1], qubits[0:2])
assert probability == pytest.approx(1.)
def test_simulator_no_uncompute_exception(sim):
eng = MainEngine(sim, [])
qubit = eng.allocate_qubit()
H | qubit
with pytest.raises(RuntimeError):
qubit[0].__del__()
# If you wanted to keep using the qubit, you shouldn't have deleted it.
assert qubit[0].id == -1
def test_simulator_functional_entangle(sim):
eng = MainEngine(sim, [])
qubits = eng.allocate_qureg(5)
# entangle all qubits:
H | qubits[0]
for qb in qubits[1:]:
CNOT | (qubits[0], qb)
# check the state vector:
assert .5 == pytest.approx(abs(sim.cheat()[1][0])**2, rel=tolerance, abs=tolerance)
assert .5 == pytest.approx(abs(sim.cheat()[1][31])**2, rel=tolerance, abs=tolerance)
for i in range(1, 31):
assert 0. == pytest.approx(abs(sim.cheat()[1][i]), rel=tolerance, abs=tolerance)
# unentangle all except the first 2
for qb in qubits[2:]:
CNOT | (qubits[0], qb)
# entangle using Toffolis
for qb in qubits[2:]:
Toffoli | (qubits[0], qubits[1], qb)
# check the state vector:
assert .5 == pytest.approx(abs(sim.cheat()[1][0])**2, rel=tolerance, abs=tolerance)
assert .5 == pytest.approx(abs(sim.cheat()[1][31])**2, rel=tolerance, abs=tolerance)
for i in range(1, 31):
assert 0. == pytest.approx(abs(sim.cheat()[1][i]), rel=tolerance, abs=tolerance)
# uncompute using multi-controlled NOTs
with Control(eng, qubits[0:-1]):
X | qubits[-1]
with Control(eng, qubits[0:-2]):
X | qubits[-2]
with Control(eng, qubits[0:-3]):
X | qubits[-3]
CNOT | (qubits[0], qubits[1])
H | qubits[0]
# check the state vector:
assert 1. == pytest.approx(abs(sim.cheat()[1][0])**2, rel=tolerance, abs=tolerance)
for i in range(1, 32):
assert 0. == pytest.approx(abs(sim.cheat()[1][i]), rel=tolerance, abs=tolerance)
All(Measure) | qubits
def test_simulator_convert_logical_to_mapped_qubits(sim):
mapper = BasicMapperEngine()
def receive(command_list):
pass
mapper.receive = receive
eng = MainEngine(sim, [mapper])
qubit0 = eng.allocate_qubit()
qubit1 = eng.allocate_qubit()
mapper.current_mapping = {qubit0[0].id: qubit1[0].id,
qubit1[0].id: qubit0[0].id}
assert (sim._convert_logical_to_mapped_qureg(qubit0 + qubit1) ==
qubit1 + qubit0)
def slow_implementation(angles, control_qubits, target_qubit, eng, gate_class):
"""
Assumption is that control_qubits[0] is lowest order bit
We apply angles[0] to state |0>
"""
assert len(angles) == 2**len(control_qubits)
for index in range(2**len(control_qubits)):
with Compute(eng):
for bit_pos in range(len(control_qubits)):
if not (index >> bit_pos) & 1:
X | control_qubits[bit_pos]
with Control(eng, control_qubits):
gate_class(angles[index]) | target_qubit
Uncompute(eng)
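# Worked example of the index -> control-pattern mapping above (two control
# qubits, index = 2, i.e. binary '10' with control_qubits[0] the low bit):
#   bit_pos 0: (2 >> 0) & 1 == 0  -> X flips control_qubits[0]
#   bit_pos 1: (2 >> 1) & 1 == 1  -> control_qubits[1] left alone
# After the Compute block, both controls are |1> exactly when the register
# started in |index>, so gate_class(angles[2]) acts on that branch only;
# Uncompute then restores the control register.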
@pytest.mark.parametrize("gate_classes", [(Ry, UniformlyControlledRy),
(Rz, UniformlyControlledRz)])
def test_uniformly_controlled_r(sim, gate_classes):
n = 2
random_angles = [3.0, 0.8, 1.2, 0.7]
basis_state_index = 2
basis_state = [0] * 2**(n+1)
basis_state[basis_state_index] = 1.
correct_eng = MainEngine(backend=Simulator())
test_eng = MainEngine(backend=sim)
correct_sim = correct_eng.backend
correct_qb = correct_eng.allocate_qubit()
correct_ctrl_qureg = correct_eng.allocate_qureg(n)
correct_eng.flush()
test_sim = test_eng.backend
test_qb = test_eng.allocate_qubit()
test_ctrl_qureg = test_eng.allocate_qureg(n)
test_eng.flush()
correct_sim.set_wavefunction(basis_state, correct_qb + correct_ctrl_qureg)
test_sim.set_wavefunction(basis_state, test_qb + test_ctrl_qureg)
test_eng.flush()
correct_eng.flush()
gate_classes[1](random_angles) | (test_ctrl_qureg, test_qb)
slow_implementation(angles=random_angles,
control_qubits=correct_ctrl_qureg,
target_qubit=correct_qb,
eng=correct_eng,
gate_class=gate_classes[0])
test_eng.flush()
correct_eng.flush()
for fstate in range(2**(n+1)):
binary_state = format(fstate, '0' + str(n+1) + 'b')
test = test_sim.get_amplitude(binary_state,
test_qb + test_ctrl_qureg)
correct = correct_sim.get_amplitude(binary_state, correct_qb +
correct_ctrl_qureg)
print(test, "==", correct)
assert correct == pytest.approx(test, rel=tolerance, abs=tolerance)
All(Measure) | test_qb + test_ctrl_qureg
All(Measure) | correct_qb + correct_ctrl_qureg
test_eng.flush(deallocate_qubits=True)
correct_eng.flush(deallocate_qubits=True)
def test_qubit_operator(sim):
test_eng = MainEngine(sim)
test_qureg = test_eng.allocate_qureg(1)
test_eng.flush()
qubit_op = QubitOperator("X0 X1", 1)
with pytest.raises(Exception):
sim.get_expectation_value(qubit_op, test_qureg)
test_eng.backend.set_wavefunction([1, 0],
test_qureg)
test_eng.flush()
qubit_op = QubitOperator("X0", 1)
qubit_op | test_qureg[0]
test_eng.flush()
amplitude = test_eng.backend.get_amplitude('0', test_qureg)
assert amplitude == pytest.approx(0.)
amplitude = test_eng.backend.get_amplitude('1', test_qureg)
assert amplitude == pytest.approx(1.)
def test_get_expectation_value(sim):
num_qubits = 2
test_eng = MainEngine(sim)
test_qureg = test_eng.allocate_qureg(num_qubits)
test_eng.flush()
qubit_op = QubitOperator("X0 X1 X2", 1)
with pytest.raises(Exception):
sim.get_expectation_value(qubit_op, test_qureg)
qubit_op = QubitOperator("X0", 1)
test_eng.backend.set_wavefunction([1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(1, rel=tolerance, abs=tolerance))
test_eng.backend.set_wavefunction([1 / math.sqrt(2), -1 / math.sqrt(2), 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(-1, rel=tolerance, abs=tolerance))
qubit_op = QubitOperator("Y0", 1)
test_eng.backend.set_wavefunction([1 / math.sqrt(2), 1j / math.sqrt(2), 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(1, rel=tolerance, abs=tolerance))
test_eng.backend.set_wavefunction([1 / math.sqrt(2), -1j / math.sqrt(2), 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(-1, rel=tolerance, abs=tolerance))
qubit_op = QubitOperator("Z0", 1)
test_eng.backend.set_wavefunction([1, 0, 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(1, rel=tolerance, abs=tolerance))
test_eng.backend.set_wavefunction([0, 1, 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(-1, rel=tolerance, abs=tolerance))
qubit_op = QubitOperator("Z0", 0.25)
test_eng.backend.set_wavefunction([1, 0, 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(0.25, rel=tolerance, abs=tolerance))
test_eng.backend.set_wavefunction([0, 1, 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(-0.25, rel=tolerance, abs=tolerance))
qubit_op = QubitOperator("Z0 Z1", 1)
test_eng.backend.set_wavefunction([1, 0, 0, 0],
test_qureg)
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(1, rel=tolerance, abs=tolerance))
X | test_qureg[0]
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(-1, rel=tolerance, abs=tolerance))
X | test_qureg[1]
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(1, rel=tolerance, abs=tolerance))
X | test_qureg[0]
test_eng.flush()
assert(sim.get_expectation_value(qubit_op, test_qureg) == pytest.approx(-1, rel=tolerance, abs=tolerance))
# ==== plusterm/plusterm :: com_reader.py (MIT, 1,693 bytes) ====
# from wx.lib.pubsub import pub
from pubsub import pub
import serial
import threading
import queue
import time
class ComReaderThread(threading.Thread):
'''
    Creates a thread that continuously reads from the serial connection.
    Publishes each read as a (timestamp, data) tuple on the 'serial.data'
    pubsub topic; connection errors are put on the error queue.
'''
def __init__(self, ser, error_que):
threading.Thread.__init__(self)
self.ser = ser
self.error_que = error_que
self.alive = threading.Event()
self.alive.set()
def run(self):
while self.alive.isSet():
try:
if self.ser.in_waiting > 0:
timestamp = time.time()
data = self.ser.read(self.ser.in_waiting)
pub.sendMessage('serial.data', data=(timestamp, data))
except serial.SerialException as e:
reconnected = False
print('Serial connection lost, trying to reconnect.')
ts = time.time()
self.error_que.put((ts, str(e)))
while not reconnected and self.alive.isSet():
try:
# if ser still thinks it's open close it
if self.ser.is_open:
self.ser.close()
self.ser.open()
                    except Exception:
                        # if reconnection failed, let some time pass
                        time.sleep(0.1)
else:
reconnected = True
print('Reconnected')
def stop(self, timeout=0.5):
self.alive.clear()
threading.Thread.join(self, timeout)
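# A minimal usage sketch (not part of the original module): the port name
# '/dev/ttyUSB0' and the listener below are assumptions for illustration.
if __name__ == '__main__':
    error_que = queue.Queue()

    def on_serial_data(data):
        # 'data' is the (timestamp, bytes) tuple published by the thread
        print(data)

    pub.subscribe(on_serial_data, 'serial.data')
    ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=0)  # hypothetical port
    reader = ComReaderThread(ser, error_que)
    reader.start()
    time.sleep(5)  # read for a few seconds
    reader.stop()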
# ==== ganeshkumarsv/datadog-cloudfoundry-buildpack :: docker/app/app.py (Apache-2.0, 1,032 bytes) ====
from flask import Flask
from datadog import statsd
import logging
import os
# This is a small example application
# It uses tracing and dogstatsd on a sample flask application
log = logging.getLogger("app")
app = Flask(__name__)
# The app has two routes, a basic endpoint and an exception endpoint
@app.route("/")
def hello():
statsd.increment('request.number', 1, tags=["test", "foo:bar", "my:app"])
log.info("Got a request at hello")
return "Hello World!"
@app.route("/error")
def error():
statsd.increment('request.error.number', 1, tags=["test", "foo:bar", "my:app"])
log.info("Got a request at error")
raise Exception()
# This is meant to be run directly, instead of executed through flask run
if __name__ == '__main__':
# It grabs the host and port from the environment
port = 5001
host = '0.0.0.0'
    if os.environ.get('HOST'):
        host = os.environ.get('HOST')
    if os.environ.get('PORT'):
        # Environment variables are strings; app.run() needs an int port
        port = int(os.environ.get('PORT'))
app.run(debug=True, host=host, port=port)
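# A minimal smoke test of the two routes using Flask's built-in test client
# (a sketch, not part of the original app; statsd sends fire-and-forget UDP,
# so no agent needs to be running):
#
#   client = app.test_client()
#   assert client.get('/').data == b'Hello World!'
#   assert client.get('/error').status_code == 500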
# ==== function2-llx/MONAI :: monai/networks/blocks/selfattention.py (Apache-2.0, 2,446 bytes) ====
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from monai.utils import optional_import
Rearrange, _ = optional_import("einops.layers.torch", name="Rearrange")
class SABlock(nn.Module):
"""
A self-attention block, based on: "Dosovitskiy et al.,
An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>"
"""
def __init__(self, hidden_size: int, num_heads: int, dropout_rate: float = 0.0) -> None:
"""
Args:
hidden_size: dimension of hidden layer.
num_heads: number of attention heads.
            dropout_rate: fraction of the input units to drop.
"""
super().__init__()
if not (0 <= dropout_rate <= 1):
raise ValueError("dropout_rate should be between 0 and 1.")
if hidden_size % num_heads != 0:
raise ValueError("hidden size should be divisible by num_heads.")
self.num_heads = num_heads
self.out_proj = nn.Linear(hidden_size, hidden_size)
self.qkv = nn.Linear(hidden_size, hidden_size * 3, bias=False)
self.input_rearrange = Rearrange("b h (qkv l d) -> qkv b l h d", qkv=3, l=num_heads)
self.out_rearrange = Rearrange("b h l d -> b l (h d)")
self.drop_output = nn.Dropout(dropout_rate)
self.drop_weights = nn.Dropout(dropout_rate)
self.head_dim = hidden_size // num_heads
self.scale = self.head_dim**-0.5
def forward(self, x):
output = self.input_rearrange(self.qkv(x))
q, k, v = output[0], output[1], output[2]
att_mat = (torch.einsum("blxd,blyd->blxy", q, k) * self.scale).softmax(dim=-1)
att_mat = self.drop_weights(att_mat)
x = torch.einsum("bhxy,bhyd->bhxd", att_mat, v)
x = self.out_rearrange(x)
x = self.out_proj(x)
x = self.drop_output(x)
return x
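# A minimal shape check (sketch, not part of the original module): hidden_size
# must be divisible by num_heads, and the input is (batch, sequence, hidden):
#
#   block = SABlock(hidden_size=64, num_heads=8, dropout_rate=0.1)
#   x = torch.randn(2, 16, 64)
#   assert block(x).shape == (2, 16, 64)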
# ==== mrakitin/opentrons :: api/tests/opentrons/commands/test_protocol_commands.py (Apache-2.0, 1,642 bytes) ====
import pytest
from opentrons.commands import protocol_commands
@pytest.mark.parametrize(
argnames="seconds,"
"minutes,"
"expected_seconds,"
"expected_minutes,"
"expected_text",
argvalues=[
[10, 0, 10, 0, "Delaying for 0 minutes and 10.0 seconds"],
[10, 9, 10, 9, "Delaying for 9 minutes and 10.0 seconds"],
[100, 0, 40, 1, "Delaying for 1 minutes and 40.0 seconds"],
[105, 5.25, 0, 7, "Delaying for 7 minutes and 0.0 seconds"],
[0.5, 0, 0.5, 0, "Delaying for 0 minutes and 0.5 seconds"],
[105.5, 5.25, 0.5, 7, "Delaying for 7 minutes and 0.5 seconds"],
[0.998, 0, 0.998, 0, "Delaying for 0 minutes and 0.998 seconds"],
[0.9998, 0, 0.9998, 0, "Delaying for 0 minutes and 1.0 seconds"],
[1.0001, 0, 1.0001, 0, "Delaying for 0 minutes and 1.0 seconds"],
]
)
def test_delay(seconds,
minutes,
expected_seconds,
expected_minutes,
expected_text
):
command = protocol_commands.delay(seconds, minutes)
name = command['name']
payload = command['payload']
assert name == 'command.DELAY'
assert payload['seconds'] == expected_seconds
assert payload['minutes'] == expected_minutes
assert payload['text'] == expected_text
def test_delay_with_message():
"""It should allow a message to be appended to the delay text."""
command = protocol_commands.delay(seconds=1, minutes=1, msg="Waiting...")
assert command["payload"]["text"] == (
"Delaying for 1 minutes and 1.0 seconds. Waiting..."
)
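# Worked example of the normalisation the parametrize table above encodes:
# delay(seconds=105, minutes=5.25) is 105 + 5.25 * 60 = 420 s in total,
# which protocol_commands.delay reports as 7 minutes and 0.0 seconds:
#
#   command = protocol_commands.delay(105, 5.25)
#   assert command['payload']['text'] == "Delaying for 7 minutes and 0.0 seconds"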
# ==== ess-dmsc/just-bin-it :: tests/test_histogram_source.py (BSD-2-Clause, 1,467 bytes) ====
from unittest.mock import patch
import pytest
from just_bin_it.endpoints.sources import HistogramSource
from tests.doubles.consumer import StubConsumer
TEST_MESSAGE = b"this is a byte message"
INVALID_FB = b"this is an invalid fb message"
class TestHistogramSource:
@pytest.fixture(autouse=True)
def prepare(self):
pass
def test_if_no_consumer_supplied_then_raises(self):
with pytest.raises(Exception):
HistogramSource(None)
def test_if_no_new_messages_then_no_data(self):
mock_consumer = StubConsumer(["broker1"], ["topic1"])
mock_consumer.add_messages([])
hs = HistogramSource(mock_consumer)
data = hs.get_new_data()
assert len(data) == 0
@patch("just_bin_it.endpoints.sources.deserialise_hs00", return_value=TEST_MESSAGE)
def test_if_five_new_messages_on_one_topic_then_data_has_five_items(
self, mock_method
):
mock_consumer = StubConsumer(["broker1"], ["topic1"])
mock_consumer.add_messages([TEST_MESSAGE] * 5)
hs = HistogramSource(mock_consumer)
data = hs.get_new_data()
_, _, message = data[0]
assert len(data) == 5
assert message == TEST_MESSAGE
def test_deserialising_invalid_fb_does_not_throw(self):
mock_consumer = StubConsumer(["broker1"], ["topic1"])
mock_consumer.add_messages([INVALID_FB])
hs = HistogramSource(mock_consumer)
hs.get_new_data()
# ==== ZhangHCFJEA/bbp :: bbp/comps/irikura_gen_srf.py (BSD-3-Clause, 10,817 bytes) ====
#!/usr/bin/env python
"""
Copyright 2010-2019 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
import math
import shutil
# Import Broadband modules
import plot_srf
import bband_utils
from irikura_gen_srf_cfg import IrikuraGenSrfCfg
from install_cfg import InstallCfg
class IrikuraGenSrf(object):
"""
Implements Arben's gen_srf.csh script in Python
"""
def __init__(self, i_r_velmodel, i_r_srcfile,
o_r_srffile, i_vmodel_name, sim_id=0,
**kwargs):
self.sim_id = sim_id
self.r_velmodel = i_r_velmodel
self.r_srcfile = i_r_srcfile
self.r_srffile = o_r_srffile
self.vmodel_name = i_vmodel_name
self.r_srcfiles = []
# Get all src files that were passed to us
if kwargs is not None and len(kwargs) > 0:
for idx in range(len(kwargs)):
self.r_srcfiles.append(kwargs['src%d' % (idx)])
else:
# Not a multisegment run, just use the single src file
self.r_srcfiles.append(i_r_srcfile)
def run(self):
"""
        This function prepares the parameters for Irikura's gen_srf, then calls it
"""
print("IrikuraGenSrf".center(80, '-'))
# Load configuration, set sim_id
install = InstallCfg.getInstance()
sim_id = self.sim_id
# Build directory paths
a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
a_indir = os.path.join(install.A_IN_DATA_DIR, str(sim_id))
a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
a_logdir = os.path.join(install.A_OUT_LOG_DIR, str(sim_id))
a_param_outdir = os.path.join(a_outdir, "param_files")
# Make sure the output and tmp directories exist
bband_utils.mkdirs([a_tmpdir, a_indir, a_outdir,
a_logdir, a_param_outdir])
# Now, file paths
self.log = os.path.join(a_logdir, "%d.gen_srf.log" % (sim_id))
a_srcfiles = [os.path.join(a_indir,
srcfile) for srcfile in self.r_srcfiles]
# Read src file
cfg = IrikuraGenSrfCfg(a_srcfiles)
# Define location of input velocity model and output srf file
if cfg.num_srcfiles > 1:
a_srffile = os.path.join(a_tmpdir, self.r_srffile)
a_final_srffile = os.path.join(a_indir, self.r_srffile)
else:
a_srffile = os.path.join(a_indir, self.r_srffile)
a_velmod = os.path.join(install.A_IN_DATA_DIR, str(sim_id),
self.r_velmodel)
# Run in tmpdir subdir to isolate temp fortran files
# Save cwd, change back to it at the end
old_cwd = os.getcwd()
os.chdir(a_tmpdir)
# Read parameters from the src(s) file(s)
# The following parameters should be common to all SRC files
# So we just read from the first one
simulation_seed = int(cfg.CFGDICT[0]['seed'])
dip = cfg.CFGDICT[0]['dip']
rake = cfg.CFGDICT[0]['rake']
dlen = cfg.CFGDICT[0]['dlen']
dwid = cfg.CFGDICT[0]['dwid']
lon_top_center = cfg.CFGDICT[0]['lon_top_center']
lat_top_center = cfg.CFGDICT[0]['lat_top_center']
depth_to_top = cfg.CFGDICT[0]['depth_to_top']
if cfg.num_srcfiles > 1:
fault_len = cfg.CFGDICT[0]['max_fault_length']
else:
fault_len = cfg.CFGDICT[0]['fault_length']
fault_width = cfg.CFGDICT[0]['fault_width']
# Average strike of all SRC files
strike = 0.0
for segment in range(cfg.num_srcfiles):
strike = strike + cfg.CFGDICT[segment]['strike']
strike = math.ceil(strike / cfg.num_srcfiles)
# Hypocenter (down_dip is common to all src files)
hypo_down_dip = cfg.CFGDICT[0]['hypo_down_dip']
if cfg.num_srcfiles > 1:
hypo_along_stk = 0.0
for segment in range(cfg.num_srcfiles):
current_fault_len = cfg.CFGDICT[segment]['fault_length']
current_hypo_along_stk = cfg.CFGDICT[segment]['hypo_along_stk']
if abs(current_hypo_along_stk) <= current_fault_len:
# Hypocenter in this segment!
hypo_along_stk = hypo_along_stk + (current_fault_len / 2.0) + current_hypo_along_stk
break
else:
# Not here yet, just add the total length of this segment
hypo_along_stk = hypo_along_stk + current_fault_len
# Now convert hypo_along_stk so that 0.0 is the middle of the fault
hypo_along_stk = hypo_along_stk - (fault_len / 2.0)
else:
hypo_along_stk = cfg.CFGDICT[0]['hypo_along_stk']
#
# Run gen_srf code
#
progstring = ("%s >> %s 2>&1 << END\n" %
(os.path.join(install.A_IRIKURA_BIN_DIR, cfg.GENSRF),
self.log) +
"%s\n" % a_srffile +
"%f %f %f %f %f\n" %
(fault_len, fault_width,
strike, dip, rake) +
"%f %f %f\n" %
(lon_top_center, lat_top_center, depth_to_top) +
"%f %f\n" % (dlen, dwid) +
"%f %f %f %f\n" %
(hypo_along_stk, hypo_down_dip,
cfg.DENS, cfg.VS) +
"%f\n" % (cfg.DT) +
"%d\n" % (simulation_seed) +
"%s\n" % (a_velmod) +
"%f\n" % (cfg.VEL_RUP_FRAC) +
"END")
bband_utils.runprog(progstring)
if cfg.num_srcfiles > 1:
# Assign the slip from the planar fault to each segment's SRF file
a_segs_file = os.path.join(a_tmpdir, "segments.midpoint.txt")
# Write segments' file
seg_file = open(a_segs_file, 'w')
seg_file.write("segm lon lat depth fleng fwidth shypo zhypo strike dip rake\n")
seg_file.write("%d\n" % (cfg.num_srcfiles))
total_length = 0.0
for segment in range(cfg.num_srcfiles):
if abs(cfg.CFGDICT[segment]['hypo_along_stk']) <= cfg.CFGDICT[segment]['fault_length']:
hypo_along_stk = cfg.CFGDICT[segment]['hypo_along_stk']
hypo_down_dip = cfg.CFGDICT[segment]['hypo_down_dip']
else:
hypo_along_stk = 999.0
hypo_down_dip = 999.0
seg_file.write("seg%d %.6f %.6f %.1f %.1f %.1f %.1f %.1f %.1f %d %d %d\n" %
(segment + 1,
cfg.CFGDICT[segment]['lon_top_center'],
cfg.CFGDICT[segment]['lat_top_center'],
cfg.CFGDICT[segment]['depth_to_top'],
total_length,
(total_length + cfg.CFGDICT[segment]['fault_length']),
cfg.CFGDICT[segment]['fault_width'],
hypo_along_stk, hypo_down_dip,
cfg.CFGDICT[segment]['strike'],
cfg.CFGDICT[segment]['dip'],
cfg.CFGDICT[segment]['rake']))
total_length = total_length + cfg.CFGDICT[segment]['fault_length']
seg_file.close()
#
# Run gen_srf_segment code
#
for segment in range(cfg.num_srcfiles):
progstring = ("%s >> %s 2>&1 << END\n" %
(os.path.join(install.A_IRIKURA_BIN_DIR,
cfg.GENSRFSEGMENT), self.log) +
".\n" +
"%s\n" % (self.r_srffile) +
"./segments.midpoint.txt\n" +
"%d\n" % (segment + 1) +
"%f %f\n" % (dlen, dwid) +
"END")
# Run code
bband_utils.runprog(progstring)
#
# Now add the segments together
#
progstring = ("%s >> %s 2>&1 << END\n" %
(os.path.join(install.A_IRIKURA_BIN_DIR,
cfg.SUMSEG), self.log) +
".\n" +
"%s\n" % (self.r_srffile) +
"./segments.midpoint.txt\n" +
"%d\n" % (cfg.num_srcfiles) +
"%f %f\n" % (dlen, dwid) +
"END")
# Run code
bband_utils.runprog(progstring)
# Copy file to final location
progstring = "cp %s %s" % (os.path.join(a_tmpdir,
"all_seg.%s" %
(self.r_srffile)),
a_final_srffile)
bband_utils.runprog(progstring)
# Use copied file from now on
a_srffile = a_final_srffile
# Restore working directory
os.chdir(old_cwd)
#
# Move results to outputfile
#
progstring = "cp %s %s" % (a_srffile,
os.path.join(a_tmpdir, self.r_srffile))
bband_utils.runprog(progstring)
progstring = "cp %s %s" % (a_srffile,
os.path.join(a_outdir, self.r_srffile))
bband_utils.runprog(progstring)
shutil.copy2(os.path.join(a_tmpdir, "stress_drop.out"),
os.path.join(a_param_outdir,
"stress_drop.out"))
# Plot SRF
plot_srf.run(self.r_srffile, sim_id=self.sim_id)
print("IrikuraGenSrf Completed".center(80, '-'))
if __name__ == "__main__":
print("Testing Module: %s" % os.path.basename((sys.argv[0])))
ME = IrikuraGenSrf(sys.argv[1], sys.argv[2], sys.argv[3],
sys.argv[4], sim_id=int(sys.argv[5]))
ME.run()
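# Worked example of the multi-segment hypo_along_stk conversion in run()
# (sketch): take two segments of fault_length 30 km and 40 km
# (max_fault_length = 70 km), with the first segment's hypo_along_stk out of
# range (e.g. the 999.0 sentinel used for the segments file above) and the
# hypocenter 5 km along strike in the second segment. The first segment
# contributes its full 30 km, the second adds 40 / 2 + 5 = 25 km, giving
# 55 km from the fault's end; shifting by fault_len / 2 = 35 km yields
# hypo_along_stk = 20 km relative to the middle of the combined fault,
# which is the convention gen_srf expects.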
# ==== EthanMarrs/digit2 :: core/tests/test_models.py (BSD-2-Clause, 11,120 bytes) ====
"""test_models.py: runs tests on the models for digit."""
import pytest
from core.models import (Grade,
Subject,
Question,
Comment,
Option,
Topic,
Block,
Syllabus,
StateException,
)
from django.test import TestCase
from django.contrib.auth.models import User
class TestQuestion(TestCase):
"""Test the Question Model."""
def setUp(self):
"""Create questions for testing."""
grade_test = Grade(name="Grade Example")
grade_test.save()
subject_test = Subject(name="addition",
grade=grade_test)
subject_test.save()
question1 = Question(question_content='what is 1 + 1?',
answer_content='This is an addition question',
subject=subject_test)
question1.save()
def test_question_default_state(self):
"""Confirm that default state is Incomplete."""
question1 = Question.objects.all()[0]
assert(question1.state == question1.INCOMPLETE)
def test_question_state_from_incomplete(self):
"""Check that question state.
Confirm that state can only go from 'incomplete' to
'ready for review'.
"""
question1 = Question.objects.all()[0]
with pytest.raises(StateException) as exception_info:
question1.change_to_needs_reworking()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 0")
assert(question1.state == question1.INCOMPLETE)
with pytest.raises(StateException) as exception_info:
question1.change_to_complete()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 0")
assert(question1.state == question1.INCOMPLETE)
with pytest.raises(StateException) as exception_info:
question1.change_to_flagged()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 0")
assert(question1.state == question1.INCOMPLETE)
question1.change_to_review_ready()
assert(question1.state == question1.REVIEW_READY)
def test_question_state_from_ready_for_review(self):
"""Check that question state.
Confirm that state can only go from 'ready to review' to
'complete' or 'needs reworking'.
"""
question1 = Question.objects.all()[0]
question1.state = question1.REVIEW_READY
with pytest.raises(StateException) as exception_info:
question1.change_to_review_ready()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 1")
with pytest.raises(StateException) as exception_info:
question1.change_to_flagged()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 1")
assert(question1.state == question1.REVIEW_READY)
question1.change_to_complete()
assert(question1.state == question1.COMPLETE)
question1.state = question1.REVIEW_READY
question1.change_to_needs_reworking()
assert(question1.state == question1.NEEDS_REWORKING)
def test_question_state_from_needs_reworking(self):
"""Check that question state.
Confirm that state can only go from 'needs reworking' to
'ready for review'.
"""
question1 = Question.objects.all()[0]
question1.state = question1.NEEDS_REWORKING
with pytest.raises(StateException) as exception_info:
question1.change_to_needs_reworking()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 2")
assert(question1.state == question1.NEEDS_REWORKING)
with pytest.raises(StateException) as exception_info:
question1.change_to_complete()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 2")
assert(question1.state == question1.NEEDS_REWORKING)
with pytest.raises(StateException) as exception_info:
question1.change_to_flagged()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 2")
assert(question1.state == question1.NEEDS_REWORKING)
question1.change_to_review_ready()
assert(question1.state == question1.REVIEW_READY)
def test_question_state_from_complete(self):
"""Check that question state.
Confirm that state can only go from 'complete' to
'flagged for review'.
"""
question1 = Question.objects.all()[0]
question1.state = question1.COMPLETE
with pytest.raises(StateException) as exception_info:
question1.change_to_review_ready()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 3")
assert(question1.state == question1.COMPLETE)
with pytest.raises(StateException) as exception_info:
question1.change_to_complete()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 3")
assert(question1.state == question1.COMPLETE)
with pytest.raises(StateException) as exception_info:
question1.change_to_needs_reworking()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 3")
assert(question1.state == question1.COMPLETE)
question1.change_to_flagged()
assert(question1.state == question1.FLAGGED)
def test_question_state_from_flagged_for_review(self):
"""Check that question state.
Confirm that state can only go from 'flagged for review' to
'complete'.
"""
question1 = Question.objects.all()[0]
question1.state = question1.FLAGGED
with pytest.raises(StateException) as exception_info:
question1.change_to_review_ready()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 4")
assert(question1.state == question1.FLAGGED)
with pytest.raises(StateException) as exception_info:
question1.change_to_needs_reworking()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 4")
assert(question1.state == question1.FLAGGED)
with pytest.raises(StateException) as exception_info:
question1.change_to_flagged()
assert(exception_info.value.__str__() ==
"Incorrect state change. Current state is 4")
assert(question1.state == question1.FLAGGED)
question1.change_to_complete()
assert(question1.state == question1.COMPLETE)
def test_question_option_save(self):
"""Test that question cannot have option with correct answer."""
question1 = Question.objects.all()[0]
option = Option.objects.first()
option.correct = True
option.save()
assert(len(question1.option_set.all()) == 3)
assert(len(Option.objects.all()) == 3)
def test_get_comments(self):
"""
Test that the get_comments() function returns all comments
relating to a question.
"""
user = User.objects.create(username="testuser")
question1 = Question.objects.all()[0]
Comment.objects.create(text="Test comment!", question=question1, user=user)
Comment.objects.create(text="Another comment!", question=question1, user=user)
assert(len(question1.get_comments()) == 2)
assert(question1.get_comments()[0].text == "Test comment!")
assert(question1.get_comments()[0].created_at < question1.get_comments()[1].created_at)
def test_get_options(self):
"""
Test that the get_options() function returns all options
relating to a question.
"""
question1 = Question.objects.all()[0]
assert(question1.get_number_of_options() == 3)
def test_get_state(self):
question1 = Question.objects.all()[0]
assert(question1.state == question1.INCOMPLETE)
assert(question1.get_state() == "Incomplete")
class TestTopic(TestCase):
"""Test the Topic Model."""
def setUp(self):
"""Create Topic for testing."""
grade_test = Grade.objects.create(name="Grade Example")
syllabus_test = Syllabus.objects.create(grade=grade_test)
Topic.objects.create(name="Financial Mathematics",
description="Topic that involves sinking funds "
"and loan calculations",
syllabus=syllabus_test, week_start=1,
duration=3)
def test_topic_creates_blocks(self):
"""
Confirm that blocks are created automatically and associated with the
topic.
"""
blocks = Block.objects.all()
assert(len(blocks) == 3)
assert(blocks[0].topic.name == "Financial Mathematics")
def test_topic_creates_questions(self):
"""
Confirm that questions are created automatically and associated with the
correct block and topic.
"""
questions = Question.objects.all()
assert(len(questions) == 3 * 15)
assert(questions[0].block.topic.name == "Financial Mathematics")
def test_topic_number_of_questions(self):
"""
Confirm that the correct number of questions is returned by the helper
function.
"""
questions = Question.objects.all()
topics = Topic.objects.all()
assert(len(questions) == topics[0].get_number_of_questions())
def test_topic_number_of_blocks(self):
"""
Confirm that the correct number of blocks is returned by the helper
function.
"""
blocks = Block.objects.all()
topics = Topic.objects.all()
assert(len(blocks) == topics[0].get_number_of_blocks())
def test_topic_save_does_not_duplicate_questions(self):
already_created_topic = Topic.objects.get(name="Financial Mathematics")
count = 0
for block in Block.objects.filter(topic=already_created_topic):
for question in Question.objects.filter(block=block):
count += 1
assert(count == 45)
new_description = "This is a new description"
already_created_topic.description = new_description
already_created_topic.save()
edited_topic = Topic.objects.get(name="Financial Mathematics")
count = 0
for block in Block.objects.filter(topic=edited_topic):
for question in Question.objects.filter(block=block):
count += 1
assert(count == 45)
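# The transitions exercised above form a small state machine (summary, not
# part of the original file):
#
#   INCOMPLETE      -> REVIEW_READY
#   REVIEW_READY    -> COMPLETE | NEEDS_REWORKING
#   NEEDS_REWORKING -> REVIEW_READY
#   COMPLETE        -> FLAGGED
#   FLAGGED         -> COMPLETE
#
# Every other change_to_* call raises StateException.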
# ==== omololevy/my_portfolio :: base/views.py (MIT, 5,830 bytes) ====
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.mail import EmailMessage
from django.conf import settings
from django.template.loader import render_to_string
from django.contrib.auth.models import User
from django.contrib import messages
from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.forms import UserCreationForm
from .decorators import *
from .forms import PostForm, CustomUserCreationForm, ProfileForm, UserForm
from .filters import PostFilter
from .models import *
# Create your views here.
def home(request):
posts = Post.objects.filter(active=True, featured=True)[0:3]
context = {'posts':posts}
return render(request, 'base/index.html', context)
def posts(request):
posts = Post.objects.filter(active=True)
myFilter = PostFilter(request.GET, queryset=posts)
posts = myFilter.qs
page = request.GET.get('page')
paginator = Paginator(posts, 5)
try:
posts = paginator.page(page)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
context = {'posts':posts, 'myFilter':myFilter}
return render(request, 'base/posts.html', context)
def post(request, slug):
post = Post.objects.get(slug=slug)
if request.method == 'POST':
PostComment.objects.create(
author=request.user.profile,
post=post,
body=request.POST['comment']
)
messages.success(request, "Your comment has been posted successfully!")
return redirect('post', slug=post.slug)
context = {'post':post}
return render(request, 'base/post.html', context)
def profile(request):
return render(request, 'base/profile.html')
#CRUD VIEWS
@admin_only
@login_required(login_url="home")
def createPost(request):
form = PostForm()
if request.method == 'POST':
form = PostForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('posts')
context = {'form':form}
return render(request, 'base/post_form.html', context)
@admin_only
@login_required(login_url="home")
def updatePost(request, slug):
post = Post.objects.get(slug=slug)
form = PostForm(instance=post)
if request.method == 'POST':
form = PostForm(request.POST, request.FILES, instance=post)
if form.is_valid():
form.save()
return redirect('posts')
context = {'form':form}
return render(request, 'base/post_form.html', context)
@admin_only
@login_required(login_url="home")
def deletePost(request, slug):
post = Post.objects.get(slug=slug)
if request.method == 'POST':
post.delete()
return redirect('posts')
context = {'item':post}
return render(request, 'base/delete.html', context)
def sendEmail(request):
if request.method == 'POST':
template = render_to_string('base/email_template.html', {
'name':request.POST['name'],
'email':request.POST['email'],
'message':request.POST['message'],
})
email = EmailMessage(
request.POST['subject'],
template,
settings.EMAIL_HOST_USER,
['cotechlevy@gmail.com']
)
email.fail_silently=False
email.send()
return render(request, 'base/email_sent.html')
def loginPage(request):
if request.user.is_authenticated:
return redirect('home')
if request.method == 'POST':
email = request.POST.get('email')
password =request.POST.get('password')
        # Little hack to work around re-building the user model
        try:
            user = User.objects.get(email=email)
            user = authenticate(request, username=user.username, password=password)
        except User.DoesNotExist:
            messages.error(request, 'User with this email does not exist')
            return redirect('login')
if user is not None:
login(request, user)
return redirect('home')
else:
messages.error(request, 'Email OR password is incorrect')
context = {}
return render(request, 'base/login.html', context)
def registerPage(request):
form = CustomUserCreationForm()
if request.method == 'POST':
form = CustomUserCreationForm(request.POST)
if form.is_valid():
user = form.save(commit=False)
user.save()
            messages.success(request, 'Account successfully created!')
user = authenticate(request, username=user.username, password=request.POST['password1'])
if user is not None:
login(request, user)
next_url = request.GET.get('next')
if next_url == '' or next_url == None:
next_url = 'home'
return redirect(next_url)
else:
            messages.error(request, 'An error has occurred with registration')
context = {'form':form}
return render(request, 'base/register.html', context)
def logoutUser(request):
logout(request)
return redirect('home')
@admin_only
@login_required(login_url="home")
def userAccount(request):
profile = request.user.profile
context = {'profile':profile}
return render(request, 'base/account.html', context)
@login_required(login_url="home")
def updateProfile(request):
user = request.user
profile = user.profile
form = ProfileForm(instance=profile)
if request.method == 'POST':
user_form = UserForm(request.POST, instance=user)
if user_form.is_valid():
user_form.save()
form = ProfileForm(request.POST, request.FILES, instance=profile)
if form.is_valid():
form.save()
return redirect('account')
context = {'form':form}
return render(request, 'base/profile_form.html', context)
def myEducation(request):
return render(request, 'base/education.html')
def myExperience(request):
return render(request, 'base/experience.html')
def myAchievements(request):
return render(request, 'base/achievements.html')
def myAbout(request):
return render(request, 'base/about.html')
def myContact(request):
return render(request, 'base/contact.html')
def mySkills(request):
return render(request, 'base/skills.html')
# ==== hephaestus9/Radio :: radioLib/pastebin/pastebin.py (MIT, 30,282 bytes) ====
#!/usr/bin/env python
#############################################################################
# Pastebin.py - Python 3.2 Pastebin API.
# Copyright (C) 2012 Ian Havelock
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
# This software is a derivative work of:
# http://winappdbg.sourceforge.net/blog/pastebin.py
#############################################################################
__all__ = ['delete_paste', 'user_details', 'trending', 'pastes_by_user', 'generate_user_key',
           'legacy_paste', 'paste', 'Pastebin', 'PastebinError']
import sys
import urllib.parse
import urllib.request
class PastebinError(RuntimeError):
"""Pastebin API error.
The error message returned by the web application is stored as the Python exception message."""
class PastebinAPI(object):
"""Pastebin API interaction object.
Public functions:
paste -- Pastes a user-specified file or string using the new API-key POST method.
legacy_paste -- Pastes a user-specified file or string using the old anonymous POST method.
generate_user_key -- Generates a session-key that is required for other functions.
pastes_by_user -- Returns all public pastes submitted by the specified login credentials.
trending -- Returns the top trending paste.
user_details -- Returns details about the user for the specified API user key.
    delete_paste -- Deletes the paste specified by the api_paste_key."""
# String to determine bad API requests
_bad_request = 'Bad API request'
# Base domain name
_base_domain = 'pastebin.com'
    # Valid Pastebin URLs begin with this string (kinda obvious)
_prefix_url = 'http://%s/' % _base_domain
# Valid Pastebin URLs with a custom subdomain begin with this string
_subdomain_url = 'http://%%s.%s/' % _base_domain
    # URL to the LEGACY POST API
    _legacy_api_url = 'http://%s/api_public.php' % _base_domain

    # URL to the POST API
    _api_url = 'http://%s/api/api_post.php' % _base_domain

    # URL to the login POST API
    _api_login_url = 'http://%s/api/api_login.php' % _base_domain
# Valid paste_expire_date values (Never, 10 minutes, 1 Hour, 1 Day, 1 Month)
paste_expire_date = ('N', '10M', '1H', '1D', '1M')
# Valid paste_expire_date values (0 = public, 1 = unlisted, 2 = private)
paste_private = ('public', 'unlisted', 'private')
# Valid parse_format values
paste_format = (
'4cs', # 4CS
'6502acme', # 6502 ACME Cross Assembler
'6502kickass', # 6502 Kick Assembler
'6502tasm', # 6502 TASM/64TASS
'abap', # ABAP
'actionscript', # ActionScript
'actionscript3', # ActionScript 3
'ada', # Ada
'algol68', # ALGOL 68
'apache', # Apache Log
'applescript', # AppleScript
'apt_sources', # APT Sources
'asm', # ASM (NASM)
'asp', # ASP
'autoconf', # autoconf
'autohotkey', # Autohotkey
'autoit', # AutoIt
'avisynth', # Avisynth
'awk', # Awk
'bascomavr', # BASCOM AVR
'bash', # Bash
'basic4gl', # Basic4GL
'bibtex', # BibTeX
'blitzbasic', # Blitz Basic
'bnf', # BNF
'boo', # BOO
'bf', # BrainFuck
'c', # C
'c_mac', # C for Macs
'cil', # C Intermediate Language
'csharp', # C#
'cpp', # C++
'cpp-qt', # C++ (with QT extensions)
'c_loadrunner', # C: Loadrunner
'caddcl', # CAD DCL
'cadlisp', # CAD Lisp
'cfdg', # CFDG
'chaiscript', # ChaiScript
'clojure', # Clojure
'klonec', # Clone C
'klonecpp', # Clone C++
'cmake', # CMake
'cobol', # COBOL
'coffeescript', # CoffeeScript
'cfm', # ColdFusion
'css', # CSS
'cuesheet', # Cuesheet
'd', # D
'dcs', # DCS
'delphi', # Delphi
'oxygene', # Delphi Prism (Oxygene)
'diff', # Diff
'div', # DIV
'dos', # DOS
'dot', # DOT
'e', # E
'ecmascript', # ECMAScript
'eiffel', # Eiffel
'email', # Email
'epc', # EPC
'erlang', # Erlang
'fsharp', # F#
'falcon', # Falcon
'fo', # FO Language
'f1', # Formula One
'fortran', # Fortran
'freebasic', # FreeBasic
'freeswitch', # FreeSWITCH
'gambas', # GAMBAS
'gml', # Game Maker
'gdb', # GDB
'genero', # Genero
'genie', # Genie
'gettext', # GetText
'go', # Go
'groovy', # Groovy
'gwbasic', # GwBasic
'haskell', # Haskell
'hicest', # HicEst
'hq9plus', # HQ9 Plus
'html4strict', # HTML
'html5', # HTML 5
'icon', # Icon
'idl', # IDL
'ini', # INI file
'inno', # Inno Script
'intercal', # INTERCAL
'io', # IO
'j', # J
'java', # Java
'java5', # Java 5
'javascript', # JavaScript
'jquery', # jQuery
'kixtart', # KiXtart
'latex', # Latex
'lb', # Liberty BASIC
'lsl2', # Linden Scripting
'lisp', # Lisp
'llvm', # LLVM
'locobasic', # Loco Basic
'logtalk', # Logtalk
'lolcode', # LOL Code
'lotusformulas', # Lotus Formulas
'lotusscript', # Lotus Script
'lscript', # LScript
'lua', # Lua
'm68k', # M68000 Assembler
'magiksf', # MagikSF
'make', # Make
'mapbasic', # MapBasic
'matlab', # MatLab
'mirc', # mIRC
'mmix', # MIX Assembler
'modula2', # Modula 2
'modula3', # Modula 3
'68000devpac', # Motorola 68000 HiSoft Dev
'mpasm', # MPASM
'mxml', # MXML
'mysql', # MySQL
'newlisp', # newLISP
'text', # None
'nsis', # NullSoft Installer
'oberon2', # Oberon 2
'objeck', # Objeck Programming Langua
'objc', # Objective C
'ocaml-brief', # OCalm Brief
'ocaml', # OCaml
'pf', # OpenBSD PACKET FILTER
'glsl', # OpenGL Shading
'oobas', # Openoffice BASIC
'oracle11', # Oracle 11
'oracle8', # Oracle 8
'oz', # Oz
'pascal', # Pascal
'pawn', # PAWN
'pcre', # PCRE
'per', # Per
'perl', # Perl
'perl6', # Perl 6
'php', # PHP
'php-brief', # PHP Brief
'pic16', # Pic 16
'pike', # Pike
'pixelbender', # Pixel Bender
'plsql', # PL/SQL
'postgresql', # PostgreSQL
'povray', # POV-Ray
'powershell', # Power Shell
'powerbuilder', # PowerBuilder
'proftpd', # ProFTPd
'progress', # Progress
'prolog', # Prolog
'properties', # Properties
'providex', # ProvideX
'purebasic', # PureBasic
'pycon', # PyCon
'python', # Python
'q', # q/kdb+
'qbasic', # QBasic
'rsplus', # R
'rails', # Rails
'rebol', # REBOL
'reg', # REG
'robots', # Robots
'rpmspec', # RPM Spec
'ruby', # Ruby
'gnuplot', # Ruby Gnuplot
'sas', # SAS
'scala', # Scala
'scheme', # Scheme
'scilab', # Scilab
'sdlbasic', # SdlBasic
'smalltalk', # Smalltalk
'smarty', # Smarty
'sql', # SQL
'systemverilog', # SystemVerilog
'tsql', # T-SQL
'tcl', # TCL
'teraterm', # Tera Term
'thinbasic', # thinBasic
'typoscript', # TypoScript
'unicon', # Unicon
'uscript', # UnrealScript
'vala', # Vala
'vbnet', # VB.NET
'verilog', # VeriLog
'vhdl', # VHDL
'vim', # VIM
'visualprolog', # Visual Pro Log
'vb', # VisualBasic
'visualfoxpro', # VisualFoxPro
'whitespace', # WhiteSpace
'whois', # WHOIS
'winbatch', # Winbatch
'xbasic', # XBasic
'xml', # XML
'xorg_conf', # Xorg Config
'xpp', # XPP
'yaml', # YAML
'z80', # Z80 Assembler
'zxbasic', # ZXBasic
)
def __init__(self):
pass
def delete_paste(self, api_dev_key, api_user_key, api_paste_key):
"""Delete the paste specified by the api_paste_key.
Usage Example::
>>> from pastebin import PastebinAPI
>>> x = PastebinAPI()
>>> paste_to_delete = x.delete_paste('453a994e0e2f1efae07f8759e59e075b',
... 'c57a18e6c0ae228cd4bd16fe36da381a',
... 'WkgcTFtv')
>>> print paste_to_delete
Paste Removed
@type api_dev_key: string
@param api_dev_key: The API Developer Key of a registered U{http://pastebin.com} account.
@type api_user_key: string
@param api_user_key: The API User Key of a U{http://pastebin.com} registered user.
@type api_paste_key: string
@param api_paste_key: The Paste Key of the paste to be deleted (string after final / in U{http://pastebin.com} URL).
@rtype: string
@returns: A successful deletion returns 'Paste Removed'.
"""
# Valid api developer key
argv = {'api_dev_key' : str(api_dev_key) }
# Requires pre-registered account
if api_user_key is not None:
argv['api_user_key'] = str(api_user_key)
# Key of the paste to be deleted.
if api_paste_key is not None:
argv['api_paste_key'] = str(api_paste_key)
# Valid API option - 'user_details' in this instance
argv['api_option'] = str('delete')
# lets try to read the URL that we've just built.
        request_string = urllib.request.urlopen(
            self._api_url, urllib.parse.urlencode(argv).encode('utf-8'))
        response = request_string.read().decode('utf-8')

        # basic error checking, mirroring the other methods in this class
        if response.startswith(self._bad_request):
            raise PastebinError(response)

        return response
def user_details(self, api_dev_key, api_user_key):
"""Return user details of the user specified by the api_user_key.
Usage Example::
>>> from pastebin import PastebinAPI
>>> x = PastebinAPI()
>>> details = x.user_details('453a994e0e2f1efae07f8759e59e075b',
... 'c57a18e6c0ae228cd4bd16fe36da381a')
>>> print details
<user>
<user_name>MonkeyPuzzle</user_name>
<user_format_short>python</user_format_short>
<user_expiration>N</user_expiration>
<user_avatar_url>http://pastebin.com/i/guest.gif</user_avatar_url>
<user_private>0</user_private>
<user_website></user_website>
<user_email>user@email.com</user_email>
<user_location></user_location>
<user_account_type>0</user_account_type>
</user>
@type api_dev_key: string
@param api_dev_key: The API Developer Key of a registered U{http://pastebin.com} account.
@type api_user_key: string
@param api_user_key: The API User Key of a U{http://pastebin.com} registered user.
@rtype: string
@returns: Returns an XML string containing user information.
"""
# Valid api developer key
argv = {'api_dev_key' : str(api_dev_key) }
# Requires pre-registered account to generate an api_user_key (see generate_user_key)
if api_user_key is not None:
argv['api_user_key'] = str(api_user_key)
# Valid API option - 'user_details' in this instance
argv['api_option'] = str('userdetails')
# lets try to read the URL that we've just built.
        request_string = urllib.request.urlopen(
            self._api_url, urllib.parse.urlencode(argv).encode('utf-8'))
        response = request_string.read().decode('utf-8')
# do some basic error checking here so we can gracefully handle any errors we are likely to encounter
if response.startswith(self._bad_request):
raise PastebinError(response)
elif not response.startswith('<user>'):
raise PastebinError(response)
return response
def trending(self, api_dev_key):
"""Returns the top trending paste details.
Usage Example::
>>> from pastebin import PastebinAPI
>>> x = PastebinAPI()
>>> details = x.trending('453a994e0e2f1efae07f8759e59e075b')
>>> print details
<paste>
<paste_key>jjMRFDH6</paste_key>
<paste_date>1333230838</paste_date>
<paste_title></paste_title>
<paste_size>6416</paste_size>
<paste_expire_date>0</paste_expire_date>
<paste_private>0</paste_private>
<paste_format_long>None</paste_format_long>
<paste_format_short>text</paste_format_short>
<paste_url>http://pastebin.com/jjMRFDH6</paste_url>
<paste_hits>6384</paste_hits>
</paste>
Note: Returns multiple trending pastes, not just 1.
@type api_dev_key: string
@param api_dev_key: The API Developer Key of a registered U{http://pastebin.com} account.
@rtype: string
@return: Returns the string (XML formatted) containing the top trending pastes.
"""
# Valid api developer key
argv = {'api_dev_key' : str(api_dev_key) }
# Valid API option - 'trends' returns the currently trending pastes
argv['api_option'] = str('trends')
# Let's try to read the URL that we've just built.
request_string = urllib.urlopen(self._api_url, urllib.urlencode(argv))
response = request_string.read()
# do some basic error checking here so we can gracefully handle any errors we are likely to encounter
if response.startswith(self._bad_request):
raise PastebinError(response)
elif not response.startswith('<paste>'):
raise PastebinError(response)
return response
def pastes_by_user(self, api_dev_key, api_user_key, results_limit = None):
"""Returns all pastes for the provided api_user_key.
Usage Example::
>>> from pastebin import PastebinAPI
>>> x = PastebinAPI()
>>> details = x.pastes_by_user('453a994e0e2f1efae07f8759e59e075b',
... 'c57a18e6c0ae228cd4bd16fe36da381a',
... 100)
>>> print details
<paste>
<paste_key>DLiSspYT</paste_key>
<paste_date>1332714730</paste_date>
<paste_title>Pastebin.py - Python 3.2 Pastebin.com API</paste_title>
<paste_size>25300</paste_size>
<paste_expire_date>0</paste_expire_date>
<paste_private>0</paste_private>
<paste_format_long>Python</paste_format_long>
<paste_format_short>python</paste_format_short>
<paste_url>http://pastebin.com/DLiSspYT</paste_url>
<paste_hits>70</paste_hits>
</paste>
Note: Returns multiple pastes, not just 1.
@type api_dev_key: string
@param api_dev_key: The API Developer Key of a registered U{http://pastebin.com} account.
@type api_user_key: string
@param api_user_key: The API User Key of a U{http://pastebin.com} registered user.
@type results_limit: number
@param results_limit: The number of pastes to return between 1 - 1000.
@rtype: string
@returns: Returns an XML string containing number of specified pastes by user.
"""
# Valid api developer key
argv = {'api_dev_key' : str(api_dev_key) }
# Requires pre-registered account
if api_user_key is not None:
argv['api_user_key'] = str(api_user_key)
# Number of results to return - between 1 and 1000, default = 50
if results_limit is None:
argv['api_results_limit'] = 50
elif results_limit < 1:
argv['api_results_limit'] = 50
elif results_limit > 1000:
argv['api_results_limit'] = 1000
else:
argv['api_results_limit'] = int(results_limit)
# Valid API option - 'list' returns pastes created by the given user
argv['api_option'] = str('list')
# Let's try to read the URL that we've just built.
request_string = urllib.urlopen(self._api_url, urllib.urlencode(argv))
response = request_string.read()
# do some basic error checking here so we can gracefully handle any errors we are likely to encounter
if response.startswith(self._bad_request):
raise PastebinError(response)
elif not response.startswith('<paste>'):
raise PastebinError(response)
return response
def generate_user_key(self, api_dev_key, username, password):
"""Generate a user session key - needed for other functions.
Usage Example::
>>> from pastebin import PastebinAPI
>>> x = PastebinAPI()
>>> my_key = x.generate_user_key('453a994e0e2f1efae07f8759e59e075b',
... 'MonkeyPuzzle',
... '12345678')
>>> print my_key
c57a18e6c0ae228cd4bd16fe36da381a
@type api_dev_key: string
@param api_dev_key: The API Developer Key of a registered U{http://pastebin.com} account.
@type username: string
@param username: The username of a registered U{http://pastebin.com} account.
@type password: string
@param password: The password of a registered U{http://pastebin.com} account.
@rtype: string
@returns: Session key (api_user_key) to allow authenticated interaction to the API.
"""
# Valid api developer key
argv = {'api_dev_key' : str(api_dev_key) }
# Requires pre-registered pastebin account
if username is not None:
argv['api_user_name'] = str(username)
# Requires pre-registered pastebin account
if password is not None:
argv['api_user_password'] = str(password)
# Let's try to read the URL that we've just built.
request_string = urllib.urlopen(self._api_login_url, urllib.urlencode(argv))
response = request_string.read()
# do some basic error checking here so we can gracefully handle any errors we are likely to encounter
if response.startswith(self._bad_request):
raise PastebinError(response)
return response
def paste(self, api_dev_key, api_paste_code,
api_user_key = None, paste_name = None, paste_format = None,
paste_private = None, paste_expire_date = None):
"""Submit a code snippet to Pastebin using the new API.
Usage Example::
>>> from pastebin import PastebinAPI
>>> x = PastebinAPI()
>>> url = x.paste('453a994e0e2f1efae07f8759e59e075b' ,
... 'Snippet of code to paste goes here',
... paste_name = 'title of paste',
... api_user_key = 'c57a18e6c0ae228cd4bd16fe36da381a',
... paste_format = 'python',
... paste_private = 'unlisted',
... paste_expire_date = '10M')
>>> print url
http://pastebin.com/tawPUgqY
@type api_dev_key: string
@param api_dev_key: The API Developer Key of a registered U{http://pastebin.com} account.
@type api_paste_code: string
@param api_paste_code: The file or string to paste to body of the U{http://pastebin.com} paste.
@type api_user_key: string
@param api_user_key: The API User Key of a U{http://pastebin.com} registered user.
If none specified, paste is made as a guest.
@type paste_name: string
@param paste_name: (Optional) Title of the paste.
Default is to paste anonymously.
@type paste_format: string
@param paste_format: (Optional) Programming language of the code being
pasted. This enables syntax highlighting when reading the code in
U{http://pastebin.com}. Default is no syntax highlighting (text is
just text and not source code).
@type paste_private: string
@param paste_private: (Optional) C{'public'} if the paste is public (visible
by everyone), C{'unlisted'} if it's public but not searchable.
C{'private'} if the paste is private and not searchable or indexed.
The Pastebin FAQ (U{http://pastebin.com/faq}) claims
private pastes are not indexed by search engines (aka Google).
@type paste_expire_date: string
@param paste_expire_date: (Optional) Expiration date for the paste.
Once past this date the paste is deleted automatically. Valid
values are found in the L{PastebinAPI.paste_expire_date} class member.
If not provided, the paste never expires.
@rtype: string
@return: Returns the URL to the newly created paste.
"""
# Valid api developer key
argv = {'api_dev_key' : str(api_dev_key) }
# Code snippet to submit
if api_paste_code is not None:
argv['api_paste_code'] = str(api_paste_code)
# Valid API option - 'paste' is default for new paste
argv['api_option'] = str('paste')
# API User Key - an empty string pastes as a guest
if api_user_key is not None:
argv['api_user_key'] = str(api_user_key)
else:
argv['api_user_key'] = ''
# Name of the poster
if paste_name is not None:
argv['api_paste_name'] = str(paste_name)
# Syntax highlighting
if paste_format is not None:
paste_format = str(paste_format).strip().lower()
argv['api_paste_format'] = paste_format
# Is the snippet private?
if paste_private is not None:
if paste_private == 'public':
argv['api_paste_private'] = 0
elif paste_private == 'unlisted':
argv['api_paste_private'] = 1
elif paste_private == 'private':
argv['api_paste_private'] = 2
# Expiration for the snippet
if paste_expire_date is not None:
paste_expire_date = str(paste_expire_date).strip().upper()
argv['api_paste_expire_date'] = paste_expire_date
# Let's try to read the URL that we've just built.
request_string = urllib.urlopen(self._api_url, urllib.urlencode(argv))
response = request_string.read()
# do some basic error checking here so we can gracefully handle any errors we are likely to encounter
if response.startswith(self._bad_request):
raise PastebinError(response)
elif not response.startswith(self._prefix_url):
raise PastebinError(response)
return response
def legacy_paste(self, paste_code,
paste_name = None, paste_private = None,
paste_expire_date = None, paste_format = None):
"""Unofficial python interface to the Pastebin legacy API.
Unlike the official API, this one doesn't require an API key, so it's
virtually anonymous.
Usage Example::
>>> from pastebin import PastebinAPI
>>> x = PastebinAPI()
>>> url = x.legacy_paste('Snippet of code to paste goes here',
... paste_name = 'title of paste',
... paste_private = 'unlisted',
... paste_expire_date = '10M',
... paste_format = 'python')
>>> print url
http://pastebin.com/tawPUgqY
@type paste_code: string
@param paste_code: The file or string to paste to body of the U{http://pastebin.com} paste.
@type paste_name: string
@param paste_name: (Optional) Title of the paste.
Default is to paste with no title.
@type paste_private: string
@param paste_private: (Optional) C{'public'} if the paste is public (visible
by everyone), C{'unlisted'} if it's public but not searchable.
C{'private'} if the paste is private and not searchable or indexed.
The Pastebin FAQ (U{http://pastebin.com/faq}) claims
private pastes are not indexed by search engines (aka Google).
@type paste_expire_date: string
@param paste_expire_date: (Optional) Expiration date for the paste.
Once past this date the paste is deleted automatically. Valid
values are found in the L{PastebinAPI.paste_expire_date} class member.
If not provided, the paste never expires.
@type paste_format: string
@param paste_format: (Optional) Programming language of the code being
pasted. This enables syntax highlighting when reading the code in
U{http://pastebin.com}. Default is no syntax highlighting (text is
just text and not source code).
@rtype: string
@return: Returns the URL to the newly created paste.
"""
# Code snippet to submit
argv = { 'paste_code' : str(paste_code) }
# Name of the poster
if paste_name is not None:
argv['paste_name'] = str(paste_name)
# Is the snippet private? The legacy API only distinguishes public (0)
# from private (1), so 'unlisted' and 'private' both map to 1 here.
if paste_private is not None:
argv['paste_private'] = 0 if str(paste_private).strip().lower() == 'public' else 1
# Expiration for the snippet
if paste_expire_date is not None:
paste_expire_date = str(paste_expire_date).strip().upper()
argv['paste_expire_date'] = paste_expire_date
# Syntax highlighting
if paste_format is not None:
paste_format = str(paste_format).strip().lower()
argv['paste_format'] = paste_format
# Let's try to read the URL that we've just built.
request_string = urllib.urlopen(self._legacy_api_url, urllib.urlencode(argv))
response = request_string.read()
# do some basic error checking here so we can gracefully handle any errors we are likely to encounter
if response.startswith(self._bad_request):
raise PastebinError(response)
elif not response.startswith(self._prefix_url):
raise PastebinError(response)
return response
######################################################
delete_paste = PastebinAPI.delete_paste
user_details = PastebinAPI.user_details
trending = PastebinAPI.trending
pastes_by_user = PastebinAPI.pastes_by_user
generate_user_key = PastebinAPI.generate_user_key
legacy_paste = PastebinAPI.legacy_paste
paste = PastebinAPI.paste
######################################################
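# The aliases above are unbound methods, so in Python 2 they still need a
# PastebinAPI instance passed as the first argument. A hedged usage sketch
# (the dev key below is a placeholder, not a real one):
#
# _api = PastebinAPI()
# url = paste(_api, 'my_dev_key_here', 'print "Hello, Pastebin!"',
#             paste_format='python', paste_private='unlisted',
#             paste_expire_date='10M')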
if __name__ == "__main__":
main()
dba0ab7feb9b0f1f06f733ef048e8a1aa5355e67 | 2,544 | py | Python | app/requests.py | seron-ux/News-app @ d22b256b26fb9fa2bb77658952139b9ddebb8f8c | ["MIT"] | stars: 1 (2021-04-16T12:03:37.000Z)
import urllib.request, json
from .models import News
import requests
# Getting api key
api_key = None
# Getting the news base url
base_url = None
base_url2 = None
def configure_request(app):
global api_key,base_url,base_url2
api_key = app.config['NEWS_API_KEY']
base_url = app.config['NEWS_API_BASE_URL']
base_url2 = app.config['ARTICLE_API_BASE_URL']
def get_news(category):
'''
Function that gets the json response to our url request
'''
get_news_url = base_url.format(category,api_key)
print(get_news_url)
get_news_response = requests.get(get_news_url).json()
print(get_news_response)
news_results = None
if get_news_response['articles']:
news_results_list = get_news_response['articles']
news_results = process_results(news_results_list)
return news_results
def search_news(news_name):
search_news_url = 'https://api.thenewsdb.org/3/search/news?api_key={}&query={}'.format(api_key,news_name)
search_news_response = requests.get(search_news_url).json()
search_news_results = None
if search_news_response['results']:
search_news_list = search_news_response['results']
search_news_results = process_results(search_news_list)
return search_news_results
def process_results(news_list):
'''
Function that processes the news results and transforms them into a list of objects
Args:
news_list: A list of dictionaries that contain news details
Returns :
news_results: A list of news objects
'''
news_results = []
for news_item in news_list:
title = news_item.get('title')
image = news_item.get('urlToImage')
description = news_item.get('description')
date = news_item.get('publishedAt')
article = news_item.get('url')
if image:
news_object = News(title,image,description,date,article)
news_results.append(news_object)
return news_results
def get_article(source):
'''
Function that gets the json response to our url request
'''
get_news_url = base_url2.format(source, api_key) # article endpoint: ARTICLE_API_BASE_URL configured above
with urllib.request.urlopen(get_news_url) as url:
get_news_data = url.read()
get_news_response = json.loads(get_news_data)
news_results = None
if get_news_response['articles']:
news_results_list = get_news_response['articles']
news_results = process_results(news_results_list)
return news_results
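# A hedged sketch of how this module is wired up from the app factory.
# The config keys match those read in configure_request above; the URL
# templates are placeholders (not the service's real endpoints), and
# item.title assumes the News model exposes a title attribute:
#
# from flask import Flask
# app = Flask(__name__)
# app.config['NEWS_API_KEY'] = 'your-api-key'
# app.config['NEWS_API_BASE_URL'] = 'https://example.org/headlines?category={}&apiKey={}'
# app.config['ARTICLE_API_BASE_URL'] = 'https://example.org/articles?source={}&apiKey={}'
# configure_request(app)
# for item in get_news('business') or []:
#     print(item.title)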
dba12a8374326bf93ca2bf2928409a83d003c3d7 | 861 | py | Python | leetcode/151_reverse _words_in_a_string.py | caoxudong/code_practice @ cb960cf69d67ae57b35f0691d35e15c11989e6d2 | ["MIT"] | stars: 1 (2020-06-19T11:23:46.000Z)
"""
Given an input string, reverse the string word by word.
For example,
Given s = "the sky is blue",
return "blue is sky the".
For C programmers: Try to solve it in-place in O(1) space.
Clarification:
* What constitutes a word?
A sequence of non-space characters constitutes a word.
* Could the input string contain leading or trailing spaces?
Yes. However, your reversed string should not contain leading or trailing spaces.
* How about multiple spaces between two words?
Reduce them to a single space in the reversed string.
https://leetcode.com/problems/reverse-words-in-a-string/
"""
class Solution:
# @param s, a string
# @return a string
def reverseWords(self, s):
elements = s.split(" ")
elements = [x for x in elements if x != ""]
elements = elements[::-1]
return " ".join(elements)
dba13fb4439b8ad0fa549819c5076a87665d49e6 | 3,540 | py | Python | Day10/loops.py | azeemchaudhrry/30DaysofPython @ 8aa80c81967d87e4bc70254a41517d0303ca0599 | ["MIT"]
# Day 10 Loops
from countries import *
# While Loop
# count = 0
# while count < 5:
# if count == 3:
# break
# print(count)
# count = count + 1
# numbers = [0,2,3,4,5,6,7,8,9,10]
# for number in numbers:
# print(number)
# language = 'Python'
# for letter in language:
# print(letter)
# tpl = ('python','updates','wow')
# for number in tpl:
# print(number)
# person = {
# 'first_name':'Asabeneh',
# 'last_name':'Yetayeh',
# 'age':250,
# 'country':'Finland',
# 'is_marred':True,
# 'skills':['JavaScript', 'React', 'Node', 'MongoDB', 'Python'],
# 'address':{
# 'street':'Space street',
# 'zipcode':'02210'
# }
# }
# print('------------------------------------')
# for key in person:
# print(key)
# print('------------------------------------')
# for key,value in person.items():
# print(key, value)
# print('--------------------------------------')
# it_companies = {'Facebook', 'Google', 'Microsoft', 'Apple', 'IBM', 'Oracle', 'Amazon'}
# for company in it_companies:
# print(company)
# print('--------------------------------------')
# numbers = (0,1,2,3,4,5,6,7)
# for number in numbers:
# print(number)
# if(number == 3):
# break
# print('--------------------------------------')
# for number in numbers:
# print(number)
# if(number == 3):
# continue
# print('--------------------------------------')
# numbers = (0,1,2,3,4,5)
# for number in numbers:
# print(number)
# if number == 3:
# continue
# print('Next number should be ', number + 1) if number != 5 else print("loop's end") # for short hand conditions need both if and else statements
# print('outside the loop')
# print('--------------------------------------')
# lst = list(range(11))
# print(lst)
# st = set(range(1,11))
# print(st)
# lst = list(range(0,11,2))
# print(lst)
# st = set(range(0,11,2))
# print(st)
# Exercises: Day 10
# Iterate 0 to 10 using for loop, do the same using while loop.
# numbers = [0,1,2,3,4,5,6,7,8,9,10]
# for number in numbers:
# print(number)
# count = 0
# while count < 10:
# print(count)
# count += 1
# Iterate 10 to 0 using for loop, do the same using while loop.
# for number in range(10,-1,-1):
# print(number)
# count = 10
# while count > -1:
# print(count)
# count -= 1
# Write a loop that makes seven calls to print(), so we get on the output the following triangle:
for index in range(1, 8):
print(index * '#')
limit = 9
for i in range(0,limit):
for j in range(0,limit):
print('# ', end='')
print('')
for i in range(0, 11):
print(f'{i} x {i} = {i * i}')
frameworks = ['Python', 'Numpy','Pandas','Django', 'Flask']
for framework in frameworks:
print(framework)
for i in range(0,101):
if i % 2 == 0:
print(i)
for i in range(0,101):
if i % 2 != 0:
print(i)
total = 0
for i in range(0,101):
total += i
print('The sum of all numbers is : ', total)
even_sum = odd_sum = 0
for i in range(0,101):
if i % 2 == 0:
even_sum += i
elif i % 2 != 0:
odd_sum += i
print(f'The sum of all evens is {even_sum}. And the sum of all odds is {odd_sum}.')
for country in countries:
if 'land' in country:
print(country)
fruits = ['banana', 'orange', 'mango', 'lemon']
total_elements = len(fruits) - 1
for i in range(0, int(len(fruits) / 2)):
temp_element = fruits[i]
fruits[i] = fruits[total_elements - i]
fruits[total_elements - i] = temp_element
print(fruits)
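# The swap loop above reverses the list in place; Python offers the same
# result in one step (either form would do):
#
# fruits.reverse()        # reverse in place
# fruits = fruits[::-1]   # new reversed copy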
dba3388df291e70cf8ca9ead3a8d7661985dbeac | 10,412 | py | Python | tessera-server/tessera/views_api.py | Dimas625/tessera @ 8e554f217220228fb8a0662fb5075cb839e9f1b1 | ["Apache-2.0"] | stars: 379 (2015-01-02..2016-12-05) | issues: 129 (2015-01-07..2016-07-24) | forks: 44 (2015-01-05..2016-11-23)
# -*- mode:python -*-
import flask
import json
import logging
from datetime import datetime
import inflection
from functools import wraps
from flask import request, url_for
from werkzeug.exceptions import HTTPException
from .client.api.model import *
from . import database
from . import helpers
from .application import db
mgr = database.DatabaseManager(db)
log = logging.getLogger(__name__)
api = flask.Blueprint('api', __name__)
# =============================================================================
# API Helpers
# =============================================================================
def route_api(application, *args, **kwargs):
def decorator(fn):
@application.route(*args, **kwargs)
@wraps(fn)
def wrapper(*args, **kwargs):
headers = None
status_code = 200
try:
value = fn(*args, **kwargs)
except HTTPException as e:
raise helpers.set_exception_response(e)
if isinstance(value, tuple):
if len(value) > 2:
headers = value[2]
status_code = value[1]
value = value[0]
return helpers.jsonify(value, status_code, headers)
return fn
return decorator
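# Views wrapped by route_api can return a bare value, a (value, status)
# pair, or a (value, status, headers) triple; a hypothetical route to
# illustrate the convention (not part of this API):
#
# @route_api(api, '/ping/')
# def ping():
#     return {'pong': True}, 200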
def _dashboard_sort_column():
"""Return a SQLAlchemy column descriptor to sort results by, based on
the 'sort' and 'order' request parameters.
"""
columns = {
'created' : database.DashboardRecord.creation_date,
'modified' : database.DashboardRecord.last_modified_date,
'category' : database.DashboardRecord.category,
'id' : database.DashboardRecord.id,
'title' : database.DashboardRecord.title
}
colname = helpers.get_param('sort', 'created')
order = helpers.get_param('order')
column = database.DashboardRecord.creation_date
if colname in columns:
column = columns[colname]
if order == 'desc' or order == u'desc':
return column.desc()
else:
return column.asc()
def _set_dashboard_hrefs(dash):
"""Add the various ReSTful hrefs to an outgoing dashboard
representation. dash should be the dictionary for of the dashboard,
not the model object.
"""
id = dash['id']
dash['href'] = url_for('api.dashboard_get', id=id)
dash['definition_href'] = url_for('api.dashboard_get_definition', id=id)
dash['view_href'] = url_for('ui.dashboard_with_slug',
id=id,
slug=inflection.parameterize(dash['title']))
if 'definition' in dash:
definition = dash['definition']
definition['href'] = url_for('api.dashboard_get_definition', id=id)
return dash
def _dashboards_response(dashboards):
"""Return a Flask response object for a list of dashboards in API
format. dashboards must be a list of dashboard model objects, which
will be converted to their JSON representation.
"""
if not isinstance(dashboards, list):
dashboards = [dashboards]
include_definition = helpers.get_param_boolean('definition', False)
return [ _set_dashboard_hrefs(d.to_json(include_definition=include_definition)) for d in dashboards]
def _set_tag_hrefs(tag):
"""Add ReSTful href attributes to a tag's dictionary
representation.
"""
id = tag['id']
tag['href'] = url_for('api.tag_get', id=id)
return tag
def _tags_response(tags):
"""Return a Flask response object for a list of tags in API
format. tags must be a list of tag model objects, which
will be converted to their JSON representation.
"""
if not isinstance(tags, list):
tags = [tags]
return [_set_tag_hrefs(t.to_json()) for t in tags]
# =============================================================================
# Dashboards
# =============================================================================
@route_api(api, '/dashboard/')
def dashboard_list():
"""Listing for all dashboards. Returns just the metadata, not the
definitions.
"""
imported_from = request.args.get('imported_from')
if imported_from:
query = database.DashboardRecord.query.filter_by(imported_from=imported_from) \
.order_by(_dashboard_sort_column())
else:
query = database.DashboardRecord.query.order_by(_dashboard_sort_column())
dashboards = [d for d in query.all()]
return _dashboards_response(dashboards)
@route_api(api, '/dashboard/tagged/<tag>')
def dashboard_list_tagged(tag):
"""Listing for a set of dashboards with a tag applied. Returns just
the metadata, not the definitions.
"""
tag = database.TagRecord.query.filter_by(name=tag).first()
if not tag:
return _dashboards_response([])
dashboards = [d for d in tag.dashboards.order_by(_dashboard_sort_column())]
return _dashboards_response(dashboards)
@route_api(api, '/dashboard/category/<category>')
def dashboard_list_dashboards_in_category(category):
"""Listing for a set of dashboards in a specified category. Returns
just the metadata, not the definitions.
"""
dashboards = [d for d in database.DashboardRecord.query
.filter_by(category=category)
.order_by(_dashboard_sort_column()) ]
return _dashboards_response(dashboards)
@route_api(api, '/dashboard/category/')
def dashboard_list_all_dashboard_categories():
result = db.session.query(
database.DashboardRecord.category,
db.func.count(database.DashboardRecord.category)
).group_by(database.DashboardRecord.category).all()
categories = []
for (name, count) in result:
categories.append({
'name' : name,
'count' : count,
})
return categories
@route_api(api, '/dashboard/<id>')
def dashboard_get(id):
"""Get the metadata for a single dashboard.
"""
dashboard = database.DashboardRecord.query.get_or_404(id)
rendering = helpers.get_param('rendering', False)
include_definition = helpers.get_param_boolean('definition', False)
dash = _set_dashboard_hrefs(dashboard.to_json(rendering or include_definition))
if rendering:
dash['preferences'] = helpers.get_preferences()
return dash
@route_api(api, '/dashboard/<id>/for-rendering')
def dashboard_get_for_rendering(id):
"""Get a dashboard with its definition, and current settings necessary
for rendering.
"""
dashboard = database.DashboardRecord.query.get_or_404(id)
dash = _set_dashboard_hrefs(dashboard.to_json(True))
return {
'dashboard' : dash,
'preferences' : helpers.get_preferences()
}
@route_api(api, '/dashboard/', methods=['POST'])
def dashboard_create():
"""Create a new dashboard with an empty definition.
"""
dashboard = database.DashboardRecord.from_json(request.json)
if not dashboard.title:
return {
'error_message': "Missing required field 'title'"
}, 400
if 'definition' in request.json:
dashboard.definition = database.DefinitionRecord(dumps(request.json['definition']))
else:
dashboard.definition = database.DefinitionRecord(dumps(DashboardDefinition()))
mgr.store_dashboard(dashboard)
href = url_for('api.dashboard_get', id=dashboard.id)
return {
'dashboard_href' : href,
'view_href' : url_for('ui.dashboard_with_slug',
id=dashboard.id,
slug=inflection.parameterize(dashboard.title))
}, 201, { 'Location' : href }
@route_api(api, '/dashboard/<id>', methods=['PUT'])
def dashboard_update(id):
"""Update the metadata for an existing dashboard.
"""
body = request.json
dashboard = database.DashboardRecord.query.get_or_404(id)
dashboard.merge_from_json(body)
mgr.store_dashboard(dashboard)
# TODO - return similar to create, above
return {}
@route_api(api, '/dashboard/<id>', methods=['DELETE'])
def dashboard_delete(id):
"""Delete a dashboard. Use with caution.
"""
dashboard = database.DashboardRecord.query.get_or_404(id)
db.session.delete(dashboard)
db.session.commit()
return {}, 204
@route_api(api, '/dashboard/<id>/definition')
def dashboard_get_definition(id):
"""Fetch the definition for a dashboard. This returns the
representation to use when modifying a dashboard.
"""
definition = database.DashboardRecord.query.get_or_404(id).definition.to_json()
definition['href'] = url_for('api.dashboard_get_definition', id=id)
definition['dashboard_href'] = url_for('api.dashboard_get', id=id)
return definition
@route_api(api, '/dashboard/<id>/definition', methods=['PUT'])
def dashboard_update_definition(id):
"""Update the definition of the dashboard. This should use the
representation returned by /api/dashboard/<id>/definition, and
should NOT have any embedded variables expanded, nor should it
have complete graphite URLs in the queries.
"""
dashboard = database.DashboardRecord.query.get_or_404(id)
# Validate the payload
definition = DashboardDefinition.from_json(json.loads(request.data.decode('utf-8')))
if dashboard.definition:
dashboard.definition.definition = dumps(definition)
else:
dashboard.definition = database.DashboardRecordDef(request.data)
mgr.store_dashboard(dashboard)
return {}
# =============================================================================
# Tags
# =============================================================================
@route_api(api, '/tag/')
def tag_list():
"""Listing for all tags.
"""
tags = db.session.query(database.TagRecord).all()
return _tags_response(tags)
@route_api(api, '/tag/<id>')
def tag_get(id):
tag = database.TagRecord.query.get_or_404(id)
return _tags_response(tag)
# =============================================================================
# Miscellany
# =============================================================================
@route_api(api, '/preferences/')
def preferences_get():
return helpers.get_preferences()
@route_api(api, '/preferences/', methods=['PUT'])
def preferences_put():
helpers.set_preferences(request.json)
return helpers.get_preferences()
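# Hedged wiring sketch: this module only defines the blueprint, and the
# host application is assumed to register it under /api (the prefix is
# inferred from the /api/dashboard/<id>/definition path cited above):
#
# from tessera.views_api import api
# app.register_blueprint(api, url_prefix='/api')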
dba3bf31e30dbc6e19d1f005b15ec72aaafc1178 | 4,175 | py | Python | modules/aws_service.py | Darkcybe/attack_range @ b135251cc40e527e78e6e826759e421fb3834577 | ["Apache-2.0"] | stars: 1 (2020-08-26T18:14:17.000Z)
import sys
import re
import boto3
from botocore.exceptions import ClientError
import uuid
import time
import yaml
import os
def get_instance_by_name(ec2_name, config):
instances = get_all_instances(config)
for instance in instances:
name = instance['Tags'][0]['Value']
if name == ec2_name:
return instance
def get_single_instance_public_ip(ec2_name, config):
instance = get_instance_by_name(ec2_name, config)
return instance['NetworkInterfaces'][0]['Association']['PublicIp']
def get_all_instances(config):
key_name = config['key_name']
region = config['region']
client = boto3.client('ec2', region_name=region)
response = client.describe_instances(
Filters=[
{
'Name': "key-name",
'Values': [key_name]
}
]
)
instances = []
for reservation in response['Reservations']:
for instance in reservation['Instances']:
if instance['State']['Name']!='terminated':
if len(instance['Tags']) > 0:
name = instance['Tags'][0]['Value']
if name.startswith(config['range_name'] + '-attack-range'):
instances.append(instance)
return instances
def get_splunk_instance_ip(config):
all_instances = get_all_instances(config)
for instance in all_instances:
instance_tag = config['range_name'] + '-attack-range-splunk-server'
if instance['Tags'][0]['Value'] == instance_tag:
return instance['NetworkInterfaces'][0]['PrivateIpAddresses'][0]['Association']['PublicIp']
def check_ec2_instance_state(ec2_name, state, log, config):
# log is passed in explicitly, matching change_ec2_state below.
instance = get_instance_by_name(ec2_name, config)
if not instance:
log.error(ec2_name + ' not found as AWS EC2 instance.')
sys.exit(1)
return instance['State']['Name'] == state
def change_ec2_state(instances, new_state, log, config):
region = config['region']
client = boto3.client('ec2', region_name=region)
if len(instances) == 0:
log.error('No matching AWS EC2 instances found.')
sys.exit(1)
if new_state == 'stopped':
for instance in instances:
if instance['State']['Name'] == 'running':
response = client.stop_instances(
InstanceIds=[instance['InstanceId']]
)
log.info('Successfully stopped instance with ID ' +
instance['InstanceId'] + ' .')
elif new_state == 'running':
for instance in instances:
if instance['State']['Name'] == 'stopped':
response = client.start_instances(
InstanceIds=[instance['InstanceId']]
)
log.info('Successfully started instance with ID ' + instance['InstanceId'] + ' .')
# def upload_file_s3_bucket(file_name, results, test_file, isArchive):
# region = config['region']
# s3_client = boto3.client('s3', region_name=region)
# if isArchive:
# response = s3_client.upload_file(file_name, 'attack-range-attack-data', str(test_file['simulation_technique'] + '/attack_data.tar.gz'))
# else:
# response = s3_client.upload_file(file_name, 'attack-range-attack-data', str(test_file['simulation_technique'] + '/attack_data.json'))
#
# with open('tmp/test_results.yml', 'w') as f:
# yaml.dump(results, f)
# response2 = s3_client.upload_file('tmp/test_results.yml', 'attack-range-automated-testing', str(test_file['simulation_technique'] + '/test_results.yml'))
# os.remove('tmp/test_results.yml')
def upload_file_s3_bucket(s3_bucket, file_path, S3_file_path, config):
region = config['region']
s3_client = boto3.client('s3', region_name=region)
response = s3_client.upload_file(file_path, s3_bucket, S3_file_path)
def upload_test_results_s3_bucket(s3_bucket, test_file, test_result_file_path, config):
region = config['region']
s3_client = boto3.client('s3', region_name=region)
response = s3_client.upload_file(test_result_file_path, s3_bucket, str(test_file['simulation_technique'] + '/test_results.yml'))
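# Hedged usage sketch (the config values are placeholders): stop every
# EC2 instance that belongs to this attack range.
#
# import logging
# log = logging.getLogger(__name__)
# config = {'region': 'us-west-2', 'key_name': 'attack-range-key', 'range_name': 'demo'}
# change_ec2_state(get_all_instances(config), 'stopped', log, config)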
dba4148e040528b537c6483d7f1281dc550a6268 | 5,685 | py | Python | pystacknet/metrics.py | KevinMichaelSchindler/pystacknet @ bb723511787be6a0828d2ec5ef141fa76b80ef84 | ["MIT"]
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 31 18:33:58 2018
@author: Marios Michailidis
metrics and method to check metrics used within StackNet
"""
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score , mean_squared_log_error #regression metrics
from sklearn.metrics import roc_auc_score, log_loss ,accuracy_score, f1_score ,matthews_corrcoef
import numpy as np
valid_regression_metrics=["rmse","mae","rmsle","r2","mape","smape"]
valid_classification_metrics=["auc","logloss","accuracy","f1","matthews"]
############ classification metrics ############
def auc(y_true, y_pred, sample_weight=None):
return roc_auc_score(y_true, y_pred, sample_weight=sample_weight)
def logloss(y_true, y_pred, sample_weight=None, labels = None):
return log_loss(y_true, y_pred, sample_weight=sample_weight, labels = labels)
def accuracy(y_true, y_pred, sample_weight=None):
return accuracy_score(y_true, y_pred, sample_weight=sample_weight)
def f1(y_true, y_pred, sample_weight=None):
return f1_score(y_true, y_pred, sample_weight=sample_weight)
def matthews(y_true, y_pred, sample_weight=None):
return matthews_corrcoef(y_true, y_pred, sample_weight=sample_weight)
############ regression metrics ############
def rmse(y_true, y_pred, sample_weight=None):
return np.sqrt(mean_squared_error(y_true, y_pred, sample_weight=sample_weight))
def mae(y_true, y_pred, sample_weight=None):
return mean_absolute_error(y_true, y_pred, sample_weight=sample_weight)
def rmsle (y_true, y_pred, sample_weight=None):
return np.sqrt(mean_squared_log_error(y_true, y_pred, sample_weight=sample_weight))
def r2(y_true, y_pred, sample_weight=None):
return r2_score(y_true, y_pred, sample_weight=sample_weight)
def mape(y_true, y_pred, sample_weight=None):
y_true = y_true.ravel()
y_pred = y_pred.ravel()
if sample_weight is not None:
sample_weight = sample_weight.ravel()
eps = 1E-15
ape = np.abs((y_true - y_pred) / (y_true + eps)) * 100
ape[y_true == 0] = 0
return np.average(ape, weights=sample_weight)
def smape(y_true, y_pred, sample_weight=None):
y_true = y_true.ravel()
y_pred = y_pred.ravel()
if sample_weight is not None:
sample_weight = sample_weight.ravel()
eps = 1E-15
sape = (np.abs(y_true - y_pred) / (0.5 * (np.abs(y_true) + np.abs(y_pred)) + eps)) * 100
sape[(y_true == 0) & (y_pred == 0)] = 0
return np.average(sape, weights=sample_weight)
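# Quick sanity check of smape with made-up numbers: |100-110|/(0.5*(100+110))*100
# is about 9.52 and |200-180|/(0.5*(200+180))*100 is about 10.53, so the
# unweighted average is about 10.02.
#
# import numpy as np
# print(smape(np.array([100.0, 200.0]), np.array([110.0, 180.0])))  # ~10.02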
"""
metric: string or class that returns a metric given (y_true, y_pred, sample_weight=None)
Currently supported metrics are "rmse","mae","rmsle","r2","mape","smape"
"""
def check_regression_metric(metric):
if metric is None:
raise Exception("metric cannot be None")
if isinstance(metric, str) :
if metric not in valid_regression_metrics:
raise Exception ("The regression metric has to be one of %s " % (", ".join([str(k) for k in valid_regression_metrics])))
if metric=="rmse":
return rmse,metric
elif metric=="mae":
return mae,metric
elif metric=="rmsle":
return rmsle,metric
elif metric=="r2":
return r2,metric
elif metric=="mape":
return mape,metric
elif metric=="smape":
return smape,metric
else :
raise Exception ("The metric %s is not recognised " % (metric) )
else: # custom metric is given
try:
y_true_temp=[[1],[2],[3]]
y_pred_temp=[[2],[1],[3]]
y_true_temp=np.array(y_true_temp)
y_pred_temp=np.array(y_pred_temp)
sample_weight_temp=[1,0.5,1]
metric(y_true_temp,y_pred_temp, sample_weight=sample_weight_temp )
return metric,"custom"
except Exception:
raise Exception ("The custom metric has to implement metric(y_true, y_pred, sample_weight=None)" )
"""
metric: string or class that returns a metric given (y_true, y_pred, sample_weight=None)
Currently supported metrics are "auc","logloss","accuracy","f1","matthews"
"""
def check_classification_metric(metric):
if metric is None:
raise Exception("metric cannot be None")
if isinstance(metric, str) :
if metric not in valid_classification_metrics:
raise Exception ("The classification metric has to be one of %s " % (", ".join([str(k) for k in valid_classification_metrics])))
if metric=="auc":
return auc,metric
elif metric=="logloss":
return logloss,metric
elif metric=="accuracy":
return accuracy,metric
elif metric=="r2":
return r2,metric
elif metric=="f1":
return f1,metric
elif metric=="matthews":
return matthews,metric
else :
raise Exception ("The metric %s is not recognised " % (metric) )
else: # custom metric is given
try:
y_true_temp=[[1],[0],[1]]
y_pred_temp=[[0.4],[1],[0.2]]
y_true_temp=np.array(y_true_temp)
y_pred_temp=np.array(y_pred_temp)
sample_weight_temp=[1,0.5,1]
metric(y_true_temp,y_pred_temp, sample_weight=sample_weight_temp )
return metric,"custom"
except Exception:
raise Exception ("The custom metric has to implement metric(y_true, y_pred, sample_weight=None)" )
dba7508f72db5159de10c2533d780968df627768 | 5,629 | py | Python | check_logstash_pipeline.py | stdevel/nagios-plugins @ 5ea0e186fa6fdd0e70681c7fed02c6d46d50bbb5 | ["IBM-pibs", "Apache-1.1"]
#!/usr/bin/env python
# coding=utf-8
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2017-11-24 21:10:35 +0100 (Fri, 24 Nov 2017)
#
# https://github.com/harisekhon/nagios-plugins
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
"""
Nagios Plugin to check a Logstash pipeline is online via the Logstash Rest API
API is only available in Logstash 5.x onwards, will get connection refused on older versions
Optional thresholds apply to the number of pipeline workers
Ensure Logstash options:
--http.host should be set to 0.0.0.0 if querying remotely
--http.port should be set to the same port that you are querying via this plugin's --port switch
Tested on Logstash 5.0, 5.1, 5.2, 5.3, 5.4, 5.5, 5.6, 6.0, 6.1
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import traceback
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
#from harisekhon.utils import log
from harisekhon.utils import ERRORS, UnknownError, support_msg_api
from harisekhon.utils import validate_chars
from harisekhon import RestNagiosPlugin
except ImportError as _:
print(traceback.format_exc(), end='')
sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.6'
class CheckLogstashPipeline(RestNagiosPlugin):
def __init__(self):
# Python 2.x
super(CheckLogstashPipeline, self).__init__()
# Python 3.x
# super().__init__()
self.name = 'Logstash'
self.default_port = 9600
# could add pipeline name to end of this endpoint but error would be less good 404 Not Found
# Logstash 5.x /_node/pipeline <= use -5 switch for older Logstash
# Logstash 6.x /_node/pipelines
self.path = '/_node/pipelines'
self.auth = False
self.json = True
self.msg = 'Logstash pipelines msg not defined yet'
self.pipeline = None
def add_options(self):
super(CheckLogstashPipeline, self).add_options()
self.add_opt('-i', '--pipeline', default='main', help='Pipeline to expect is configured (default: main)')
self.add_opt('-d', '--dead-letter-queue-enabled', action='store_true',
help='Check dead letter queue is enabled on pipeline (optional, only applies to Logstash 6+)')
self.add_opt('-5', '--logstash-5', action='store_true',
help='Logstash 5.x (has a slightly different API endpoint to 6.x)')
self.add_opt('-l', '--list', action='store_true', help='List pipelines and exit (only for Logstash 6+)')
self.add_thresholds()
def process_options(self):
super(CheckLogstashPipeline, self).process_options()
self.pipeline = self.get_opt('pipeline')
validate_chars(self.pipeline, 'pipeline', 'A-Za-z0-9_-')
# slightly more efficient to not return the potential list of other pipelines but the error is less informative
#self.path += '/{}'.format(self.pipeline)
if self.get_opt('logstash_5'):
if self.pipeline != 'main':
self.usage("--pipeline can only be 'main' for --logstash-5")
if self.get_opt('list'):
self.usage('can only --list pipelines for Logstash 6+')
if self.get_opt('dead_letter_queue_enabled'):
self.usage('--dead-letter-queue-enabled only available with Logstash 6+')
self.path = self.path.rstrip('s')
self.validate_thresholds(simple='lower', optional=True)
def parse_json(self, json_data):
if self.get_opt('logstash_5'):
pipeline = json_data['pipeline']
else:
pipelines = json_data['pipelines']
if self.get_opt('list'):
print('Logstash Pipelines:\n')
for pipeline in pipelines:
print(pipeline)
sys.exit(ERRORS['UNKNOWN'])
pipeline = None
if self.pipeline in pipelines:
pipeline = pipelines[self.pipeline]
self.msg = "Logstash pipeline '{}' ".format(self.pipeline)
if pipeline:
self.msg += 'exists'
if 'workers' not in pipeline:
raise UnknownError('workers field not found, Logstash may still be initializing' + \
'. If problem persists {}'.format(support_msg_api()))
workers = pipeline['workers']
self.msg += ' with {} workers'.format(workers)
self.check_thresholds(workers)
if not self.get_opt('logstash_5'):
dead_letter_queue_enabled = pipeline['dead_letter_queue_enabled']
self.msg += ', dead letter queue enabled: {}'.format(dead_letter_queue_enabled)
if self.get_opt('dead_letter_queue_enabled') and not dead_letter_queue_enabled:
self.warning()
self.msg += ' (expected True)'
batch_delay = pipeline['batch_delay']
batch_size = pipeline['batch_size']
self.msg += ', batch delay: {}, batch size: {}'.format(batch_delay, batch_size)
else:
self.critical()
self.msg += 'does not exist!'
if __name__ == '__main__':
CheckLogstashPipeline().main()
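# A minimal standalone probe of the endpoint this plugin queries, assuming
# a reachable Logstash on localhost:9600 (not part of the plugin itself):
#
# import requests
# resp = requests.get('http://localhost:9600/_node/pipelines')
# resp.raise_for_status()
# print(resp.json()['pipelines']['main']['workers'])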