id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
6597504 | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bounding Box List operations."""
import tensorflow as tf
from official.vision.beta.ops import sampling_ops
from official.vision.beta.projects.centernet.ops import box_list
def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):
  """Transfers every extra field from one BoxList onto another.

  Args:
    boxlist_to_copy_to: BoxList that receives the extra fields.
    boxlist_to_copy_from: BoxList whose extra fields are read.

  Returns:
    boxlist_to_copy_to, now carrying all extra fields of the source.
  """
  source = boxlist_to_copy_from
  for field_name in source.get_extra_fields():
    boxlist_to_copy_to.add_field(field_name, source.get_field(field_name))
  return boxlist_to_copy_to
def scale(boxlist, y_scale, x_scale):
  """Scales box coordinates along the y and x dimensions.

  Args:
    boxlist: BoxList holding N boxes.
    y_scale: (float) scalar tensor multiplying all y coordinates.
    x_scale: (float) scalar tensor multiplying all x coordinates.

  Returns:
    BoxList holding N scaled boxes, with the input's extra fields copied over.
  """
  with tf.name_scope('Scale'):
    y_scale = tf.cast(y_scale, tf.float32)
    x_scale = tf.cast(x_scale, tf.float32)
    ymin, xmin, ymax, xmax = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    # Multiply each corner column by its axis scale, then reassemble [N, 4].
    scaled_corners = tf.concat(
        [y_scale * ymin, x_scale * xmin, y_scale * ymax, x_scale * xmax], 1)
    return _copy_extra_fields(box_list.BoxList(scaled_corners), boxlist)
def area(boxlist):
  """Computes the area of every box in a BoxList.

  Args:
    boxlist: BoxList holding N boxes.

  Returns:
    A tensor of shape [N] with one area per box.
  """
  with tf.name_scope('Area'):
    ymin, xmin, ymax, xmax = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    heights = ymax - ymin
    widths = xmax - xmin
    # Drop the trailing singleton dimension left by tf.split: [N, 1] -> [N].
    return tf.squeeze(heights * widths, [1])
def change_coordinate_frame(boxlist, window):
  """Re-expresses box coordinates relative to a window's frame.

  Given a window of the form [ymin, xmin, ymax, xmax], shifts and rescales
  every box so that the window's min corner maps to (0, 0) and its max corner
  maps to (1, 1).

  A typical use case is data augmentation: after randomly cropping an image
  to `window`, groundtruth boxes must be re-expressed in the coordinate frame
  of the crop.

  Args:
    boxlist: A BoxList object holding N boxes.
    window: A rank 1 tensor [4].

  Returns:
    A BoxList object with N boxes in the window's coordinate frame, with the
    input boxlist's extra fields copied over.
  """
  with tf.name_scope('ChangeCoordinateFrame'):
    win_height = window[2] - window[0]
    win_width = window[3] - window[1]
    # Translate so the window's min corner becomes the origin, then divide by
    # the window size to normalize into [0, 1] within the window.
    shifted = boxlist.get() - [window[0], window[1], window[0], window[1]]
    boxlist_new = scale(
        box_list.BoxList(shifted), 1.0 / win_height, 1.0 / win_width)
    return _copy_extra_fields(boxlist_new, boxlist)
def matmul_gather_on_zeroth_axis(params, indices):
  """tf.gather on the zeroth axis implemented via one-hot matmul.

  Args:
    params: A float32 Tensor from which to gather values. Must be at least
      rank 1.
    indices: A Tensor of type int32 or int64, with values in the range
      [0, params.shape[0]).

  Returns:
    A Tensor with the same type as params, holding the rows of params
    selected by indices, with shape indices.shape + params.shape[1:].
  """
  with tf.name_scope('MatMulGather'):
    params_shape = sampling_ops.combined_static_and_dynamic_shape(params)
    indices_shape = sampling_ops.combined_static_and_dynamic_shape(indices)
    # Flatten params to 2-D so the gather reduces to a single matmul with a
    # one-hot selector matrix.
    flat_params = tf.reshape(params, [params_shape[0], -1])
    selector = tf.one_hot(indices, params_shape[0])
    flat_result = tf.matmul(selector, flat_params)
    output_shape = tf.stack(indices_shape + params_shape[1:])
    return tf.reshape(flat_result, output_shape)
def gather(boxlist, indices, fields=None, use_static_shapes=False):
  """Gather boxes from BoxList according to indices and return new BoxList.

  By default, `gather` returns boxes corresponding to the input index list, as
  well as all additional fields stored in the boxlist (indexing into the
  first dimension). However one can optionally only gather from a
  subset of fields.

  Args:
    boxlist: BoxList holding N boxes
    indices: a rank-1 tensor of type int32 / int64
    fields: (optional) list of fields to also gather from. If None (default),
      all fields are gathered from. Pass an empty fields list to only gather
      the box coordinates.
    use_static_shapes: Whether to use an implementation with static shape
      guarantees.

  Returns:
    subboxlist: a BoxList corresponding to the subset of the input BoxList
    specified by indices

  Raises:
    ValueError: if specified field is not contained in boxlist or if the
      indices are not of type int32 / int64
  """
  with tf.name_scope('Gather'):
    if len(indices.shape.as_list()) != 1:
      raise ValueError('indices should have rank 1')
    if indices.dtype != tf.int32 and indices.dtype != tf.int64:
      raise ValueError('indices should be an int32 / int64 tensor')
    gather_op = tf.gather
    if use_static_shapes:
      gather_op = matmul_gather_on_zeroth_axis
    subboxlist = box_list.BoxList(gather_op(boxlist.get(), indices))
    if fields is None:
      fields = boxlist.get_extra_fields()
    # Build a fresh list instead of `fields += ['boxes']`: augmented
    # assignment would append to the caller's list in place, silently
    # mutating their argument on every call.
    fields = list(fields) + ['boxes']
    for field in fields:
      if not boxlist.has_field(field):
        raise ValueError('boxlist must contain all specified fields')
      subfieldlist = gather_op(boxlist.get_field(field), indices)
      subboxlist.add_field(field, subfieldlist)
    return subboxlist
def prune_completely_outside_window(boxlist, window):
  """Prunes bounding boxes that fall completely outside of the given window.

  Unlike clip_to_window, which also clips boxes that partially overflow the
  window, this function only removes boxes with no overlap at all; partially
  overflowing boxes are kept unmodified.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window.

  Returns:
    pruned_boxlist: a new BoxList with all bounding boxes partially or fully
      in the window.
    valid_indices: a tensor with shape [M_out] indexing the valid bounding
      boxes in the input tensor.
  """
  # NOTE: the misspelled scope name is kept byte-for-byte so existing graph /
  # op names stay stable.
  with tf.name_scope('PruneCompleteleyOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    # A box is completely outside iff it lies entirely past one window edge.
    violations = tf.concat([
        tf.greater_equal(y_min, win_y_max),
        tf.greater_equal(x_min, win_x_max),
        tf.less_equal(y_max, win_y_min),
        tf.less_equal(x_max, win_x_min),
    ], 1)
    keep_mask = tf.logical_not(tf.reduce_any(violations, 1))
    valid_indices = tf.reshape(tf.where(keep_mask), [-1])
    return gather(boxlist, valid_indices), valid_indices
def clip_to_window(boxlist, window, filter_nonoverlapping=True):
  """Clips bounding boxes to a window.

  Clips any input bounding boxes (represented by their corners) to the given
  window, optionally filtering out boxes that do not overlap the window at
  all.

  Args:
    boxlist: BoxList holding M_in boxes.
    window: a tensor of shape [4] representing the [y_min, x_min, y_max,
      x_max] window to which the op should clip boxes.
    filter_nonoverlapping: whether to filter out boxes that do not overlap at
      all with the window.

  Returns:
    a BoxList holding M_out boxes where M_out <= M_in
  """
  with tf.name_scope('ClipToWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min = window[0]
    win_x_min = window[1]
    win_y_max = window[2]
    win_x_max = window[3]

    def _clamp(coords, low, high):
      # Clamp every coordinate into [low, high].
      return tf.maximum(tf.minimum(coords, high), low)

    clipped = box_list.BoxList(
        tf.concat([_clamp(y_min, win_y_min, win_y_max),
                   _clamp(x_min, win_x_min, win_x_max),
                   _clamp(y_max, win_y_min, win_y_max),
                   _clamp(x_max, win_x_min, win_x_max)], 1))
    clipped = _copy_extra_fields(clipped, boxlist)
    if filter_nonoverlapping:
      # Boxes fully outside the window collapse to zero area after clipping.
      areas = area(clipped)
      nonzero_area_indices = tf.cast(
          tf.reshape(tf.where(tf.greater(areas, 0.0)), [-1]), tf.int32)
      clipped = gather(clipped, nonzero_area_indices)
    return clipped
def height_width(boxlist):
  """Computes heights and widths of the boxes in a BoxList.

  Args:
    boxlist: BoxList holding N boxes.

  Returns:
    A tensor with shape [N] of box heights, and a tensor with shape [N] of
    box widths.
  """
  with tf.name_scope('HeightWidth'):
    ymin, xmin, ymax, xmax = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    heights = tf.squeeze(ymax - ymin, [1])
    widths = tf.squeeze(xmax - xmin, [1])
    return heights, widths
def prune_small_boxes(boxlist, min_side):
  """Removes boxes whose height or width is below min_side.

  Args:
    boxlist: BoxList holding N boxes.
    min_side: Minimum width AND height a box must have to survive pruning.

  Returns:
    A pruned BoxList.
  """
  with tf.name_scope('PruneSmallBoxes'):
    heights, widths = height_width(boxlist)
    wide_enough = tf.greater_equal(widths, min_side)
    tall_enough = tf.greater_equal(heights, min_side)
    keep = tf.logical_and(wide_enough, tall_enough)
    return gather(boxlist, tf.reshape(tf.where(keep), [-1]))
def assert_or_prune_invalid_boxes(boxes):
  """Makes sure boxes have valid sizes (ymax >= ymin, xmax >= xmin).

  When the hardware supports assertions, an error is raised for boxes with an
  invalid size. Where assertions are not supported (e.g. on TPU), boxes with
  invalid sizes are filtered out instead.

  Args:
    boxes: float tensor of shape [num_boxes, 4].

  Returns:
    boxes: float tensor of shape [num_valid_boxes, 4] with invalid boxes
      filtered out.

  Raises:
    tf.errors.InvalidArgumentError: When we detect boxes with invalid size.
      This is not supported on TPUs.
  """
  ymin, xmin, ymax, xmax = tf.split(
      boxes, num_or_size_splits=4, axis=1)
  height_check = tf.Assert(tf.reduce_all(ymax >= ymin), [ymin, ymax])
  width_check = tf.Assert(tf.reduce_all(xmax >= xmin), [xmin, xmax])
  with tf.control_dependencies([height_check, width_check]):
    checked_boxes = tf.concat([ymin, xmin, ymax, xmax], axis=1)
    # Where the asserts are no-ops (TPU), additionally drop degenerate boxes.
    pruned = prune_small_boxes(box_list.BoxList(checked_boxes), 0)
    return pruned.get()
def to_absolute_coordinates(boxlist,
                            height,
                            width,
                            check_range=True,
                            maximum_normalized_coordinate=1.1):
  """Converts normalized box coordinates to absolute pixel coordinates.

  Raises an assertion-failed error when the maximum box coordinate value
  exceeds maximum_normalized_coordinate (in which case the coordinates are
  presumably already absolute).

  Args:
    boxlist: BoxList with coordinates in range [0, 1].
    height: Maximum value for height of absolute box coordinates.
    width: Maximum value for width of absolute box coordinates.
    check_range: If True, checks if the coordinates are normalized or not.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized, default to 1.1.

  Returns:
    boxlist with absolute coordinates in terms of the image size.
  """
  with tf.name_scope('ToAbsoluteCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)
    if check_range:
      # Guard against inputs that already look like absolute coordinates.
      box_maximum = tf.reduce_max(boxlist.get())
      assert_data = ['maximum box coordinate value is larger '
                     'than %f: ' % maximum_normalized_coordinate, box_maximum]
      max_assert = tf.Assert(
          tf.greater_equal(maximum_normalized_coordinate, box_maximum),
          assert_data)
      with tf.control_dependencies([max_assert]):
        width = tf.identity(width)
    return scale(boxlist, height, width)
| StarcoderdataPython |
374264 | <reponame>flty/python<gh_stars>0
#!/usr/bin/python3
# -*- coding: utf8 -*-
#-- CHEASHEET -----------------------------------------------------------------#
# HOWTO: http://sublimetext.info/docs/en/reference/syntaxdefs.html
# REGEX: http://manual.macromates.com/en/regular_expressions
# Syntax Definition
syntax = {
'name': 'Python 3',
'comment': '\n\t\tWritten by <NAME> (c)2013-2014\n\t\thttp://github.com/petervaro/python\n\t',
'scopeName': 'source.python3',
'fileTypes': ['py'],
'keyEquivalent': '^~P',
# hashbang
'firstLineMatch': r'^#!/.*\bpython[\d.-]*\b',
# Folding marks for the TextEditor
'foldingStartMarker':
r'^\s*(def|class)\s+([.\w>]+)\s*(\((.*)\))?\s*:|\{\s*$|\(\s*$|\[\s*$|^\s*"""(?=.)(?!.*""")',
'foldingStopMarker':
r'^\s*$|^\s*\}|^\s*\]|^\s*\)|^\s*"""\s*$',
# Patterns
'patterns':
[
#-- COMMENT -------------------------------------------------------------------#
{
'name' : 'comment.line.hashmark.python3',
'match': r'#.*$\n?'
},
#-- NUMBERS -------------------------------------------------------------------#
{
'name' : 'constant.numeric.integer.binary.python3',
'match': r'\b0b[01]+'
},
{
'name' : 'constant.numeric.integer.hexadecimal.python3',
'match': r'\b0x\h+'
},
{
'name' : 'constant.numeric.integer.octal.python3',
'match': r'\b0o[0-7]+'
},
{
# .001 .1e6 .1E6 .1e+6 .1E+6 .1e-6 .1E-6
'name' : 'constant.numeric.float_and_complex.decimal.floatnumber.python3',
'match': r'(?<=\W|^)\.\d+([eE][+-]?\d+)?[jJ]?'
},
{
# 1. 1.0 1.e10 1.1e6 1.1E6 1.1e+6 1.1E+6 1.1e-6 1.1E-6
'name' : 'constant.numeric.float_and_complex.decimal.pointfloat.python3',
'match': r'\d+\.(\d*([eE][+-]?\d+)?)?[jJ]?(?=\W)'
},
{
# 1e6 1E6 1e+6 1E+6 1e-6 1E-6
'name' : 'constant.numeric.float_and_complex.decimal.exponent.python3',
'match': r'(?<![\.\d])\d+[eE][+-]?\d+[jJ]?'
},
{
'name' : 'constant.numeric.integer_and_complex.decimal.python3',
'match': r'\b(?<!\.)([1-9]\d*|0)[jJ]?'
},
#-- KEYWORDS ------------------------------------------------------------------#
{
'name' : 'storage.modifier.declaration.python3',
'match': r'\b(global|nonlocal)\b'
},
{
'name' : 'keyword.control.import_and_import_from.python3',
'match': r'\b(import|from)\b'
},
{
'name' : 'keyword.control.flow_block_delimiters.python3',
'match':
(
r'\b(elif|else|except|finally|for|if|try|while|'
r'with|break|continue|pass|raise|return|yield)\b'
)
},
{
'name' : 'keyword.operator.bool.logical.python3',
'match': r'\b(and|in|is|not|or)\b'
},
{
'name' : 'keyword.other.python3',
'match': r'\b(as|assert|del)\b'
},
#-- OPERATORS -----------------------------------------------------------------#
{
'name' : 'keyword.operator.comparison.python3',
'match': r'<=|>=|==|<|>|!='
},
{
'name' : 'keyword.operator.assignment.augmented.python3',
'match': r'\+=|-=|\*=|/=|//=|%=|&=|\|=|\^=|<<=|>>=|\*\*='
},
{
'name' : 'keyword.operator.arithmetic.python3',
'match': r'\+|-|\*|\*\*|/|//|%|<<|>>|&|\||\^|~'
},
{
'name' : 'keyword.operator.value_and_annotation_assignment.python3',
'match': r'=|->'
},
#-- CLASS ---------------------------------------------------------------------#
{
'name' : 'meta.class.python3',
'begin': r'^\s*(class)\s+(?=[a-zA-Z_]\w*(\s*\()?)',
'beginCaptures':
{
1: {'name': 'storage.type.class.python3'}
},
'patterns':
[
{
'contentName': 'entity.name.type.class.python3',
'begin': r'(?=[a-zA-Z_]\w*)',
'patterns':
[
{'include': '#entity_name_class'}
],
'end': r'(?!\w)'
},
{
'contentName': 'meta.class.inheritance.python3',
'begin': r'\(',
'patterns':
[
{
'contentName': 'entity.other.inherited-class.python3',
'begin': r'(?<=\(|,)\s*',
'patterns':
[
{'include': '$self'}
],
'end': r'\s*(?:,|(?=\)))',
'endCaptures':
{
1: {'name': 'punctuation.separator.inheritance.python3'}
}
}
],
'end': r'\)|:'
}
],
'end' : r'(\)?\s*:|\s+([\w#\s:]+))',
'endCaptures':
{
3: {'name': 'invalid.illegal.missing_section_begin.python3'}
}
},
#-- FUNCTION ------------------------------------------------------------------#
{
'name' : 'meta.function.python3',
'begin': r'^\s*(def)\s+(?=[a-zA-Z_]\w*\s*\()',
'beginCaptures':
{
1: {'name': 'storage.type.function.python3'}
},
'patterns':
[
# Function name
{
'contentName': 'entity.name.function.python3',
'begin': r'(?=[a-zA-Z_]\w*)',
'patterns':
[
{'include': '#entity_name_function'}
],
'end': r'(?!\w)'
},
# Arguments
{
'begin': r'\(',
'patterns':
[
# Keyword arguments
{
'begin': r'\b([a-zA-Z_]\w*)\s*(=)',
'beginCaptures':
{
1: {'name': 'variable.parameter.function.python3'},
2: {'name': 'keyword.operator.assignment.python3'}
},
'patterns':
[
# Keyword assignment
{
'begin': r'(?<=(=))\s*',
'beginCaptures':
{
1: {'name': 'keyword.operator.assignment.python3'}
},
'patterns':
[
{'include': '$self'}
],
'end': r'(?=,|[\n)])',
},
# Annotation assignment (kwargs)
{
'begin': r'(?<=:)\s*',
'patterns':
[
{'include': '$self'}
],
'end': r'(?=,|(=)|[\n)])',
'endCaptures':
{
1: {'name': 'keyword.operator.assignment.python3'}
}
}
],
'end': r'(?=,|[\n)])'
},
# Positional arguments
{
'begin': r'\b([a-zA-Z_]\w*)\s*',
'beginCaptures':
{
1: {'name': 'variable.parameter.function.python3'}
},
'patterns':
[
# Annotation assignment (args)
{
'begin': r'(?<=:)\s*',
'patterns':
[
{'include': '$self'}
],
'end': r'(?=,|[\n)])',
}
],
'end': r'(?=,|[\n)])'
}
],
'end': r'(?=\))'
},
# Annotation assignment (function)
{
'begin': r'(?<=\))\s*(->)\s*',
'beginCaptures':
{
1: {'name': 'keyword.operator.annotation.assignment.python3'}
},
'patterns':
[
{'include': '$self'}
],
'end': r'(?=\s*:)'
}
],
# todo: add illegal
'end': r'(\s*:)',
'endCaptures':
{
2: {'name': 'invalid.illegal.missing_section_begin.python3'}
}
},
#-- LAMBDA --------------------------------------------------------------------#
{
'name' : 'meta.function.anonymous.python3',
'begin': r'\b(lambda)',
'beginCaptures':
{
1: {'name': 'storage.type.function.anonymous.python3'}
},
'patterns':
[
{
'begin': r'\s+',
'patterns':
[
# Keyword arguments
{
'begin': r'\b([a-zA-Z_]\w*)\s*(=)',
'beginCaptures':
{
1: {'name': 'variable.parameter.function.python3'},
2: {'name': 'keyword.operator.assignment.python3'}
},
'patterns':
[
{'include': '$self'}
],
'end': r'(?=,|:)'
},
# Positional arguments
{
'name' : 'variable.parameter.function.python3',
'match': r'\b[a-zA-Z_]\w*'
}
],
'end': r'(?=:)'
}
],
'end': r':'
},
#-- DECORATOR -----------------------------------------------------------------#
# Decorator with arguments
{
'name' : 'meta.function.decorator.with_arguments.python3',
'begin': r'^\s*(@\s*[a-zA-Z_]\w*(\.[a-zA-Z_]\w*)*)\s*\(',
'beginCaptures':
{
1: {'name': 'support.function.decorator.python3'}
},
'patterns':
[
{'include': '#keyword_arguments'},
{'include': '$self'}
],
'end': r'\)'
},
# Decorator without arguments
{
'name' : 'meta.function.decorator.without_arguments.python3',
'begin': r'^\s*(@\s*[a-zA-Z_]\w*(\.[a-zA-Z_]\w*)*)',
'beginCaptures':
{
1: {'name': 'support.function.decorator.python3'}
},
'end': r'(?=\s|$\n?|#)'
},
#-- CONSTANTS -----------------------------------------------------------------#
{
'name' : 'constant.language.word_like.python3',
'match': r'\b(None|True|False|Ellipsis|NotImplemented)\b'
},
{
'name' : 'constant.language.symbol_like.python3',
'match': r'(?<=\W|^)\.\.\.(?=\W|$)'
},
#-- STORAGES ------------------------------------------------------------------#
{
'name' : 'storage.type.function.python3',
'match': r'\b(def|lambda)\b'
},
{
'name' : 'storage.type.class.python3',
'match': r'\b(class)\b'
},
#-- BUILTINS ------------------------------------------------------------------#
{
'include': '#builtin_types'
},
{
'include': '#builtin_functions'
},
{
'include': '#builtin_exceptions'
},
#-- MAGIC STUFFS --------------------------------------------------------------#
{
'include': '#magic_function_names'
},
{
'include': '#magic_variable_names'
},
#-- ETC -----------------------------------------------------------------------#
{
'include': '#line_continuation'
},
{
'include': '#language_variables'
},
#-- STRUCTURES ----------------------------------------------------------------#
# LIST
{
'name': 'meta.structure.list.python3',
'begin': r'\[',
'patterns':
[
{
'begin': r'(?<=\[|,)\s*(?![\],])',
'patterns':
[
{'include': '$self'}
],
'end' : r'\s*(?:,|(?=\]))'
}
],
'end' : r'\]'
},
# DICTIONARY
{
'name': 'meta.structure.dictionary.python3',
'begin': r'{',
'patterns':
[
{
'begin': r'(?<={|,|^)\s*(?![,}])',
'patterns':
[
{
'include': '$self'
}
],
'end' : r'\s*(?:(?=\})|(\:))'
},
{
'begin': r'(?<=:|^)\s*',
'patterns':
[
{
'include': '$self'
}
],
'end' : r'\s*(?:(?=\}|,))'
}
],
'end' : r'}'
},
# GROUPS, TUPLES
{
'name' : 'meta.structure.group.python3',
'begin': r'(?<=,|;|=|\+|-|\*|/|\||:|<|>|~|%|\^|\\)\s*\(',
'patterns':
[
{'include': '$self'}
],
'end': r'\)'
},
#-- ACCESS --------------------------------------------------------------------#
{
'name' : 'meta.function_call.python3',
'begin': r'(?<!:|,|;|\[|\{|\}|=|\+|-|\*|/|\||<|>|~|%|\^|\\|\n)\s*\(',
'patterns':
[
{'include': '#keyword_arguments'},
{'include': '$self'}
],
'end': r'\)'
},
#-- STRING --------------------------------------------------------------------#
{
'include': '#string_quoted'
}
],
#-- REPOSITORY ----------------------------------------------------------------#
'repository':
{
#-- BUILTINS ------------------------------------------------------------------#
'builtin_exceptions':
{
'name' : 'support.type.exception.python3',
'match':
(
r'(?<!\.)\b('
r'(Arithmetic|Buffer|Lookup|Assertion|Attribute|EOF|FloatingPoint|'
r'Import|Index|Key|Memory|Name|NotImplemented|OS|Overflow|Reference|'
r'Runtime|Syntax|Indentation|Tab|System|Type|UnboundLocal|Unicode|'
r'Unicode(Encode|Decode|Translate)?|Value|ZeroDivision|'
r'Environment|IO|VMS|Windows|BlockingIO|ChildProcess|'
r'BrokenPipe|Connection(Aborted|Refused|Reset)?|'
r'FileExists|FileNotFound|Interrupted|(Is|Not)ADirectory|'
r'Permission|ProcessLookup|Timeout)Error|(User|Deprecation|'
r'PendingDeprecation|Syntax|Runtime|Future|Import|Bytes|'
r'Resource)Warning|(Base)?Exception|(Generator|System)Exit|'
r'KeyboardInterrupt|StopIteration|Warning'
r')\b'
)
},
'builtin_functions':
{
'name' : 'support.function.builtin.python3',
'match':
(
r'(?<!\.)\b('
r'__import__|abs|all|any|ascii|bin|callable|chr|compile|delattr|'
r'dir|divmod|eval|exec|filter|format|getattr|globals|hasattr|hash|'
r'help|hex|id|input|isinstance|issubclass|iter|len|locals|map|max|'
r'min|next|oct|ord|pow|print|range|repr|round|setattr|sorted|sum|'
r'vars|zip'
r')\b'
)
},
# todo: rearrange -> what is builtin function and what is builtin type?
'builtin_types':
{
'name' : 'support.type.python3',
'match':
(
r'(?<!\.)\b('
r'basestring|bool|bytearray|bytes|classmethod|complex|dict|'
r'enumerate|float|frozenset|int|list|memoryview|object|open|'
r'property|reversed|set|slice|staticmethod|str|super|tuple|type'
r')\b'
)
},
#-- ENTITY --------------------------------------------------------------------#
'entity_name_class':
{
'patterns':
[
{'include': '#illegal_names'},
{'include': '#generic_names'}
]
},
'entity_name_function':
{
'patterns':
[
{'include': '#magic_function_names'},
{'include': '#illegal_names'},
{'include': '#generic_names'}
]
},
'generic_names':
{
'match': r'[a-zA-Z_]\w*'
},
'illegal_names':
{
'name' : 'invalid.illegal_names.name.python3',
'match':
(
r'\b('
r'and|as|assert|break|class|continue|def|del|elif|else|except|'
r'finally|for|from|global|if|import|in|is|lambda|nonlocal|not|'
r'or|pass|raise|return|try|while|with|yield'
r')\b'
)
},
#-- KEYWORDS ------------------------------------------------------------------#
'keyword_arguments':
{
'begin': r'\b([a-zA-Z_]\w*)\s*(=)(?!=)',
'beginCaptures':
{
1: {'name': 'variable.parameter.function.python3'},
2: {'name': 'keyword.operator.assignment.python3'}
},
'patterns':
[
{'include': '$self'}
],
'end': r'(?=,|[\n)])'
},
#-- MAGIC STUFFS --------------------------------------------------------------#
'magic_function_names':
{
'name' : 'support.function.magic.python3',
'match':
(
r'\b__('
r'abs|add|and|bool|bytes|call|ceil|complex|contains|copy|'
r'deepcopy|del|delattr|delete|delitem|dir|div|divmod|enter|eq|'
r'exit|float|floor|floordiv|format|ge|get|getattr|getattribute|'
r'getinitargs|getitem|getnewargs|getstate|gt|hash|hex|iadd|'
r'iand|idiv|ifloordiv|ilshift|imul|index|init|instancecheck|'
r'int|invert|ior|ipow|irshift|isub|iter|itruediv|ixor|le|len|'
r'lshift|lt|metaclass|missing|mod|mul|ne|neg|new|next|oct|or|'
r'pos|pow|prepare|radd|rand|rdiv|rdivmod|reduce|reduce_ex|'
r'repr|reversed|rfloordiv|rlshift|rmod|rmul|ror|round|rpow|'
r'rrshift|rshift|rsub|rtruediv|rxor|set|setattr|setitem|'
r'setstate|sizeof|str|sub|subclasscheck|subclasshook|truediv|'
r'trunc|unicode|weakref|xor'
r')__\b'
)
},
# todo: rearrange -> what is magic function and what is magic variable?
'magic_variable_names':
{
'name' : 'support.variable.magic.python3',
'match':
(
r'\b__('
r'all|annotations|bases|builtins|class|debug|dict|doc|file|'
r'members|metaclass|mro|name|qualname|slots|weakref'
r')__\b'
)
},
# conventions
'language_variables':
{
'name' : 'variable.language.python3',
'match': r'(?<!\.)\b(self|cls)\b'
},
'line_continuation':
{
'match': r'(\\)(.*)$\n?',
'captures':
{
1: {'name': 'punctuation.separator.continuation.line.python3'},
2: {'name': 'invalid.illegal.unexpected_text.python3'}
}
},
#-- STRING --------------------------------------------------------------------#
# todo: decide if source.sql and special words, like SELECT and INSERT needed
'string_quoted':
{
# stringprefix: "r" | "u" | "R" | "U" |
# bytesprefix : "b" | "B" | "br" | "Br" | "bR" |
# "BR" | "rb" | "rB" | "Rb" | "RB" |
'patterns':
[
# Single BLOCK
{
'name' : 'string.quoted.single.block.python3',
'begin': r"([bBuU]?)'''",
'beginCaptures':
{
1: {'name': 'storage.type.string.prefix.python3'}
},
'patterns':
[
{'include': '#string_patterns'}
],
'end': r"'''"
},
{
'name' : 'string.quoted.single.block.python3',
'begin': r"([rR][bB]|[bB][rR]|[rR])'''",
'beginCaptures':
{
1: {'name': 'storage.type.string.prefix.python3'}
},
'patterns':
[
{'include': '#string_patterns'},
{'include': '#regular_expressions'}
],
'end': r"'''"
},
# Single LINE
{
'name' : 'string.quoted.single.line.python3',
'begin': r"([bBuU]?)'",
'beginCaptures':
{
1: {'name': 'storage.type.string.prefix.python3'}
},
'patterns':
[
{'include': '#string_patterns'}
],
'end': r"'|(\n)",
'endCaptures':
{
1: {'name': 'invalid.illegal.unclosed_string.python3'}
}
},
{
'name' : 'string.quoted.single.line.python3',
'begin': r"([rR][bB]|[bB][rR]|[rR])'",
'beginCaptures':
{
1: {'name': 'storage.type.string.prefix.python3'}
},
'patterns':
[
{'include': '#string_patterns'},
{'include': '#regular_expressions'}
],
'end': r"'|(\n)",
'endCaptures':
{
1: {'name': 'invalid.illegal.unclosed_string.python3'}
}
},
# Double BLOCK
{
'name' : 'string.quoted.double.block.python3',
'begin': r'([bBuU]?)"""',
'beginCaptures':
{
1: {'name': 'storage.type.string.prefix.python3'}
},
'patterns':
[
{'include': '#string_patterns'}
],
'end': r'"""'
},
{
'name' : 'string.quoted.double.block.python3',
'begin': r'([rR][bB]|[bB][rR]|[rR])"""',
'beginCaptures':
{
1: {'name': 'storage.type.string.prefix.python3'}
},
'patterns':
[
{'include': '#string_patterns'},
{'include': '#regular_expressions'}
],
'end': r'"""'
},
# Double LINE
{
'name' : 'string.quoted.double.line.python3',
'begin': r'([bBuU]?)"',
'beginCaptures':
{
1: {'name': 'storage.type.string.prefix.python3'}
},
'patterns':
[
{'include': '#string_patterns'}
],
'end': r'"|(\n)',
'endCaptures':
{
1: {'name': 'invalid.illegal.unclosed_string.python3'}
}
},
# {
# 'name' : 'meta.format_attribute.format.python3',
# 'begin': r'(\.format)\s*\(',
# 'beginCaptures':
# {
# 1: {'name': 'invalid.illegal.none.python3'}
# },
# 'patterns':
# [
# {
# 'name' : 'string.quoted.double.format.python3',
# 'begin': r'([uUbB]?)"',
# 'beginCaptures':
# {
# 1: {'name': 'storage.type.string.prefix.python3'}
# },
# 'patterns':
# [
# {'include': '#string_patterns'},
# {'include': '#format_mini_language'}
# ],
# 'end': r'"|(\n)',
# 'endCaptures':
# {
# 1: {'name': 'invalid.illegal.unclosed_string.python3'}
# }
# }
# ],
# 'end': r'\)'
# },
# {
# 'name' : 'string.quoted.double.format.python3',
# 'begin': r'([uUbB]?)"',
# 'beginCaptures':
# {
# 1: {'name': 'storage.type.string.prefix.python3'}
# },
# 'patterns':
# [
# {'include': '#string_patterns'},
# {'include': '#format_mini_language'}
# ],
# 'end': r'"\.format', # |(\n)',
# 'endCaptures':
# {
# 2: {'name': 'invalid.illegal.unclosed_string.python3'}
# }
# },
{
'name' : 'string.quoted.double.line.python3',
'begin': r'([rR][bB]|[bB][rR]|[rR])"',
'beginCaptures':
{
1: {'name': 'storage.type.string.prefix.python3'}
},
'patterns':
[
{'include': '#string_patterns'},
{'include': '#regular_expressions'}
],
'end': r'"|(\n)',
'endCaptures':
{
1: {'name': 'invalid.illegal.unclosed_string.python3'}
}
}
]
},
'string_patterns':
{
'patterns':
[
{'include': '#constant_placeholder'},
{'include': '#escaped_characters'},
{'include': '#escaped_unicode_characters'}
]
},
'constant_placeholder':
{
'name' : 'string.interpolated.placeholder.python3',
'match': r'%(\(\w+\))?#?0?-?[ ]?\+?(\d*|\*)(\.(\d*|\*))?[hlL]?[diouxXeEfFgGcrs%]'
},
'format_mini_language':
{
'patterns':
[
{
'name' : 'constant.other.placeholder.format.python3',
'match': r'\{\}'
}
]
},
'escaped_characters':
{
# escape:
# hex | octal | newline | double-quote |
# single-quote | bell | backspace | formfeed |
# line-feed | return | tab | vertical-tab | escape char
'name' : 'constant.character.escaped.special.python3',
'match': r'\\(x\h{2}|[0-7]{3}|\n|\"|\'|a|b|f|n|r|t|v|\\)'
},
'escaped_unicode_characters':
{
# 16bit hex | 32bit hex | unicodename
'name' : 'constant.character.escaped.python3',
'match': r'\\(u\h{4}|U\h{8}|N\{[a-zA-Z\s]+\})'
},
#-- REGEX ---------------------------------------------------------------------#
'regular_expressions':
{
'patterns':
[
{
'name' : 'keyword.control.anchor.regex.python3',
'match': r'\\[bBAZzG]|\^|\$'
},
{
# \number
'name' : 'keyword.other.group_reference_order.regex.python3',
'match': r'\\[1-9]\d?'
},
{
# (?P=this_is_a_group)
'name' : 'keyword.other.group_reference_name.regex.python3',
'match': r'\(\?P=[a-zA-Z_]\w*\)'
},
{
# {2}, {2,}, {,2}, {2,3}, {2,3}?
'name' : 'keyword.operator.quantifier.regex.python3',
'match': r'[?+*][?+]?|\{(\d+,\d+|\d+,|,\d+|\d+)\}\??'
},
{
'name' : 'keyword.operator.or.regex.python3',
'match': r'\|'
},
{
# (?# comment)
'name' : 'comment.block.regex.python3',
'begin': r'\(\?#',
'end' : r'\)'
},
{
# flags: a: ASCII-only matching)
# i: ignore case
# L: locale dependent
# m: multi-line
# s: dot matches all
# u: unicode
# x: extended form (verbose)
'name' : 'keyword.other.option_toggle.regex.python3',
'match': r'\(\?[aiLmsux]+\)'
},
{
# (?= positive look-ahead)
# (?! negative look-ahead)
# (?<= positive look-behind)
# (?<! negative look-behind)
# (?: non-capturing)
# (?P<id> group)
# (?(id/name)yes-pattern|no-pattern)
'name' : 'meta.group.assertion.regex.python3',
'begin': r'\(\?(=|!|<=|<!|:|P<[a-z]\w*>|\(([1-9]\d?|[a-zA-Z_]\w*\)))?',
'patterns':
[
{'include': '#regular_expressions'}
],
'end': r'\)'
},
{
'include': '#regular_expressions_escaped_characters'
},
{
'include': '#regular_expressions_character_classes'
}
]
},
'regular_expressions_character_classes':
{
'patterns':
[
{
# \w, \W, \s, \S, \d, \D, .
'name' : 'constant.character.character_class.regex.python3',
'match': r'\\[wWsSdD]|\.'
},
{
# [set of characters]
'name' : 'constant.other.character_class.set.regex.python3',
'begin': r'\[(\^)?(\](?=.*\]))?',
'beginCaptures':
{
1: {'name': 'keyword.operator.negation.regex.python3'}
},
'patterns':
[
{
'name': 'constant.character.escaped.special.open.regex.python3',
'match': r'\['
},
{'include': '#regular_expressions_character_classes'},
{'include': '#regular_expressions_escaped_characters'}
],
'end': r'(?<!\\)\]'
}
]
},
'regular_expressions_escaped_characters':
{
'name' : 'constant.character.escaped.special.regex.python3',
'match': r'\\(\\|\?|\.|\*|\+|\{|\}|\||\(|\)|\[|\]|\^|\$)'
}
},
'uuid': '851B1429-B8B4-4C1E-8030-399BDA994393'
}
if __name__ == '__main__':
    # Build the Sublime Text language files from the `syntax` dictionary
    # defined above. `convert` is a sibling helper module of this project;
    # dict_to_lang serializes the dict into repository and test grammar files.
    import convert
    rname = 'Python3'       # base name for the repository output files
    tname = 'Python3_TEST'  # base name for the locally-installed test build
    convert.dict_to_lang(dictionary=syntax,
                         repo_fname=rname,
                         repo_dname=rname,
                         test_fname=tname,
                         test_dname=tname,
                         test_fpath='~/Library/Application Support/'
                                    'Sublime Text 3/Packages/User/{}/'.format(tname))
| StarcoderdataPython |
9643673 | <gh_stars>0
"""
author: <NAME>
all tasks to run are included in this file
"""
import os
import time
import threading
from cv2 import cuda_BufferPool
import RPi.GPIO as GPIO
from flask import Flask, render_template, redirect
import go_to_bed
### constants ###
MIN_DELAY = 10  # delay (s) for tasks that only need minute-level precision
FAST_DELAY = 0.01  # delay (s) for tasks that need to update immediately
SNOOZE_TIME = 1  # TODO: 10 # snooze time in minutes
SOUND_PATH = "sound/Let Her Go.mp3"  # path to alarm sound file
# FUTURE scan for available alarm music in the sound folder
# available_files = []
# for (dirpath, dirnames, filenames) in os.walk("./sound"):
#     available_files.extend(filenames)
BED_TIME_THRESHOLD = 5  # minutes of grace to still count as "followed schedule"
SETTING_ITEM = ['bed time', 'wake up time']  # rows of the OLED settings menu
# LEDs are wired active-low: driving the pin low lights the LED
LED_ON = GPIO.LOW
LED_OFF = GPIO.HIGH
# MAIN_STATUS state machine: 0: wakeup, 1: need sleep, 2: sleep, 3: alarm
MAIN_STATUS = 'main status'
MAIN_STATUS_WAKEUP = 0
MAIN_STATUS_NEED_SLEEP = 1
MAIN_STATUS_SLEEP = 2
MAIN_STATUS_ALARM = 3
# ALARM_SWITCH: 0: on, 1: off (values match the GPIO input level of the switch)
ALARM_STATUS = 'alarm status'
ALARM_ON = 0
ALARM_OFF = 1
# OLED_STATUS: which screen the OLED is currently showing
OLED_STATUS = 'oled status'
OLED_DISPLAY = 0
OLED_SETTING = 1
OLED_SET_HOUR = 2
OLED_SET_MINUTE = 3
# setting status TODO: when oled timeout or confirm, update status
# indicate which option is currently selected
SETTING_SELECTION = 0
# display time on oled
SETTING_TIME = 1
# global variables
current_status = {MAIN_STATUS: MAIN_STATUS_WAKEUP,
                  ALARM_STATUS: ALARM_OFF,
                  OLED_STATUS: OLED_DISPLAY}
bed_time = [11, 10]  # TODO: [22, 30] # time to sleep (hour, minute)
today_bed_time = 0  # today's bed time (time.time())
up_time = [11, 15]  # TODO: [7, 0] # time to wake up (hour, minute)
# Bug fix: alarm_time was previously `alarm_time = up_time`, i.e. an alias of
# the same list, so snoozing (which mutates alarm_time via set_time/inc_time)
# silently shifted the user's configured wake-up time as well.  stop_alarm's
# `set_time(alarm_time, *up_time)` reset shows the two were meant to diverge.
alarm_time = list(up_time)  # time to play alarm clock sound (hour, minute)
sleep_info = [("05/6", "10:30", True),  # list to store sleep info (date, time, follow schedule)
              ("05/7", "11:53", False),
              ("05/8", "10:30", True),
              ("05/9", "10:30", True)]  # TODO: make empty []
light_threshold = 1.5  # threshold voltage for light sensor, user tunable
time_12_hour = False  # 12 hour mode or 24 hour mode
setting_status = {SETTING_SELECTION: 0,
                  SETTING_TIME: bed_time}
settings_info = [['sleep time', f'{bed_time[0]}:{bed_time[1]}'],
                 ['wake time', f"{up_time[0]}:{up_time[1]}"],
                 ['volume', '100%'],
                 ['brightness', '100%'],
                 ['light sensitivity', light_threshold],
                 ['12 hour format', time_12_hour]]
friends_sleep_info = [('Jerry', '83%'),
                      ('Tom', '75%'),
                      ('George', '72%'),
                      ('Mary', '65%'),
                      ('Bob', '60%'),
                      ('Alice', '55%'),
                      ('Jack', '50%'),
                      ('Linda', '45%'),
                      ('John', '40%'),
                      ('Jane', '35%')]
# GPIO pins (BCM numbering)
SNOOZE_BUT = 24
STOP_BUT = 23
RED_LED = 25
GREEN_LED = 26
ALARM_SWITCH = 22
ENCODER_L = 14
ENCODER_R = 15
ENCODER_BUT = 16
### onetime tasks ###
def simple_GPIO_setup():
    """
    Configure the plain-GPIO devices: snooze/stop buttons, alarm switch,
    red/green status LEDs and the rotary encoder.

    Buttons and the switch are sampled with internal pull-DOWN resistors
    (pressed/on == high, interrupt on the rising edge); the encoder pins
    use internal pull-ups (active low, falling edge).  LEDs are wired
    active-low (LED_ON == GPIO.LOW).
    """
    GPIO.setmode(GPIO.BCM)
    # snooze and stop buttons: pull-down, interrupt on the press (rising) edge
    GPIO.setup(SNOOZE_BUT, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
    GPIO.add_event_detect(SNOOZE_BUT, GPIO.RISING, callback=pause_alarm)
    GPIO.setup(STOP_BUT, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
    GPIO.add_event_detect(STOP_BUT, GPIO.RISING, callback=stop_alarm)
    # alarm on/off switch: pull-down, interrupt on both edges; seed the
    # current alarm state from the present switch level
    GPIO.setup(ALARM_SWITCH, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
    current_status[ALARM_STATUS] = GPIO.input(ALARM_SWITCH)
    GPIO.add_event_detect(ALARM_SWITCH, GPIO.BOTH, callback=alarm_switch)
    # red LED starts off; green LED mirrors whether the alarm is armed
    GPIO.setup(RED_LED, GPIO.OUT)
    GPIO.output(RED_LED, LED_OFF)
    GPIO.setup(GREEN_LED, GPIO.OUT)
    if current_status[ALARM_STATUS] == ALARM_ON:
        GPIO.output(GREEN_LED, LED_ON)
    else:
        GPIO.output(GREEN_LED, LED_OFF)
    # rotary encoder: pulled up, so pins idle high
    GPIO.setup(ENCODER_L, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.setup(ENCODER_R, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.setup(ENCODER_BUT, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    # interrupts on falling edges (rotation step / button press)
    GPIO.add_event_detect(ENCODER_L, GPIO.FALLING, callback=encoder_rotate)
    GPIO.add_event_detect(ENCODER_BUT, GPIO.FALLING, callback=encoder_but)
    # timestamps used by encoder_rotate for direction debouncing
    global encoder_ccw_time, encoder_cw_time
    encoder_ccw_time = time.time()
    encoder_cw_time = time.time()
def peripheral_setup():
    """
    Set up all peripherals: RFID reader, OLED display, 7-segment clock,
    speaker, light sensor and the Flask web application.

    Bug fix: ``webpage_flask`` was previously assigned as a function local,
    so the module-level route decorators (``@webpage_flask.route``) could
    never see it; it is now declared global like the other peripherals.
    NOTE(review): the decorators execute at import time, before this
    function runs -- the Flask app should ideally be created at module
    level instead; confirm against how this module is started.
    """
    global rfid, oled, clock, speaker, light_sensor, webpage_flask
    # setup RFID
    rfid = go_to_bed.RFID()
    # setup OLED (I2C)
    oled = go_to_bed.OLED()
    # setup 7-segment clock display
    clock = go_to_bed.Clock()
    # setup speaker
    speaker = go_to_bed.Speaker()
    speaker.set_sound(SOUND_PATH)  # FUTURE: let user choose sound
    # setup light sensor
    light_sensor = go_to_bed.ADC()
    # setup webpage
    webpage_flask = Flask(__name__, static_folder='assets')
### interrupt ###
def alarm_switch(channel):
    """
    GPIO edge callback for the alarm arm/disarm toggle switch.

    Samples the switch level after a short debounce delay and mirrors it
    into ``current_status[ALARM_STATUS]``; the green LED lights while the
    alarm is armed (ALARM_ON == 0 == the switch's low level).
    """
    print("switch interrupt")  # TODO: debug output, remove for release
    # debounce: wait 20 ms for the contacts to settle before sampling
    time.sleep(0.020)
    if GPIO.input(channel) == ALARM_ON:
        current_status[ALARM_STATUS] = ALARM_ON
        GPIO.output(GREEN_LED, LED_ON)
    else:
        current_status[ALARM_STATUS] = ALARM_OFF
        GPIO.output(GREEN_LED, LED_OFF)
def pause_alarm(channel):
    """
    Edge callback for the snooze/back button.

    While the alarm is ringing this snoozes it for SNOOZE_TIME minutes;
    while navigating the OLED menus it acts as a "back" button.
    """
    # 20 ms debounce before sampling the settled level
    time.sleep(0.020)
    if not GPIO.input(channel):
        return
    # silence whatever is currently playing
    if not speaker.is_stopped():
        speaker.stop_sound()
    if current_status[MAIN_STATUS] == MAIN_STATUS_ALARM:
        # push the next alarm back by the snooze interval
        hour, minute, _ = get_time()
        set_time(alarm_time, hour, (minute + SNOOZE_TIME))
    elif current_status[OLED_STATUS] == OLED_SETTING:
        # leave the settings menu entirely
        setting_status[SETTING_SELECTION] = 0
        current_status[OLED_STATUS] = OLED_DISPLAY
        oled_update_display()
    elif current_status[OLED_STATUS] == OLED_SET_HOUR:
        # back out of hour editing into the settings list
        current_status[OLED_STATUS] = OLED_SETTING
        oled_update_display()
    elif current_status[OLED_STATUS] == OLED_SET_MINUTE:
        # back out of minute editing into hour editing
        current_status[OLED_STATUS] = OLED_SET_HOUR
        oled_update_display()
def stop_alarm(channel):
    """
    Edge callback for the stop button: silences the speaker and, if the
    alarm was ringing, returns to the wake-up state and re-arms the alarm
    for the regular wake-up time.
    """
    global alarm_time
    # 20 ms debounce before reading the settled level
    time.sleep(0.020)
    if not GPIO.input(channel):
        return
    # kill the alarm sound if it is playing
    if not speaker.is_stopped():
        speaker.stop_sound()
    if current_status[MAIN_STATUS] != MAIN_STATUS_ALARM:
        return
    # back to the awake state; reset the next alarm to the wake-up time
    current_status[MAIN_STATUS] = MAIN_STATUS_WAKEUP
    oled_update_display()
    set_time(alarm_time, *up_time)
def encoder_rotate(channel):
    """
    Edge callback for the rotary encoder's left pin (falling edge).

    The level of the right pin decides the rotation direction; depending on
    the current OLED state a rotation either moves the settings-menu
    selection or adjusts the hour/minute field being edited (wrapping at
    24/60).

    NOTE(review): each direction branch tests the timestamp last written by
    the *opposite* branch (``encoder_cw_time`` vs ``encoder_ccw_time``) --
    presumably a 100 ms debounce against bounce from a rotation in the other
    direction, but the crossed naming should be confirmed on hardware.
    """
    assert channel == ENCODER_L
    global encoder_ccw_time, encoder_cw_time
    if GPIO.input(ENCODER_R) == GPIO.HIGH:
        if time.time() - encoder_cw_time < 0.1:
            pass  # still clockwise
        else:
            if current_status[OLED_STATUS] == OLED_SETTING:
                setting_status[SETTING_SELECTION] += 1
            elif current_status[OLED_STATUS] == OLED_SET_HOUR:
                setting_status[SETTING_TIME][0] = (
                    setting_status[SETTING_TIME][0] + 1) % 24
            elif current_status[OLED_STATUS] == OLED_SET_MINUTE:
                setting_status[SETTING_TIME][1] = (
                    setting_status[SETTING_TIME][1] + 1) % 60
            oled_update_display()
        encoder_ccw_time = time.time()
    elif GPIO.input(ENCODER_R) == GPIO.LOW:
        if time.time() - encoder_ccw_time < 0.1:
            pass  # still counter clockwise
        else:
            if current_status[OLED_STATUS] == OLED_SETTING:
                setting_status[SETTING_SELECTION] -= 1
            elif current_status[OLED_STATUS] == OLED_SET_HOUR:
                setting_status[SETTING_TIME][0] = (
                    setting_status[SETTING_TIME][0] - 1) % 24
            elif current_status[OLED_STATUS] == OLED_SET_MINUTE:
                setting_status[SETTING_TIME][1] = (
                    setting_status[SETTING_TIME][1] - 1) % 60
            oled_update_display()
        encoder_cw_time = time.time()
def encoder_but(channel):
    """
    Edge callback for the rotary encoder push button (active low).

    Walks the OLED UI state machine forward:
    display -> settings menu -> set hour -> set minute -> (commit) display.

    NOTE: ``setting_status[SETTING_TIME]`` is made an *alias* of the
    ``bed_time``/``up_time`` list, so encoder rotations already edit those
    lists in place; the explicit copy-back on commit is redundant but
    harmless.
    NOTE(review): the bed-vs-up choice here tests ``SETTING_SELECTION == 0``
    while oled_update_display highlights the row via ``selection//2 % 2`` --
    the two mappings disagree for most selection values; confirm intent.
    """
    global bed_time, up_time
    # 20 ms debounce before sampling the (active-low) button
    time.sleep(0.020)
    if not GPIO.input(channel):
        if current_status[OLED_STATUS] == OLED_DISPLAY:
            current_status[OLED_STATUS] = OLED_SETTING
        elif current_status[OLED_STATUS] == OLED_SETTING:
            # determine whether to set bed time or up time
            if setting_status[SETTING_SELECTION] == 0:
                setting_status[SETTING_TIME] = bed_time
            else:
                setting_status[SETTING_TIME] = up_time
            current_status[OLED_STATUS] = OLED_SET_HOUR
        elif current_status[OLED_STATUS] == OLED_SET_HOUR:
            current_status[OLED_STATUS] = OLED_SET_MINUTE
        elif current_status[OLED_STATUS] == OLED_SET_MINUTE:
            # store current setting
            if setting_status[SETTING_SELECTION] == 0:
                bed_time[0] = setting_status[SETTING_TIME][0]
                bed_time[1] = setting_status[SETTING_TIME][1]
                print('update bed time:', bed_time)  # TODO: test
            else:
                up_time[0] = setting_status[SETTING_TIME][0]
                up_time[1] = setting_status[SETTING_TIME][1]
                print('update up time:', up_time)  # TODO: test
            setting_status[SETTING_SELECTION] = 0
            current_status[OLED_STATUS] = OLED_DISPLAY
        oled_update_display()
### helper functions ###
def get_time():
    """Return the current local wall-clock time as (hour, minute, second)."""
    now = time.localtime()
    return now.tm_hour, now.tm_min, now.tm_sec
def get_date():
    """Return today's local date as (month, day)."""
    today = time.localtime()
    return today.tm_mon, today.tm_mday
def set_time(time_object, hour, minute):
    """
    Store a time of day into *time_object* (a [hour, minute] list, 24-hour
    format), carrying minute overflow into the hour and wrapping the hour
    at 24.  Mutates the list in place.
    """
    carry, wrapped_minute = divmod(minute, 60)
    time_object[0] = (hour + carry) % 24
    time_object[1] = wrapped_minute
def inc_time(time_object, hour=0, minute=0):
    """
    Advance *time_object* (a [hour, minute] list) in place by the given
    offsets, carrying minute overflow into the hour and wrapping the hour
    around the 24-hour clock.
    """
    total_minute = time_object[1] + minute
    time_object[0] = (time_object[0] + hour + total_minute // 60) % 24
    time_object[1] = total_minute % 60
def oled_update_display():
    """
    Redraw the OLED according to ``current_status`` and ``setting_status``.

    Must be called manually every time ``current_status`` is changed.
    FUTURE: separate process to check for state change and call
    oled_display automatically?
    """
    oled.clear_display()
    if current_status[OLED_STATUS] == OLED_DISPLAY:
        # main status screen: one line per MAIN_STATUS state
        if current_status[MAIN_STATUS] == MAIN_STATUS_WAKEUP:
            oled.add_text('wake up')  # TODO: change to picture
        elif current_status[MAIN_STATUS] == MAIN_STATUS_NEED_SLEEP:
            oled.add_text('need sleep')  # TODO: change to picture
            oled.add_text('40% slept')
        elif current_status[MAIN_STATUS] == MAIN_STATUS_SLEEP:
            oled.add_text('sleep')  # TODO: change to picture
        elif current_status[MAIN_STATUS] == MAIN_STATUS_ALARM:
            oled.add_text('alarm')  # TODO: change to picture
    elif current_status[OLED_STATUS] == OLED_SETTING:
        # settings menu: '>' marks the selected row; two encoder detents
        # move the selection by one row (hence the //2)
        for i in range(len(SETTING_ITEM)):
            if i == (setting_status[SETTING_SELECTION]//2 % len(SETTING_ITEM)):
                oled.add_text('> ' + SETTING_ITEM[i])
            else:
                oled.add_text(SETTING_ITEM[i])
    elif current_status[OLED_STATUS] == OLED_SET_HOUR:
        # hour field highlighted with surrounding dashes
        h, m = setting_status[SETTING_TIME]
        oled.add_text(f'-{h:02d}-:{m:02d}')
    elif current_status[OLED_STATUS] == OLED_SET_MINUTE:
        # minute field highlighted with surrounding dashes
        h, m = setting_status[SETTING_TIME]
        oled.add_text(f'{h:02d}:-{m:02d}-')
    oled.update_display()
@webpage_flask.route("/")
def home():
    # The site root simply forwards to the dashboard page.
    return redirect("/index")
@webpage_flask.route("/index")
def home_template():
    """Render the dashboard: sleep history, schedule and friends' stats."""
    status_names = {
        MAIN_STATUS_WAKEUP: 'wakeup',
        MAIN_STATUS_NEED_SLEEP: 'need sleep',
        MAIN_STATUS_SLEEP: 'sleep',
        MAIN_STATUS_ALARM: 'alarm',
    }
    status = status_names.get(current_status[MAIN_STATUS], 'wakeup')
    return render_template("index.html",
                           sleep_info=sleep_info,
                           up_time=f"{up_time[0]}:{up_time[1]}",
                           bed_time=f"{bed_time[0]}:{bed_time[1]}",
                           other_info=friends_sleep_info,
                           status=status)
@webpage_flask.route("/settings")
def settings_template():
    """Render the settings page, refreshing the cached settings table."""
    global settings_info
    # rebuild the table from the live configuration values
    settings_info = [
        ['sleep time', f'{bed_time[0]:02d}:{bed_time[1]:02d}'],
        ['wake time', f"{up_time[0]:02d}:{up_time[1]:02d}"],
        ['volume', '50%'],
        ['brightness', '50%'],
        ['light sensitivity', light_threshold],
        ['12 hour format', time_12_hour],
    ]
    return render_template("settings.html", settings=settings_info)
### background tasks ###
def run_webpage():
    """
    Background-task placeholder: intended to run the Flask webpage
    continuously (see the commented-out ``webpage_flask.run`` call in
    ``__main__``).  Currently does nothing.
    """
    # TODO
    pass
def update_time():
    """
    Background task: refresh the 7-segment clock every second, alternating
    the colon on and off so that it blinks.
    """
    while True:
        hour, minute, _ = get_time()
        if time_12_hour:
            # map 0..23 onto the 12-hour face (0 and 12 both show as 12)
            hour %= 12
            if hour == 0:
                hour = 12
        for face in (f'{hour:02d}:{minute:02d}', f'{hour:02d}{minute:02d}'):
            clock.set_display(face)
            time.sleep(1)
def check_sleeping():
    """
    Background task: drives the bedtime state machine once per second.

    At the configured bed time the state moves WAKEUP -> NEED_SLEEP (red
    LED on).  The user counts as asleep once the phone is on the RFID
    reader *and* the room light reads at or below ``light_threshold``;
    removing the phone or turning the light back on drops SLEEP back to
    NEED_SLEEP.  Each transition into SLEEP appends a (date, time,
    followed-schedule) record to ``sleep_info``.
    """
    global today_bed_time, bed_time
    while True:
        if current_status[MAIN_STATUS] == MAIN_STATUS_WAKEUP:
            h, m, _ = get_time()
            if h == bed_time[0] and m == bed_time[1]:
                current_status[MAIN_STATUS] = MAIN_STATUS_NEED_SLEEP
                oled_update_display()
                today_bed_time = time.time()
                GPIO.output(RED_LED, LED_ON)
        if current_status[MAIN_STATUS] == MAIN_STATUS_NEED_SLEEP:
            # check phone
            rfid.read()  # will block until RFID is read
            voltage = light_sensor.read()
            # check light sensor
            if voltage <= light_threshold:
                current_status[MAIN_STATUS] = MAIN_STATUS_SLEEP
                oled_update_display()
                # if sleep within BED_TIME_THRESHOLD, count as follow schedule
                month, day = get_date()
                if (time.time() - today_bed_time)/60 <= BED_TIME_THRESHOLD:
                    sleep_info.append((f'{month:02d}/{day:02d}',
                                       f'{bed_time[0]:02d}:{bed_time[1]:02d}',
                                       True))
                else:
                    h, m, _ = get_time()
                    sleep_info.append((f'{month:02d}/{day:02d}',
                                       f'{h:02d}:{m:02d}',
                                       False))
                GPIO.output(RED_LED, LED_OFF)
        elif current_status[MAIN_STATUS] == MAIN_STATUS_SLEEP:
            # check phone (non-blocking; `id` shadows the builtin locally)
            id, _ = rfid.read_no_block()
            voltage = light_sensor.read()
            # check light sensor: phone removed or light turned back on
            if not id or voltage > light_threshold:
                current_status[MAIN_STATUS] = MAIN_STATUS_NEED_SLEEP
                oled_update_display()
        time.sleep(1)
def alarm_clock():
    """
    Background task: fires the alarm at wake-up time.

    If the user was asleep at ``up_time`` and the alarm is armed, the state
    moves to ALARM and the sound replays every SNOOZE_TIME minutes (via
    ``alarm_time``) until the stop button resets the state.  Polls every
    MIN_DELAY seconds, which is fine for minute-level precision.
    """
    while True:
        h, m, _ = get_time()
        if current_status[MAIN_STATUS] == MAIN_STATUS_SLEEP:
            if h == up_time[0] and m == up_time[1]:
                if current_status[ALARM_STATUS] == ALARM_ON:
                    # set status to alarm if sleep before
                    current_status[MAIN_STATUS] = MAIN_STATUS_ALARM
                else:
                    current_status[MAIN_STATUS] = MAIN_STATUS_WAKEUP
                oled_update_display()
        if current_status[MAIN_STATUS] == MAIN_STATUS_ALARM:
            if h == alarm_time[0] and m == alarm_time[1]:
                # move next alarm to SNOOZE_TIME minutes later
                inc_time(alarm_time, minute=SNOOZE_TIME)
                speaker.play_sound()
        time.sleep(MIN_DELAY)
if __name__ == "__main__":
    # one time tasks: configure GPIO and peripherals, draw initial screen
    simple_GPIO_setup()
    peripheral_setup()
    oled_update_display()
    # background tasks
    background_tasks = [alarm_clock, update_time, check_sleeping]
    # start background tasks (daemon threads die with the main thread)
    for task in background_tasks:
        thread = threading.Thread(target=task, daemon=True)
        thread.start()
    # TODO
    # # turn on webpage
    # webpage_flask.run(host='0.0.0.0', port=80) #, debug=True, threaded=True)
    # TODO: test only -- keep the main thread alive until the user exits
    try:
        print("program started")
        ex = input('type exit to exit: ')
        while ex != 'exit':
            ex = input('type exit to exit: ')
    except KeyboardInterrupt:
        pass
    print("program finished, perform GPIO cleanup")
    GPIO.cleanup()
| StarcoderdataPython |
5141483 | # Copyright (c) 2017 LINE Corporation
# These sources are released under the terms of the MIT license: see LICENSE
from unittest import mock
from django.contrib.auth.models import User
from django.urls import reverse
import promgen.templatetags.promgen as macro
from promgen import models, prometheus
from promgen.tests import PromgenTest
_RULES = '''
ALERT RuleName
IF up==0
FOR 1s
LABELS {severity="severe"}
ANNOTATIONS {rule="http://example.com/rule/%d/edit", summary="Test case"}
'''.lstrip()
_RULE_NEW = '''
groups:
- name: example.com
rules:
- alert: RuleName
annotations:
rule: http://example.com/rule/%d/edit
summary: Test case
expr: up==0
for: 1s
labels:
severity: severe
'''.lstrip().encode('utf-8')
class RuleTest(PromgenTest):
    """Tests for Promgen rule rendering, copying, importing and macros.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    is replaced by ``assertEqual`` in test_macro.
    """

    @mock.patch('django.dispatch.dispatcher.Signal.send')
    def setUp(self, mock_signal):
        # A site-level rule with one label and one annotation, used by
        # every test below.
        self.user = User.objects.create_user(id=999, username="Foo")
        self.client.force_login(self.user)
        self.shard = models.Shard.objects.create(name='Shard 1')
        self.site = models.Site.objects.get_current()
        self.service = models.Service.objects.create(id=999, name='Service 1', shard=self.shard)
        self.rule = models.Rule.create(
            name='RuleName',
            clause='up==0',
            duration='1s',
            obj=self.site
        )
        models.RuleLabel.objects.create(name='severity', value='severe', rule=self.rule)
        models.RuleAnnotation.objects.create(name='summary', value='Test case', rule=self.rule)

    @mock.patch('django.dispatch.dispatcher.Signal.send')
    def test_write_old(self, mock_post):
        # v1 (plain-text) rule file rendering
        result = prometheus.render_rules(version=1)
        self.assertEqual(result, _RULES % self.rule.id)

    @mock.patch('django.dispatch.dispatcher.Signal.send')
    def test_write_new(self, mock_post):
        # v2 (YAML) rule file rendering
        result = prometheus.render_rules(version=2)
        self.assertEqual(result, _RULE_NEW % self.rule.id)

    @mock.patch('django.dispatch.dispatcher.Signal.send')
    def test_copy(self, mock_post):
        service = models.Service.objects.create(name='Service 2', shard=self.shard)
        copy = self.rule.copy_to(content_type='service', object_id=service.id)
        # Test that our copy has the same labels and annotations
        self.assertIn('severity', copy.labels)
        self.assertIn('summary', copy.annotations)
        # and test that we actually duplicated them and not moved them
        self.assertEqual(models.RuleLabel.objects.count(), 3, 'Copied rule has exiting labels + service label')
        self.assertEqual(models.RuleAnnotation.objects.count(), 2)

    @mock.patch('django.dispatch.dispatcher.Signal.send')
    def test_import_v1(self, mock_post):
        self.client.post(reverse('rule-import'), {
            'rules': PromgenTest.data('examples', 'import.rule')
        })
        # Includes count of our setUp rule + imported rules
        self.assertEqual(models.Rule.objects.count(), 3, 'Missing Rule')
        self.assertEqual(models.RuleLabel.objects.count(), 4, 'Missing labels')
        self.assertEqual(models.RuleAnnotation.objects.count(), 7, 'Missing annotations')

    @mock.patch('django.dispatch.dispatcher.Signal.send')
    def test_import_v2(self, mock_post):
        self.client.post(reverse('rule-import'), {
            'rules': PromgenTest.data('examples', 'import.rule.yml')
        })
        # Includes count of our setUp rule + imported rules
        self.assertEqual(models.Rule.objects.count(), 3, 'Missing Rule')
        self.assertEqual(models.RuleLabel.objects.count(), 4, 'Missing labels')
        self.assertEqual(models.RuleAnnotation.objects.count(), 9, 'Missing annotations')

    @mock.patch('django.dispatch.dispatcher.Signal.send')
    def test_macro(self, mock_post):
        # The exclusion macro expands differently depending on whether the
        # rule is attached to the site, a service, or a project.
        self.project = models.Project.objects.create(name='Project 1', service=self.service)
        clause = 'up{%s}' % macro.EXCLUSION_MACRO
        rules = {
            'common': {'assert': 'up{service!~"Service 1"}'},
            'service': {'assert': 'up{service="Service 1",project!~"Project 1"}'},
            'project': {'assert': 'up{service="Service 1",project="Project 1",}'},
        }
        common_rule = models.Rule.create(name='Common', clause=clause, duration='1s', obj=self.site)
        rules['common']['model'] = models.Rule.objects.get(pk=common_rule.pk)
        service_rule = common_rule.copy_to('service', self.service.id)
        rules['service']['model'] = models.Rule.objects.get(pk=service_rule.pk)
        project_rule = service_rule.copy_to('project', self.project.id)
        rules['project']['model'] = models.Rule.objects.get(pk=project_rule.pk)
        for k, r in rules.items():
            # assertEquals is a deprecated alias; use assertEqual
            self.assertEqual(macro.rulemacro(r['model'].clause, r['model']), r['assert'], 'Expansion wrong for %s' % k)
| StarcoderdataPython |
3235326 | from ..app import Blueprint
from scanner.core.plugincall import callfunction
# Blueprint exposing plugin-driven scan endpoints under the 'user' name.
user = Blueprint('user', __name__)
# single shared plugin dispatcher instance
plugin = callfunction()
@user.route("/home", methods=["get", "post"])
def home():
    # Run the poc scan and return the raw result as a string.
    # NOTE(review): the target URL is hard-coded -- presumably a demo
    # placeholder; confirm before shipping.
    return str(plugin.pocscan("http://cn.changhong.com/"))
| StarcoderdataPython |
9785661 | from math import log10
from numpy.core.shape_base import block
from utility_functions import PSNR, PSNRfromL1, local_to_global, make_coord, ssim3D, str2bool, ssim, PSNRfromMSE
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import time
import torch.optim as optim
import os
from models import load_model, save_model
import numpy as np
from octree import OctreeNodeList
from options import *
from models import HierarchicalACORN, PositionalEncoding
import argparse
from pytorch_memlab import LineProfiler, MemReporter, profile
from torch.utils.checkpoint import checkpoint_sequential, checkpoint
from torch.multiprocessing import spawn
from torch.distributed import new_group, barrier, group, broadcast
import h5py
import socket
from netCDF4 import Dataset
class Trainer():
    def __init__(self, opt):
        """Store the options dict that configures every training run."""
        self.opt = opt
#@profile
    def train(self, rank, model, item):
        """
        Train the hierarchical ACORN model on *item*, one sub-model (octree
        depth level) at a time, optionally across multiple GPUs/nodes.

        Args:
            rank: process rank (local rank when distributed; re-based to a
                global rank using the Cobalt node file).
            model: HierarchicalACORN instance; its octree is (re)initialized
                here and refined between levels.
            item: target tensor -- assumed [1, C, H, W] (2D mode) or
                [1, C, H, W, D] (3D mode); TODO confirm against caller.
        """
        # fixed seed so every distributed worker draws identical samples
        torch.manual_seed(0b10101010101010101010101010101010)
        if(self.opt['train_distributed']):
            # derive this process's global rank from the Cobalt node file
            node_name = socket.gethostname()
            with open(os.environ['COBALT_NODEFILE'], 'r') as file:
                nodes = file.read().replace('\n', ',')
            nodes = nodes[:len(nodes)-1]
            nodes = nodes.split(',')
            self.opt['node_num'] = nodes.index(node_name)
            rank = rank + self.opt['gpus_per_node']*self.opt['node_num']
            self.opt['device'] = "cuda:" + str(rank % self.opt['gpus_per_node'])
            model.opt = self.opt
            # file-based rendezvous (shared filesystem) instead of env://
            dist.init_process_group(
                backend='nccl',
                #init_method='env://',
                init_method='file://'+os.getcwd()+"/DistTemp",
                world_size = self.opt['num_nodes'] * self.opt['gpus_per_node'],
                rank=rank
            )
            model = model.to(self.opt['device'])
            model.pe = PositionalEncoding(self.opt)
            #model = DDP(model, device_ids=[rank])
            #print("Training in parallel, node " + str(self.opt['node_num']) + " device cuda:" + str(rank))
            # Synchronize all models: broadcast rank 0's weights to everyone
            for model_num in range(len(model.models)):
                for param in model.models[model_num].parameters():
                    broadcast(param, 0)
        else:
            print("Training on " + self.opt['device'])
            model = model.to(self.opt['device'])
        # only rank 0 writes tensorboard logs
        if(rank == 0 or not self.opt['train_distributed']):
            writer = SummaryWriter(os.path.join('tensorboard',self.opt['save_name']))
        start_time = time.time()
        loss = nn.L1Loss().to(self.opt["device"])
        step = 0
        item = item.to(self.opt['device'])
        # convert the target PSNR (dB) into an MSE bound for octree splitting
        target_PSNR = self.opt['error_bound']
        MSE_limit = 10 ** ((-1*target_PSNR + 20*log10(1.0))/10)
        model.init_octree(item.shape)
        # train one sub-model per octree depth level, coarse to fine
        for model_num in range(self.opt['octree_depth_end'] - self.opt['octree_depth_start']):
            if(self.opt['train_distributed']):
                barrier()
            # freeze every sub-model except the one for this level
            for m_num in range(self.opt['octree_depth_end'] - self.opt['octree_depth_start']):
                for param in model.models[m_num].parameters():
                    param.requires_grad = model_num == m_num
            model_optim = optim.Adam(model.models[model_num].parameters(), lr=self.opt["lr"],
                                     betas=(self.opt["beta_1"],self.opt["beta_2"]))
            optim_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer=model_optim,
                                                                   milestones=[self.opt['epochs']*0.8], gamma=0.1)
            # all leaf blocks at the current octree depth
            blocks, block_positions = model.octree.depth_to_blocks_and_block_positions(
                model.octree.max_depth())
            block_positions = torch.tensor(block_positions,
                                           device=self.opt['device'])
            if(rank == 0 or not self.opt['train_distributed']):
                print("Model %i/%i, total parameter count: %i, num blocks: %i" %
                      (model_num, len(model.models), model.count_parameters(), len(blocks)))
            if(self.opt['train_distributed']):
                # shrink the reduction group when there are fewer blocks
                # than workers (surplus workers sit this level out)
                num_blocks = len(blocks)
                if(rank == 0):
                    print("Blocks: " + str(num_blocks))
                if(num_blocks <
                   self.opt['num_nodes'] * self.opt['gpus_per_node']):
                    g = new_group(list(range(num_blocks)), backend='nccl')
                    stride = num_blocks
                else:
                    g = group.WORLD
                    stride = self.opt['num_nodes'] * self.opt['gpus_per_node']
            else:
                stride = 1
            # caches block-index -> global-position-index mappings per batch
            model_caches = {}
            block_error_sum = torch.tensor(0, dtype=torch.float32, device=self.opt['device'])
            # workers with no block assigned skip the epoch loop entirely
            if(rank < len(blocks) or not self.opt['train_distributed']):
                best_MSE = 1.0
                best_MSE_epoch = 0
                early_stop = False
                epoch = self.opt['epoch']
                while epoch < self.opt['epochs'] and not early_stop:
                    self.opt["epoch"] = epoch
                    model.zero_grad()
                    block_error_sum = torch.tensor(0, dtype=torch.float32, device=self.opt['device'])
                    # this worker's contiguous slice of the block list
                    b = rank * int(len(blocks)/stride)
                    b_stop = min((rank+1) * int(len(blocks)/stride), len(blocks))
                    if((rank == 0 or not self.opt['train_distributed']) and epoch == 0):
                        writer.add_scalar("num_nodes", len(blocks), model_num)
                    # random query points per block this iteration
                    queries = max(int(self.opt['local_queries_per_iter'] / len(blocks)),
                                  self.opt['min_queries_per_block'])
                    total_queries = torch.tensor(0, dtype=torch.int, device=self.opt['device'])
                    while b < b_stop:
                        blocks_this_iter = min(self.opt['max_blocks_per_iter'], b_stop-b)
                        #blocks_this_iter = b_stop-b
                        if('2D' in self.opt['mode']):
                            # sample random local coords in [-1,1]^2 per block,
                            # map them into global image coordinates
                            local_positions = torch.rand([blocks_this_iter, 1,
                                                          queries, 2], device=self.opt['device']) * 2 - 1
                            shapes = torch.tensor([block.shape for block in blocks[b:b+blocks_this_iter]], device=self.opt['device']).unsqueeze(1).unsqueeze(1)
                            poses = torch.tensor([block.pos for block in blocks[b:b+blocks_this_iter]], device=self.opt['device']).unsqueeze(1).unsqueeze(1)
                            global_positions = local_to_global(local_positions.clone(), shapes, poses, item.shape)
                            global_positions = global_positions.flatten(0, -2).unsqueeze(0).unsqueeze(0).contiguous()
                            if((b, blocks_this_iter) not in model_caches.keys()):
                                model_caches[(b, blocks_this_iter)] = model.block_index_to_global_indices_mapping(global_positions)
                            block_output = model.forward_global_positions(global_positions,
                                                                          index_to_global_positions_indices=model_caches[(b, blocks_this_iter)],
                                                                          depth_start=model_num if self.opt['use_residual'] else 0,
                                                                          depth_end=model_num+1,
                                                                          local_positions=local_positions, block_start=b)
                            # ground truth sampled from the target at the same points
                            block_item = F.grid_sample(item.expand([-1, -1, -1, -1]),
                                                       global_positions.flip(-1), mode='bilinear', align_corners=False)
                        else:
                            # 3D analog of the branch above
                            local_positions = torch.rand([blocks_this_iter, 1, 1,
                                                          queries, 3], device=self.opt['device']) * 2 - 1
                            shapes = torch.tensor([block.shape for block in blocks[b:b+blocks_this_iter]], device=self.opt['device']).unsqueeze(1).unsqueeze(1).unsqueeze(1)
                            poses = torch.tensor([block.pos for block in blocks[b:b+blocks_this_iter]], device=self.opt['device']).unsqueeze(1).unsqueeze(1).unsqueeze(1)
                            global_positions = local_to_global(local_positions.clone(), shapes, poses, item.shape)
                            global_positions = global_positions.flatten(0, -2).unsqueeze(0).unsqueeze(0).unsqueeze(0).contiguous()
                            if((b, blocks_this_iter) not in model_caches.keys()):
                                model_caches[(b, blocks_this_iter)] = model.block_index_to_global_indices_mapping(global_positions)
                            block_output = model.forward_global_positions(global_positions,
                                                                          index_to_global_positions_indices=model_caches[(b, blocks_this_iter)],
                                                                          depth_end=model_num+1,
                                                                          local_positions=local_positions, block_start=b)
                            block_item = F.grid_sample(item.expand([-1, -1, -1, -1, -1]),
                                                       global_positions.flip(-1), mode='bilinear', align_corners=False)
                        # weight the per-batch loss by its number of queries so
                        # the later division by total_queries gives a mean
                        block_error = loss(block_output,block_item) * queries * blocks_this_iter #* (blocks_this_iter/len(blocks))
                        block_error.backward(retain_graph=True)
                        block_error_sum += block_error.detach()
                        total_queries += (queries * blocks_this_iter)
                        b += blocks_this_iter
                    if self.opt['train_distributed']:
                        # Grad averaging for dist training
                        dist.all_reduce(block_error_sum, op=dist.ReduceOp.SUM, group=g)
                        dist.all_reduce(total_queries, op=dist.ReduceOp.SUM, group=g)
                        for param in model.models[model_num].parameters():
                            dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM, group=g)
                            param.grad.data *= (1/total_queries)
                    else:
                        for param in model.models[model_num].parameters():
                            param.grad.data *= (1/total_queries)
                    block_error_sum /= total_queries
                    model_optim.step()
                    optim_scheduler.step()
                    # early stop if no improvement for 2500 epochs
                    # NOTE(review): despite the names, these track the L1
                    # training error, not an MSE -- confirm before renaming
                    if(block_error_sum > best_MSE and best_MSE_epoch < epoch - 2500):
                        early_stop = True
                        if(rank == 0 or not self.opt['train_distributed']):
                            print("Stopping early")
                    elif(block_error_sum < best_MSE):
                        best_MSE = block_error_sum
                        best_MSE_epoch = epoch
                    if(epoch % self.opt['save_every'] == 0 and (not self.opt['train_distributed'] or rank == 0)):
                        save_model(model, self.opt)
                        print("Saved model and octree")
                    if(step % self.opt['log_every'] == 0 and
                       (not self.opt['train_distributed'] or rank == 0)
                       and self.opt['log_img']):
                        self.log_with_image(model, item, block_error_sum, writer, step, img_size=list(item.shape[2:]))
                    elif(step % 5 == 0 and (not self.opt['train_distributed'] or rank == 0)):
                        print("Model %i/%i, epoch %i/%i, iteration %i, L1: %0.06f" % \
                              (model_num, len(model.models),
                               epoch, self.opt['epochs'],
                               step,
                               block_error_sum.item()))
                        writer.add_scalar('Training PSNR', PSNRfromL1(block_error_sum, torch.tensor(1.0, device=self.opt['device'])), step)
                        writer.add_scalar('L1', block_error_sum, step)
                        GBytes = (torch.cuda.max_memory_allocated(device=self.opt['device']) / (1024**3))
                        writer.add_scalar('GPU memory (GB)', GBytes, step)
                    step += 1
                    epoch += 1
            if(self.opt['train_distributed']):
                barrier()
                # Synchronize all models, whether they were training or not
                for param in model.models[model_num].parameters():
                    broadcast(param, 0)
            if((rank == 0 or not self.opt['train_distributed']) and self.opt['log_img']):
                self.log_with_image(model, item, block_error_sum, writer, step, img_size=list(item.shape[2:]))
            # refine the octree before training the next (finer) level
            if(model_num < self.opt['octree_depth_end'] - self.opt['octree_depth_start']-1):
                model = model.to(self.opt['device'])
                model.pe = PositionalEncoding(self.opt)
                if(self.opt['use_residual']):
                    # reconstruct the full item once, store it as the residual
                    # baseline, and split blocks whose error exceeds the bound
                    with torch.no_grad():
                        if('2D' in self.opt['mode']):
                            sample_points = make_coord(item.shape[2:], self.opt['device'],
                                                       flatten=False).flatten(0, -2).unsqueeze(0).unsqueeze(0).contiguous()
                        elif('3D' in self.opt['mode']):
                            sample_points = make_coord(item.shape[2:], self.opt['device'],
                                                       flatten=False).flatten(0, -2).unsqueeze(0).unsqueeze(0).unsqueeze(0).contiguous()
                        reconstructed = model.forward_global_positions(sample_points).detach()
                        reconstructed = reconstructed.reshape(item.shape)
                        model.residual = reconstructed
                        model.octree.split_from_error_max_depth(reconstructed, item,
                                                                nn.MSELoss().to(self.opt["device"]), MSE_limit)
                elif(self.opt['error_bound_split']):
                    with torch.no_grad():
                        model.octree.split_from_error_max_depth_blockwise(model, item, nn.MSELoss().to(self.opt["device"]),
                                                                          MSE_limit, self.opt)
                elif(not self.opt['error_bound_split']):
                    model.octree.split_all_at_depth(model.octree.max_depth())
                #optim_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer=model_optim,
                #    milestones=[self.opt['epochs']/5,
                #                2*self.opt['epochs']/5,
                #                3*self.opt['epochs']/5,
                #                4*self.opt['epochs']/5],gamma=self.opt['gamma'])
            # restart the epoch counter for the next level's sub-model
            self.opt['epoch'] = 0
        if((rank == 0 or not self.opt['train_distributed']) and self.opt['log_CDF']):
            self.log_to_netCDF(model, item, writer)
        if(rank == 0 or not self.opt['train_distributed']):
            print("Total parameter count: %i" % model.count_parameters())
            end_time = time.time()
            total_time = end_time - start_time
            print("Time to train: %0.01f minutes" % (total_time/60))
            save_model(model, self.opt)
            print("Saved model")
#@profile
    def log_with_image(self, model, item, block_error_sum, writer, step, img_size = [512, 512]):
        """
        Log a reconstruction image (plus octree-block overlay and optional
        PSNR/SSIM) to tensorboard.

        In 3D mode the middle slice along the last axis is logged.  The
        residual baseline is temporarily removed so the network alone is
        evaluated, and restored afterwards.

        NOTE: the mutable default ``img_size=[512, 512]`` is only read
        here, never mutated, so sharing the default list is harmless.
        """
        with torch.no_grad():
            if(self.opt['use_residual']):
                # temporarily evaluate without the stored residual baseline
                temp_residual = model.residual
                model.residual = None
            # dense sample grid over the whole item at img_size resolution
            if('2D' in self.opt['mode']):
                sample_points = make_coord(img_size, self.opt['device'],
                                           flatten=False).flatten(0, -2).unsqueeze(0).unsqueeze(0).contiguous()
            else:
                sample_points = make_coord(img_size, self.opt['device'],
                                           flatten=False).flatten(0, -2).unsqueeze(0).unsqueeze(0).unsqueeze(0).contiguous()
            reconstructed = model.forward_global_positions(sample_points).detach()
            reconstructed = reconstructed.reshape(item.shape[0:2] + tuple(img_size))
            if(self.opt['mode'] == '3D'):
                # log the central slice of the volume
                writer.add_image("reconstruction", reconstructed[0].clamp(0, 1)[...,int(reconstructed.shape[-1]/2)], step)
                #writer.add_image("real", item[0].clamp(0, 1)[...,int(item.shape[-1]/2)], step)
            else:
                writer.add_image("reconstruction", reconstructed[0].clamp(0, 1), step)
                #writer.add_image("real", item[0].clamp(0, 1), step)
            '''
            if(len(model.models) > 1):
                res = model.forward_global_positions(sample_points, depth_end=model.octree.max_depth())
                res = res.reshape(item.shape[0:2] + tuple(img_size))
                if(self.opt['mode'] == '3D'):
                    writer.add_image("Network"+str(len(model.models)-1)+"residual",
                        ((reconstructed-res)[0]+0.5).clamp(0, 1)[...,int(reconstructed.shape[-1]/2)], step)
                else:
                    writer.add_image("Network"+str(len(model.models)-1)+"residual",
                        ((reconstructed-res)[0]+0.5).clamp(0, 1), step)
            '''
            # overlay of the current octree block boundaries
            octree_blocks = model.octree.get_octree_block_img(self.opt['num_channels'], self.opt['device'])
            octree_blocks = F.interpolate(octree_blocks,
                                          size=img_size, mode="bilinear" if '2D' in self.opt['mode'] else 'trilinear', align_corners=False)
            if('3D' in self.opt['mode']):
                writer.add_image("reconstruction_blocks", (reconstructed[0].clamp(0, 1)*octree_blocks[0])[...,int(reconstructed.shape[-1]/2)], step)
            else:
                writer.add_image("reconstruction_blocks", reconstructed[0].clamp(0, 1)*octree_blocks[0], step)
            # quality metrics against the (resampled) ground truth
            if(self.opt['log_psnr']):
                psnr = PSNR(reconstructed, F.interpolate(item, size=img_size, mode='bilinear' if '2D' in self.opt['mode'] else 'trilinear', align_corners=False),
                            torch.tensor(1.0)).item()
            else:
                psnr = 0
            if(self.opt['log_ssim']):
                if('2D' in self.opt['mode']):
                    s = ssim(reconstructed, F.interpolate(item, size=img_size, mode='bilinear', align_corners=False)).item()
                else:
                    s = ssim3D(reconstructed, F.interpolate(item, size=img_size, mode='trilinear', align_corners=False)).item()
            else:
                s = 0
            print("Iteration %i, MSE: %0.06f, PSNR (dB): %0.02f, SSIM: %0.03f" % \
                  (step, block_error_sum.item(), psnr, s))
            writer.add_scalar('PSNR', psnr, step)
            writer.add_scalar('SSIM', s, step)
            if(self.opt['use_residual']):
                # restore the residual baseline removed above
                model.residual = temp_residual
def log_to_netCDF(self, model, item, writer):
with torch.no_grad():
if('2D' in self.opt['mode']):
sample_points = make_coord(item.shape[2:], self.opt['device'],
flatten=False).flatten(0, -2).unsqueeze(0).unsqueeze(0).contiguous()
else:
sample_points = make_coord(item.shape[2:], self.opt['device'],
flatten=False).flatten(0, -2).unsqueeze(0).unsqueeze(0).unsqueeze(0).contiguous()
reconstructed = model.forward_global_positions(sample_points).detach()
reconstructed = reconstructed.reshape(item.shape[0:2] + tuple(item.shape[2:]))
if(os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)),"..",'SavedModels',self.opt['save_name'], "recon.nc"))):
os.remove(os.path.join(os.path.dirname(os.path.abspath(__file__)),"..",'SavedModels',self.opt['save_name'], "recon.nc"))
if(os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)),"..",'SavedModels',self.opt['save_name'], "GT.nc"))):
os.remove(os.path.join(os.path.dirname(os.path.abspath(__file__)),"..",'SavedModels',self.opt['save_name'], "GT.nc"))
if(os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)),"..",'SavedModels',self.opt['save_name'], "tree.nc"))):
os.remove(os.path.join(os.path.dirname(os.path.abspath(__file__)),"..",'SavedModels',self.opt['save_name'], "tree.nc"))
if(self.opt['mode'] == '3D'):
rootgrp = Dataset(os.path.join(os.path.dirname(os.path.abspath(__file__)),"..",'SavedModels',self.opt['save_name'], "recon.nc"), "w", format="NETCDF4")
rootgrp2 = Dataset(os.path.join(os.path.dirname(os.path.abspath(__file__)),"..",'SavedModels',self.opt['save_name'], "GT.nc"), "w", format="NETCDF4")
rootgrp.createDimension("x")
rootgrp.createDimension("y")
rootgrp.createDimension("z")
rootgrp2.createDimension("x")
rootgrp2.createDimension("y")
rootgrp2.createDimension("z")
for chan_num in range(reconstructed.shape[1]):
dim_i = rootgrp.createVariable('channel_'+str(chan_num), np.float32, ("x","y","z"))
dim_i2 = rootgrp2.createVariable('channel_'+str(chan_num), np.float32, ("x","y","z"))
dim_i[:] = reconstructed[0,chan_num].cpu().numpy()
dim_i2[:] = item[0,chan_num].cpu().numpy()
else:
rootgrp = Dataset(os.path.join(os.path.dirname(os.path.abspath(__file__)),"..",'SavedModels',self.opt['save_name'], "recon.nc"), "w", format="NETCDF4")
rootgrp2 = Dataset(os.path.join(os.path.dirname(os.path.abspath(__file__)),"..",'SavedModels',self.opt['save_name'], "GT.nc"), "w", format="NETCDF4")
rootgrp.createDimension("x")
rootgrp.createDimension("y")
rootgrp2.createDimension("x")
rootgrp2.createDimension("y")
for chan_num in range(reconstructed.shape[1]):
dim_i = rootgrp.createVariable('channel_'+str(chan_num), np.float32, ("x","y"))
dim_i2 = rootgrp2.createVariable('channel_'+str(chan_num), np.float32, ("x","y"))
dim_i[:] = reconstructed[0,chan_num].cpu().numpy()
dim_i2[:] = item[0,chan_num].cpu().numpy()
rootgrp
octree_blocks = model.octree.get_octree_block_img(self.opt['num_channels'], self.opt['device'])
if('3D' in self.opt['mode']):
octree_grp = Dataset(os.path.join(os.path.dirname(os.path.abspath(__file__)),"..",'SavedModels',self.opt['save_name'], "tree.nc"), "w", format="NETCDF4")
octree_grp.createDimension("x")
octree_grp.createDimension("y")
octree_grp.createDimension("z")
dim_i = octree_grp.createVariable('blocks'+str(chan_num), np.float32, ("x","y","z"))
dim_i[:] = octree_blocks[0,0].cpu().numpy()
else:
octree_grp = Dataset(os.path.join(os.path.dirname(os.path.abspath(__file__)),"..",'SavedModels',self.opt['save_name'], "tree.nc"), "w", format="NETCDF4")
octree_grp.createDimension("x")
octree_grp.createDimension("y")
dim_i = octree_grp.createVariable('blocks'+str(chan_num), np.float32, ("x","y"))
dim_i[:] = octree_blocks[0,0].cpu().numpy()
if __name__ == '__main__':
    # CLI entry point. Every flag defaults to None so that, below, only the
    # options the user actually passed override the defaults / loaded model.
    parser = argparse.ArgumentParser(description='Train on an input that is 2D')
    parser.add_argument('--mode',default=None,type=str,help='The type of input - 2D, 3D')
    parser.add_argument('--target_signal',default=None,type=str,help='File to train on')
    parser.add_argument('--save_folder',default=None,type=str,help='The folder to save the models folder into')
    parser.add_argument('--save_name',default=None,type=str,help='The name for the folder to save the model')
    parser.add_argument('--device',default=None,type=str,help='Device to use')
    parser.add_argument('--num_channels',default=None,type=int,help='Number of channels in the data')
    parser.add_argument('--feat_grid_channels',default=None,type=int,help='Channels in the feature grid')
    parser.add_argument('--feat_grid_x',default=None,type=int,help='X resolution of feature grid')
    parser.add_argument('--feat_grid_y',default=None,type=int,help='Y resolution of feature grid')
    parser.add_argument('--feat_grid_z',default=None,type=int,help='Z resolution of feature grid (if 3D)')
    parser.add_argument('--local_queries_per_iter',default=None,type=int,help='num queries per iteration while training')
    parser.add_argument('--min_queries_per_block',default=None,type=int,help='min queries per block while training')
    parser.add_argument('--max_blocks_per_iter',default=None,type=int,help='max blocks in a batch per iter')
    parser.add_argument('--num_positional_encoding_terms',default=None,type=int,help='Number of positional encoding terms')
    parser.add_argument('--FC_size_exp_start',default=None,type=float,help='How large the FC layers start')
    parser.add_argument('--FC_size_exp_grow',default=None,type=float,help='How much the FC layers grow deeper in the octree')
    parser.add_argument('--use_residual',default=None,type=str2bool,help='Use a cached residual to accelerate training')
    parser.add_argument('--octree_depth_start',default=None,type=int,help='How deep to start the octree, inclusive')
    parser.add_argument('--octree_depth_end',default=None,type=int,help='How deep to end the octree, exclusive')
    parser.add_argument('--error_bound_split',default=None,type=str2bool,help='Whether to split based on error')
    parser.add_argument('--error_bound',default=None,type=float,help='The target PSNR error')
    parser.add_argument('--train_distributed',type=str2bool,default=None, help='Use distributed training')
    # NOTE(review): the next two help strings look copy-pasted from an
    # unrelated flag ('save discriminators') — worth fixing upstream.
    parser.add_argument('--gpus_per_node',default=None, type=int,help='Whether or not to save discriminators')
    parser.add_argument('--num_nodes',default=None, type=int,help='Whether or not to save discriminators')
    parser.add_argument('--node_num',default=None, type=int,help='This nodes ID')
    parser.add_argument('--epochs',default=None, type=int,help='Number of epochs to use')
    parser.add_argument('--lr',default=None, type=float,help='Learning rate for the generator')
    parser.add_argument('--beta_1',default=None, type=float,help='')
    parser.add_argument('--beta_2',default=None, type=float,help='')
    parser.add_argument('--load_from',default=None, type=str,help='Load a model to continue training')
    parser.add_argument('--save_every',default=None, type=int,help='How often to save during training')
    parser.add_argument('--log_every',default=None, type=int,help='How often to log during training')
    parser.add_argument('--log_img',default=None, type=str2bool,help='Log img during training')
    parser.add_argument('--log_CDF',default=None, type=str2bool,help='Log img during training')
    parser.add_argument('--log_ssim',default=None, type=str2bool,help='Log ssim during training')
    args = vars(parser.parse_args())
    # Standard project folder layout, resolved relative to this file.
    file_folder_path = os.path.dirname(os.path.abspath(__file__))
    project_folder_path = os.path.join(file_folder_path, "..")
    input_folder = os.path.join(project_folder_path, "TrainingData")
    output_folder = os.path.join(project_folder_path, "Output")
    save_folder = os.path.join(project_folder_path, "SavedModels")
    prof = LineProfiler()
    prof.enable()
    if(args['load_from'] is None):
        # Fresh run: start from default options, apply CLI overrides, then
        # pre-build one sub-model per octree level.
        opt = Options.get_default()
        for k in args.keys():
            if args[k] is not None:
                opt[k] = args[k]
        model = HierarchicalACORN(opt)
        for model_num in range(opt['octree_depth_end'] - opt['octree_depth_start'] - 1):
            model.add_model(torch.tensor([1.0], dtype=torch.float32, device=opt['device']))
    else:
        # Resume: load stored options/weights; only the device is overridden.
        opt = load_options(os.path.join(save_folder, args['load_from']))
        opt['device'] = args['device']
        model = load_model(opt, args['device'])
        for i in range(len(model.models)):
            model.models[i] = model.models[i].to(opt['device'])
    # Target signal comes from an HDF5 'data' volume; add a batch dimension.
    item = h5py.File(os.path.join(project_folder_path, opt['target_signal']), 'r')['data']
    item = torch.tensor(item).unsqueeze(0)
    trainer = Trainer(opt)
    if(not opt['train_distributed']):
        trainer.train(0, model, item)
    else:
        # One spawned training process per GPU on this node.
        os.environ['MASTER_ADDR'] = '127.0.0.1'
        os.environ['MASTER_PORT'] = '29500'
        spawn(trainer.train, args=(model, item), nprocs=opt['gpus_per_node'])
    print(prof.display())
prof.disable() | StarcoderdataPython |
8004845 | import numpy as np
class KMeans():
    """Implementation of the KMeans Algorithm (Lloyd's iterations)."""

    def __init__(self, n_cluster, max_iter=100, e=0.0001):
        """constructor

        Args
        ----
        - n_cluster(int): "k" in K-Means
        - max_iter(int): Maximum number of iterations to consider before returning
        - e(double): tolerance. Difference between successive distortions to define "converged"
        """
        self.n_cluster = n_cluster
        self.max_iter = max_iter
        self.e = e

    def fit(self, x):
        '''Finds n_cluster clusters in the data x.

        Args
        ----
        - x (numpy array): N X D input matrix

        Returns
        -------
        - mu(numpy.ndarray): centroids or means, shape (K, D), float64
        - r(numpy.ndarray): cluster membership, shape (N,)
        - iter(int): number of iterations taken to converge

        Algorithm
        ---------
        - Initialize means by picking self.n_cluster of the N data points
        - Alternate assignment/update steps until the average distortion
          changes by no more than self.e
        '''
        assert len(x.shape) == 2, "fit function takes 2-D numpy arrays as input"
        np.random.seed(42)
        N, D = x.shape
        K = self.n_cluster

        # Initialize centroids as K distinct random data points. Cast to
        # float so integer inputs don't truncate the mean update below.
        point_idx = np.arange(N)
        np.random.shuffle(point_idx)
        mu = x[point_idx[:K], :].astype(np.float64)

        J = np.inf
        for i in range(self.max_iter):
            # Assignment step: squared Euclidean distance of every point to
            # every centroid, vectorized instead of an O(N*K) Python loop.
            diff = x[:, None, :] - mu[None, :, :]          # (N, K, D)
            dist = np.einsum('nkd,nkd->nk', diff, diff)    # (N, K)
            r = np.argmin(dist, axis=1)

            # "Average" distortion (mean squared distance to the assigned
            # centroid) — cheaper to compare than the full objective.
            J_new = dist[np.arange(N), r].sum() / N
            print("Iteration [",i,"]: J = ", J)
            if np.absolute(J - J_new) <= self.e:
                return (mu, r, i)
            J = J_new

            # Update step: move each centroid to the mean of its members.
            # Empty clusters keep their previous position (the original
            # divided by zero here).
            for k in range(K):
                members, = np.where(r == k)
                if members.size > 0:
                    mu[k] = x[members, :].mean(axis=0)
        print("Did not converge!")
        return (mu, r, self.max_iter)
| StarcoderdataPython |
6628221 | import unittest
from guiengine import *
def initpygame():
    """Initialize pygame and open the 800x600 window some tests require."""
    pygame.init()
    pygame.display.set_mode((800, 600))
class TestEventBus(unittest.TestCase):
    """Tests EventBus emission, per-callback/per-event disabling, and the
    process-wide active-bus accessor."""
    def gen_cb(self, number):
        """Build a callback that records `number` in self.called when it
        receives the payload 1."""
        def cb(data):
            if data == 1:
                self.called[number] = True
        return cb
    def setUp(self):
        self.bus = EventBus()
        self.called = {}
    def tearDown(self):
        # Clear the global active bus so state doesn't leak across tests.
        EventBus.active(None)
    def test_emission(self):
        # Callbacks fire only for the event name they registered under.
        self.bus.on('event_1', self.gen_cb(1))
        self.bus.on('event_1', self.gen_cb(2))
        self.bus.on('event_2', self.gen_cb(3))
        self.bus.emit('event_1', 1)
        self.bus.emit('event_2', 1)
        self.assertEqual(self.called.get(1), True)
        self.assertEqual(self.called.get(2), True)
        self.assertEqual(self.called.get(3), True)
    def test_disable(self):
        cb1 = self.gen_cb(1)
        cb2 = self.gen_cb(2)
        cb3 = self.gen_cb(3)
        self.bus.on('event_1', cb1)
        self.bus.on('event_1', cb2)
        self.bus.on('event_1', cb3)
        # Disabling a single callback leaves its siblings registered.
        self.bus.disable('event_1', cb2)
        self.bus.emit('event_1', 1)
        self.assertEqual(self.called.get(1), True)
        self.assertEqual(self.called.get(2), None)
        self.assertEqual(self.called.get(3), True)
        self.called = {}
        # Disabling without a callback removes every listener of the event.
        self.bus.disable('event_1')
        self.bus.emit('event_1', 1)
        self.assertEqual(self.called.get(1), None)
        self.assertEqual(self.called.get(2), None)
        self.assertEqual(self.called.get(3), None)
    def test_disable_all(self):
        self.bus.on('event_1', self.gen_cb(1))
        self.bus.on('event_1', self.gen_cb(2))
        self.bus.on('event_2', self.gen_cb(3))
        self.bus.disable_all()
        self.bus.emit('event_1', 1)
        self.bus.emit('event_2', 1)
        self.assertEqual(self.called.get(1), None)
        self.assertEqual(self.called.get(2), None)
        # Fixed copy-paste bug: the original asserted key 2 twice and never
        # checked the 'event_2' listener (key 3).
        self.assertEqual(self.called.get(3), None)
    def test_active(self):
        inst = EventBus()
        # EventBus.active(None)
        # self.assertIsNone(EventBus.active())
        EventBus.active(inst)
        self.assertEqual(EventBus.active(), inst)
class TestBusProxy(unittest.TestCase):
    """BusProxy forwards registrations/emissions to the active EventBus and
    manages only its own callbacks on disable."""
    def setUp(self):
        self.bus = EventBus()
        EventBus.active(self.bus)
        self.proxy = BusProxy()
        self.called = {}
    def gen_cb(self, number):
        # Callback factory: flags `number` as called on payload 1.
        def cb(data):
            if data == 1:
                self.called[number] = True
        return cb
    def test_emission(self):
        # Listeners on the underlying bus and on the proxy both fire.
        self.bus.on('event_1', self.gen_cb(1))
        self.proxy.on('event_1', self.gen_cb(2))
        self.proxy.emit('event_1', 1)
        self.assertTrue(self.called.get(1))
        self.assertTrue(self.called.get(2))
    def test_disable(self):
        cb0 = self.gen_cb(1)
        cb1 = self.gen_cb(2)
        cb2 = self.gen_cb(3)
        cb3 = self.gen_cb(4)
        self.proxy.on('event_1', cb0)
        self.proxy.on('event_1', cb3)
        self.proxy.on('event_2', cb1)
        self.bus.on('event_1', cb2)
        # Disabling cb0 via the proxy must not touch cb3 (same proxy) or
        # cb2 (registered directly on the bus).
        self.proxy.disable('event_1', cb0)
        self.proxy.emit('event_1', 1)
        self.proxy.emit('event_2', 1)
        self.assertIsNone(self.called.get(1))
        self.assertTrue(self.called.get(2))
        self.assertTrue(self.called.get(3))
        self.assertTrue(self.called.get(4))
class TestOuterBus(unittest.TestCase):
    """OuterBus translates posted pygame mouse events into guiengine
    Event.* emissions, either directly or redirected to another bus."""
    class Listeners:
        # Records which mouse callbacks fired and checks their payloads.
        def __init__(self, outer):
            """
            :type outer TestOuterBus
            """
            self.outer = outer
            self.moved = False
            self.button_down = False
            self.button_up = False
        def onmousemove(self, data):
            self.moved = True
            self.outer.assertEqual(data, (50, 50))
        def onbuttondown(self, data):
            self.button_down = True
            self.outer.assertEqual(data, (40, 60))
        def onbuttonup(self, data):
            self.button_up = True
            self.outer.assertEqual(data, (40, 60))
    def setUp(self):
        self.outer_bus = OuterBus()
        # Synthetic pygame events to be posted into the queue.
        self.mousemove = pygame.event.Event(pygame.MOUSEMOTION, {
            'pos': (50, 50),
            'rel': (-10, 30),
            'buttons': (False, False, False)
        })
        self.buttondown = pygame.event.Event(pygame.MOUSEBUTTONDOWN, {
            'button': 1,
            'pos': (40, 60)
        })
        self.buttonup = pygame.event.Event(pygame.MOUSEBUTTONUP, {
            'button': 1,
            'pos': (40, 60)
        })
        self.listeners = self.Listeners(self)
    def _launch(self, listen_on_bus):
        # Post the synthetic events, then register listeners on the given bus.
        pygame.event.post(self.mousemove)
        pygame.event.post(self.buttondown)
        pygame.event.post(self.buttonup)
        listen_on_bus.on(Event.MOUSEMOVE, self.listeners.onmousemove)
        listen_on_bus.on(Event.MOUSEUP, self.listeners.onbuttonup)
        listen_on_bus.on(Event.MOUSEDOWN, self.listeners.onbuttondown)
    def test_emit_refresh(self):
        self._launch(self.outer_bus)
        self.outer_bus.refresh()
        self.assertTrue(self.listeners.moved)
        self.assertTrue(self.listeners.button_up)
        self.assertTrue(self.listeners.button_down)
    def test_redirect(self):
        # After redirect(), events pumped by OuterBus.refresh() must arrive
        # on the target bus instead.
        bus = EventBus()
        self.outer_bus.redirect(bus)
        self._launch(bus)
        self.outer_bus.refresh()
        self.assertTrue(self.listeners.moved)
        self.assertTrue(self.listeners.button_up)
        self.assertTrue(self.listeners.button_down)
class TestMouseAware(unittest.TestCase):
    """
    TODO: Test MouseAware... or not...
    """
    def test_nothing(self):
        # Placeholder so this suite has at least one (passing) test.
        pass
class TestResourceBank(unittest.TestCase):
    """ResourceBank: singleton access, typed asset loaders, and caching."""
    def setUp(self):
        initpygame()
        self.bank = ResourceBank.instance()
        self.paths = ['resources/Cburnett V2 improved/PNGs/square brown dark_png.png',
                      'resources/Cburnett V2 improved/PNGs/square brown light_png.png']
    def test_instance(self):
        self.assertEqual(self.bank, ResourceBank.instance())
    def test_image(self):
        self.assertIsInstance(self.bank.image(self.paths[0]), pygame.Surface)
    def test_sound(self):
        sound = self.bank.sound('Music/Music.ogg')
        self.assertIsInstance(sound, pygame.mixer.Sound)
    def test_font(self):
        font = self.bank.font(None, 12)
        self.assertIsInstance(font, pygame.font.Font)
    def test_caching(self):
        # Same path → same cached object; cached=False must bypass the cache.
        self.assertEqual(self.bank.image(self.paths[0]), self.bank.image(self.paths[0]))
        self.assertNotEqual(self.bank.image(self.paths[1]),
                            self.bank.image(self.paths[1], cached=False))
class TestImage(unittest.TestCase):
    """Image.scale should resize in place and return self for chaining."""
    def setUp(self):
        initpygame()
        self.image = Image('resources/Cburnett V2 improved/PNGs/square brown dark_png.png')
    def test_scale(self):
        old_w = self.image.width
        old_h = self.image.height
        new_rf = self.image.scale(2)
        self.assertEqual(self.image.width, old_w * 2)
        self.assertEqual(self.image.height, old_h * 2)
        self.assertEqual(new_rf, self.image)
class TestText(unittest.TestCase):
    """Text.to_surface caches its rendered surface and only re-renders when
    the content or colour changes."""
    def setUp(self):
        self.c1 = 'Hola'
        self.c2 = 'Adios'
        self.txt = Text(self.c1, 12, None, (0, 0, 0), (255, 255, 255))
    def test_to_surface(self):
        # Must return the same surface repeatedly, except when the content
        # or colour changes.
        s1 = self.txt.to_surface()
        s2 = self.txt.to_surface()
        self.txt.content(self.c2)
        s3 = self.txt.to_surface()
        s4 = self.txt.to_surface()
        self.txt.color((0, 0, 255))
        s5 = self.txt.to_surface()
        s6 = self.txt.to_surface()
        self.assertIs(s1, s2)
        self.assertIsNot(s1, s3)
        self.assertIs(s3, s4)
        self.assertIsNot(s3, s5)
        self.assertIs(s5, s6)
class TestRootDrawContext(unittest.TestCase):
    """RootDrawContext blits at absolute coordinates."""
    def setUp(self):
        self.ctx = RootDrawContext(Surface((500, 500)))
        self.img = Image('resources/Cburnett V2 improved/PNGs/square brown dark_png.png') \
            .scale(1 / 10)
    def test_blit(self):
        rect = self.ctx.blit(self.img, (30, 30))
        self.assertEqual(rect.x, 30)
        self.assertEqual(rect.y, 30)
class TestDrawContext(unittest.TestCase):
    """Nested sub-contexts accumulate their origin offsets when blitting."""
    def setUp(self):
        initpygame()
        self.root = RootDrawContext(Surface((500, 500)))
        self.img = Image('resources/Cburnett V2 improved/PNGs/square brown dark_png.png') \
            .scale(1 / 10)
    def test_sub_blit(self):
        sub = self.root.sub((40, 40)).sub((60, 60))
        rect = sub.blit(self.img, (50, 50))
        self.assertEqual(rect.x, 40 + 60 + 50)
        self.assertEqual(rect.y, 40 + 60 + 50)
class TestSound(unittest.TestCase):
    """Sound play/stop should be chainable and never raise."""
    def setUp(self):
        self.sound = Sound('Music/Music.ogg')
    def test_not_throws(self):
        self.sound.play(-1).stop().play(0).play(0).play(3).stop()
class TestEmptySound(unittest.TestCase):
    """EmptySound is a chainable null object; every call is a no-op."""
    def test_nothing(self):
        EmptySound().play(-1).play(0).stop().play(2).play(3).stop()
class TestRenderizable(unittest.TestCase):
    """Renderizable exposes zero-size bounds at its origin and a BusProxy."""
    def setUp(self):
        self.ren = Renderizable((10, 20))
    def test_bounds(self):
        self.assertEqual(self.ren.bounds.x, 10)
        self.assertEqual(self.ren.bounds.y, 20)
        self.assertEqual(self.ren.bounds.width, 0)
        self.assertEqual(self.ren.bounds.height, 0)
    def test_bus(self):
        self.assertIsInstance(self.ren._bus, BusProxy)
class TestFigureNode(unittest.TestCase):
    """FigureNode.update_render should blit its image at the node origin."""
    class MockDrawContext(DrawContext):
        def __init__(self):
            self.blitted = False
        def blit(self, imagelike, xy):
            # Only record a blit at the expected (10, 20) origin.
            if xy == (10, 20):
                self.blitted = True
    def setUp(self):
        self.fig = FigureNode((10, 20), Image(
            'resources/Cburnett V2 improved/PNGs/square brown dark_png.png'
        ).scale(1 / 10))
    def test_update_render(self):
        mock = self.MockDrawContext()
        self.fig.update_render(mock, 0.01)
        self.assertTrue(mock.blitted)
class TestLayer(unittest.TestCase):
    """Layer fans logic/render/destroy out to its children and grows its
    bounds to enclose them."""
    class MockNode(Renderizable):
        # Child stub recording which lifecycle hooks were invoked.
        def __init__(self, bounds):
            super().__init__(bounds.topleft)
            self.logic = False
            self.render = False
            self.destroyed = False
            self.bounds = bounds
        def update_logic(self, dt):
            self.logic = True
        def update_render(self, draw_context: DrawContext, dt):
            self.render = True
        def destroy(self):
            self.destroyed = True
    def setUp(self):
        self.layer = Layer((10, 10))
        self.c1 = TestLayer.MockNode(Rect((10, 10), (30, 40)))
        self.c2 = TestLayer.MockNode(Rect((20, 10), (30, 40)))
        self.layer._add_child(self.c1)
        self.layer._add_child(self.c2)
    def test_update_logic(self):
        self.layer.update_logic(0.01)
        self.assertTrue(self.c1.logic)
        self.assertTrue(self.c2.logic)
    def test_update_render(self):
        self.layer.update_render(RootDrawContext(Surface((10, 10))), 0.01)
        self.assertTrue(self.c1.render)
        self.assertTrue(self.c2.render)
        # Bounds become the union of the children's rects.
        self.assertEqual(self.layer.bounds, Rect((10, 10), (40, 40)))
    def test_remove_child(self):
        self.layer._remove_child(self.c2)
        self.layer.update_logic(0.01)
        self.assertTrue(self.c1.logic)
        self.assertFalse(self.c2.logic)
    def test_destroy(self):
        self.layer.destroy()
        self.assertTrue(self.c1.destroyed)
        self.assertTrue(self.c2.destroyed)
class TestScene(unittest.TestCase):
    """Scene._bgm should accept a Sound without raising."""
    def setUp(self):
        self.scene = Scene()
    def test_bgm(self):
        sound = Sound('Music/Music.ogg')
        self.scene._bgm(sound)
class TestSceneManager(unittest.TestCase):
    """SceneManager ticks the active scene and performs lazy scene changes
    requested via Event.SCENE_CHANGE."""
    class MockDrawContext(DrawContext):
        # No-op draw context satisfying the DrawContext interface.
        def __init__(self):
            pass
        def sub(self, origin):
            pass
        def blit(self, imagelike, xy: tuple):
            pass
        def circle(self, center, radius):
            pass
        def line(self, xy1, xy2):
            pass
        def fill(self, color):
            pass
    class MockScene(Scene):
        # First scene: counts cycles up and requests a scene change.
        def __init__(self, outer):
            super().__init__()
            self.outer = outer
        def update_logic(self, dt):
            self.outer.logic += 1
            # Even with this event, update_render is still called, because
            # the scene change is lazy (it only happens on the next tick).
            self._bus.emit(Event.SCENE_CHANGE, lambda: self.outer.second_scene)
        def update_render(self, draw_context: DrawContext, dt):
            self.outer.render += 1
        def destroy(self):
            self.outer.destroyed = True
    class SecondMockScene(Scene):
        # Replacement scene: counts cycles back down.
        def __init__(self, outer):
            self.outer = outer
        def update_logic(self, dt):
            self.outer.logic -= 1
        def update_render(self, draw_context: DrawContext, dt):
            self.outer.render -= 1
    def setUp(self):
        self.ctx = self.MockDrawContext()
        self.bus = EventBus()
        EventBus.active(self.bus)
        self.scene = self.MockScene(self)
        self.second_scene = self.SecondMockScene(self)
        self.mgr = SceneManager(self.ctx, self.bus, lambda: self.scene)
        self.logic = 0
        self.render = 0
        # Marks when MockScene is destroyed, at which point
        # SecondMockScene should have replaced it.
        self.destroyed = False
    def test_tick(self):
        self.mgr.tick(0.01)
        self.assertEqual(self.logic, 1)
        self.assertEqual(self.render, 1)
        self.mgr.tick(0.01)
        self.assertTrue(self.destroyed)
        self.assertEqual(self.logic, 0)
        self.assertEqual(self.render, 0)
class TestGameObject(unittest.TestCase):
    """GameObject.gameloop drives logic/render cycles and flips the display
    until the scene emits Event.QUIT."""
    class MockScene(Scene):
        def __init__(self):
            super().__init__()
            self.cycles = {
                'logic': 0,
                'render': 0
            }
        def update_render(self, draw_context: DrawContext, dt):
            self.cycles['render'] += 1
        def update_logic(self, dt):
            # Keep requesting animation frames, then quit after 100 ticks.
            self._bus.emit(Event.REQ_ANIM_FRAME)
            self.cycles['logic'] += 1
            if self.cycles['logic'] == 100:
                self._bus.emit(Event.QUIT, None)
    class MockDisplay(Display):
        # Counts flips instead of touching a real window.
        def __init__(self):
            self.flipped = 0
        def draw_context(self):
            return TestSceneManager.MockDrawContext()
        def resolution(self, width, height):
            pass
        def flip(self):
            self.flipped += 1
    def create_scene(self):
        self.scene = self.MockScene()
        return self.scene
    def setUp(self):
        self.display = self.MockDisplay()
        # Careful here! A Scene must not be instantiated before GameObject,
        # because the latter defines a bus while the former requires one.
        self.game_object = GameObject(self.display, self.create_scene)
    def test_gameloop(self):
        self.game_object.gameloop()
        self.assertEqual(self.scene.cycles, {
            'logic': 100,
            'render': 100
        })
        self.assertEqual(self.display.flipped, 100)
if __name__ == '__main__':
    # Run the full guiengine test suite when executed directly.
    unittest.main()
| StarcoderdataPython |
5135925 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
host = ''   # bind on all interfaces
port = 6666

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((host, port))
sock.listen(5)

# Accept clients one at a time; print everything each client sends, in
# 3-byte chunks, until the client disconnects (recv returns b'').
while True:
    conn, addr = sock.accept()
    while True:
        data = conn.recv(3)
        # Fixed: original used the Python-2-only `print data` statement,
        # which is a SyntaxError under Python 3.
        print(data)
        if not data:
            break
    conn.close()
| StarcoderdataPython |
370462 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-28 17:56
from __future__ import unicode_literals
from django.db import migrations
import taggit_autosuggest.managers
class Migration(migrations.Migration):
    """Auto-generated: switch Post.tags to taggit-autosuggest's
    TaggableManager (same underlying taggit.TaggedItem through table)."""
    dependencies = [
        ('blog', '0010_post_slug'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='tags',
            field=taggit_autosuggest.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
        ),
    ]
| StarcoderdataPython |
187013 | from . import views
from django.urls import path,include
app_name = "book"
# URL routes for the book app: index, book creation, auth helpers,
# borrow/return actions, per-user profiles, and the devoter list.
# Names are reversed through the "book:" namespace.
urlpatterns = [
    path('', views.index, name = 'homepage'),
    path('add',views.create_book,name = "book-add"),
    path('register',views.register,name = 'register'),
    path('login',views.login_user, name = 'login'),
    path('logout',views.logout_user, name = 'logout'),
    path('<int:book_id>/borrow',views.BorrowBook ,name = 'borrow'),
    path('<int:book_id>/return', views.returnBook, name = 'return'),
    path('<int:user_id>/profile',views.userProfile, name = 'profile'),
    path('devoterlist',views.devoterlist,name = 'devoter')
]
| StarcoderdataPython |
8189675 | from unittest.mock import patch
import pytest
from prediction_techniques.neural_networks import mlp
from gamestonk_terminal import load
@pytest.fixture
def stock():
    """Factory fixture: returns load_stock(name, start), which wraps
    gamestonk_terminal's load() with the CLI-style argument list it expects."""
    def load_stock(name, start='2020-06-04'):
        return load(['-t', name, '-s', start], '', '', '', '')
    return load_stock
@pytest.mark.e2e
def test_mlp(stock):
    """End-to-end: run the MLP predictor on TSLA data and verify that the
    'Predicted share price:' line is written to stdout."""
    with patch('sys.stdout.write') as mock_out:
        tesla = stock('TSLA')
        mlp([], tesla[0], tesla[2], tesla[3])
        mock_out.assert_any_call('Predicted share price:')
| StarcoderdataPython |
6409535 | <reponame>BlazingMammothGames/MammothBlenderTools<gh_stars>1-10
import bpy
from bpy.props import *
from bpy_extras.io_utils import ExportHelper
import json
import base64
import struct
import bmesh
import zlib
import os
# adapted from https://github.com/Kupoman/blendergltf
# helper class for dealing with vertices
class Vertex:
	"""Export-time snapshot of one Blender mesh vertex: position, normal,
	optional uv/colour (filled in later by export_meshes), and up to four
	bone influences padded to a fixed stride."""
	__slots__ = ['position', 'normal', 'uv', 'colour', 'index', 'bone_indices', 'bone_weights']
	def __init__(self, vertex):
		self.position = vertex.co
		self.normal = vertex.normal
		self.uv = None      # set by export_meshes from the first UV layer
		self.colour = None  # set by export_meshes from the first colour layer
		self.index = vertex.index
		# bone data!
		# note: limit each vertex to being attached to 4 bones at a time
		# (using the most influential 4 bones)
		groups = sorted(vertex.groups, key=lambda group: group.weight, reverse=True)
		if len(groups) > 4:
			groups = groups[:4]
		self.bone_indices = [group.group for group in groups]
		self.bone_weights = [group.weight for group in groups]
		# pad to exactly 4 entries so the vertex buffer stride is constant
		if len(self.bone_weights) < 4:
			for _ in range(len(self.bone_weights), 4):
				self.bone_weights.append(0.0)
				self.bone_indices.append(0)
class MammothExporter(bpy.types.Operator, ExportHelper):
	"""Blender export operator that serializes the current blend file into
	the Mammoth engine's JSON scene format."""
	bl_idname = "export_mammoth_scene.json"
	bl_label = "Export Mammoth"
	filename_ext = ".json"
	filter_glob = StringProperty(default="*.json", options={'HIDDEN'})
	check_extension = True
	# TODO: put export options as properties here
	pretty_print = BoolProperty(name='Pretty Print', default=True)
	pack_images = BoolProperty(name='Pack Images', default=False)
	#apply_mesh_modifiers = BoolProperty(name='Apply Mesh Modifiers', default=True) # TODO: figure out modifiers!
def toGLMatrix(self, matrix):
return [i for col in matrix.col for i in col]
	def execute(self, context):
		"""Operator entry point: gather all blend-file datablocks, convert
		them with process(), and write the JSON to self.filepath.

		Returns {'FINISHED'} per Blender's operator protocol."""
		# collect all the file data
		file_data = {
			'actions': list(bpy.data.actions),
			'cameras': list(bpy.data.cameras),
			'lights': list(bpy.data.lamps),
			'images': list(bpy.data.images),
			'materials': list(bpy.data.materials),
			'meshes': list(bpy.data.meshes),
			'objects': list(bpy.data.objects),
			'scenes': list(bpy.data.scenes),
			'textures': list(bpy.data.textures),
			'armatures': list(bpy.data.armatures)
		}
		# convert our scene into JSON
		data = self.process(file_data)
		# and save it to file!
		with open(self.filepath, 'w') as fout:
			indent = None
			if self.pretty_print:
				indent = 4
			txtVersion = json.JSONEncoder(indent=indent, sort_keys=True, check_circular=False).encode(data)
			fout.write(txtVersion)
			def sizeof_fmt(num, suffix='B'):
				# human-readable byte count for the status-bar message
				for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
					if abs(num) < 1024.0:
						return "%3.1f%s%s" % (num, unit, suffix)
					num /= 1024.0
				return "%.1f%s%s" % (num, 'Yi', suffix)
			self.report({'INFO'}, '[Mammoth] saved %s! (%s)' % (os.path.basename(self.filepath), sizeof_fmt(len(txtVersion))))
			if self.pretty_print:
				fout.write('\n')
		return {'FINISHED'}
def process(self, file_data):
#import sys
#mod_version = sys.modules['mammoth_blender_tools'].bl_info.get('version')
#mod_version_string = '.'.join(str(v) for v in mod_version)
mod_version_string = '0.0.11' # TODO: automatic version string?
data = {
'meta': {
'file': bpy.path.clean_name(bpy.path.basename(bpy.data.filepath)),
'blender': bpy.app.version_string,
'exporter_version': mod_version_string,
},
'objects': self.export_objects(file_data),
'meshes': self.export_meshes(file_data),
'lights': self.export_lights(file_data),
'cameras': self.export_cameras(file_data),
'shaders': self.export_materials(file_data),
'textures': self.export_textures(file_data),
'images': self.export_images(file_data),
'armatures': self.export_armatures(file_data)
}
return data
	def export_objects(self, file_data):
		"""Serialize the scene graph: every parentless object becomes a root
		node with children nested recursively. Each node carries its name,
		optional transform, attached Mammoth components, and type-specific
		data (mesh/camera/light/armature reference, optional skin binding)."""
		def export_object(obj):
			# first get our attached components
			components = {}
			for key, attributes in bpy.mammothComponentsLayout.items():
				comp = getattr(obj, "mammoth_component_%s" % key)
				if comp.internal___active:
					components[key] = {}
					for attribute in attributes:
						# TODO: more attribute types
						if attribute['type'] == 'int' or \
						   attribute['type'] == 'float' or \
						   attribute['type'] == 'bool' or \
						   attribute['type'] == 'string':
							components[key][attribute['name']] = getattr(comp, attribute['name'])
						elif attribute['type'] == 'ivec2' or \
							 attribute['type'] == 'ivec3' or \
							 attribute['type'] == 'ivec4' or \
							 attribute['type'] == 'vec2' or \
							 attribute['type'] == 'vec3' or \
							 attribute['type'] == 'vec4' or \
							 attribute['type'] == 'colour':
							components[key][attribute['name']] = [i for i in getattr(comp, attribute['name'])]
						else:
							raise TypeError('Unsupported Mammoth attribute type \'%s\' for %s on %s' % (attribute['type'], attribute['name'], key))
			# moves the quaternion's first component to the end
			# (e.g. WXYZ input becomes XYZW output)
			def sort_quat(quat):
				q = [i for i in quat]
				return [q[1], q[2], q[3], q[0]]
			# now build the dictionary
			node = {
				'name': obj.name
			}
			if obj.mammoth_use_transform:
				# temporarily force quaternion mode so rotation_quaternion
				# is valid, then restore the user's rotation mode
				oldMode = obj.rotation_mode
				obj.rotation_mode = 'QUATERNION'
				node['transform'] = {
					'translation': [i for i in obj.location],
					'rotation': sort_quat(obj.rotation_quaternion),
					'scale': [i for i in obj.scale]
				}
				obj.rotation_mode = oldMode
			if obj.children is not None and len(obj.children) > 0:
				node['children'] = [export_object(child) for child in obj.children]
			if components is not None and len(components) > 0:
				node['components'] = components
			if obj.type == 'MESH':
				node['render'] = { 'mesh': obj.data.name }
				# only the first material slot is exported as the shader
				if len(obj.material_slots) > 0 and obj.material_slots[0].material is not None:
					node['render']['shader'] = obj.material_slots[0].material.name
				armature = obj.find_armature()
				if armature:
					bind_shape_mat = obj.matrix_world * armature.matrix_world.inverted()
					node['skin'] = {
						'bindShapeMatrix': self.toGLMatrix(bind_shape_mat),
						'armature': armature.data.name,
						'bones': [group.name for group in obj.vertex_groups]
					}
			elif obj.type == 'EMPTY':
				pass
			elif obj.type == 'CAMERA':
				node['camera'] = obj.data.name
			elif obj.type == 'LAMP':
				node['light'] = obj.data.name
			elif obj.type == 'ARMATURE':
				node['armature'] = obj.data.name
			else:
				raise TypeError('Unsupported object type \'%s\' (%s)' % (obj.type, obj.name))
			return node
		# export each _root_ object (only objects without parents)
		objects = list(file_data.get('objects', []))
		return [export_object(obj) for obj in objects if obj.parent is None]
def export_meshes(self, file_data):
    """Export every used mesh datablock as base64-packed vertex/index data."""
    # TODO: include vertex skinning data!
    def export_mesh(src_mesh):
        """Triangulate *src_mesh* and pack it into a JSON-friendly dict."""
        self.report({'INFO'}, 'exporting mesh: %s' % src_mesh.name)
        me = {
            'name': src_mesh.name
        }
        # determine if the mesh is a skin or not
        # (skinned meshes get 4 bone indices + 4 bone weights per vertex)
        is_skinned = False
        for obj in list(file_data.get('objects', [])):
            if obj.type == 'MESH' and obj.data == src_mesh and obj.find_armature():
                is_skinned = True
                break
        # triangulate the mesh
        # (work on a temporary copy so the original datablock is untouched)
        bm = bmesh.new()
        bm.from_mesh(src_mesh)
        bmesh.ops.triangulate(bm, faces=bm.faces[:], quad_method=0, ngon_method=0)
        mesh = bpy.data.meshes.new(src_mesh.name)
        bm.to_mesh(mesh)
        bm.free()
        # prep the mesh for export
        mesh.calc_normals_split()
        mesh.calc_tessface()
        # how many bytes per vertex?
        # position + normal + uv + colours + bone indices + bone weights
        num_bone_indices = 4 if is_skinned else 0
        num_bone_weights = 4 if is_skinned else 0
        # 4 bytes per component (float32 / uint32)
        vertexSize = (3 + 3 + (len(mesh.uv_layers) * 2) + (len(mesh.vertex_colors) * 3) + num_bone_indices + num_bone_weights) * 4
        # extract the vertices
        #vertices = [Vertex(mesh, loop) for loop in mesh.loops]
        vertices = [Vertex(vertex) for vertex in mesh.vertices]
        # add UV data
        # NOTE(review): only the first UV layer / colour layer is exported.
        if len(mesh.uv_layers) > 0:
            for loop in mesh.loops:
                vertices[loop.vertex_index].uv = mesh.uv_layers[0].data[loop.index].uv
        # add colour data
        if len(mesh.vertex_colors) > 0:
            for loop in mesh.loops:
                vertices[loop.vertex_index].colour = mesh.vertex_colors[0].data[loop.index].color
        # interleave all vertex attributes into one binary buffer;
        # the layout here must match 'vlayout' written below
        vData = bytearray(vertexSize * len(vertices))
        i = 0
        for vertex in vertices:
            struct.pack_into('ffffff', vData, i,
                vertex.position[0], vertex.position[1], vertex.position[2],
                vertex.normal[0], vertex.normal[1], vertex.normal[2]
            )
            i += struct.calcsize('ffffff')
            if vertex.uv is not None:
                struct.pack_into('ff', vData, i, vertex.uv[0], vertex.uv[1])
                i += struct.calcsize('ff')
            if vertex.colour is not None:
                struct.pack_into('fff', vData, i, vertex.colour[0], vertex.colour[1], vertex.colour[2])
                i += struct.calcsize('fff')
            if is_skinned:
                struct.pack_into('IIIIffff', vData, i,
                    vertex.bone_indices[0], vertex.bone_indices[1], vertex.bone_indices[2], vertex.bone_indices[3],
                    vertex.bone_weights[0], vertex.bone_weights[1], vertex.bone_weights[2], vertex.bone_weights[3])
                i += struct.calcsize('IIIIffff')
        # base-64 encode them
        me['vertices'] = 'data:text/plain;base64,' + base64.b64encode(vData).decode('ascii')
        #self.report({'INFO'}, '[Mammoth] Encoded %d vertices into %d bytes (%d base-64)' % (len(vertices), len(vData), len(me['vertices'])))
        #self.report({'INFO'}, '; '.join(', '.join(str(p) for p in e.position) for e in vertices))
        # record how the vertices are laid out
        vertexDescription = ['position', 'normal']
        if len(mesh.uv_layers) > 0:
            vertexDescription.append('uv')
        if len(mesh.vertex_colors) > 0:
            vertexDescription.append('colour')
        if is_skinned:
            vertexDescription.append('bone_indices')
            vertexDescription.append('bone_weights')
        me['vlayout'] = vertexDescription
        # add the indices (3 int32 per triangle, mesh was triangulated above)
        triangles = [face.vertices for face in mesh.polygons]
        i = 0
        iData = bytearray(struct.calcsize('iii') * len(triangles))
        for triangle in triangles:
            struct.pack_into('iii', iData, i, triangle[0], triangle[1], triangle[2])
            i += struct.calcsize('iii')
        # base-64 encode the indices
        me['indices'] = 'data:text/plain;base64,' + base64.b64encode(iData).decode('ascii')
        #self.report({'INFO'}, '[Mammoth] Encoded %d vertex indices into %d bytes (%d base-64)' % (len(triangles) * 3, len(iData), len(me['indices'])))
        #self.report({'INFO'}, '; '.join(', '.join(str(i) for i in t) for t in triangles))
        # destroy our temporary mesh
        bpy.data.meshes.remove(mesh, do_unlink=True)
        return me
    # apply modifiers
    #meshes = []
    # TODO: figure out how to do this with multiple objects having modifiers
    # (mostly point objects to the correct modified mesh)
    #if self.apply_mesh_modifiers:
    #    # figure out all objects / meshes that have mods on them
    #    scene = bpy.context.scene
    #    modified_objects = [obj for obj in file_data.get('objects', []) if obj.is_modified(scene, 'PREVIEW')]
    #    for mesh in list(file_data.get('meshes', [])):
    #        mod_users = [obj for obj in modified_objects if obj.data == mesh]
    #
    #        # only convert meshes with modifiers, otherwise each non-modifier
    #        # user ends up with a copy of the mesh and we lose instancing
    #        #meshes.extend([obj.to_mesh(scene, True, 'PREVIEW') for obj in mod_users])
    #        i = 0
    #        for obj in mod_users:
    #            mod_mesh = obj.to_mesh(scene, True, 'PREVIEW')
    #            mod_mesh.name = '%s.mod.%d' % (mesh.name, i)
    #            meshes.append(mod_mesh)
    #            i += 1
    #
    #        # include meshes that don't have any mods on them
    #        if len(mod_users) < mesh.users:
    #            meshes.append(mesh)
    #else:
    #    meshes = list(file_data.get('meshes', []))
    meshes = list(file_data.get('meshes', []))
    # only export meshes that are actually referenced by some object
    return [export_mesh(mesh) for mesh in meshes if mesh.users > 0]
def export_lights(self, file_data):
    """Export every used lamp datablock as a plain dict.

    Args:
        file_data: mapping that may contain a 'lights' iterable of lamps.

    Returns:
        List of dicts (name, colour, type plus type-specific keys) for
        lights with at least one user.

    Raises:
        TypeError: for lamp types other than SUN/HEMI/POINT/SPOT.
    """
    def export_light(light):
        # Pre-multiply colour by energy so the target engine gets one value.
        lit = {
            'name': light.name,
            'colour': (light.color * light.energy)[:]
        }
        if light.type == 'SUN':
            lit['type'] = 'directional'
        elif light.type == 'HEMI':
            lit['type'] = 'hemi'
        elif light.type == 'POINT':
            lit['type'] = 'point'
            lit['distance'] = light.distance
        elif light.type == 'SPOT':
            lit['type'] = 'spot'
            lit['distance'] = light.distance
            lit['angle'] = light.spot_size
            lit['angleBlend'] = light.spot_blend
        else:
            raise TypeError('Unsupported light type \'%s\' (%s)' % (light.type, light.name))
        return lit
    # Bug fix: previously file_data.get('lights') had no default, so a
    # missing 'lights' key made list(None) raise TypeError. Default to []
    # for consistency with every other export_* method.
    lights = list(file_data.get('lights', []))
    return [export_light(light) for light in lights if light.users > 0]
def export_cameras(self, file_data):
    """Serialise every used camera datablock into a plain dict."""
    def export_camera(camera):
        # The clear colour is taken from the first scene's world horizon
        # colour (evaluated lazily, only when a camera is actually exported).
        first_scene = list(file_data.get('scenes', []))[0]
        cam = {
            'name': camera.name,
            'near': camera.clip_start,
            'far': camera.clip_end,
            'clearColour': list(first_scene.world.horizon_color[:]),
            'order': camera.mammoth_render_order,
            'clearFlags': camera.mammoth_clear_flags,
            'viewport': {
                'min': list(camera.mammoth_viewport_min[:]),
                'max': list(camera.mammoth_viewport_max[:]),
            },
        }
        if camera.type == 'ORTHO':
            cam['type'] = 'orthographic'
            cam['ortho_size'] = camera.ortho_scale
        elif camera.type == 'PERSP':
            cam['type'] = 'perspective'
            cam['fov'] = camera.angle_y
            cam['aspect'] = camera.angle_x / camera.angle_y
        else:
            raise TypeError('Unsupported camera type \'%s\' (%s)' % (camera.type, camera.name))
        return cam
    exported = []
    for camera in list(file_data.get('cameras', [])):
        if camera.users > 0:
            exported.append(export_camera(camera))
    return exported
def export_materials(self, file_data):
    """Export every used material as an unlit / diffuse / specular dict.

    Raises:
        TypeError: for shaded materials whose diffuse shader is not
            LAMBERT or whose specular shader is not COOKTORR/PHONG.
    """
    # Ambient colour comes from the first scene's world.
    scene0 = list(file_data.get('scenes', []))[0]
    def export_material(material):
        mat = {
            'name': material.name,
            'textures': []
        }
        if not material.use_shadeless:
            if material.diffuse_shader != 'LAMBERT':
                # Bug fix: the message is now %-formatted; previously the
                # tuple was passed as a second TypeError argument.
                raise TypeError('Unsupported material shader \'%s\' (material %s)' % (material.diffuse_shader, material.name))
            if material.specular_shader != 'COOKTORR' and material.specular_shader != 'PHONG':
                # Bug fix: report the specular shader (was diffuse_shader)
                # and %-format the message.
                raise TypeError('Unsupported material shader \'%s\' (material %s)' % (material.specular_shader, material.name))
        if material.use_shadeless:
            mat['unlit'] = {
                'colour': list((material.diffuse_color * material.diffuse_intensity)[:])
            }
        elif material.specular_intensity == 0.0:
            mat['diffuse'] = {
                'ambient': list((scene0.world.ambient_color * material.ambient)[:]) + [1.0],
                'diffuse': list((material.diffuse_color * material.diffuse_intensity)[:])
            }
        else:
            mat['specular'] = {
                'ambient': list((scene0.world.ambient_color * material.ambient)[:]) + [1.0],
                'diffuse': list((material.diffuse_color * material.diffuse_intensity)[:]),
                'specular': list((material.specular_color * material.specular_intensity)[:]),
                # hardness is normalised to [0, 1] (Blender max is 512)
                'shininess': float(material.specular_hardness) / 512.0
            }
        # Only image textures mapped to diffuse colour are exported.
        textures = [texture for texture in material.texture_slots if texture and texture.texture.type == 'IMAGE']
        diffuseTextures = [t.texture.name for t in textures if t.use_map_color_diffuse]
        if diffuseTextures:
            #mat['textures']['diffuse'] = diffuseTextures
            mat['textures'].extend(diffuseTextures)
        return mat
    materials = list(file_data.get('materials', []))
    return [export_material(material) for material in materials if material.users > 0]
def export_textures(self, file_data):
    """Serialise every used texture datablock into a plain dict."""
    def export_texture(texture):
        exported = {
            'name': texture.name,
            'type': texture.type.lower(),
        }
        # Only image textures carry sampler information. The exact type
        # check (not isinstance) mirrors the original behaviour.
        if type(texture) is bpy.types.ImageTexture:
            wrap_mode = 'repeat' if texture.extension == 'REPEAT' else 'clamp'
            filter_mode = 'bilinear' if texture.use_interpolation else 'point'
            exported['image'] = {
                'name': texture.image.name,
                'wrap': wrap_mode,
                'filter': filter_mode,
            }
        return exported
    results = []
    for texture in list(file_data.get('textures', [])):
        if texture.users > 0:
            results.append(export_texture(texture))
    return results
def export_images(self, file_data):
    """Export all used images, embedding pixel data as PNG data-URIs when packed."""
    def image_to_png_uri(image, asBytes=False):
        """Encode a Blender image as an 8-bit RGBA PNG (bytes or data-URI)."""
        width = image.size[0]
        height = image.size[1]
        # image.pixels is a flat float RGBA array in [0, 1]; quantise to bytes
        buf = bytearray([int(p * 255) for p in image.pixels])
        # reverse the vertical line order and add null bytes at the start
        # (PNG stores rows top-to-bottom and each scanline begins with a
        # filter-type byte, 0 = no filter)
        width_byte_4 = width * 4
        raw_data = b''.join(b'\x00' + buf[span:span + width_byte_4] for span in range((height - 1) * width_byte_4, -1, - width_byte_4))
        def png_pack(png_tag, data):
            # A PNG chunk is: 4-byte length, tag + data, CRC-32 of tag + data.
            chunk_head = png_tag + data
            return (struct.pack("!I", len(data)) +
                    chunk_head +
                    struct.pack("!I", 0xFFFFFFFF & zlib.crc32(chunk_head)))
        # Minimal PNG stream: signature, IHDR (8-bit depth, colour type 6 =
        # truecolour with alpha), zlib-compressed IDAT, empty IEND.
        png_bytes = b''.join([
            b'\x89PNG\r\n\x1a\n',
            png_pack(b'IHDR', struct.pack("!2I5B", width, height, 8, 6, 0, 0, 0)),
            png_pack(b'IDAT', zlib.compress(raw_data, 9)),
            png_pack(b'IEND', b'')])
        if asBytes:
            return png_bytes
        else:
            return 'data:image/png;base64,' + base64.b64encode(png_bytes).decode('ascii')
    def export_image(image):
        """Build the metadata dict for one image datablock."""
        im = {
            'name': image.name,
            'width': image.size[0],
            'height': image.size[1]
        }
        # Embed the pixels when the exporter is configured to pack images or
        # when the image is packed into the .blend (no external file exists).
        if self.pack_images or image.packed_file is not None:
            im['uri'] = image_to_png_uri(image)
        else:
            # normalise Windows path separators for the consuming engine
            im['uri'] = image.filepath.replace('\\', '/')
        return im
    images = list(file_data.get('images', []))
    return [export_image(image) for image in images if image.users > 0]
def export_armatures(self, file_data):
    """Serialise each used armature as a recursive tree of bones."""
    def export_bone(bone):
        bone_matrix = bone.matrix_local
        # TODO: should this be applied or not, given we're stored the parent->child structure
        #if bone.parent:
        #    bone_matrix = bone.parent.matrix_local.inverted() * bone_matrix
        return {
            'name': bone.name,
            'matrix': self.toGLMatrix(bone_matrix),
            'children': [export_bone(child) for child in bone.children],
        }
    def export_armature(armature):
        # Only root bones appear at the top level; children are nested.
        root_bones = [bone for bone in armature.bones if bone.parent is None]
        return {
            'name': armature.name,
            'bones': [export_bone(bone) for bone in root_bones],
        }
    used_armatures = [a for a in list(file_data.get('armatures', [])) if a.users > 0]
    return [export_armature(armature) for armature in used_armatures]
| StarcoderdataPython |
11259850 | """Tests for the Somfy config flow."""
import asyncio
import logging
import time
import pytest
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.config import async_process_ha_core_config
from homeassistant.helpers import config_entry_oauth2_flow
from homeassistant.helpers.network import NoURLAvailableError
from tests.async_mock import patch
from tests.common import MockConfigEntry, mock_platform
TEST_DOMAIN = "oauth2_test"
CLIENT_SECRET = "5678"
CLIENT_ID = "1234"
REFRESH_TOKEN = "<PASSWORD>"
ACCESS_TOKEN_1 = "mock-access-token-1"
ACCESS_TOKEN_2 = "<PASSWORD>-access-token-2"
AUTHORIZE_URL = "https://example.como/auth/authorize"
TOKEN_URL = "https://example.como/auth/token"
@pytest.fixture
async def local_impl(hass):
    """Local implementation."""
    # The HTTP integration must be set up so the OAuth2 callback view exists.
    assert await setup.async_setup_component(hass, "http", {})
    return config_entry_oauth2_flow.LocalOAuth2Implementation(
        hass, TEST_DOMAIN, CLIENT_ID, CLIENT_SECRET, AUTHORIZE_URL, TOKEN_URL
    )
@pytest.fixture
def flow_handler(hass):
    """Return a registered config flow."""
    # Register a stub config_flow platform so the flow can be imported.
    mock_platform(hass, f"{TEST_DOMAIN}.config_flow")

    class TestFlowHandler(config_entry_oauth2_flow.AbstractOAuth2FlowHandler):
        """Test flow handler."""

        DOMAIN = TEST_DOMAIN

        @property
        def logger(self) -> logging.Logger:
            """Return logger."""
            return logging.getLogger(__name__)

        @property
        def extra_authorize_data(self) -> dict:
            """Extra data that needs to be appended to the authorize url."""
            return {"scope": "read write"}

    # Patch the handler registry only for the duration of the test.
    with patch.dict(config_entries.HANDLERS, {TEST_DOMAIN: TestFlowHandler}):
        yield TestFlowHandler
class MockOAuth2Implementation(config_entry_oauth2_flow.AbstractOAuth2Implementation):
    """Mock implementation for testing."""

    @property
    def name(self) -> str:
        """Name of the implementation."""
        return "Mock"

    @property
    def domain(self) -> str:
        """Domain that is providing the implementation."""
        return "test"

    @property
    def extra_authorize_data(self) -> dict:
        """Extra data that needs to be appended to the authorize url."""
        return {"extra": "data"}

    async def async_generate_authorize_url(self, flow_id: str) -> str:
        """Generate a url for the user to authorize."""
        return "http://example.com/auth"

    async def async_resolve_external_data(self, external_data) -> dict:
        """Resolve external data to tokens."""
        # Echo the callback data straight back; tests inject what they need.
        return external_data

    async def _async_refresh_token(self, token: dict) -> dict:
        """Refresh a token."""
        # Tests never refresh through the mock implementation.
        raise NotImplementedError()
def test_inherit_enforces_domain_set():
    """Test we enforce setting DOMAIN."""

    class TestFlowHandler(config_entry_oauth2_flow.AbstractOAuth2FlowHandler):
        """Test flow handler."""

        @property
        def logger(self) -> logging.Logger:
            """Return logger."""
            return logging.getLogger(__name__)

    # Instantiating a subclass that never sets DOMAIN must fail.
    with patch.dict(config_entries.HANDLERS, {TEST_DOMAIN: TestFlowHandler}):
        with pytest.raises(TypeError):
            TestFlowHandler()
async def test_abort_if_no_implementation(hass, flow_handler):
    """Check flow abort when no implementations."""
    flow = flow_handler()
    flow.hass = hass
    # No implementation registered -> user step aborts immediately.
    result = await flow.async_step_user()
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "missing_configuration"
async def test_abort_if_authorization_timeout(hass, flow_handler, local_impl):
    """Check timeout generating authorization url."""
    flow_handler.async_register_implementation(hass, local_impl)

    flow = flow_handler()
    flow.hass = hass

    # Simulate the authorize-url generation timing out.
    with patch.object(
        local_impl, "async_generate_authorize_url", side_effect=asyncio.TimeoutError
    ):
        result = await flow.async_step_user()

    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "authorize_url_timeout"
async def test_abort_if_no_url_available(hass, flow_handler, local_impl):
    """Check no_url_available generating authorization url."""
    flow_handler.async_register_implementation(hass, local_impl)

    flow = flow_handler()
    flow.hass = hass

    # Simulate Home Assistant having no external/internal URL configured.
    with patch.object(
        local_impl, "async_generate_authorize_url", side_effect=NoURLAvailableError
    ):
        result = await flow.async_step_user()

    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "no_url_available"
async def test_abort_if_oauth_error(
    hass, flow_handler, local_impl, aiohttp_client, aioclient_mock, current_request
):
    """Check bad oauth token."""
    await async_process_ha_core_config(
        hass,
        {"external_url": "https://example.com"},
    )
    flow_handler.async_register_implementation(hass, local_impl)
    config_entry_oauth2_flow.async_register_implementation(
        hass, TEST_DOMAIN, MockOAuth2Implementation()
    )

    result = await hass.config_entries.flow.async_init(
        TEST_DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pick_implementation"

    # Pick implementation
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"implementation": TEST_DOMAIN}
    )

    state = config_entry_oauth2_flow._encode_jwt(hass, {"flow_id": result["flow_id"]})

    assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
    assert result["url"] == (
        f"{AUTHORIZE_URL}?response_type=code&client_id={CLIENT_ID}"
        "&redirect_uri=https://example.com/auth/external/callback"
        f"&state={state}&scope=read+write"
    )

    # Simulate the user completing the external authorize step.
    client = await aiohttp_client(hass.http.app)
    resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
    assert resp.status == 200
    assert resp.headers["content-type"] == "text/html; charset=utf-8"

    # A non-numeric "expires_in" makes token resolution fail -> oauth_error.
    aioclient_mock.post(
        TOKEN_URL,
        json={
            "refresh_token": REFRESH_TOKEN,
            "access_token": ACCESS_TOKEN_1,
            "type": "bearer",
            "expires_in": "badnumber",
        },
    )

    result = await hass.config_entries.flow.async_configure(result["flow_id"])

    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "oauth_error"
async def test_step_discovery(hass, flow_handler, local_impl):
    """Check flow triggers from discovery."""
    await async_process_ha_core_config(
        hass,
        {"external_url": "https://example.com"},
    )
    flow_handler.async_register_implementation(hass, local_impl)
    config_entry_oauth2_flow.async_register_implementation(
        hass, TEST_DOMAIN, MockOAuth2Implementation()
    )

    # A zeroconf discovery should land on the implementation picker form.
    result = await hass.config_entries.flow.async_init(
        TEST_DOMAIN, context={"source": config_entries.SOURCE_ZEROCONF}
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pick_implementation"
async def test_abort_discovered_multiple(hass, flow_handler, local_impl):
    """Test if aborts when discovered multiple times."""
    await async_process_ha_core_config(
        hass,
        {"external_url": "https://example.com"},
    )
    flow_handler.async_register_implementation(hass, local_impl)
    config_entry_oauth2_flow.async_register_implementation(
        hass, TEST_DOMAIN, MockOAuth2Implementation()
    )

    # First discovery starts a flow...
    result = await hass.config_entries.flow.async_init(
        TEST_DOMAIN, context={"source": config_entries.SOURCE_SSDP}
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pick_implementation"

    # ...a second discovery (any source) must abort as a duplicate.
    result = await hass.config_entries.flow.async_init(
        TEST_DOMAIN, context={"source": config_entries.SOURCE_ZEROCONF}
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_in_progress"
async def test_abort_discovered_existing_entries(hass, flow_handler, local_impl):
    """Test if abort discovery when entries exists."""
    await async_process_ha_core_config(
        hass,
        {"external_url": "https://example.com"},
    )
    flow_handler.async_register_implementation(hass, local_impl)
    config_entry_oauth2_flow.async_register_implementation(
        hass, TEST_DOMAIN, MockOAuth2Implementation()
    )

    # Pre-existing config entry for the domain...
    entry = MockConfigEntry(
        domain=TEST_DOMAIN,
        data={},
    )
    entry.add_to_hass(hass)

    # ...so a new discovery flow must abort immediately.
    result = await hass.config_entries.flow.async_init(
        TEST_DOMAIN, context={"source": config_entries.SOURCE_SSDP}
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
async def test_full_flow(
    hass, flow_handler, local_impl, aiohttp_client, aioclient_mock, current_request
):
    """Check full flow."""
    await async_process_ha_core_config(
        hass,
        {"external_url": "https://example.com"},
    )
    flow_handler.async_register_implementation(hass, local_impl)
    config_entry_oauth2_flow.async_register_implementation(
        hass, TEST_DOMAIN, MockOAuth2Implementation()
    )

    result = await hass.config_entries.flow.async_init(
        TEST_DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pick_implementation"

    # Pick implementation
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"implementation": TEST_DOMAIN}
    )

    state = config_entry_oauth2_flow._encode_jwt(hass, {"flow_id": result["flow_id"]})

    assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
    assert result["url"] == (
        f"{AUTHORIZE_URL}?response_type=code&client_id={CLIENT_ID}"
        "&redirect_uri=https://example.com/auth/external/callback"
        f"&state={state}&scope=read+write"
    )

    # Simulate the user authorizing via the external callback endpoint.
    client = await aiohttp_client(hass.http.app)
    resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
    assert resp.status == 200
    assert resp.headers["content-type"] == "text/html; charset=utf-8"

    # Token endpoint returns a valid token -> the flow creates an entry.
    aioclient_mock.post(
        TOKEN_URL,
        json={
            "refresh_token": REFRESH_TOKEN,
            "access_token": ACCESS_TOKEN_1,
            "type": "bearer",
            "expires_in": 60,
        },
    )

    result = await hass.config_entries.flow.async_configure(result["flow_id"])

    assert result["data"]["auth_implementation"] == TEST_DOMAIN

    # expires_at is computed from wall-clock time; drop it before comparing.
    result["data"]["token"].pop("expires_at")
    assert result["data"]["token"] == {
        "refresh_token": REFRESH_TOKEN,
        "access_token": ACCESS_TOKEN_1,
        "type": "bearer",
        "expires_in": 60,
    }

    entry = hass.config_entries.async_entries(TEST_DOMAIN)[0]

    assert (
        await config_entry_oauth2_flow.async_get_config_entry_implementation(
            hass, entry
        )
        is local_impl
    )
async def test_local_refresh_token(hass, local_impl, aioclient_mock):
    """Test we can refresh token."""
    aioclient_mock.post(
        TOKEN_URL, json={"access_token": ACCESS_TOKEN_2, "expires_in": 100}
    )

    new_tokens = await local_impl.async_refresh_token(
        {
            "refresh_token": REFRESH_TOKEN,
            "access_token": ACCESS_TOKEN_1,
            "type": "bearer",
            "expires_in": 60,
        }
    )
    # expires_at is wall-clock based; remove it before comparing.
    new_tokens.pop("expires_at")

    # The refresh_token is carried over even though the server omitted it.
    assert new_tokens == {
        "refresh_token": REFRESH_TOKEN,
        "access_token": ACCESS_TOKEN_2,
        "type": "bearer",
        "expires_in": 100,
    }

    assert len(aioclient_mock.mock_calls) == 1
    assert aioclient_mock.mock_calls[0][2] == {
        "client_id": CLIENT_ID,
        "client_secret": CLIENT_SECRET,
        "grant_type": "refresh_token",
        "refresh_token": REFRESH_TOKEN,
    }
async def test_oauth_session(hass, flow_handler, local_impl, aioclient_mock):
    """Test the OAuth2 session helper."""
    flow_handler.async_register_implementation(hass, local_impl)

    aioclient_mock.post(
        TOKEN_URL, json={"access_token": ACCESS_TOKEN_2, "expires_in": 100}
    )
    aioclient_mock.post("https://example.com", status=201)

    config_entry = MockConfigEntry(
        domain=TEST_DOMAIN,
        data={
            "auth_implementation": TEST_DOMAIN,
            "token": {
                "refresh_token": REFRESH_TOKEN,
                "access_token": ACCESS_TOKEN_1,
                "expires_in": 10,
                "expires_at": 0,  # Forces a refresh,
                "token_type": "bearer",
                "random_other_data": "should_stay",
            },
        },
    )

    now = time.time()
    session = config_entry_oauth2_flow.OAuth2Session(hass, config_entry, local_impl)
    resp = await session.async_request("post", "https://example.com")
    assert resp.status == 201

    # Refresh token, make request
    assert len(aioclient_mock.mock_calls) == 2
    # Second call is the actual request, carrying the refreshed token.
    assert (
        aioclient_mock.mock_calls[1][3]["authorization"] == f"Bearer {ACCESS_TOKEN_2}"
    )

    assert config_entry.data["token"]["refresh_token"] == REFRESH_TOKEN
    assert config_entry.data["token"]["access_token"] == ACCESS_TOKEN_2
    assert config_entry.data["token"]["expires_in"] == 100
    assert config_entry.data["token"]["random_other_data"] == "should_stay"
    assert round(config_entry.data["token"]["expires_at"] - now) == 100
async def test_oauth_session_with_clock_slightly_out_of_sync(
    hass, flow_handler, local_impl, aioclient_mock
):
    """Test the OAuth2 session helper when the remote clock is slightly out of sync."""
    flow_handler.async_register_implementation(hass, local_impl)

    aioclient_mock.post(
        TOKEN_URL, json={"access_token": ACCESS_TOKEN_2, "expires_in": 19}
    )
    aioclient_mock.post("https://example.com", status=201)

    config_entry = MockConfigEntry(
        domain=TEST_DOMAIN,
        data={
            "auth_implementation": TEST_DOMAIN,
            "token": {
                "refresh_token": REFRESH_TOKEN,
                "access_token": ACCESS_TOKEN_1,
                "expires_in": 19,
                # 19s remaining is inside the helper's clock-skew margin,
                # so the session still refreshes before using the token.
                "expires_at": time.time() + 19,  # Forces a refresh,
                "token_type": "bearer",
                "random_other_data": "should_stay",
            },
        },
    )

    now = time.time()
    session = config_entry_oauth2_flow.OAuth2Session(hass, config_entry, local_impl)
    resp = await session.async_request("post", "https://example.com")
    assert resp.status == 201

    # Refresh token, make request
    assert len(aioclient_mock.mock_calls) == 2
    assert (
        aioclient_mock.mock_calls[1][3]["authorization"] == f"Bearer {ACCESS_TOKEN_2}"
    )

    assert config_entry.data["token"]["refresh_token"] == REFRESH_TOKEN
    assert config_entry.data["token"]["access_token"] == ACCESS_TOKEN_2
    assert config_entry.data["token"]["expires_in"] == 19
    assert config_entry.data["token"]["random_other_data"] == "should_stay"
    assert round(config_entry.data["token"]["expires_at"] - now) == 19
async def test_oauth_session_no_token_refresh_needed(
    hass, flow_handler, local_impl, aioclient_mock
):
    """Test the OAuth2 session helper when no refresh is needed."""
    flow_handler.async_register_implementation(hass, local_impl)

    aioclient_mock.post("https://example.com", status=201)

    config_entry = MockConfigEntry(
        domain=TEST_DOMAIN,
        data={
            "auth_implementation": TEST_DOMAIN,
            "token": {
                "refresh_token": REFRESH_TOKEN,
                "access_token": ACCESS_TOKEN_1,
                "expires_in": 500,
                "expires_at": time.time() + 500,  # Should NOT refresh
                "token_type": "bearer",
                "random_other_data": "should_stay",
            },
        },
    )

    now = time.time()
    session = config_entry_oauth2_flow.OAuth2Session(hass, config_entry, local_impl)
    resp = await session.async_request("post", "https://example.com")
    assert resp.status == 201

    # make request (no refresh) - the original access token is still used
    assert len(aioclient_mock.mock_calls) == 1
    assert (
        aioclient_mock.mock_calls[0][3]["authorization"] == f"Bearer {ACCESS_TOKEN_1}"
    )

    assert config_entry.data["token"]["refresh_token"] == REFRESH_TOKEN
    assert config_entry.data["token"]["access_token"] == ACCESS_TOKEN_1
    assert config_entry.data["token"]["expires_in"] == 500
    assert config_entry.data["token"]["random_other_data"] == "should_stay"
    assert round(config_entry.data["token"]["expires_at"] - now) == 500
async def test_implementation_provider(hass, local_impl):
    """Test providing an implementation provider."""
    assert (
        await config_entry_oauth2_flow.async_get_implementations(hass, TEST_DOMAIN)
        == {}
    )

    mock_domain_with_impl = "some_domain"

    config_entry_oauth2_flow.async_register_implementation(
        hass, mock_domain_with_impl, local_impl
    )

    assert await config_entry_oauth2_flow.async_get_implementations(
        hass, mock_domain_with_impl
    ) == {TEST_DOMAIN: local_impl}

    provider_source = {}

    async def async_provide_implementation(hass, domain):
        """Mock implementation provider."""
        return provider_source.get(domain)

    config_entry_oauth2_flow.async_add_implementation_provider(
        hass, "cloud", async_provide_implementation
    )

    # Provider returns None for the domain -> result unchanged.
    assert await config_entry_oauth2_flow.async_get_implementations(
        hass, mock_domain_with_impl
    ) == {TEST_DOMAIN: local_impl}

    provider_source[
        mock_domain_with_impl
    ] = config_entry_oauth2_flow.LocalOAuth2Implementation(
        hass, "cloud", CLIENT_ID, CLIENT_SECRET, AUTHORIZE_URL, TOKEN_URL
    )

    # Once the provider has an implementation, it is merged into the result.
    assert await config_entry_oauth2_flow.async_get_implementations(
        hass, mock_domain_with_impl
    ) == {TEST_DOMAIN: local_impl, "cloud": provider_source[mock_domain_with_impl]}
| StarcoderdataPython |
330672 | from keras.preprocessing.text import Tokenizer, one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras import layers
import numpy as np
from numpy import array
from keras.layers import Dense
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from keras.layers.embeddings import Embedding
from keras.utils import np_utils
from keras.utils import to_categorical
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import classification_report
from datetime import datetime
from sklearn.preprocessing import LabelEncoder
from joblib import dump
import pandas as pd
import os
import sys
import matplotlib.pyplot as plt
from sklearn.externals import joblib
plt.style.use('ggplot')
def plot_history(history):
    """Plot training/validation accuracy and loss curves side by side."""
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    # One point per epoch, 1-indexed for readability.
    epochs_range = range(1, len(loss) + 1)

    plt.figure(figsize=(12, 5))

    plt.subplot(1, 2, 1)
    plt.plot(epochs_range, acc, 'b', label='Training accuracy')
    plt.plot(epochs_range, val_acc, 'g', label='Validation accuracy')
    plt.title('Training and validation accuracy')
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.plot(epochs_range, loss, 'b', label='Training loss')
    plt.plot(epochs_range, val_loss, 'g', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()

    plt.show()
# Timestamped names so every training run writes its own artefacts.
data_today = datetime.now().strftime("_%d_%m_%Y__%H_%M")
current_directory = os.getcwd()
cnn_name = "CNN_{}.h5".format(data_today)
# NOTE(review): sub-paths are joined with '\\', so these paths are
# Windows-specific - confirm before running elsewhere.
output_path = os.path.join(current_directory, '\\'.join(['models', cnn_name]))
log_file_name = os.path.join(current_directory, '\\'.join(['log', 'cnn_log.txt']))
tokenizer_path = os.path.join(current_directory, '\\'.join(['transformer', 'tokenizer{}.pkl'.format(data_today)]))
def create_model(num_filters, kernel_size, vocab_size, embedding_dim, maxlen, test=False):
    """Build and compile a 1D-CNN text classifier.

    The output layer has 3 softmax units during the hyper-parameter search
    and 4 units when ``test`` is True (final model), exactly as before.
    """
    output_units = 4 if test else 3
    model = Sequential()
    model.add(layers.Embedding(vocab_size, embedding_dim, input_length=maxlen))
    model.add(layers.Conv1D(num_filters, kernel_size, activation='relu'))
    model.add(layers.GlobalMaxPooling1D())
    model.add(layers.Dense(10, activation='relu'))
    model.add(layers.Dense(output_units, activation='softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
# Load the cleaned dataset (semicolon-free CSV with FRASE/CLASSE columns).
df_clean = "df_last.csv"
dataset = pd.read_csv(os.path.join(current_directory, '\\'.join(['df_pulito', df_clean])))

# OVERPROVISIONING
# Duplicate the rows of classes 2 and 3 to rebalance the training set.
over_prov = dataset[(dataset.CLASSE == 2) | (dataset.CLASSE == 3)]
dataset = pd.concat([dataset, over_prov])

sentences = dataset['FRASE']
y = dataset['CLASSE']

S_train, S_test, y_train, y_test = train_test_split(sentences, y, test_size=0.25, random_state=42)

# Tokenize words (top 1600)
tokenizer = Tokenizer(num_words=1600)
tokenizer.fit_on_texts(S_train)
# Persist the fitted tokenizer so inference can reuse the same vocabulary.
joblib.dump(tokenizer, tokenizer_path)
X_train = tokenizer.texts_to_sequences(S_train)
X_test = tokenizer.texts_to_sequences(S_test)

# Main settings
epochs = 3
maxlen = 80
batch_size = 64
embedding_dim = 100

# Pad sequences with zeros
X_train = pad_sequences(X_train, padding='post', maxlen=maxlen)
X_test = pad_sequences(X_test, padding='post', maxlen=maxlen)

# Adding 1 because of reserved 0 index
vocab_size = len(tokenizer.word_index) + 1

# Parameter grid for grid search
param_grid = dict(num_filters=[96, 128, 160, 192],
                  kernel_size=[5, 7, 9, 11],
                  vocab_size=[vocab_size],
                  embedding_dim=[embedding_dim, 120, 130, 140],
                  maxlen=[70, maxlen, 90, 100])

model = KerasClassifier(build_fn=create_model,
                        epochs=epochs, batch_size=batch_size,
                        verbose=1)
# Randomised search over the grid (5 samples, 4-fold CV).
grid = RandomizedSearchCV(estimator=model, param_distributions=param_grid,
                          cv=4, verbose=1, n_iter=5, random_state=42)
grid_result = grid.fit(X_train, y_train)

params = grid_result.best_params_
# Retrain a fresh model with the best hyper-parameters
# (test=True -> 4 output units, see create_model).
test_model = create_model(num_filters=params['num_filters'],
                          kernel_size=params['kernel_size'],
                          vocab_size=params['vocab_size'],
                          embedding_dim=params['embedding_dim'],
                          maxlen=params['maxlen'],
                          test=True)
y_train = to_categorical(y_train)
history = test_model.fit(X_train, y_train,
                         validation_split=0.1,
                         epochs=epochs,
                         batch_size=batch_size)

# save the model
test_model.save(output_path)
plot_history(history)

y_pred = test_model.predict_classes(X_test, batch_size=batch_size)
print(classification_report(y_test, y_pred))

# save the model's parameters
file_object = open(log_file_name, 'a')
text_to_write = ('\n{}\n\n{}\n\n{}\n{}'.format(cnn_name, grid_result.best_params_, classification_report(y_test, y_pred), '*'*10))
file_object.write(text_to_write)
file_object.close()
| StarcoderdataPython |
5023164 | <reponame>ghn/django-template<filename>{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/config/settings/heroku.py
from . import get_env_variable
from .base import *  # noqa
import django_heroku

# Layer Heroku-specific configuration (database URL, static files, logging,
# etc.) on top of the base settings imported above.
django_heroku.settings(locals())

DEBUG = False
| StarcoderdataPython |
8131633 | '''
(c) University of Liverpool 2019
All rights reserved.
@author: neilswainston
'''
# pylint: disable=broad-except
# pylint: disable=invalid-name
# pylint: disable=wrong-import-order
from rdkit.Chem import inchi, rdmolfiles
from liv_ms.data import rt
import pandas as pd
def get_rt_data(filename, num_spec=1e32, regen_stats=False):
    '''Get RT data.

    Args:
        filename: path to a semicolon-separated CSV with 'rt' (seconds)
            and 'inchi' columns.
        num_spec: maximum number of rows (spectra) to read.
        regen_stats: NOTE(review): currently unused - confirm intent.

    Returns:
        Result of rt.get_stats on the prepared DataFrame.
    '''
    # Get spectra:
    df = pd.read_csv(filename, sep=';', nrows=num_spec)

    # Convert RT to minutes and rename column:
    df['retention time'] = df['rt'] / 60.0

    # Convert InChI to SMILES (rows that fail to parse are dropped):
    df['smiles'] = df['inchi'].apply(_get_smiles)
    df.dropna(subset=['smiles'], inplace=True)

    # Add values:
    # Constant chromatography metadata, replicated per row: column
    # dimensions, flow-rate timecourse and gradient timecourse.
    df['column values'] = [[2.1, 50.0, 1.8, 1.0] for _ in df.index]
    df['flow rate values'] = [[0.1] * 60 for _ in df.index]

    grad_terms = [[0.0, 0.05], [3.0, 0.05],
                  [5.0, 0.5], [15.0, 0.85], [18.0, 0.85]]
    grad_vals = rt.get_timecourse_vals(list(zip(*grad_terms)))
    df['gradient values'] = [grad_vals for _ in df.index]

    return rt.get_stats(df)
def _get_smiles(inchi_term):
    '''Get smiles.'''
    # Any RDKit parse/conversion failure (warnings are promoted to errors)
    # yields None so callers can drop the offending row.
    try:
        molecule = inchi.MolFromInchi(inchi_term, treatWarningAsError=True)
        return rdmolfiles.MolToSmiles(molecule)
    except Exception:
        return None
# get_rt_data('data/SMRT_dataset.csv',
# num_spec=10).to_csv('out.csv', index=False)
| StarcoderdataPython |
3276244 | # Generated by Django 3.1.3 on 2020-12-05 19:42
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration: field renames only (no data changes)."""

    dependencies = [
        ('store', '0005_auto_20201203_1948'),
    ]

    operations = [
        migrations.RenameField(
            model_name='branchcategory',
            old_name='bc_type',
            new_name='category',
        ),
        migrations.RenameField(
            model_name='category',
            old_name='category',
            new_name='name',
        ),
        migrations.RenameField(
            model_name='product',
            old_name='p_type',
            new_name='branch_type',
        ),
    ]
| StarcoderdataPython |
1887203 | <gh_stars>0
#!/usr/bin/python
"""Calculates optical properties of large hexagonal ice grains.
This file calculates the optical properties (single scattering albedo, asymmetry
parameter, mass absorption coefficient and extinction, scattering and absorption cross
sections) for ice grains shaped as arbitrarily large hexagonal plates or columns.
The optical properties are then saved into netCDF files in the correct format for
loading into BioSNICAR.
The main function calc_optical_params() is based upon the equations of
Diedenhoven et al (2014) who provided a python script as supplementary material
for their paper. The original code can be downloaded from:
https://www.researchgate.net/publication/259821840_ice_OP_parameterization
The optical properties are calculated using a parameterization of geometric optics
calculations (Macke et al., JAS, 1996).
There are no user defined inputs for the preprocessing function, it can simply be
run as
reals, imags, wavelengths = preprocess()
The calc_optical_params() fnction takes several inputs. reals, imags and wavelengths
are output by preprocess() and side_length and depth are user defined. These are the two
parameters that control the dimensions of the ice crystals. Side_length is the length
in microns of one side of the hexagnal face of the crystal, depth is the column length
also in microns describing the z dimension. The code then calculates volume, apotherm,
aspect ratio, area etc inside the function. The optical parameters are returned.
Optional plots and printed values for the optical params are provided by setting
plots to true and the dimensions of the crystals can be reported by setting
report_dims to true in the function call.
The final function, net_cdf_updater() is used to dump the optical parameters and
metadata into a netcdf file and save it into the working directory to be used as
a lookup library for the two-stream radiative transfer model BoSNICAR_GO.
The function calls are provided at the bottom of this script in a loop, where the
user can define the range of side lengths and depths to be looped over.
NOTE: The extinction coefficient in the current implementation is 2 for all size
parameters as assumed in the conventional geometric optics approximation.
"""
import sys
sys.path.append("./src")
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xarray as xr
# Set paths
SAVEPATH = "./Data/GO_files/480band/"
DATAPATH = "./Data/rfidx_ice.nc"
RI_SOURCE = 2
def preprocess_RI(ri_source, path_to_ri):
    """Preprocessing of wavelength and refractive-index (RI) data.

    Ensures the wavelengths and real/imaginary parts of the refractive index
    for ice are provided in the correct waveband and spectral resolution to
    interface with BioSNICAR. The refractive indices are taken from Warren
    and Brandt 2008 and related datasets.

    The source of the refractive index data is controlled by ``ri_source``
    where 0 = Warren 1984, 1 = Warren 2008 and 2 = Picard 2016. The selected
    arrays are returned as numpy arrays for the geometrical optics function.

    Args:
        ri_source: choice of refractive index dataset (0, 1 or 2).
        path_to_ri: path to the netCDF file containing the RI data.

    Returns:
        reals: numpy array of real parts of RI by wavelength.
        imags: numpy array of imaginary parts of RI by wavelength.
        wavelengths: numpy array of wavelengths (um).

    Raises:
        ValueError: if ``ri_source`` is not 0, 1 or 2.
    """
    refidx = xr.open_dataset(path_to_ri)
    wavelengths = refidx["wvl"].values
    if ri_source == 0:
        reals = refidx["re_Wrn84"].values
        imags = refidx["im_Wrn84"].values
    elif ri_source == 1:
        reals = refidx["re_Wrn08"].values
        imags = refidx["im_Wrn08"].values
    elif ri_source == 2:
        reals = refidx["re_Pic16"].values
        imags = refidx["im_Pic16"].values
    else:
        # Bug fix: an unrecognised source previously fell through and raised
        # a confusing NameError on `reals` at the return statement below.
        raise ValueError(f"ri_source must be 0, 1 or 2, got {ri_source!r}")
    return reals, imags, wavelengths
def calc_optical_params(
    side_length,
    depth,
    reals,
    imags,
    wavelengths,
    plots=False,
    report_dims=False,
):
    """Calculates single scattering optical properties.

    Van Diedenhoven's parameterisation is used to calculate the single
    scattering optical properties of hexagonal ice columns/plates of the
    given dimensions, one value per wavelength.

    Args:
        side_length: length of side of hexagonal face (um)
        depth: length of hexagonal column (um)
        reals: numpy array of real parts of RI by wavelength
        imags: numpy array of imaginary parts of RI by wavelength
        wavelengths: numpy array of wavelengths (um)
        plots: Boolean to toggle plotting OPs
        report_dims: Boolean to toggle printing crystal dimensions to terminal

    Returns:
        g_list: asymmetry parameter per wavelength
        ssa_list: single scattering albedo per wavelength
        mac_list: mass absorption coefficient per wavelength
        depth: length of hexagonal column (um), echoed back
        side_length: length of side of hexagonal face (um), echoed back
        diameter: diameter across hexagonal face (um)
    """
    ssa_list = []
    g_list = []
    abs_xs_list = []
    mac_list = []

    # Crystal geometry (all lengths in um); formulas for a hexagonal prism.
    V = 1.5 * np.sqrt(3) * side_length**2 * depth  # volume
    Area_total = (
        3 * side_length * (np.sqrt(3) * side_length + depth * 2)
    )  # total surface area
    Area = Area_total / 4  # projected area
    apothem = (2 * Area) / (
        depth * 6
    )  # apothem is distance from centre point to midpoint of a side for hexagon
    diameter = 2 * apothem  # midpoint of one side to midpoint of opposite side
    ar = depth / side_length  # aspect ratio: selects plate vs column branches below
    delta = 0.3

    for i in np.arange(0, len(wavelengths), 1):
        # NOTE(review): the loop variable `i` is reused by several inner loops
        # below; that is harmless only because mr/mi/wl are captured here,
        # before any inner loop runs.
        mr = reals[i]
        mi = imags[i]
        wl = wavelengths[i]

        # ------------------------------------------------
        # ---------- input tables (see Figs. 4 and 7) ----
        # ------------------------------------------------
        # SSA parameterization
        a = [0.457593, 20.9738]  # for ar=1

        # SSA correction for AR != 1 (Table 2)
        nc1 = 3
        nc2 = 4
        c_ij = np.zeros(nc1 * nc2 * 2).reshape((nc1, nc2, 2))
        # ---------- Plates ----------
        c_ij[:, 0, 0] = [0.000527060, 0.309748, -2.58028]
        c_ij[:, 1, 0] = [0.00867596, -0.650188, -1.34949]
        c_ij[:, 2, 0] = [0.0382627, -0.198214, -0.674495]
        c_ij[:, 3, 0] = [0.0108558, -0.0356019, -0.141318]
        # --------- Columns ----------
        c_ij[:, 0, 1] = [0.000125752, 0.387729, -2.38400]
        c_ij[:, 1, 1] = [0.00797282, 0.456133, 1.29446]
        c_ij[:, 2, 1] = [0.00122800, -0.137621, -1.05868]
        c_ij[:, 3, 1] = [0.000212673, 0.0364655, 0.339646]

        # diffraction g parameterization
        b_gdiffr = [-0.822315, -1.20125, 0.996653]

        # raytracing g parameterization ar=1
        p_a_eq_1 = [0.780550, 0.00510997, -0.0878268, 0.111549, -0.282453]

        # ---- g correction for AR != 1 (Also applied to AR=1 as plate) (Table 3)
        nq1 = 3
        nq2 = 7
        q_ij = np.zeros(nq1 * nq2 * 2).reshape((nq1, nq2, 2))
        # ---------- Plates ----------
        q_ij[:, 0, 0] = [-0.00133106, -0.000782076, 0.00205422]
        q_ij[:, 1, 0] = [0.0408343, -0.00162734, 0.0240927]
        q_ij[:, 2, 0] = [0.525289, 0.418336, -0.818352]
        q_ij[:, 3, 0] = [0.443151, 1.53726, -2.40399]
        q_ij[:, 4, 0] = [0.00852515, 1.88625, -2.64651]
        q_ij[:, 5, 0] = [-0.123100, 0.983854, -1.29188]
        q_ij[:, 6, 0] = [-0.0376917, 0.187708, -0.235359]
        # ---------- Columns ----------
        q_ij[:, 0, 1] = [-0.00189096, 0.000637430, 0.00157383]
        q_ij[:, 1, 1] = [0.00981029, 0.0409220, 0.00908004]
        q_ij[:, 2, 1] = [0.732647, 0.0539796, -0.665773]
        q_ij[:, 3, 1] = [-1.59927, -0.500870, 1.86375]
        q_ij[:, 4, 1] = [1.54047, 0.692547, -2.05390]
        q_ij[:, 5, 1] = [-0.707187, -0.374173, 1.01287]
        q_ij[:, 6, 1] = [0.125276, 0.0721572, -0.186466]

        # --------- refractive index correction of asymmetry parameter
        c_g = np.zeros(4).reshape(2, 2)
        c_g[:, 0] = [0.96025050, 0.42918060]
        c_g[:, 1] = [0.94179149, -0.21600979]
        # ---- correction for absorption
        s = [1.00014, 0.666094, -0.535922, -11.7454, 72.3600, -109.940]
        u = [-0.213038, 0.204016]

        # -------- selector for plates or columns
        if ar > 1.0:
            col_pla = 1  # columns
        else:
            col_pla = 0  # plates & compacts

        # ------------------------------------------------
        # ------------ Size parameters -------------------
        # ------------------------------------------------
        # --- absorption size parameter (Fig. 4, box 1)
        Chi_abs = mi / wl * V / Area
        # ----- scattering size parameter (Fig. 7, box 1)
        Chi_scat = 2.0 * np.pi * np.sqrt(Area / np.pi) / wl

        # ------------------------------------------------
        # ------------ SINGLE SCATTERING ALBEDO ----------
        # ------------------------------------------------
        if Chi_abs > 0:
            w_1 = 1.0 - a[0] * (
                1.0 - np.exp(-Chi_abs * a[1])
            )  # for AR=1 (Fig. 4, box 2)
            l = np.zeros(nc1)
            for i in range(nc2):
                l[:] += c_ij[:, i, col_pla] * np.log10(ar) ** i  # (Fig. 4, box 3)
            D_w = (
                l[0]
                * np.exp(-((np.log(Chi_abs) - l[2]) ** 2) / (2.0 * l[1] ** 2))
                / (Chi_abs * l[1] * np.sqrt(2.0 * np.pi))
            )  # (Fig. 4, box 3)
            w = w_1 + D_w  # (Fig. 4, box 4)
        else:
            # Non-absorbing wavelength: single scattering albedo is exactly 1.
            w = 1.0

        # ------------------------------------------------
        # --------------- ASYMMETRY PARAMETER ------------
        # ------------------------------------------------
        # diffraction g
        g_diffr = (
            b_gdiffr[0] * np.exp(b_gdiffr[1] * np.log(Chi_scat)) + b_gdiffr[2]
        )  # (Fig. 7, box 2)
        g_diffr = max([g_diffr, 0.5])

        # raytracing g at 862 nm
        g_1 = 0.0
        for i in range(len(p_a_eq_1)):
            g_1 += p_a_eq_1[i] * delta**i  # (Fig. 7, box 3)

        p_delta = np.zeros(nq1)
        for i in range(nq2):
            p_delta += q_ij[:, i, col_pla] * np.log10(ar) ** i  # (Fig. 7, box 4)

        Dg = 0.0
        for i in range(nq1):
            Dg += p_delta[i] * delta**i  # (Fig. 7, box 4)
        g_rt = 2.0 * (g_1 + Dg) - 1.0  # (Fig. 7, box 5)

        # --------- refractive index correction of asymmetry parameter (Fig. 7, box 6)
        epsilon = c_g[0, col_pla] + c_g[1, col_pla] * np.log10(ar)
        mr1 = 1.3038  # reference value @ 862 nm band
        C_m = abs(
            (mr1 - epsilon) / (mr1 + epsilon) * (mr + epsilon) / (mr - epsilon)
        )  # abs function added according to corrigendum to the original paper

        # ---- correction for absorption (Fig. 7, box 7)
        if Chi_abs > 0:
            C_w0 = 0.0
            for i in range(len(s)):
                C_w0 += s[i] * (1.0 - w) ** i
            k = np.log10(ar) * u[col_pla]
            C_w1 = k * w - k + 1.0
            C_w = C_w0 * C_w1
        else:
            C_w = 1.0

        # raytracing g at required wavelength
        g_rt_corr = g_rt * C_m * C_w  # (Fig. 7, box 9)

        # ----- Calculate total asymmetry parameter and check g_tot <= 1 (Fig. 7, box 9)
        g_tot = 1.0 / (2.0 * w) * ((2.0 * w - 1.0) * g_rt_corr + g_diffr)
        g_tot = min([g_tot, 1.0])

        # NOTE(review): absorption cross-section formula below looks unusual
        # (V inside the exponential, Area*wl in the denominator) — confirm
        # against the Diedenhoven et al. (2014) supplementary code.
        absXS = Area * (1 - ((np.exp(-4 * np.pi * mi * V)) / (Area * wl)))
        # 914 presumably ~ density of ice (kg m^-3) — TODO confirm units.
        MAC = (
            absXS / V * 914
        )  # divide by volume*mass to give mass absorption coefficient

        ssa_list.append(w)
        g_list.append(g_tot)
        abs_xs_list.append(absXS)
        mac_list.append(MAC)

    if plots:
        # Quick-look spectral plots of the three main optical properties.
        plt.figure(1)
        plt.plot(wavelengths, ssa_list), plt.ylabel("SSA"), plt.xlabel(
            "Wavelength (um)"
        ), plt.grid(b=None)
        plt.figure(2)
        plt.plot(wavelengths, g_list), plt.ylabel("Assymetry Parameter"), plt.xlabel(
            "Wavelength (um)"
        ), plt.grid(b=None)
        plt.figure(3)
        plt.plot(wavelengths, mac_list), plt.ylabel(
            "Mass Absorption Cross Section"
        ), plt.xlabel("Wavelength (um)"), plt.grid(b=None)

    if report_dims:
        # Report crystal dimensions in cm / cm^3 for convenience.
        print("Width of hexagonal plane = ", np.round(diameter / 10000, 2), " (cm)")
        print("depth of hexagonal column = ", depth / 10000, " (cm)")
        print("aspect ratio = ", ar)
        print("ice crystal volume = ", np.round(V * 1e-12, 2), " (cm^3)")

    return g_list, ssa_list, mac_list, depth, side_length, diameter
def net_cdf_updater(
    ri_source, savepath, g_list, ssa_list, mac_list, depth, side_length, density
):
    """Updates a template NetCDF file with new optical-property data.

    Args:
        ri_source: choice of refractive index dataset (0/1/2, see preprocess_RI).
        savepath: directory prefix the output file is written under.
        g_list: asymmetry parameter per wavelength.
        ssa_list: single scattering albedo per wavelength.
        mac_list: mass absorption coefficient per wavelength.
        depth: length of hexagonal column (um).
        side_length: length of side of hexagonal face (um).
        density: density of material in kg/m3.

    Returns:
        None, but saves a NetCDF file to savepath.

    Raises:
        ValueError: if ``ri_source`` is not 0, 1 or 2.
    """
    filepath_in = savepath
    mac_in = np.squeeze(mac_list)
    ssa_in = np.squeeze(ssa_list)
    g_in = np.squeeze(g_list)

    # Subdirectory and filename prefix encode the RI source.
    if ri_source == 0:
        stb1 = "ice_Wrn84/"
        stb2 = "ice_Wrn84_"
    elif ri_source == 1:
        stb1 = "ice_Wrn08/"
        stb2 = "ice_Wrn08_"
    elif ri_source == 2:
        stb1 = "ice_Pic16/"
        stb2 = "ice_Pic16_"
    else:
        # Bug fix: an unknown source previously crashed later with a
        # NameError on stb1/stb2; fail fast with a clear message instead.
        raise ValueError(f"ri_source must be 0, 1 or 2, got {ri_source!r}")

    icefile = pd.DataFrame()
    icefile["asm_prm"] = g_in
    icefile["ss_alb"] = ssa_in
    icefile["ext_cff_mss"] = mac_in
    icefile = icefile.to_xarray()
    icefile.attrs["medium_type"] = "air"
    icefile.attrs[
        "description"
    ] = f"""Optical properties for ice grain: hexagonal column of side
    length {side_length}um and length {depth}um"""
    icefile.attrs["psd"] = "monodisperse"
    # Bug fix: this attribute previously stored `depth` under the name
    # "side_length_um"; it now stores the side length its name claims.
    icefile.attrs["side_length_um"] = side_length
    icefile.attrs["density_kg_m3"] = density
    icefile.attrs[
        "origin"
    ] = "Optical properties derived from geometrical optics calculations"
    icefile.to_netcdf(
        str(filepath_in + stb1 + stb2 + "{}_{}.nc".format(str(side_length), str(depth)))
    )
    return
# --------------------------------------------------------------------------------------
# FUNCTION CALLS
# --------------------------------------------------------------------------------------
# Example driver: sweep side lengths/depths and write one netCDF per crystal
# geometry (917 below is the assumed ice density in kg/m3 — TODO confirm).
# reals, imags, wavelengths = preprocess_RI(RI_SOURCE, DATAPATH)
# for side_length in np.arange(2000, 11000, 1000):
#     for depth in np.arange(2000, 31000, 1000):
#         (
#             g_list,
#             ssa_list,
#             mac_list,
#             depth,
#             side_length,
#             diameter,
#         ) = calc_optical_params(
#             side_length, depth, reals, imags, wavelengths, plots=False, report_dims=True
#         )
#         net_cdf_updater(
#             RI_SOURCE, SAVEPATH, g_list, ssa_list, mac_list, depth, side_length, 917
#         )

if __name__ == '__main__':
    # Intentionally a no-op: uncomment the driver loop above to regenerate files.
    pass
1613074 | import os
import mimetypes
import arrow
def datetimeformat(date_str):
    """Template filter: render a timestamp as a human-friendly relative time."""
    return arrow.get(date_str).humanize()
def file_type(key):
    """Template filter: map a file name/key to its MIME type.

    Falls back to a human-readable message when the extension is unknown.
    """
    _, file_extension = os.path.splitext(key)
    try:
        return mimetypes.types_map[file_extension]
    except KeyError:
        # Bug fix: the original `except KeyError():` named an *instance*
        # instead of the exception class, so the handler raised a TypeError
        # ("catching classes that do not derive from BaseException...")
        # instead of catching the failed lookup.
        return "Tipo de arquivo desconhecido."
9613192 | <reponame>haifangong/TRFE-Net-for-thyroid-nodule-segmentation
import argparse
import glob
import os
import random
import socket
import time
from datetime import datetime
import numpy as np
# PyTorch includes
import torch
import torch.optim as optim
# Tensorboard include
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from torchvision import transforms
# Dataloaders includes
from dataloaders import tn3k, tg3k, tatn
from dataloaders import custom_transforms as trforms
from dataloaders import utils
# Model includes
from model.deeplab_v3_plus import Deeplabv3plus
from model.fcn import FCN8s
from model.mtnet import MTNet
from model.segnet import SegNet
from model.trfe import TRFENet
from model.trfe1 import TRFENet1
from model.trfe2 import TRFENet2
from model.unet import Unet
# Loss function includes
from model.utils import soft_dice
def get_arguments():
    """Assemble the CLI parser for a training run and parse sys.argv."""
    cli = argparse.ArgumentParser()
    cli.add_argument('-gpu', type=str, default='0')

    # --- model settings ---
    # known model names: unet, trfe, trfe1, trfe2, mtnet, segnet,
    # deeplab-resnet50, fcn
    cli.add_argument('-model_name', type=str, default='unet')
    cli.add_argument('-criterion', type=str, default='Dice')
    cli.add_argument('-pretrain', type=str, default='None')  # e.g. THYROID
    cli.add_argument('-num_classes', type=int, default=1)
    cli.add_argument('-input_size', type=int, default=224)
    cli.add_argument('-output_stride', type=int, default=16)

    # --- train settings ---
    cli.add_argument('-dataset', type=str, default='TN3K')  # TN3K, TG3K, TATN
    cli.add_argument('-fold', type=str, default='0')
    cli.add_argument('-batch_size', type=int, default=8)
    cli.add_argument('-nepochs', type=int, default=60)
    cli.add_argument('-resume_epoch', type=int, default=0)

    # --- optimizer settings ---
    # NOTE(review): -naver_grad is declared type=str with an int default;
    # preserved as-is for compatibility.
    cli.add_argument('-naver_grad', type=str, default=1)
    cli.add_argument('-lr', type=float, default=1e-3)
    cli.add_argument('-momentum', type=float, default=0.9)
    cli.add_argument('-update_lr_every', type=int, default=10)
    cli.add_argument('-weight_decay', type=float, default=5e-4)

    # --- visualization / logging settings ---
    cli.add_argument('-save_every', type=int, default=10)
    cli.add_argument('-log_every', type=int, default=40)
    cli.add_argument('-load_path', type=str, default='')
    cli.add_argument('-run_id', type=int, default=-1)
    cli.add_argument('-use_eval', type=int, default=1)
    cli.add_argument('-use_test', type=int, default=1)
    return cli.parse_args()
def setup_seed(seed):
    """Seed every RNG in use (torch CPU+CUDA, numpy, random) and force
    deterministic cuDNN kernels so runs are reproducible."""
    for seed_fn in (torch.manual_seed, torch.cuda.manual_seed_all,
                    np.random.seed, random.seed):
        seed_fn(seed)
    torch.backends.cudnn.deterministic = True
# Fix all RNG seeds once at import time so every run is reproducible.
setup_seed(1234)
def main(args):
    """Run the full train/validate loop for the selected segmentation model.

    Creates ./run/run_<id>/ holding TensorBoard logs and .pth checkpoints,
    builds the network named by args.model_name, trains it on the chosen
    dataset and keeps the checkpoint with the best validation IoU, plus a
    periodic snapshot every args.save_every epochs.  Requires a CUDA device
    (net.cuda() below).
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    # Resolve the run directory: resuming reuses the newest run_* id, a
    # fresh run takes the next id; an explicit -run_id >= 0 overrides both.
    # NOTE(review): glob results are sorted lexicographically, so 'run_9'
    # sorts after 'run_10' — confirm ids stay ordered in practice.
    save_dir_root = os.path.join(os.path.dirname(os.path.abspath(__file__)))
    if args.resume_epoch != 0:
        runs = sorted(glob.glob(os.path.join(save_dir_root, 'run', 'run_*')))
        run_id = int(runs[-1].split('_')[-1]) if runs else 0
    else:
        runs = sorted(glob.glob(os.path.join(save_dir_root, 'run', 'run_*')))
        run_id = int(runs[-1].split('_')[-1]) + 1 if runs else 0
    if args.run_id >= 0:
        run_id = args.run_id
    save_dir = os.path.join(save_dir_root, 'run', 'run_' + str(run_id))
    log_dir = os.path.join(save_dir, datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname())
    writer = SummaryWriter(log_dir=log_dir)

    # Build the requested model.  Multi-task models (trfe*, mtnet) pair each
    # nodule sample with a gland sample, so their loader batch size is halved.
    batch_size = args.batch_size
    if 'deeplab' in args.model_name:
        if 'resnet101' in args.model_name:
            net = Deeplabv3plus(nInputChannels=3, n_classes=args.num_classes, os=args.output_stride, backbone_type='resnet101')
        elif 'resnet50' in args.model_name:
            net = Deeplabv3plus(nInputChannels=3, n_classes=args.num_classes, os=args.output_stride, backbone_type='resnet50')
        elif 'resnet34' in args.model_name:
            net = Deeplabv3plus(nInputChannels=3, n_classes=args.num_classes, os=args.output_stride, backbone_type='resnet34')
        else:
            raise NotImplementedError
    elif 'unet' in args.model_name:
        net = Unet(in_ch=3, out_ch=1)
    elif 'trfe' in args.model_name:
        if args.model_name == 'trfe1':
            net = TRFENet1(in_ch=3, out_ch=1)
        elif args.model_name == 'trfe2':
            net = TRFENet2(in_ch=3, out_ch=1)
        elif args.model_name == 'trfe':
            net = TRFENet(in_ch=3, out_ch=1)
        batch_size = 4
    elif 'mtnet' in args.model_name:
        net = MTNet(in_ch=3, out_ch=1)
        batch_size = 4
    elif 'segnet' in args.model_name:
        net = SegNet(input_channels=3, output_channels=1)
    elif 'fcn' in args.model_name:
        net = FCN8s(1)
    else:
        raise NotImplementedError

    # Optionally resume from an epoch checkpoint of this run.
    if args.resume_epoch == 0:
        print('Training ' + args.model_name + ' from scratch...')
    else:
        load_path = os.path.join(save_dir, args.model_name + '_epoch-' + str(args.resume_epoch) + '.pth')
        print('Initializing weights from: {}...'.format(load_path))
        net.load_state_dict(torch.load(load_path))

    # Optional warm start from a thyroid-pretrained checkpoint.
    if args.pretrain == 'THYROID':
        net.load_state_dict(torch.load('./pre_train/thyroid-pretrain.pth', map_location=lambda storage, loc: storage))
        print('loading pretrain model......')

    torch.cuda.set_device(device=0)
    net.cuda()

    optimizer = optim.SGD(
        net.parameters(),
        lr=args.lr,
        momentum=args.momentum
    )

    if args.criterion == 'Dice':
        criterion = soft_dice
    else:
        raise NotImplementedError

    # Train-time augmentation; validation only resizes and normalises.
    composed_transforms_tr = transforms.Compose([
        trforms.FixedResize(size=(args.input_size, args.input_size)),
        trforms.RandomHorizontalFlip(),
        trforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        trforms.ToTensor()])

    composed_transforms_ts = transforms.Compose([
        trforms.FixedResize(size=(args.input_size, args.input_size)),
        trforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        trforms.ToTensor()])

    if args.dataset == 'TN3K':
        train_data = tn3k.TN3K(mode='train', transform=composed_transforms_tr, fold=args.fold)
        val_data = tn3k.TN3K(mode='val', transform=composed_transforms_ts, fold=args.fold)
    elif args.dataset == 'TG3K':
        train_data = tg3k.TG3K(mode='train', transform=composed_transforms_tr)
        val_data = tg3k.TG3K(mode='val', transform=composed_transforms_ts)
    elif args.dataset == 'TATN':
        train_data = tatn.TATN(mode='train', transform=composed_transforms_tr, fold=args.fold)
        val_data = tatn.TATN(mode='val', transform=composed_transforms_ts, fold=args.fold)

    trainloader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=0)
    testloader = DataLoader(val_data, batch_size=1, shuffle=False, num_workers=0)
    num_iter_tr = len(trainloader)
    num_iter_ts = len(testloader)
    nitrs = args.resume_epoch * num_iter_tr
    nsamples = args.resume_epoch * len(train_data)
    print('nitrs: %d num_iter_tr: %d' % (nitrs, num_iter_tr))
    print('nsamples: %d tot_num_samples: %d' % (nsamples, len(train_data)))

    aveGrad = 0
    global_step = 0
    recent_losses = []
    start_t = time.time()
    best_f, cur_f = 0.0, 0.0

    for epoch in range(args.resume_epoch, args.nepochs):
        net.train()
        epoch_losses = []
        for ii, sample_batched in enumerate(trainloader):
            if 'trfe' in args.model_name or args.model_name == 'mtnet':
                # Multi-task batch: interleave nodule and gland images so even
                # tensor indices carry nodule samples and odd indices gland ones.
                nodules, glands = sample_batched
                inputs_n, labels_n = nodules['image'].cuda(), nodules['label'].cuda()
                inputs_g, labels_g = glands['image'].cuda(), glands['label'].cuda()
                inputs = torch.cat([inputs_n[0].unsqueeze(0), inputs_g[0].unsqueeze(0)], dim=0)
                for i in range(1, inputs_n.size()[0]):
                    inputs = torch.cat([inputs, inputs_n[i].unsqueeze(0)], dim=0)
                    inputs = torch.cat([inputs, inputs_g[i].unsqueeze(0)], dim=0)
                global_step += inputs.data.shape[0]
                nodule, thyroid = net.forward(inputs)
                loss = 0
                for i in range(inputs.size()[0]):
                    if i % 2 == 0:
                        # Even index: nodule prediction, full-weight loss.
                        loss += criterion(nodule[i], labels_n[int(i / 2)], size_average=False, batch_average=True)
                    else:
                        # Odd index: gland (thyroid) prediction, half weight.
                        loss += 0.5 * criterion(thyroid[i], labels_g[int((i-1) / 2)], size_average=False, batch_average=True)
            else:
                inputs, labels = sample_batched['image'].cuda(), sample_batched['label'].cuda()
                global_step += inputs.data.shape[0]
                outputs = net.forward(inputs)
                loss = criterion(outputs, labels, size_average=False, batch_average=True)

            trainloss = loss.item()
            epoch_losses.append(trainloss)
            # Ring buffer of the last log_every losses for smoothed logging.
            if len(recent_losses) < args.log_every:
                recent_losses.append(trainloss)
            else:
                recent_losses[nitrs % len(recent_losses)] = trainloss

            # Backward the averaged gradient
            loss.backward()
            aveGrad += 1
            nitrs += 1
            # NOTE(review): accumulates args.batch_size even when the loader's
            # effective batch size was reduced to 4 above — confirm intended.
            nsamples += args.batch_size

            # Update the weights once in p['nAveGrad'] forward passes
            if aveGrad % args.naver_grad == 0:
                optimizer.step()
                optimizer.zero_grad()
                aveGrad = 0

            if nitrs % args.log_every == 0:
                meanloss = sum(recent_losses) / len(recent_losses)
                print('epoch: %d ii: %d trainloss: %.2f timecost:%.2f secs' % (
                    epoch, ii, meanloss, time.time() - start_t))
                writer.add_scalar('data/trainloss', meanloss, nsamples)

        meanloss = sum(epoch_losses) / len(epoch_losses)
        print('epoch: %d meanloss: %.2f' % (epoch, meanloss))
        writer.add_scalar('data/epochloss', meanloss, nsamples)

        # ---------------- validation pass after each epoch ----------------
        if args.use_test == 1:
            prec_lists = []
            recall_lists = []
            sum_testloss = 0.0
            total_mae = 0.0
            cnt = 0
            count = 0
            iou = 0
            # Optionally keep the net in train mode during eval (use_eval=0).
            if args.use_eval == 1:
                net.eval()
            for ii, sample_batched in enumerate(testloader):
                inputs, labels = sample_batched['image'].cuda(), sample_batched['label'].cuda()
                with torch.no_grad():
                    if 'trfe' in args.model_name or args.model_name == 'mtnet':
                        outputs, _ = net.forward(inputs)
                    else:
                        outputs = net.forward(inputs)
                loss = criterion(outputs, labels, size_average=False, batch_average=True)
                sum_testloss += loss.item()
                predictions = torch.sigmoid(outputs)
                iou += utils.get_iou(predictions, labels)
                count += 1
                total_mae += utils.get_mae(predictions, labels) * predictions.size(0)
                prec_list, recall_list = utils.get_prec_recall(predictions, labels)
                prec_lists.extend(prec_list)
                recall_lists.extend(recall_list)
                cnt += predictions.size(0)
                # After the last validation batch, aggregate and log metrics.
                if ii % num_iter_ts == num_iter_ts - 1:
                    mmae = total_mae / cnt
                    mean_testloss = sum_testloss / num_iter_ts
                    mean_prec = sum(prec_lists) / len(prec_lists)
                    mean_recall = sum(recall_lists) / len(recall_lists)
                    # F-beta with beta^2 = 0.3 (precision-weighted F-measure).
                    fbeta = 1.3 * mean_prec * mean_recall / (0.3 * mean_prec + mean_recall)
                    iou = iou / count
                    print('Validation:')
                    print('epoch: %d, numImages: %d testloss: %.2f mmae: %.4f fbeta: %.4f iou: %.4f' % (
                        epoch, cnt, mean_testloss, mmae, fbeta, iou))
                    writer.add_scalar('data/validloss', mean_testloss, nsamples)
                    writer.add_scalar('data/validmae', mmae, nsamples)
                    writer.add_scalar('data/validfbeta', fbeta, nsamples)
                    writer.add_scalar('data/validiou', iou, epoch)
                    # Model selection: keep the checkpoint with the best IoU.
                    cur_f = iou
                    if cur_f > best_f:
                        save_path = os.path.join(save_dir, args.model_name + '_best' + '.pth')
                        torch.save(net.state_dict(), save_path)
                        print("Save model at {}\n".format(save_path))
                        best_f = cur_f

        # Periodic snapshot every save_every epochs (usable for -resume_epoch).
        if epoch % args.save_every == args.save_every - 1:
            save_path = os.path.join(save_dir, args.model_name + '_epoch-' + str(epoch) + '.pth')
            torch.save(net.state_dict(), save_path)
            print("Save model at {}\n".format(save_path))
if __name__ == "__main__":
    # Parse CLI arguments and launch the train/validate loop.
    args = get_arguments()
    main(args)
| StarcoderdataPython |
5123661 | <filename>helpers/redownload.py
import json
import operator
from pathlib import Path
import textwrap
from datetime import date
import re
def getCurrentMemoryUsage():
    """Return the resident-set size (RSS) of the current process, in bytes."""
    import os

    import psutil

    return psutil.Process(os.getpid()).memory_info().rss
def remove_suffix(value, suffix):
    """Return *value* with a trailing *suffix* removed, if present.

    Backport-style equivalent of Python 3.9's str.removesuffix().

    Bug fix: with an empty suffix the original returned ``value[:-0]``,
    which is ``value[:0]`` — the empty string — instead of *value*.
    """
    if suffix and value.endswith(suffix):
        return value[:-len(suffix)]
    return value
def done(fn, _mem_start):
    """Print a completion banner for *fn* with RAM used since *_mem_start* (MB)."""
    used_mb = (getCurrentMemoryUsage() - _mem_start) / 1048576
    total_mb = getCurrentMemoryUsage() / 1048576
    banner = "* {} - used RAM {:.2f} MB of {:.2f} MB Total [ DONE ]"
    print(banner.format(fn, used_mb, total_mb))
# Resolve repo-relative directories from this helper's own location.
_parent = Path(__file__).resolve().parent
_package_root = Path(_parent.parent / "ec2_compare")
_tests_root = Path(_parent.parent / "tests")
print("Parsing cache file 'aws_ec2.json':")
_mem_start = getCurrentMemoryUsage()
with open(_parent / "aws_ec2.json") as json_file:
    data = json.load(json_file)
# Flatten each instance record: hoist every nested dict's key/value pairs up
# to the top level while keeping the original top-level keys as well.
# lambda elem: {key: val for v in elem.values() if isinstance(v, dict) for key, val in v.items()}
data = [{**{key: val for v in elem.values() if isinstance(v, dict)
            for key, val in v.items()}, **elem} for elem in data]
data.sort(key=operator.itemgetter('InstanceType'))
_raw_package_root = Path(_package_root / "internal")
fn = "ec2data.py"
# Dump the full flattened instance list as an importable Python module.
with open(_raw_package_root / fn, 'w') as outfile:
    outfile.write(textwrap.dedent("""
def get_instances_list() -> list:
    # pylint: disable=all
    return {} # noqa: E501
""" .format(data)))
done(fn, _mem_start)
# NOTE(review): _prop_name/_prop_default appear unused below — confirm.
_prop_name = 'CurrentGeneration'
_prop_default = False
# TODO: use for filtering
# Bucket every key seen in the data by the Python type of its values.
key_structure = {
    'str': list(set([n for k in data for n in k.keys() if isinstance(k[n], str)])),
    'bool': list(set([n for k in data for n in k.keys() if isinstance(k[n], bool)])),
    'list': list(set([n for k in data for n in k.keys() if isinstance(k[n], list)])),
    'dict': list(set([n for k in data for n in k.keys() if isinstance(k[n], dict)])),
    'int': list(set([n for k in data for n in k.keys() if isinstance(k[n], int)])),
    'float': list(set([n for k in data for n in k.keys() if isinstance(k[n], float)])),
    'other': list(set([n for k in data for n in k.keys() if not isinstance(
        k[n], (dict, str, bool, list, float, int))]))
}
fn = "ec2keys.py"
# Generate a module exposing the key/type mapping plus a filter helper.
with open(_raw_package_root / fn, 'w') as outfile:
    outfile.write(textwrap.dedent("""
from typing import List
def keys_dict() -> dict:
    # pylint: disable=all
    return {} # noqa: E501
def keys_structure(*arg, **kw) -> List:
    return [elem for k, v in keys_dict().items()
            if k in arg or not arg for elem in v]
""" .format(key_structure)))
done(fn, _mem_start)
# For every bool/str/list-valued key and every distinct value of that key,
# generate one importable module holding the matching instances, plus an
# auto-generated pytest file for it.
# NOTE(review): original indentation was lost in this dump; the structure
# below is reconstructed from the control flow — verify against the repo.
exclude_keys = ['InstanceType']  # NOTE(review): only used by the commented-out skip below
for t in [bool, str, list]:
    for _filtered_key in set([n for k in data for n in k.keys() if isinstance(k[n], t)]):
        # if _filtered_key in exclude_keys:
        #     print(f"Skipping {_filtered_key}")
        #     continue
        val = []
        if t == list:
            # Distinct string members across all list values of this key.
            val = set([n for k in data if _filtered_key in k for n in k[_filtered_key] if isinstance(n, str)])
        elif _filtered_key == 'InstanceType':
            # Instance families ("m5"), plus pure-letter prefixes and the
            # letter+digit sub-family forms extracted via regex.
            val = set([k.get(_filtered_key).split(".")[0]
                       for k in data if not k.get(_filtered_key, None) is None])
            _families = set([str(re.match(r"^(.*?)\d+", k).group(1))
                             for k in val if re.match(r"^(.*?)\d+", k)])
            _sub_families = set([str(re.match(r"^(.*?)\d+", k).group(0))
                                 for k in val if re.match(r"^(.*?)\d+", k)])
            val = set(list(val) + list(_families) + list(_sub_families))
        else:
            val = set([k.get(_filtered_key)
                       for k in data if not k.get(_filtered_key, None) is None])
        for _filtered_value in val:
            # CamelCase key -> snake_case package name, with some AWS noise
            # words ("Supported", "Types") stripped first.
            _display_filtered_key = "".join([("_" + i if j > 0 and i.isupper() else i) for j, i in enumerate(
                _filtered_key.replace("Supported", "").replace("Types", "").replace("-", "_"))]).strip().lower()
            print(
                f"Filter {_filtered_key}:{_display_filtered_key} - {_filtered_value}")
            if not _display_filtered_key:
                continue
            # Select the instances matching this key/value pair.
            _partial = []
            if t == list:
                _partial = list(filter(
                    lambda x: _filtered_key in x and _filtered_value in x[_filtered_key], data))
            elif _filtered_key == 'InstanceType':
                _partial = list(filter(lambda x: _filtered_key in x and str(
                    x[_filtered_key]).startswith(str(_filtered_value)), data))
            else:
                _partial = list(filter(lambda x: _filtered_key in x and str(
                    _filtered_value) == str(x[_filtered_key]), data))
            if not _partial:
                continue
            if isinstance(_filtered_value, str) and ' ' in _filtered_value:
                continue  # value is not a valid module-name fragment
            # Create the sub-package and its matching tests directory.
            _sub_package = Path(_raw_package_root / str(_display_filtered_key).lower())
            _sub_package.mkdir(parents=True, exist_ok=True)
            with open(_sub_package / '__init__.py', 'w') as outfile:
                outfile.write(textwrap.dedent("""
# Automatically generated at {}
""" .format(date.today().strftime("%B %d, %Y"))))
            _sub_tests = Path(
                _tests_root / 'internal/{}'.format(str(_display_filtered_key).lower()))
            _sub_tests.mkdir(parents=True, exist_ok=True)
            _usage_class = str(_filtered_value).lower().replace("-", "_")
            fn = "{}.py".format(_usage_class)
            # Sort by instance type, then (stably) by vCPU count when known.
            _partial.sort(key=operator.itemgetter('InstanceType'))
            # _partial.sort(key=operator.itemgetter('VCpuInfo')['DefaultVCpus'])
            _partial.sort(
                key=lambda k:
                k['VCpuInfo']['DefaultVCpus']
                if 'VCpuInfo' in k and 'DefaultVCpus' in k['VCpuInfo']
                else k['InstanceType'])
            with open(_sub_package / fn, 'w') as outfile:
                if not _partial:
                    raise ValueError(
                        f"usage_class {_usage_class} is empty ")
                outfile.write(textwrap.dedent("""
# Automatically generated
# pylint: disable=all
get = {} # noqa: E501
def get_instances_list() -> list:
    '''Returns list EC2 instances with {} = {} .'''
    # pylint: disable=all
    return get
""" .format(_partial, _filtered_key,
            _filtered_value,
            )))
            # Emit a pytest module exercising the generated data module.
            __mod_name = ".".join([_display_filtered_key, _usage_class])
            __func_name = __mod_name.replace(".", "-").replace("-", "_")
            with open(_sub_tests / "test_{}_{}_auto.py".format(_display_filtered_key, _usage_class), 'w') as outfile:
                outfile.write(textwrap.dedent("""
# Testing module {module_name}
import pytest
import ec2_compare.internal.{module_name}
def test_get_internal_data_{func_name}_get_instances_list():
    assert len(ec2_compare.internal.{module_name}.get_instances_list()) > 0
def test_get_internal_data_{func_name}_get():
    assert len(ec2_compare.internal.{module_name}.get) > 0
""" .format(module_name=__mod_name,
            func_name=__func_name
            )))
            done(fn, _mem_start)
# NOTE(review): final banner assumed to be at module level — confirm.
done(fn, _mem_start)
| StarcoderdataPython |
244893 | """
1628. Design an Expression Tree With Evaluate Function
Medium
Given the postfix tokens of an arithmetic expression, build and return the binary expression tree that represents this expression.
Postfix notation is a notation for writing arithmetic expressions in which the operands (numbers) appear before their operators. For example, the postfix tokens of the expression 4*(5-(7+2)) are represented in the array postfix = ["4","5","7","2","+","-","*"].
The class Node is an interface you should use to implement the binary expression tree. The returned tree will be tested using the evaluate function, which is supposed to evaluate the tree's value. You should not remove the Node class; however, you can modify it as you wish, and you can define other classes to implement it if needed.
A binary expression tree is a kind of binary tree used to represent arithmetic expressions. Each node of a binary expression tree has either zero or two children. Leaf nodes (nodes with 0 children) correspond to operands (numbers), and internal nodes (nodes with two children) correspond to the operators '+' (addition), '-' (subtraction), '*' (multiplication), and '/' (division).
It's guaranteed that no subtree will yield a value that exceeds 109 in absolute value, and all the operations are valid (i.e., no division by zero).
Follow up: Could you design the expression tree such that it is more modular? For example, is your design able to support additional operators without making changes to your existing evaluate implementation?
Example 1:
Input: s = ["3","4","+","2","*","7","/"]
Output: 2
Explanation: this expression evaluates to the above binary tree with expression ((3+4)*2)/7) = 14/7 = 2.
Example 2:
Input: s = ["4","5","2","7","+","-","*"]
Output: -16
Explanation: this expression evaluates to the above binary tree with expression 4*(5-(2+7)) = 4*(-4) = -16.
Constraints:
1 <= s.length < 100
s.length is odd.
s consists of numbers and the characters '+', '-', '*', and '/'.
If s[i] is a number, its integer representation is no more than 105.
It is guaranteed that s is a valid expression.
The absolute value of the result and intermediate values will not exceed 109.
It is guaranteed that no expression will include division by zero.
"""
# V0
# V1
# https://blog.csdn.net/qq_46105170/article/details/109441459
# V1'
# https://leetcode.com/problems/design-an-expression-tree-with-evaluate-function/discuss/1669374/python
import abc
from abc import ABC, abstractmethod
class Node(ABC):
    """Abstract interface for an expression-tree node (LeetCode 1628)."""

    @abstractmethod
    # define your fields here
    def evaluate(self) -> int:
        """Return the integer value of the subtree rooted at this node."""
        pass
class eNode(Node):
    """Expression-tree node: leaves hold numbers, internal nodes operators."""

    def __init__(self, val=None):
        self.val = val
        self.left = None
        self.right = None

    def evaluate(self):
        """Recursively evaluate the subtree rooted at this node."""
        ops = {
            '+': lambda a, b: a + b,
            '-': lambda a, b: a - b,
            '*': lambda a, b: a * b,
            '/': lambda a, b: a // b,  # integer (floor) division, as original
        }

        def fold(node):
            fn = ops.get(node.val)
            if fn is None:
                return int(node.val)
            return fn(fold(node.left), fold(node.right))

        return fold(self)
class TreeBuilder(object):
    """Builds an eNode expression tree from postfix tokens."""

    # NOTE(review): annotation quoted — no `from typing import List` is
    # visible in this file, so an unquoted List would raise a NameError.
    def buildTree(self, postfix: 'List[str]') -> 'Node':
        """Build the expression tree by scanning *postfix* right-to-left.

        The last token is the root operator; each operator consumes its
        right subtree first, then its left, which matches postfix order
        when read backwards.
        """
        def search(node):
            # Fill *node* with the token at the read cursor self.i.
            node.val = postfix[self.i]
            if not node.val.isdigit():
                # Operator: recursively attach right, then left operand.
                self.i -= 1
                node.right = search(eNode())
                self.i -= 1
                node.left = search(eNode())
            return node
        # Cursor starts at the last token (the root).
        self.i = len(postfix) - 1
        return search(eNode())
# V1''
# https://leetcode.com/problems/design-an-expression-tree-with-evaluate-function/discuss/1286682/Python
import abc
from abc import ABC, abstractmethod
"""
This is the interface for the expression tree Node.
You should not remove it, and you can define some classes to implement it.
"""
class Node(ABC):
    """Abstract interface for an expression-tree node (LeetCode 1628)."""

    @abstractmethod
    def evaluate(self) -> int:
        """Return the integer value of the subtree rooted at this node."""
        pass
"""
This is the TreeBuilder class.
You can treat it as the driver code that takes the postinfix input
and returns the expression tree represnting it as a Node.
"""
class TreeNode(Node):
    """Expression-tree node dispatching arithmetic through `operator`."""

    def __init__(self, val=None, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
        # Operator token -> binary callable; '/' floors, as the problem
        # guarantees integer-valued subtrees.
        self.ops = {
            "+": operator.add,
            "-": operator.sub,
            "*": operator.mul,
            "/": operator.floordiv,
        }

    def evaluate(self) -> int:
        """Recursively fold this subtree into an integer."""
        op = self.ops.get(self.val)
        if op is None:
            # Leaf: the value is a numeric token.
            return int(self.val)
        return op(self.left.evaluate(), self.right.evaluate())
class TreeBuilder(object):
    """Builds a TreeNode expression tree from postfix tokens via a stack."""

    def buildTree(self, postfix: List[str]) -> 'Node':
        """Scan *postfix* left-to-right, pushing leaves and combining on operators."""
        pending = []
        for token in postfix:
            if token not in "+-*/":
                # Operand: push a leaf node.
                pending.append(TreeNode(token))
                continue
            # Operator: pop right operand first, then left.
            rhs = pending.pop()
            lhs = pending.pop()
            pending.append(TreeNode(token, lhs, rhs))
        return pending[0]
# V1'''
# https://leetcode.com/problems/design-an-expression-tree-with-evaluate-function/discuss/1095682/Python-solution
import abc
from abc import ABC, abstractmethod
"""
This is the interface for the expression tree Node.
You should not remove it, and you can define some classes to implement it.
"""
class Node(ABC):
    # Abstract interface for expression-tree nodes.
    @abstractmethod
    # define your fields here
    def evaluate(self) -> int:
        """Return the integer value of the expression subtree rooted here."""
        pass
# TreeNode is inheriting from the abstract class, Node.
class TreeNode(Node):
    """Expression-tree node; leaves hold ints, inner nodes hold operator chars."""

    def __init__(self, val, l=None, r=None):
        self.value = val
        self.left = l
        self.right = r

    def evaluate(self):
        """Return the integer value of the subtree rooted at this node."""
        if self.left is None and self.right is None:
            # Leaf node: the stored value is already an integer operand.
            return self.value
        lhs = self.left.evaluate()
        rhs = self.right.evaluate()
        if self.value == '+':
            return lhs + rhs
        if self.value == '-':
            return lhs - rhs
        if self.value == '*':
            return lhs * rhs
        if self.value == '/':
            # Integer division, matching the problem statement.
            return lhs // rhs
"""
This is the TreeBuilder class.
You can treat it as the driver code that takes the postinfix input
and returns the expression tree represnting it as a Node.
"""
class TreeBuilder(object):
    """Driver that turns a postfix token list into an expression tree."""

    def buildTree(self, postfix: 'List[str]') -> 'Node':
        # NOTE: annotation quoted — `typing.List` is never imported in this
        # module, so an unquoted List[str] would raise NameError here.
        stk = []
        for ch in postfix:
            if ch.isdigit():
                # Operand: push a leaf holding the integer value.
                stk.append(TreeNode(int(ch)))
            else:
                # Operator: pop right operand first, then left.
                r = stk.pop()
                l = stk.pop()
                stk.append(TreeNode(ch, l, r))
        # The top of the stack contains the root of the expression tree.
        return stk[-1]
"""
Your TreeBuilder object will be instantiated and called as such:
obj = TreeBuilder();
expTree = obj.buildTree(postfix);
ans = expTree.evaluate();
"""
# V2 | StarcoderdataPython |
8021956 | from datetime import datetime, timedelta
from collections import OrderedDict
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.urlresolvers import reverse, reverse_lazy
from django.db.models import Count, Avg, Min, Max
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render, render_to_response
from django.utils import timezone
from django.views.generic import View, ListView, DetailView, UpdateView, DeleteView, CreateView
from bokeh.plotting import figure
from bokeh.resources import CDN
from bokeh.embed import components
from bokeh.models import FixedTicker, Label
from bokeh.models.ranges import Range1d
from bokeh.models.widgets import Dropdown
from py_fitness.users.models import User
from py_fitness.core.utils import weeks_between
from .forms import WorkoutForm, WorkoutUpdateForm, ExerciseForm, SetFormSet, SetFormSetHelper
from .models import Workout, Exercise, Set
def bokeh_avg_workouts_per_day(user):
    """Build a Bokeh line chart of the user's average workouts per weekday.

    Returns:
        (script, chart): Bokeh embed components for rendering in a template.
    """
    # Accumulator keyed by weekday number as produced by Postgres
    # extract(dow ...): 0 = Sunday ... 6 = Saturday.
    DAY_OF_WEEK = {
        0: 0,
        1: 0,
        2: 0,
        3: 0,
        4: 0,
        5: 0,
        6: 0
    }
    # One row per distinct weekday: {'day': <dow>, 'count': <workouts on it>}.
    data = user.workout_workout_author.extra({'day': 'extract(dow from date)'}).values('day').annotate(count=Count('id')).values('day', 'count')
    x_factors = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
    plot = figure(title="Average Workouts per Day", height=1000, width=1000, responsive=True, x_range=x_factors, y_range=Range1d(start=0, end=1))
    plot.title.align = "center"
    plot.xaxis[0].axis_label = "Day of Week"
    plot.yaxis[0].axis_label = "Number of Workouts"
    if not data:
        # No workouts yet: draw a flat zero line with an explanatory label.
        plot.yaxis.bounds = (0,1)
        plot.yaxis[0].ticker=FixedTicker(ticks=[x*0.1 for x in range(0, 11)])
        plot.line(x_factors, [0,0,0,0,0,0,0], line_width=2)
        notice = Label(x=80, y=150, x_units='screen', y_units='screen',
                       text="No data yet. Record a workout!", render_mode='css',
                       background_fill_color='white', background_fill_alpha=1.0)
        plot.add_layout(notice)
    else:
        # Number of weeks spanned by the user's workout history.
        workout_range = user.workout_workout_author.aggregate(min=Min('date'), max=Max('date'))
        weeks = weeks_between(workout_range['min'], workout_range['max'])
        for item in data:
            # NOTE(review): this adds 1 per distinct weekday row and ignores
            # item['count'], so each weekday contributes at most 1 regardless
            # of how many workouts it actually saw. Averaging real totals
            # would need `+= item['count']` — confirm intent before changing.
            DAY_OF_WEEK[item['day']] += 1
        plot.line(x_factors, [i/weeks for i in DAY_OF_WEEK.values()], line_width=2)
    script, chart = components(plot, CDN)
    return (script, chart)
def get_one_rep_max(weight, reps):
    """Estimate the one-repetition maximum using the Brzycki formula."""
    brzycki_denominator = 1.0278 - 0.0278 * reps
    return float(weight) / brzycki_denominator
def bokeh_exercise_1rm_weight_over_time(user, exercise):
    """Build a Bokeh chart of the best estimated 1RM per workout date.

    Uses the Brzycki estimate from ``get_one_rep_max`` and keeps, for each
    date, the highest estimate across all recorded sets.

    Returns:
        (script, chart): Bokeh embed components for rendering in a template.
    """
    data = Exercise.objects.filter(workout__author=user).filter(name__search=exercise).values('workout__date', 'sets__weight', 'sets__repetitions')
    plot = figure(title="{} 1RM Over Time (Brzycki Method)".format(exercise), height=600, width=600, responsive=True, x_axis_type="datetime")
    plot.title.align = "center"
    plot.xaxis[0].axis_label = "Date"
    plot.yaxis[0].axis_label = "Weight (lbs/kg)"
    # Best 1RM estimate seen per workout date.
    dates_data = {}
    for item in data:
        weight = item.get('sets__weight')
        reps = item.get('sets__repetitions')
        if weight is None or reps is None:
            # Skip rows whose set data is incomplete.
            continue
        date = item['workout__date'].date()
        one_rep_max = get_one_rep_max(weight, reps)
        # Single pass replaces the original duplicated if/else branches.
        if date not in dates_data or dates_data[date] < one_rep_max:
            dates_data[date] = one_rep_max
    if not dates_data:
        # No usable sets yet: flat placeholder line plus a notice.
        today = timezone.now().date()
        plot.line([today - timedelta(days=7), today], [0,0], line_width=2)
        notice = Label(x=70, y=150, x_units='screen', y_units='screen',
                       text="No data yet. Add an exercise!", render_mode='css',
                       background_fill_color='white', background_fill_alpha=1.0)
        plot.add_layout(notice)
    else:
        ordered_dates = OrderedDict(sorted(dates_data.items()))
        dates = list(ordered_dates.keys())
        weights = list(ordered_dates.values())
        plot.line(dates, weights, line_width=2)
        plot.circle(dates, weights, fill_color="white", size=8)
    script, chart = components(plot, CDN)
    return (script, chart)
def bokeh_exercise_avg_weight_over_time(user, exercise):
    """Build a Bokeh chart of the mean set weight per workout date.

    Returns:
        (script, chart): Bokeh embed components for rendering in a template.
    """
    data = Exercise.objects.filter(workout__author=user).filter(name__search=exercise).values('workout__date', 'sets__weight')
    plot = figure(title="Average {} Weight Over Time".format(exercise), height=600, width=600, responsive=True, x_axis_type="datetime")
    plot.title.align = "center"
    plot.xaxis[0].axis_label = "Date"
    plot.yaxis[0].axis_label = "Weight (lbs/kg)"
    # All recorded set weights, grouped per workout date.
    dates_data = {}
    for item in data:
        weight = item.get('sets__weight')
        if weight is None:
            # Skip rows without a recorded weight.
            continue
        date = item['workout__date'].date()
        # setdefault replaces the original duplicated if/else branches.
        dates_data.setdefault(date, []).append(weight)
    if not dates_data:
        # No usable sets yet: flat placeholder line plus a notice.
        today = timezone.now()
        plot.line([today - timedelta(days=7), today], [0,0], line_width=2)
        notice = Label(x=70, y=150, x_units='screen', y_units='screen',
                       text="No data yet. Add an exercise!", render_mode='css',
                       background_fill_color='white', background_fill_alpha=1.0)
        plot.add_layout(notice)
    else:
        # Collapse each date's weights into their mean.
        for key, weights_on_date in dates_data.items():
            dates_data[key] = sum(weights_on_date) / len(weights_on_date)
        ordered_dates = OrderedDict(sorted(dates_data.items()))
        dates = list(ordered_dates.keys())
        weights = list(ordered_dates.values())
        plot.line(dates, weights, line_width=2)
        plot.circle(dates, weights, fill_color="white", size=8)
    script, chart = components(plot, CDN)
    return (script, chart)
def bokeh_exercise_max_weight_over_time(user, exercise):
    """Build a Bokeh chart of the heaviest set weight per workout date.

    Returns:
        (script, chart): Bokeh embed components for rendering in a template.
    """
    data = Exercise.objects.filter(workout__author=user).filter(name__search=exercise).values('workout__date', 'sets__weight')
    plot = figure(title="Highest {} Weight Over Time".format(exercise), height=600, width=600, responsive=True, x_axis_type="datetime")
    plot.title.align = "center"
    plot.xaxis[0].axis_label = "Date"
    plot.yaxis[0].axis_label = "Weight (lbs/kg)"
    # Heaviest recorded weight per workout date.
    dates_data = {}
    for item in data:
        weight = item.get('sets__weight')
        if weight is None:
            # Skip rows without a recorded weight.
            continue
        date = item['workout__date'].date()
        # Single pass replaces the original duplicated if/else branches.
        if date not in dates_data or weight > dates_data[date]:
            dates_data[date] = weight
    if not dates_data:
        # No usable sets yet: flat placeholder line plus a notice.
        today = timezone.now()
        plot.line([today - timedelta(days=7), today], [0,0], line_width=2)
        notice = Label(x=70, y=150, x_units='screen', y_units='screen',
                       text="No data yet. Add an exercise!", render_mode='css',
                       background_fill_color='white', background_fill_alpha=1.0)
        plot.add_layout(notice)
    else:
        ordered_dates = OrderedDict(sorted(dates_data.items()))
        dates = list(ordered_dates.keys())
        weights = list(ordered_dates.values())
        plot.line(dates, weights, line_width=2)
        plot.circle(dates, weights, fill_color="white", size=8)
    script, chart = components(plot, CDN)
    return (script, chart)
def bokeh_weight_over_time(user):
    """Build a Bokeh chart of the user's recorded body weight over time.

    Returns:
        (script, chart): Bokeh embed components for rendering in a template.
    """
    data = user.workout_workout_author.all().values('date', 'weight')
    plot = figure(title="Weight Over Time", height=600, width=600, responsive=True, x_axis_type="datetime")
    plot.title.align = "center"
    plot.xaxis[0].axis_label = "Date"
    plot.yaxis[0].axis_label = "Weight (lbs/kg)"
    if not data:
        # No workouts yet: flat placeholder line plus a notice.
        today = timezone.now()
        plot.line([today - timedelta(days=7), today], [0,0], line_width=2)
        notice = Label(x=70, y=150, x_units='screen', y_units='screen',
                       text="No data yet. Record your weight in a workout!", render_mode='css',
                       background_fill_color='white', background_fill_alpha=1.0)
        plot.add_layout(notice)
    else:
        dates = []
        weight = []
        for item in data:
            # BUG FIX: the original appended the date unconditionally but the
            # weight only when it was recorded, so a workout without a weight
            # desynchronized the two lists and corrupted the plot.
            if item.get('weight') is not None:
                dates.append(item['date'])
                weight.append(item['weight'])
        plot.line(dates, weight, line_width=2)
        plot.circle(dates, weight, fill_color="white", size=8)
    script, chart = components(plot, CDN)
    return (script, chart)
class WorkoutDashboardView(LoginRequiredMixin, View):
    """Dashboard: recent workouts, summary charts, and a new-workout form."""
    def get(self, request, *args, **kwargs):
        workout_form = WorkoutForm()
        # NOTE(review): in January month-1 underflows to 0 and
        # date__month__gte=0 then matches every month rather than
        # "December onwards" — confirm intended behavior.
        workouts = request.user.workout_workout_author.filter(date__month__gte=timezone.now().month-1).order_by('-date')
        avg_workouts_script, avg_workouts_chart = bokeh_avg_workouts_per_day(request.user)
        weight_change_script, weight_change_chart = bokeh_weight_over_time(request.user)
        return render(request, "pages/dashboard.html", context={"form": workout_form,
                    "workouts": workouts, "avg_workouts_script": avg_workouts_script,
                    "avg_workouts_chart": avg_workouts_chart, "weight_change_script": weight_change_script,
                    "weight_change_chart": weight_change_chart})
    def post(self, request, *args, **kwargs):
        # Create a workout owned by the requesting user, then redirect to it.
        workout_form = WorkoutForm(request.POST)
        if workout_form.is_valid():
            workout = workout_form.save(commit=False)
            workout.author = request.user
            workout.save()
            return HttpResponseRedirect(workout.get_absolute_url())
        else:
            # Invalid form: re-render the dashboard with the errors.
            return render(request, 'pages/dashboard.html', context={"form": workout_form})
class WorkoutDetailView(LoginRequiredMixin, View):
    """Shows one workout and its exercises; POST adds a new exercise to it."""
    def get(self, request, *args, **kwargs):
        pk = kwargs.get('pk')
        # NOTE(review): .get() raises Workout.DoesNotExist (HTTP 500) for an
        # unknown pk; get_object_or_404 would yield a 404 instead — confirm.
        workout = Workout.objects.select_related('author').get(pk=pk)
        exercises = workout.exercises.all()
        form = ExerciseForm()
        # Elapsed time since this workout record was created.
        time = timezone.now() - workout.created
        return render(request, 'workout/workout_detail.html', context={ 'workout': workout,
                    'exercises': exercises,
                    'form': form,
                    'time': time})
    def post(self, request, *args, **kwargs):
        # Attach a new exercise to this workout, then jump to its set editor.
        exercise_form = ExerciseForm(request.POST)
        workout = Workout.objects.get(pk=kwargs.get('pk'))
        if exercise_form.is_valid():
            exercise = exercise_form.save(commit=False)
            exercise.workout = workout
            exercise.save()
            return HttpResponseRedirect(reverse('workout:exercise_update', kwargs={ 'year': workout.date.year,
                    'month': workout.date.month,
                    'pk': workout.pk,
                    'epk': exercise.pk}))
        # Invalid form: re-render the detail page with the errors.
        return render(request, 'workout/workout_detail.html', context={'workout': workout, 'form': exercise_form})
class WorkoutYearListView(LoginRequiredMixin, View):
    """Lists all of the user's workouts for a given year, oldest first."""

    def get(self, request, *args, **kwargs):
        yearly = (request.user.workout_workout_author
                  .filter(date__year=kwargs.get('year'))
                  .order_by('date'))
        return render(request, 'workout/workout_list.html',
                      context={'workouts': yearly})
class WorkoutMonthListView(LoginRequiredMixin, View):
    """Lists the user's workouts for one calendar month, oldest first."""

    def get(self, request, *args, **kwargs):
        monthly = (request.user.workout_workout_author
                   .filter(date__year=kwargs.get('year'),
                           date__month=kwargs.get('month'))
                   .order_by('date'))
        return render(request, 'workout/workout_list.html',
                      context={'workouts': monthly})
class WorkoutUpdateView(LoginRequiredMixin, UpdateView):
    """Edits an existing workout; returns to its detail page on success."""
    model = Workout
    form_class = WorkoutUpdateForm
    template_name = "workout/workout_form.html"

    def get_success_url(self):
        # Rebuild the detail URL from the year/month/pk captured in the route.
        url_kwargs = {name: self.kwargs.get(name) for name in ('year', 'month', 'pk')}
        return reverse_lazy('workout:workout_detail', kwargs=url_kwargs)
class WorkoutDeleteView(LoginRequiredMixin, DeleteView):
    """Confirms and deletes a workout, then returns to the dashboard."""
    model = Workout
    template_name = "workout/workout_delete.html"
    success_url = reverse_lazy('workout:dashboard')
class ExerciseDetailView(LoginRequiredMixin, DetailView):
    """Read-only detail page for a single exercise."""
    model = Exercise
    template_name = "workout/exercise_detail.html"
class ExerciseListView(LoginRequiredMixin, ListView):
    """Lists the user's exercises matching a search query, with history charts."""
    model = Exercise
    template_name = "workout/exercise_list.html"

    def get_context_data(self, **kwargs):
        # BUG FIX: the original called super(ExerciseDetailView, self), which
        # raises TypeError because self is not an ExerciseDetailView; it read
        # the query from **kwargs instead of the URL kwargs; and it discarded
        # the generated charts instead of exposing them to the template.
        context = super(ExerciseListView, self).get_context_data(**kwargs)
        query = self.kwargs.get('query')
        exercise_avg_script, exercise_avg_chart = bokeh_exercise_avg_weight_over_time(self.request.user, query)
        exercise_max_script, exercise_max_chart = bokeh_exercise_max_weight_over_time(self.request.user, query)
        exercise_1rm_script, exercise_1rm_chart = bokeh_exercise_1rm_weight_over_time(self.request.user, query)
        # Same context keys ExerciseUpdateView passes to its template.
        context.update({
            "exercise_avg_script": exercise_avg_script,
            "exercise_avg_chart": exercise_avg_chart,
            "exercise_max_script": exercise_max_script,
            "exercise_max_chart": exercise_max_chart,
            "exercise_1rm_script": exercise_1rm_script,
            "exercise_1rm_chart": exercise_1rm_chart,
        })
        return context

    def get_queryset(self):
        """Exercises owned by the requesting user whose name matches the query."""
        query = self.kwargs.get('query')
        queryset = Exercise.objects.filter(workout__author=self.request.user).filter(name__search=query)
        return queryset
class ExerciseDeleteView(LoginRequiredMixin, DeleteView):
    """Confirms and deletes one exercise, then returns to its workout."""
    model = Exercise
    template_name = "workout/exercise_delete.html"
    # The exercise's own pk arrives as the `epk` URL kwarg.
    pk_url_kwarg = "epk"

    def get_success_url(self):
        # Rebuild the parent workout's detail URL from the route kwargs.
        route_args = {key: self.kwargs.get(key) for key in ('year', 'month', 'pk')}
        return reverse_lazy('workout:workout_detail', kwargs=route_args)
class ExerciseUpdateView(LoginRequiredMixin, View):
    """Edits the sets of one exercise and shows its history charts."""
    def get(self, request, *args, **kwargs):
        # NOTE(review): .get() raises Exercise.DoesNotExist (HTTP 500) for an
        # unknown epk; get_object_or_404 would yield a 404 instead — confirm.
        exercise = Exercise.objects.get(pk=kwargs.get('epk'))
        # Past exercises with the same name, for the history panel.
        exercises = Exercise.objects.filter(name__search=exercise.name)
        formset = SetFormSet(instance=exercise)
        exercise_avg_script, exercise_avg_chart = bokeh_exercise_avg_weight_over_time(request.user, exercise.name)
        exercise_max_script, exercise_max_chart = bokeh_exercise_max_weight_over_time(request.user, exercise.name)
        exercise_1rm_script, exercise_1rm_chart = bokeh_exercise_1rm_weight_over_time(request.user, exercise.name)
        return render(request, "workout/exercise_form.html",
                context={"formset": formset, "exercise": exercise, "exercises": exercises, "helper": SetFormSetHelper(),
                    "exercise_avg_script": exercise_avg_script, "exercise_avg_chart": exercise_avg_chart,
                    "exercise_max_script": exercise_max_script, "exercise_max_chart": exercise_max_chart,
                    "exercise_1rm_script": exercise_1rm_script, "exercise_1rm_chart": exercise_1rm_chart})
    def post(self, request, *args, **kwargs):
        # Persist the edited set formset, then return to the parent workout.
        exercise = Exercise.objects.get(pk=kwargs.get('epk'))
        formset = SetFormSet(request.POST, request.FILES, instance=exercise)
        if formset.is_valid():
            formset.save()
            return HttpResponseRedirect(
                reverse('workout:workout_detail', kwargs={
                    'year': exercise.workout.date.year,
                    'month': exercise.workout.date.month,
                    'pk': exercise.workout.pk}
                )
            )
        # Invalid formset: re-render the form with the errors.
        return render(request, "workout/exercise_form.html",
                context={"formset": formset, "exercise": exercise, "helper": SetFormSetHelper()})
| StarcoderdataPython |
11259184 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Eval a Keras model on embeddings."""
import time
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v2 as tf
from non_semantic_speech_benchmark.eval_embedding.finetune import get_data
from non_semantic_speech_benchmark.eval_embedding.finetune import models
FLAGS = flags.FLAGS
# --- Input data and feature/label selection. ---
flags.DEFINE_string('file_pattern', None, 'Dataset location.')
flags.DEFINE_string('sk', None, 'Samples name.')
flags.DEFINE_alias('samples_key', 'sk')
flags.DEFINE_integer('ml', 16000, 'Minimum length.')
flags.DEFINE_alias('min_length', 'ml')
flags.DEFINE_string('label_key', None, 'Name of label to use.')
flags.DEFINE_list('label_list', None, 'List of possible label values.')
flags.DEFINE_integer('batch_size', None, 'The number of images in each batch.')
# --- Model hyperparameters (some mirrored from training but unused here). ---
flags.DEFINE_integer('tbs', None, 'not used')
flags.DEFINE_integer('nc', None, 'num_clusters')
flags.DEFINE_boolean('ubn', None, 'Whether to normalize')
flags.DEFINE_float('lr', None, 'not used')
# --- Checkpoint/summary locations and eval control. ---
flags.DEFINE_string('logdir', None,
                    'Directory where the model was written to.')
flags.DEFINE_string('eval_dir', None,
                    'Directory where the results are saved to.')
flags.DEFINE_integer('take_fixed_data', None,
                     'If not `None`, take a fixed number of data elements.')
flags.DEFINE_integer('timeout', 7200, 'Wait-for-checkpoint timeout.')
def eval_and_report():
  """Evaluates checkpoints from --logdir on the eval set as they appear.

  Restores each checkpoint, runs one full pass over the dataset, and writes
  accuracy and cross-entropy summaries (tagged with the checkpoint step) to
  --eval_dir. Stops when no new checkpoint arrives within --timeout seconds.
  """
  # BUG FIX: `tf` is tensorflow.compat.v2, which has no `logging` attribute
  # (tf.logging was removed from the TF2 namespace); use absl logging.
  logging.info('samples_key: %s', FLAGS.samples_key)
  logging.info('Logdir: %s', FLAGS.logdir)
  logging.info('Batch size: %s', FLAGS.batch_size)
  writer = tf.summary.create_file_writer(FLAGS.eval_dir)
  num_classes = len(FLAGS.label_list)
  model = models.get_keras_model(num_classes, FLAGS.ubn, num_clusters=FLAGS.nc)
  checkpoint = tf.train.Checkpoint(model=model)
  for ckpt in tf.train.checkpoints_iterator(
      FLAGS.logdir, timeout=FLAGS.timeout):
    assert 'ckpt-' in ckpt, ckpt
    # Global step is encoded in the checkpoint filename suffix.
    step = ckpt.split('ckpt-')[-1]
    logging.info('Starting to evaluate step: %s.', step)
    checkpoint.restore(ckpt)
    logging.info('Loaded weights for eval step: %s.', step)
    reader = tf.data.TFRecordDataset
    ds = get_data.get_data(
        file_pattern=FLAGS.file_pattern,
        reader=reader,
        samples_key=FLAGS.samples_key,
        min_length=FLAGS.min_length,
        label_key=FLAGS.label_key,
        label_list=FLAGS.label_list,
        batch_size=FLAGS.batch_size,
        loop_forever=False,
        shuffle=False)
    logging.info('Got dataset for eval step: %s.', step)
    if FLAGS.take_fixed_data:
      ds = ds.take(FLAGS.take_fixed_data)
    acc_m = tf.keras.metrics.Accuracy()
    xent_m = tf.keras.metrics.CategoricalCrossentropy(from_logits=True)
    logging.info('Starting the ds loop...')
    count, ex_count = 0, 0
    s = time.time()
    for wav_samples, y_onehot in ds:
      # Shape sanity checks: fixed-length audio and one-hot labels.
      wav_samples.shape.assert_is_compatible_with(
          [None, FLAGS.min_length])
      y_onehot.shape.assert_is_compatible_with(
          [None, len(FLAGS.label_list)])
      logits = model(wav_samples, training=False)
      acc_m.update_state(y_true=tf.argmax(y_onehot, 1),
                         y_pred=tf.argmax(logits, 1))
      xent_m.update_state(y_true=y_onehot, y_pred=logits)
      ex_count += logits.shape[0]
      count += 1
      logging.info('Saw %i examples after %i iterations as %.2f secs...',
                   ex_count, count,
                   time.time() - s)
    # Publish the metrics for this checkpoint under its global step.
    with writer.as_default():
      tf.summary.scalar('accuracy', acc_m.result().numpy(), step=int(step))
      tf.summary.scalar('xent_loss', xent_m.result().numpy(), step=int(step))
    logging.info('Done with eval step: %s in %.2f secs.', step, time.time() - s)
def main(unused_argv):
  """Checks that all required flags are set, then runs the evaluation loop."""
  tf.compat.v2.enable_v2_behavior()
  assert tf.executing_eagerly()
  # Every one of these flags is mandatory for an eval run.
  for required_flag in (FLAGS.file_pattern, FLAGS.samples_key, FLAGS.label_key,
                        FLAGS.label_list, FLAGS.logdir):
    assert required_flag
  eval_and_report()
# Script entry point: let absl parse flags, then dispatch to main().
if __name__ == '__main__':
  app.run(main)
| StarcoderdataPython |
5032691 | from pypykatz.commons.common import KatzSystemArchitecture, WindowsMinBuild, WindowsBuild
from pypykatz.commons.win_datatypes import ULONG, LUID, KIWI_GENERIC_PRIMARY_CREDENTIAL, POINTER, DWORD, PVOID, PSID, GUID, DWORD64
from pypykatz.lsadecryptor.package_commons import PackageTemplate
class CloudapTemplate(PackageTemplate):
	"""Architecture/build specific memory-scanning template for the cloudap package."""
	def __init__(self):
		super().__init__('Cloudap')
		# Byte pattern used to locate the logon list in LSASS memory.
		self.signature = None
		# Offset from the signature hit to the first list entry pointer.
		self.first_entry_offset = None
		# Parser class for entries of the located list.
		self.list_entry = None

	@staticmethod
	def get_template(sysinfo):
		"""Return the template for this system, or None when the build predates cloudap."""
		tpl = CloudapTemplate()
		# cloudap only exists on builds newer than Windows 10 1903.
		if sysinfo.buildnumber <= WindowsBuild.WIN_10_1903.value:
			return None
		arch = sysinfo.architecture
		if arch == KatzSystemArchitecture.X64:
			tpl.signature = b'\x44\x8b\x01\x44\x39\x42\x18\x75'
			tpl.first_entry_offset = -9
			tpl.list_entry = PKIWI_CLOUDAP_LOGON_LIST_ENTRY
		elif arch == KatzSystemArchitecture.X86:
			tpl.signature = b'\x8b\x31\x39\x72\x10\x75'
			tpl.first_entry_offset = -8
			tpl.list_entry = PKIWI_CLOUDAP_LOGON_LIST_ENTRY
		else:
			raise Exception('Could not identify template! Architecture: %s sysinfo.buildnumber: %s' % (sysinfo.architecture, sysinfo.buildnumber))
		tpl.log_template('list_entry', tpl.list_entry)
		return tpl
class PKIWI_CLOUDAP_CACHE_UNK(POINTER):
	"""Typed pointer that dereferences to a KIWI_CLOUDAP_CACHE_UNK."""
	def __init__(self, reader):
		super().__init__(reader, KIWI_CLOUDAP_CACHE_UNK)
class KIWI_CLOUDAP_CACHE_UNK:
	"""Cloudap cache blob parsed straight from LSASS memory.

	Fields are consumed sequentially from `reader`, so the statement order
	mirrors the native struct layout and must not change.
	"""
	def __init__(self, reader):
		self.unk0 = DWORD(reader)
		self.unk1 = DWORD(reader)
		self.unk2 = DWORD(reader)
		self.unkSize = DWORD(reader).value
		self.guid = GUID(reader)
		# Trailing opaque payload of unknown purpose (raw 64 bytes).
		self.unk = reader.read(64)
class PKIWI_CLOUDAP_CACHE_LIST_ENTRY(POINTER):
	"""Typed pointer that dereferences to a KIWI_CLOUDAP_CACHE_LIST_ENTRY."""
	def __init__(self, reader):
		super().__init__(reader, KIWI_CLOUDAP_CACHE_LIST_ENTRY)
class KIWI_CLOUDAP_CACHE_LIST_ENTRY:
	"""Doubly linked cloudap cache entry parsed from LSASS memory.

	Fields are consumed sequentially from `reader`; the statement order
	mirrors the native struct layout and must not change.
	"""
	def __init__(self, reader):
		# Forward/backward links of the doubly linked cache list.
		self.Flink = PKIWI_CLOUDAP_CACHE_LIST_ENTRY(reader)
		self.Blink = PKIWI_CLOUDAP_CACHE_LIST_ENTRY(reader)
		self.unk0 = DWORD(reader)
		reader.align()
		self.LockList = PVOID(reader)
		self.unk1 = PVOID(reader)
		self.unk2 = PVOID(reader)
		self.unk3 = PVOID(reader)
		self.unk4 = PVOID(reader)
		self.unk5 = PVOID(reader)
		self.unk6 = DWORD(reader)
		self.unk7 = DWORD(reader)
		self.unk8 = DWORD(reader)
		self.unk9 = DWORD(reader)
		self.unkLogin0 = PVOID(reader) #PCWSTR
		self.unkLogin1 = PVOID(reader) #PCWSTR
		self.toname = reader.read(130) #wchar_t [64 + 1];
		reader.align()
		# Security identifier of the cached account.
		self.Sid = PSID(reader).value
		self.unk10 = DWORD(reader)
		self.unk11 = DWORD(reader)
		self.unk12 = DWORD(reader)
		self.unk13 = DWORD(reader)
		self.toDetermine = PKIWI_CLOUDAP_CACHE_UNK(reader)
		self.unk14 = PVOID(reader)
		# Size in bytes of the Primary Refresh Token blob pointed to by PRT.
		self.cbPRT = DWORD(reader).value
		reader.align()
		self.PRT = PVOID(reader) #PBYTE(reader)
class PKIWI_CLOUDAP_LOGON_LIST_ENTRY(POINTER):
	"""Typed pointer that dereferences to a KIWI_CLOUDAP_LOGON_LIST_ENTRY."""
	def __init__(self, reader):
		super().__init__(reader, KIWI_CLOUDAP_LOGON_LIST_ENTRY)
class KIWI_CLOUDAP_LOGON_LIST_ENTRY:
	"""Cloudap logon session list entry parsed from LSASS memory.

	Fields are consumed sequentially from `reader`; the statement order
	mirrors the native struct layout and must not change.
	"""
	def __init__(self, reader):
		# Forward/backward links of the doubly linked logon list.
		self.Flink = PKIWI_CLOUDAP_LOGON_LIST_ENTRY(reader)
		self.Blink = PKIWI_CLOUDAP_LOGON_LIST_ENTRY(reader)
		self.unk0 = DWORD(reader)
		self.unk1 = DWORD(reader)
		# LUID identifying the logon session this entry belongs to.
		self.LocallyUniqueIdentifier = LUID(reader).value
		self.unk2 = DWORD64(reader)
		self.unk3 = DWORD64(reader)
		# Pointer to the credential cache entry for this logon.
		self.cacheEntry = PKIWI_CLOUDAP_CACHE_LIST_ENTRY(reader)
| StarcoderdataPython |
89703 | <reponame>CoderHongKong/python-study
# -*- coding: UTF-8 -*-
#!/usr/bin/env python
#-------------------------------------------------------------------------------
# Name:
# Purpose:
#
# Author: hekai
#-------------------------------------------------------------------------------
# Demo list with a nested sub-list (shared by the shallow copies below).
arrs = ['a', 'b', 'c', ['x', 'y'],'d', 'e']
print(arrs)
# Shallow copy — three equivalent ways: .copy(), slicing, and list();
# the nested ['x', 'y'] object is shared, not duplicated.
arrs_copy = arrs.copy()
print(arrs_copy)
arrs_ = arrs[:]
print(arrs_)
l = list(arrs)
print(l)
# Extended slice: every second element from index 0 up to (excluding) the last.
print(arrs[0:-1:2])
print(arrs[:2])
print('------------')
for i in arrs:
    print(i)
| StarcoderdataPython |
#bin/usr/python3
#Created by : @e_f_l_6_6_6
import os
# BUG FIX: `time` is imported here, before the sleep below; the original
# called time.sleep(2) before `import time`, which raised NameError.
import time
# NOTE(review): installing packages at run time via pip is fragile and needs
# network access; `hashlib` is standard library, so that install is a no-op.
os.system('pip install colorama')
os.system('pip install hashlib')
time.sleep(2)
import hashlib
import colorama
from bunner import *
import random
banner()
# NOTE(review): opening an external URL on startup is intrusive behavior.
os.system('termux-open-url https://t.me/elf_security_cyber')
time.sleep(5)
# Writes a promotional text file to external storage, then exits the loop.
while True:
    print(f"{colorama.Fore.RED}[{colorama.Fore.YELLOW}-{colorama.Fore.RED}]loding...!")
    os.chdir("/sdcard")
    f = open("look_at_me.txt","w+")
    c = 50
    for i in range(c):
        printed = '''$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$%ELF_666%$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$'''
        f.write("[-]thank you for follow me "+printed+" \n\nmy channel telgram:@elf_security_cyber "+printed+"\n\n pelese sub me!")
    f.close()
    # NOTE(review): c is always 50, so this break fires on the first pass
    # and everything below it in the loop body is dead code.
    if c == 50:
        break
    if os.name =='nt':
        print("channel : @elf_security_cyber")
    else:
        os.system('termux-open-url https://t.me/elf_security_cyber')
    if os.name == 'nt':
        os.system('cls')
    else:
        os.system('clear')
    # NOTE(review): `buuner` looks like a typo for `banner` — confirm it
    # actually exists in the star-imported `bunner` module.
    buuner()
# ASCII menu listing the 14 supported hash algorithms and their numbers.
a = '''
\t _______________________________________________________________________
\t |                          _    __                                     |
\t |                    ___  | |  / _|                                    |
\t |                   / _ \ | | | |_                                     |
\t |                  |  __/ | | |  _|                                    |
\t |                   \__\__ |_|_| | |                                   |
\t |               _________________                                      |
\t |               [¥]14 metod hashed            [-]                      |
\t |               [£]model hash                 [-]                      |
\t |               [&]md5                        [1]                      |
\t |               [&]sha1                       [2]                      |
\t |               [&]sha224                     [3]                      |
\t |               [&]sha256                     [4]                      |
\t |               [&]sha384                     [5]                      |
\t |               [&]sha512                     [6]                      |
\t |               [&]blake2b                    [7]                      |
\t |               [&]blake2s                    [8]                      |
\t |               [&]sha3_224                   [9]                      |
\t |               [&]sha3_256                   [10]                     |
\t |               [&1]sha3_384                  [11]                     |
\t |               [&]sha3_512                   [12]                     |
\t |               [&]shake_128                  [13]                     |
\t |               [&]shake_256                  [14]                     |
\t |                                                                      |
\t _______________________________________________________________________
'''
# Print the menu in one of ten randomly chosen colorama color styles.
i = random.randint(1,10)
if i == 1:
    print(colorama.Fore.GREEN+a)
if i == 2:
    print(colorama.Fore.RED+colorama.Cursor.BACK()+a)
if i ==3 :
    print(colorama.Fore.BLUE+colorama.Style.DIM+a)
if i == 4:
    # NOTE(review): Cursor.BACK(a) passes the menu string as the cursor-move
    # argument — probably a misplaced paren for Cursor.BACK()+a; confirm.
    print(colorama.Fore.CYAN+colorama.Cursor.BACK(a))
if i == 5:
    print(colorama.Fore.YELLOW+a)
if i == 6:
    print(colorama.Fore.LIGHTWHITE_EX+colorama.Style.DIM+a)
if i == 7:
    print(colorama.Fore.MAGENTA+a)
if i == 8:
    print(colorama.Fore.LIGHTCYAN_EX+a)
if i == 9:
    print(colorama.Fore.CYAN+colorama.Style.DIM+a)
if i ==10:
    print(colorama.Fore.LIGHTBLACK_EX+colorama.Style.DIM+a)
def hash_md5():
    """Prompt for text, print its MD5 hex digest, and optionally save it."""
    bunner_1()
    # BUG FIX: the original discarded the user's input and hashed nothing,
    # so it always printed the digest of the empty string.
    text = input(colorama.Fore.GREEN+'enter the named for hashed.md5 : ')
    digest = hashlib.md5(text.encode()).hexdigest()
    print(digest)
    # BUG FIX: the separator was only defined in the 'yes' branch, so any
    # other answer raised NameError.
    p = "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
    res = input('save to sdcard and new file? ')
    if res == 'yes':
        # Fresh file on external storage.
        os.chdir('/sdcard')
        mode = "w+"
    else:
        # Append to the file in the current directory.
        mode = "a"
    with open("md5.txt", mode) as out:
        out.write("[-]your hashed : \n" + digest + "\n" + p)
def hash_sha1():
    """Prompt for text, print its SHA-1 hex digest, and optionally save it."""
    bunner_2()
    # BUG FIX: the original discarded the user's input (hashed nothing) and
    # its append branch wrote the hash *object* instead of the digest string,
    # which raised TypeError on string concatenation.
    text = input(colorama.Fore.GREEN+'enter the named for hashed.sha1 : ')
    digest = hashlib.sha1(text.encode()).hexdigest()
    print(digest)
    # BUG FIX: the separator was only defined in the 'yes' branch.
    p = "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
    res = input('save to sdcard and new file? ')
    if res == 'yes':
        os.chdir('/sdcard')
        mode = "w+"
    else:
        mode = "a"
    with open("sha1.txt", mode) as out:
        out.write("[-]your hashed : \n" + digest + "\n" + p)
def hash_sha224():
    """Prompt for text, print its SHA-224 hex digest, and optionally save it."""
    bunner_3()
    # BUG FIX: the original discarded the user's input and hashed nothing.
    text = input(colorama.Fore.GREEN+'enter the named for hashed sha224 : ')
    digest = hashlib.sha224(text.encode()).hexdigest()
    print(digest)
    # BUG FIX: the separator was only defined in the 'yes' branch.
    p = "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
    res = input('save to sdcard and new file? ')
    if res == 'yes':
        os.chdir('/sdcard')
        mode = "w+"
    else:
        mode = "a"
    with open("sha224.txt", mode) as out:
        out.write("[-]your hashed : \n" + digest + "\n" + p)
def hash_sha256():
    """Prompt for text, print its SHA-256 hex digest, and optionally save it."""
    bunner_4()
    # BUG FIX: the original discarded the user's input and hashed nothing.
    text = input(colorama.Fore.GREEN+'enter the named for hashed sha256 : ')
    digest = hashlib.sha256(text.encode()).hexdigest()
    print(digest)
    # BUG FIX: the separator was only defined in the 'yes' branch.
    p = "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
    res = input('save to sdcard and new file? ')
    if res == 'yes':
        os.chdir('/sdcard')
        mode = "w+"
    else:
        mode = "a"
    with open("sha256.txt", mode) as out:
        out.write("[-]your hashed : \n" + digest + "\n" + p)
def hash_sha384():
    """Prompt for text, print its SHA-384 hex digest, and optionally save it."""
    bunner_5()
    # BUG FIX: the original discarded the user's input and hashed nothing.
    text = input(colorama.Fore.GREEN+'enter the named for hashed.sha384 : ')
    digest = hashlib.sha384(text.encode()).hexdigest()
    print(digest)
    # BUG FIX: the separator was only defined in the 'yes' branch.
    p = "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
    res = input('save to sdcard and new file? ')
    if res == 'yes':
        os.chdir('/sdcard')
        mode = "w+"
    else:
        mode = "a"
    with open("sha384.txt", mode) as out:
        out.write("[-]your hashed : \n" + digest + "\n" + p)
def hash_sha512():
    """Prompt for text, print its SHA-512 hex digest, and optionally save it."""
    bunner_1()
    # BUG FIX: the original discarded the user's input and hashed nothing.
    text = input(colorama.Fore.GREEN+'enter the naemd for hashed.sha512 : ')
    # BUG FIX: the original constructed hashlib.sha3_512 here even though the
    # function and output file are named sha512 (sha3_512 has its own
    # dedicated function below); use the matching algorithm.
    digest = hashlib.sha512(text.encode()).hexdigest()
    print(digest)
    # BUG FIX: the separator was only defined in the 'yes' branch.
    p = "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
    res = input('save to sdcard and new file? ')
    if res == 'yes':
        os.chdir('/sdcard')
        mode = "w+"
    else:
        mode = "a"
    with open("sha512.txt", mode) as out:
        out.write("[-]your hashed : \n" + digest + "\n" + p)
def hash_blake2b():
    """Prompt for text, print its BLAKE2b hex digest, and optionally save it."""
    bunner_2()
    # BUG FIX: the original discarded the user's input and hashed nothing.
    text = input(colorama.Fore.GREEN+'enter the named for hashed.blake2b : ')
    digest = hashlib.blake2b(text.encode()).hexdigest()
    print(digest)
    # BUG FIX: the separator was only defined in the 'yes' branch.
    p = "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
    res = input('save to sdcard and new file? ')
    if res == 'yes':
        os.chdir('/sdcard')
        mode = "w+"
    else:
        mode = "a"
    with open("blake2b.txt", mode) as out:
        out.write("[-]your hashed : \n" + digest + "\n" + p)
def hash_blake2s():
    """Prompt for text, print its BLAKE2s hex digest, and optionally save it."""
    bunner_4()
    # BUG FIX: the original discarded the user's input and hashed nothing.
    text = input(colorama.Fore.GREEN+'enter the named for hashed.blake2s : ')
    digest = hashlib.blake2s(text.encode()).hexdigest()
    print(digest)
    # BUG FIX: the separator was only defined in the 'yes' branch.
    p = "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
    res = input('save to sdcard and new file? ')
    if res == 'yes':
        os.chdir('/sdcard')
        mode = "w+"
    else:
        mode = "a"
    with open("blake2s.txt", mode) as out:
        out.write("[-]your hashed : \n" + digest + "\n" + p)
def hash_sha3_224():
    """Prompt for text, print its SHA3-224 hex digest, and optionally save it."""
    bunner_3()
    # BUG FIX: the original discarded the user's input and hashed nothing.
    text = input(colorama.Fore.GREEN+'enter the named for hashed.sha3224 : ')
    digest = hashlib.sha3_224(text.encode()).hexdigest()
    print(digest)
    # BUG FIX: the separator was only defined in the 'yes' branch.
    p = "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
    res = input('save to sdcard and new file? ')
    if res == 'yes':
        os.chdir('/sdcard')
        mode = "w+"
    else:
        mode = "a"
    with open("sha3_224.txt", mode) as out:
        out.write("[-]your hashed : \n" + digest + "\n" + p)
def hash_sha3_256():
    """Prompt for text, print its SHA3-256 hex digest, and optionally save it."""
    bunner()
    # BUG FIX: the original discarded the user's input and hashed nothing.
    text = input(colorama.Fore.GREEN+'enter the named for hashed.sha3_256 : ')
    digest = hashlib.sha3_256(text.encode()).hexdigest()
    print(digest)
    # BUG FIX: the separator was only defined in the 'yes' branch.
    p = "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
    res = input('save to sdcard and new file? ')
    if res == 'yes':
        os.chdir('/sdcard')
        mode = "w+"
    else:
        mode = "a"
    with open("sha3_256.txt", mode) as out:
        out.write("[-]your hashed : \n" + digest + "\n" + p)
def hash_sha3_384():
    """Prompt for text, print its SHA3-384 hex digest, and optionally save it."""
    bunner_5()
    # BUG FIX: the original discarded the user's input and hashed nothing.
    text = input(colorama.Fore.GREEN+'enter the named for hashed.sha3_384 : ')
    digest = hashlib.sha3_384(text.encode()).hexdigest()
    print(digest)
    # BUG FIX: the separator was only defined in the 'yes' branch.
    p = "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
    res = input('save to sdcard and new file? ')
    if res == 'yes':
        os.chdir('/sdcard')
        mode = "w+"
    else:
        mode = "a"
    with open("sha3_384.txt", mode) as out:
        out.write("[-]your hashed : \n" + digest + "\n" + p)
def hash_sha3_512():
    """Prompt for text, print its SHA3-512 hex digest, and optionally save it."""
    bunner_2()
    # BUG FIX: the original discarded the user's input and hashed nothing.
    text = input(colorama.Fore.GREEN+'enter the named for hashed.sha3_512 : ')
    digest = hashlib.sha3_512(text.encode()).hexdigest()
    print(digest)
    # BUG FIX: the separator was only defined in the 'yes' branch.
    p = "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
    res = input('save to sdcard and new file? ')
    if res == 'yes':
        os.chdir('/sdcard')
        mode = "w+"
    else:
        mode = "a"
    with open("sha3_512.txt", mode) as out:
        out.write("[-]your hashed : \n" + digest + "\n" + p)
def hash_shake_128():
    """Prompt for text, print its SHAKE128 hex digest, and optionally save it."""
    bunner_4()
    # BUG FIX: the original discarded the user's input and hashed nothing.
    text = input(colorama.Fore.GREEN+'enter the named for hashed.shake_123 : ')
    # BUG FIX: SHAKE is variable-length, so hexdigest requires an explicit
    # byte length (16 bytes = 128 bits); the original's bare hexdigest()
    # raised TypeError.
    digest = hashlib.shake_128(text.encode()).hexdigest(16)
    print(digest)
    # BUG FIX: the separator was only defined in the 'yes' branch.
    p = "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
    res = input('save to sdcard and new file? ')
    if res == 'yes':
        os.chdir('/sdcard')
        mode = "w+"
    else:
        mode = "a"
    with open("shake_128.txt", mode) as out:
        out.write("[-]your hashed : \n" + digest + "\n" + p)
def hash_shake_256():
    """Prompt for text, print its SHAKE-256 hex digest and offer to save it.

    On 'yes' the digest is written to a fresh /sdcard/shake_256.txt,
    otherwise it is appended to shake_256.txt in the current directory.
    """
    bunner_1()
    # Hash the entered text — the original discarded the input.
    text = input("enter the name for hash shake_256")
    # The original `a = r.hexdigest` stored the bound method instead of
    # calling it; SHAKE hexdigest also requires an output length.
    # 32 bytes matches the 256-bit security strength of shake_256.
    a = hashlib.shake_256(text.encode()).hexdigest(32)
    print(a)
    # Defined before branching so the append path no longer hits a NameError.
    p = "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
    res = input('save to sdcard and new file? ')
    if res == 'yes':
        os.chdir('/sdcard')
        mode = "w+"
    else:
        mode = "a"
    with open("shake_256.txt", mode) as out:
        out.write("[-]your hashed : \n" + a + "\n" + p)
# Menu dispatch: maps the user's choice to the matching hash routine.
# Replaces the fourteen sentinel variables a1..a14 and the if-chain.
_HASH_ACTIONS = {
    '1': hash_md5,
    '2': hash_sha1,
    '3': hash_sha224,
    '4': hash_sha256,
    '5': hash_sha384,
    '6': hash_sha512,
    '7': hash_blake2b,
    '8': hash_blake2s,
    '9': hash_sha3_224,
    '10': hash_sha3_256,
    '11': hash_sha3_384,
    '12': hash_sha3_512,
    '13': hash_shake_128,
    '14': hash_shake_256,
}
for i in range(0, 1):
    b1 = input(colorama.Fore.CYAN + colorama.Back.BLACK + 'enter the number [1 & 14] : ')
    if b1 == 'exit':
        break
    action = _HASH_ACTIONS.get(b1)
    if action is not None:
        action()
# Open the author's Telegram channel when the menu loop finishes.
os.system('termux-open-url https://t.me/elf_security_cyber')
| StarcoderdataPython |
1608458 | #!/usr/bin/env python
""" this node publishes the frame of the ball with a parent of the camera_depth_optical_frame"""
from __future__ import print_function
import rospy
import tf2_ros
import tf2_geometry_msgs
import geometry_msgs.msg
if __name__ == '__main__':
    rospy.init_node('my_static_tf2_broadcaster')
    broadcaster = tf2_ros.StaticTransformBroadcaster()
    tf_buffer = tf2_ros.Buffer()
    listener = tf2_ros.TransformListener(tf_buffer)
    loop = True
    rate = rospy.Rate(10.0)
    # Poll at 10 Hz until the marker->base transform becomes available.
    while loop:
        try:
            # NOTE(review): the lookup uses frame "ar_marker2_6" but the
            # static transform published below is under "ar_marker_6" —
            # one of the two names looks like a typo; confirm against the
            # actual TF tree before changing either.
            t = tf_buffer.lookup_transform("ar_marker2_6", 'base', rospy.Time())
            loop = False
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
            rate.sleep()
            continue
    # Freeze the captured transform and re-publish it as a static transform.
    static_transformStamped = geometry_msgs.msg.TransformStamped()
    static_transformStamped.header.frame_id = "ar_marker_6"
    static_transformStamped.child_frame_id = "base"
    static_transformStamped.transform.translation = t.transform.translation
    static_transformStamped.transform.rotation = t.transform.rotation
    while not rospy.is_shutdown():
        static_transformStamped.header.stamp = rospy.Time.now()
        broadcaster.sendTransform(static_transformStamped)
| StarcoderdataPython |
6448074 | #!/usr/bin/python
import matplotlib
import numpy as np
class Angle(object):
    """Cyclic 2D unit direction: right, up, left, down (in that order)."""

    _DIRECTIONS = [[1, 0], [0, 1], [-1, 0], [0, -1]]

    def __init__(self, a):
        # Unknown directions default to index 0, i.e. [1, 0].
        self._idx = self._DIRECTIONS.index(a) if a in self._DIRECTIONS else 0
        self.angle = self._DIRECTIONS[self._idx]

    def prev(self):
        """Rotate one step clockwise (to the previous direction)."""
        self._idx = (self._idx + 3) % 4
        self.angle = self._DIRECTIONS[self._idx]

    def next(self):
        """Rotate one step counter-clockwise (to the next direction)."""
        self._idx = (self._idx + 1) % 4
        self.angle = self._DIRECTIONS[self._idx]
class Section(object):
    """One unit-length link of a chain: index, grid point, heading (degrees)."""

    def __init__(self, n, p, a):
        self.number = n
        self.point = p
        self.angle = a

    def __str__(self):
        return "{0:d} {1} {2:d}".format(self.number, self.point, self.angle)
class Chain(object):
    """A self-avoiding chain of unit segments on the 2D integer grid.

    The chain starts at (0, 0) heading 90 degrees (up). ``next(angle)``
    tries to append one segment; ``allow`` turns False on a collision and
    ``bad`` flags chains whose free end folds straight back.
    """

    def __init__(self, maxLen):
        self.maxLen = maxLen          # target number of sections
        self.count = 1
        self.chain = []
        self.chain.append(Section(self.count, [0, 0], 90))
        self.thisSection = self.chain[-1]
        self.len = self.chain[-1].number
        self.allow = True             # False once a self-intersection occurs
        self.bad = False              # True if the end folds back on itself
        self.endPoint = []
        self.length = 1.0             # end-to-end distance, set by getLength()

    def __str__(self):
        i = 0
        text = ""  # renamed from 'str' to stop shadowing the builtin
        while i < self.count:
            text += "%d %s\t %s\t\n" % (self.chain[i].number, self.chain[i].point, self.chain[i].angle)
            i += 1
        return text

    def next(self, angle):
        """Try to append one segment; on collision clear ``allow``."""
        self.thisSection = self.chain[-1]
        nextSection = Section(self.count, [0, 0], angle)
        nextSection.point[0] = int(round(self.thisSection.point[0] + np.cos(self.thisSection.angle * np.pi / 180.)))
        nextSection.point[1] = int(round(self.thisSection.point[1] + np.sin(self.thisSection.angle * np.pi / 180.)))
        if self.thisSection.angle not in [0, 90, 180, 270]:
            # print() form keeps the module importable under Python 3 as
            # well (the rest of this file already uses print() calls).
            print("Chain -> next: angle is bad")
            exit(0)
        if self.test(nextSection.point, nextSection.angle):
            self.count += 1
            nextSection.number = self.count
            self.chain.append(nextSection)
            self.len = nextSection.number
        else:
            self.allow = False

    def test(self, p, a):
        """Return True if point *p* (heading *a*) collides with nothing."""
        i = self.count - 1
        self.getEndPoint(p, a)
        if self.endPoint == self.chain[-1].point:
            self.bad = True
        while i >= 0:
            # (unreachable `break` statements after these returns removed)
            if self.chain[i].point == p:
                return False
            if self.chain[i].point == self.endPoint:
                return False
            i -= 1
        return True

    def plot(self):
        """Save a picture of this single chain to chain.png."""
        import matplotlib.pyplot as plt
        plt.grid(True)
        plt.xlim(-self.maxLen, self.maxLen)
        plt.ylim(-self.maxLen + 2, self.maxLen + 1)
        self.plotChain()
        plt.savefig("chain.png")

    def plotChain(self):
        """Draw the chain's points (plus the free end) on the current axes."""
        i = 0
        x = []
        y = []
        while i < self.count:
            x.append(self.chain[i].point[0])
            y.append(self.chain[i].point[1])
            i += 1
        x.append(self.endPoint[0])
        y.append(self.endPoint[1])
        import matplotlib.pyplot as plt
        plt.plot(x, y, 'kx-', alpha=0.5)

    def genChain(self, style, list=None):
        """Grow the chain to maxLen using 'random', 'range' or 'list' angles."""
        if style == 'random': import random
        i = 0
        while i < self.maxLen - 1:
            if style == 'random': angle = random.randrange(0, 360, 90)
            if style == 'range': angle = 90
            if style == 'list': angle = list[i]
            self.next(angle)
            i += 1

    def getEndPoint(self, p, a):
        """Store the grid point one unit ahead of *p* along heading *a*."""
        x = int(round(p[0] + np.cos(a * np.pi / 180.)))
        y = int(round(p[1] + np.sin(a * np.pi / 180.)))
        self.endPoint = [x, y]

    def getLength(self):
        """Set ``length`` to the Euclidean distance of endPoint from origin."""
        self.length = np.sqrt(self.endPoint[0] ** 2 + self.endPoint[1] ** 2)
class ListConfigurations(object):
    """Enumerate every chain configuration of a given length.

    ``genConfigurations`` brute-forces all angle sequences (multiples of
    90 degrees) and keeps the self-avoiding ones in allowConfigurations.
    """

    def __init__(self, length):
        self.countConf = 0
        self.maxChainLength = length
        self.gapConfigurations = []
        self.allowConfigurations = []
        self.listConfigurations = []
        self.lengthArr = []

    def genConfigurations(self):
        """Try all 4**(length-1) angle sequences and keep the allowed chains."""
        import itertools
        # 'angles' renamed from 'list' to stop shadowing the builtin.
        for angles in itertools.product(range(0, 360, 90), repeat=(self.maxChainLength - 1)):
            chain = Chain(self.maxChainLength)
            chain.genChain('list', angles)
            if chain.allow:
                self.allowConfigurations.append(chain)
                lens = len(self.allowConfigurations)
                # '==' instead of 'is': identity of int objects is an
                # implementation detail and must not be used for equality.
                if lens % 1000 == 0:
                    print(lens)
            else:
                if chain.bad == False:
                    self.gapConfigurations.append(0)

    def plot(self):
        """Overlay every allowed chain in one figure (allChain.png)."""
        import matplotlib.pyplot as plt
        plt.figure()
        plt.grid(True)
        plt.xlim(-self.maxChainLength, self.maxChainLength)
        plt.ylim(-self.maxChainLength + 2, self.maxChainLength + 1)
        i = 0
        while i < len(self.allowConfigurations):
            self.allowConfigurations[i].plotChain()
            i += 1
        plt.savefig("allChain.png")

    def getLength(self):
        """Collect end-to-end lengths, print their mean, save a histogram."""
        i = 0
        length = 0
        while i < len(self.allowConfigurations):
            self.allowConfigurations[i].getLength()
            self.lengthArr.append(self.allowConfigurations[i].length)
            length += self.allowConfigurations[i].length
            i += 1
        # This is the arithmetic mean (the old label wrongly said "Median").
        print("Mean length: %f" % (float(length) / len(self.allowConfigurations)))
        import matplotlib.pyplot as plt
        plt.figure()
        plt.grid(True)
        plt.xlim(0, self.maxChainLength + 0.5)
        plt.xlabel("Chain length")
        plt.ylabel("Number of chains")
        n, bins, patches = plt.hist(self.lengthArr, 1000, facecolor='black', alpha=0.5)
        print(max(n))
        print(bins[np.where(n == max(n))])
        print(n)
        print(bins)
        plt.savefig("distHist.png")
# Enumerate all 11-section configurations, plot one example chain plus the
# overlay of all allowed chains, then histogram the end-to-end lengths.
obj = ListConfigurations(11)
obj.genConfigurations()
print(len(obj.allowConfigurations), len(obj.gapConfigurations))
# NOTE(review): 8502 is a magic example index — raises IndexError if fewer
# configurations are generated (e.g. for a shorter chain length); confirm.
obj.allowConfigurations[8502].plot()
obj.plot()
obj.getLength()
#print(obj2.allowConfigurations, obj2.gapConfigurations)
6612368 | from time import sleep
data = input('\033[1;30;107mDigite algo:\033[m ')
print('\033[1;35;107mIDENTIFICANDO PROPRIEDADES...\033[m')
sleep(3)
print('\n\033[1;30;107mÉ string.\033[m')
# Each str predicate paired with the message printed when it holds;
# evaluated in the same order as the original if-chain.
_PROPERTY_CHECKS = (
    (data.isnumeric, 'É numérico.'),
    (data.isalpha, 'É alfabético.'),
    (data.isalnum, 'É alfanumérico.'),
    (data.isdigit, 'É dígito.'),
    (data.isdecimal, 'É decimal.'),
    (data.islower, 'É minúsculo.'),
    (data.isspace, 'Só tem espaços.'),
    (data.isupper, 'É maiúsculo.'),
    (data.istitle, 'É capitalizado.'),
)
for check, label in _PROPERTY_CHECKS:
    if check():
        print('\033[1;30;107m' + label + '\033[m')
| StarcoderdataPython |
3201424 | from .client import Client
from .consts import *
class EttAPI(Client):
    """Thin REST client for the exchange's ETT (exchange-traded tracker)
    endpoints; all requests are delegated to the authenticated Client base."""

    def __init__(self, api_key, api_seceret_key, passphrase, use_server_time=False):
        Client.__init__(self, api_key, api_seceret_key, passphrase, use_server_time)

    # query accounts (all ETT balances)
    def get_accounts(self):
        return self._request_without_params(GET, ETT_ACCOUNTS)

    # query a single account by currency/ETT symbol
    def get_account(self, symbol):
        return self._request_without_params(GET, ETT_ACCOUNT + str(symbol))

    # query the ledger (bill history) for one symbol
    def get_ledger(self, symbol):
        return self._request_without_params(GET, ETT_LEDGER + str(symbol) + '/ledger')

    # place an order
    def take_order(self, otype, quoto_currency, amount, size, ett, client_oid=''):
        # NOTE(review): 'quoto_currency' (sic) is sent verbatim as the API
        # parameter name; confirm against the exchange docs before renaming.
        params = {'type': otype, 'quoto_currency': quoto_currency, 'amount': amount, 'size': size, 'ett': ett, 'client_oid': client_oid}
        return self._request_with_params(POST, ETT_ORDER, params)

    # cancel an order by id
    def revoke_order(self, order_id):
        return self._request_without_params(DELETE, ETT_REVOKE + str(order_id))

    # query order list
    #def get_order_list(self, status, ett, otype, before, after, limit):
    #    params = {'status': status, 'ett': ett, 'tyoe': otype, 'before': before, 'after': after, 'limit': limit}
    #    return self._request_with_params(GET, ETT_ORDER_LIST, params, cursor=True)
    def get_order_list(self, status, ett, otype, froms, to, limit):
        # 'froms' avoids shadowing the 'from' keyword; mapped back to the
        # API's 'from' parameter below.
        params = {'status': status, 'ett': ett, 'type': otype, 'from': froms, 'to': to, 'limit': limit}
        return self._request_with_params(GET, ETT_ORDER_LIST, params, cursor=True)

    # query a single order by id
    def get_specific_order(self, order_id):
        return self._request_without_params(GET, ETT_SPECIFIC_ORDER + str(order_id))

    # query the constituents of an ETT
    def get_constituents(self, ett):
        return self._request_without_params(GET, ETT_CONSTITUENTS + str(ett))

    # query the defined (settlement) price of an ETT
    def get_define_price(self, ett):
        return self._request_without_params(GET, ETT_DEFINE + str(ett))
| StarcoderdataPython |
1754912 | <filename>host/sds.py
#! /usr/bin/python
import os
import sys
import numpy
import time
from subprocess import Popen, PIPE
import random
# GPIO bank pin numbers: bank X pin i is addressed as base_offset + i.
GPA = tuple(range(0, 32))
GPB = tuple(range(32, 48))
GPC = tuple(range(64, 80))
GPD = tuple(range(96, 112))
GPE = tuple(range(128, 144))
GPF = tuple(range(160, 176))
GPH = tuple(range(224, 240))
class SDS(object):
    """Remote-control client for an SDS oscilloscope board (Python 2).

    Spawns ``sds-server`` on *host* over ssh and talks to it with a simple
    newline-terminated text protocol on stdin/stdout: FPGA/SoC register
    reads and writes, DDR memory access, GPIO control, and a bit-bang SPI
    shifter used to program the analog front-end chips.
    """

    def __init__(self, host):
        # Long-lived ssh pipe; replies come back one value per line.
        self.p = Popen([ 'ssh', host, './sds-server' ],
                       stdin = PIPE, stdout = PIPE, stderr = sys.stderr,
                       universal_newlines = False)
        self.fi = self.p.stdout
        self.fo = self.p.stdin
        self.verbose = False

    def read_regs(self, addr, count):
        """Read *count* 32-bit FPGA registers starting at *addr*."""
        cmd = 'read_fpga 0x%x %u' % (addr, count)
        if self.verbose:
            print cmd
        self.fo.write(cmd + '\n')
        self.fo.flush()
        a = numpy.zeros(count, dtype = numpy.uint32)
        for i in range(count):
            s = self.fi.readline()
            a[i] = int(s, 0)
        return a

    def read_reg(self, addr):
        return self.read_regs(addr, 1)[0]

    def write_regs(self, addr, data):
        """Write a sequence of 32-bit values to FPGA registers at *addr*."""
        cmd = 'write_fpga 0x%x %s' % (addr, ' '.join([ '0x%x' % v for v in data ]))
        if self.verbose:
            print cmd
        self.fo.write(cmd + '\n')
        self.fo.flush()

    def write_reg(self, addr, value):
        self.write_regs(addr, [ value ])

    def read_soc_regs(self, addr, count):
        """Read *count* 32-bit SoC registers starting at *addr*."""
        cmd = 'read_soc 0x%x %u' % (addr, count)
        if self.verbose:
            print cmd
        self.fo.write(cmd + '\n')
        self.fo.flush()
        a = numpy.zeros(count, dtype = numpy.uint32)
        for i in range(count):
            s = self.fi.readline()
            a[i] = int(s, 0)
        return a

    def read_soc_reg(self, addr):
        return self.read_soc_regs(addr, 1)[0]

    def write_soc_regs(self, addr, data):
        cmd = 'write_soc 0x%x %s' % (addr, ' '.join([ '0x%x' % v for v in data ]))
        if self.verbose:
            print cmd
        self.fo.write(cmd + '\n')
        self.fo.flush()

    def write_soc_reg(self, addr, value):
        self.write_soc_regs(addr, [ value ])

    def read_ddr(self, addr, count):
        """Read DDR memory as text replies (slow path)."""
        cmd = 'read_ddr 0x%x %u' % (addr, count)
        if self.verbose:
            print cmd
        self.fo.write(cmd + '\n')
        self.fo.flush()
        a = numpy.zeros(count, dtype = numpy.uint32)
        for i in range(count):
            s = self.fi.readline()
            a[i] = int(s, 0)
        return a

    def read_ddr_b(self, addr, count):
        """Read DDR memory as a raw binary stream (fast path)."""
        cmd = 'read_ddr_b 0x%x %u' % (addr, count)
        if self.verbose:
            print cmd
        self.fo.write(cmd + '\n')
        self.fo.flush()
        a = numpy.fromfile(self.fi, dtype = numpy.uint32, count = count)
        return a

    def write_ddr(self, addr, data):
        cmd = 'write_ddr 0x%x %s' % (addr, ' '.join([ '0x%x' % v for v in data ]))
        if self.verbose:
            print cmd
        self.fo.write(cmd + '\n')
        self.fo.flush()

    def set_gpio(self, pin, value):
        cmd = 'set_gpio %u %u' % (pin, value)
        if self.verbose:
            print cmd
        self.fo.write(cmd + '\n')
        self.fo.flush()

    def atten(self, channel, value):
        """Select the input attenuator relay pair for one channel."""
        assert channel >= 0 and channel < 2
        assert value >= 0 and value < 2
        if channel:
            a = GPE[1]
            b = GPH[12]
        else:
            a = GPA[15]
            b = GPA[1]
        # Complementary drive of the relay pair.
        self.set_gpio(a, 1 - value)
        self.set_gpio(b, value)

    def acdc(self, channel, value):
        """Switch AC/DC input coupling for one channel."""
        assert channel >= 0 and channel < 2
        assert value >= 0 and value < 2
        if channel:
            a = GPD[8]
        else:
            a = GPA[0]
        self.set_gpio(a, value)

    def mux(self, channel, value):
        """Set the 2-bit input multiplexer selection for one channel."""
        assert channel >= 0 and channel < 2
        assert value >= 0 and value < 4
        if channel:
            a0 = GPH[9]
            a1 = GPH[11]
        else:
            a0 = GPC[5]
            a1 = GPC[6]
        self.set_gpio(a0, value & 1)
        self.set_gpio(a1, (value & 2) >> 1)

    def odd_relay(self, value):
        assert value >= 0 and value < 2
        self.set_gpio(GPE[3], value)

    def ext_relay(self, value):
        assert value >= 0 and value < 2
        self.set_gpio(GPC[7], value)

    def shifter(self, cs, bits, value, cpol = 0, cpha = 0, pulse = 0):
        """Clock *bits* of *value* out through the SPI shifter on chip-select
        *cs*, with the given clock polarity/phase and optional load pulse."""
        assert cpol >= 0 and cpol < 2
        assert cpha >= 0 and cpha < 2
        assert pulse >= 0 and pulse < 2
        assert cs >= 0 and pulse < 6
        assert bits >= 0 and bits <= 32
        data = [ value,
                 bits | (cpha<<8) | (cpol<<9) | (pulse<<10) | (1<<16<<cs) ]
        self.write_regs(0x210, data)
        time.sleep(0.1)

    def bu2506(self, channel, value):
        """Program one channel of the BU2506 DAC (14-bit frame, LSB first)."""
        assert channel >= 0 and channel < 16
        assert value >= 0 and value < 1024
        v = channel | (value << 4)
        # Bit-reverse the 14-bit frame before shifting it out.
        v2 = 0
        for i in range(14):
            v2 <<= 1
            if v & (1<<i):
                v2 |= 1
        if self.verbose:
            print "bu2506 0x%04x 0x%04x" % (v, v2)
        self.shifter(0, 14, v2, pulse = 1)

    def adf4360(self, value):
        # ADF4360 PLL: 24-bit register write.
        self.shifter(1, 24, value, pulse = 1)

    def adc08d500(self, value):
        # ADC08D500 ADC: 32-bit register write, mode 3 clocking.
        self.shifter(2, 32, value, cpol = 1, cpha = 1)

    def lmh6518(self, channel, value):
        """Program the LMH6518 variable-gain amplifier for one channel."""
        assert channel >= 0 and channel < 2
        if channel:
            self.shifter(4, 24, value)
        else:
            self.shifter(3, 24, value)

    def dac8532(self, channel, value):
        """Program one channel of the DAC8532 offset DAC (16-bit value)."""
        assert channel >= 0 and channel < 2
        assert value >= 0 and value < 65536
        if channel:
            base = 0x100000
        else:
            base = 0x240000
        self.shifter(5, 24, base | value, cpha = 1)

    def capture(self, count):
        """Trigger a capture and return the two channels interleaved."""
        self.write_reg(0x230, 0)
        self.write_reg(0x230, 1)
        time.sleep(0.1)
        data0 = self.read_regs(0x4000, count)
        data1 = self.read_regs(0x6000, count)
        data = numpy.dstack((data0, data1))[0].reshape(len(data0)+len(data1))
        print data0
        print data1
        print data
        return data

    def mig_reset(self):
        """Pulse the DDR memory controller (MIG) reset bit."""
        self.write_soc_reg(0x200, 1)
        print "ctrl 0x%08x" % self.read_soc_reg(0x200)
        self.write_soc_reg(0x200, 0)
        print "ctrl 0x%08x" % self.read_soc_reg(0x200)
        time.sleep(0.1)
        print "ctrl 0x%08x" % self.read_soc_reg(0x200)
        print

    def do_mig_capture(self, synthetic = 0):
        """Run a DDR capture cycle; *synthetic* selects test-pattern data."""
        self.write_soc_reg(0x230, 0)
        time.sleep(0.1)
        if 1:
            self.mig_reset()
            time.sleep(0.1)
        v = 1
        if synthetic:
            v |= 2
        self.write_soc_reg(0x230, v)
        print "capture_status 0x%08x" % self.read_soc_reg(0x230)
        time.sleep(0.1)
        self.write_soc_reg(0x230, 0)
        print "capture_status 0x%08x" % self.read_soc_reg(0x230)
        time.sleep(0.1)
        print "p2"
        print "counts 0x%08x" % self.read_soc_reg(0x221)
        decode_mig_status(self.read_soc_reg(0x220))
        print "p3"
        print "counts 0x%08x" % self.read_soc_reg(0x229)
        decode_mig_status(self.read_soc_reg(0x228))

    def mig_capture(self, count, synthetic = 0):
        """Capture into DDR and read back *count* words (binary path)."""
        self.do_mig_capture(synthetic = synthetic)
        t0 = time.time()
        data = self.read_ddr_b(0, count)
        t = time.time()
        print "capture time", t - t0
        if 0:
            # Disabled: re-read and verify the readout is reproducible.
            data2 = self.read_ddr_b(0, count)
            assert all(data == data2)
        if 0:
            # Disabled: de-interleave the two 32-word channel bursts and
            # sanity-check the synthetic counter pattern.
            chunk = 32
            data = data[:int(len(data) / (chunk * 2)) * (chunk * 2)]
            s = numpy.reshape(data, (len(data) / (chunk * 2), 2, chunk))
            s0 = numpy.reshape(s[:,0,:], len(data)/2)
            s1 = numpy.reshape(s[:,1,:], len(data)/2)
            if 0:
                print s0[:256]
                print s1[:256]
            if 0:
                # Check of synthetic data
                o = s0[0]
                for i in range(len(s0)):
                    d = s0[i] - (o + 2 * i)
                    if d:
                        print "bad s0", hex(i), d, s0[i-4:i+5]
                        o = s0[i] - 2 * i
                o = s1[0]
                for i in range(len(s1)):
                    d = s1[i] - (o + 2 * i)
                    if d:
                        print "bad s1", hex(i), d, s1[i-4:i+5]
                        o = s1[i] - 2 * i
            data = numpy.reshape(numpy.dstack((s0, s1)), len(s0)+len(s1))
            # print data[:512]
        return data

    def render(self, addr, count, scale):
        """Ask the server to render *count* columns and return the pixels."""
        cmd = 'render 0x%x %u %u' % (addr, count, scale)
        if 1 or self.verbose:
            print cmd
        self.fo.write(cmd + '\n')
        self.fo.flush()
        a = numpy.fromfile(self.fi, dtype = numpy.uint32,
                           count = count * 0x100)
        return a

    def soc(self, count):
        """Get a SoC bus trace"""
        self.write_reg(0x231, 0)
        self.write_reg(0x231, 1)
        time.sleep(0.1)
        # Single Data Rate (SDR) signals
        sdr0 = self.read_regs(0x2000, count)
        sdr1 = sdr0
        sdr = numpy.dstack((sdr1, sdr0))[0].reshape(len(sdr0)+len(sdr1))
        print sdr0
        print sdr1
        print sdr
        # Registered copies of SDR signals
        reg0 = self.read_regs(0x2000, count)
        reg1 = reg0
        reg = numpy.dstack((reg1, reg0))[0].reshape(len(reg0)+len(reg1))
        print reg0
        print reg1
        print reg
        # Double Data Rate DDR signals
        ddr0 = self.read_regs(0x3000, count)
        ddr1 = self.read_regs(0x3800, count)
        ddr = numpy.dstack((ddr1, ddr0))[0].reshape(len(ddr0)+len(ddr1))
        print ddr0
        print ddr1
        print ddr
        return sdr, reg, ddr

    def set_red_led(self, value):
        self.set_gpio(GPF[3], value)

    def set_green_led(self, value):
        # Green LED is bit 0 of SoC register 0x108 (read-modify-write).
        assert value >= 0 and value <= 1
        v = self.read_soc_reg(0x108)
        if value:
            v |= (1<<0)
        else:
            v &= ~(1<<0)
        self.write_soc_reg(0x108, v)

    def set_white_led(self, value):
        # White LED is bit 1 of SoC register 0x108 (read-modify-write).
        assert value >= 0 and value <= 1
        v = self.read_soc_reg(0x108)
        if value:
            v |= (1<<1)
        else:
            v &= ~(1<<1)
        self.write_soc_reg(0x108, v)

    def fp_init(self):
        # Pulse bit 0 of SoC register 0x100 (front-panel init strobe).
        v = self.read_soc_reg(0x100)
        v |= 1
        self.write_soc_reg(0x100, v)
        time.sleep(0.1)
        v &= ~1
        self.write_soc_reg(0x100, v)
def decode_mig_status(v):
    """Pretty-print the bit fields of a MIG DDR controller status word *v*.

    Uses print() calls (valid in Python 2 and 3) instead of the original
    Python-2-only print statements.
    """
    print("dram status 0x%08x" % v)
    print("cmd_full %u" % ((v >> 25) & 1))
    print("cmd_empty %u" % ((v >> 24) & 1))
    print("wr_underrun %u" % ((v >> 23) & 1))
    print("wr_error %u" % ((v >> 22) & 1))
    print("wr_full %u" % ((v >> 21) & 1))
    print("wr_empty %u" % ((v >> 20) & 1))
    print("wr_count %u" % ((v >> 12) & 0x7f))
    print("rd_overflow %u" % ((v >> 11) & 1))
    print("rd_error %u" % ((v >> 10) & 1))
    print("rd_full %u" % ((v >> 9) & 1))
    print("rd_empty %u" % ((v >> 8) & 1))
    print("rd_count %u" % ((v >> 0) & 0x7f))
    print("")
def main():
    """Board bring-up/debug sequence (Python 2): resets the DDR controller,
    then exercises the SoC-RAM <-> DDR DMA paths with verification reads.

    NOTE: the early `if 1: return` below makes everything after it dead
    code in the current configuration.
    """
    sds = SDS('sds')
    if 1:
        sds.mig_reset()
        sds.write_ddr(0, [ 0xff ] * 2048)
    if 1:
        return
    # --- dead code below the unconditional return (kept for debugging) ---
    print "0x120 -> 0x%08x" % sds.read_soc_reg(0x120)
    print "0x121 -> 0x%08x" % sds.read_soc_reg(0x121)
    print "0x122 -> 0x%08x" % sds.read_soc_reg(0x122)
    print "0x130 -> 0x%08x" % sds.read_soc_reg(0x130)
    print "0x131 -> 0x%08x" % sds.read_soc_reg(0x131)
    print "0x132 -> 0x%08x" % sds.read_soc_reg(0x132)
    print "0x134 -> 0x%08x" % sds.read_soc_reg(0x134)
    print "0x135 -> 0x%08x" % sds.read_soc_reg(0x135)
    decode_mig_status(sds.read_soc_reg(0x134))
    # Clear the SoC scratch RAM window.
    zeros = [ 0 ] * 1024
    sds.write_soc_regs(0x8000, zeros)
    n = 64
    rd_data = sds.read_soc_regs(0x8000, n)
    print rd_data
    # DMA a block from DDR (src) into SoC RAM (dst) and verify it.
    src_addr = 0x20
    dst_addr = 0x10
    count = 0x300
    sds.write_soc_reg(0x130, src_addr)
    sds.write_soc_reg(0x120, dst_addr)
    print "0x120 -> 0x%08x" % sds.read_soc_reg(0x120)
    print "0x121 -> 0x%08x" % sds.read_soc_reg(0x121)
    print "0x122 -> 0x%08x" % sds.read_soc_reg(0x122)
    print "0x130 -> 0x%08x" % sds.read_soc_reg(0x130)
    print "0x131 -> 0x%08x" % sds.read_soc_reg(0x131)
    print "0x132 -> 0x%08x" % sds.read_soc_reg(0x132)
    print "0x134 -> 0x%08x" % sds.read_soc_reg(0x134)
    print "0x135 -> 0x%08x" % sds.read_soc_reg(0x135)
    decode_mig_status(sds.read_soc_reg(0x134))
    # Writing the count register kicks off the transfer.
    sds.write_soc_reg(0x131, count)
    print "0x120 -> 0x%08x" % sds.read_soc_reg(0x120)
    print "0x121 -> 0x%08x" % sds.read_soc_reg(0x121)
    print "0x122 -> 0x%08x" % sds.read_soc_reg(0x122)
    print "0x130 -> 0x%08x" % sds.read_soc_reg(0x130)
    print "0x131 -> 0x%08x" % sds.read_soc_reg(0x131)
    print "0x132 -> 0x%08x" % sds.read_soc_reg(0x132)
    print "0x134 -> 0x%08x" % sds.read_soc_reg(0x134)
    print "0x135 -> 0x%08x" % sds.read_soc_reg(0x135)
    decode_mig_status(sds.read_soc_reg(0x134))
    ram_data = sds.read_soc_regs(0x8000 + dst_addr, count)
    print ram_data
    ddr_data = sds.read_ddr(src_addr, count)
    print ddr_data
    assert all(ram_data == ddr_data)
    if 0:
        wr_data = [ random.randrange(100) for _ in range(16) ]
        sds.write_soc_regs(0x8010, wr_data)
        print wr_data
        rd_data = sds.read_soc_regs(0x8000, n)
        print rd_data
        assert all(rd_data[0x10:0x10 + len(wr_data)] == wr_data)
    if 1:
        # Reverse DMA: SoC RAM (src 0x28) back into a second window.
        sds.write_soc_reg(0x120, 0x28)
        sds.write_soc_reg(0x121, 0x14)
        print "0x120 -> 0x%08x" % sds.read_soc_reg(0x120)
        print "0x121 -> 0x%08x" % sds.read_soc_reg(0x121)
        sds.write_soc_reg(0x122, 0x8)
        print "0x120 -> 0x%08x" % sds.read_soc_reg(0x120)
        print "0x121 -> 0x%08x" % sds.read_soc_reg(0x121)
        print "0x122 -> 0x%08x" % sds.read_soc_reg(0x122)
        time.sleep(1)
        print "0x120 -> 0x%08x" % sds.read_soc_reg(0x120)
        print "0x121 -> 0x%08x" % sds.read_soc_reg(0x121)
        print "0x122 -> 0x%08x" % sds.read_soc_reg(0x122)
        rd_data = sds.read_soc_regs(0x8000, n)
        print rd_data
        # NOTE(review): wr_data is only defined inside the `if 0:` block
        # above — reaching this assert raises NameError; confirm intent.
        assert all(rd_data[0x28 : 0x28 + 0x8] == wr_data[4:4+8])
def hd(a):
    """Print *a* (a sequence of ints) as a hex dump, 8 words per row.

    Uses print() (valid in Python 2 and 3) instead of the original
    Python-2-only print statement.
    """
    w = 8  # words per output row
    for i in range(0, len(a), w):
        s = "%06x " % i
        n = len(a) - i
        if n > w:
            n = w
        for j in range(n):
            s += " %08x" % a[i + j]
        print(s)
# Run the board bring-up/debug sequence when invoked as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
8168283 | # This file was automatically created by FeynRules 2.3.36
# Mathematica version: 11.3.0 for Linux x86 (64-bit) (March 7, 2018)
# Date: Wed 24 Feb 2021 15:52:48
from object_library import all_orders, CouplingOrder
# Coupling-order definitions for the FeynRules-exported model: hierarchy
# orders expansions (QCD dominates), expansion_order 99 means "unbounded".
QCD = CouplingOrder(name = 'QCD',
                    expansion_order = 99,
                    hierarchy = 1)

QED = CouplingOrder(name = 'QED',
                    expansion_order = 99,
                    hierarchy = 2)

# Model-specific Zee-mechanism coupling order.
ZEE = CouplingOrder(name = 'ZEE',
                    expansion_order = 99,
                    hierarchy = 2)
| StarcoderdataPython |
1631461 | from flask import Flask
from flask import g
app = Flask(__name__)
# flask.g holds per-request temporary data; used together with the request
# hook functions below.
@app.route("/")
def index():
    """Demo view: reads g.isvip (set by the before_request hook) and
    stores a name on g for the duration of this request."""
    print(g.isvip)
    g.name = 'zhangsan'
    print(g.name)
    return 'ok'
@app.before_request  # runs before every request
def request_before():
    vip = True  # would normally be looked up from the database
    g.isvip = vip
@app.before_first_request
def request_first_before():
    # Runs only once, before the very first request the app serves.
    print("第一个请求之前执行,后面不执行")
@app.errorhandler(404)
def request_404(code):
    """Render the custom 404 page, preserving the 404 status code."""
    from flask import render_template
    # Without the explicit status code Flask would send this response
    # with HTTP 200 even though it is the not-found handler.
    return render_template('404.html'), 404
# @app.teardown_appcontext
# def request_teardown():
# # 不管代码有没有出错,这个钩子请求完成之后都执行
# pass
# @app.after_request
# def request_after():
# pass
#
# @app.add_template_filter('myfilter') # register a template filter
# def myfilter(value):
# pass
# return ""
# Development entry point: debug server on port 7000.
if __name__ == '__main__':
    app.run(debug=True,port=7000)
| StarcoderdataPython |
4861610 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
""" Implements JSON text format support. """
from fpga_interchange.converters import AbstractWriter, AbstractReader, \
to_writer, from_reader
class JsonWriter(AbstractWriter):
    """Writer that renders an FPGA interchange message as a plain dict tree.

    References to objects in root lists are emitted as ``{'_id': n}``
    stubs; the referenced object gets a matching ``_id`` key so readers
    can resolve the link later.
    """

    def __init__(self, struct_reader, parent):
        super().__init__(struct_reader, parent)
        self.out = {}
        self.struct_reader = struct_reader
        # Monotonically increasing id assigned per referenced object.
        self.next_id = 0
        # Maps id(obj) -> assigned _id so the same object reuses its id.
        self.obj_id_cache = {}

    def get_object_with_id(self, field, value):
        """Return an ``{'_id': n}`` stub for element *value* of list *field*,
        tagging the element itself with the same id."""
        item = self.out[field][value]
        if id(item) not in self.obj_id_cache:
            self.obj_id_cache[id(item)] = self.next_id
            self.next_id += 1
        item_id = self.obj_id_cache[id(item)]
        if '_id' not in item:
            item['_id'] = item_id
        else:
            # An already-tagged item must keep the same id.
            assert item['_id'] == item_id, (
                item['_id'],
                item_id,
            )
        return {'_id': item_id}

    def dereference_value(self, annotation_type, value, root_writer,
                          parent_writer):
        """Resolve a reference annotation to its serialized representation."""
        if annotation_type.type == 'root':
            return root_writer.get_object_with_id(annotation_type.field, value)
        elif annotation_type.type == 'rootValue':
            return root_writer.get_field_value(annotation_type.field, value)
        else:
            assert annotation_type.type == 'parent'
            return self.get_parent(annotation_type.depth).get_object_with_id(
                annotation_type.field, value)

    def set_value(self, key, value_which, value):
        self.out[key] = value

    def set_value_inner_key(self, key, inner_key, value_which, value):
        self.out.update({key: {inner_key: value}})

    def make_list(self):
        return []

    def append_to_list(self, l, value_which, value):
        l.append(value)

    def output(self):
        """Return the accumulated dict tree (suitable for json.dump)."""
        return self.out
class JsonIndexCache():
    """Lazily maps ``_id`` references back to list indices, per field."""

    def __init__(self, data):
        self.data = data
        # field name -> {_id: index within data[field]}
        self.caches = {}

    def get_index(self, field, value):
        """Return the index of the element of ``data[field]`` whose ``_id``
        matches ``value['_id']``, building the field's cache on first use."""
        cache = self.caches.get(field)
        if cache is None:
            cache = {
                obj['_id']: idx
                for idx, obj in enumerate(self.data[field])
                if '_id' in obj
            }
            self.caches[field] = cache
        return cache[value['_id']]
class JsonReader(AbstractReader):
    """Reader over a dict tree produced by json.load / JsonWriter; resolves
    ``{'_id': n}`` stubs back to list indices via a JsonIndexCache."""

    def __init__(self, data, parent):
        super().__init__(data, parent)
        self.data = data
        self.index_cache = JsonIndexCache(self.data)
        self.parent = parent

    def get_index(self, field, value):
        return self.index_cache.get_index(field, value)

    def read_scalar(self, field_which, field_data):
        # Scalars are stored directly in the dict tree.
        return field_data

    def reference_value(self, annotation_type, value, root_reader,
                        parent_reader):
        """Convert a serialized reference back into an index into the
        referenced list."""
        if annotation_type.type == 'root':
            return root_reader.get_index(annotation_type.field, value)
        elif annotation_type.type == 'rootValue':
            return root_reader.get_object(
                annotation_type.field).get_index(value)
        else:
            assert annotation_type.type == 'parent'
            return self.get_parent(annotation_type.depth).get_index(
                annotation_type.field, value)

    def keys(self):
        return self.data.keys()

    def get_field_keys(self, key):
        return self.data[key].keys()

    def get_inner_field(self, key, inner_key):
        return self.data[key][inner_key]

    def get_field(self, key):
        return self.data[key]
def to_json(struct_reader):
    """ Converts struct_reader to dict tree suitable for use with json.dump """
    return to_writer(struct_reader, JsonWriter)
def from_json(message, data):
    """ Converts data from json.load to FPGA interchange message. """
    from_reader(message, data, JsonReader)
| StarcoderdataPython |
4865516 | <reponame>jramirez857/projects<gh_stars>10-100
import mlflow
from nbconvert import HTMLExporter
from sklearn_evaluation import NotebookIntrospector
def store_report(product, params):
    """If params['track'] is set, export the executed notebook *product* to
    HTML (code cells hidden) and attach it to its MLflow run as nb.html.

    The run id is taken from the notebook's tagged 'mlflow-run-id' cell.
    """
    if params['track']:
        nb = NotebookIntrospector(product)
        run_id = nb['mlflow-run-id'].strip()
        # https://nbconvert.readthedocs.io/en/latest/config_options.html#preprocessor-options
        exporter = HTMLExporter()
        # hide code cells
        exporter.exclude_input = True
        body, _ = exporter.from_filename(product)
        with mlflow.start_run(run_id):
            mlflow.log_text(body, 'nb.html')
| StarcoderdataPython |
3245108 | <reponame>miracvbasaran/PipelineDP<filename>examples/movie_view_ratings/run_without_frameworks.py
# Copyright 2022 OpenMined.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Demo of running PipelineDP locally, without any external data processing framework"""
from absl import app
from absl import flags
import pipeline_dp
from common_utils import parse_file, write_to_file
FLAGS = flags.FLAGS
flags.DEFINE_string('input_file', None, 'The file with the movie view data')
flags.DEFINE_string('output_file', None, 'Output file')
def main(unused_argv):
    """Compute differentially private per-movie statistics from the input
    file and write them to the output file."""
    # Here, we use a local backend for computations. This does not depend on
    # any pipeline framework and it is implemented in pure Python in
    # PipelineDP. It keeps all data in memory and is not optimized for large data.
    # For datasets smaller than ~tens of megabytes, local execution without any
    # framework is faster than local mode with Beam or Spark.
    backend = pipeline_dp.LocalBackend()

    # Define the privacy budget available for our computation.
    budget_accountant = pipeline_dp.NaiveBudgetAccountant(total_epsilon=1,
                                                          total_delta=1e-6)

    # Load and parse input data
    movie_views = parse_file(FLAGS.input_file)

    # Create a DPEngine instance.
    dp_engine = pipeline_dp.DPEngine(budget_accountant, backend)

    params = pipeline_dp.AggregateParams(
        metrics=[
            # we can compute multiple metrics at once.
            pipeline_dp.Metrics.COUNT,
            pipeline_dp.Metrics.SUM,
            pipeline_dp.Metrics.PRIVACY_ID_COUNT
        ],
        # Limits to how much one user can contribute:
        # .. at most two movies rated per user
        max_partitions_contributed=2,
        # .. at most one rating for each movie
        max_contributions_per_partition=1,
        # .. with minimal rating of "1"
        min_value=1,
        # .. and maximum rating of "5"
        max_value=5)

    # Specify how to extract privacy_id, partition_key and value from an
    # element of movie_views.
    data_extractors = pipeline_dp.DataExtractors(
        partition_extractor=lambda mv: mv.movie_id,
        privacy_id_extractor=lambda mv: mv.user_id,
        value_extractor=lambda mv: mv.rating)

    # Create a computational graph for the aggregation.
    # All computations are lazy. dp_result is iterable, but iterating it would
    # fail until budget is computed (below).
    # It’s possible to call DPEngine.aggregate multiple times with different
    # metrics to compute.
    dp_result = dp_engine.aggregate(movie_views, params, data_extractors)

    budget_accountant.compute_budgets()

    # Here's where the lazy iterator initiates computations and gets transformed
    # into actual results
    dp_result = list(dp_result)

    # Save the results
    write_to_file(dp_result, FLAGS.output_file)

    return 0
if __name__ == '__main__':
    # Both flags must be provided on the command line.
    flags.mark_flag_as_required("input_file")
    flags.mark_flag_as_required("output_file")
    app.run(main)
| StarcoderdataPython |
318543 | from niftivis.niftivis import make_thumbnails
# Calendar-based package version.
__version__ = "2021.04.13"
# Public API surface of the package.
__all__ = ["make_thumbnails"]
| StarcoderdataPython |
3240152 | from flask_mail import Mail, Message
import configparser, jinja2, re
# ENQUIRY TYPES
# Connect
# 0 General enquiry
# 1 General feedback
# Partnership
# 2 Collaboration and partnership
# 3 Marketing and sponsorship
# 4 Student-alumni relations
# Outreach
# 5 Event publicity
# 6 Recruitment notice
# Help
# 7 Academic advisory
# 8 Locker enquiry
# 9 IT support
# Enquiry type id (string digit, see the legend above) -> display label.
enquiries = {
    '0': 'General enquiry',
    '1': 'General feedback',
    '2': 'Collaboration and partnership',
    '3': 'Marketing and sponsorship',
    '4': 'Student-alumni relations',
    '5': 'Event publicity',
    '6': 'Recruitment notice',
    '7': 'Academic advisory',
    '8': 'Locker enquiry',
    '9': 'IT support'
}
# Enquiry type id -> list of recipient addresses for that enquiry.
recipients = {
    '0': ['<EMAIL>'],
    '1': ['<EMAIL>'],
    '2': ['<EMAIL>'],
    '3': ['<EMAIL>'],
    '4': ['<EMAIL>'],
    '5': ['<EMAIL>'],
    '6': ['<EMAIL>'],
    '7': ['<EMAIL>'],
    '8': ['<EMAIL>'],
    '9': ['<EMAIL>', '<EMAIL>']
}
# Form fields that must be present (and valid) vs. allowed to be absent.
required_fields = ['enquiry', 'name', 'email', 'subject', 'message']
optional_fields = ['phone']
# Loose sanity check only: something@something (not full RFC validation).
email_regex = re.compile(r"[^@]+@[^@]+")
# Per-field validators; each returns a truthy value when the field is valid.
# 'phone' accepts an empty value (it is optional).
validators = {
    'enquiry': lambda x: x and len(x) == 1 and x.isdigit(),
    'name': lambda x: x and 2 <= len(x) <= 30,
    'email': lambda x: x and 6 <= len(x) <= 30 and email_regex.match(x),
    'phone': lambda x: not x or 8 <= len(x) <= 16 and x.isdigit(),
    'subject': lambda x: x and 10 <= len(x) <= 50,
    'message': lambda x: x and 10 <= len(x) <= 500
}
def emeow(app, data):
    """Validate the enquiry *data* and, if valid, email it to the configured
    recipients via the Flask app's mailer.

    Returns a short status string describing the outcome.
    """
    # SMTP credentials live in himitsu.ini, outside version control.
    insider = configparser.ConfigParser()
    insider.read('himitsu.ini')
    app.config['MAIL_SERVER'] = insider['emeow'].get('server')
    app.config['MAIL_PORT'] = insider['emeow'].getint('port')
    app.config['MAIL_USERNAME'] = insider['emeow'].get('sender')
    app.config['MAIL_PASSWORD'] = insider['emeow'].get('password')
    app.config['MAIL_USE_SSL'] = insider['emeow'].getboolean('ssl')
    app.config['MAIL_USE_TLS'] = insider['emeow'].getboolean('tls')
    mailer = Mail(app)
    # is_valid returns a (bool, reason) tuple. Unpack it: the old code did
    # `if validated:` on the tuple itself, which is ALWAYS truthy, so invalid
    # submissions were mailed anyway and the error branch was unreachable.
    valid, reason = is_valid(data)
    if valid:
        enquiry_id = data['enquiry']
        # flask_mail.Message(
        #     subject, recipients, body, html, sender, cc, bcc, reply_to,
        #     date, charset, extra_headers, mail_options, rcpt_options
        # )
        mail = Message(
            subject = "Connect: %s" % data['subject'],
            recipients = recipients[enquiry_id],
            sender = insider['emeow'].get('sender')
        )
        template = jinja2.Environment(
            trim_blocks = True,
            lstrip_blocks = True,
            autoescape = True,
            loader = jinja2.FileSystemLoader('templates')
        ).get_template('meow.html.j2')
        # Replace the numeric id with its label before rendering.
        data['enquiry'] = enquiries[enquiry_id]
        mail.html = template.render(data)
        mailer.send(mail)
        return 'emeow: OK'
    else:
        return 'is_valid returns %s: %s' % (valid, reason)
def is_valid(data):
    """Validate an enquiry payload.

    Returns a ``(bool, reason)`` tuple: ``(True, ...)`` when every required
    field is present and valid and every supplied optional field is valid,
    otherwise ``(False, reason)`` describing the first problem found.
    """
    # None fails this check too, since type(None) is not dict.
    if type(data) is not dict:
        return (False, "Data is either None or not a dict.")
    for field in required_fields:
        if field not in data:
            return (False, "Missing field: %s." % field)
        if not validate(field, data[field]):
            return (False, "Invalid value for the field: %s." % field)
    for field in optional_fields:
        if field in data and not validate(field, data[field]):
            return (False, "Invalid value for the field: %s." % field)
    return (True, "Data is valid.")
def validate(field, value):
    """Run the registered validator for ``field`` against ``value``.

    Raises KeyError if no validator is registered for ``field``.
    """
    return validators[field](value)
| StarcoderdataPython |
11383794 | <filename>fa_test.py
# -*- coding:utf-8 -*-
import tensorflow as tf
import numpy as np
from utils.scripts_utils import dynamic_memory_allocation, basic_train_parser
from utils.config_manager import Config
from ctc_segmentation import ctc_segmentation, determine_utterance_segments
from ctc_segmentation import CtcSegmentationParameters
from ctc_segmentation import prepare_token_list
import tgt, re
from pypinyin import pinyin, Style
from data.text import TextToTokens
from data.text.tokenizer import Phonemizer, Tokenizer
from data.audio import Audio
def read_lexicon(lex_path):
    """Parse a pronunciation lexicon file into ``{word: [phones]}``.

    Each line has the form ``WORD PH1 PH2 ...``.  Keys are lower-cased and
    the first pronunciation seen for a word wins; later duplicates are
    ignored.
    """
    lexicon = {}
    with open(lex_path) as handle:
        for raw_line in handle:
            fields = re.split(r"\s+", raw_line.strip("\n"))
            key = fields[0].lower()
            if key not in lexicon:
                lexicon[key] = fields[1:]
    return lexicon
# ---- Script setup: GPU memory, CLI args, config, model, checkpoint. ----
dynamic_memory_allocation()
parser = basic_train_parser()
args = parser.parse_args()
# asr=True selects the ASR (aligner) model configuration.
config = Config(config_path=args.config, asr=True)
config_dict = config.config
config.create_remove_dirs(clear_dir=args.clear_dir,
                          clear_logs=args.clear_logs,
                          clear_weights=args.clear_weights)
config.dump_config()
config.print_config()
model = config.get_model()
config.compile_model(model)
audio = Audio(config=config.config)
# create logger and checkpointer and restore latest model
checkpoint = tf.train.Checkpoint(step=tf.Variable(1),
                                 optimizer=model.optimizer,
                                 net=model)
manager = tf.train.CheckpointManager(checkpoint, config.weights_dir,
                                     max_to_keep=config_dict['keep_n_weights'],
                                     keep_checkpoint_every_n_hours=config_dict['keep_checkpoint_every_n_hours'])
manager_training = tf.train.CheckpointManager(checkpoint, str(config.weights_dir / 'latest'),
                                              max_to_keep=1, checkpoint_name='latest')
# Restore from the rolling 'latest' checkpoint when one exists; otherwise
# the model starts from freshly initialized weights.
checkpoint.restore(manager_training.latest_checkpoint)
if manager_training.latest_checkpoint:
    print(f'\nresuming training from step {model.step} ({manager_training.latest_checkpoint})')
else:
    print(f'\nstarting training from scratch')
# ---- Inputs: one wav file and its (Chinese) transcript. ----
input_wav = r'D:\winbeta\Beta.VideoProcess\Src\test\test.wav'
input_text = '这种写作方式是媒体常用的写作方式。这种模式将新闻中最重要的消息写在第一段,或是以新闻提要的方式呈现新闻的最前端,有助于受众快速了解新闻重点。由于该模式迎合了受众的接受心理,所以成为媒体应用最为普遍的形式。这种模式写作的基本格式(除了标题)是:先在导语中写出新闻事件中最有新闻价值的部分(新闻价值通俗来讲就是新闻中那些最突出,最新奇,最能吸引受众的部分;其次,在报道主体中按照事件各要素的重要程度,依次递减写下来,最后面的是最不重要的;同时需要注意的是,一个段落只写一个事件要素,不能一段到底。因为这种格式不是符合事件发展的基本时间顺序,所以在写作时要尽量从受众的角度出发来构思,按受众对事件重要程度的认识来安排事件要素,因而需要长期的实践经验和宏观的对于受众的认识。'
english_lexicon = './data/text/lexicon/librispeech-lexicon.txt'
pinyin_lexicon_path = './data/text/lexicon/pinyin-lexicon-r.txt'
pinyin_lexicon = read_lexicon(pinyin_lexicon_path)
# chartuples[i] = (first, last) phone indices for input character i, so the
# alignment segments can later be mapped back to characters.
chartuples = []
lastend = 0
phones = []
# TONE3 style appends the tone digit (neutral tone rendered as '5').
pinyins = [
    p[0]
    for p in pinyin(
        input_text, style=Style.TONE3, strict=False, neutral_tone_with_five=True
    )
]
print(pinyins)
print(pinyin_lexicon)
# Expand each pinyin syllable into its phone sequence; anything not in the
# lexicon (punctuation, etc.) becomes a single 'sp' (short pause) token.
for p in pinyins:
    if p in pinyin_lexicon:
        phones += pinyin_lexicon[p]
        chartuples.append((lastend, lastend + len(pinyin_lexicon[p]) - 1))
        lastend += len(pinyin_lexicon[p])
    else:
        phones.append("sp")
        chartuples.append((lastend, lastend))
        lastend += 1
print(phones)
# ---- Run the aligner model and CTC segmentation, then write a TextGrid. ----
tokenizer = Tokenizer(add_start_end=False)
phonemes = np.array([tokenizer(' '.join(phones))])
y, sr = audio.load_wav(input_wav)
mel = audio.mel_spectrogram(y)
model_out = model.predict(mel[np.newaxis, ...])
pred_phon = model_out['encoder_output'][0]
pred_phon = tf.nn.log_softmax(pred_phon)
iphon_tar = model.text_pipeline.tokenizer.decode(phonemes[0])
iphon_tar = iphon_tar.split()
# Index 0 of the CTC char list is the blank symbol.
char_list = [''] + list(model.text_pipeline.tokenizer.idx_to_token.values())
config = CtcSegmentationParameters(char_list=char_list)
# Seconds of audio per encoder output frame.
# NOTE(review): 0.0115545 looks hard-coded for a specific hop length /
# sample rate -- confirm it matches the Audio config in use.
config.index_duration = 0.0115545
text = phonemes
ground_truth_mat, utt_begin_indices = prepare_token_list(config, text)
timings, char_probs, state_list = ctc_segmentation(config, pred_phon.numpy(), ground_truth_mat)
# Re-derive one segment per token rather than per utterance.
utt_begin_indices = list(range(2, len(timings)))
segments = determine_utterance_segments(
    config, utt_begin_indices, char_probs, timings, text[0]
)
print(text.shape, len(segments))
tg = tgt.core.TextGrid('haa')
tier = tgt.core.IntervalTier(name='phonemes')
# segments[i] = (start, end, score); a low score on the first segment means
# the alignment is unreliable there, so clamp its start to 0 instead of
# inserting a leading 'sp' interval.
if (segments[0][-1] < -0.001):
    segments[0] = (0, segments[0][1], segments[0][2])
else:
    itv = tgt.core.Interval(0, segments[0][0], text='sp')
    tier.add_interval(itv)
# Same idea for the tail: pad the last segment, and add a trailing 'sp'
# interval only if the padded end still fits within the audio length.
if (segments[-1][-1] < -0.001):
    segments[-1] = (segments[-1][0], segments[-1][1] + 0.15, segments[-1][2])
    if (segments[-1][1] > mel.shape[1] * config.index_duration):
        pass
    else:
        itv = tgt.core.Interval(segments[-1][1], mel.shape[1] * config.index_duration, text='sp')
        tier.add_interval(itv)
# One interval per input character, spanning its first..last phone segments.
for i, chartuple in enumerate(chartuples):
    itv = tgt.core.Interval(segments[chartuple[0]][0], segments[chartuple[1]][1], text=input_text[i])
    tier.add_interval(itv)
tg.add_tier(tier)
tgt.io.write_to_file(tg, "test.textgrid", format='long')
| StarcoderdataPython |
5132061 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import errno
import re
import shutil
import subprocess
from collections import defaultdict
import subprocess
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import SingleLetterAlphabet
import pandas as pd
class Alignment:
    '''
    Methods executed in this order generates a MDR centric assembly (round2).
    1. doPile
    2. getReadsFromPileup
    3. roundTwoAssembly
    Meant for further analysis after PADI
    Input:
        * contig sequence
        * MDR sequence with gaps
        * MDR loc
        * Reads in MDR
    Outputs:
        * read pileup (alignment.doPile())
        * reads in MDR (alignment.getReadsFromPileup())
    '''

    def __init__(self, rootPath, ko):
        """Prepare an empty alignment state for one KO id.

        Args:
            rootPath: project root containing the ``out/`` directory tree.
            ko: KO identifier used to locate the input/output files.
        """
        self.rootPath = rootPath
        self.ko = ko
        # MDR start/end columns in the MSA; filled lazily by __getMSALOC().
        self.start = None
        self.end = None
        self.contigList = {}     # contig id -> {'fullseq': ..., 'mdr': {...}}
        self.readInfo = {}       # read id -> mapping / taxa metadata
        self.outputRecords = []  # SeqRecords collected for round-2 assembly
        print("Processing %s:" % ko)

    def doPile(self):
        """
        Generates a full pileup for each of the contigs.
        To be used later for assembly
        """
        pileup = "%s/out/pileup/%s" % (self.rootPath, self.ko)
        try:
            os.makedirs(pileup)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise  # raises the error again
        print("Initializing pileup for %s" % self.ko)
        self.__getMSALOC()
        self.__readContigs()
        self.__readMSA()
        self.__readStatusPair()
        self.__parseFastQ()

    def getReadsFromPileUP(self):
        """Extract the reads overlapping the MDR from every contig pileup."""
        self.__getMSALOC()
        self.__storeTAXAinfo()
        self.__cutMSA()

    def __cutMSA(self):
        """Locate the MDR window inside each contig pileup and collect the
        reads falling in it (tries both strands)."""
        try:
            outputDir = "%s/out/preNewbler/%s" % (self.rootPath, self.ko)
            os.makedirs(outputDir)
        except OSError as e:
            if e.errno != errno.EEXIST:  # keep quiet if folder already exists
                raise  # raises the error again
        print("extracting reads")
        contigListFile = "%s/out/pAss03/%s.msa" % (self.rootPath, self.ko)
        print(contigListFile)
        seqIter = SeqIO.parse(contigListFile, 'fasta')
        for contig in seqIter:
            mdr = self.__getSeq(contig, self.start, self.end)
            contigInMDR = len(mdr) > 0
            if (contigInMDR):
                pileupFH = SeqIO.parse("%s/out/pileup/%s/%s-%s" % (self.rootPath, self.ko, self.ko, contig.id), 'fasta')
                # The first record of a pileup file is the full contig itself.
                fullContig = next(pileupFH)
                try:
                    indexVal = str(fullContig.seq).index(mdr)
                    self.__extractReads(indexVal, indexVal + len(mdr), pileupFH)
                except ValueError:
                    # MDR not found on the 5' strand; try its reverse
                    # complement before giving up.
                    try:
                        indexVal = str(fullContig.seq).index(str(Seq(mdr).reverse_complement()))
                        self.__extractReads(indexVal, indexVal + len(mdr), pileupFH)
                    except ValueError as err:
                        print("%s-%s has issues:" % (self.ko, contig.id))
                        print(err)
                # Rewrites the output after every contig; the final write
                # contains all accumulated records.
                SeqIO.write(self.outputRecords, "%s/out/preNewbler/%s/%s" % (self.rootPath, self.ko, self.ko), "fasta")
            else:
                print("%s is empty" % contig.id)

    def __getSeq(self, seqRecord, start, end):
        """Return the [start:end) slice of a record, upper-cased and with
        alignment gaps ('-') removed."""
        nt = str(seqRecord.seq[start:end]).upper().replace("-", "")
        return nt

    def __extractReads(self, indexVal, howLong, iterator):
        """Append every pileup read overlapping [indexVal, howLong) to
        self.outputRecords, ungapped and upper-cased."""
        for record in iterator:
            read = self.__getSeq(record, indexVal, howLong)
            if len(read) > 0:
                readID, contig = re.match(r"^(\d+)-(contig\d+)$", record.id).groups()
                r = self.readInfo[readID]
                # e.g. >58526338-contig00001-33057/1 KO:K00927 start:575 offset:287
                header = "%s-%s/%s\tKO:%s\tstart:%s\toffset:%s" % (record.id, r['taxa'], r['readnum'], self.ko, indexVal, howLong)
                newseq = Seq(str(record.seq).upper().translate({ord(i): None for i in '-'}), SingleLetterAlphabet())
                newrecord = SeqRecord(newseq, id=header, name="", description="")
                self.outputRecords.append(newrecord)
                self.readInfo[readID]['readnum'] += 1
        return

    def __storeTAXAinfo(self):
        """Record the taxon id for every read in the two input FASTQ files."""
        print("processing input file...")
        fq1 = self.rootPath + "/out/newbler/" + self.ko + "/input/" + self.ko + ".1.fq"
        fq2 = self.rootPath + "/out/newbler/" + self.ko + "/input/" + self.ko + ".2.fq"
        for file in [fq1, fq2]:
            for record in SeqIO.parse(file, "fastq"):
                readID, taxa = re.search(r"^(\d+)\|(\d+)", record.description).groups()
                self.readInfo[readID] = {'taxa': taxa, 'readnum': 1}
        print("done")

    def __getMSALOC(self):
        """
        grab the MDR location from MDR file originating from pass
        """
        if (self.start is None and self.end is None):
            file = self.rootPath + '/out/pAss11/' + self.ko + ".fna"
            record = next(SeqIO.parse(file, "fasta"))
            theMatch = re.search(r"msaStart:(\d+) msaEND:(\d+)", record.description)
            self.start = int(theMatch.group(1))
            self.end = int(theMatch.group(2))
            print("MDR start:%s end:%s" % (self.start, self.end))

    def __readContigs(self):
        """
        Stores full length contigs
        """
        path = self.rootPath + '/out/newbler/' + self.ko + "/454AllContigs.fna"
        for record in SeqIO.parse(path, 'fasta'):
            self.contigList[record.id] = {'fullseq': record.seq.upper()}

    def __readMSA(self):
        """
        Parses the MSA for the MDR region.
        Outputs the portion of the contig sequence recorded from the 454
        output which matches the sequences from the msa in the MDR.
        """
        path = self.rootPath + '/out/pAss03/' + self.ko + ".msa"
        for record in SeqIO.parse(path, "fasta"):
            # Contig might be missing from the newbler output; a KeyError
            # here indicates inconsistent inputs.
            contig = self.contigList[record.id]['fullseq']
            recseq = str(record.seq)[self.start:self.end]
            shrunk = recseq.replace('-', '').upper()
            substrResult = contig.find(shrunk)
            if substrResult == -1:
                # Reverse strand.
                revshrunk = str(record.seq.reverse_complement())[self.start:self.end].replace('-', '').upper()
                substrResult = contig.find(revshrunk)
                self.contigList[record.id]['mdr'] = {
                    'start': substrResult,
                    'end': len(revshrunk) + substrResult,
                    'seq': revshrunk,
                    'direction': 'ntRev'
                }
            else:
                # Forward strand.
                # NOTE(review): 'ntRev' here duplicates the reverse-strand
                # label above; a forward-specific label was probably
                # intended.  Kept as-is because no visible code reads it --
                # confirm before changing.
                self.contigList[record.id]['mdr'] = {
                    'start': substrResult,
                    'end': len(shrunk) + substrResult,
                    'seq': shrunk,
                    'direction': 'ntRev'
                }

    def __readStatusPair(self):
        """
        Newbler was given the command to interpret the reads as paired end
        reads, so 454PairStatus.txt is parsed pair-wise instead of per read.
        NOTE: we should compare the results from pair and single read mapping
        Currently the function only takes into account:
        1. reads which assembled into the same contig,
        2. assigns the reads to the starting position of the pair, not the
           individual read position.
        Columns (tab separated):
        Template  Status  Distance  Left Contig  Left Pos  Left Dir
        Right Contig  Right Pos  Right Dir  Left Distance  RightDistance
        """
        df = pd.read_csv(self.rootPath + "/out/newbler/" + self.ko + "/454PairStatus.txt", sep="\t")

        def splitNstore(row):
            """
            Newbler labels some fine assemblies as FalsePair (ends mapping
            outside the expected library span / orientation), so both
            SameContig and FalsePair rows are kept.
            """
            acceptedStatus = set(['SameContig', 'FalsePair'])
            if row['Status'] in acceptedStatus:
                splitNstore_sameContig(row)

        def splitNstore_sameContig(row):
            readID = row['Template'].split("|")[0]
            isPos = row['Left Dir'] == '+'
            try:
                self.readInfo[readID] = {
                    'parent': row['Left Contig'],
                    'readone': int(row['Left Pos']),
                    'readtwo': row['Right Pos'],
                    'direction': 'forward' if isPos else 'reverse'
                }
            except Exception:
                # BUG FIX: the original wrote `print("...") % (...)`, which
                # applies '%' to print()'s return value (None) and raised a
                # TypeError whenever this handler ran; the bare `except:`
                # also swallowed every exception type.
                print("%s the parent:%s ; read1: %s; direction %s" % (readID, row['Left Contig'], row['Left Pos'], row['Left Dir']))

        df.apply(splitNstore, axis=1)

    def __guidedAlignment(self):
        cmd = "bwa mem"
        # incomplete: what i wanted to do is to have the mapping process
        # passed onto bwa + 454ReadStatus, ie. get the reads which belong to

    def __parseFastQ(self):
        """
        Parses fastq files, stores then outputs the reads as fq pileups on
        the respective contigs, then re-aligns each pileup with muscle.
        """
        def fixAlignment(rootPath, ko, contigID, debug=False):
            """
            temp fix for parseFastQ
            readStatus mapping only gives location for contig not read, ie.
                  123456789
                --ATCGGGCAT   <contig>  mapping position 1-4 (3 nts)
                CGATCG-----   <read>    mapping position 3-6 (3 nts)
                  123456
            Runs muscle on the pileup and returns {read id: aligned seq}.
            """
            # Part1: run muscle
            pileup = "%s/out/pileup/%s" % (rootPath, ko)
            inputFile = '%s/%s-%s' % (pileup, ko, contigID)
            cmd = "muscle -in %s -out %s.msa" % (inputFile, inputFile)
            print("Running muscle:\n%s" % cmd)
            if not debug:
                try:
                    subprocess.run(cmd, shell=True, check=True)
                except subprocess.CalledProcessError as err:
                    print("Error:\n", err.output)
            # Part2: store MSA sequences keyed by read id
            msaed = {}
            seqIter = SeqIO.parse("%s.msa" % inputFile, 'fasta')
            for aligned in seqIter:
                try:
                    readID = re.search(r"^(\S+)-\S+$", aligned.description).group(1)
                    msaed[readID] = str(aligned.seq)
                except AttributeError:
                    # The full-contig record has no "<read>-<contig>" shape.
                    msaed[aligned.description] = str(aligned.seq)
            return msaed

        testing = False
        fq1 = self.rootPath + "/out/newbler/" + self.ko + "/input/" + self.ko + ".1.fq"
        fq2 = self.rootPath + "/out/newbler/" + self.ko + "/input/" + self.ko + ".2.fq"
        # poshash[contig][startPos] -> [(read id, padded sequence), ...]
        poshash = {}
        for record in SeqIO.parse(fq1, "fastq"):
            readID = record.description.split("|")[0]
            if readID in self.readInfo:
                theParent = self.readInfo[readID]['parent']
                # Shorter contigs are sometimes not output, so readInfo may
                # reference a contig that was never printed.
                if theParent in self.contigList.keys():
                    startPos = self.readInfo[readID]['readone']
                    direc = self.readInfo[readID]['direction']
                    back = "-" * int(len(self.contigList[theParent]['fullseq']) - startPos - len(record.seq))
                    if direc == 'reverse':
                        # Pair position marks the far end; shift by the
                        # read length (101) and reverse-complement.
                        startPos = startPos - 101
                        front = "-" * int(startPos - 1)
                        seq = str(record.seq.reverse_complement())
                    else:
                        front = "-" * int(startPos - 1)
                        seq = str(record.seq)
                    readSEQ = front + seq + back
                    readID = readID + "/1"
                    if theParent in poshash:
                        if testing:
                            if theParent == 'contig00001':
                                poshash[theParent][startPos].append((readID, readSEQ))
                        else:
                            poshash[theParent][startPos].append((readID, readSEQ))
                    else:
                        poshash[theParent] = defaultdict(list)
        for record in SeqIO.parse(fq2, "fastq"):
            readID = record.description.split("|")[0]
            if readID in self.readInfo:
                theParent = self.readInfo[readID]['parent']
                if theParent in self.contigList.keys():
                    startPos = self.readInfo[readID]['readtwo']
                    direc = self.readInfo[readID]['direction']
                    # Mate orientation is opposite to read one's.
                    if direc == 'reverse':
                        seq = str(record.seq)
                        front = "-" * int(startPos - 1)
                    else:
                        seq = str(record.seq.reverse_complement())
                        startPos = startPos - 101
                        front = "-" * (int(startPos - 1))
                    back = "-" * int(len(self.contigList[theParent]['fullseq']) - startPos - len(record.seq))
                    readSEQ = front + seq + back
                    readID = readID + "/2"
                    if theParent in poshash:
                        if testing:
                            if theParent == 'contig00001':
                                poshash[theParent][startPos].append((readID, readSEQ))
                        else:
                            poshash[theParent][startPos].append((readID, readSEQ))
                    else:
                        poshash[theParent] = defaultdict(list)
        # Write one pileup file per contig: full contig first, then the
        # reads ordered by start position.
        pileup = "%s/out/pileup/%s" % (self.rootPath, self.ko)
        for contigID in self.contigList:
            with open('%s/%s-%s' % (pileup, self.ko, contigID), 'w') as f:
                f.write(">%s\n" % contigID)
                f.write(str(self.contigList[contigID]['fullseq'] + "\n"))
                if contigID in poshash:
                    for key, info in sorted(poshash[contigID].items()):
                        f.write(">%s-%s\n%s\n" % (info[0][0], contigID, info[0][1]))
        # Re-align each pileup with muscle and write the corrected MSA.
        for contigID in self.contigList:
            msa = fixAlignment(self.rootPath, self.ko, contigID)
            newOutputFile = '%s/%s-%s.reAligned.msa' % (pileup, self.ko, contigID)
            with open(newOutputFile, 'w') as newOut:
                newOut.write(">%s\n" % contigID)
                newOut.write(str(msa[contigID]) + "\n")
                if contigID in poshash:
                    for key, info in sorted(poshash[contigID].items()):
                        newOut.write(">%s-%s\n%s\n" % (info[0][0], contigID, msa[info[0][0]]))

    def __readStatus(self):
        """
        stores read alignment information for use later
        Columns of 454ReadStatus.txt:
        Accno  Read Status  5' Contig  5' Position  5' Strand
        3' Contig  3' Position  3' Strand
        """
        filePath = "%s/out/newbler/%s/454ReadStatus.txt" % (self.rootPath, self.ko)
        df = pd.read_csv(filePath, sep="\t")
        df.columns = ['Accno', 'ReadStatus', '5Contig', '5Position', '5Strand', '3Contig', '3Position', '3Strand']

        def splitNstore(row):
            readID = row['Accno'].split("|")[0]
            isAss = row['ReadStatus'] == 'Assembled'
            if isAss:
                isSame = row['5Contig'] == row['3Contig']
                isPos = row['5Strand'] == '+'
                if isSame:
                    self.readInfo[readID] = {
                        'parent': row['5Contig'],
                        'startPos': int(row['5Position']) if isPos else int(row['3Position']),
                        'endPos': int(row['3Position']) if isPos else int(row['5Position']),
                        'direction': 'forward' if isPos else 'reverse'
                    }

        df.apply(splitNstore, axis=1)
| StarcoderdataPython |
252158 | <gh_stars>0
from django.test import TestCase
from django.urls import reverse, resolve
from purchase import views
class PurchaseUrlsTest(TestCase):
    '''
    Test all urls in the purchase application
    '''
    # NOTE: assertEquals is a long-deprecated alias of assertEqual and is
    # removed in newer Python versions; assertEqual is used throughout.

    def test_new_order_resolved(self):
        '''
        Test new order url
        '''
        url = reverse('purchase:order_new')
        self.assertEqual(
            resolve(url).func.view_class,
            views.CreateOrder)

    def test_edit_order_resolved(self):
        '''
        Test edit order url
        '''
        url = reverse('purchase:order_edit', kwargs={'pk': 1})
        self.assertEqual(
            resolve(url).func.view_class,
            views.EditOrder)

    def test_order_details_resolved(self):
        '''
        Test order details url
        '''
        url = reverse('purchase:order_details', kwargs={'pk': 1})
        self.assertEqual(
            resolve(url).func.view_class,
            views.OrderDetails)

    def test_orders_list_resolved(self):
        '''
        Test list orders url
        '''
        url = reverse('purchase:orders_list')
        self.assertEqual(
            resolve(url).func.view_class,
            views.OrdersList)

    def test_delete_order_resolved(self):
        '''
        Test delete order url
        '''
        # order_delete is a plain function view, so compare .func directly.
        url = reverse('purchase:order_delete', kwargs={'pk': 1})
        self.assertEqual(
            resolve(url).func,
            views.order_delete)
| StarcoderdataPython |
291941 | from flask_restx import fields
from . import api
# Marshalling schema for a single row of sample analytics data.
sampledata = api.model(
    "SampleData",
    {
        "id": fields.Integer(),
        "date": fields.String(),
        "channel": fields.String(),
        "country": fields.String(),
        "os": fields.String(),
        "impressions": fields.Integer(),
        "clicks": fields.Integer(),
        "installs": fields.Integer(),
        "spend": fields.Float(),
        "revenue": fields.Float(),
        # NOTE(review): presumably cost-per-install -- confirm with the
        # data source.
        "cpi": fields.Float(),
    },
)

# Envelope schema for list responses: status flag, row objects, total count.
get_sample_data = api.model(
    "GetSampleData",
    {
        "status": fields.String(description="ok|nok"),
        "objects": fields.Nested(sampledata, as_list=True),
        "total_rows": fields.Integer(),
    },
)
| StarcoderdataPython |
11258858 | # ============================================================================
# ============================================================================
# Copyright (c) 2021 <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Author: <NAME>
# E-mail:
# Description: Tests for the Algotom package.
# Contributors:
# ============================================================================
"""
Tests for the methods in rec/reconstruction.py
"""
import unittest
import numpy as np
from numba import cuda
import scipy.ndimage as ndi
import algotom.rec.reconstruction as reco
class ReconstructionMethods(unittest.TestCase):
    """Tests for the FBP and DFI reconstruction methods.

    setUp builds a small smoothed rectangle phantom plus simulated 180- and
    360-degree sinograms; each test reconstructs and checks the maximum
    absolute error against the phantom.
    """

    def setUp(self):
        self.size = 64
        canvas = np.zeros((self.size + 1, self.size + 1), dtype=np.float32)
        canvas[20:40, 30:45] = np.float32(1.0)
        self.mat = ndi.gaussian_filter(canvas, 2.0)
        self.sino_360 = np.zeros((73, self.size + 1), dtype=np.float32)
        self.angles = np.linspace(0.0, 360.0, len(self.sino_360),
                                  dtype=np.float32)
        # Simulate projections by rotating the phantom and summing columns.
        for row, angle in enumerate(self.angles):
            rotated = ndi.rotate(self.mat, -angle, reshape=False)
            self.sino_360[row] = np.sum(rotated, axis=0)
        self.sino_180 = self.sino_360[:37]
        self.center = self.size // 2

    def test_fbp_reconstruction(self):
        rec_180 = reco.fbp_reconstruction(self.sino_180, self.center,
                                          apply_log=False, gpu=False)
        err_180 = np.max(np.abs(self.mat - rec_180))
        rec_360 = reco.fbp_reconstruction(self.sino_360, self.center,
                                          angles=np.deg2rad(self.angles),
                                          apply_log=False, gpu=False)
        err_360 = np.max(np.abs(self.mat - rec_360))
        # GPU path is exercised only when CUDA is present.
        gpu_ok = True
        if cuda.is_available() is True:
            rec_180 = reco.fbp_reconstruction(self.sino_180, self.center,
                                              apply_log=False, gpu=True)
            err_gpu_180 = np.max(np.abs(self.mat - rec_180))
            rec_360 = reco.fbp_reconstruction(self.sino_360, self.center,
                                              angles=np.deg2rad(self.angles),
                                              apply_log=False, gpu=True)
            err_gpu_360 = np.max(np.abs(self.mat - rec_360))
            if err_gpu_180 > 0.1 or err_gpu_360 > 0.1:
                gpu_ok = False
        self.assertTrue(err_180 <= 0.1 and err_360 <= 0.1 and gpu_ok)

    def test_dfi_reconstruction(self):
        rec_180 = reco.dfi_reconstruction(self.sino_180, self.center,
                                          apply_log=False)
        err_180 = np.max(np.abs(self.mat - rec_180))
        rec_360 = reco.dfi_reconstruction(self.sino_360, self.center,
                                          angles=np.deg2rad(self.angles),
                                          apply_log=False)
        err_360 = np.max(np.abs(self.mat - rec_360))
        self.assertTrue(err_180 <= 0.1 and err_360 <= 0.1)
| StarcoderdataPython |
9694046 | <filename>aiohappybase/__init__.py
"""
AIOHappyBase, a developer-friendly Python library to interact asynchronously
with Apache HBase.
"""
# Names re-exported as the public package API.
__all__ = [
    'DEFAULT_HOST',
    'DEFAULT_PORT',
    'Connection',
    'Table',
    'Batch',
    'ConnectionPool',
    'NoConnectionsAvailable',
]

# Importing _load_hbase_thrift registers the generated HBase Thrift modules
# as a side effect, so it must run before the submodule imports below.
from . import _load_hbase_thrift  # noqa
from .connection import DEFAULT_HOST, DEFAULT_PORT, Connection
from .table import Table
from .batch import Batch
from .pool import ConnectionPool, NoConnectionsAvailable

# Package version string computed by versioneer; the helper is deleted so
# it does not leak into the public namespace.
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| StarcoderdataPython |
1826700 | <reponame>rahulsingh50/Computer-Vision-Projects<gh_stars>1-10
import cv2
import numpy as np
def sketch(frame):
    """Return a binary "pencil sketch" mask for a BGR image.

    Pipeline: grayscale -> Gaussian blur -> Canny edge detection ->
    inverted binary threshold, so edges come out dark on a light
    background.

    @parameters: frame -- BGR image array from the camera
    """
    grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(grayscale, (5, 5), 0)
    edges = cv2.Canny(blurred, 10, 70)
    _, mask = cv2.threshold(edges, 100, 255, cv2.THRESH_BINARY_INV)
    return mask
# Open the default webcam (device 0) and stream a live sketch preview.
capture = cv2.VideoCapture(0)
while (True):
    response, frame = capture.read()
    cv2.imshow("Those edges", sketch(frame))
    # Quit when the user presses the 'q' key.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release the camera and close the preview window.
capture.release()
cv2.destroyAllWindows()
3363771 | import numpy as np
import plotly.express as px
class Simulation:
    """2-D explicit finite-difference heat-diffusion simulation.

    The n_x * n_y grid is flattened row-major into the 1-D temperature
    vector ``self.T`` (node k = row * n_x + col).
    """

    def __init__(self):
        # Ambient/initial temperature and grid dimensions.
        self.init_temp = 1
        self.n_x = 40
        self.n_y = 40
        self.reset()

    def reset(self):
        """Remove all heat sources and reset every node to init_temp."""
        self.heat_sources = []
        self.T = np.ones((self.n_x * self.n_y)) * self.init_temp

    def add_heat_source(self, loc, sign, intensity=1.0):
        """Register a point source (sign > 0) or sink (sign < 0) at flat
        grid index ``loc``."""
        self.heat_sources.append({
            'loc': loc,
            'intensity': intensity * sign
        })

    def set_heat_source_mask(self, binary_array, sign):
        """Add a heat source at every truthy cell of ``binary_array``
        (flattened to match the 1-D node indexing)."""
        for loc, val in enumerate(binary_array.ravel()):
            if val:
                self.add_heat_source(loc, sign)

    def step(self):
        """Advance the temperature field by one explicit Euler time step.

        Corner and border nodes couple to a ghost ambient temperature
        (init_temp) weighted by ``alpha``; interior nodes use the standard
        5-point diffusion stencil.
        """
        dt = 0.25      # time step
        alpha = 1e-3   # coupling strength to the ambient boundary
        C = self.T.copy()
        n = 0            # column counter used to detect the West border
        m = 1 - self.n_x # counter used to detect the East border
        for k in range(self.T.size):
            if (k==0):
                # South-West corner
                C[k] = self.T[k] * (1 - 4 * dt * (alpha + 1)) + 4 * dt * alpha * self.init_temp + 2 * dt * (self.T[k + self.n_x] + self.T[k + 1])
            elif (k==self.n_x*(self.n_y-1)):
                # North-West corner
                C[k] = self.T[k]*(1-4*dt*(alpha+1)) + 4*dt*alpha*self.init_temp + 2*dt*(self.T[k-self.n_x]+self.T[k+1])
            elif (k==self.n_x*self.n_y-1):
                # North-East corner
                C[k] = self.T[k]*(1-4*dt*(alpha+1)) + 4*dt*alpha*self.init_temp + 2*dt*(self.T[k-self.n_x]+self.T[k-1])
            elif (k==self.n_x-1):
                # South-East corner (original comment mislabelled this as
                # South-West; the stencil uses the left neighbour k-1).
                C[k] = self.T[k]*(1-4*dt*(alpha+1)) + 4*dt*alpha*self.init_temp + 2*dt*(self.T[k+self.n_x]+self.T[k-1])
            elif (k<self.n_x-1):
                # South border
                C[k] = self.T[k]*(1-2*dt*(alpha+2)) + 2*dt*alpha*self.init_temp + 2*dt*(self.T[k+self.n_x] + self.T[k+1]/2 + self.T[k-1]/2)
            elif ((self.n_x*self.n_y-k)<self.n_x):
                # North border
                C[k] = self.T[k]*(1-2*dt*(alpha+2)) + 2*dt*alpha*self.init_temp + 2*dt*(self.T[k-self.n_x] + self.T[k+1]/2 + self.T[k-1]/2)
            elif (n==self.n_x and k!=self.n_x*(self.n_y-1)):
                # West border
                C[k] = self.T[k]*(1-2*dt*(alpha+2)) + 2*dt*alpha*self.init_temp + 2*dt*(self.T[k+1] + self.T[k+self.n_x]/2 + self.T[k-self.n_x]/2)
                n = 0
            elif (m==self.n_x):
                # East border
                C[k] = self.T[k]*(1-2*dt*(alpha+2)) + 2*dt*alpha*self.init_temp + 2*dt*(self.T[k-1] + self.T[k+self.n_x]/2 + self.T[k-self.n_x]/2)
                m = 0
            else:
                # Other (interior) nodes: 5-point stencil
                C[k] = self.T[k]*(1-4*dt) + dt*(self.T[k+1] + self.T[k-1] + self.T[k+self.n_x] + self.T[k-self.n_x])
            # Heat sources: a source node gets the interior stencil plus the
            # source term, overriding any border formula computed above.
            # NOTE(review): for a source placed on a border node this
            # accesses out-of-range neighbours (k+n_x / k+1) -- confirm
            # sources are only ever placed in the interior.
            if len(self.heat_sources):
                for hs in self.heat_sources:
                    if (k == hs['loc']):
                        C[k] = self.T[k]*(1-4*dt) + dt*(self.T[k+1] + self.T[k-1] + self.T[k+self.n_x] + self.T[k-self.n_x]) + dt * hs['intensity']
            n += 1
            m += 1
        self.T = C.copy()

    def get_heatmap(self):
        """Render the current temperature field as a plotly heatmap figure
        with axes and colour scale hidden."""
        fig = px.imshow(self.T.reshape((self.n_x, self.n_y)))#, zmin=self.init_temp, zmax=2 * self.init_temp)
        fig.update_xaxes(visible=False)
        fig.update_yaxes(visible=False)
        fig.update_traces(showscale=False)
        fig.update_coloraxes(showscale=False)
        fig.update_layout(
            margin={'l': 0, 'r': 0, 't': 0, 'b': 0},
            autosize=False,
            width=500,
            height=500,
            paper_bgcolor="black"
        )
        return fig
if __name__ == '__main__':
    # Demo: advance the simulation 100 steps and display the heatmap.
    s = Simulation()
    for _ in range(100):
        s.step()
    fig = s.get_heatmap()
    fig.show()
| StarcoderdataPython |
3488205 | <gh_stars>0
# Exercise: ask for the age and height of 5 people, store each value in its
# own list, then print the ages and the heights in reverse input order.
lista_idade, lista_altura = [], []
for _ in range(1, 6):
    idade = int(input('Informe a idade: '))
    lista_idade.append(idade)
    altura = float(input('Informe sua altura: '))
    lista_altura.append(altura)
print()
reverso_idade = list(reversed(lista_idade))
reverso_altura = list(reversed(lista_altura))
print(f'A lista invertida da idade é: {reverso_idade}')
# BUG FIX: the original message said "idade" (age) here although this line
# prints the reversed heights list.
print(f'A lista invertida da altura é: {reverso_altura}')
| StarcoderdataPython |
1712126 | # Copyright (c) 2019 StackHPC Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from ansible.errors import AnsibleFilterError
except ImportError:
# NOTE(mgoddard): For unit testing we don't depend on Ansible since it is
# not in global requirements.
AnsibleFilterError = Exception
class FilterError(AnsibleFilterError):
    """Error raised during execution of a custom Jinja2 filter.

    Subclasses ``AnsibleFilterError`` when Ansible is importable, otherwise
    the plain ``Exception`` fallback defined at the top of this module, so
    the filters remain unit-testable without Ansible installed.
    """
| StarcoderdataPython |
1803570 | <gh_stars>10-100
#!/usr/bin/env python
import os, subprocess, sys, time, bz2
from random import randint
"""Check if bowtie2 is installed. Stops programm if not"""
def check_bowtie2():
    """Verify that bowtie2 is available on PATH.

    Prints the detected version and executable path; if the lookup or the
    version query fails, prints installation instructions and terminates
    the program.
    """
    platform = sys.platform.lower()[0:3]
    # 'where' on Windows, 'which' everywhere else (Linux, Mac, ...).
    finder = 'where' if platform == 'win' else 'which'
    try:
        lookup = subprocess.Popen([finder, 'bowtie2'], stdout=subprocess.PIPE)
        bowtie2_path = lookup.communicate()[0].decode('utf-8')
        version_query = subprocess.Popen(['bowtie2', '--version'], stdout=subprocess.PIPE)
        bowtie2_version = version_query.communicate()[0].decode('utf-8').split()[2]
        print('[I] Bowtie2 is installed')
        print(' version: ' + str(bowtie2_version) + ', path: ' + str(bowtie2_path).strip())
    except OSError as err:
        sys.stderr.write('\n[E] Execution has encountered an error!\n')
        sys.stderr.write(' ' + str(err) + '\n')
        print('\n[E] Please, install Bowtie2.\n')
        print(' Bowtie2 is used to generate the .bt2 index files used for mapping\n')
        sys.exit()
"""Get a random color for plotting coverage curves"""
def random_color(used):
reset = False
total = ['#ff0000', '#800000', '#ffff00', '#808000', '#00ff00',
'#008000', '#00ffff', '#008080', '#008080', '#0000ff',
'#000080', '#ff00ff', '#800080', '#fa8072', '#ffa07a',
'#dc143c', '#b22222', '#8b0000', '#ff69b4', '#ff1493',
'#c71585', '#ff7f50', '#ff4500', '#ffa500', '#ffd700',
'#bdb76b', '#9400d3', '#4b0082', '#483d8b', '#6a5acd',
'#7fff00', '#32cd32', '#00fa9a', '#2e8b57', '#006400',
'#20b2aa', '#4682b4', '#4169e1', '#ffdead', '#f4a460',
'#d2691e', '#a52a2a', '#a0522d', '#b8860b', '#000000']
available = [c for c in total if c not in used]
# If we have no other available colors, than repeat the picking
if len(available) == 0:
available = total
reset = True
return (available[randint(0, len(available) - 1)], reset)
def info(s, init_new_line=False, exit=False, exit_value=0):
    """Write *s* to stdout (optionally preceded by a newline) and flush.

    When ``exit`` is true, terminate the interpreter with ``exit_value``.
    """
    prefix = '\n' if init_new_line else ''
    sys.stdout.write(prefix + '{}'.format(s))
    sys.stdout.flush()
    if exit:
        sys.exit(exit_value)
3225605 | <gh_stars>0
import threading
import random
import time
#used help from: http://rosettacode.org/wiki/Dining_philosophers#Python !!
class Philosopher(threading.Thread):
    """A dining philosopher: repeatedly sleeps ("thinks") and tries to eat.

    Forks are modelled as ``threading.Lock`` objects, each shared with one
    neighbour at the table.
    """

    # Class-level flag shared by every philosopher; flipping it to False
    # stops all threads after their current think/eat cycle.
    running = True

    def __init__(self, name, l_fork, r_fork):
        threading.Thread.__init__(self)
        self.name = name      # also sets the underlying Thread name
        self.l_fork = l_fork  # lock shared with the left neighbour
        self.r_fork = r_fork  # lock shared with the right neighbour

    def run(self):
        # Think for a random while, then attempt to dine; repeat until the
        # shared `running` flag is cleared.
        while(self.running):
            time.sleep(random.uniform(1, 10))
            self.dine()

    def dine(self):
        # Deadlock avoidance: block on one fork, then only *try* the other
        # without blocking.  On failure, release the held fork and retry
        # with the forks swapped, so no philosopher ever blocks while
        # holding a fork.
        fork_1, fork_2 = self.l_fork, self.r_fork
        while self.running:
            fork_1.acquire(True)
            locked = fork_2.acquire(False)
            if locked:
                break  # both forks acquired -> go eat
            fork_1.release()
            fork_1, fork_2 = fork_2, fork_1
        else:
            # `while ... else`: reached only when the loop condition became
            # False without a break, i.e. `running` was cleared.  No forks
            # are held at this point, so simply stop.
            return
        self.dining()
        fork_1.release()
        fork_2.release()

    def dining(self):
        # Eat for a random while; forks are released by the caller (dine).
        print(f'{self.name} :eating')
        time.sleep(random.uniform(1, 10))
        print(f'{self.name} :done eating and put forks down.')
def dining_philosophers():
    """Run five philosophers for ~100 seconds, then signal them to stop."""
    forks = [threading.Lock() for n in range(5)]
    philosopherNames = ('Callicles','Solon','Epicurus','Socrates', 'Plato')
    # Circular table: philosopher i shares fork i with one neighbour and
    # fork (i+1) % 5 with the other.
    philosophers= [Philosopher(philosopherNames[i], forks[i%5], forks[(i+1)%5]) for i in range(5)]
    random.seed(507129)
    Philosopher.running = True
    for p in philosophers: p.start()
    # Let the simulation run for 100 seconds before asking threads to stop.
    # NOTE(review): "Finished." may print while philosophers are still
    # eating; the (non-daemon) threads keep the process alive until each
    # completes its current cycle.
    time.sleep(100)
    Philosopher.running = False
    print ("Finished.")
# Module-level entry point: the simulation starts on import/execution.
dining_philosophers()
| StarcoderdataPython |
5065485 | # coding: utf-8
"""
"""
import pytest
import io
import sys
from sampledb.logic.instruments import create_instrument
import sampledb.__main__ as scripts
from ..test_utils import app_context
@pytest.fixture
def instruments():
    """Create two example instruments and return them as a list."""
    description = 'Example Instrument Description'
    return [
        create_instrument(name, description)
        for name in ('Instrument 1', 'Instrument 2')
    ]
def test_list_instruments(instruments, capsys):
    """The 'list_instruments' CLI command prints every known instrument."""
    scripts.main([scripts.__file__, 'list_instruments'])
    captured = capsys.readouterr()[0]
    expected = [(1, 'Instrument 1'), (2, 'Instrument 2')]
    for instrument_id, instrument_name in expected:
        line = '- #{0}: {1}'.format(instrument_id, instrument_name)
        assert line in captured
def test_list_instruments_arguments(instruments, capsys):
    """'list_instruments' rejects extra arguments and prints usage help."""
    with pytest.raises(SystemExit) as exc_info:
        scripts.main([scripts.__file__, 'list_instruments', 1])
    # Compare the exit *code*, not the SystemExit instance: the original
    # `exc_info.value != 0` compared the exception object to 0, which is
    # always true, so the assertion could never fail.
    assert exc_info.value.code != 0
    assert 'Usage' in capsys.readouterr()[0]
| StarcoderdataPython |
232827 | <filename>source/MCPM/__init__.py
from os import path
from .version import __version__
# Start from this file's absolute location...
MODULE_PATH = path.abspath(__file__)
# ...and walk three directory levels up so MODULE_PATH points at the
# project root (file -> package dir -> source dir -> root).
# NOTE(review): presumably the repository root -- confirm against layout.
for i in range(3):
    MODULE_PATH = path.dirname(MODULE_PATH)
| StarcoderdataPython |
165622 | from egpo_utils.egpo.egpo import EGPOTrainer
from egpo_utils.human_in_the_loop_env import HumanInTheLoopEnv
from egpo_utils.train.utils import initialize_ray
initialize_ray(test_mode=False)
def get_function(ckpt):
    """Build an EGPOTrainer, restore it from *ckpt*, and return a policy fn.

    The returned closure maps one observation to the result of the
    trainer's compute_actions() call (a dict keyed by "default_policy").
    """
    # NOTE(review): this config mirrors the training-time settings; the
    # individual hyper-parameters are EGPO-specific -- verify against the
    # training script before changing any of them.
    trainer = EGPOTrainer(dict(
        env=HumanInTheLoopEnv,
        # ===== Training =====
        takeover_data_discard=False,
        alpha=10.0,
        recent_episode_num=5,
        normalize=True,
        twin_cost_q=True,
        k_i=0.01,
        k_p=5,
        # search > 0
        k_d=0.1,
        # expected max takeover num
        cost_limit=300,
        optimization=dict(actor_learning_rate=1e-4, critic_learning_rate=1e-4, entropy_learning_rate=1e-4),
        prioritized_replay=False,
        horizon=400,
        target_network_update_freq=1,
        timesteps_per_iteration=100,
        metrics_smoothing_episodes=10,
        learning_starts=100,
        clip_actions=False,
        normalize_actions=True,
    ))
    trainer.restore(ckpt)

    def _f(obs):
        # Wrap the observation for the trainer's single "default_policy".
        ret = trainer.compute_actions({"default_policy": obs})
        return ret

    return _f
if __name__ == '__main__':
    # Evaluate a series of EGPO checkpoints in the human-in-the-loop env
    # and dump per-episode statistics to a JSON file.

    def make_env(env_id=None):
        return HumanInTheLoopEnv(dict(manual_control=False, use_render=False))

    from collections import defaultdict

    super_data = defaultdict(list)   # checkpoint index -> list of episode stats
    EPISODE_NUM = 50                 # episodes evaluated per checkpoint
    env = make_env()
    for ckpt_idx in range(12, 163, 10):
        ckpt = ckpt_idx
        compute_actions = get_function(
            "/home/liquanyi/corl_human_exp/EGPO/SACPIDSaverTrainer_HumanInTheLoopEnv_0689e_00000_0_seed=0_2021-08-24_20-01-33/checkpoint_{}/checkpoint-{}".format(
                ckpt, ckpt)
        )
        o = env.reset()
        epi_num = 0
        total_cost = 0
        total_reward = 0
        success_rate = 0     # count of successful episodes (divided later)
        ep_cost = 0
        ep_reward = 0
        success_flag = False
        horizon = 2000       # hard cap on steps per episode
        step = 0
        while True:
            # action_to_send = compute_actions(w, [o], deterministic=False)[0]
            step += 1
            action_to_send = compute_actions(o)["default_policy"]
            o, r, d, info = env.step(action_to_send)
            total_reward += r
            ep_reward += r
            total_cost += info["cost"]
            ep_cost += info["cost"]
            # Episode boundary: env signalled done or the step cap was hit.
            if d or step > horizon:
                if info["arrive_dest"]:
                    success_rate += 1
                    success_flag = True
                epi_num += 1
                if epi_num > EPISODE_NUM:
                    break
                else:
                    o = env.reset()
                # NOTE(review): when the loop breaks above, the final
                # episode's stats are never appended -- confirm intended.
                super_data[ckpt].append({"reward": ep_reward, "success": success_flag, "cost": ep_cost})
                ep_cost = 0.0
                ep_reward = 0.0
                success_flag = False
                step = 0
        print(
            "CKPT:{} | success_rate:{}, mean_episode_reward:{}, mean_episode_cost:{}".format(ckpt,
                                                                                             success_rate / EPISODE_NUM,
                                                                                             total_reward / EPISODE_NUM,
                                                                                             total_cost / EPISODE_NUM))
        # Drop the restored trainer (and its closure) before the next one.
        del compute_actions
    env.close()
    import json

    try:
        # NOTE(review): json stringifies the integer checkpoint keys on
        # dump; readers must expect string keys.
        with open("super_data_12_162_10.json", "w") as f:
            json.dump(super_data, f)
    except:
        pass
    print(super_data)
| StarcoderdataPython |
9739917 | import os
from logger import log
class DocumentReader(object):
    """Read a document from disk as raw bytes and expose it as UTF-8 text.

    Fatal problems (missing file, undecodable content) terminate the
    process via SystemExit, in keeping with this module's CLI-tool style.
    """

    def __init__(self, document_path):
        self.document_path = document_path
        if not os.path.isfile(self.document_path):
            raise SystemExit("File not found: %s" % self.document_path)
        log.info("Reading file: %s.", self.document_path)
        with open(self.document_path, "rb") as source:
            self.file_string = source.read()

    def get_string(self):
        """Return the content decoded as UTF-8, stripped of outer whitespace."""
        try:
            decoded = self.file_string.decode("utf-8")
        except UnicodeDecodeError:
            raise SystemExit("Cannot read '" + self.document_path + "': UnicodeDecodeError.")
        return decoded.strip()

    def save(self, doc_content):
        """Overwrite the document on disk with *doc_content*, UTF-8 encoded."""
        with open(self.document_path, "wb") as target:
            target.write(doc_content.encode("utf-8"))
        log.info("Wrote UTF-8-encoded document: %s.", self.document_path)
| StarcoderdataPython |
5004791 | """
# !/usr/bin/env python
-*- coding: utf-8 -*-
@Time : 2022/3/15 下午6:27
@Author : Yang "Jan" Xiao
@Description : RNN
"""
import torch
import torch.nn as nn
from torchaudio.transforms import MFCC
class RNN(nn.Module):
    """LSTM classifier over MFCC feature sequences.

    The input is a 4-D tensor ``(batch, channel, n_mfcc, time)`` that is
    flattened to ``(batch, channel * time, n_mfcc)`` so each LSTM step
    consumes one ``n_mfcc``-dimensional vector; the last step's hidden
    state is projected to class logits.
    """

    def __init__(self, input_size, num_classes, hidden_size=512, n_layers=1):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = n_layers
        self.lstm = nn.LSTM(input_size, hidden_size, n_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, inputs):
        batch_size, _, n_mfcc, _ = inputs.shape
        # NOTE(review): this reshape regroups raw memory into n_mfcc-sized
        # chunks rather than transposing the time and feature axes; a
        # transpose(2, 3) may have been intended -- confirm before relying
        # on the per-step feature semantics.
        inputs = inputs.reshape(batch_size, -1, n_mfcc)
        # Allocate the initial hidden/cell states on the same device and
        # with the same dtype as the input.  The original always created
        # them on the CPU with default dtype, which crashes as soon as the
        # module is moved to a GPU (device-mismatch in nn.LSTM).
        h0 = torch.zeros(self.num_layers, batch_size, self.hidden_size,
                         device=inputs.device, dtype=inputs.dtype)
        c0 = torch.zeros(self.num_layers, batch_size, self.hidden_size,
                         device=inputs.device, dtype=inputs.dtype)
        out, _ = self.lstm(inputs, (h0, c0))
        # Classify from the hidden representation of the final time step.
        out = self.fc(out[:, -1, :])
        return out
class MFCC_RNN(nn.Module):
    """Waveform classifier: a torchaudio MFCC front-end feeding the RNN."""

    def __init__(self, n_mfcc, sampling_rate, n_layers=1, hidden_size=512, num_classes=12):
        super(MFCC_RNN, self).__init__()
        self.sampling_rate = sampling_rate
        self.num_classes = num_classes
        self.n_mfcc = n_mfcc  # feature length per frame
        self.mfcc_layer = MFCC(sample_rate=self.sampling_rate, n_mfcc=self.n_mfcc, log_mels=True)
        self.rnn = RNN(self.n_mfcc, self.num_classes, hidden_size=hidden_size, n_layers=n_layers)

    def forward(self, waveform):
        """Convert *waveform* to MFCC features and return class logits."""
        features = self.mfcc_layer(waveform)
        return self.rnn(features)
| StarcoderdataPython |
64860 | # Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
MUSDB18 data-iterator code for MSS.
'''
import random
import numpy as np
import musdb
from nnabla.utils.data_source import DataSource
class Compose():
    """Compose several augmentation transforms into one callable.

    Args:
        transforms: list of callables, applied to the audio in order.
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, audio):
        # Thread the audio through every transform, left to right.
        for transform in self.transforms:
            audio = transform(audio)
        return audio
def _augment_gain(audio, low=0.75, high=1.25):
"""Applies a random gain between `low` and `high`"""
g = random.uniform(low, high)
return audio * g
def _augment_channelswap(audio):
"""Swap channels of stereo signals with a probability of p=0.5"""
if audio.shape[0] == 2 and random.random() < 0.5:
return np.flip(audio, 0)
else:
return audio
def load_datasources(parser, args):
    """Register MUSDB dataset CLI options and build the training dataset.

    Args:
        parser: argparse parser, extended with dataset-specific options.
        args: placeholder; the freshly re-parsed options are returned.

    Returns:
        (train_dataset, args) -- the MUSDB data source and parsed options.
    """
    parser.add_argument('--is-wav', action='store_true', default=True,
                        help='loads wav instead of STEMS')
    parser.add_argument('--samples-per-track', type=int, default=64)
    parser.add_argument(
        '--source-augmentations', type=str, nargs='+',
        default=['gain', 'channelswap']
    )
    args = parser.parse_args()
    # Resolve each augmentation name to its module-level _augment_* helper.
    augmentation_fns = [globals()['_augment_' + name]
                        for name in args.source_augmentations]
    source_augmentations = Compose(augmentation_fns)
    train_dataset = MUSDBDataSource(
        source_augmentations=source_augmentations, random_track_mix=True, args=args)
    return train_dataset, args
class MUSDBDataSource(DataSource):
    # nnabla data source over the MUSDB18 "train" subset; yields
    # (mixture, target) pairs assembled from randomly excerpted stems.

    def __init__(
            self,
            args,
            download=False,
            samples_per_track=64,
            source_augmentations=lambda audio: audio,
            random_track_mix=False,
            dtype=np.float32,
            seed=42,
            rng=None
    ):
        """
        MUSDB18 nnabla.utils.data_source that samples from the MUSDB tracks
        using track and excerpts with replacement.
        Parameters
        ----------
        args : additional arguments used to add further control for
            the musdb dataset initialization function.
        download : boolean
            automatically download 7s preview version of MUS
        samples_per_track : int
            sets the number of samples, yielded from each track per epoch.
            Defaults to 64
        source_augmentations : list[callables]
            provide list of augmentation function that take a multi-channel
            audio file of shape (src, samples) as input and output. Defaults to
            no-augmentations (input = output)
        random_track_mix : boolean
            randomly mixes sources from different tracks to assemble a
            custom mix. This augmenation is only applied for the train subset.
        seed : int
            control randomness of dataset iterations
        dtype : numeric type
            data type of torch output tuple x and y
        """
        super(MUSDBDataSource, self).__init__(shuffle=True)
        if rng is None:
            rng = np.random.RandomState(seed)
        self.rng = rng
        # Seed the stdlib RNG too: excerpt starts and random track picks in
        # _get_data use `random`, while shuffling uses self.rng.
        random.seed(seed)
        self.args = args
        self.download = args.root is None
        self.samples_per_track = samples_per_track
        self.source_augmentations = source_augmentations
        self.random_track_mix = random_track_mix
        self.mus = musdb.DB(
            root=args.root,
            is_wav=args.is_wav,
            split=None,
            subsets='train',
            download=download
        )
        print(f"Finished loading dataset with {len(self.mus.tracks)} tracks.")
        self.sample_rate = 44100  # musdb has fixed sample rate
        self.dtype = dtype
        # Each track is sampled `samples_per_track` times per epoch.
        self._size = len(self.mus.tracks) * self.samples_per_track
        self._variables = ('mixture', 'target')
        self.reset()

    def _get_data(self, position):
        # Return one (mixture, target) pair for the shuffled index at
        # `position`.
        index = self._indexes[position]
        audio_sources = []
        target_ind = None
        # select track
        track = self.mus.tracks[index // self.samples_per_track]
        # at training time we assemble a custom mix
        # NOTE(review): args.seq_dur doubles as the train/eval switch here
        # (falsy -> deterministic full track) -- confirm caller contract.
        if self.args.seq_dur:
            for k, source in enumerate(self.mus.setup['sources']):
                # memorize index of target source
                if source == self.args.target:
                    target_ind = k
                # select a random track
                if self.random_track_mix:
                    track = random.choice(self.mus.tracks)
                # set the excerpt duration
                track.chunk_duration = self.args.seq_dur
                # set random start index
                track.chunk_start = random.uniform(
                    0, track.duration - self.args.seq_dur
                )
                # load source audio and apply time domain source_augmentations
                audio = track.sources[source].audio.T
                audio = self.source_augmentations(audio)
                audio_sources.append(audio)
            # create stem tensor of shape (source, channel, samples)
            stems = np.stack(audio_sources, axis=0)
            # # apply linear mix over source index=0
            x = np.sum(stems, axis=0)
            # get the target stem
            if target_ind is not None:
                y = stems[target_ind]
            # assuming vocal/accompaniment scenario if target!=source
            else:
                vocind = list(self.mus.setup['sources'].keys()).index('vocals')
                # apply time domain subtraction
                y = x - stems[vocind]
        # for validation and test, we deterministically yield the full musdb track
        else:
            # get the non-linear source mix straight from musdb
            x = track.audio.T
            y = track.targets[self.args.target].audio.T
        return x, y

    def reset(self):
        # Re-shuffle (or re-sequence) the sampling order for a new epoch.
        if self._shuffle:
            self._indexes = self.rng.permutation(self._size)
        else:
            self._indexes = np.arange(self._size)
        super(MUSDBDataSource, self).reset()
| StarcoderdataPython |
# NOTE(review): auto-generated IronPython stub mirroring the .NET
# System.Security.AccessControl.PrivilegeNotHeldException type; `ZZZ` and
# `instance` are mock helpers emitted by the stub generator and are not
# meant to be called by user code.
class PrivilegeNotHeldException(UnauthorizedAccessException):
    """
    The exception that is thrown when a method in the System.Security.AccessControl namespace attempts to enable a privilege that it does not have.
    PrivilegeNotHeldException()
    PrivilegeNotHeldException(privilege: str)
    PrivilegeNotHeldException(privilege: str,inner: Exception)
    """
    def ZZZ(self):
        """hardcoded/mock instance of the class"""
        return PrivilegeNotHeldException()
    instance=ZZZ()
    """hardcoded/returns an instance of the class"""
    def GetObjectData(self,info,context):
        """
        GetObjectData(self: PrivilegeNotHeldException,info: SerializationInfo,context: StreamingContext)
        Sets the info parameter with information about the exception.
        info: The System.Runtime.Serialization.SerializationInfo that holds the serialized object data about the exception being thrown.
        context: The System.Runtime.Serialization.StreamingContext that contains contextual information about the source or destination.
        """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod
    def __new__(self,privilege=None,inner=None):
        """
        __new__(cls: type)
        __new__(cls: type,privilege: str)
        __new__(cls: type,privilege: str,inner: Exception)
        """
        pass
    def __reduce_ex__(self,*args):
        pass
    def __str__(self,*args):
        pass
    PrivilegeName=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the name of the privilege that is not enabled.
    Get: PrivilegeName(self: PrivilegeNotHeldException) -> str
    """
    SerializeObjectState=None
| StarcoderdataPython |
11386719 | #!/usr/bin/python3
import sys
from sys import argv, exit, stderr
import os
import nbformat as nbf
import yaml
from collections import OrderedDict
import numpy as np
import re, ast
def represent_dictionary_order(self, dict_data):
    # PyYAML representer that serialises an OrderedDict as a plain mapping
    # while preserving key order (registered in setup_yaml below).
    return self.represent_mapping('tag:yaml.org,2002:map', dict_data.items())
def setup_yaml():
    """Register the OrderedDict representer so YAML dumps keep key order."""
    yaml.add_representer(OrderedDict, represent_dictionary_order)
# Register at import time so every later yaml.dump is order-preserving.
setup_yaml()
def add_cell(cell_type, cell_string, cell_metadata):
    """Append a cell of the given kind to the global notebook ``nb``.

    Args:
        cell_type: one of "Code", "Markdown" or "Raw".
        cell_string: the cell's source text.
        cell_metadata: metadata dict attached to the cell.

    Raises:
        ValueError: if ``cell_type`` is not a supported cell kind.
    """
    # Dispatch table instead of an if/elif chain.  nbformat v4 has no
    # heading cells any more (headings are plain Markdown), so "Heading"
    # is intentionally unsupported.
    factories = {
        "Code": nbf.v4.new_code_cell,
        "Markdown": nbf.v4.new_markdown_cell,
        "Raw": nbf.v4.new_raw_cell,
    }
    if cell_type not in factories:
        # The original used `assert False`, which is silently skipped when
        # Python runs with -O; raise an explicit, descriptive error instead.
        raise ValueError("Unsupported cell type: {!r}".format(cell_type))
    nb['cells'].append(factories[cell_type](cell_string, metadata=cell_metadata))
def usage():
    """Print this script's usage message on stderr."""
    prog = os.path.basename(argv[0])
    print(f"""Usage: ./{prog} instance_file.yaml\n\n dove il parametro obbligatorio <instance_file.yaml> è il nome del file coi dati di istanza specifica.""", file=stderr)
# THE MAIN PROGRAM:
#Usage: command_name instance file.yaml
# Exactly one argument (the instance YAML file) is required.
if len(argv) != 2:
    print(f"Mh... you have called the script {os.path.basename(argv[0])} passing to it {len(argv)-1} parameters. Expecting just one!")
    usage()
    exit(1)
# BEGIN instance specific data loading
try:
    with open(argv[1], 'r') as stream:
        data_instance = yaml.safe_load(stream)
except FileNotFoundError:
    print(f"Can\'t open file {argv[1]}. Wrong file name or file path")
    exit(1)
except IOError:
    print("Error: can\'t read the file")
    exit(1)
#except Exception:
# tb = sys.exc_info()[2]
# raise OtherException(...).with_traceback(tb)
# BEGIN: build the variables used to generate the free-mode yaml instance
yaml_gen=OrderedDict()
yaml_gen['name']=data_instance['name']
yaml_gen['title']=data_instance['title']
tasks_istanza_libera=[]
edges=data_instance['edges']
edges2=data_instance['edges2']
# END: variables for the free-mode yaml instance
tasks=data_instance['tasks']
# Accumulate the exercise's total points and count the tasks (n ends up
# equal to len(tasks); total_point is the sum of per-task points).
total_point=0
n = 0
for i in range (0,len(tasks)):
    total_point+=tasks[i]['tot_points']
    n += 1
num_of_question=1
# END instance specific data loading
# Handy Ctrl-C Ctrl-V stuff:
#meta_init={"hide_input": True, "init_cell": True, "trusted": True, "deletable": False, "editable": False}
#meta_run={"hide_input": True, "editable": False, "deletable": False, "tags": ["runcell"], "trusted": True}
#meta_stud_input={"trusted": True, "deletable": False}
# NOTEBOOK DEFINITION:
nb = nbf.v4.new_notebook()
nb['cells']=[]
# ( CELL 1:
cell_type='Code'
cell_string = """\
%%javascript
window.findCellIndicesByTag = function findCellIndicesByTag(tagName) {
return (Jupyter.notebook.get_cells()
.filter(
({metadata: {tags}}) => tags && tags.includes(tagName)
)
.map((cell) => Jupyter.notebook.find_cell_index(cell))
);
};
window.runCells = function runCells() {
var c = window.findCellIndicesByTag('runcell');
Jupyter.notebook.execute_cells(c);
};
"""
cell_metadata={"hide_input": True,"tags": ["noexport"], "init_cell": True, "trusted": True, "deletable": False, "editable": False}
add_cell(cell_type,cell_string,cell_metadata)
# CELL 1 -END)
##############
# ( CELL 2:
cell_type='Code'
cell_string =f"""\
from IPython.core.display import display, HTML, Markdown, Javascript
from IPython.display import SVG, display
from IPython.display import Latex
import copy as cp
import numpy as np
def start():
display(Javascript("window.runCells()"))
arr_point={str([-1] * n)}
"""
cell_metadata={"hide_input": True, "init_cell": True,"tags": ["noexport"], "trusted": True, "deletable": False}
add_cell(cell_type,cell_string,cell_metadata)
# CELL 2 -END)
##############
# ( CELL 3:
cell_type='Code'
cell_string="""\
#seleziona la cella e premi ctrl-invio
start()
"""
cell_metadata={"tags": ["noexport"], "trusted": True, "deletable": False}
add_cell(cell_type,cell_string,cell_metadata)
# CELL 3 -END)
##############
# ( CELL 4:
cell_type='Code'
cell_string=f"""\
edges={edges}
edges2={edges2}
"""
cell_metadata={"hide_input": True, "editable": False, "init_cell": True, "deletable": False, "tags": ["noexport"], "trusted": True}
add_cell(cell_type,cell_string,cell_metadata)
# CELL 4 -END)
############
# ( CELL 5:
cell_type='Markdown'
cell_string=f"## Esercizio \[{total_point} pts\]<br/>"\
+f"{data_instance['title']}."
cell_metadata={"hide_input": True, "editable": False, "deletable": False, "tags": ["runcell","noexport"], "trusted": True}
add_cell(cell_type,cell_string,cell_metadata)
# CELL 5 -END)
##############
# ( CELL 6:
cell_type='Markdown'
cell_string=f"""\
Consideriamo i seguenti due grafi chiamati GRAFO 1 (a sinistra) e GRAFO 2 (a destra):
"""
cell_metadata={"hide_input": True, "editable": False, "deletable": False, "tags": ["runcell","noexport"], "trusted": True}
add_cell(cell_type,cell_string,cell_metadata)
# per istanza_libera
descript='Consideriamo i seguenti due grafi chiamati GRAFO 1 (a sinistra) e GRAFO 2 (a destra):'
# CELL 6 -END)
##############
# ( CELL 7:
cell_type='Code'
cell_string="""\
import matplotlib.pyplot as plt
from networkx import nx
n = 20
# segue una lista di precedenze della forma [u,v], cl significato che u deve essere schedulato oprima di v.
nodes=[(0+i) for i in range(n)]
prec_original_instance = []
for e in edges:
if e["flip"] == 1:
prec_original_instance.append((e["head"],e["tail"]))
else:
prec_original_instance.append((e["tail"],e["head"]))
prec_original_instance2 = []
for e in edges2:
if e["flip"] == 1:
prec_original_instance2.append((e["head"],e["tail"]))
else:
prec_original_instance2.append((e["tail"],e["head"]))
def evaluation_format(answ, pt_green,pt_red, index_pt):
pt_blue=0
if pt_green!=0:
pt_blue=pt_red-pt_green
pt_red=0
arr_point[index_pt]=pt_green
file = open("points.txt", "w")
file.write(str(arr_point))
file.close()
return f"{answ}. Totalizzeresti <span style='color:green'>[{pt_green} safe pt]</span>, \
<span style='color:blue'>[{pt_blue} possible pt]</span>, \
<span style='color:red'>[{pt_red} out of reach pt]</span>.<br>"
def visualizza_e_valuta_le_precedenze_non_rispettate(soluzione_problem_solver,lista_di_precedenze, pt_green, pt_red, index_pt, silent=False):
lista_visualizza=[] # lista di tuple (archi)
#controllo sulla lunghezza della lista fornita
if(len(soluzione_problem_solver)!=n):
#modifcare l'output, dire di che lunghezza voglio la lista e di che lunghezza l'ha data lui
if(silent):
return 0
else:
str_to_print=evaluation_format("No", 0, pt_red, index_pt) + "Hai fornito una soluzione di lungezza "+str(len(soluzione_problem_solver)) + ": \
dovresti fornirla di lunghezza "+str(n)
return display(Markdown(str_to_print))
check=np.zeros(len(soluzione_problem_solver))
#incremento la posizione soluzione_problem_solver[i] di uno , se sono inseriti tutti correttamente avrò
#un array di soli 1
for i in range(len(soluzione_problem_solver)):
try:
check[soluzione_problem_solver[i]]=check[soluzione_problem_solver[i]]+1
except:
if(silent):
return 0
else:
str_to_print=evaluation_format("No", 0, pt_red, index_pt) + "Hai inserito il nodo "+str(soluzione_problem_solver[i])+", ti ricordo che i nodi \
vanno da 0 a " + str(n-1)
return display(Markdown(str_to_print))
contatore_errori=0
#la lista contiene una e una volta sola tutti gli elementi
if(np.all((check == 1))):
if(lista_di_precedenze==1):
for element in prec_original_instance:
indice1=soluzione_problem_solver.index(element[0])
indice2=soluzione_problem_solver.index(element[1])
if(indice1>indice2):
lista_visualizza.append((element[0], element[1]))
contatore_errori=contatore_errori+1
if(lista_di_precedenze==2):
for element in prec_original_instance2:
indice1=soluzione_problem_solver.index(element[0])
indice2=soluzione_problem_solver.index(element[1])
if(indice1>indice2):
lista_visualizza.append((element[0], element[1]))
contatore_errori=contatore_errori+1
if(lista_di_precedenze!=2 and lista_di_precedenze!=1):
return "Vorresti valutare la tua soluzione rispetto alla lista di precedenze numero \
" +str(lista_di_precedenze)+ " Ti ricordo che le liste di precedenze sono 2, \
se vuoi valutare la tua soluzione rispetto alla prima lista digita 1 , altrimenti 2"
if(contatore_errori==0):
if(silent):
return 1
else:
str_to_print=evaluation_format("Si", pt_green, pt_red, index_pt) + "Sei riuscito a rispettare tutte le precedenze : hai dimostrato che il grafo fornito è un DAG!"
return display(Markdown(str_to_print))
else:
if(silent):
return 0
else:
str_to_print=evaluation_format("No", 0, pt_red, index_pt) + "Non hai rispettato " + str(contatore_errori) + " precedenze "
display(Markdown(str_to_print))
return visualizza(lista_visualizza)
#manca un elemento e/o un elemento viene ripetuto più di una volta
else:
if(silent):
return 0
else:
for k in range(len(check)):
if(check[k]==0):
if(silent):
return 0
else:
str_to_print=evaluation_format("No", 0, pt_red, index_pt) +"L'array NON contiene tutti i nodi, il nodo numero " + str(k) + " non è presente "
return display(Markdown(str_to_print))
def visualizza(ordinamento):
G = nx.DiGraph()
# mathplotlib o networkx o ?
# visualizziamo il grafo coi nodi sulla linea nelle posizioni specificate da ordinamento e gli archi che fanno panza per poterli vedere
# il problem-solver deve rendersi conto di quali archi sono rivolti all'indietro.
#for i in range(len(ordinamento)-1):
# G.add_edge(ordinamento[i],ordinamento[i+1])
G.add_edges_from(ordinamento)
nx.draw_planar(G,with_labels=True,arrows=True)
plt.plot()
def ciclo_di_precedenze(soluzione_problem_solver,lista_di_precedenze, pt_green=10, pt_red=10, index_pt=5, silent=False):
lunghezza=len(soluzione_problem_solver)
precedenze_da_valutare=0
if(lista_di_precedenze==1):
precedenze_da_valutare=prec_original_instance
if(lista_di_precedenze==2):
precedenze_da_valutare=prec_original_instance2
if(lista_di_precedenze!=1 and lista_di_precedenze!=2):
if(silent):
return 0
else:
return "Vorresti valutare la tua soluzione rispetto alla lista di precedenze numero \
" +str(lista_di_precedenze)+ " ti ricordo che le liste di precedenze sono 2, \
se vuoi valutare la tua soluzione rispetto alla prima lista digita 1 , altrimenti 2"
#la lista contiene una e una volta sola tutti gli elementi
# creo una stringa che raccoglie i nodi non esistenti (se forniti dallo studente in soluzione_problem_solver)
mystr=''
for node in soluzione_problem_solver:
if node not in nodes:
if mystr=='':
mystr=f'{node}'
else:
mystr=mystr+f', {node}'
if (lunghezza>n) or (mystr!='') or (lunghezza==0):
if lunghezza>n:
str_to_print=f"Attenzione: hai fornito un ciclo più lungo del numero totale di nodi del grafo, ovvero {n}."
elif lunghezza==0:
str_to_print=f"Attenzione: hai fornito un ciclo privo di nodi"
else:
str_to_print=f"Attenzione: i nodi {mystr} non esistono !"
str_to_print=evaluation_format("No", 0, pt_red, index_pt) + str_to_print
return display(Markdown(str_to_print))
else:
if ((soluzione_problem_solver[(len(soluzione_problem_solver)-1)],soluzione_problem_solver[0]) in precedenze_da_valutare):
for i in range(len(soluzione_problem_solver)-1):
if((soluzione_problem_solver[i],soluzione_problem_solver[i+1]) not in precedenze_da_valutare):
if(silent):
return 0
else:
str_to_print=evaluation_format("No", 0, pt_red, index_pt) + "Sembra che la tua lista non contenga un ciclo : controlla le precedenze tra il nodo " + str(soluzione_problem_solver[i]) + " e il nodo " + str(soluzione_problem_solver[i+1])
return display(Markdown(str_to_print))
if(silent):
return 1
else:
str_to_print=evaluation_format("Si", pt_green, pt_red, index_pt) + "La sequenza di nodi " + str(soluzione_problem_solver)+f" che hai fornito descrive un ciclo presente in GRAFO_CON_CICLO={lista_di_precedenze}"
return display(Markdown(str_to_print))
else:
if(silent):
return 0
else:
str_to_print=evaluation_format("No", 0, pt_red, index_pt) + "Sembra che la tua lista non contenga un ciclo : controlla le precedenze tra il nodo " + str(soluzione_problem_solver[lunghezza-1]) + " e il nodo " + str(soluzione_problem_solver[lunghezza-lunghezza])
return display(Markdown(str_to_print))
def visualizza_icosaedro(grafo):
front_face = [15, 16, 17, 18, 19]
back_face = [0, 1, 2, 3, 4]
middle = list(set(range(20)).difference(front_face + back_face))
shells = [front_face] + [middle] + [back_face]
pos = nx.shell_layout(grafo, shells)
#nx.draw_networkx(icosaedro, pos)
nx.draw_networkx_nodes(grafo, pos, alpha=0.6) #node_color='cyan',
nx.draw_networkx_labels(grafo, pos)
#disegna archi e etichette sugli archi
#positive=[(u,v) for (u,v,d) in grafo.edges(data=True) if d['w'] >= 0]
#negative=[(u,v) for (u,v,d) in grafo.edges(data=True) if d['w'] < 0]
positive=[(u,v)for (u,v,d) in grafo.edges(data=True)]
nx.draw_networkx_edges(grafo,pos,edgelist=positive,width=2,alpha = 0.6,edge_color = "g",arrows=True)
#nx.draw_networkx_edges(grafo,pos,edgelist=negative,width=2,alpha = 0.6,edge_color = "r",arrows=True)
#labels = nx.get_edge_attributes(grafo,'w')
#nx.draw_networkx_edge_labels(grafo,pos,edge_labels=labels)
ax = plt.gca()
ax.set_aspect('equal')
ax.set_axis_off()
def subplt(grafo_1, grafo_2):
fig = plt.figure()
plt.rcParams["figure.figsize"] = (15,7)
front_face = [15, 16, 17, 18, 19]
back_face = [0, 1, 2, 3, 4]
middle = list(set(range(20)).difference(front_face + back_face))
shells = [front_face] + [middle] + [back_face]
plt.subplot(121).title.set_text('GRAFO 1')
pos = nx.shell_layout(grafo_1, shells)
nx.draw_networkx_nodes(grafo_1, pos, alpha=0.6)
nx.draw_networkx_labels(grafo_1, pos)
positive=[(u,v) for (u,v,d) in grafo_1.edges(data=True)]
nx.draw_networkx_edges(grafo_1,pos,edgelist=positive,width=2,alpha = 0.6,edge_color = "g",arrows=True)
plt.subplot(122).title.set_text('GRAFO 2')
pos = nx.shell_layout(grafo_2, shells)
nx.draw_networkx_nodes(grafo_2, pos, alpha=0.6)
nx.draw_networkx_labels(grafo_2, pos)
positive=[(u,v) for (u,v,d) in grafo_2.edges(data=True)]
nx.draw_networkx_edges(grafo_2,pos,edgelist=positive,width=2,alpha = 0.6,edge_color = "g",arrows=True)
plt.show()
nodes=[(0+i) for i in range(20)]
edges=prec_original_instance
icosaedro_1=nx.DiGraph()
icosaedro_1.add_nodes_from(nodes)
icosaedro_1.add_edges_from(edges)
edges=prec_original_instance2
icosaedro_2=nx.DiGraph()
icosaedro_2.add_nodes_from(nodes)
icosaedro_2.add_edges_from(edges)
#visualizza_icosaedro(icosaedro_1)
subplt(icosaedro_1,icosaedro_2)
"""
cell_metadata={"hide_input": True, "editable": False, "init_cell": True, "deletable": False, "tags": ["noexport"], "trusted": True}
add_cell(cell_type,cell_string,cell_metadata)
# CELL 7 -END)
#############
# ( CELL 8:
cell_type='Markdown'
cell_string="Uno dei due rappresenta un DAG, mentre l'altro no. Individuali e rispondi alle seguenti domande."
cell_metadata={"hide_input": True, "editable": False, "deletable": False, "tags": ["runcell","noexport"], "trusted": True}
add_cell(cell_type,cell_string,cell_metadata)
# NOTE(review): the doubled apostrophe in "l''altro" below looks like
# YAML single-quote escaping carried into the description -- confirm the
# downstream yaml consumer expects it.
yaml_gen['description']=descript+"Uno dei due rappresenta un DAG, mentre l''altro no. Individuali e rispondi alle seguenti domande."
# CELL 8 -END)
##############
# ( CELL 9:
cell_type='Markdown'
cell_string="""__Richieste__:"""
cell_metadata={"hide_input": True, "editable": False, "deletable": False, "tags": ["runcell","noexport"], "trusted": True}
add_cell(cell_type,cell_string,cell_metadata)
# CELL 9 -END)
##############
# Task generator loop: for every task in the instance, emit the request
# cell, the student-answer cell(s), and the verifier cell.  "R1" asks for
# the cyclic graph plus a witness cycle; "R2" asks for the DAG plus a
# topological order.
for i in range (0,len(tasks)):
    if tasks[i]['request']=="R1":
        request=f"{num_of_question}. __[{tasks[i]['tot_points']} pts]__ Quale, tra i due grafi, rappresenta un DAG e quale no?<br>Esprimi la tua scelta associando alla variabile GRAFO_CON_CICLO il numero 1 se si pensa che GRAFO 1 <b>NON</b> sia un DAG oppure il numero 2 se si pensa che GRAFO 2 <b>NON</b> sia un DAG."
        verif=f"""\
# verificatore
ciclo_di_precedenze(ciclo, GRAFO_CON_CICLO, pt_green={tasks[i]['tot_points']}, pt_red={tasks[i]['tot_points']}, index_pt={num_of_question-1})
"""
    elif tasks[i]['request']=="R2":
        request=f"{num_of_question}. __[{tasks[i]['tot_points']} pts]__ Quale, tra i due grafi, rappresenta un DAG e quale no?<br>Esprimi la tua scelta associando alla variabile GRAFO_DAG il numero 1 se si pensa che GRAFO 1 sia un DAG oppure il numero 2 se si pensa che GRAFO 2 sia un DAG."
        verif=f"""\
# verificatore
visualizza_e_valuta_le_precedenze_non_rispettate(lista_da_sinistra_a_destra,GRAFO_DAG, pt_green={tasks[i]['tot_points']}, pt_red={tasks[i]['tot_points']}, index_pt={num_of_question-1})
"""
    else:
        # Unknown request kind in the instance file.
        assert False
    # ( CELL request:
    cell_type='Markdown'
    cell_string= request
    cell_metadata ={"hide_input": True, "editable": False, "deletable": False, "tags": ["runcell","noexport"], "trusted": True}
    add_cell(cell_type,cell_string,cell_metadata)
    # Mirror the task into the free-mode yaml instance.
    tasks_istanza_libera+=[{'tot_points' : tasks[i]['tot_points'],'ver_points': tasks[i]['ver_points'], 'description':cell_string}]
    # CELL request -END)
    ##############
    # ( CELL answer:
    if tasks[i]['request'] == "R1":
        cell_type='Code'
        cell_string=f"""\
# Specifica quale dei due grafi contiene un ciclo settando la seguente variabile:
GRAFO_CON_CICLO = # 1 oppure 2 ?
"""
        cell_metadata={"trusted": True, "deletable": False}
        add_cell(cell_type,cell_string,cell_metadata)
        ##################
        cell_type='Markdown'
        cell_string="""\
Riesci a fornire una sequenza v<sub>1</sub>,v<sub>2</sub>,...,v<sub>t</sub> di nodi che formino un ciclo in GRAFO_CON_CICLO?<br>
In pratica, all'interno di GRAFO_CON_CICLO, dovrà valere che:
<ul>
    <li>da v<sub>t</sub> parte un arco che punta a v<sub>1</sub></li>
    <li>da v<sub>i</sub> parte un arco che punta a v<sub>i+1</sub> ∀ i = 1,2,...,t-1</li>
</ul>
"""
        cell_metadata={"hide_input": True, "editable": False, "init_cell": True, "deletable": False, "tags": ["noexport"], "trusted": True}
        add_cell(cell_type,cell_string,cell_metadata)
        ##################
        cell_type='Code'
        cell_string="""\
# Scrivi sotto forma di array di interi la sequenza di nodi che costituisce un ciclo in GRAFO_CON_CICLO
ciclo = []
"""
        cell_metadata={"trusted": True, "deletable": False}
        add_cell(cell_type,cell_string,cell_metadata)
        ##################
    elif tasks[i]['request'] == "R2":
        cell_type='Code'
        cell_string=f"""\
# Specifica quale dei due grafi è un DAG settando la seguente variabile:
GRAFO_DAG = # 1 oppure 2 ?
"""
        cell_metadata={"trusted": True, "deletable": False}
        add_cell(cell_type,cell_string,cell_metadata)
        ##################
        cell_type='Markdown'
        cell_string="""\
Un grafo si dice DAG se e solo se è un grafo diretto e <b>presenta un ordinamento topologico</b>, ovvero una sequenza di nodi per cui ogni arco \"punta\" ad un nodo che si trova in una posizione successiva, all\'interno della sequenza, rispetto al nodo da cui tale arco parte.<br>Riesci a certificare il fatto che il grafo indicato sia effettivamente un DAG fornendo la sequenza di nodi appena descritta?
<br>L\'immagine di seguito dovrebbe aiutarti a intuire il concetto di ordinamento topologico; se ancora fatichi a capire, prova a consultare: <a href="https://en.wikipedia.org/wiki/Directed_acyclic_graph">info</a>
"""
        cell_metadata={"hide_input": True, "editable": False, "init_cell": True, "deletable": False, "tags": ["noexport"], "trusted": True}
        add_cell(cell_type,cell_string,cell_metadata)
        ##################
        cell_type='Code'
        cell_string="""\
display(SVG(url='https://upload.wikimedia.org/wikipedia/commons/c/c6/Topological_Ordering.svg'))
"""
        cell_metadata={"hide_input": True, "editable": False, "init_cell": True, "deletable": False, "tags": ["noexport"], "trusted": True}
        add_cell(cell_type,cell_string,cell_metadata)
        ##################
        cell_type='Code'
        cell_string="""\
# Scrivi sotto forma di array di interi la sequenza di nodi richiesta;
# serviti pure del verificatore per visualizzare quali precedenze non hai rispettato
lista_da_sinistra_a_destra = []
"""
        cell_metadata={"trusted": True, "deletable": False}
        add_cell(cell_type,cell_string,cell_metadata)
    else: # Alternative?
        # Unreachable given the assert above; emits an empty code cell.
        cell_type='Code'
        cell_string=f"""\
"""
        cell_metadata={"trusted": True, "deletable": False}
        add_cell(cell_type,cell_string,cell_metadata)
    #CELL answer -END)
    ###############
    # ( CELL verifier:
    if tasks[i]['request'] == "R1":
        cell_type='Code'
        cell_string=verif
        cell_metadata={"hide_input": False, "editable": False, "deletable": False, "trusted": True}
        add_cell(cell_type,cell_string,cell_metadata)
        num_of_question += 1
    elif tasks[i]['request'] == "R2":
        cell_type='Code'
        cell_string=verif
        cell_metadata={"hide_input": False, "editable": False, "deletable": False, "trusted": True}
        add_cell(cell_type,cell_string,cell_metadata)
        num_of_question += 1
    # CELL verifier -END)
    ###############
yaml_gen['tasks']=tasks_istanza_libera
with open(argv[1].split(".")[0]+'_libera.yaml', 'w') as file:
documents = yaml.dump(yaml_gen, file, default_flow_style=False)
nbf.write(nb, 'DAG.ipynb')
| StarcoderdataPython |
11208327 | from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from filebrowser_safe.functions import convert_filename
from django.utils.translation import ugettext_lazy as _
from django.db import models
from zipfile import ZipFile
from StringIO import StringIO
import os
from mezzanine.core.managers import DisplayableManager
from mezzanine.core.models import Displayable, Orderable
from mezzanine.utils.models import upload_to
from mezzanine.core.fields import FileField
from pari.album.models import ImageCollection, ImageCollectionImage
from pari.article.mixins import AdminThumbMixin
FACES_UPLOAD_DIR = "uploads/faces/"
class District(Displayable):
    """A geographic district that Face galleries are grouped under."""

    # Unique human-readable name; also drives default ordering and URL bucketing.
    district = models.CharField(_("District"), max_length=100, unique=True)
    # Optional free-form blurb shown alongside the district.
    district_description = models.CharField(_("Description(Optional)"), max_length=255, null=True, blank=True)

    objects = DisplayableManager()

    class Meta:
        verbose_name = _("District")
        verbose_name_plural = _("Districts")
        ordering = ("district",)
        app_label = "faces"

    def __unicode__(self):
        # Python 2 string representation: the district name itself.
        return self.district
class Face(Orderable, Displayable, AdminThumbMixin):
    """A per-district gallery of face images, optionally bulk-imported from a zip."""

    zip_import = models.FileField(verbose_name=_("Zip import"), blank=True,
                                  upload_to=upload_to("faces.Face.zip_import", "faces"),
                                  help_text=_("Upload a zip file containing images, and "
                                              "they'll be imported into this gallery."))
    image_collection = models.ForeignKey(ImageCollection)
    district = models.ForeignKey(District, null=True)
    is_pinned = models.BooleanField(verbose_name="Pin To Top", default=False)

    admin_thumb_field = "image"

    objects = DisplayableManager()

    @models.permalink
    def get_absolute_url(self):
        return "face-detail", (), {"alphabet": self.first_letter_of_district()}

    @property
    def pinned_image(self):
        # Filesystem path of the single image flagged as pinned.
        # NOTE(review): raises DoesNotExist if no image is pinned -- confirm callers handle it.
        return self.images.get(is_pinned=True).image_collection_image.file.path

    class Meta:
        verbose_name = _("Face")
        verbose_name_plural = _("Faces")
        ordering = ("district",)

    def first_letter_of_district(self):
        # Used to bucket faces alphabetically in URLs.
        return self.district.district[0].lower()

    def save(self, delete_zip_import=True, *args, **kwargs):
        """
        If a zip file is uploaded, extract any images from it and add
        them to the gallery, before removing the zip file.
        """
        # Update if an entry for this district already exists, so each
        # district keeps exactly one Face row.
        face_exist = Face.objects.filter(district=self.district)
        face_exist = face_exist and face_exist[0]
        if not self.pk and face_exist:
            self.image_collection = face_exist.image_collection
            self.image_collection_id = face_exist.image_collection_id
            self.pk = face_exist.pk
            self.site_id = face_exist.site_id
        if not hasattr(self, 'image_collection'):
            # First save for this district: create the backing collection.
            new_image_collection = ImageCollection(title=self.district.district)
            new_image_collection.save()
            self.image_collection = new_image_collection
        super(Face, self).save(*args, **kwargs)
        if self.zip_import:
            zip_file = ZipFile(self.zip_import)
            # import PIL in either of the two ways it can end up installed.
            try:
                from PIL import Image
            except ImportError:
                import Image
            for name in zip_file.namelist():
                data = zip_file.read(name)
                try:
                    # load() and verify() each need a fresh file object,
                    # hence Image.open() is called twice on the same bytes.
                    image = Image.open(StringIO(data))
                    image.load()
                    image = Image.open(StringIO(data))
                    image.verify()
                except:
                    # Skip any archive member PIL cannot read.
                    continue
                name = convert_filename(os.path.split(name)[1])
                path = os.path.join(FACES_UPLOAD_DIR, self.slug,
                                    name.decode("utf-8"))
                try:
                    saved_path = default_storage.save(path, ContentFile(data))
                except UnicodeEncodeError:
                    from warnings import warn
                    warn("A file was saved that contains unicode "
                         "characters in its path, but somehow the current "
                         "locale does not support utf-8. You may need to set "
                         "'LC_ALL' to a correct value, eg: 'en_US.UTF-8'.")
                    path = os.path.join(FACES_UPLOAD_DIR, self.slug,
                                        unicode(name, errors="ignore"))
                    saved_path = default_storage.save(path, ContentFile(data))
                face_image = FaceImage(image_file=saved_path)
                self.images.add(face_image)
            if delete_zip_import:
                zip_file.close()
                self.zip_import.delete(save=True)
def get_pinned_faces(alphabet):
    """Return pinned Faces whose district name starts with `alphabet` (case-insensitive)."""
    return Face.objects.filter(district__district__istartswith=alphabet).filter(is_pinned=True)
def get_pinned_face_images(face):
    """Return the images of `face` flagged as pinned."""
    return face.images.filter(is_pinned=True)
class FaceImage(Orderable, Displayable):
    """A single image inside a Face gallery, mirrored into an ImageCollectionImage."""

    face = models.ForeignKey("Face", related_name="images")
    image_collection_image = models.ForeignKey("album.ImageCollectionImage", related_name="face_image")
    is_pinned = models.BooleanField(verbose_name="Pin To Top", default=False)
    image_file = FileField(_("File"), max_length=200, format="Image", null=True,
                           upload_to=upload_to("album.ImageCollection.file", "faces"))

    class Meta:
        verbose_name = _("FaceImage")
        verbose_name_plural = _("FaceImages")
        app_label = "faces"

    def __unicode__(self):
        # `description` is inherited from Displayable.
        return self.description

    @models.permalink
    def get_absolute_url(self):
        name = "face-detail"
        return name, (), {"alphabet": self.face.first_letter_of_district()}

    @property
    def get_thumbnail(self):
        return self.image_collection_image.get_thumbnail

    def save(self, delete_audio_file=True, *args, **kwargs):
        # NOTE(review): `delete_audio_file` is accepted but never used -- it
        # looks copied from an audio model; confirm before removing.
        self.gen_description = False
        if not hasattr(self, 'image_collection_image'):
            # Mirror this image into the face's backing image collection.
            image_collection_image = ImageCollectionImage(file=self.image_file)
            self.face.image_collection.images.add(image_collection_image)
            self.image_collection_image = image_collection_image
        super(FaceImage, self).save(*args, **kwargs)
def get_face_images_by_district_first_letter(alphabet):
    """Return FaceImages whose district name starts with `alphabet`,
    ordered case-insensitively by district name.

    NOTE(review): the extra-select references a bare `district` column;
    verify it resolves unambiguously in the generated SQL join.
    """
    return FaceImage.objects.filter(face__district__district__istartswith=alphabet).extra(
        select={'upper_district': 'upper(district)'}).order_by('upper_district')
| StarcoderdataPython |
3224093 | <reponame>cassiobotaro/Rivendell<filename>learningopencv/avivideo.py
import sys
import cv2

# Key code returned by cv2.waitKey for the Enter key.
ENTER = 13

capture = cv2.VideoCapture('Lupi.AVI')
# Bug fix: VideoCapture never returns None; isOpened() is the documented
# way to detect a missing or unreadable video file.
if not capture.isOpened():
    print("Video not found.", file=sys.stderr)
    sys.exit(1)

captured, frame = capture.read()
key = 0
# Play at roughly 30 fps (33 ms per frame) until the video ends or Enter is pressed.
while captured and key != ENTER:
    cv2.imshow('video', frame)
    captured, frame = capture.read()
    key = cv2.waitKey(delay=33)

capture.release()
cv2.destroyAllWindows()
| StarcoderdataPython |
323636 | <reponame>ikota3/images_to_pdf
import os
import fire
import img2pdf
from validator import is_dir, is_extension, is_bool
from typing import Union
from utils import natural_keys, show_info, setup_logger, append_prefix, UserResponse, ask
logger = setup_logger(__name__)
class PDFConverter():
    """Walk an input directory tree and convert the images found in each
    directory into one PDF per directory (named after the directory)."""

    def __init__(
            self,
            input_dir: str = '',
            output_dir: str = '',
            extensions: Union[str, tuple[str]] = None,
            force: bool = False,
            yes: bool = False
    ):
        """Initialize
        Args:
            input_dir (str): Input directory. Defaults to ''.
            output_dir (str): Output directory. Defaults to ''.
            extensions (Union[str, tuple[str]]): Extensions. Defaults to ('jpg', 'jpeg', 'png').
            force (bool): Flag for overwriting converted pdf file. Defaults to False.
            yes (bool): Flag for asking to execute or not. Defaults to False.
        """
        self.input_dir: str = input_dir
        self.output_dir: str = output_dir
        if not extensions:
            extensions = ('jpg', 'jpeg', 'png')
        # Store extensions with a leading dot so str.endswith() can match them.
        self.extensions: Union[str, tuple[str]] = append_prefix(extensions, '.')
        self.force: bool = force
        self.yes: bool = yes

    def __input_is_valid(self) -> bool:
        """Validator for input.

        Returns:
            bool: True if is valid, False otherwise.
        """
        is_valid = True
        # Check input_dir
        if not is_dir(self.input_dir):
            logger.error(
                'You must type a valid directory for input directory.'
            )
            is_valid = False
        # Check output_dir
        # NOTE(review): the docstring of convert() claims output_dir falls
        # back to input_dir, but no fallback is implemented -- confirm intent.
        if not is_dir(self.output_dir):
            logger.error(
                'You must type a valid directory for output directory.'
            )
            is_valid = False
        # Check extensions
        if not is_extension(extension):
            pass  # placeholder (see loop below)
        for extension in self.extensions:
            if not is_extension(extension):
                logger.error('You must type at least one extension.')
                is_valid = False
        # Check force
        if not is_bool(self.force):
            logger.error(
                'You must just type -f flag. No need to type a parameter.'
            )
            is_valid = False
        # Check yes
        if not is_bool(self.yes):
            logger.error(
                'You must just type -y flag. No need to type a parameter.'
            )
            is_valid = False
        return is_valid

    def convert(self):
        """Convert the images contained in the directory to pdf.

        Synopsis:
            python src/convert_to_pdf.py convert -i 'path/to/dir' [OPTIONS]

        Description:
            Convert images contained in each directories to pdf.

        Options:
            -o, --output_dir <path/to/dir>         Where the converted pdf file will be output.
            -e, --extensions <ext1,ext2,ext3,...>  What kind of image extensions to be converted to pdf.
                                                   Defaults to jpg, jpeg and png.
            -f, --force                            Overwrite an existing pdf with the same name.
            -y, --yes                              Execute immediately without asking.
        """
        show_info(self, logger)
        if not self.__input_is_valid():
            logger.info('Input parameter is not valid. Try again.')
            return

        if not self.yes:
            user_response = ask()
            if user_response == UserResponse.NO:
                logger.info('Abort...')
                return

        logger.info('Start converting images to pdf...')
        for current_dir, dirs, files in os.walk(self.input_dir):
            logger.info(f'Watching {current_dir}.')
            # Collect matching images in natural (human) sort order.
            images = []
            for filename in sorted(files, key=natural_keys):
                if filename.endswith(self.extensions):
                    path = os.path.join(current_dir, filename)
                    images.append(path)
            if not images:
                logger.info(
                    f'There are no {", ".join(self.extensions).upper()} files at {current_dir}.'
                )
                continue

            pdf_filename = os.path.join(
                self.output_dir, f'{os.path.basename(current_dir)}.pdf'
            )
            if self.force:
                with open(pdf_filename, 'wb') as f:
                    f.write(img2pdf.convert(images))
                logger.info(f'Created {pdf_filename}!')
            else:
                if os.path.exists(pdf_filename):
                    logger.warning(f'{pdf_filename} already exist! SKIP!')
                    continue
                with open(pdf_filename, 'wb') as f:
                    f.write(img2pdf.convert(images))
                logger.info(f'Created {pdf_filename}!')
        # Bug fix: the original logged 'Abort...' here even after a
        # successful run; report completion instead.
        logger.info('Done converting images to pdf.')
if __name__ == '__main__':
    # Expose the converter through python-fire's auto-generated CLI.
    fire.Fire(PDFConverter)
| StarcoderdataPython |
88275 | import typing
from smartcast import is_dict, is_int, is_bool, is_str, is_float, is_union, is_list
def test_is_union():
    """Both Optional and explicit Union annotations are detected as unions."""
    for union_type in (typing.Optional[int], typing.Union[int, str]):
        assert is_union(union_type)
def test_is_list():
    """Typing generics and the builtin list are all recognised as lists."""
    for list_type in (typing.List, typing.List[int], list):
        assert is_list(list_type)
def test_is_dict():
    """Typing generics and the builtin dict are all recognised as dicts."""
    for dict_type in (typing.Dict, typing.Dict[int, str], dict):
        assert is_dict(dict_type)
def test_is_primitive():
    """Each primitive builtin is recognised by its matching predicate."""
    checks = ((is_str, str), (is_int, int), (is_float, float), (is_bool, bool))
    for predicate, primitive in checks:
        assert predicate(primitive)
1865103 | <gh_stars>10-100
import numpy as np
class NormalizeImagePreprocessor:
    """Convert a byte image (integer values in {0, ..., 255}) into a float32
    image with values in [0, 1] and one extra trailing channel dimension."""

    def __init__(self):
        pass

    def __call__(self, image):
        # Scale into [0, 1], then append a channel axis at the end.
        scaled = image.astype(np.float32) / 255
        return np.expand_dims(scaled, axis=scaled.ndim)
4902035 | #-*-coding:utf8-*-
import sys, time, traceback, random, os, json
from threading import Thread
from subprocess import *
from Queue import Queue
def run_cmd(cmd):
print cmd
p = Popen(cmd,shell=True)
p.wait()
return
# cmd upload local files to remote files
def scp2remote(srcFile, node, destFile):
cmd = 'scp {0} {1}:{2}'.format(srcFile, node.get_user_at_ip(), destFile)
print cmd
p = Popen(cmd,shell=True)
p.wait()
# cmd download remote files to local files
def scp2local(node, srcFile, destFile):
cmd = 'scp {0}:{1} {2}'.format(node.get_user_at_ip(), srcFile, destFile)
print cmd
p = Popen(cmd,shell=True)
p.wait()
# cmd
def mkdir(node, path):
    """Create `path` (with parents) on the remote node."""
    remote = node.get_user_at_ip()
    Popen('ssh -x -t {0} "mkdir -p {1}"'.format(remote, path), shell=True).wait()
def get_proc_ids(node, procKeywords):
cmd = 'ssh -x -t {0} "ps a -u wsx"'.format(node.get_user_at_ip())
print cmd
proc_ids = []
ret = Popen(cmd,shell=True,stdout=PIPE,stderr=PIPE,stdin=PIPE).stdout
for line in ret.readlines():
if 'ssh' in line or 'bash' in line:
continue
is_include_all = True
for w in procKeywords:
if not w in line:
is_include_all = False
break
if is_include_all:
proc_ids.append(int(line.strip().split(' ')[0]))
return proc_ids
# 检查一个节点是否是free的,主要看下面进程数有没有达到max num
def is_node_free(node, procKeywords, procMaxNum):
    """True while the node runs fewer than procMaxNum matching processes."""
    return len(get_proc_ids(node, procKeywords)) < procMaxNum
# 杀掉所有包含关键词的进程
def kill_job(node, procKeywords):
    """Kill every remote process whose command line matches all keywords."""
    user_at_ip = node.get_user_at_ip()
    for pid in get_proc_ids(node, procKeywords):
        Popen('ssh -x -t {0} "kill {1}"'.format(user_at_ip, str(pid)), shell=True).wait()
class WorkerStopToken:  # used to notify the worker to stop or if a worker is dead
    """Sentinel object placed on the job queue to tell workers to shut down."""
    pass
class Node:
    """A remote machine that jobs can be dispatched to over ssh."""

    def __init__(self, ip, user, num_proc):
        # Host address, login user, and max concurrent jobs on this box.
        self.ip = ip
        self.user = user
        self.num_proc = num_proc

    def get_user_at_ip(self):
        """Return the 'user@ip' string used by ssh/scp commands."""
        return '{0}@{1}'.format(self.user, self.ip)
class Job:
    """One remote run: a binary plus its config/log files in local and remote dirs."""

    def __init__(self, job_id, bin, local_work_dir, remote_work_dir, conf_file, log_file):
        self.job_id = job_id
        self.conf_file = conf_file
        self.log_file = log_file
        self.remote_work_dir = remote_work_dir
        self.local_work_dir = local_work_dir
        self.bin = bin

    def gen_identity_str_set(self):
        """Keywords that uniquely identify this job in a `ps` listing."""
        return [self.bin, self.conf_file]

    def local_conf_file(self):
        return '{0}{1}'.format(self.local_work_dir, self.conf_file)

    def local_log_file(self):
        return '{0}{1}'.format(self.local_work_dir, self.log_file)

    def remote_conf_file(self):
        return '{0}{1}'.format(self.remote_work_dir, self.conf_file)

    def remote_log_file(self):
        return '{0}{1}'.format(self.remote_work_dir, self.log_file)
class SshWorker(Thread):
    """Worker thread bound to one node: pulls jobs from the shared queue,
    launches each over ssh, waits for completion and retrieves the log."""

    def __init__(self,name,node,dev,job_queue,options):
        Thread.__init__(self)
        self.name = name
        self.node = node
        self.dev = dev
        self.job_queue = job_queue
        self.options = options  # expects {'max_proc_num': ...}
        assert self.dev == 'cpu' or self.dev == 'gpu'

    def run(self):
        while True:
            # Stagger start-up so all workers don't probe the node at the
            # same instant and collectively exceed the process limit.
            time.sleep(random.randint(1,100))
            isDone = False
            while True:
                job = self.job_queue.get()
                if job is WorkerStopToken:
                    # Put the token back so sibling workers also see it.
                    self.job_queue.put(job)
                    print('all job done, worker {0} stop.'.format(self.name))
                    isDone = True
                    break
                if not is_node_free(self.node, [job.bin], self.options['max_proc_num']):
                    # Node is saturated: requeue the job and back off.
                    print '{0}: is waiting job begin...'.format(self.name)
                    self.job_queue.put(job)
                    time.sleep(600)
                else:
                    break
            if isDone:
                break
            try:
                p = self.run_one(job)
            except:
                # we failed, do it again: requeue the job and retry later.
                traceback.print_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
                self.job_queue.put(job)
                time.sleep(10)
            # Poll until no process matching this job remains on the node.
            while not self.is_job_done(job.gen_identity_str_set()):
                print '{0}: is waiting job end...'.format(self.name)
                time.sleep(30)
            self.get_job_result(job)
            time.sleep(10)

    def run_one(self, job):
        """Copy the config to the node and launch the binary detached (ssh -f)."""
        scp2remote(job.local_conf_file(), self.node, job.remote_conf_file())
        cmd = 'ssh -x -t -f {0} "cd {1};./{2} {3}"'.\
            format(self.node.get_user_at_ip(), \
            job.remote_work_dir, \
            job.bin, \
            job.conf_file)
        print cmd
        p = Popen(cmd,shell=True)
        return p

    def is_job_done(self, procKeywords):
        # Done when no remote process matches the job's keywords.
        return is_node_free(self.node, procKeywords, 1)

    def get_job_result(self, job):
        """Fetch the job's log file back into the local work directory."""
        scp2local(self.node, job.remote_log_file(), job.local_log_file())
def get_nodes():
    """Return the pool of remote nodes jobs may be dispatched to.

    Hard-coded cluster configuration; edit this list to change the pool.
    Other known hosts (user 'wsx'): 10.60.1.169, 10.60.1.168,
    10.60.0.52, 10.60.0.59, 10.60.0.53.
    """
    node_nb = Node('10.61.2.215', 'wsx', 12)
    return [node_nb]
def main():
run_nodes = get_nodes()
# kill_job(run_nodes[0] , ['textnet'])
# kill_job(run_nodes[1] , ['textnet'])
# kill_job(run_nodes[2] , ['textnet'])
# exit(0)
# max_proc_num = sys.args[1]
# bin = sys.args[2]
# local_dir = sys.args[3]
# remote_dir = sys.args[4]
bin = 'textnet'
# local_dir = '/home/wsx/exp/match/sentence/bilstm_tensor_dpool/run.17/'
# local_dir = '/home/wsx/exp/match/qa_50/ctnn/run.3/'
# local_dir = '/home/wsx/exp/match/sentence/word_sim_dpool/run.2/'
# local_dir = '/home/wsx/exp/match/qa_top1k/word_sim_dpool/run.1/'
# local_dir = '/home/wsx/exp/match/qa_top1k/mul_cnn_tensor_dpool/run.1/'
# local_dir = '/home/wsx/exp/match/qa_top10/bilstm_sim_dpool/run.1/'
# local_dir = '/home/wsx/exp/match/qa_top1k_4/bilstm_sim_dpool/run.2/'
local_dir = '/home/wsx/exp/match/qa_top1k_4/lstmd2/run.64/'
# local_dir = '/home/wsx/exp/match/qa_top1k_4/pyramid/run.3/'
# local_dir = '/home/wsx/exp/match/qa_top1k_4/bilstm_lstmd2/run.3/'
# local_dir = '/home/wsx/exp/match/qa_top1k_4/lstmd2/gate_dpool/run.2/'
# local_dir = '/home/wsx/exp/match/msrp/lstmd2/run.4/'
# local_dir = '/home/wsx/exp/match/paper/lstmd2/run.9/'
# local_dir = '/home/wsx/exp/match/paper/cnn/run.1/'
# local_dir = '/home/wsx/exp/match/sentence/mul_cnn_tensor_dpool/run./'
# local_dir = '/home/wsx/exp/match/qa_top1k_4/bilstm_lstmd2/run.1/'
# local_dir = '/home/wsx/exp/match/qa_top1k_4/arci/run.1/'
# local_dir = '/home/wsx/exp/match/qa_top1k_4/bilstm_sim_dpool/run.9/'
# local_dir = '/home/wsx/exp/match/qa_top1k_4/bilstm_tensor_dpool/run.3/'
# local_dir = '/home/wsx/exp/match/qa_top1k_4/bilstm_concat/run.3/'
# local_dir = '/home/wsx/exp/match/qa_top1k_4/mul_cnn_tensor_dpool/run.2/'
# local_dir = '/home/wsx/exp/match/qa_balance/bilstm_sim_dpool/run.5/'
# local_dir = '/home/wsx/exp/nbp/tf/run.8/'
remote_dir = '/home/wsx/log.tmp/'
conf_files = os.listdir(local_dir)
print conf_files
jobQue = Queue(0)
for i, conf_file in enumerate(conf_files):
if 'cfg' not in conf_file and 'model' not in conf_file:
continue
conf = json.loads(open(local_dir+conf_file).read())
log_file = conf['log']
job = Job(i, bin, local_dir, remote_dir, conf_file, log_file)
jobQue.put(job)
jobQue.put(WorkerStopToken)
worker_id = 0
for node in run_nodes:
for proc in range(node.num_proc):
worker = SshWorker('worker_'+str(worker_id), node, 'cpu', jobQue, {'max_proc_num':node.num_proc})
print 'start worker:', worker_id
worker.start()
worker_id += 1
if __name__ == '__main__':
main()
| StarcoderdataPython |
8055106 | <gh_stars>0
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;controller_manager;control_toolbox;pluginlib;hardware_interface;transmission_interface;joint_limits_interface;urdf;angles".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lgazebo_ros_control;-ldefault_robot_hw_sim".split(';') if "-lgazebo_ros_control;-ldefault_robot_hw_sim" != "" else []
PROJECT_NAME = "gazebo_ros_control"
PROJECT_SPACE_DIR = "/root/catkin_ws/install"
PROJECT_VERSION = "2.5.20"
| StarcoderdataPython |
195098 | <reponame>vector-ai/vectorhub
from ...import_utils import *
if is_all_dependency_installed('audio-encoder'):
import librosa
import soundfile as sf
import tempfile
import shutil
import os
from urllib.request import urlopen, Request
from urllib.parse import quote
import io
import numpy as np
from ...base import Base2Vec, catch_vector_errors
class BaseAudio2Vec(Base2Vec):
    """Shared audio loading and bulk-encoding helpers for audio-to-vector models."""

    def read(self, audio: str, new_sampling_rate: int = 16000):
        """Read audio from a URL or file path (str), raw bytes, or BytesIO,
        and return it resampled to `new_sampling_rate` as a numpy array.
        """
        if type(audio) is str:
            if 'http' in audio:
                # Download remote audio to a temp file first.
                # NOTE(review): the temp file is never removed afterwards
                # (mkstemp leaks the path) -- confirm whether cleanup is
                # handled elsewhere.
                fd, fp = tempfile.mkstemp()
                os.write(fd, urlopen(Request(quote(audio, safe=':/?*=\''),
                    headers={'User-Agent': "Magic Browser"})).read())
                # mp3 goes through librosa; other formats through soundfile.
                if '.mp3' in audio:
                    data, sampling_rate = librosa.load(fp, dtype='float32')
                else:
                    data, sampling_rate = sf.read(fp, dtype='float32')
                os.close(fd)
            else:
                data, sampling_rate = sf.read(audio, dtype='float32')
        elif type(audio) is bytes:
            data, sampling_rate = sf.read(io.BytesIO(audio), dtype='float32')
        elif type(audio) is io.BytesIO:
            data, sampling_rate = sf.read(audio, dtype='float32')
        # Transpose so channels lead, then resample to the requested rate.
        return np.array(librosa.resample(data.T, sampling_rate, new_sampling_rate))

    @catch_vector_errors
    def bulk_encode(self, audios, vector_operation='mean'):
        # Encode each clip independently; vector_operation is forwarded to encode().
        return [self.encode(c, vector_operation) for c in audios]
| StarcoderdataPython |
11388826 | # encoding: UTF-8
"""
Created on 2018/09/20
@author: Freeman
布林带过滤策略:
参数组合:(, , )
每次交易n手
"""
import numpy as np
import talib
from pymongo import MongoClient, ASCENDING
import datetime
from vnpy.trader.vtObject import VtBarData
from vnpy.trader.vtConstant import EMPTY_STRING
from vnpy.trader.app.ctaStrategy.ctaTemplate import (CtaTemplate,
BarGenerator,
ArrayManager)
########################################################################
class TemplateStrategy(CtaTemplate):
    """Bollinger-band filter strategy: trade breakouts of the bands, filtered
    by a longer moving average, with a trailing stop at the middle band."""

    # Strategy information
    className = 'BollFltStrategy'
    author = u'freeman'

    # Trading parameters
    slip_num = 5        # slippage, in multiples of the minimum price tick
    process_period = 1  # execution bar period (minutes)

    # Strategy parameters
    alpha = 'fu'        # product symbol letters of the traded contract
    midNum = 60         # Bollinger middle-band (SMA) period
    diffNum = 10        # extra periods for the long filter MA vs. the middle band
    stdNum = 1.0        # std-dev multiple for the upper/lower bands
    timePeriod = 15     # signal bar period (minutes)
    fixedSize = 3       # lots per trade
    initDays = 90       # days of history loaded for initialisation

    # Strategy variables
    mid_line = np.nan   # Bollinger middle band
    up_line = np.nan    # Bollinger upper band
    down_line = np.nan  # Bollinger lower band
    std = np.nan        # rolling std-dev over the last midNum bars
    ma_flt = np.nan     # long-term filter moving average
    open_long = False   # go-long signal
    open_short = False  # go-short signal

    # Stop-loss / take-profit variables
    trade_price = np.nan        # last fill price
    stop_price = np.nan         # fixed stop price set at entry
    stop_line = np.nan          # effective stop line (max/min of fixed stop and mid band)
    stop_order_status = True    # whether a new local stop order may be placed
    last_price = 0              # latest tick price

    # Position / account information
    rate = 0.3/10000            # commission rate
    capital = 50000             # total equity
    deposit = 0                 # margin in use
    pos_long = 0                # long position
    pos_short = 0               # short position
    cash = capital              # free cash
    original_capital = capital  # initial capital
    account_datetime = 0        # last account update time

    # Emergency flag: 'run' = normal operation; 'stop' = close all positions and halt.
    status = 'run'
    # If not None, switch to the given new dominant contract.
    replaceContract = None

    # Parameter list (names shown/configured by the framework)
    paramList = ['name',
                 'className',
                 'author',
                 'vtSymbol',
                 'alpha',
                 'timePeriod',
                 'fixedSize',
                 'initDays',
                 'status',
                 'rate',
                 'replaceContract',
                 'midNum',
                 'stdNum',
                 'diffNum']

    # Variable list (names displayed by the framework)
    varList = ['inited',
               'trading',
               'pos']

    # Sync list (variables persisted to the database)
    syncList = ['pos', 'pos_long', 'pos_short', 'capital', 'original_capital',
                'cash', 'deposit', 'trade_price', 'last_price','account_datetime',
                'stop_price', 'stop_line', 'mid_line', 'up_line', 'down_line',
                'ma_flt']

    #----------------------------------------------------------------------
    def __init__(self, ctaEngine, setting):
        """Constructor"""
        super(self.__class__, self).__init__(ctaEngine, setting)
        # Create bar generators / array managers: one pair for the signal
        # period (timePeriod) and one for the execution period.
        self.long_num = self.midNum + self.diffNum
        self.bg_t = BarGenerator(self.onBar, self.timePeriod, self.onTminBar)
        self.am_t = ArrayManager(size=self.long_num + 1)
        self.bg = BarGenerator(self.onBar, self.process_period, self.onXminBar)
        self.am = ArrayManager(size=100)

    #----------------------------------------------------------------------
    def onInit(self):
        """Initialise the strategy (framework callback)."""
        self.writeCtaLog(u'%s策略初始化' %self.name)
        # Load contract parameters (tick size etc.).
        self.writeCtaLog(u'%s读取合约参数' %self.name)
        self.loadContractDetail(self.alpha)
        self.slip = self.tickPrice * self.slip_num
        # Load history and replay it to warm up indicator state.
        initData = self.loadBar(self.initDays)
        for bar in initData:
            self.onBar(bar)
        self.putEvent()

    #----------------------------------------------------------------------
    def onStart(self):
        """Start the strategy (framework callback)."""
        self.writeCtaLog(u'%s策略启动' %self.name)
        self.putEvent()

    #----------------------------------------------------------------------
    def onStop(self):
        """Stop the strategy (framework callback)."""
        self.writeCtaLog(u'%s策略停止' %self.name)
        self.putEvent()

    #----------------------------------------------------------------------
    def onTick(self, tick):
        """Handle a market tick (framework callback)."""
        # Only one BarGenerator needs to build 1-minute bars from ticks.
        self.bg.updateTick(tick)
        self.last_price = tick.lastPrice

    #----------------------------------------------------------------------
    def onBar(self, bar):
        """Handle a 1-minute bar (framework callback)."""
        # The execution generator is updated first, then the signal generator.
        self.bg.updateBar(bar)
        self.bg_t.updateBar(bar)
        # Keep account statistics up to date in real time.
        if self.trading and self.am_t.inited:
            self.caculateAccountNoTrade(bar)
            self.saveRealtimeStrategyInfor()

    #---------------------------------------------------------------------------
    def onTminBar(self, bar):
        """Generate trading signals on each timePeriod-minute bar."""
        # Append the bar to the signal-period array.
        self.am_t.updateBar(bar)
        close_array = self.am_t.close
        if not self.am_t.inited:
            return
        # Compute the Bollinger bands and the long filter MA.
        self.std = np.std(close_array[-self.midNum : ], ddof=1)  # sample std-dev
        self.mid_line = np.mean(close_array[-self.midNum : ])    # middle band
        self.up_line = self.mid_line + self.stdNum * self.std    # upper band
        self.down_line = self.mid_line - self.stdNum * self.std  # lower band
        self.ma_flt = np.mean(close_array[-self.long_num : ])    # long filter MA
        # Trend filter: middle band above/below the long MA.
        trend_up = self.mid_line >= self.ma_flt
        trend_down = self.mid_line <= self.ma_flt
        # Entry signals: close beyond the band in the direction of the trend.
        self.open_long = trend_up and bar.close > self.up_line
        self.open_short = trend_down and bar.close < self.down_line

    #----------------------------------------------------------------------
    def onXminBar(self, bar):
        """Place/refresh orders on each execution-period bar."""
        # Cancel all previously issued orders.
        self.cancelAll()
        # Store the bar.
        am = self.am
        am.updateBar(bar)
        # Bail out while the signal arrays are still warming up.
        if not self.am_t.inited:
            # Live trading: warn that the initialisation window is too short.
            if self.trading:
                print u'%s, %s策略, 初始化天数不足' %(bar.datetime.strftime('%Y-%m-%d %H:%M:%S'), self.name)
                print u'当前已经储存%s分钟K线数量:%s' %(self.timePeriod, self.am_t.count)
            return
        #------------------------------------------------------------------
        # Emergency: close all positions and stop the strategy.
        if self.status == 'stop':
            self.closePositionAndStop(bar)
            return
        # Switch to a new dominant contract if requested.
        if self.replaceContract:
            self.replaceDominantContract(bar)
            return
        #------------------------------------------------------------------
        # Currently flat: open in the signalled direction.
        if self.pos == 0 and self.trading:
            if self.open_long:
                self.buy(bar.close + self.slip, self.fixedSize, False)
            elif self.open_short:
                self.short(bar.close - self.slip, self.fixedSize, False)
        # Holding a long position: refresh the trailing stop.
        if self.pos > 0 and self.trading and self.stop_order_status:
            self.stop_line = max(self.stop_price, self.mid_line)
            self.sell(self.stop_line, abs(self.pos), True)
        # Holding a short position: refresh the trailing stop.
        if self.pos < 0 and self.trading and self.stop_order_status:
            self.stop_line = min(self.stop_price, self.mid_line)
            self.cover(self.stop_line, abs(self.pos), True)
        # NOTE: the original source kept a commented-out debug-print block
        # and a commented saveSyncData() call here.
        # Emit a status-update event.
        self.putEvent()

    #----------------------------------------------------------------------
    def onOrder(self, order):
        """Handle an order update (framework callback)."""
        # A cancelled stop order means a new local stop may be placed.
        if order.status == u'已撤销':
            self.stop_order_status = True

    #----------------------------------------------------------------------
    def onTrade(self, trade):
        """Handle a trade (fill) update."""
        self.trade_price = trade.price
        if trade.offset == u'开仓':
            # Record entry context for stop placement.
            open_mid = self.mid_line        # middle band at entry
            dif = self.stdNum * self.std    # band half-width
            self.stop_order_status = True   # local stop order may be placed
            # Immediately register a local stop order after opening.
            if trade.direction == u'多':
                open_stop = self.trade_price - dif * 1.3  # 1.3x band width below entry
                self.stop_price = max(open_mid, open_stop)  # initial fixed stop
                self.sell(self.stop_price, abs(self.pos), True)
            elif trade.direction == u'空':
                open_stop = self.trade_price + dif * 1.3
                self.stop_price = min(open_mid, open_stop)
                self.cover(self.stop_price, abs(self.pos), True)
        # Clear state on a closing trade.
        else:
            self.open_long = False
            self.open_short = False
            self.stop_price = np.nan
            self.stop_line = np.nan
        #--------------------------------------------------------------------
        # Persist the trade record to the database.
        self.insertTrade('TradeRecord',self.name,trade)
        # Update account statistics for this trade.
        self.caculateAccountTrade(trade)
        # Emit a status-update event.
        self.putEvent()

    #----------------------------------------------------------------------
    def onStopOrder(self, so):
        """Handle a local stop-order update."""
        if so.status == u'已触发' or so.status == u'等待中':
            self.stop_order_status = False
        elif so.status == u'已撤销':
            self.stop_order_status = True
| StarcoderdataPython |
6409301 | <filename>other/dingding/dingtalk/api/rest/OapiEduCircleTopiclistRequest.py<gh_stars>0
'''
Created by auto_sdk on 2020.11.02
'''
from dingtalk.api.base import RestApi
class OapiEduCircleTopiclistRequest(RestApi):
    """Auto-generated request wrapper for the dingtalk.oapi.edu.circle.topiclist API."""

    def __init__(self,url=None):
        RestApi.__init__(self,url)
        # Request parameters, filled in by the caller before execution.
        self.biz_type = None
        self.class_id = None
        self.userid = None

    def getHttpMethod(self):
        # This endpoint is invoked via HTTP POST.
        return 'POST'

    def getapiname(self):
        return 'dingtalk.oapi.edu.circle.topiclist'
| StarcoderdataPython |
4824099 | <gh_stars>1-10
#!/usr/bin/env python2
# Copyright (c) 2017 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test Proton interface (provides AMQP 1.0 messaging support).
#
# Requirements:
# Python library for Qpid Proton:
# https://pypi.python.org/pypi/python-qpid-proton
# To install:
# pip install python-qpid-proton
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, bytes_to_hex_str, \
start_nodes
from proton.handlers import MessagingHandler
from proton.reactor import Container
import threading
class Server(MessagingHandler):
    """AMQP 1.0 listener that collects hashtx/hashblock notifications and
    stops its container after `limit` messages have been received."""

    def __init__(self, url, limit):
        super(Server, self).__init__()
        self.url = url
        self.counter = limit      # messages remaining before shutdown
        self.blockhashes = []     # block hashes in arrival order
        self.txids = []           # coinbase txids in arrival order
        self.blockseq = -1        # last sequence number seen per topic
        self.txidseq = -1

    def on_start(self, event):
        print "Proton listening on:", self.url
        self.container = event.container
        self.acceptor = event.container.listen(self.url)

    def on_message(self, event):
        m = event.message
        hash = bytes_to_hex_str(m.body)
        sequence = m.properties['x-opt-sequence-number']
        if m.subject == "hashtx":
            self.txids.append(hash)
            # Test that sequence id is incrementing
            assert(sequence == 1 + self.txidseq)
            self.txidseq = sequence
        elif m.subject == "hashblock":
            self.blockhashes.append(hash)
            # Test that sequence id is incrementing
            assert(sequence == 1 + self.blockseq)
            self.blockseq = sequence
        self.counter = self.counter - 1
        # Stop the reactor once the expected number of messages arrived.
        if self.counter == 0:
            self.container.stop()
class ProtonTest (BitcoinTestFramework):
    """End-to-end test: mine blocks on an AMQP-enabled node and verify the
    proton server received every block hash and matching coinbase txid."""

    port = 25672
    numblocks = 10  # must be even, as two nodes generate equal number
    assert(numblocks % 2 == 0)

    def setup_nodes(self):
        # Launch proton server in background thread
        # It terminates after receiving numblocks * 2 messages (one for coinbase, one for block)
        self.server = Server("127.0.0.1:%i" % self.port, self.numblocks * 2)
        self.container = Container(self.server)
        self.t1 = threading.Thread(target=self.container.run)
        self.t1.start()
        # Only node 0 publishes AMQP notifications.
        return start_nodes(4, self.options.tmpdir, extra_args=[
            ['-experimentalfeatures', '-debug=amqp', '-amqppubhashtx=amqp://127.0.0.1:'+str(self.port),
             '-amqppubhashblock=amqp://127.0.0.1:'+str(self.port)],
            [],
            [],
            []
        ])

    def run_test(self):
        self.sync_all()
        baseheight = self.nodes[0].getblockcount()  # 200 blocks already mined
        # generate some blocks, half on each of two nodes
        self.nodes[0].generate(self.numblocks/2)
        self.sync_all()
        self.nodes[1].generate(self.numblocks/2)
        self.sync_all()
        # wait for server to finish
        self.t1.join()
        # sequence numbers have already been checked in the server's message handler
        # sanity check that we have the right number of block hashes and coinbase txids
        assert_equal(len(self.server.blockhashes), self.numblocks)
        assert_equal(len(self.server.txids), self.numblocks)
        # verify that each block has the correct coinbase txid
        for i in xrange(0, self.numblocks):
            height = baseheight + i + 1
            blockhash = self.nodes[0].getblockhash(height)
            assert_equal(blockhash, self.server.blockhashes[i])
            resp = self.nodes[0].getblock(blockhash)
            coinbase = resp["tx"][0]
            assert_equal(coinbase, self.server.txids[i])

if __name__ == '__main__':
    ProtonTest().main()
| StarcoderdataPython |
1749293 | <filename>Chapter 8/large_shirts.py
def make_shirt(size='L', message='I love Python'):
    """Describe a shirt: its size and the message printed on it."""
    summary = f'This shirt is size: {size}, and has a message: {message}.'
    print(summary)
# A large shirt and a medium shirt, both with the default message.
make_shirt()
make_shirt(size = 'M')
# A shirt of any size carrying a custom message.
make_shirt(size = 'S', message = 'I love Python & You')
| StarcoderdataPython |
11300509 | import argparse
import json
import sys
import signal
import logging
from datetime import datetime
from os import path
# Message topic for IMU data, pre-encoded to bytes (presumably for zeroMQ
# framing — confirm against the broker side).
IMU_TOPIC = "imu".encode('utf-8')
def signal_handler_exit(sig, frame):
    """SIGINT handler: announce shutdown and terminate with exit status 0."""
    print('* msb_imu: bye')
    raise SystemExit(0)
def dump_config_file(config : dict):
    """Write *config* as pretty-printed JSON to config['dump_config_file'].

    Fixed: the original passed one big string to ``writelines``, which
    iterates it character by character; ``json.dump`` streams the identical
    output directly into the file.
    """
    with open(config['dump_config_file'], 'w') as config_file_handle:
        json.dump(config, config_file_handle, indent=4)
def read_parse_config_file(config : dict) -> dict:
    """Overlay values from the JSON file at config['config_file'] onto *config*.

    Only keys already present in *config* are overridden; unknown keys are
    reported and ignored, and the 'config_file' key itself is never replaced.
    Exits the process if the file cannot be opened (original behaviour).

    Fixed: the file handle was never closed; it is now managed by ``with``.
    """
    try:
        config_file_handler = open(config['config_file'], 'r')
    except Exception as e:
        print(f'failed to open config file: {e}')
        sys.exit(-1)
    with config_file_handler:
        config_file_args = json.load(config_file_handler)
    for key, value in config_file_args.items():
        if key == 'config_file':
            continue
        if key in config:
            print(f'parsing {key} : {value}')
            config[key] = value
        else:
            print(f'key not found: {key} omitting')
    return config
# 1. read config file
# 2. convert from json to dict
# 3. iterate over entries in dictionary and override parsed arguments
# build a config named tuple
def parse_arguments() -> dict:
    """Define and parse the command line interface of the IMU service.

    Returns the parsed options as a plain ``dict``, keyed by the long option
    names (argparse converts dashes to underscores).
    """
    # (flag, add_argument keyword arguments) — one table so the option
    # inventory is easy to scan and extend.
    option_table = [
        ('--verbose',
         dict(action='store_true', help='for debugging purposes')),
        ('--print',
         dict(action='store_true', help='use this flag to print data to stdout')),
        ('--logfile',
         dict(help='path to logfile', type=str, default="")),
        ('--imu-output-div',
         dict(help='sensor output data rate. calculated by 1100/(1+output_data_div). default 54 (20 Hz)',
              default=54, type=int)),
        ('--sample-rate',
         dict(help='polling frequency with which data is retrieved from the sensor. must be >= ODR',
              default=20, type=int)),
        ('--acc-range',
         dict(help=' ', default='2g', type=str)),
        ('--acc-filter',
         dict(help='low pass filter to be applied to the raw data coming from the sensor. options are 1 - 6',
              default=1, type=int)),
        ('--gyro-range',
         dict(help=' ', default='500dps', type=str)),
        ('--gyro-filter',
         dict(help='low pass filter to be applied to the raw data coming from the gyro. options are 1 - 6',
              default=1, type=int)),
        ('--config-file',
         dict(help='configuration file: overwrite all commandline options!',
              default='', type=str)),
        ('--dump-config-file',
         dict(help='dumps the default config values into a file', default='')),
        ('--ipc-port',
         dict(help='IPC port used to send data to msb_broker.service. Default is 5555',
              default=5555, type=int)),
        ('--ipc-protocol',
         dict(help='the protocol used for IPC with zeroMQ. Default is tcp://127.0.0.1',
              default='tcp://127.0.0.1', type=str)),
        ('--udp-address',
         dict(help='Address to create an udp socket to allow streaming in of data from external sensors',
              default=None, type=str)),
        ('--udp-port',
         dict(help='port for local udp socket. Default is 5670',
              default=6666, type=int)),
        ('--profile',
         dict(help='profile flag', default=False, action='store_true')),
        ('--id',
         dict(help='id to identify the imu by', default='imu', type=str)),
    ]
    arg_parser = argparse.ArgumentParser()
    for flag, spec in option_table:
        arg_parser.add_argument(flag, **spec)
    return vars(arg_parser.parse_args())
def init() -> dict:
    """Wire up signal handling, CLI parsing and logging; return the config dict."""
    # Ctrl-C terminates the service cleanly via signal_handler_exit.
    signal.signal(signal.SIGINT, signal_handler_exit)
    config = parse_arguments()
    # Empty 'logfile' means logging goes to stderr (basicConfig default).
    logging.basicConfig(
        filename=config['logfile'],
        level=logging.DEBUG if config['verbose'] else logging.WARNING,
        format='%(levelname)s: %(asctime)s %(message)s',
        datefmt='%Y%m%dT%H%M%S%z',
    )
    logging.debug('msb_imu.py parsing of configuration done')
    # A config file, when given, overrides the command line values.
    if config['config_file']:
        logging.debug('parsing config file')
        config = read_parse_config_file(config)
        logging.debug(f'updated config file: {config}')
    # Optionally dump the effective configuration for later reuse.
    if config['dump_config_file']:
        logging.debug(f'dumping config file to {config["dump_config_file"]}')
        dump_config_file(config)
    return config
| StarcoderdataPython |
4822737 | <filename>src/modules/apputils/config/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
from enum import Enum
from typing import Dict, List
from .ext import DataCacheExtension, OptionsExtension
from .storages import StorageType
from .storages.base_storage import BaseStorage, StorageProperty, StoragePropertyType
class BaseConfiguration(object):
    """Application configuration facade over a pluggable storage backend.

    Wraps a ``BaseStorage`` instance, exposes boolean option flags through
    an ``OptionsExtension``, named data caches, an encrypted-property smoke
    test, and a schema version used by the ``UpgradeManager``.
    """

    __cache_invalidation: float = time.mktime(time.gmtime(8 * 3600))  # 8 hours

    _options_flags_name_old = "options"
    _options_flags_name = "config_options"
    _options_table = "general"

    class ConfigOptions(Enum):
        # Boolean flags persisted via OptionsExtension.
        CONF_INITIALIZED = 0
        CREDENTIALS_CACHED = 1
        USE_MASTER_PASSWORD = 2

    def __init__(self, storage: StorageType = StorageType.SQL,
                 app_name: str = 'apputils', lazy_init: bool = False, upgrade_manager=None):
        """
        :type upgrade_manager .upgrades.UpgradeManager
        """
        from .upgrades import UpgradeManager
        self.__upgrade_manager = upgrade_manager if upgrade_manager else UpgradeManager()
        self.__storage: BaseStorage = storage.value(app_name=app_name, lazy=lazy_init)
        self.__options = OptionsExtension(self.__storage, self._options_table, self._options_flags_name, self.ConfigOptions)
        self.__caches: Dict = {}

    def initialize(self):
        """Initialise or upgrade the configuration store.

        :rtype BaseConfiguration
        """
        if self.is_conf_initialized:
            self._storage.initialize_key()
            # Round-trip an encrypted property to verify the key is usable.
            try:
                assert self._test_encrypted_property == "test"
            except ValueError as e:
                print(f"Error: {str(e)}")
                sys.exit(-1)
            self.__upgrade_manager.upgrade(self, self._storage)
        else:
            self.__upgrade_manager.init_config(self, self._storage)
        return self

    def add_cache_ext(self, name: str, cache_lifetime: float = __cache_invalidation):
        """Register a named data cache (no-op if it already exists)."""
        if name not in self.__caches:
            self.__caches[name] = DataCacheExtension(self.__storage, name, cache_lifetime)

    def get_cache_ext(self, name: str) -> DataCacheExtension:
        """Return the named cache, creating it with defaults if needed."""
        if name not in self.__caches:
            self.add_cache_ext(name)
        return self.__caches[name]

    @property
    def list_cache_ext(self) -> List[str]:
        """Names of all registered caches."""
        return list(self.__caches.keys())

    @property
    def _storage(self) -> BaseStorage:
        return self.__storage

    @property
    def is_conf_initialized(self):
        return self.__options.get(self.ConfigOptions.CONF_INITIALIZED)

    @is_conf_initialized.setter
    def is_conf_initialized(self, value):
        # NOTE(review): the assigned *value* is ignored and the flag is forced
        # to True — kept as-is for backward compatibility, but confirm intent.
        self.__options.set(self.ConfigOptions.CONF_INITIALIZED, True)

    @property
    def __credentials_cached(self) -> bool:
        return self.__options.get(self.ConfigOptions.CREDENTIALS_CACHED)

    @__credentials_cached.setter
    def __credentials_cached(self, value: bool):
        self.__options.set(self.ConfigOptions.CREDENTIALS_CACHED, value)

    @property
    def __use_master_password(self):
        # FIX: the getter was missing `return` and always yielded None.
        return self.__options.get(self.ConfigOptions.USE_MASTER_PASSWORD)

    @__use_master_password.setter
    def __use_master_password(self, value: bool):
        self.__options.set(self.ConfigOptions.USE_MASTER_PASSWORD, value)

    @property
    def _test_encrypted_property(self):
        return self._storage.get_property(self._options_table, "enctest", StorageProperty()).value

    @_test_encrypted_property.setter
    def _test_encrypted_property(self, value):
        self._storage.set_text_property(self._options_table, "enctest", value, encrypted=True)

    @property
    def version(self) -> float:
        """Stored schema version; 0.0 when missing or unparsable."""
        p = self._storage.get_property("general", "db_version", StorageProperty(name="db_version", value="0.0"))
        try:
            return float(p.value)
        except ValueError:
            return 0.0

    @version.setter
    def version(self, version: float):
        self._storage.set_property("general", StorageProperty(name="db_version", value=str(version)))

    def reset(self):
        """Wipe the underlying storage."""
        self._storage.reset()
| StarcoderdataPython |
3266859 | <gh_stars>1-10
import os, sys, math
# 4-neighbourhood offsets as (dx, dy): up, left, right, down.
neighbours = [(0,-1), (-1,0), (1,0), (0,1)]

def main():
    """Advent-of-Code style low-point scan: sum risk levels of a height grid.

    A cell is a low point when it is strictly lower than every in-bounds
    neighbour. Risk level of a low point is height + 1, so the total is
    sum(lows) + len(lows).
    """
    input_values = []
    lows = []
    # Each line of input.txt is a row of single-digit heights.
    with open("input.txt", 'r') as file:
        for line in file.readlines():
            input_values.append([int(x) for x in line if(x !='\n')])
    for i in range(len(input_values)):
        for j in range(len(input_values[i])):
            summ = 0   # neighbours strictly higher than this cell
            count = 0  # neighbours that are in bounds
            for n in neighbours:
                if(j+n[0] >= 0 and j+n[0] < len(input_values[i]) and
                        i+n[1] >= 0 and i+n[1] < len(input_values)):
                    if(input_values[i][j] < input_values[i+n[1]][j+n[0]]):
                        summ += 1
                    count += 1
            # Low point: lower than all in-bounds neighbours.
            if(summ == count):
                lows.append(input_values[i][j])
    result = sum(lows) + len(lows)
    print("Sum of Risk Levels: " + str(result))

if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
1915638 | from flask_wtf import FlaskForm
from wtforms.validators import InputRequired, DataRequired, Optional, Email
from wtforms import BooleanField, StringField, RadioField, SelectField
class SubscribeForm(FlaskForm):
    """Subscription form with a single required, well-formed email field."""
    email = StringField('Email', [InputRequired(), Email()])
| StarcoderdataPython |
1838198 | <filename>patterns/behavioral/chain_of_responsibility.py
from abc import abstractmethod
from typing import List
class Handler:
    """Abstract link in a chain of responsibility.

    Subclasses implement ``handle``; any request the subclass declines
    (``handle`` returns False) is forwarded to the successor handler.
    """

    def __init__(self, successor: "Handler") -> None:
        self._successor: Handler = successor

    def handler(self, request: int) -> None:
        """Try to handle *request* here; otherwise delegate down the chain."""
        handled = self.handle(request)
        if not handled:
            self._successor.handler(request)

    @abstractmethod
    def handle(self, request: int) -> bool:
        pass
class ConcreteHandler1(Handler):
    """Handler that accepts requests in the half-open range (0, 10]."""

    def handle(self, request: int) -> bool:
        if not 0 < request <= 10:
            return False
        print(f"Request {request} handled in handler 1")
        return True
class DefaultHandler(Handler):
    """Fallback at the end of the chain; accepts anything that reaches it."""

    def handle(self, request: int) -> bool:
        """Report that no earlier handler accepted *request*."""
        notice = f"End of chain, no handler for {request}"
        print(notice)
        return True
class Client:
    """Builds the default handler chain and feeds requests into it."""

    def __init__(self) -> None:
        chain_tail = DefaultHandler(None)
        self._handler: Handler = ConcreteHandler1(chain_tail)

    def delegate(self, request: List[int]) -> None:
        """Run every request in *request* through the chain, in order."""
        for pending in request:
            self._handler.handler(pending)
# Demo: 2 and 5 land in ConcreteHandler1; 30 falls through to DefaultHandler.
client: Client = Client()
requests: List[int] = [2, 5, 30]
client.delegate(requests)
| StarcoderdataPython |
314035 | <filename>data/python_scripts/teams.py
# Build a {team_id: team_name} mapping from games.json and save it as teams.json.
import json
# NOTE(review): games.json is opened without a context manager and only closed
# near the end of the script; consider `with open(...)` blocks.
f = open('games.json', 'r')
data = json.load(f)
teamDict = {}
# Each game record names both teams; later games simply overwrite with the
# same name, so duplicates are harmless.
for game in data:
    team1ID = str(game["team1_id"])
    team2ID = str(game["team2_id"])
    team1Name = game["team1_name"]
    team2Name = game["team2_name"]
    teamDict[team1ID] = team1Name
    teamDict[team2ID] = team2Name
'''jsonData = json.dumps(teamDict)
print(jsonData)'''
with open('teams.json', 'w') as g:
    json.dump(teamDict, g, indent=1)
print('Mission accomplished')
f.close()
'''f = open('teams.json', 'r')
data = json.load(f)
for key in data:
    print(data[key])
    print("\n\n")
f.close()
'''
| StarcoderdataPython |
# Package metadata for the streamed_job distribution.
# NOTE(review): distutils is deprecated (removed in Python 3.12) — consider
# migrating to setuptools.
from distutils.core import setup
setup(
    name='streamed_job',
    version='0.1.0',
    author='<NAME>',
    author_email='<EMAIL>',
    packages=['streamed_job'],
    scripts=['bin/run_streamed_job.py'],
    url='http://www.fakeurl.nowhere',
    license='LICENSE.txt',
    description='Run a process with a timeout and stream output to a db',
    long_description=open('README.txt').read(),
    install_requires=[
        "backports.tempfile",
        "backoff",
        "requests",
    ],
)
| StarcoderdataPython |
9762455 | <reponame>HardwareDesignWithPython/HDPython
import unittest
import os
import shutil
from HDPython import *
import HDPython.examples as ahe
from HDPython.tests.helpers import remove_old_files
import HDPython.tests.core_tests as core_t
import HDPython.tests.ex1 as ex1
import HDPython.tests.RamHandler as RamHandler
class TestStringMethods(unittest.TestCase):
    """Regression tests for the RamHandler example (simulation + VHDL export)."""

    def test_RamHandler_sim(self):
        # Run the HDPython simulation; message carries the failure detail.
        result, message = RamHandler.RamHandler_sim("tests/RamHandler_sim/")
        self.assertTrue(result,message)

    def test_RamHandler2VHDL(self):
        # Convert the RamHandler design to VHDL and check it succeeds.
        result, message = RamHandler.RamHandler_2vhdl("tests/RamHandler/")
        self.assertTrue(result,message)
# Clean stale artifacts from previous runs, then run the test suite.
if __name__ == '__main__':
    remove_old_files()
    unittest.main()
1825572 | <filename>LeetCode_Solutions/804. Unique Morse Code Words.py
# Source : https://leetcode.com/problems/unique-morse-code-words/
# Author : foxfromworld
# Date : 03/11/2021
# First attempt
class Solution:
    def uniqueMorseRepresentations(self, words: List[str]) -> int:
        """Return how many distinct Morse transformations *words* produce.

        Each lowercase word maps to the concatenation of its letters' Morse
        codes; the answer is the number of unique such strings.

        Fixed: membership test used a list (O(n) per word) — now a set; and
        letters are indexed by ``ord(ch) - ord('a')`` instead of ``str.find``,
        which scanned the alphabet per letter and silently returns -1 on a
        non-letter.
        """
        code = [".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..",
                ".---", "-.-", ".-..", "--", "-.", "---", ".--.", "--.-", ".-.",
                "...", "-", "..-", "...-", ".--", "-..-", "-.--", "--.."]
        base = ord('a')
        transformations = {
            "".join(code[ord(ch) - base] for ch in word)
            for word in words
        }
        return len(transformations)
| StarcoderdataPython |
8191582 | # -*- coding: utf-8 -*-
from random import choice
import pickle
import threading
import time
import jieba
from gensim.models.doc2vec import Doc2Vec, LabeledSentence
from sklearn.externals import joblib
import numpy as np
from bert_serving.client import BertClient
from retrieval_documents import Retrieval
from fuzzy_match import fuzzy_matching
from tf_idf import TfIdf
from config import config, frequency_domain
NUM_OF_IR = 20
class Agent:
    """Retrieval-based chat agent.

    Pipeline: cluster the utterance to pick an index file, retrieve candidate
    Q/A pairs, score them with a weighted mix of fuzzy matching and TF-IDF,
    and reply with the answer of the best-scoring pair. High-scoring pairs
    are accumulated and periodically dumped to disk as new training corpus.
    """

    # Class-level set shared by all instances: (utterance, answer) pairs
    # considered good enough to persist.
    good_qualified_corpus = set()

    def __init__(self):
        self.config = config
        self.stop_words = ''
        self.punctuation_str = ''.join(self.config.punctuation_ls)
        self.frequency_domain_dict = frequency_domain.frequency_dict
        self.cluster_md = self.config.cluster_model
        # self.vec_md = self.config.doc_vector_model
        self.init_all_states()
        # Final score = 0.2 * fuzzy ratio + 0.8 * tf-idf similarity.
        self.fuzzy_weight = 0.2
        self.tf_idf_weight = 0.8
        # Dump the good-conversation set once it exceeds this many pairs.
        self.good_corpus_threshold = 200
        # Minimum score for a pair to count as "good".
        self.good_corpus_score = 0.95

    def init_all_states(self):
        """Load retrieval indexes, TF-IDF and clustering models, stop words."""
        self.retrieval = Retrieval(num_ir=NUM_OF_IR, config=self.config)
        self.tf_idf = TfIdf(self.config)
        self.cluster_model = joblib.load(self.cluster_md)
        # self.vec_model = Doc2Vec.load(self.vec_md)
        # self.vec_model = BertClient()
        self.load_stop_words(self.config)
        jieba.initialize()

    def get_utterance_type(self, utterance):
        """Return the corpus/index file for this utterance.

        NOTE(review): the embedding+cluster prediction is commented out, so
        every utterance currently maps to cluster_file[0].
        """
        # tmp_vector = self.vec_model.infer_vector(utterance)
        # tmp_vector = self.vec_model.encode([utterance])
        # label = self.cluster_model.predict(tmp_vector)
        # return self.config.cluster_file[label[0]]
        return self.config.cluster_file[0]

    def record_good_conversations(self, utterance, score_ls, context_ls):
        """Collect high-scoring (utterance, answer) pairs; dump them when full."""
        def write_conversations():
            # Timestamped pickle file name, colons/spaces made path-safe.
            localtime = (time.asctime(time.localtime(time.time()))).replace(' ', '_').replace(':', '-')
            with open(self.config.path_of_good_conversation+localtime, 'wb') as wfp:
                pickle.dump(Agent.good_qualified_corpus, wfp)
            Agent.good_qualified_corpus.clear()
        for index in range(len(score_ls)):
            if score_ls[index] > self.good_corpus_score:
                if context_ls[index][0] and context_ls[index][1]:
                    Agent.good_qualified_corpus.add((utterance, context_ls[index][1]))
        # Persist asynchronously so the reply path is not blocked by disk I/O.
        if len(Agent.good_qualified_corpus) > self.good_corpus_threshold:
            record_thread = threading.Thread(target=write_conversations)
            record_thread.start()

    def random_chose_index(self, score_ls, max_score):
        """Pick uniformly among the indices whose score equals *max_score*."""
        max_score_indexes = []
        for i in range(len(score_ls)):
            if score_ls[i] == max_score:
                max_score_indexes.append(i)
        return choice(max_score_indexes)

    def load_stop_words(self, config):
        # Stop words are stored as a pickled object.
        with open(config.stop_words, 'rb') as fpr:
            self.stop_words = pickle.load(fpr)

    def remove_special_words(self, stop_words_ls, input_sentence):
        """Strip configured modal words from the sentence.

        NOTE(review): the stop_words_ls parameter is unused; the method reads
        self.config.special_modal_words instead.
        """
        sentence = input_sentence
        for special_word in self.config.special_modal_words:
            if special_word in sentence:
                sentence = sentence.replace(special_word, '')
        return sentence

    def response_answer(self, reply_msg, max_score):
        """Normalise the score to a scalar (tf-idf scores arrive as ndarray)."""
        if type(max_score) is np.ndarray:
            final_max_score = max_score[0][0]
        else:
            final_max_score = max_score
        return reply_msg, final_max_score

    def get_answer(self, utterance, file_name=None):
        """Return (answer, score) for an utterance; ("", 0) on miss or error."""
        try:
            utterance = utterance.rstrip(self.punctuation_str)
            file_name = self.get_utterance_type(utterance)
            self.retrieval.read_indexes(file_name)
            context_ls = self.retrieval.search_sentences(utterance, self.stop_words)
            if not context_ls:
                return "", 0
            utterance_no_stop = self.remove_special_words(self.stop_words, utterance)
            # Normalise candidates; unusable pairs become (0, 0) placeholders
            # so list indices stay aligned with context_ls.
            new_context_ls = []
            for each_context in context_ls:
                ques = self.remove_special_words(self.stop_words, each_context[0])
                ans = self.remove_special_words(self.stop_words, each_context[1])
                if not ques or not ans:
                    new_context_ls.append((0, 0))
                    continue
                new_context_ls.append((ques, ans))
            fuzzy_ratio_ls = fuzzy_matching(utterance_no_stop, new_context_ls)
            self.tf_idf.select_model(file_name)
            self.tf_idf.predict_tfidf(utterance_no_stop, new_context_ls)
            tf_idf_score_ls = self.tf_idf.calculate_distances()
            # Ties are broken randomly to avoid always answering the same way.
            if fuzzy_ratio_ls.count(max(fuzzy_ratio_ls)) > 1:
                fuzzy_best_index = self.random_chose_index(fuzzy_ratio_ls, max(fuzzy_ratio_ls))
            else:
                fuzzy_best_index = fuzzy_ratio_ls.index(max(fuzzy_ratio_ls))
            if tf_idf_score_ls.count(max(tf_idf_score_ls)) > 1:
                tftdf_best_index = self.random_chose_index(tf_idf_score_ls, max(tf_idf_score_ls))
            else:
                tftdf_best_index = tf_idf_score_ls.index(max(tf_idf_score_ls))
            # Exact (or containing) question matches short-circuit the
            # weighted scoring below.
            fuzzy_best_content = context_ls[fuzzy_best_index][0].rstrip(self.punctuation_str)
            tfidf_best_content = context_ls[tftdf_best_index][0].rstrip(self.punctuation_str)
            if fuzzy_best_content == utterance or utterance.strip(''.join(config.special_modal_words)) in fuzzy_best_content:
                best_index = fuzzy_best_index
                return self.response_answer(context_ls[best_index][1], max(fuzzy_ratio_ls))
            if tfidf_best_content == utterance or utterance.strip(''.join(config.special_modal_words)) in tfidf_best_content:
                best_index = tftdf_best_index
                return self.response_answer(context_ls[best_index][1], max(tf_idf_score_ls))
            # Weighted blend of the two similarity signals.
            final_score_ls = [(fuzzy_ratio * self.fuzzy_weight + tf_tdf_score * self.tf_idf_weight) for fuzzy_ratio, tf_tdf_score in
                              zip(fuzzy_ratio_ls, tf_idf_score_ls)]
            # TODO: find a suitable weight
            self.record_good_conversations(utterance, final_score_ls, context_ls)
            max_score = max(final_score_ls)
            if final_score_ls.count(max_score) > 1:
                best_index = self.random_chose_index(final_score_ls, max_score)
            else:
                best_index = final_score_ls.index(max_score)
            return self.response_answer(context_ls[best_index][1], max_score)
        except Exception as e:
            # Any failure degrades to an empty answer rather than crashing.
            return "", 0

    def test(self, utterance):
        """Convenience wrapper returning the raw (answer, score) tuple."""
        answer = self.get_answer(utterance)
        return answer

    def start_cmd(self):
        """Interactive REPL; type 'exit1' to quit."""
        while True:
            utterance = input(">>>")
            if utterance.strip() == "exit1":
                break
            answer, score = self.get_answer(utterance)
            print("<<<{}:{}".format(answer, score))

    def api(self, utterance):
        """Return [answer, score] as a list (for JSON-style callers)."""
        answer, score = self.get_answer(utterance)
        return [answer, score]

    def socket_get(self, utterance):
        """Return 'answer---score' as one string (for socket transport)."""
        answer, score = self.get_answer(utterance)
        return answer + '---' + str(score)
# Run the agent as an interactive console chat when invoked directly.
if __name__ == '__main__':
    agent = Agent()
    agent.start_cmd()
| StarcoderdataPython |
1910396 | <gh_stars>10-100
import re
from exporters.persistence import PERSISTENCE_LIST
class PersistenceConfigDispatcher(object):
    """Resolve a persistence URI to the first module whose regex matches it."""

    def __init__(self, uri):
        self.uri = uri
        self.persistence_dispatcher = self.get_module_from_uri()

    def get_module_from_uri(self):
        """Return the persistence module matching self.uri, or raise ValueError."""
        persistence_regexes = {m.uri_regex: m for m in PERSISTENCE_LIST}
        # FIX: was `.iteritems()`, which is Python-2-only and raises
        # AttributeError on Python 3; `.items()` works on both.
        for regex, module in persistence_regexes.items():
            if re.match(regex, self.uri):
                return module
        raise ValueError('{} is not a valid persistence uri. Available handlers are {}.'
                         .format(self.uri, [m.uri_regex for m in PERSISTENCE_LIST]))

    @property
    def config(self):
        """Persistence configuration parsed from the URI by the matched module."""
        return self.persistence_dispatcher.configuration_from_uri(
            self.uri, self.persistence_dispatcher.uri_regex)
| StarcoderdataPython |
8182869 | <reponame>metamapper-io/metamapper
# Generated by Django 3.0.10 on 2022-03-09 19:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds workspace.active_sso, a nullable pointer to the SSO connection
    # currently enforced for the workspace (DO_NOTHING on delete; no reverse
    # relation via related_name='+').

    dependencies = [
        ('sso', '0001_initial'),
        ('authentication', '0002_workspace_team_members'),
    ]

    operations = [
        migrations.AddField(
            model_name='workspace',
            name='active_sso',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='sso.SSOConnection'),
        ),
    ]
| StarcoderdataPython |
11373801 | '''
Module which runs the game
'''
def main():
    """Entry point: report that the game has begun."""
    banner = 'Game started!'
    print(banner)
3272507 | <reponame>Raspberry-Pi-Club/Dr.Reddy<filename>sqllite_test.py<gh_stars>0
# Smoke-test script against the Piyu-UI interface database.
import sqlite3
# NOTE(review): backslashes in this Windows path only work because none of
# them form a recognised escape sequence — prefer a raw string r'...'.
SQLConnection = sqlite3.connect('C:\wamp\www\Piyu-UI\database\interface.db')
c = SQLConnection.cursor()
# Read the current local time from SQLite ("insertion time").
insertionTime = ''
for row in c.execute("SELECT datetime('now','localtime')"):
    insertionTime = row[0]
# Store it as the last fetch time (row sid=1).
print(insertionTime)
# NOTE(review): %-interpolation into SQL is injection-prone; use a
# parameterised query (execute("... time=? ...", (insertionTime,))).
c.execute("UPDATE last_fetch SET time='%s' WHERE sid = 1" % insertionTime)
# Read the stored last fetch time back.
for lfTime in c.execute("SELECT time FROM last_fetch WHERE sid = 1"):
    print(lfTime[0])
# Fetch the first pending command (fetched=0), if any.
toFetchID = 1
toExeCommand = ''
for lid in c.execute("SELECT sno,command FROM commands WHERE fetched=0"):
    toFetchID = lid[0]
    toExeCommand = lid[1]
    break
print(str(toFetchID) + toExeCommand)
# 1. Set last fetch id
# 2. Set last fetch time and date
# 3. Set both on the commands database
# LOOP
327499 | <gh_stars>1-10
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
TODO: add short description
"""
from .queries import *
from .result import *
from .simbad_votable import *
# SIMBAD mirror name -> sim-script endpoint URL (script text is appended).
mirrors = {'harvard': 'http://simbad.harvard.edu/simbad/sim-script?script=',
           'strasbourg': 'http://simbad.u-strasbg.fr/simbad/sim-script?script='}
# Default VOTable fields requested from SIMBAD.
votabledef = 'main_id, coordinates'
| StarcoderdataPython |
5097880 | import json
import getpass
import shortuuid # type: ignore
from datetime import datetime
from functools import lru_cache
from collections import defaultdict
from typing import Any, Dict, Generator, Generic, List, Optional, Set, Tuple, Union
from followthemoney.types import registry
from nomenklatura.entity import CE
from nomenklatura.judgement import Judgement
from nomenklatura.util import PathLike, is_qid
StrIdent = Union[str, "Identifier"]
Pair = Tuple["Identifier", "Identifier"]
class ResolverLogicError(Exception):
    """Raised when a resolver invariant is violated (e.g. pairing an id with itself)."""
    pass
class Identifier(object):
    """An entity identifier with a canonicality precedence weight.

    Weight ordering (used to pick a cluster representative):
    plain ids (1) < NK- synthetic canonical ids (2) < Wikidata QIDs (3).
    """

    PREFIX = "NK-"

    __slots__ = ("id", "canonical", "weight")

    def __init__(self, id: str):
        self.id = id
        self.weight: int = 1
        if self.id.startswith(self.PREFIX):
            self.weight = 2
        elif is_qid(id):
            self.weight = 3
        # Only NK- ids and QIDs can serve as canonical cluster ids.
        self.canonical = self.weight > 1

    def __eq__(self, other: Any) -> bool:
        # Compares by string form, so Identifier == str also works.
        return str(self) == str(other)

    def __lt__(self, other: Any) -> bool:
        # Order by weight first, then lexically — max() yields the best
        # canonical candidate.
        return (self.weight, self.id) < (other.weight, other.id)

    def __str__(self) -> str:
        return self.id

    def __hash__(self) -> int:
        return hash(self.id)

    def __len__(self) -> int:
        return len(self.id)

    def __repr__(self) -> str:
        return f"<I({self.id})>"

    @classmethod
    def get(cls, id: StrIdent) -> "Identifier":
        """Coerce a string or Identifier into an Identifier."""
        if isinstance(id, str):
            return cls(id)
        return id

    @classmethod
    def pair(cls, left_id: StrIdent, right_id: StrIdent) -> Pair:
        """Return the two ids as an order-independent (greater, lesser) pair."""
        left = cls.get(left_id)
        right = cls.get(right_id)
        if left == right:
            raise ResolverLogicError()
        return (max(left, right), min(left, right))

    @classmethod
    def make(cls, value: Optional[str] = None) -> "Identifier":
        """Mint a new NK- canonical identifier (random suffix unless given)."""
        key = value or shortuuid.uuid()
        return cls.get(f"{cls.PREFIX}{key}")
class Edge(object):
    """A judgement edge between two identifiers, stored in normalised order."""

    __slots__ = ("key", "source", "target", "judgement", "score", "user", "timestamp")

    def __init__(
        self,
        left_id: StrIdent,
        right_id: StrIdent,
        judgement: Judgement = Judgement.NO_JUDGEMENT,
        score: Optional[float] = None,
        user: Optional[str] = None,
        timestamp: Optional[str] = None,
    ):
        # Identifier.pair() orders the ids, so (a, b) and (b, a) share a key;
        # the greater (more canonical) id becomes the target.
        self.key = Identifier.pair(left_id, right_id)
        self.target, self.source = self.key
        self.judgement = judgement
        self.score = score
        self.user = user
        self.timestamp = timestamp

    def other(self, cur: Identifier) -> Identifier:
        """Return the endpoint opposite to *cur*."""
        if cur == self.target:
            return self.source
        return self.target

    def to_line(self) -> str:
        """Serialise as one JSON-array line (inverse of from_line)."""
        row = [
            self.target.id,
            self.source.id,
            self.judgement.value,
            self.score,
            self.user,
            self.timestamp,
        ]
        return json.dumps(row) + "\n"

    def __str__(self) -> str:
        return self.to_line()

    def __hash__(self) -> int:
        # Identity is determined solely by the normalised id pair.
        return hash(self.key)

    def __eq__(self, other: Any) -> bool:
        return hash(self) == hash(other)

    def __lt__(self, other: Any) -> bool:
        return bool(self.key < other.key)

    def __repr__(self) -> str:
        return f"<E({self.target.id}, {self.source.id}, {self.judgement.value})>"

    @classmethod
    def from_line(cls, line: str) -> "Edge":
        """Parse an edge from its JSON-line form."""
        data = json.loads(line)
        return cls(
            data[0],
            data[1],
            judgement=Judgement(data[2]),
            score=data[3],
            user=data[4],
            timestamp=data[5],
        )
class Resolver(Generic[CE]):
UNDECIDED = (Judgement.NO_JUDGEMENT, Judgement.UNSURE)
def __init__(self, path: Optional[PathLike] = None) -> None:
self.path = path
self.edges: Dict[Pair, Edge] = {}
self.nodes: Dict[Identifier, Set[Edge]] = defaultdict(set)
def get_edge(self, left_id: StrIdent, right_id: StrIdent) -> Optional[Edge]:
key = Identifier.pair(left_id, right_id)
return self.edges.get(key)
def _traverse(self, node: Identifier, seen: Set[Identifier]) -> Set[Identifier]:
connected = set([node])
if node in seen:
return connected
seen.add(node)
for edge in self.nodes.get(node, []):
if edge.judgement == Judgement.POSITIVE:
other = edge.other(node)
rec = self._traverse(other, seen)
connected.update(rec)
return connected
@lru_cache(maxsize=None)
def connected(self, node: Identifier) -> Set[Identifier]:
return self._traverse(node, set())
def get_canonical(self, entity_id: StrIdent) -> str:
"""Return the canonical identifier for the given entity ID."""
node = Identifier.get(entity_id)
best = max(self.connected(node))
if best.canonical:
return best.id
return node.id
def canonicals(self) -> Generator[Identifier, None, None]:
"""Return all the canonical cluster identifiers."""
for node in self.nodes.keys():
if not node.canonical:
continue
canonical = self.get_canonical(node)
if canonical == node.id:
yield node
def get_referents(
self, canonical_id: StrIdent, canonicals: bool = True
) -> Set[str]:
"""Get all the non-canonical entity identifiers which refer to a given
canonical identifier."""
node = Identifier.get(canonical_id)
referents: Set[str] = set()
for connected in self.connected(node):
if not canonicals and connected.canonical:
continue
if connected == node:
continue
referents.add(connected.id)
return referents
def get_resolved_edge(
self, left_id: StrIdent, right_id: StrIdent
) -> Optional[Edge]:
(left, right) = Identifier.pair(left_id, right_id)
left_connected = self.connected(left)
right_connected = self.connected(right)
for e in left_connected:
for o in right_connected:
edge = self.edges.get(Identifier.pair(e, o))
if edge is None:
continue
return edge
return None
def get_judgement(self, entity_id: StrIdent, other_id: StrIdent) -> Judgement:
"""Get the existing decision between two entities with dedupe factored in."""
entity = Identifier.get(entity_id)
other = Identifier.get(other_id)
if entity == other:
return Judgement.POSITIVE
if is_qid(entity.id) and is_qid(other.id):
return Judgement.NEGATIVE
entity_connected = self.connected(entity)
if other in entity_connected:
return Judgement.POSITIVE
other_connected = self.connected(other)
for e in entity_connected:
for o in other_connected:
edge = self.edges.get(Identifier.pair(e, o))
if edge is None:
continue
if edge.judgement == Judgement.NEGATIVE:
return edge.judgement
return Judgement.NO_JUDGEMENT
def check_candidate(self, left: StrIdent, right: StrIdent) -> bool:
"""Check if the two IDs could be merged, i.e. if there's no existing
judgement."""
judgement = self.get_judgement(left, right)
return judgement == Judgement.NO_JUDGEMENT
def _get_suggested(self) -> List[Edge]:
"""Get all NO_JUDGEMENT edges in descending order of score."""
edges_all = self.edges.values()
candidates = (e for e in edges_all if e.judgement == Judgement.NO_JUDGEMENT)
cmp = lambda x: x.score or -1.0
return sorted(candidates, key=cmp, reverse=True)
def get_candidates(
self, limit: int = 100
) -> Generator[Tuple[str, str, Optional[float]], None, None]:
returned = 0
for edge in self._get_suggested():
if not self.check_candidate(edge.source, edge.target):
continue
yield edge.target.id, edge.source.id, edge.score
returned += 1
if returned >= limit:
break
def suggest(
self, left_id: StrIdent, right_id: StrIdent, score: float
) -> Identifier:
"""Make a NO_JUDGEMENT link between two identifiers to suggest that a user
should make a decision about whether they are the same or not."""
edge = self.get_edge(left_id, right_id)
if edge is not None:
if edge.judgement in self.UNDECIDED:
edge.score = score
return edge.target
return self.decide(left_id, right_id, Judgement.NO_JUDGEMENT, score=score)
def decide(
self,
left_id: StrIdent,
right_id: StrIdent,
judgement: Judgement,
user: Optional[str] = None,
score: Optional[float] = None,
) -> Identifier:
edge = self.get_edge(left_id, right_id)
if edge is None:
edge = Edge(left_id, right_id, judgement=judgement)
# Canonicalise positive matches, i.e. make both identifiers refer to a
# canonical identifier, instead of making a direct link.
if judgement == Judgement.POSITIVE:
connected = set(self.connected(edge.target))
connected.update(self.connected(edge.source))
target = max(connected)
if not target.canonical:
canonical = Identifier.make()
self._remove(edge)
self.decide(edge.source, canonical, judgement=judgement, user=user)
self.decide(edge.target, canonical, judgement=judgement, user=user)
return canonical
edge.judgement = judgement
edge.timestamp = datetime.utcnow().isoformat()[:16]
edge.user = user or getpass.getuser()
edge.score = score or edge.score
self._register(edge)
return edge.target
def _register(self, edge: Edge) -> None:
if edge.judgement != Judgement.NO_JUDGEMENT:
edge.score = None
self.edges[edge.key] = edge
self.nodes[edge.source].add(edge)
self.nodes[edge.target].add(edge)
self.connected.cache_clear()
def _remove(self, edge: Edge) -> None:
"""Remove an edge from the graph."""
self.edges.pop(edge.key, None)
for node in (edge.source, edge.target):
if node in self.nodes:
self.nodes[node].discard(edge)
def explode(self, node_id: StrIdent) -> Set[str]:
"""Dissolve all edges linked to the cluster to which the node belongs.
This is the hard way to make sure we re-do context once we realise
there's been a mistake."""
node = Identifier.get(node_id)
affected: Set[str] = set()
for part in self.connected(node):
affected.add(str(part))
edges = self.nodes.get(part)
if edges is None:
continue
for edge in list(edges):
if edge.judgement != Judgement.NO_JUDGEMENT:
self._remove(edge)
self.connected.cache_clear()
return affected
def prune(self, keep: int = 0) -> None:
"""Remove suggested (i.e. NO_JUDGEMENT) edges, keep only the n with the
highest score. This also checks if a transitive judgement has been
established in the mean time and removes those candidates."""
kept = 0
for edge in self._get_suggested():
judgement = self.get_judgement(edge.source, edge.target)
if judgement != Judgement.NO_JUDGEMENT:
self._remove(edge)
if kept >= keep:
self._remove(edge)
kept += 1
self.connected.cache_clear()
def apply(self, proxy: CE) -> CE:
"""Replace all entity references in a given proxy with their canonical
identifiers. This is essentially the harmonisation post de-dupe."""
canonical_id = self.get_canonical(proxy.id)
if canonical_id != proxy.id:
proxy.referents = self.get_referents(canonical_id)
proxy.id = canonical_id
for prop in proxy.iterprops():
if prop.type != registry.entity:
continue
for value in proxy.pop(prop):
canonical = self.get_canonical(value)
proxy.unsafe_add(prop, canonical, cleaned=True)
return proxy
def save(self) -> None:
    """Serialise all edges, in sorted order, to the resolver's backing file."""
    if self.path is None:
        raise RuntimeError("Resolver has no path")
    ordered_edges = sorted(self.edges.values())
    with open(self.path, "w") as handle:
        for edge in ordered_edges:
            handle.write(edge.to_line())
@classmethod
def load(cls, path: PathLike) -> "Resolver[CE]":
    """Build a resolver from the line-oriented edge file at *path*.

    A missing file yields an empty resolver bound to that path.
    """
    resolver = cls(path=path)
    # NOTE(review): assumes *path* is a pathlib.Path-like object exposing
    # .exists(); a plain str would raise AttributeError here -- confirm
    # against the PathLike alias definition.
    if not path.exists():
        return resolver
    with open(path, "r") as fh:
        # Read line-by-line; an empty string signals EOF.
        while True:
            line = fh.readline()
            if not line:
                break
            edge = Edge.from_line(line)
            resolver._register(edge)
    return resolver
def __repr__(self) -> str:
    """Debug representation: backing file name (or :memory:) and edge count."""
    location = ":memory:" if self.path is None else self.path.name
    return f"<Resolver({location!r}, {len(self.edges)})>"
| StarcoderdataPython |
1957231 | <gh_stars>10-100
import os
import sys
import numpy as np
from scipy import spatial as ss
import pdb
import cv2
from utils import hungarian,read_pred_and_gt,AverageMeter,AverageCategoryMeter
# Ground-truth and prediction localisation files (one entry per image).
gt_file = 'val_gt_loc.txt'
pred_file = 'tiny_val_loc_0.8_0.3.txt'
flagError = False  # NOTE(review): never read in this script
# Image IDs of the validation split: 3110..3609.
id_std = [i for i in range(3110,3610,1)]
# NOTE(review): one ID is patched to 3098 -- presumably a relabelled or
# substituted sample; confirm against the dataset release notes.
id_std[59] = 3098
# Number of annotation level categories in the ground truth.
num_classes = 6
def compute_metrics(dist_matrix,match_matrix,pred_num,gt_num,sigma,level):
    """Match predicted points to ground truth and count detections.

    Parameters:
        dist_matrix: (pred_num, gt_num) pairwise distances.
        match_matrix: boolean buffer of the same shape; overwritten here.
        pred_num: number of predicted points.
        gt_num: number of ground-truth points (unused; kept for signature
            compatibility with callers).
        sigma: per-ground-truth distance threshold.
        level: per-ground-truth annotation level in [0, num_classes).

    Returns:
        tp, fp, fn: scalar true/false positive and false negative counts.
        tp_c, fn_c: per-level tp/fn counts, arrays of length num_classes.
    """
    # A prediction is a candidate match for every GT point within sigma.
    for i_pred_p in range(pred_num):
        pred_dist = dist_matrix[i_pred_p,:]
        match_matrix[i_pred_p,:] = pred_dist<=sigma
    # One-to-one assignment; rows index predictions, columns ground truth.
    tp, assign = hungarian(match_matrix)
    fn_gt_index = np.array(np.where(assign.sum(0)==0))[0]
    tp_pred_index = np.array(np.where(assign.sum(1)==1))[0]
    tp_gt_index = np.array(np.where(assign.sum(0)==1))[0]
    fp_pred_index = np.array(np.where(assign.sum(1)==0))[0]
    level_list = level[tp_gt_index]  # NOTE(review): computed but unused
    # Recompute tp from the assignment, overriding hungarian's return value.
    tp = tp_pred_index.shape[0]
    fp = fp_pred_index.shape[0]
    fn = fn_gt_index.shape[0]
    tp_c = np.zeros([num_classes])
    fn_c = np.zeros([num_classes])
    for i_class in range(num_classes):
        tp_c[i_class] = (level[tp_gt_index]==i_class).sum()
        fn_c[i_class] = (level[fn_gt_index]==i_class).sum()
    return tp,fp,fn,tp_c,fn_c
def main():
    """Evaluate localisation (AP/AR/F1 at small and large sigma) and
    counting (MAE/MSE/NAE) metrics over the validation split."""
    cnt_errors = {'mae':AverageMeter(),'mse':AverageMeter(),'nae':AverageMeter(),}
    metrics_s = {'tp':AverageMeter(), 'fp':AverageMeter(), 'fn':AverageMeter(), 'tp_c':AverageCategoryMeter(num_classes), 'fn_c':AverageCategoryMeter(num_classes)}
    metrics_l = {'tp':AverageMeter(), 'fp':AverageMeter(), 'fn':AverageMeter(), 'tp_c':AverageCategoryMeter(num_classes), 'fn_c':AverageCategoryMeter(num_classes)}

    pred_data, gt_data = read_pred_and_gt(pred_file,gt_file)
    for i_sample in id_std:
        print(i_sample)
        # init
        gt_p,pred_p,fn_gt_index,tp_pred_index,fp_pred_index= [],[],[],[],[]
        tp_s,fp_s,fn_s,tp_l,fp_l,fn_l = [0,0,0,0,0,0]
        tp_c_s = np.zeros([num_classes])
        fn_c_s = np.zeros([num_classes])
        tp_c_l = np.zeros([num_classes])
        fn_c_l = np.zeros([num_classes])

        # Case 1: predictions but no ground truth -> all false positives.
        if gt_data[i_sample]['num'] ==0 and pred_data[i_sample]['num'] !=0:
            pred_p = pred_data[i_sample]['points']
            fp_pred_index = np.array(range(pred_p.shape[0]))
            fp_s = fp_pred_index.shape[0]
            fp_l = fp_pred_index.shape[0]

        # Case 2: ground truth but no predictions -> all false negatives.
        if pred_data[i_sample]['num'] ==0 and gt_data[i_sample]['num'] !=0:
            gt_p = gt_data[i_sample]['points']
            level = gt_data[i_sample]['level']
            fn_gt_index = np.array(range(gt_p.shape[0]))
            fn_s = fn_gt_index.shape[0]
            fn_l = fn_gt_index.shape[0]
            for i_class in range(num_classes):
                fn_c_s[i_class] = (level[fn_gt_index]==i_class).sum()
                fn_c_l[i_class] = (level[fn_gt_index]==i_class).sum()

        # Case 3: both present -> Hungarian matching under the small
        # (sigma[:,0]) and large (sigma[:,1]) tolerance radii.
        if gt_data[i_sample]['num'] !=0 and pred_data[i_sample]['num'] !=0:
            pred_p = pred_data[i_sample]['points']
            gt_p = gt_data[i_sample]['points']
            sigma_s = gt_data[i_sample]['sigma'][:,0]
            sigma_l = gt_data[i_sample]['sigma'][:,1]
            level = gt_data[i_sample]['level']

            # dist
            dist_matrix = ss.distance_matrix(pred_p,gt_p,p=2)
            match_matrix = np.zeros(dist_matrix.shape,dtype=bool)

            # sigma_s and sigma_l
            tp_s,fp_s,fn_s,tp_c_s,fn_c_s = compute_metrics(dist_matrix,match_matrix,pred_p.shape[0],gt_p.shape[0],sigma_s,level)
            tp_l,fp_l,fn_l,tp_c_l,fn_c_l = compute_metrics(dist_matrix,match_matrix,pred_p.shape[0],gt_p.shape[0],sigma_l,level)

        # Accumulate per-image detection statistics.
        metrics_s['tp'].update(tp_s)
        metrics_s['fp'].update(fp_s)
        metrics_s['fn'].update(fn_s)
        metrics_s['tp_c'].update(tp_c_s)
        metrics_s['fn_c'].update(fn_c_s)
        metrics_l['tp'].update(tp_l)
        metrics_l['fp'].update(fp_l)
        metrics_l['fn'].update(fn_l)
        metrics_l['tp_c'].update(tp_c_l)
        metrics_l['fn_c'].update(fn_c_l)

        # Counting errors from the raw per-image counts.
        gt_count,pred_cnt = gt_data[i_sample]['num'],pred_data[i_sample]['num']
        s_mae = abs(gt_count-pred_cnt)
        s_mse = (gt_count-pred_cnt)*(gt_count-pred_cnt)
        cnt_errors['mae'].update(s_mae)
        cnt_errors['mse'].update(s_mse)
        if gt_count !=0:
            s_nae = abs(gt_count-pred_cnt)/gt_count
            cnt_errors['nae'].update(s_nae)

    # Precision/recall/F1 over the whole split; 1e-20 guards the divisions.
    ap_s = metrics_s['tp'].sum/(metrics_s['tp'].sum+metrics_s['fp'].sum+1e-20)
    ar_s = metrics_s['tp'].sum/(metrics_s['tp'].sum+metrics_s['fn'].sum+1e-20)
    f1m_s = 2*ap_s*ar_s/(ap_s+ar_s)
    ar_c_s = metrics_s['tp_c'].sum/(metrics_s['tp_c'].sum+metrics_s['fn_c'].sum+1e-20)
    ap_l = metrics_l['tp'].sum/(metrics_l['tp'].sum+metrics_l['fp'].sum+1e-20)
    ar_l = metrics_l['tp'].sum/(metrics_l['tp'].sum+metrics_l['fn'].sum+1e-20)
    f1m_l = 2*ap_l*ar_l/(ap_l+ar_l)
    ar_c_l = metrics_l['tp_c'].sum/(metrics_l['tp_c'].sum+metrics_l['fn_c'].sum+1e-20)

    print('-----Localization performance-----')
    print('AP_small: '+str(ap_s))
    print('AR_small: '+str(ar_s))
    print('F1m_small: '+str(f1m_s))
    print('AR_small_category: '+str(ar_c_s))
    print(' avg: '+str(ar_c_s.mean()))
    print('AP_large: '+str(ap_l))
    print('AR_large: '+str(ar_l))
    print('F1m_large: '+str(f1m_l))
    print('AR_large_category: '+str(ar_c_l))
    print(' avg: '+str(ar_c_l.mean()))

    mae = cnt_errors['mae'].avg
    mse = np.sqrt(cnt_errors['mse'].avg)
    nae = cnt_errors['nae'].avg

    print('-----Counting performance-----')
    print('MAE: '+str(mae))
    print('MSE: '+str(mse))
    print('NAE: '+str(nae))
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
def unify_faces(face_data):
    """Normalise face landmarks into a 1000x1000 frame anchored at the face
    rectangle's top-left corner.

    Mutates the landmark dict inside *face_data* in place and returns it.
    """
    rect = face_data['faceRectangle']
    landmarks = face_data['faceLandmarks']
    for point in landmarks.values():
        point['x'] = (point['x'] - rect['left']) * (1000 / rect['width'])
        point['y'] = (point['y'] - rect['top']) * (1000 / rect['height'])
    return landmarks
# NOTE(review): data1..data4 are not defined in this snippet -- they are
# presumably face-API responses loaded earlier in the full script; confirm
# before running this excerpt standalone.
data1 = unify_faces(data1)
data2 = unify_faces(data2)
data3 = unify_faces(data3)
data4 = unify_faces(data4)
print(data1)
print(data2)
print(data3)
print(data4)
| StarcoderdataPython |
376371 | <reponame>uit-cosmo/fpp-closed-expresions<gh_stars>1-10
"""
Excess time statisitics
In all cases, the signal z should have been normalized as (z-<z>)/z_rms
"""
import numpy as np
import mpmath as mm
import warnings
def eT(X, g):
    """
    Returns the fraction of time above threshold for the normalized shot noise process X.
    Input:
        X: the values of the shot noise process, 1d numpy array
        g: Intermittency parameter, float
    Output:
        F: The fraction of time above threshold. The total time is T*F.
    """
    # Thresholds at or below the process minimum -sqrt(g) are always
    # exceeded, so the fraction defaults to 1.
    F = np.ones(len(X))
    assert g > 0
    # mpmath provides an arbitrary-precision regularized incomplete gamma.
    g = mm.mpf(g)
    for i in range(len(X)):
        if X[i] > -np.sqrt(g):
            F[i] = mm.gammainc(g, a=np.sqrt(g) * X[i] + g, regularized=True)
    return F
def eX(X, g, l):
    """
    Returns the rate of upwards level crossings above threshold for the normalized shot noise process X.
    Input:
        X: the values of the shot noise process, 1d numpy array
        g: Intermittency parameter, float
        l: pulse asymmetry parameter, float.
    Output:
        F: The rate of upward crossings above threshold. The total number of crossings is td*F/T.
    """
    assert g > 0
    assert l >= 0
    assert l <= 1
    # mpmath values keep the gamma-function evaluations at full precision.
    l = mm.mpf(l)
    g = mm.mpf(g)
    # The crossing rate is zero at or below the process minimum -sqrt(g).
    F = np.zeros(len(X))

    def eXtmp(x, g, l):
        # Two-sided pulse shape (0 < l < 1).
        if (l > 0) & (l < 1):
            return (
                (
                    l ** (g * l - 1)
                    * (1 - l) ** (g * (1 - l) - 1)
                    * g ** (g / 2 - 1)
                    / (mm.gamma(g * l) * mm.gamma(g * (1 - l)))
                )
                * (x + np.sqrt(g)) ** g
                * mm.exp(-np.sqrt(g) * x - g)
            )
        else:
            # One-sided pulse shape (l == 0 or l == 1).
            return (
                g ** (g / 2)
                * (x + np.sqrt(g)) ** g
                * mm.exp(-np.sqrt(g) * x - g)
                / mm.gamma(g)
            )

    for i in range(len(X)):
        if X[i] > -np.sqrt(g):
            F[i] = eXtmp(X[i], g, l)
    return F
def eX_l0(X, g):
    """
    Returns the rate of upwards level crossings above threshold for the normalized shot noise process X with a one sided pulse shape (l=0).
    Input:
        X: the values of the shot noise process, 1d numpy array
        g: Intermittency parameter, float
    Output:
        F: The rate of upward crossings above threshold. The total number of crossings is td*F/T.
    """
    # Deprecated: eX(X, g, l=0) evaluates the same expression.
    warnings.warn("The functionality of eX_l0 has been added to eX.")
    assert g > 0
    g = mm.mpf(g)
    F = np.zeros(len(X))
    for i in range(len(X)):
        if X[i] > -np.sqrt(g):
            F[i] = (
                g ** (g / 2)
                * (X[i] + np.sqrt(g)) ** g
                * mm.exp(-np.sqrt(g) * X[i] - g)
                / mm.gamma(g)
            )
    return F
def avT(X, g, l):
    """
    Returns the normalized average time above threshold for the normalized shot noise process X.
    Input:
        X: the values of the shot noise process, 1d numpy array
        g: Intermittency parameter, float
        l: pulse asymmetry parameter, float.
    Output:
        F: The normalized average time above threshold. The unnormalized version is F/td.
    """
    assert g > 0
    assert l >= 0
    assert l <= 1
    l = mm.mpf(l)
    g = mm.mpf(g)
    # Zero at or below the process minimum -sqrt(g).
    F = np.zeros(len(X))

    def avTtmp(x, g, l):
        # Two-sided pulse shape (0 < l < 1).
        if (l > 0) & (l < 1):
            return (
                (
                    mm.gamma(g * l)
                    * mm.gamma(g * (1 - l))
                    * l ** (1 - g * l)
                    * (1 - l) ** (1 - g * (1 - l))
                    * g ** (1 - g / 2)
                )
                * mm.gammainc(g, a=np.sqrt(g) * x + g, regularized=True)
                * (x + np.sqrt(g)) ** (-g)
                * mm.exp(np.sqrt(g) * x + g)
            )
        else:
            # One-sided pulse shape (l == 0 or l == 1).
            # BUG FIX: this branch previously referenced the enclosing loop
            # variables as X[i] instead of the argument x; it only worked by
            # accident of being called from inside the loop below and would
            # break if avTtmp were called directly.
            return (
                (mm.gamma(g) * g ** (-g / 2))
                * mm.gammainc(g, a=np.sqrt(g) * x + g, regularized=True)
                * (x + np.sqrt(g)) ** (-g)
                * mm.exp(np.sqrt(g) * x + g)
            )

    for i in range(len(X)):
        if X[i] > -np.sqrt(g):
            F[i] = avTtmp(X[i], g, l)
    return F
def avT_l0(X, g):
    """
    Returns the normalized average time above threshold for the normalized shot noise process X with pulse asymmetry parameter l=0.
    Input:
        X: the values of the shot noise process, 1d numpy array
        g: Intermittency parameter, float
    Output:
        F: The normalized average time above threshold. The unnormalized version is F/td.
    """
    # Deprecated: avT(X, g, l=0) evaluates the same expression.
    warnings.warn("The functionality of avT_l0 has been added to avT.")
    assert g > 0
    g = mm.mpf(g)
    F = np.zeros(len(X))
    for i in range(len(X)):
        if X[i] > -np.sqrt(g):
            F[i] = (
                (mm.gamma(g) * g ** (-g / 2))
                * mm.gammainc(g, a=np.sqrt(g) * X[i] + g, regularized=True)
                * (X[i] + np.sqrt(g)) ** (-g)
                * mm.exp(np.sqrt(g) * X[i] + g)
            )
    return F
| StarcoderdataPython |
3392999 | <filename>src/aioprometheus/asgi/starlette.py
from starlette.requests import Request
from starlette.responses import Response
from aioprometheus import REGISTRY, render
async def metrics(request: Request) -> Response:
    """Render metrics into the format specified by the 'accept' header.

    The registry stored on ``request.app.state`` is used when the app has
    configured a custom one; otherwise the default REGISTRY applies.
    """
    state = request.app.state
    registry = getattr(state, "registry", REGISTRY)
    accept_headers = request.headers.getlist("Accept")
    content, http_headers = render(registry, accept_headers)
    return Response(content=content, media_type=http_headers["Content-Type"])
| StarcoderdataPython |
395764 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-05 08:12
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Contact model with an
    # optional, regex-validated international phone number and a name.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('phone_number', models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format; '+999999999'. Upto 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')])),
                ('contact_name', models.CharField(max_length=50)),
            ],
        ),
    ]
| StarcoderdataPython |
8007009 | <reponame>remram44/find_projections
import matplotlib
import matplotlib.pyplot as plt
def print_projection(pr):
    # Print a one-line summary of the projection (Python 2 print statement):
    # class label, the two attribute indices, total point count, the box
    # bounds on both attributes, positive/negative counts, and the positive
    # fraction (purity) of the box.
    print pr.get_class(),pr.get_att1(),\
          pr.get_att2(),pr.get_total(),\
          pr.get_att1_start(),pr.get_att1_end(),\
          pr.get_att2_start(),pr.get_att2_end(),\
          pr.get_pos(),pr.get_neg(),\
          pr.get_pos()/float(pr.get_pos()+pr.get_neg())
def show_projection(pr, data, labels, headers):
    """Scatter-plot the data on the projection's two attributes and outline
    the projection's bounding box.

    Args:
        pr: projection object exposing get_att1/2 and get_att*_start/end.
        data: 2-D numpy array, samples (rows) by attributes (columns).
        labels: per-sample labels, used as scatter colours.
        headers: attribute names used for the axis labels.
    """
    # BUG FIX: a second bare plt.figure() used to replace this configured
    # figure immediately, so the figsize/dpi/colour settings were silently
    # discarded and an empty extra window was created.
    fig = plt.figure(figsize=(9, 9), dpi=80, facecolor='w', edgecolor='k')
    bounds = [pr.get_att1_start(), pr.get_att1_end(),
              pr.get_att2_start(), pr.get_att2_end()]
    dims = [pr.get_att1(), pr.get_att2()]
    x = data[:, dims[0]]
    y = data[:, dims[1]]
    l = labels
    ax = fig.add_subplot(111)
    ax.add_patch(matplotlib.patches.Rectangle((bounds[0], bounds[2]),
                                              bounds[1] - bounds[0],
                                              bounds[3] - bounds[2],
                                              fill=False))
    matplotlib.pyplot.scatter(x, y, s=5, c=l, linewidths=.25)
    plt.xlabel(headers[dims[0]])
    plt.ylabel(headers[dims[1]])
def show_box(box, data, labels, headers):
    """Scatter-plot the data on the box's two attribute dimensions and draw
    the box outline.

    Args:
        box: flat sequence; box[3:5] are the two attribute indices and
            box[5:] the (x_min, x_max, y_min, y_max) bounds.
        data: 2-D numpy array, samples (rows) by attributes (columns).
        labels: per-sample labels, used as scatter colours.
        headers: attribute names used for the axis labels.
    """
    # BUG FIX: a second bare plt.figure() used to replace this configured
    # figure immediately, so the figsize/dpi settings were silently ignored.
    fig = plt.figure(figsize=(9, 9), dpi=80, facecolor='w', edgecolor='k')
    bounds = box[5:]
    dims = [int(i) for i in box[3:5]]
    x = data[:, dims[0]]
    y = data[:, dims[1]]
    l = labels
    ax = fig.add_subplot(111)
    ax.add_patch(matplotlib.patches.Rectangle((bounds[0], bounds[2]),
                                              bounds[1] - bounds[0],
                                              bounds[3] - bounds[2],
                                              fill=False))
    matplotlib.pyplot.scatter(x, y, s=5, c=l, linewidths=.25)
    plt.xlabel(headers[dims[0]])
    plt.ylabel(headers[dims[1]])
def show_projection_rlabel(pr,data,labels,headers):
    # Scatter plot on the projection's two attributes with the projection
    # box outlined; points are coloured on a continuous label scale
    # ('gnuplot2' colormap) and a colorbar is added.
    bounds = [pr.get_att1_start(),pr.get_att1_end(),\
              pr.get_att2_start(),pr.get_att2_end()]
    dims = [pr.get_att1(),pr.get_att2()]
    x = data[:,dims[0]]
    y = data[:,dims[1]]
    l = labels
    fig = plt.figure()
    ax=fig.add_subplot(111)
    ax.add_patch(matplotlib.patches.Rectangle((bounds[0],bounds[2]),bounds[1]-bounds[0],bounds[3]-bounds[2],fill=False))
    matplotlib.pyplot.scatter(x,y,s=5,c=l,cmap='gnuplot2',linewidths=.25)
    plt.colorbar()
    plt.xlabel(headers[dims[0]])
    plt.ylabel(headers[dims[1]])
def show_projection_binary(pr,data,labels,headers):
    # Scatter plot for binary labels (0 -> red, 1 -> blue) with the
    # projection box outlined.
    bounds = [pr.get_att1_start(),pr.get_att1_end(),\
              pr.get_att2_start(),pr.get_att2_end()]
    dims = [pr.get_att1(),pr.get_att2()]
    classnum = pr.get_class()  # NOTE(review): currently unused
    x = data[:,dims[0]]
    y = data[:,dims[1]]
    l = labels
    fig = plt.figure()
    ax=fig.add_subplot(111)
    ax.add_patch(matplotlib.patches.Rectangle((bounds[0],bounds[2]),bounds[1]-bounds[0],bounds[3]-bounds[2],fill=False))
    use_colors={0: "r",1:"b"}
    matplotlib.pyplot.scatter(x,y,s=5,c=[use_colors[i] for i in l],linewidths=0)
    plt.xlabel(headers[dims[0]])
    plt.ylabel(headers[dims[1]])
| StarcoderdataPython |
6473360 | <reponame>chenrui333/cartridge-cli
from utils import run_command_and_get_output
def test_version_command(cartridge_cmd):
    """Every version spelling (`version`, `-v`, `--version`) should exit 0
    and report the v2 CLI version string."""
    for version_cmd in ["version", "-v", "--version"]:
        rc, output = run_command_and_get_output([cartridge_cmd, version_cmd])
        assert rc == 0
        assert 'Tarantool Cartridge CLI v2' in output
| StarcoderdataPython |
4835758 | <filename>sicwebapp/page/migrations/0014_rename_package_careerful_package.py
# Generated by Django 4.0 on 2022-05-22 20:55
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames Careerful.Package to the
    # conventionally lower-cased `package`.

    dependencies = [
        ('page', '0013_careerful'),
    ]

    operations = [
        migrations.RenameField(
            model_name='careerful',
            old_name='Package',
            new_name='package',
        ),
    ]
| StarcoderdataPython |
68480 | <filename>pymed/pymed.py
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import json
import re
import textwrap
from copy import deepcopy
from .constants import PMD
from Bio import Entrez, Medline
try:
from itertools import izip_longest
except ImportError:
from itertools import zip_longest as izip_longest
try:
# For Python 3.0 and later
from urllib.request import urlopen
from urllib.error import HTTPError
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen, HTTPError
# Regex capturing a DOI (e.g. 10.1000/xyz123) inside a Medline field value.
DOI_REGEX = '(10\\.\\d{4,6}/[^"\'&<% \t\n\r\x0c\x0b]+)'
# Resolver used to turn a DOI into the publisher's article URL.
DOI_ORG = 'http://dx.doi.org/'
# %-format template for rendering a record as a BibTeX entry.
BIBTEX_TMP = r"""
@%(PT)s{%(KEY)s,
Author = {%(AU)s},
Title = {%(TI)s},
Year = {%(YR)s},
Journal = {%(JN)s},
Number = {%(NU)s},
Pages = {%(PG)s},
Volume = {%(VOL)s}}
"""
def read_records(fname):
    """Load records from disk.

    Parameters
    ----------
    fname : string
        absolute path to the file to be loaded.

    Returns
    -------
    recs : instance of pymed.Records
    """
    # Use a context manager so the file handle is closed deterministically
    # (the previous implementation leaked the handle until GC).
    with open(fname) as fh:
        loaded = json.load(fh, object_hook=PubmedRecord)
    return Records(loaded)
def write_records(records, fname, mode='w', indent=None,
                  separators=None):
    """Save records to a JSON file.

    Parameters
    ----------
    records : instance of pymed.Records
        The records to be saved. Entries whose index is listed in
        ``records.exclude_`` are skipped.
    fname : str
        The name of the file.
    mode : str
        The mode of the file handler. Should be 'w', 'wb', 'a' 'ab'.
    indent : int | None
        The indentation to use. If None, it defaults to 4.
    separators : tuple
        The separators to be used for elements and mappings.
    """
    if indent is None:
        indent = 4
    if separators is None:
        separators = (',', ': ')
    kept = [r for i, r in enumerate(records) if i not in records.exclude_]
    # Close the handle deterministically (it was previously leaked).
    with open(fname, mode) as fh:
        json.dump(kept, fh, indent=indent, separators=separators)
def _bibtex_get_author(author_list):
    """Format a Medline author list ("Surname Initials") as BibTeX authors.

    Initials are dot-separated ("Doe JD" -> "Doe, J.D") and individual
    authors are joined with " and ". Single-token entries (e.g. group
    authorships) are passed through unchanged.
    """
    out = []
    for author in author_list:
        names = author.split(' ')
        if len(names) < 2:
            # BUG FIX: single-token authors previously matched neither
            # branch and reused stale loop variables (NameError on the
            # first iteration).
            out += [author]
            continue
        # All tokens but the last form the surname; the last token holds
        # the initials.
        name = ' '.join(names[:len(names) - 1])
        surname = names[-1]
        out += ['%s, %s' % (name, '.'.join(surname))]
    return ' and '.join(out)
def _bibtex_make_id(author, journal, year):
    """Build a citation key "<firstauthor>:<year>".

    The journal argument is currently unused but kept for interface
    compatibility with callers.
    """
    first_token = author[0].split(' ')[0].lower()
    stem = ''.join(c for c in first_token if c.isalpha())
    return '%s:%s' % (stem, year)
def _bibtex_get_pages(pages_str):
    """Normalise a Medline page range for BibTeX.

    Keeps only the first entry when several are listed and expands
    abbreviated end pages ("1223-45" -> "1223-1245").
    """
    if PMD.SEP_PAGES_ENTRY in pages_str:
        pages_str = pages_str.split(PMD.SEP_PAGES_ENTRY)[0]
    if '-' in pages_str:
        pfrom, pto = [int(k) for k in pages_str.split(PMD.SEP_PAGES_RANGE)]
        if pfrom > pto:
            # Medline abbreviates the end page: restore the missing leading
            # digits from the start page. (BUG FIX: the old `pto += pfrom`
            # produced e.g. 1268 for "1223-45" instead of 1245.)
            start, end = str(pfrom), str(pto)
            pto = int(start[:len(start) - len(end)] + end)
        pages_str = PMD.SEP_PAGES_RANGE.join([str(k) for k in [pfrom, pto]])
    return pages_str
def _bibtex_get_publication_type(ins):
    """Map a Medline publication-type field to a BibTeX entry type.

    Currently a stub: every record is rendered as an @article entry.
    """
    out = 'article'
    # XXX currently only article supported.
    return out
def _make_chunks(n, iterable, padvalue=None):
    """Yield successive n-sized tuples from *iterable*, padding the last
    chunk with *padvalue*."""
    iterators = [iter(iterable)] * n
    return izip_longest(*iterators, fillvalue=padvalue)
def _get_doi(rec):
    """Extract a DOI from a record's AID, SO or LID field, if any.

    Returns the first DOI string found, or None (implicitly) when the
    record carries no DOI.
    """
    doi = rec.get('AID', rec.get('SO', rec.get('LID', None)))
    if isinstance(doi, list):
        # AID may hold several identifiers; keep only entries flagged 'doi'.
        doi = ''.join([d for d in doi if 'doi' in d])
    if doi is not None:
        res = re.findall(DOI_REGEX, doi)
        if res:
            return res[0]
def resolve_doi(rec):
    """Resolve the doi of a given record.

    Follows the dx.doi.org redirect and returns the resolved article URL,
    or None (implicitly) when the record has no DOI.
    """
    doi = _get_doi(rec)
    if doi is not None:
        res = None
        try:
            res = urlopen(DOI_ORG + doi)
        except HTTPError as e:
            # Keep the error response: it still exposes the resolved .url.
            res = e
        return res.url
def _export_records(records, fname, end, method):
    """Write every non-excluded record to *fname* using the serialisation
    *method* (e.g. 'to_bibtex'), appending the extension *end* if missing."""
    if not fname.endswith(end):
        fname += end
    with open(fname, 'w') as fd:
        for index, record in enumerate(records):
            if index in records.exclude_:
                continue
            serialise = getattr(record, method)
            fd.write(serialise())
class PubmedRecord(dict):
    """Handle PubMed data.

    Note. As the PubMed ID is unique, instances of PubmedRecord can be used
    as keys in a dict and can be used with set functions. This is useful
    when dealing with a larger number of records.

    Attributes
    ----------
    pubmed_id : str
        The PubMed ID of the record.
    year : int
        The year of the publication.

    Methods
    -------
    as_corpus:
        Creates single string from record values.
    to_ascii:
        Create formatted text from the records for reading and printing.
    to_nbib:
        Create record in Medline format for importing in Bibliography software.
    to_bibtex:
        Create record in BibTex format for importing in Bibliography software.
    get_doi:
        Parse the doi of the article.
    resolve_doi:
        Get the internet location for the article.
    """

    def __init__(self, mapping):
        for k, v in mapping.items():
            self[k] = v

    def as_corpus(self, fields=None):
        """Return record as single string.

        Parameters
        ----------
        fields : list-like | None
            The fields to be included in the corpus. If None,
            defaults to Title, Author and Abstract.

        Returns
        -------
        corpus : str
            The record concatenated as single string.
        """
        corpus = []
        if fields is None:
            fields = ('TI', 'AU', 'AB')
        # BUG FIX: `iteritems` only exists on Python 2 dicts; `items`
        # works on both supported Python versions.
        for k, v in self.items():
            if isinstance(v, list):
                v = ', '.join(v)
            if any([k in fields,
                    fields == 'all']):
                corpus += [v]
        return ''.join(corpus)

    def to_ascii(self, show_fields=('TI', 'AU', 'DP', 'AB'), width=80):
        """Pretty print record.

        Parameters
        ----------
        show_fields : list-like
            The fields to display.
        width : int
            The number of characters to display in one line.
        """
        print('')
        print('----- %s' % self.pubmed_id)
        for field in show_fields:
            pretty_field = '\n' + PMD[field] + ':\n'
            # BUG FIX: the fallback message previously contained an
            # unformatted '%s' placeholder.
            out = self.get(field, '%s not available for this record' % field)
            if isinstance(out, list):
                out = ' '.join(out)
            ind = ' '
            out = textwrap.fill(out, width=width,
                                initial_indent=ind,
                                subsequent_indent=ind)
            print(pretty_field + out)

    def match(self, regexp):
        """Match the text corpus against a regular expression or substring.

        Note. After regexp support in MNE-Python.

        regexp : str
            Regular expression or substring to tell whether a particular
            expression matches characters in a record.
        """
        def is_substring(string):
            # Plain-text queries (letters/digits plus '-', '_', ' ') are
            # wrapped in '.*...*' to match anywhere in the corpus.
            for rep in '-_ ':
                string = string.replace(rep, '')
            return string.isalnum()

        r_ = (re.compile('.*%s.*' % regexp if is_substring(regexp)
                         else regexp))
        return r_.match(self.as_corpus())

    def to_nbib(self):
        """Export record in Medline format.

        Returns
        -------
        nbib_record : str
            The record in Medline format.
        """
        out = '\n\nPMID- ' + self.pubmed_id
        for k, v in self.items():
            if not k == 'PMID':
                # Medline tags are padded to four characters before '- '.
                if len(k) < 4:
                    k += (' ' * (4 - len(k)))
                if isinstance(v, list):
                    v = ' '.join(v)
                v = textwrap.fill(v, subsequent_indent=(' ' * 6))
                out += '\n' + '- '.join([k, v])
        return out

    def to_bibtex(self):
        """Export record in BibTex format.

        Returns
        -------
        bibtex_record : str
            The record in BibTex format.
        """
        fmt = {
            'PT': _bibtex_get_publication_type(self.get('PT', 'NA')),
            'KEY': _bibtex_make_id(self.get('AU', ''), self.get('JT', 'NA'),
                                   self.year),
            'AU': _bibtex_get_author(self.get('AU', 'NA')),
            'TI': self.get('TI', 'NA'),
            'JN': self.get('JT', 'NA').replace('&', '\&'),
            'YR': '%s' % self.year,
            'NU': self.get('IP', 'NA'),
            # BUG FIX: the Medline volume field tag is 'VI', not 'IV'.
            'VOL': self.get('VI', 'NA'),
            'PG': self.get('PG', 'NA')
        }
        return BIBTEX_TMP % fmt

    def get_pdf(self):
        """Find and download the associated PDF (not implemented)."""
        # BUG FIX: `NotImplemented` is a constant, not an exception class;
        # raising it produced a TypeError instead of the intended error.
        raise NotImplementedError('This functionality is not available at '
                                  'present.')

    def get_doi(self):
        """Check whether the record has a doi.

        Returns
        -------
        doi : str | None
            The doi associated with the record. If not available,
            None is returned.
        """
        return _get_doi(self)

    def resolve_doi(self):
        """Get the article address from the doi."""
        return resolve_doi(self)

    @property
    def pubmed_id(self):
        """The pubmed ID of the record."""
        return self.get('PMID', None)

    @property
    def year(self):
        """The year of the publication (parsed from the DP field)."""
        dp = self.get('DP')
        return int(dp[:4]) if dp else dp

    def __hash__(self):
        # Records hash by their unique PubMed ID so they work in sets/dicts.
        return self.pubmed_id.__hash__()
class Records(list):
    """Process PubMed records.

    Note. Records is a subclass of list, hence, for instances of Records
    all list methods are available. However, the list methods semantics
    is slightly adapted to support processing PubMed records.
    These differences can be summarized as follows:

    - the `append` and `extend` methods will only accept iterables of
      PubmedRecord. The same holds true for the `+` and `+=` operators.
    - `pop` updates the exclude_ attribute so all marked indices remain
      valid after removing entries.
    - Slicing will return a new instance of Records, but discards the
      `exclude_` attribute.
    - `insert` requires `exclude_` to be empty. This is to prevent funky
      side-effects.

    To access standard list functionality the convenience method `tolist`
    is provided.

    Parameters
    ----------
    records : listlike
        An Instance of Records or a list of PubmedRecord instances.
    """

    def __init__(self, records=None):
        # Indices marked for removal (e.g. by `browse`), applied by `drop`.
        self.exclude_ = []
        if records:
            self.extend(records)

    def browse(self, show_fields=None, inplace=True, width=80):
        """Browse and drop records.

        This method allows to iterate over records, display their contents
        and to make a decision whether to keep the record or not.
        If the user input is `n`, the record will be discarded or marked for
        removal, depending on the parameters passed.
        If the user input is `q`, the procedure halts.

        Parameters
        ----------
        show_fields : list-like
            The fields to display.
        inplace : bool
            If True, records are dropped in-place. If False, the indices
            are added to the `exclude_` attribute.
        width : int
            The number of characters to display in one line.
        """
        if show_fields is None:
            show_fields = 'AU', 'TI', 'AB',
        remove_idx = []
        for idx, rec in enumerate(self):
            # BUG FIX: exclude_ holds indices, so compare the index rather
            # than the record object (which never matched an int).
            if idx not in self.exclude_:
                rec.to_ascii(show_fields=show_fields, width=width)
                print('\n --> keep this record? (y/n/q)')
                res = input()
                if res == 'n':
                    remove_idx += [idx]
                elif res == 'q':
                    break
        # BUG FIX: this used to call self.drop(remove_idx), but drop()
        # takes no arguments (TypeError). Mark the records first, then
        # drop in-place if requested.
        self.exclude_ += [i for i in remove_idx if i not in self.exclude_]
        if inplace and self.exclude_:
            self.drop()

    def find(self, regexp):
        """Find records for which a substring or regexp matches.

        regexp : str
            Regular expression or substring to select particular records.
            E.g. 'Brain' will return all records in which this substring
            is contained.
        """
        return Records(r for r in self if r.match(regexp))

    def drop(self):
        """Delete the records whose indices are listed in ``exclude_``.

        Returns
        -------
        self : instance of pymed.Records
        """
        # BUG FIX: the previous implementation removed elements while
        # enumerating the list itself, which shifted indices mid-iteration
        # and skipped records. Build the kept list first instead.
        kept = [r for ii, r in enumerate(self) if ii not in self.exclude_]
        self.exclude_ = []
        del self[:]
        # Bypass extend()'s re-validation: these records were already
        # validated when they entered the container.
        list.extend(self, kept)
        return self

    def save(self, fname, mode='w', indent=None, separators=None):
        """Save records to json file.

        Parameters
        ----------
        fname : str
            The name of the file.
        mode : str
            The mode of the file handler. Should be 'w', 'wb', 'a' 'ab'.
        indent : int | None
            The indentation to use. If None, it defaults to 4.
        separators : tuple
            The separators to be used for elements and mappings.
        """
        write_records(self, fname)

    def save_as_bibtex(self, fname):
        """Export records to a BibTeX file.

        Parameters
        ----------
        fname : str
            The name of the file to save the records in.
        """
        _export_records(self, fname, '.bib', 'to_bibtex')

    def save_as_nbib(self, fname):
        """Export records to a Medline (.nbib) file.

        Parameters
        ----------
        fname : str
            The name of the file to save the records in.
        """
        _export_records(self, fname, '.nbib', 'to_nbib')

    def tolist(self):
        """Convert records to a plain list.

        Returns
        -------
        The records as a list.
        """
        return list(self)

    def copy(self):
        """Return a deep copy of the records.

        Returns
        -------
        A new, independent instance of pymed.Records.
        """
        return deepcopy(self)

    def append(self, value):
        """Append a single record.

        Parameters
        ----------
        value : instance of pymed.PubmedRecord
            A single PubMed record.
        """
        if not isinstance(value, PubmedRecord):
            raise TypeError('The item to be added must be an instance of '
                            'PubmedRecord.')
        self.insert(len(self), value)

    def extend(self, values):
        """Extend with an iterable of records.

        Parameters
        ----------
        values : listlike
            An iterable of single PubMed records.
        """
        for v in values:
            if not isinstance(v, PubmedRecord):
                raise ValueError('Finder item must be of type Record')
            self.append(v)

    def insert(self, index, record):
        """Insert a record.

        Parameters
        ----------
        index : int
            The position to insert the record at.
        record : instance of pymed.PubmedRecord
            The PubmedRecord to be inserted.
        """
        if self.exclude_:
            raise RuntimeError('Indices marked for exclusion must be dropped '
                               'before inserting new records. Please check the'
                               ' .exclude_ attribute')
        else:
            list.insert(self, index, record)

    def pop(self, index):
        """Remove and return a record.

        Parameters
        ----------
        index : int
            The position of the record to be popped.

        Returns
        -------
        pmd : instance of pymed.PubmedRecord
            The PubMed record removed from the container.
        """
        if index in self.exclude_:
            self.exclude_.remove(index)
        # BUG FIX: marked indices beyond the popped position shift down by
        # one; keep them in sync so drop() removes the intended records.
        self.exclude_ = [i - 1 if i > index else i for i in self.exclude_]
        return list.pop(self, index)

    def __repr__(self):
        """Summarize Records: entry count and publication year range."""
        out = '<Records | %i entries' % len(self)
        if self:
            minyear = min(r.year for r in self)
            maxyear = max(r.year for r in self)
            if minyear == maxyear:
                # BUG FIX: the single-year case was missing the ' | '
                # separator before the year.
                yrange = ' | %i' % minyear
            else:
                yrange = ' | %i - %i' % (maxyear, minyear)
        else:
            yrange = ''
        return out + '%s>' % yrange

    def __add__(self, other):
        """Add different instances of Records."""
        if not isinstance(other, Records):
            raise TypeError('Only instances of Records can be added together.')
        return Records(list.__add__(self, other))

    def __iadd__(self, values):
        """Append operator (+=)."""
        if not isinstance(values, Records):
            raise TypeError('Only instances of Records can be added together.')
        self.extend(values)
        return self

    def __getslice__(self, *args):
        """Slicing operator (Python 2 only; Python 3 uses __getitem__)."""
        return Records(list.__getslice__(self, *args))

    def __getitem__(self, index):
        """Indexing / slicing; slices return a new Records instance.

        BUG FIX: on Python 3 slicing dispatches to __getitem__ rather than
        __getslice__, so slices previously returned a plain list, breaking
        the documented contract.
        """
        result = list.__getitem__(self, index)
        if isinstance(index, slice):
            return Records(result)
        return result
def query_records(term, client, pubmed_fields='all', chunksize=50):
    """Get records from PubMed search.

    Parameters
    ----------
    term : string
        The search term.
    client : string
        the user's email address (important for not getting blocked).
    pubmed_fields: list-like
        PubMed fields to constrain the search to.
    chunksize : integer
        size of the searches per query. In case the query fails, try
        using a slightly lower chunk size.

    Returns
    -------
    recs : instance of pymed.Records
    """
    if pubmed_fields is None:
        pubmed_fields = PMD.DEF_FIELDS
    print('Starting query.')
    print('... please be patient. This may take some time.')
    Entrez.email = client
    handle = Entrez.egquery(term=term)  # create handle
    record = Entrez.read(handle)  # launch search an return records
    # Total hit count restricted to the pubmed database.
    _retmax = sum(int(r['Count']) for r in record['eGQueryResult']
                  if r['DbName'] == 'pubmed')
    print('... %i records found.' % _retmax)
    print('... downloading records.')
    handle = Entrez.esearch(db='pubmed', term=term,
                            retmax=str(_retmax),
                            usehistory='n')  # create another handle
    hit = Entrez.read(handle)  # parse pubmed IDs...
    id_list = list(hit['IdList'])
    if not id_list:
        print(r"I couldn't find anything")
    # Fetch in fixed-size chunks; the trailing chunk is padded with ''.
    chunks = _make_chunks(chunksize, id_list, '')

    def match(key):
        # Decide whether a Medline field should be kept in the record.
        # NOTE(review): the final branch *returns* (does not raise) a
        # RuntimeError instance, which is truthy -- verify intent.
        if isinstance(pubmed_fields, list):
            return True if key in pubmed_fields else False
        elif pubmed_fields == 'all':
            return True
        else:
            return RuntimeError('No instruction how to select fields.')

    recs = Records()
    while True:
        chunk = next(chunks, 0)
        if chunk == 0:  # initialize end
            break
        elif not chunk:  # iterate through trailing block.
            # NOTE(review): a padded trailing chunk is a non-empty tuple,
            # so `not chunk` is False and the '' padding is never stripped
            # before the join below -- verify against Entrez behaviour.
            chunk = [ch for ch in chunk if ch]
        handle = Entrez.efetch(db='pubmed', id=','.join(chunk),
                               rettype='medline', retmode='text')
        for rec in Medline.parse(handle):
            recs.append(PubmedRecord(dict((k, v) for k, v in rec.items()
                                          if match(k))))
    print('Ready.')
    return recs
| StarcoderdataPython |
3456088 | <reponame>mtk-watch/android_external_v8
# Copyright 2006-2009 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Dictionary that is passed as defines for js2c.py.
# Used for defines that must be defined for all native JS files.
# NOTE: this is a js2c macro file, not Python -- it is preprocessed into
# the native JS sources; do not import it as a Python module.

define NONE = 0;
define READ_ONLY = 1;
define DONT_ENUM = 2;
define DONT_DELETE = 4;

# 2^32 - 1
define kMaxUint32 = 4294967295;

# Type query macros.
#
# Note: We have special support for typeof(foo) === 'bar' in the compiler.
#       It will *not* generate a runtime typeof call for the most important
#       values of 'bar'.
macro IS_ARRAY(arg) = (%_IsArray(arg));
macro IS_NULL(arg) = (arg === null);
macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
macro IS_UNDEFINED(arg) = (arg === (void 0));

# Macro for ES queries of the type: "Type(O) is Object."
macro IS_RECEIVER(arg) = (%_IsJSReceiver(arg));

# Macro for ES queries of the type: "IsCallable(O)"
macro IS_CALLABLE(arg) = (typeof(arg) === 'function');

# Macro for ES RequireObjectCoercible
# https://tc39.github.io/ecma262/#sec-requireobjectcoercible
# Throws a TypeError of the form "[functionName] called on null or undefined".
macro REQUIRE_OBJECT_COERCIBLE(arg, functionName) = if (IS_NULL(%IS_VAR(arg)) || IS_UNDEFINED(arg)) throw %make_type_error(kCalledOnNullOrUndefined, functionName);

# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro TO_BOOLEAN(arg) = (!!(arg));
macro TO_INTEGER(arg) = (%_ToInteger(arg));
macro TO_LENGTH(arg) = (%_ToLength(arg));
macro TO_STRING(arg) = (%_ToString(arg));
macro TO_NUMBER(arg) = (%_ToNumber(arg));
macro TO_OBJECT(arg) = (%_ToObject(arg));
macro HAS_OWN_PROPERTY(obj, key) = (%_Call(ObjectHasOwnProperty, obj, key));

macro DEFINE_METHODS_LEN(obj, class_def, len) = %DefineMethodsInternal(obj, class class_def, len);
macro DEFINE_METHOD_LEN(obj, method_def, len) = %DefineMethodsInternal(obj, class { method_def }, len);
macro DEFINE_METHODS(obj, class_def) = DEFINE_METHODS_LEN(obj, class_def, -1);
macro DEFINE_METHOD(obj, method_def) = DEFINE_METHOD_LEN(obj, method_def, -1);

# Constants. The compiler constant folds them.
define INFINITY = (1/0);
define UNDEFINED = (void 0);

# This should be kept consistent with Intl::Type.
define NUMBER_FORMAT_TYPE = 0;
define COLLATOR_TYPE = 1;
define DATE_TIME_FORMAT_TYPE = 2;
define PLURAL_RULES_TYPE = 3;
define BREAK_ITERATOR_TYPE = 4;
define LOCALE_TYPE = 5;
| StarcoderdataPython |
9633161 | # flake8: noqa: F405
import re
import pytest
from rich.console import Console
from rich.table import Table
from pyinaturalist.formatters import *
from test.sample_data import *
# Lists of JSON records that can be formatted into tables
# Entries are either explicit pairs of j_* fixtures or names that are
# presumably already lists of records (j_comments, j_places_nearby, etc.
# come from test.sample_data — verify shapes there).
TABULAR_RESPONSES = [
    j_comments,
    [j_controlled_term_1, j_controlled_term_2],
    [j_identification_1, j_identification_2],
    [j_observation_1, j_observation_2],
    j_obs_species_counts,
    j_life_list,
    [j_listed_taxon_1, j_listed_taxon_2_partial],
    [j_message],
    [j_photo_1, j_photo_2_partial],
    [j_place_1, j_place_2],
    j_places_nearby,
    [j_project_1, j_project_2],
    j_search_results,
    [j_taxon_1, j_taxon_2_partial],
    [j_user_1, j_user_2_partial],
]
def get_variations(response_object):
    """Build the three input shapes every formatting function should accept:
    a full API response dict, a bare list of records, and a single record.
    """
    wrapped_response = {'results': [response_object]}
    record_list = [response_object]
    return [wrapped_response, record_list, response_object]
# TODO: More thorough tests for table content
@pytest.mark.parametrize('response', TABULAR_RESPONSES)
def test_format_table(response):
    """format_table() should build a rich Table containing each record's ID."""
    table = format_table(response)
    assert isinstance(table, Table)

    def _record_id(value):
        nested_id = value.get('record', {}).get('id') or value.get('taxon', {}).get('id')
        return str(value.get('id') or nested_id)

    # Render the table to text and check that at least the object IDs show up
    console = Console()
    rendered = '\n'.join(str(line) for line in console.render_lines(table))
    if isinstance(response, list):
        assert all(_record_id(value) in rendered for value in response)
# TODO: Test content written to stdout. For now, just make sure it doesn't explode.
@pytest.mark.parametrize('response', TABULAR_RESPONSES)
def test_pprint(response):
    """Smoke test: pprint() should render any tabular response without raising."""
    console = Console(force_terminal=False, width=120)
    with console.capture() as output:
        pprint(response)
    # Previously the captured text was assigned to an unused local; at least
    # assert the capture produced a string so the test checks *something*.
    assert isinstance(output.get(), str)
@pytest.mark.parametrize('input', get_variations(j_controlled_term_1))
def test_format_controlled_terms(input):
    """Controlled terms render as '[id] label: value, value, ...'."""
    expected = '[12] Plant Phenology: No Evidence of Flowering, Flowering, Fruiting, Flower Budding'
    assert format_controlled_terms(input) == expected
@pytest.mark.parametrize('input', get_variations(j_identification_1))
def test_format_identifications(input):
    """Identifications render as '[id] rank: taxon (category) added on date by user'."""
    assert (
        format_identifications(input)
        == '[155554373] Species: 60132 (supporting) added on Feb 18, 2021 by jkcook'
    )
@pytest.mark.parametrize('input', get_variations(j_observation_1))
def test_format_observation(input):
    """Observations render with ID, emoji, taxon, date, observer, and place."""
    expected = (
        '[16227955] 🪲 Species: Lixus bardanae observed on Sep 05, 2018 '
        'by niconoe at 54 rue des Badauds'
    )
    assert format_observations(input) == expected
@pytest.mark.parametrize('input', get_variations(j_project_1))
def test_format_projects(input):
    """Projects render as '[id] title'."""
    assert format_projects(input) == '[8291] PNW Invasive Plant EDDR'
@pytest.mark.parametrize('input', get_variations(j_place_1))
def test_format_places(input):
    """Places render as '[id] name'."""
    assert format_places(input) == '[89191] Conservation Area Riversdale'
def test_format_places__nearby():
    """Nearby-places responses render one '[id] name' entry per line."""
    expected = """
[97394] North America
[97395] Asia
[97393] Oceania
[11770] Mehedinti
[119755] Mahurangi College
[150981] Ceap Breatainn
""".strip()
    assert format_places(j_places_nearby) == expected
def test_format_search_results():
    """Mixed search results render with a bracketed record-type prefix."""
    expected = '\n'.join([
        '[Taxon] [47792] 🐛 Order: Odonata (Dragonflies and Damselflies)',
        '[Place] [113562] Odonates of Peninsular India and Sri Lanka',
        '[Project] [9978] Ohio Dragonfly Survey (Ohio Odonata Survey)',
        '[User] [113886] odonatanb (Gilles Belliveau)',
    ])
    assert format_search_results(j_search_results) == expected
@pytest.mark.parametrize('input', get_variations(j_species_count_1))
def test_format_species_counts(input):
    """Species counts render as taxon info followed by the observation count."""
    assert (
        format_species_counts(input)
        == '[48484] 🐞 Species: Harmonia axyridis (Asian Lady Beetle): 31'
    )
@pytest.mark.parametrize('input', get_variations(j_taxon_1))
def test_format_taxa__with_common_name(input):
    """Taxa with a common name render it in parentheses after the binomial."""
    expected = '[70118] 🪲 Species: Nicrophorus vespilloides (Lesser Vespillo Burying Beetle)'
    assert format_taxa(input) == expected
@pytest.mark.parametrize('input', get_variations(j_taxon_3_no_common_name))
def test_format_taxon__without_common_name(input):
    """Taxa without a common name omit the parenthesized suffix."""
    expected = '[124162] 🪰 Species: Temnostoma vespiforme'
    assert format_taxa(input) == expected
@pytest.mark.parametrize('input', get_variations(j_user_2_partial))
def test_format_users(input):
    """Users render as '[id] login (display name)'."""
    assert format_users(input) == '[886482] niconoe (Nicolas Noé)'
def test_simplify_observation():
    """simplify_observations() output should still be accepted by the formatter."""
    simplified = simplify_observations(j_observation_1)
    # The exact simplified structure isn't checked here; it just needs to
    # remain formattable (truthy result means formatting succeeded).
    assert format_observations(simplified)
# Expected rich pretty-print rendering of j_observation_1; compared against
# actual output with runs of spaces collapsed (see test_pretty_print).
PRINTED_OBSERVATION = """
Observation(
id=16227955,
created_at=datetime.datetime(2018, 9, 5, 0, 0, tzinfo=tzoffset('Europe/Paris', 3600)),
captive=False,
community_taxon_id=493595,
description='',
faves=[],
geoprivacy=None,
identifications_count=2,
identifications_most_agree=True,
identifications_most_disagree=False,
identifications_some_agree=True,
license_code='CC0',
location=(50.646894, 4.360086),
mappable=True,
num_identification_agreements=2,
num_identification_disagreements=0,
oauth_application_id=None,
obscured=False,
observed_on=datetime.datetime(2018, 9, 5, 14, 6, tzinfo=tzoffset('Europe/Paris', 3600)),
outlinks=[{'source': 'GBIF', 'url': 'http://www.gbif.org/occurrence/1914197587'}],
out_of_range=None,
owners_identification_from_vision=True,
place_guess='54 rue des Badauds',
place_ids=[7008, 8657, 14999, 59614, 67952, 80627, 81490, 96372, 96794, 97391, 97582, 108692],
positional_accuracy=23,
preferences={'prefers_community_taxon': None},
project_ids=[],
project_ids_with_curator_id=[],
project_ids_without_curator_id=[],
public_positional_accuracy=23,
quality_grade='research',
quality_metrics=[],
reviewed_by=[180811, 886482, 1226913],
site_id=1,
sounds=[],
species_guess='Lixus bardanae',
tags=[],
updated_at=datetime.datetime(2018, 9, 22, 19, 19, 27, tzinfo=tzoffset(None, 7200)),
uri='https://www.inaturalist.org/observations/16227955',
uuid='6448d03a-7f9a-4099-86aa-ca09a7740b00',
votes=[],
annotations=[],
comments=[
borisb on Sep 05, 2018: I now see: Bonus species on observation! You ma...,
borisb on Sep 05, 2018: suspect L. bardanae - but sits on Solanum (non-...
],
identifications=[
[34896306] 🪲 Genus: Lixus (improving) added on Sep 05, 2018 by niconoe,
[34926789] 🪲 Species: Lixus bardanae (improving) added on Sep 05, 2018 by borisb,
[36039221] 🪲 Species: Lixus bardanae (supporting) added on Sep 22, 2018 by jpreudhomme
],
ofvs=[],
photos=[
[24355315] https://static.inaturalist.org/photos/24355315/original.jpeg?1536150664 (CC-BY, 1445x1057),
[24355313] https://static.inaturalist.org/photos/24355313/original.jpeg?1536150659 (CC-BY, 2048x1364)
],
project_observations=[],
taxon=[493595] 🪲 Species: Lixus bardanae,
user=[886482] niconoe (<NAME>)
)
"""
def test_get_model_fields():
    """Ensure that nested model objects are included in get_model_fields() output"""
    observation = Observation.from_json(j_observation_1)
    fields = get_model_fields(observation)
    # 8 nested model attributes are expected in addition to the regular attrs
    expected_len = len(Observation.__attrs_attrs__) + 8
    assert len(fields) == expected_len
def test_pretty_print():
    """Test rich.pretty with modifications, via get_model_fields()"""
    console = Console(force_terminal=False, width=120)
    observation = Observation.from_json(j_observation_1)

    with console.capture() as output:
        console.print(observation)

    def _collapse(text):
        # Ignore differences in indentation when comparing rendered output
        return re.sub(' +', ' ', text.strip())

    assert _collapse(output.get()) == _collapse(PRINTED_OBSERVATION)
| StarcoderdataPython |
1847754 | #coding:utf-8
import unittest
from cvtron.modeling.detector.slim_object_detector import SlimObjectDetector
class TestDetector(unittest.TestCase):
    """Smoke test: run SlimObjectDetector inference on a sample image."""

    def test_detector(self):
        detector = SlimObjectDetector()
        # NOTE(review): these are machine-specific absolute paths; the test
        # only runs where this frozen model and label map actually exist.
        detector.set_label_map('/media/sfermi/Programming/project/web/cvtron/cvtron-serve/cvtron-serve/tmp/img_d_b79cc2ba/label_map.pbtxt')
        detector.init('/media/sfermi/Programming/project/web/cvtron/cvtron-serve/cvtron-serve/tmp/img_d_b79cc2ba/frozen_inference_graph.pb')
        detection_result = detector.detect('tests/cat.jpg')
        print(detection_result)
# Allow running this file directly as a script (in addition to via a runner).
if __name__ == '__main__':
    unittest.main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.