| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
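Each record below follows this schema; the `content` column carries an entire source file, which is why each record spans many lines instead of a single table row. A minimal sketch of loading and inspecting a shard with this schema, assuming a hypothetical local Parquet export:

```python
# Minimal sketch, assuming this shard has been exported to a hypothetical
# local file "shard.parquet"; column names follow the schema table above.
import pandas as pd

df = pd.read_parquet("shard.parquet")
print(df[["hexsha", "lang", "size", "max_stars_count"]].head())
print(df.loc[0, "content"][:200])   # the full source file lives in 'content'
```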
hexsha: 5d02f65a15a35f3f5f2205ff5270eaa60e785026 | size: 12,032 | ext: py | lang: Python
max_stars: count 207 | repo mzazakeith/flask-blog @ 2833404cc5e96ffdbfb767f35b9caf2bdcce7997 | path virtual/lib/python3.6/site-packages/sqlalchemy/sql/default_comparator.py | licenses ["MIT"] | events 2018-10-01T08:53:01.000Z to 2022-03-14T12:15:54.000Z
max_issues: count 32 | repo mzazakeith/flask-blog @ 2833404cc5e96ffdbfb767f35b9caf2bdcce7997 | path virtual/lib/python3.6/site-packages/sqlalchemy/sql/default_comparator.py | licenses ["MIT"] | events 2018-05-01T05:24:43.000Z to 2022-03-11T23:20:39.000Z
max_forks: count 53 | repo anish03/weather-dash @ d517fa9da9028d1fc5d8fd71d77cee829ddee87b | path lib/python2.7/site-packages/sqlalchemy/sql/default_comparator.py | licenses ["MIT"] | events 2019-03-12T16:50:21.000Z to 2022-03-15T23:16:18.000Z
content:
# sql/default_comparator.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Default implementation of SQL comparison operations.
"""
from .. import exc, util
from . import type_api
from . import operators
from .elements import BindParameter, True_, False_, BinaryExpression, \
Null, _const_expr, _clause_element_as_expr, \
ClauseList, ColumnElement, TextClause, UnaryExpression, \
collate, _is_literal, _literal_as_text, ClauseElement, and_, or_, \
Slice, Visitable, _literal_as_binds, CollectionAggregate
from .selectable import SelectBase, Alias, Selectable, ScalarSelect
def _boolean_compare(expr, op, obj, negate=None, reverse=False,
_python_is_types=(util.NoneType, bool),
                     result_type=None,
**kwargs):
if result_type is None:
result_type = type_api.BOOLEANTYPE
if isinstance(obj, _python_is_types + (Null, True_, False_)):
# allow x ==/!= True/False to be treated as a literal.
# this comes out to "== / != true/false" or "1/0" if those
# constants aren't supported and works on all platforms
if op in (operators.eq, operators.ne) and \
isinstance(obj, (bool, True_, False_)):
return BinaryExpression(expr,
_literal_as_text(obj),
op,
type_=result_type,
negate=negate, modifiers=kwargs)
elif op in (operators.is_distinct_from, operators.isnot_distinct_from):
return BinaryExpression(expr,
_literal_as_text(obj),
op,
type_=result_type,
negate=negate, modifiers=kwargs)
else:
# all other None/True/False uses IS, IS NOT
if op in (operators.eq, operators.is_):
return BinaryExpression(expr, _const_expr(obj),
operators.is_,
negate=operators.isnot,
type_=result_type
)
elif op in (operators.ne, operators.isnot):
return BinaryExpression(expr, _const_expr(obj),
operators.isnot,
negate=operators.is_,
type_=result_type
)
else:
raise exc.ArgumentError(
"Only '=', '!=', 'is_()', 'isnot()', "
"'is_distinct_from()', 'isnot_distinct_from()' "
"operators can be used with None/True/False")
else:
obj = _check_literal(expr, op, obj)
if reverse:
return BinaryExpression(obj,
expr,
op,
type_=result_type,
negate=negate, modifiers=kwargs)
else:
return BinaryExpression(expr,
obj,
op,
type_=result_type,
negate=negate, modifiers=kwargs)
def _custom_op_operate(expr, op, obj, reverse=False, result_type=None,
**kw):
if result_type is None:
if op.return_type:
result_type = op.return_type
elif op.is_comparison:
result_type = type_api.BOOLEANTYPE
return _binary_operate(
expr, op, obj, reverse=reverse, result_type=result_type, **kw)
def _binary_operate(expr, op, obj, reverse=False, result_type=None,
**kw):
obj = _check_literal(expr, op, obj)
if reverse:
left, right = obj, expr
else:
left, right = expr, obj
if result_type is None:
op, result_type = left.comparator._adapt_expression(
op, right.comparator)
return BinaryExpression(
left, right, op, type_=result_type, modifiers=kw)
def _conjunction_operate(expr, op, other, **kw):
if op is operators.and_:
return and_(expr, other)
elif op is operators.or_:
return or_(expr, other)
else:
raise NotImplementedError()
def _scalar(expr, op, fn, **kw):
return fn(expr)
def _in_impl(expr, op, seq_or_selectable, negate_op, **kw):
seq_or_selectable = _clause_element_as_expr(seq_or_selectable)
if isinstance(seq_or_selectable, ScalarSelect):
return _boolean_compare(expr, op, seq_or_selectable,
negate=negate_op)
elif isinstance(seq_or_selectable, SelectBase):
# TODO: if we ever want to support (x, y, z) IN (select x,
# y, z from table), we would need a multi-column version of
# as_scalar() to produce a multi- column selectable that
# does not export itself as a FROM clause
return _boolean_compare(
expr, op, seq_or_selectable.as_scalar(),
negate=negate_op, **kw)
elif isinstance(seq_or_selectable, (Selectable, TextClause)):
return _boolean_compare(expr, op, seq_or_selectable,
negate=negate_op, **kw)
elif isinstance(seq_or_selectable, ClauseElement):
if isinstance(seq_or_selectable, BindParameter) and \
seq_or_selectable.expanding:
return _boolean_compare(
expr, op,
seq_or_selectable,
negate=negate_op)
else:
raise exc.InvalidRequestError(
'in_() accepts'
' either a list of expressions, '
'a selectable, or an "expanding" bound parameter: %r'
% seq_or_selectable)
# Handle non selectable arguments as sequences
args = []
for o in seq_or_selectable:
if not _is_literal(o):
if not isinstance(o, operators.ColumnOperators):
raise exc.InvalidRequestError(
'in_() accepts'
' either a list of expressions, '
'a selectable, or an "expanding" bound parameter: %r' % o)
elif o is None:
o = Null()
else:
o = expr._bind_param(op, o)
args.append(o)
if len(args) == 0:
op, negate_op = (
operators.empty_in_op,
operators.empty_notin_op) if op is operators.in_op \
else (
operators.empty_notin_op,
operators.empty_in_op)
return _boolean_compare(expr, op,
ClauseList(*args).self_group(against=op),
negate=negate_op)
def _getitem_impl(expr, op, other, **kw):
if isinstance(expr.type, type_api.INDEXABLE):
other = _check_literal(expr, op, other)
return _binary_operate(expr, op, other, **kw)
else:
_unsupported_impl(expr, op, other, **kw)
def _unsupported_impl(expr, op, *arg, **kw):
raise NotImplementedError("Operator '%s' is not supported on "
"this expression" % op.__name__)
def _inv_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.__inv__`."""
if hasattr(expr, 'negation_clause'):
return expr.negation_clause
else:
return expr._negate()
def _neg_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.__neg__`."""
return UnaryExpression(expr, operator=operators.neg, type_=expr.type)
def _match_impl(expr, op, other, **kw):
"""See :meth:`.ColumnOperators.match`."""
return _boolean_compare(
expr, operators.match_op,
_check_literal(
expr, operators.match_op, other),
result_type=type_api.MATCHTYPE,
negate=operators.notmatch_op
if op is operators.match_op else operators.match_op,
**kw
)
def _distinct_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.distinct`."""
return UnaryExpression(expr, operator=operators.distinct_op,
type_=expr.type)
def _between_impl(expr, op, cleft, cright, **kw):
"""See :meth:`.ColumnOperators.between`."""
return BinaryExpression(
expr,
ClauseList(
_check_literal(expr, operators.and_, cleft),
_check_literal(expr, operators.and_, cright),
operator=operators.and_,
group=False, group_contents=False),
op,
negate=operators.notbetween_op
if op is operators.between_op
else operators.between_op,
modifiers=kw)
def _collate_impl(expr, op, other, **kw):
return collate(expr, other)
# a mapping of operators with the method they use, along with
# their negated operator for comparison operators
operator_lookup = {
"and_": (_conjunction_operate,),
"or_": (_conjunction_operate,),
"inv": (_inv_impl,),
"add": (_binary_operate,),
"mul": (_binary_operate,),
"sub": (_binary_operate,),
"div": (_binary_operate,),
"mod": (_binary_operate,),
"truediv": (_binary_operate,),
"custom_op": (_custom_op_operate,),
"json_path_getitem_op": (_binary_operate, ),
"json_getitem_op": (_binary_operate, ),
"concat_op": (_binary_operate,),
"any_op": (_scalar, CollectionAggregate._create_any),
"all_op": (_scalar, CollectionAggregate._create_all),
"lt": (_boolean_compare, operators.ge),
"le": (_boolean_compare, operators.gt),
"ne": (_boolean_compare, operators.eq),
"gt": (_boolean_compare, operators.le),
"ge": (_boolean_compare, operators.lt),
"eq": (_boolean_compare, operators.ne),
"is_distinct_from": (_boolean_compare, operators.isnot_distinct_from),
"isnot_distinct_from": (_boolean_compare, operators.is_distinct_from),
"like_op": (_boolean_compare, operators.notlike_op),
"ilike_op": (_boolean_compare, operators.notilike_op),
"notlike_op": (_boolean_compare, operators.like_op),
"notilike_op": (_boolean_compare, operators.ilike_op),
"contains_op": (_boolean_compare, operators.notcontains_op),
"startswith_op": (_boolean_compare, operators.notstartswith_op),
"endswith_op": (_boolean_compare, operators.notendswith_op),
"desc_op": (_scalar, UnaryExpression._create_desc),
"asc_op": (_scalar, UnaryExpression._create_asc),
"nullsfirst_op": (_scalar, UnaryExpression._create_nullsfirst),
"nullslast_op": (_scalar, UnaryExpression._create_nullslast),
"in_op": (_in_impl, operators.notin_op),
"notin_op": (_in_impl, operators.in_op),
"is_": (_boolean_compare, operators.is_),
"isnot": (_boolean_compare, operators.isnot),
"collate": (_collate_impl,),
"match_op": (_match_impl,),
"notmatch_op": (_match_impl,),
"distinct_op": (_distinct_impl,),
"between_op": (_between_impl, ),
"notbetween_op": (_between_impl, ),
"neg": (_neg_impl,),
"getitem": (_getitem_impl,),
"lshift": (_unsupported_impl,),
"rshift": (_unsupported_impl,),
"contains": (_unsupported_impl,),
}
def _check_literal(expr, operator, other, bindparam_type=None):
if isinstance(other, (ColumnElement, TextClause)):
if isinstance(other, BindParameter) and \
other.type._isnull:
other = other._clone()
other.type = expr.type
return other
elif hasattr(other, '__clause_element__'):
other = other.__clause_element__()
elif isinstance(other, type_api.TypeEngine.Comparator):
other = other.expr
if isinstance(other, (SelectBase, Alias)):
return other.as_scalar()
elif not isinstance(other, Visitable):
return expr._bind_param(operator, other, type_=bindparam_type)
else:
return other
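The `operator_lookup` table near the end of the file is a dict-based dispatch: each operator name maps to a handler plus, for comparison operators, the operator that negates it. A standalone sketch of the same pattern (illustrative only, not SQLAlchemy's API):

```python
# Standalone sketch of the dispatch idea behind operator_lookup: each name
# maps to an implementation plus the name of its negation, so NOT can be
# folded in with a single table lookup.
import operator

DISPATCH = {
    "eq": (operator.eq, "ne"),
    "ne": (operator.ne, "eq"),
    "lt": (operator.lt, "ge"),
    "ge": (operator.ge, "lt"),
}

def compare(name, a, b, negate=False):
    fn, negation = DISPATCH[name]
    if negate:
        fn = DISPATCH[negation][0]
    return fn(a, b)

assert compare("lt", 1, 2)
assert compare("eq", 1, 2, negate=True)   # i.e. 1 != 2
```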
avg_line_length: 36.907975 | max_line_length: 79 | alphanum_fraction: 0.593251
quality signals (qsc_*_quality_signal columns, schema order): 1,297 | 12,032 | 5.184271 | 0.189668 | 0.0232 | 0.05815 | 0.026026 | 0.283611 | 0.180101 | 0.163147 | 0.133998 | 0.118084 | 0.092356 | 0 | 0.001312 | 0.303108 | 12,032 | 325 | 80 | 37.021538 | 0.800596 | 0.086769 | 0 | 0.262948 | 0 | 0 | 0.069476 | 0.004022 | 0 | 0 | 0 | 0.003077 | 0 | 1 | 0.059761 | false | 0 | 0.01992 | 0.007968 | 0.191235 | 0
raw qsc_* columns (schema order): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0
hexsha: 5d0300b0501feb02f7f7fdb3d0f6bb946c42313a | size: 2,153 | ext: py | lang: Python
max_stars: count null | repo klharshini/recipe-django-api @ 7ceb00ab26f6e0d19196519ece297d2f4d616a5d | path recipes/serializers.py | licenses ["Apache-2.0"] | events null
max_issues: count 3 | repo klharshini/recipe-django-api @ 7ceb00ab26f6e0d19196519ece297d2f4d616a5d | path recipes/serializers.py | licenses ["Apache-2.0"] | events 2020-06-05T21:58:22.000Z to 2021-06-10T21:40:50.000Z
max_forks: count null | repo klharshini/recipe-django-api @ 7ceb00ab26f6e0d19196519ece297d2f4d616a5d | path recipes/serializers.py | licenses ["Apache-2.0"] | events null
content:
from django.contrib.auth.validators import UnicodeUsernameValidator
from rest_framework import serializers
from django.contrib.auth.models import User
from recipes.models import Recipe, Ingredient, Step
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ("username", "last_name", "first_name", "email")
extra_kwargs = {
'username': {
'validators': [UnicodeUsernameValidator()],
}
}
class IngredientSerializer(serializers.ModelSerializer):
class Meta:
model = Ingredient
fields = ["text"]
class StepSerializer(serializers.ModelSerializer):
class Meta:
model = Step
fields = ["step_text"]
class RecipeSerializer(serializers.ModelSerializer):
ingredients = IngredientSerializer(many=True, required=False)
steps = StepSerializer(many=True, required=False)
user = UserSerializer(required=True)
def create(self, validated_data):
        steps_data = validated_data.pop('steps', [])  # optional (required=False); default to empty
        ingredients_data = validated_data.pop('ingredients', [])  # optional (required=False)
user_data = validated_data.pop('user')
username = user_data.pop('username')
user = User.objects.get_by_natural_key(username)
recipe = Recipe.objects.create(user=user, **validated_data)
for steps in steps_data:
Step.objects.create(recipe=recipe, **steps)
for ingredients in ingredients_data:
Ingredient.objects.create(recipe=recipe, **ingredients)
return recipe
class Meta:
model = Recipe
fields = ("name", "user", "steps", "ingredients")
def update(self, instance, validated_data):
        steps_data = validated_data.pop('steps', [])  # optional (required=False); default to empty
        ingredients_data = validated_data.pop('ingredients', [])
Step.objects.filter(recipe=instance).delete()
Ingredient.objects.filter(recipe=instance).delete()
for steps in steps_data:
Step.objects.create(recipe=instance, **steps)
for ingredients in ingredients_data:
Ingredient.objects.create(recipe=instance, **ingredients)
return instance
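For reference, an illustrative request payload that `RecipeSerializer.create()` above would accept; the field names come from the serializers, the values are invented:

```python
# Illustrative payload for RecipeSerializer (values invented). create()
# pops 'steps', 'ingredients' and 'user', resolves the user by username,
# then creates the Recipe and its child Step/Ingredient rows.
payload = {
    "name": "Pancakes",
    "user": {"username": "jane", "first_name": "Jane", "last_name": "Doe",
             "email": "jane@example.com"},
    "steps": [{"step_text": "Mix the batter"}, {"step_text": "Fry"}],
    "ingredients": [{"text": "flour"}, {"text": "milk"}],
}
```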
avg_line_length: 33.123077 | max_line_length: 69 | alphanum_fraction: 0.672085
quality signals (qsc_*_quality_signal columns, schema order): 218 | 2,153 | 6.518349 | 0.261468 | 0.073188 | 0.059817 | 0.070373 | 0.401126 | 0.270232 | 0.270232 | 0.270232 | 0.270232 | 0.211119 | 0 | 0 | 0.228518 | 2,153 | 64 | 70 | 33.640625 | 0.855509 | 0 | 0 | 0.24 | 0 | 0 | 0.060845 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.08 | 0 | 0.38 | 0
raw qsc_* columns (schema order): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0
hexsha: 5d0375507c268370ae9c58f1a6b3dd509a4f4999 | size: 1,652 | ext: py | lang: Python
max_stars: count null | repo jcwon0/BlurHPE @ c97a57e92a8a7f171b0403aee640222a32513562 | path tests/test_model/test_temporal_regression_head.py | licenses ["Apache-2.0"] | events null
max_issues: count null | repo jcwon0/BlurHPE @ c97a57e92a8a7f171b0403aee640222a32513562 | path tests/test_model/test_temporal_regression_head.py | licenses ["Apache-2.0"] | events null
max_forks: count null | repo jcwon0/BlurHPE @ c97a57e92a8a7f171b0403aee640222a32513562 | path tests/test_model/test_temporal_regression_head.py | licenses ["Apache-2.0"] | events null
content:
import numpy as np
import pytest
import torch
from mmpose.models import TemporalRegressionHead
def test_temporal_regression_head():
"""Test temporal head."""
head = TemporalRegressionHead(
in_channels=1024,
num_joints=17,
loss_keypoint=dict(type='MPJPELoss', use_target_weight=True))
head.init_weights()
with pytest.raises(AssertionError):
# ndim of the input tensor should be 3
input_shape = (1, 1024, 1, 1)
inputs = _demo_inputs(input_shape)
_ = head(inputs)
with pytest.raises(AssertionError):
# size of the last dim should be 1
input_shape = (1, 1024, 3)
inputs = _demo_inputs(input_shape)
_ = head(inputs)
input_shape = (1, 1024, 1)
inputs = _demo_inputs(input_shape)
out = head(inputs)
assert out.shape == torch.Size([1, 17, 3])
loss = head.get_loss(out, out, torch.ones_like(out))
assert torch.allclose(loss['reg_loss'], torch.tensor(0.))
_ = head.inference_model(inputs)
_ = head.inference_model(inputs, [(0, 1), (2, 3)])
acc = head.get_accuracy(out, out, torch.ones_like(out))
assert acc['mpjpe'] == 0.
np.testing.assert_almost_equal(acc['p_mpjpe'], 0.)
def _demo_inputs(input_shape=(1, 1024, 1)):
"""Create a superset of inputs needed to run head.
Args:
input_shape (tuple): input batch dimensions.
Default: (1, 1024, 1).
Returns:
Random input tensor with the size of input_shape.
"""
inps = np.random.random(input_shape)
inps = torch.FloatTensor(inps)
return inps
avg_line_length: 28.982456 | max_line_length: 70 | alphanum_fraction: 0.625303
quality signals (qsc_*_quality_signal columns, schema order): 215 | 1,652 | 4.613953 | 0.390698 | 0.100806 | 0.080645 | 0.060484 | 0.210685 | 0.194556 | 0.129032 | 0 | 0 | 0 | 0 | 0.041051 | 0.262712 | 1,652 | 56 | 71 | 29.5 | 0.773399 | 0.174334 | 0 | 0.212121 | 0 | 0 | 0.022817 | 0 | 0 | 0 | 0 | 0 | 0.181818 | 1 | 0.060606 | false | 0 | 0.121212 | 0 | 0.212121 | 0
raw qsc_* columns (schema order): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0
hexsha: 5d04161d9491b3b2bccc61b8d346c61f251d0a5b | size: 1,774 | ext: py | lang: Python
max_stars: count null | repo Tanych/CodeTracking @ 86f1cb98de801f58c39d9a48ce9de12df7303d20 | path 353-Design-Snake-Game/solution.py | licenses ["MIT"] | events null
max_issues: count null | repo Tanych/CodeTracking @ 86f1cb98de801f58c39d9a48ce9de12df7303d20 | path 353-Design-Snake-Game/solution.py | licenses ["MIT"] | events null
max_forks: count null | repo Tanych/CodeTracking @ 86f1cb98de801f58c39d9a48ce9de12df7303d20 | path 353-Design-Snake-Game/solution.py | licenses ["MIT"] | events null
content:
import collections  # needed for collections.deque below; missing from the original snippet
class SnakeGame(object):
def __init__(self, width,height,food):
"""
Initialize your data structure here.
@param width - screen width
@param height - screen height
@param food - A list of food positions
E.g food = [[1,1], [1,0]] means the first food is positioned at [1,1], the second is at [1,0].
:type width: int
:type height: int
:type food: List[List[int]]
"""
self.width=width
self.height=height
self.food=collections.deque(food)
self.position=collections.deque([(0,0)])
self.moveops={'U':(-1,0),'L':(0,-1),'R':(0,1),'D':(1,0)}
self.score=0
def move(self, direction):
"""
Moves the snake.
@param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down
@return The game's score after the move. Return -1 if game over.
Game over when snake crosses the screen boundary or bites its body.
:type direction: str
:rtype: int
"""
if direction not in self.moveops:
return -1
peak,tail=self.position[0],self.position[-1]
self.position.pop()
idxi,idxj=self.moveops[direction]
newi,newj=peak[0]+idxi,peak[1]+idxj
if (newi,newj) in self.position or \
newi<0 or newi>=self.height or \
newj<0 or newj>=self.width:
return -1
self.position.appendleft((newi,newj))
if self.food and [newi,newj]==self.food[0]:
self.food.popleft()
self.position.append(tail)
self.score+=1
return self.score
# Your SnakeGame object will be instantiated and called as such:
# obj = SnakeGame(width, height, food)
# param_1 = obj.move(direction)
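Driving the class exactly as the trailing comment suggests, with the canonical 3x2 example; the expected return values were traced against the implementation above:

```python
# Canonical 3x2 example: move() returns the running score, or -1 once the
# snake leaves the board or bites itself.
game = SnakeGame(3, 2, [[1, 2], [0, 1]])
for direction, expected in [('R', 0), ('D', 0), ('R', 1), ('U', 1), ('L', 2), ('U', -1)]:
    assert game.move(direction) == expected
```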
avg_line_length: 35.48 | max_line_length: 102 | alphanum_fraction: 0.567644
quality signals (qsc_*_quality_signal columns, schema order): 244 | 1,774 | 4.106557 | 0.348361 | 0.083832 | 0.02994 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02502 | 0.301578 | 1,774 | 50 | 103 | 35.48 | 0.783697 | 0.379369 | 0 | 0.08 | 0 | 0 | 0.00418 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0 | 0 | 0.24 | 0
raw qsc_* columns (schema order): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0
hexsha: 5d046e78f1ff28c88e9dd3fba255e0d381257af6 | size: 975 | ext: py | lang: Python
max_stars: count null | repo jessebrennan/azul @ 65970a0947f38fae439a3bf8fd960d351787b7a3 | path scripts/register_sam.py | licenses ["Apache-2.0"] | events null
max_issues: count null | repo jessebrennan/azul @ 65970a0947f38fae439a3bf8fd960d351787b7a3 | path scripts/register_sam.py | licenses ["Apache-2.0"] | events null
max_forks: count null | repo jessebrennan/azul @ 65970a0947f38fae439a3bf8fd960d351787b7a3 | path scripts/register_sam.py | licenses ["Apache-2.0"] | events null
content:
from itertools import (
chain,
)
import logging
from azul import (
config,
require,
)
from azul.logging import (
configure_script_logging,
)
from azul.terra import (
TDRClient,
TDRSourceName,
)
log = logging.getLogger(__name__)
def main():
configure_script_logging(log)
tdr = TDRClient()
tdr.register_with_sam()
tdr_catalogs = (
catalog.name
for catalog in config.catalogs.values()
if catalog.plugins['repository'] == 'tdr'
)
for source in set(chain(*map(config.tdr_sources, tdr_catalogs))):
source = TDRSourceName.parse(source)
api_project = tdr.lookup_source_project(source)
require(api_project == source.project,
'Actual Google project of TDR source differs from configured '
'one',
api_project, source)
tdr.check_api_access(source)
tdr.check_bigquery_access(source)
if __name__ == '__main__':
main()
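The `set(chain(*map(config.tdr_sources, tdr_catalogs)))` line above is the standard flatten-and-deduplicate idiom; a toy equivalent with invented catalog data:

```python
# Toy equivalent of set(chain(*map(...))): map each catalog to its source
# list, flatten, deduplicate. Catalog names and sources are invented.
from itertools import chain

tdr_sources = {"dcp2": ["src_a", "src_b"], "lungmap": ["src_b", "src_c"]}
flat = set(chain(*map(tdr_sources.get, ["dcp2", "lungmap"])))
assert flat == {"src_a", "src_b", "src_c"}
```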
avg_line_length: 22.159091 | max_line_length: 78 | alphanum_fraction: 0.644103
quality signals (qsc_*_quality_signal columns, schema order): 109 | 975 | 5.486239 | 0.431193 | 0.040134 | 0.050167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.262564 | 975 | 43 | 79 | 22.674419 | 0.831711 | 0 | 0 | 0 | 0 | 0 | 0.086154 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.138889 | 0 | 0.166667 | 0
raw qsc_* columns (schema order): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0
hexsha: 5d0497316627f4c13c17f9d3db674195ece95272 | size: 3,584 | ext: py | lang: Python
max_stars: count 1 | repo StamKaly/altitude-mod-foundation @ 403befeba7d0e2e6afe3897081cd3e01f438e3d5 | path altitude/players.py | licenses ["MIT"] | events 2018-07-11T16:55:49.000Z to 2018-07-11T16:55:49.000Z
max_issues: count null | repo StamKaly/altitude-mod-foundation @ 403befeba7d0e2e6afe3897081cd3e01f438e3d5 | path altitude/players.py | licenses ["MIT"] | events null
max_forks: count null | repo StamKaly/altitude-mod-foundation @ 403befeba7d0e2e6afe3897081cd3e01f438e3d5 | path altitude/players.py | licenses ["MIT"] | events null
content:
class Player:
def __init__(self, nickname, vapor_id, player_id, ip):
self.nickname = nickname
self.vapor_id = vapor_id
self.player_id = player_id
self.ip = ip
self.not_joined = True
self.loads_map = True
self.joined_after_change_map = True
class Players:
def __init__(self, main_object, modded, lobby):
self.main = main_object
self.players = []
self.modded = modded
self.map_changed = False
self.lobby = lobby
self.commands = None
def get_commands_object(self, commands_object):
self.commands = commands_object
def _on_map_change(self, map_name):
self.map_changed = map_name
if self.modded and self.players:
for player in self.players:
player.loads_map = True
def check_if_everyone_joined_after_change_map(self):
for player in self.players:
if player.loads_map and not player.joined_after_change_map:
return False
return True
def _on_player_info_ev(self, player_id):
player = [player for player in self.players if player.player_id == player_id][0]
if self.map_changed or hasattr(player, "not_joined"):
if player.loads_map and player.joined_after_change_map:
player.joined_after_change_map = False
elif player.loads_map and not player.joined_after_change_map:
player.loads_map = False
player.joined_after_change_map = True
self.main.on_player_map_change(player, self.map_changed)
if hasattr(player, "not_joined"):
del player.not_joined
self.main.on_client_join(player)
if self.check_if_everyone_joined_after_change_map():
self.map_changed = False
def check_nickname_existence(self, nickname):
for player in self.players:
if nickname == player.nickname:
return True
return False
def get_all_players(self, nicknames, vapor_ids, player_ids, ips):
players_list = [nicknames, vapor_ids, player_ids, ips]
for count in range(len(nicknames)):
self.players.append(Player(*[player[count] for player in players_list]))
def add(self, nickname, vapor_id, player_id, ip):
self.players.append(Player(nickname, vapor_id, player_id, ip))
def remove(self, nickname):
for player in self.players:
if nickname == player.nickname:
self.players.remove(player)
break
if self.lobby and len(self.players) == 0:
self.commands.change_map(self.lobby)
def nickname_change(self, old_nickname, new_nickname):
for player in self.players:
if old_nickname == player.nickname:
player.nickname = new_nickname
break
def all_nicknames(self):
return [player.nickname for player in self.players]
def player_from_nickname(self, nickname):
for player in self.players:
if nickname == player.nickname:
return player
def player_from_vapor_id(self, vapor_id):
for player in self.players:
if vapor_id == player.vapor_id:
return player
def player_from_player_id(self, player_id):
for player in self.players:
if player_id == player.player_id:
return player
def get_all_vapor_ids(self):
return [player.vapor_id for player in self.players]
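A minimal exercise of the `Players` container above; the arguments a real mod framework would supply (main object, lobby map) are stubbed out:

```python
# Minimal exercise of the container API above, with framework arguments
# stubbed (main_object=None, lobby=None).
players = Players(main_object=None, modded=False, lobby=None)
players.add("Ace", "vapor-1", 7, "127.0.0.1")
players.nickname_change("Ace", "AceHigh")
assert players.all_nicknames() == ["AceHigh"]
assert players.player_from_player_id(7).ip == "127.0.0.1"
players.remove("AceHigh")   # lobby is None, so no map change is triggered
assert players.players == []
```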
avg_line_length: 35.84 | max_line_length: 88 | alphanum_fraction: 0.626395
quality signals (qsc_*_quality_signal columns, schema order): 457 | 3,584 | 4.647702 | 0.131291 | 0.088041 | 0.062147 | 0.077684 | 0.428437 | 0.353578 | 0.282486 | 0.198682 | 0.130885 | 0.130885 | 0 | 0.000797 | 0.300223 | 3,584 | 99 | 89 | 36.20202 | 0.846093 | 0 | 0 | 0.268293 | 0 | 0 | 0.00558 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.195122 | false | 0 | 0 | 0.02439 | 0.329268 | 0
raw qsc_* columns (schema order): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0
hexsha: 5d0642e93e7a866ace737fc8f40342ddac2993c4 | size: 17,835 | ext: py | lang: Python
max_stars: count 2 | repo expressionsofchange/nerf0 @ 788203619fc89c92e8c7301d62bbc4f1f4ee66e1 | path dsn/editor/construct.py | licenses ["MIT"] | events 2019-04-30T05:42:05.000Z to 2019-08-11T19:17:20.000Z
max_issues: count null | repo expressionsofchange/nerf0 @ 788203619fc89c92e8c7301d62bbc4f1f4ee66e1 | path dsn/editor/construct.py | licenses ["MIT"] | events null
max_forks: count null | repo expressionsofchange/nerf0 @ 788203619fc89c92e8c7301d62bbc4f1f4ee66e1 | path dsn/editor/construct.py | licenses ["MIT"] | events null
content:
"""
Tools to "play notes for the editor clef", which may be thought of as "executing editor commands".
NOTE: in the below, we often connect notes together "manually", i.e. using NoteSlur(..., previous_hash). As an
alternative, we could consider `nouts_for_notes`.
"""
from s_address import node_for_s_address, s_dfs
from dsn.s_expr.legato import NoteSlur, NoteCapo
from dsn.s_expr.utils import (
bubble_history_up,
calc_possibility,
insert_text_at,
insert_node_at,
replace_text_at,
weave_disjoint_replaces,
)
from dsn.s_expr.clef import Delete, Insert, Replace, BecomeNode
from dsn.s_expr.structure import TreeNode
from dsn.editor.clef import (
CursorChild,
CursorDFS,
CursorParent,
CursorSet,
EDelete,
EncloseWithParent,
InsertNodeChild,
InsertNodeSibbling,
MoveSelectionChild,
MoveSelectionSibbling,
LeaveChildrenBehind,
SwapSibbling,
TextInsert,
TextReplace,
)
def edit_note_play(structure, edit_note):
# :: EditStructure, EditNote => (new) s_cursor, posacts, error
def an_error():
return structure.s_cursor, [], True
if isinstance(edit_note, TextInsert):
posacts = insert_text_at(structure.tree, edit_note.parent_s_address, edit_note.index, edit_note.text)
new_s_cursor = edit_note.parent_s_address + [edit_note.index]
return new_s_cursor, posacts, False
if isinstance(edit_note, TextReplace):
posacts = replace_text_at(structure.tree, edit_note.s_address, edit_note.text)
return edit_note.s_address, posacts, False
if isinstance(edit_note, InsertNodeSibbling):
if structure.s_cursor == []:
return an_error() # adding sibblings to the root is not possible (it would lead to a forest)
# There is no need to check that the new index is a valid one. (Assuming: the cursor is valid, and direction is
# in the range [0, 1]; such assumptions fit with the general idea of "we only check that the user's command can
# be executed at this point, we do not check for arbitrary programming errors here). The proof flows directly
# from the idea that, for lists of length n, insertions at [0, n] are valid (insertion at n being an append).
index = structure.s_cursor[-1] + edit_note.direction
posacts = insert_node_at(structure.tree, structure.s_cursor[:-1], index)
new_s_cursor = structure.s_cursor[:-1] + [index]
return new_s_cursor, posacts, False
if isinstance(edit_note, InsertNodeChild):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if not isinstance(cursor_node, TreeNode):
# for now... we just silently ignore the user's request when they ask to add a child node to a non-node
return an_error()
index = len(cursor_node.children)
posacts = insert_node_at(structure.tree, structure.s_cursor, index)
new_s_cursor = structure.s_cursor + [index]
return new_s_cursor, posacts, False
if isinstance(edit_note, EDelete):
if structure.s_cursor == []:
# silently ignored ('delete root' is not defined, because the root is assumed to exist.)
return an_error()
delete_from = structure.s_cursor[:-1]
delete_at_index = structure.s_cursor[-1]
delete_from_hash = node_for_s_address(structure.tree, delete_from).metadata.nout_hash
p, h = calc_possibility(NoteSlur(Delete(delete_at_index), delete_from_hash))
if delete_at_index == len(node_for_s_address(structure.tree, delete_from).children) - 1:
# deletion makes cursor pos invalid: up to parent (alternative: sibbling-up first, until no more sibblings)
new_s_cursor = delete_from
else:
new_s_cursor = structure.s_cursor # "stay in place (although new contents slide into the cursor position)
posacts = [p] + bubble_history_up(h, structure.tree, delete_from)
return new_s_cursor, posacts, False
if isinstance(edit_note, SwapSibbling):
if structure.s_cursor == []:
return an_error() # root has no sibblings
parent = node_for_s_address(structure.tree, structure.s_cursor[:-1])
index = structure.s_cursor[-1] + edit_note.direction
if not (0 <= index <= len(parent.children) - 1):
return an_error()
# For now, SwapSibbling is simply implemented as a "delete and insert"; if (or when) we'll introduce "Move" into
# the Clef, we should note the move here.
parent_s_address = structure.s_cursor[:-1]
delete_at_index = structure.s_cursor[-1]
delete_from_hash = node_for_s_address(structure.tree, parent_s_address).metadata.nout_hash
reinsert_later_hash = node_for_s_address(structure.tree, structure.s_cursor).metadata.nout_hash
p0, hash_after_deletion = calc_possibility(NoteSlur(Delete(delete_at_index), delete_from_hash))
p1, hash_after_insertion = calc_possibility(NoteSlur(Insert(index, reinsert_later_hash), hash_after_deletion))
new_cursor = structure.s_cursor[:-1] + [index]
posacts = [p0, p1] + bubble_history_up(hash_after_insertion, structure.tree, parent_s_address)
return new_cursor, posacts, False
if isinstance(edit_note, MoveSelectionChild):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if not hasattr(cursor_node, 'children'):
return an_error() # The target must be a node to be able to add as a child
return do_move(structure, edit_note, structure.s_cursor, len(cursor_node.children))
if isinstance(edit_note, MoveSelectionSibbling):
if len(structure.s_cursor) == 0:
return an_error() # there is no sibbling of the root node
# edit_note.direction points to a valid insertion point for the same reasons detailed in the comment on
# InsertNodeSibbling
return do_move(structure, edit_note, structure.s_cursor[:-1], structure.s_cursor[-1] + edit_note.direction)
if isinstance(edit_note, LeaveChildrenBehind):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if not hasattr(cursor_node, 'children'):
return an_error() # Leave _children_ behind presupposes the existance of children
if structure.s_cursor == []:
return an_error() # Root cannot die
# For now, LeaveChildrenBehind is simply implemented as a "delete and insert"; if (or when) we'll introduce
# "Move" into the Clef, we should note the move here.
parent_s_address = structure.s_cursor[:-1]
delete_at_index = structure.s_cursor[-1]
delete_from_hash = node_for_s_address(structure.tree, parent_s_address).metadata.nout_hash
p, hash_ = calc_possibility(NoteSlur(Delete(delete_at_index), delete_from_hash))
posacts = [p]
removed_node = node_for_s_address(structure.tree, structure.s_cursor)
for i, child in enumerate(removed_node.children):
p, hash_ = calc_possibility(NoteSlur(Insert(structure.s_cursor[-1] + i, child.metadata.nout_hash), hash_))
posacts.append(p)
# In general, leaving the cursor at the same s_address will be great: post-deletion you'll be in the right spot
new_cursor = structure.s_cursor
if len(removed_node.children) == 0:
# ... however, if there are no children to leave behind... this "right spot" may be illegal
parent_node = node_for_s_address(structure.tree, parent_s_address)
if len(parent_node.children) == 1:
# if the deleted node was the only node: fall back to the parent
new_cursor = parent_s_address
else:
# otherwise, make sure to stay in bounds.
new_cursor[len(new_cursor) - 1] = min(
len(parent_node.children) - 1 - 1, # len - 1 idiom; -1 for deletion.
new_cursor[len(new_cursor) - 1])
posacts += bubble_history_up(hash_, structure.tree, parent_s_address)
return new_cursor, posacts, False
if isinstance(edit_note, EncloseWithParent):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if structure.s_cursor == []:
# I am not sure about this one yet: should we have the option to create a new root? I don't see any direct
# objections (by which I mean: it's possible in terms of the math), but I still have a sense that it may
# create some asymmetries. For now I'm disallowing it; we'll see whether a use case arises.
return an_error()
# For now, EncloseWithParent is simply implemented as a "replace with the parent"; if (or when) we'll introduce
# "Move" (in particular: the MoveReplace) into the Clef, we should note the move here.
parent_s_address = structure.s_cursor[:-1]
replace_at_index = structure.s_cursor[-1]
replace_on_hash = node_for_s_address(structure.tree, parent_s_address).metadata.nout_hash
reinsert_later_hash = node_for_s_address(structure.tree, structure.s_cursor).metadata.nout_hash
p_capo, hash_capo = calc_possibility(NoteCapo())
p_create, hash_create = calc_possibility(NoteSlur(BecomeNode(), hash_capo))
p_enclosure, hash_enclosure = calc_possibility(NoteSlur(Insert(0, reinsert_later_hash), hash_create))
p_replace, hash_replace = calc_possibility(
NoteSlur(Replace(replace_at_index, hash_enclosure), replace_on_hash))
posacts = [p_capo, p_create, p_enclosure, p_replace] + bubble_history_up(
hash_replace, structure.tree, parent_s_address)
# We jump the cursor to the newly enclosed location:
new_cursor = structure.s_cursor + [0]
return new_cursor, posacts, False
def move_cursor(new_cursor):
return new_cursor, [], False
if isinstance(edit_note, CursorDFS):
dfs = s_dfs(structure.tree, [])
dfs_index = dfs.index(structure.s_cursor) + edit_note.direction
if not (0 <= dfs_index <= len(dfs) - 1):
return an_error()
return move_cursor(dfs[dfs_index])
"""At some point I had "regular sibbling" (as opposed to DFS sibbling) in the edit_clef. It looks like this:
if structure.s_cursor == []:
return an_error() # root has no sibblings
parent = node_for_s_address(structure.tree, s_cursor[:-1])
index = s_cursor[-1] + edit_node.direction
if not (0 <= index <= len(parent.children) - 1):
return an_error()
return move_cursor(s_cursor[:-1] + [index])
"""
if isinstance(edit_note, CursorSet):
return move_cursor(edit_note.s_address)
if isinstance(edit_note, CursorParent):
if structure.s_cursor == []:
return an_error()
return move_cursor(structure.s_cursor[:-1])
if isinstance(edit_note, CursorChild):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if not hasattr(cursor_node, 'children') or len(cursor_node.children) == 0:
return an_error()
return move_cursor(structure.s_cursor + [0])
raise Exception("Unknown Note")
def do_move(structure, edit_note, target_parent_path, target_index):
selection_edge_0 = edit_note.selection_edge_0
selection_edge_1 = edit_note.selection_edge_1
def an_error():
return structure.s_cursor, [], True
if selection_edge_0[:-1] != selection_edge_1[:-1]:
# i.e. if not same-parent: this is an error. This may very well be too restrictive, but I'd rather move in the
# direction of "relax constraints later" than in the other directions. One particular reason I'm so restrictive
# for now: if I ever want to express a note "move" using a target_node, a source node and to indices in the
# source node, such a single-parent restriction is indeed a necessity.
# Note that "single parent" implies "same depth", but not vice versa. One possible relaxation is: make the
# restriction on "same depth" instead.
# Generally, the paths towards relaxation are to either [a] "be smart about the meaning of the selection's
# edges", i.e. find the first common ancestor and the relevant children of that ancestor or [b] to not care so
# much about single-parent.
return an_error()
if selection_edge_0 <= (target_parent_path + [target_index])[:len(selection_edge_0)] <= selection_edge_1:
# If the full target location, truncated to the length of the sources, is (inclusively) in the source's range,
# you're trying to move to [a descendant of] yourself. This is illegal. Moving something to a child of itself:
# I simply don't know what it would mean. Moving something to the same location (single source item, target path
# identical to the source path) could at least be understood to mean the no-op, so it's slightly less
# meaningless, but here I don't find that enough, so I'm just calling both scenarios error-scenarios.
# This implies protection against moving the root node around (because everything descends from the root node)
return an_error()
source_parent_path = selection_edge_0[:-1]
source_parent = node_for_s_address(structure.tree, source_parent_path)
target_parent = node_for_s_address(structure.tree, target_parent_path)
# For now, the "edit move" operations are simply implemented as a "insert and delete"; if (or when) we'll introduce
# "Move" into the Clef, we should note the move here.
posacts = []
source_index_lo, source_index_hi = sorted([selection_edge_0[-1], selection_edge_1[-1]])
hash_ = target_parent.metadata.nout_hash
for target_offset, source_index in enumerate(range(source_index_lo, source_index_hi + 1)): # edge-inclusive range
insert_hash = node_for_s_address(structure.tree, source_parent_path + [source_index]).metadata.nout_hash
p, hash_ = calc_possibility(NoteSlur(Insert(target_index + target_offset, insert_hash), hash_))
posacts.append(p)
weave_correction = 0
cursor_correction = 0
# TODO this part is still broken:
# Not only if the parents are exactly the same, but also if one parent is a prefix of the other (said differently:
# the longest_common_prefix of both parents matches one of them).
# In that case, we need to somehow connect the parents....
# (For the case of "parents match exactly", I did this using the idea "just don't reset hash_"... which works,
# because it allows you to continue operating on the the same "future". But in the case of shared prefix, this won't
# work.
if source_parent_path != target_parent_path:
wdr_hash = hash_
hash_ = source_parent.metadata.nout_hash
else:
if target_index < source_index_lo:
# We insert before we delete. If we do this on the same parent, and the insertions happen at lower indices
# than the deletions, they will affect the locations where the deletions must take place, by precisely the
# number of insertions that happened. (If we reverse the order of operations, we have the opposite problem)
# The reason we have this problem at all, is because we implement something that is atomic from the user's
# point of view in a non-atomic way in the clef. The problem may auto-disappear if we add "Move" to the
# clef.
# Another way we could handle the problem is once we have some tools to "realinearize while preserving
# meaning". I.e. we have deletions, we have insertions: at one point (e.g. once we build the cooperative
# editor) we should be able to express "weave those together, rewriting indices as required".
# In the if-statement above, we could pick either lo/hi for the comparison; source_index_lo and
# source_index_hi will never straddle target_index, because of the child-of-yourself checks at the top.
weave_correction = source_index_hi - source_index_lo + 1
else:
cursor_correction = source_index_hi - source_index_lo + 1
# we do _not_ fetch hash_ here, the idea being: it's the hash we just created.
# nor do we bubble up (yet); we can do a single bubble-up
for source_index in range(source_index_lo, source_index_hi + 1): # edge-inclusive range
# Note: we just Delete n times at the "lo" index (everything shifting to the left after each deletion)
p, hash_ = calc_possibility(NoteSlur(Delete(source_index_lo + weave_correction), hash_))
posacts.append(p)
if source_parent_path != target_parent_path:
posacts = posacts + weave_disjoint_replaces(
structure.tree,
target_parent_path, wdr_hash,
source_parent_path, hash_)
else:
posacts = posacts + bubble_history_up(hash_, structure.tree, source_parent_path)
# The current solution for "where to put the cursor after the move" is "at the end". This "seems intuitive" (but
# that may just be habituation). In any case, it's wat e.g. LibreOffice does when cut/pasting. (However, for a
# mouse-drag initiated move in LibreOffice, the selection is preserved).
# As it stands: the selection disappears automatically, because it points at a no-longer existing location. If we
# want to make the selection appear at the target-location, we need to change the interface of edit_note_play to
# include the resulting selection.
new_cursor = target_parent_path + [target_index + target_offset - cursor_correction]
return new_cursor, posacts, False
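The `weave_correction` bookkeeping in `do_move` exists because the move is implemented as insert-then-delete: insertions at lower indices on the same parent shift the positions at which the later deletions must happen. A plain-list illustration of the same arithmetic:

```python
# Moving xs[3:5] in front of index 0 by insert-then-delete: after the two
# insertions the original items sit two slots further right, so deletions
# start at 3 + correction, where correction == number of insertions.
xs = list("abcde")                        # goal: move "de" to the front
for offset, i in enumerate(range(3, 5)):
    xs.insert(offset, xs[i + offset])     # source index shifted by prior inserts
correction = 2
for _ in range(3, 5):
    del xs[3 + correction]
assert xs == list("deabc")
```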
avg_line_length: 47.18254 | max_line_length: 120 | alphanum_fraction: 0.691786
quality signals (qsc_*_quality_signal columns, schema order): 2,549 | 17,835 | 4.635936 | 0.179678 | 0.033765 | 0.059575 | 0.025387 | 0.373784 | 0.329864 | 0.297707 | 0.266819 | 0.229923 | 0.18778 | 0 | 0.005173 | 0.230502 | 17,835 | 377 | 121 | 47.307692 | 0.855873 | 0.371124 | 0 | 0.317949 | 0 | 0 | 0.003376 | 0 | 0 | 0 | 0 | 0.002653 | 0 | 1 | 0.025641 | false | 0 | 0.030769 | 0.015385 | 0.225641 | 0
raw qsc_* columns (schema order): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0
hexsha: 5d06e71f9e23ea91aab0fd29960c633c1cc96c2f | size: 636 | ext: py | lang: Python
max_stars: count null | repo richtong/pytong @ 6ff07a1bdf1d5e2232bfc102cce2dd74783bb111 | path src/pytong/base.py | licenses ["MIT"] | events null
max_issues: count null | repo richtong/pytong @ 6ff07a1bdf1d5e2232bfc102cce2dd74783bb111 | path src/pytong/base.py | licenses ["MIT"] | events null
max_forks: count null | repo richtong/pytong @ 6ff07a1bdf1d5e2232bfc102cce2dd74783bb111 | path src/pytong/base.py | licenses ["MIT"] | events null
content:
"""Base for all Classes.
Base mainly includes the description fields
"""
import logging
from typing import Optional
from .log import Log # type: ignore
class BaseLog:
"""
Set a base logging.
Use this as the base class for all your work. This adds a logging root.
"""
def __init__(self, log_root: Optional[Log] = None):
"""Set the Root Log."""
# since we have no log otherwise
self.log_root = log_root
self.log = (
log_root.log_class(self)
if log_root is not None
else logging.getLogger(__name__)
)
self.log.debug(f"{self=}")
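Assuming `BaseLog` is importable (its relative import of `Log` means it normally lives inside a package), subclassing with no `log_root` falls back to the stdlib logger; note the `f"{self=}"` debug line requires Python 3.8+:

```python
# Minimal sketch: subclass BaseLog without a Log root, so self.log falls
# back to logging.getLogger(__name__).
import logging

logging.basicConfig(level=logging.DEBUG)

class Widget(BaseLog):
    pass

w = Widget()        # emits a DEBUG record like "self=<__main__.Widget object ...>"
w.log.info("ready")
```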
avg_line_length: 22.714286 | max_line_length: 75 | alphanum_fraction: 0.610063
quality signals (qsc_*_quality_signal columns, schema order): 88 | 636 | 4.25 | 0.511364 | 0.093583 | 0.058824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.300314 | 636 | 27 | 76 | 23.555556 | 0.840449 | 0.349057 | 0 | 0 | 0 | 0 | 0.018325 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.25 | 0 | 0.416667 | 0
raw qsc_* columns (schema order): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0
hexsha: 5d073e43082666b4fb7e947816cf1a811338dbe3 | size: 620 | ext: py | lang: Python
max_stars: count 1 | repo GuillaumeFalourd/poc-subprocess @ 8f014a709ac2e471092d4ea1f61f1a9ff65ff571 | path subprocess-10.py | licenses ["Apache-2.0"] | events 2021-12-08T12:58:14.000Z to 2021-12-08T12:58:14.000Z
max_issues: count null | repo GuillaumeFalourd/poc-subprocess @ 8f014a709ac2e471092d4ea1f61f1a9ff65ff571 | path subprocess-10.py | licenses ["Apache-2.0"] | events null
max_forks: count null | repo GuillaumeFalourd/poc-subprocess @ 8f014a709ac2e471092d4ea1f61f1a9ff65ff571 | path subprocess-10.py | licenses ["Apache-2.0"] | events null
content:
import subprocess
import re
programs = input('Separe the programs with a space: ').split()
secure_pattern = r'[\w\d]'  # raw string avoids invalid-escape-sequence warnings
for program in programs:
if not re.match(secure_pattern, program):
print("Sorry we can't check that program")
continue
    process = subprocess.run(
['which', program], capture_output=True, text=True)
if process.returncode == 0:
print(f'The program "{program}" is installed')
print(f'The location of the binary is: {process.stdout}')
else:
print(f'Sorry the {program} is not installed')
print(process.stderr)
print('\n')
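The script shells out to `which`; the standard library offers a portable equivalent, `shutil.which`, which returns the binary's path or `None`:

```python
# Portable alternative to shelling out to `which`: shutil.which returns
# the full path of an executable on PATH, or None when it is absent.
import shutil

for program in ['python3', 'definitely-not-a-real-binary']:
    location = shutil.which(program)
    print(f'{program}: {location or "not installed"}')
```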
avg_line_length: 22.962963 | max_line_length: 65 | alphanum_fraction: 0.63871
quality signals (qsc_*_quality_signal columns, schema order): 82 | 620 | 4.792683 | 0.585366 | 0.045802 | 0.045802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002123 | 0.240323 | 620 | 27 | 66 | 22.962963 | 0.832272 | 0 | 0 | 0 | 0 | 0 | 0.320451 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0.352941
raw qsc_* columns (schema order): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0
hexsha: 5d0751ff55112db535e551765b215e8ad53a88d2 | size: 2,320 | ext: py | lang: Python
max_stars: count null | repo vo0doO/pydj-persweb @ efcd6b7090230f7c0b9ec056008f6d1d9e876ed9 | path authentication/socialaccount/forms.py | licenses ["CC0-1.0"] | events null
max_issues: count 4 | repo vo0doO/pydj-persweb @ efcd6b7090230f7c0b9ec056008f6d1d9e876ed9 | path authentication/socialaccount/forms.py | licenses ["CC0-1.0"] | events 2020-05-06T17:22:00.000Z to 2021-12-13T20:43:30.000Z
max_forks: count null | repo vo0doO/pydj-persweb @ efcd6b7090230f7c0b9ec056008f6d1d9e876ed9 | path authentication/socialaccount/forms.py | licenses ["CC0-1.0"] | events null
content:
from __future__ import absolute_import
from django import forms
from authentication.account.forms import BaseSignupForm
from . import app_settings, signals
from .adapter import get_adapter
from .models import SocialAccount
class SignupForm(BaseSignupForm):
def __init__(self, *args, **kwargs):
self.sociallogin = kwargs.pop('sociallogin')
initial = get_adapter().get_signup_form_initial_data(
self.sociallogin)
kwargs.update({
'initial': initial,
'email_required': kwargs.get('email_required',
app_settings.EMAIL_REQUIRED)})
super(SignupForm, self).__init__(*args, **kwargs)
def save(self, request):
adapter = get_adapter(request)
user = adapter.save_user(request, self.sociallogin, form=self)
self.custom_signup(request, user)
return user
def validate_unique_email(self, value):
try:
return super(SignupForm, self).validate_unique_email(value)
except forms.ValidationError:
raise forms.ValidationError(
get_adapter().error_messages['email_taken']
% self.sociallogin.account.get_provider().name)
class DisconnectForm(forms.Form):
account = forms.ModelChoiceField(queryset=SocialAccount.objects.none(),
widget=forms.RadioSelect,
required=True)
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request')
self.accounts = SocialAccount.objects.filter(user=self.request.user)
super(DisconnectForm, self).__init__(*args, **kwargs)
self.fields['account'].queryset = self.accounts
def clean(self):
cleaned_data = super(DisconnectForm, self).clean()
account = cleaned_data.get('account')
if account:
get_adapter(self.request).validate_disconnect(
account,
self.accounts)
return cleaned_data
def save(self):
account = self.cleaned_data['account']
account.delete()
signals.social_account_removed.send(sender=SocialAccount,
request=self.request,
socialaccount=account)
avg_line_length: 35.692308 | max_line_length: 76 | alphanum_fraction: 0.617241
quality signals (qsc_*_quality_signal columns, schema order): 226 | 2,320 | 6.110619 | 0.309735 | 0.036206 | 0.030413 | 0.021723 | 0.036206 | 0.036206 | 0 | 0 | 0 | 0 | 0 | 0 | 0.289655 | 2,320 | 64 | 77 | 36.25 | 0.837985 | 0 | 0 | 0.039216 | 0 | 0 | 0.036638 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.117647 | 0 | 0.352941 | 0
raw qsc_* columns (schema order): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0
hexsha: 5d0792b2d66082b8e779a07e75899ce616d825f2 | size: 12,110 | ext: py | lang: Python
max_stars: count null | repo nilsbeck/pytheos @ de4f3a03330ddb28e68ddcaa7b4888ea9a25e238 | path pytheos/pytheos.py | licenses ["MIT"] | events null
max_issues: count 1 | repo nilsbeck/pytheos @ de4f3a03330ddb28e68ddcaa7b4888ea9a25e238 | path pytheos/pytheos.py | licenses ["MIT"] | events 2021-10-30T16:31:41.000Z to 2021-10-30T16:31:41.000Z
max_forks: count 1 | repo nilsbeck/pytheos @ de4f3a03330ddb28e68ddcaa7b4888ea9a25e238 | path pytheos/pytheos.py | licenses ["MIT"] | events 2021-10-30T14:24:58.000Z to 2021-10-30T14:24:58.000Z
content:
#!/usr/bin/env python
""" Provides the primary interface into the library """
from __future__ import annotations
import asyncio
import logging
from typing import Callable, Optional, Union
from . import utils
from . import controllers
from .networking.connection import Connection
from .networking.types import SSDPResponse
from .networking.errors import ChannelUnavailableError
from .models.heos import HEOSEvent
from .models.system import AccountStatus
logger = logging.getLogger('pytheos')
class Pytheos:
""" Pytheos interface """
DEFAULT_PORT = 1255
@staticmethod
def check_channel_availability(channel: Connection):
""" Checks to make sure that the provided channel is available.
:param channel: Channel connection
:raises: ChannelUnavailableError
:return: None
"""
if not channel or not channel.connected:
raise ChannelUnavailableError()
@property
def log_level(self):
return logger.level
@log_level.setter
def log_level(self, value):
logger.setLevel(value)
@property
def connected(self):
return self._connected
@property
def signed_in(self):
return self._account_status == AccountStatus.SignedIn
@property
def username(self):
return self._account_username
def __init__(self, server: Union[str, SSDPResponse]=None, port: Optional[int]=DEFAULT_PORT):
""" Constructor
:param server: Server hostname or IP
:param port: Port number
"""
if isinstance(server, SSDPResponse):
server = utils.extract_host(server.location)
self.server: str = server
self.port: int = port
self._command_channel = Connection()
self._event_channel = Connection()
self._event_queue = asyncio.Queue()
self._event_task: Optional[asyncio.Task] = None
self._event_processor: Optional[asyncio.Task] = None
self._connected: bool = False
self._event_subscriptions: dict = {}
self._receive_events: bool = True
self._account_status: Optional[AccountStatus] = None
self._account_username: Optional[str] = None
self._players: list = []
self._groups: dict = {} # FIXME?: Not sure I like having this as a dict.
self._sources: dict = {} # FIXME?: Not sure I like having this as a dict.
self.api: Connection = self._command_channel
self._init_internal_event_handlers()
def __repr__(self):
return f'<Pytheos(server={self.server}, port={self.port})>'
def __enter__(self):
if not self._connected:
self.connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._connected:
self.close()
async def connect(self, enable_event_connection: bool=True, refresh: bool=True) -> Pytheos:
""" Connect to our HEOS device.
:param enable_event_connection: Enables establishing an additional connection for system events
:param refresh: Determines if the system state should be automatically refreshed
:return: self
"""
logger.info(f'Connecting to {self.server}:{self.port}')
await self._command_channel.connect(self.server, self.port)
self._connected = True
self._receive_events = enable_event_connection
if self._receive_events:
await self._event_channel.connect(self.server, self.port, deduplicate=True)
await self.enable_event_reception(True)
loop = asyncio.get_running_loop()
self._event_task = loop.create_task(self._listen_for_events())
self._event_processor = loop.create_task(self._process_events())
if refresh:
await self.refresh()
return self
async def _set_register_for_change_events(self, value: bool):
""" Notifies HEOS that we want event messages on the event channel.
:param value: True or False
:return: None
"""
await self._event_channel.system.register_for_change_events(value)
def close(self):
""" Close the connection to our HEOS device
:return: None
"""
logger.info(f'Closing connection to {self.server}:{self.port}')
if self._event_task:
self._event_task.cancel()
if self._event_processor:
self._event_processor.cancel()
self._connected = False
def subscribe(self, event_name: str, callback: Callable):
""" Subscribe a callback function to a specific event
:param event_name: Event name
:param callback: Callback function
:return: None
"""
# FIXME: Change event_name to an enum
if self._event_subscriptions.get(event_name) is None:
self._event_subscriptions[event_name] = []
self._event_subscriptions[event_name].append(callback)
async def refresh(self):
""" Refreshes internal information from the HEOS system.
:return: None
"""
await self.check_account()
await self.get_players()
await self.get_groups()
await self.get_sources()
async def reboot(self):
""" Instructs the system to reboot.
:return: None
"""
await self.api.system.reboot()
async def check_account(self) -> tuple:
""" Checks if the system is logged into HEOS and returns the status and account name, if available.
:return: tuple
"""
self._account_status, self._account_username = await self.api.system.check_account()
return self._account_status, self._account_username
async def sign_in(self, username: str, password: str):
""" Signs the system into the HEOS service.
:param username: Username
:param password: Password
:return: None
"""
await self.api.system.sign_in(username, password)
async def sign_out(self):
""" Signs out from the HEOS service.
:return: None
"""
await self.api.system.sign_out()
async def get_players(self):
""" Retrieves a mapping of IDs to Players present in the HEOS system.
:return: list
"""
self._players = [controllers.Player(self, player) for player in await self.api.player.get_players()]
return self._players
async def get_group(self, group_id):
""" Retrieve a specific group by ID.
:param group_id: Group ID
:return: PytheosGroup
"""
groups = await self.get_groups()
return groups.get(group_id)
async def get_groups(self):
""" Retrieves a mapping of IDs to Groups present in the HEOS system.
:return: dict
"""
self._groups = {}
for group in await self.api.group.get_groups():
self._groups[group.group_id] = controllers.Group(self, group)
return self._groups
async def get_sources(self):
""" Retrieves a mapping of IDs to Sources present in the HEOS system.
:return: dict
"""
self._sources = {}
for source in await self.api.browse.get_music_sources():
self._sources[source.source_id] = controllers.Source(self, source)
return self._sources
def is_receiving_events(self):
""" Retrieves whether or not we're receiving events.
:return: bool
"""
return self._receive_events
async def enable_event_reception(self, value):
""" Enables or disables event reception.
:param value: True or False
:return: None
"""
self._receive_events = value
await self._set_register_for_change_events(value)
async def _listen_for_events(self):
""" Async task that reads messages from the event channel and adds them to our event queue for
later processing.
:return: None
"""
while True:
results = await self._event_channel.read_message()
if results:
event = HEOSEvent(results)
logger.debug(f"Received event: {event!r}")
await self._event_queue.put(event)
await asyncio.sleep(0.5)
async def _process_events(self):
""" Async task that processes events that originate from the event channel.
:return: None
"""
while True:
event = await self._event_queue.get()
if event:
logger.debug(f'Processing event: {event!r}')
await self._event_handler(event)
await asyncio.sleep(0.5)
async def _event_handler(self, event: HEOSEvent):
""" Internal event handler
:param event: HEOS Event
:return: None
"""
loop = asyncio.get_running_loop()
for callback in self._event_subscriptions.get(event.command, []):
logger.debug(f'Calling registered callback {callback} for event {event!r}')
loop.create_task(callback(event))
def _init_internal_event_handlers(self):
""" Initialize the internal event handlers
:return: None
"""
# FIXME: Meh, do something better with this.
internal_handler_map = {
# 'event/sources_changed': self._handle_sources_changed,
# 'event/players_changed': self._handle_players_changed,
# 'event/groups_changed': self._handle_groups_changed,
# 'event/player_state_changed': self._handle_player_state_changed,
# 'event/player_now_playing_changed': self._handle_now_playing_changed,
# 'event/player_now_playing_progress': self._handle_now_playing_progress,
# 'event/player_playback_error': self._handle_playback_error,
# 'event/player_queue_changed': self._handle_queue_changed,
# 'event/player_volume_changed': self._handle_volume_changed,
# 'event/repeat_mode_changed': self._handle_repeat_mode_changed,
# 'event/shuffle_mode_changed': self._handle_shuffle_mode_changed,
# 'event/group_volume_changed': self._handle_group_volume_changed,
# 'event/user_changed': self._handle_user_changed,
}
for event, callback in internal_handler_map.items():
self.subscribe(event, callback)
def _handle_sources_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_players_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_groups_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_player_state_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_now_playing_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_now_playing_progress(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_playback_error(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_queue_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_volume_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_repeat_mode_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_shuffle_mode_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_group_volume_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_user_changed(self, event: HEOSEvent):
raise NotImplementedError()
async def connect(host: Union[SSDPResponse, str], port: int=Pytheos.DEFAULT_PORT) -> Pytheos:
""" Connect to the provided host and return a context manager for use with the connection.
:param host: Host to connect to
:param port: Port to connect to
:raises: ValueError
:return: The Pytheos instance
"""
if isinstance(host, SSDPResponse):
host = utils.extract_host(host.location)
conn = Pytheos(host, port)
return await conn.connect()
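# A minimal usage sketch for the module-level connect() helper above. The device
# address 10.0.0.10 is hypothetical, and the event name follows the 'event/...'
# convention used by _init_internal_event_handlers().
if __name__ == '__main__':
    import asyncio

    async def _demo():
        heos = await connect('10.0.0.10')
        try:
            async def on_state_changed(event):
                print('player state changed:', event)

            heos.subscribe('event/player_state_changed', on_state_changed)
            print('players:', await heos.get_players())
        finally:
            heos.close()

    asyncio.run(_demo())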
31.952507 | 108 | 0.650372 | 1,394 | 12,110 | 5.416069 | 0.176471 | 0.042914 | 0.033377 | 0.039603 | 0.249007 | 0.189007 | 0.143046 | 0.101722 | 0.062517 | 0.029139 | 0 | 0.000896 | 0.262675 | 12,110 | 378 | 109 | 32.037037 | 0.844663 | 0.12981 | 0 | 0.138889 | 0 | 0 | 0.030354 | 0.009636 | 0 | 0 | 0 | 0.007937 | 0 | 1 | 0.15 | false | 0.011111 | 0.061111 | 0.027778 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5d090b8cc45daaa469bff1113230ad77ae43f4b6 | 9,596 | py | Python | tianshou/data/collector.py | DZ9/tianshou | 04208e6cce722b7a2353d9a5f4d6f0fc05797d67 | ["MIT"] | 1 | 2020-04-01T04:47:39.000Z | 2020-04-01T04:47:39.000Z | tianshou/data/collector.py | TommeyChang/tianshou | 4f843d3f51789f488169131a5b5decba8bab2b31 | ["MIT"] | null | null | null | tianshou/data/collector.py | TommeyChang/tianshou | 4f843d3f51789f488169131a5b5decba8bab2b31 | ["MIT"] | 1 | 2022-01-23T10:52:48.000Z | 2022-01-23T10:52:48.000Z |
import time
import torch
import warnings
import numpy as np
from tianshou.env import BaseVectorEnv
from tianshou.data import Batch, ReplayBuffer,\
ListReplayBuffer
from tianshou.utils import MovAvg
class Collector(object):
"""docstring for Collector"""
def __init__(self, policy, env, buffer=None, stat_size=100):
super().__init__()
self.env = env
self.env_num = 1
self.collect_step = 0
self.collect_episode = 0
self.collect_time = 0
if buffer is None:
self.buffer = ReplayBuffer(100)
else:
self.buffer = buffer
self.policy = policy
self.process_fn = policy.process_fn
self._multi_env = isinstance(env, BaseVectorEnv)
self._multi_buf = False # True if buf is a list
# need multiple cache buffers only if storing in one buffer
self._cached_buf = []
if self._multi_env:
self.env_num = len(env)
if isinstance(self.buffer, list):
assert len(self.buffer) == self.env_num, \
'The number of data buffers does not match the number of ' \
'input envs.'
self._multi_buf = True
elif isinstance(self.buffer, ReplayBuffer):
self._cached_buf = [
ListReplayBuffer() for _ in range(self.env_num)]
else:
raise TypeError('The buffer in data collector is invalid!')
self.reset_env()
self.reset_buffer()
# state over batch is either a list, an np.ndarray, or a torch.Tensor
self.state = None
self.step_speed = MovAvg(stat_size)
self.episode_speed = MovAvg(stat_size)
def reset_buffer(self):
if self._multi_buf:
for b in self.buffer:
b.reset()
else:
self.buffer.reset()
def get_env_num(self):
return self.env_num
def reset_env(self):
self._obs = self.env.reset()
self._act = self._rew = self._done = self._info = None
if self._multi_env:
self.reward = np.zeros(self.env_num)
self.length = np.zeros(self.env_num)
else:
self.reward, self.length = 0, 0
for b in self._cached_buf:
b.reset()
def seed(self, seed=None):
if hasattr(self.env, 'seed'):
return self.env.seed(seed)
def render(self, **kwargs):
if hasattr(self.env, 'render'):
return self.env.render(**kwargs)
def close(self):
if hasattr(self.env, 'close'):
self.env.close()
def _make_batch(self, data):
if isinstance(data, np.ndarray):
return data[None]
else:
return np.array([data])
def collect(self, n_step=0, n_episode=0, render=0):
warning_count = 0
if not self._multi_env:
n_episode = np.sum(n_episode)
start_time = time.time()
assert sum([(n_step != 0), (n_episode != 0)]) == 1, \
"One and only one collection number specification permitted!"
cur_step = 0
cur_episode = np.zeros(self.env_num) if self._multi_env else 0
reward_sum = 0
length_sum = 0
while True:
if warning_count >= 100000:
warnings.warn(
'There are already many steps in an episode. '
'You should add a time limitation to your environment!',
Warning)
if self._multi_env:
batch_data = Batch(
obs=self._obs, act=self._act, rew=self._rew,
done=self._done, obs_next=None, info=self._info)
else:
batch_data = Batch(
obs=self._make_batch(self._obs),
act=self._make_batch(self._act),
rew=self._make_batch(self._rew),
done=self._make_batch(self._done),
obs_next=None,
info=self._make_batch(self._info))
result = self.policy(batch_data, self.state)
self.state = result.state if hasattr(result, 'state') else None
if isinstance(result.act, torch.Tensor):
self._act = result.act.detach().cpu().numpy()
elif not isinstance(self._act, np.ndarray):
self._act = np.array(result.act)
else:
self._act = result.act
obs_next, self._rew, self._done, self._info = self.env.step(
self._act if self._multi_env else self._act[0])
if render > 0:
self.env.render()
time.sleep(render)
self.length += 1
self.reward += self._rew
if self._multi_env:
for i in range(self.env_num):
data = {
'obs': self._obs[i], 'act': self._act[i],
'rew': self._rew[i], 'done': self._done[i],
'obs_next': obs_next[i], 'info': self._info[i]}
if self._cached_buf:
warning_count += 1
self._cached_buf[i].add(**data)
elif self._multi_buf:
warning_count += 1
self.buffer[i].add(**data)
cur_step += 1
else:
warning_count += 1
self.buffer.add(**data)
cur_step += 1
if self._done[i]:
if n_step != 0 or np.isscalar(n_episode) or \
cur_episode[i] < n_episode[i]:
cur_episode[i] += 1
reward_sum += self.reward[i]
length_sum += self.length[i]
if self._cached_buf:
cur_step += len(self._cached_buf[i])
self.buffer.update(self._cached_buf[i])
self.reward[i], self.length[i] = 0, 0
if self._cached_buf:
self._cached_buf[i].reset()
if isinstance(self.state, list):
self.state[i] = None
elif self.state is not None:
if isinstance(self.state[i], dict):
self.state[i] = {}
else:
self.state[i] = self.state[i] * 0
if isinstance(self.state, torch.Tensor):
# remove ref count in pytorch (?)
self.state = self.state.detach()
if sum(self._done):
obs_next = self.env.reset(np.where(self._done)[0])
if n_episode != 0:
if isinstance(n_episode, list) and \
(cur_episode >= np.array(n_episode)).all() or \
np.isscalar(n_episode) and \
cur_episode.sum() >= n_episode:
break
else:
self.buffer.add(
self._obs, self._act[0], self._rew,
self._done, obs_next, self._info)
cur_step += 1
if self._done:
cur_episode += 1
reward_sum += self.reward
length_sum += self.length
self.reward, self.length = 0, 0
self.state = None
obs_next = self.env.reset()
if n_episode != 0 and cur_episode >= n_episode:
break
if n_step != 0 and cur_step >= n_step:
break
self._obs = obs_next
self._obs = obs_next
if self._multi_env:
cur_episode = sum(cur_episode)
duration = time.time() - start_time
self.step_speed.add(cur_step / duration)
self.episode_speed.add(cur_episode / duration)
self.collect_step += cur_step
self.collect_episode += cur_episode
self.collect_time += duration
if isinstance(n_episode, list):
n_episode = np.sum(n_episode)
else:
n_episode = max(cur_episode, 1)
return {
'n/ep': cur_episode,
'n/st': cur_step,
'v/st': self.step_speed.get(),
'v/ep': self.episode_speed.get(),
'rew': reward_sum / n_episode,
'len': length_sum / n_episode,
}
def sample(self, batch_size):
if self._multi_buf:
if batch_size > 0:
lens = [len(b) for b in self.buffer]
total = sum(lens)
batch_index = np.random.choice(
total, batch_size, p=np.array(lens) / total)
else:
batch_index = np.array([])
batch_data = Batch()
for i, b in enumerate(self.buffer):
cur_batch = (batch_index == i).sum()
if batch_size and cur_batch or batch_size <= 0:
batch, indice = b.sample(cur_batch)
batch = self.process_fn(batch, b, indice)
batch_data.append(batch)
else:
batch_data, indice = self.buffer.sample(batch_size)
batch_data = self.process_fn(batch_data, self.buffer, indice)
return batch_data
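# An illustrative sketch of driving the Collector above with a single
# (non-vectorized) gym environment. RandomPolicy is hypothetical; it only
# provides the two things Collector relies on: a process_fn hook and a
# __call__ returning a Batch with an `act` field. Assumes the gym package.
if __name__ == '__main__':
    import gym

    class RandomPolicy:
        def __init__(self, action_space):
            self.action_space = action_space

        def process_fn(self, batch, buffer, indice):
            # No extra processing (e.g. return estimation) in this sketch.
            return batch

        def __call__(self, batch, state=None):
            # One sampled action per observation row in the batch.
            return Batch(act=[self.action_space.sample() for _ in batch.obs])

    env = gym.make('CartPole-v0')
    collector = Collector(RandomPolicy(env.action_space), env,
                          buffer=ReplayBuffer(2000))
    print(collector.collect(n_step=100))  # e.g. {'n/ep': ..., 'n/st': 100, ...}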
40.150628 | 79 | 0.492393 | 1,100 | 9,596 | 4.080909 | 0.140909 | 0.032747 | 0.02896 | 0.021831 | 0.185342 | 0.056137 | 0.012029 | 0 | 0 | 0 | 0 | 0.009566 | 0.411734 | 9,596 | 238 | 80 | 40.319328 | 0.785651 | 0.021259 | 0 | 0.198198 | 0 | 0 | 0.034957 | 0 | 0 | 0 | 0 | 0 | 0.009009 | 1 | 0.045045 | false | 0 | 0.031532 | 0.004505 | 0.112613 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5d0a9e073091c730b7a6a4929db2b2500c65ff5d | 1,591 | py | Python | drink_partners/partners/tests/views/test_search_partner_view.py | henriquebraga/drink-partners | 4702263ae3e43ea9403cff5a72b68245d61880c7 | ["Apache-2.0"] | null | null | null | drink_partners/partners/tests/views/test_search_partner_view.py | henriquebraga/drink-partners | 4702263ae3e43ea9403cff5a72b68245d61880c7 | ["Apache-2.0"] | 22 | 2020-05-02T19:32:24.000Z | 2021-10-17T21:19:46.000Z | drink_partners/partners/tests/views/test_search_partner_view.py | henriquebraga/drink-partners | 4702263ae3e43ea9403cff5a72b68245d61880c7 | ["Apache-2.0"] | null | null | null |
from drink_partners.contrib.samples import partner_bar_legal
class TestSearchPartner:
async def test_should_return_bad_request_for_str_coordinates(
self,
client,
partner_search_with_str_coordinates_url
):
async with client.get(partner_search_with_str_coordinates_url) as response: # noqa
assert response.status == 400
response_json = await response.json()
assert response_json['error_code'] == 'bad_request'
assert response_json['error_message'] == (
'Invalid coordinate longitude:a latitude:a'
)
async def test_should_return_nearest_partner_for_coordinate(
self,
client,
partner_search_coordinates_url,
save_partners
):
async with client.get(partner_search_coordinates_url) as response: # noqa
assert response.status == 200
response_json = await response.json()
assert response_json == partner_bar_legal()
async def test_should_return_not_found_when_no_partner_covers_coordinate(
self,
client,
partner_search_coordinates_url
):
async with client.get(partner_search_coordinates_url) as response: # noqa
assert response.status == 404
response_json = await response.json()
assert response_json['error_code'] == 'not_found'
assert response_json['error_message'] == (
'Partners not found covering area for '
'latitude:-43.36556 longitude:-22.99669'
)
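# The fixtures above (client, the URL fixtures and save_partners) come from a
# conftest.py that is not part of this file. A hypothetical sketch of the two
# URL fixtures, assuming a /v1/partners/search endpoint, could look like:
#
# import pytest
#
# @pytest.fixture
# def partner_search_coordinates_url():
#     return '/v1/partners/search?latitude=-43.36556&longitude=-22.99669'
#
# @pytest.fixture
# def partner_search_with_str_coordinates_url():
#     return '/v1/partners/search?latitude=a&longitude=a'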
31.82 | 90 | 0.649906 | 174 | 1,591 | 5.574713 | 0.333333 | 0.136082 | 0.092784 | 0.094845 | 0.683505 | 0.547423 | 0.519588 | 0.437113 | 0.278351 | 0.278351 | 0 | 0.020193 | 0.284098 | 1,591 | 49 | 91 | 32.469388 | 0.831431 | 0.0088 | 0 | 0.432432 | 0 | 0 | 0.115702 | 0 | 0 | 0 | 0 | 0 | 0.216216 | 1 | 0 | false | 0 | 0.027027 | 0 | 0.054054 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5d0d63268d357d52fa0b7327baa9d61702e3b1cd | 3,341 | py | Python | mjecv/io/base.py | mje-nz/mjecv | 9a02c005a0abc7d21594f65c348cfe5185c90184 | ["MIT"] | null | null | null | mjecv/io/base.py | mje-nz/mjecv | 9a02c005a0abc7d21594f65c348cfe5185c90184 | ["MIT"] | null | null | null | mjecv/io/base.py | mje-nz/mjecv | 9a02c005a0abc7d21594f65c348cfe5185c90184 | ["MIT"] | null | null | null |
import multiprocessing
from typing import List, Optional
import numpy as np
from ..util import dill_for_apply
class ImageSequenceWriter:
def __init__(self, pattern, writer, *, max_index=None):
if type(pattern) is not str:
raise ValueError("Pattern must be string")
if pattern.format(1, index="1") == pattern.format(2, index="2"):
raise ValueError("Pattern must use {} or {index}")
self._pattern = pattern
self._writer = writer
self._max_index = max_index
self._index = 1
@property
def next_filename(self):
index = str(self._index)
if self._max_index:
index = "{:0{}d}".format(self._index, len(str(self._max_index)))
return self._pattern.format(self._index, index=index)
def _save(self, filename: str, image: np.ndarray):
self._writer(filename, image)
def save(self, image: np.ndarray):
self._save(self.next_filename, image)
self._index += 1
def finish(self):
pass
class MultiprocessingImageSequenceWriter(ImageSequenceWriter):
"""Image sequence writer that uses multiprocessing to save several images in
parallel.
This falls apart for large objects, as multiprocessing pickles them and pipes them
into the subprocesses.
"""
def __init__(self, *args, max_workers=None, max_waiting=None, **kwargs):
super().__init__(*args, **kwargs)
if max_workers is None:
max_workers = multiprocessing.cpu_count() - 1
ctx = multiprocessing.get_context("spawn")
self._pool = ctx.Pool(max_workers)
if max_waiting is not None:
# Semaphore's value is number of slots available for tasks to wait in
self._sem = ctx.Semaphore(
max_waiting
) # type: Optional[multiprocessing.synchronize.Semaphore]
else:
self._sem = None
self._results = [] # type: List[multiprocessing.pool.AsyncResult]
def __del__(self):
self.terminate()
def _save(self, filename: str, image: np.ndarray):
# Limit number of waiting tasks
if self._sem:
self._sem.acquire()
def callback(v):
assert self._sem is not None
self._sem.release()
else:
callback = None # type: ignore
args = (self._writer, (filename, image))
if dill_for_apply:
# Use dill instead of pickle, and make sure writer returns the filename
_writer = self._writer # Exclude self from capture to avoid dilling _pool
args = dill_for_apply(lambda f, i: _writer(f, i) or f, filename, image)
result = self._pool.apply_async(
*args, callback=callback, error_callback=callback,
)
self._results.append(result)
def terminate(self):
self._pool.terminate()
self._pool.join()
def finish(self, result_handler=None):
try:
# self._pool.close()
for result in self._results:
filename = result.get()
if result_handler is not None:
result_handler(filename)
self._pool.close()
except KeyboardInterrupt:
self._pool.terminate()
finally:
self._pool.join()
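# A small usage sketch for the writers above, assuming imageio provides the
# underlying writer callable (any f(filename, image) would do, e.g. cv2.imwrite).
# The __main__ guard matters because the "spawn" start method re-imports this module.
if __name__ == '__main__':
    import imageio

    frames = [np.zeros((48, 64), dtype=np.uint8) for _ in range(3)]
    writer = MultiprocessingImageSequenceWriter('frame_{index}.png',
                                                imageio.imwrite,
                                                max_index=len(frames))
    for frame in frames:
        writer.save(frame)
    # result_handler receives each job's return value (the filename when
    # dill_for_apply is active, otherwise the writer's own return value).
    writer.finish(result_handler=print)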
33.079208 | 86 | 0.609398 | 393 | 3,341 | 4.977099 | 0.328244 | 0.03272 | 0.018405 | 0.026585 | 0.03681 | 0.03681 | 0.03681 | 0.03681 | 0 | 0 | 0 | 0.003409 | 0.297516 | 3,341 | 100 | 87 | 33.41 | 0.829996 | 0.161329 | 0 | 0.109589 | 0 | 0 | 0.02381 | 0 | 0 | 0 | 0 | 0 | 0.013699 | 1 | 0.150685 | false | 0.013699 | 0.054795 | 0 | 0.246575 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5d0f2e44cd4703366dc6065304ee5f71411d41c4 | 1,495 | py | Python | 377_combination_sum_iv.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | ["Apache-2.0"] | 2 | 2018-04-24T19:17:40.000Z | 2018-04-24T19:33:52.000Z | 377_combination_sum_iv.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | ["Apache-2.0"] | null | null | null | 377_combination_sum_iv.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | ["Apache-2.0"] | 3 | 2020-06-17T05:48:52.000Z | 2021-01-02T06:08:25.000Z |
# 377 Combination Sum IV
# Given an integer array with all positive numbers and no duplicates,
# find the number of possible combinations that add up to a positive integer target.
#
# Example:
#
# nums = [1, 2, 3]
# target = 4
#
# The possible combination ways are:
# (1, 1, 1, 1)
# (1, 1, 2)
# (1, 2, 1)
# (1, 3)
# (2, 1, 1)
# (2, 2)
# (3, 1)
#
# Note that different sequences are counted as different combinations.
#
# Therefore the output is 7.
#
# Follow up:
# What if negative numbers are allowed in the given array?
# How does it change the problem?
# What limitations do we need to add to the question to allow negative numbers?
class Solution:
def combinationSum4(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
nums.sort()
res = [0] * (target + 1)
for i in range(1, len(res)):
for num in nums:
if num > i:
break
elif num == i:
res[i] += 1
else:
res[i] += res[i-num]
return res[target]
# https://www.hrwhisper.me/leetcode-combination-sum-iv/
# dp[i] += dp[i-num]
def combinationSum4(self, nums, target):
dp = [1] + [0] * target
for i in range(1, target+1):
for num in nums:
if i >= num:
dp[i] += dp[i-num]
return dp[target]
print(Solution().combinationSum4([1, 2, 3], 4))
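# Worked trace of the dp recurrence dp[i] += dp[i-num] for nums = [1, 2, 3],
# target = 4 (dp[0] = 1 counts the empty combination):
# dp[1] = dp[0]                 = 1    (1)
# dp[2] = dp[1] + dp[0]         = 2    (1+1, 2)
# dp[3] = dp[2] + dp[1] + dp[0] = 4    (1+1+1, 1+2, 2+1, 3)
# dp[4] = dp[3] + dp[2] + dp[1] = 7    -- matches the output printed above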
24.508197 | 84 | 0.535786 | 208 | 1,495 | 3.850962 | 0.423077 | 0.017478 | 0.014981 | 0.014981 | 0.174782 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042596 | 0.340468 | 1,495 | 60 | 85 | 24.916667 | 0.769777 | 0.471572 | 0 | 0.190476 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0 | 0 | 0.238095 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5d102888dd921effe96e5fc388b2a1b8ea50b383 | 3,440 | py | Python | nvidia-texture-tools/conanfile.py | koeleck/conan-packages | da43e82c2444e934e69a38e524998d028f8edcc3 | ["Unlicense"] | null | null | null | nvidia-texture-tools/conanfile.py | koeleck/conan-packages | da43e82c2444e934e69a38e524998d028f8edcc3 | ["Unlicense"] | null | null | null | nvidia-texture-tools/conanfile.py | koeleck/conan-packages | da43e82c2444e934e69a38e524998d028f8edcc3 | ["Unlicense"] | null | null | null |
from conans import ConanFile, CMake, tools
import os
STATIC_LIBS = ["nvtt", "squish", "rg_etc1", "nvimage", "bc6h", "posh",
"bc7", "nvmath", "nvthread", "nvcore"]
SHARED_LIBS = ["nvtt", "nvimage", "nvthread", "nvmath", "nvcore"]
class NvidiatexturetoolsConan(ConanFile):
name = "nvidia-texture-tools"
version = "662d223626185f7c6c7e0d822a4796a691acc05a"
license = "MIT"
author = "koeleck"
url = "<Package recipe repository url here, for issues about the package>"
description = "The NVIDIA Texture Tools is a collection of image processing and texture manipulation tools, designed to be integrated in game tools and asset processing pipelines."
settings = "os", "compiler", "build_type", "arch"
source_subfolder = "nvtt"
no_copy_source = True
options = {"shared": [True, False],
"fPIC": [True, False],
"use_OpenMP": [True, False]
}
default_options = "shared=False", "fPIC=True", "use_OpenMP=True"
generators = "cmake"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def source(self):
url = "https://github.com/castano/nvidia-texture-tools/archive/{}.zip".format(self.version)
tools.get(url)
os.rename('nvidia-texture-tools-{}'.format(self.version), self.source_subfolder)
tools.replace_in_file(os.path.join(self.source_subfolder, "CMakeLists.txt"), "PROJECT(NV)",
'''PROJECT(NV)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()''')
def build(self):
cmake = CMake(self)
cmake.definitions["HAVE_CUDA"] = False
cmake.definitions["HAVE_OPENMP"] = self.options.use_OpenMP
cmake.configure(source_folder=self.source_subfolder)
cmake.build()
def package(self):
self.copy("license*", src=self.source_subfolder, ignore_case=True, keep_path=False)
self.copy("nvtt.h", dst="include/nvtt", src=os.path.join(self.source_subfolder, "src", "nvtt"), keep_path=False)
self.copy("nvtt_wrapper.h", dst="include/nvtt", src=os.path.join(self.source_subfolder, "src", "nvtt"), keep_path=False)
if self.options.shared:
for libname in SHARED_LIBS:
self.copy("*{}*.dll".format(libname), dst="bin", src=os.path.join(self.build_folder, "bin"), keep_path=False)
self.copy("*{}*.lib".format(libname), dst="lib", src=os.path.join(self.build_folder, "lib"), keep_path=False)
self.copy("*{}*.so*".format(libname), dst="lib", src=os.path.join(self.build_folder, "lib"), keep_path=False)
else:
for libname in STATIC_LIBS:
self.copy("*{}*.a".format(libname), dst="lib", src=os.path.join(self.build_folder, "lib"), keep_path=False)
self.copy("*{}*.lib".format(libname), dst="lib", src=os.path.join(self.build_folder, "lib"), keep_path=False)
def package_info(self):
all_libs = tools.collect_libs(self)
if self.options.shared:
libs = all_libs
else:
libs = []
for libname in STATIC_LIBS:
libs += [lib for lib in all_libs if libname in lib]
self.cpp_info.libs = libs
if self.settings.os == "Linux":
self.cpp_info.libs.extend(["dl", "pthread"])
if self.options.shared:
self.cpp_info.defines = ["NVTT_SHARED=1"]
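# A typical conan v1 workflow for this recipe, shown for illustration (the
# user/channel pair is arbitrary):
#
#   conan create . koeleck/testing
#   conan create . koeleck/testing -o nvidia-texture-tools:shared=True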
47.123288 | 184 | 0.627326 | 432 | 3,440 | 4.87037 | 0.30787 | 0.022814 | 0.038023 | 0.053232 | 0.269487 | 0.248574 | 0.217205 | 0.203897 | 0.203897 | 0.203897 | 0 | 0.011936 | 0.22064 | 3,440 | 72 | 185 | 47.777778 | 0.772846 | 0 | 0 | 0.145161 | 0 | 0.016129 | 0.23054 | 0.018789 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080645 | false | 0 | 0.032258 | 0 | 0.322581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5d105ccb37935c70d4da4645c5743044452805b9 | 2,890 | py | Python | train_args.py | MyWay/Create-Your-Own-Image-Classifier | 70e5744084435af8a74b2cfe2098c25b0745c9af | ["MIT"] | null | null | null | train_args.py | MyWay/Create-Your-Own-Image-Classifier | 70e5744084435af8a74b2cfe2098c25b0745c9af | ["MIT"] | null | null | null | train_args.py | MyWay/Create-Your-Own-Image-Classifier | 70e5744084435af8a74b2cfe2098c25b0745c9af | ["MIT"] | null | null | null |
#!/usr/bin/env python3
""" train_args.py
train_args.py command-line args.
"""
import argparse
def get_args():
"""
"""
parser = argparse.ArgumentParser(
description="This script lets you train and save your model.",
usage="python3 train.py flowers/train --gpu --learning_rate 0.001 --epochs 11 --gpu --hidden_units 500",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('data_directory', action="store")
parser.add_argument('--arch',
action="store",
default="alexnet",
dest='arch',
type=str,
help='Model architecture to use (e.g. alexnet).',
)
parser.add_argument('--save_dir',
action="store",
default=".",
dest='save_dir',
type=str,
help='Directory to save the model file.',
)
parser.add_argument('--save_name',
action="store",
default="checkpoint",
dest='save_name',
type=str,
help='Checkpoint filename.',
)
parser.add_argument('--categories_json',
action="store",
default="cat_to_name.json",
dest='categories_json',
type=str,
help='Path to file containing the categories.',
)
parser.add_argument('--gpu',
action="store_true",
dest="use_gpu",
default=False,
help='Use the GPU to train instead of the CPU')
hp = parser.add_argument_group('hyperparameters')
hp.add_argument('--learning_rate',
action="store",
default=0.001,
type=float,
help='Learning rate')
hp.add_argument('--hidden_units', '-hu',
action="store",
dest="hidden_units",
default=[4096],
type=int,
nargs='+',
help='Hidden layer units')
hp.add_argument('--epochs',
action="store",
dest="epochs",
default=1,
type=int,
help='Epochs to train the model for')
return parser
def main():
"""
Main Function
"""
print('Command line argument utility for train.py.\nTry "python train.py -h".')
if __name__ == '__main__':
main()
"""
main() is called if script is executed on it's own.
"""
30.104167 | 112 | 0.450173 | 259 | 2,890 | 4.868726 | 0.382239 | 0.087232 | 0.09437 | 0.031721 | 0.093577 | 0.093577 | 0.093577 | 0.093577 | 0.093577 | 0.093577 | 0 | 0.012369 | 0.440484 | 2,890 | 96 | 113 | 30.104167 | 0.767471 | 0.028374 | 0 | 0.227273 | 0 | 0.015152 | 0.261158 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.015152 | 0 | 0.060606 | 0.015152 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5d12933f8b3900ae610ac625eadbf5cf407b20ba | 1,483 | py | Python | apps/payment/views.py | canadiyaman/thetask | 0f1cea1d8eea4966138ef0bdc303a53e3511e57d | ["RSA-MD"] | null | null | null | apps/payment/views.py | canadiyaman/thetask | 0f1cea1d8eea4966138ef0bdc303a53e3511e57d | ["RSA-MD"] | null | null | null | apps/payment/views.py | canadiyaman/thetask | 0f1cea1d8eea4966138ef0bdc303a53e3511e57d | ["RSA-MD"] | null | null | null |
from django.http import HttpResponseRedirect
from django.conf import settings
from django.views.generic import TemplateView
from apps.payment.models import PaymentLog
from apps.payment.stripe import get_token, get_payment_charge
from apps.subscription.views import start_subscription
class ChargeView(TemplateView):
template_name = 'payment/charge.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['stripe_public_key'] = settings.STRIPE_PUBLISHABLE_KEY
context['amount'] = 100
context['currency'] = 'tl'
return context
def post(self, request):
name = request.POST.get('name')
card_number = request.POST.get('cardnumber')
exp_month = int(request.POST.get('exp-date').split('/')[0])
exp_year = int(request.POST.get('exp-date').split('/')[1])
cvc = request.POST.get('cvc')
card = {
"name": name,
"number": card_number,
"exp_month": exp_month,
"exp_year": exp_year,
"cvc": cvc
}
token = get_token(card)
charge = get_payment_charge(amount=100, currency="usd", description="test", token=token.stripe_id)
if charge.paid:
log_payment(user=request.user, data=charge)
start_subscription(request.user)
return HttpResponseRedirect('/')
def log_payment(user, data):
PaymentLog.objects.create(user=user, data=data)
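# Hypothetical URL wiring for ChargeView, for illustration only (the project's
# actual urls.py is not shown here):
#
# from django.urls import path
# from apps.payment.views import ChargeView
#
# urlpatterns = [
#     path('payment/charge/', ChargeView.as_view(), name='charge'),
# ]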
34.488372 | 106 | 0.653405 | 177 | 1,483 | 5.316384 | 0.355932 | 0.058448 | 0.074389 | 0.036132 | 0.061637 | 0.061637 | 0.061637 | 0 | 0 | 0 | 0 | 0.006969 | 0.225893 | 1,483 | 42 | 107 | 35.309524 | 0.812718 | 0 | 0 | 0 | 0 | 0 | 0.084289 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.171429 | 0 | 0.371429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5d12e645166a3997ff332b7bb734f77bb3785c93 | 1,407 | py | Python | secure_data_store/cli.py | HumanBrainProject/secure-data-store | 69b615cf979fc08f4ae8474ca9cd3e6d2f04b7f0 | ["MIT"] | 1 | 2021-11-23T12:26:01.000Z | 2021-11-23T12:26:01.000Z | secure_data_store/cli.py | HumanBrainProject/secure-data-store | 69b615cf979fc08f4ae8474ca9cd3e6d2f04b7f0 | ["MIT"] | null | null | null | secure_data_store/cli.py | HumanBrainProject/secure-data-store | 69b615cf979fc08f4ae8474ca9cd3e6d2f04b7f0 | ["MIT"] | 1 | 2020-05-21T15:51:44.000Z | 2020-05-21T15:51:44.000Z |
# -*- coding: utf-8 -*-
"""Console script for secure_data_store."""
import click
from . import secure_data_store as sds
CONFIG='~/.sdsrc'
@click.group()
def main():
"""Wrapper for GoCryptFS"""
@main.command()
@click.argument('name')
@click.option('--config', help='Path to config file', default='~/.sdsrc')
def create(name, config=None):
"""Create a new secure data container NAME."""
try:
config = sds.read_config(config)
sds.create(config, name)
except (sds.ContainerError, sds.GCFSError, FileExistsError, sds.ConfigError) as err:
print(err)
@main.command()
@click.argument('name')
@click.option('--config', help='Path to config file', default='~/.sdsrc')
def open(name, config=None):
"""Open an existing secure data container NAME.
Will print path to the opened, clear-text container."""
try:
config = sds.read_config(config)
sds.mount(config, name)
except (sds.ContainerError, sds.GCFSError, sds.ConfigError, sds.MountError) as err:
print(err)
@main.command()
@click.argument('name')
@click.option('--config', help='Path to config file', default='~/.sdsrc')
def close(name, config=None):
"""Close an opend data container NAME."""
try:
config = sds.read_config(config)
sds.unmount(config, name)
except (sds.ContainerError, sds.GCFSError, sds.ConfigError) as err:
print(err)
if __name__ == '__main__':
    main()
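# Assuming the package installs this click group as a console script named
# `sds`, the three subcommands are used like so (names are illustrative):
#
#   sds create mydata --config ~/.sdsrc
#   sds open mydata    # prints the path of the opened, clear-text container
#   sds close mydata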
29.3125 | 88 | 0.663113 | 183 | 1,407 | 5.060109 | 0.311475 | 0.058315 | 0.051836 | 0.077754 | 0.633909 | 0.633909 | 0.633909 | 0.518359 | 0.518359 | 0.390929 | 0 | 0.000864 | 0.177683 | 1,407 | 47 | 89 | 29.93617 | 0.799481 | 0.183369 | 0 | 0.545455 | 0 | 0 | 0.111707 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121212 | false | 0 | 0.060606 | 0 | 0.181818 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5d13dcd7f99f525058d1ada523c294f362a0d8b9 | 1,052 | py | Python | Support/Fuego/Pythia/pythia-0.4/packages/pyre/pyre/graph/Node.py | marient/PelePhysics | e6ad1839d77b194e09ab44ff850c9489652e5d81 | ["BSD-3-Clause-LBNL"] | 1 | 2019-04-24T13:32:23.000Z | 2019-04-24T13:32:23.000Z | Support/Fuego/Pythia/pythia-0.4/packages/pyre/pyre/graph/Node.py | marient/PelePhysics | e6ad1839d77b194e09ab44ff850c9489652e5d81 | ["BSD-3-Clause-LBNL"] | null | null | null | Support/Fuego/Pythia/pythia-0.4/packages/pyre/pyre/graph/Node.py | marient/PelePhysics | e6ad1839d77b194e09ab44ff850c9489652e5d81 | ["BSD-3-Clause-LBNL"] | null | null | null |
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from Drawable import Drawable
def nodeAttributes():
"""return a list of valid attributes for Node"""
return Node._validAttributes.keys()
class Node(Drawable):
def id(self): return self._id
def __init__(self, id):
Drawable.__init__(self)
self._id = id
return
_validAttributes = {
"color" : None,
"fontcolor" : None,
"fontname" : None,
"fontsize" : None,
"height" : None,
"label" : None,
"layer" : None,
"shape" : None,
"shapefile" : None,
"style" : None,
"width" : None
}
# version
__id__ = "$Id$"
#
# End of file
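# A small usage sketch: a Node simply carries an id on top of the Drawable
# attribute machinery, and nodeAttributes() lists the attribute names it accepts.
#
# n = Node("start")
# print(n.id())               # -> start
# print(sorted(nodeAttributes()))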
19.849057 | 82 | 0.429658 | 86 | 1,052 | 5.069767 | 0.593023 | 0.041284 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011142 | 0.31749 | 1,052 | 52 | 83 | 20.230769 | 0.5961 | 0.403992 | 0 | 0 | 0 | 0 | 0.121911 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.043478 | 0.043478 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5d13e82b9800d2ed9d73368f30bea490d35c562b | 3,522 | py | Python | cairis/gui/RiskScatterPanel.py | RachelLar/cairis_update | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | ["Apache-2.0"] | null | null | null | cairis/gui/RiskScatterPanel.py | RachelLar/cairis_update | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | ["Apache-2.0"] | null | null | null | cairis/gui/RiskScatterPanel.py | RachelLar/cairis_update | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pprint
import random
import wx
from cairis.core.armid import *
from cairis.core.Borg import Borg
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
def riskColourCode(riskScore):
if (riskScore <= 1):
return '#fef2ec'
elif (riskScore == 2):
return '#fcd9c8'
elif (riskScore == 3):
return '#f7ac91'
elif (riskScore == 4):
return '#f67e61'
elif (riskScore == 5):
return '#f2543d'
elif (riskScore == 6):
return '#e42626'
elif (riskScore == 7):
return '#b9051a'
elif (riskScore == 8):
return '#900014'
else:
return '#52000D'
class RiskScatterPanel(wx.Panel):
def __init__(self,parent):
wx.Panel.__init__(self,parent,RISKSCATTER_ID)
b = Borg()
self.dbProxy = b.dbProxy
self.dpi = 100
self.fig = Figure((5.0, 4.0), dpi=self.dpi)
self.canvas = FigCanvas(self, -1, self.fig)
self.axes = self.fig.add_subplot(111,xlabel='Severity',ylabel='Likelihood',autoscale_on=False)
self.axes.set_xticklabels(['Marginal','Critical','Catastrophic'])
self.axes.set_yticks([0,1,2,3,4,5])
self.toolbar = NavigationToolbar(self.canvas)
envs = self.dbProxy.getDimensionNames('environment')
self.envCombo = wx.ComboBox(self,RISKSCATTER_COMBOENVIRONMENT_ID,envs[0],choices=envs,size=(300,-1),style=wx.CB_DROPDOWN)
self.envCombo.Bind(wx.EVT_COMBOBOX,self.onEnvironmentChange)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.toolbar, 0, wx.EXPAND)
self.vbox.Add(self.envCombo,0, wx.EXPAND)
self.vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizer(self.vbox)
self.vbox.Fit(self)
self.drawScatter(envs[0])
def drawScatter(self,envName):
self.axes.clear()
self.axes.grid(True)
self.axes.set_xlabel('Severity')
self.axes.set_ylabel('Likelihood')
self.axes.set_xbound(0,4)
self.axes.set_ybound(0,5)
xs,ys,cs = self.dbProxy.riskScatter(envName)
ccs = []
for c in cs:
ccs.append(riskColourCode(c))
if ((len(xs) > 0) and (len(ys) > 0)):
self.axes.scatter(xs,ys,c=ccs,marker='d')
self.canvas.draw()
def onEnvironmentChange(self,evt):
envName = self.envCombo.GetStringSelection()
self.drawScatter(envName)
def on_save_plot(self, event):
fileChoices = "PNG (*.png)|*.png"
dlg = wx.FileDialog(self,message="Save risk scatter",defaultDir=os.getcwd(),defaultFile="scatter.png",wildcard=fileChoices,style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
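# A minimal embedding sketch, for illustration only: RiskScatterPanel expects
# the CAIRIS Borg singleton to already hold an initialised dbProxy, so this
# only works inside a configured CAIRIS session.
#
# app = wx.App()
# frame = wx.Frame(None, title='Risk scatter')
# panel = RiskScatterPanel(frame)
# frame.Show()
# app.MainLoop()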
33.865385 | 141 | 0.70017 | 488 | 3,522 | 5 | 0.422131 | 0.032787 | 0.027049 | 0.018443 | 0.019672 | 0.019672 | 0.019672 | 0 | 0 | 0 | 0 | 0.027138 | 0.173481 | 3,522 | 103 | 142 | 34.194175 | 0.811061 | 0.216638 | 0 | 0 | 0 | 0 | 0.068953 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065789 | false | 0 | 0.118421 | 0 | 0.315789 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5d14d6320ca92dcf32c70f780204293a845032e6 | 21,539 | py | Python | contrib/memcache_whisper.py | TimWhalen/graphite-web | e150af45e01d01141a8767ec0597e218105b9914 | ["Apache-2.0"] | 1 | 2020-01-23T16:25:50.000Z | 2020-01-23T16:25:50.000Z | contrib/memcache_whisper.py | TimWhalen/graphite-web | e150af45e01d01141a8767ec0597e218105b9914 | ["Apache-2.0"] | 2 | 2016-07-28T20:55:46.000Z | 2016-08-02T13:59:28.000Z | contrib/memcache_whisper.py | TimWhalen/graphite-web | e150af45e01d01141a8767ec0597e218105b9914 | ["Apache-2.0"] | 1 | 2020-03-05T06:50:02.000Z | 2020-03-05T06:50:02.000Z |
#!/usr/bin/env python
# Copyright 2008 Orbitz WorldWide
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This module is an implementation of the Whisper database API
# Here is the basic layout of a whisper data file
#
# File = Header,Data
# Header = Metadata,ArchiveInfo+
# Metadata = lastUpdate,maxRetention,xFilesFactor,archiveCount
# ArchiveInfo = Offset,SecondsPerPoint,Points
# Data = Archive+
# Archive = Point+
# Point = timestamp,value
"""
NOTE: This is a modified version of whisper.py
For details on the modification, read https://bugs.launchpad.net/graphite/+bug/245835
"""
import os, struct, time
try:
import fcntl
CAN_LOCK = True
except ImportError:
CAN_LOCK = False
LOCK = False
CACHE_HEADERS = False
__headerCache = {}
longFormat = "!L"
longSize = struct.calcsize(longFormat)
floatFormat = "!f"
floatSize = struct.calcsize(floatFormat)
timestampFormat = "!L"
timestampSize = struct.calcsize(timestampFormat)
valueFormat = "!d"
valueSize = struct.calcsize(valueFormat)
pointFormat = "!Ld"
pointSize = struct.calcsize(pointFormat)
metadataFormat = "!2LfL"
metadataSize = struct.calcsize(metadataFormat)
archiveInfoFormat = "!3L"
archiveInfoSize = struct.calcsize(archiveInfoFormat)
debug = startBlock = endBlock = lambda *a,**k: None
def exists(path):
return os.path.exists(path)
def drop(path):
os.remove(path)
def enableMemcache(servers = ['127.0.0.1:11211'], min_compress_len = 0):
from StringIO import StringIO
import memcache
global open, exists, drop
MC = memcache.Client(servers)
class open(StringIO):
def __init__(self,*args,**kwargs):
self.name = args[0]
self.mode = args[1]
if self.mode == "r+b" or self.mode == "rb":
StringIO.__init__(self, MC.get(self.name))
else:
StringIO.__init__(self)
def close(self):
if self.mode == "r+b" or self.mode == "wb":
MC.set(self.name, self.getvalue(), min_compress_len = min_compress_len)
StringIO.close(self)
def exists(path):
return MC.get(path) is not None
def drop(path):
MC.delete(path)
def enableDebug():
global open, debug, startBlock, endBlock
class open(file):
def __init__(self,*args,**kwargs):
file.__init__(self,*args,**kwargs)
self.writeCount = 0
self.readCount = 0
def write(self,data):
self.writeCount += 1
debug('WRITE %d bytes #%d' % (len(data),self.writeCount))
return file.write(self,data)
def read(self,bytes):
self.readCount += 1
debug('READ %d bytes #%d' % (bytes,self.readCount))
return file.read(self,bytes)
def debug(message):
print('DEBUG :: %s' % message)
__timingBlocks = {}
def startBlock(name):
__timingBlocks[name] = time.time()
def endBlock(name):
debug("%s took %.5f seconds" % (name,time.time() - __timingBlocks.pop(name)))
def __readHeader(fh):
info = __headerCache.get(fh.name)
if info: return info
#startBlock('__readHeader')
originalOffset = fh.tell()
fh.seek(0)
packedMetadata = fh.read(metadataSize)
(lastUpdate,maxRetention,xff,archiveCount) = struct.unpack(metadataFormat,packedMetadata)
archives = []
for i in xrange(archiveCount):
packedArchiveInfo = fh.read(archiveInfoSize)
(offset,secondsPerPoint,points) = struct.unpack(archiveInfoFormat,packedArchiveInfo)
archiveInfo = {
'offset' : offset,
'secondsPerPoint' : secondsPerPoint,
'points' : points,
'retention' : secondsPerPoint * points,
'size' : points * pointSize,
}
archives.append(archiveInfo)
fh.seek(originalOffset)
info = {
'lastUpdate' : lastUpdate,
'maxRetention' : maxRetention,
'xFilesFactor' : xff,
'archives' : archives,
}
if CACHE_HEADERS:
__headerCache[fh.name] = info
#endBlock('__readHeader')
return info
def __changeLastUpdate(fh):
return #XXX Make this a NOP, use os.stat(filename).st_mtime instead
startBlock('__changeLastUpdate()')
originalOffset = fh.tell()
fh.seek(0) #Based on assumption that first field is lastUpdate
now = int( time.time() )
packedTime = struct.pack(timestampFormat,now)
fh.write(packedTime)
fh.seek(originalOffset)
endBlock('__changeLastUpdate()')
def create(path,archiveList,xFilesFactor=0.5):
"""create(path,archiveList,xFilesFactor=0.5)
path is a string
archiveList is a list of archives, each of which is of the form (secondsPerPoint,numberOfPoints)
xFilesFactor specifies the fraction of data points in a propagation interval that must have known values for a propagation to occur
"""
#Validate archive configurations...
assert archiveList, "You must specify at least one archive configuration!"
archiveList.sort(key=lambda a: a[0]) #sort by precision (secondsPerPoint)
for i,archive in enumerate(archiveList):
if i == len(archiveList) - 1: break
next = archiveList[i+1]
assert archive[0] < next[0],\
"You cannot configure two archives with the same precision %s,%s" % (archive,next)
assert (next[0] % archive[0]) == 0,\
"Higher precision archives' precision must evenly divide all lower precision archives' precision %s,%s" % (archive[0],next[0])
retention = archive[0] * archive[1]
nextRetention = next[0] * next[1]
assert nextRetention > retention,\
"Lower precision archives must cover larger time intervals than higher precision archives %s,%s" % (archive,next)
#Looks good, now we create the file and write the header
assert not exists(path), "File %s already exists!" % path
fh = open(path,'wb')
if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
lastUpdate = struct.pack( timestampFormat, int(time.time()) )
oldest = sorted([secondsPerPoint * points for secondsPerPoint,points in archiveList])[-1]
maxRetention = struct.pack( longFormat, oldest )
xFilesFactor = struct.pack( floatFormat, float(xFilesFactor) )
archiveCount = struct.pack(longFormat, len(archiveList))
packedMetadata = lastUpdate + maxRetention + xFilesFactor + archiveCount
fh.write(packedMetadata)
headerSize = metadataSize + (archiveInfoSize * len(archiveList))
archiveOffsetPointer = headerSize
for secondsPerPoint,points in archiveList:
archiveInfo = struct.pack(archiveInfoFormat, archiveOffsetPointer, secondsPerPoint, points)
fh.write(archiveInfo)
archiveOffsetPointer += (points * pointSize)
zeroes = '\x00' * (archiveOffsetPointer - headerSize)
fh.write(zeroes)
fh.close()
def __propagate(fh,timestamp,xff,higher,lower):
lowerIntervalStart = timestamp - (timestamp % lower['secondsPerPoint'])
lowerIntervalEnd = lowerIntervalStart + lower['secondsPerPoint']
fh.seek(higher['offset'])
packedPoint = fh.read(pointSize)
(higherBaseInterval,higherBaseValue) = struct.unpack(pointFormat,packedPoint)
if higherBaseInterval == 0:
higherFirstOffset = higher['offset']
else:
timeDistance = lowerIntervalStart - higherBaseInterval
pointDistance = timeDistance / higher['secondsPerPoint']
byteDistance = pointDistance * pointSize
higherFirstOffset = higher['offset'] + (byteDistance % higher['size'])
higherPoints = lower['secondsPerPoint'] / higher['secondsPerPoint']
higherSize = higherPoints * pointSize
higherLastOffset = higherFirstOffset + (higherSize % higher['size'])
fh.seek(higherFirstOffset)
if higherFirstOffset < higherLastOffset: #we don't wrap the archive
seriesString = fh.read(higherLastOffset - higherFirstOffset)
else: #We do wrap the archive
higherEnd = higher['offset'] + higher['size']
seriesString = fh.read(higherEnd - higherFirstOffset)
fh.seek(higher['offset'])
seriesString += fh.read(higherLastOffset - higher['offset'])
#Now we unpack the series data we just read
byteOrder,pointTypes = pointFormat[0],pointFormat[1:]
points = len(seriesString) / pointSize
seriesFormat = byteOrder + (pointTypes * points)
unpackedSeries = struct.unpack(seriesFormat, seriesString)
#And finally we construct a list of values
neighborValues = [None] * points
currentInterval = lowerIntervalStart
step = higher['secondsPerPoint']
for i in xrange(0,len(unpackedSeries),2):
pointTime = unpackedSeries[i]
if pointTime == currentInterval:
neighborValues[i/2] = unpackedSeries[i+1]
currentInterval += step
#Compute an aggregateValue from neighborValues if we have enough known points
knownValues = [v for v in neighborValues if v is not None]
knownPercent = float(len(knownValues)) / float(len(neighborValues))
if knownPercent >= xff: #we have enough data to propagate a value!
aggregateValue = float(sum(knownValues)) / float(len(knownValues)) #TODO another CF besides average?
myPackedPoint = struct.pack(pointFormat,lowerIntervalStart,aggregateValue)
fh.seek(lower['offset'])
packedPoint = fh.read(pointSize)
(lowerBaseInterval,lowerBaseValue) = struct.unpack(pointFormat,packedPoint)
if lowerBaseInterval == 0: #First propagated update to this lower archive
fh.seek(lower['offset'])
fh.write(myPackedPoint)
else: #Not our first propagated update to this lower archive
timeDistance = lowerIntervalStart - lowerBaseInterval
pointDistance = timeDistance / lower['secondsPerPoint']
byteDistance = pointDistance * pointSize
lowerOffset = lower['offset'] + (byteDistance % lower['size'])
fh.seek(lowerOffset)
fh.write(myPackedPoint)
return True
else:
return False
def update(path,value,timestamp=None):
"""update(path,value,timestamp=None)
path is a string
value is a float
timestamp is either an int or float
"""
#startBlock('complete update')
value = float(value)
fh = open(path,'r+b')
if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
header = __readHeader(fh)
now = int( time.time() )
if timestamp is None: timestamp = now
timestamp = int(timestamp)
diff = now - timestamp
assert diff < header['maxRetention'] and diff >= 0, "Timestamp not covered by any archives in this database"
for i,archive in enumerate(header['archives']): #Find the highest-precision archive that covers timestamp
if archive['retention'] < diff: continue
lowerArchives = header['archives'][i+1:] #We'll pass on the update to these lower precision archives later
break
#First we update the highest-precision archive
myInterval = timestamp - (timestamp % archive['secondsPerPoint'])
myPackedPoint = struct.pack(pointFormat,myInterval,value)
fh.seek(archive['offset'])
packedPoint = fh.read(pointSize)
(baseInterval,baseValue) = struct.unpack(pointFormat,packedPoint)
if baseInterval == 0: #This file's first update
fh.seek(archive['offset'])
fh.write(myPackedPoint)
baseInterval,baseValue = myInterval,value
else: #Not our first update
timeDistance = myInterval - baseInterval
pointDistance = timeDistance / archive['secondsPerPoint']
byteDistance = pointDistance * pointSize
myOffset = archive['offset'] + (byteDistance % archive['size'])
fh.seek(myOffset)
fh.write(myPackedPoint)
#Now we propagate the update to lower-precision archives
#startBlock('update propagation')
higher = archive
for lower in lowerArchives:
if not __propagate(fh,myInterval,header['xFilesFactor'],higher,lower): break
higher = lower
#endBlock('update propagation')
__changeLastUpdate(fh)
fh.close()
#endBlock('complete update')
def update_many(path,points):
"""update_many(path,points)
path is a string
points is a list of (timestamp,value) points
"""
#startBlock('complete update_many path=%s points=%d' % (path,len(points)))
if not points: return
points = [ (int(t),float(v)) for (t,v) in points]
points.sort(key=lambda p: p[0],reverse=True) #order points by timestamp, newest first
fh = open(path,'r+b')
if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
header = __readHeader(fh)
now = int( time.time() )
archives = iter( header['archives'] )
currentArchive = next(archives)
#debug(' update_many currentArchive=%s' % str(currentArchive))
currentPoints = []
for point in points:
age = now - point[0]
#debug(' update_many iterating points, point=%s age=%d' % (str(point),age))
while currentArchive['retention'] < age: #we can't fit any more points in this archive
#debug(' update_many this point is too old to fit here, currentPoints=%d' % len(currentPoints))
if currentPoints: #commit all the points we've found that it can fit
currentPoints.reverse() #put points in chronological order
__archive_update_many(fh,header,currentArchive,currentPoints)
currentPoints = []
try:
currentArchive = next(archives)
#debug(' update_many using next archive %s' % str(currentArchive))
except StopIteration:
#debug(' update_many no more archives!')
currentArchive = None
break
if not currentArchive: break #drop remaining points that don't fit in the database
#debug(' update_many adding point=%s' % str(point))
currentPoints.append(point)
#debug(' update_many done iterating points')
if currentArchive and currentPoints: #don't forget to commit after we've checked all the archives
currentPoints.reverse()
__archive_update_many(fh,header,currentArchive,currentPoints)
__changeLastUpdate(fh)
fh.close()
#endBlock('complete update_many path=%s points=%d' % (path,len(points)))
def __archive_update_many(fh,header,archive,points):
step = archive['secondsPerPoint']
#startBlock('__archive_update_many file=%s archive=%s points=%d' % (fh.name,step,len(points)))
alignedPoints = [ (timestamp - (timestamp % step), value)
for (timestamp,value) in points ]
#Create a packed string for each contiguous sequence of points
#startBlock('__archive_update_many string packing')
packedStrings = []
previousInterval = None
currentString = ""
for (interval,value) in alignedPoints:
#debug('__archive_update_many iterating alignedPoint at %s' % interval)
if (not previousInterval) or (interval == previousInterval + step):
#debug('__archive_update_many was expected, packing onto currentString')
currentString += struct.pack(pointFormat,interval,value)
previousInterval = interval
else:
numberOfPoints = len(currentString) / pointSize
startInterval = previousInterval - (step * (numberOfPoints-1))
#debug('__archive_update_many was NOT expected, appending to packedStrings startInterval=%s currentString=%d bytes' % (startInterval,len(currentString)))
packedStrings.append( (startInterval,currentString) )
currentString = struct.pack(pointFormat,interval,value)
previousInterval = interval
if currentString:
#startInterval = previousInterval - (step * len(currentString) / pointSize) + step
numberOfPoints = len(currentString) / pointSize
startInterval = previousInterval - (step * (numberOfPoints-1))
#debug('__archive_update_many done iterating alignedPoints, remainder currentString of %d bytes, startInterval=%s' % (len(currentString),startInterval))
packedStrings.append( (startInterval,currentString) )
#endBlock('__archive_update_many string packing')
#Read base point and determine where our writes will start
fh.seek(archive['offset'])
packedBasePoint = fh.read(pointSize)
(baseInterval,baseValue) = struct.unpack(pointFormat,packedBasePoint)
if baseInterval == 0: #This file's first update
#debug('__archive_update_many first update')
baseInterval = packedStrings[0][0] #use our first string as the base, so we start at the start
#debug('__archive_update_many baseInterval is %s' % baseInterval)
#Write all of our packed strings in locations determined by the baseInterval
#startBlock('__archive_update_many write() operations')
for (interval,packedString) in packedStrings:
timeDistance = interval - baseInterval
pointDistance = timeDistance / step
byteDistance = pointDistance * pointSize
myOffset = archive['offset'] + (byteDistance % archive['size'])
fh.seek(myOffset)
archiveEnd = archive['offset'] + archive['size']
bytesBeyond = (myOffset + len(packedString)) - archiveEnd
#debug(' __archive_update_many myOffset=%d packedString=%d archiveEnd=%d bytesBeyond=%d' % (myOffset,len(packedString),archiveEnd,bytesBeyond))
if bytesBeyond > 0:
fh.write( packedString[:-bytesBeyond] )
#debug('We wrapped an archive!')
assert fh.tell() == archiveEnd, "archiveEnd=%d fh.tell=%d bytesBeyond=%d len(packedString)=%d" % (archiveEnd,fh.tell(),bytesBeyond,len(packedString))
fh.seek( archive['offset'] )
fh.write( packedString[-bytesBeyond:] ) #safe because it can't exceed the archive (retention checking logic above)
else:
fh.write(packedString)
#endBlock('__archive_update_many write() operations')
#Now we propagate the updates to lower-precision archives
#startBlock('__archive_update_many propagation')
higher = archive
lowerArchives = [arc for arc in header['archives'] if arc['secondsPerPoint'] > archive['secondsPerPoint']]
#debug('__archive_update_many I have %d lower archives' % len(lowerArchives))
for lower in lowerArchives:
fit = lambda i: i - (i % lower['secondsPerPoint'])
lowerIntervals = [fit(p[0]) for p in alignedPoints]
uniqueLowerIntervals = set(lowerIntervals)
#debug(' __archive_update_many points=%d unique=%d' % (len(alignedPoints),len(uniqueLowerIntervals)))
propagateFurther = False
for interval in uniqueLowerIntervals:
#debug(' __archive_update_many propagating from %d to %d, interval=%d' % (higher['secondsPerPoint'],lower['secondsPerPoint'],interval))
if __propagate(fh,interval,header['xFilesFactor'],higher,lower):
propagateFurther = True
#debug(' __archive_update_many Successful propagation!')
#debug(' __archive_update_many propagateFurther=%s' % propagateFurther)
if not propagateFurther: break
higher = lower
#endBlock('__archive_update_many propagation')
#endBlock('__archive_update_many file=%s archive=%s points=%d' % (fh.name,step,len(points)))
def info(path):
"""info(path)
path is a string
"""
fh = open(path,'rb')
info = __readHeader(fh)
fh.close()
return info
def fetch(path,fromTime,untilTime=None):
"""fetch(path,fromTime,untilTime=None)
path is a string
fromTime is an epoch time
untilTime is also an epoch time, but defaults to now
"""
fh = open(path,'rb')
header = __readHeader(fh)
now = int( time.time() )
if untilTime is None or untilTime > now:
untilTime = now
if fromTime < (now - header['maxRetention']):
fromTime = now - header['maxRetention']
assert fromTime < untilTime, "Invalid time interval"
diff = now - fromTime
for archive in header['archives']:
if archive['retention'] >= diff: break
fromInterval = int( fromTime - (fromTime % archive['secondsPerPoint']) )
untilInterval = int( untilTime - (untilTime % archive['secondsPerPoint']) )
fh.seek(archive['offset'])
packedPoint = fh.read(pointSize)
(baseInterval,baseValue) = struct.unpack(pointFormat,packedPoint)
if baseInterval == 0:
step = archive['secondsPerPoint']
points = (untilInterval - fromInterval) / step
timeInfo = (fromInterval,untilInterval,step)
valueList = [None] * points
return (timeInfo,valueList)
#Determine fromOffset
timeDistance = fromInterval - baseInterval
pointDistance = timeDistance / archive['secondsPerPoint']
byteDistance = pointDistance * pointSize
fromOffset = archive['offset'] + (byteDistance % archive['size'])
#Determine untilOffset
timeDistance = untilInterval - baseInterval
pointDistance = timeDistance / archive['secondsPerPoint']
byteDistance = pointDistance * pointSize
untilOffset = archive['offset'] + (byteDistance % archive['size'])
#Read all the points in the interval
fh.seek(fromOffset)
if fromOffset < untilOffset: #If we don't wrap around the archive
seriesString = fh.read(untilOffset - fromOffset)
else: #We do wrap around the archive, so we need two reads
archiveEnd = archive['offset'] + archive['size']
seriesString = fh.read(archiveEnd - fromOffset)
fh.seek(archive['offset'])
seriesString += fh.read(untilOffset - archive['offset'])
#Now we unpack the series data we just read (anything faster than unpack?)
byteOrder,pointTypes = pointFormat[0],pointFormat[1:]
points = len(seriesString) / pointSize
seriesFormat = byteOrder + (pointTypes * points)
unpackedSeries = struct.unpack(seriesFormat, seriesString)
#And finally we construct a list of values (optimize this!)
valueList = [None] * points #pre-allocate entire list for speed
currentInterval = fromInterval
step = archive['secondsPerPoint']
for i in xrange(0,len(unpackedSeries),2):
pointTime = unpackedSeries[i]
if pointTime == currentInterval:
pointValue = unpackedSeries[i+1]
valueList[i/2] = pointValue #in-place reassignment is faster than append()
currentInterval += step
fh.close()
timeInfo = (fromInterval,untilInterval,step)
return (timeInfo,valueList)
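#A minimal usage sketch (hypothetical path; assumes the whisper file was
#created beforehand with suitable retentions):
#  header = info('/tmp/example.wsp') #parsed header dict
#  (timeInfo,values) = fetch('/tmp/example.wsp', int(time.time()) - 3600)
#  (start,end,step) = timeInfo #aligned epoch bounds and point resolution
#  #values holds one float (or None for missing data) per step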
5d15ebcd4b1cb7692dfb4253406f6c027f0525d0 | 17,824 | py | Python | venv/lib/python3.7/site-packages/Xlib/ext/xinput.py | umr-bot/sliding-puzzle-solver-bot | 826532a426f343bcc66034b241a42b3bd864e07c | ["MIT"] | null | null | null
# Xlib.ext.xinput -- XInput extension module
#
# Copyright (C) 2012 Outpost Embedded, LLC
# Forest Bond <forest.bond@rapidrollout.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
'''
A very incomplete implementation of the XInput extension.
'''
import sys
import array
import struct
# Python 2/3 compatibility.
from six import integer_types
from Xlib.protocol import rq
from Xlib import X
extname = 'XInputExtension'
PropertyDeleted = 0
PropertyCreated = 1
PropertyModified = 2
NotifyNormal = 0
NotifyGrab = 1
NotifyUngrab = 2
NotifyWhileGrabbed = 3
NotifyPassiveGrab = 4
NotifyPassiveUngrab = 5
NotifyAncestor = 0
NotifyVirtual = 1
NotifyInferior = 2
NotifyNonlinear = 3
NotifyNonlinearVirtual = 4
NotifyPointer = 5
NotifyPointerRoot = 6
NotifyDetailNone = 7
GrabtypeButton = 0
GrabtypeKeycode = 1
GrabtypeEnter = 2
GrabtypeFocusIn = 3
GrabtypeTouchBegin = 4
AnyModifier = (1 << 31)
AnyButton = 0
AnyKeycode = 0
AsyncDevice = 0
SyncDevice = 1
ReplayDevice = 2
AsyncPairedDevice = 3
AsyncPair = 4
SyncPair = 5
SlaveSwitch = 1
DeviceChange = 2
MasterAdded = (1 << 0)
MasterRemoved = (1 << 1)
SlaveAdded = (1 << 2)
SlaveRemoved = (1 << 3)
SlaveAttached = (1 << 4)
SlaveDetached = (1 << 5)
DeviceEnabled = (1 << 6)
DeviceDisabled = (1 << 7)
AddMaster = 1
RemoveMaster = 2
AttachSlave = 3
DetachSlave = 4
AttachToMaster = 1
Floating = 2
ModeRelative = 0
ModeAbsolute = 1
MasterPointer = 1
MasterKeyboard = 2
SlavePointer = 3
SlaveKeyboard = 4
FloatingSlave = 5
KeyClass = 0
ButtonClass = 1
ValuatorClass = 2
ScrollClass = 3
TouchClass = 8
KeyRepeat = (1 << 16)
AllDevices = 0
AllMasterDevices = 1
DeviceChanged = 1
KeyPress = 2
KeyRelease = 3
ButtonPress = 4
ButtonRelease = 5
Motion = 6
Enter = 7
Leave = 8
FocusIn = 9
FocusOut = 10
HierarchyChanged = 11
PropertyEvent = 12
RawKeyPress = 13
RawKeyRelease = 14
RawButtonPress = 15
RawButtonRelease = 16
RawMotion = 17
DeviceChangedMask = (1 << DeviceChanged)
KeyPressMask = (1 << KeyPress)
KeyReleaseMask = (1 << KeyRelease)
ButtonPressMask = (1 << ButtonPress)
ButtonReleaseMask = (1 << ButtonRelease)
MotionMask = (1 << Motion)
EnterMask = (1 << Enter)
LeaveMask = (1 << Leave)
FocusInMask = (1 << FocusIn)
FocusOutMask = (1 << FocusOut)
HierarchyChangedMask = (1 << HierarchyChanged)
PropertyEventMask = (1 << PropertyEvent)
RawKeyPressMask = (1 << RawKeyPress)
RawKeyReleaseMask = (1 << RawKeyRelease)
RawButtonPressMask = (1 << RawButtonPress)
RawButtonReleaseMask = (1 << RawButtonRelease)
RawMotionMask = (1 << RawMotion)
GrabModeSync = 0
GrabModeAsync = 1
GrabModeTouch = 2
DEVICEID = rq.Card16
DEVICE = rq.Card16
DEVICEUSE = rq.Card8
class FP1616(rq.Int32):
def check_value(self, value):
return int(value * 65536.0)
def parse_value(self, value, display):
return float(value) / float(1 << 16)
class FP3232(rq.ValueField):
structcode = 'lL'
structvalues = 2
def check_value(self, value):
return value
def parse_value(self, value, display):
integral, frac = value
ret = float(integral)
# optimised math.ldexp(float(frac), -32)
ret += float(frac) * (1.0 / (1 << 32))
return ret
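# Worked example of the two fixed-point formats (comments only):
#   FP1616: check_value(1.5) -> int(1.5 * 65536.0) == 98304, and
#           parse_value(98304, d) -> 98304 / 65536.0 == 1.5
#   FP3232: parse_value((1, 2147483648), d) -> 1 + 2**31 / 2**32 == 1.5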
class XIQueryVersion(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(47),
rq.RequestLength(),
rq.Card16('major_version'),
rq.Card16('minor_version'),
)
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.Card16('major_version'),
rq.Card16('minor_version'),
rq.Pad(20),
)
def query_version(self):
return XIQueryVersion(
display=self.display,
opcode=self.display.get_extension_major(extname),
major_version=2,
minor_version=0,
)
class Mask(rq.List):
def __init__(self, name):
rq.List.__init__(self, name, rq.Card32, pad=0)
def pack_value(self, val):
mask_seq = array.array(rq.struct_to_array_codes['L'])
if isinstance(val, integer_types):
# We need to build a "binary mask" that (as far as I can tell) is
# encoded in native byte order from end to end. The simple case is
# with a single unsigned 32-bit value, for which we construct an
# array with just one item. For values too big to fit inside 4
# bytes we build a longer array, being careful to maintain native
# byte order across the entire set of values.
if sys.byteorder == 'little':
def fun(val):
mask_seq.insert(0, val)
elif sys.byteorder == 'big':
fun = mask_seq.append
else:
raise AssertionError(sys.byteorder)
while val:
fun(val & 0xFFFFFFFF)
val = val >> 32
else:
mask_seq.extend(val)
return mask_seq.tostring(), len(mask_seq), None
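# For example, pack_value(1 << RawMotion) fits in a single 32-bit word,
# so the array holds one item; values wider than 32 bits are split into
# 32-bit chunks, ordered so the packed bytes stay in native byte order.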
EventMask = rq.Struct(
DEVICE('deviceid'),
rq.LengthOf('mask', 2),
Mask('mask'),
)
class XISelectEvents(rq.Request):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(46),
rq.RequestLength(),
rq.Window('window'),
rq.LengthOf('masks', 2),
rq.Pad(2),
rq.List('masks', EventMask),
)
def select_events(self, event_masks):
'''
select_events(event_masks)
event_masks:
Sequence of (deviceid, mask) pairs, where deviceid is a numerical device
ID, or AllDevices or AllMasterDevices, and mask is either an unsigned
integer or sequence of 32 bits unsigned values
'''
return XISelectEvents(
display=self.display,
opcode=self.display.get_extension_major(extname),
window=self,
masks=event_masks,
)
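# A usage sketch (once init() below has registered the extension methods
# on Window objects):
#   window.xinput_select_events([
#       (AllDevices, ButtonPressMask | ButtonReleaseMask),
#   ])
# This asks the server to deliver button events from every device to the
# given window.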
AnyInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.Pad(2),
)
class ButtonMask(object):
def __init__(self, value, length):
self._value = value
self._length = length
def __len__(self):
return self._length
def __getitem__(self, key):
return self._value & (1 << key)
def __str__(self):
return repr(self)
def __repr__(self):
return '0b{value:0{width}b}'.format(value=self._value,
width=self._length)
class ButtonState(rq.ValueField):
structcode = None
def __init__(self, name):
rq.ValueField.__init__(self, name)
def parse_binary_value(self, data, display, length, fmt):
# Mask: bitfield of <length> button states.
mask_len = 4 * ((((length + 7) >> 3) + 3) >> 2)
mask_data = data[:mask_len]
mask_value = 0
for byte in reversed(struct.unpack('={0:d}B'.format(mask_len), mask_data)):
mask_value <<= 8
mask_value |= byte
data = data[mask_len:]
assert (mask_value & 1) == 0
return ButtonMask(mask_value >> 1, length), data
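# For example, a device reporting length=5 buttons gives
#   mask_len = 4 * ((((5 + 7) >> 3) + 3) >> 2) == 4
# i.e. one byte of button bits padded to a 4-byte boundary; the `>> 1`
# above drops bit 0 because X button numbering starts at 1.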
ButtonInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.LengthOf(('state', 'labels'), 2),
ButtonState('state'),
rq.List('labels', rq.Card32),
)
KeyInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.LengthOf('keycodes', 2),
rq.List('keycodes', rq.Card32),
)
ValuatorInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.Card16('number'),
rq.Card32('label'),
FP3232('min'),
FP3232('max'),
FP3232('value'),
rq.Card32('resolution'),
rq.Card8('mode'),
rq.Pad(3),
)
ScrollInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.Card16('number'),
rq.Card16('scroll_type'),
rq.Pad(2),
rq.Card32('flags'),
FP3232('increment'),
)
TouchInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.Card8('mode'),
rq.Card8('num_touches'),
)
INFO_CLASSES = {
KeyClass: KeyInfo,
ButtonClass: ButtonInfo,
ValuatorClass: ValuatorInfo,
ScrollClass: ScrollInfo,
TouchClass: TouchInfo,
}
class ClassInfoClass(object):
structcode = None
def parse_binary(self, data, display):
class_type, length = struct.unpack('=HH', data[:4])
class_struct = INFO_CLASSES.get(class_type, AnyInfo)
class_data, _ = class_struct.parse_binary(data, display)
data = data[length * 4:]
return class_data, data
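# For example, a class header of (type=ButtonClass, length=5) is parsed
# with the ButtonInfo struct and consumes length * 4 == 20 bytes of the
# reply before the next class is read.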
ClassInfo = ClassInfoClass()
DeviceInfo = rq.Struct(
DEVICEID('deviceid'),
rq.Card16('use'),
rq.Card16('attachment'),
rq.LengthOf('classes', 2),
rq.LengthOf('name', 2),
rq.Bool('enabled'),
rq.Pad(1),
rq.String8('name', 4),
rq.List('classes', ClassInfo),
)
class XIQueryDevice(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(48),
rq.RequestLength(),
DEVICEID('deviceid'),
rq.Pad(2),
)
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.LengthOf('devices', 2),
rq.Pad(22),
rq.List('devices', DeviceInfo),
)
def query_device(self, deviceid):
return XIQueryDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
deviceid=deviceid,
)
class XIGrabDevice(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(51),
rq.RequestLength(),
rq.Window('grab_window'),
rq.Card32('time'),
rq.Cursor('cursor', (X.NONE, )),
DEVICEID('deviceid'),
rq.Set('grab_mode', 1, (GrabModeSync, GrabModeAsync)),
rq.Set('paired_device_mode', 1, (GrabModeSync, GrabModeAsync)),
rq.Bool('owner_events'),
rq.Pad(1),
rq.LengthOf('mask', 2),
Mask('mask'),
)
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.Card8('status'),
rq.Pad(23),
)
def grab_device(self, deviceid, time, grab_mode, paired_device_mode, owner_events, event_mask):
return XIGrabDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
deviceid=deviceid,
grab_window=self,
time=time,
cursor=X.NONE,
grab_mode=grab_mode,
paired_device_mode=paired_device_mode,
owner_events=owner_events,
mask=event_mask,
)
class XIUngrabDevice(rq.Request):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(52),
rq.RequestLength(),
rq.Card32('time'),
DEVICEID('deviceid'),
rq.Pad(2),
)
def ungrab_device(self, deviceid, time):
return XIUngrabDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
time=time,
deviceid=deviceid,
)
class XIPassiveGrabDevice(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(54),
rq.RequestLength(),
rq.Card32('time'),
rq.Window('grab_window'),
rq.Cursor('cursor', (X.NONE, )),
rq.Card32('detail'),
DEVICEID('deviceid'),
rq.LengthOf('modifiers', 2),
rq.LengthOf('mask', 2),
rq.Set('grab_type', 1, (GrabtypeButton, GrabtypeKeycode, GrabtypeEnter,
GrabtypeFocusIn, GrabtypeTouchBegin)),
rq.Set('grab_mode', 1, (GrabModeSync, GrabModeAsync)),
rq.Set('paired_device_mode', 1, (GrabModeSync, GrabModeAsync)),
rq.Bool('owner_events'),
rq.Pad(2),
Mask('mask'),
rq.List('modifiers', rq.Card32),
)
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.LengthOf('modifiers', 2),
rq.Pad(22),
rq.List('modifiers', rq.Card32),
)
def passive_grab_device(self, deviceid, time, detail,
grab_type, grab_mode, paired_device_mode,
owner_events, event_mask, modifiers):
return XIPassiveGrabDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
deviceid=deviceid,
grab_window=self,
time=time,
cursor=X.NONE,
detail=detail,
grab_type=grab_type,
grab_mode=grab_mode,
paired_device_mode=paired_device_mode,
owner_events=owner_events,
mask=event_mask,
modifiers=modifiers,
)
def grab_keycode(self, deviceid, time, keycode,
grab_mode, paired_device_mode,
owner_events, event_mask, modifiers):
return passive_grab_device(self, deviceid, time, keycode,
GrabtypeKeycode,
grab_mode, paired_device_mode,
owner_events, event_mask, modifiers)
class XIPassiveUngrabDevice(rq.Request):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(55),
rq.RequestLength(),
rq.Window('grab_window'),
rq.Card32('detail'),
DEVICEID('deviceid'),
rq.LengthOf('modifiers', 2),
rq.Set('grab_type', 1, (GrabtypeButton, GrabtypeKeycode,
GrabtypeEnter, GrabtypeFocusIn,
GrabtypeTouchBegin)),
rq.Pad(3),
rq.List('modifiers', rq.Card32),
)
def passive_ungrab_device(self, deviceid, detail, grab_type, modifiers):
return XIPassiveUngrabDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
deviceid=deviceid,
grab_window=self,
detail=detail,
grab_type=grab_type,
modifiers=modifiers,
)
def ungrab_keycode(self, deviceid, keycode, modifiers):
return passive_ungrab_device(self, deviceid, keycode,
GrabtypeKeycode, modifiers)
HierarchyInfo = rq.Struct(
DEVICEID('deviceid'),
DEVICEID('attachment'),
DEVICEUSE('type'),
rq.Bool('enabled'),
rq.Pad(2),
rq.Card32('flags'),
)
HierarchyEventData = rq.Struct(
DEVICEID('deviceid'),
rq.Card32('time'),
rq.Card32('flags'),
rq.LengthOf('info', 2),
rq.Pad(10),
rq.List('info', HierarchyInfo),
)
ModifierInfo = rq.Struct(
rq.Card32('base_mods'),
rq.Card32('latched_mods'),
rq.Card32('locked_mods'),
rq.Card32('effective_mods'),
)
GroupInfo = rq.Struct(
rq.Card8('base_group'),
rq.Card8('latched_group'),
rq.Card8('locked_group'),
rq.Card8('effective_group'),
)
DeviceEventData = rq.Struct(
DEVICEID('deviceid'),
rq.Card32('time'),
rq.Card32('detail'),
rq.Window('root'),
rq.Window('event'),
rq.Window('child'),
FP1616('root_x'),
FP1616('root_y'),
FP1616('event_x'),
FP1616('event_y'),
rq.LengthOf('buttons', 2),
rq.Card16('valuators_len'),
DEVICEID('sourceid'),
rq.Pad(2),
rq.Card32('flags'),
rq.Object('mods', ModifierInfo),
rq.Object('groups', GroupInfo),
ButtonState('buttons'),
)
DeviceChangedEventData = rq.Struct(
DEVICEID('deviceid'),
rq.Card32('time'),
rq.LengthOf('classes', 2),
DEVICEID('sourceid'),
rq.Card8('reason'),
rq.Pad(11),
rq.List('classes', ClassInfo),
)
def init(disp, info):
disp.extension_add_method('display', 'xinput_query_version', query_version)
disp.extension_add_method('window', 'xinput_select_events', select_events)
disp.extension_add_method('display', 'xinput_query_device', query_device)
disp.extension_add_method('window', 'xinput_grab_device', grab_device)
disp.extension_add_method('display', 'xinput_ungrab_device', ungrab_device)
disp.extension_add_method('window', 'xinput_grab_keycode', grab_keycode)
disp.extension_add_method('window', 'xinput_ungrab_keycode', ungrab_keycode)
if hasattr(disp,"ge_add_event_data"):
for device_event in (ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion):
disp.ge_add_event_data(info.major_opcode, device_event, DeviceEventData)
disp.ge_add_event_data(info.major_opcode, DeviceChanged, DeviceEventData)
disp.ge_add_event_data(info.major_opcode, HierarchyChanged, HierarchyEventData)
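# A minimal end-to-end sketch (assumes a running X server with the
# XInput extension; Display() loads this module through the extension
# framework and calls init()):
#   from Xlib import display
#   d = display.Display()
#   r = d.xinput_query_version()
#   print(r.major_version, r.minor_version)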
5d1b794c29c7e14b547cb2e45a43996ab2eb188a | 60,985 | py | Python | tests/api/test_attributes.py | DowneyTung/saleor | 50f299d8e276b594753ee439d9e1a212f85a91b1 | ["CC-BY-4.0"] | stars: 19 (2019-12-03T17:28:07.000Z to 2021-09-10T21:30:52.000Z) | issues: 51 (2019-12-06T08:06:07.000Z to 2021-05-06T02:10:50.000Z) | forks: 20 (2020-02-03T00:38:59.000Z to 2022-01-03T13:07:52.000Z)
from typing import Union
from unittest import mock
import graphene
import pytest
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.template.defaultfilters import slugify
from graphene.utils.str_converters import to_camel_case
from saleor.core.taxes import zero_money
from saleor.graphql.core.utils import snake_to_camel_case
from saleor.graphql.product.enums import AttributeTypeEnum, AttributeValueType
from saleor.graphql.product.filters import filter_attributes_by_product_types
from saleor.graphql.product.mutations.attributes import validate_value_is_unique
from saleor.graphql.product.types.attributes import resolve_attribute_value_type
from saleor.product import AttributeInputType
from saleor.product.error_codes import ProductErrorCode
from saleor.product.models import (
Attribute,
AttributeProduct,
AttributeValue,
AttributeVariant,
Category,
Collection,
Product,
ProductType,
ProductVariant,
)
from saleor.product.utils.attributes import associate_attribute_values_to_instance
from tests.api.utils import get_graphql_content
def test_validate_value_is_unique(color_attribute):
value = color_attribute.values.first()
# a new value but with existing slug should raise an error
with pytest.raises(ValidationError):
validate_value_is_unique(color_attribute, AttributeValue(slug=value.slug))
# a new value with a new slug should pass
validate_value_is_unique(
color_attribute, AttributeValue(slug="spanish-inquisition")
)
# value that already belongs to the attribute shouldn't be taken into account
validate_value_is_unique(color_attribute, value)
def test_get_single_attribute_by_pk(user_api_client, color_attribute_without_values):
attribute_gql_id = graphene.Node.to_global_id(
"Attribute", color_attribute_without_values.id
)
query = """
query($id: ID!) {
attribute(id: $id) {
id
slug
}
}
"""
content = get_graphql_content(
user_api_client.post_graphql(query, {"id": attribute_gql_id})
)
assert content["data"]["attribute"], "Should have found an attribute"
assert content["data"]["attribute"]["id"] == attribute_gql_id
assert content["data"]["attribute"]["slug"] == color_attribute_without_values.slug
QUERY_ATTRIBUTES = """
query {
attributes(first: 20) {
edges {
node {
id
name
slug
values {
id
name
slug
}
}
}
}
}
"""
def test_attributes_query(user_api_client, product):
attributes = Attribute.objects
query = QUERY_ATTRIBUTES
response = user_api_client.post_graphql(query)
content = get_graphql_content(response)
attributes_data = content["data"]["attributes"]["edges"]
assert attributes_data
assert len(attributes_data) == attributes.count()
def test_attributes_query_hidden_attribute(user_api_client, product, color_attribute):
query = QUERY_ATTRIBUTES
# hide the attribute
color_attribute.visible_in_storefront = False
color_attribute.save(update_fields=["visible_in_storefront"])
attribute_count = Attribute.objects.get_visible_to_user(
user_api_client.user
).count()
assert attribute_count == 1
response = user_api_client.post_graphql(query)
content = get_graphql_content(response)
attributes_data = content["data"]["attributes"]["edges"]
assert len(attributes_data) == attribute_count
def test_attributes_query_hidden_attribute_as_staff_user(
staff_api_client, product, color_attribute, permission_manage_products
):
query = QUERY_ATTRIBUTES
# hide the attribute
color_attribute.visible_in_storefront = False
color_attribute.save(update_fields=["visible_in_storefront"])
attribute_count = Attribute.objects.all().count()
# The user doesn't have the permission yet to manage products,
# the user shouldn't be able to see the hidden attributes
assert Attribute.objects.get_visible_to_user(staff_api_client.user).count() == 1
# The user should now be able to see the attributes
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query)
content = get_graphql_content(response)
attributes_data = content["data"]["attributes"]["edges"]
assert len(attributes_data) == attribute_count
QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES = """
{
products(first: 1) {
edges {
node {
attributes {
attribute {
slug
}
values {
slug
}
value {
slug
}
}
variants {
attributes {
attribute {
slug
}
values {
slug
}
value {
slug
}
}
}
}
}
}
}
"""
@pytest.mark.parametrize("is_staff", (False, True))
def test_resolve_attributes_with_hidden(
user_api_client,
product,
color_attribute,
size_attribute,
staff_user,
is_staff,
permission_manage_products,
):
"""Ensure non-staff users don't see hidden attributes, and staff users having
the 'manage product' permission can.
"""
query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
api_client = user_api_client
variant = product.variants.first()
product_attribute = color_attribute
variant_attribute = size_attribute
expected_product_attribute_count = product.attributes.count() - 1
expected_variant_attribute_count = variant.attributes.count() - 1
if is_staff:
api_client.user = staff_user
expected_product_attribute_count += 1
expected_variant_attribute_count += 1
staff_user.user_permissions.add(permission_manage_products)
# Hide one product and variant attribute from the storefront
for attribute in (product_attribute, variant_attribute):
attribute.visible_in_storefront = False
attribute.save(update_fields=["visible_in_storefront"])
product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
"edges"
][0]["node"]
assert len(product["attributes"]) == expected_product_attribute_count
assert len(product["variants"][0]["attributes"]) == expected_variant_attribute_count
def test_resolve_attribute_values(user_api_client, product, staff_user):
"""Ensure the attribute values are properly resolved."""
query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
api_client = user_api_client
variant = product.variants.first()
assert product.attributes.count() == 1
assert variant.attributes.count() == 1
product_attribute_values = list(
product.attributes.first().values.values_list("slug", flat=True)
)
variant_attribute_values = list(
variant.attributes.first().values.values_list("slug", flat=True)
)
assert len(product_attribute_values) == 1
assert len(variant_attribute_values) == 1
product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
"edges"
][0]["node"]
product_attributes = product["attributes"]
variant_attributes = product["variants"][0]["attributes"]
assert len(product_attributes) == len(product_attribute_values)
assert len(variant_attributes) == len(variant_attribute_values)
assert product_attributes[0]["attribute"]["slug"] == "color"
assert product_attributes[0]["values"][0]["slug"] == product_attribute_values[0]
assert product_attributes[0]["value"]["slug"] == product_attribute_values[0]
assert variant_attributes[0]["attribute"]["slug"] == "size"
assert variant_attributes[0]["values"][0]["slug"] == variant_attribute_values[0]
assert variant_attributes[0]["value"]["slug"] == variant_attribute_values[0]
def test_resolve_attribute_values_non_assigned_to_node(
user_api_client, product, staff_user
):
"""Ensure the attribute values are properly resolved when an attribute is part
of the product type but not of the node (product/variant), thus no values should be
resolved.
"""
query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
api_client = user_api_client
variant = product.variants.first()
product_type = product.product_type
# Create dummy attributes
unassigned_product_attribute = Attribute.objects.create(name="P", slug="product")
unassigned_variant_attribute = Attribute.objects.create(name="V", slug="variant")
# Create a value for each dummy attribute to ensure they are not returned
# by the product or variant as they are not associated to them
AttributeValue.objects.bulk_create(
[
AttributeValue(slug="a", name="A", attribute=unassigned_product_attribute),
AttributeValue(slug="b", name="B", attribute=unassigned_product_attribute),
]
)
# Assign the dummy attributes to the product type and push them at the top
# through a sort_order=0 as the other attributes have sort_order=null
AttributeProduct.objects.create(
attribute=unassigned_product_attribute, product_type=product_type, sort_order=0
)
AttributeVariant.objects.create(
attribute=unassigned_variant_attribute, product_type=product_type, sort_order=0
)
assert product.attributes.count() == 1
assert variant.attributes.count() == 1
product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
"edges"
][0]["node"]
product_attributes = product["attributes"]
variant_attributes = product["variants"][0]["attributes"]
assert len(product_attributes) == 2, "Non-assigned attr from the PT may be missing"
assert len(variant_attributes) == 2, "Non-assigned attr from the PT may be missing"
assert product_attributes[0]["attribute"]["slug"] == "product"
assert product_attributes[0]["values"] == []
assert variant_attributes[0]["value"] is None
assert variant_attributes[0]["attribute"]["slug"] == "variant"
assert variant_attributes[0]["values"] == []
assert variant_attributes[0]["value"] is None
def test_attributes_filter_by_product_type_with_empty_value():
"""Ensure passing an empty or null value is ignored and the queryset is simply
returned without any modification.
"""
qs = Attribute.objects.all()
assert filter_attributes_by_product_types(qs, "...", "") is qs
assert filter_attributes_by_product_types(qs, "...", None) is qs
def test_attributes_filter_by_product_type_with_unsupported_field():
"""Ensure using an unknown field to filter attributes by raises a NotImplemented
exception.
"""
qs = Attribute.objects.all()
with pytest.raises(NotImplementedError) as exc:
filter_attributes_by_product_types(qs, "in_space", "a-value")
assert exc.value.args == ("Filtering by in_space is unsupported",)
def test_attributes_filter_by_non_existing_category_id():
"""Ensure using a non-existing category ID returns an empty query set."""
category_id = graphene.Node.to_global_id("Category", -1)
mocked_qs = mock.MagicMock()
qs = filter_attributes_by_product_types(mocked_qs, "in_category", category_id)
assert qs == mocked_qs.none.return_value
@pytest.mark.parametrize("test_deprecated_filter", [True, False])
@pytest.mark.parametrize("tested_field", ["inCategory", "inCollection"])
def test_attributes_in_collection_query(
user_api_client,
product_type,
category,
collection,
collection_with_products,
test_deprecated_filter,
tested_field,
):
if "Collection" in tested_field:
filtered_by_node_id = graphene.Node.to_global_id("Collection", collection.pk)
elif "Category" in tested_field:
filtered_by_node_id = graphene.Node.to_global_id("Category", category.pk)
else:
raise AssertionError(tested_field)
expected_qs = Attribute.objects.filter(
Q(attributeproduct__product_type_id=product_type.pk)
| Q(attributevariant__product_type_id=product_type.pk)
)
# Create another product type and attribute that shouldn't get matched
other_category = Category.objects.create(name="Other Category", slug="other-cat")
other_attribute = Attribute.objects.create(name="Other", slug="other")
other_product_type = ProductType.objects.create(
name="Other type", has_variants=True, is_shipping_required=True
)
other_product_type.product_attributes.add(other_attribute)
other_product = Product.objects.create(
name=f"Another Product",
product_type=other_product_type,
category=other_category,
price=zero_money(),
is_published=True,
)
# Create another collection with products but shouldn't get matched
# as we don't look for this other collection
other_collection = Collection.objects.create(
name="Other Collection",
slug="other-collection",
is_published=True,
description="Description",
)
other_collection.products.add(other_product)
query = """
query($nodeID: ID!) {
attributes(first: 20, %(filter_input)s) {
edges {
node {
id
name
slug
}
}
}
}
"""
if test_deprecated_filter:
query = query % {"filter_input": f"{tested_field}: $nodeID"}
else:
query = query % {"filter_input": "filter: { %s: $nodeID }" % tested_field}
variables = {"nodeID": filtered_by_node_id}
content = get_graphql_content(user_api_client.post_graphql(query, variables))
attributes_data = content["data"]["attributes"]["edges"]
flat_attributes_data = [attr["node"]["slug"] for attr in attributes_data]
expected_flat_attributes_data = list(expected_qs.values_list("slug", flat=True))
assert flat_attributes_data == expected_flat_attributes_data
CREATE_ATTRIBUTES_QUERY = """
mutation createAttribute($name: String!, $values: [AttributeValueCreateInput]) {
attributeCreate(input: {name: $name, values: $values}) {
errors {
field
message
}
productErrors {
field
message
code
}
attribute {
name
slug
values {
name
slug
}
productTypes(first: 10) {
edges {
node {
id
}
}
}
}
}
}
"""
def test_create_attribute_and_attribute_values(
staff_api_client, permission_manage_products
):
query = CREATE_ATTRIBUTES_QUERY
attribute_name = "Example name"
name = "Value name"
variables = {"name": attribute_name, "values": [{"name": name}]}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
assert not content["data"]["attributeCreate"]["errors"]
data = content["data"]["attributeCreate"]
# Check if the attribute was correctly created
assert data["attribute"]["name"] == attribute_name
assert data["attribute"]["slug"] == slugify(
attribute_name
), "The default slug should be the slugified name"
assert (
data["attribute"]["productTypes"]["edges"] == []
), "The attribute should not have been assigned to a product type"
# Check if the attribute values were correctly created
assert len(data["attribute"]["values"]) == 1
assert data["attribute"]["values"][0]["name"] == name
assert data["attribute"]["values"][0]["slug"] == slugify(name)
@pytest.mark.parametrize(
"input_slug, expected_slug, expected_error",
(
("my-slug", "my-slug", []),
(None, "my-name", []),
(
"",
None,
[{"field": "slug", "message": "The attribute's slug cannot be blank."}],
),
),
)
def test_create_attribute_with_given_slug(
staff_api_client,
permission_manage_products,
input_slug,
expected_slug,
expected_error,
):
staff_api_client.user.user_permissions.add(permission_manage_products)
query = """
mutation createAttribute(
$name: String!, $slug: String) {
attributeCreate(input: {name: $name, slug: $slug}) {
errors {
field
message
}
attribute {
slug
}
}
}
"""
attribute_name = "My Name"
variables = {"name": attribute_name, "slug": input_slug}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))
# Check if the errors are as expected: empty or something else
assert content["data"]["attributeCreate"]["errors"] == expected_error
# Check if the slug was correctly set if no error was expected
if not expected_error:
assert content["data"]["attributeCreate"]["attribute"]["slug"] == expected_slug
@pytest.mark.parametrize(
"name_1, name_2, error_msg, error_code",
(
(
"Red color",
"Red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
(
"Red color",
"red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
),
)
def test_create_attribute_and_attribute_values_errors(
staff_api_client,
name_1,
name_2,
error_msg,
error_code,
permission_manage_products,
product_type,
):
query = CREATE_ATTRIBUTES_QUERY
variables = {"name": "Example name", "values": [{"name": name_1}, {"name": name_2}]}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeCreate"]["errors"]
assert errors
assert errors[0]["field"] == "values"
assert errors[0]["message"] == error_msg
product_errors = content["data"]["attributeCreate"]["productErrors"]
assert product_errors[0]["code"] == error_code.name
UPDATE_ATTRIBUTE_QUERY = """
mutation updateAttribute(
$id: ID!, $name: String!, $addValues: [AttributeValueCreateInput]!,
$removeValues: [ID]!) {
attributeUpdate(
id: $id,
input: {
name: $name, addValues: $addValues,
removeValues: $removeValues}) {
errors {
field
message
}
productErrors {
field
message
code
}
attribute {
name
slug
values {
name
slug
}
productTypes(first: 10) {
edges {
node {
id
}
}
}
}
}
}
"""
def test_update_attribute_name(
staff_api_client, color_attribute, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
name = "Wings name"
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {"name": name, "id": node_id, "addValues": [], "removeValues": []}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
attribute.refresh_from_db()
data = content["data"]["attributeUpdate"]
assert data["attribute"]["name"] == name == attribute.name
assert data["attribute"]["productTypes"]["edges"] == []
def test_update_attribute_remove_and_add_values(
staff_api_client, color_attribute, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
name = "Wings name"
attribute_value_name = "Red Color"
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
attribute_value_id = attribute.values.first().id
value_id = graphene.Node.to_global_id("AttributeValue", attribute_value_id)
variables = {
"name": name,
"id": node_id,
"addValues": [{"name": attribute_value_name}],
"removeValues": [value_id],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
attribute.refresh_from_db()
data = content["data"]["attributeUpdate"]
assert not data["errors"]
assert data["attribute"]["name"] == name == attribute.name
assert not attribute.values.filter(pk=attribute_value_id).exists()
assert attribute.values.filter(name=attribute_value_name).exists()
def test_update_empty_attribute_and_add_values(
staff_api_client, color_attribute_without_values, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute_without_values
name = "Wings name"
attribute_value_name = "Yellow Color"
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {
"name": name,
"id": node_id,
"addValues": [{"name": attribute_value_name}],
"removeValues": [],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
get_graphql_content(response)
attribute.refresh_from_db()
assert attribute.values.count() == 1
assert attribute.values.filter(name=attribute_value_name).exists()
@pytest.mark.parametrize(
"name_1, name_2, error_msg, error_code",
(
(
"Red color",
"Red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
(
"Red color",
"red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
),
)
def test_update_attribute_and_add_attribute_values_errors(
staff_api_client,
name_1,
name_2,
error_msg,
error_code,
color_attribute,
permission_manage_products,
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {
"name": "Example name",
"id": node_id,
"removeValues": [],
"addValues": [{"name": name_1}, {"name": name_2}],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeUpdate"]["errors"]
assert errors
assert errors[0]["field"] == "addValues"
assert errors[0]["message"] == error_msg
product_errors = content["data"]["attributeUpdate"]["productErrors"]
assert product_errors[0]["code"] == error_code.name
def test_update_attribute_and_remove_others_attribute_value(
staff_api_client, color_attribute, size_attribute, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
size_attribute = size_attribute.values.first()
attr_id = graphene.Node.to_global_id("AttributeValue", size_attribute.pk)
variables = {
"name": "Example name",
"id": node_id,
"slug": "example-slug",
"addValues": [],
"removeValues": [attr_id],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeUpdate"]["errors"]
assert errors
assert errors[0]["field"] == "removeValues"
err_msg = "Value %s does not belong to this attribute." % str(size_attribute)
assert errors[0]["message"] == err_msg
product_errors = content["data"]["attributeUpdate"]["productErrors"]
assert product_errors[0]["code"] == ProductErrorCode.INVALID.name
def test_delete_attribute(
staff_api_client, color_attribute, permission_manage_products, product_type
):
attribute = color_attribute
query = """
mutation deleteAttribute($id: ID!) {
attributeDelete(id: $id) {
errors {
field
message
}
attribute {
id
}
}
}
"""
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {"id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeDelete"]
assert data["attribute"]["id"] == variables["id"]
with pytest.raises(attribute._meta.model.DoesNotExist):
attribute.refresh_from_db()
CREATE_ATTRIBUTE_VALUE_QUERY = """
mutation createAttributeValue(
$attributeId: ID!, $name: String!) {
attributeValueCreate(
attribute: $attributeId, input: {name: $name}) {
productErrors {
field
message
code
}
attribute {
values {
name
}
}
attributeValue {
name
type
slug
}
}
}
"""
def test_create_attribute_value(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
query = CREATE_ATTRIBUTE_VALUE_QUERY
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
name = "test name"
variables = {"name": name, "attributeId": attribute_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueCreate"]
assert not data["productErrors"]
attr_data = data["attributeValue"]
assert attr_data["name"] == name
assert attr_data["slug"] == slugify(name)
assert attr_data["type"] == "STRING"
assert name in [value["name"] for value in data["attribute"]["values"]]
def test_create_attribute_value_not_unique_name(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
query = CREATE_ATTRIBUTE_VALUE_QUERY
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
value_name = attribute.values.first().name
variables = {"name": value_name, "attributeId": attribute_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueCreate"]
assert data["productErrors"]
assert data["productErrors"][0]["code"] == ProductErrorCode.ALREADY_EXISTS.name
assert data["productErrors"][0]["field"] == "name"
def test_create_attribute_value_capitalized_name(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
query = CREATE_ATTRIBUTE_VALUE_QUERY
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
value_name = attribute.values.first().name
variables = {"name": value_name.upper(), "attributeId": attribute_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueCreate"]
assert data["productErrors"]
assert data["productErrors"][0]["code"] == ProductErrorCode.ALREADY_EXISTS.name
assert data["productErrors"][0]["field"] == "name"
UPDATE_ATTRIBUTE_VALUE_QUERY = """
mutation updateChoice(
$id: ID!, $name: String!) {
attributeValueUpdate(
id: $id, input: {name: $name}) {
errors {
field
message
}
attributeValue {
name
slug
}
attribute {
values {
name
}
}
}
}
"""
def test_update_attribute_value(
staff_api_client, pink_attribute_value, permission_manage_products
):
query = UPDATE_ATTRIBUTE_VALUE_QUERY
value = pink_attribute_value
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
name = "Crimson name"
variables = {"name": name, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueUpdate"]
value.refresh_from_db()
assert data["attributeValue"]["name"] == name == value.name
assert data["attributeValue"]["slug"] == slugify(name)
assert name in [value["name"] for value in data["attribute"]["values"]]
def test_update_attribute_value_name_not_unique(
staff_api_client, pink_attribute_value, permission_manage_products
):
query = UPDATE_ATTRIBUTE_VALUE_QUERY
value = pink_attribute_value.attribute.values.create(
name="Example Name", slug="example-name", value="#RED"
)
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
variables = {"name": pink_attribute_value.name, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueUpdate"]
assert data["errors"]
assert data["errors"][0]["message"]
assert data["errors"][0]["field"] == "name"
def test_delete_attribute_value(
staff_api_client, color_attribute, pink_attribute_value, permission_manage_products
):
value = color_attribute.values.get(name="Red")
query = """
mutation updateChoice($id: ID!) {
attributeValueDelete(id: $id) {
attributeValue {
name
slug
}
}
}
"""
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
variables = {"id": node_id}
staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
with pytest.raises(value._meta.model.DoesNotExist):
value.refresh_from_db()
@pytest.mark.parametrize(
"raw_value, expected_type",
[
("#0000", AttributeValueType.COLOR),
("#FF69B4", AttributeValueType.COLOR),
("rgb(255, 0, 0)", AttributeValueType.COLOR),
("hsl(0, 100%, 50%)", AttributeValueType.COLOR),
("hsla(120, 60%, 70%, 0.3)", AttributeValueType.COLOR),
("rgba(100%, 255, 0, 0)", AttributeValueType.COLOR),
("http://example.com", AttributeValueType.URL),
("https://example.com", AttributeValueType.URL),
("ftp://example.com", AttributeValueType.URL),
("example.com", AttributeValueType.STRING),
("Foo", AttributeValueType.STRING),
("linear-gradient(red, yellow)", AttributeValueType.GRADIENT),
("radial-gradient(#0000, yellow)", AttributeValueType.GRADIENT),
],
)
def test_resolve_attribute_value_type(raw_value, expected_type):
assert resolve_attribute_value_type(raw_value) == expected_type
def test_resolve_assigned_attribute_without_values(api_client, product_type, product):
"""Ensure the attributes assigned to a product type are resolved even if
the product doesn't provide any value for it or is not directly associated to it.
"""
# Retrieve the product's variant
variant = product.variants.get()
# Remove all attributes and values from the product and its variant
product.attributesrelated.clear()
variant.attributesrelated.clear()
# Retrieve the product and variant's attributes
products = get_graphql_content(
api_client.post_graphql(
"""
{
products(first: 10) {
edges {
node {
attributes {
attribute {
slug
}
values {
name
}
}
variants {
attributes {
attribute {
slug
}
values {
name
}
}
}
}
}
}
}
"""
)
)["data"]["products"]["edges"]
# Ensure we are only working on one product and variant, the ones we are testing
assert len(products) == 1
assert len(products[0]["node"]["variants"]) == 1
# Retrieve the nodes data
product = products[0]["node"]
variant = product["variants"][0]
# Ensure the product attributes values are all None
assert len(product["attributes"]) == 1
assert product["attributes"][0]["attribute"]["slug"] == "color"
assert product["attributes"][0]["values"] == []
# Ensure the variant attributes values are all None
assert variant["attributes"][0]["attribute"]["slug"] == "size"
assert variant["attributes"][0]["values"] == []
ASSIGN_ATTR_QUERY = """
mutation assign($productTypeId: ID!, $operations: [AttributeAssignInput]!) {
attributeAssign(productTypeId: $productTypeId, operations: $operations) {
errors {
field
message
}
productType {
id
productAttributes {
id
}
variantAttributes {
id
}
}
}
}
"""
def test_assign_attributes_to_product_type(
staff_api_client, permission_manage_products, attribute_list
):
product_type = ProductType.objects.create(name="Default Type", has_variants=True)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = ASSIGN_ATTR_QUERY
operations = []
variables = {"productTypeId": product_type_global_id, "operations": operations}
product_attributes_ids = {attr.pk for attr in attribute_list[:2]}
variant_attributes_ids = {attr.pk for attr in attribute_list[2:]}
for attr_id in product_attributes_ids:
operations.append(
{"type": "PRODUCT", "id": graphene.Node.to_global_id("Attribute", attr_id)}
)
for attr_id in variant_attributes_ids:
operations.append(
{"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attr_id)}
)
content = get_graphql_content(
staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
)["data"]["attributeAssign"]
assert not content["errors"], "Should have succeeded"
assert content["productType"]["id"] == product_type_global_id
assert len(content["productType"]["productAttributes"]) == len(
product_attributes_ids
)
assert len(content["productType"]["variantAttributes"]) == len(
variant_attributes_ids
)
found_product_attrs_ids = {
int(graphene.Node.from_global_id(attr["id"])[1])
for attr in content["productType"]["productAttributes"]
}
found_variant_attrs_ids = {
int(graphene.Node.from_global_id(attr["id"])[1])
for attr in content["productType"]["variantAttributes"]
}
assert found_product_attrs_ids == product_attributes_ids
assert found_variant_attrs_ids == variant_attributes_ids
def test_assign_variant_attribute_to_product_type_with_disabled_variants(
staff_api_client,
permission_manage_products,
product_type_without_variant,
color_attribute_without_values,
):
"""The assignAttribute mutation should raise an error when trying
to add an attribute as a variant attribute when
the product type doesn't support variants"""
product_type = product_type_without_variant
attribute = color_attribute_without_values
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = ASSIGN_ATTR_QUERY
operations = [
{"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attribute.pk)}
]
variables = {"productTypeId": product_type_global_id, "operations": operations}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeAssign"]
assert content["errors"] == [
{
"field": "operations",
"message": "Variants are disabled in this product type.",
}
]
def test_assign_variant_attribute_having_unsupported_input_type(
staff_api_client, permission_manage_products, product_type, size_attribute
):
"""The assignAttribute mutation should raise an error when trying
to use an attribute as a variant attribute when
the attribute's input type doesn't support variants"""
attribute = size_attribute
attribute.input_type = AttributeInputType.MULTISELECT
attribute.save(update_fields=["input_type"])
product_type.variant_attributes.clear()
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = ASSIGN_ATTR_QUERY
operations = [
{"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attribute.pk)}
]
variables = {"productTypeId": product_type_global_id, "operations": operations}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeAssign"]
assert content["errors"] == [
{
"field": "operations",
"message": (
"Attributes having for input types ['multiselect'] cannot be assigned "
"as variant attributes"
),
}
]
@pytest.mark.parametrize(
"product_type_attribute_type, gql_attribute_type",
(
(AttributeTypeEnum.PRODUCT, AttributeTypeEnum.VARIANT),
(AttributeTypeEnum.VARIANT, AttributeTypeEnum.PRODUCT),
(AttributeTypeEnum.PRODUCT, AttributeTypeEnum.PRODUCT),
(AttributeTypeEnum.VARIANT, AttributeTypeEnum.VARIANT),
),
)
def test_assign_attribute_to_product_type_having_already_that_attribute(
staff_api_client,
permission_manage_products,
color_attribute_without_values,
product_type_attribute_type,
gql_attribute_type,
):
"""The assignAttribute mutation should raise an error when trying
to add an attribute already contained in the product type."""
product_type = ProductType.objects.create(name="Type")
attribute = color_attribute_without_values
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
if product_type_attribute_type == AttributeTypeEnum.PRODUCT:
product_type.product_attributes.add(attribute)
elif product_type_attribute_type == AttributeTypeEnum.VARIANT:
product_type.variant_attributes.add(attribute)
else:
raise ValueError(f"Unknown: {product_type}")
query = ASSIGN_ATTR_QUERY
operations = [
{
"type": gql_attribute_type.value,
"id": graphene.Node.to_global_id("Attribute", attribute.pk),
}
]
variables = {"productTypeId": product_type_global_id, "operations": operations}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeAssign"]
assert content["errors"] == [
{
"field": "operations",
"message": "Color (color) have already been assigned to this product type.",
}
]
UNASSIGN_ATTR_QUERY = """
mutation unAssignAttribute(
$productTypeId: ID!, $attributeIds: [ID]!
) {
attributeUnassign(productTypeId: $productTypeId, attributeIds: $attributeIds) {
errors {
field
message
}
productType {
id
variantAttributes {
id
}
productAttributes {
id
}
}
}
}
"""
def test_unassign_attributes_from_product_type(
staff_api_client, permission_manage_products, attribute_list
):
product_type = ProductType.objects.create(name="Type")
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
variant_attribute, *product_attributes = attribute_list
product_type.product_attributes.add(*product_attributes)
product_type.variant_attributes.add(variant_attribute)
remaining_attribute_global_id = graphene.Node.to_global_id(
"Attribute", product_attributes[1].pk
)
query = UNASSIGN_ATTR_QUERY
variables = {
"productTypeId": product_type_global_id,
"attributeIds": [
graphene.Node.to_global_id("Attribute", product_attributes[0].pk)
],
}
content = get_graphql_content(
staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
)["data"]["attributeUnassign"]
assert not content["errors"]
assert content["productType"]["id"] == product_type_global_id
assert len(content["productType"]["productAttributes"]) == 1
assert len(content["productType"]["variantAttributes"]) == 1
assert (
content["productType"]["productAttributes"][0]["id"]
== remaining_attribute_global_id
)
def test_unassign_attributes_not_in_product_type(
staff_api_client, permission_manage_products, color_attribute_without_values
):
"""The unAssignAttribute mutation should not raise any error when trying
to remove an attribute that is not/no longer in the product type."""
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type = ProductType.objects.create(name="Type")
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = UNASSIGN_ATTR_QUERY
variables = {
"productTypeId": product_type_global_id,
"attributeIds": [
graphene.Node.to_global_id("Attribute", color_attribute_without_values.pk)
],
}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeUnassign"]
assert not content["errors"]
assert content["productType"]["id"] == product_type_global_id
assert len(content["productType"]["productAttributes"]) == 0
assert len(content["productType"]["variantAttributes"]) == 0
def test_retrieve_product_attributes_input_type(
staff_api_client, product, permission_manage_products
):
query = """
{
products(first: 10) {
edges {
node {
attributes {
values {
type
inputType
}
}
}
}
}
}
"""
found_products = get_graphql_content(
staff_api_client.post_graphql(query, permissions=[permission_manage_products])
)["data"]["products"]["edges"]
assert len(found_products) == 1
for gql_attr in found_products[0]["node"]["attributes"]:
assert len(gql_attr["values"]) == 1
assert gql_attr["values"][0]["type"] == "STRING"
assert gql_attr["values"][0]["inputType"] == "DROPDOWN"
@pytest.mark.parametrize(
"attribute, expected_value",
(
("filterable_in_storefront", True),
("filterable_in_dashboard", True),
("visible_in_storefront", True),
("available_in_grid", True),
("value_required", False),
("storefront_search_position", 0),
),
)
def test_retrieving_the_restricted_attributes_restricted(
staff_api_client,
color_attribute,
permission_manage_products,
attribute,
expected_value,
):
"""Checks if the attributes are restricted and if their default value
is the expected one."""
attribute = to_camel_case(attribute)
query = (
"""
{
attributes(first: 10) {
edges {
node {
%s
}
}
}
}
"""
% attribute
)
found_attributes = get_graphql_content(
staff_api_client.post_graphql(query, permissions=[permission_manage_products])
)["data"]["attributes"]["edges"]
assert len(found_attributes) == 1
assert found_attributes[0]["node"][attribute] == expected_value
ATTRIBUTES_RESORT_QUERY = """
mutation ProductTypeReorderAttributes(
$productTypeId: ID!
$moves: [ReorderInput]!
$type: AttributeTypeEnum!
) {
productTypeReorderAttributes(
productTypeId: $productTypeId
moves: $moves
type: $type
) {
productType {
id
variantAttributes {
id
slug
}
productAttributes {
id
}
}
errors {
field
message
}
}
}
"""
def test_sort_attributes_within_product_type_invalid_product_type(
staff_api_client, permission_manage_products
):
"""Try to reorder an invalid product type (invalid ID)."""
product_type_id = graphene.Node.to_global_id("ProductType", -1)
attribute_id = graphene.Node.to_global_id("Attribute", -1)
variables = {
"type": "VARIANT",
"productTypeId": product_type_id,
"moves": [{"id": attribute_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTES_RESORT_QUERY, variables, permissions=[permission_manage_products]
)
)["data"]["productTypeReorderAttributes"]
assert content["errors"] == [
{
"field": "productTypeId",
"message": f"Couldn't resolve to a product type: {product_type_id}",
}
]
def test_sort_attributes_within_product_type_invalid_id(
staff_api_client, permission_manage_products, color_attribute
):
"""Try to reorder an attribute not associated to the given product type."""
product_type = ProductType.objects.create(name="Dummy Type")
product_type_id = graphene.Node.to_global_id("ProductType", product_type.id)
attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
variables = {
"type": "VARIANT",
"productTypeId": product_type_id,
"moves": [{"id": attribute_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTES_RESORT_QUERY, variables, permissions=[permission_manage_products]
)
)["data"]["productTypeReorderAttributes"]
assert content["errors"] == [
{
"field": "moves",
"message": f"Couldn't resolve to an attribute: {attribute_id}",
}
]
@pytest.mark.parametrize(
"attribute_type, relation_field, backref_field",
(
("VARIANT", "variant_attributes", "attributevariant"),
("PRODUCT", "product_attributes", "attributeproduct"),
),
)
def test_sort_attributes_within_product_type(
staff_api_client,
attribute_list,
permission_manage_products,
attribute_type,
relation_field,
backref_field,
):
attributes = attribute_list
assert len(attributes) == 3
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type = ProductType.objects.create(name="Dummy Type")
product_type_id = graphene.Node.to_global_id("ProductType", product_type.id)
m2m_attributes = getattr(product_type, relation_field)
m2m_attributes.set(attributes)
sort_method = getattr(m2m_attributes, f"{relation_field}_sorted")
attributes = list(sort_method())
assert len(attributes) == 3
variables = {
"type": attribute_type,
"productTypeId": product_type_id,
"moves": [
{
"id": graphene.Node.to_global_id("Attribute", attributes[0].pk),
"sortOrder": +1,
},
{
"id": graphene.Node.to_global_id("Attribute", attributes[2].pk),
"sortOrder": -1,
},
],
}
expected_order = [attributes[1].pk, attributes[2].pk, attributes[0].pk]
content = get_graphql_content(
staff_api_client.post_graphql(ATTRIBUTES_RESORT_QUERY, variables)
)["data"]["productTypeReorderAttributes"]
assert not content["errors"]
assert (
content["productType"]["id"] == product_type_id
), "Did not return the correct product type"
gql_attributes = content["productType"][snake_to_camel_case(relation_field)]
assert len(gql_attributes) == len(expected_order)
for attr, expected_pk in zip(gql_attributes, expected_order):
gql_type, gql_attr_id = graphene.Node.from_global_id(attr["id"])
assert gql_type == "Attribute"
assert int(gql_attr_id) == expected_pk
ATTRIBUTE_VALUES_RESORT_QUERY = """
mutation attributeReorderValues($attributeId: ID!, $moves: [ReorderInput]!) {
attributeReorderValues(attributeId: $attributeId, moves: $moves) {
attribute {
id
values {
id
}
}
errors {
field
message
}
}
}
"""
def test_sort_values_within_attribute_invalid_product_type(
staff_api_client, permission_manage_products
):
"""Try to reorder an invalid attribute (invalid ID)."""
attribute_id = graphene.Node.to_global_id("Attribute", -1)
value_id = graphene.Node.to_global_id("AttributeValue", -1)
variables = {
"attributeId": attribute_id,
"moves": [{"id": value_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTE_VALUES_RESORT_QUERY,
variables,
permissions=[permission_manage_products],
)
)["data"]["attributeReorderValues"]
assert content["errors"] == [
{
"field": "attributeId",
"message": f"Couldn't resolve to an attribute: {attribute_id}",
}
]
def test_sort_values_within_attribute_invalid_id(
staff_api_client, permission_manage_products, color_attribute
):
"""Try to reorder a value not associated to the given attribute."""
attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
value_id = graphene.Node.to_global_id("AttributeValue", -1)
variables = {
"type": "VARIANT",
"attributeId": attribute_id,
"moves": [{"id": value_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTE_VALUES_RESORT_QUERY,
variables,
permissions=[permission_manage_products],
)
)["data"]["attributeReorderValues"]
assert content["errors"] == [
{
"field": "moves",
"message": f"Couldn't resolve to an attribute value: {value_id}",
}
]
def test_sort_values_within_attribute(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
AttributeValue.objects.create(attribute=attribute, name="Green", slug="green")
values = list(attribute.values.all())
assert len(values) == 3
staff_api_client.user.user_permissions.add(permission_manage_products)
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
m2m_values = attribute.values
m2m_values.set(values)
assert values == sorted(
values, key=lambda o: o.sort_order if o.sort_order is not None else o.pk
), "The values are not properly ordered"
variables = {
"attributeId": attribute_id,
"moves": [
{
"id": graphene.Node.to_global_id("AttributeValue", values[0].pk),
"sortOrder": +1,
},
{
"id": graphene.Node.to_global_id("AttributeValue", values[2].pk),
"sortOrder": -1,
},
],
}
expected_order = [values[1].pk, values[2].pk, values[0].pk]
content = get_graphql_content(
staff_api_client.post_graphql(ATTRIBUTE_VALUES_RESORT_QUERY, variables)
)["data"]["attributeReorderValues"]
assert not content["errors"]
assert content["attribute"]["id"] == attribute_id
gql_values = content["attribute"]["values"]
assert len(gql_values) == len(expected_order)
actual_order = []
for attr, expected_pk in zip(gql_values, expected_order):
gql_type, gql_attr_id = graphene.Node.from_global_id(attr["id"])
assert gql_type == "AttributeValue"
actual_order.append(int(gql_attr_id))
assert actual_order == expected_order
ATTRIBUTES_FILTER_QUERY = """
query($filters: AttributeFilterInput!) {
attributes(first: 10, filter: $filters) {
edges {
node {
name
slug
}
}
}
}
"""
def test_search_attributes(api_client, color_attribute, size_attribute):
variables = {"filters": {"search": "color"}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 1
assert attributes[0]["node"]["slug"] == "color"
def test_filter_attributes_if_filterable_in_dashboard(
api_client, color_attribute, size_attribute
):
color_attribute.filterable_in_dashboard = False
color_attribute.save(update_fields=["filterable_in_dashboard"])
variables = {"filters": {"filterableInDashboard": True}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 1
assert attributes[0]["node"]["slug"] == "size"
def test_filter_attributes_if_available_in_grid(
api_client, color_attribute, size_attribute
):
color_attribute.available_in_grid = False
color_attribute.save(update_fields=["available_in_grid"])
variables = {"filters": {"availableInGrid": True}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 1
assert attributes[0]["node"]["slug"] == "size"
def test_filter_attributes_by_global_id_list(api_client, attribute_list):
global_ids = [
graphene.Node.to_global_id("Attribute", attribute.pk)
for attribute in attribute_list[:2]
]
variables = {"filters": {"ids": global_ids}}
expected_slugs = sorted([attribute_list[0].slug, attribute_list[1].slug])
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
received_slugs = sorted(
[attributes[0]["node"]["slug"], attributes[1]["node"]["slug"]]
)
assert received_slugs == expected_slugs
ATTRIBUTES_SORT_QUERY = """
query($sortBy: AttributeSortingInput) {
attributes(first: 10, sortBy: $sortBy) {
edges {
node {
slug
}
}
}
}
"""
def test_sort_attributes_by_slug(api_client):
Attribute.objects.bulk_create(
[
Attribute(name="MyAttribute", slug="b"),
Attribute(name="MyAttribute", slug="a"),
]
)
variables = {"sortBy": {"field": "SLUG", "direction": "ASC"}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_SORT_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
assert attributes[0]["node"]["slug"] == "a"
assert attributes[1]["node"]["slug"] == "b"
@pytest.mark.parametrize(
"sort_field, m2m_model",
(
("DASHBOARD_VARIANT_POSITION", AttributeVariant),
("DASHBOARD_PRODUCT_POSITION", AttributeProduct),
),
)
def test_sort_attributes_by_position_in_product_type(
api_client,
color_attribute,
size_attribute,
sort_field: str,
m2m_model: Union[AttributeVariant, AttributeProduct],
):
"""Sorts attributes for dashboard custom ordering inside a given product type."""
product_type = ProductType.objects.create(name="My Product Type")
m2m_model.objects.create(
product_type=product_type, attribute=color_attribute, sort_order=0
)
m2m_model.objects.create(
product_type=product_type, attribute=size_attribute, sort_order=1
)
variables = {"sortBy": {"field": sort_field, "direction": "DESC"}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_SORT_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
assert attributes[0]["node"]["slug"] == "size"
assert attributes[1]["node"]["slug"] == "color"
def test_sort_attributes_by_default_sorting(api_client):
"""Don't provide any sorting, this should sort by name by default."""
Attribute.objects.bulk_create(
[Attribute(name="A", slug="b"), Attribute(name="B", slug="a")]
)
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_SORT_QUERY, {})
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
assert attributes[0]["node"]["slug"] == "b"
assert attributes[1]["node"]["slug"] == "a"
@pytest.mark.parametrize("is_variant", (True, False))
def test_attributes_of_products_are_sorted(
staff_api_client, product, color_attribute, is_variant
):
"""Ensures the attributes of products and variants are sorted."""
variant = product.variants.first()
if is_variant:
query = """
query($id: ID!) {
productVariant(id: $id) {
attributes {
attribute {
id
}
}
}
}
"""
else:
query = """
query($id: ID!) {
product(id: $id) {
attributes {
attribute {
id
}
}
}
}
"""
    # Create a dummy attribute with a higher ID.
    # Sorted by ID it would always come last, so if the query returns it first
    # we know the explicit sort_order, not the ID, drove the ordering.
other_attribute = Attribute.objects.create(name="Other", slug="other")
# Add the attribute to the product type
if is_variant:
product.product_type.variant_attributes.set([color_attribute, other_attribute])
else:
product.product_type.product_attributes.set([color_attribute, other_attribute])
# Retrieve the M2M object for the attribute vs the product type
if is_variant:
m2m_rel_other_attr = other_attribute.attributevariant.last()
else:
m2m_rel_other_attr = other_attribute.attributeproduct.last()
    # Push the last attribute to the top and leave the others' sort_order as None
m2m_rel_other_attr.sort_order = 0
m2m_rel_other_attr.save(update_fields=["sort_order"])
# Assign attributes to the product
node = variant if is_variant else product # type: Union[Product, ProductVariant]
node.attributesrelated.clear()
associate_attribute_values_to_instance(
node, color_attribute, color_attribute.values.first()
)
# Sort the database attributes by their sort order and ID (when None)
expected_order = [other_attribute.pk, color_attribute.pk]
# Make the node ID
if is_variant:
node_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
else:
node_id = graphene.Node.to_global_id("Product", product.pk)
# Retrieve the attributes
data = get_graphql_content(staff_api_client.post_graphql(query, {"id": node_id}))[
"data"
]
attributes = data["productVariant" if is_variant else "product"]["attributes"]
actual_order = [
int(graphene.Node.from_global_id(attr["attribute"]["id"])[1])
for attr in attributes
]
# Compare the received data against our expectations
assert actual_order == expected_order
| file stats: avg_line_length 31.598446 | max_line_length 88 | alphanum_fraction 0.649504 | [remaining quality-signal columns omitted] |
| hexsha 5d1c26b574f8d1aa48c37371f029724022116688 | size 3,557 | ext py | lang Python |
| path 3-photos/1-chromakey/app.py | repo rafacm/aws-serverless-workshop-innovator-island @ 3f982ef6f70d28dfdc4e1d19103c181609b06b08 | licenses ["MIT-0"] |
| stars 1 (2020-11-26T16:12:16.000Z) | issues null | forks 1 (2020-11-26T16:12:21.000Z) |
import os
import json
import ast
import cv2
import logging
import boto3
import botocore
s3 = boto3.client('s3')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def upload_file(file_name, bucket, object_name=None):
"""Upload a file to an S3 bucket
:param file_name: File to upload
:param bucket: Bucket to upload to
:param object_name: S3 object name. If not specified then same as file_name
:return: True if file was uploaded, else False
"""
# If S3 object_name was not specified, use file_name
if object_name is None:
object_name = file_name
# Upload the file
s3_client = s3
try:
response = s3_client.upload_file(file_name, bucket, object_name)
except botocore.exceptions.ClientError as e:
logging.error(e)
return False
return True
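# Usage sketch (hypothetical bucket and key names, not part of the workshop code):
#   upload_file('/tmp/out_photo.png', 'my-output-bucket', 'processed/photo.png')
# returns True on success, False if the upload raised a ClientError.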
def scale_image(image):
_image = image
target_height = 800
height, width, channels = _image.shape
logger.info('Original size: {}h x {}w'.format(height, width))
scale = height/target_height
if scale > 1:
_image = cv2.resize(image, (int(width/scale), int(height/scale)))
height, width, channels = image.shape
logger.info('New size: {}h x {}w'.format(int(height/scale), int(width/scale)))
return _image
def lambda_handler(event, context):
print ("Starting handler")
# get object metadata from event
input_bucket_name = event['Records'][0]['s3']['bucket']['name']
file_key = event['Records'][0]['s3']['object']['key']
output_bucket_name = os.environ['OUTPUT_BUCKET_NAME']
output_file_key = file_key.replace('.jpg', '.png')
print("Input bucket: ", input_bucket_name)
print("Output bucket: ", output_bucket_name)
if output_bucket_name is None:
print("Error: No OUTPUT_BUCKET_NAME environment variable specified.")
return
# set up local temp file names
local_input_temp_file = '/tmp/' + file_key
local_output_temp_file = '/tmp/out_' + file_key.replace('.jpg', '.png')
logger.info('Local input file: {}'.format(local_input_temp_file))
logger.info('Local output file: {}'.format(local_output_temp_file))
# get the object
s3.download_file(input_bucket_name, file_key, local_input_temp_file)
    # HSV range for the green-screen mask
    # (36, 25, 25) - most extreme
    # (36, 50, 50) - average
    # (36, 100, 100) - relaxed
    lower_range = ast.literal_eval(os.environ["HSV_LOWER"])  # parse "(h, s, v)" safely instead of eval()
    # (70, 255, 255) - default
    upper_range = ast.literal_eval(os.environ["HSV_UPPER"])
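    # Example configuration (hypothetical values, matching the ranges noted above):
    #   HSV_LOWER="(36, 50, 50)"  HSV_UPPER="(70, 255, 255)"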
print('Lower HSV range: ', lower_range)
print('Upper HSV range: ', upper_range)
# Read in the file
image = cv2.imread(local_input_temp_file)
# Resize the image if larger than target size
image = scale_image(image)
    # Convert OpenCV's BGR channel order to RGB so the saved PNG has correct colors
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Derive the HSV representation used for the green mask
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# convert to RGBA
image_alpha = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA)
# Threshold the HSV image to only green colors
mask = cv2.inRange(hsv, lower_range, upper_range)
# Invert the mask (i.e. select everything not green)
mask = ~mask
# Extract the non-green parts of the image
result = cv2.bitwise_and(image_alpha, image_alpha, mask=mask)
    # Save the result
    cv2.imwrite(local_output_temp_file, result)
    # Save to S3
if upload_file(local_output_temp_file, output_bucket_name, output_file_key):
print('Processed file uploaded.')
return True
| file stats: avg_line_length 30.663793 | max_line_length 86 | alphanum_fraction 0.671633 | [remaining quality-signal columns omitted] |
| hexsha 5d1d2183e311c349b4e6a54b6abedc9e76fcc8d1 | size 323 | ext py | lang Python |
| path metrics/overflow.py | repo DEKHTIARJonathan/pyinstrument @ cc4f3f6fc1b493d7cd058ecf41ad012e0030a512 | licenses ["BSD-3-Clause"] |
| stars 1 (2021-04-10T15:07:51.000Z) | issues 1 (2022-02-28T02:48:43.000Z) | forks 1 (2018-09-24T15:32:13.000Z) |
from pyinstrument import Profiler
p = Profiler(use_signal=False)
p.start()
def func(num):
if num == 0:
return
b = 0
    for x in range(1, 100000):
b += x
return func(num - 1)
func(900)
p.stop()
print(p.output_text())
with open('overflow_out.html', 'w') as f:
f.write(p.output_html())
| file stats: avg_line_length 13.458333 | max_line_length 41 | alphanum_fraction 0.597523 | [remaining quality-signal columns omitted] |
| hexsha 5d1d2acfb826681789b607d0aa918460c8853f38 | size 12,995 | ext py | lang Python |
| path scripts/gen_tee_bin.py | repo wawang621/optee_os @ bf7298044beca7a4501ece95c6146b5987cecaa4 | licenses ["BSD-2-Clause"] |
| stars null | issues null | forks null |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2019, Linaro Limited
#
from __future__ import print_function
from __future__ import division
import argparse
import sys
import struct
import re
import hashlib
try:
from elftools.elf.elffile import ELFFile
from elftools.elf.constants import SH_FLAGS
from elftools.elf.enums import ENUM_RELOC_TYPE_ARM
from elftools.elf.enums import ENUM_RELOC_TYPE_AARCH64
from elftools.elf.sections import SymbolTableSection
from elftools.elf.relocation import RelocationSection
except ImportError:
print("""
***
Can't find elftools module. Probably it is not installed on your system.
You can install this module with
$ apt install python3-pyelftools
if you are using Ubuntu. Or try to search for "pyelftools" or "elftools" in
your package manager if you are using some other distribution.
***
""")
raise
small_page_size = 4 * 1024
elffile_symbols = None
tee_pageable_bin = None
tee_pager_bin = None
tee_embdata_bin = None
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def round_up(n, m):
if n == 0:
return 0
else:
return (((n - 1) // m) + 1) * m
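# Worked examples: round_up(5, 4) == 8, round_up(8, 4) == 8, round_up(0, m) == 0.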
def get_arch_id(elffile):
e_machine = elffile.header['e_machine']
if e_machine == 'EM_ARM':
return 0
if e_machine == 'EM_AARCH64':
return 1
eprint('Unknown e_machine "%s"' % e_machine)
sys.exit(1)
def get_name(obj):
# Symbol or section .name might be a byte array or a string, we want a
# string
try:
name = obj.name.decode()
except (UnicodeDecodeError, AttributeError):
name = obj.name
return name
def get_symbol(elffile, name):
global elffile_symbols
global lsyms_def
if elffile_symbols is None:
elffile_symbols = dict()
lsyms_def = dict()
symbol_tables = [s for s in elffile.iter_sections()
if isinstance(s, SymbolTableSection)]
for section in symbol_tables:
for symbol in section.iter_symbols():
symbol_name = get_name(symbol)
if symbol['st_info']['bind'] == 'STB_GLOBAL':
elffile_symbols[symbol_name] = symbol
elif symbol['st_info']['bind'] == 'STB_LOCAL':
if symbol_name not in elffile_symbols.keys():
elffile_symbols[symbol_name] = symbol
if symbol_name not in lsyms_def.keys():
lsyms_def[symbol_name] = 1
else:
lsyms_def[symbol_name] += 1
if name in lsyms_def.keys() and lsyms_def[name] > 1:
eprint("Multiple definitions of local symbol %s" % name)
sys.exit(1)
if name not in elffile_symbols.keys():
eprint("Cannot find symbol %s" % name)
sys.exit(1)
return elffile_symbols[name]
def get_sections(elffile, pad_to, dump_names):
last_end = 0
bin_data = bytearray()
for section in elffile.iter_sections():
section_name = get_name(section)
if (section['sh_type'] == 'SHT_NOBITS' or
not (section['sh_flags'] & SH_FLAGS.SHF_ALLOC) or
not dump_names.match(section_name)):
continue
if last_end == 0:
bin_data = section.data()
else:
if section['sh_addr'] > last_end:
bin_data += bytearray(section['sh_addr'] - last_end)
bin_data += section.data()
last_end = section['sh_addr'] + section['sh_size']
if pad_to > last_end:
bin_data += bytearray(pad_to - last_end)
last_end = pad_to
return bin_data
def get_pageable_bin(elffile):
global tee_pageable_bin
if tee_pageable_bin is None:
pad_to = 0
dump_names = re.compile(r'^\..*_(pageable|init)$')
tee_pageable_bin = get_sections(elffile, pad_to, dump_names)
return tee_pageable_bin
def get_pager_bin(elffile):
global tee_pager_bin
if tee_pager_bin is None:
pad_to = get_symbol(elffile, '__data_end')['st_value']
dump_names = re.compile(
r'^\.(text|rodata|got|data|ARM\.exidx|ARM\.extab)$')
tee_pager_bin = get_sections(elffile, pad_to, dump_names)
return tee_pager_bin
def get_reloc_bin(elffile):
if get_arch_id(elffile) == 0:
exp_rel_type = ENUM_RELOC_TYPE_ARM['R_ARM_RELATIVE']
else:
exp_rel_type = ENUM_RELOC_TYPE_AARCH64['R_AARCH64_RELATIVE']
link_address = get_symbol(elffile, '__text_start')['st_value']
addrs = []
for section in elffile.iter_sections():
if not isinstance(section, RelocationSection):
continue
for rel in section.iter_relocations():
if rel['r_info_type'] == 0:
continue
if rel['r_info_type'] != exp_rel_type:
eprint("Unexpected relocation type 0x%x" %
rel['r_info_type'])
sys.exit(1)
addrs.append(rel['r_offset'] - link_address)
addrs.sort()
data = bytearray()
for a in addrs:
data += struct.pack('<I', a)
    # Relocations have been reduced to only the relative type, with the addend
    # stored at the relocation address (r_offset); at load time each such
    # address is increased by the load offset. The addresses (r_offset) are
    # sorted. The format is then:
    # uint32_t: relocation #1
    # uint32_t: relocation #2
    # ...
    # uint32_t: relocation #n
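    # For illustration: entry #k of this table can be read back with
    # struct.unpack_from('<I', data, 4 * k)[0].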
return data
def get_hashes_bin(elffile):
pageable_bin = get_pageable_bin(elffile)
if len(pageable_bin) % small_page_size != 0:
eprint("pageable size not a multiple of 4K: "
"{}".format(paged_area_size))
sys.exit(1)
data = bytearray()
for n in range(0, len(pageable_bin), small_page_size):
page = pageable_bin[n:n + small_page_size]
data += hashlib.sha256(page).digest()
return data
def get_embdata_bin(elffile):
global tee_embdata_bin
if tee_embdata_bin is None:
hashes_bin = get_hashes_bin(elffile)
reloc_bin = get_reloc_bin(elffile)
num_entries = 2
hash_offs = 2 * 4 + num_entries * (2 * 4)
hash_pad = round_up(len(hashes_bin), 8) - len(hashes_bin)
reloc_offs = hash_offs + len(hashes_bin) + hash_pad
reloc_pad = round_up(len(reloc_bin), 8) - len(reloc_bin)
total_len = reloc_offs + len(reloc_bin) + reloc_pad
tee_embdata_bin = struct.pack('<IIIIII', total_len, num_entries,
hash_offs, len(hashes_bin),
reloc_offs, len(reloc_bin))
tee_embdata_bin += hashes_bin + bytearray(hash_pad)
tee_embdata_bin += reloc_bin + bytearray(reloc_pad)
# The embedded data region is designed to be easy to extend when
# needed, it's formatted as:
# +---------------------------------------------------------+
# | uint32_t: Length of entire area including this field |
# +---------------------------------------------------------+
# | uint32_t: Number of entries "2" |
# +---------------------------------------------------------+
# | uint32_t: Offset of hashes from beginning of table |
# +---------------------------------------------------------+
# | uint32_t: Length of hashes |
# +---------------------------------------------------------+
# | uint32_t: Offset of relocations from beginning of table |
# +---------------------------------------------------------+
# | uint32_t: Length of relocations |
# +---------------------------------------------------------+
# | Data of hashes + eventual padding |
# +---------------------------------------------------------+
# | Data of relocations + eventual padding |
# +---------------------------------------------------------+
return tee_embdata_bin
def output_pager_bin(elffile, outf):
outf.write(get_pager_bin(elffile))
def output_pageable_bin(elffile, outf):
outf.write(get_pageable_bin(elffile))
def get_init_load_addr(elffile):
init_load_addr = get_symbol(elffile, '_start')['st_value']
init_load_addr_hi = init_load_addr >> 32
init_load_addr_lo = init_load_addr & 0xffffffff
return init_load_addr_hi, init_load_addr_lo
def output_header_v1(elffile, outf):
arch_id = get_arch_id(elffile)
pager_bin = get_pager_bin(elffile)
pageable_bin = get_pageable_bin(elffile)
embdata_bin = get_embdata_bin(elffile)
init_load_addr = get_init_load_addr(elffile)
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
pager_bin_size = len(pager_bin)
paged_area_size = len(pageable_bin)
init_mem_usage = (get_symbol(elffile, '__get_tee_init_end')['st_value'] -
get_symbol(elffile, '__text_start')['st_value'] +
len(embdata_bin))
init_size = (pager_bin_size + min(init_bin_size, paged_area_size) +
len(embdata_bin))
paged_size = paged_area_size - min(init_bin_size, paged_area_size)
magic = 0x4554504f # 'OPTE'
version = 1
flags = 0
outf.write(struct.pack('<IBBHIIIII', magic, version, arch_id, flags,
init_size, init_load_addr[0], init_load_addr[1],
init_mem_usage, paged_size))
outf.write(pager_bin)
outf.write(pageable_bin[:init_bin_size])
outf.write(embdata_bin)
outf.write(pageable_bin[init_bin_size:])
def output_header_v2(elffile, outf):
arch_id = get_arch_id(elffile)
init_load_addr = get_init_load_addr(elffile)
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
pager_bin_size = len(get_pager_bin(elffile))
paged_area_size = len(get_pageable_bin(elffile))
embdata_bin_size = len(get_embdata_bin(elffile))
init_size = (pager_bin_size + min(init_bin_size, paged_area_size) +
embdata_bin_size)
paged_size = paged_area_size - min(init_bin_size, paged_area_size)
magic = 0x4554504f # 'OPTE'
version = 2
flags = 0
nb_images = 1 if paged_size == 0 else 2
outf.write(struct.pack('<IBBHI', magic, version, arch_id, flags,
nb_images))
outf.write(struct.pack('<IIII', init_load_addr[0], init_load_addr[1],
0, init_size))
if nb_images == 2:
outf.write(struct.pack('<IIII', 0xffffffff, 0xffffffff, 1, paged_size))
def output_pager_v2(elffile, outf):
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
pager_bin = get_pager_bin(elffile)
pageable_bin = get_pageable_bin(elffile)
embdata_bin = get_embdata_bin(elffile)
outf.write(pager_bin)
outf.write(pageable_bin[:init_bin_size])
outf.write(embdata_bin)
def output_pageable_v2(elffile, outf):
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
outf.write(get_pageable_bin(elffile)[init_bin_size:])
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input',
required=True, type=argparse.FileType('rb'),
help='The input tee.elf')
parser.add_argument('--out_tee_bin',
required=False, type=argparse.FileType('wb'),
help='The output tee.bin')
parser.add_argument('--out_tee_pager_bin',
required=False, type=argparse.FileType('wb'),
help='The output tee_pager.bin')
parser.add_argument('--out_tee_pageable_bin',
required=False, type=argparse.FileType('wb'),
help='The output tee_pageable.bin')
parser.add_argument('--out_header_v2',
required=False, type=argparse.FileType('wb'),
help='The output tee_header_v2.bin')
parser.add_argument('--out_pager_v2',
required=False, type=argparse.FileType('wb'),
help='The output tee_pager_v2.bin')
parser.add_argument('--out_pageable_v2',
required=False, type=argparse.FileType('wb'),
help='The output tee_pageable_v2.bin')
return parser.parse_args()
def main():
args = get_args()
elffile = ELFFile(args.input)
if args.out_tee_bin:
output_header_v1(elffile, args.out_tee_bin)
if args.out_tee_pager_bin:
output_pager_bin(elffile, args.out_tee_pager_bin)
if args.out_tee_pageable_bin:
output_pageable_bin(elffile, args.out_tee_pageable_bin)
if args.out_header_v2:
output_header_v2(elffile, args.out_header_v2)
if args.out_pager_v2:
output_pager_v2(elffile, args.out_pager_v2)
if args.out_pageable_v2:
output_pageable_v2(elffile, args.out_pageable_v2)
if __name__ == "__main__":
main()
| file stats: avg_line_length 33.066158 | max_line_length 79 | alphanum_fraction 0.604309 | [remaining quality-signal columns omitted] |
| hexsha 5d1d311ba4f1a92388fbc36107c0bf393d5b97bc | size 1,864 | ext py | lang Python |
| path CircuitPython_JEplayer_mp3/repeat.py | repo gamblor21/Adafruit_Learning_System_Guides @ f5dab4a758bc82d0bfc3c299683fe89dc093912a | licenses ["MIT"] |
| stars 665 (2017-09-27T21:20:14.000Z .. 2022-03-31T09:09:25.000Z) | issues 641 (2017-10-03T19:46:37.000Z .. 2022-03-30T18:28:46.000Z) | forks 734 (2017-10-02T22:47:38.000Z .. 2022-03-30T14:03:51.000Z) |
# The MIT License (MIT)
#
# Copyright (c) 2020 Jeff Epler for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Make a key (button) repeat when held down
"""
import time
class KeyRepeat:
"""Track the state of a button and, while it is held, output a press every
'rate' seconds"""
def __init__(self, getter, rate=0.5):
self.getter = getter
self.rate_ns = round(rate * 1e9)
self.next = -1
@property
def value(self):
"""True when a button is first pressed, or once every 'rate' seconds
thereafter"""
state = self.getter()
if not state:
self.next = -1
return False
now = time.monotonic_ns()
if state and now > self.next:
self.next = now + self.rate_ns
return True
return False
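# Usage sketch (hypothetical wiring; assumes a digitalio button on board.BUTTON_A,
# which is pulled down and reads True while pressed on boards like the
# Circuit Playground Express):
#   import board
#   import digitalio
#   button = digitalio.DigitalInOut(board.BUTTON_A)
#   button.switch_to_input(pull=digitalio.Pull.DOWN)
#   repeater = KeyRepeat(lambda: button.value, rate=0.25)
#   while True:
#       if repeater.value:
#           print("press")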
| file stats: avg_line_length 38.833333 | max_line_length 79 | alphanum_fraction 0.694742 | [remaining quality-signal columns omitted] |
| hexsha 5d1d5be9e9e0382909fb3777ed89becc272c0e93 | size 767 | ext py | lang Python |
| path Kapitel_1/_1_public_private.py | repo Geralonx/Classes_Tutorial @ 9499db8159efce1e3c38975b66a9c649631c6727 | licenses ["MIT"] |
| stars 1 (2020-12-24T15:42:54.000Z) | issues null | forks null |
# --- Class declaration with a constructor --- #
class PC:
def __init__(self, cpu, gpu, ram):
self.cpu = cpu
self.gpu = gpu
self.__ram = ram
# --- Instantiating a class --- #
# --- I prefer initialization with keyword arguments --- #
pc_instanz = PC(cpu='Ryzen 7', gpu='RTX2070Super', ram='GSkill')
# --- Accessing normal _public_ attributes --- #
print(pc_instanz.cpu)
print(pc_instanz.gpu)
# --- Accessing a _private_ attribute --- #
# Commented out because it raises an AttributeError.
# print(pc_instanz.__ram)
# --- Accessing the instance dictionary to inspect that instance's contents. --- #
print(pc_instanz.__dict__)
# --- Accessing the actually _private_ attribute via its name-mangled form. --- #
print(pc_instanz._PC__ram)
| file stats: avg_line_length 29.5 | max_line_length 89 | alphanum_fraction 0.684485 | [remaining quality-signal columns omitted] |
| hexsha 5d1d8a76e626c2ee6f2e02dabc04e268863c54e7 | size 723 | ext py | lang Python |
| path algorithm/dynamic_programming/coin_change/solution.py | repo delaanthonio/hackerrank @ b1f2e1e93b3260be90eb3b8cb8e86e9a700acf27 | licenses ["MIT"] |
| stars 1 (2017-07-02T01:35:39.000Z) | issues null | forks 1 (2018-04-03T15:11:56.000Z) |
#!/usr/bin/env python3
"""
The Coin Change Problem
:author: Dela Anthonio
:hackerrank: https://hackerrank.com/delaanthonio
:problem: https://www.hackerrank.com/challenges/coin-change/problem
"""
from typing import List
def count_ways(amount: int, coins: List[int]) -> int:
"""Return the number of ways we can count to ``amount`` with values ``coins``."""
ways = [1] + [0] * amount
for coin in coins:
for val in range(coin, amount + 1):
ways[val] += ways[val - coin]
return ways[-1]
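# Worked example: count_ways(4, [1, 2, 3]) == 4
# (the four ways: 1+1+1+1, 1+1+2, 2+2, 1+3)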
def main():
m, n = [int(x) for x in input().strip().split()]
coins = sorted({int(x) for x in input().strip().split()})
print(count_ways(m, coins))
if __name__ == '__main__':
main()
| file stats: avg_line_length 24.1 | max_line_length 85 | alphanum_fraction 0.625173 | [remaining quality-signal columns omitted] |
| hexsha 5d1e73e0421ce77dfe2003f2cfbf66fd1ffd338e | size 1,264 | ext py | lang Python |
| path setup.py | repo TheMagicNacho/artemis-nozzle @ 5c02672feb7b437a4ff0ccc45394de3010bcd5ab | licenses ["BSD-2-Clause"] |
| stars null | issues null | forks null |
# coding: utf-8
from runpy import run_path
from setuptools import setup
# Get the version from the relevant file
d = run_path('skaero/version.py')
__version__ = d['__version__']
setup(
name="scikit-aero",
version=__version__,
description="Aeronautical engineering calculations in Python.",
author="Juan Luis Cano",
author_email="juanlu001@gmail.com",
url="https://github.com/Juanlu001/scikit-aero",
license="BSD",
keywords=[
"aero", "aeronautical", "aerospace",
"engineering", "atmosphere", "gas"
],
requires=["numpy", "scipy"],
packages=[
"skaero",
"skaero.atmosphere", "skaero.gasdynamics",
"skaero.util"
],
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Physics"
],
long_description=open('README.rst').read()
)
| file stats: avg_line_length 30.829268 | max_line_length 70 | alphanum_fraction 0.625791 | [remaining quality-signal columns omitted] |
| hexsha 5d1e977ff682cc24e27dda8c4298d920050e0d35 | size 1,226 | ext py | lang Python |
| path appendix/AI.by.Search/backtracking.search/3-1.eight.queens.py | repo royqh1979/programming_with_python @ 7e1e8f88381151b803b6ae6ebda9809d9cc6664a | licenses ["MIT"] |
| stars 5 (2019-03-06T12:28:47.000Z .. 2022-01-06T14:06:02.000Z) | issues 6 (2021-02-02T22:40:49.000Z .. 2022-03-12T00:27:54.000Z) | forks 4 (2019-03-06T14:29:25.000Z .. 2020-06-02T15:16:40.000Z) |
"""
The eight queens problem
Backtracking implemented iteratively with a stack-like queens array
"""
def print_board(n,count):
print(f"------解.{count}------")
print(" ",end="")
for j in range(n):
print(f"{j:<2}" ,end="")
print()
for i in range(1,n+1):
print(f"{i:<2}",end="")
for j in range(1,n+1):
if queens[i] == j:
print("Q ",end="")
else:
print(" ",end="")
print()
def set_flags(i,j,n):
col_flags[j]=1
diag_flags[i+j-1]=1
diag2_flags[n+i-j]=1
def clear_flags(i,j,n):
col_flags[j]=0
diag_flags[i+j-1]=0
diag2_flags[n+i-j]=0
def can_stay(i,j,n):
if col_flags[j]==1:
return False
if diag_flags[i+j-1]==1:
return False
if diag2_flags[n+i-j]==1:
return False
return True
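# Worked example of the diagonal indexing for n = 8: square (i=3, j=5) lies on
# main diagonal i+j-1 = 7 and anti-diagonal n+i-j = 6, so a queen there sets
# col_flags[5], diag_flags[7] and diag2_flags[6].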
def try_queen(i,n):
global count
i=1
while True:
queens[i]+=1
if queens[i]>n: # backtracking
i-=1
if i<1: # all possible solutions have been tried, quit searching
break
clear_flags(i,queens[i],n)
elif can_stay(i,queens[i],n):
if i==n:
count += 1
print_board(n, count)
else:
set_flags(i, queens[i], n)
i+=1
queens[i] = 0
def queen(n):
try_queen(1,n)
n=int(input("请输入n:"))
queens = [0]*(n+1)
# Column occupancy flags
col_flags=[0]*(n+1)
# Main-diagonal flags (index i+j-1)
diag_flags = [0]*(2*n)
# Anti-diagonal flags (index n+i-j)
diag2_flags = [0] * (2*n)
count = 0
queen(n)
print(f"共有{count}种解法\n")
| file stats: avg_line_length 15.518987 | max_line_length 67 | alphanum_fraction 0.58646 | [remaining quality-signal columns omitted] |
| hexsha 5d1ef3d88231b985a99b51b59e99bc1d40f0567f | size 24,820 | ext py | lang Python |
| path multimodal_affinities/evaluation/analysis/plots_producer.py | repo amzn/multimodal-affinities @ 23045eb6a9387ce0c9c6f5a15227cf1cc4282626 | licenses ["CC-BY-4.0"] |
| stars 6 (2021-07-06T12:48:18.000Z .. 2021-12-06T01:52:24.000Z) | issues null | forks 5 (2021-07-10T08:09:17.000Z .. 2022-03-24T16:27:15.000Z) |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-4.0
import os
import cv2
from collections import namedtuple
import imageio
from PIL import Image
from random import randrange
import numpy as np
from sklearn.decomposition import PCA
from scipy.spatial.distance import pdist, squareform
import torch
import matplotlib
matplotlib.use('Agg') # Required for gif animations
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as image
import matplotlib.patches as patches
from multimodal_affinities.visualization.vis_handler import VisHandler
from multimodal_affinities.visualization.image_utils import resize_image
from multimodal_affinities.visualization.colors_util import rgb_hex_to_tuple
class PlotsProducer:
def __init__(self, document, output_path):
# Load background image
self.image_path = document.image_path
self.img = plt.imread(self.image_path)
self.img_opencv = cv2.imread(self.image_path)
dpi = 120
mpl.rcParams['figure.dpi'] = dpi
height = self.img.shape[0]
width = self.img.shape[1]
self.figsize = width / float(dpi), height / float(dpi) # Fig size in inches
self.document = document
self.output_path = output_path
if not os.path.exists(output_path):
os.makedirs(output_path)
def plot_word_boxes_on_image(self):
set_of_words = [[word] for word in self.document.get_words()] # list of singleton word lists
fig, ax = plt.subplots(1, figsize=self.figsize)
monochrome_colors_list = ['#5a5d8f' for _ in self.document.get_words()]
self._draw_entity_bounding_boxes(fig=fig, ax=ax, bg_img=self.img,
title='',
entity_sets=set_of_words,
colors_list=monochrome_colors_list)
fig.savefig(os.path.join(self.output_path, self.document.basename + '_word_boxes.png'))
plt.close(fig)
def save_phrase_detection_results(self):
set_of_phrases = [[phrase] for phrase in self.document.get_phrases()] # list of singleton phrase lists
fig, ax = plt.subplots(1, figsize=self.figsize)
self._draw_entity_bounding_boxes(fig=fig, ax=ax, bg_img=self.img,
title='Phrase Detection', entity_sets=set_of_phrases)
fig.savefig(os.path.join(self.output_path, self.document.basename + '_phrase_detection.png'))
plt.close(fig)
def save_clustering_results(self, with_title=True, colors_list=None):
set_of_clusters = [cluster.words for cluster in self.document.get_clusters()] # list of list of words (clusters)
self._save_set_of_clusters(set_of_clusters, with_title, colors_list)
def save_clustering_labels(self, clustering_labels, colors_list=None):
cluster_ids = np.unique(np.array(clustering_labels))
cluster_id_to_cluster_idx = {cluster_id: idx for idx, cluster_id in enumerate(cluster_ids)}
# Converts from list of labels to list of list of words (clusters)
set_of_clusters = [[] for _ in range(len(cluster_ids))]
for word_idx, word in enumerate(self.document.get_words()):
cluster_id = clustering_labels[word_idx]
if cluster_id == -1: # Ignore non-clustered words
continue
cluster_idx = cluster_id_to_cluster_idx[cluster_id]
set_of_clusters[cluster_idx].append(word)
self._save_set_of_clusters(set_of_clusters, colors_list)
def _save_set_of_clusters(self, set_of_clusters, with_title=True, colors_list=None):
"""
:param document:
:param set_of_clusters: list of list of words (clusters)
:return:
"""
output_img = self._draw_entity_bounding_boxes_opencv(bg_img=self.img_opencv,
entity_sets=set_of_clusters,
colors_list=colors_list)
cv2.imwrite(os.path.join(self.output_path, self.document.basename + '_clustering.png'), output_img)
@staticmethod
def _draw_entity_bounding_boxes_opencv(bg_img, entity_sets, colors_list=None):
img_height = bg_img.shape[0]
img_width = bg_img.shape[1]
if colors_list is None:
colors_list = VisHandler.generate_colors_list(amount=len(entity_sets))
face_colors = colors_list
edge_colors = VisHandler.generate_darker_palette(colors_list)
output_img = bg_img.copy()
alpha = 0.8
for set_idx, entities_set in enumerate(entity_sets):
face_color = face_colors[set_idx]
edge_color = edge_colors[set_idx]
for entity in entities_set:
x = entity.geometry.left * img_width
y = entity.geometry.top * img_height
width = entity.geometry.width * img_width
height = entity.geometry.height * img_height
# writing the text onto the image and returning it
rgb_color = rgb_hex_to_tuple(face_color)
cv2.rectangle(output_img, (int(x), int(y)), (int(x + width), int(y + height)),
(rgb_color[2], rgb_color[1], rgb_color[0]), cv2.FILLED)
output_img = cv2.addWeighted(output_img, alpha, bg_img, 1 - alpha, 0)
return output_img
@staticmethod
def _draw_entity_bounding_boxes(fig, ax, bg_img, title, entity_sets, colors_list=None):
ax.set_title(title)
plt.tick_params(axis='both', which='both',
bottom='off', top='off', labelbottom='off', right='off', left='off',
labelleft='off')
plt.imshow(bg_img)
img_height = bg_img.shape[0]
img_width = bg_img.shape[1]
if colors_list is None:
colors_list = VisHandler.generate_colors_list(amount=len(entity_sets))
face_colors = colors_list
edge_colors = VisHandler.generate_darker_palette(colors_list)
for set_idx, entities_set in enumerate(entity_sets):
face_color = face_colors[set_idx]
edge_color = edge_colors[set_idx]
for entity in entities_set:
x = entity.geometry.left * img_width
y = entity.geometry.top * img_height
width = entity.geometry.width * img_width
height = entity.geometry.height * img_height
rect = patches.Rectangle((x, y), width, height,
linewidth=2,
edgecolor=edge_color,
facecolor=face_color,
alpha=0.4)
ax.add_patch(rect)
@staticmethod
def plot_pca_embedding_space_for_clusters(document, output_path,
embedding_property='embedding',
title=''):
"""
Plot 2d PCA visualization of the embedding space according to cluster colors.
:param document: Document with clustering results
:param embedding_property: Embedding property of words - normally 'embedding' or 'unprojected_embedding'
:return:
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
words = document.get_words()
clusters = document.get_clusters()
if len(words) == 0 or getattr(words[0], embedding_property) is None:
return
if embedding_property == 'unprojected_embedding':
embeddings = []
for word in words:
unprojected_embedding = torch.cat(word.unprojected_embedding['embeddings'], dim=1)
unprojected_embedding = unprojected_embedding.detach().cpu().numpy()
embeddings.append(unprojected_embedding)
else:
embeddings = [getattr(word, embedding_property).detach().cpu().numpy() for word in words]
colors_palette = VisHandler.generate_colors_list(amount=len(clusters))
word_to_color = {word: colors_palette[cluster_idx]
for cluster_idx, cluster in enumerate(clusters)
for word in cluster.words}
colors = [word_to_color[word] for word in words]
embeddings_array = np.array(embeddings).squeeze()
num_pca_comp = 2
embeddings_2d = PCA(n_components=num_pca_comp).fit_transform(embeddings_array)
x_list = [embeddings_2d[i, 0] for i in range(embeddings_2d.shape[0])]
y_list = [embeddings_2d[i, 1] for i in range(embeddings_2d.shape[0])]
fig, ax = plt.subplots(1)
plot_title = embedding_property
if plot_title != '':
plot_title += ': ' + title
plt.title(plot_title)
plt.scatter(x_list, y_list, c=colors, s=1, alpha=0.8)
fig.tight_layout()
fig.savefig(os.path.join(output_path, document.basename + '_' + embedding_property + '_pca.png'))
plt.close(fig)
@staticmethod
def _find_k_furthest_words_per_cluster(document, embeddings_2d, k=3):
""" Greedy approximation algorithm for finding k furthest neighbour words per cluster.
k is expected to be relatively small (< 100)
"""
words = document.get_words()
word_to_embedding_2d_idx = {word: idx for idx, word in enumerate(words)}
clusters = document.get_clusters()
solution_per_cluster = {}
ClusterSolution = namedtuple('ClusterSolution', ['word_indices', 'words'])
for cluster in clusters:
# Generate cluster pairwise distances matrix
all_cluster_embeddings_indices = [word_to_embedding_2d_idx[word] for word in cluster.words]
all_cluster_embeddings = np.take(embeddings_2d, all_cluster_embeddings_indices, axis=0)
pairwise_distances = pdist(all_cluster_embeddings, metric='euclidean')
distances_matrix = squareform(pairwise_distances)
# Total distance from selected set so far
distances_accumulator = np.zeros(len(cluster.words))
# Sample first point
random_index = randrange(len(cluster.words))
# Indices of selected points
selected_points = [random_index]
# How many points we need to add
points_to_calc_count = min(k - 1, len(words) - 1)
for _ in range(points_to_calc_count):
last_point_selected = selected_points[-1]
# Update accumulator with distance collected from last point
distances_accumulator += distances_matrix[last_point_selected]
# Eliminate last point selected from distance matrix & accumulator
                distances_matrix[:, last_point_selected] = 0
                distances_matrix[last_point_selected, :] = 0
                furthest_point_from_set = np.argmax(distances_accumulator, axis=0)
                selected_points.append(furthest_point_from_set)
selected_words = [cluster.words[point] for point in selected_points]
selected_word_indices = [word_to_embedding_2d_idx[word] for word in selected_words]
solution_per_cluster[cluster] = ClusterSolution(word_indices=selected_word_indices, words=selected_words)
return solution_per_cluster
@staticmethod
def _extract_crops_per_cluster_solution(document, solution_per_cluster):
"""
Extracts crops for each selected word in k-furthest neighbours solution
:param document:
:param solution_per_cluster: Solution of k-furthest neighbours
:return:
"""
word_indices_to_crops = {}
for cluster, cluster_solution in solution_per_cluster.items():
for word_index, word in zip(cluster_solution.word_indices, cluster_solution.words):
bbox = word.get_bbox() # left, top, width, height
y_min = int(round(bbox[1] * document.height))
y_max = int(round((bbox[1] + bbox[3]) * document.height))
x_min = int(round(bbox[0] * document.width))
x_max = int(round((bbox[0] + bbox[2]) * document.width))
image_of_crop = document.image[max(0, y_min):min(y_max, document.height),
max(0, x_min):min(x_max, document.width), :]
pil_image = Image.fromarray(image_of_crop[...,::-1]) # BGR to RGB
pil_image = pil_image.convert('RGB')
word_indices_to_crops[word_index] = pil_image
return word_indices_to_crops
@staticmethod
def _space_out_crops(indices_to_crops, words, x_list, y_list, dist_from_pt=0.01, height=0.02):
"""
Calculates the positions and dimensions of crop images on the embedding space plot.
Makes sure crops don't overlay each other.
This method assumes a small number of crops (< 1000) and performs a naive linear comparison for each crop.
:param indices_to_crops: dict of word index (by order in doc) to PIL crop
:param words: List of words
:param x_list: List of corresponding pt x positions
:param y_list: List of corresponding pt y positions
:param dist_from_pt: How far in (x-y) coords the crop should be placed from the plot
:param height: Height of the crop, in figure axes dimensions (note: for normalized pca space: -1 to 1)
        :return: indices_to_extents: dict of word index to extents describing position and dimensions of each crop.
                 Crops are shifted so they don't cover each other.
"""
indices_to_extents = {}
MatplotExtent = namedtuple('matplot_extent', ['left', 'right', 'bottom', 'top'])
is_extent_x_intersect = lambda e1, e2: not (e1.right < e2.left or e1.left > e2.right)
is_extent_y_intersect = lambda e1, e2: not (e1.top > e2.bottom or e1.bottom < e2.top)
is_extent_intersect = lambda e1, e2: is_extent_x_intersect(e1, e2) and is_extent_y_intersect(e1, e2)
min_x, max_x = min(x_list), max(x_list)
min_y, max_y = min(y_list), max(y_list)
height = (max_y - min_y) * height
dist_from_pt = min(max_y - min_y, max_x - min_x) * dist_from_pt
for point_index, crop in indices_to_crops.items():
word_aspect_ratio = words[point_index].geometry.width / words[point_index].geometry.height
axis_ratio = (max_x-min_x) / (max_y-min_y) / 2
width = height * word_aspect_ratio * axis_ratio
left, right = x_list[point_index] + dist_from_pt, x_list[point_index] + dist_from_pt + width
bottom, top = y_list[point_index] + dist_from_pt + height, y_list[point_index] + dist_from_pt
overlap = True
while overlap:
overlap = False
extent = MatplotExtent(left, right, bottom, top)
for other_crop_extent in indices_to_extents.values():
other_left, other_right, other_bottom, other_top = other_crop_extent
spaceout_margin = dist_from_pt / 2
if is_extent_intersect(extent, other_crop_extent):
overlap = True
# shift below
if other_bottom <= top <= other_top:
top = other_bottom + spaceout_margin
bottom = top + height
else: # shift above
bottom = other_top - spaceout_margin
top = bottom - height
continue
indices_to_extents[point_index] = extent
return indices_to_extents
def plot_clusters_and_embedding_space_with_crops(self, document, output_path, crops_per_cluster=3,
embedding_properties=['embedding', 'unprojected_embedding'],
unprojected_caption=None):
"""
Plot 2d PCA visualization of the embedding space according to cluster colors.
:param document: Document with clustering results
        :param embedding_properties: Embedding properties of words to plot - normally 'embedding' and/or 'unprojected_embedding'
:return:
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
words = document.get_words()
clusters = document.get_clusters()
if len(words) == 0 or \
all([getattr(words[0], embedding_property) is None for embedding_property in embedding_properties]):
return
colors_palette = VisHandler.generate_colors_list(amount=len(clusters))
word_to_color = {word: colors_palette[cluster_idx]
for cluster_idx, cluster in enumerate(clusters)
for word in cluster.words}
colors = [word_to_color[word] for word in words]
# Initially empty, the first embedding property we process will set those for all figures
selected_word_crops_per_cluster = None
indices_to_crops = None
for embedding_property in embedding_properties:
if embedding_property == 'unprojected_embedding': # Can't handle tuples, concat them
embeddings = []
for word in words:
unprojected_embedding = torch.cat(word.unprojected_embedding['embeddings'], dim=1)
unprojected_embedding = unprojected_embedding.detach().cpu().numpy()
embeddings.append(unprojected_embedding)
else:
embeddings = [getattr(word, embedding_property).detach().cpu().numpy() for word in words]
embeddings_array = np.array(embeddings).squeeze()
num_pca_comp = 2
embeddings_2d = PCA(n_components=num_pca_comp).fit_transform(embeddings_array)
x_list = [embeddings_2d[i, 0] for i in range(embeddings_2d.shape[0])]
y_list = [embeddings_2d[i, 1] for i in range(embeddings_2d.shape[0])]
fig, ax = plt.subplots(1)
if crops_per_cluster > 0:
if selected_word_crops_per_cluster is None and indices_to_crops is None: # Calculate per first attribute
selected_word_crops_per_cluster = PlotsProducer._find_k_furthest_words_per_cluster(document, embeddings_2d, k=crops_per_cluster)
indices_to_crops = PlotsProducer._extract_crops_per_cluster_solution(document, selected_word_crops_per_cluster)
indices_to_extents = PlotsProducer._space_out_crops(indices_to_crops, words,
x_list, y_list, dist_from_pt=0.02, height=0.04)
# Plot crop images
for point_index, crop in indices_to_crops.items():
extent = indices_to_extents[point_index]
rect = patches.Rectangle((extent.left, extent.top), extent.right-extent.left, extent.bottom-extent.top,
linewidth=0.5,
edgecolor="black",
facecolor="none",
zorder=5)
ax.imshow(crop, aspect='auto', alpha=0.65, extent=extent, zorder=4)
ax.add_patch(rect)
# Plot points
if embedding_property == 'unprojected_embedding':
plot_title = 'Initial unprojected embeddings, pre training (PCA)'
else:
if unprojected_caption is None:
plot_title = 'Projected embeddings, post training (PCA)'
else:
plot_title = unprojected_caption
plt.title(plot_title)
plt.scatter(x_list, y_list, c=colors, s=18, alpha=1.0, edgecolors='black', linewidth=1.0, zorder=3)
plt.tick_params(axis='both', which='both',
bottom=False, top=False, labelbottom=False, right=False, left=False,
labelleft=False)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout()
fig.savefig(os.path.join(output_path, document.basename + '_' + embedding_property + '_pca.png'))
plt.close(fig)
# Finally plot clusters on original image
self.save_clustering_results(with_title=False, colors_list=colors_palette)
return colors_palette
@staticmethod
def animate_pca_embedding_space_for_clusters(document, output_path, embeddings_history, colors_palette=None):
"""
Plot 2d PCA visualization of the embedding space according to cluster colors.
:param document: Document with clustering results
:param embedding_property: Embedding property of words - normally 'embedding' or 'unprojected_embedding'
:return:
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
words = document.get_words()
clusters = document.get_clusters()
if len(words) == 0 or embeddings_history is None or len(embeddings_history) == 0:
return
if colors_palette is None:
colors_palette = VisHandler.generate_colors_list(amount=len(clusters))
word_to_color = {word: colors_palette[cluster_idx]
for cluster_idx, cluster in enumerate(clusters)
for word in cluster.words}
colors = [word_to_color[word] for word in words]
scatter_data = []
for state_idx, embeddings_state in enumerate(embeddings_history):
epoch = state_idx + 1
normalized_embeddings_dict = embeddings_state['normalized']
unnormalized_embeddings_dict = embeddings_state['unnormalized']
if len(normalized_embeddings_dict) > 0:
normalized_embeddings = [normalized_embeddings_dict[word].detach().cpu().numpy() for word in words]
chosen_embedding = normalized_embeddings
elif len(unnormalized_embeddings_dict) > 0:
unnormalized_embeddings = [unnormalized_embeddings_dict[word].detach().cpu().numpy() for word in words]
chosen_embedding = unnormalized_embeddings
else:
return
embeddings_array = np.array(chosen_embedding).squeeze()
num_pca_comp = 2
embeddings_2d = PCA(n_components=num_pca_comp).fit_transform(embeddings_array)
x_list = [embeddings_2d[i, 0] for i in range(embeddings_2d.shape[0])]
y_list = [embeddings_2d[i, 1] for i in range(embeddings_2d.shape[0])]
push_pull_ratio = embeddings_state['push_pull_ratio']
scatter_data.append((epoch, x_list, y_list, push_pull_ratio))
min_x = min(min(scatter_data, key=lambda entry: min(entry[1]))[1])
max_x = max(max(scatter_data, key=lambda entry: max(entry[1]))[1])
min_y = min(min(scatter_data, key=lambda entry: min(entry[2]))[2])
max_y = max(max(scatter_data, key=lambda entry: max(entry[2]))[2])
padding_factor = 0.1
x_pad = (max_x - min_x) * padding_factor
y_pad = (max_y - min_y) * padding_factor
min_x -= x_pad
max_x += x_pad
min_y -= y_pad
max_y += y_pad
frames = []
for epoch, x_list, y_list, push_pull_ratio in scatter_data:
fig, ax = plt.subplots(1)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_y, max_y)
plot_title = 'Projected embeddings at epoch #' + str(epoch) + ' (PCA)'
plt.title(plot_title)
plt.scatter(x_list, y_list, c=colors, s=18, alpha=1.0, edgecolors='black', linewidth=1.0, zorder=3)
plt.tick_params(axis='both', which='both',
bottom=False, top=False, labelbottom=False, right=False, left=False,
labelleft=False)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# Used to return the plot as an image array
fig.tight_layout()
fig.canvas.draw() # draw the canvas, cache the renderer
output_frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
output_frame = output_frame.reshape(fig.canvas.get_width_height()[::-1] + (3,))
frames.append(output_frame)
imageio.mimsave(os.path.join(output_path, document.basename + '_embeddings_history.gif'), frames, fps=2)
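# A hedged usage sketch for the two plotting entry points above. The PlotsProducer
# constructor signature is an assumption; `document` and `embeddings_history` come
# from the surrounding training pipeline.
producer = PlotsProducer(document, output_path)
palette = producer.plot_clusters_and_embedding_space_with_crops(document, output_path,
                                                                crops_per_cluster=3)
PlotsProducer.animate_pca_embedding_space_for_clusters(document, output_path,
                                                       embeddings_history,
                                                       colors_palette=palette)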
| 50.040323
| 148
| 0.621112
| 3,028
| 24,820
| 4.824967
| 0.136724
| 0.017796
| 0.00924
| 0.008624
| 0.48768
| 0.439288
| 0.418823
| 0.380972
| 0.359754
| 0.335387
| 0
| 0.010493
| 0.293473
| 24,820
| 495
| 149
| 50.141414
| 0.822651
| 0.11801
| 0
| 0.406077
| 0
| 0
| 0.027857
| 0.005953
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038674
| false
| 0
| 0.049724
| 0
| 0.116022
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d208ff94339d61c9f91237707d44d87ad7cd192
| 6,294
|
py
|
Python
|
openstates/openstates-master/openstates/de/legislators.py
|
Jgorsick/Advocacy_Angular
|
8906af3ba729b2303880f319d52bce0d6595764c
|
[
"CC-BY-4.0"
] | null | null | null |
openstates/openstates-master/openstates/de/legislators.py
|
Jgorsick/Advocacy_Angular
|
8906af3ba729b2303880f319d52bce0d6595764c
|
[
"CC-BY-4.0"
] | null | null | null |
openstates/openstates-master/openstates/de/legislators.py
|
Jgorsick/Advocacy_Angular
|
8906af3ba729b2303880f319d52bce0d6595764c
|
[
"CC-BY-4.0"
] | null | null | null |
import re
import lxml.html
from openstates.utils import LXMLMixin
from billy.scrape.legislators import LegislatorScraper, Legislator
class DELegislatorScraper(LegislatorScraper,LXMLMixin):
jurisdiction = 'de'
def scrape(self, chamber, term):
url = {
'upper': 'http://legis.delaware.gov/legislature.nsf/sen?openview',
'lower': 'http://legis.delaware.gov/Legislature.nsf/Reps?openview',
}[chamber]
doc = self.lxmlize(url)
if chamber == "upper":
# For the Senate it's the same table,
# but the HTML is hard-coded in JS.
table_js = doc.xpath('.//script')[-1].text_content()
table = None
for line in table_js.split("\n"):
if line.strip().startswith("var") and "sen=" in line:
table = line.replace("var","")
table = table.replace('sen="<','<')
table = table.replace('>";','>')
break
assert table is not None, "Senate table could not be found"
table = lxml.html.fromstring(table)
table.make_links_absolute(url)
trs = table.xpath('//tr')
else:
# same table for the House, but kindly in actual HTML
trs = doc.xpath('//tr')
base_url = "http://legis.delaware.gov"
for tr in trs:
name_and_url = tr.xpath('.//a')[0]
bio_url = name_and_url.attrib["href"]
bio_url = bio_url.replace("JavaScript:window.top.location.href=","")
bio_url = bio_url.replace('"','')
name = name_and_url.text_content()
if name.strip() == "." or name.strip() == "":
continue
if name.strip().lower().startswith("vacant"):
continue
re_spaces=re.compile(r'\s{1,5}')
name = ' '.join(re_spaces.split(name))
district = tr.xpath('.//td')[2].text_content()
district = district.replace("District:","").strip()
leg = self.scrape_bio(term, chamber, district, name, bio_url)
leg.add_source(bio_url, page="legislator detail page")
leg.add_source(url, page="legislator list page")
self.save_legislator(leg)
def scrape_bio(self, term, chamber, district, name, url):
# this opens the committee section without having to do another request
url += '&TableRow=1.5.5'
frame_doc = self.lxmlize(url)
actual_url = frame_doc.xpath("//frame[@name='right']/@src")[0]
doc = self.lxmlize(actual_url)
# party is in one of these
party = doc.xpath('//div[@id="page_header"]')[0].text.strip()[-3:]
if '(D)' in party:
party = 'Democratic'
elif '(R)' in party:
party = 'Republican'
else:
raise AssertionError("No party found for {name}".format(name=name))
leg = Legislator(term, chamber, district, name, party=party)
photo_url = doc.xpath('//img[contains(@src, "jpg")]/@src')
if photo_url:
leg['photo_url'] = photo_url[0]
contact_info = self.scrape_contact_info(doc)
leg.update(contact_info)
return leg
def scrape_contact_info(self, doc):
# Email
email = doc.xpath(".//a[contains(@href,'mailto')]")
email = email[0].text_content().strip()
leg_email = None
dist_email = None
try:
emails = email.split(";")
except AttributeError:
pass
else:
for e in emails:
e = e.strip()
if e:
if "state.de.us" in e:
leg_email = e
else:
dist_email = e
# Offices
leg_office = dict(name="Capitol Office", type="capitol",
phone=None, fax=None, email=leg_email, address=None)
dist_office = dict(name="Outside Office", type="capitol",
phone=None,fax=None, email=dist_email, address=None)
#this is enormously painful, DE.
office_list = doc.xpath("//tr")
for office in office_list:
title_td = 0
#in some trs the photo is the first td
if len(office.xpath("./td/img")) > 0:
title_td = 1
try:
title_text = office.xpath("./td")[title_td].text_content().lower()
content = office.xpath("./td")[title_td+1].text_content()
except IndexError:
continue
leg_office = self.add_contact("legislative",
title_text,content,leg_office)
dist_office = self.add_contact("outside",
title_text,content,dist_office)
offices = [o for o in [leg_office,dist_office] if o["address"]]
assert len(offices) > 0, "No offices with addresses found; "\
"make sure we're not losing any data."
return {"offices":offices}
def add_contact(self,office_type,
title_text,content,office):
#office type is the name of the office
#either "legislative" or "outside"
if "{} office".format(office_type) in title_text:
office["address"] = content.strip()
if "{} phone".format(office_type) in title_text:
phones = content.lower().split("\n")
if len(phones) == 1:
phone = self.clean_phone(phones[0])
if phone:
office["phone"] = phone
else:
for line in phones:
if "phone" in line:
phone = self.clean_phone(line)
if phone:
office["phone"] = phone
elif "fax" in line:
phone = self.clean_phone(line)
if phone:
office["fax"] = phone
return office
def clean_phone(self, phone):
if not phone.strip():
return
if not re.search(r"\d", phone):
return
if ":" not in phone:
return phone
return phone.split(":")[1].strip()
| 35.359551
| 82
| 0.522402
| 719
| 6,294
| 4.458971
| 0.258693
| 0.03088
| 0.015908
| 0.018715
| 0.125078
| 0.102308
| 0.049906
| 0.049906
| 0.026201
| 0.026201
| 0
| 0.005167
| 0.354306
| 6,294
| 177
| 83
| 35.559322
| 0.783711
| 0.058151
| 0
| 0.142857
| 0
| 0
| 0.127113
| 0.019777
| 0
| 0
| 0
| 0
| 0.022556
| 1
| 0.037594
| false
| 0.007519
| 0.030075
| 0
| 0.135338
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d21c5c96591c41db957d015a344d7b68da97b7a
| 4,396
|
py
|
Python
|
dabing/DABING-MIB.py
|
SvajkaJ/dabing
|
8ddd8c1056b182b52f76028e23cd2ba8418a0dec
|
[
"MIT"
] | null | null | null |
dabing/DABING-MIB.py
|
SvajkaJ/dabing
|
8ddd8c1056b182b52f76028e23cd2ba8418a0dec
|
[
"MIT"
] | null | null | null |
dabing/DABING-MIB.py
|
SvajkaJ/dabing
|
8ddd8c1056b182b52f76028e23cd2ba8418a0dec
|
[
"MIT"
] | null | null | null |
#
# PySNMP MIB module DABING-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file://..\DABING-MIB.mib
# Produced by pysmi-0.3.4 at Tue Mar 22 12:53:47 2022
# On host ? platform ? version ? by user ?
# Using Python version 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 22:45:29) [MSC v.1916 32 bit (Intel)]
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, ModuleIdentity, IpAddress, ObjectIdentity, iso, Counter32, Unsigned32, Bits, NotificationType, TimeTicks, Counter64, enterprises, MibIdentifier, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "ModuleIdentity", "IpAddress", "ObjectIdentity", "iso", "Counter32", "Unsigned32", "Bits", "NotificationType", "TimeTicks", "Counter64", "enterprises", "MibIdentifier", "Integer32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
dabing = ModuleIdentity((1, 3, 6, 1, 4, 1, 55532))
dabing.setRevisions(('2022-03-17 00:00',))
if mibBuilder.loadTexts: dabing.setLastUpdated('202203170000Z')
if mibBuilder.loadTexts: dabing.setOrganization('www.stuba.sk')
Parameters = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 1))
Agent = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 2))
Manager = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 3))
Notifications = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 4))
NotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 4, 1))
NotificationObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 4, 2))
channel = MibScalar((1, 3, 6, 1, 4, 1, 55532, 1, 1), OctetString().clone('12C')).setMaxAccess("readonly")
if mibBuilder.loadTexts: channel.setStatus('current')
interval = MibScalar((1, 3, 6, 1, 4, 1, 55532, 1, 2), Integer32().clone(960)).setMaxAccess("readonly")
if mibBuilder.loadTexts: interval.setStatus('current')
trapEnabled = MibScalar((1, 3, 6, 1, 4, 1, 55532, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapEnabled.setStatus('current')
agentIdentifier = MibScalar((1, 3, 6, 1, 4, 1, 55532, 2, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIdentifier.setStatus('current')
agentLabel = MibScalar((1, 3, 6, 1, 4, 1, 55532, 2, 2), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentLabel.setStatus('current')
agentStatus = MibScalar((1, 3, 6, 1, 4, 1, 55532, 2, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentStatus.setStatus('current')
managerHostname = MibScalar((1, 3, 6, 1, 4, 1, 55532, 3, 1), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: managerHostname.setStatus('current')
managerPort = MibScalar((1, 3, 6, 1, 4, 1, 55532, 3, 2), Integer32().clone(162)).setMaxAccess("readonly")
if mibBuilder.loadTexts: managerPort.setStatus('current')
genericPayload = MibScalar((1, 3, 6, 1, 4, 1, 55532, 4, 2, 1), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: genericPayload.setStatus('current')
malfunctionTrap = NotificationType((1, 3, 6, 1, 4, 1, 55532, 4, 1, 1)).setObjects(("DABING-MIB", "genericPayload"))
if mibBuilder.loadTexts: malfunctionTrap.setStatus('current')
testTrap = NotificationType((1, 3, 6, 1, 4, 1, 55532, 4, 1, 2)).setObjects(("DABING-MIB", "genericPayload"))
if mibBuilder.loadTexts: testTrap.setStatus('current')
mibBuilder.exportSymbols("DABING-MIB", Notifications=Notifications, channel=channel, PYSNMP_MODULE_ID=dabing, testTrap=testTrap, malfunctionTrap=malfunctionTrap, Parameters=Parameters, agentLabel=agentLabel, managerPort=managerPort, trapEnabled=trapEnabled, managerHostname=managerHostname, Manager=Manager, NotificationPrefix=NotificationPrefix, Agent=Agent, genericPayload=genericPayload, NotificationObjects=NotificationObjects, agentIdentifier=agentIdentifier, dabing=dabing, agentStatus=agentStatus, interval=interval)
| 93.531915
| 523
| 0.75364
| 511
| 4,396
| 6.479452
| 0.258317
| 0.012685
| 0.016309
| 0.021746
| 0.430384
| 0.295681
| 0.295681
| 0.228934
| 0.203564
| 0.125642
| 0
| 0.0856
| 0.08849
| 4,396
| 46
| 524
| 95.565217
| 0.740704
| 0.065287
| 0
| 0
| 0
| 0
| 0.174348
| 0.010729
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d228bf6a4bad705b90b99a4ee75f695228944a7
| 956
|
py
|
Python
|
parameter_setup/run_setup_extra_vis.py
|
kharris/allen-voxel-network
|
3c39cf7e7400c09f78ebe9d1d9f8a6d7b9ef6d7b
|
[
"BSD-2-Clause"
] | 7
|
2018-04-02T01:30:31.000Z
|
2020-02-23T03:31:57.000Z
|
parameter_setup/run_setup_extra_vis.py
|
yijizhao/allen-voxel-network
|
3c39cf7e7400c09f78ebe9d1d9f8a6d7b9ef6d7b
|
[
"BSD-2-Clause"
] | null | null | null |
parameter_setup/run_setup_extra_vis.py
|
yijizhao/allen-voxel-network
|
3c39cf7e7400c09f78ebe9d1d9f8a6d7b9ef6d7b
|
[
"BSD-2-Clause"
] | 2
|
2016-11-10T03:27:29.000Z
|
2018-08-06T17:32:05.000Z
|
import os
import numpy as np
save_stem='extra_vis_friday_harbor'
data_dir='../../data/sdk_new_100'
resolution=100
cre=False
source_acronyms=['VISal','VISam','VISl','VISp','VISpl','VISpm',
'VISli','VISpor','VISrl','VISa']
lambda_list = np.logspace(3,12,10)
scale_lambda=True
min_vox=0
# save_file_name='visual_output.hdf5'
#source_coverage=0.90
source_coverage=0.95
#source_shell = 1
source_shell=None
save_dir=os.path.join('../../data/connectivities',save_stem)
experiments_fn=None
target_acronyms=source_acronyms
solver=os.path.abspath('../smoothness_c/solve')
cmdfile=os.path.join(save_dir,'model_fitting_cmds')
selected_fit_cmds=os.path.join(save_dir,'model_fitting_after_selection_cmds')
save_mtx=True
cross_val_matrices=True
cross_val=5
fit_gaussian=False
select_one_lambda=False
if select_one_lambda:
lambda_fn='lambda_opt'
else:
lambda_fn='lambda_ipsi_contra_opt'
laplacian='free'
shuffle_seed=666
max_injection_volume=0.7
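# For reference, the regularization grid defined above spans one value per integer
# exponent from 1e3 to 1e12 (standard NumPy semantics):
print(np.logspace(3, 12, 10))
# -> [1.e+03 1.e+04 1.e+05 1.e+06 1.e+07 1.e+08 1.e+09 1.e+10 1.e+11 1.e+12]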
| 26.555556
| 77
| 0.789749
| 154
| 956
| 4.564935
| 0.61039
| 0.034139
| 0.042674
| 0.039829
| 0.082504
| 0.082504
| 0.082504
| 0
| 0
| 0
| 0
| 0.029345
| 0.073222
| 956
| 35
| 78
| 27.314286
| 0.764108
| 0.074268
| 0
| 0
| 0
| 0
| 0.257662
| 0.166856
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.064516
| 0
| 0.064516
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d246013ebe48dbfeb0d5a33d4837a599aed75ec
| 396
|
py
|
Python
|
examples/runall.py
|
GNiklas/MOSSEPy
|
fbae1294beefe48f321bc5dbbc70e6c72d3ffe1f
|
[
"MIT"
] | null | null | null |
examples/runall.py
|
GNiklas/MOSSEPy
|
fbae1294beefe48f321bc5dbbc70e6c72d3ffe1f
|
[
"MIT"
] | null | null | null |
examples/runall.py
|
GNiklas/MOSSEPy
|
fbae1294beefe48f321bc5dbbc70e6c72d3ffe1f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 20 09:42:39 2020
@author: niklas
"""
from mossepy.mosse_tracker import MOSSE
# choose position of object in first frame
# that should be done by mouse click
objPos = [256, 256]
# choose tracker type
tracker = MOSSE()
# initialize object position in first frame
tracker.setObjPos(objPos)
# start tracking
tracker.trackImg()
| 19.8
| 43
| 0.729798
| 59
| 396
| 4.881356
| 0.762712
| 0.048611
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060241
| 0.161616
| 396
| 20
| 44
| 19.8
| 0.807229
| 0.628788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d25df408d94ab31e94a89b3213ae144d0477893
| 5,761
|
py
|
Python
|
manim/mobject/svg/style_utils.py
|
5Points7Edges/manim
|
1c2a5099133dbf0abdd5517b2ac93cfc8275b842
|
[
"MIT"
] | null | null | null |
manim/mobject/svg/style_utils.py
|
5Points7Edges/manim
|
1c2a5099133dbf0abdd5517b2ac93cfc8275b842
|
[
"MIT"
] | null | null | null |
manim/mobject/svg/style_utils.py
|
5Points7Edges/manim
|
1c2a5099133dbf0abdd5517b2ac93cfc8275b842
|
[
"MIT"
] | null | null | null |
"""Utility functions for parsing SVG styles."""
__all__ = ["cascade_element_style", "parse_style", "parse_color_string"]
from xml.dom.minidom import Element as MinidomElement
from colour import web2hex
from ...utils.color import rgb_to_hex
from typing import Dict, List
CASCADING_STYLING_ATTRIBUTES: List[str] = [
"fill",
"stroke",
"fill-opacity",
"stroke-opacity",
]
# The default styling specifications for SVG images,
# according to https://www.w3.org/TR/SVG/painting.html
# (ctrl-F for "initial")
SVG_DEFAULT_ATTRIBUTES: Dict[str, str] = {
"fill": "black",
"fill-opacity": "1",
"stroke": "none",
"stroke-opacity": "1",
}
def cascade_element_style(
element: MinidomElement, inherited: Dict[str, str]
) -> Dict[str, str]:
"""Collect the element's style attributes based upon both its inheritance and its own attributes.
SVG uses cascading element styles. A closer ancestor's style takes precedence over a more distant ancestor's
style. In order to correctly calculate the styles, the attributes are passed down through the inheritance tree,
updating where necessary.
Note that this method only copies the values and does not parse them. See :meth:`parse_color_string` for converting
from SVG attributes to manim keyword arguments.
Parameters
----------
element : :class:`MinidomElement`
Element of the SVG parse tree
inherited : :class:`dict`
Dictionary of SVG attributes inherited from the parent element.
Returns
-------
:class:`dict`
Dictionary mapping svg attributes to values with `element`'s values overriding inherited values.
"""
style = inherited.copy()
# cascade the regular elements.
for attr in CASCADING_STYLING_ATTRIBUTES:
entry = element.getAttribute(attr)
if entry:
style[attr] = entry
# the style attribute should be handled separately in order to
# break it up nicely. furthermore, style takes priority over other
# attributes in the same element.
style_specs = element.getAttribute("style")
if style_specs:
for style_spec in style_specs.split(";"):
try:
key, value = style_spec.split(":")
except ValueError as e:
if not style_spec.strip():
# there was just a stray semicolon at the end, producing an empty string
pass
else:
raise e
else:
style[key.strip()] = value.strip()
return style
def parse_color_string(color_spec: str) -> str:
"""Handle the SVG-specific color strings and convert them to HTML #rrggbb format.
Parameters
----------
color_spec : :class:`str`
String in any web-compatible format
Returns
-------
:class:`str`
Hexadecimal color string in the format `#rrggbb`
"""
if color_spec[0:3] == "rgb":
# these are only in integer form, but the Colour module wants them in floats.
splits = color_spec[4:-1].split(",")
if splits[0][-1] == "%":
# if the last character of the first number is a percentage,
# then interpret the number as a percentage
parsed_rgbs = [float(i[:-1]) / 100.0 for i in splits]
else:
parsed_rgbs = [int(i) / 255.0 for i in splits]
hex_color = rgb_to_hex(parsed_rgbs)
elif color_spec[0] == "#":
# it's OK; parse as a standard hex color.
hex_color = color_spec
else:
# attempt to convert color names like "red" to hex color
hex_color = web2hex(color_spec, force_long=True)
return hex_color
def fill_default_values(svg_style: Dict) -> None:
"""
Fill in the default values for properties of SVG elements,
if they are not currently set in the style dictionary.
Parameters
----------
svg_style : :class:`dict`
Style dictionary with SVG property names. Some may be missing.
Returns
-------
None
``svg_style`` is updated in place so that no property is missing.
"""
for key in SVG_DEFAULT_ATTRIBUTES:
if key not in svg_style:
svg_style[key] = SVG_DEFAULT_ATTRIBUTES[key]
def parse_style(svg_style: Dict[str, str]) -> Dict:
"""Convert a dictionary of SVG attributes to Manim VMobject keyword arguments.
Parameters
----------
svg_style : :class:`dict`
Style attributes as a string-to-string dictionary. Keys are valid SVG element attributes (fill, stroke, etc)
Returns
-------
:class:`dict`
Style attributes, but in manim kwargs form, e.g., keys are fill_color, stroke_color
"""
manim_style = {}
fill_default_values(svg_style)
if "fill-opacity" in svg_style:
manim_style["fill_opacity"] = float(svg_style["fill-opacity"])
if "stroke-opacity" in svg_style:
manim_style["stroke_opacity"] = float(svg_style["stroke-opacity"])
# nones need to be handled specially
if "fill" in svg_style:
if svg_style["fill"] == "none":
manim_style["fill_opacity"] = 0
else:
manim_style["fill_color"] = parse_color_string(svg_style["fill"])
if "stroke" in svg_style:
if svg_style["stroke"] == "none":
# In order to not break animations.creation.Write,
# we interpret no stroke as stroke-width of zero and
# color the same as the fill color, if it exists.
manim_style["stroke_width"] = 0
if "fill_color" in manim_style:
manim_style["stroke_color"] = manim_style["fill_color"]
else:
manim_style["stroke_color"] = parse_color_string(svg_style["stroke"])
return manim_style
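# A small, hedged usage sketch of the parsing pipeline above. The attribute dictionary is
# hypothetical (in practice it comes from cascade_element_style on a minidom element), and
# the exact hex string assumes rgb_to_hex yields lowercase '#rrggbb':
svg_style = {"fill": "rgb(255, 0, 0)", "stroke": "none", "fill-opacity": "0.5"}
manim_kwargs = parse_style(svg_style)
print(manim_kwargs)
# Expected (roughly): {'fill_opacity': 0.5, 'stroke_opacity': 1.0,
#                      'fill_color': '#ff0000', 'stroke_width': 0,
#                      'stroke_color': '#ff0000'}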
| 31.140541
| 119
| 0.635306
| 745
| 5,761
| 4.781208
| 0.303356
| 0.038181
| 0.022459
| 0.020213
| 0.112296
| 0.06064
| 0
| 0
| 0
| 0
| 0
| 0.00543
| 0.264711
| 5,761
| 184
| 120
| 31.309783
| 0.835458
| 0.453741
| 0
| 0.08
| 0
| 0
| 0.116438
| 0.007192
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053333
| false
| 0.013333
| 0.053333
| 0
| 0.146667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d2760eb1387adc84de54a02742f29d1beeb4ef2
| 1,361
|
py
|
Python
|
iotrigger.py
|
mm011106/iotrigger
|
643ced0440a8c4fb95ade56399f813c88ac8ddd6
|
[
"Apache-2.0"
] | null | null | null |
iotrigger.py
|
mm011106/iotrigger
|
643ced0440a8c4fb95ade56399f813c88ac8ddd6
|
[
"Apache-2.0"
] | 1
|
2017-01-08T14:22:32.000Z
|
2019-01-08T23:51:53.000Z
|
iotrigger.py
|
mm011106/iotrigger
|
643ced0440a8c4fb95ade56399f813c88ac8ddd6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#coding:utf-8
import os
import RPi.GPIO as GPIO #
import json
from time import sleep #
from twython import Twython
f=open("tw_config.json",'r')
config=json.load(f)
f.close()
CONSUMER_KEY =config['consumer_key']
CONSUMER_SECRET =config['consumer_secret']
ACCESS_TOKEN =config['access_token']
ACCESS_SECRET =config['access_secret']
dist=config['dist']
def on_positive_edge(channel):
#time stamp
timestamp = 'date +%F_%H:%M:%S'
current_time=os.popen(timestamp).readline().strip()
# get CPU temperature
cmd = '/opt/vc/bin/vcgencmd measure_temp'
line = os.popen(cmd).readline().strip()
temp = line.split('=')[1].split("'")[0]
direct_message='CPU:'+temp+'deg @'+current_time+' : by Python script'
global ledstate
if channel == trigger_input:
ledstate = not ledstate
GPIO.output(25, ledstate)
api.send_direct_message(text=direct_message, screen_name=dist)
api = Twython(CONSUMER_KEY,CONSUMER_SECRET,ACCESS_TOKEN,ACCESS_SECRET)
trigger_input=21
GPIO.setmode(GPIO.BCM)
GPIO.setup(25, GPIO.OUT)
GPIO.setup(trigger_input, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(trigger_input, GPIO.RISING, callback=on_positive_edge, bouncetime=1000)
ledstate = GPIO.LOW
try:
while True:
sleep(0.01)
except KeyboardInterrupt: #
pass
GPIO.cleanup() #
| 21.951613
| 93
| 0.714916
| 196
| 1,361
| 4.785714
| 0.520408
| 0.051173
| 0.040512
| 0.053305
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013817
| 0.149155
| 1,361
| 61
| 94
| 22.311475
| 0.7962
| 0.045555
| 0
| 0
| 0
| 0
| 0.117738
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0.026316
| 0.131579
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d2a93367402f8cedeb2edebdd44e28110111fbf
| 4,209
|
py
|
Python
|
save_tweets.py
|
iglesiasmanu/data_analysis
|
61127c91ad0eb11ecdc7258e186e430e9dddb0b6
|
[
"BSD-3-Clause"
] | null | null | null |
save_tweets.py
|
iglesiasmanu/data_analysis
|
61127c91ad0eb11ecdc7258e186e430e9dddb0b6
|
[
"BSD-3-Clause"
] | null | null | null |
save_tweets.py
|
iglesiasmanu/data_analysis
|
61127c91ad0eb11ecdc7258e186e430e9dddb0b6
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from os import path
from tweepy import OAuthHandler, Stream
from tweepy.streaming import StreamListener
from sqlalchemy.orm.exc import NoResultFound
from database import session, Tweet, Hashtag, User
consumer_key = "0qFf4T2xPWVIycLmAwk3rDQ55"
consumer_secret = "LcHpujASn4fIIrQ8sikbCTQ3oyU6T6opchFVWBBqwICahzSE64"
access_token = "4271002872-XLo7TNnE3qvYevqLmT1RBuiJ5CJ3o0DCr3WReAT"
acces_token_secret = "ulZ3dA25zuC6BGJgaFowCSTIm6gKVtOa4x9y7tO0IUDIx"
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, acces_token_secret)
def save_tweets():
directory = _get_dir_absolute_path()
filepath = path.join(directory, "tweets.json")
listener = DatabaseListener(number_tweets_to_save = 1000, filepath=filepath)
stream = Stream(auth, listener)
languages = ("en",)
try:
stream.sample(languages = languages)
except KeyboardInterrupt:
listener.file.close()
class DatabaseListener(StreamListener):
def __init__(self, number_tweets_to_save, filepath = None):
self._final_count = number_tweets_to_save
self._current_count = 0
if filepath is None:
filepath = "tweets.txt"
self.file = open(filepath,"w")
# Slightly dangerous due to circular references
def __del__(self):
self.file.close()
def on_data(self, raw_data):
data = json.loads(raw_data)
json.dump(raw_data, self.file)
self.file.write("\n")
if "in_reply_to_status_id" in data:
return self.on_status(data)
def on_status(self, data):
# save_to_database is defined later in this file
save_to_database(data)
self._current_count += 1
print("status count: {}".format(self._current_count))
if self._current_count >= self._final_count:
return False
def create_user_helper(user_data):
#alias to shorten calls
u = user_data
user = user(uid = u["id_str"],
name = u["name"],
screen_name = u["screen_name"],
created_at = u["created_at"],
description = u.get("description"),
followers_count = u["followers_count"],
statuses_count = u["statuses_count"],
favourites_count = u["favourites_count"],
listed_count = u["listed_count"],
geo_enabled = u["geo_enabled"],
lang = u.get("lang"))
return user
def create_tweet_helper(tweet_data, user):
# alias to shorten calls
t = tweet_data
retweet = True if t["text"][:3] == "RT " else False
coordinates = json.dumps(t["coordinates"])
tweet = Tweet(tid=t["id_str"],
tweet=t["text"],
user=user,
coordinates=coordinates,
created_at = t["created_at"],
favorite_count = t["favorite_count"],
in_reply_to_screen_name = t["in_reply_to_screen_name"],
in_reply_to_status_id = t["in_reply_to_status_id"],
in_reply_to_user_id = t["in_reply_to_user_id"],
lang = t.get("lang"),
quoted_status_id = t.get("quoted_status_id"),
retweet_count = t["retweet_count"],
source = t["source"],
is_retweet = retweet)
return tweet
def save_to_database(data):
try:
user = session.query(User).filter_by(id=str(data["user"]["id"])).one()
except NoResultFound:
user = create_user_helper(data["user"])
session.add(user)
hashtag_results = []
hashtags = data["entities"]["hashtags"]
for hashtag in hashtags:
hashtag = hashtag["text"].lower()
try:
hashtag_obj = session.query(Hashtag).filter_by(text=hashtag).one()
except NoResultFound:
hashtag_obj = Hashtag(text=hashtag)
session.add(hashtag_obj)
hashtag_results.append(hashtag_obj)
tweet = create_tweet_helper(data, user)
for hashtag in hashtag_results:
tweet.hashtags.append(hashtag)
session.add(tweet)
session.commit()
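# `_get_dir_absolute_path` is called in save_tweets() above but not defined in this file;
# a minimal stand-in and a hedged entry point are sketched below:
def _get_dir_absolute_path():
    return path.dirname(path.abspath(__file__))

if __name__ == "__main__":
    save_tweets()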
| 32.882813
| 80
| 0.623188
| 482
| 4,209
| 5.165975
| 0.290456
| 0.019679
| 0.025301
| 0.021687
| 0.049398
| 0.015261
| 0
| 0
| 0
| 0
| 0
| 0.015077
| 0.275125
| 4,209
| 127
| 81
| 33.141732
| 0.801049
| 0.029936
| 0
| 0.030612
| 0
| 0
| 0.130211
| 0.057626
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081633
| false
| 0
| 0.061224
| 0
| 0.193878
| 0.010204
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d2b1bb602059e1df9b567f6022d9d62a73d9127
| 907
|
py
|
Python
|
app/views/main.py
|
chrisjws-harness/flaskSaaS
|
f42558c523de23f03a098044df164ead3539a4dd
|
[
"MIT"
] | null | null | null |
app/views/main.py
|
chrisjws-harness/flaskSaaS
|
f42558c523de23f03a098044df164ead3539a4dd
|
[
"MIT"
] | null | null | null |
app/views/main.py
|
chrisjws-harness/flaskSaaS
|
f42558c523de23f03a098044df164ead3539a4dd
|
[
"MIT"
] | null | null | null |
from flask import render_template, jsonify
from app import app
import random
@app.route('/')
@app.route('/index')
def index():
# Feature flags init goes here!
#
# noinspection PyDictCreation
flags = {
"welcome_text": "welcome to my python FF tutorial!"
}
# Flag goes here!
#
flags["alternate_homescreen"] = False
return render_template(
'index.html',
**flags,
title='Home'
)
@app.route('/map')
def map():
return render_template('map.html', title='Map')
@app.route('/map/refresh', methods=['POST'])
def map_refresh():
points = [(random.uniform(48.8434100, 48.8634100),
random.uniform(2.3388000, 2.3588000))
for _ in range(random.randint(2, 9))]
return jsonify({'points': points})
@app.route('/contact')
def contact():
return render_template('contact.html', title='Contact')
| 19.717391
| 59
| 0.61301
| 107
| 907
| 5.121495
| 0.476636
| 0.072993
| 0.109489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051948
| 0.235943
| 907
| 45
| 60
| 20.155556
| 0.738817
| 0.080485
| 0
| 0
| 0
| 0
| 0.181159
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.148148
| false
| 0
| 0.111111
| 0.074074
| 0.407407
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d2e7fe7c422e728c2698140a25e0895a9bb3030
| 254
|
py
|
Python
|
base_sample/numpy_mat.py
|
keepangry/ai_algorithm
|
21d8024296a2f2d2797448ed34eb383359259684
|
[
"Apache-2.0"
] | 2
|
2018-08-29T11:09:36.000Z
|
2018-10-22T11:46:36.000Z
|
base_sample/numpy_mat.py
|
keepangry/ai_algorithm
|
21d8024296a2f2d2797448ed34eb383359259684
|
[
"Apache-2.0"
] | null | null | null |
base_sample/numpy_mat.py
|
keepangry/ai_algorithm
|
21d8024296a2f2d2797448ed34eb383359259684
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
'''
@author: yangsen
@license:
@contact:
@software:
@file: numpy_mat.py
@time: 18-8-25 9:56 PM
@desc:
'''
import numpy as np
a = np.arange(9).reshape(3,3)
# rows
a[1]
a[[1,2]]
a[np.array([1,2])]
# 列
a[:,1]
a[:,[1,2]]
a[:,np.array([1,2])]
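# For reference, with a = np.arange(9).reshape(3, 3) the selections above yield:
# a[1] -> array([3, 4, 5]) (the second row)
# a[[1, 2]] and a[np.array([1, 2])] -> rows 1 and 2 as a 2x3 array
# a[:, 1] -> array([1, 4, 7]) (the second column)
# a[:, [1, 2]] and a[:, np.array([1, 2])] -> columns 1 and 2 as a 3x2 array
print(a[1], a[:, 1])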
| 11.043478
| 29
| 0.574803
| 52
| 254
| 2.788462
| 0.596154
| 0.055172
| 0.041379
| 0.055172
| 0.206897
| 0.206897
| 0.206897
| 0.206897
| 0.206897
| 0.206897
| 0
| 0.100457
| 0.137795
| 254
| 23
| 30
| 11.043478
| 0.561644
| 0.46063
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d2ea06d1190699ab0cefe6b4179c68d747feca4
| 17,002
|
py
|
Python
|
ai_traineree/agents/rainbow.py
|
laszukdawid/ai-traineree
|
af32940eba8e11012de87b60d78f10f5a3b96c79
|
[
"Apache-2.0"
] | 22
|
2020-08-21T03:03:05.000Z
|
2022-02-22T10:15:36.000Z
|
ai_traineree/agents/rainbow.py
|
laszukdawid/ai-traineree
|
af32940eba8e11012de87b60d78f10f5a3b96c79
|
[
"Apache-2.0"
] | 23
|
2021-02-13T20:35:45.000Z
|
2022-02-06T20:15:37.000Z
|
ai_traineree/agents/rainbow.py
|
laszukdawid/ai-traineree
|
af32940eba8e11012de87b60d78f10f5a3b96c79
|
[
"Apache-2.0"
] | 6
|
2020-10-11T08:36:09.000Z
|
2021-11-20T18:31:03.000Z
|
import copy
from typing import Callable, Dict, List, Optional
import torch
import torch.nn as nn
import torch.optim as optim
from ai_traineree import DEVICE
from ai_traineree.agents import AgentBase
from ai_traineree.agents.agent_utils import soft_update
from ai_traineree.buffers import NStepBuffer, PERBuffer
from ai_traineree.buffers.buffer_factory import BufferFactory
from ai_traineree.loggers import DataLogger
from ai_traineree.networks.heads import RainbowNet
from ai_traineree.types import ActionType, AgentState, BufferState, DoneType, NetworkState, ObsType, RewardType
from ai_traineree.types.dataspace import DataSpace
from ai_traineree.utils import to_numbers_seq, to_tensor
class RainbowAgent(AgentBase):
"""Rainbow agent as described in [1].
Rainbow is a DQN agent with several improvements that were suggested before 2017.
As the authors mention, this is not an exhaustive set of improvements, but the changes cover
relatively separate areas, so combining them makes sense. These improvements are:
* Priority Experience Replay
* Multi-step
* Double Q net
* Dueling nets
* NoisyNet
* CategoricalNet for Q estimate
Consider this class as a particular version of the DQN agent.
[1] "Rainbow: Combining Improvements in Deep Reinforcement Learning" by Hessel et al. (DeepMind team)
https://arxiv.org/abs/1710.02298
"""
model = "Rainbow"
def __init__(
self,
obs_space: DataSpace,
action_space: DataSpace,
state_transform: Optional[Callable]=None,
reward_transform: Optional[Callable]=None,
**kwargs
):
"""
A wrapper over the DQN agent, thus the majority of the logic is in DQNAgent.
Special treatment is required because the Rainbow agent uses categorical nets
which operate on probability distributions. Each action is taken as the estimate
from such distributions.
Parameters:
obs_space (DataSpace): Dataspace describing the input.
action_space (DataSpace): Dataspace describing the output.
state_transform (optional func):
reward_transform (optional func):
Keyword parameters:
pre_network_fn (function that takes input_shape and returns network):
Used to preprocess state before it is used in the value- and advantage-function in the dueling nets.
hidden_layers (tuple of ints): Shape of the hidden layers in fully connected network. Default: (100, 100).
lr (default: 1e-3): Learning rate value.
gamma (float): Discount factor. Default: 0.99.
tau (float): Soft-copy factor. Default: 0.002.
update_freq (int): Number of steps between each learning step. Default 1.
batch_size (int): Number of samples to use at each learning step. Default: 80.
buffer_size (int): Number of most recent samples to keep in memory for learning. Default: 1e5.
warm_up (int): Number of samples to observe before starting any learning step. Default: 0.
number_updates (int): How many times to use learning step in the learning phase. Default: 1.
max_grad_norm (float): Maximum norm of the gradient used in learning. Default: 10.
using_double_q (bool): Whether to use Double Q Learning network. Default: True.
n_steps (int): Number of lookahead steps when estimating reward. See :ref:`NStepBuffer`. Default: 3.
v_min (float): Lower bound for distributional value V. Default: -10.
v_max (float): Upper bound for distributional value V. Default: 10.
num_atoms (int): Number of atoms (discrete states) in the value V distribution. Default: 21.
"""
super().__init__(**kwargs)
self.device = self._register_param(kwargs, "device", DEVICE, update=True)
self.obs_space = obs_space
self.action_space = action_space
self._config['obs_space'] = self.obs_space
self._config['action_space'] = self.action_space
self.action_size = action_space.to_feature()
self.lr = float(self._register_param(kwargs, 'lr', 3e-4))
self.gamma = float(self._register_param(kwargs, 'gamma', 0.99))
self.tau = float(self._register_param(kwargs, 'tau', 0.002))
self.update_freq = int(self._register_param(kwargs, 'update_freq', 1))
self.batch_size = int(self._register_param(kwargs, 'batch_size', 80, update=True))
self.buffer_size = int(self._register_param(kwargs, 'buffer_size', int(1e5), update=True))
self.warm_up = int(self._register_param(kwargs, 'warm_up', 0))
self.number_updates = int(self._register_param(kwargs, 'number_updates', 1))
self.max_grad_norm = float(self._register_param(kwargs, 'max_grad_norm', 10))
self.iteration: int = 0
self.using_double_q = bool(self._register_param(kwargs, "using_double_q", True))
self.state_transform = state_transform if state_transform is not None else lambda x: x
self.reward_transform = reward_transform if reward_transform is not None else lambda x: x
v_min = float(self._register_param(kwargs, "v_min", -10))
v_max = float(self._register_param(kwargs, "v_max", 10))
self.num_atoms = int(self._register_param(kwargs, "num_atoms", 21, drop=True))
self.z_atoms = torch.linspace(v_min, v_max, self.num_atoms, device=self.device)
self.z_delta = self.z_atoms[1] - self.z_atoms[0]
self.buffer = PERBuffer(**kwargs)
self.__batch_indices = torch.arange(self.batch_size, device=self.device)
self.n_steps = int(self._register_param(kwargs, "n_steps", 3))
self.n_buffer = NStepBuffer(n_steps=self.n_steps, gamma=self.gamma)
# Note that in case a pre_network is provided, e.g. a shared net that extracts pixels values,
# it should be explicitly passed in kwargs
kwargs["hidden_layers"] = to_numbers_seq(self._register_param(kwargs, "hidden_layers", (100, 100)))
self.net = RainbowNet(obs_space.shape, self.action_size, num_atoms=self.num_atoms, **kwargs)
self.target_net = RainbowNet(obs_space.shape, self.action_size, num_atoms=self.num_atoms, **kwargs)
self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr)
self.dist_probs = None
self._loss = float('nan')
@property
def loss(self):
return {'loss': self._loss}
@loss.setter
def loss(self, value):
if isinstance(value, dict):
value = value['loss']
self._loss = value
def step(self, obs: ObsType, action: ActionType, reward: RewardType, next_obs: ObsType, done: DoneType) -> None:
"""Letting the agent to take a step.
On some steps the agent will initiate learning step. This is dependent on
the `update_freq` value.
Parameters:
obs (ObservationType): Observation.
action (int): Discrete action associated with observation.
reward (float): Reward obtained for taking action at state.
next_obs (ObservationType): Observation of the state that taking the action led to.
done: (bool) Whether in terminal (end of episode) state.
"""
assert isinstance(action, int), "Rainbow expects discrete action (int)"
self.iteration += 1
t_obs = to_tensor(self.state_transform(obs)).float().to("cpu")
t_next_obs = to_tensor(self.state_transform(next_obs)).float().to("cpu")
reward = self.reward_transform(reward)
# Delay adding to buffer to account for n_steps (particularly the reward)
self.n_buffer.add(
state=t_obs.numpy(), action=[int(action)], reward=[reward], done=[done], next_state=t_next_obs.numpy()
)
if not self.n_buffer.available:
return
self.buffer.add(**self.n_buffer.get().get_dict())
if self.iteration < self.warm_up:
return
if len(self.buffer) >= self.batch_size and (self.iteration % self.update_freq) == 0:
for _ in range(self.number_updates):
self.learn(self.buffer.sample())
# Update networks only once - sync local & target
soft_update(self.target_net, self.net, self.tau)
def act(self, obs: ObsType, eps: float = 0.) -> int:
"""
Returns actions for given state as per current policy.
Parameters:
obs: Current observation from the environment.
eps: Epsilon value in the epsilon-greedy policy.
"""
# Epsilon-greedy action selection
if self._rng.random() < eps:
# TODO: Update with action_space.sample() once implemented
assert len(self.action_space.shape) == 1, "Only 1D is supported right now"
return self._rng.randint(self.action_space.low, self.action_space.high)
t_obs = to_tensor(self.state_transform(obs)).float().unsqueeze(0).to(self.device)
self.dist_probs = self.net.act(t_obs)
q_values = (self.dist_probs * self.z_atoms).sum(-1)
return int(q_values.argmax(-1)) # Action maximizes state-action value Q(s, a)
def learn(self, experiences: Dict[str, List]) -> None:
"""
Parameters:
experiences: Contains all experiences for the agent. Typically sampled from the memory buffer.
Five keys are expected, i.e. `state`, `action`, `reward`, `next_state`, `done`.
Each key maps to an array, and all arrays must have the same length.
"""
rewards = to_tensor(experiences['reward']).float().to(self.device)
dones = to_tensor(experiences['done']).type(torch.int).to(self.device)
states = to_tensor(experiences['state']).float().to(self.device)
next_states = to_tensor(experiences['next_state']).float().to(self.device)
actions = to_tensor(experiences['action']).type(torch.long).to(self.device)
assert rewards.shape == dones.shape == (self.batch_size, 1)
assert states.shape == next_states.shape == (self.batch_size,) + self.obs_space.shape
assert actions.shape == (self.batch_size, 1) # Discrete domain
with torch.no_grad():
prob_next = self.target_net.act(next_states)
q_next = (prob_next * self.z_atoms).sum(-1) * self.z_delta
if self.using_double_q:
duel_prob_next = self.net.act(next_states)
a_next = torch.argmax((duel_prob_next * self.z_atoms).sum(-1), dim=-1)
else:
a_next = torch.argmax(q_next, dim=-1)
prob_next = prob_next[self.__batch_indices, a_next, :]
m = self.net.dist_projection(rewards, 1 - dones, self.gamma ** self.n_steps, prob_next)
assert m.shape == (self.batch_size, self.num_atoms)
log_prob = self.net(states, log_prob=True)
assert log_prob.shape == (self.batch_size,) + self.action_size + (self.num_atoms,)
log_prob = log_prob[self.__batch_indices, actions.squeeze(), :]
assert log_prob.shape == m.shape == (self.batch_size, self.num_atoms)
# Cross-entropy loss error and the loss is batch mean
error = -torch.sum(m * log_prob, 1)
assert error.shape == (self.batch_size,)
loss = error.mean()
assert loss >= 0
self.optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(self.net.parameters(), self.max_grad_norm)
self.optimizer.step()
self._loss = float(loss.item())
if hasattr(self.buffer, 'priority_update'):
assert (~torch.isnan(error)).any()
self.buffer.priority_update(experiences['index'], error.detach().cpu().numpy())
# Update networks - sync local & target
soft_update(self.target_net, self.net, self.tau)
def state_dict(self) -> Dict[str, dict]:
"""Returns agent's state dictionary.
Returns:
State dictionary for the internal networks.
"""
return {"net": self.net.state_dict(), "target_net": self.target_net.state_dict()}
def log_metrics(self, data_logger: DataLogger, step: int, full_log: bool=False):
data_logger.log_value("loss/agent", self._loss, step)
if full_log and self.dist_probs is not None:
assert len(self.action_space.shape) == 1, "Only 1D actions currently supported"
action_size = self.action_size[0]
for action_idx in range(action_size):
dist = self.dist_probs[0, action_idx]
data_logger.log_value(f'dist/expected_{action_idx}', (dist*self.z_atoms).sum().item(), step)
data_logger.add_histogram(
f'dist/Q_{action_idx}', min=self.z_atoms[0], max=self.z_atoms[-1], num=len(self.z_atoms),
sum=dist.sum(), sum_squares=dist.pow(2).sum(), bucket_limits=self.z_atoms+self.z_delta,
bucket_counts=dist, global_step=step
)
# Even though `log_metrics` isn't executed on every iteration, we still delay plotting the
# weights (behind `full_log`) just in case, since it can be quite costly. Tread wisely.
if full_log:
for idx, layer in enumerate(self.net.value_net.layers):
if hasattr(layer, "weight"):
data_logger.create_histogram(f"value_net/layer_weights_{idx}", layer.weight.cpu(), step)
if hasattr(layer, "bias") and layer.bias is not None:
data_logger.create_histogram(f"value_net/layer_bias_{idx}", layer.bias.cpu(), step)
for idx, layer in enumerate(self.net.advantage_net.layers):
if hasattr(layer, "weight"):
data_logger.create_histogram(f"advantage_net/layer_{idx}", layer.weight.cpu(), step)
if hasattr(layer, "bias") and layer.bias is not None:
data_logger.create_histogram(f"advantage_net/layer_bias_{idx}", layer.bias.cpu(), step)
def get_state(self) -> AgentState:
"""Provides agent's internal state."""
return AgentState(
model=self.model,
obs_space=self.obs_space,
action_space=self.action_space,
config=self._config,
buffer=copy.deepcopy(self.buffer.get_state()),
network=copy.deepcopy(self.get_network_state()),
)
def get_network_state(self) -> NetworkState:
return NetworkState(net=dict(net=self.net.state_dict(), target_net=self.target_net.state_dict()))
@staticmethod
def from_state(state: AgentState) -> AgentBase:
config = copy.copy(state.config)
config.update({'obs_space': state.obs_space, 'action_space': state.action_space})
agent = RainbowAgent(**config)
if state.network is not None:
agent.set_network(state.network)
if state.buffer is not None:
agent.set_buffer(state.buffer)
return agent
def set_network(self, network_state: NetworkState) -> None:
self.net.load_state_dict(network_state.net['net'])
self.target_net.load_state_dict(network_state.net['target_net'])
def set_buffer(self, buffer_state: BufferState) -> None:
self.buffer = BufferFactory.from_state(buffer_state)
def save_state(self, path: str) -> None:
"""Saves agent's state into a file.
Parameters:
path: String path where to write the state.
"""
agent_state = self.get_state()
torch.save(agent_state, path)
def load_state(self, path: str) -> None:
"""Loads state from a file under provided path.
Parameters:
path: String path indicating where the state is stored.
"""
agent_state = torch.load(path)
self._config = agent_state.get('config', {})
self.__dict__.update(**self._config)
self.net.load_state_dict(agent_state['net'])
self.target_net.load_state_dict(agent_state['target_net'])
def save_buffer(self, path: str) -> None:
"""Saves data from the buffer into a file under provided path.
Parameters:
path: String path where to write the buffer.
"""
import json
dump = self.buffer.dump_buffer(serialize=True)
with open(path, 'w') as f:
json.dump(dump, f)
def load_buffer(self, path: str) -> None:
"""Loads data into the buffer from provided file path.
Parameters:
path: String path indicating where the buffer is stored.
"""
import json
with open(path, 'r') as f:
buffer_dump = json.load(f)
self.buffer.load_buffer(buffer_dump)
def __eq__(self, o: object) -> bool:
return super().__eq__(o) \
and isinstance(o, type(self)) \
and self._config == o._config \
and self.buffer == o.buffer \
and self.get_network_state() == o.get_network_state()
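# RainbowNet.dist_projection is called in learn() above but implemented elsewhere; below is
# a minimal, self-contained sketch of the standard categorical (C51) projection step it
# corresponds to. The function name and signature are illustrative, not the library's API.
import torch

def project_distribution(rewards, masks, gamma, prob_next, z_atoms):
    # rewards, masks: (batch, 1); prob_next: (batch, num_atoms); z_atoms: (num_atoms,)
    batch_size, num_atoms = prob_next.shape
    v_min, v_max = z_atoms[0].item(), z_atoms[-1].item()
    delta_z = (v_max - v_min) / (num_atoms - 1)
    # Distributional Bellman update of every support atom, clamped back onto the support.
    tz = (rewards + masks * gamma * z_atoms.unsqueeze(0)).clamp(v_min, v_max)
    b = (tz - v_min) / delta_z  # fractional bin index of each updated atom
    l, u = b.floor().long(), b.ceil().long()
    # Avoid losing probability mass when b lands exactly on an atom (l == u).
    l[(u > 0) & (l == u)] -= 1
    u[(l < num_atoms - 1) & (l == u)] += 1
    # Split each atom's mass between its two neighbouring bins, proportionally.
    m = torch.zeros_like(prob_next)
    m.scatter_add_(1, l, prob_next * (u.float() - b))
    m.scatter_add_(1, u, prob_next * (b - l.float()))
    return m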
| 45.218085
| 118
| 0.648688
| 2,261
| 17,002
| 4.697037
| 0.199912
| 0.016573
| 0.025612
| 0.034652
| 0.229849
| 0.161111
| 0.141055
| 0.114689
| 0.082109
| 0.05951
| 0
| 0.008118
| 0.2465
| 17,002
| 375
| 119
| 45.338667
| 0.820857
| 0.272262
| 0
| 0.048077
| 0
| 0
| 0.05104
| 0.011588
| 0
| 0
| 0
| 0.002667
| 0.057692
| 1
| 0.086538
| false
| 0
| 0.081731
| 0.014423
| 0.225962
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d2fb89dd83715259f4676be3c051b02287f606c
| 4,169
|
py
|
Python
|
nvvm/core/nvvm.py
|
uchytilc/PyCu
|
9ba25281611bf4dbd70d37f4eba0574f817d6928
|
[
"MIT"
] | null | null | null |
nvvm/core/nvvm.py
|
uchytilc/PyCu
|
9ba25281611bf4dbd70d37f4eba0574f817d6928
|
[
"MIT"
] | null | null | null |
nvvm/core/nvvm.py
|
uchytilc/PyCu
|
9ba25281611bf4dbd70d37f4eba0574f817d6928
|
[
"MIT"
] | null | null | null |
from pycu.nvvm import (get_libdevice, ir_version, version, add_module_to_program, compile_program,
create_program, destroy_program, get_compiled_result, get_compiled_result_size,
get_program_log, get_program_log_size, lazy_add_module_to_program, verify_program)
import os
import sys
from ctypes import c_char_p
import weakref
class NVVMPtr:
# #key: arch associated with libdevice (None indicates libdevice is not arch specific)
# #value: libdevice source
# libdevice = {}
# #key:given arch
# #value: closest available arch found
# searched_arch = {}
def __init__(self, handle, arch = 20):
self.get_libdevice(arch)
self.handle = handle
def get_libdevice(self, arch = 20):
return get_libdevice(arch)
# libdevice = self.libdevice.get(arch, None)
# if libdevice is None:
# #note: use False instead of None in searched_arch.get when indicating failure to prevent getting None key from libdevice (libdevice with no "compute_" is stored under None key)
# libdevice = self.libdevice.get(self.searched_arch.get(arch, False), None)
# if libdevice is None:
# found_arch, libdevice = next(iter(get_libdevice(arch).items()))
# self.searched_arch[arch] = found_arch
# self.libdevice[arch] = libdevice
# return libdevice
def get_version(self):
return version()
def get_ir_version(self):
return ir_version()
def add_module(self, buff, name = "<unnamed>"):
if isinstance(buff, str):
buff = buff.encode('utf8')
if isinstance(name, str):
name = name.encode('utf8')
size = len(buff)
add_module_to_program(self.handle, buff, size, name)
def compile(self, options = {}):
"""
https://docs.nvidia.com/cuda/libnvvm-api/group__compilation.html#group__compilation_1g76ac1e23f5d0e2240e78be0e63450346
Valid compiler options are
-g (enable generation of debugging information, valid only with -opt=0)
-generate-line-info (generate line number information)
-opt=
0 (disable optimizations)
3 (default, enable optimizations)
-arch=
compute_35
compute_37
compute_50
compute_52 (default)
compute_53
compute_60
compute_61
compute_62
compute_70
compute_72
compute_75
compute_80
-ftz=
0 (default, preserve denormal values, when performing single-precision floating-point operations)
1 (flush denormal values to zero, when performing single-precision floating-point operations)
-prec-sqrt=
0 (use a faster approximation for single-precision floating-point square root)
1 (default, use IEEE round-to-nearest mode for single-precision floating-point square root)
-prec-div=
0 (use a faster approximation for single-precision floating-point division and reciprocals)
1 (default, use IEEE round-to-nearest mode for single-precision floating-point division and reciprocals)
-fma=
0 (disable FMA contraction)
1 (default, enable FMA contraction)
-g (enable generation of debugging information, valid only with -opt=0)
-generate-line-info (generate line number information)
"""
opt = options.get("opt", 3)
arch = options.get("arch", 52)
ftz = options.get("ftz", 0)
prec_sqrt = options.get("prec_sqrt", 1)
prec_div = options.get("prec_div", 1)
fma = options.get("fma", 0)
opts = [f"-opt={opt}",
f"-arch=compute_{arch}",
f"-ftz={ftz}",
f"-prec-sqrt={prec_sqrt}",
f"-prec-div={prec_div}",
f"-fma={fma}",]
if options.get("g", False) and opt == 0:
if opt == 0:
opts.append("-g")
else:
#raise warning (g is only valid when -opt=0)
pass
if options.get("generate-line-info", True):
opts.append("-generate-line-info")
options = (c_char_p * len(opts))(*[c_char_p(opt.encode('utf8')) for opt in opts])
compile_program(self.handle, options)
ptx = get_compiled_result(self.handle)
#TO DO
#Apply Numba's debug patch to ptx
return ptx
def verify_program(self, options = {}):
pass
# verify_program(self.handle, )
class NVVM(NVVMPtr):
def __init__(self, arch = 20):
# self.handle = handle = create_program()
handle = create_program()
weakref.finalize(self, destroy_program, handle)
super().__init__(handle, arch)
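# A hedged usage sketch for the wrapper above. `llvm_ir` is a hypothetical string holding
# an NVVM IR module; the compile() options mirror the keys parsed in NVVMPtr.compile.
nvvm = NVVM(arch=52)
nvvm.add_module(llvm_ir, name="kernel_module")
ptx = nvvm.compile({"opt": 3, "arch": 52, "fma": 1})
print(ptx)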
| 28.951389
| 181
| 0.708803
| 583
| 4,169
| 4.910806
| 0.291595
| 0.027943
| 0.048201
| 0.05868
| 0.22913
| 0.21446
| 0.21446
| 0.171149
| 0.155781
| 0.155781
| 0
| 0.02282
| 0.180139
| 4,169
| 143
| 182
| 29.153846
| 0.814804
| 0.611897
| 0
| 0.036364
| 0
| 0
| 0.096672
| 0.011622
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145455
| false
| 0.036364
| 0.090909
| 0.054545
| 0.345455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d332a4a020a6faa75a8b1522601b2bced79121d
| 2,621
|
py
|
Python
|
Codes/Data Processing.py
|
BrickerP/Investment-
|
8b57c0d157a7eaa38d693c8d42ce1bc7dc7bdde9
|
[
"Apache-2.0"
] | null | null | null |
Codes/Data Processing.py
|
BrickerP/Investment-
|
8b57c0d157a7eaa38d693c8d42ce1bc7dc7bdde9
|
[
"Apache-2.0"
] | null | null | null |
Codes/Data Processing.py
|
BrickerP/Investment-
|
8b57c0d157a7eaa38d693c8d42ce1bc7dc7bdde9
|
[
"Apache-2.0"
] | 1
|
2022-01-07T06:25:54.000Z
|
2022-01-07T06:25:54.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 21 14:51:01 2021
@author: 75638
"""
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 10000)
def process_data(path1,path2):
'''
1. path1: file path of the factor workbook
2. path2: file path of the SP500 members workbook
3. remove anomalies greater than median+5*std or less than median-5*std
4. normalize the data
5. fill NaN values with the column median
'''
#read factor.xlsx
factor=pd.read_excel(path1,index_col=0)
#remove anomalies which are greater than median+5*std or less than median-5*std
for date in factor:
median=factor[date].quantile(0.5)
std=factor[date].std()
lower = median - 5 * std
upper = median + 5 * std
factor[date] = factor[date].clip(lower, upper)
#normalize data
for date in factor:
mean=factor[date].mean()
std=factor[date].std()
factor[date]=(factor[date]-mean)/std
# fill NaN in each column with that column's median
for date in factor:
median = factor[date].quantile(0.5)
factor[date] = factor[date].fillna(median)
#read SP500 member datas
member=pd.read_excel(path2,index_col=0)
#merge industry data
factor=pd.merge(member,factor,left_index=True,right_index=True)
# save processed data
factor.to_csv('C:\\Users\\75638\\OneDrive - UW\\Desktop\\703project\\data\\volatility.csv')
return factor
def remove_dates(data):
columns = []
for i in data:
if '20' in i:
columns.append(i[:7])
else:
columns.append(i)
data.columns = columns
return data
def Seasonal_data_fill(path):
data = pd.read_csv('{}'.format(path))
order = 2
for j in data:
if '20' in j:
year = j.split('/')[2]
month = j.split('/')[0]
month =(int)(month)
time_1 = year + '-' +str(month+1)
time_2 = year + '-' +str(month+2)
data.insert(order+1, '{}'.format(time_1), np.nan)
data.insert(order+2, '{}'.format(time_2), np.nan)
order += 3
temp = data.iloc[:,:2]
data = data.iloc[:,2:]
data = data.ffill(axis = 1)
data = pd.concat([temp, data], axis = 1)
data.columns = remove_dates(pd.read_csv('PE.csv')).columns
data = data.set_index(data.columns[0])
return data.to_csv('New {}'.format(path))
if __name__ == '__main__':
path1='C:\\Users\\75638\\OneDrive - UW\\Desktop\\703project\\original_data\\volatility.xlsx'
path2='C:\\Users\\75638\\OneDrive - UW\\Desktop\\703project\\SP500\\SP500members.xlsx'
data=process_data(path1,path2)
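# Example for the quarterly filler above (the file name is hypothetical):
# Seasonal_data_fill('PE.csv') inserts the two missing months after each
# quarterly column, forward-fills them, and writes 'New PE.csv'.
# Seasonal_data_fill('PE.csv')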
| 33.177215
| 97
| 0.591759
| 365
| 2,621
| 4.158904
| 0.336986
| 0.059289
| 0.034256
| 0.029644
| 0.194993
| 0.129117
| 0.129117
| 0.054018
| 0.054018
| 0.054018
| 0
| 0.054724
| 0.260969
| 2,621
| 79
| 98
| 33.177215
| 0.728962
| 0.145364
| 0
| 0.125
| 0
| 0
| 0.142655
| 0.107227
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053571
| false
| 0
| 0.035714
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d33f0625c53288d64064188bcbc357613405301
| 1,448
|
py
|
Python
|
tests/test_env.py
|
Majanao/pytorch-blender
|
eb5effb033094d037e7bdc2238c00806be7012ae
|
[
"MIT"
] | 381
|
2019-07-03T14:15:16.000Z
|
2022-03-30T08:58:26.000Z
|
tests/test_env.py
|
ANABUR920/pytorch-blender
|
eb5effb033094d037e7bdc2238c00806be7012ae
|
[
"MIT"
] | 18
|
2020-01-15T17:36:08.000Z
|
2021-12-31T08:37:54.000Z
|
tests/test_env.py
|
ANABUR920/pytorch-blender
|
eb5effb033094d037e7bdc2238c00806be7012ae
|
[
"MIT"
] | 34
|
2019-07-09T03:15:02.000Z
|
2022-01-13T17:36:20.000Z
|
import pytest
from pathlib import Path
from blendtorch import btt
BLENDDIR = Path(__file__).parent/'blender'
class MyEnv(btt.env.OpenAIRemoteEnv):
def __init__(self, background=True, **kwargs):
super().__init__(version='1.0.0')
self.launch(scene=BLENDDIR/'env.blend', script=BLENDDIR /
'env.blend.py', background=background, **kwargs)
# For Blender 2.9, if we pass scene='', the tests below fail since
# _env_post_step() is not called. It's currently unclear why this happens.
def _run_remote_env(background):
env = MyEnv(background=background)
obs = env.reset()
assert obs == 0.
obs, reward, done, info = env.step(0.1)
assert obs == pytest.approx(0.1)
assert reward == 0.
assert not done
assert info['count'] == 2 # 1 is already set by reset()
obs, reward, done, info = env.step(0.6)
assert obs == pytest.approx(0.6)
assert reward == 1.
assert not done
assert info['count'] == 3
for _ in range(8):
obs, reward, done, info = env.step(0.6)
assert done
obs = env.reset()
assert obs == 0.
obs, reward, done, info = env.step(0.1)
assert obs == pytest.approx(0.1)
assert reward == 0.
assert not done
assert info['count'] == 2
env.close()
@pytest.mark.background
def test_remote_env():
_run_remote_env(background=True)
def test_remote_env_ui():
_run_remote_env(background=False)
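# The tests can be selected with pytest's standard marker mechanism, e.g.
# running only the background variant:
#   pytest tests/test_env.py -m background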
| 26.814815
| 81
| 0.641575
| 208
| 1,448
| 4.322115
| 0.360577
| 0.050056
| 0.057842
| 0.07564
| 0.369299
| 0.351502
| 0.320356
| 0.320356
| 0.320356
| 0.249166
| 0
| 0.026079
| 0.232044
| 1,448
| 53
| 82
| 27.320755
| 0.782374
| 0.112569
| 0
| 0.435897
| 0
| 0
| 0.037471
| 0
| 0
| 0
| 0
| 0
| 0.384615
| 1
| 0.102564
| false
| 0
| 0.076923
| 0
| 0.205128
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d36081f930dd6c0a745b46f1b5a299e738d247f
| 20,670
|
py
|
Python
|
deepvariant/runtime_by_region_vis.py
|
tahashmi/deepvariant
|
441c1809d3290f4a20b29a0a0bbf8ecfb929a6e3
|
[
"BSD-3-Clause"
] | 4
|
2019-03-30T13:25:25.000Z
|
2020-10-14T18:47:21.000Z
|
deepvariant/runtime_by_region_vis.py
|
FrogEnthusiast7/deepvariant
|
84516dfacd1ed856a34507becb21848aa12e77a8
|
[
"BSD-3-Clause"
] | 1
|
2021-06-18T15:04:47.000Z
|
2021-06-18T15:04:47.000Z
|
deepvariant/runtime_by_region_vis.py
|
FrogEnthusiast7/deepvariant
|
84516dfacd1ed856a34507becb21848aa12e77a8
|
[
"BSD-3-Clause"
] | 1
|
2019-09-04T16:59:18.000Z
|
2019-09-04T16:59:18.000Z
|
# Copyright 2020 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
r"""Create a visual report of make_examples runtime by region.
Use this script to visualize the runtime-by-region data generated by running
make_examples with --runtime_by_region.
"""
from typing import Dict, Sequence, List, Tuple, Text, Any, Union
from absl import app
from absl import flags
import altair as alt
import pandas as pd
import tensorflow as tf
from third_party.nucleus.io import sharded_file_utils
# Altair uses a lot of method chaining, such as
# chart.mark_bar().encode(...).properties(...), so using backslash
# continuation to break this into separate lines makes the code more readable.
# pylint: disable=g-backslash-continuation
VEGA_URL = 'https://storage.googleapis.com/deepvariant/lib/vega'
FLAGS = flags.FLAGS
flags.DEFINE_string(
'input', None, 'TSV file that was produced when running make_examples '
'with --runtime_by_region. Can be sharded, e.g. /path/runtime@64.tsv.')
flags.DEFINE_string(
'title', None, 'Title will be shown at the top of the report and will '
'be used as a prefix for downloaded image files.')
flags.DEFINE_string('output', 'runtime_by_region_report.html',
'Path for the output report, which will be an html file.')
RUNTIME_COLUMNS = [
'get reads', 'find candidates', 'make pileup images', 'write outputs'
]
COUNT_COLUMNS = ['num reads', 'num candidates', 'num examples']
CSS_STYLES = """
<style>
body {
font-family: sans-serif;
}
.chart-container {
padding: 30px;
}
</style>
"""
def read_sharded_runtime_tsvs(path_string: str) -> pd.DataFrame:
"""Imports data from a single or sharded path into a pandas dataframe.
Args:
path_string: The path to the input file, which may be sharded.
Returns:
A dataframe matching the TSV file(s) but with added Task column.
"""
if sharded_file_utils.is_sharded_file_spec(path_string):
paths = sharded_file_utils.generate_sharded_filenames(path_string)
else:
paths = [path_string]
list_of_dataframes = []
for i, path in enumerate(paths):
if path.startswith('gs://'):
# Once pandas is updated to 0.24+, pd.read_csv will work for gs://
# without this workaround.
with tf.io.gfile.GFile(path) as f:
d = pd.read_csv(f, sep='\t')
else:
d = pd.read_csv(path, sep='\t')
d['Task'] = i
list_of_dataframes.append(d)
return pd.concat(list_of_dataframes, axis=0, ignore_index=True)
def format_runtime_string(raw_seconds: float) -> str:
"""Creates a nice format string from a potentially large number of seconds.
Args:
raw_seconds: A number of seconds.
Returns:
The seconds divided into hours, minutes, and remaining seconds, formatted
nicely. For example, 2h3m5.012s.
"""
minutes, seconds = divmod(raw_seconds, 60)
hours, minutes = divmod(minutes, 60)
seconds = round(seconds, 3)
output = ''
if hours > 0:
output += f'{int(hours)}h'
if minutes > 0:
output += f'{int(minutes)}m'
if seconds > 0 or not output:
output += f'{seconds}s'
return output
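# Worked example: format_runtime_string(7385.012) divides 7385.012 seconds
# into 2 hours, 3 minutes, and 5.012 seconds, returning '2h3m5.012s'.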
def calculate_totals(df: pd.DataFrame) -> pd.DataFrame:
"""Calculates total runtime, formats it nicely, and sorts by it.
Args:
df: A dataframe of runtime profiling numbers.
Returns:
The same dataframe with some additional summary columns.
"""
# 'total runtime' is a simple sum of the runtime columns.
df['total runtime'] = df[RUNTIME_COLUMNS].sum(axis=1)
# Create a formatted runtime string for tooltips.
df['Runtime'] = df['total runtime'].apply(format_runtime_string)
# Sort by descending total region runtime.
df.sort_values(by='total runtime', inplace=True, ascending=False)
return df
def summarize_by_task(df: pd.DataFrame) -> pd.DataFrame:
"""Groups regions to get the total runtime for each task.
Args:
df: A dataframe of runtime profiling numbers.
Returns:
The dataframe grouped by task.
"""
by_task = df.groupby(by=['Task']).sum()
return by_task.reset_index()
def stage_histogram(d: pd.DataFrame, title: str = '') -> alt.Chart:
"""Plots a histogram of runtimes stacked by stage.
Args:
d: A dataframe of runtimes, either by region or by task.
title: A title for the plot.
Returns:
An altair chart.
"""
columns_used = RUNTIME_COLUMNS
d = d[columns_used]
return alt.Chart(d).transform_fold(
RUNTIME_COLUMNS, as_=['Stage', 'runtime_by_stage']) \
.mark_bar(opacity=0.3) \
.encode(
x=alt.X('runtime_by_stage:Q', bin=alt.Bin(maxbins=100),
title='Runtime (seconds)'),
y=alt.Y('count()', title='Count of regions', stack=None),
color=alt.Color('Stage:N', sort=None)
).properties(title=title)
def correlation_scatter_charts(d: pd.DataFrame, title: str = '') -> alt.Chart:
"""Produces a grid of scatter plots of runtimes of stages versus covariates.
Args:
d: A pandas dataframe of runtime by regions.
title: A title for the plot.
Returns:
An altair chart
"""
columns_used = ['region', 'total runtime'] + RUNTIME_COLUMNS + COUNT_COLUMNS
d = d[columns_used]
return alt.Chart(d).mark_circle(opacity=0.1).encode(
x=alt.X(alt.repeat('column'), type='quantitative',
axis=alt.Axis(labelExpr="datum.value + 's'")),
y=alt.Y(alt.repeat('row'), type='quantitative'),
tooltip='region'
).properties(width=100, height=100) \
.repeat(
column=['total runtime'] + RUNTIME_COLUMNS,
row=COUNT_COLUMNS,
).properties(title=title)
def totals_by_stage(d: pd.DataFrame) -> alt.Chart:
"""Plots total runtimes for each stage.
Args:
d: A dataframe of runtimes.
Returns:
An altair chart.
"""
stage_totals_series = d.sum()[RUNTIME_COLUMNS]
stage_totals = pd.DataFrame(
stage_totals_series, columns=['Runtime (seconds)'])
stage_totals.reset_index(inplace=True)
stage_totals = stage_totals.rename(columns={'index': 'Stage'})
stage_totals['Runtime'] = stage_totals['Runtime (seconds)'].apply(
format_runtime_string)
return alt.Chart(stage_totals).mark_bar().encode(
x='Runtime (seconds)',
y=alt.Y('Stage', sort=None),
tooltip=['Runtime'],
fill=alt.Fill('Stage',
sort=None)).properties(title='Overall runtime by stage')
def pareto_by_task_tooltip(row: pd.Series) -> str:
"""For one row of a dataframe, computes a tooltip description.
Args:
row: A Pandas Series, one row of a dataframe containing some specific
cumulative sum columns.
Returns:
A string to show as the tooltip for a pareto curve.
"""
return (f"{row['task cumsum order'] * 100:.2f}% of regions "
f"account for {row['task cumsum fraction'] * 100:.2f}% of "
f"the runtime in task {row['Task']}")
def calculate_pareto_metrics(df_subset: pd.DataFrame) -> pd.DataFrame:
"""Calculates cumulative sums for a subset of a dataframe.
Args:
df_subset: A dataframe subset of one task.
Returns:
The same dataframe subset with some additional columns.
"""
# These are the same for all regions in the same task, for the scatter plot:
df_subset['task total runtime'] = df_subset['total runtime'].sum()
df_subset['Runtime for task'] = df_subset['task total runtime'].apply(
format_runtime_string)
df_subset['task num examples'] = df_subset['num examples'].sum()
# These are cumulative sums for the pareto curves:
df_subset['task cumsum fraction'] = df_subset['total runtime'].cumsum(
) / df_subset['total runtime'].sum()
n = len(df_subset)
df_subset['task cumsum order'] = list(map(lambda x: x / n, range(0, n)))
df_subset['tooltip'] = df_subset.apply(pareto_by_task_tooltip, axis=1)
return df_subset
def pareto_and_runtimes_by_task(df: pd.DataFrame) -> alt.Chart:
"""Creates an interactive Pareto curve and scatter plot of task runtimes.
Tracing each curve shows to what extent a small proportion of long-running
regions contribute disproportionately to the overall runtime. That is,
"The longest-running X% of regions account for Y% of the total runtime."
There is a curve for each task.
Args:
df: A dataframe of all regions.
Returns:
An altair chart.
"""
grouped = df.groupby(df['Task'], sort=False)
df = grouped.apply(calculate_pareto_metrics)
# Sample along the Pareto curve, ensuring the longest regions are shown.
if len(df) > 5000:
x = 1000
df = pd.concat([df.nlargest(x, 'total runtime'), df.sample(5000 - x)])
# Limit columns to greatly reduce the size of the html report.
columns_used = [
'task cumsum order', 'task cumsum fraction', 'tooltip', 'Task',
'task total runtime', 'task num examples', 'Runtime for task'
]
df = df[columns_used]
# Brushing on the task_scatter plot highlights the same tasks in the Pareto
# curve.
brush = alt.selection_interval()
pareto_by_task = alt.Chart(df).mark_line(size=2).encode(
x=alt.X(
'task cumsum order',
title='The longest-runtime X% of regions',
axis=alt.Axis(format='%')),
y=alt.Y(
'task cumsum fraction',
title='Account for Y% of the total runtime',
axis=alt.Axis(format='%')),
tooltip='tooltip',
color=alt.condition(brush, 'Task:N', alt.value('lightgray'))).properties(
title='Pareto curve for each task').interactive()
# This chart needs to use the same dataframe as the first chart to enable the
# brushing on one to affect the other. Using max(task) for 'text' is a
# trick that causes bundling by task to avoid showing multiple overlapping
# points which otherwise make the text look funky.
task_scatter = alt.Chart(df).mark_point(size=10).encode(
x=alt.X('max(task total runtime)', title='Runtime (seconds)'),
y=alt.Y('task num examples:Q', title='Number of examples'),
color=alt.condition(brush, 'Task:N', alt.value('lightgray')),
tooltip=['Task', 'Runtime for task']
) \
.properties(title='Total runtime for each task (drag to highlight)') \
.add_selection(brush)
return pareto_by_task | task_scatter
def individual_region_bars(small_df: pd.DataFrame,
title: Union[str, Dict[str, str]] = '') -> alt.Chart:
"""Makes a stacked bar chart with runtime of each stage for individual regions.
Args:
small_df: A dataframe of regions, each of which will be shown as a bar.
title: A title for the plot. If a dict, it should contain 'title' and/or
'subtitle'.
Returns:
An altair chart.
"""
columns_used = ['region', 'Runtime'] + RUNTIME_COLUMNS
d = small_df[columns_used]
return alt.Chart(d).transform_fold(
RUNTIME_COLUMNS, as_=['Stage', 'runtime_by_stage']) \
.mark_bar().encode(
x=alt.X('region:N', sort=None),
y=alt.Y('runtime_by_stage:Q', scale=alt.Scale(type='linear'), title='Runtime (seconds)'),
fill=alt.Fill('Stage:N', sort=None),
tooltip='Runtime:N'
).properties(title=title)
def selected_longest_and_median_regions(df: pd.DataFrame) -> alt.Chart:
"""Creates a stacked bar charts of the top 20 and median 20 regions.
Args:
df: A dataframe of all regions.
Returns:
An altair chart.
"""
num_rows = len(df)
mid = round(num_rows / 2)
return individual_region_bars(df.iloc[0:20], 'Top runtime regions') \
| individual_region_bars(df.iloc[mid-10:mid+11], 'Median runtime regions')
def top_regions_producing_zero_examples(df: pd.DataFrame) -> alt.Chart:
"""Creates a chart of the top regions that produced zero examples.
Args:
df: A dataframe of all regions.
Returns:
An altair chart.
"""
regions_with_zero_examples = df[df['num examples'] == 0]
runtime_of_zeros = regions_with_zero_examples['total runtime'].sum() / 3600
total_runtime = df['total runtime'].sum() / 3600
subtitle = (
f'Spent {runtime_of_zeros:.2f} hours processing the '
f'{len(regions_with_zero_examples)} regions that produced no examples, '
f'which is {runtime_of_zeros / total_runtime * 100:.2f}% of the total '
f'runtime of {total_runtime:.2f} hours.')
return individual_region_bars(
regions_with_zero_examples.nlargest(50, 'total runtime'),
title={
'text': 'The longest-running regions that produced no examples',
'subtitle': subtitle
})
def write_to_html_report(charts: List[Dict[Text, alt.Chart]], title: str,
subtitle: str, html_output: Any) -> None:
"""Makes the html report with all the charts inserted.
Args:
charts: A list of altair chart objects.
title: The title to show at the top of the report.
subtitle: The subtitle to show just below the title on the report.
html_output: a writable file object.
Returns:
None. Writes into the html_output file object.
"""
# Start the HTML document.
html_output.write('<!DOCTYPE html>\n<html>\n<head>')
# Add dependencies vega and vega-lite, which render the altair charts.
html_output.write('<script type="text/javascript" src="{}/vega@5"></script>'
'\n'.format(VEGA_URL))
html_output.write(
'<script type="text/javascript" src="{}/vega-lite@4.8.1"></script>'
'\n'.format(VEGA_URL))
html_output.write(
'<script type="text/javascript" src="{}/vega-embed@6"></script>'
'\n'.format(VEGA_URL))
# Add styles (CSS).
html_output.write(CSS_STYLES)
html_output.write('</head>\n<body>')
html_output.write('<h1>{}</h1>\n'.format(title))
html_output.write('<h2>{}</h2>\n'.format(subtitle))
# Make a div containing all the charts.
html_output.write('<div>')
for chart in charts:
html_output.write(
'<div class="chart-container" id="vis_{}"></div>\n'.format(chart['id']))
html_output.write('</div>')
# Add JSON vega specs and hook them up to the divs with VegaEmbed.
html_output.write('<script>\n')
for chart in charts:
html_output.write('var spec_{} = {};\n'.format(chart['id'],
chart['chart'].to_json()))
download_filename = '{}_{}'.format(title.replace(' ', '_'), chart['id'])
embed_options = {'mode': 'vega-lite', 'downloadFileName': download_filename}
html_output.write('vegaEmbed("#vis_{}", spec_{}, {})\n'.format(
chart['id'], chart['id'], embed_options))
html_output.write('</script>\n')
# Close HTML document.
html_output.write('</body></html>')
def read_data_and_make_dataframes(
input_path: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Loads data from a file into one dataframe as-is and one by task.
Args:
input_path: str, path of the input TSV file (may be sharded).
Returns:
df: A dataframe with one row per region.
by_task: A dataframe with one row per task.
"""
df = read_sharded_runtime_tsvs(input_path)
df = calculate_totals(df)
by_task = summarize_by_task(df)
return df, by_task
def make_all_charts(
df: pd.DataFrame,
by_task: pd.DataFrame) -> List[Dict[Text, Union[str, alt.Chart]]]:
"""Creates charts and puts them in a list with their ID names.
Args:
df: A dataframe with one row per region.
by_task: A dataframe with one row per task.
Returns:
list of dicts, each containing a chart and a descriptive ID.
"""
charts = [{
'id': 'total_by_stage',
'chart': totals_by_stage(by_task)
}, {
'id': 'pareto_and_runtimes_by_task',
'chart': pareto_and_runtimes_by_task(df)
}, {
'id': 'histogram_by_task',
'chart': stage_histogram(by_task, title='Stage runtimes for each task')
}, {
'id': 'selected_longest_and_median_regions',
'chart': selected_longest_and_median_regions(df)
}, {
'id': 'zero_examples',
'chart': top_regions_producing_zero_examples(df)
}]
# Altair shows a max of 5000 data points.
if len(df) <= 5000:
# With up to 5000 points, just show them all.
charts.extend([{
'id': 'histogram',
'chart': stage_histogram(df, title='Runtime by stage for all regions')
}, {
'id': 'scatter_grid',
'chart': correlation_scatter_charts(df, title='Trends for all regions')
}])
else:
# With too many points, make different subsets to show trends better.
top_100 = df.nlargest(100, 'total runtime')
top_5000 = df.nlargest(5000, 'total runtime')
# Sample the bottom 99% to avoid outliers that obscure general trends.
bottom_99_percent = df.nsmallest(int(len(df) * .99), 'total runtime')
if len(bottom_99_percent) > 5000:
bottom_99_percent = bottom_99_percent.sample(5000)
charts.extend([{
'id':
'histogram_bottom_99_percent',
'chart':
stage_histogram(
bottom_99_percent,
title='Runtime by stage for regions in the bottom 99%')
}, {
'id':
'histogram_top_100',
'chart':
stage_histogram(
top_100, title='Runtime by stage for regions in the top 100')
}, {
'id':
'scatter_grid_top_5000',
'chart':
correlation_scatter_charts(
top_5000, title='Trends for regions in the top 5000')
}, {
'id':
'scatter_grid_bottom_99_percent',
'chart':
correlation_scatter_charts(
bottom_99_percent, title='Trends for regions in the bottom 99%')
}])
return charts
def make_report(input_path: str, title: str,
html_output: tf.io.gfile.GFile) -> None:
"""Reads data, creates charts, and composes the charts into an HTML report.
Args:
input_path: Path of the input TSV file (or sharded files).
title: Title to put at the top of the report.
html_output: Writable file object where output will be written.
"""
# Load data into pandas dataframes and add summary columns.
df, by_task = read_data_and_make_dataframes(input_path)
# Build all the charts.
charts = make_all_charts(df, by_task)
# Write a subtitle with some top-level stats.
subtitle = (f'Runtime profiling for make_examples on {len(df)} regions '
f'across {len(by_task)} task{"(s)" if len(by_task) > 1 else ""}')
# Write the HTML report with all the charts.
write_to_html_report(
charts=charts, title=title, subtitle=subtitle, html_output=html_output)
def main(argv: Sequence[str]):
if len(argv) > 1:
raise app.UsageError(
'Command line parsing failure: this script does not accept '
'positional arguments, but found these extra arguments: "{}".'
''.format(str(argv[1:])))
# Add html to the output path if that is not already the suffix.
if FLAGS.output.endswith('html'):
output_filename = FLAGS.output
else:
output_filename = f'{FLAGS.output}.html'
# Start HTML document. Using GFile enables writing to GCS too.
html_output = tf.io.gfile.GFile(output_filename, 'w')
make_report(
input_path=FLAGS.input, title=FLAGS.title, html_output=html_output)
html_output.close() # Abstracted out the file open/close to enable testing.
print('Output written to:', output_filename)
if __name__ == '__main__':
flags.mark_flags_as_required(['input', 'title'])
app.run(main)
| 34.335548
| 97
| 0.677504
| 2,943
| 20,670
| 4.633367
| 0.192321
| 0.024641
| 0.0176
| 0.010267
| 0.221326
| 0.170065
| 0.125623
| 0.086242
| 0.079642
| 0.066588
| 0
| 0.011457
| 0.206144
| 20,670
| 601
| 98
| 34.392679
| 0.81955
| 0.346589
| 0
| 0.165584
| 0
| 0.006494
| 0.267398
| 0.033882
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058442
| false
| 0
| 0.022727
| 0
| 0.12987
| 0.003247
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d373f0e4790917fc2d0b3ea420a4ad7a8c76024
| 4,096
|
py
|
Python
|
xeofs/pandas/_transformer.py
|
nicrie/xeofs
|
4c0ed49b45794ce0abb641c98b82638b2faa4828
|
[
"MIT"
] | 3
|
2022-02-22T07:56:09.000Z
|
2022-03-30T10:47:20.000Z
|
xeofs/pandas/_transformer.py
|
nicrie/xeofs
|
4c0ed49b45794ce0abb641c98b82638b2faa4828
|
[
"MIT"
] | 13
|
2022-02-15T13:44:34.000Z
|
2022-03-15T22:51:01.000Z
|
xeofs/pandas/_transformer.py
|
nicrie/xeofs
|
4c0ed49b45794ce0abb641c98b82638b2faa4828
|
[
"MIT"
] | 2
|
2022-02-17T19:02:59.000Z
|
2022-02-22T07:56:15.000Z
|
from typing import Union, Iterable, List
import numpy as np
import pandas as pd
from ..models._transformer import _ArrayTransformer, _MultiArrayTransformer
class _DataFrameTransformer(_ArrayTransformer):
'''`_ArrayTransformer` wrapper for `pandas.DataFrame`.
'''
def __init__(self):
super().__init__()
def fit(self, X : pd.DataFrame, axis : Union[int, Iterable[int]] = 0):
if not isinstance(X, pd.DataFrame):
raise ValueError('This interface is for `pandas.DataFrame` only')
if isinstance(axis, list):
axis = axis[0]
# Set sample and feature index
if axis == 0:
self.index_samples = X.index
self.index_features = X.columns
elif axis == 1:
self.index_samples = X.columns
self.index_features = X.index
else:
raise ValueError('axis must be either 0 or 1')
# Fit the data
try:
super().fit(X=X.values, axis=axis)
except AttributeError:
err_msg = 'weights must be of type {:}.'.format(repr(pd.DataFrame))
raise TypeError(err_msg)
return self
def transform(self, X : pd.DataFrame) -> np.ndarray:
try:
return super().transform(X.values)
except AttributeError:
err_msg = 'weights must be of type {:}.'.format(repr(pd.DataFrame))
raise TypeError(err_msg)
def fit_transform(self, X : pd.DataFrame, axis : int = 0) -> np.ndarray:
return self.fit(X=X, axis=axis).transform(X)
def transform_weights(self, weights : pd.DataFrame) -> np.ndarray:
try:
return super().transform_weights(weights.values)
except AttributeError:
return super().transform_weights(weights)
def back_transform(self, X : np.ndarray) -> pd.DataFrame:
df = super().back_transform(X)
return pd.DataFrame(
df,
index=self.index_samples,
columns=self.index_features
)
def back_transform_eofs(self, X : np.ndarray) -> pd.DataFrame:
eofs = super().back_transform_eofs(X)
return pd.DataFrame(
eofs,
index=self.index_features,
columns=range(1, eofs.shape[-1] + 1)
)
def back_transform_pcs(self, X : np.ndarray) -> pd.DataFrame:
pcs = super().back_transform_pcs(X)
return pd.DataFrame(
pcs,
index=self.index_samples,
columns=range(1, pcs.shape[-1] + 1)
)
class _MultiDataFrameTransformer(_MultiArrayTransformer):
'Transform multiple 2D ``pd.DataFrame`` to a single 2D ``np.ndarray``.'
def __init__(self):
super().__init__()
def fit(self, X : Union[pd.DataFrame, List[pd.DataFrame]], axis : Union[int, Iterable[int]] = 0):
X = self._convert2list(X)
self.tfs = [_DataFrameTransformer().fit(x, axis=axis) for x in X]
if len(set([tf.n_valid_samples for tf in self.tfs])) > 1:
err_msg = 'All individual arrays must have same number of samples.'
raise ValueError(err_msg)
self.idx_array_sep = np.cumsum([tf.n_valid_features for tf in self.tfs])
self.axis_samples = self.tfs[0].axis_samples
return self
def transform(self, X : Union[pd.DataFrame, List[pd.DataFrame]]) -> np.ndarray:
return super().transform(X=X)
def transform_weights(self, weights : Union[pd.DataFrame, List[pd.DataFrame]]) -> np.ndarray:
return super().transform_weights(weights=weights)
def fit_transform(
self, X : Union[pd.DataFrame, List[pd.DataFrame]],
axis : Union[int, Iterable[int]] = 0
) -> np.ndarray:
return self.fit(X=X, axis=axis).transform(X)
def back_transform(self, X : np.ndarray) -> pd.DataFrame:
return super().back_transform(X=X)
def back_transform_eofs(self, X : np.ndarray) -> pd.DataFrame:
return super().back_transform_eofs(X=X)
def back_transform_pcs(self, X : np.ndarray) -> pd.DataFrame:
return super().back_transform_pcs(X=X)
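# A minimal usage sketch for the single-frame transformer, assuming samples
# along axis 0 (shapes and values are illustrative):
#
# df = pd.DataFrame(np.random.rand(10, 3))
# tf = _DataFrameTransformer()
# arr = tf.fit_transform(df, axis=0)   # 2D np.ndarray
# df_back = tf.back_transform(arr)     # round-trips to a pd.DataFrame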
| 35.310345
| 101
| 0.616211
| 517
| 4,096
| 4.736944
| 0.193424
| 0.112291
| 0.0343
| 0.0343
| 0.562679
| 0.463046
| 0.42303
| 0.42303
| 0.373622
| 0.318906
| 0
| 0.006651
| 0.265869
| 4,096
| 115
| 102
| 35.617391
| 0.807782
| 0.04126
| 0
| 0.329545
| 0
| 0
| 0.062672
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.045455
| 0.079545
| 0.420455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d38d81bebcb78fdcd3ec6d9d6e334cd582c79d5
| 1,004
|
py
|
Python
|
tests/bogus_python_model.py
|
FossilizedContainers/fossilized-controller
|
5aa14112b3728a619a37233906366c1cda2a0a77
|
[
"MIT"
] | 1
|
2022-01-24T21:54:46.000Z
|
2022-01-24T21:54:46.000Z
|
tests/bogus_python_model.py
|
FossilizedContainers/fossilized-controller
|
5aa14112b3728a619a37233906366c1cda2a0a77
|
[
"MIT"
] | null | null | null |
tests/bogus_python_model.py
|
FossilizedContainers/fossilized-controller
|
5aa14112b3728a619a37233906366c1cda2a0a77
|
[
"MIT"
] | null | null | null |
import os
import sys
import lipd
# import pythonAdapter; assumed to live in ../python-adapter/
tests_dir = os.path.dirname(os.path.realpath(__file__))
fc_dir = os.path.dirname(tests_dir)
python_adapter_dir = os.path.join(fc_dir, "python-adapter")
sys.path.append(python_adapter_dir)
import adapter
def fake_model(adapter):
# check to see inside function
print("\n---\nStart of the fake_model function\n---\n")
# the parameters are handed to you by the adapter
files = adapter.get_files()
# use the parameters given by the adapter to get the binary data of the LiPD file
lipd.readLipd(files['weldeab'])
# get the binary data of the NetCDF file
net_cdf_path = files['net_cdf']
# mark the NetCDF file as an output file
adapter.set_output_files(net_cdf_path)
adapter.set_output_files("lipd-files\\")
return
# have to call adapter in the adapter.py file as adapter.adapter
adapter = adapter.global_adapter
adapter.register(fake_model)
adapter.start_server()
| 26.421053
| 85
| 0.739044
| 157
| 1,004
| 4.55414
| 0.382166
| 0.072727
| 0.037762
| 0.044755
| 0.058741
| 0.058741
| 0
| 0
| 0
| 0
| 0
| 0
| 0.167331
| 1,004
| 37
| 86
| 27.135135
| 0.855263
| 0.34761
| 0
| 0
| 0
| 0
| 0.132921
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.210526
| 0
| 0.315789
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d38e4a873930da8bc4504369cb7f1bca6894323
| 13,421
|
py
|
Python
|
tello_control_ui.py
|
banne2266/UAV-autopilot-NCTU-2021
|
1a25d4add2de9659516d045054935e3b6e04d06d
|
[
"MIT"
] | null | null | null |
tello_control_ui.py
|
banne2266/UAV-autopilot-NCTU-2021
|
1a25d4add2de9659516d045054935e3b6e04d06d
|
[
"MIT"
] | null | null | null |
tello_control_ui.py
|
banne2266/UAV-autopilot-NCTU-2021
|
1a25d4add2de9659516d045054935e3b6e04d06d
|
[
"MIT"
] | null | null | null |
from PIL import Image
from PIL import ImageTk
import tkinter as tki
from tkinter import Toplevel, Scale
import threading
import datetime
import cv2
import os
import time
import platform
class TelloUI:
"""Wrapper class to enable the GUI."""
def __init__(self,tello,outputpath):
"""
Initialize all the elements of the GUI, supported by Tkinter.
:param tello: class that interacts with the Tello drone.
Raises:
RuntimeError: If the Tello rejects the attempt to enter command mode.
"""
self.tello = tello # videostream device
self.outputPath = outputpath # the path where pictures created by clicking the takeSnapshot button are saved
self.frame = None # frame read from h264decoder and used for pose recognition
self.thread = None # thread of the Tkinter mainloop
self.stopEvent = None
# control variables
self.distance = 0.1 # default distance for 'move' cmd
self.degree = 30 # default degree for 'cw' or 'ccw' cmd
# if the flag is True, the auto-takeoff thread will stop waiting for the response from tello
self.quit_waiting_flag = False
# initialize the root window and image panel
self.root = tki.Tk()
self.panel = None
# create buttons
self.btn_snapshot = tki.Button(self.root, text="Snapshot!",
command=self.takeSnapshot)
self.btn_snapshot.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_pause = tki.Button(self.root, text="Pause", relief="raised", command=self.pauseVideo)
self.btn_pause.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_landing = tki.Button(
self.root, text="Open Command Panel", relief="raised", command=self.openCmdWindow)
self.btn_landing.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
# start a thread that constantly polls the video sensor for
# the most recently read frame
self.stopEvent = threading.Event()
self.thread = threading.Thread(target=self.videoLoop, args=())
self.thread.start()
# set a callback to handle when the window is closed
self.root.wm_title("TELLO Controller")
self.root.wm_protocol("WM_DELETE_WINDOW", self.onClose)
# the sending_command thread will send a command to tello every 5 seconds
self.sending_command_thread = threading.Thread(target = self._sendingCommand)
def videoLoop(self):
"""
The mainloop thread of Tkinter
Raises:
RuntimeError: To get around a RunTime error that Tkinter throws due to threading.
"""
try:
# start the thread that gets the GUI image and draws the skeleton
time.sleep(0.5)
self.sending_command_thread.start()
while not self.stopEvent.is_set():
system = platform.system()
# read the frame for GUI show
self.frame = self.tello.read()
if self.frame is None or self.frame.size == 0:
continue
# transfer the format from frame to image
image = Image.fromarray(self.frame)
# we found a compatibility problem between Tkinter, PIL and macOS that can
# sometimes make the "ImageTk.PhotoImage" call take a very long time,
# so for macOS we start a new thread to execute the _updateGUIImage function.
if system =="Windows" or system =="Linux":
self._updateGUIImage(image)
else:
thread_tmp = threading.Thread(target=self._updateGUIImage,args=(image,))
thread_tmp.start()
time.sleep(0.03)
except RuntimeError as e:
print("[INFO] caught a RuntimeError")
def _updateGUIImage(self,image):
"""
Main operation to initialize the image object and update the GUI panel
"""
image = ImageTk.PhotoImage(image)
# if the panel is None, we need to initialize it
if self.panel is None:
self.panel = tki.Label(image=image)
self.panel.image = image
self.panel.pack(side="left", padx=10, pady=10)
# otherwise, simply update the panel
else:
self.panel.configure(image=image)
self.panel.image = image
def _sendingCommand(self):
"""
start a while loop that sends 'command' to tello every 5 seconds
"""
while True:
self.tello.send_command('command')
time.sleep(5)
def _setQuitWaitingFlag(self):
"""
set the variable to True; it stops the computer from waiting for a response from tello
"""
self.quit_waiting_flag = True
def openCmdWindow(self):
"""
open the command window and initialize all the buttons and text
"""
panel = Toplevel(self.root)
panel.wm_title("Command Panel")
# create text input entry
text0 = tki.Label(panel,
text='This controller maps keyboard inputs to Tello control commands\n'
'Adjust the trackbar to reset distance and degree parameter',
font='Helvetica 10 bold'
)
text0.pack(side='top')
text1 = tki.Label(panel, text=
'W - Move Tello Up\t\t\tArrow Up - Move Tello Forward\n'
'S - Move Tello Down\t\t\tArrow Down - Move Tello Backward\n'
'A - Rotate Tello Counter-Clockwise\tArrow Left - Move Tello Left\n'
'D - Rotate Tello Clockwise\t\tArrow Right - Move Tello Right',
justify="left")
text1.pack(side="top")
self.btn_landing = tki.Button(
panel, text="Land", relief="raised", command=self.telloLanding)
self.btn_landing.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_takeoff = tki.Button(
panel, text="Takeoff", relief="raised", command=self.telloTakeOff)
self.btn_takeoff.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
# binding arrow keys to drone control
self.tmp_f = tki.Frame(panel, width=100, height=2)
self.tmp_f.bind('<KeyPress-w>', self.on_keypress_w)
self.tmp_f.bind('<KeyPress-s>', self.on_keypress_s)
self.tmp_f.bind('<KeyPress-a>', self.on_keypress_a)
self.tmp_f.bind('<KeyPress-d>', self.on_keypress_d)
self.tmp_f.bind('<KeyPress-Up>', self.on_keypress_up)
self.tmp_f.bind('<KeyPress-Down>', self.on_keypress_down)
self.tmp_f.bind('<KeyPress-Left>', self.on_keypress_left)
self.tmp_f.bind('<KeyPress-Right>', self.on_keypress_right)
self.tmp_f.pack(side="bottom")
self.tmp_f.focus_set()
self.btn_landing = tki.Button(
panel, text="Flip", relief="raised", command=self.openFlipWindow)
self.btn_landing.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.distance_bar = Scale(panel, from_=0.02, to=5, tickinterval=0.01, digits=3, label='Distance(m)',
resolution=0.01)
self.distance_bar.set(0.2)
self.distance_bar.pack(side="left")
self.btn_distance = tki.Button(panel, text="Reset Distance", relief="raised",
command=self.updateDistancebar,
)
self.btn_distance.pack(side="left", fill="both",
expand="yes", padx=10, pady=5)
self.degree_bar = Scale(panel, from_=1, to=360, tickinterval=10, label='Degree')
self.degree_bar.set(30)
self.degree_bar.pack(side="right")
self.btn_distance = tki.Button(panel, text="Reset Degree", relief="raised", command=self.updateDegreebar)
self.btn_distance.pack(side="right", fill="both",
expand="yes", padx=10, pady=5)
def openFlipWindow(self):
"""
open the flip window and initialize all the buttons and text
"""
panel = Toplevel(self.root)
panel.wm_title("Gesture Recognition")
self.btn_flipl = tki.Button(
panel, text="Flip Left", relief="raised", command=self.telloFlip_l)
self.btn_flipl.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_flipr = tki.Button(
panel, text="Flip Right", relief="raised", command=self.telloFlip_r)
self.btn_flipr.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_flipf = tki.Button(
panel, text="Flip Forward", relief="raised", command=self.telloFlip_f)
self.btn_flipf.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_flipb = tki.Button(
panel, text="Flip Backward", relief="raised", command=self.telloFlip_b)
self.btn_flipb.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
def takeSnapshot(self):
"""
save the current frame of the video as a jpg file and put it into outputpath
"""
# grab the current timestamp and use it to construct the filename
ts = datetime.datetime.now()
filename = "{}.jpg".format(ts.strftime("%Y-%m-%d_%H-%M-%S"))
p = os.path.sep.join((self.outputPath, filename))
# save the file
cv2.imwrite(p, cv2.cvtColor(self.frame, cv2.COLOR_RGB2BGR))
print("[INFO] saved {}".format(filename))
def pauseVideo(self):
"""
Toggle the freeze/unfreeze state of the video
"""
if self.btn_pause.config('relief')[-1] == 'sunken':
self.btn_pause.config(relief="raised")
self.tello.video_freeze(False)
else:
self.btn_pause.config(relief="sunken")
self.tello.video_freeze(True)
def telloTakeOff(self):
return self.tello.takeoff()
def telloLanding(self):
return self.tello.land()
def telloFlip_l(self):
return self.tello.flip('l')
def telloFlip_r(self):
return self.tello.flip('r')
def telloFlip_f(self):
return self.tello.flip('f')
def telloFlip_b(self):
return self.tello.flip('b')
def telloCW(self, degree):
return self.tello.rotate_cw(degree)
def telloCCW(self, degree):
return self.tello.rotate_ccw(degree)
def telloMoveForward(self, distance):
return self.tello.move_forward(distance)
def telloMoveBackward(self, distance):
return self.tello.move_backward(distance)
def telloMoveLeft(self, distance):
return self.tello.move_left(distance)
def telloMoveRight(self, distance):
return self.tello.move_right(distance)
def telloUp(self, dist):
return self.tello.move_up(dist)
def telloDown(self, dist):
return self.tello.move_down(dist)
def updateTrackBar(self):
self.my_tello_hand.setThr(self.hand_thr_bar.get())
def updateDistancebar(self):
self.distance = self.distance_bar.get()
print ('reset distance to %.1f' % self.distance)
def updateDegreebar(self):
self.degree = self.degree_bar.get()
print ('reset degree to %d' % self.degree)
def on_keypress_w(self, event):
print ("up %d m" % self.distance)
self.telloUp(self.distance)
def on_keypress_s(self, event):
print ("down %d m" % self.distance)
self.telloDown(self.distance)
def on_keypress_a(self, event):
print ("ccw %d degree" % self.degree)
self.tello.rotate_ccw(self.degree)
def on_keypress_d(self, event):
print ("cw %d m" % self.degree)
self.tello.rotate_cw(self.degree)
def on_keypress_up(self, event):
print ("forward %d m" % self.distance)
self.telloMoveForward(self.distance)
def on_keypress_down(self, event):
print ("backward %d m" % self.distance)
self.telloMoveBackward(self.distance)
def on_keypress_left(self, event):
print ("left %d m" % self.distance)
self.telloMoveLeft(self.distance)
def on_keypress_right(self, event):
print ("right %d m" % self.distance)
self.telloMoveRight(self.distance)
def on_keypress_enter(self, event):
if self.frame is not None:
self.registerFace()
self.tmp_f.focus_set()
def onClose(self):
"""
set the stop event, cleanup the camera, and allow the rest of
the quit process to continue
"""
print("[INFO] closing...")
self.stopEvent.set()
del self.tello
self.root.quit()
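# A minimal usage sketch (the `drone` object and output path are hypothetical;
# TelloUI expects an object exposing read(), send_command(), takeoff(), etc.):
#
# ui = TelloUI(drone, './img/')
# ui.root.mainloop()  # start the Tkinter event loop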
| 37.177285
| 113
| 0.580583
| 1,628
| 13,421
| 4.699631
| 0.203931
| 0.024703
| 0.027447
| 0.026663
| 0.319305
| 0.166253
| 0.120507
| 0.102732
| 0.092798
| 0.084172
| 0
| 0.010194
| 0.312942
| 13,421
| 360
| 114
| 37.280556
| 0.819542
| 0.154534
| 0
| 0.118943
| 0
| 0
| 0.107172
| 0.002176
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15859
| false
| 0
| 0.044053
| 0.061674
| 0.268722
| 0.057269
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d3b019f7105ea70804aca52b749a325dbd4f20c
| 416
|
py
|
Python
|
Python/ML_DL/DL/Neural-Networks-Demystified-master/partOne.py
|
vbsteja/code
|
0c8f4dc579f5de21b6c55fe6e65c3c8eb5473687
|
[
"Apache-2.0"
] | 3
|
2018-08-06T15:34:58.000Z
|
2022-02-11T14:19:05.000Z
|
Python/ML_DL/DL/Neural-Networks-Demystified-master/partOne.py
|
vbsteja/code
|
0c8f4dc579f5de21b6c55fe6e65c3c8eb5473687
|
[
"Apache-2.0"
] | null | null | null |
Python/ML_DL/DL/Neural-Networks-Demystified-master/partOne.py
|
vbsteja/code
|
0c8f4dc579f5de21b6c55fe6e65c3c8eb5473687
|
[
"Apache-2.0"
] | 3
|
2018-08-06T15:35:01.000Z
|
2020-08-08T07:53:07.000Z
|
# Neural Networks Demystified
# Part 1: Data + Architecture
#
# Supporting code for short YouTube series on artificial neural networks.
#
# Stephen Welch
# @stephencwelch
import numpy as np
# X = (hours sleeping, hours studying), y = Score on test
X = np.array(([3,5], [5,1], [10,2]), dtype=float)
y = np.array(([75], [82], [93]), dtype=float)
# Normalize
X = X/np.amax(X, axis=0)
y = y/100 #Max test score is 100
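# After normalization, np.amax(X, axis=0) is [10, 5], so
# X becomes [[0.3, 1.0], [0.5, 0.2], [1.0, 0.4]]
# and y becomes [[0.75], [0.82], [0.93]].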
| 24.470588
| 73
| 0.673077
| 68
| 416
| 4.117647
| 0.676471
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061047
| 0.173077
| 416
| 17
| 74
| 24.470588
| 0.752907
| 0.584135
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d3c91e42dac2041a621585dba8f1dfdc1e88107
| 19,048
|
py
|
Python
|
manubot/process/util.py
|
benstear/manubot
|
df184a5c7e5eb98894a3edb43d9772d1ac3e01ab
|
[
"BSD-3-Clause"
] | null | null | null |
manubot/process/util.py
|
benstear/manubot
|
df184a5c7e5eb98894a3edb43d9772d1ac3e01ab
|
[
"BSD-3-Clause"
] | null | null | null |
manubot/process/util.py
|
benstear/manubot
|
df184a5c7e5eb98894a3edb43d9772d1ac3e01ab
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import logging
import os
import pathlib
import re
import textwrap
import warnings
from typing import List, Optional
import jinja2
import pandas
import requests
import requests_cache
import yaml
from manubot.util import read_serialized_data, read_serialized_dict
from manubot.process.bibliography import load_manual_references
from manubot.process.ci import get_continuous_integration_parameters
from manubot.process.metadata import (
get_header_includes,
get_thumbnail_url,
get_manuscript_urls,
get_software_versions,
)
from manubot.process.manuscript import (
datetime_now,
get_manuscript_stats,
get_text,
)
from manubot.cite.citekey import (
citekey_to_csl_item,
shorten_citekey,
is_valid_citekey,
standardize_citekey,
)
def check_collisions(citekeys_df):
"""
Check for short_citekey hash collisions
"""
collision_df = citekeys_df[["standard_citekey", "short_citekey"]].drop_duplicates()
collision_df = collision_df[collision_df.short_citekey.duplicated(keep=False)]
if not collision_df.empty:
logging.error(f"OMF! Hash collision. Congratulations.\n{collision_df}")
return collision_df
def check_multiple_citation_strings(citekeys_df):
"""
Identify different citation strings referring to the same reference.
"""
message = textwrap.dedent(
f"""\
{len(citekeys_df)} unique citations strings extracted from text
{citekeys_df.standard_citekey.nunique()} unique standard citations\
"""
)
logging.info(message)
multi_df = citekeys_df[citekeys_df.standard_citekey.duplicated(keep=False)]
if not multi_df.empty:
table = multi_df.to_string(
index=False, columns=["standard_citekey", "manuscript_citekey"]
)
logging.warning(f"Multiple citekeys detected for the same reference:\n{table}")
return multi_df
def read_variable_files(paths: List[str], variables: Optional[dict] = None) -> dict:
"""
Read multiple serialized data files into a user_variables dictionary.
Provide `paths` (a list of URLs or local file paths).
Paths can optionally have a namespace prepended.
For example:
```python
paths = [
'https://git.io/vbkqm', # update the dictionary's top-level
'namespace_1=https://git.io/vbkqm', # store under 'namespace_1' key
'namespace_2=some_local_path.json', # store under 'namespace_2' key
]
```
If a namespace is not provided, the JSON must contain a dictionary as its
top level. Namespaces should consist only of ASCII alphanumeric characters
(includes underscores, first character cannot be numeric).
Pass a dictionary to `variables` to update an existing dictionary rather
than create a new dictionary.
"""
if variables is None:
variables = {}
for path in paths:
logging.info(f"Reading user-provided templating variables at {path!r}")
# Match only namespaces that are valid jinja2 variable names
# http://jinja.pocoo.org/docs/2.10/api/#identifier-naming
match = re.match(r"([a-zA-Z_][a-zA-Z0-9_]*)=(.+)", path)
if match:
namespace, path = match.groups()
logging.info(
f"Using the {namespace!r} namespace for template variables from {path!r}"
)
try:
if match:
obj = {namespace: read_serialized_data(path)}
else:
obj = read_serialized_dict(path)
except Exception:
logging.exception(f"Error reading template variables from {path!r}")
continue
assert isinstance(obj, dict)
conflicts = variables.keys() & obj.keys()
if conflicts:
logging.warning(
f"Template variables in {path!r} overwrite existing "
"values for the following keys:\n" + "\n".join(conflicts)
)
variables.update(obj)
logging.debug(
f"Reading user-provided templating variables complete:\n"
f"{json.dumps(variables, indent=2, ensure_ascii=False)}"
)
return variables
def add_author_affiliations(variables: dict) -> dict:
"""
Edit variables to contain numbered author affiliations. Specifically,
add a list of affiliation_numbers for each author and add a list of
affiliations to the top-level of variables. If no authors have any
affiliations, variables is left unmodified.
"""
rows = list()
for author in variables["authors"]:
if "affiliations" not in author:
continue
if not isinstance(author["affiliations"], list):
warnings.warn(
f"Expected list for {author['name']}'s affiliations. "
f"Assuming multiple affiliations are `; ` separated. "
f"Please switch affiliations to a list.",
category=DeprecationWarning,
)
author["affiliations"] = author["affiliations"].split("; ")
for affiliation in author["affiliations"]:
rows.append((author["name"], affiliation))
if not rows:
return variables
affil_map_df = pandas.DataFrame(rows, columns=["name", "affiliation"])
affiliation_df = affil_map_df[["affiliation"]].drop_duplicates()
affiliation_df["affiliation_number"] = range(1, 1 + len(affiliation_df))
affil_map_df = affil_map_df.merge(affiliation_df)
name_to_numbers = {
name: sorted(df.affiliation_number) for name, df in affil_map_df.groupby("name")
}
for author in variables["authors"]:
author["affiliation_numbers"] = name_to_numbers.get(author["name"], [])
variables["affiliations"] = affiliation_df.to_dict(orient="records")
return variables
def load_variables(args) -> dict:
"""
Read `metadata.yaml` and files specified by `--template-variables-path` to generate
manuscript variables available for jinja2 templating.
Returns a dictionary, referred to as `variables`, with the following keys:
- `pandoc`: a dictionary for passing options to Pandoc via the `yaml_metadata_block`.
Fields in `pandoc` are either generated by Manubot or hard-coded by the user if `metadata.yaml`
includes a `pandoc` dictionary.
- `manubot`: a dictionary for manubot-related information and metadata.
Fields in `manubot` are either generated by Manubot or hard-coded by the user if `metadata.yaml`
includes a `manubot` dictionary.
- All fields from a manuscript's `metadata.yaml` that are not interpreted by Manubot are
copied to `variables`. Interpreted fields include `pandoc`, `manubot`, `title`,
`keywords`, `authors` (formerly `author_info`, now deprecated), `lang`, and `thumbnail`.
- User-specified fields inserted according to the `--template-variables-path` option.
User-specified variables take highest precedence and can overwrite values for existing
keys like `pandoc` or `manubot` (dangerous).
"""
# Generated manuscript variables
variables = {"pandoc": {}, "manubot": {}}
# Read metadata which contains pandoc_yaml_metadata
# as well as authors information.
if args.meta_yaml_path.is_file():
metadata = read_serialized_dict(args.meta_yaml_path)
else:
metadata = {}
logging.warning(
f"missing {args.meta_yaml_path} file with yaml_metadata_block for pandoc"
)
# Interpreted keys that are intended for pandoc
move_to_pandoc = "title", "keywords", "lang"
for key in move_to_pandoc:
if key in metadata:
variables["pandoc"][key] = metadata.pop(key)
# Add date to metadata
now = datetime_now()
logging.info(
f"Using {now:%Z} timezone.\n"
f"Dating manuscript with the current datetime: {now.isoformat()}"
)
variables["pandoc"]["date-meta"] = now.date().isoformat()
variables["manubot"]["date"] = f"{now:%B} {now.day}, {now.year}"
# Process authors metadata
if "author_info" in metadata:
authors = metadata.pop("author_info", [])
warnings.warn(
"metadata.yaml: 'author_info' is deprecated. Use 'authors' instead.",
category=DeprecationWarning,
)
else:
authors = metadata.pop("authors", [])
if authors is None:
authors = []
variables["pandoc"]["author-meta"] = [author["name"] for author in authors]
variables["manubot"]["authors"] = authors
add_author_affiliations(variables["manubot"])
# Set repository version metadata for CI builds
ci_params = get_continuous_integration_parameters()
if ci_params:
variables["manubot"]["ci_source"] = ci_params
# Add manuscript URLs
variables["manubot"].update(get_manuscript_urls(metadata.pop("html_url", None)))
# Add software versions
variables["manubot"].update(get_software_versions())
# Add thumbnail URL if present
thumbnail_url = get_thumbnail_url(metadata.pop("thumbnail", None))
if thumbnail_url:
variables["manubot"]["thumbnail_url"] = thumbnail_url
# Update variables with metadata.yaml pandoc/manubot dicts
for key in "pandoc", "manubot":
dict_ = metadata.pop(key, {})
if not isinstance(dict_, dict):
logging.warning(
f"load_variables expected metadata.yaml field {key!r} to be a dict."
f"Received a {dict_.__class__.__name__!r} instead."
)
continue
variables[key].update(dict_)
# Update variables with uninterpreted metadata.yaml fields
variables.update(metadata)
# Update variables with user-provided variables here
variables = read_variable_files(args.template_variables_path, variables)
# Add header-includes metadata with <meta> information for the HTML output's <head>
variables["pandoc"]["header-includes"] = get_header_includes(variables)
assert args.skip_citations
# Extend Pandoc's metadata.bibliography field with manual references paths
bibliographies = variables["pandoc"].get("bibliography", [])
if isinstance(bibliographies, str):
bibliographies = [bibliographies]
assert isinstance(bibliographies, list)
bibliographies.extend(args.manual_references_paths)
bibliographies = list(map(os.fspath, bibliographies))
variables["pandoc"]["bibliography"] = bibliographies
# enable pandoc-manubot-cite option to write bibliography to a file
variables["pandoc"]["manubot-output-bibliography"] = os.fspath(args.references_path)
variables["pandoc"]["manubot-output-citekeys"] = os.fspath(args.citations_path)
variables["pandoc"]["manubot-requests-cache-path"] = os.fspath(
args.requests_cache_path
)
variables["pandoc"]["manubot-clear-requests-cache"] = args.clear_requests_cache
return variables
def get_citekeys_df(citekeys: list, citekey_aliases: dict = {}):
"""
Generate and return citekeys_df.
citekeys_df is a pandas.DataFrame with the following columns:
- manuscript_citekey: citation keys extracted from the manuscript content files.
- detagged_citekey: manuscript_citekey but with tag citekeys dereferenced
- standard_citekey: detagged_citekey standardized
- short_citekey: standard_citekey hashed to create a shortened citekey
"""
citekeys_df = pandas.DataFrame(
{"manuscript_citekey": list(citekeys)}
).drop_duplicates()
citekeys_df["detagged_citekey"] = citekeys_df.manuscript_citekey.map(
lambda citekey: citekey_aliases.get(citekey, citekey)
)
for citation in citekeys_df.detagged_citekey:
is_valid_citekey(citation, allow_raw=True)
citekeys_df["standard_citekey"] = citekeys_df.detagged_citekey.map(
standardize_citekey
)
citekeys_df["short_citekey"] = citekeys_df.standard_citekey.map(shorten_citekey)
citekeys_df = citekeys_df.sort_values(["standard_citekey", "detagged_citekey"])
check_collisions(citekeys_df)
check_multiple_citation_strings(citekeys_df)
return citekeys_df
def read_citations_tsv(path) -> dict:
"""
Read citekey aliases from a citation-tags.tsv file.
"""
if not path.is_file():
logging.info(
f"no citation tags file at {path} "
"Not reading citekey_aliases from citation-tags.tsv."
)
return {}
tag_df = pandas.read_csv(path, sep="\t")
na_rows_df = tag_df[tag_df.isnull().any(axis="columns")]
if not na_rows_df.empty:
logging.error(
f"{path} contains rows with missing values:\n"
f"{na_rows_df}\n"
"This error can be caused by using spaces rather than tabs to delimit fields.\n"
"Proceeding to reread TSV with delim_whitespace=True."
)
tag_df = pandas.read_csv(path, delim_whitespace=True)
tag_df["manuscript_citekey"] = "tag:" + tag_df.tag
tag_df = tag_df.rename(columns={"citation": "detagged_citekey"})
citekey_aliases = dict(
zip(tag_df["manuscript_citekey"], tag_df["detagged_citekey"])
)
return citekey_aliases
def write_citekeys_tsv(citekeys_df, path):
if not path:
return
citekeys_df.to_csv(path, sep="\t", index=False)
def _citation_tags_to_reference_links(args) -> str:
"""
Convert citation-tags.tsv to markdown reference link syntax
"""
citekey_aliases = read_citations_tsv(args.citation_tags_path)
if not citekey_aliases:
return ""
text = "\n\n"
for key, value in citekey_aliases.items():
text += f"[@{key}]: {value}\n"
logging.warning(
"citation-tags.tsv is deprecated. "
f"Consider deleting citation-tags.tsv and inserting the following paragraph into your Markdown content:{text}"
)
return text
def generate_csl_items(
citekeys: list,
manual_refs: dict = {},
requests_cache_path: Optional[str] = None,
clear_requests_cache: Optional[bool] = False,
) -> list:
"""
Generate CSL (citeproc) items for standard_citekeys in citekeys_df.
Parameters:
- citekeys: list of standard_citekeys
- manual_refs: mapping from standard_citekey to csl_item for manual references
- requests_cache_path: path for the requests cache database.
Passed as cache_name to `requests_cache.install_cache`.
requests_cache may append an extension to this path, so it is not always the exact
path to the cache. If None, do not use requests_cache.
- clear_requests_cache: If True, clear the requests cache before generating citekey metadata.
"""
# Deduplicate citations
citekeys = list(dict.fromkeys(citekeys))
# Install cache
if requests_cache_path is not None:
requests # require `import requests` in case this is essential for monkey patching by requests_cache.
requests_cache.install_cache(requests_cache_path, include_get_headers=True)
cache = requests_cache.get_cache()
if clear_requests_cache:
logging.info("Clearing requests-cache")
requests_cache.clear()
logging.info(
f"requests-cache starting with {len(cache.responses)} cached responses"
)
csl_items = list()
failures = list()
for standard_citekey in citekeys:
if standard_citekey in manual_refs:
csl_items.append(manual_refs[standard_citekey])
continue
elif standard_citekey.startswith("raw:"):
logging.error(
f"CSL JSON Data with a standard_citekey of {standard_citekey!r} not found in manual-references.json. "
"Metadata must be provided for raw citekeys."
)
failures.append(standard_citekey)
try:
csl_item = citekey_to_csl_item(standard_citekey)
csl_items.append(csl_item)
except Exception:
logging.exception(f"Citeproc retrieval failure for {standard_citekey!r}")
failures.append(standard_citekey)
# Uninstall cache
if requests_cache_path is not None:
logging.info(
f"requests-cache finished with {len(cache.responses)} cached responses"
)
requests_cache.uninstall_cache()
if failures:
message = "CSL JSON Data retrieval failed for the following standardized citation keys:\n{}".format(
"\n".join(failures)
)
logging.error(message)
return csl_items
def _generate_csl_items(args, citekeys_df):
"""
    Generate CSL (citeproc) items for standard_citekeys in citekeys_df.
Writes references.json to disk and logs warnings for potential problems.
"""
# Read manual references (overrides) in JSON CSL
manual_refs = load_manual_references(args.manual_references_paths)
# Retrieve CSL Items
csl_items = generate_csl_items(
citekeys=citekeys_df.standard_citekey.unique(),
manual_refs=manual_refs,
requests_cache_path=args.requests_cache_path,
clear_requests_cache=args.clear_requests_cache,
)
# Write CSL JSON bibliography for Pandoc.
write_csl_json(csl_items, args.references_path)
return csl_items
def write_csl_json(csl_items, path):
"""
Write CSL Items to a JSON file at `path`.
If `path` evaluates as False, do nothing.
"""
if not path:
return
path = pathlib.Path(path)
with path.open("w", encoding="utf-8") as write_file:
json.dump(csl_items, write_file, indent=2, ensure_ascii=False)
write_file.write("\n")
def template_with_jinja2(text, variables):
"""
Template using jinja2 with the variables dictionary unpacked as keyword
arguments.
"""
jinja_environment = jinja2.Environment(
loader=jinja2.BaseLoader(),
undefined=jinja2.make_logging_undefined(logging.getLogger()),
autoescape=False,
comment_start_string="{##",
comment_end_string="##}",
extensions=["jinja2.ext.do", "jinja2.ext.loopcontrols"],
)
template = jinja_environment.from_string(text)
return template.render(**variables)
def prepare_manuscript(args):
"""
Compile manuscript, creating manuscript.md and references.json as inputs
for pandoc.
"""
text = get_text(args.content_directory)
assert args.skip_citations
text += _citation_tags_to_reference_links(args)
variables = load_variables(args)
variables["manubot"]["manuscript_stats"] = get_manuscript_stats(text)
with args.variables_path.open("w", encoding="utf-8") as write_file:
json.dump(variables, write_file, ensure_ascii=False, indent=2)
write_file.write("\n")
text = template_with_jinja2(text, variables)
# Write manuscript for pandoc
with args.manuscript_path.open("w", encoding="utf-8") as write_file:
yaml.dump(
variables["pandoc"],
write_file,
default_flow_style=False,
explicit_start=True,
explicit_end=True,
width=float("inf"),
)
write_file.write("\n")
write_file.write(text)
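# --- Usage sketch (added for illustration; not part of the original module).
# Exercises template_with_jinja2 and write_csl_json from above, relying only
# on this module's own imports (jinja2, json, pathlib). The template variables
# and the CSL item are made-up examples.
if __name__ == "__main__":
    rendered = template_with_jinja2(
        "Figure count: {{ manubot.figures }}", {"manubot": {"figures": 3}}
    )
    assert rendered == "Figure count: 3"
    example_items = [{"id": "doi:10.1000/example", "type": "article-journal"}]
    write_csl_json(example_items, "references-example.json")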
| 37.496063
| 118
| 0.679546
| 2,318
| 19,048
| 5.406816
| 0.195427
| 0.031118
| 0.012208
| 0.011968
| 0.129418
| 0.078034
| 0.040373
| 0.03399
| 0.028724
| 0.026171
| 0
| 0.001896
| 0.224801
| 19,048
| 507
| 119
| 37.57002
| 0.846878
| 0.249108
| 0
| 0.147147
| 0
| 0.003003
| 0.211811
| 0.028016
| 0
| 0
| 0
| 0
| 0.012012
| 1
| 0.042042
| false
| 0
| 0.057057
| 0
| 0.147147
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d4008c47be6196efe901a8e83cca011533d0bf1
| 2,648
|
py
|
Python
|
pytorch_ares/pytorch_ares/attack_torch/mim.py
|
thu-ml/realsafe
|
474d549aa402b4cdd5e3629d23d035c31b60a360
|
[
"MIT"
] | 107
|
2020-06-15T09:55:11.000Z
|
2020-12-20T11:27:11.000Z
|
pytorch_ares/pytorch_ares/attack_torch/mim.py
|
haichen-ber/ares
|
474d549aa402b4cdd5e3629d23d035c31b60a360
|
[
"MIT"
] | 7
|
2020-06-14T03:00:18.000Z
|
2020-12-07T07:10:10.000Z
|
pytorch_ares/pytorch_ares/attack_torch/mim.py
|
haichen-ber/ares
|
474d549aa402b4cdd5e3629d23d035c31b60a360
|
[
"MIT"
] | 19
|
2020-06-14T08:35:33.000Z
|
2020-12-19T13:43:41.000Z
|
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from pytorch_ares.attack_torch.utils import loss_adv
class MIM(object):
    '''Momentum Iterative Method (MIM) attack'''
def __init__(self, net, epsilon, p, stepsize, steps, decay_factor, data_name,target, loss, device):
self.epsilon = epsilon
self.p = p
self.net = net
self.decay_factor = decay_factor
self.stepsize = stepsize
self.target = target
self.steps = steps
self.loss = loss
self.data_name = data_name
self.device = device
if self.data_name=="cifar10" and self.target:
raise AssertionError('cifar10 dont support targeted attack')
def forward(self, image, label, target_labels):
image, label = image.to(self.device), label.to(self.device)
if target_labels is not None:
target_labels = target_labels.to(self.device)
batchsize = image.shape[0]
advimage = image
momentum = torch.zeros_like(image).detach()
        # momentum iterative update to craft the adversarial example
for i in range(self.steps):
advimage = advimage.clone().detach().requires_grad_(True) # clone the advimage as the next iteration input
netOut = self.net(advimage)
loss = loss_adv(self.loss, netOut, label, target_labels, self.target, self.device)
grad = torch.autograd.grad(loss, [advimage])[0].detach()
grad_norm = torch.norm(nn.Flatten()(grad), p=1, dim=1)
grad = grad / grad_norm.view([-1]+[1]*(len(grad.shape)-1))
grad = grad + momentum*self.decay_factor
momentum = grad
if self.p==np.inf:
updates = grad.sign()
else:
normVal = torch.norm(grad.view(batchsize, -1), self.p, 1)
updates = grad/normVal.view(batchsize, 1, 1, 1)
updates = updates*self.stepsize
advimage = advimage+updates
# project the disturbed image to feasible set if needed
delta = advimage-image
if self.p==np.inf:
delta = torch.clamp(delta, -self.epsilon, self.epsilon)
else:
normVal = torch.norm(delta.view(batchsize, -1), self.p, 1)
mask = normVal<=self.epsilon
scaling = self.epsilon/normVal
scaling[mask] = 1
delta = delta*scaling.view(batchsize, 1, 1, 1)
advimage = image+delta
            advimage = torch.clamp(advimage, 0, 1)  # for cifar10 data in (-1, 1), adjust this clamp range accordingly
return advimage
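# --- Usage sketch (added for illustration; not part of the original file).
# All settings below are hypothetical; in particular, the loss string must be
# one that loss_adv in pytorch_ares.attack_torch.utils actually accepts.
#
# device = torch.device("cpu")
# net = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10)).to(device)
# attack = MIM(net, epsilon=8 / 255, p=np.inf, stepsize=2 / 255, steps=10,
#              decay_factor=1.0, data_name="imagenet", target=False,
#              loss="ce", device=device)
# adv = attack.forward(torch.rand(4, 3, 32, 32),
#                      torch.randint(0, 10, (4,)), target_labels=None)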
| 39.522388
| 118
| 0.583837
| 324
| 2,648
| 4.691358
| 0.305556
| 0.007895
| 0.036842
| 0.011842
| 0.063158
| 0.026316
| 0
| 0
| 0
| 0
| 0
| 0.015427
| 0.314577
| 2,648
| 67
| 119
| 39.522388
| 0.822039
| 0.064955
| 0
| 0.072727
| 0
| 0
| 0.017423
| 0
| 0
| 0
| 0
| 0
| 0.018182
| 1
| 0.036364
| false
| 0
| 0.109091
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d4029e498cad9d638e5fe5f4c3a65f28490da96
| 303
|
py
|
Python
|
src/utils/templatetags/menubutton.py
|
pwelzel/bornhack-website
|
af794e6a2fba06e09626259c7768feb30ff394be
|
[
"BSD-3-Clause"
] | null | null | null |
src/utils/templatetags/menubutton.py
|
pwelzel/bornhack-website
|
af794e6a2fba06e09626259c7768feb30ff394be
|
[
"BSD-3-Clause"
] | null | null | null |
src/utils/templatetags/menubutton.py
|
pwelzel/bornhack-website
|
af794e6a2fba06e09626259c7768feb30ff394be
|
[
"BSD-3-Clause"
] | null | null | null |
from django import template
register = template.Library()
@register.simple_tag(takes_context=True)
def menubuttonclass(context, appname):
if appname == context['request'].resolver_match.func.view_class.__module__.split(".")[0]:
return "btn-primary"
else:
return "btn-default"
| 25.25
| 93
| 0.716172
| 36
| 303
| 5.805556
| 0.805556
| 0.086124
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003906
| 0.155116
| 303
| 11
| 94
| 27.545455
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0.099338
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d424aaa1fdb6fb518af8c5169d61b82bae9ef0f
| 1,928
|
py
|
Python
|
ares/defense/randomization.py
|
KuanKuanQAQ/ares
|
40dbefc18f6438e1812021fe6d6c3195f22ca295
|
[
"MIT"
] | 206
|
2020-12-31T09:43:11.000Z
|
2022-03-30T07:02:41.000Z
|
ares/defense/randomization.py
|
afoolboy/ares
|
89610d41fdde194e4ad916d29961aaed73383692
|
[
"MIT"
] | 7
|
2021-01-26T06:45:44.000Z
|
2022-02-26T05:25:48.000Z
|
ares/defense/randomization.py
|
afoolboy/ares
|
89610d41fdde194e4ad916d29961aaed73383692
|
[
"MIT"
] | 61
|
2020-12-29T14:02:41.000Z
|
2022-03-26T14:21:10.000Z
|
''' The randomization defense method, which applies random rescaling and padding to the input. '''
import tensorflow as tf
from ares.defense.input_transformation import input_transformation
def randomize(xs, scale_min=0.875, pad_value=0.0):
''' Apply random rescaling and padding to xs.
:param xs: A batch of inputs for some classifier.
:param scale_min: The random rescaling rate would be chosen between ``scale_min`` and 1.0.
:param pad_value: ``constant_values`` parameter for the ``tf.pad`` method.
:return: A new tensor with same shape and dtype as xs.
'''
ratio = tf.random.uniform((), minval=scale_min, maxval=1.0)
height, width = tf.cast(xs.shape[1].value * ratio, tf.int32), tf.cast(xs.shape[2].value * ratio, tf.int32)
xs_rescaled = tf.image.resize(xs, (height, width), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True, preserve_aspect_ratio=False)
height_rem, width_rem = xs.shape[1].value - height, xs.shape[2].value - width
    pad_left = tf.random.uniform((), 0, width_rem, dtype=tf.int32)
    pad_right = width_rem - pad_left
    pad_top = tf.random.uniform((), 0, height_rem, dtype=tf.int32)
pad_bottom = height_rem - pad_top
xs_padded = tf.pad(xs_rescaled, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]],
constant_values=pad_value)
xs_padded.set_shape(xs.shape)
return xs_padded
def randomization(scale_min=0.875, pad_value=0.0):
    ''' A decorator to apply random rescaling and padding to the input of the classifier.
:param scale_min: The random rescaling rate would be chosen between ``scale_min`` and 1.0.
:param pad_value: ``constant_values`` parameter for the ``tf.pad`` method.
'''
def args_fn(_):
return (scale_min, pad_value)
def kwargs_fn(_):
return {}
return lambda rs_class: input_transformation(rs_class, randomize, args_fn, kwargs_fn)
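# --- Usage sketch (added for illustration; not part of the original file).
# Decorating a hypothetical ares classifier class wires `randomize` into its
# input pipeline via input_transformation:
#
# @randomization(scale_min=0.9, pad_value=0.0)
# class RandomizedResNet(SomeAresClassifier):  # SomeAresClassifier is assumed
#     pass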
| 43.818182
| 110
| 0.688797
| 289
| 1,928
| 4.401384
| 0.304498
| 0.050314
| 0.035377
| 0.018868
| 0.268868
| 0.240566
| 0.240566
| 0.240566
| 0.205975
| 0.205975
| 0
| 0.023196
| 0.195021
| 1,928
| 43
| 111
| 44.837209
| 0.796392
| 0.320539
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.090909
| 0.090909
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d493476e5ae3fc5c2137c7a547ce012434fae4f
| 4,927
|
py
|
Python
|
inflateutils/exportmesh.py
|
arpruss/inflatemesh
|
ab4abfc7794fd4cf96f41bb797e1b2a61f687a46
|
[
"MIT"
] | 8
|
2017-11-30T14:03:25.000Z
|
2021-03-02T03:16:01.000Z
|
inflateutils/exportmesh.py
|
arpruss/inflatemesh
|
ab4abfc7794fd4cf96f41bb797e1b2a61f687a46
|
[
"MIT"
] | 2
|
2019-03-15T04:10:04.000Z
|
2021-01-11T17:44:31.000Z
|
inflateutils/exportmesh.py
|
arpruss/inflatemesh
|
ab4abfc7794fd4cf96f41bb797e1b2a61f687a46
|
[
"MIT"
] | 2
|
2018-04-08T10:59:39.000Z
|
2021-01-28T03:37:57.000Z
|
from struct import pack
from .vector import *
from .formatdecimal import decimal
from numbers import Number
import os
import sys
try:
basestring
except NameError:  # Python 3 has no basestring
basestring = str
def isColorTriangleList(polys):
return isinstance(polys[0][1][0][0], Number)
def toPolyhedra(polys):
if isColorTriangleList(polys):
return [ (polys[0][0], list(face for rgb,face in polys)) ]
else:
return polys
def toMesh(polys):
if isColorTriangleList(polys):
return polys
else:
output = []
for rgb,polyhedron in polys:
for face in polyhedron:
output.append((rgb,face))
return output
def describeColor(c):
if c is None:
return "undef";
elif isinstance(c, str):
return c
else:
return "[%s,%s,%s]" % tuple(decimal(component) for component in c)
def toSCADModule(polys, moduleName, digitsAfterDecimal=9, colorOverride=None):
"""
INPUT:
polys: list of (color,polyhedra) pairs (counterclockwise triangles), or a list of (color,triangle) pairs (TODO: currently uses first color for all in latter case)
moduleName: OpenSCAD module name
OUTPUT: string with OpenSCAD code implementing the polys
"""
polys = toPolyhedra(polys)
scad = []
scad.append("module " +moduleName+ "() {")
for rgb,poly in polys:
if colorOverride != "" and (colorOverride or rgb):
line = " color(%s) " % describeColor(colorOverride if colorOverride else tuple(min(max(c,0.),1.0) for c in rgb))
else:
line = " "
pointsDict = {}
i = 0
line += "polyhedron(points=["
points = []
for face in poly:
for v in reversed(face):
if tuple(v) not in pointsDict:
pointsDict[tuple(v)] = i
points.append( ("[%s,%s,%s]") % tuple(decimal(x,digitsAfterDecimal) for x in v) )
i += 1
line += ",".join(points)
line += "], faces=["
line += ",".join( "[" + ",".join(str(pointsDict[tuple(v)]) for v in reversed(face)) + "]" for face in poly ) + "]"
line += ");"
scad.append(line)
scad.append("}\n")
return "\n".join(scad)
def saveSCAD(filename, polys, moduleName="object1", quiet=False):
"""
filename: filename to write OpenSCAD file
polys: list of (color,polyhedra) pairs (counterclockwise triangles)
moduleName: OpenSCAD module name
quiet: give no status message if set
"""
if not quiet: sys.stderr.write("Saving %s\n" % filename)
if filename:
with open(filename, "w") as f:
f.write(toSCADModule(polys, moduleName))
f.write("\n" + moduleName + "();\n")
else:
sys.stdout.write(toSCADModule(polys, moduleName))
sys.stdout.write("\n" + moduleName + "();\n")
def saveSTL(filename, mesh, swapYZ=False, quiet=False):
"""
filename: filename to save STL file
mesh: list of (color,triangle) pairs (counterclockwise)
swapYZ: should Y/Z axes be swapped?
quiet: give no status message if set
"""
mesh = toMesh(mesh)
if not quiet: sys.stderr.write("Saving %s\n" % filename)
minY = float("inf")
minVector = Vector(float("inf"),float("inf"),float("inf"))
numTriangles = 0
if swapYZ:
matrix = Matrix( (1,0,0), (0,0,-1), (0,1,0) )
else:
matrix = Matrix.identity(3)
mono = True
for rgb,triangle in mesh:
if rgb is not None:
mono = False
numTriangles += 1
for vertex in triangle:
vertex = matrix*vertex
minVector = Vector(min(minVector[i], vertex[i]) for i in range(3))
minVector -= Vector(0.001,0.001,0.001) # make sure all STL coordinates are strictly positive as per Wikipedia
def writeSTL(write):
write(pack("80s",b''))
write(pack("<I",numTriangles))
for rgb,tri in mesh:
if mono:
color = 0
else:
if rgb is None:
rgb = (255,255,255)
else:
rgb = tuple(min(255,max(0,int(0.5 + 255 * comp))) for comp in rgb)
color = 0x8000 | ( (rgb[0] >> 3) << 10 ) | ( (rgb[1] >> 3) << 5 ) | ( (rgb[2] >> 3) << 0 )
normal = (Vector(tri[1])-Vector(tri[0])).cross(Vector(tri[2])-Vector(tri[0])).normalize()
write(pack("<3f", *(matrix*normal)))
for vertex in tri:
write(pack("<3f", *(matrix*(vertex-minVector))))
write(pack("<H", color))
if filename:
with open(filename, "wb") as f:
writeSTL(f.write)
else:
if sys.platform == "win32":
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
writeSTL(lambda data : os.write(sys.stdout.fileno(), data))
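# --- Usage sketch (added for illustration; not part of the original file).
# Save one red triangle (counterclockwise vertex order) as STL and OpenSCAD,
# using the package's own Vector class imported above; file names are
# placeholders.
if __name__ == "__main__":
    tri = (Vector(0, 0, 0), Vector(1, 0, 0), Vector(0, 1, 0))
    mesh = [((1.0, 0.0, 0.0), tri)]  # list of (color, triangle) pairs
    saveSTL("triangle.stl", mesh, swapYZ=False)
    saveSCAD("triangle.scad", mesh, moduleName="triangle")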
| 33.517007
| 166
| 0.558149
| 601
| 4,927
| 4.574043
| 0.27787
| 0.003638
| 0.004365
| 0.022554
| 0.201528
| 0.120771
| 0.090215
| 0.069116
| 0.029101
| 0.029101
| 0
| 0.024005
| 0.306677
| 4,927
| 147
| 167
| 33.517007
| 0.780738
| 0.137
| 0
| 0.151786
| 0
| 0
| 0.040317
| 0
| 0.008929
| 0
| 0.00144
| 0.006803
| 0
| 1
| 0.071429
| false
| 0
| 0.0625
| 0.008929
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d4b0bc52d1482cd0028c140868e692cfb38b3c0
| 3,982
|
py
|
Python
|
Assignment1/Identification/match_module.py
|
arywatt/FDS_2020_2021
|
392f360b219c6ef5e2c685da1f3c8aab7415ce32
|
[
"MIT"
] | null | null | null |
Assignment1/Identification/match_module.py
|
arywatt/FDS_2020_2021
|
392f360b219c6ef5e2c685da1f3c8aab7415ce32
|
[
"MIT"
] | null | null | null |
Assignment1/Identification/match_module.py
|
arywatt/FDS_2020_2021
|
392f360b219c6ef5e2c685da1f3c8aab7415ce32
|
[
"MIT"
] | 1
|
2020-10-29T08:38:42.000Z
|
2020-10-29T08:38:42.000Z
|
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import histogram_module
import dist_module
def rgb2gray(rgb):
r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
# model_images - list of file names of model images
# query_images - list of file names of query images
#
# dist_type - string which specifies distance type: 'chi2', 'l2', 'intersect'
# hist_type - string which specifies histogram type: 'grayvalue', 'dxdy', 'rgb', 'rg'
#
# note: use functions 'get_dist_by_name', 'get_hist_by_name' and 'is_grayvalue_hist' to obtain
# handles to distance and histogram functions, and to find out whether histogram function
# expects grayvalue or color image
def find_best_match(model_images, query_images, dist_type, hist_type, num_bins):
hist_isgray = histogram_module.is_grayvalue_hist(hist_type)
model_hists = compute_histograms(model_images, hist_type, hist_isgray, num_bins)
query_hists = compute_histograms(query_images, hist_type, hist_isgray, num_bins)
D = np.zeros((len(model_images), len(query_images)))
    # compute the distance for each (model, query) pair
for j, query in enumerate(query_hists):
for i, model in enumerate(model_hists):
D[i, j] = dist_module.get_dist_by_name(model, query, dist_type)
best_match = [] # to save best matches
    # for each query, find the best model
for j in range(len(query_images)):
query_matches = D[:, j] # get query columns from matrix
argmin = np.argmin(query_matches) # get index with minimum distance
best_match.append(argmin) # save index for query
best_match = np.array(best_match) # array of best match for each query
return best_match, D
def compute_histograms(image_list, hist_type, hist_isgray, num_bins):
image_hist = []
    # Compute the histogram for each image and append it to image_hist
# ... (your code here)
for img in image_list:
img_color = np.array(Image.open(img))
        # if the histogram type is grayvalue, use the gray image;
        # otherwise use the RGB image
img_to_process = rgb2gray(img_color) if hist_isgray else img_color.astype('double')
# We compute histogram for image
hist = histogram_module.get_hist_by_name(img=img_to_process,
num_bins_gray=num_bins,
hist_name=hist_type
)
image_hist.append(hist)
return image_hist
# For each image file from 'query_images' find and visualize the 5 nearest images from 'model_image'.
#
# Note: use the previously implemented function 'find_best_match'
# Note: use subplot command to show all the images in the same Python figure, one row per query image
def show_neighbors(model_images, query_images, dist_type, hist_type, num_bins):
plt.figure()
num_nearest = 5 # show the top-5 neighbors
# ... (your code here)
_, D = find_best_match(model_images=model_images,
query_images=query_images,
dist_type=dist_type,
hist_type=hist_type,
num_bins=num_bins
)
Q = len(query_images)
pos = 0
for j in range(Q):
query_matches = D[:, j]
best_args = np.argsort(query_matches)[:num_nearest]
query_img = query_images[j]
pos += 1
        plt.subplot(Q, 6, pos)
        plt.imshow(np.array(Image.open(query_img)), vmin=0, vmax=255)
plt.title(f'Q{j}')
for ind in range(len(best_args)):
pos += 1
model_ind = best_args[ind]
model_img = model_images[model_ind]
            plt.subplot(Q, 6, pos)
            plt.imshow(np.array(Image.open(model_img)), vmin=0, vmax=255)
plt.title(f'MO.{model_ind}')
plt.show()
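# --- Usage sketch (added for illustration; not part of the original file).
# File names are placeholders; dist_type/hist_type follow the options listed
# in the comments above ('chi2', 'l2', 'intersect' / 'grayvalue', 'dxdy',
# 'rgb', 'rg').
#
# model_images = ['model_0.png', 'model_1.png', 'model_2.png']
# query_images = ['query_0.png', 'query_1.png']
# best_match, D = find_best_match(model_images, query_images,
#                                 dist_type='chi2', hist_type='rgb', num_bins=8)
# show_neighbors(model_images, query_images, 'chi2', 'rgb', 8)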
| 33.745763
| 101
| 0.633852
| 563
| 3,982
| 4.26643
| 0.250444
| 0.054954
| 0.035387
| 0.036636
| 0.17985
| 0.146128
| 0.11657
| 0.090758
| 0.070774
| 0.070774
| 0
| 0.013213
| 0.27775
| 3,982
| 117
| 102
| 34.034188
| 0.821975
| 0.301356
| 0
| 0.096774
| 0
| 0
| 0.008724
| 0
| 0
| 0
| 0
| 0.008547
| 0
| 1
| 0.064516
| false
| 0
| 0.080645
| 0
| 0.193548
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d4c8cdbc546fb237f365ef954c77cb12a3738d8
| 1,566
|
py
|
Python
|
dycco/__main__.py
|
rojalator/dycco
|
84ace8727aef84bb3d886cdaa3d3aef1089f1935
|
[
"MIT"
] | null | null | null |
dycco/__main__.py
|
rojalator/dycco
|
84ace8727aef84bb3d886cdaa3d3aef1089f1935
|
[
"MIT"
] | 1
|
2022-03-22T07:35:15.000Z
|
2022-03-22T09:15:44.000Z
|
dycco/__main__.py
|
rojalator/dycco
|
84ace8727aef84bb3d886cdaa3d3aef1089f1935
|
[
"MIT"
] | null | null | null |
import argparse
import logging
import sys
from .dycco import document
def main(paths, output_dir, use_ascii: bool, escape_html: bool, single_file: bool):
try:
document(paths, output_dir, use_ascii, escape_html, single_file)
except IOError as e:
logging.error('Unable to open file: %s', e)
return 1
except Exception as e:
logging.error('An error occurred: %s', e)
return 1
else:
return 0
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(prog='dycco', description='Literate-style documentation generator.')
arg_parser.add_argument('source_file', nargs='+', default=sys.stdin, help='Source files to document')
arg_parser.add_argument('-o', '--output-dir', default='docs', help='Output directory (will be created if necessary)')
arg_parser.add_argument('-a', '--asciidoc3', action='store_true', default=False, dest='use_ascii',
help='Process with asciidoc3 instead of markdown (you will have to install asciidoc3, of course)')
arg_parser.add_argument('-e', '--escape-html', action='store_true', default=False, dest='escape_html',
help='Run the documentation through html.escape() before markdown or asciidoc3')
arg_parser.add_argument('-f', '--single-file', action='store_true', default=False, dest='single_file',
help='Just produce a .md or .adoc file in single-column to be processed externally')
args = arg_parser.parse_args()
sys.exit(main(args.source_file, args.output_dir, args.use_ascii, args.escape_html, args.single_file))
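# --- Usage sketch (added for illustration; not part of the original file).
# Programmatic call, bypassing argparse (the path is a placeholder):
#
#   from dycco.__main__ import main
#   exit_code = main(['my_module.py'], 'docs', use_ascii=False,
#                    escape_html=False, single_file=False)
#
# Command-line equivalent: python -m dycco -o docs my_module.py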
| 46.058824
| 121
| 0.707535
| 219
| 1,566
| 4.872146
| 0.43379
| 0.059044
| 0.056232
| 0.093721
| 0.128397
| 0.08716
| 0
| 0
| 0
| 0
| 0
| 0.00536
| 0.166028
| 1,566
| 33
| 122
| 47.454545
| 0.811639
| 0
| 0
| 0.074074
| 0
| 0
| 0.344189
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.148148
| 0
| 0.296296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d4c9607e3defd3816cf4fbd7853e01e09dcb111
| 14,354
|
py
|
Python
|
jumpy/jumpy/ndarray.py
|
rghwer/testdocs
|
8fafa40407411ed7a3f8216e691e42e0c7d32083
|
[
"Apache-2.0"
] | 13,006
|
2015-02-13T18:35:31.000Z
|
2022-03-18T12:11:44.000Z
|
jumpy/jumpy/ndarray.py
|
pxiuqin/deeplearning4j
|
e11ddf3c24d355b43d36431687b807c8561aaae4
|
[
"Apache-2.0"
] | 5,319
|
2015-02-13T08:21:46.000Z
|
2019-06-12T14:56:50.000Z
|
jumpy/jumpy/ndarray.py
|
pxiuqin/deeplearning4j
|
e11ddf3c24d355b43d36431687b807c8561aaae4
|
[
"Apache-2.0"
] | 4,719
|
2015-02-13T22:48:55.000Z
|
2022-03-22T07:25:36.000Z
|
################################################################################
# Copyright (c) 2015-2018 Skymind, Inc.
#
# This program and the accompanying materials are made available under the
# terms of the Apache License, Version 2.0 which is available at
# https://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: Apache-2.0
################################################################################
from .java_classes import *
import numpy as np
import ctypes
import warnings
native_ops = NativeOpsHolder.getInstance().getDeviceNativeOps()
# DATA TYPE MANAGEMENT
DOUBLE = DataType.DOUBLE
FLOAT = DataType.FLOAT
HALF = DataType.HALF
LONG = DataType.LONG
INT = DataType.INT
SHORT = DataType.SHORT
UBYTE = DataType.UBYTE
BYTE = DataType.BYTE
BOOL = DataType.BOOL
UTF8 = DataType.UTF8
COMPRESSED = DataType.COMPRESSED
UNKNOWN = DataType.UNKNOWN
SUPPORTED_JAVA_DTYPES = [
DOUBLE,
FLOAT,
HALF,
LONG,
INT,
SHORT,
BOOL
#UTF8
]
SUPPORTED_PYTHON_DTYPES = [
np.float64,
np.float32,
np.float16,
np.int64,
np.int32,
np.int16,
np.bool_
#np.str_
]
_PY2J = {SUPPORTED_PYTHON_DTYPES[i] : SUPPORTED_JAVA_DTYPES[i] for i in range(len(SUPPORTED_JAVA_DTYPES))}
_J2PY = {SUPPORTED_JAVA_DTYPES[i] : SUPPORTED_PYTHON_DTYPES[i] for i in range(len(SUPPORTED_JAVA_DTYPES))}
def _dtype_py2j(dtype):
if isinstance(dtype, str):
dtype = np.dtype(dtype).type
elif isinstance(dtype, np.dtype):
dtype = dtype.type
jtype = _PY2J.get(dtype)
if jtype is None:
raise NotImplementedError("Unsupported type: " + dtype.name)
return jtype
def _dtype_j2py(dtype):
pytype = _J2PY.get(dtype)
if pytype is None:
raise NotImplementedError("Unsupported type: " + (str(dtype)))
return pytype
def set_context_dtype(dtype):
'''
Sets the dtype for nd4j
# Arguments
dtype: 'float' or 'double'
'''
dtype_map = {
'float32': 'float',
'float64': 'double'
}
dtype = dtype_map.get(dtype, dtype)
if dtype not in ['float', 'double']:
raise ValueError("Invalid dtype '{}'. Available dtypes are 'float' and 'double'.".format(dtype))
dtype_ = DataTypeUtil.getDtypeFromContext(dtype)
DataTypeUtil.setDTypeForContext(dtype_)
if get_context_dtype() != dtype:
warnings.warn("Can not set context dtype now. Set it at the beginning of your program.")
def get_context_dtype():
'''
Returns the nd4j dtype
'''
dtype = DataTypeUtil.getDtypeFromContext()
return DataTypeUtil.getDTypeForName(dtype)
_refs = []
def _from_numpy(np_array):
'''
Convert numpy array to nd4j array
'''
pointer_address, _ = np_array.__array_interface__['data']
_refs.append(np_array)
pointer = native_ops.pointerForAddress(pointer_address)
size = np_array.size
pointer.limit(size)
jdtype = _dtype_py2j(np_array.dtype)
'''
mapping = {
DOUBLE: DoublePointer,
FLOAT: FloatPointer,
HALF: HalfPointer,
LONG: LongPointer,
INT: IntPointer,
SHORT: ShortPointer,
BOOL: BoolPointer
}
pc = mapping[jdtype]
#pointer = pc(pointer)
'''
buff = Nd4j.createBuffer(pointer, size, jdtype)
assert buff.address() == pointer_address
_refs.append(buff)
elem_size = buff.getElementSize()
assert elem_size == np_array.dtype.itemsize
strides = np_array.strides
strides = [dim / elem_size for dim in strides]
shape = np_array.shape
nd4j_array = Nd4j.create(buff, shape, strides, 0)
assert buff.address() == nd4j_array.data().address()
return nd4j_array
def _to_numpy(nd4j_array):
'''
Convert nd4j array to numpy array
'''
buff = nd4j_array.data()
address = buff.pointer().address()
dtype = nd4j_array.dataType().toString()
mapping = {
'DOUBLE': ctypes.c_double,
'FLOAT': ctypes.c_float,
'HALF': ctypes.c_short,
'LONG': ctypes.c_long,
'INT': ctypes.c_int,
'SHORT': ctypes.c_short,
'BOOL': ctypes.c_bool
}
Pointer = ctypes.POINTER(mapping[dtype])
pointer = ctypes.cast(address, Pointer)
np_array = np.ctypeslib.as_array(pointer, tuple(nd4j_array.shape()))
return np_array
def _indarray(x):
typ = type(x)
if typ is INDArray:
return x
elif typ is ndarray:
return x.array
elif 'numpy' in str(typ):
return _from_numpy(x)
elif typ in (list, tuple):
return _from_numpy(np.array(x))
elif typ in (int, float):
return Nd4j.scalar(x)
else:
raise Exception('Data type not understood :' + str(typ))
def _nparray(x):
typ = type(x)
if typ is INDArray:
return ndarray(x).numpy()
elif typ is ndarray:
return x.numpy()
elif 'numpy' in str(typ):
return x
elif typ in (list, tuple):
return np.array(x)
elif typ in (int, float):
return np.array(x)
else:
raise Exception('Data type not understood :' + str(typ))
def broadcast_like(y, x):
xs = x.shape()
ys = y.shape()
if xs == ys:
return y
_xs = tuple(xs)
_ys = tuple(ys)
nx = len(xs)
ny = len(ys)
if nx > ny:
diff = nx - ny
ys = ([1] * diff) + ys
y = y.reshape(ys)
ny = nx
elif ny > nx:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
yt = []
rep_y = False
for xd, yd in zip(xs, ys):
if xd == yd:
yt.append(1)
elif xd == 1:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
elif yd == 1:
yt.append(xd)
rep_y = True
else:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
if rep_y:
y = y.repmat(*yt)
return y
def broadcast(x, y):
xs = x.shape()
ys = y.shape()
if xs == ys:
return x, y
_xs = tuple(xs)
_ys = tuple(ys)
nx = len(xs)
ny = len(ys)
if nx > ny:
diff = nx - ny
ys = ([1] * diff) + ys
y = y.reshape(*ys)
ny = nx
elif ny > nx:
diff = ny - nx
xs = ([1] * diff) + xs
x = x.reshape(*xs)
nx = ny
xt = []
yt = []
rep_x = False
rep_y = False
for xd, yd in zip(xs, ys):
if xd == yd:
xt.append(1)
yt.append(1)
elif xd == 1:
xt.append(yd)
yt.append(1)
rep_x = True
elif yd == 1:
xt.append(1)
yt.append(xd)
rep_y = True
else:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
if rep_x:
x = Nd4j.tile(x, *xt)
if rep_y:
try:
y = Nd4j.tile(y, *yt)
        except:  # the original retries the identical call; a failure here simply re-raises
            y = Nd4j.tile(y, *yt)
return x, y
class ndarray(object):
def __init__(self, data, dtype=None):
# we ignore dtype for now
typ = type(data)
if 'nd4j' in typ.__name__:
# Note that we don't make a copy here
self.array = data
elif typ is ndarray:
self.array = data.array.dup()
else:
if typ is not np.ndarray:
data = np.array(data)
self.array = _from_numpy(data)
def numpy(self):
try:
return self.np_array
except AttributeError:
self.np_array = _to_numpy(self.array)
return self.np_array
@property
def size(self):
return self.array.length()
@property
def shape(self):
return tuple(self.array.shape())
@shape.setter
def shape(self, value):
arr = self.reshape(value)
self.array = arr.array
@property
def ndim(self):
return len(self.array.shape())
    def __getitem__(self, key):
        # note: this early numpy fallback makes the nd4j indexing code below unreachable
        return ndarray(self.numpy()[key])
if type(key) is int:
return ndarray(self.array.get(NDArrayIndex.point(key)))
if type(key) is slice:
start = key.start
stop = key.stop
step = key.step
if start is None:
start = 0
if stop is None:
shape = self.array.shape()
if shape[0] == 1:
stop = shape[1]
else:
stop = shape[0]
if stop - start <= 0:
return None
if step is None or step == 1:
return ndarray(self.array.get(NDArrayIndex.interval(start, stop)))
else:
return ndarray(self.array.get(NDArrayIndex.interval(start, step, stop)))
if type(key) is list:
raise NotImplementedError(
'Sorry, this type of indexing is not supported yet.')
if type(key) is tuple:
key = list(key)
shape = self.array.shape()
ndim = len(shape)
nk = len(key)
key += [slice(None)] * (ndim - nk)
args = []
for i, dim in enumerate(key):
if type(dim) is int:
args.append(NDArrayIndex.point(dim))
elif type(dim) is slice:
if dim == slice(None):
args.append(NDArrayIndex.all())
else:
start = dim.start
stop = dim.stop
step = dim.step
if start is None:
start = 0
if stop is None:
stop = shape[i]
if stop - start <= 0:
return None
if step is None or step == 1:
args.append(NDArrayIndex.interval(start, stop))
else:
args.append(NDArrayIndex.interval(
start, step, stop))
elif type(dim) in (list, tuple):
raise NotImplementedError(
'Sorry, this type of indexing is not supported yet.')
return ndarray(self.array.get(*args))
    def __setitem__(self, key, other):
        # note: this early numpy fallback makes the nd4j assignment code below unreachable
        self.numpy()[key] = _nparray(other)
        return
other = _indarray(other)
view = self[key]
if view is None:
return
view = view.array
other = broadcast_like(other, view)
view.assign(other)
    def __add__(self, other):
        # numpy fallback; the same unreachable nd4j pattern repeats in the dunders below
        return ndarray(self.numpy() + _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.add(y))
def __sub__(self, other):
return ndarray(self.numpy() - _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.sub(y))
def __mul__(self, other):
return ndarray(self.numpy() * _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.mul(y))
def __div__(self, other):
return ndarray(self.numpy() / _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.div(y))
def __pow__(self, other):
return ndarray(self.numpy() ** _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(Transforms.pow(x, y))
def __iadd__(self, other):
self.numpy().__iadd__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.addi(other)
else:
x, y = broadcast(self.array, other)
self.array = x.add(y)
return self
def __isub__(self, other):
self.numpy().__isub__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.subi(other)
else:
x, y = broadcast(self.array, other)
self.array = x.sub(y)
return self
def __imul__(self, other):
self.numpy().__imul__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.muli(other)
else:
x, y = broadcast(self.array, other)
self.array = x.mul(y)
return self
def __idiv__(self, other):
self.numpy().__idiv__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.divi(other)
else:
x, y = broadcast(self.array, other)
self.array = x.div(y)
return self
def __ipow__(self, other):
self.numpy().__ipow__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
            self.array = Transforms.pow(self.array, other)  # the original called divi() here, a copy-paste slip
else:
x, y = broadcast(self.array, other)
self.array = Transforms.pow(x, y)
return self
def __getattr__(self, attr):
        from . import ops  # package-relative import; the bare "import ops" was Python 2 style
f = getattr(ops, attr)
setattr(ndarray, attr, f)
return getattr(self, attr)
def __int__(self):
if self.array.length() == 1:
return self.array.getInt(0)
raise Exception('Applicable only for scalars')
def __float__(self):
if self.array.length() == 1:
return self.array.getDouble(0)
raise Exception('Applicable only for scalars')
@property
def T(self):
return self.transpose()
def array(*args, **kwargs):
return ndarray(*args, **kwargs)
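# --- Usage sketch (added for illustration; not part of the original file).
# Requires a working nd4j backend on the Java classpath, so it is shown
# commented out rather than as a runnable test.
#
# a = ndarray(np.ones((2, 3)))
# b = ndarray(np.arange(3))
# c = a + b            # broadcast to shape (2, 3) via broadcast()
# print(c.numpy())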
| 27.980507
| 106
| 0.544448
| 1,713
| 14,354
| 4.42732
| 0.161705
| 0.056962
| 0.026108
| 0.019778
| 0.396493
| 0.370649
| 0.337553
| 0.320675
| 0.307489
| 0.281646
| 0
| 0.008914
| 0.335656
| 14,354
| 512
| 107
| 28.035156
| 0.786388
| 0.0565
| 0
| 0.41
| 0
| 0
| 0.045182
| 0
| 0
| 0
| 0
| 0
| 0.0075
| 1
| 0.0825
| false
| 0
| 0.0125
| 0.0125
| 0.2425
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d4e94cca5bcc101399e2e8aec4db86507599854
| 4,839
|
py
|
Python
|
torchaudio/datasets/libritts.py
|
hahaxun/audio
|
87a1886ecfa83b398a2a9a09d9a94bd9dabc5cf5
|
[
"BSD-2-Clause"
] | 1
|
2021-04-20T09:04:24.000Z
|
2021-04-20T09:04:24.000Z
|
torchaudio/datasets/libritts.py
|
hahaxun/audio
|
87a1886ecfa83b398a2a9a09d9a94bd9dabc5cf5
|
[
"BSD-2-Clause"
] | null | null | null |
torchaudio/datasets/libritts.py
|
hahaxun/audio
|
87a1886ecfa83b398a2a9a09d9a94bd9dabc5cf5
|
[
"BSD-2-Clause"
] | null | null | null |
import os
from typing import Tuple
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
download_url,
extract_archive,
walk_files,
)
URL = "train-clean-100"
FOLDER_IN_ARCHIVE = "LibriTTS"
_CHECKSUMS = {
"http://www.openslr.org/60/dev-clean.tar.gz": "0c3076c1e5245bb3f0af7d82087ee207",
"http://www.openslr.org/60/dev-other.tar.gz": "815555d8d75995782ac3ccd7f047213d",
"http://www.openslr.org/60/test-clean.tar.gz": "7bed3bdb047c4c197f1ad3bc412db59f",
"http://www.openslr.org/60/test-other.tar.gz": "ae3258249472a13b5abef2a816f733e4",
"http://www.openslr.org/60/train-clean-100.tar.gz": "4a8c202b78fe1bc0c47916a98f3a2ea8",
"http://www.openslr.org/60/train-clean-360.tar.gz": "a84ef10ddade5fd25df69596a2767b2d",
"http://www.openslr.org/60/train-other-500.tar.gz": "7b181dd5ace343a5f38427999684aa6f",
}
def load_libritts_item(
fileid: str,
path: str,
ext_audio: str,
ext_original_txt: str,
ext_normalized_txt: str,
) -> Tuple[Tensor, int, str, str, int, int, str]:
speaker_id, chapter_id, segment_id, utterance_id = fileid.split("_")
utterance_id = fileid
normalized_text = utterance_id + ext_normalized_txt
normalized_text = os.path.join(path, speaker_id, chapter_id, normalized_text)
original_text = utterance_id + ext_original_txt
original_text = os.path.join(path, speaker_id, chapter_id, original_text)
file_audio = utterance_id + ext_audio
file_audio = os.path.join(path, speaker_id, chapter_id, file_audio)
# Load audio
waveform, sample_rate = torchaudio.load(file_audio)
# Load original text
with open(original_text) as ft:
original_text = ft.readline()
# Load normalized text
with open(normalized_text, "r") as ft:
normalized_text = ft.readline()
return (
waveform,
sample_rate,
original_text,
normalized_text,
int(speaker_id),
int(chapter_id),
utterance_id,
)
class LIBRITTS(Dataset):
"""Create a Dataset for LibriTTS.
Args:
root (str): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from,
            or the type of the dataset to download.
Allowed type values are ``"dev-clean"``, ``"dev-other"``, ``"test-clean"``,
``"test-other"``, ``"train-clean-100"``, ``"train-clean-360"`` and
``"train-other-500"``. (default: ``"train-clean-100"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"LibriTTS"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_ext_original_txt = ".original.txt"
_ext_normalized_txt = ".normalized.txt"
_ext_audio = ".wav"
def __init__(
self,
root: str,
url: str = URL,
folder_in_archive: str = FOLDER_IN_ARCHIVE,
download: bool = False,
) -> None:
if url in [
"dev-clean",
"dev-other",
"test-clean",
"test-other",
"train-clean-100",
"train-clean-360",
"train-other-500",
]:
ext_archive = ".tar.gz"
base_url = "http://www.openslr.org/resources/60/"
url = os.path.join(base_url, url + ext_archive)
basename = os.path.basename(url)
archive = os.path.join(root, basename)
basename = basename.split(".")[0]
folder_in_archive = os.path.join(folder_in_archive, basename)
self._path = os.path.join(root, folder_in_archive)
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _CHECKSUMS.get(url, None)
download_url(url, root, hash_value=checksum)
extract_archive(archive)
walker = walk_files(
self._path, suffix=self._ext_audio, prefix=False, remove_suffix=True
)
self._walker = list(walker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str, int, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
tuple: ``(waveform, sample_rate, original_text, normalized_text, speaker_id,
chapter_id, utterance_id)``
"""
fileid = self._walker[n]
return load_libritts_item(
fileid,
self._path,
self._ext_audio,
self._ext_original_txt,
self._ext_normalized_txt,
)
def __len__(self) -> int:
return len(self._walker)
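# --- Usage sketch (added for illustration; not part of the original file).
# The root directory is a placeholder and download=True fetches a large
# archive from openslr.org.
#
# dataset = LIBRITTS("./data", url="dev-clean", download=True)
# (waveform, sample_rate, original_text, normalized_text,
#  speaker_id, chapter_id, utterance_id) = dataset[0]
# print(sample_rate, waveform.shape)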
| 32.046358
| 98
| 0.619963
| 589
| 4,839
| 4.887946
| 0.229202
| 0.020841
| 0.038902
| 0.047239
| 0.207711
| 0.207711
| 0.148663
| 0.097951
| 0.06669
| 0.041681
| 0
| 0.052543
| 0.260591
| 4,839
| 150
| 99
| 32.26
| 0.752096
| 0.198388
| 0
| 0
| 0
| 0
| 0.191614
| 0.059448
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040404
| false
| 0
| 0.060606
| 0.010101
| 0.171717
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d4ed462393daeadb0c9bc3293879acfa5af3ab3
| 2,164
|
py
|
Python
|
Others/Source/19/19.2/barh_test.py
|
silence0201/Learn-Python
|
662da7c0e74221cedb445ba17d5cb1cd3af41c86
|
[
"MIT"
] | 1
|
2018-05-30T01:38:23.000Z
|
2018-05-30T01:38:23.000Z
|
Others/Source/19/19.2/barh_test.py
|
silence0201/Learn-Python
|
662da7c0e74221cedb445ba17d5cb1cd3af41c86
|
[
"MIT"
] | null | null | null |
Others/Source/19/19.2/barh_test.py
|
silence0201/Learn-Python
|
662da7c0e74221cedb445ba17d5cb1cd3af41c86
|
[
"MIT"
] | null | null | null |
# coding: utf-8
#########################################################################
# Website: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
import matplotlib.pyplot as plt
import numpy as np
# Build the data
x_data = ['2011', '2012', '2013', '2014', '2015', '2016', '2017']
y_data = [58000, 60200, 63000, 71000, 84000, 90500, 107000]
y_data2 = [52000, 54200, 51500,58300, 56800, 59500, 62700]
bar_width=0.3
# Use range(len(x_data)) for the Y-axis positions, i.e. 0, 1, 2, ...
plt.barh(y=range(len(x_data)), width=y_data, label='疯狂Java讲义',
color='steelblue', alpha=0.8, height=bar_width)
# Use np.arange(len(x_data)) + bar_width for the Y-axis positions, i.e.
# bar_width, 1+bar_width, 2+bar_width, ... so this series sits beside the first one
plt.barh(y=np.arange(len(x_data))+bar_width, width=y_data2,
label='疯狂Android讲义', color='indianred', alpha=0.8, height=bar_width)
# Show the exact value on each bar; ha sets horizontal alignment, va vertical alignment
for y, x in enumerate(y_data):
plt.text(x+5000, y-bar_width/2, '%s' % x, ha='center', va='bottom')
for y, x in enumerate(y_data2):
plt.text(x+5000, y+bar_width/2, '%s' % x, ha='center', va='bottom')
# Set the Y-axis tick values
plt.yticks(np.arange(len(x_data))+bar_width/2, x_data)
# Set the title
plt.title("Java与Android图书对比")
# Label both axes
plt.xlabel("销量")
plt.ylabel("年份")
# Show the legend
plt.legend()
plt.show()
| 46.042553
| 74
| 0.420055
| 221
| 2,164
| 4.004525
| 0.515837
| 0.090395
| 0.045198
| 0.047458
| 0.255367
| 0.255367
| 0.144633
| 0.090395
| 0.090395
| 0.090395
| 0
| 0.104625
| 0.390481
| 2,164
| 46
| 75
| 47.043478
| 0.566338
| 0.490296
| 0
| 0
| 0
| 0
| 0.128995
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d4ed8a99839b3110a2db17a408cf4dde65b3291
| 2,336
|
py
|
Python
|
03_docker_compose/03_c_simple_case_with_mongodb_orm/app/app.py
|
kendny/study_docker
|
edb376fb69319a78e05f60faa5dcc88d527602c4
|
[
"BSD-2-Clause"
] | 2
|
2019-05-09T01:41:16.000Z
|
2022-01-06T01:06:07.000Z
|
03_docker_compose/03_c_simple_case_with_mongodb_orm/app/app.py
|
walkacross/docker_in_practice
|
da24da76b4fa3eabca5004abd59d7eef7a48988b
|
[
"BSD-2-Clause"
] | null | null | null |
03_docker_compose/03_c_simple_case_with_mongodb_orm/app/app.py
|
walkacross/docker_in_practice
|
da24da76b4fa3eabca5004abd59d7eef7a48988b
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 18 20:13:57 2018
@author: allen
"""
import random, os, json, datetime, time
from flask import Flask, Response
from pymongo import MongoClient
from bson import json_util
app = Flask(__name__)
MONGO_URI = "mongodb://mongodb:27017" # "mongodb:<container_name>:27017"
mongodb_client = MongoClient(MONGO_URI)
random_numbers = mongodb_client.demo.random_numbers
time.sleep(5)  # crude wait for the MongoDB container to come up
# ---------- pymodm ORM setup ----------
from pymodm.connection import connect
from pymongo.write_concern import WriteConcern
from pymodm import MongoModel, fields
# Connect to MongoDB and call the connection "my-app".
connect("mongodb://mongodb:27017/myDatabase", alias="my-app")
class User(MongoModel):
email = fields.EmailField(primary_key=True)
first_name = fields.CharField()
last_name = fields.CharField()
class Meta:
write_concern = WriteConcern(j=True)
connection_alias = 'my-app'
@app.route("/")
def hello():
html = "<h3> Hello world...</h3>"
#User('user@email.com', name, 'Ross').save()
return html
@app.route("/add_user/<name>")
def add_user(name):
#User('user@email.com', name, 'Ross').save()
html = "<h3> Hello </h3>"
User('user@email.com', name, 'Ross').save()
return "name {} save to database".format(name)
@app.route("/random/<int:lower>/<int:upper>")
def random_generator(lower, upper):
number = str(random.randint(lower, upper))
random_numbers.update(
{"_id" : "lasts"},
{"$push" : {
"items" : {
"$each": [{"value" : number, "date": datetime.datetime.utcnow()}],
"$sort" : {"date" : -1},
"$slice" : 5
}
}},
upsert=True
)
return Response(number, status=200, mimetype='application/json')
@app.route("/random-list")
def last_number_list():
last_numbers = list(random_numbers.find({"_id" : "lasts"}))
extracted = [d['value'] for d in last_numbers[0]['items']]
return Response(json.dumps(extracted, default=json_util.default), status=200, mimetype='application/json')
if __name__ == "__main__":
port = int(os.environ.get('PORT', 5000))
app.config['DEBUG'] = True
app.run(host='0.0.0.0', port=port)
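# --- Usage sketch (added for illustration; not part of the original file).
# Exercise the routes with Flask's built-in test client; "/" needs no
# database, the other routes require the mongodb container to be up.
#
# client = app.test_client()
# print(client.get("/").data)             # b"<h3> Hello world...</h3>"
# print(client.get("/random/1/100").data) # requires MongoDB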
| 26.545455
| 110
| 0.630137
| 296
| 2,336
| 4.844595
| 0.435811
| 0.036262
| 0.027197
| 0.033473
| 0.114365
| 0.069735
| 0.069735
| 0.050209
| 0.050209
| 0
| 0
| 0.026913
| 0.188784
| 2,336
| 87
| 111
| 26.850575
| 0.729815
| 0.133562
| 0
| 0
| 0
| 0
| 0.169388
| 0.044898
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075472
| false
| 0
| 0.132075
| 0
| 0.377358
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d4f428d1c149bf1e2a1658ede1f6e9adcddbdd2
| 1,523
|
py
|
Python
|
goethe/eval/analogy_space.py
|
HPI-DeepLearning/wort2vek
|
bc91c2752a8516665d270c7a7a793ec484c970c4
|
[
"MIT"
] | 4
|
2017-05-01T01:02:40.000Z
|
2022-02-03T16:14:19.000Z
|
goethe/eval/analogy_space.py
|
HPI-DeepLearning/wort2vek
|
bc91c2752a8516665d270c7a7a793ec484c970c4
|
[
"MIT"
] | 6
|
2017-04-06T22:10:09.000Z
|
2017-04-06T22:10:57.000Z
|
goethe/eval/analogy_space.py
|
HPI-DeepLearning/wort2vek
|
bc91c2752a8516665d270c7a7a793ec484c970c4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from gensim.models.keyedvectors import KeyedVectors
from scipy import spatial
from numpy import linalg
import sys
if len(sys.argv) != 6:
    print('wrong number of arguments!')
    print(len(sys.argv))
    exit()
vector_file = sys.argv[1]
words = [sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]]
print(words)
wvs = KeyedVectors.load_word2vec_format(vector_file, binary=True)
print('WVs loaded.')
for w in words:
if w not in wvs.vocab:
print('out of vocab!')
exit()
#print(wvs.most_similar(positive=[words[1], words[2]], negative=[words[0]], topn=3))
w1 = wvs[words[0]]
w2 = wvs[words[1]]
w3 = wvs[words[2]]
w4 = wvs[words[3]]
m1 = w1 / linalg.norm(w1)
m2 = w2 / linalg.norm(w2)
m3 = w3 / linalg.norm(w3)
m4 = w4 / linalg.norm(w4)
diff1 = w1 - w2
diff2 = w3 - w4
miff1 = m1 - m2
miff2 = m3 - m4
print('-------Word Space---------')
print('to word-4: ', 1-spatial.distance.cosine(m2+m3-m1, m4))
print('to word-3: ', 1-spatial.distance.cosine(m1+m4-m2, m3))
print('to word-2: ', 1-spatial.distance.cosine(m4+m1-m3, m2))
print('to word-1: ', 1-spatial.distance.cosine(m2+m3-m4, m1))
print('------Analogy Space-------')
print(' cosine: ', 1-spatial.distance.cosine(diff1, diff2))
print(' Euclidean: ', 1-linalg.norm(diff1-diff2)/(linalg.norm(diff1)+linalg.norm(diff2)))
print(' M-cosine: ', 1-spatial.distance.cosine(miff1, miff2))
print('M-Euclidean: ', 1-linalg.norm(miff1-miff2)/(linalg.norm(miff1)+linalg.norm(miff2)))
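# --- Usage sketch (added for illustration; not part of the original file).
# The script expects a binary word2vec file plus four words, e.g.:
#
#   python analogy_space.py vectors.bin king queen man woman
#
# where vectors.bin is a placeholder for a KeyedVectors-compatible binary file.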
| 27.690909
| 91
| 0.644123
| 242
| 1,523
| 4.033058
| 0.297521
| 0.102459
| 0.098361
| 0.135246
| 0.110656
| 0.053279
| 0
| 0
| 0
| 0
| 0
| 0.064491
| 0.154957
| 1,523
| 54
| 92
| 28.203704
| 0.693862
| 0.06566
| 0
| 0.04878
| 0
| 0
| 0.137729
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.121951
| 0
| 0.121951
| 0.365854
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d5195b07f67a7785033de940e7003695bbf2ec4
| 2,497
|
py
|
Python
|
localgraphclustering/algorithms/eig2_nL.py
|
vishalbelsare/LocalGraphClustering
|
a6325350997932d548a876deb259c2387fc2c809
|
[
"MIT"
] | 106
|
2017-09-06T04:47:02.000Z
|
2022-03-30T07:43:27.000Z
|
localgraphclustering/algorithms/eig2_nL.py
|
vishalbelsare/LocalGraphClustering
|
a6325350997932d548a876deb259c2387fc2c809
|
[
"MIT"
] | 51
|
2017-09-06T02:22:09.000Z
|
2021-12-15T11:39:28.000Z
|
localgraphclustering/algorithms/eig2_nL.py
|
vishalbelsare/LocalGraphClustering
|
a6325350997932d548a876deb259c2387fc2c809
|
[
"MIT"
] | 38
|
2017-09-04T21:45:13.000Z
|
2022-01-19T09:48:25.000Z
|
import numpy as np
import scipy as sp
import scipy.sparse.linalg as splinalg
def eig2_nL(g, tol_eigs = 1.0e-6, normalize:bool = True, dim:int=1):
"""
DESCRIPTION
-----------
Computes the eigenvector that corresponds to the second smallest eigenvalue
of the normalized Laplacian matrix then it uses sweep cut to round the solution.
PARAMETERS (mandatory)
----------------------
g: graph object
PARAMETERS (optional)
---------------------
dim: positive, int
default == 1
The number of eigenvectors or dimensions to compute.
tol_eigs: positive float, double
default == 1.0e-6
Tolerance for computation of the eigenvector that corresponds to
the second smallest eigenvalue of the normalized Laplacian matrix.
normalize: bool,
default == True
True if we should return the eigenvectors of the generalized
eigenvalue problem associated with the normalized Laplacian.
This should be on unless you know what you are doing.
    RETURNS
    -------
    p: Eigenvector or eigenvector matrix that
       corresponds to the second smallest eigenvalue of the
       normalized Laplacian matrix, plus further eigenvectors when dim > 1.
"""
n = g.adjacency_matrix.shape[0]
D_sqrt_neg = sp.sparse.spdiags(g.dn_sqrt.transpose(), 0, n, n)
L = sp.sparse.identity(n) - D_sqrt_neg.dot((g.adjacency_matrix.dot(D_sqrt_neg)))
emb_eig_val, p = splinalg.eigsh(L, which='SM', k=1+dim, tol = tol_eigs)
F = np.real(p[:,1:])
if normalize:
F *= g.dn_sqrt[:,np.newaxis]
return F, emb_eig_val
"""
Random walks and local cuts in graphs, Chung, LAA 2007
We just form the sub-matrix of the Laplacian and use the eigenvector there.
"""
def eig2nL_subgraph(g, ref_nodes, tol_eigs = 1.0e-6, normalize: bool = True):
A_sub = g.adjacency_matrix.tocsr()[ref_nodes, :].tocsc()[:, ref_nodes]
nref = len(ref_nodes)
D_sqrt_neg = sp.sparse.spdiags(g.dn_sqrt[ref_nodes].transpose(), 0, nref, nref)
L_sub = sp.sparse.identity(nref) - D_sqrt_neg.dot((A_sub.dot(D_sqrt_neg)))
emb_eig_val, emb_eig = splinalg.eigsh(L_sub, which='SM', k=1, tol=tol_eigs)
emb_eig *= -1 if max(emb_eig) < 0 else 1
f = emb_eig[:,0]
if normalize:
f *= g.dn_sqrt[ref_nodes]
return ((ref_nodes,f), emb_eig_val)
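# --- Usage sketch (added for illustration; not part of the original file).
# Build a graph object and compute the embedding; the edge-list path is a
# placeholder and GraphLocal is assumed to be the package's graph class.
#
# from localgraphclustering import GraphLocal
# g = GraphLocal('graph.edgelist', 'edgelist', ' ')
# F, emb_eig_val = eig2_nL(g, dim=2)   # two nontrivial eigenvectors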
| 33.293333
| 87
| 0.621946
| 353
| 2,497
| 4.260623
| 0.371105
| 0.031915
| 0.031915
| 0.043883
| 0.303191
| 0.292553
| 0.267287
| 0.240691
| 0.203457
| 0.163564
| 0
| 0.015292
| 0.26672
| 2,497
| 74
| 88
| 33.743243
| 0.806117
| 0.421706
| 0
| 0.086957
| 0
| 0
| 0.003597
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.130435
| 0
| 0.304348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d52775ef423ec088ebd9b5618d6a0b7639f157e
| 2,418
|
py
|
Python
|
setup.py
|
xames3/vdoxa
|
8fa945449bb34447ded0c421214c0252ff523d4a
|
[
"Apache-2.0"
] | 1
|
2020-02-04T08:18:54.000Z
|
2020-02-04T08:18:54.000Z
|
setup.py
|
xames3/vdoxa
|
8fa945449bb34447ded0c421214c0252ff523d4a
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
xames3/vdoxa
|
8fa945449bb34447ded0c421214c0252ff523d4a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 XAMES3. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================
"""
vdoXA is an open-source python package for trimming videos.
It is built as a subsystem for < XXXXX Not to be named XXXXX > project.
Originally inspired by my colleague's work, I thought of improving the
concept and build a tool to simplify the process. I hope it comes with
strong support for continuous updates, reliable functions and overall
ease of use.
Read complete documentation at: <https://github.com/xames3/vdoxa>.
"""
from setuptools import find_packages, setup
from vdoxa.vars import dev
doclines = __doc__.split('\n')
def use_readme() -> str:
"""Use `README.md` for parsing long description."""
with open('README.md') as file:
return file.read()
with open('requirements.txt', 'r') as requirements:
required_packages = [package.rstrip() for package in requirements]
setup(
name=dev.PROJECT_NAME,
version=dev.PROJECT_VERSION,
url=dev.PROJECT_LINK,
download_url=dev.PROJECT_LINK,
author=dev.AUTHOR,
author_email=dev.AUTHOR_EMAIL,
maintainer=dev.AUTHOR,
maintainer_email=dev.AUTHOR_EMAIL,
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
],
license=dev.PROJECT_LICENSE,
description=f'{doclines[1]}',
long_description=use_readme(),
long_description_content_type='text/markdown',
keywords='opencv2 cv2 moviepy',
zip_safe=False,
install_requires=required_packages,
python_requires='~=3.6',
include_package_data=True,
packages=find_packages(),
entry_points={
'console_scripts': [
'vdoxa = vdoxa.parser:main',
],
}
)
| 31
| 72
| 0.715054
| 322
| 2,418
| 5.273292
| 0.562112
| 0.035336
| 0.015312
| 0.018846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007371
| 0.158395
| 2,418
| 77
| 73
| 31.402597
| 0.827027
| 0.460711
| 0
| 0.04878
| 0
| 0
| 0.269592
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.04878
| 0
| 0.097561
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d52ada1ae418220d17ef038d3cc8e85cc6253d2
| 2,938
|
py
|
Python
|
little_questions/utils/log.py
|
HelloChatterbox/little_questions
|
04bee86244b42fdaed9f8d010c2f83037ad753f6
|
[
"MIT"
] | null | null | null |
little_questions/utils/log.py
|
HelloChatterbox/little_questions
|
04bee86244b42fdaed9f8d010c2f83037ad753f6
|
[
"MIT"
] | null | null | null |
little_questions/utils/log.py
|
HelloChatterbox/little_questions
|
04bee86244b42fdaed9f8d010c2f83037ad753f6
|
[
"MIT"
] | null | null | null |
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import logging
import sys
class LOG:
"""
Custom logger class that acts like logging.Logger
The logger name is automatically generated by the module of the caller
Usage:
>>> LOG.debug('My message: %s', debug_str)
13:12:43.673 - :<module>:1 - DEBUG - My message: hi
>>> LOG('custom_name').debug('Another message')
13:13:10.462 - custom_name - DEBUG - Another message
"""
base_path = "stdout"
fmt = '%(asctime)s.%(msecs)03d - ' \
'%(name)s - %(levelname)s - %(message)s'
datefmt = '%Y-%m-%d %H:%M:%S'
formatter = logging.Formatter(fmt, datefmt)
name = 'little_questions'
level = "DEBUG"
_loggers = {}
@classmethod
def set_level(cls, level="INFO"):
cls.level = level
for n in cls._loggers:
cls._loggers[n].setLevel(cls.level)
@classmethod
def create_logger(cls, name):
if name in cls._loggers:
return cls._loggers[name]
logger = logging.getLogger(name)
logger.propagate = False
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(cls.formatter)
logger.addHandler(stdout_handler)
logger.setLevel(cls.level)
cls._loggers[name] = logger
return logger
@classmethod
def _log(cls):
name = ""
if cls.name is not None:
name = cls.name + " - "
# Stack:
# [0] - _log()
# [1] - debug(), info(), warning(), or error()
# [2] - caller
stack = inspect.stack()
# Record:
# [0] - frame object
# [1] - filename
# [2] - line number
# [3] - function
# ...
record = stack[2]
name += record[3] + ':' + str(record[2])
logger = cls.create_logger(name)
return logger
@classmethod
def info(cls, *args, **kwargs):
cls._log().info(*args, **kwargs)
@classmethod
def debug(cls, *args, **kwargs):
cls._log().debug(*args, **kwargs)
@classmethod
def warning(cls, *args, **kwargs):
cls._log().warning(*args, **kwargs)
@classmethod
def error(cls, *args, **kwargs):
cls._log().error(*args, **kwargs)
@classmethod
def exception(cls, *args, **kwargs):
cls._log().exception(*args, **kwargs)
| 28.803922
| 74
| 0.596664
| 362
| 2,938
| 4.776243
| 0.400552
| 0.057837
| 0.037594
| 0.04627
| 0.08849
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018276
| 0.273656
| 2,938
| 101
| 75
| 29.089109
| 0.79194
| 0.357386
| 0
| 0.185185
| 0
| 0
| 0.063562
| 0.012603
| 0
| 0
| 0
| 0
| 0
| 1
| 0.148148
| false
| 0
| 0.055556
| 0
| 0.407407
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d53e848dc1be11f4d81bb7ffe655fc1c2f327c3
| 1,923
|
py
|
Python
|
cvstudio/view/widgets/loading_dialog/loading_dialog.py
|
haruiz/PytorchCvStudio
|
ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef
|
[
"MIT"
] | 32
|
2019-10-31T03:10:52.000Z
|
2020-12-23T11:50:53.000Z
|
cvstudio/view/widgets/loading_dialog/loading_dialog.py
|
haruiz/CvStudio
|
ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef
|
[
"MIT"
] | 19
|
2019-10-31T15:06:05.000Z
|
2020-06-15T02:21:55.000Z
|
cvstudio/view/widgets/loading_dialog/loading_dialog.py
|
haruiz/PytorchCvStudio
|
ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef
|
[
"MIT"
] | 8
|
2019-10-31T03:32:50.000Z
|
2020-07-17T20:47:37.000Z
|
import os
from PyQt5 import QtCore
from PyQt5.QtCore import QRect, QPoint
from PyQt5.QtGui import QMovie, QCloseEvent, QShowEvent
from PyQt5.QtWidgets import QDialog, QLabel, QVBoxLayout, QApplication, QWidget
class QLoadingDialog(QDialog):
def __init__(self, parent=None):
super(QLoadingDialog, self).__init__()
self.setFixedSize(100, 100)
# self.setWindowOpacity(0.8)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
app = QApplication.instance()
curr_theme = "light"
if app:
curr_theme = app.property("theme")
gif_file = os.path.abspath("./assets/icons/{}/loading.gif".format(curr_theme))
self.movie = QMovie(gif_file)
self.label = QLabel()
self.label.setMovie(self.movie)
self.layout = QVBoxLayout(self)
self.layout.addWidget(self.label)
def center(self, host: QWidget = None):
if host:
hostGeometry: QRect = host.geometry()
# dialogGeometry : QRect = self.geometry()
centerPoint: QPoint = hostGeometry.center()
centerPoint = host.mapToGlobal(centerPoint)
offset = 30
targetPoint = QPoint(centerPoint.x() - offset, centerPoint.y() - offset)
self.move(targetPoint)
else:
screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos())
centerPoint = QApplication.desktop().screenGeometry(screen).center()
self.move(centerPoint)
return self
def showEvent(self, e: QShowEvent):
if self.movie.state() == QMovie.NotRunning:
self.movie.start()
def closeEvent(self, e: QCloseEvent):
if self.movie.state() == QMovie.Running:
self.movie.stop()
def exec_(self):
self.center()
return QDialog.exec_(self)
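# --- Illustrative usage sketch (an addition, not part of the original file) ---
# A hedged example of how this dialog might be launched; it assumes a Qt event
# loop, an application "theme" property, and that the loading GIF exists under
# ./assets/icons/<theme>/. None of this setup comes from the source repository.
if __name__ == "__main__":
    import sys
    app = QApplication(sys.argv)
    app.setProperty("theme", "light")  # assumed convention read by the dialog
    dialog = QLoadingDialog()
    # exec_() centers the dialog and blocks until it is closed; a real app
    # would close it from a signal once the background task finishes.
    dialog.exec_()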
| 36.283019
| 95
| 0.637546
| 200
| 1,923
| 6.05
| 0.43
| 0.044628
| 0.018182
| 0.026446
| 0.036364
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009729
| 0.25169
| 1,923
| 52
| 96
| 36.980769
| 0.831133
| 0.034841
| 0
| 0
| 0
| 0
| 0.021047
| 0.01565
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116279
| false
| 0
| 0.116279
| 0
| 0.302326
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d54ea522a32fa91aca889c9606f036f2de763c3
| 3,935
|
py
|
Python
|
Assets/Editor/PostprocessBuildPlayer_MpireNxusMeasurementPostBuildiOS.py
|
mpire-nxus/nxus_unity_sdk
|
34a1ebfc588c47c1c71fae11f29e82c1172c6dc2
|
[
"MIT"
] | 1
|
2018-03-13T02:44:15.000Z
|
2018-03-13T02:44:15.000Z
|
Assets/Editor/PostprocessBuildPlayer_MpireNxusMeasurementPostBuildiOS.py
|
mpire-nxus/nxus_unity_sdk
|
34a1ebfc588c47c1c71fae11f29e82c1172c6dc2
|
[
"MIT"
] | null | null | null |
Assets/Editor/PostprocessBuildPlayer_MpireNxusMeasurementPostBuildiOS.py
|
mpire-nxus/nxus_unity_sdk
|
34a1ebfc588c47c1c71fae11f29e82c1172c6dc2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
import re
from subprocess import Popen, PIPE
import argparse
from pbxproj import XcodeProject, TreeType
from pbxproj import FileOptions
def main():
parser = argparse.ArgumentParser(description="MpireNxusMeasurement post build iOS script")
parser.add_argument('ios_project_path', help="path to the folder of the iOS project generated by unity3d")
with open('MpireNxusMeasurementPostBuildiOSLog.txt', 'w') as fileLog:
# Log function with file injected.
LogFunc = LogInput(fileLog)
# Path of the Xcode SDK on the system.
xcode_sdk_path = get_xcode_sdk_path(LogFunc)
# Path for unity iOS Xcode project and framework on the system.
unity_xcode_project_path, framework_path = get_paths(LogFunc, parser, xcode_sdk_path)
# Edit the Xcode project using mod_pbxproj:
# - Add the adSupport framework library.
# - Add the iAd framework library.
# - Change the compilation flags of the adjust project files to support non-ARC.
edit_unity_xcode_project(LogFunc, unity_xcode_project_path, framework_path)
# Removed.
# Change the Xcode project directly:
# - Allow objective-c exceptions
# rewrite_unity_xcode_project(LogFunc, unity_xcode_project_path)
sys.exit(0)
def LogInput(writeObject):
def Log(message, *args):
messageNLine = (message if message else "None") + "\n"
writeObject.write(messageNLine.format(*args))
return Log
def get_paths(Log, parser, xcode_sdk_path):
args, ignored_args = parser.parse_known_args()
ios_project_path = args.ios_project_path
unity_xcode_project_path = ios_project_path + "/Unity-iPhone.xcodeproj/project.pbxproj"
Log("Unity3d Xcode project path: {0}", unity_xcode_project_path)
framework_path = xcode_sdk_path + "/System/Library/Frameworks/"
Log("framework path: {0}", framework_path)
return unity_xcode_project_path, framework_path
def edit_unity_xcode_project(Log, unity_xcode_project_path, framework_path):
# load unity iOS pbxproj project file
unity_XcodeProject = XcodeProject.load(unity_xcode_project_path)
frameworks = unity_XcodeProject.get_or_create_group('Frameworks')
file_options_security_framework = FileOptions(embed_framework=False, weak=True)
unity_XcodeProject.add_file(framework_path + "Security.framework", parent=frameworks, tree='SDKROOT', force=False, file_options=file_options_security_framework)
Log("added Security framework")
# Add -ObjC to "Other Linker Flags" project settings.
unity_XcodeProject.add_other_ldflags('-ObjC')
# Save changes.
unity_XcodeProject.save()
def rewrite_unity_xcode_project(Log, unity_xcode_project_path):
unity_xcode_lines = []
# Allow objective-c exceptions
re_objc_excep = re.compile(r"\s*GCC_ENABLE_OBJC_EXCEPTIONS *= *NO.*")
with open(unity_xcode_project_path) as upf:
for line in upf:
if re_objc_excep.match(line):
#Log("matched line: {0}", re_objc_excep.match(line).group())
line = line.replace("NO","YES")
Log("Objective-c exceptions enabled")
unity_xcode_lines.append(line)
with open(unity_xcode_project_path, "w+") as upf:
upf.writelines(unity_xcode_lines)
def get_xcode_sdk_path(Log):
# Output all info from Xcode.
proc = Popen(["xcodebuild", "-version", "-sdk"], stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
if proc.returncode not in [0, 66]:
Log("Could not retrieve Xcode sdk path. code: {0}, err: {1}", proc.returncode, err)
return None
match = re.search("iPhoneOS.*?Path: (?P<sdk_path>.*?)\n", out, re.DOTALL)
xcode_sdk_path = match.group('sdk_path') if match else None
Log("Xcode sdk path: {0}", xcode_sdk_path)
return xcode_sdk_path
if __name__ == "__main__":
main()
| 38.578431
| 164
| 0.706226
| 516
| 3,935
| 5.131783
| 0.317829
| 0.086103
| 0.096299
| 0.087236
| 0.150302
| 0.135196
| 0.064955
| 0.064955
| 0
| 0
| 0
| 0.003803
| 0.198221
| 3,935
| 101
| 165
| 38.960396
| 0.835499
| 0.179161
| 0
| 0
| 0
| 0
| 0.175592
| 0.041719
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118644
| false
| 0
| 0.101695
| 0
| 0.288136
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d55a06354d86f35af5fb38858161328b7581a23
| 10,786
|
py
|
Python
|
hack/dev/gh-replay-events.py
|
sm43/pipelines-as-code
|
bd21e48c96ab128d533701ecd1a2df7a0d136d65
|
[
"Apache-2.0"
] | null | null | null |
hack/dev/gh-replay-events.py
|
sm43/pipelines-as-code
|
bd21e48c96ab128d533701ecd1a2df7a0d136d65
|
[
"Apache-2.0"
] | null | null | null |
hack/dev/gh-replay-events.py
|
sm43/pipelines-as-code
|
bd21e48c96ab128d533701ecd1a2df7a0d136d65
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Chmouel Boudjnah <chmouel@chmouel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# See README.md for documentation
import typing
import argparse
import base64
import hashlib
import hmac
import json
import os
import subprocess
import sys
import time
import requests
import ghapp_token
NAMESPACE = "pipelines-as-code"
SECRET_NAME = "pipelines-as-code-secret"
ELNAME = "pipelines-as-code"
EXPIRE_MINUTES_AS_SECONDS = (
int(os.environ.get("GITHUBAPP_TOKEN_EXPIRATION_MINUTES", 10)) * 60
)
def get_controller_route():
elroute = subprocess.run(
f"kubectl get route -n {NAMESPACE} -l pipelines-as-code/route=controller -o json",
shell=True,
check=True,
capture_output=True,
)
return (
"https://"
+ json.loads(elroute.stdout)["items"][0]["status"]["ingress"][0]["host"]
)
def get_controller_ingress():
elroute = subprocess.run(
f"kubectl get ingress -n {NAMESPACE} -l pipelines-as-code/route=controller -o json",
shell=True,
check=True,
capture_output=True,
)
return (
"http://" + json.loads(elroute.stdout)["items"][0]["spec"]["rules"][0]["host"]
)
def get_token_secret(
github_api_url=ghapp_token.GITHUB_API_URL, expiration_time=EXPIRE_MINUTES_AS_SECONDS
):
secret = subprocess.run(
f"kubectl get secret {SECRET_NAME} -n{NAMESPACE} -o json",
shell=True,
check=True,
capture_output=True,
)
jeez = json.loads(secret.stdout)
private_key = base64.b64decode(jeez["data"]["github-private-key"])
app_id = base64.b64decode(jeez["data"]["github-application-id"])
webhook_secret = base64.b64decode(jeez["data"]["webhook.secret"]).decode()
if not private_key or not app_id or not webhook_secret:
print(
f"private_key={private_key[1:10]} or app_id={app_id} or webhook_secret={webhook_secret} are empty"
)
sys.exit(1)
gh = ghapp_token.GitHub(
private_key,
app_id,
expiration_time,
github_api_url,
)
return gh.token, webhook_secret, app_id
def _request_app_delivery(token, iid=None, api_url=ghapp_token.GITHUB_API_URL):
url = f"{api_url}/app/hook/deliveries"
if iid:
url += f"/{iid}"
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization": f"Bearer {token}",
}
return requests.request("GET", url, headers=headers)
def _request_webhooks_installed(
token: str,
owner_repo: str,
iid: typing.Union[int, None] = None,
api_url: str = ghapp_token.GITHUB_API_URL,
):
url = f"{api_url}/repos/{owner_repo}/hooks"
if iid:
url += f"/{iid}/deliveries"
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization": f"Bearer {token}",
}
return requests.request("GET", url, headers=headers)
def _request_webhooks_reattempt(
token: str,
owner_repo: str,
iid: int,
delivery_id: int,
api_url: str = ghapp_token.GITHUB_API_URL,
):
url = f"{api_url}/repos/{owner_repo}/hooks/{iid}/deliveries/{delivery_id}/attempts"
print(url)
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization": f"Bearer {token}",
}
return requests.request("POST", url, headers=headers)
def ask_which(token: str, api_url: str, last: bool, deliveries: dict) -> int:
dico = []
i = 1
if "message" in deliveries:
print(deliveries)
sys.exit(0)
for delivery in deliveries:
print(
f"{i}) Action={delivery['action']} Event={delivery['event']} Delivered at {delivery['delivered_at']}"
)
dico.append(delivery["id"])
if i == 10:
break
i += 1
chosen = input("Choose a delivery: ")
# return _request_app_delivery(token, dico[int(chosen) - 1], api_url=api_url).json()
return int(chosen) - 1
def webhook_get_delivery(
token: str,
owner_repo: str,
last: bool = False,
api_url: str = ghapp_token.GITHUB_API_URL,
) -> str:
r = _request_webhooks_installed(token, api_url=api_url, owner_repo=owner_repo)
r.raise_for_status()
webhooks = r.json()
if len(webhooks) == 1:
webhook_id = int(webhooks[0]["id"])
elif len(webhooks) > 1:
cnt = 1
for wh in webhooks:
print(f"{cnt}) {wh['name']} - {wh['config']['url']} ")
cnt += 1
chosen = input("Choose a delivery: ")
webhook_id = int(webhooks[int(chosen) - 1]["id"])
else:
print("could not find any webhook configuration on your repo {}")
sys.exit(1)
r = _request_webhooks_installed(
token, api_url=api_url, owner_repo=owner_repo, iid=webhook_id
)
r.raise_for_status()
deliveries = r.json()
if not deliveries:
print("no deliveries has been set ")
sys.exit(1)
if last:
delivery_id = deliveries[0]["id"]
else:
chosen = ask_which(token, api_url, last, r.json())
delivery_id = deliveries[chosen]["id"]
r = _request_webhooks_reattempt(
token=token,
owner_repo=owner_repo,
iid=webhook_id,
api_url=api_url,
delivery_id=delivery_id,
)
r.raise_for_status()
print(f"Delivery has been replayed, you can replay directly it with: ")
s = f"http POST {api_url}/repos/{owner_repo}/hooks/{webhook_id}/deliveries/{delivery_id}/attempts"
s += f' Authorization:"Bearer { os.environ.get("PASS_TOKEN", "$TOKEN") }"'
s += " Accept:application/vnd.github.v3+json"
print(s)
return s
def app_get_delivery(
token: str, last: bool = False, api_url: str = ghapp_token.GITHUB_API_URL
) -> dict:
r = _request_app_delivery(token, api_url=api_url)
r.raise_for_status()
deliveries = r.json()
if not deliveries:
print("no deliveries has been set ")
sys.exit(1)
if last:
return _request_app_delivery(token, deliveries[0]["id"], api_url=api_url).json()
chosen = ask_which(token, api_url, last, deliveries)
return _request_app_delivery(
token, deliveries[chosen]["id"], api_url=api_url
).json()
def save_script(target: str, el_route: str, headers: dict, payload: str):
s = f"""#!/usr/bin/env python3
import requests
import sys
payload = \"\"\"{json.dumps(payload)}\"\"\"
headers={headers}
el_route = "http://localhost:8080" if (len(sys.argv) > 1 and sys.argv[1] == "-l") else "{el_route}"
r = requests.request("POST",el_route,data=payload.encode("utf-8"),headers=headers)
r.raise_for_status()
print("Request has been replayed on " + el_route)
"""
with open(target, "w") as fp:
fp.write(s)
os.chmod(target, 0o755)
print(f"Request saved to {target}")
def main(args):
el = args.eroute
if not el:
try:
el = get_controller_route()
except subprocess.CalledProcessError:
try:
el = get_controller_ingress()
except subprocess.CalledProcessError:
print("Could not find an ingress or route")
sys.exit(1)
if args.webhook_repo:
token, webhook_secret = args.webhook_token, args.webhook_secret
replays = webhook_get_delivery(
token,
last=args.last_event,
api_url=args.api_url,
owner_repo=args.webhook_repo,
)
if args.save:
open(args.save, "w").write(f"""#!/usr/bin/env bash\n{replays}\n""")
os.chmod(args.save, 0o755)
print(f"Saved to {args.save}")
sys.exit(0)
else:
token, webhook_secret, app_id = get_token_secret(github_api_url=args.api_url)
delivery = app_get_delivery(token, args.last_event, args.api_url)
jeez = delivery["request"]["payload"]
headers = delivery["request"]["headers"]
payload = json.dumps(jeez)
esha256 = hmac.new(
webhook_secret.encode("utf-8"),
msg=payload.encode("utf-8"),
digestmod=hashlib.sha256,
).hexdigest()
esha1 = hmac.new(
webhook_secret.encode("utf-8"),
msg=payload.encode("utf-8"),
digestmod=hashlib.sha1,
).hexdigest()
print("Replay event for repo " + jeez["repository"]["full_name"])
headers.update(
{
"X-Hub-Signature": "sha1=" + esha1,
"X-Hub-Signature-256": "sha256=" + esha256,
}
)
if args.save:
save_script(args.save, el, headers, jeez)
sys.exit(0)
for _ in range(args.retry):
try:
r = requests.request(
"POST", el, data=payload.encode("utf-8"), headers=headers
)
except requests.exceptions.ConnectionError:
print(f"sleeping until {el} is up")
time.sleep(5)
continue
print(f"Payload has been replayed on {el}: {r}")
return
print("You have reached the maximum number of retries")
def parse_args():
parser = argparse.ArgumentParser(description="Replay a webhook")
parser.add_argument(
"--installation-id",
"-i",
default=os.environ.get("INSTALLATION_ID"),
help="Installation ID",
)
parser.add_argument(
"--controller-route",
"-e",
dest="eroute",
help="Route hostname (default to detect on openshift/ingress)",
default=os.environ.get("EL_ROUTE"),
)
parser.add_argument("--last-event", "-L", action="store_true")
parser.add_argument(
"--webhook-repo", "-w", help="Use a webhook-repo instead of app"
)
parser.add_argument("--webhook-token", "-t", help="Use this token")
parser.add_argument("--webhook-secret", "-S", help="Use this webhook secret")
parser.add_argument(
"--save", "-s", help="save the request to a shell script to replay easily"
)
parser.add_argument(
"-a",
"--api-url",
help="Github API URL",
default=os.environ.get("GITHUB_API_URL", ghapp_token.GITHUB_API_URL),
)
parser.add_argument(
"--retry",
type=int,
default=1,
help="how many time to try to contact the el route",
)
return parser.parse_args()
if __name__ == "__main__":
main(parse_args())
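# --- Illustrative sketch (an addition, not part of the original script) ---
# The script above re-signs replayed payloads with HMAC-SHA1/SHA-256 the same
# way GitHub signs webhook deliveries. For reference, this is roughly how a
# receiver would verify such a signature; the helper name is hypothetical,
# only the header convention ("sha256=<hexdigest>") follows GitHub's docs.
def verify_signature(payload: bytes, webhook_secret: str, signature_header: str) -> bool:
    expected = "sha256=" + hmac.new(
        webhook_secret.encode("utf-8"), msg=payload, digestmod=hashlib.sha256
    ).hexdigest()
    # compare_digest avoids leaking timing information during the comparison
    return hmac.compare_digest(expected, signature_header)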
| 30.555241
| 113
| 0.623215
| 1,409
| 10,786
| 4.610362
| 0.203691
| 0.039717
| 0.022167
| 0.020474
| 0.356681
| 0.29064
| 0.2246
| 0.197506
| 0.187038
| 0.175954
| 0
| 0.012112
| 0.242166
| 10,786
| 352
| 114
| 30.642045
| 0.782603
| 0.067495
| 0
| 0.300676
| 0
| 0.016892
| 0.262225
| 0.080968
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040541
| false
| 0.003378
| 0.047297
| 0
| 0.128378
| 0.060811
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d58e721508a643ec9487a7f661ca1a66cd5a971
| 3,659
|
py
|
Python
|
076_Minimum_Window_Substring.py
|
joshlyman/Josh-LeetCode
|
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
|
[
"MIT"
] | null | null | null |
076_Minimum_Window_Substring.py
|
joshlyman/Josh-LeetCode
|
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
|
[
"MIT"
] | null | null | null |
076_Minimum_Window_Substring.py
|
joshlyman/Josh-LeetCode
|
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
|
[
"MIT"
] | null | null | null |
# Other solution
# V2
import collections  # needed for collections.Counter below; the import was missing
def minWindow(s, t):
need = collections.Counter(t) #hash table to store char frequency
missing = len(t) #total number of chars we care
start, end = 0, 0
i = 0
for j, char in enumerate(s, 1): #index j from 1
if need[char] > 0:
missing -= 1
need[char] -= 1
if missing == 0: #match all chars
while i < j and need[s[i]] < 0: #remove chars to find the real start
need[s[i]] += 1
i += 1
need[s[i]] += 1 #make sure the first appearing char satisfies need[char]>0
missing += 1 #we missed this first char, so add missing by 1
if end == 0 or j-i < end-start: #update window
start, end = i, j
i += 1 #update i to start+1 for next window
return s[start:end]
# Time: O(|S|+|T|)
# Space:O(|S|+|T|)
# Refer from:
# https://leetcode.com/problems/minimum-window-substring/solution/
# Sliding Window
# We start with two pointers, left and right, initially pointing to the first element of the string S.
# We use the right pointer to expand the window until we get a desirable window, i.e. a window that contains all of the characters of T.
# Once we have a window with all the characters, we can move the left pointer ahead one by one. If the window is still a desirable one we keep on updating the minimum window size.
# If the window is not desirable any more, we repeat step 2 onwards.
# The current window is s[i:j] and the result window is s[I:J]. In need[c] I store how many times I
# need character c (can be negative) and missing tells how many characters are still missing.
# In the loop, first add the new character to the window. Then, if nothing is missing,
# remove as much as possible from the window start and then update the result.
class Solution:
def minWindow(self, s: str, t: str) -> str:
m = len(s)
n = len(t)
if m < n:
return ''
lt = {}
# put t into dict (lt) and count the occurrences of each char
for i in t:
if i not in lt:
lt[i] = 1
else:
lt[i] += 1
# missing counts how many required chars are still missing from the window
# finally we get a candidate substring which satisfies the needs of t
missing = n
i = I = J = 0
for j, c in enumerate(s, 1):
if c in lt and lt[c] > 0:
missing -= 1
if c in lt:
# lt can be negative
lt[c] -= 1
# i is the start index of the candidate substring; remove as many chars as possible from it
while i < j and not missing:
if not J or j-i < J-I:
I, J = i, j
if s[i] not in lt:
i += 1
continue
else:
# if lt contains s[i], then its count goes up by 1 and might reach 0
lt[s[i]] += 1
# if the count is > 0, we need more of this char, so missing goes up by 1
if lt[s[i]] > 0:
missing += 1
i += 1
return s[I:J]
# Time: O(|S|+|T|)
# Space:O(|S|+|T|)
# Optimized Sliding Window
# A small improvement to the above approach can reduce the time complexity of the algorithm to O(2*|filtered_S| + |S| + |T|),
# where filtered(S) is the string formed from S by removing all the elements not present in T
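# A hedged sketch of that optimization (an addition, not part of the original
# file): pre-filter S down to (index, char) pairs whose char occurs in T, then
# slide over the much shorter filtered list while keeping original indices.
def minWindow_filtered(s: str, t: str) -> str:
    import collections
    if not s or not t or len(s) < len(t):
        return ''
    need = collections.Counter(t)
    missing = len(t)
    filtered = [(i, c) for i, c in enumerate(s) if c in need]
    I = J = 0
    left = 0
    for j, c in filtered:
        if need[c] > 0:
            missing -= 1
        need[c] -= 1
        while not missing:  # window covers all of t; shrink from the left
            i, lc = filtered[left]
            if not J or j - i + 1 < J - I:
                I, J = i, j + 1
            need[lc] += 1
            if need[lc] > 0:  # we just dropped a required char
                missing += 1
            left += 1
    return s[I:J]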
| 36.959596
| 179
| 0.52774
| 556
| 3,659
| 3.482014
| 0.296763
| 0.011364
| 0.018595
| 0.01343
| 0.052686
| 0.015496
| 0.015496
| 0.015496
| 0
| 0
| 0
| 0.017048
| 0.390817
| 3,659
| 98
| 180
| 37.336735
| 0.848811
| 0.52446
| 0
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d5d633b271390741583d9b310e4391f6dfe899f
| 4,673
|
py
|
Python
|
fs_image/rpm/storage/tests/storage_base_test.py
|
singhaditya28/fs_image
|
3d122da48eab8b26e5add6754cc1f91296139c58
|
[
"MIT"
] | null | null | null |
fs_image/rpm/storage/tests/storage_base_test.py
|
singhaditya28/fs_image
|
3d122da48eab8b26e5add6754cc1f91296139c58
|
[
"MIT"
] | null | null | null |
fs_image/rpm/storage/tests/storage_base_test.py
|
singhaditya28/fs_image
|
3d122da48eab8b26e5add6754cc1f91296139c58
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import patch, MagicMock
from typing import List, Tuple
from .. import Storage # Module import to ensure we get plugins
class StorageBaseTestCase(unittest.TestCase):
'A tiny test suite that can be used to check any Storage implementation.'
def _check_write_and_read(self, storage: Storage, writes: List[bytes]):
with storage.writer() as output:
for piece in writes:
output.write(piece)
sid = output.commit()
with storage.reader(sid) as input:
written = b''.join(writes)
partial_read = input.read(3)
if written:
self.assertGreater(len(partial_read), 0)
self.assertLessEqual(len(partial_read), 3)
self.assertEqual(written, partial_read + input.read())
return sid
def check_storage_impl(
self,
storage: Storage, *,
no_empty_blobs=False,
skip_empty_writes=False,
# To make testing more meaningful, it's useful to make sure that
# some writes fill up any output buffers. For filesystem writes
# from Python, this default is probably enough.
mul=314159, # just about 300KB
# If the blob-store has a read-through cache, we cannot effectively
# test that the remove actually happened.
remove_is_immediate=True,
) -> List[Tuple[List[str], str]]: # Writes + their storage ID
# Make sure nothing bad happens if an exception flies before a
# commit. Since we don't have an ID, we can't really test that the
# partial write got discarded.
with self.assertRaisesRegex(RuntimeError, '^humbug$'):
with storage.writer() as output:
output.write(b'bah')
raise RuntimeError('humbug')
with self.assertRaisesRegex(AssertionError, '^Cannot commit twice$'):
with storage.writer() as output:
output.write(b'foo')
output.commit(remove_on_exception=True) # Leave no litter
output.commit()
# Check that the `remove_on_exception` kwarg triggers `remove`.
mock_remove = MagicMock()
with patch.object(storage, 'remove', mock_remove):
with self.assertRaisesRegex(RuntimeError, '^remove_on_exception$'):
with storage.writer() as output:
output.write(b'foo')
id_to_remove = output.commit(remove_on_exception=True)
# Contract: committed blobs are available to read
with storage.reader(id_to_remove) as reader:
self.assertEqual(b'foo', reader.read())
raise RuntimeError('remove_on_exception')
# Check that `remove` would have been called, and then call it.
mock_remove.assert_called_once_with(id_to_remove)
storage.remove(id_to_remove) # Exercise the real `remove`
if remove_is_immediate:
# The removed ID should no longer be available.
with self.assertRaises(Exception):
with storage.reader(id_to_remove) as input:
# The reader may be a pipe from another Python process,
# let's consume its output to avoid BrokenPipe logspam.
input.read()
return [
(
writes,
self._check_write_and_read(
storage,
writes if i is None else [*writes[:i], b'', *writes[i:]],
),
) for writes in [
# Some large writes
[b'abcd' * mul, b'efgh' * mul],
[b'abc' * mul, b'defg' * mul],
[b'abc' * mul, b'def' * mul, b'g' * mul],
[b'abcd' * mul],
[b'abc' * mul, b'd' * mul],
# Some tiny writes without a multiplier
[b'a', b'b', b'c', b'd'],
[b'ab'],
[b'a', b'b'],
# While clowny, some blob storage systems refuse empty blobs.
*([] if no_empty_blobs else [
[b''],
[],
]),
]
# Test the given writes, optionally insert a blank at each pos
for i in [
None,
*([] if skip_empty_writes else range(len(writes) + 1)),
]
]
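# --- Illustrative sketch (an addition, not part of fs_image) ---
# The suite above is meant to be mixed into a concrete test case, roughly as
# below. The storage class a real subclass would construct is backend-specific,
# so this skeleton only shows the wiring and skips itself when run directly.
class ExampleStorageTestCase(StorageBaseTestCase):
    def test_write_and_read(self):
        # A real subclass would build its Storage implementation here, e.g.
        # storage = MyStorage(...)   # hypothetical name, not from the repo
        # and then exercise the shared contract checks:
        # self.check_storage_impl(storage)
        self.skipTest("illustrative only: no concrete Storage configured")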
| 41.723214
| 79
| 0.558742
| 549
| 4,673
| 4.661202
| 0.380692
| 0.014068
| 0.033216
| 0.029699
| 0.116843
| 0.094177
| 0.068386
| 0.045721
| 0.031262
| 0
| 0
| 0.004627
| 0.35245
| 4,673
| 111
| 80
| 42.099099
| 0.841044
| 0.283544
| 0
| 0.077922
| 0
| 0
| 0.059552
| 0.006191
| 0
| 0
| 0
| 0
| 0.116883
| 1
| 0.025974
| false
| 0
| 0.051948
| 0
| 0.116883
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d5e98fd28e904d5e1f509a5b35e66ec3047cc56
| 353
|
py
|
Python
|
lecture11/subsets.py
|
nd-cse-30872-fa20/cse-30872-fa20-examples
|
7a991a0499e03bf91ac8ba40c99245d5d926e20c
|
[
"MIT"
] | null | null | null |
lecture11/subsets.py
|
nd-cse-30872-fa20/cse-30872-fa20-examples
|
7a991a0499e03bf91ac8ba40c99245d5d926e20c
|
[
"MIT"
] | null | null | null |
lecture11/subsets.py
|
nd-cse-30872-fa20/cse-30872-fa20-examples
|
7a991a0499e03bf91ac8ba40c99245d5d926e20c
|
[
"MIT"
] | 2
|
2020-08-10T15:05:39.000Z
|
2020-08-12T15:16:01.000Z
|
#!/usr/bin/env python3
import itertools
# Constants
NUMBERS = range(0, 10)
# Main Execution
def main():
count = 0
for length in range(0, len(NUMBERS) + 1):
for subset in itertools.combinations(NUMBERS, length):
if sum(subset) % 3 == 0:
count += 1
print(count)
if __name__ == '__main__':
main()
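# --- Illustrative aside (an addition, not part of the lecture file) ---
# Sanity check via the roots-of-unity filter: the residue classes of 0..9 mod 3
# are {0,3,6,9}, {1,4,7}, {2,5,8}, giving (2**10 + 2*16) / 3 = 352 subsets with
# sum divisible by 3 (the empty subset included), which is what main() prints.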
| 16.045455
| 62
| 0.580737
| 45
| 353
| 4.377778
| 0.6
| 0.060914
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040161
| 0.294618
| 353
| 21
| 63
| 16.809524
| 0.751004
| 0.130312
| 0
| 0
| 0
| 0
| 0.026316
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.181818
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d5ec924b9a968b7509ed5badaef99e8de842bde
| 21,520
|
py
|
Python
|
src/toil/batchSystems/abstractBatchSystem.py
|
Hexotical/toil
|
312b6e1f221ee7f7f187dd6dbfce1aecffd00e09
|
[
"Apache-2.0"
] | 348
|
2018-07-08T03:38:28.000Z
|
2022-03-11T18:57:44.000Z
|
src/toil/batchSystems/abstractBatchSystem.py
|
Hexotical/toil
|
312b6e1f221ee7f7f187dd6dbfce1aecffd00e09
|
[
"Apache-2.0"
] | 1,700
|
2018-07-05T18:28:49.000Z
|
2022-03-31T14:09:04.000Z
|
src/toil/batchSystems/abstractBatchSystem.py
|
Hexotical/toil
|
312b6e1f221ee7f7f187dd6dbfce1aecffd00e09
|
[
"Apache-2.0"
] | 126
|
2018-07-11T18:59:29.000Z
|
2022-01-24T03:14:02.000Z
|
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import logging
import os
import shutil
from abc import ABC, abstractmethod
from argparse import ArgumentParser, _ArgumentGroup
from contextlib import contextmanager
from typing import (Any,
Callable,
ContextManager,
Dict,
Iterator,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
NamedTuple)
from toil.common import Toil, cacheDirName, Config
from toil.deferred import DeferredFunctionManager
from toil.fileStores.abstractFileStore import AbstractFileStore
from toil.job import JobDescription
from toil.resource import Resource
logger = logging.getLogger(__name__)
# Value to use as exitStatus in UpdatedBatchJobInfo.exitStatus when status is not available.
EXIT_STATUS_UNAVAILABLE_VALUE = 255
class BatchJobExitReason(enum.Enum):
FINISHED: int = 1 # Successfully finished.
FAILED: int = 2 # Job finished, but failed.
LOST: int = 3 # Preemptable failure (job's executing host went away).
KILLED: int = 4 # Job killed before finishing.
ERROR: int = 5 # Internal error.
MEMLIMIT: int = 6 # Job hit batch system imposed memory limit
class UpdatedBatchJobInfo(NamedTuple):
jobID: int
exitStatus: int
"""
The exit status (integer value) of the job. 0 implies successful.
EXIT_STATUS_UNAVAILABLE_VALUE is used when the exit status is not available (e.g. job is lost).
"""
exitReason: Optional[BatchJobExitReason]
wallTime: Union[float, int, None]
# Information required for worker cleanup on shutdown of the batch system.
class WorkerCleanupInfo(NamedTuple):
workDir: str
"""workdir path (where the cache would go)"""
workflowID: str
"""used to identify files specific to this workflow"""
cleanWorkDir: str
class AbstractBatchSystem(ABC):
"""
An abstract (as far as Python currently allows) base class to represent the interface the batch
system must provide to Toil.
"""
@classmethod
@abstractmethod
def supportsAutoDeployment(cls) -> bool:
"""
Whether this batch system supports auto-deployment of the user script itself. If it does,
the :meth:`.setUserScript` can be invoked to set the resource object representing the user
script.
Note to implementors: If your implementation returns True here, it should also override
:meth:`setUserScript`.
"""
raise NotImplementedError()
@classmethod
@abstractmethod
def supportsWorkerCleanup(cls) -> bool:
"""
Indicates whether this batch system invokes
:meth:`BatchSystemSupport.workerCleanup` after the last job for a
particular workflow invocation finishes. Note that the term *worker*
refers to an entire node, not just a worker process. A worker process
may run more than one job sequentially, and more than one concurrent
worker process may exist on a worker node, for the same workflow. The
batch system is said to *shut down* after the last worker process
terminates.
"""
raise NotImplementedError()
def setUserScript(self, userScript: Resource) -> None:
"""
Set the user script for this workflow. This method must be called before the first job is
issued to this batch system, and only if :meth:`.supportsAutoDeployment` returns True,
otherwise it will raise an exception.
:param userScript: the resource object representing the user script
or module and the modules it depends on.
"""
raise NotImplementedError()
@abstractmethod
def issueBatchJob(self, jobDesc: JobDescription, job_environment: Optional[Dict[str, str]] = None) -> int:
"""
Issues a job with the specified command to the batch system and returns a unique jobID.
:param jobDesc: a toil.job.JobDescription
:param job_environment: a collection of job-specific environment variables
to be set on the worker.
:return: a unique jobID that can be used to reference the newly issued job
"""
raise NotImplementedError()
@abstractmethod
def killBatchJobs(self, jobIDs: List[int]) -> None:
"""
Kills the given job IDs. After returning, the killed jobs will not
appear in the results of getRunningBatchJobIDs. The killed job will not
be returned from getUpdatedBatchJob.
:param jobIDs: list of IDs of jobs to kill
"""
raise NotImplementedError()
# FIXME: Return value should be a set (then also fix the tests)
@abstractmethod
def getIssuedBatchJobIDs(self) -> List[int]:
"""
Gets all currently issued jobs
:return: A list of jobs (as jobIDs) currently issued (may be running, or may be
waiting to be run). Despite the result being a list, the ordering should not
be depended upon.
"""
raise NotImplementedError()
@abstractmethod
def getRunningBatchJobIDs(self) -> Dict[int, float]:
"""
Gets a map of jobs as jobIDs that are currently running (not just waiting)
and how long they have been running, in seconds.
:return: dictionary with currently running jobID keys and how many seconds they have
been running as the value
"""
raise NotImplementedError()
@abstractmethod
def getUpdatedBatchJob(self, maxWait: int) -> Optional[UpdatedBatchJobInfo]:
"""
Returns information about a job that has updated its status (i.e. ceased
running, either successfully or with an error). Each such job will be
returned exactly once.
Does not return info for jobs killed by killBatchJobs, although they
may cause None to be returned earlier than maxWait.
:param maxWait: the number of seconds to block, waiting for a result
:return: If a result is available, returns UpdatedBatchJobInfo.
Otherwise it returns None. wallTime is the number of seconds (a strictly
positive float) in wall-clock time the job ran for, or None if this
batch system does not support tracking wall time.
"""
raise NotImplementedError()
def getSchedulingStatusMessage(self) -> Optional[str]:
"""
Get a log message fragment for the user about anything that might be
going wrong in the batch system, if available.
If no useful message is available, return None.
This can be used to report what resource is the limiting factor when
scheduling jobs, for example. If the leader thinks the workflow is
stuck, the message can be displayed to the user to help them diagnose
why it might be stuck.
:return: User-directed message about scheduling state.
"""
# Default implementation returns None.
# Override to provide scheduling status information.
return None
@abstractmethod
def shutdown(self) -> None:
"""
Called at the completion of a toil invocation.
Should cleanly terminate all worker threads.
"""
raise NotImplementedError()
def setEnv(self, name: str, value: Optional[str] = None) -> None:
"""
Set an environment variable for the worker process before it is launched. The worker
process will typically inherit the environment of the machine it is running on but this
method makes it possible to override specific variables in that inherited environment
before the worker is launched. Note that this mechanism is different to the one used by
the worker internally to set up the environment of a job. A call to this method affects
all jobs issued after this method returns. Note to implementors: This means that you
would typically need to copy the variables before enqueuing a job.
If no value is provided it will be looked up from the current environment.
"""
raise NotImplementedError()
@classmethod
def add_options(cls, parser: Union[ArgumentParser, _ArgumentGroup]) -> None:
"""
If this batch system provides any command line options, add them to the given parser.
"""
pass
OptionType = TypeVar('OptionType')
@classmethod
def setOptions(cls, setOption: Callable[[str, Optional[Callable[[Any], OptionType]], Optional[Callable[[OptionType], None]], Optional[OptionType], Optional[List[str]]], None]) -> None:
"""
Process command line or configuration options relevant to this batch system.
:param setOption: A function with signature
setOption(option_name, parsing_function=None, check_function=None, default=None, env=None)
returning nothing, used to update run configuration as a side effect.
"""
# TODO: change type to a Protocol to express kwarg names, or else use a
# different interface (generator?)
pass
def getWorkerContexts(self) -> List[ContextManager[Any]]:
"""
Get a list of picklable context manager objects to wrap worker work in,
in order.
Can be used to ask the Toil worker to do things in-process (such as
configuring environment variables, hot-deploying user scripts, or
cleaning up a node) that would otherwise require a wrapping "executor"
process.
"""
return []
class BatchSystemSupport(AbstractBatchSystem):
"""
Partial implementation of AbstractBatchSystem, support methods.
"""
def __init__(self, config: Config, maxCores: float, maxMemory: int, maxDisk: int) -> None:
"""
Initializes initial state of the object
:param toil.common.Config config: object is setup by the toilSetup script and
has configuration parameters for the jobtree. You can add code
to that script to get parameters for your batch system.
:param float maxCores: the maximum number of cores the batch system can
request for any one job
:param int maxMemory: the maximum amount of memory the batch system can
request for any one job, in bytes
:param int maxDisk: the maximum amount of disk space the batch system can
request for any one job, in bytes
"""
super().__init__()
self.config = config
self.maxCores = maxCores
self.maxMemory = maxMemory
self.maxDisk = maxDisk
self.environment: Dict[str, str] = {}
self.workerCleanupInfo = WorkerCleanupInfo(workDir=self.config.workDir,
workflowID=self.config.workflowID,
cleanWorkDir=self.config.cleanWorkDir)
def checkResourceRequest(self, memory: int, cores: float, disk: int, job_name: str = '', detail: str = '') -> None:
"""
Check resource request is not greater than that available or allowed.
:param int memory: amount of memory being requested, in bytes
:param float cores: number of cores being requested
:param int disk: amount of disk space being requested, in bytes
:param str job_name: Name of the job being checked, for generating a useful error report.
:param str detail: Batch-system-specific message to include in the error.
:raise InsufficientSystemResources: raised when a resource is requested in an amount
greater than allowed
"""
batch_system = self.__class__.__name__ or 'this batch system'
for resource, requested, available in [('cores', cores, self.maxCores),
('memory', memory, self.maxMemory),
('disk', disk, self.maxDisk)]:
assert requested is not None
if requested > available:
unit = 'bytes of ' if resource in ('disk', 'memory') else ''
R = f'The job {job_name} is r' if job_name else 'R'
if resource == 'disk':
msg = (f'{R}equesting {requested} {unit}{resource} for temporary space, '
f'more than the maximum of {available} {unit}{resource} of free space on '
f'{self.config.workDir} that {batch_system} was configured with, or enforced '
f'by --max{resource.capitalize()}. Try setting/changing the toil option '
f'"--workDir" or changing the base temporary directory by setting TMPDIR.')
else:
msg = (f'{R}equesting {requested} {unit}{resource}, more than the maximum of '
f'{available} {unit}{resource} that {batch_system} was configured with, '
f'or enforced by --max{resource.capitalize()}.')
if detail:
msg += detail
raise InsufficientSystemResources(msg)
def setEnv(self, name: str, value: Optional[str] = None) -> None:
"""
Set an environment variable for the worker process before it is launched. The worker
process will typically inherit the environment of the machine it is running on but this
method makes it possible to override specific variables in that inherited environment
before the worker is launched. Note that this mechanism is different to the one used by
the worker internally to set up the environment of a job. A call to this method affects
all jobs issued after this method returns. Note to implementors: This means that you
would typically need to copy the variables before enqueuing a job.
If no value is provided it will be looked up from the current environment.
:param str name: the environment variable to be set on the worker.
:param str value: if given, the environment variable given by name will be set to this value.
if None, the variable's current value will be used as the value on the worker
:raise RuntimeError: if value is None and the name cannot be found in the environment
"""
if value is None:
try:
value = os.environ[name]
except KeyError:
raise RuntimeError(f"{name} does not exist in current environment")
self.environment[name] = value
def formatStdOutErrPath(self, toil_job_id: int, cluster_job_id: str, std: str) -> str:
"""
Format path for batch system standard output/error and other files
generated by the batch system itself.
Files will be written to the Toil work directory (which may
be on a shared file system) with names containing both the Toil and
batch system job IDs, for ease of debugging job failures.
:param: int toil_job_id : The unique id that Toil gives a job.
:param: cluster_job_id : What the cluster, for example, GridEngine, uses as its internal job id.
:param: string std : The provenance of the stream (for example: 'err' for 'stderr' or 'out' for 'stdout')
:rtype: string : Formatted filename; however if self.config.noStdOutErr is true,
returns '/dev/null' or equivalent.
"""
if self.config.noStdOutErr:
return os.devnull
fileName: str = f'toil_{self.config.workflowID}.{toil_job_id}.{cluster_job_id}.{std}.log'
workDir: str = Toil.getToilWorkDir(self.config.workDir)
return os.path.join(workDir, fileName)
@staticmethod
def workerCleanup(info: WorkerCleanupInfo) -> None:
"""
Cleans up the worker node on batch system shutdown. Also see :meth:`supportsWorkerCleanup`.
:param WorkerCleanupInfo info: A named tuple consisting of all the relevant information
for cleaning up the worker.
"""
assert isinstance(info, WorkerCleanupInfo)
workflowDir = Toil.getLocalWorkflowDir(info.workflowID, info.workDir)
DeferredFunctionManager.cleanupWorker(workflowDir)
workflowDirContents = os.listdir(workflowDir)
AbstractFileStore.shutdownFileStore(workflowDir, info.workflowID)
if (info.cleanWorkDir == 'always'
or info.cleanWorkDir in ('onSuccess', 'onError')
and workflowDirContents in ([], [cacheDirName(info.workflowID)])):
shutil.rmtree(workflowDir, ignore_errors=True)
class NodeInfo:
"""
The coresUsed attribute is a floating point value between 0 (all cores idle) and 1 (all cores
busy), reflecting the CPU load of the node.
The memoryUsed attribute is a floating point value between 0 (no memory used) and 1 (all memory
used), reflecting the memory pressure on the node.
The coresTotal and memoryTotal attributes are the node's resources, not just the used resources
The requestedCores and requestedMemory attributes are all the resources that Toil Jobs have reserved on the
node, regardless of whether the resources are actually being used by the Jobs.
The workers attribute is an integer reflecting the number of workers currently active workers
on the node.
"""
def __init__(self, coresUsed: float, memoryUsed: float,
coresTotal: float, memoryTotal: int,
requestedCores: float, requestedMemory: int,
workers: int) -> None:
self.coresUsed = coresUsed
self.memoryUsed = memoryUsed
self.coresTotal = coresTotal
self.memoryTotal = memoryTotal
self.requestedCores = requestedCores
self.requestedMemory = requestedMemory
self.workers = workers
class AbstractScalableBatchSystem(AbstractBatchSystem):
"""
A batch system that supports a variable number of worker nodes. Used by :class:`toil.
provisioners.clusterScaler.ClusterScaler` to scale the number of worker nodes in the cluster
up or down depending on overall load.
"""
@abstractmethod
def getNodes(self, preemptable: Optional[bool] = None) -> Dict[str, NodeInfo]:
"""
Returns a dictionary mapping node identifiers of preemptable or non-preemptable nodes to
NodeInfo objects, one for each node.
:param preemptable: If True (False) only (non-)preemptable nodes will be returned.
If None, all nodes will be returned.
"""
raise NotImplementedError()
@abstractmethod
def nodeInUse(self, nodeIP: str) -> bool:
"""
Can be used to determine if a worker node is running any tasks. If the node doesn't
exist, this function should simply return False.
:param nodeIP: The worker node's private IP address
:return: True if the worker node has been issued any tasks, else False
"""
raise NotImplementedError()
# TODO: May be unused!
@abstractmethod
@contextmanager
def nodeFiltering(self, filter: Optional[Callable[[NodeInfo], bool]]) -> Iterator[None]:
"""
Used to prevent races in autoscaling where:
1) nodes have reported to the autoscaler as having no jobs
2) the scaler decides to terminate these nodes; in parallel, the batch system assigns jobs to the same nodes
3) the scaler terminates nodes, resulting in job failures for all jobs on those nodes.
Call this method prior to node termination to ensure that nodes being considered for termination are not
assigned new jobs. Call the method again passing None as the filter to disable the filtering
after node termination is done.
:param filter: This will be used as a filter on nodes considered when assigning new jobs.
After this context manager exits the filter should be removed
"""
raise NotImplementedError()
@abstractmethod
def ignoreNode(self, nodeAddress: str) -> None:
"""
Stop sending jobs to this node. Used in autoscaling
when the autoscaler is ready to terminate a node, but
jobs are still running. This allows the node to be terminated
after the current jobs have finished.
:param nodeAddress: IP address of node to ignore.
"""
raise NotImplementedError()
@abstractmethod
def unignoreNode(self, nodeAddress: str) -> None:
"""
Stop ignoring this address, presumably after
a node with this address has been terminated. This allows for the
possibility of a new node having the same address as a terminated one.
"""
raise NotImplementedError()
class InsufficientSystemResources(Exception):
pass
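# --- Illustrative sketch (an addition, not part of toil) ---
# A minimal, do-nothing subclass showing which members a concrete batch system
# must provide to satisfy the interface defined above. It assumes, as elsewhere
# in toil, that JobDescription exposes memory/cores/disk requirements; it is a
# skeleton for illustration, not a usable backend.
class NullBatchSystem(BatchSystemSupport):
    @classmethod
    def supportsAutoDeployment(cls) -> bool:
        return False

    @classmethod
    def supportsWorkerCleanup(cls) -> bool:
        return False

    def issueBatchJob(self, jobDesc: JobDescription, job_environment: Optional[Dict[str, str]] = None) -> int:
        self.checkResourceRequest(jobDesc.memory, jobDesc.cores, jobDesc.disk)
        return 0  # a real system would hand back a unique job ID

    def killBatchJobs(self, jobIDs: List[int]) -> None:
        pass

    def getIssuedBatchJobIDs(self) -> List[int]:
        return []

    def getRunningBatchJobIDs(self) -> Dict[int, float]:
        return {}

    def getUpdatedBatchJob(self, maxWait: int) -> Optional[UpdatedBatchJobInfo]:
        return None

    def shutdown(self) -> None:
        pass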
| 42.613861
| 188
| 0.660037
| 2,646
| 21,520
| 5.345805
| 0.239229
| 0.020997
| 0.009897
| 0.02029
| 0.12796
| 0.114811
| 0.107741
| 0.096854
| 0.091481
| 0.088936
| 0
| 0.00187
| 0.279275
| 21,520
| 504
| 189
| 42.698413
| 0.910123
| 0.535781
| 0
| 0.215116
| 0
| 0
| 0.093696
| 0.018418
| 0
| 0
| 0
| 0.005952
| 0.011628
| 1
| 0.145349
| false
| 0.017442
| 0.075581
| 0
| 0.372093
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d61d078db118ba78d2af9ae995c1fa84aa2f450
| 2,306
|
py
|
Python
|
arfit/cp_utils.py
|
farr/arfit
|
7ff6def331ef98f43f623da2d9867d1ac967448b
|
[
"MIT"
] | 5
|
2015-04-29T21:46:52.000Z
|
2021-05-13T04:59:23.000Z
|
arfit/cp_utils.py
|
afcarl/arfit
|
7ff6def331ef98f43f623da2d9867d1ac967448b
|
[
"MIT"
] | null | null | null |
arfit/cp_utils.py
|
afcarl/arfit
|
7ff6def331ef98f43f623da2d9867d1ac967448b
|
[
"MIT"
] | 2
|
2015-12-03T12:08:32.000Z
|
2018-05-26T16:20:31.000Z
|
import carmcmc as cm
from gatspy.periodic import LombScargleFast
import matplotlib.pyplot as plt
import numpy as np
def csample_from_files(datafile, chainfile, p, q):
data = np.loadtxt(datafile)
times, tind = np.unique(data[:,0], return_index=True)
data = data[tind, :]
chain = np.loadtxt(chainfile)
assert chain.shape[1] == p + q + 5, 'dimension mismatch'
return cm.CarmaSample(data[:,0], data[:,1], data[:,2], None, q=q, trace=chain[:,:-2], loglike=chain[:,-2], logpost=chain[:,-1])
def normalised_lombscargle(ts, ys, dys, oversampling=5, nyquist_factor=3):
model = LombScargleFast().fit(ts, ys, dys)
pers, pows = model.periodogram_auto(oversampling=oversampling, nyquist_factor=nyquist_factor)
fs = 1.0/pers
T = np.max(ts) - np.min(ts)
mu = 1/T*np.trapz(ys, ts)
s2 = 1/T*np.trapz(np.square(ys-mu), ts)
return fs, s2*pows/np.trapz(pows, fs)
def plot_psd_sample_data(sample, oversampling=5, nyquist_factor=3):
psd_low, psd_high, psd_med, fs = sample.plot_power_spectrum(doShow=False)
plt.clf()
plt.loglog(fs, psd_med, '-b', alpha=0.33)
plt.fill_between(fs, psd_low, psd_high, color='b', alpha=0.17)
fs, psd = normalised_lombscargle(sample.time, sample.y, sample.ysig, oversampling=oversampling, nyquist_factor=nyquist_factor)
bw = fs[-1] - fs[0]
T = sample.time[-1] - sample.time[0]
s2 = 1/T*np.trapz(np.square(sample.ysig), sample.time)
noise_level = s2/bw
levels = noise_level*np.sqrt(sample.get_samples('measerr_scale'))
plt.axhline(np.median(levels), color='g', alpha=0.33)
plt.fill_between(fs, np.percentile(levels, 84)+0*fs, np.percentile(levels, 16)+0*fs, color='g', alpha=0.17)
plt.loglog(fs, psd, '-r', alpha=0.33)
def plot_psd_sample_draw(sample, loc='upper left', oversampling=5, nyquist_factor=3):
fs, psd = normalised_lombscargle(sample.time, sample.y, sample.ysig, oversampling=oversampling, nyquist_factor=nyquist_factor)
ys_draw = sample.predict(sample.time, bestfit='random')[0]
fs, dpsd = normalised_lombscargle(sample.time, ys_draw, sample.ysig, oversampling=oversampling, nyquist_factor=nyquist_factor)
plt.loglog(fs, psd, '-k', label='Data', alpha=0.5)
plt.loglog(fs, dpsd, '-b', label='Prediction', alpha=0.5)
plt.legend(loc=loc)
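# --- Illustrative usage sketch (an addition, not part of arfit) ---
# Rough example of feeding normalised_lombscargle() synthetic, irregularly
# sampled data; the sinusoid frequency and noise level are arbitrary choices.
if __name__ == "__main__":
    ts = np.sort(np.random.uniform(0, 100, 500))
    ys = np.sin(2*np.pi*0.1*ts) + 0.1*np.random.randn(ts.size)
    dys = 0.1*np.ones_like(ts)
    fs, psd = normalised_lombscargle(ts, ys, dys)
    plt.loglog(fs, psd)
    plt.xlabel('Frequency')
    plt.ylabel('Normalised PSD')
    plt.show()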
| 37.193548
| 131
| 0.691674
| 359
| 2,306
| 4.328691
| 0.320334
| 0.092021
| 0.079794
| 0.095238
| 0.310811
| 0.258687
| 0.226512
| 0.171171
| 0.132561
| 0.132561
| 0
| 0.027509
| 0.148742
| 2,306
| 61
| 132
| 37.803279
| 0.764137
| 0
| 0
| 0.05
| 0
| 0
| 0.031223
| 0
| 0
| 0
| 0
| 0
| 0.025
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d65143322eb65bf3b9638c414999e21eb0323db
| 1,319
|
py
|
Python
|
pdiffcopy/hashing.py
|
xolox/python-pdiffcopy
|
ed765af92c0c0823818d545e61384753912a5725
|
[
"MIT"
] | 5
|
2020-03-07T00:01:24.000Z
|
2020-12-03T03:44:26.000Z
|
pdiffcopy/hashing.py
|
xolox/python-pdiffcopy
|
ed765af92c0c0823818d545e61384753912a5725
|
[
"MIT"
] | null | null | null |
pdiffcopy/hashing.py
|
xolox/python-pdiffcopy
|
ed765af92c0c0823818d545e61384753912a5725
|
[
"MIT"
] | null | null | null |
# Fast large file synchronization inspired by rsync.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: March 6, 2020
# URL: https://pdiffcopy.readthedocs.io
"""Parallel hashing of files using :mod:`multiprocessing` and :mod:`pdiffcopy.mp`."""
# Standard library modules.
import functools
import hashlib
import os
# External dependencies.
from six.moves import range
# Modules included in our package.
from pdiffcopy.mp import WorkerPool
# Public identifiers that require documentation.
__all__ = ("compute_hashes", "hash_worker")
def compute_hashes(filename, block_size, method, concurrency):
"""Compute checksums of a file in blocks (parallel)."""
with WorkerPool(
concurrency=concurrency,
generator_fn=functools.partial(range, 0, os.path.getsize(filename), block_size),
worker_fn=functools.partial(hash_worker, block_size=block_size, filename=filename, method=method),
) as pool:
for offset, digest in pool:
yield offset, digest
def hash_worker(offset, block_size, filename, method):
"""Worker function to be run in child processes."""
with open(filename, "rb") as handle:
handle.seek(offset)
context = hashlib.new(method)
context.update(handle.read(block_size))
return offset, context.hexdigest()
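# --- Illustrative usage sketch (an addition, not part of pdiffcopy) ---
# Hedged example of driving compute_hashes(); the file name and block size are
# arbitrary. Blocks are hashed in parallel, so (offset, digest) pairs may
# arrive out of order.
if __name__ == "__main__":
    for offset, digest in compute_hashes(
        filename="/tmp/big.bin",  # assumed test file
        block_size=1024 * 1024,   # 1 MiB blocks
        method="sha1",
        concurrency=4,
    ):
        print(offset, digest)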
| 31.404762
| 106
| 0.720243
| 165
| 1,319
| 5.654545
| 0.587879
| 0.057878
| 0.036442
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00553
| 0.177407
| 1,319
| 41
| 107
| 32.170732
| 0.854378
| 0.353298
| 0
| 0
| 0
| 0
| 0.032569
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.25
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d65c01cd0ad11126b7931d199f6927d742a24e8
| 2,170
|
py
|
Python
|
pyscf/nao/test/test_0003_na2_nao.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 3
|
2021-02-28T00:52:53.000Z
|
2021-03-01T06:23:33.000Z
|
pyscf/nao/test/test_0003_na2_nao.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 36
|
2018-08-22T19:44:03.000Z
|
2020-05-09T10:02:36.000Z
|
pyscf/nao/test/test_0003_na2_nao.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 4
|
2018-02-14T16:28:28.000Z
|
2019-08-12T16:40:30.000Z
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from pyscf.nao.m_siesta_utils import get_siesta_command, get_pseudo
class KnowValues(unittest.TestCase):
def test_siesta2sv_df(self):
import subprocess
import os
siesta_fdf = """
xml.write .true.
PAO.EnergyShift 100 meV
%block ChemicalSpeciesLabel
1 11 Na
%endblock ChemicalSpeciesLabel
NumberOfAtoms 2
NumberOfSpecies 1
%block AtomicCoordinatesAndAtomicSpecies
0.77573521 0.00000000 0.00000000 1
-0.77573521 0.00000000 0.00000000 1
%endblock AtomicCoordinatesAndAtomicSpecies
MD.NumCGsteps 0
COOP.Write .true.
WriteDenchar .true.
"""
label = 'siesta'
fi = open(label+'.fdf', 'w')
print(siesta_fdf, file=fi)
fi.close()
for sp in ['Na']:
try:
os.remove(sp+'.psf')
except OSError:
pass
try:
pppath = get_pseudo(sp)
except:
print('get_pseudo('+sp+') is not working --> skipping siesta run')
return
os.symlink(pppath, sp+'.psf')
errorcode = subprocess.call(get_siesta_command(label), shell=True)
if errorcode: raise RuntimeError('siesta returned an error: {0}'.format(errorcode))
# run test system_vars
from pyscf.nao import mf
sv = mf(label=label)
self.assertEqual(sv.norbs, 10)
self.assertTrue( sv.diag_check() )
self.assertTrue( sv.overlap_check())
if __name__ == "__main__": unittest.main()
| 31.449275
| 87
| 0.657604
| 273
| 2,170
| 5.120879
| 0.553114
| 0.042918
| 0.018598
| 0.02289
| 0.040057
| 0.040057
| 0.040057
| 0
| 0
| 0
| 0
| 0.050155
| 0.25576
| 2,170
| 68
| 88
| 31.911765
| 0.81548
| 0.278802
| 0
| 0.086957
| 0
| 0
| 0.410323
| 0.042581
| 0
| 0
| 0
| 0
| 0.065217
| 1
| 0.021739
| false
| 0.021739
| 0.130435
| 0
| 0.195652
| 0.065217
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d66c8be8ed85e591a27c5733a7d2e134250bc39
| 9,393
|
py
|
Python
|
netbox/extras/forms/filtersets.py
|
cybarox/netbox
|
ea197eff5f4fe925bb354d1375912decd81752bd
|
[
"Apache-2.0"
] | null | null | null |
netbox/extras/forms/filtersets.py
|
cybarox/netbox
|
ea197eff5f4fe925bb354d1375912decd81752bd
|
[
"Apache-2.0"
] | null | null | null |
netbox/extras/forms/filtersets.py
|
cybarox/netbox
|
ea197eff5f4fe925bb354d1375912decd81752bd
|
[
"Apache-2.0"
] | null | null | null |
from django import forms
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import gettext as _
from dcim.models import DeviceRole, DeviceType, Platform, Region, Site, SiteGroup
from extras.choices import *
from extras.models import *
from extras.utils import FeatureQuery
from netbox.forms.base import NetBoxModelFilterSetForm
from tenancy.models import Tenant, TenantGroup
from utilities.forms import (
add_blank_choice, APISelectMultiple, BOOLEAN_WITH_BLANK_CHOICES, ContentTypeChoiceField,
ContentTypeMultipleChoiceField, DateTimePicker, DynamicModelMultipleChoiceField, FilterForm, MultipleChoiceField,
StaticSelect, TagFilterField,
)
from virtualization.models import Cluster, ClusterGroup, ClusterType
__all__ = (
'ConfigContextFilterForm',
'CustomFieldFilterForm',
'CustomLinkFilterForm',
'ExportTemplateFilterForm',
'JournalEntryFilterForm',
'LocalConfigContextFilterForm',
'ObjectChangeFilterForm',
'TagFilterForm',
'WebhookFilterForm',
)
class CustomFieldFilterForm(FilterForm):
fieldsets = (
(None, ('q',)),
('Attributes', ('type', 'content_types', 'weight', 'required')),
)
content_types = ContentTypeMultipleChoiceField(
queryset=ContentType.objects.all(),
limit_choices_to=FeatureQuery('custom_fields'),
required=False
)
type = MultipleChoiceField(
choices=CustomFieldTypeChoices,
required=False,
label=_('Field type')
)
weight = forms.IntegerField(
required=False
)
required = forms.NullBooleanField(
required=False,
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
class CustomLinkFilterForm(FilterForm):
fieldsets = (
(None, ('q',)),
('Attributes', ('content_type', 'enabled', 'new_window', 'weight')),
)
content_type = ContentTypeChoiceField(
queryset=ContentType.objects.all(),
limit_choices_to=FeatureQuery('custom_links'),
required=False
)
enabled = forms.NullBooleanField(
required=False,
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
new_window = forms.NullBooleanField(
required=False,
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
weight = forms.IntegerField(
required=False
)
class ExportTemplateFilterForm(FilterForm):
fieldsets = (
(None, ('q',)),
('Attributes', ('content_type', 'mime_type', 'file_extension', 'as_attachment')),
)
content_type = ContentTypeChoiceField(
queryset=ContentType.objects.all(),
limit_choices_to=FeatureQuery('export_templates'),
required=False
)
mime_type = forms.CharField(
required=False,
label=_('MIME type')
)
file_extension = forms.CharField(
required=False
)
as_attachment = forms.NullBooleanField(
required=False,
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
class WebhookFilterForm(FilterForm):
fieldsets = (
(None, ('q',)),
('Attributes', ('content_types', 'http_method', 'enabled')),
('Events', ('type_create', 'type_update', 'type_delete')),
)
content_types = ContentTypeMultipleChoiceField(
queryset=ContentType.objects.all(),
limit_choices_to=FeatureQuery('webhooks'),
required=False
)
http_method = MultipleChoiceField(
choices=WebhookHttpMethodChoices,
required=False,
label=_('HTTP method')
)
enabled = forms.NullBooleanField(
required=False,
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
type_create = forms.NullBooleanField(
required=False,
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
type_update = forms.NullBooleanField(
required=False,
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
type_delete = forms.NullBooleanField(
required=False,
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
class TagFilterForm(FilterForm):
model = Tag
content_type_id = ContentTypeMultipleChoiceField(
queryset=ContentType.objects.filter(FeatureQuery('tags').get_query()),
required=False,
label=_('Tagged object type')
)
class ConfigContextFilterForm(FilterForm):
fieldsets = (
(None, ('q', 'tag_id')),
('Location', ('region_id', 'site_group_id', 'site_id')),
('Device', ('device_type_id', 'platform_id', 'role_id')),
('Cluster', ('cluster_type_id', 'cluster_group_id', 'cluster_id')),
('Tenant', ('tenant_group_id', 'tenant_id'))
)
region_id = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
required=False,
label=_('Regions')
)
site_group_id = DynamicModelMultipleChoiceField(
queryset=SiteGroup.objects.all(),
required=False,
label=_('Site groups')
)
site_id = DynamicModelMultipleChoiceField(
queryset=Site.objects.all(),
required=False,
label=_('Sites')
)
device_type_id = DynamicModelMultipleChoiceField(
queryset=DeviceType.objects.all(),
required=False,
label=_('Device types')
)
role_id = DynamicModelMultipleChoiceField(
queryset=DeviceRole.objects.all(),
required=False,
label=_('Roles')
)
platform_id = DynamicModelMultipleChoiceField(
queryset=Platform.objects.all(),
required=False,
label=_('Platforms')
)
cluster_type_id = DynamicModelMultipleChoiceField(
queryset=ClusterType.objects.all(),
required=False,
label=_('Cluster types'),
fetch_trigger='open'
)
cluster_group_id = DynamicModelMultipleChoiceField(
queryset=ClusterGroup.objects.all(),
required=False,
label=_('Cluster groups')
)
cluster_id = DynamicModelMultipleChoiceField(
queryset=Cluster.objects.all(),
required=False,
label=_('Clusters')
)
tenant_group_id = DynamicModelMultipleChoiceField(
queryset=TenantGroup.objects.all(),
required=False,
label=_('Tenant groups')
)
tenant_id = DynamicModelMultipleChoiceField(
queryset=Tenant.objects.all(),
required=False,
label=_('Tenant')
)
tag_id = DynamicModelMultipleChoiceField(
queryset=Tag.objects.all(),
required=False,
label=_('Tags')
)
class LocalConfigContextFilterForm(forms.Form):
local_context_data = forms.NullBooleanField(
required=False,
label=_('Has local config context data'),
widget=StaticSelect(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
class JournalEntryFilterForm(NetBoxModelFilterSetForm):
model = JournalEntry
fieldsets = (
(None, ('q', 'tag')),
('Creation', ('created_before', 'created_after', 'created_by_id')),
('Attributes', ('assigned_object_type_id', 'kind'))
)
created_after = forms.DateTimeField(
required=False,
label=_('After'),
widget=DateTimePicker()
)
created_before = forms.DateTimeField(
required=False,
label=_('Before'),
widget=DateTimePicker()
)
created_by_id = DynamicModelMultipleChoiceField(
queryset=User.objects.all(),
required=False,
label=_('User'),
widget=APISelectMultiple(
api_url='/api/users/users/',
)
)
assigned_object_type_id = DynamicModelMultipleChoiceField(
queryset=ContentType.objects.all(),
required=False,
label=_('Object Type'),
widget=APISelectMultiple(
api_url='/api/extras/content-types/',
)
)
kind = forms.ChoiceField(
choices=add_blank_choice(JournalEntryKindChoices),
required=False,
widget=StaticSelect()
)
tag = TagFilterField(model)
class ObjectChangeFilterForm(FilterForm):
model = ObjectChange
fieldsets = (
(None, ('q',)),
('Time', ('time_before', 'time_after')),
('Attributes', ('action', 'user_id', 'changed_object_type_id')),
)
time_after = forms.DateTimeField(
required=False,
label=_('After'),
widget=DateTimePicker()
)
time_before = forms.DateTimeField(
required=False,
label=_('Before'),
widget=DateTimePicker()
)
action = forms.ChoiceField(
choices=add_blank_choice(ObjectChangeActionChoices),
required=False,
widget=StaticSelect()
)
user_id = DynamicModelMultipleChoiceField(
queryset=User.objects.all(),
required=False,
label=_('User'),
widget=APISelectMultiple(
api_url='/api/users/users/',
)
)
changed_object_type_id = DynamicModelMultipleChoiceField(
queryset=ContentType.objects.all(),
required=False,
label=_('Object Type'),
widget=APISelectMultiple(
api_url='/api/extras/content-types/',
)
)
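Every form above follows the same recipe: declare optional filter fields, then group them into fieldsets of (heading, field_names) tuples that drive the UI layout. A minimal sketch of a hypothetical filter form built the same way (Widget and its 'enabled' flag are invented for illustration only):

    class WidgetFilterForm(FilterForm):
        fieldsets = (
            (None, ('q',)),
            ('Attributes', ('enabled',)),
        )
        enabled = forms.NullBooleanField(
            required=False,
            widget=StaticSelect(choices=BOOLEAN_WITH_BLANK_CHOICES)
        )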
| 29.261682
| 117
| 0.639519
| 783
| 9,393
| 7.453384
| 0.189017
| 0.093557
| 0.077108
| 0.063057
| 0.447395
| 0.39085
| 0.347498
| 0.332077
| 0.322995
| 0.275017
| 0
| 0
| 0.251144
| 9,393
| 320
| 118
| 29.353125
| 0.829684
| 0
| 0
| 0.366667
| 0
| 0
| 0.118918
| 0.025232
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04
| 0
| 0.246667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d672137217c3f190c65f38cc034a58e8ab7815b
| 1,440
|
py
|
Python
|
dns/rdtypes/ANY/__init__.py
|
Ashiq5/dnspython
|
5449af5318d88bada34f661247f3bcb16f58f057
|
[
"ISC"
] | null | null | null |
dns/rdtypes/ANY/__init__.py
|
Ashiq5/dnspython
|
5449af5318d88bada34f661247f3bcb16f58f057
|
[
"ISC"
] | null | null | null |
dns/rdtypes/ANY/__init__.py
|
Ashiq5/dnspython
|
5449af5318d88bada34f661247f3bcb16f58f057
|
[
"ISC"
] | null | null | null |
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Class ANY (generic) rdata type classes."""
__all__ = [
'AFSDB',
'AMTRELAY',
'AVC',
'CAA',
'CDNSKEY',
'CDS',
'CERT',
'CNAME',
'CSYNC',
'DLV',
'DNAME',
'DNSKEY',
'DS',
'EUI48',
'EUI64',
'GPOS',
'HINFO',
'HIP',
'ISDN',
'LOC',
'MX',
'NINFO',
'NS',
'NSEC',
'NSEC3',
'NSEC3PARAM',
'OPENPGPKEY',
'OPT',
'PTR',
'RP',
'RRSIG',
'RT',
'SMIMEA',
'SOA',
'SPF',
'SSHFP',
'TKEY',
'TLSA',
'TSIG',
'TXT',
'URI',
'X25',
]
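These are the rdata implementations dnspython registers for the generic (class-independent) types. A small sketch of how one of them is reached through the public API (hedged against the dnspython 2.x interface; the module name in the comment is the expected result):

    import dns.rdata, dns.rdataclass, dns.rdatatype
    txt = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"hello"')
    print(type(txt).__module__)   # dns.rdtypes.ANY.TXT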
| 22.5
| 75
| 0.620833
| 180
| 1,440
| 4.944444
| 0.705556
| 0.040449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022367
| 0.254861
| 1,440
| 63
| 76
| 22.857143
| 0.807083
| 0.605556
| 0
| 0
| 0
| 0
| 0.326642
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d67812696614c7eac2050cda2d994e16e9201d7
| 10,519
|
py
|
Python
|
01_test.py
|
KhubbatulinMark/DCase2020-Task-2-on-Wigner-Ville-transform
|
6653d7abbaafe09fb17768d9902bb77db24945d4
|
[
"MIT"
] | 3
|
2020-09-18T10:33:37.000Z
|
2020-11-04T12:53:50.000Z
|
01_test.py
|
KhubbatulinMark/DCase2020-Task-2-on-Wigner-Ville-transform
|
6653d7abbaafe09fb17768d9902bb77db24945d4
|
[
"MIT"
] | 4
|
2020-09-26T01:07:55.000Z
|
2022-02-10T01:30:27.000Z
|
01_test.py
|
KhubbatulinMark/DCase2020-Task-2-on-Wigner-Ville-transform
|
6653d7abbaafe09fb17768d9902bb77db24945d4
|
[
"MIT"
] | null | null | null |
"""
@file 01_test.py
@brief Script for test
@author Toshiki Nakamura, Yuki Nikaido, and Yohei Kawaguchi (Hitachi Ltd.)
Copyright (C) 2020 Hitachi, Ltd. All right reserved.
"""
########################################################################
# import default python-library
########################################################################
import os
import glob
import csv
import re
import itertools
import sys
########################################################################
########################################################################
# import additional python-library
########################################################################
import numpy
# from import
from tqdm import tqdm
from sklearn import metrics
# original lib
import common as com
import keras_model
########################################################################
########################################################################
# load parameter.yaml
########################################################################
param = com.yaml_load()
#######################################################################
########################################################################
# def
########################################################################
def save_csv(save_file_path,
save_data):
with open(save_file_path, "w", newline="") as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerows(save_data)
def get_machine_id_list_for_test(target_dir,
dir_name="test",
ext="json"):
"""
target_dir : str
base directory path of "dev_data" or "eval_data"
test_dir_name : str (default="test")
directory containing test data
ext : str (default="json")
file extension of the data files
return :
machine_id_list : list [ str ]
list of machine IDs extracted from the names of test files
"""
# create test files
dir_path = os.path.abspath("{dir}/{dir_name}/*.{ext}".format(dir=target_dir, dir_name=dir_name, ext=ext))
file_paths = sorted(glob.glob(dir_path))
# extract id
machine_id_list = sorted(list(set(itertools.chain.from_iterable(
[re.findall('id_[0-9][0-9]', ext_id) for ext_id in file_paths]))))
return machine_id_list
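# For example (hypothetical file names), a test directory containing
# normal_id_01_00000000.json and anomaly_id_02_00000005.json would
# yield machine_id_list == ['id_01', 'id_02'].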
def test_file_list_generator(target_dir,
id_name,
dir_name="test",
prefix_normal="normal",
prefix_anomaly="anomaly",
ext="json"):
"""
target_dir : str
base directory path of the dev_data or eval_data
id_name : str
id of the target files in <<test_dir_name>> directory
dir_name : str (default="test")
directory containing test data
prefix_normal : str (default="normal")
normal directory name
prefix_anomaly : str (default="anomaly")
anomaly directory name
ext : str (default="json")
file extension of the data files
return :
if the mode is "development":
test_files : list [ str ]
file list for test
test_labels : list [ boolean ]
label info. list for test
* normal/anomaly = 0/1
if the mode is "evaluation":
test_files : list [ str ]
file list for test
"""
com.logger.info("target_dir : {}".format(target_dir+"_"+id_name))
# development
if mode:
normal_files = sorted(
glob.glob("{dir}/{dir_name}/{prefix_normal}_{id_name}*.{ext}".format(dir=target_dir,
dir_name=dir_name,
prefix_normal=prefix_normal,
id_name=id_name,
ext=ext)))
normal_labels = numpy.zeros(len(normal_files))
anomaly_files = sorted(
glob.glob("{dir}/{dir_name}/{prefix_anomaly}_{id_name}*.{ext}".format(dir=target_dir,
dir_name=dir_name,
prefix_anomaly=prefix_anomaly,
id_name=id_name,
ext=ext)))
anomaly_labels = numpy.ones(len(anomaly_files))
files = numpy.concatenate((normal_files, anomaly_files), axis=0)
labels = numpy.concatenate((normal_labels, anomaly_labels), axis=0)
com.logger.info("test_file num : {num}".format(num=len(files)))
if len(files) == 0:
com.logger.exception("no_wav_file!!")
print("\n========================================")
# evaluation
else:
files = sorted(
glob.glob("{dir}/{dir_name}/*{id_name}*.{ext}".format(dir=target_dir,
dir_name=dir_name,
id_name=id_name,
ext=ext)))
labels = None
com.logger.info("test_file num : {num}".format(num=len(files)))
if len(files) == 0:
com.logger.exception("no_wav_file!!")
print("\n=========================================")
return files, labels
########################################################################
########################################################################
# main 01_test.py
########################################################################
if __name__ == "__main__":
# check mode
# "development": mode == True
# "evaluation": mode == False
mode = com.command_line_chk()
if mode is None:
sys.exit(-1)
# make output result directory
os.makedirs(param["result_directory"], exist_ok=True)
# load base directory
dirs = com.select_dirs(param=param, mode=mode)
# initialize lines in csv for AUC and pAUC
csv_lines = []
# loop of the base directory
for idx, target_dir in enumerate(dirs):
print("\n===========================")
print("[{idx}/{total}] {dirname}".format(dirname=target_dir, idx=idx+1, total=len(dirs)))
machine_type = os.path.split(target_dir)[1]
print("============== MODEL LOAD ==============")
# set model path
model_file = "{model}/model_{machine_type}.hdf5".format(model=param["model_directory"],
machine_type=machine_type)
# load model file
if not os.path.exists(model_file):
com.logger.error("{} model not found ".format(machine_type))
sys.exit(-1)
model = keras_model.load_model(model_file)
model.summary()
if mode:
# results by type
csv_lines.append([machine_type])
csv_lines.append(["id", "AUC", "pAUC"])
performance = []
machine_id_list = get_machine_id_list_for_test(target_dir)
print(machine_id_list)
for id_str in machine_id_list:
# load test file
test_files, y_true = test_file_list_generator(target_dir, id_str)
# setup anomaly score file path
anomaly_score_csv = "{result}/anomaly_score_{machine_type}_{id_str}.csv".format(
result=param["result_directory"],
machine_type=machine_type,
id_str=id_str)
anomaly_score_list = []
print("\n============== BEGIN TEST FOR A MACHINE ID ==============")
y_pred = [0. for k in test_files]
for file_idx, file_path in tqdm(enumerate(test_files), total=len(test_files)):
try:
data = com.file_to_vector_array(file_path,
n_mels=param["feature"]["n_mels"],
frames=param["feature"]["frames"],
n_fft=param["feature"]["n_fft"],
hop_length=param["feature"]["hop_length"],
power=param["feature"]["power"])
errors = numpy.mean(numpy.square(data - model.predict(data)), axis=1)
y_pred[file_idx] = numpy.mean(errors)
anomaly_score_list.append([os.path.basename(file_path), y_pred[file_idx]])
except Exception:
com.logger.error("file broken!!: {}".format(file_path))
# save anomaly score
save_csv(save_file_path=anomaly_score_csv, save_data=anomaly_score_list)
com.logger.info("anomaly score result -> {}".format(anomaly_score_csv))
if mode:
# append AUC and pAUC to lists
auc = metrics.roc_auc_score(y_true, y_pred)
p_auc = metrics.roc_auc_score(y_true, y_pred, max_fpr=param["max_fpr"])
csv_lines.append([id_str.split("_", 1)[1], auc, p_auc])
performance.append([auc, p_auc])
com.logger.info("AUC : {}".format(auc))
com.logger.info("pAUC : {}".format(p_auc))
print("\n============ END OF TEST FOR A MACHINE ID ============")
if mode:
# calculate averages for AUCs and pAUCs
averaged_performance = numpy.mean(numpy.array(performance, dtype=float), axis=0)
csv_lines.append(["Average"] + list(averaged_performance))
csv_lines.append([])
if mode:
# output results
result_path = "{result}/{file_name}".format(result=param["result_directory"], file_name=param["result_file"])
com.logger.info("AUC and pAUC results -> {}".format(result_path))
save_csv(save_file_path=result_path, save_data=csv_lines)
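A hedged invocation sketch: mode selection is delegated to com.command_line_chk(), which in the DCASE 2020 Task 2 baseline this script follows parses -d (development) and -e (evaluation) flags; treat the exact flags as an assumption.

    # development mode: computes AUC/pAUC against the labeled test split
    python 01_test.py -d
    # evaluation mode: writes anomaly-score CSVs only
    python 01_test.py -e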
| 42.587045
| 118
| 0.456412
| 1,032
| 10,519
| 4.416667
| 0.197674
| 0.026108
| 0.019746
| 0.017552
| 0.285432
| 0.224002
| 0.212154
| 0.191751
| 0.148749
| 0.097411
| 0
| 0.004042
| 0.341477
| 10,519
| 246
| 119
| 42.760163
| 0.653963
| 0.166746
| 0
| 0.2
| 0
| 0
| 0.127278
| 0.046739
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023077
| false
| 0
| 0.084615
| 0
| 0.123077
| 0.061538
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d68067044bd41e0c94f3b4e115e6a6243c834c1
| 1,247
|
py
|
Python
|
src/text_split/split.py
|
i1123581321/word_split
|
6401cdc37f58aa8718793dd7cb9bf4d3a4b690a4
|
[
"MIT"
] | null | null | null |
src/text_split/split.py
|
i1123581321/word_split
|
6401cdc37f58aa8718793dd7cb9bf4d3a4b690a4
|
[
"MIT"
] | null | null | null |
src/text_split/split.py
|
i1123581321/word_split
|
6401cdc37f58aa8718793dd7cb9bf4d3a4b690a4
|
[
"MIT"
] | null | null | null |
import argparse
import os
parser = argparse.ArgumentParser(description="a simple parser")
parser.add_argument("filename", type=str)
parser.add_argument("lineno", nargs="+", type=int)
parser.add_argument("--same_length", action=argparse.BooleanOptionalAction)
def main():
args = parser.parse_args()
filename = args.filename
linenos = args.lineno
same_length = args.same_length
linenos = list(map(lambda x: x - 1, linenos))
linenos.sort()
results = []
with open(filename, "r", encoding="utf-8") as f:
content = f.readlines()
if not same_length:
start = 0
for lineno in linenos:
results.append("".join(content[start:lineno]))
start = lineno
results.append("".join(content[start:]))
else:
lineno = linenos[0] + 1 if linenos[0] else 100000
start = 0
while start < len(content):
results.append("".join(content[start: start + lineno]))
start += lineno
name, ext = os.path.splitext(filename)
for i, result in enumerate(results):
with open(f"{name}-{i + 1:02}{ext}", "w", encoding="utf-8") as f:
f.write(result)
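A short usage sketch (notes.txt is a hypothetical input). Splitting before lines 10 and 20 yields three numbered pieces with the original extension preserved; with --same_length the first number instead fixes a uniform chunk size:

    python split.py notes.txt 10 20
    # -> notes-01.txt (lines 1-9), notes-02.txt (lines 10-19), notes-03.txt (line 20 onward)
    python split.py notes.txt 50 --same_length
    # -> notes-01.txt, notes-02.txt, ... each holding 50 consecutive lines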
| 30.414634
| 77
| 0.585405
| 149
| 1,247
| 4.845638
| 0.42953
| 0.055402
| 0.070637
| 0.099723
| 0.16205
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018994
| 0.282277
| 1,247
| 40
| 78
| 31.175
| 0.78771
| 0
| 0
| 0.0625
| 0
| 0
| 0.061748
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.0625
| 0
| 0.09375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d68e4a866db0948470d395c5ba6d5ad5676d177
| 23,037
|
py
|
Python
|
src/sentry/models/event.py
|
Ali-Tahir/sentry
|
aa7b306c5ea671ac002a3524982563679557cb31
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/models/event.py
|
Ali-Tahir/sentry
|
aa7b306c5ea671ac002a3524982563679557cb31
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/models/event.py
|
Ali-Tahir/sentry
|
aa7b306c5ea671ac002a3524982563679557cb31
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import six
import string
import warnings
import pytz
from collections import OrderedDict
from dateutil.parser import parse as parse_date
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from hashlib import md5
from semaphore.processing import StoreNormalizer
from sentry import eventtypes
from sentry.db.models import (
BoundedBigIntegerField,
BoundedIntegerField,
Model,
NodeData,
NodeField,
sane_repr,
)
from sentry.db.models.manager import EventManager
from sentry.interfaces.base import get_interfaces
from sentry.utils import json
from sentry.utils.cache import memoize
from sentry.utils.canonical import CanonicalKeyDict, CanonicalKeyView
from sentry.utils.safe import get_path
from sentry.utils.strings import truncatechars
class EventDict(CanonicalKeyDict):
"""
Creating an instance of this dictionary will send the event through basic
(Rust-based) type/schema validation called "re-normalization".
This is used as a wrapper type for `Event.data` such that creating an event
object (or loading it from the DB) will ensure the data fits the type
schema.
"""
def __init__(self, data, skip_renormalization=False, **kwargs):
is_renormalized = isinstance(data, EventDict) or (
isinstance(data, NodeData) and isinstance(data.data, EventDict)
)
if not skip_renormalization and not is_renormalized:
normalizer = StoreNormalizer(is_renormalize=True, enable_trimming=False)
data = normalizer.normalize_event(dict(data))
CanonicalKeyDict.__init__(self, data, **kwargs)
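# Illustration (hypothetical payload): construction runs the Rust-based
# re-normalization once; re-wrapping an already-normalized dict skips it.
#
#   data = EventDict({"event_id": "abc", "message": "boom"})  # normalized here
#   same = EventDict(data)  # detected as renormalized; no second pass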
class EventCommon(object):
"""
Methods and properties common to both Event and SnubaEvent.
"""
@classmethod
def generate_node_id(cls, project_id, event_id):
"""
Returns a deterministic node_id for this event based on the project_id
and event_id which together are globally unique. The event body should
be saved under this key in nodestore so it can be retrieved using the
same generated id when we only have project_id and event_id.
"""
return md5("{}:{}".format(project_id, event_id)).hexdigest()
# TODO (alex) We need a better way to cache these properties. functools32
# doesn't quite do the trick as there is a reference bug with unsaved
# models. But the current _group_cache thing is also clunky because these
# properties need to be stripped out in __getstate__.
@property
def group(self):
from sentry.models import Group
if not self.group_id:
return None
if not hasattr(self, "_group_cache"):
self._group_cache = Group.objects.get(id=self.group_id)
return self._group_cache
@group.setter
def group(self, group):
self.group_id = group.id
self._group_cache = group
@property
def project(self):
from sentry.models import Project
if not hasattr(self, "_project_cache"):
self._project_cache = Project.objects.get(id=self.project_id)
return self._project_cache
@project.setter
def project(self, project):
if project is None:
self.project_id = None
else:
self.project_id = project.id
self._project_cache = project
def get_interfaces(self):
return CanonicalKeyView(get_interfaces(self.data))
@memoize
def interfaces(self):
return self.get_interfaces()
def get_interface(self, name):
return self.interfaces.get(name)
def get_legacy_message(self):
# TODO(mitsuhiko): remove this code once it's unused. It's still
# being used by plugin code, and once the message rename is through,
# plugins should instead switch to the actual message attribute, or
# this method could return what currently is real_message.
return (
get_path(self.data, "logentry", "formatted")
or get_path(self.data, "logentry", "message")
or self.message
)
def get_event_type(self):
"""
Return the type of this event.
See ``sentry.eventtypes``.
"""
return self.data.get("type", "default")
def get_event_metadata(self):
"""
Return the metadata of this event.
See ``sentry.eventtypes``.
"""
# For some inexplicable reason we have some cases where the data
# is completely empty. In that case we want to hobble along
# further.
return self.data.get("metadata") or {}
def get_grouping_config(self):
"""Returns the event grouping config."""
from sentry.grouping.api import get_grouping_config_dict_for_event_data
return get_grouping_config_dict_for_event_data(self.data, self.project)
def get_hashes(self, force_config=None):
"""
Returns the calculated hashes for the event. This uses the stored
information if available. Grouping hashes will take into account
fingerprinting and checksums.
"""
# If we have hashes stored in the data we use them, otherwise we
# fall back to generating new ones from the data. We can only use
# this if we do not force a different config.
if force_config is None:
hashes = self.data.get("hashes")
if hashes is not None:
return hashes
return filter(
None, [x.get_hash() for x in self.get_grouping_variants(force_config).values()]
)
def get_grouping_variants(self, force_config=None, normalize_stacktraces=False):
"""
This is similar to `get_hashes` but will instead return the
grouping components for each variant in a dictionary.
If `normalize_stacktraces` is set to `True` then the event data will be
modified for `in_app` in addition to event variants being created. This
means that after calling that function the event data has been modified
in place.
"""
from sentry.grouping.api import get_grouping_variants_for_event, load_grouping_config
from sentry.stacktraces.processing import normalize_stacktraces_for_grouping
# Forcing configs has two separate modes. One is where just the
# config ID is given in which case it's merged with the stored or
# default config dictionary
if force_config is not None:
if isinstance(force_config, six.string_types):
stored_config = self.get_grouping_config()
config = dict(stored_config)
config["id"] = force_config
else:
config = force_config
# Otherwise we just use the same grouping config as stored. if
# this is None the `get_grouping_variants_for_event` will fill in
# the default.
else:
config = self.data.get("grouping_config")
config = load_grouping_config(config)
if normalize_stacktraces:
normalize_stacktraces_for_grouping(self.data, config)
return get_grouping_variants_for_event(self, config)
def get_primary_hash(self):
# TODO: This *might* need to be protected from an IndexError?
return self.get_hashes()[0]
@property
def title(self):
# also see event_manager.py which inserts this for snuba
et = eventtypes.get(self.get_event_type())()
return et.get_title(self.get_event_metadata())
@property
def culprit(self):
# For a while events did not save the culprit
if self.group_id:
return self.data.get("culprit") or self.group.culprit
return self.data.get("culprit")
@property
def location(self):
# also see event_manager.py which inserts this for snuba
et = eventtypes.get(self.get_event_type())()
return et.get_location(self.get_event_metadata())
@property
def real_message(self):
# XXX(mitsuhiko): this is a transitional attribute that should be
# removed. `message` will be renamed to `search_message` and this
# will become `message`.
return (
get_path(self.data, "logentry", "formatted")
or get_path(self.data, "logentry", "message")
or ""
)
@property
def organization(self):
return self.project.organization
@property
def version(self):
return self.data.get("version", "5")
@property
def ip_address(self):
ip_address = get_path(self.data, "user", "ip_address")
if ip_address:
return ip_address
remote_addr = get_path(self.data, "request", "env", "REMOTE_ADDR")
if remote_addr:
return remote_addr
return None
@property
def tags(self):
try:
rv = sorted(
[
(t, v)
for t, v in get_path(self.data, "tags", filter=True) or ()
if t is not None and v is not None
]
)
return rv
except ValueError:
# at one point Sentry allowed invalid tag sets such as (foo, bar)
# vs ((tag, foo), (tag, bar))
return []
# For compatibility, still used by plugins.
def get_tags(self):
return self.tags
def get_tag(self, key):
for t, v in self.get_tags():
if t == key:
return v
return None
@property
def release(self):
return self.get_tag("sentry:release")
@property
def dist(self):
return self.get_tag("sentry:dist")
def get_raw_data(self):
"""Returns the internal raw event data dict."""
return dict(self.data.items())
@property
def size(self):
return len(json.dumps(dict(self.data)))
@property
def transaction(self):
return self.get_tag("transaction")
def get_email_subject(self):
template = self.project.get_option("mail:subject_template")
if template:
template = EventSubjectTemplate(template)
else:
template = DEFAULT_SUBJECT_TEMPLATE
return truncatechars(template.safe_substitute(EventSubjectTemplateData(self)), 128).encode(
"utf-8"
)
def get_environment(self):
from sentry.models import Environment
if not hasattr(self, "_environment_cache"):
self._environment_cache = Environment.objects.get(
organization_id=self.project.organization_id,
name=Environment.get_name_or_default(self.get_tag("environment")),
)
return self._environment_cache
def get_minimal_user(self):
"""
A minimal 'User' interface object that gives us enough information
to render a user badge.
"""
return self.get_interface("user")
def as_dict(self):
"""Returns the data in normalized form for external consumers."""
# We use an OrderedDict to keep elements ordered for a potential JSON serializer
data = OrderedDict()
data["event_id"] = self.event_id
data["project"] = self.project_id
data["release"] = self.release
data["dist"] = self.dist
data["platform"] = self.platform
data["message"] = self.real_message
data["datetime"] = self.datetime
data["time_spent"] = self.time_spent
data["tags"] = [(k.split("sentry:", 1)[-1], v) for (k, v) in self.tags]
for k, v in sorted(six.iteritems(self.data)):
if k in data:
continue
if k == "sdk":
v = {v_k: v_v for v_k, v_v in six.iteritems(v) if v_k != "client_ip"}
data[k] = v
# for a long time culprit was not persisted. In those cases put
# the culprit in from the group.
if data.get("culprit") is None and self.group_id:
data["culprit"] = self.group.culprit
# Override title and location with dynamically generated data
data["title"] = self.title
data["location"] = self.location
return data
# ============================================
# DEPRECATED
# ============================================
@property
def level(self):
# we might want to move to this:
# return LOG_LEVELS_MAP.get(self.get_level_display()) or self.group.level
if self.group:
return self.group.level
else:
return None
def get_level_display(self):
# we might want to move to this:
# return self.get_tag('level') or self.group.get_level_display()
if self.group:
return self.group.get_level_display()
else:
return None
# deprecated accessors
@property
def logger(self):
warnings.warn("Event.logger is deprecated. Use Event.tags instead.", DeprecationWarning)
return self.get_tag("logger")
@property
def site(self):
warnings.warn("Event.site is deprecated. Use Event.tags instead.", DeprecationWarning)
return self.get_tag("site")
@property
def server_name(self):
warnings.warn(
"Event.server_name is deprecated. Use Event.tags instead.", DeprecationWarning
)
return self.get_tag("server_name")
@property
def checksum(self):
warnings.warn("Event.checksum is no longer used", DeprecationWarning)
return ""
def error(self): # TODO why is this not a property?
warnings.warn("Event.error is deprecated, use Event.title", DeprecationWarning)
return self.title
error.short_description = _("error")
@property
def message_short(self):
warnings.warn("Event.message_short is deprecated, use Event.title", DeprecationWarning)
return self.title
class SnubaEvent(EventCommon):
"""
An event backed by data stored in snuba.
This is a readonly event and does not support event creation or save.
The basic event data is fetched from snuba, and the event body is
fetched from nodestore and bound to the data property in the same way
as a regular Event.
"""
# The minimal list of columns we need to get from snuba to bootstrap an
# event. If the client is planning on loading the entire event body from
# nodestore anyway, we may as well only fetch the minimum from snuba to
# avoid duplicated work.
minimal_columns = ["event_id", "group_id", "project_id", "timestamp"]
# A list of all useful columns we can get from snuba.
selected_columns = minimal_columns + [
"culprit",
"location",
"message",
"platform",
"title",
"type",
# Required to provide snuba-only tags
"tags.key",
"tags.value",
# Required to provide snuba-only 'user' interface
"email",
"ip_address",
"user_id",
"username",
]
__repr__ = sane_repr("project_id", "group_id")
def __init__(self, snuba_values):
"""
When initializing a SnubaEvent, think about the attributes you
might need to access on it. If you only need a few properties, and
they are all available in snuba, then you should use
`SnubaEvent.selected_colums` (or a subset depending on your needs)
But if you know you are going to need the entire event body anyway
(which requires a nodestore lookup) you may as well just initialize
the event with `SnubaEvent.minimal_colums` and let the rest of of
the attributes come from nodestore.
"""
assert all(k in snuba_values for k in SnubaEvent.minimal_columns)
# self.snuba_data is a dict of all the stuff we got from snuba
self.snuba_data = snuba_values
# self.data is a (lazy) dict of everything we got from nodestore
node_id = SnubaEvent.generate_node_id(
self.snuba_data["project_id"], self.snuba_data["event_id"]
)
self.data = NodeData(None, node_id, data=None, wrapper=EventDict)
def __getattr__(self, name):
"""
Depending on what snuba data this event was initialized with, we may
have the data available to return, or we may have to look in the
`data` dict (which would force a nodestore load). All unresolved
self.foo type accesses will come through here.
"""
if name in ("_project_cache", "_group_cache", "_environment_cache"):
raise AttributeError()
if name in self.snuba_data:
return self.snuba_data[name]
else:
return self.data[name]
# ============================================
# Snuba-only implementations of properties that
# would otherwise require nodestore data.
# ============================================
@property
def tags(self):
"""
Override of tags property that uses tags from snuba rather than
the nodestore event body. This might be useful for implementing
tag deletions without having to rewrite nodestore blobs.
"""
if "tags.key" in self.snuba_data and "tags.value" in self.snuba_data:
keys = getattr(self, "tags.key")
values = getattr(self, "tags.value")
if keys and values and len(keys) == len(values):
return sorted(zip(keys, values))
else:
return []
else:
return super(SnubaEvent, self).tags
def get_minimal_user(self):
from sentry.interfaces.user import User
return User.to_python(
{
"id": self.user_id,
"email": self.email,
"username": self.username,
"ip_address": self.ip_address,
}
)
# If the data for these is available from snuba, we assume
# it was already normalized on the way in and we can just return
# it, otherwise we defer to EventCommon implementation.
def get_event_type(self):
if "type" in self.snuba_data:
return self.snuba_data["type"]
return super(SnubaEvent, self).get_event_type()
@property
def ip_address(self):
if "ip_address" in self.snuba_data:
return self.snuba_data["ip_address"]
return super(SnubaEvent, self).ip_address
@property
def title(self):
if "title" in self.snuba_data:
return self.snuba_data["title"]
return super(SnubaEvent, self).title
@property
def culprit(self):
if "culprit" in self.snuba_data:
return self.snuba_data["culprit"]
return super(SnubaEvent, self).culprit
@property
def location(self):
if "location" in self.snuba_data:
return self.snuba_data["location"]
return super(SnubaEvent, self).location
# ====================================================
# Snuba implementations of the django fields on Event
# ====================================================
@property
def datetime(self):
"""
Reconstruct the datetime of this event from the snuba timestamp
"""
# dateutil seems to use tzlocal() instead of UTC even though the string
# ends with '+00:00', so just replace the TZ with UTC because we know
# all timestamps from snuba are UTC.
return parse_date(self.timestamp).replace(tzinfo=pytz.utc)
@property
def time_spent(self):
return None
@property
def message(self):
if "message" in self.snuba_data:
return self.snuba_data["message"]
return self.data.get("message")
@property
def platform(self):
if "platform" in self.snuba_data:
return self.snuba_data["platform"]
return self.data.get("platform")
@property
def id(self):
# Because a snuba event will never have a django row id, just return
# the hex event_id here. We should be moving to a world where we never
# have to reference the row id anyway.
return self.event_id
def save(self):
raise NotImplementedError
class Event(EventCommon, Model):
"""
An event backed by data stored in postgres.
"""
__core__ = False
group_id = BoundedBigIntegerField(blank=True, null=True)
event_id = models.CharField(max_length=32, null=True, db_column="message_id")
project_id = BoundedBigIntegerField(blank=True, null=True)
message = models.TextField()
platform = models.CharField(max_length=64, null=True)
datetime = models.DateTimeField(default=timezone.now, db_index=True)
time_spent = BoundedIntegerField(null=True)
data = NodeField(
blank=True,
null=True,
ref_func=lambda x: x.project_id or x.project.id,
ref_version=2,
wrapper=EventDict,
)
objects = EventManager()
class Meta:
app_label = "sentry"
db_table = "sentry_message"
verbose_name = _("message")
verbose_name_plural = _("messages")
unique_together = (("project_id", "event_id"),)
index_together = (("group_id", "datetime"),)
__repr__ = sane_repr("project_id", "group_id")
def __getstate__(self):
state = Model.__getstate__(self)
# do not pickle cached info. We want to fetch this on demand
# again. In particular if we were to pickle interfaces we would
# pickle a CanonicalKeyView which old sentry workers do not know
# about
state.pop("_project_cache", None)
state.pop("_environment_cache", None)
state.pop("_group_cache", None)
state.pop("interfaces", None)
return state
class EventSubjectTemplate(string.Template):
idpattern = r"(tag:)?[_a-z][_a-z0-9]*"
class EventSubjectTemplateData(object):
tag_aliases = {"release": "sentry:release", "dist": "sentry:dist", "user": "sentry:user"}
def __init__(self, event):
self.event = event
def __getitem__(self, name):
if name.startswith("tag:"):
name = name[4:]
value = self.event.get_tag(self.tag_aliases.get(name, name))
if value is None:
raise KeyError
return six.text_type(value)
elif name == "project":
return self.event.project.get_full_name()
elif name == "projectID":
return self.event.project.slug
elif name == "shortID" and self.event.group_id:
return self.event.group.qualified_short_id
elif name == "orgID":
return self.event.organization.slug
elif name == "title":
return self.event.title
raise KeyError
DEFAULT_SUBJECT_TEMPLATE = EventSubjectTemplate("$shortID - $title")
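A brief illustration of the subject-template machinery above (event is a hypothetical Event instance; the ${tag:...} form works because the custom idpattern accepts a "tag:" prefix):

    template = EventSubjectTemplate("[$project] ${tag:level} - $title")
    subject = template.safe_substitute(EventSubjectTemplateData(event))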
| 33.778592
| 99
| 0.623996
| 2,869
| 23,037
| 4.874869
| 0.19519
| 0.03003
| 0.020449
| 0.010725
| 0.152224
| 0.110396
| 0.088374
| 0.075147
| 0.051266
| 0.038253
| 0
| 0.001451
| 0.282242
| 23,037
| 681
| 100
| 33.828194
| 0.844391
| 0.282545
| 0
| 0.186441
| 0
| 0
| 0.085907
| 0.002773
| 0
| 0
| 0
| 0.004405
| 0.002421
| 1
| 0.1477
| false
| 0
| 0.067797
| 0.03632
| 0.46247
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d6b89b1e8e521a2e81232c6e63ef4c5529270e8
| 2,920
|
py
|
Python
|
Assignment3/src/data/make_nowcast_dataset.py
|
shikashyam/BigDataSystemsCoursework
|
d7f9cabbfb18b0e3303292b65af1ffd530e24ccc
|
[
"MIT"
] | null | null | null |
Assignment3/src/data/make_nowcast_dataset.py
|
shikashyam/BigDataSystemsCoursework
|
d7f9cabbfb18b0e3303292b65af1ffd530e24ccc
|
[
"MIT"
] | null | null | null |
Assignment3/src/data/make_nowcast_dataset.py
|
shikashyam/BigDataSystemsCoursework
|
d7f9cabbfb18b0e3303292b65af1ffd530e24ccc
|
[
"MIT"
] | 4
|
2022-02-12T23:59:54.000Z
|
2022-02-16T22:53:32.000Z
|
"""
Makes training and test dataset for nowcasting model using SEVIR
"""
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import h5py
os.environ["HDF5_USE_FILE_LOCKING"]='FALSE'
import sys
import numpy as np
import tensorflow as tf
from nowcast_generator import get_nowcast_test_generator
# parser = argparse.ArgumentParser(description='Make nowcast training & test datasets using SEVIR')
# parser.add_argument('--sevir_data', type=str, help='location of SEVIR dataset',default='../../data/sevir')
# parser.add_argument('--sevir_catalog', type=str, help='location of SEVIR dataset',default='../../data/CATALOG.csv')
# parser.add_argument('--output_location', type=str, help='location of SEVIR dataset',default='../../data/interim')
# parser.add_argument('--n_chunks', type=int, help='Number of chucks to use (increase if memory limited)',default=10)
#args = parser.parse_args()
def generate_data(sevir_location,sevir_catalog,output_location,n_chunks=10):
"""
Runs data processing scripts to extract training set from SEVIR
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
#trn_generator = get_nowcast_train_generator(sevir_catalog=args.sevir_catalog,sevir_location=args.sevir_data)
tst_generator = get_nowcast_test_generator(sevir_catalog,sevir_location)
#logger.info('Reading/writing training data to %s' % ('%s/nowcast_training.h5' % args.output_location))
#read_write_chunks('%s/nowcast_training.h5' % args.output_location,trn_generator,args.n_chunks)
logger.info('Reading/writing testing data to ' + output_location+'/nowcast_testing.h5')
read_write_chunks(output_location+'/nowcast_testing.h5',tst_generator,n_chunks)
def read_write_chunks( filename, generator, n_chunks ):
logger = logging.getLogger(__name__)
chunksize = len(generator)//n_chunks
# get first chunk
logger.info('Gathering chunk 0/%s:' % n_chunks)
X,Y=generator.load_batches(n_batches=chunksize,offset=0,progress_bar=True)
# Create datasets
with h5py.File(filename, 'w') as hf:
hf.create_dataset('IN', data=X[0], maxshape=(None,X[0].shape[1],X[0].shape[2],X[0].shape[3]))
hf.create_dataset('OUT', data=Y[0], maxshape=(None,Y[0].shape[1],Y[0].shape[2],Y[0].shape[3]))
# Gather other chunks
for c in range(1,n_chunks+1):
offset = c*chunksize
n_batches = min(chunksize,len(generator)-offset)
if n_batches<0: # all done
break
logger.info('Gathering chunk %d/%s:' % (c,n_chunks))
X,Y=generator.load_batches(n_batches=n_batches,offset=offset,progress_bar=True)
with h5py.File(filename, 'a') as hf:
hf['IN'].resize((hf['IN'].shape[0] + X[0].shape[0]), axis = 0)
hf['OUT'].resize((hf['OUT'].shape[0] + Y[0].shape[0]), axis = 0)
hf['IN'][-X[0].shape[0]:] = X[0]
hf['OUT'][-Y[0].shape[0]:] = Y[0]
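A hedged call sketch reusing the defaults from the commented-out argparse block above (the paths are those defaults, not verified locations):

    generate_data(sevir_location='../../data/sevir',
                  sevir_catalog='../../data/CATALOG.csv',
                  output_location='../../data/interim',
                  n_chunks=10)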
| 42.941176
| 117
| 0.7
| 433
| 2,920
| 4.542725
| 0.297921
| 0.030503
| 0.017794
| 0.028978
| 0.213523
| 0.155567
| 0.141332
| 0.104728
| 0.104728
| 0
| 0
| 0.018444
| 0.14589
| 2,920
| 67
| 118
| 43.58209
| 0.770249
| 0.373973
| 0
| 0.057143
| 0
| 0
| 0.10962
| 0.011745
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.228571
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d6e1f190b9f10fc581499ca4a914cfa2670ffb2
| 9,576
|
py
|
Python
|
blender-plugin/import_cast.py
|
rtasan/ApexCastImporter
|
17f833ab8ff9757e295ca8eadb0cb210bfdd6476
|
[
"MIT"
] | null | null | null |
blender-plugin/import_cast.py
|
rtasan/ApexCastImporter
|
17f833ab8ff9757e295ca8eadb0cb210bfdd6476
|
[
"MIT"
] | 3
|
2021-05-24T12:29:43.000Z
|
2021-05-28T13:07:39.000Z
|
blender-plugin/import_cast.py
|
rtasan/ApexCastImporter
|
17f833ab8ff9757e295ca8eadb0cb210bfdd6476
|
[
"MIT"
] | null | null | null |
# The Original importer was created by Nick
# Copyright (c) 2020 Nick
import bpy
import bmesh
import os
import array
import math
from mathutils import *
from bpy_extras.image_utils import load_image
from .cast import Cast, Model, Animation, Curve, NotificationTrack, Mesh, Skeleton, Bone, Material, File
def utilityBuildPath(root, asset):
if os.path.isabs(asset):
return asset
root = os.path.dirname(root)
return os.path.join(root, asset)
def utilityAssignBSDFMaterialSlots(material, slots, path):
material.node_tree.nodes.remove(
material.node_tree.nodes["Principled BSDF"])
shader = material.node_tree.nodes.new("ShaderNodeGroup")
output = material.node_tree.nodes['Material Output']
# Create the node group shader
shader.node_tree = bpy.data.node_groups['S/G-Blender']
# Map texture slot names to shader group inputs
switcher = {
"albedo": "Diffuse map",
"diffuse": "Diffuse map",
"specular": "Specular map",
"ao": "AO map",
"cavity": "Cavity map",
"gloss": "Glossiness map",
"normal": "Normal map",
"emissive": "Emission input"
}
# Loop and connect the slots
for slot in slots:
connection = slots[slot]
if not connection.__class__ is File:
continue
if not slot in switcher:
continue
texture = material.node_tree.nodes.new("ShaderNodeTexImage")  # create an image texture node
try:
texture.image = bpy.data.images.load(
utilityBuildPath(path, connection.Path()))  # load the image file
except RuntimeError:
pass
if texture.image:
material.node_tree.links.new(
shader.inputs[switcher[slot]], texture.outputs["Color"])
material.node_tree.links.new(shader.outputs[0], output.inputs[0])  # link the group shader's output to the Material Output
else:
material.node_tree.nodes.remove(texture)
def importSkeletonNode(name, skeleton):
if skeleton is None:
return None
armature = bpy.data.armatures.new("Joints")
armature.display_type = "STICK"
skeletonObj = bpy.data.objects.new(name, armature)
skeletonObj.show_in_front = True
bpy.context.view_layer.active_layer_collection.collection.objects.link(
skeletonObj)
bpy.context.view_layer.objects.active = skeletonObj
bpy.ops.object.mode_set(mode='EDIT')
bones = skeleton.Bones()
handles = [None] * len(bones)
matrices = {}
for i, bone in enumerate(bones):
newBone = armature.edit_bones.new(bone.Name())
newBone.tail = 0, 0.05, 0 # I am sorry but blender sucks
tempQuat = bone.LocalRotation() # Also sucks, WXYZ? => XYZW master race
matRotation = Quaternion(
(tempQuat[3], tempQuat[0], tempQuat[1], tempQuat[2])).to_matrix().to_4x4()
matTranslation = Matrix.Translation(Vector(bone.LocalPosition()))
matrices[bone.Name()] = matTranslation @ matRotation
handles[i] = newBone
for i, bone in enumerate(bones):
if bone.ParentIndex() > -1:
handles[i].parent = handles[bone.ParentIndex()]
bpy.context.view_layer.objects.active = skeletonObj
bpy.ops.object.mode_set(mode='POSE')
for bone in skeletonObj.pose.bones:
bone.matrix_basis.identity()
bone.matrix = matrices[bone.name]
bpy.ops.pose.armature_apply()
return skeletonObj
def importMaterialNode(path, material):
# If you already created the material, ignore this
materialNew = bpy.data.materials.get(material.Name())
if materialNew is not None:
return material.Name(), materialNew
materialNew = bpy.data.materials.new(name=material.Name())
materialNew.use_nodes = True
# Blender really only wants a BSDF shader node
# so we're gonna give it one
utilityAssignBSDFMaterialSlots(materialNew, material.Slots(), path)
return material.Name(), materialNew
def importModelNode(model, path):
# Extract the name of this model from the path
modelName = os.path.splitext(os.path.basename(path))[0]
# Import skeleton for binds, materials for meshes
skeletonObj = importSkeletonNode(modelName, model.Skeleton())
materialArray = {key: value for (key, value) in (
importMaterialNode(path, x) for x in model.Materials())}
meshes = model.Meshes()
for mesh in meshes:
newMesh = bpy.data.meshes.new("polySurfaceMesh")
blendMesh = bmesh.new()
vertexColorLayer = blendMesh.loops.layers.color.new("color1")
vertexWeightLayer = blendMesh.verts.layers.deform.new()
vertexUVLayers = [blendMesh.loops.layers.uv.new(
"map%d" % x) for x in range(mesh.UVLayerCount())]
vertexPositions = mesh.VertexPositionBuffer()
for x in range(0, len(vertexPositions), 3):
blendMesh.verts.new(
Vector((vertexPositions[x], vertexPositions[x + 1], vertexPositions[x + 2])))
blendMesh.verts.ensure_lookup_table()
faceLookupMap = [1, 2, 0]
vertexNormalLayer = []
vertexNormals = mesh.VertexNormalBuffer()
vertexColors = mesh.VertexColorBuffer()
vertexUVs = [mesh.VertexUVLayerBuffer(
x) for x in range(mesh.UVLayerCount())]
def vertexToFaceVertex(face):
for x, loop in enumerate(face.loops):
vertexIndex = faces[faceStart + faceLookupMap[x]]
if vertexNormals is not None:
vertexNormalLayer.append((vertexNormals[vertexIndex * 3], vertexNormals[(
vertexIndex * 3) + 1], vertexNormals[(vertexIndex * 3) + 2]))
for uvLayer in range(mesh.UVLayerCount()):
uv = Vector(
(vertexUVs[uvLayer][vertexIndex * 2], vertexUVs[uvLayer][(vertexIndex * 2) + 1]))
uv.y = 1.0 - uv.y
loop[vertexUVLayers[uvLayer]].uv = uv
if vertexColors is not None:
loop[vertexColorLayer] = [
(vertexColors[vertexIndex] >> i & 0xff) / 255.0 for i in (24, 16, 8, 0)]
faces = mesh.FaceBuffer()
for faceStart in range(0, len(faces), 3):
indices = [blendMesh.verts[faces[faceStart + faceLookupMap[0]]],
blendMesh.verts[faces[faceStart + faceLookupMap[1]]], blendMesh.verts[faces[faceStart + faceLookupMap[2]]]]
try:
newLoop = blendMesh.faces.new(indices)
except ValueError:
continue
else:
vertexToFaceVertex(newLoop)
maximumInfluence = mesh.MaximumWeightInfluence()
if maximumInfluence > 0:
weightBoneBuffer = mesh.VertexWeightBoneBuffer()
weightValueBuffer = mesh.VertexWeightValueBuffer()
for x, vert in enumerate(blendMesh.verts):
if (weightValueBuffer[x * maximumInfluence] > 0.0):
vert[vertexWeightLayer][weightBoneBuffer[x * maximumInfluence]
] = weightValueBuffer[x * maximumInfluence]
blendMesh.to_mesh(newMesh)
newMesh.create_normals_split()
if len(vertexNormalLayer) > 0:
for x, _loop in enumerate(newMesh.loops):
newMesh.loops[x].normal = vertexNormalLayer[x]
newMesh.validate(clean_customdata=False)
clnors = array.array('f', [0.0] * (len(newMesh.loops) * 3))
newMesh.loops.foreach_get("normal", clnors)
newMesh.polygons.foreach_set(
"use_smooth", [True] * len(newMesh.polygons))
newMesh.normals_split_custom_set(tuple(zip(*(iter(clnors),) * 3)))
newMesh.use_auto_smooth = True
meshObj = bpy.data.objects.new("CastMesh", newMesh)
bpy.context.view_layer.active_layer_collection.collection.objects.link(
meshObj)
bpy.context.view_layer.objects.active = meshObj
meshMaterial = mesh.Material()
if meshMaterial is not None:
meshObj.data.materials.append(materialArray[meshMaterial.Name()])
for bone in skeletonObj.pose.bones:
meshObj.vertex_groups.new(name=bone.name)
meshObj.parent = skeletonObj
modifier = meshObj.modifiers.new('Armature Rig', 'ARMATURE')
modifier.object = skeletonObj
modifier.use_bone_envelopes = False
modifier.use_vertex_groups = True
def importRootNode(node, path):
for child in node.ChildrenOfType(Model):
importModelNode(child, path)
# for child in node.ChildrenOfType(Animation):
# importAnimationNode(child, path)
def importCast(path):
cast = Cast()
cast.load(path)
for root in cast.Roots():
importRootNode(root, path)
def load(self, context, filepath=""):
# Append the S/G-Blender shader node group from the configured .blend file
shader_path = bpy.context.preferences.addons[__package__].preferences.apex_sgshader_path
try:
file_path = shader_path
inner_path = 'NodeTree'
object_name = 'S/G-Blender'
bpy.ops.wm.append(
filepath=os.path.join(file_path, inner_path, object_name),
directory=os.path.join(file_path, inner_path),
filename=object_name
)
except Exception:
self.report({'ERROR'}, 'Set the Shader path in AddonPreferences first.')
return False
# Parse and load cast nodes
importCast(filepath)
# Update the scene, reset view mode before returning.
bpy.context.view_layer.update()
bpy.ops.object.mode_set(mode="OBJECT")
return True
| 33.957447
| 130
| 0.631788
| 1,041
| 9,576
| 5.738713
| 0.290106
| 0.012052
| 0.021426
| 0.021091
| 0.152662
| 0.108637
| 0.060931
| 0.042518
| 0.042518
| 0.042518
| 0
| 0.008635
| 0.262322
| 9,576
| 281
| 131
| 34.078292
| 0.837061
| 0.064327
| 0
| 0.091837
| 0
| 0
| 0.042627
| 0
| 0
| 0
| 0.000448
| 0
| 0
| 1
| 0.045918
| false
| 0.005102
| 0.091837
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d72ea525d5fca207b00f29574de0ed2864d8b1b
| 7,229
|
py
|
Python
|
cigeo/admin.py
|
CzechInvest/ciis
|
c6102598f564a717472e5e31e7eb894bba2c8104
|
[
"MIT"
] | 1
|
2019-05-26T22:24:01.000Z
|
2019-05-26T22:24:01.000Z
|
cigeo/admin.py
|
CzechInvest/ciis
|
c6102598f564a717472e5e31e7eb894bba2c8104
|
[
"MIT"
] | 6
|
2019-01-22T14:53:43.000Z
|
2020-09-22T16:20:28.000Z
|
cigeo/admin.py
|
CzechInvest/ciis
|
c6102598f564a717472e5e31e7eb894bba2c8104
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.contrib.gis import geos
from leaflet.admin import LeafletGeoAdmin, LeafletGeoAdminMixin
from .models import Lau1
from .models import Nuts3
from .models import Airport
from .models import Road
from .models import PublicTransportStop
from .models import RailwayStation
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
import nested_admin
import uuid
import json
class AirportAdmin(LeafletGeoAdmin):
default_zoom = 7
default_lon = 1730000
default_lat = 6430000
#readonly_fields = ("code", "name",)
class RoadAdmin(LeafletGeoAdmin):
default_zoom = 7
default_lon = 1730000
default_lat = 6430000
#readonly_fields = ("code", "name",)
class RailwayStationAdmin(LeafletGeoAdmin):
default_zoom = 7
default_lon = 1730000
default_lat = 6430000
#readonly_fields = ("code", "name",)
class PublicTransportStopAdmin(LeafletGeoAdmin):
default_zoom = 7
default_lon = 1730000
default_lat = 6430000
#readonly_fields = ("code", "name",)
class LAU1Admin(LeafletGeoAdmin):
default_zoom = 7
default_lon = 1730000
default_lat = 6430000
#readonly_fields = ("code", "name",)
class NUTS3Admin(LeafletGeoAdmin):
default_zoom = 7
default_lon = 1730000
default_lat = 6430000
#readonly_fields = ("code", "name",)
class NUTS3AdminInline(LeafletGeoAdminMixin, admin.StackedInline):
model = Nuts3
class LAU1AdminInline(LeafletGeoAdminMixin, admin.StackedInline):
model = Lau1
class NUTS3Filter(admin.SimpleListFilter):
"""Filter for admin interface of NUTS3 regions (Kraje)
"""
title = _('NUTS3 regions')
parameter_name = 'nuts3#'
def lookups(self, request, model_admin):
nuts3 = Nuts3.objects.all()
return (
(obj.id, obj.name) for obj in nuts3
)
def queryset(self, request, queryset):
val = self.value()
if val:
nuts3 = Nuts3.objects.get(pk=val)
results = queryset.filter(
location__geometry__intersects=nuts3.geometry)
else:
results = queryset
return results
class ArealFieldAdmin(nested_admin.NestedModelAdmin):
geojson_attributes = []
def get_place(self, obj):
if hasattr(obj.location, "address") and \
obj.location.address is not None:
return obj.location.address.city
else:
return ", ".join(
[l.__str__() for l in Nuts3.objects.filter(
geometry__intersects=obj.location.geometry)])
def get_search_results(self, request, queryset, search_term):
"""Add NUTS3 (by name) search and area size search (using `<>` operator)
"""
result, use_distinct = super(
ArealFieldAdmin, self).get_search_results(
request, queryset, search_term)
if search_term:
if len(result) == 0 or len(result) == len(queryset):
result = self._search_lay1_nuts3_by_name(
queryset, search_term)
if len(result) == 0 or len(result) == len(queryset):
result = self._search_area(queryset, search_term)
return (result, use_distinct)
def _search_lay1_nuts3_by_name(self, queryset, search_term):
"""Search NUTS3 (kraje) and LAU1 (okresy) region according to name
"""
filtered = queryset.none()
for cls in (Lau1, Nuts3):
objs = cls.objects.filter(name__startswith=search_term)
for o in objs:
objects = queryset.filter(
location__geometry__intersects=o.geometry)
filtered |= objects
return filtered
def _search_area(self, queryset, search_term):
"""Search all features, where MIN < area.total < MAX
"""
filtered = queryset.none()
if search_term.find("<>") > -1:
area_min, area_max = [float(x) for x in search_term.split("<>")]
filtered = queryset.filter(
areal__area__total__gte=area_min,
areal__area__total__lte=area_max)
return filtered
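# Example (hypothetical query): typing "1000<>50000" into the admin search
# box keeps only features whose areal.area.total lies between 1000 and 50000.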
def changelist_view(self, request, extra_context=None):
"""Adjust change list view
add GeoJSON encoded data for the queryset
"""
extra_context = extra_context or {}
response = super().changelist_view(
request, extra_context=extra_context,
)
if hasattr(response, "context_data"):
filtered_query_set = response.context_data["cl"].queryset
extra_context['objects_data'] = \
json.dumps(self.as_geojson(filtered_query_set))
response.context_data.update(extra_context)
return response
def as_geojson(self, queryset):
if self.geojson_attributes:
attributes = self.geojson_attributes
else:
attributes = []
data = {
"type": "FeatureCollection",
"features": []
}
for obj in queryset:
geom = None
if hasattr(obj, "location_set"):
multipoint = geos.MultiPoint(
[loc.address.coordinates for loc in obj.location_set.all()])
geom = multipoint.centroid
elif hasattr(obj, "location"):
geom = obj.location.geometry.centroid
elif hasattr(obj, "geom"):
geom = obj.geom
elif hasattr(obj, "address"):
geom = obj.address.coordinates
if geom:
title = None
if hasattr(obj, "title"):
title = obj.title
elif hasattr(obj, "name"):
title = obj.name
                if isinstance(obj.pk, uuid.UUID):
                    id = str(obj.pk)
                else:
                    id = obj.pk
feature = {
"type": "Feature",
"properties": {
"name": title,
"object_url":
reverse('admin:{}_{}_change'.format(
obj._meta.app_label,
obj._meta.model_name), args=(obj.pk,)),
},
"geometry": json.loads(geom.json),
"id": id
}
for attribute in attributes:
if hasattr(obj, attribute):
                        value = getattr(obj, attribute)
                        if isinstance(value, uuid.UUID):
feature[attribute] = str(value)
else:
feature[attribute] = value
data["features"].append(feature)
return data
# Register your models here.
admin.site.register(Lau1, LAU1Admin)
admin.site.register(Nuts3, NUTS3Admin)
admin.site.register(Road, RoadAdmin)
admin.site.register(PublicTransportStop, PublicTransportStopAdmin)
admin.site.register(RailwayStation, RailwayStationAdmin)
admin.site.register(Airport, AirportAdmin)
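# --- Illustrative sketch (not part of the original file) ---
# `ArealFieldAdmin._search_area` accepts search terms of the form
# "MIN<>MAX" for the total area. A standalone, dependency-free rendering
# of that parsing step (the helper name here is hypothetical):
def parse_area_search_term(search_term):
    """Return (area_min, area_max) for a "MIN<>MAX" term, else None."""
    if "<>" not in search_term:
        return None
    area_min, area_max = (float(x) for x in search_term.split("<>"))
    return (area_min, area_max)

assert parse_area_search_term("100<>500.5") == (100.0, 500.5)
assert parse_area_search_term("Praha") is None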
| 30.761702
| 80
| 0.580025
| 727
| 7,229
| 5.590096
| 0.240715
| 0.024606
| 0.023622
| 0.039862
| 0.218996
| 0.176673
| 0.159449
| 0.159449
| 0.159449
| 0.159449
| 0
| 0.025725
| 0.327846
| 7,229
| 234
| 81
| 30.893162
| 0.810661
| 0.080924
| 0
| 0.174699
| 0
| 0
| 0.030027
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048193
| false
| 0
| 0.084337
| 0
| 0.385542
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d733960d2eb830da7ca11bb10495536367425c3
| 6,207
|
py
|
Python
|
pycs/spells/hunters_mark.py
|
dwagon/pycs
|
4d02acbf380526d3bf0380f6bb8b757a827024b8
|
[
"MIT"
] | null | null | null |
pycs/spells/hunters_mark.py
|
dwagon/pycs
|
4d02acbf380526d3bf0380f6bb8b757a827024b8
|
[
"MIT"
] | null | null | null |
pycs/spells/hunters_mark.py
|
dwagon/pycs
|
4d02acbf380526d3bf0380f6bb8b757a827024b8
|
[
"MIT"
] | null | null | null |
"""https://www.dndbeyond.com/spells/hunters-mark"""
from unittest.mock import patch
import dice
from pycs.constant import ActionCategory
from pycs.constant import SpellType
from pycs.creature import Creature
from pycs.effect import Effect
from pycs.gear import Shortbow
from pycs.spell import SpellAction
from pycs.spells.spelltest import SpellTest
##############################################################################
##############################################################################
##############################################################################
class HuntersMark(SpellAction):
"""You choose a creature you can see within range and mystically
mark it as your quarry. Until the spell ends, you deal an extra 1d6
damage to the target whenever you hit it with a weapon attack, and
you have advantage on any Wisdom (Perception) or Wisdom (Survival)
check you make to find it. If the target drops to 0 hit points
before this spell ends, you can use a bonus action on a subsequent
turn of yours to mark a new creature.
At Higher Levels. When you cast this spell using a spell slot of
3rd or 4th level, you can maintain your concentration on the spell
for up to 8 hours. When you use a spell slot of 5th level or higher,
you can maintain your concentration on the spell for up to 24
hours."""
##########################################################################
def __init__(self, **kwargs):
name = "Hunters Mark"
kwargs.update(
{
"category": ActionCategory.BONUS,
"concentration": SpellType.CONCENTRATION,
"level": 1,
"reach": 90,
"type": SpellType.BUFF,
}
)
super().__init__(name, **kwargs)
self._victim = None
##########################################################################
def heuristic(self):
"""Should we do the spell"""
if self.pick_target():
return 6
print("No enemy in range")
return 0
##########################################################################
def pick_target(self):
"""Who should we do the spell to"""
for enemy in self.owner.pick_closest_enemy():
if self.owner.distance(enemy) > self.range()[0]:
continue
if enemy.has_effect("Hunters Mark"):
continue
self.target = enemy
return enemy
return None
##########################################################################
def cast(self):
"""Do the spell"""
self._victim = self.target
self._victim.add_effect(HuntersMarkEffect(caster=self.owner))
print(f"Cast Hunters Mark on {self._victim}")
##########################################################################
def end_concentration(self):
"""What happens when we stop concentrating"""
if self._victim:
print(f"Removing Hunters Mark from {self._victim}")
self._victim.remove_effect("Hunters Mark")
self._victim = None
##############################################################################
##############################################################################
##############################################################################
class HuntersMarkEffect(Effect):
"""Hunters Mark Effect"""
##########################################################################
def __init__(self, **kwargs):
"""Initialise"""
super().__init__("Hunters Mark", **kwargs)
##########################################################################
def hook_target_additional_damage(self, _, source, target):
"""More damage"""
if source == self.caster:
return ("1d6", 0, None)
return ("", 0, None)
##############################################################################
##############################################################################
##############################################################################
class TestHuntersMark(SpellTest):
"""Test Spell"""
##########################################################################
def setUp(self):
"""test setup"""
super().setUp()
self.caster.add_action(HuntersMark())
##########################################################################
def test_cast(self):
"""test casting"""
self.caster.options_this_turn = [ActionCategory.BONUS]
self.assertFalse(self.enemy.has_effect("Hunters Mark"))
self.caster.do_stuff(categ=ActionCategory.BONUS, moveto=False)
self.assertTrue(self.enemy.has_effect("Hunters Mark"))
##########################################################################
def test_effect(self):
"""Test the effect of casting the spell"""
print(self.caster.arena)
self.caster.moves = 99
self.caster.options_this_turn = [ActionCategory.BONUS, ActionCategory.ACTION]
self.caster.do_stuff(categ=ActionCategory.BONUS, moveto=True)
self.assertTrue(self.enemy.has_effect("Hunters Mark"))
self.caster.add_gear(Shortbow())
self.assertEqual(len(self.enemy.damage_this_turn), 0)
with patch.object(Creature, "rolld20") as mock:
mock.return_value = 18
with patch.object(dice, "roll") as mock_dice:
mock_dice.return_value = 5
self.caster.do_stuff(categ=ActionCategory.ACTION, moveto=True)
print(f"{self.enemy.damage_this_turn=}")
self.assertEqual(len(self.enemy.damage_this_turn), 2)
##########################################################################
def test_removal(self):
"""Test the effect gets removed"""
self.caster.options_this_turn = [ActionCategory.BONUS]
self.caster.do_stuff(categ=ActionCategory.BONUS, moveto=False)
self.assertTrue(self.enemy.has_effect("Hunters Mark"))
self.caster.remove_concentration()
self.assertFalse(self.enemy.has_effect("Hunters Mark"))
# EOF
| 41.10596
| 85
| 0.475914
| 584
| 6,207
| 4.943493
| 0.296233
| 0.049532
| 0.047108
| 0.043644
| 0.283339
| 0.254243
| 0.241773
| 0.226533
| 0.120887
| 0.120887
| 0
| 0.005668
| 0.204124
| 6,207
| 150
| 86
| 41.38
| 0.578745
| 0.161592
| 0
| 0.180723
| 0
| 0
| 0.078366
| 0.008396
| 0
| 0
| 0
| 0
| 0.084337
| 1
| 0.13253
| false
| 0
| 0.108434
| 0
| 0.349398
| 0.060241
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5d7596fcdc1125f69dea760f3f07ca8ccf07185d
| 7,509
|
py
|
Python
|
src/pose/visualizations/visualizations.py
|
Idein/chainer-hand-pose
|
45c7b629a74bf13da8cc9b47d0ded7099c139e9b
|
[
"Apache-2.0"
] | 11
|
2019-12-14T07:55:52.000Z
|
2021-06-22T06:38:34.000Z
|
src/pose/visualizations/visualizations.py
|
terasakisatoshi/chainer-hand-pose
|
a47e0c61c4fea3369db566eea3d539d1c9398bf7
|
[
"Apache-2.0"
] | 1
|
2020-06-17T21:39:48.000Z
|
2020-06-26T13:16:43.000Z
|
src/pose/visualizations/visualizations.py
|
terasakisatoshi/chainer-hand-pose
|
a47e0c61c4fea3369db566eea3d539d1c9398bf7
|
[
"Apache-2.0"
] | 3
|
2019-12-11T13:47:54.000Z
|
2020-10-23T07:10:15.000Z
|
import logging
logger = logging.getLogger(__name__)
import random
import chainercv
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # NOQA
from pose.hand_dataset.geometry_utils import normalize_joint_zyx
from pose.hand_dataset.image_utils import normalize_depth
# Decimal Code (R,G,B)
BASE_COLOR = {
"RED": (255, 0, 0),
"GREEN": (0, 255, 0),
"BLUE": (0, 0, 255),
"YELLOW": (255, 255, 0),
"CYAN": (0, 255, 255),
"MAGENTA": (255, 0, 255),
}
def vis_image(img, ax=None):
"""
extend chainercv.visualizations.vis_image
"""
C, H, W = img.shape
if C == 1:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
        # remove channel dimension
ax.imshow(img.squeeze())
else:
ax = chainercv.visualizations.vis_image(img, ax)
return ax
def preprocess(point, ax, img):
input_point = np.asarray(point)
if input_point.ndim == 2:
        input_point = np.expand_dims(input_point, axis=0)
H, W = None, None
if ax is None:
fig = plt.figure()
if input_point.shape[-1] == 3:
ax = fig.add_subplot(1, 1, 1, projection="3d")
else:
ax = fig.add_subplot(1, 1, 1)
if img is not None:
ax = vis_image(img, ax=ax)
_, H, W = img.shape
return input_point, ax, H, W
def vis_point(point, img=None, color=None, ax=None):
"""
Visualize points in an image, customized to our purpose.
Base implementation is taken from chainercv.visualizations.vis_image
"""
point, ax, H, W = preprocess(point, ax, img)
n_inst = len(point)
c = np.asarray(color) / 255. if color is not None else None
for i in range(n_inst):
        # note that the shape of `point[i]` is (K, N) and the row format is (y, x) or (z, y, x).
# (K, N) -> (N, K)
pts = point[i].transpose() # (K,N) -> (N,K)
# resort coordinate order : yx -> xy or zyx -> xyz
pts = pts[::-1]
ax.scatter(*pts, c=c)
if W is not None:
ax.set_xlim(left=0, right=W)
if H is not None:
ax.set_ylim(bottom=H - 1, top=0)
return ax
def vis_edge(point, indices, img=None, color=None, ax=None):
"""
Visualize edges in an image
"""
point, ax, H, W = preprocess(point, ax, img)
n_inst = len(point)
if color is not None:
color = np.asarray(color) / 255.
else:
color = [None] * len(indices)
for i in range(n_inst):
# note that the shape of `point[i]` is (K,N) and the format of one is (y, x) or (z,y,x).
pts = point[i]
for ((s, t), c) in zip(indices, color):
            # Select the pair of points (start, target) that forms the edge.
            # Note that [::-1] reorders coordinates: yx -> xy or zyx -> xyz
edge = pts[[s, t]].transpose()
edge = edge[::-1]
ax.plot(*edge, c=c)
if W is not None:
ax.set_xlim(left=0, right=W)
if H is not None:
ax.set_ylim(bottom=H - 1, top=0)
return ax
def vis_pose(point, indices, img=None, point_color=None, edge_color=None, ax=None):
ax = vis_point(point, img=img, color=point_color, ax=ax)
vis_edge(point, indices, img=img, color=edge_color, ax=ax)
def visualize_both(dataset, keypoint_names, edges, color_map, normalize=False):
idx = random.randint(0, len(dataset) - 1)
logger.info("get example")
example = dataset.get_example(idx)
logger.info("Done get example")
fig = plt.figure(figsize=(8, 8))
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223, projection="3d")
ax4 = fig.add_subplot(224, projection="3d")
color = [color_map[k] for k in keypoint_names]
edge_color = [color_map[s, t] for s, t in edges]
depth = example["depth"].astype(np.float32)
depth_joint = example["depth_joint"]
depth_camera = example["depth_camera"]
depth_vu, depth_z = depth_camera.zyx2vu(depth_joint, return_z=True)
z_size = example["param"]["z_size"]
if normalize:
depth = normalize_depth(depth, z_com=depth_z.mean(), z_size=z_size)
depth_joint = normalize_joint_zyx(depth_joint, depth_camera, z_size)
rgb = example["rgb"]
rgb_joint = example["rgb_joint"]
rgb_camera = example["rgb_camera"]
rgb_vu = rgb_camera.zyx2vu(rgb_joint)
rgb_joint = normalize_joint_zyx(rgb_joint, rgb_camera, z_size)
print(example["param"])
vis_point(rgb_vu, img=rgb, color=color, ax=ax1)
vis_edge(rgb_vu, indices=edges, color=edge_color, ax=ax1)
vis_point(rgb_joint, color=color, ax=ax3)
vis_edge(rgb_joint, indices=edges, color=edge_color, ax=ax3)
vis_point(depth_vu, img=depth, color=color, ax=ax2)
vis_edge(depth_vu, indices=edges, color=edge_color, ax=ax2)
vis_point(depth_joint, color=color, ax=ax4)
vis_edge(depth_joint, indices=edges, color=edge_color, ax=ax4)
for ax in [ax3, ax4]:
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.view_init(-65, -90)
plt.savefig("output.png")
plt.show()
def visualize_rgb(dataset, keypoint_names, edges, color_map, idx=None):
if idx is None:
idx = random.randint(0, len(dataset) - 1)
logger.info("get example")
example = dataset.get_example(idx)
logger.info("Done get example")
fig = plt.figure(figsize=(5, 10))
ax1 = fig.add_subplot(211)
ax3 = fig.add_subplot(212, projection="3d")
color = [color_map[k] for k in keypoint_names]
edge_color = [color_map[s, t] for s, t in edges]
rgb = example["rgb"]
rgb_joint = example["rgb_joint"]
rgb_camera = example["rgb_camera"]
rgb_vu = rgb_camera.zyx2vu(rgb_joint)
vis_point(rgb_vu, img=rgb, color=color, ax=ax1)
vis_edge(rgb_vu, indices=edges, color=edge_color, ax=ax1)
vis_point(rgb_joint, color=color, ax=ax3)
vis_edge(rgb_joint, indices=edges, color=edge_color, ax=ax3)
for ax in [ax3]:
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.view_init(-65, -90)
plt.savefig("output.png")
plt.show()
def visualize_depth(dataset, keypoint_names, edges, color_map, normalize=False):
idx = random.randint(0, len(dataset) - 1)
logger.info("get example")
example = dataset.get_example(idx)
logger.info("Done get example")
fig = plt.figure(figsize=(5, 10))
ax2 = fig.add_subplot(211)
ax4 = fig.add_subplot(212, projection="3d")
color = [color_map[k] for k in keypoint_names]
edge_color = [color_map[s, t] for s, t in edges]
depth = example["depth"].astype(np.float32)
depth_joint = example["depth_joint"]
depth_camera = example["depth_camera"]
depth_vu, depth_z = depth_camera.zyx2vu(depth_joint, return_z=True)
z_size = example["param"]["z_size"]
if normalize:
depth = normalize_depth(depth, z_com=depth_z.mean(), z_size=z_size)
depth_joint = normalize_joint_zyx(depth_joint, depth_camera, z_size)
print(example["param"])
vis_point(depth_vu, img=depth, color=color, ax=ax2)
vis_edge(depth_vu, indices=edges, color=edge_color, ax=ax2)
vis_point(depth_joint, color=color, ax=ax4)
vis_edge(depth_joint, indices=edges, color=edge_color, ax=ax4)
for ax in [ax4]:
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.view_init(-65, -90)
plt.savefig("output.png")
plt.show()
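# --- Illustrative usage sketch (not part of the original file) ---
# `vis_point`/`vis_edge` expect points shaped (n_inst, K, N), each row in
# (y, x) (2D) or (z, y, x) (3D) order, as noted in the comments above.
if __name__ == "__main__":
    demo_pts = np.array([[[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]]])  # (1, 3, 2)
    demo_ax = vis_point(demo_pts, color=BASE_COLOR["RED"])
    vis_edge(demo_pts, indices=[(0, 1), (1, 2)],
             color=[BASE_COLOR["BLUE"], BASE_COLOR["GREEN"]], ax=demo_ax)
    plt.show()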
| 31.953191
| 96
| 0.632574
| 1,172
| 7,509
| 3.888225
| 0.162116
| 0.02765
| 0.03138
| 0.0316
| 0.681589
| 0.666228
| 0.658986
| 0.623875
| 0.57889
| 0.57889
| 0
| 0.027383
| 0.231589
| 7,509
| 234
| 97
| 32.089744
| 0.762392
| 0.085764
| 0
| 0.622857
| 0
| 0
| 0.042756
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045714
| false
| 0
| 0.057143
| 0
| 0.125714
| 0.011429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
536f43c7085300c239b6e7cb90378b2df33381ad
| 1,134
|
py
|
Python
|
tools/nn/speaker.py
|
mikiec84/speaking_detection
|
ed680138627c156e1f7b0af20d6517e2bea754cc
|
[
"MIT"
] | null | null | null |
tools/nn/speaker.py
|
mikiec84/speaking_detection
|
ed680138627c156e1f7b0af20d6517e2bea754cc
|
[
"MIT"
] | null | null | null |
tools/nn/speaker.py
|
mikiec84/speaking_detection
|
ed680138627c156e1f7b0af20d6517e2bea754cc
|
[
"MIT"
] | null | null | null |
import os
import skimage.io
from torch.nn import Module
import torch.nn
from torchvision.models import resnet18
from nn.speaker_dataset import Dataset # @UnusedImport
os.environ['TORCH_MODEL_ZOO'] = '../data/'
VIDTIMIT_PATH = '../data/vidtimit/'
skimage.io.use_plugin('pil')
class Net(Module):
def __init__(self):
super().__init__()
resnet = resnet18(pretrained=True)
self.features = torch.nn.Sequential(*list(resnet.children())[:-1])
self.classifier = torch.nn.Sequential(
torch.nn.Linear(512, 2)
)
# print(len(list(self.features.parameters())))
for p in list(self.features.parameters())[:20]:
p.requires_grad = False
def forward(self, x, **kw):
# X = F.softmax(self.basenet(X))
f = self.features(x)
f = f.view(f.size(0), -1)
y = self.classifier(f)
return y
def get_speaking_detector_final():
m = torch.load('../data/speaker.pt')
    m = m.eval()
return m
def get_speaking_detector(e):
m = torch.load('../data/speaker/model.e{}.pt'.format(e))
    m = m.eval()
return m
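# --- Illustrative sketch (not part of the original file) ---
# The network consumes standard ImageNet-sized RGB batches. A quick shape
# check of the forward pass (downloads the resnet18 weights on first run;
# reading the two outputs as speaking / not-speaking is an assumption):
if __name__ == "__main__":
    net = Net().eval()
    dummy = torch.randn(2, 3, 224, 224)  # batch of 2 RGB frames
    logits = net(dummy)
    assert logits.shape == (2, 2)  # 2 samples x 2 classes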
| 23.625
| 74
| 0.613757
| 153
| 1,134
| 4.424837
| 0.470588
| 0.051699
| 0.050222
| 0.076809
| 0.100443
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014925
| 0.231922
| 1,134
| 47
| 75
| 24.12766
| 0.762342
| 0.078483
| 0
| 0.125
| 0
| 0
| 0.085495
| 0.026897
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.1875
| 0
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
536ff8da70c0647265f2448d9db35e0d757a366c
| 1,551
|
py
|
Python
|
tensorflow_model_analysis/util_test.py
|
mdreves/model-analysis
|
73760b27b763e322a92ea80ff0a768ad9ef74526
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_model_analysis/util_test.py
|
mdreves/model-analysis
|
73760b27b763e322a92ea80ff0a768ad9ef74526
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_model_analysis/util_test.py
|
mdreves/model-analysis
|
73760b27b763e322a92ea80ff0a768ad9ef74526
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple tests for util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_model_analysis import util
class UtilTest(tf.test.TestCase):
def testKwargsOnly(self):
@util.kwargs_only
def fn(a, b, c, d=None, e=5):
if d is None:
d = 100
if e is None:
e = 1000
return a + b + c + d + e
self.assertEqual(1 + 2 + 3 + 100 + 5, fn(a=1, b=2, c=3))
self.assertEqual(1 + 2 + 3 + 100 + 1000, fn(a=1, b=2, c=3, e=None))
with self.assertRaisesRegexp(TypeError, 'keyword-arguments only'):
fn(1, 2, 3)
with self.assertRaisesRegexp(TypeError, 'with c specified'):
fn(a=1, b=2, e=5) # pylint: disable=no-value-for-parameter
with self.assertRaisesRegexp(TypeError, 'with extraneous kwargs'):
fn(a=1, b=2, c=3, f=11) # pylint: disable=unexpected-keyword-arg
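# --- Illustrative sketch (not part of the original file) ---
# `util.kwargs_only` itself is not shown here. Judging only from what the
# test above asserts, a minimal stand-in for the positional-argument check
# could look like this (the exact error messages are assumptions):
import functools

def kwargs_only_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if args:
            raise TypeError('%s() accepts keyword-arguments only' % func.__name__)
        return func(**kwargs)
    return wrapper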
if __name__ == '__main__':
tf.test.main()
| 32.3125
| 74
| 0.691812
| 244
| 1,551
| 4.295082
| 0.479508
| 0.057252
| 0.015267
| 0.019084
| 0.14313
| 0.062977
| 0.022901
| 0
| 0
| 0
| 0
| 0.040388
| 0.201805
| 1,551
| 47
| 75
| 33
| 0.806139
| 0.419084
| 0
| 0
| 0
| 0
| 0.077185
| 0
| 0
| 0
| 0
| 0
| 0.208333
| 1
| 0.083333
| false
| 0
| 0.208333
| 0
| 0.375
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5370c3d3d7c64120cfceac3826e677a88c4d71af
| 3,556
|
py
|
Python
|
laia/data/transforms/vision/random_beta_morphology.py
|
eivtho/PyLaia
|
2a7a6e2eeb9b5af68c0faed0c564b02063e72be0
|
[
"MIT"
] | 89
|
2018-12-12T23:06:26.000Z
|
2022-02-03T09:04:21.000Z
|
laia/data/transforms/vision/random_beta_morphology.py
|
eivtho/PyLaia
|
2a7a6e2eeb9b5af68c0faed0c564b02063e72be0
|
[
"MIT"
] | 30
|
2019-03-06T14:29:48.000Z
|
2022-03-16T14:53:43.000Z
|
laia/data/transforms/vision/random_beta_morphology.py
|
jpuigcerver/PyLaia
|
1b2e864247f1bfb8d95ac1910de9c52df71c017a
|
[
"MIT"
] | 26
|
2018-12-13T17:48:19.000Z
|
2022-02-28T12:52:29.000Z
|
from typing import List, Tuple, Union
import numpy as np
import scipy.special
from PIL import Image, ImageFilter
class RandomBetaMorphology:
def __init__(
self, filter_size_min: int, filter_size_max: int, alpha: float, beta: float
) -> None:
assert filter_size_min % 2 != 0, "Filter size must be odd"
assert filter_size_max % 2 != 0, "Filter size must be odd"
self.filter_size_min = filter_size_min
self.filter_size_max = filter_size_max
self.alpha = alpha
self.beta = beta
self.filter_sizes, self.filter_probs = self._create_filter_distribution(
filter_size_min, filter_size_max, alpha, beta
)
@staticmethod
def _create_filter_distribution(
filter_size_min: int, filter_size_max: int, alpha: float, beta: float
) -> Tuple[List[int], Union[List[float], np.ndarray]]:
n = (filter_size_max - filter_size_min) // 2 + 1
if n < 2:
return [filter_size_min], np.asarray([1.0], dtype=np.float32)
filter_sizes = []
filter_probs = []
for k in range(n):
filter_sizes.append(filter_size_min + 2 * k)
filter_probs.append(
scipy.special.comb(n, k) * scipy.special.beta(alpha + k, n - k + beta)
)
np_filter_probs = np.asarray(filter_probs, dtype=np.float32)
        np_filter_probs = np_filter_probs / np_filter_probs.sum()
return filter_sizes, np_filter_probs
def sample_filter_size(self):
filter_size = np.random.choice(self.filter_sizes, p=self.filter_probs)
return filter_size
def __call__(self, *args, **kwargs):
        raise NotImplementedError
def __repr__(self) -> str:
return (
f"vision.{self.__class__.__name__}("
f"filter_size_min={self.filter_size_min}, "
f"filter_size_max={self.filter_size_max}, "
f"alpha={self.alpha}, beta={self.beta})"
)
class Dilate(RandomBetaMorphology):
def __init__(
self,
filter_size_min: int = 3,
filter_size_max: int = 7,
alpha: float = 1,
beta: float = 3,
) -> None:
super().__init__(filter_size_min, filter_size_max, alpha, beta)
def __call__(self, img: Image) -> Image:
filter_size = self.sample_filter_size()
return img.filter(ImageFilter.MaxFilter(filter_size))
class Erode(RandomBetaMorphology):
def __init__(
self,
filter_size_min: int = 3,
filter_size_max: int = 5,
alpha: float = 1,
beta: float = 3,
) -> None:
super().__init__(filter_size_min, filter_size_max, alpha, beta)
def __call__(self, img: Image) -> Image:
filter_size = self.sample_filter_size()
return img.filter(ImageFilter.MinFilter(filter_size))
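# --- Illustrative sketch (not part of the original file) ---
# The filter-size distribution built above is beta-binomial: for n
# candidate odd sizes, P(size_k) is proportional to comb(n, k) *
# B(alpha + k, n - k + beta). With the Dilate() defaults the candidates
# are {3, 5, 7} and the smallest filter is the most likely:
def print_filter_distribution():  # illustrative helper
    d = Dilate()
    print(dict(zip(d.filter_sizes, np.round(d.filter_probs, 3))))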
if __name__ == "__main__":
import argparse
from PIL import ImageOps
parser = argparse.ArgumentParser()
parser.add_argument("--operation", choices=("dilate", "erode"), default="dilate")
parser.add_argument("images", type=argparse.FileType("rb"), nargs="+")
args = parser.parse_args()
transformer = Dilate() if args.operation == "dilate" else Erode()
for f in args.images:
x = Image.open(f, "r").convert("L")
x = ImageOps.invert(x)
y = transformer(x)
w, h = x.size
z = Image.new("L", (w, 2 * h))
z.paste(x, (0, 0))
z.paste(y, (0, h))
z = z.resize(size=(w // 2, h), resample=Image.BICUBIC)
z.show()
input()
| 32.327273
| 86
| 0.615298
| 461
| 3,556
| 4.425163
| 0.240781
| 0.191176
| 0.095588
| 0.041667
| 0.385784
| 0.361765
| 0.311765
| 0.291176
| 0.258824
| 0.258824
| 0
| 0.010365
| 0.267435
| 3,556
| 109
| 87
| 32.623853
| 0.772745
| 0
| 0
| 0.224719
| 0
| 0
| 0.070304
| 0.031215
| 0
| 0
| 0
| 0
| 0.022472
| 1
| 0.101124
| false
| 0
| 0.067416
| 0.022472
| 0.280899
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5374082003f5a0ab7717d7cbdda9e4ca3ac483ea
| 1,236
|
py
|
Python
|
Concurrency/codeSample/Part4_Thread_Synchronuzation_Primitives/sema_signal.py
|
Chyi341152/pyConPaper
|
851190d59f8dc85b4f2a0b47c6505edd0367a6fe
|
[
"MIT"
] | 1
|
2018-05-30T02:36:46.000Z
|
2018-05-30T02:36:46.000Z
|
Concurrency/codeSample/Part4_Thread_Synchronuzation_Primitives/sema_signal.py
|
Chyi341152/pyConPaper
|
851190d59f8dc85b4f2a0b47c6505edd0367a6fe
|
[
"MIT"
] | null | null | null |
Concurrency/codeSample/Part4_Thread_Synchronuzation_Primitives/sema_signal.py
|
Chyi341152/pyConPaper
|
851190d59f8dc85b4f2a0b47c6505edd0367a6fe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# sema_signal.py
#
# An example of using a semaphore for signaling between threads
import threading
import time
done = threading.Semaphore(0)  # Signaling: count starts at 0, so acquire() blocks until release()
item = None
def producer():
global item
print("I'm the producer and I produce data.")
print("Producer is going to sleep.")
time.sleep(5)
item = "Hello"
print("Producer is alive. Signaling the consumer.")
done.release() # Increments the count and signals waiting threads
def consumer():
print("I'm a consumer and I want for date.")
print("Consumer is waiting.")
    done.acquire()  # Blocks while the count is 0; then decrements the count and continues
print("Consumer got", item)
t1 = threading.Thread(target=producer)
t2 = threading.Thread(target=consumer)
t1.start()
t2.start()
"""
Semaphore Uses:
1. Resource control
You can limit the number of threads performing certain operations.For example, performing database queries making network connections
2. Signaling
Semaphores can be used to send "signals" between threads. For example, having one thread wake up another thread
"""
| 29.428571
| 145
| 0.669903
| 163
| 1,236
| 5.07362
| 0.546012
| 0.029021
| 0.016929
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011727
| 0.2411
| 1,236
| 41
| 146
| 30.146341
| 0.869936
| 0.207929
| 0
| 0
| 0
| 0
| 0.2832
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.095238
| 0
| 0.190476
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5375dec1385aae371f742bbb1feff08c0d14da3b
| 3,199
|
py
|
Python
|
temp_wc_analysis/analysis.py
|
KarrLab/wc_sim
|
5b0ee03c3d19193fa67a3797d4258b753e6bc576
|
[
"MIT"
] | 8
|
2018-03-27T21:35:25.000Z
|
2022-01-18T08:32:20.000Z
|
temp_wc_analysis/analysis.py
|
KarrLab/wc_sim
|
5b0ee03c3d19193fa67a3797d4258b753e6bc576
|
[
"MIT"
] | 114
|
2018-02-27T14:14:39.000Z
|
2020-12-30T15:06:51.000Z
|
temp_wc_analysis/analysis.py
|
KarrLab/wc_sim
|
5b0ee03c3d19193fa67a3797d4258b753e6bc576
|
[
"MIT"
] | 2
|
2019-04-05T16:17:28.000Z
|
2020-05-17T12:55:20.000Z
|
'''Analysis utility functions.
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2016-03-26
:Copyright: 2016-2018, Karr Lab
:License: MIT
'''
# TODO(Arthur): IMPORTANT: refactor and replace
from matplotlib import pyplot
from matplotlib import ticker
from wc_lang import Model, Submodel
from scipy.constants import Avogadro
import numpy as np
import re
def plot(model, time = np.zeros(0),
species_counts = None, volume = np.zeros(0), extracellular_volume = np.zeros(0),
selected_species_compartments = [],
yDatas = {},
units = 'mM', title = '', fileName = ''):
#convert time to hours
time = time.copy() / 3600
#create figure
fig = pyplot.figure()
#extract data to plot
if not yDatas:
yDatas = {}
for species_compartment_id in selected_species_compartments:
#extract data
            match = re.match(r'^(?P<speciesId>[a-z0-9\-_]+)\[(?P<compartmentId>[a-z0-9\-_]+)\]$',
species_compartment_id, re.I).groupdict()
speciesId = match['speciesId']
compartmentId = match['compartmentId']
if isinstance(model, Model):
species = model.get_component_by_id(speciesId, 'species')
compartment = model.get_component_by_id(compartmentId, 'compartments')
yData = species_counts[species.index, compartment.index, :]
elif isinstance(model, Submodel):
yData = species_counts[species_compartment_id]
else:
raise Exception('Invalid model type %s' % model.__class__.__name__)
#scale
if compartmentId == 'c':
V = volume
else:
V = extracellular_volume
if units == 'pM':
scale = 1 / Avogadro / V * 1e12
elif units == 'nM':
scale = 1 / Avogadro / V * 1e9
elif units == 'uM':
scale = 1 / Avogadro / V * 1e6
elif units == 'mM':
scale = 1 / Avogadro / V * 1e3
elif units == 'M':
scale = 1 / Avogadro / V * 1e0
elif units == 'molecules':
scale = 1
else:
raise Exception('Invalid units "%s"' % units)
yData *= scale
yDatas[species_compartment_id] = yData
#plot results
yMin = 1e12
yMax = -1e12
for label, yData in yDatas.items():
#update range
yMin = min(yMin, np.min(yData))
yMax = max(yMax, np.max(yData))
#add to plot
pyplot.plot(time, yData, label=label)
#set axis limits
pyplot.xlim((0, time[-1]))
pyplot.ylim((yMin, yMax))
#add axis labels and legend
if title:
pyplot.title(title)
pyplot.xlabel('Time (h)')
if units == 'molecules':
pyplot.ylabel('Copy number')
else:
pyplot.ylabel('Concentration (%s)' % units)
y_formatter = ticker.ScalarFormatter(useOffset=False)
pyplot.gca().get_yaxis().set_major_formatter(y_formatter)
if len(selected_species_compartments) > 1:
pyplot.legend()
#save
if fileName:
fig.savefig(fileName)
pyplot.close(fig)
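# --- Illustrative sketch (not part of the original file) ---
# The unit handling above converts copy numbers to concentrations as
# concentration = count / (Avogadro * V) * 10**k. For example, ~6.022e5
# molecules in a 1 fL (1e-15 L) compartment is about 1 mM:
def example_mM_scale():  # illustrative helper
    volume = 1e-15                       # L
    scale = 1 / Avogadro / volume * 1e3  # the units == 'mM' branch
    return 6.022e5 * scale               # ~= 1.0 mM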
| 29.081818
| 96
| 0.56924
| 356
| 3,199
| 5
| 0.404494
| 0.020225
| 0.039326
| 0.042135
| 0.023596
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024256
| 0.316974
| 3,199
| 109
| 97
| 29.348624
| 0.790389
| 0.102845
| 0
| 0.057143
| 0
| 0.014286
| 0.073983
| 0.02244
| 0
| 0
| 0
| 0.009174
| 0
| 1
| 0.014286
| false
| 0
| 0.085714
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5378047f0579efdd010c7d57b8aefd313753aa1d
| 907
|
py
|
Python
|
setup.py
|
bstuddard/bonsai
|
3610fc50a3b24818288d850048c2a23306215367
|
[
"MIT"
] | 26
|
2021-07-18T14:52:47.000Z
|
2022-01-27T10:35:44.000Z
|
setup.py
|
bstuddard/bonsai
|
3610fc50a3b24818288d850048c2a23306215367
|
[
"MIT"
] | null | null | null |
setup.py
|
bstuddard/bonsai
|
3610fc50a3b24818288d850048c2a23306215367
|
[
"MIT"
] | 3
|
2021-07-20T03:25:22.000Z
|
2021-08-17T04:06:27.000Z
|
from setuptools import setup, find_packages
with open("README.md", "r") as readme_file:
readme = readme_file.read()
requirements = [
'xgboost>=0.90',
'catboost>=0.26',
'bayesian-optimization>=1.2.0',
'numpy>=1.19.5',
'pandas>=1.1.5',
'matplotlib>=3.2.2',
'seaborn>=0.11.1',
'plotly>=4.4.1',
'pyyaml>=5.4.1'
]
setup(
name="bonsai-tree",
version="1.2",
author="Landon Buechner",
author_email="mechior.magi@gmail.com",
description="Bayesian Optimization + Gradient Boosted Trees",
long_description=readme,
url="https://github.com/magi-1/bonsai",
packages=find_packages(),
package_data={'': ['*.yml']},
install_requires=requirements,
license = 'MIT',
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
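# --- Illustrative note (not part of the original file) ---
# With this setup.py, a local install and a source distribution are:
#     pip install .
#     python setup.py sdist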
| 25.914286
| 65
| 0.607497
| 113
| 907
| 4.80531
| 0.610619
| 0.104972
| 0.138122
| 0.143646
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052705
| 0.205072
| 907
| 35
| 66
| 25.914286
| 0.700416
| 0
| 0
| 0
| 0
| 0
| 0.437225
| 0.055066
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.03125
| 0
| 0.03125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
537b221bff7d480fcdf886ab83757cc48372b358
| 823
|
py
|
Python
|
_scripts/increment_version.py
|
clockhart/pathogen
|
1764d4a7d2dd7c1f5dcc08afc016ec4edf809c36
|
[
"MIT"
] | null | null | null |
_scripts/increment_version.py
|
clockhart/pathogen
|
1764d4a7d2dd7c1f5dcc08afc016ec4edf809c36
|
[
"MIT"
] | null | null | null |
_scripts/increment_version.py
|
clockhart/pathogen
|
1764d4a7d2dd7c1f5dcc08afc016ec4edf809c36
|
[
"MIT"
] | null | null | null |
"""
increment_version.py
written in Python3
author: C. Lockhart <chris@lockhartlab.org>
"""
import yaml
# Read in version
with open('version.yml', 'r') as f:
version = yaml.safe_load(f.read())
# Strip "dev" out of micro
version['micro'] = int(str(version['micro']).replace('dev', ''))
# Update micro
version['micro'] += 1
# Add "dev" back to patch
if version['micro'] != 0:
version['micro'] = 'dev' + str(version['micro'])
# Output version
with open('version.yml', 'w') as f:
yaml.safe_dump(version, f, sort_keys=False)
# Transform version dict to string
version = '.'.join([str(version[key]) for key in ['major', 'minor', 'micro']])
# Write version string to pathogen/_version.py
with open('pathogen/version.py', 'w') as f:
f.write("__version__ = '{}'\n".format(version))
# Return
print(version)
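# --- Illustrative sketch (not part of the original file) ---
# The micro field cycles through dev releases: 0 -> 'dev1' -> 'dev2' -> ...
# A restatement of the bump logic above as a function:
def bump_micro(micro):  # illustrative helper
    micro = int(str(micro).replace('dev', '')) + 1
    return 'dev' + str(micro) if micro != 0 else micro

assert bump_micro(0) == 'dev1'
assert bump_micro('dev1') == 'dev2'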
| 22.243243
| 78
| 0.660996
| 120
| 823
| 4.458333
| 0.491667
| 0.134579
| 0.056075
| 0.082243
| 0.093458
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004292
| 0.150668
| 823
| 36
| 79
| 22.861111
| 0.761087
| 0.318348
| 0
| 0
| 0
| 0
| 0.212454
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
537c67be5a305675d3c345fd99a5e6be9b4b00c1
| 15,725
|
py
|
Python
|
holoviews/core/data/ibis.py
|
TheoMathurin/holoviews
|
0defcef994d6dd6d2054f75a0e332d02d121f8b0
|
[
"BSD-3-Clause"
] | 1
|
2017-03-01T07:08:23.000Z
|
2017-03-01T07:08:23.000Z
|
holoviews/core/data/ibis.py
|
chrinide/holoviews
|
e1234a60ae0809ac561c204b1998dff0452b2bf0
|
[
"BSD-3-Clause"
] | null | null | null |
holoviews/core/data/ibis.py
|
chrinide/holoviews
|
e1234a60ae0809ac561c204b1998dff0452b2bf0
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import numpy
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from .. import util
from ..element import Element
from ..ndmapping import NdMapping, item_check, sorted_context
from .interface import Interface
from . import pandas
from .util import cached
class IbisInterface(Interface):
types = ()
datatype = "ibis"
default_partitions = 100
zero_indexed_backend_modules = [
'ibis.backends.omniscidb.client',
]
# the rowid is needed until ibis updates versions
@classmethod
def has_rowid(cls):
import ibis.expr.operations
return hasattr(ibis.expr.operations, "RowID")
@classmethod
def is_rowid_zero_indexed(cls, data):
try:
from ibis.client import find_backends, validate_backends
(backend,) = validate_backends(list(find_backends(data)))
except Exception:
backend = data._find_backend()
return type(backend).__module__ in cls.zero_indexed_backend_modules
@classmethod
def loaded(cls):
return "ibis" in sys.modules
@classmethod
def applies(cls, obj):
if not cls.loaded():
return False
from ibis.expr.types import Expr
return isinstance(obj, Expr)
@classmethod
def init(cls, eltype, data, keys, values):
params = eltype.param.objects()
index = params["kdims"]
columns = params["vdims"]
if isinstance(index.bounds[1], int):
ndim = min([index.bounds[1], len(index.default)])
else:
ndim = None
nvdim = columns.bounds[1] if isinstance(columns.bounds[1], int) else None
if keys and values is None:
values = [c for c in data.columns if c not in keys]
elif values and keys is None:
keys = [c for c in data.columns if c not in values][:ndim]
elif keys is None:
keys = list(data.columns[:ndim])
if values is None:
values = [
key
for key in data.columns[ndim : ((ndim + nvdim) if nvdim else None)]
if key not in keys
]
elif keys == [] and values is None:
values = list(data.columns[: nvdim if nvdim else None])
return data, dict(kdims=keys, vdims=values), {}
@classmethod
def compute(cls, dataset):
return dataset.clone(dataset.data.execute())
@classmethod
def persist(cls, dataset):
return cls.compute(dataset)
@classmethod
@cached
    def length(cls, dataset):
# Get the length by counting the length of an empty query.
return dataset.data[[]].count().execute()
@classmethod
@cached
def nonzero(cls, dataset):
# Make an empty query to see if a row is returned.
return bool(len(dataset.data[[]].head(1).execute()))
@classmethod
@cached
def range(cls, dataset, dimension):
dimension = dataset.get_dimension(dimension, strict=True)
if cls.dtype(dataset, dimension).kind in 'SUO':
return None, None
if dimension.nodata is not None:
return Interface.range(dataset, dimension)
column = dataset.data[dimension.name]
return tuple(
dataset.data.aggregate([column.min(), column.max()]).execute().values[0, :]
)
@classmethod
@cached
def values(
cls,
dataset,
dimension,
expanded=True,
flat=True,
compute=True,
keep_index=False,
):
dimension = dataset.get_dimension(dimension, strict=True)
data = dataset.data[dimension.name]
if not expanded:
data = data.distinct()
return data if keep_index or not compute else data.execute().values
@classmethod
def histogram(cls, expr, bins, density=True, weights=None):
bins = numpy.asarray(bins)
bins = [int(v) if bins.dtype.kind in 'iu' else float(v) for v in bins]
binned = expr.bucket(bins).name('bucket')
hist = numpy.zeros(len(bins)-1)
hist_bins = binned.value_counts().sort_by('bucket').execute()
for b, v in zip(hist_bins['bucket'], hist_bins['count']):
if numpy.isnan(b):
continue
hist[int(b)] = v
if weights is not None:
raise NotImplementedError("Weighted histograms currently "
"not implemented for IbisInterface.")
if density:
hist = hist/expr.count().execute()
return hist, bins
@classmethod
@cached
def shape(cls, dataset):
return cls.length(dataset), len(dataset.data.columns)
@classmethod
@cached
def dtype(cls, dataset, dimension):
dimension = dataset.get_dimension(dimension)
return dataset.data.head(0).execute().dtypes[dimension.name]
dimension_type = dtype
@classmethod
def sort(cls, dataset, by=[], reverse=False):
return dataset.data.sort_by([(dataset.get_dimension(x).name, not reverse) for x in by])
@classmethod
def redim(cls, dataset, dimensions):
return dataset.data.mutate(
**{v.name: dataset.data[k] for k, v in dimensions.items()}
)
validate = pandas.PandasInterface.validate
reindex = pandas.PandasInterface.reindex
@classmethod
def _index_ibis_table(cls, data):
import ibis
if not cls.has_rowid():
raise ValueError(
"iloc expressions are not supported for ibis version %s."
% ibis.__version__
)
if "hv_row_id__" in data.columns:
return data
if cls.is_rowid_zero_indexed(data):
return data.mutate(hv_row_id__=data.rowid())
else:
return data.mutate(hv_row_id__=data.rowid() - 1)
@classmethod
def iloc(cls, dataset, index):
rows, columns = index
scalar = all(map(util.isscalar, index))
if isinstance(columns, slice):
columns = [x.name for x in dataset.dimensions()[columns]]
elif numpy.isscalar(columns):
columns = [dataset.get_dimension(columns).name]
else:
columns = [dataset.get_dimension(d).name for d in columns]
data = cls._index_ibis_table(dataset.data[columns])
if scalar:
return (
data.filter(data.hv_row_id__ == rows)[columns]
.head(1)
.execute()
.iloc[0, 0]
)
if isinstance(rows, slice):
# We should use a pseudo column for the row number but i think that is still awaiting
# a pr on ibis
if any(x is not None for x in (rows.start, rows.stop, rows.step)):
predicates = []
if rows.start:
predicates += [data.hv_row_id__ >= rows.start]
if rows.stop:
predicates += [data.hv_row_id__ < rows.stop]
return data.filter(predicates).drop(["hv_row_id__"])
else:
if not isinstance(rows, Iterable):
rows = [rows]
return data.filter([data.hv_row_id__.isin(rows)]).drop(["hv_row_id__"])
return data.drop(["hv_row_id__"])
@classmethod
def unpack_scalar(cls, dataset, data):
"""
Given a dataset object and data in the appropriate format for
the interface, return a simple scalar.
"""
if len(data.columns) > 1 or data[[]].count().execute() != 1:
return data
return data.execute().iat[0, 0]
@classmethod
def groupby(cls, dataset, dimensions, container_type, group_type, **kwargs):
        # aggregate the necessary dimensions
index_dims = [dataset.get_dimension(d, strict=True) for d in dimensions]
element_dims = [kdim for kdim in dataset.kdims if kdim not in index_dims]
group_kwargs = {}
if group_type != "raw" and issubclass(group_type, Element):
group_kwargs = dict(util.get_param_values(dataset), kdims=element_dims)
group_kwargs.update(kwargs)
group_kwargs["dataset"] = dataset.dataset
group_by = [d.name for d in index_dims]
# execute a query against the table to find the unique groups.
groups = dataset.data.groupby(group_by).aggregate().execute()
# filter each group based on the predicate defined.
data = [
(
tuple(s.values.tolist()),
group_type(
dataset.data.filter(
[dataset.data[k] == v for k, v in s.to_dict().items()]
),
**group_kwargs
),
)
for i, s in groups.iterrows()
]
if issubclass(container_type, NdMapping):
with item_check(False), sorted_context(False):
return container_type(data, kdims=index_dims)
else:
return container_type(data)
@classmethod
def assign(cls, dataset, new_data):
return dataset.data.mutate(**new_data)
@classmethod
def add_dimension(cls, dataset, dimension, dim_pos, values, vdim):
import ibis
data = dataset.data
if dimension.name not in data.columns:
if not isinstance(values, ibis.Expr) and not numpy.isscalar(values):
raise ValueError("Cannot assign %s type as a Ibis table column, "
"expecting either ibis.Expr or scalar."
% type(values).__name__)
data = data.mutate(**{dimension.name: values})
return data
@classmethod
@cached
def isscalar(cls, dataset, dim):
return (
dataset.data[dataset.get_dimension(dim, strict=True).name]
.distinct()
.count()
            .execute()
== 1
)
@classmethod
def select(cls, dataset, selection_mask=None, **selection):
if selection_mask is None:
selection_mask = cls.select_mask(dataset, selection)
indexed = cls.indexed(dataset, selection)
data = dataset.data
if isinstance(selection_mask, numpy.ndarray):
data = cls._index_ibis_table(data)
if selection_mask.dtype == numpy.dtype("bool"):
selection_mask = numpy.where(selection_mask)[0]
data = data.filter(
data["hv_row_id__"].isin(list(map(int, selection_mask)))
).drop(["hv_row_id__"])
elif selection_mask is not None and not (isinstance(selection_mask, list) and not selection_mask):
data = data.filter(selection_mask)
if indexed and data.count().execute() == 1 and len(dataset.vdims) == 1:
return data[dataset.vdims[0].name].execute().iloc[0]
return data
@classmethod
def select_mask(cls, dataset, selection):
import ibis
predicates = []
for dim, object in selection.items():
if isinstance(object, tuple):
object = slice(*object)
alias = dataset.get_dimension(dim).name
column = dataset.data[alias]
if isinstance(object, slice):
if object.start is not None:
# Workaround for dask issue #3392
bound = util.numpy_scalar_to_python(object.start)
predicates.append(bound <= column)
if object.stop is not None:
bound = util.numpy_scalar_to_python(object.stop)
predicates.append(column < bound)
elif isinstance(object, (set, list)):
# rowid conditions
condition = None
for id in object:
predicate = column == id
condition = (
predicate if condition is None else condition | predicate
)
if condition is not None:
predicates.append(condition)
elif callable(object):
predicates.append(object(column))
elif isinstance(object, ibis.Expr):
predicates.append(object)
else:
predicates.append(column == object)
return predicates
@classmethod
def sample(cls, dataset, samples=[]):
import ibis
dims = dataset.dimensions()
data = dataset.data
if all(util.isscalar(s) or len(s) == 1 for s in samples):
items = [s[0] if isinstance(s, tuple) else s for s in samples]
return data[data[dims[0].name].isin(items)]
predicates = None
for sample in samples:
if util.isscalar(sample):
sample = [sample]
if not sample:
continue
predicate = None
for i, v in enumerate(sample):
p = data[dims[i].name] == ibis.literal(util.numpy_scalar_to_python(v))
if predicate is None:
predicate = p
else:
predicate &= p
if predicates is None:
predicates = predicate
else:
predicates |= predicate
return data if predicates is None else data.filter(predicates)
@classmethod
def aggregate(cls, dataset, dimensions, function, **kwargs):
import ibis.expr.operations
data = dataset.data
columns = [d.name for d in dataset.kdims if d in dimensions]
values = dataset.dimensions("value", label="name")
new = data[columns + values]
function = {
numpy.min: ibis.expr.operations.Min,
numpy.nanmin: ibis.expr.operations.Min,
numpy.max: ibis.expr.operations.Max,
numpy.nanmax: ibis.expr.operations.Max,
numpy.mean: ibis.expr.operations.Mean,
numpy.nanmean: ibis.expr.operations.Mean,
numpy.std: ibis.expr.operations.StandardDev,
numpy.nanstd: ibis.expr.operations.StandardDev,
numpy.sum: ibis.expr.operations.Sum,
numpy.nansum: ibis.expr.operations.Sum,
numpy.var: ibis.expr.operations.Variance,
numpy.nanvar: ibis.expr.operations.Variance,
len: ibis.expr.operations.Count,
}.get(function, function)
if len(dimensions):
selection = new.groupby(columns)
if function is numpy.count_nonzero:
aggregation = selection.aggregate(
**{
x: ibis.expr.operations.Count(new[x], where=new[x] != 0).to_expr()
for x in new.columns
if x not in columns
}
)
else:
aggregation = selection.aggregate(
**{
x: function(new[x]).to_expr()
for x in new.columns
if x not in columns
}
)
else:
aggregation = new.aggregate(
**{x: function(new[x]).to_expr() for x in new.columns}
)
dropped = [x for x in values if x not in data.columns]
return aggregation, dropped
@classmethod
@cached
def mask(cls, dataset, mask, mask_value=numpy.nan):
raise NotImplementedError('Mask is not implemented for IbisInterface.')
@classmethod
@cached
def dframe(cls, dataset, dimensions):
return dataset.data[dimensions].execute()
Interface.register(IbisInterface)
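# --- Illustrative sketch (not part of the original file) ---
# `IbisInterface.select_mask` maps holoviews selection specs onto ibis
# predicates: tuples and slices become range conditions (start <= col <
# stop), sets/lists become an OR-chain of equalities, callables are
# applied to the column, and anything else is an equality test. The same
# shapes in plain Python, evaluated for a single value:
def matches_selection(value, spec):  # illustrative helper
    if isinstance(spec, tuple):
        spec = slice(*spec)
    if isinstance(spec, slice):
        return ((spec.start is None or spec.start <= value)
                and (spec.stop is None or value < spec.stop))
    if isinstance(spec, (set, list)):
        return any(value == item for item in spec)
    if callable(spec):
        return spec(value)
    return value == spec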
| 35.022272
| 106
| 0.569348
| 1,776
| 15,725
| 4.945946
| 0.161036
| 0.030055
| 0.034836
| 0.006261
| 0.14367
| 0.088798
| 0.068989
| 0.04531
| 0.025273
| 0.025273
| 0
| 0.003155
| 0.334817
| 15,725
| 448
| 107
| 35.100446
| 0.836616
| 0.034658
| 0
| 0.217507
| 0
| 0
| 0.027348
| 0.001982
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.050398
| 0.029178
| 0.254642
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
537e41912df4cf73c680542167c1c109a8513d39
| 3,907
|
py
|
Python
|
chess/models/tournament.py
|
S0Imyr/Projet-4
|
1d93e125bc6e44bc560f3ffc9b11e14e35291c98
|
[
"MIT"
] | null | null | null |
chess/models/tournament.py
|
S0Imyr/Projet-4
|
1d93e125bc6e44bc560f3ffc9b11e14e35291c98
|
[
"MIT"
] | null | null | null |
chess/models/tournament.py
|
S0Imyr/Projet-4
|
1d93e125bc6e44bc560f3ffc9b11e14e35291c98
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Handles the tournament logic
"""
import datetime
from chess.utils.utils import get_new_id
from chess.models.actors import Player
from chess.models.round import Round
TOURNAMENT_ID_WIDTH = 8
NB_ROUND = 4
NB_PLAYERS = 8
NB_MATCH = 4
class Tournament:
""" The class Tournament is the central piece of the models. """
last_tournament_id = "0" * TOURNAMENT_ID_WIDTH
def __init__(self, name, location, timer_type, description):
Tournament.last_tournament_id = get_new_id(Tournament.last_tournament_id, TOURNAMENT_ID_WIDTH)
self.tournament_id = Tournament.last_tournament_id
self.name = name
self.location = location
self.start_date = None
self.end_date = None
self.timer_type = timer_type
self.description = description
self.number_of_rounds = NB_ROUND
self.rounds = []
self.list_of_players = []
self.players_assigned = False
self.finished = False
def define_players(self, actors):
""" Defines the list of identifier of the players who join the tournament.
:param actors:
:return: None
"""
for num_player in range(NB_PLAYERS):
self.list_of_players.append(Player(actors[num_player],
self.tournament_id,
num_player))
def init_round(self, num_round):
""" Launches the round number "num_round".
:param num_round: number of the round played
:return: None
"""
tour = Round(num_round, self.tournament_id, self.list_of_players)
tour.start_date = datetime.date.today()
tour.rank_players()
tour.define_matches()
self.rounds.append(tour)
def register_round_results(self, num_round, winner):
""" Registers the results of the round.
:param num_round: the round number.
:param winner: the list of the winners.
:return: None.
"""
self.rounds[num_round].register_results(winner)
self.rounds[num_round].assign_points()
self.rounds[num_round].finished = True
self.rounds[num_round].memorize_opponents()
self.rounds[num_round].rank_players()
self.rounds[num_round].end_date = datetime.date.today()
def tournament_to_dict(self):
""" Converts the tournament into a dictionary
:return: dictionary of the tournament instance.
"""
string_attributes = ['tournament_id',
'name',
'location',
'timer_type',
'description',
'number_of_rounds',
'players_assigned']
serialized_tournament = {}
for attribute in string_attributes:
serialized_tournament[attribute] = getattr(self, attribute)
serialized_tournament['rounds'] = []
for r0und in self.rounds:
serialized_tournament['rounds'].append(r0und.round_to_dict())
serialized_tournament['list_of_players'] = []
for player in self.list_of_players:
serialized_tournament['list_of_players'].append(player.player_to_dict())
serialized_tournament['start_date'] = str(self.start_date)
serialized_tournament['end_date'] = str(self.end_date)
return serialized_tournament
def end_tournament(self):
""" Handles the end of the tournament.
Adds the tournament_id to the players list of tournaments.
Defines the attribute finished and the end date of the tournament.
"""
for player in self.list_of_players:
player.actor.list_of_tournaments_played.append(self.tournament_id)
self.finished = True
self.end_date = datetime.date.today()
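# --- Illustrative sketch (not part of the original file) ---
# `get_new_id` comes from chess.utils.utils and is not shown here. From
# how it is used above, a plausible stand-in increments the last id and
# zero-pads it to the requested width (this is an assumption):
def get_new_id_sketch(last_id, width):  # hypothetical stand-in
    return str(int(last_id) + 1).zfill(width)

assert get_new_id_sketch("0" * TOURNAMENT_ID_WIDTH, TOURNAMENT_ID_WIDTH) == "00000001"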
| 33.393162
| 102
| 0.621449
| 449
| 3,907
| 5.158129
| 0.211581
| 0.067358
| 0.039292
| 0.046632
| 0.141192
| 0.02418
| 0.02418
| 0
| 0
| 0
| 0
| 0.002896
| 0.293064
| 3,907
| 116
| 103
| 33.681034
| 0.835626
| 0.176606
| 0
| 0.029851
| 0
| 0
| 0.045649
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089552
| false
| 0
| 0.059701
| 0
| 0.19403
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
537f1ecf5b58054b91b3f560bcbfa1d5fc3ac88d
| 16,328
|
py
|
Python
|
tests/test_app.py
|
inmanta/inmanta-core
|
ae2153d57f124d00ad1b58e6d4bc6818364be4a8
|
[
"Apache-2.0"
] | 6
|
2021-03-09T10:24:02.000Z
|
2022-01-16T03:52:11.000Z
|
tests/test_app.py
|
inmanta/inmanta-core
|
ae2153d57f124d00ad1b58e6d4bc6818364be4a8
|
[
"Apache-2.0"
] | 1,319
|
2020-12-18T08:52:29.000Z
|
2022-03-31T18:17:32.000Z
|
tests/test_app.py
|
inmanta/inmanta-core
|
ae2153d57f124d00ad1b58e6d4bc6818364be4a8
|
[
"Apache-2.0"
] | 4
|
2021-03-03T15:36:50.000Z
|
2022-03-11T11:41:51.000Z
|
"""
Copyright 2018 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: code@inmanta.com
"""
import os
import re
import signal
import subprocess
import sys
from subprocess import TimeoutExpired
from threading import Timer
import pytest
import inmanta.util
from inmanta import const
def get_command(
tmp_dir,
stdout_log_level=None,
log_file=None,
log_level_log_file=None,
timed=False,
dbport=None,
dbname="inmanta",
dbhost=None,
dbuser=None,
dbpass=None,
config_dir=None,
server_extensions=[],
version=False,
):
root_dir = tmp_dir.mkdir("root").strpath
log_dir = os.path.join(root_dir, "log")
state_dir = os.path.join(root_dir, "data")
for directory in [log_dir, state_dir]:
os.mkdir(directory)
config_file = os.path.join(root_dir, "inmanta.cfg")
if dbport is not None:
port = dbport
else:
port = inmanta.util.get_free_tcp_port()
with open(config_file, "w+", encoding="utf-8") as f:
f.write("[config]\n")
f.write("log-dir=" + log_dir + "\n")
f.write("state-dir=" + state_dir + "\n")
f.write("[database]\n")
f.write("port=" + str(port) + "\n")
f.write("name=" + dbname + "\n")
if dbhost:
f.write(f"host={dbhost}\n")
if dbuser:
f.write(f"username={dbuser}\n")
if dbpass:
f.write(f"password={dbpass}\n")
f.write("[server]\n")
f.write(f"enabled_extensions={', '.join(server_extensions)}\n")
args = [sys.executable, "-m", "inmanta.app"]
if stdout_log_level:
args.append("-" + "v" * stdout_log_level)
if log_file:
log_file = os.path.join(log_dir, log_file)
args += ["--log-file", log_file]
if log_file and log_level_log_file:
args += ["--log-file-level", str(log_level_log_file)]
if timed:
args += ["--timed-logs"]
if config_dir:
args += ["--config-dir", config_dir]
if version:
args += ["--version"]
args += ["-c", config_file, "server"]
return (args, log_dir)
def do_run(args, env={}, cwd=None):
baseenv = os.environ.copy()
baseenv.update(env)
process = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=baseenv)
return process
def convert_to_ascii(text):
return [line for line in text.decode("ascii").split("\n") if line != ""]
def do_kill(process, killtime=3, termtime=2):
def do_and_log(func, msg):
def w():
print(msg)
func()
return w
t1 = Timer(killtime, do_and_log(process.kill, "killed process"))
t2 = Timer(termtime, do_and_log(process.terminate, "terminated process"))
t1.start()
t2.start()
out, err = process.communicate()
t1.cancel()
t2.cancel()
stdout = convert_to_ascii(out)
stderr = convert_to_ascii(err)
return (stdout, stderr, process.returncode)
def run_without_tty(args, env={}, killtime=3, termtime=2):
process = do_run(args, env)
return do_kill(process, killtime, termtime)
def run_with_tty(args, killtime=3, termtime=2):
"""Could not get code for actual tty to run stable in docker, so we are faking it """
env = {const.ENVIRON_FORCE_TTY: "true"}
return run_without_tty(args, env=env, killtime=killtime, termtime=termtime)
def get_timestamp_regex():
return r"[\d]{4}\-[\d]{2}\-[\d]{2} [\d]{2}\:[\d]{2}\:[\d]{2}\,[\d]{3}"
def get_compiled_regexes(regexes, timed):
result = []
for regex in regexes:
if timed:
regex = get_timestamp_regex() + " " + regex
compiled_regex = re.compile(regex)
result.append(compiled_regex)
return result
def is_colorama_package_available():
try:
import colorama # noqa: F401
except ModuleNotFoundError:
return False
return True
def test_verify_that_colorama_package_is_not_present():
"""
The colorama package turns the colored characters in TTY-based terminal into uncolored characters.
As such, this package should not be present.
"""
assert not is_colorama_package_available()
@pytest.mark.parametrize_any(
"log_level, timed, with_tty, regexes_required_lines, regexes_forbidden_lines",
[
(
3,
False,
False,
[r"[a-z.]*[ ]*INFO[\s]+Starting server endpoint", r"[a-z.]*[ ]*DEBUG[\s]+Starting Server Rest Endpoint"],
[],
),
(
2,
False,
False,
[r"[a-z.]*[ ]*INFO[\s]+Starting server endpoint"],
[r"[a-z.]*[ ]*DEBUG[\s]+Starting Server Rest Endpoint"],
),
(
3,
False,
True,
[
r"\x1b\[32m[a-z.]*[ ]*INFO[\s]*\x1b\[0m \x1b\[34mStarting server endpoint",
r"\x1b\[36m[a-z.]*[ ]*DEBUG[\s]*\x1b\[0m \x1b\[34mStarting Server Rest Endpoint",
],
[],
),
(
2,
False,
True,
[r"\x1b\[32m[a-z.]*[ ]*INFO[\s]*\x1b\[0m \x1b\[34mStarting server endpoint"],
[r"\x1b\[36m[a-z.]*[ ]*DEBUG[\s]*\x1b\[0m \x1b\[34mStarting Server Rest Endpoint"],
),
(
3,
True,
False,
[r"[a-z.]*[ ]*INFO[\s]+Starting server endpoint", r"[a-z.]*[ ]*DEBUG[\s]+Starting Server Rest Endpoint"],
[],
),
(
2,
True,
False,
[r"[a-z.]*[ ]*INFO[\s]+Starting server endpoint"],
[r"[a-z.]*[ ]*DEBUG[\s]+Starting Server Rest Endpoint"],
),
(
3,
True,
True,
[
r"\x1b\[32m[a-z.]*[ ]*INFO[\s]*\x1b\[0m \x1b\[34mStarting server endpoint",
r"\x1b\[36m[a-z.]*[ ]*DEBUG[\s]*\x1b\[0m \x1b\[34mStarting Server Rest Endpoint",
],
[],
),
(
2,
True,
True,
[r"\x1b\[32m[a-z.]*[ ]*INFO[\s]*\x1b\[0m \x1b\[34mStarting server endpoint"],
[r"\x1b\[36m[a-z.]*[ ]*DEBUG[\s]*\x1b\[0m \x1b\[34mStarting Server Rest Endpoint"],
),
],
)
@pytest.mark.timeout(20)
def test_no_log_file_set(tmpdir, log_level, timed, with_tty, regexes_required_lines, regexes_forbidden_lines):
if is_colorama_package_available() and with_tty:
pytest.skip("Colorama is present")
(args, log_dir) = get_command(tmpdir, stdout_log_level=log_level, timed=timed)
if with_tty:
(stdout, _, _) = run_with_tty(args)
else:
(stdout, _, _) = run_without_tty(args)
log_file = "server.log"
assert log_file not in os.listdir(log_dir)
assert len(stdout) != 0
check_logs(stdout, regexes_required_lines, regexes_forbidden_lines, timed)
@pytest.mark.parametrize_any(
"log_level, with_tty, regexes_required_lines, regexes_forbidden_lines",
[
(
3,
False,
[
r"[a-z.]*[ ]*INFO[\s]+[a-x\.A-Z]*[\s]Starting server endpoint",
r"[a-z.]*[ ]*DEBUG[\s]+[a-x\.A-Z]*[\s]Starting Server Rest Endpoint",
],
[],
),
(
2,
False,
[r"[a-z.]*[ ]*INFO[\s]+[a-x\.A-Z]*[\s]Starting server endpoint"],
[r"[a-z.]*[ ]*DEBUG[\s]+[a-x\.A-Z]*[\s]Starting Server Rest Endpoint"],
),
(
3,
True,
[
r"[a-z.]*[ ]*INFO[\s]+[a-x\.A-Z]*[\s]Starting server endpoint",
r"[a-z.]*[ ]*DEBUG[\s]+[a-x\.A-Z]*[\s]Starting Server Rest Endpoint",
],
[],
),
(
2,
True,
[r"[a-z.]*[ ]*INFO[\s]+[a-x\.A-Z]*[\s]Starting server endpoint"],
[r"[a-z.]*[ ]*DEBUG[\s]+[a-x\.A-Z]*[\s]Starting Server Rest Endpoint"],
),
],
)
@pytest.mark.timeout(60)
def test_log_file_set(tmpdir, log_level, with_tty, regexes_required_lines, regexes_forbidden_lines):
if is_colorama_package_available() and with_tty:
pytest.skip("Colorama is present")
log_file = "server.log"
(args, log_dir) = get_command(tmpdir, stdout_log_level=log_level, log_file=log_file, log_level_log_file=log_level)
if with_tty:
(stdout, _, _) = run_with_tty(args)
else:
(stdout, _, _) = run_without_tty(args)
assert log_file in os.listdir(log_dir)
log_file = os.path.join(log_dir, log_file)
with open(log_file, "r") as f:
log_lines = f.readlines()
check_logs(log_lines, regexes_required_lines, regexes_forbidden_lines, timed=True)
check_logs(stdout, [], regexes_required_lines, timed=True)
check_logs(stdout, [], regexes_required_lines, timed=False)
def check_logs(log_lines, regexes_required_lines, regexes_forbidden_lines, timed):
    compiled_regexes_required_lines = get_compiled_regexes(regexes_required_lines, timed)
    compiled_regexes_forbidden_lines = get_compiled_regexes(regexes_forbidden_lines, timed)
    # Echo the captured lines so they show up in pytest output when an assertion below fails.
    for line in log_lines:
        print(line)
    for regex in compiled_regexes_required_lines:
        if not any(regex.match(line) for line in log_lines):
            pytest.fail("Required pattern was not found in log lines: %s" % (regex.pattern,))
    for regex in compiled_regexes_forbidden_lines:
        if any(regex.match(line) for line in log_lines):
            pytest.fail("Forbidden pattern found in log lines: %s" % (regex.pattern,))
def test_check_shutdown():
process = do_run([sys.executable, os.path.join(os.path.dirname(__file__), "miniapp.py")])
# wait for handler to be in place
try:
process.communicate(timeout=2)
except TimeoutExpired:
pass
process.send_signal(signal.SIGUSR1)
out, err, code = do_kill(process, killtime=3, termtime=1)
print(out, err)
assert code == 0
assert "----- Thread Dump ----" in out
assert "STOP" in out
assert "SHUTDOWN COMPLETE" in out
def test_check_bad_shutdown():
print([sys.executable, os.path.join(os.path.dirname(__file__), "miniapp.py"), "bad"])
process = do_run([sys.executable, os.path.join(os.path.dirname(__file__), "miniapp.py"), "bad"])
out, err, code = do_kill(process, killtime=5, termtime=2)
print(out, err)
assert code == 3
assert "----- Thread Dump ----" in out
assert "STOP" not in out
assert "SHUTDOWN COMPLETE" not in out
assert not err
def test_startup_failure(tmpdir, postgres_db, database_name):
(args, log_dir) = get_command(
tmpdir,
dbport=postgres_db.port,
dbname=database_name,
dbhost=postgres_db.host,
dbuser=postgres_db.user,
dbpass=postgres_db.password,
server_extensions=["badplugin"],
)
pp = ":".join(sys.path)
# Add a bad module
extrapath = os.path.join(os.path.dirname(__file__), "data", "bad_module_path")
(stdout, stderr, code) = run_without_tty(args, env={"PYTHONPATH": pp + ":" + extrapath}, killtime=15, termtime=10)
assert "inmanta ERROR Server setup failed" in stdout
assert (
"inmanta.server.protocol.SliceStartupException: "
"Slice badplugin.badslice failed to start because: Too bad, this plugin is broken"
) in stdout
assert code == 4
def test_compiler_exception_output(snippetcompiler):
snippetcompiler.setup_for_snippet(
"""
entity Test:
number attr
end
implement Test using std::none
o = Test(attr="1234")
"""
)
output = (
"""Could not set attribute `attr` on instance `__config__::Test (instantiated at ./main.cf:8)` """
"""(reported in Construct(Test) (./main.cf:8))
caused by:
Invalid value '1234', expected Number (reported in Construct(Test) (./main.cf:8))
"""
)
    def run_app(*cmd):
        # Named run_app rather than exec to avoid shadowing the built-in.
        process = do_run([sys.executable, "-m", "inmanta.app"] + list(cmd), cwd=snippetcompiler.project_dir)
        out, err = process.communicate(timeout=30)
        assert out.decode() == ""
        assert err.decode() == output

    run_app("compile")
    run_app("export", "-J", "out.json")
@pytest.mark.timeout(15)
@pytest.mark.parametrize_any(
"cmd", [(["-X", "compile"]), (["compile", "-X"]), (["compile"]), (["export", "-X"]), (["-X", "export"]), (["export"])]
)
def test_minus_x_option(snippetcompiler, cmd):
snippetcompiler.setup_for_snippet(
"""
entity Test:
nuber attr
end
"""
)
process = do_run([sys.executable, "-m", "inmanta.app"] + cmd, cwd=snippetcompiler.project_dir)
out, err = process.communicate(timeout=30)
assert out.decode() == ""
if "-X" in cmd:
assert "inmanta.ast.TypeNotFoundException: could not find type nuber in namespace" in str(err)
else:
assert "inmanta.ast.TypeNotFoundException: could not find type nuber in namespace" not in str(err)
@pytest.mark.timeout(20)
def test_warning_config_dir_option_on_server_command(tmpdir):
non_existing_dir = os.path.join(tmpdir, "non_existing_dir")
assert not os.path.isdir(non_existing_dir)
(args, _) = get_command(tmpdir, stdout_log_level=3, config_dir=non_existing_dir)
(stdout, _, _) = run_without_tty(args)
stdout = "".join(stdout)
assert "Starting server endpoint" in stdout
assert f"Config directory {non_existing_dir} doesn't exist" in stdout
@pytest.mark.timeout(20)
def test_warning_min_c_option_file_doesnt_exist(snippetcompiler, tmpdir):
non_existing_config_file = os.path.join(tmpdir, "non_existing_config_file")
snippetcompiler.setup_for_snippet(
"""
entity Test:
number attr
end
"""
)
config_options = ["-c", non_existing_config_file, "-vvv"]
args = [sys.executable, "-m", "inmanta.app"] + config_options + ["compile"]
process = do_run(args, cwd=snippetcompiler.project_dir)
out, err = process.communicate(timeout=30)
assert process.returncode == 0
out = out.decode()
err = err.decode()
all_output = out + err
assert "Starting compile" in all_output
assert "Compile done" in all_output
assert f"Config file {non_existing_config_file} doesn't exist" in all_output
@pytest.mark.parametrize_any(
"with_tty, version_should_be_shown, regexes_required_lines, regexes_forbidden_lines",
[
(False, True, [r"Inmanta Service Orchestrator", r"Compiler version: ", r"Extensions:", r"\s*\* core:"], []),
(True, True, [r"Inmanta Service Orchestrator", r"Compiler version: ", r"Extensions:", r"\s*\* core:"], []),
(False, False, [], [r"Inmanta Service Orchestrator", r"Compiler version: ", r"Extensions:", r"\s*\* core:"]),
(True, False, [], [r"Inmanta Service Orchestrator", r"Compiler version: ", r"Extensions:", r"\s*\* core:"]),
],
)
@pytest.mark.timeout(20)
def test_version_argument_is_set(tmpdir, with_tty, version_should_be_shown, regexes_required_lines, regexes_forbidden_lines):
(args, log_dir) = get_command(tmpdir, version=version_should_be_shown)
if with_tty:
(stdout, _, _) = run_with_tty(args, killtime=15, termtime=10)
else:
(stdout, _, _) = run_without_tty(args, killtime=15, termtime=10)
assert len(stdout) != 0
check_logs(stdout, regexes_required_lines, regexes_forbidden_lines, False)
def test_init_project(tmpdir):
args = [sys.executable, "-m", "inmanta.app", "project", "init", "-n", "test-project", "-o", tmpdir, "--default"]
(stdout, stderr, return_code) = run_without_tty(args, killtime=15, termtime=10)
test_project_path = os.path.join(tmpdir, "test-project")
assert return_code == 0
assert os.path.exists(test_project_path)
(stdout, stderr, return_code) = run_without_tty(args, killtime=15, termtime=10)
assert return_code != 0
assert len(stderr) == 1
assert "already exists" in stderr[0]
| 33.390593
| 125
| 0.614282
| 2,136
| 16,328
| 4.510768
| 0.164326
| 0.006642
| 0.004982
| 0.008718
| 0.506072
| 0.448573
| 0.394811
| 0.358588
| 0.348106
| 0.325272
| 0
| 0.013938
| 0.239833
| 16,328
| 488
| 126
| 33.459016
| 0.762327
| 0.052609
| 0
| 0.339523
| 0
| 0.045093
| 0.226046
| 0.047508
| 0
| 0
| 0
| 0
| 0.090186
| 1
| 0.066313
| false
| 0.013263
| 0.029178
| 0.005305
| 0.124668
| 0.013263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
537fda8bf126c424a17def77a9e57731a1bb799c
| 449
|
py
|
Python
|
AtC_Beg_Con_021-030/ABC027/C.py
|
yosho-18/AtCoder
|
50f6d5c92a01792552c31ac912ce1cd557b06fb0
|
[
"MIT"
] | null | null | null |
AtC_Beg_Con_021-030/ABC027/C.py
|
yosho-18/AtCoder
|
50f6d5c92a01792552c31ac912ce1cd557b06fb0
|
[
"MIT"
] | null | null | null |
AtC_Beg_Con_021-030/ABC027/C.py
|
yosho-18/AtCoder
|
50f6d5c92a01792552c31ac912ce1cd557b06fb0
|
[
"MIT"
] | null | null | null |
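# AtCoder ABC027 problem C (per the file path): read n, find the zero-based row of the complete
# binary tree that contains n (2**row <= n < 2**(row + 1)), build a critical threshold with
# seki(), and print the winner, "Aoki" or "Takahashi".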
n = int(input())
row = 0
for i in range(100):
if 2 ** i <= n <= 2 ** (i + 1) - 1:
row = i
break
def seki(k, n):
    # Apply k -> 4 * k + 2 a total of n times to build the critical threshold.
    for _ in range(n):
        k = 4 * k + 2
    return k
k = 0
if row % 2 != 0:
k = 2
cri = seki(k, row // 2)
if n < cri:
print("Aoki")
else:
print("Takahashi")
else:
k = 1
cri = seki(k, row // 2)
if n < cri:
print("Takahashi")
else:
print("Aoki")
| 14.966667
| 39
| 0.4098
| 72
| 449
| 2.541667
| 0.333333
| 0.081967
| 0.087432
| 0.120219
| 0.251366
| 0.251366
| 0.251366
| 0.251366
| 0.251366
| 0
| 0
| 0.065637
| 0.423163
| 449
| 29
| 40
| 15.482759
| 0.640927
| 0
| 0
| 0.44
| 0
| 0
| 0.057906
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0
| 0
| 0.08
| 0.16
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5382d0895ddebaa840fcd4f4a2179b700c0dfe67
| 21,396
|
py
|
Python
|
extplugins/codvote.py
|
Desi-Boyz/cod4x-server-B3-configs
|
03a323d7ea293efe1831ed315001391b9aaf532a
|
[
"MIT"
] | 1
|
2017-07-17T22:21:10.000Z
|
2017-07-17T22:21:10.000Z
|
extplugins/codvote.py
|
Desi-Boyz/cod4x-server-B3-configs
|
03a323d7ea293efe1831ed315001391b9aaf532a
|
[
"MIT"
] | null | null | null |
extplugins/codvote.py
|
Desi-Boyz/cod4x-server-B3-configs
|
03a323d7ea293efe1831ed315001391b9aaf532a
|
[
"MIT"
] | null | null | null |
# CoDVote plugin for BigBrotherBot(B3) (www.bigbrotherbot.net)
# Copyright (C) 2015 ph03n1x
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Changelog:
# v1.0.1 - Fixed vote remaining in progress if requirements for vote unmet.
# v1.0.2 - Added "!vote maps" to show what maps can be called into vote.
# - Fixed issue where person who called vote needed to vote as well. Changed to automatic yes vote.
__version__ = '1.0.2'
__author__ = 'ph03n1x'
import b3, threading
import b3.plugin
import b3.events
class CodvotePlugin(b3.plugin.Plugin):
adminPlugin = None
_vote = None # Stores which vote is currently in progress
_value = None # Stores the value of the vote
_votetime = 30 # Time before a vote will be canceled for not passing
_aVotes = {} # All votes allowed. Imported from "votes" section in config
_aMaps = {} # All vote allowed maps. Imported from "votemaps" section in config
_amt_yes = [] # Amount of players who voted yes. Checked against amount of players in game
_amt_no = []
_allplayers = [] # Amount of players in game
_mapRequested = None # Stores which map is being voted for
_kickRequested = None # Stores which player will be kicked if vote passed
_default_messages = {
'tovote': '^7Use ^2!yes ^7or ^2!no ^7 to vote',
'map': "Map vote in progress: Change map to ^3$s^7?",
'nextmap': "Next map vote in progress. Change next map to ^3$s^7?",
'kick': "Kick vote in progress: Kick ^2$s^7?",
'maprotate': "Rotate map vote in progress. Go to next map?",
'maprestart': "Maprestart vote in progress. Restart current map?",
'friendlyfire': "Friendlyfire vote in progress. Change friendlyfire mode to ^2$s^7?",
'killcam': "Killcam vote in progress. Turn killcam ^2$s^7?",
'scorelimit': "Scorelimit vote in progress. Change score limit to ^2$s^7?",
'timelimit': "Timelimit vote in progress. Change time limit to ^2$s^7?",
'roundlength': "Round length vote in progress. Change round length to ^2$s^7?",
'roundlimit': "Round limit vote in progress. Change round limit to ^2$s^7?",
}
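    # The "$s" placeholder is substituted by b3's getMessage(); e.g. a map vote for "crash"
    # would render (illustratively) as "Map vote in progress: Change map to ^3crash^7?".
    # The ^<digit> sequences are Call of Duty colour codes.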
def onStartup(self):
self.adminPlugin = self.console.getPlugin('admin')
if not self.adminPlugin:
self.error('Could not find admin plugin')
return
# Register commands
if 'commands' in self.config.sections():
for cmd in self.config.options('commands'):
level = self.config.get('commands', cmd)
sp = cmd.split('-')
alias = None
if len(sp) == 2:
cmd, alias = sp
func = self.getCmd(cmd)
if func:
self.adminPlugin.registerCommand(self, cmd, level, func, alias)
# Re-deploy commands for consideration of this plugin
self.adminPlugin.registerCommand(self, 'nextmap', 1, self.cmd_nextmap, 'nm')
self.adminPlugin.registerCommand(self, 'maprotate', 20, self.cmd_maprotate, None)
self.adminPlugin.registerCommand(self, 'allvotes', 1, self.cmd_allvotes, None)
# Register events
self.registerEvent('EVT_GAME_EXIT', self.onGameEnd)
def onLoadConfig(self):
# Load settings section
try:
self._votetime = self.config.getint('settings', 'votetime')
        except Exception:  # fall back to the default votetime if the option is missing or invalid
self.debug('Unable to get [votetime] from settings. Using default: %s' % self._votetime)
# Load votemaps section
if self.config.has_section('votemaps'):
for (mapname, consolename) in self.config.items('votemaps'):
if mapname:
self._aMaps[mapname] = consolename
self.debug('Successfully entered maps for voting: %s' % self._aMaps)
# Load votes section
if self.config.has_section('votes'):
adLvl = {'guest': 0,
'user': 1,
'reg': 2,
'mod': 20,
'admin': 40,
'fulladmin': 60,
'senioradmin': 80,
'superadmin': 100}
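            # A vote entry may be a numeric level or a b3 group keyword; e.g. an (illustrative)
            # config line "kick: mod" resolves to level 20 through the mapping above.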
for (entry, value) in self.config.items('votes'):
try:
value = int(value)
self._aVotes[entry.lower()] = value
except ValueError:
self._aVotes[entry.lower()] = adLvl[value]
self.debug('Allowed votes are: %s' % self._aVotes)
def getCmd(self, cmd):
cmd = 'cmd_%s' % cmd
if hasattr(self, cmd):
func = getattr(self, cmd)
return func
return None
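    # Illustrative example: getCmd('vote') resolves to self.cmd_vote; a name without a
    # matching cmd_<name> method returns None and is simply not registered.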
######################### VOTE TIMING ##############################
def voteTimer(self):
t1 = threading.Timer((self._votetime - 5), self.voteMessage)
t1.start()
def voteMessage(self):
if self._vote:
self.console.say('^110 seconds until vote end!')
t2 = threading.Timer(10, self.denyVote)
t2.start()
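    # Timing note: the warning fires at (votetime - 5) seconds and itself schedules denyVote()
    # 10 seconds later, so an undecided vote is actually denied at roughly votetime + 5 seconds.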
######################### MAP HANDLING ##############################
def _search(self, maplist, partial):
a = []
        for mapname, consolename in maplist.items():  # items() works on both Python 2 and 3
if partial in mapname:
a.append(mapname)
elif partial in consolename:
a.append(mapname)
return a
def mapvote(self, client, wantedMap):
# Find if map is in allowed list
match = self._search(self._aMaps, wantedMap)
if len(match) == 1:
self._mapRequested = match[0]
self._value = match[0]
return True
elif len(match) > 1:
match = (', ').join(match)
client.message('^1ABORTED!^7Multiple matches: %s' % match)
return False
elif len(match) == 0:
client.message('^1ABORTED!^7No maps matching your request')
return False
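    # Matching is by substring against both the friendly and the console map name, so e.g.
    # "cra" could match "crash" (illustrative); ambiguous or empty matches abort the vote.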
############### NEXTMAP FUNCTIONING ################
def onGameEnd(self, event):
"""
        Handle EVT_GAME_EXIT, the event registered in onStartup()
"""
if self._mapRequested:
self.confirmMap()
self._mapRequested = None
############### CONFIRM VOTES ######################
def confirmVote(self):
self.console.say('^3Vote passed!^7')
if self._vote == 'map':
self.confirmMap()
elif self._vote == 'nextmap':
self.debug('nextmap vote passed. Params already stored')
elif self._vote == 'kick':
self.confirmKick()
elif self._vote == 'maprotate':
if self._mapRequested:
self.confirmMap()
else:
self.console.rotateMap()
elif self._vote == 'maprestart':
self.confirmMaprestart()
elif self._vote == 'friendlyfire':
self.confirmFriendlyFire()
elif self._vote == 'killcam':
self.confirmKillCam()
elif self._vote == 'scorelimit':
self.confirmScoreLimit()
elif self._vote == 'timelimit':
self.confirmTimeLimit()
elif self._vote == 'roundlength':
self.confirmRoundLength()
elif self._vote == 'roundlimit':
self.confirmRoundLimit()
else:
self.error('Unable to commit. Vote: %s, Value: %s' % (self._vote, self._value))
self._vote = None
self._value = None
self._amt_no = []
self._amt_yes = []
self._allplayers = []
def denyVote(self):
if self._vote:
self.console.say('^3Vote failed!')
self._vote = None
self._value = None
self._amt_no = []
self._amt_yes = []
self._allplayers = []
def confirmKick(self):
# Note - to kick someone we need: client.kick(reason, keyword, admin, silent=True/False, data)
s = self._kickRequested
self.debug('Kick vote passed. Kicking %s' % s.name)
s.kick('Voted against', '', None, True, '')
self._kickRequested = None
def confirmMap(self):
# This will cycle to next map when needed.
self.console.write('map %s' % self._aMaps[self._mapRequested])
self._mapRequested = None
def confirmMaprestart(self):
# This will restart the current map
self.console.write('fast_restart')
def confirmFriendlyFire(self):
# This will toggle friendly fire on and off
setting = self._value
if not isinstance(setting, int):
if self._value == 'on':
setting = 1
elif self._value == 'off':
setting = 0
else:
self.debug('Unknown wanted setting for Friendlyfire. Toggling to next mode')
now = self.console.getCvar('scr_team_fftype').getInt()
if now >= 1:
setting = 0
elif now == 0:
setting = 1
self.console.setCvar('scr_team_fftype', int(setting))
def confirmKillCam(self):
# rcon for killcam: scr_game_allowkillcam - 0 or 1
setting = self._value
if self._value == 'on':
setting = 1
elif self._value == 'off':
setting = 0
if not isinstance(setting, int):
try:
setting = int(setting)
except ValueError:
now = self.console.getCvar('scr_game_allowkillcam').getInt()
self.debug('Setting being voted for is not valid. Toggling to next mode. Killcam currently: %s' % now)
if now == 0:
setting = 1
else:
setting = 0
self.console.setCvar('scr_game_allowkillcam', int(setting))
def confirmScoreLimit(self):
# CVAR to write is scr_<gametype>_scorelimit <number>
setting = self._value
gt = self.getGameType()
if not isinstance(setting, int):
try:
setting = int(setting)
except ValueError:
self.debug('ERROR: Could not set new scorelimit. Voted value is not integer')
return
cparams = 'scr_' + gt + '_scorelimit'
self.console.setCvar(cparams, setting)
def confirmTimeLimit(self):
setting = self._value
gt = self.getGameType()
if not isinstance(setting, int):
try:
setting = int(setting)
except ValueError:
self.debug('ERROR: Could not set new timelimit. Voted value is not integer')
return
cparams = 'scr_' + gt + '_timelimit'
self.console.setCvar(cparams, setting)
def confirmRoundLength(self):
setting = self._value
amodes = ['ctf', 'sd', 're', 'bas', 'dom']
gt = self.getGameType()
if not isinstance(setting, int):
try:
setting = int(setting)
except ValueError:
self.debug('ERROR: Could not set new round length. Voted value is not integer')
return
if gt in amodes:
cparams = 'scr_' + gt + '_roundlength'
self.console.setCvar(cparams, setting)
def confirmRoundLimit(self):
setting = self._value
amodes = ['ctf', 'sd', 're', 'bas', 'dom']
gt = self.getGameType()
if not isinstance(setting, int):
try:
setting = int(setting)
except ValueError:
self.debug('Could not set new round limit. Voted value is not integer')
return
if gt in amodes:
cparams = 'scr_' + gt + '_roundlimit'
self.console.setCvar(cparams, setting)
else:
            self.debug('Could not set round limit: this gametype does not have rounds')
def getGameType(self):
gametype = self.console.getCvar('g_gametype').getString()
if gametype:
return gametype
else:
self.debug('Error getting gametype. Response is %s' % gametype)
return False
def sendBroadcast(self):
        # This will broadcast the vote message to the server.
        a = self._value
        if a == 'maprestart' or a == 'maprotate':
            self.console.say(self.getMessage(self._vote))
        else:
            param = {'s': a}
            self.console.say(self.getMessage(self._vote, param))
self.console.say(self.getMessage('tovote'))
    def acquireCmdLock2(self, cmd, client, delay, all=True):
        # NOTE: `delay` and `all` are currently unused; the cooldown is hardcoded to 5 seconds.
        return client.maxLevel >= 20 or cmd.time + 5 <= self.console.time()
    def checkIfAllowed(self, client, voteType):
        return client.maxLevel >= self._aVotes[voteType]
#################################################################################
# COMMANDS #
#################################################################################
def cmd_vote(self, data, client, cmd=None):
"""\
!vote <setting> <value> - vote to change setting or cvar on server.
"""
# Check if vote already in progress
if self._vote:
client.message('^1ERROR^7: Vote already in progress')
return
# Check if we have enough data for vote
data = data.split()
        if len(data) == 1 and data[0] in ('maprotate', 'maprestart', 'maps'):
self._vote = data[0]
self._value = data[0]
        elif len(data) == 2:
            vote_type, value = data  # avoid shadowing the built-in `type`
            self._vote = vote_type
            self._value = value
else:
client.message('^1ERROR^7: Invalid usage. Type ^2!help vote ^7for info')
return
# Check if player is asking what maps can be voted on
if self._vote == 'maps':
v1 = self.checkIfAllowed(client, 'map')
v2 = self.checkIfAllowed(client, 'nextmap')
if v1 or v2:
cmd.sayLoudOrPM(client, 'Vote enabled maps: ^2%s' % (('^7, ^2').join(self._aMaps.keys())))
self._vote = None
self._value = None
return
else:
client.message('^2You do not have permission to call map votes')
self._vote = None
self._value = None
return
# Check if enough players in game to vote and store present players. Only players present at vote call can vote
playersInGame = 0
self._allplayers = []
for c in self.console.clients.getList():
if c.team != b3.TEAM_SPEC:
playersInGame += 1
self._allplayers.insert(0, c)
if playersInGame <= 1 and client.maxLevel < 100:
client.message('^1ABORT^7: Not enough players in game to vote.')
self._vote = None
return
# Check if type of vote is allowed
if self._vote not in self._aVotes:
client.message('Vote type not allowed. Use ^2!allvotes ^7for available votes.')
self._vote = None
return
# Check if player has permission to call vote type
v = self.checkIfAllowed(client, self._vote)
if not v:
client.message('You do not have permission to call this vote')
self._vote = None
return
# Get further info for proper processing
if self._vote == 'map' or self._vote == 'nextmap':
q = self.mapvote(client, self._value)
if not q:
                self.debug('Vote aborted: mapvote() rejected the requested map')
self._vote = None
return
if self._vote == 'kick':
self._kickRequested = self.adminPlugin.findClientPrompt(self._value, client)
if self._kickRequested:
if self._kickRequested.maxLevel >= 20:
client.message('^1ABORTED^7: Cannot vote to kick admin!')
self._vote = None
self._value = None
self._kickRequested = None
return
self._value = self._kickRequested.name
else:
self.debug('could not get the person to kick')
self._vote = None
self._value = None
self._kickRequested = None
return
# Seems like vote is ok. Broadcast to server
self.sendBroadcast()
# Start timer
self.voteTimer()
# Set person who called vote as yes vote
self._amt_yes.insert(0, client)
if len(self._amt_yes) > (len(self._allplayers) / 2):
self.confirmVote()
def cmd_allvotes(self, data, client, cmd=None):
"""\
Show all the votes you are allowed to call
"""
allowed = []
for k in self._aVotes.keys():
if client.maxLevel >= self._aVotes[k]:
allowed.insert(0, k)
        if allowed:
            client.message('Allowed votes are: %s' % ', '.join(sorted(allowed)))
        else:
            client.message('You are not allowed to call any votes')
def cmd_yes(self, data, client, cmd=None):
"""\
Vote yes to the vote in progress
"""
# Check if there is a vote in progress
if not self._vote:
client.message('No vote in progress')
return
# Check if player is allowed to vote
if client not in self._allplayers:
client.message('Sorry, you cannot enter current vote')
return
# Check if the player already voted. If not, register vote
        if client in self._amt_yes or client in self._amt_no:
            client.message('Are you drunk? You already voted!')
            return
        self._amt_yes.insert(0, client)
        # Let player know that vote is registered
        client.message('^3Your vote has been entered')
        # Check if majority of players voted already
        vYes = len(self._amt_yes)
        vPass = len(self._allplayers) / 2
        if vYes > vPass:
            self.confirmVote()
def cmd_no(self, data, client=None, cmd=None):
"""\
Vote NO to the current vote
"""
# Check if there is a vote in progress
if not self._vote:
client.message('No vote in progress')
return
# Check if player is allowed to vote
if client not in self._allplayers:
client.message('Sorry, you cannot enter current vote')
return
# Check if the player already voted
        if client in self._amt_yes or client in self._amt_no:
            client.message('Are you drunk? You already voted!')
            return
        self._amt_no.insert(0, client)
        # Let player know that vote is registered
        client.message('^3Your vote has been entered')
        # Check if majority of players voted
        vNo = len(self._amt_no)
        vPass = len(self._allplayers) / 2
        if vNo > vPass:
            self.denyVote()
def cmd_nextmap(self, data, client=None, cmd=None):
"""\
- list the next map in rotation
"""
        if not self.acquireCmdLock2(cmd, client, 60, True):
client.message('^7Do not spam commands')
return
if self._mapRequested:
cmd.sayLoudOrPM(client, '^7Next Map: ^2%s' % self._mapRequested.title())
return
mapname = self.console.getNextMap()
if mapname:
cmd.sayLoudOrPM(client, '^7Next Map: ^2%s' % mapname)
else:
client.message('^1Error:^7 could not get map list')
def cmd_maprotate(self, data, client, cmd=None):
"""\
Cycle to next map in rotation
"""
if self._mapRequested:
self.confirmMap()
else:
self.console.rotateMap()
def cmd_veto(self, data, client, cmd=None):
"""\
Cancel a vote in progress
"""
if self._vote:
client.message('^3Vote canceled')
self.denyVote()
        else:
client.message('^3No vote in progress')
| 37.081456
| 139
| 0.550804
| 2,445
| 21,396
| 4.736196
| 0.180368
| 0.026943
| 0.021762
| 0.01209
| 0.328756
| 0.260708
| 0.212263
| 0.181865
| 0.181865
| 0.165285
| 0
| 0.012919
| 0.33796
| 21,396
| 576
| 140
| 37.145833
| 0.804589
| 0.155403
| 0
| 0.403341
| 0
| 0
| 0.17158
| 0.002395
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071599
| false
| 0.016706
| 0.00716
| 0
| 0.195704
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
538622f0e20beb2e31f0c54850a3e278464da569
| 1,323
|
py
|
Python
|
indian-flag.py
|
aditya270520/indian-flag
|
65851eefdd229cca150d2bbe8fa61c9e06e120e0
|
[
"MIT"
] | null | null | null |
indian-flag.py
|
aditya270520/indian-flag
|
65851eefdd229cca150d2bbe8fa61c9e06e120e0
|
[
"MIT"
] | null | null | null |
indian-flag.py
|
aditya270520/indian-flag
|
65851eefdd229cca150d2bbe8fa61c9e06e120e0
|
[
"MIT"
] | null | null | null |
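# Draws a rotating "flag" pattern with the turtle module: each of 360 iterations writes the
# text "ADITYA", draws coloured strokes and concentric circles, then rotates one degree
# further (the `move` angle grows by 1 per iteration).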
import turtle
turtle.bgcolor('black')
wn=turtle.Screen()
tr=turtle.Turtle()
move=1
tr.speed("fastest")
for i in range(360):
    tr.write("ADITYA", False, 'center', font=('Showcard gothic', 50))
tr.penup()
tr.goto(-200,100)
tr.pendown()
tr.color("orange")
tr.right(move)
tr.forward(100)
tr.penup()
tr.color("white")
tr.pendown()
tr.right(30)
tr.forward(60)
tr.pendown()
tr.color("light green")
tr.left(10)
tr.forward(50)
tr.right(70)
tr.penup()
tr.pendown()
tr.color('light blue')
tr.forward(50)
tr.color('light green')
tr.pu()
tr.pd()
tr.color("light blue")
tr.forward(100)
tr.color('brown')
tr.forward(200)
tr.pu()
tr.pd()
tr.color('light green')
tr.circle(2)
tr.color('light blue')
tr.circle(4)
tr.pu()
tr.fd(20)
tr.pd()
tr.circle(6)
tr.pu()
tr.fd(40)
tr.pd()
tr.circle(8)
tr.pu()
tr.fd(80)
tr.pd()
tr.circle(10)
tr.pu()
tr.fd(120)
tr.pd()
tr.circle(20)
tr.color('yellow')
tr.circle(10)
tr.pu()
tr.pd()
tr.color('white')
tr.forward(150)
tr.color('red')
tr.fd(50)
tr.color ('blue')
    tr.begin_fill()  # NOTE: no matching end_fill() follows, so no fill is actually drawn
tr.penup()
tr.home()
move=move+1
tr.penup()
tr.forward(50)
turtle.done()
| 17.64
| 67
| 0.543462
| 207
| 1,323
| 3.468599
| 0.299517
| 0.126741
| 0.058496
| 0.044568
| 0.275766
| 0.165738
| 0.05571
| 0
| 0
| 0
| 0
| 0.061162
| 0.258503
| 1,323
| 75
| 68
| 17.64
| 0.670744
| 0
| 0
| 0.465753
| 0
| 0
| 0.106495
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.013699
| 0
| 0.013699
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5389a92b434b224efc0d211777895516ff271648
| 1,023
|
py
|
Python
|
update_readme.py
|
CalmScout/LeetCode
|
62720934b5906e6b255c7e91d3a6fa1d713e4391
|
[
"MIT"
] | null | null | null |
update_readme.py
|
CalmScout/LeetCode
|
62720934b5906e6b255c7e91d3a6fa1d713e4391
|
[
"MIT"
] | null | null | null |
update_readme.py
|
CalmScout/LeetCode
|
62720934b5906e6b255c7e91d3a6fa1d713e4391
|
[
"MIT"
] | null | null | null |
"""
Script updates `README.md` based on the solution files in the ./<language>/easy and ./<language>/medium folders.
"""
import os
curr_dir = os.path.dirname(__file__)
with open(os.path.join(curr_dir, "README.md"), 'w') as readme:
readme.write("# LeetCode\nDeliberate practice in coding.\n")
langs = [l for l in os.listdir(curr_dir) if os.path.isdir(os.path.join(curr_dir, l)) and l[0] != '.']
for lang in langs:
readme.write("## {}\n".format(lang))
readme.write("### Easy\n")
        easy = sorted(os.listdir(f"{curr_dir}/{lang}/easy"))
        easy = [x.split("_")[0] for x in easy]
        readme.write(", ".join(easy) + "\n")
        readme.write("### Medium\n")
        medium = sorted(os.listdir(f"{curr_dir}/{lang}/medium"))
        medium = [x.split("_")[0] for x in medium]
        readme.write(", ".join(medium) + "\n")
| 39.346154
| 105
| 0.572825
| 146
| 1,023
| 3.890411
| 0.321918
| 0.073944
| 0.035211
| 0.049296
| 0.288732
| 0.140845
| 0.09507
| 0
| 0
| 0
| 0
| 0.00641
| 0.237537
| 1,023
| 25
| 106
| 40.92
| 0.721795
| 0.078201
| 0
| 0
| 0
| 0
| 0.154011
| 0.049198
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.047619
| 0
| 0.047619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|