hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
3b7f19efe5226324127b16d1d9afc2df6edb7254
1,891
py
Python
list_2d_2.py
min-xu-ai/py_perf
ba9f07eefc8031a34fe77f19fc6be19d08344bff
[ "MIT" ]
null
null
null
list_2d_2.py
min-xu-ai/py_perf
ba9f07eefc8031a34fe77f19fc6be19d08344bff
[ "MIT" ]
null
null
null
list_2d_2.py
min-xu-ai/py_perf
ba9f07eefc8031a34fe77f19fc6be19d08344bff
[ "MIT" ]
null
null
null
#!/usr/bin/env pypy3 ''' Testing 2D list (list of lists) data structure. ''' import time import random from lib import benchmark, random_tuple g_list = [] g_size = 0 g_count = 0 g_get_keys = [] g_set_keys = [] def setup(size, density): ''' Populated the table. :param int size: total entries :param float density: (0,1] value for how many entries to add. ''' assert size > 0, size assert density > 0 and density <= 1, density global g_list global g_size global g_count g_list = [[None]*size for _ in range(size)] count = size * size * 1.0 * density // 1 g_size = size g_count = count i = 0 while i < count: idx = random.randint(0, size*size-1) x, y = (idx // size, idx % size) if g_list[x][y] is None: g_list[x][y] = random_tuple() i += 1 global g_get_keys for i in range(1000000): idx = random.randint(0, size*size-1) g_get_keys.append((idx // size, idx % size)) global g_set_keys g_set_keys = g_get_keys def get(): ''' Testing getting ''' global g_get_keys global g_size s = time.time() for _x, _y in g_get_keys: if g_list[_x][_y] is not None: x = g_list[_x][_y] return time.time() - s def set(): ''' Testing setting ''' global g_set_keys global g_size tmp = [1,2,3,4,5] s = time.time() for _x, _y in g_set_keys: if g_list[_x][_y] is not None: last = g_list[_x][_y] g_list[_x][_y] = tmp tmp = last return time.time() - s def scan(): global g_list s = time.time() for x in g_list: for i in x: if i is not None: _ = i[0] return time.time() - s def main(): setup(700, 0.7) benchmark(get) benchmark(set) benchmark(scan) if __name__ == "__main__": main()
21.735632
66
0.561079
301
1,891
3.299003
0.249169
0.060423
0.042296
0.049345
0.209466
0.141994
0.130916
0.07855
0.04431
0
0
0.027929
0.31835
1,891
86
67
21.988372
0.742436
0.114754
0
0.292308
0
0
0.004893
0
0
0
0
0
0.030769
1
0.076923
false
0
0.046154
0
0.169231
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
3b7f2b6e0d9ea9418bfa786631467a10dace678f
10,622
py
Python
src/stepfunctions/inputs/placeholders.py
ParidelPooya/aws-step-functions-data-science-sdk-python
173b4635d8fb3ce569515bcfb6fee1d5a2c29b63
[ "Apache-2.0" ]
211
2019-11-07T17:56:56.000Z
2022-03-23T03:04:43.000Z
src/stepfunctions/inputs/placeholders.py
ParidelPooya/aws-step-functions-data-science-sdk-python
173b4635d8fb3ce569515bcfb6fee1d5a2c29b63
[ "Apache-2.0" ]
179
2019-11-08T00:47:08.000Z
2022-03-10T03:03:37.000Z
src/stepfunctions/inputs/placeholders.py
ParidelPooya/aws-step-functions-data-science-sdk-python
173b4635d8fb3ce569515bcfb6fee1d5a2c29b63
[ "Apache-2.0" ]
86
2019-11-20T12:59:03.000Z
2022-03-23T03:04:47.000Z
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://www.apache.org/licenses/LICENSE-2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. from __future__ import absolute_import import collections import json from stepfunctions.inputs.utils import flatten, replace_type_with_str ValidationResult = collections.namedtuple('ValidationResult', 'valid keys_missing keys_type_mismatch') class Placeholder(object): """ A collection of Placeholder variables. """ def __init__(self, schema=None, **kwargs): """ Args: schema (dict, optional): Schema for the placeholder collection. (default: None) Example below:: { 'ModelName': str, 'JobName': str, 'Hyperparameters': { 'tol': float } } Keyword Args: name (str, optional): Name of the placeholder variable. (default: None) type (type, optional): Type of the placeholder variable. (default: None) parent (Placeholder, optional): Parent variable for a placeholder variable. (default: None) """ self.store = {} self.immutable = False self.schema = schema if self.schema: self._set_schema(schema) self._make_immutable() self.json_str_template = "{}" self.name = kwargs.get("name") self.type = kwargs.get("type") self.parent = kwargs.get("parent") def get(self, name, type): """ Create a placeholder variable with an associated type. Args: name (str): Name of the placeholder variable. type (type): Type of the placeholder variable. Raises: ValueError: If placeholder variable with the same name but different type already exists. 
ValueError: If placeholder variable does not fit into a previously specified schema for the placeholder collection. Returns: Placeholder: Placeholder variable. """ if not self._is_valid_name(name): raise ValueError('Key name can only be string or integer') if name in self.store: curr_variable = self.store[name] if curr_variable.type != type: raise ValueError('Key already exists with a different value type: {current_value_type}'.format(current_value_type=curr_variable.type)) return curr_variable else: self.store[name] = self._create_variable(name=name, parent=self, type=type) return self.store[name] def get_schema_as_dict(self): """ Generate a schema for the placeholder collection as a Python dictionary. Returns: dict: Placeholder collection schema. """ schema = {} for k, v in self.store.items(): if v._is_empty(): schema[k] = v.type or str else: schema[k] = v.get_schema_as_dict() return schema def get_schema_as_json(self, pretty=False): """ Generate a schema for the placeholder collection as a JSON formatted string. Args: pretty (bool, optional): Boolean flag set to `True` if JSON string should be prettified. `False`, otherwise. (default: False) Returns: str: JSON formatted string representation of the block. """ dict_schema_str = replace_type_with_str(self.get_schema_as_dict()) if pretty: return json.dumps(dict_schema_str, indent=4) return json.dumps(dict_schema_str) def contains(self, placeholder): """ Check if the placeholder collection contains the specified placeholder variable. Args: placeholder (Placeholder): Placeholder variable to search for, in the collection. Returns: bool: `True` if placeholder variable was found in the collection. `False`, otherwise. """ for k, v in self.store.items(): if placeholder == v: return True elif v.contains(placeholder): return True return False def __contains__(self, placeholder): """ Containment check operator for placeholder variables. 
""" return self.contains(placeholder) def validate(self, input): """ Validate a specified input against the placeholder collection schema. Args: input (dict): Input to validate against the placeholder collection schema. Returns: ValidationResult: Named tuple with the keys: `valid` (Boolean): Representing the result of validation , `keys_missing` (list(str)): List of keys missing in the input , `keys_type_mismatch` (list(str), type, type): List of tuples with key name, expected type, and provided type. """ if input is None: return False, None, None flattened_schema = flatten(self.get_schema_as_dict()) flattened_input = flatten(input) keys_missing = [i for i in flattened_schema if i not in flattened_input] keys_type_mismatch = [] for k, v in flattened_input.items(): if k in flattened_schema and not isinstance(v, flattened_schema.get(k)): keys_type_mismatch.append((k, flattened_schema.get(k), type(v))) if len(keys_missing) > 0 or len(keys_type_mismatch) > 0: valid = False else: valid = True return ValidationResult(valid=valid, keys_missing=keys_missing, keys_type_mismatch=keys_type_mismatch) def _create_variable(self, name, parent, type=None): raise NotImplementedError def _get_path(self): """ Get path to a placeholder variable node in the collection. """ path = [] node = self while node.name is not None: path.append(node.name) node = node.parent path.reverse() return path def _is_empty(self): """ Check if the store for a placeholder collection/variable is empty. """ return len(self.store) == 0 def _set_schema(self, schema, path=[]): """ Set the schema for a placeholder collection. """ for k, v in schema.items(): if isinstance(v, dict): self._set_schema(v, path + [k]) else: current = self for node in path: current = current.get(node, dict) temp = current.get(k, v) def _make_immutable(self): """ Make a placeholder collection (including all variables contained) immutable. 
""" for k, v in self.store.items(): if isinstance(v, Placeholder): v._make_immutable() self.immutable = True def _is_valid_name(self, name): if isinstance(name, str) or isinstance(name, int): return True else: return False def __getitem__(self, name): """ Subscript operator to build placeholder variables. """ if not self._is_valid_name(name): raise ValueError('Key name can only be string or integer') if name in self.store: return self.store[name] else: self.store[name] = self._create_variable(name=name, parent=self) return self.store[name] def _join_path(self, path): subscript_list = [] for i in path: if isinstance(i, str): subscript_list.append("['{}']".format(i)) elif isinstance(i, int): subscript_list.append('[{}]'.format(i)) return "".join(subscript_list) def to_jsonpath(self): """ Returns a JSON path representation of the placeholder variable to be used for step parameters. Returns: str: JSON path representation of the placeholder variable """ return self.json_str_template.format(self._join_path(self._get_path())) class ExecutionInput(Placeholder): """ Top-level class for execution input placeholders. """ def __init__(self, schema=None, **kwargs): super(ExecutionInput, self).__init__(schema, **kwargs) self.json_str_template = '$$.Execution.Input{}' def _create_variable(self, name, parent, type=None): """ Creates a placeholder variable for Workflow Input. A placeholder variable can only be created if the collection is not immutable due to a pre-specified schema. """ if self.immutable: raise ValueError("Placeholder variable does not conform to schema set for the placeholder collection.") if type: return ExecutionInput(name=name, parent=parent, type=type) else: return ExecutionInput(name=name, parent=parent) class StepInput(Placeholder): """ Top-level class for step input placeholders. 
""" def __init__(self, schema=None, **kwargs): super(StepInput, self).__init__(schema, **kwargs) self.json_str_template = '${}' def _create_variable(self, name, parent, type=None): """ Creates a placeholder variable for Step Input. A placeholder variable can only be created if the collection is not immutable due to a pre-specified schema. """ if self.immutable: raise ValueError("Placeholder variable does not conform to schema set for the placeholder collection.") if type: return StepInput(name=name, parent=parent, type=type) else: return StepInput(name=name, parent=parent)
36.129252
150
0.588119
1,194
10,622
5.10469
0.183417
0.065463
0.035439
0.026579
0.337818
0.264643
0.225431
0.210336
0.16735
0.136505
0
0.001685
0.329505
10,622
293
151
36.25256
0.854114
0.365468
0
0.272059
0
0
0.069477
0
0
0
0
0
0
1
0.147059
false
0
0.029412
0
0.367647
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
3b8031ca25667feb25f8274399a41253e2becc80
1,177
py
Python
src/mrack/transformers/static.py
dav-pascual/mrack
f31b4ef1f1f847c3e95567ec012323be65a1e177
[ "Apache-2.0" ]
2
2021-05-26T15:57:13.000Z
2021-08-21T02:14:01.000Z
src/mrack/transformers/static.py
dav-pascual/mrack
f31b4ef1f1f847c3e95567ec012323be65a1e177
[ "Apache-2.0" ]
81
2020-10-02T08:30:56.000Z
2022-03-31T11:47:41.000Z
src/mrack/transformers/static.py
dav-pascual/mrack
f31b4ef1f1f847c3e95567ec012323be65a1e177
[ "Apache-2.0" ]
7
2020-10-02T08:13:57.000Z
2022-03-31T11:22:53.000Z
# Copyright 2020 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Static transformer module.""" import typing from copy import deepcopy from mrack.transformers.transformer import Transformer CONFIG_KEY = "static" class StaticTransformer(Transformer): """ Static transformer. Does almost no operation as there is nothing to provision. """ _config_key = CONFIG_KEY _required_config_attrs: typing.List[str] = [] _required_host_attrs = ["name", "os", "group", "ip"] def create_host_requirement(self, host): """Create single input for Static provisioner.""" self.dsp_name = "Static" return deepcopy(host)
30.179487
74
0.725573
160
1,177
5.25625
0.63125
0.071344
0.030916
0.03805
0
0
0
0
0
0
0
0.008386
0.189465
1,177
38
75
30.973684
0.873166
0.595582
0
0
0
0
0.057604
0
0
0
0
0
0
1
0.090909
false
0
0.272727
0
0.818182
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
3b809ed3a1a8eaaeae4a3ce0d4bf8bbbdbc290f8
7,421
py
Python
trieste/utils/misc.py
SomeoneSerge/trieste
a160d2400a2dc092cac599554d32217840c06e3d
[ "Apache-2.0" ]
null
null
null
trieste/utils/misc.py
SomeoneSerge/trieste
a160d2400a2dc092cac599554d32217840c06e3d
[ "Apache-2.0" ]
null
null
null
trieste/utils/misc.py
SomeoneSerge/trieste
a160d2400a2dc092cac599554d32217840c06e3d
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 The Trieste Contributors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from abc import ABC, abstractmethod from time import perf_counter from types import TracebackType from typing import Any, Callable, Generic, Mapping, NoReturn, Optional, Tuple, Type, TypeVar import numpy as np import tensorflow as tf from typing_extensions import Final, final from ..types import TensorType C = TypeVar("C", bound=Callable[..., object]) """ A type variable bound to `typing.Callable`. """ def jit(apply: bool = True, **optimize_kwargs: Any) -> Callable[[C], C]: """ A decorator that conditionally wraps a function with `tf.function`. :param apply: If `True`, the decorator is equivalent to `tf.function`. If `False`, the decorator does nothing. :param optimize_kwargs: Additional arguments to `tf.function`. :return: The decorator. """ def decorator(func: C) -> C: return tf.function(func, **optimize_kwargs) if apply else func return decorator def shapes_equal(this: TensorType, that: TensorType) -> TensorType: """ Return a scalar tensor containing: `True` if ``this`` and ``that`` have equal runtime shapes, else `False`. """ return tf.rank(this) == tf.rank(that) and tf.reduce_all(tf.shape(this) == tf.shape(that)) def to_numpy(t: TensorType) -> np.ndarray: """ :param t: An array-like object. :return: ``t`` as a NumPy array. 
""" if isinstance(t, tf.Tensor): return t.numpy() return t ResultType = TypeVar("ResultType", covariant=True) """ An unbounded covariant type variable. """ class Result(Generic[ResultType], ABC): """ Represents the result of an operation that can fail with an exception. It contains either the operation return value (in an :class:`Ok`), or the exception raised (in an :class:`Err`). To check whether instances such as >>> res = Ok(1) >>> other_res = Err(ValueError("whoops")) contain a value, use :attr:`is_ok` (or :attr:`is_err`) >>> res.is_ok True >>> other_res.is_ok False We can access the value if it :attr:`is_ok` using :meth:`unwrap`. >>> res.unwrap() 1 Trying to access the value of a failed :class:`Result`, or :class:`Err`, will raise the wrapped exception >>> other_res.unwrap() Traceback (most recent call last): ... ValueError: whoops **Note:** This class is not intended to be subclassed other than by :class:`Ok` and :class:`Err`. """ @property @abstractmethod def is_ok(self) -> bool: """`True` if this :class:`Result` contains a value, else `False`.""" @property def is_err(self) -> bool: """ `True` if this :class:`Result` contains an error, else `False`. The opposite of :attr:`is_ok`. """ return not self.is_ok @abstractmethod def unwrap(self) -> ResultType: """ :return: The contained value, if it exists. :raise Exception: If there is no contained value. """ @final class Ok(Result[ResultType]): """Wraps the result of a successful evaluation.""" def __init__(self, value: ResultType): """ :param value: The result of a successful evaluation. """ self._value = value def __repr__(self) -> str: """""" return f"Ok({self._value!r})" @property def is_ok(self) -> bool: """`True` always.""" return True def unwrap(self) -> ResultType: """ :return: The wrapped value. """ return self._value @final class Err(Result[NoReturn]): """Wraps the exception that occurred during a failed evaluation.""" def __init__(self, exc: Exception): """ :param exc: The exception that occurred. 
""" self._exc = exc def __repr__(self) -> str: """""" return f"Err({self._exc!r})" @property def is_ok(self) -> bool: """`False` always.""" return False def unwrap(self) -> NoReturn: """ :raise Exception: Always. Raises the wrapped exception. """ raise self._exc class DEFAULTS: """Default constants used in Trieste.""" JITTER: Final[float] = 1e-6 """ The default jitter, typically used to stabilise computations near singular points, such as in Cholesky decomposition. """ K = TypeVar("K") """ An unbound type variable. """ U = TypeVar("U") """ An unbound type variable. """ V = TypeVar("V") """ An unbound type variable. """ def map_values(f: Callable[[U], V], mapping: Mapping[K, U]) -> Mapping[K, V]: """ Apply ``f`` to each value in ``mapping`` and return the result. If ``f`` does not modify its argument, :func:`map_values` does not modify ``mapping``. For example: >>> import math >>> squares = {'a': 1, 'b': 4, 'c': 9} >>> map_values(math.sqrt, squares)['b'] 2.0 >>> squares {'a': 1, 'b': 4, 'c': 9} :param f: The function to apply to the values in ``mapping``. :param mapping: A mapping. :return: A new mapping, whose keys are the same as ``mapping``, and values are the result of applying ``f`` to each value in ``mapping``. """ return {k: f(u) for k, u in mapping.items()} class Timer: """ Functionality for timing chunks of code. 
For example: >>> from time import sleep >>> with Timer() as timer: sleep(2.0) >>> timer.time # doctest: +SKIP 2.0 """ def __enter__(self) -> Timer: self.start = perf_counter() return self def __exit__( self, type: Optional[Type[BaseException]], value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: self.end = perf_counter() self.time = self.end - self.start def flatten_leading_dims(x: TensorType) -> Tuple[TensorType, Callable[[TensorType], TensorType]]: """ Flattens the leading dimensions of `x` (all but the last two dimensions), and returns a function that can be used to restore them (typically after first manipulating the flattened tensor). """ x_batched_shape = tf.shape(x) batch_shape = x_batched_shape[:-1] input_shape = x_batched_shape[-1:] x_flat_shape = tf.concat([[-1], input_shape], axis=0) def unflatten(y: TensorType) -> TensorType: tf.debugging.assert_rank(y, 2, message="unflatten is expecting a rank two tensor.") y_flat_shape = tf.shape(y) output_shape = y_flat_shape[1:] y_batched_shape = tf.concat([batch_shape, output_shape], axis=0) y_batched = tf.reshape(y, y_batched_shape) tf.debugging.assert_shapes([(y, ["N", "D"]), (y_batched, [..., "M", "D"])]) return y_batched return tf.reshape(x, x_flat_shape), unflatten
28.43295
100
0.62377
979
7,421
4.634321
0.292135
0.007935
0.009698
0.007274
0.090148
0.081772
0.032621
0.01631
0
0
0
0.005554
0.24781
7,421
260
101
28.542308
0.807238
0.44401
0
0.176471
0
0
0.029021
0
0
0
0
0
0.023529
1
0.235294
false
0
0.105882
0.011765
0.588235
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
3b816baf5eaa46bd1b527f1e92fb14dd928f8b46
1,185
py
Python
data/states/splash.py
andarms/pyweek20
79a5ac58c3ca06be61e5a05af0abd78a8c79e8df
[ "MIT" ]
null
null
null
data/states/splash.py
andarms/pyweek20
79a5ac58c3ca06be61e5a05af0abd78a8c79e8df
[ "MIT" ]
null
null
null
data/states/splash.py
andarms/pyweek20
79a5ac58c3ca06be61e5a05af0abd78a8c79e8df
[ "MIT" ]
null
null
null
import pygame as pg import state from .. import util class SplashState(state._State): def __init__(self): super(SplashState, self).__init__() self.bg_color = (0,0,0) self.text_color = (155,255,155) self.duration = 3 #seg self.image = pg.Surface(util.SCREEN_SIZE) self.next = "MainMenu" self.title = "HackerMan" self.titleSurface = self.make_title_surface() def start(self, data, current_time): super(SplashState, self).start(data, current_time) self.duration = 3 def make_title_surface(self): font = pg.font.Font(util.FONTS['west-england.regular'], 40) return font.render(self.title, False, self.text_color) def handle_events(self, event): if event.type == pg.KEYDOWN: if event.key == pg.K_RETURN: self.done = True def update(self, dt, current_time, keys): self.duration -= dt if self.duration <= 0: self.done = True def render(self, surface): self.image.fill(self.bg_color) self.image.blit(self.titleSurface, util.SCREEN_RECT.center) surface.blit(self.image, (0,0))
28.214286
67
0.616034
156
1,185
4.525641
0.397436
0.067989
0.056657
0.042493
0
0
0
0
0
0
0
0.021789
0.264135
1,185
41
68
28.902439
0.787844
0.002532
0
0.129032
0
0
0.031409
0
0
0
0
0
0
1
0.193548
false
0
0.096774
0
0.354839
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
3b81d15b16f3a5f6338f6f8f45d8d313e95a4a7d
11,735
py
Python
froi/algorithm/array2qimage.py
zhouguangfu/FreeROI
0605c2a0fe2457e3703a4a7548299fc2c1e9aca0
[ "BSD-3-Clause" ]
null
null
null
froi/algorithm/array2qimage.py
zhouguangfu/FreeROI
0605c2a0fe2457e3703a4a7548299fc2c1e9aca0
[ "BSD-3-Clause" ]
null
null
null
froi/algorithm/array2qimage.py
zhouguangfu/FreeROI
0605c2a0fe2457e3703a4a7548299fc2c1e9aca0
[ "BSD-3-Clause" ]
null
null
null
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """Some basic functions for image construction.""" import sys as _sys import numpy as _np from PyQt4 import QtGui as _qt from qimageview import qimageview as _qimageview if _sys.byteorder == 'little': _bgra = (0, 1, 2, 3) else: _bgra = (3, 2, 1, 0) bgra_dtype = _np.dtype({'b': (_np.uint8, _bgra[0], 'blue'), 'g': (_np.uint8, _bgra[1], 'green'), 'r': (_np.uint8, _bgra[2], 'red'), 'a': (_np.uint8, _bgra[3], 'alpha')}) def gray(array, alpha): """Return a rgba array which color ranges from black to white.""" h, w = array.shape new_array = _np.zeros((h, w, 4), dtype=_np.uint8) array[array<=0] = 0 array[array>255] = 255 new_array[..., 0] = array new_array[..., 1] = array new_array[..., 2] = array new_array[..., 3] = alpha * array.clip(0, 1) return new_array def red2yellow(array, alpha): """Return a rgba array which color ranges from red to yellow.""" h, w = array.shape new_array = _np.zeros((h, w, 4), dtype=_np.uint8) array[array<=0] = 0 array[array>255] = 255 new_array[..., 0] = 255 * array.clip(0, 1) new_array[..., 1] = array new_array[..., 2] = 0 new_array[..., 3] = alpha * array.clip(0, 1) return new_array def blue2cyanblue(array, alpha): """Return a rgba array which color ranges from blue to cyanblue.""" h, w = array.shape new_array = _np.zeros((h, w, 4), dtype=_np.uint8) array[array<=0] = 0 array[array>255] = 255 new_array[..., 0] = 0 new_array[..., 1] = array new_array[..., 2] = 255 * array.clip(0, 1) new_array[..., 3] = alpha * array.clip(0, 1) return new_array def red(array, alpha): """Return a whole red rgba array.""" h, w = array.shape new_array = _np.zeros((h, w, 4), dtype=_np.uint8) new_array[..., 0] = 255 * array.clip(0, 1) new_array[..., 1] = 0 new_array[..., 2] = 0 new_array[..., 3] = alpha * array.clip(0, 1) return new_array def green(array, alpha): """Return a whole green rgba array.""" h, w = array.shape new_array = _np.zeros((h, w, 4), 
dtype=_np.uint8) new_array[..., 0] = 0 new_array[..., 1] = 255 * array.clip(0, 1) new_array[..., 2] = 0 new_array[..., 3] = alpha * array.clip(0, 1) return new_array def blue(array, alpha): """Return a whole blue rgba array.""" h, w = array.shape new_array = _np.zeros((h, w, 4), dtype=_np.uint8) new_array[..., 0] = 0 new_array[..., 1] = 0 new_array[..., 2] = 255 * array.clip(0, 1) new_array[..., 3] = alpha * array.clip(0, 1) return new_array def single_roi(array, alpha, roi): """Return a single roi view array.""" color = (70, 70, 70) h, w = array.shape new_array = _np.zeros((h, w, 4), dtype=_np.uint8) if roi is None or roi == 0: return new_array mask = array == roi new_array[mask, 0] = color[0] new_array[mask, 1] = color[1] new_array[mask, 2] = color[2] new_array[mask, 3] = alpha return new_array def _normalize255(array, normalize, scale_length=255.0): """Normalize the array.""" if not normalize: return array if normalize is True: normalize = array.min(), array.max() elif _np.isscalar(normalize): normalize = (0, normalize) elif isinstance(normalize, tuple) and (normalize[0] == normalize[1]): normalize = array.min(), array.max() nmin, nmax = normalize if nmin: array = array - nmin if nmax == nmin: return _np.round(array) else: scale = scale_length / (nmax - nmin) if scale != 1.0: array = array * scale array[_np.logical_and(array > 0, array < 1)] = 1 return _np.round(array) def gray2qimage(array, normalize=False): """Convert a 2D numpy array 'array' into a 8-bit, indexed QImage with a specific colormap. The first dimension represents the vertical image axis. The parameter 'normalize' can be used to normalize an image's value range to 0 ~ 255: normalize = (nmin, nmax): scale & clip image values from nmin..nmax to 0..255 normalize = nmax: lets nmin default to zero, i.e. 
scale & clip the range 0..nmax to 0..255 normalize = True: scale image values to 0..255 (same as passing (array.min(), array.max())) If the source array 'array' contains masked values, the result will have only 255 shades of gray, and one color map entry will be used to make the corresponding pixels transparent. """ if _np.ndim(array) != 2: raise ValueError("gray2qimage can only convert 2D arrays") h, w = array.shape result = _qt.QImage(w, h, _qt.QImage.Format_Indexed8) array = _normalize255(array, normalize) for i in range(256): result.setColor(i, _qt.qRgb(i, i, i)) _qimageview(result)[:] = array.clip(0, 255) return result def byte_view(qimage, byteorder = 'little'): """Return the bytes in the view with the given byteorder.""" raw = _qimageview(qimage) result = raw.view(_np.uint8).reshape(raw.shape + (-1, )) if byteorder and byteorder != _sys.byteorder: result = result[...,::-1] return result def rgb_view(qimage, byteorder='big'): """Return the rgb value array in view.""" if byteorder is None: byteorder = _sys.byteorder bytes = byte_view(qimage, byteorder) if bytes.shape[2] != 4: raise ValueError, "For rgb_view, the image must have 32 bit pixel" + \ " size (use RGB32, ARGB32, or ARGB32_Premultiplied)" if byteorder == 'little': return bytes[..., :3] else: return bytes[..., 1:] def alpha_view(qimage): """Return the alpha value array in view.""" bytes = byte_view(qimage, byteorder = None) if bytes.shape[2] != 4: raise ValueError, "For alpha_view, the image must have 32 bit pixel" + \ " size (use RGB32, ARGB32, or ARGB32_Premultiplied)" return bytes[..., _bgra[3]] def array2qrgba(array, alpha, colormap, normalize=False, roi=None): """Convert a 2D-array into a 3D-array containing rgba value.""" if _np.ndim(array) != 2: raise ValueError("array2qrgb can only convert 2D array") if isinstance(colormap, str): if colormap != 'rainbow': if colormap != 'single ROI': array = _normalize255(array, normalize) if colormap == 'gray': new_array = gray(array, alpha) elif colormap == 
'red2yellow': new_array = red2yellow(array, alpha) elif colormap == 'blue2cyanblue': new_array = blue2cyanblue(array, alpha) elif colormap == 'red': new_array = red(array, alpha) elif colormap == 'green': new_array = green(array, alpha) elif colormap == 'blue': new_array = blue(array, alpha) else: new_array = single_roi(array, alpha, roi) else: if _np.isscalar(normalize): new_array = array.clip(0, array.max()) new_array[array < 0] = 0 new_array[array > normalize] = 0 elif isinstance(normalize, tuple): new_array = array.clip(0, array.max()) new_array[array < normalize[0]] = 0 new_array[array > normalize[1]] = 0 else: new_array = array.clip(0, array.max()) new_array[array < 0] = 0 h, w = new_array.shape R, G, B = 41, 61, 83 fst_norm = 100000.0 new_array_raw = _normalize255(new_array, normalize, scale_length=fst_norm) new_array_R = _normalize255(new_array_raw % R, (0, R), scale_length=254.0) new_array_G = _normalize255(new_array_raw % G, (0, G), scale_length=254.0) new_array_B = _normalize255(new_array_raw % B, (0, B), scale_length=254.0) new_array2 = _np.zeros((h, w, 4), dtype=_np.uint8) add_ = new_array.clip(0, 1) new_array2[..., 0] = new_array_R + add_ new_array2[..., 1] = new_array_G + add_ new_array2[..., 2] = new_array_B + add_ new_array2[..., 3] = alpha * _np.sum(new_array2, 2).clip(0, 1) #_np.set_printoptions(threshold=1000000) new_array = new_array2 else: if _np.isscalar(normalize): new_array = array.clip(0, array.max()) new_array[array < 0] = 0 new_array[array > normalize] = 0 elif isinstance(normalize, tuple): new_array = array.clip(0, array.max()) new_array[array < normalize[0]] = 0 new_array[array > normalize[1]] = 0 else: new_array = array.clip(0, array.max()) new_array[array < 0] = 0 values = colormap.keys() values = [int(item) for item in values] h, w = new_array.shape new_array2 = _np.zeros((h, w, 4), dtype=_np.uint8) for item in values: new_array2[new_array==item] = [colormap[item][0], colormap[item][1], colormap[item][2], 0] new_array2[..., 3] = 
alpha * _np.sum(new_array2, 2).clip(0, 1) new_array = new_array2 return new_array def qcomposition(array_list): """Composite several qrgba arrays into one.""" if not len(array_list): raise ValueError('Input array list cannot be empty.') if _np.ndim(array_list[0]) != 3: raise ValueError('RGBA array must be 3D.') h, w, channel = array_list[0].shape result = _np.array(array_list[0][..., :3], dtype=_np.int64) for index in range(1, len(array_list)): item = _np.array(array_list[index], dtype=_np.int64) alpha_array = _np.tile(item[..., -1].reshape((-1, 1)), (1, 1, 3)) alpha_array = alpha_array.reshape((h, w, 3)) result = item[..., :3] * alpha_array + result * \ (255 - alpha_array) result = result / 255 result = _np.array(result, dtype=_np.uint8) return result def composition(dest, source): """Save result in place Note ---- The dest is a rgb image, while the source is a rgba image """ alpha = source[...,3].reshape(source.shape[0], source.shape[1], 1).astype(_np.float) alpha /= 255 source_rgb = source[...,:3].astype(_np.float) dest[:] = _np.uint8(source_rgb * alpha + dest.astype(_np.float) * (1 - alpha)) return dest def qrgba2qimage(array): """Convert the input array into a image.""" if _np.ndim(array) != 3: raise ValueError("RGBA array must be 3D.") h, w, channel = array.shape fmt = _qt.QImage.Format_ARGB32 result = _qt.QImage(w, h, fmt) rgb_view(result)[:] = array[..., :3] alpha = alpha_view(result) alpha[:] = 255 return result def null_image(h, w): """Return a whole black rgba array.""" new_array = _np.zeros((h, w, 4), dtype=_np.uint8) new_array[..., 3] = 255 return new_array
34.821958
94
0.55441
1,563
11,735
4.014076
0.140755
0.110934
0.030284
0.021039
0.390022
0.333758
0.326427
0.307778
0.297577
0.297577
0
0.048249
0.311206
11,735
336
95
34.925595
0.727948
0.012612
0
0.439024
0
0
0.044635
0.004232
0
0
0
0
0
0
null
null
0
0.01626
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
3b8212d16b84b98df542810b9eaf9101f2375755
7,084
py
Python
pymeet/COM.py
Melisius/PyMEET
9de470e55b0228b7c4ffed5f0c54bd27d6eba841
[ "MIT" ]
null
null
null
pymeet/COM.py
Melisius/PyMEET
9de470e55b0228b7c4ffed5f0c54bd27d6eba841
[ "MIT" ]
null
null
null
pymeet/COM.py
Melisius/PyMEET
9de470e55b0228b7c4ffed5f0c54bd27d6eba841
[ "MIT" ]
null
null
null
import numpy as np def atom_to_numbers(atom_name, number_property): """ Function that contains useful conversions. """ name2number = {"H": 1, "He": 2, "Li": 3, "Be": 4, "B": 5, "C": 6, "N": 7, "O": 8, "F": 9, "Ne": 10, "Na": 11, "Mg": 12, "Al": 13, "Si": 14, "P": 15, "S": 16, "Cl": 17, "Ar": 18, "K": 19, "Ca": 20, "Sc": 21, "Ti": 22, "V": 23, "Cr": 24, "Mn": 25, "Fe": 26, "Co": 27, "Ni": 28, "Cu": 29, "Zn": 30, "Ga": 31, "Ge": 32, "As": 33, "Se": 34, "Br": 35, "Kr": 36, "Rb": 37, "Sr": 38, "Y": 39, "Zr": 40, "Nb": 41, "Mo": 42, "Tc": 43, "Ru": 44, "Rh": 45, "Pd": 46, "Ag": 47, "Cd": 48, "In": 49, "Sn": 50, "Sb": 51, "Te": 52, "I": 53, "Xe": 54, "Cs": 55, "Ba": 56, "La": 57, "Ce": 58, "Pr": 59, "Nd": 60, "Pm": 61, "Sm": 62, "Eu": 63, "Gd": 64, "Tb": 65, "Dy": 66, "Ho": 67, "Er": 68, "Tm": 69, "Yb": 70, "Lu": 71, "Hf": 72, "Ta": 73, "W": 74, "Re": 75, "Os": 76, "Ir": 77, "Pt": 78, "Au": 79, "Hg": 80, "Tl": 81, "Pb": 82, "Bi": 83, "Po": 84, "At": 85, "Rn": 86, "Fr": 87, "Ra": 88, "Ac": 89, "Th": 90, "Pa": 91, "U": 92, "Np": 93, "Pu": 94, "Am": 95, "Cm": 96, "Bk": 97, "Cf": 98, "Es": 99, "Fm": 100, "Md": 101, "No": 102, "Lr": 103, "Rf": 104, "Db": 105, "Sg": 106, "Bh": 107, "Hs": 108, "Mt": 109, "Ds": 110, "Rg": 111, "Cn": 112, "Nh": 113, "Fl": 114, "Mc": 115, "Lv": 116, "Ts": 117, "Og": 118} number2name = {1: "H", 2: "He", 3: "Li", 4: "Be", 5: "B", 6: "C", 7: "N", 8: "O", 9: "F", 10: "Ne", 11: "Na", 12: "Mg", 13: "Al", 14: "Si", 15: "P", 16: "S", 17: "Cl", 18: "Ar", 19: "K", 20: "Ca", 21: "Sc", 22: "Ti", 23: "V", 24: "Cr", 25: "Mn", 26: "Fe", 27: "Co", 28: "Ni", 29: "Cu", 30: "Zn", 31: "Ga", 32: "Ge", 33: "As", 34: "Se", 35: "Br", 36: "Kr", 37: "Rb", 38: "Sr", 39: "Y", 40: "Zr", 41: "Nb", 42: "Mo", 43: "Tc", 44: "Ru", 45: "Rh", 46: "Pd", 47: "Ag", 48: "Cd", 49: "In", 50: "Sn", 51: "Sb", 52: "Te", 53: "I", 54: "Xe", 55: "Cs", 56: "Ba", 57: "La", 58: "Ce", 59: "Pr", 60: "Nd", 61: "Pm", 62: "Sm", 63: "Eu", 64: "Gd", 65: "Tb", 66: "Dy", 67: "Ho", 68: "Er", 69: "Tm", 70: 
"Yb", 71: "Lu", 72: "Hf", 73: "Ta", 74: "W", 75: "Re", 76: "Os", 77: "Ir", 78: "Pt", 79: "Au", 80: "Hg", 81: "Tl", 82: "Pb", 83: "Bi", 84: "Po", 85: "At", 86: "Rn", 87: "Fr", 88: "Ra", 89: "Ac", 90: "Th", 91: "Pa", 92: "U", 93: "Np", 94: "Pu", 95: "Am", 96: "Cm", 97: "Bk", 98: "Cf", 99: "Es", 100: "Fm", 101: "Md", 102: "No", 103: "Lr", 104: "Rf", 105: "Db", 106: "Sg", 107: "Bh", 108: "Hs", 109: "Mt", 110: "Ds", 111: "Rg", 112: "Cn", 113: "Nh", 114: "Fl", 115: "Mc", 116: "Lv", 117: "Ts", 118: "Og"} vdw_radii = {1: 2.26767118629, 2: 2.64561638401, 3: 3.43930129921, 4: 2.89128076253, 5: 3.62827389807, 6: 3.21253418058, 7: 2.9290752823, 8: 2.87238350264, 9: 2.77789720321, 10: 2.91017802241, 11: 4.28967799407, 12: 3.26922596024, 13: 3.47709581899, 14: 3.96842457602, 15: 3.40150677944, 16: 3.40150677944, 17: 3.30702048001, 18: 3.55268485853, 19: 5.19674646859, 20: 4.36526703362, 21: 3.9873218359, 22: False, 23: False, 24: False, 25: False, 26: False, 27: False, 28: 3.08025336138, 29: 2.64561638401, 30: 2.62671912412, 31: 3.53378759864, 32: 3.9873218359, 33: 3.49599307887, 34: 3.5904793783, 35: 3.49599307887, 36: 3.81724649693, 37: 5.72586974539, 38: 4.70541771156, 39: False, 40: False, 41: False, 42: False, 43: False, 44: False, 45: False, 46: 3.08025336138, 47: 3.25032870036, 48: 2.98576706195, 49: 3.64717115796, 50: 4.10070539522, 51: 3.89283553647, 52: 3.89283553647, 53: 3.74165745739, 54: 4.08180813533, 55: 6.48176014083, 56: 5.06446564939, 57: False, 58: False, 59: False, 60: False, 61: False, 62: False, 63: False, 64: False, 65: False, 66: False, 67: False, 68: False, 69: False, 70: False, 71: False, 72: False, 73: False, 74: False, 75: False, 76: False, 77: False, 78: 3.30702048001, 79: 3.13694514104, 80: 2.9290752823, 81: 3.70386293761, 82: 3.81724649693, 83: 3.91173279636, 84: 3.7227601975, 85: 3.81724649693, 86: 4.15739717487, 87: 6.57624644025, 88: 5.34792454768, 89: False, 90: False, 91: False, 92: 3.51489033876, 93: False, 94: False, 95: False, 96: False, 
97: False, 98: False, 99: False, 100: False, 101: False, 102: False, 103: False, 104: False, 105: False, 106: False, 107: False, 108: False, 109: False, 110: False, 111: False, 112: False, 113: False, 114: False, 115: False, 116: False, 117: False, 118: False} mass = {"H": 1.008, "Na": 22.989, "Sc": 44.955, "Ga": 69.723, "Nb": 92.906, "Sb": 121.76, "Pm": False, "Lu": 174.9668, "Tl": 204.38, "Pa": 231.035, "Md": False, "Rg": False, "He": 4.002, "Mg": 24.305, "Ti": 47.867, "Ge": 72.63, "Mo": 95.95, "Te": 127.6, "Sm": 150.36, "Hf": 178.49, "Pb": 207.2, "U": 238.028, "No": False, "Cn": False, "Li": 6.94, "Al": 26.981, "V": 50.9415, "As": 74.921, "Tc": False, "I": 126.904, "Eu": 151.964, "Ta": 180.947, "Bi": 208.98, "Np": False, "Lr": False, "Nh": False, "Be": 9.012, "Si": 28.085, "Cr": 51.9961, "Se": 78.971, "Ru": 101.07, "Xe": 131.293, "Gd": 157.25, "W": 183.84, "Po": False, "Pu": False, "Rf": False, "Fl": False, "B": 10.81, "P": 30.973, "Mn": 54.938, "Br": 79.904, "Rh": 102.905, "Cs": 132.905, "Tb": 158.925, "Re": 186.207, "At": False, "Am": False, "Db": False, "Mc": False, "C": 12.011, "S": 32.06, "Fe": 55.845, "Kr": 83.798, "Pd": 106.42, "Ba": 137.327, "Dy": 162.5, "Os": 190.23, "Rn": False, "Cm": False, "Sg": False, "Lv": False, "N": 14.007, "Cl": 35.45, "Co": 58.933, "Rb": 85.4678, "Ag": 107.8682, "La": 138.905, "Ho": 164.93, "Ir": 192.217, "Fr": False, "Bk": False, "Bh": False, "Ts": False, "O": 15.999, "Ar": 39.948, "Ni": 58.6934, "Sr": 87.62, "Cd": 112.414, "Ce": 140.116, "Er": 167.259, "Pt": 195.084, "Ra": False, "Cf": False, "Hs": False, "Og": False, "F": 18.998, "K": 39.0983, "Cu": 63.546, "Y": 88.905, "In": 114.818, "Pr": 140.907, "Tm": 168.934, "Au": 196.966, "Ac": False, "Es": False, "Mt": False, "Ne": 20.1797, "Ca": 40.078, "Zn": 65.38, "Zr": 91.224, "Sn": 118.71, "Nd": 144.242, "Yb": 173.045, "Hg": 200.592, "Th": 232.0377, "Fm": False, "Ds": False} if number_property.lower() == "charge": return name2number[atom_name] elif number_property.lower() == 
"mass": return mass[atom_name] elif number_property.lower() == "vdw_radii": return vdw_radii[name2number[atom_name]] def calc_center_of_mass(elements, xyz): """ Calculates the center of elements. Input : elements, vector(n) : xyz, x,y,z coordinate matrix(n, 3) """ mass = np.zeros(len(elements)) for i in range(len(elements)): mass[i] = atom_to_numbers(elements[i], "mass") Xcm = np.sum(mass*xyz[:,0])/np.sum(mass) Ycm = np.sum(mass*xyz[:,1])/np.sum(mass) Zcm = np.sum(mass*xyz[:,2])/np.sum(mass) return np.array([Xcm, Ycm, Zcm])
101.2
191
0.502823
1,211
7,084
2.926507
0.346821
0.008465
0.015237
0.010158
0.017494
0.017494
0
0
0
0
0
0.335154
0.224591
7,084
70
192
101.2
0.310031
0.021033
0
0
0
0
0.099956
0
0
0
0
0
0
1
0.037736
false
0
0.018868
0
0.132075
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
1
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
3b8587e0ec9b0fcd443c6745863057b4a8e3cf10
2,090
py
Python
app/models/game.py
tso/avalon
36bbc534ea1f0b24c19fc2a6b72010239cd5b0c3
[ "MIT" ]
null
null
null
app/models/game.py
tso/avalon
36bbc534ea1f0b24c19fc2a6b72010239cd5b0c3
[ "MIT" ]
6
2017-12-25T07:27:27.000Z
2018-01-17T18:59:04.000Z
app/models/game.py
tso/avalon
36bbc534ea1f0b24c19fc2a6b72010239cd5b0c3
[ "MIT" ]
2
2017-12-21T04:36:56.000Z
2018-01-15T23:32:35.000Z
import random import string import uuid from channels import Group from django.db import models from app.avalon import assign_roles, gen_role_list from .util import lobby_json class GameManager(models.Manager): def create_game(self, num_players, has_mordred, has_oberon): joinable_id = ''.join(random.choices(string.ascii_uppercase, k=4)) # For the set of all unstarted games joinable ID must be unique while self.filter(is_started=False, joinable_id=joinable_id): joinable_id = ''.join(random.choices(string.ascii_uppercase, k=4)) if not gen_role_list(num_players, has_mordred, has_oberon): return False game = self.model(joinable_id=joinable_id, num_players=num_players, has_mordred=has_mordred, has_oberon=has_oberon) game.save(using=self._db) return game class Game(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) joinable_id = models.CharField(max_length=4) is_started = models.BooleanField(default=False) created = models.DateTimeField(auto_now_add=True) modified = models.DateTimeField(auto_now=True) has_mordred = models.BooleanField(default=False) has_oberon = models.BooleanField(default=False) num_players = models.PositiveIntegerField(default=5) games = GameManager() def start(self): self.is_started = True assign_roles(self) self.save() self.message_players() def players(self): return self.player_set.filter(is_kicked=False).order_by('created_at').all() def message_players(self): Group(str(self.id)).send({ 'text': lobby_json(self), }) def to_dict(self): return { 'id': str(self.id), 'joinable_id': self.joinable_id, 'is_started': self.is_started, 'num_players': self.num_players, 'has_mordred': self.has_mordred, 'has_oberon': self.has_oberon, }
31.666667
83
0.653589
263
2,090
4.969582
0.34981
0.076511
0.049732
0.061209
0.158378
0.119357
0.074981
0.074981
0.074981
0.074981
0
0.003185
0.248804
2,090
65
84
32.153846
0.829299
0.029187
0
0.04
0
0
0.03404
0
0
0
0
0
0
1
0.1
false
0
0.14
0.04
0.54
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
3b86bd629224d587375d982d9e21ec4c5e570896
4,230
py
Python
root/os/DSAA/DataStructuresAndAlgorithms/python/chutils/chutils/utils/time_get_lock_info.py
chyidl/chyidlTutorial
a033e0a57abf84fdbb61e57736822f9126db6ff7
[ "MIT" ]
5
2018-10-17T05:57:39.000Z
2021-07-05T15:38:24.000Z
root/os/DSAA/DataStructuresAndAlgorithms/python/chutils/chutils/utils/time_get_lock_info.py
chyidl/chyidlTutorial
a033e0a57abf84fdbb61e57736822f9126db6ff7
[ "MIT" ]
2
2021-04-14T00:48:43.000Z
2021-04-14T02:20:50.000Z
root/os/DSAA/DataStructuresAndAlgorithms/python/chutils/chutils/utils/time_get_lock_info.py
chyidl/chyidlTutorial
a033e0a57abf84fdbb61e57736822f9126db6ff7
[ "MIT" ]
3
2019-03-02T14:36:19.000Z
2022-03-18T10:12:09.000Z
#! /usr/bin/env python3 # -*- coding: utf-8 -*- # # time_get_lock_info.py # utils # # 🎂"Here's to the crazy ones. The misfits. The rebels. # The troublemakers. The round pegs in the square holes. # The ones who see things differently. They're not found # of rules. And they have no respect for the status quo. # You can quote them, disagree with them, glority or vilify # them. About the only thing you can't do is ignore them. # Because they change things. They push the human race forward. # And while some may see them as the creazy ones, we see genius. # Because the poeple who are crazy enough to think thay can change # the world, are the ones who do." # # Created by Chyi Yaqing on 03/16/19 12:01. # Copyright © 2019. Chyi Yaqing. # All rights reserved. # # Distributed under terms of the MIT """ 时钟的实现与C库函数绑定在一起,所以一些细节使基于特定平台的 """ import os import textwrap # Text wrapping and filling import time # Time access and conversions import hashlib available_clocks = [ ('clock', time.clock), ('monotonic', time.monotonic), ('perf_counter', time.perf_counter), ('process_time', time.process_time), ('thread_time', time.thread_time), ('time', time.time), # epoch [Unix time 1970.1.1 00:00] 开始之后的秒数以浮点数格式返回 ] for (clock_name, func) in available_clocks: print(textwrap.dedent('''\ {name}: adjustable : {info.adjustable} implementation : {info.implementation} monotonic : {info.monotonic} resolution : {info.resolution} current : {current} ''').format( name=clock_name, info=time.get_clock_info(clock_name), current=func())) # time.time() 从[epoch] 开始以后以浮点数格式返回秒 print("The time is: ", time.time()) # time.ctime() Convert a time expressed in seconds since the epoch to a string # representing local time print('The time is :', time.ctime()) later = time.time()+15 print('15 secs from now :', time.ctime(later)) # time.time() 函数返回的是系统时钟可以被用户或者系统服务更改,所以重复调用time()函数产生的 # 时间值可能会前后波动。monotonic()函数总是返回前向的时间值 # The monotonic is not affected by system clock updates. 
start = time.monotonic() time.sleep(0.1) end = time.monotonic() print('start : {:>9.2f}'.format(start)) print('end : {:>9.2f}'.format(end)) print('span : {:>9.2f}'.format(end - start)) # time.perf_counter() : fractional seconds of a performance counter # 用于计算 sha1校验和的数据 data = open(__file__, 'rb').read() loop_start = time.perf_counter() for i in range(5): iter_start = time.perf_counter() h = hashlib.sha1() for i in range(300000): h.update(data) cksum = h.digest() now = time.perf_counter() loop_elapsed = now - loop_start iter_elapsed = now - iter_start print(time.ctime(), ': {:0.3f} {:0.3f}'.format(iter_elapsed, loop_elapsed)) # struct_time : The type of the time value sequence returned by def show_struct(s): print(' tm_year :', s.tm_year) print(' tm_mon :', s.tm_mon) print(' tm_mday :', s.tm_mday) print(' tm_hour :', s.tm_hour) print(' tm_min :', s.tm_min) print(' tm_sec :', s.tm_sec) print(' tm_wday :', s.tm_wday) print(' tm_yday :', s.tm_yday) print(' tm_isdst:', s.tm_isdst) print('gmtime: UTC') show_struct(time.gmtime()) print('\nlocaltime:') show_struct(time.localtime()) print('\nmktime:', time.mktime(time.localtime())) # 当前时间依赖于时区设置, 时区可以由程序设置,也可以使用系统默认时区设置 # 改变时区并不会改变实际的时间,只是改变它的表现方式 def show_zone_info(): print(' TZ :', os.environ.get('TZ', '(not set)')) print(' tzname :', time.tzname) print(' Zone : {} ({})'.format(time.timezone, (time.timezone / 3600))) print(' DST :', time.daylight) print(' Time :', time.ctime()) print() print('Default :') show_zone_info() ZONES = [ 'GMT', 'Asia/Hong_Kong', ] for zone in ZONES: # 改变时区,首先设定环境变量TZ,然后调用tzset() os.environ['TZ'] = zone time.tzset() print(zone, ':') show_zone_info() # 解析和格式化时间 # strptime() strftime() now = time.ctime(1552717743.187825) print('Now:', now) parsed = time.strptime(now) print('\nParsed:') show_struct(parsed) print('\nFormatted:', time.strftime("%a %b %d %H:%M:%S %Y", parsed))
27.647059
79
0.644681
582
4,230
4.582474
0.42268
0.035996
0.028121
0.022497
0.028496
0
0
0
0
0
0
0.021226
0.20922
4,230
152
80
27.828947
0.775486
0.33948
0
0.023529
0
0
0.270742
0.007642
0
0
0
0
0
1
0.023529
false
0
0.047059
0
0.070588
0.364706
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
3b88a796d1817b8904a6429803fd5870c9f9d9ef
383
py
Python
django_example/addresses/management/commands/generate_fake_data.py
JCepedaVillamayor/django-example
b099f51be8cf822546018973299006bcff17d349
[ "MIT" ]
null
null
null
django_example/addresses/management/commands/generate_fake_data.py
JCepedaVillamayor/django-example
b099f51be8cf822546018973299006bcff17d349
[ "MIT" ]
7
2018-11-01T13:10:27.000Z
2018-11-04T16:01:52.000Z
django_example/addresses/management/commands/generate_fake_data.py
JCepedaVillamayor/django-example
b099f51be8cf822546018973299006bcff17d349
[ "MIT" ]
null
null
null
from django.core.management.base import BaseCommand from ...test.factories import AddressFactory class Command(BaseCommand): help = "generates fake users to list in the application" def add_arguments(self, parser): parser.add_argument("n_of_entries", type=int) def handle(self, *args, **options): AddressFactory.create_batch(options["n_of_entries"])
27.357143
60
0.733681
49
383
5.591837
0.755102
0.021898
0.072993
0
0
0
0
0
0
0
0
0
0.164491
383
13
61
29.461538
0.85625
0
0
0
1
0
0.185864
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
2
3b895d1b25f903e8bc77ab1b05b04c1d12622eea
5,995
py
Python
poisson_problem/poisson.py
timudk/solving_pdes_with_neural_nets
4aeca4ee1aaa6054307e1051879bed3160ffc247
[ "MIT" ]
69
2019-04-16T06:42:22.000Z
2021-04-06T02:39:21.000Z
poisson_problem/poisson.py
timudk/solving_pdes_with_neural_nets
4aeca4ee1aaa6054307e1051879bed3160ffc247
[ "MIT" ]
null
null
null
poisson_problem/poisson.py
timudk/solving_pdes_with_neural_nets
4aeca4ee1aaa6054307e1051879bed3160ffc247
[ "MIT" ]
19
2019-04-16T14:31:47.000Z
2021-06-05T21:46:53.000Z
import tensorflow as tf tf.set_random_seed(42) import numpy as np from scipy import integrate import neural_networks import poisson_problem import matplotlib.pyplot as plt import sys, getopt class sampling_from_dataset: def __init__(self, filepath, total_samples): self.filepath = filepath self.total_samples = total_samples self.last_grab_int = 0 self.last_grab_bou = 0 def load_dataset(self): self.dataset = np.genfromtxt(self.filepath, delimiter=',') def increase_grab_number(self, num, batchsize): num += batchsize if(num==self.total_samples): return 0 else: return num def interior_samples(self, batchsize): sampling_int_draw_x = self.dataset[self.last_grab_int:(self.last_grab_int+batchsize), 0] sampling_int_draw_y = self.dataset[self.last_grab_int:(self.last_grab_int+batchsize), 1] self.last_grab_int = self.increase_grab_number(self.last_grab_int, batchsize) return sampling_int_draw_x, sampling_int_draw_y def boundary_samples(self, batchsize): sampling_bou_draw_x = self.dataset[self.last_grab_bou:(self.last_grab_bou+batchsize), 2] sampling_bou_draw_y = self.dataset[self.last_grab_bou:(self.last_grab_bou+batchsize), 3] self.last_grab_bou = self.increase_grab_number(self.last_grab_bou, batchsize) return sampling_bou_draw_x, sampling_bou_draw_y def main(argv): # DEFAULT SENSOR_DATA = False N_LAYERS = 1 BATCHSIZE = 1000 MAX_ITER = 50000 DO_SAVE = False SEED = 42 try: opts, args = getopt.getopt(argv,"hb:n:m:d:r:s:",["batchsize=","n_layers=", "max_iterations=", "sensor_data=", "random_seed=", "save_network="]) except getopt.GetoptError: print('poisson.py -b <batchsize> -n <n_layers> -m <max_iterations> -d <sensor_data> -r <random_seed> -s <save_network>') sys.exit(2) for opt, arg in opts: if opt == '-h': print('poisson.py -b <batchsize> -n <n_layers> -m <max_iterations> -d <sensor_data> -r <random_seed> -s <save_network>') sys.exit() elif opt in ("-b", "--batchsize"): BATCHSIZE = int(arg) elif opt in ("-n", "--n_layers"): N_LAYERS = int(arg) elif opt in ("-m", 
"--max_iterations"): MAX_ITER = int(arg) elif opt in ("-d", "--sensor_data"): if(int(arg)==1): SENSOR_DATA = True elif opt in ("-r", "--random_seed"): SEED = int(arg) tf.set_random_seed(SEED) elif opt in ("-s", "--save_network"): DO_SAVE = bool(int(arg)) if DO_SAVE: print("Saving network after training.") HIDDEN_UNITS = [] for i in range(N_LAYERS): HIDDEN_UNITS.append(16) if(SENSOR_DATA): save_name = 'test_model/' + str(len(HIDDEN_UNITS)) + '_layer_sq_loss_' + str(BATCHSIZE) + '_m_iter_' + str(MAX_ITER) + '_rs_' + str(SEED) + '_wsd' else: save_name = 'test_model/' + str(len(HIDDEN_UNITS)) + '_layer_sq_loss_' + str(BATCHSIZE) + '_m_iter_' + str(MAX_ITER) + '_rs_' + str(SEED) problem = poisson_problem.poisson_2d() sampler = sampling_from_dataset('datasets/' + str(BATCHSIZE), BATCHSIZE) sampler.load_dataset() NUM_INPUTS = 2 neural_network = neural_networks.neural_network(NUM_INPUTS, 1, HIDDEN_UNITS) int_var = tf.placeholder(tf.float64, [None, NUM_INPUTS]) bou_var = tf.placeholder(tf.float64, [None, NUM_INPUTS]) sensor_var = tf.placeholder(tf.float64, [None, NUM_INPUTS]) value_int = neural_network.value(int_var) value_bou = neural_network.value(bou_var) value_sensor = neural_network.value(sensor_var) grad = neural_network.first_derivatives(int_var) grad_grad= neural_network.second_derivatives(int_var) grad_grad_sensor = neural_network.second_derivatives(sensor_var) sol_int = tf.placeholder(tf.float64, [None, 1]) sol_bou = tf.placeholder(tf.float64, [None, 1]) sum_of_second_derivatives = 0.0 sum_of_second_derivatives_sensor = 0.0 for i in range(NUM_INPUTS): sum_of_second_derivatives += grad_grad[i] sum_of_second_derivatives_sensor += grad_grad_sensor[i] loss_int = tf.square(sum_of_second_derivatives+sol_int) loss_bou = tf.square(value_bou-sol_bou) loss_sensor_int = tf.square(sum_of_second_derivatives_sensor) loss_sensor_bou = tf.square(value_sensor) loss = tf.sqrt(tf.reduce_mean(loss_int + loss_bou)) sensor_loss = tf.sqrt(tf.reduce_mean(loss_int) + 
tf.reduce_mean(loss_bou) + tf.reduce_mean(loss_sensor_int) + tf.reduce_mean(loss_sensor_bou)) train_scipy = tf.contrib.opt.ScipyOptimizerInterface(loss, method='BFGS', options={'gtol':1e-14, 'disp':True, 'maxiter':MAX_ITER}) train_scipy_sensor = tf.contrib.opt.ScipyOptimizerInterface(sensor_loss, method='BFGS', options={'gtol':1e-14, 'disp':True, 'maxiter':MAX_ITER}) init = tf.global_variables_initializer() saver = tf.train.Saver() with tf.Session() as sess: sess.run(init) int_draw_x, int_draw_y = sampler.interior_samples(BATCHSIZE) int_draw_x = np.reshape(int_draw_x, (BATCHSIZE, 1)) int_draw_y = np.reshape(int_draw_y, (BATCHSIZE, 1)) boundary_draw_x, boundary_draw_y = sampler.boundary_samples(BATCHSIZE) boundary_draw_x = np.reshape(boundary_draw_x, (BATCHSIZE, 1)) boundary_draw_y = np.reshape(boundary_draw_y, (BATCHSIZE, 1)) int_draw = np.concatenate([int_draw_x, int_draw_y], axis=1) bou_draw = np.concatenate([boundary_draw_x, boundary_draw_y], axis=1) f = problem.rhs(int_draw) f = np.reshape(np.array(f), (BATCHSIZE, 1)) bou = problem.velocity(bou_draw) bou = np.reshape(np.array(bou), (BATCHSIZE, 1)) if(SENSOR_DATA): sensor_points_x = np.reshape(np.array([0.0, 1.0, 0.0, 1.0]), (4,1)) sensor_points_y = np.reshape(np.array([0.0, 0.0, 1.0, 1.0]), (4,1)) sensor_points = np.concatenate([sensor_points_x, sensor_points_y], axis=1) print(sensor_points) train_scipy_sensor.minimize(sess, feed_dict={sol_int:f, sol_bou:bou, int_var:int_draw, bou_var:bou_draw, sensor_var: sensor_points}) else: train_scipy.minimize(sess, feed_dict={sol_int:f, sol_bou:bou, int_var:int_draw, bou_var:bou_draw}) if DO_SAVE: save_path = saver.save(sess, save_name) print("Model saved in path: %s" % save_path) if __name__ == '__main__': main(sys.argv[1:])
33.121547
148
0.732944
951
5,995
4.290221
0.17245
0.027451
0.041176
0.025735
0.388971
0.320588
0.277941
0.231863
0.203922
0.186765
0
0.015349
0.130609
5,995
180
149
33.305556
0.76746
0.001168
0
0.069767
0
0.015504
0.097895
0
0
0
0
0
0
1
0.046512
false
0
0.054264
0
0.139535
0.03876
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
3b93a8f3fb66d0f9172c7fab00d036cbaf55135b
429
py
Python
project_euler/37.py
huangshenno1/project_euler
8a3c91fd11bcb6a6a830e963b1d5aed3f5ff787d
[ "MIT" ]
null
null
null
project_euler/37.py
huangshenno1/project_euler
8a3c91fd11bcb6a6a830e963b1d5aed3f5ff787d
[ "MIT" ]
null
null
null
project_euler/37.py
huangshenno1/project_euler
8a3c91fd11bcb6a6a830e963b1d5aed3f5ff787d
[ "MIT" ]
null
null
null
maxn = 1000000 isprime = [False] * 2 + [True] * maxn for i in range(2, maxn): if isprime[i]: j = i*i while j < maxn: isprime[j] = False j += i def truncatable(n): x = n while x > 0: if not isprime[x]: return False x /= 10 b = 10 x = n % b while x < n: if not isprime[x]: return False b *= 10 x = n % b return True ans = 0 for i in range(10, maxn): if truncatable(i): ans += i print i print ans
14.793103
37
0.566434
81
429
3
0.296296
0.032922
0.049383
0.090535
0.246914
0.197531
0
0
0
0
0
0.063123
0.298368
429
28
38
15.321429
0.744186
0
0
0.153846
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0.076923
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
3b96b5f799965c9ad6a6862901c115567a0b79c1
1,691
py
Python
{{cookiecutter.app_slug}}/{{cookiecutter.app_slug}}/cli.py
epandurski/cookiecutter-flask-signalbus
17f0ba711205a334359643421fc7184a6f84ff32
[ "MIT" ]
null
null
null
{{cookiecutter.app_slug}}/{{cookiecutter.app_slug}}/cli.py
epandurski/cookiecutter-flask-signalbus
17f0ba711205a334359643421fc7184a6f84ff32
[ "MIT" ]
null
null
null
{{cookiecutter.app_slug}}/{{cookiecutter.app_slug}}/cli.py
epandurski/cookiecutter-flask-signalbus
17f0ba711205a334359643421fc7184a6f84ff32
[ "MIT" ]
null
null
null
import click from os import environ from flask.cli import with_appcontext @click.group('{{cookiecutter.app_slug}}') def {{cookiecutter.app_slug}}(): """Perform {{cookiecutter.app_name}} specific operations.""" @{{cookiecutter.app_slug}}.command() @with_appcontext @click.argument('queue_name') def subscribe(queue_name): # pragma: no cover """Subscribe a queue for the observed events and messages. QUEUE_NAME specifies the name of the queue. """ from .extensions import broker, MAIN_EXCHANGE_NAME from . import actors # noqa channel = broker.channel channel.exchange_declare(MAIN_EXCHANGE_NAME) click.echo(f'Declared "{MAIN_EXCHANGE_NAME}" direct exchange.') if environ.get('APP_USE_LOAD_BALANCING_EXCHANGE', '') not in ['', 'False']: bind = channel.exchange_bind unbind = channel.exchange_unbind else: bind = channel.queue_bind unbind = channel.queue_unbind bind(queue_name, MAIN_EXCHANGE_NAME, queue_name) click.echo(f'Subscribed "{queue_name}" to "{MAIN_EXCHANGE_NAME}.{queue_name}".') for actor in [broker.get_actor(actor_name) for actor_name in broker.get_declared_actors()]: if 'event_subscription' in actor.options: routing_key = f'events.{actor.actor_name}' if actor.options['event_subscription']: bind(queue_name, MAIN_EXCHANGE_NAME, routing_key) click.echo(f'Subscribed "{queue_name}" to "{MAIN_EXCHANGE_NAME}.{routing_key}".') else: unbind(queue_name, MAIN_EXCHANGE_NAME, routing_key) click.echo(f'Unsubscribed "{queue_name}" from "{MAIN_EXCHANGE_NAME}.{routing_key}".')
36.76087
101
0.690124
213
1,691
5.211268
0.309859
0.089189
0.12973
0.082883
0.235135
0.186486
0.156757
0.156757
0.156757
0.156757
0
0
0.194559
1,691
45
102
37.577778
0.814978
0.012419
0
0.064516
0
0
0.255533
0.142857
0
0
0
0
0
0
null
null
0
0.16129
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
3b99148519a93c8543e9564b329c4137fc41b8bf
1,509
py
Python
PythonBot.py
quasiyoke/PythonBot
d665a1580b683b8dbf4c68f50e112eb9ec30f8d0
[ "Apache-2.0" ]
9
2021-07-07T16:57:17.000Z
2021-11-14T17:45:10.000Z
PythonBot.py
quasiyoke/PythonBot
d665a1580b683b8dbf4c68f50e112eb9ec30f8d0
[ "Apache-2.0" ]
null
null
null
PythonBot.py
quasiyoke/PythonBot
d665a1580b683b8dbf4c68f50e112eb9ec30f8d0
[ "Apache-2.0" ]
2
2021-11-20T10:26:18.000Z
2021-11-26T09:18:13.000Z
from substrateinterface import SubstrateInterface, Keypair from substrateinterface.exceptions import SubstrateRequestException from scalecodec.type_registry import load_type_registry_file import time substrate = SubstrateInterface( url='wss://ws.mof.sora.org', ss58_format=69, type_registry_preset='default', type_registry=load_type_registry_file('custom_types.json'), ) keypair = Keypair.create_from_mnemonic('<your 12 word passphrase here>') call = substrate.compose_call( call_module='LiquidityProxy', call_function='swap', call_params={ 'dex_id': '0', 'input_asset_id': '0x0200050000000000000000000000000000000000000000000000000000000000', 'output_asset_id': '0x0200000000000000000000000000000000000000000000000000000000000000', 'swap_amount': {'WithDesiredInput': {'desired_amount_in': '13370000000000000000000', 'min_amount_out': '0'}}, 'selected_source_types': ["XYKPool","MulticollateralBondingCurvePool"], 'filter_mode': 'AllowSelected' } ) while True: try: extrinsic = substrate.create_signed_extrinsic(call=call, keypair=keypair) receipt = substrate.submit_extrinsic(extrinsic, wait_for_inclusion=False) print("Extrinsic '{}' sent".format(receipt.extrinsic_hash)) # print("Extrinsic '{}' sent and included in block '{}'".format(receipt.extrinsic_hash, receipt.block_hash)) except Exception as e: print("Failed to send: {}".format(e)) time.sleep(100)
33.533333
117
0.732936
152
1,509
7.019737
0.585526
0.056232
0.029991
0.037488
0
0
0
0
0
0
0
0.128728
0.155732
1,509
44
118
34.295455
0.708791
0.070245
0
0
0
0
0.330951
0.162974
0
0
0.094353
0
0
1
0
false
0.032258
0.129032
0
0.129032
0.064516
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
3b99148cfdeb90a765f839391c5a34781f128d17
185
py
Python
app/schemas.py
ev-horrosh/fastapi-project
4e38f5eb7573b8d70add47dd52fc973e9fed07b9
[ "MIT" ]
null
null
null
app/schemas.py
ev-horrosh/fastapi-project
4e38f5eb7573b8d70add47dd52fc973e9fed07b9
[ "MIT" ]
null
null
null
app/schemas.py
ev-horrosh/fastapi-project
4e38f5eb7573b8d70add47dd52fc973e9fed07b9
[ "MIT" ]
null
null
null
from re import S from pydantic import BaseModel from typing import Optional class User(BaseModel): first_name:str last_name:str age:int sex:Optional[str]=None
16.818182
30
0.708108
27
185
4.777778
0.666667
0.108527
0
0
0
0
0
0
0
0
0
0
0.237838
185
11
31
16.818182
0.914894
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.375
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
3b9a007a7f11cb78845931d5c9222d48f264b871
17,892
py
Python
pxr/usd/usd/testenv/testUsdStagePopulationMasks.py
DougRogers-DigitalFish/USD
d8a405a1344480f859f025c4f97085143efacb53
[ "BSD-2-Clause" ]
3,680
2016-07-26T18:28:11.000Z
2022-03-31T09:55:05.000Z
pxr/usd/usd/testenv/testUsdStagePopulationMasks.py
DougRogers-DigitalFish/USD
d8a405a1344480f859f025c4f97085143efacb53
[ "BSD-2-Clause" ]
1,759
2016-07-26T19:19:59.000Z
2022-03-31T21:24:00.000Z
pxr/usd/usd/testenv/testUsdStagePopulationMasks.py
DougRogers-DigitalFish/USD
d8a405a1344480f859f025c4f97085143efacb53
[ "BSD-2-Clause" ]
904
2016-07-26T18:33:40.000Z
2022-03-31T09:55:16.000Z
#!/pxrpythonsubst # # Copyright 2017 Pixar # # Licensed under the Apache License, Version 2.0 (the "Apache License") # with the following modification; you may not use this file except in # compliance with the Apache License and the following modification to it: # Section 6. Trademarks. is deleted and replaced with: # # 6. Trademarks. This License does not grant permission to use the trade # names, trademarks, service marks, or product names of the Licensor # and its affiliates, except as required to comply with Section 4(c) of # the License and to reproduce the content of the NOTICE file. # # You may obtain a copy of the Apache License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the Apache License with the above modification is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the Apache License for the specific # language governing permissions and limitations under the Apache License. 
import unittest from pxr import Usd, Sdf, Tf class TestUsdStagePopulationMask(unittest.TestCase): def test_Basic(self): pm = Usd.StagePopulationMask.All() assert not pm.IsEmpty() assert pm.Includes('/any/path') assert pm.GetIncludedChildNames('/') == (True, []) pm = Usd.StagePopulationMask() assert pm.IsEmpty() assert not pm.Includes('/any/path') assert pm.GetIncludedChildNames('/') == (False, []) pm2 = Usd.StagePopulationMask().Add('/foo').Add('/bar') assert not pm.Includes(pm2) assert pm2.Includes(pm) assert pm.GetUnion(pm2) == pm2 assert Usd.StagePopulationMask.Union(pm, pm2) == pm2 assert pm2.GetIncludedChildNames('/') == (True, ['bar', 'foo']) assert pm2.GetIncludedChildNames('/foo') == (True, []) assert pm2.GetIncludedChildNames('/bar') == (True, []) assert pm2.GetIncludedChildNames('/baz') == (False, []) pm.Add('/World/anim/chars/CharGroup') assert pm.GetPaths() == ['/World/anim/chars/CharGroup'] assert not pm.IsEmpty() pm.Add('/World/anim/chars/CharGroup/child') assert pm.GetPaths() == ['/World/anim/chars/CharGroup'] pm.Add('/World/anim/chars/OtherCharGroup') assert pm.GetPaths() == ['/World/anim/chars/CharGroup', '/World/anim/chars/OtherCharGroup'] pm.Add('/World/sets/arch/Building') assert pm.GetPaths() == ['/World/anim/chars/CharGroup', '/World/anim/chars/OtherCharGroup', '/World/sets/arch/Building'] pm2 = Usd.StagePopulationMask() assert pm2 != pm pm2.Add('/World/anim/chars/CharGroup') assert pm2 != pm pm2.Add('/World/sets/arch/Building') pm2.Add('/World/anim/chars/OtherCharGroup') pm2.Add('/World/anim/chars/CharGroup/child') assert pm2 == pm assert pm2.GetUnion(pm) == pm assert pm2.GetUnion(pm) == pm2 pm2 = Usd.StagePopulationMask() assert Usd.StagePopulationMask.Union(pm, pm2) == pm assert Usd.StagePopulationMask.Union(pm, pm2) != pm2 assert pm.Includes('/World') assert not pm.IncludesSubtree('/World') assert pm.Includes('/World/anim') assert not pm.IncludesSubtree('/World/anim') assert pm.Includes('/World/anim/chars/CharGroup') assert 
pm.IncludesSubtree('/World/anim/chars/CharGroup') assert pm.Includes('/World/anim/chars/CharGroup/child') assert pm.IncludesSubtree('/World/anim/chars/CharGroup/child') pm = Usd.StagePopulationMask().Add('/world/anim') pm2 = pm.GetUnion('/world') assert pm2.GetPaths() == ['/world'] pm = Usd.StagePopulationMask(['/A', '/AA', '/B/C', '/U']) pm2 = Usd.StagePopulationMask(['/A/X', '/B', '/Q']) assert (Usd.StagePopulationMask.Union(pm, pm2) == Usd.StagePopulationMask(['/A', '/AA', '/B', '/Q', '/U'])) assert (Usd.StagePopulationMask.Intersection(pm, pm2) == Usd.StagePopulationMask(['/A/X', '/B/C'])) pm = Usd.StagePopulationMask(['/A/B', '/A/C', '/A/D/E', '/A/D/F', '/B']) assert pm.GetIncludedChildNames('/') == (True, ['A', 'B']) assert pm.GetIncludedChildNames('/A') == (True, ['B', 'C', 'D']) assert pm.GetIncludedChildNames('/A/B') == (True, []) assert pm.GetIncludedChildNames('/A/C') == (True, []) assert pm.GetIncludedChildNames('/A/D') == (True, ['E', 'F']) assert pm.GetIncludedChildNames('/A/D/E') == (True, []) assert pm.GetIncludedChildNames('/A/D/F') == (True, []) assert pm.GetIncludedChildNames('/B') == (True, []) assert pm.GetIncludedChildNames('/C') == (False, []) # Errors. 
with self.assertRaises(Tf.ErrorException): Usd.StagePopulationMask(['relativePath/is/no/good']) with self.assertRaises(Tf.ErrorException): Usd.StagePopulationMask().Add('relativePath/is/no/good') with self.assertRaises(Tf.ErrorException): Usd.StagePopulationMask(['/property/path/is/no.good']) with self.assertRaises(Tf.ErrorException): Usd.StagePopulationMask().Add('/property/path/is/no.good') with self.assertRaises(Tf.ErrorException): Usd.StagePopulationMask(['/variant/selection/path/is{no=good}']) with self.assertRaises(Tf.ErrorException): Usd.StagePopulationMask().Add('/variant/selection/path/is{no=good}') def test_Stages(self): unmasked = Usd.Stage.CreateInMemory() unmasked.DefinePrim('/World/anim/chars/DoryGroup/Dory') unmasked.DefinePrim('/World/anim/chars/NemoGroup/Nemo') unmasked.DefinePrim('/World/sets/Reef/Coral/CoralGroup1') unmasked.DefinePrim('/World/sets/Reef/Rocks/RockGroup1') doryMask = Usd.StagePopulationMask().Add('/World/anim/chars/DoryGroup') doryStage = Usd.Stage.OpenMasked(unmasked.GetRootLayer(), doryMask) assert doryStage.GetPopulationMask() == doryMask assert doryStage.GetPrimAtPath('/World') assert doryStage.GetPrimAtPath('/World/anim') assert doryStage.GetPrimAtPath('/World/anim/chars') assert doryStage.GetPrimAtPath('/World/anim/chars/DoryGroup') assert doryStage.GetPrimAtPath('/World/anim/chars/DoryGroup/Dory') assert not doryStage.GetPrimAtPath('/World/sets') assert not doryStage.GetPrimAtPath('/World/anim/chars/NemoGroup') assert not doryStage._GetPcpCache().FindPrimIndex('/World/sets') assert not doryStage._GetPcpCache().FindPrimIndex( '/World/anim/chars/NemoGroup') doryAndNemoMask = (Usd.StagePopulationMask() .Add('/World/anim/chars/DoryGroup') .Add('/World/anim/chars/NemoGroup')) # Test modifying an existing mask. 
doryStage.SetPopulationMask(doryAndNemoMask) assert doryStage.GetPrimAtPath('/World') assert doryStage.GetPrimAtPath('/World/anim') assert doryStage.GetPrimAtPath('/World/anim/chars') assert doryStage.GetPrimAtPath('/World/anim/chars/DoryGroup') assert doryStage.GetPrimAtPath('/World/anim/chars/DoryGroup/Dory') assert doryStage.GetPrimAtPath('/World/anim/chars/NemoGroup') assert doryStage.GetPrimAtPath('/World/anim/chars/NemoGroup/Nemo') assert doryStage._GetPcpCache().FindPrimIndex( '/World/anim/chars/NemoGroup') doryStage.SetPopulationMask(doryMask) assert doryStage.GetPrimAtPath('/World') assert doryStage.GetPrimAtPath('/World/anim') assert doryStage.GetPrimAtPath('/World/anim/chars') assert doryStage.GetPrimAtPath('/World/anim/chars/DoryGroup') assert doryStage.GetPrimAtPath('/World/anim/chars/DoryGroup/Dory') assert not doryStage.GetPrimAtPath('/World/anim/chars/NemoGroup') assert not doryStage.GetPrimAtPath('/World/anim/chars/NemoGroup/Nemo') assert not doryStage._GetPcpCache().FindPrimIndex( '/World/anim/chars/NemoGroup') doryAndNemoStage = Usd.Stage.OpenMasked( unmasked.GetRootLayer(), doryAndNemoMask) assert doryAndNemoStage.GetPopulationMask() == doryAndNemoMask assert doryAndNemoStage.GetPrimAtPath('/World') assert doryAndNemoStage.GetPrimAtPath('/World/anim') assert doryAndNemoStage.GetPrimAtPath('/World/anim/chars') assert doryAndNemoStage.GetPrimAtPath('/World/anim/chars/DoryGroup') assert doryAndNemoStage.GetPrimAtPath('/World/anim/chars/DoryGroup/Dory') assert doryAndNemoStage.GetPrimAtPath('/World/anim/chars/NemoGroup') assert doryAndNemoStage.GetPrimAtPath('/World/anim/chars/NemoGroup/Nemo') assert not doryAndNemoStage.GetPrimAtPath('/World/sets') def test_ExpansionRelationships(self): stage = Usd.Stage.CreateInMemory() a = stage.DefinePrim('/World/A') b = stage.DefinePrim('/World/B') c = stage.DefinePrim('/World/C') d = stage.DefinePrim('/World/D') e = stage.DefinePrim('/World/E') cAttr = c.CreateAttribute('attr', Sdf.ValueTypeNames.Float) 
a.CreateRelationship('r').AddTarget(b.GetPath()) b.CreateRelationship('r').AddTarget(cAttr.GetPath()) c.CreateRelationship('r').AddTarget(d.GetPath()) a.CreateRelationship('pred').AddTarget(e.GetPath()) mask = Usd.StagePopulationMask().Add(a.GetPath()) masked = Usd.Stage.OpenMasked(stage.GetRootLayer(), mask) assert masked.GetPrimAtPath(a.GetPath()) assert not masked.GetPrimAtPath(b.GetPath()) assert not masked.GetPrimAtPath(c.GetPath()) assert not masked.GetPrimAtPath(d.GetPath()) assert not masked.GetPrimAtPath(e.GetPath()) # Now expand the mask for all relationships. masked.ExpandPopulationMask() assert masked.GetPrimAtPath(a.GetPath()) assert masked.GetPrimAtPath(b.GetPath()) assert masked.GetPrimAtPath(c.GetPath()) assert masked.GetPrimAtPath(d.GetPath()) assert masked.GetPrimAtPath(e.GetPath()) masked.SetPopulationMask(Usd.StagePopulationMask().Add(a.GetPath())) assert masked.GetPrimAtPath(a.GetPath()) assert not masked.GetPrimAtPath(b.GetPath()) assert not masked.GetPrimAtPath(c.GetPath()) assert not masked.GetPrimAtPath(d.GetPath()) assert not masked.GetPrimAtPath(e.GetPath()) # Expand with a predicate that only consults relationships named 'pred' masked.ExpandPopulationMask( relationshipPredicate=lambda r: r.GetName() == 'pred') assert masked.GetPrimAtPath(a.GetPath()) assert not masked.GetPrimAtPath(b.GetPath()) assert not masked.GetPrimAtPath(c.GetPath()) assert not masked.GetPrimAtPath(d.GetPath()) assert masked.GetPrimAtPath(e.GetPath()) def test_ExpansionConnections(self): stage = Usd.Stage.CreateInMemory() a = stage.DefinePrim('/World/A') b = stage.DefinePrim('/World/B') c = stage.DefinePrim('/World/C') d = stage.DefinePrim('/World/D') e = stage.DefinePrim('/World/E') bAttr = b.CreateAttribute('attr', Sdf.ValueTypeNames.Float) cAttr = c.CreateAttribute('attr', Sdf.ValueTypeNames.Float) dAttr = d.CreateAttribute('attr', Sdf.ValueTypeNames.Float) eAttr = e.CreateAttribute('attr', Sdf.ValueTypeNames.Float) floatType = Sdf.ValueTypeNames.Float 
a.CreateAttribute('a', floatType).AddConnection(bAttr.GetPath()) b.CreateAttribute('a', floatType).AddConnection(cAttr.GetPath()) c.CreateAttribute('a', floatType).AddConnection(dAttr.GetPath()) a.CreateAttribute('pred', floatType).AddConnection(eAttr.GetPath()) mask = Usd.StagePopulationMask().Add(a.GetPath()) masked = Usd.Stage.OpenMasked(stage.GetRootLayer(), mask) assert masked.GetPrimAtPath(a.GetPath()) assert not masked.GetPrimAtPath(b.GetPath()) assert not masked.GetPrimAtPath(c.GetPath()) assert not masked.GetPrimAtPath(d.GetPath()) assert not masked.GetPrimAtPath(e.GetPath()) # Now expand the mask for all connections. masked.ExpandPopulationMask() assert masked.GetPrimAtPath(a.GetPath()) assert masked.GetPrimAtPath(b.GetPath()) assert masked.GetPrimAtPath(c.GetPath()) assert masked.GetPrimAtPath(d.GetPath()) assert masked.GetPrimAtPath(e.GetPath()) masked.SetPopulationMask(Usd.StagePopulationMask().Add(a.GetPath())) assert masked.GetPrimAtPath(a.GetPath()) assert not masked.GetPrimAtPath(b.GetPath()) assert not masked.GetPrimAtPath(c.GetPath()) assert not masked.GetPrimAtPath(d.GetPath()) assert not masked.GetPrimAtPath(e.GetPath()) # Expand with a predicate that only consults attributes named 'pred' masked.ExpandPopulationMask( attributePredicate=lambda r: r.GetName() == 'pred') assert masked.GetPrimAtPath(a.GetPath()) assert not masked.GetPrimAtPath(b.GetPath()) assert not masked.GetPrimAtPath(c.GetPath()) assert not masked.GetPrimAtPath(d.GetPath()) assert masked.GetPrimAtPath(e.GetPath()) def test_Bug143308(self): # We didn't correctly mask calls to parallel prim indexing, leading to # errors with instancing. 
stage = Usd.Stage.CreateInMemory() foo, bar, i1, i2 = [ stage.DefinePrim(p) for p in ('/foo', '/bar', '/i1', '/i2')] foo.SetInstanceable(True) [p.GetReferences().AddInternalReference(foo.GetPath()) for p in (i1, i2)] assert len(stage.GetPrototypes()) stage2 = Usd.Stage.OpenMasked( stage.GetRootLayer(), Usd.StagePopulationMask(['/i1'])) assert len(stage2.GetPrototypes()) def test_Bug145873(self): # The payload inclusion predicate wasn't being invoked on ancestors of # requested index paths in pcp. payload = Usd.Stage.CreateInMemory() for n in ('One', 'Two', 'Three'): payload.DefinePrim('/CubesModel/Geom/Cube' + n) root = Usd.Stage.CreateInMemory() cubes = root.DefinePrim('/Cubes') cubes.GetPayloads().AddPayload(payload.GetRootLayer().identifier, '/CubesModel') testStage = Usd.Stage.OpenMasked( root.GetRootLayer(), Usd.StagePopulationMask(['/Cubes/Geom/CubeTwo'])) # Only /Cubes/Geom/CubeTwo (and ancestors) should be present. assert testStage.GetPrimAtPath('/Cubes') assert testStage.GetPrimAtPath('/Cubes/Geom') assert not testStage.GetPrimAtPath('/Cubes/Geom/CubeOne') assert testStage.GetPrimAtPath('/Cubes/Geom/CubeTwo') assert not testStage.GetPrimAtPath('/Cubes/Geom/CubeThree') def test_Bug152904(self): # Prototype prims weren't being generated on stages where the population # mask included paths of prims beneath instances. stage = Usd.Stage.CreateInMemory() stage.DefinePrim('/Ref/geom') stage.DefinePrim('/Ref/shading') for path in ['/Instance_1', '/Instance_2']: prim = stage.DefinePrim(path) prim.GetReferences().AddInternalReference('/Ref') prim.SetInstanceable(True) # Open the stage with a mask that includes the 'geom' prim beneath # the instances. maskedStage = Usd.Stage.OpenMasked( stage.GetRootLayer(), Usd.StagePopulationMask(['/Instance_1/geom', '/Instance_2/geom'])) # Both instances should share the same prototype prim. 
instance_1 = maskedStage.GetPrimAtPath('/Instance_1') assert instance_1.IsInstance() assert instance_1.GetPrototype() instance_2 = maskedStage.GetPrimAtPath('/Instance_2') assert instance_2.IsInstance() assert instance_2.GetPrototype() # Only the 'geom' prim in the prototype will be composed, since # it's the only one in the population mask. assert instance_1.GetPrototype() == instance_2.GetPrototype() prototype = instance_1.GetPrototype() assert prototype.GetChild('geom') assert not prototype.GetChild('shading') # Open the stage with a mask that includes the 'geom' prim beneath # /Instance_1 and all children beneath /Instance_2. maskedStage = Usd.Stage.OpenMasked( stage.GetRootLayer(), Usd.StagePopulationMask(['/Instance_1/geom', '/Instance_2'])) # Both instances should *not* share the same prototype, since they # are affected by different population masks. instance_1 = maskedStage.GetPrimAtPath('/Instance_1') assert instance_1.IsInstance() assert instance_1.GetPrototype() instance_2 = maskedStage.GetPrimAtPath('/Instance_2') assert instance_2.IsInstance() assert instance_2.GetPrototype() # Only the 'geom' prim will be composed in the prototype for the # /Instance_1, but both 'geom' and 'shading' will be composed for # /Instance_2. assert instance_1.GetPrototype() != instance_2.GetPrototype() prototype = instance_1.GetPrototype() assert prototype.GetChild('geom') assert not prototype.GetChild('shading') prototype = instance_2.GetPrototype() assert prototype.GetChild('geom') assert prototype.GetChild('shading') if __name__ == '__main__': unittest.main()
44.397022
81
0.649564
1,892
17,892
6.115222
0.156448
0.038894
0.052031
0.041832
0.643215
0.558341
0.511322
0.422731
0.402161
0.390925
0
0.007462
0.220993
17,892
402
82
44.507463
0.822643
0.126649
0
0.448763
0
0
0.147323
0.095776
0
0
0
0
0.530035
1
0.024735
false
0
0.007067
0
0.035336
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
1
3b9b35f7c92754e4b2f2e40b05e20b3c368edfaa
2,822
py
Python
mutalyzer_mutator/mutator.py
mutalyzer/mutator
43a9fc929e054552ef6a2ed2d0cdf71e49ebf005
[ "MIT" ]
null
null
null
mutalyzer_mutator/mutator.py
mutalyzer/mutator
43a9fc929e054552ef6a2ed2d0cdf71e49ebf005
[ "MIT" ]
null
null
null
mutalyzer_mutator/mutator.py
mutalyzer/mutator
43a9fc929e054552ef6a2ed2d0cdf71e49ebf005
[ "MIT" ]
null
null
null
""" Module to mutate sequences based on a variants list. Assumptions for which no check is performed: - Only ``deletion insertion`` operations. - Only exact locations, i.e., no uncertainties such as `10+?`. - Locations are zero-based right-open with ``start > end``. - There is no overlapping between variants locations. Notes: - If any of the above is not met, the result will be bogus. - There can be empty inserted lists. """ from .util import reverse_complement class UnknownInsertedSource(Exception): pass def _get_inverted(sequence): """ Reverse complement inversion using code extracted from BioPython. """ return reverse_complement(sequence) def _get_start_end(location): """ Get the start and the end of a location object. For point locations both start and end equal the position value. """ if location["type"] == "range": return location["start"]["position"], location["end"]["position"] elif location["type"] == "point": return location["position"], location["position"] def _get_inserted_sequence(inserted, sequences): """ Retrieves the actual sequence mentioned in the insertion. """ if inserted["source"] == "description": sequence = inserted["sequence"] elif inserted["source"] == "reference": sequence = sequences[inserted["source"]][ slice(*_get_start_end(inserted["location"])) ] elif isinstance(inserted["source"], dict) and inserted["source"].get("id"): sequence = sequences[inserted["source"]["id"]][ slice(*_get_start_end(inserted["location"])) ] else: raise UnknownInsertedSource("Inserted source not supported.") if ( inserted.get("repeat_number") and inserted["repeat_number"].get("value") is not None ): sequence = sequence * inserted.get("repeat_number")["value"] if inserted.get("inverted"): sequence = _get_inverted(sequence) return sequence def mutate(sequences, variants): """ Mutate the reference sequence under ``sequences["reference"]`` according to the provided variants operations. :arg dict sequences: Sequences dictionary. :arg list variants: Operations list. 
:returns: Mutated sequence. :rtype: str """ reference = sequences["reference"] variants = sorted(variants, key=lambda v: (_get_start_end(v["location"]))) parts = [] current_index = 0 for variant in variants: start, end = _get_start_end(variant["location"]) parts.append(reference[current_index:start]) for insertion in variant["inserted"]: parts.append(_get_inserted_sequence(insertion, sequences)) current_index = end parts.append(reference[current_index:]) return "".join(parts)
29.395833
79
0.665131
321
2,822
5.741433
0.376947
0.030385
0.029843
0.033641
0.069452
0.034726
0
0
0
0
0
0.001355
0.21545
2,822
95
80
29.705263
0.831075
0.318214
0
0.045455
0
0
0.143013
0
0
0
0
0
0
1
0.090909
false
0.022727
0.022727
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
3b9b566f35bb3be3bbe04e1b0c6ea0b1acb1d8bc
1,791
py
Python
day11/day11_2.py
DanTGL/AdventOfCode2020
bf7cd6a4fb7701155785b941facdc1e4859ba297
[ "MIT" ]
null
null
null
day11/day11_2.py
DanTGL/AdventOfCode2020
bf7cd6a4fb7701155785b941facdc1e4859ba297
[ "MIT" ]
null
null
null
day11/day11_2.py
DanTGL/AdventOfCode2020
bf7cd6a4fb7701155785b941facdc1e4859ba297
[ "MIT" ]
null
null
null
import copy from collections import defaultdict inputs = [list(line) for line in open("day11/input").read().splitlines()] nodes = defaultdict(lambda: []) for y in range(len(inputs)): for x in range(len(inputs[y])): if inputs[y][x] != ".": for i in range(-1, 2): for j in range(-1, 2): if 0 == j and 0 == i: continue index_x = x + j index_y = y + i while 0 <= index_y < len(inputs) and 0 <= index_x < len(inputs[y]): if inputs[index_y][index_x] != ".": nodes[x + len(inputs[y]) * y].append((index_y, index_x)) break index_x += j index_y += i def round(seats): result = copy.deepcopy(seats) for y in range(len(seats)): for x in range(len(seats[y])): if seats[y][x] != ".": occupied_adjacent = 0 for node in nodes[x + len(seats[y]) * y]: neighbour = seats[node[0]][node[1]] if neighbour == "#": occupied_adjacent += 1 if seats[y][x] == "L" and occupied_adjacent == 0: result[y][x] = "#" elif seats[y][x] == "#" and occupied_adjacent >= 5: result[y][x] = "L" return result seats = inputs while True: prev_seats = copy.deepcopy(seats) seats = round(seats) if prev_seats == seats: break total_occupied = 0 for y in range(len(seats)): for x in range(len(seats[y])): if seats[y][x] == "#": total_occupied += 1 print("Total seats occupied: " + str(total_occupied))
28.887097
87
0.460078
221
1,791
3.642534
0.230769
0.069565
0.074534
0.074534
0.195031
0.119255
0.119255
0.119255
0.119255
0.119255
0
0.01687
0.404243
1,791
62
88
28.887097
0.737582
0
0
0.130435
0
0
0.023438
0
0
0
0
0
0
1
0.021739
false
0
0.043478
0
0.086957
0.021739
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
3b9c267d1ea8454b5606dbcad892d1bfd4a767ba
1,553
py
Python
server/UserProfile/migrations/0002_doctorprofile.py
dimejiconsult/Telemedicine
af812bd8703d86e648105dc0c01b02f6af783dee
[ "MIT" ]
null
null
null
server/UserProfile/migrations/0002_doctorprofile.py
dimejiconsult/Telemedicine
af812bd8703d86e648105dc0c01b02f6af783dee
[ "MIT" ]
8
2020-08-04T22:42:45.000Z
2022-03-12T00:48:53.000Z
server/UserProfile/migrations/0002_doctorprofile.py
dimejiconsult/Telemedicine
af812bd8703d86e648105dc0c01b02f6af783dee
[ "MIT" ]
null
null
null
# Generated by Django 2.2.3 on 2020-07-31 14:51 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('UserProfile', '0001_initial'), ] operations = [ migrations.CreateModel( name='DoctorProfile', fields=[ ('profile_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)), ('gender', models.CharField(choices=[('Male', 'Male'), ('Female', 'Female')], max_length=7)), ('date_of_birth', models.DateField()), ('Year_of_Graduation', models.DateField()), ('Sch_of_Graduation', models.CharField(max_length=255)), ('Hospital_of_housemanship', models.CharField(max_length=255)), ('Folio_Number', models.CharField(max_length=50)), ('Full_License', models.FileField(upload_to='../media/License_document/%Y/%m/%d/')), ('Evidence_of_License_Reg', models.FileField(upload_to='../media/Evidence_of_Annual_License_Reg/%Y/%m/%d/')), ('CV', models.FileField(upload_to='../media/CV/%Y/%m/%d/')), ('Specialization', models.CharField(max_length=50)), ], options={ 'abstract': False, }, bases=('UserProfile.profile', models.Model), ), ]
43.138889
200
0.599485
166
1,553
5.403614
0.518072
0.083612
0.080268
0.107023
0.211817
0
0
0
0
0
0
0.025619
0.245976
1,553
35
201
44.371429
0.740393
0.028976
0
0
1
0
0.225764
0.10093
0
0
0
0
0
1
0
false
0
0.103448
0
0.206897
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
3b9cb1b86a1c2543fda4e403d263a899de8dcbf8
529
py
Python
python/ddf/conf.py
JLLeitschuh/DDF
e4e68315dcec1ed8b287bf1ee73baa88e7e41eba
[ "Apache-2.0" ]
160
2015-01-04T03:29:24.000Z
2022-01-30T18:02:50.000Z
python/ddf/conf.py
JLLeitschuh/DDF
e4e68315dcec1ed8b287bf1ee73baa88e7e41eba
[ "Apache-2.0" ]
164
2015-01-15T12:18:28.000Z
2017-06-02T06:49:01.000Z
python/ddf/conf.py
JLLeitschuh/DDF
e4e68315dcec1ed8b287bf1ee73baa88e7e41eba
[ "Apache-2.0" ]
43
2015-01-13T08:35:37.000Z
2021-02-26T02:43:43.000Z
from __future__ import unicode_literals def find_ddf(): import os if 'DDF_HOME' in os.environ: return os.path.abspath(os.environ['DDF_HOME']) path = os.path.abspath(os.path.split(os.path.abspath(__file__))[0] + '/../../') if all([os.path.exists(os.path.join(path, x)) for x in ['core', 'spark']]): return path raise ImportError('Unable to find DDF_HOME. Please define this variable in your environment') DDF_HOME = find_ddf() # TODO: find a better way to set this SCALA_VERSION = '2.10'
27.842105
97
0.671078
83
529
4.084337
0.542169
0.106195
0.115044
0.088496
0
0
0
0
0
0
0
0.009281
0.185255
529
18
98
29.388889
0.777262
0.066163
0
0
0
0
0.219512
0
0
0
0
0.055556
0
1
0.090909
false
0
0.272727
0
0.545455
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
1
0
0
2
3b9d1545ec19f606b37c767e1e6cb76be606291c
196
py
Python
_solutions/basics/function/function_lambda_a.py
sages-pl/2022-01-pythonsqlalchemy-aptiv
1d6d856608e9dbe25b139e8968c48b7f46753b84
[ "MIT" ]
null
null
null
_solutions/basics/function/function_lambda_a.py
sages-pl/2022-01-pythonsqlalchemy-aptiv
1d6d856608e9dbe25b139e8968c48b7f46753b84
[ "MIT" ]
null
null
null
_solutions/basics/function/function_lambda_a.py
sages-pl/2022-01-pythonsqlalchemy-aptiv
1d6d856608e9dbe25b139e8968c48b7f46753b84
[ "MIT" ]
null
null
null
numbers = (x for x in range(1, 34) if x % 3 == 0) numbers = filter(lambda x: x % 2, numbers) numbers = map(lambda x: x ** 3, numbers) numbers = list(numbers) result = sum(numbers) / len(numbers)
28
49
0.642857
34
196
3.705882
0.529412
0.031746
0.126984
0
0
0
0
0
0
0
0
0.044586
0.19898
196
6
50
32.666667
0.757962
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
3b9f847d0cac6111d1ef645972a3872e4e96abc4
3,896
py
Python
algospot/submit.py
seirion/code
3b8bf79764107255185061cec33decbc2235d03a
[ "Apache-2.0" ]
13
2015-06-07T09:26:26.000Z
2019-05-01T13:23:38.000Z
algospot/submit.py
seirion/code
3b8bf79764107255185061cec33decbc2235d03a
[ "Apache-2.0" ]
null
null
null
algospot/submit.py
seirion/code
3b8bf79764107255185061cec33decbc2235d03a
[ "Apache-2.0" ]
4
2016-03-05T06:21:05.000Z
2017-02-17T15:34:18.000Z
# written by falsetru import cookielib import os from contextlib import closing import re import getpass import webbrowser import sys import requests class AlgoSpot(object): login_url = 'https://algospot.com/accounts/login/?next=/' def __init__(self): cookiefile_path = os.path.join(os.path.expanduser('~'), '.kaka') self.cj = cj = cookielib.LWPCookieJar(cookiefile_path) try: cj.load() except IOError: pass self.opener = requests.Session() self.opener.cookies = cj def login(self, username, password): html = self._request(self.login_url) csrf_token = self._get_csrf_token(html) data = { 'username': username, 'password': password, 'csrfmiddlewaretoken': csrf_token, } html = self._request(self.login_url, data) ok = self._is_loggedin(html) if ok: self.cj.save() return ok def is_loggedin(self): html = self._request('https://algospot.com') return self._is_loggedin(html) def ensure_login(self): if self.is_loggedin(): return while True: username = raw_input('Username: ') password = getpass.getpass() if self.login(username, password): break print 'Login failure.' 
def submit(self, problem, lang, content): url = 'https://algospot.com/judge/problem/submit/{}'.format(problem) html = self._request(url) csrf_token = self._get_csrf_token(html) data = { 'csrfmiddlewaretoken': csrf_token, 'language': lang, 'source': content, } self._request(url, data) def open_recent_submission(self, problem): webbrowser.open('https://algospot.com/judge/submission/recent/?user={}&problem={}'.format(self.username, problem)) def _is_loggedin(self, html): if 'href="/accounts/logout/"' in html: self.username = re.search('<a href="/user/profile/\d+" class="username">([^<]+)</a>', html).group(1).strip() return True else: return False def _get_csrf_token(self, html): return re.search("name='csrfmiddlewaretoken' value='(\w+)'", html).group(1) def _request(self, url, data=None): if data is None: r = self.opener.get(url) else: r = self.opener.post(url, data, headers={'Referer': self.login_url}) return r.content ext_to_lang = { '.java' : 'java', '.scala': 'scala', '.hs' : 'hs', '.py' : 'py', '.js' : 'js', '.rb' : 'rb', '.c' : 'cpp', '.cpp' : 'cpp', '.cxx' : 'cpp', '.cc' : 'cpp', } def guess_language(filename): base, ext = os.path.splitext(filename) ext = ext.lower() return ext_to_lang.get(ext) def guess_problem(filename): filename = os.path.basename(filename) base, ext = os.path.splitext(filename) return re.search('[0-9A-Z]+', base.upper()).group(0) assert guess_language('/path/to/boggle.py') == 'py' assert guess_problem('/path/to/boggle.py') == 'BOGGLE' assert guess_problem('/path/to/snail-recursion.py') == 'SNAIL' assert guess_problem('/path/to/tripathcnt_dp.py') == 'TRIPATHCNT' def main(filepath): lang = guess_language(filepath) if not lang: print 'Language guess fail.' return problem = guess_problem(filepath) try: with open(filepath) as f: content = f.read() except IOError: print "Can't open/read file." 
return site = AlgoSpot() site.ensure_login() site.submit(problem, lang, content) site.open_recent_submission(problem) if __name__ == '__main__': if len(sys.argv) != 2: print 'Usage: {} <file>'.format(sys.argv[0]) sys.exit(1) main(sys.argv[1])
28.647059
122
0.585216
463
3,896
4.784017
0.304536
0.028442
0.028894
0.029797
0.140406
0.088939
0.065914
0.032506
0.032506
0
0
0.003165
0.270021
3,896
135
123
28.859259
0.775668
0.004877
0
0.147826
0
0
0.16671
0.039742
0
0
0
0
0.034783
0
null
null
0.052174
0.069565
null
null
0.034783
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
2
3b9faf565558a1df6837f883c4af01c1961579e5
4,806
py
Python
centersnap/utils.py
ibaiGorordo/ONNX-CenterSnap-6D-Pose-and-Shape-Estimation
f8f98b08cce5259348616db4150064d713f17445
[ "MIT" ]
13
2022-03-19T14:42:50.000Z
2022-03-31T14:04:31.000Z
centersnap/utils.py
ibaiGorordo/ONNX-CenterSnap-6D-Pose-and-Shape-Estimation
f8f98b08cce5259348616db4150064d713f17445
[ "MIT" ]
null
null
null
centersnap/utils.py
ibaiGorordo/ONNX-CenterSnap-6D-Pose-and-Shape-Estimation
f8f98b08cce5259348616db4150064d713f17445
[ "MIT" ]
1
2022-03-24T12:56:25.000Z
2022-03-24T12:56:25.000Z
import numpy as np import cv2 import open3d as o3d from .original_repo_utils import * np.random.seed(3) MAX_CLASS_NUM = 100 # In the original model there are only 7 classes segmenation_colors = np.random.randint(0, 255, (MAX_CLASS_NUM, 3)).astype("uint8") def util_draw_seg(seg_map, image, alpha = 0.5): # Convert segmentation prediction to colors color_segmap = segmenation_colors[seg_map] # Resize to match the image shape color_segmap = cv2.resize(color_segmap, (image.shape[1],image.shape[0])) # Fuse both images if(alpha == 0): combined_img = np.hstack((image, color_segmap)) else: combined_img = cv2.addWeighted(image, alpha, color_segmap, (1-alpha),0) return combined_img def util_draw_depth(depth_map, image, max_depth = 2, alpha = 0.5): # Normalize estimated depth to color it if max_depth: min_depth = 0 depth_map = depth_map/1000 # Convert to meters else: min_depth = depth_map.min() max_depth = depth_map.max() norm_depth_map = 255*(depth_map-min_depth)/(max_depth-min_depth) norm_depth_map[norm_depth_map < 0] =0 norm_depth_map[norm_depth_map >= 255] = 255 # Normalize and color the image color_depth = cv2.applyColorMap(cv2.convertScaleAbs(norm_depth_map,1), cv2.COLORMAP_PLASMA ) # Resize to match the image shape color_depth = cv2.resize(color_depth, (image.shape[1],image.shape[0])) # Fuse both images if(alpha == 0): combined_img = np.hstack((image, color_depth)) else: combined_img = cv2.addWeighted(image, alpha, color_depth, (1-alpha),0) return combined_img def util_draw_heatmap(heatmap, image, alpha = 0.5): # Normalize and color the image color_heatmap = cv2.applyColorMap(cv2.convertScaleAbs(heatmap*255,1), cv2.COLORMAP_JET) # Resize to match the image shape color_heatmap = cv2.resize(color_heatmap, (image.shape[1],image.shape[0])) # Fuse both images if(alpha == 0): combined_img = np.hstack((image, color_heatmap)) else: combined_img = cv2.addWeighted(image, alpha, color_heatmap, (1-alpha),0) return combined_img def util_draw_points2d(points_2d_list, image, 
label_ids): # Normalize and color the image for i, points_2d in enumerate(points_2d_list): color = (int(segmenation_colors[label_ids[i]][0]), int(segmenation_colors[label_ids[i]][1]), int(segmenation_colors[label_ids[i]][2])) for point in points_2d.astype(int): cv2.circle(image, (int(point[0]),int(point[1])), 1, color, -1) return image def util_draw_pose2d(boxes_2d_list, axes_2d_list, image, label_ids): # Normalize and color the image for i, (box, axis) in enumerate(zip(boxes_2d_list, axes_2d_list)): color = (int(segmenation_colors[label_ids[i]][0]*0.5), int(segmenation_colors[label_ids[i]][1]*0.5), int(segmenation_colors[label_ids[i]][2]*0.5)) image = draw_bboxes(image, box, axis, color) return image def util_draw_2d(points_2d_list, boxes_2d_list, axes_2d_list, image, label_ids): image = util_draw_points2d(points_2d_list, image, label_ids) return util_draw_pose2d(boxes_2d_list, axes_2d_list, image, label_ids) class Open3dVisualizer(): def __init__(self): self.point_cloud = o3d.geometry.PointCloud() self.boxes = o3d.geometry.LineSet() self.o3d_started = False self.vis = o3d.visualization.Visualizer() self.vis.create_window() def __call__(self, points_3d_list, boxes_3d_list, is_image = False): self.update(points_3d_list, boxes_3d_list, is_image) def update(self, points_3d_list, boxes_3d_list, is_image = False): # Process points all_points, all_boxes, all_lines = Open3dVisualizer.process_data(points_3d_list, boxes_3d_list) # Add values to vectors self.point_cloud.points = o3d.utility.Vector3dVector(all_points) self.boxes.points = o3d.utility.Vector3dVector(all_boxes) self.boxes.lines = o3d.utility.Vector2iVector(all_lines) # Add geometries if it is the first time if not self.o3d_started: self.vis.add_geometry(self.point_cloud) self.vis.add_geometry(self.boxes) self.o3d_started = True else: self.vis.update_geometry(self.point_cloud) self.vis.update_geometry(self.boxes) self.vis.poll_events() self.vis.update_renderer() @staticmethod def process_data(points_3d_list, 
boxes_3d_list): all_points = points_3d_list[0] all_boxes = boxes_3d_list[0] all_lines = np.array(open_3d_lines) box_count = 0 for points_3d, box_3d in zip(points_3d_list[1:], boxes_3d_list[1:]): box_count += 1 all_points = np.vstack((all_points, points_3d)) all_boxes = np.vstack((all_boxes, box_3d)) all_lines = np.vstack((all_lines, np.array(open_3d_lines)+8*box_count)) # Fix axis to match open3d all_points = -all_points[:,[0,1,2]] all_boxes = -all_boxes[:,[0,1,2]] all_points[:,0] = -all_points[:,0] all_boxes[:,0] = -all_boxes[:,0] return all_points, all_boxes, all_lines
28.105263
97
0.740117
773
4,806
4.324709
0.179819
0.025127
0.025127
0.04487
0.488783
0.44152
0.37242
0.3105
0.223153
0.165121
0
0.038005
0.140449
4,806
170
98
28.270588
0.771242
0.106742
0
0.134021
0
0
0.00117
0
0
0
0
0
0
1
0.103093
false
0
0.041237
0
0.226804
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
3ba02c62d0d88116daac3eef24c8c51ab27ced29
2,519
py
Python
strokes_gained_calculations.py
brentonworley/strokes-gained
f3390de62a8987fd0a73ddb41837f7dcecb29387
[ "MIT" ]
null
null
null
strokes_gained_calculations.py
brentonworley/strokes-gained
f3390de62a8987fd0a73ddb41837f7dcecb29387
[ "MIT" ]
null
null
null
strokes_gained_calculations.py
brentonworley/strokes-gained
f3390de62a8987fd0a73ddb41837f7dcecb29387
[ "MIT" ]
null
null
null
def calculate_strokes_gained(reference_value, user_putts): '''Return the strokes gained based on reference and user input''' return round((reference_value - user_putts), 2) def calculate_strokes_gained_putting(reference_data, user_input): '''Return the strokes gained value from a dictionary of user input {distance, putts} and a list of reference strokes gained data.''' # get the reference distance from the first entry in the baseline data position = 0 not_matched = True # loop through the reference data to find the right value of average putts while not_matched: # set up the reference data baseline_data = reference_data[position] reference_distance = baseline_data['distance'] reference_putts = baseline_data['putts'] min_reference_distance = reference_data[0]['distance'] max_reference_distance = reference_data[-1]['distance'] # first check that the input is within the putt_range if user_input['distance'] < min_reference_distance: # use the lowest value of the reference putts reference_putts = reference_data[0]['putts'] not_matched = False elif user_input['distance'] > max_reference_distance: # use the highest value of the reference putts reference_putts = reference_data[-1]['putts'] not_matched = False # if we get an exact match elif user_input['distance'] == reference_distance: reference_putts = reference_data[position]['putts'] not_matched = False # if the putt distance sits between baseline values elif user_input['distance'] < reference_distance and user_input['distance'] > last_distance: distance_range = reference_distance - last_distance putt_range = reference_putts - last_putts proportion = (user_input['distance'] - last_distance)/distance_range #update the reference_putts reference_putts = round(last_putts + (putt_range * proportion), 2) not_matched = False # keep track of the last distance if you don't get an exact match last_distance = reference_distance last_putts = reference_putts position += 1 print(f"Your input of distance of {user_input['distance']} 
feet equates to a tour averge of {reference_putts} putts") strokes_gained = calculate_strokes_gained(reference_putts, user_input['putts']) return strokes_gained
45.8
121
0.687574
314
2,519
5.283439
0.254777
0.101266
0.081977
0.047016
0.203134
0.157926
0.112116
0.061483
0.061483
0
0
0.004199
0.243748
2,519
54
122
46.648148
0.866667
0.262406
0
0.125
0
0.03125
0.111232
0.013086
0
0
0
0
0
1
0.0625
false
0
0
0
0.125
0.03125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e535a0eaed4fb2eca117828f9d5fa6d60c950b3
8,988
py
Python
CRF/cnn_word_seg_torch.py
enjlife/bert4torch
53694060fed0351649f87c79381740851a4a0b42
[ "Apache-2.0" ]
5
2021-09-09T03:25:58.000Z
2022-02-22T06:43:08.000Z
CRF/cnn_word_seg_torch.py
enjlife/bert4torch
53694060fed0351649f87c79381740851a4a0b42
[ "Apache-2.0" ]
1
2022-02-18T07:46:46.000Z
2022-02-20T10:05:25.000Z
CRF/cnn_word_seg_torch.py
enjlife/bert4torch
53694060fed0351649f87c79381740851a4a0b42
[ "Apache-2.0" ]
null
null
null
import os import torch.nn from torch import nn from crf_torch import CRF import re import random import time from torch.optim import Adam import torch.nn.functional as F from datetime import timedelta # TODO 准确率计算函数的bug修复 def get_time_dif(start_time): """获取已使用时间""" end_time = time.time() time_dif = end_time - start_time return timedelta(seconds=int(round(time_dif))) class CnnWordSeg(nn.Module): """CNN 分词""" def __init__(self, config): super(CnnWordSeg, self).__init__() vocab_size = config.vocab_size hidden_size = config.hidden_size num_labels = config.num_labels self.embedding = nn.Embedding(vocab_size, hidden_size, padding_idx=0) self.conv1 = torch.nn.Sequential( # 这里采用重复填充 padding=1填充一层 torch.nn.Conv1d(in_channels=hidden_size, out_channels=hidden_size, kernel_size=3, stride=1, padding=1, padding_mode='replicate'), torch.nn.ReLU() ) self.conv2 = torch.nn.Sequential( torch.nn.Conv1d(hidden_size, hidden_size, 3, 1, 1, padding_mode='replicate'), torch.nn.ReLU() ) self.conv3 = torch.nn.Sequential( torch.nn.Conv1d(hidden_size, hidden_size, 3, 1, 1, padding_mode='replicate'), torch.nn.ReLU() ) self.dense = nn.Linear(hidden_size, 4) self.crf = CRF(num_tags=num_labels, batch_first=True) def forward(self, x, y, mask, test=False): hidden_state = self.embedding(x) # (batch,seq_len,hidden_size) hidden_state = hidden_state.permute(0, 2, 1) # 一维卷积是在length维度 hidden_state = self.conv1(hidden_state) hidden_state = self.conv2(hidden_state) hidden_state = self.conv3(hidden_state) hidden_state = hidden_state.permute(0, 2, 1) hidden_state = self.dense(hidden_state) if not test: hidden_state = self.crf(hidden_state, y, mask) else: hidden_state = self.crf.decode(hidden_state, mask) return hidden_state class DatasetIterater(object): def __init__(self, data_list, batch_size, device): self.batch_size = batch_size self.data_list = data_list self.n_batches = len(data_list) // batch_size self.residue = False # 记录batch数量是否为整数 if len(data_list) % self.n_batches != 0: self.residue = True 
self.index = 0 self.device = device def _to_tensor(self, datas): max_len = max([len(data[0]) for data in datas]) x = torch.LongTensor([data[0] + [0]*(max_len-len(data[0])) for data in datas]).to(self.device) y = torch.LongTensor([data[1] + [0]*(max_len-len(data[0])) for data in datas]).to(self.device) mask = torch.ByteTensor([data[2] + [0]*(max_len-len(data[0])) for data in datas]).to(self.device) return x, y, mask def __next__(self): if self.residue and self.index == self.n_batches: batches = self.data_list[self.index * self.batch_size: len(self.data_list)] self.index += 1 batches = self._to_tensor(batches) return batches elif self.index >= self.n_batches: self.index = 0 raise StopIteration else: batches = self.data_list[self.index * self.batch_size: (self.index + 1) * self.batch_size] self.index += 1 batches = self._to_tensor(batches) return batches def __iter__(self): return self def __len__(self): if self.residue: return self.n_batches + 1 else: return self.n_batches def build_dataset(path, max_len=32): sents = open(path, 'r', encoding='utf8').read().strip().split('\n') sents = [re.split(' +', s) for s in sents] # 词之间以两个空格隔开 sents = [[w for w in s if w] for s in sents] # 去掉空字符串 random.shuffle(sents) # 打乱语料,以便后面划分验证集 def build_vocab(sents, min_count=2): chars = {} for s in sents: for c in ''.join(s): if c in chars: chars[c] += 1 else: chars[c] = 1 chars = {i: j for i, j in chars.items() if j >= min_count} id2char = {i+1: j for i, j in enumerate(chars.keys())} char2id = {j: i for i, j in id2char.items()} return id2char, char2id id2char, char2id = build_vocab(sents) def to_id(): datasets = [] for s in sents: x, y = [], [] for w in s: if not all(c in char2id for c in w): continue x.extend([char2id[c] for c in w]) if len(w) == 1: y.append(0) elif len(w) == 2: y.extend([1, 3]) else: y.extend([1] + [2] * (len(w) - 2) + [3]) if x: datasets.append((x, y, [1]*len(x))) # x,y,mask return datasets data = to_id() trains, valids = data[:-5000], data[-5000:] return trains, 
valids, id2char, char2id class Train: def __init__(self, model, train_iter, dev_iter, config): self.model = model self.train_iter = train_iter self.dev_iter = dev_iter self.config = config def train(self): start_time = time.time() self.model.train() optimizer = Adam(self.model.parameters(), lr=self.config.lr) total_batch = 0 # 记录进行到多少batch dev_best_loss = float('inf') # dev 最小loss for epoch in range(self.config.num_epochs): print('Epoch [{}/{}]'.format(epoch + 1, self.config.num_epochs)) for i, (x, y, mask) in enumerate(self.train_iter): self.model.zero_grad() loss = self.model(x, y, mask) loss.backward() optimizer.step() if total_batch % 100 == 0: y_pre = self.model(x, y, mask, test=True) y_true = y.cpu().numpy().tolist() mask = mask.cpu().numpy().sum(axis=1).tolist() train_acc, rec = self.cal_acc(y_pre, y_true, mask) dev_loss, dev_acc, dev_rec = self.evaluate() if dev_loss < dev_best_loss: dev_best_loss = dev_loss torch.save(model.state_dict(), config.save_path) improve = '*' else: improve = '' time_dif = get_time_dif(start_time) msg = 'Iter: {0:>6}, Train Loss: {1:>5.2}, Train Acc: {2:>6.2%}, Rec: {3:>6.2%}, Val Loss: {4:>5.2}, Val Acc: {5:>6.2%}, Time: {6} {7}' print(msg.format(total_batch, loss.item(), train_acc, rec, dev_loss, dev_acc, time_dif, improve)) model.train() total_batch += 1 def evaluate(self): self.model.eval() loss_total = 0.0 acc_total = 0.0 rec_total = 0.0 n = 0 with torch.no_grad(): for x, y, mask in self.dev_iter: loss = self.model(x, y, mask) loss_total += loss.item() y_pre = self.model(x, y, mask, test=True) y_true = y.cpu().numpy().tolist() mask = mask.cpu().numpy().sum(axis=1).tolist() acc, rec = self.cal_acc(y_pre, y_true, mask) acc_total += acc rec_total += rec n += 1 return loss_total/n, acc_total/n, rec_total/n # 重写了准确率计算的函数,有bug待修复 def cal_acc(self, y_pre, y_true, mask): n = len(y_pre) acc, rec = 0.0, 0.0 for i in range(n): length = mask[i] tp = y_pre[i][:length] tt = y_true[i][:length] tt = set([i*2 + x for i, x in enumerate(tt) if 
x == 0 or x == 1]) tp = set([i*2 + x for i, x in enumerate(tp) if x == 0 or x == 1]) acc += len(tt & tp) / (len(tp)+1) rec += len(tt & tp) / (len(tt)+1) return acc/n, rec/n class Config: def __init__(self): self.lr = 1e-3 self.num_epochs = 10 self.batch_size = 128 self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.num_labels = 4 self.hidden_size = 128 self.path = '../data/icwb2/msr_training.utf8' self.num_labels = 4 self.vocab_size = 0 self.save_path = 'model.ckpt' if __name__ == '__main__': config = Config() train_data, valid_data, id2char, char2id = build_dataset(config.path) config.vocab_size = len(id2char) + 1 train_iter = DatasetIterater(train_data, config.batch_size, config.device) valid_iter = DatasetIterater(valid_data, config.batch_size, config.device) model = CnnWordSeg(config).cuda(0) train = Train(model, train_iter, valid_iter, config) train.train()
36.836066
161
0.549622
1,213
8,988
3.892828
0.176422
0.041931
0.011436
0.023295
0.275307
0.212622
0.191233
0.176832
0.155654
0.128971
0
0.024495
0.32777
8,988
244
162
36.836066
0.757034
0.022363
0
0.156398
0
0.004739
0.027607
0.003536
0
0
0
0.004098
0
1
0.075829
false
0
0.047393
0.004739
0.203791
0.009479
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e54656185e027ab6cdc457485c3e4f7aee1306c
1,636
py
Python
gs_quant/backtests/execution_engine.py
skyquant2/gs-quant
b7e648fa7912b13ad1fd503b643389e34587aa1e
[ "Apache-2.0" ]
2
2021-06-22T12:14:38.000Z
2021-06-23T15:51:08.000Z
gs_quant/backtests/execution_engine.py
skyquant2/gs-quant
b7e648fa7912b13ad1fd503b643389e34587aa1e
[ "Apache-2.0" ]
null
null
null
gs_quant/backtests/execution_engine.py
skyquant2/gs-quant
b7e648fa7912b13ad1fd503b643389e34587aa1e
[ "Apache-2.0" ]
null
null
null
""" Copyright 2019 Goldman Sachs. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from gs_quant.backtests.data_handler import DataHandler from gs_quant.backtests.event import * import datetime as dt class ExecutionEngine(object): pass class SimulatedExecutionEngine(ExecutionEngine): def __init__(self, data_handler: DataHandler): self.data_handler = data_handler self.orders = [] def submit_order(self, order: OrderEvent): self.orders.append(order) self.orders.sort(key=lambda e: e.order.execution_end_time()) def ping(self, state: dt.datetime): fill_events = [] while self.orders: order: OrderBase = self.orders[0].order end_time = order.execution_end_time() if end_time > state: break else: fill = FillEvent(order=order, filled_price=order.execution_price(self.data_handler), filled_units=order.execution_quantity(self.data_handler)) fill_events.append(fill) self.orders.pop(0) return fill_events
33.387755
90
0.675428
209
1,636
5.162679
0.526316
0.055607
0.055607
0.029657
0
0
0
0
0
0
0
0.00817
0.251834
1,636
48
91
34.083333
0.873366
0.337408
0
0
0
0
0
0
0
0
0
0
0
1
0.115385
false
0.038462
0.115385
0
0.346154
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e54b6e75de5f4de964911c5a74139115880c479
19,578
py
Python
biosimulators_opencor/utils.py
biosimulators/Biosimulators_OpenCOR
e00645e372baf7475957af9487856ad9ddd18814
[ "MIT" ]
null
null
null
biosimulators_opencor/utils.py
biosimulators/Biosimulators_OpenCOR
e00645e372baf7475957af9487856ad9ddd18814
[ "MIT" ]
null
null
null
biosimulators_opencor/utils.py
biosimulators/Biosimulators_OpenCOR
e00645e372baf7475957af9487856ad9ddd18814
[ "MIT" ]
null
null
null
""" Utilities for OpenCOR :Author: Jonathan Karr <karr@mssm.edu> :Date: 2021-05-28 :Copyright: 2021, BioSimulators Team :License: MIT """ from .data_model import KISAO_ALGORITHM_MAP from biosimulators_utils.config import get_config, Config # noqa: F401 from biosimulators_utils.data_model import ValueType # noqa: F401 from biosimulators_utils.log.data_model import TaskLog # noqa: F401 from biosimulators_utils.report.data_model import VariableResults # noqa: F401 from biosimulators_utils.sedml.data_model import ( # noqa: F401 SedDocument, ModelLanguage, ModelAttributeChange, UniformTimeCourseSimulation, Algorithm, Task, RepeatedTask, VectorRange, SubTask, DataGenerator, Variable) from biosimulators_utils.sedml.io import SedmlSimulationWriter from biosimulators_utils.sedml import validation from biosimulators_utils.simulator.utils import get_algorithm_substitution_policy from biosimulators_utils.utils.core import validate_str_value, raise_errors_warnings from biosimulators_utils.warnings import warn, BioSimulatorsWarning from kisao.data_model import AlgorithmSubstitutionPolicy, ALGORITHM_SUBSTITUTION_POLICY_LEVELS from kisao.utils import get_preferred_substitute_algorithm_by_ids from unittest import mock import copy import lxml.etree import opencor import os import tempfile __all__ = [ 'validate_task', 'validate_variable_xpaths', 'validate_simulation', 'get_opencor_algorithm', 'get_opencor_parameter_value', 'build_opencor_sedml_doc', 'save_task_to_opencor_sedml_file', 'load_opencor_simulation', 'validate_opencor_simulation', 'get_results_from_opencor_simulation', 'log_opencor_execution', 'get_mock_libcellml', ] def validate_task(task, variables, config=None): """ Validate that a simulation can be executed with OpenCOR Args: task (:obj:`Task`): request simulation task variables (:obj:`list` of :obj:`Variable`): variables that should be recorded config (:obj:`Config`, optional): BioSimulators common configuration Returns: :obj:`tuple:`: * :obj:`Task`: possibly alternate 
task that OpenCOR should execute * :obj:`lxml.etree._ElementTree`: element tree for model * :obj:`dict`: dictionary that maps the id of each SED variable to the name that OpenCOR uses to reference it """ config = config or get_config() model = task.model sim = task.simulation if config.VALIDATE_SEDML: raise_errors_warnings(validation.validate_task(task), error_summary='Task `{}` is invalid.'.format(task.id)) raise_errors_warnings(validation.validate_model_language(model.language, ModelLanguage.CellML), error_summary='Language for model `{}` is not supported.'.format(model.id)) raise_errors_warnings(validation.validate_model_change_types(model.changes, (ModelAttributeChange,)), error_summary='Changes for model `{}` are not supported.'.format(model.id)) raise_errors_warnings(*validation.validate_model_changes(model), error_summary='Changes for model `{}` are invalid.'.format(model.id)) raise_errors_warnings(validation.validate_simulation_type(sim, (UniformTimeCourseSimulation, )), error_summary='{} `{}` is not supported.'.format(sim.__class__.__name__, sim.id)) raise_errors_warnings(*validation.validate_simulation(sim), error_summary='Simulation `{}` is invalid.'.format(sim.id)) raise_errors_warnings(*validation.validate_data_generator_variables(variables), error_summary='Data generator variables for task `{}` are invalid.'.format(task.id)) # read model; TODO: support imports model_etree = lxml.etree.parse(model.source) # validate variables opencor_variable_names = validate_variable_xpaths(variables, model_etree) # validate simulation opencor_simulation = validate_simulation(task.simulation) # check that OpenCOR can execute the request algorithm (or a similar one) opencor_algorithm = get_opencor_algorithm(task.simulation.algorithm, config=config) # create new task to manage configuration for OpenCOR opencor_task = copy.deepcopy(task) opencor_task.simulation = opencor_simulation opencor_task.simulation.algorithm = opencor_algorithm return opencor_task, model_etree, 
opencor_variable_names def validate_variable_xpaths(sed_variables, model_etree): """ Get the names OpenCOR uses to refer to model variable Args: model_etree (:obj:`lxml.etree._ElementTree`): element tree for model sed_variables (:obj:`list` of :obj:`Variable`): SED variables Returns: :obj:`dict`: dictionary that maps the id of each SED variable to the name that OpenCOR uses to reference it """ opencor_variable_names = {} for sed_variable in sed_variables: if not sed_variable.target: msg = 'Symbols are not supported.' raise NotImplementedError(msg) namespaces = copy.copy(sed_variable.target_namespaces) namespaces.pop(None, None) obj_target, _, attrib_target = sed_variable.target.partition('/@') xml_objs = model_etree.xpath(obj_target, namespaces=namespaces) if len(xml_objs) == 0: msg = ( 'XPath targets of variables must reference unique observables. ' 'The target `{}` of variable `{}` does not match any model elements.' ).format(sed_variable.target, sed_variable.id) raise ValueError(msg) if len(xml_objs) > 1: msg = ( 'XPath targets of variables must reference unique observables. ' 'The target `{}` of variable `{}` matches multiple model elements.' 
).format(sed_variable.target, sed_variable.id) raise ValueError(msg) xml_obj = xml_objs[0] names = [] while True: name = xml_obj.attrib.get('name', None) names.append(name) xml_obj = xml_obj.getparent() ns, _, tag = xml_obj.tag[1:].partition('}') if not name or not ns.startswith('http://www.cellml.org/cellml/'): msg = 'Target `{}` of variable `{}` is not a valid observable.'.format(sed_variable.target, sed_variable.id) raise ValueError(msg) if tag == 'model': break if attrib_target: names.insert(0, attrib_target) opencor_variable_names[sed_variable.id] = '/'.join(reversed(names)) return opencor_variable_names def validate_simulation(simulation): """ Validate a simulation Args: simulation (:obj:`UniformTimeCourseSimulation`): requested simulation Returns: :obj:`UniformTimeCourseSimulation`: simulation instructions for OpenCOR """ number_of_steps = ( simulation.output_end_time - simulation.initial_time ) / ( simulation.output_end_time - simulation.output_start_time ) * simulation.number_of_steps output_start_time = simulation.initial_time if abs(number_of_steps - round(number_of_steps)) > 1e-8: msg = ( 'Number of steps must be an integer, not `{}`:' '\n Initial time: {}' '\n Output start time: {}' '\n Output end time: {}' '\n Number of steps (output start - end time) time: {}' ).format( number_of_steps, simulation.initial_time, simulation.output_start_time, simulation.output_end_time, simulation.number_of_steps, ) raise NotImplementedError(msg) else: number_of_steps = round(number_of_steps) opencor_simulation = copy.deepcopy(simulation) opencor_simulation.number_of_steps = number_of_steps opencor_simulation.output_start_time = output_start_time return opencor_simulation def get_opencor_algorithm(requested_alg, config=None): """ Get a possibly alternative algorithm that OpenCOR should execute Args: requested_alg (:obj:`Algorithm`): requested algorithm config (:obj:`Config`, optional): configuration Returns: :obj:`Algorithm`: possibly alternative algorithm that OpenCOR 
should execute """ exec_alg = copy.deepcopy(requested_alg) algorithm_substitution_policy = get_algorithm_substitution_policy(config=config) exec_alg.kisao_id = get_preferred_substitute_algorithm_by_ids( requested_alg.kisao_id, KISAO_ALGORITHM_MAP.keys(), substitution_policy=algorithm_substitution_policy) if exec_alg.kisao_id == requested_alg.kisao_id: alg_specs = KISAO_ALGORITHM_MAP[exec_alg.kisao_id] params_specs = alg_specs['parameters'] for change in list(exec_alg.changes): param_specs = params_specs.get(change.kisao_id, None) if param_specs: is_valid, change.new_value = get_opencor_parameter_value( change.new_value, param_specs['type'], param_specs.get('enum', None)) if not is_valid: if ( ALGORITHM_SUBSTITUTION_POLICY_LEVELS[algorithm_substitution_policy] > ALGORITHM_SUBSTITUTION_POLICY_LEVELS[AlgorithmSubstitutionPolicy.NONE] ): warn('Unsupported value `{}` of {}-valued algorithm parameter `{}` (`{}`) was ignored.'.format( change.new_value, param_specs['type'].name, param_specs['name'], change.kisao_id), BioSimulatorsWarning) exec_alg.changes.remove(change) else: msg = '`{}` (`{}`) must a {}, not `{}`.'.format( param_specs['name'], change.kisao_id, param_specs['type'].name, change.new_value) raise ValueError(msg) else: if ( ALGORITHM_SUBSTITUTION_POLICY_LEVELS[algorithm_substitution_policy] > ALGORITHM_SUBSTITUTION_POLICY_LEVELS[AlgorithmSubstitutionPolicy.NONE] ): warn('Unsupported algorithm parameter `{}` was ignored.'.format( change.kisao_id), BioSimulatorsWarning) exec_alg.changes.remove(change) else: msg = '{} ({}) does not support parameter `{}`. 
{} support the following parameters:\n {}'.format( alg_specs['name'], alg_specs['kisao_id'], change.kisao_id, alg_specs['name'], '\n '.join(sorted('{}: {}'.format(param_kisao_id, param_specs['name']) for param_kisao_id, param_specs in params_specs.items())) ) raise NotImplementedError(msg) else: exec_alg.changes = [] return exec_alg def get_opencor_parameter_value(value, value_type, enum_cls=None): """ Get the OpenCOR representation of a value of a parameter Args: value (:obj:`str`): string-encoded parameter value value_type (:obj:`ValueType`): expected type of the value enum_cls (:obj:`type`): allowed values of the parameter Returns: :obj:`tuple`: * :obj:`bool`: whether the value is valid * :obj:`str`: OpenCOR representation of a value of a parameter """ if not validate_str_value(value, value_type): return False, None if enum_cls: try: return True, enum_cls[value].value except KeyError: pass try: return True, enum_cls[value.replace('KISAO:', 'KISAO_')].value except KeyError: pass try: return True, enum_cls(value).value except ValueError: pass return False, None else: return True, value def build_opencor_sedml_doc(task, variables, include_data_generators=False): """ Create an OpenCOR-compatible SED-ML document for a task and its output variables Args: task (:obj:`Task`): SED task variables (:obj:`list` of :obj:`Variable`): SED variables include_data_generators (:obj:`bool`, optional): whether to export data generators Returns: :obj:`SedDocument`: SED document """ doc = SedDocument() model_copy = copy.deepcopy(task.model) model_copy.id = 'model' model_copy.source = os.path.abspath(model_copy.source) doc.models.append(model_copy) sim_copy = copy.deepcopy(task.simulation) sim_copy.id = 'simulation1' doc.simulations.append(sim_copy) basic_task = Task(id='task1', model=model_copy, simulation=sim_copy) repeated_task = RepeatedTask( id='repeatedTask', range=VectorRange(id="once", values=[1]), sub_tasks=[ SubTask(order=1, task=basic_task), ], 
reset_model_for_each_iteration=True, ) repeated_task.ranges = [repeated_task.range] doc.tasks.append(basic_task) doc.tasks.append(repeated_task) if include_data_generators: for variable in variables: doc.data_generators.append( DataGenerator( id='data_generator_' + variable.id, variables=[ Variable(id=variable.id, target=variable.target, target_namespaces=variable.target_namespaces, task=repeated_task), ], math=variable.id, ) ) return doc def save_task_to_opencor_sedml_file(task, variables, include_data_generators=False): """ Save a SED task to an OpenCOR-compatible SED-ML file Args: task (:obj:`Task`): SED task variables (:obj:`list` of :obj:`Variable`): SED variables include_data_generators (:obj:`bool`, optional): whether to export data generators Returns: :obj:`str`: path to SED-ML file for the SED document """ doc = build_opencor_sedml_doc(task, variables, include_data_generators=include_data_generators) fid, sed_filename = tempfile.mkstemp(suffix='.sedml') os.close(fid) doc.models[0].source = os.path.relpath(doc.models[0].source, os.path.dirname(sed_filename)) # use a mocked version because libCellML cannot be installed into the OpenCOR docker image with mock.patch.dict('sys.modules', libcellml=get_mock_libcellml()): SedmlSimulationWriter().run(doc, sed_filename, validate_models_with_languages=False) return sed_filename def load_opencor_simulation(task, variables, include_data_generators=False): """ Load an OpenCOR simulation Args: task (:obj:`Task`): SED task variables (:obj:`list` of :obj:`Variable`): SED variables include_data_generators (:obj:`bool`, optional): whether to export data generators Returns: :obj:`PythonQt.private.SimulationSupport.Simulation`: OpenCOR simulation """ # save SED-ML to a file filename = save_task_to_opencor_sedml_file(task, variables, include_data_generators=include_data_generators) # Read the SED-ML file try: opencor_sim = opencor.open_simulation(filename) finally: # clean up temporary SED-ML file os.remove(filename) 
validate_opencor_simulation(opencor_sim) return opencor_sim def validate_opencor_simulation(sim): """ Validate an OpenCOR simulation Args: sim (:obj:`PythonQt.private.SimulationSupport.Simulation`): OpenCOR simulation) Raises: :obj:`ValueError`: if the simulation is invalid """ if sim.hasBlockingIssues() or not sim.valid(): msg = 'The task does not describe a valid simulation:\n\n {}'.format( '\n\n '.join( ''.join(lxml.etree.fromstring('<root>' + issue + '</root>').itertext()) for issue in sim.issues() ) ) raise ValueError(msg) def get_results_from_opencor_simulation(opencor_sim, sed_task, sed_variables, opencor_variable_names): """ Get the results of SED variables from an OpenCOR simulation Args: opencor_sim (:obj:`PythonQt.private.SimulationSupport.Simulation`): OpenCOR simulation sed_task (:obj:`Task`): requested SED task sed_variables (:obj:`list` of :obj:`Variable`): SED variables opencor_variable_names (:obj:`dict`): dictionary that maps the id of each SED variable to the name that OpenCOR uses to reference it) Returns: :obj:`VariableResults`: results of the SED variables """ opencor_results = opencor_sim.results() opencor_voi_results = opencor_results.voi() opencor_states_results = opencor_results.states() opencor_rates_results = opencor_results.rates() opencor_constants_results = opencor_results.constants() opencor_algebraic_results = opencor_results.algebraic() sed_results = VariableResults() invalid_variables = [] for sed_variable in sed_variables: opencor_name = opencor_variable_names[sed_variable.id] if opencor_name == opencor_voi_results.uri(): sed_results[sed_variable.id] = opencor_voi_results.values()[-(sed_task.simulation.number_of_steps + 1):] elif opencor_name in opencor_states_results: sed_results[sed_variable.id] = opencor_states_results[opencor_name].values()[-(sed_task.simulation.number_of_steps + 1):] elif opencor_name in opencor_rates_results: sed_results[sed_variable.id] = 
opencor_rates_results[opencor_name].values()[-(sed_task.simulation.number_of_steps + 1):] elif opencor_name in opencor_constants_results: sed_results[sed_variable.id] = opencor_constants_results[opencor_name].values()[-(sed_task.simulation.number_of_steps + 1):] elif opencor_name in opencor_algebraic_results: sed_results[sed_variable.id] = opencor_algebraic_results[opencor_name].values()[-(sed_task.simulation.number_of_steps + 1):] else: invalid_variables.append('{}: {}'.format(sed_variable.id, sed_variable.target)) if invalid_variables: msg = ( 'The target of each variable must be a valid observable. ' 'The targets of the following variables are not valid observables.\n {}' ).format('\n '.join(invalid_variables)) raise ValueError(msg) return sed_results def log_opencor_execution(task, log): """ Log information about how OpenCOR was used to execute the simulation Args: task (:obj:`Task`): SED task log (:obj:`TaskLog`): execution log """ log.algorithm = task.simulation.algorithm.kisao_id log.simulator_details = { 'method': 'OpenCOR.SimulationSupport.Simulation.run', 'algorithmParameters': [ {'kisaoID': change.kisao_id, 'value': change.new_value} for change in task.simulation.algorithm.changes ], } def get_mock_libcellml(): """ Get a mocked version of libCellML Returns: :obj:`mock.Mock`: mocked libcellml module """ return mock.Mock( Parser=lambda: mock.Mock( parseModel=lambda: None, errorCount=lambda: 0, warningCount=lambda: 0, ), Validator=lambda: mock.Mock( validateModel=lambda model: None, errorCount=lambda: 0, warningCount=lambda: 0, ), )
38.53937
141
0.659056
2,234
19,578
5.54342
0.149955
0.02043
0.017846
0.014858
0.383398
0.332122
0.276244
0.221657
0.187419
0.171592
0
0.003321
0.246348
19,578
507
142
38.615385
0.835988
0.209521
0
0.187291
0
0
0.115847
0.018057
0
0
0
0.001972
0
1
0.040134
false
0.010033
0.063545
0
0.153846
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e54c567814fe73688defe099ca5f7a9a0fccd51
432
py
Python
estrategias/Gabrielp.py
cadualves/jogos_vorazes
e8caf062fc58b6188e94152084383940c96dba9a
[ "MIT" ]
8
2017-10-11T19:49:28.000Z
2021-06-16T12:20:39.000Z
estrategias/Gabrielp.py
cadualves/jogos_vorazes
e8caf062fc58b6188e94152084383940c96dba9a
[ "MIT" ]
6
2017-02-02T18:26:34.000Z
2018-11-08T16:32:52.000Z
estrategias/Gabrielp.py
cadualves/jogos_vorazes
e8caf062fc58b6188e94152084383940c96dba9a
[ "MIT" ]
26
2016-01-15T16:20:57.000Z
2018-12-11T21:32:57.000Z
from .jogadores import Jogador class MeuJogador(Jogador): def escolha_de_cacada(self, rodada, comida_atual, reputacao_atual, m, reputacoes_dos_jogadores): if comida_atual <= 5: escolhas = ['d' for x in reputacoes_dos_jogadores] return escolhas else: escolhas = ['d' if x > 0.8333 else 'c' if x > 0.1667 else 'd' for x in reputacoes_dos_jogadores] return escolhas
39.272727
108
0.650463
58
432
4.655172
0.534483
0.144444
0.244444
0.051852
0.318519
0.318519
0.318519
0.318519
0.318519
0
0
0.034921
0.270833
432
11
109
39.272727
0.822222
0
0
0.222222
0
0
0.009238
0
0
0
0
0
0
1
0.111111
false
0
0.111111
0
0.555556
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
8e559b65f4bffc816f6acc36951ebd073cffa8c9
3,407
py
Python
arpym/statistics/saddle_point_quadn.py
dpopadic/arpmRes
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
[ "MIT" ]
6
2021-04-10T13:24:30.000Z
2022-03-26T08:20:42.000Z
arpym/statistics/saddle_point_quadn.py
dpopadic/arpmRes
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
[ "MIT" ]
null
null
null
arpym/statistics/saddle_point_quadn.py
dpopadic/arpmRes
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
[ "MIT" ]
6
2019-08-13T22:02:17.000Z
2022-02-09T17:49:12.000Z
# -*- coding: utf-8 -*- import numpy as np from scipy.stats import norm from scipy.optimize import brentq from arpym.tools.transpose_square_root import transpose_square_root def saddle_point_quadn(y, alpha, beta, gamma, mu, sigma2): """For details, see here. Parameters ---------- y : array, shape(j_,) alpha : scalar beta : array, shape(n_,) gamma : array, shape(n_, n_) mu : array, shape(n_,) sigma2 : array, shape(n_, n_) Returns ------- cdf : array, shape(j_,) pdf : array, shape(j_,) """ y = np.asarray(y).copy().reshape(-1) beta = np.asarray(beta).copy().reshape(-1, 1) mu = np.asarray(mu).copy().reshape(-1, 1) j_ = len(y) # Step 1: Compute the eigenvalues and eigenvectors of l.T @ gamma @ l l = transpose_square_root(sigma2, 'Cholesky') lam, e = np.linalg.eig(l.T @ gamma @ l) lam = lam.reshape(-1, 1) # Step 2: Compute transformed parameters alpha_tilde = alpha + beta.T @ mu + mu.T @ gamma @ mu beta_tilde = beta + 2*gamma @ mu gamma_tilde = e.T @ l.T @ beta_tilde # Step 3: Compute the log-characteristic function and its derivatives # log-characteristic function def c_y(w): return alpha_tilde * w - 0.5 * np.sum(np.log(1 - 2.*w*lam) - w**2 * gamma_tilde**2 / (1 - 2.*w*lam)) # first derivative def c_y_prime(w): return alpha_tilde + np.sum(lam / (1 - 2.*w*lam) + gamma_tilde**2 * (w - w**2 * lam) / (1 - 2.*w*lam)**2) # second derivative def c_y_second(w): return np.array([np.sum(2. 
* (lam / (1 - 2.*w*lam))**2 + gamma_tilde**2 / (1 - 2.*w*lam)**3)]) # Step 4: Find w_hat numerically using Brent's method lam_max = np.max(lam) lam_min = np.min(lam) if lam_max > 0: w_max = (1 - 1e-5) / (2 * lam_max) else: w_max = 1e20 if lam_min < 0: w_min = (1 + 1e-5) / (2 * lam_min) else: w_min = -1e20 y_min = c_y_prime(w_min) y_max = c_y_prime(w_max) # initialize w_hat = np.zeros(j_) c_y_w_hat = np.zeros(j_) # c(w_hat) c_y_second_w_hat = np.zeros(j_) # c''(w_hat) idx = np.argsort(y) w_last = w_min for j in range(j_): if y[idx[j]] <= y_min: w_hat[idx[j]] = w_min elif y[idx[j]] >= y_max: w_hat[idx[j]] = w_max else: # Brent’s method for finding the root of the function. # Since y is sorted and c_y_prime is a monotone increasing function # it is guaranteed that the solution w is in the interval # [w_last, w_max]. w_hat[idx[j]] = brentq(lambda w: c_y_prime(w) - y[idx[j]], w_last, w_max) w_last = w_hat[idx[j]] c_y_w_hat[idx[j]] = c_y(w_hat[idx[j]]) c_y_second_w_hat[idx[j]] = c_y_second(w_hat[idx[j]]) # Step 5: Compute cdf and pdf r = np.sign(w_hat) * np.sqrt(2. * (w_hat * y - c_y_w_hat)) u = w_hat * np.sqrt(c_y_second_w_hat) cdf = norm.cdf(r) - norm.pdf(r) * (1. / u - 1. / r) pdf = np.exp(c_y_w_hat - w_hat * y) / np.sqrt(2 * np.pi * c_y_second_w_hat) return np.squeeze(cdf), np.squeeze(pdf)
30.693694
79
0.528618
546
3,407
3.089744
0.221612
0.052164
0.037344
0.037937
0.160047
0.097214
0.089508
0.07706
0.035566
0.035566
0
0.026304
0.330496
3,407
110
80
30.972727
0.713284
0.249486
0
0.050847
0
0
0.003222
0
0
0
0
0
0
1
0.067797
false
0
0.067797
0.050847
0.20339
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e56ed966f7b3a1a052979308f0135e0e02265d6
114
py
Python
web/wsgi_dozer.py
LinuxOSsk/Shakal-NG
c4091c7972cffd86f64aa9f9a058d2907a56e5eb
[ "MIT" ]
10
2016-02-06T15:40:30.000Z
2018-09-27T15:15:13.000Z
web/wsgi_dozer.py
LinuxOSsk/Shakal-NG
c4091c7972cffd86f64aa9f9a058d2907a56e5eb
[ "MIT" ]
94
2016-02-04T18:39:36.000Z
2022-01-20T05:25:00.000Z
web/wsgi_dozer.py
LinuxOSsk/Shakal-NG
c4091c7972cffd86f64aa9f9a058d2907a56e5eb
[ "MIT" ]
8
2016-05-10T20:29:53.000Z
2021-02-07T00:50:31.000Z
# -*- coding: utf-8 -*- from dozer import Dozer from .wsgi import application application = Dozer(application)
14.25
32
0.719298
14
114
5.857143
0.571429
0
0
0
0
0
0
0
0
0
0
0.010526
0.166667
114
7
33
16.285714
0.852632
0.184211
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
8e5772016ce9d093b9d855bc35d35474f74a9187
101
py
Python
abstract-codegen/src/atmfjstc/lib/abstract_codegen/ast/__init__.py
goc9000/python-library
0a4a09278df6e84061baedda8997071e2201103f
[ "MIT" ]
null
null
null
abstract-codegen/src/atmfjstc/lib/abstract_codegen/ast/__init__.py
goc9000/python-library
0a4a09278df6e84061baedda8997071e2201103f
[ "MIT" ]
null
null
null
abstract-codegen/src/atmfjstc/lib/abstract_codegen/ast/__init__.py
goc9000/python-library
0a4a09278df6e84061baedda8997071e2201103f
[ "MIT" ]
null
null
null
""" This package groups all the built-in node classes offered by the `abstract_codegen` package. """
25.25
92
0.752475
15
101
5
0.866667
0
0
0
0
0
0
0
0
0
0
0
0.148515
101
3
93
33.666667
0.872093
0.910891
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
8e57c1d666f0e679e553435b63623e54ee15e34a
320
py
Python
hardware/dht/__init__.py
jpalczewski/pills
ab0cf0feedbdfe069a0dad76c8a45ee9ab4cfc26
[ "MIT" ]
null
null
null
hardware/dht/__init__.py
jpalczewski/pills
ab0cf0feedbdfe069a0dad76c8a45ee9ab4cfc26
[ "MIT" ]
null
null
null
hardware/dht/__init__.py
jpalczewski/pills
ab0cf0feedbdfe069a0dad76c8a45ee9ab4cfc26
[ "MIT" ]
null
null
null
from .DHT22 import sensor import time import pigpio async def poll_once(): pi = pigpio.pi() s = sensor(pi, 24, LED=None, power=None,DHT11=False) s.trigger() time.sleep(0.2) humidity = s.humidity() temperature = s.temperature() s.cancel() pi.stop() return (humidity, temperature)
17.777778
56
0.6375
44
320
4.613636
0.613636
0.187192
0
0
0
0
0
0
0
0
0
0.03252
0.23125
320
18
57
17.777778
0.792683
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.230769
0
0.307692
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e5ba2a20b4cea3293ed973ff92b38716b7ec7fc
2,267
py
Python
test.py
gadolly/Deep_learning
b29248f97d576c36cad9eb0f67ed834d7a5aadad
[ "MIT" ]
null
null
null
test.py
gadolly/Deep_learning
b29248f97d576c36cad9eb0f67ed834d7a5aadad
[ "MIT" ]
null
null
null
test.py
gadolly/Deep_learning
b29248f97d576c36cad9eb0f67ed834d7a5aadad
[ "MIT" ]
null
null
null
# import the necessary packages from keras.preprocessing import image as image_utils from imagenet_utils import decode_predictions from imagenet_utils import preprocess_input from vgg16 import VGG16 import numpy as np import argparse import cv2 from keras.utils import np_utils import matplotlib.pyplot as plt from matplotlib import pyplot as plt # construct the argument parse and parse the arguments ap = argparse.ArgumentParser() ap.add_argument("-i", "--image", required=True, help="path to the input image") args = vars(ap.parse_args()) # load the original image via OpenCV so we can draw on it and display # it to our screen later orig = cv2.imread(args["image"]) #cv2.imshow("test",orig) # load the input image using the Keras helper utility while ensuring # that the image is resized to 224x224 pxiels, the required input # dimensions for the network -- then convert the PIL image to a # NumPy array print("[INFO] loading and preprocessing image...") image = image_utils.load_img(args["image"], target_size=(224, 224)) image = image_utils.img_to_array(image) # our image is now represented by a NumPy array of shape (3, 224, 224), # but we need to expand the dimensions to be (1, 3, 224, 224) so we can # pass it through the network -- we'll also preprocess the image by # subtracting the mean RGB pixel intensity from the ImageNet dataset image = np.expand_dims(image, axis=0) image = preprocess_input(image) # load the VGG16 network print("[INFO] loading network...") model = VGG16(weights="imagenet") # classify the image print("[INFO] classifying image...") preds = model.predict(image) result = decode_predictions(preds, top=1) (inID, label, val) = decode_predictions(preds)[0][0] print(result[0]) print(len(result)) #result1 = ([col.strip() for col in part] for part in result) #print(result1) #print(decode_predictions(preds)[0]) # display the predictions to our screen print("ImageNet ID: {}, Label: {}".format(inID, label)) cv2.putText(orig, "Label: {}".format(label), (10, 30), 
cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2) plt.ioff() plt.imshow(orig) plt.pause(1) plt.show() #cv2.imshow("Classification", orig) #cv2.waitKey(0) P = decode_predictions(preds) (imagenetID, label, prob) = P[0][0] #plt.show()
30.635135
71
0.736215
353
2,267
4.665722
0.402266
0.051609
0.05343
0.02793
0
0
0
0
0
0
0
0.034179
0.148214
2,267
74
72
30.635135
0.818747
0.406264
0
0
0
0
0.134441
0
0
0
0
0
0
1
0
false
0
0.27027
0
0.27027
0.162162
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e5dc3da5c6988b8cfded965ef1f80d576653a4f
1,534
py
Python
methods_test.py
RyoJerryYu/DeclarationCpp2Go
3e71546b8a2ce37cb056986250acfaab74c71a76
[ "MIT" ]
null
null
null
methods_test.py
RyoJerryYu/DeclarationCpp2Go
3e71546b8a2ce37cb056986250acfaab74c71a76
[ "MIT" ]
1
2021-12-04T11:54:56.000Z
2021-12-04T11:54:56.000Z
methods_test.py
RyoJerryYu/DeclarationCpp2Go
3e71546b8a2ce37cb056986250acfaab74c71a76
[ "MIT" ]
null
null
null
'''Tests for methods.py''' from methods import FuncName, Method, TVList from common import Capitalizer from protocol import DirectReturn def test_funcname(): funcname_p = FuncName(Capitalizer()) assert funcname_p.transform('foo_bar_baz') == 'FooBarBaz' def test_funcname_default(): funcname_p = FuncName() assert funcname_p.transform('foo_bar_baz') == 'FooBarBaz' def test_tvlist(): tvlist_p = TVList(DirectReturn()) assert tvlist_p.transform('int a, int b') == 'int a, int b' def test_tvlist_default(): tvlist_p = TVList() assert tvlist_p.transform('int a, int b') == 'a int, b int' def test_method(): method_p = Method(DirectReturn(), DirectReturn(), DirectReturn()) assert method_p.transform( ' int foo(int a, int b);') == ' foo(int a, int b) int' def test_method_default(): method_p = Method() assert method_p.transform( ' int foo(int a, int b);') == ' Foo(a int, b int) int' def test_method_default_no_args(): method_p = Method() assert method_p.transform(' uint32 get_type();') == ' GetType() uint32' def test_method_void_type(): method_p = Method() assert method_p.transform( ' void foo(int a, int b);') == ' Foo(a int, b int) ' def test_method_empty_lines(): method_p = Method() assert method_p.transform('\n\n\n \n') == '' def test_method_with_const(): method_p = Method(DirectReturn(), DirectReturn(), DirectReturn()) assert method_p.transform( ' int foo(int a, int b) const ;') == ' foo(int a, int b) int'
26
79
0.666884
219
1,534
4.456621
0.173516
0.04918
0.061475
0.07377
0.63832
0.604508
0.58709
0.409836
0.348361
0.348361
0
0.003218
0.1897
1,534
58
80
26.448276
0.781979
0.013038
0
0.324324
0
0
0.232759
0
0
0
0
0
0.27027
1
0.27027
false
0
0.081081
0
0.351351
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
8e6099119f26b536e38e505e34fc237bd2e59cfb
152
py
Python
chapter-14/shakespearebot-waypoint/bot/models.py
GeSup/Hands-on-JavaScript-for-Python-Developers
a4d779e3e46797754252e97ec0e517c9e42682c6
[ "MIT" ]
41
2019-04-30T14:55:48.000Z
2022-03-27T21:59:56.000Z
chapter-14/shakespearebot-waypoint/bot/models.py
GeSup/Hands-on-JavaScript-for-Python-Developers
a4d779e3e46797754252e97ec0e517c9e42682c6
[ "MIT" ]
10
2020-05-27T22:54:38.000Z
2020-10-13T21:52:24.000Z
chapter-14/shakespearebot-waypoint/bot/models.py
GeSup/Hands-on-JavaScript-for-Python-Developers
a4d779e3e46797754252e97ec0e517c9e42682c6
[ "MIT" ]
24
2019-05-07T21:02:11.000Z
2021-10-31T23:44:14.000Z
from django.db import models class Text(models.Model): PlayerLine = models.CharField(max_length=1000) def __str__(self): return self.PlayerLine
25.333333
48
0.769737
21
152
5.333333
0.809524
0
0
0
0
0
0
0
0
0
0
0.030534
0.138158
152
6
49
25.333333
0.824427
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.2
0.2
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
4
8e615b3096b4af4bf6362be743bc75af467ed5a8
17,468
py
Python
tests/test_requirements.py
domdfcoding/packing-tape
d8570033c8088c68527db918339c14aa6953264f
[ "MIT" ]
null
null
null
tests/test_requirements.py
domdfcoding/packing-tape
d8570033c8088c68527db918339c14aa6953264f
[ "MIT" ]
null
null
null
tests/test_requirements.py
domdfcoding/packing-tape
d8570033c8088c68527db918339c14aa6953264f
[ "MIT" ]
null
null
null
# stdlib from typing import List, Sequence, Union # 3rd party import pytest from coincidence.regressions import AdvancedDataRegressionFixture from coincidence.selectors import min_version, not_windows, only_version from domdf_python_tools.paths import PathPlus from packaging.requirements import Requirement from packaging.specifiers import Specifier, SpecifierSet from pytest_regressions.data_regression import DataRegressionFixture # this package from shippinglabel.requirements import ( ComparableRequirement, check_dependencies, combine_requirements, list_requirements, parse_pyproject_dependencies, parse_pyproject_extras, parse_requirements, read_requirements, resolve_specifiers ) class TestComparableRequirement: @pytest.fixture(scope="class") def req(self): return ComparableRequirement('pytest==6.0.0; python_version <= "3.9"') @pytest.mark.parametrize( "other", [ ComparableRequirement('pytest==6.0.0; python_version <= "3.9"'), ComparableRequirement("pytest==6.0.0"), ComparableRequirement("pytest"), ComparableRequirement("pytest[extra]"), Requirement('pytest==6.0.0; python_version <= "3.9"'), Requirement("pytest==6.0.0"), Requirement("pytest"), Requirement("pytest[extra]"), "pytest", ] ) def test_eq(self, req, other): assert req == req assert req == other @pytest.mark.parametrize( "other", [ "pytest-rerunfailures", ComparableRequirement("pytest-rerunfailures"), ComparableRequirement("pytest-rerunfailures==1.2.3"), Requirement("pytest-rerunfailures"), Requirement("pytest-rerunfailures==1.2.3"), ComparableRequirement("pytest"), ComparableRequirement("pytest[extra]"), Requirement("pytest"), Requirement("pytest[extra]"), ] ) def test_gt(self, req, other): assert req < other @pytest.mark.parametrize( "other", [ "apeye", ComparableRequirement("apeye"), ComparableRequirement("apeye==1.2.3"), Requirement("apeye"), Requirement("apeye==1.2.3"), ] ) def test_lt(self, req, other): assert req > other @pytest.mark.parametrize( "other", [ "pytest-rerunfailures", 
ComparableRequirement("pytest-rerunfailures"), ComparableRequirement("pytest-rerunfailures==1.2.3"), ComparableRequirement('pytest==6.0.0; python_version <= "3.9"'), Requirement("pytest-rerunfailures"), Requirement("pytest-rerunfailures==1.2.3"), Requirement('pytest==6.0.0; python_version <= "3.9"'), ComparableRequirement("pytest==6.0.0"), ComparableRequirement("pytest"), ComparableRequirement("pytest[extra]"), Requirement("pytest==6.0.0"), Requirement("pytest"), Requirement("pytest[extra]"), "pytest", ] ) def test_ge(self, req, other): assert req <= other assert req <= req @pytest.mark.parametrize( "other", [ "apeye", ComparableRequirement("apeye"), ComparableRequirement("apeye==1.2.3"), Requirement("apeye"), Requirement("apeye==1.2.3"), ComparableRequirement('pytest==6.0.0; python_version <= "3.9"'), ComparableRequirement("pytest==6.0.0"), ComparableRequirement("pytest"), ComparableRequirement("pytest[extra]"), Requirement('pytest==6.0.0; python_version <= "3.9"'), Requirement("pytest==6.0.0"), Requirement("pytest"), Requirement("pytest[extra]"), "pytest", ] ) def test_le(self, req, other): assert req >= other assert req >= req def test_combine_requirements(): reqs = [ ComparableRequirement("foo"), ComparableRequirement("foo>2"), ComparableRequirement("foo>2.5"), ComparableRequirement("foo==3.2.1"), ComparableRequirement("foo==3.2.3"), ComparableRequirement("foo==3.2.5"), ] assert combine_requirements(reqs) == [Requirement("foo==3.2.1,==3.2.3,==3.2.5,>2.5")] assert str(combine_requirements(reqs)[0]) == "foo==3.2.1,==3.2.3,==3.2.5,>2.5" assert str(combine_requirements(reqs)[0].specifier) == "==3.2.1,==3.2.3,==3.2.5,>2.5" def test_combine_requirements_duplicates(): reqs = [ ComparableRequirement('typing-extensions>=3.6.4; python_version < "3.8"'), ComparableRequirement("typing-extensions>=3.7.4.3"), ComparableRequirement("typing-extensions>=3.7.4.3"), ComparableRequirement("typing-extensions>=3.7.4.3"), ComparableRequirement("typing-extensions>=3.7.4.3"), 
ComparableRequirement("typing-extensions>=3.7.4.1"), ComparableRequirement("typing-extensions>=3.7.4"), ComparableRequirement('typing-extensions; python_version < "3.8"'), ] combined_reqs = combine_requirements(reqs) assert len(combined_reqs) == 2 assert combined_reqs[1] == ComparableRequirement("typing-extensions>=3.7.4.3") assert combined_reqs[0] == ComparableRequirement('typing-extensions>=3.6.4; python_version < "3.8"') reqs.append(reqs.pop(0)) combined_reqs = combine_requirements(reqs) assert len(combined_reqs) == 2 assert combined_reqs[0] == ComparableRequirement("typing-extensions>=3.7.4.3") assert combined_reqs[1] == ComparableRequirement('typing-extensions>=3.6.4; python_version < "3.8"') def test_combine_requirements_differing_precision(): reqs = [ ComparableRequirement("lockfile>=0.9"), ComparableRequirement("lockfile>=0.9"), ComparableRequirement("lockfile>=0.12.2"), ] assert combine_requirements(reqs) == [Requirement("lockfile>=0.12.2")] @pytest.mark.parametrize( "reqs, combined", [ ( [ ComparableRequirement('numpy==1.19.3; platform_system == "Windows"'), ComparableRequirement('numpy>=1.19.1; platform_system != "Windows"') ], [ ComparableRequirement('numpy==1.19.3; platform_system == "Windows"'), ComparableRequirement('numpy>=1.19.1; platform_system != "Windows"') ], ), ( [ ComparableRequirement('numpy==1.19.3; platform_system == "Windows"'), ComparableRequirement("numpy>=1.19.1"), ], [ ComparableRequirement('numpy==1.19.3; platform_system == "Windows"'), ComparableRequirement("numpy>=1.19.1"), ], ), ( [ComparableRequirement("numpy==1.19.3"), ComparableRequirement("numpy>=1.19.1")], [ComparableRequirement("numpy==1.19.3")], ), ( [ComparableRequirement("numpy<=1.19.3"), ComparableRequirement("numpy==1.19.1")], [ComparableRequirement("numpy==1.19.1")], ), ( [ComparableRequirement("numpy<=1.19.3"), ComparableRequirement("numpy<1.19.1")], [ComparableRequirement("numpy<1.19.1")], ), ( [ComparableRequirement("numpy>1.2.3"), 
ComparableRequirement("numpy>=1.2.2")], [ComparableRequirement("numpy>1.2.3")], ), ] ) def test_combine_requirements_markers(reqs, combined): assert combine_requirements(reqs) == combined @pytest.mark.parametrize( "specifiers, resolved", [ ([Specifier(">1.2.3"), Specifier(">=1.2.2"), Specifier("<2")], SpecifierSet(">1.2.3,<2")), ([Specifier(">1.2.3"), Specifier(">=1.2.2")], SpecifierSet(">1.2.3")), ([Specifier(">=1.2.2"), Specifier("<2")], SpecifierSet(">=1.2.2,<2")), ([Specifier(">1.2.3"), Specifier("<2")], SpecifierSet(">1.2.3,<2")), ([Specifier("<1.2.2"), Specifier("<=1.2.3"), Specifier(">2")], SpecifierSet("<1.2.2,>2")), ([Specifier("<1.2.2"), Specifier("<=1.2.3")], SpecifierSet("<1.2.2")), ([Specifier("<=1.2.3"), Specifier(">2")], SpecifierSet("<=1.2.3,>2")), ([Specifier("<1.2.2"), Specifier(">2")], SpecifierSet("<1.2.2,>2")), ] ) def test_resolve_specifiers(specifiers, resolved): assert resolve_specifiers(specifiers) == resolved requirements_a = [ "autodocsumm>=0.2.0", "default-values>=0.2.0", "domdf-sphinx-theme>=0.1.0", "extras-require>=0.2.0", "repo-helper-sphinx-theme>=0.0.2", "seed-intersphinx-mapping>=0.1.1", "sphinx>=3.0.3", "ruamel-yaml>=0.16.12", "sphinx-click>=2.5.0", "sphinx-copybutton>=0.2.12", "sphinx-notfound-page>=0.5", "sphinx-prompt>=1.1.0", "sphinx-tabs>=1.1.13", "sphinx-toolbox>=1.7.1", "sphinxcontrib-autoprogram>=0.1.5", "sphinxcontrib-httpdomain>=1.7.0", "sphinxemoji>=0.1.6", "toctree-plus>=0.0.4", ] requirements_b = [ "autodocsumm>=0.2.0", "default-values>=0.2.0", "domdf-sphinx-theme>=0.1.0", "domdf-sphinx-theme>=0.1.0", "extras-require>=0.2.0", "repo-helper-sphinx-theme>=0.0.2", "seed-intersphinx-mapping>=0.1.1", "sphinx>=3.0.3", "sphinx-click>=2.5.0", "sphinx-copybutton>=0.2.12", "sphinx-copybutton>=0.2.12", "sphinx-notfound-page>=0.5", "sphinx-prompt>=1.1.0", "sphinx-tabs>=1.1.13", "sphinx-toolbox>=1.7.1", "ruamel.yaml>=0.16.12", "sphinxcontrib-autoprogram>=0.1.5", "sphinxcontrib-autoprogram>=0.1.5", "sphinxcontrib-httpdomain>=1.7.0", 
"sphinxemoji>=0.1.6", "toctree-plus>=0.0.4", "toctree-plus>=0.0.3", ] requirements_c = [ 'numpy==1.19.3; platform_system == "Windows"', 'numpy>=1.19.1; platform_system != "Windows"', ] @pytest.mark.parametrize( "requirements", [ pytest.param(requirements_a, id='a'), pytest.param(requirements_b, id='b'), pytest.param(requirements_c, id='c'), ] ) def test_read_requirements( tmp_pathplus, advanced_data_regression: AdvancedDataRegressionFixture, requirements: List[str], ): (tmp_pathplus / "requirements.txt").write_lines(requirements) advanced_data_regression.check([ str(x) for x in sorted(read_requirements(tmp_pathplus / "requirements.txt")[0]) ]) @pytest.mark.parametrize( "requirements", [ pytest.param(requirements_a, id='a'), pytest.param(requirements_b, id='b'), pytest.param(requirements_c, id='c'), pytest.param(iter(requirements_a), id="iter(a)"), pytest.param(iter(requirements_b), id="iter(b)"), pytest.param(iter(requirements_c), id="iter(c)"), pytest.param(set(requirements_a), id="set(a)"), pytest.param(set(requirements_b), id="set(b)"), pytest.param(set(requirements_c), id="set(c)"), pytest.param(tuple(requirements_a), id="tuple(a)"), pytest.param(tuple(requirements_b), id="tuple(b)"), pytest.param(tuple(requirements_c), id="tuple(c)"), ] ) def test_parse_requirements( tmp_pathplus: PathPlus, advanced_data_regression: AdvancedDataRegressionFixture, requirements: List[str], ): advanced_data_regression.check([str(x) for x in sorted(parse_requirements(requirements)[0])]) def test_read_requirements_invalid( tmp_pathplus: PathPlus, advanced_data_regression: AdvancedDataRegressionFixture ): (tmp_pathplus / "requirements.txt").write_lines([ "# another comment", "autodocsumm>=apples", "default-value---0.2.0", "domdf-sphinx-theme!!!0.1.0", "0.2.0", '', '', "https://bbc.co.uk", "toctree-plus>=0.0.4", "# a comment", ]) with pytest.warns(UserWarning) as record: requirements, comments = read_requirements(tmp_pathplus / "requirements.txt") # check that only one warning was 
raised assert len(record) == 3 # check that the message matches for idx, warning in enumerate([ "Creating a LegacyVersion has been deprecated and will be removed in the next major release", "Ignored invalid requirement 'domdf-sphinx-theme!!!0.1.0'", "Ignored invalid requirement 'https://bbc.co.uk'", ]): assert record[idx].message.args[0] == warning # type: ignore advanced_data_regression.check([str(x) for x in sorted(requirements)]) assert comments == ["# another comment", "# a comment"] def test_sort_mixed_requirements(): requirements: Sequence[Union[str, ComparableRequirement]] = [ "urllib3", ComparableRequirement("six==1.15.0"), "botocore", ComparableRequirement("requests>=2.19.1"), "python-dateutil", ] assert sorted(requirements) == [ "botocore", "python-dateutil", ComparableRequirement("requests>=2.19.1"), ComparableRequirement("six==1.15.0"), "urllib3", ] def test_check_dependencies(capsys): deps = ["pytest", "domdf_python_tools", "madeup_module"] missing_deps = check_dependencies(deps, False) assert isinstance(missing_deps, list) assert len(missing_deps) == 1 assert missing_deps == ["madeup_module"] missing_deps = check_dependencies(deps) captured = capsys.readouterr() stdout = captured.out.split('\n') assert stdout[0] == "The following modules are missing:" assert stdout[1] == "['madeup_module']" assert stdout[2] == "Please check the documentation." 
assert stdout[3] == '' assert isinstance(missing_deps, list) assert len(missing_deps) == 1 assert missing_deps == ["madeup_module"] missing_deps = check_dependencies(["pytest"]) captured = capsys.readouterr() stdout = captured.out.split('\n') assert stdout[0] == "All modules installed" assert stdout[1] == '' assert isinstance(missing_deps, list) assert len(missing_deps) == 0 assert missing_deps == [] def test_comparable_requirement(): assert ComparableRequirement("foo") != ComparableRequirement("bar") assert ComparableRequirement("foo") == ComparableRequirement("foo") assert ComparableRequirement("foo>=1.2.3") == ComparableRequirement("foo >= 1.2.3") def req_with_marker(): return ComparableRequirement('importlib-metadata>=1.5.0; python_version < "3.8"') def req_without_marker(): return ComparableRequirement("importlib-metadata>=1.5.0") def req_with_different_marker(): return ComparableRequirement('importlib-metadata>=1.5.0; python_version < "3.10"') assert req_with_marker() == req_with_marker() assert req_with_marker() is not req_with_marker() assert req_without_marker() is not req_without_marker() assert req_with_marker() != req_with_different_marker() assert "importlib-metadata" in [req_with_marker()] assert req_without_marker() in [req_with_marker()] assert req_with_marker() in [req_with_marker()] assert "importlib-metadata" in (req_with_marker(), ) assert req_without_marker() in (req_with_marker(), ) assert req_with_marker() in (req_with_marker(), ) assert {req_without_marker(), req_without_marker()} == {req_without_marker()} assert {req_with_marker(), req_with_marker()} == {req_with_marker()} assert hash(req_with_marker()) == hash(req_with_marker()) assert hash(req_with_marker()) != hash(req_without_marker()) assert req_without_marker() not in {req_with_marker()} assert req_with_marker() in {req_with_marker()} assert req_without_marker() != "123foo?" 
only_36 = pytest.param("3.6", marks=only_version((3, 6), reason="Output differs on Python 3.6")) only_37 = pytest.param("3.7", marks=only_version((3, 7), reason="Output differs on Python 3.7")) only_38 = pytest.param("3.8", marks=only_version((3, 8), reason="Output differs on Python 3.8")) min_38 = pytest.param("3.8+", marks=min_version((3, 8), reason="Output differs on Python 3.8+")) only_39 = pytest.param("3.9", marks=only_version((3, 9), reason="Output differs on Python 3.9")) only_310 = pytest.param("3.10", marks=only_version((3, 10), reason="Output differs on Python 3.10")) @not_windows("Output differs on Windows") @pytest.mark.parametrize("py_version", [ only_36, only_37, only_38, only_39, only_310, ]) @pytest.mark.parametrize( "library", [ "shippinglabel", "apeye", "cachecontrol[filecache]", "domdf-python-tools", "domdf_python_tools", ] ) @pytest.mark.parametrize("depth", [-1, 0, 1, 2, 3]) # @pytest.mark.parametrize("depth", [3]) def test_list_requirements( data_regression: DataRegressionFixture, library, depth, py_version, ): data_regression.check(list(list_requirements(library, depth=depth))) @not_windows("Output differs on Windows") @pytest.mark.parametrize("py_version", [ only_36, only_37, min_38, ]) @pytest.mark.parametrize("depth", [-1, 0, 1, 2, 3]) # @pytest.mark.parametrize("depth", [3]) def test_list_requirements_pytest( data_regression: DataRegressionFixture, depth, py_version, ): data_regression.check(list(list_requirements("pytest", depth=depth))) @pytest.fixture() def pyproject_toml(tmp_pathplus: PathPlus): filename = (tmp_pathplus / "pyproject.toml") filename.write_lines([ "[build-system]", 'requires = [ "setuptools>=40.6.0", "wheel>=0.34.2",]', 'build-backend = "setuptools.build_meta"', '', "[project]", "dependencies = [", ' "httpx",', ' "gidgethub[httpx]>4.0.0",', " \"django>2.1; os_name != 'nt'\",", " \"django>2.0; os_name == 'nt'\"", ']', '', "[project.optional-dependencies]", "test = [", ' "pytest < 5.0.0",', ' "pytest-cov[all]"', ']', 
"[tool.flit.metadata]", "requires = [", '\t"requests >=2.6",', "\t\"configparser; python_version == '2.7'\",", ']', '', "[tool.flit.metadata.requires-extra]", "test = [", '\t"pytest >=2.7.3",', '\t"pytest-cov",', ']', ]) return filename @pytest.mark.parametrize("flavour", ["auto", "pep621", "flit"]) def test_parse_pyproject_dependencies( pyproject_toml: PathPlus, advanced_data_regression: AdvancedDataRegressionFixture, flavour: str, ): deps = parse_pyproject_dependencies(pyproject_toml, flavour) # type: ignore advanced_data_regression.check(sorted(str(x) for x in deps)) @pytest.mark.parametrize("flavour", ["auto", "pep621", "flit"]) def test_parse_pyproject_extras( pyproject_toml: PathPlus, advanced_data_regression: AdvancedDataRegressionFixture, flavour: str, ): extras = parse_pyproject_extras(pyproject_toml, flavour) # type: ignore advanced_data_regression.check({k: sorted(str(x) for x in v) for k, v in extras.items()})
30.916814
101
0.676551
2,184
17,468
5.277015
0.120879
0.006074
0.006247
0.042777
0.676443
0.632278
0.596529
0.554013
0.533623
0.464642
0
0.043826
0.141802
17,468
564
102
30.971631
0.724968
0.012365
0
0.447205
0
0.008282
0.28293
0.081028
0
0
0
0
0.122153
1
0.05176
false
0
0.028986
0.008282
0.093168
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e6171a69d7112d24e0deaed0a6f8f8e780b1f04
6,682
py
Python
tests/ut/python/parallel/test_uniform_candidate_sampler.py
Vincent34/mindspore
a39a60878a46e7e9cb02db788c0bca478f2fa6e5
[ "Apache-2.0" ]
2
2021-07-08T13:10:42.000Z
2021-11-08T02:48:57.000Z
tests/ut/python/parallel/test_uniform_candidate_sampler.py
peixinhou/mindspore
fcb2ec2779b753e95c762cf292b23bd81d1f561b
[ "Apache-2.0" ]
null
null
null
tests/ut/python/parallel/test_uniform_candidate_sampler.py
peixinhou/mindspore
fcb2ec2779b753e95c762cf292b23bd81d1f561b
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Compile-only tests for sharding strategies of P.UniformCandidateSampler."""
import numpy as np
import pytest

import mindspore as ms
import mindspore.context as context
from mindspore import Tensor, Parameter
import mindspore.nn as nn
from mindspore.common.api import _executor
from mindspore.nn import TrainOneStepCell, Momentum
from mindspore.ops import operations as P


class Net(nn.Cell):
    """Sampler + embedding-lookup network used to exercise sampler sharding.

    The returned loss mixes all three sampler outputs so that none of them can
    be pruned away during graph compilation.
    """

    def __init__(self, embedding_weight, num_true, num_sampled, unique, range_max, seed,
                 remove_accidential, strategy1=None):
        # NOTE(review): "remove_accidential" is a typo kept file-wide for
        # consistency; renaming it would change this class's keyword interface.
        super(Net, self).__init__()
        self.sampler = P.UniformCandidateSampler(num_true, num_sampled, unique, range_max,
                                                 seed, remove_accidential)
        if strategy1:
            # Apply the parallel sharding strategy under test to the sampler op.
            self.sampler.shard(strategy1)
        self.embedding_table = Parameter(embedding_weight, "embedding_weight")
        self.gatherv2 = P.Gather()
        self.reduce_sum = P.ReduceSum()
        self.reduce_sum2 = P.ReduceSum()
        self.reduce_sum3 = P.ReduceSum()

    def construct(self, x):
        # NOTE(review): the three sampler outputs are assumed to be
        # (sampled candidates, true expected count, sampled expected count)
        # based on usage here -- confirm against the op specification.
        out1, out2, out3 = self.sampler(x)
        lookup = self.gatherv2(self.embedding_table, out1, 0)
        loss = out1 - out3
        loss = self.reduce_sum(loss, (0,))
        loss2 = self.reduce_sum2(lookup, (0, 1))
        loss3 = self.reduce_sum3(out2, (0, 1))
        loss4 = loss + loss2 + loss3
        return loss4


class Net2(nn.Cell):
    """Variant network whose sampler output is the graph's final output.

    Used to check that a sharded sampler placed as the last op is rejected at
    compile time (see test_uniform_candidate_sampler_as_final).
    """

    def __init__(self, mul_weight, num_true, num_sampled, unique, range_max, seed,
                 remove_accidential, strategy1=None):
        super(Net2, self).__init__()
        self.sampler = P.UniformCandidateSampler(num_true, num_sampled,
                                                 unique, range_max, seed, remove_accidential)
        self.cast = P.Cast()
        self.weight = Parameter(mul_weight, "w1")
        self.mul = P.Mul()
        if strategy1:
            self.sampler.shard(strategy1)

    def construct(self, x):
        x = self.mul(x, self.weight)
        # Sampler requires integer candidate ids, hence the cast.
        x = self.cast(x, ms.int32)
        _, out2, _ = self.sampler(x)
        return out2


# Shared fixtures: weights and input for the compile checks below.
_w = Tensor(np.ones([48, 16]), dtype=ms.float32)
_w1 = Tensor(np.ones([96, 64]), dtype=ms.float32)
_x = Tensor(np.ones([48, 16]), dtype=ms.int32)


def compile_net(net):
    """Compile `net` inside a TrainOneStepCell; raises if sharding is invalid."""
    context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
    optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
    train_net = TrainOneStepCell(net, optimizer)
    train_net.set_auto_parallel()
    train_net.set_train()
    _executor.compile(train_net, _x)
    context.reset_auto_parallel_context()


# NOTE(review): several test names below contain typos ("parllel", "unqiue",
# "accidential"); they are kept as-is because renaming changes the pytest IDs.

def test_uniform_candidate_sampler_no_full_0d_split():
    """Partial (4-way of 8 devices) split on dim 0, semi-auto parallel."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8,
                                      global_rank=0)
    strategy1 = ((4, 1),)
    net = Net(_w1, num_true=16, num_sampled=16, unique=True, range_max=20, seed=1,
              remove_accidential=False, strategy1=strategy1)
    compile_net(net)


def test_uniform_candidate_sampler_no_full_1d_split():
    """Partial (4-way of 8 devices) split on dim 1, semi-auto parallel."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8,
                                      global_rank=0)
    strategy1 = ((1, 4),)
    net = Net(_w1, num_true=16, num_sampled=16, unique=True, range_max=20, seed=1,
              remove_accidential=False, strategy1=strategy1)
    compile_net(net)


def test_uniform_candidate_sampler_full_0d_split():
    """Full 8-way split on dim 0, semi-auto parallel."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8,
                                      global_rank=0)
    strategy1 = ((8, 1),)
    net = Net(_w1, num_true=16, num_sampled=16, unique=True, range_max=20, seed=1,
              remove_accidential=False, strategy1=strategy1)
    compile_net(net)


def test_uniform_candidate_sampler_full_1d_split():
    """Full 8-way split on dim 1, semi-auto parallel."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8,
                                      global_rank=0)
    strategy1 = ((1, 8),)
    net = Net(_w1, num_true=16, num_sampled=16, unique=True, range_max=20, seed=1,
              remove_accidential=False, strategy1=strategy1)
    compile_net(net)


def test_uniform_candidate_sampler_full_1d_unqiue_false():
    """Full dim-1 split with unique=False sampling."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8,
                                      global_rank=0)
    strategy1 = ((1, 8),)
    net = Net(_w1, num_true=16, num_sampled=16, unique=False, range_max=20, seed=1,
              remove_accidential=False, strategy1=strategy1)
    compile_net(net)


def test_uniform_candidate_sampler_auto_parllel():
    """Auto-parallel mode with no explicit strategy, unique=False."""
    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8,
                                      global_rank=0)
    net = Net(_w1, num_true=16, num_sampled=16, unique=False, range_max=20, seed=1,
              remove_accidential=False, strategy1=None)
    compile_net(net)


def test_uniform_candidate_sampler_auto_parllel_unqiue_true():
    """Auto-parallel mode with no explicit strategy, unique=True."""
    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8,
                                      global_rank=0)
    net = Net(_w1, num_true=16, num_sampled=16, unique=True, range_max=20, seed=1,
              remove_accidential=False, strategy1=None)
    compile_net(net)


def test_uniform_candidate_sampler_auto_parllel_remove_true():
    """Auto-parallel mode with remove_accidential=True."""
    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8,
                                      global_rank=0)
    net = Net(_w1, num_true=16, num_sampled=16, unique=True, range_max=20, seed=1,
              remove_accidential=True, strategy1=None)
    compile_net(net)


def test_uniform_candidate_sampler_full_1d_remove_true():
    """Sharded sampler with remove_accidential=True must fail to compile."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8,
                                      global_rank=0)
    strategy1 = ((1, 8),)
    net = Net(_w1, num_true=16, num_sampled=16, unique=False, range_max=20, seed=1,
              remove_accidential=True, strategy1=strategy1)
    with pytest.raises(RuntimeError):
        compile_net(net)


def test_uniform_candidate_sampler_as_final():
    """Sharded sampler as the network's final op must fail to compile."""
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8,
                                      global_rank=0)
    strategy1 = ((1, 8),)
    net = Net2(_w, num_true=16, num_sampled=16, unique=False, range_max=20, seed=1,
               remove_accidential=False, strategy1=strategy1)
    with pytest.raises(RuntimeError):
        compile_net(net)
41.246914
109
0.700389
923
6,682
4.777898
0.192849
0.059864
0.032426
0.052154
0.648753
0.641043
0.624717
0.606576
0.599773
0.571655
0
0.03914
0.185573
6,682
161
110
41.503106
0.771224
0.09548
0
0.461538
0
0
0.030353
0
0
0
0
0
0
1
0.128205
false
0
0.076923
0
0.239316
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e622edaf8f47d87d5f8233d0e8589b835af46c3
3,464
py
Python
lib/servers/data_vault.py
clayton-ho/EGGs_Control
312f02488b47cf880c6e6600ce10856a871123df
[ "MIT" ]
null
null
null
lib/servers/data_vault.py
clayton-ho/EGGs_Control
312f02488b47cf880c6e6600ce10856a871123df
[ "MIT" ]
null
null
null
lib/servers/data_vault.py
clayton-ho/EGGs_Control
312f02488b47cf880c6e6600ce10856a871123df
[ "MIT" ]
null
null
null
# Copyright (C) 2007 Matthew Neeley
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""
### BEGIN NODE INFO
[info]
name = Data Vault
version = 3.0.2
description = Store and retrieve numeric data

[startup]
cmdline = %PYTHON% %FILE%
timeout = 20

[shutdown]
message = 987654321
timeout = 5
### END NODE INFO
"""

from __future__ import absolute_import

import os
import sys

from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue

import labrad.util
import labrad.wrappers

from data_vault import SessionStore
from data_vault.server import DataVault


@inlineCallbacks
def load_settings(cxn, name):
    """Load settings from registry with fallback to command line if needed.

    Attempts to load the data vault configuration for this node from the
    registry. If not configured, we instead prompt the user to enter a
    path to use for storing data, and save this config into the registry
    to be used later.
    """
    # Registry layout: /Servers/<server name>/Repository/<node name> -> datadir
    path = ['', 'Servers', name, 'Repository']
    nodename = labrad.util.getNodeName()
    reg = cxn.registry
    # cd(..., True) creates the directory if it does not yet exist.
    yield reg.cd(path, True)
    (dirs, keys) = yield reg.dir()
    if nodename in keys:
        # Per-node configuration takes precedence.
        datadir = yield reg.get(nodename)
    elif '__default__' in keys:
        datadir = yield reg.get('__default__')
    else:
        # Nothing configured yet: prompt interactively and persist the answer.
        default_datadir = os.path.expanduser('~/.labrad/vault')
        print('Could not load repository location from registry.')
        print('Please enter data storage directory or hit enter to use')
        print('the default directory ({}):'.format(default_datadir))
        datadir = os.path.expanduser(input('>>>'))
        if datadir == '':
            datadir = default_datadir
        if not os.path.exists(datadir):
            os.makedirs(datadir)
        # set as default and for this node
        yield reg.set(nodename, datadir)
        yield reg.set('__default__', datadir)
        print('Data location configured in the registry at {}: {}'.format(
            path + [nodename], datadir))
        print('To change this, edit the registry keys and restart the server.')
    # inlineCallbacks generators return via returnValue, not `return`.
    returnValue(datadir)


def main(argv=sys.argv):
    """Connect to labrad, resolve the data directory, then run the vault server."""
    @inlineCallbacks
    def start():
        opts = labrad.util.parseServerOptions(name=DataVault.name)
        # Temporary connection used only to read/write the registry config.
        cxn = yield labrad.wrappers.connectAsync(
            host=opts['host'], port=int(opts['port']), password=opts['password'])
        datadir = yield load_settings(cxn, opts['name'])
        yield cxn.disconnect()
        session_store = SessionStore(datadir, hub=None)
        server = DataVault(session_store)
        # Back-reference set after construction (store and server need each other).
        session_store.hub = server
        # Run the server. We do not need to start the reactor, but we will
        # stop it after the data_vault shuts down.
        labrad.util.runServer(server, run_reactor=False, stop_reactor=True)
    _ = start()
    reactor.run()


if __name__ == '__main__':
    main()
32.679245
81
0.687356
464
3,464
5.047414
0.411638
0.020495
0.016652
0.024338
0.055508
0.044406
0
0
0
0
0
0.007407
0.220554
3,464
105
82
32.990476
0.86
0.374134
0
0.038462
0
0
0.160057
0
0
0
0
0
0
1
0.057692
false
0.019231
0.173077
0
0.230769
0.096154
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e64479dc605db78a7e77be2782621351e868958
446
py
Python
atlantisbot_api/migrations/0004_auto_20201107_0019.py
johnvictorfs/atlantisbot-api
1a00ae33497b5c5bf51d7bac154e96d4d9ab534b
[ "MIT" ]
null
null
null
atlantisbot_api/migrations/0004_auto_20201107_0019.py
johnvictorfs/atlantisbot-api
1a00ae33497b5c5bf51d7bac154e96d4d9ab534b
[ "MIT" ]
null
null
null
atlantisbot_api/migrations/0004_auto_20201107_0019.py
johnvictorfs/atlantisbot-api
1a00ae33497b5c5bf51d7bac154e96d4d9ab534b
[ "MIT" ]
null
null
null
# Generated by Django 2.2.16 on 2020-11-07 00:19 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('atlantisbot_api', '0003_auto_20201107_0018'), ] operations = [ migrations.AlterModelTable( name='discordingamename', table=None, ), migrations.AlterModelTable( name='discorduser', table=None, ), ]
20.272727
55
0.58296
41
446
6.243902
0.756098
0.195313
0.226563
0
0
0
0
0
0
0
0
0.104918
0.316144
446
21
56
21.238095
0.734426
0.103139
0
0.4
1
0
0.165829
0.057789
0
0
0
0
0
1
0
false
0
0.066667
0
0.266667
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
8e65b59f5232680aea8dce90eae39a5dcfa86850
5,465
py
Python
py-opentsdb.py
langerma/py-opentsdb
d652a96d3a53bf7c6785a1d586427d666bb3da96
[ "BSD-2-Clause" ]
2
2020-02-20T16:00:11.000Z
2020-02-20T16:00:21.000Z
py-opentsdb.py
langerma/py-opentsdb
d652a96d3a53bf7c6785a1d586427d666bb3da96
[ "BSD-2-Clause" ]
null
null
null
py-opentsdb.py
langerma/py-opentsdb
d652a96d3a53bf7c6785a1d586427d666bb3da96
[ "BSD-2-Clause" ]
null
null
null
import requests
import pandas

try:
    # Use ujson if available.
    import ujson as json
except Exception:
    import json


class OpenTSDBResponseSerie(object):
    """
    A single OpenTSDB response serie i.e 1 element of the response array.

    Params:
        **kwargs : OpenTSDB response serie data (metric, tags, dps, ...)
    """

    def __init__(self, **kwargs):
        # Attach every response field as an attribute on this object.
        for k, v in kwargs.items():
            setattr(self, k, v)

    @property
    def id(self):
        """
        id for serie

        Returns:
            metric{sorted=tag,key=value}  (bare metric name when there are no tags)
        """
        if len(self.tags.keys()) > 0:
            tags = ",".join(["%s=%s" % (k, self.tags[k]) for k in sorted(self.tags.keys())])
            return "%s{%s}" % (self.metric, tags)
        else:
            return self.metric

    def alias(self, functOrStr):
        """
        User specified alias using lambda functions and string formatting
        using metadata provided by opentsdb. This function fails silently.

        Params:
            functOrStr : lambda function or python string format. When using
                         lambda functions, they must begin with '!'
                         e.g. !lambda x: x....

        Return:
            Formatted alias on success and id on failure.
        """
        flatData = self.__flattenedMetadata()
        # Normalized alias
        _alias = ""
        if functOrStr.startswith("!"):
            # SECURITY: eval() of a caller-supplied expression. Only safe for
            # trusted, locally-configured aliases -- never pass untrusted input.
            try:
                _alias = eval(functOrStr[1:])(flatData)
            except Exception:
                pass  # deliberate: fall back to self.id below
        else:
            try:
                _alias = functOrStr % (flatData)
            except Exception:
                pass  # deliberate: fall back to self.id below

        if _alias == "":
            return self.id
        return _alias

    def __flattenedMetadata(self):
        """
        Flattens all metadata which is used for normalization:
        {"metric": ..., "tags.<k>": <v>, ...}
        """
        return dict([("metric", self.metric)] +
                    [("tags.%s" % (k), v) for k, v in self.tags.items()])

    def datapoints(self, convertTime=False):
        """
        Converts datapoints

        Params:
            convertTime : Whether to convert epoch to pandas datetime

        Return:
            dict keyed by time (epoch int or pandas Timestamp) -> value
        """
        if convertTime:
            return dict([(pandas.to_datetime(int(k), unit='s'), v)
                         for k, v in self.dps.items()])
        return dict([(int(k), v) for k, v in self.dps.items()])


class OpenTSDBResponse(object):
    """ Complete OpenTSDB response """

    def __init__(self, otsdbResp):
        """
        Params:
            otsdbResp : raw opentsdb response as a str, list or tuple.

        Raises:
            RuntimeError : when otsdbResp is none of the accepted types.
        """
        if isinstance(otsdbResp, (str, bytes)):
            # string response
            # FIX: the original tested `isinstance(otsdbResp, unicode)` which
            # raises NameError on Python 3 for any non-str input.
            self._series = [
                OpenTSDBResponseSerie(**s) for s in json.loads(otsdbResp)
            ]
        elif isinstance(otsdbResp, (list, tuple)):
            # already-decoded response
            self._series = [OpenTSDBResponseSerie(**s) for s in otsdbResp]
        else:
            raise RuntimeError("Invalid type: %s" % (type(otsdbResp)))

    @property
    def series(self):
        """ Use iterator for better memory management """
        for s in self._series:
            yield s

    def DataFrame(self, aliasTransform=None, convertTime=False):
        """
        Converts an OpenTSDB array response into a DataFrame

        Params:
            convertTime    : Whether to convert epoch to pandas datetime
            aliasTransform : lambda function or string format to customize
                             serie name i.e. alias

        Return:
            OpenTSDB response DataFrame
        """
        if aliasTransform is None:
            return pandas.DataFrame(dict([
                (s.id, s.datapoints(convertTime)) for s in self.series]))
        else:
            return pandas.DataFrame(dict([
                (s.alias(aliasTransform), s.datapoints(convertTime))
                for s in self.series]))


class BaseClient(object):
    """Builds OpenTSDB HTTP API URLs for a given host/port."""

    def __init__(self, host, port=4242, ssl=False):
        if ssl:
            self.url = "https://%s:%d" % (host, port)
        else:
            self.url = "http://%s:%d" % (host, port)

    def queryUrl(self, **kwargs):
        """Full /api/query URL for the given query kwargs."""
        return str("%s/api/query?%s" % (self.url, self.__urlEncodedParams(**kwargs)))

    def __urlEncodedParams(self, aggr="sum", rate=False, counter=False, end=None, **kwargs):
        """Encode start/end, aggregator, rate/counter flags, metric and tags.

        Requires kwargs: start, metric, tags (tags may be an empty dict).
        """
        timeStr = "start=%s" % (kwargs["start"])
        if end is not None:
            timeStr += "&end=%s" % (end)

        if rate:
            prefix = "%s:rate:%s" % (aggr, kwargs["metric"])
        elif counter:
            # counter rate with default counter options
            prefix = "%s:rate{counter,,1}:%s" % (aggr, kwargs["metric"])
        else:
            prefix = "%s:%s" % (aggr, kwargs["metric"])
        # TODO: check
        tagsStr = ",".join([
            "%s=%s" % (k, kwargs["tags"][k]) for k in sorted(kwargs["tags"].keys())])
        if tagsStr != "":
            return "%s&m=%s{%s}" % (timeStr, prefix, tagsStr)
        else:
            return "%s&m=%s" % (timeStr, prefix)


class Client(BaseClient):
    """HTTP client that executes queries against OpenTSDB."""

    def query(self, **kwargs):
        """Run the query; wrap a 2xx/3xx response, return raw text on error."""
        resp = requests.get(self.queryUrl(**kwargs))
        if 200 <= resp.status_code < 400:
            return OpenTSDBResponse(resp.text)
        # error
        return resp.text
31.589595
103
0.530101
586
5,465
4.890785
0.298635
0.004885
0.010468
0.00977
0.171668
0.147941
0.115143
0.110258
0.037683
0
0
0.003951
0.351693
5,465
172
104
31.773256
0.804968
0.226715
0
0.202247
0
0
0.05159
0.005881
0
0
0
0.005814
0
1
0.134831
false
0.022472
0.044944
0.011236
0.382022
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e65daebe577c08239034ca2c192e6c446ad91d9
5,865
py
Python
tests/integration/test_clone_project.py
superannotateai/superannotate-python-sdk
e2ce848b61efed608265fa64f3781fd5a17c929b
[ "MIT" ]
26
2020-09-25T06:25:06.000Z
2022-01-30T16:44:07.000Z
tests/integration/test_clone_project.py
superannotateai/superannotate-python-sdk
e2ce848b61efed608265fa64f3781fd5a17c929b
[ "MIT" ]
12
2020-12-21T19:59:48.000Z
2022-01-21T10:32:07.000Z
tests/integration/test_clone_project.py
superannotateai/superannotate-python-sdk
e2ce848b61efed608265fa64f3781fd5a17c929b
[ "MIT" ]
11
2020-09-17T13:39:19.000Z
2022-03-02T18:12:29.000Z
import os
from os.path import dirname
from unittest import TestCase

import pytest

import src.superannotate as sa


class TestCloneProject(TestCase):
    """Integration test: clone a Vector project and verify that classes,
    workflow, settings and metadata are copied to the new project."""

    PROJECT_NAME_1 = "test_create_like_project_1"
    PROJECT_NAME_2 = "test_create_like_project_2"
    PROJECT_DESCRIPTION = "desc"
    PROJECT_TYPE = "Vector"
    IMAGE_QUALITY = "original"
    PATH_TO_URLS = "data_set/attach_urls.csv"

    def setUp(self, *args, **kwargs):
        # tearDown first so leftovers from a previous crashed run are removed.
        self.tearDown()
        self._project_1 = sa.create_project(
            self.PROJECT_NAME_1, self.PROJECT_DESCRIPTION, self.PROJECT_TYPE
        )

    def tearDown(self) -> None:
        sa.delete_project(self.PROJECT_NAME_1)
        sa.delete_project(self.PROJECT_NAME_2)

    def test_create_like_project(self):
        # Populate the source project: images, an annotation class,
        # image-quality setting and a one-step workflow.
        _, _, _ = sa.attach_image_urls_to_project(
            self.PROJECT_NAME_1,
            os.path.join(dirname(dirname(__file__)), self.PATH_TO_URLS),
        )
        sa.create_annotation_class(
            self.PROJECT_NAME_1,
            "rrr",
            "#FFAAFF",
            [
                {
                    "name": "tall",
                    "is_multiselect": 0,
                    "attributes": [{"name": "yes"}, {"name": "no"}],
                },
                {
                    "name": "age",
                    "is_multiselect": 0,
                    "attributes": [{"name": "young"}, {"name": "old"}],
                },
            ],
        )
        sa.set_project_default_image_quality_in_editor(self.PROJECT_NAME_1, self.IMAGE_QUALITY)
        sa.set_project_workflow(
            self.PROJECT_NAME_1,
            [
                {
                    "step": 1,
                    "className": "rrr",
                    "tool": 3,
                    "attribute": [
                        {
                            "attribute": {
                                "name": "young",
                                "attribute_group": {"name": "age"},
                            }
                        },
                        {
                            "attribute": {
                                "name": "yes",
                                "attribute_group": {"name": "tall"},
                            }
                        },
                    ],
                }
            ],
        )
        # Clone and compare the copy against the source project.
        new_project = sa.clone_project(
            self.PROJECT_NAME_2, self.PROJECT_NAME_1, copy_contributors=True
        )
        source_project = sa.get_project_metadata(self.PROJECT_NAME_1)
        self.assertEqual(new_project['upload_state'], source_project['upload_state'])
        new_settings = sa.get_project_settings(self.PROJECT_NAME_2)
        # The image-quality setting is stored under attribute "ImageQuality".
        image_quality = None
        for setting in new_settings:
            if setting["attribute"].lower() == "imagequality":
                image_quality = setting["value"]
                break
        self.assertEqual(image_quality, self.IMAGE_QUALITY)
        self.assertEqual(new_project["description"], self.PROJECT_DESCRIPTION)
        self.assertEqual(new_project["type"].lower(), "vector")
        ann_classes = sa.search_annotation_classes(self.PROJECT_NAME_2)
        self.assertEqual(len(ann_classes), 1)
        self.assertEqual(ann_classes[0]["name"], "rrr")
        self.assertEqual(ann_classes[0]["color"], "#FFAAFF")
        new_workflow = sa.get_project_workflow(self.PROJECT_NAME_2)
        self.assertEqual(len(new_workflow), 1)
        self.assertEqual(new_workflow[0]["className"], "rrr")
        self.assertEqual(new_workflow[0]["tool"], 3)
        self.assertEqual(len(new_workflow[0]["attribute"]), 2)
        self.assertEqual(new_workflow[0]["attribute"][0]["attribute"]["name"], "young")
        self.assertEqual(
            new_workflow[0]["attribute"][0]["attribute"]["attribute_group"]["name"],
            "age",
        )
        self.assertEqual(new_workflow[0]["attribute"][1]["attribute"]["name"], "yes")
        self.assertEqual(
            new_workflow[0]["attribute"][1]["attribute"]["attribute_group"]["name"],
            "tall",
        )


class TestCloneProjectAttachedUrls(TestCase):
    """Integration test: cloning a Document project copies classes but logs a
    deprecation message instead of copying the workflow."""

    PROJECT_NAME_1 = "TestCloneProjectAttachedUrls_1"
    PROJECT_NAME_2 = "TestCloneProjectAttachedUrls_2"
    PROJECT_DESCRIPTION = "desc"
    PROJECT_TYPE = "Document"

    @pytest.fixture(autouse=True)
    def inject_fixtures(self, caplog):
        # Expose pytest's caplog fixture to unittest-style test methods.
        self._caplog = caplog

    def setUp(self, *args, **kwargs):
        # tearDown first so leftovers from a previous crashed run are removed.
        self.tearDown()
        self._project_1 = sa.create_project(
            self.PROJECT_NAME_1, self.PROJECT_DESCRIPTION, self.PROJECT_TYPE
        )

    def tearDown(self) -> None:
        sa.delete_project(self.PROJECT_NAME_1)
        sa.delete_project(self.PROJECT_NAME_2)

    def test_create_like_project(self):
        sa.create_annotation_class(
            self.PROJECT_NAME_1,
            "rrr",
            "#FFAAFF",
            [
                {
                    "name": "tall",
                    "is_multiselect": 0,
                    "attributes": [{"name": "yes"}, {"name": "no"}],
                },
                {
                    "name": "age",
                    "is_multiselect": 0,
                    "attributes": [{"name": "young"}, {"name": "old"}],
                },
            ],
        )
        new_project = sa.clone_project(
            self.PROJECT_NAME_2, self.PROJECT_NAME_1, copy_contributors=True
        )
        self.assertEqual(new_project["description"], self.PROJECT_DESCRIPTION)
        self.assertEqual(new_project["type"].lower(), "document")
        ann_classes = sa.search_annotation_classes(self.PROJECT_NAME_2)
        self.assertEqual(len(ann_classes), 1)
        self.assertEqual(ann_classes[0]["name"], "rrr")
        self.assertEqual(ann_classes[0]["color"], "#FFAAFF")
        # Workflow copy is not supported for Document projects; a warning is
        # logged instead.
        self.assertIn("Workflow copy is deprecated for Document projects.", self._caplog.text)
35.762195
94
0.539301
574
5,865
5.207317
0.1777
0.103045
0.100368
0.064236
0.619605
0.552024
0.529274
0.517899
0.45634
0.45634
0
0.014392
0.336573
5,865
163
95
35.981595
0.753791
0
0
0.430556
0
0
0.139471
0.023188
0
0
0
0
0.145833
1
0.048611
false
0
0.034722
0
0.166667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e672b30ac2d4293d252c3f28a77e16722f9d3c0
1,139
py
Python
dmg/classical_amg.py
amkatrutsa/dmg
ac07e28982ddd7b03137b296e03af4d5515c8ad3
[ "MIT" ]
14
2018-07-06T12:04:46.000Z
2022-03-21T10:01:27.000Z
dmg/classical_amg.py
amkatrutsa/dmg
ac07e28982ddd7b03137b296e03af4d5515c8ad3
[ "MIT" ]
null
null
null
dmg/classical_amg.py
amkatrutsa/dmg
ac07e28982ddd7b03137b296e03af4d5515c8ad3
[ "MIT" ]
4
2019-01-08T03:31:55.000Z
2021-12-28T01:39:15.000Z
import pyamg

from . import gmg_base


class ClassicalAMG(gmg_base.GMG):
    """Classical (Ruge-Stuben) algebraic multigrid solver wrapped behind
    the project's GMG interface.

    Builds a pyamg hierarchy from ``A.to_csr()`` and installs the
    pre-/post-smoothers configured by the base class.
    """

    def __init__(self, A, max_levels=10, presmoother="jacobi",
                 presmooth_par=None,
                 postsmoother=None, postsmooth_par=None, cycle="V"):
        """Set up the classical AMG hierarchy.

        Args:
            A: operator with a ``to_csr()`` method (project matrix type).
            max_levels: maximum number of levels in the AMG hierarchy.
            presmoother: name of the presmoother relaxation scheme.
            presmooth_par: dict of presmoother parameters; defaults to
                weighted Jacobi (omega=2/3, 2 iterations, withrho=False).
            postsmoother: optional postsmoother name.
            postsmooth_par: optional postsmoother parameter dict.
            cycle: multigrid cycle type, e.g. "V".
        """
        # BUG FIX: the original used a mutable dict literal as the default
        # value of presmooth_par, which is shared across every call; build
        # a fresh dict per construction instead.
        if presmooth_par is None:
            presmooth_par = {"omega": 2. / 3, "iterations": 2, "withrho": False}
        super().__init__(A, cycle, presmoother, presmooth_par,
                         postsmoother, postsmooth_par, max_levels)
        # Coarsen all the way down to a single unknown (max_coarse=1).
        self._amg_solver = pyamg.classical.classical.ruge_stuben_solver(
            A.to_csr(), max_levels=max_levels, max_coarse=1)
        # Replace pyamg's default smoothers with the ones selected above
        # (attributes are set by the GMG base class constructor).
        pyamg.relaxation.smoothing.change_smoothers(
            self._amg_solver,
            presmoother=(self._presmoother, self._presmooth_par),
            postsmoother=(self._postsmoother, self._postsmooth_par))

    def _V_cycle(self, rhs, x):
        """Run a single V-cycle of the pyamg solver from initial guess ``x``."""
        return self._amg_solver.solve(b=rhs, x0=x, maxiter=1, cycle="V")

    def __str__(self):
        """Delegate to pyamg's hierarchy summary."""
        return self._amg_solver.__str__()
54.238095
107
0.539069
114
1,139
4.973684
0.447368
0.063492
0.091711
0.067019
0
0
0
0
0
0
0
0.01105
0.364355
1,139
21
108
54.238095
0.772099
0
0
0
0
0
0.026316
0
0
0
0
0
0
1
0.157895
false
0
0.105263
0.105263
0.421053
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
1
8e673db362c569f9ad4cae086dd2478daf171db2
13,601
py
Python
figures.py
maria-kuruvilla/temp_collective_new
c45b72cee7c17072507eb67790d1699f5684098a
[ "MIT" ]
null
null
null
figures.py
maria-kuruvilla/temp_collective_new
c45b72cee7c17072507eb67790d1699f5684098a
[ "MIT" ]
null
null
null
figures.py
maria-kuruvilla/temp_collective_new
c45b72cee7c17072507eb67790d1699f5684098a
[ "MIT" ]
null
null
null
""" Code to plot average nearest neighbor distance between fish in a school as a function of group size - one line per water temperature. """ # imports import sys, os import numpy as np import matplotlib.pyplot as plt import pickle from matplotlib import cm import argparse #argparse def boolean_string(s): # this function helps with getting Boolean input if s not in ['False', 'True']: raise ValueError('Not a valid boolean string') return s == 'True' # note use of == # create the parser object parser = argparse.ArgumentParser() # NOTE: argparse will throw an error if: # - a flag is given with no value # - the value does not match the type # and if a flag is not given it will be filled with the default. parser.add_argument('-a', '--a_string', default='annd', type=str) #parser.add_argument('-s', '--a_string', default='annd_std', type=str) parser.add_argument('-b', '--integer_b', default=3, type=int) parser.add_argument('-c', '--float_c', default=1.5, type=float) parser.add_argument('-v', '--verbose', default=True, type=boolean_string) # Note that you assign a short name and a long name to each argument. # You can use either when you call the program, but you have to use the # long name when getting the values back from "args". 
# get the arguments args = parser.parse_args() xx=0 h = 0.3 if args.a_string=='annd': y_label = 'ANND (Body Length)' xx = 1 if args.a_string=='speed': y_label = 'Speed (Body Length/s)' if args.a_string=='acceleration': y_label = 'Acceleration (Body Length/s'+r'$^2$)' if args.a_string=='polarization': y_label = 'Polarization' xx=1 if args.a_string=='spikes': y_label = 'Number of \n startles' h = 0.4 if args.a_string=='accurate': y_label = 'Number of \n accurate startles' h = 0.4 if args.a_string=='latency': y_label = 'Latency (frames)' if args.a_string=='local_pol': y_label = 'Local polarization' xx = 1 if args.a_string=='local_pol_m': y_label = 'Local polarization' xx = 1 if args.a_string=='dtc': y_label = 'Distance to center \n (pixels)' if args.a_string=='dtc_roi': y_label = 'Distance to center \n (pixels)' if args.a_string=='dtc_roi_norm': y_label = 'Distance to center \n (Body Length)' if args.a_string=='percentile_speed99': y_label = '99th percentile of speed \n (Body Length/s)' if args.a_string=='percentile_speed90': y_label = '90th percentile of speed \n (Body Length/s)' if args.a_string=='percentile_speed80': y_label = '80th percentile of speed \n (Body Length/s)' if args.a_string=='percentile_speed70': y_label = '70th percentile of speed \n (Body Length/s)' if args.a_string=='percentile_speed60': y_label = '60th percentile of speed \n (Body Length/s)' if args.a_string=='percentile_speed100': y_label = '100th percentile of speed \n (Body Length/s)' if args.a_string=='percentile_speed_low_pass99': y_label = '99th percentile of speed \n (Body Length/s)' if args.a_string=='percentile_speed_low_pass90': y_label = '90th percentile of speed \n (Body Length/s)' if args.a_string=='percentile_speed_low_pass80': y_label = '80th percentile of speed \n (Body Length/s)' if args.a_string=='percentile_speed_low_pass70': y_label = '70th percentile of speed \n (Body Length/s)' if args.a_string=='percentile_speed_low_pass60': y_label = '60th percentile of speed \n (Body 
Length/s)' if args.a_string=='percentile_speed_low_pass100': y_label = '100th percentile of speed \n (Body Length/s)' if args.a_string=='percentile_acc99': y_label = '99th percentile of \n acceleration \n (Body Length/s'+r'$^2$)' if args.a_string=='percentile_acc90': y_label = '90th percentile of \n acceleration \n (Body Length/s'+r'$^2$)' if args.a_string=='percentile_acc80': y_label = '80th percentile of \n acceleration \n (Body Length/s'+r'$^2$)' if args.a_string=='percentile_acc70': y_label = '70th percentile of \n acceleration \n (Body Length/s'+r'$^2$)' if args.a_string=='percentile_acc60': y_label = '60th percentile of \n acceleration \n (Body Length/s'+r'$^2$)' if args.a_string=='percentile_acc100': y_label = '100th percentile of \n acceleration \n (Body Length/s'+r'$^2$)' if args.a_string=='percentile_acc_low_pass99': y_label = '99th percentile of \n acceleration \n (Body Length/s'+r'$^2$)' if args.a_string=='percentile_acc_low_pass90': y_label = '90th percentile of \n acceleration \n (Body Length/s'+r'$^2$)' if args.a_string=='percentile_acc_low_pass80': y_label = '80th percentile of \n acceleration \n (Body Length/s'+r'$^2$)' if args.a_string=='percentile_acc_low_pass70': y_label = '70th percentile of \n acceleration \n (Body Length/s'+r'$^2$)' if args.a_string=='percentile_acc_low_pass60': y_label = '60th percentile of \n acceleration \n (Body Length/s'+r'$^2$)' if args.a_string=='percentile_acc_low_pass100': y_label = '100th percentile of \n acceleration \n (Body Length/s'+r'$^2$)' if args.a_string=='unmasked_startles': y_label = 'No. 
of startles \n per unit unmasked time' if args.a_string=='max_loom_speed': y_label = 'Maximum speed during loom \n (Body Length/s)' if args.a_string=='loom_speed99': y_label = '99th percentile of \n speed during loom \n (Body Length/s)' if args.a_string=='loom_speed90': y_label = '90th percentile of \n speed during loom \n (Body Length/s)' if args.a_string=='max_loom_acc': y_label = 'Maximum acceleration during loom \n (Body Length/s'+r'$^2$)' if args.a_string=='loom_acc99': y_label = '99th percentile of \n acceleration during loom \n (Body Length/s'+r'$^2$)' if args.a_string=='loom_acc90': y_label = '90th percentile of \n acceleration during loom \n (Body Length/s'+r'$^2$)' if args.a_string=='max_loom_speed_low_pass': y_label = 'Maximum speed during loom \n (Body Length/s)' if args.a_string=='loom_speed99_low_pass': y_label = '99th percentile of \n speed during loom \n (Body Length/s)' if args.a_string=='loom_speed90_low_pass': y_label = '90th percentile of \n speed during loom \n (Body Length/s)' if args.a_string=='max_loom_acc_low_pass': y_label = 'Maximum acceleration during loom \n (Body Length/s'+r'$^2$)' if args.a_string=='loom_acc99_low_pass': y_label = '99th percentile of \n acceleration during loom \n (Body Length/s'+r'$^2$)' if args.a_string=='loom_acc90_low_pass': y_label = '90th percentile of \n acceleration during loom \n (Body Length/s'+r'$^2$)' if args.a_string=='ratio_max_loom_speed_low_pass': y_label = 'Ratio of maximum \n speed during loom \n to before loom' if args.a_string=='ratio_loom_speed99_low_pass': y_label = 'Ratio of 99th percentile of \n speed during loom \n to before loom' if args.a_string=='ratio_loom_speed90_low_pass': y_label = 'Ratio of 90th percentile of \n speed during loom \n to before loom' if args.a_string=='ratio_max_loom_acc_low_pass': y_label = 'Ratio of maximum \n acceleration during loom \n to before loom' if args.a_string=='ratio_loom_acc99_low_pass': y_label = 'Ratio of 99th percentile of \n acceleration during loom \n 
to before loom' if args.a_string=='ratio_loom_acc90_low_pass': y_label = 'Ratio of 90th percentile of \n acceleration during loom \n to before loom' if args.a_string=='ratio_loom_acc50_low_pass': y_label = 'Ratio of 50th percentile of \n acceleration during loom \n to before loom' if args.a_string=='ratio_loom_speed50_low_pass': y_label = 'Ratio of 50th percentile of \n speed during loom \n to before loom' if args.a_string=='max_non_loom_speed_low_pass': y_label = 'Maximum speed before loom \n (Body Length/s)' if args.a_string=='non_loom_speed99_low_pass': y_label = '99th percentile of \n speed before loom \n (Body Length/s)' if args.a_string=='non_loom_speed90_low_pass': y_label = '90th percentile of \n speed before loom \n (Body Length/s)' if args.a_string=='max_non_loom_acc_low_pass': y_label = 'Maximum acceleration before loom \n (Body Length/s'+r'$^2$)' if args.a_string=='non_loom_acc99_low_pass': y_label = '99th percentile of \n acceleration before loom \n (Body Length/s'+r'$^2$)' if args.a_string=='non_loom_acc90_low_pass': y_label = '90th percentile of \n acceleration before loom \n (Body Length/s'+r'$^2$)' if args.a_string=='unmasked_startles_ratio': y_label = 'Proportion of accurate startles' if args.a_string=='new_masked_startles_ratio': y_label = 'Proportion of accurate startles' if args.a_string=='prop_startles': y_label = 'Proportion of individuals \n that startle' xx=1 if args.a_string=='prop_startles_new_mask': y_label = 'Proportion of individuals \n that startle' xx=1 if args.a_string=='prop_startles_no_nan_new_mask': y_label = 'Proportion of individuals \n that startle' xx=1 if args.a_string=='loom_startles': y_label = 'Number of startles \n per fish during loom' if args.a_string=='loom_startles_normalized': y_label = 'Number of startles \n per fish during loom' if args.a_string=='preloom_startles_normalized': y_label = 'Number of startles \n per fish before loom' if args.a_string=='nonloom_startles_normalized': y_label = 'Number of startles \n 
per fish between looms' if args.a_string=='ind_startle_speed': y_label = 'Maximum startle speed \n (Body Length/s)' if args.a_string=='ind_median_speed': y_label = 'Median speed before loom \n (Body Length/s)' if args.a_string=='ind_ratio_speed': y_label = 'Ratio of max startle speed \n to median speed before loom' in_dir1 = '../../output/temp_collective/roi/' + args.a_string + '.p' annd_values = pickle.load(open(in_dir1, 'rb')) # 'rb is for read binary in_dir2 = '../../output/temp_collective/roi/' + args.a_string + '_std.p' out_dir = '../../output/temp_collective/roi_figures/' + args.a_string + '.png' std_annd_values = pickle.load(open(in_dir2, 'rb')) # 'rb is for read binary temperature = [9,13,17,21,25,29] if xx == 0: group = [1,2,4,8,16] else: group = [2,4,8,16] #group = [1,8] x = 5 # 5 for gs upto 16 #Plotting lw=1.25 fs=12 colors = plt.cm.viridis(np.linspace(0,1,6)) plt.close('all') # always start by cleaning up fig = plt.figure(figsize=(10,6)) ax = fig.add_subplot(211) plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=h) for i in range(6): ax.plot(group[:x], annd_values[i,:x], label = str(temperature[i])+ r'$^{\circ}$C', linewidth = lw, color = colors[i]) ax.fill_between(group[:x], annd_values[i,:x] - std_annd_values[i,:x], annd_values[i,:x] + std_annd_values[i,:x], alpha = 0.3, color = colors[i]) plt.xlabel('Group Size', size = fs) plt.ylabel(y_label, size = fs) plt.xscale('log',basex=2) if xx == 0: plt.xticks(ticks = [1,2,4,8,16], labels = [1,2,4,8,16]) else: plt.xticks(ticks = [2,4,8,16], labels = [2,4,8,16]) """ if xx == 0: plt.xticks(ticks = [1,2,4,8,16,32], labels = [1,2,4,8,16,32]) else: plt.xticks(ticks = [2,4,8,16,32], labels = [2,4,8,16,32]) """ #plt.xlim(right = 30) ax.tick_params(labelsize=.9*fs) ax.set_title('a)', loc='left', fontsize = fs) plt.legend(fontsize=fs, loc='upper right', title = 'Water Temperature', framealpha = 0.5) x=6 colors = plt.cm.viridis(np.linspace(0,1,5)) # 5 for gs upto 16 ax = 
fig.add_subplot(212) for i in range(4): ax.plot(temperature[0:x], annd_values[0:x,i], label = str(group[i]), linewidth = lw, color = colors[i]) ax.fill_between(temperature[0:x], annd_values[0:x,i] - std_annd_values[0:x,i], annd_values[0:x,i] + std_annd_values[0:x,i], alpha = 0.3, color = colors[i]) plt.xlabel('Temperature '+r'($^{\circ}$C)', size = fs) plt.locator_params(axis='x', nbins=5) plt.ylabel(y_label, size = fs) plt.xticks(ticks = [9,13,17,21,25,29], labels = [9,13,17,21,25,29]) #plt.xlim(right = 30) ax.tick_params(labelsize=.9*fs) ax.set_title('b)', loc='left', fontsize = fs) plt.legend(fontsize=fs, loc='upper right', title = 'Group Size', framealpha = 0.5) fig.savefig(out_dir, dpi = 300) plt.show() """ #SPEED in_dir1 = '../../output/temp_collective/roi/average_speed.p' speed_values = pickle.load(open(in_dir1, 'rb')) # 'rb is for read binary in_dir2 = '../../output/temp_collective/roi/speed_std.p' out_dir = '../../output/temp_collective/roi_figures/speed.png' std_speed = pickle.load(open(in_dir2, 'rb')) # 'rb is for read binary temperature = [29,25,21,17,13,9] group = [1,2,4,8,16] x = 5 #Plotting lw=1.25 fs=14 colors = plt.cm.viridis_r(np.linspace(0,1,6)) plt.close('all') # always start by cleaning up fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(211) for i in range(6): ax.plot(group[0:x], speed_values[i,0:x], label = str(temperature[i])+ r'$^{\circ}$C', linewidth = lw, color = colors[i]) ax.fill_between(group[0:x], speed_values[i,0:x] - std_speed[i,0:x], speed_values[i,0:x] + std_speed[i,0:x], alpha = 0.3, color = colors[i]) plt.xlabel('Group Size', size = 0.9*fs) plt.ylabel('Speed (Body Length/s)', size = 0.9*fs) ax.tick_params(labelsize=.8*fs) ax.set_title('a)', loc='left', fontsize = fs) plt.legend(fontsize=fs, loc='upper right', title = 'Water Temperature') x=6 colors = plt.cm.viridis(np.linspace(0,1,5)) ax = fig.add_subplot(212) for i in range(1,5): ax.plot(temperature[0:x], speed_values[0:x,i], label = str(group[i]), linewidth = lw, color = 
colors[i]) ax.fill_between(temperature[0:x], speed_values[0:x,i] - std_speed[0:x,i], speed_values[0:x,i] + std_speed[0:x,i], alpha = 0.3, color = colors[i]) plt.xlabel('Temperature '+r'($^{\circ}$C)', size = 0.9*fs) plt.locator_params(axis='x', nbins=5) plt.ylabel('Speed (Body Length/s)', size = 0.9*fs) ax.tick_params(labelsize=.8*fs) ax.set_title('b)', loc='left', fontsize = fs) plt.legend(fontsize=fs, loc='upper right', title = 'Group Size') #fig.suptitle('Average Nearest Neighbor Distance (ANND)', size = 1.5*fs) fig.savefig(out_dir) plt.show() """
35.792105
160
0.710462
2,404
13,601
3.849002
0.121048
0.060521
0.092727
0.105371
0.818653
0.794661
0.758133
0.72852
0.690587
0.668972
0
0.038361
0.131755
13,601
379
161
35.886544
0.745194
0.063378
0
0.286996
0
0
0.503086
0.097643
0
0
0
0
0
1
0.004484
false
0.143498
0.026906
0
0.035874
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
6
8e67c9cb778e91d2cab352898b79d1683798c293
144
py
Python
Externals/micromegas_4.3.5/Packages/smodels-v1.1.0patch1/smodels/tools/__init__.py
yuanfangtardis/vscode_project
2d78a85413cc85789cc4fee8ec991eb2a0563ef8
[ "Apache-2.0" ]
null
null
null
Externals/micromegas_4.3.5/Packages/smodels-v1.1.0patch1/smodels/tools/__init__.py
yuanfangtardis/vscode_project
2d78a85413cc85789cc4fee8ec991eb2a0563ef8
[ "Apache-2.0" ]
null
null
null
Externals/micromegas_4.3.5/Packages/smodels-v1.1.0patch1/smodels/tools/__init__.py
yuanfangtardis/vscode_project
2d78a85413cc85789cc4fee8ec991eb2a0563ef8
[ "Apache-2.0" ]
1
2022-01-15T12:22:30.000Z
2022-01-15T12:22:30.000Z
""" .. module:: tools.__init__ :synopsis: This package contains tools for handling results obtained with the main SModelS code. """
20.571429
81
0.6875
17
144
5.588235
0.941176
0
0
0
0
0
0
0
0
0
0
0
0.215278
144
6
82
24
0.840708
0.930556
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
8e68d491045b46e0d5c3609fa40d0f8cbf83aabf
3,106
py
Python
src/image_caption_machine/world/place.py
brandontrabucco/ros-image-captioner
5fd18317f2ec600cdc61628028292a22eef45fc2
[ "MIT" ]
3
2018-09-08T10:28:59.000Z
2019-09-08T00:11:33.000Z
src/image_caption_machine/world/place.py
brandontrabucco/ros-image-captioner
5fd18317f2ec600cdc61628028292a22eef45fc2
[ "MIT" ]
null
null
null
src/image_caption_machine/world/place.py
brandontrabucco/ros-image-captioner
5fd18317f2ec600cdc61628028292a22eef45fc2
[ "MIT" ]
2
2019-04-17T17:24:28.000Z
2019-06-10T18:16:44.000Z
"""Author: Brandon Trabucco. Utility class for loading and managing locations in the robot's map. """ import json import math import rospy from rt_msgs.msg import Odom from std_msgs.msg import Header from geometry_msgs.msg import Pose from geometry_msgs.msg import Point from geometry_msgs.msg import Quaternion from geometry_msgs.msg import PoseStamped from tf.transformations import euler_from_quaternion from image_caption_machine.msg import WorldPlace from image_caption_machine.convert.message import convert_ros_message_to_dictionary from image_caption_machine.convert.message import convert_dictionary_to_ros_message class Place(object): """Utility class for managing physycal naed locations. """ def __init__(self, name="default", pose_stamped=PoseStamped( Header(0, rospy.Time(secs=0, nsecs=0), "None"), Pose(Point(0.0, 0.0, 0.0), Quaternion(0.0, 0.0, 0.0, 0.0))), x=None, y=None, json=None, msg=None): """Initialize the class with default parameters. Args: name: str REQUIRED pose_stamped: PoseStamped REQUIRED x: float y: float json: {name: "...", pose_stamped: {...}} msg: WorldPlace message """ self.name = name self.pose_stamped = pose_stamped if x is not None: self.pose_stamped.pose.position.x = x if y is not None: self.pose_stamped.pose.position.y = y if json is not None: self.json = json if msg is not None: self.msg = msg @property def json(self): """Serialize the place to json. """ return {"name": self.name, "pose_stamped": convert_ros_message_to_dictionary(self.pose_stamped)} @json.setter def json(self, val): """Load json into the odom object. """ self.name = val["name"] self.pose_stamped = convert_dictionary_to_ros_message( "geometry_msgs/PoseStamped", val["pose_stamped"]) @property def msg(self): """Utility to convert Place() to WorldPlace message. """ return WorldPlace(name=self.name, pose_stamped=self.pose_stamped) @msg.setter def msg(self, val): """Utility to convert WorldPlace message to Place(). 
""" self.name = val.name self.pose_stamped = val.pose_stamped @property def x(self): """Helper to get the x position. """ return self.pose_stamped.pose.position.x @property def y(self): """Helper to get the y position. """ return self.pose_stamped.pose.position.y def to(self, other): """Helper to get the length to another place. Args: other: Place() object """ dx = self.x - other.x dy = self.y - other.y return math.sqrt((dx * dx) + (dy * dy)) def __str__(self): """Helper to convert the object to string. """ return self.name
24.650794
83
0.603348
397
3,106
4.579345
0.219144
0.10286
0.016502
0.017602
0.366337
0.182618
0.177118
0.094609
0
0
0
0.007805
0.298777
3,106
125
84
24.848
0.826905
0.245654
0
0.068966
0
0
0.03098
0.01139
0
0
0
0
0
1
0.155172
false
0
0.224138
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e69d02ee0597be4c48dd1fc7fd8cd5d2f553e35
2,238
py
Python
joplin_web/api/serializers.py
kuyper/joplin-web
7a13b75cbb55741ddfb58767af34c7ad164fec11
[ "BSD-3-Clause" ]
null
null
null
joplin_web/api/serializers.py
kuyper/joplin-web
7a13b75cbb55741ddfb58767af34c7ad164fec11
[ "BSD-3-Clause" ]
null
null
null
joplin_web/api/serializers.py
kuyper/joplin-web
7a13b75cbb55741ddfb58767af34c7ad164fec11
[ "BSD-3-Clause" ]
1
2019-12-13T15:18:58.000Z
2019-12-13T15:18:58.000Z
from rest_framework import serializers

from joplin_web.models import Folders, Notes, Tags, NoteTags, Version


class FoldersSerializer(serializers.ModelSerializer):
    """Serializer for Joplin folders (notebooks)."""

    # Note count per folder; annotated onto the queryset by the view,
    # never written by clients.
    nb_notes = serializers.IntegerField(read_only=True)

    class Meta:
        fields = ('id', 'title', 'parent_id', 'nb_notes', 'created_time')
        model = Folders


class NotesSerializer(serializers.ModelSerializer):
    """Serializer for notes; reads expand the parent folder, writes take
    a folder primary key (looked up on the 'joplin' database)."""

    parent = FoldersSerializer(read_only=True)
    parent_id = serializers.PrimaryKeyRelatedField(
        queryset=Folders.objects.using('joplin').all(),
        source='folders',
        write_only=True)

    class Meta:
        fields = ('id', 'parent_id', 'parent', 'title', 'body', 'is_todo',
                  'todo_due', 'created_time', 'updated_time', 'source',
                  'source_application', 'latitude', 'longitude', 'altitude',
                  'author')
        model = Notes


class TagsSerializer(serializers.ModelSerializer):
    """Serializer for tags, exposing every model field plus a note count."""

    # Annotated count of notes carrying the tag (read-only).
    nb_notes = serializers.IntegerField(read_only=True)

    class Meta:
        fields = '__all__'
        model = Tags


class NoteTagsSerializer(serializers.ModelSerializer):
    """Serializer for the note<->tag association; reads expand both sides,
    writes take the two primary keys (from the 'joplin' database)."""

    note = NotesSerializer(read_only=True)
    tag = TagsSerializer(read_only=True)
    note_id = serializers.PrimaryKeyRelatedField(
        queryset=Notes.objects.using('joplin').all(),
        source='notes',
        write_only=True)
    tag_id = serializers.PrimaryKeyRelatedField(
        queryset=Tags.objects.using('joplin').all(),
        source='tags',
        write_only=True)

    class Meta:
        fields = ('id', 'note_id', 'note', 'tag_id', 'tag', 'created_time',
                  'updated_time', 'user_created_time', 'user_updated_time',
                  'encryption_cipher_text', 'encryption_applied')
        model = NoteTags


class NoteTagsByNoteIdSerializer(serializers.ModelSerializer):
    """Slim serializer returning only the expanded tag of an association."""

    tag = TagsSerializer(read_only=True)

    class Meta:
        fields = ('tag',)
        model = NoteTags


class VersionSerializer(serializers.ModelSerializer):
    """Read-only serializer exposing the application version number."""

    version = serializers.IntegerField()

    class Meta:
        fields = ('version', )
        read_only_fields = ('version', )
        model = Version
29.84
98
0.628239
209
2,238
6.526316
0.277512
0.052786
0.052786
0.062317
0.280059
0.18695
0.165689
0.121701
0.121701
0.121701
0
0
0.259607
2,238
74
99
30.243243
0.823174
0
0
0.25
0
0
0.14924
0.00983
0
0
0
0
0
1
0
false
0
0.041667
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e6a3aa8541823f934e62f4e12fc8d0777921c86
435
py
Python
bs.py
marlinarnz/roster_creator
4a158254bbe68e46e604426a59fe26e68711281b
[ "MIT" ]
null
null
null
bs.py
marlinarnz/roster_creator
4a158254bbe68e46e604426a59fe26e68711281b
[ "MIT" ]
null
null
null
bs.py
marlinarnz/roster_creator
4a158254bbe68e46e604426a59fe26e68711281b
[ "MIT" ]
null
null
null
import datetime

from app.constants import Constants as c
from app.input import InputMonthly
from app.output import OutputFactory
from app.create import CreatorUtility, SolverMIP

# Wire up the roster creator from its monthly input source, output factory
# and MIP solver.
inp = InputMonthly()
out = OutputFactory()
solv = SolverMIP()
creator = CreatorUtility(inp, out, solv)

# Roster period: all of January 2019, with Excel export enabled.
# BUG FIX: the end date was written as datetime.date(2019, 31, 1), i.e.
# month 31 - which raises ValueError at runtime.  The intended value is
# 31 January 2019 (day and month were swapped).
settings = {c.START: datetime.date(2019, 1, 1),
            c.END: datetime.date(2019, 1, 31),
            c.EXCEL_OUT: True}

creator.create(settings)
25.588235
48
0.728736
58
435
5.448276
0.465517
0.088608
0.101266
0
0
0
0
0
0
0
0
0.036111
0.172414
435
16
49
27.1875
0.841667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.384615
0
0.384615
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
8e6ab08948cc89750d63dd9c07947a6c58786c2f
5,859
py
Python
Plots/MapProjections/NCL_sat_3.py
learn2free/GeoCAT-examples
3ac152a767e78a362a8ebb6f677005f3de320ca6
[ "Apache-2.0" ]
1
2021-05-09T02:54:10.000Z
2021-05-09T02:54:10.000Z
Plots/MapProjections/NCL_sat_3.py
learn2free/GeoCAT-examples
3ac152a767e78a362a8ebb6f677005f3de320ca6
[ "Apache-2.0" ]
null
null
null
Plots/MapProjections/NCL_sat_3.py
learn2free/GeoCAT-examples
3ac152a767e78a362a8ebb6f677005f3de320ca6
[ "Apache-2.0" ]
null
null
null
""" NCL_sat_3.py ================ This script illustrates the following concepts: - zooming into an orthographic projection - plotting filled contour data on an orthographic map - plotting lat/lon tick marks on an orthographic map See following URLs to see the reproduced NCL plot & script: - Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/sat_3.ncl - Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/sat_3_lg.png """ import cartopy.crs as ccrs import cartopy.feature as cfeature import geocat.datafiles as gdf ############################################################################### # Import packages: import matplotlib.pyplot as plt import matplotlib.ticker as mticker import numpy as np import xarray as xr from geocat.viz import util as gvutil ############################################################################### # Define a helper function for plotting lat/lon ticks on an orthographic plane def plotOrthoTicks(coords, loc): if loc == 'zero': for lon, lat in coords: ax.text(lon, lat, '{0}\N{DEGREE SIGN}'.format(lon), va='bottom', ha='center', transform=ccrs.PlateCarree()) if loc == 'left': for lon, lat in coords: ax.text(lon, lat, '{0}\N{DEGREE SIGN} N '.format(lat), va='center', ha='right', transform=ccrs.PlateCarree()) if loc == 'right': for lon, lat in coords: ax.text(lon, lat, '{0}\N{DEGREE SIGN} N '.format(lat), va='center', ha='left', transform=ccrs.PlateCarree()) if loc == 'top': for lon, lat in coords: ax.text(lon, lat, '{0}\N{DEGREE SIGN} W '.format(-lon), va='bottom', ha='center', transform=ccrs.PlateCarree()) if loc == 'bottom': for lon, lat in coords: ax.text(lon, lat, '{0}\N{DEGREE SIGN} W '.format(-lon), va='top', ha='center', transform=ccrs.PlateCarree()) ############################################################################### # Read in data: # Open a netCDF data file using xarray default engine and # load the data into xarrays ds = xr.open_dataset(gdf.get('netcdf_files/h_avg_Y0191_D000.00.nc'), 
decode_times=False) # Extract a slice of the data t = ds.T.isel(time=0, z_t=0) ############################################################################### # Plot: plt.figure(figsize=(8, 8)) # Create an axis with an orthographic projection ax = plt.axes(projection=ccrs.Orthographic(central_longitude=-35, central_latitude=60), anchor='C') # Set extent of map ax.set_extent((-80, -10, 30, 80), crs=ccrs.PlateCarree()) # Add natural feature to map ax.coastlines(resolution='110m') ax.add_feature(cfeature.LAND, facecolor='lightgray', zorder=3) ax.add_feature(cfeature.COASTLINE, linewidth=0.2, zorder=3) ax.add_feature(cfeature.LAKES, edgecolor='black', linewidth=0.2, facecolor='white', zorder=4) # plot filled contour data heatmap = t.plot.contourf(ax=ax, transform=ccrs.PlateCarree(), levels=80, vmin=-1.5, vmax=28.5, cmap='RdGy', add_colorbar=False, zorder=1) # Add color bar cbar_ticks = np.arange(-1.5, 31.5, 3) cbar = plt.colorbar(heatmap, orientation='horizontal', extendfrac=[0, .1], shrink=0.8, aspect=14, pad=0.05, extendrect=True, ticks=cbar_ticks) cbar.ax.tick_params(labelsize=10) # Get rid of black outline on colorbar cbar.outline.set_visible(False) # Set main plot title main = r"$\bf{Example}$" + " " + r"$\bf{of}$" + " " + r"$\bf{Zooming}$" + \ " " + r"$\bf{a}$" + " " + r"$\bf{Sat}$" + " " + r"$\bf{Projection}$" # Set plot subtitles using NetCDF metadata left = t.long_name right = t.units # Use geocat-viz function to create main, left, and right plot titles title = gvutil.set_titles_and_labels(ax, maintitle=main, maintitlefontsize=16, lefttitle=left, lefttitlefontsize=14, righttitle=right, righttitlefontsize=14, xlabel="", ylabel="") # Plot gridlines gl = ax.gridlines(color='black', linewidth=0.2, zorder=2) # Set frequency of gridlines in the x and y directions gl.xlocator = mticker.FixedLocator(np.arange(-180, 180, 15)) gl.ylocator = mticker.FixedLocator(np.arange(-90, 90, 15)) # Manually plot tick marks. 
# NCL has automatic tick mark placement on orthographic projections, # Python's cartopy module does not have this functionality yet. plotOrthoTicks([(0, 81.7)], 'zero') plotOrthoTicks([(-80, 30), (-76, 20), (-88, 40), (-107, 50)], 'left') plotOrthoTicks([(-9, 30), (-6, 40), (1, 50), (13, 60)], 'right') plotOrthoTicks([(-120, 60), (-60, 82.5)], 'top') plotOrthoTicks([(-75, 16.0), (-60, 25.0), (-45, 29.0), (-30, 29.5), (-15, 26.5)], 'bottom') plt.tight_layout() plt.show()
33.48
82
0.497013
656
5,859
4.396341
0.397866
0.020804
0.049931
0.019071
0.197642
0.166436
0.126907
0.126907
0.126907
0.126907
0
0.040806
0.330773
5,859
174
83
33.672414
0.694721
0.206861
0
0.287037
0
0
0.082501
0.008134
0
0
0
0
0
1
0.009259
false
0
0.074074
0
0.083333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e6c93847574069cca7db77ebf31e5ff0a8a00ef
2,047
py
Python
bot/team.py
mcfunley/clippingsbot
2954d5b5aa854b57d062a98e2133d258f9fd86c7
[ "MIT" ]
1
2019-02-06T16:52:05.000Z
2019-02-06T16:52:05.000Z
bot/team.py
mcfunley/clippingsbot
2954d5b5aa854b57d062a98e2133d258f9fd86c7
[ "MIT" ]
null
null
null
bot/team.py
mcfunley/clippingsbot
2954d5b5aa854b57d062a98e2133d258f9fd86c7
[ "MIT" ]
null
null
null
from bot import db


def save(data):
    """Insert or update a team's Slack OAuth install record.

    data must supply team_id, access_token, user_id, team_name and scope.
    Returns the team_id of the upserted row.
    """
    sql = """
        insert into clippingsbot.teams (
            team_id, access_token, user_id, team_name, scope
        ) values (
            :team_id, :access_token, :user_id, :team_name, :scope
        )
        on conflict (team_id) do update
            set scope = excluded.scope,
                access_token = excluded.access_token,
                user_id = excluded.user_id,
                team_name = excluded.team_name
        returning team_id
    """
    return db.scalar(sql, **data)


def find(team_id):
    """Return the team row for team_id, or whatever db.find_one yields
    when there is no match."""
    return db.find_one(
        'select * from clippingsbot.teams where team_id = :team_id',
        team_id = team_id)


def watch(team, channel_id, pattern, pattern_id):
    """Register a watch pattern for a channel; duplicates are ignored
    via the (team_id, channel_id, pattern_id) conflict clause."""
    sql = """
        insert into clippingsbot.team_patterns (
            team_id, channel_id, pattern_id, display_pattern
        ) values (:team_id, :channel_id, :pattern_id, :pattern)
        on conflict (team_id, channel_id, pattern_id) do nothing
    """
    db.execute(
        sql,
        team_id=team['team_id'],
        channel_id=channel_id,
        pattern_id=pattern_id,
        pattern=pattern
    )


def find_patterns(team, channel_id):
    """Return all watch patterns registered for one channel of a team."""
    sql = """
        select * from clippingsbot.team_patterns
        where team_id = :team_id
          and channel_id = :channel_id
    """
    return db.find(sql, team_id=team['team_id'], channel_id=channel_id)


def count_other_channel_patterns(team, channel_id):
    """Count the team's patterns registered in channels OTHER than
    channel_id."""
    sql = """
        select count(*) from clippingsbot.team_patterns
        where team_id = :team_id
          and channel_id != :channel_id
    """
    return db.scalar(sql, team_id=team['team_id'], channel_id=channel_id)


def count_patterns(team):
    """Count every pattern the team has registered, across all channels."""
    sql = """
        select count(*) from clippingsbot.team_patterns
        where team_id = :team_id
    """
    return db.scalar(sql, team_id=team['team_id'])


def stop(team, channel_id, pattern):
    """Remove a channel's watch pattern, matching the display pattern
    case-insensitively (lower() on both sides)."""
    sql = """
        delete from clippingsbot.team_patterns
        where team_id = :team_id
          and channel_id = :channel_id
          and lower(display_pattern) = lower(:pattern)
    """
    db.execute(sql, team_id=team['team_id'], channel_id=channel_id,
               pattern=pattern)
28.041096
76
0.662921
283
2,047
4.5053
0.162544
0.141176
0.120784
0.065882
0.615686
0.611765
0.465882
0.465882
0.465882
0.409412
0
0
0.225696
2,047
72
77
28.430556
0.804416
0
0
0.241379
0
0
0.579873
0.085002
0
0
0
0
0
1
0.12069
false
0
0.017241
0.017241
0.224138
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e6d24e204761284a5dd415da03add5895524b76
3,947
py
Python
meeshkan/nlp/spec_transformer.py
meeshkan/meeshkan-nlp
63ef1e0ef31fd9c2031c89e9fd6ca3fc46eef13e
[ "MIT" ]
1
2020-04-02T08:02:33.000Z
2020-04-02T08:02:33.000Z
meeshkan/nlp/spec_transformer.py
meeshkan/meeshkan-nlp
63ef1e0ef31fd9c2031c89e9fd6ca3fc46eef13e
[ "MIT" ]
9
2020-03-24T21:09:16.000Z
2020-07-24T09:58:11.000Z
meeshkan/nlp/spec_transformer.py
meeshkan/meeshkan-nlp
63ef1e0ef31fd9c2031c89e9fd6ca3fc46eef13e
[ "MIT" ]
null
null
null
import typing from operator import itemgetter from http_types import HttpExchange from jsonpath_rw import parse from openapi_typed_2 import OpenAPIObject, convert_from_openapi, convert_to_openapi from meeshkan.nlp.data_extractor import DataExtractor from meeshkan.nlp.entity_extractor import EntityExtractor from meeshkan.nlp.ids.id_classifier import IdClassifier, IdType from meeshkan.nlp.operation_classifier import OperationClassifier from meeshkan.nlp.spec_normalizer import SpecNormalizer class SpecTransformer: def __init__( self, extractor: EntityExtractor, path_analyzer, normalizer: SpecNormalizer, id_classifier: IdClassifier, ): self._extractor = extractor self._path_analyzer = path_analyzer self._normalizer = normalizer self._operation_classifier = OperationClassifier() self._id_classifier = id_classifier self._data_extractor = DataExtractor() def optimize_spec( self, spec: OpenAPIObject, recordings: typing.List[HttpExchange] ) -> OpenAPIObject: entity_paths = self._extractor.get_entity_from_spec(spec) spec_dict = convert_from_openapi(spec) datapaths, spec_dict = self._normalizer.normalize(spec_dict, entity_paths) grouped_records = self._data_extractor.group_records(spec_dict, recordings) spec_dict = self._replace_path_ids(spec_dict, grouped_records) spec_dict = self._operation_classifier.fill_operations(spec_dict) data = self._data_extractor.extract_data(datapaths, grouped_records) spec_dict = self._add_entity_ids(spec_dict, data) spec_dict = self._inject_data(spec_dict, data) return convert_to_openapi(spec_dict) def _replace_path_ids(self, spec, grouped_records): for pathname, path_record in grouped_records.items(): for param in reversed(path_record.path_args): res = self._id_classifier.by_values(path_record.path_arg_values[param]) if res != IdType.UNKNOWN: path_item = spec["paths"].pop(pathname) for param_desc in path_item["parameters"]: if param_desc["name"] == param: param_desc["name"] = "id" param_desc["x-meeshkan-id-type"] = res.value break pathname = 
pathname.replace("{{{}}}".format(param), "{id}") spec["paths"][pathname] = path_item break return spec def _add_entity_ids(self, spec_dict, data): for name, values in data.items(): schema = spec_dict["components"]["schemas"][name] potential_ids = [] for property in schema["properties"]: name_score = self._id_classifier.by_name(name, property) if name_score > 0: res = self._id_classifier.by_values( (v[property] for v in values if property in v) ) if res != IdType.UNKNOWN: potential_ids.append((property, res, name_score)) if len(potential_ids) > 0: idx = max(potential_ids, key=itemgetter(2)) schema["x-meeshkan-id-path"] = idx[0] schema["x-meeshkan-id-type"] = idx[1].value return spec_dict def _inject_data(self, spec_dict, data): spec_dict["x-meeshkan-data"] = {} for name, values in data.items(): expr = parse(spec_dict["components"]["schemas"][name]["x-meeshkan-id-path"]) injected_values = dict() for val in values: idx = expr.find(val) if len(idx) > 0: injected_values[idx[0].value] = val spec_dict["x-meeshkan-data"][name] = list(injected_values.values()) return spec_dict
40.27551
88
0.624272
445
3,947
5.253933
0.229213
0.071856
0.032079
0.023097
0.124038
0.047049
0.023952
0
0
0
0
0.002841
0.286547
3,947
97
89
40.690722
0.827415
0
0
0.1
0
0
0.047124
0
0
0
0
0
0
1
0.0625
false
0
0.125
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e6f58d75f26add4efd1a1ca913400a65b65e8ba
62
py
Python
app/main/errors.py
AJowett/chatty
a23fa594d53ac4d0851d7ce44d3fa81836e379d1
[ "MIT" ]
null
null
null
app/main/errors.py
AJowett/chatty
a23fa594d53ac4d0851d7ce44d3fa81836e379d1
[ "MIT" ]
null
null
null
app/main/errors.py
AJowett/chatty
a23fa594d53ac4d0851d7ce44d3fa81836e379d1
[ "MIT" ]
null
null
null
from flask import render_template, request from . import main
20.666667
42
0.822581
9
62
5.555556
0.777778
0
0
0
0
0
0
0
0
0
0
0
0.145161
62
2
43
31
0.943396
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
8e7144c085cff446c01b799bb109c5bbe09b0b02
3,216
py
Python
policies.py
IBM/LOA
9cd402c814f1d9c8b4de52ee18a3cb7ec2c6d07a
[ "MIT" ]
12
2021-12-15T09:03:36.000Z
2022-03-28T21:37:25.000Z
policies.py
IBM/LOA
9cd402c814f1d9c8b4de52ee18a3cb7ec2c6d07a
[ "MIT" ]
3
2022-01-04T18:03:01.000Z
2022-03-31T16:15:25.000Z
policies.py
IBM/LOA
9cd402c814f1d9c8b4de52ee18a3cb7ec2c6d07a
[ "MIT" ]
4
2022-01-04T17:44:23.000Z
2022-03-28T21:37:42.000Z
import os import sys import torch.nn as nn if True: DDLNN_HOME = os.environ['DDLNN_HOME'] meta_rule_home = '{}/src/meta_rule/'.format(DDLNN_HOME) src_rule_home = '{}/dd_lnn/'.format(DDLNN_HOME) sys.path.append(meta_rule_home) sys.path.append(src_rule_home) from lnn_operators \ import and_lukasiewicz, \ and_lukasiewicz_unconstrained, and_lukasiewicz_lambda EPS = 1e-10 class SimpleAndLNN(nn.Module): def __init__(self, arity=4, use_slack=True, alpha=0.95, constrained=True, use_lambda=True): super().__init__() self.alpha = alpha self.use_slack = use_slack self.arity = arity self.constrained = constrained self.use_lambda = use_lambda if use_lambda: assert constrained, \ 'Lambda LNN can only be used for constrained version' if constrained: if use_lambda: self.and_node = and_lukasiewicz_lambda(alpha, arity, use_slack) else: self.and_node = and_lukasiewicz(alpha, arity, use_slack) else: self.and_node = \ and_lukasiewicz_unconstrained(alpha, arity, use_slack) def forward(self, x): final_pred, final_slack = self.and_node(x) return final_pred, final_slack def extract_weights(self, normed=True, verbose=False): if self.constrained: if self.use_lambda: beta, wts = self.and_node.get_params() else: beta, wts, slacks = self.and_node.cdd() else: beta, wts = self.and_node.get_params() if normed: wts = wts / wts.max() if verbose: print('beta : ' + str(beta.item())) print('argument weights : ' + str(wts.detach())) return beta, wts class PolicyLNNTWC_SingleAnd(nn.Module): def __init__(self, admissible_verbs, use_constraint=True, num_by_arity=None): super().__init__() alpha = 0.95 use_slack = True self.alpha = alpha self.use_slack = use_slack self.use_constraint = use_constraint self.admissible_verbs = admissible_verbs self.models = nn.ModuleDict() if num_by_arity is None: self.total_inputs = {1: 6, 2: 12} else: self.total_inputs = num_by_arity for v, arity in admissible_verbs.items(): self.init_model_for_verb(v, self.total_inputs[arity]) def init_model_for_verb(self, v, nb_inputs): 
self.models[v] = \ SimpleAndLNN(arity=nb_inputs, use_slack=self.alpha, alpha=self.alpha, constrained=self.use_constraint) def compute_constraint_loss(self, lnn_model_name='go', lam=0.0001): return \ self.models[lnn_model_name].\ and_node.compute_constraint_loss(lam=lam)\ if self.models[lnn_model_name].and_node.lam else 0.0 def forward_eval(self, x, lnn_model_name='go', split=True): out, _ = self.models[lnn_model_name](x) activations = out.view(1, -1) + EPS return activations
30.056075
79
0.600435
403
3,216
4.506203
0.26799
0.044053
0.042401
0.029736
0.202093
0.155286
0.155286
0.093612
0.093612
0.051762
0
0.010733
0.304726
3,216
106
80
30.339623
0.801431
0
0
0.182927
0
0
0.036692
0
0
0
0
0
0.012195
1
0.085366
false
0
0.04878
0.012195
0.207317
0.02439
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e714557c8e4d44f9b9d7485ed6921411a4740ad
359
py
Python
test/test_sns.py
TSNobleSoftware/awssert
a924d591523813b070d67bb4df4bd08913c12eac
[ "Apache-2.0" ]
19
2021-04-25T13:28:12.000Z
2021-06-16T18:30:38.000Z
test/test_sns.py
TSNobleSoftware/awssert
a924d591523813b070d67bb4df4bd08913c12eac
[ "Apache-2.0" ]
23
2021-04-06T17:00:04.000Z
2021-04-20T18:03:35.000Z
test/test_sns.py
TSNoble/awssert
a924d591523813b070d67bb4df4bd08913c12eac
[ "Apache-2.0" ]
1
2021-04-07T07:45:37.000Z
2021-04-07T07:45:37.000Z
import moto import boto3 @moto.mock_sns def test_topic_should_receive_message_assertion(): arn = boto3.client("sns").create_topic(Name="foo")["TopicArn"] topic = boto3.resource("sns").Topic(arn) with topic.should.receive("foo"): topic.publish(Message="foo") with topic.should_not.receive("bar"): topic.publish(Message="foo")
27.615385
66
0.696379
48
359
5.041667
0.479167
0.136364
0.14876
0.181818
0
0
0
0
0
0
0
0.009804
0.147632
359
12
67
29.916667
0.781046
0
0
0.2
0
0
0.08078
0
0
0
0
0
0.1
1
0.1
false
0
0.2
0
0.3
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
8e71ce111f7ea9dd7c20cea0fda5b54b66380cdd
423
py
Python
stopwatch/__init__.py
hrishikeshrt/py_stopwatch
8a8dd2f35def2d6ca7f5b6d04f7d7428df9ef3a6
[ "MIT" ]
2
2022-03-04T11:37:44.000Z
2022-03-04T13:53:13.000Z
stopwatch/__init__.py
hrishikeshrt/py_stopwatch
8a8dd2f35def2d6ca7f5b6d04f7d7428df9ef3a6
[ "MIT" ]
null
null
null
stopwatch/__init__.py
hrishikeshrt/py_stopwatch
8a8dd2f35def2d6ca7f5b6d04f7d7428df9ef3a6
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Apr 12 21:22:49 2021 @author: Hrishikesh Terdalkar """ ############################################################################### __author__ = """Hrishikesh Terdalkar""" __email__ = 'hrishikeshrt@linuxmail.org' __version__ = '0.0.2' ############################################################################### from .stopwatch import Stopwatch
22.263158
79
0.43026
34
423
5
0.852941
0.188235
0.294118
0
0
0
0
0
0
0
0
0.044503
0.096927
423
18
80
23.5
0.400524
0.260047
0
0
0
0
0.351724
0.17931
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
8e723b8f4a32d0c8a03c62c48807cc3c480dfc71
16,604
py
Python
PsychoPy/testscript.py
esbenkc/Experimental-Methods-1
e2fa12df0f98043ea83f61f439525a5e78978340
[ "MIT" ]
null
null
null
PsychoPy/testscript.py
esbenkc/Experimental-Methods-1
e2fa12df0f98043ea83f61f439525a5e78978340
[ "MIT" ]
null
null
null
PsychoPy/testscript.py
esbenkc/Experimental-Methods-1
e2fa12df0f98043ea83f61f439525a5e78978340
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This experiment was created using PsychoPy3 Experiment Builder (v3.1.3), on June 24, 2019, at 16:21 If you publish work using this script please cite the PsychoPy publications: Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13. Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008 """ from __future__ import absolute_import, division from psychopy import locale_setup, sound, gui, visual, core, data, event, logging, clock from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED, STOPPED, FINISHED, PRESSED, RELEASED, FOREVER) import numpy as np # whole numpy lib is available, prepend 'np.' from numpy import (sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray) from numpy.random import random, randint, normal, shuffle import os # handy system and path functions import sys # to get file system encoding from psychopy.hardware import keyboard # Ensure that relative paths start from the same directory as this script _thisDir = os.path.dirname(os.path.abspath(__file__)) os.chdir(_thisDir) # Store info about the experiment session psychopyVersion = '3.1.3' expName = 'stroop' # from the Builder filename that created this script expInfo = {'session': '01', 'participant': ''} dlg = gui.DlgFromDict(dictionary=expInfo, sortKeys=False, title=expName) if dlg.OK == False: core.quit() # user pressed cancel expInfo['date'] = data.getDateStr() # add a simple timestamp expInfo['expName'] = expName expInfo['psychopyVersion'] = psychopyVersion # Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc filename = _thisDir + os.sep + u'data' + os.sep + '%s_%s' % (expInfo['participant'], expInfo['date']) # An ExperimentHandler isn't essential but helps with data saving thisExp = data.ExperimentHandler(name=expName, version='', 
extraInfo=expInfo, runtimeInfo=None, originPath='C:\\Users\\lpzdb\\pavloviaDemos\\stroop\\stroop.py', savePickle=True, saveWideText=True, dataFileName=filename) # save a log file for detail verbose info logFile = logging.LogFile(filename+'.log', level=logging.EXP) logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file endExpNow = False # flag for 'escape' or other condition => quit the exp # Start Code - component code to be run before the window creation # Setup the Window win = visual.Window( size=[1920, 1080], fullscr=True, screen=0, winType='pyglet', allowGUI=False, allowStencil=False, monitor='testMonitor', color='black', colorSpace='rgb', blendMode='avg', useFBO=True, units='height') # store frame rate of monitor if we can measure it expInfo['frameRate'] = win.getActualFrameRate() if expInfo['frameRate'] != None: frameDur = 1.0 / round(expInfo['frameRate']) else: frameDur = 1.0 / 60.0 # could not measure, so guess # create a default keyboard (e.g. to check for escape) defaultKeyboard = keyboard.Keyboard() # Initialize components for Routine "instruct" instructClock = core.Clock() instrText = visual.TextStim(win=win, name='instrText', text='OK. 
Ready for the real thing?\n\nRemember, ignore the word itself; press:\nLeft for red LETTERS\nDown for green LETTERS\nRight for blue LETTERS\n(Esc will quit)\n\nPress any key to continue', font='Arial', units='height', pos=[0, 0], height=0.05, wrapWidth=None, ori=0, color='white', colorSpace='rgb', opacity=1, languageStyle='LTR', depth=0.0); # Initialize components for Routine "trial" trialClock = core.Clock() word = visual.TextStim(win=win, name='word', text='default text', font='Arial', units='height', pos=[0, 0], height=0.15, wrapWidth=None, ori=0, color='white', colorSpace='rgb', opacity=1, languageStyle='LTR', depth=0.0); # Initialize components for Routine "thanks" thanksClock = core.Clock() thanksText = visual.TextStim(win=win, name='thanksText', text='This is the end of the experiment.\n\nThanks!', font='Arial', units='height', pos=[0, 0], height=0.05, wrapWidth=None, ori=0, color='white', colorSpace='rgb', opacity=1, languageStyle='LTR', depth=0.0); # Create some handy timers globalClock = core.Clock() # to track the time since experiment started routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine # ------Prepare to start Routine "instruct"------- t = 0 instructClock.reset() # clock frameN = -1 continueRoutine = True # update component parameters for each repeat ready = keyboard.Keyboard() # keep track of which components have finished instructComponents = [instrText, ready] for thisComponent in instructComponents: thisComponent.tStart = None thisComponent.tStop = None thisComponent.tStartRefresh = None thisComponent.tStopRefresh = None if hasattr(thisComponent, 'status'): thisComponent.status = NOT_STARTED # -------Start Routine "instruct"------- while continueRoutine: # get current time t = instructClock.getTime() frameN = frameN + 1 # number of completed frames (so 0 is the first frame) # update/draw components on each frame # *instrText* updates if t >= 0 and instrText.status == NOT_STARTED: # keep track of start 
time/frame for later instrText.tStart = t # not accounting for scr refresh instrText.frameNStart = frameN # exact frame index win.timeOnFlip(instrText, 'tStartRefresh') # time at next scr refresh instrText.setAutoDraw(True) # *ready* updates waitOnFlip = False if t >= 0 and ready.status == NOT_STARTED: # keep track of start time/frame for later ready.tStart = t # not accounting for scr refresh ready.frameNStart = frameN # exact frame index win.timeOnFlip(ready, 'tStartRefresh') # time at next scr refresh ready.status = STARTED # keyboard checking is just starting win.callOnFlip(ready.clearEvents, eventType='keyboard') # clear events on next screen flip if ready.status == STARTED and not waitOnFlip: theseKeys = ready.getKeys(keyList=None, waitRelease=False) if len(theseKeys): theseKeys = theseKeys[0] # at least one key was pressed # check for quit: if "escape" == theseKeys: endExpNow = True # a response ends the routine continueRoutine = False # check for quit (typically the Esc key) if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]): core.quit() # check if all components have finished if not continueRoutine: # a component has requested a forced-end of Routine break continueRoutine = False # will revert to True if at least one component still running for thisComponent in instructComponents: if hasattr(thisComponent, "status") and thisComponent.status != FINISHED: continueRoutine = True break # at least one component has not yet finished # refresh the screen if continueRoutine: # don't flip if this routine is over or we'll get a blank screen win.flip() # -------Ending Routine "instruct"------- for thisComponent in instructComponents: if hasattr(thisComponent, "setAutoDraw"): thisComponent.setAutoDraw(False) thisExp.addData('instrText.started', instrText.tStartRefresh) thisExp.addData('instrText.stopped', instrText.tStopRefresh) # the Routine "instruct" was not non-slip safe, so reset the non-slip timer routineTimer.reset() # set up handler to look after 
randomisation of conditions etc trials = data.TrialHandler(nReps=5, method='random', extraInfo=expInfo, originPath=-1, trialList=data.importConditions('trialTypes.xls'), seed=None, name='trials') thisExp.addLoop(trials) # add the loop to the experiment thisTrial = trials.trialList[0] # so we can initialise stimuli with some values # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb) if thisTrial != None: for paramName in thisTrial: exec('{} = thisTrial[paramName]'.format(paramName)) for thisTrial in trials: currentLoop = trials # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb) if thisTrial != None: for paramName in thisTrial: exec('{} = thisTrial[paramName]'.format(paramName)) # ------Prepare to start Routine "trial"------- t = 0 trialClock.reset() # clock frameN = -1 continueRoutine = True # update component parameters for each repeat word.setColor(letterColor, colorSpace='rgb') word.setText(text) resp = keyboard.Keyboard() # keep track of which components have finished trialComponents = [word, resp] for thisComponent in trialComponents: thisComponent.tStart = None thisComponent.tStop = None thisComponent.tStartRefresh = None thisComponent.tStopRefresh = None if hasattr(thisComponent, 'status'): thisComponent.status = NOT_STARTED # -------Start Routine "trial"------- while continueRoutine: # get current time t = trialClock.getTime() frameN = frameN + 1 # number of completed frames (so 0 is the first frame) # update/draw components on each frame # *word* updates if t >= 0.5 and word.status == NOT_STARTED: # keep track of start time/frame for later word.tStart = t # not accounting for scr refresh word.frameNStart = frameN # exact frame index win.timeOnFlip(word, 'tStartRefresh') # time at next scr refresh word.setAutoDraw(True) # *resp* updates waitOnFlip = False if t >= 0.5 and resp.status == NOT_STARTED: # keep track of start time/frame for later resp.tStart = t # not accounting for scr refresh resp.frameNStart = frameN # exact 
frame index win.timeOnFlip(resp, 'tStartRefresh') # time at next scr refresh resp.status = STARTED # keyboard checking is just starting waitOnFlip = True win.callOnFlip(resp.clock.reset) # t=0 on next screen flip win.callOnFlip(resp.clearEvents, eventType='keyboard') # clear events on next screen flip if resp.status == STARTED and not waitOnFlip: theseKeys = resp.getKeys(keyList=['left', 'down', 'right'], waitRelease=False) if len(theseKeys): theseKeys = theseKeys[0] # at least one key was pressed # check for quit: if "escape" == theseKeys: endExpNow = True resp.keys = theseKeys.name # just the last key pressed resp.rt = theseKeys.rt # was this 'correct'? if (resp.keys == str(corrAns)) or (resp.keys == corrAns): resp.corr = 1 else: resp.corr = 0 # a response ends the routine continueRoutine = False # check for quit (typically the Esc key) if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]): core.quit() # check if all components have finished if not continueRoutine: # a component has requested a forced-end of Routine break continueRoutine = False # will revert to True if at least one component still running for thisComponent in trialComponents: if hasattr(thisComponent, "status") and thisComponent.status != FINISHED: continueRoutine = True break # at least one component has not yet finished # refresh the screen if continueRoutine: # don't flip if this routine is over or we'll get a blank screen win.flip() # -------Ending Routine "trial"------- for thisComponent in trialComponents: if hasattr(thisComponent, "setAutoDraw"): thisComponent.setAutoDraw(False) trials.addData('word.started', word.tStartRefresh) trials.addData('word.stopped', word.tStopRefresh) # check responses if resp.keys in ['', [], None]: # No response was made resp.keys = None # was no response the correct answer?! 
if str(corrAns).lower() == 'none': resp.corr = 1; # correct non-response else: resp.corr = 0; # failed to respond (incorrectly) # store data for trials (TrialHandler) trials.addData('resp.keys',resp.keys) trials.addData('resp.corr', resp.corr) if resp.keys != None: # we had a response trials.addData('resp.rt', resp.rt) trials.addData('resp.started', resp.tStartRefresh) trials.addData('resp.stopped', resp.tStopRefresh) # the Routine "trial" was not non-slip safe, so reset the non-slip timer routineTimer.reset() thisExp.nextEntry() # completed 5 repeats of 'trials' # get names of stimulus parameters if trials.trialList in ([], [None], None): params = [] else: params = trials.trialList[0].keys() # save data for this loop trials.saveAsExcel(filename + '.xlsx', sheetName='trials', stimOut=params, dataOut=['n','all_mean','all_std', 'all_raw']) # ------Prepare to start Routine "thanks"------- t = 0 thanksClock.reset() # clock frameN = -1 continueRoutine = True routineTimer.add(2.000000) # update component parameters for each repeat # keep track of which components have finished thanksComponents = [thanksText] for thisComponent in thanksComponents: thisComponent.tStart = None thisComponent.tStop = None thisComponent.tStartRefresh = None thisComponent.tStopRefresh = None if hasattr(thisComponent, 'status'): thisComponent.status = NOT_STARTED # -------Start Routine "thanks"------- while continueRoutine and routineTimer.getTime() > 0: # get current time t = thanksClock.getTime() frameN = frameN + 1 # number of completed frames (so 0 is the first frame) # update/draw components on each frame # *thanksText* updates if t >= 0.0 and thanksText.status == NOT_STARTED: # keep track of start time/frame for later thanksText.tStart = t # not accounting for scr refresh thanksText.frameNStart = frameN # exact frame index win.timeOnFlip(thanksText, 'tStartRefresh') # time at next scr refresh thanksText.setAutoDraw(True) frameRemains = 0.0 + 2.0- win.monitorFramePeriod * 0.75 # most of one 
frame period left if thanksText.status == STARTED and t >= frameRemains: # keep track of stop time/frame for later thanksText.tStop = t # not accounting for scr refresh thanksText.frameNStop = frameN # exact frame index win.timeOnFlip(thanksText, 'tStopRefresh') # time at next scr refresh thanksText.setAutoDraw(False) # check for quit (typically the Esc key) if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]): core.quit() # check if all components have finished if not continueRoutine: # a component has requested a forced-end of Routine break continueRoutine = False # will revert to True if at least one component still running for thisComponent in thanksComponents: if hasattr(thisComponent, "status") and thisComponent.status != FINISHED: continueRoutine = True break # at least one component has not yet finished # refresh the screen if continueRoutine: # don't flip if this routine is over or we'll get a blank screen win.flip() # -------Ending Routine "thanks"------- for thisComponent in thanksComponents: if hasattr(thisComponent, "setAutoDraw"): thisComponent.setAutoDraw(False) thisExp.addData('thanksText.started', thanksText.tStartRefresh) thisExp.addData('thanksText.stopped', thanksText.tStopRefresh) # Flip one final time so any remaining win.callOnFlip() # and win.timeOnFlip() tasks get executed before quitting win.flip() # these shouldn't be strictly necessary (should auto-save) thisExp.saveAsWideText(filename+'.csv') thisExp.saveAsPickle(filename) logging.flush() # make sure everything is closed down thisExp.abort() # or data files will save again on exit win.close() core.quit()
41.51
201
0.668152
2,021
16,604
5.476497
0.242454
0.0206
0.008945
0.015179
0.500271
0.489339
0.451662
0.369353
0.369353
0.342971
0
0.011611
0.232293
16,604
399
202
41.614035
0.856672
0.333895
0
0.441948
0
0.003745
0.091625
0.006597
0
0
0
0
0
1
0
false
0
0.037453
0
0.037453
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e73639fd89c53082140ccfdb67b914c88ac5358
831
py
Python
t1.py
eferro/pydatastructsalgorithms
20b84afaa85ee1bba59fcc2bcbe6a2dd058da7bf
[ "MIT" ]
null
null
null
t1.py
eferro/pydatastructsalgorithms
20b84afaa85ee1bba59fcc2bcbe6a2dd058da7bf
[ "MIT" ]
null
null
null
t1.py
eferro/pydatastructsalgorithms
20b84afaa85ee1bba59fcc2bcbe6a2dd058da7bf
[ "MIT" ]
null
null
null
from pydatastructsalgorithms import tree_list as tree # r = tree.binary_tree(3) # tree.insert_left(r, 4) # tree.insert_left(r, 5) # tree.insert_right(r, 6) # tree.insert_right(r, 7) # l = tree.get_left_child(r) # tree.set_root_val(l, 9) # tree.insert_left(l, 11) # print(tree.get_right_child(tree.get_right_child(r))) # x = tree.binary_tree('a') # tree.insert_left(x, 'b') # tree.insert_right(x, 'c') # print x # tree.insert_right(tree.get_right_child(x), 'd') # print x # tree.insert_left(tree.get_right_child(tree.get_right_child(x)), 'e') # print x def build_tree(): r = tree.binary_tree('a') tree.insert_left(r, 'b') tree.insert_right(r, 'c') tree.insert_right(tree.get_left_child(r), 'd') tree.insert_left(tree.get_right_child(r), 'e') tree.insert_right(tree.get_right_child(r), 'f') return r print build_tree()
23.742857
70
0.713598
152
831
3.638158
0.230263
0.253165
0.177215
0.21519
0.566004
0.424955
0.424955
0.122966
0
0
0
0.010825
0.11071
831
35
71
23.742857
0.737483
0.55716
0
0
0
0
0.017094
0
0
0
0
0
0
0
null
null
0
0.1
null
null
0.1
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
8e73968f0c29704beef26acb5999c4d5909b9d5e
309
py
Python
choptop-backend/sensor_controller.py
rossng/choptop
cf776d3fdcca92cda2ba56768bac04c37416ec0f
[ "MIT" ]
1
2018-04-10T20:10:39.000Z
2018-04-10T20:10:39.000Z
choptop-backend/sensor_controller.py
rossng/choptop
cf776d3fdcca92cda2ba56768bac04c37416ec0f
[ "MIT" ]
1
2020-04-29T23:14:11.000Z
2020-04-29T23:14:11.000Z
choptop-backend/sensor_controller.py
rossng/choptop
cf776d3fdcca92cda2ba56768bac04c37416ec0f
[ "MIT" ]
null
null
null
from flask import _app_ctx_stack, jsonify from choptop import app def get_model(): appContext = _app_ctx_stack.top choptop = getattr(appContext, "ChopTop", None) return choptop @app.route('/position') def get_position(self): choptop = get_model() return jsonify(choptop.finger_position)
23.769231
50
0.7411
41
309
5.341463
0.487805
0.082192
0.100457
0
0
0
0
0
0
0
0
0
0.165049
309
13
51
23.769231
0.848837
0
0
0
0
0
0.051613
0
0
0
0
0
0
1
0.2
false
0
0.2
0
0.6
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
8e745ff62ea6033b9af40da163096d4969eae110
3,856
py
Python
EmbLearning/config.py
zhangjindou/SoLE
2c20e39603ece315d571f8eb12674c6be8d378a4
[ "MIT" ]
2
2021-03-14T06:35:12.000Z
2022-01-03T08:39:30.000Z
EmbLearning/config.py
zhangjindou/SoLE
2c20e39603ece315d571f8eb12674c6be8d378a4
[ "MIT" ]
null
null
null
EmbLearning/config.py
zhangjindou/SoLE
2c20e39603ece315d571f8eb12674c6be8d378a4
[ "MIT" ]
1
2021-03-14T06:35:13.000Z
2021-03-14T06:35:13.000Z
# ----------------------- PATH ------------------------ ROOT_PATH = "." DATA_PATH = "%s/../Datasets" % ROOT_PATH FB15K_DATA_PATH = "%s/fb15k" % DATA_PATH DB100K_DATA_PATH = "%s/db100k" % DATA_PATH FB15K_SPARSE_DATA_PATH = "%s/fb15k-sparse" % DATA_PATH LOG_PATH = "%s/log_dir" % ROOT_PATH CHECKPOINT_PATH = "%s/checkpoint" % ROOT_PATH # ----------------------- DATA ------------------------ DATASET = {} FB15K_TRAIN_RAW = "%s/train.txt" % FB15K_DATA_PATH FB15K_VALID_RAW = "%s/valid.txt" % FB15K_DATA_PATH FB15K_TEST_RAW = "%s/test.txt" % FB15K_DATA_PATH FB15K_TRAIN = "%s/digitized_train.txt" % FB15K_DATA_PATH FB15K_VALID = "%s/digitized_valid.txt" % FB15K_DATA_PATH FB15K_TEST = "%s/digitized_test.txt" % FB15K_DATA_PATH FB15K_E2ID = "%s/e2id.txt" % FB15K_DATA_PATH FB15K_R2ID = "%s/r2id.txt" % FB15K_DATA_PATH FB15K_GNDS = "%s/groundings.txt" % FB15K_DATA_PATH FB15K_RULES = "%s/lifted_rules.txt" % FB15K_DATA_PATH DATASET["fb15k"] = { "train_raw": FB15K_TRAIN_RAW, "valid_raw": FB15K_VALID_RAW, "test_raw": FB15K_TEST_RAW, "train": FB15K_TRAIN, "valid": FB15K_VALID, "test": FB15K_TEST, "e2id": FB15K_E2ID, "r2id": FB15K_R2ID, "groundings": FB15K_GNDS, } DB100K_TRAIN_RAW = "%s/train.txt" % DB100K_DATA_PATH DB100K_VALID_RAW = "%s/valid.txt" % DB100K_DATA_PATH DB100K_TEST_RAW = "%s/test.txt" % DB100K_DATA_PATH DB100K_TRAIN = "%s/digitized_train.txt" % DB100K_DATA_PATH DB100K_VALID = "%s/digitized_valid.txt" % DB100K_DATA_PATH DB100K_TEST = "%s/digitized_test.txt" % DB100K_DATA_PATH DB100K_E2ID = "%s/e2id.txt" % DB100K_DATA_PATH DB100K_R2ID = "%s/r2id.txt" % DB100K_DATA_PATH DB100K_GNDS = "%s/groundings.txt" % DB100K_DATA_PATH DATASET["db100k"] = { "train_raw": DB100K_TRAIN_RAW, "valid_raw": DB100K_VALID_RAW, "test_raw": DB100K_TEST_RAW, "train": DB100K_TRAIN, "valid": DB100K_VALID, "test": DB100K_TEST, "e2id": DB100K_E2ID, "r2id": DB100K_R2ID, "groundings": DB100K_GNDS, } FB15K_SPARSE_TRAIN_RAW = "%s/train.txt" % FB15K_SPARSE_DATA_PATH FB15K_SPARSE_VALID_RAW = "%s/valid.txt" % 
FB15K_SPARSE_DATA_PATH FB15K_SPARSE_TEST_RAW = "%s/test.txt" % FB15K_SPARSE_DATA_PATH FB15K_SPARSE_TRAIN = "%s/digitized_train.txt" % FB15K_SPARSE_DATA_PATH FB15K_SPARSE_VALID = "%s/digitized_valid.txt" % FB15K_SPARSE_DATA_PATH FB15K_SPARSE_TEST = "%s/digitized_test.txt" % FB15K_SPARSE_DATA_PATH FB15K_SPARSE_E2ID = "%s/e2id.txt" % FB15K_SPARSE_DATA_PATH FB15K_SPARSE_R2ID = "%s/r2id.txt" % FB15K_SPARSE_DATA_PATH FB15K_SPARSE_GNDS = "%s/groundings.txt" % FB15K_SPARSE_DATA_PATH DATASET["fb15k-sparse"] = { "train_raw": FB15K_SPARSE_TRAIN_RAW, "valid_raw": FB15K_SPARSE_VALID_RAW, "test_raw": FB15K_SPARSE_TEST_RAW, "train": FB15K_SPARSE_TRAIN, "valid": FB15K_SPARSE_VALID, "test": FB15K_SPARSE_TEST, "e2id": FB15K_SPARSE_E2ID, "r2id": FB15K_SPARSE_R2ID, "groundings": FB15K_SPARSE_GNDS, } groundings = [str(50 + i * 5) for i in range(11)] + ['oneTime'] for item in groundings: DATASET["fb15k_" + str(item)] = { "train_raw": FB15K_TRAIN_RAW, "valid_raw": FB15K_VALID_RAW, "test_raw": FB15K_TEST_RAW, "train": FB15K_TRAIN, "valid": FB15K_VALID, "test": FB15K_TEST, "e2id": FB15K_E2ID, "r2id": FB15K_R2ID, "groundings": "%s/groundings_%s.txt" % (FB15K_DATA_PATH,str(item)), } for item in groundings: DATASET["db100k_" + str(item)] = { "train_raw": DB100K_TRAIN_RAW, "valid_raw": DB100K_VALID_RAW, "test_raw": DB100K_TEST_RAW, "train": DB100K_TRAIN, "valid": DB100K_VALID, "test": DB100K_TEST, "e2id": DB100K_E2ID, "r2id": DB100K_R2ID, "groundings": "%s/groundings_%s.txt" % (DB100K_DATA_PATH,str(item)), } # ----------------------- PARAM ----------------------- RANDOM_SEED = 123
34.428571
76
0.673237
534
3,856
4.430712
0.071161
0.125106
0.098901
0.088335
0.72612
0.581572
0.480981
0.346577
0.3153
0.243449
0
0.093683
0.158454
3,856
111
77
34.738739
0.635439
0.041753
0
0.357895
0
0
0.238147
0.052831
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e79f3580f36653daa75d2b29b580bf63af34199
932
py
Python
Krypton/WebApp/__init__.py
BolunHan/Krypton
8caf8e8efad6172ea0783c777e7df49a2ac512cb
[ "MIT" ]
null
null
null
Krypton/WebApp/__init__.py
BolunHan/Krypton
8caf8e8efad6172ea0783c777e7df49a2ac512cb
[ "MIT" ]
null
null
null
Krypton/WebApp/__init__.py
BolunHan/Krypton
8caf8e8efad6172ea0783c777e7df49a2ac512cb
[ "MIT" ]
null
null
null
from flask import Flask from werkzeug.middleware.dispatcher import DispatcherMiddleware from werkzeug.serving import run_simple from Base import Telemetric, CONFIG __all__ = ['start_app'] __version__ = "0.1.0" LOGGER = Telemetric.LOGGER.getChild('WebApp') APP = Flask(__name__) HOSTNAME = CONFIG.get('WebApp', 'HOST', fallback='0.0.0.0') PORT = CONFIG.getint('WebApp', 'PORT', fallback=80) import WebApp.Monitor import WebApp.FileServer mounts = { '/Monitor': WebApp.Monitor.FLASK_APP, '/FileServer': WebApp.FileServer.FLASK_APP, } def start_app(): application = DispatcherMiddleware(APP, mounts) if __name__ == '__main__': for mount_path in mounts: LOGGER.info(f'WebApp running on http://{HOSTNAME}:{PORT}/{mount_path}') run_simple( hostname=HOSTNAME, port=PORT, application=application ) if __name__ == '__main__': start_app()
23.3
83
0.683476
109
932
5.504587
0.422018
0.04
0.01
0
0
0
0
0
0
0
0
0.012
0.195279
932
39
84
23.897436
0.788
0
0
0.071429
0
0
0.146996
0
0
0
0
0
0
1
0.035714
false
0
0.214286
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e7b1a04d745dc6e204362c61a41930cc35f005b
682
py
Python
class3/testsvg.py
dnsbob/pynet_testz
8a4c778e8592efd796dc27417b7ae7ee4d9111cc
[ "Apache-2.0" ]
null
null
null
class3/testsvg.py
dnsbob/pynet_testz
8a4c778e8592efd796dc27417b7ae7ee4d9111cc
[ "Apache-2.0" ]
null
null
null
class3/testsvg.py
dnsbob/pynet_testz
8a4c778e8592efd796dc27417b7ae7ee4d9111cc
[ "Apache-2.0" ]
null
null
null
''' testsvg.py ''' import pygal fa4_in_packets = [24, 21, 40, 32, 21, 21, 49, 9, 21, 34, 24, 21] fa4_out_packets = [21, 24, 21, 40, 32, 21, 21, 49, 9, 21, 34, 24] # Create a Chart of type Line line_chart = pygal.Line() # Title line_chart.title = 'Input/Output Packets and Bytes' # X-axis labels (samples were every five minutes) line_chart.x_labels = ['5', '10', '15', '20', '25', '30', '35', '40', '45', '50', '55', '60'] # Add each one of the above lists into the graph as a line with corresponding label line_chart.add('InPackets', fa4_in_packets) line_chart.add('OutPackets', fa4_out_packets) # Create an output image file from this line_chart.render_to_file('test.svg')
29.652174
93
0.678886
121
682
3.68595
0.561983
0.121076
0.053812
0.035874
0.09417
0.09417
0.09417
0.09417
0.09417
0.09417
0
0.127622
0.16129
682
22
94
31
0.652098
0.313783
0
0
0
0
0.175055
0
0
0
0
0
0
1
0
false
0
0.111111
0
0.111111
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e7b99b3286e2086dc64ba2272a4da8ef40cb9cf
2,573
py
Python
CKC102_python_example.py
sagenew/scc-ckc-api-examples
fd86e435877cf68f35d01b8314a47a08b83eb391
[ "MIT" ]
null
null
null
CKC102_python_example.py
sagenew/scc-ckc-api-examples
fd86e435877cf68f35d01b8314a47a08b83eb391
[ "MIT" ]
null
null
null
CKC102_python_example.py
sagenew/scc-ckc-api-examples
fd86e435877cf68f35d01b8314a47a08b83eb391
[ "MIT" ]
null
null
null
import urllib.parse, urllib.request, json, ssl # Authentication and API Requests # LEARNING LAB 2 Cisco Kinetic for Cities # The Initial login steps are the same as Learning Lab 1. # You can skip ahead to 'LEARNING LAB 2 CODE BEGINS HERE' #Ignore invalid Certificates ssl._create_default_https_context = ssl._create_unverified_context ############################### LEARNING LAB 2 CODE BEGINS HERE ############################ # # In this example, we will exercise the CKC API: {{Platform Instance URL}}/cdp/v1/locations/user/{userId}/info # In the case of the Sandbox lab, this resolves to https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/locations/user/{userId}/info # The access_token and user_id from Learning Lab 1 will be used to obtain the current Users Location Information print('Learning Lab 2 Starts Here:') user_id = '86847897-ab35-489c-af17-6fbf301a6016' access_token = '0f493c98-9689-37c4-ad76-b957020d0d6c' #Define the required GET Headers needed by the CKC API headers = { 'authorization': "Bearer " + access_token, 'Content-Type': "application/json" } #The URL with queryParms to request user details requestUrl = 'https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/locations/user/' + user_id + '/info' print('\nGetting User Location Info: (' + requestUrl + ')\n') # create the request request = urllib.request.Request(requestUrl, headers = headers) # perform the request response = urllib.request.urlopen(request) results = response.read().decode(encoding) responseDictionary = json.loads(results) print('User Location Info:', results, '\n') ############################### LEARNING LAB 2 PART-2 ############################ # # In this example, we will exercise the CKC API: {{Platform Instance URL}}/cdp/v1/capabilities/customer # In the case of the Sandbox lab, this resolves to https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/capabilities/customer # The access_token obtained as explained in Learning Lab 1 is used for authorization #Define the required GET Headers needed by 
the CKC API headers = {'authorization': "Bearer " + access_token } #The URL with queryParms to request user details requestUrl = 'https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/capabilities/customer' print('\nGetting User capabilities: (' + requestUrl + ')\n') # create the request request = urllib.request.Request(requestUrl, headers = headers) # perform the request response = urllib.request.urlopen(request) results = response.read().decode(encoding) responseDictionary = json.loads(results) print('User Capabilities:', results, '\n')
37.289855
128
0.724835
350
2,573
5.285714
0.342857
0.047568
0.032432
0.04973
0.634054
0.634054
0.588108
0.588108
0.588108
0.588108
0
0.027196
0.128255
2,573
68
129
37.838235
0.797593
0.465604
0
0.333333
0
0.083333
0.335223
0.0583
0
0
0
0
0
1
0
false
0
0.041667
0
0.041667
0.208333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e7d265dcc13b68469fdea2d8131380b85fbb3c6
4,780
py
Python
judge/machine.py
Means88/judge-backend
6e998ebb145911e66f8baec6568f007082835a61
[ "MIT" ]
null
null
null
judge/machine.py
Means88/judge-backend
6e998ebb145911e66f8baec6568f007082835a61
[ "MIT" ]
3
2020-06-05T19:21:25.000Z
2021-06-10T20:54:22.000Z
judge/machine.py
Means88/judge-backend
6e998ebb145911e66f8baec6568f007082835a61
[ "MIT" ]
null
null
null
import json import uuid import os import docker import time from celery.utils.log import get_task_logger from config import settings from .language import LANGUAGE from .status import ComputingStatus logger = get_task_logger(__name__) class Machine: client = docker.from_env() def __init__(self): self.container = None self.src_path = None self.stdout_path = None self.output_path = None self.start_time = None # s self.time_limit = None # ms self.memory_limit = None # byte self.uuid = str(uuid.uuid4()) self.temp_file_path = os.path.join(settings.BASE_DIR, 'tmp', self.uuid + '.log') f = open(self.temp_file_path, 'w') f.write('') f.close() self.status = ComputingStatus.PENDING def create(self, language, src_path, stdin_path, output_path, error_path, time_limit=1000, memory_limit=256 * 1024 * 1024): if self.container: raise Exception('Container already exist') self.src_path = src_path self.output_path = output_path self.time_limit = time_limit self.memory_limit = memory_limit self.container = self.client.containers.create( LANGUAGE.get_image_name(language), volumes={ src_path: {'bind': '/judge/{}'.format(LANGUAGE.get_source_name(language)), 'mode': 'ro'}, stdin_path: {'bind': '/judge/stdin', 'mode': 'ro'}, # stdout_path: {'bind': '/judge/stdout', 'mode': 'ro'}, output_path: {'bind': '/judge/userout', 'mode': 'rw'}, error_path: {'bind': '/judge/usererr', 'mode': 'rw'}, self.temp_file_path: {'bind': '/judge/return', 'mode': 'rw'} }, mem_limit=int(memory_limit / 0.95), memswap_limit=int(memory_limit / 0.95), oom_kill_disable=True, ) def start(self): self.start_time = time.time() self.container.start() def stats(self): return self.container.stats(decode=True, stream=False) def container_status(self): self.container.reload() return self.container.status def _wait_for_computing(self): cpu_usage = 0 memory_usage = 0 logger.debug('judge machine compute: %s' % self.src_path) logger.debug('time_limit: %s', self.time_limit) for stats in self.container.stats(decode=True): time_used 
= time.time() - self.start_time cpu_usage = max(cpu_usage, time_used / 2 * 1000) logger.debug('time_used: %s', time_used) logger.debug('cpu_usage: %s', cpu_usage) # stats = self.stats() logger.debug(json.dumps(stats, indent=2, sort_keys=True)) if self.container_status() == 'exited': self.status = ComputingStatus.FINISHED break cpu_usage = max(cpu_usage, stats['cpu_stats']['cpu_usage']['total_usage'] / 1e6) logger.debug('time_limit : %s' % self.time_limit) logger.debug('cpu_usage : %s' % cpu_usage) memory_usage = max(memory_usage, stats['memory_stats'].get('max_usage', 0)) if cpu_usage > self.time_limit: self.status = ComputingStatus.TIME_LIMIT_EXCEED break logger.debug('memory_limit: %s' % self.memory_limit) logger.debug('memory_usage: %s' % memory_usage) if memory_usage >= self.memory_limit: self.status = ComputingStatus.MEMORY_LIMIT_EXCEED break if time_used > self.time_limit * 2 / 1000: self.status = ComputingStatus.TIME_LIMIT_EXCEED self.container.stop(timeout=0) break time.sleep(0.5) try: result = json.load(open(self.temp_file_path, mode='r')) except: result = None return { 'status': self.status, 'cpu_usage': cpu_usage, 'memory_usage': memory_usage, 'output': open(self.output_path, mode='r'), 'result': result, } def wait_for_computing(self): try: return self._wait_for_computing() except Exception as e: logger.error(e) return { 'status': ComputingStatus.ERROR, 'cpu_usage': 0, 'memory_usage': 0, 'output': None, 'result': None, } finally: self.destroy() def destroy(self): if self.container: self.container.stop(timeout=0) self.container.remove() self.container = None
32.965517
105
0.565063
549
4,780
4.703097
0.227687
0.070488
0.030209
0.024787
0.201394
0.112316
0.048025
0.026336
0
0
0
0.013543
0.320293
4,780
144
106
33.194444
0.781163
0.017573
0
0.136752
0
0
0.086354
0
0
0
0
0
0
1
0.068376
false
0
0.076923
0.008547
0.205128
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e7e2fd82b90a62a490603dc24f1988ca7e27fb8
31
py
Python
networth/__init__.py
Hedde/django-networth
496311e31f3b49202bda9c0b2997ee4508cc9477
[ "MIT" ]
null
null
null
networth/__init__.py
Hedde/django-networth
496311e31f3b49202bda9c0b2997ee4508cc9477
[ "MIT" ]
3
2020-02-12T00:02:35.000Z
2021-06-10T19:38:41.000Z
networth/__init__.py
Hedde/django-networth
496311e31f3b49202bda9c0b2997ee4508cc9477
[ "MIT" ]
null
null
null
__author__ = 'heddevanderheide'
31
31
0.83871
2
31
11
1
0
0
0
0
0
0
0
0
0
0
0
0.064516
31
1
31
31
0.758621
0
0
0
0
0
0.5
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
8e7edf92edac4cf5b0a634e3bcb329f30e6b8e66
2,160
py
Python
sources/classic/messaging_kombu/consumer.py
variasov/classic_messaging_kombu
c4191f3d1f788a39f50dc137eca1b67f3ee2af20
[ "MIT" ]
1
2021-11-12T08:19:53.000Z
2021-11-12T08:19:53.000Z
sources/classic/messaging_kombu/consumer.py
variasov/classic_messaging_kombu
c4191f3d1f788a39f50dc137eca1b67f3ee2af20
[ "MIT" ]
null
null
null
sources/classic/messaging_kombu/consumer.py
variasov/classic_messaging_kombu
c4191f3d1f788a39f50dc137eca1b67f3ee2af20
[ "MIT" ]
null
null
null
from functools import partial import logging from typing import Callable, Any, Iterable from collections import defaultdict from kombu import Connection from kombu.mixins import ConsumerMixin from classic.components import component from .handlers import MessageHandler, SimpleMessageHandler from .scheme import BrokerScheme logger = logging.getLogger(__file__) AnyCallable = Callable[[Any], None] @component class KombuConsumer(ConsumerMixin): connection: Connection scheme: BrokerScheme def __attrs_post_init__(self): self._handlers = defaultdict(list) def _get_queues(self, queue_names: Iterable[str]): queues = [] for name in queue_names: assert name in self.scheme.queues, \ f'Queue with name {name} do not exists in broker scheme!' queues.append(self.scheme.queues[name]) return queues def register_handler(self, handler: MessageHandler, *queue_names: str): queues = self._get_queues(queue_names) self._handlers[handler].extend(queues) def register_function(self, function: AnyCallable, *queue_names: str, late_ack: bool = True): handler = SimpleMessageHandler( function=function, late_ack=late_ack, ) queues = self._get_queues(queue_names) self._handlers[handler].extend(queues) def get_consumers(self, consumer_cls, channel): consumers = [] for handler, queues in self._handlers.items(): on_message = partial(self.on_message, handler=handler) c = consumer_cls( queues=queues, callbacks=[on_message], ) consumers.append(c) return consumers @staticmethod def on_message(body, message, handler): try: logger.info(f'Trying to call {handler}') handler.handle(message, body) except Exception as error: logger.error(error) def run(self, *args, **kwargs): logger.info('Worker started') return super().run(*args, **kwargs)
29.589041
75
0.6375
231
2,160
5.796537
0.380952
0.04481
0.023898
0.028379
0.0941
0.0941
0.0941
0.0941
0.0941
0.0941
0
0
0.27963
2,160
72
76
30
0.86054
0
0
0.071429
0
0
0.042593
0
0
0
0
0
0.017857
1
0.125
false
0
0.160714
0
0.392857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e7ff2193d4240f5f73671b8a5f9d6d5555d5513
2,004
py
Python
du4/du4.py
Honzaik/PocAlgDU
a3d32d1906298ba4bc1627640ecc04370ff4e49c
[ "Unlicense" ]
null
null
null
du4/du4.py
Honzaik/PocAlgDU
a3d32d1906298ba4bc1627640ecc04370ff4e49c
[ "Unlicense" ]
null
null
null
du4/du4.py
Honzaik/PocAlgDU
a3d32d1906298ba4bc1627640ecc04370ff4e49c
[ "Unlicense" ]
null
null
null
from cmath import exp, pi from math import log2 def vratLiche(a): oddA = list(); for i in range(len(a)): if(i % 2 == 1): oddA.append(a[i]) return oddA def vratSude(a): evenA = list() for i in range(len(a)): if(i % 2 == 0): evenA.append(a[i]) return evenA def roundComplex(vysl): #zaokrouhlování newVysl = list() for v in vysl: a = round(v.real,5) b = round(v.imag,5) newVysl.append(complex(a,b)) return newVysl def recursiveComplexFFT(n, prim, a): if(n == 1): return [a[0]] else: nHalf = int(n/2) newPrim = prim*prim b = recursiveComplexFFT(nHalf, newPrim, vratSude(a)) c = recursiveComplexFFT(nHalf, newPrim, vratLiche(a)) result = [0]*int(n) for i in range(nHalf): tempPrim = prim**i result[i] = b[i]+(tempPrim)*c[i] result[nHalf+i] = b[i]-(tempPrim)*c[i] return roundComplex(result) def rev(i,k): #rev funkce mask = '{0:0' + str(k) + 'b}' return int(mask.format(i)[::-1],2) def iterativeComplexFFT(n, prim, a): k = int(log2(n)) A = [0]*n for i in range(n): A[i] = a[rev(i,k)] prims = [0]*k prims[k-1] = prim for i in range(k-2,-1,-1): prims[i] = prims[i+1]*prims[i+1] for u in range(1,k+1,1): m = 2**u for i in range(0, n-m+1, m): for j in range(0,int(m/2),1): temp = (prims[u-1]**j)*A[i+j+int(m/2)] v1 = A[i+j] + temp v2 = A[i+j] - temp A[i+j] = v1 A[i+j+int(m/2)] = v2 return roundComplex(A) vektor = [1,1,2,2,5,2,4,7] #pocitani vektor n = len(vektor) myPrim = exp((2j*pi)/n) #primitivni odmocnina res = recursiveComplexFFT(n, myPrim, vektor) #rekurzivni fft print(res) myPrim = exp((2j*pi)/n) res2 = iterativeComplexFFT(n, myPrim, vektor) #iterativni fft print(res2)
27.833333
65
0.510978
312
2,004
3.282051
0.237179
0.054688
0.035156
0.064453
0.136719
0.085938
0.044922
0.044922
0.044922
0.044922
0
0.038405
0.324351
2,004
72
66
27.833333
0.717873
0.043912
0
0.061538
0
0
0.00314
0
0
0
0
0
0
1
0.092308
false
0
0.030769
0
0.230769
0.030769
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e80ad9615affb9458b6c1ded08a8495b039b868
609
py
Python
lang/py/pylib/07/pickle/pickle_dump_to_file1.py
ch1huizong/learning
632267634a9fd84a5f5116de09ff1e2681a6cc85
[ "MIT" ]
null
null
null
lang/py/pylib/07/pickle/pickle_dump_to_file1.py
ch1huizong/learning
632267634a9fd84a5f5116de09ff1e2681a6cc85
[ "MIT" ]
null
null
null
lang/py/pylib/07/pickle/pickle_dump_to_file1.py
ch1huizong/learning
632267634a9fd84a5f5116de09ff1e2681a6cc85
[ "MIT" ]
null
null
null
#! /usr/bin/env python # -*- coding:UTF-8 -*- # 把对象pickle至文件 try: import cPickle as pickle except: import pickle import sys class SimpleObject(object): def __init__(self,name): self.name = name self.name_backwards = name[::-1] if __name__ == '__main__': data = [] data.append(SimpleObject("pickle")) data.append(SimpleObject("cPickle")) data.append(SimpleObject("last")) filename = sys.argv[1] with open(filename, 'wb') as f: for o in data: print "WRITING : %s (%s)" % (o.name, o.name_backwards) pickle.dump(o, f)
20.3
66
0.596059
76
609
4.592105
0.552632
0.068768
0.189112
0
0
0
0
0
0
0
0
0.006623
0.256158
609
29
67
21
0.763797
0.090312
0
0
0
0
0.079855
0
0
0
0
0
0
0
null
null
0
0.157895
null
null
0.052632
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
2
8e85f751c8a5501a2b056c1fde74847efffec00d
4,147
py
Python
tests/test_cv.py
goyoambrosio/RobotAtHome2
9ab31e5e11d8551b9f6934d90245221449dbbbf4
[ "MIT" ]
1
2022-03-08T19:00:37.000Z
2022-03-08T19:00:37.000Z
tests/test_cv.py
goyoambrosio/RobotAtHome2
9ab31e5e11d8551b9f6934d90245221449dbbbf4
[ "MIT" ]
null
null
null
tests/test_cv.py
goyoambrosio/RobotAtHome2
9ab31e5e11d8551b9f6934d90245221449dbbbf4
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8; buffer-read-only: t -*- __author__ = "Gregorio Ambrosio" __contact__ = "gambrosio[at]uma.es" __copyright__ = "Copyright 2021, Gregorio Ambrosio" __date__ = "2021/02/22" __license__ = "MIT" import unittest import os import sys import pandas as pd import matplotlib.pyplot as plt import robotathome as rh from robotathome import logger, set_log_level class Test(unittest.TestCase): """Test class of toolbox module """ # @unittest.skip("testing skipping") def setUp(self): """ The setUp() method allow you to define instructions that will be executed before and after each test method Examples: python -m unittest <testModule>.<className>.<function_name> $ cd ~/cloud/GIT/RobotAtHome_API/tests $ python -m unittest test_reader.Test.test_get_home_names """ # we are testing: set the lowest log level rh.set_log_level('TRACE') logger.trace("*** Test.setUp") # Local references ''' /home/user └─── WORKSPACE ├─── R@H2-2.0.1 │ └── files │ ├── rgbd │ └── scene └─────── rh.db ''' self.rh_path = os.path.expanduser('~/WORKSPACE/R@H2-2.0.1') self.wspc_path = os.path.expanduser('~/WORKSPACE') self.rgbd_path = os.path.join(self.rh_path, 'files/rgbd') self.scene_path = os.path.join(self.rh_path, 'files/scene') self.db_filename = 'rh.db' try: self.rh = rh.RobotAtHome(rh_path = self.rh_path, rgbd_path = self.rgbd_path, scene_path = self.scene_path, wspc_path = self.wspc_path, db_filename = self.db_filename ) except: logger.error("setUp: something was wrong") # exit without handling os._exit(1) def tearDown(self): """The tearDown() method allow you to define instructions that will be executed after each test method""" logger.trace("*** Test.tearDown") del self.rh def test_say_hello(self): """Testing of say_hello """ logger.trace("*** Testing of say_hello()") logger.info("Running say_hello in _greetings.py") logger.info(rh.say_hello()) def test_get_labeled_img(self): """Testing of get_labeled_img """ logger.trace("*** Testing of get_labeled_img()") 
logger.info("Getting labeled image") id = 100000 # 100000 <= id < 200000 [rgb_f, _] = self.rh.get_RGBD_files(id) labels = self.rh.get_RGBD_labels(id) [labeled_img, _] = rh.get_labeled_img(labels, rgb_f) plt.imshow(labeled_img) plt.show() def test_plot_labeled_img(self): """Testing of plot_labels """ logger.trace("*** Testing of plot_labeled_img()") logger.info("Plotting RGB image patched with labels") set_log_level('INFO') id = 100000 # 100000 <= id < 200000 [rgb_f, _] = self.rh.get_RGBD_files(id) labels = self.rh.get_RGBD_labels(id) logger.info("\nlabel names: \n{}", labels['name']) logger.info("\nlabel masks type: \n{}", type(labels['mask'].iat[0])) rh.plot_labeled_img(labels, rgb_f) def test_get_scan_xy(self): """ Docstring """ id = 200000 # 0 <= id <= inf laser_scan = self.rh.get_laser_scan(id) xy = rh.get_scan_xy(laser_scan) print(xy) def test_plot_scan(self): """ Docstring """ id = 200000 # 0 <= id <= inf laser_scan = self.rh.get_laser_scan(id) rh.plot_scan(laser_scan) def test_plot_scene(self): scenes = self.rh.get_scenes() s_id = 0 logger.info("\nScene file: \n{}", scenes.iloc[s_id].scene_file) rh.plot_scene(scenes.iloc[s_id].scene_file) if __name__ == '__main__': unittest.main()
30.947761
78
0.564022
521
4,147
4.305182
0.318618
0.034775
0.028087
0.023183
0.317878
0.231832
0.187249
0.187249
0.161391
0.161391
0
0.025965
0.312756
4,147
133
79
31.180451
0.751228
0.196528
0
0.136986
0
0
0.155533
0.007311
0
0
0
0
0
1
0.109589
false
0
0.09589
0
0.219178
0.013699
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e8737e7bdcd75430db3502155a2cb8e2ea47372
4,483
py
Python
third_party/DiffAugment_pytorch.py
SuperStar0907/lecam-gan
e502c9b182345ddd03d29edda56b76caa7d8fb41
[ "Apache-2.0" ]
135
2021-03-23T23:07:47.000Z
2022-03-30T03:08:42.000Z
third_party/DiffAugment_pytorch.py
SuperStar0907/lecam-gan
e502c9b182345ddd03d29edda56b76caa7d8fb41
[ "Apache-2.0" ]
12
2021-04-06T16:57:14.000Z
2021-12-31T07:06:05.000Z
third_party/DiffAugment_pytorch.py
SuperStar0907/lecam-gan
e502c9b182345ddd03d29edda56b76caa7d8fb41
[ "Apache-2.0" ]
13
2021-03-24T14:37:48.000Z
2022-03-06T13:24:52.000Z
# Differentiable Augmentation for Data-Efficient GAN Training # Shengyu Zhao, Zhijian Liu, Ji Lin, Jun-Yan Zhu, and Song Han # https://arxiv.org/pdf/2006.10738 import torch import torch.nn.functional as F from torch.distributions.dirichlet import _Dirichlet def BetaSample(alpha, beta, sample_shape=torch.Size()): concentration = torch.stack([alpha, beta], -1) shape = sample_shape + concentration.shape[:-1] + concentration.shape[-1:] concentration = concentration.expand(shape) return _Dirichlet.apply(concentration).select(-1, 0) def DiffAugment(x, policy='', channels_first=True): if policy: x_ori = x.clone() if not channels_first: x = x.permute(0, 3, 1, 2) for p in policy.split(','): if p in list(AUGMENT_FNS.keys()): for f in AUGMENT_FNS[p]: x = f(x) if not channels_first: x = x.permute(0, 2, 3, 1) x = x.contiguous() # mixup if 'mixup' in policy: if not channels_first: x1 = x_ori.permute(0, 3, 1, 2) else: x1 = x_ori.clone() for p in policy.split(','): if p in list(AUGMENT_FNS.keys()): for f in AUGMENT_FNS[p]: x1 = f(x1) if not channels_first: x1 = x1.permute(0, 2, 3, 1) x1 = x1.contiguous() #TODO alpha = torch.ones(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device)*0.1 beta = torch.ones(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device)*0.1 weight = BetaSample(alpha, beta) x = (1 - weight)*x1 + weight*x '''weight = torch.distributions.beta.Beta(alpha, beta).sample() weight = torch.max(weight, 1 - weight) x = (1 - weight)*x_ori + weight*x''' return x def rand_brightness(x): x = x + (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5) return x def rand_saturation(x): x_mean = x.mean(dim=1, keepdim=True) x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2) + x_mean return x def rand_contrast(x): x_mean = x.mean(dim=[1, 2, 3], keepdim=True) x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5) + x_mean return x def rand_translation(x, ratio=0.125): shift_x, shift_y = int(x.size(2) 
* ratio + 0.5), int(x.size(3) * ratio + 0.5) translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device) translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device) grid_batch, grid_x, grid_y = torch.meshgrid( torch.arange(x.size(0), dtype=torch.long, device=x.device), torch.arange(x.size(2), dtype=torch.long, device=x.device), torch.arange(x.size(3), dtype=torch.long, device=x.device), ) grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1) grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1) x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0]) x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2) return x def rand_cutout(x, ratio=0.5): cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5) offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device) offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device) grid_batch, grid_x, grid_y = torch.meshgrid( torch.arange(x.size(0), dtype=torch.long, device=x.device), torch.arange(cutout_size[0], dtype=torch.long, device=x.device), torch.arange(cutout_size[1], dtype=torch.long, device=x.device), ) grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1) grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1) mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device) mask[grid_batch, grid_x, grid_y] = 0 x = x * mask.unsqueeze(1) return x def noise(x, sd=0.05): x = x + torch.randn_like(x)*sd*sd return x AUGMENT_FNS = { 'color': [rand_brightness, rand_saturation, rand_contrast], 'translation': [rand_translation], 'cutout': [rand_cutout], 'noise': [noise], }
39.672566
110
0.586438
731
4,483
3.48974
0.153215
0.05096
0.081537
0.024696
0.519796
0.459428
0.411995
0.400235
0.378283
0.378283
0
0.049063
0.249833
4,483
112
111
40.026786
0.709486
0.03636
0
0.244186
0
0
0.008193
0
0
0
0
0.008929
0
1
0.093023
false
0
0.034884
0
0.22093
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e8a1596a6b3ed1679875e09d7a25bdcda290e69
3,000
py
Python
advent_of_code_2021/day4/giant_squid.py
mortendaehli/advent-of-code-2021
b36959eeff461d1d9eb8bf32c1efc767f6f00b23
[ "MIT" ]
null
null
null
advent_of_code_2021/day4/giant_squid.py
mortendaehli/advent-of-code-2021
b36959eeff461d1d9eb8bf32c1efc767f6f00b23
[ "MIT" ]
null
null
null
advent_of_code_2021/day4/giant_squid.py
mortendaehli/advent-of-code-2021
b36959eeff461d1d9eb8bf32c1efc767f6f00b23
[ "MIT" ]
null
null
null
import re from dataclasses import dataclass from typing import List, Optional @dataclass class PlayBoard: numbers: List[List[Optional[int]]] def read_numbers() -> List[int]: with open("data.txt", "r") as file: data = file.readline() return list(map(int, data.split(","))) def read_boards() -> List[PlayBoard]: """ Reading each board defined by a new line then 5 lists of 5 ints. Given the data format, this divides equally by 6 for possible performant mapping. """ with open("data.txt", "r") as file: data = file.readlines()[2:] cleaned_data = list(map(lambda x: re.split("\s+", x.strip()), data)) # noqa play_boards: List[PlayBoard] = list() for i in range(0, len(data), 6): play_boards.append(PlayBoard(numbers=[list(map(int, x)) for x in cleaned_data[i : i + 5]])) return play_boards def calculate_final_score(play_board: PlayBoard, number: int) -> int: """Sum remaining values on the play board.""" return sum([sum([val for val in row if val]) for row in play_board.numbers]) * number def check_board_and_return_optional_score(play_board: PlayBoard, number: int) -> Optional[int]: # Check rows for row_num, row in enumerate(play_board.numbers): if number in row: row[row.index(number)] = None if row == [None] * 5: final_score = calculate_final_score(play_board=play_board, number=number) return final_score # check cols for n in range(5): col = [x[n] for x in play_board.numbers] if col == [None] * 5: final_score = calculate_final_score(play_board=play_board, number=number) return final_score else: return None def part_one() -> int: numbers, play_boards = read_numbers(), read_boards() game_results = list() for number in numbers: for play_board in play_boards: score = check_board_and_return_optional_score(play_board=play_board, number=number) if score: game_results.append(score) return game_results[0] def part_two() -> int: numbers, play_boards = read_numbers(), read_boards() game_results = list() for number in numbers: for play_board in play_boards: score = 
check_board_and_return_optional_score(play_board=play_board, number=number) if score: game_results.append(score) return game_results[-1] if __name__ == "__main__": print("Day 4: Giant Squid") print("-" * 80) result_part_1 = part_one() print( f"Part 1: To guarantee victory against the giant squid, figure out which board will win first. " f"What will your final score be if you choose that board?: {result_part_1}" ) print("-" * 80) result_part_2 = part_two() print( f"Part 2: Figure out which board will win last. Once it wins, what would its final score be?: {result_part_2}" ) print("-" * 80)
30.30303
118
0.640333
426
3,000
4.316901
0.293427
0.078303
0.045677
0.039152
0.434475
0.426862
0.371398
0.371398
0.349103
0.316476
0
0.011146
0.252333
3,000
98
119
30.612245
0.808738
0.071333
0
0.378788
0
0.015152
0.117071
0
0
0
0
0
0
1
0.090909
false
0
0.045455
0
0.287879
0.090909
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e8ac78399e840a9f4584fc74b5d093c38c0fc44
265
py
Python
lastrender/settings.py
jc855/lastgraph
a2917e73f0e0b9409e897e4a83944e72161a33ce
[ "BSD-3-Clause" ]
77
2015-01-03T20:26:28.000Z
2021-07-07T15:08:25.000Z
lastrender/settings.py
jc855/lastgraph
a2917e73f0e0b9409e897e4a83944e72161a33ce
[ "BSD-3-Clause" ]
1
2021-06-10T23:42:31.000Z
2021-06-10T23:42:31.000Z
lastrender/settings.py
jc855/lastgraph
a2917e73f0e0b9409e897e4a83944e72161a33ce
[ "BSD-3-Clause" ]
20
2015-01-17T16:33:41.000Z
2021-12-23T03:40:36.000Z
import os static_path = os.path.join(os.path.dirname(__file__), "..", "static") apiurl = "http://localhost:8000/api/%s" local_store = os.path.join(static_path, "graphs") local_store_url = "http://localhost:8000/static/graphs" nodename = "lg" nodepwd = "lg@home"
24.090909
69
0.709434
39
265
4.589744
0.538462
0.100559
0.111732
0
0
0
0
0
0
0
0
0.033473
0.098113
265
10
70
26.5
0.715481
0
0
0
0
0
0.324528
0
0
0
0
0
0
1
0
false
0
0.142857
0
0.142857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e8b609df5d78fd1e3a458dac9a51ed8f9a19335
952
py
Python
src/omnis/structure_nodes/loop.py
rodrigogomesantos/omnis
a6f59c870d86c112f26a5b98c31889d64eea39eb
[ "MIT" ]
null
null
null
src/omnis/structure_nodes/loop.py
rodrigogomesantos/omnis
a6f59c870d86c112f26a5b98c31889d64eea39eb
[ "MIT" ]
null
null
null
src/omnis/structure_nodes/loop.py
rodrigogomesantos/omnis
a6f59c870d86c112f26a5b98c31889d64eea39eb
[ "MIT" ]
null
null
null
class loop(): def __init__(self, _loop_type, **kwargs) -> None: self.type = _loop_type self.kwargs = kwargs self.break_function = self.kwargs.get("break_function") self.range = kwargs.get("range") self.start = getattr(self, f"_{self.type}") self.counter = 0 self.outPut_function = 0 def _while(self, function, *ags, **kws): while not self.break_function(): self.counter = 0 while not self.pause_function(): self.outPut_function = function(*ags, **kws) self.counter+=1 return self.counter, self.outPut_function def _for(self, function, *args, **kwargs): self.counter = 0 for _c_ in self.range: self.outPut_function = function(*args, **kwargs) self.counter = _c_ return self.counter, self.outPut_function def break_verify(self): self.break_function()
35.259259
63
0.590336
113
952
4.743363
0.265487
0.143657
0.16791
0.078358
0.25
0.141791
0.141791
0
0
0
0
0.00744
0.294118
952
27
64
35.259259
0.790179
0
0
0.208333
0
0
0.032529
0
0
0
0
0
0
1
0.166667
false
0
0
0
0.291667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e8bc66edbc27feb19c1a24e01f7065d5f4aedb0
4,646
py
Python
mesh_vertex_color/np_ray_triangle_intersection.py
naysok/Mesh_Vertex_Color
c6fafe480957305176ac1adc14c093d9278baa94
[ "MIT" ]
1
2020-09-17T16:41:34.000Z
2020-09-17T16:41:34.000Z
mesh_vertex_color/np_ray_triangle_intersection.py
naysok/Mesh_Vertex_Color
c6fafe480957305176ac1adc14c093d9278baa94
[ "MIT" ]
null
null
null
mesh_vertex_color/np_ray_triangle_intersection.py
naysok/Mesh_Vertex_Color
c6fafe480957305176ac1adc14c093d9278baa94
[ "MIT" ]
null
null
null
import sys
import numpy as np


#############################################################
###                                                       ###
###  Module for Python3                                   ###
###  * Using Numpy ( + Cupy ? )                           ###
###                                                       ###
#############################################################


class RayTriangleIntersection():
    # Vectorized Moller-Trumbore style ray/triangle test; algorithm notes:
    ### https://pheema.hatenablog.jp/entry/ray-triangle-intersection

    def __init__(self):
        pass

    def calc_intersection(self, o, d, v0, v1, v2):
        """Count triangles intersected by the segment from origin ``o`` along ``d``.

        o, d: ray origin and direction (d's length bounds the segment).
        v0, v1, v2: per-triangle vertex arrays, filtered in five stages by
        boolean masks; each stage removes triangles that cannot intersect.
        Returns the number of surviving intersections (int).
        NOTE(review): the axis=1 sums assume v0/v1/v2 are (N, 3) arrays —
        confirm against callers.
        """
        # Triangle edge vectors.
        e1 = np.subtract(v1, v0)
        e2 = np.subtract(v2, v0)

        # Machine epsilon used as the parallelism tolerance.
        ### https://www.it-swarm.dev/ja/python/python-numpy-machine-epsilon/1041749812/
        kEpsilon = np.finfo(float).eps

        alpha = np.cross(d, e2)
        # Row-wise dot product (np.dot would collapse the batch dimension).
        # det = np.dot(e1, alpha)
        det = np.sum(e1 * alpha, axis=1)
        # print("e1.shape : {}".format(e1.shape))
        # print("e2.shape : {}".format(e2.shape))
        # print("alpha.shape : {}".format(alpha.shape))
        # print("det.shape : {}".format(det.shape))
        # intersect_count = np.count_nonzero(det)

        ### True = InterSection
        ### (1) Check Parallel: |det| <= eps means the ray lies in the triangle plane.
        bool_p = (-kEpsilon > det) | (det > kEpsilon)

        ### Remove (1) - keep only non-parallel triangles in every working array.
        v0 = v0[bool_p]
        v1 = v1[bool_p]
        v2 = v2[bool_p]
        e1 = e1[bool_p]
        e2 = e2[bool_p]
        alpha = alpha[bool_p]
        det = det[bool_p]
        # print("det.shape (1) : {}".format(det.shape))

        det_inv = 1.0 / det
        r = np.subtract(o, v0)

        ### (2) Check u-Value in the Domain (0 <= u <= 1)
        # u = np.dot(alpha, r) * det_inv
        u = np.sum(alpha * r, axis=1) * det_inv
        bool_u = (0.0 < u) & (u < 1.0)

        ### Remove (2) - every surviving array must be filtered with the same mask.
        v0 = v0[bool_u]
        v1 = v1[bool_u]
        v2 = v2[bool_u]
        e1 = e1[bool_u]
        e2 = e2[bool_u]
        alpha = alpha[bool_u]
        r = r[bool_u]
        u = u[bool_u]
        det = det[bool_u]
        det_inv = det_inv[bool_u]
        # print("det.shape (2) : {}".format(det.shape))

        beta = np.cross(r, e1)

        ### (3) Check v-Value in the Domain (0 <= v <= 1)
        ### and
        ### Check (u + v = 1)
        # v = np.dot(d, beta) * det_inv
        v = np.sum(d * beta, axis=1) * det_inv
        bool_v = (0.0 < v) & (u + v < 1.0)

        ### Remove (3)
        v0 = v0[bool_v]
        v1 = v1[bool_v]
        v2 = v2[bool_v]
        e1 = e1[bool_v]
        e2 = e2[bool_v]
        alpha = alpha[bool_v]
        beta = beta[bool_v]
        r = r[bool_v]
        u = u[bool_v]
        v = v[bool_v]
        det = det[bool_v]
        det_inv = det_inv[bool_v]
        # print("det.shape (3) : {}".format(det.shape))

        ### (4) Check t_value (t >= 0): intersection must lie in front of the origin.
        # t = np.dot(e2, beta) * det_inv
        t = np.sum(e2 * beta, axis=1) * det_inv
        bool_t = 0.0 < t

        ### Remove (4)
        v0 = v0[bool_t]
        v1 = v1[bool_t]
        v2 = v2[bool_t]
        e1 = e1[bool_t]
        e2 = e2[bool_t]
        alpha = alpha[bool_t]
        beta = beta[bool_t]
        r = r[bool_t]
        t = t[bool_t]
        u = u[bool_t]
        v = v[bool_t]
        det = det[bool_t]
        det_inv = det_inv[bool_t]
        # print("det.shape (4) : {}".format(det.shape))

        ### Intersett : True !!
        # intersect_val = [t, u, v]

        ### Barycenrinc_Coordinate >> XYZ
        ### ((1 - u - v) * v0) + (u * v1) + (v * v2)
        new_amp = 1.0 - u - v
        new_v0 = np.multiply(v0, new_amp[:, np.newaxis])
        new_v1 = np.multiply(v1, u[:, np.newaxis])
        new_v2 = np.multiply(v2, v[:, np.newaxis])
        intersect_pos = np.add(np.add(new_v0, new_v1), new_v2)

        ray_line = np.subtract(intersect_pos, o)
        # print("ray_line.shape : {}".format(ray_line.shape))

        ### (5) Check Line-Triangle Intersection
        ### Compare Length, Line-Length / Origin-IntersectPoint-Length
        ### (treat d as a finite segment: hit must be closer than |d|)
        line_length = np.linalg.norm(d)
        intersect_length = np.linalg.norm(ray_line, axis=1)
        # print("line_len : {}".format(line_length))
        # print("inter_len : {}".format(intersect_length))
        # print("inter_len.shape : {}".format(intersect_length.shape))
        bool_l = intersect_length < line_length
        # print(bool_l)
        intersect_count = np.count_nonzero(bool_l)

        return intersect_count
30.168831
88
0.44619
588
4,646
3.357143
0.17517
0.035461
0.030395
0.016717
0.096758
0.01925
0
0
0
0
0
0.042479
0.381834
4,646
154
89
30.168831
0.644847
0.314895
0
0
0
0
0
0
0
0
0
0
0
1
0.027027
false
0.013514
0.027027
0
0.081081
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e8c088d3edb685bf729a71250bfe8e5e7bfb65d
2,046
py
Python
src/dungeonbot/plugins/helpers/die_roll.py
tlake/dungeonbot_backup
715c14d3a06d8a7a8771572371b67cc87c7e17fb
[ "MIT" ]
null
null
null
src/dungeonbot/plugins/helpers/die_roll.py
tlake/dungeonbot_backup
715c14d3a06d8a7a8771572371b67cc87c7e17fb
[ "MIT" ]
null
null
null
src/dungeonbot/plugins/helpers/die_roll.py
tlake/dungeonbot_backup
715c14d3a06d8a7a8771572371b67cc87c7e17fb
[ "MIT" ]
null
null
null
class DieRoll(object):
    """Parse a roll string such as ``2d6+3`` and dispatch the matching roll."""

    def __init__(self, roll_str, flag):
        """Split the roll string into dice count, sides, operator and modifier."""
        flag_dispatch = {
            "a": self.advantage,
            "d": self.disadvantage
        }
        self.roll_str = roll_str
        self.operator = "+"
        # Unknown flags raise KeyError on purpose, mirroring strict lookup.
        self.action = flag_dispatch[flag] if flag else self.roll_die
        self.modifier = 0
        self.message = ""

        spec = roll_str
        for symbol in ["+", "-"]:
            if symbol not in spec:
                continue
            self.operator = symbol
            spec, raw_mod = spec.split(symbol)
            self.modifier = -int(raw_mod) if symbol == "-" else int(raw_mod)

        self.number, self.sides = (int(part) for part in spec.split("d"))
        self.min_roll = self.number
        self.max_roll = self.sides * self.number

    def print_results(self, roll_result, name=None):
        """Format the roll summary, optionally appending the roller's name."""
        roll_plus_mods = "{} {} {}".format(
            roll_result, self.operator, abs(self.modifier)
        )
        summary = "*[ {} ]* _({} = {}) (min {}, max {}) {}_".format(
            roll_result + self.modifier,
            self.roll_str,
            roll_plus_mods,
            self.min_roll + self.modifier,
            self.max_roll + self.modifier,
            self.message
        )
        if name:
            summary += " with {}".format(name)
        return summary

    def roll_die(self):
        """Roll ``number`` dice with ``sides`` sides and return the sum."""
        import random
        return sum(random.randint(1, self.sides) for _ in range(self.number))

    def advantage(self):
        """Roll twice and keep the higher total."""
        self.message = "with advantage"
        return max(self.roll_die(), self.roll_die())

    def disadvantage(self):
        """Roll twice and keep the lower total."""
        self.message = "with disadvantage"
        return min(self.roll_die(), self.roll_die())
31.476923
77
0.535679
233
2,046
4.553648
0.283262
0.082941
0.051838
0.042413
0.04147
0.04147
0
0
0
0
0
0.003693
0.338221
2,046
64
78
31.96875
0.779911
0.105083
0
0
0
0
0.052222
0
0
0
0
0
0
1
0.102041
false
0
0.020408
0
0.22449
0.020408
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e8d954a7e320b872b94573d4e171b827ee4d202
1,099
py
Python
src/utils/load_or_make.py
jlehnersd/metis_project2
0bde762c43c4cf9aa5c6672b894e704803616aa3
[ "MIT" ]
16
2019-04-08T22:09:51.000Z
2021-08-02T18:18:41.000Z
src/utils/load_or_make.py
jlehnersd/metis_project2
0bde762c43c4cf9aa5c6672b894e704803616aa3
[ "MIT" ]
1
2019-11-19T06:27:37.000Z
2019-12-26T20:56:03.000Z
src/utils/load_or_make.py
floraxinru/metisproject04
80ee97eedbf675d6f5064eb92fd7166b56bb81e6
[ "MIT" ]
8
2019-04-08T23:01:39.000Z
2021-08-02T18:18:43.000Z
import os, pickle
import functools


def load_or_make(creator):
    """Decorator adding transparent pickle caching to a data-building function.

    The decorated function gains a leading ``filepath`` argument.  If a file
    exists at ``filepath`` its unpickled contents are returned; otherwise
    ``creator(*args, **kwargs)`` is invoked and its result is pickled there
    before being returned.

    Usage:
        @load_or_make
        def data_creator(args):
            # code
            # return data

        my_data = data_creator(save_file_path, *args, **kwargs)
    """
    @functools.wraps(creator)
    def cached_creator(filepath, *args, **kwargs):
        # Cache miss: build, persist, and hand back the fresh result.
        if not os.path.isfile(filepath):
            result = creator(*args, **kwargs)
            with open(filepath, 'wb') as fh:
                pickle.dump(result, fh)
            return result
        # Cache hit: load the previously pickled result.
        with open(filepath, 'rb') as fh:
            return pickle.load(fh)
    return cached_creator
28.179487
67
0.606915
140
1,099
4.685714
0.435714
0.07622
0.030488
0
0
0
0
0
0
0
0
0
0.304823
1,099
38
68
28.921053
0.858639
0.495905
0
0
0
0
0.008677
0
0
0
0
0
0
1
0.142857
false
0
0.142857
0
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e8e40f2f08d10e053b3679ecaea9d2e6c4d91a1
183
py
Python
djangoreactAnimalRescue/animalresque/animals/urls.py
LegolasVzla/django
bbecd7175fb4833731041f88c0ba3327f8a898f0
[ "MIT" ]
null
null
null
djangoreactAnimalRescue/animalresque/animals/urls.py
LegolasVzla/django
bbecd7175fb4833731041f88c0ba3327f8a898f0
[ "MIT" ]
9
2019-12-04T22:50:51.000Z
2022-02-10T10:25:40.000Z
djangoreactAnimalRescue/animalresque/animals/urls.py
LegolasVzla/django
bbecd7175fb4833731041f88c0ba3327f8a898f0
[ "MIT" ]
null
null
null
# URL wiring for the animals app: a DRF DefaultRouter generates the standard
# list/detail CRUD routes for AnimalViewSet under api/animals, with basename
# 'animals'.
from rest_framework import routers
from .api import AnimalViewSet

router = routers.DefaultRouter()
router.register('api/animals', AnimalViewSet, 'animals')

# Expose the router-generated routes as this module's urlpatterns.
urlpatterns = router.urls
26.142857
56
0.808743
21
183
7
0.619048
0
0
0
0
0
0
0
0
0
0
0
0.098361
183
7
57
26.142857
0.890909
0
0
0
0
0
0.097826
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
8e8f2cd4383b58674dc6f3bff361444a5618a257
13,075
py
Python
ir.py
safx/nu-scraper
6b18d9f4937bd2a1cd5b89b141868e1ae60a5a4e
[ "MIT" ]
3
2021-02-05T08:30:40.000Z
2021-02-05T11:33:16.000Z
ir.py
safx/nu-scraper
6b18d9f4937bd2a1cd5b89b141868e1ae60a5a4e
[ "MIT" ]
null
null
null
ir.py
safx/nu-scraper
6b18d9f4937bd2a1cd5b89b141868e1ae60a5a4e
[ "MIT" ]
null
null
null
"""Intermediate representation for inferring JSON response schemas.

Builds a type tree (value / array / object / nullable) from sample JSON
responses, then factors structurally similar objects out into shared,
named common types that endpoints reference.
"""
from os import replace
from typing import List, Dict, Any, Callable
import os
import re
import json
import functools

# Scalar type tags carried by ValueType.
ST_UNKNOWN = "*"
ST_BOOL = "bool"
ST_INT = "integer"
ST_STR = "string"
ST_FLOAT = "float"
ST_URL = "url"
ST_DATETIME = "datetime"

REGEXP_URL = re.compile('^https?://.+$')
REGEXP_DATE = re.compile('^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z$')


class TypeBase:
    """Root of the inferred-type hierarchy; a type is a leaf unless overridden."""

    @property
    def isLeaf(self) -> bool:
        return True


class NullType(TypeBase):
    """Type of a JSON null value."""

    def __repr__(self) -> str:
        return 'null'


class UniTypeHolder(TypeBase):
    """Wrapper holding exactly one inner type (base for Nullable/ArrayType)."""

    def __init__(self, vtype: TypeBase) -> None:
        # A bare null may not be wrapped; it is represented by NullType itself.
        assert(type(vtype) != NullType)
        self._type = vtype

    @property
    def type(self) -> TypeBase:
        return self._type

    def replaceWithCommonObject(self, commonObject: 'CommonObjectType'):
        # Swap the inner type for a shared named type.
        self._type = commonObject

    @property
    def isLeaf(self) -> bool:
        # An empty wrapper (e.g. empty array) counts as non-leaf.
        if self._type is None:
            return False
        return self._type.isLeaf


class Nullable(UniTypeHolder):
    """A type that may also be null; rendered with a trailing '?'."""

    def __repr__(self) -> str:
        return str(self._type) + '?'


class ValueType(TypeBase):
    """Scalar leaf type identified by one of the ST_* tags."""

    def __init__(self, typename: str) -> None:
        assert(type(typename) == str)
        self.__typename = typename

    def __eq__(self, other):
        return type(other) == ValueType and self.__typename == other.__typename

    def __repr__(self) -> str:
        return '"' + self.__typename + '"'

    @property
    def typename(self):
        return self.__typename


class ArrayType(UniTypeHolder):
    """Homogeneous array of the inner type; '[]' when the element type is unknown."""

    def __repr__(self) -> str:
        return '[' + str(self._type) + ']' if self._type is not None else '[]'


class ObjectType(TypeBase):
    """JSON object type: a mapping from property name to inferred type."""

    def __init__(self, props) -> None:
        assert(type(props) == dict)
        self.__props = props

    @property
    def isLeaf(self) -> bool:
        return False

    @property
    def isPlain(self):
        # "Plain" means every property is a leaf (no nested objects/arrays).
        return all(map(lambda e: e.isLeaf, self.__props.values()))

    def get(self, v):
        return self.__props.get(v, None)

    def keys(self):
        return self.__props.keys()

    def items(self):
        return self.__props.items()

    def __repr__(self) -> str:
        return '{' + ','.join(['"%s":%s' % (k,str(v)) for (k,v) in self.__props.items()]) + '}'

    @property
    def numOfKeys(self):
        return len(list(self.keys()))

    def hasSameKeysOf(self, other) -> bool:
        # Exact key-set equality.
        assert(type(other) == ObjectType)
        return set(self.keys()) == set(other.keys())

    def containsAllKeysOf(self, other) -> bool:
        # This object's keys are a superset of the other's.
        assert(type(other) == ObjectType)
        return set(self.keys()).issuperset(set(other.keys()))

    def replaceWithCommonObject(self, key, commonObject: 'CommonObjectType'):
        self.__props[key] = commonObject


class CommonObjectType(TypeBase):
    """A named, shared object type factored out of similar ObjectTypes."""

    def __init__(self, typename, object: ObjectType) -> None:
        assert(type(object) == ObjectType)
        self.__typename = typename
        self.__object = object

    def __repr__(self) -> str:
        # '$' marks a reference to a shared type rather than an inline object.
        return '"$' + self.__typename + '"'

    @property
    def typename(self):
        return self.__typename

    @property
    def object(self):
        return self.__object


def __guessTypeForValue(v):
    """Infer the leaf type of a scalar JSON value (not dict/list)."""
    assert(type(v) != dict and type(v) != list)
    if type(v) == type(None):
        return NullType()
    typemap = { bool: ST_BOOL, int: ST_INT, str: ST_STR, float: ST_FLOAT }
    vtype = typemap.get(type(v), NullType())
    if type(vtype) == NullType:
        return NullType()
    if vtype == ST_STR:
        # Specialise strings into URL / datetime where they match.
        if v.find('http://') == 0 or v.find('https://') == 0:
            # Skip templated URLs containing '{'.
            if v.find('{') == -1: # FIXME ???
                return ValueType(ST_URL)
        if REGEXP_DATE.match(v):
            return ValueType(ST_DATETIME)
    return ValueType(vtype)


def __guessTypeForArray(json) -> ArrayType:
    """Infer the element type of a JSON list, merging object elements."""
    assert(type(json) == list)

    def aggregateArrayOfObjectType(array):
        # Union all keys seen across the array's ObjectTypes, then merge
        # per-key types (null + object collapses to Nullable(object)).
        keys = functools.reduce(lambda a, e: a.union(set(e.keys())), array, set())
        if len(keys) == 0:
            return ArrayType(None)
        merged = {}
        for obj in array:
            for key in keys:
                value = obj.get(key)
                if type(value) == ObjectType:
                    merged[key] = value
                #elif type(value) == ArrayType:
                #    merged[key] = aggregateArrayOfObjectType(value)
                elif key in merged:
                    if type(merged[key]) == NullType and type(value) == NullType:
                        pass
                    elif type(merged[key]) == ObjectType and type(value) == NullType:
                        merged[key] = Nullable(merged[key])
                    elif type(merged[key]) == NullType and type(value) == ObjectType:
                        merged[key] = Nullable(value)
                    elif type(merged[key]) == type(value) and type(value) == ValueType and merged[key] == value:
                        pass
                    else:
                        pass
                        #merged[key] = merged[key].union(value)
                else:
                    merged[key] = value
        return ArrayType(ObjectType(merged))

    if all([type(i) == dict for i in json]):
        arr = [__guessTypeForDict(i) for i in json]
        return aggregateArrayOfObjectType(arr)
    # Non-object arrays must be homogeneous; type taken from the first element.
    types = functools.reduce(lambda a, e: a.union(set([type(e)])), json, set())
    if len(types) == 1:
        return ArrayType(__guessTypeForValue(json[0]))
    assert(False)


def __guessTypeForDict(json) -> ObjectType:
    """Infer an ObjectType for a JSON dict, recursing into values."""
    assert(type(json) == dict)
    return ObjectType({k:guessType(v) for (k,v) in json.items()})


def guessType(value) -> TypeBase:
    """Entry point: infer the type tree of any JSON value."""
    if type(value) == dict:
        return __guessTypeForDict(value)
    elif type(value) == list:
        return __guessTypeForArray(value)
    else:
        return __guessTypeForValue(value)


def collectNonNestedObjects(obj: TypeBase, path: str = '', collected_map: Dict[str, TypeBase] = dict()) -> Dict[str, TypeBase]:
    """Walk a type tree and collect every plain (non-nested) ObjectType by path.

    NOTE(review): mutable default for collected_map is shared across calls;
    callers below always pass a fresh dict() explicitly.
    """
    if obj.isLeaf:
        return collected_map
    if obj.isPlain:
        collected_map[path] = obj
        return collected_map
    assert(type(obj) == ObjectType)
    for key, value in obj.items():
        if type(value) == Nullable and type(value.type) == ObjectType:
            # '?' suffix records that the object was nullable at this path.
            collectNonNestedObjects(value.type, path + '/' + key + '?', collected_map)
        elif type(value) == ObjectType:
            collectNonNestedObjects(value, path + '/' + key, collected_map)
        elif type(value) == ArrayType and type(value.type) == ObjectType:
            # '/0' marks descent into an array element.
            collectNonNestedObjects(value.type, path + '/' + key + '/0', collected_map)
    return collected_map


def exactMatch(a: ObjectType, b: ObjectType):
    # Same key set, a plain and non-empty.
    return a.numOfKeys > 0 and a.isPlain and a.hasSameKeysOf(b)


def similarMatch(a: ObjectType, b: ObjectType):
    # a is a plain superset of b's keys with more than 3 keys.
    return a.numOfKeys > 0 and a.isPlain and a.containsAllKeysOf(b) and a.numOfKeys > 3


def bothMatch(a: ObjectType, b: ObjectType):
    return exactMatch(a, b) or similarMatch(a, b)


class Endpoint:
    """One API endpoint: its request description and inferred response type."""

    def __init__(self, request: Dict, response: TypeBase, rawResponse: str) -> None:
        self.__request = request
        self.__response = response
        self.__rawResponse = rawResponse

    @property
    def request(self):
        return self.__request

    @property
    def response(self):
        return self.__response

    @property
    def rawResponse(self):
        return self.__rawResponse

    def replaceWithCommonObject(self, commonObject: CommonObjectType):
        """Replace matching sub-objects of the response with the shared type.

        Returns the number of replacements performed.
        """
        cond = lambda v: bothMatch(commonObject.object, v)

        def visitObject(obj: TypeBase):
            if obj.isLeaf:
                return 0
            if type(obj) != ObjectType:
                return 0
            assert(type(obj) == ObjectType)
            replaceCount = 0
            for key, value in obj.items():
                #print('    ', value)
                if type(value) == ObjectType:
                    if cond(value):
                        replaceCount += 1
                        obj.replaceWithCommonObject(key, commonObject)
                    elif not value.isPlain:
                        replaceCount += visitObject(value)
                elif type(value) == ArrayType and type(value.type) == ObjectType:
                    if cond(value.type):
                        replaceCount += 1
                        value.replaceWithCommonObject(commonObject)
                    else:
                        replaceCount += visitObject(value.type)
                elif type(value) == Nullable and type(value.type) == ObjectType:
                    if cond(value.type):
                        replaceCount += 1
                        value.replaceWithCommonObject(commonObject)
                    else:
                        replaceCount += visitObject(value.type)
            return replaceCount

        #print('>>>>', self.__request['name'])
        replaceCount = 0
        # The whole response may itself be replaced by the common type.
        if type(self.__response) == ObjectType and cond(self.__response):
            replaceCount = 1
            self.__response = commonObject
        else:
            replaceCount = visitObject(self.__response)
        return replaceCount

    def nonNextedResponseObjects(self) -> Dict[str, TypeBase]:
        """Return {candidate type name: plain ObjectType} for this response.

        Returns None when there is no response or it is a bare array.
        """
        def resolveTypename(path):
            # Last non-numeric path segment names the type; strip a trailing
            # '?' (nullable marker); empty path falls back to '<name>Response'.
            n = [e for e in path.split('/') if not e.isdigit()][-1]
            if len(n) == 0:
                return self.__request['name'] + 'Response'
            return n if n[-1] != '?' else n[:-1]

        if self.__response is None:
            return None
        if type(self.__response) == ArrayType:
            return None
        d = collectNonNestedObjects(self.__response, '', dict())
        return {resolveTypename(k):v for (k,v) in d.items() if len(v.keys()) > 0}

    def __repr__(self) -> str:
        return '%s = %s' % (self.__request['name'], self.__response)


class API:
    """Collection of endpoints plus the shared object types factored out of them."""

    def __init__(self, endpoints: List[Endpoint] = []) -> None:
        # NOTE(review): mutable default argument; safe only because
        # initWithDir always passes a fresh list.
        self.__endpoints = endpoints
        self.__commonObjects = []

    def endpoints(self) -> List[Endpoint]:
        return self.__endpoints

    def commonObjects(self) -> List[CommonObjectType]:
        return self.__commonObjects

    def __resolveTypename(self, typenameCanditates: List[str]):
        """Pick the shortest candidate name, capitalize it, and de-collide."""
        exists = lambda name: any(filter(lambda e: e.typename == name, self.__commonObjects))

        def rename(name):
            # Append 'Axx'..'Zxx' until the name is unused.
            for i in range(26):
                newTypename = name + chr(ord('A') + i) + 'xx'
                if not exists(newTypename):
                    return newTypename
            assert('Temporary typename exhausted' and False)

        filteredTypenameCanditates = sorted([e for e in typenameCanditates if len(e) > 0], key=functools.cmp_to_key(lambda a,b:len(a) - len(b)))
        typename = filteredTypenameCanditates[0]
        cappedTypename = typename[0].upper() + typename[1:]
        return rename(cappedTypename) if exists(cappedTypename) else cappedTypename

    def findAndRegisterSimilarObjects(self):
        """Repeatedly factor out object types shared by >= 2 response paths."""
        def findSimilarObject(objects: List[ObjectType], matchFunction: Callable[[ObjectType, ObjectType], bool]) -> CommonObjectType:
            for (_, obj) in objects:
                # Skip objects already covered by a registered common type.
                if any(filter(lambda e: matchFunction(e.object, obj), self.__commonObjects)):
                    continue
                typenameCanditates = [n for (n,o) in objects if matchFunction(obj, o)]
                if len(typenameCanditates) >= 2:
                    return CommonObjectType(self.__resolveTypename(typenameCanditates), obj)
            return None

        # Bounded loop as a safety net against non-termination.
        for i in range(100000):
            #nonNestedObjects = functools.reduce(lambda a, e: a + list(e.nonNextedResponseObjects().items()), self.__endpoints, [])
            nonNestedObjects = []
            for e in self.__endpoints:
                objs = e.nonNextedResponseObjects()
                if objs is None:
                    continue
                nonNestedObjects += objs.items()
            # Prefer exact key-set matches over superset ("similar") matches.
            sot = findSimilarObject(nonNestedObjects, exactMatch) or findSimilarObject(nonNestedObjects, similarMatch)
            if sot is None:
                break
            self.__commonObjects.append(sot)
            for e in self.__endpoints:
                e.replaceWithCommonObject(sot)

    @staticmethod
    def initWithDir(dir: str, lang: str):
        """Build an API from <dir>/api/<lang>/*.json requests and <dir>/response/ bodies."""
        endpoints = []
        #for d in ['get-message.json', 'get-messages.json']: #os.listdir(os.path.join(dir, 'api')):
        path = os.path.join(dir, 'api', lang)
        for d in os.listdir(path):
            with open(os.path.join(path, d)) as req:
                req_json = json.load(req)
            res_text = None
            res_json = None
            try:
                with open(os.path.join(dir, 'response', d)) as res:
                    res_text = ''.join(res.readlines())
                    res_json = json.loads(res_text)
            except (OSError, IOError) as e:
                pass # when response file doesn't exist
            endpoint = Endpoint(req_json, guessType(res_json), res_text)
            endpoints.append(endpoint)
        return API(endpoints)
36.218837
144
0.578356
1,399
13,075
5.255897
0.137956
0.023256
0.015232
0.013328
0.241398
0.166191
0.148783
0.133823
0.120495
0.102543
0
0.004619
0.304551
13,075
360
145
36.319444
0.804025
0.032352
0
0.255814
0
0.003322
0.018195
0.003006
0
0
0
0.002778
0.043189
1
0.17608
false
0.013289
0.019934
0.089701
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e8fa3cd904b0121303ce6cd660e368b0933349e
393
py
Python
setup.py
RonenHoffer/grebot
a8ca01baba72ff13ad68706626c5fd51630bbdf1
[ "MIT" ]
null
null
null
setup.py
RonenHoffer/grebot
a8ca01baba72ff13ad68706626c5fd51630bbdf1
[ "MIT" ]
null
null
null
setup.py
RonenHoffer/grebot
a8ca01baba72ff13ad68706626c5fd51630bbdf1
[ "MIT" ]
1
2016-01-27T13:37:09.000Z
2016-01-27T13:37:09.000Z
from setuptools import setup
from platform import system

SYSTEM = system()
VERSION = '1.0.2'

# Pick the platform-appropriate launcher script.
if SYSTEM == 'Windows':
    scripts = ['grebot/grebot.bat']
else:
    scripts = ['grebot/grebot.sh']

# FIX: the original passed open('README.txt').read() inline, which leaks the
# file handle (never closed, and emits a ResourceWarning).  Read it via a
# context manager before calling setup().
with open('README.txt') as _readme:
    _long_description = _readme.read()

setup(
    name='grebot',
    version=VERSION,
    packages=['grebot'],
    license='MIT',
    long_description=_long_description,
    scripts=scripts,
    install_requires=['colorama']
)
18.714286
47
0.653944
46
393
5.543478
0.652174
0.094118
0.14902
0
0
0
0
0
0
0
0
0.009375
0.185751
393
20
48
19.65
0.7875
0
0
0
0
0
0.198473
0
0
0
0
0
0
1
0
false
0
0.117647
0
0.117647
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e90005a1d37aeec86aa49ac6b0e7b616e3410f4
3,774
py
Python
src/arcos_gui/magic_guis.py
bgraedel/arcos-gui
aaeeba3aae1bc9a23c635ebabf6309f878ad8a39
[ "BSD-3-Clause" ]
2
2022-02-22T14:24:38.000Z
2022-02-26T13:33:25.000Z
src/arcos_gui/magic_guis.py
bgraedel/arcos-gui
aaeeba3aae1bc9a23c635ebabf6309f878ad8a39
[ "BSD-3-Clause" ]
null
null
null
src/arcos_gui/magic_guis.py
bgraedel/arcos-gui
aaeeba3aae1bc9a23c635ebabf6309f878ad8a39
[ "BSD-3-Clause" ]
null
null
null
import operator

from magicgui import magicgui

# Maps the user-facing operation name to (binary operator, result column name).
OPERATOR_DICTIONARY = {
    "Divide": (operator.truediv, "Measurement_Ratio"),
    "Multiply": (operator.mul, "Measurement_Product"),
    "Add": (operator.add, "Measurement_Sum"),
    "Subtract": (operator.sub, "Measurement_Difference"),
}
# Radio-button choices: every operation plus "None" (no second measurement).
measurement_math_options = list(OPERATOR_DICTIONARY.keys())
measurement_math_options.append("None")


@magicgui(
    call_button="Set Options",
    position={
        "choices": ["upper_right", "upper_left", "lower_right", "lower_left", "center"]
    },
    size={"min": 0, "max": 1000},
    x_shift={"min": -1000, "max": 1000},
    y_shift={"min": -1000, "max": 1000},
)
def timestamp_options(
    start_time=0,
    step_time=1,
    prefix="T =",
    suffix="frame",
    position="upper_left",
    size=12,
    x_shift=12,
    y_shift=0,
):
    """
    Widget to choose timestamp options from when called.

    The magicgui decorator turns the parameters into form fields; pressing
    the call button closes the dialog, leaving the chosen values on the
    widget's attributes.
    """
    timestamp_options.close()


# used as a callback function in main widget file
def show_timestamp_options():
    timestamp_options.show()


@magicgui(
    call_button=False,
    Ok={"widget_type": "PushButton", "tooltip": "Press to load data"},
    frame={
        "choices": ["None"],
        "label": "Frame Column:",
        "tooltip": "Select frame column in input data",
    },
    track_id={
        "choices": ["None"],
        "label": "Object id Column:",
        "tooltip": "Select column representing object track ids in input data",  # noqa: E501
    },
    x_coordinates={
        "choices": ["None"],
        "label": "X Coordinate Column:",
        "tooltip": "Select x coordinate column in input data",
    },
    y_coordinates={
        "choices": ["None"],
        "label": "Y Coordinate Column:",
        "tooltip": "Select y coordinate column in input data",
    },
    z_coordinates={
        "choices": ["None"],
        "label": "Z Coordinate Column:",
        "tooltip": "Select z coordinate column in input data, select None if column does not exist",  # noqa: E501
    },
    measurment={
        "choices": ["None"],
        "label": "Measurement Column:",
        "tooltip": "Select measurement column in input data",
    },
    field_of_view_id={
        "choices": ["None"],
        "label": "Field of View/Position Column:",
        "tooltip": "Select fov column in input data, select None if column does not exist",  # noqa: E501
    },
    additional_filter={
        "choices": ["None"],
        "label": "Additional Filter Column:",
        "tooltip": "Select additional filter column, for example Well of a wellplate, select None if column does not exist",  # noqa: E501
    },
    second_measurment={
        "choices": ["None"],
        "label": "Second Measurement Column:",
        "visible": False,
        "tooltip": "Select second measurement",
    },
    measurement_math={
        "widget_type": "RadioButtons",
        "orientation": "horizontal",
        "choices": measurement_math_options,
        "label": "Math on first and \n second measurement:",
        "tooltip": "Choose operation to calculate the measurment to be used in arcos calculation on first and second measurement",  # noqa: E501
    },
)
def columnpicker(
    frame="None",
    track_id="None",
    x_coordinates="None",
    y_coordinates="None",
    z_coordinates="None",
    measurment="None",
    second_measurment="None",
    field_of_view_id="None",
    additional_filter="None",
    measurement_math="None",
    Ok=False,
):
    """Dialog with magicgui for selecting columns"""
    # Toggle the Ok push-button state each time the dialog callback fires.
    columnpicker.Ok.bind(not Ok)


def toggle_visible_second_measurment():
    # Show the second-measurement picker only when a real operation is chosen.
    curr_value = columnpicker.measurement_math.value
    if curr_value in ["None", "1/X"]:
        columnpicker.second_measurment.hide()
    else:
        columnpicker.second_measurment.show()
29.952381
144
0.621092
421
3,774
5.425178
0.308789
0.043345
0.063047
0.044658
0.109457
0.064799
0.064799
0.064799
0.064799
0.048161
0
0.015267
0.236354
3,774
125
145
30.192
0.777238
0.052464
0
0.119266
0
0
0.392958
0.006197
0
0
0
0
0
1
0.036697
false
0
0.018349
0
0.055046
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e90bb685c3b1c5f6854891a144a51a32c3e3543
3,717
py
Python
pylibrets/models.py
fernherrera/pylibrets
ee7ae95a9a7c8809c4bc6d269c52518d672cbf82
[ "MIT" ]
5
2016-08-02T16:01:31.000Z
2017-09-15T21:44:56.000Z
pylibrets/models.py
fernherrera/pylibrets
ee7ae95a9a7c8809c4bc6d269c52518d672cbf82
[ "MIT" ]
null
null
null
pylibrets/models.py
fernherrera/pylibrets
ee7ae95a9a7c8809c4bc6d269c52518d672cbf82
[ "MIT" ]
1
2017-07-29T18:42:09.000Z
2017-07-29T18:42:09.000Z
""" Model classes - contains the primary objects that power pylibRETS. """ class MetadataSystem(object): def __init__(self): self.GetSystemID = None self.GetSystemDescription = None self.GetComments = None self.GetTimeZoneOffset = None self.GetMetadataID = None self.GetResourceVersion = None self.GetResourceDate = None self.GetForeignKeyVersion = None self.GetForeignKeyDate = None self.GetFilterVersion = None self.GetFilterDate = None class MetadataResource(object): def __init__(self): self.ResourceID = None self.StandardName = None self.KeyField = None class MetadataClass(object): def __init__(self): self.ClassName = None self.StandardName = None self.Description = None self.VisibleName = None self.TableVersion = None self.TableDate = None self.UpdateVersion = None self.UpdateDate = None self.ClassTimeStamp = None self.DeletedFlagField = None self.DeletedFlagValue = None self.HasKeyIndex = None self.OffsetSupport = None class MetadataTable(object): def __init__(self): self.SystemName = None self.StandardName = None self.LongName = None self.DBName = None self.ShortName = None self.MaximumLength = None self.DataType = None self.Precision = None self.Searchable = None self.Interpretation = None self.Alignment = None self.UseSeparator = None self.EditMaskID = None self.LookupName = None self.MaxSelect = None self.Units = None self.Index = None self.Minimum = None self.Maximum = None self.Default = None self.Required = None self.SearchHelpID = None self.Unique = None self.UpdatesModTimeStamp = None self.ForeignKey = None self.ForeignField = None self.KeyRetrievalQuery = None self.KeyRetrievalSelect = None self.InKeyIndex = None self.FilterParentField = None self.DefaultSearchOrder = None self.Case = None class MetadataLookup(object): def __init__(self): self.LookupName = None self.VisibleName = None self.Version = None self.Date = None self.FilterID = None self.NotShownByDefault = None class MetadataLookupType(object): def __init__(self): self.Value = None 
self.LongValue = None self.ShortValue = None class MetadataObject(object): def __init__(self): self.ObjectType = None self.MIMEType = None self.VisibleName = None self.Description = None self.ObjectTimeStamp = None self.ObjectCount = None self.LocationAvailability = None self.ObjectData = None self.MaxFileSize = None class LoginResponse(object): def __init__(self): self.GetMemberName = None self.GetUserInfo = None self.GetBroker = None self.GetMetadataVersion = None self.GetMetadataTimestamp = None self.GetMinMetadataTimestamp = None self.GetOfficeList = None self.GetBalance = None self.GetTimeout = None self.GetPasswordExpire = None self.GetActionUrl = None self.GetChangePasswordUrl = None self.GetGetObjectUrl = None self.GetLoginUrl = None self.GetLoginCompleteUrl = None self.GetLogoutUrl = None self.GetSearchUrl = None self.GetGetMetadataUrl = None self.GetServerInformationUrl = None self.GetUpdateUrl = None self.GetPayloadListUrl = None self.GetUserID = None self.GetUserClass = None self.GetUserLevel = None self.GetAgentCode = None self.GetBrokerCode = None self.GetBrokerBranch = None self.GetMetadataID = None self.GetWarnPasswordExpirationDays = None self.GetStandardNamesVersion = None self.GetVendorName = None self.GetServerProductName = None self.GetServerProductVersion = None self.GetOperatorName = None self.GetRoleName = None self.GetSupportContactInformation = None self.GetSessionInformationTokens = None def CreateCapabilityUrls(baseUrl): pass
25.634483
66
0.750605
403
3,717
6.843672
0.330025
0.307469
0.037708
0.049311
0.152647
0
0
0
0
0
0
0
0.172182
3,717
144
67
25.8125
0.896328
0
0
0.151515
0
0
0
0
0
0
0
0
0
0
null
null
0.030303
0
null
null
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
2
8e91dfb90c4fe4bfe8c34531aaadba87573629d2
980
py
Python
setup.py
michaelremington2/uumarrty
4c48b496e09429eb6777f9dececa7c7be203cc8c
[ "BSD-3-Clause" ]
null
null
null
setup.py
michaelremington2/uumarrty
4c48b496e09429eb6777f9dececa7c7be203cc8c
[ "BSD-3-Clause" ]
null
null
null
setup.py
michaelremington2/uumarrty
4c48b496e09429eb6777f9dececa7c7be203cc8c
[ "BSD-3-Clause" ]
null
null
null
#! /usr/bin/env python # -*- coding: utf-8 -*- from setuptools import setup, find_packages with open("README.md", "r") as fh: long_description = fh.read() setup( name='uumarrty', version='0.0.1', url='https://github.com/michaelremington2/uumarrty', author='Michael Remington and Jeet Sukumaran', author_email='michaelremington2@gmail.com', license="LICENSE.txt", classifiers=[ "Intended Audience :: Science/Research", "License :: OSI Approved :: BSD License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python :: 3", ], scripts=[ "bin/simulate_uumarrty.py", ], test_suite = "tests", package_dir={"": "src"}, description="Agent based simulation of predator prey dynamics.", long_description=long_description, long_description_content_type="text/markdown", packages=find_packages(where="src"), python_requires=">=3.6", )
28.823529
68
0.656122
110
980
5.727273
0.754545
0.095238
0.060317
0.095238
0
0
0
0
0
0
0
0.011392
0.193878
980
34
69
28.823529
0.786076
0.043878
0
0.071429
0
0
0.443376
0.054487
0
0
0
0
0
1
0
false
0
0.035714
0
0.035714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e92b7bf4f6465a749d963319c6412e27abe7160
111
py
Python
src/ToolChainSCDG/procedures/linux/custom_package/gen_simproc4v.py
AnonymousSEMA/SEMA-ToolChain
05d6a7e43e10d4b1f6c5dfb70fbabeab3d4daf82
[ "BSD-2-Clause" ]
null
null
null
src/ToolChainSCDG/procedures/linux/custom_package/gen_simproc4v.py
AnonymousSEMA/SEMA-ToolChain
05d6a7e43e10d4b1f6c5dfb70fbabeab3d4daf82
[ "BSD-2-Clause" ]
null
null
null
src/ToolChainSCDG/procedures/linux/custom_package/gen_simproc4v.py
AnonymousSEMA/SEMA-ToolChain
05d6a7e43e10d4b1f6c5dfb70fbabeab3d4daf82
[ "BSD-2-Clause" ]
null
null
null
import angr


class gen_simproc4v(angr.SimProcedure):
    """No-op SimProcedure stub for a hooked function taking four arguments.

    When angr's symbolic execution reaches a call hooked with this class,
    the four call arguments are consumed and ignored and execution simply
    continues past the call.
    """

    def run(self, arg1, arg2, arg3, arg4):
        # Fixed typo: the second parameter was named ``ar2``.  angr binds
        # the hooked call's arguments positionally, so the rename is
        # call-compatible.
        return
15.857143
41
0.675676
15
111
4.933333
0.933333
0
0
0
0
0
0
0
0
0
0
0.05814
0.225225
111
6
42
18.5
0.802326
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0.25
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
4
8e92d4b4780c0502b64aba9cbe5abb7f02ad327d
10,370
py
Python
_pylab_colorslider.py
streitho/spinmob
5dd75b54de1b158e6f1308612336638fde520cbd
[ "Unlicense" ]
null
null
null
_pylab_colorslider.py
streitho/spinmob
5dd75b54de1b158e6f1308612336638fde520cbd
[ "Unlicense" ]
null
null
null
_pylab_colorslider.py
streitho/spinmob
5dd75b54de1b158e6f1308612336638fde520cbd
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python
#Boa:App:BoaApp
# NOTE(review): Python 2 module (print statements, bare ``reload``); the wx
# GUI logic below is order-sensitive, so only comments were added here.

import wx
import matplotlib as _matplotlib
import pylab as _pylab
import _pylab_colorslider_frame as _pcf; reload(_pcf)

# _prefs is expected to be injected by the surrounding package; fall back to
# None when it is not defined (LoadColorMap will then fail into its default).
try:    _prefs
except: _prefs = None

modules ={u'pylab_colorslider_frame': [1, 'Main frame of Application', u'pylab_colorslider_frame.py']}


class BoaApp(wx.App):
    # Boa-generated wx application bootstrap: create and show the main frame.
    def OnInit(self):
        self.main = _pcf.create(None)
        self.main.Show()
        self.SetTopWindow(self.main)
        return True


def main():
    # Stand-alone entry point: run the wx event loop.
    application = BoaApp(0)
    application.MainLoop()


if __name__ == '__main__':
    main()


#
# This class contains one color point and generates new slider gui's when it's time to modify
#
class ColorPoint:
    # class-level defaults (shadowed per-instance in __init__)
    color    = None    # primary wx.Colour at this position
    color2   = None    # secondary wx.Colour (right-hand side of the segment)
    position = 0.0     # location of the point in [0, 1] along the colormap
    min = 0.0 # in case the user modifies this
    max = 1.0 # in case the user modifies this
    parent = None      # owning GuiColorMap
    slider = None      # currently shown ColorSliderFrame, or None

    def __init__(self, parent, position, red=0, green=0, blue=255, red2=0, green2=0, blue2=255):
        # just store the local variables
        self.parent   = parent
        self.color    = wx.Colour(red, green, blue)
        self.color2   = wx.Colour(red2, green2, blue2)
        self.position = position
        return

    # NOTE(review): mutable default argument — harmless here because
    # ``position`` is immediately rebound to a wx.Point, never mutated.
    def ShowSlider(self, position=[0,0]):
        """
        Creates a color slider GUI object, and pops it up. When the
        colorslider moves, this object's color data is updated.
        """
        # close/delete any old ones
        self.HideSlider()

        # find out if this is the "main" slider (that appears in the taskbar)
        n = None
        for i in range(len(self.parent.colorpoints)):
            if self == self.parent.colorpoints[i]: n = i

        # modify the style accordingly: only the LAST point's slider gets a
        # caption/taskbar entry; all others float borderless on top of it.
        style = 0
        if not n==len(self.parent.colorpoints)-1:
            style  = wx.FRAME_NO_TASKBAR|wx.CLIP_CHILDREN|wx.FRAME_FLOAT_ON_PARENT|wx.NO_BORDER
            size   = wx.Size(351, 38)
            parent = self.parent.colorpoints[-1].slider # better make the last one first!
        else:
            style  = wx.CLIP_CHILDREN|wx.CAPTION|wx.MINIMIZE_BOX|wx.CLOSE_BOX|wx.SYSTEM_MENU
            size   = wx.Size(351, 40+35*(len(self.parent.colorpoints)-1))
            parent = wx.GetApp().GetTopWindow()

        # convert the coords to a real position
        position = wx.Point(position[0], position[1])

        # create the GUI object
        self.slider = _pcf.ColorSliderFrame(parent, self, style=style, size=size, position=position)
        # first and last points are the colormap endpoints: their position
        # controls are disabled.
        if n in [0, len(self.parent.colorpoints)-1]: self.slider.EnableStuff(False)
        self.slider.Show()

    def HideSlider(self):
        # Destroy the slider window (if any) and drop the reference.
        if self.slider:
            self.slider.Hide()
            self.slider.Destroy()
            self.slider = None


#
# This class contains a list of color points and a link to a parent image.
# Its job is to update the parent image colormap
#
class GuiColorMap:
    # define the local variables of the class
    colorpoints = []   # ordered list of ColorPoint objects
    image       = None # matplotlib image whose cmap is being edited

    def __init__(self, image="top", colormap="_last"):
        """
        This class contains a list of color points defining a colormap.
        It is capable of providing GUI sliders to modify the colors and
        locations of the color points in the color map and updating the
        supplied image on the fly.

        To get the initial color from the supplied image, it assumes that
        the red, green, and blue channels have the same set of positions!

        To find the image, try gca().images[0]

        set colormap=None to try and import the current colormap
        """
        if image == "top": image = _pylab.gca().images[0]

        # store the reference to the image
        self.image = image

        # get the data for easier coding
        if colormap == None:
            # use the color map from the image if possible
            c  = image.cmap._segmentdata
            cr = c['red']
            cg = c['green']
            cb = c['blue']

            # get the number of steps in this cmap
            N = len(cb)

            # loop over the number of entries and generate the list
            self.colorpoints = []

            # try to import the colormap from the image
            for n in range(N):
                # only importable when R/G/B share the same breakpoints
                if cr[n][0] == cb[n][0] and cr[n][0] == cg[n][0]:
                    self.colorpoints.append(ColorPoint(self, cr[n][0],
                        cr[n][1]*255, cg[n][1]*255, cb[n][1]*255,
                        cr[n][2]*255, cg[n][2]*255, cb[n][2]*255))
                else:
                    print "This colormap is too complicated. Switching to default."
                    colormap = "default"
                    break;

        # if we need to, use the default map
        if not colormap == None: self.LoadColorMap(colormap)

        # may as well show these guys to the user too
        self.ShowSliders()

    def LoadColorMap(self, name="default"):
        # open the file "[spinmobpath]/colormaps/whatever.txt"
        try:
            f = open(_prefs.colormaps_dir + _prefs.path_delimiter + name + ".txt", "r")
            lines = f.readlines()
            f.close()

            # now loop over the colors (lines) and generate a list
            self.colorpoints = []
            for line in lines:
                # split the line by white space
                s = line.split()

                # now create a new color point
                # (file format: position r g b r2 g2 b2 — seven columns)
                if len(s) == 7:
                    self.colorpoints.append(ColorPoint(self, float(s[0]),
                        float(s[1]), float(s[2]), float(s[3]),
                        float(s[4]), float(s[5]), float(s[6])))

        # use the hard-coded default
        except:
            print "Could not load "+_prefs.colormaps_dir + _prefs.path_delimiter + name + ".txt"
            self.colorpoints = [ColorPoint(self, 0.0, 255, 255, 255, 255, 255, 255),
                                ColorPoint(self, 0.5, 0,   0,   255, 0,   0,   255),
                                ColorPoint(self, 1.0, 255, 0,   0,   255, 0,   0)]

        # now update
        self.UpdateImage()

    def SaveColorMap(self, name="_last"):
        # Persist the current points in the same seven-column text format
        # that LoadColorMap reads.
        try:
            f = open(_prefs.colormaps_dir + _prefs.path_delimiter + name + ".txt", "w")

            # loop over the color points
            for c in self.colorpoints:
                f.write(str(c.position)       + " " +
                        str(c.color.Red())    + " " +
                        str(c.color.Green())  + " " +
                        str(c.color.Blue())   + " " +
                        str(c.color2.Red())   + " " +
                        str(c.color2.Green()) + " " +
                        str(c.color2.Blue())  + "\n")
            f.close()
        except:
            print "Couldn't save last colormap!"

    def UpdateImage(self):
        """
        This takes the current values of the various color points, orders
        them, and updates the colormap of the parent image.
        """
        # first order the list according to the element positions
        # (selection-sort by position; ties are nudged by +0.0001 so every
        # point keeps a distinct breakpoint)
        new_list = []
        while len(self.colorpoints):
            # find the minimum position
            x0 = 2.0
            n0 = 0
            for n in range(len(self.colorpoints)):
                # if this item is smaller than the previous record, store it
                if self.colorpoints[n].position < x0:
                    x0 = self.colorpoints[n].position
                    n0 = n

                # if it's equal to the previous record, make it a little bigger
                # next time around, this can be the new minimum
                elif self.colorpoints[n].position == x0:
                    self.colorpoints[n].position = x0 + 0.0001

                # if it's larger than 1, set it to 1 and knock off the best a little
                if self.colorpoints[n].position > 1.0:
                    self.colorpoints[n].position  = 1.0
                    self.colorpoints[n0].position = 1.0-0.0001

            # now we have the minimum index
            new_list.append(self.colorpoints.pop(n0))

        # now set the new list
        self.colorpoints = new_list

        # now generate the colormap from the ordered list
        # (matplotlib segmentdata: (position, left-value, right-value) in 0..1)
        r = []
        g = []
        b = []
        for point in self.colorpoints:
            r.append((point.position, point.color.Red()/255.0,   point.color2.Red()/255.0))
            g.append((point.position, point.color.Green()/255.0, point.color2.Green()/255.0))
            b.append((point.position, point.color.Blue()/255.0,  point.color2.Blue()/255.0))

        # store the formatted dictionary
        c = {'red':r, 'green':g, 'blue':b}

        # now set the dang thing
        self.image.set_cmap(_matplotlib.colors.LinearSegmentedColormap('custom', c))
        _pylab.draw()
        self.SaveColorMap()

    def ShowSliders(self):
        """
        This will show all the sliders, tiling them to the right of the figure
        """
        # loop over the points in the list (backwards, so the last/"main"
        # slider exists before the borderless ones parent to it)
        for n in range(len(self.colorpoints)-1,-1,-1):
            self.ShowSlider(n, "auto")

    def HideSliders(self):
        for p in self.colorpoints: p.HideSlider()

    def ShowSlider(self, n, position="auto"):
        """
        This will show the n'th slider at the specified screen position
        """
        try:
            if position == "auto":
                # get the figure position and size
                p = self.image.figure.canvas.Parent.GetPosition()
                w = self.image.figure.canvas.Parent.GetSize()[0]
                if n==len(self.colorpoints)-1:
                    position = [p[0]+w,   p[1]+40*(len(self.colorpoints)-n-1)]
                else:
                    position = [p[0]+w+3, p[1]+65+35*(len(self.colorpoints)-n-2)]
        except:
            print "Can't position slider relative to anything but a wxAgg plot."

        # fall back to the screen origin when auto-positioning failed
        if not hasattr(position, '__iter__'): position = [0,0]

        self.colorpoints[n].ShowSlider(position)

    def HideSlider(self, n):
        self.colorpoints[n].HideSlider()
34.451827
105
0.533848
1,296
10,370
4.219136
0.238426
0.068581
0.029261
0.026335
0.150329
0.071141
0.071141
0.051024
0.018288
0.018288
0
0.031921
0.362584
10,370
300
106
34.566667
0.79531
0.162006
0
0.082278
0
0
0.046613
0.006678
0
0
0
0
0
0
null
null
0
0.025316
null
null
0.025316
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
8e948cdbd864ca7d68940aa639d8604501f00bc5
683
py
Python
RackPi/Pages/Reboot.py
DarkIrata/rackpi
e588f9b42ae55c8a763ce9e7a953e29f25e696b3
[ "MIT" ]
null
null
null
RackPi/Pages/Reboot.py
DarkIrata/rackpi
e588f9b42ae55c8a763ce9e7a953e29f25e696b3
[ "MIT" ]
null
null
null
RackPi/Pages/Reboot.py
DarkIrata/rackpi
e588f9b42ae55c8a763ce9e7a953e29f25e696b3
[ "MIT" ]
null
null
null
import subprocess

from Data.Drawer import Drawer
from Data.Helper import *
from Pages.PageBase import PageBase


class Reboot(PageBase):
    """Menu page that reboots the device when the button is long-pressed."""

    def __init__(self, drawer: Drawer):
        PageBase.__init__(self, drawer)

    def UpdateCanvas(self):
        """Redraw the three-line reboot prompt (rate-limited)."""
        # CanUpdate(100) throttles redraws — presumably a millisecond
        # interval; TODO confirm against PageBase.
        if not self.CanUpdate(100):
            return
        self.drawer.ClearCanvas()
        self.drawer.WriteOnCanvas(".......Reboot.......", line=0)
        self.drawer.WriteOnCanvas(" Hold Button ", line=1)
        self.drawer.WriteOnCanvas(" To Reboot ", line=2)

    def OnLongPress(self):
        """Clear the display and reboot the machine via sudo."""
        self.drawer.ClearCanvas()
        print("REBOOT")
        # Fixes: subprocess was never imported here (it only worked if the
        # ``Data.Helper`` star-import happened to provide it), and the
        # command was run through a shell.  An explicit argument list with
        # the default shell=False avoids the shell entirely.
        subprocess.Popen(["sudo", "reboot", "now"])
31.045455
65
0.610542
75
683
5.453333
0.48
0.171149
0.168704
0
0
0
0
0
0
0
0
0.011976
0.266471
683
22
66
31.045455
0.804391
0
0
0.111111
0
0
0.116959
0
0
0
0
0
0
1
0.166667
false
0
0.166667
0
0.444444
0.055556
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e98c19a9f41dbb82f2ec64a837df13e0499732e
380
py
Python
ex018.py
Gustavo-Dev-Web/python
88c9a51cba5290d1dcfce8ea9481ed4749503f68
[ "MIT" ]
null
null
null
ex018.py
Gustavo-Dev-Web/python
88c9a51cba5290d1dcfce8ea9481ed4749503f68
[ "MIT" ]
null
null
null
ex018.py
Gustavo-Dev-Web/python
88c9a51cba5290d1dcfce8ea9481ed4749503f68
[ "MIT" ]
null
null
null
from math import radians, sin, cos, tan

# Read an angle in degrees, convert it to radians a single time, then
# report its sine, cosine and tangent with two decimal places.
angulo = float(input('Digite o ângulo que você deseja: '))
rad = radians(angulo)
seno, cosseno, tangente = sin(rad), cos(rad), tan(rad)
print(f'O ângulo de {angulo} tem o SENO de {seno :.2f}!')
print(f'O ângulo de {angulo} tem o COSSENO de {cosseno :.2f}!')
print(f'O ângulo de {angulo} tem a TANGENTE de {tangente :.2f}!')
34.545455
65
0.694737
64
380
4.125
0.390625
0.106061
0.079545
0.147727
0.295455
0.295455
0.295455
0.295455
0
0
0
0.009288
0.15
380
10
66
38
0.80805
0
0
0
0
0
0.494737
0
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0.375
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e995a2b9016602f49ded74c3b7213f7af39a054
2,315
py
Python
Dash/Plotly_to_Dash.py
jorge-garciadiego/Visualization-Dash
68fe99ff5c3ecc99eea3845f38849669449d89d2
[ "MIT" ]
null
null
null
Dash/Plotly_to_Dash.py
jorge-garciadiego/Visualization-Dash
68fe99ff5c3ecc99eea3845f38849669449d89d2
[ "MIT" ]
null
null
null
Dash/Plotly_to_Dash.py
jorge-garciadiego/Visualization-Dash
68fe99ff5c3ecc99eea3845f38849669449d89d2
[ "MIT" ]
null
null
null
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import numpy as np

app = dash.Dash()

# Creating Data
np.random.seed(42)
random_x = np.random.randint(1, 101, 100)
random_y = np.random.randint(1, 101, 100)


def _scatter_figure(title, marker_color):
    """Build one of the two Plotly scatter figures.

    The two graphs below differed only in title and marker colour, so the
    shared structure is factored out here instead of being duplicated.
    """
    return {
        'data': [
            go.Scatter(
                x=random_x,
                y=random_y,
                mode='markers',
                marker={
                    'size': 12,
                    'color': marker_color,
                    'symbol': 'pentagon',
                    'line': {'width': 2}
                }
            )
        ],
        'layout': go.Layout(title=title, xaxis={'title': 'Some X title'})
    }


# everything that we are going to be inserting will be inside this Div for html
# The Graph component is what will receive our Plotly figure
app.layout = html.Div([
    dcc.Graph(id='scatterplot',
              figure=_scatter_figure('My Scatterplot', 'rgb(51,204,153)')),
    dcc.Graph(id='scatterplot2',
              figure=_scatter_figure('My Scatterplot 2', 'rgb(200,204,53)')),
])

if __name__ == '__main__':
    app.run_server()
45.392157
90
0.310151
165
2,315
4.230303
0.460606
0.04298
0.04298
0.045845
0.469914
0.412607
0.34957
0.34957
0.34957
0.34957
0
0.04386
0.606048
2,315
50
91
46.3
0.721491
0.064795
0
0.511628
0
0
0.103193
0
0
0
0
0
0
1
0
false
0
0.116279
0
0.116279
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
8e9b97604a5cb5368bd271887ae7d926ada9d2f3
685
py
Python
LeetCode/python/061-090/086-partition-list/solution.py
shootsoft/practice
49f28c2e0240de61d00e4e0291b3c5edd930e345
[ "Apache-2.0" ]
null
null
null
LeetCode/python/061-090/086-partition-list/solution.py
shootsoft/practice
49f28c2e0240de61d00e4e0291b3c5edd930e345
[ "Apache-2.0" ]
null
null
null
LeetCode/python/061-090/086-partition-list/solution.py
shootsoft/practice
49f28c2e0240de61d00e4e0291b3c5edd930e345
[ "Apache-2.0" ]
null
null
null
__author__ = 'yinjun'

# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, x):
#         self.val = x
#         self.next = None

class Solution:
    # @param head, a ListNode
    # @param x, an integer
    # @return a ListNode
    def partition(self, head, x):
        """Stable-partition the list around x into a NEW list.

        Values < x precede values >= x, each group keeping its original
        relative order.  Note: fresh ListNode copies are allocated rather
        than relinking the input nodes.
        """
        # dummy heads for the two partitions
        small_head = ListNode(0)
        big_head = ListNode(0)
        small_tail, big_tail = small_head, big_head

        node = head
        while node is not None:
            copy = ListNode(node.val)
            if node.val < x:
                small_tail.next = copy
                small_tail = copy
            else:
                big_tail.next = copy
                big_tail = copy
            node = node.next

        # splice: the < x chain followed by the >= x chain
        small_tail.next = big_head.next
        return small_head.next
18.513514
41
0.464234
83
685
3.73494
0.39759
0.03871
0.083871
0.103226
0
0
0
0
0
0
0
0.043928
0.435037
685
36
42
19.027778
0.757106
0.272993
0
0
0
0
0.01227
0
0
0
0
0
0
1
0.055556
false
0
0
0
0.166667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e9c2da8ab435f1bdffb4bc676ac1fdf47cb0931
1,134
py
Python
aprendi com guanabara/Ex 039.py
GustinSilva/python
fa1d4213771a2edc8dec8c08d0d19ada44e5e2f2
[ "MIT" ]
null
null
null
aprendi com guanabara/Ex 039.py
GustinSilva/python
fa1d4213771a2edc8dec8c08d0d19ada44e5e2f2
[ "MIT" ]
null
null
null
aprendi com guanabara/Ex 039.py
GustinSilva/python
fa1d4213771a2edc8dec8c08d0d19ada44e5e2f2
[ "MIT" ]
null
null
null
""" Faça um programa que leia o ano de nascimento de um jovem e informe, de acordo com sua idade se ele ainda vai se alistar se é a hora de se alistar se já passou o tempo de alistar o programa também deve falar o tempo que falta ou que passou """ import datetime import time ano_nasc = int(input('Ano de Nascimento: ')) ano_atual = datetime.date.today().year idade = ano_atual - ano_nasc print('Um momento estamos fazendo o calculo.\n', end='') time.sleep(1) print('.') time.sleep(1) print('.') time.sleep(1) if idade == 18: print('Quem nasceu no ano de {} tem {} anos em {}'.format(ano_nasc, idade, ano_atual)) print('Você tem que se alistar IMEDIATAMENTE') elif idade > 18: print('Quem nasceu em {} tem {} anos em {}.'.format(ano_nasc, idade, ano_atual)) print('Você já deveria ter se alistado há {} anos.'.format(idade-18)) print('Seu alistamento deu-se em {}.'.format(ano_nasc+18)) else: print('Quem nasceu em {} tem {} anos em {}.'.format(ano_nasc, idade, ano_atual)) print('Ainda faltam {} anos para seu alistamento.'.format(18-idade)) print('Seu alistamento será em {}.'.format(ano_nasc+18))
35.4375
92
0.694004
189
1,134
4.100529
0.391534
0.063226
0.070968
0.096774
0.341935
0.260645
0.260645
0.209032
0.209032
0.209032
0
0.01594
0.170194
1,134
31
93
36.580645
0.807651
0.21164
0
0.318182
0
0
0.397291
0
0
0
0
0
0
1
0
false
0
0.090909
0
0.090909
0.5
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
1
8e9c71c55ad72a43c98fbcef2fffb6bb801d664d
3,040
py
Python
standalone_tf/WholeImageDescriptor.py
galsh17/cartwheel_train
a50abe18cfe8c1f0f24267c3efa8537ecf211e72
[ "MIT" ]
32
2018-09-04T08:51:08.000Z
2022-02-22T02:04:38.000Z
standalone_tf/WholeImageDescriptor.py
galsh17/cartwheel_train
a50abe18cfe8c1f0f24267c3efa8537ecf211e72
[ "MIT" ]
5
2019-05-27T07:54:52.000Z
2022-01-11T10:14:25.000Z
standalone_tf/WholeImageDescriptor.py
galsh17/cartwheel_train
a50abe18cfe8c1f0f24267c3efa8537ecf211e72
[ "MIT" ]
14
2018-06-22T15:29:39.000Z
2021-09-28T12:58:37.000Z
""" A class interfce to netvlad based whole image descriptor. To use the pre-trained network in your application use this code and unit-test Author : Manohar Kuse <mpkuse@connect.ust.hk> Created : 20th Aug, 2018 """ import cv2 import numpy as np import os import time import code import argparse import sys import tensorflow as tf import tensorflow.contrib.slim as slim TF_MAJOR_VERSION = int(tf.__version__.split('.')[0]) TF_MINOR_VERSION = int(tf.__version__.split('.')[1]) from CartWheelFlow import VGGDescriptor from ColorLUT import ColorLUT import TerminalColors tcolor = TerminalColors.bcolors() class WholeImageDescriptor: def __init__( self, NET_TYPE, PARAM_K, PARAM_model_restore ): self.NET_TYPE = NET_TYPE self.PARAM_K = PARAM_K self.PARAM_model_restore = PARAM_model_restore ## Create Network tf_x = tf.placeholder( 'float', [1,240,320,3], name='x' ) #this has to be 3 if training with color images is_training = tf.placeholder( tf.bool, [], name='is_training') vgg_obj = VGGDescriptor(K=PARAM_K, D=256, N=60*80, b=1) tf_vlad_word = vgg_obj.network(tf_x, is_training, net_type=NET_TYPE ) ## Restore Model sess = tf.Session() print tcolor.OKGREEN,'Restore model from : ', PARAM_model_restore, tcolor.ENDC tensorflow_saver = tf.train.Saver() tensorflow_saver.restore( sess, PARAM_model_restore ) self.tf_x = tf_x self.tf_vlad_word = tf_vlad_word self.is_training = is_training self.vgg_obj = vgg_obj self.sess = sess def get_descriptor( self, im ): """ im: 1x240x320x3 """ assert( len(im.shape) == 4 ) feed_dict = {self.tf_x : im,\ self.is_training:True,\ self.vgg_obj.initial_t: 0 } tff_vlad_word, tff_sm = self.sess.run( [self.tf_vlad_word, self.vgg_obj.nl_sm], feed_dict=feed_dict) Assgn_matrix = np.reshape( tff_sm, [1,60,80,-1] ).argmax( axis=-1 ) #assuming batch size = 1 return tff_vlad_word, Assgn_matrix if __name__=='__main__': ## Network Params NET_TYPE = "resnet6" PARAM_K = 16 PARAM_model_restore = './tfmodels/B_vgg/model-8000' PARAM_model_restore = './tfmodels/D/model-8000' 
WID_net = WholeImageDescriptor( NET_TYPE, PARAM_K, PARAM_model_restore ) ## Load Image INPUT_FILE_NAME = 'sample_images/a0.jpg' print 'Load Image : ', INPUT_FILE_NAME IM = cv2.resize( cv2.imread( INPUT_FILE_NAME), (320, 240) ) im_batch = np.expand_dims( IM.astype('float32'), 0 ) ## descriptor and association map ## tff_vlad_word : 1x4096 ## Assgn_matrix : 1x60x80 tff_vlad_word, Assgn_matrix = WID_net.get_descriptor( im_batch ) ## Visualize Assgn_matrix - as a false color map colorLUT = ColorLUT() lut = colorLUT.lut( Assgn_matrix[0,:,:] ) cv2.imshow( 'IM', IM ) cv2.imshow( 'Assgn_matrix', cv2.resize( lut, (320,240) ) ) cv2.waitKey(0)
30.707071
113
0.659868
427
3,040
4.423888
0.367682
0.04235
0.071996
0.020116
0.103759
0.031763
0.031763
0
0
0
0
0.03823
0.234211
3,040
98
114
31.020408
0.773196
0.084539
0
0
0
0
0.063397
0.019936
0
0
0
0
0.017241
0
null
null
0
0.206897
null
null
0.034483
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
8e9d1f88f2018b598e87d9922395a3eec689c6a1
2,389
py
Python
jasonhelper/__init__.py
jbkoh/jason_python_helper
6a9d8e31d070b5adb827ba96887db24cb431b94e
[ "MIT" ]
null
null
null
jasonhelper/__init__.py
jbkoh/jason_python_helper
6a9d8e31d070b5adb827ba96887db24cb431b94e
[ "MIT" ]
1
2017-10-12T23:01:32.000Z
2017-11-21T06:44:07.000Z
jasonhelper/__init__.py
jbkoh/jason_python_helper
6a9d8e31d070b5adb827ba96887db24cb431b94e
[ "MIT" ]
1
2018-09-19T15:12:57.000Z
2018-09-19T15:12:57.000Z
import argparse
import os
import time


## Argparser type converters

def str2slist(s):
    """Parse a comma-separated string (spaces allowed) into a list of strings."""
    # Bug fix: str.replace returns a new string; the original discarded the
    # result, so spaces leaked into the parsed items.
    s = s.replace(' ', '')
    return s.split(',')


def str2ilist(s):
    """Parse a comma-separated string (spaces allowed) into a list of ints."""
    s = s.replace(' ', '')  # bug fix: result was previously discarded
    return [int(c) for c in s.split(',')]


def str2bool(v):
    """Parse 'true'/'True'/'false'/'False' into a bool.

    Raises:
        ValueError: for any other input.  (Replaces ``assert(False)``,
        which vanishes under ``python -O`` and crashes argparse instead of
        producing a usage error.)
    """
    if v in ['true', 'True']:
        return True
    elif v in ['false', 'False']:
        return False
    else:
        raise ValueError('invalid boolean literal: %r' % (v,))


# Shared parser with the custom converters registered as argparse types.
argparser = argparse.ArgumentParser()
argparser.register('type', 'bool', str2bool)
argparser.register('type', 'slist', str2slist)
argparser.register('type', 'ilist', str2ilist)


# Adopted from: http://stackoverflow.com/a/8412405
def rolling_window(l, w_size):
    """Yield every contiguous window of length w_size as a list."""
    for i in range(len(l) - w_size + 1):
        yield [l[i + o] for o in range(w_size)]


def striding_windows(l, w_size):
    """Yield non-overlapping chunks of length w_size (last may be shorter)."""
    curr_idx = 0
    while curr_idx < len(l):
        yield l[curr_idx:curr_idx + w_size]
        curr_idx += w_size


def check_and_create_dir(dir_path):
    """Create dir_path (and parents) if it does not already exist."""
    # exist_ok avoids the check-then-create race of the original version.
    os.makedirs(dir_path, exist_ok=True)


# Adopted from: https://stackoverflow.com/a/21894086
class bidict(dict):
    """A dict that also maintains an ``inverse`` mapping value -> [keys]."""

    def __init__(self, *args, **kwargs):
        super(bidict, self).__init__(*args, **kwargs)
        self.inverse = {}
        for key, value in self.items():
            self.inverse.setdefault(value, []).append(key)

    def __setitem__(self, key, value):
        # Drop the key from its old value's bucket before re-filing it.
        if key in self:
            self.inverse[self[key]].remove(key)
        super(bidict, self).__setitem__(key, value)
        self.inverse.setdefault(value, []).append(key)

    def __delitem__(self, key):
        self.inverse.setdefault(self[key], []).remove(key)
        # Remove the bucket entirely once it is empty.
        if self[key] in self.inverse and not self.inverse[self[key]]:
            del self.inverse[self[key]]
        super(bidict, self).__delitem__(key)


def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]


class FtnTimer(object):
    """Accumulate wall-clock time over repeated start()/end() pairs."""

    def __init__(self):
        self.tot_time = 0
        self.tot_cnt = 0
        self.curr_time = 0  # kept for backward compatibility; unused here

    def start(self):
        # Bug fix: time.clock() was removed in Python 3.8;
        # time.perf_counter() is the documented replacement.
        self.start_time = time.perf_counter()

    def end(self):
        end_time = time.perf_counter()
        self.tot_time += end_time - self.start_time
        self.tot_cnt += 1

    def get_result(self):
        """Return {'average_time': mean seconds per pair, or None if none}."""
        if not self.tot_cnt:
            avg_time = None
        else:
            avg_time = self.tot_time / self.tot_cnt
        return {'average_time': avg_time}
25.688172
69
0.601925
334
2,389
4.113772
0.296407
0.064047
0.029112
0.039301
0.055313
0.055313
0.055313
0
0
0
0
0.015792
0.257848
2,389
92
70
25.967391
0.759165
0.062788
0
0.085714
0
0
0.026906
0
0
0
0
0
0.014286
1
0.2
false
0
0.042857
0
0.342857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e9d9a8e7ebad14756d858c92a15d00b8f0de94b
2,983
py
Python
data_evaluation.py
portaloffreedom/reinforcement-learning-in-rust
470a8b6486a2c83dccbab9a0ef4bfd020e975d56
[ "MIT" ]
null
null
null
data_evaluation.py
portaloffreedom/reinforcement-learning-in-rust
470a8b6486a2c83dccbab9a0ef4bfd020e975d56
[ "MIT" ]
null
null
null
data_evaluation.py
portaloffreedom/reinforcement-learning-in-rust
470a8b6486a2c83dccbab9a0ef4bfd020e975d56
[ "MIT" ]
null
null
null
"""Plot mean reward curves with quantile envelopes from RL training CSVs."""
# Download data, unzip, etc.
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import scipy.stats as st

# Set some parameters to apply to all plots. These can be overridden
# in each plot if desired
import matplotlib

# Plot size to 14" x 7"
matplotlib.rc('figure', figsize=(14, 7))
# Font size to 14
matplotlib.rc('font', size=14)
# Do not display top and right frame lines
matplotlib.rc('axes.spines', top=False, right=False)
# Remove grid lines
matplotlib.rc('axes', grid=False)
# Set background color to white
matplotlib.rc('axes', facecolor='white')

# Single shared axes: every add_plot() call draws into the same figure.
_, ax = plt.subplots()


# Define a function for the line plot with intervals
def lineplotCI(x_data, y_data, low_CI, upper_CI, minimum, maximum,
               x_label, y_label, title, color, file_name):
    """Draw one curve plus its envelope onto the shared axes.

    x_data/y_data: main curve; low_CI/upper_CI: shaded band edges;
    minimum/maximum: thin outer envelope lines; file_name labels the curve.
    """
    # Main curve (thick), labelled with the source file name.
    ax.plot(x_data, y_data, lw=3, color=color, alpha=1, label=file_name)
    # Thin outer envelope lines.
    ax.plot(x_data, minimum, lw=1, color=color, alpha=1, label='5% quantile')
    ax.plot(x_data, maximum, lw=1, color=color, alpha=1, label='95% quantile')
    # Shade the confidence interval.
    # NOTE(review): the caller currently passes mean +/- std here, so the
    # '25-75 quantile' legend text is misleading — confirm intended band.
    ax.fill_between(x_data, low_CI, upper_CI, color=color, alpha=0.1,
                    label='25-75 quantile')
    # Label the axes and provide a title
    ax.set_title(title)
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
    # Display legend
    ax.legend(loc='best')


def add_plot(csv_name, color):
    """Load a runs-by-episodes CSV and plot its per-episode statistics.

    Assumes each row is one run with 400 episode columns — the x axis is
    hard-coded to range(0, 400); TODO confirm against the data files.
    """
    dataset = pd.read_csv(csv_name, header=None)
    mean = dataset.mean(axis=0)
    std = dataset.std(axis=0)
    upper = mean + std
    lower = mean - std
    max_quantile = dataset.quantile(0.95)
    min_quantile = dataset.quantile(0.05)
    # Removed dead code: an unused st.t.interval(...) computation, unused
    # 0.25/0.5/0.75 quantiles, and a commented-out median-based lineplotCI
    # call from earlier experiments.
    lineplotCI(x_data=list(range(0, 400)),
               y_data=mean,
               low_CI=lower,
               upper_CI=upper,
               minimum=min_quantile,
               maximum=max_quantile,
               x_label='Episodes',
               y_label='Value of Policy',
               title='Value of policy over time',
               file_name=csv_name,
               color=color)


add_plot("q_learning_epsilon_rewards.csv", '#999111')
add_plot("double_q_epsilon_rewards.csv", '#990a11')
plt.show()
33.516854
111
0.638284
430
2,983
4.27907
0.327907
0.019022
0.043478
0.052174
0.229891
0.201087
0.201087
0.13913
0.13913
0.103261
0
0.03048
0.252095
2,983
89
112
33.516854
0.794263
0.315454
0
0
0
0
0.096822
0.028798
0
0
0
0
0
1
0.043478
false
0
0.108696
0
0.152174
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8e9e6af2d0e1e8b1eb883ee00252a7b5e6939c84
3,953
py
Python
speedread_ner.py
gsi-upm/nerdy
62d2a6df730e30bc1c05c91557fcfd2236b742b8
[ "Apache-2.0" ]
2
2017-10-26T19:40:51.000Z
2018-03-07T12:10:49.000Z
speedread_ner.py
gsi-upm/nerdy
62d2a6df730e30bc1c05c91557fcfd2236b742b8
[ "Apache-2.0" ]
null
null
null
speedread_ner.py
gsi-upm/nerdy
62d2a6df730e30bc1c05c91557fcfd2236b742b8
[ "Apache-2.0" ]
null
null
null
__author__ = 'croman'
# NOTE(review): Python 2 module (print statements); the character-offset
# bookkeeping below is order-sensitive, so only comments were added.

from pipeline import pipe
from lxml import etree
import rdflib


# NOTE(review): ``format`` shadows the builtin of the same name.
def ner(datasetfile, format):
    # Run the NER pipeline over a tweet dataset given either as XML
    # ('xml') or as NIF/n3 RDF ('nif'), then realign the token-level
    # results back into one "token/tag" line per tweet.
    tweets = ""
    tweetids = []
    if format == 'xml':
        dataset = etree.parse(datasetfile)
        for tweet in dataset.xpath('//Tweet'):
            tweetText = tweet.xpath('./TweetText/text()')[0]
            tweets += tweetText+"\n"
            tweetids.append(tweet.xpath('./TweetId/text()')[0])
        tweets = tweets.encode('utf-8')
    elif format == "nif":
        tweetdict = {}
        a = rdflib.Graph()
        a.parse(datasetfile, format='n3')
        # Collect the isString literals, keyed by the tweet id embedded in
        # the subject URI.
        for s, p, o in a:
            if s.endswith(',') and p.endswith('isString'):
                tweetid = s.split('#')[0].split('.xml/')[1]
                tweetdict[tweetid] = o
        for key in sorted(tweetdict):
            tweetids.append(key)
            tweets += tweetdict[key]+'\n'
        tweets = tweets.encode('utf-8')
    print tweets
    # Per tweet, the total number of non-space characters; used below to
    # know how many pipeline output tokens belong to each tweet.
    indexes = []
    tweetlines = tweets.split('\n')
    for t in tweetlines:
        tweetlength = 0
        for word in t.split():
            tweetlength += len(word)
        print tweetlength
        indexes.append(tweetlength)
    options = {'log':'DEBUG', 'conf': 'pipeline/settings.py', 'text': tweets}
    results = pipe.main(options, [])
    print 'results: ' + results
    # Walk the pipeline's token-per-line output, consuming characters until
    # each tweet's budget (indexes[i]) is exhausted.
    x = 0
    finalresults = ''
    resultslines = results.splitlines()
    finalresults = ''
    for i in indexes:
        print i
        length = 0
        tweetresult = ''
        print x
        print resultslines[x]
        while length < i:
            if resultslines[x] != '':
                # Each non-empty line is "token<TAB>tag".
                entity = resultslines[x].split('\t')
                print entity
                length += len(entity[0])
                tweetresult += entity[0]+'/'+entity[1]+' '
                x += 1
                #print 'x=', x
                print 'length: ', length
            else:
                print 'ok'
                x += 1
        print tweetresult
        # Trailing space dropped; each tweet terminated with ' END'.
        finalresults += tweetresult[:-1]+' END\n'
    print finalresults

ner("Mena Collection.ttl", "nif")

# NOTE(review): the triple-quoted block below is a stale earlier copy of
# this module kept as a bare string literal; preserved untouched.
"""__author__ = 'croman'

from pipeline import pipe
from lxml import etree
import rdflib

def ner(datasetfile, format):
    tweets = ""
    tweetids = []
    if format == 'xml':
        dataset = etree.parse(datasetfile)
        for tweet in dataset.xpath('//Tweet'):
            tweetText = tweet.xpath('./TweetText/text()')[0]
            tweets += tweetText+"\n"
            tweetids.append(tweet.xpath('./TweetId/text()')[0])
        tweets = tweets.encode('utf-8')
    elif format == "nif":
        tweetdict = {}
        a = rdflib.Graph()
        a.parse(datasetfile, format='n3')
        for s, p, o in a:
            if s.endswith(',') and p.endswith('isString'):
                tweetid = s.split('#')[0].split('.xml/')[1]
                tweetdict[tweetid] = o
        for key in sorted(tweetdict):
            tweetids.append(key)
            tweets += tweetdict[key]+'\n'
        tweets = tweets.encode('utf-8')
    print tweets
    indexes = []
    tweetlines = tweets.split('\n')
    for t in tweetlines:
        tweetlength = 0
        for word in t.split():
            tweetlength += len(word)
        indexes.append(tweetlength)
    options = {'log':'DEBUG', 'conf': 'pipeline/settings.py', 'text': tweets}
    results = pipe.main(options, [])
    print results
    x = 0
    finalresults = ''
    for i in indexes:
        print i
        resultslines = results.split('\n')
        length = 0
        while length < i:
            entity = resultslines[x].split('\t')
            print resultslines[x]
            length += len(entity[0])
            if len(entity)>1:
                finalresults += entity[0]+'/'+entity[1]+' '
            x += 1
            print 'x=', x
            print 'length: ', length
        finalresults = finalresults[:-1]+' END\n'
    print finalresults

ner("Mena Collection.ttl", "nif")"""
27.838028
77
0.517329
418
3,953
4.873206
0.186603
0.03191
0.0216
0.041237
0.831615
0.831615
0.80216
0.771723
0.771723
0.771723
0
0.012232
0.338224
3,953
142
78
27.838028
0.766437
0.003289
0
0.095238
0
0
0.083173
0
0
0
0
0
0
0
null
null
0
0.047619
null
null
0.174603
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
7
8ea04f344a5f50abc39827025757b32b1b9d55c5
2,313
py
Python
vfoot/gen/__init__.py
filipecn/vfoot
3059f5bb471b6bdf92a18a7cdb6b33a2c8852046
[ "MIT" ]
null
null
null
vfoot/gen/__init__.py
filipecn/vfoot
3059f5bb471b6bdf92a18a7cdb6b33a2c8852046
[ "MIT" ]
null
null
null
vfoot/gen/__init__.py
filipecn/vfoot
3059f5bb471b6bdf92a18a7cdb6b33a2c8852046
[ "MIT" ]
null
null
null
import pandas as pd import random def generate_teams(n_teams=128, n_countries=3, csv_file="times.csv"): times = pd.DataFrame(columns=['nome', 'estadio', 'nacionalidade', 'score']) for i in range(n_teams): times = times.append({ 'nome': 'Time ' + str(i), 'estadio': random.randint(1000, 50000), 'nacionalidade': random.randint(0, n_countries), 'score': random.randint(0, 20), }, ignore_index=True) times.to_csv(csv_file) return times def generate_players(n_teams=128, n_countries=3, csv_file="jogadores.csv"): """ gerar os jogadores 0 - sarrafeiro 1 - caceteiro 2 - cordeirinho 3 - cavalheiro 4 - fair play 0 - goleiro 3 1 - defensor 7 2 - meio 7 3 - atacante 7 :param n_teams: :param n_countries: :param csv_file: :return: """ jogadores = pd.DataFrame( columns=['nome', 'nacionalidade', 'idade', 'estrela', 'time', 'posicao', 'comportamento', 'forca']) numero_por_posicao = [3, 7, 7, 7] k = 0 for i in range(n_teams): for p in range(4): for j in range(numero_por_posicao[p]): jogadores = jogadores.append({ 'nome': 'Jogador ' + str(k), 'nacionalidade': random.randint(0, n_countries), 'idade': random.randint(18, 45), 'estrela': random.randint(1, 100) > 95, 'time': i, 'posicao': p, 'comportamento': random.randint(0, 4), 'forca': random.randint(1, 50), }, ignore_index=True) k = k + 1 jogadores.to_csv(csv_file) return jogadores def generate_coaches(n_teams=128, csv_file="tecnicos.csv"): tecnicos = pd.DataFrame(columns=['nome', 'time', 'idade', 'comportamento']) for i in range(n_teams): tecnicos = tecnicos.append({ 'nome': 'Tecnico ' + str(i), 'time': i, 'idade': random.randint(30, 70), 'comportamento': random.randint(0, 4), }, ignore_index=True) tecnicos.to_csv(csv_file) if __name__ == "__main__": generate_teams(csv_file="../../data/times.csv")
32.125
107
0.5361
267
2,313
4.486891
0.307116
0.108514
0.058431
0.055092
0.22621
0.149416
0.045075
0.045075
0
0
0
0.042389
0.326848
2,313
72
108
32.125
0.727039
0.117164
0
0.26087
1
0
0.156773
0
0
0
0
0
0
1
0.065217
false
0
0.043478
0
0.152174
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
8ea223055e4d3fcfd6d5415328c4b3e36324649c
3,988
py
Python
roles/openshift_health_checker/library/rpm_version.py
KoteikinyDrova/openshift-ansible
3db2bb10c0ad5e7ed702bfccdec03562533e8539
[ "Apache-2.0" ]
1
2019-03-13T10:14:35.000Z
2019-03-13T10:14:35.000Z
roles/openshift_health_checker/library/rpm_version.py
KoteikinyDrova/openshift-ansible
3db2bb10c0ad5e7ed702bfccdec03562533e8539
[ "Apache-2.0" ]
1
2021-09-23T23:36:29.000Z
2021-09-23T23:36:29.000Z
roles/openshift_health_checker/library/rpm_version.py
KoteikinyDrova/openshift-ansible
3db2bb10c0ad5e7ed702bfccdec03562533e8539
[ "Apache-2.0" ]
4
2018-10-27T00:29:24.000Z
2022-01-07T07:39:51.000Z
#!/usr/bin/python """ Ansible module for rpm-based systems determining existing package version information in a host. """ from ansible.module_utils.basic import AnsibleModule IMPORT_EXCEPTION = None try: import rpm # pylint: disable=import-error except ImportError as err: IMPORT_EXCEPTION = err # in tox test env, rpm import fails class RpmVersionException(Exception): """Base exception class for package version problems""" def __init__(self, message, problem_pkgs=None): Exception.__init__(self, message) self.problem_pkgs = problem_pkgs def main(): """Entrypoint for this Ansible module""" module = AnsibleModule( argument_spec=dict( package_list=dict(type="list", required=True), ), supports_check_mode=True ) if IMPORT_EXCEPTION: module.fail_json(msg="rpm_version module could not import rpm: %s" % IMPORT_EXCEPTION) # determine the packages we will look for pkg_list = module.params['package_list'] if not pkg_list: module.fail_json(msg="package_list must not be empty") # get list of packages available and complain if any # of them are missing or if any errors occur try: pkg_versions = _retrieve_expected_pkg_versions(_to_dict(pkg_list)) _check_pkg_versions(pkg_versions, _to_dict(pkg_list)) except RpmVersionException as excinfo: module.fail_json(msg=str(excinfo)) module.exit_json(changed=False) def _to_dict(pkg_list): return {pkg["name"]: pkg for pkg in pkg_list} def _retrieve_expected_pkg_versions(expected_pkgs_dict): """Search for installed packages matching given pkg names and versions. 
Returns a dictionary: {pkg_name: [versions]}""" transaction = rpm.TransactionSet() pkgs = {} for pkg_name in expected_pkgs_dict: matched_pkgs = transaction.dbMatch("name", pkg_name) if not matched_pkgs: continue for header in matched_pkgs: if header['name'] == pkg_name: if pkg_name not in pkgs: pkgs[pkg_name] = [] pkgs[pkg_name].append(header['version']) return pkgs def _check_pkg_versions(found_pkgs_dict, expected_pkgs_dict): invalid_pkg_versions = {} not_found_pkgs = [] for pkg_name, pkg in expected_pkgs_dict.items(): if not found_pkgs_dict.get(pkg_name): not_found_pkgs.append(pkg_name) continue found_versions = [_parse_version(version) for version in found_pkgs_dict[pkg_name]] expected_version = _parse_version(pkg["version"]) if expected_version not in found_versions: invalid_pkg_versions[pkg_name] = { "found_versions": found_versions, "required_version": expected_version, } if not_found_pkgs: raise RpmVersionException( '\n'.join([ "The following packages were not found to be installed: {}".format('\n '.join([ "{}".format(pkg) for pkg in not_found_pkgs ])) ]), not_found_pkgs, ) if invalid_pkg_versions: raise RpmVersionException( '\n '.join([ "The following packages were found to be installed with an incorrect version: {}".format('\n'.join([ " \n{}\n Required version: {}\n Found versions: {}".format( pkg_name, pkg["required_version"], ', '.join([version for version in pkg["found_versions"]])) for pkg_name, pkg in invalid_pkg_versions.items() ])) ]), invalid_pkg_versions, ) def _parse_version(version_str): segs = version_str.split('.') if not segs or len(segs) <= 2: return version_str return '.'.join(segs[0:2]) if __name__ == '__main__': main()
31.15625
116
0.621364
476
3,988
4.926471
0.277311
0.044776
0.030704
0.021748
0.078465
0.065672
0.045203
0.045203
0
0
0
0.001054
0.286108
3,988
127
117
31.401575
0.82262
0.127633
0
0.137931
0
0
0.115753
0
0
0
0
0
0
1
0.068966
false
0
0.08046
0.011494
0.206897
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8ea41ac7ddbb096f8c509205102ac3523839b975
3,335
py
Python
cameo/__init__.py
jlerman44/cameo
6aa4b9db7ebcc3e52547821e64b13675cf73f73a
[ "Apache-2.0" ]
null
null
null
cameo/__init__.py
jlerman44/cameo
6aa4b9db7ebcc3e52547821e64b13675cf73f73a
[ "Apache-2.0" ]
null
null
null
cameo/__init__.py
jlerman44/cameo
6aa4b9db7ebcc3e52547821e64b13675cf73f73a
[ "Apache-2.0" ]
1
2018-09-27T22:41:58.000Z
2018-09-27T22:41:58.000Z
# Copyright 2014 Novo Nordisk Foundation Center for Biosustainability, DTU. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ CAMEO: Computer Aided Metabolic Engineering & Optimization Cameo is a high-level python library developed to aid the in silico strain design process in metabolic engineering projects. The library provides a modular architecture that enables the efficient construction of custom analysis workflows. Example ------- from cameo import load_model # load a model from SBML format (can be found under cameo/tests/data) model = load_model('EcoliCore.xml') # optimize the model and print the objective value solution = model.optimize() print 'Objective value:', solution.f # Determine a set of gene deletions that will optimize the production # of a desired compound from cameo.strain_design.heuristic import GeneKnockoutOptimization from cameo.strain_design.heuristic.objective_functions import biomass_product_coupled_yield from cameo.flux_analysis.simulation import fba objective = biomass_product_coupled_yield("Ec_biomass_iJO1366_core_53p95M", "EX_succ_lp_e_rp_", "EX_glc_lp_e_rp_") optimization = GeneKnockoutOptimization(model=model, objective_function=of, simulation_method=fba, heuristic_method=inspyred.ec.GA) optimization.run(max_evaluations=2000, n=1, mutation_rate=0.3, view=cameo.parallel.SequentialView(), product="EX_succ_lp_e_rp_", num_elites=1) """ import os import sys from cameo import config from cameo.util import get_system_info, in_ipnb if sys.version_info[0] 
== 2: import imp def find_module(name): try: imp.find_module(name) return True except ImportError: return False elif sys.version_info[0] == 3: if sys.version_info[1] <= 3: from importlib import find_loader as _find else: from importlib.util import find_spec as _find def find_module(name): return _find(name) is not None _cameo_path = __path__[0] _cameo_data_path = os.path.join(_cameo_path, 'data') # fix - if matplotlib is installed it is not possible to import cameo without importing matplotlib on jupyter notebook. if find_module("matplotlib") and in_ipnb(): from IPython import get_ipython ipython = get_ipython() ipython.magic("matplotlib inline") system_info = get_system_info() from ._version import get_versions __version__ = get_versions()['version'] del get_versions from cameo.io import load_model from cameo import models from .flux_analysis.analysis import flux_variability_analysis, phenotypic_phase_plane from .flux_analysis.simulation import fba, pfba from ._version import get_versions __version__ = get_versions()['version'] del get_versions del os, sys, in_ipnb, get_system_info, find_module
31.761905
119
0.755022
471
3,335
5.142251
0.43949
0.029728
0.029728
0.013212
0.114781
0.055326
0.055326
0.055326
0.055326
0.055326
0
0.011301
0.177511
3,335
104
120
32.067308
0.871673
0.618291
0
0.216216
0
0
0.035942
0
0
0
0
0
0
1
0.054054
false
0
0.405405
0.027027
0.540541
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
8ea5524aaaf6020d2fb120959b8bb005d31ffdc3
12,967
py
Python
spider_proxy/app/managers/proxy_fetch.py
seniortesting/python-spider
0b70817373e2e22267ddf3b80b9b7eb15931e41e
[ "MIT" ]
null
null
null
spider_proxy/app/managers/proxy_fetch.py
seniortesting/python-spider
0b70817373e2e22267ddf3b80b9b7eb15931e41e
[ "MIT" ]
null
null
null
spider_proxy/app/managers/proxy_fetch.py
seniortesting/python-spider
0b70817373e2e22267ddf3b80b9b7eb15931e41e
[ "MIT" ]
null
null
null
# -*- coding:utf-8 -*- import logging import re from time import sleep import requests import urllib3 from app.utils.spider_utils import getHtmlTree, verifyProxyFormat from app.utils.web_request import WebRequest urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) logging.basicConfig(level=logging.INFO, format='[%(asctime)s.%(msecs).03d - %(filename)s:%(lineno)d %(levelname)s]: %(message)s') log = logging.getLogger(__name__) class FetchFreeProxy(object): @staticmethod def ip66(count=20): """ 代理66 http://www.66ip.cn/ :param count: 提取数量 :return: """ urls = [ "http://www.66ip.cn/nmtq.php?getnum=60&isp=0&anonymoustype=0&start=&ports=&export=&ipaddress=&area=1&proxytype=2&api=66ip" ] headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0', 'Accept': '*/*', 'Connection': 'keep-alive', 'Accept-Language': 'zh-CN,zh;q=0.8'} try: import js2py session = requests.Session() session.verify = False # -----------------------------2019-08-16 最早期版本 # src = session.get("http://www.66ip.cn/", headers=headers).text # # src = src.split("</script>")[0] + '}' # src = src.replace("<script>", "function test() {") # src = src.replace("while(z++)try{eval(", ';var num=10;while(z++)try{var tmp=') # src = src.replace(");break}", ";num--;if(tmp.search('cookie') != -1 | num<0){return tmp}}") # ctx = js2py.eval_js(src) # src = ctx.test() # src = src[src.find("document.cookie="): src.find("};if((")] # src = src.replace("document.cookie=", "") # src = "function test() {var window={}; return %s }" % src # cookie = js2py.eval_js(src).test() # js_cookie = cookie.split(";")[0].split("=")[-1] # -----------------------------2019-08-16 更新版本需要破解cookies # content = ''.join(re.findall('<script>(.*?)</script>', content)) # function_js = content.replace('eval', 'return') # function_content = "function getClearance(){" + function_js + "};" # self.context.execute(function_content) # # 一级解密结果 # decoded_result = self.context.getClearance() # 
function_js_result = 'var a' + decoded_result.split('document.cookie')[1].split("Path=/;'")[ # 0] + "Path=/;';return a;" # # s = re.sub(r'document.create.*?firstChild.href', '"{}"'.format(self.start_url), s) # function_content_result = "function getClearanceResult(){" + function_js_result + "};" # self.context.execute(function_content_result) # # 二次解密结果 # decoded_content = self.context.getClearanceResult() # jsl_clearance = decoded_content.split(';')[0] except Exception as e: print(e) return for url in urls: try: # cookies={"__jsl_clearance": js_cookie} html = session.get(url.format(count), headers=headers).text ips = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}", html) for ip in ips: yield ip.strip() except Exception as e: print(e) pass @staticmethod def goubanjia(): """ guobanjia http://www.goubanjia.com/ :return: """ url = "http://www.goubanjia.com/" tree = getHtmlTree(url) proxy_list = tree.xpath('//td[@class="ip"]') # 此网站有隐藏的数字干扰,或抓取到多余的数字或.符号 # 需要过滤掉<p style="display:none;">的内容 xpath_str = """.//*[not(contains(@style, 'display: none')) and not(contains(@style, 'display:none')) and not(contains(@class, 'port')) ]/text() """ for each_proxy in proxy_list: try: # :符号裸放在td下,其他放在div span p中,先分割找出ip,再找port ip_addr = ''.join(each_proxy.xpath(xpath_str)) # HTML中的port是随机数,真正的端口编码在class后面的字母中。 # 比如这个: # <span class="port CFACE">9054</span> # CFACE解码后对应的是3128。 port = 0 for _ in each_proxy.xpath(".//span[contains(@class, 'port')]" "/attribute::class")[0]. 
\ replace("port ", ""): port *= 10 port += (ord(_) - ord('A')) port /= 8 yield '{}:{}'.format(ip_addr, int(port)) except Exception as e: pass @staticmethod def kuaidaili(): """ 快代理 https://www.kuaidaili.com """ url_list = [ 'https://www.kuaidaili.com/free/inha/', 'https://www.kuaidaili.com/free/intr/' ] for url in url_list: tree = getHtmlTree(url) proxy_list = tree.xpath('.//table//tr') sleep(1) # 必须sleep 不然第二条请求不到数据 for tr in proxy_list[1:]: yield ':'.join(tr.xpath('./td/text()')[0:2]) @staticmethod def coderbusy(): """ 码农代理 https://proxy.coderbusy.com/ :return: """ urls = ['https://proxy.coderbusy.com/'] for url in urls: tree = getHtmlTree(url) proxy_list = tree.xpath('.//table//tr') for tr in proxy_list[1:]: tr_data=tr.xpath('./td/text()') ip_port=tr_data[0:2] location=tr_data[-1].strip() if location in ['腾讯云','阿里云','移动','联通','电信', '世纪互联']: yield ':'.join(ip_port) # yield ':'.join(tr.xpath('./td/text()')[0:2]) @staticmethod def ip3366(): """ 云代理 http://www.ip3366.net/free/ :return: """ urls = ['http://www.ip3366.net/free/?stype=1', "http://www.ip3366.net/free/?stype=2" ] request = WebRequest() for url in urls: r = request.get(url, timeout=10) proxies = re.findall(r'<td>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>[\s\S]*?<td>(\d+)</td>', r.text) for proxy in proxies: yield ":".join(proxy) @staticmethod def jiangxianli(page_count=2): """ http://ip.jiangxianli.com/?page= 免费代理库 :return: """ for i in range(1, page_count + 1): url = 'http://ip.jiangxianli.com/?page={}'.format(i) html_tree = getHtmlTree(url) tr_list = html_tree.xpath("/html/body/div[1]/div/div[1]/div[2]/table/tbody/tr") if len(tr_list) == 0: continue for tr in tr_list: yield tr.xpath("./td[2]/text()")[0] + ":" + tr.xpath("./td[3]/text()")[0] @staticmethod def data5u(): ''' 无忧代理,免费10个 :return: ''' url_list = [ 'http://www.data5u.com/', ] for url in url_list: html_tree = getHtmlTree(url) ul_list = html_tree.xpath('//ul[@class="l2"]') for ul in ul_list: try: yield ':'.join(ul.xpath('.//li/text()')[0:2]) 
except Exception as e: print(e) @staticmethod def xicidaili(page_count=1): url_list = [ 'http://www.xicidaili.com/nn/', # 高匿 ] for each_url in url_list: for i in range(1, page_count + 1): page_url = each_url + str(i) tree = getHtmlTree(page_url) proxy_list = tree.xpath('.//table[@id="ip_list"]//tr[position()>1]') for proxy in proxy_list: try: yield ':'.join(proxy.xpath('./td/text()')[0:2]) except Exception as e: pass # @staticmethod # def proxylistplus(): # urls = ['https://list.proxylistplus.com/Fresh-HTTP-Proxy-List-1'] # request = WebRequest() # for url in urls: # r = request.get(url) # proxies = re.findall(r'<td>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>[\s\S]*?<td>(\d+)</td>', r.text) # for proxy in proxies: # yield ':'.join(proxy) # @staticmethod # def iphai(): # """ # IP海 http://www.iphai.com/free/ng # :return: # """ # urls = [ # 'http://www.iphai.com/free/ng', # 'http://www.iphai.com/free/np', # 'http://www.iphai.com/free/wg', # 'http://www.iphai.com/free/wp' # ] # request = WebRequest() # for url in urls: # r = request.get(url, timeout=10) # proxies = re.findall(r'<td>\s*?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s*?</td>[\s\S]*?<td>\s*?(\d+)\s*?</td>', # r.text) # for proxy in proxies: # yield ":".join(proxy) # @staticmethod # def ip181(days=1): # url = 'http://www.ip181.com/' # html_tree = getHtmlTree(url) # try: # tr_list = html_tree.xpath('//tr')[1:] # for tr in tr_list: # yield ':'.join(tr.xpath('./td/text()')[0:2]) # except Exception as e: # pass # @staticmethod # def mimiip(): # url_gngao = ['http://www.mimiip.com/gngao/%s' % n for n in range(1, 10)] # 国内高匿 # url_gnpu = ['http://www.mimiip.com/gnpu/%s' % n for n in range(1, 10)] # 国内普匿 # url_gntou = ['http://www.mimiip.com/gntou/%s' % n for n in range(1, 10)] # 国内透明 # url_list = url_gngao + url_gnpu + url_gntou # # request = WebRequest() # for url in url_list: # r = request.get(url) # proxies = re.findall(r'<td>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>[\w\W].*<td>(\d+)</td>', r.text) # for proxy in 
proxies: # yield ':'.join(proxy) # @staticmethod # def xundaili(): # ''' # 讯代理 # :return: # ''' # url = 'http://www.xdaili.cn/ipagent/freeip/getFreeIps?page=1&rows=10' # request = WebRequest() # try: # res = request.get(url).json() # for row in res['RESULT']['rows']: # yield '{}:{}'.format(row['ip'], row['port']) # except Exception as e: # pass # @staticmethod # def cnproxy(): # urls = ['http://cn-proxy.com/', 'http://cn-proxy.com/archives/218'] # request = WebRequest() # for url in urls: # r = request.get(url) # proxies = re.findall(r'<td>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>[\w\W]<td>(\d+)</td>', r.text) # for proxy in proxies: # yield ':'.join(proxy) # @staticmethod # def proxylist(): # urls = ['https://proxy-list.org/english/index.php?p=%s' % n for n in range(1, 10)] # request = WebRequest() # import base64 # for url in urls: # r = request.get(url) # proxies = re.findall(r"Proxy\('(.*?)'\)", r.text) # for proxy in proxies: # yield base64.b64decode(proxy).decode() def checkAllProxy(): """ 检查getFreeProxy所有代理获取函数运行情况 Returns: None """ import inspect member_list = inspect.getmembers(FetchFreeProxy, predicate=inspect.isfunction) proxy_count_dict = dict() for func_name, func in member_list: log.debug(u"开始运行代理: {}".format(func_name)) try: proxy_list = [_ for _ in func() if verifyProxyFormat(_)] proxy_count_dict[func_name] = len(proxy_list) except Exception as e: log.error(u"代理获取函数 {} 运行出错!".format(func_name)) log.error(str(e)) log.info(u"所有函数运行完毕 " + "***" * 5) for func_name, func in member_list: log.debug(u"函数: {n}, 获取到代理数: {c}".format(n=func_name, c=proxy_count_dict.get(func_name, 0))) def checkSingleProxy(func): """ 检查指定的FetchFreeProxy某个function运行情况 Args: func: FetchFreeProxy中某个可调用方法 Returns: None """ func_name = getattr(func, '__name__', "None") log.info("start running func: {}".format(func_name)) count = 0 for proxy in func(): if verifyProxyFormat(proxy): log.debug("{} fetch proxy: {}".format(func_name, proxy)) count += 1 log.debug("{n} completed, fetch proxy 
number: {c}".format(n=func_name, c=count)) if __name__ == '__main__': # proxylistplus(FetchFreeProxy.proxylistplus) print(checkSingleProxy(FetchFreeProxy.coderbusy))
36.94302
134
0.488162
1,477
12,967
4.199729
0.215978
0.008061
0.011607
0.012252
0.34048
0.270031
0.226342
0.212317
0.168467
0.154603
0
0.027294
0.336007
12,967
350
135
37.048571
0.693148
0.390376
0
0.297468
0
0.037975
0.19589
0.039632
0
0
0
0
0
1
0.063291
false
0.018987
0.056962
0
0.132911
0.025316
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8ea5e29ef8c1696d1f4448a1275564f4cc6083ea
786
py
Python
tools/leetcode.023.Merge k Sorted Lists/leetcode.023.Merge k Sorted Lists.submission5.py
tedye/leetcode
975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
[ "MIT" ]
4
2015-10-10T00:30:55.000Z
2020-07-27T19:45:54.000Z
tools/leetcode.023.Merge k Sorted Lists/leetcode.023.Merge k Sorted Lists.submission5.py
tedye/leetcode
975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
[ "MIT" ]
null
null
null
tools/leetcode.023.Merge k Sorted Lists/leetcode.023.Merge k Sorted Lists.submission5.py
tedye/leetcode
975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
[ "MIT" ]
null
null
null
# Definition for singly-linked list. # class ListNode: # def __init__(self, x): # self.val = x # self.next = None class Solution: # @param {ListNode[]} lists # @return {ListNode} def mergeKLists(self, lists): lists = [i for i in lists if i] if not lists: return None if len(lists) == 1: return lists[0] dummy = ListNode(0) d = dummy lists.sort(key=lambda x: x.val) while lists: temp = lists[0] d.next = temp temp = temp.next d = d.next d.next = None if temp: lists[0] = temp lists.sort(key=lambda x: x.val) else: lists = lists[1:] return dummy.next
786
786
0.479644
96
786
3.885417
0.354167
0.048257
0.064343
0.096515
0.123324
0.123324
0.123324
0
0
0
0
0.013129
0.418575
786
1
786
786
0.803063
0.978372
0
0.1
0
0
0
0
0
0
0
0
0
1
0.05
false
0
0
0
0.15
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
8ea66006c86aaaba9532a364fe87531b05105008
1,384
py
Python
Mundo 3/File 105.py
PedroHenriqueSimoes/Exercicios-Python
702a819d508dd7878b88fb676559d899237ac761
[ "MIT" ]
1
2020-04-30T21:32:01.000Z
2020-04-30T21:32:01.000Z
Mundo 3/File 105.py
PedroHenriqueSimoes/Exercicios-Python
702a819d508dd7878b88fb676559d899237ac761
[ "MIT" ]
1
2021-10-05T02:00:04.000Z
2021-10-05T02:00:04.000Z
Mundo 3/File 105.py
PedroHenriqueSimoes/Exercicios-Python
702a819d508dd7878b88fb676559d899237ac761
[ "MIT" ]
null
null
null
def notas(*n, show=False): """ -> Função que lê varias notas e retorna um dicionario com dados :param n: Lê varias notas (numero indefinido) :param show: Mostra a situação do aluno (opc) :return: Retorna um dicionario """ dados = dict() dados['total'] = len(n) dados['maior'] = max(n) dados['menor'] = min(n) dados['media'] = sum(n)/dados['total'] if show: if dados['media'] >= 7: dados['situação'] = 'BOA !' elif 7 > dados['media'] > 5: dados['situação'] = 'RAZOAVEL !' elif dados['media'] <= 5: dados['situaçãos'] = 'RUIM !' return dados user = list() t = bool() while True: user.append(float(input('Informe uma nota: '))) resp = ' ' while resp not in 'SsNn': resp = (str(input('Deseja continuar: [S/N] '))).strip()[0] if resp in 'Ss': break if resp in 'Nn': break print('\033[31m:<errozin>: Informe apenas os valores S ou N !\033[m') if resp in 'Nn': break most = ' ' while most not in 'SsNn': most = (str(input('Deseja mostra a situação? [S/N] '))).strip()[0] if most in 'Ss': t = True break elif most in 'Nn': t = False break print('\033[31m:<errozin>: Informe apenas os valores S ou N ! \033[m') tot = (notas(user, show=t)) print(tot)
28.244898
77
0.533237
188
1,384
3.925532
0.414894
0.03252
0.03252
0.04336
0.204607
0.143631
0.143631
0.143631
0.143631
0.143631
0
0.022893
0.305636
1,384
48
78
28.833333
0.745057
0.134393
0
0.225
0
0
0.257485
0
0
0
0
0
0
1
0.025
false
0
0
0
0.05
0.075
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8ea6772e802a782c50f83515c19392b32fbb9402
779
py
Python
Backend/ChatBot/question detection.py
paucutrina/RareHacks_Chatbot
c7ecfef693bf2f477d090629d6eecf7b0bf57872
[ "MIT" ]
null
null
null
Backend/ChatBot/question detection.py
paucutrina/RareHacks_Chatbot
c7ecfef693bf2f477d090629d6eecf7b0bf57872
[ "MIT" ]
null
null
null
Backend/ChatBot/question detection.py
paucutrina/RareHacks_Chatbot
c7ecfef693bf2f477d090629d6eecf7b0bf57872
[ "MIT" ]
null
null
null
from nltk import sent_tokenize, word_tokenize, pos_tag, ne_chunk

# Sample input (typo 'sould' kept: it is the demo data, not code).
sentence = 'Usually I go to the hospital when I am afraid. When I sould go there?'

sentences_splitted = sent_tokenize(sentence)
sentence_words_splitted = [word_tokenize(s) for s in sentences_splitted]
# BUG FIX: pos_tag expects a list of tokens; the original passed each raw
# sentence string, which tags individual characters instead of words.
question = [ne_chunk(pos_tag(words)) for words in sentence_words_splitted]

labeled_sentence = []
helping_verbs = ['is', 'am', 'are', 'was', 'were', 'be', 'being', 'been',
                 'has', 'have', 'had', 'do', 'does', 'did', 'will', 'shall',
                 'should', 'would']

# A sentence is flagged as a question when its first word is an interrogative
# (where, when, which, who, what...), it ends with '?', or it starts with a
# helping verb.
for words in sentence_words_splitted:
    # BUG FIX: the original comparisons were case-sensitive, so sentence-
    # initial words like 'When' or 'Should' never matched. Also renamed the
    # loop variable, which shadowed the module-level `sentence` string.
    first = words[0].lower()
    if first.startswith('wh') or '?' in words[-1] or first in helping_verbs:
        labeled_sentence.append(words)
51.933333
172
0.69448
117
779
4.461538
0.529915
0.097701
0.08046
0.02682
0.091954
0.091954
0
0
0
0
0
0.00463
0.168164
779
14
173
55.642857
0.800926
0.110398
0
0
0
0
0.197101
0
0
0
0
0
0
1
0
false
0
0.090909
0
0.090909
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8ea86f5c1066313076da8b4f11d85883b0f7d98c
16,079
py
Python
tp4/src/back-end/translator.py
ha2398/compiladores1-tps
a70de7cbb6a76301258f1e0f88141a57c6a15d5e
[ "MIT" ]
null
null
null
tp4/src/back-end/translator.py
ha2398/compiladores1-tps
a70de7cbb6a76301258f1e0f88141a57c6a15d5e
[ "MIT" ]
null
null
null
tp4/src/back-end/translator.py
ha2398/compiladores1-tps
a70de7cbb6a76301258f1e0f88141a57c6a15d5e
[ "MIT" ]
null
null
null
#!/usr/bin/env python3

'''
translator.py: 3 address code -> TAM translator.

@author: Hugo Araujo de Sousa [2013007463]
@email: hugosousa@dcc.ufmg.br
@DCC053 - Compiladores I - UFMG
'''

# TODO: Need to handle floating point literals.
# TAM does not provide arithmetic routines for floating point!?

import argparse as ap
from quadruple import Quadruple
from math import floor

# Global variables.
input_file = None
output_file = None

# Sizes (in 2B words) of the grammar types.
TSIZES = {'int': 2, 'float': 4, 'char': 1, 'bool': 1}
MAX_SIZE = TSIZES['float']

# Stack top
ST = 0

# Code stack top
CT = 0

# Address, on the stack, of the variables.
addresses = {}

# Types of the variables.
types = {}

# Dictionary which returns the Quadruple by label.
labels = {}

# Instruction format
INSTR = '{}\t{}\t{}\t{}\t; {}\n'

# Instruction buffer
INSTR_BUFFER = []

################################################################################


def str2bool(string):
    ''' Converts a string to bool.

    @param string: String to be converted.
    @type string: String.

    @return: Boolean that represents the string.
    @rtype: Bool.
    '''
    return string.lower() == 'true'


def parse_arguments():
    ''' Add command line arguments to the program.

    @return: Command line arguments.
    @rtype: argparse.Namespace.
    '''
    parser = ap.ArgumentParser()
    parser.add_argument('INPUT_FILE', type=str, help='Name of input file')
    parser.add_argument('OUTPUT_FILE', type=str, help='Name of output file')
    return parser.parse_args()


def add_instr(instr, quad):
    ''' Buffer an instruction for later output and advance the code top.

    @param instr: Instruction to print.
    @type instr: String.
    @param quad: Quadruple that generated the instruction (None for
        instructions not tied to a quadruple, e.g. PUSH/HALT).
    @type quad: Quadruple.
    '''
    global CT
    INSTR_BUFFER.append((instr, quad))
    CT += 1


def read_decls():
    ''' Read the program's declarations, allocating stack space for each
    declared variable or array and recording its address and type.
    '''
    global ST, CT

    print('-------------------BEGIN INPUT-------------------')
    while True:
        line = input_file.readline()
        print(line.strip('\n'))
        if len(line) <= 2:
            # Blank line ends the declaration section.
            break
        line = line.replace('[', '')
        line = line.replace(']', '')
        args = line.split()
        if len(args) < 3:
            # Simple variable: "<type> <name>"
            if args[1] not in addresses:
                size = TSIZES[args[0]]
                addresses[args[1]] = ST
                ST += size
                types[args[1]] = args[0]
        else:
            # Array: "<length> <type> <name>"
            if args[2] not in addresses:
                size = TSIZES[args[1]] * int(args[0])
                addresses[args[2]] = ST
                ST += size
                types[args[2]] = args[1]
        add_instr(INSTR.format(10, 0, 0, size, 'PUSH ' + str(size)), None)


def build_quadruples():
    ''' Build quadruples from the instructions in the source code.

    @return quads: Quadruples built.
    @rtype quads: List of Quadruple.
    '''
    global CT, ST

    quads = []
    for line in input_file:  # Get all quadruples in source code
        print(line.strip('\n'))
        newQuad = None
        line_args = line.split()

        L = []
        if ':' in line_args[0]:
            # Collect Quadruple labels (e.g. "L1:L2:")
            L = [int(x[1:]) for x in line_args[0].split(':') if x != '']
            del line_args[0]

        if len(line_args) != 0:  # Non empty quadruples
            if 'if' in line_args[0]:
                # Conditional: condition is relational (3 tokens) or boolean.
                op = line_args[0]
                if len(line_args) == 6:
                    cond = line_args[1:4]
                else:
                    cond = line_args[1:2]
                branch = int(line_args[-1][1:])
                newQuad = Quadruple(None, cond, None, op, branch)
            elif 'goto' == line_args[0]:
                # Unconditional jump
                branch = int(line_args[1][1:])
                newQuad = Quadruple(None, None, None, line_args[0], branch)
            else:
                # Operation: destination first
                dst = line_args[0]
                if dst not in addresses:
                    # Allocate memory for temporaries
                    addresses[dst] = ST
                    ST += MAX_SIZE
                    types[dst] = 'float'
                    add_instr(INSTR.format(10, 0, 0, MAX_SIZE,
                                           'PUSH ' + str(MAX_SIZE)), None)

                if line_args[1] == '[':
                    # Array indexing l-value: "dst [ idx ] = src"
                    newQuad = Quadruple(dst, line_args[2], line_args[5], '[]=')
                elif len(line_args) == 3:
                    # Simple copy assignment: "dst = src"
                    newQuad = Quadruple(dst, line_args[2], None, None)
                elif len(line_args) == 5:
                    # Arithmetic: "dst = op1 <operator> op2"
                    newQuad = Quadruple(dst, line_args[2], line_args[4],
                                        line_args[3])
                elif len(line_args) == 6:
                    # Array indexing r-value: "dst = arr [ idx ]"
                    newQuad = Quadruple(dst, line_args[2], line_args[4], '=[]')
                else:
                    # Unary: "dst = <operator> op2"
                    newQuad = Quadruple(dst, None, line_args[3], line_args[2])

        if newQuad:
            quads.append(newQuad)
        for label in L:
            # Each label points to its proper quadruple (None when the label
            # sits past the last instruction; backpatching maps that to CT).
            labels[label] = newQuad

    print('--------------------END INPUT--------------------')
    return quads


def _push_operand(operand, quad):
    ''' Emit code that pushes an operand's value onto the stack.

    A declared variable is loaded from its stack address (LOADA + LOADI);
    anything else is treated as a bool/numeric literal and pushed with LOADL
    after truncation to int.

    @param operand: Variable name or literal token.
    @type operand: String.
    @param quad: Quadruple that generated the instruction.
    @type quad: Quadruple.
    '''
    if operand in addresses:  # Operand is variable
        addr = addresses[operand]
        size = TSIZES[types[operand]]
        add_instr(INSTR.format(1, 4, 0, addr,
                               'LOADA ' + str(addr) + '[SB]'), quad)
        add_instr(INSTR.format(2, 0, size, 0,
                               'LOADI(' + str(size) + ')'), quad)
    else:  # Operand is literal
        if operand == 'true' or operand == 'false':
            literal = int(str2bool(operand))
        else:
            literal = int(floor(float(operand)))
        add_instr(INSTR.format(3, 0, 0, literal,
                               'LOADL ' + str(literal)), quad)


def _store_at(dst, quad):
    ''' Emit code that stores the stack top into variable dst.

    Pushes dst's address (LOADA) then stores the r-value there (STOREI).
    '''
    addr_dst = addresses[dst]
    dst_size = TSIZES[types[dst]]
    add_instr(INSTR.format(1, 4, 0, addr_dst,
                           'LOADA ' + str(addr_dst) + '[SB]'), quad)
    add_instr(INSTR.format(5, 0, dst_size, 0,
                           'STOREI(' + str(dst_size) + ')'), quad)


def translate(quads):
    ''' Translate quadruples to TAM code.

    Types of quadruples:
    1. Conditional jump.
    2. Unconditional jump.
    3. Array indexing l-value assignment.
    4. Array indexing r-value assignment.
    5. Simple variable copy assignments.
    6. Arithmetic assignment.
    7. Unary assignment.

    @param quads: Quadruples to translate.
    @type quads: List of Quadruple.
    '''
    # Primitive-routine displacements and mnemonics.
    relational = {'<': (13, 'lt'), '<=': (14, 'le'),
                  '>=': (15, 'ge'), '>': (16, 'gt')}
    arithmetic = {'+': (8, 'add'), '-': (9, 'sub'), '*': (10, 'mult')}

    for quad in quads:
        quad.address = CT
        quad_type = quad.type

        if quad_type == 1:  # Conditional jump.
            # Push the condition bool value to stack.
            cond = quad.op1
            if len(cond) == 3:  # Relational operation
                _push_operand(cond[0], quad)
                _push_operand(cond[2], quad)
                relop = cond[1]
                if relop in relational:
                    d, mnemo = relational[relop]
                else:
                    # eq/ne primitives also take the operand size.
                    op_size = TSIZES[types[cond[0]]]
                    add_instr(INSTR.format(3, 0, 0, op_size,
                                           'LOADL ' + str(op_size)), quad)
                    d, mnemo = (17, 'eq') if relop == '==' else (18, 'ne')
                add_instr(INSTR.format(6, 2, 0, d, mnemo), quad)
            else:  # Simple boolean
                _push_operand(cond[0], quad)

            # Jump to label according to result ('if' jumps on true,
            # 'iffalse' on false). Target filled in by backpatching.
            n = 1 if quad.operator == 'if' else 0
            add_instr(INSTR.format(14, 0, n, '{}',
                                   'JUMPIF(' + str(n) + ') {}[CB]'), quad)

        elif quad_type == 2:  # Unconditional jump.
            add_instr(INSTR.format(12, 0, 0, '{}', 'JUMP {}[CB]'), quad)

        elif quad_type == 3:  # Array indexing l-value assignment.
            # Push the r-value, then compute the element address
            # (offset + base) and store there.
            _push_operand(quad.op2, quad)
            _push_operand(quad.op1, quad)  # offset
            addr_base = addresses[quad.dst]
            add_instr(INSTR.format(1, 4, 0, addr_base,
                                   'LOADA ' + str(addr_base) + '[SB]'), quad)
            add_instr(INSTR.format(6, 2, 0, 8, 'add'), quad)
            dst_size = TSIZES[types[quad.dst]]
            add_instr(INSTR.format(5, 0, dst_size, 0,
                                   'STOREI(' + str(dst_size) + ')'), quad)

        elif quad_type == 4:  # Array indexing r-value assignment.
            # BUG FIX: the original emitted LOADI with a stale `op1_size`
            # left over from a previous branch while printing `op2_size`;
            # _push_operand always uses the operand's own size.
            _push_operand(quad.op2, quad)  # offset
            addr_base = addresses[quad.op1]
            add_instr(INSTR.format(1, 4, 0, addr_base,
                                   'LOADA ' + str(addr_base) + '[SB]'), quad)
            add_instr(INSTR.format(6, 2, 0, 8, 'add'), quad)
            op_size = TSIZES[types[quad.op1]]
            add_instr(INSTR.format(2, 0, op_size, 0,
                                   'LOADI(' + str(op_size) + ')'), quad)
            _store_at(quad.dst, quad)

        elif quad_type == 5:  # Simple variable copy assignments.
            _push_operand(quad.op1, quad)
            _store_at(quad.dst, quad)

        elif quad_type == 6:  # Arithmetic assignment.
            _push_operand(quad.op1, quad)
            _push_operand(quad.op2, quad)
            # Any operator other than +, -, * is treated as division,
            # matching the original if/elif/else chain.
            d, mnemo = arithmetic.get(quad.operator, (11, 'div'))
            add_instr(INSTR.format(6, 2, 0, d, mnemo), quad)
            _store_at(quad.dst, quad)

        elif quad_type == 7:  # Unary assignment (negation: 0 - op2).
            add_instr(INSTR.format(3, 0, 0, 0, 'LOADL 0'), quad)
            _push_operand(quad.op2, quad)
            add_instr(INSTR.format(6, 2, 0, 9, 'sub'), quad)
            _store_at(quad.dst, quad)

    add_instr(INSTR.format(15, 0, 0, 0, 'HALT'), None)


def backpatching():
    ''' Perform backpatching to assign labels.

    Every buffered instruction containing a '{}' placeholder gets its branch
    target resolved from the labels table; a label mapped to None (past the
    last quadruple) resolves to CT, i.e. the HALT instruction.
    '''
    for i in range(len(INSTR_BUFFER)):
        instruction, quadruple = INSTR_BUFFER[i]
        if '{}' in instruction:
            branch_quadruple = labels[quadruple.branch]
            if branch_quadruple is None:
                branch_address = CT
            else:
                branch_address = branch_quadruple.address
            INSTR_BUFFER[i] = \
                (instruction.format(branch_address, branch_address),
                 quadruple)


def finish():
    ''' Finishes translation: flush the instruction buffer and close files.
    '''
    input_file.close()
    for (instr, quad) in INSTR_BUFFER:
        output_file.write(instr)
    output_file.close()


def main():
    global input_file, output_file

    args = parse_arguments()
    input_file = open(args.INPUT_FILE, 'r')
    output_file = open(args.OUTPUT_FILE, 'w')

    read_decls()
    quads = build_quadruples()
    translate(quads)
    backpatching()
    finish()

################################################################################


# Guarded so importing the module does not run the translator.
if __name__ == '__main__':
    main()
26.57686
80
0.602525
2,320
16,079
4.072414
0.109914
0.050804
0.082557
0.118649
0.600021
0.574619
0.540538
0.52699
0.52699
0.497248
0
0.038027
0.229678
16,079
604
81
26.620861
0.72477
0.193296
0
0.540052
0
0
0.060002
0.007539
0
0
0
0.001656
0
1
0.023256
false
0
0.007752
0
0.03876
0.010336
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
8eab3a16c60da45c7e9e2c9740482835876404d6
2,501
py
Python
CaffeNet/caffenet_settings.py
MasazI/DeepLearning_TensorFlow
6a0865850b32eb4af52bc41984e0cbaa2a19c48a
[ "MIT" ]
17
2015-12-20T14:10:35.000Z
2022-02-28T13:06:33.000Z
CaffeNet/caffenet_settings.py
MasazI/DeepLearning_TensorFlow
6a0865850b32eb4af52bc41984e0cbaa2a19c48a
[ "MIT" ]
1
2019-02-20T12:37:56.000Z
2019-02-20T12:37:56.000Z
CaffeNet/caffenet_settings.py
MasazI/DeepLearning_TensorFlow
6a0865850b32eb4af52bc41984e0cbaa2a19c48a
[ "MIT" ]
8
2015-11-14T04:32:10.000Z
2020-12-26T01:12:18.000Z
# encoding: utf-8
'''Command-line flag definitions for the CaffeNet model (train/eval).'''
import tensorflow as tf

flags = tf.app.flags
FLAGS = flags.FLAGS

# train settings
flags.DEFINE_integer('batch_size', 40, 'the number of images in a batch.')
flags.DEFINE_integer('training_data_type', 1, '0: directly feed, 1: tfrecords')
#flags.DEFINE_string('train_tfrecords', 'data/train_caltech_random.tfrecords', 'path to tfrecords file for train.')
flags.DEFINE_string('train_tfrecords', 'data/train_ex_norm.tfrecords', 'path to tfrecords file for train.')
flags.DEFINE_integer('image_height', 256, 'image height.')
flags.DEFINE_integer('image_width', 256, 'image width.')
flags.DEFINE_integer('image_depth', 3, 'image depth.')
flags.DEFINE_integer('crop_size', 227, 'crop size of image.')
flags.DEFINE_float('learning_rate', 1e-2, 'initial learning rate.')
flags.DEFINE_float('learning_rate_decay_factor', 0.1, 'learning rate decay factor.')
flags.DEFINE_float('num_epochs_per_decay', 350.0, 'epochs after which learning rate decays.')
flags.DEFINE_float('moving_average_decay', 0.9999, 'decay to use for the moving averate.')
flags.DEFINE_integer('num_examples_per_epoch_for_train', 400, 'the number of examples per epoch train.')
flags.DEFINE_integer('num_examples_per_epoch_for_eval', 400, 'the number of examples per eposh eval.')
flags.DEFINE_string('tower_name', 'tower', 'multiple GPU prefix.')
#flags.DEFINE_integer('num_classes', 10, 'the number of classes.')
flags.DEFINE_integer('num_classes', 5, 'the number of classes.')
flags.DEFINE_integer('num_threads', 8, 'the number of threads.')
flags.DEFINE_boolean('fine_tuning', False, 'fine tuning.')
flags.DEFINE_string('trained_model', 'trained_model/caffenet.npy', 'trained model to use fine tuning.')

# output logs settings
flags.DEFINE_string('train_dir', 'train', 'directory where to write even logs and checkpoint')
flags.DEFINE_integer('max_steps', 100000, 'the number of batches to run.')
flags.DEFINE_boolean('log_device_placement', False, 'where to log device placement.')

# evaluate settings
flags.DEFINE_string('eval_dir', 'eval', 'directory where to write event logs.')
flags.DEFINE_string('eval_tfrecords', 'data/train_ex_norm.tfrecords', 'path to tfrecords file for eval')
flags.DEFINE_string('checkpoint_dir', 'train', 'directory where to read model checkpoints.')
# BUG FIX: removed a stray trailing comma that turned this statement into a
# discarded 1-tuple expression.
flags.DEFINE_integer('eval_interval_secs', 60*3, 'How to often to run the eval.')
flags.DEFINE_integer('num_examples', 100, 'the number of examples to run.')
flags.DEFINE_boolean('run_once', False, 'whether to run eval only once.')
59.547619
115
0.773291
378
2,501
4.902116
0.314815
0.172153
0.135996
0.067998
0.348624
0.233135
0.206152
0.178629
0.093362
0.059363
0
0.022497
0.093563
2,501
41
116
61
0.794883
0.09916
0
0
0
0
0.563224
0.076135
0
0
0
0
0
1
0
false
0
0.033333
0
0.033333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0