id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
1724359 | <gh_stars>1-10
from .base import BaseNode, ComputeNode, EventNode
from .params import IntParam, FloatParam, StringParam, ListParam, Param
from .params import INPUT_PLUG, OUTPUT_PLUG, PARAM
class ToStr(StringParam):
    """String parameter whose value is a zero-padded rendering of an integer parameter.

    The value is derived lazily from two other parameters:

    - ``int_param``: the integer parameter to render.
    - ``zeropad_param``: the minimum field width; the integer is left-padded
      with zeros to this width (e.g. width 3 renders 7 as ``"007"``).
    """

    def __init__(self, int_param, zeropad_param, name=None, pluggable=None):
        super().__init__(name=name, pluggable=pluggable)
        self.int_param = int_param
        self.zeropad_param = zeropad_param

    @property
    def value(self):
        # Build a format spec like '{:03d}' from the current zero-pad width,
        # then apply it to the current integer value.
        fmt = '{{:0{}d}}'.format(self.zeropad_param.value)
        return fmt.format(self.int_param.value)

    def __call__(self, *args, **kwargs):
        # BUG FIX: ``value`` is a property that returns a str; the previous
        # code invoked the returned string (``self.value()``), which always
        # raised TypeError. Return the property's value directly.
        return self.value
class IntToStr(BaseNode):
    # Node metadata consumed by the surrounding node framework.
    type = 'IntToStr'
    categories = ['Data']
    description = \
"""The **IntToStr node** converts Integer parameter values to Strings.
Parameters:
- *integer*: the input integer
- *string*: the output string representation of the integer
"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Mid-grey node colour (RGBA).
        self.color = (150, 150, 150, 255)
        # Input integer plug and a plain zero-pad width parameter.
        input_param = IntParam(name='integer', value=0, pluggable=INPUT_PLUG)
        # NOTE(review): 'zeropard_param' looks like a typo for 'zeropad_param';
        # kept as-is since it is only a local name.
        zeropard_param = IntParam(name='zeropad', value=0, pluggable=PARAM)
        self.params.append(input_param)
        self.params.append(zeropard_param)
        # Output plug: a ToStr parameter that renders the integer on demand.
        self.params.append(
            ToStr(int_param=input_param, zeropad_param=zeropard_param, name='string', pluggable=OUTPUT_PLUG))
| StarcoderdataPython |
84614 | <filename>src/message_loop_gtk.py
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glib
import gtk
import sys
import unittest
# Module state for the GTK-backed message loop.
_hooked = False  # whether the custom sys.excepthook has been installed
_is_main_loop_running = False  # True while gtk.main() is executing
_current_main_loop_instance = 0  # bumped on every loop exit; used to drop stale timeouts
_unittests_running = False  # set by the test harness via set_unittests_running()
_active_test_result = None  # unittest result object for the active test, if any
_active_test = None  # the active unittest TestCase, if any
_quitting = False  # guards against scheduling quit more than once
_quit_handlers = []  # callbacks invoked (LIFO) just before the loop quits
def init_main_loop():
    """Install (once) an excepthook that routes uncaught exceptions raised
    inside the GTK main loop to the active unittest, if any, and quits the
    loop instead of letting the exception vanish."""
    global _hooked
    if not _hooked:
        _hooked = True
        old_hook = sys.excepthook
        def hook(exc, value, tb):
            if is_main_loop_running() and _active_test:
                if isinstance(value, unittest.TestCase.failureException):
                    # Assertion failures become test failures.
                    _active_test_result.addFailure(_active_test, (exc, value, tb))
                else:
                    # Exceptions whose text starts with "_noprint" are
                    # deliberately silent.
                    if not str(value).startswith("_noprint"):
                        # BUG FIX: use the print() call form so the module at
                        # least parses under Python 3; with a single argument
                        # it prints identically under Python 2.
                        print("Untrapped exception! Exiting message loop with exception.")
                    _active_test_result.addError(_active_test, (exc, value, tb))
                quit_main_loop()
                return
            else:
                # Outside the loop (or no active test): defer to the previous hook.
                old_hook(exc, value, tb)
                return
        sys.excepthook = hook
def post_task(cb, *args):
    """Schedule ``cb(*args)`` to run on the next iteration of the GTK main
    loop (a zero-delay glib timeout)."""
    init_main_loop()
    # Capture the loop generation so tasks posted before a loop exits are
    # silently dropped instead of firing inside a later loop.
    main_loop_instance_at_post = _current_main_loop_instance
    def on_run():
        # timeouts that were enqueued when the mainloop exited should not run
        if _current_main_loop_instance == main_loop_instance_at_post:
            cb(*args)
    glib.timeout_add(0, on_run)
def post_delayed_task(cb, delay, *args):
    """Schedule ``cb(*args)`` to run after ``delay`` seconds on the GTK main
    loop. ``delay`` is in seconds and is converted to integer milliseconds."""
    init_main_loop()
    # Same stale-task protection as post_task().
    main_loop_instance_at_post = _current_main_loop_instance
    def on_run():
        # timeouts that were enqueued when the mainloop exited should not run
        if _current_main_loop_instance == main_loop_instance_at_post:
            cb(*args)
    timeout_ms = int(delay * 1000)
    glib.timeout_add(timeout_ms, on_run)
def set_unittests_running(running):
    """Tell the message loop whether it is being driven by the unit-test
    harness (run_main_loop() enforces UITestCase usage when this is True)."""
    global _unittests_running
    _unittests_running = running
def set_active_test(test, result):
    """Record the currently running TestCase and its result object so the
    excepthook installed by init_main_loop() can report failures to it."""
    global _active_test
    global _active_test_result
    _active_test = test
    _active_test_result = result
def is_main_loop_running():
    """Return True while gtk.main() is executing (see run_main_loop())."""
    return _is_main_loop_running
def add_quit_handler(cb):
    """Register ``cb`` to run when the main loop quits; handlers run in
    reverse registration order (most recently added first)."""
    _quit_handlers.insert(0, cb)
def run_main_loop():
    """Run the GTK main loop until quit_main_loop() is called.

    Raises:
        Exception: when unit tests are running without an active test, since
            only a UITestCase wires up failure reporting for the loop.
    """
    global _current_main_loop_instance
    global _is_main_loop_running
    if _unittests_running and not _active_test:
        _current_main_loop_instance += 1  # kill any enqueued tasks
        del _quit_handlers[:]
        raise Exception("UITestCase must be used for tests that use the message_loop.")
    init_main_loop()
    assert not _is_main_loop_running
    try:
        _is_main_loop_running = True
        gtk.main()
    finally:
        # Always clear the running flag, invalidate pending tasks, and reset
        # the quit guard, even if gtk.main() exits via an exception.
        _is_main_loop_running = False
        _current_main_loop_instance += 1
        global _quitting
        _quitting = False
def quit_main_loop():
    """Request that the running main loop exit.

    The actual quit is posted as a task so already-queued tasks run first;
    quit handlers fire (LIFO) just before gtk.main_quit().
    """
    assert is_main_loop_running()
    global _quitting
    if _quitting:
        # A quit is already scheduled; don't schedule another.
        return
    _quitting = True
    def do_quit():
        global _current_main_loop_instance
        # Invalidate any tasks still enqueued for this loop generation.
        _current_main_loop_instance += 1
        for cb in _quit_handlers:
            cb()
        del _quit_handlers[:]
        gtk.main_quit()
    post_task(do_quit)
| StarcoderdataPython |
1604028 | <reponame>Eric-Arellano/flake8-pantsbuild
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import ast
import sys
import tokenize
from collections import defaultdict
from typing import Iterator, List, NamedTuple, Set
if sys.version_info >= (3, 8):
from importlib.metadata import version
else:
from importlib_metadata import version
PLUGIN_NAME = "flake8_pantsbuild"
# Version is read from the installed distribution's metadata.
PLUGIN_VERSION = version(PLUGIN_NAME)
# Lint message templates. PB1x checks are always on; PB2x/PB3x are opt-in
# (see OptionalPlugin below).
PB10 = (
    "PB10 Instead of {name}.{attr} use self.{attr} or cls.{attr} with instance methods and "
    "classmethods, respectively, so that inheritance works correctly."
)
PB11 = (
    "PB11 Using a constant on the left-hand side of a logical operator. This means that the "
    "left-hand side will always be truthy, so condition will short-circuit and the right-hand side "
    "will never be evaluated."
)
PB12 = (
    "PB12 Using a constant on the right-hand side of an `and` operator. This means that the "
    "right-hand side will always be truthy, which is likely not expected."
)
PB13 = (
    "PB13 `open()` calls should be made within a `with` statement (context manager). This is "
    "important to ensure that the file handler is properly cleaned up."
)
PB20 = "PB20 Indentation of {} instead of 2."
PB30 = (
    "PB30 Using a trailing slash (`\\`) instead of parentheses for line continuation. Refer "
    "to https://www.tutorialspoint.com/How-to-wrap-long-lines-in-Python."
)
class Error(NamedTuple):
    """A single lint finding: 1-indexed location plus the formatted message."""

    lineno: int
    col_offset: int
    msg: str
class Visitor(ast.NodeVisitor):
    """Various lints used by the Pants project and its users."""

    def __init__(self) -> None:
        # Findings accumulated while walking the tree.
        self.errors: List[Error] = []
        # Call expressions that appear as context managers in a `with`;
        # these are exempt from PB13.
        self.with_call_exprs: Set = set()

    def collect_call_exprs_from_with_node(self, with_node: ast.With) -> None:
        """Save any functions within a `with` statement to `self.with_call_exprs`.
        This is needed for checking PB13.
        """
        with_context_exprs = {
            node.context_expr for node in with_node.items if isinstance(node.context_expr, ast.Call)
        }
        self.with_call_exprs.update(with_context_exprs)

    def check_for_pb10(self, class_def_node: ast.ClassDef) -> None:
        # Flag attribute access spelled with the class's own name (e.g.
        # ``MyClass.attr`` inside ``MyClass``) instead of self./cls.
        for node in ast.walk(class_def_node):
            if not isinstance(node, ast.Attribute):
                continue
            attribute_value = node.value
            if isinstance(attribute_value, ast.Name) and attribute_value.id == class_def_node.name:
                self.errors.append(
                    Error(
                        attribute_value.lineno,
                        attribute_value.col_offset,
                        PB10.format(name=class_def_node.name, attr=node.attr),
                    )
                )

    def check_for_pb11_and_pb12(self, bool_op_node: ast.BoolOp) -> None:
        def is_constant(expr):
            # Num/Str cover numeric and string literals; NameConstant covers
            # True/False/None.
            return isinstance(expr, (ast.Num, ast.Str)) or isinstance(expr, ast.NameConstant)

        if not isinstance(bool_op_node.op, (ast.And, ast.Or)):
            return
        leftmost = bool_op_node.values[0]
        rightmost = bool_op_node.values[-1]
        # PB11: constant on the left of `and`/`or`; PB12: constant on the
        # right of `and` only.
        if is_constant(leftmost):
            self.errors.append(Error(leftmost.lineno, leftmost.col_offset, PB11))
        if isinstance(bool_op_node.op, ast.And) and is_constant(rightmost):
            self.errors.append(Error(rightmost.lineno, rightmost.col_offset, PB12))

    def check_for_pb13(self, call_node: ast.Call) -> None:
        # An ``open(...)`` call is allowed only as a `with` context manager.
        if (
            isinstance(call_node.func, ast.Name)
            and call_node.func.id == "open"
            and call_node not in self.with_call_exprs
        ):
            self.errors.append(Error(call_node.lineno, call_node.col_offset, PB13))

    def visit_BoolOp(self, bool_op_node: ast.BoolOp) -> None:
        self.check_for_pb11_and_pb12(bool_op_node)
        self.generic_visit(bool_op_node)

    def visit_Call(self, call_node: ast.Call) -> None:
        self.check_for_pb13(call_node)
        self.generic_visit(call_node)

    def visit_ClassDef(self, class_def_node: ast.ClassDef) -> None:
        self.check_for_pb10(class_def_node)
        self.generic_visit(class_def_node)

    def visit_With(self, with_node: ast.With) -> None:
        # Collect the with-managed calls before visiting children so the
        # nested Call nodes see an up-to-date exemption set.
        self.collect_call_exprs_from_with_node(with_node)
        self.generic_visit(with_node)
class Plugin:
    """Flake8 entry point for the always-on PB1x checks."""

    name = PLUGIN_NAME
    version = PLUGIN_VERSION

    def __init__(self, tree) -> None:
        # ``tree`` is the module's ast.AST, supplied by Flake8.
        self._tree = tree

    def run(self) -> Iterator:
        """Yield ``(line, col, message, plugin_type)`` tuples for Flake8."""
        visitor = Visitor()
        visitor.visit(self._tree)
        for line, col, msg in visitor.errors:
            yield line, col, msg, type(self)
class OptionalPlugin:
    """A plugin that's disabled by default."""

    name = PLUGIN_NAME
    version = PLUGIN_VERSION
    # Flake8 honours this flag for extensions that must be explicitly enabled.
    off_by_default = True
    # Error-code prefixes handled by the subclass; consulted by is_enabled().
    codes: List[str] = []

    @classmethod
    def is_enabled(cls, options) -> bool:
        # Enabled when any of this plugin's codes appear in Flake8's
        # --enable-extensions option.
        return any(code in options.enable_extensions for code in cls.codes)
class IndentationPlugin(OptionalPlugin):
    """Lint for 2-space indentation.
    This is disabled by default because it conflicts with Flake8's default settings of 4-space
    indentation.
    """

    codes = ["PB2", "PB20"]

    def __init__(self, tree, file_tokens, options) -> None:
        # ``file_tokens`` is the tokenize stream Flake8 provides for the file.
        self._tokens = file_tokens
        self._options = options
        self.errors: List[Error] = []

    def run(self) -> Iterator:
        """Yield ``(line, col, message, plugin_type)`` tuples for Flake8."""
        if not self.is_enabled(self._options):
            return
        self.check_for_pb20(self._tokens)
        for line, col, msg in self.errors:
            yield line, col, msg, type(self)

    def check_for_pb20(self, tokens) -> None:
        # Stack of INDENT token texts: each new indent is compared against the
        # width of the enclosing level.
        indents: List[str] = []
        for token in tokens:
            token_type, token_text, token_start = token[0:3]
            if token_type is tokenize.DEDENT:
                indents.pop()
            if token_type is tokenize.INDENT:
                last_indent = len(indents[-1]) if indents else 0
                current_indent = len(token_text)
                indents.append(token_text)
                # Each indentation step must be exactly 2 columns wide.
                if current_indent - last_indent != 2:
                    lineno, col_offset = token_start
                    self.errors.append(
                        Error(lineno, col_offset, PB20.format(current_indent - last_indent))
                    )
class TrailingSlashesPlugin(OptionalPlugin):
    """Check for trailing slashes.
    Flake8 does not automatically check for trailing slashes, but this is a subjective style
    preference so should be disabled by default.
    """

    codes = ["PB3", "PB30"]

    def __init__(self, tree, lines, file_tokens, options) -> None:
        # Raw physical lines and the tokenize stream, both supplied by Flake8.
        self._lines = lines
        self._tokens = file_tokens
        self._options = options
        self.errors: List[Error] = []

    def run(self) -> Iterator:
        """Yield ``(line, col, message, plugin_type)`` tuples for Flake8."""
        if not self.is_enabled(self._options):
            return
        self.check_for_pb30(self._lines, self._tokens)
        for line, col, msg in self.errors:
            yield line, col, msg, type(self)

    def check_for_pb30(self, lines, tokens) -> None:
        lines = [line.rstrip("\n") for line in lines]
        # First generate a set of ranges where we accept trailing slashes, specifically within
        # comments and strings
        exception_map = defaultdict(list)  # lineno -> [(start_col, end_col), ...]
        for token in tokens:
            token_type, _, token_start, token_end = token[0:4]
            if token_type not in (tokenize.COMMENT, tokenize.STRING):
                continue
            token_start_line, token_start_col_offset = token_start
            token_end_line, token_end_col_offset = token_end
            if token_start_line == token_end_line:
                exception_map[token_start_line].append(
                    (token_start_col_offset, token_end_col_offset)
                )
            else:
                # Multi-line token: exempt from its start to end-of-line, all
                # whole lines in between, and the head of the final line.
                exception_map[token_start_line].append((token_start_col_offset, sys.maxsize))
                for lineno in range(token_start_line + 1, token_end_line):
                    exception_map[lineno].append((0, sys.maxsize))
                exception_map[token_end_line].append((0, token_end_col_offset))

        def has_exception(lineno: int, col_offset: int) -> bool:
            # True when (lineno, col_offset) falls inside a comment/string.
            for start, end in exception_map.get(lineno, []):
                if start <= col_offset <= end:
                    return True
            return False

        for line_number, line in enumerate(lines):
            # Tokens are 1-indexed, rather than 0-indexed.
            line_number += 1
            stripped_line = line.rstrip()
            col_offset = len(stripped_line) - 1
            if stripped_line.endswith("\\") and not has_exception(line_number, col_offset):
                self.errors.append(Error(line_number, col_offset, PB30))
| StarcoderdataPython |
81503 | <filename>mars/serialization/ray.py<gh_stars>1-10
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Tuple
from ..utils import lazy_import
from .core import Serializer, buffered, PickleSerializer
from .exception import ExceptionSerializer
ray = lazy_import("ray")
class RaySerializer(Serializer):
    """Return raw object to let ray do serialization."""

    @buffered
    def serial(self, obj: Any, context: Dict):
        # Pass the object through untouched: no sub-objects, and the trailing
        # True marks the result as final.
        return (obj,), [], True

    def deserial(self, serialized: Tuple, context: Dict, subs: List[Any]):
        # serial() never emits sub-objects, so none may arrive here.
        assert not subs
        return serialized[0]
def register_ray_serializers():
    """Route (de)serialization of all objects through Ray.

    The default pickle/exception serializers are unregistered first so the
    catch-all RaySerializer registrations take effect.
    """
    PickleSerializer.unregister(object)
    ExceptionSerializer.unregister(Exception)
    RaySerializer.register(object)
    RaySerializer.register(ray.ObjectRef)
    RaySerializer.register(ray.actor.ActorHandle)
def unregister_ray_serializers():
    """Undo register_ray_serializers(), restoring the default serializers.

    Unregistration happens in the reverse order of registration.
    """
    RaySerializer.unregister(ray.actor.ActorHandle)
    RaySerializer.unregister(ray.ObjectRef)
    RaySerializer.unregister(object)
    PickleSerializer.register(object)
    ExceptionSerializer.register(Exception)
| StarcoderdataPython |
1798628 | <reponame>cls1991/leetcode
# coding: utf8
"""
题目链接: https://leetcode.com/problems/house-robber-ii/description.
题目描述:
Note: This is an extension of House Robber.
After robbing those houses on that street, the thief has found himself a new place for his thievery so that he will
not get too much attention. This time, all houses at this place are arranged in a circle. That means the first
house is the neighbor of the last one. Meanwhile, the security system for these houses remain the same as for those
in the previous street.
Given a list of non-negative integers representing the amount of money of each house, determine the maximum amount
of money you can rob tonight without alerting the police.
Credits:
Special thanks to @Freezen for adding this problem and creating all test cases.
"""
class Solution(object):
    def rob(self, nums):
        """Maximum loot from houses arranged in a circle (no two adjacent
        houses may both be robbed).

        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 0
        # The first and last houses are neighbours, so solve the linear
        # problem twice: once excluding the last house (choice=0) and once
        # excluding the first (choice=1), and keep the better result.
        return max(self.dynamic_rob(nums, 0), self.dynamic_rob(nums, 1))

    def dynamic_rob(self, nums, choice):
        """Linear house-robber DP over nums[choice : len(nums) - 1 + choice]."""
        if len(nums) <= choice:
            return 0
        if len(nums) <= 1 + choice:
            return nums[choice]
        # prev_best / best track the optimum up to the previous and current
        # house respectively.
        prev_best = nums[choice]
        best = max(nums[choice], nums[1 + choice])
        for i in range(2 + choice, len(nums) - 1 + choice):
            prev_best, best = best, max(best, prev_best + nums[i])
        return best
| StarcoderdataPython |
4830091 | from . import db
from contextlib import contextmanager
'''
csv_address
call_type
crm_uuid
product_code
agent
caller_id_name
caller_id_number
destination_number
old_destination_number
dialplan
context
start_stamp
answer_stamp
end_stamp
bleg_start_stamp
bleg_answer_stamp
bleg_end_stamp
duration
billsec
bleg_duration
bleg_billsec
hangup_cause
uuid" column="aleg_uuid
bleg_uuid
accountcode
read_codec
write_codec
a_answer_bool
b_answer_bool
hangup_leg
record_url
record_path
'''
class Cdr(db.Model):
    # Call Detail Record: one row per call, with identity, timing and codec
    # details for both the A leg (caller) and B leg (callee).
    __tablename__='cdr'
    id =db.Column(db.Integer,primary_key=True)
    csv_address =db.Column(db.String(255))
    call_type =db.Column(db.String(255))
    crm_uuid =db.Column(db.String(255))
    product_code =db.Column(db.String(255))
    agent =db.Column(db.String(255))
    caller_id_name =db.Column(db.String(255))
    caller_id_number =db.Column(db.String(255))
    destination_number =db.Column(db.String(255))
    old_destination_number=db.Column(db.String(255))
    dialplan =db.Column(db.String(255))
    context =db.Column(db.String(255))
    # Timestamps for call start / answer / end on each leg (stored as text).
    aleg_start_stamp =db.Column(db.String(255))
    aleg_answer_stamp =db.Column(db.String(255))
    aleg_end_stamp =db.Column(db.String(255))
    bleg_start_stamp =db.Column(db.String(255))
    bleg_answer_stamp =db.Column(db.String(255))
    bleg_end_stamp =db.Column(db.String(255))
    # Durations in seconds: total (duration) and post-answer (billsec).
    aleg_duration =db.Column(db.Integer)
    aleg_billsec =db.Column(db.Integer)
    bleg_duration =db.Column(db.Integer)
    bleg_billsec =db.Column(db.Integer)
    hangup_cause =db.Column(db.String(255))
    aleg_uuid =db.Column(db.String(255))
    bleg_uuid =db.Column(db.String(255))
    accountcode =db.Column(db.String(255))
    read_codec =db.Column(db.String(255))
    write_codec =db.Column(db.String(255))
    a_answer_bool =db.Column(db.String(255))
    b_answer_bool =db.Column(db.String(255))
    hangup_leg =db.Column(db.String(255))
    record_url =db.Column(db.String(255))
    record_path =db.Column(db.String(255))

    @contextmanager
    def auto_commit(self):
        # Commit the session after the wrapped block, rolling back on error.
        # NOTE(review): this relies on ``self.session`` being available on the
        # model instance; Flask-SQLAlchemy models do not normally expose a
        # ``session`` attribute -- confirm against how this model is used.
        try:
            yield
            self.session.commit()  # commit the transaction
        except Exception as e:
            self.session.rollback()  # roll back on failure
            raise e

    def to_json(self):
        # Serialize every mapped column to a dict, skipping only
        # old_destination_number.
        d={}
        for c in self.__class__.__table__.columns:
            v = getattr(self, c.name)
            if c.name=="old_destination_number":
                pass
            else:
                d[c.name] = v
        return d
1741902 | <reponame>AneeshUkidve/blue_pigs<filename>_morse/morse_code.py<gh_stars>0
import winsound
import time
import letters
sentence=input("\nEnter something to write: ")
def bleat(sty):
    """Play a morse string aloud: '.' is a short beep, '-' a long one.

    Uses the Windows-only ``winsound`` module; both beeps are at 700 Hz
    (200 ms for a dot, 600 ms for a dash). Other characters are skipped.
    """
    for i in sty:
        if i==".":
            winsound.Beep(700,200)
        elif i=="-":
            winsound.Beep(700,600)
def leti(sad):
    """Look up character ``sad`` in the morse table, print its code and play it.

    ``letters.letters`` appears to be a sequence of (character, morse-code)
    pairs -- TODO confirm against the letters module.
    """
    for i in letters.letters:
        if i[0] == sad:
            print(i[1], end="|")
            bleat(i[1])
# Transmit the sentence as morse code, pausing longer between words (1.2 s)
# than between letters (0.6 s).
print("\nMORSE ",end="")
for index in sentence:
    if index == " ":
        time.sleep(1.2)
        print(" ", end="")
    else:
        time.sleep(0.6)
        # Morse table is keyed by lowercase characters.
        dar=index.lower()
        leti(dar)
print()
input("\nPress Enter To Exit")
| StarcoderdataPython |
108090 | <gh_stars>1000+
# -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"poincare_ball_distance",
"poincare_ball_exp",
"poincare_ball_mobius_add",
]
import tensorflow as tf
import numpy as np
# helper functions to manage numerical issues, inspired by https://github.com/dalab/hyperbolic_nn
PROJECTION_EPS = 1e-5
TANH_LIMIT = 15.0
ATANH_LIMIT = tf.math.nextafter(1, 0)
def _project(c, x):
    """
    Ensure ``x`` lies on the Poincaré ball with curvature ``-c``, in the presence of small numerical
    errors.
    """
    # The ball of curvature -c has radius 1/sqrt(c); shrink the allowed norm
    # slightly so points stay strictly inside despite floating-point error.
    max_norm = tf.math.rsqrt(c) * (1 - PROJECTION_EPS)
    return tf.clip_by_norm(x, clip_norm=max_norm, axes=-1)
def _tanh(x):
    # Numerically-guarded tanh: clamp the argument so the result never
    # saturates to exactly +/-1.
    return tf.tanh(tf.clip_by_value(x, -TANH_LIMIT, TANH_LIMIT))
def _atanh(x):
    # Numerically-guarded atanh: clamp just inside (-1, 1) so the result
    # stays finite.
    return tf.atanh(tf.clip_by_value(x, -ATANH_LIMIT, ATANH_LIMIT))
def poincare_ball_mobius_add(c, x, y):
    r"""
    Möbius addition of ``x`` and ``y``, on the Poincaré ball with curvature ``-c``: :math:`\mathbf{x} \oplus^c \mathbf{y}`.
    See Section 2 of [1] for more details.
    [1] <NAME>, <NAME>, and <NAME>, “Hyperbolic Neural Networks,” `arXiv:1805.09112 <http://arxiv.org/abs/1805.09112>`_, Jun. 2018.
    Args:
        c (tensorflow Tensor-like): the curvature of the hyperbolic space(s). Must be able to be
            broadcast to ``x`` and ``y``.
        x (tensorflow Tensor-like): a tensor containing vectors in hyperbolic space, where each
            vector is an element of the last axis (for example, if ``x`` has shape ``(2, 3, 4)``, it
            represents ``2 * 3 = 6`` hyperbolic vectors, each of length ``4``). Must be able to be
            broadcast to ``y``.
        y (tensorflow Tensor-like): a tensor containing vectors in hyperbolic space, where each
            vector is an element of the last axis similar to ``x``. Must be able to be broadcast to
            ``x``.
    Returns:
        A TensorFlow Tensor containing the Möbius addition of each of the vectors (last axis) in
        ``x`` and ``y``, using the corresponding curvature from ``c``. This tensor has the same
        shape as the Euclidean equivalent ``x + y``.
    """
    # Squared norms and inner product, reduced over the vector (last) axis.
    x_norm2 = tf.reduce_sum(x * x, axis=-1, keepdims=True)
    y_norm2 = tf.reduce_sum(y * y, axis=-1, keepdims=True)
    x_dot_y = tf.reduce_sum(x * y, axis=-1, keepdims=True)
    inner = 1 + 2 * c * x_dot_y
    # Möbius addition formula from Section 2 of [1]; project afterwards to
    # guard against small numerical excursions outside the ball.
    numer = (inner + c * y_norm2) * x + (1 - c * x_norm2) * y
    denom = inner + c * c * x_norm2 * y_norm2
    return _project(c, numer / denom)
def poincare_ball_exp(c, x, v):
    r"""
    The exponential map of ``v`` at ``x`` on the Poincaré ball with curvature ``-c``:
    :math:`\exp_{\mathbf{x}}^c(\mathbf{v})`.
    See Section 2 of [1] for more details.
    [1] <NAME>, <NAME>, and <NAME>, “Hyperbolic Neural Networks,” `arXiv:1805.09112 <http://arxiv.org/abs/1805.09112>`_, Jun. 2018.
    Args:
        c (tensorflow Tensor-like): the curvature of the hyperbolic space(s). Must be able to be
            broadcast to ``x`` and ``v``.
        x (tensorflow Tensor-like, optional): a tensor containing vectors in hyperbolic space
            representing the base points for the exponential map, where each vector is an element of
            the last axis (for example, if ``x`` has shape ``(2, 3, 4)``, it represents ``2 * 3 =
            6`` hyperbolic vectors, each of length ``4``). Must be able to be broadcast to ``v``. An
            explicit ``x = None`` is equivalent to ``x`` being all zeros, but uses a more efficient
            form of :math:`\exp_{\mathbf{0}}^c(\mathbf{v})`.
        v (tensorflow Tensor-like): a tensor containing vectors in Euclidean space representing the
            tangent vectors for the exponential map, where each vector is an element of the last
            axis similar to ``x``. Must be able to be broadcast to ``x``.
    """
    v_norm2 = tf.reduce_sum(v * v, axis=-1, keepdims=True)
    c_v_norm = tf.sqrt(c * v_norm2)
    if x is None:
        # Exponential map at the origin has a simpler closed form.
        coeff = _tanh(c_v_norm) / c_v_norm
        return _project(c, coeff * v)
    x_norm2 = tf.reduce_sum(x * x, axis=-1, keepdims=True)
    inner = c_v_norm / (1 - c * x_norm2)
    coeff = _tanh(inner) / c_v_norm
    # Translate the origin map to the base point x via Möbius addition.
    return poincare_ball_mobius_add(c, x, coeff * v)
def poincare_ball_distance(c, x, y):
    r"""
    Distance between ``x`` and ``y``, on the Poincaré ball with curvature ``-c``: :math:`d_c(\mathbf{x}, \mathbf{y})`.
    See Section 2 of [1] for more details.
    [1] <NAME>, <NAME>, and <NAME>, “Hyperbolic Neural Networks,” `arXiv:1805.09112 <http://arxiv.org/abs/1805.09112>`_, Jun. 2018.
    Args:
        c (tensorflow Tensor-like): the curvature of the hyperbolic space(s). Must be able to be
            broadcast to ``x`` and ``y``.
        x (tensorflow Tensor-like): a tensor containing vectors in hyperbolic space, where each
            vector is an element of the last axis (for example, if ``x`` has shape ``(2, 3, 4)``, it
            represents ``2 * 3 = 6`` hyperbolic vectors, each of length ``4``). Must be able to be
            broadcast to ``y``.
        y (tensorflow Tensor-like): a tensor containing vectors in hyperbolic space, where each
            vector is an element of the last axis similar to ``x``. Must be able to be broadcast to
            ``x``.
    Returns:
        A TensorFlow Tensor containing the hyperbolic distance between each of the vectors (last
        axis) in ``x`` and ``y``, using the corresponding curvature from ``c``. This tensor has the
        same shape as the Euclidean equivalent ``tf.norm(x - y)``.
    """
    sqrt_c = tf.sqrt(c)
    # d_c(x, y) = (2 / sqrt(c)) * atanh( sqrt(c) * || (-x) (+)_c y || )
    return (2 / sqrt_c) * _atanh(
        sqrt_c * tf.norm(poincare_ball_mobius_add(c, -x, y), axis=-1)
    )
| StarcoderdataPython |
3367538 | import scipy.stats as st
import numpy as np
import matplotlib.pyplot as plt
def normal_qq(data):
    """Get the Q-Q values for the normal distribution.

    Returns a pair ``(theoretical, observed)``: the theoretical normal
    quantiles matched to each order statistic, and the sorted data.

    For a normal distribution the cdf evaluated at the kth order statistic is
    expected to be approximately k / (n + 1), so the theoretical z-score is
    the inverse normal cdf of k / (n + 1); it is converted to an x-score by
    scaling with the population standard deviation and shifting by the
    population mean of ``data``.

    The data argument must be a numpy array.
    """
    n = data.size
    # Ranks 1..n of the order statistics.
    ranks = np.arange(1, n + 1)
    # Theoretical quantiles: sigma * Phi^-1(k / (n + 1)) + mu.
    theoretical = data.std() * st.norm.ppf(ranks / (n + 1)) + data.mean()
    return theoretical, np.sort(data)
def f_test(numer, denom):
    """Calculate the F test statistic and the corresponding p-value for a
    numerator and denominator sample.

    Both inputs may be numpy arrays or plain lists; sample variances
    (``ddof=1``) are used, and the p-value is the one-sided upper-tail
    probability.

    Returns:
        dict with keys ``'statistic'`` and ``'pvalue'``.
    """
    numer_arr = np.array(numer)
    denom_arr = np.array(denom)
    # The F statistic is the ratio of the two sample variances.
    f_stat = np.var(numer_arr, ddof=1) / np.var(denom_arr, ddof=1)
    # Degrees of freedom are one less than each sample size.
    dfn, dfd = numer_arr.size - 1, denom_arr.size - 1
    # Upper-tail probability under the F(dfn, dfd) distribution.
    p_value = 1 - st.f.cdf(f_stat, dfn, dfd)
    return {'statistic': f_stat, 'pvalue': p_value}
def check_normality(array, rn=5):
    """Print several normality diagnostics for ``array``: skew (with
    skewtest), kurtosis (with kurtosistest), and the D'Agostino-Pearson
    omnibus test.

    ``rn`` is the number of decimals used when rounding the printed skew and
    kurtosis point estimates.
    """
    print("""
    The null hypothesis for all of these tests is that
    the population is drawn from a normal distribution.
    Thus, the p-values should all be greater than 0.05.""", end='\n\n')
    print('Skew =', np.round(st.skew(array), rn))
    print(st.skewtest(array), end='\n\n')
    print('Kurtosis =', np.round(st.kurtosis(array), rn))
    print(st.kurtosistest(array), end='\n\n')
    print('D\'Agostino and Pearson',
          st.normaltest(array), sep='\n', end='\n\n')
# Plotting functions
def scatter_plot(x, y, lim=4):
    """Simple square scatter plot with light grid lines and hollow blue circular
    data points. The limit (lim) argument provides the upper and lower bound of the
    x and y axes for the (square) plot. """
    plt.figure(figsize=(6, 6))
    # Hollow markers: no face colour, blue edges (matplotlib default C0).
    plt.scatter(x, y, alpha=0.5, facecolors='none', edgecolors='#1f77b4')
    plt.grid(alpha=0.5)
    plt.xlim([-lim, lim])
    plt.ylim([-lim, lim])
    plt.xlabel('Parent score')
    plt.ylabel('Child score')
    # Make the plot square
    plt.gca().set_aspect('equal', adjustable='box')
def plot_normal_qq(data, lim=3.5):
    """Plots the theoretical values (x-axis) against the order statistics (y-axis)
    to see if the points lie on an approximate straight line (with gradient
    population SD and intercept population mean).
    The limit (lim) argument provides the upper and lower bound of the x and y
    axes for the (square) plot."""
    plt.figure(figsize=(5, 5))
    x_theor, x_sample = normal_qq(data)
    # y = x reference line: points falling on it suggest normality.
    plt.plot([-5, 5], [-5, 5], color='grey')
    plt.scatter(x_theor, x_sample, alpha=0.6,
                facecolors='none', edgecolors='#1f77b4')
    plt.xlim([-lim, lim])
    plt.ylim([-lim, lim])
    plt.grid(alpha=0.3)
    # Make the plot square
    plt.gca().set_aspect('equal', adjustable='box')
    plt.xlabel('Theoretical')
    plt.ylabel('Observed')
    plt.tight_layout()
def plot_residuals_by_parent(true_bins, resid_means, resid_cis):
    """Plot mean child residuals per parent-score bin with error bars.

    ``resid_cis`` is transposed before plotting, so it presumably holds one
    (lower, upper) pair per bin -- TODO confirm with the caller.
    """
    plt.figure(figsize=(5, 4))
    plt.plot(true_bins, resid_means, color='black', linewidth=2)
    plt.errorbar(true_bins, resid_means, yerr=resid_cis.T,
                 color='grey', alpha=1, linewidth=1.4)
    # Zero line: residuals above/below it indicate systematic bias.
    plt.axhline(y=0, color='grey')
    plt.grid(alpha=0.3)
    plt.xlabel('Parent Score')
    plt.ylabel('Child Residual')
    plt.tight_layout()
| StarcoderdataPython |
32652 | import numpy as np
from numexpr_kernel import numexpr_kernel
from numba_kernel import numba_kernel
# Micro-benchmark driver comparing a numexpr-based kernel against a
# numba-based one on N random points. Each kernel is invoked twice so the
# second call reflects steady-state (post-warmup / post-JIT) behaviour.
N = 10000
x = np.random.rand(N)
y = np.random.rand(N)
z = np.random.rand(N)
tau = np.random.rand(N)
r1 = numexpr_kernel(x, y, z, tau)
r1 = numexpr_kernel(x, y, z, tau)
# The numba kernel writes its result into a preallocated output array.
r2 = np.zeros(N, dtype=float)
numba_kernel(x, y, z, tau, r2, N)
numba_kernel(x, y, z, tau, r2, N)
| StarcoderdataPython |
1625782 | import face_recognition
import cv2
# Absolute paths to the two images to compare (Windows layout).
path1 = r"D:\pythonProject2\known\non coded images\Elon_Musk_Royal_Society_(crop1).jpg"
path2 = r"D:\pythonProject2\known\non coded images\download.jpg"
# Detect faces in the first image and outline each with a green rectangle.
# The (a, b, c, d) box is drawn with corners (d, a) and (b, c) -- presumably
# (top, right, bottom, left) order; verify against the face_recognition docs.
img = cv2.imread(filename=path1)
locations = face_recognition.face_locations(img)
for (a, b, c, d) in locations:
    cv2.rectangle(img, (d, a), (b, c), thickness=2, color=(0, 255, 0))
# Same detection and annotation for the second image.
img2 = cv2.imread(filename=path2)
locations2 = face_recognition.face_locations(img2)
for (a, b, c, d) in locations2:
    cv2.rectangle(img2, (d, a), (b, c), thickness=2, color=(0, 255, 0))
# Face encodings for every face found in each image.
encodings1 = face_recognition.face_encodings(img)  # list
encodings2 = face_recognition.face_encodings(img2)  # list
compare = []
# NOTE(review): ``compare`` is overwritten on every loop iteration, so the
# match report below reflects only the *last* face of image 2 -- confirm
# whether per-face results were intended.
for i in encodings2:
    compare = face_recognition.compare_faces(encodings1, i)
for j in compare:
    if j:
        print("Matched")
    else:
        print("Not Matched")
# Show both annotated images until a key is pressed.
cv2.imshow("image1", img)
cv2.imshow("image2", img2)
cv2.waitKey(0)
| StarcoderdataPython |
3247708 | <gh_stars>1-10
import logging
import re
from typing import Any, Dict
logger = logging.getLogger(__name__)
def read_tir_file(file_path: str) -> Dict[str, Any]:
    """Create a tire data dictionary from a Pacejka .tir file.

    Only lines of the form ``NAME = VALUE [$comment]`` whose value parses as
    a float are kept; section headers and text-valued entries are skipped.

    Args:
        file_path (str): tir file path.

    Returns:
        Dict[str, Any]: flat parameter dictionary containing only numeric data.
    """
    parameters: Dict[str, Any] = {}
    with open(file_path, "r") as handle:
        raw_lines = handle.readlines()
    for raw_line in raw_lines:
        # TODO: this is pretty flaky and would break if somebody puts a "="
        # into comments.
        if "=" not in raw_line:
            continue
        # Drop all spaces, then split off the value and any "$comment" tail.
        name, value_text, *_ = re.split("[=$]", raw_line.replace(" ", ""))
        try:
            parameters[name] = float(value_text)
        except ValueError:
            logger.debug(f"{name} is not a numeric value and is discarded.")
    return parameters
# TODO: at the moment, the params can actually have different fields depending on the
# form of the tir file! This makes jitting difficult (because the input pytree changes)
def create_params_from_tir_file(tir_file):
    """Augment tir file params with some compute params.

    Reads the .tir file via ``read_tir_file`` and adds small epsilon
    constants used to avoid numerical singularities downstream.
    """
    params = read_tir_file(tir_file)
    params.update(
        {
            # Used to avoid low speed singularity, [Eqn (4.E6a) Page 178 - Book]
            "epsilonv": 1e-6,
            "epsilonx": 1e-3,
            "epsilonk": 1e-6,
            "epsilony": 1e-3,
        }
    )
    return params
| StarcoderdataPython |
3315719 | from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
import os
"""
This is the adapted version of the example code for Amazon SageMaker. It is meant for
the instructors of the workshop, since the participants should go through the
process of creating this script themselves.
The code is adapted from:
https://github.com/pytorch/examples/blob/master/mnist/main.py
and
https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#sphx-glr-beginner-blitz-cifar10-tutorial-py
"""
class Net(nn.Module):
    """Small LeNet-style CNN for 32x32 RGB images (e.g. CIFAR-10): two
    conv+pool blocks followed by three fully connected layers producing
    10 class logits."""

    def __init__(self):
        super(Net, self).__init__()
        # Convolutional feature extractor: 3->6 and 6->16 channels, 5x5
        # kernels, each followed by 2x2 max pooling.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Classifier head; 16 * 5 * 5 is the flattened feature size after
        # the two conv/pool stages on a 32x32 input.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        # Flatten the feature maps for the fully connected layers.
        out = out.view(-1, 16 * 5 * 5)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        # No softmax here: raw logits are expected by the loss criterion.
        return self.fc3(out)
def train(args, model, device, train_loader, optimizer, criterion, epoch):
    """Run one training epoch over ``train_loader``.

    Args:
        args: parsed CLI namespace; only ``log_interval`` is used here.
        model: the network being trained.
        device: torch device each batch is moved to.
        train_loader: iterable of (data, target) batches.
        optimizer: optimizer stepping the model parameters.
        criterion: loss function applied to (output, target).
        epoch: current epoch number (used for logging only).
    """
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        # Periodically log progress through the epoch.
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def test(args, model, device, test_loader):
    """Evaluate ``model`` on ``test_loader`` and print the overall accuracy.

    Gradients are disabled; the predicted class is the argmax over the model
    output. ``args`` is unused here but kept for signature symmetry with
    train().
    """
    total = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # Predicted class = index of the maximum logit.
            _, predicted = torch.max(output.data, 1)
            total += target.size(0)
            correct += (predicted == target).sum().item()
    accuracy = 100 * correct / total
    print('Test set: Accuracy on {} images: {:.0f} %'.format(total, accuracy))
def main():
    """Parse CLI/SageMaker arguments, train a CNN on CIFAR-10, optionally save it."""

    def str_to_bool(value):
        # argparse's ``type=bool`` is a classic bug: any non-empty string is
        # truthy, so ``--save-model False`` used to enable saving.  Parse the
        # text instead; ``--save-model True`` keeps working as before.
        return str(value).lower() in ('true', '1', 'yes')

    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch CIFAR-10 Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=14, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='momentum step size (default: 0.9)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', type=str_to_bool, default=False,
                        help='For Saving the current Model')
    # Container environment (set by SageMaker; KeyError outside a container
    # is intentional so misconfiguration fails loudly).
    parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
    parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAINING'])
    parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])
    args = parser.parse_args()
    use_cuda = args.num_gpus > 0
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    cifar_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    )
    train_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10(args.data_dir, train=True, download=True,
                         transform=cifar_transform),
        batch_size=args.batch_size, shuffle=True, **kwargs
    )
    test_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10(args.data_dir, train=False, transform=cifar_transform),
        batch_size=args.test_batch_size, shuffle=True, **kwargs
    )
    model = Net().to(device)
    criterion = nn.CrossEntropyLoss()
    # Use the parsed momentum value; it was previously hard-coded to 0.9,
    # silently ignoring --momentum.
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, criterion, epoch)
        test(args, model, device, test_loader)
    if args.save_model:
        print('Saving the model')
        model_path = os.path.join(args.model_dir, "model.pth")
        torch.save(model.state_dict(), model_path)
# Standard script entry point: only start training when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
71496 | <filename>dev.py
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
import sys
import glob
import pickle
from sklearn.preprocessing import StandardScaler
from skimage.feature import hog
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
# Root directories of the training images; both trees are scanned recursively.
CAR_IMG_DIR = 'vehicles'  # positive class: crops containing a vehicle
NOT_CAR_IMG_DIR = 'non-vehicles'  # negative class: road/background crops
def find_total_files(folder):
    """Return the number of files underneath *folder*, searched recursively."""
    return sum(len(files) for _, _, files in os.walk(folder))
def data_look():
    """Summarize the dataset: car / non-car image counts and the pixel dtype."""
    return {
        "n_cars": find_total_files(CAR_IMG_DIR),
        "n_notcars": find_total_files(NOT_CAR_IMG_DIR),
        # Pixel data type of the 8-bit training images.
        "data_type": np.uint8,
    }
def bin_spatial(img, size=(32, 32)):
    """Down-sample *img* to *size* and return it flattened as a 1-D feature vector."""
    resized = cv2.resize(img, size)
    return resized.ravel()
def color_hist(img, nbins=32, bins_range=(0, 256)):
    """Concatenate per-channel histograms of a 3-channel image into one vector.

    Returns the bin counts of all three channels joined end to end
    (length ``3 * nbins``).
    """
    per_channel = [
        np.histogram(img[:, :, channel], bins=nbins, range=bins_range)[0]
        for channel in range(3)
    ]
    return np.concatenate(per_channel)
def get_hog_features(img, orient, pix_per_cell, cell_per_block, vis=True,
                     feature_vec=True):
    """Compute HOG features for a single-channel image.

    Args:
        img: 2-D image array (one channel).
        orient: number of gradient orientation bins.
        pix_per_cell: cell size in pixels (square cells).
        cell_per_block: block size in cells (square blocks).
        vis: when True, also return the HOG visualization image.
        feature_vec: forwarded to skimage's ``feature_vector``; the features
            are raveled before returning either way in the non-vis branch.

    Returns:
        ``(features, hog_image)`` when ``vis`` is True, otherwise a 1-D
        feature array.
    """
    if vis:
        # With visualize=True skimage returns a (features, image) tuple.
        hog_features, hog_image = hog(
            img,
            orientations=orient,
            pixels_per_cell=(pix_per_cell, pix_per_cell),
            cells_per_block=(cell_per_block, cell_per_block),
            block_norm='L2-Hys', transform_sqrt=False,
            visualize=True, feature_vector=feature_vec)
        return hog_features, hog_image
    # With visualize=False skimage returns the feature array itself.  The
    # previous code indexed ``[0]`` unconditionally, which in this branch
    # silently discarded all but the first row/element of the features.
    hog_features = hog(
        img,
        orientations=orient,
        pixels_per_cell=(pix_per_cell, pix_per_cell),
        cells_per_block=(cell_per_block, cell_per_block),
        block_norm='L2-Hys', transform_sqrt=False,
        visualize=False, feature_vector=feature_vec)
    return hog_features.ravel()
def get_features(image, cspace='RGB', spatial_size=(32, 32),
                 hist_bins=32, hist_range=(0, 256)):
    """Build the combined spatial / color-histogram / HOG feature vector.

    Args:
        image: RGB input image.
        cspace: working color space ('RGB', 'HSV', 'LUV', 'HLS' or 'YUV').
        spatial_size: target size for the down-sampled pixel features.
        hist_bins: bins per channel for the color histograms.
        hist_range: value range for the color histograms.
    """
    # Convert from RGB into the requested working color space.
    if cspace == 'HSV':
        feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    elif cspace == 'LUV':
        feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
    elif cspace == 'HLS':
        feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    elif cspace == 'YUV':
        feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
    else:
        feature_image = np.copy(image)
    # Raw down-sampled pixel intensities.
    spatial_features = bin_spatial(feature_image, size=spatial_size)
    # Per-channel color histograms.
    hist_features = color_hist(feature_image, nbins=hist_bins, bins_range=hist_range)
    # Gradient-orientation (HOG) features; parameters mirror the training setup.
    hog_features = get_hog_features(feature_image,
                                    orient=9,
                                    pix_per_cell=8,
                                    cell_per_block=2,
                                    vis=False,
                                    feature_vec=False)
    return np.concatenate((spatial_features, hist_features, hog_features))
def feature_extraction(data):
    """Extract feature vectors and labels from the image folders in *data*.

    The index of each folder within *data* is used as its class label.
    Returns ``(X, y)`` with ``y`` shaped as a column vector.
    """
    features, labels = [], []
    for label, folder in enumerate(data):
        for root, _, files in os.walk(folder):
            for filename in files:
                if filename.endswith('.png'):
                    image = mpimg.imread(os.path.join(root, filename))
                    features.append(get_features(image))
                    labels.append(label)
    X, y = np.array(features), np.array(labels).reshape(-1, 1)
    # Randomize sample order before the downstream train/test split.
    print("shuffling")
    X, y = shuffle(X, y)
    print("Extraction Complete ..")
    print("Dataset: {} | Labels: {}".format(X.shape, y.shape))
    return X, y
def feature_scaling(X_train, X_test):
    """Fit a StandardScaler on the training split and scale both splits.

    Returns ``(scaled_X_train, scaled_X_test, X_scaler)``; the fitted scaler
    is also persisted to ``scaler.sav`` for reuse at inference time.
    """
    print("scaling ..")
    # Fit only on the training data to avoid leaking test statistics.
    X_scaler = StandardScaler().fit(X_train)
    scaled_X_train = X_scaler.transform(X_train)
    scaled_X_test = X_scaler.transform(X_test)
    print("done!")
    print("saving scaler")
    # ``with`` closes the handle deterministically; the original
    # pickle.dump(obj, open(...)) leaked the file object.
    with open('scaler.sav', 'wb') as scaler_file:
        pickle.dump(X_scaler, scaler_file)
    return scaled_X_train, scaled_X_test, X_scaler
def hyperparam_optimization():
    # TODO: not implemented -- placeholder for tuning the classifier
    # (e.g. a grid search over the LinearSVC regularization parameter C).
    pass
def train(X_train, y_train):
    """Fit a linear SVM on the scaled training data and persist it.

    Returns the fitted classifier; it is also saved to ``model.sav``.
    """
    print("training ...")
    svc = LinearSVC(verbose=True)
    svc.fit(X_train, y_train)
    print("done!")
    print("saving model")
    # ``with`` closes the handle deterministically; the original
    # pickle.dump(obj, open(...)) leaked the file object.
    with open('model.sav', 'wb') as model_file:
        pickle.dump(svc, model_file)
    return svc
def slide_window(img, x_start_stop=None, y_start_stop=None,
                 xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
    """Generate sliding-window coordinates over a region of *img*.

    Args:
        img: image array; only ``img.shape`` is read.
        x_start_stop: [min, max] x search bounds; ``None`` (or a ``None``
            entry) defaults to the full image width.
        y_start_stop: [min, max] y search bounds; ``None`` entries default to
            the full image height.
        xy_window: (width, height) of each window in pixels.
        xy_overlap: fractional (x, y) overlap between adjacent windows.

    Returns:
        List of ``((startx, starty), (endx, endy))`` tuples.
    """
    # NOTE: the original signature used mutable list defaults and wrote the
    # resolved bounds back into them, so a second call with a different image
    # silently reused the first image's bounds.  Resolve into locals instead
    # (this also stops the function mutating caller-supplied lists).
    x_bounds = [None, None] if x_start_stop is None else list(x_start_stop)
    y_bounds = [None, None] if y_start_stop is None else list(y_start_stop)
    x_start = 0 if x_bounds[0] is None else x_bounds[0]
    x_stop = img.shape[1] if x_bounds[1] is None else x_bounds[1]
    y_start = 0 if y_bounds[0] is None else y_bounds[0]
    y_stop = img.shape[0] if y_bounds[1] is None else y_bounds[1]
    # Span of the region to be searched.
    xspan = x_stop - x_start
    yspan = y_stop - y_start
    # Pixels advanced per step; ``np.int`` was removed in NumPy 1.24, and
    # plain int() truncates the same way for these non-negative values.
    nx_pix_per_step = int(xy_window[0] * (1 - xy_overlap[0]))
    ny_pix_per_step = int(xy_window[1] * (1 - xy_overlap[1]))
    # Number of windows that fit in each direction.
    nx_buffer = int(xy_window[0] * xy_overlap[0])
    ny_buffer = int(xy_window[1] * xy_overlap[1])
    nx_windows = int((xspan - nx_buffer) / nx_pix_per_step)
    ny_windows = int((yspan - ny_buffer) / ny_pix_per_step)
    window_list = []
    for ys in range(ny_windows):
        for xs in range(nx_windows):
            # Calculate this window's corner coordinates.
            startx = xs * nx_pix_per_step + x_start
            endx = startx + xy_window[0]
            starty = ys * ny_pix_per_step + y_start
            endy = starty + xy_window[1]
            window_list.append(((startx, starty), (endx, endy)))
    return window_list
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
    """Return a copy of *img* with each bounding box drawn as a rectangle.

    Args:
        img: image to annotate (left unmodified).
        bboxes: iterable of ((startx, starty), (endx, endy)) corner pairs.
        color: BGR/RGB rectangle color.
        thick: rectangle line thickness in pixels.
    """
    annotated = np.copy(img)
    for top_left, bottom_right in bboxes:
        cv2.rectangle(annotated, top_left, bottom_right, color, thick)
    return annotated
def search_windows(img, windows, clf, scaler, color_space='RGB',
                   spatial_size=(32, 32), hist_bins=32,
                   hist_range=(0, 256), orient=9,
                   pix_per_cell=8, cell_per_block=2,
                   hog_channel=0, spatial_feat=True,
                   hist_feat=True, hog_feat=True):
    """Classify every candidate window and return those predicted positive.

    Each window is cropped from *img*, resized to the 64x64 training size,
    featurized, scaled with *scaler* and classified with *clf*.  Windows
    predicted as class 1 (vehicle) are returned.

    Note: the extra keyword parameters are currently unused because
    ``get_features`` is called with its defaults; they are kept for
    signature compatibility with callers.
    """
    hot_windows = []
    for window in windows:
        (x1, y1), (x2, y2) = window
        # Crop the patch and resize to the classifier's training resolution.
        patch = cv2.resize(img[y1:y2, x1:x2], (64, 64))
        features = get_features(patch)
        # Scale exactly as the training features were scaled.
        scaled = scaler.transform(np.array(features).reshape(1, -1))
        if clf.predict(scaled) == 1:
            hot_windows.append(window)
    return hot_windows
def detect_boxes(image, svc, X_scaler):
    """Run the sliding-window detector over *image* and display the hits."""
    # Search / classifier configuration (kept identical to training).
    color_space = 'RGB'  # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
    orient = 9  # HOG orientations
    pix_per_cell = 8  # HOG pixels per cell
    cell_per_block = 2  # HOG cells per block
    hog_channel = 0  # Can be 0, 1, 2, or "ALL"
    spatial_size = (16, 16)  # Spatial binning dimensions
    hist_bins = 16  # Number of histogram bins
    spatial_feat = True  # Spatial features on or off
    hist_feat = True  # Histogram features on or off
    hog_feat = True  # HOG features on or off
    # Only search the lower half of the frame, where the road surface is.
    y_start_stop = [image.shape[0] // 2, None]
    draw_image = np.copy(image)
    windows = slide_window(image, x_start_stop=[None, None],
                           y_start_stop=y_start_stop,
                           xy_window=(96, 96), xy_overlap=(0.5, 0.5))
    hot_windows = search_windows(image, windows, svc, X_scaler,
                                 color_space=color_space,
                                 spatial_size=spatial_size,
                                 hist_bins=hist_bins,
                                 orient=orient, pix_per_cell=pix_per_cell,
                                 cell_per_block=cell_per_block,
                                 hog_channel=hog_channel,
                                 spatial_feat=spatial_feat,
                                 hist_feat=hist_feat, hog_feat=hog_feat)
    window_img = draw_boxes(draw_image, hot_windows, color=(0, 0, 255), thick=6)
    plt.imshow(window_img)
    plt.show()
def pipeline():
    """End-to-end training: extract features, scale, fit the SVM, report accuracy."""
    print(data_look())
    X, y = feature_extraction(data=[CAR_IMG_DIR, NOT_CAR_IMG_DIR])
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42)
    print(X_train.shape, X_test.shape)
    X_train, X_test, X_scaler = feature_scaling(X_train, X_test)
    svc = train(X_train, y_train)
    print("Accuracy: {}".format(svc.score(X_test, y_test)))
    return True
if __name__ == "__main__":
    #pipeline()
    print("Loading model params")
    # ``with`` closes the pickle files deterministically; the original
    # pickle.load(open(...)) calls leaked the file handles.
    with open('model.sav', 'rb') as model_file:
        svc = pickle.load(model_file)
    with open('scaler.sav', 'rb') as scaler_file:
        X_scaler = pickle.load(scaler_file)
    # Image path is taken from the first CLI argument.
    image = mpimg.imread(sys.argv[1])
    #image = image.astype(np.float32)/255
    #print(image)
    detect_boxes(image, svc, X_scaler)
| StarcoderdataPython |
197437 | # <NAME> <<EMAIL>>
import os
import sys
import pytest
try:
import unittest.mock as mock
except ImportError:
import mock
import atnanoaod
##__________________________________________________________________||
@pytest.fixture()
def query_files_for(monkeypatch):
    """Replace atnanoaod.query.query_files_for with a MagicMock and return it."""
    stub = mock.MagicMock()
    target_module = sys.modules['atnanoaod.query']
    monkeypatch.setattr(target_module, 'query_files_for', stub)
    return stub
@pytest.fixture()
def convert_lfn_to_pfn_or_aaa(monkeypatch):
    """Stub the LFN->PFN conversion so it simply prefixes 'root:/' to the path."""
    stub = mock.MagicMock()
    target_module = sys.modules['atnanoaod.query']
    monkeypatch.setattr(target_module, 'convert_lfn_to_pfn_or_aaa', stub)
    stub.side_effect = lambda lfn: 'root:/{}'.format(lfn)
    return stub
##__________________________________________________________________||
def test_mk_dataset_files_list_one_cmsdataset(query_files_for, convert_lfn_to_pfn_or_aaa):
    """mk_dataset_files_list builds a Dataset with PFN-converted files."""
    dataset_name = 'QCD_HT200to300'
    cms_datasets = ['/QCD_HT200to300_13TeV/05Feb2018-v1/NANOAODSIM']
    query_files_for.return_value = ['/store/file1.root', '/store/file2.root']
    result = atnanoaod.query.mk_dataset_files_list(dataset_name, cms_datasets)
    expected = atnanoaod.dataset.Dataset(
        name=dataset_name,
        files=['root://store/file1.root', 'root://store/file2.root'])
    assert expected == result
    # Each CMS dataset is queried once, and every LFN is converted once.
    assert query_files_for.call_args_list == [mock.call(e) for e in cms_datasets]
    assert convert_lfn_to_pfn_or_aaa.call_args_list == [
        mock.call('/store/file1.root'), mock.call('/store/file2.root')]
##__________________________________________________________________||
| StarcoderdataPython |
1670902 | <reponame>google-cloud-sdk-unofficial/google-cloud-sdk<gh_stars>1-10
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Pipelines API utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
import six
# API version used when callers do not request an explicit version.
_DEFAULT_API_VERSION = 'v1'
def GetMessagesModule(api_version=_DEFAULT_API_VERSION):
  """Return the generated messages module for the Data Pipelines API."""
  return apis.GetMessagesModule('datapipelines', api_version)
def GetClientInstance(api_version=_DEFAULT_API_VERSION):
  """Return an API client instance for the Data Pipelines API."""
  return apis.GetClientInstance('datapipelines', api_version)
def GetPipelineURI(resource):
  """Return the self-link URI for a pipeline resource message."""
  parsed = resources.REGISTRY.ParseRelativeName(
      resource.name, collection='datapipelines.pipelines')
  return parsed.SelfLink()
class PipelinesClient(object):
  """Client for Pipelines for the Data Pipelines API."""

  def __init__(self, client=None, messages=None):
    # Fall back to the default-version client/messages when none are injected
    # (injection is used by tests).
    self.client = client or GetClientInstance()
    self.messages = messages or GetMessagesModule()
    self._service = self.client.projects_locations_pipelines

  def Describe(self, pipeline):
    """Describe a Pipeline in the given project and region.

    Args:
      pipeline: str, the full relative resource name of the Pipeline
        (projects/*/locations/*/pipelines/*).

    Returns:
      Described Pipeline Resource.
    """
    describe_req = self.messages.DatapipelinesProjectsLocationsPipelinesGetRequest(
        name=pipeline)
    return self._service.Get(describe_req)

  def Delete(self, pipeline):
    """Delete a Pipeline in the given project and region.

    Args:
      pipeline: str, the full relative resource name of the Pipeline.

    Returns:
      Empty Response.
    """
    delete_req = self.messages.DatapipelinesProjectsLocationsPipelinesDeleteRequest(
        name=pipeline)
    return self._service.Delete(delete_req)

  def Stop(self, pipeline):
    """Stop a Pipeline in the given project and region.

    Args:
      pipeline: str, the full relative resource name of the Pipeline.

    Returns:
      Pipeline resource.
    """
    stop_req = self.messages.DatapipelinesProjectsLocationsPipelinesStopRequest(
        name=pipeline)
    return self._service.Stop(stop_req)

  def Run(self, pipeline):
    """Run a Pipeline in the given project and region.

    Args:
      pipeline: str, the full relative resource name of the Pipeline.

    Returns:
      Job resource which was created.
    """
    # NOTE(review): variable name kept from Stop() above; this is the run
    # request message.
    stop_req = self.messages.DatapipelinesProjectsLocationsPipelinesRunRequest(
        name=pipeline)
    return self._service.Run(stop_req)

  def List(self, limit=None, page_size=50, input_filter='', region=''):
    """List Pipelines for the given project and region.

    Args:
      limit: int or None, the total number of results to return.
      page_size: int, the number of entries in each batch (affects requests
        made, but not the yielded results).
      input_filter: string, optional filter to pass, eg:
        "type:BATCH,status:ALL", to filter out the pipelines based on staus or
        type.
      region: string, relative name to the region.

    Returns:
      Generator of matching devices.
    """
    list_req = self.messages.DatapipelinesProjectsLocationsListPipelinesRequest(
        filter=input_filter, parent=region)
    # YieldFromList handles pagination transparently and yields individual
    # pipeline messages up to `limit`.
    return list_pager.YieldFromList(
        self.client.projects_locations,
        list_req,
        field='pipelines',
        method='ListPipelines',
        batch_size=page_size,
        limit=limit,
        batch_size_attribute='pageSize')

  def CreateLegacyTemplateRequest(self, args):
    """Create a Legacy Template request for the Pipeline workload.

    Args:
      args: Any, list of args needed to create a Pipeline.

    Returns:
      Legacy Template request.
    """
    location = args.region
    project_id = properties.VALUES.core.project.Get(required=True)
    # Convert the dict-typed CLI args into the repeated AdditionalProperty
    # messages the API expects.
    params_list = self.ConvertDictArguments(
        args.parameters, self.messages
        .GoogleCloudDatapipelinesV1LaunchTemplateParameters.ParametersValue)
    transform_mapping_list = self.ConvertDictArguments(
        args.transform_name_mappings,
        self.messages.GoogleCloudDatapipelinesV1LaunchTemplateParameters
        .TransformNameMappingValue)
    transform_name_mappings = None
    if transform_mapping_list:
      transform_name_mappings = self.messages.GoogleCloudDatapipelinesV1LaunchTemplateParameters.TransformNameMappingValue(
          additionalProperties=transform_mapping_list)
    # --disable-public-ips maps to the WORKER_IP_PRIVATE enum; otherwise the
    # field is left unset so the API default applies.
    ip_private = self.messages.GoogleCloudDatapipelinesV1RuntimeEnvironment.IpConfigurationValueValuesEnum.WORKER_IP_PRIVATE
    ip_configuration = ip_private if args.disable_public_ips else None
    user_labels_list = self.ConvertDictArguments(
        args.additional_user_labels, self.messages
        .GoogleCloudDatapipelinesV1RuntimeEnvironment.AdditionalUserLabelsValue)
    additional_user_labels = None
    if user_labels_list:
      additional_user_labels = self.messages.GoogleCloudDatapipelinesV1RuntimeEnvironment.AdditionalUserLabelsValue(
          additionalProperties=user_labels_list)
    launch_parameter = self.messages.GoogleCloudDatapipelinesV1LaunchTemplateParameters(
        environment=self.messages.GoogleCloudDatapipelinesV1RuntimeEnvironment(
            serviceAccountEmail=args.dataflow_service_account_email,
            maxWorkers=args.max_workers,
            numWorkers=args.num_workers,
            network=args.network,
            subnetwork=args.subnetwork,
            machineType=args.worker_machine_type,
            tempLocation=args.temp_location,
            kmsKeyName=args.dataflow_kms_key,
            ipConfiguration=ip_configuration,
            workerRegion=args.worker_region,
            workerZone=args.worker_zone,
            enableStreamingEngine=args.enable_streaming_engine,
            additionalExperiments=(args.additional_experiments
                                   if args.additional_experiments else []),
            additionalUserLabels=additional_user_labels),
        update=args.update,
        parameters=self.messages
        .GoogleCloudDatapipelinesV1LaunchTemplateParameters.ParametersValue(
            additionalProperties=params_list) if params_list else None,
        transformNameMapping=transform_name_mappings)
    return self.messages.GoogleCloudDatapipelinesV1LaunchTemplateRequest(
        gcsPath=args.template_file_gcs_location,
        location=location,
        projectId=project_id,
        launchParameters=launch_parameter)

  def CreateFlexTemplateRequest(self, args):
    """Create a Flex Template request for the Pipeline workload.

    Args:
      args: Any, list of args needed to create a Pipeline.

    Returns:
      Flex Template request.
    """
    location = args.region
    project_id = properties.VALUES.core.project.Get(required=True)
    params_list = self.ConvertDictArguments(
        args.parameters, self.messages
        .GoogleCloudDatapipelinesV1LaunchFlexTemplateParameter.ParametersValue)
    transform_mapping_list = self.ConvertDictArguments(
        args.transform_name_mappings,
        self.messages.GoogleCloudDatapipelinesV1LaunchFlexTemplateParameter
        .TransformNameMappingsValue)
    transform_name_mappings = None
    if transform_mapping_list:
      transform_name_mappings = self.messages.GoogleCloudDatapipelinesV1LaunchFlexTemplateParameter.TransformNameMappingsValue(
          additionalProperties=transform_mapping_list)
    ip_private = self.messages.GoogleCloudDatapipelinesV1FlexTemplateRuntimeEnvironment.IpConfigurationValueValuesEnum.WORKER_IP_PRIVATE
    ip_configuration = ip_private if args.disable_public_ips else None
    user_labels_list = self.ConvertDictArguments(
        args.additional_user_labels,
        self.messages.GoogleCloudDatapipelinesV1FlexTemplateRuntimeEnvironment
        .AdditionalUserLabelsValue)
    additional_user_labels = None
    if user_labels_list:
      additional_user_labels = self.messages.GoogleCloudDatapipelinesV1FlexTemplateRuntimeEnvironment.AdditionalUserLabelsValue(
          additionalProperties=user_labels_list)
    # Map the string CLI choice onto the FlexRS goal enum (flex only).
    flexrs_goal = None
    if args.flexrs_goal:
      if args.flexrs_goal == 'SPEED_OPTIMIZED':
        flexrs_goal = self.messages.GoogleCloudDatapipelinesV1FlexTemplateRuntimeEnvironment.FlexrsGoalValueValuesEnum.FLEXRS_SPEED_OPTIMIZED
      elif args.flexrs_goal == 'COST_OPTIMIZED':
        flexrs_goal = self.messages.GoogleCloudDatapipelinesV1FlexTemplateRuntimeEnvironment.FlexrsGoalValueValuesEnum.FLEXRS_COST_OPTIMIZED
    launch_parameter = self.messages.GoogleCloudDatapipelinesV1LaunchFlexTemplateParameter(
        containerSpecGcsPath=args.template_file_gcs_location,
        environment=self.messages
        .GoogleCloudDatapipelinesV1FlexTemplateRuntimeEnvironment(
            serviceAccountEmail=args.dataflow_service_account_email,
            maxWorkers=args.max_workers,
            numWorkers=args.num_workers,
            network=args.network,
            subnetwork=args.subnetwork,
            machineType=args.worker_machine_type,
            tempLocation=args.temp_location,
            kmsKeyName=args.dataflow_kms_key,
            ipConfiguration=ip_configuration,
            workerRegion=args.worker_region,
            workerZone=args.worker_zone,
            enableStreamingEngine=args.enable_streaming_engine,
            flexrsGoal=flexrs_goal,
            additionalExperiments=(args.additional_experiments
                                   if args.additional_experiments else []),
            additionalUserLabels=additional_user_labels),
        update=args.update,
        parameters=self.messages
        .GoogleCloudDatapipelinesV1LaunchFlexTemplateParameter.ParametersValue(
            additionalProperties=params_list) if params_list else None,
        transformNameMappings=transform_name_mappings)
    return self.messages.GoogleCloudDatapipelinesV1LaunchFlexTemplateRequest(
        location=location,
        projectId=project_id,
        launchParameter=launch_parameter)

  def Create(self, pipeline, parent, args):
    """Create a Pipeline in the given project and region.

    Args:
      pipeline: str, the name for the Pipeline being created.
      parent: str, relative name to the region.
      args: Any, list of args needed to create a Pipeline.

    Returns:
      Pipeline resource.
    """
    if args.pipeline_type == 'streaming':
      pipeline_type = self.messages.GoogleCloudDatapipelinesV1Pipeline.TypeValueValuesEnum(
          self.messages.GoogleCloudDatapipelinesV1Pipeline.TypeValueValuesEnum
          .PIPELINE_TYPE_STREAMING)
    else:
      pipeline_type = self.messages.GoogleCloudDatapipelinesV1Pipeline.TypeValueValuesEnum(
          self.messages.GoogleCloudDatapipelinesV1Pipeline.TypeValueValuesEnum
          .PIPELINE_TYPE_BATCH)
    schedule_info = self.messages.GoogleCloudDatapipelinesV1ScheduleSpec(
        schedule=args.schedule, timeZone=args.time_zone)
    # The workload wraps either a classic or a flex Dataflow template request.
    if args.template_type == 'classic':
      legacy_template_request = self.CreateLegacyTemplateRequest(args)
      workload = self.messages.GoogleCloudDatapipelinesV1Workload(
          dataflowLaunchTemplateRequest=legacy_template_request)
    else:
      flex_template_request = self.CreateFlexTemplateRequest(args)
      workload = self.messages.GoogleCloudDatapipelinesV1Workload(
          dataflowFlexTemplateRequest=flex_template_request)
    if args.display_name:
      display_name = args.display_name
    else:
      # Default the display name to the last segment of the resource name.
      display_name = pipeline.rsplit('/', 1)[-1]
    pipeline_spec = self.messages.GoogleCloudDatapipelinesV1Pipeline(
        name=pipeline,
        displayName=display_name,
        type=pipeline_type,
        scheduleInfo=schedule_info,
        workload=workload)
    create_req = self.messages.DatapipelinesProjectsLocationsPipelinesCreateRequest(
        googleCloudDatapipelinesV1Pipeline=pipeline_spec, parent=parent)
    return self._service.Create(create_req)

  def WorkloadUpdateMask(self, template_type, args):
    """Given a set of args for the workload, create the required update mask.

    Args:
      template_type: str, the type of the pipeline ('flex' or 'classic').
      args: Any, object with args needed for updating a pipeline.

    Returns:
      Update mask (list of field-path strings).
    """
    update_mask = []
    # Classic and flex workloads use differently named message fields, so the
    # mask paths differ by a prefix (and a couple of field-name variations).
    if template_type == 'flex':
      prefix_string = 'workload.dataflow_flex_template_request.launch_parameter.'
    else:
      prefix_string = 'workload.dataflow_launch_template_request.launch_parameters.'
    if args.template_file_gcs_location:
      if template_type == 'flex':
        update_mask.append(prefix_string + 'container_spec_gcs_path')
      else:
        update_mask.append('workload.dataflow_launch_template_request.gcs_path')
    if args.parameters:
      update_mask.append(prefix_string + 'parameters')
    if args.update:
      update_mask.append(prefix_string + 'update')
    if args.transform_name_mappings:
      # Field is plural for flex templates, singular for classic ones.
      if template_type == 'flex':
        update_mask.append(prefix_string + 'transform_name_mappings')
      else:
        update_mask.append(prefix_string + 'transform_name_mapping')
    if args.max_workers:
      update_mask.append(prefix_string + 'environment.max_workers')
    if args.num_workers:
      update_mask.append(prefix_string + 'environment.num_workers')
    if args.dataflow_service_account_email:
      update_mask.append(prefix_string + 'environment.service_account_email')
    if args.temp_location:
      update_mask.append(prefix_string + 'environment.temp_location')
    if args.network:
      update_mask.append(prefix_string + 'environment.network')
    if args.subnetwork:
      update_mask.append(prefix_string + 'environment.subnetwork')
    if args.worker_machine_type:
      update_mask.append(prefix_string + 'environment.machine_type')
    if args.dataflow_kms_key:
      update_mask.append(prefix_string + 'environment.kms_key_name')
    if args.disable_public_ips:
      update_mask.append(prefix_string + 'environment.ip_configuration')
    if args.worker_region:
      update_mask.append(prefix_string + 'environment.worker_region')
    if args.worker_zone:
      update_mask.append(prefix_string + 'environment.worker_zone')
    if args.enable_streaming_engine:
      update_mask.append(prefix_string + 'environment.enable_streaming_engine')
    if args.flexrs_goal:
      # FlexRS is only supported by flex templates.
      if template_type == 'flex':
        update_mask.append(prefix_string + 'environment.flexrs_goal')
    if args.additional_user_labels:
      update_mask.append(prefix_string + 'environment.additional_user_labels')
    if args.additional_experiments:
      update_mask.append(prefix_string + 'environment.additional_experiments')
    return update_mask

  def Patch(self, pipeline, args):
    """Update a Pipeline in the given project and region.

    Args:
      pipeline: str, the name for the Pipeline being updated.
      args: Any, object with args needed to update a Pipeline.

    Returns:
      Pipeline resource.
    """
    update_mask = []
    schedule_info = None
    if args.schedule or args.time_zone:
      schedule, time_zone = None, None
      if args.schedule:
        schedule = args.schedule
        update_mask.append('schedule_info.schedule')
      if args.time_zone:
        time_zone = args.time_zone
        update_mask.append('schedule_info.time_zone')
      schedule_info = self.messages.GoogleCloudDatapipelinesV1ScheduleSpec(
          schedule=schedule, timeZone=time_zone)
    if args.display_name:
      update_mask.append('display_name')
    if args.template_type == 'classic':
      update_mask += self.WorkloadUpdateMask('classic', args)
      legacy_template_request = self.CreateLegacyTemplateRequest(args)
      workload = self.messages.GoogleCloudDatapipelinesV1Workload(
          dataflowLaunchTemplateRequest=legacy_template_request)
    else:
      update_mask += self.WorkloadUpdateMask('flex', args)
      flex_template_request = self.CreateFlexTemplateRequest(args)
      workload = self.messages.GoogleCloudDatapipelinesV1Workload(
          dataflowFlexTemplateRequest=flex_template_request)
    pipeline_spec = self.messages.GoogleCloudDatapipelinesV1Pipeline(
        name=pipeline,
        displayName=args.display_name,
        scheduleInfo=schedule_info,
        workload=workload)
    update_req = self.messages.DatapipelinesProjectsLocationsPipelinesPatchRequest(
        googleCloudDatapipelinesV1Pipeline=pipeline_spec,
        name=pipeline,
        updateMask=','.join(update_mask))
    return self._service.Patch(update_req)

  def ConvertDictArguments(self, arguments, value_message):
    """Convert dictionary arguments to a parameter list.

    Args:
      arguments: Arguments for create job using template.
      value_message: the value message of the arguments

    Returns:
      List of value_message.AdditionalProperty
    """
    params_list = []
    if arguments:
      for k, v in six.iteritems(arguments):
        params_list.append(value_message.AdditionalProperty(key=k, value=v))
    return params_list
| StarcoderdataPython |
1782871 | # W, H, C, N, K, S, R, Wpad, Hpad, Wstride, Hstride
# Per-layer convolution shapes, keyed by dataset then by network name.
# Tuple fields (matching the header comment above):
#   (W, H, C, N, K, S, R, Wpad, Hpad, Wstride, Hstride)
#   W, H: feature-map width/height; C: input channels; N: batch size;
#   K: output channels ('D' marks a depthwise layer); S, R: filter
#   width/height; *pad: padding; *stride: stride.
# NOTE(review): strides of 2 appear to be simulated with stride 1 and
# pre-halved spatial dims (see the 'imagenet' comments) — confirm against
# the simulator's stride support before changing.
layer_sizes = {
    'cifar-10':
        {
            "vgg": [
                # Conv1
                (28, 28, 3, 1, 64, 3, 3, 1, 1, 1, 1),
                (28, 28, 64, 1, 64, 3, 3, 1, 1, 1, 1),
                # # Conv2
                (14, 14, 64, 1, 128, 3, 3, 1, 1, 1, 1),
                (14, 14, 128, 1, 128, 3, 3, 1, 1, 1, 1),
                # # Conv3
                (7, 7, 128, 1, 256, 3, 3, 1, 1, 1, 1),
                (7, 7, 256, 1, 256, 3, 3, 1, 1, 1, 1),
                (7, 7, 256, 1, 256, 3, 3, 1, 1, 1, 1),
                # # Conv4
                (4, 4, 256, 1, 512, 3, 3, 1, 1, 1, 1),
                (4, 4, 512, 1, 512, 3, 3, 1, 1, 1, 1),
                (4, 4, 512, 1, 512, 3, 3, 1, 1, 1, 1),
                # # Conv5
                (2, 2, 512, 1, 512, 3, 3, 1, 1, 1, 1),
                (2, 2, 512, 1, 512, 3, 3, 1, 1, 1, 1),
                (2, 2, 512, 1, 512, 3, 3, 1, 1, 1, 1),
                # # FC
                (1, 1, 512, 1, 512, 1, 1, 0, 0, 1, 1),
                (1, 1, 512, 1, 10, 1, 1, 0, 0, 1, 1)
            ],
            'dense': [
                # Conv1 # too dense
                (28, 28, 3, 1, 16, 3, 3, 1, 1, 1, 1),
                # Block 1-1, 1-2, 1-3
                (28, 28, 16, 1, 24, 3, 3, 1, 1, 1, 1),
                (28, 28, 40, 1, 24, 3, 3, 1, 1, 1, 1),
                (28, 28, 64, 1, 24, 3, 3, 1, 1, 1, 1),
                # Block 1-4, 1-5, 1-6, 1-7
                (28, 28, 88, 1, 24, 3, 3, 1, 1, 1, 1),
                (28, 28, 112, 1, 24, 3, 3, 1, 1, 1, 1),
                (28, 28, 136, 1, 24, 3, 3, 1, 1, 1, 1),
                (28, 28, 160, 1, 24, 3, 3, 1, 1, 1, 1),
                # Block 1-8, 1-9, 1-10, 1-11
                (28, 28, 184, 1, 24, 3, 3, 1, 1, 1, 1),
                (28, 28, 208, 1, 24, 3, 3, 1, 1, 1, 1),
                (28, 28, 232, 1, 24, 3, 3, 1, 1, 1, 1),
                (28, 28, 256, 1, 256, 1, 1, 0, 0, 1, 1),
                # Conv 2-1, 2-2, 2-3
                (14, 14, 256, 1, 24, 3, 3, 1, 1, 1, 1),
                (14, 14, 280, 1, 24, 3, 3, 1, 1, 1, 1),
                (14, 14, 304, 1, 24, 3, 3, 1, 1, 1, 1),
                # Block 2-4, 2-5, 2-6, 2-7
                (14, 14, 328, 1, 24, 3, 3, 1, 1, 1, 1),
                (14, 14, 352, 1, 24, 3, 3, 1, 1, 1, 1),
                (14, 14, 376, 1, 24, 3, 3, 1, 1, 1, 1),
                (14, 14, 400, 1, 24, 3, 3, 1, 1, 1, 1),
                # Block 2-8, 2-9, 2-10, 2-11
                (14, 14, 424, 1, 24, 3, 3, 1, 1, 1, 1),
                (14, 14, 448, 1, 24, 3, 3, 1, 1, 1, 1),
                (14, 14, 472, 1, 24, 3, 3, 1, 1, 1, 1),
                (14, 14, 496, 1, 496, 1, 1, 0, 0, 1, 1),
                # Conv 3-1, 3-2, 3-3
                (7, 7, 496, 1, 24, 3, 3, 1, 1, 1, 1),
                (7, 7, 520, 1, 24, 3, 3, 1, 1, 1, 1),
                (7, 7, 544, 1, 24, 3, 3, 1, 1, 1, 1),
                # Block 3-4, 3-5, 3-6, 3-7
                (7, 7, 568, 1, 24, 3, 3, 1, 1, 1, 1),
                (7, 7, 592, 1, 24, 3, 3, 1, 1, 1, 1),
                (7, 7, 616, 1, 24, 3, 3, 1, 1, 1, 1),
                (7, 7, 640, 1, 24, 3, 3, 1, 1, 1, 1),
                # Block 3-8, 3-9, 3-10
                (7, 7, 664, 1, 24, 3, 3, 1, 1, 1, 1),
                (7, 7, 688, 1, 24, 3, 3, 1, 1, 1, 1),
                (7, 7, 712, 1, 24, 3, 3, 1, 1, 1, 1),
                # FC1
                (1, 1, 736, 1, 10, 1, 1, 0, 0, 1, 1)
            ],
            'wrn': [
                # Conv1 # too dense
                (28, 28, 3, 1, 16, 3, 3, 1, 1, 1, 1),
                # Conv 2-1, 2-2, 2-3
                (28, 28, 16, 1, 160, 3, 3, 1, 1, 1, 1),
                (28, 28, 160, 1, 160, 3, 3, 1, 1, 1, 1),
                (28, 28, 16, 1, 160, 1, 1, 0, 0, 1, 1),  # bottleneck
                # Conv 2-4, 2-5, 2-6, 2-7, 2-8, 2-9
                (28, 28, 160, 1, 160, 3, 3, 1, 1, 1, 1),
                (28, 28, 160, 1, 160, 3, 3, 1, 1, 1, 1),
                (28, 28, 160, 1, 160, 3, 3, 1, 1, 1, 1),
                (28, 28, 160, 1, 160, 3, 3, 1, 1, 1, 1),
                (28, 28, 160, 1, 160, 3, 3, 1, 1, 1, 1),
                (28, 28, 160, 1, 160, 3, 3, 1, 1, 1, 1),
                # Conv 3-1, 3-2, 3-3
                (14, 14, 160, 1, 320, 3, 3, 1, 1, 1, 1),
                (14, 14, 320, 1, 320, 3, 3, 1, 1, 1, 1),
                (14, 14, 160, 1, 320, 1, 1, 0, 0, 1, 1),  # bottleneck
                # Conv 3-4, 3-5, 3-6, 3-7, 3-8, 3-9
                (14, 14, 320, 1, 320, 3, 3, 1, 1, 1, 1),
                (14, 14, 320, 1, 320, 3, 3, 1, 1, 1, 1),
                (14, 14, 320, 1, 320, 3, 3, 1, 1, 1, 1),
                (14, 14, 320, 1, 320, 3, 3, 1, 1, 1, 1),
                (14, 14, 320, 1, 320, 3, 3, 1, 1, 1, 1),
                (14, 14, 320, 1, 320, 3, 3, 1, 1, 1, 1),
                # Conv 4-1, 4-2, 4-3
                (7, 7, 320, 1, 640, 3, 3, 1, 1, 1, 1),
                (7, 7, 640, 1, 640, 3, 3, 1, 1, 1, 1),
                (7, 7, 320, 1, 640, 1, 1, 0, 0, 1, 1),  # bottleneck
                # Conv 4-4, 4-5, 4-6, 4-7, 4-8, 4-9
                (7, 7, 640, 1, 640, 3, 3, 1, 1, 1, 1),
                (7, 7, 640, 1, 640, 3, 3, 1, 1, 1, 1),
                (7, 7, 640, 1, 640, 3, 3, 1, 1, 1, 1),
                (7, 7, 640, 1, 640, 3, 3, 1, 1, 1, 1),
                (7, 7, 640, 1, 640, 3, 3, 1, 1, 1, 1),
                (7, 7, 640, 1, 640, 3, 3, 1, 1, 1, 1),
                # FC # too dense
                (1, 1, 640, 1, 10, 1, 1, 0, 0, 1, 1)
            ]
        },
    'imagenet':
        {
            'resnet18': [  # stride 2 is simulated by stride 1 since unsure if timeloop can perform stride 2 simulation
                # Conv1
                # (224, 224, 3, 1, 64, 7, 7, 3, 3, 2, 2), # the proper dim if we handle stride
                (112, 112, 3, 1, 64, 7, 7, 3, 3, 1, 1),  # stride should be 2
                # Conv2_x
                # (112, 112, 64, 1, 64, 3, 3, 1, 1, 2, 2), # the proper dim if we handle stride
                (56, 56, 64, 1, 64, 3, 3, 1, 1, 1, 1),  # stride should be 2
                (56, 56, 64, 1, 64, 3, 3, 1, 1, 1, 1),
                (56, 56, 64, 1, 64, 3, 3, 1, 1, 1, 1),
                (56, 56, 64, 1, 64, 3, 3, 1, 1, 1, 1),
                # Conv3_x
                # (56, 56, 64, 1, 128, 3, 3, 1, 1, 2, 2), # the proper dim if we handle stride
                (28, 28, 64, 1, 128, 3, 3, 1, 1, 1, 1),  # stride should be 2
                (28, 28, 128, 1, 128, 3, 3, 1, 1, 1, 1),
                (28, 28, 128, 1, 128, 3, 3, 1, 1, 1, 1),
                (28, 28, 128, 1, 128, 3, 3, 1, 1, 1, 1),
                # Conv4_x
                # (28, 28, 128, 1, 256, 3, 3, 1, 1, 2, 2), # the proper dim if we handle stride
                (14, 14, 128, 1, 256, 3, 3, 1, 1, 1, 1),  # stride should be 2
                (14, 14, 256, 1, 256, 3, 3, 1, 1, 1, 1),
                (14, 14, 256, 1, 256, 3, 3, 1, 1, 1, 1),
                (14, 14, 256, 1, 256, 3, 3, 1, 1, 1, 1),
                # Conv5_x
                # (14, 14, 256, 1, 512, 3, 3, 1, 1, 2, 2), # the proper dim if we handle stride
                (7, 7, 256, 1, 512, 3, 3, 1, 1, 1, 1),  # stride should be 2
                (7, 7, 512, 1, 512, 3, 3, 1, 1, 1, 1),
                (7, 7, 512, 1, 512, 3, 3, 1, 1, 1, 1),
                (7, 7, 512, 1, 512, 3, 3, 1, 1, 1, 1),
                # FC
                (1, 1, 512, 1, 1000, 1, 1, 0, 0, 1, 1)
            ],
            'mobilenetv2': [  # stride 2 is simulated by stride 1 since unsure if timeloop can perform stride 2 simulation
                # Conv1
                # (224, 224, 3, 1, 64, 7, 7, 3, 3, 2, 2), # the proper dim if we handle stride
                (112, 112, 3, 1, 32, 3, 3, 1, 1, 1, 1),  # stride should be 2
                # Conv2_x
                (112, 112, 32, 1, 'D', 3, 3, 1, 1, 1, 1),  # Depthwise
                (112, 112, 32, 1, 16, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # Conv3_x
                (112, 112, 16, 1, 96, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # (112, 112, 96, 1, D, 3, 3, 1, 1, 2, 2), # the proper dim if we handle stride
                (56, 56, 96, 1, 'D', 3, 3, 1, 1, 1, 1),  # Depthwise
                (56, 56, 96, 1, 24, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # Conv4_x
                (56, 56, 24, 1, 144, 1, 1, 0, 0, 1, 1),  # Bottleneck
                (56, 56, 144, 1, 'D', 3, 3, 1, 1, 1, 1),  # Depthwise
                (56, 56, 144, 1, 24, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # Conv5_x
                (56, 56, 24, 1, 144, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # (112, 112, 96, 1, D, 3, 3, 1, 1, 2, 2), # the proper dim if we handle stride
                (28, 28, 144, 1, 'D', 3, 3, 1, 1, 1, 1),  # Depthwise
                (28, 28, 144, 1, 32, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # Conv6_x
                (28, 28, 32, 1, 192, 1, 1, 0, 0, 1, 1),  # Bottleneck
                (28, 28, 192, 1, 'D', 3, 3, 1, 1, 1, 1),  # Depthwise
                (28, 28, 192, 1, 32, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # Conv7_x
                (28, 28, 32, 1, 192, 1, 1, 0, 0, 1, 1),  # Bottleneck
                (28, 28, 192, 1, 'D', 3, 3, 1, 1, 1, 1),  # Depthwise
                (28, 28, 192, 1, 32, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # Conv8_x
                (28, 28, 32, 1, 192, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # (112, 112, 96, 1, D, 3, 3, 1, 1, 2, 2), # the proper dim if we handle stride
                (14, 14, 192, 1, 'D', 3, 3, 1, 1, 1, 1),  # Depthwise
                (14, 14, 192, 1, 64, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # Conv9_x
                (14, 14, 64, 1, 384, 1, 1, 0, 0, 1, 1),  # Bottleneck
                (14, 14, 384, 1, 'D', 3, 3, 1, 1, 1, 1),  # Depthwise
                (14, 14, 384, 1, 64, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # Conv10_x
                (14, 14, 64, 1, 384, 1, 1, 0, 0, 1, 1),  # Bottleneck
                (14, 14, 384, 1, 'D', 3, 3, 1, 1, 1, 1),  # Depthwise
                (14, 14, 384, 1, 64, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # Conv11_x
                (14, 14, 64, 1, 384, 1, 1, 0, 0, 1, 1),  # Bottleneck
                (14, 14, 384, 1, 'D', 3, 3, 1, 1, 1, 1),  # Depthwise
                (14, 14, 384, 1, 64, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # Conv12_x
                (14, 14, 64, 1, 384, 1, 1, 0, 0, 1, 1),  # Bottleneck
                (14, 14, 384, 1, 'D', 3, 3, 1, 1, 1, 1),  # Depthwise
                (14, 14, 384, 1, 96, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # Conv13_x
                (14, 14, 96, 1, 576, 1, 1, 0, 0, 1, 1),  # Bottleneck
                (14, 14, 576, 1, 'D', 3, 3, 1, 1, 1, 1),  # Depthwise
                (14, 14, 576, 1, 96, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # Conv14_x
                (14, 14, 96, 1, 576, 1, 1, 0, 0, 1, 1),  # Bottleneck
                (14, 14, 576, 1, 'D', 3, 3, 1, 1, 1, 1),  # Depthwise
                (14, 14, 576, 1, 96, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # Conv15_x
                (14, 14, 96, 1, 576, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # (112, 112, 96, 1, D, 3, 3, 1, 1, 2, 2), # the proper dim if we handle stride
                (7, 7, 576, 1, 'D', 3, 3, 1, 1, 1, 1),  # Depthwise
                (7, 7, 576, 1, 160, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # Conv16_x
                (7, 7, 160, 1, 960, 1, 1, 0, 0, 1, 1),  # Bottleneck
                (7, 7, 960, 1, 'D', 3, 3, 1, 1, 1, 1),  # Depthwise
                (7, 7, 960, 1, 160, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # Conv17_x
                (7, 7, 160, 1, 960, 1, 1, 0, 0, 1, 1),  # Bottleneck
                (7, 7, 960, 1, 'D', 3, 3, 1, 1, 1, 1),  # Depthwise
                (7, 7, 960, 1, 160, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # Conv18_x
                (7, 7, 160, 1, 960, 1, 1, 0, 0, 1, 1),  # Bottleneck
                (7, 7, 960, 1, 'D', 3, 3, 1, 1, 1, 1),  # Depthwise
                (7, 7, 960, 1, 320, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # Conv19
                (7, 7, 320, 1, 1280, 1, 1, 0, 0, 1, 1),  # Bottleneck
                # FC
                (1, 1, 1280, 1, 1000, 1, 1, 0, 0, 1, 1)
            ]
        }
}
# Human-readable layer labels, parallel to the entries of layer_sizes above
# (same dataset/network keys, same per-layer ordering).
layer_names = {
    'cifar-10':
        {
            'vgg': [
                'Conv1_1',
                'Conv1_2',
                'Conv2_1',
                'Conv2_2',
                'Conv3_1', 'Conv3_2', 'Conv3_3',
                'Conv4_1', 'Conv4_2', 'Conv4_3',
                'Conv5_1',
                'Conv5_2', 'Conv5_3',
                'FC1',
                'FC2'
            ],
            'dense': [
                'Conv1',
                # Dense Block 1
                'Block1_1', 'Block1_2',
                'Block1_3',
                'Block1_4', 'Block1_5', 'Block1_6', 'Block1_7',
                'Block1_8', 'Block1_9', 'Block1_10',
                'Block1_11',
                # Dense Block 2
                'Block2_1', 'Block2_2', 'Block2_3',
                'Block2_4', 'Block2_5', 'Block2_6', 'Block2_7',
                'Block2_8', 'Block2_9', 'Block2_10',
                'Block2_11',
                # Dense Block 3
                'Block3_1', 'Block3_2', 'Block3_3',
                'Block3_4', 'Block3_5', 'Block3_6', 'Block3_7',
                'Block3_8', 'Block3_9', 'Block3_10',
                'FC1'
            ],
            'wrn': [
                'Conv1',
                'Conv2_1', 'Conv2_2',
                'Conv2_3',  # bottleneck layer
                'Conv2_4', 'Conv2_5',
                'Conv2_6', 'Conv2_7',
                'Conv2_8', 'Conv2_9',
                # Conv 3
                'Conv3_1', 'Conv3_2',
                'Conv3_3',  # bottleneck layer
                'Conv3_4',
                'Conv3_5',
                'Conv3_6', 'Conv3_7',
                'Conv3_8', 'Conv3_9',
                # Conv 4
                'Conv4_1', 'Conv4_2',
                'Conv4_3',  # bottleneck layer
                'Conv4_4',
                'Conv4_5',
                'Conv4_6', 'Conv4_7',
                'Conv4_8', 'Conv4_9',
                'FC1'
            ]
        },
    'imagenet':
        {
            'resnet18': [
                'Conv1',
                'Conv2_1',
                'Conv2_2',
                'Conv2_3',
                'Conv2_4',
                'Conv3_1',
                'Conv3_2',
                'Conv3_3',
                'Conv3_4',
                'Conv4_1',
                'Conv4_2',
                'Conv4_3',
                'Conv4_4',
                'Conv5_1',
                'Conv5_2',
                'Conv5_3',
                'Conv5_4',
                'FC'
            ],
            'mobilenetv2': [
                'Conv1',
                'Conv2_1', 'Conv2_2',
                'Conv3_1', 'Conv3_2', 'Conv3_3',
                'Conv4_1', 'Conv4_2', 'Conv4_3',
                'Conv5_1', 'Conv5_2', 'Conv5_3',
                'Conv6_1', 'Conv6_2', 'Conv6_3',
                'Conv7_1', 'Conv7_2', 'Conv7_3',
                'Conv8_1', 'Conv8_2', 'Conv8_3',
                'Conv9_1', 'Conv9_2', 'Conv9_3',
                'Conv10_1', 'Conv10_2', 'Conv10_3',
                'Conv11_1', 'Conv11_2', 'Conv11_3',
                'Conv12_1', 'Conv12_2', 'Conv12_3',
                'Conv13_1', 'Conv13_2', 'Conv13_3',
                'Conv14_1', 'Conv14_2', 'Conv14_3',
                'Conv15_1', 'Conv15_2', 'Conv15_3',
                'Conv16_1', 'Conv16_2', 'Conv16_3',
                'Conv17_1', 'Conv17_2', 'Conv17_3',
                'Conv18_1', 'Conv18_2', 'Conv18_3',
                'Conv19',
                'FC'
            ]
        }
}
# Problem-shape template name used for each training pass:
# fw = forward, bw = backward, wu = weight update.
shapes = {
    'fw': 'cnn-layer',
    'bw': 'backward-pass',
    'wu': 'weight-update'
}
| StarcoderdataPython |
1783164 | #!/usr/bin/env python
# ====================================================================
# Copyright (c) 2004-2005 CollabNet. All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://subversion.tigris.org/license.html.
# If newer versions of this license are posted there, you may use a
# newer version instead, at your option.
#
# This software consists of voluntary contributions made by many
# individuals. For exact contribution history, see the revision
# history and logs, available at http://subversion.tigris.org/.
# ====================================================================
import sys
import os
import os.path
from svn import repos, fs, delta, core
### DEAR USER: Please populate the test_props() and test_path_change()
### to do your bidding.
def test_props(props):
    """Validate the PROPS (a dictionary mapping property names to
    values) set on the transaction.  Return 0 if all is well, non-zero
    otherwise.

    This is a user-editable template hook: it currently performs no
    checks and accepts every transaction.
    """
    ### Test the transaction (revision-to-be) properties. If there is
    ### bogosity, write to sys.stderr and return non-zero.
    return 0
def test_path_change(path, change):
"""Validate the CHANGE made to PATH in the transaction. Return 0
if all is well, non-zero otherwise."""
# The svn_node_kind of the path.
item_kind = change.item_kind
# Non-zero iff properties of this path were changed.
prop_changes = change.prop_changes
# Non-zero iff path is a file, and its text was changed.
text_changed = change.text_changed
# The location of the previous revision of this resource, if any.
base_path = change.base_path
base_rev = change.base_rev
# Non-zero iff this change represents an addition (see
# base_path/base_rev for whether it was an addition with history).
added = change.added
### Test the path change as you see fit. If there is bogosity,
### write to sys.stderr and return non-zero.
return 1
def main(pool, repos_dir, txn):
    """Validate transaction TXN in repository REPOS_DIR.

    Returns 0 on success, or the first non-zero value returned by
    test_props() / test_path_change().  POOL is an APR memory pool
    (supplied by core.run_app — see the __main__ block).

    NOTE: this script is Python 2 only — see the cmp-style sort below.
    """
    # Construct a ChangeCollector to fetch our changes.
    fs_ptr = repos.svn_repos_fs(repos.svn_repos_open(repos_dir, pool))
    root = fs.txn_root(fs.open_txn(fs_ptr, txn, pool), pool)
    cc = repos.ChangeCollector(fs_ptr, root, pool)
    # Call the transaction property validator. Might as well get the
    # cheap checks outta the way first.
    retval = test_props(cc.get_root_props())
    if retval:
        return retval
    # Generate the path-based changes list by replaying the transaction
    # into the collector's editor.
    e_ptr, e_baton = delta.make_editor(cc, pool)
    repos.svn_repos_replay(root, e_ptr, e_baton, pool)
    # Call the path change validator on every changed path, in
    # canonical path order.  Python 2 idioms: dict.keys() returns a
    # list, and list.sort takes a cmp function (Python 3 would need
    # sorted(changes, key=functools.cmp_to_key(...))).
    changes = cc.get_changes()
    paths = changes.keys()
    paths.sort(lambda a, b: core.svn_path_compare_paths(a, b))
    for path in paths:
        change = changes[path]
        retval = test_path_change(path, change)
        if retval:
            return retval
    return 0
def _usage_and_exit():
sys.stderr.write("USAGE: %s REPOS-DIR TXN-NAME\n" % (sys.argv[0]))
sys.exit(1)
if __name__ == '__main__':
    # Hook entry point: argv = [script, REPOS-DIR, TXN-NAME].
    if len(sys.argv) < 3:
        _usage_and_exit()
    # core.run_app is expected to supply main()'s leading `pool`
    # argument before the two CLI arguments (matches main's signature).
    sys.exit(core.run_app(main, sys.argv[1], sys.argv[2]))
| StarcoderdataPython |
65875 | <reponame>cool199966/AutoCoverLetter<filename>backend/letters/models.py
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class CoverLetter(models.Model):
    """A user-authored cover letter, optionally visible to other users."""

    # Owning user; deleting the user cascades to their cover letters.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    title = models.CharField(max_length=255)
    # Body text of the cover letter.
    message = models.TextField()
    # Whether the letter is publicly visible (public by default).
    is_public = models.BooleanField(default=True)
    # Set once, automatically, when the row is first created.
    created_at = models.DateTimeField(auto_now_add=True)
| StarcoderdataPython |
4807409 |
def divide(n1, n2):
    """Return n1 / n2, raising ValueError when n2 is zero."""
    if n2 != 0:
        return n1 / n2
    raise ValueError("Divisão por 0 impossível")
# BUG FIX: divide(2, 0) raises ValueError; without a try/except the
# exception propagated and the two messages below were never printed.
try:
    print(divide(2, 0))
except ValueError:
    print("Você tentou dividir um valor por 0, tente novamente!")
print("continua")
3240902 | #!/usr/bin/env python3
"""
Copyright 2019 <NAME>
If you need a full Python package look at f90nml
read_namelist() reads a single namelist, returning the values in a dict()
It strips extraneous apostrophes
"""
import typing as T
import re
from pathlib import Path
def namelist_exists(fn: Path, namelist: str) -> bool:
    """Return True if the namelist group ``&namelist`` appears in file ``fn``.

    The group-start line may carry trailing whitespace; the previous
    pattern anchored with a bare ``$`` and missed lines such as
    ``"&base \\n"``, unlike read_namelist() which does not anchor at all.
    """
    # \s* before $ tolerates trailing spaces/tabs and the newline.
    pat = re.compile(r"^\s*&(" + namelist + r")\s*$")
    with fn.open("r") as f:
        for line in f:
            if pat.match(line) is not None:
                return True
    return False
def read_namelist(fn: Path, namelist: str) -> T.Dict[str, T.Any]:
    """Read a single namelist group from a Fortran .nml file.

    Scans ``fn`` for the line starting the group ``&namelist``, then
    collects ``key = value`` lines until the terminating ``/`` line.
    Values are stripped of surrounding quotes, split on commas, and
    passed through parse_values() for float coercion.

    Raises KeyError if the group (or its terminating ``/``) is not found.
    """
    raw: T.Dict[str, T.Sequence[str]] = {}
    nml_pat = re.compile(r"^\s*&(" + namelist + r")")
    end_pat = re.compile(r"^\s*/\s*$")
    # Captures "name = value", dropping an optional opening quote and
    # anything from a "!" comment onward.
    val_pat = re.compile(r"^\s*(\w+)\s*=\s*['\"]?([^!'\"]*)['\"]?")
    fn = Path(fn).expanduser()
    with fn.open("r") as f:
        for line in f:
            if not nml_pat.match(line):
                continue
            # Inner loop continues consuming the SAME file iterator,
            # so it starts on the line after the group header.
            for line in f:
                if end_pat.match(line):
                    # end of namelist
                    return parse_values(raw)
                val_mat = val_pat.match(line)
                if not val_mat:
                    continue
                # Single values are stored as a stripped string,
                # comma-separated values as a list (elements unstripped).
                key, vals = val_mat.group(1), val_mat.group(2).strip().split(",")
                raw[key] = vals[0].strip() if len(vals) == 1 else vals
    raise KeyError(f"did not find Namelist {namelist} in {fn}")
def parse_values(raw: T.Dict[str, T.Any]):
    """Coerce string values in ``raw`` to floats where possible, in place.

    Lists are converted element-wise (all-or-nothing); scalars are
    converted individually.  Values that fail float() are left untouched.
    Returns the same (mutated) dictionary.
    """
    for key, value in raw.items():
        try:
            if isinstance(value, list):
                raw[key] = [float(item) for item in value]
            else:
                raw[key] = float(value)
        except ValueError:
            # Not numeric — keep the original string(s).
            pass
    return raw
if __name__ == "__main__":
import argparse
p = argparse.ArgumentParser(description="Read Fortran Namelist to Dict")
p.add_argument("file", help=".nml file to load")
P = p.parse_args()
for g in ("base", "empty"):
dat = read_namelist(P.file, g)
print(dat)
| StarcoderdataPython |
1642003 | <gh_stars>1-10
"""Generates archive format 2.X test data from real archive data.
Data is written to the current directory.
"""
from os import path
import numpy as np
import h5py
from ch_util import andata
ARCHIVE_ROOT = "/mnt/gong/archive/"
ARCHIVE_VERSION = "3.0"
ACQ = "20190114T082356Z_chimestack_corr"
FILENAMES = ["00000000_0000.h5", "00005089_0000.h5"]
# Data selection: what subset of data goes into each test data file.
FREQ_SEL = np.s_[32:34]
# Start and stop time indeces for each file.
STARTS = [-8, 0]
STOPS = [None, 4]
OUT_FILENAMES = ["00000000_0000.h5", "00000010_0000.h5"]
paths = [path.join(ARCHIVE_ROOT, ACQ, f) for f in FILENAMES]
def main():
    """Extract a small frequency/time subset of each input file and write
    it to OUT_FILENAMES in the current directory."""
    # Open data files and cast as andata objects.
    data_list = [andata.CorrData(h5py.File(p, "r")) for p in paths]
    # Define a dataset filter that keeps only the frequencies in FREQ_SEL.
    def dset_filter(dataset):
        # Must have this attribute.
        if "freq" in dataset.attrs["axis"]:
            # Must be first axis.
            if dataset.attrs["axis"][0] != "freq":
                raise RuntimeError("Expected 'freq' to be zeroth axis.")
            dataset = dataset[FREQ_SEL]
        return dataset
    for ii, d in enumerate(data_list):
        out_f = h5py.File(OUT_FILENAMES[ii], "w")
        # concatenate() writes into out_f; the returned object is unused.
        tdata = andata.concatenate(
            [d],
            start=STARTS[ii],
            stop=STOPS[ii],
            out_group=out_f,
            dataset_filter=dset_filter,
        )
        # Adjust the frequency index map to match the filtered datasets.
        freq = out_f["index_map/freq"][FREQ_SEL]
        del out_f["index_map/freq"]
        out_f.create_dataset("index_map/freq", data=freq)
        # Adjust the attributes. XXX others?
        out_f.attrs["n_freq"] = [len(freq)]
        out_f.close()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
131362 | <gh_stars>1-10
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This example shows how the Observer metric instrument can be used to capture
asynchronous metrics data.
"""
import psutil
from opentelemetry import metrics
from opentelemetry.sdk.metrics import MeterProvider, ValueObserver
from opentelemetry.sdk.metrics.export import ConsoleMetricsExporter
# Install a global MeterProvider, obtain this module's meter, and export
# collected metrics to stdout every 5 seconds.
metrics.set_meter_provider(MeterProvider())
meter = metrics.get_meter(__name__)
metrics.get_meter_provider().start_pipeline(meter, ConsoleMetricsExporter(), 5)
# Observer callback that records per-CPU utilisation.
def get_cpu_usage_callback(observer):
    """Record the current usage percentage of every CPU on the observer."""
    usage_per_cpu = psutil.cpu_percent(percpu=True)
    for cpu_index, usage in enumerate(usage_per_cpu):
        observer.observe(usage, {"cpu_number": str(cpu_index)})
# Register the CPU callback as a ValueObserver: each collection interval
# records the instantaneous per-CPU usage, labelled by CPU index.
meter.register_observer(
    callback=get_cpu_usage_callback,
    name="cpu_percent",
    description="per-cpu usage",
    unit="1",
    value_type=float,
    observer_type=ValueObserver,
    label_keys=("cpu_number",),
)
# Observer callback that records system RAM utilisation.
def get_ram_usage_callback(observer):
    """Record the current virtual-memory usage percentage on the observer."""
    observer.observe(psutil.virtual_memory().percent, {})
# Register the RAM callback; no labels are needed for a single system-wide
# gauge.
meter.register_observer(
    callback=get_ram_usage_callback,
    name="ram_percent",
    description="RAM memory usage",
    unit="1",
    value_type=float,
    observer_type=ValueObserver,
    label_keys=(),
)
# Block until the user presses a key so the pipeline has time to export.
input("Metrics will be printed soon. Press a key to finish...\n")
| StarcoderdataPython |
3235457 | <gh_stars>0
# Exercise Python 046:
# Show an on-screen countdown for a fireworks launch, going from 10
# down to 0, with a one-second pause between numbers.
from time import sleep

for count in range(10, -1, -1):
    print(count)
    # BUG FIX: the exercise asks for a 1-second pause (was 0.5); the
    # unused `r = 10` variable was also removed.
    sleep(1)
print('bUm!!!')
165417 | <filename>TEMpcPlot/Symmetry/spacegroup.py
# Copyright (C) 2010, <NAME>
# (see accompanying license files for details).
"""Definition of the Spacegroup class.
from ase library
few change for extinctions
This module only depends on NumPy and the space group database.
"""
import os
import warnings
from functools import total_ordering
from typing import Union
from .CFML_exti import Search_Extinctions
import numpy as np
from .CFML_exti import Search_Extinctions
__all__ = ['Spacegroup']
class SpacegroupError(Exception):
    """Base exception for the spacegroup module."""
class SpacegroupNotFoundError(SpacegroupError):
    """Raised when given space group cannot be found in data base."""
class SpacegroupValueError(SpacegroupError):
    """Raised when arguments have invalid value."""
# Type alias: the accepted ways to identify a space group — an IT number,
# a Hermann-Mauguin symbol, or an existing Spacegroup instance.
_SPACEGROUP = Union[int, str, 'Spacegroup']
@total_ordering
class Spacegroup:
"""A space group class.
The instances of Spacegroup describes the symmetry operations for
the given space group.
Example:
>>> from ase.spacegroup import Spacegroup
>>>
>>> sg = Spacegroup(225)
>>> print('Space group', sg.no, sg.symbol)
Space group 225 F m -3 m
>>> sg.scaled_primitive_cell
array([[ 0. , 0.5, 0.5],
[ 0.5, 0. , 0.5],
[ 0.5, 0.5, 0. ]])
>>> sites, kinds = sg.equivalent_sites([[0,0,0]])
>>> sites
array([[ 0. , 0. , 0. ],
[ 0. , 0.5, 0.5],
[ 0.5, 0. , 0.5],
[ 0.5, 0.5, 0. ]])
"""
no = property(
lambda self: self._no,
doc='Space group number in International Tables of Crystallography.')
symbol = property(
lambda self: self._symbol,
doc='Hermann-Mauguin (or international) symbol for the space group.')
setting = property(lambda self: self._setting,
doc='Space group setting. Either one or two.')
lattice = property(lambda self: self._symbol[0],
doc="""Lattice type:
P primitive
I body centering, h+k+l=2n
F face centering, h,k,l all odd or even
A,B,C single face centering, k+l=2n, h+l=2n, h+k=2n
R rhombohedral centering, -h+k+l=3n (obverse); h-k+l=3n (reverse)
""")
centrosymmetric = property(lambda self: self._centrosymmetric,
doc='Whether a center of symmetry exists.')
scaled_primitive_cell = property(
lambda self: self._scaled_primitive_cell,
doc='Primitive cell in scaled coordinates as a matrix with the '
'primitive vectors along the rows.')
reciprocal_cell = property(
lambda self: self._reciprocal_cell,
doc='Tree Miller indices that span all kinematically non-forbidden '
'reflections as a matrix with the Miller indices along the rows.')
nsubtrans = property(lambda self: len(self._subtrans),
doc='Number of cell-subtranslation vectors.')
systematic_absence = property(
lambda self: Search_Extinctions(self, 1),
doc='print systematic absence ands redefine is_exti')
def _get_nsymop(self):
"""Returns total number of symmetry operations."""
if self.centrosymmetric:
return 2 * len(self._rotations) * len(self._subtrans)
else:
return len(self._rotations) * len(self._subtrans)
nsymop = property(_get_nsymop, doc='Total number of symmetry operations.')
subtrans = property(
lambda self: self._subtrans,
doc='Translations vectors belonging to cell-sub-translations.')
rotations = property(
lambda self: self._rotations,
doc='Symmetry rotation matrices. The invertions are not included '
'for centrosymmetrical crystals.')
translations = property(
lambda self: self._translations,
doc='Symmetry translations. The invertions are not included '
'for centrosymmetrical crystals.')
    def __init__(self, spacegroup: _SPACEGROUP, setting=1, datafile=None):
        """Returns a new Spacegroup instance.

        Parameters:

        spacegroup : int | string | Spacegroup instance
            The space group number in International Tables of
            Crystallography or its Hermann-Mauguin symbol. E.g.
            spacegroup=225 and spacegroup='F m -3 m' are equivalent.
        setting : 1 | 2
            Some space groups have more than one setting. `setting`
            determines which of these should be used.
        datafile : None | string
            Path to database file. If `None`, the default database
            will be used.
        """
        # Copy-constructor path: clone all attributes of an existing
        # instance, then recompute extinction data.
        if isinstance(spacegroup, Spacegroup):
            for k, v in spacegroup.__dict__.items():
                setattr(self, k, v)
            Search_Extinctions(self, False)
            return
        # get_datafile() and _read_datafile() are presumably defined
        # elsewhere in this module (not visible here) — TODO confirm.
        if not datafile:
            datafile = get_datafile()
        with open(datafile, 'r') as fd:
            _read_datafile(self, spacegroup, setting, fd)
        # NOTE(review): Search_Extinctions is called twice with different
        # second arguments (False, then 0) — intent unclear; confirm
        # whether both calls are required.
        Search_Extinctions(self, False)
        Search_Extinctions(self, 0)
def __repr__(self):
return 'Spacegroup(%d, setting=%d)' % (self.no, self.setting)
def todict(self):
return {'number': self.no, 'setting': self.setting}
    def __str__(self):
        """Return a string representation of the space group data in
        the same format as found the database."""
        # Output is assembled as fragments and joined once at the end.
        retval = []
        # no, symbol
        retval.append('%-3d %s\n' % (self.no, self.symbol))
        # setting
        retval.append(' setting %d\n' % (self.setting))
        # centrosymmetric
        retval.append(' centrosymmetric %d\n' % (self.centrosymmetric))
        # primitive vectors (3x3, fixed-width floats)
        retval.append(' primitive vectors\n')
        for i in range(3):
            retval.append(' ')
            for j in range(3):
                retval.append(' %13.10f' % (self.scaled_primitive_cell[i, j]))
            retval.append('\n')
        # primitive reciprocal vectors (3x3, integers)
        retval.append(' reciprocal vectors\n')
        for i in range(3):
            retval.append(' ')
            for j in range(3):
                retval.append(' %3d' % (self.reciprocal_cell[i, j]))
            retval.append('\n')
        # sublattice
        retval.append(' %d subtranslations\n' % self.nsubtrans)
        for i in range(self.nsubtrans):
            retval.append(' ')
            for j in range(3):
                retval.append(' %13.10f' % (self.subtrans[i, j]))
            retval.append('\n')
        # symmetry operations: 3x3 rotation followed by translation vector
        nrot = len(self.rotations)
        retval.append(' %d symmetry operations (rot+trans)\n' % nrot)
        for i in range(nrot):
            retval.append(' ')
            for j in range(3):
                retval.append(' ')
                for k in range(3):
                    retval.append(' %2d' % (self.rotations[i, j, k]))
                retval.append(' ')
            for j in range(3):
                retval.append(' %13.10f' % self.translations[i, j])
            retval.append('\n')
        retval.append('\n')
        return ''.join(retval)
def __eq__(self, other):
return self.no == other.no and self.setting == other.setting
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return self.no < other.no or (self.no == other.no
and self.setting < other.setting)
    def __index__(self):
        # Lets a Spacegroup be used wherever an integer is expected
        # (indexing, int(), etc.) — it stands in for its IT number.
        return self.no
    __int__ = __index__
    def get_symop(self, matrix=True):
        """Returns all symmetry operations (including inversions and
        subtranslations) as a sequence of (rotation, translation)
        tuples.

        Parameters:

        matrix: boolean
            If false, return each symmetry operation as an x,y,z-style
            text string instead of a (rotation, translation) tuple.
        """
        symop = []
        # Parity -1 adds the inversion-related operations for
        # centrosymmetric groups.
        parities = [1]
        if self.centrosymmetric:
            parities.append(-1)
        for parity in parities:
            for subtrans in self.subtrans:
                for rot, trans in zip(self.rotations, self.translations):
                    # Wrap the combined translation back into [0, 1).
                    newtrans = np.mod(trans + subtrans, 1)
                    symop.append((parity * rot, newtrans))
        if not(matrix):
            # MT2text is presumably defined elsewhere in this module;
            # it appears to format an operation as text — TODO confirm.
            symop = [MT2text(Opr) for Opr in symop]
        return symop
    def get_op(self):
        """Returns all symmetry operations (including inversions and
        subtranslations), but unlike get_symop(), they are returned as
        two ndarrays (rotations and translations)."""
        if self.centrosymmetric:
            # Stack rotations with their inversions, then repeat the
            # whole set once per sub-translation.
            rot = np.tile(np.vstack((self.rotations, -self.rotations)),
                          (self.nsubtrans, 1, 1))
            trans = np.tile(np.vstack((self.translations, -self.translations)),
                            (self.nsubtrans, 1))
            trans += np.repeat(self.subtrans, 2 * len(self.rotations), axis=0)
            trans = np.mod(trans, 1)
        else:
            rot = np.tile(self.rotations, (self.nsubtrans, 1, 1))
            trans = np.tile(self.translations, (self.nsubtrans, 1))
            trans += np.repeat(self.subtrans, len(self.rotations), axis=0)
            trans = np.mod(trans, 1)
        return rot, trans
def get_rotations(self):
"""Return all rotations, including inversions for
centrosymmetric crystals."""
if self.centrosymmetric:
return np.vstack((self.rotations, -self.rotations))
else:
return self.rotations
    def equivalent_reflections(self, hkl):
        """Return all equivalent reflections to the list of Miller indices
        in hkl.

        Example:

        >>> from ase.spacegroup import Spacegroup
        >>> sg = Spacegroup(225)  # fcc
        >>> sg.equivalent_reflections([[0, 0, 2]])
        array([[ 0,  0, -2],
               [ 0, -2,  0],
               [-2,  0,  0],
               [ 2,  0,  0],
               [ 0,  2,  0],
               [ 0,  0,  2]])
        """
        hkl = np.array(hkl, dtype='int', ndmin=2)
        rot = self.get_rotations()
        n, nrot = len(hkl), len(rot)
        # Apply every rotation to every hkl with a single matrix product.
        R = rot.transpose(0, 2, 1).reshape((3 * nrot, 3)).T
        refl = np.dot(hkl, R).reshape((n * nrot, 3))
        # Lexicographically sort the rows, then drop consecutive
        # duplicates (rows whose diff with the previous row is all zero).
        ind = np.lexsort(refl.T)
        refl = refl[ind]
        diff = np.diff(refl, axis=0)
        mask = np.any(diff, axis=1)
        return np.vstack((refl[:-1][mask], refl[-1, :]))
    def equivalent_lattice_points(self, uvw):
        """Return all lattice points equivalent to any of the lattice points
        in `uvw` with respect to rotations only.

        Only equivalent lattice points that conserve the distance to
        origo are included in the output (making this a kind of real
        space version of the equivalent_reflections() method).

        Example:

        >>> from ase.spacegroup import Spacegroup
        >>> sg = Spacegroup(225)  # fcc
        >>> sg.equivalent_lattice_points([[0, 0, 2]])
        array([[ 0,  0, -2],
               [ 0, -2,  0],
               [-2,  0,  0],
               [ 2,  0,  0],
               [ 0,  2,  0],
               [ 0,  0,  2]])
        """
        uvw = np.array(uvw, ndmin=2)
        rot = self.get_rotations()
        n, nrot = len(uvw), len(rot)
        directions = np.dot(uvw, rot).reshape((n * nrot, 3))
        # Sort rows lexicographically and drop consecutive duplicates.
        ind = np.lexsort(directions.T)
        directions = directions[ind]
        diff = np.diff(directions, axis=0)
        mask = np.any(diff, axis=1)
        return np.vstack((directions[:-1][mask], directions[-1:]))
    def symmetry_normalised_reflections(self, hkl):
        """Returns an array of same size as *hkl*, containing the
        corresponding symmetry-equivalent reflections of lowest
        indices.

        Example:

        >>> from ase.spacegroup import Spacegroup
        >>> sg = Spacegroup(225)  # fcc
        >>> sg.symmetry_normalised_reflections([[2, 0, 0], [0, 2, 0]])
        array([[ 0,  0, -2],
               [ 0,  0, -2]])
        """
        hkl = np.array(hkl, dtype=int, ndmin=2)
        normalised = np.empty(hkl.shape, int)
        R = self.get_rotations().transpose(0, 2, 1)
        for i, g in enumerate(hkl):
            gsym = np.dot(R, g)
            # lexsort's first index is the lexicographically smallest
            # equivalent reflection — that is the canonical representative.
            j = np.lexsort(gsym.T)[0]
            normalised[i, :] = gsym[j]
        return normalised
    def unique_reflections(self, hkl):
        """Returns a subset of *hkl* containing only the symmetry-unique
        reflections.

        Example:

        >>> from ase.spacegroup import Spacegroup
        >>> sg = Spacegroup(225)  # fcc
        >>> sg.unique_reflections([[ 2,  0,  0],
        ...                        [ 0, -2,  0],
        ...                        [ 2,  2,  0],
        ...                        [ 0, -2, -2]])
        array([[2, 0, 0],
               [2, 2, 0]])
        """
        hkl = np.array(hkl, dtype=int, ndmin=2)
        hklnorm = self.symmetry_normalised_reflections(hkl)
        # Sort normalised rows, mark the first occurrence of each distinct
        # row, then map the mask back to the original row order via the
        # inverse permutation.
        perm = np.lexsort(hklnorm.T)
        iperm = perm.argsort()
        xmask = np.abs(np.diff(hklnorm[perm], axis=0)).any(axis=1)
        mask = np.concatenate(([True], xmask))
        imask = mask[iperm]
        return hkl[imask]
    def equivalent_sites(self,
                         scaled_positions,
                         onduplicates='error',
                         symprec=1e-3,
                         occupancies=None):
        """Returns the scaled positions and all their equivalent sites.

        Parameters:

        scaled_positions: list | array
            List of non-equivalent sites given in unit cell coordinates.
        occupancies: list | array, optional (default=None)
            List of occupancies corresponding to the respective sites.
            NOTE(review): accepted but not used by this implementation.
        onduplicates : 'keep' | 'replace' | 'warn' | 'error'
            Action if `scaled_positions` contain symmetry-equivalent
            positions of full occupancy:

            'keep'
                ignore additional symmetry-equivalent positions
            'replace'
                replace
            'warn'
                like 'keep', but issue an UserWarning
            'error'
                raises a SpacegroupValueError
        symprec: float
            Minimum "distance" between two sites in scaled coordinates
            before they are counted as the same site.

        Returns:

        sites: array
            A NumPy array of equivalent sites.
        kinds: list
            A list of integer indices specifying which input site is
            equivalent to the corresponding returned site.

        Example:

        >>> from ase.spacegroup import Spacegroup
        >>> sg = Spacegroup(225)  # fcc
        >>> sites, kinds = sg.equivalent_sites([[0, 0, 0], [0.5, 0.0, 0.0]])
        >>> kinds
        [0, 0, 0, 0, 1, 1, 1, 1]
        """
        kinds = []
        sites = []
        scaled = np.array(scaled_positions, ndmin=2)
        for kind, pos in enumerate(scaled):
            for rot, trans in self.get_symop():
                # Generated site, wrapped into the unit cell.
                site = np.mod(np.dot(rot, pos) + trans, 1.)
                if not sites:
                    sites.append(site)
                    kinds.append(kind)
                    continue
                t = site - sites
                # A site matches an existing one if every coordinate agrees
                # within symprec, allowing for wrap-around across 1.0.
                mask = np.all(
                    (abs(t) < symprec) | (abs(abs(t) - 1.0) < symprec), axis=1)
                if np.any(mask):
                    inds = np.argwhere(mask).flatten()
                    for ind in inds:
                        # then we would just add the same thing again -> skip
                        if kinds[ind] == kind:
                            pass
                        elif onduplicates == 'keep':
                            pass
                        elif onduplicates == 'replace':
                            kinds[ind] = kind
                        elif onduplicates == 'warn':
                            warnings.warn('scaled_positions %d and %d '
                                          'are equivalent' %
                                          (kinds[ind], kind))
                        elif onduplicates == 'error':
                            raise SpacegroupValueError(
                                'scaled_positions %d and %d are equivalent' %
                                (kinds[ind], kind))
                        else:
                            raise SpacegroupValueError(
                                'Argument "onduplicates" must be one of: '
                                '"keep", "replace", "warn" or "error".')
                else:
                    sites.append(site)
                    kinds.append(kind)
        return np.array(sites), kinds
def symmetry_normalised_sites(self,
                              scaled_positions,
                              map_to_unitcell=True):
    """Return, for each input site, its symmetry-equivalent site of
    lowest lexicographic order.

    The result has the same shape as *scaled_positions*.  When
    *map_to_unitcell* is true every candidate position is wrapped into
    the unit cell first, i.e. lattice translations count as symmetry
    operators.

    Example:

    >>> from ase.spacegroup import Spacegroup
    >>> sg = Spacegroup(225)  # fcc
    >>> sg.symmetry_normalised_sites([[0.0, 0.5, 0.5], [1.0, 1.0, 0.0]])
    array([[ 0., 0., 0.],
           [ 0., 0., 0.]])
    """
    positions = np.array(scaled_positions, ndmin=2)
    lowest_sites = np.empty(positions.shape, float)
    rotations, translations = self.get_op()
    for idx, pos in enumerate(positions):
        images = np.dot(rotations, pos) + translations
        if map_to_unitcell:
            # Wrapped twice on purpose: one pass can leave coordinates
            # that round to exactly 1.0 (see the scaled_positions.py test).
            images %= 1.0
            images %= 1.0
        winner = np.lexsort(images.T)[0]
        lowest_sites[idx, :] = images[winner]
    return lowest_sites
def unique_sites(self,
                 scaled_positions,
                 symprec=1e-3,
                 output_mask=False,
                 map_to_unitcell=True):
    """Return the subset of *scaled_positions* that are symmetry-unique.

    When *output_mask* is true a boolean array selecting that subset is
    returned as well.  When *map_to_unitcell* is true all sites are first
    wrapped into the unit cell, so e.g. [0, 0, 0] and [1, 0, 0] are
    considered equivalent.

    Example:

    >>> from ase.spacegroup import Spacegroup
    >>> sg = Spacegroup(225)  # fcc
    >>> sg.unique_sites([[0.0, 0.0, 0.0],
    ...                  [0.5, 0.5, 0.0],
    ...                  [1.0, 0.0, 0.0],
    ...                  [0.5, 0.0, 0.0]])
    array([[ 0. , 0. , 0. ],
           [ 0.5, 0. , 0. ]])
    """
    positions = np.array(scaled_positions, ndmin=2)
    normalised = self.symmetry_normalised_sites(positions, map_to_unitcell)
    # Sort the normalised representatives so duplicates become adjacent,
    # then keep the first of each run of (nearly) identical rows.
    order = np.lexsort(normalised.T)
    inverse = order.argsort()
    row_gap = np.abs(np.diff(normalised[order], axis=0)).max(axis=1)
    keep_sorted = np.concatenate(([True], row_gap > symprec))
    keep = keep_sorted[inverse]
    if output_mask:
        return positions[keep], keep
    return positions[keep]
def tag_sites(self, scaled_positions, symprec=1e-3):
    """Returns an integer array of the same length as *scaled_positions*,
    tagging all equivalent atoms with the same index.

    Example:

    >>> from ase.spacegroup import Spacegroup
    >>> sg = Spacegroup(225)  # fcc
    >>> sg.tag_sites([[0.0, 0.0, 0.0],
    ...               [0.5, 0.5, 0.0],
    ...               [1.0, 0.0, 0.0],
    ...               [0.5, 0.0, 0.0]])
    array([0, 0, 0, 1])
    """
    scaled = np.array(scaled_positions, ndmin=2)
    # Wrap into [0, 1); applied twice because the first pass can leave
    # coordinates that are exactly 1.0 after floating-point rounding.
    scaled %= 1.0
    scaled %= 1.0
    # -1 marks "not tagged yet".
    tags = -np.ones((len(scaled), ), dtype=int)
    # mask[k] stays True while site k still awaits a tag.
    mask = np.ones((len(scaled), ), dtype=bool)
    rot, trans = self.get_op()
    i = 0
    while mask.any():
        # Take the first still-untagged site as the representative and
        # generate all of its symmetry images.
        pos = scaled[mask][0]
        sympos = np.dot(rot, pos) + trans
        # Must be done twice, see the scaled_positions.py test
        sympos %= 1.0
        sympos %= 1.0
        # m[k] is True when site k coincides (within symprec, per
        # component) with at least one symmetry image of the
        # representative, i.e. site k is equivalent to it.
        m = ~np.all(np.any(np.abs(scaled[np.newaxis, :, :] -
                                  sympos[:, np.newaxis, :]) > symprec,
                           axis=2),
                    axis=0)
        # Sanity check: a site tagged in an earlier round must not match
        # a later representative.
        assert not np.any((~mask) & m)
        tags[m] = i
        mask &= ~m
        i += 1
    return tags
def MT2text(Opr, reverse=False):
    """Format a (rotation matrix, translation) symmetry operator as text.

    Each matrix row is mapped to one of the axis strings 'x', 'y', 'z',
    'x-y', ... (possibly negated) and each translation component to a
    fraction string such as '1/2'.  With reverse=True the axis part is
    printed before the fraction ('x+1/2' instead of '1/2+x').
    """
    axis_names = ('-z', '-y', '-x', 'x-y', 'ERR', 'y-x', 'x', 'y', 'z')
    shift_names = (' ', 'ERR', '1/6', '1/4', '1/3', 'ERR',
                   '1/2', 'ERR', '2/3', '3/4', '5/6', 'ERR')
    matrix, shift = Opr
    fields = []
    for row in range(3):
        axis_idx = int(round(2 * matrix[row][0] + 3 * matrix[row][1] +
                             4 * matrix[row][2] + 4)) % 12
        shift_idx = int(round(shift[row] * 12)) % 12
        if not shift_idx:
            fields.append(axis_names[axis_idx].rjust(5))
        elif reverse:
            fields.append(
                (axis_names[axis_idx] + '+' + shift_names[shift_idx]).rjust(5))
        elif axis_idx < 3:
            # Negated axes read naturally without a '+': '1/2-x'.
            fields.append(
                (shift_names[shift_idx] + axis_names[axis_idx]).rjust(5))
        else:
            fields.append(
                (shift_names[shift_idx] + '+' + axis_names[axis_idx]).rjust(5))
    return ', '.join(fields)
def Text2MT(mcifOpr, CIF=True):
    """From space group cif text returns matrix/translation.

    Parses an operator string such as "1/2+x,-y,z" into a 3x3 rotation
    matrix and a length-3 translation vector.  Any translation must come
    first within each component ("1/2+x", not "x+1/2"), matching the
    format written by CIF/mCIF files.

    Parameters
    ----------
    mcifOpr: str
        Comma-separated operator, e.g. "x,y,z" or "-1/2+x, y, -z".
    CIF: bool
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    (np.ndarray, np.ndarray)
        Integer 3x3 rotation matrix and float translation vector.
    """
    XYZ = {'x': [1, 0, 0], '+x': [1, 0, 0], '-x': [-1, 0, 0], 'y': [0, 1, 0],
           '+y': [0, 1, 0], '-y': [0, -1, 0], 'z': [0, 0, 1], '+z': [0, 0, 1],
           '-z': [0, 0, -1], 'x-y': [1, -1, 0], '-x+y': [-1, 1, 0],
           'y-x': [-1, 1, 0], '+x-y': [1, -1, 0], '+y-x': [-1, 1, 0]}
    M = []
    T = []
    for op in mcifOpr.split(",")[:3]:
        # Tolerate surrounding whitespace ("x, y, z") which previously
        # caused a KeyError in the XYZ lookup.
        op = op.strip()
        if '/' in op:
            # Fraction translation such as "-1/2" followed by the axis
            # part.  Parsed arithmetically instead of with eval() for
            # safety; the denominator is a single digit, as in the
            # original format (numerator is op[:ip], denominator the
            # character right after '/').
            ip = op.index('/')
            T.append(float(op[:ip]) / float(op[ip + 1]))
            axis = op[ip + 2:]
        else:
            T.append(0.)
            axis = op
        M.append(XYZ[axis.lower()])
    return np.array(M), np.array(T)
def get_datafile():
    """Return the full path to the bundled 'spacegroup.dat' data file."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'spacegroup.dat')
def format_symbol(symbol):
    """Return a well formatted Hermann-Mauguin symbol as expected by
    the database, correcting the case and normalising spacing
    (e.g. 'fm-3m' -> 'F m -3 m')."""
    text = symbol.strip()
    text = text[0].upper() + text[1:].lower()
    pieces = []
    for ch in text:
        if ch.isalpha():
            # A letter right after '/' stays glued to it (as in '21/c');
            # any other letter becomes its own space-separated token.
            if pieces and pieces[-1] == '/':
                pieces.append(ch)
            else:
                pieces.append(' ' + ch + ' ')
        elif ch.isspace():
            pieces.append(' ')
        elif ch.isdigit():
            pieces.append(ch)
        elif ch == '-':
            pieces.append(' ' + ch)
        elif ch == '/':
            pieces.append(ch)
    # Collapse any run of spaces down to single separators.
    return ' '.join(''.join(pieces).split())
# Functions for parsing the database. They are moved outside the
# Spacegroup class in order to make it easier to later implement
# caching to avoid reading the database each time a new Spacegroup
# instance is created.
def _skip_to_blank(f, spacegroup, setting):
"""Read lines from f until a blank line is encountered."""
while True:
line = f.readline()
if not line:
raise SpacegroupNotFoundError(
'invalid spacegroup `%s`, setting `%s` not found in data base'
% (spacegroup, setting))
if not line.strip():
break
def _skip_to_nonblank(f, spacegroup, setting):
"""Read lines from f until a nonblank line not starting with a
hash (#) is encountered and returns this and the next line."""
while True:
line1 = f.readline()
if not line1:
raise SpacegroupNotFoundError(
'invalid spacegroup %s, setting %i not found in data base' %
(spacegroup, setting))
line1.strip()
if line1 and not line1.startswith('#'):
line2 = f.readline()
break
return line1, line2
def _read_datafile_entry(spg, no, symbol, setting, f):
"""Read space group data from f to spg."""
floats = {'0.0': 0.0, '1.0': 1.0, '0': 0.0, '1': 1.0, '-1': -1.0}
for n, d in [(1, 2), (1, 3), (2, 3), (1, 4), (3, 4), (1, 6), (5, 6)]:
floats['{0}/{1}'.format(n, d)] = n / d
floats['-{0}/{1}'.format(n, d)] = -n / d
spg._no = no
spg._symbol = symbol.strip()
spg._setting = setting
spg._centrosymmetric = bool(int(f.readline().split()[1]))
# primitive vectors
f.readline()
spg._scaled_primitive_cell = np.array(
[[float(floats.get(s, s)) for s in f.readline().split()]
for i in range(3)],
dtype=float)
# primitive reciprocal vectors
f.readline()
spg._reciprocal_cell = np.array([[int(i) for i in f.readline().split()]
for i in range(3)],
dtype=int)
# subtranslations
spg._nsubtrans = int(f.readline().split()[0])
spg._subtrans = np.array(
[[float(floats.get(t, t)) for t in f.readline().split()]
for i in range(spg._nsubtrans)],
dtype=float)
# symmetry operations
nsym = int(f.readline().split()[0])
symop = np.array([[float(floats.get(s, s)) for s in f.readline().split()]
for i in range(nsym)],
dtype=float)
spg._nsymop = nsym
spg._rotations = np.array(symop[:, :9].reshape((nsym, 3, 3)), dtype=int)
spg._translations = symop[:, 9:]
def _read_datafile(spg, spacegroup, setting, f):
    """Scan the open data file *f* for the entry matching *spacegroup*
    (an int number or a str symbol) and load it into *spg*."""
    if isinstance(spacegroup, int):
        pass
    elif isinstance(spacegroup, str):
        # Normalise whitespace; the compact (space-free) form is what the
        # symbol comparison below uses.
        spacegroup = ' '.join(spacegroup.strip().split())
        compact_spacegroup = ''.join(spacegroup.split())
    else:
        raise SpacegroupValueError('`spacegroup` must be of type int or str')
    while True:
        # Each entry starts with "<number> <symbol>" followed by a line
        # whose second token is the setting.
        line1, line2 = _skip_to_nonblank(f, spacegroup, setting)
        _no, _symbol = line1.strip().split(None, 1)
        _symbol = format_symbol(_symbol)
        compact_symbol = ''.join(_symbol.split())
        _setting = int(line2.strip().split()[1])
        _no = int(_no)
        # NOTE(review): `and` binds tighter than `or`, so the trailing
        # `(setting is None or ...)` test applies only to the str-symbol
        # branch; an int lookup always requires an exact setting match.
        # Presumably intended -- confirm before restructuring.
        if ((isinstance(spacegroup, int) and _no == spacegroup
             and _setting == setting)
            or (isinstance(spacegroup, str)
                and compact_symbol == compact_spacegroup) and
                (setting is None or _setting == setting)):
            _read_datafile_entry(spg, _no, _symbol, _setting, f)
            break
        else:
            # Not the requested entry: skip the rest of its data block.
            _skip_to_blank(f, spacegroup, setting)
def parse_sitesym_element(element):
    """Parse one component of a site symmetry in International Tables form.

    Examples:

    >>> parse_sitesym_element("x")
    ([(0, 1)], 0.0)
    >>> parse_sitesym_element("-1/2-y")
    ([(1, -1)], -0.5)
    >>> parse_sitesym_element("z+0.25")
    ([(2, 1)], 0.25)
    >>> parse_sitesym_element("x-z+0.5")
    ([(0, 1), (2, -1)], 0.5)

    Parameters
    ----------
    element: str
        Site symmetry like "x" or "-y+1/4" or "0.5+z".

    Returns
    -------
    list[tuple[int, int]]
        Rotation entries as '(index, sign)' pairs where index is 0 for
        "x", 1 for "y", 2 for "z" and sign is +1 or -1.
    float
        Translation in fractional coordinates, e.g. -0.25 for "-1/4".
    """
    text = element.lower()
    positive = True       # sign that applies to the next token
    fractional = False    # True once a '/' has been seen
    shift_sign = None     # sign captured at the first translation digit
    numer_digits = []     # digits before any '/'
    denom_digits = []     # digits after a '/'
    axes = []
    for ch in text:
        if ch == "+":
            positive = True
        elif ch == "-":
            positive = False
        elif ch == "/":
            fractional = True
        elif ch in "xyz":
            axes.append((ord(ch) - ord("x"), 1 if positive else -1))
        elif ch.isdigit() or ch == ".":
            if shift_sign is None:
                shift_sign = 1.0 if positive else -1.0
            (denom_digits if fractional else numer_digits).append(ch)
    shift = 0.0 if not numer_digits else (shift_sign * float("".join(numer_digits)))
    if fractional:
        shift /= float("".join(denom_digits))
    return axes, shift
def parse_sitesym_single(sym, out_rot, out_trans, sep=",", force_positive_translation=False):
    """Parses a single site symmetry in the form used by International
    Tables and overwrites 'out_rot' and 'out_trans' with data.

    Parameters
    ----------
    sym: str
        Site symmetry in the form used by International Tables (e.g. "x,y,z", "y-1/2,x,-z").
    out_rot: np.array
        A 3x3-integer array representing rotations (changes are made inplace).
    out_trans: np.array
        A 3-float array representing translations (changes are made inplace).
    sep: str
        String separator ("," in "x,y,z").
    force_positive_translation: bool
        Forces fractional translations to be between 0 and 1 (otherwise negative values might be accepted).
        Defaults to 'False'.

    Returns
    -------
    Nothing is returned: 'out_rot' and 'out_trans' are changed inplace.
    """
    out_rot[:] = 0.0
    out_trans[:] = 0.0
    for i, element in enumerate(sym.split(sep)):
        # Each component contributes one row of the rotation matrix and
        # one translation entry.
        e_rot_list, e_trans = parse_sitesym_element(element)
        for rot_idx, rot_sgn in e_rot_list:
            out_rot[i][rot_idx] = rot_sgn
        out_trans[i] = (e_trans %
                        1.0) if force_positive_translation else e_trans
def parse_sitesym(symlist, sep=',', force_positive_translation=False):
    """Parse a sequence of site symmetries in International Tables form
    into rotation and translation arrays.

    Example:

    >>> symlist = [
    ...     'x,y,z',
    ...     '-y+1/2,x+1/2,z',
    ...     '-y,-x,-z',
    ...     'x-1/4, y-1/4, -z'
    ... ]
    >>> rot, trans = parse_sitesym(symlist)
    >>> rot
    array([[[ 1, 0, 0],
            [ 0, 1, 0],
            [ 0, 0, 1]],
    <BLANKLINE>
           [[ 0, -1, 0],
            [ 1, 0, 0],
            [ 0, 0, 1]],
    <BLANKLINE>
           [[ 0, -1, 0],
            [-1, 0, 0],
            [ 0, 0, -1]],
    <BLANKLINE>
           [[ 1, 0, 0],
            [ 0, 1, 0],
            [ 0, 0, -1]]])
    >>> trans
    array([[ 0. , 0. , 0. ],
           [ 0.5 , 0.5 , 0. ],
           [ 0. , 0. , 0. ],
           [-0.25, -0.25, 0. ]])
    """
    count = len(symlist)
    rot = np.zeros((count, 3, 3), dtype='int')
    trans = np.zeros((count, 3))
    # zip yields writable row views, so the single-symmetry parser can
    # fill each slot in place.
    for sym, rot_slot, trans_slot in zip(symlist, rot, trans):
        parse_sitesym_single(sym, rot_slot, trans_slot, sep=sep,
                             force_positive_translation=force_positive_translation)
    return rot, trans
def spacegroup_from_data(no=None,
                         symbol=None,
                         setting=None,
                         centrosymmetric=None,
                         scaled_primitive_cell=None,
                         reciprocal_cell=None,
                         subtrans=None,
                         sitesym=None,
                         rotations=None,
                         translations=None,
                         datafile=None):
    """Manually create a new space group instance. This might be
    useful when reading crystal data with its own spacegroup
    definitions.

    Either *no* together with *setting*, or *symbol*, must be given;
    every other argument optionally overrides the values looked up from
    the database.  *sitesym*, when given, must be a list of site
    symmetry strings such as 'x,y,z'.
    """
    if no is not None and setting is not None:
        spg = Spacegroup(no, setting, datafile)
    elif symbol is not None:
        spg = Spacegroup(symbol, None, datafile)
    else:
        raise SpacegroupValueError('either *no* and *setting* '
                                   'or *symbol* must be given')
    # Bug fix: *sitesym* defaults to None and is optional (see the
    # `if sitesym is not None` below), so only a non-None value of the
    # wrong type is an error.  The old unconditional isinstance check
    # raised TypeError whenever sitesym was omitted.
    if sitesym is not None and not isinstance(sitesym, list):
        raise TypeError('sitesym must be a list')
    have_sym = False
    if centrosymmetric is not None:
        spg._centrosymmetric = bool(centrosymmetric)
    if scaled_primitive_cell is not None:
        spg._scaled_primitive_cell = np.array(scaled_primitive_cell)
    if reciprocal_cell is not None:
        spg._reciprocal_cell = np.array(reciprocal_cell)
    if subtrans is not None:
        spg._subtrans = np.atleast_2d(subtrans)
        spg._nsubtrans = spg._subtrans.shape[0]
    if sitesym is not None:
        spg._rotations, spg._translations = parse_sitesym(sitesym)
        have_sym = True
    if rotations is not None:
        # NOTE(review): np.atleast_3d appends the new axis at the *end*
        # for 2-D input, so callers are expected to pass an (n, 3, 3)
        # array already -- confirm before relying on 2-D input here.
        spg._rotations = np.atleast_3d(rotations)
        have_sym = True
    if translations is not None:
        spg._translations = np.atleast_2d(translations)
        have_sym = True
    if have_sym:
        if spg._rotations.shape[0] != spg._translations.shape[0]:
            raise SpacegroupValueError('inconsistent number of rotations and '
                                       'translations')
        spg._nsymop = spg._rotations.shape[0]
    return spg
| StarcoderdataPython |
# App-config hook (the 'default_app_config' convention used by Django < 3.2)
# pointing at this package's AppConfig subclass.
default_app_config = 'oi_seattracker.apps.OiSeattrackerConfig'
| StarcoderdataPython |
1755989 | from expfactory.logger import bot
from flask import (
Blueprint,
render_template,
)
from expfactory.views.utils import (
perform_checks,
clear_session
)
from expfactory.server import app, csrf
import os
# Blueprint serving the "teacher_practice" experiment; static experiment
# assets live under /scif/apps and templates resolve relative to that root.
teacher_practice = Blueprint('teacher_practice', __name__,
                             static_url_path='/experiments/teacher_practice',
                             static_folder='/scif/apps/teacher_practice',
                             template_folder='/scif/apps')


@teacher_practice.route('/experiments/teacher_practice/')
def teacher_practice_base():
    """Serve the experiment wrapper page for teacher_practice."""
    # perform_checks validates the participant session before rendering
    # the generic experiment template with this experiment's index page.
    context = {'experiment': 'teacher_practice/index.html'}
    return perform_checks('experiments/experiment.html', quiet=True,
                          context=context,
                          next="teacher_practice")


# CSRF-protect every request to this blueprint, then mount it on the app.
teacher_practice.before_request(csrf.protect)
app.register_blueprint(teacher_practice)
| StarcoderdataPython |
3381788 | <reponame>emmamcbryde/AuTuMN-1
from numpy import tanh
def tanh_based_scaleup(shape, inflection_time, start_asymptote, end_asymptote=1.0):
    """Build a smooth sigmoid scale-up function of time.

    The returned callable rises from *start_asymptote* (t -> -inf) to
    *end_asymptote* (t -> +inf), passing through the midpoint at
    t = inflection_time, with steepness controlled by *shape*.

    :param shape: steepness of the transition
    :param inflection_time: time of the sigmoid midpoint
    :param start_asymptote: value approached for early times
    :param end_asymptote: value approached for late times
    :return: function of t; the extra ``cv`` argument is accepted and
        ignored for compatibility with the calling convention
    """
    span = end_asymptote - start_asymptote

    def scaled_tanh(t, cv=None):
        # Map tanh's (-1, 1) range onto (0, 1), then onto the asymptotes.
        return (tanh(shape * (t - inflection_time)) / 2.0 + 0.5) * span + start_asymptote

    return scaled_tanh
| StarcoderdataPython |
1675340 | <reponame>AlanTheKnight/easygmail<gh_stars>0
from . import utils
import base64
class Parcer(object):
    """
    Parse the result of a Gmail API ``get()`` call into a ``Message``.

    The constructor immediately reads the payload headers and MIME parts
    and stores the results on the supplied message object.

    Attributes:
        message - Message object; receives ``sender``, ``recipient``,
            ``subject``, ``body``, ``html`` attributes and entries
            appended to its ``attachments`` list
        msg - raw get() API call result (the ``message.message`` dict)
    """

    def __init__(self, msg):
        self.message = msg
        self.msg = msg.message
        # Bug fix: default charset so messages without a Content-Type
        # header can still be decoded.  Previously self.encoding was only
        # set inside headers(), so parts() crashed with AttributeError
        # when the header was absent.
        self.encoding = "UTF-8"
        self.headers()
        self.parts()

    def __set(self, attr, value):
        """Perform setattr() on Message class."""
        setattr(self.message, attr, value)

    def headers(self):
        """Read sender/recipient/subject and the charset from the headers."""
        for header in self.msg["payload"]["headers"]:
            if header["name"].upper() == "FROM":
                self.__set("sender", header["value"])
            if header["name"].upper() == "TO":
                self.__set("recipient", header["value"])
            if header["name"].upper() == "SUBJECT":
                self.__set("subject", header["value"])
            if header["name"].upper() == "CONTENT-TYPE":
                self.encoding = utils.parse_for_encoding(header["value"])

    def get_charset_from_part_headers(self, part) -> str:
        """Return the charset declared by one MIME part, or "UTF-8"."""
        if part.get("headers"):
            for header in part["headers"]:
                if header["name"].upper() == "CONTENT-TYPE":
                    return utils.parse_for_encoding(header["value"])
        return "UTF-8"

    def parts(self):
        """Walk the MIME parts, extracting plain text, HTML and attachments."""
        if "parts" in self.msg["payload"].keys():
            for part in self.msg["payload"]["parts"]:
                if part["mimeType"].upper() == "TEXT/PLAIN" and "data" in part["body"]:
                    self.encoding = self.get_charset_from_part_headers(part)
                    self.message.body = base64.urlsafe_b64decode(
                        part["body"]["data"]).decode(self.encoding)
                if part["mimeType"].upper() == "TEXT/HTML" and "data" in part["body"]:
                    encoding = self.get_charset_from_part_headers(part)
                    self.message.html = base64.urlsafe_b64decode(
                        part["body"]["data"]).decode(encoding)
                if part["mimeType"].upper() == "MULTIPART/ALTERNATIVE":
                    # Nested alternative container: pull the plain-text leaf.
                    for multipartPart in part["parts"]:
                        if multipartPart["mimeType"].upper() == "TEXT/PLAIN" and "data" in multipartPart["body"]:
                            for header in multipartPart["headers"]:
                                if header["name"].upper() == "CONTENT-TYPE":
                                    self.encoding = utils.parse_for_encoding(header["value"])
                            self.__set("body", base64.urlsafe_b64decode(
                                multipartPart["body"]["data"]).decode(self.encoding))
                if "filename" in part.keys() and part["filename"] != "":
                    # Record attachment metadata; the content itself must
                    # be fetched separately via its attachment id.
                    self.message.attachments.append(
                        {
                            "filename": part["filename"],
                            "id": part["body"]["attachmentId"],
                            "size": part["body"]["size"]
                        }
                    )
        # No parts in payload but payload.body exists
        elif "body" in self.msg["payload"].keys():
            self.__set("body", base64.urlsafe_b64decode(
                self.msg["payload"]["body"]["data"]).decode(self.encoding))
| StarcoderdataPython |
1635968 | from enum import IntEnum
from dataclasses import dataclass
import numpy as np
class StepType(IntEnum):
    """Position of a timestep within an episode."""
    FIRST = 0  # initial (reset) step
    MID = 1    # any intermediate step
    LAST = 2   # final step of the episode
@dataclass
class TimeStep:
    """One environment step: an observation plus transition metadata.

    For the FIRST step of an episode, reward/done/info are all None.
    """
    step_type: StepType
    observation: object
    reward: float
    done: bool
    info: dict

    def __getitem__(self, key):
        # Convenience: timestep['foo'] reads from the info dict.
        return self.info[key]

    def first(self):
        """True for the initial (reset) step of an episode."""
        if self.step_type == StepType.FIRST:
            # NOTE(review): invariant checks via assert are stripped
            # under ``python -O``.
            assert all([x is None for x in [self.reward, self.done, self.info]])
        return self.step_type == StepType.FIRST

    def mid(self):
        """True for any step that is neither FIRST nor LAST."""
        if self.step_type == StepType.MID:
            assert not self.first() and not self.last()
        return self.step_type == StepType.MID

    def last(self):
        """True for the final step of an episode."""
        if self.step_type == StepType.LAST:
            assert self.done is not None and self.done
        return self.step_type == StepType.LAST

    def time_limit(self):
        """True when the episode ended by hitting the environment's
        time limit (gym's 'TimeLimit.truncated' info flag)."""
        return self.last() and self.info.get('TimeLimit.truncated', False)

    def terminal(self):
        """True when the episode ended in a genuine terminal state
        (i.e. last but not truncated by the time limit)."""
        return self.last() and not self.time_limit()

    def __repr__(self):
        return f'{self.__class__.__name__}({self.step_type.name})'
class Trajectory(object):
    """Ordered record of one episode: the visited timesteps plus the
    actions taken between them (actions[k] produced timestep k+1)."""

    def __init__(self):
        self.timesteps = []
        self._actions = []

    def __len__(self):
        return len(self.timesteps)

    @property
    def T(self):
        """Number of transitions (len - 1, floored at zero)."""
        return max(0, len(self) - 1)

    def __getitem__(self, index):
        return self.timesteps[index]

    def __iter__(self):
        # The trajectory acts as its own iterator; reset the cursor.
        self.i = 0
        return self

    def __next__(self):
        if self.i >= len(self):
            raise StopIteration
        step = self.timesteps[self.i]
        self.i += 1
        return step

    @property
    def finished(self):
        """True once a LAST timestep has been recorded."""
        return len(self) > 0 and self.timesteps[-1].last()

    @property
    def reach_time_limit(self):
        """True when the episode ended by hitting a time limit."""
        return len(self) > 0 and self.timesteps[-1].time_limit()

    @property
    def reach_terminal(self):
        """True when the episode ended in a genuine terminal state."""
        return len(self) > 0 and self.timesteps[-1].terminal()

    def add(self, timestep, action):
        """Append *timestep*; *action* is what produced it (None only
        for the very first step of the episode)."""
        assert not self.finished
        if self.timesteps:
            assert action is not None
            self._actions.append(action)
        else:
            # An episode must open with a FIRST step and no action.
            assert timestep.first()
            assert action is None
        self.timesteps.append(timestep)

    @property
    def observations(self):
        return [step.observation for step in self.timesteps]

    @property
    def actions(self):
        return self._actions

    @property
    def rewards(self):
        return [step.reward for step in self.timesteps[1:]]

    @property
    def dones(self):
        return [step.done for step in self.timesteps[1:]]

    @property
    def infos(self):
        return [step.info for step in self.timesteps[1:]]

    def get_infos(self, key):
        """Collect info[key] from every non-initial step that has it."""
        return [step.info[key] for step in self.timesteps[1:] if key in step.info]

    def __repr__(self):
        return f'Trajectory(T: {self.T}, Finished: {self.finished}, Reach time limit: {self.reach_time_limit}, Reach terminal: {self.reach_terminal})'
| StarcoderdataPython |
45733 | <filename>luno_python/base_client.py
import json
import platform
import requests
import six
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
from . import VERSION
from .error import APIError
# Fallbacks used when the caller supplies no base URL / timeout
# (see BaseClient.set_base_url / set_timeout).
DEFAULT_BASE_URL = 'https://api.mybitx.com'
DEFAULT_TIMEOUT = 10
# Platform details baked into the User-Agent header (see make_user_agent).
PYTHON_VERSION = platform.python_version()
SYSTEM = platform.system()
ARCH = platform.machine()
class BaseClient:
    """Thin HTTP client for the Luno API: holds the credentials, base
    URL, timeout and a shared requests.Session."""

    def __init__(self, base_url='', timeout=0,
                 api_key_id='', api_key_secret=''):
        """
        :type base_url: str
        :type timeout: float
        :type api_key_id: str
        :type api_key_secret: str
        """
        self.set_auth(api_key_id, api_key_secret)
        self.set_base_url(base_url)
        self.set_timeout(timeout)
        # One session for all requests so connections can be reused.
        self.session = requests.Session()

    def set_auth(self, api_key_id, api_key_secret):
        """Provides the client with an API key and secret.

        :type api_key_id: str
        :type api_key_secret: str
        """
        self.api_key_id = api_key_id
        self.api_key_secret = api_key_secret

    def set_base_url(self, base_url):
        """Overrides the default base URL. For internal use.

        :type base_url: str
        """
        if base_url == '':
            base_url = DEFAULT_BASE_URL
        # Stored without a trailing slash; make_url() re-adds exactly one.
        self.base_url = base_url.rstrip('/')

    def set_timeout(self, timeout):
        """Sets the timeout, in seconds, for requests made by the client.

        :type timeout: float
        """
        if timeout == 0:
            timeout = DEFAULT_TIMEOUT
        self.timeout = timeout

    def do(self, method, path, req=None, auth=False):
        """Performs an API request and returns the response.

        TODO: Handle 429s

        :type method: str
        :type path: str
        :type req: object
        :type auth: bool
        """
        try:
            # Round-trip through JSON to reduce req to plain values
            # suitable for use as query parameters.
            params = json.loads(json.dumps(req))
        except Exception:
            params = None
        headers = {'User-Agent': self.make_user_agent()}
        args = dict(timeout=self.timeout, params=params, headers=headers)
        if auth:
            # HTTP basic auth with the API key id/secret.
            args['auth'] = (self.api_key_id, self.api_key_secret)
        url = self.make_url(path, params)
        res = self.session.request(method, url, **args)
        try:
            e = res.json()
            # Luno error payloads carry both fields; surface as APIError.
            if 'error' in e and 'error_code' in e:
                raise APIError(e['error_code'], e['error'])
            return e
        except JSONDecodeError:
            # Non-JSON body: report the HTTP status instead.
            raise Exception('luno: unknown API error (%s)' % res.status_code)

    def make_url(self, path, params):
        """
        :type path: str
        :rtype: str
        """
        if params:
            # Substitute {placeholders} in the path template with values.
            for k, v in six.iteritems(params):
                path = path.replace('{' + k + '}', str(v))
        return self.base_url + '/' + path.lstrip('/')

    def make_user_agent(self):
        """
        :rtype: str
        """
        return "LunoPythonSDK/%s python/%s %s %s" % \
               (VERSION, PYTHON_VERSION, SYSTEM, ARCH)
| StarcoderdataPython |
42780 | # flake8: noqa
from pandas.computation.eval import eval
from pandas.computation.expr import Expr
| StarcoderdataPython |
151256 | #!/usr/bin/env python
# Copyright (C) 2021 ByteDance Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import io_extender
import rhea_config
from enhanced_systrace import systrace_env
from common import cmd_executer
from rhea_atrace.rhea_log.rhea_logger import rhea_logger
logger = rhea_logger
def capture(context):
    """Capture a systrace from the device described by *context*.

    On rooted devices the kernel caches are dropped and android_fs
    tracing is enabled first so file-system events are included.
    Returns True when the systrace was captured (and, when android_fs
    tracing was active, extended with I/O information), else False.
    """
    root_cmd = ["root"]
    (out, return_code) = cmd_executer.exec_commands(
        cmd_executer.get_complete_abd_cmd(root_cmd, context.serial_number))
    open_android_fs = False
    # Bug fix: compare return codes with '==', not the identity operator
    # 'is'.  'is 0' only works through CPython's small-int caching and is
    # a SyntaxWarning on Python >= 3.8.
    if return_code == 0:
        if "cannot" not in out:
            logger.info("current devices is rooted!")
            _drop_cache()
            open_android_fs = __open_android_fs(context.serial_number)
    logger.debug("start to capture systrace.")
    (out, return_code) = __capture_systrace(context)
    logger.debug(out)
    if return_code == 0:
        context.set_params(rhea_config.ENVIRONMENT_PARAMS_ANDROID_FS, open_android_fs)
        if open_android_fs:
            # Merge android_fs I/O events into the captured trace.
            io_extender.extend(context.get_build_file_path(rhea_config.ORIGIN_SYSTRACE_FILE),
                               context.get_build_file_path(rhea_config.ORIGIN_SYSTRACE_FS_FILE))
        return True
    logger.error("failed to capture systrace, check your inputs firstly.")
    return False
def show_list_categories(serial_number):
    """Run ``systrace -l`` to list the trace categories supported by the
    attached device, exiting the process when systrace is unavailable."""
    systrace_path = systrace_env.get_executable_systrace()
    if systrace_path is None:
        logger.error("can't find systrace in system environment.")
        sys.exit(1)
    cmd = [systrace_path, "-l"]
    if serial_number is not None:
        cmd += ["-e", serial_number]
    return cmd_executer.exec_commands(cmd)
def from_file(file_path):
    """Convert a previously recorded trace via ``systrace --from-file``,
    exiting the process when systrace is unavailable."""
    systrace_path = systrace_env.get_executable_systrace()
    if systrace_path is None:
        logger.error("can't find systrace in system environment.")
        sys.exit(1)
    return cmd_executer.exec_commands([systrace_path, "--from-file", file_path])
def _drop_cache():
    """
    free pagecache dentries and inodes cache, only root device effected

    Writes "3" to /proc/sys/vm/drop_caches on the device.
    """
    (out, return_code) = cmd_executer.exec_write_value("/proc/sys/vm/drop_caches", "3")
    # Bug fix: '==' instead of 'is' -- identity comparison of ints relies
    # on CPython small-int caching and warns on Python >= 3.8.
    if return_code == 0 and out is None:
        logger.debug("succeed to drop caches.")
def __open_android_fs(serial_number):
    """
    tracing android_fs events, only root device effected.

    Tries a direct write to the tracing switch first, then falls back to
    an ``su -c`` shell write; returns True when either attempt succeeds.
    """
    (out, return_code) = cmd_executer.exec_write_value("/d/tracing/events/android_fs/enable", "1")
    open_successful = False
    # Bug fix: use '==' rather than 'is' for int comparisons ('is 0'
    # depends on CPython small-int caching and warns on Python >= 3.8).
    if return_code == 0 and out is None:
        open_successful = True
        logger.info("succeed to tracing android_fs events.")
    else:
        (out_1, return_code_1) = cmd_executer.exec_adb_shell_with_append_commands(
            "su -c 'echo 1 > /d/tracing/events/android_fs/enable'",
            serial_number)
        if return_code_1 == 0 and out_1 is None:
            open_successful = True
            logger.info("ensure to tracing android_fs events successfully.")
    return open_successful
def __capture_systrace(context):
    """Build and execute the systrace command line described by *context*."""
    systrace_path = systrace_env.get_executable_systrace()
    # NOTE(review): unlike show_list_categories/from_file there is no
    # None check here -- presumably capture() is only reached when
    # systrace is available; confirm before relying on it.
    cmd = ["python2.7", systrace_path]
    if context.serial_number is not None:
        cmd.extend(["-e", context.serial_number])
    if context.categories:
        cmd.extend(context.categories)
    cmd.extend(["-o", context.get_build_file_path(rhea_config.ORIGIN_SYSTRACE_FILE)])
    if context.app_name is not None:
        cmd.extend(["-a", context.app_name])
    if context.trace_time is not None:
        # Extra seconds cover the advanced tracing window plus margin.
        cmd.extend(["-t", str(context.trace_time + context.advanced_systrace_time + 2)])
    if context.trace_buf_size is not None:
        cmd.extend(["-b", str(context.trace_buf_size)])
    if context.kfuncs is not None:
        cmd.extend(["-k", str(context.kfuncs)])
    logger.debug("systrace cmd: " + str(cmd))
    return cmd_executer.exec_commands(cmd)
| StarcoderdataPython |
3280882 | <filename>autoio-interfaces/mess_io/reader/_lump.py
""" Read the merged wells from the auxiliary file
"""
def merged_wells(mess_aux_str):
    """ Parse the auxiliary MESS output file string for all of the groups
        of wells which merged from a Master Equation simulation.

        :param mess_aux_str: string for the auxiliary file
        :type mess_aux_str: str
        :rtype: tuple(tuple(str))
    """
    mess_lines = mess_aux_str.splitlines()

    # Locate the species-count line; well-name lines follow it, one
    # every other line.  (A redundant first scan whose result was
    # immediately discarded has been removed.)
    start_idx = None
    for i, line in enumerate(mess_lines):
        if 'number of species =' in line:
            start_idx = i + 1
            num_merged_spc = int(line.strip().split()[-1])
            break

    # Collect each group of merged well names as a tuple of strings.
    merged_well_lst = ()
    if start_idx is not None:
        for idx in range(num_merged_spc):
            well_line = mess_lines[start_idx + 2 * idx]
            merged_well_lst += (tuple(well_line.strip().split()),)

    return merged_well_lst
| StarcoderdataPython |
3312228 | import sys
import os
import tempfile
from pathlib import Path
import shutil
import glob
from PIL import Image
import cog
sys.path.insert(0, "inference")
from inference import run_inference
class Predictor(cog.Predictor):
    """Cog predictor wrapping the paint-transformer inference script."""

    def setup(self):
        # Remember the repo root: predict() chdirs into ./inference
        # and back again.
        self.basepath = os.getcwd()

    @cog.input("image", type=Path, help="input image")
    @cog.input(
        "output_type",
        type=str,
        default="gif",
        options=["png", "gif"],
        help="choose output the final png or a gif with the painting process",
    )
    def predict(self, image, output_type="gif"):
        """Run painting inference and return either an animated GIF of
        the painting process or the final still image."""
        basename = os.path.basename(str(image))
        # avoid subdirectory import issue
        os.chdir("./inference")
        output_dir = "output"
        run_inference(
            input_path=str(image),
            model_path="model.pth",
            output_dir=output_dir,  # whether need intermediate results for animation.
            need_animation=True if output_type == "gif" else False,
            resize_h=None,  # resize original input to this size. None means do not resize.
            resize_w=None,
            serial=True,
        )
        os.chdir(self.basepath)
        if output_type == "gif":
            # Set to dir with output images
            in_dir = os.path.join(
                "inference", output_dir, os.path.splitext(basename)[0] + "/*.jpg"
            )
            out_path = Path(tempfile.mkdtemp()) / "out.gif"
            img, *imgs = [Image.open(f) for f in sorted(glob.glob(in_dir))]
            img.save(
                fp=str(out_path),
                format="GIF",
                append_images=imgs,
                save_all=True,
                duration=100,
                loop=0,
            )
        else:
            img = Image.open(os.path.join("inference", output_dir, basename))
            # Bug fix: the "png" result was written to a file named
            # "out.gif", which makes Pillow infer and encode the GIF
            # format from the extension.  Save a real PNG instead.
            out_path = Path(tempfile.mkdtemp()) / "out.png"
            img.save(str(out_path))
        clean_folder(os.path.join("inference", output_dir))
        return out_path
def clean_folder(folder):
    """Delete every file, symlink and subdirectory inside *folder*,
    leaving the folder itself in place."""
    for entry in os.listdir(folder):
        path = os.path.join(folder, entry)
        try:
            if os.path.isdir(path) and not os.path.islink(path):
                shutil.rmtree(path)
            elif os.path.isfile(path) or os.path.islink(path):
                os.unlink(path)
        except Exception as e:
            # Best-effort cleanup: report and keep going.
            print("Failed to delete %s. Reason: %s" % (path, e))
| StarcoderdataPython |
4833323 | <filename>tests/models/xsd/test_alternative.py
from unittest import TestCase
from xsdata.models.xsd import Alternative
class AlternativeTests(TestCase):
    """Unit tests for the xsd Alternative model."""

    def test_property_real_name(self):
        obj = Alternative()
        # Default name when neither id nor test expression is set.
        self.assertEqual("value", obj.real_name)
        obj.id = "foo"
        self.assertEqual("foo", obj.real_name)
        # A test expression takes precedence and is slugified.
        obj.test = "@type='text'"
        self.assertEqual("type_text", obj.real_name)

    def test_property_bases(self):
        obj = Alternative()
        # No type set -> no bases.
        self.assertEqual([], list(obj.bases))
        obj.type = "foo"
        self.assertEqual(["foo"], list(obj.bases))

    def test_get_restrictions(self):
        obj = Alternative()
        # Each alternative contributes an optional choice keyed by its
        # object identity.
        expected = {"min_occurs": 0, "choice": str(id(obj))}
        self.assertEqual(expected, obj.get_restrictions())
68415 | <reponame>SakuragawaAsaba/pnc-cli<gh_stars>0
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from datetime import datetime
from pprint import pformat
from six import iteritems
class ProductVersionRest(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        ProductVersionRest - a model defined in Swagger
        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        # Declared Swagger type of every model attribute; consumed by the
        # generated (de)serialisation machinery and by to_dict() below.
        self.swagger_types = {
            'id': 'int',
            'version': 'str',
            'product_id': 'int',
            'product_name': 'str',
            'current_product_milestone_id': 'int',
            'product_milestones': 'list[ProductMilestoneRest]',
            'product_releases': 'list[ProductReleaseRest]',
            'build_configuration_sets': 'list[BuildConfigurationSetRest]',
            'build_configurations': 'list[BuildConfigurationRest]',
            'attributes': 'dict(str, str)'
        }
        # Maps each snake_case python attribute to its camelCase JSON key.
        self.attribute_map = {
            'id': 'id',
            'version': 'version',
            'product_id': 'productId',
            'product_name': 'productName',
            'current_product_milestone_id': 'currentProductMilestoneId',
            'product_milestones': 'productMilestones',
            'product_releases': 'productReleases',
            'build_configuration_sets': 'buildConfigurationSets',
            'build_configurations': 'buildConfigurations',
            'attributes': 'attributes'
        }
        # Private backing fields for the properties below; all start unset.
        self._id = None
        self._version = None
        self._product_id = None
        self._product_name = None
        self._current_product_milestone_id = None
        self._product_milestones = None
        self._product_releases = None
        self._build_configuration_sets = None
        self._build_configurations = None
        self._attributes = None
    @property
    def id(self):
        """
        Gets the id of this ProductVersionRest.
        :return: The id of this ProductVersionRest.
        :rtype: int
        """
        return self._id
    @id.setter
    def id(self, id):
        """
        Sets the id of this ProductVersionRest.
        :param id: The id of this ProductVersionRest.
        :type: int
        """
        self._id = id
    @property
    def version(self):
        """
        Gets the version of this ProductVersionRest.
        :return: The version of this ProductVersionRest.
        :rtype: str
        """
        return self._version
    @version.setter
    def version(self, version):
        """
        Sets the version of this ProductVersionRest.
        :param version: The version of this ProductVersionRest.
        :type: str
        """
        self._version = version
    @property
    def product_id(self):
        """
        Gets the product_id of this ProductVersionRest.
        :return: The product_id of this ProductVersionRest.
        :rtype: int
        """
        return self._product_id
    @product_id.setter
    def product_id(self, product_id):
        """
        Sets the product_id of this ProductVersionRest.
        :param product_id: The product_id of this ProductVersionRest.
        :type: int
        """
        self._product_id = product_id
    @property
    def product_name(self):
        """
        Gets the product_name of this ProductVersionRest.
        :return: The product_name of this ProductVersionRest.
        :rtype: str
        """
        return self._product_name
    @product_name.setter
    def product_name(self, product_name):
        """
        Sets the product_name of this ProductVersionRest.
        :param product_name: The product_name of this ProductVersionRest.
        :type: str
        """
        self._product_name = product_name
    @property
    def current_product_milestone_id(self):
        """
        Gets the current_product_milestone_id of this ProductVersionRest.
        :return: The current_product_milestone_id of this ProductVersionRest.
        :rtype: int
        """
        return self._current_product_milestone_id
    @current_product_milestone_id.setter
    def current_product_milestone_id(self, current_product_milestone_id):
        """
        Sets the current_product_milestone_id of this ProductVersionRest.
        :param current_product_milestone_id: The current_product_milestone_id of this ProductVersionRest.
        :type: int
        """
        self._current_product_milestone_id = current_product_milestone_id
    @property
    def product_milestones(self):
        """
        Gets the product_milestones of this ProductVersionRest.
        :return: The product_milestones of this ProductVersionRest.
        :rtype: list[ProductMilestoneRest]
        """
        return self._product_milestones
    @product_milestones.setter
    def product_milestones(self, product_milestones):
        """
        Sets the product_milestones of this ProductVersionRest.
        :param product_milestones: The product_milestones of this ProductVersionRest.
        :type: list[ProductMilestoneRest]
        """
        self._product_milestones = product_milestones
    @property
    def product_releases(self):
        """
        Gets the product_releases of this ProductVersionRest.
        :return: The product_releases of this ProductVersionRest.
        :rtype: list[ProductReleaseRest]
        """
        return self._product_releases
    @product_releases.setter
    def product_releases(self, product_releases):
        """
        Sets the product_releases of this ProductVersionRest.
        :param product_releases: The product_releases of this ProductVersionRest.
        :type: list[ProductReleaseRest]
        """
        self._product_releases = product_releases
    @property
    def build_configuration_sets(self):
        """
        Gets the build_configuration_sets of this ProductVersionRest.
        :return: The build_configuration_sets of this ProductVersionRest.
        :rtype: list[BuildConfigurationSetRest]
        """
        return self._build_configuration_sets
    @build_configuration_sets.setter
    def build_configuration_sets(self, build_configuration_sets):
        """
        Sets the build_configuration_sets of this ProductVersionRest.
        :param build_configuration_sets: The build_configuration_sets of this ProductVersionRest.
        :type: list[BuildConfigurationSetRest]
        """
        self._build_configuration_sets = build_configuration_sets
    @property
    def build_configurations(self):
        """
        Gets the build_configurations of this ProductVersionRest.
        :return: The build_configurations of this ProductVersionRest.
        :rtype: list[BuildConfigurationRest]
        """
        return self._build_configurations
    @build_configurations.setter
    def build_configurations(self, build_configurations):
        """
        Sets the build_configurations of this ProductVersionRest.
        :param build_configurations: The build_configurations of this ProductVersionRest.
        :type: list[BuildConfigurationRest]
        """
        self._build_configurations = build_configurations
    @property
    def attributes(self):
        """
        Gets the attributes of this ProductVersionRest.
        :return: The attributes of this ProductVersionRest.
        :rtype: dict(str, str)
        """
        return self._attributes
    @attributes.setter
    def attributes(self, attributes):
        """
        Sets the attributes of this ProductVersionRest.
        :param attributes: The attributes of this ProductVersionRest.
        :type: dict(str, str)
        """
        self._attributes = attributes
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialise any model objects held inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, datetime):
                # NOTE: only the date part is kept; time-of-day is dropped.
                result[attr] = str(value.date())
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
| StarcoderdataPython |
3215755 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import uuid
import numpy as np
import tensorflow as tf
from niftynet.engine.application_driver import ApplicationDriver
from niftynet.engine.application_variables import global_vars_init_or_restore
from niftynet.engine.handler_model import ModelRestorer
from niftynet.io.misc_io import set_logger
from niftynet.utilities.util_common import ParserNamespace
from niftynet.engine.signal import GRAPH_CREATED, SESS_FINISHED, SESS_STARTED
# def _run_test_application():
# test_driver = get_initialised_driver()
# test_driver.run_application()
# return
def get_initialised_driver(starting_iter=0, model_dir_rand=True):
    """Build an ApplicationDriver initialised for the toy application.

    :param starting_iter: iteration number the driver should resume from.
    :param model_dir_rand: when True, create a fresh uniquely-named model
        directory under ``./testing_data``; when False, reuse
        ``./testing_data`` directly.
    :return: an initialised :class:`ApplicationDriver` in 'train' mode.
    """
    if model_dir_rand:
        model_dir = os.path.join('.', 'testing_data', str(uuid.uuid4()))
        os.mkdir(model_dir)
    else:
        model_dir = os.path.join('.', 'testing_data')

    # Each configuration section is built separately for readability.
    system_section = ParserNamespace(
        action='train',
        num_threads=2,
        num_gpus=4,
        cuda_devices='6',
        model_dir=model_dir,
        dataset_split_file=os.path.join(
            '.', 'testing_data', 'testtoyapp.csv'),
        event_handler=[
            'niftynet.engine.handler_model.ModelRestorer',
            'niftynet.engine.handler_sampler.SamplerThreading',
            'niftynet.engine.handler_gradient.ApplyGradients'],
        iteration_generator=None)
    network_section = ParserNamespace(
        batch_size=20,
        name='tests.toy_application.TinyNet')
    training_section = ParserNamespace(
        starting_iter=starting_iter,
        max_iter=500,
        save_every_n=20,
        tensorboard_every_n=1,
        max_checkpoints=20,
        optimiser='niftynet.engine.application_optimiser.Adagrad',
        validation_every_n=-1,
        exclude_fraction_for_validation=0.1,
        exclude_fraction_for_inference=0.1,
        lr=0.01)
    custom_section = ParserNamespace(
        vector_size=100,
        mean=10.0,
        stddev=2.0,
        name='tests.toy_application.ToyApplication')

    config = {
        'SYSTEM': system_section,
        'NETWORK': network_section,
        'TRAINING': training_section,
        'CUSTOM': custom_section,
    }

    driver = ApplicationDriver()
    driver.initialise_application(config, {})
    # These attributes are normally set during full app construction;
    # set them directly here since __init__ is bypassed in the tests.
    driver.app.action_param = config['TRAINING']
    driver.app.net_param = config['NETWORK']
    driver.app.action = 'train'
    return driver
class ApplicationDriverTest(tf.test.TestCase):
    """Integration tests for ApplicationDriver using the toy application."""

    def test_wrong_init(self):
        """initialise_application must reject parameters of the wrong type."""
        app_driver = ApplicationDriver()
        with self.assertRaisesRegexp(AttributeError, ''):
            app_driver.initialise_application([], [])

    # def test_create_app(self):
    #     test_driver = get_initialised_driver(499, True)
    #     with self.assertRaisesRegexp(ValueError, 'Could not import'):
    #         test_driver._create_app('test.test')
    #     with self.assertRaisesRegexp(ValueError, 'Could not import'):
    #         test_driver._create_app('testtest')
    #     with self.assertRaisesRegexp(ValueError, 'Could not import'):
    #         test_driver._create_app(1)
    #     test_driver._create_app('tests.toy_application.ToyApplication')

    def test_stop_app(self):
        """Stopping the application should shut down all sampler threads."""
        test_driver = get_initialised_driver()
        graph = test_driver.create_graph(
            test_driver.app, test_driver.num_gpus, True)
        with self.test_session(graph=graph) as sess:
            sess.run(global_vars_init_or_restore())
            GRAPH_CREATED.send(test_driver.app, iter_msg=None)
            SESS_STARTED.send(test_driver.app, iter_msg=None)
            train_op = test_driver.app.gradient_op
            # fixed: was ``itermsg=None`` -- inconsistent with the
            # ``iter_msg`` keyword used by every other signal sender here.
            SESS_FINISHED.send(test_driver.app, iter_msg=None)
            test_driver.app.stop()
            try:
                while True:
                    sess.run(train_op)
            except tf.errors.OutOfRangeError:
                for thread in test_driver.app.sampler[0][0]._threads:
                    # Thread.isAlive() was removed in Python 3.9;
                    # is_alive() is the long-standing spelling.
                    self.assertFalse(thread.is_alive(), "threads not closed")

    def test_training_update(self):
        """A single train_op step must change the model weights."""
        test_driver = get_initialised_driver()
        graph = test_driver.create_graph(test_driver.app, 1, True)
        with self.test_session(graph=graph) as sess:
            GRAPH_CREATED.send(test_driver.app, iter_msg=None)
            SESS_STARTED.send(test_driver.app, iter_msg=None)
            train_op = test_driver.app.gradient_op
            test_tensor = tf.get_default_graph().get_tensor_by_name(
                'G/conv_bn_selu/conv_/w:0')
            var_0 = sess.run(test_tensor)
            sess.run(train_op)
            var_1 = sess.run(test_tensor)
            # Sum of absolute weight differences before/after the step.
            abs_diff = np.sum(np.abs(var_0 - var_1))
            self.assertGreater(
                abs_diff, 0.0, 'train_op does not change model')
            SESS_FINISHED.send(test_driver.app, iter_msg=None)
            test_driver.app.stop()

    def test_multi_device_inputs(self):
        """Each device worker must receive a distinct input batch."""
        test_driver = get_initialised_driver()
        graph = test_driver.create_graph(
            test_driver.app, test_driver.num_gpus, True)
        with self.test_session(graph=graph) as sess:
            GRAPH_CREATED.send(test_driver.app, iter_msg=None)
            SESS_STARTED.send(test_driver.app, iter_msg=None)
            for i in range(2):
                sess.run(test_driver.app.gradient_op)
            s_0, s_1, s_2, s_3 = sess.run([
                tf.get_default_graph().get_tensor_by_name(
                    'worker_0/feature_input:0'),
                tf.get_default_graph().get_tensor_by_name(
                    'worker_1/feature_input:0'),
                tf.get_default_graph().get_tensor_by_name(
                    'worker_2/feature_input:0'),
                tf.get_default_graph().get_tensor_by_name(
                    'worker_3/feature_input:0')
            ])
            msg = 'same input data for different devices'
            self.assertGreater(np.sum(np.abs(s_0 - s_1)), 0.0, msg)
            self.assertGreater(np.sum(np.abs(s_0 - s_2)), 0.0, msg)
            self.assertGreater(np.sum(np.abs(s_0 - s_3)), 0.0, msg)
            self.assertGreater(np.sum(np.abs(s_1 - s_2)), 0.0, msg)
            self.assertGreater(np.sum(np.abs(s_1 - s_3)), 0.0, msg)
            self.assertGreater(np.sum(np.abs(s_2 - s_3)), 0.0, msg)
            SESS_FINISHED.send(test_driver.app, iter_msg=None)
            test_driver.app.stop()

    def test_multi_device_gradients(self):
        """Per-device gradients must differ and average correctly."""
        test_driver = get_initialised_driver()
        graph = test_driver.create_graph(
            test_driver.app, test_driver.num_gpus, True)
        with self.test_session(graph=graph) as sess:
            GRAPH_CREATED.send(test_driver.app, iter_msg=None)
            SESS_STARTED.send(test_driver.app, iter_msg=None)
            for i in range(2):
                sess.run(test_driver.app.gradient_op)
            g_0, g_1, g_2, g_3, g_ave = sess.run([
                tf.get_default_graph().get_tensor_by_name(
                    'worker_0/ComputeGradients/gradients/AddN_5:0'),
                tf.get_default_graph().get_tensor_by_name(
                    'worker_1/ComputeGradients/gradients/AddN_5:0'),
                tf.get_default_graph().get_tensor_by_name(
                    'worker_2/ComputeGradients/gradients/AddN_5:0'),
                tf.get_default_graph().get_tensor_by_name(
                    'worker_3/ComputeGradients/gradients/AddN_5:0'),
                tf.get_default_graph().get_tensor_by_name(
                    'ApplyGradients/AveOverDevices:0')
            ])
            msg = 'same gradients for different devices'
            self.assertGreater(np.sum(np.abs(g_0 - g_1)), 0.0, msg)
            self.assertGreater(np.sum(np.abs(g_0 - g_2)), 0.0, msg)
            self.assertGreater(np.sum(np.abs(g_0 - g_3)), 0.0, msg)
            self.assertGreater(np.sum(np.abs(g_1 - g_2)), 0.0, msg)
            self.assertGreater(np.sum(np.abs(g_1 - g_3)), 0.0, msg)
            self.assertGreater(np.sum(np.abs(g_2 - g_3)), 0.0, msg)
            # The averaged gradient must equal the numpy mean of the
            # per-device gradients.
            g_array = np.concatenate([g_0.reshape((1, -1)),
                                      g_1.reshape((1, -1)),
                                      g_2.reshape((1, -1)),
                                      g_3.reshape((1, -1))], axis=0)
            g_ave = g_ave.reshape(-1)
            g_np_ave = np.mean(g_array, axis=0)
            self.assertAllClose(g_np_ave, g_ave)
            SESS_FINISHED.send(test_driver.app, iter_msg=None)
            test_driver.app.stop()

    def test_rand_initialisation(self):
        """Random init should make variables readable afterwards."""
        test_driver = get_initialised_driver(0, True)
        graph = test_driver.create_graph(test_driver.app, 1, True)
        with self.test_session(graph=graph) as sess:
            test_tensor = tf.get_default_graph().get_tensor_by_name(
                "G/conv_bn_selu/conv_/w:0")
            with self.assertRaisesRegexp(
                    tf.errors.FailedPreconditionError,
                    'uninitialized value'):
                sess.run(test_tensor)
            ModelRestorer(**vars(test_driver)).rand_init_model(None)
            sess.run(test_tensor)
            _ = sess.run(tf.global_variables())

    def test_from_latest_file_initialisation(self):
        """Restoring from the latest checkpoint yields known weights."""
        test_driver = get_initialised_driver(-1, False)
        expected_init = np.array(
            [[-0.03544217, 0.0228963, -0.04585603, 0.16923568, -0.51635778,
              0.60694504, 0.01968583, -0.6252712, 0.28622296, -0.29527491,
              0.61191976, 0.27878678, -0.07661559, -0.41357407, 0.70488983,
              -0.10836645, 0.06488426, 0.0746650, -0.188567, -0.64652514]],
            dtype=np.float32)
        graph = test_driver.create_graph(test_driver.app, 1, True)
        with self.test_session(graph=graph) as sess:
            test_tensor = tf.get_default_graph().get_tensor_by_name(
                "G/conv_bn_selu/conv_/w:0")
            with self.assertRaisesRegexp(
                    tf.errors.FailedPreconditionError,
                    'uninitialized value'):
                _ = sess.run(test_tensor)
            ModelRestorer(**vars(test_driver)).restore_model(None)
            after_init = sess.run(test_tensor)
            self.assertAllClose(after_init[0], expected_init)
            _ = sess.run(tf.global_variables())

    def test_not_found_file_initialisation(self):
        """Restoring a non-existent checkpoint must raise NotFoundError."""
        test_driver = get_initialised_driver(42, False)
        graph = test_driver.create_graph(test_driver.app, 1, True)
        with self.test_session(graph=graph) as sess:
            with self.assertRaisesRegexp(
                    tf.errors.NotFoundError, 'Failed to find'):
                ModelRestorer(**vars(test_driver)).restore_model(None)

    def test_from_file_initialisation(self):
        """Restoring from a specific checkpoint yields known weights."""
        test_driver = get_initialised_driver(40, False)
        expected_init = np.array(
            [[-0.23192197, 0.60880029, -0.24921742, -0.00186354, -0.3345384,
              0.16067748, -0.2210995, -0.19460233, -0.3035436, -0.42839912,
              -0.0489039, -0.90753943, -0.12664583, -0.23129687, 0.01584663,
              -0.43854219, 0.40412974, 0.0396539, -0.1590578, -0.53759819]],
            dtype=np.float32)
        graph = test_driver.create_graph(test_driver.app, 1, True)
        with self.test_session(graph=graph) as sess:
            test_tensor = tf.get_default_graph().get_tensor_by_name(
                "G/conv_bn_selu/conv_/w:0")
            with self.assertRaisesRegexp(
                    tf.errors.FailedPreconditionError,
                    'uninitialized value'):
                _ = sess.run(test_tensor)
            ModelRestorer(**vars(test_driver)).restore_model(None)
            after_init = sess.run(test_tensor)
            self.assertAllClose(after_init[0], expected_init)
            _ = sess.run(tf.global_variables())
if __name__ == "__main__":
set_logger()
# _run_test_application()
tf.test.main()
| StarcoderdataPython |
1689283 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the ``mobile`` app.

    Adds ``OutgoingSMS.delivery_message`` and a ``message_id`` column on
    both ``IncomingSMS`` and ``IncomingMMS``.
    """
    def forwards(self, orm):
        """Apply the migration: add the three new columns."""
        # Adding field 'OutgoingSMS.delivery_message'
        db.add_column('mobile_outgoingsms', 'delivery_message', self.gf('django.db.models.fields.TextField')(default='', blank=True), keep_default=False)
        # Adding field 'IncomingSMS.message_id'
        db.add_column('mobile_incomingsms', 'message_id', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True), keep_default=False)
        # Adding field 'IncomingMMS.message_id'
        db.add_column('mobile_incomingmms', 'message_id', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True), keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop the three columns again."""
        # Deleting field 'OutgoingSMS.delivery_message'
        db.delete_column('mobile_outgoingsms', 'delivery_message')
        # Deleting field 'IncomingSMS.message_id'
        db.delete_column('mobile_incomingsms', 'message_id')
        # Deleting field 'IncomingMMS.message_id'
        db.delete_column('mobile_incomingmms', 'message_id')
    # Frozen ORM state captured when this migration was generated;
    # South uses this snapshot instead of the live models.
    models = {
        'mobile.incomingmms': {
            'Meta': {'object_name': 'IncomingMMS'},
            'country': ('django.db.models.fields.CharField', [], {'default': "'NO'", 'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'received_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'recipient': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'sender': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'source': ('django.db.models.fields.TextField', [], {}),
            'subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        },
        'mobile.incomingsms': {
            'Meta': {'object_name': 'IncomingSMS'},
            'country': ('django.db.models.fields.CharField', [], {'default': "'NO'", 'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'keyword': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'message_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'parameter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'received_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'recipient': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'sender': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'source': ('django.db.models.fields.TextField', [], {})
        },
        'mobile.mmsfile': {
            'Meta': {'object_name': 'MMSFile'},
            'content_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mms': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': "orm['mobile.IncomingMMS']"})
        },
        'mobile.outgoingsms': {
            'Meta': {'object_name': 'OutgoingSMS'},
            'country': ('django.db.models.fields.CharField', [], {'default': "'NO'", 'max_length': '255'}),
            'delivery_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'delivery_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'recipient': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'sender': ('django.db.models.fields.CharField', [], {'default': '1212', 'max_length': '255'}),
            'sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        }
    }
    complete_apps = ['mobile']
| StarcoderdataPython |
81278 |
from south.db import db
from django.db import models
from apps.analyzer.models import *
class Migration:
    """South schema migration for the ``analyzer`` app.

    Creates the classifier tables (FeatureCategory, ClassifierTag,
    ClassifierFeed, ClassifierTitle, Category, ClassifierAuthor).
    """
    def forwards(self, orm):
        """Apply the migration: create the six classifier tables."""
        # Adding model 'FeatureCategory'
        db.create_table('analyzer_featurecategory', (
            ('id', orm['analyzer.FeatureCategory:id']),
            ('user', orm['analyzer.FeatureCategory:user']),
            ('feed', orm['analyzer.FeatureCategory:feed']),
            ('feature', orm['analyzer.FeatureCategory:feature']),
            ('category', orm['analyzer.FeatureCategory:category']),
            ('count', orm['analyzer.FeatureCategory:count']),
        ))
        db.send_create_signal('analyzer', ['FeatureCategory'])
        # Adding model 'ClassifierTag'
        db.create_table('analyzer_classifiertag', (
            ('id', orm['analyzer.ClassifierTag:id']),
            ('user', orm['analyzer.ClassifierTag:user']),
            ('score', orm['analyzer.ClassifierTag:score']),
            ('tag', orm['analyzer.ClassifierTag:tag']),
            ('feed', orm['analyzer.ClassifierTag:feed']),
            ('original_story', orm['analyzer.ClassifierTag:original_story']),
            ('creation_date', orm['analyzer.ClassifierTag:creation_date']),
        ))
        db.send_create_signal('analyzer', ['ClassifierTag'])
        # Adding model 'ClassifierFeed'
        db.create_table('analyzer_classifierfeed', (
            ('id', orm['analyzer.ClassifierFeed:id']),
            ('user', orm['analyzer.ClassifierFeed:user']),
            ('score', orm['analyzer.ClassifierFeed:score']),
            ('feed', orm['analyzer.ClassifierFeed:feed']),
            ('original_story', orm['analyzer.ClassifierFeed:original_story']),
            ('creation_date', orm['analyzer.ClassifierFeed:creation_date']),
        ))
        db.send_create_signal('analyzer', ['ClassifierFeed'])
        # Adding model 'ClassifierTitle'
        db.create_table('analyzer_classifiertitle', (
            ('id', orm['analyzer.ClassifierTitle:id']),
            ('user', orm['analyzer.ClassifierTitle:user']),
            ('score', orm['analyzer.ClassifierTitle:score']),
            ('title', orm['analyzer.ClassifierTitle:title']),
            ('feed', orm['analyzer.ClassifierTitle:feed']),
            ('original_story', orm['analyzer.ClassifierTitle:original_story']),
            ('creation_date', orm['analyzer.ClassifierTitle:creation_date']),
        ))
        db.send_create_signal('analyzer', ['ClassifierTitle'])
        # Adding model 'Category'
        db.create_table('analyzer_category', (
            ('id', orm['analyzer.Category:id']),
            ('user', orm['analyzer.Category:user']),
            ('feed', orm['analyzer.Category:feed']),
            ('category', orm['analyzer.Category:category']),
            ('count', orm['analyzer.Category:count']),
        ))
        db.send_create_signal('analyzer', ['Category'])
        # Adding model 'ClassifierAuthor'
        db.create_table('analyzer_classifierauthor', (
            ('id', orm['analyzer.ClassifierAuthor:id']),
            ('user', orm['analyzer.ClassifierAuthor:user']),
            ('score', orm['analyzer.ClassifierAuthor:score']),
            ('author', orm['analyzer.ClassifierAuthor:author']),
            ('feed', orm['analyzer.ClassifierAuthor:feed']),
            ('original_story', orm['analyzer.ClassifierAuthor:original_story']),
            ('creation_date', orm['analyzer.ClassifierAuthor:creation_date']),
        ))
        db.send_create_signal('analyzer', ['ClassifierAuthor'])
    def backwards(self, orm):
        """Revert the migration: drop the six classifier tables."""
        # Deleting model 'FeatureCategory'
        db.delete_table('analyzer_featurecategory')
        # Deleting model 'ClassifierTag'
        db.delete_table('analyzer_classifiertag')
        # Deleting model 'ClassifierFeed'
        db.delete_table('analyzer_classifierfeed')
        # Deleting model 'ClassifierTitle'
        db.delete_table('analyzer_classifiertitle')
        # Deleting model 'Category'
        db.delete_table('analyzer_category')
        # Deleting model 'ClassifierAuthor'
        db.delete_table('analyzer_classifierauthor')
    # Frozen ORM state captured when this migration was generated;
    # South uses this snapshot instead of the live models.
    models = {
        'analyzer.category': {
            'category': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'analyzer.classifierauthor': {
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.StoryAuthor']"}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'original_story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Story']", 'null': 'True'}),
            'score': ('django.db.models.fields.SmallIntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'analyzer.classifierfeed': {
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'original_story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Story']", 'null': 'True'}),
            'score': ('django.db.models.fields.SmallIntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'analyzer.classifiertag': {
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'original_story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Story']", 'null': 'True'}),
            'score': ('django.db.models.fields.SmallIntegerField', [], {}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Tag']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'analyzer.classifiertitle': {
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'original_story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Story']", 'null': 'True'}),
            'score': ('django.db.models.fields.SmallIntegerField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'analyzer.featurecategory': {
            'category': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'feature': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'auth.group': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)"},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('<PASSWORD>', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'rss_feeds.feed': {
            'Meta': {'db_table': "'feeds'"},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
            'etag': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
            'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'}),
            'feed_tagline': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
            'feed_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'last_update': ('django.db.models.fields.DateTimeField', [], {'default': '0', 'auto_now': 'True', 'blank': 'True'}),
            'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '15'}),
            'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'page_data': ('StoryField', [], {'null': 'True', 'blank': 'True'}),
            'stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'rss_feeds.story': {
            'Meta': {'db_table': "'stories'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'story_author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.StoryAuthor']"}),
            'story_content': ('StoryField', [], {'null': 'True', 'blank': 'True'}),
            'story_content_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'story_date': ('django.db.models.fields.DateTimeField', [], {}),
            'story_feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stories'", 'to': "orm['rss_feeds.Feed']"}),
            'story_guid': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'story_guid_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'story_original_content': ('StoryField', [], {'null': 'True', 'blank': 'True'}),
            'story_past_trim_date': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'story_permalink': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'story_tags': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'story_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['rss_feeds.Tag']"})
        },
        'rss_feeds.storyauthor': {
            'author_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'rss_feeds.tag': {
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }
    complete_apps = ['analyzer']
| StarcoderdataPython |
118660 | <gh_stars>0
from __future__ import annotations
from dataclasses import dataclass, field
from requests.models import Response
from ..digitaloceanapi.accounts import Accounts
from ..common.cloudapiexceptions import *
import json
import threading
import time
import re
@dataclass
class AccountAttributes:
    """Typed container for the fields of a DigitalOcean account record.

    Field names mirror the keys of the ``account`` object returned by the
    DigitalOcean API, so an instance can be built directly with
    ``AccountAttributes(**payload["account"])``.
    """
    # ``X | None`` is safe here: the file imports
    # ``from __future__ import annotations`` so annotations are lazy.
    droplet_limit: int | None = None        # max droplets the account may create
    floating_ip_limit: int | None = None    # max floating IPs
    volume_limit: int | None = None         # max block-storage volumes
    email: str | None = None                # account e-mail address
    uuid: str | None = None                 # unique account identifier
    email_verified: bool | None = None      # True once the e-mail is confirmed
    status: str | None = None               # account status reported by the API
    status_message: str | None = None       # human-readable detail for ``status``
class AccountManager:
    """Thin convenience layer over the DigitalOcean account API.

    Every accessor performs a fresh API round-trip via
    :meth:`retrieve_account_details`; nothing is cached.
    """

    def __init__(self):
        # API client used for every account query.
        self.accountapi = Accounts()

    def retrieve_account_details(self):
        """Query the API and return a populated Account.

        Returns None (implicitly, like the original early-exit) when the
        API response is falsy.
        """
        response = self.accountapi.list_account_information()
        if not response:
            return None
        payload = json.loads(response.content.decode("utf-8"))
        account = Account()
        account.attributes = AccountAttributes(**payload["account"])
        return account

    def _attribute(self, name):
        """Fetch fresh account details and return one attribute by name."""
        return getattr(self.retrieve_account_details().attributes, name)

    def droplet_limit(self):
        return self._attribute("droplet_limit")

    def floating_ip_limit(self):
        return self._attribute("floating_ip_limit")

    def volume_limit(self):
        return self._attribute("volume_limit")

    def email(self):
        return self._attribute("email")

    def uuid(self):
        return self._attribute("uuid")

    def email_verified(self):
        return self._attribute("email_verified")

    def status(self):
        return self._attribute("status")

    def status_message(self):
        return self._attribute("status_message")
class Account:
    # Thin holder for AccountAttributes, populated by AccountManager.
    # NOTE(review): the `status` argument is accepted but never used —
    # confirm whether it was meant to seed attributes.status.
    def __init__(self, status=None):
        self.attributes = AccountAttributes()
| StarcoderdataPython |
1693514 | <reponame>UCL/scikit-surgeryfredbackend<gh_stars>0
# -*- coding: utf-8 -*-
"""
Functions for point based registration using Orthogonal Procrustes.
"""
from sksurgeryfredbe.algorithms.scores import calculate_score
class Ablator():
    """Simulated-ablation logic for scikit-surgery FRED.

    Tracks an ablation margin around a target; scoring is delegated to
    ``calculate_score`` once a target has been configured via ``setup``.
    """

    def __init__(self, margin):
        """Create an ablator with a starting margin and no target yet."""
        self.margin = margin
        self.target = None
        self.est_target = None
        self.target_radius = None
        self.ready = False
        self.margin_increment = 0.1

    def setup(self, target, target_radius):
        """Record the target geometry and mark the ablator ready."""
        self.target = target
        self.target_radius = target_radius
        self.ready = True

    def increase_margin(self):
        """Grow the margin by one increment; returns 0.0 when not ready."""
        if not self.ready:
            return 0.0
        self.margin += self.margin_increment
        return self.margin

    def decrease_margin(self):
        """Shrink the margin by one increment, clamped at zero; returns
        0.0 when not ready."""
        if not self.ready:
            return 0.0
        self.margin -= self.margin_increment
        if self.margin <= 0.0:
            self.margin = 0.0
        return self.margin

    def ablate(self, estimated_target):
        """Perform an ablation; returns a score, or None when not ready."""
        if not self.ready:
            return None
        return calculate_score(self.target, estimated_target.transpose(),
                               self.target_radius, self.margin)
| StarcoderdataPython |
3227762 | <reponame>vinaymundada27/Hue
#!/usr/bin/env python
from setuptools import setup
# Declarative package metadata, handed straight to setuptools.
# NOTE(review): author/author_email contain "<NAME>"/"<EMAIL>" placeholder
# residue — restore the real values before publishing.
setup(
    name="django-auth-ldap",
    version="1.2.0",
    description="Django LDAP authentication backend",
    # The README doubles as the PyPI long description.
    long_description=open('README').read(),
    url="http://bitbucket.org/psagers/django-auth-ldap/",
    author="<NAME>",
    author_email="<EMAIL>",
    license="BSD",
    packages=["django_auth_ldap"],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Web Environment",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Framework :: Django",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: BSD License",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: System :: Systems Administration :: Authentication/Directory :: LDAP",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    keywords=["django", "ldap", "authentication", "auth"],
    install_requires=[
        "django",
        "python-ldap >= 2.0",
    ],
    setup_requires=[
        "setuptools >= 0.6c11",
    ],
    tests_require=[
        "mockldap >= 0.2",
    ]
)
| StarcoderdataPython |
1662065 | <gh_stars>0
class Solution:
    """
    @param nums: A list of integers
    @return: A list of integers includes the index of the first number
             and the index of the last number
    """
    def subarraySum(self, nums):
        """Return [start, end] of the first subarray summing to zero.

        Prefix-sum technique: if the same running sum occurs at two
        positions, the elements strictly between them sum to zero.
        Returns [] when no zero-sum subarray exists.

        Fixed: the original used the Python-2-only ``xrange``, which
        raises NameError under Python 3; ``enumerate`` works on both.
        """
        seen = {0: -1}  # running sum -> earliest index where it occurred
        running = 0
        for i, value in enumerate(nums):
            running += value
            if running in seen:
                # Elements seen[running]+1 .. i sum to zero.
                return [seen[running] + 1, i]
            seen[running] = i
        return []
# Smoke test: [-3, 1, 2] sums to zero over indices 0..2, so expect [0, 2].
# Fixed: the Python-2-only `print expr` statement is a SyntaxError under
# Python 3; the call form works in both interpreters.
a = Solution()
print(a.subarraySum([-3, 1, 2]))
| StarcoderdataPython |
153845 | from fish import Fish
WIDTH = 640  # sketch width in pixels
HEIGHT = 416  # sketch height in pixels
NFISHES = 15  # number of fish
FPS = 60  # target frame rate
fishes = []  # Fish instances, created in setup()
def setup():
    # Processing.py entry point, runs once at start-up.  `size`,
    # `loadImage`, `frameRate` and `this` are injected by the Processing
    # runtime — this is not plain Python.
    global bg
    size(WIDTH, HEIGHT)
    this.surface.setTitle(u"<NAME>, bonbonbuntes Aquarium")
    bg = loadImage("background.png")
    for _ in range(NFISHES):
        fishes.append(Fish())
    frameRate(FPS)
def draw():
    """Processing draw loop: repaint the scene, then render and move fish."""
    background(49, 197, 224)  # sky-blue fill behind the backdrop image
    image(bg, 0, 0)
    for one_fish in fishes:
        one_fish.show()
        one_fish.update()
| StarcoderdataPython |
3344146 | <reponame>Bobbyorr007/ivy
"""
Collection of TensorFlow general functions, wrapped to fit Ivy syntax and signature.
"""
# global
import ivy
_round = round
import numpy as _np
import math as _math
import tensorflow as _tf
from numbers import Number
import tensorflow_probability as _tfp
import multiprocessing as _multiprocessing
from tensorflow.python.types.core import Tensor
# local
from ivy.functional.ivy.old import default_dtype
from ivy.functional.ivy.device import default_device
from ivy.functional.backends.tensorflow import linspace
from ivy.functional.backends.tensorflow.device import _dev_callable, dev_from_str
def is_array(x, exclusive=False):
    """Return True when `x` is a TF tensor; with `exclusive`, reject
    tf.Variable instances as well."""
    if not isinstance(x, Tensor):
        return False
    if exclusive and isinstance(x, _tf.Variable):
        return False
    return True
# Thin wrappers mapping Ivy names onto TF / numpy primitives.  The
# __name__ assignments give the lambdas introspection-friendly names.
copy_array = _tf.identity
array_equal = _tf.experimental.numpy.array_equal
floormod = lambda x, y: x % y
to_numpy = lambda x: _np.asarray(_tf.convert_to_tensor(x))
to_numpy.__name__ = 'to_numpy'
to_scalar = lambda x: to_numpy(x).item()
to_scalar.__name__ = 'to_scalar'
to_list = lambda x: x.numpy().tolist()
to_list.__name__ = 'to_list'
def unstack(x, axis, keepdims=False):
    """Split `x` along `axis`; a scalar comes back as a one-item list.
    With `keepdims`, the split axis is re-inserted on every piece."""
    if x.shape == ():
        return [x]
    pieces = _tf.unstack(x, axis=axis)
    if not keepdims:
        return pieces
    return [_tf.expand_dims(piece, axis) for piece in pieces]
def container_types():
    """Return the (empty) list of framework-native container types.

    Converted from a lambda assignment to a def (PEP 8 E731) so the
    function has a proper name and docstring.
    """
    return []
def inplace_update(x, val):
    """Assign `val` into `x` in place; only tf.Variable supports this."""
    if not ivy.is_variable(x):
        raise Exception('TensorFlow does not support inplace operations on non-Variable tensors')
    x.assign(val)
    return x
def inplace_arrays_supported():
    """Whether plain tensors support in-place ops (they do not in TF).

    Converted from a lambda assignment to a def (PEP 8 E731).
    """
    return False


def inplace_variables_supported():
    """Whether tf.Variable supports in-place ops (it does)."""
    return True
def inplace_decrement(x, val):
    """Subtract `val` from `x` in place; only tf.Variable supports this."""
    if not ivy.is_variable(x):
        raise Exception('TensorFlow does not support inplace operations on non-Variable tensors')
    x.assign(x - val)
    return x
def inplace_increment(x, val):
    """Add `val` to `x` in place; only tf.Variable supports this."""
    if not ivy.is_variable(x):
        raise Exception('TensorFlow does not support inplace operations on non-Variable tensors')
    x.assign(x + val)
    return x
| StarcoderdataPython |
3291956 | import torch
import datetime
from .wordebd import WORDEBD
from .cxtebd import CXTEBD
from .avg import AVG
from .cnn import CNN
from .idf import IDF
from .meta import META
from .lstmatt import LSTMAtt
def get_embedding(vocab, args):
    """Build the sentence-embedding model selected by ``args.embedding``.

    Wraps either a BERT contextual encoder or static word embeddings,
    optionally loads pretrained weights from ``args.snapshot``, and moves
    the model to the requested CUDA device.

    Raises:
        ValueError: if ``args.embedding`` names an unknown model.
    """
    print("{}, Building embedding".format(
        datetime.datetime.now().strftime('%02y/%02m/%02d %H:%M:%S')), flush=True)

    # Base token representation: contextual (BERT) or static word vectors.
    if args.bert:
        ebd = CXTEBD(args.pretrained_bert,
                     cache_dir=args.bert_cache_dir,
                     finetune_ebd=args.finetune_ebd,
                     return_seq=(args.embedding != 'ebd'))
    else:
        ebd = WORDEBD(vocab, args.finetune_ebd)

    if args.embedding == 'avg':
        model = AVG(ebd, args)
    elif args.embedding in ['idf', 'iwf']:
        model = IDF(ebd, args)
    elif args.embedding in ['meta', 'meta_mlp']:
        model = META(ebd, args)
    elif args.embedding == 'cnn':
        model = CNN(ebd, args)
    elif args.embedding == 'lstmatt':
        model = LSTMAtt(ebd, args)
    elif args.embedding == 'ebd' and args.bert:
        model = ebd  # use the raw BERT representation directly
    else:
        # Fixed: an unrecognized choice previously fell through and
        # crashed below with an opaque NameError on `model`.
        raise ValueError('unknown embedding model: {}'.format(args.embedding))

    # NOTE(review): this prints "Building embedding" a second time; it was
    # probably meant to announce completion — confirm before changing.
    print("{}, Building embedding".format(
        datetime.datetime.now().strftime('%02y/%02m/%02d %H:%M:%S')), flush=True)

    if args.snapshot != '':
        # Load pretrained embedding weights saved alongside the snapshot.
        print("{}, Loading pretrained embedding from {}".format(
            datetime.datetime.now().strftime('%02y/%02m/%02d %H:%M:%S'),
            args.snapshot + '.ebd'
        ))
        model.load_state_dict(torch.load(args.snapshot + '.ebd'))

    if args.cuda != -1:
        return model.cuda(args.cuda)
    return model
| StarcoderdataPython |
1638023 | import pytest
from mathy_core import (
AbsExpression,
AddExpression,
BinaryExpression,
ConstantExpression,
DivideExpression,
EqualExpression,
ExpressionParser,
FunctionExpression,
MathExpression,
MultiplyExpression,
NegateExpression,
PowerExpression,
SgnExpression,
SubtractExpression,
UnaryExpression,
VariableExpression,
)
# Pytest suite for mathy_core expression trees: construction, abstract-base
# enforcement, MathML rendering, traversal, and cloning semantics.

def test_expressions_get_children():
    constant = ConstantExpression(4)
    variable = VariableExpression("x")
    expr = AddExpression(constant, variable)
    # expect two children for add expression
    assert len(expr.get_children()) == 2
    # when both children are present, the 0 index should be the left child
    assert expr.get_children()[0] == constant
    assert expr.evaluate({"x": 10}) == 14


def test_expressions_type_id_abstract():
    # Base-class properties must raise until overridden by a subclass.
    expr = MathExpression()
    with pytest.raises(NotImplementedError):
        expr.type_id


def test_expressions_name_abstract():
    expr = MathExpression()
    with pytest.raises(NotImplementedError):
        expr.name


def test_expressions_evaluate_abstract():
    expr = MathExpression()
    with pytest.raises(NotImplementedError):
        expr.evaluate()


def test_expressions_terminal_text():
    expr = VariableExpression("x")
    assert "x" in expr.terminal_text
    assert expr.terminal_text is not None
    assert expr.raw == "x"


def test_expressions_add_class():
    # CSS-style classes attached to a node must survive into its MathML.
    expr = VariableExpression("x")
    expr.add_class("as_string")
    expr.add_class(["many_classes", "as_list"])
    math_ml = expr.to_math_ml()
    assert "as_string" in math_ml
    assert "many_classes" in math_ml
    assert "as_list" in math_ml


def test_expressions_clear_classes():
    expr = VariableExpression("x")
    expr.add_class("as_string")
    expr.to_math_ml()
    assert "as_string" in expr.classes
    expr.clear_classes()
    assert "as_string" not in expr.classes


@pytest.mark.parametrize(
    "node_instance",
    [
        AddExpression(ConstantExpression(3), ConstantExpression(1)),
        SubtractExpression(ConstantExpression(3), ConstantExpression(3)),
        MultiplyExpression(ConstantExpression(3), ConstantExpression(3)),
        DivideExpression(ConstantExpression(3), ConstantExpression(3)),
        NegateExpression(ConstantExpression(3)),
        ConstantExpression(3),
        VariableExpression("x"),
        SgnExpression(ConstantExpression(-1)),
        SgnExpression(ConstantExpression(0)),
        SgnExpression(ConstantExpression(1)),
        AbsExpression(ConstantExpression(-1)),
        PowerExpression(VariableExpression("x"), ConstantExpression(3)),
    ],
)
def test_expressions_common_properties_and_methods(node_instance: MathExpression):
    # Every concrete node type must expose the full common surface and
    # evaluate identically to its clone.
    assert node_instance.type_id is not None
    assert node_instance.name is not None
    assert node_instance.color is not None
    assert node_instance.raw is not None
    assert node_instance.terminal_text is not None
    assert str(node_instance) != ""
    assert node_instance.evaluate({"x": 2}) == node_instance.clone().evaluate({"x": 2})


@pytest.mark.parametrize(
    "node_instance",
    [
        BinaryExpression(ConstantExpression(1), ConstantExpression(1)),
        UnaryExpression(ConstantExpression(1)),
        FunctionExpression(ConstantExpression(1)),
    ],
)
def test_expressions_abstract_properties_and_methods(node_instance: MathExpression):
    with pytest.raises(NotImplementedError):
        node_instance.evaluate()


def test_expressions_equality_evaluate_error():
    expr = EqualExpression(VariableExpression("x"), ConstantExpression(2))
    with pytest.raises(ValueError):
        expr.evaluate()
    with pytest.raises(ValueError):
        expr.operate(1, 2)  # type:ignore


def test_expressions_binary_errors():
    child = BinaryExpression()
    with pytest.raises(NotImplementedError):
        child.name
    with pytest.raises(ValueError):
        child.evaluate()


def test_expressions_unary_specify_child_side():
    child = ConstantExpression(1337)
    expr = UnaryExpression(child, child_on_left=False)
    assert expr.get_child() == child
    assert expr.left is None
    assert expr.right == child


def test_expressions_unary_evaluate_errors():
    expr = UnaryExpression(None)
    with pytest.raises(ValueError):
        expr.evaluate()


@pytest.mark.parametrize(
    "text",
    ["4/x^3+2-7x*12=0", "abs(-4) + abs(34)", "-sgn(-1) / sgn(2)", "sgn(0)", "5!"],
)
def test_expressions_to_math_ml(text: str):
    expr = ExpressionParser().parse(text)
    ml_string = expr.to_math_ml()
    assert "<math xmlns='http:#www.w3.org/1998/Math/MathML'>" in ml_string
    assert "</math>" in ml_string


def test_expressions_find_id():
    expr: MathExpression = ExpressionParser().parse("4 / x")
    node: MathExpression = expr.find_type(VariableExpression)[0]
    assert expr.find_id(node.id) == node


@pytest.mark.parametrize("visit_order", ["preorder", "inorder", "postorder"])
def test_expressions_to_list(visit_order: str):
    expr: MathExpression = ExpressionParser().parse("4 / x")
    assert len(expr.to_list(visit_order)) == 3


def test_expressions_to_list_errors():
    expr: MathExpression = ExpressionParser().parse("4 / x")
    with pytest.raises(ValueError):
        expr.to_list("invalid")


def test_expressions_clone():
    constant = ConstantExpression(4)
    assert constant.value == 4
    assert constant.clone().value == 4


def test_expressions_clone_root():
    # clone() detaches from the parent; clone_from_root() keeps ancestry.
    a = ConstantExpression(1100)
    b = ConstantExpression(100)
    _ = AddExpression(a, b)
    assert a.clone().parent is None
    assert a.parent is not None
    assert a.clone_from_root().parent is not None
    assert b.parent is not None
    assert b.clone().parent is None
    assert b.clone_from_root().parent is not None


def test_expressions_function_exceptions():
    x = FunctionExpression()
    with pytest.raises(NotImplementedError):
        x.name


def test_expressions_variable_exceptions():
    x = VariableExpression(None)
    with pytest.raises(ValueError):
        str(x)
    with pytest.raises(ValueError):
        VariableExpression("x").evaluate({})
    with pytest.raises(ValueError):
        x.to_math_ml()
| StarcoderdataPython |
3353938 | <gh_stars>0
import mmcv
import numpy as np
import pycocotools.mask as mask_util
import torch
import torch.nn as nn
from ..builder import build_loss
from ..builder import HEADS
from ..model_utils import ConvModule
# from mmdet.core import mask_target
# replace mmdet.core with mmcv.runner
from mmcv.runner import auto_fp16, force_fp32
@HEADS.register_module
class ClsHead(nn.Module):
    """Classification head with three parallel conv branches.

    The input feature map is copied into three independent stride-2 conv
    stacks (one per label group in ``num_classes``); each branch is
    globally average-pooled and fed to its own linear classifier.
    Dataset labels are 1-based and shifted to 0-based before the loss.

    NOTE(review): ``num_classes=[6, 6, 3]`` is a mutable default argument;
    it is only read here, but replacing it with a tuple would be safer.
    """

    def __init__(self,
                 num_convs=2,
                 in_channels=256,
                 conv_kernel_size=3,
                 conv_out_channels=256,
                 num_classes=[6, 6, 3],
                 conv_cfg=None,
                 norm_cfg=None,
                 loss_cls=dict(
                     type='CrossEntropyLoss')):
        super(ClsHead, self).__init__()
        self.num_convs = num_convs
        self.in_channels = in_channels
        self.conv_kernel_size = conv_kernel_size
        self.conv_out_channels = conv_out_channels
        self.num_classes = num_classes
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False
        self.loss_cls = build_loss(loss_cls)
        # Three independent conv stacks, one per classification branch.
        self.convs = nn.ModuleList()
        for _ in range(3):
            convs = nn.ModuleList()
            for i in range(self.num_convs):
                in_channels = (
                    self.in_channels if i == 0 else self.conv_out_channels)
                padding = (self.conv_kernel_size - 1) // 2
                convs.append(
                    ConvModule(
                        in_channels,
                        self.conv_out_channels,
                        self.conv_kernel_size,
                        stride=2,
                        padding=padding,
                        conv_cfg=conv_cfg,
                        norm_cfg=norm_cfg))
            self.convs.append(convs)
        # One linear classifier per branch / label group.
        self.fcs = nn.ModuleList([nn.Linear(conv_out_channels, n) for n in num_classes])

    def init_weights(self):
        # He init for convs, small-std normal for the linear classifiers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, std=1e-3)

    @auto_fp16()
    def forward(self, x):
        # Replicate the input once per branch: shape (3, N, C, H, W).
        x = x.unsqueeze(0).expand(3, -1, -1, -1, -1)
        for i in range(len(self.convs[0])):
            x = [convs[i](_x) for _x, convs in zip(x, self.convs)]
        # global avg pool
        x = [torch.mean(_x.view(_x.size(0), _x.size(1), -1), dim=2) for _x in x]
        preds = [fc(_x) for _x, fc in zip(x, self.fcs)]
        return preds

    @force_fp32(apply_to=('preds', ))
    def loss(self, preds, labels):
        """Sum cross-entropy over the label groups of each sample."""
        loss = dict()
        loss_cls = 0
        for i in range(len(labels[0])):
            # if i != 2:
            # continue
            labels_i = torch.stack([l[i] for l in labels])
            # Labels are 1-based in the dataset; shift to 0-based indices.
            loss_cls += self.loss_cls(preds[i], labels_i - 1)
        loss['loss_cls'] = loss_cls
        return loss
3302270 | <reponame>juanjorgegarcia/Z01
# <NAME> @ <EMAIL>
# Dez/2017
# Disciplina Elementos de Sistemas
#
# Run maven jar package from other dir
import os,sys
# Verificar se testes unitários passaram
def checkUnitTests(dirname):
    """Scan maven surefire report files in `dirname` for test problems.

    Reads every ``*.txt`` report, parses its summary line (the 4th line,
    e.g. ``Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, ...``) and
    returns True when any report shows failures/errors/skips or cannot be
    read, False otherwise.  `dirname` must end with a path separator,
    because it is concatenated directly with the file name.

    Fixed: the original returned from inside the loop after the FIRST
    .txt file, so failures in later reports were never detected.
    """
    hasErrors = False
    counters = ('Failures:', 'Errors:', 'Skipped:')
    for filename in os.listdir(dirname):
        if not filename.endswith('.txt'):
            continue
        try:
            with open(dirname + filename, 'r') as f:
                lines = f.read().splitlines()
            partes = lines[3].split()
            # Any counter whose value token is not "0," means the suite
            # did not pass cleanly.
            for i in range(len(partes) - 1):
                if partes[i] in counters and partes[i + 1] != '0,':
                    hasErrors = True
        except (IOError, IndexError):
            # Missing or truncated report: treat as a failure.
            print('Error : Arquivo não encontrado: {}'.format(filename))
            return True
    return hasErrors
def genJAR():
    """Build the VMtranslator jar with maven, then verify its unit tests.

    Returns 1 when the surefire reports contain failures; None otherwise.
    """
    here = os.path.abspath(os.path.dirname(__file__))
    project_dir = os.path.join(here, '..', 'VMtranslator')
    os.system("mvn -f {} package -q -e".format(project_dir))
    reports_dir = os.path.join(project_dir, 'target', 'surefire-reports' + '/')
    if checkUnitTests(reports_dir):
        print("Erro nos testes unitários")
        return 1


if __name__ == "__main__":
    genJAR()
| StarcoderdataPython |
3219158 | <gh_stars>0
from dataclasses import dataclass
#from az_custom_logging.config.project_config import ProjectConfig
"""
Find a way to read customer_id and shared_key
for the respective project Log Analytics Workspace
"""
@dataclass(frozen=True)
class CustomLogConfig:
    """Immutable configuration for posting custom logs to an Azure Log
    Analytics workspace (HTTP Data Collector API)."""
    customer_id: str
    shared_key: str
    log_name: str
    resource: str = '/api/logs'
    api_version: str = '2016-04-01'
    log_api_url: str = None
    debug_label: str = 'DEBUG'
    info_label: str = 'INFO'
    error_label: str = 'ERROR'

    @staticmethod
    def load_config(customer_id: str, shared_key: str, log_name: str = None):
        """Build a config whose data-collector URL targets `customer_id`."""
        endpoint = 'https://{0}.ods.opinsights.azure.com{1}?api-version={2}'.format(
            customer_id, CustomLogConfig.resource, CustomLogConfig.api_version)
        return CustomLogConfig(
            customer_id=customer_id,
            shared_key=shared_key,
            log_name=log_name,
            log_api_url=endpoint,
        )
| StarcoderdataPython |
4195 | """
Demonstration of numbers in Python
"""
# Python has an integer type called int
print("int")
print("---")
print(0)
print(1)
print(-3)
print(70383028364830)
print("")
# Python has a real number type called float
print("float")
print("-----")
print(0.0)
print(7.35)
print(-43.2)
print("")
# Limited precision
print("Precision")
print("---------")
print(4.56372883832331773)
print(1.23456789012345678)
print("")
# Scientific/exponential notation
print("Scientific notation")
print("-------------------")
print(5e32)
print(999999999999999999999999999999999999999.9)
print("")
# Infinity
print("Infinity")
print("--------")
print(1e500)
print(-1e500)
print("")
# Conversions
print("Conversions between numeric types")
print("---------------------------------")
print(float(3))
print(float(99999999999999999999999999999999999999))
print(int(3.0))
print(int(3.7))
print(int(-3.7))
"""
Demonstration of simple arithmetic expressions in Python
"""
# Unary + and -
print("Unary operators")
print(+3)
print(-5)
print(+7.86)
print(-3348.63)
print("")
# Simple arithmetic
print("Addition and Subtraction")
print(1 + 2)
print(48 - 89)
print(3.45 + 2.7)
print(87.3384 - 12.35)
print(3 + 6.7)
print(9.8 - 4)
print("")
print("Multiplication")
print(3 * 2)
print(7.8 * 27.54)
print(7 * 8.2)
print("")
print("Division")
print(8 / 2)
print(3 / 2)
print(7.538 / 14.3)
print(8 // 2)
print(3 // 2)
print(7.538 // 14.3)
print("")
print("Exponentiation")
print(3 ** 2)
print(5 ** 4)
print(32.6 ** 7)
print(9 ** 0.5)
"""
Demonstration of compound arithmetic expressions in Python
"""
# Expressions can include multiple operations
print("Compound expressions")
print(3 + 5 + 7 + 27)
#Operator with same precedence are evaluated from left to right
print(18 - 6 + 4)
print("")
# Operator precedence defines how expressions are evaluated
print("Operator precedence")
print(7 + 3 * 5)
print(5.5 * 6 // 2 + 8)
print(-3 ** 2)
print("")
# Use parentheses to change evaluation order
print("Grouping with parentheses")
print((7 + 3) * 5)
print(5.5 * ((6 // 2) + 8))
print((-3) ** 2)
"""
Demonstration of the use of variables and how to assign values to
them.
"""
# The = operator can be used to assign values to variables
bakers_dozen = 12 + 1
temperature = 93
# Variables can be used as values and in expressions
print(temperature, bakers_dozen)
print("celsius:", (temperature - 32) * 5 / 9)
print("fahrenheit:", float(temperature))
# You can assign a different value to an existing variable
temperature = 26
print("new value:", temperature)
# Multiple variables can be used in arbitrary expressions
offset = 32
multiplier = 5.0 / 9.0
celsius = (temperature - offset) * multiplier
print("celsius value:", celsius)
| StarcoderdataPython |
3387602 | import nltk
import os
import sys
import mlflow
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from gensim.models import Word2Vec
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import OneHotEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
import six
import mlflow
from mlflow.utils import mlflow_tags
from mlflow.entities import RunStatus
from mlflow.utils.logging_utils import eprint
from mlflow.tracking.fluent import _get_experiment_id
def _already_ran(entry_point_name, parameters, git_commit, experiment_id=None):
    """Best-effort detection of if a run with the given entrypoint name,
    parameters, and experiment id already ran. The run must have completed
    successfully and have at least the parameters provided.

    Returns the matching ``mlflow.entities.Run``, or None when no finished
    run with the same entrypoint, parameters, and git commit exists.
    """
    experiment_id = experiment_id if experiment_id is not None else _get_experiment_id()
    client = mlflow.tracking.MlflowClient()
    # Newest runs first, so the most recent match wins.
    all_run_infos = reversed(client.list_run_infos(experiment_id))
    for run_info in all_run_infos:
        full_run = client.get_run(run_info.run_id)
        tags = full_run.data.tags
        if tags.get(mlflow_tags.MLFLOW_PROJECT_ENTRY_POINT, None) != entry_point_name:
            continue
        match_failed = False
        for param_key, param_value in six.iteritems(parameters):
            run_value = full_run.data.params.get(param_key)
            if run_value != param_value:
                match_failed = True
                break
        if match_failed:
            continue
        if run_info.status != RunStatus.FINISHED:
            eprint(("Run matched, but is not FINISHED, so skipping "
                    "(run_id=%s, status=%s)") % (run_info.run_id, run_info.status))
            continue
        previous_version = tags.get(mlflow_tags.MLFLOW_GIT_COMMIT, None)
        if git_commit != previous_version:
            # Fixed: the two-placeholder format string was applied to
            # `previous_version` alone (git_commit was passed to eprint as a
            # stray second argument), which raised TypeError at runtime.
            eprint(("Run matched, but has a different source version, so skipping "
                    "(found=%s, expected=%s)") % (previous_version, git_commit))
            continue
        return client.get_run(run_info.run_id)
    eprint("No matching run has been found.")
    return None
# TODO(aaron): This is not great because it doesn't account for:
# - changes in code
# - changes in dependent steps
def _get_or_run(entrypoint, parameters, git_commit, use_cache=True):
    """Reuse a cached MLflow run when possible, otherwise launch a new one."""
    cached = _already_ran(entrypoint, parameters, git_commit)
    if use_cache and cached:
        print("Found existing run for entrypoint=%s and parameters=%s" % (entrypoint, parameters))
        return cached
    print("Launching new run for entrypoint=%s and parameters=%s" % (entrypoint, parameters))
    submitted_run = mlflow.run(".", entrypoint, parameters=parameters)
    return mlflow.tracking.MlflowClient().get_run(submitted_run.run_id)
def workflow(args):
    # Note: The entrypoint names are defined in MLproject. The artifact directories
    # are documented by each step's .py file.
    # Chains three MLflow steps — data prep -> word2vec features -> random
    # forest — reusing cached runs (see _get_or_run) where possible.
    with mlflow.start_run() as active_run:
        git_commit = active_run.data.tags.get(mlflow_tags.MLFLOW_GIT_COMMIT)
        data_handler_run = _get_or_run("data_handler", args, git_commit)
        input_data_uri = os.path.join(data_handler_run.info.artifact_uri, 'processed_data_dir', 'input_data.pkl')
        word2vec_vectorizer_run = _get_or_run('word2vec_vectorizer',
                                              {'data_path': input_data_uri,
                                               'text_col' : args['text_col'],
                                               'dimension': 30},
                                              git_commit)
        vect_data_uri = os.path.join(word2vec_vectorizer_run.info.artifact_uri,
                                     'processed_data_dir', 'vectorized_data.pkl')
        rf_params = {
            'data_path': vect_data_uri,
            'label_col': 'airline_sentiment',
            'n_estimators': 500,
            'max_depth': 10
        }
        # NOTE(review): the returned run is unused — presumably kept for
        # symmetry / future chaining; confirm before removing.
        random_forest_run = _get_or_run("random_forest",
                                        rf_params,
                                        git_commit)


if __name__ == '__main__':
    # Positional CLI args are zipped onto these keys (missing ones are
    # simply absent from the dict).
    keys = 'data_path', 'text_col', 'label_col', 'punctuation'
    args = {k: v for k, v in zip(keys, sys.argv[1:])}
    workflow(args)
| StarcoderdataPython |
3215544 | <gh_stars>0
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import sqlalchemy
from keystone.common import driver_hints
from keystone.common import sql
from keystone.common import utils
import keystone.conf
from keystone import exception
from keystone.i18n import _
from keystone.identity.backends import base
from keystone.identity.backends import sql_model as model
CONF = keystone.conf.CONF
class Identity(base.IdentityDriverBase):
    # NOTE(henry-nash): Override the __init__() method so as to take a
    # config parameter to enable sql to be used as a domain-specific driver.
    def __init__(self, conf=None):
        # `conf` carries a domain-specific configuration when this backend
        # is instantiated per-domain; None means the global config applies.
        self.conf = conf
        super(Identity, self).__init__()

    @property
    def is_sql(self):
        # Marker consulted by the identity manager for SQL-specific paths.
        return True

    def _check_password(self, password, user_ref):
        """Check the specified password against the data store.

        Note that we'll pass in the entire user_ref in case the subclass
        needs things like user_ref.get('name')
        For further justification, please see the follow up suggestion at
        https://blueprints.launchpad.net/keystone/+spec/sql-identiy-pam

        """
        return utils.check_password(password, user_ref.password)

    # Identity interface
    def authenticate(self, user_id, password):
        # Validates credentials while enforcing lockout, enabled-state and
        # password-expiry policies; raises AssertionError on bad
        # credentials so callers cannot distinguish unknown-user from
        # wrong-password.
        with sql.session_for_read() as session:
            try:
                user_ref = self._get_user(session, user_id)
            except exception.UserNotFound:
                raise AssertionError(_('Invalid user / password'))
            if self._is_account_locked(user_id, user_ref):
                raise exception.AccountLocked(user_id=user_id)
            elif not self._check_password(password, user_ref):
                self._record_failed_auth(user_id)
                raise AssertionError(_('Invalid user / password'))
            elif not user_ref.enabled:
                raise exception.UserDisabled(user_id=user_id)
            elif user_ref.password_is_expired:
                raise exception.PasswordExpired(user_id=user_id)
            # successful auth, reset failed count if present
            if user_ref.local_user.failed_auth_count:
                self._reset_failed_auth(user_id)
            return base.filter_user(user_ref.to_dict())
    def _is_account_locked(self, user_id, user_ref):
        """Check if the user account is locked.

        Checks if the user account is locked based on the number of failed
        authentication attempts.

        :param user_id: The user ID
        :param user_ref: Reference to the user object
        :returns Boolean: True if the account is locked; False otherwise

        """
        attempts = user_ref.local_user.failed_auth_count or 0
        max_attempts = CONF.security_compliance.lockout_failure_attempts
        lockout_duration = CONF.security_compliance.lockout_duration
        if max_attempts and (attempts >= max_attempts):
            if not lockout_duration:
                # No duration configured: the lockout is permanent until an
                # administrator resets the counter.
                return True
            else:
                delta = datetime.timedelta(seconds=lockout_duration)
                last_failure = user_ref.local_user.failed_auth_at
                if (last_failure + delta) > datetime.datetime.utcnow():
                    return True
                else:
                    # The lockout window has elapsed; clear the counter.
                    self._reset_failed_auth(user_id)
        return False

    def _record_failed_auth(self, user_id):
        # Bump the per-user failure counter and remember when it happened.
        with sql.session_for_write() as session:
            user_ref = session.query(model.User).get(user_id)
            if not user_ref.local_user.failed_auth_count:
                user_ref.local_user.failed_auth_count = 0
            user_ref.local_user.failed_auth_count += 1
            user_ref.local_user.failed_auth_at = datetime.datetime.utcnow()

    def _reset_failed_auth(self, user_id):
        # Clear the failure counter and timestamp after success / timeout.
        with sql.session_for_write() as session:
            user_ref = session.query(model.User).get(user_id)
            user_ref.local_user.failed_auth_count = 0
            user_ref.local_user.failed_auth_at = None
    # user crud

    @sql.handle_conflicts(conflict_type='user')
    def create_user(self, user_id, user):
        # Passwords are hashed before the row is written.
        user = utils.hash_user_password(user)
        with sql.session_for_write() as session:
            user_ref = model.User.from_dict(user)
            user_ref.created_at = datetime.datetime.utcnow()
            session.add(user_ref)
            return base.filter_user(user_ref.to_dict())

    @driver_hints.truncated
    def list_users(self, hints):
        # Outer join so users without a local_user row are still listed;
        # `hints` supplies driver-level filtering / limiting.
        with sql.session_for_read() as session:
            query = session.query(model.User).outerjoin(model.LocalUser)
            user_refs = sql.filter_limit_query(model.User, query, hints)
            return [base.filter_user(x.to_dict()) for x in user_refs]

    def _get_user(self, session, user_id):
        # Raises UserNotFound rather than returning None.
        user_ref = session.query(model.User).get(user_id)
        if not user_ref:
            raise exception.UserNotFound(user_id=user_id)
        return user_ref

    def get_user(self, user_id):
        with sql.session_for_read() as session:
            return base.filter_user(
                self._get_user(session, user_id).to_dict())

    def get_user_by_name(self, user_name, domain_id):
        # User names are only unique within a domain, hence both filters.
        with sql.session_for_read() as session:
            query = session.query(model.User).join(model.LocalUser)
            query = query.filter(sqlalchemy.and_(
                model.LocalUser.name == user_name,
                model.LocalUser.domain_id == domain_id))
            try:
                user_ref = query.one()
            except sql.NotFound:
                raise exception.UserNotFound(user_id=user_name)
            return base.filter_user(user_ref.to_dict())
    @sql.handle_conflicts(conflict_type='user')
    def update_user(self, user_id, user):
        # Merge the partial update dict over the stored row, skipping
        # read-only attributes; extra (unmodelled) fields are kept too.
        with sql.session_for_write() as session:
            user_ref = self._get_user(session, user_id)
            old_user_dict = user_ref.to_dict()
            user = utils.hash_user_password(user)
            for k in user:
                old_user_dict[k] = user[k]
            new_user = model.User.from_dict(old_user_dict)
            for attr in model.User.attributes:
                if attr not in model.User.readonly_attributes:
                    setattr(user_ref, attr, getattr(new_user, attr))
            user_ref.extra = new_user.extra
            return base.filter_user(
                user_ref.to_dict(include_extra_dict=True))

    def _validate_password_history(self, password, user_ref):
        # Enforces CONF.security_compliance.unique_last_password_count:
        # the new password must differ from the last N stored hashes.
        unique_cnt = CONF.security_compliance.unique_last_password_count
        # Slice off all of the extra passwords.
        user_ref.local_user.passwords = (
            user_ref.local_user.passwords[-unique_cnt:])
        # Validate the new password against the remaining passwords.
        if unique_cnt > 1:
            for password_ref in user_ref.local_user.passwords:
                if utils.check_password(password, password_ref.password):
                    detail = _('The new password cannot be identical to a '
                               'previous password. The number of previous '
                               'passwords that must be unique is: '
                               '%(unique_cnt)d') % {'unique_cnt': unique_cnt}
                    raise exception.PasswordValidationError(detail=detail)
def change_password(self, user_id, new_password):
with sql.session_for_write() as session:
user_ref = session.query(model.User).get(user_id)
if user_ref.password_ref and user_ref.password_ref.self_service:
self._validate_minimum_password_age(user_ref)
self._validate_password_history(new_password, user_ref)
user_ref.password = <PASSWORD>(<PASSWORD>)
user_ref.password_ref.self_service = True
    def _validate_minimum_password_age(self, user_ref):
        # Rejects a self-service change when the current password is
        # younger than CONF.security_compliance.minimum_password_age days.
        min_age_days = CONF.security_compliance.minimum_password_age
        min_age = (user_ref.password_created_at +
                   datetime.timedelta(days=min_age_days))
        if datetime.datetime.utcnow() < min_age:
            days_left = (min_age - datetime.datetime.utcnow()).days
            raise exception.PasswordAgeValidationError(
                min_age_days=min_age_days, days_left=days_left)
def add_user_to_group(self, user_id, group_id):
with sql.session_for_write() as session:
self.get_group(group_id)
self.get_user(user_id)
query = session.query(model.UserGroupMembership)
query = query.filter_by(user_id=user_id)
query = query.filter_by(group_id=group_id)
rv = query.first()
if rv:
return
session.add(model.UserGroupMembership(user_id=user_id,
group_id=group_id))
def check_user_in_group(self, user_id, group_id):
with sql.session_for_read() as session:
self.get_group(group_id)
self.get_user(user_id)
query = session.query(model.UserGroupMembership)
query = query.filter_by(user_id=user_id)
query = query.filter_by(group_id=group_id)
if not query.first():
raise exception.NotFound(_("User '%(user_id)s' not found in"
" group '%(group_id)s'") %
{'user_id': user_id,
'group_id': group_id})
    def remove_user_from_group(self, user_id, group_id):
        """Delete the membership of ``user_id`` in ``group_id``.

        Raises NotFound if no such membership exists (after checking the
        group and user to raise a more specific error when possible).
        """
        # We don't check if user or group are still valid and let the remove
        # be tried anyway - in case this is some kind of clean-up operation
        with sql.session_for_write() as session:
            query = session.query(model.UserGroupMembership)
            query = query.filter_by(user_id=user_id)
            query = query.filter_by(group_id=group_id)
            membership_ref = query.first()
            if membership_ref is None:
                # Check if the group and user exist to return descriptive
                # exceptions.
                self.get_group(group_id)
                self.get_user(user_id)
                raise exception.NotFound(_("User '%(user_id)s' not found in"
                                           " group '%(group_id)s'") %
                                         {'user_id': user_id,
                                          'group_id': group_id})
            session.delete(membership_ref)
    def list_groups_for_user(self, user_id, hints):
        """Return dicts for all groups ``user_id`` belongs to.

        ``hints`` carries driver filter/limit directives applied via
        filter_limit_query.
        """
        with sql.session_for_read() as session:
            # Raises UserNotFound for an unknown user before querying.
            self.get_user(user_id)
            query = session.query(model.Group).join(model.UserGroupMembership)
            query = query.filter(model.UserGroupMembership.user_id == user_id)
            query = sql.filter_limit_query(model.Group, query, hints)
            return [g.to_dict() for g in query]
    def list_users_in_group(self, group_id, hints):
        """Return filtered dicts for all users in ``group_id``.

        Results pass through base.filter_user to strip sensitive fields
        (e.g. password data) before being returned.
        """
        with sql.session_for_read() as session:
            # Raises GroupNotFound for an unknown group before querying.
            self.get_group(group_id)
            # Outer-join LocalUser so federated users without local rows
            # are still included.
            query = session.query(model.User).outerjoin(model.LocalUser)
            query = query.join(model.UserGroupMembership)
            query = query.filter(
                model.UserGroupMembership.group_id == group_id)
            query = sql.filter_limit_query(model.User, query, hints)
            return [base.filter_user(u.to_dict()) for u in query]
    def delete_user(self, user_id):
        """Delete a user and all of their group memberships."""
        with sql.session_for_write() as session:
            ref = self._get_user(session, user_id)
            # Remove memberships first so no orphaned rows remain.
            q = session.query(model.UserGroupMembership)
            q = q.filter_by(user_id=user_id)
            q.delete(False)
            session.delete(ref)
# group crud
    @sql.handle_conflicts(conflict_type='group')
    def create_group(self, group_id, group):
        """Create a group from dict ``group``; duplicate names surface as
        Conflict via the handle_conflicts decorator."""
        with sql.session_for_write() as session:
            ref = model.Group.from_dict(group)
            session.add(ref)
            return ref.to_dict()
    @driver_hints.truncated
    def list_groups(self, hints):
        """Return dicts for all groups, honoring filter/limit ``hints``.

        The truncated decorator marks the hints when results were capped.
        """
        with sql.session_for_read() as session:
            query = session.query(model.Group)
            refs = sql.filter_limit_query(model.Group, query, hints)
            return [ref.to_dict() for ref in refs]
    def _get_group(self, session, group_id):
        """Return the Group model row or raise GroupNotFound."""
        ref = session.query(model.Group).get(group_id)
        if not ref:
            raise exception.GroupNotFound(group_id=group_id)
        return ref
    def get_group(self, group_id):
        """Return the group as a dict, or raise GroupNotFound."""
        with sql.session_for_read() as session:
            return self._get_group(session, group_id).to_dict()
    def get_group_by_name(self, group_name, domain_id):
        """Return the group dict matching ``group_name`` within ``domain_id``.

        Group names are only unique per domain, hence the domain filter.
        """
        with sql.session_for_read() as session:
            query = session.query(model.Group)
            query = query.filter_by(name=group_name)
            query = query.filter_by(domain_id=domain_id)
            try:
                group_ref = query.one()
            except sql.NotFound:
                # The exception reports the name since no id is known here.
                raise exception.GroupNotFound(group_id=group_name)
            return group_ref.to_dict()
    @sql.handle_conflicts(conflict_type='group')
    def update_group(self, group_id, group):
        """Partially update a group; keys in ``group`` overwrite stored ones.

        The id attribute is deliberately never overwritten.
        """
        with sql.session_for_write() as session:
            ref = self._get_group(session, group_id)
            # Merge the incoming partial dict over the stored state, then
            # rebuild a model object so extra/unknown keys land in `extra`.
            old_dict = ref.to_dict()
            for k in group:
                old_dict[k] = group[k]
            new_group = model.Group.from_dict(old_dict)
            for attr in model.Group.attributes:
                if attr != 'id':
                    setattr(ref, attr, getattr(new_group, attr))
            ref.extra = new_group.extra
            return ref.to_dict()
    def delete_group(self, group_id):
        """Delete a group and all of its user memberships."""
        with sql.session_for_write() as session:
            ref = self._get_group(session, group_id)
            # Remove memberships first so no orphaned rows remain.
            q = session.query(model.UserGroupMembership)
            q = q.filter_by(group_id=group_id)
            q.delete(False)
            session.delete(ref)
| StarcoderdataPython |
171894 | import wx
import sys
import threading
import traceback
import inspect
import weakref
class Signal(object):
    """Thread-safe signal/slot helper for classic wxPython (Python 2 era).

    Handlers are bound as (function, weakref-to-instance) pairs so bound
    methods do not keep their instances alive.  Emission is marshalled onto
    the wx main thread via wx.CallAfter.  Once destroy() runs, the internal
    lock is set to None and every public method becomes a no-op.

    NOTE(review): uses ``xrange`` and ``wx._core._wxPyDeadObject`` — this
    targets Python 2 / classic wxPython; confirm before porting.
    """

    def __init__(self, owner=None):
        # __lock guards __handlers; it doubles as the "destroyed" flag
        # (None once destroy() has run).
        self.__lock = threading.Lock()
        self.__handlers = []
        if isinstance(owner, wx.Window):
            # Tear the signal down automatically with its owning window.
            owner.Bind(wx.EVT_WINDOW_DESTROY, lambda evt: self.destroy())
        elif owner is not None:
            # Non-window owner: destroy when the owner is garbage collected.
            self.__owner_ref = weakref.ref(owner, lambda ref: self.destroy())

    def signal(self, *args, **kwargs):
        """Emit asynchronously; handlers run later on the wx main thread."""
        if self.__lock is None:
            return
        wx.CallAfter(self.__dosignal, *args, **kwargs)

    def __dosignal(self, *args, **kwargs):
        # Runs on the wx main thread.  Calls every live handler, prunes
        # handlers whose wx target object has died, and logs (but does not
        # propagate) handler exceptions so one bad handler cannot break the
        # rest.
        if self.__lock is None:
            return
        with self.__lock:
            dead = []
            for i, (func, ref) in enumerate(self.__handlers):
                try:
                    if ref is None:
                        # Plain function handler.
                        func(*args, **kwargs)
                    else:
                        # Bound-method handler: resurrect the instance.
                        obj = ref()
                        if isinstance(obj, wx._core._wxPyDeadObject):
                            # wx object destroyed; schedule for removal.
                            dead.append(i)
                        if obj is not None:
                            func(obj, *args, **kwargs)
                except SystemExit:
                    raise
                except:
                    sys.stdout.write(
                        "\nError signalling %s with arguments %r %r:\n\n%s" %
                        (func, args, kwargs, traceback.format_exc()))
            # Delete in reverse so earlier indices stay valid.
            for i in reversed(dead):
                del self.__handlers[i]

    def bind(self, func):
        """Register a callable; bound methods are held via weakref."""
        if self.__lock is None:
            return
        if not inspect.isfunction(func) and not inspect.ismethod(func) and hasattr(func, "__call__"):
            # Arbitrary callable object: bind its __call__ method instead.
            func = func.__call__
        if inspect.ismethod(func):
            # Store the unbound function plus a weakref to the instance;
            # the weakref callback auto-unbinds when the instance dies.
            handler = (func.__func__, weakref.ref(func.__self__, self.__unbind_weakref))
        elif inspect.isfunction(func):
            handler = (func, None)
        else:
            raise TypeError("Callable required")
        with self.__lock:
            self.__handlers.append(handler)

    def __unbind_weakref(self, ref):
        # weakref callback: drop every handler whose instance ref is `ref`.
        if self.__lock is None:
            return
        with self.__lock:
            for i in xrange(len(self.__handlers)-1, -1, -1):
                if self.__handlers[i][1] is ref:
                    del self.__handlers[i]

    def unbind(self, func):
        """Remove a previously bound function or bound method."""
        if self.__lock is None:
            return
        with self.__lock:
            obj = None
            if inspect.ismethod(func) and func.__self__ is not None:
                obj = func.__self__
                func = func.__func__
            # Iterate backwards so deletions do not shift pending indices.
            for i in xrange(len(self.__handlers)-1, -1, -1):
                ref = self.__handlers[i][1]
                if self.__handlers[i][0] is func and \
                   (obj is None or (ref is not None and obj is ref())):
                    del self.__handlers[i]

    def unbind_object(self, obj):
        """Remove all handlers bound to the given object.

        NOTE(review): ``obj()`` is called below, so the caller appears to be
        expected to pass a weakref, not the object itself — confirm.
        """
        if self.__lock is None:
            return
        with self.__lock:
            for i in xrange(len(self.__handlers)-1, -1, -1):
                ref = self.__handlers[i][1]
                if ref is not None and obj() is ref():
                    del self.__handlers[i]

    def clear(self):
        """Drop all handlers but keep the signal usable."""
        if self.__lock is None:
            return
        with self.__lock:
            self.__handlers = []

    def destroy(self):
        """Drop all handlers and permanently disable the signal."""
        if self.__lock is None:
            return
        with self.__lock:
            self.__handlers = []
            self.__lock = None
| StarcoderdataPython |
114424 | <gh_stars>0
from configparser import ConfigParser
from pathlib import Path
from typing import List, Any
true_path = Path(__file__).parent / 'config.ini'
_parser = ConfigParser()
_parser.read(str(true_path.resolve()), encoding='utf-8')
class Config(object):
    """Static convenience accessors over the module-level ``_parser``.

    Each helper delegates to the matching ConfigParser getter so callers
    never touch the parser instance directly.
    """

    @staticmethod
    def get_property_string(section_name: str, property_name: str) -> Any:
        """Return the raw string value of the property."""
        value = _parser.get(section_name, property_name)
        return value

    @staticmethod
    def get_property_int(section_name: str, property_name: str) -> Any:
        """Return the property value coerced to int."""
        value = _parser.getint(section_name, property_name)
        return value

    @staticmethod
    def get_property_float(section_name: str, property_name: str) -> Any:
        """Return the property value coerced to float."""
        value = _parser.getfloat(section_name, property_name)
        return value

    @staticmethod
    def get_property_bool(section_name: str, property_name: str) -> Any:
        """Return the property value coerced to bool."""
        value = _parser.getboolean(section_name, property_name)
        return value
class NmistRawAnnConfig(Config):
    """Typed accessors for the ``nmist_raw_ann`` config section."""

    @property
    def network_size(self) -> List[int]:
        """Return the layer sizes parsed from the config's list literal."""
        from ast import literal_eval
        layer_size_string = Config.get_property_string('nmist_raw_ann', 'layer_sizes')
        # literal_eval accepts the same "[a, b, c]" literal syntax eval did,
        # but cannot execute arbitrary code planted in the config file.
        return literal_eval(layer_size_string)

    @property
    def gradient_step_size(self) -> float:
        """Learning-rate style step size for gradient descent."""
        return Config.get_property_float('nmist_raw_ann', 'gradient_step_size')

    @property
    def regularization_lambda(self) -> float:
        """L2 regularization coefficient."""
        return Config.get_property_float('nmist_raw_ann', 'regularization_lambda')

    @property
    def training_iterations(self) -> int:
        """Number of training iterations to run."""
        return Config.get_property_int('nmist_raw_ann', 'num_training_iterations')

    @property
    def mini_batch_size(self) -> int:
        """Number of samples per mini-batch."""
        return Config.get_property_int('nmist_raw_ann', 'mini_batch_size')
class TorchNMist(Config):
    """Typed accessors for the ``torch_nmist`` config section."""

    @property
    def epochs(self) -> int:
        """Number of training epochs."""
        return Config.get_property_int('torch_nmist', 'epochs')

    @property
    def batch_size(self) -> int:
        """Samples per training batch."""
        return Config.get_property_int('torch_nmist', 'batch_size')

    @property
    def learning_rate(self) -> float:
        """Optimizer learning rate."""
        return Config.get_property_float('torch_nmist', 'learning_rate')
nmist_raw_ann_config = NmistRawAnnConfig() | StarcoderdataPython |
18269 | import numpy as np
import matplotlib.pyplot as plt
# Plot ODE-solver snapshots against the analytic solution and show the
# absolute / relative error of each component.
#
# Exactly one pair of exact-solution lambdas below should be active;
# the commented-out pairs are alternate test problems.
#Dahlquist test
#sol1ex = lambda t: np.exp(-t)
#sol2ex = lambda t: np.exp(-2*t)
#oscillator 1
sol1ex = lambda t: np.cos(t**2/2)
sol2ex = lambda t: np.sin(t**2/2)
#oscillator 2
#sol1ex = lambda t: np.exp(np.sin(t**2))
#sol2ex = lambda t: np.exp(np.cos(t**2))
# `name` selects which solver run to load from ../out; one snapshot file
# per time point plus one file of snapshot times.
name = 'Osc1'
t = np.fromfile('../out/%s_snap_t' % name)
nsnap = len(t)
sol1 = np.zeros((nsnap,))
sol2 = sol1.copy()
for i in range(nsnap):
    # Each snapshot file holds the state vector; components 0 and 1 are
    # the two solution variables.
    s = np.fromfile('../out/%s_snap_%d' % (name,i))
    sol1[i] = s[0]
    sol2[i] = s[1]
fig, axs = plt.subplots(2, 3, figsize=(10,5))
# Flatten the 2x3 axes grid into a single list for linear indexing.
axs = [item for sublist in axs for item in sublist]
# Dense grid for smooth exact-solution curves.
tdense = np.linspace(min(t), max(t), 2500)
axs[0].plot(tdense, sol1ex(tdense), 'k', linewidth=0.5, label='$y_1$ exact')
axs[0].plot(t, sol1, 'C0.', label='$y_1$ numerical')
axs[0].set_title('Solutions')
axs[0].set_ylabel('$y_1$')
axs[0].legend()
axs[3].plot(tdense, sol2ex(tdense), 'k', linewidth=0.5, label='$y_2$ exact')
axs[3].plot(t, sol2, 'C1.', label='$y_2$ numerical')
axs[3].set_ylabel('$y_2$')
axs[3].legend()
axs[1].semilogy(t, np.abs(sol1 - sol1ex(t)), 'C0.', label='$y_1$ abs err')
axs[4].semilogy(t, np.abs(sol2 - sol2ex(t)), 'C1.', label='$y_2$ abs err')
axs[1].set_title('Absolute Error')
# NOTE(review): both relative errors divide by sol1ex(t); the second line
# presumably should divide by sol2ex(t) — confirm against intent.
axs[2].semilogy(t, np.abs((sol1 - sol1ex(t))/sol1ex(t)), 'C0.', label='$y_1$ rel err')
axs[5].semilogy(t, np.abs((sol2 - sol2ex(t))/sol1ex(t)), 'C1.', label='$y_2$ rel err')
axs[2].set_title('Relative Error')
axs[3].set_xlabel('t')
axs[4].set_xlabel('t')
axs[5].set_xlabel('t')
plt.tight_layout()
plt.show()
| StarcoderdataPython |
98921 | import datetime
from app.models.base.base import BaseModel
from app import db
class WeChatWantBuyModel(db.Model, BaseModel):
    """Record of a buyer expressing purchase interest in a seller (WeChat)."""
    __bind_key__ = "a_social"
    __tablename__ = "wechat_want_buy"
    id = db.Column(db.Integer, primary_key=True)
    seller_id = db.Column(db.Integer, default=0)  # user id of the seller
    buyer_id = db.Column(db.Integer, default=0)   # user id of the buyer
    status = db.Column(db.Integer, default=1)     # presumably 1 = active — confirm
    created_time = db.Column(db.DateTime, default=datetime.datetime.now)
    updated_time = db.Column(db.DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)

    @staticmethod
    def query_wechat_want_buy(seller_id, buyer_id):
        """Return the status=1 record for this buyer/seller pair, or None."""
        return WeChatWantBuyModel.query.filter_by(buyer_id=buyer_id, seller_id=seller_id, status=1).first()
| StarcoderdataPython |
# NOTE: Python 2 tutorial script ("print x" statement syntax); it will not
# run under Python 3 without converting the prints.
# comment on each line
# Here we are setting variables, in this case X; the x variable embeds a format
# specifier (%d) which is filled in at creation time.
x = "There are %d types of people." % 10
# creating the variable binary
binary = "binary"
# creating variable do_not
do_not = "don't"
# creating variable y; like x, we create a string that interpolates other
# variables into it — first example of string within string.
y = "Those who don't know %s and those who %s." % (binary, do_not)
# printing the strings created as variables, which themselves were built from
# format specifiers.
print x
print y
# these strings interpolate a variable that itself contains variables —
# more 'string inside of string' examples.
print "I said: %r." % x
print "I also said: '%s'." % y
# we again create variables; joke_evaluation carries a %r placeholder —
# another string-within-a-string example.
hilarious = False
joke_evaluation = "Isn't that joke funny?! %r"
# this print applies the joke_evaluation format to the variable hilarious,
# which renders False — the last string-within-string example.
print joke_evaluation % hilarious
# here we are introduced to variables w and e: w holds the left half of a
# sentence and e the right half.  They will be concatenated to form one line.
w = "This is the left side of..."
e = "a string with a right side."
# here is how they are joined; this prints
# "This is the left side of...a string with a right side."
print w + e
# more on formatters. %r is best for debugging, other formats are for actually
# displaying variables.  %r shows the "raw" repr of the data.
# %s and %d are used to display values to people.
# why single (') quotes inside the string and double (") outside?
# style choice: it makes neater-looking code to read.
# Review of what was introduced in this lesson
# print
# variable creation
# placing variables within strings
# placing multiple variables within a string
# the formatters:
# %r = raw (repr) data
# %d = interpolate an integer
# %s = interpolate a string
# w = left side of a string (just a variable name here)
# e = right side of a string (just a variable name here)
3251699 | from lettuceUI import LettuceUI
__all__ = ['LettuceUI'
]
| StarcoderdataPython |
3393325 | <reponame>ryosuke0825/atcoder_python
n = int(input())
print(((n-1)*n)//2)
| StarcoderdataPython |
1693867 | <reponame>Jumpscale/lib9
"""
This modules defines types related to signatures
"""
from JumpScale9Lib.clients.blockchain.rivine.encoding import binary
SIGEd25519 = 'ed25519'
SPECIFIER_SIZE = 16
class SiaPublicKeyFactory:
    """Constructs SiaPublicKey instances from their textual encoding."""

    @staticmethod
    def from_string(pub_key_str):
        """Parse an "<algorithm>:<hex-key>" string into a SiaPublicKey.

        Returns None when the algorithm is not recognized (same implicit
        behavior as before).
        """
        algorithm, hex_key = pub_key_str.split(':')
        if algorithm == SIGEd25519:
            return Ed25519PublicKey(pub_key=bytearray.fromhex(hex_key))
        return None
class SiaPublicKey:
    """
    A SiaPublicKey is a public key prefixed by a Specifier. The Specifier
    indicates the algorithm used for signing and verification.
    """
    def __init__(self, algorithm, pub_key):
        """
        Initialize new SiaPublicKey.

        :param algorithm: signature algorithm name, e.g. 'ed25519'
        :param pub_key: raw public key bytes (bytearray)
        """
        self._algorithm = algorithm
        self._pub_key = pub_key
    @property
    def binary(self):
        """
        Encodes the public key into binary format:
        a SPECIFIER_SIZE-byte specifier holding the algorithm name
        (zero-padded on the right), followed by the key encoded as a slice.
        """
        key_value = bytearray()
        # Build the fixed-size specifier; unused trailing bytes stay zero.
        s = bytearray(SPECIFIER_SIZE)
        s[:len(self._algorithm)] = bytearray(self._algorithm, encoding='utf-8')
        key_value.extend(s)
        key_value.extend(binary.encode(self._pub_key, type_='slice'))
        return key_value
    @property
    def json(self):
        """
        Returns a json encoded version of the SiaPublicKey as
        "<algorithm>:<hex-key>".
        """
        return "{}:{}".format(self._algorithm, self._pub_key.hex())
class Ed25519PublicKey(SiaPublicKey):
    """
    Ed25519PublicKey returns pk as a SiaPublicKey, denoting its algorithm as Ed25519.
    """
    def __init__(self, pub_key):
        """
        Initialize new Ed25519PublicKey.

        :param pub_key: raw Ed25519 public key bytes (bytearray)
        """
        super().__init__(algorithm=SIGEd25519, pub_key=pub_key)
| StarcoderdataPython |
3303137 | <reponame>mofilamamra/APP-WILAYA<gh_stars>0
# auth.py
from flask import Blueprint, render_template, redirect, url_for, request, flash, session
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import login_user, logout_user, login_required
from app.models.Model import User, ACCESS
from app.models import db
from app.forms.user import LoginForm , RegisterForm
from app.utils.utils import requires_access_level
auth = Blueprint('auth', __name__)
@auth.route('/identifier', methods=['GET', 'POST'])
def login():
    """Render the login form (GET) or authenticate the user (POST).

    On a failed credential check the user is flashed a message and sent
    back to the form; on success the user is logged in and the email is
    cached in the session.
    """
    title = 'identifier'
    form = LoginForm()
    if request.method == 'GET':
        return render_template('auth/login.html', title=title, form=form)
    email = request.form.get('email')
    # NOTE(review): this line was mangled by a redaction tool
    # ("<PASSWORD>('password')"); restored to read the form field.
    password = request.form.get('password')
    remember = bool(request.form.get('remember'))
    user = User.query.filter_by(email=email).first()
    # check if user actually exists
    # take the user supplied password, hash it, and compare it to the hashed password in DB
    if not user or not check_password_hash(user.password, password):
        flash('Please check your login details and try again.')
        return redirect(url_for('auth.login'))
    # if the above check passes, then we know the user has the right credentials
    login_user(user, remember=remember)
    session['email'] = email
    return redirect(url_for('home.index'))
@auth.route('/inscrire', methods=['GET','POST'])
@login_required
@requires_access_level(ACCESS['admin'])
def register():
    """Admin-only user registration: render the form (GET) or create a
    user (POST).

    NOTE(review): the password is hashed and the duplicate-email check runs
    before form.validate_on_submit() — consider validating first.
    """
    title = 'inscrire'
    form = RegisterForm()
    if request.method == 'GET':
        return render_template('auth/register.html', title=title, form=form)
    email = request.form.get('email')
    username = request.form.get('username')
    password = request.form.get('password')
    hashed_password = generate_password_hash(password,method="sha256")
    category_id = request.form.get("category")
    # if this returns a user, then the email already exists in database
    user = User.query.filter_by(email=email).first()
    if user: # if a user is found, we want to redirect back to signup page so user can try again
        flash("L'adresse mail existe déjà")
        return redirect(url_for('auth.register'))
    if form.validate_on_submit():
        new_user = User(username,email,hashed_password,category_id)
        # add the new user to DB.
        db.session.add(new_user)
        db.session.commit()
        flash("Merci pour l'inscription")
        return redirect(url_for('home.index'))
    else:
        # Validation failed: redisplay the form with errors.
        return render_template("auth/register.html", title=title, form=form)
@auth.route('/logout')
@login_required
def logout():
    """Log the current user out, clear the cached session email, and
    redirect to the home page."""
    logout_user()
    session.pop('email', None)
    return redirect(url_for('home.index'))
| StarcoderdataPython |
63817 | <reponame>SmartStake/harmonyanalytics<filename>harmonyanalyticsutils/harmonyNodeHealth.py
import datetime
import json
import subprocess
import sys
import commonUtils
import constants
import logUtil
logger = logUtil.l()
# Usage: python harmonyNodeHealth.py dev/prod logsPath nodeName shardId
if len(sys.argv) < 5:
    raise Exception("correct syntax is: python harmonyNodeHealth dev/prod logsPath nodeName shardId")
harmonyCmdPath = constants.HARMONY_HOME_DIR + 'hmy'
# Template for the per-shard RPC endpoint; filled with the shard id below.
harmonyNodeUrl = '--node=https://api.s{}.t.hmny.io'
today = datetime.date.today()
#python harmonyNodeHealth.py
node_name = sys.argv[3]
shardId = sys.argv[4]
harmonyNodeUrl = harmonyNodeUrl.format(shardId)
logger.info("shard url: " + harmonyNodeUrl)
def check_health():
    """Compare this node's block height to the shard's network height and
    report the gap to the monitoring backend."""
    logger.info(node_name + " - starting health check")
    nodeStatus = getNodeStatus()
    nodeHeight = nodeStatus["blockNumber"]
    logger.info("node nodeHeight is: " + str(nodeHeight))
    shardId = nodeStatus["shardID"]
    networkStatus = getNetworkStatus()
    networkHeight = networkStatus["shard-chain-header"]["block-number"]
    logger.info(node_name + " - networkHeight is: " + str(networkHeight))
    # Positive gap means the node is behind the network.
    blockDiff = int(networkHeight) - int(nodeHeight)
    logger.info("network block height - node height: " + str(blockDiff))
    logger.error("%s - block diff - %s - network block height - %s, node height - %s ",
                 node_name, blockDiff, networkHeight, nodeHeight)
    saveHealthCheck(blockDiff, networkHeight, nodeHeight, shardId)
    logger.info(node_name + " - finished successfully")
def saveHealthCheck(blockDiff, networkHeight, nodeHeight, shardId):
    """POST a health-check record (heights and gap) to the backend API."""
    logger.debug("in save_health_check")
    reqData = {
        "type": "saveHealthCheck",
        "nodeName": node_name,
        "symbol": constants.app,
        "checkupTime": datetime.datetime.now(),
        "networkBlockHeight": networkHeight,
        "nodeBlockHeight": nodeHeight,
        "heightGap": blockDiff,
        "poolId": constants.DEFAULT_POOL_ID,
        "shardId": shardId
    }
    logger.debug(reqData)
    commonUtils.postReq(constants.saveHealthCheckUrl, reqData)
# def postReq(url, reqData):
# data_json = jsondumps(reqData)
# logger.debug("after json dump")
# logger.debug(data_json)
# session = Session()
# logger.debug("calling save health check:")
# logger.debug(url)
# #response = session.post('https://dev.smartstakeapi.com/saveHealthCheck',
# # data=data_json, allow_redirects=False)
#
# response = session.post(url,
# data=data_json, allow_redirects=False)
# logger.debug("save health check response is:")
# logger.debug(response)
def getNodeStatus():
    """Return the local node's latest-header dict via the hmy CLI.

    Equivalent to: ./hmy blockchain latest-header
    """
    nodeHeaderCmd = [harmonyCmdPath, 'blockchain', 'latest-header']
    nodeHeader = execCmdJson(nodeHeaderCmd)
    # The CLI wraps the payload under "result".
    return nodeHeader["result"]
def getNetworkStatus():
    """Return the shard network's latest-headers dict via the hmy CLI.

    Equivalent to: ./hmy --node="https://api.sN.t.hmny.io" blockchain latest-headers
    """
    networkHeaderCmd = [harmonyCmdPath, harmonyNodeUrl, 'blockchain', 'latest-headers']
    networkHeader = execCmdJson(networkHeaderCmd)
    # The CLI wraps the payload under "result".
    return networkHeader["result"]
def execCmdJson(args):
    """Run *args* via execCmd and parse the captured output as JSON."""
    raw_output = execCmd(args)
    return json.loads(raw_output)
def execCmd(args):
    """Run *args* as a subprocess and return its output as a string.

    stderr is merged into stdout, and all newline characters are stripped
    from the decoded result.
    """
    proc = subprocess.Popen(args,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output, _ = proc.communicate()
    return output.decode().replace("\n", "")
# def default(o):
# if type(o) is datetime.date or type(o) is datetime.datetime:
# return o.isoformat()
# if isinstance(o, float):
# return round(o, 8)
# if type(o) is decimal.Decimal:
# return str(o)
#
#
# def jsondumps(o):
# # return json.dumps(o)
# return json.dumps(o, default=default)
check_health()
| StarcoderdataPython |
1733726 | <filename>data_parsing_code/scrap_wiki_verb_roots_inflections.py
# Import libraries
import requests
from bs4 import BeautifulSoup
import pandas as pd
import codecs
import mysql.connector as MySQL
# Scrape the Arabic verb conjugation table from Wiktionary for every verb
# root stored in the local database, writing each table's HTML to a file.
mysql = MySQL.connect(host='localhost',
                      database='ar_nlg'
                      , user='root'
                      , password='')
mycursor = mysql.cursor(dictionary=True)
mycursor.execute("SELECT `id`,`verb_root` FROM `verbs_root`")
verb_roots = mycursor.fetchall()
for verb_root in verb_roots:
    url='https://en.wiktionary.org/wiki/'+verb_root['verb_root']
    # Connect to the URL
    response = requests.get(url)
    response.encoding = "utf-8"
    try:
        soup = BeautifulSoup(response.text, "html.parser")
        # Wiktionary marks the Arabic conjugation box with this class pair.
        div= soup.find('div', attrs={'class':'NavFrame ar-conj'})
        div_content=div.find('div',attrs={'class':'NavContent'})
        inflection_table=div_content.find('table',attrs={'class':'inflection-table'})
        # Append mode: reruns add to the file rather than overwrite it.
        with codecs.open("verbs_tables/arabic_verb_"+str(verb_root['id'])+".html", "a", encoding="utf-8") as f2:
            f2.write(str(inflection_table))
    except:
        # NOTE(review): bare except deliberately skips roots whose page has
        # no conjugation box (find() returns None); it also hides real
        # errors — consider narrowing to AttributeError.
        continue
| StarcoderdataPython |
3234695 | <reponame>maclema/aws-parallelcluster<gh_stars>100-1000
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import boto3
from retrying import RetryError, retry
from time_utils import seconds
from utils import get_compute_nodes_count
def get_compute_nodes_allocation(scheduler_commands, region, stack_name, max_monitoring_time):
    """
    Watch periodically the number of compute nodes in the cluster.
    :return: (ec2_capacity_time_series, compute_nodes_time_series, timestamps): three lists describing
        the variation over time in the number of compute nodes and the timestamp when these fluctuations occurred.
        ec2_capacity_time_series describes the variation in the desired ec2 capacity. compute_nodes_time_series
        describes the variation in the number of compute nodes seen by the scheduler. timestamps describes the
        time since epoch when the variations occurred.
    """
    # The nested closure appends into these lists; the retry predicate
    # below inspects them to decide when monitoring is complete.
    ec2_capacity_time_series = []
    compute_nodes_time_series = []
    timestamps = []
    @retry(
        # Retry until EC2 and Scheduler capacities scale down to 0
        # Also make sure cluster scaled up before scaling down
        retry_on_result=lambda _: ec2_capacity_time_series[-1] != 0
        or compute_nodes_time_series[-1] != 0
        or max(ec2_capacity_time_series) == 0
        or max(compute_nodes_time_series) == 0,
        wait_fixed=seconds(20),
        stop_max_delay=max_monitoring_time,
    )
    def _watch_compute_nodes_allocation():
        compute_nodes = scheduler_commands.compute_nodes_count()
        ec2_capacity = get_compute_nodes_count(stack_name, region)
        timestamp = time.time()
        # add values only if there is a transition.
        if (
            len(ec2_capacity_time_series) == 0
            or ec2_capacity_time_series[-1] != ec2_capacity
            or compute_nodes_time_series[-1] != compute_nodes
        ):
            ec2_capacity_time_series.append(ec2_capacity)
            compute_nodes_time_series.append(compute_nodes)
            timestamps.append(timestamp)
    try:
        _watch_compute_nodes_allocation()
    except RetryError:
        # ignoring this error in order to perform assertions on the collected data.
        pass
    logging.info(
        "Monitoring completed: %s, %s, %s",
        "ec2_capacity_time_series [" + " ".join(map(str, ec2_capacity_time_series)) + "]",
        "compute_nodes_time_series [" + " ".join(map(str, compute_nodes_time_series)) + "]",
        "timestamps [" + " ".join(map(str, timestamps)) + "]",
    )
    return ec2_capacity_time_series, compute_nodes_time_series, timestamps
def watch_compute_nodes(scheduler_commands, max_monitoring_time, number_of_nodes):
    """Watch periodically the number of nodes seen by the scheduler.

    Polls every 20 seconds until the scheduler reports exactly
    ``number_of_nodes`` or ``max_monitoring_time`` elapses; transitions are
    recorded and logged but not returned.
    """
    compute_nodes_time_series = []
    timestamps = []
    @retry(
        # Retry until the given number_of_nodes is equal to the number of compute nodes
        retry_on_result=lambda _: compute_nodes_time_series[-1] != number_of_nodes,
        wait_fixed=seconds(20),
        stop_max_delay=max_monitoring_time,
    )
    def _watch_compute_nodes_allocation():
        compute_nodes = scheduler_commands.compute_nodes_count()
        timestamp = time.time()
        # add values only if there is a transition.
        if len(compute_nodes_time_series) == 0 or compute_nodes_time_series[-1] != compute_nodes:
            compute_nodes_time_series.append(compute_nodes)
            timestamps.append(timestamp)
    try:
        _watch_compute_nodes_allocation()
    except RetryError:
        # ignoring this error in order to perform assertions on the collected data.
        pass
    logging.info(
        "Monitoring completed: %s, %s",
        "compute_nodes_time_series [" + " ".join(map(str, compute_nodes_time_series)) + "]",
        "timestamps [" + " ".join(map(str, timestamps)) + "]",
    )
def get_stack(stack_name, region, cfn_client=None):
    """
    Get the output for a DescribeStacks action for the given Stack.

    :param cfn_client: optional pre-built CloudFormation client; one is
        created for ``region`` when omitted.
    :return: the Stack data type
    """
    client = cfn_client or boto3.client("cloudformation", region_name=region)
    response = client.describe_stacks(StackName=stack_name)
    return response.get("Stacks")[0]
def get_stack_output_value(stack_outputs, output_key):
    """
    Get output value from Cloudformation Stack Output.

    :return: OutputValue if that output exists, otherwise None
    """
    for output in stack_outputs:
        if output.get("OutputKey") == output_key:
            return output.get("OutputValue")
    return None
def get_batch_ce(stack_name, region):
    """
    Get name of the AWS Batch Compute Environment.

    :return: ce_name or exit if not found
    """
    stack_outputs = get_stack(stack_name, region).get("Outputs")
    return get_stack_output_value(stack_outputs, "BatchComputeEnvironmentArn")
def get_batch_ce_max_size(stack_name, region):
    """Get max vcpus for Batch Compute Environment."""
    client = boto3.client("batch", region_name=region)
    response = client.describe_compute_environments(
        computeEnvironments=[get_batch_ce(stack_name, region)]
    )
    environment = response.get("computeEnvironments")[0]
    return environment.get("computeResources").get("maxvCpus")
def get_batch_ce_min_size(stack_name, region):
    """Get min vcpus for Batch Compute Environment."""
    client = boto3.client("batch", region_name=region)
    response = client.describe_compute_environments(
        computeEnvironments=[get_batch_ce(stack_name, region)]
    )
    environment = response.get("computeEnvironments")[0]
    return environment.get("computeResources").get("minvCpus")
| StarcoderdataPython |
3292962 | #!/usr/bin/env python
#
# test_fsl_data_utils.py -
#
# Author: <NAME> <<EMAIL>>
#
import shutil
import os
import os.path as op
import numpy as np
import fsl.utils.tempdir as tempdir
import fsl.data.utils as dutils
import fsl.utils.path as fslpath
import fsl.data.image as fslimage
import fsl.data.vtk as fslvtk
import fsl.data.gifti as fslgifti
import fsl.data.freesurfer as fslfs
import fsl.data.mghimage as fslmgh
import fsl.data.featimage as featimage
import fsl.data.melodicimage as melimage
import fsl.data.dtifit as dtifit
import fsl.data.melodicanalysis as melanalysis
import fsl.data.featanalysis as featanalysis
from . import (touch,
make_mock_feat_analysis,
make_mock_melodic_analysis,
make_mock_dtifit_analysis)
def test_guessType():
    """Exercise fsl.data.utils.guessType across every supported file type:
    NIfTI, VTK, GIFTI, FreeSurfer, MGH, MELODIC/FEAT/dtifit analyses, and
    unrecognized/nonexistent paths."""
    def asrt(path, cls):
        # guessType must identify the class and return the absolute path.
        restype, respath = dutils.guessType(path)
        assert restype is cls
        if path.startswith('fsleyes://'):
            path = path[10:]
        # image path might not have an extension
        try:
            path = fslimage.addExt(path, mustExist=True)
        except fslimage.PathError:
            pass
        assert respath == op.abspath(path)
    with tempdir.tempdir() as td:
        # NIfTI image, with/without extension and with the fsleyes:// prefix.
        touch('foo.nii')
        asrt('foo', fslimage.Image)
        asrt('foo.nii', fslimage.Image)
        asrt('fsleyes://foo', fslimage.Image)
        asrt('fsleyes://foo.nii', fslimage.Image)
        os.remove('foo.nii')
        touch('foo.vtk')
        asrt('foo.vtk', fslvtk.VTKMesh)
        os.remove('foo.vtk')
        touch('foo.surf.gii')
        asrt('foo.surf.gii', fslgifti.GiftiMesh)
        os.remove('foo.surf.gii')
        touch('lh.pial')
        asrt('lh.pial', fslfs.FreesurferMesh)
        os.remove('lh.pial')
        touch('foo.mgz')
        asrt('foo.mgz', fslmgh.MGHImage)
        os.remove('foo.mgz')
        # MELODIC analysis directory: the directory itself and the
        # melodic_IC image inside it both resolve to MelodicImage.
        make_mock_melodic_analysis('filtered_func_data.ica',
                                   (10, 10, 10, 10),
                                   20)
        asrt('filtered_func_data.ica/melodic_IC', melimage.MelodicImage)
        asrt('filtered_func_data.ica/melodic_IC.nii.gz', melimage.MelodicImage)
        asrt('filtered_func_data.ica', melimage.MelodicImage)
        asrt('filtered_func_data.ica/', melimage.MelodicImage)
        shutil.rmtree('filtered_func_data.ica')
        # FEAT analysis directory, built from checked-in test data.
        featdir = op.join(op.dirname(__file__),
                          'testdata', 'test_feat', '1stlevel_1.feat')
        make_mock_feat_analysis(featdir,
                                td,
                                (10, 10, 10, 10))
        asrt('1stlevel_1.feat/filtered_func_data', featimage.FEATImage)
        asrt('1stlevel_1.feat/filtered_func_data.nii.gz', featimage.FEATImage)
        asrt('1stlevel_1.feat', featimage.FEATImage)
        make_mock_dtifit_analysis('dti', (10, 10, 10))
        asrt('dti', dtifit.DTIFitTensor)
        shutil.rmtree('dti')
        # Nonexistent or unrecognized paths yield None.
        asrt('noexist', None)
        touch('norecognise')
        asrt('norecognise', None)
        touch('norecognise.txt')
        asrt('norecognise.txt', None)
        os.remove('norecognise')
        os.remove('norecognise.txt')
def test_makeWriteable():
    """makeWriteable must copy arrays backed by read-only buffers, but may
    return arrays whose underlying buffer is already writeable as-is."""
    robuf = bytes( b'\01\02\03\04')
    wbuf = bytearray(b'\01\02\03\04')
    roarr = np.ndarray((4,), dtype=np.uint8, buffer=robuf)
    warr = np.ndarray((4,), dtype=np.uint8, buffer=wbuf)
    # Flag the writeable-buffer array read-only at the numpy level; the
    # underlying bytearray is still mutable, so no copy should be needed.
    warr.flags['WRITEABLE'] = False
    rocopy = dutils.makeWriteable(roarr)
    wcopy = dutils.makeWriteable(warr)
    # Read-only buffer: a fresh copy.  Writeable buffer: same base reused.
    assert rocopy.base is not roarr.base
    assert wcopy .base is warr .base
    # Both results must accept writes without raising.
    rocopy[1] = 100
    wcopy[ 1] = 100
199769 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2020/5/8 下午8:29
# @Author : MaybeShewill-CV
# @Site : https://github.com/MaybeShewill-CV/sfnet-tensorflow
# @File : make_cityscapes_tfrecords.py
# @IDE: PyCharm
"""
Generate cityscapes tfrecords tools
"""
from data_provider.cityscapes import cityscapes_tf_io
from local_utils.log_util import init_logger
from local_utils.config_utils import parse_config_utils
LOG = init_logger.get_logger(log_file_name_prefix='generate_cityscapes_tfrecords')
CFG = parse_config_utils.CITYSCAPES_CFG
def generate_tfrecords():
    """Convert the CityScapes dataset into tfrecord files.

    Builds the tf IO helper from the module-level CFG and delegates the
    actual writing to its writer.
    """
    # Renamed local from ``io`` to avoid shadowing the stdlib ``io`` module.
    tf_io = cityscapes_tf_io.CityScapesTfIO(cfg=CFG)
    tf_io.writer.write_tfrecords()
    return
# Script entry point: build the CityScapes tfrecords when run directly.
if __name__ == '__main__':
    """
    test
    """
    generate_tfrecords()
| StarcoderdataPython |
3307199 | import pytest
from selenium import webdriver
@pytest.fixture
def driver(request):
    """Provide a Chrome WebDriver, quitting it automatically after the test."""
    wd = webdriver.Chrome()
    request.addfinalizer(wd.quit)
    return wd
def test_example(driver):
    """Log into the litecart admin panel and click through every left-menu
    app entry and each of its sub-items, asserting a page header renders.

    Element lists are re-fetched after every click because navigation makes
    the previously found WebElements stale.
    """
    driver.get("http://localhost/litecart/admin/login.php")
    driver.find_element_by_name('username').send_keys('admin')
    # NOTE(review): the password literal was redacted ("<PASSWORD>") in this
    # copy; restore the real test credential before running.
    driver.find_element_by_name('password').send_keys('<PASSWORD>')
    driver.find_element_by_name('login').click()
    c = driver.find_elements_by_css_selector('[id="app-"]')
    for i in range (len(c)):
        # Re-locate the menu items to avoid StaleElementReferenceException.
        elems = driver.find_elements_by_css_selector('[id="app-"]')
        elems[i].click()
        assert(driver.find_elements_by_css_selector("h1"))
        elems = driver.find_elements_by_css_selector('[id="app-"]')
        in_elems = elems[i].find_elements_by_css_selector("li")
        for y in range (len(in_elems)):
            elems = driver.find_elements_by_css_selector('[id="app-"]')
            in_elems1 = elems[i].find_elements_by_css_selector("li")
            in_elems1[y].click()
assert (driver.find_elements_by_css_selector("h1")) | StarcoderdataPython |
1754348 | #!/usr/bin/python
# Copyright (c) 2017, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata block consumed by ansible-doc tooling.
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_network_security_list
short_description: Manage a SecurityList resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a SecurityList resource in Oracle Cloud Infrastructure
- For I(state=present), creates a new security list for the specified VCN. For more information
about security lists, see L(Security Lists,https://docs.cloud.oracle.com/Content/Network/Concepts/securitylists.htm).
For information on the number of rules you can have in a security list, see
L(Service Limits,https://docs.cloud.oracle.com/Content/General/Concepts/servicelimits.htm).
- For the purposes of access control, you must provide the OCID of the compartment where you want the security
list to reside. Notice that the security list doesn't have to be in the same compartment as the VCN, subnets,
or other Networking Service components. If you're not sure which compartment to use, put the security
list in the same compartment as the VCN. For more information about compartments and access control, see
L(Overview of the IAM Service,https://docs.cloud.oracle.com/Content/Identity/Concepts/overview.htm). For information about OCIDs, see
L(Resource Identifiers,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
- "You may optionally specify a *display name* for the security list, otherwise a default is provided.
It does not have to be unique, and you can change it. Avoid entering confidential information."
version_added: "2.9"
author: Oracle (@oracle)
options:
compartment_id:
description:
- The OCID of the compartment to contain the security list.
- Required for create using I(state=present).
- Required for update when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- Required for delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
type: str
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
- This parameter is updatable.
type: dict
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
- Required for create, update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["name"]
egress_security_rules:
description:
- Rules for allowing egress IP packets.
- Required for create using I(state=present).
- This parameter is updatable.
type: list
suboptions:
destination:
description:
- Conceptually, this is the range of IP addresses that a packet originating from the instance
can go to.
- "Allowed values:"
- " * IP address range in CIDR notation. For example: `192.168.1.0/24`"
- " * The `cidrBlock` value for a L(Service,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/Service/), if you're
setting up a security list rule for traffic destined for a particular `Service` through
a service gateway. For example: `oci-phx-objectstorage`."
type: str
required: true
destination_type:
description:
- Type of destination for the rule. The default is `CIDR_BLOCK`.
- "Allowed values:"
- " * `CIDR_BLOCK`: If the rule's `destination` is an IP address range in CIDR notation."
- " * `SERVICE_CIDR_BLOCK`: If the rule's `destination` is the `cidrBlock` value for a
L(Service,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/Service/) (the rule is for traffic destined for a
particular `Service` through a service gateway)."
type: str
choices:
- "CIDR_BLOCK"
- "SERVICE_CIDR_BLOCK"
icmp_options:
description:
- "Optional and valid only for ICMP. Use to specify a particular ICMP type and code
as defined in
L(ICMP Parameters,http://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml).
If you specify ICMP as the protocol but omit this object, then all ICMP types and
codes are allowed. If you do provide this object, the type is required and the code is optional.
To enable MTU negotiation for ingress internet traffic, make sure to allow type 3 (\\"Destination
Unreachable\\") code 4 (\\"Fragmentation Needed and Don't Fragment was Set\\"). If you need to specify
multiple codes for a single type, create a separate security list rule for each."
type: dict
suboptions:
code:
description:
- The ICMP code (optional).
type: int
type:
description:
- The ICMP type.
type: int
required: true
is_stateless:
description:
- A stateless rule allows traffic in one direction. Remember to add a corresponding
stateless rule in the other direction if you need to support bidirectional traffic. For
example, if egress traffic allows TCP destination port 80, there should be an ingress
rule to allow TCP source port 80. Defaults to false, which means the rule is stateful
and a corresponding rule is not necessary for bidirectional traffic.
type: bool
protocol:
description:
- "The transport protocol. Specify either `all` or an IPv4 protocol number as
defined in
L(Protocol Numbers,http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml).
Options are supported only for ICMP (\\"1\\"), TCP (\\"6\\"), and UDP (\\"17\\")."
type: str
required: true
tcp_options:
description:
- Optional and valid only for TCP. Use to specify particular destination ports for TCP rules.
If you specify TCP as the protocol but omit this object, then all destination ports are allowed.
type: dict
suboptions:
destination_port_range:
description:
- An inclusive range of allowed destination ports. Use the same number for the min and max
to indicate a single port. Defaults to all ports if not specified.
type: dict
suboptions:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
type: int
required: true
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
type: int
required: true
source_port_range:
description:
- An inclusive range of allowed source ports. Use the same number for the min and max to
indicate a single port. Defaults to all ports if not specified.
type: dict
suboptions:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
type: int
required: true
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
type: int
required: true
udp_options:
description:
- Optional and valid only for UDP. Use to specify particular destination ports for UDP rules.
If you specify UDP as the protocol but omit this object, then all destination ports are allowed.
type: dict
suboptions:
destination_port_range:
description:
- An inclusive range of allowed destination ports. Use the same number for the min and max
to indicate a single port. Defaults to all ports if not specified.
type: dict
suboptions:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
type: int
required: true
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
type: int
required: true
source_port_range:
description:
- An inclusive range of allowed source ports. Use the same number for the min and max to
indicate a single port. Defaults to all ports if not specified.
type: dict
suboptions:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
type: int
required: true
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
type: int
required: true
description:
description:
- An optional description of your choice for the rule.
type: str
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
- This parameter is updatable.
type: dict
ingress_security_rules:
description:
- Rules for allowing ingress IP packets.
- Required for create using I(state=present).
- This parameter is updatable.
type: list
suboptions:
icmp_options:
description:
- "Optional and valid only for ICMP. Use to specify a particular ICMP type and code
as defined in
L(ICMP Parameters,http://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml).
If you specify ICMP as the protocol but omit this object, then all ICMP types and
codes are allowed. If you do provide this object, the type is required and the code is optional.
To enable MTU negotiation for ingress internet traffic, make sure to allow type 3 (\\"Destination
Unreachable\\") code 4 (\\"Fragmentation Needed and Don't Fragment was Set\\"). If you need to specify
multiple codes for a single type, create a separate security list rule for each."
type: dict
suboptions:
code:
description:
- The ICMP code (optional).
type: int
type:
description:
- The ICMP type.
type: int
required: true
is_stateless:
description:
- A stateless rule allows traffic in one direction. Remember to add a corresponding
stateless rule in the other direction if you need to support bidirectional traffic. For
example, if ingress traffic allows TCP destination port 80, there should be an egress
rule to allow TCP source port 80. Defaults to false, which means the rule is stateful
and a corresponding rule is not necessary for bidirectional traffic.
type: bool
protocol:
description:
- "The transport protocol. Specify either `all` or an IPv4 protocol number as
defined in
L(Protocol Numbers,http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml).
Options are supported only for ICMP (\\"1\\"), TCP (\\"6\\"), and UDP (\\"17\\")."
type: str
required: true
source:
description:
- Conceptually, this is the range of IP addresses that a packet coming into the instance
can come from.
- "Allowed values:"
- " * IP address range in CIDR notation. For example: `192.168.1.0/24`"
- " * The `cidrBlock` value for a L(Service,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/Service/), if you're
setting up a security list rule for traffic coming from a particular `Service` through
a service gateway. For example: `oci-phx-objectstorage`."
type: str
required: true
source_type:
description:
- Type of source for the rule. The default is `CIDR_BLOCK`.
- " * `CIDR_BLOCK`: If the rule's `source` is an IP address range in CIDR notation."
- " * `SERVICE_CIDR_BLOCK`: If the rule's `source` is the `cidrBlock` value for a
L(Service,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/Service/) (the rule is for traffic coming from a
particular `Service` through a service gateway)."
type: str
choices:
- "CIDR_BLOCK"
- "SERVICE_CIDR_BLOCK"
tcp_options:
description:
- Optional and valid only for TCP. Use to specify particular destination ports for TCP rules.
If you specify TCP as the protocol but omit this object, then all destination ports are allowed.
type: dict
suboptions:
destination_port_range:
description:
- An inclusive range of allowed destination ports. Use the same number for the min and max
to indicate a single port. Defaults to all ports if not specified.
type: dict
suboptions:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
type: int
required: true
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
type: int
required: true
source_port_range:
description:
- An inclusive range of allowed source ports. Use the same number for the min and max to
indicate a single port. Defaults to all ports if not specified.
type: dict
suboptions:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
type: int
required: true
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
type: int
required: true
udp_options:
description:
- Optional and valid only for UDP. Use to specify particular destination ports for UDP rules.
If you specify UDP as the protocol but omit this object, then all destination ports are allowed.
type: dict
suboptions:
destination_port_range:
description:
- An inclusive range of allowed destination ports. Use the same number for the min and max
to indicate a single port. Defaults to all ports if not specified.
type: dict
suboptions:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
type: int
required: true
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
type: int
required: true
source_port_range:
description:
- An inclusive range of allowed source ports. Use the same number for the min and max to
indicate a single port. Defaults to all ports if not specified.
type: dict
suboptions:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
type: int
required: true
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
type: int
required: true
description:
description:
- An optional description of your choice for the rule.
type: str
vcn_id:
description:
- The OCID of the VCN the security list belongs to.
- Required for create using I(state=present).
type: str
security_list_id:
description:
- The OCID of the security list.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
purge_security_rules:
description:
- Purge security rules from security list which are not present in the provided group security list.
If I(purge_security_rules=no), provided security rules would be appended to existing security
rules. I(purge_security_rules) and I(delete_security_rules) are mutually exclusive.
- This parameter is updatable.
type: bool
default: "true"
delete_security_rules:
description:
- Delete security rules from existing security list which are present in the
security rules provided by I(ingress_security_rules) and/or I(egress_security_rules).
If I(delete_security_rules=yes), security rules provided by I(ingress_security_rules)
and/or I(egress_security_rules) would be deleted to existing security list, if they
are part of existing security list. If they are not part of existing security list,
they will be ignored. I(purge_security_rules) and I(delete_security_rules) are mutually
exclusive.
- This parameter is updatable.
type: bool
default: "false"
state:
description:
- The state of the SecurityList.
- Use I(state=present) to create or update a SecurityList.
- Use I(state=absent) to delete a SecurityList.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create security_list
oci_network_security_list:
vcn_id: ocid1.vcn.oc1.phx.unique_ID
display_name: MyPrivateSubnetSecurityList
ingress_security_rules:
- protocol: 6
source: 10.0.1.0/24
tcp_options:
destination_port_range:
min: 1521
max: 1521
- protocol: 6
source: 10.0.2.0/24
tcp_options:
destination_port_range:
min: 1521
max: 1521
egress_security_rules:
- protocol: 6
destination: 10.0.2.0/24
tcp_options:
destination_port_range:
min: 1521
max: 1521
compartment_id: ocid1.compartment.oc1..unique_ID
- name: Update security_list using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_network_security_list:
compartment_id: ocid1.compartment.oc1..unique_ID
defined_tags: {'Operations': {'CostCenter': 'US'}}
display_name: MyPrivateSubnetSecurityList
egress_security_rules:
- destination: 10.0.2.0/24
protocol: 6
freeform_tags: {'Department': 'Finance'}
ingress_security_rules:
- protocol: 6
source: 10.0.1.0/24
purge_security_rules: false
delete_security_rules: true
- name: Update security_list
oci_network_security_list:
defined_tags: {'Operations': {'CostCenter': 'US'}}
display_name: MyPrivateSubnetSecurityList
security_list_id: ocid1.securitylist.oc1..xxxxxxEXAMPLExxxxxx
- name: Delete security_list
oci_network_security_list:
security_list_id: ocid1.securitylist.oc1..xxxxxxEXAMPLExxxxxx
state: absent
- name: Delete security_list using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_network_security_list:
compartment_id: ocid1.compartment.oc1..unique_ID
display_name: MyPrivateSubnetSecurityList
state: absent
"""
RETURN = """
security_list:
description:
- Details of the SecurityList resource acted upon by the current operation
returned: on success
type: complex
contains:
compartment_id:
description:
- The OCID of the compartment containing the security list.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
returned: on success
type: string
sample: display_name_example
egress_security_rules:
description:
- Rules for allowing egress IP packets.
returned: on success
type: complex
contains:
destination:
description:
- Conceptually, this is the range of IP addresses that a packet originating from the instance
can go to.
- "Allowed values:"
- " * IP address range in CIDR notation. For example: `192.168.1.0/24`"
- " * The `cidrBlock` value for a L(Service,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/Service/), if you're
setting up a security list rule for traffic destined for a particular `Service` through
a service gateway. For example: `oci-phx-objectstorage`."
returned: on success
type: string
sample: destination_example
destination_type:
description:
- Type of destination for the rule. The default is `CIDR_BLOCK`.
- "Allowed values:"
- " * `CIDR_BLOCK`: If the rule's `destination` is an IP address range in CIDR notation."
- " * `SERVICE_CIDR_BLOCK`: If the rule's `destination` is the `cidrBlock` value for a
L(Service,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/Service/) (the rule is for traffic destined for a
particular `Service` through a service gateway)."
returned: on success
type: string
sample: CIDR_BLOCK
icmp_options:
description:
- "Optional and valid only for ICMP. Use to specify a particular ICMP type and code
as defined in
L(ICMP Parameters,http://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml).
If you specify ICMP as the protocol but omit this object, then all ICMP types and
codes are allowed. If you do provide this object, the type is required and the code is optional.
To enable MTU negotiation for ingress internet traffic, make sure to allow type 3 (\\"Destination
Unreachable\\") code 4 (\\"Fragmentation Needed and Don't Fragment was Set\\"). If you need to specify
multiple codes for a single type, create a separate security list rule for each."
returned: on success
type: complex
contains:
code:
description:
- The ICMP code (optional).
returned: on success
type: int
sample: 56
type:
description:
- The ICMP type.
returned: on success
type: int
sample: 56
is_stateless:
description:
- A stateless rule allows traffic in one direction. Remember to add a corresponding
stateless rule in the other direction if you need to support bidirectional traffic. For
example, if egress traffic allows TCP destination port 80, there should be an ingress
rule to allow TCP source port 80. Defaults to false, which means the rule is stateful
and a corresponding rule is not necessary for bidirectional traffic.
returned: on success
type: bool
sample: true
protocol:
description:
- "The transport protocol. Specify either `all` or an IPv4 protocol number as
defined in
L(Protocol Numbers,http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml).
Options are supported only for ICMP (\\"1\\"), TCP (\\"6\\"), and UDP (\\"17\\")."
returned: on success
type: string
sample: protocol_example
tcp_options:
description:
- Optional and valid only for TCP. Use to specify particular destination ports for TCP rules.
If you specify TCP as the protocol but omit this object, then all destination ports are allowed.
returned: on success
type: complex
contains:
destination_port_range:
description:
- An inclusive range of allowed destination ports. Use the same number for the min and max
to indicate a single port. Defaults to all ports if not specified.
returned: on success
type: complex
contains:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
returned: on success
type: int
sample: 56
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
returned: on success
type: int
sample: 56
source_port_range:
description:
- An inclusive range of allowed source ports. Use the same number for the min and max to
indicate a single port. Defaults to all ports if not specified.
returned: on success
type: complex
contains:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
returned: on success
type: int
sample: 56
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
returned: on success
type: int
sample: 56
udp_options:
description:
- Optional and valid only for UDP. Use to specify particular destination ports for UDP rules.
If you specify UDP as the protocol but omit this object, then all destination ports are allowed.
returned: on success
type: complex
contains:
destination_port_range:
description:
- An inclusive range of allowed destination ports. Use the same number for the min and max
to indicate a single port. Defaults to all ports if not specified.
returned: on success
type: complex
contains:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
returned: on success
type: int
sample: 56
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
returned: on success
type: int
sample: 56
source_port_range:
description:
- An inclusive range of allowed source ports. Use the same number for the min and max to
indicate a single port. Defaults to all ports if not specified.
returned: on success
type: complex
contains:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
returned: on success
type: int
sample: 56
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
returned: on success
type: int
sample: 56
description:
description:
- An optional description of your choice for the rule.
returned: on success
type: string
sample: description_example
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
id:
description:
- The security list's Oracle Cloud ID (OCID).
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
ingress_security_rules:
description:
- Rules for allowing ingress IP packets.
returned: on success
type: complex
contains:
icmp_options:
description:
- "Optional and valid only for ICMP. Use to specify a particular ICMP type and code
as defined in
L(ICMP Parameters,http://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml).
If you specify ICMP as the protocol but omit this object, then all ICMP types and
codes are allowed. If you do provide this object, the type is required and the code is optional.
To enable MTU negotiation for ingress internet traffic, make sure to allow type 3 (\\"Destination
Unreachable\\") code 4 (\\"Fragmentation Needed and Don't Fragment was Set\\"). If you need to specify
multiple codes for a single type, create a separate security list rule for each."
returned: on success
type: complex
contains:
code:
description:
- The ICMP code (optional).
returned: on success
type: int
sample: 56
type:
description:
- The ICMP type.
returned: on success
type: int
sample: 56
is_stateless:
description:
- A stateless rule allows traffic in one direction. Remember to add a corresponding
stateless rule in the other direction if you need to support bidirectional traffic. For
example, if ingress traffic allows TCP destination port 80, there should be an egress
rule to allow TCP source port 80. Defaults to false, which means the rule is stateful
and a corresponding rule is not necessary for bidirectional traffic.
returned: on success
type: bool
sample: true
protocol:
description:
- "The transport protocol. Specify either `all` or an IPv4 protocol number as
defined in
L(Protocol Numbers,http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml).
Options are supported only for ICMP (\\"1\\"), TCP (\\"6\\"), and UDP (\\"17\\")."
returned: on success
type: string
sample: protocol_example
source:
description:
- Conceptually, this is the range of IP addresses that a packet coming into the instance
can come from.
- "Allowed values:"
- " * IP address range in CIDR notation. For example: `192.168.1.0/24`"
- " * The `cidrBlock` value for a L(Service,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/Service/), if you're
setting up a security list rule for traffic coming from a particular `Service` through
a service gateway. For example: `oci-phx-objectstorage`."
returned: on success
type: string
sample: source_example
source_type:
description:
- Type of source for the rule. The default is `CIDR_BLOCK`.
- " * `CIDR_BLOCK`: If the rule's `source` is an IP address range in CIDR notation."
- " * `SERVICE_CIDR_BLOCK`: If the rule's `source` is the `cidrBlock` value for a
L(Service,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/Service/) (the rule is for traffic coming from a
particular `Service` through a service gateway)."
returned: on success
type: string
sample: CIDR_BLOCK
tcp_options:
description:
- Optional and valid only for TCP. Use to specify particular destination ports for TCP rules.
If you specify TCP as the protocol but omit this object, then all destination ports are allowed.
returned: on success
type: complex
contains:
destination_port_range:
description:
- An inclusive range of allowed destination ports. Use the same number for the min and max
to indicate a single port. Defaults to all ports if not specified.
returned: on success
type: complex
contains:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
returned: on success
type: int
sample: 56
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
returned: on success
type: int
sample: 56
source_port_range:
description:
- An inclusive range of allowed source ports. Use the same number for the min and max to
indicate a single port. Defaults to all ports if not specified.
returned: on success
type: complex
contains:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
returned: on success
type: int
sample: 56
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
returned: on success
type: int
sample: 56
udp_options:
description:
- Optional and valid only for UDP. Use to specify particular destination ports for UDP rules.
If you specify UDP as the protocol but omit this object, then all destination ports are allowed.
returned: on success
type: complex
contains:
destination_port_range:
description:
- An inclusive range of allowed destination ports. Use the same number for the min and max
to indicate a single port. Defaults to all ports if not specified.
returned: on success
type: complex
contains:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
returned: on success
type: int
sample: 56
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
returned: on success
type: int
sample: 56
source_port_range:
description:
- An inclusive range of allowed source ports. Use the same number for the min and max to
indicate a single port. Defaults to all ports if not specified.
returned: on success
type: complex
contains:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
returned: on success
type: int
sample: 56
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
returned: on success
type: int
sample: 56
description:
description:
- An optional description of your choice for the rule.
returned: on success
type: string
sample: description_example
lifecycle_state:
description:
- The security list's current state.
returned: on success
type: string
sample: PROVISIONING
time_created:
description:
- The date and time the security list was created, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
vcn_id:
description:
- The OCID of the VCN the security list belongs to.
returned: on success
type: string
sample: ocid1.vcn.oc1..xxxxxxEXAMPLExxxxxx
sample: {
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"display_name": "display_name_example",
"egress_security_rules": [{
"destination": "destination_example",
"destination_type": "CIDR_BLOCK",
"icmp_options": {
"code": 56,
"type": 56
},
"is_stateless": true,
"protocol": "protocol_example",
"tcp_options": {
"destination_port_range": {
"max": 56,
"min": 56
},
"source_port_range": {
"max": 56,
"min": 56
}
},
"udp_options": {
"destination_port_range": {
"max": 56,
"min": 56
},
"source_port_range": {
"max": 56,
"min": 56
}
},
"description": "description_example"
}],
"freeform_tags": {'Department': 'Finance'},
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"ingress_security_rules": [{
"icmp_options": {
"code": 56,
"type": 56
},
"is_stateless": true,
"protocol": "protocol_example",
"source": "source_example",
"source_type": "CIDR_BLOCK",
"tcp_options": {
"destination_port_range": {
"max": 56,
"min": 56
},
"source_port_range": {
"max": 56,
"min": 56
}
},
"udp_options": {
"destination_port_range": {
"max": 56,
"min": 56
},
"source_port_range": {
"max": 56,
"min": 56
}
},
"description": "description_example"
}],
"lifecycle_state": "PROVISIONING",
"time_created": "2016-08-25T21:10:29.600Z",
"vcn_id": "ocid1.vcn.oc1..xxxxxxEXAMPLExxxxxx"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.core import VirtualNetworkClient
from oci.core.models import CreateSecurityListDetails
from oci.core.models import UpdateSecurityListDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class SecurityListHelperGen(OCIResourceHelperBase):
    """Resource helper for OCI VCN security lists.

    Supported operations: create, update, get, list and delete.
    """

    def get_module_resource_id_param(self):
        """Name of the module parameter that carries the resource OCID."""
        return "security_list_id"

    def get_module_resource_id(self):
        """OCID of the security list targeted by this invocation (or None)."""
        return self.module.params.get("security_list_id")

    def get_get_fn(self):
        """Client function used to fetch a single security list."""
        return self.client.get_security_list

    def get_resource(self):
        """Fetch the targeted security list, retrying with backoff."""
        security_list_id = self.module.params.get("security_list_id")
        return oci_common_utils.call_with_backoff(
            self.client.get_security_list, security_list_id=security_list_id,
        )

    def get_required_kwargs_for_list(self):
        """Keyword arguments the list call cannot do without."""
        kwargs = {}
        for required in ("compartment_id",):
            kwargs[required] = self.module.params[required]
        return kwargs

    def get_optional_kwargs_for_list(self):
        """Optional server-side filters for the list call.

        A parameter is forwarded only when it was supplied and either the
        resource is identified by name or the parameter takes part in the
        ``key_by`` matching.
        """
        kwargs = {}
        for optional in ("vcn_id", "display_name"):
            if self.module.params.get(optional) is None:
                continue
            if not (
                self._use_name_as_identifier()
                or (
                    not self.module.params.get("key_by")
                    or optional in self.module.params.get("key_by")
                )
            ):
                continue
            kwargs[optional] = self.module.params[optional]
        return kwargs

    def list_resources(self):
        """List all security lists matching the collected filters."""
        kwargs = oci_common_utils.merge_dicts(
            self.get_required_kwargs_for_list(), self.get_optional_kwargs_for_list()
        )
        return oci_common_utils.list_all_resources(
            self.client.list_security_lists, **kwargs
        )

    def get_create_model_class(self):
        """SDK model describing a create request."""
        return CreateSecurityListDetails

    def create_resource(self):
        """Create the security list and wait for its lifecycle state."""
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.create_security_list,
            call_fn_args=(),
            call_fn_kwargs={"create_security_list_details": self.get_create_model()},
            waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
            operation=oci_common_utils.CREATE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_wait_for_states_for_operation(
                oci_common_utils.CREATE_OPERATION_KEY,
            ),
        )

    def get_update_model_class(self):
        """SDK model describing an update request."""
        return UpdateSecurityListDetails

    def update_resource(self):
        """Apply the requested changes and wait for the update to settle."""
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.update_security_list,
            call_fn_args=(),
            call_fn_kwargs={
                "security_list_id": self.module.params.get("security_list_id"),
                "update_security_list_details": self.get_update_model(),
            },
            waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
            operation=oci_common_utils.UPDATE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_wait_for_states_for_operation(
                oci_common_utils.UPDATE_OPERATION_KEY,
            ),
        )

    def delete_resource(self):
        """Delete the security list and wait until the deletion completes."""
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.delete_security_list,
            call_fn_args=(),
            call_fn_kwargs={
                "security_list_id": self.module.params.get("security_list_id"),
            },
            waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
            operation=oci_common_utils.DELETE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_wait_for_states_for_operation(
                oci_common_utils.DELETE_OPERATION_KEY,
            ),
        )
# Pull in user-supplied customizations for this resource type, if installed.
SecurityListHelperCustom = get_custom_class("SecurityListHelperCustom")
class ResourceHelper(SecurityListHelperCustom, SecurityListHelperGen):
    # MRO puts custom overrides ahead of the generated implementation.
    pass
def _port_range_spec():
    """Argument spec for a TCP/UDP port range (inclusive min/max)."""
    return dict(
        type="dict",
        options=dict(
            max=dict(type="int", required=True),
            min=dict(type="int", required=True),
        ),
    )


def _l4_options_spec():
    """Argument spec shared by the tcp_options and udp_options sub-dicts."""
    return dict(
        type="dict",
        options=dict(
            destination_port_range=_port_range_spec(),
            source_port_range=_port_range_spec(),
        ),
    )


def _common_rule_options():
    """Sub-options common to both ingress and egress security rules."""
    return dict(
        icmp_options=dict(
            type="dict",
            options=dict(code=dict(type="int"), type=dict(type="int", required=True)),
        ),
        is_stateless=dict(type="bool"),
        protocol=dict(type="str", required=True),
        tcp_options=_l4_options_spec(),
        udp_options=_l4_options_spec(),
        description=dict(type="str"),
    )


def main():
    """Entry point: build the argument spec and dispatch the requested action.

    Supports create/update/delete (by OCID or by display name) of an OCI
    security list, plus check mode and lifecycle-state waiting.
    """
    module_args = oci_common_utils.get_common_arg_spec(
        supports_create=True, supports_wait=True
    )
    # Egress and ingress rules share everything except the address field:
    # egress rules name a destination, ingress rules name a source.
    egress_rule_options = dict(
        destination=dict(type="str", required=True),
        destination_type=dict(
            type="str", choices=["CIDR_BLOCK", "SERVICE_CIDR_BLOCK"]
        ),
    )
    egress_rule_options.update(_common_rule_options())
    ingress_rule_options = dict(
        source=dict(type="str", required=True),
        source_type=dict(type="str", choices=["CIDR_BLOCK", "SERVICE_CIDR_BLOCK"]),
    )
    ingress_rule_options.update(_common_rule_options())
    module_args.update(
        dict(
            compartment_id=dict(type="str"),
            defined_tags=dict(type="dict"),
            display_name=dict(aliases=["name"], type="str"),
            egress_security_rules=dict(
                type="list", elements="dict", options=egress_rule_options
            ),
            freeform_tags=dict(type="dict"),
            ingress_security_rules=dict(
                type="list", elements="dict", options=ingress_rule_options
            ),
            vcn_id=dict(type="str"),
            security_list_id=dict(aliases=["id"], type="str"),
            # Real booleans instead of the strings "true"/"false": Ansible
            # coerced the strings, but proper bool defaults are required by
            # argument-spec sanity checks (validate-modules).
            purge_security_rules=dict(type="bool", default=True),
            delete_security_rules=dict(type="bool", default=False),
            state=dict(type="str", default="present", choices=["present", "absent"]),
        )
    )
    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")
    resource_helper = ResourceHelper(
        module=module,
        resource_type="security_list",
        service_client_class=VirtualNetworkClient,
        namespace="core",
    )
    # Exactly one action applies; the default result reports no change.
    result = dict(changed=False)
    if resource_helper.is_delete_using_name():
        result = resource_helper.delete_using_name()
    elif resource_helper.is_delete():
        result = resource_helper.delete()
    elif resource_helper.is_update_using_name():
        result = resource_helper.update_using_name()
    elif resource_helper.is_update():
        result = resource_helper.update()
    elif resource_helper.is_create():
        result = resource_helper.create()
    module.exit_json(**result)
if __name__ == "__main__":
    # Standard Ansible module entry point: run only when executed directly.
    main()
| StarcoderdataPython |
142320 | <filename>rest_framework_security/brute_force_protection/exceptions.py<gh_stars>1-10
class BruteForceProtectionException(Exception):
    """Base exception for the brute-force protection subsystem."""
    pass
class BruteForceProtectionBanException(BruteForceProtectionException):
    """Raised when a client is banned after too many failed attempts."""
    pass
class BruteForceProtectionCaptchaException(BruteForceProtectionException):
    """Raised when a captcha challenge is required before continuing."""
    pass
| StarcoderdataPython |
1738419 | <reponame>wikimedia/operations-debs-python-flask-login<gh_stars>0
# -*- coding: utf-8 -*-
"""
flask.ext.login
==============
This module provides user session management for Flask. It lets you log your
users in and out in a database-independent manner.
:copyright: (C) 2011 by <NAME>.
:license: MIT/X11, see LICENSE for more details.
"""
import hmac
from datetime import datetime, timedelta
from flask import (current_app, session, _request_ctx_stack, redirect, url_for,
request, flash, abort)
from flask.signals import Namespace
from functools import wraps
from hashlib import sha1, md5
from urlparse import urlparse, urlunparse
from werkzeug.local import LocalProxy
from werkzeug.urls import url_decode, url_encode
_signals = Namespace()
def _get_user():
    """Return the user stored on the current request context, or None."""
    ctx = _request_ctx_stack.top
    return getattr(ctx, "user", None)
def _cookie_digest(payload, key=None):
    """HMAC-SHA1 hex digest of *payload*, keyed by *key* or the app secret."""
    if key is None:
        key = current_app.config["SECRET_KEY"]
    return hmac.new(key, payload.encode("utf8"), sha1).hexdigest()
def encode_cookie(payload):
    """Sign *payload* (a `unicode`) with the app's secret key.

    The result is the payload followed by ``|`` and an HMAC digest, so the
    value can later be verified by `decode_cookie`.

    :param payload: The value to encode, as `unicode`.
    """
    digest = _cookie_digest(payload)
    return u"%s|%s" % (payload, digest)
def decode_cookie(cookie):
    """Verify and decode a cookie produced by `encode_cookie`.

    Returns the original payload on success, or `None` when the cookie is
    malformed or its signature does not match.

    :param cookie: An encoded cookie.
    """
    try:
        payload, digest = cookie.rsplit(u"|", 1)
        digest = digest.encode("ascii")
    except ValueError:
        return None
    if _cookie_digest(payload) != digest:
        return None
    return payload
def make_next_param(login, current):
    """Strip scheme and host from *current* when they match the login URL.

    Keeps redirect targets short: if *current* lives on the same scheme and
    host as *login* (or *login* does not constrain them), only the
    path/params/query of *current* survive.

    :param login: The login URL being redirected to.
    :param current: The URL to reduce.
    """
    l_scheme, l_netloc = urlparse(login)[:2]
    c_scheme, c_netloc = urlparse(current)[:2]
    scheme_ok = not l_scheme or l_scheme == c_scheme
    netloc_ok = not l_netloc or l_netloc == c_netloc
    if scheme_ok and netloc_ok:
        parsed = urlparse(current)
        return urlunparse(("", "", parsed[2], parsed[3], parsed[4], ""))
    return current
def login_url(login_view, next_url=None, next_field="next"):
    """Build the URL of the login page, optionally carrying a redirect target.

    With only *login_view*, just return its URL. With *next_url*, append a
    ``next=URL`` (or *next_field*) query parameter so the login view can
    redirect back afterwards.

    :param login_view: The name of the login view, or an actual URL.
    :param next_url: The URL to give the login view for redirection.
    :param next_field: Query-string field used to store the next URL.
    """
    if login_view.startswith(("https://", "http://", "/")):
        base = login_view
    else:
        base = url_for(login_view)
    if next_url is None:
        return base
    parts = list(urlparse(base))
    query = url_decode(parts[4])
    query[next_field] = make_next_param(base, next_url)
    parts[4] = url_encode(query, sort=True)
    return urlunparse(parts)
def make_secure_token(*args, **options):
    """Create an HMAC-based authentication token from *args*.

    Pass ``key=...`` to sign with a specific secret; otherwise the current
    app's ``SECRET_KEY`` is used. If the token must not be reproducible,
    include some random data among the arguments.

    :param args: The data to include in the token.
    :param options: To manually specify a secret key, pass ``key=THE_KEY``.
    """
    key = options.get("key")
    if key is None:
        key = current_app.config["SECRET_KEY"]
    pieces = []
    for arg in args:
        pieces.append(arg.encode("utf8") if isinstance(arg, unicode) else arg)
    payload = "\0".join(pieces)
    return hmac.new(key, payload, sha1).hexdigest().decode("utf8")
def _create_identifier():
    """Fingerprint the client: MD5 over remote address and user agent."""
    raw = "%s|%s" % (request.remote_addr, request.headers.get("User-Agent"))
    base = unicode(raw, "utf8", errors="replace")
    digest = md5()
    digest.update(base.encode("utf8"))
    return digest.digest()
# --- Module-level defaults, overridable via app config -------------------
#: The default name of the "remember me" cookie (``remember_token``)
COOKIE_NAME = "remember_token"
#: The default time before the "remember me" cookie expires (365 days).
COOKIE_DURATION = timedelta(days=365)
#: The default flash message to display when users need to log in.
LOGIN_MESSAGE = u"Please log in to access this page."
#: The default flash message to display when users need to reauthenticate.
REFRESH_MESSAGE = u"Please reauthenticate to access this page."
class LoginManager(object):
    """
    This object is used to hold the settings used for logging in.
    Instances of `LoginManager` are *not* bound to specific apps, so
    you can create one in the main body of your code and then bind it to your
    app in a factory function.
    """
    def __init__(self):
        #: A class or factory function that produces an anonymous user, which
        #: is used when no one is logged in.
        self.anonymous_user = AnonymousUser
        #: The name of the view to redirect to when the user needs to log in.
        #: (This can be an absolute URL as well, if your authentication
        #: machinery is external to your application.)
        self.login_view = None
        #: The message to flash when a user is redirected to the login page.
        self.login_message = LOGIN_MESSAGE
        #: The name of the view to redirect to when the user needs to
        #: reauthenticate.
        self.refresh_view = None
        #: The message to flash when a user is redirected to the "needs
        #: refresh" page.
        self.needs_refresh_message = REFRESH_MESSAGE
        #: The mode to use session protection in. This can be either
        #: ``"basic"`` (the default) or ``"strong"``, or `None` to disable it.
        self.session_protection = "basic"
        # Application-registered callbacks; all unset until the decorator
        # methods below are used.
        self.token_callback = None
        self.user_callback = None
        self.unauthorized_callback = None
        self.needs_refresh_callback = None
    def user_loader(self, callback):
        """
        This sets the callback for reloading a user from the session. The
        function you set should take a user ID (a `unicode`) and return a
        user object, or `None` if the user does not exist.
        :param callback: The callback for retrieving a user object.
        """
        self.user_callback = callback
    def token_loader(self, callback):
        """
        This sets the callback for loading a user from an authentication
        token. The function you set should take an authentication token
        (a `unicode`, as returned by a user's `get_auth_token` method) and
        return a user object, or `None` if the user does not exist.
        :param callback: The callback for retrieving a user object.
        """
        self.token_callback = callback
    def setup_app(self, app, add_context_processor=True):
        """
        Configures an application. This registers a `before_request` and an
        `after_request` call, and attaches this `LoginManager` to it as
        ``app.login_manager``.
        :param app: The `flask.Flask` object to configure.
        :param add_context_processor: Whether to add a context processor to
                                      the app that adds a `current_user`
                                      variable to the template.
        """
        app.login_manager = self
        # Hook the request lifecycle: load the user before each request,
        # sync the remember cookie after each response.
        app.before_request(self._load_user)
        app.after_request(self._update_remember_cookie)
        if add_context_processor:
            app.context_processor(_user_context_processor)
    def unauthorized_handler(self, callback):
        """
        This will set the callback for the `unauthorized` method, which among
        other things is used by `login_required`. It takes no arguments, and
        should return a response to be sent to the user instead of their
        normal view.
        :param callback: The callback for unauthorized users.
        """
        self.unauthorized_callback = callback
    def unauthorized(self):
        """
        This is called when the user is required to log in. If you register a
        callback with `unauthorized_handler`, then it will be called.
        Otherwise, it will take the following actions:
        - Flash `login_message` to the user.
        - Redirect the user to `login_view`. (The page they were attempting
          to access will be passed in the `next` query string variable, so
          you can redirect there if present instead of the homepage.)
        If `login_view` is not defined, then it will simply raise a 401
        (Unauthorized) error instead.
        This should be returned from a view or before/after_request function,
        otherwise the redirect will have no effect.
        """
        user_unauthorized.send(current_app._get_current_object())
        if self.unauthorized_callback:
            return self.unauthorized_callback()
        if not self.login_view:
            abort(401)
        if self.login_message:
            flash(self.login_message)
        return redirect(login_url(self.login_view, request.url))
    def needs_refresh_handler(self, callback):
        """
        This will set the callback for the `needs_refresh` method, which among
        other things is used by `fresh_login_required`. It takes no arguments,
        and should return a response to be sent to the user instead of their
        normal view.
        :param callback: The callback for unauthorized users.
        """
        self.needs_refresh_callback = callback
    def needs_refresh(self):
        """
        This is called when the user is logged in, but they need to be
        reauthenticated because their session is stale. If you register a
        callback with `needs_refresh_handler`, then it will be called.
        Otherwise, it will take the following actions:
        - Flash `needs_refresh_message` to the user.
        - Redirect the user to `refresh_view`. (The page they were attempting
          to access will be passed in the `next` query string variable, so
          you can redirect there if present instead of the homepage.)
        If `refresh_view` is not defined, then it will simply raise a 403
        (Forbidden) error instead.
        This should be returned from a view or before/after_request function,
        otherwise the redirect will have no effect.
        """
        user_needs_refresh.send(current_app._get_current_object())
        if self.needs_refresh_callback:
            return self.needs_refresh_callback()
        if not self.refresh_view:
            abort(403)
        flash(self.needs_refresh_message)
        return redirect(login_url(self.refresh_view, request.url))
    def _load_user(self):
        # before_request hook: populate the request context's user.
        if request.path.startswith(current_app.static_url_path):
            # Static assets never need an authenticated user.
            return
        config = current_app.config
        if config.get("SESSION_PROTECTION", self.session_protection):
            deleted = self._session_protection()
            if deleted:
                # The session was wiped; reload to get the anonymous user.
                self.reload_user()
                return
        # If a remember cookie is set, and the session is not, move the
        # cookie user ID to the session.
        cookie_name = config.get("REMEMBER_COOKIE_NAME", COOKIE_NAME)
        if cookie_name in request.cookies and "user_id" not in session:
            self._load_from_cookie(request.cookies[cookie_name])
        else:
            self.reload_user()
    def _session_protection(self):
        # Compare the stored client fingerprint against this request's one.
        # Returns True when the session was cleared (strong-mode mismatch).
        sess = session._get_current_object()
        ident = _create_identifier()
        if "_id" not in sess:
            sess["_id"] = ident
        elif ident != sess["_id"]:
            app = current_app._get_current_object()
            mode = app.config.get("SESSION_PROTECTION",
                                  self.session_protection)
            if mode == "basic" or sess.permanent:
                # Basic mode: keep the session but mark it non-fresh.
                sess["_fresh"] = False
                session_protected.send(app)
                return False
            elif mode == "strong":
                # Strong mode: destroy the session and the remember cookie.
                sess.clear()
                sess["remember"] = "clear"
                session_protected.send(app)
                return True
        return False
    def reload_user(self):
        # Resolve the session's user_id into a user object on the request
        # context, falling back to the anonymous user.
        ctx = _request_ctx_stack.top
        user_id = session.get("user_id", None)
        if user_id is None:
            ctx.user = self.anonymous_user()
        else:
            user = self.user_callback(user_id)
            if user is None:
                # The ID no longer maps to a user; drop the stale session.
                logout_user()
            else:
                ctx.user = user
    def _load_from_cookie(self, cookie):
        # Restore a login from the "remember me" cookie, preferring the
        # token-based loader when one is registered.
        if self.token_callback:
            user = self.token_callback(cookie)
            if user is not None:
                session["user_id"] = user.get_id()
                session["_fresh"] = False  # cookie logins are never fresh
                _request_ctx_stack.top.user = user
            else:
                self.reload_user()
        else:
            user_id = decode_cookie(cookie)
            if user_id is not None:
                session["user_id"] = user_id
                session["_fresh"] = False  # cookie logins are never fresh
            self.reload_user()
    def _update_remember_cookie(self, response):
        # after_request hook: honour a pending "set"/"clear" request that
        # login_user/logout_user left in the session.
        operation = session.pop("remember", None)
        if operation == "set" and "user_id" in session:
            self._set_cookie(response)
        elif operation == "clear":
            self._clear_cookie(response)
        return response
    def _set_cookie(self, response):
        # cookie settings
        config = current_app.config
        cookie_name = config.get("REMEMBER_COOKIE_NAME", COOKIE_NAME)
        duration = config.get("REMEMBER_COOKIE_DURATION", COOKIE_DURATION)
        domain = config.get("REMEMBER_COOKIE_DOMAIN", None)
        # prepare data: an auth token if a token loader is registered,
        # otherwise the signed user ID.
        if self.token_callback:
            data = current_user.get_auth_token()
        else:
            data = encode_cookie(str(session["user_id"]))
        expires = datetime.now() + duration
        # actually set it
        response.set_cookie(cookie_name, data, expires=expires, domain=domain)
    def _clear_cookie(self, response):
        # Remove the remember cookie from the client.
        config = current_app.config
        cookie_name = config.get("REMEMBER_COOKIE_NAME", COOKIE_NAME)
        domain = config.get("REMEMBER_COOKIE_DOMAIN", None)
        response.delete_cookie(cookie_name, domain=domain)
#: A proxy for the current user. Resolves lazily at each access so it is
#: safe to import at module load time.
current_user = LocalProxy(lambda: _request_ctx_stack.top.user)
def _user_context_processor():
    # Registered by setup_app: exposes `current_user` to Jinja templates.
    return dict(current_user=_get_user())
def login_fresh():
    """Return whether the current login is fresh.

    A login is fresh when it came from an actual credential check rather
    than being restored from a "remember me" cookie.
    """
    return session.get("_fresh", False)
def login_user(user, remember=False, force=False):
    """
    Logs a user in. You should pass the actual user object to this. If the
    user's `is_active` method returns `False`, they will not be logged in
    unless `force` is `True`.
    This will return `True` if the log in attempt succeeds, and `False` if
    it fails (i.e. because the user is inactive).
    :param user: The user object to log in.
    :param remember: Whether to remember the user after their session expires.
    :param force: If the user is inactive, setting this to `True` will log
                  them in regardless.
    """
    if (not force) and (not user.is_active()):
        return False
    user_id = user.get_id()
    session["user_id"] = user_id
    session["_fresh"] = True  # a direct login is always fresh
    if remember:
        # The after-request hook turns this into a persistent cookie.
        session["remember"] = "set"
    # (Removed an unused `app = current_app._get_current_object()` local.)
    current_app.login_manager.reload_user()
    user_logged_in.send(current_app._get_current_object(), user=_get_user())
    return True
def logout_user():
    """Log the current user out.

    The user object does not need to be passed in; it is taken from the
    request context. Also schedules removal of the remember cookie when
    one is present.
    """
    # Membership checks (rather than pop) so an absent key does not touch
    # the session at all.
    if "user_id" in session:
        del session["user_id"]
    if "_fresh" in session:
        del session["_fresh"]
    remember_cookie = current_app.config.get("REMEMBER_COOKIE_NAME", COOKIE_NAME)
    if remember_cookie in request.cookies:
        session["remember"] = "clear"
    user = _get_user()
    if user and not user.is_anonymous():
        user_logged_out.send(current_app._get_current_object(), user=user)
    current_app.login_manager.reload_user()
    return True
def confirm_login():
    """Mark the current session as fresh again.

    Sessions go stale when restored from a remember cookie; call this after
    re-verifying the user's credentials.
    """
    session["_fresh"] = True
    user_login_confirmed.send(current_app._get_current_object())
def login_required(fn):
    """
    Decorator ensuring the current user is logged in and authenticated
    before the wrapped view runs; otherwise the login manager's
    `~LoginManager.unauthorized` callback is invoked. For example::
        @app.route("/post")
        @login_required
        def post():
            pass
    For one-off checks inside a view you can instead write::
        if not current_user.is_authenticated():
            return current_app.login_manager.unauthorized()
    (which is essentially the code this decorator adds around your view).
    :param fn: The view function to decorate.
    """
    @wraps(fn)
    def decorated_view(*args, **kwargs):
        if current_user.is_authenticated():
            return fn(*args, **kwargs)
        return current_app.login_manager.unauthorized()
    return decorated_view
def fresh_login_required(fn):
    """
    Decorator ensuring the current user's login is fresh - i.e. their
    session was not restored from a "remember me" cookie. Protect sensitive
    operations, like changing a password or e-mail, with this to impede the
    efforts of cookie thieves.
    An unauthenticated user triggers `LoginManager.unauthorized` as usual.
    An authenticated user with a stale session triggers
    `LoginManager.needs_refresh` instead, so a
    `~LoginManager.refresh_view` must be configured.
    :param fn: The view function to decorate.
    """
    @wraps(fn)
    def decorated_view(*args, **kwargs):
        if not current_user.is_authenticated():
            return current_app.login_manager.unauthorized()
        if not login_fresh():
            return current_app.login_manager.needs_refresh()
        return fn(*args, **kwargs)
    return decorated_view
class UserMixin(object):
    """
    Mixin supplying the default implementations Flask-Login expects on user
    objects: active, authenticated, and not anonymous.
    """

    def is_active(self):
        """Real users are considered active by default."""
        return True

    def is_authenticated(self):
        """Real users are considered authenticated by default."""
        return True

    def is_anonymous(self):
        """Real users are never anonymous."""
        return False

    def get_id(self):
        """Return the user's `id` attribute converted to `unicode`.

        Override this when the identifier is stored under another name.
        """
        try:
            return unicode(self.id)
        except AttributeError:
            raise NotImplementedError("No `id` attribute - override get_id")
class AnonymousUser(object):
    """
    Default representation of a visitor who is not logged in: inactive,
    unauthenticated, anonymous, and without an ID.
    """

    def is_authenticated(self):
        """Anonymous users are never authenticated."""
        return False

    def is_active(self):
        """Anonymous users are never active."""
        return False

    def is_anonymous(self):
        """This is, by definition, the anonymous user."""
        return True

    def get_id(self):
        """Anonymous users have no identifier."""
        return None
# Signals -- emitted through the private blinker Namespace above so that
# applications can subscribe to login lifecycle events.
#: Sent when a user is logged in. In addition to the app (which is the
#: sender), it is passed `user`, which is the user being logged in.
user_logged_in = _signals.signal("logged-in")
#: Sent when a user is logged out. In addition to the app (which is the
#: sender), it is passed `user`, which is the user being logged out.
user_logged_out = _signals.signal("logged-out")
#: Sent when a user's login is confirmed, marking it as fresh. (It is not
#: called for a normal login.)
#: It receives no additional arguments besides the app.
user_login_confirmed = _signals.signal("login-confirmed")
#: Sent when the `unauthorized` method is called on a `LoginManager`. It
#: receives no additional arguments besides the app.
user_unauthorized = _signals.signal("unauthorized")
#: Sent when the `needs_refresh` method is called on a `LoginManager`. It
#: receives no additional arguments besides the app.
user_needs_refresh = _signals.signal("needs-refresh")
#: Sent whenever session protection takes effect, and a session is either
#: marked non-fresh or deleted. It receives no additional arguments besides
#: the app.
session_protected = _signals.signal("session-protected")
| StarcoderdataPython |
1723695 | #
__doc__ = '''
Primitive functions to be embeded in model/test templates
'''
MODEL_PRIMITIVE_FUNCTIONS = """@{
# Available classes
# Module()
# Pin()
# Metric()
# Param()
import sys
import os
from dave.common.misc import dec2bin
##############################
# primitive functions
##############################
def put_warning_message(msg):
return '###--- WARNING: '+ msg.upper()
def put_error_message(msg):
return '###--- ERROR: '+ msg
def print_bus(n):
'''
print out bus representation for given bus width (n)
if n==1, return a space
'''
return '[%d:0]' % int(n-1) if n > 1 else ''
def to_engr (value,dtype=float):
''' convert a floating number to engineering notation
if value < 1e-18 , it returns 0.0
'''
suffix = [('a',1e-18),('f',1e-15),('p',1e-12),('n',1e-9),('u',1e-6),('m',1e-3), \
('',1.0),('k',1e3),('M',1e6),('G',1e9),('T',1e12),('P',1e15),('E',1e18)]
try:
m = abs(value)
if m < suffix[0][1]: # if less than 1e-18
return '0.0'
elif m >= suffix[-1][1]: # if larger than 1e18
return '%.3f'%(dtype(value/suffix[-1][1]))+suffix[-1][0]
else:
for p,v in enumerate(suffix):
if m/v[1] < 1.0:
return '%.3f'%(dtype(value/suffix[p-1][1]))+suffix[p-1][0]
except:
return None
##############################
# MODULE declaration section
##############################
class Module(object):
@classmethod
def name(cls):
'''
Prints out a module name. Mainly used for two purposes:
a. module name declaration in Verilog model, and
b. device-under-test (dut) name declaration in mProbo test
'''
try:
return module_name
except:
return None
@classmethod
def pin(cls, p, comma=False):
'''
print a pin in module declaration. A comma will follow if comma==True
'''
if Pin.is_exist(p):
if Pin.datatype(p) == 'pwl':
return PWL.declare_pin(p, comma)
elif Pin.datatype(p) == 'real':
return REAL.declare_pin(p, comma)
elif Pin.datatype(p) == 'logic':
return LOGIC.declare_pin(p, comma)
@classmethod
def pins(cls): # print all pins in module declaration
pl = Pin.list()
return '\\n '.join([ cls.pin(p, True) for p in pl[:-1] ] + [ cls.pin(pl[-1], False) ])
@classmethod
def parameter(cls, p, comma=False, semicolon=False):
'''
Print a parameter in module declaration. A comma will follow if comma==True
'''
if Param.is_exist(p):
return 'parameter %s %s = %s%s%s // %s' % (Param.datatype(p), Param.name(p), Param.value(p), ',' if comma else '', ';' if semicolon else '', Param.description(p))
@classmethod
def parameters(cls): # print all parameters in module declaration
pl = Param.list()
if pl != []:
return '\\n '.join([ cls.parameter(p, True) for p in pl[:-1] ] + [ cls.parameter(pl[-1], False) ])
##############################
# PIN section
##############################
class Pin(object):
@classmethod
def get(cls): # get the "pin" section in the interface
return pin
@classmethod
def list(cls): # return a list of pins
return cls.get().keys()
@classmethod
def list_property(cls, p): # return a list of properties for given pin name
return cls.get()[p].keys()
@classmethod
def get_namemap(cls): # return a dict of pin map (generic -> user)
return dict([(p, cls.name(p)) for p in cls.list()])
@classmethod
def get_reversed_namemap(cls): # return a dict of reversed pin map (user -> generic)
return dict([(cls.name(p),p) for p in cls.list()])
@classmethod
def is_exist(cls, p): # check if a pin named 'p' exists
return p in cls.list()
@classmethod
def property(cls, p, c): # retreive property 'c' of a pin 'p'
try:
return cls.get()[p][c]
except:
return None
@classmethod
def is_mode(cls, p): # retreive property 'is_mode' of a pin 'p'
return cls.property(p, 'is_mode')
@classmethod
def constraint(cls, p, c): # retreive the value of constraint 'c' of a pin 'p'
constraint = cls.property(p, 'constraint')
return constraint[c]['value']
@classmethod
def is_or(cls, p1, p2): # at least one of pins ('p1' and 'p2') should exist
return True if cls.is_exist(p1) or cls.is_exist(p2) else False
@classmethod
def is_and(cls, p1, p2): # both pins ('p1' and 'p2') should exist
return True if cls.is_exist(p1) and cls.is_exist(p2) else False
@classmethod
def is_xor(cls, p1, p2): # only one of pins ('p1' and 'p2') should exist
return True if cls.is_exist(p1) ^ cls.is_exist(p2) else False
@classmethod
def name(cls, p):
return cls.property(p,'name')
@classmethod
def generic_name(cls, name):
return cls.get_reversed_namemap()[name]
@classmethod
def direction(cls, p):
return cls.property(p, 'direction')
@classmethod
def datatype(cls, p):
dt = cls.property(p, 'datatype')
if dt=='': dt='logic'
return dt
@classmethod
def description(cls, p):
return cls.property(p, 'description')
@classmethod
def vectorsize(cls, p):
if 'vectorsize' in cls.list_property(p):
return cls.property(p, 'vectorsize')
else:
return 0
@classmethod
def list_constraint(cls, p):
c = cls.property(p, 'constraint')
if c != None:
return c.keys()
else:
return []
@classmethod
def check_pin_chain(cls): # sometimes, other pins should exist when a pin exists
err_list = []
for p in cls.list():
if 'pin_chain' in cls.list_constraint(p):
violated = list(set(cls.constraint(p, 'pin_chain'))-set(cls.list()))
if (violated != []):
err_list.append('Pin chain validation failed. Missing pin(s) "'+','.join(violated)+'" for the pin "%s"' % p)
if len(err_list) > 0:
print '\\n'.join(map(put_error_message, err_list))
sys.exit()
@classmethod
def is_current(cls, p): # check if this pin is current signal
return 'current' in cls.list_constraint(p)
@classmethod
def current_direction(cls, p): # return current direction ('p' or 'n')
return Pin.constraint(p, 'current')
@classmethod
def list_optional(cls): # return list of generic pin names which are optional
return [p for p,v in cls.get().items() if v['is_optional']]
@classmethod
def list_optional_digital(cls): # return generic pin names of (digital)
return [p for p in cls.list_optional() if cls.datatype(p) in ['logic', ''] and cls.direction(p) == 'input']
@classmethod
def list_optional_analog(cls, exclude=[]): # return generic pin names of (NOT digital) except pins listed in exclude
plist = [p for p in cls.list_optional() if cls.datatype(p) != 'logic' and cls.direction(p) == 'input']
return list(set(plist)-set(exclude))
@classmethod
def list_optional_analog_current(cls, exclude=[]): # return generic pin names of (NOT digital) except pins listed in exclude (constraint doesn't have 'current' key)
plist = [p for p in cls.list_optional() if cls.datatype(p) != 'logic']
pins = list(set(plist)-set(exclude))
return filter(cls.is_current, pins)
@classmethod
def list_optional_analog_voltage(cls, exclude=[]): # return generic pin names of (NOT digital) except pins listed in exclude (constraint doesn't have 'current' key)
pins = cls.list_optional_analog(exclude)
return list(set(pins)-set(cls.list_optional_analog_current(exclude)))
@classmethod
def list_pinonly(cls, exclude=[]): # return optional pin names with 'is_pinonly'==True
return [p for p,v in cls.get().items() if v['is_pinonly']]
@classmethod
def declare_signal(cls, p):
if Pin.datatype(p) == 'pwl':
return PWL.declare_signal(p)
elif Pin.datatype(p) == 'real':
return REAL.declare_signal(p)
elif Pin.datatype(p) == 'logic':
return LOGIC.declare_signal(p)
@classmethod
def print_map(cls):
'''
Print out vlog statements to map generic pins in the template to user pins, if the names are different.
Since all the body statements in the model template will look up generic pin names instead of user pin names, there should be statements for the mapping.
NOTE: THIS FUNCTION MUST BE PRESENT IN VERILOG TEMPLATE
'''
vlogstatement = [cls.declare_signal(p) for p in cls.list() if p != cls.name(p)]
vlogstatement += ['assign %s=%s ;' %(cls.name(p),p) if cls.direction(p)=='output' else 'assign %s=%s ;' %(p,cls.name(p)) for p in cls.list() if p != cls.name(p)]
return '// map pins between generic names and user names, if they are different\\n'+'\\n'.join(vlogstatement)
@classmethod
def print_if_exists(cls, statement, p):
'''
Print a Verilog statement if pin p exists.
Note that @@ is replaced with @ in this function.
Therefore do not use this function if the statement has @ for Verilog such as 'always @'
'''
return statement.replace('@@','@') if Pin.is_exist(p) else ''
##############################
# PARAMETER section
##############################
class Param(object):
@classmethod
def get(cls): # return modelparams section
return modelparam
@classmethod
def prefix(cls): # return parameter prefix when calibration is disabled
return 'rep_'
@classmethod
def list(cls): # return a list of model parameters
return cls.get().keys()
@classmethod
def is_exist(cls, p): # check if a parameter named 'p' exists
try:
return p in cls.list()
except:
return None
@classmethod
def property(cls, p,c): # retreive property 'c' of a parameter 'p'
return cls.get()[p][c]
@classmethod
def name(cls, p): # return parameter name p, if it exists
return p if p in cls.list() else None
@classmethod
def description(cls, p): # retreive description of a parameter 'p'
return cls.property(p, 'description')
@classmethod
def value(cls, p): # retreive value of a parameter 'p'
return cls.property(p, 'value')
@classmethod
def datatype(cls, p): # retreive data type of a parameter 'p'
return cls.property(p, 'datatype')
##############################
# METRIC section
##############################
class Metric(object):
@classmethod
def get(cls): # return metrics section
return metric
@classmethod
def list(cls): # return a list of metrics
return cls.get().keys()
@classmethod
def is_exist(cls, m): # check if a metric named 'm' exist
try:
return m in cls.list()
except:
return None
@classmethod
def property(cls, m, c): # retreive property 'c' of a metric 'm'
return cls.get()[m][c]
@classmethod
def description(cls, m): # retreive description of a metric 'm'
try:
return cls.property(m, 'description')
except:
return ''
@classmethod
def value(cls, m): # retreive description of a metric 'm'
return cls.property(m, 'value')
@classmethod
def print_if_exists(cls, statement, m):
'''
Print a Verilog statement if metric m exists.
Note that @@ is replaced with @ in this function.
Therefore do not use this function if the statement has @ for Verilog such as 'always @'
'''
return statement.replace('@@','@') if cls.is_exist(m) else ''
##############################
# Miscellaneous functions
##############################
def get_sensitivity_list():
'''
return default sensitivity list
'''
return REAL.list_optional_pins() + PWL.list_optional_pins_in_real() + LOGIC.list_optional_pins()
def print_sensitivity_list(list_val):
'''
print out sensitivity list in Verilog format
'''
if list_val == []:
return '*'
else:
return ' or '.join(list_val)
def annotate_modelparam(param_map, variable_map={}):
'''
Create verilog statements to back annotate the extracted parameters to variables
param_map = { testname : { testresponse : verilog variable being mapped to }, ... }
variable_map is a dictionary that maps predictor variable in a test to a Verilog variable.
variable_map = { var1 : Verilog_var1, var2 : Verilog_var2, ... }
This will take into account for digital modes as well
'''
#if 'variable_map' not in globals():
# variable_map = {}
digital_modes = ["get_lm_equation_modes('%s', '%s')" % (k, v.keys()[0]) for k,v in param_map.items()]
digital_cases = ['digital_modes[%d][0].keys()' % i for i in range(len(digital_modes))]
vlog_1 = 'digital_modes = [%s]\\n' % ', '.join(digital_modes)
vlog_2 = 'digital_cases = [%s]\\n' % ', '.join(digital_cases)
vlog_3 = 'variable_map = {v_map}\\n'.format(v_map = variable_map)
vlog = '$${\\n' + vlog_1 + vlog_2 + vlog_3 + '}$$\\n'
for i, t in enumerate(param_map.keys()):
vlog += _annotate_verilog_statement(t, param_map[t], i)
return vlog
def _annotate_verilog_statement(testname, param_map_value, case_index):
vlog_statement_template = '''
$$[if not mode_exists('{testname}')]
{vlog_statement1}
$$[else]
case({{$$(','.join({casenumber}))}})
$$[for m in {modenumber}]
{{$$(','.join(["%d'b%s" % (Pin.vectorsize(d), dec2bin('%d'%m[d], Pin.vectorsize(d))) for d in {casenumber}]))}}: begin
{vlog_statement2}
end
$$[end for]
default: begin
{vlog_statement3}
end
endcase
$$[end if]
'''
vlog = ''
template_base = "{variable} = $$get_lm_equation('{testname}', '{response}'"
casenumber = 'digital_cases[%d]' % case_index
modenumber = 'digital_modes[%d]' % case_index
template = template_base + ');'
vlog_statement1 = ' '+'\\n '.join([template.format(variable=v, testname=testname, response=k) for k,v in param_map_value.items()])
template = template_base + ', m);'
vlog_statement2 = ' '+'\\n '.join([template.format(variable=v, testname=testname, response=k) for k,v in param_map_value.items()])
template = template_base + ', %s[0]);' % modenumber
vlog_statement3 = ' '+'\\n '.join([template.format(variable=v, testname=testname, response=k) for k,v in param_map_value.items()])
vlog += vlog_statement_template.format(testname=testname, casenumber=casenumber, modenumber=modenumber, vlog_statement1 = vlog_statement1, vlog_statement2 = vlog_statement2, vlog_statement3 = vlog_statement3)
return vlog
################
# LOGIC-specific
################
class LOGIC(object):
@classmethod
def declare_pin(cls, p, comma=False):
''' print a pin in module declaration. A comma will follow if comma==True '''
return '%s %s %s %s%s // %s' % (Pin.direction(p), Pin.datatype(p), print_bus(Pin.vectorsize(p)), Pin.name(p), ',' if comma else '', Pin.description(p))
@classmethod
def declare_signal(cls, p):
return '%s %s %s;' %(Pin.datatype(p), print_bus(Pin.vectorsize(p)), p)
@classmethod
def list_optional_pins(cls, exclude=[]): # return generic pin names of (digital)
return [p for p in list(set(Pin.list_optional())-set(exclude)) if Pin.datatype(p) in ['logic', '']]
################
# REAL-specific
################
class REAL(object):
@classmethod
def declare_pin(cls, p, comma=False):
''' print a pin in module declaration. A comma will follow if comma==True '''
return '%s %s %s %s%s // %s' % (Pin.direction(p), Pin.datatype(p), print_bus(Pin.vectorsize(p)), Pin.name(p), ',' if comma else '', Pin.description(p))
@classmethod
def declare_signal(cls, p):
return '%s %s %s;' %(Pin.datatype(p), p, print_bus(Pin.vectorsize(p)))
@classmethod
def list_optional_pins(cls, exclude=[]):
'''
Get a list of real signal expressions for optional real analog pins
'''
return [p for p in list(set(Pin.list_optional_analog())-set(exclude)) if Pin.datatype(p)=="real"]
##############
# PWL-specific
##############
class PWL(object):
@classmethod
def declare_pin(cls, p, comma=False):
''' print a pin in module declaration. A comma will follow if comma==True '''
return '%s %s %s %s%s // %s' % (Pin.direction(p), Pin.datatype(p), Pin.name(p), print_bus(Pin.vectorsize(p)), ',' if comma else '', Pin.description(p))
@classmethod
def declare_signal(cls, p):
return '%s %s %s;' %(Pin.datatype(p), p, print_bus(Pin.vectorsize(p)))
@classmethod
def get_real(cls, signame):
'''
Get a real signal expression for given pwl signal name (signame)
'''
return '%s_r' % signame
@classmethod
def list_optional_pins(cls, exclude=[]):
'''
Get a list of real signal expressions for optional pwl analog pins
'''
pl = [p for p in list(set(Pin.list_optional_analog())-set(exclude)) if Pin.datatype(p)=="pwl"]
return map(cls.get_real, pl)
@classmethod
def list_optional_pins_in_real(cls, exclude=[]):
'''
Get a list of real signal expressions for optional pwl analog pins with real suffix
'''
return map(cls.get_real, cls.list_optional_pins())
@classmethod
def instantiate_pwl2real(cls, signame):
'''
Convert PWL waveform to PWC waveform for a given signal name, using pwl2real primitive.
Output "real" signal has the same signal name as its PWL signal, but it will be followed by a suffix "_r"
'''
return 'pwl2real #(.dv(etol_{signal})) xp2r_{signal} (.in({signal}), .out({signal}_r)); // pwl-to-real of {signal}'.format(signal=signame)
@classmethod
def declare_real(cls, sig_list):
'''
Declare the corresponding "real" signal (wire) of a PWL signal
The "real" signal will have the same signal
'''
if len(sig_list) > 0:
return 'real %s;' % ', '.join(sig_list)
else:
return ''
@classmethod
def declare_optional_analog_pins_in_real(cls, exclude=[]):
'''
declare optional analog pins with real datatype suffix
'''
pl_real = list(set(cls.list_optional_pins())-set(exclude))
return cls.declare_real(pl_real)
@classmethod
def instantiate_pwl2real_optional_analog_pins(cls, exclude=[]):
'''
do instantiate_pwl2real for all optional analog pins
'''
pl = [p for p in list(set(Pin.list_optional_analog())-set(exclude)) if Pin.datatype(p)=="pwl"]
_statements = map(cls.instantiate_pwl2real, pl)
return '\\n'.join(_statements)
}@
"""
TEST_PRIMITIVE_FUNCTIONS = """@{
##############################
# test primitive functions
##############################
class Test(object):
@classmethod
def dut(cls): # print dut name
return Module.name()
class TestPort(object):
@classmethod
def declare_optional_pins_prime(cls, port_name, is_digital):
'''
Declare port specifiction in test for given port
'''
if is_digital:
spec = {'port_type': 'digitalmode', 'encode':'binary', 'prohibited': '', 'pinned': 'False', 'default_value': 'b0'}
template = ''' [[[{port_name}]]]
port_type = {port_type}
bit_width = {bit_width}
encode = {encode}
prohibited = {prohibited}
pinned = {pinned}
default_value = {default_value}
description = {description}
'''
else:
spec = {'port_type': 'analoginput', 'regions': '0.0, 1.0', 'pinned': 'False', 'default_value': '0.5'}
template = ''' [[[{port_name}]]]
port_type = {port_type}
regions = {regions}
pinned = {pinned}
default_value = {default_value}
description = {description}
'''
testcfg = ''
spec.update({'port_name': port_name, 'description': Pin.description(port_name)})
if is_digital:
spec.update({'bit_width': Pin.vectorsize(port_name)})
testcfg += template.format(**spec)
return testcfg
@classmethod
def declare_optional_analog_pins(cls, exclude=[]):
'''
Do class.declare_optional_pins_prime for optional analog pins
'''
testcfg = ''
for p in list(set(Pin.list_optional_analog())-set(exclude)):
testcfg += cls.declare_optional_pins_prime(p, False)
return testcfg
@classmethod
def declare_optional_digital_pins(cls, exclude=[]):
'''
Do class.declare_optional_pins_prime for optional digital pins
'''
testcfg = ''
for p in list(set(Pin.list_optional_digital())-set(exclude)):
testcfg += cls.declare_optional_pins_prime(p, True)
return testcfg
@classmethod
def declare_optional_pins(cls, exclude=[]):
'''
Do class.declare_optional_pins_prime for optional analog and digital pins
'''
testcfg = cls.declare_optional_analog_pins(exclude)
testcfg += cls.declare_optional_digital_pins(exclude)
return testcfg
class Testbench(object):
@classmethod
def instantiate_bitvector(cls, signame, bitwidth, value=''):
'''
Instantiate bitvector
bitvector #(.bit_width({bitwidth}), .value(@{signame})) xbv_{signame} (.out({signame}));
'''
if value == '':
value = '@(%s)' % signame
return 'bitvector #(.bit_width({bitwidth}), .value({value})) xbv_{signame} (.out({signame}{bus}));'.format(signame=signame, bitwidth=bitwidth, value=value, bus='[%d:0]' % (Pin.vectorsize(signame)-1) if Pin.vectorsize(signame)>1 else '')
@classmethod
def instantiate_bitvector_optional_pins(cls, exclude=[]):
'''
Do cls._instantiate_bitvector() for all optional digital pins
'''
return '\\n'.join([cls.instantiate_bitvector(p, Pin.vectorsize(p)) for p in Pin.list_optional_digital() if p not in exclude])
@classmethod
def instantiate_vdc(cls, signame, value=''):
'''
Instantiate vdc
For e.g., instantiate_vdc('vin') will produce
vdc #(.dc(@vin)) xvdc_vin (.vout(vin));
'''
if value == '':
value = '@(%s)' % signame
return 'vdc #(.dc({value})) xvdc_{signame} (.vout({signame}));'.format(signame=signame, value=value)
@classmethod
def instantiate_vdc_optional_pins(cls, exclude=[]):
'''
Do cls._instantiate_vdc() for all optional analog voltage pins
'''
return '\\n'.join([cls.instantiate_vdc(p) for p in list(set(Pin.list_optional_analog_voltage()) - set(exclude))])
@classmethod
def instantiate_idc(cls, signame, pnode, nnode, value=''):
'''
Instantiate idc which produces signame
For e.g., instantiate_idc('iin', 'vdd', 'iin') will produce
idc #(.is_n(1), .dc(@iin)) xidc_iin (.outnode(iin), .refnode(vdd));
'''
if value == '':
value = '@(%s)' % signame
return 'idc #(.is_n({direction}), .dc({value})) xidc_{signame} (.outnode({outnode}), .refnode({refnode}));'.format(signame=signame, outnode=signame, refnode=pnode if signame==nnode else nnode if signame==pnode else 'ERROR', direction = '0' if signame==pnode else '1' if signame==nnode else 'ERROR', value=value)
@classmethod
def instantiate_idc_optional_pins(cls, prefnode='vdd', nrefnode='gnd', exclude=[]):
'''
Do cls._instantiate_idc() for all optional analog current pins
'''
return '\\n'.join([cls.instantiate_idc(p, p if Pin.current_direction(p)=='n' else prefnode, p if Pin.current_direction(p)=='p' else nrefnode ) for p in Pin.list_optional_analog_current() if p not in exclude])
@classmethod
def instantiate_idc_on_pin(cls, signame, prefnode='vdd', nrefnode='gnd'):
'''
Do cls._instantiate_idc() for all optional analog current pins
'''
p = signame
return cls.instantiate_idc(p, p if Pin.current_direction(p)=='n' else prefnode, p if Pin.current_direction(p)=='p' else nrefnode)
@classmethod
def dut(cls): # device-under-test
return Test.dut()
@classmethod
def map_by_name(cls, p): # map a pin by name in Verilog
if Pin.vectorsize(p) > 1:
return '.%s(%s%s)' % (Pin.name(p),p,print_bus(Pin.vectorsize(p)))
else:
return '.%s(%s)' % (Pin.name(p),p)
@classmethod
def dut_map_by_name(cls): # print dut pin mapping by name in Verilog
return ' %s ' % (', '.join([cls.map_by_name(p) for p in Pin.list()]))
class TestParam(object):
@classmethod
def get(cls): # return testparams section
return testparam
@classmethod
def list(cls): # return a list of parameter names
return cls.get().keys()
@classmethod
def is_exist(cls, p): # check if p param exists
return p in cls.list()
@classmethod
def value(cls, p, dtype=float): # retrieve parameter value in test
return dtype(cls.property(p, 'value'))
@classmethod
def property(cls, p, c): # retrieve parameter property in test
return cls.get()[p][c]
class TestWire(object):
@classmethod
def declare_analog(cls, list_pin, datatype=''):
if datatype=='':
list_val = list_pin
else:
list_val = filter(lambda x: Pin.datatype(x)==datatype, list_pin)
return ','.join(list_val)
@classmethod
def declare_logic(cls, list_pin):
return ','.join(['%s %s' %(print_bus(Pin.vectorsize(p)),p) for p in list_pin])
##############################
# Template section
##############################
class Template(object):
@classmethod
def module_name(cls):
''' return generic module name '''
return generic_module_name
@classmethod
def is_calibration(cls):
''' check if a test is for calibration or for model checking '''
return is_calibration
@classmethod
def include_snippet(cls, filename, id=''):
''' include snippets from the specified file.
The full filename will be os.path.join(template_rootdir, filename)
This is useful if you want to reuse parts of codes from other template
Snippets between ('//---SNIPPET ON' and '//---SNIPPET OFF')
will be snipped off and included in the current template
'id' is an identifier if you want to have many snippet sections
e.g.) see cml and cml_mux
'''
code = []
snippet_on = False
snippet_filename = os.path.join(template_rootdir, filename)
if not os.path.exists(snippet_filename):
print put_error_message('No %s file exists' % snippet_filename)
sys.exit()
else:
with open(os.path.join(template_rootdir, filename), 'r') as f:
for l in f.readlines():
l = l.strip()
if l.startswith('//---SNIPPET ON'):
if id == '' or id == l.split('//---SNIPPET ON')[1].rstrip().lstrip():
snippet_on = True
elif l.startswith('//---SNIPPET OFF'):
snippet_on = False
elif snippet_on:
code.append(l)
return '\\n'.join(code)
# check pin chain (a pin need other pin. for e.g. differential inputs)
Pin.check_pin_chain()
}@
"""
# Template payload string: banner comment emitted at the top of every
# generated model file.  Placeholders filled by str.format at generation
# time: {start}/{end} = block-comment open/close, {ll} = line-comment leader,
# {duplicate} = filler rule line, {software}/{timestamp} = generator name and
# build time.  '@Module.name()' is expanded later by the template engine.
MODEL_HEADER = """{start}{duplicate}
{ll} Module Name: @Module.name()
{ll} This code is automatically generated by "{software}"
{ll} at {timestamp}.
{ll} Note that this version of "{software}" and the generated code
{ll} are only for evaluation purpose. Any redistribution, modification,
{ll} or commercial use is prohibited without permission.
{ll}
{ll} Copyright (c) 2014-Present by <NAME>. All rights reserved.
{ll}
{ll} The information and source code contained herein is the property
{ll} of <NAME>, and may not be disclosed or reproduced
{ll} in whole or in part without explicit written authorization from
{ll} <NAME>.
{ll} For more information, contact bclim at stanford.edu
{duplicate}{end}
"""
BACKANNOTATION_PRIMITIVE_FUNCTIONS = """
@{{
lm_param = {lm_param}
def mode_exists(testname): # return False if there is only 'dummy_digitalmode' in a test, testname
param = globals()['lm_param']
response = param[testname][param[testname].keys()[0]][0]
if response['mode'].keys()==['dummy_digitalmode']:
return False
else:
return True
def get_lm_coef(testname, dv, iv, mode={{'dummy_digitalmode':0}}):
'''
testname: test name
dv: generic output response name (metric)
iv: generic analog control input in test.cfg
mode: dict of true digital input vector in test.cfg
'''
param = globals()['lm_param']
if dv in param[testname].keys():
for v in param[testname][dv]:
if v['mode']==mode:
if iv in v['coef'].keys():
return v['coef'][iv]
else:
return None
return None
else:
return None
def get_terms(testname, dv, mode={{'dummy_digitalmode':0}}):
''' return a list of terms for dependent variable dv
Note that A, B, A:B are all different terms
'''
param = globals()['lm_param']
if dv in param[testname].keys():
for v in param[testname][dv]:
if v['mode']==mode:
return v['coef'].keys()
return None
else:
return None
def get_lm_equation_modes(testname, dv):
''' return a list of a dictionary where the dictionary is a mode '''
param = globals()['lm_param'][testname][dv]
return [k['mode'] for k in param]
def replace_lm_variable_name(t):
''' replace variable name in linear equation by looking up variable_map if exists '''
if t in variable_map.keys():
return variable_map[t]
elif Pin.is_exist(t):
if Pin.datatype(t) == 'pwl':
return PWL.get_real(t)
else:
return t
else:
return t
def get_lm_equation(testname, dv, mode={{'dummy_digitalmode':0}}, default='0.0'):
''' return a linear equation of dv as a function of inputs
note that there is a suffix of '_r' for every independent variable in the resultant equation
of which pin has a datatype of pwl
'''
try:
import re
from dave.common.misc import flatten_list
param = globals()['lm_param']
_terms = get_terms(testname, dv, mode) # terms from linear regression
_coefs = [get_lm_coef(testname, dv, iv, mode) for iv in _terms]
def get_unit_terms(term):
''' extact variables. For example, ctl1*ctl2 will produce [ctl1,ctl2] '''
return [f for f in term.split('*') if len(f) >0 and f[0].isalpha()]
_port = sorted(list(set(flatten_list([get_unit_terms(t) for t in _terms]))-set(['offset']))) # all the unit independent variables from linear regression results
_varl = [replace_lm_variable_name(t) for t in _port]
equation = '+'.join(['%s*%s' %(str(_coefs[i]),t) if t!='offset' else str(_coefs[i]) for i,t in enumerate(_terms)]).replace('+-','-')
for i,v in enumerate(_port):
equation = re.sub(r'\\b%s\\b' % v, _varl[i], equation)
return equation
except:
return default
}}@
"""
| StarcoderdataPython |
# Read integers until the user types 0 (sentinel), then report how many of
# the entered values were even and how many were odd.
#
# Fixes over the original:
#   * 'Qtd de Pares: '.format(c) referenced an undefined name `c` (NameError)
#     and neither format string had a '{}' placeholder, so no count was ever
#     printed.
#   * the terminating 0 was itself counted as an even number; a sentinel
#     should end input without being tallied.
even_count = odd_count = 0
while True:
    n = int(input('Digite um valor: '))
    if n == 0:
        # sentinel: stop reading and do not count it
        break
    if n % 2 == 0:
        even_count += 1
    else:
        odd_count += 1
print('Qtd de Pares: {}'.format(even_count))
print('Qtd de Impares: {}'.format(odd_count))
| StarcoderdataPython |
1607151 | <gh_stars>0
from django.shortcuts import render,redirect
from django.http import HttpResponse,Http404
from .models import Image,Location,Category
# Create your views here.
def all_images(request):
    """Render the gallery landing page: every image plus the location and
    category lists used for filtering in the template."""
    context = {
        "photos": Image.get_images(),
        "locations": Location.objects.all(),
        "categories": Category.objects.all(),
    }
    return render(request, 'photos/all-photos.html', context)
def images_by_location_taken(request, location_id):
    """Render the photos that were taken at the location with ``location_id``."""
    located_photos = Image.filter_images_by_location(location_id)
    return render(request, 'photos/photo-location.html', {"photos": located_photos})
def search_results(request):
    """Search images by the 'photo' GET parameter (a category keyword).

    Renders the search page with the matches, or with a hint message when the
    parameter is absent or empty.
    """
    term = request.GET.get("photo")
    if term:
        found = Image.search_image(term)
        return render(request, 'photos/search.html',
                      {"message": f"{term}", "photos": found})
    return render(request, 'photos/search.html',
                  {"message": "You haven't searched for any term"})
def image_by_id(request, image_id):
    """Render the detail page for a single image.

    Raises:
        Http404: when no image with ``image_id`` exists.
    """
    try:
        photo = Image.objects.get(id=image_id)
    except Image.DoesNotExist:
        # Bug fix: the original caught bare ``DoesNotExist``, a name not
        # defined in this module, so a missing image raised NameError instead
        # of producing a 404.  The exception class lives on the model.
        raise Http404()
    return render(request, "photos/photo.html", {"photo": photo})
1733730 | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies Pvt Ltd and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class ReleaseSettings(Document):
    """Controller for the 'Release Settings' DocType.

    No custom server-side behavior; all field handling is inherited from
    Frappe's ``Document`` base class.
    """
    pass
| StarcoderdataPython |
3317639 | """
This module solves for the orbit of the planet given Keplerian parameters.
"""
import numpy as np
import astropy.units as u
import astropy.constants as consts
from orbitize import cuda_ext, cext
if cext:
from . import _kepler
if cuda_ext:
# Configure GPU context for CUDA accelerated compute
from orbitize import gpu_context
kep_gpu_ctx = gpu_context.gpu_context()
def tau_to_manom(date, sma, mtot, tau, tau_ref_epoch):
    """
    Gets the mean anomaly

    Args:
        date (float or np.array): MJD
        sma (float): semi major axis (AU)
        mtot (float): total mass (M_sun)
        tau (float): epoch of periastron, in units of the orbital period
        tau_ref_epoch (float): reference epoch for tau

    Returns:
        float or np.array: mean anomaly on that date [0, 2pi)
    """
    # Kepler's third law: P = sqrt(4 pi^2 a^3 / (G Mtot)), expressed in days
    period_days = np.sqrt(
        4 * np.pi**2.0 * (sma * u.AU)**3 / (consts.G * (mtot * u.Msun))
    ).to(u.day).value

    # orbital phase elapsed since the reference epoch, wrapped to [0, 1)
    phase = ((date - tau_ref_epoch) / period_days) % 1

    # offset by the periastron epoch tau and convert to an angle in [0, 2pi)
    return ((phase - tau) * 2 * np.pi) % (2 * np.pi)
def calc_orbit(
    epochs, sma, ecc, inc, aop, pan, tau, plx, mtot, mass_for_Kamp=None, tau_ref_epoch=58849, tolerance=1e-9,
    max_iter=100, use_c=True, use_gpu=False
):
    """
    Returns the separation and radial velocity of the body given array of
    orbital parameters (size n_orbs) at given epochs (array of size n_dates)

    Based on orbit solvers from <NAME> and <NAME>. Adapted by <NAME> and <NAME>.

    Args:
        epochs (np.array): MJD times for which we want the positions of the planet
        sma (np.array): semi-major axis of orbit [au]
        ecc (np.array): eccentricity of the orbit [0,1]
        inc (np.array): inclination [radians]
        aop (np.array): argument of periastron [radians]
        pan (np.array): longitude of the ascending node [radians]
        tau (np.array): epoch of periastron passage in fraction of orbital period past MJD=0 [0,1]
        plx (np.array): parallax [mas]
        mtot (np.array): total mass of the two-body orbit (M_* + M_planet) [Solar masses]
        mass_for_Kamp (np.array, optional): mass of the body that causes the RV signal.
            For example, if you want to return the stellar RV, this is the planet mass.
            If you want to return the planetary RV, this is the stellar mass. [Solar masses].
            For planet mass ~ 0, mass_for_Kamp ~ M_tot, and function returns planetary RV (default).
        tau_ref_epoch (float, optional): reference date that tau is defined with respect to (i.e., tau=0)
        tolerance (float, optional): absolute tolerance of iterative computation. Defaults to 1e-9.
        max_iter (int, optional): maximum number of iterations before switching. Defaults to 100.
        use_c (bool, optional): Use the C solver if configured. Defaults to True
        use_gpu (bool, optional): Use the GPU solver if configured. Defaults to False

    Return:
        3-tuple:

            raoff (np.array): array-like (n_dates x n_orbs) of RA offsets between the bodies
            (origin is at the other body) [mas]

            deoff (np.array): array-like (n_dates x n_orbs) of Dec offsets between the bodies [mas]

            vz (np.array): array-like (n_dates x n_orbs) of radial velocity of one of the bodies
                (see `mass_for_Kamp` description) [km/s]

    Written: <NAME>, <NAME>, 2018
    """
    n_orbs = np.size(sma)  # num sets of input orbital parameters
    n_dates = np.size(epochs)  # number of dates to compute offsets and vz

    # return planetary RV if `mass_for_Kamp` is not defined
    if mass_for_Kamp is None:
        mass_for_Kamp = mtot

    # Necessary for _calc_ecc_anom, for now
    if np.isscalar(epochs):  # just in case epochs is given as a scalar
        epochs = np.array([epochs])

    # replicate eccentricity per epoch so _calc_ecc_anom sees one row per date
    ecc_arr = np.tile(ecc, (n_dates, 1))

    # # compute mean anomaly (size: n_orbs x n_dates)
    manom = tau_to_manom(epochs[:, None], sma, mtot, tau, tau_ref_epoch)
    # compute eccentric anomalies (size: n_orbs x n_dates)
    eanom = _calc_ecc_anom(manom, ecc_arr, tolerance=tolerance, max_iter=max_iter, use_c=use_c, use_gpu=use_gpu)

    # compute the true anomalies (size: n_orbs x n_dates)
    # Note: matrix multiplication makes the shapes work out here and below
    tanom = 2.*np.arctan(np.sqrt((1.0 + ecc)/(1.0 - ecc))*np.tan(0.5*eanom))
    # compute 3-D orbital radius of second body (size: n_orbs x n_dates)
    radius = sma * (1.0 - ecc * np.cos(eanom))

    # compute ra/dec offsets (size: n_orbs x n_dates)
    # math from <NAME>. Lots of trig
    c2i2 = np.cos(0.5*inc)**2
    s2i2 = np.sin(0.5*inc)**2
    arg1 = tanom + aop + pan
    arg2 = tanom + aop - pan
    c1 = np.cos(arg1)
    c2 = np.cos(arg2)
    s1 = np.sin(arg1)
    s2 = np.sin(arg2)

    # updated sign convention for Green Eq. 19.4-19.7
    raoff = radius * (c2i2*s1 - s2i2*s2) * plx
    deoff = radius * (c2i2*c1 + s2i2*c2) * plx

    # compute the radial velocity (vz) of the body (size: n_orbs x n_dates)
    # first comptue the RV semi-amplitude (size: n_orbs x n_dates)
    Kv = np.sqrt(consts.G / (1.0 - ecc**2)) * (mass_for_Kamp * u.Msun *
                                               np.sin(inc)) / np.sqrt(mtot * u.Msun) / np.sqrt(sma * u.au)

    # Convert to km/s
    Kv = Kv.to(u.km/u.s)

    # compute the vz
    vz = Kv.value * (ecc*np.cos(aop) + np.cos(aop + tanom))

    # Squeeze out extra dimension (useful if n_orbs = 1, does nothing if n_orbs > 1)
    vz = np.squeeze(vz)[()]

    return raoff, deoff, vz
def _calc_ecc_anom(manom, ecc, tolerance=1e-9, max_iter=100, use_c=False, use_gpu=False):
    """
    Computes the eccentric anomaly from the mean anomaly.

    Strategy (from <NAME>'s solver): e == 0 is trivial (E = M), e < 0.95
    uses Newton-Raphson, and e >= 0.95 -- or any element where the compiled
    Newton solver reported non-convergence -- uses Mikkola's analytic method.

    Args:
        manom (float/np.array): mean anomaly, either a scalar or np.array of any shape
        ecc (float/np.array): eccentricity, either a scalar or np.array of the same shape as manom
        tolerance (float, optional): absolute tolerance of iterative computation. Defaults to 1e-9.
        max_iter (int, optional): maximum number of iterations before switching. Defaults to 100.
        use_c (bool, optional): Use the C solver if configured. Defaults to False
        use_gpu (bool, optional): Use the GPU solver if configured. Defaults to False

    Return:
        eanom (float/np.array): eccentric anomalies, same shape as manom

    Written: <NAME>, 2018
    """
    if not (np.isscalar(ecc) or np.shape(manom) == np.shape(ecc)):
        raise ValueError("ecc must be a scalar, or ecc.shape == manom.shape")

    # promote scalars so everything below is uniform array indexing
    if np.isscalar(manom):
        manom = np.array((manom, ))
    if np.isscalar(ecc):
        ecc = np.full(np.shape(manom), ecc)

    # start from NaN so unfilled elements are obvious
    eanom = np.full(np.shape(manom), np.nan)

    circular = ecc == 0.0
    low_ecc = ecc < 0.95

    # e == 0: Kepler's equation reduces to E = M
    zero_idx = np.where(circular)
    if zero_idx[0].size > 0:
        eanom[zero_idx] = manom[zero_idx]

    # moderate eccentricity: Newton-Raphson
    low_idx = np.where(~circular & low_ecc)
    if low_idx[0].size > 0:
        eanom[low_idx] = _newton_solver_wrapper(
            manom[low_idx], ecc[low_idx], tolerance, max_iter, use_c, use_gpu)

    # high eccentricity, plus any element the compiled (C/CUDA) Newton solver
    # flagged as non-converged (it returns the unphysical value -1 there)
    high_idx = np.where((~circular & ~low_ecc) | (eanom == -1))
    if high_idx[0].size > 0:
        eanom[high_idx] = _mikkola_solver_wrapper(
            manom[high_idx], ecc[high_idx], use_c, use_gpu)

    return np.squeeze(eanom)[()]
def _newton_solver_wrapper(manom, ecc, tolerance, max_iter, use_c=False, use_gpu=False):
    """
    Wrapper for the various (Python, C, CUDA) implementations of the
    Newton-Raphson solver for eccentric anomaly.

    Args:
        manom (np.array): array of mean anomalies
        ecc (np.array): array of eccentricities
        tolerance (float): absolute tolerance of iterative computation
        max_iter (int): maximum number of iterations before giving up
        use_c (bool, optional): Use the C solver if configured. Defaults to False
        use_gpu (bool, optional): Use the GPU solver if configured. Defaults to False

    Return:
        eanom (np.array): array of eccentric anomalies; the compiled (C/CUDA)
            solvers flag non-convergence with the unphysical value -1, which
            the caller routes to the Mikkola solver

    Written: <NAME>, 2021
    """
    # Fixes over the original: removed a dead `eanom = np.empty_like(manom)`
    # store (every branch rebinds eanom) and corrected the docstring, which
    # documented a nonexistent `eanom0` parameter instead of
    # tolerance/max_iter.
    if cuda_ext and use_gpu:
        # the CUDA solver returns eanom = -1 if it doesnt converge after max_iter iterations
        eanom = _CUDA_newton_solver(manom, ecc, tolerance=tolerance, max_iter=max_iter)
    elif cext and use_c:
        # the C solver returns eanom = -1 if it doesnt converge after max_iter iterations
        eanom = _kepler._c_newton_solver(manom, ecc, tolerance=tolerance, max_iter=max_iter)
    else:
        eanom = _newton_solver(manom, ecc, tolerance=tolerance, max_iter=max_iter)
    return eanom
def _newton_solver(manom, ecc, tolerance=1e-9, max_iter=100, eanom0=None):
"""
Newton-Raphson solver for eccentric anomaly.
Args:
manom (np.array): array of mean anomalies
ecc (np.array): array of eccentricities
tolerance (float, optional): absolute tolerance of iterative computation.
Defaults to 1e-9.
max_iter (int, optional): maximum number of iterations before switching.
Defaults to 100.
eanom0 (np.array): array of first guess for eccentric anomaly, same
shape as manom (optional)
Return:
eanom (np.array): array of eccentric anomalies
Written: <NAME>, 2018
"""
# Ensure manom and ecc are np.array (might get passed as astropy.Table Columns instead)
manom = np.asarray(manom)
ecc = np.asarray(ecc)
# Initialize at E=M, E=pi is better at very high eccentricities
if eanom0 is None:
eanom = np.copy(manom)
else:
eanom = np.copy(eanom0)
# Let's do one iteration to start with
eanom -= (eanom - (ecc * np.sin(eanom)) - manom) / (1.0 - (ecc * np.cos(eanom)))
diff = (eanom - (ecc * np.sin(eanom)) - manom) / (1.0 - (ecc * np.cos(eanom)))
abs_diff = np.abs(diff)
ind = np.where(abs_diff > tolerance)
niter = 0
while ((ind[0].size > 0) and (niter <= max_iter)):
eanom[ind] -= diff[ind]
# If it hasn't converged after half the iterations are done, try starting from pi
if niter == (max_iter//2):
eanom[ind] = np.pi
diff[ind] = (eanom[ind] - (ecc[ind] * np.sin(eanom[ind])) - manom[ind]) / \
(1.0 - (ecc[ind] * np.cos(eanom[ind])))
abs_diff[ind] = np.abs(diff[ind])
ind = np.where(abs_diff > tolerance)
niter += 1
if niter >= max_iter:
print(manom[ind], eanom[ind], diff[ind], ecc[ind], '> {} iter.'.format(max_iter))
eanom[ind] = _mikkola_solver_wrapper(manom[ind], ecc[ind]) # Send remaining orbits to the analytical version, this has not happened yet...
return eanom
def _CUDA_newton_solver(manom, ecc, tolerance=1e-9, max_iter=100, eanom0=None):
    """
    Dispatch the Newton-Raphson eccentric-anomaly solve to the CUDA kernel.

    Args:
        manom (np.array): array of mean anomalies
        ecc (np.array): array of eccentricities
        tolerance (float, optional): absolute convergence tolerance. Defaults to 1e-9.
        max_iter (int, optional): iteration cap handed to the kernel. Defaults to 100.
        eanom0 (np.array, optional): initial guesses for the eccentric anomaly,
            same shape as manom.

    Return:
        eanom (np.array): array of eccentric anomalies

    Written: <NAME>, 2021
    """
    global kep_gpu_ctx

    # Coerce to ndarrays first (inputs may be astropy.Table columns or lists).
    mean_anom = np.asarray(manom)
    eccentricity = np.asarray(ecc)

    # Output buffer; the kernel fills it in place.
    ecc_anom = np.empty_like(mean_anom)

    # Scalar kernel arguments must also be passed as arrays.
    kernel_tol = np.asarray(tolerance, dtype=np.float64)
    kernel_max_iter = np.asarray(max_iter)
    kep_gpu_ctx.newton(mean_anom, eccentricity, ecc_anom, eanom0, kernel_tol, kernel_max_iter)
    return ecc_anom
def _mikkola_solver_wrapper(manom, ecc, use_c=False, use_gpu=False):
    """
    Wrapper for the various (Python, C, CUDA) implementations of the analytical
    Mikkola solver.

    Mikkola's method is only valid for mean anomalies in [0, pi], so anomalies
    above pi are reflected to 2*pi - M before the solve and the resulting
    eccentric anomalies are reflected back afterwards.

    Args:
        manom (np.array): array of mean anomalies between 0 and 2pi
        ecc (np.array): eccentricity
        use_c (bool, optional): Use the C solver if configured. Defaults to False
        use_gpu (bool, optional): Use the GPU solver if configured. Defaults to False

    Return:
        eanom (np.array): array of eccentric anomalies

    Written: <NAME>, 2018
    """
    # Work on a copy: the original implementation reflected `manom` in place,
    # which silently mutated the caller's array.
    manom = np.copy(manom)
    ind_change = np.where(manom > np.pi)
    manom[ind_change] = (2.0 * np.pi) - manom[ind_change]
    if cuda_ext and use_gpu:
        eanom = _CUDA_mikkola_solver(manom, ecc)
    elif cext and use_c:
        eanom = _kepler._c_mikkola_solver(manom, ecc)
    else:
        eanom = _mikkola_solver(manom, ecc)
    # Undo the reflection for the elements that were folded into [0, pi].
    eanom[ind_change] = (2.0 * np.pi) - eanom[ind_change]
    return eanom
def _mikkola_solver(manom, ecc):
"""
Analtyical Mikkola solver for the eccentric anomaly. See: <NAME>. 1987. Celestial Mechanics, 40, 329-334.
Adapted from IDL routine keplereq.pro by <NAME> http://www.lpl.arizona.edu/~bjackson/idl_code/keplereq.pro
Args:
manom (float or np.array): mean anomaly, must be between 0 and pi.
ecc (float or np.array): eccentricity
Return:
eanom (np.array): array of eccentric anomalies
Written: <NAME>, 2018
"""
alpha = (1.0 - ecc) / ((4.0 * ecc) + 0.5)
beta = (0.5 * manom) / ((4.0 * ecc) + 0.5)
aux = np.sqrt(beta**2.0 + alpha**3.0)
z = np.abs(beta + aux)**(1.0/3.0)
s0 = z - (alpha/z)
s1 = s0 - (0.078*(s0**5.0)) / (1.0 + ecc)
e0 = manom + (ecc * (3.0*s1 - 4.0*(s1**3.0)))
se0 = np.sin(e0)
ce0 = np.cos(e0)
f = e0-ecc*se0-manom
f1 = 1.0-ecc*ce0
f2 = ecc*se0
f3 = ecc*ce0
f4 = -f2
u1 = -f/f1
u2 = -f/(f1+0.5*f2*u1)
u3 = -f/(f1+0.5*f2*u2+(1.0/6.0)*f3*u2*u2)
u4 = -f/(f1+0.5*f2*u3+(1.0/6.0)*f3*u3*u3+(1.0/24.0)*f4*(u3**3.0))
return (e0 + u4)
def _CUDA_mikkola_solver(manom, ecc):
    """
    Dispatch the analytical Mikkola eccentric-anomaly solve to the CUDA kernel.

    Args:
        manom (float or np.array): mean anomaly, must be between 0 and pi.
        ecc (float or np.array): eccentricity

    Return:
        eanom (np.array): array of eccentric anomalies

    Written: <NAME>, 2021
    """
    global kep_gpu_ctx

    # Coerce to ndarrays first (inputs may be astropy.Table columns or lists).
    mean_anom = np.asarray(manom)
    eccentricity = np.asarray(ecc)

    # Output buffer; the kernel fills it in place.
    ecc_anom = np.empty_like(mean_anom)
    kep_gpu_ctx.mikkola(mean_anom, eccentricity, ecc_anom)
    return ecc_anom
| StarcoderdataPython |
1573 | import SimpleXMLRPCServer as xmls
def echo(msg):
    # Log the received payload, then return it unchanged so the XML-RPC caller
    # gets its own message echoed back.
    print 'Got', msg
    return msg
class echoserver(xmls.SimpleXMLRPCServer):
    # Set SO_REUSEADDR on the listening socket so a quickly-restarted server
    # can rebind the same port without waiting for TIME_WAIT to expire.
    allow_reuse_address = True
# Bind the echo server to localhost:8001 and expose echo() as RPC method "echo".
server = echoserver(('127.0.0.1', 8001))
server.register_function(echo, 'echo')
print 'Listening on port 8001'
try:
    server.serve_forever()
except:
    # On any interruption (e.g. KeyboardInterrupt) close the listening socket.
    server.server_close()
| StarcoderdataPython |
1735694 | <reponame>jmshnds/eventstore_grpc
"""
Resign Node.
"""
from eventstore_grpc.proto import operations_pb2, operations_pb2_grpc, shared_pb2
def resign_node(stub: operations_pb2_grpc.OperationsStub, **kwargs) -> shared_pb2.Empty:
    """Ask the EventStore node to resign, forwarding any extra gRPC call options."""
    request = shared_pb2.Empty()
    return stub.ResignNode(request, **kwargs)
| StarcoderdataPython |
3271537 | from .rcnn_heads import ORCNNROIHeads
from .mask_heads import (
build_amodal_mask_head,
build_visible_mask_head
)
from .pooler import ROIPooler
| StarcoderdataPython |
3293622 | """W&B Sweep Functionality."""
import os
import signal
import subprocess
import sys
import json
from typing import Tuple
from ast import literal_eval
import pdb
# Baseline experiment configuration; command-line overrides from args_to_json()
# are merged on top of these values before being passed to run_experiment.py.
DEFAULT_CONFIG = {
    "dataset": "RetinaDatasetWrapper",
    "model": "RetinaModel",
    "network": "resnetconv",
    "train_args": {"batch_size": 128, "epochs": 10, "lr": 1e-3, "loss": "crossentropy", "optimizer": "adam"},
}
def args_to_json(default_config: dict, preserve_args: tuple = ("gpu", "save")) -> Tuple[dict, list]:
    """Convert command line arguments to nested config values.

    i.e. run_sweep.py --dataset_args.foo=1.7
    {
        "dataset_args": {
            "foo": 1.7
        }
    }

    Arguments whose first dotted component is named in ``preserve_args`` are not
    merged into the config; they are passed through in the returned args list.

    Args:
        default_config: base configuration. The returned config starts from a
            deep copy of it so the caller's dict is never mutated.
        preserve_args: top-level argument names to forward as-is.

    Returns:
        (config, args): the merged nested config dict and the list of preserved
        "--name=value" strings.
    """
    from copy import deepcopy

    args = []
    # Deep copy: a shallow .copy() would let the override merge mutate the
    # nested dicts (e.g. default_config["train_args"]) of the caller's dict.
    config = deepcopy(default_config)
    key, val = None, None
    for arg in sys.argv[1:]:
        # Accept both "--key=value" and "--key value" forms. maxsplit=1 keeps
        # values that themselves contain "=" intact.
        if "=" in arg:
            key, val = arg.split("=", 1)
        elif key:
            val = arg
        else:
            key = arg

        if key and val:
            parsed_key = key.lstrip("-").split(".")
            if parsed_key[0] in preserve_args:
                args.append("--{}={}".format(parsed_key[0], val))
            else:
                # Walk (creating as needed) the nested dicts for dotted keys.
                nested = config
                for level in parsed_key[:-1]:
                    # Look up in the *current* level, not the root config, so
                    # keys nested more than two levels deep resolve correctly.
                    nested[level] = nested.get(level, {})
                    nested = nested[level]
                try:
                    # Convert numerics and other Python literals to real values
                    val = literal_eval(val)
                except (ValueError, SyntaxError):
                    # literal_eval raises SyntaxError (not just ValueError) for
                    # values that are not valid literals, e.g. bare strings
                    # with spaces; keep the raw string in that case.
                    pass
                nested[parsed_key[-1]] = val
            key, val = None, None
    return config, args
def main():
    """Launch training/run_experiment.py with config assembled from CLI overrides.

    The child runs in its own process group and the W&B bootstrap variables are
    stripped from its environment so the sweep agent does not re-trigger itself
    inside the experiment process.
    """
    config, args = args_to_json(DEFAULT_CONFIG)
    # Drop the wandb sweep-agent bootstrap variables; everything else is inherited.
    env = {k: v for k, v in os.environ.items() if k not in ("WANDB_PROGRAM", "WANDB_ARGS")}
    print ('args', *args)
    # pylint: disable=subprocess-popen-preexec-fn
    # os.setsid puts the child in a new session/process group so it can be
    # signalled independently of this wrapper process.
    run = subprocess.Popen(
        ["python", "training/run_experiment.py", *args, json.dumps(config)], env=env, preexec_fn=os.setsid,
    )  # nosec
    # Forward SIGTERM (e.g. from the sweep agent) to the child experiment.
    signal.signal(signal.SIGTERM, lambda *args: run.terminate())
    run.wait()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3339321 | <gh_stars>0
# Generated by Django 3.0.6 on 2020-06-05 20:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a uniqueness constraint so each player can hold at most one loan per round."""

    dependencies = [
        ('merchant_game', '0022_auto_20200605_2237'),
    ]

    operations = [
        # Enforce (player, round) uniqueness at the database level.
        migrations.AddConstraint(
            model_name='loan',
            constraint=models.UniqueConstraint(fields=('player', 'round'), name='player_loan_round'),
        ),
    ]
| StarcoderdataPython |
85948 | from tkinter import *
from tkinter.font import BOLD
# Minimal Tk demo: a pre-filled entry plus a button that greets the entered name.
root = Tk()


def click():
    # Pack a fresh greeting label on every press; labels therefore accumulate.
    mylabel = Label(root, text=f"Hello, {a.get()}")
    mylabel.pack()


a = Entry(root, width=30)
a.pack()
a.insert(0, "Enter Your Name")  # placeholder text shown until the user edits it

btn = Button(root, text='Click Me', command=click)
btn.pack()

root.mainloop()  # blocks until the window is closed
| StarcoderdataPython |
1734876 | """Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the
License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from collections import namedtuple
from autotrail.core.dag import Step, topological_traverse
from autotrail.core.socket_communication import HandlerResult
class APICallName:
    """Namespace for the API call names. Use this class instead of plain strings."""
    # List of valid API call names
    # These values don't carry any specific meaning but they should be unique, i.e., no two API calls should have the
    # same value.
    START = 'start'
    SHUTDOWN = 'shutdown'
    STATUS = 'status'
    LIST = 'list'
    PAUSE = 'pause'
    INTERRUPT = 'interrupt'
    NEXT_STEPS = 'next_steps'
    RESUME = 'resume'
    SKIP = 'skip'
    UNSKIP = 'unskip'
    BLOCK = 'block'
    UNBLOCK = 'unblock'
    PAUSE_BRANCH = 'pause_branch'
    RESUME_BRANCH = 'resume_branch'
    SET_PAUSE_ON_FAIL = 'set_pause_on_fail'
    UNSET_PAUSE_ON_FAIL = 'unset_pause_on_fail'
    SEND_MESSAGE_TO_STEPS = 'send_message_to_steps'
class APICallField:
    """Namespace for the various fields (keys) in an API call. Use this class instead of plain strings."""
    NAME = 'name'                    # API call name; one of the APICallName values
    TAGS = 'tags'                    # dict of step tags used to select steps
    STEP_COUNT = 'step_count'        # number of steps, used by NEXT_STEPS (as a slice bound)
    DRY_RUN = 'dry_run'              # boolean; when True no state is changed
    MESSAGE = 'message'              # payload for SEND_MESSAGE_TO_STEPS
    STATES = 'states'                # list of Step states used to filter steps
    STATUS_FIELDS = 'status_fields'  # list of StatusField values to include in STATUS
class StatusField(object):
    """Namespace for the fields returned as part of the status API call. Use this class instead of plain strings."""
    N = 'n'                                        # sequence number of the step (always present)
    NAME = 'Name'                                  # step name (always present)
    STATE = 'State'                                # current Step state
    RETURN_VALUE = 'Return value'                  # return value of the step's action function
    OUTPUT_MESSAGES = 'Output messages'            # messages the action function sent out
    INPUT_MESSAGES = 'Input messages'              # messages sent to the action function
    PROMPT_MESSAGES = 'Prompt messages'            # prompts raised by the action function
    UNREPLIED_PROMPT_MESSAGE = 'Waiting for response'  # latest prompt without a reply (or None)
def get_class_globals(klass):
    """Return the set of "globals" defined on the given class.

    A class attribute counts as a global when its name is fully uppercase and
    does not start with an underscore (constant-style names).

    Arguments:
    klass -- The class whose globals need to be fetched.

    Returns:
    set -- of the class globals.
    """
    values = set()
    for name in dir(klass):
        # Skip private/dunder names and anything not written in CONSTANT_CASE.
        if name.startswith('_') or not name.isupper():
            continue
        values.add(getattr(klass, name))
    return values
def validate_states(states):
    """Validate the given states.

    Arguments:
    states -- An iterable containing step states.

    Returns:
    None -- If all the states are valid.

    Raises:
    ValueError -- If any state is not one of the Step state globals. Exception contains details about the state
                  that is invalid.
    TypeError -- If states is not iterable.
    """
    try:
        valid_states = get_class_globals(Step)
        for state in states:
            if state not in valid_states:
                raise ValueError('The state "{}" is not valid.'.format(str(state)))
    except TypeError:
        # Iterating a non-iterable raises TypeError; re-raise with a clearer message.
        raise TypeError('{} should be a list or iterable.'.format(APICallField.STATES))
def validate_status_fields(status_fields):
    """Validate the given status fields.

    Arguments:
    status_fields -- An iterable containing status fields (fields to be included in a status API response).

    Returns:
    None -- If all the status fields are valid.

    Raises:
    ValueError -- If any field is not one of the StatusField globals. Exception contains details about the
                  status field that is invalid.
    TypeError -- If status_fields is not iterable.
    """
    try:
        valid_fields = get_class_globals(StatusField)
        for field in status_fields:
            if field not in valid_fields:
                raise ValueError('The field "{}" is not valid.'.format(str(field)))
    except TypeError:
        # Iterating a non-iterable raises TypeError; re-raise with a clearer message.
        raise TypeError('{} should be a list or iterable.'.format(str(status_fields)))
def validate_mapping(obj):
    """Check that obj supports dict-style usage: iteration over keys plus key lookup.

    Arguments:
    obj -- Any object being validated.

    Returns:
    None -- If the validation is successful.

    Raises:
    TypeError -- If iterating obj or looking up one of its keys fails.
    """
    try:
        # Exercise exactly the operations the API performs on the mapping.
        for candidate_key in iter(obj):
            _ = obj[candidate_key]
    except Exception:
        raise TypeError('{} needs to be a dictionary like mapping.'.format(str(obj)))
def validate_number(obj):
    """Validate the given obj as a number (usable as a list slice bound).

    The API only ever uses this value to slice the list of steps, so the
    check mirrors that exact operation.

    Arguments:
    obj -- The object to be validated.

    Returns:
    None -- If the given object behaves like a number as used by the API calls.

    Raises:
    TypeError -- If the object cannot be used as a slice bound.
    """
    probe = [0, 1, 2]
    try:
        probe[:obj]
    except TypeError:
        raise TypeError('{} needs to be a number.'.format(str(obj)))
def validate_boolean(obj):
    """Validate that obj is exactly the boolean True or False (identity check).

    Arguments:
    obj -- The object to be validated.

    Returns:
    None -- If the object is a boolean (True or False).

    Raises:
    ValueError -- If the object is neither True nor False.
    """
    # Identity comparison deliberately rejects truthy/falsy stand-ins like 1 or 0.
    if obj is True or obj is False:
        return
    raise ValueError('{} should be a boolean True or False.'.format(str(obj)))
def search_steps_with_tags(steps, tags):
    """Search steps whose tags contain all the given key-value pairs.

    An empty tags dict matches everything (steps is returned unchanged).

    Arguments:
    steps -- An iterable of Step like objects.
    tags -- A dictionary; every key-value pair must be present in a step's
            tags for the step to match.

    Returns:
    A generator of Steps -- that have the provided tags (or `steps` itself
    when tags is empty).
    """
    if tags == {}:
        return steps

    # Inlined subset test: the original called is_dict_subset_of(), which is
    # neither defined nor imported anywhere in this module and would raise
    # NameError at runtime.
    def _tags_match(step_tags):
        return all(key in step_tags and step_tags[key] == value for key, value in tags.items())

    return (step for step in steps if _tags_match(step.tags))
def search_steps_with_states(steps, states):
    """Search for steps that are in the given states.

    Arguments:
    steps -- An iterator over Step like objects.
    states -- A list of states allowed by the Step like objects e.g., Step.WAIT.
              An empty list matches everything (steps is returned unchanged).

    Returns:
    A generator object that will yield step objects whose state is in the given list of states.
    """
    # Preserve the exact original semantics: only a literal empty list short-circuits.
    if states == []:
        return steps
    return (candidate for candidate in steps if candidate.state in states)
def get_progeny(steps):
    """Returns the strict progeny of all the vertices provided.

    Strict progeny means the union of every vertex reachable from any of the
    given steps (each given step is included, as topological_traverse yields
    the start vertex too).

    Arguments:
    steps -- An iterator over Step like objects.

    Returns:
    A set of Steps -- each found in the branches originating from the given steps.
    """
    result = set()
    for origin in steps:
        for descendant in topological_traverse(origin):
            result.add(descendant)
    return result
def step_to_stepstatus(step, status_fields):
    """Create a status dictionary from a Step like object. This dictionary will include keys from the status_fields and
    their corresponding values from the step. Some keys will always be present.

    Arguments:
    step -- A Step like object.
    status_fields -- A list of fields to be included in the status dictionaries. These are defined in the StatusField
                     namespace.

    Returns:
    dictionary -- Of the form:
        {
            # The following two key-value pairs will always be present in this dictionary, because without these it
            # will be impossible to uniquely identify the steps.
            StatusField.N: <Sequence number of the step>,
            StatusField.NAME: <Name of the step>,

            # The rest of the key-value pairs are obtained based on the given status_fields
            <Key from status_fields>: <Corresponding value from the step>,
        }

    Example:
    If the status_fields specified are:
        [StatusField.STATE, StatusField.RETURN_VALUE]
    Then the returned dictionary will be of the form:
        {
            StatusField.N: <Sequence number of the step>,
            StatusField.NAME: <Name of the step>,
            StatusField.STATE: <State of the step>,
            StatusField.RETURN_VALUE: <Return value from the step>,
        }
    """
    # N and NAME are always included so the step can be uniquely identified.
    step_status = {
        StatusField.N: step.tags['n'],
        StatusField.NAME: str(step),
    }
    if StatusField.STATE in status_fields:
        step_status[StatusField.STATE] = step.state
    if StatusField.RETURN_VALUE in status_fields:
        step_status[StatusField.RETURN_VALUE] = step.return_value
    if StatusField.OUTPUT_MESSAGES in status_fields:
        step_status[StatusField.OUTPUT_MESSAGES] = step.output_messages
    if StatusField.INPUT_MESSAGES in status_fields:
        # Fix: INPUT_MESSAGES is accepted by validate_status_fields but was
        # silently ignored here, so requesting it returned nothing.
        step_status[StatusField.INPUT_MESSAGES] = step.input_messages
    if StatusField.PROMPT_MESSAGES in status_fields:
        step_status[StatusField.PROMPT_MESSAGES] = step.prompt_messages
    if StatusField.UNREPLIED_PROMPT_MESSAGE in status_fields:
        step_status[StatusField.UNREPLIED_PROMPT_MESSAGE] = get_unreplied_prompt_message(step)
    return step_status
def extract_essential_tags(steps):
    """Extract the 'n' and 'name' tags from the steps. These are the tags that can uniquely identify a step.

    Arguments:
    steps -- An iterator over Step like objects.

    Returns:
    A generator that yields dictionaries each containing only the 'n' and 'name' tags.
    """
    def _essentials(step):
        return {'n': step.tags['n'], 'name': step.tags['name']}

    return (_essentials(step) for step in steps)
def get_unreplied_prompt_message(step):
    """Get the unreplied prompt message sent by the action function of a Step.

    The number of prompt messages sent by the action function is tallied
    against the number of messages sent to it by the user; when they differ,
    the latest prompt is considered unanswered.

    Arguments:
    step -- A Step like object.

    Returns:
    The latest prompt message that has not been replied to yet, or None when
    all prompts have replies. (The message type depends on the action function.)
    """
    if len(step.prompt_messages) == len(step.input_messages):
        return None
    return step.prompt_messages[-1]
def change_attribute(steps, attribute, value):
    """Change the attribute of the given steps to the value provided.

    Arguments:
    steps -- An iterable of Step like objects.
    attribute -- An attribute of the objects that needs to be changed.
    value -- The value that the attribute needs to be changed to.

    Post-condition:
    The attribute of all the steps will be updated to the given value, and the
    change is logged (at debug level) for each step.
    """
    # The log message is loop-invariant, so build it once.
    message = 'Changing attribute {} to {}'.format(attribute, str(value))
    for target in steps:
        setattr(target, attribute, value)
        log_step(logging.debug, target, message)
def interrupt(steps):
    """Interrupt the action functions of the steps by terminating their processes.

    Arguments:
    steps -- An iterable of Step like objects.

    Post-condition:
    Each step's process.terminate() will be called, interrupting the run of
    its action function; each interruption is logged at debug level.
    """
    for target in steps:
        log_step(logging.debug, target, 'Interruping the step.')
        target.process.terminate()
def get_status(steps, status_fields):
    """Get the status of every step, including the fields specified in status_fields.

    Arguments:
    steps -- An iterator over Step like objects.
    status_fields -- A list of fields to be included in the status dictionaries.

    Returns:
    A list of dictionaries, one per step. See step_to_stepstatus for the
    structure of each dictionary.
    """
    statuses = []
    for step in steps:
        statuses.append(step_to_stepstatus(step, status_fields))
    return statuses
def send_message_to_steps(steps, message):
    """Send the message to the input_queue of the steps.

    Arguments:
    steps -- An iterator over Step like objects.
    message -- Any picklable Python object that can be put into a multiprocessing.Queue.

    Post-condition:
    1. The message will be put into the input_queue of each step.
    2. The message will also be appended to each step's input_messages list
       (used to tally replies against prompts).
    """
    # The log message is loop-invariant, so build it once.
    log_text = 'Sent message to step. Message: {}'.format(str(message))
    for recipient in steps:
        log_step(logging.debug, recipient, log_text)
        recipient.input_queue.put(message)
        recipient.input_messages.append(message)
# This datastructure declaratively defines how an API call is handled.
# The actual execution of the business logic is handled by functions (handle_api_call).
# Each APICallDefinition consists of the following attributes:
# validators : This is a dictionary that maps the fields that are required for this API call mapped to their
# validation function. Each validation function should:
# 1. Accept the value of the corresponding field from the api_call dictionary.
# 2. If validation is successful, return (doesn't matter what is returned).
# 3. If validation fails, then raise an Exception.
# For example, consider the validators dictionary below:
# {
# APICallField.TAGS: validate_mapping,
# }
# The key represented by APICallField.TAGS will be extracted from the api_call dictionary and
# validate_mapping will be called with it as:
# validate_mapping(api_call.get(APICallField.TAGS))
# steps : This is a function that will be used to extract the steps needed for the API call. Typically,
# API calls work on steps that match specific tags or that are in specific states, the function here
# is used to extract that list of steps.
# This function should:
# 1. Accept 2 parameters - steps (list) and the api_call dictionary.
# 2. Return either a list or iterator over steps.
# For example, consider the steps function below:
# lambda steps, api_call: search_steps_with_tags(steps, api_call[APICallField.TAGS])
# The above will return all steps that match the tags specified in the api_call dictionary.
# predicate : This is a guard that is used to decide if the handlers should be called or not.
# This relieves the handlers of the responsibility to check various conditions.
# This predicate should:
# 1. Accept 2 parameters - steps (list) and the api_call dictionary.
#                     2. Return either a True or False value (values like [] or '' are valid as they can be interpreted as
# False.
# If the predicate is True, then the handlers will be called.
# For example, consider the following predicate function:
# lambda steps, api_call: not api_call[APICallField.DRY_RUN]
# The above predicate will ensure that the handlers won't be called unless dry run is False in the
# api_call dictionary.
# handlers : This is a list of functions that take the action required by the API call. Since there are multiple
# handlers, the return value of the first is passed to the second and so on, which means, that the
# first handler accepts one less parameter compared to the rest.
# The first hander should:
# 1. Accept 2 parameters - steps (list) and the api_call dictionary.
# All the other handers should:
# 2. Accept 3 parameters - steps (list), the api_call dictionary and the return value of the previous
# handler.
# The return values between the handlers can be anything and is of no consequence to the business logic
# so long as they work seamlessly between the handlers and the post_processor.
# For example, consider the following handlers:
# [
# lambda steps, api_call: interrupt(steps),
# lambda steps, api_call, return_value: change_attribute(
# steps, attribute='state', value=Step.INTERRUPTED)
# ]
# The above handlers first invoke interrupt with the steps.
# Then, change_attribute is invoked for the same list of steps.
# The return_value is not used.
# post_processor: This is a function that is responsible for returning the final result of the API call in the form of
# a 3-tuple.
# This function should:
# 1. Accept 3 parameters - steps (list), the api_call dictionary and the return value of the last run
# handler.
# 3. Return a 3-tuple of the form: (<api result>, <error>, <flag for trail continuation>)
# <api result> -- the result of the API call.
# <error> -- Any error encountered that needs to be sent back to the user.
# <flag> -- This is a boolean to indicate if the trail should continue running.
# True means that the trail can continue running normally and False indicates
# that the trail should be shutdown.
# Declarative description of one API call; see the block comment above for the
# meaning of each field (validators, steps, predicate, handlers, post_processor).
APICallDefinition = namedtuple('APICallDefinition',
                               ['validators', 'steps', 'predicate', 'handlers', 'post_processor'])
# Maps every APICallName to its declarative definition. handle_api_call() looks
# up the entry for the requested call and executes it; see the APICallDefinition
# comment block above for the semantics of each field.
API_CALL_DEFINITIONS = {
    # Move all READY steps to WAIT so the trail starts running.
    APICallName.START: APICallDefinition(
        validators={APICallField.DRY_RUN: validate_boolean},
        steps=lambda steps, api_call: search_steps_with_states(steps, [Step.READY]),
        predicate=lambda steps, api_call: not api_call[APICallField.DRY_RUN],
        handlers=[lambda steps, api_call: change_attribute(steps, attribute='state', value=Step.WAIT)],
        post_processor=lambda steps, api_call, return_value: (list(extract_essential_tags(steps)), None, True),
    ),
    # No handlers: shutting down is signalled purely via the continuation flag
    # (third tuple element), which is False unless this is a dry run.
    APICallName.SHUTDOWN: APICallDefinition(
        validators={APICallField.DRY_RUN: validate_boolean},
        steps=None,
        predicate=lambda steps, api_call: False,
        handlers=None,
        post_processor=lambda steps, api_call, return_value: (
            not api_call[APICallField.DRY_RUN], None, api_call[APICallField.DRY_RUN]),
    ),
    # Read-only: return the tags of all steps matching the given tags.
    APICallName.LIST: APICallDefinition(
        validators={APICallField.TAGS: validate_mapping},
        steps=lambda steps, api_call: search_steps_with_tags(steps, api_call[APICallField.TAGS]),
        predicate=lambda steps, api_call: False,
        handlers=None,
        post_processor=lambda steps, api_call, return_value: ([step.tags for step in steps], None, True),
    ),
    # Read-only: build status dictionaries for the matching steps/states.
    APICallName.STATUS: APICallDefinition(
        validators={
            APICallField.TAGS: validate_mapping,
            APICallField.STATUS_FIELDS: validate_status_fields,
            APICallField.STATES: validate_states},
        steps=lambda steps, api_call: search_steps_with_states(
            search_steps_with_tags(steps, api_call[APICallField.TAGS]),
            api_call[APICallField.STATES]),
        predicate=lambda steps, api_call: True,
        handlers=[lambda steps, api_call: get_status(steps, api_call[APICallField.STATUS_FIELDS])],
        post_processor=lambda steps, api_call, return_value: (return_value, None, True),
    ),
    # Mark not-yet-run (READY/WAIT) matching steps to pause.
    APICallName.PAUSE: APICallDefinition(
        validators={
            APICallField.TAGS: validate_mapping,
            APICallField.DRY_RUN: validate_boolean},
        steps=lambda steps, api_call:search_steps_with_states(
            search_steps_with_tags(steps, api_call[APICallField.TAGS]),
            [Step.READY, Step.WAIT]),
        predicate=lambda steps, api_call: not api_call[APICallField.DRY_RUN],
        handlers=[lambda steps, api_call: change_attribute(steps, attribute='state', value=Step.TOPAUSE)],
        post_processor=lambda steps, api_call, return_value: (list(extract_essential_tags(steps)), None, True),
    ),
    # Terminate the processes of currently RUNning steps, then mark them INTERRUPTED.
    APICallName.INTERRUPT: APICallDefinition(
        validators={
            APICallField.TAGS: validate_mapping,
            APICallField.DRY_RUN: validate_boolean},
        steps=lambda steps, api_call: search_steps_with_states(
            search_steps_with_tags(steps, api_call[APICallField.TAGS]),
            [Step.RUN]),
        predicate=lambda steps, api_call: not api_call[APICallField.DRY_RUN],
        handlers=[
            lambda steps, api_call: interrupt(steps),
            lambda steps, api_call, return_value: change_attribute(steps, attribute='state', value=Step.INTERRUPTED)],
        post_processor=lambda steps, api_call, return_value: (list(extract_essential_tags(steps)), None, True),
    ),
    # Put paused/interrupted matching steps back into WAIT.
    APICallName.RESUME: APICallDefinition(
        validators={
            APICallField.TAGS: validate_mapping,
            APICallField.DRY_RUN: validate_boolean},
        steps=lambda steps, api_call: search_steps_with_states(
            search_steps_with_tags(steps, api_call[APICallField.TAGS]),
            [Step.TOPAUSE, Step.PAUSED, Step.PAUSED_ON_FAIL, Step.INTERRUPTED]),
        predicate=lambda steps, api_call: not api_call[APICallField.DRY_RUN],
        handlers=[lambda steps, api_call: change_attribute(steps, attribute='state', value=Step.WAIT)],
        post_processor=lambda steps, api_call, return_value: (list(extract_essential_tags(steps)), None, True),
    ),
    # Mark matching not-yet-finished steps to be skipped.
    APICallName.SKIP: APICallDefinition(
        validators={
            APICallField.TAGS: validate_mapping,
            APICallField.DRY_RUN: validate_boolean},
        steps=lambda steps, api_call: search_steps_with_states(
            search_steps_with_tags(steps, api_call[APICallField.TAGS]),
            [Step.READY, Step.WAIT, Step.TOPAUSE, Step.PAUSED, Step.PAUSED_ON_FAIL, Step.INTERRUPTED]),
        predicate=lambda steps, api_call: not api_call[APICallField.DRY_RUN],
        handlers=[lambda steps, api_call: change_attribute(steps, attribute='state', value=Step.TOSKIP)],
        post_processor=lambda steps, api_call, return_value: (list(extract_essential_tags(steps)), None, True),
    ),
    # Undo a pending skip: TOSKIP steps go back to WAIT.
    APICallName.UNSKIP: APICallDefinition(
        validators={
            APICallField.TAGS: validate_mapping,
            APICallField.DRY_RUN: validate_boolean},
        steps=lambda steps, api_call: search_steps_with_states(
            search_steps_with_tags(steps, api_call[APICallField.TAGS]),
            [Step.TOSKIP]),
        predicate=lambda steps, api_call: not api_call[APICallField.DRY_RUN],
        handlers=[lambda steps, api_call: change_attribute(steps, attribute='state', value=Step.WAIT)],
        post_processor=lambda steps, api_call, return_value: (list(extract_essential_tags(steps)), None, True),
    ),
    # Mark matching not-yet-run steps to be blocked.
    APICallName.BLOCK: APICallDefinition(
        validators={
            APICallField.TAGS: validate_mapping,
            APICallField.DRY_RUN: validate_boolean},
        steps=lambda steps, api_call: search_steps_with_states(
            search_steps_with_tags(steps, api_call[APICallField.TAGS]),
            [Step.READY, Step.WAIT, Step.TOPAUSE, Step.PAUSED, Step.PAUSED_ON_FAIL]),
        predicate=lambda steps, api_call: not api_call[APICallField.DRY_RUN],
        handlers=[lambda steps, api_call: change_attribute(steps, attribute='state', value=Step.TOBLOCK)],
        post_processor=lambda steps, api_call, return_value: (list(extract_essential_tags(steps)), None, True),
    ),
    # Undo a pending block: TOBLOCK steps go back to WAIT.
    APICallName.UNBLOCK: APICallDefinition(
        validators={
            APICallField.TAGS: validate_mapping,
            APICallField.DRY_RUN: validate_boolean},
        steps=lambda steps, api_call: search_steps_with_states(
            search_steps_with_tags(steps, api_call[APICallField.TAGS]),
            [Step.TOBLOCK]),
        predicate=lambda steps, api_call: not api_call[APICallField.DRY_RUN],
        handlers=[lambda steps, api_call: change_attribute(steps, attribute='state', value=Step.WAIT)],
        post_processor=lambda steps, api_call, return_value: (list(extract_essential_tags(steps)), None, True),
    ),
    # Set the pause_on_fail flag on matching not-yet-run steps.
    APICallName.SET_PAUSE_ON_FAIL: APICallDefinition(
        validators={
            APICallField.TAGS: validate_mapping,
            APICallField.DRY_RUN: validate_boolean},
        steps=lambda steps, api_call: search_steps_with_states(
            search_steps_with_tags(steps, api_call[APICallField.TAGS]),
            [Step.READY, Step.WAIT, Step.INTERRUPTED, Step.TOPAUSE, Step.PAUSED]),
        predicate=lambda steps, api_call: not api_call[APICallField.DRY_RUN],
        handlers=[lambda steps, api_call: change_attribute(steps, attribute='pause_on_fail', value=True)],
        post_processor=lambda steps, api_call, return_value: (list(extract_essential_tags(steps)), None, True),
    ),
    # Clear the pause_on_fail flag on matching not-yet-run steps.
    APICallName.UNSET_PAUSE_ON_FAIL: APICallDefinition(
        validators={
            APICallField.TAGS: validate_mapping,
            APICallField.DRY_RUN: validate_boolean},
        steps=lambda steps, api_call: search_steps_with_states(
            search_steps_with_tags(steps, api_call[APICallField.TAGS]),
            [Step.READY, Step.WAIT, Step.INTERRUPTED, Step.TOPAUSE, Step.PAUSED]),
        predicate=lambda steps, api_call: not api_call[APICallField.DRY_RUN],
        handlers=[lambda steps, api_call: change_attribute(steps, attribute='pause_on_fail', value=False)],
        post_processor=lambda steps, api_call, return_value: (list(extract_essential_tags(steps)), None, True),
    ),
    # Like PAUSE, but applied to the whole branch (progeny) of the matching steps.
    APICallName.PAUSE_BRANCH: APICallDefinition(
        validators={
            APICallField.TAGS: validate_mapping,
            APICallField.DRY_RUN: validate_boolean},
        steps=lambda steps, api_call: search_steps_with_states(
            get_progeny(search_steps_with_tags(steps, api_call[APICallField.TAGS])),
            [Step.READY, Step.WAIT]),
        predicate=lambda steps, api_call: not api_call[APICallField.DRY_RUN],
        handlers=[lambda steps, api_call: change_attribute(steps, attribute='state', value=Step.TOPAUSE)],
        post_processor=lambda steps, api_call, return_value: (list(extract_essential_tags(steps)), None, True),
    ),
    # Like RESUME, but applied to the whole branch (progeny) of the matching steps.
    APICallName.RESUME_BRANCH: APICallDefinition(
        validators={
            APICallField.TAGS: validate_mapping,
            APICallField.DRY_RUN: validate_boolean},
        steps=lambda steps, api_call: search_steps_with_states(
            get_progeny(search_steps_with_tags(steps, api_call[APICallField.TAGS])),
            [Step.TOPAUSE, Step.PAUSED, Step.PAUSED_ON_FAIL]),
        predicate=lambda steps, api_call: not api_call[APICallField.DRY_RUN],
        handlers=[lambda steps, api_call: change_attribute(steps, attribute='state', value=Step.WAIT)],
        post_processor=lambda steps, api_call, return_value: (list(extract_essential_tags(steps)), None, True),
    ),
    # Resume any paused steps among the first STEP_COUNT steps (topological order).
    APICallName.NEXT_STEPS: APICallDefinition(
        validators={
            APICallField.STEP_COUNT: validate_number,
            APICallField.DRY_RUN: validate_boolean},
        steps=lambda steps, api_call: search_steps_with_states(
            steps[:api_call[APICallField.STEP_COUNT]],
            [Step.TOPAUSE, Step.PAUSED, Step.PAUSED_ON_FAIL]),
        predicate=lambda steps, api_call: not api_call[APICallField.DRY_RUN],
        handlers=[lambda steps, api_call: change_attribute(steps, attribute='state', value=Step.WAIT)],
        post_processor=lambda steps, api_call, return_value: (list(extract_essential_tags(steps)), None, True),
    ),
    # Deliver a message to the input queues of currently RUNning matching steps.
    # Any message value is accepted (the MESSAGE validator never raises).
    APICallName.SEND_MESSAGE_TO_STEPS: APICallDefinition(
        validators={
            APICallField.MESSAGE: lambda api_call: None,
            APICallField.TAGS: validate_mapping,
            APICallField.DRY_RUN: validate_boolean},
        steps=lambda steps, api_call: search_steps_with_states(
            search_steps_with_tags(steps, api_call[APICallField.TAGS]),
            [Step.RUN]),
        predicate=lambda steps, api_call: not api_call[APICallField.DRY_RUN],
        handlers=[lambda steps, api_call: send_message_to_steps(steps, api_call[APICallField.MESSAGE])],
        post_processor=lambda steps, api_call, return_value: (list(extract_essential_tags(steps)), None, True),
    ),
}
def validate_api_call(api_call, api_call_definition):
    """Validate the api_call based on the provided api_call_definition.

    Arguments:
    api_call -- A dictionary of the API call request.
    api_call_definition -- A APICallDefinition or similar data structure that has an attribute called 'validators',
                           which is a dictionary of the parameter mapped to the validation function.

    Returns:
    None -- If the validation was successful.
    String -- containing the error message if the api_call was invalid.
    """
    if api_call_definition is None:
        return 'API name {} is invalid.'.format(api_call.get(APICallField.NAME))

    # items() rather than the Python 2-only iteritems(): behaves identically
    # here and keeps the module working under Python 3.
    for field, validator in api_call_definition.validators.items():
        try:
            value = api_call.get(field)
            validator(value)
        except Exception as e:
            # Any validator failure is reported back to the user, not raised.
            return ('API Call validation failed: The parameter {field} has an invalid value: {value}. Error: {error}'
                    '').format(field=field, value=value, error=str(e))
def handle_api_call(api_call, steps, api_call_definitions=API_CALL_DEFINITIONS):
    """Handler for API calls.
    This function is compliant to be used as a handler with the autotrail.core.socket_communication.serve_socket
    function.
    This function handles a single API call request by using the api_call_definitions, which provides the data for the
    execution of the business logic. See the documenation of APICallDefinition to know how the datastructure is used.
    Arguments:
    api_call -- A dictionary of the API call request.
    steps -- A topologically ordered iterable over steps.
    Keyword Arguments:
    api_call_definitions -- A dictionary that maps API call names to their API Call definitions.
                            The API call definition is looked up in this dictionary to control the business logic of
                            how this API call is handled.
                            See the documenation of APICallDefinition to understand how it is structured.
    Returns:
    HandlerResult -- Containing the response to be sent to the user (result and error) and the return_value
                     that will be passed as-is to the caller of serve_socket.
    """
    # Look up the definition by API name; a missing name yields None, which
    # validate_api_call turns into an "API name ... is invalid" error message.
    api_call_definition = api_call_definitions.get(api_call.get(APICallField.NAME))
    validation_result = validate_api_call(api_call, api_call_definition)
    if validation_result:
        # Validation failed: short-circuit with the error; result is None.
        return HandlerResult(response=dict(name=api_call.get(APICallField.NAME), result=None, error=validation_result),
                             return_value=True)
    # Select the subset of steps this call operates on. The 'steps' attribute of
    # the definition is a filter callable; when absent, no steps are selected.
    steps = list(api_call_definition.steps(steps, api_call)) if api_call_definition.steps else []
    handler_return_value = None
    # Handlers run only when the predicate allows it (e.g. not a dry run).
    if api_call_definition.predicate(steps, api_call):
        # The first handler takes (steps, api_call); each subsequent handler is
        # chained and additionally receives the previous handler's return value.
        first_handler = api_call_definition.handlers[0]
        handlers = api_call_definition.handlers[1:]
        handler_return_value = first_handler(steps, api_call)
        for handler in handlers:
            handler_return_value = handler(steps, api_call, handler_return_value)
    # The post-processor always runs (even on dry runs) and produces the
    # user-facing (result, error) pair plus the serve_socket return value.
    result, error, return_value = api_call_definition.post_processor(steps, api_call, handler_return_value)
    logging.info(('Received API Request: {api_call} -- Sending response: result={result}, error={error}, '
                  'return_value={return_value}').format(api_call=str(api_call), result=str(result), error=str(error),
                                                        return_value=return_value))
    return HandlerResult(response=dict(name=api_call[APICallField.NAME], result=result, error=error),
                         return_value=return_value)
def log_step(log_function, step, message):
    """Log *message* through *log_function*, prefixed with the step's identity.

    Arguments:
    log_function -- Logging callable to use, e.g. logging.info or logging.debug.
    step -- A Step or Step-like object whose ``tags`` mapping defines at least
            the 'name' and 'n' entries.
    message -- String, the message to log.

    Usage:
    For a step whose tags are {n=0, name='example_step'}, the call
        log_step(logging.info, example_step, 'Example log message')
    emits:
        [Step Name=example_step, n=0] Example log message
    """
    prefix = '[Step Name={0}, n={1}] '.format(step.tags['name'], step.tags['n'])
    log_function(prefix + message)
def is_dict_subset_of(sub_dict, super_dict):
    """Checks if sub_dict is contained in the super_dict.
    Arguments:
    sub_dict -- A dictionary like mapping.
    super_dict -- A dictionary like mapping.
    Returns:
    True -- If all the key-value pairs of sub_dict are present in the super_dict.
    False -- Otherwise.

    Unlike the previous set-based implementation, this version works on both
    Python 2 and 3 (no .iteritems()) and also supports unhashable values
    (e.g. lists) since it never needs to hash the values.
    """
    # Sentinel distinguishes "key missing" from "key mapped to None".
    _missing = object()
    return all(super_dict.get(key, _missing) == value
               for key, value in sub_dict.items())
| StarcoderdataPython |
199906 | <reponame>nocproject/noc<gh_stars>10-100
# ---------------------------------------------------------------------
# Rotek.BT.get_inventory
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.sa.profiles.Generic.get_inventory import Script as BaseScript
from noc.sa.interfaces.igetinventory import IGetInventory
class Script(BaseScript):
    """Rotek.BT inventory collection script.

    Extends the generic SNMP inventory script with the fixed set of chassis
    sensors exposed by the Rotek BT cabinet controller.
    """

    name = "Rotek.BT.get_inventory"
    interface = IGetInventory

    def get_chassis_sensors(self):
        """Return the static chassis sensor descriptions.

        Each entry maps a logical sensor name to its SNMP OID and the unit
        of measurement reported for it.
        """
        return [
            # Door contact.
            {
                "name": "in",
                "status": True,
                "description": "Дверь",
                "measurement": "StatusEnum",
                "snmp_oid": "1.3.6.1.4.1.41752.5.15.1.1.0",
            },
            # AC 230V mains presence flag.
            {
                "name": "v230_state",
                "status": True,
                "description": "Флаг наличия сетевого напряжения AC 230В",
                "measurement": "StatusEnum",
                "snmp_oid": "1.3.6.1.4.1.41752.5.15.1.9.0",
            },
            # Cabinet temperature.
            {
                "name": "temp_out",
                "status": True,
                "description": "Температура в шкафу",
                "measurement": "Celsius",
                "snmp_oid": "1.3.6.1.4.1.41752.5.15.1.2.0",
            },
            # Load current draw.
            {
                "name": "current_load",
                "status": True,
                "description": "Ток потребления нагрузки",
                "measurement": "Ampere",
                "snmp_oid": "1.3.6.1.4.1.41752.5.15.1.3.0",
            },
            # UPS battery voltage.
            {
                "name": "ups_battery_U",
                "status": True,
                "description": "ИБП. Напряжение на АКБ",
                "measurement": "Volt AC",
                "snmp_oid": "1.3.6.1.4.1.41752.5.15.1.6.0",
            },
            # Battery charge current.
            {
                "name": "current_battery",
                "status": True,
                "description": "Ток заряда АКБ",
                "measurement": "Ampere",
                "snmp_oid": "1.3.6.1.4.1.41752.5.15.1.5.0",
            },
        ]

    def execute_snmp(self):
        """Collect the base inventory via SNMP and attach the chassis sensors."""
        inventory = self.get_inv_from_version()
        sensors = self.get_chassis_sensors()
        if sensors:
            # Sensors are attributed to the first (chassis) inventory entry.
            inventory[0]["sensors"] = sensors
        return inventory
| StarcoderdataPython |
1781253 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Написать программу, которая будет удалять все комментарии из исходного файла
с кодом на языке Python. Пройдите по всем строкам в файле на предмет поиска
символа #. Обнаружив его, программа должна удалить все содержимое,
начиная с этого символа и до конца строки.
"""
if __name__ == "__main__":
# Запрос имени файла
file_name = input("Введите имя файла, который нужно открыть: ")
# Разбивает на строки первый файл
with open(file_name, "r") as file:
content = file.readlines()
# Запрос имени для нового файла
new_file_name = input("Введите имя для нового файла: ")
# Создаёт новый файл
with open(new_file_name, "w") as new_file:
# Вписывает строки которые не начинаются с #
for line in content:
if not line.startswith('#'):
new_file.write(line)
| StarcoderdataPython |
3264767 | <gh_stars>1-10
"""
FLI.device.py
Object-oriented base interface for handling FLI USB devices
author: <NAME>, Yankee Environmental Systems
author_email: <EMAIL>
"""
__author__ = '<NAME>'
__date__ = '2012-08-16'
import sys, time
import ctypes
from ctypes import pointer, POINTER, byref, c_char, c_char_p, c_long, c_ubyte,\
c_double, c_size_t
from lib import FLILibrary, FLIError, FLIWarning, flidomain_t, flidev_t,\
FLIDOMAIN_USB
###############################################################################
# When True, the FLI DLL is loaded with debug output enabled (see
# FLILibrary.getDll(debug=DEBUG) in USBDevice below).
DEBUG = False
# Size in bytes of the ctypes string buffer used when reading device
# serial numbers via FLIGetSerialString.
BUFFER_SIZE = 64
###############################################################################
class USBDevice(object):
    """Base class for all FLI USB devices.

    Wraps a single device handle (flidev_t) obtained from the vendor DLL and
    provides enumeration helpers over the USB domain.
    """
    # Load the vendor DLL once for the class; all instances share it.
    _libfli = FLILibrary.getDll(debug=DEBUG)
    # All devices handled by this class live in the USB domain.
    _domain = flidomain_t(FLIDOMAIN_USB)
    def __init__(self, dev_name, model):
        # dev_name -- device path/name string as reported by FLIList.
        # model -- model string as reported by FLIList.
        self.dev_name = dev_name
        self.model = model
        # Open the device and keep the handle for the lifetime of the object.
        self._dev = flidev_t()
        self._libfli.FLIOpen(byref(self._dev),dev_name,self._domain)
    def __del__(self):
        # Best-effort close of the native handle when the object is collected.
        self._libfli.FLIClose(self._dev)
    def get_serial_number(self):
        """Return the device serial number string read from the hardware."""
        serial = ctypes.create_string_buffer(BUFFER_SIZE)
        self._libfli.FLIGetSerialString(self._dev,serial,c_size_t(BUFFER_SIZE))
        return serial.value
    @classmethod
    def find_devices(cls):
        """locates all FLI USB devices in the current domain and returns a
        list of USBDevice objects"""
        # FLIList returns a NULL-terminated array of "name;model" strings.
        tmplist = POINTER(c_char_p)()
        cls._libfli.FLIList(cls._domain, byref(tmplist)) #allocates memory
        devs = []
        #process list only if it is not NULL
        if tmplist:
            i = 0
            while tmplist[i]: #process members only if they are not NULL
                # NOTE(review): .split(";") on a c_char_p element implies
                # Python 2 byte/str semantics — confirm before porting to Py3.
                dev_name, model = tmplist[i].split(";")
                devs.append(cls(dev_name=dev_name,model=model)) #create device objects
                i += 1
        cls._libfli.FLIFreeList(tmplist) #frees memory
        #finished processing list
        return devs
    @classmethod
    def locate_device(cls, serial_number):
        """locates the FLI USB devices in the current domain that matches the
        'serial_number' string
        returns None if no match is found
        raises FLIError if more than one device matching the serial_number
        is found, i.e., there is a conflict
        """
        dev_match = None
        devs = cls.find_devices()
        for dev in devs:
            dev_sn = dev.get_serial_number()
            if dev_sn == serial_number: #match found
                if dev_match is None: #first match
                    dev_match = dev
                else: #conflict
                    msg = "Device Conflict: there are more than one devices matching the serial_number '%s'" % serial_number
                    raise FLIError(msg)
        return dev_match
###############################################################################
# TEST CODE
###############################################################################
if __name__ == "__main__":
    # Smoke test: enumerate attached FLI USB devices (requires hardware + DLL).
    devs = USBDevice.find_devices()
| StarcoderdataPython |
66108 | import argparse
from typing import List, Iterable, Tuple, Optional
from dejima.plugin import NoteField, SourcePlugin, CardTemplate, Note
__version__ = "0.1.0"
class SomeSource(SourcePlugin):
    """Example Dejima source plugin template.

    Defines two note fields (Front/Back), the Anki card templates generated
    from them, and a toy `get_entries` that yields two hard-coded notes.
    Intended as a starting point for real importers.
    """
    # These can be named anything you want, and will become fields on
    # your notes in Anki. Note that the templates in `get_card_templates`
    # below have to match the field names, though!
    Front = NoteField(unique=True, merge=True)
    Back = NoteField(unique=True, merge=True)
    # You could add more fields for storing other information like images
    # audio files or anything else you might find convenient to show
    # on a flash card.
    #
    # See https://github.com/coddingtonbear/dejima/blob/master/src/dejima/sources/lln.py
    # to get an idea of how media works in Anki.
    def get_card_templates(self) -> List[CardTemplate]:
        """Return the Anki card templates for this source's note type."""
        # Every Dejima "Source" gets its own card type in Anki; this allows
        # each source to define their own fields and have a little more
        # flexibility. Below, we're defining the templates that will
        # be used by Anki for generating flash cards from your notes.
        #
        # If you want your flash cards to not have a reverse side (i.e.
        # where your question is the *Back* of the card, and you're
        # expected to answer with the *Front*), just remove the second
        # of these options.
        #
        # You can also make certain templates optionally generate a card
        # by carefully crafting the `front` field such that it is empty
        # in certain situations. See https://github.com/coddingtonbear/dejima/blob/master/src/dejima/sources/boox.py
        # for an example of how to do that.
        return [
            CardTemplate(
                name="Card 1",
                front="<p>{{Front}}</p>",
                back="""
                {{FrontSide}}
                <hr id='answer' />
                <p>
                {{Back}}
                </p>
                """,
            ),
            CardTemplate(
                name="Card 2",
                front="""
                <p>
                {{Back}}
                </p>
                """,
                back="""
                {{FrontSide}}
                <hr id='answer' />
                <p>
                {{Front}}
                </p>
                """,
            ),
        ]
    @classmethod
    def add_arguments(self, parser: argparse.ArgumentParser) -> None:
        """Register this source's command-line arguments on *parser*."""
        # You *probably* need to read your content from a file somewhere
        # if so, the following block is useful in that it'll open the
        # file you have specified for you and make it available under
        # `self.options.input`.
        #
        # If you do need to open a file -- just un-comment the following
        # statement:
        # parser.add_argument(
        #    "-i",
        #    "--input",
        #    nargs="?",
        #    type=argparse.FileType("r"),
        #    default=sys.stdin,
        #    help="File to read from (default: stdin)",
        # )
        return super().add_arguments(parser)
    def get_entries(self) -> Iterable[Tuple[Optional[str], Note]]:
        """Yield (foreign_key, Note) pairs to be imported into Anki."""
        # Here is where you do the actual work of importing content
        # from whatever source and `yield`-ing `Note` instances that
        # will become entries in Anki.
        #
        # The below example is pretty useless, but will give you
        # a simple way of understanding how this works.
        flash_cards_i_want_to_create = [
            {"English": "hey there", "Russian": "привет"},
            {"English": "bye", "Russian": "пока"},
        ]
        for card in flash_cards_i_want_to_create:
            # Dejima handles making sure that any particular entry is
            # imported only once, no matter how many times it might
            # appear in an import (so you don't need to worry about
            # being careful not to import particular content more than
            # once), but to do that, you need to give it a "foreign key"
            # to use for identifying this partiuclar entry. Here, we're
            # just using the "English" text on the card. If you were
            # sure you didn't want Dejima to prevent double-imports,
            # you can set this value to `None` and no double-import
            # checks will take place.
            #
            # If you want to skip those double-import checks for testing
            # or because you've deleted the relevant cards in Anki, you
            # can use the `--reimport` command-line argument.
            foreign_key = card["English"]
            # Now, we create our "Note" object -- the note object has
            # three properties:
            #
            # - fields: This is a dictionary having values for each of
            #   the field names you defined at the top of your class.
            # - tags: This is a list of strings allowing you to add
            #   tags to your card.  We're not adding any tags here,
            #   but it's easy to do that if you wanted to.
            # - media: A list of media objects to upload into Anki
            #   for use as images in flash cards or audio files.  We're
            #   not using those here either, but look at the importer
            #   here: https://github.com/coddingtonbear/dejima/blob/master/src/dejima/sources/lln.py
            #   to get an idea of how that works.
            #
            # You'll see that we're getting the field name via
            # `self.Fields.field_name` -- that's just a convenience
            # property -- you could just use "Front" or "Back", too,
            # if you wanted.  Using it the way shown below just makes
            # it easier in cases where the name of the attribute on
            # this class doesn't match the name of the field you
            # would like to create in Anki.
            note = Note(
                fields={
                    self.Front.field_name: card["English"],
                    self.Back.field_name: card["Russian"],
                }
            )
            yield foreign_key, note
| StarcoderdataPython |
149886 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The Project U-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
'''
Take bitstream .bit files and decode them to FASM.
'''
import contextlib
import os
import fasm
import fasm.output
from db import Database
import fasm_disassembler
import bitstream
import subprocess
import tempfile
def bit_to_bits(bitread,
                part_yaml,
                arch,
                bit_file,
                bits_file,
                frame_range=None):
    """ Calls bitread to create bits (ASCII) from bit file (binary)

    Arguments:
    bitread -- path to the bitread executable.
    part_yaml -- path to the part.yaml description for the target part.
    arch -- architecture family name passed to --architecture.
    bit_file -- input binary bitstream path.
    bits_file -- output ASCII bits path.
    frame_range -- optional frame range string passed via -F.
    """
    # Build an argument list and run with shell=False: the previous
    # shell=True + string formatting broke on paths containing spaces or
    # shell metacharacters and was an injection hazard for untrusted paths.
    cmd = [bitread, '-E', '--part_file', part_yaml, '--architecture', arch]
    if frame_range:
        # Originally interpolated as '-F {}' into a shell string, i.e. two
        # tokens; assumes frame_range itself contains no spaces.
        cmd += ['-F', frame_range]
    cmd += ['-o', bits_file, '-z', '-y', bit_file]
    # check_output keeps the original behavior of raising on a non-zero
    # exit status; the captured stdout is discarded as before.
    subprocess.check_output(cmd)
def bits_to_fasm(db_root, part, bits_file, verbose, canonical,
                 suppress_zero_features):
    """Disassemble an ASCII .bits file into FASM and print it to stdout.

    Arguments:
    db_root -- database root directory.
    part -- part name used to look up the tile grid.
    bits_file -- path to the ASCII bits file produced by bitread.
    verbose -- emit lines for unknown tiles/bits.
    canonical -- print the canonical FASM form.
    suppress_zero_features -- drop set-feature lines whose feature is a
        zero feature.
    """
    database = Database(db_root, part)
    grid = database.grid()
    disassembler = fasm_disassembler.FasmDisassembler(database)
    with open(bits_file) as handle:
        bitdata = bitstream.load_bitdata(handle, bitstream.WORD_SIZE_BITS)
    # Merge and sort features into a FASM model ordered by tile.
    model = fasm.output.merge_and_sort(
        disassembler.find_features_in_bitstream(bitdata, verbose=verbose),
        zero_function=disassembler.is_zero_feature,
        sort_key=grid.tile_key,
    )
    if suppress_zero_features:
        # Keep non-feature lines (comments etc.) and any feature line whose
        # feature is not a zero feature.
        kept = [
            line for line in model
            if line.set_feature is None
            or not disassembler.is_zero_feature(line.set_feature.feature)
        ]
        print(fasm.fasm_tuple_to_string(kept, canonical=canonical), end='')
    else:
        print(fasm.fasm_tuple_to_string(model, canonical=canonical), end='')
def main():
    """Parse CLI arguments and convert a .bit file to FASM on stdout.

    Defaults for --db-root, --part, --bitread and --architecture are taken
    from the URAY_* environment variables when set; otherwise the relevant
    options become required.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description='Convert UltraScale/UltraScalePlus bit file to FASM.')
    database_dir = os.getenv("URAY_DATABASE_DIR")
    database = os.getenv("URAY_DATABASE")
    # --db-root is required unless both env vars provide a default path.
    db_root_kwargs = {}
    if database_dir is None or database is None:
        db_root_kwargs['required'] = True
    else:
        db_root_kwargs['required'] = False
        db_root_kwargs['default'] = os.path.join(database_dir, database)
    # --part is required unless URAY_PART provides a default.
    default_part = os.getenv("URAY_PART")
    part_kwargs = {}
    if default_part is None:
        part_kwargs['required'] = True
    else:
        part_kwargs['required'] = False
        part_kwargs['default'] = default_part
    if os.getenv("URAY_TOOLS_DIR") is None:
        default_bitread = 'bitread'
    else:
        default_bitread = os.path.join(os.getenv("URAY_TOOLS_DIR"), 'bitread')
    if os.getenv("URAY_ARCH") is None:
        default_arch = "UltraScale"
    else:
        default_arch = os.getenv("URAY_ARCH")
    parser.add_argument('--db-root', help="Database root.", **db_root_kwargs)
    parser.add_argument(
        '--bits-file',
        help="Output filename for bitread output, default is deleted tempfile.",
        default=None)
    parser.add_argument(
        '--part', help="Name of part being targetted.", **part_kwargs)
    # Fixed copy-pasted help text: this option is the path to the bitread
    # executable, not the part name.
    parser.add_argument(
        '--bitread',
        help="Path to the bitread executable.",
        default=default_bitread)
    parser.add_argument(
        '--architecture',
        help=
        "Name of the device architecture family (e.g. UltraScale, Series7, etc.)",
        default=default_arch)
    parser.add_argument(
        '--frame_range', help="Frame range to use with bitread.")
    parser.add_argument('bit_file', help='')
    parser.add_argument(
        '--verbose',
        help='Print lines for unknown tiles and bits',
        action='store_true')
    parser.add_argument(
        '--canonical', help='Output canonical bitstream.', action='store_true')
    parser.add_argument(
        '--suppress_zero_features',
        help='Supress zero features.',
        action='store_true')
    args = parser.parse_args()
    with contextlib.ExitStack() as stack:
        # Use the user-provided bits file when given; otherwise stage through
        # a temporary file that is deleted when the stack unwinds.
        if args.bits_file:
            bits_file = stack.enter_context(open(args.bits_file, 'wb'))
        else:
            bits_file = stack.enter_context(tempfile.NamedTemporaryFile())
        bit_to_bits(
            bitread=args.bitread,
            part_yaml=os.path.join(args.db_root, args.part, "part.yaml"),
            arch=args.architecture,
            bit_file=args.bit_file,
            bits_file=bits_file.name,
            frame_range=args.frame_range,
        )
        bits_to_fasm(
            db_root=args.db_root,
            part=args.part,
            bits_file=bits_file.name,
            verbose=args.verbose,
            canonical=args.canonical,
            suppress_zero_features=args.suppress_zero_features)
if __name__ == '__main__':
    # Script entry point.
    main()
| StarcoderdataPython |
4802059 | <reponame>SebasHernandezMD/AutomatO<filename>Automat_Script.py
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 12 21:42:45 2021
@author: SebastianHdz
Script para automatizar pipeline-analisis de WGS-TB:
En la carpeta debe tener 3 archivos en formato UNIX-UNIOCODE UTF8:
-> automat_script.py #Este programa
-> ls.txt #Lista de Samples (one per line, no other characters)
-> pipeline.txt #Pipeline con Sample en forma de ERRXXX
-> batch.sh #Archivo .sh contiene cada pipeline de cada sample
"""
print("AUTOMAT-O: script para batch de pipelines:")
print("\n")
#%%
# Open the sample-list file for reading (object ls),
# turn the file into a python list (list_raw),
# and strip the trailing "\n" from each entry with rstrip in a loop.
with open("ls.txt", "r") as ls:
    list_raw=ls.readlines()
    list_samples=[l.rstrip() for l in list_raw]
#%%
# Define the placeholder to replace for each sample (ERRXXX);
# NOTE: double-check the pipeline file uses this exact placeholder.
# Open the pipeline file and keep it as a string (pipeline) so it can be read,
# then count how many ERRXXX occurrences must be replaced, for the final checks.
# NOTE(review): re.findall is called with `overlapped`/`ignore_unused` keywords,
# which only exist in the third-party `regex` module — the stdlib `re` would fail.
pattern="ERRXXX"
import regex as re
with open("pipeline.txt", "r") as file_handle_1:
    pipeline=file_handle_1.read()
result_patern=re.findall(pattern, pipeline, flags=0, pos=None, endpos=None, overlapped=True, ignore_unused=False)
patternCount=result_patern.count(pattern)
#%%
# Substitute each sample from list_samples into the pipeline template and
# append every rendered pipeline to batch.sh.
# Fixes over the original: the template is read once (it used to be re-opened
# on every iteration without ever closing the handle, leaking descriptors),
# and both files are managed with `with` so they are always closed.
import regex as re
with open("pipeline.txt", "r", encoding='UTF-8') as pipeline_file:
    pipeline = pipeline_file.read()
with open("batch.sh", "a", encoding='UTF-8') as batch_file:
    for each_element in list_samples:
        # count=0 replaces every occurrence of the placeholder (ERRXXX).
        batch_file.write(re.sub(pattern, each_element, pipeline, count=0))
#%%
# Final checks: report what was processed and verify that the number of
# "#SAMPLE:" markers appended to batch.sh matches the number of samples.
print("Esta es la lista de muestras a correr:", list_samples)
print("\n")
print("Este es el numero de muestras a procesar:", len(list_samples))
print("Este es el pattern a reemplazar:", pattern)
print("Este es el numero de", pattern, "a reemplezar en pipeline:", patternCount)
import regex as re
with open("batch.sh", "r+", encoding=('UTF-8')) as file_handle_2:
    batch=file_handle_2.read()
count_samples=re.findall("#SAMPLE:", batch, flags=0, pos=None, endpos=None, overlapped=True, ignore_unused=False)
print("Este es el numero de muestras agregadas al batch:", len(count_samples))
if len(count_samples) == len(list_samples):
    print("RESULTADO: Todo listo!")
else:
    print("PILAS!! Algo salio mal! Seguramente has pegado varios pipeline seguidos, o el archivo batch.sh no estaba vacio! Borra el contenido de batch.sh y vuelve a intentar!!")
# KEY: make sure the same pipeline has not been appended more than once
# (i.e. batch.sh must be empty before running this script).
#%%
# Final message to the user: how to make batch.sh executable and run it.
print("\n")
print("Recuerda para ejecutar el batch.sh:")
print("$ chmod a+x batch.sh #Activa como ejecutable el archivo.")
print("$ ./batch.sh #Ejecuta el pipeline.")
| StarcoderdataPython |
119256 | # coding:utf-8
#数据存储器
#对提取的数据进行整理和存储
#更新时间:2016/3/8
#创建时间:2016/3/2
#作者:debbie
import urllib2
import cookielib
class HtmlOutputer(object):
    """Data storage helper: organizes and writes extracted crawl data to files.

    NOTE(review): this module imports urllib2/cookielib at the top of the file
    and calls .encode("utf8") on items, which indicates Python 2 era code —
    confirm the target interpreter before porting.
    """
    def __init__(self):
        # Accumulator for collected data; currently unused (see the
        # commented-out extend call in collect_data).
        self.datas=[]
    # Store the data extracted from the root page.
    def collect_topic(self,topic_data):
        # topic_data -- iterable of rows; each row is an iterable of text
        # fields. Rows are written space-separated, one row per line.
        if topic_data is None:
            return
        fout=open('data.dat','w')
        for item in topic_data:
            for x in item:
                x=x.encode("utf8")
                fout.write(x)
                fout.write(' ')
            fout.write('\n')
        fout.close()
    # Store the extracted comment data for a single page.
    def collect_data(self,comment_data,url):
        if comment_data is None:
            return
        #self.datas.extend(data)
        # Name the per-page output file after the last path component of the URL.
        arr_url=url.split('/')
        fname=arr_url[len(arr_url)-1]
        # When storing pages individually, the file is named from the URL path.
        fout=open('./data/'+fname+'.dat','w')
        for item in comment_data:
            for x in item:
                x=x.encode("utf8")
                fout.write(x)
                fout.write(' ')
            fout.write('\n')
        fout.close()
    # For the case where pages are not stored individually but aggregated
    # after the crawl finishes; currently only truncates the output file.
    def output_html(self):
        fout=open('output.dat','w')
        fout.close()
3311283 | <reponame>lebedevsergey/poet-ex-machina<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright 2016 <NAME>
# Licensed under the Apache License, Version 2.0
import includes.utils as utils
class RhymesAndRitms:
    """Rhyme and rhythm helpers for verse generation.

    Works on word-info mappings with at least the keys 'word', 'sylNum'
    (syllable count) and 'accentSylNum' (1-based accented syllable index),
    as used by isRhyme and RITM_li below.
    """
    # new algorithm that works with word syllables
    @staticmethod
    def getRhymedEnd(rhymedWord, rhymedWordAccentedSyllOffset):
        """Return the rhyming tail of *rhymedWord*.

        The tail starts at the vowel of the accented syllable, where the
        accented syllable is *rhymedWordAccentedSyllOffset* syllables from
        the end of the word (0 = last syllable, i.e. masculine rhyme).
        """
        syllables = utils.Utils.getWordSyllables(rhymedWord)
        accentedSyllNum = len(syllables) - rhymedWordAccentedSyllOffset
        s = ''
        # Walk syllables from the end; copy whole syllables until the
        # accented one, then copy it only back to (and including) its vowel.
        for i, syl in reversed(list(enumerate(syllables))):
            if i + 1 != accentedSyllNum:
                s = syl + s
            else:
                j = len(syl) - 1
                for c in reversed(syl):
                    s = c + s
                    if utils.Utils.isVowel(c):
                        break
                    j = j - 1
                # For a word accented on its first syllable, also keep the
                # consonant preceding the vowel (if any).
                if len(syllables) == accentedSyllNum and j > 0:
                    s = syl[j-1] + s
                break
        return s
    # old algorithm taken from the original ASM program (kept for reference)
    @staticmethod
    def getRhymedEnd_OldAlgorithm(rhymedWord, rhymedWordAccentedSyllOffset):
        """Character-based variant of getRhymedEnd (legacy implementation).

        Scans the word from its end, consuming one consonant-cluster/vowel
        group per syllable offset.
        NOTE(review): the `j >= 0` guards are checked after indexing
        rhymedWord[j], so malformed input could index from the end again —
        behavior preserved from the original program.
        """
        s = ''
        j = len(rhymedWord) - 1
        for i in range(0, rhymedWordAccentedSyllOffset + 1):
            while (not utils.Utils.isVowel(rhymedWord[j])) and j >= 0:
                s = rhymedWord[j] + s
                j = j - 1
            while (utils.Utils.isVowel(rhymedWord[j])) and j >= 0:
                s = rhymedWord[j] + s
                j = j - 1
        if rhymedWordAccentedSyllOffset > 0: # if not masculine rhyme then add syllable
            while (not utils.Utils.isVowel(rhymedWord[j])) and j >= 0:
                s = rhymedWord[j] + s
                j = j - 1
        return s
    @staticmethod
    def isRhyme(wordInfo, rhymedWordInfo):
        """Return True when the two words rhyme.

        Compares the rhyming tails character by character from the end;
        a character also matches its paired consonant as reported by
        utils.Utils.getConsonant (voiced/voiceless tolerance).
        """
        rhymedWordAccentedSyllOffset = rhymedWordInfo['sylNum'] - rhymedWordInfo['accentSylNum']
        rhymedEnd1 = RhymesAndRitms.getRhymedEnd(rhymedWordInfo['word'], rhymedWordAccentedSyllOffset)
        wordAccentedSyllOffset = wordInfo['sylNum'] - wordInfo['accentSylNum']
        rhymedEnd2 = RhymesAndRitms.getRhymedEnd(wordInfo['word'], wordAccentedSyllOffset)
        if not rhymedEnd1 or not rhymedEnd2:
            return False
        # Earlier relaxed length rule, kept for reference:
        # if len(rhymedEnd1) != len(rhymedEnd2) and not (rhymedWordAccentedSyllOffset == 0 and (len(rhymedEnd1) == 1 or len(rhymedEnd2) == 1)):
        if len(rhymedEnd1) != len(rhymedEnd2):
            return False
        j = len(rhymedEnd2)-1
        for i in range(len(rhymedEnd1)-1, -1, -1):
            if j < 0:
                return False
            c1 = rhymedEnd1[i]
            c2 = rhymedEnd2[j]
            consonant = utils.Utils.getConsonant(c2)
            if c1 != c2 and c1 != consonant:
                return False
            j = j - 1
        return True
    # Checks whether a word fits the rhythm of a verse line at the current
    # position. (Legacy note from the original ASM program: "6) check rhythm
    # match".)
    @staticmethod
    def RITM_li(wordInfo, prevWordInfo, verseLineTemplate, curTemplateSymlIndex):
        """Return True if *wordInfo* fits *verseLineTemplate* ending at
        syllable index *curTemplateSymlIndex* ('+' marks accented slots)."""
        sylNum = wordInfo['sylNum']
        if prevWordInfo:
            prevSylNum = prevWordInfo['sylNum']
        else:
            prevSylNum = 0
        # Reject two consecutive very short words (combined <= 2 syllables).
        if prevSylNum + sylNum <= 2 and prevSylNum > 0:
            return False
        if curTemplateSymlIndex + 1 - sylNum < 0: # does the word fit the line?
            return False
        # Monosyllables carry no fixed accent and always fit.
        if sylNum == 1:
            return True
        accentedSyllOffset = sylNum - wordInfo['accentSylNum']
        if (curTemplateSymlIndex - accentedSyllOffset) < 0:
            return False
        # The word's accented syllable must land on a '+' slot of the template.
        if verseLineTemplate[curTemplateSymlIndex - accentedSyllOffset] != '+':
            return False
        return True
174792 | <reponame>williamfzc/sepmachine
from sepmachine.pipeline.base import BasePipeline
| StarcoderdataPython |
3204958 | <reponame>openstack/airship-armada<gh_stars>10-100
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from armada import const
from armada.exceptions import manifest_exceptions
from armada.handlers import helm
from armada.handlers import wait
from armada.tests.unit import base
# Sample chart wait config; not referenced by the test cases visible in this
# file section — confirm usage elsewhere before removing.
test_chart = {'wait': {'timeout': 10, 'native': {'enabled': False}}}
class ChartWaitTestCase(base.ArmadaTestCase):
    """Unit tests for wait.ChartWait timeout resolution and wait construction.

    Uses assertEqual throughout: assertEquals is a deprecated alias of
    assertEqual and emits DeprecationWarning on modern Python.
    """
    def get_unit(self, chart_data, timeout=None, version=2):
        """Build a ChartWait around a minimal chart document.

        Note: PodWaitTestCase calls this unbound as
        ChartWaitTestCase.get_unit(None, {}, ...), passing None for self.
        """
        chart = {
            'schema': 'armada/Chart/v{}'.format(str(version)),
            'metadata': {
                'name': 'test'
            },
            const.KEYWORD_DATA: chart_data
        }
        return wait.ChartWait(
            k8s=mock.MagicMock(),
            release_id=helm.HelmReleaseId('test', 'test-test'),
            chart=chart,
            k8s_wait_attempts=1,
            k8s_wait_attempt_sleep=1,
            timeout=timeout)
    def test_get_timeout(self):
        """wait.timeout takes precedence over the deprecated top-level timeout."""
        unit = self.get_unit({'timeout': 5, 'wait': {'timeout': 10}})
        self.assertEqual(unit.get_timeout(), 10)
    def test_get_timeout_default(self):
        """Without any timeout configured, the chart default applies."""
        unit = self.get_unit({})
        self.assertEqual(unit.get_timeout(), const.DEFAULT_CHART_TIMEOUT)
    def test_get_timeout_override(self):
        """An explicit constructor timeout overrides chart-level values."""
        unit = self.get_unit(
            timeout=20, chart_data={
                'timeout': 5,
                'wait': {
                    'timeout': 10
                }
            })
        self.assertEqual(unit.get_timeout(), 20)
    def test_get_timeout_deprecated(self):
        """The deprecated top-level timeout is honored when wait.timeout is absent."""
        unit = self.get_unit({'timeout': 5})
        self.assertEqual(unit.get_timeout(), 5)
    def test_is_native_enabled_default_false(self):
        unit = self.get_unit({})
        self.assertEqual(unit.is_native_enabled(), False)
    def test_is_native_enabled_true(self):
        unit = self.get_unit({'wait': {'native': {'enabled': True}}})
        self.assertEqual(unit.is_native_enabled(), True)
    def test_is_native_enabled_false(self):
        unit = self.get_unit({'wait': {'native': {'enabled': False}}})
        self.assertEqual(unit.is_native_enabled(), False)
    def test_waits_init(self):
        """Each resource entry produces the matching *Wait implementation."""
        unit = self.get_unit({
            'wait': {
                'resources': [{
                    'type': 'pod',
                    'labels': {
                        'foo': 'bar'
                    }
                }, {
                    'type': 'job',
                    'labels': {
                        'foo': 'bar'
                    }
                }, {
                    'type': 'daemonset',
                    'labels': {
                        'foo': 'bar'
                    },
                    'min_ready': 5
                }, {
                    'type': 'deployment',
                    'labels': {
                        'foo': 'bar'
                    },
                    'min_ready': '50%'
                }, {
                    'type': 'statefulset',
                    'labels': {
                        'foo': 'bar'
                    }
                }]
            }
        })  # yapf: disable
        self.assertEqual(5, len(unit.waits))
        self.assertIsInstance(unit.waits[0], wait.PodWait)
        self.assertIsInstance(unit.waits[1], wait.JobWait)
        self.assertIsInstance(unit.waits[2], wait.DaemonSetWait)
        self.assertIsInstance(unit.waits[3], wait.DeploymentWait)
        self.assertIsInstance(unit.waits[4], wait.StatefulSetWait)
    def test_waits_init_min_ready_fails_if_not_controller(self):
        """min_ready is only valid for controller resources, not pods/jobs."""
        def create_pod_wait_min_ready():
            self.get_unit(
                {
                    'wait': {
                        'resources': [
                            {
                                'type': 'pod',
                                'labels': {
                                    'foo': 'bar'
                                },
                                'min_ready': 5
                            }
                        ]
                    }
                })
        self.assertRaises(
            manifest_exceptions.ManifestException, create_pod_wait_min_ready)
        def create_job_wait_min_ready():
            self.get_unit(
                {
                    'wait': {
                        'resources': [
                            {
                                'type': 'job',
                                'labels': {
                                    'foo': 'bar'
                                },
                                'min_ready': 5
                            }
                        ]
                    }
                })
        self.assertRaises(
            manifest_exceptions.ManifestException, create_job_wait_min_ready)
    def test_waits_init_invalid_type(self):
        """An unknown resource type raises a ManifestException."""
        def create_with_invalid_type():
            self.get_unit(
                {
                    'wait': {
                        'resources': [
                            {
                                'type': 'invalid',
                                'labels': {
                                    'foo': 'bar'
                                },
                                'min_ready': 5
                            }
                        ]
                    }
                })
        self.assertRaises(
            manifest_exceptions.ManifestException, create_with_invalid_type)
    @mock.patch.object(wait.ChartWait, 'get_resource_wait')
    def test_wait(self, get_resource_wait):
        """ChartWait.wait delegates to each configured resource wait once."""
        def return_mock(*args, **kwargs):
            return mock.MagicMock()
        get_resource_wait.side_effect = return_mock
        unit = self.get_unit(
            {'wait': {
                'resources': [{
                    'type': 'foo'
                }, {
                    'type': 'bar'
                }]
            }})
        unit.wait(10)
        self.assertEqual(2, len(unit.waits))
        for w in unit.waits:
            w.wait.assert_called_once()
class PodWaitTestCase(base.ArmadaTestCase):
    """Tests for the pod-filtering behavior of wait.PodWait."""

    def get_unit(self, labels, version=2):
        """Return a PodWait wired to a fresh ChartWait for *version*."""
        chart_wait = ChartWaitTestCase.get_unit(None, {}, version=version)
        return wait.PodWait(
            resource_type='pod', chart_wait=chart_wait, labels=labels)

    def test_include_resource(self):
        """Helm-test pods, job-owned pods and evicted pods are excluded."""

        def make_pod(annotations=None, owner_references=None, phase=None):
            # Build a mock pod; only the attributes include_resource reads
            # are pinned down, everything else stays an auto Mock.
            pod = mock.Mock()
            pod.metadata.annotations = {} if annotations is None else annotations
            pod.metadata.owner_references = owner_references
            if phase is not None:
                pod.status.phase = phase
            return pod

        helm_test_pods = [
            make_pod({'key': 'value', 'helm.sh/hook': 'test'}),
            make_pod({'helm.sh/hook': 'test-success'}),
            make_pod({'helm.sh/hook': 'test-failure'}),
            make_pod({'helm.sh/hook': 'test,pre-install'}),
        ]
        job_owned_pods = [
            make_pod(owner_references=[mock.Mock(kind='Job')]),
            make_pod(owner_references=[
                mock.Mock(kind='NotAJob'),
                mock.Mock(kind='Job'),
            ]),
        ]
        evicted_pods = [make_pod(phase='Evicted')]
        kept_pods = [
            make_pod(),
            make_pod(owner_references=[]),
            make_pod({'helm.sh/hook': 'pre-install'}),
            make_pod({'key': 'value'}),
            make_pod(owner_references=[mock.Mock(kind='NotAJob')]),
        ]

        unit = self.get_unit({}, version=1)
        # All excluded categories are rejected ...
        for pod in helm_test_pods + job_owned_pods + evicted_pods:
            self.assertFalse(unit.include_resource(pod))
        # ... while ordinary pods are kept.
        for pod in kept_pods:
            self.assertTrue(unit.include_resource(pod))
class JobWaitTestCase(base.ArmadaTestCase):
    """Tests for the job-filtering behavior of wait.JobWait."""

    def get_unit(self, labels):
        """Return a JobWait whose chart wait is a plain MagicMock stub."""
        return wait.JobWait(
            resource_type='job', chart_wait=mock.MagicMock(), labels=labels)

    def test_include_resource(self):
        """Jobs owned by a CronJob are excluded; all others are included."""

        def make_job(annotations=None, owner_references=None):
            job = mock.Mock()
            job.metadata.annotations = {} if annotations is None else annotations
            job.metadata.owner_references = owner_references
            return job

        cronjob_owned = [
            make_job(owner_references=[mock.Mock(kind='CronJob')]),
            make_job(owner_references=[
                mock.Mock(kind='NotACronJob'),
                mock.Mock(kind='CronJob'),
            ]),
        ]
        kept_jobs = [
            make_job(),
            make_job(owner_references=[]),
            make_job(owner_references=[mock.Mock(kind='NotAJob')]),
        ]

        unit = self.get_unit({})
        for job in cronjob_owned:
            self.assertFalse(unit.include_resource(job))
        for job in kept_jobs:
            self.assertTrue(unit.include_resource(job))
| StarcoderdataPython |
3208051 | # Copyright 2020 Soil, Inc.
import eventlet
from pyVmomi import vim
from soil.api.utils.common import sizeof_fmt
from soil.api.utils.vmware import vCenterPropertyCollector
class ViewBuilder(object):
    # Name of the API collection this builder renders views for.
    _collection_name = "vcenter"
    def __init__(self):
        # Python-2-compatible super() call; no builder-local state is set up.
        super(ViewBuilder, self).__init__()
def _detail(self, request, vcenter):
if vcenter is None:
return {"vcenter": {}}
# summary = self._summary(vcenter)
vcenter_ref = {
"vcenter": {
'id': vcenter.get('id'),
'uuid': vcenter.get('uuid'),
'name': vcenter.get('name'),
'type': vcenter.get('type'),
'host': vcenter.get('host'),
'port': vcenter.get('port'),
'status': vcenter.get('status'),
'created_at': vcenter.get('created_at'),
'updated_at': vcenter.get('updated_at'),
# 'summary': summary
}
}
return vcenter_ref
def _list(self, request, vcenters):
if not vcenters:
return {"vcenters": []}
vcenters_list = []
# the pile acts as a collection of return values from the functions
# if any exceptions are raised by the function they'll get raised here
pile = eventlet.GreenPile(len(vcenters))
for vcenter in vcenters:
pile.spawn(self._detail, request, vcenter)
for result in pile:
try:
vcenter = result['vcenter']
vcenters_list.append(vcenter)
except KeyError:
pass
return {"vcenters": vcenters_list}
# backend private method
def _summary(self, vcenter):
summary_ref = {
'version': '',
'hostname': '',
'numVms': 0,
'numTemplates': 0,
'numHosts': 0,
'numEffectiveHosts': 0,
'numCpus': 0,
'totalCpuMhz': 0,
'totalMemory': 0,
'numCpuCores': 0,
'numCpuThreads': 0,
'effectiveCpuMhz': 0,
'effectiveMemory': 0,
'dataStore': 0
}
object_type = []
properties = {
'ComputeResource': ['summary'],
'HostSystem': ['hardware.cpuInfo.numCpuPackages'],
'Datastore': ['summary.capacity'],
'VirtualMachine': ['config.template', 'guest'],
}
with vCenterPropertyCollector(vcenter, object_type, properties) as result:
for key, value in result.items():
if isinstance(key, str):
about = value.about
summary_ref['version'] = ' '.join([about.apiVersion, about.build])
continue
if isinstance(key, vim.ComputeResource):
summary = value['summary']
summary_ref['numHosts'] += summary.numHosts
summary_ref['numEffectiveHosts'] += summary.numEffectiveHosts
summary_ref['totalCpuMhz'] += summary.totalCpu
summary_ref['totalMemory'] += summary.totalMemory
summary_ref['numCpuCores'] += summary.numCpuCores
summary_ref['numCpuThreads'] += summary.numCpuThreads
summary_ref['effectiveCpuMhz'] += summary.effectiveCpu
summary_ref['effectiveMemory'] += summary.effectiveMemory
continue
if isinstance(key, vim.VirtualMachine):
template = value.get('config.template', False)
guest = value['guest']
if template:
summary_ref['numTemplates'] += 1
else:
summary_ref['numVms'] += 1
ip_address = [net.ipAddress for net in guest.net if hasattr(net, 'ipAddress')]
for ip in ip_address:
if vcenter.host in ip:
summary_ref['hostname'] = guest.hostName
continue
if isinstance(key, vim.HostSystem):
numCpuPackages = value['hardware.cpuInfo.numCpuPackages']
summary_ref['numCpus'] += numCpuPackages
continue
if isinstance(key, vim.Datastore):
capacity = value['summary.capacity']
summary_ref['dataStore'] += capacity
summary_ref['totalMemory'] = sizeof_fmt(summary_ref['totalMemory'])
summary_ref['effectiveMemory'] = sizeof_fmt(summary_ref['effectiveMemory'])
summary_ref['dataStore'] = sizeof_fmt(summary_ref['dataStore'])
return summary_ref
| StarcoderdataPython |
149185 | <reponame>azuline/cryptopals
"""
Detect single-character XOR
"""
import sys # isort:skip
from pathlib import Path # isort:skip
# Make the repository root importable so `set1.c03` resolves when this
# script is run directly from its own directory.
sys.path.append(str(Path(__file__).parent.resolve().parent))
from itertools import chain
from set1.c03 import get_options, select_option
# Input file: one hex-encoded candidate ciphertext per line.
DATA_PATH = Path(__file__).parent / "data" / "04.txt"
if __name__ == "__main__":
    # Decode each hex-encoded line of the data file into raw bytes.
    hex_lines = DATA_PATH.read_text().splitlines()
    ciphertexts = [bytes.fromhex(entry.strip()) for entry in hex_lines]

    # Score every single-byte-XOR candidate across all ciphertexts and
    # keep the best-scoring one.
    candidates = chain.from_iterable(map(get_options, ciphertexts))
    best = select_option(candidates)

    print(f"Selected: {best.option}")
| StarcoderdataPython |
3337513 | <filename>webapp/migrations/0008_auto_20191016_1649.py<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-10-16 14:49
from __future__ import unicode_literals
from django.db import migrations
import tinymce.models
class Migration(migrations.Migration):
    """Auto-generated migration: alter Profile.bio to a TinyMCE HTMLField."""

    # Must run after the migration that created the Profile model.
    dependencies = [
        ('webapp', '0007_profile'),
    ]

    operations = [
        # Re-declare Profile.bio as the HTMLField provided by the tinymce
        # package (imported at module top).
        migrations.AlterField(
            model_name='profile',
            name='bio',
            field=tinymce.models.HTMLField(),
        ),
    ]
| StarcoderdataPython |
1644086 | import os
import sys
import argparse
from src.agent import Agent
from src.utils import register_model_args
import tensorflow as tf
def main():
    """Parse CLI arguments, build an Agent, and run training."""
    parser = argparse.ArgumentParser(description='LUBAN runner')
    register_model_args(parser)
    # Parse only the real arguments.  The original passed the full
    # sys.argv, which made argparse treat the program name (argv[0]) as an
    # unknown argument and dump it into `unparsed`; parsed params are the
    # same either way, but argv[1:] is the intended usage.
    params, unparsed = parser.parse_known_args(sys.argv[1:])

    # Use the session as a context manager so it is closed (and its
    # resources released) deterministically when training finishes —
    # the original leaked the session.
    with tf.Session() as sess:
        agent = Agent(sess, params)
        agent.train(checkpoint_dir="./checkpoint",
                    data_dir='./data/dataset-50-3-2.hdf5')
if __name__ == '__main__':
    # Entry point when executed as a script.
    main()
| StarcoderdataPython |
165924 | #!/usr/bin/env python
import os
import sys
from twit.wsgi import application
# Dotted path to the WSGI callable (Django settings-style constant).
# NOTE(review): unused within this module — presumably kept for
# reference; confirm against callers before removing.
WSGI_APPLICATION = 'twit.wsgi.application'
if __name__ == "__main__":
    # Point Django at the project settings unless already configured.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "twit.settings")
    # Imported after the settings module variable is set, mirroring
    # Django's standard manage.py layout.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| StarcoderdataPython |
3384491 | import random
from requests import Response
from starlette import status
from app.main import ITEMS, HELLO_WORLD
def test_root(test_client):
    """The root endpoint responds 200 with the hello-world payload."""
    resp: Response = test_client.get("/")

    assert resp.status_code == status.HTTP_200_OK
    assert resp.json() == HELLO_WORLD
def test_items(test_client):
    """Every known item id is served back with its stored value.

    The original checked a single random.choice() item, which made the
    test nondeterministic and left most items unexercised; iterating all
    of ITEMS is deterministic and strictly more thorough.
    """
    for key, value in ITEMS.items():
        response: Response = test_client.get(f"/items/{key}")

        assert response.status_code == status.HTTP_200_OK
        assert response.json() == value
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.