hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acffd6761384a301287008fa357d0255432a4326
| 1,187
|
py
|
Python
|
Mini-xtreme-2021/Answers/q8.py
|
bimalka98/Data-Structures-and-Algorithms
|
7d358a816561551a5b04af8de378141becda42bd
|
[
"MIT"
] | 1
|
2021-03-21T07:54:08.000Z
|
2021-03-21T07:54:08.000Z
|
Mini-xtreme-2021/Answers/q8.py
|
bimalka98/Data-Structures-and-Algorithms
|
7d358a816561551a5b04af8de378141becda42bd
|
[
"MIT"
] | null | null | null |
Mini-xtreme-2021/Answers/q8.py
|
bimalka98/Data-Structures-and-Algorithms
|
7d358a816561551a5b04af8de378141becda42bd
|
[
"MIT"
] | null | null | null |
def findServerCount(timeLst):
    """Print the number of servers needed to handle every time range.

    Each entry of `timeLst` is a string whose first four characters are
    the start time ("HHMM") and whose last four characters are the end
    time.  A server whose free-from time (end time plus the 5-minute
    cooldown added by getFreeTime) is <= a request's start time can be
    reused; otherwise a new server is allocated.

    BUG FIX: the original loop used `range(len(timeLst) - 1)`, which
    silently dropped the final request; we now process every entry.
    """
    servers = []  # free-from times ("HHMM") of the servers allocated so far
    for entry in timeLst:
        start, end = entry[:4], entry[-4:]
        for serverIndex in range(len(servers)):
            # Lexicographic compare is valid: times are zero-padded "HHMM".
            if servers[serverIndex] <= start:
                servers[serverIndex] = getFreeTime(end)
                break
        else:
            # No existing server was free at `start`: allocate a new one.
            servers.append(getFreeTime(end))
    print(len(servers))
def getFreeTime(time):
    """Return the time ("HHMM") five minutes after `time`, zero-padded.

    Rolls the hour forward when adding five minutes crosses the hour
    boundary.  No wrap-around at hour 24 is performed (matching the
    original behavior).
    """
    hour = time[:2]
    minute = int(time[-2:])
    if minute >= 55:
        # Crossing the hour: wrap minutes, bump the hour.
        minute = (minute + 5) % 60
        hour = int(hour) + 1
    else:
        minute += 5
    return str(hour).zfill(2) + str(minute).zfill(2)
def removeWhiteSpace(time):
    """Return `time` with every space character (ASCII 32) removed.

    Only plain spaces are stripped; other whitespace (tabs, newlines)
    is preserved, exactly as in the original implementation.
    """
    return "".join(ch for ch in time if ch != " ")
# Read the number of time ranges, then one range per line.
n = int(input())
timeLst = []
for i in range(n):
    # removeWhiteSpace drops interior spaces; strip() trims the ends.
    # (The original also called `timeRange.replace(" ", "")` and discarded
    # the result -- a no-op, removed here.)
    timeLst.append(removeWhiteSpace(input()).strip())
timeLst.sort()  # process requests in chronological order of start time
findServerCount(timeLst)
| 26.377778
| 67
| 0.566976
|
acffd70c595a52abee301c689c5e7e98ae59e70d
| 6,125
|
py
|
Python
|
pypy/translator/jvm/builtin.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 12
|
2016-01-06T07:10:28.000Z
|
2021-05-13T23:02:02.000Z
|
pypy/translator/jvm/builtin.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | null | null | null |
pypy/translator/jvm/builtin.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 2
|
2016-07-29T07:09:50.000Z
|
2016-10-16T08:50:26.000Z
|
from pypy.translator.jvm import typesystem as jvmtype
from pypy.translator.jvm import generator as jvmgen
from pypy.rpython.ootypesystem import ootype
from pypy.translator.jvm.typesystem import \
jInt, jVoid, jStringBuilder, jString, jPyPy, jChar, jArrayList, jObject, \
jBool, jHashMap, jPyPyDictItemsIterator, Generifier, jCharSequence, \
jPyPyCustomDict
# ______________________________________________________________________
# Mapping of built-in OOTypes to JVM types
class JvmBuiltInType(jvmtype.JvmClassType):
    """
    Wraps a built-in OOType as a JVM class type.  When an OOTYPE is
    attached, lookups of the OOTYPE's methods are re-mapped to their JVM
    equivalents: either an explicit entry in built_in_methods, a method
    on a bridged (hand-written) Java class, or a static helper on the
    PyPy object.
    """
    def __init__(self, db, classty, OOTYPE):
        jvmtype.JvmClassType.__init__(self, classty.name)
        self.db = db
        self.OOTYPE = OOTYPE
        self.gen = Generifier(OOTYPE)
    def __eq__(self, other):
        # Two built-in types are interchangeable iff they share a name.
        if not isinstance(other, JvmBuiltInType):
            return False
        return self.name == other.name
    def __hash__(self):
        return hash(self.name)
    def lookup_field(self, fieldnm):
        """ Given a field name, returns a jvmgen.Field object """
        _, FIELDTY = self.OOTYPE._lookup_field(fieldnm)
        jfieldty = self.db.lltype_to_cts(FIELDTY)
        return jvmgen.Field(
            self.descriptor.class_name(), fieldnm, jfieldty, False)
    def lookup_method(self, methodnm):
        """ Given the method name, returns a jvmgen.Method object """
        # Shortcut: an explicit remapping in built_in_methods wins.
        key = (self.OOTYPE.__class__, methodnm)
        if key in built_in_methods:
            return built_in_methods[key]
        # Otherwise derive the Method object automagically from the
        # erased argument/result types, i.e. the java types the values
        # will actually have at runtime.
        ARGS, RESULT = self.gen.erased_types(methodnm)
        jargtypes = [self.db.lltype_to_cts(A) for A in ARGS]
        jrettype = self.db.lltype_to_cts(RESULT)
        if self.OOTYPE.__class__ in bridged_objects:
            # A hand-written java class already provides methods with
            # the correct names and signatures: invoke it virtually.
            return jvmgen.Method.v(self, methodnm, jargtypes, jrettype)
        # Default: a static helper on the PyPy object that takes an
        # instance of this object as its leading argument; the remaining
        # arguments are plain java conversions (except for generics).
        jargtypes = [self] + jargtypes
        return jvmgen.Method.s(jPyPy, methodnm, jargtypes, jrettype)
# When we lookup a method on a BuiltInClassNode, we first check the
# 'built_in_methods' and 'bridged_objects' tables. This allows us to
# redirect to other methods if we like.

# OOTYPE classes backed by a hand-written Java class whose methods
# already carry the correct names and signatures.
bridged_objects = (
    ootype.DictItemsIterator,
    ootype.WeakReference.__class__
    )

# Explicit (OOTYPE class, ll method name) -> jvmgen.Method remappings
# onto standard JVM library methods.
built_in_methods = {
    # Note: String and StringBuilder are rebound in ootype, and thus
    # .__class__ is required

    # StringBuilder -> java.lang.StringBuilder
    (ootype.StringBuilder.__class__, "ll_allocate"):
    jvmgen.Method.v(jStringBuilder, "ensureCapacity", (jInt,), jVoid),
    (ootype.StringBuilder.__class__, "ll_build"):
    jvmgen.Method.v(jStringBuilder, "toString", (), jString),

    # String -> java.lang.String
    (ootype.String.__class__, "ll_streq"):
    jvmgen.Method.v(jString, "equals", (jObject,), jBool),
    (ootype.String.__class__, "ll_strlen"):
    jvmgen.Method.v(jString, "length", (), jInt),
    (ootype.String.__class__, "ll_stritem_nonneg"):
    jvmgen.Method.v(jString, "charAt", (jInt,), jChar),
    (ootype.String.__class__, "ll_startswith"):
    jvmgen.Method.v(jString, "startsWith", (jString,), jBool),
    (ootype.String.__class__, "ll_endswith"):
    jvmgen.Method.v(jString, "endsWith", (jString,), jBool),
    (ootype.String.__class__, "ll_strcmp"):
    jvmgen.Method.v(jString, "compareTo", (jString,), jInt),
    (ootype.String.__class__, "ll_upper"):
    jvmgen.Method.v(jString, "toUpperCase", (), jString),
    (ootype.String.__class__, "ll_lower"):
    jvmgen.Method.v(jString, "toLowerCase", (), jString),
    (ootype.String.__class__, "ll_replace_chr_chr"):
    jvmgen.Method.v(jString, "replace", (jChar, jChar), jString),

    # Dict -> java.util.HashMap
    (ootype.Dict, "ll_set"):
    jvmgen.Method.v(jHashMap, "put", (jObject, jObject), jObject),
    (ootype.Dict, "ll_get"):
    jvmgen.Method.v(jHashMap, "get", (jObject,), jObject),
    (ootype.Dict, "ll_contains"):
    jvmgen.Method.v(jHashMap, "containsKey", (jObject,), jBool),
    (ootype.Dict, "ll_length"):
    jvmgen.Method.v(jHashMap, "size", (), jInt),
    (ootype.Dict, "ll_clear"):
    jvmgen.Method.v(jHashMap, "clear", (), jVoid),

    # CustomDict -> PyPy's custom dict class
    (ootype.CustomDict, "ll_set"):
    jvmgen.Method.v(jPyPyCustomDict, "put", (jObject, jObject), jObject),
    (ootype.CustomDict, "ll_get"):
    jvmgen.Method.v(jPyPyCustomDict, "get", (jObject,), jObject),
    (ootype.CustomDict, "ll_contains"):
    jvmgen.Method.v(jPyPyCustomDict, "containsKey", (jObject,), jBool),
    (ootype.CustomDict, "ll_length"):
    jvmgen.Method.v(jPyPyCustomDict, "size", (), jInt),
    (ootype.CustomDict, "ll_clear"):
    jvmgen.Method.v(jPyPyCustomDict, "clear", (), jVoid),

    # List -> java.util.ArrayList
    (ootype.List, "ll_length"):
    jvmgen.Method.v(jArrayList, "size", (), jInt),
    (ootype.List, "ll_getitem_fast"):
    jvmgen.Method.v(jArrayList, "get", (jInt,), jObject),
    }
# ootype.String[Builder] and ootype.Unicode[Builder] are mapped to the
# same JVM type, so we reuse the same builtin methods also for them
def add_unicode_methods():
    """Duplicate the String/StringBuilder entries for the Unicode types."""
    mapping = {
        ootype.String.__class__: ootype.Unicode.__class__,
        ootype.StringBuilder.__class__: ootype.UnicodeBuilder.__class__
    }
    # BUG FIX: iterate over a snapshot.  The loop inserts new keys into
    # built_in_methods, and mutating a dict while iterating its live
    # items() view raises RuntimeError on Python 3.
    for (TYPE, name), value in list(built_in_methods.items()):
        if TYPE in mapping:
            TYPE = mapping[TYPE]
            built_in_methods[TYPE, name] = value
add_unicode_methods()
del add_unicode_methods
| 36.676647
| 79
| 0.666776
|
acffd799786e676432b6bb96821da3c5941eb7fb
| 7,036
|
py
|
Python
|
talon_one/models/new_audience.py
|
talon-one/talon_one.py
|
f863bb3c2cc5ddc94d9227adcf14947b2ea7db41
|
[
"MIT"
] | 1
|
2021-03-05T06:41:26.000Z
|
2021-03-05T06:41:26.000Z
|
talon_one/models/new_audience.py
|
talon-one/talon_one.py
|
f863bb3c2cc5ddc94d9227adcf14947b2ea7db41
|
[
"MIT"
] | 1
|
2021-09-07T08:56:58.000Z
|
2021-09-07T08:56:58.000Z
|
talon_one/models/new_audience.py
|
talon-one/talon_one.py
|
f863bb3c2cc5ddc94d9227adcf14947b2ea7db41
|
[
"MIT"
] | 1
|
2019-05-21T10:27:54.000Z
|
2019-05-21T10:27:54.000Z
|
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class NewAudience(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared OpenAPI type of each model attribute.
    openapi_types = {
        'name': 'str',
        'integration': 'str',
        'integration_id': 'str'
    }

    # JSON key under which each attribute is (de)serialized.
    attribute_map = {
        'name': 'name',
        'integration': 'integration',
        'integration_id': 'integrationId'
    }

    def __init__(self, name=None, integration=None, integration_id=None, local_vars_configuration=None):  # noqa: E501
        """NewAudience - a model defined in OpenAPI"""  # noqa: E501
        # local_vars_configuration controls client_side_validation in the
        # setters below; a default Configuration() is used when omitted.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._name = None
        self._integration = None
        self._integration_id = None
        self.discriminator = None

        # Assign through the property setters so validation runs.
        self.name = name
        self.integration = integration
        self.integration_id = integration_id

    @property
    def name(self):
        """Gets the name of this NewAudience.  # noqa: E501

        The human-friendly display name for this Audience.  # noqa: E501

        :return: The name of this NewAudience.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this NewAudience.

        The human-friendly display name for this Audience.  # noqa: E501

        :param name: The name of this NewAudience.  # noqa: E501
        :type: str
        """
        # Validation only applies when client_side_validation is enabled.
        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                name is not None and len(name) < 1):
            raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`")  # noqa: E501

        self._name = name

    @property
    def integration(self):
        """Gets the integration of this NewAudience.  # noqa: E501

        Integration that this audience was created in.  # noqa: E501

        :return: The integration of this NewAudience.  # noqa: E501
        :rtype: str
        """
        return self._integration

    @integration.setter
    def integration(self, integration):
        """Sets the integration of this NewAudience.

        Integration that this audience was created in.  # noqa: E501

        :param integration: The integration of this NewAudience.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and integration is None:  # noqa: E501
            raise ValueError("Invalid value for `integration`, must not be `None`")  # noqa: E501
        # Closed enum per the OpenAPI spec.
        allowed_values = ["mparticle"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and integration not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `integration` ({0}), must be one of {1}"  # noqa: E501
                .format(integration, allowed_values)
            )

        self._integration = integration

    @property
    def integration_id(self):
        """Gets the integration_id of this NewAudience.  # noqa: E501

        The ID of this Audience in the third-party integration  # noqa: E501

        :return: The integration_id of this NewAudience.  # noqa: E501
        :rtype: str
        """
        return self._integration_id

    @integration_id.setter
    def integration_id(self, integration_id):
        """Sets the integration_id of this NewAudience.

        The ID of this Audience in the third-party integration  # noqa: E501

        :param integration_id: The integration_id of this NewAudience.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and integration_id is None:  # noqa: E501
            raise ValueError("Invalid value for `integration_id`, must not be `None`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                integration_id is not None and len(integration_id) < 1):
            raise ValueError("Invalid value for `integration_id`, length must be greater than or equal to `1`")  # noqa: E501

        self._integration_id = integration_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models/lists/dicts via to_dict().
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NewAudience):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, NewAudience):
            return True

        return self.to_dict() != other.to_dict()
| 36.268041
| 647
| 0.622797
|
acffd84d1925e928e54858599dc37372336576d7
| 37,138
|
py
|
Python
|
tensorflow_probability/python/experimental/nn/util/convolution_util.py
|
chrism0dwk/probability
|
ab260f15cae94c6802c2f2769fb448ad213b79cd
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/experimental/nn/util/convolution_util.py
|
chrism0dwk/probability
|
ab260f15cae94c6802c2f2769fb448ad213b79cd
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/experimental/nn/util/convolution_util.py
|
chrism0dwk/probability
|
ab260f15cae94c6802c2f2769fb448ad213b79cd
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python2, python3
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Functions for framing `conv` as `matmul`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.experimental.nn.util import utils
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static as ps
# Public API of this module (names exported by `from ... import *`).
__all__ = [
    'im2row',
    'im2row_index',
    'make_convolution_fn',
    'make_convolution_transpose_fn_with_dilation',
    'make_convolution_transpose_fn_with_subkernels',
    'make_convolution_transpose_fn_with_subkernels_matrix',
]
def im2row(x,
           block_shape,
           slice_step=(1, 1),
           padding='VALID',
           name=None):
  """Rearrange image blocks into rows.

  Frames 2D convolution as a matmul, e.g.
  `tf.nn.conv2d(x, k) = tf.matmul(
      tf.experimental.nn.util.im2row(x), tf.reshape(k, shape=[-1, out_size]))`.

  Args:
    x: Rank 3 (or more) Tensor representing 2D images.
    block_shape: Length-2 vector representing the block or "filter" shape.
    slice_step: Length-2 vector specifying the convolution stride length.
      Default value: `(1, 1)`.
    padding: One of `'VALID'` or `'SAME'` (case insensitive); only `'VALID'`
      is currently implemented.
      Default value: `'VALID'`.
    name: Python `str` used to describe ops created by this function.
      Default value: `None` (i.e., `'im2row'`).

  Returns:
    im2row_x: batch of matrices representing subblock copies of `x`.
      Same batch shape as `x` but with rightmost shape
      `[oh * ow, block_shape[0] * block_shape[1] * channels]`, with
      `oh = (h - block_shape[0] + 1) // slice_step[0]` and
      `ow = (w - block_shape[1] + 1) // slice_step[1]`.
  """
  with tf.name_scope(name or 'im2row'):
    padding = _validate_padding(padding)
    if padding == 'SAME':
      raise NotImplementedError(
          'Argument padding="SAME" not implemented.')
    # _validate_padding guarantees padding is now 'VALID'.
    x_shape = ps.shape(x)
    gather_idx, row_shape = im2row_index(
        x_shape, block_shape=block_shape, slice_step=slice_step)
    # Collapse the trailing [h, w, c] dims into one axis so a single
    # axis=-1 gather (np.take-style) pulls out every block element.
    flat_shape = ps.pad(
        x_shape[:-3], paddings=[[0, 1]], constant_values=-1)
    flat_x = tf.reshape(x, flat_shape)
    gathered = tf.gather(flat_x, gather_idx, axis=-1)
    return tf.reshape(gathered, row_shape)
def im2row_index(input_shape,
                 block_shape,
                 rank=2,
                 slice_step=(1, 1),
                 dilations=(1, 1),
                 dtype=tf.int32,
                 transpose=False,
                 validate_args=False,
                 name=None):
  """Computes indexes into a flattened image for building `im2row`.

  Args:
    input_shape: Image shape; the trailing three entries are `[h, w, c]`
      and anything before them is treated as batch shape.
    block_shape: Length-2 vector, the block/filter spatial shape.
    rank: Spatial rank; only `2` is supported.
    slice_step: Length-2 vector of strides. Default value: `(1, 1)`.
    dilations: Length-2 vector of dilation rates. Default value: `(1, 1)`.
    dtype: Integer dtype of the returned indices. Default value: `tf.int32`.
    transpose: If `True`, indices are generated for the (flipped) kernel
      layout used by the transposed-convolution helpers in this module.
    validate_args: Forwarded to `prepare_tuple_argument`.
    name: Python `str` used to describe ops created by this function.

  Returns:
    idx: Integer indices into the flattened image, shape
      `[oh * ow, fh * fw * c]` (see step 4 below).
    new_shape: Shape tensor `batch_shape + [oh, ow, fh * fw * c]`.
  """
  with tf.name_scope(name or 'im2row_index'):
    if tf.get_static_value(rank) != 2:
      raise NotImplementedError('Argument `rank` currently only supports `2`; '
                                'saw "{}".'.format(rank))
    fh, fw = prepare_tuple_argument(
        block_shape, n=rank, arg_name='block_shape',
        validate_args=validate_args)
    sh, sw = prepare_tuple_argument(
        slice_step, n=rank, arg_name='slice_step', validate_args=validate_args)
    dh, dw = prepare_tuple_argument(
        dilations, n=rank, arg_name='dilations', validate_args=validate_args)

    # 1) Process input arguments.
    batch_shape, h, w, c = ps.split(
        ps.reshape(ps.cast(input_shape, dtype=dtype), shape=[-1]),
        num_or_size_splits=[-1, 1, 1, 1])
    h, w, c = h[0], w[0], c[0]

    # Effective (dilated) filter extents along each spatial axis.
    tot_fh = dh * (fh - 1) + 1
    tot_fw = dw * (fw - 1) + 1

    # 2) Assemble all block start positions as indexes into the flattened image.
    # start_idx.shape = [fh, fw, c]
    if transpose:
      # Walk the filter positions in reverse (flipped kernel), starting
      # from the last reachable element on each axis.
      last_element = lambda size, step: size - (size - 1) % step - 1
      w_step = c * dw
      h_step = c * w * dh
      last_w = last_element(c * tot_fw, w_step)
      last_h = last_element(c * w * tot_fh, h_step)
      start_idx = cartesian_add([
          ps.range(last_h, -1, delta=-h_step, dtype=dtype),
          ps.range(last_w, -1, delta=-w_step, dtype=dtype),
          ps.range(c, delta=1, dtype=dtype),
      ])
    else:
      start_idx = cartesian_add([
          ps.range(c * w * tot_fh, delta=c * w * dh, dtype=dtype),
          ps.range(c * tot_fw, delta=c * dw, dtype=dtype),
          ps.range(c, delta=1, dtype=dtype),
      ])

    # 3) Assemble all block offsets (into flattened image).
    eh = h - tot_fh + 1
    ew = w - tot_fw + 1

    offset_idx = cartesian_add([
        ps.range(w * eh, delta=w * sh, dtype=dtype),
        ps.range(ew, delta=sw, dtype=dtype),
    ])
    offset_idx = offset_idx * c
    oh = (eh - 1) // sh + 1  # out height
    ow = (ew - 1) // sw + 1  # out width

    # 4) Combine block start/offset pairs.
    # shape = [(eh // sh) * (ew // sw), fh * fw * c]
    idx = cartesian_add([offset_idx, start_idx])
    new_shape = ps.concat(
        [batch_shape, ps.convert_to_shape_tensor([oh, ow, fh * fw * c])],
        axis=0)
    return idx, new_shape
def cartesian_add(xs):
  """Adds a list of vectors by cumulatively expanding a dimension."""
  num = len(xs)
  total = 0
  for i, x in enumerate(xs):
    # Reshape x to [-1, 1, ..., 1] (trailing singleton axes) so the
    # additions broadcast outer-product style against the other vectors.
    total = total + ps.reshape(x, shape=[-1] + [1] * (num - 1 - i))
  return total
def _validate_padding(padding):
"""Verify correctness of `padding` argument."""
padding_ = str(padding).upper()
if padding_ in {'SAME', 'VALID'}:
return padding_
raise ValueError(
'Argument padding="{}" not recognized; must be one of '
'{{"VALID", "SAME"}} (case insensitive).'.format(padding))
# TODO(emilyaf): Finish docstrings.
def make_convolution_fn(
    filter_shape, rank, strides, padding, dilations=None, dtype=tf.int32,
    validate_args=False, name=None):
  """Like `tf.nn.conv2d` except applies batch of kernels to batch of `x`.

  Args:
    filter_shape: Length-2 vector `[filter_height, filter_width]`.
    rank: Spatial rank of the convolution; only `2` is supported.
    strides: Stride(s), as accepted by `prepare_conv_args`.
    padding: One of `'VALID'` or `'SAME'`.
    dilations: Dilation rate(s); `None` means undialted.
    dtype: Integer dtype used for index computations.
      Default value: `tf.int32`.
    validate_args: Whether to add runtime shape assertions.
    name: Python `str` used to describe ops created by this function.

  Returns:
    op: Callable `(x, kernel) -> y` applying a (possibly batched) kernel
      to a (possibly batched) image `x`.
  """
  with tf.name_scope(name or 'conv2d'):
    if tf.get_static_value(rank) != 2:
      raise NotImplementedError('Argument `rank` currently only supports `2`; '
                                'saw "{}".'.format(rank))
    [
        filter_shape,
        rank,
        strides,
        padding,
        dilations,
    ] = prepare_conv_args(
        filter_shape, rank=rank, strides=strides, padding=padding,
        dilations=dilations, validate_args=validate_args)

    def op(x, kernel):
      input_dtype = dtype_util.common_dtype([x, kernel], dtype_hint=tf.float32)
      x = tf.convert_to_tensor(x, dtype=input_dtype, name='x')
      kernel = tf.convert_to_tensor(kernel, dtype=input_dtype, name='kernel')

      batch_shape, event_shape = ps.split(
          ps.shape(x), num_or_size_splits=[-1, 3])
      xh, xw, c_in = ps.unstack(event_shape, num=3)
      fh, fw = filter_shape

      assertions = _maybe_validate_input_shapes(
          ps.shape(kernel), channels_in=c_in, filter_height=fh,
          filter_width=fw, validate_args=validate_args)

      with tf.control_dependencies(assertions):
        if tf.get_static_value(ps.rank(kernel)) == 2:
          # Unbatched kernel: collapse any leading batch dims of `x` into
          # one and delegate to tf.nn.conv2d.
          flat_x = tf.reshape(x, shape=ps.concat([[-1], event_shape], axis=0))
          # BUG FIX: previously `x` was passed here, leaving `flat_x`
          # unused and breaking inputs with extra batch dimensions.
          flat_y = tf.nn.conv2d(
              flat_x,
              filters=tf.reshape(kernel, shape=[fh, fw, c_in, -1]),
              strides=strides,
              padding=padding,
              data_format='NHWC',
              dilations=dilations)
          output_shape = ps.shape(flat_y)[-3:]
          return tf.reshape(
              flat_y, shape=ps.concat([batch_shape, output_shape], axis=0))

        # Batched kernel: express the convolution as gather + matmul.
        pad_values = [
            _get_conv_padding(
                xdim, filter_dim=k, stride=s, dilation=d, padding=padding)
            for (xdim, k, s, d) in zip((xh, xw), filter_shape, strides, dilations)
        ]

        idx, shape = im2row_index(
            (xh + sum(pad_values[0]), xw + sum(pad_values[1]), c_in),
            block_shape=filter_shape, slice_step=strides, dilations=dilations,
            dtype=dtype)

        if padding == 'SAME':
          n = ps.maximum(0, ps.rank(x) - 3)
          paddings = ps.pad(
              pad_values, paddings=[[n, 1], [0, 0]], constant_values=0)
          x = tf.pad(x, paddings=paddings, constant_values=0)

        flat_shape = ps.pad(
            batch_shape, paddings=[[0, 1]], constant_values=-1)
        flat_x = tf.gather(tf.reshape(x, shape=flat_shape), indices=idx, axis=-1)
        im_x = tf.reshape(flat_x, shape=ps.concat([batch_shape, shape], axis=0))
        return tf.matmul(im_x, kernel[..., tf.newaxis, :, :])
    return op
def _get_conv_padding(xdim, filter_dim, stride, dilation, padding):
"""Returns the number of zeros to pad at the start and end of an axis."""
if padding == 'VALID':
return (0, 0)
elif padding == 'SAME':
tot_k = dilation * (filter_dim - 1) + 1
tot_pad = tf.maximum(tot_k - ((xdim - 1) % stride + 1), 0)
pad_start = tot_pad // 2
return pad_start, tot_pad - pad_start
def make_convolution_transpose_fn_with_dilation(
    filter_shape, strides, padding, rank=2, dilations=None, dtype=tf.int32,
    validate_args=False, name=None):
  """Like `tf.nn.conv2d` except applies batch of kernels to batch of `x`.

  This version tends to be fastest on GPU. It implements the transposed
  convolution as a regular convolution of an image that is dilated by
  interleaving rows and columns of zeros equal to the number of strides.

  Args:
    filter_shape: Length-2 vector `[filter_height, filter_width]`.
    strides: Length-2 vector of strides (as accepted by
      `prepare_conv_args`).
    padding: One of `'VALID'` or `'SAME'`.
    rank: Spatial rank; only `2` is supported.
    dilations: Length-2 vector of dilation rates, or `None`.
    dtype: Integer dtype used for index computations.
    validate_args: Whether to add runtime shape assertions.
    name: Python `str` used to describe ops created by this function.

  Returns:
    convolution_transpose_fn: A callable that takes an input `Tensor` and kernel
      and applies the transpose convolution operation.
  """
  with tf.name_scope(name or 'make_convolution_transpose_fn_with_dilation'):
    if tf.get_static_value(rank) != 2:
      raise NotImplementedError('Argument `rank` currently only supports `2`; '
                                'saw "{}".'.format(rank))
    [
        filter_shape,
        rank,
        strides,
        padding,
        dilations,
    ] = prepare_conv_args(
        filter_shape, rank=rank, strides=strides, padding=padding,
        dilations=dilations, is_transpose=True, validate_args=validate_args)

    sh, sw = strides
    fh, fw = filter_shape

    # Padding is computed once here; it depends only on the kernel
    # geometry, not on the input image.
    pad_values = [
        _get_transpose_conv_dilated_padding(
            k, stride=s, dilation=d, padding=padding)
        for (k, s, d) in zip(filter_shape, strides, dilations)]

    def op(x, kernel):
      input_dtype = dtype_util.common_dtype([x, kernel], dtype_hint=tf.float32)
      x = tf.convert_to_tensor(x, dtype=input_dtype, name='x')
      kernel = tf.convert_to_tensor(kernel, dtype=input_dtype, name='kernel')

      batch_shape, event_shape = ps.split(
          ps.shape(x), num_or_size_splits=[-1, 3])
      xh, xw, c_in = ps.unstack(event_shape, num=3)

      kernel_shape = ps.shape(kernel)

      assertions = _maybe_validate_input_shapes(
          kernel_shape, channels_in=c_in, filter_height=fh, filter_width=fw,
          validate_args=validate_args)

      with tf.control_dependencies(assertions):
        # If the kernel does not have batch shape, fall back to
        # `conv2d_transpose` (unless dilations > 1, which is not implemented in
        # `conv2d_transpose`).
        if (tf.get_static_value(ps.rank(kernel)) == 2
            and all(d == 1 for d in dilations)):
          return _call_conv2d_transpose(
              x, kernel, filter_shape, strides, padding, dilations,
              kernel_shape[-1], batch_shape, event_shape)

        # Indices are built for the zero-interleaved (strided) image with
        # unit slice_step; `transpose=True` flips the kernel traversal.
        idx, shape = im2row_index(
            (xh * sh + sum(pad_values[0]), xw * sw + sum(pad_values[1]), c_in),
            block_shape=filter_shape, slice_step=(1, 1), dilations=dilations,
            dtype=dtype, transpose=True)

        n = ps.maximum(0, ps.rank(x) - 3)
        paddings = ps.pad(
            pad_values, paddings=[[n, 1], [0, 0]], constant_values=0)

        # Interleave the rows and columns of the input with rows and columns of
        # zeros equal to the number of strides.
        x_half_dilated = tf.concat(
            [tf.zeros(ps.concat([batch_shape, (xh * xw, sw - 1, c_in)], axis=0),
                      dtype=input_dtype),
             tf.reshape(
                 x, shape=ps.concat([batch_shape, (xh * xw, 1, c_in)], axis=0))
             ], axis=-2)
        y = tf.reshape(
            x_half_dilated,
            shape=ps.concat([batch_shape, (xh, 1, xw * sw, c_in)], axis=0))

        x = tf.reshape(
            tf.concat(
                [tf.zeros(
                    ps.concat(
                        [batch_shape, (xh, sh - 1, xw * sw, c_in)], axis=0),
                    dtype=input_dtype), y], axis=-3),
            shape=ps.concat([batch_shape, (xh * sh, xw * sw, c_in)], axis=0))

        # Gather block rows out of the padded, dilated image, then contract
        # against the (batched) kernel as a matmul.
        x_pad = tf.pad(x, paddings=paddings, constant_values=0)
        flat_shape = ps.pad(batch_shape, paddings=[[0, 1]], constant_values=-1)
        flat_x = tf.gather(
            tf.reshape(x_pad, shape=flat_shape), indices=idx, axis=-1)
        im_x = tf.reshape(flat_x, shape=ps.concat([batch_shape, shape], axis=0))
        return tf.matmul(im_x, kernel[..., tf.newaxis, :, :])

    return op
def make_convolution_transpose_fn_with_subkernels_matrix(
filter_shape, strides, padding, rank=2, dilations=None, dtype=tf.int32,
validate_args=False, name=None):
"""Like `tf.nn.conv2d` except applies batch of kernels to batch of `x`."""
with tf.name_scope(name or 'make_convolution_transpose_fn_with_dilation'):
if tf.get_static_value(rank) != 2:
raise NotImplementedError('Argument `rank` currently only supports `2`; '
'saw "{}".'.format(rank))
strides = tf.get_static_value(strides)
if not isinstance(strides, int):
raise ValueError('Argument `strides` must be a statically known integer.'
'Saw: {}'.format(strides))
[
filter_shape,
rank,
_,
padding,
dilations,
] = prepare_conv_args(
filter_shape, rank=rank, strides=strides, padding=padding,
dilations=dilations, is_transpose=True, validate_args=validate_args)
fh, fw = filter_shape
dh, dw = dilations
# Determine maximum filter height and filter width of sub-kernels.
sub_fh = (fh - 1) // strides + 1
sub_fw = (fw - 1) // strides + 1
def loop_body(i_, event_ind):
i = i_ // strides
j = i_ % strides
i_ind = ps.range(i * fw, fw * fh, delta=strides * fw, dtype=dtype)
j_ind = ps.range(j, fw, delta=strides, dtype=dtype)
nc = cartesian_add([i_ind, j_ind])
ind = ps.reverse(ps.reshape(nc, shape=[-1]), axis=[0])
k = ps.reshape(
cartesian_add(
[ps.range(ps.shape(nc)[0] * sub_fw, delta=sub_fw, dtype=dtype),
ps.range(ps.shape(nc)[1], dtype=dtype)]),
shape=[-1])
last_j = strides - (fw - j - 1) % strides - 1
last_i = strides - (fh - i - 1) % strides - 1
kernel_ind = ps.stack(
[k, ps.ones_like(k) * last_i * strides + last_j], axis=1)
event_ind = ps.tensor_scatter_nd_update(
event_ind, ind[..., tf.newaxis], kernel_ind)
return i_ + 1, event_ind
event_ind = ps.zeros((fh * fw, 2), dtype=dtype)
_, event_ind = tf.while_loop(
lambda i, _: i < strides ** 2,
loop_body,
[tf.zeros([], dtype=dtype), event_ind])
tot_pad_top, tot_pad_bottom = _get_transpose_conv_dilated_padding(
fh, stride=strides, dilation=dh, padding=padding)
tot_pad_left, tot_pad_right = _get_transpose_conv_dilated_padding(
fw, stride=strides, dilation=dw, padding=padding)
pad_bottom = (tot_pad_bottom - 1) // strides + 1
pad_top = (tot_pad_top - 1) // strides + 1
pad_right = (tot_pad_right - 1) // strides + 1
pad_left = (tot_pad_left - 1) // strides + 1
padding_vals = ((pad_top, pad_bottom), (pad_left, pad_right))
truncate_top = pad_top * strides - tot_pad_top
truncate_left = pad_left * strides - tot_pad_left
def op(x, kernel):
input_dtype = dtype_util.common_dtype([x, kernel], dtype_hint=tf.float32)
x = tf.convert_to_tensor(x, dtype=input_dtype, name='x')
kernel = tf.convert_to_tensor(kernel, dtype=input_dtype, name='kernel')
batch_shape, event_shape = ps.split(
ps.shape(x), num_or_size_splits=[-1, 3])
xh, xw, c_in = ps.unstack(event_shape, num=3)
kernel_shape = ps.shape(kernel)
c_out = kernel_shape[-1]
kernel_batch = kernel_shape[:-2]
assertions = _maybe_validate_input_shapes(
kernel_shape, channels_in=c_in, filter_height=fh, filter_width=fw,
validate_args=validate_args)
with tf.control_dependencies(assertions):
# If the kernel does not have batch shape, fall back to
# `conv2d_transpose` (unless dilations > 1, which is not implemented in
# `conv2d_transpose`).
if (tf.get_static_value(ps.rank(kernel)) == 2
and all(d == 1 for d in dilations)):
return _call_conv2d_transpose(
x, kernel=kernel, filter_shape=filter_shape,
strides=(strides,) * rank, padding=padding, dilations=dilations,
c_out=c_out, batch_shape=batch_shape, event_shape=event_shape)
n = ps.maximum(0, ps.rank(x) - 3)
paddings = ps.pad(
padding_vals,
paddings=[[n, 1], [0, 0]],
constant_values=0)
x_pad = tf.pad(x, paddings=paddings, constant_values=0)
x_pad_shape = ps.shape(x_pad)[:-3]
flat_shape = ps.pad(x_pad_shape, paddings=[[0, 1]], constant_values=-1)
flat_x = tf.reshape(x_pad, shape=flat_shape)
idx, s = im2row_index(
(xh + tf.reduce_sum(padding_vals[0]),
xw + tf.reduce_sum(padding_vals[1]), c_in),
block_shape=(sub_fh, sub_fw), slice_step=(1, 1), dilations=dilations
)
x_ = tf.gather(flat_x, indices=idx, axis=-1)
im_x = tf.reshape(x_, shape=ps.concat([x_pad_shape, s], axis=0))
# Add channels to subkernel indices
idx_event = event_ind * [[c_in, 1]]
idx_event_channels = (
idx_event[tf.newaxis]
+ tf.stack([ps.range(c_in), tf.zeros((c_in,), dtype=dtype)],
axis=-1)[:, tf.newaxis, :])
idx_event = tf.squeeze(
tf.batch_to_space(
idx_event_channels, block_shape=[c_in], crops=[[0, 0]]), axis=0)
idx_event_broadcast = tf.broadcast_to(
idx_event,
shape=ps.concat([kernel_batch, ps.shape(idx_event)], axis=0))
# Add cartesian product of batch indices, since scatter_nd can only be
# applied to leading dimensions.
idx_batch = tf.stack(
tf.meshgrid(
*[ps.range(b_, delta=1, dtype=dtype)
for b_ in tf.unstack(kernel_batch)], indexing='ij'),
axis=ps.size(kernel_batch))
idx_batch = tf.cast(idx_batch, dtype=dtype) # empty tensor is float
idx_batch_broadcast = idx_batch[..., tf.newaxis, :] + tf.zeros(
(ps.shape(idx_event)[0], 1), dtype=dtype)
idx_kernel = tf.concat(
[idx_batch_broadcast, idx_event_broadcast], axis=-1)
kernel_mat = tf.scatter_nd(
idx_kernel,
updates=kernel,
shape=ps.cast(
ps.concat([kernel_batch,
[sub_fh * sub_fw * c_in, strides ** 2, c_out]],
axis=0),
dtype=dtype))
kernel_mat = tf.reshape(
kernel_mat,
shape=ps.concat(
[ps.shape(kernel_mat)[:-2], [strides ** 2 * c_out]], axis=0))
kernel_mat = kernel_mat[..., tf.newaxis, :, :]
out = tf.matmul(im_x, kernel_mat)
broadcast_batch_shape = ps.broadcast_shape(batch_shape, kernel_batch)
if strides > 1:
tot_size = tf.reduce_prod(broadcast_batch_shape)
flat_out = tf.reshape(
out,
shape=ps.concat([[tot_size], ps.shape(out)[-3:]], axis=0))
out = tf.nn.depth_to_space(flat_out, block_size=strides)
if padding == 'VALID':
out_height = fh + strides * (xh - 1)
out_width = fw + strides * (xw - 1)
elif padding == 'SAME':
out_height = xh * strides
out_width = xw * strides
out = out[..., truncate_top:truncate_top + out_height,
truncate_left:truncate_left + out_width, :]
out = tf.reshape(
out, shape=ps.concat(
[broadcast_batch_shape, [out_height, out_width, c_out]],
axis=0))
return out
return op
def make_convolution_transpose_fn_with_subkernels(
    filter_shape, strides, padding, rank=2, dilations=None, dtype=tf.int32,
    validate_args=False, name=None):
  """Like `tf.nn.conv2d_transpose` except applies batch of kernels to batch of `x`.

  The transposed convolution is computed by splitting the kernel into
  `strides[0] * strides[1]` sub-kernels, applying each sub-kernel as a
  regular (non-transposed) im2row + matmul convolution over the padded
  input, and interleaving the per-sub-kernel results back onto the
  upsampled grid with `tf.batch_to_space`.

  Args:
    filter_shape: scalar or length-2 filter spatial shape.
    strides: scalar or length-2 strides.
    padding: `str`, one of `'VALID'` or `'SAME'`.
    rank: Python `int`; only `2` is currently supported.
    dilations: scalar or length-2 dilation rates.
    dtype: integer dtype used for the index bookkeeping tensors.
    validate_args: Python `bool`, add runtime shape assertions.
    name: optional `str` name for the op scope.

  Returns:
    A callable `op(x, kernel)` computing the batched transposed convolution.
  """
  # NOTE: the name scope previously read
  # 'make_convolution_transpose_fn_with_dilation' (copy-paste from the
  # sibling factory); corrected to match this function's name.
  with tf.name_scope(name or 'make_convolution_transpose_fn_with_subkernels'):
    if tf.get_static_value(rank) != 2:
      raise NotImplementedError('Argument `rank` currently only supports `2`; '
                                'saw "{}".'.format(rank))
    [
        filter_shape,
        rank,
        strides,
        padding,
        dilations,
    ] = prepare_conv_args(
        filter_shape, rank=rank, strides=strides, padding=padding,
        dilations=dilations, is_transpose=True, validate_args=validate_args)

    sh, sw = strides
    fh, fw = filter_shape
    dh, dw = dilations

    # Determine maximum filter height and filter width of sub-kernels.
    sub_fh = (fh - 1) // sh + 1
    sub_fw = (fw - 1) // sw + 1

    def loop_body(i_, kernels_ind):
      # Each iteration computes the index set of one sub-kernel: the kernel
      # taps that align with a given spatial offset (i, j) modulo the strides.
      i = i_ // sw
      j = i_ % sw

      i_ind = ps.range((sh - i - 1) * fw, fw * fh, delta=sh * fw, dtype=dtype)
      j_ind = ps.range((sw - j - 1), fw, delta=sw, dtype=dtype)

      last_j = sw - (fw - j - 1) % sw - 1
      last_i = sh - (fh - i - 1) % sh - 1
      pos = last_i * sw + last_j

      nc = cartesian_add([i_ind, j_ind])
      kernels_ind = kernels_ind.write(
          sh * sw - pos - 1, ps.reverse(ps.reverse(nc, [0]), [1]))
      return i_ + 1, kernels_ind

    kernels_ind = tf.TensorArray(dtype=dtype, infer_shape=False, size=1,
                                 dynamic_size=True)

    _, kernels_ind = tf.while_loop(
        lambda i, _: i < sh * sw,
        loop_body,
        [0, kernels_ind])

    tot_pad_top, tot_pad_bottom = _get_transpose_conv_dilated_padding(
        fh, stride=sh, dilation=dh, padding=padding)
    tot_pad_left, tot_pad_right = _get_transpose_conv_dilated_padding(
        fw, stride=sw, dilation=dw, padding=padding)

    # Padding is applied in units of whole strides; `truncate_*` removes the
    # overshoot introduced by rounding the padding up.
    pad_bottom = (tot_pad_bottom - 1) // sh + 1
    pad_top = (tot_pad_top - 1) // sh + 1
    pad_right = (tot_pad_right - 1) // sw + 1
    pad_left = (tot_pad_left - 1) // sw + 1
    padding_vals = ((pad_top, pad_bottom), (pad_left, pad_right))

    truncate_top = pad_top * sh - tot_pad_top
    truncate_left = pad_left * sw - tot_pad_left

    def op(x, kernel):
      input_dtype = dtype_util.common_dtype([x, kernel], dtype_hint=tf.float32)
      x = tf.convert_to_tensor(x, dtype=input_dtype, name='x')
      kernel = tf.convert_to_tensor(kernel, dtype=input_dtype, name='kernel')

      batch_shape, event_shape = ps.split(
          ps.shape(x), num_or_size_splits=[-1, 3])
      xh, xw, c_in = ps.unstack(event_shape, num=3)

      kernel_shape = ps.shape(kernel)
      c_out = kernel_shape[-1]
      kernel_batch = kernel_shape[:-2]
      assertions = _maybe_validate_input_shapes(
          kernel_shape, channels_in=c_in, filter_height=fh, filter_width=fw,
          validate_args=validate_args)

      with tf.control_dependencies(assertions):
        # If the kernel does not have batch shape, fall back to
        # `conv2d_transpose` (unless dilations > 1, which is not implemented in
        # `conv2d_transpose`).
        if (tf.get_static_value(ps.rank(kernel)) == 2
            and all(d == 1 for d in dilations)):
          return _call_conv2d_transpose(
              x, kernel, filter_shape, strides, padding, dilations, c_out,
              batch_shape, event_shape)

        n = ps.maximum(0, ps.rank(x) - 3)
        paddings = ps.pad(
            padding_vals,
            paddings=[[n, 1], [0, 0]],
            constant_values=0)
        x_pad = tf.pad(x, paddings=paddings, constant_values=0)

        ex_h = xh + tf.reduce_sum(padding_vals[0]) - sub_fh + 1
        ex_w = xw + tf.reduce_sum(padding_vals[1]) - sub_fw + 1

        def loop_body(i, outputs):
          # Apply the i-th sub-kernel as a dense (im2row + matmul) conv.
          subkernel_ind = kernels_ind.read(i)
          fh_, fw_ = ps.unstack(ps.shape(subkernel_ind), num=2)
          eh = ex_h + fh_ - 1
          ew = ex_w + fw_ - 1

          # Expand the per-tap indices to cover all input channels.
          subkernel_ind = ps.reshape(
              ps.reshape(subkernel_ind * c_in, shape=[-1])[:, tf.newaxis]
              + ps.range(c_in), shape=[-1])

          k = tf.gather(kernel, subkernel_ind, axis=-2)
          ind, shape = im2row_index(
              [eh, ew, c_in],
              block_shape=(fh_, fw_),
              slice_step=(1, 1),
              dilations=dilations)
          x_i = x_pad[..., :eh, :ew, :]
          x_i_shape = ps.shape(x_i)
          flat_shape = ps.pad(
              x_i_shape[:-3], paddings=[[0, 1]], constant_values=-1)
          flat_x = tf.reshape(x_i, flat_shape)
          x_ = tf.gather(flat_x, ind, axis=-1)
          im_x = tf.reshape(x_, ps.concat([x_i_shape[:-3], shape], axis=0))
          outputs = outputs.write(
              i,
              tf.matmul(
                  im_x,
                  tf.reshape(
                      k, ps.concat(
                          [kernel_batch, [1, fh_ * fw_ * c_in, c_out]],
                          axis=0)))
              )
          return i + 1, outputs

        outputs = tf.TensorArray(dtype=input_dtype, infer_shape=False, size=1,
                                 dynamic_size=True)

        _, outputs = tf.while_loop(
            lambda i, _: i < sh * sw,
            loop_body,
            [0, outputs])

        y = outputs.concat()

        m = tf.reduce_prod(ps.shape(y)[:-3])
        y_ = tf.reshape(y, shape=ps.concat([[m], ps.shape(y)[-3:]], axis=0))
        # Interleave the per-sub-kernel outputs onto the upsampled grid.
        y2 = tf.batch_to_space(
            y_, strides, crops=tf.zeros([2, 2], dtype=tf.int64))
        broadcast_batch_shape = ps.broadcast_shape(batch_shape, kernel_batch)
        y2 = tf.reshape(y2, ps.concat(
            [broadcast_batch_shape, ps.shape(y2)[-3:]], axis=0))

        if padding == 'VALID':
          out_height = fh + sh * (xh - 1)
          out_width = fw + sw * (xw - 1)
        elif padding == 'SAME':
          out_height = xh * sh
          out_width = xw * sw

        # Crop away the rounding overshoot and return the final output.
        return y2[..., truncate_top:truncate_top + out_height,
                  truncate_left:truncate_left + out_width, :]

    return op
def _maybe_validate_input_shapes(
    kernel_shape, channels_in, filter_height, filter_width, validate_args):
  """Validate shapes of inputs to convolution op."""
  # The flattened kernel's second-to-last dim must hold one entry per
  # (filter tap, input channel) pair.
  actual = kernel_shape[-2]
  expected = filter_height * filter_width * channels_in
  actual_ = tf.get_static_value(actual)
  expected_ = tf.get_static_value(expected)

  runtime_assertions = []
  if actual_ is not None and expected_ is not None:
    # Both sizes are known at graph-build time: check eagerly.
    if actual_ != expected_:
      raise ValueError(
          'The size of the second-to-rightmost dimension of `kernel` ( ={}) '
          ' must equal `filter_height * filter_width * channels_in` ( ={}), '
          'where `channels_in` is the size of the rightmost dimension of the '
          'input.'.format(actual_, expected_))
  elif validate_args:
    # Defer the check to runtime.
    runtime_assertions.append(
        assert_util.assert_equal(
            actual, expected,
            message=('The size of the second-to-rightmost dimension of `kernel`'
                     ' must equal `filter_height * filter_width * channels_in`,'
                     ' where `channels_in` is the size of the rightmost '
                     'dimension of the input.')))
  return runtime_assertions
def _get_transpose_conv_dilated_padding(filter_dim, stride, dilation, padding):
"""Zero-padding for inputs dilated by strides."""
tot_filter_dim = filter_dim + (filter_dim - 1) * (dilation - 1)
if padding == 'VALID':
tot_pad = 2 * (tot_filter_dim - 1)
elif padding == 'SAME':
tot_pad = tot_filter_dim + stride - 2
# TODO(emilyaf): Support stride > kernel_dim.
# if filter_dim > 1:
pad_end = tot_pad // 2
pad_start = tot_pad - pad_end - (stride - 1) # implicit pad
# else:
# pad_end = pad_start = 0
return pad_start, pad_end
def _get_output_shape(rank, strides, padding, dilations, input_shape,
                      output_size, filter_shape, output_padding=None):
  """Compute the `output_shape` and `strides` arg used by `conv_transpose`.

  Args:
    rank: Python `int`, spatial rank of the convolution.
    strides: length-`rank` tuple of `int` strides.
    padding: `str`, one of `'SAME'`, `'VALID'`, `'FULL'`.
    dilations: length-`rank` tuple of `int` dilation rates.
    input_shape: shape of the (possibly batched) channels-last input.
    output_size: number of output channels.
    filter_shape: length-`rank` tuple of filter sizes.
    output_padding: optional scalar or length-`rank` extra output padding;
      `None` means the output length is inferred per dimension.

  Returns:
    output_shape: full output shape (batch dims + spatial dims + channels).
    strides: `strides` padded with a leading and trailing `1` (batch and
      channel dimensions), as `tf.nn.conv_transpose` expects.

  Raises:
    ValueError: If any `output_padding` entry is >= the matching stride.
  """
  if output_padding is None:
    output_padding = (None,) * rank
  else:
    output_padding = utils.prepare_tuple_argument(
        output_padding, n=rank, arg_name='output_padding')
    for stride, out_pad in zip(strides, output_padding):
      if out_pad >= stride:
        raise ValueError('Stride {} must be greater than output '
                         'padding {}.'.format(strides, output_padding))
  event_shape = []
  for i in range(-rank, 0):
    # `i - 1` skips over the trailing channels dimension of `input_shape`.
    event_shape.append(_deconv_output_length(
        input_shape[i - 1],
        filter_size=filter_shape[i],
        padding=padding,
        output_padding=output_padding[i],
        stride=strides[i],
        dilation=dilations[i]))
  event_shape.append(output_size)
  batch_shape = input_shape[:-rank-1]
  output_shape = ps.concat([batch_shape, event_shape], axis=0)
  # Pad strides to NHWC form: [1, spatial strides..., 1].
  strides = ps.pad(strides, paddings=[[1, 1]], constant_values=1)
  return output_shape, strides
def _deconv_output_length(input_size, filter_size, padding, output_padding,
stride, dilation):
"""Determines output length of a transposed convolution given input length.
Args:
input_size: `int`.
filter_size: `int`.
padding: one of `"SAME"`, `"VALID"`, `"FULL"`.
output_padding: `int`, amount of padding along the output dimension. Can
be set to `None` in which case the output length is inferred.
stride: `int`.
dilation: `int`.
Returns:
output_length: The output length (`int`).
"""
assert padding in {'SAME', 'VALID', 'FULL'}
if input_size is None:
return None
# Get the dilated kernel size
filter_size = filter_size + (filter_size - 1) * (dilation - 1)
# Infer length if output padding is None, else compute the exact length
if output_padding is None:
if padding == 'VALID':
return input_size * stride + max(filter_size - stride, 0)
elif padding == 'FULL':
return input_size * stride - (stride + filter_size - 2)
elif padding == 'SAME':
return input_size * stride
if padding == 'SAME':
pad = filter_size // 2
elif padding == 'VALID':
pad = 0
elif padding == 'FULL':
pad = filter_size - 1
return (input_size - 1) * stride + filter_size - 2 * pad + output_padding
def prepare_conv_args(
    filter_shape, rank, strides, padding, dilations,
    is_transpose=False, validate_args=False):
  """Sanitizes user-provided input.

  Normalizes `filter_shape`, `strides` and `dilations` to length-`rank`
  tuples, validates/upper-cases `padding`, and (for transposed
  convolutions) enforces the constraints the transpose implementations
  rely on.

  Args:
    filter_shape: scalar or length-`rank` filter spatial shape.
    rank: statically-known `int`, must be in `{1, 2, 3}`.
    strides: scalar or length-`rank` strides.
    padding: `str` padding mode (or tuple of explicit amounts).
    dilations: scalar or length-`rank` dilation rates.
    is_transpose: Python `bool`; if `True`, also check that strides and
      dilations are not both > 1 and that strides do not exceed the filter.
    validate_args: Python `bool`, add runtime assertions for properties
      that cannot be verified statically.

  Returns:
    Tuple `(filter_shape, rank, strides, padding, dilations)` normalized.
  """
  # NOTE(review): padding is normalized twice -- by `_validate_padding` here
  # and by `_prepare_padding_argument` below. `_validate_padding` is defined
  # outside this excerpt; presumably idempotent with the second call --
  # confirm against the full module.
  padding = _validate_padding(padding)
  try:
    rank = int(tf.get_static_value(rank))
  except TypeError:
    raise TypeError('Argument `rank` must be statically known `int`.')
  valid_rank = {1, 2, 3}
  if rank not in valid_rank:
    raise ValueError('Argument `rank` must be in {}.'.format(valid_rank))
  filter_shape = prepare_tuple_argument(
      filter_shape, n=rank, arg_name='filter_shape',
      validate_args=validate_args)
  strides = prepare_tuple_argument(
      strides, n=rank, arg_name='strides', validate_args=validate_args)
  padding = _prepare_padding_argument(padding)
  dilations = prepare_tuple_argument(
      dilations, n=rank, arg_name='dilations', validate_args=validate_args)

  strides_ = [tf.get_static_value(s) for s in strides]
  dilations_ = [tf.get_static_value(d) for d in dilations]
  assertions = []
  if is_transpose:
    # Transposed convolution supports strided OR dilated application per
    # dimension, not both simultaneously.
    if (all(s is not None for s in strides_)
        and all(d is not None for d in dilations_)):
      if any(s > 1 for s in strides_) and any(d > 1 for d in dilations_):
        raise NotImplementedError('At least one of `dilations` and `strides` '
                                  'must equal `1` for each dimension. Saw: '
                                  '`strides={}`, `dilations={}`'.format(
                                      strides, dilations))
    elif validate_args:
      # Sizes unknown statically: defer the same check to runtime.
      assertions.append(
          assert_util.assert_equal(
              tf.logical_or(
                  tf.equal(tf.reduce_max(strides), 1),
                  tf.equal(tf.reduce_max(dilations), 1)),
              True,
              message='At least one of `dilations` and `strides` must equal `1` '
                      'for each dimension.'))

    # TODO(emilyaf): Remove this once strides > filter_dim is supported.
    filter_shape_ = [tf.get_static_value(s) for s in filter_shape]
    if any(s is not None and f is not None and s > f
           for s, f in zip(strides_, filter_shape_)):
      raise NotImplementedError('Stride must be less than or equal to the '
                                'filter size along each dimension.')

  with tf.control_dependencies(assertions):
    return filter_shape, rank, strides, padding, dilations
def prepare_tuple_argument(arg, n, arg_name, validate_args=False):
  """Helper which processes `Tensor`s to tuples in standard form.

  Broadcasts a scalar or length-`n` argument (e.g. strides, dilations) to a
  list of `n` elements.

  Args:
    arg: scalar or length-`n` vector (Python value or `Tensor`).
    n: Python `int`, required length (typically the convolution rank).
    arg_name: `str`, name used in error messages.
    validate_args: Python `bool`; if `True`, add a runtime size assertion
      when the size cannot be checked statically.

  Returns:
    A list of `n` elements obtained by broadcasting `arg`.

  Raises:
    ValueError: If the statically-known size of `arg` is neither `1` nor `n`.
  """
  arg_size = ps.size(arg)
  arg_size_ = tf.get_static_value(arg_size)
  assertions = []
  if arg_size_ is not None:
    if arg_size_ not in (1, n):
      raise ValueError('The size of `{}` must be equal to `1` or to the rank '
                       'of the convolution (={}). Saw size = {}'.format(
                           arg_name, n, arg_size_))
  elif validate_args:
    assertions.append(assert_util.assert_equal(
        ps.logical_or(arg_size == 1, arg_size == n),
        True,
        message=('The size of `{}` must be equal to `1` or to the rank of the '
                 'convolution (={})'.format(arg_name, n))))

  with tf.control_dependencies(assertions):
    arg = ps.broadcast_to(arg, shape=[n])
    arg = ps.unstack(arg, num=n)
  return arg
def _prepare_padding_argument(x):
"""Helper which processes the padding argument."""
if not hasattr(x, 'upper'):
return tuple(x)
padding = x.upper()
if padding in {'CAUSAL', 'FULL'}:
raise NotImplementedError(
'Argument `padding` value "{}" currently not supported. If you '
'require this feature, please create an issue on '
'`https://github.com/tensorflow/probability` or email '
'`tfprobability@tensorflow.org`.'.format(padding))
valid_values = {'VALID', 'SAME'}
if padding not in valid_values:
raise ValueError('Argument `padding` must be convertible to a tuple '
'or one of {}; saw: "{}".'.format(valid_values, padding))
return padding
def _call_conv2d_transpose(x, kernel, filter_shape, strides, padding, dilations,
                           c_out, batch_shape, event_shape):
  """Call `tf.nn.conv2d_transpose` (for kernels with no batch dimensions)."""
  fh, fw = filter_shape
  # Collapse all leading batch dimensions of `x` into one so that
  # `conv2d_transpose` sees a standard rank-4 NHWC tensor.
  flat_x = tf.reshape(x, shape=ps.concat([[-1], event_shape], axis=0))
  output_shape, strides_ = _get_output_shape(
      rank=2, strides=strides, padding=padding, dilations=dilations,
      input_shape=ps.shape(flat_x), output_size=c_out,
      filter_shape=filter_shape)
  flat_y = tf.nn.conv2d_transpose(
      flat_x,
      # `kernel` is stored flattened as [fh * fw * c_in, c_out] (see
      # `_maybe_validate_input_shapes`); `conv2d_transpose` expects filters
      # of shape [fh, fw, c_out, c_in], hence the reshape + transpose.
      filters=tf.transpose(
          tf.reshape(
              kernel, shape=[fh, fw, event_shape[-1], -1]),
          perm=[0, 1, 3, 2]),
      output_shape=output_shape,
      strides=strides_,
      padding=padding,
      data_format='NHWC',
      dilations=dilations)
  # Restore the original batch dimensions on the output.
  return tf.reshape(
      flat_y, shape=ps.concat([batch_shape, output_shape[-3:]], axis=0))
| 38.725756
| 81
| 0.610965
|
acffd8a40a92ddd54e3de4ee66052b5631ea3069
| 3,182
|
py
|
Python
|
test/test_amici_objective.py
|
beimbusch/pyPESTO
|
8275751eba87efedd6ad1e6923a94d7de0603ddd
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_amici_objective.py
|
beimbusch/pyPESTO
|
8275751eba87efedd6ad1e6923a94d7de0603ddd
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_amici_objective.py
|
beimbusch/pyPESTO
|
8275751eba87efedd6ad1e6923a94d7de0603ddd
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This is for testing the pypesto.Objective.
"""
from pypesto.objective.amici_util import add_sim_grad_to_opt_grad
import petab
import pypesto
import pypesto.optimize
import pypesto.objective.constants
import pytest
import numpy as np
from test.petab_util import folder_base
ATOL = 1e-1
RTOL = 1e-0
def test_add_sim_grad_to_opt_grad():
    """
    Test gradient mapping/summation works as expected.

    17 = 1 + 2*5 + 2*3
    """
    opt_ids = ['opt_par_1', 'opt_par_2', 'opt_par_3']
    sim_ids = ['sim_par_1', 'sim_par_2', 'sim_par_3']
    # Two simulation parameters map onto opt_par_3; their gradient
    # contributions must be summed.
    opt_for_sim = {
        'sim_par_1': 'opt_par_1',
        'sim_par_2': 'opt_par_3',
        'sim_par_3': 'opt_par_3',
    }

    simulation_gradient = np.asarray([1.0, 3.0, 5.0])
    optimization_gradient = np.asarray([1.0, 1.0, 1.0])

    add_sim_grad_to_opt_grad(
        opt_ids,
        sim_ids,
        opt_for_sim,
        simulation_gradient,
        optimization_gradient,
        coefficient=2.0)

    # opt_par_1 <- 1 + 2*1; opt_par_2 untouched; opt_par_3 <- 1 + 2*(3 + 5).
    assert np.allclose(np.asarray([3.0, 1.0, 17.0]), optimization_gradient)
def test_error_leastsquares_with_ssigma():
    """Least-squares optimization on the Zheng problem must fail loudly.

    With `allow_failed_starts=False`, a failed start raises instead of
    being recorded. NOTE(review): based on the test name, the failure is
    presumably caused by estimated (scaled) sigmas being incompatible with
    residual-based optimizers -- confirm against pypesto's objective docs.
    """
    petab_problem = petab.Problem.from_yaml(
        folder_base + "Zheng_PNAS2012/Zheng_PNAS2012.yaml")
    petab_problem.model_name = "Zheng_PNAS2012"
    importer = pypesto.petab.PetabImporter(petab_problem)
    obj = importer.create_objective()
    problem = importer.create_problem(obj)

    # Least-squares trust-region optimizer, capped so the test stays fast.
    optimizer = pypesto.optimize.ScipyOptimizer(
        'ls_trf', options={'max_nfev': 50})
    with pytest.raises(RuntimeError):
        pypesto.optimize.minimize(
            problem=problem, optimizer=optimizer, n_starts=1,
            options=pypesto.optimize.OptimizeOptions(allow_failed_starts=False)
        )
def test_preeq_guesses():
    """
    Test whether optimization with preequilibration guesses works, asserts
    that steadystate guesses are written and checks that gradient is still
    correct with guesses set.
    """
    petab_problem = petab.Problem.from_yaml(
        folder_base + "Zheng_PNAS2012/Zheng_PNAS2012.yaml")
    petab_problem.model_name = "Zheng_PNAS2012"
    importer = pypesto.petab.PetabImporter(petab_problem)
    obj = importer.create_objective()
    problem = importer.create_problem(obj)

    # assert that initial guess is uninformative
    assert problem.objective.steadystate_guesses['fval'] == np.inf

    optimizer = pypesto.optimize.ScipyOptimizer(
        'L-BFGS-B', options={'maxiter': 50})
    result = pypesto.optimize.minimize(
        problem=problem, optimizer=optimizer, n_starts=1,
    )
    # optimization should have recorded exactly one steady-state guess
    assert problem.objective.steadystate_guesses['fval'] < np.inf
    assert len(obj.steadystate_guesses['data']) == 1

    # finite-difference check of the gradient at the returned optimum,
    # with the steady-state guesses in place
    df = obj.check_grad(
        result.optimize_result.list[0]['x'],
        eps=1e-3,
        verbosity=0,
        mode=pypesto.objective.constants.MODE_FUN
    )
    print("relative errors MODE_FUN: ", df.rel_err.values)
    print("absolute errors MODE_FUN: ", df.abs_err.values)
    # each component must pass either the relative or absolute tolerance
    assert np.all((df.rel_err.values < RTOL) | (df.abs_err.values < ATOL))

    # assert that resetting works
    problem.objective.initialize()
    assert problem.objective.steadystate_guesses['fval'] == np.inf
| 30.596154
| 79
| 0.682904
|
acffd97a7992260dcb85243a39d116e5f52f55c9
| 611
|
py
|
Python
|
023. Enumerating k-mers Lexicographically/Main.py
|
SyouTono242/Rosalind
|
d0f339789b5373d7ce98d401d5168536f1d2e18c
|
[
"Apache-2.0"
] | null | null | null |
023. Enumerating k-mers Lexicographically/Main.py
|
SyouTono242/Rosalind
|
d0f339789b5373d7ce98d401d5168536f1d2e18c
|
[
"Apache-2.0"
] | null | null | null |
023. Enumerating k-mers Lexicographically/Main.py
|
SyouTono242/Rosalind
|
d0f339789b5373d7ce98d401d5168536f1d2e18c
|
[
"Apache-2.0"
] | null | null | null |
# Given: A collection of at most 10 symbols defining an ordered alphabet, and a positive integer n (n≤10).
#
# Return: All strings of length n that can be formed from the alphabet, ordered lexicographically (use the standard
# order of symbols in the English alphabet).
# Input: space-separated alphabet symbols and the target k-mer length n.
Alphabet = "A B C D E F"
n = 3
def alpha_combs(alphaList, n, acc="", res=None):
    """Return all strings of length `n` over `alphaList`, in alphabet order.

    Args:
        alphaList: Sequence of symbols; pre-sort it for lexicographic output.
        n: Length of the strings to generate.
        acc: Prefix accumulated by the recursion (internal).
        res: Output list threaded through the recursion (internal).

    Returns:
        List of all len(alphaList)**n strings.
    """
    # Bug fix: `res` used to default to a mutable `[]`, which is shared
    # across calls -- a second top-level call would keep appending to the
    # results of the first. Use the None-sentinel idiom instead.
    if res is None:
        res = []
    if n == 0:
        res.append(acc)
    else:
        for character in alphaList:
            alpha_combs(alphaList, n - 1, acc + character, res)
    return res
# Sort the symbols so the output is lexicographic, then print one k-mer
# per line.
alphaList = sorted(Alphabet.split())
for combs in alpha_combs(alphaList, n):
    print(combs)
| 30.55
| 115
| 0.674304
|
acffd9ac70f12ad3b1d79f74be98a2a18d1cd04d
| 388
|
py
|
Python
|
backend/apps/cabins/migrations/0009_auto_20210327_1240.py
|
hovedstyret/indok-web
|
598e9ca0b5f3a5e776a85dec0a8694b9bcd5a159
|
[
"MIT"
] | 3
|
2021-11-18T09:29:14.000Z
|
2022-01-13T20:12:11.000Z
|
backend/apps/cabins/migrations/0009_auto_20210327_1240.py
|
rubberdok/indok-web
|
598e9ca0b5f3a5e776a85dec0a8694b9bcd5a159
|
[
"MIT"
] | 277
|
2022-01-17T18:16:44.000Z
|
2022-03-31T19:44:04.000Z
|
backend/apps/cabins/migrations/0009_auto_20210327_1240.py
|
hovedstyret/indok-web
|
598e9ca0b5f3a5e776a85dec0a8694b9bcd5a159
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2021-03-27 11:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Change the default of `Cabin.max_guests` to 18."""

    dependencies = [
        ("cabins", "0008_auto_20210322_2011"),
    ]

    operations = [
        migrations.AlterField(
            model_name="cabin",
            name="max_guests",
            field=models.IntegerField(default=18),
        ),
    ]
| 20.421053
| 50
| 0.600515
|
acffdbab9ed0a5b01827507983e5f50d80f93575
| 1,166
|
py
|
Python
|
common/urls.py
|
cs4p/opal
|
8c01814a66e03a4481d3fc06375d097866485bc6
|
[
"CC0-1.0"
] | null | null | null |
common/urls.py
|
cs4p/opal
|
8c01814a66e03a4481d3fc06375d097866485bc6
|
[
"CC0-1.0"
] | 1
|
2022-01-05T19:41:20.000Z
|
2022-01-05T19:41:20.000Z
|
common/urls.py
|
cs4p/opal
|
8c01814a66e03a4481d3fc06375d097866485bc6
|
[
"CC0-1.0"
] | null | null | null |
"""opal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, re_path
from .views import *
# URL namespace: reverse these routes as 'common:<name>'.
app_name = 'common'

urlpatterns = [path('p/<str:uuid>', permalink, name='permalink'),
               path('db_status/', database_status_view, name='db_status'),
               path('auth/', authentication_view, name='authentication_view'),
               # SAML single-sign-on endpoints.
               re_path(r'^saml/$', saml_authentication, name='saml_authentication'),
               re_path(r'^saml/attrs/$', attrs, name='attrs'),
               re_path(r'^saml/metadata/$', metadata, name='metadata'),
               ]
| 41.642857
| 84
| 0.65952
|
acffdbeb42cf2390e0fd3a0b9fbcd6932524df42
| 10,472
|
py
|
Python
|
metrics/cortx_people.py
|
vaibhavparatwar/cortx-1
|
1e8c0251c0c83444eacc79d560f5ff7a7f1cde72
|
[
"Apache-2.0"
] | 1
|
2021-03-12T07:14:19.000Z
|
2021-03-12T07:14:19.000Z
|
metrics/cortx_people.py
|
vaibhavparatwar/cortx-1
|
1e8c0251c0c83444eacc79d560f5ff7a7f1cde72
|
[
"Apache-2.0"
] | 1
|
2021-03-22T12:37:01.000Z
|
2021-03-22T12:37:01.000Z
|
metrics/cortx_people.py
|
vaibhavparatwar/cortx-1
|
1e8c0251c0c83444eacc79d560f5ff7a7f1cde72
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python3
from cortx_community import get_logins,CortxCommunity,get_teams,SlackCommunity,CortxActivity
import argparse
import json
import os
import re
import sys
import time
from github import Github
def Debug(msg):
  """Print a debug message to stdout."""
  print(msg)
def person_match(person, string):
  """Return True if `string` occurs in person's login, company, or email.

  The person's fields are lower-cased before the containment test, so pass
  `string` already lower-cased. A field that is missing, `None`, or not a
  string simply does not match.

  Args:
    person: object with optional `login`, `company`, `email` attributes.
    string: lower-case substring to search for.

  Returns:
    bool: whether any field contains `string`.
  """
  match = False
  # Any attribute may be absent or None, making `.lower()` raise
  # AttributeError (or `in` raise TypeError). The original bare `except:`
  # clauses also swallowed KeyboardInterrupt/SystemExit; narrow them.
  try:
    if string in person.login.lower():
      match = True
  except (AttributeError, TypeError):
    pass
  try:
    if string in person.company.lower():
      match = True
  except (AttributeError, TypeError):
    pass
  try:
    if string in person.email.lower():
      match = True
  except (AttributeError, TypeError):
    pass
  return match
def get_mergable_email(source, target):
  """Choose which of two people's emails survives a merge.

  Prefers a seagate.com address when both people have an email; if both
  emails are non-Seagate they must be identical. Otherwise returns the
  one email that exists.
  """
  source_email = source.get_email()
  target_email = target.get_email()
  assert source_email or target_email, "Could not find a mergable email"
  if source_email and target_email:
    if '@seagate.com' in source_email:
      return source_email
    if '@seagate.com' in target_email:
      return target_email
    # Both are non-Seagate: refuse to merge conflicting addresses.
    assert source_email == target_email, (
        "Can't merge different non-seagate emails %s != %s"
        % (source_email, target_email))
  return source_email if source_email else target_email
def get_activities(login, activity):
  """Map created_at -> url for one login's dated activities.

  Entries whose `created_at` is None (e.g. watch events, which carry no
  date) are skipped, since they cannot be keyed by time.
  """
  return {
      created_at: url
      for (url, created_at) in activity.get_activities(login)
      if created_at is not None
  }
# function to merge two different people into one
# this usually happens when we find someone in slack and can't automatically find their github login
def merge(target_login, source_login, people):
  """Merge person `source_login` into `target_login` inside `people`.

  Migrates activities, re-points the slack record's github login, transfers
  company/type/linkedin/note/email, then removes the source (only when it
  is a placeholder 'GID_UNK' login) and persists all three pickles.
  """
  slack_people = SlackCommunity()
  activity = CortxActivity()

  # what do we need to do here?
  # 1. find all activity belonging to merge and add it to individual - DONE
  # 2. find merge in slack people and change github login to individual - DONE
  # 3. remove merge from cortx people
  # 4. copy the slack id from merge into target

  assert target_login, "Can't merge without specifying the individual into whom to merge"
  #print("need to merge %s into %s" % (source_login, target_login))
  try:
    activities = get_activities(source_login, activity)
  except KeyError:  # this person has no activities
    activities = {}
  target = people.get_person(target_login)
  source = people.get_person(source_login)
  email = get_mergable_email(source, target)
  print("need to merge %s into %s using %s" % (source, target, email))
  sperson = slack_people.find_login(source_login)
  assert sperson, "Couldn't find %s in slack pickle" % email
  slack_people.set_github(sperson, target_login)
  print("Also need to clean up slack person %s" % sperson)
  # Re-key each of the source's activities under the target, using a
  # synthetic key so the migration is idempotent across reruns.
  for date, url in activities.items():
    #def add_activity(self,key,login,url,created_at):
    key = "migrated_event.%s.%s.%s.%s" % (url, date, target_login, source_login)
    try:
      (login, url, created_at) = activity.get_activity(key)  # already exists
    except:
      print("Not yet migrated: Migrating %s %s" % (date, url))
      activity.add_activity(key, target_login, url, date)

  # copy over company, type, linkedin, and email; merge notes
  if source.get_company() and not target.get_company():
    print("Trying to transfer company %s" % source.get_company())
    people.update_company(target_login, source.get_company())
  if source.get_type() and not target.get_type():
    print("Trying to transfer type %s" % source.get_type())
    people.set_type(target_login, source.get_type())
  if source.get_linkedin() and not target.get_linkedin():
    print("Trying to transfer type %s" % source.get_linkedin())
    people.set_linkedin(target_login, source.get_linkedin())
  if source.get_note():
    print("Trying to transfer note %s" % source.get_note())
    people.add_note(target_login, source.get_note())
  people.update_email(target_login, email)

  # Only auto-generated placeholder logins are safe to delete outright.
  if 'GID_UNK' in source_login:
    people.remove_person(source_login)
  else:
    print("Cowardly refusing to remove non GID_UNK login")

  activity.persist()
  slack_people.persist()
  people.persist()
def main():
  """Command-line driver for inspecting and updating the community pickles.

  Behavior is selected by flags: --merge, per-individual update/print
  (--individual with optional field flags), --unknowns listing, or --dump.
  With no flags, falls through to a deliberately-disabled bulk update path.
  """
  parser = argparse.ArgumentParser(description='Update or print info in our cortx community pickle.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument('--individual', '-i', help='Update or print just one single person.')
  parser.add_argument('--type', '-t', help="When you update an individual, what type is she")
  parser.add_argument('--email', '-e', help="When you update an individual, add her email")
  parser.add_argument('--company', '-c', help="When you update an individual, what company is she")
  parser.add_argument('--linkedin', '-l', help="When you update an individual, add her linkedin profile")
  parser.add_argument('--note', '-n', help="When you update an individual, add a note (must be formatted as python dict)")
  parser.add_argument('--unknowns', '-u', help="Dump the unknowns and quit", action="store_true")
  parser.add_argument('--update', '-U', help="When dumpting unknowns, also update the pickle if identified as Seagate")
  parser.add_argument('--dump', '-d', help="Dump entire community and quit", action="store_true")
  parser.add_argument('--slack', '-s', help="Operate on the slack people", action="store_true")
  parser.add_argument('--merge', '-m', help="Merge one person into another")
  parser.add_argument('--org', '-o', help='Print the latest statistics for a different org', default='Seagate')
  parser.add_argument('--github', '-g', help='Change the github login for a slack person', action="store")
  args = parser.parse_args()

  # Choose which pickle to operate on: slack members or the github community.
  if args.slack:
    people = SlackCommunity(org_name=args.org)
  else:
    people = CortxCommunity(org_name=args.org)

  gh = Github(os.environ.get('GH_OATH'))

  # Merge one person's records into another's; execution then falls through.
  if args.merge:
    merge(target_login=args.individual, source_login=args.merge, people=people)

  # Update and/or print a single person, then exit.
  if (args.individual):
    updated = False
    if not args.slack:
      if people.includes(args.individual):
        # Apply each field flag that was supplied.
        if args.type:
          updated = True
          people.update_type(args.individual, args.type)
        if args.company:
          updated = True
          people.update_company(args.individual, args.company)
        if args.linkedin:
          updated = True
          people.update_linkedin(args.individual, args.linkedin)
        if args.email:
          updated = True
          people.update_email(args.individual, args.email)
        if args.note:
          updated = True
          note = json.loads(args.note)
          people.add_note(args.individual, note)
        print(people.people[args.individual])
      else:
        print("Person %s not in the known community" % args.individual)
    else:
      # Slack mode: optionally re-point the person at a valid github login.
      if args.github:
        gpeople = CortxCommunity()
        gperson = None
        sperson = None
        try:
          gperson = gpeople.get_person(args.github)
        except KeyError:
          print("Error: %s is unknown github ID" % args.github)
        try:
          sperson = people.get_github(args.individual)
        except TypeError:
          print("Error: %s is unknown slack ID" % args.individual)
        assert gperson and sperson, "Can't operate unless both args are valid"
        people.set_github(args.individual, args.github)
        updated = True
      people.print_person(args.individual)
    if updated:
      people.persist()
    sys.exit(0)

  # List people with no assigned type; with --update, auto-classify those
  # that look like Seagate employees/contractors.
  if args.unknowns:
    unknowns = 0
    for person in people.values():
      if person.type is None:
        print(person)
        unknowns += 1
        # if they are unknown, look them up and see if they are part of CORTX organization
        add_to_team = False
        try:
          if person_match(person, 'seagate') or person_match(person, 'dsr') or person_match(person, 'calsoft'):
            print("%s login matches seagate or contractor; adding to CORTX Team" % person.login)
            add_to_team = True
        except AttributeError:
          pass
        try:
          # Raises if the user is not a member of the Seagate org.
          user = gh.get_user(login=person.login)
          user.get_organization_membership('Seagate')
          print("%s is in Seagate org; adding to CORTX Team" % person.login)
          add_to_team = True
        except:
          pass
        if add_to_team:
          if args.update:
            people.update_company(person.login, 'Seagate')
            people.update_type(person.login, 'CORTX Team')
            people.persist()
          else:
            print("Not updating because update flag was not set")
    print("%d total unknowns in community" % unknowns)
    sys.exit(0)

  # Print everyone; for the github community also print a per-type histogram.
  if args.dump:
    print(people)
    if not args.slack:
      types = {}
      for person in people.values():
        if person.type not in types:
          types[person.type] = 0
        types[person.type] += 1
      print(types)
    sys.exit()

  # if no args are passed, the program reaches here
  # let's add a linkedin field for everyone
  # we just used this once to change the pickle to add a field
  #for login,person in people.items():
  #  people.add_person(login,person.company,person.email,None)
  #  if person.type:
  #    people.update_type(login,person.type)

  # if no args are passed, the program reaches here
  # if the program reaches here, then it will attempt to auto-update info about the community in the people pickle
  # fetch the innersource folks from the innersource json
  # this code is dangerous. We needed it once but we don't probably want to run it again
  print("Cowardly doing nothing")
  sys.exit()

  # NOTE(review): everything below is unreachable after the unconditional
  # sys.exit() above -- retained deliberately (see comment above).
  with open('pickles/innersource.json', 'r') as f:
    innersource = json.load(f)
  Debug("Loaded innersource:")
  print(innersource)

  allteams = get_teams('https://api.github.com/orgs/Seagate/teams/cortx-community/teams')
  team_members = set()
  for team in allteams:
    if 'innersource' not in team:
      Debug("Fetching from %s" % team)
      gids = get_logins('members', 'https://api.github.com/orgs/seagate/teams/%s' % team)
      team_members |= gids

  for person in people:
    if person in team_members:
      people.update_type(person, "CORTX Team")
      people.update_company(person, "Seagate")
    if person in innersource:
      if people.get_type(person) is None:
        people.update_type(person, "Innersource")
        people.update_company(person, "Seagate")
        people.update_email(person, innersource[person])
    else:
      Type = people.get_type(person)
      if not Type:
        # A UUID-shaped login indicates a GitHub mannequin account.
        match = re.match('[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}', person)
        if match:
          print("Updating %s to be a mannequin" % person)
          people.update_type(person, "Mannequin")
        else:
          print("Person %s is unknown" % person)
  people.persist()
# Entry point when executed as a script (no effect when imported).
if __name__ == "__main__":
    main()
| 37.003534
| 157
| 0.673224
|
acffdc3ecb183799729adbd7c3aa6f78e2a55781
| 6,654
|
py
|
Python
|
openrobot_cli/api/__init__.py
|
MaskDuck/OpenRobot-CLI
|
aecc1182b61e03f5fc21496e0cb303b2dbc039f9
|
[
"MIT"
] | null | null | null |
openrobot_cli/api/__init__.py
|
MaskDuck/OpenRobot-CLI
|
aecc1182b61e03f5fc21496e0cb303b2dbc039f9
|
[
"MIT"
] | null | null | null |
openrobot_cli/api/__init__.py
|
MaskDuck/OpenRobot-CLI
|
aecc1182b61e03f5fc21496e0cb303b2dbc039f9
|
[
"MIT"
] | null | null | null |
import os
import json
import click
from ..utils import blue_text
from openrobot.api_wrapper import SyncClient
def get_token():
    """Return the stored OpenRobot API token, or None if unavailable.

    Reads ``~/.openrobot/api/cridentials.json`` (spelling kept for
    compatibility with existing installs) and extracts its ``token`` field.
    Any expected failure — missing file, unreadable file, malformed JSON,
    missing key — yields None, because callers treat "no token" as a normal
    state.
    """
    cred_path = os.path.expanduser("~/.openrobot/api/cridentials.json")
    try:
        with open(cred_path, 'r') as f:
            cridentials = json.load(f)
        return cridentials['token']
    except (OSError, ValueError, KeyError):
        # OSError: file missing/unreadable; ValueError: bad JSON
        # (json.JSONDecodeError subclasses it); KeyError: no 'token' field.
        # Narrowed from a bare `except:` so real bugs are not hidden.
        return None
def setup_client():
    """(Re)create the module-level SyncClient used by all API commands.

    Token resolution order: the stored credentials file, then the
    OPENROBOT_API_TOKEN environment variable, then the public test token.
    """
    try:
        cred_path = os.path.expanduser("~/.openrobot/api/cridentials.json")
        with open(cred_path, 'r') as f:
            token = json.load(f)['token']
    except (OSError, ValueError, KeyError):
        # Narrowed from a bare `except:`; same fallback behavior as before.
        token = os.environ.get('OPENROBOT_API_TOKEN') or 'I-Am-Testing'
    global client
    client = SyncClient(token, ignore_warning=True)


# Build the client eagerly at import time so every command can use it.
setup_client()
@click.group("api", invoke_without_command=True)
@click.pass_context
def main(ctx: click.Context):
    """
    OpenRobot API. You can configure and use the OpenRobot API.
    """
    # Invoked with a subcommand: let click dispatch as usual.
    if ctx.invoked_subcommand is not None:
        return
    # Invoked bare: behave like `--help` and stop.
    click.echo(ctx.get_help())
    ctx.exit()
@main.command("configure")
@click.option('--token', prompt=blue_text('Please enter your API Token ') + '[' + (get_token() or "I-Am-Testing") + ']', default=None)
@click.pass_context
def configure(ctx: click.Context, token: str):
    """
    Configure OpenRobot API.
    """
    # Fall back to the previously stored token, then the public test token.
    token = token or get_token() or 'I-Am-Testing'
    dir = os.path.expanduser("~/.openrobot")
    # makedirs(exist_ok=True) replaces the two try/mkdir/except
    # FileExistsError blocks and creates both path levels in one call.
    os.makedirs(f'{dir}/api', exist_ok=True)
    with open(f'{dir}/api/cridentials.json', 'w') as fp:
        json.dump({'token': token}, fp)
    # Rebuild the shared client so the new token takes effect immediately.
    setup_client()
    click.echo('OpenRobot API configured successfully!')
    ctx.exit()
@main.command("config")
@click.option('--token', prompt=blue_text('Please enter your API Token ') + '[' + (get_token() or "I-Am-Testing") + ']', default=None, help="The token to use to authenticate to APIs.")
@click.pass_context
def config(ctx: click.Context, token: str):
    """An alias of configure command."""
    # Bug fix: this alias was also named `configure`, rebinding the module
    # level name, so `ctx.invoke(configure, ...)` invoked *itself* and
    # recursed forever.  Named `config`, the reference below resolves to the
    # real `configure` command defined above.
    ctx.invoke(configure, token=token)
# API commands
@main.command("lyrics")
@click.argument("query", nargs=-1, type=str)
@click.option("--format", default="text", type=click.Choice(["json", "text"]), help="Output format. Text will be an output with Title, Artist and Lyrics. JSON will be a JSON object returned by the API.")
@click.pass_context
def lyrics(ctx: click.Context, query: str, format: str):
    """
    Gets/Searches lyrics from a query. This access the /api/lyrics endpoint.
    """
    # click hands the words over as a tuple; join them into one query string.
    query = ' '.join(query)
    result = client.lyrics(query)
    if format == "json":
        click.echo(json.dumps(result.raw))
        ctx.exit()
    # Text mode: bail out early when the API found nothing.
    if result.lyrics is None:
        click.echo(f"No lyrics found for {query}")
        ctx.exit()
    out = f"""{blue_text("Title:")} {result.title or "Unknown."}
{blue_text("Artist:")} {result.artist or "Unknown."}
{result.lyrics}"""
    click.echo(out)
    ctx.exit()
@main.command("nsfw-check")
@click.argument("url", type=str)
@click.option("--format", default="text", type=click.Choice(["json", "text"]), help="Output format. Text will be an output with Safe/Unsafe score and Labels. JSON will be a JSON object returned by the API.")
@click.pass_context
def nsfw_check(ctx: click.Context, url: str, format: str):
    """
    Performs a NSFW Check using the /api/nsfw-check endpoint.
    """
    nsfw = client.nsfw_check(url)
    if format == "json":
        # Raw API payload, unmodified.
        click.echo(json.dumps(nsfw.raw))
    else:
        # The arithmetic below treats nsfw.score as the unsafe fraction in
        # [0, 1]; the two percentages are complementary.
        safe_score = 100 - nsfw.score * 100
        unsafe_score = nsfw.score * 100
        # "Safe" only when no labels were detected AND the safe side dominates.
        is_safe = not bool(nsfw.labels) and safe_score > unsafe_score
        s = f"""{click.style("Safe Score:", fg="green")} {round(safe_score, 1)}%
{click.style("Unsafe Score:", fg="red")} {round(unsafe_score, 1)}%
{click.style("Is Safe:", fg="green" if is_safe else "red")} {is_safe}
{blue_text("Labels:")}"""
        # NOTE(review): iterating in reverse and skipping any name already
        # recorded as a parent appears intended to drop redundant ancestor
        # labels — confirm against the API's label ordering.
        parent_name_added = []
        if nsfw.labels:
            for label in reversed(nsfw.labels):
                if label.name in parent_name_added:
                    continue
                s += f'\n- {label.name} - Confidence: {round(label.confidence, 1)}%'
                if label.parent_name:
                    parent_name_added.append(label.parent_name)
        else:
            s += f' None'
        click.echo(s)
    ctx.exit()
@main.command("celebrity")
@click.argument("url", type=str)
@click.option("--format", default="text", type=click.Choice(["json", "text"]), help="Output format. Text will be an output with the details of the detected celebrity. JSON will be a JSON object returned by the API.")
@click.pass_context
def celebrity(ctx: click.Context, url: str, format: str):
    """
    Performs a Celebrity Detection using the /api/celebrity endpoint.
    """
    detected = client.celebrity(url)
    if format == "json":
        # An empty detection list serialises as {"detectedFaces": []} to stay
        # shape-compatible with the API's own empty response.
        click.echo(json.dumps([x.raw for x in detected] or {"detectedFaces": []}))
    else:
        if not detected:
            click.echo(f"No celebrity detected.")
            ctx.exit()
        # NOTE(review): the loop variable shadows the function name
        # `celebrity`; harmless here, but worth renaming eventually.
        for celebrity in detected:
            # f-string expressions cannot contain backslashes (pre-3.12);
            # `newline` works around that inside the template below.
            newline = "\n"
            s = ""
            # Number the entries only when more than one face was detected.
            if len(detected) > 1:
                s += click.style("#" + str(detected.index(celebrity) + 1), fg="bright_cyan")
            s += f"""
{blue_text("Name:")} {celebrity.name}
{blue_text("Gender:")} {celebrity.gender}
{blue_text("Confidence:")} {round(celebrity.confidence, 1)}%
{blue_text("URLs:")}{f"{newline}- " + f"{newline}- ".join(celebrity.urls) if celebrity.urls else " None"}
{blue_text("Face:")}
- {blue_text("Pose:")}
    - {blue_text("Roll:")} {celebrity.face.pose.roll}
    - {blue_text("Yaw:")} {celebrity.face.pose.yaw}
    - {blue_text("Pitch:")} {celebrity.face.pose.pitch}
- {blue_text("Quality:")}
    - {blue_text("Brightness:")} {celebrity.face.quality.brightness}
    - {blue_text("Sharpness:")} {celebrity.face.quality.sharpness}
- {blue_text("Emotions:")}{f"{newline}    - " + f"{newline}    - ".join([f'{emotion.type.lower().capitalize()} - Confidence: {round(emotion.confidence, 1)}%' for emotion in sorted(celebrity.face.emotions, key=lambda i: i.confidence, reverse=True)]) if celebrity.face.emotions else " Unknown."}
- {blue_text("Is Smiling:")} {celebrity.face.smile.value} - Confidence: {round(celebrity.face.smile.confidence, 1)}%"""
            # Blank separator line between entries, except after the last one.
            if detected.index(celebrity) + 1 < len(detected):
                s += "\n"
            click.echo(s)
    ctx.exit()
def api_cli_setup(cmd: click.Group):
    """Attach the `api` command group to the given root CLI group."""
    cmd.add_command(main)
| 31.239437
| 305
| 0.616171
|
acffdcd49dfef04711d3d32c6be72fe399402934
| 4,780
|
py
|
Python
|
venv/Lib/site-packages/pandas/tests/series/methods/test_describe.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pandas/tests/series/methods/test_describe.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pandas/tests/series/methods/test_describe.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | 1
|
2021-04-26T22:41:56.000Z
|
2021-04-26T22:41:56.000Z
|
import numpy as np
from pandas import Period, Series, Timedelta, Timestamp, date_range
import pandas._testing as tm
class TestSeriesDescribe:
    """Tests for Series.describe across dtypes and edge cases."""

    def test_describe(self):
        # Numeric data -> count/mean/std/min/quartiles/max summary.
        s = Series([0, 1, 2, 3, 4], name="int_data")
        result = s.describe()
        expected = Series(
            [5, 2, s.std(), 0, 1, 2, 3, 4],
            name="int_data",
            index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
        )
        tm.assert_series_equal(result, expected)
        # Boolean data is summarised categorically: count/unique/top/freq.
        s = Series([True, True, False, False, False], name="bool_data")
        result = s.describe()
        expected = Series(
            [5, 2, False, 3], name="bool_data", index=["count", "unique", "top", "freq"]
        )
        tm.assert_series_equal(result, expected)
        # Object/string data: also categorical-style summary.
        s = Series(["a", "a", "b", "c", "d"], name="str_data")
        result = s.describe()
        expected = Series(
            [5, 4, "a", 2], name="str_data", index=["count", "unique", "top", "freq"]
        )
        tm.assert_series_equal(result, expected)
        # Timedelta data: numeric-style summary in timedelta units.
        s = Series(
            [
                Timedelta("1 days"),
                Timedelta("2 days"),
                Timedelta("3 days"),
                Timedelta("4 days"),
                Timedelta("5 days"),
            ],
            name="timedelta_data",
        )
        result = s.describe()
        expected = Series(
            [5, s[2], s.std(), s[0], s[1], s[2], s[3], s[4]],
            name="timedelta_data",
            index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
        )
        tm.assert_series_equal(result, expected)
        # Period data: categorical-style summary.
        s = Series(
            [Period("2020-01", "M"), Period("2020-01", "M"), Period("2019-12", "M")],
            name="period_data",
        )
        result = s.describe()
        expected = Series(
            [3, 2, s[0], 2],
            name="period_data",
            index=["count", "unique", "top", "freq"],
        )
        tm.assert_series_equal(result, expected)

    def test_describe_empty_object(self):
        # https://github.com/pandas-dev/pandas/issues/27183
        s = Series([None, None], dtype=object)
        result = s.describe()
        expected = Series(
            [0, 0, np.nan, np.nan],
            dtype=object,
            index=["count", "unique", "top", "freq"],
        )
        tm.assert_series_equal(result, expected)
        # Zero-length slice should describe the same as an all-None Series.
        result = s[:0].describe()
        tm.assert_series_equal(result, expected)
        # ensure NaN, not None
        assert np.isnan(result.iloc[2])
        assert np.isnan(result.iloc[3])

    def test_describe_with_tz(self, tz_naive_fixture):
        # GH 21332
        tz = tz_naive_fixture
        name = str(tz_naive_fixture)
        start = Timestamp(2018, 1, 1)
        end = Timestamp(2018, 1, 5)
        s = Series(date_range(start, end, tz=tz), name=name)
        # datetime_is_numeric=True gives the numeric-style summary (no std).
        result = s.describe(datetime_is_numeric=True)
        expected = Series(
            [
                5,
                Timestamp(2018, 1, 3).tz_localize(tz),
                start.tz_localize(tz),
                s[1],
                s[2],
                s[3],
                end.tz_localize(tz),
            ],
            name=name,
            index=["count", "mean", "min", "25%", "50%", "75%", "max"],
        )
        tm.assert_series_equal(result, expected)

    def test_describe_with_tz_warns(self):
        name = tz = "CET"
        start = Timestamp(2018, 1, 1)
        end = Timestamp(2018, 1, 5)
        s = Series(date_range(start, end, tz=tz), name=name)
        # Legacy behavior (datetime treated as categorical) warns of the
        # upcoming change and includes first/last rows.
        with tm.assert_produces_warning(FutureWarning):
            result = s.describe()
        expected = Series(
            [
                5,
                5,
                s.value_counts().index[0],
                1,
                start.tz_localize(tz),
                end.tz_localize(tz),
            ],
            name=name,
            index=["count", "unique", "top", "freq", "first", "last"],
        )
        tm.assert_series_equal(result, expected)

    def test_datetime_is_numeric_includes_datetime(self):
        # Naive datetimes also support the numeric-style summary.
        s = Series(date_range("2012", periods=3))
        result = s.describe(datetime_is_numeric=True)
        expected = Series(
            [
                3,
                Timestamp("2012-01-02"),
                Timestamp("2012-01-01"),
                Timestamp("2012-01-01T12:00:00"),
                Timestamp("2012-01-02"),
                Timestamp("2012-01-02T12:00:00"),
                Timestamp("2012-01-03"),
            ],
            index=["count", "mean", "min", "25%", "50%", "75%", "max"],
        )
        tm.assert_series_equal(result, expected)
| 33.900709
| 89
| 0.471967
|
acffdd34d800e4ec72b78e4084e4c4d51dc289cc
| 2,782
|
py
|
Python
|
models/disputable_voting.py
|
pkretzschmar/commons-config-backend
|
a5abf9294e0b2fbb8f5d5bf5d3db2b1c55ebbc91
|
[
"MIT"
] | null | null | null |
models/disputable_voting.py
|
pkretzschmar/commons-config-backend
|
a5abf9294e0b2fbb8f5d5bf5d3db2b1c55ebbc91
|
[
"MIT"
] | null | null | null |
models/disputable_voting.py
|
pkretzschmar/commons-config-backend
|
a5abf9294e0b2fbb8f5d5bf5d3db2b1c55ebbc91
|
[
"MIT"
] | null | null | null |
import pandas as pd
class DisputableVotingModel:
    """Model of the Disputable Voting parameters.

    Every constructor argument may be None, in which case a built-in default
    is used.  `get_data` packages the raw inputs plus derived bar-chart and
    pie-chart data into a single dict.
    """

    def __init__(self,
                 support_required=None,
                 minimum_quorum=None,
                 vote_duration=None,
                 delegated_voting_period=None,
                 quiet_ending_period=None,
                 quiet_ending_extension=None,
                 execution_delay=None):
        def pick(value, fallback):
            # Substitute the default only for None, so an explicit 0 is kept.
            return fallback if value is None else value

        self.support_required = pick(support_required, 0.4)
        self.minimum_quorum = pick(minimum_quorum, 0.1)
        self.vote_duration = pick(vote_duration, 6)
        self.delegated_voting_period = pick(delegated_voting_period, 3)
        self.quiet_ending_period = pick(quiet_ending_period, 2)
        self.quiet_ending_extension = pick(quiet_ending_extension, 1)
        self.execution_delay = pick(execution_delay, 1)
        # Portion of the vote that cannot trigger a quiet-ending extension;
        # clamped at zero when the quiet period exceeds the vote duration.
        self.non_quiet_voting_period = max(self.vote_duration - self.quiet_ending_period, 0)
        self.output_dict = {
            'input': {
                'supportRequired': self.support_required,
                'minimumQuorum': self.minimum_quorum,
                'voteDuration': self.vote_duration,
                'delegatedVotingPeriod': self.delegated_voting_period,
                'quietEndingPeriod': self.quiet_ending_period,
                'quietEndingExtension': self.quiet_ending_extension,
                'executionDelay': self.execution_delay,
            },
        }

    def get_data(self):
        """Return the input/output dict with chart data filled in."""
        self.output_dict['output'] = {
            'barChart': {
                'totalProposalProcess': {
                    'nonQuietVotingPeriod': self.non_quiet_voting_period,
                    'quietEndingPeriod': self.quiet_ending_period,
                    'executionDelay': self.execution_delay,
                },
                'delegatedVoting': {
                    'delegatedVotingPeriod': self.delegated_voting_period,
                },
                'proposalProcessWithExtension': {
                    'voteDuration': self.vote_duration,
                    'quietEndingExtension': self.quiet_ending_extension,
                    'executionDelay': self.execution_delay,
                },
                'voteDuration': self.vote_duration,
            },
            'pieChart': {
                'nonQuietVotingPeriod': self.non_quiet_voting_period,
                'quietEndingPeriod': self.quiet_ending_period,
                'quietEndingExtension': self.quiet_ending_extension,
                'executionDelay': self.execution_delay,
            },
        }
        return self.output_dict
| 44.870968
| 108
| 0.645219
|
acffddcd33f33dbf358c8b101d2ed810592a0084
| 14,380
|
py
|
Python
|
DVSTool/drawevent.py
|
YU-Zhiyang/WEVI
|
0282dc6de58722fc3ed3829a004800b035685b3a
|
[
"MIT"
] | 14
|
2021-08-10T06:58:07.000Z
|
2022-02-25T23:03:10.000Z
|
DVSTool/drawevent.py
|
YU-Zhiyang/WEVI
|
0282dc6de58722fc3ed3829a004800b035685b3a
|
[
"MIT"
] | 4
|
2021-10-30T13:01:52.000Z
|
2022-03-22T04:59:46.000Z
|
DVSTool/drawevent.py
|
YU-Zhiyang/WEVI
|
0282dc6de58722fc3ed3829a004800b035685b3a
|
[
"MIT"
] | null | null | null |
import cv2
import sys
import os
sys.path.append('../')
from lib import fileTool as FT
from lib.pwcNet.pwcNet import PWCDCNet
from lib.forwardWarpTorch.forwardWarp import forwadWarp
import torch
from pathlib import Path
from DVSTool20.DVSBase import ESIMReader
import numpy as np
import pickle
from tqdm import tqdm
from multiprocessing import Pool, RLock, freeze_support
from functools import partial
import multiprocessing
"""
zip related intensity, events, flows and warped intensity
"""
def getallSamples(allFiles, numInter=9):
    """Build overlapping 19-file training windows from a sorted file list.

    Four interleaved passes (offsets 0..3, stride 4) slide a window of
    ``subSetSize`` consecutive entries over ``allFiles``; a final window
    anchored at the end of the list is always appended, so short lists still
    yield one (possibly shorter) sample.

    Parameters
    ----------
    allFiles : list
        Ordered event-file paths.
    numInter : int, optional
        Kept for interface compatibility; unused by the windowing logic.

    Returns
    -------
    list
        List of windows, each a slice of ``allFiles``.
    """
    subSetSize = 19
    length = len(allFiles)
    sampleList = []
    for start1 in range(0, 4, 1):
        for start2 in range(start1, length, 4):
            # The original guarded each window by indexing
            # allFiles[start2 + subSetSize] and catching the IndexError;
            # this explicit bound check is equivalent (the window plus one
            # look-ahead element must exist) and drops the dead lowT/upT
            # temporaries and the over-broad `except Exception`.
            if start2 + subSetSize >= length:
                break
            sampleList.append(allFiles[start2:start2 + subSetSize])
    # Always include the tail window so the end of the sequence is covered.
    sampleList.append(allFiles[-subSetSize::])
    return sampleList
def zipFrameEvent(EventDir: str, TargetDir: str, numEvFea=20, numInter=9, vis=False):
    """Pack intensity frames, aggregated events and optical flow into samples.

    For every window produced by `getallSamples`, builds a record containing
    three key frames (window indices 5/9/13), three event-feature tensors
    (numEvFea channels each, binned around those frames) and PWC-Net flows
    from the outer frames towards the centre frame, then pickles the record
    to TargetDir/<sequence>/<index>.pkl.

    Parameters
    ----------
    EventDir : str
        Directory of one event sequence ('.pkl' files named by integer index).
    TargetDir : str
        Output root; a sub-directory per sequence is created.
    numEvFea : int, optional
        Number of temporal bins (channels) per event feature tensor.
    numInter : int, optional
        Present for interface compatibility; the getallSamples call below
        hard-codes numInter=3 (which getallSamples ignores anyway).
    vis : bool, optional
        When True, shows intermediate frames/events in an OpenCV window.

    NOTE(review): meant to run as a multiprocessing Pool worker; each worker
    pins itself to one of 8 GPUs via its pool-worker index — confirm the
    host actually has 8 GPUs.
    """
    curProc = multiprocessing.current_process()
    targetParent = Path(TargetDir) / Path(EventDir).stem
    # Pool worker identities start at 1; map workers round-robin onto GPUs.
    gpuId = (curProc._identity[0] - 1) % 8
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpuId)
    FT.mkPath(targetParent)
    allFiles = FT.getAllFiles(EventDir, 'pkl')
    # Numeric sort by file stem so windows follow temporal order.
    allFiles.sort(key=lambda x: int(Path(x).stem), reverse=False)
    allSamples = getallSamples(allFiles=allFiles, numInter=3)
    flowNet = PWCDCNet().cuda().eval()
    warp = forwadWarp()
    if vis:
        cv2.namedWindow('1', 0)
    # One progress bar per worker, stacked by worker id.
    pbar = tqdm(total=len(allSamples), position=int(curProc._identity[0]))
    for sIdx, sample in enumerate(allSamples):
        targetPath = Path(targetParent) / '{:07d}.pkl'.format(sIdx)
        # Resume support: skip samples that were already written.
        if targetPath.is_file():
            pbar.set_description('{}: GPU:{}'.format(Path(EventDir).stem, gpuId))
            pbar.update(1)
            continue
        ESIM = ESIMReader(fileNames=sample)
        I = []
        # Key frames of the window: first / centre / last training frame.
        trainImgIdxs = [5, 9, 13]
        # get images --------------------------------------------------------------------------------
        # NOTE(review): despite the name, ESIM.pathImgStart entries are used
        # as image arrays (imshow/copy below) — confirm in ESIMReader.
        if vis:
            for imgPath in ESIM.pathImgStart:
                cv2.imshow('1', imgPath)
                cv2.waitKey(100)
        for fidx in trainImgIdxs:
            imgPath = ESIM.pathImgStart[fidx]
            I.append(imgPath)
            if vis:
                cv2.imshow('1', I[-1])
                cv2.waitKey(200)
        # get E0 --------------------------------------------------------------------------------------------
        fCenter = ESIM.tImgStart[5]  # 10
        fStart = ESIM.tImgStart[0]  # 5
        fStop = ESIM.tImgStart[10]  # 15
        # Bin edges: numEvFea/2 bins before the centre time, numEvFea/2 after
        # (the duplicated centre edge is dropped via [1::]).
        tEvents = np.linspace(start=fStart, stop=fCenter, num=numEvFea // 2 + 1, endpoint=True).tolist() + \
                  np.linspace(start=fCenter, stop=fStop, num=numEvFea // 2 + 1, endpoint=True).tolist()[1::]
        E0 = np.zeros([numEvFea, ESIM.height, ESIM.width]).astype(np.int8)
        for eIdx in range(numEvFea):
            eStart, eEnd = tEvents[eIdx], tEvents[eIdx + 1]
            p = ESIM.aggregEvent(tStart=eStart, tStop=eEnd, P=None)
            E0[eIdx, ...] = p
            if vis:
                # Normalise the bin to 0..255 and overlay polarity on the frame
                # (red = positive events, green = negative).
                eventImg = p.astype(np.float32)
                eventImg = ((eventImg - eventImg.min()) / (eventImg.max() - eventImg.min()) * 255.0).astype(np.uint8)
                eventImg = cv2.cvtColor(eventImg, cv2.COLOR_GRAY2BGR)
                # imgPath = ESIM.pathImgStart[15 + eIdx // 2]
                imgPath = ESIM.pathImgStart[5]
                img = cv2.cvtColor(imgPath.copy(), cv2.COLOR_GRAY2BGR)
                # img = I[1].copy()
                img[:, :, 0][E0[eIdx, ...] != 0] = 0
                img[:, :, 2][E0[eIdx, ...] > 0] = 255
                img[:, :, 1][E0[eIdx, ...] < 0] = 255
                cv2.putText(img, '{}_{}'.format(1, eIdx), (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                            (0, 0, 255),
                            5,
                            cv2.LINE_AA)
                cv2.imshow('1', np.concatenate([img.astype(np.uint8), eventImg], axis=1))
                cv2.waitKey(300)
        E0 = E0.astype(np.int8)
        # get Et -----------------------------------------------------------------------------------
        fCenter = ESIM.tImgStart[9]  # 20
        fStart = ESIM.tImgStart[4]  # 15
        fStop = ESIM.tImgStart[14]  # 25
        tEvents = np.linspace(start=fStart, stop=fCenter, num=numEvFea // 2 + 1, endpoint=True).tolist() + \
                  np.linspace(start=fCenter, stop=fStop, num=numEvFea // 2 + 1, endpoint=True).tolist()[1::]
        Et = np.zeros([numEvFea, ESIM.height, ESIM.width]).astype(np.int8)
        for eIdx in range(numEvFea):
            eStart, eEnd = tEvents[eIdx], tEvents[eIdx + 1]
            p = ESIM.aggregEvent(tStart=eStart, tStop=eEnd, P=None)
            Et[eIdx, ...] = p
            if vis:
                eventImg = p.astype(np.float32)
                eventImg = ((eventImg - eventImg.min()) / (eventImg.max() - eventImg.min()) * 255.0).astype(np.uint8)
                eventImg = cv2.cvtColor(eventImg, cv2.COLOR_GRAY2BGR)
                # imgPath = ESIM.pathImgStart[15 + eIdx // 2]
                imgPath = ESIM.pathImgStart[9]
                img = cv2.cvtColor(imgPath.copy(), cv2.COLOR_GRAY2BGR)
                # img = I[1].copy()
                img[:, :, 0][Et[eIdx, ...] != 0] = 0
                img[:, :, 2][Et[eIdx, ...] > 0] = 255
                img[:, :, 1][Et[eIdx, ...] < 0] = 255
                cv2.putText(img, '{}_{}'.format(1, eIdx), (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                            (0, 0, 255),
                            5,
                            cv2.LINE_AA)
                cv2.imshow('1', np.concatenate([img.astype(np.uint8), eventImg], axis=1))
                cv2.waitKey(300)
        Et = Et.astype(np.int8)
        # get E1 ------------------------------------------------------------------------------------------------
        fCenter = ESIM.tImgStart[13]  # 30
        fStart = ESIM.tImgStart[8]  # 25
        fStop = ESIM.tImgStart[18]  # 35
        tEvents = np.linspace(start=fStart, stop=fCenter, num=numEvFea // 2 + 1, endpoint=True).tolist() + \
                  np.linspace(start=fCenter, stop=fStop, num=numEvFea // 2 + 1, endpoint=True).tolist()[1::]
        E1 = np.zeros([numEvFea, ESIM.height, ESIM.width]).astype(np.int8)
        for eIdx in range(numEvFea):
            eStart, eEnd = tEvents[eIdx], tEvents[eIdx + 1]
            p = ESIM.aggregEvent(tStart=eStart, tStop=eEnd, P=None)
            E1[eIdx, ...] = p
            if vis:
                eventImg = p.astype(np.float32)
                eventImg = ((eventImg - eventImg.min()) / (eventImg.max() - eventImg.min()) * 255.0).astype(np.uint8)
                eventImg = cv2.cvtColor(eventImg, cv2.COLOR_GRAY2BGR)
                # imgPath = ESIM.pathImgStart[15 + eIdx // 2]
                imgPath = ESIM.pathImgStart[13]
                img = cv2.cvtColor(imgPath.copy(), cv2.COLOR_GRAY2BGR)
                # img = I[1].copy()
                img[:, :, 0][E1[eIdx, ...] != 0] = 0
                img[:, :, 2][E1[eIdx, ...] > 0] = 255
                img[:, :, 1][E1[eIdx, ...] < 0] = 255
                cv2.putText(img, '{}_{}'.format(1, eIdx), (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                            (0, 0, 255),
                            5,
                            cv2.LINE_AA)
                cv2.imshow('1', np.concatenate([img.astype(np.uint8), eventImg], axis=1))
                cv2.waitKey(300)
        E1 = E1.astype(np.int8)
        # HWC -> CHW for the flow network.
        I0 = I[0].transpose([2, 0, 1])
        It = I[1].transpose([2, 0, 1])
        I1 = I[2].transpose([2, 0, 1])
        with torch.no_grad():
            I0Cuda = torch.from_numpy(I0 / 255.0).cuda().unsqueeze(0).float()
            ItCuda = torch.from_numpy(It / 255.0).cuda().unsqueeze(0).float()
            I1Cuda = torch.from_numpy(I1 / 255.0).cuda().unsqueeze(0).float()
            # Flows from each outer frame towards the centre frame.
            F0t = flowNet(I0Cuda, ItCuda, iters=20, test_mode=True)
            F1t = flowNet(I1Cuda, ItCuda, iters=20, test_mode=True)
            if vis:
                # Forward-warp the outer frames to eyeball flow quality.
                It_ = (warp(I0Cuda, F0t))[0].cpu().numpy().transpose([1, 2, 0])
                It__ = (warp(I1Cuda, F1t))[0].cpu().numpy().transpose([1, 2, 0])
                cv2.imshow('1', It.transpose([1, 2, 0]))
                cv2.waitKey(200)
                cv2.imshow('1', It_)
                cv2.waitKey(200)
                cv2.imshow('1', It__)
                cv2.waitKey(200)
        if vis:
            cv2.imshow('1', I0.transpose([1, 2, 0]))
            cv2.waitKey(200)
            cv2.imshow('1', It.transpose([1, 2, 0]))
            cv2.waitKey(200)
            cv2.imshow('1', I1.transpose([1, 2, 0]))
            cv2.waitKey(200)
        # One self-contained training sample per window.
        record = {'I0': I0, 'It': It, 'I1': I1,
                  'Et': Et, 'E0': E0, 'E1': E1,
                  'F0t': F0t[0, ...].cpu().numpy(), 'F1t': F1t[0, ...].cpu().numpy()
                  }
        with open(targetPath, 'wb') as fs:
            pickle.dump(record, fs)
        pbar.set_description('{}: GPU:{}'.format(Path(EventDir).stem, gpuId))
        pbar.update(1)
    pbar.close()
def batchZip(srcDir, dstDir, numEvFea, numInter, vis, poolSize=1):
    """Run zipFrameEvent over every sub-directory of srcDir in a process pool."""
    worker = partial(zipFrameEvent, TargetDir=dstDir, numEvFea=numEvFea,
                     numInter=numInter, vis=vis)
    subdirs = FT.getSubDirs(srcDir)
    # Share one tqdm lock so the per-worker progress bars don't interleave.
    freeze_support()
    tqdm.set_lock(RLock())
    pool = Pool(processes=poolSize, initializer=tqdm.set_lock,
                initargs=(tqdm.get_lock(),))
    pool.map(func=worker, iterable=subdirs)
    pool.close()
    pool.join()
def mainServer():
    """Batch-convert the training set on the cluster (8 workers, no preview)."""
    batchZip(srcDir='/mnt/lustre/yuzhiyang/dataset/slomoDVS3/event/train',  # simulated events
             dstDir='/mnt/lustre/yuzhiyang/dataset/slomoDVS3/train20',      # output samples
             numEvFea=20,  # channels of event feature
             numInter=9,   # number of frames to be interpolated
             vis=False,
             poolSize=8)
def mainLocal():
    """Batch-convert the training set locally (single worker, with preview)."""
    batchZip(srcDir='/home/sensetime/data/event/DVS/slomoDVS3/event/train/',  # simulated events
             dstDir='/home/sensetime/data/event/DVS/slomoDVS3/train20',       # output samples
             numEvFea=20,  # channels of event feature
             numInter=9,   # number of frames to be interpolated
             vis=True,
             poolSize=1)
def check():
    """Visually sanity-check the pickled training samples.

    Iterates every sequence under srcDir, loads each sample pickle and shows
    the three key frames plus each E0/Et/E1 event channel overlaid on its
    reference frame in an OpenCV window.

    Bug fix: the "check E0" section referenced `p`/`eIdx` without any
    enclosing loop (a copy-paste slip that raised NameError); it now
    iterates aSample['E0'] exactly like the Et and E1 sections.
    """
    srcDir = '/home/sensetime/data/event/DVS/slomoDVS3/train'
    allSequences = FT.getSubDirs(srcDir)
    for sequence in allSequences:
        allFiles = FT.getAllFiles(sequence, 'pkl')
        allFiles.sort(reverse=False)
        cv2.namedWindow('1', 0)
        for file in allFiles:
            with open(file, 'rb') as fs:
                aSample = pickle.load(fs)
            print('check I')
            # Frames are stored CHW; transpose back to HWC for display.
            cv2.imshow('1', aSample['I0'].transpose([1, 2, 0]))
            cv2.waitKey(200)
            cv2.imshow('1', aSample['It'].transpose([1, 2, 0]))
            cv2.waitKey(200)
            cv2.imshow('1', aSample['I1'].transpose([1, 2, 0]))
            cv2.waitKey(200)
            print('check E0')
            for eIdx, p in enumerate(aSample['E0']):
                # Normalise the bin to 0..255 and overlay polarity on the
                # reference frame (red = positive, green = negative).
                eventImg = p.astype(np.float32)
                eventImg = ((eventImg - eventImg.min()) / (eventImg.max() - eventImg.min()) * 255.0).astype(
                    np.uint8)
                eventImg = cv2.cvtColor(eventImg, cv2.COLOR_GRAY2BGR)
                img = cv2.cvtColor(aSample['I0'].transpose([1, 2, 0]).copy(), cv2.COLOR_GRAY2BGR)
                img[:, :, 0][p != 0] = 0
                img[:, :, 2][p > 0] = 255
                img[:, :, 1][p < 0] = 255
                cv2.putText(img, '{}'.format(eIdx), (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                            (0, 0, 255),
                            5,
                            cv2.LINE_AA)
                cv2.imshow('1', np.concatenate([img.astype(np.uint8), eventImg], axis=1))
                cv2.waitKey(100)
            print('check Et')
            for eIdx, p in enumerate(aSample['Et']):
                eventImg = p.astype(np.float32)
                eventImg = ((eventImg - eventImg.min()) / (eventImg.max() - eventImg.min()) * 255.0).astype(
                    np.uint8)
                eventImg = cv2.cvtColor(eventImg, cv2.COLOR_GRAY2BGR)
                img = cv2.cvtColor(aSample['It'].transpose([1, 2, 0]).copy(), cv2.COLOR_GRAY2BGR)
                img[:, :, 0][p != 0] = 0
                img[:, :, 2][p > 0] = 255
                img[:, :, 1][p < 0] = 255
                cv2.putText(img, '{}'.format(eIdx), (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                            (0, 0, 255),
                            5,
                            cv2.LINE_AA)
                cv2.imshow('1', np.concatenate([img.astype(np.uint8), eventImg], axis=1))
                cv2.waitKey(100)
            print('check E1')
            for eIdx, p in enumerate(aSample['E1']):
                eventImg = p.astype(np.float32)
                eventImg = ((eventImg - eventImg.min()) / (eventImg.max() - eventImg.min()) * 255.0).astype(
                    np.uint8)
                eventImg = cv2.cvtColor(eventImg, cv2.COLOR_GRAY2BGR)
                img = cv2.cvtColor(aSample['I1'].transpose([1, 2, 0]).copy(), cv2.COLOR_GRAY2BGR)
                img[:, :, 0][p != 0] = 0
                img[:, :, 2][p > 0] = 255
                img[:, :, 1][p < 0] = 255
                cv2.putText(img, '{}'.format(eIdx), (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                            (0, 0, 255),
                            5,
                            cv2.LINE_AA)
                cv2.imshow('1', np.concatenate([img.astype(np.uint8), eventImg], axis=1))
                cv2.waitKey(100)
if __name__ == '__main__':
    # Example cluster launch:
    # srun -p Pixel --nodelist=SH-IDC1-10-5-31-31 --cpus-per-task=22 --gres=gpu:8 python mainGetDVSTrain_02.py
    # mainServer()
    # mainLocal()
    # By default only the visual sanity check runs.
    check()
    pass
| 35.682382
| 117
| 0.507441
|
acffde5e4bbab6a2d2240ec0d221d67211e8d444
| 744
|
py
|
Python
|
Python/Programming Fundamentals/Exams/61. The Isle Of Man Race.py
|
teodoramilcheva/softuni-software-engineering
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
[
"MIT"
] | null | null | null |
Python/Programming Fundamentals/Exams/61. The Isle Of Man Race.py
|
teodoramilcheva/softuni-software-engineering
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
[
"MIT"
] | null | null | null |
Python/Programming Fundamentals/Exams/61. The Isle Of Man Race.py
|
teodoramilcheva/softuni-software-engineering
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
[
"MIT"
] | null | null | null |
import re

# A valid message looks like <sym>NAME<sym>=<len>!!<payload>, where <sym> is
# one of # $ % * & and must be the same symbol on both sides of the name.
pattern = re.compile(r'(#|\$|%|\*|&)([a-zA-Z]+)\1=(\d+)!!(.+)')

while True:
    match = pattern.fullmatch(input())
    if match is None:
        print('Nothing found!')
        continue
    racer = match[2]
    declared_length = int(match[3])
    encrypted = match[4]
    # The declared length must agree with the actual payload length.
    if declared_length != len(encrypted):
        print('Nothing found!')
        continue
    # Decrypt by shifting every character forward by the payload length.
    shift = len(encrypted)
    decoded = ''.join(chr(ord(ch) + shift) for ch in encrypted)
    print(f"Coordinates found! {racer} -> {decoded}")
    break
| 24
| 63
| 0.58871
|
acffdefcc1836d8629769dadcf9e87c63d6fd70d
| 1,478
|
py
|
Python
|
bitshares/aio/instance.py
|
silverchen0402/python-bitshares
|
aafbcf5cd09e7bca99dd156fd60b9df8ba508630
|
[
"MIT"
] | 102
|
2018-04-08T23:05:00.000Z
|
2022-03-31T10:10:03.000Z
|
bitshares/aio/instance.py
|
silverchen0402/python-bitshares
|
aafbcf5cd09e7bca99dd156fd60b9df8ba508630
|
[
"MIT"
] | 246
|
2018-04-03T12:35:49.000Z
|
2022-02-28T10:44:28.000Z
|
bitshares/aio/instance.py
|
silverchen0402/python-bitshares
|
aafbcf5cd09e7bca99dd156fd60b9df8ba508630
|
[
"MIT"
] | 128
|
2018-04-14T01:39:12.000Z
|
2022-03-25T08:56:51.000Z
|
# -*- coding: utf-8 -*-
from graphenecommon.aio.instance import AbstractBlockchainInstanceProvider
class SharedInstance:
    """This class merely offers a singleton for the Blockchain Instance."""

    # Process-wide shared chain instance; None until one is installed.
    instance = None
    # Shared configuration mapping.
    # NOTE(review): both attributes are consumed by the
    # AbstractBlockchainInstanceProvider machinery — confirm in graphenecommon.
    config = {}
class BlockchainInstance(AbstractBlockchainInstanceProvider):
    """Compatibility shim keeping the historical BitShares naming conventions."""

    _sharedInstance = SharedInstance

    def __init__(self, *args, **kwargs):
        # Accept the legacy 'bitshares_instance' keyword as a synonym for
        # 'blockchain_instance'.
        legacy = kwargs.get("bitshares_instance")
        if legacy:
            kwargs["blockchain_instance"] = legacy
        AbstractBlockchainInstanceProvider.__init__(self, *args, **kwargs)

    def get_instance_class(self):
        """Should return the Chain instance class, e.g. `bitshares.BitShares`"""
        # Imported lazily to avoid a circular import at module load time.
        import bitshares.aio as bts

        return bts.BitShares

    @property
    def bitshares(self):
        """Alias for the specific blockchain."""
        return self.blockchain
def shared_blockchain_instance():
    """Return the process-wide shared chain instance."""
    provider = BlockchainInstance()
    return provider.shared_blockchain_instance()
def set_shared_blockchain_instance(instance):
    """Install `instance` as the process-wide shared chain instance."""
    # Drop cached state so stale data from a previous instance is discarded.
    instance.clear_cache()
    BlockchainInstance.set_shared_blockchain_instance(instance)
def set_shared_config(config):
    """Store `config` as the shared configuration for BlockchainInstance."""
    BlockchainInstance.set_shared_config(config)
# Legacy aliases kept for backward compatibility with older imports.
shared_bitshares_instance = shared_blockchain_instance
set_shared_bitshares_instance = set_shared_blockchain_instance
| 28.980392
| 85
| 0.742896
|
acffdfcb271ed94f995cf9abb83ff15cc345f941
| 29,412
|
py
|
Python
|
noiseplanet/utils/hexgrid.py
|
jks-liu/noiseplanet
|
907b2bb9a93df48a266ad01c7cad0d8ef2367e78
|
[
"Apache-2.0"
] | 27
|
2020-06-13T21:00:53.000Z
|
2022-03-29T03:22:06.000Z
|
noiseplanet/utils/hexgrid.py
|
voodooed/noiseplanet
|
fca79ef3b81826a5286d566ebf3bf9340df13201
|
[
"Apache-2.0"
] | 12
|
2020-12-24T08:14:00.000Z
|
2022-02-06T18:01:51.000Z
|
noiseplanet/utils/hexgrid.py
|
voodooed/noiseplanet
|
fca79ef3b81826a5286d566ebf3bf9340df13201
|
[
"Apache-2.0"
] | 9
|
2020-08-28T16:06:54.000Z
|
2022-01-26T01:05:02.000Z
|
# -*- coding: utf-8 -*-
# Created on Tue Dec 18 16:49:20 2019
# @author: arthurd
"""
HexGrid Module.
Generate Hexagonal grid. Cartesian-Hexagonal coordinates interaction.
"""
import numpy as np
import math
import matplotlib.pyplot as plt
from pyproj import Proj, Transformer
def hexbin_grid(bbox, side_length=1, proj_init=None, proj_out=None):
    """Generate a grid of hexagons covering a bounding box.

    Parameters
    ----------
    bbox : Tuple
        Area to cover: (lower x, lower y, upper x, upper y).
    side_length : float, optional
        Side length of each hexagon. The default is 1.
    proj_init, proj_out : String, optional
        Optional projection pair: the grid is computed in ``proj_out``
        and the result is converted back to ``proj_init``
        (e.g. proj_init="epsg:4326", proj_out="epsg:3857").

    Returns
    -------
    polygons : List
        One entry per hexagon; each entry is a list of six (x, y) tuples.
    """
    minx, miny, maxx, maxy = bbox
    reproject = proj_init != proj_out
    if reproject:
        fwd = Transformer.from_proj(Proj(init=proj_init), Proj(init=proj_out))
        minx, miny = fwd.transform(minx, miny)
        maxx, maxy = fwd.transform(maxx, maxy)

    # Hexagon geometry: half-side, apothem, bounding width/height.
    half = side_length * 0.5
    apothem = side_length * math.cos(math.radians(30))
    width = apothem * 2
    height = 2 * side_length

    # Pad the start so the first row/column fully covers the bbox edge.
    minx = minx - width
    miny = miny - height / 2
    origin_x = minx
    row_step = 3 * half

    # Six parallel corner lists, one per hexagon vertex.
    corners = [[] for _ in range(6)]
    row = 0
    y = miny
    while y < maxy:
        # Odd rows are shifted by half a hexagon width.
        x = origin_x + (apothem if row % 2 == 0 else width)
        while x <= maxx:
            corners[0].append((x, y + half))
            corners[1].append((x, y + 3 * half))
            corners[2].append((x + apothem, y + height))
            corners[3].append((x + width, y + 3 * half))
            corners[4].append((x + width, y + half))
            corners[5].append((x + apothem, y))
            x += width
        y += row_step
        row += 1

    points = [np.array(c).reshape(-1, 2) for c in corners]
    if reproject:
        inv = Transformer.from_proj(Proj(init=proj_out), Proj(init=proj_init))
        points = [np.column_stack(inv.transform(p[:, 0], p[:, 1]))
                  for p in points]

    polygons = []
    for i in range(len(points[0])):
        polygons.append([(points[k][i][0], points[k][i][1]) for k in range(6)])
    return polygons
# =============================================================================
# DEPRECATED
# =============================================================================
# def get_size_hexgrid(bbox, side_length):
# startx, starty, endx, endy = bbox[0], bbox[1], bbox[2], bbox[3]
# # width & height of the bbox
# w = abs(endx - startx)
# h = abs(endy - starty)
# # parameters of the hexagon
# R = 0.5 * side_length * 1/(np.sin(np.deg2rad(30)))
# r = R * np.cos(np.deg2rad(30))
# # number of hexagons (vertical & horizontal)
# Nw = int((w + r)//(2*r)) + 1
# Nh = int((h + R)//(R + side_length/2)) + 1
#
# shorter_lines = 0 if (w > 2*(Nw - 1)*r) else 1
#
# return Nw, Nh, shorter_lines
# =============================================================================
def cartesian_to_hex(point, origin=(0, 0), side_length=1,
                     proj_init=None, proj_out=None):
    """Convert one cartesian point to fractional axial hexagon coordinates.

    Parameters
    ----------
    point : Tuple
        Cartesian point to convert.
    origin : Tuple, optional
        Origin of the hexagonal coordinates system. The default is (0, 0).
    side_length : Float, optional
        Side length of the hexagons. The default is 1.
    proj_init, proj_out : String, optional
        Optional projection pair applied to *point* and *origin* first.

    Returns
    -------
    numpy array [q, r] in axial hexagon coordinates.
    """
    if proj_init != proj_out:
        transformer = Transformer.from_proj(Proj(init=proj_init), Proj(init=proj_out))
        point = transformer.transform(point[0], point[1])
        origin = transformer.transform(origin[0], origin[1])
    # Inverse of the pointy-top axial->cartesian basis matrix.
    to_axial = np.array([[np.sqrt(3) / 3, -1 / 3],
                         [0.0, 2 / 3]])
    offset = np.array(point) - origin
    return np.dot(to_axial, offset) / side_length
def hex_to_cartesian(hexa, origin=(0, 0), side_length=1,
                     proj_init=None, proj_out=None):
    """Convert axial hexagonal coordinates to a cartesian point.

    Parameters
    ----------
    hexa : Tuple
        Axial (q, r) hexagonal coordinates.
    origin : Tuple, optional
        Origin of the hexagonal coordinates system. The default is (0, 0).
    side_length : Float, optional
        Side length of the hexagons. The default is 1.
    proj_init, proj_out : String, optional
        Optional projection pair applied to the result and the origin.

    Returns
    -------
    numpy array [x, y] in cartesian coordinates.
    """
    mat = np.array([[np.sqrt(3), np.sqrt(3)/2],
                    [0,          3/2]])
    hex_coord = np.array(hexa)
    cart_coord = side_length * np.dot(mat, hex_coord)
    if proj_init != proj_out:
        transformer = Transformer.from_proj(Proj(init=proj_init), Proj(init=proj_out))
        # Bug fix: Transformer.transform returns plain tuples, so the final
        # `cart_coord + origin` would CONCATENATE two tuples instead of
        # translating the point. Wrap both in arrays to keep vector addition.
        cart_coord = np.asarray(transformer.transform(cart_coord[0], cart_coord[1]))
        origin = np.asarray(transformer.transform(origin[0], origin[1]))
    return cart_coord + origin
def hexs_to_cartesians(Q, R, origin=(0, 0), side_length=1,
                       proj_init=None, proj_out=None):
    """Convert arrays of axial hexagon indexes to cartesian coordinates.

    Parameters
    ----------
    Q, R : numpy 1D arrays
        Axial column (Q) and row (R) hexagon indexes; must be equal length.
    origin : Tuple, optional
        Origin of the hexagonal coordinates system. The default is (0, 0).
    side_length : Float, optional
        Side length of the hexagons. The default is 1.
    proj_init, proj_out : String, optional
        Optional projection pair applied to the result and the origin.

    Returns
    -------
    numpy 2D array (2 x N): cartesian X in row 0, Y in row 1.
    """
    assert len(Q) == len(R), 'The dimension of Q and R indexes should be the same'
    to_cart = np.array([[np.sqrt(3), np.sqrt(3)/2],
                        [0,          3/2]])
    cart = side_length * np.dot(to_cart, np.vstack((Q, R)))
    if proj_init != proj_out:
        transformer = Transformer.from_proj(Proj(init=proj_init), Proj(init=proj_out))
        cart = np.vstack(transformer.transform(cart[0], cart[1]))
        origin = transformer.transform(origin[0], origin[1])
    # Broadcast the origin over every column before translating.
    shift = np.vstack(([origin[0]] * len(Q), [origin[1]] * len(R)))
    return cart + shift
def cartesians_to_hexs(X, Y, origin=(0, 0), side_length=1,
                       proj_init=None, proj_out=None):
    """Convert arrays of cartesian points to fractional axial hexagon coordinates.

    Parameters
    ----------
    X, Y : numpy 1D arrays
        Cartesian coordinates; must be equal length.
    origin : Tuple, optional
        Origin of the hexagonal coordinates system. The default is (0, 0).
    side_length : Float, optional
        Side length of the hexagons. The default is 1.
    proj_init, proj_out : String, optional
        Optional projection pair applied to the points and origin first.

    Returns
    -------
    numpy 2D array (2 x N): fractional axial Q in row 0, R in row 1.
    """
    assert len(X) == len(Y), 'The dimension of X and Y should be the same'
    if proj_init != proj_out:
        transformer = Transformer.from_proj(Proj(init=proj_init), Proj(init=proj_out))
        X, Y = transformer.transform(X, Y)
        origin = transformer.transform(origin[0], origin[1])
    # Inverse of the pointy-top axial->cartesian basis matrix.
    to_axial = np.array([[np.sqrt(3)/3, -1/3],
                         [0,            2/3]])
    shift = np.vstack(([origin[0]] * len(X), [origin[1]] * len(Y)))
    return np.dot(to_axial, np.vstack((X, Y)) - shift) / side_length
def nearest_hexagon(point, origin=(0, 0), side_length=1, proj_init=None, proj_out=None):
    """Return the axial (q, r) index of the hexagon nearest to a cartesian point.

    Parameters
    ----------
    point : Tuple
        Cartesian point.
    origin : Tuple, optional
        Origin of the hexagonal coordinates system. The default is (0, 0).
    side_length : Float, optional
        Side length of the hexagons. The default is 1.
    proj_init, proj_out : String, optional
        Optional projection pair applied to the point and origin first.

    Returns
    -------
    (q, r) : Tuple
        Axial coordinates of the nearest hexagon center.
    """
    if proj_init != proj_out:
        transformer = Transformer.from_proj(Proj(init=proj_init), Proj(init=proj_out))
        point = transformer.transform(point[0], point[1])
        origin = transformer.transform(origin[0], origin[1])
    # Fractional axial coordinates of the point.
    axial = cartesian_to_hex(point, origin=origin, side_length=side_length)
    # Promote to cube coordinates (x + y + z == 0) for robust rounding.
    cube_x = axial[0]
    cube_z = axial[1]
    cube_y = -cube_x - cube_z
    rx = np.round(cube_x)
    ry = np.round(cube_y)
    rz = np.round(cube_z)
    dx = abs(rx - cube_x)
    dy = abs(ry - cube_y)
    dz = abs(rz - cube_z)
    # Re-derive the axis with the largest rounding error from the other two,
    # restoring the cube-coordinate invariant.
    if dx > dy and dx > dz:
        rx = -ry - rz
    elif dy > dz:
        ry = -rx - rz
    else:
        rz = -rx - ry
    return rx, rz
def nearest_hexagons(X, Y, origin=(0, 0), side_length=1, proj_init=None, proj_out=None):
    """Snap arrays of cartesian points to their nearest hexagon indexes.

    Vectorized counterpart of ``nearest_hexagon``.

    Parameters
    ----------
    X, Y : numpy 1D arrays
        Cartesian coordinates of the points.
    origin : Tuple, optional
        Origin of the hexagonal coordinates system. The default is (0, 0).
    side_length : Float, optional
        Side length of the hexagons. The default is 1.
    proj_init, proj_out : String, optional
        Optional projection pair applied to the points and origin first
        (e.g. proj_init="epsg:4326", proj_out="epsg:3857").

    Returns
    -------
    (Q, R) : Tuple of numpy 1D arrays
        Axial coordinates of the nearest hexagon center for each point.
    """
    if proj_init != proj_out:
        transformer = Transformer.from_proj(Proj(init=proj_init), Proj(init=proj_out))
        X, Y = transformer.transform(X, Y)
        origin = transformer.transform(origin[0], origin[1])
    hex_coord = cartesians_to_hexs(X, Y, origin=origin, side_length=side_length)
    # Cube coordinates (x + y + z == 0)
    cx = hex_coord[0]
    cz = hex_coord[1]
    cy = -cx - cz
    # Rounding cube coordinates
    rX = np.round(cx)
    rY = np.round(cy)
    rZ = np.round(cz)
    X_diff = np.abs(rX - cx)
    Y_diff = np.abs(rY - cy)
    Z_diff = np.abs(rZ - cz)
    # Vectorized cube rounding (replaces the per-element Python loop):
    # for each point, re-derive the axis with the largest rounding error
    # from the other two. The masks are mutually exclusive, so each
    # correction can safely use the original rounded values of the other
    # two axes — exactly matching the former loop's semantics.
    fix_x = (X_diff > Y_diff) & (X_diff > Z_diff)
    fix_z = ~fix_x & ~(Y_diff > Z_diff)
    # rY would only matter for the middle branch, which is never returned.
    Q = np.where(fix_x, -rY - rZ, rX)
    R = np.where(fix_z, -rX - rY, rZ)
    return Q, R
def hexagon_coordinates(center, side_length=1, r=0.8660254037844389, R=1.0000000000000002,
                        proj_init=None, proj_out=None):
    """Build the closed ring of corner points for one hexagon center.

    Parameters
    ----------
    center : Tuple
        Cartesian center of the hexagon.
    side_length : Float, optional
        Side length of the hexagon. The default is 1.
    r : Float, optional
        Inner radius (apothem). The default is 0.8660254037844389.
    R : Float, optional
        Outer radius. The default is 1.0000000000000002.
    proj_init, proj_out : String, optional
        Optional projection pair: corners are computed in ``proj_out``
        and converted back to ``proj_init``.

    Returns
    -------
    hexagon : List
        Seven points (the first corner is repeated at the end).
    """
    # Sentinel defaults: whichever of side_length / r / R was explicitly
    # overridden drives the other two.
    default_r = 0.8660254037844389
    default_R = 1.0000000000000002
    if side_length != 1 and r == default_r and R == default_R:
        R = 0.5 * side_length * 1/(np.sin(np.deg2rad(30)))
        r = R * np.cos(np.deg2rad(30))
    elif side_length == 1 and r != default_r and R == default_R:
        side_length = 2*r *np.tan(np.deg2rad(30))
        R = 0.5 * side_length * 1/(np.sin(np.deg2rad(30)))
    elif side_length == 1 and r == default_r and R != default_R:
        r = R * np.cos(np.deg2rad(30))
        side_length = 2*r *np.tan(np.deg2rad(30))

    reproject = proj_init != proj_out
    if reproject:
        fwd = Transformer.from_proj(Proj(init=proj_init), Proj(init=proj_out))
        center = fwd.transform(center[0], center[1])

    cx, cy = center[0], center[1]
    corners = [
        [cx, cy - R],
        [cx + r, cy - side_length/2],
        [cx + r, cy + side_length/2],
        [cx, cy + R],
        [cx - r, cy + side_length/2],
        [cx - r, cy - side_length/2],
    ]
    if reproject:
        inv = Transformer.from_proj(Proj(init=proj_out), Proj(init=proj_init))
        corners = [inv.transform(p[0], p[1]) for p in corners]
    # Close the ring by repeating the first corner.
    return corners + [corners[0]]
def hexagons_coordinates(X, Y, side_length=1, r=0.8660254037844389, R=1.0000000000000002,
                         proj_init=None, proj_out=None):
    """Build closed corner rings for many hexagon centers at once.

    Parameters
    ----------
    X, Y : numpy 1D arrays
        Cartesian coordinates of the hexagon centers.
    side_length : Float, optional
        Side length of the hexagons. The default is 1.
    r : Float, optional
        Inner radius (apothem). The default is 0.8660254037844389.
    R : Float, optional
        Outer radius. The default is 1.0000000000000002.
    proj_init, proj_out : String, optional
        Optional projection pair: corners are computed in ``proj_out``
        and converted back to ``proj_init``.

    Returns
    -------
    hexagons : List
        One ring per center; each ring is seven (x, y) tuples with the
        first corner repeated at the end.
    """
    # Sentinel defaults: whichever of side_length / r / R was explicitly
    # overridden drives the other two.
    default_r = 0.8660254037844389
    default_R = 1.0000000000000002
    if side_length != 1 and r == default_r and R == default_R:
        R = 0.5 * side_length * 1/(np.sin(np.deg2rad(30)))
        r = R * np.cos(np.deg2rad(30))
    elif side_length == 1 and r != default_r and R == default_R:
        side_length = 2*r *np.tan(np.deg2rad(30))
        R = 0.5 * side_length * 1/(np.sin(np.deg2rad(30)))
    elif side_length == 1 and r == default_r and R != default_R:
        r = R * np.cos(np.deg2rad(30))
        side_length = 2*r *np.tan(np.deg2rad(30))

    reproject = proj_init != proj_out
    if reproject:
        fwd = Transformer.from_proj(Proj(init=proj_init), Proj(init=proj_out))
        X, Y = fwd.transform(X, Y)

    # Six (N, 2) arrays, one per hexagon vertex, in ring order.
    corner_sets = [
        np.column_stack((X, Y - R)),
        np.column_stack((X + r, Y - side_length/2)),
        np.column_stack((X + r, Y + side_length/2)),
        np.column_stack((X, Y + R)),
        np.column_stack((X - r, Y + side_length/2)),
        np.column_stack((X - r, Y - side_length/2)),
    ]
    if reproject:
        inv = Transformer.from_proj(Proj(init=proj_out), Proj(init=proj_init))
        corner_sets = [np.column_stack(inv.transform(P[:, 0], P[:, 1]))
                       for P in corner_sets]

    hexagons = []
    for i in range(len(corner_sets[0])):
        ring = [(P[i][0], P[i][1]) for P in corner_sets]
        ring.append(ring[0])  # close the ring
        hexagons.append(ring)
    return hexagons
if __name__ == "__main__":
    # Demo / visual sanity check of the hexgrid helpers (needs matplotlib).
    print("\n\t-----------------------\n",
          "\t Hexbin \n\n")
    # Style of the figures
    plt.style.use('seaborn-darkgrid')
    plt.figure()
    # =============================================================================
    #     1/ Create an hexagonal grid
    # =============================================================================
    print("1/ Create an hexagonal grid")
    print("\t1.1/ Select the bounding box or two extremes values \n\t(lower_let, upper_right\n\tand select the hexagonal radius")
    point1 = (2, 2)
    point2 = (7, 10)
    print("\t1.2/ Create the grid, in the web mercator projection by default_")
    print("\t1.3/ Parameters of the grid")
    # parameters of the hexagons (r: apothem, R: outer radius, a: side length)
    w = abs(point1[0] - point2[0])
    h = abs(point1[1] - point2[1])
    r = 0.8660254037844389
    side_length = 1
    R = 0.5 * side_length * 1/(np.sin(np.deg2rad(30)))
    print("\tParameters : r = {0}, R = {1}, a = {2}".format(r, R, side_length))
    bbox = (point1[0], point1[1], point2[0], point2[1])
    # identical proj_init/proj_out means no reprojection happens here
    hexagons = hexbin_grid(bbox, side_length=side_length, proj_init="epsg:4326", proj_out="epsg:4326")
    for i, hexagon in enumerate(hexagons):
        # close the ring so the last segment gets drawn
        hexagon.append(hexagon[0])
        X = [point[0] for point in hexagon]
        Y = [point[1] for point in hexagon]
        plt.plot(X, Y, color='skyblue', label='hexagon' if i == 0 else '')
    print("\t1.3/ Plot the grid")
    plt.scatter(point1[0], point1[1], color='skyblue', label='start')
    plt.scatter(point2[0], point2[1], color='skyblue', label='end')
    # frame of the plot
    plt.xticks([point1[0] - r*30 + k*r for k in range(int(30*w))])
    plt.yticks([point1[1] - (R + side_length/2) + k*(R + side_length/2) for k in range(int(2*h))])
    plt.axis("equal")
    # =============================================================================
    #     2/ Get the grid indexes from the bounding box and radius
    # =============================================================================
    print("2/ Get the nearest hexagon")
    print("\t2.1/ Get the index of the hexagon")
    import random
    # random point inside the bbox, snapped to its containing hexagon
    point = (random.random()*(point2[0] - point1[0]) + point1[0],
             random.random()*(point2[1] - point1[1]) + point1[1])
    plt.scatter(point[0], point[1], color='darkcyan', label='random point')
    origin = point1
    nearest_hex = nearest_hexagon(point, origin=origin, side_length=side_length)
    center = hex_to_cartesian(nearest_hex, origin=origin, side_length=side_length)
    plt.scatter(center[0], center[1], color='cyan', label='hex center')
    plt.legend(loc=1, frameon=True)
    print("\t2.2/ Get nearest hexagons from a list of points")
    X = np.random.randint(-5, 10, 50)
    Y = np.random.randint(-5, 15, 50)
    origin = (-5, -5)
    side_length = 1.5
    R = 0.5 * side_length * 1/(np.sin(np.deg2rad(30)))
    r = R * np.cos(np.deg2rad(30))
    # NOTE: R is rebound here from radius to the axial row indexes
    Q, R = nearest_hexagons(X, Y, origin=origin, side_length=side_length)
    Xcenter, Ycenter = hexs_to_cartesians(Q, R, origin=origin, side_length=side_length)
    hexagons = hexagons_coordinates(Xcenter, Ycenter, side_length=side_length)
    plt.figure()
    plt.scatter(X, Y, zorder=3, color='darkcyan', label='random point')
    plt.scatter(Xcenter, Ycenter, color='cyan', label='hex center')
    for i, hexagon in enumerate(hexagons):
        hexagon.append(hexagon[0])
        X = [point[0] for point in hexagon]
        Y = [point[1] for point in hexagon]
        plt.plot(X, Y, color='skyblue', label='hexagon' if i == 0 else '')
    plt.legend(loc=1, frameon=True)
    plt.xticks(np.unique(Xcenter))
    plt.yticks(np.unique(Ycenter))
    plt.axis("equal")
    # =============================================================================
    #     3/ Geographic coordinates
    # =============================================================================
    print("3/ Geographic coordinates")
    print("\t3.1/ Get nearest hexagons in web mercator system from a list of geographic points")
    proj_init="epsg:4326"
    proj_out="epsg:3857"
    # ~40 random points around (3.25E, 45.5N); side_length is in meters here
    lon = np.random.rand(40)/2 + 3
    lat = np.random.rand(40) + 45
    # origin = (-179.99999999, -89.99999999)
    origin = (0, 0)
    side_length = 15000
    R = 0.5 * side_length * 1/(np.sin(np.deg2rad(30)))
    r = R * np.cos(np.deg2rad(30))
    Q, R = nearest_hexagons(lon, lat, side_length=side_length, origin=origin,
                            proj_init=proj_init, proj_out=proj_out)
    # note the swapped proj pair: converts the mercator centers back to lon/lat
    Xcenter, Ycenter = hexs_to_cartesians(Q, R, side_length=side_length, origin=origin,
                                          proj_init=proj_out, proj_out=proj_init)
    hexagons = hexagons_coordinates(Xcenter, Ycenter, side_length=side_length,
                                    proj_init=proj_init, proj_out=proj_out)
    plt.figure()
    plt.scatter(lon, lat, zorder=3, color='darkcyan', label='random point')
    plt.scatter(Xcenter, Ycenter, color='cyan', label='hex center')
    for i, hexagon in enumerate(hexagons):
        hexagon.append(hexagon[0])
        X = [point[0] for point in hexagon]
        Y = [point[1] for point in hexagon]
        plt.plot(X, Y, color='skyblue', label='hexagon' if i == 0 else '')
    plt.legend(loc=1, frameon=True)
    plt.xticks(Xcenter)
    plt.yticks(Ycenter)
    plt.axis("equal")
| 34.724911
| 129
| 0.578063
|
acffe1b0efd181f4154863da9b44bdd1dd2f823a
| 7,841
|
py
|
Python
|
profile_depths/time_structure_pred.py
|
psturmfels/tape
|
0b72f1c24177991b8b031158364176500d4184a8
|
[
"BSD-3-Clause"
] | null | null | null |
profile_depths/time_structure_pred.py
|
psturmfels/tape
|
0b72f1c24177991b8b031158364176500d4184a8
|
[
"BSD-3-Clause"
] | null | null | null |
profile_depths/time_structure_pred.py
|
psturmfels/tape
|
0b72f1c24177991b8b031158364176500d4184a8
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
import argparse
import os
import time
import sys
import numpy as np
import pandas as pd
from netsurfp2 import parse_fasta, preprocess, model, convert_npz
sys.path[0] = '/export/home/tape'
from tape import utils
from tape.registry import registry
from tape.training import ForwardRunner, run_eval_epoch
def create_parser():
    """Build the CLI argument parser for the structure-prediction benchmark."""
    parser = argparse.ArgumentParser(
        description='Time models on secondary structure prediction')
    parser.add_argument('model_type', choices=['netsurf', 'transformer'],
                        default='netsurf', help='Model type to run')
    parser.add_argument('model_path', type=str,
                        help='Where the model is located')
    parser.add_argument('out_file', type=str,
                        help='Output file')
    parser.add_argument('split', choices=['casp12', 'ts115', 'cb513'],
                        help='split to run on')
    parser.add_argument('--hhdb', choices=['scop70', 'uniclust30'],
                        default='scop70', help='HHBlits Database')
    parser.add_argument('--n_threads', default=16, type=int,
                        help='number of jobs')
    parser.add_argument('--batch_size', type=int, default=50,
                        help='Batch size')
    parser.add_argument('--no_cuda', action='store_true',
                        help='Turn off gpus')
    return parser
def accuracy(labels, predictions):
    """Per-sequence classification accuracy from one-hot labels.

    Positions whose label vector is all-zero count as padding and are
    excluded from both the match count and the sequence length.

    Args:
        labels: array (..., seq, classes) of one-hot labels (all-zero = pad).
        predictions: array (..., seq, classes) of class scores.

    Returns:
        Array of per-sequence accuracies (correct / valid positions).
    """
    valid_mask = labels.sum(axis=-1).astype(int)
    lengths = valid_mask.sum(axis=-1)
    predicted_classes = predictions.argmax(axis=-1)
    true_classes = labels.argmax(axis=-1)
    matches = (predicted_classes == true_classes).astype(int)
    correct = (valid_mask * matches).sum(axis=-1)
    return correct / lengths.astype(float)
def run_netsurf(model_path, split, hhdb, n_threads, batch_size):
    """Benchmark NetSurfP-2.0 on one secondary-structure split.

    Builds HHblits profiles for every sequence in the split, runs the
    NetSurfP graph model on them, and scores the Q3 predictions against
    the reference labels.

    Args:
        model_path: Path to the saved NetSurfP TensorFlow graph.
        split: Evaluation split name ('casp12', 'ts115' or 'cb513').
        hhdb: HHblits database to search ('scop70' or 'uniclust30').
        n_threads: Number of HHblits worker threads.
        batch_size: Batch size for the NetSurfP model.

    Returns:
        Tuple (mean Q3 accuracy, search seconds/sequence,
        prediction seconds/sequence, total seconds/sequence).
    """
    # Hard-coded data layout on the evaluation machine.
    fasta_file = f'/export/home/tape/profile_depths/{split}.faa'
    label_file = f'/export/home/tape/profile_depths/netsurfp2/{split.upper()}_HHblits.npz'
    tmp_out = f'/export/home/tape/profile_depths/{split}_{hhdb}/'
    print(f'Running netsurf on {fasta_file}, {hhdb} with {n_threads} threads and batch size of {batch_size}')
    with open(fasta_file) as fasta_handle:
        protlist = parse_fasta(fasta_handle)
    num_sequences = len(protlist)
    if hhdb == 'scop70':
        searcher = preprocess.HHblits('/export/home/tape/profile_depths/scop70/scop70_1.75', n_threads=n_threads)
    elif hhdb == 'uniclust30':
        searcher = preprocess.HHblits('/export/home/tape/profile_depths/uniclust30_2018_08/uniclust30_2018_08', n_threads=n_threads)
    # Total wall-clock spans both the search and the prediction phases.
    computation_start = time.time()
    print('Building multiple sequence alignments...')
    search_start = time.time()
    profiles = searcher(protlist, tmp_out)
    search_elapsed = time.time() - search_start
    search_per_sequence = search_elapsed / num_sequences
    print('Running main netsurf model...')
    pred_start = time.time()
    nsp_model = model.TfGraphModel.load_graph(model_path)
    results = nsp_model.predict(profiles, tmp_out, batch_size=batch_size)
    pred_elapsed = time.time() - pred_start
    pred_per_sequence = pred_elapsed / num_sequences
    time_elapsed = time.time() - computation_start
    time_per_sequence = time_elapsed / num_sequences
    label_data = np.load(label_file)
    # Columns 57:65 of the label tensor appear to hold the one-hot Q8
    # secondary-structure classes -- TODO confirm against the dataset spec.
    q8_labels = label_data['data'][:, :, 57:65]
    # Collapse Q8 into the three coarse Q3 groups by summing one-hot columns.
    q3_labels = np.stack([np.sum(q8_labels[:, :, 0:3], axis=-1),
                          np.sum(q8_labels[:, :, 3:5], axis=-1),
                          np.sum(q8_labels[:, :, 5:8], axis=-1)],
                         axis=-1)
    # Reorder labels by sorted PDB id -- presumably to line up with the
    # order of NetSurfP's output; verify against convert_npz.
    ids = list(label_data['pdbids'])
    sorted_index = sorted(range(len(ids)), key = lambda k: ids[k])
    labels = np.stack([q3_labels[i] for i in sorted_index], axis=0)
    predictions = convert_npz(results)
    accuracy_per_sequence = accuracy(labels, predictions['q3'])
    acc = np.mean(accuracy_per_sequence)
    print(f'Mean accuracy: {acc:.4f}')
    print(f'Search time per sequence: {search_per_sequence:.2f}')
    print(f'Prediction time per sequence: {pred_per_sequence:.2f}')
    print(f'Total time per sequence: {time_per_sequence:.2f}')
    return acc, search_per_sequence, pred_per_sequence, time_per_sequence
def run_transformer(model_path, split, batch_size, no_cuda):
    """Benchmark a TAPE transformer on one secondary-structure split.

    Args:
        model_path: Location of the pretrained transformer checkpoint.
        split: Dataset split name.
        batch_size: Evaluation batch size.
        no_cuda: If True, force CPU execution.

    Returns:
        Tuple (accuracy, search s/seq, prediction s/seq, total s/seq);
        search time is reported as 0.0 since no alignment step is needed.
    """
    local_rank = -1  # single-process evaluation (no distributed launch)
    device, n_gpu, is_master = utils.setup_distributed(local_rank, no_cuda)
    model = registry.get_task_model('transformer', 'secondary_structure', None, model_path)
    model = model.to(device)
    runner = ForwardRunner(model, device, n_gpu)
    runner.initialize_distributed_model()
    valid_dataset = utils.setup_dataset('secondary_structure', '/export/home/tape/data/', split, 'iupac')
    num_sequences = len(valid_dataset)
    valid_loader = utils.setup_loader(valid_dataset, batch_size, local_rank, n_gpu, 1, 1)
    acc_fn = registry.get_metric('accuracy')
    # Time only the evaluation pass over the validation set.
    computation_start = time.time()
    save_outputs = run_eval_epoch(valid_loader, runner, is_master)
    time_elapsed = time.time() - computation_start
    time_per_sequence = time_elapsed / num_sequences
    acc = acc_fn(save_outputs)
    print(f'Mean accuracy: {acc:.4f}')
    print(f'Search time per sequence: 0.00')
    print(f'Prediction time per sequence: {time_per_sequence:.2f}')
    print(f'Total time per sequence: {time_per_sequence:.2f}')
    return acc, 0.0, time_per_sequence, time_per_sequence
def save_to_csv(csv_file,
                model_type,
                accuracy,
                search_per_sequence,
                pred_per_sequence,
                time_per_sequence,
                split,
                hhdb,
                used_gpu):
    """Append one benchmark result row to csv_file, creating it if absent.

    Args:
        csv_file: Path of the results CSV.
        model_type: 'netsurf' or 'transformer'.
        accuracy: Mean per-sequence accuracy.
        search_per_sequence: Alignment-search seconds per sequence.
        pred_per_sequence: Prediction seconds per sequence.
        time_per_sequence: Total seconds per sequence.
        split: Dataset split name.
        hhdb: HHblits database used (or None for transformer runs).
        used_gpu: Whether the run used a GPU.
    """
    row = {'model_type': model_type,
           'accuracy': accuracy,
           'search_per_sequence': search_per_sequence,
           'pred_per_sequence': pred_per_sequence,
           'time_per_sequence': time_per_sequence,
           'split': split,
           'hhdb': hhdb,
           'used_gpu': used_gpu}
    new_row = pd.DataFrame({key: [value] for key, value in row.items()})
    if os.path.exists(csv_file):
        # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
        # pd.concat is the supported way to append a row.
        df = pd.concat([pd.read_csv(csv_file), new_row], ignore_index=True)
    else:
        df = new_row
    df.to_csv(csv_file, index=False)
def main(args=None):
    """CLI entry point: run the selected benchmark and append results to CSV.

    Args:
        args: Pre-parsed argparse namespace; parsed from sys.argv if None.
    """
    if args is None:
        args = create_parser().parse_args()
    if args.model_type == 'netsurf':
        metrics = run_netsurf(args.model_path, args.split, args.hhdb,
                              args.n_threads, args.batch_size)
    else:
        metrics = run_transformer(args.model_path, args.split,
                                  args.batch_size, args.no_cuda)
    acc, search_per_seq, pred_per_seq, total_per_seq = metrics
    print(f'Saving results to {args.out_file}...')
    save_to_csv(args.out_file, args.model_type, acc, search_per_seq,
                pred_per_seq, total_per_seq, args.split, args.hhdb,
                not args.no_cuda)
if __name__ == '__main__':
main()
| 39.60101
| 132
| 0.614973
|
acffe250b317bb18182722b7fba3bdbfc7037d8a
| 2,815
|
py
|
Python
|
2020/day11.py
|
mbmackenzie/advent-of-code
|
66ef2723b67c84b9c03e05281106fb2811da17a1
|
[
"MIT"
] | null | null | null |
2020/day11.py
|
mbmackenzie/advent-of-code
|
66ef2723b67c84b9c03e05281106fb2811da17a1
|
[
"MIT"
] | null | null | null |
2020/day11.py
|
mbmackenzie/advent-of-code
|
66ef2723b67c84b9c03e05281106fb2811da17a1
|
[
"MIT"
] | null | null | null |
"""Day 11"""
from typing import Any
from aoc import Solution
Grid = list[str]
def get_num_occupied(x: int, y: int, grid: list[str], acceptable: str) -> int:
    """Count occupied seats ('#') visible from (x, y).

    Casts a ray in each of the eight directions and records the first
    cell whose character is in *acceptable*; cells not in *acceptable*
    (e.g. floor '.') are looked past. Returns how many recorded cells
    are occupied.
    """
    offsets = [(dx, dy)
               for dx in (-1, 0, 1)
               for dy in (-1, 0, 1)
               if (dx, dy) != (0, 0)]
    rows, cols = len(grid), len(grid[0])
    visible = []
    for dx, dy in offsets:
        cx, cy = x + dx, y + dy
        while 0 <= cx < rows and 0 <= cy < cols:
            cell = grid[cx][cy]
            if cell in acceptable:
                visible.append(cell)
                break
            cx, cy = cx + dx, cy + dy
    return visible.count("#")
def update_grid(
    old_grid: Grid, acceptable: str = ".L#", min_occupied_to_switch: int = 4
) -> Grid:
    """Apply one round of the seating rules and return the resulting grid.

    An empty seat with zero visible occupied seats becomes occupied; an
    occupied seat with at least ``min_occupied_to_switch`` visible occupied
    seats empties; everything else is unchanged.
    """
    next_grid: Grid = []
    for x, row in enumerate(old_grid):
        new_row = ""
        for y, seat in enumerate(row):
            occupied = get_num_occupied(x, y, old_grid, acceptable)
            if seat == "L" and occupied == 0:
                new_row += "#"
            elif seat == "#" and occupied >= min_occupied_to_switch:
                new_row += "L"
            else:
                new_row += seat
        next_grid.append(new_row)
    return next_grid
def update_until_stable(initial_grid: Grid, **kwargs: Any) -> Grid:
    """Repeatedly apply ``update_grid`` until the grid stops changing."""
    grid = initial_grid
    while True:
        successor = update_grid(grid, **kwargs)
        if successor == grid:
            return grid
        grid = successor
class Day11(Solution):
    """Solution to day 11 of the 2020 Advent of Code."""

    def __init__(self) -> None:
        super().__init__(2020, 11, "")

    @staticmethod
    def _count_occupied(grid: Grid) -> int:
        """Count the occupied ('#') seats in the whole grid."""
        return sum(seat == "#" for row in grid for seat in row)

    def _part_one(self) -> int:
        """How many occupied seats when the arrangement is stable?"""
        stable_grid = update_until_stable(self.data)
        return self._count_occupied(stable_grid)

    def _part_two(self) -> int:
        """How many occupied seats when the arrangement is stable?

        - people ignore empty space
        - people need 5 or more occupied seats to leave
        """
        stable_grid = update_until_stable(
            self.data, acceptable="L#", min_occupied_to_switch=5
        )
        return self._count_occupied(stable_grid)

    def _get_data(self) -> Grid:
        return self.input.as_list()
| 27.067308
| 82
| 0.566607
|
acffe2b9900889813fed218edea271abc1c1e911
| 5,423
|
py
|
Python
|
lulu/extractors/tumblr.py
|
fakegit/Lulu
|
940c9344df8aab6044ec603f8e78d145b0f9bad7
|
[
"MIT"
] | 922
|
2018-01-17T09:22:26.000Z
|
2022-03-28T04:10:45.000Z
|
lulu/extractors/tumblr.py
|
fakegit/Lulu
|
940c9344df8aab6044ec603f8e78d145b0f9bad7
|
[
"MIT"
] | 83
|
2018-01-16T08:33:55.000Z
|
2021-06-25T06:18:24.000Z
|
lulu/extractors/tumblr.py
|
fakegit/Lulu
|
940c9344df8aab6044ec603f8e78d145b0f9bad7
|
[
"MIT"
] | 156
|
2018-02-03T06:24:14.000Z
|
2022-01-05T05:39:14.000Z
|
#!/usr/bin/env python
import re
from urllib import parse
from html import unescape
from lulu.extractors.vine import vine_download
from lulu.extractors.vimeo import vimeo_download
from lulu.extractors.universal import universal_download
from lulu.extractors.dailymotion import dailymotion_download
from lulu.common import (
match1,
url_info,
url_size,
print_info,
get_content,
download_urls,
playlist_not_supported,
)
__all__ = ['tumblr_download']
site_info = 'Tumblr.com'
def tumblr_download(
    url, output_dir='.', merge=True, info_only=False, **kwargs
):
    """Download photos, audio or video from a Tumblr post URL.

    Strategy (in order):
      1. Direct media-CDN URLs are handed to ``universal_download``.
      2. Photo-like feeds are scraped for tumblr_* image URLs, keeping only
         the highest-quality variant of each image id.
      3. Otherwise a video/audio source URL is extracted, possibly by
         following an embedded iframe (Vimeo/Dailymotion/Vine are delegated
         to their own extractors).
    """
    # Direct link into Tumblr's media CDN: no page scraping needed.
    if re.match(r'https?://\d+\.media\.tumblr\.com/', url):
        universal_download(url, output_dir, merge=merge, info_only=info_only)
        return

    # Unquote and unescape the page so regexes see plain '/' separators.
    html = parse.unquote(get_content(url)).replace('\/', '/')
    feed = match1(
        html, r'<meta property="og:type" content="tumblr-feed:(\w+)" />'
    )

    if feed in ['photo', 'photoset', 'entry'] or feed is None:
        # try to extract photos
        page_title = match1(
            html, r'<meta name="description" content="([^"\n]+)'
        ) or match1(
            html, r'<meta property="og:description" content="([^"\n]+)'
        ) or match1(html, r'<title>([^<\n]*)')
        urls = re.findall(
            r'(https?://[^;"&]+/tumblr_[^;"]+_\d+\.jpg)', html
        ) + re.findall(
            r'(https?://[^;"&]+/tumblr_[^;"]+_\d+\.png)', html
        ) + re.findall(r'(https?://[^;"&]+/tumblr_[^";]+_\d+\.gif)', html)

        # Map tumblr image id -> best (largest width suffix) variant found.
        tuggles = {}
        for url in urls:
            filename = parse.unquote(url.split('/')[-1])
            title = '.'.join(filename.split('.')[:-1])
            # Filenames look like tumblr_<id>_<quality>.<ext>; the trailing
            # number is the pixel-width variant used to rank duplicates.
            tumblr_id = match1(title, r'^tumblr_(.+)_\d+$')
            quality = int(match1(title, r'^tumblr_.+_(\d+)$'))
            ext = filename.split('.')[-1]
            try:
                size = url_size(url)
                # Keep only the highest-quality variant per image id.
                if tumblr_id not in tuggles or tuggles[tumblr_id]['quality'] \
                        < quality:
                    tuggles[tumblr_id] = {
                        'title': title,
                        'url': url,
                        'quality': quality,
                        'ext': ext,
                        'size': size,
                    }
            except Exception:
                # Best-effort: skip images whose size probe fails.
                pass

        if tuggles:
            size = sum([tuggles[t]['size'] for t in tuggles])
            print_info(site_info, page_title, None, size)
            if not info_only:
                for t in tuggles:
                    title = tuggles[t]['title']
                    ext = tuggles[t]['ext']
                    size = tuggles[t]['size']
                    url = tuggles[t]['url']
                    print_info(site_info, title, ext, size)
                    download_urls(
                        [url], title, ext, size, output_dir=output_dir
                    )
            return

    # feed == 'audio' or feed == 'video' or feed is None
    # try to extract video / audio
    real_url = match1(html, r'source src=\\x22([^\\]+)\\')
    if not real_url:
        real_url = match1(html, r'audio_file=([^&]+)&')
        if real_url:
            # Tumblr's audio CDN requires this query string to serve files.
            real_url = (
                '{}?plead=please-dont-download-this-or-our-lawyers-wont-let-us'
                '-host-audio'.format(real_url)
            )
    if not real_url:
        real_url = match1(html, r'<source src="([^"]*)"')
    if not real_url:
        # Fall back to an embedded player iframe.
        iframe_url = match1(
            html,
            r'<[^>]+tumblr_video_container[^>]+><iframe[^>]+'
            r'src=[\'"]([^\'"]*)[\'"]'
        )
        if iframe_url:
            iframe_html = get_content(iframe_url)
            real_url = match1(
                iframe_html,
                r'<video[^>]*>[\n ]*<source[^>]+src=[\'"]([^\'"]*)[\'"]'
            )
        else:
            # NOTE(review): match1 may return None here, in which case the
            # slice below raises TypeError — confirm all feeds reach this
            # path only when an iframe exists.
            iframe_url = match1(html, r'<iframe[^>]+src=[\'"]([^\'"]*)[\'"]')
            if iframe_url[:2] == '//':
                iframe_url = 'http:' + iframe_url
            # Delegate third-party embeds to their dedicated extractors.
            if re.search(r'player\.vimeo\.com', iframe_url):
                vimeo_download(
                    iframe_url, output_dir, merge=merge, info_only=info_only,
                    referer='http://tumblr.com/', **kwargs
                )
                return
            elif re.search(r'dailymotion\.com', iframe_url):
                dailymotion_download(
                    iframe_url, output_dir, merge=merge, info_only=info_only,
                    **kwargs
                )
                return
            elif re.search(r'vine\.co', iframe_url):
                vine_download(
                    iframe_url, output_dir, merge=merge, info_only=info_only,
                    **kwargs
                )
                return
            else:
                iframe_html = get_content(iframe_url)
                real_url = match1(iframe_html, r'<source src="([^"]*)"')

    # Derive a display title from page metadata, falling back to the URL.
    title = unescape(
        match1(
            html, r'<meta property="og:title" content="([^"]*)" />'
        ) or match1(
            html, r'<meta property="og:description" content="([^"]*)" />'
        ) or match1(html, r'<title>([^<\n]*)') or url.split('/')[4]
    ).replace('\n', '')

    _type, ext, size = url_info(real_url)
    print_info(site_info, title, _type, size)
    if not info_only:
        download_urls([real_url], title, ext, size, output_dir, merge=merge)
# Standard extractor entry points expected by lulu's dispatcher.
download = tumblr_download
download_playlist = playlist_not_supported(site_info)
| 34.762821
| 79
| 0.495851
|
acffe2da3c10ae54d03b91406cc380fa926fc5e5
| 250
|
py
|
Python
|
toxsign/users/apps.py
|
mboudet/toxsign_v2
|
6ceb4f599241648dfb2e5f48ee33cf5b0b31258b
|
[
"MIT"
] | 2
|
2019-07-01T08:24:26.000Z
|
2020-07-02T08:03:35.000Z
|
toxsign/users/apps.py
|
mboudet/toxsign_v2
|
6ceb4f599241648dfb2e5f48ee33cf5b0b31258b
|
[
"MIT"
] | 20
|
2019-07-01T15:06:53.000Z
|
2020-12-08T13:14:10.000Z
|
toxsign/users/apps.py
|
mboudet/toxsign_v2
|
6ceb4f599241648dfb2e5f48ee33cf5b0b31258b
|
[
"MIT"
] | 2
|
2019-07-01T09:37:39.000Z
|
2019-07-09T09:24:03.000Z
|
from django.apps import AppConfig
class UsersAppConfig(AppConfig):
    """Django application configuration for the ``toxsign.users`` app."""

    name = "toxsign.users"
    verbose_name = "Users"

    def ready(self):
        """Register signal handlers once the app registry is populated.

        The import is performed for its side effects (connecting signals).
        """
        try:
            # Bug fix: the previous ``import users.signals`` could never
            # resolve, because the app package is ``toxsign.users`` (see
            # ``name`` above), so the ImportError guard silently skipped
            # signal registration.
            import toxsign.users.signals  # noqa: F401
        except ImportError:
            # Signals module is optional; absence is not an error.
            pass
| 17.857143
| 45
| 0.608
|
acffe3947db19167a6c05c9d31bbb78a0437d23a
| 22,538
|
py
|
Python
|
pymatgen/transformations/site_transformations.py
|
Chessmag/pymatgen
|
61a4bb7a1792e1ea2379abd45b3c40efb816fd64
|
[
"MIT"
] | 1
|
2021-10-18T01:26:50.000Z
|
2021-10-18T01:26:50.000Z
|
pymatgen/transformations/site_transformations.py
|
Chessmag/pymatgen
|
61a4bb7a1792e1ea2379abd45b3c40efb816fd64
|
[
"MIT"
] | null | null | null |
pymatgen/transformations/site_transformations.py
|
Chessmag/pymatgen
|
61a4bb7a1792e1ea2379abd45b3c40efb816fd64
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines site transformations which transforms a structure into
another structure. Site transformations differ from standard transformations
in that they operate in a site-specific manner.
All transformations should inherit the AbstractTransformation ABC.
"""
import math
import itertools
import logging
import time
import numpy as np
from monty.json import MSONable
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.transformations.transformation_abc import AbstractTransformation
from pymatgen.analysis.ewald import EwaldSummation, EwaldMinimizer
class InsertSitesTransformation(AbstractTransformation):
    """Insert a given set of species at given coordinates into a structure."""

    def __init__(self, species, coords, coords_are_cartesian=False,
                 validate_proximity=True):
        """
        Args:
            species: A list of species. e.g., ["Li", "Fe"]
            coords: A list of coords corresponding to those species. e.g.,
                [[0,0,0],[0.5,0.5,0.5]].
            coords_are_cartesian (bool): Set to True if coords are given in
                cartesian coords. Defaults to False.
            validate_proximity (bool): Set to False if you do not wish to ensure
                that added sites are not too close to other sites. Defaults to True.
        """
        if len(species) != len(coords):
            raise ValueError("Species and coords must be the same length!")
        self.species = species
        self.coords = coords
        self.coords_are_cartesian = coords_are_cartesian
        self.validate_proximity = validate_proximity

    def apply_transformation(self, structure):
        """Return a sorted copy of ``structure`` with the sites inserted.

        Args:
            structure (Structure): Structure to insert sites into.

        Returns:
            Structure: sorted copy with the configured sites added.
        """
        new_structure = structure.copy()
        for position, (specie, coord) in enumerate(
                zip(self.species, self.coords)):
            new_structure.insert(
                position, specie, coord,
                coords_are_cartesian=self.coords_are_cartesian,
                validate_proximity=self.validate_proximity)
        return new_structure.get_sorted_structure()

    def __str__(self):
        return (f"InsertSiteTransformation : "
                f"species {self.species}, coords {self.coords}")

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """Return: None"""
        return None

    @property
    def is_one_to_many(self):
        """Return: False"""
        return False
class ReplaceSiteSpeciesTransformation(AbstractTransformation):
    """
    This transformation substitutes certain sites with certain species.
    """

    def __init__(self, indices_species_map):
        """
        Args:
            indices_species_map: A dict containing the species mapping in
                int-string pairs. E.g., { 1:"Na"} or {2:"Mn2+"}. Multiple
                substitutions can be done. Overloaded to accept sp_and_occu
                dictionary. E.g. {1: {"Ge":0.75, "C":0.25} }, which
                substitutes a single species with multiple species to generate a
                disordered structure.
        """
        self.indices_species_map = indices_species_map

    def apply_transformation(self, structure):
        """
        Apply the transformation.

        Arg:
            structure (Structure): A structurally similar structure in
                regards to crystal and site positions.

        Return:
            Returns a copy of structure with sites replaced.
        """
        s = structure.copy()
        for i, sp in self.indices_species_map.items():
            s[int(i)] = sp
        return s

    def __str__(self):
        # Bug fix: the old implementation appended ``+ v`` after the already
        # formatted "{k}->{v}" pair, duplicating the species in the output
        # (e.g. "1->NaNa") and raising TypeError for dict-valued
        # (disordered) substitutions.
        return "ReplaceSiteSpeciesTransformation :" + \
            ", ".join("{}->{}".format(k, v) for k, v in
                      self.indices_species_map.items())

    def __repr__(self):
        return self.__str__()

    @property
    def inverse(self):
        """Return: None"""
        return None

    @property
    def is_one_to_many(self):
        """Return: False"""
        return False
class RemoveSitesTransformation(AbstractTransformation):
    """Delete a fixed set of sites from a structure."""

    def __init__(self, indices_to_remove):
        """
        Args:
            indices_to_remove: List of indices to remove. E.g., [0, 1, 2]
        """
        self.indices_to_remove = indices_to_remove

    def apply_transformation(self, structure):
        """Return a copy of ``structure`` with the configured sites removed.

        Args:
            structure (Structure): Structure to remove sites from.

        Returns:
            Structure: copy with the sites deleted.
        """
        modified = structure.copy()
        modified.remove_sites(self.indices_to_remove)
        return modified

    def __str__(self):
        removed = ", ".join(str(idx) for idx in self.indices_to_remove)
        return "RemoveSitesTransformation :" + removed

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """Return: None"""
        return None

    @property
    def is_one_to_many(self):
        """Return: False"""
        return False
class TranslateSitesTransformation(AbstractTransformation):
    """Translate a set of sites by a vector, or by one vector per site."""

    def __init__(self, indices_to_move, translation_vector,
                 vector_in_frac_coords=True):
        """
        Args:
            indices_to_move: The indices of the sites to move
            translation_vector: Vector to move the sites. If a list of list or numpy
                array of shape, (len(indices_to_move), 3), is provided then each
                translation vector is applied to the corresponding site in the
                indices_to_move.
            vector_in_frac_coords: Set to True if the translation vector is in
                fractional coordinates, and False if it is in cartesian
                coordinations. Defaults to True.
        """
        self.indices_to_move = indices_to_move
        self.translation_vector = np.array(translation_vector)
        self.vector_in_frac_coords = vector_in_frac_coords

    def apply_transformation(self, structure):
        """Return a copy of ``structure`` with the selected sites translated.

        Args:
            structure (Structure): Structure whose sites are to be moved.

        Returns:
            Structure: translated copy.
        """
        translated = structure.copy()
        # One vector per site iff the array is (n_sites, 3); otherwise a
        # single shared vector is applied to all selected sites at once.
        per_site = self.translation_vector.shape == \
            (len(self.indices_to_move), 3)
        if per_site:
            for vector, site_index in zip(self.translation_vector,
                                          self.indices_to_move):
                translated.translate_sites(site_index, vector,
                                           self.vector_in_frac_coords)
        else:
            translated.translate_sites(self.indices_to_move,
                                       self.translation_vector,
                                       self.vector_in_frac_coords)
        return translated

    def __str__(self):
        return (f"TranslateSitesTransformation for indices "
                f"{self.indices_to_move}, vect {self.translation_vector} "
                f"and vect_in_frac_coords = {self.vector_in_frac_coords}")

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """
        Returns:
            TranslateSitesTranformation with the reverse translation.
        """
        return TranslateSitesTransformation(
            self.indices_to_move, -self.translation_vector,
            self.vector_in_frac_coords)

    @property
    def is_one_to_many(self):
        """Return: False"""
        return False

    def as_dict(self):
        """
        Json-serializable dict representation.
        """
        dct = MSONable.as_dict(self)
        # ndarray is not JSON-serializable; store as a plain list.
        dct["translation_vector"] = self.translation_vector.tolist()
        return dct
class PartialRemoveSitesTransformation(AbstractTransformation):
    """
    Remove fraction of specie from a structure.
    Requires an oxidation state decorated structure for ewald sum to be
    computed.

    Given that the solution to selecting the right removals is NP-hard, there
    are several algorithms provided with varying degrees of accuracy and speed.
    The options are as follows:

    ALGO_FAST:
        This is a highly optimized algorithm to quickly go through the search
        tree. It is guaranteed to find the optimal solution, but will return
        only a single lowest energy structure. Typically, you will want to use
        this.

    ALGO_COMPLETE:
        The complete algo ensures that you get all symmetrically distinct
        orderings, ranked by the estimated Ewald energy. But this can be an
        extremely time-consuming process if the number of possible orderings is
        very large. Use this if you really want all possible orderings. If you
        want just the lowest energy ordering, ALGO_FAST is accurate and faster.

    ALGO_BEST_FIRST:
        This algorithm is for ordering the really large cells that defeats even
        ALGO_FAST. For example, if you have 48 sites of which you want to
        remove 16 of them, the number of possible orderings is around
        2 x 10^12. ALGO_BEST_FIRST shortcircuits the entire search tree by
        removing the highest energy site first, then followed by the next
        highest energy site, and so on. It is guaranteed to find a solution
        in a reasonable time, but it is also likely to be highly inaccurate.

    ALGO_ENUMERATE:
        This algorithm uses the EnumerateStructureTransformation to perform
        ordering. This algo returns *complete* orderings up to a single unit
        cell size. It is more robust than the ALGO_COMPLETE, but requires
        Gus Hart's enumlib to be installed.
    """

    # Algorithm selector constants; pass one as the ``algo`` argument.
    ALGO_FAST = 0
    ALGO_COMPLETE = 1
    ALGO_BEST_FIRST = 2
    ALGO_ENUMERATE = 3

    def __init__(self, indices, fractions, algo=ALGO_COMPLETE):
        """
        Args:
            indices:
                A list of list of indices.
                e.g. [[0, 1], [2, 3, 4, 5]]
            fractions:
                The corresponding fractions to remove. Must be same length as
                indices. e.g., [0.5, 0.25]
            algo:
                This parameter allows you to choose the algorithm to perform
                ordering. Use one of PartialRemoveSpecieTransformation.ALGO_*
                variables to set the algo.
        """
        self.indices = indices
        self.fractions = fractions
        self.algo = algo
        self.logger = logging.getLogger(self.__class__.__name__)

    def _best_first_ordering(self, structure, num_remove_dict):
        """Greedy ordering: repeatedly remove the highest-energy site.

        Fast but approximate; see the class docstring (ALGO_BEST_FIRST).
        Returns a one-element list of {"energy", "structure"} dicts.
        """
        self.logger.debug("Performing best first ordering")
        starttime = time.time()
        self.logger.debug("Performing initial ewald sum...")
        ewaldsum = EwaldSummation(structure)
        self.logger.debug("Ewald sum took {} seconds."
                          .format(time.time() - starttime))
        starttime = time.time()
        ematrix = ewaldsum.total_energy_matrix
        to_delete = []
        totalremovals = sum(num_remove_dict.values())
        removed = {k: 0 for k in num_remove_dict.keys()}
        for i in range(totalremovals):
            maxindex = None
            maxe = float("-inf")
            maxindices = None
            for indices in num_remove_dict.keys():
                if removed[indices] < num_remove_dict[indices]:
                    for ind in indices:
                        if ind not in to_delete:
                            # NOTE(review): the column sum appears twice;
                            # presumably the Ewald energy matrix is symmetric
                            # so column sum == row sum — confirm against
                            # EwaldSummation.total_energy_matrix.
                            energy = sum(ematrix[:, ind]) + \
                                sum(ematrix[:, ind]) - ematrix[ind, ind]
                            if energy > maxe:
                                maxindex = ind
                                maxe = energy
                                maxindices = indices
            removed[maxindices] += 1
            to_delete.append(maxindex)
            # Zero out the removed site's interactions for later iterations.
            ematrix[:, maxindex] = 0
            ematrix[maxindex, :] = 0
        s = structure.copy()
        s.remove_sites(to_delete)
        self.logger.debug("Minimizing Ewald took {} seconds."
                          .format(time.time() - starttime))
        return [{"energy": sum(sum(ematrix)),
                 "structure": s.get_sorted_structure()}]

    def _complete_ordering(self, structure, num_remove_dict):
        """Exhaustively enumerate removals, deduplicating by symmetry.

        Returns all symmetrically distinct orderings sorted by partial
        Ewald energy (ascending). Exponential cost; see ALGO_COMPLETE.
        """
        self.logger.debug("Performing complete ordering...")
        all_structures = []
        symprec = 0.2
        s = SpacegroupAnalyzer(structure, symprec=symprec)
        self.logger.debug("Symmetry of structure is determined to be {}."
                          .format(s.get_space_group_symbol()))
        sg = s.get_space_group_operations()
        tested_sites = []
        starttime = time.time()
        self.logger.debug("Performing initial ewald sum...")
        ewaldsum = EwaldSummation(structure)
        self.logger.debug("Ewald sum took {} seconds."
                          .format(time.time() - starttime))
        starttime = time.time()
        allcombis = []
        for ind, num in num_remove_dict.items():
            allcombis.append(itertools.combinations(ind, num))
        count = 0
        for allindices in itertools.product(*allcombis):
            sites_to_remove = []
            indices_list = []
            for indices in allindices:
                sites_to_remove.extend([structure[i] for i in indices])
                indices_list.extend(indices)
            s_new = structure.copy()
            s_new.remove_sites(indices_list)
            energy = ewaldsum.compute_partial_energy(indices_list)
            already_tested = False
            # Skip candidates equivalent (by energy and symmetry) to one
            # already recorded.
            for i, tsites in enumerate(tested_sites):
                tenergy = all_structures[i]["energy"]
                if abs((energy - tenergy) / len(s_new)) < 1e-5 and \
                        sg.are_symmetrically_equivalent(sites_to_remove,
                                                        tsites,
                                                        symm_prec=symprec):
                    already_tested = True
            if not already_tested:
                tested_sites.append(sites_to_remove)
                all_structures.append({"structure": s_new, "energy": energy})
            count += 1
            if count % 10 == 0:
                timenow = time.time()
                self.logger.debug("{} structures, {:.2f} seconds."
                                  .format(count, timenow - starttime))
                self.logger.debug("Average time per combi = {} seconds"
                                  .format((timenow - starttime) / count))
                self.logger.debug("{} symmetrically distinct structures found."
                                  .format(len(all_structures)))
        self.logger.debug("Total symmetrically distinct structures found = {}"
                          .format(len(all_structures)))
        all_structures = sorted(all_structures, key=lambda s: s["energy"])
        return all_structures

    def _fast_ordering(self, structure, num_remove_dict, num_to_return=1):
        """
        This method uses the matrix form of ewaldsum to calculate the ewald
        sums of the potential structures. This is on the order of 4 orders of
        magnitude faster when there are large numbers of permutations to
        consider. There are further optimizations possible (doing a smarter
        search of permutations for example), but this wont make a difference
        until the number of permutations is on the order of 30,000.
        """
        self.logger.debug("Performing fast ordering")
        starttime = time.time()
        self.logger.debug("Performing initial ewald sum...")
        ewaldmatrix = EwaldSummation(structure).total_energy_matrix
        self.logger.debug("Ewald sum took {} seconds."
                          .format(time.time() - starttime))
        starttime = time.time()
        m_list = []
        for indices, num in num_remove_dict.items():
            m_list.append([0, num, list(indices), None])
        self.logger.debug("Calling EwaldMinimizer...")
        minimizer = EwaldMinimizer(ewaldmatrix, m_list, num_to_return,
                                   PartialRemoveSitesTransformation.ALGO_FAST)
        self.logger.debug("Minimizing Ewald took {} seconds."
                          .format(time.time() - starttime))
        all_structures = []
        lowest_energy = minimizer.output_lists[0][0]
        num_atoms = sum(structure.composition.values())
        # Each minimizer output is (energy, manipulations); a manipulation
        # with a None species means "delete this site".
        for output in minimizer.output_lists:
            s = structure.copy()
            del_indices = []
            for manipulation in output[1]:
                if manipulation[1] is None:
                    del_indices.append(manipulation[0])
                else:
                    s.replace(manipulation[0], manipulation[1])
            s.remove_sites(del_indices)
            struct = s.get_sorted_structure()
            all_structures.append(
                {"energy": output[0],
                 "energy_above_minimum": (output[0] - lowest_energy) / num_atoms,
                 "structure": struct})
        return all_structures

    def _enumerate_ordering(self, structure):
        """Order via enumlib: disorder the sites, then enumerate orderings."""
        # Generate the disordered structure first.
        s = structure.copy()
        for indices, fraction in zip(self.indices, self.fractions):
            for ind in indices:
                new_sp = {sp: occu * fraction
                          for sp, occu
                          in structure[ind].species.items()}
                s[ind] = new_sp
        # Perform enumeration
        from pymatgen.transformations.advanced_transformations import EnumerateStructureTransformation
        trans = EnumerateStructureTransformation()
        return trans.apply_transformation(s, 10000)

    def apply_transformation(self, structure, return_ranked_list=False):
        """
        Apply the transformation.

        Args:
            structure: input structure
            return_ranked_list (bool): Whether or not multiple structures are
                returned. If return_ranked_list is a number, that number of
                structures is returned.

        Returns:
            Depending on returned_ranked list, either a transformed structure
            or a list of dictionaries, where each dictionary is of the form
            {"structure" = .... , "other_arguments"}
            the key "transformation" is reserved for the transformation that
            was actually applied to the structure.
            This transformation is parsed by the alchemy classes for generating
            a more specific transformation history. Any other information will
            be stored in the transformation_parameters dictionary in the
            transmuted structure class.
        """
        num_remove_dict = {}
        total_combis = 0
        # Convert each (indices, fraction) pair into an integer removal count
        # and tally the total number of combinations for logging.
        for indices, frac in zip(self.indices, self.fractions):
            num_to_remove = len(indices) * frac
            if abs(num_to_remove - int(round(num_to_remove))) > 1e-3:
                raise ValueError("Fraction to remove must be consistent with "
                                 "integer amounts in structure.")
            num_to_remove = int(round(num_to_remove))
            num_remove_dict[tuple(indices)] = num_to_remove
            n = len(indices)
            total_combis += int(round(math.factorial(n) /
                                      math.factorial(num_to_remove) /
                                      math.factorial(n - num_to_remove)))
        self.logger.debug("Total combinations = {}".format(total_combis))
        try:
            # return_ranked_list may be an int-like requesting that many
            # structures; a plain bool falls through to 1 below.
            num_to_return = int(return_ranked_list)
        except ValueError:
            num_to_return = 1
        num_to_return = max(1, num_to_return)
        self.logger.debug("Will return {} best structures."
                          .format(num_to_return))
        # Dispatch on the configured algorithm (see class constants).
        if self.algo == PartialRemoveSitesTransformation.ALGO_FAST:
            all_structures = self._fast_ordering(structure, num_remove_dict,
                                                 num_to_return)
        elif self.algo == PartialRemoveSitesTransformation.ALGO_COMPLETE:
            all_structures = self._complete_ordering(structure, num_remove_dict)
        elif self.algo == PartialRemoveSitesTransformation.ALGO_BEST_FIRST:
            all_structures = self._best_first_ordering(structure,
                                                       num_remove_dict)
        elif self.algo == PartialRemoveSitesTransformation.ALGO_ENUMERATE:
            all_structures = self._enumerate_ordering(structure)
        else:
            raise ValueError("Invalid algo.")
        opt_s = all_structures[0]["structure"]
        return opt_s if not return_ranked_list \
            else all_structures[0:num_to_return]

    def __str__(self):
        return "PartialRemoveSitesTransformation : Indices and fraction" + \
            " to remove = {}, ALGO = {}".format(self.indices, self.algo)

    def __repr__(self):
        return self.__str__()

    @property
    def inverse(self):
        """Return: None"""
        return None

    @property
    def is_one_to_many(self):
        """Return: True"""
        return True
class AddSitePropertyTransformation(AbstractTransformation):
    """Attach the given site properties to a structure."""

    def __init__(self, site_properties):
        """
        Args:
            site_properties (dict): site properties to be added to a structure
        """
        self.site_properties = site_properties

    def apply_transformation(self, structure):
        """Return a copy of ``structure`` with the site properties attached.

        Args:
            structure (Structure): Structure to annotate.

        Returns:
            Structure: copy carrying the configured site properties.
        """
        annotated = structure.copy()
        for prop_name, values in self.site_properties.items():
            annotated.add_site_property(prop_name, values)
        return annotated

    @property
    def inverse(self):
        """Return: None"""
        return None

    @property
    def is_one_to_many(self):
        """Return: False"""
        return False
| 37.752094
| 102
| 0.601828
|
acffe5c2abcbc41850972592777f4324638b83b8
| 2,245
|
py
|
Python
|
app/ubmrcnn/start_ubmrcnn_worker.py
|
LArbys/ublarcvserver
|
02381c937f49a2eab2f754017ab431c3f6fa70d7
|
[
"Apache-2.0"
] | 2
|
2020-07-09T19:34:03.000Z
|
2021-06-21T23:09:23.000Z
|
app/ubmrcnn/start_ubmrcnn_worker.py
|
LArbys/ublarcvserver
|
02381c937f49a2eab2f754017ab431c3f6fa70d7
|
[
"Apache-2.0"
] | null | null | null |
app/ubmrcnn/start_ubmrcnn_worker.py
|
LArbys/ublarcvserver
|
02381c937f49a2eab2f754017ab431c3f6fa70d7
|
[
"Apache-2.0"
] | null | null | null |
import os,sys,logging
from multiprocessing import Process
from UBMRCNNWorker import UBMRCNNWorker
from ublarcvserver import start_broker
"""
Start the broker and worker. Run one client. Useful for tests.
"""
def start_ubmrcnn_worker(broker_address, plane, weight_file,
                         device_id,
                         batch_size,
                         ssh_thru_server, ssh_password):
    """Create a UBMRCNNWorker for one plane, connect it, and run forever.

    Intended for use as a ``multiprocessing.Process`` target; ``run()``
    blocks, so this function does not return in normal operation.

    Args:
        broker_address: address of the broker the worker connects to.
        plane: wire-plane ID this worker serves.
        weight_file: path to the Mask-RCNN weight file for this plane.
        device_id: device identifier forwarded to the worker.
        batch_size: inference batch size forwarded to the worker.
        ssh_thru_server: optional SSH tunnel server (None disables tunneling).
        ssh_password: password for the SSH tunnel, if used.
    """
    # (removed leftover debug print of batch_size and its type)
    worker = UBMRCNNWorker(broker_address, plane, weight_file,
                           batch_size,
                           device_id=device_id,
                           ssh_thru_server=ssh_thru_server,
                           ssh_password=ssh_password,
                           )
    worker.connect()
    print("worker started: ", worker.idname())
    worker.run()
def startup_ubmrcnn_workers(broker_address, weights_files,
                            device_id=0,
                            batch_size=1,
                            nplanes=(0, 1, 2),
                            ssh_thru_server=None, ssh_password=None,
                            start=True):
    """Spawn one daemon worker process per requested wire plane.

    Args:
        broker_address: address of the broker workers connect to.
        weights_files: mapping of plane ID -> weight-file path.
        device_id: device identifier forwarded to each worker.
        batch_size: inference batch size forwarded to each worker.
        nplanes: iterable of plane IDs to serve. Default changed from a
            mutable list to an equivalent tuple to avoid the shared
            mutable-default-argument pitfall.
        ssh_thru_server: optional SSH tunnel server (None disables tunneling).
        ssh_password: password for the SSH tunnel, if used.
        start: when True (default), start the processes before returning.

    Returns:
        list of ``multiprocessing.Process`` objects, started if ``start``.
    """
    # setup the workers (removed leftover debug print of nplanes)
    pworkers = []
    for p in nplanes:
        pworker = Process(target=start_ubmrcnn_worker,
                          args=(broker_address, p, weights_files[p],
                                device_id, batch_size,
                                ssh_thru_server, ssh_password))
        # Daemonize so worker processes die with the parent.
        pworker.daemon = True
        pworkers.append(pworker)
    if start:
        for pworker in pworkers:
            pworker.start()
    return pworkers
if __name__ == "__main__":
    # Demo/test entry point: start a broker and one worker per plane,
    # then block until the user presses ENTER.

    # endpoint:
    endpoint = "tcp://localhost:6005"
    bindpoint = "tcp://*:6005"
    # Hard-coded weight locations for each of the three wire planes.
    weights_dir = "/home/jmills/workdir/ubdl/ublarcvserver/app/ubmrcnn"
    weights_files = {0:weights_dir+"/mcc8_mrcnn_plane0.pth",
                     1:weights_dir+"/mcc8_mrcnn_plane1.pth",
                     2:weights_dir+"/mcc8_mrcnn_plane2.pth"}
    logging.basicConfig(level=logging.DEBUG)
    pbroker = start_broker(bindpoint)
    pworkers = startup_ubmrcnn_workers(endpoint,weights_files,nplanes=[0,1,2])
    print("[ENTER] to quit.")
    # Python 2/3 compatible "wait for ENTER" before exiting.
    if sys.version_info[0] < 3:
        raw_input()
    else:
        input()
| 29.933333
| 78
| 0.570156
|
acffe65fb4d038f2dd4349e609bfbcf75bd044dd
| 8,095
|
py
|
Python
|
mmpose/datasets/datasets/top_down/topdown_onehand10k_dataset.py
|
filipkro/mmpose
|
b4b6eda3fe3c2470ab0e44936f4bf7f82db6d3e4
|
[
"Apache-2.0"
] | 1
|
2020-09-22T03:39:47.000Z
|
2020-09-22T03:39:47.000Z
|
mmpose/datasets/datasets/top_down/topdown_onehand10k_dataset.py
|
filipkro/mmpose
|
b4b6eda3fe3c2470ab0e44936f4bf7f82db6d3e4
|
[
"Apache-2.0"
] | null | null | null |
mmpose/datasets/datasets/top_down/topdown_onehand10k_dataset.py
|
filipkro/mmpose
|
b4b6eda3fe3c2470ab0e44936f4bf7f82db6d3e4
|
[
"Apache-2.0"
] | 1
|
2021-07-13T03:42:27.000Z
|
2021-07-13T03:42:27.000Z
|
import copy as cp
import os
import os.path as osp
from collections import OrderedDict
import json_tricks as json
import numpy as np
from mmpose.core.evaluation.top_down_eval import (keypoint_auc, keypoint_epe,
keypoint_pck_accuracy)
from mmpose.datasets.builder import DATASETS
from .topdown_base_dataset import TopDownBaseDataset
@DATASETS.register_module()
class TopDownOneHand10KDataset(TopDownBaseDataset):
"""OneHand10K dataset for top-down hand pose estimation.
`Mask-pose Cascaded CNN for 2D Hand Pose Estimation from
Single Color Images' TCSVT'2019
More details can be found in the `paper
<https://www.yangangwang.com/papers/WANG-MCC-2018-10.pdf>`_ .
The dataset loads raw features and apply specified transforms
to return a dict containing the image tensors and other information.
OneHand10K keypoint indexes::
0: 'wrist',
1: 'thumb1',
2: 'thumb2',
3: 'thumb3',
4: 'thumb4',
5: 'forefinger1',
6: 'forefinger2',
7: 'forefinger3',
8: 'forefinger4',
9: 'middle_finger1',
10: 'middle_finger2',
11: 'middle_finger3',
12: 'middle_finger4',
13: 'ring_finger1',
14: 'ring_finger2',
15: 'ring_finger3',
16: 'ring_finger4',
17: 'pinky_finger1',
18: 'pinky_finger2',
19: 'pinky_finger3',
20: 'pinky_finger4'
Args:
ann_file (str): Path to the annotation file.
img_prefix (str): Path to a directory where images are held.
Default: None.
data_cfg (dict): config
pipeline (list[dict | callable]): A sequence of data transforms.
test_mode (bool): Store True when building test or
validation dataset. Default: False.
"""
def __init__(self,
ann_file,
img_prefix,
data_cfg,
pipeline,
test_mode=False):
super().__init__(
ann_file, img_prefix, data_cfg, pipeline, test_mode=test_mode)
self.ann_info['flip_pairs'] = []
self.ann_info['use_different_joint_weights'] = False
assert self.ann_info['num_joints'] == 21
self.ann_info['joint_weights'] = \
np.ones((self.ann_info['num_joints'], 1), dtype=np.float32)
self.db = self._get_db(ann_file)
self.image_set = set(x['image_file'] for x in self.db)
self.num_images = len(self.image_set)
print(f'=> num_images: {self.num_images}')
print(f'=> load {len(self.db)} samples')
def _get_db(self, ann_file):
"""Load dataset."""
with open(ann_file, 'r') as f:
data = json.load(f)
tmpl = dict(
image_file=None,
center=None,
scale=None,
rotation=0,
joints_3d=None,
joints_3d_visible=None,
bbox=None,
dataset='onehand10k')
imid2info = {x['id']: x for x in data['images']}
num_joints = self.ann_info['num_joints']
gt_db = []
for anno in data['annotations']:
newitem = cp.deepcopy(tmpl)
image_id = anno['image_id']
newitem['image_file'] = os.path.join(
self.img_prefix, imid2info[image_id]['file_name'])
if max(anno['keypoints']) == 0:
continue
joints_3d = np.zeros((num_joints, 3), dtype=np.float32)
joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)
for ipt in range(num_joints):
joints_3d[ipt, 0] = anno['keypoints'][ipt * 3 + 0]
joints_3d[ipt, 1] = anno['keypoints'][ipt * 3 + 1]
joints_3d[ipt, 2] = 0
t_vis = min(anno['keypoints'][ipt * 3 + 2], 1)
joints_3d_visible[ipt, :] = (t_vis, t_vis, 0)
center, scale = self._xywh2cs(*anno['bbox'][:4])
newitem['center'] = center
newitem['scale'] = scale
newitem['joints_3d'] = joints_3d
newitem['joints_3d_visible'] = joints_3d_visible
newitem['bbox'] = anno['bbox'][:4]
gt_db.append(newitem)
return gt_db
def _xywh2cs(self, x, y, w, h):
"""This encodes bbox(x,y,w,w) into (center, scale)
Args:
x, y, w, h
Returns:
center (np.ndarray[float32](2,)): center of the bbox (x, y).
scale (np.ndarray[float32](2,)): scale of the bbox w & h.
"""
aspect_ratio = self.ann_info['image_size'][0] / self.ann_info[
'image_size'][1]
center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)
if (not self.test_mode) and np.random.rand() < 0.3:
center += 0.4 * (np.random.rand(2) - 0.5) * [w, h]
if w > aspect_ratio * h:
h = w * 1.0 / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
# pixel std is 200.0
scale = np.array([w / 200.0, h / 200.0], dtype=np.float32)
scale = scale * 1.25
return center, scale
def evaluate(self, outputs, res_folder, metric='PCK', **kwargs):
"""Evaluate OneHand10K keypoint results. metric (str | list[str]):
Metrics to be evaluated. Options are 'PCK', 'AUC', 'EPE'.
'PCK': ||pre[i] - joints_3d[i]|| < 0.2 * max(w, h)
'AUC': area under curve
'EPE': end-point error
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['PCK', 'AUC', 'EPE']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
res_file = os.path.join(res_folder, 'result_keypoints.json')
kpts = []
for preds, boxes, image_path in outputs:
str_image_path = ''.join(image_path)
image_id = int(osp.basename(osp.splitext(str_image_path)[0]))
kpts.append({
'keypoints': preds[0].tolist(),
'center': boxes[0][0:2].tolist(),
'scale': boxes[0][2:4].tolist(),
'area': float(boxes[0][4]),
'score': float(boxes[0][5]),
'image_id': image_id,
})
self._write_keypoint_results(kpts, res_file)
info_str = self._report_metric(res_file, metrics)
name_value = OrderedDict(info_str)
return name_value
def _write_keypoint_results(self, keypoints, res_file):
"""Write results into a json file."""
with open(res_file, 'w') as f:
json.dump(keypoints, f, sort_keys=True, indent=4)
    def _report_metric(self, res_file, metrics):
        """Keypoint evaluation.
        Report PCK, AUC or EPE.

        Args:
            res_file: path to the JSON file written by
                _write_keypoint_results, in the same order as self.db.
            metrics: list of metric names to compute.

        Returns:
            list of (metric_name, value) tuples.
        """
        info_str = []
        with open(res_file, 'r') as fin:
            preds = json.load(fin)
        # Predictions must line up one-to-one with the ground-truth db.
        assert len(preds) == len(self.db)
        outputs = []
        gts = []
        for pred, item in zip(preds, self.db):
            outputs.append(pred['keypoints'])
            gts.append(item['joints_3d'])
        # Drop the last per-keypoint column (score / visibility), keeping
        # only the coordinate columns for the distance-based metrics.
        outputs = np.array(outputs)[:, :, :-1]
        gts = np.array(gts)[:, :, :-1]
        if 'PCK' in metrics:
            hit = 0
            exist = 0
            for pred, item in zip(preds, self.db):
                bbox = np.array(item['bbox'])
                # PCK threshold: 20% of the larger bbox side.
                threshold = np.max(bbox[2:]) * 0.2
                # NOTE(review): assumes keypoint_pck_accuracy returns
                # (per-keypoint hit flags h, avg, valid count e) -- confirm
                # against the project's evaluation helpers.
                h, _, e = keypoint_pck_accuracy(
                    np.array(pred['keypoints'])[None, :, :-1],
                    np.array(item['joints_3d'])[None, :, :-1], 1,
                    np.array([[threshold, threshold]]))
                hit += len(h[h > 0])
                exist += e
            pck = hit / exist
            info_str.append(('PCK', pck))
        if 'AUC' in metrics:
            info_str.append(('AUC', keypoint_auc(outputs, gts, 30)))
        if 'EPE' in metrics:
            info_str.append(('EPE', keypoint_epe(outputs, gts)))
        return info_str
| 32.641129
| 77
| 0.542434
|
acffe6b015a6a2c9fcc4095952707fdbfc767a71
| 14,619
|
py
|
Python
|
facedancer/backends/greathost.py
|
hugmyndakassi/Facedancer
|
e688fe61dc34087db333432394e1f90e52ac3794
|
[
"BSD-3-Clause"
] | 345
|
2019-03-12T23:36:36.000Z
|
2022-02-02T00:26:27.000Z
|
facedancer/backends/greathost.py
|
hugmyndakassi/Facedancer
|
e688fe61dc34087db333432394e1f90e52ac3794
|
[
"BSD-3-Clause"
] | 35
|
2019-03-24T19:06:41.000Z
|
2021-12-28T07:38:49.000Z
|
facedancer/backends/greathost.py
|
hugmyndakassi/Facedancer
|
e688fe61dc34087db333432394e1f90e52ac3794
|
[
"BSD-3-Clause"
] | 59
|
2019-04-18T14:22:47.000Z
|
2022-01-18T14:34:50.000Z
|
# GreatDancerHostApp.py
#
# Host support for GreatFET-base devices
import sys
import time
import codecs
import struct
import logging
from ..core import *
class GreatDancerHostApp(FacedancerUSBHost):
    """
    Class that represents a GreatFET-based USB host.

    All host-side operations are carried out by issuing USB vendor
    requests to the GreatFET, which runs the actual host controller.
    """
    app_name = "GreatDancer Host"
    # Indices of the status registers readable via USBHOST_GET_STATUS.
    PORT_STATUS_REG = 0
    READ_STATUS_REG = 1
    WRITE_STATUS_REG = 2
    # Bit masks / shifts for fields of the port status register.
    PORT_STATUS_REGISTER_CONNECTED_MASK = (1 << 0)
    PORT_STATUS_REGISTER_ENABLED_MASK = (1 << 2)
    PORT_STATUS_REGISTER_POWERED_MASK = (1 << 12)
    PORT_STATUS_REGISTER_SPEED_SHIFT = 26
    PORT_STATUS_REGISTER_SPEED_MASK = 0b11
    PORT_STATUS_REGISTER_LINE_STATE_SHIFT = 10
    PORT_STATUS_REGISTER_LINE_STATE_MASK = 0b11
    # Human-readable names for the USB differential-pair line states.
    LINE_STATE_NAMES = {
        0: "SE0",
        1: "J",
        2: "K",
        3: "No device / SE1"
    }
    LINE_STATE_SE0 = 0
    LINE_STATE_J = 1
    LINE_STATE_K = 2
    LINE_STATE_SE1 = 3
    # FaceDancer-level device speed constants.
    DEVICE_SPEED_LOW = 0
    DEVICE_SPEED_FULL = 1
    DEVICE_SPEED_HIGH = 2
    DEVICE_SPEED_NONE = 3
    # Maps the raw speed field of the port status register to the
    # DEVICE_SPEED_* constants above.
    STATUS_REG_SPEED_VALUES = {
        0: DEVICE_SPEED_FULL,
        1: DEVICE_SPEED_LOW,
        2: DEVICE_SPEED_HIGH,
        3: DEVICE_SPEED_NONE
    }
    DEVICE_SPEED_NAMES = {
        DEVICE_SPEED_FULL: "Full speed",
        DEVICE_SPEED_LOW: "Low speed",
        DEVICE_SPEED_HIGH: "High speed",
        DEVICE_SPEED_NONE: "Disconnected"
    }
    # NOTE(review): defined but not referenced in this class; presumably a
    # speed-encoding translation for outgoing requests -- verify before use.
    SPEED_REQUESTS = {
        0: 1,
        1: 0,
        2: 2,
        3: 3
    }
    # Endpoint directions
    # NOTE(review): these are inverted with respect to the USB convention
    # (bEndpointAddress bit 7 set normally means IN); presumably this
    # matches the GreatFET host firmware's encoding -- confirm against the
    # firmware before changing.
    DIRECTION_IN = 0x00
    DIRECTION_OUT = 0x80
    # Endpoint types
    ENDPOINT_TYPE_CONTROL = 0
    # Packet IDs
    PID_SETUP = 2
    PID_OUT = 0
    PID_IN = 1
    @classmethod
    def appropriate_for_environment(cls, backend_name):
        """
        Determines if the current environment seems appropriate
        for using the GreatDancer backend.

        backend_name -- The backend the user explicitly requested, or a
            falsy value if no backend was requested.
        """
        # Check: if we have a backend name other than greatfet,
        # the user is trying to use something else. Abort!
        if backend_name and backend_name != "greatfet":
            return False
        # If we're not explicitly trying to use something else,
        # see if there's a connected GreatFET.
        try:
            import greatfet
            greatfet.GreatFET()
            return True
        except ImportError:
            logging.info("Skipping GreatFET-based devices, as the greatfet python module isn't installed.")
            return False
        except:
            # Any other failure (e.g. no board attached) just means this
            # backend isn't usable right now.
            return False
    def __init__(self, verbose=0, quirks=[], autoconnect=True, device=None):
        """
        Sets up a GreatFET-based host connection.

        verbose -- Verbosity level; values > 4 enable extra read logging.
        quirks -- Currently unused. NOTE(review): mutable default argument;
            harmless while unused, but should become None if it is ever read.
        autoconnect -- If true, immediately enable the host port / VBUS.
        device -- An existing GreatFET handle to reuse; a new one is opened
            if omitted.
        """
        import greatfet
        if device is None:
            device = greatfet.GreatFET()
        # Store our input args.
        # TODO: pull into base class
        self.device = device
        self.verbose = verbose
        # Grab a reference to our protocol definitions.
        self.vendor_requests = greatfet.protocol.vendor_requests
        if autoconnect:
            self.connect()
    def connect(self):
        """
        Sets up our host to talk to the device, including turning on VBUS.
        """
        self.device.comms._vendor_request_out(self.vendor_requests.USBHOST_CONNECT)
    def bus_reset(self, delay=0.500):
        """
        Issues a "bus reset", requesting that the downstream device reset itself.
        delay -- The amount of time, in seconds, to wait before or after the
                reset request. To be compliant, this should be omitted, or set
                to 0.1s.
                NOTE(review): the docstring recommends 0.1s but the default
                is 0.5s -- confirm which is intended.
        """
        # Note: we need to wait a reset delay before and after the bus reset.
        # This allows the host to initialize _and_ then allows the device to settle.
        time.sleep(delay)
        self.device.comms._vendor_request_out(self.vendor_requests.USBHOST_BUS_RESET)
        time.sleep(delay)
    @staticmethod
    def _decode_usb_register(transfer_result):
        """
        Decodes a raw 32-bit register value from a form encoded
        for transit as a USB control request.
        transfer_result: The value returned by the vendor request
            (little-endian byte sequence).
        returns: The raw integer value of the given register.
        """
        # Reverse the little-endian bytes, then parse as big-endian hex.
        status_hex = codecs.encode(transfer_result[::-1], 'hex')
        return int(status_hex, 16)
    def _fetch_status_register(self, register_number):
        """
        Fetches a status register from the GreatDancer, and returns it
        as an integer.
        """
        raw_status = self.device.comms._vendor_request_in(self.vendor_requests.USBHOST_GET_STATUS, index=register_number, length=4)
        return self._decode_usb_register(raw_status)
    def _port_status(self):
        """ Returns the raw state of the port status register. """
        return self._fetch_status_register(self.PORT_STATUS_REG)
    def _get_read_status(self):
        """ Returns the raw state of the read status word. """
        return self._fetch_status_register(self.READ_STATUS_REG)
    def _get_write_status(self):
        """ Returns the raw state of the write status word. """
        return self._fetch_status_register(self.WRITE_STATUS_REG)
    def device_is_connected(self):
        """ Returns true iff a given device is connected. """
        status = self._port_status()
        return bool(status & self.PORT_STATUS_REGISTER_CONNECTED_MASK)
    def port_is_enabled(self):
        """ Returns true iff the FaceDancer host port's enabled. """
        status = self._port_status()
        return bool(status & self.PORT_STATUS_REGISTER_ENABLED_MASK)
    def port_is_powered(self):
        """ Returns true iff the FaceDancer host port's powered. """
        status = self._port_status()
        return bool(status & self.PORT_STATUS_REGISTER_POWERED_MASK)
    def current_device_speed(self, as_string=False):
        """ Returns the speed of the connected device
        as_string -- If true, returns the speed as a string for printing; otherwise
            returns a DEVICE_SPEED_* constant.
        """
        port_speed_raw = \
            (self._port_status() >> self.PORT_STATUS_REGISTER_SPEED_SHIFT) & \
            self.PORT_STATUS_REGISTER_SPEED_MASK
        # Translate from a GreatFET format device speed to a FaceDancer one.
        port_speed = self.STATUS_REG_SPEED_VALUES[port_speed_raw]
        if as_string:
            port_speed = self.DEVICE_SPEED_NAMES[port_speed]
        return port_speed
    def current_line_state(self, as_string=False):
        """ Returns the current state of the USB differential pair
        as_string -- If true, returns the speed as a string for printing; otherwise
            returns a LINE_STATE_* constant.
        """
        line_state = \
            (self._port_status() >> self.PORT_STATUS_REGISTER_LINE_STATE_SHIFT) & \
            self.PORT_STATUS_REGISTER_LINE_STATE_MASK
        if as_string:
            line_state = self.LINE_STATE_NAMES[line_state]
        return line_state
    def set_up_endpoint(self, endpoint_address_or_object, endpoint_type=None, max_packet_size=None,
                        device_address=None, endpoint_speed=None, handle_data_toggle=None,
                        is_control_endpoint=None):
        """
        Sets up an endpoint for use. Can be used to initialize an endpoint or to update
        its parameters. Two forms exist:
        endpoint_object -- a USBEndpoint object with the parameters to be populated
        or
        endpoint_address -- the address of the endpoint to be setup; including the direction bit
        endpoint_type -- one of the ENDPOINT_TYPE constants that specifies the transfer mode on
            the endpoint_address
        max_packet_size -- the maximum packet size to be communicated on the given endpoint
        device_address -- the address of the device to be communicated with; if not provided,
            the last address will be used
        endpoint_speed -- the speed of the packets to be communicated on the endpoint; should be a
            DEVICE_SPEED_* constant; if not provided, the last device's speed will be used
        handle_data_toggle -- true iff the hardware should automatically handle selection of data
            packet PIDs
        is_control_endpoint -- true iff the given packet is a for a control endpoint
        TODO: eventually support hubs / more than one device?
        """
        if isinstance(endpoint_address_or_object, USBEndpoint):
            endpoint = endpoint_address_or_object
            # Figure out the endpoint address from its direction and number.
            endpoint_address = endpoint.number
            if endpoint.direction == endpoint.direction_in:
                # NOTE(review): DIRECTION_IN is 0x00 here, so this OR is a
                # no-op -- confirm the intended direction-bit encoding.
                endpoint_address |= self.DIRECTION_IN
            self.set_up_endpoint(endpoint_address, endpoint.transfer_type, endpoint.max_packet_size)
            return
        endpoint_address = endpoint_address_or_object
        # Low 7 bits carry the endpoint number; bit 7 is the direction.
        endpoint_number = endpoint_address & 0x7f
        if endpoint_number > 15:
            raise ValueError("cannot have an endpoint with a number > 15!")
        # Figure out defaults for any arguments not provided.
        if device_address is None:
            device_address = self.last_device_address
        if endpoint_speed is None:
            endpoint_speed = self.last_device_speed
        if is_control_endpoint is None:
            is_control_endpoint = (endpoint_number == 0)
        if handle_data_toggle is None:
            handle_data_toggle = True if not is_control_endpoint else False
        # Figure out which endpoint schedule to use.
        # FIXME: support more than the asynchronous schedule
        endpoint_schedule = 0
        # TODO: do we translate speed requests, here?
        # Issue the configuration packet.
        packet = struct.pack("<BBBBBHB", endpoint_schedule, device_address, endpoint_address,
                             endpoint_speed, is_control_endpoint, max_packet_size, handle_data_toggle)
        self.device.comms._vendor_request_out(self.vendor_requests.USBHOST_SET_UP_ENDPOINT, data=packet)
    def initialize_control_endpoint(self, device_address=None, device_speed=None, max_packet_size=None):
        """
        Set up the device's control endpoint, so we can use it for e.g. enumeration.

        NOTE(review): device_address is accepted but never used here; the
        set_up_endpoint defaults apply instead -- confirm this is intended.
        """
        # If not overridden, apply the specification default maximum packet size.
        # TODO: support high speed devices, here?
        if max_packet_size is None:
            max_packet_size = 8 if device_speed == self.DEVICE_SPEED_LOW else 64
        # Set up both directions on the control endpoint.
        self.set_up_endpoint(0 | self.DIRECTION_OUT, self.ENDPOINT_TYPE_CONTROL, max_packet_size)
        self.set_up_endpoint(0 | self.DIRECTION_IN, self.ENDPOINT_TYPE_CONTROL, max_packet_size)
    def send_on_endpoint(self, endpoint_number, data, is_setup=False,
                         blocking=True, data_packet_pid=0):
        """
        Sends a block of data on the provided endpoints.
        endpoint_number -- The endpoint number on which to send.
        data -- The data to be transmitted.
        is_setup -- True iff this transfer should begin with a SETUP token.
        blocking -- True iff this transaction should wait for the transaction to complete.
        data_packet_pid -- The data packet PID to use (1 or 0). Ignored if the endpoint is set to automatically
                alternate data PIDs.
        raises an IOError on a communications error or stall
        """
        # Determine the PID token with which to start the request...
        pid_token = self.PID_SETUP if is_setup else self.PID_OUT
        # Issue the actual send itself.
        # TODO: validate length
        self.device.comms._vendor_request_out(self.vendor_requests.USBHOST_SEND_ON_ENDPOINT,
                index=endpoint_number, value=(data_packet_pid << 8) | pid_token,
                data=data)
        # ... and if we're blocking, also finish it.
        if blocking:
            complete = False
            stalled = False
            # Wait until we get a complete flag in the status register.
            # XXX: This isn't entirely correct-- it'll clear too much status.
            # NOTE(review): busy-wait with no timeout; a wedged transfer
            # will hang here indefinitely.
            while not complete:
                status = self._get_write_status()
                stalled = (status >> endpoint_number) & 0x1
                complete = (status >> (endpoint_number + 16)) & 0x1
            if stalled:
                raise IOError("Stalled!")
    def read_from_endpoint(self, endpoint_number, expected_read_size=64, data_packet_pid=0):
        """
        Reads a block of data from the provided endpoint.
        endpoint_number -- The endpoint number on which to read.
        expected_read_size -- The expected amount of data to be read.
        data_packet_pid -- The data packet PID to use (1 or 0).
            Ignored if the endpoint is set to automatically alternate data PIDs.
        raises an IOError on a communications error or stall
        """
        # Start the request...
        self.device.comms._vendor_request_out(self.vendor_requests.USBHOST_START_NONBLOCKING_READ,
                index=(data_packet_pid << 8) | endpoint_number, value=expected_read_size)
        # ... and if we're blocking, also finish it.
        complete = False
        stalled = False
        # Wait until we get a complete flag in the status register.
        # XXX: This isn't entirely correct-- it'll clear too much status.
        # NOTE(review): busy-wait with no timeout, as in send_on_endpoint.
        while not complete:
            status = self._get_read_status()
            stalled = (status >> endpoint_number) & 0x1
            complete = (status >> (endpoint_number + 16)) & 0x1
        if stalled:
            raise IOError("Stalled!")
        # Figure out how much to read.
        raw_length = self.device.comms._vendor_request_in(self.vendor_requests.USBHOST_GET_NONBLOCKING_LENGTH,
                index=endpoint_number, length=4)
        length = self._decode_usb_register(raw_length)
        if self.verbose > 4:
            print("Supposedly, we've got {} bytes of data to read".format(length))
        # If there's no data available, we don't need to waste time reading anything.
        if length == 0:
            return b''
        # Otherwise, read the data from the endpoint and return it.
        data = self.device.comms._vendor_request_in(self.vendor_requests.USBHOST_FINISH_NONBLOCKING_READ,
                index=endpoint_number, length=length)
        return data.tobytes()
| 34.724466
| 131
| 0.643956
|
acffe813d764271fcaef8b717ecd671986397dad
| 6,062
|
py
|
Python
|
test/integration/ggrc_basic_permissions/test_assignable.py
|
Smotko/ggrc-core
|
b3abb58b24e7559960d71a94ba79c75539e7fe29
|
[
"Apache-2.0"
] | null | null | null |
test/integration/ggrc_basic_permissions/test_assignable.py
|
Smotko/ggrc-core
|
b3abb58b24e7559960d71a94ba79c75539e7fe29
|
[
"Apache-2.0"
] | 12
|
2015-01-08T14:50:19.000Z
|
2017-11-29T19:37:53.000Z
|
test/integration/ggrc_basic_permissions/test_assignable.py
|
Smotko/ggrc-core
|
b3abb58b24e7559960d71a94ba79c75539e7fe29
|
[
"Apache-2.0"
] | 1
|
2015-01-08T13:25:09.000Z
|
2015-01-08T13:25:09.000Z
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""
Test Assignable RBAC
"""
from ggrc.models import all_models
from integration.ggrc import TestCase
from integration.ggrc.api_helper import Api
from integration.ggrc.generator import Generator
from integration.ggrc.generator import ObjectGenerator
class TestReader(TestCase):
  """Test Assignable RBAC

  Verifies that assignees on an Assessment gain CRUD access to it (and
  read access to mapped objects), while non-assignees keep only the
  access granted by their system role.
  """
  def setUp(self):
    TestCase.setUp(self)
    self.generator = Generator()
    self.api = Api()
    self.object_generator = ObjectGenerator()
    self.init_users()
    self.init_assignable()
  def init_users(self):
    """ Init users needed by the test cases """
    # (name, system role) pairs for every user the tests act as.
    users = [("creator", "Creator"), ("reader", "Reader"),
             ("editor", "Editor"), ("admin", "gGRC Admin")]
    self.users = {}
    for (name, role) in users:
      _, user = self.object_generator.generate_person(
          data={"name": name}, user_role=role)
      self.users[name] = user
  def init_assignable(self):
    """Creates the assignable object used by all the tests"""
    # The Assessment is created as editor, so editor owns it by default.
    self.api.set_user(self.users["editor"])
    response = self.api.post(all_models.Assessment, {
        "assessment": {
            "title": "Assessment",
            "context": None,
        }
    })
    obj_id = response.json.get("assessment").get("id")
    self.assertEqual(response.status_code, 201, "Error setting up Assessment")
    self.obj_json = response.json
    self.obj = self.generator.get_object(all_models.Assessment, obj_id)
  def _post_relationship(self, user, obj_id):
    """Helper method for creating assignees on an object

    Posts an Assessment -> Person relationship carrying the
    AssigneeType attribute, acting as whichever user is currently set
    on self.api.
    """
    return self.api.post(all_models.Relationship, {
        "relationship": {"source": {
            "id": obj_id,
            "type": "Assessment"
        }, "destination": {
            "id": user.id,
            "type": "Person"
        }, "attrs": {
            "AssigneeType": "Creator"
        }, "context": None},
    })
  def test_basic_operations_with_no_assignee(self):
    """Editor creates an Assessment, but doesn't assign Reader/Creator as
    assignee. Reader should have Read access, Creator should have no access
    """
    # Reader should have read access, but shouldn't be allowed to edit or
    # create another assignee
    self.api.set_user(self.users["reader"])
    response = self.api.get(all_models.Assessment, self.obj.id)
    self.assertEqual(response.status_code, 200)
    response = self.api.put(self.obj, self.obj_json)
    self.assertEqual(response.status_code, 403)
    response = self._post_relationship(self.users["reader"], self.obj.id)
    self.assertEqual(response.status_code, 403)
    # Creator should have no access. We skip the put request because we can't
    # get the object etag.
    self.api.set_user(self.users["creator"])
    response = self.api.get(all_models.Assessment, self.obj.id)
    self.assertEqual(response.status_code, 403)
    response = self._post_relationship(self.users["reader"], self.obj.id)
    self.assertEqual(response.status_code, 403)
  def test_basic_operations_with_assignee(self):
    """Test if Reader/Creator have CRUD access once they become assignees"""
    # Admin adds reader as an assignee
    self.api.set_user(self.users["admin"])
    response = self._post_relationship(self.users["reader"], self.obj.id)
    self.assertEqual(response.status_code, 201)
    # Reader is now allowed to update the object
    self.api.set_user(self.users["reader"])
    response = self.api.get(all_models.Assessment, self.obj.id)
    self.assertEqual(response.status_code, 200)
    response = self.api.put(self.obj, response.json)
    self.assertEqual(response.status_code, 200)
    # Reader adds creator as an assignee
    response = self._post_relationship(self.users["creator"], self.obj.id)
    self.assertEqual(response.status_code, 201)
    # Creator now has CRUD access
    self.api.set_user(self.users["creator"])
    response = self.api.get(all_models.Assessment, self.obj.id)
    self.assertEqual(response.status_code, 200)
    # NOTE(review): the result of this PUT is never asserted; a 200 check
    # appears to be missing here.
    response = self.api.put(self.obj, response.json)
    # Creator should even be allowed to add new assignees
    response = self._post_relationship(self.users["admin"], self.obj.id)
    self.assertEqual(response.status_code, 201)
  def test_readability_of_mapped_objects(self):
    """Test if assignees get Read access on all mapped objects"""
    # Editor creates a System object and maps it to the assignable object
    self.api.set_user(self.users["editor"])
    response = self.api.post(all_models.System, {
        "system": {
            "title": "System",
            "context": None,
        }
    })
    system_id = response.json.get("system").get("id")
    system = self.generator.get_object(all_models.System, system_id)
    self.api.post(all_models.Relationship, {
        "relationship": {"source": {
            "id": self.obj.id,
            "type": "Assessment"
        }, "destination": {
            "id": system_id,
            "type": "System"
        }, "context": None},
    })
    # Since creator is not an assignee she should not have access to any of the
    # two objects
    self.api.set_user(self.users["creator"])
    response = self.api.get(all_models.Assessment, self.obj.id)
    self.assertEqual(response.status_code, 403)
    response = self.api.get(all_models.System, system_id)
    self.assertEqual(response.status_code, 403)
    # Editor adds creator as an assignee
    self.api.set_user(self.users["editor"])
    # NOTE(review): the status of this relationship POST is not asserted.
    response = self._post_relationship(self.users["creator"], self.obj.id)
    # Creator should now have read access on the mapped object
    self.api.set_user(self.users["creator"])
    response = self.api.get(all_models.System, system_id)
    self.assertEqual(response.status_code, 200)
    # But he should still not be allowed to update
    response = self.api.put(system, response.json)
    self.assertEqual(response.status_code, 403)
| 37.652174
| 79
| 0.679974
|
acffe8140862e2345ec7dec2801da427615ba100
| 438
|
py
|
Python
|
EcoApp/EcoApp/cor/migrations/0018_auto_20200524_0331.py
|
palonsovazquez/EcoAppApi
|
a47c593c782f881858e2519d4a52c09a0a8b2cef
|
[
"Apache-2.0"
] | null | null | null |
EcoApp/EcoApp/cor/migrations/0018_auto_20200524_0331.py
|
palonsovazquez/EcoAppApi
|
a47c593c782f881858e2519d4a52c09a0a8b2cef
|
[
"Apache-2.0"
] | null | null | null |
EcoApp/EcoApp/cor/migrations/0018_auto_20200524_0331.py
|
palonsovazquez/EcoAppApi
|
a47c593c782f881858e2519d4a52c09a0a8b2cef
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.0.6 on 2020-05-24 01:31
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: makes Component.code an
    # auto-created unique integer primary key.
    dependencies = [
        ('cor', '0017_auto_20200524_0318'),
    ]
    operations = [
        migrations.AlterField(
            model_name='component',
            name='code',
            # NOTE(review): unique=True is redundant on a primary key, and
            # auto_created=True on a plain IntegerField is unusual -- verify
            # against the current model definition before editing.
            field=models.IntegerField(auto_created=True, primary_key=True, serialize=False, unique=True),
        ),
    ]
| 23.052632
| 105
| 0.625571
|
acffeb5af8e178fd0607e676ee7ff1cb9011b04d
| 1,648
|
py
|
Python
|
tools/gitmirror_conf_generate.py
|
saripurigopi/fgci-ansible
|
25eb4e95346b38f4ca31bf4911c90a1bedf901ee
|
[
"MIT"
] | null | null | null |
tools/gitmirror_conf_generate.py
|
saripurigopi/fgci-ansible
|
25eb4e95346b38f4ca31bf4911c90a1bedf901ee
|
[
"MIT"
] | null | null | null |
tools/gitmirror_conf_generate.py
|
saripurigopi/fgci-ansible
|
25eb4e95346b38f4ca31bf4911c90a1bedf901ee
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Parse requirements.yaml and generate a git-mirror config file and a
# requirements.yml that uses the mirror instead of pulling directly.
import yaml
import urlparse
import sys
import time
def getreq(fname):
    """Load and return the parsed requirements YAML from *fname*."""
    with open(fname) as stream:
        return yaml.safe_load(stream)
def genreq(req_in, mirror_ip, fname):
    """Write a requirements file that points every repo at the local mirror.

    req_in -- parsed requirements: a list of dicts, each with at least a
        'src' URL plus arbitrary extra keys (version, name, ...).
    mirror_ip -- address of the git-mirror host; repos are rewritten to
        http://<mirror_ip>:8080/<netloc><path>.
    fname -- output path for the rewritten requirements YAML.
    """
    # Import lazily so the script works on both Python 2 (urlparse) and
    # Python 3 (urllib.parse exposes the same urlsplit/urlunsplit API).
    try:
        import urlparse
    except ImportError:
        import urllib.parse as urlparse
    with open(fname, 'w') as f:
        f.write('---\n# Generated by %s on %s\n' % (sys.argv[0], time.ctime()))
        for repo in req_in:
            up = urlparse.urlsplit(repo['src'])
            np = up.netloc + up.path
            un = ('http', mirror_ip + ':8080', np, '', '')
            nurl = urlparse.urlunsplit(un)
            f.write('\n- src: ' + nurl + '\n')
            for k in repo:
                if k == 'src':
                    continue
                # str() guards against non-string YAML scalars (e.g. a
                # numeric "version: 1.2"), which previously raised a
                # TypeError on concatenation.
                f.write(' ' + k + ': ' + str(repo[k]) + '\n')
def genmirror_conf(req_in, fname, basepath):
    """Emit a git-mirror TOML config listing every requirement repo.

    req_in -- parsed requirements (list of dicts with a 'src' URL).
    fname -- output path for the git-mirror config.
    basepath -- value for the BasePath setting.
    """
    chunks = [
        '# git-mirror config generated by %s on %s\n' % (sys.argv[0], time.ctime()),
        'BasePath = "%s"\n' % basepath,
        # Hackety-hack, lets mirror the fgci-ansible repo as well
        '\n[[Repo]]\nOrigin = "https://github.com/CSCfi/fgci-ansible.git"\n',
    ]
    chunks.extend('\n[[Repo]]\nOrigin = "%s"\n' % repo['src'] for repo in req_in)
    with open(fname, 'w') as out:
        out.writelines(chunks)
if __name__ == '__main__':
    # CLI: requirements.yml mirror_ip requirements_out.yml gitmirror.toml basepath
    import sys
    if len(sys.argv) != 6:
        # NOTE: Python 2 print statement -- this script is Python 2 only
        # (it also imports the Python 2 ``urlparse`` module at the top).
        print "Usage: %s requirements.yml mirror_ip requirements_out.yml gitmirror.toml basepath" % sys.argv[0]
        sys.exit(1)
    m = getreq(sys.argv[1])                      # parsed requirements.yml
    genreq(m, sys.argv[2], sys.argv[3])          # mirror-pointing requirements
    genmirror_conf(m, sys.argv[4], sys.argv[5])  # git-mirror TOML config
| 35.06383
| 111
| 0.564927
|
acffee77b8ecae144f5b041277cef10d831238c2
| 13,924
|
py
|
Python
|
log_casp_inh/model_587.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_casp_inh/model_587.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_casp_inh/model_587.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('C6A', ['C8pro'])
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM'])
Monomer('Ligand', ['Receptor'])
Monomer('C6pro', ['C3A'])
Monomer('ParpU', ['C3A'])
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('BidM', ['BaxM'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('Xiap', ['SmacC', 'C3A'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C3ub')
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('C3pro', ['C8A'])
Monomer('SmacM', ['BaxA'])
Monomer('SmacC', ['Xiap'])
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('ParpC')
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('C6A_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('Ligand_0', 1000.0)
Parameter('C6pro_0', 100.0)
Parameter('ParpU_0', 1000000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('BaxM_0', 40000.0)
Parameter('C8A_0', 0.0)
Parameter('Xiap_0', 146750.0)
Parameter('Receptor_0', 100.0)
Parameter('C3ub_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('SmacM_0', 100000.0)
Parameter('SmacC_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('ParpC_0', 0.0)
Observable('C6A_obs', C6A())
Observable('BaxA_obs', BaxA())
Observable('Ligand_obs', Ligand())
Observable('C6pro_obs', C6pro())
Observable('ParpU_obs', ParpU())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('BidM_obs', BidM())
Observable('BaxM_obs', BaxM())
Observable('C8A_obs', C8A())
Observable('Xiap_obs', Xiap())
Observable('Receptor_obs', Receptor())
Observable('C3ub_obs', C3ub())
Observable('Fadd_obs', Fadd())
Observable('C3pro_obs', C3pro())
Observable('SmacM_obs', SmacM())
Observable('SmacC_obs', SmacC())
Observable('C8pro_obs', C8pro())
Observable('ParpC_obs', ParpC())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(C6A(C8pro=None), C6A_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), BaxA_0)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(C6pro(C3A=None), C6pro_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(Xiap(SmacC=None, C3A=None), Xiap_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C3ub(), C3ub_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(C3pro(C8A=None), C3pro_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(ParpC(), ParpC_0)
| 85.423313
| 598
| 0.808532
|
acffeea74f00a14a8e25da1d2d2d31fec2cfd5b5
| 8,821
|
py
|
Python
|
telegraph/api.py
|
AbirHasan2005/telegraph
|
428002dce04db57cd96fd406e8917c896ed45673
|
[
"MIT"
] | 1
|
2021-12-05T19:14:54.000Z
|
2021-12-05T19:14:54.000Z
|
telegraph/api.py
|
AbirHasan2005/telegraph
|
428002dce04db57cd96fd406e8917c896ed45673
|
[
"MIT"
] | null | null | null |
telegraph/api.py
|
AbirHasan2005/telegraph
|
428002dce04db57cd96fd406e8917c896ed45673
|
[
"MIT"
] | 2
|
2021-10-17T13:12:02.000Z
|
2021-12-30T04:07:26.000Z
|
# -*- coding: utf-8 -*-
import json
import requests
from .exceptions import TelegraphException
from .utils import html_to_nodes, nodes_to_html
class TelegraphApi(object):
    """Low-level Telegraph API client.

    Wraps HTTP access to https://api.telegra.ph and raises
    :class:`TelegraphException` when the API reports an error.

    :param access_token: Telegraph account access token
    :type access_token: str
    """
    __slots__ = ('access_token', 'session')

    def __init__(self, access_token=None):
        self.access_token = access_token
        self.session = requests.Session()

    def method(self, method, values=None, path=''):
        # Work on a copy so the caller's dict is never mutated.
        payload = {} if values is None else values.copy()
        # Inject the stored token unless the caller supplied one explicitly.
        if self.access_token and 'access_token' not in payload:
            payload['access_token'] = self.access_token
        url = 'https://api.telegra.ph/{}/{}'.format(method, path)
        response = self.session.post(url, payload).json()
        if not response.get('ok'):
            raise TelegraphException(response.get('error'))
        return response['result']
class Telegraph(object):
    """High-level helper over the Telegraph HTTP API.

    :param access_token: Telegraph access token
    """
    __slots__ = ('_telegraph',)

    def __init__(self, access_token=None):
        self._telegraph = TelegraphApi(access_token)

    def get_access_token(self):
        """Return the access token currently used by this client."""
        return self._telegraph.access_token

    def create_account(self, short_name, author_name=None, author_url=None,
                       replace_token=True):
        """Create a new Telegraph account.

        :param short_name: Account name shown above the "Edit/Publish"
                           button on Telegra.ph; other users never see it
        :param author_name: Default author name for new articles
        :param author_url: Default profile link shown under article titles;
                           can be any link, not necessarily Telegram
        :param replace_token: When True, adopt the new account's token
        """
        account = self._telegraph.method('createAccount', values={
            'short_name': short_name,
            'author_name': author_name,
            'author_url': author_url
        })
        if replace_token:
            # Switch this client over to the freshly created account.
            self._telegraph.access_token = account.get('access_token')
        return account

    def edit_account_info(self, short_name=None, author_name=None,
                          author_url=None):
        """Update information about the current Telegraph account.

        Pass only the parameters that you want to change.

        :param short_name: Account name shown above the "Edit/Publish"
                           button on Telegra.ph; other users never see it
        :param author_name: Default author name for new articles
        :param author_url: Default profile link shown under article titles
        """
        payload = {
            'short_name': short_name,
            'author_name': author_name,
            'author_url': author_url
        }
        return self._telegraph.method('editAccountInfo', values=payload)

    def revoke_access_token(self):
        """Revoke the current access_token and generate a new one.

        Useful when connected sessions should be reset or the token may
        have been compromised. The client adopts the new token.
        Returns a dict with ``access_token`` and ``auth_url`` fields.
        """
        tokens = self._telegraph.method('revokeAccessToken')
        self._telegraph.access_token = tokens.get('access_token')
        return tokens

    def get_page(self, path, return_content=True, return_html=True):
        """Fetch a Telegraph page.

        :param path: Page path (the part after https://telegra.ph/,
                     e.g. ``Title-12-31``)
        :param return_content: If true, include the page content
        :param return_html: If true, convert content from nodes to HTML
        """
        page = self._telegraph.method('getPage', path=path, values={
            'return_content': return_content
        })
        if return_content and return_html:
            page['content'] = nodes_to_html(page['content'])
        return page

    def create_page(self, title, content=None, html_content=None,
                    author_name=None, author_url=None, return_content=False):
        """Create a new Telegraph page.

        :param title: Page title
        :param content: Content as a nodes list (takes precedence)
        :param html_content: Content as HTML, converted when *content* is None
        :param author_name: Author name shown below the article's title
        :param author_url: Profile link opened from the author's name
        :param return_content: If true, a content field will be returned
        """
        nodes_content = html_to_nodes(html_content) if content is None else content
        return self._telegraph.method('createPage', values={
            'title': title,
            'author_name': author_name,
            'author_url': author_url,
            'content': json.dumps(nodes_content, ensure_ascii=False),
            'return_content': return_content
        })

    def edit_page(self, path, title, content=None, html_content=None,
                  author_name=None, author_url=None, return_content=False):
        """Edit an existing Telegraph page.

        :param path: Path to the page
        :param title: Page title
        :param content: Content as a nodes list (takes precedence)
        :param html_content: Content as HTML, converted when *content* is None
        :param author_name: Author name shown below the article's title
        :param author_url: Profile link opened from the author's name
        :param return_content: If true, a content field will be returned
        """
        nodes_content = html_to_nodes(html_content) if content is None else content
        return self._telegraph.method('editPage', path=path, values={
            'title': title,
            'author_name': author_name,
            'author_url': author_url,
            'content': json.dumps(nodes_content, ensure_ascii=False),
            'return_content': return_content
        })

    def get_account_info(self, fields=None):
        """Get information about the current Telegraph account.

        :param fields: Fields to return; any of short_name, author_name,
                       author_url, auth_url, page_count. The API default is
                       ["short_name", "author_name", "author_url"].
        """
        fields_json = json.dumps(fields, ensure_ascii=False) if fields else None
        return self._telegraph.method('getAccountInfo', {
            'fields': fields_json
        })

    def get_page_list(self, offset=0, limit=50):
        """List pages of the current account, most recently created first.

        :param offset: Sequential number of the first page to return
                       (default = 0)
        :param limit: Number of pages to retrieve (0-200, default = 50)
        """
        window = {'offset': offset, 'limit': limit}
        return self._telegraph.method('getPageList', window)

    def get_views(self, path, year=None, month=None, day=None, hour=None):
        """Get the number of views of a Telegraph article.

        Each finer-grained component requires the coarser ones: *year* is
        required if *month* is passed, *month* if *day* is passed, and
        *day* if *hour* is passed.

        :param path: Path to the Telegraph page
        :param year: Restrict the count to this year
        :param month: Restrict the count to this month
        :param day: Restrict the count to this day
        :param hour: Restrict the count to this hour
        """
        period = {'year': year, 'month': month, 'day': day, 'hour': hour}
        return self._telegraph.method('getViews', path=path, values=period)
| 34.189922
| 80
| 0.609455
|
acfff26fffea32659a99f47bf9837fee19b1aa79
| 203
|
py
|
Python
|
coroutine/async_asyncio.py
|
kaitolucifer/asgi-demo
|
3d5a1ab834d01c04dfbed4f99f61e78f1896606c
|
[
"WTFPL"
] | null | null | null |
coroutine/async_asyncio.py
|
kaitolucifer/asgi-demo
|
3d5a1ab834d01c04dfbed4f99f61e78f1896606c
|
[
"WTFPL"
] | null | null | null |
coroutine/async_asyncio.py
|
kaitolucifer/asgi-demo
|
3d5a1ab834d01c04dfbed4f99f61e78f1896606c
|
[
"WTFPL"
] | null | null | null |
import asyncio
# Demo: one coroutine can await another *coroutine object* directly.
async def test1():
    print(1)
    # Awaiting the module-level coroutine object `coro2` runs test2 here.
    # A coroutine object may be awaited at most once.
    await coro2
    print('test1 done')
async def test2():
    print(2)
    # Yield control to the event loop once before finishing.
    await asyncio.sleep(0)
# Create the coroutine objects first; coro2 must exist before coro1 runs.
coro1 = test1()
coro2 = test2()
# Drive coro1 to completion; coro2 is consumed by the `await` inside test1.
asyncio.run(coro1)
| 11.277778
| 26
| 0.630542
|
acfff61dc968fc4de71821450008e48edd4d36ee
| 305
|
py
|
Python
|
2017/02/mexico-nafta-baseline-20170223/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 14
|
2015-05-08T13:41:51.000Z
|
2021-02-24T12:34:55.000Z
|
2017/02/mexico-nafta-baseline-20170223/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | null | null | null |
2017/02/mexico-nafta-baseline-20170223/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 7
|
2015-04-04T04:45:54.000Z
|
2021-02-18T11:12:48.000Z
|
#!/usr/bin/env python
import base_filters
# Key of the Google Doc/Sheet that supplies this graphic's COPY text.
COPY_GOOGLE_DOC_KEY = '1OH1cqs_v4b3FYYgccNRYpyDtRZ6kjAJAlqRpPrssJew'
# This graphic ships no static assets of its own.
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
# Jinja filters made available to this graphic's templates.
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| 21.785714
| 77
| 0.819672
|
acfff687c8c25baebd00873d9fc161b7d17926d6
| 944
|
py
|
Python
|
ask-sdk-core/ask_sdk_core/dispatch_components/__init__.py
|
koshyviv/alexa-skills-kit-sdk-for-python
|
1ceace2b745f53d32a8fea8eeb18ec9f381cda1c
|
[
"Apache-2.0"
] | 1
|
2020-06-13T14:14:26.000Z
|
2020-06-13T14:14:26.000Z
|
ask-sdk-core/ask_sdk_core/dispatch_components/__init__.py
|
koshyviv/alexa-skills-kit-sdk-for-python
|
1ceace2b745f53d32a8fea8eeb18ec9f381cda1c
|
[
"Apache-2.0"
] | null | null | null |
ask-sdk-core/ask_sdk_core/dispatch_components/__init__.py
|
koshyviv/alexa-skills-kit-sdk-for-python
|
1ceace2b745f53d32a8fea8eeb18ec9f381cda1c
|
[
"Apache-2.0"
] | 2
|
2019-11-22T14:52:47.000Z
|
2021-06-18T13:46:15.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
#
# Importing the most commonly used component classes, for
# short-circuiting purposes.
from .request_components import (
AbstractRequestHandler, AbstractRequestInterceptor,
AbstractResponseInterceptor, HandlerAdapter, RequestMapper,
RequestHandlerChain)
from .exception_components import (
AbstractExceptionHandler, ExceptionMapper)
| 33.714286
| 67
| 0.76589
|
acfff6a4dd028d5768425cc05f332187a1bd48ef
| 11,518
|
py
|
Python
|
sphinx/transforms/__init__.py
|
sephalon/sphinx
|
c892fe98f7a311dc51340f8d98e63d9790b7f820
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/transforms/__init__.py
|
sephalon/sphinx
|
c892fe98f7a311dc51340f8d98e63d9790b7f820
|
[
"BSD-2-Clause"
] | 4
|
2021-03-04T17:18:36.000Z
|
2021-07-14T19:08:18.000Z
|
sphinx/transforms/__init__.py
|
sephalon/sphinx
|
c892fe98f7a311dc51340f8d98e63d9790b7f820
|
[
"BSD-2-Clause"
] | 2
|
2021-03-02T10:07:11.000Z
|
2021-10-12T23:41:07.000Z
|
# -*- coding: utf-8 -*-
"""
sphinx.transforms
~~~~~~~~~~~~~~~~~
Docutils transforms used by Sphinx when reading documents.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from docutils.transforms import Transform, Transformer
from docutils.transforms.parts import ContentsFilter
from docutils.utils import new_document
from docutils.transforms.universal import SmartQuotes
from sphinx import addnodes
from sphinx.locale import _
from sphinx.util import logging
from sphinx.util.i18n import format_date
from sphinx.util.nodes import apply_source_workaround, is_smartquotable
# Import-cycle-free guard: these imports exist only so the `# type:`
# comments below can name Sphinx classes; they never execute at runtime.
if False:
    # For type annotation
    from sphinx.application import Sphinx  # NOQA
    from sphinx.config import Config  # NOQA
    from sphinx.domain.std import StandardDomain  # NOQA
    from sphinx.environment import BuildEnvironment  # NOQA

logger = logging.getLogger(__name__)

# Substitution names that DefaultSubstitutions fills in from the config
# when the document does not define them itself.
# (Set literal instead of set([...]) — same contents, idiomatic form.)
default_substitutions = {
    'version',
    'release',
    'today',
}
class SphinxTransform(Transform):
    """Base class for Sphinx transforms.

    Compared with a plain ``docutils.transforms.Transform`` this class adds
    convenient access to Sphinx objects via properties:

    self.app
        The application object (:class:`sphinx.application.Sphinx`)
    self.config
        The config object (:class:`sphinx.config.Config`)
    self.env
        The environment object (:class:`sphinx.environment.BuildEnvironment`)
    """

    @property
    def env(self):
        # type: () -> BuildEnvironment
        """The build environment stored in the document settings."""
        return self.document.settings.env

    @property
    def app(self):
        # type: () -> Sphinx
        """The Sphinx application object (reached through the environment)."""
        return self.env.app

    @property
    def config(self):
        # type: () -> Config
        """The Sphinx configuration object (reached through the environment)."""
        return self.env.config
class SphinxTransformer(Transformer):
    """Transformer used by Sphinx.

    Besides full ``nodes.document`` trees it can transform an arbitrary
    node by temporarily wrapping it in a fresh document, and it injects
    the build environment into ``document.settings.env`` so that
    :class:`SphinxTransform` subclasses can reach it.
    """
    document = None  # type: nodes.Node
    env = None  # type: BuildEnvironment
    def set_environment(self, env):
        # type: (BuildEnvironment) -> None
        # Remember the environment so apply_transforms() can inject it.
        self.env = env
    def apply_transforms(self):
        # type: () -> None
        if isinstance(self.document, nodes.document):
            # Real document: ensure settings.env is available, then run the
            # standard docutils transform machinery.
            if not hasattr(self.document.settings, 'env') and self.env:
                self.document.settings.env = self.env
            Transformer.apply_transforms(self)
        else:
            # wrap the target node by document node during transforming
            try:
                document = new_document('')
                if self.env:
                    document.settings.env = self.env
                document += self.document
                self.document = document
                Transformer.apply_transforms(self)
            finally:
                # Unwrap: restore the original (now transformed) node.
                self.document = self.document[0]
class DefaultSubstitutions(SphinxTransform):
    """
    Replace some substitutions if they aren't defined in the document.

    The substitution values (|version|, |release|, |today|) are taken
    straight from the corresponding config options.
    """
    # run before the default Substitutions
    default_priority = 210
    def apply(self):
        # type: () -> None
        # only handle those not otherwise defined in the document
        to_handle = default_substitutions - set(self.document.substitution_defs)
        for ref in self.document.traverse(nodes.substitution_reference):
            refname = ref['refname']
            if refname in to_handle:
                text = self.config[refname]
                if refname == 'today' and not text:
                    # special handling: can also specify a strftime format
                    text = format_date(self.config.today_fmt or _('%b %d, %Y'),  # type: ignore
                                       language=self.config.language)
                ref.replace_self(nodes.Text(text, text))
class MoveModuleTargets(SphinxTransform):
    """
    Move module targets that are the first thing in a section to the section
    title.
    XXX Python specific
    """
    default_priority = 210
    def apply(self):
        # type: () -> None
        for node in self.document.traverse(nodes.target):
            if not node['ids']:
                continue
            if ('ismod' in node and
                node.parent.__class__ is nodes.section and
                # index 0 is the section title node
                node.parent.index(node) == 1):
                # Prepend the module target's IDs to the section's IDs and
                # drop the now-redundant target node.
                node.parent['ids'][0:0] = node['ids']
                node.parent.remove(node)
class HandleCodeBlocks(SphinxTransform):
    """
    Several code block related transformations.
    """
    default_priority = 210
    def apply(self):
        # type: () -> None
        # move doctest blocks out of blockquotes
        for node in self.document.traverse(nodes.block_quote):
            # A blockquote consisting entirely of doctest blocks is an
            # indentation artifact; unwrap it.
            if all(isinstance(child, nodes.doctest_block) for child
                   in node.children):
                node.replace_self(node.children)
        # combine successive doctest blocks
        # NOTE(review): intentionally disabled; kept for reference.
        # for node in self.document.traverse(nodes.doctest_block):
        #    if node not in node.parent.children:
        #        continue
        #    parindex = node.parent.index(node)
        #    while len(node.parent) > parindex+1 and \
        #            isinstance(node.parent[parindex+1], nodes.doctest_block):
        #        node[0] = nodes.Text(node[0] + '\n\n' +
        #                             node.parent[parindex+1][0])
        #        del node.parent[parindex+1]
class AutoNumbering(SphinxTransform):
    """
    Register IDs of tables, figures and literal_blocks to assign numbers.
    """
    default_priority = 210
    def apply(self):
        # type: () -> None
        domain = self.env.get_domain('std')  # type: StandardDomain
        for node in self.document.traverse(nodes.Element):
            # Only enumerable nodes that have a numfig title get an
            # implicit target (and therefore a number) registered.
            if domain.is_enumerable_node(node) and domain.get_numfig_title(node) is not None:
                self.document.note_implicit_target(node)
class SortIds(SphinxTransform):
    """Sort section IDs so that an auto-generated "id[0-9]+" ID comes last."""
    default_priority = 261

    def apply(self):
        # type: () -> None
        for section in self.document.traverse(nodes.section):
            ids = section['ids']
            # Auto-generated IDs start with "id"; rotate such a leading ID
            # to the end so explicit IDs are preferred as link targets.
            if len(ids) > 1 and ids[0].startswith('id'):
                section['ids'] = ids[1:] + [ids[0]]
class CitationReferences(SphinxTransform):
    """
    Replace citation references by pending_xref nodes before the default
    docutils transform tries to resolve them.
    """
    default_priority = 619
    def apply(self):
        # type: () -> None
        for citnode in self.document.traverse(nodes.citation_reference):
            cittext = citnode.astext()
            refnode = addnodes.pending_xref(cittext, refdomain='std', reftype='citation',
                                            reftarget=cittext, refwarn=True,
                                            ids=citnode["ids"])
            # Preserve source/line info for later warnings, falling back to
            # the parent node when the reference itself carries none.
            refnode.source = citnode.source or citnode.parent.source
            refnode.line = citnode.line or citnode.parent.line
            refnode += nodes.Text('[' + cittext + ']')
            citnode.parent.replace(citnode, refnode)
# Node classes that may optionally be made translatable, keyed by the
# names accepted in the ``gettext_additional_targets`` config value
# (consumed by ExtraTranslatableNodes below).
TRANSLATABLE_NODES = {
    'literal-block': nodes.literal_block,
    'doctest-block': nodes.doctest_block,
    'raw': nodes.raw,
    'index': addnodes.index,
    'image': nodes.image,
}
class ApplySourceWorkaround(SphinxTransform):
    """Update ``source`` and ``rawsource`` attributes of text/image nodes."""
    default_priority = 10

    def apply(self):
        # type: () -> None
        # Only TextElement and image nodes need the workaround applied.
        affected = (node for node in self.document.traverse()
                    if isinstance(node, (nodes.TextElement, nodes.image)))
        for node in affected:
            apply_source_workaround(node)
class AutoIndexUpgrader(SphinxTransform):
    """
    Detect old style; 4 column based indices and automatically upgrade to new style.
    """
    default_priority = 210
    def apply(self):
        # type: () -> None
        for node in self.document.traverse(addnodes.index):
            if 'entries' in node and any(len(entry) == 4 for entry in node['entries']):
                msg = ('4 column based index found. '
                       'It might be a bug of extensions you use: %r' % node['entries'])
                logger.warning(msg, location=node)
                for i, entry in enumerate(node['entries']):
                    if len(entry) == 4:
                        # Pad with None to produce a new-style 5-tuple entry.
                        node['entries'][i] = entry + (None,)
class ExtraTranslatableNodes(SphinxTransform):
    """Mark additional node types as translatable.

    The node types are selected via the ``gettext_additional_targets``
    config value (see TRANSLATABLE_NODES for the accepted names).
    """
    default_priority = 10

    def apply(self):
        # type: () -> None
        enabled = self.config.gettext_additional_targets
        # Build the class tuple once; it is used for every isinstance test.
        node_classes = tuple(cls for name, cls in TRANSLATABLE_NODES.items()
                             if name in enabled)
        if not node_classes:
            return
        for node in self.document.traverse(
                lambda n: isinstance(n, node_classes)):
            node['translatable'] = True
class UnreferencedFootnotesDetector(SphinxTransform):
    """
    detect unreferenced footnotes and emit warnings
    """
    default_priority = 200
    def apply(self):
        # type: () -> None
        # Manually numbered/named footnotes.
        for node in self.document.footnotes:
            if node['names'] == []:
                # footnote having duplicated number. It is already warned at parser.
                pass
            elif node['names'][0] not in self.document.footnote_refs:
                logger.warning('Footnote [%s] is not referenced.', node['names'][0],
                               type='ref', subtype='footnote',
                               location=node)
        # Auto-numbered ([#]) footnotes: considered referenced when any
        # auto footnote reference shares the same 'auto' kind.
        for node in self.document.autofootnotes:
            if not any(ref['auto'] == node['auto'] for ref in self.document.autofootnote_refs):
                logger.warning('Footnote [#] is not referenced.',
                               type='ref', subtype='footnote',
                               location=node)
class FilterSystemMessages(SphinxTransform):
    """Filter system messages from a doctree.

    Removes ``system_message`` nodes below a severity threshold: when the
    ``keep_warnings`` config option is set, only messages below level 2
    are dropped; otherwise everything below level 5 (i.e. all of them).
    """
    default_priority = 999

    def apply(self):
        # type: () -> None
        # Conditional expression instead of the fragile `cond and 2 or 5`
        # idiom (which silently misbehaves if the "true" value is falsy).
        filterlevel = 2 if self.config.keep_warnings else 5
        for node in self.document.traverse(nodes.system_message):
            if node['level'] < filterlevel:
                logger.debug('%s [filtered system message]', node.astext())
                node.parent.remove(node)
class SphinxContentsFilter(ContentsFilter):
    """
    Used with BuildEnvironment.add_toc_from() to discard cross-file links
    within table-of-contents link nodes.
    """
    def visit_pending_xref(self, node):
        # type: (nodes.Node) -> None
        # Keep only the literal text of the unresolved cross-reference.
        text = node.astext()
        self.parent.append(nodes.literal(text, text))
        raise nodes.SkipNode
    def visit_image(self, node):
        # type: (nodes.Node) -> None
        # Images carry no useful ToC text; skip them entirely.
        raise nodes.SkipNode
class SphinxSmartQuotes(SmartQuotes):
    """SmartQuotes variant that skips substitution for some node types.

    refs: sphinx.parsers.RSTParser
    """
    def get_tokens(self, txtnodes):
        # Yield ``(texttype, nodetext)`` tuples for a list of Text nodes —
        # the interface expected by ``smartquotes.educate_tokens()``.
        # Nodes that must not be educated are labelled 'literal'.
        for txtnode in txtnodes:
            if is_smartquotable(txtnode):
                yield ('plain', txtnode.astext())
            else:
                # "literal" text is not changed:
                yield ('literal', txtnode.astext())
| 32.814815
| 95
| 0.604358
|
acfff6ed42020c227b8a281356a2f31968b023cb
| 2,795
|
py
|
Python
|
detection/metrics/analyzer/clustering.py
|
ml-in-programming/kotlin-code-anomaly
|
8a999beafc6222feb715a9bb9ef9fd670d414856
|
[
"Apache-2.0"
] | 4
|
2018-04-20T07:36:13.000Z
|
2019-02-03T10:33:39.000Z
|
detection/metrics/analyzer/clustering.py
|
JetBrains-Research/kotlin-code-anomaly
|
8a999beafc6222feb715a9bb9ef9fd670d414856
|
[
"Apache-2.0"
] | null | null | null |
detection/metrics/analyzer/clustering.py
|
JetBrains-Research/kotlin-code-anomaly
|
8a999beafc6222feb715a9bb9ef9fd670d414856
|
[
"Apache-2.0"
] | null | null | null |
import csv
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas
# noinspection PyUnresolvedReferences
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
is_drawing = False
# Output directory for plots and per-cluster CSV files.
out_dir = f"../post-analysis/"
# Input: metrics CSV, one method per row, no header; column 0 is assumed to
# be the method identifier, remaining columns numeric metrics — TODO confirm.
csv_in_path = f"../post-analysis/feb18_true_all.csv"
# out_path = f"{out_dir}"
if not os.path.exists(out_dir):
    os.makedirs(out_dir)
start_time = time.time()
# Load input
methods = pandas.read_csv(csv_in_path, header=None, delimiter=',', quoting=csv.QUOTE_MINIMAL, error_bad_lines=True,
                          engine='python')
# Fix potential problems in input
X = np.array(methods.values[:, 1:], dtype="float64")
# Drop rows containing any NaN, keeping the DataFrame and matrix aligned.
ok_lines = np.array([~np.isnan(row).any() for row in X])
methods = methods[ok_lines]
X = X[ok_lines]
n_methods = methods.shape[0]
# Preprocessing
# Standardize features, then project onto the first two principal components.
X = scale(X)
X_pca = PCA(n_components=2).fit_transform(X)
def run_kmeans(methods, n_clusters_kmeans=4, h=.02):
    """Cluster the PCA-reduced metrics with K-Means, plot the partition,
    and write each cluster's methods to its own CSV file in ``out_dir``.

    Uses the module-level ``X_pca`` projection; *methods* must be the
    DataFrame whose rows are aligned with it.

    :param methods: DataFrame of method rows aligned with ``X_pca``
    :param n_clusters_kmeans: number of clusters to fit (default 4,
        matching the previous hard-coded value)
    :param h: step size of the mesh used to draw the decision regions;
        decrease to increase the quality of the vector quantization plot
    """
    # K-Means on the 2-D PCA projection.
    kmeans = KMeans(init='k-means++', n_clusters=n_clusters_kmeans, n_init=10)
    kmeans_labels = kmeans.fit_predict(X_pca)
    # Plot the decision boundary: predict a label for every point of a mesh
    # covering the bounding box [x_min, x_max] x [y_min, y_max].
    x_min, x_max = X_pca[:, 0].min() - 1, X_pca[:, 0].max() + 1
    y_min, y_max = X_pca[:, 1].min() - 1, X_pca[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Obtain labels for each point in mesh. Use last trained model.
    Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure(1)
    plt.clf()
    plt.imshow(Z, interpolation='nearest',
               extent=(xx.min(), xx.max(), yy.min(), yy.max()),
               cmap=plt.cm.Paired,
               aspect='auto', origin='lower')
    # Plot the centroids as a white X
    centroids = kmeans.cluster_centers_
    plt.scatter(centroids[:, 0], centroids[:, 1], marker='x', s=100, linewidths=2, color='w')
    # Plot the data
    plt.plot(X_pca[:, 0], X_pca[:, 1], 'k.', markersize=2)
    plt.title('K-means (PCA-reduced data)')
    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)
    plt.xticks(())
    plt.yticks(())
    plt.savefig(f"{out_dir}k_means.png")
    # Save the rows belonging to each cluster to a separate CSV file.
    for i in range(n_clusters_kmeans):
        cur_indices = np.asarray([label == i for label in kmeans_labels])
        cur_methods = methods[cur_indices]
        cur_methods.to_csv(f"{out_dir}cluster_{i}.csv", header=False, index=False)
# Run the clustering pipeline on the filtered method set.
run_kmeans(methods)
# Finish
end_time = time.time()
print(f"Total elapsed time: {end_time - start_time}")
| 32.126437
| 115
| 0.667263
|
acfff70415af30cae85e1c57a35e5e80e6b039a1
| 2,077
|
py
|
Python
|
package/spack-gotcha/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | 1
|
2018-07-17T07:45:09.000Z
|
2018-07-17T07:45:09.000Z
|
package/spack-gotcha/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | null | null | null |
package/spack-gotcha/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | null | null | null |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Gotcha(CMakePackage):
    """C software library for shared library function wrapping,
    enables tools to intercept calls into shared libraries"""
    homepage = "http://github.com/LLNL/gotcha"
    url = "http://github.com/LLNL/gotcha"
    # Optionally build Gotcha's own test suite (drives GOTCHA_ENABLE_TESTS
    # in configure_args below).
    variant('test', default=False, description='Build tests for Gotcha')
    version('develop', git='https://github.com/LLNL/gotcha.git',
            branch="develop")
    version('master', git='https://github.com/LLNL/gotcha.git',
            branch="master")
    version('1.0.2', git='https://github.com/LLNL/gotcha.git', tag="1.0.2")
    version('0.0.2', git='https://github.com/LLNL/gotcha.git', tag="0.0.2")
    def configure_args(self):
        # Translate the '+test' variant into the corresponding CMake flag.
        spec = self.spec
        return [
            '-DGOTCHA_ENABLE_TESTS=%s' % ('ON' if '+test' in spec else 'OFF')
        ]
| 42.387755
| 78
| 0.653828
|
acfff73dc627541d9216fce53073b06c90d63c9a
| 2,715
|
py
|
Python
|
src/sqlfluff/rules/L025.py
|
tinder-albertyue/sqlfluff
|
992bd8d02465c9a57f29d58d5c328e8f548f6cc9
|
[
"MIT"
] | null | null | null |
src/sqlfluff/rules/L025.py
|
tinder-albertyue/sqlfluff
|
992bd8d02465c9a57f29d58d5c328e8f548f6cc9
|
[
"MIT"
] | null | null | null |
src/sqlfluff/rules/L025.py
|
tinder-albertyue/sqlfluff
|
992bd8d02465c9a57f29d58d5c328e8f548f6cc9
|
[
"MIT"
] | null | null | null |
"""Implementation of Rule L025."""
from sqlfluff.core.rules.base import LintFix, LintResult
from sqlfluff.core.rules.doc_decorators import document_fix_compatible
from sqlfluff.core.rules.functional import Segments
import sqlfluff.core.rules.functional.segment_predicates as sp
from sqlfluff.rules.L020 import Rule_L020
from sqlfluff.core.dialects.common import AliasInfo
@document_fix_compatible
class Rule_L025(Rule_L020):
    """Tables should not be aliased if that alias is not used.

    | **Anti-pattern**

    .. code-block:: sql

        SELECT
            a
        FROM foo AS zoo

    | **Best practice**
    | Use the alias or remove it. An unused alias makes code
    | harder to read without changing any functionality.

    .. code-block:: sql

        SELECT
            zoo.a
        FROM foo AS zoo

        -- Alternatively...

        SELECT
            a
        FROM foo

    """

    def _lint_references_and_aliases(
        self,
        table_aliases,
        standalone_aliases,
        references,
        col_aliases,
        using_cols,
        parent_select,
    ):
        """Check all aliased references against tables referenced in the query."""
        # A buffer to keep any violations.
        violation_buff = []
        # Check all the references that we have, keep track of which aliases we refer to.
        tbl_refs = set()
        for r in references:
            # Collect every table-level name part each reference could point at.
            tbl_refs.update(
                tr.part
                for tr in r.extract_possible_references(
                    level=r.ObjectReferenceLevel.TABLE
                )
            )

        alias: AliasInfo
        for alias in table_aliases:
            # An alias that is declared but never referenced is a violation;
            # build fixes that delete it together with its surrounding
            # whitespace so the statement stays cleanly formatted.
            if alias.aliased and alias.ref_str not in tbl_refs:
                fixes = [LintFix.delete(alias.alias_expression)]
                # Walk back to remove indents/whitespaces
                to_delete = (
                    Segments(*alias.from_expression_element.segments)
                    .reversed()
                    .select(
                        start_seg=alias.alias_expression,
                        # Stop once we reach an other, "regular" segment.
                        loop_while=sp.or_(sp.is_whitespace(), sp.is_meta()),
                    )
                )
                fixes += [LintFix.delete(seg) for seg in to_delete]
                violation_buff.append(
                    LintResult(
                        anchor=alias.segment,
                        description="Alias {!r} is never used in SELECT statement.".format(
                            alias.ref_str
                        ),
                        fixes=fixes,
                    )
                )
        return violation_buff or None
| 30.852273
| 91
| 0.552855
|
acfff8c95c57f7028f8280e9e5dbb257887c567a
| 517
|
py
|
Python
|
test/test_load_signatures.py
|
x-way/tell-me-your-secrets
|
e59749f87e146a4cc4912b9505deefcce10a72a4
|
[
"MIT"
] | 31
|
2019-11-27T09:14:55.000Z
|
2022-02-15T12:39:43.000Z
|
test/test_load_signatures.py
|
x-way/tell-me-your-secrets
|
e59749f87e146a4cc4912b9505deefcce10a72a4
|
[
"MIT"
] | 27
|
2020-06-18T13:20:42.000Z
|
2021-09-15T08:13:51.000Z
|
test/test_load_signatures.py
|
x-way/tell-me-your-secrets
|
e59749f87e146a4cc4912b9505deefcce10a72a4
|
[
"MIT"
] | 12
|
2019-12-12T08:53:29.000Z
|
2022-02-15T12:39:53.000Z
|
import unittest
import yaml
from tell_me_your_secrets.__main__ import SignatureRecognizer
from tell_me_your_secrets.defaults import DEFAULT_CONFIG_PATH
class SignatureLoadTest(unittest.TestCase):
    def test_load_default_config(self):
        """Ensure the bundled default config parses and yields signatures."""
        with open(DEFAULT_CONFIG_PATH) as config_file:
            parsed = yaml.safe_load(config_file)
            loaded = SignatureRecognizer.load_signatures(parsed['signatures'], [])
            self.assertGreater(len(loaded), 0)
| 30.411765
| 86
| 0.729207
|
acfff95bfa85860a52b8e9609be7ef72302bbc39
| 3,490
|
py
|
Python
|
myfuc/.ipynb_checkpoints/KAMOME_NET-checkpoint.py
|
kamomehz/waveletCodingCNN
|
50c7db9d986039ded38999b7e4f4265e2250fb90
|
[
"MIT"
] | null | null | null |
myfuc/.ipynb_checkpoints/KAMOME_NET-checkpoint.py
|
kamomehz/waveletCodingCNN
|
50c7db9d986039ded38999b7e4f4265e2250fb90
|
[
"MIT"
] | null | null | null |
myfuc/.ipynb_checkpoints/KAMOME_NET-checkpoint.py
|
kamomehz/waveletCodingCNN
|
50c7db9d986039ded38999b7e4f4265e2250fb90
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset, TensorDataset
import numpy as np
from matplotlib import pyplot as plt
import struct
class RMSELoss(nn.Module):
    """Root-mean-square-error loss: sqrt(mean((inputs - targets) ** 2))."""

    def __init__(self):
        super(RMSELoss, self).__init__()

    def forward(self, inputs, targets):
        # Mean squared error first, then the square root of the scalar.
        mean_sq_err = torch.mean((inputs - targets) ** 2)
        return torch.sqrt(mean_sq_err)
class Net_0(nn.Module):
    """One conv layer followed by a linear regression head.

    Expects input of spatial size 7x6 with one channel, so the conv
    output flattens to 7*6*32 features.
    """

    def __init__(self):
        super(Net_0, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, padding=1)
        self.fc1 = nn.Linear(7*6*32, 1)
        # Declared (and kept for state-dict compatibility) but unused in forward.
        self.drop1 = nn.Dropout(p=0.1)
        self.drop2 = nn.Dropout(p=0.2)

    def forward(self, x):
        features = F.leaky_relu(self.conv1(x))
        flat = torch.flatten(features, 1)
        return self.fc1(flat)
class Net_L(nn.Module):
    """Three-layer MLP regression head (7*6 = 42 inputs -> 1 output)."""

    def __init__(self):
        super(Net_L, self).__init__()
        self.fc1 = nn.Linear(7*6, 128)
        self.fc2 = nn.Linear(128, 32)
        self.fc3 = nn.Linear(32, 1)

    def forward(self, x):
        out = torch.flatten(x, 1)
        out = F.leaky_relu(self.fc1(out))
        out = F.leaky_relu(self.fc2(out))
        return self.fc3(out)
class Net_CNN(nn.Module):
    """Three conv layers plus a four-layer MLP head for 7x6 single-channel input."""

    def __init__(self):
        super(Net_CNN, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        # padding=1 with 3x3 kernels preserves the 7x6 spatial size.
        self.fc1 = nn.Linear(7*6*128, 128)
        self.fc2 = nn.Linear(128, 32)
        self.fc3 = nn.Linear(32, 8)
        self.fc4 = nn.Linear(8, 1)
        # Declared (and kept for state-dict compatibility) but unused in forward.
        self.drop1 = nn.Dropout(p=0.1)

    def forward(self, x):
        out = F.leaky_relu(self.conv1(x))
        out = F.leaky_relu(self.conv2(out))
        out = F.leaky_relu(self.conv3(out))
        out = torch.flatten(out, 1)
        out = F.leaky_relu(self.fc1(out))
        out = F.leaky_relu(self.fc2(out))
        out = F.leaky_relu(self.fc3(out))
        return self.fc4(out)
class Net_CNNFC(nn.Module):
    """Conv feature extractor whose flattened output is concatenated with a
    4-element auxiliary feature vector before a single linear head.

    forward(x, y):
        x -- image tensor; spatial size must be 6x3 so the conv output
             flattens to 6*3*32 features (assumed from fc1's size -- TODO
             confirm with callers).
        y -- auxiliary features of shape (batch, 4).
    Returns a (batch, 1) tensor.
    """

    def __init__(self):
        super(Net_CNNFC, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, padding=1)
        # 6*3 spatial positions * 32 channels + 4 auxiliary features.
        self.fc1 = nn.Linear(6*3*32+4, 1)

    def forward(self, x, y):
        x = F.relu(self.conv1(x))
        x = torch.flatten(x, 1)
        # Fuse conv features with the auxiliary vector along the feature dim.
        x = torch.cat((x, y), 1)
        # Removed leftover debug print of x.shape.
        x = self.fc1(x)
        return x
class Net_tkzw(nn.Module):
    """Two conv layers plus a two-layer MLP head.

    Expects an input whose conv output flattens to 64*24 features
    (i.e. 24 spatial positions, e.g. 4x6 or 6x4).
    """

    def __init__(self):
        super(Net_tkzw, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.fc1 = nn.Linear(64*24, 32)
        self.fc0 = nn.Linear(32, 1)
        # Declared (and kept for state-dict compatibility) but unused in forward.
        self.drop1 = nn.Dropout(p=0.1)
        self.drop4 = nn.Dropout(p=0.4)

    def forward(self, x):
        out = F.relu(self.conv1(x))
        out = F.relu(self.conv2(out))
        out = torch.flatten(out, 1)
        out = F.relu(self.fc1(out))
        return self.fc0(out)
| 27.698413
| 63
| 0.544413
|
acfff9bb73eb82f00d2815212752940eace32a55
| 22,055
|
py
|
Python
|
map_matching/road_routing.py
|
ilaripih/map-matching
|
37e901aa48075bf6d09a2f152a7a267bb5fcdc26
|
[
"BSD-3-Clause"
] | null | null | null |
map_matching/road_routing.py
|
ilaripih/map-matching
|
37e901aa48075bf6d09a2f152a7a267bb5fcdc26
|
[
"BSD-3-Clause"
] | null | null | null |
map_matching/road_routing.py
|
ilaripih/map-matching
|
37e901aa48075bf6d09a2f152a7a267bb5fcdc26
|
[
"BSD-3-Clause"
] | 1
|
2020-08-13T12:39:40.000Z
|
2020-08-13T12:39:40.000Z
|
import collections
import itertools
from .shortest_path import *
from .utils import Edge
try:
from itertools import (
izip as zip,
imap as map,
ifilter as filter)
except ImportError:
pass
# A hashable object used by road network routing. Its attributes
# edge_id and location must be hashable too
# (edge_id: id of the original edge being split; location: fractional
# position along that edge in [0, 1]).
AdHocNode = collections.namedtuple('AdHocNode', ['edge_id', 'location'])
def test_adhoc_node():
    """AdHocNode must be hashable and compare by value."""
    node_a = AdHocNode(1, 0.2)
    node_b = AdHocNode(1, 0.2)
    unrelated = AdHocNode(2, 0.3)

    # It should be usable as a dict key, with value-equal nodes aliasing
    table = {node_a: 'rainbow'}
    assert node_a == node_b
    assert table[node_a] == 'rainbow'
    assert table[node_b] == 'rainbow'
    assert table.get(unrelated) is None
def split_edge(edge, locations):
    """
    Split edge by a list of locations. Insert an ad hoc node at each
    location and return a list of inserted ad hoc nodes and
    corresponding edges.

    Each location is a fraction in [0, 1] along the edge. Returns a list
    of (node, backward_edge, forward_edge) tuples in the same order as
    the input locations; backward_edge/forward_edge are None when the
    location coincides with the edge's start/end respectively.
    """
    # Attach indexes so we can recover the original order later
    idx_locations = list(enumerate(locations))
    idx_locations.sort(key=lambda t: t[1])
    idx_node_edges = []
    # The not-yet-split remainder of the edge, shrinking as we advance.
    forward_edge = edge
    prev_loc = 0
    for idx, loc in idx_locations:
        assert 0 <= loc <= 1 and prev_loc <= loc
        if loc == 0:
            # Location at the very start: reuse the real start node.
            middle_node = edge.start_node
            backward_edge = None
            # The forward edge keeps unchanged
        elif loc == 1:
            # Location at the very end: reuse the real end node.
            middle_node = edge.end_node
            backward_edge = forward_edge
            forward_edge = None
        else:
            middle_node = AdHocNode(edge_id=edge.id, location=loc)
            # Fraction of the ORIGINAL edge covered by the new backward piece.
            edge_proportion = loc - prev_loc
            backward_edge = Edge(id=forward_edge.id,
                                 start_node=forward_edge.start_node,
                                 end_node=middle_node,
                                 cost=edge_proportion * edge.cost,
                                 reverse_cost=edge_proportion * edge.reverse_cost,
                                 reversed=edge.reversed)
            forward_edge = Edge(id=forward_edge.id,
                                start_node=middle_node,
                                end_node=forward_edge.end_node,
                                cost=forward_edge.cost - backward_edge.cost,
                                reverse_cost=forward_edge.reverse_cost - backward_edge.reverse_cost,
                                reversed=edge.reversed)
        if idx_node_edges:
            # Patch the previous tuple in place: its forward edge has just
            # been re-split, so its true forward edge is our backward edge.
            idx_node_edges[-1][-1] = backward_edge
        # The forward edge will be replaced in the next iteration. See above line
        idx_node_edges.append([idx, middle_node, backward_edge, forward_edge])
        prev_loc = loc
    # Sort by index, so each node is corresponding to its location in
    # the location list
    idx_node_edges.sort()
    # Drop the indexes
    return [(n, b, f) for _, n, b, f in idx_node_edges]
def test_split_edge():
import functools
same_edge_p = functools.partial(Edge.same_edge, precision=0.0000001)
edge = Edge(id=1, start_node=2, end_node=10, cost=100, reverse_cost=1000)
# It should simply do it right
adhoc_node_edges = split_edge(edge, [0.5])
assert len(adhoc_node_edges) == 1
n, b, f = adhoc_node_edges[0]
assert n == AdHocNode(edge_id=edge.id, location= 0.5)
assert same_edge_p(b, Edge(id=edge.id,
start_node=edge.start_node,
end_node=n,
cost=edge.cost * 0.5,
reverse_cost=edge.reverse_cost * 0.5))
assert same_edge_p(f, Edge(id=edge.id,
start_node=n,
end_node=10,
cost=edge.cost * 0.5,
reverse_cost=edge.reverse_cost * 0.5))
assert not b.reversed and not f.reversed
# It should split reversed edge
redge = edge.reversed_edge()
adhoc_node_edges = split_edge(redge, [0.5])
n, b, f = adhoc_node_edges[0]
assert b.reversed and f.reversed
# It should split the edge by 2 locations
adhoc_node_edges = split_edge(edge, [0.5, 0.4])
assert len(adhoc_node_edges) == 2
(n2, b2, f2), (n1, b1, f1) = adhoc_node_edges
assert same_edge_p(b1, Edge(id=edge.id,
start_node=edge.start_node,
end_node=n1,
cost=edge.cost * 0.4,
reverse_cost=edge.reverse_cost * 0.4))
assert same_edge_p(f1, Edge(id=edge.id,
start_node=n1,
end_node=n2,
cost=edge.cost * 0.1,
reverse_cost=edge.reverse_cost * 0.1))
assert b2 == f1
assert same_edge_p(f2, Edge(id=edge.id,
start_node=n2,
end_node=edge.end_node,
cost=edge.cost * 0.5,
reverse_cost=edge.reverse_cost * 0.5))
# It should split the edge at starting location
adhoc_node_edges = split_edge(edge, [0])
assert len(adhoc_node_edges) == 1
n, b, f = adhoc_node_edges[0]
assert n == edge.start_node
assert b is None
assert f == edge
# It should split the edge at ending location
adhoc_node_edges = split_edge(edge, [1])
assert len(adhoc_node_edges) == 1
n, b, f = adhoc_node_edges[0]
assert n == edge.end_node
assert b == edge
assert f is None
# It should do all right
adhoc_node_edges = split_edge(edge, [1, 0.4, 0, 0.4, 0, 0.5])
assert len(adhoc_node_edges) == 6
# Do this because Python gurantees stable sort
n0, b0, f0 = adhoc_node_edges[2]
n1, b1, f1 = adhoc_node_edges[4]
n2, b2, f2 = adhoc_node_edges[1]
n3, b3, f3 = adhoc_node_edges[3]
n4, b4, f4 = adhoc_node_edges[5]
n5, b5, f5 = adhoc_node_edges[0]
assert n0 == edge.start_node and b0 is None and f0 is None
assert n1 == edge.start_node and b1 is None
assert same_edge_p(f1, Edge(id=edge.id,
start_node=n1,
end_node=n2,
cost=edge.cost * 0.4,
reverse_cost=edge.reverse_cost * 0.4))
assert isinstance(n2, AdHocNode)
assert b2 == f1
assert same_edge_p(f2, Edge(id=edge.id,
start_node=n2,
end_node=n3,
cost=0,
reverse_cost=0))
assert isinstance(n3, AdHocNode)
assert b3 == f2
assert same_edge_p(f3, Edge(id=edge.id,
start_node=n3,
end_node=n4,
cost=edge.cost * 0.1,
reverse_cost=edge.reverse_cost * 0.1))
assert isinstance(n4, AdHocNode)
assert b4 == f3
assert same_edge_p(f4, Edge(id=edge.id,
start_node=n4,
end_node=n5,
cost=edge.cost * 0.5,
reverse_cost=edge.reverse_cost * 0.5))
assert n5 == edge.end_node
assert b5 == f4
assert f5 is None
def _get_edge_id(idx_el):
_, (edge, _) = idx_el
return edge.id
def build_adhoc_network(edge_locations):
    """
    Build an adhoc network based on a list of edge locations.

    An edge location is simple 2-tuple (edge, percentage between [0,
    1]) to describe a location along an edge.

    It returns both the inserted ad hoc nodes (in the same order as
    edge_locations), and the adhoc network: a dict mapping each node
    touching a split edge to its incident split-edge pieces.
    """
    idx_edge_locations = list(enumerate(edge_locations))
    idx_adhoc_node_edges = []
    # groupby below requires its input sorted by the same key.
    idx_edge_locations.sort(key=_get_edge_id)
    # Group locations by edge ID, and insert ad hoc node at each location
    for edge_id, group in itertools.groupby(idx_edge_locations, key=_get_edge_id):
        first_edge = None
        locations, indexes = [], []
        for idx, (edge, location) in group:
            if not first_edge:
                first_edge = edge
            if first_edge != edge:
                assert first_edge == edge.reversed_edge(), \
                    'Two edges with same ID must either be same edges or be reverse to each other'
                # Re-express the location relative to first_edge's direction.
                location = 1 - location
            assert edge.id == edge_id == first_edge.id
            locations.append(location)
            indexes.append(idx)
        adhoc_node_edges = split_edge(first_edge, locations)
        # Pair each split result back with its original position.
        idx_adhoc_node_edges += zip(indexes, adhoc_node_edges)
    # Drop indexes and edges
    adhoc_nodes = [n for _, (n, _, _) in sorted(idx_adhoc_node_edges)]
    # Build ad hoc network
    adhoc_network = collections.defaultdict(list)
    for idx, (node, backward_edge, forward_edge) in idx_adhoc_node_edges:
        if not isinstance(node, AdHocNode):
            # Location coincided with a real node: nothing was split here.
            continue
        if not isinstance(backward_edge.start_node, AdHocNode):
            adhoc_network[backward_edge.start_node].append(backward_edge)
        adhoc_network[node].append(backward_edge.reversed_edge())
        adhoc_network[node].append(forward_edge)
        if not isinstance(forward_edge.end_node, AdHocNode):
            adhoc_network[forward_edge.end_node].append(forward_edge.reversed_edge())
    return adhoc_nodes, adhoc_network
def test_build_adhoc_network():
    """Unit tests for build_adhoc_network (nose-style module-level test)."""
    import functools
    same_edge_p = functools.partial(Edge.same_edge, precision=0.0000001)

    # It should simply do it right
    edge_locations = ((Edge(id=1, start_node=1, end_node=10, cost=100, reverse_cost=1000), 0.5),)
    adhoc_nodes, adhoc_network = build_adhoc_network(edge_locations)
    assert len(adhoc_nodes) == 1
    node = adhoc_nodes[0]
    assert isinstance(node, AdHocNode)
    backward_edge, forward_edge = adhoc_network[node]
    backward_edge = backward_edge.reversed_edge()
    assert same_edge_p(backward_edge, Edge(id=1, start_node=1, end_node=node, cost=50, reverse_cost=500))
    assert same_edge_p(forward_edge, Edge(id=1, start_node=node, end_node=10, cost=50, reverse_cost=500))
    assert backward_edge == adhoc_network[1][0]
    assert backward_edge.reversed_edge() == adhoc_network[node][0]
    assert forward_edge == adhoc_network[node][1]
    assert forward_edge.reversed_edge() == adhoc_network[10][0]

    # It should do it simply right for 2 edge locations
    edge_locations = ((Edge(id=1, start_node=1, end_node=10, cost=100, reverse_cost=1000), 0.5),
                      (Edge(id=2, start_node=3, end_node=5, cost=100, reverse_cost=1000), 0.4))
    adhoc_nodes, adhoc_network = build_adhoc_network(edge_locations)
    assert len(adhoc_nodes) == 2

    # It should do it right at 3 locations at the same edge
    edge = Edge(id=1, start_node=1, end_node=10, cost=100, reverse_cost=1000)
    edge_locations = ((edge, 0.5), (edge.reversed_edge(), 0.4), (edge.reversed_edge(), 0))
    adhoc_nodes, adhoc_network = build_adhoc_network(edge_locations)
    # 1 -------------> n0 --> n1 -----------> n2 (10)
    n0, n1, n2 = adhoc_nodes
    assert same_edge_p(adhoc_network[1][0], Edge(id=1, start_node=1, end_node=n0, cost=50, reverse_cost=500))
    b0, f0 = adhoc_network[n0]
    assert b0 == adhoc_network[1][0].reversed_edge()
    assert same_edge_p(f0, Edge(id=1, start_node=n0, end_node=n1, cost=10, reverse_cost=100))
    b1, f1 = adhoc_network[n1]
    assert b1 == f0.reversed_edge()
    assert same_edge_p(f1, Edge(id=1, start_node=n1, end_node=n2, cost=40, reverse_cost=400))
    assert n2 == 10
    assert same_edge_p(adhoc_network[n2][0],
                       Edge(id=1, start_node=n1, end_node=n2, cost=40, reverse_cost=400).reversed_edge())
def road_network_route(source_edge_location,
                       target_edge_location,
                       get_edges,
                       max_path_cost=None):
    """
    Like `shortest_path.find_shortest_path`, except that it finds the
    best route from the source edge location to the target edge
    location.

    An edge location is simple 2-tuple (edge, percentage between [0,
    1]) to describe a location along an edge.

    See `shortest_path.find_shortest_path` for more information.
    """
    edge_locations = (source_edge_location, target_edge_location)
    (source_node, target_node), adhoc_network = build_adhoc_network(edge_locations)
    if not adhoc_network:
        # Both locations coincide with real nodes: route directly on the
        # underlying network.
        assert not isinstance(source_node, AdHocNode) and not isinstance(target_node, AdHocNode)
        return find_shortest_path(source_node, target_node, get_edges, max_path_cost)

    def _get_edges(node):
        # Ad hoc nodes exist only in the overlay built above.
        if isinstance(node, AdHocNode):
            return adhoc_network[node]
        adhoc_edges = adhoc_network.get(node)
        if adhoc_edges:
            # Real node adjacent to a split edge: expose both edge sets.
            return itertools.chain(get_edges(node), adhoc_edges)
        else:
            return get_edges(node)

    return find_shortest_path(source_node, target_node, _get_edges, max_path_cost)
def road_network_route_many(source_edge_location,
                            target_edge_locations,
                            get_edges,
                            max_path_cost=None):
    """
    Like `shortest_path.find_many_shortest_paths`, except that it
    finds best routes from the source edge location to a list of
    target edge locations.

    An edge location is simple 2-tuple (edge, percentage between [0,
    1]) to describe a location along an edge.

    See `shortest_path.find_many_shortest_paths` for more information.
    """
    edge_locations = [source_edge_location] + list(target_edge_locations)
    adhoc_nodes, adhoc_network = build_adhoc_network(edge_locations)
    # PyPy doesn't support:
    # source_node, *target_nodes = adhoc_nodes
    source_node, target_nodes = adhoc_nodes[0], adhoc_nodes[1:]
    if not adhoc_network:
        # No ad hoc nodes were inserted, so every location sits on a real
        # node and the original network can be searched directly.
        for node in adhoc_nodes:
            assert not isinstance(node, AdHocNode)
        return find_many_shortest_paths(source_node, target_nodes, get_edges, max_path_cost)

    def _get_edges(node):
        # Ad hoc nodes exist only in the overlay built above.
        if isinstance(node, AdHocNode):
            return adhoc_network[node]
        adhoc_edges = adhoc_network.get(node)
        if not adhoc_edges:
            return get_edges(node)
        # Call get_edges only once (it may be expensive) and guard
        # against a None return, which itertools.chain would reject.
        real_edges = get_edges(node)
        if real_edges is None:
            return adhoc_edges
        return itertools.chain(real_edges, adhoc_edges)

    return find_many_shortest_paths(source_node, target_nodes, _get_edges, max_path_cost)
def test_road_network_route():
# The example from http://en.wikipedia.org/wiki/Dijkstra's_algorithm
e12 = Edge('12', 1, 2, 7, 7)
e13 = Edge('13', 1, 3, 9, 9)
e16 = Edge('16', 1, 6, 14, 14)
e23 = Edge('23', 2, 3, 10, 10)
e24 = Edge('24', 2, 4, 15, 15)
e34 = Edge('34', 3, 4, 11, 11)
e36 = Edge('36', 3, 6, 2, 2)
e45 = Edge('45', 4, 5, 6, 6)
e56 = Edge('56', 5, 6, 9, 9)
# Extra isolated edge
e89 = Edge('89', 8, 9, 2, 1000)
ecircle = Edge('cc', 'c', 'c', 100000, 1)
edges = (e12, e13, e16, e23, e24, e34, e36, e45, e56, e89)
road_network = {
1: (e12, e13, e16),
2: (e12.reversed_edge(), e23, e24),
3: (e13.reversed_edge(), e23.reversed_edge(), e34, e36),
4: (e24.reversed_edge(), e34.reversed_edge(), e45),
5: (e45.reversed_edge(), e56),
6: (e16.reversed_edge(), e36.reversed_edge(), e56.reversed_edge()),
# Extra isolated edges
8: (e89, ),
9: (e89.reversed_edge(),)}
def _get_edges(node):
return road_network.get(node, [])
_AHN = collections.namedtuple('AdhocNodeForTest', 'edge_id, location, reversed')
def _assert_path(path, nodes):
if path or nodes:
assert len(nodes) == len(path) + 1, 'count not matched'
else:
return
path = reversed(path)
nodes = iter(nodes)
last_edge = None
for edge in path:
node = next(nodes)
if isinstance(node, _AHN):
assert node.edge_id == edge.id
assert node.location == edge.start_node.location
assert node.reversed == edge.reversed
else:
assert node == edge.start_node
if last_edge:
assert last_edge.end_node == edge.start_node
last_edge = edge
# Last node
node = next(nodes)
if isinstance(node, _AHN):
assert node.edge_id == edge.id
assert node.location == edge.end_node.location
assert node.reversed == edge.reversed
else:
assert node == edge.end_node
# It should route between 2 locations at different edges
path, cost = road_network_route((e13, 0.5), (e56, 0.5), _get_edges)
_assert_path(path, [_AHN('13', 0.5, False), 3, 6, _AHN('56', 0.5, True)])
assert abs(cost - 11) <= 0.000001
# It should route between 2 locations at the same edge
path, cost = road_network_route((e13, 0.1), (e13, 0.9), _get_edges)
_assert_path(path, [_AHN('13', 0.1, False), _AHN('13', 0.9, False)])
assert abs(cost - 9 * 0.8) <= 0.000001
# It should route between 2 locations at a circle edge (start node == end node) in a reverse way
path1, cost1 = road_network_route((ecircle, 0.2), (ecircle, 0.7), _get_edges)
path2, cost2 = road_network_route((ecircle, 0.2), (ecircle.reversed_edge(), 0.3), _get_edges)
assert path1 == path2 and cost1 == cost2
_assert_path(path1, [_AHN('cc', 0.2, True), 'c', _AHN('cc', 0.7, True)])
assert abs(cost1 - 0.5) <= 0.0000001
# It should give 0 cost if source and target are same location
path, cost = road_network_route((e13, 0.1), (e13.reversed_edge(), 0.9), _get_edges)
_assert_path(path, [_AHN('13', 0.1, True), _AHN('13', 1 - 0.9, True)])
assert abs(cost) <= 0.000001
assert cost == path[0].cost
# It should route for locations at intersections
path, cost = road_network_route((e13, 0), (e13, 1), _get_edges)
_assert_path(path, [1, 3])
assert path[0] == e13
assert cost == e13.cost
# It should not find a path
from nose.tools import assert_raises
assert_raises(PathNotFound, road_network_route, (e13, 0.2), (e24, 0.9), _get_edges, 10)
assert_raises(PathNotFound, road_network_route, (e13, 0), (e89, 0.5), _get_edges)
assert_raises(PathNotFound, road_network_route, (e89, 0.9), (e89, 0.2), _get_edges, 10)
# It should return multiple paths
targets = [(e16, 0.6), (e13, 0.3), (e34, 0.5), (e56, 1)]
results = road_network_route_many((e16, 0.1), targets, _get_edges)
path, cost = results[0]
assert abs(cost - 7) < 0.000001
_assert_path(path, [_AHN('16', 0.1, False), _AHN('16', 0.6, False)])
path, cost = results[1]
assert abs(cost - 4.1) < 0.000001
_assert_path(path, [_AHN('16', 0.1, True), 1, _AHN('13', 0.3, False)])
path, cost = results[2]
assert abs(cost - 15.9) < 0.000001
_assert_path(path, [_AHN('16', 0.1, True), 1, 3, _AHN('34', 0.5, False)])
path, cost = results[3]
assert abs(cost - 12.4) < 0.000001
_assert_path(path, [_AHN('16', 0.1, True), 1, 3, 6])
# It should find paths when multiple targets are on the same edge with the source
targets = [(e16, 0.2), (e16, 0.4), (e16, 1), (e16, 0)]
results = road_network_route_many((e16, 0.8), targets, _get_edges)
path, cost = results[0]
_assert_path(path, [_AHN('16', 0.8, True), _AHN('16', 0.4, True), _AHN('16', 0.2, True)])
assert abs(cost - 8.4) < 0.000001
path, cost = results[1]
_assert_path(path, [_AHN('16', 0.8, True), _AHN('16', 0.4, True)])
assert abs(cost - 5.6) < 0.000001
path, cost = results[2]
_assert_path(path, [_AHN('16', 0.8, False), 6])
assert abs(cost - 2.8) < 0.000001
path, cost = results[3]
_assert_path(path, [_AHN('16', 0.8, True), _AHN('16', 0.4, True), _AHN('16', 0.2, True), 1])
assert abs(cost - 11.2) < 0.000001
# It should find paths on the circle edge
targets = [(ecircle, 0.8), (ecircle, 0.7), (ecircle, 0.1)]
results = road_network_route_many((ecircle, 0.2), targets, _get_edges)
path, cost = results[0]
assert (cost - 0.4) < 0.0000001
_assert_path(path, [_AHN('cc', 0.2, True), _AHN('cc', 0.1, True), 'c', _AHN('cc', 0.8, True)])
path, cost = results[1]
assert (cost - 0.5) < 0.0000001
_assert_path(path, [_AHN('cc', 0.2, True), _AHN('cc', 0.1, True), 'c', _AHN('cc', 0.8, True), _AHN('cc', 0.7, True)])
path, cost = results[2]
assert (cost - 0.1) < 0.0000001
_assert_path(path, [_AHN('cc', 0.2, True), _AHN('cc', 0.1, True)])
# It should not find a path to the isolated edge
targets = [(e13, 0.3), (e89, 0.2), (e34, 0.5)]
results = road_network_route_many((e13, 0.3), targets, _get_edges)
assert results[0][1] >= 0 and results[2][1] >= 0
assert results[1] == (None, -1)
# It should not find a path if the cost exceeds the max_path_cost
results = road_network_route_many((e89, 0.9), [(e89, 0.8), (e89, 0.1)], _get_edges, 200)
path, cost = results[0]
assert abs(cost - 100) < 0.00001
assert results[1] == (None, -1)
# One-to-many routing should be the same as calling one-to-one
# multiple times
import random
source = (e13, random.random())
# Generate 20 locations at each edge
targets = [(edge, random.random()) for edge in edges for _ in range(20)]
def _route_many_hard_way(source, targets):
route_distances = []
for target in targets:
try:
_, route_distance = road_network_route(source, target, _get_edges)
except PathNotFound:
route_distance = -1
route_distances.append(route_distance)
return route_distances
hard_ways = _route_many_hard_way(source, targets)
# Get costs in the second column
easy_ways = list(zip(*road_network_route_many(source, targets, _get_edges)))[1]
for hard_way, easy_way in zip(hard_ways, easy_ways):
assert abs(hard_way - easy_way) < 0.0000000001
| 40.994424
| 121
| 0.606756
|
acfff9cdc0c8718f09bb64bb02401075174e3400
| 26,509
|
py
|
Python
|
src/VarDACAE/fluidity/vtktools.py
|
scheng1992/Data_Assimilation
|
b4d43895229205ee2cd16b15ee20beccb33b71d6
|
[
"MIT"
] | 1
|
2021-11-25T12:46:48.000Z
|
2021-11-25T12:46:48.000Z
|
src/VarDACAE/fluidity/vtktools.py
|
bugsuse/Data_Assimilation
|
2965ccf78951df11f8686282cd6814bae18afde5
|
[
"MIT"
] | null | null | null |
src/VarDACAE/fluidity/vtktools.py
|
bugsuse/Data_Assimilation
|
2965ccf78951df11f8686282cd6814bae18afde5
|
[
"MIT"
] | 2
|
2021-03-02T13:29:34.000Z
|
2022-03-12T11:01:08.000Z
|
#!/usr/bin/env python
"""This file is taken from the Fluidity master branch at the following location:
https://github.com/FluidityProject/multifluids/blob/master/python/vtktools.py
The file has been edited for python2 -> python3
It was easier to import just this file than to have fluidity as a dependency
for this project - since we don't need to run the software in this case"""
import math
import sys
import numpy
import vtk
# All returned arrays are cast into either numpy or numarray arrays
arr=numpy.array
class vtuStruct:
  """Structured grid object to deal with VTK structured grids."""

  def __init__(self, filename = None, sg=None):
    """Creates a vtuStruct object by reading the specified file.

    filename -- path to a .vtu/.pvtu structured-grid file, or None to
                start from an (optionally supplied) existing grid.
    sg       -- an existing vtkStructuredGrid to wrap when no filename
                is given.
    """
    # Bug fix: the original body tested the undefined name 'ugrid'
    # (NameError) -- the parameter is actually called 'sg'.
    if filename is None and sg is None:
      self.ugrid = vtk.vtkStructuredGrid()
    elif filename is None and sg is not None:
      self.ugrid = sg
    else:
      self.gridreader = None
      if filename[-4:] == ".vtu":
        self.gridreader=vtk.vtkXMLStructuredGridReader()
      elif filename[-5:] == ".pvtu":
        self.gridreader=vtk.vtkXMLPStructuredGridReader()
      else:
        raise Exception("ERROR: don't recognise file extension" + filename)
      self.gridreader.SetFileName(filename)
      self.gridreader.Update()
      self.ugrid=self.gridreader.GetOutput()
      if self.ugrid.GetNumberOfPoints() + self.ugrid.GetNumberOfCells() == 0:
        raise Exception("ERROR: No points or cells found after loading vtu " + filename)
    self.filename=filename
class vtu:
"""Unstructured grid object to deal with VTK unstructured grids."""
  def __init__(self, filename = None, ugrid=None):
    """Creates a vtu object by reading the specified file.

    filename -- path to a .vtu/.pvtu unstructured-grid file, or None to
                start from an (optionally supplied) existing grid.
    ugrid    -- an existing vtkUnstructuredGrid to wrap when no filename
                is given.
    """
    if filename is None and ugrid is None:
      # No inputs at all: start from an empty grid.
      self.ugrid = vtk.vtkUnstructuredGrid()
    elif filename is None and ugrid is not None:
      # Wrap the supplied grid directly.
      self.ugrid = ugrid
    else:
      self.gridreader = None
      # Pick the serial or parallel XML reader from the extension.
      if filename[-4:] == ".vtu":
        self.gridreader=vtk.vtkXMLUnstructuredGridReader()
      elif filename[-5:] == ".pvtu":
        self.gridreader=vtk.vtkXMLPUnstructuredGridReader()
      else:
        raise Exception("ERROR: don't recognise file extension" + filename)
      self.gridreader.SetFileName(filename)
      self.gridreader.Update()
      self.ugrid=self.gridreader.GetOutput()
      # An entirely empty grid almost certainly means a bad file.
      if self.ugrid.GetNumberOfPoints() + self.ugrid.GetNumberOfCells() == 0:
        raise Exception("ERROR: No points or cells found after loading vtu " + filename)
    self.filename=filename
def GetScalarField(self, name):
"""Returns an array with the values of the specified scalar field."""
try:
pointdata=self.ugrid.GetPointData()
vtkdata=pointdata.GetScalars(name)
vtkdata.GetNumberOfTuples()
except:
try:
celldata=self.ugrid.GetCellData()
vtkdata=celldata.GetScalars(name)
vtkdata.GetNumberOfTuples()
except:
raise Exception("ERROR: couldn't find point or cell scalar field data with name "+name+" in file "+self.filename+".")
return arr([vtkdata.GetTuple1(i) for i in range(vtkdata.GetNumberOfTuples())])
def GetScalarRange(self, name):
"""Returns the range (min, max) of the specified scalar field."""
try:
pointdata=self.ugrid.GetPointData()
vtkdata=pointdata.GetScalars(name)
vtkdata.GetRange()
except:
try:
celldata=self.ugrid.GetCellData()
vtkdata=celldata.GetScalars(name)
vtkdata.GetRange()
except:
raise Exception("ERROR: couldn't find point or cell scalar field data with name "+name+" in file "+self.filename+".")
return vtkdata.GetRange()
def GetVectorField(self, name):
"""Returns an array with the values of the specified vector field."""
try:
pointdata=self.ugrid.GetPointData()
vtkdata=pointdata.GetScalars(name)
vtkdata.GetNumberOfTuples()
except:
try:
celldata=self.ugrid.GetCellData()
vtkdata=celldata.GetScalars(name)
vtkdata.GetNumberOfTuples()
except:
raise Exception("ERROR: couldn't find point or cell vector field data with name "+name+" in file "+self.filename+".")
return arr([vtkdata.GetTuple3(i) for i in range(vtkdata.GetNumberOfTuples())])
def GetVectorNorm(self, name):
"""Return the field with the norm of the specified vector field."""
v = self.GetVectorField(name)
n = []
try:
from scipy.linalg import norm
except ImportError:
def norm(v):
r = 0.0
for x in v:
r = r + x**2
r = math.sqrt(r)
return r
for node in range(self.ugrid.GetNumberOfPoints()):
n.append(norm(v[node]))
return arr(n)
def GetField(self,name):
"""Returns an array with the values of the specified field."""
try:
pointdata=self.ugrid.GetPointData()
vtkdata=pointdata.GetArray(name)
vtkdata.GetNumberOfTuples()
except:
try:
celldata=self.ugrid.GetCellData()
vtkdata=celldata.GetArray(name)
vtkdata.GetNumberOfTuples()
except:
raise Exception("ERROR: couldn't find point or cell field data with name "+name+" in file "+self.filename+".")
nc=vtkdata.GetNumberOfComponents()
nt=vtkdata.GetNumberOfTuples()
array=arr([vtkdata.GetValue(i) for i in range(nc*nt)])
if nc==9:
return array.reshape(nt,3,3)
elif nc==4:
return array.reshape(nt,2,2)
else:
return array.reshape(nt,nc)
def GetFieldRank(self, name):
"""
Returns the rank of the supplied field.
"""
try:
pointdata=self.ugrid.GetPointData()
vtkdata=pointdata.GetArray(name)
vtkdata.GetNumberOfTuples()
except:
try:
celldata=self.ugrid.GetCellData()
vtkdata=celldata.GetArray(name)
vtkdata.GetNumberOfTuples()
except:
raise Exception("ERROR: couldn't find point or cell field data with name "+name+" in file "+self.filename+".")
comps = vtkdata.GetNumberOfComponents()
if comps == 1:
return 0
elif comps in [2, 3]:
return 1
elif comps in [4, 9]:
return 2
else:
raise Exception("Field rank > 2 encountered")
def Write(self, filename=[]):
"""Writes the grid to a vtu file.
If no filename is specified it will use the name of the file originally
read in, thus overwriting it!
"""
if filename==[]:
filename=self.filename
if filename is None:
raise Exception("No file supplied")
if filename.endswith('pvtu'):
gridwriter=vtk.vtkXMLPUnstructuredGridWriter()
else:
gridwriter=vtk.vtkXMLUnstructuredGridWriter()
gridwriter.SetFileName(filename)
gridwriter.SetInputData(self.ugrid)
gridwriter.Write()
def AddScalarField(self, name, array):
"""Adds a scalar field with the specified name using the values from the array."""
data = vtk.vtkDoubleArray()
data.SetNumberOfValues(len(array))
data.SetName(name)
for i in range(len(array)):
data.SetValue(i, array[i])
if len(array) == self.ugrid.GetNumberOfPoints():
pointdata=self.ugrid.GetPointData()
pointdata.AddArray(data)
pointdata.SetActiveScalars(name)
elif len(array) == self.ugrid.GetNumberOfCells():
celldata=self.ugrid.GetCellData()
celldata.AddArray(data)
celldata.SetActiveScalars(name)
else:
raise Exception("Length neither number of nodes nor number of cells")
def AddVectorField(self, name, array):
"""Adds a vector field with the specified name using the values from the array."""
n=array.size
data = vtk.vtkDoubleArray()
data.SetNumberOfComponents(array.shape[1])
data.SetNumberOfValues(n)
data.SetName(name)
for i in range(n):
data.SetValue(i, array.reshape(n)[i])
if array.shape[0]==self.ugrid.GetNumberOfPoints():
pointdata=self.ugrid.GetPointData()
pointdata.AddArray(data)
pointdata.SetActiveVectors(name)
elif array.shape[0]==self.ugrid.GetNumberOfCells():
celldata=self.ugrid.GetCellData()
celldata.AddArray(data)
else:
raise Exception("Length neither number of nodes nor number of cells")
def AddField(self, name, array):
    """Add a field with an arbitrary number of components under *name*."""
    nvalues = array.size
    shape = arr(array.shape)
    data = vtk.vtkDoubleArray()
    # The number of tuples is shape[0]; the number of components is the
    # product of the remaining dimensions.
    data.SetNumberOfComponents(shape[1:].prod())
    data.SetNumberOfValues(nvalues)
    data.SetName(name)
    flat = array.reshape(nvalues)
    for index in range(nvalues):
        data.SetValue(index, flat[index])
    if shape[0] == self.ugrid.GetNumberOfPoints():
        self.ugrid.GetPointData().AddArray(data)
    elif shape[0] == self.ugrid.GetNumberOfCells():
        self.ugrid.GetCellData().AddArray(data)
    else:
        raise Exception("Length neither number of nodes nor number of cells")
def ApplyProjection(self, projection_x, projection_y, projection_z):
    """Apply a projection to the grid coordinates, overwriting them in place.

    Each argument is a Python expression string evaluated with the current
    node coordinates bound to the local names ``x``, ``y`` and ``z``,
    e.g. projection_x = "x + 0.5*y".

    WARNING: the expressions are passed to eval(); never call this with
    untrusted input.
    """
    npoints = self.ugrid.GetNumberOfPoints ()
    for i in range (npoints):
        (x,y,z) = self.ugrid.GetPoint (i)
        # The eval() calls below rely on the local variables x, y and z
        # assigned above -- do not rename them.
        new_x = eval (projection_x)
        new_y = eval (projection_y)
        new_z = eval (projection_z)
        self.ugrid.GetPoints().SetPoint (i, new_x, new_y, new_z)
def ApplyCoordinateTransformation(self, f):
    """Apply the coordinate transformation *f* to every node, in place.

    *f* is called as ``f(position, t=0)`` with a length-3 array and must
    return the new (x, y, z) position.
    """
    points = self.ugrid.GetPoints()
    for node in range(self.ugrid.GetNumberOfPoints()):
        (x, y, z) = self.ugrid.GetPoint(node)
        moved = f(arr([x, y, z]), t=0)
        points.SetPoint(node, moved[0], moved[1], moved[2])
def ApplyEarthProjection(self):
    """Project Cartesian Earth coordinates to (longitude, latitude, depth).

    Assumes the input geometry is the Earth in Cartesian coordinates;
    node positions are overwritten in place.
    """
    earth_radius = 6378000.0
    rad_to_deg = 180.0 / math.pi
    points = self.ugrid.GetPoints()
    for node in range(self.ugrid.GetNumberOfPoints()):
        (x, y, z) = self.ugrid.GetPoint(node)
        r = math.sqrt(x * x + y * y + z * z)
        depth = r - earth_radius
        longitude = rad_to_deg * math.atan2(y, x)
        latitude = 90.0 - rad_to_deg * math.acos(z / r)
        points.SetPoint(node, longitude, latitude, depth)
def ProbeData(self, coordinates, name):
    """Interpolate field values of field *name* at the given coordinates.

    coordinates is an (n, 3) array. Probe points that fall outside the
    mesh are assigned the value of the nearest node of the input grid.
    """
    # Locator used to find the closest mesh node for invalid probe points.
    locator = vtk.vtkPointLocator()
    locator.SetDataSet(self.ugrid)
    locator.SetTolerance(10.0)
    locator.Update()
    # Build a polydata containing the probe points.
    points = vtk.vtkPoints()
    points.SetDataTypeToDouble()
    ilen, jlen = coordinates.shape
    for i in range(ilen):
        points.InsertNextPoint(coordinates[i][0], coordinates[i][1], coordinates[i][2])
    polydata = vtk.vtkPolyData()
    polydata.SetPoints(points)
    probe = vtk.vtkProbeFilter()
    probe.SetInputData(polydata)
    # BUG FIX: was probe.setSourceData(...) -- Python is case sensitive and
    # vtkProbeFilter has no such attribute, so this always raised
    # AttributeError.
    probe.SetSourceData(self.ugrid)
    probe.Update()
    # Generate a list invalidNodes, containing a map from invalid nodes in
    # the result to their closest nodes in the input.
    valid_ids = probe.GetValidPoints()
    valid_loc = 0
    invalidNodes = []
    for i in range(ilen):
        if valid_ids.GetTuple1(valid_loc) == i:
            valid_loc += 1
        else:
            nearest = locator.FindClosestPoint([coordinates[i][0], coordinates[i][1], coordinates[i][2]])
            invalidNodes.append((i, nearest))
    # Get final updated values.
    pointdata = probe.GetOutput().GetPointData()
    vtkdata = pointdata.GetArray(name)
    nc = vtkdata.GetNumberOfComponents()
    nt = vtkdata.GetNumberOfTuples()
    array = arr([vtkdata.GetValue(i) for i in range(nt * nc)])
    # Fix the point data at invalid nodes using the original field.
    if len(invalidNodes) > 0:
        try:
            oldField = self.ugrid.GetPointData().GetArray(name)
            # Attribute access doubles as an existence check: GetArray
            # returns None for a missing name, raising here.
            components = oldField.GetNumberOfComponents()
        except:
            try:
                oldField = self.ugrid.GetCellData().GetArray(name)
                components = oldField.GetNumberOfComponents()
            except:
                raise Exception("ERROR: couldn't find point or cell field data with name "+name+" in file "+self.filename+".")
        for invalidNode, nearest in invalidNodes:
            for comp in range(nc):
                array[invalidNode * nc + comp] = oldField.GetValue(nearest * nc + comp)
    # Reshape to (ntuples, <per-node value shape>).
    valShape = self.GetField(name)[0].shape
    array.shape = tuple([nt] + list(valShape))
    return array
def RemoveField(self, name):
    """Remove the named point field from the unstructured grid."""
    self.ugrid.GetPointData().RemoveArray(name)
def GetLocations(self):
    """Return an array with the (x, y, z) locations of the nodes."""
    vtk_points = self.ugrid.GetPoints()
    if vtk_points is None:
        # Grid with no points at all: fall back to an empty data array.
        data = vtk.vtkDoubleArray()
    else:
        data = vtk_points.GetData()
    return arr([data.GetTuple3(i) for i in range(data.GetNumberOfTuples())])
def GetCellPoints(self, id):
    """Return an array with the node numbers of cell *id* (ndglno)."""
    ids = vtk.vtkIdList()
    self.ugrid.GetCellPoints(id, ids)
    return arr([ids.GetId(i) for i in range(ids.GetNumberOfIds())])
def GetFieldNames(self):
    """Return the names of the available point fields."""
    pointdata = self.ugrid.GetPointData()
    return [pointdata.GetArrayName(i)
            for i in range(pointdata.GetNumberOfArrays())]
def GetPointCells(self, id):
    """Return an array with the cells which contain node *id*."""
    ids = vtk.vtkIdList()
    self.ugrid.GetPointCells(id, ids)
    return arr([ids.GetId(i) for i in range(ids.GetNumberOfIds())])
def GetPointPoints(self, id):
    """Return the nodes that share a cell with node *id* (duplicates removed)."""
    neighbours = set()
    for cell in self.GetPointCells(id):
        neighbours.update(self.GetCellPoints(cell))
    return arr(list(neighbours))
def GetDistance(self, x, y):
    """Return the distance in physical space between nodes x and y."""
    px = self.ugrid.GetPoint(x)
    py = self.ugrid.GetPoint(y)
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(px, py)))
def Crop(self, min_x, max_x, min_y, max_y, min_z, max_z):
    """Trim the grid to the given bounding box, in place."""
    extractor = vtk.vtkExtractUnstructuredGrid()
    extractor.SetInputData(self.ugrid)
    extractor.SetExtent(min_x, max_x, min_y, max_y, min_z, max_z)
    extractor.Update()
    self.ugrid = extractor.GetOutput()
def IntegrateField(self, field):
    """
    Integrate the supplied scalar field over the mesh, assuming a linear
    representation on a tetrahedral (3D) or triangular (2D) mesh.
    Needs numpy-izing for speed.
    """
    assert field[0].shape in [(), (1,)]
    total = 0.0
    ghost_levels = self.ugrid.GetCellData().GetArray("vtkGhostLevels")
    for cell_index in range(self.ugrid.GetNumberOfCells()):
        # Skip halo (ghost) cells so parallel vtus are not double counted.
        if ghost_levels and ghost_levels.GetTuple1(cell_index) != 0:
            continue
        cell = self.ugrid.GetCell(cell_index)
        corners = cell.GetPoints()
        n_corners = cell.GetNumberOfPoints()
        if n_corners == 4:
            measure = abs(cell.ComputeVolume(corners.GetPoint(0),
                                             corners.GetPoint(1),
                                             corners.GetPoint(2),
                                             corners.GetPoint(3)))
        elif n_corners == 3:
            measure = abs(cell.TriangleArea(corners.GetPoint(0),
                                            corners.GetPoint(1),
                                            corners.GetPoint(2)))
        else:
            raise Exception("Unexpected number of points: " + str(n_corners))
        # Linear basis: each node contributes measure / n_corners of its
        # nodal value.
        cell_ids = cell.GetPointIds()
        for local in range(cell_ids.GetNumberOfIds()):
            node = cell_ids.GetId(local)
            total = total + (measure * field[node] / float(n_corners))
    return total
def GetCellVolume(self, id):
    """Return the area (triangles) or volume (4-point cells) of cell *id*."""
    cell = self.ugrid.GetCell(id)
    pts = cell.GetPoints()
    if isinstance(cell, vtk.vtkTriangle):
        return cell.TriangleArea(pts.GetPoint(0), pts.GetPoint(1), pts.GetPoint(2))
    n = cell.GetNumberOfPoints()
    if n == 4:
        return abs(cell.ComputeVolume(pts.GetPoint(0), pts.GetPoint(1),
                                      pts.GetPoint(2), pts.GetPoint(3)))
    if n == 3:
        return abs(cell.ComputeVolume(pts.GetPoint(0), pts.GetPoint(1),
                                      pts.GetPoint(2)))
    raise Exception("Unexpected number of points")
def GetFieldIntegral(self, name):
    """Integrate the named field over the mesh."""
    return self.IntegrateField(self.GetField(name))
def GetFieldRms(self, name):
    """Return the volume-weighted RMS of the named scalar or vector field."""
    field = self.GetField(name)
    rank = self.GetFieldRank(name)
    if rank == 0:
        normField = arr([value ** 2.0 for value in field])
    elif rank == 1:
        normField = self.GetVectorNorm(name)
    else:
        raise Exception("Cannot calculate norm field for field rank > 1")
    # Normalise by the integral of unity, i.e. the mesh volume.
    volField = arr([1.0] * len(field))
    meanSquare = self.IntegrateField(normField) / self.IntegrateField(volField)
    return float(numpy.sqrt(meanSquare))
def StructuredPointProbe(self, nx, ny, nz, bounding_box=None):
    """Probe the unstructured grid with an nx x ny x nz structured
    points dataset and return the probe output.

    bounding_box is an optional (xmin, xmax, ymin, ymax, zmin, zmax)
    sequence; by default the bounds of the data itself are used.
    """
    probe = vtk.vtkProbeFilter()
    probe.SetSourceData(self.ugrid)
    sgrid = vtk.vtkStructuredPoints()
    # FIX: identity comparison with None ('is None', not '== None'); the
    # dead pre-initialisation of bbox was also dropped.
    if bounding_box is None:
        bbox = self.ugrid.GetBounds()
    else:
        bbox = bounding_box
    sgrid.SetOrigin([bbox[0], bbox[2], bbox[4]])
    sgrid.SetDimensions(nx, ny, nz)
    # Spacing stays 0.0 along any axis with a single sample point.
    spacing = [0.0, 0.0, 0.0]
    if nx > 1: spacing[0] = (bbox[1] - bbox[0]) / (nx - 1.0)
    if ny > 1: spacing[1] = (bbox[3] - bbox[2]) / (ny - 1.0)
    if nz > 1: spacing[2] = (bbox[5] - bbox[4]) / (nz - 1.0)
    sgrid.SetSpacing(spacing)
    probe.SetInputData(sgrid)
    probe.Update()
    return probe.GetOutput()
### Field manipulation methods ###
def ManipulateField(self, fieldName, manipFunc, newFieldName = None):
    """
    Generic field manipulation method. Applies the supplied manipulation
    function manipFunc to the field fieldName. manipFunc must be a function
    of the form:
      def manipFunc(field, index):
        # ...
        return fieldValAtIndex
    """
    values = self.GetField(fieldName)
    # In-place manipulation: drop the old array before re-adding it under
    # the same name.
    if newFieldName is None or newFieldName == fieldName:
        self.RemoveField(fieldName)
        newFieldName = fieldName
    transformed = arr([manipFunc(values, index) for index in range(len(values))])
    self.AddField(newFieldName, transformed)
    return
def AddFieldToField(self, fieldName, array, newFieldName = None):
    """Add *array* element-wise to the field fieldName."""
    def ManipFunc(field, index):
        return field[index] + array[index]
    self.ManipulateField(fieldName, ManipFunc, newFieldName)
    return
def SubFieldFromField(self, fieldName, array, newFieldName = None):
    """Subtract *array* element-wise from the field fieldName."""
    def ManipFunc(field, index):
        return field[index] - array[index]
    self.ManipulateField(fieldName, ManipFunc, newFieldName)
    return
def DotFieldWithField(self, fieldName, array, newFieldName = None):
    """
    Dot product
    """
    def ManipFunc(field, index):
        total = 0.0
        for component, value in enumerate(field[index]):
            total += value * array[index][component]
        return total
    self.ManipulateField(fieldName, ManipFunc, newFieldName)
    return
def CrossFieldWithField(self, fieldName, array, newFieldName = None, postMultiply = True):
    """
    Cross product
    """
    def ManipFunc(field, index):
        left, right = field[index], array[index]
        if not postMultiply:
            left, right = right, left
        return numpy.cross(left, right)
    self.ManipulateField(fieldName, ManipFunc, newFieldName)
    return
def MatMulFieldWithField(self, fieldName, array, newFieldName = None, postMultiply = True):
"""
Matrix multiplication
"""
def ManipFunc(field, index):
if postMultiply:
return numpy.matrix(field[i]) * numpy.matix(array[i])
else:
return numpy.matix(array[i]) * numpy.matrix(field[i])
self.ManipulateField(fieldName, ManipFunc, newFieldName)
return
# Default multiplication is dot product
MulFieldByField = DotFieldWithField
def GetDerivative(self, name):
    """
    Return the derivative of field 'name': a vector field if 'name' is
    scalar, a tensor field if 'name' is a vector. The field 'name' has to
    be point-wise data; the returned array gives a cell-wise derivative.
    """
    deriv = vtk.vtkCellDerivatives()
    deriv.SetInputData(self.ugrid)
    pointdata = self.ugrid.GetPointData()
    ncomponents = pointdata.GetArray(name).GetNumberOfComponents()
    if ncomponents == 1:
        # Scalar input: the gradient is one vector per cell.
        deriv.SetVectorModeToComputeGradient()
        deriv.SetTensorModeToPassTensors()
        pointdata.SetActiveScalars(name)
        deriv.Update()
        data = deriv.GetUnstructuredGridOutput().GetCellData().GetArray('ScalarGradient')
        return arr([data.GetTuple3(i) for i in range(data.GetNumberOfTuples())])
    else:
        # Vector input: the gradient is one tensor per cell.
        deriv.SetTensorModeToComputeGradient()
        deriv.SetVectorModeToPassVectors()
        pointdata.SetActiveVectors(name)
        deriv.Update()
        data = deriv.GetUnstructuredGridOutput().GetCellData().GetArray('VectorGradient')
        return arr([data.GetTuple9(i) for i in range(data.GetNumberOfTuples())])
def GetVorticity(self, name):
    """
    Return the vorticity of the vector field 'name'. The field has to be
    point-wise data; the returned array is cell-wise.
    """
    deriv = vtk.vtkCellDerivatives()
    deriv.SetInputData(self.ugrid)
    pointdata = self.ugrid.GetPointData()
    deriv.SetVectorModeToComputeVorticity()
    deriv.SetTensorModeToPassTensors()
    pointdata.SetActiveVectors(name)
    deriv.Update()
    data = deriv.GetUnstructuredGridOutput().GetCellData().GetArray('VectorGradient')
    return arr([data.GetTuple3(i) for i in range(data.GetNumberOfTuples())])
def CellDataToPointData(self):
    """
    Transform all cell-wise fields in the vtu to point-wise fields; the
    existing cell-wise fields are kept.
    """
    converter = vtk.vtkCellDataToPointData()
    converter.SetInputData(self.ugrid)
    converter.PassCellDataOn()
    converter.Update()
    self.ugrid = converter.GetUnstructuredGridOutput()
def VtuMatchLocations(vtu1, vtu2, tolerance = 1.0e-6):
    """
    Check that the node locations in the supplied vtus match exactly, in
    the same order. Returns True if they match, False otherwise.
    """
    locations1 = vtu1.GetLocations().tolist()
    locations2 = vtu2.GetLocations()
    if len(locations1) != len(locations2):
        return False
    for point1, point2 in zip(locations1, locations2):
        if len(point1) != len(point2):
            return False
        for coord1, coord2 in zip(point1, point2):
            if abs(coord1 - coord2) > tolerance:
                return False
    return True
def VtuMatchLocationsArbitrary(vtu1, vtu2, tolerance = 1.0e-6):
    """
    Check that the locations in the supplied vtus match, returning True if
    they match and False otherwise. The locations may be in a different
    order.
    """
    locations1 = vtu1.GetLocations()
    locations2 = vtu2.GetLocations()
    if not locations1.shape == locations2.shape:
        return False
    # Flush values within rounding error of zero to exactly zero so the
    # lexical sort below is stable against sign noise around 0.0.
    # BUG FIX: numpy.float was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin float is the documented replacement.
    epsilon = numpy.ones(locations1.shape[1]) * numpy.finfo(float).eps
    for j in range(locations1.shape[1]):
        epsilon[j] = epsilon[j] * (locations1[:, j].max() - locations1[:, j].min())
    for i in range(len(locations1)):
        for j in range(len(locations1[i])):
            if abs(locations1[i][j]) < epsilon[j]: locations1[i][j] = 0.0
            if abs(locations2[i][j]) < epsilon[j]: locations2[i][j] = 0.0
    # Lexical sort on x, y and z coordinates resp. of locations1 and
    # locations2; both should then be in the same order, so the largest
    # elementwise difference is meaningful.
    sort_index1 = numpy.lexsort(locations1.T)
    sort_index2 = numpy.lexsort(locations2.T)
    return abs(locations1[sort_index1] - locations2[sort_index2]).max() < tolerance
def VtuDiff(vtu1, vtu2, filename = None):
    """
    Return a vtu whose fields are the difference vtu1 - vtu2, for every
    field name common to both inputs; fields present in only one input are
    dropped. When the node locations differ, vtu2's fields are first probed
    at vtu1's node locations; otherwise the values are subtracted directly.
    """
    resultVtu = vtu()
    resultVtu.filename = filename
    # Probing is only needed when the two point sets do not line up exactly.
    needsProbe = not VtuMatchLocations(vtu1, vtu2)
    # Start from a deep copy of the first grid, then subtract vtu2's data.
    resultVtu.ugrid.DeepCopy(vtu1.ugrid)
    names2 = vtu2.GetFieldNames()
    for fieldName in vtu1.GetFieldNames():
        if fieldName in names2:
            if needsProbe:
                otherField = vtu2.ProbeData(vtu1.GetLocations(), fieldName)
            else:
                otherField = vtu2.GetField(fieldName)
            resultVtu.SubFieldFromField(fieldName, otherField)
        else:
            resultVtu.RemoveField(fieldName)
    return resultVtu
def usage():
    """Print the command line help for running vtktools as a script."""
    for line in ('Usage:',
                 'COMMAND LINE: vtktools.py [-h] [-p] [-e var1,var2, ...] INPUT_FILENAME',
                 '',
                 'INPUT_FILENAME:',
                 ' The input file name.',
                 '',
                 'OPTIONS:',
                 ' -h Prints this usage message.',
                 ' -p Converts the coordinates from xyz to latitude and longitude.',
                 ' -e Extracts the data point from the variables provided.'):
        print(line)
if __name__ == "__main__":
    import vtktools
    import math
    import getopt
    optlist, args = getopt.getopt(sys.argv[1:], 'hpe:')
    # Parse the options before touching the input file so that -h works on
    # its own.
    LongLat = False
    # BUG FIX: scalars was left unbound when -e was not supplied, raising a
    # NameError at the 'if(scalars)' test below.
    scalars = []
    for o, a in optlist:
        if o == '-h':
            usage()
            # A help request should not fall through to file processing.
            sys.exit(0)
        elif o == '-p':
            LongLat = True
        elif o == '-e':
            scalars = a.strip().split(",")
    v = vtktools.vtu(args[0])
    # Project domain if necessary.
    if LongLat:
        v.ApplyEarthProjection()
    # Extract and print the requested variables, one line per node.
    if scalars:
        npoints = v.ugrid.GetNumberOfPoints()
        for i in range(npoints):
            (x, y, z) = v.ugrid.GetPoint(i)
            line = "%lf " % x + "%lf " % y
            for scalar in scalars:
                line = line + " %lf" % v.ugrid.GetPointData().GetArray(scalar).GetTuple1(i)
            print(line)
| 33.302764
| 125
| 0.666151
|
acfffa34492bc826a8a3470e4db7baac64516d7a
| 56,384
|
py
|
Python
|
keystone/cmd/cli.py
|
lostmap/keystone
|
9a9022600e01ea09131cf194ffa5c1757ffeb24f
|
[
"Apache-2.0"
] | null | null | null |
keystone/cmd/cli.py
|
lostmap/keystone
|
9a9022600e01ea09131cf194ffa5c1757ffeb24f
|
[
"Apache-2.0"
] | null | null | null |
keystone/cmd/cli.py
|
lostmap/keystone
|
9a9022600e01ea09131cf194ffa5c1757ffeb24f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import print_function
import argparse
import datetime
import os
import sys
import uuid
import migrate
from oslo_config import cfg
from oslo_db.sqlalchemy import migration
from oslo_log import log
from oslo_serialization import jsonutils
import pbr.version
from keystone.cmd import bootstrap
from keystone.cmd import doctor
from keystone.common import driver_hints
from keystone.common import fernet_utils
from keystone.common import jwt_utils
from keystone.common import sql
from keystone.common.sql import upgrades
from keystone.common import utils
import keystone.conf
from keystone.credential.providers import fernet as credential_fernet
from keystone import exception
from keystone.federation import idp
from keystone.federation import utils as mapping_engine
from keystone.i18n import _
from keystone.server import backends
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
class BaseApp(object):
    """Base class for keystone-manage sub-commands."""

    # Sub-command name; concrete commands must override this.
    name = None

    @classmethod
    def add_argument_parser(cls, subparsers):
        """Register this command on *subparsers* and bind it as the handler."""
        parser = subparsers.add_parser(cls.name, help=cls.__doc__)
        parser.set_defaults(cmd_class=cls)
        return parser
class BootStrap(BaseApp):
    """Perform the basic bootstrap process."""

    name = "bootstrap"

    def __init__(self):
        # Delegates the actual resource creation to the bootstrap module.
        self.bootstrapper = bootstrap.Bootstrapper()

    @classmethod
    def add_argument_parser(cls, subparsers):
        """Register the bootstrap command line options.

        Each option's metavar names the OS_BOOTSTRAP_* environment
        variable that may be used instead (see do_bootstrap below).
        """
        parser = super(BootStrap, cls).add_argument_parser(subparsers)
        parser.add_argument('--bootstrap-username', default='admin',
                            metavar='OS_BOOTSTRAP_USERNAME',
                            help=('The username of the initial keystone '
                                  'user during bootstrap process.'))
        # NOTE(morganfainberg): See below for ENV Variable that can be used
        # in lieu of the command-line arguments.
        parser.add_argument('--bootstrap-password', default=None,
                            metavar='OS_BOOTSTRAP_PASSWORD',
                            help='The bootstrap user password')
        parser.add_argument('--bootstrap-project-name', default='admin',
                            metavar='OS_BOOTSTRAP_PROJECT_NAME',
                            help=('The initial project created during the '
                                  'keystone bootstrap process.'))
        parser.add_argument('--bootstrap-role-name', default='admin',
                            metavar='OS_BOOTSTRAP_ROLE_NAME',
                            help=('The initial role-name created during the '
                                  'keystone bootstrap process.'))
        parser.add_argument('--bootstrap-service-name', default='keystone',
                            metavar='OS_BOOTSTRAP_SERVICE_NAME',
                            help=('The initial name for the initial identity '
                                  'service created during the keystone '
                                  'bootstrap process.'))
        parser.add_argument('--bootstrap-admin-url',
                            metavar='OS_BOOTSTRAP_ADMIN_URL',
                            help=('The initial identity admin url created '
                                  'during the keystone bootstrap process. '
                                  'e.g. http://127.0.0.1:5000/v3'))
        parser.add_argument('--bootstrap-public-url',
                            metavar='OS_BOOTSTRAP_PUBLIC_URL',
                            help=('The initial identity public url created '
                                  'during the keystone bootstrap process. '
                                  'e.g. http://127.0.0.1:5000/v3'))
        parser.add_argument('--bootstrap-internal-url',
                            metavar='OS_BOOTSTRAP_INTERNAL_URL',
                            help=('The initial identity internal url created '
                                  'during the keystone bootstrap process. '
                                  'e.g. http://127.0.0.1:5000/v3'))
        parser.add_argument('--bootstrap-region-id',
                            metavar='OS_BOOTSTRAP_REGION_ID',
                            help=('The initial region_id endpoints will be '
                                  'placed in during the keystone bootstrap '
                                  'process.'))
        parser.add_argument('--immutable-roles',
                            default=True,
                            action='store_true',
                            help=('Whether default roles (admin, member, and '
                                  'reader) should be immutable. This is the '
                                  'default.'))
        parser.add_argument('--no-immutable-roles',
                            default=False,
                            action='store_true',
                            help=('Whether default roles (admin, member, and '
                                  'reader) should be immutable. Immutable '
                                  'default roles is the default, use this '
                                  'flag to opt out of immutable default '
                                  'roles.'))
        return parser

    def do_bootstrap(self):
        """Perform the bootstrap actions.
        Create bootstrap user, project, and role so that CMS, humans, or
        scripts can continue to perform initial setup (domains, projects,
        services, endpoints, etc) of Keystone when standing up a new
        deployment.
        """
        # Each setting prefers the OS_BOOTSTRAP_* environment variable and
        # falls back to the corresponding command line option.
        self.username = (
            os.environ.get('OS_BOOTSTRAP_USERNAME') or
            CONF.command.bootstrap_username)
        self.project_name = (
            os.environ.get('OS_BOOTSTRAP_PROJECT_NAME') or
            CONF.command.bootstrap_project_name)
        self.role_name = (
            os.environ.get('OS_BOOTSTRAP_ROLE_NAME') or
            CONF.command.bootstrap_role_name)
        self.password = (
            os.environ.get('OS_BOOTSTRAP_PASSWORD') or
            CONF.command.bootstrap_password)
        self.service_name = (
            os.environ.get('OS_BOOTSTRAP_SERVICE_NAME') or
            CONF.command.bootstrap_service_name)
        self.admin_url = (
            os.environ.get('OS_BOOTSTRAP_ADMIN_URL') or
            CONF.command.bootstrap_admin_url)
        self.public_url = (
            os.environ.get('OS_BOOTSTRAP_PUBLIC_URL') or
            CONF.command.bootstrap_public_url)
        self.internal_url = (
            os.environ.get('OS_BOOTSTRAP_INTERNAL_URL') or
            CONF.command.bootstrap_internal_url)
        self.region_id = (
            os.environ.get('OS_BOOTSTRAP_REGION_ID') or
            CONF.command.bootstrap_region_id)
        self.service_id = None
        self.endpoints = None
        # A password is the only mandatory input.
        if self.password is None:
            print(_('ERROR: Either --bootstrap-password argument or '
                    'OS_BOOTSTRAP_PASSWORD must be set.'))
            sys.exit(1)
        # Hand everything over to the bootstrapper and run it.
        self.bootstrapper.admin_password = self.password
        self.bootstrapper.admin_username = self.username
        self.bootstrapper.project_name = self.project_name
        self.bootstrapper.admin_role_name = self.role_name
        self.bootstrapper.service_name = self.service_name
        self.bootstrapper.service_id = self.service_id
        self.bootstrapper.admin_url = self.admin_url
        self.bootstrapper.public_url = self.public_url
        self.bootstrapper.internal_url = self.internal_url
        self.bootstrapper.region_id = self.region_id
        # --no-immutable-roles wins over the (always-True) --immutable-roles.
        if CONF.command.no_immutable_roles:
            self.bootstrapper.immutable_roles = False
        else:
            self.bootstrapper.immutable_roles = True
        self.bootstrapper.bootstrap()
        # Expose the ids of the created resources for callers/tests.
        self.reader_role_id = self.bootstrapper.reader_role_id
        self.member_role_id = self.bootstrapper.member_role_id
        self.role_id = self.bootstrapper.admin_role_id
        self.project_id = self.bootstrapper.project_id

    @classmethod
    def main(cls):
        klass = cls()
        klass.do_bootstrap()
class Doctor(BaseApp):
    """Diagnose common problems with keystone deployments."""

    name = 'doctor'

    @classmethod
    def add_argument_parser(cls, subparsers):
        # No extra options beyond what BaseApp registers.
        return super(Doctor, cls).add_argument_parser(subparsers)

    @staticmethod
    def main():
        # The exit code is non-zero whenever any symptom is detected.
        raise SystemExit(doctor.diagnose())
def assert_not_extension(extension):
    """Fail with an explanatory message if a deprecated --extension was given."""
    if not extension:
        return
    print(_("All extensions have been moved into keystone core and as "
            "such its migrations are maintained by the main keystone "
            "database control. Use the command: keystone-manage "
            "db_sync"))
    raise RuntimeError
class DbSync(BaseApp):
    """Sync the database."""

    name = 'db_sync'

    @classmethod
    def add_argument_parser(cls, subparsers):
        parser = super(DbSync, cls).add_argument_parser(subparsers)
        parser.add_argument('version', default=None, nargs='?',
                            help=('Migrate the database up to a specified '
                                  'version. If not provided, db_sync will '
                                  'migrate the database to the latest known '
                                  'version. Schema downgrades are not '
                                  'supported.'))
        parser.add_argument('--extension', default=None,
                            help=('This is a deprecated option to migrate a '
                                  'specified extension. Since extensions are '
                                  'now part of the main repository, '
                                  'specifying db_sync without this option '
                                  'will cause all extensions to be migrated.'))
        # The expand/migrate/contract/check phases are mutually exclusive.
        group = parser.add_mutually_exclusive_group()
        group.add_argument('--expand', default=False, action='store_true',
                           help=('Expand the database schema in preparation '
                                 'for data migration.'))
        group.add_argument('--migrate', default=False,
                           action='store_true',
                           help=('Copy all data that needs to be migrated '
                                 'within the database ahead of starting the '
                                 'first keystone node upgraded to the new '
                                 'release. This command should be run '
                                 'after the --expand command. Once the '
                                 '--migrate command has completed, you can '
                                 'upgrade all your keystone nodes to the new '
                                 'release and restart them.'))
        group.add_argument('--contract', default=False, action='store_true',
                           help=('Remove any database tables and columns '
                                 'that are no longer required. This command '
                                 'should be run after all keystone nodes are '
                                 'running the new release.'))
        group.add_argument('--check', default=False, action='store_true',
                           help=('Check for outstanding database actions that '
                                 'still need to be executed. This command can '
                                 'be used to verify the condition of the '
                                 'current database state.'))
        return parser

    @classmethod
    def check_db_sync_status(cls):
        """Compare the expand/migrate/contract repo versions.

        Returns a status code describing the next required action:
        0 = fully up to date, 1 = repos out of order (out of sync),
        2 = run --expand, 3 = run --migrate, 4 = run --contract.
        """
        status = 0
        try:
            expand_version = upgrades.get_db_version(repo='expand_repo')
        except migration.exception.DBMigrationError:
            # No version table at all: the database has never been expanded.
            LOG.info('Your database is not currently under version '
                     'control or the database is already controlled. Your '
                     'first step is to run `keystone-manage db_sync '
                     '--expand`.')
            return 2
        try:
            migrate_version = upgrades.get_db_version(
                repo='data_migration_repo')
        except migration.exception.DBMigrationError:
            migrate_version = 0
        try:
            contract_version = upgrades.get_db_version(repo='contract_repo')
        except migration.exception.DBMigrationError:
            contract_version = 0
        # Latest migration script shipped with this release of keystone.
        repo = migrate.versioning.repository.Repository(
            upgrades.find_repo('expand_repo'))
        migration_script_version = int(max(repo.versions.versions))
        # Sanity: the phases must have run in expand >= migrate >= contract
        # order; anything else means the repos are out of sync.
        if (contract_version > migrate_version or migrate_version >
                expand_version):
            LOG.info('Your database is out of sync. For more information '
                     'refer to https://docs.openstack.org/keystone/'
                     'latest/admin/identity-upgrading.html')
            status = 1
        elif migration_script_version > expand_version:
            LOG.info('Your database is not up to date. Your first step is '
                     'to run `keystone-manage db_sync --expand`.')
            status = 2
        elif expand_version > migrate_version:
            LOG.info('Expand version is ahead of migrate. Your next step '
                     'is to run `keystone-manage db_sync --migrate`.')
            status = 3
        elif migrate_version > contract_version:
            LOG.info('Migrate version is ahead of contract. Your next '
                     'step is to run `keystone-manage db_sync --contract`.')
            status = 4
        elif (migration_script_version == expand_version == migrate_version ==
                contract_version):
            LOG.info('All db_sync commands are upgraded to the same '
                     'version and up-to-date.')
        LOG.info('The latest installed migration script version is: '
                 '%(script)d.\nCurrent repository versions:\nExpand: '
                 '%(expand)d \nMigrate: %(migrate)d\nContract: '
                 '%(contract)d', {'script': migration_script_version,
                                  'expand': expand_version,
                                  'migrate': migrate_version,
                                  'contract': contract_version})
        return status

    @staticmethod
    def main():
        assert_not_extension(CONF.command.extension)
        # It is possible to run expand and migrate at the same time,
        # expand needs to run first however.
        if CONF.command.check:
            sys.exit(DbSync.check_db_sync_status())
        elif CONF.command.expand and CONF.command.migrate:
            upgrades.expand_schema()
            upgrades.migrate_data()
        elif CONF.command.expand:
            upgrades.expand_schema()
        elif CONF.command.migrate:
            upgrades.migrate_data()
        elif CONF.command.contract:
            upgrades.contract_schema()
        else:
            # No phase flag given: perform a full offline sync, optionally
            # up to the requested version.
            upgrades.offline_sync_database_to_version(
                CONF.command.version)
class DbVersion(BaseApp):
    """Print the current migration version of the database."""

    name = 'db_version'

    @classmethod
    def add_argument_parser(cls, subparsers):
        parser = super(DbVersion, cls).add_argument_parser(subparsers)
        parser.add_argument('--extension', default=None,
                            help=('This is a deprecated option to print the '
                                  'version of a specified extension. Since '
                                  'extensions are now part of the main '
                                  'repository, the version of an extension is '
                                  'implicit in the version of the main '
                                  'repository.'))
        # Consistency fix: every other subcommand returns the parser from
        # add_argument_parser; this one previously fell off the end and
        # returned None.
        return parser

    @staticmethod
    def main():
        assert_not_extension(CONF.command.extension)
        print(upgrades.get_db_version())
class BasePermissionsSetup(BaseApp):
    """Common user/group setup for file permissions."""

    @classmethod
    def add_argument_parser(cls, subparsers):
        parser = super(BasePermissionsSetup,
                       cls).add_argument_parser(subparsers)
        # When running as root the target ownership must be stated
        # explicitly; otherwise the invoking user/group is acceptable.
        running_as_root = (os.geteuid() == 0)
        parser.add_argument('--keystone-user', required=running_as_root)
        parser.add_argument('--keystone-group', required=running_as_root)
        return parser

    @staticmethod
    def get_user_group():
        """Resolve --keystone-user/--keystone-group to numeric uid/gid.

        Returns (uid, gid); either may be None when the corresponding
        option was not supplied. Raises ValueError for an unknown name.
        """
        keystone_user_id = None
        keystone_group_id = None
        try:
            a = CONF.command.keystone_user
            if a:
                keystone_user_id = utils.get_unix_user(a)[0]
        except KeyError:
            raise ValueError("Unknown user '%s' in --keystone-user" % a)
        try:
            a = CONF.command.keystone_group
            if a:
                keystone_group_id = utils.get_unix_group(a)[0]
        except KeyError:
            raise ValueError("Unknown group '%s' in --keystone-group" % a)
        return keystone_user_id, keystone_group_id

    @classmethod
    def initialize_fernet_repository(
            cls, keystone_user_id, keystone_group_id, config_group=None):
        """Create (if needed) and seed the key repository of *config_group*."""
        conf_group = getattr(CONF, config_group)
        futils = fernet_utils.FernetUtils(
            conf_group.key_repository,
            conf_group.max_active_keys,
            config_group
        )
        futils.create_key_directory(keystone_user_id, keystone_group_id)
        # Only seed keys if the repository is usable and writable.
        if futils.validate_key_repository(requires_write=True):
            futils.initialize_key_repository(
                keystone_user_id, keystone_group_id)

    @classmethod
    def rotate_fernet_repository(
            cls, keystone_user_id, keystone_group_id, config_group=None):
        """Rotate the keys in the repository configured under *config_group*."""
        conf_group = getattr(CONF, config_group)
        futils = fernet_utils.FernetUtils(
            conf_group.key_repository,
            conf_group.max_active_keys,
            config_group
        )
        if futils.validate_key_repository(requires_write=True):
            futils.rotate_keys(keystone_user_id, keystone_group_id)
class FernetSetup(BasePermissionsSetup):
    """Setup key repositories for Fernet tokens and auth receipts.

    This also creates a primary key used for both creating and validating
    Fernet tokens and auth receipts. To improve security, you should rotate
    your keys (using keystone-manage fernet_rotate, for example).
    """

    name = 'fernet_setup'

    @classmethod
    def main(cls):
        user_id, group_id = cls.get_user_group()
        cls.initialize_fernet_repository(user_id, group_id, 'fernet_tokens')
        token_repo = os.path.abspath(CONF.fernet_tokens.key_repository)
        receipt_repo = os.path.abspath(CONF.fernet_receipts.key_repository)
        if token_repo != receipt_repo:
            # Receipts live in their own repository: set it up as well.
            cls.initialize_fernet_repository(
                user_id, group_id, 'fernet_receipts')
        elif (CONF.fernet_tokens.max_active_keys !=
                CONF.fernet_receipts.max_active_keys):
            # WARNING(adriant): If the directories are the same,
            # 'max_active_keys' is ignored from fernet_receipts in favor of
            # fernet_tokens to avoid a potential mismatch. Only if the
            # directories are different do we create a different one for
            # receipts, and then respect 'max_active_keys' for receipts.
            LOG.warning(
                "Receipt and Token fernet key directories are the same "
                "but `max_active_keys` is different. Receipt "
                "`max_active_keys` will be ignored in favor of Token "
                "`max_active_keys`."
            )
class FernetRotate(BasePermissionsSetup):
    """Rotate Fernet encryption keys.

    This assumes you have already run keystone-manage fernet_setup.

    A new primary key is placed into rotation, which is used for new tokens.
    The old primary key is demoted to secondary, which can then still be used
    for validating tokens. Excess secondary keys (beyond [fernet_tokens]
    max_active_keys) are revoked. Revoked keys are permanently deleted. A new
    staged key will be created and used to validate tokens. The next time key
    rotation takes place, the staged key will be put into rotation as the
    primary key.

    Rotating keys too frequently, or with [fernet_tokens] max_active_keys set
    too low, will cause tokens to become invalid prior to their expiration.
    """

    name = 'fernet_rotate'

    @classmethod
    def main(cls):
        keystone_user_id, keystone_group_id = cls.get_user_group()
        cls.rotate_fernet_repository(
            keystone_user_id, keystone_group_id, 'fernet_tokens')

        token_repo = os.path.abspath(CONF.fernet_tokens.key_repository)
        receipt_repo = os.path.abspath(CONF.fernet_receipts.key_repository)
        # Receipts only need a rotation of their own when they live in a
        # separate key repository.
        if token_repo != receipt_repo:
            cls.rotate_fernet_repository(
                keystone_user_id, keystone_group_id, 'fernet_receipts')
class CreateJWSKeyPair(BasePermissionsSetup):
    """Create a key pair for signing and validating JWS tokens.

    This command creates a public and private key pair to use for signing and
    validating JWS token signatures. The key pair is written to the directory
    where the command is invoked.
    """

    name = 'create_jws_keypair'

    @classmethod
    def add_argument_parser(cls, subparsers):
        parser = super(CreateJWSKeyPair, cls).add_argument_parser(subparsers)
        parser.add_argument(
            '--force', action='store_true',
            help=('Forcibly overwrite keys if they already exist')
        )
        return parser

    @classmethod
    def main(cls):
        cwd = os.getcwd()
        private_key_path = os.path.join(cwd, 'private.pem')
        public_key_path = os.path.join(cwd, 'public.pem')

        # Refuse to clobber an existing key pair unless --force was given.
        # The private key is checked first, matching the original order.
        for path, message in (
                (private_key_path, _('Private key %(path)s already exists')),
                (public_key_path, _('Public key %(path)s already exists'))):
            if os.path.isfile(path) and not CONF.command.force:
                raise SystemExit(message % {'path': path})

        jwt_utils.create_jws_keypair(private_key_path, public_key_path)
class TokenSetup(BasePermissionsSetup):
    """Setup a key repository for tokens.

    This also creates a primary key used for both creating and validating
    tokens. To improve security, you should rotate your keys (using
    keystone-manage token_rotate, for example).
    """

    name = 'token_setup'

    @classmethod
    def main(cls):
        # Delegate to the shared fernet helper using the token config group.
        user_id, group_id = cls.get_user_group()
        cls.initialize_fernet_repository(user_id, group_id, 'fernet_tokens')
class TokenRotate(BasePermissionsSetup):
    """Rotate token encryption keys.

    This assumes you have already run keystone-manage token_setup.

    A new primary key is placed into rotation, which is used for new tokens.
    The old primary key is demoted to secondary, which can then still be used
    for validating tokens. Excess secondary keys (beyond [token]
    max_active_keys) are revoked. Revoked keys are permanently deleted. A new
    staged key will be created and used to validate tokens. The next time key
    rotation takes place, the staged key will be put into rotation as the
    primary key.

    Rotating keys too frequently, or with [token] max_active_keys set
    too low, will cause tokens to become invalid prior to their expiration.
    """

    name = 'token_rotate'

    @classmethod
    def main(cls):
        # Delegate to the shared fernet helper using the token config group.
        user_id, group_id = cls.get_user_group()
        cls.rotate_fernet_repository(user_id, group_id, 'fernet_tokens')
class ReceiptSetup(BasePermissionsSetup):
    """Setup a key repository for auth receipts.

    This also creates a primary key used for both creating and validating
    receipts. To improve security, you should rotate your keys (using
    keystone-manage receipt_rotate, for example).
    """

    name = 'receipt_setup'

    @classmethod
    def main(cls):
        # Delegate to the shared fernet helper using the receipt config group.
        user_id, group_id = cls.get_user_group()
        cls.initialize_fernet_repository(user_id, group_id, 'fernet_receipts')
class ReceiptRotate(BasePermissionsSetup):
    """Rotate auth receipts encryption keys.

    This assumes you have already run keystone-manage receipt_setup.

    A new primary key is placed into rotation, which is used for new receipts.
    The old primary key is demoted to secondary, which can then still be used
    for validating receipts. Excess secondary keys (beyond [receipt]
    max_active_keys) are revoked. Revoked keys are permanently deleted. A new
    staged key will be created and used to validate receipts. The next time key
    rotation takes place, the staged key will be put into rotation as the
    primary key.

    Rotating keys too frequently, or with [receipt] max_active_keys set
    too low, will cause receipts to become invalid prior to their expiration.
    """

    name = 'receipt_rotate'

    @classmethod
    def main(cls):
        # Delegate to the shared fernet helper using the receipt config group.
        user_id, group_id = cls.get_user_group()
        cls.rotate_fernet_repository(user_id, group_id, 'fernet_receipts')
class CredentialSetup(BasePermissionsSetup):
    """Setup a Fernet key repository for credential encryption.

    The purpose of this command is very similar to `keystone-manage
    fernet_setup` only the keys included in this repository are for encrypting
    and decrypting credential secrets instead of token payloads. Keys can be
    rotated using `keystone-manage credential_rotate`.
    """

    name = 'credential_setup'

    @classmethod
    def main(cls):
        keystone_user_id, keystone_group_id = cls.get_user_group()
        key_utils = fernet_utils.FernetUtils(
            CONF.credential.key_repository,
            credential_fernet.MAX_ACTIVE_KEYS,
            'credential'
        )
        # Create the repository directory first, then populate it with an
        # initial primary key if the repository checks out as writable.
        key_utils.create_key_directory(keystone_user_id, keystone_group_id)
        if key_utils.validate_key_repository(requires_write=True):
            key_utils.initialize_key_repository(
                keystone_user_id, keystone_group_id)
class CredentialRotate(BasePermissionsSetup):
    """Rotate Fernet encryption keys for credential encryption.

    This assumes you have already run `keystone-manage credential_setup`.

    A new primary key is placed into rotation only if all credentials are
    encrypted with the current primary key. If any credentials are encrypted
    with a secondary key the rotation will abort. This protects against
    removing a key that is still required to decrypt credentials. Once a key is
    removed from the repository, it is impossible to recover the original data
    without restoring from a backup external to keystone (more on backups
    below). To make sure all credentials are encrypted with the latest primary
    key, please see the `keystone-manage credential_migrate` command. Since the
    maximum number of keys in the credential repository is 3, once all
    credentials are encrypted with the latest primary key we can safely
    introduce a new primary key. All credentials will still be decryptable
    since they are all encrypted with the only secondary key in the repository.

    It is imperative to understand the importance of backing up keys used to
    encrypt credentials. In the event keys are overrotated, applying a key
    repository from backup can help recover otherwise useless credentials.
    Persisting snapshots of the key repository in secure and encrypted source
    control, or a dedicated key management system are good examples of
    encryption key backups.

    The `keystone-manage credential_rotate` and `keystone-manage
    credential_migrate` commands are intended to be done in sequence. After
    performing a rotation, a migration must be done before performing another
    rotation. This ensures we don't over-rotate encryption keys.
    """

    name = 'credential_rotate'

    def __init__(self):
        # Load the credential backends so the pre-rotation check can inspect
        # every stored credential.
        drivers = backends.load_backends()
        self.credential_provider_api = drivers['credential_provider_api']
        self.credential_api = drivers['credential_api']

    def validate_primary_key(self):
        """Exit unless every credential is encrypted with the primary key.

        :raises SystemExit: if any credential still carries a key hash other
            than the current primary key's hash.
        """
        crypto, keys = credential_fernet.get_multi_fernet_keys()
        primary_key_hash = credential_fernet.primary_key_hash(keys)
        credentials = self.credential_api.driver.list_credentials(
            driver_hints.Hints()
        )
        for credential in credentials:
            # A mismatched hash means this credential is still encrypted with
            # a secondary key; rotating now could make it undecryptable later.
            if credential['key_hash'] != primary_key_hash:
                msg = _('Unable to rotate credential keys because not all '
                        'credentials are encrypted with the primary key. '
                        'Please make sure all credentials have been encrypted '
                        'with the primary key using `keystone-manage '
                        'credential_migrate`.')
                raise SystemExit(msg)

    @classmethod
    def main(cls):
        futils = fernet_utils.FernetUtils(
            CONF.credential.key_repository,
            credential_fernet.MAX_ACTIVE_KEYS,
            'credential'
        )
        keystone_user_id, keystone_group_id = cls.get_user_group()
        if futils.validate_key_repository(requires_write=True):
            # Instantiating the class loads the credential backends needed by
            # validate_primary_key().
            klass = cls()
            klass.validate_primary_key()
            futils.rotate_keys(keystone_user_id, keystone_group_id)
class CredentialMigrate(BasePermissionsSetup):
    """Provides the ability to encrypt credentials using a new primary key.

    This assumes that there is already a credential key repository in place and
    that the database backend has been upgraded to at least the Newton schema.
    If the credential repository doesn't exist yet, you can use
    ``keystone-manage credential_setup`` to create one.
    """

    name = 'credential_migrate'

    def __init__(self):
        # Load the credential backends used to decrypt and re-encrypt each
        # stored credential.
        drivers = backends.load_backends()
        self.credential_provider_api = drivers['credential_provider_api']
        self.credential_api = drivers['credential_api']

    def migrate_credentials(self):
        """Re-encrypt every credential with the current primary key."""
        crypto, keys = credential_fernet.get_multi_fernet_keys()
        primary_key_hash = credential_fernet.primary_key_hash(keys)

        # FIXME(lbragstad): We *should* be able to use Hints() to ask only for
        # credentials that have a key_hash equal to a secondary key hash or
        # None, but Hints() doesn't seem to honor None values. See
        # https://bugs.launchpad.net/keystone/+bug/1614154. As a workaround -
        # we have to ask for *all* credentials and filter them ourselves.
        credentials = self.credential_api.driver.list_credentials(
            driver_hints.Hints()
        )
        for credential in credentials:
            if credential['key_hash'] != primary_key_hash:
                # If the key_hash isn't None but doesn't match the
                # primary_key_hash, then we know the credential was encrypted
                # with a secondary key. Let's decrypt it, and send it through
                # the update path to re-encrypt it with the new primary key.
                decrypted_blob = self.credential_provider_api.decrypt(
                    credential['encrypted_blob']
                )
                cred = {'blob': decrypted_blob}
                self.credential_api.update_credential(
                    credential['id'],
                    cred
                )

    @classmethod
    def main(cls):
        # Check to make sure we have a repository that works...
        futils = fernet_utils.FernetUtils(
            CONF.credential.key_repository,
            credential_fernet.MAX_ACTIVE_KEYS,
            'credential'
        )
        futils.validate_key_repository(requires_write=True)
        klass = cls()
        klass.migrate_credentials()
class TrustFlush(BaseApp):
    """Flush expired and non-expired soft deleted trusts from the backend."""

    name = 'trust_flush'

    @classmethod
    def add_argument_parser(cls, subparsers):
        parser = super(TrustFlush, cls).add_argument_parser(subparsers)

        parser.add_argument('--project-id', default=None,
                            help=('The id of the project of which the '
                                  'expired or non-expired soft-deleted '
                                  'trusts is to be purged'))
        parser.add_argument('--trustor-user-id', default=None,
                            help=('The id of the trustor of which the '
                                  'expired or non-expired soft-deleted '
                                  'trusts is to be purged'))
        parser.add_argument('--trustee-user-id', default=None,
                            help=('The id of the trustee of which the '
                                  'expired or non-expired soft-deleted '
                                  'trusts is to be purged'))
        # NOTE(review): this default is evaluated once when the parser is
        # built, not at each run — confirm that is acceptable before changing.
        parser.add_argument('--date', default=datetime.datetime.utcnow(),
                            help=('The date of which the expired or '
                                  'non-expired soft-deleted trusts older '
                                  'than that will be purged. The format of '
                                  'the date to be "DD-MM-YYYY". If no date '
                                  'is supplied keystone-manage will use the '
                                  'system clock time at runtime'))
        return parser

    @classmethod
    def main(cls):
        drivers = backends.load_backends()
        trust_manager = drivers['trust_api']
        if CONF.command.date:
            if not isinstance(CONF.command.date, datetime.datetime):
                try:
                    CONF.command.date = datetime.datetime.strptime(
                        CONF.command.date, '%d-%m-%Y')
                except ValueError:
                    # strptime() raises ValueError (not KeyError) on malformed
                    # input, and the message must be interpolated rather than
                    # passed as a stray positional argument.
                    raise ValueError("'%s' Invalid input for date, should be "
                                     "DD-MM-YYYY" % CONF.command.date)
        else:
            LOG.info("No date is supplied, keystone-manage will use the "
                     "system clock time at runtime ")

        trust_manager.flush_expired_and_soft_deleted_trusts(
            project_id=CONF.command.project_id,
            trustor_user_id=CONF.command.trustor_user_id,
            trustee_user_id=CONF.command.trustee_user_id,
            date=CONF.command.date
        )
class MappingPurge(BaseApp):
    """Purge the mapping table."""

    name = 'mapping_purge'

    @classmethod
    def add_argument_parser(cls, subparsers):
        parser = super(MappingPurge, cls).add_argument_parser(subparsers)
        parser.add_argument('--all', default=False, action='store_true',
                            help=('Purge all mappings.'))
        parser.add_argument('--domain-name', default=None,
                            help=('Purge any mappings for the domain '
                                  'specified.'))
        parser.add_argument('--public-id', default=None,
                            help=('Purge the mapping for the Public ID '
                                  'specified.'))
        parser.add_argument('--local-id', default=None,
                            help=('Purge the mappings for the Local ID '
                                  'specified.'))
        parser.add_argument('--type', default=None, choices=['user', 'group'],
                            help=('Purge any mappings for the type '
                                  'specified.'))
        return parser

    @staticmethod
    def main():
        def validate_options():
            # NOTE(henry-nash): It would be nice to use the argparse automated
            # checking for this validation, but the only way I can see doing
            # that is to make the default (i.e. if no optional parameters
            # are specified) to purge all mappings - and that sounds too
            # dangerous as a default. So we use it in a slightly
            # unconventional way, where all parameters are optional, but you
            # must specify at least one.
            if (CONF.command.all is False and
                    CONF.command.domain_name is None and
                    CONF.command.public_id is None and
                    CONF.command.local_id is None and
                    CONF.command.type is None):
                raise ValueError(_('At least one option must be provided'))

            if (CONF.command.all is True and
                    (CONF.command.domain_name is not None or
                     CONF.command.public_id is not None or
                     CONF.command.local_id is not None or
                     CONF.command.type is not None)):
                raise ValueError(_('--all option cannot be mixed with '
                                   'other options'))

        def get_domain_id(name):
            # NOTE: closes over resource_manager, which is assigned below
            # (after validate_options()) but before this closure is called.
            try:
                return resource_manager.get_domain_by_name(name)['id']
            except KeyError:
                raise ValueError(_("Unknown domain '%(name)s' specified by "
                                   "--domain-name") % {'name': name})

        validate_options()
        drivers = backends.load_backends()
        resource_manager = drivers['resource_api']
        mapping_manager = drivers['id_mapping_api']

        # Now that we have validated the options, we know that at least one
        # option has been specified, and if it was the --all option then this
        # was the only option specified.
        #
        # The mapping dict is used to filter which mappings are purged, so
        # leaving it empty means purge them all
        mapping = {}
        if CONF.command.domain_name is not None:
            mapping['domain_id'] = get_domain_id(CONF.command.domain_name)
        if CONF.command.public_id is not None:
            mapping['public_id'] = CONF.command.public_id
        if CONF.command.local_id is not None:
            mapping['local_id'] = CONF.command.local_id
        if CONF.command.type is not None:
            mapping['entity_type'] = CONF.command.type

        mapping_manager.purge_mappings(mapping)
# Domain-specific config files are named 'keystone.<domain_name>.conf';
# these constants are the fixed head and tail of that filename pattern.
DOMAIN_CONF_FHEAD = 'keystone.'
DOMAIN_CONF_FTAIL = '.conf'
def _domain_config_finder(conf_dir):
    """Return a generator of all domain config files found in a directory.

    Domain configs match the filename pattern of
    'keystone.<domain_name>.conf'.

    :returns: generator yielding (filename, domain_name) tuples
    """
    LOG.info('Scanning %r for domain config files', conf_dir)
    for dirpath, _dirnames, filenames in os.walk(conf_dir):
        for fname in filenames:
            # A real domain config needs the fixed head and tail plus at
            # least one more '.' (i.e. a non-empty domain name in between).
            is_domain_config = (
                fname.startswith(DOMAIN_CONF_FHEAD) and
                fname.endswith(DOMAIN_CONF_FTAIL) and
                fname.count('.') >= 2
            )
            if is_domain_config:
                domain_name = fname[len(DOMAIN_CONF_FHEAD):
                                    -len(DOMAIN_CONF_FTAIL)]
                yield (os.path.join(dirpath, fname), domain_name)
            else:
                LOG.warning('Ignoring file (%s) while scanning '
                            'domain config directory', fname)
class DomainConfigUploadFiles(object):
    """Upload domain-specific config files into the domain config backend."""

    def __init__(self, domain_config_finder=_domain_config_finder):
        super(DomainConfigUploadFiles, self).__init__()
        self.load_backends()
        # Injectable for testing; defaults to scanning the config directory.
        self._domain_config_finder = domain_config_finder

    def load_backends(self):
        """Bind the resource and domain-config managers from the backends."""
        drivers = backends.load_backends()
        self.resource_manager = drivers['resource_api']
        self.domain_config_manager = drivers['domain_config_api']

    def valid_options(self):
        """Validate the options, returning True if they are indeed valid.

        It would be nice to use the argparse automated checking for this
        validation, but the only way I can see doing that is to make the
        default (i.e. if no optional parameters are specified) to upload
        all configuration files - and that sounds too dangerous as a
        default. So we use it in a slightly unconventional way, where all
        parameters are optional, but you must specify at least one.
        """
        if (CONF.command.all is False and
                CONF.command.domain_name is None):
            print(_('At least one option must be provided, use either '
                    '--all or --domain-name'))
            return False

        if (CONF.command.all is True and
                CONF.command.domain_name is not None):
            print(_('The --all option cannot be used with '
                    'the --domain-name option'))
            return False

        return True

    def _upload_config_to_database(self, file_name, domain_name):
        """Upload a single config file to the database.

        :param file_name: the file containing the config options
        :param domain_name: the domain name
        :returns: a boolean indicating if the upload succeeded
        """
        try:
            domain_ref = (
                self.resource_manager.get_domain_by_name(domain_name))
        except exception.DomainNotFound:
            print(_('Invalid domain name: %(domain)s found in config file '
                    'name: %(file)s - ignoring this file.') % {
                        'domain': domain_name,
                        'file': file_name})
            return False

        # Never overwrite a configuration that already exists for the domain.
        if self.domain_config_manager.get_config_with_sensitive_info(
                domain_ref['id']):
            print(_('Domain: %(domain)s already has a configuration '
                    'defined - ignoring file: %(file)s.') % {
                        'domain': domain_name,
                        'file': file_name})
            return False

        sections = {}
        try:
            parser = cfg.ConfigParser(file_name, sections)
            parser.parse()
        except Exception:
            # We explicitly don't try and differentiate the error cases, in
            # order to keep the code in this tool more robust as oslo.config
            # changes.
            print(_('Error parsing configuration file for domain: %(domain)s, '
                    'file: %(file)s.') % {
                        'domain': domain_name,
                        'file': file_name})
            return False

        try:
            # The parser stores each option's values as a list; only the
            # first value is uploaded.
            for group in sections:
                for option in sections[group]:
                    sections[group][option] = sections[group][option][0]
            self.domain_config_manager.create_config(domain_ref['id'],
                                                     sections)
            return True
        except Exception as e:
            msg = ('Error processing config file for domain: '
                   '%(domain_name)s, file: %(filename)s, error: %(error)s')
            LOG.error(msg,
                      {'domain_name': domain_name,
                       'filename': file_name,
                       'error': e},
                      exc_info=True)
            return False

    def read_domain_configs_from_files(self):
        """Read configs from file(s) and load into database.

        The command line parameters have already been parsed and the CONF
        command option will have been set. It is either set to the name of an
        explicit domain, or it's None to indicate that we want all domain
        config files.
        """
        domain_name = CONF.command.domain_name
        conf_dir = CONF.identity.domain_config_dir
        if not os.path.exists(conf_dir):
            print(_('Unable to locate domain config directory: %s') % conf_dir)
            raise ValueError

        if domain_name:
            # Request is to upload the configs for just one domain
            fname = DOMAIN_CONF_FHEAD + domain_name + DOMAIN_CONF_FTAIL
            if not self._upload_config_to_database(
                    os.path.join(conf_dir, fname), domain_name):
                return False
            return True

        success_cnt = 0
        failure_cnt = 0
        for filename, domain_name in self._domain_config_finder(conf_dir):
            if self._upload_config_to_database(filename, domain_name):
                success_cnt += 1
                LOG.info('Successfully uploaded domain config %r',
                         filename)
            else:
                failure_cnt += 1

        if success_cnt == 0:
            LOG.warning('No domain configs uploaded from %r', conf_dir)

        # Any failure makes the whole run report failure.
        if failure_cnt:
            return False
        return True

    def run(self):
        """Run the upload; returns 1 on failure, None on success."""
        # First off, let's just check we can talk to the domain database
        try:
            self.resource_manager.list_domains(driver_hints.Hints())
        except Exception:
            # It is likely that there is some SQL or other backend error
            # related to set up
            print(_('Unable to access the keystone database, please check it '
                    'is configured correctly.'))
            raise

        if not self.valid_options():
            return 1

        if not self.read_domain_configs_from_files():
            return 1
class DomainConfigUpload(BaseApp):
    """Upload the domain specific configuration files to the database."""

    name = 'domain_config_upload'

    @classmethod
    def add_argument_parser(cls, subparsers):
        parser = super(DomainConfigUpload, cls).add_argument_parser(subparsers)
        parser.add_argument('--all', default=False, action='store_true',
                            help='Upload contents of all domain specific '
                                 'configuration files. Either use this option '
                                 'or use the --domain-name option to choose a '
                                 'specific domain.')
        parser.add_argument('--domain-name', default=None,
                            help='Upload contents of the specific '
                                 'configuration file for the given domain. '
                                 'Either use this option or use the --all '
                                 'option to upload contents for all domains.')
        return parser

    @staticmethod
    def main():
        uploader = DomainConfigUploadFiles()
        # run() returns None on success; a non-None value is an exit status.
        exit_status = uploader.run()
        if exit_status is not None:
            sys.exit(exit_status)
class SamlIdentityProviderMetadata(BaseApp):
    """Generate Identity Provider metadata."""

    name = 'saml_idp_metadata'

    @staticmethod
    def main():
        # Generate the SAML IdP metadata document and write it to stdout.
        generator = idp.MetadataGenerator()
        print(generator.generate_metadata())
class MappingEngineTester(BaseApp):
    """Execute mapping engine locally."""

    name = 'mapping_engine'

    def __init__(self):
        super(MappingEngineTester, self).__init__()
        # A throwaway id; the engine only needs one for identification.
        self.mapping_id = uuid.uuid4().hex
        self.rules_pathname = None
        self.rules = None
        self.assertion_pathname = None
        self.assertion = None

    def read_rules(self, path):
        """Load the JSON mapping rules from *path*.

        :raises SystemExit: with a friendly message when the file cannot be
            opened or does not contain valid JSON.
        """
        self.rules_pathname = path
        try:
            with open(path, "rb") as fobj:
                self.rules = jsonutils.load(fobj)
        except OSError as e:
            # Mirror read_assertion(): a missing/unreadable rules file should
            # exit cleanly instead of dumping a traceback.
            raise SystemExit(_("Error while opening file "
                               "%(path)s: %(err)s") % {'path': path, 'err': e})
        except ValueError as e:
            raise SystemExit(_('Error while parsing rules '
                               '%(path)s: %(err)s') % {'path': path, 'err': e})

    def read_assertion(self, path):
        """Read the raw assertion text from *path*.

        :raises SystemExit: with a friendly message if the file can't be read.
        """
        self.assertion_pathname = path
        try:
            with open(path) as fobj:
                self.assertion = fobj.read().strip()
        except OSError as e:
            # IOError is an alias of OSError on Python 3.
            raise SystemExit(_("Error while opening file "
                               "%(path)s: %(err)s") % {'path': path, 'err': e})

    def normalize_assertion(self):
        """Convert the raw 'key: value' assertion text into a dict in place."""
        def split(line, line_num):
            try:
                k, v = line.split(':', 1)
                return k.strip(), v.strip()
            except ValueError:
                msg = _("assertion file %(pathname)s at line %(line_num)d "
                        "expected 'key: value' but found '%(line)s' "
                        "see help for file format")
                raise SystemExit(msg % {'pathname': self.assertion_pathname,
                                        'line_num': line_num,
                                        'line': line})
        assertion = self.assertion.splitlines()
        assertion_dict = {}
        prefix = CONF.command.prefix
        for line_num, line in enumerate(assertion, 1):
            line = line.strip()
            if line == '':
                continue
            k, v = split(line, line_num)
            if prefix:
                # With a prefix configured, only matching keys are kept.
                if k.startswith(prefix):
                    assertion_dict[k] = v
            else:
                assertion_dict[k] = v
        self.assertion = assertion_dict

    def normalize_rules(self):
        """Wrap a bare list of rules in the expected {'rules': [...]} shape."""
        if isinstance(self.rules, list):
            self.rules = {'rules': self.rules}

    @classmethod
    def main(cls):
        if CONF.command.engine_debug:
            mapping_engine.LOG.logger.setLevel('DEBUG')
        else:
            mapping_engine.LOG.logger.setLevel('WARN')

        tester = cls()

        tester.read_rules(CONF.command.rules)
        tester.normalize_rules()
        mapping_engine.validate_mapping_structure(tester.rules)

        tester.read_assertion(CONF.command.input)
        tester.normalize_assertion()

        if CONF.command.engine_debug:
            print("Using Rules:\n%s" % (
                jsonutils.dumps(tester.rules, indent=2)))
            print("Using Assertion:\n%s" % (
                jsonutils.dumps(tester.assertion, indent=2)))

        rp = mapping_engine.RuleProcessor(tester.mapping_id,
                                          tester.rules['rules'])
        mapped = rp.process(tester.assertion)
        print(jsonutils.dumps(mapped, indent=2))

    @classmethod
    def add_argument_parser(cls, subparsers):
        parser = super(MappingEngineTester,
                       cls).add_argument_parser(subparsers)
        parser.formatter_class = argparse.RawTextHelpFormatter
        parser.add_argument('--rules', default=None, required=True,
                            help=("Path to the file with "
                                  "rules to be executed. "
                                  "Content must be\na proper JSON structure, "
                                  "with a top-level key 'rules' and\n"
                                  "corresponding value being a list."))
        parser.add_argument('--input', default=None, required=True,
                            help=("Path to the file with input attributes. "
                                  "The content\nconsists of ':' separated "
                                  "parameter names and their values.\nThere "
                                  "is only one key-value pair per line. "
                                  "A ';' in the\nvalue is a separator and "
                                  "then a value is treated as a list.\n"
                                  "Example:\n"
                                  "\tEMAIL: me@example.com\n"
                                  "\tLOGIN: me\n"
                                  "\tGROUPS: group1;group2;group3"))
        parser.add_argument('--prefix', default=None,
                            help=("A prefix used for each environment "
                                  "variable in the\nassertion. For example, "
                                  "all environment variables may have\nthe "
                                  "prefix ASDF_."))
        parser.add_argument('--engine-debug',
                            default=False, action="store_true",
                            help=("Enable debug messages from the mapping "
                                  "engine."))
class MappingPopulate(BaseApp):
    """Pre-populate entries from domain-specific backends.

    Running this command is not required. It should only be run right after
    the LDAP was configured, when many new users were added, or when
    "mapping_purge" is run.

    This command will take a while to run. It is perfectly fine for it to run
    more than several minutes.
    """

    name = "mapping_populate"

    @classmethod
    def load_backends(cls):
        drivers = backends.load_backends()
        cls.identity_api = drivers['identity_api']
        cls.resource_api = drivers['resource_api']

    @classmethod
    def add_argument_parser(cls, subparsers):
        parser = super(MappingPopulate, cls).add_argument_parser(
            subparsers)

        parser.add_argument('--domain-name', default=None, required=True,
                            help=("Name of the domain configured to use "
                                  "domain-specific backend"))
        return parser

    @classmethod
    def main(cls):
        """Process entries for id_mapping_api."""
        cls.load_backends()
        domain_name = CONF.command.domain_name
        try:
            domain = cls.resource_api.get_domain_by_name(domain_name)
        except exception.DomainNotFound:
            print(_('Invalid domain name: %(domain)s') % {
                'domain': domain_name})
            return False
        # We don't actually need to tackle id_mapping_api in order to get
        # entries there, because list_users does this anyway. That's why it
        # will be enough to just make the call below.
        cls.identity_api.list_users(domain_scope=domain['id'])
# Every keystone-manage sub-command class. add_command_parsers() registers
# one argparse sub-parser per entry, keyed by each class's `name` attribute.
CMDS = [
    BootStrap,
    CredentialMigrate,
    CredentialRotate,
    CredentialSetup,
    DbSync,
    DbVersion,
    Doctor,
    DomainConfigUpload,
    FernetRotate,
    FernetSetup,
    CreateJWSKeyPair,
    MappingPopulate,
    MappingPurge,
    MappingEngineTester,
    ReceiptRotate,
    ReceiptSetup,
    SamlIdentityProviderMetadata,
    TokenRotate,
    TokenSetup,
    TrustFlush
]
def add_command_parsers(subparsers):
    """Register an argparse sub-parser for every keystone-manage command."""
    for command in CMDS:
        command.add_argument_parser(subparsers)
# oslo.config sub-command option: parsing the CLI selects one of CMDS and
# exposes the chosen class and its arguments as CONF.command.
command_opt = cfg.SubCommandOpt('command',
                                title='Commands',
                                help='Available commands',
                                handler=add_command_parsers)
def main(argv=None, developer_config_file=None):
    """Main entry point into the keystone-manage CLI utility.

    :param argv: Arguments supplied via the command line using the ``sys``
                 standard library; defaults to ``sys.argv`` when omitted.
    :type argv: list
    :param developer_config_file: The location of a configuration file normally
                                  found in development environments.
    :type developer_config_file: string
    """
    CONF.register_cli_opt(command_opt)
    keystone.conf.configure()
    sql.initialize()
    keystone.conf.set_default_for_default_log_levels()

    # Calling main() with no arguments previously crashed on argv[1:] below;
    # fall back to sys.argv so the documented default actually works.
    if argv is None:
        argv = sys.argv

    user_supplied_config_file = '--config-file' in argv

    if developer_config_file:
        developer_config_file = [developer_config_file]

    # NOTE(lbragstad): At this point in processing, the first element of argv
    # is the binary location of keystone-manage, which oslo.config doesn't need
    # and is keystone specific. Only pass a list of arguments so that
    # oslo.config can determine configuration file locations based on user
    # provided arguments, if present.
    CONF(args=argv[1:],
         project='keystone',
         version=pbr.version.VersionInfo('keystone').version_string(),
         usage='%(prog)s [' + '|'.join([cmd.name for cmd in CMDS]) + ']',
         default_config_files=developer_config_file)

    if not CONF.default_config_files and not user_supplied_config_file:
        LOG.warning('Config file not found, using default configs.')
    keystone.conf.setup_logging()
    CONF.command.cmd_class.main()
| 40.857971
| 79
| 0.602139
|
acfffa4c7f480c3f1e90e86e4085f4f5a59c041d
| 1,181
|
py
|
Python
|
framework/codejam/analyse/language.py
|
neizod/coding-analysis
|
cc086bcf204e570032d11b12a46ac819cfe93f2b
|
[
"MIT"
] | 1
|
2015-05-22T05:01:53.000Z
|
2015-05-22T05:01:53.000Z
|
framework/codejam/analyse/language.py
|
neizod/coding-analysis
|
cc086bcf204e570032d11b12a46ac819cfe93f2b
|
[
"MIT"
] | null | null | null |
framework/codejam/analyse/language.py
|
neizod/coding-analysis
|
cc086bcf204e570032d11b12a46ac819cfe93f2b
|
[
"MIT"
] | null | null | null |
import os
import json
from framework._utils import AnalyserHook
class CodeJamAnalyseLanguage(AnalyserHook):
    """Analyse which language was used in each submitted code."""

    @staticmethod
    def analyse(data):
        # Keep only submissions written in exactly one language; note that
        # pop() removes that language from the row's own collection.
        for row in data:
            if len(row['languages']) == 1:
                yield [row['pid'], row['io'], row['uname'],
                       row['languages'].pop()]

    @staticmethod
    def prepare_input(year, **_):
        from framework._utils.misc import datapath, make_ext
        os.makedirs(datapath('codejam', 'result'), exist_ok=True)
        source_path = datapath('codejam', 'extract',
                               make_ext('language', year, 'json'))
        return json.load(open(source_path))

    @staticmethod
    def prepare_output(result, year, **_):
        from itertools import chain
        from framework._utils import write
        from framework._utils.misc import datapath, make_ext
        target_path = datapath('codejam', 'result',
                               make_ext('language', year, 'txt'))
        header = ['pid', 'io', 'uname', 'language']
        write.table(chain([header], result), open(target_path, 'w'))
| 33.742857
| 74
| 0.593565
|
acfffae2676dcb53658f13001da9575a437e490a
| 1,018
|
py
|
Python
|
backend/question/apps.py
|
beehyv/workforcehealthtracker
|
ab0f921938e1ee50158c9d2c72a66c534d435eee
|
[
"Apache-2.0"
] | 2
|
2020-05-10T05:20:04.000Z
|
2020-05-21T15:29:55.000Z
|
backend/question/apps.py
|
beehyv/workforcehealthtracker
|
ab0f921938e1ee50158c9d2c72a66c534d435eee
|
[
"Apache-2.0"
] | null | null | null |
backend/question/apps.py
|
beehyv/workforcehealthtracker
|
ab0f921938e1ee50158c9d2c72a66c534d435eee
|
[
"Apache-2.0"
] | 2
|
2020-05-13T18:21:37.000Z
|
2020-05-21T15:29:58.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Built and managed with Open Source Love by BeeHyv Software Solutions Pvt Ltd. Hyderabad
# www.beehyv.com
from django.apps import AppConfig
class QuestionConfig(AppConfig):
    """Django application configuration for the ``question`` app."""

    # App label; must match the package name so Django can locate the app.
    name = 'question'
| 39.153846
| 91
| 0.747544
|
acfffbc73eb4e760558634e1e2c9186d33c43ade
| 1,169
|
py
|
Python
|
src/gunicorn_cfg.py
|
devsetgo/pynote_2
|
e5a086a06ed19774f569eb59486d1d0960e78c06
|
[
"MIT"
] | null | null | null |
src/gunicorn_cfg.py
|
devsetgo/pynote_2
|
e5a086a06ed19774f569eb59486d1d0960e78c06
|
[
"MIT"
] | 47
|
2021-04-10T14:47:38.000Z
|
2022-03-05T02:40:59.000Z
|
src/gunicorn_cfg.py
|
devsetgo/pynote_2
|
e5a086a06ed19774f569eb59486d1d0960e78c06
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Configuration of Gunicorn to serve application utilizing Uvicorn

gunicorn config reference: https://github.com/benoitc/gunicorn/blob/master/examples/example_config.py
"""
from settings import LOGURU_LOGGING_LEVEL

# ip and port to bind
bind = "0.0.0.0:5000"
# max number of pending connections
# backlog = 2048
# TODO: APScheduler Multiple Workers
# BODY: APScheduler has conflicts if more than 1 gunicorn worker is enabled.
# define number of workers by cores times two plus one
# workers = multiprocessing.cpu_count() * 2 + 1
# NOTE(review): fixed at 4 despite the APScheduler conflict note above —
# confirm whether the scheduler issue was resolved before changing this.
workers = 4
# set worker class to uvicorn so Gunicorn can serve the ASGI application
worker_class = "uvicorn.workers.UvicornWorker"
# loglevel - The granularity of log output; gunicorn expects a lowercase name
loglevel = LOGURU_LOGGING_LEVEL.lower()
"""
A dictionary containing headers and values that the front-end proxy uses to indicate HTTPS requests.
These tell Gunicorn to set wsgi.url_scheme to https, so your application can tell that the request is secure.
"""
# secure_scheme_headers = {
#     "X-FORWARDED-PROTOCOL": "ssl",
#     "X-FORWARDED-PROTO": "https",
#     "X-FORWARDED-SSL": "on",
# }
# ips that are allowed to forward
# FORWARDED_ALLOW_IPS = "127.0.0.1", "0.0.0.0"
| 31.594595
| 109
| 0.740804
|
acfffc0e0c93e6a34ea84f33e871b90641c4ba0b
| 230
|
py
|
Python
|
Python/Tests/TestData/DebuggerProject/SteppingTestBug1315.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 404
|
2019-05-07T02:21:57.000Z
|
2022-03-31T17:03:04.000Z
|
Python/Tests/TestData/DebuggerProject/SteppingTestBug1315.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 1,672
|
2019-05-06T21:09:38.000Z
|
2022-03-31T23:16:04.000Z
|
Python/Tests/TestData/DebuggerProject/SteppingTestBug1315.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 186
|
2019-05-13T03:17:37.000Z
|
2022-03-31T16:24:05.000Z
|
try:
import _thread
except ImportError:
import thread as _thread
l = _thread.allocate_lock()
l.acquire()
def my_thread():
print('100')
print('200')
l.release()
_thread.start_new_thread(my_thread, ())
l.acquire()
| 16.428571
| 39
| 0.691304
|
acfffc31c1c29fd9298dfa09e4da1ac1935ed570
| 7,837
|
py
|
Python
|
terminusdb_client/woql_utils.py
|
polyneme/terminusdb-client-python
|
720024e33465f830709691507b4fbd5b3597e29f
|
[
"Apache-2.0"
] | null | null | null |
terminusdb_client/woql_utils.py
|
polyneme/terminusdb-client-python
|
720024e33465f830709691507b4fbd5b3597e29f
|
[
"Apache-2.0"
] | null | null | null |
terminusdb_client/woql_utils.py
|
polyneme/terminusdb-client-python
|
720024e33465f830709691507b4fbd5b3597e29f
|
[
"Apache-2.0"
] | null | null | null |
import json
import urllib.parse
from datetime import datetime
from .errors import DatabaseError
STANDARD_URLS = {
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"xsd": "http://www.w3.org/2001/XMLSchema#",
"xdd": "http://terminusdb.com/schema/xdd#",
"owl": "http://www.w3.org/2002/07/owl#",
"terminus": "http://terminusdb.com/schema/terminus#",
"vio": "http://terminusdb.com/schema/vio#",
"repo": "http://terminusdb.com/schema/repository#",
"layer": "http://terminusdb.com/schema/layer#",
"woql": "http://terminusdb.com/schema/woql#",
"ref": "http://terminusdb.com/schema/ref#",
}
def encode_uri_component(value):
"""Encode a URI.
Parameters
----------
value: Value which needs to be encoded
Returns
-------
Encoded Value
"""
return urllib.parse.urlencode(value, doseq=True)
def uri_encode_payload(payload):
"""Encode the given payload
Parameters
----------
payload: dict
Returns
-------
Encoded payload array
"""
if isinstance(payload, str):
return encode_uri_component(payload)
payload_arr = []
if isinstance(payload, dict):
for key, value in payload.items():
"""
if dictionary inside dictionary
"""
if isinstance(value, dict):
# for keyElement,valueElement in value.items():
payload_arr.append(encode_uri_component(value))
else:
payload_arr.append(encode_uri_component({key: value}))
return "&".join(payload_arr)
def add_params_to_url(url, payload):
"""Add params / payload to given url
Parameters
----------
url: str
payload: dict
Returns
-------
Url with payload appended
"""
if payload:
params = uri_encode_payload(payload)
if params:
url = f"{url}?{params}"
return url
# below are utils for WOQLQuery
def add_namespaces_to_variable(var):
"""Adds namespace to given variable
Parameters
----------
var: str
Variable
Returns
-------
Variable attached with namespace
"""
if var[:2] != "v:":
return "v:" + var
return var
def add_namespaces_to_variables(variables):
"""Adds namespace to given variables
Parameters
----------
variables: list [str]
Returns
-------
Variables attached with namespace
"""
nvars = []
for v_item in variables:
nvars.append(add_namespaces_to_variable(v_item))
return nvars
def empty(obj):
"""* is the object empty?
* returns true if the json object is empty
"""
if not obj:
# Assume if it has a length property with a non-zero value
# that that property is correct.
return True
if len(obj) > 0:
return False
if len(obj) == 0:
return True
# Otherwise, does it have any properties of its own?
# if type(obj) == dict:
# for key in obj.keys():
# if hasattr(obj,key):
# return False
return True
def shorten(url, prefixes=None):
"""Get shortened url
Parameters
----------
url: str
prefixes: dict
Returns
-------
Url with prefixes added and shortened
"""
prefixes = prefixes if prefixes else STANDARD_URLS
for pref, val in prefixes.items():
short_url = url[: len(val)]
if val == short_url:
return f"{pref}:{short_url}"
return url
def is_data_type(stype):
"""Checks if the given type is a datatype or not
Parameters
----------
stype: str
Returns
-------
bool
"""
sh = shorten(stype)
if sh and (sh[:4] == "xsd:" or sh[:4] == "xdd:"):
return True
return False
def valid_url(string):
"""Checks if the given url is valid
Parameters
----------
string: str
Url which needs to be validated
Returns
-------
bool
"""
if string and (string[:7] == "http://" or string[:8] == "https://"):
return True
return False
def url_fraqment(url):
"""Gets the url fragment
Parameters
----------
url: str
Returns
-------
str
url fragment
"""
bits = url.split("#")
if len(bits) > 1:
return bits[1]
return url
def label_from_url(url):
"""Get the label from url
Parameters
----------
url: str
Returns
-------
str
Label
"""
nurl = url_fraqment(url)
nurl = nurl if nurl else url
last_char = nurl.rfind("/")
if last_char < len(nurl) - 1:
nurl = nurl[last_char + 1 :]
nurl = nurl.replace("_", " ")
return nurl[0].upper() + nurl[1:]
def _result2stream(result):
"""turning JSON string into a interable that give you a stream of dictionary"""
decoder = json.JSONDecoder()
idx = 0
result_length = len(result)
while True:
if idx >= result_length:
return
data, offset = decoder.raw_decode(result[idx:])
idx += offset
while idx < result_length and result[idx].isspace():
idx += 1
yield data
def _finish_response(request_response, get_version=False):
"""Get the response text
Parameters
----------
request_response: Response Object
Returns
-------
str
Response text
Raises
------
DatabaseError
For status codes 400 to 598
"""
if request_response.status_code == 200:
if get_version:
return request_response.text, request_response.headers.get(
"Terminusdb-Data-Version"
)
return request_response.text # if not a json not it raises an error
elif request_response.status_code > 399 and request_response.status_code < 599:
raise DatabaseError(request_response)
def _clean_list(obj):
cleaned = []
for item in obj:
if isinstance(item, str):
cleaned.append(item)
elif hasattr(item, "items"):
cleaned.append(_clean_dict(item))
elif not isinstance(item, str) and hasattr(item, "__iter__"):
cleaned.append(_clean_list(item))
elif hasattr(item, "isoformat"):
cleaned.append(item.isoformat())
else:
cleaned.append(item)
return cleaned
def _clean_dict(obj):
cleaned = {}
for key, item in obj.items():
if isinstance(item, str):
cleaned[key] = item
elif hasattr(item, "items"):
cleaned[key] = _clean_dict(item)
elif hasattr(item, "__iter__"):
cleaned[key] = _clean_list(item)
elif hasattr(item, "isoformat"):
cleaned[key] = item.isoformat()
else:
cleaned[key] = item
return cleaned
def _dt_list(obj):
cleaned = []
for item in obj:
if isinstance(item, str):
try:
cleaned.append(datetime.fromisoformat(item))
except ValueError:
cleaned.append(item)
elif hasattr(item, "items"):
cleaned.append(_clean_dict(item))
elif hasattr(item, "__iter__"):
cleaned.append(_clean_list(item))
else:
cleaned.append(item)
return cleaned
def _dt_dict(obj):
cleaned = {}
for key, item in obj.items():
if isinstance(item, str):
try:
cleaned[key] = datetime.fromisoformat(item)
except ValueError:
cleaned[key] = item
elif hasattr(item, "items"):
cleaned[key] = _dt_dict(item)
elif hasattr(item, "__iter__"):
cleaned[key] = _dt_list(item)
else:
cleaned[key] = item
return cleaned
| 22.915205
| 83
| 0.565012
|
acfffd3554ae4d63894755cf9c49ba40cd481b5e
| 117
|
py
|
Python
|
blueprints/video_discovery/scripts/kb/transform/constants.py
|
lfiedlerc/mindmeld-blueprints
|
4b5e1445ec75da25090bda8432c4a3a9d431a7f0
|
[
"Apache-2.0"
] | 12
|
2019-05-14T11:07:31.000Z
|
2021-08-27T23:10:32.000Z
|
blueprints/video_discovery/scripts/kb/transform/constants.py
|
lfiedlerc/mindmeld-blueprints
|
4b5e1445ec75da25090bda8432c4a3a9d431a7f0
|
[
"Apache-2.0"
] | 8
|
2019-07-03T20:13:54.000Z
|
2020-12-09T23:14:08.000Z
|
blueprints/video_discovery/scripts/kb/transform/constants.py
|
lfiedlerc/mindmeld-blueprints
|
4b5e1445ec75da25090bda8432c4a3a9d431a7f0
|
[
"Apache-2.0"
] | 13
|
2019-06-27T22:51:45.000Z
|
2021-12-07T22:09:26.000Z
|
POSTER_IMG_URL = 'http://image.tmdb.org/t/p/w185/'
TYPE_MOVIE = 'movie'
TYPE_TV = 'tv-show'
TYPE_EPISODE = 'episode'
| 23.4
| 50
| 0.709402
|
acfffd5f560bd0680dd67478d9fcc6e23b2e5b62
| 95
|
py
|
Python
|
terrascript/logentries/d.py
|
GarnerCorp/python-terrascript
|
ec6c2d9114dcd3cb955dd46069f8ba487e320a8c
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/logentries/d.py
|
GarnerCorp/python-terrascript
|
ec6c2d9114dcd3cb955dd46069f8ba487e320a8c
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/logentries/d.py
|
GarnerCorp/python-terrascript
|
ec6c2d9114dcd3cb955dd46069f8ba487e320a8c
|
[
"BSD-2-Clause"
] | 1
|
2018-11-15T16:23:05.000Z
|
2018-11-15T16:23:05.000Z
|
from terrascript import _data
class logentries_logset(_data): pass
logset = logentries_logset
| 19
| 36
| 0.842105
|
acfffd7f5db9541e6c630a552301915d12b76dc8
| 1,281
|
py
|
Python
|
tests/test_scripts/12_iozone_throughput.py
|
stanik137/xtreemfs
|
e548deba8be2631ab0d2e47a1652941729cf70de
|
[
"BSD-3-Clause"
] | 4
|
2016-04-26T03:43:54.000Z
|
2016-11-17T08:09:04.000Z
|
tests/test_scripts/12_iozone_throughput.py
|
stanik137/xtreemfs
|
e548deba8be2631ab0d2e47a1652941729cf70de
|
[
"BSD-3-Clause"
] | 17
|
2015-01-05T21:06:22.000Z
|
2015-12-07T20:45:44.000Z
|
tests/test_scripts/12_iozone_throughput.py
|
stanik137/xtreemfs
|
e548deba8be2631ab0d2e47a1652941729cf70de
|
[
"BSD-3-Clause"
] | 3
|
2016-04-26T03:43:55.000Z
|
2020-11-06T11:02:08.000Z
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2011 by Bjoern Kolbeck, Minor Gordon, Zuse Institute Berlin
# Licensed under the BSD License, see LICENSE file for details.
import unittest, subprocess, sys, os
class iozoneThroughputTest(unittest.TestCase):
def __init__( self, stdout=sys.stdout, stderr=sys.stderr, *args, **kwds ):
unittest.TestCase.__init__( self )
self.stdout = stdout
self.stderr = stderr
def runTest( self ):
args = "iozone -t 1 -r 128k -s 20m"
p = subprocess.Popen( args, shell=True, stdout=self.stdout, stderr=self.stderr )
retcode = p.wait()
if retcode == 0:
pass # TODO: parse output
else:
self.assertEqual( retcode, 0 )
def createTestSuite( *args, **kwds ):
if not sys.platform.startswith( "win" ):
return unittest.TestSuite( [iozoneThroughputTest( *args, **kwds )] )
if __name__ == "__main__":
if not sys.platform.startswith( "win" ):
result = unittest.TextTestRunner( verbosity=2 ).run( createTestSuite() )
if not result.wasSuccessful():
sys.exit(1)
else:
print sys.modules[__name__].__file__.split( os.sep )[-1], "not supported on Windows"
| 32.846154
| 92
| 0.616706
|
acfffd844c028bb436d2fb621e38a1931ece9745
| 83,187
|
py
|
Python
|
tests/unit/gapic/datastore_v1/test_datastore.py
|
LaudateCorpus1/python-datastore
|
b1f955b8d410392174092cb8131673a10ccc33ec
|
[
"Apache-2.0"
] | 50
|
2020-03-07T16:55:45.000Z
|
2022-03-25T12:10:12.000Z
|
tests/unit/gapic/datastore_v1/test_datastore.py
|
LaudateCorpus1/python-datastore
|
b1f955b8d410392174092cb8131673a10ccc33ec
|
[
"Apache-2.0"
] | 161
|
2020-02-07T00:46:20.000Z
|
2022-03-16T20:02:16.000Z
|
tests/unit/gapic/datastore_v1/test_datastore.py
|
LaudateCorpus1/python-datastore
|
b1f955b8d410392174092cb8131673a10ccc33ec
|
[
"Apache-2.0"
] | 28
|
2020-02-07T00:55:36.000Z
|
2022-03-03T06:07:03.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.datastore_v1.services.datastore import DatastoreAsyncClient
from google.cloud.datastore_v1.services.datastore import DatastoreClient
from google.cloud.datastore_v1.services.datastore import transports
from google.cloud.datastore_v1.types import datastore
from google.cloud.datastore_v1.types import entity
from google.cloud.datastore_v1.types import query
from google.oauth2 import service_account
from google.protobuf import struct_pb2
from google.protobuf import timestamp_pb2
from google.protobuf import wrappers_pb2
from google.type import latlng_pb2
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert DatastoreClient._get_default_mtls_endpoint(None) is None
assert DatastoreClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert (
DatastoreClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
DatastoreClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
DatastoreClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert DatastoreClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [DatastoreClient, DatastoreAsyncClient,])
def test_datastore_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "datastore.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.DatastoreGrpcTransport, "grpc"),
(transports.DatastoreGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_datastore_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [DatastoreClient, DatastoreAsyncClient,])
def test_datastore_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "datastore.googleapis.com:443"
def test_datastore_client_get_transport_class():
transport = DatastoreClient.get_transport_class()
available_transports = [
transports.DatastoreGrpcTransport,
]
assert transport in available_transports
transport = DatastoreClient.get_transport_class("grpc")
assert transport == transports.DatastoreGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(DatastoreClient, transports.DatastoreGrpcTransport, "grpc"),
(
DatastoreAsyncClient,
transports.DatastoreGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
DatastoreClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatastoreClient)
)
@mock.patch.object(
DatastoreAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DatastoreAsyncClient),
)
def test_datastore_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(DatastoreClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(DatastoreClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(DatastoreClient, transports.DatastoreGrpcTransport, "grpc", "true"),
(
DatastoreAsyncClient,
transports.DatastoreGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(DatastoreClient, transports.DatastoreGrpcTransport, "grpc", "false"),
(
DatastoreAsyncClient,
transports.DatastoreGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
DatastoreClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatastoreClient)
)
@mock.patch.object(
DatastoreAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DatastoreAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_datastore_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(DatastoreClient, transports.DatastoreGrpcTransport, "grpc"),
(
DatastoreAsyncClient,
transports.DatastoreGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_datastore_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(DatastoreClient, transports.DatastoreGrpcTransport, "grpc"),
(
DatastoreAsyncClient,
transports.DatastoreGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_datastore_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_datastore_client_client_options_from_dict():
with mock.patch(
"google.cloud.datastore_v1.services.datastore.transports.DatastoreGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = DatastoreClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_lookup(transport: str = "grpc", request_type=datastore.LookupRequest):
client = DatastoreClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.lookup), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datastore.LookupResponse()
response = client.lookup(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastore.LookupRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastore.LookupResponse)
def test_lookup_from_dict():
test_lookup(request_type=dict)
def test_lookup_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastoreClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.lookup), "__call__") as call:
client.lookup()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastore.LookupRequest()
@pytest.mark.asyncio
async def test_lookup_async(
transport: str = "grpc_asyncio", request_type=datastore.LookupRequest
):
client = DatastoreAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.lookup), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastore.LookupResponse()
)
response = await client.lookup(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastore.LookupRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastore.LookupResponse)
@pytest.mark.asyncio
async def test_lookup_async_from_dict():
await test_lookup_async(request_type=dict)
def test_lookup_flattened():
client = DatastoreClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.lookup), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datastore.LookupResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.lookup(
project_id="project_id_value",
read_options=datastore.ReadOptions(
read_consistency=datastore.ReadOptions.ReadConsistency.STRONG
),
keys=[
entity.Key(
partition_id=entity.PartitionId(project_id="project_id_value")
)
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].project_id
mock_val = "project_id_value"
assert arg == mock_val
arg = args[0].read_options
mock_val = datastore.ReadOptions(
read_consistency=datastore.ReadOptions.ReadConsistency.STRONG
)
assert arg == mock_val
arg = args[0].keys
mock_val = [
entity.Key(partition_id=entity.PartitionId(project_id="project_id_value"))
]
assert arg == mock_val
def test_lookup_flattened_error():
client = DatastoreClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.lookup(
datastore.LookupRequest(),
project_id="project_id_value",
read_options=datastore.ReadOptions(
read_consistency=datastore.ReadOptions.ReadConsistency.STRONG
),
keys=[
entity.Key(
partition_id=entity.PartitionId(project_id="project_id_value")
)
],
)
@pytest.mark.asyncio
async def test_lookup_flattened_async():
    """Verify async lookup() flattened kwargs are copied into the request object."""
    client = DatastoreAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.lookup), "__call__") as call:
        # Designate an appropriate return value for the call.  The async
        # surface awaits the stub, so wrap the response in a fake awaitable.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datastore.LookupResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.lookup(
            project_id="project_id_value",
            read_options=datastore.ReadOptions(
                read_consistency=datastore.ReadOptions.ReadConsistency.STRONG
            ),
            keys=[
                entity.Key(
                    partition_id=entity.PartitionId(project_id="project_id_value")
                )
            ],
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values (matches the sync test's assertion).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].project_id
        mock_val = "project_id_value"
        assert arg == mock_val
        arg = args[0].read_options
        mock_val = datastore.ReadOptions(
            read_consistency=datastore.ReadOptions.ReadConsistency.STRONG
        )
        assert arg == mock_val
        arg = args[0].keys
        mock_val = [
            entity.Key(partition_id=entity.PartitionId(project_id="project_id_value"))
        ]
        assert arg == mock_val
@pytest.mark.asyncio
async def test_lookup_flattened_error_async():
    """Verify async lookup() rejects mixing a request object with flattened kwargs."""
    client = DatastoreAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.lookup(
            datastore.LookupRequest(),
            project_id="project_id_value",
            read_options=datastore.ReadOptions(
                read_consistency=datastore.ReadOptions.ReadConsistency.STRONG
            ),
            keys=[
                entity.Key(
                    partition_id=entity.PartitionId(project_id="project_id_value")
                )
            ],
        )
def test_run_query(transport: str = "grpc", request_type=datastore.RunQueryRequest):
    """Verify run_query() sends the request and returns a RunQueryResponse."""
    client = DatastoreClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.run_query), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datastore.RunQueryResponse()
        response = client.run_query(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore.RunQueryRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, datastore.RunQueryResponse)
def test_run_query_from_dict():
    """Verify run_query() also accepts a plain dict request."""
    test_run_query(request_type=dict)
def test_run_query_empty_call():
    """Verify run_query() works with no request and no flattened fields."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DatastoreClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.run_query), "__call__") as call:
        client.run_query()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore.RunQueryRequest()
@pytest.mark.asyncio
async def test_run_query_async(
    transport: str = "grpc_asyncio", request_type=datastore.RunQueryRequest
):
    """Verify async run_query() sends the request and returns a RunQueryResponse."""
    client = DatastoreAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.run_query), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datastore.RunQueryResponse()
        )
        response = await client.run_query(request)
        # Establish that the underlying gRPC stub method was called exactly
        # once (matches the sync test's assertion).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore.RunQueryRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, datastore.RunQueryResponse)
@pytest.mark.asyncio
async def test_run_query_async_from_dict():
    """Verify the async run_query path also accepts a plain dict request."""
    await test_run_query_async(request_type=dict)
def test_begin_transaction(
    transport: str = "grpc", request_type=datastore.BeginTransactionRequest
):
    """Verify begin_transaction() sends the request and surfaces response fields."""
    client = DatastoreClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.begin_transaction), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = datastore.BeginTransactionResponse(
            transaction=b"transaction_blob",
        )
        response = client.begin_transaction(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore.BeginTransactionRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, datastore.BeginTransactionResponse)
    assert response.transaction == b"transaction_blob"
def test_begin_transaction_from_dict():
    """Verify begin_transaction() also accepts a plain dict request."""
    test_begin_transaction(request_type=dict)
def test_begin_transaction_empty_call():
    """Verify begin_transaction() works with no request and no flattened fields."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DatastoreClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.begin_transaction), "__call__"
    ) as call:
        client.begin_transaction()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore.BeginTransactionRequest()
@pytest.mark.asyncio
async def test_begin_transaction_async(
    transport: str = "grpc_asyncio", request_type=datastore.BeginTransactionRequest
):
    """Verify async begin_transaction() sends the request and surfaces response fields."""
    client = DatastoreAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.begin_transaction), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datastore.BeginTransactionResponse(transaction=b"transaction_blob",)
        )
        response = await client.begin_transaction(request)
        # Establish that the underlying gRPC stub method was called exactly
        # once (matches the sync test's assertion).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore.BeginTransactionRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, datastore.BeginTransactionResponse)
    assert response.transaction == b"transaction_blob"
@pytest.mark.asyncio
async def test_begin_transaction_async_from_dict():
    """Verify the async begin_transaction path also accepts a plain dict request."""
    await test_begin_transaction_async(request_type=dict)
def test_begin_transaction_flattened():
    """Verify begin_transaction() flattened kwargs are copied into the request object."""
    client = DatastoreClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.begin_transaction), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = datastore.BeginTransactionResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.begin_transaction(project_id="project_id_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].project_id
        mock_val = "project_id_value"
        assert arg == mock_val
def test_begin_transaction_flattened_error():
    """Verify begin_transaction() rejects mixing a request object with flattened kwargs."""
    client = DatastoreClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.begin_transaction(
            datastore.BeginTransactionRequest(), project_id="project_id_value",
        )
@pytest.mark.asyncio
async def test_begin_transaction_flattened_async():
    """Verify async begin_transaction() flattened kwargs are copied into the request object."""
    client = DatastoreAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.begin_transaction), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  The async
        # surface awaits the stub, so wrap the response in a fake awaitable.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datastore.BeginTransactionResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.begin_transaction(project_id="project_id_value",)
        # Establish that the underlying call was made exactly once with the
        # expected request object values (matches the sync test's assertion).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].project_id
        mock_val = "project_id_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_begin_transaction_flattened_error_async():
    """Verify async begin_transaction() rejects mixing a request object with flattened kwargs."""
    client = DatastoreAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.begin_transaction(
            datastore.BeginTransactionRequest(), project_id="project_id_value",
        )
def test_commit(transport: str = "grpc", request_type=datastore.CommitRequest):
    """Verify commit() sends the request and surfaces response fields."""
    client = DatastoreClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.commit), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datastore.CommitResponse(index_updates=1389,)
        response = client.commit(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore.CommitRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, datastore.CommitResponse)
    assert response.index_updates == 1389
def test_commit_from_dict():
    """Verify commit() also accepts a plain dict request."""
    test_commit(request_type=dict)
def test_commit_empty_call():
    """Verify commit() works with no request and no flattened fields."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DatastoreClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.commit), "__call__") as call:
        client.commit()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore.CommitRequest()
@pytest.mark.asyncio
async def test_commit_async(
    transport: str = "grpc_asyncio", request_type=datastore.CommitRequest
):
    """Verify async commit() sends the request and surfaces response fields."""
    client = DatastoreAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.commit), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datastore.CommitResponse(index_updates=1389,)
        )
        response = await client.commit(request)
        # Establish that the underlying gRPC stub method was called exactly
        # once (matches the sync test's assertion).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore.CommitRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, datastore.CommitResponse)
    assert response.index_updates == 1389
@pytest.mark.asyncio
async def test_commit_async_from_dict():
    """Verify the async commit path also accepts a plain dict request."""
    await test_commit_async(request_type=dict)
def test_commit_flattened():
    """Verify commit() flattened kwargs are copied into the request object."""
    client = DatastoreClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.commit), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datastore.CommitResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.commit(
            project_id="project_id_value",
            mode=datastore.CommitRequest.Mode.TRANSACTIONAL,
            transaction=b"transaction_blob",
            mutations=[
                datastore.Mutation(
                    insert=entity.Entity(
                        key=entity.Key(
                            partition_id=entity.PartitionId(
                                project_id="project_id_value"
                            )
                        )
                    )
                )
            ],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].project_id
        mock_val = "project_id_value"
        assert arg == mock_val
        arg = args[0].mode
        mock_val = datastore.CommitRequest.Mode.TRANSACTIONAL
        assert arg == mock_val
        arg = args[0].mutations
        mock_val = [
            datastore.Mutation(
                insert=entity.Entity(
                    key=entity.Key(
                        partition_id=entity.PartitionId(project_id="project_id_value")
                    )
                )
            )
        ]
        assert arg == mock_val
        assert args[0].transaction == b"transaction_blob"
def test_commit_flattened_error():
    """Verify commit() rejects mixing a request object with flattened kwargs."""
    client = DatastoreClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.commit(
            datastore.CommitRequest(),
            project_id="project_id_value",
            mode=datastore.CommitRequest.Mode.TRANSACTIONAL,
            transaction=b"transaction_blob",
            mutations=[
                datastore.Mutation(
                    insert=entity.Entity(
                        key=entity.Key(
                            partition_id=entity.PartitionId(
                                project_id="project_id_value"
                            )
                        )
                    )
                )
            ],
        )
@pytest.mark.asyncio
async def test_commit_flattened_async():
    """Verify async commit() flattened kwargs are copied into the request object."""
    client = DatastoreAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.commit), "__call__") as call:
        # Designate an appropriate return value for the call.  The async
        # surface awaits the stub, so wrap the response in a fake awaitable.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datastore.CommitResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.commit(
            project_id="project_id_value",
            mode=datastore.CommitRequest.Mode.TRANSACTIONAL,
            transaction=b"transaction_blob",
            mutations=[
                datastore.Mutation(
                    insert=entity.Entity(
                        key=entity.Key(
                            partition_id=entity.PartitionId(
                                project_id="project_id_value"
                            )
                        )
                    )
                )
            ],
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values (matches the sync test's assertion).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].project_id
        mock_val = "project_id_value"
        assert arg == mock_val
        arg = args[0].mode
        mock_val = datastore.CommitRequest.Mode.TRANSACTIONAL
        assert arg == mock_val
        arg = args[0].mutations
        mock_val = [
            datastore.Mutation(
                insert=entity.Entity(
                    key=entity.Key(
                        partition_id=entity.PartitionId(project_id="project_id_value")
                    )
                )
            )
        ]
        assert arg == mock_val
        assert args[0].transaction == b"transaction_blob"
@pytest.mark.asyncio
async def test_commit_flattened_error_async():
    """Verify async commit() rejects mixing a request object with flattened kwargs."""
    client = DatastoreAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.commit(
            datastore.CommitRequest(),
            project_id="project_id_value",
            mode=datastore.CommitRequest.Mode.TRANSACTIONAL,
            transaction=b"transaction_blob",
            mutations=[
                datastore.Mutation(
                    insert=entity.Entity(
                        key=entity.Key(
                            partition_id=entity.PartitionId(
                                project_id="project_id_value"
                            )
                        )
                    )
                )
            ],
        )
def test_rollback(transport: str = "grpc", request_type=datastore.RollbackRequest):
    """Verify rollback() sends the request and returns a RollbackResponse."""
    client = DatastoreClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.rollback), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datastore.RollbackResponse()
        response = client.rollback(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore.RollbackRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, datastore.RollbackResponse)
def test_rollback_from_dict():
    """Verify rollback() also accepts a plain dict request."""
    test_rollback(request_type=dict)
def test_rollback_empty_call():
    """Verify rollback() works with no request and no flattened fields."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DatastoreClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.rollback), "__call__") as call:
        client.rollback()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore.RollbackRequest()
@pytest.mark.asyncio
async def test_rollback_async(
    transport: str = "grpc_asyncio", request_type=datastore.RollbackRequest
):
    """Verify async rollback() sends the request and returns a RollbackResponse."""
    client = DatastoreAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.rollback), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datastore.RollbackResponse()
        )
        response = await client.rollback(request)
        # Establish that the underlying gRPC stub method was called exactly
        # once (matches the sync test's assertion).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore.RollbackRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, datastore.RollbackResponse)
@pytest.mark.asyncio
async def test_rollback_async_from_dict():
    """Verify the async rollback path also accepts a plain dict request."""
    await test_rollback_async(request_type=dict)
def test_rollback_flattened():
    """Verify rollback() flattened kwargs are copied into the request object."""
    client = DatastoreClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.rollback), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datastore.RollbackResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.rollback(
            project_id="project_id_value", transaction=b"transaction_blob",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].project_id
        mock_val = "project_id_value"
        assert arg == mock_val
        arg = args[0].transaction
        mock_val = b"transaction_blob"
        assert arg == mock_val
def test_rollback_flattened_error():
    """Verify rollback() rejects mixing a request object with flattened kwargs."""
    client = DatastoreClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.rollback(
            datastore.RollbackRequest(),
            project_id="project_id_value",
            transaction=b"transaction_blob",
        )
@pytest.mark.asyncio
async def test_rollback_flattened_async():
    """Verify async rollback() flattened kwargs are copied into the request object."""
    client = DatastoreAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.rollback), "__call__") as call:
        # Designate an appropriate return value for the call.  The async
        # surface awaits the stub, so wrap the response in a fake awaitable.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datastore.RollbackResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.rollback(
            project_id="project_id_value", transaction=b"transaction_blob",
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values (matches the sync test's assertion).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].project_id
        mock_val = "project_id_value"
        assert arg == mock_val
        arg = args[0].transaction
        mock_val = b"transaction_blob"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_rollback_flattened_error_async():
    """Verify async rollback() rejects mixing a request object with flattened kwargs."""
    client = DatastoreAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.rollback(
            datastore.RollbackRequest(),
            project_id="project_id_value",
            transaction=b"transaction_blob",
        )
def test_allocate_ids(
    transport: str = "grpc", request_type=datastore.AllocateIdsRequest
):
    """Verify allocate_ids() sends the request and returns an AllocateIdsResponse."""
    client = DatastoreClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.allocate_ids), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datastore.AllocateIdsResponse()
        response = client.allocate_ids(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore.AllocateIdsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, datastore.AllocateIdsResponse)
def test_allocate_ids_from_dict():
    """Verify allocate_ids() also accepts a plain dict request."""
    test_allocate_ids(request_type=dict)
def test_allocate_ids_empty_call():
    """Verify allocate_ids() works with no request and no flattened fields."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DatastoreClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.allocate_ids), "__call__") as call:
        client.allocate_ids()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore.AllocateIdsRequest()
@pytest.mark.asyncio
async def test_allocate_ids_async(
    transport: str = "grpc_asyncio", request_type=datastore.AllocateIdsRequest
):
    """Verify async allocate_ids() sends the request and returns an AllocateIdsResponse."""
    client = DatastoreAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.allocate_ids), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datastore.AllocateIdsResponse()
        )
        response = await client.allocate_ids(request)
        # Establish that the underlying gRPC stub method was called exactly
        # once (matches the sync test's assertion).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore.AllocateIdsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, datastore.AllocateIdsResponse)
@pytest.mark.asyncio
async def test_allocate_ids_async_from_dict():
    """Verify the async allocate_ids path also accepts a plain dict request."""
    await test_allocate_ids_async(request_type=dict)
def test_allocate_ids_flattened():
    """Verify allocate_ids() flattened kwargs are copied into the request object."""
    client = DatastoreClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.allocate_ids), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datastore.AllocateIdsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.allocate_ids(
            project_id="project_id_value",
            keys=[
                entity.Key(
                    partition_id=entity.PartitionId(project_id="project_id_value")
                )
            ],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].project_id
        mock_val = "project_id_value"
        assert arg == mock_val
        arg = args[0].keys
        mock_val = [
            entity.Key(partition_id=entity.PartitionId(project_id="project_id_value"))
        ]
        assert arg == mock_val
def test_allocate_ids_flattened_error():
    """Verify allocate_ids() rejects mixing a request object with flattened kwargs."""
    client = DatastoreClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.allocate_ids(
            datastore.AllocateIdsRequest(),
            project_id="project_id_value",
            keys=[
                entity.Key(
                    partition_id=entity.PartitionId(project_id="project_id_value")
                )
            ],
        )
@pytest.mark.asyncio
async def test_allocate_ids_flattened_async():
    """Verify async allocate_ids() flattened kwargs are copied into the request object."""
    client = DatastoreAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.allocate_ids), "__call__") as call:
        # Designate an appropriate return value for the call.  The async
        # surface awaits the stub, so wrap the response in a fake awaitable.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datastore.AllocateIdsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.allocate_ids(
            project_id="project_id_value",
            keys=[
                entity.Key(
                    partition_id=entity.PartitionId(project_id="project_id_value")
                )
            ],
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values (matches the sync test's assertion).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].project_id
        mock_val = "project_id_value"
        assert arg == mock_val
        arg = args[0].keys
        mock_val = [
            entity.Key(partition_id=entity.PartitionId(project_id="project_id_value"))
        ]
        assert arg == mock_val
@pytest.mark.asyncio
async def test_allocate_ids_flattened_error_async():
    """Verify async allocate_ids() rejects mixing a request object with flattened kwargs."""
    client = DatastoreAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.allocate_ids(
            datastore.AllocateIdsRequest(),
            project_id="project_id_value",
            keys=[
                entity.Key(
                    partition_id=entity.PartitionId(project_id="project_id_value")
                )
            ],
        )
def test_reserve_ids(transport: str = "grpc", request_type=datastore.ReserveIdsRequest):
    """Verify reserve_ids() sends the request and returns a ReserveIdsResponse."""
    client = DatastoreClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.reserve_ids), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datastore.ReserveIdsResponse()
        response = client.reserve_ids(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore.ReserveIdsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, datastore.ReserveIdsResponse)
def test_reserve_ids_from_dict():
    """Verify reserve_ids() also accepts a plain dict request."""
    test_reserve_ids(request_type=dict)
def test_reserve_ids_empty_call():
    """Verify reserve_ids() works with no request and no flattened fields."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DatastoreClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.reserve_ids), "__call__") as call:
        client.reserve_ids()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore.ReserveIdsRequest()
@pytest.mark.asyncio
async def test_reserve_ids_async(
    transport: str = "grpc_asyncio", request_type=datastore.ReserveIdsRequest
):
    """Verify async reserve_ids() sends the request and returns a ReserveIdsResponse."""
    client = DatastoreAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.reserve_ids), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datastore.ReserveIdsResponse()
        )
        response = await client.reserve_ids(request)
        # Establish that the underlying gRPC stub method was called exactly
        # once (matches the sync test's assertion).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore.ReserveIdsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, datastore.ReserveIdsResponse)
@pytest.mark.asyncio
async def test_reserve_ids_async_from_dict():
    """Verify the async reserve_ids path also accepts a plain dict request."""
    await test_reserve_ids_async(request_type=dict)
def test_reserve_ids_flattened():
    """Verify reserve_ids() flattened kwargs are copied into the request object."""
    client = DatastoreClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.reserve_ids), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datastore.ReserveIdsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.reserve_ids(
            project_id="project_id_value",
            keys=[
                entity.Key(
                    partition_id=entity.PartitionId(project_id="project_id_value")
                )
            ],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].project_id
        mock_val = "project_id_value"
        assert arg == mock_val
        arg = args[0].keys
        mock_val = [
            entity.Key(partition_id=entity.PartitionId(project_id="project_id_value"))
        ]
        assert arg == mock_val
def test_reserve_ids_flattened_error():
    """Verify reserve_ids() rejects mixing a request object with flattened kwargs."""
    client = DatastoreClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.reserve_ids(
            datastore.ReserveIdsRequest(),
            project_id="project_id_value",
            keys=[
                entity.Key(
                    partition_id=entity.PartitionId(project_id="project_id_value")
                )
            ],
        )
@pytest.mark.asyncio
async def test_reserve_ids_flattened_async():
    """Verify async reserve_ids() flattened kwargs are copied into the request object."""
    client = DatastoreAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.reserve_ids), "__call__") as call:
        # Designate an appropriate return value for the call.  The async
        # surface awaits the stub, so wrap the response in a fake awaitable.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datastore.ReserveIdsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.reserve_ids(
            project_id="project_id_value",
            keys=[
                entity.Key(
                    partition_id=entity.PartitionId(project_id="project_id_value")
                )
            ],
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values (matches the sync test's assertion).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].project_id
        mock_val = "project_id_value"
        assert arg == mock_val
        arg = args[0].keys
        mock_val = [
            entity.Key(partition_id=entity.PartitionId(project_id="project_id_value"))
        ]
        assert arg == mock_val
@pytest.mark.asyncio
async def test_reserve_ids_flattened_error_async():
    """Async: a request object plus flattened fields must raise."""
    client = DatastoreAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    key = entity.Key(
        partition_id=entity.PartitionId(project_id="project_id_value")
    )
    # Mixing a request proto with flattened kwargs is rejected.
    with pytest.raises(ValueError):
        await client.reserve_ids(
            datastore.ReserveIdsRequest(),
            project_id="project_id_value",
            keys=[key],
        )
def test_credentials_transport_error():
    """Client kwargs that conflict with an explicit transport must raise."""
    # Each kwargs dict below conflicts with passing a ready-made transport
    # instance: explicit credentials, a credentials file, or scopes.
    conflicting_kwargs = [
        {"credentials": ga_credentials.AnonymousCredentials()},
        {"client_options": {"credentials_file": "credentials.json"}},
        {"client_options": {"scopes": ["1", "2"]}},
    ]
    for kwargs in conflicting_kwargs:
        transport = transports.DatastoreGrpcTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        with pytest.raises(ValueError):
            DatastoreClient(transport=transport, **kwargs)
def test_transport_instance():
    """A client may be instantiated with a custom transport instance."""
    custom_transport = transports.DatastoreGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # The client must adopt exactly the transport object it was given.
    assert DatastoreClient(transport=custom_transport).transport is custom_transport
def test_transport_get_channel():
    """Both gRPC transports expose a usable channel."""
    for transport_cls in (
        transports.DatastoreGrpcTransport,
        transports.DatastoreGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        # A freshly constructed transport must hold a truthy channel.
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [transports.DatastoreGrpcTransport, transports.DatastoreGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
    """Default (ADC) credentials are used when none are provided."""
    with mock.patch.object(google.auth, "default") as mocked_default:
        mocked_default.return_value = (ga_credentials.AnonymousCredentials(), None)
        # Constructing the transport with no credentials must consult ADC.
        transport_class()
        mocked_default.assert_called_once()
def test_transport_grpc_default():
    """The sync client defaults to the gRPC transport."""
    transport = DatastoreClient(
        credentials=ga_credentials.AnonymousCredentials(),
    ).transport
    assert isinstance(transport, transports.DatastoreGrpcTransport)
def test_datastore_base_transport_error():
    """credentials and credentials_file together must be rejected."""
    # Passing both a credentials object and credentials_file should raise.
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transports.DatastoreTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_datastore_base_transport():
    """Every RPC on the abstract base transport raises NotImplementedError."""
    # Stub __init__ so the abstract transport can be instantiated bare.
    with mock.patch(
        "google.cloud.datastore_v1.services.datastore.transports.DatastoreTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.DatastoreTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        # Each RPC stub and close() must blindly raise until a concrete
        # transport overrides it.
        for rpc_name in (
            "lookup",
            "run_query",
            "begin_transaction",
            "commit",
            "rollback",
            "allocate_ids",
            "reserve_ids",
        ):
            with pytest.raises(NotImplementedError):
                getattr(transport, rpc_name)(request=object())
        with pytest.raises(NotImplementedError):
            transport.close()
def test_datastore_base_transport_with_credentials_file():
    # Instantiate the base transport with a credentials file
    # Patch the loader so no real key file is read, and stub out
    # _prep_wrapped_messages so no wrapping work happens at construction.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.datastore_v1.services.datastore.transports.DatastoreTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.DatastoreTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        # The file must be loaded with the Datastore default scopes and the
        # quota project forwarded verbatim.
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/datastore",
            ),
            quota_project_id="octopus",
        )
def test_datastore_base_transport_with_adc():
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.datastore_v1.services.datastore.transports.DatastoreTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.DatastoreTransport()
        # With no explicit credentials, construction falls back to
        # google.auth.default() exactly once.
        adc.assert_called_once()
def test_datastore_auth_adc():
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        DatastoreClient()
        # ADC must be queried with the service's default scopes and no
        # explicit user scopes or quota project.
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/datastore",
            ),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.DatastoreGrpcTransport, transports.DatastoreGrpcAsyncIOTransport,],
)
def test_datastore_transport_auth_adc(transport_class):
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # Explicit scopes are passed through as-is, while the service
        # defaults are still supplied separately via default_scopes.
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/datastore",
            ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.DatastoreGrpcTransport, grpc_helpers),
        (transports.DatastoreGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_datastore_transport_create_channel(transport_class, grpc_helpers):
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # The channel must target the default API host with ADC creds, the
        # caller's scopes/quota project, and unlimited gRPC message sizes.
        create_channel.assert_called_with(
            "datastore.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/datastore",
            ),
            scopes=["1", "2"],
            default_host="datastore.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.DatastoreGrpcTransport, transports.DatastoreGrpcAsyncIOTransport],
)
def test_datastore_grpc_transport_client_cert_source_for_mtls(transport_class):
    # mTLS precedence: explicit ssl_channel_credentials wins; otherwise the
    # client_cert_source_for_mtls callback is used to build the credentials.
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            # The cert/key pair produced by the callback must be handed to
            # grpc.ssl_channel_credentials.
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_datastore_host_no_port():
    """An endpoint without a port gets the default :443 appended."""
    options = client_options.ClientOptions(api_endpoint="datastore.googleapis.com")
    client = DatastoreClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "datastore.googleapis.com:443"
def test_datastore_host_with_port():
    """An endpoint with an explicit port keeps that port."""
    options = client_options.ClientOptions(api_endpoint="datastore.googleapis.com:8000")
    client = DatastoreClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "datastore.googleapis.com:8000"
def test_datastore_grpc_transport_channel():
    """An explicitly provided channel is adopted verbatim by the transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.DatastoreGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: compare with None via identity (PEP 8 / flake8 E711), not ``==``.
    assert transport._ssl_channel_credentials is None
def test_datastore_grpc_asyncio_transport_channel():
    """An explicitly provided aio channel is adopted verbatim by the transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.DatastoreGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: compare with None via identity (PEP 8 / flake8 E711), not ``==``.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.DatastoreGrpcTransport, transports.DatastoreGrpcAsyncIOTransport],
)
def test_datastore_transport_channel_mtls_with_client_cert_source(transport_class):
    # Using the deprecated api_mtls_endpoint/client_cert_source arguments must
    # warn, build SSL credentials from the callback, and dial the mTLS host.
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            # NOTE(review): the literal cert/key bytes below presumably match
            # what client_cert_source_callback returns -- confirm against its
            # definition earlier in this file.
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.DatastoreGrpcTransport, transports.DatastoreGrpcAsyncIOTransport],
)
def test_datastore_transport_channel_mtls_with_adc(transport_class):
    # With api_mtls_endpoint set but client_cert_source=None, the transport
    # must fall back to ADC-derived SslCredentials (patched below), still
    # warning about the deprecated arguments.
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            # The mTLS endpoint is dialed with the ADC-provided SSL creds.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_common_billing_account_path():
    """The billing-account helper renders the canonical resource path."""
    path = DatastoreClient.common_billing_account_path("squid")
    assert path == "billingAccounts/squid"
def test_parse_common_billing_account_path():
    """Billing-account path construction and parsing are inverses."""
    fields = {"billing_account": "clam"}
    path = DatastoreClient.common_billing_account_path(**fields)
    assert DatastoreClient.parse_common_billing_account_path(path) == fields
def test_common_folder_path():
    """The folder helper renders the canonical resource path."""
    assert DatastoreClient.common_folder_path("whelk") == "folders/whelk"
def test_parse_common_folder_path():
    """Folder path construction and parsing are inverses."""
    fields = {"folder": "octopus"}
    path = DatastoreClient.common_folder_path(**fields)
    assert DatastoreClient.parse_common_folder_path(path) == fields
def test_common_organization_path():
    """The organization helper renders the canonical resource path."""
    assert DatastoreClient.common_organization_path("oyster") == "organizations/oyster"
def test_parse_common_organization_path():
    """Organization path construction and parsing are inverses."""
    fields = {"organization": "nudibranch"}
    path = DatastoreClient.common_organization_path(**fields)
    assert DatastoreClient.parse_common_organization_path(path) == fields
def test_common_project_path():
    """The project helper renders the canonical resource path."""
    assert DatastoreClient.common_project_path("cuttlefish") == "projects/cuttlefish"
def test_parse_common_project_path():
    """Project path construction and parsing are inverses."""
    fields = {"project": "mussel"}
    path = DatastoreClient.common_project_path(**fields)
    assert DatastoreClient.parse_common_project_path(path) == fields
def test_common_location_path():
    """The location helper renders the canonical resource path."""
    path = DatastoreClient.common_location_path("winkle", "nautilus")
    assert path == "projects/winkle/locations/nautilus"
def test_parse_common_location_path():
    """Location path construction and parsing are inverses."""
    fields = {"project": "scallop", "location": "abalone"}
    path = DatastoreClient.common_location_path(**fields)
    assert DatastoreClient.parse_common_location_path(path) == fields
def test_client_withDEFAULT_CLIENT_INFO():
    # client_info must be forwarded to the transport's _prep_wrapped_messages
    # both when the client builds the transport...
    client_info = gapic_v1.client_info.ClientInfo()
    with mock.patch.object(
        transports.DatastoreTransport, "_prep_wrapped_messages"
    ) as prep:
        client = DatastoreClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    # ...and when the transport class is instantiated directly.
    with mock.patch.object(
        transports.DatastoreTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = DatastoreClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    # Exiting the async client context must close the gRPC channel exactly
    # once, and only on exit -- not while the context is still open.
    client = DatastoreAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Closing the sync client closes the underlying transport channel."""
    # Map transport name -> attribute that holds its channel. (A plain dict
    # here also avoids shadowing the ``transports`` module.)
    channel_attrs = {"grpc": "_grpc_channel"}
    for transport_name, channel_attr in channel_attrs.items():
        client = DatastoreClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        channel_type = type(getattr(client.transport, channel_attr))
        with mock.patch.object(channel_type, "close") as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client context manager closes its transport on exit."""
    for transport_name in ("grpc",):
        client = DatastoreClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
| 37.421053
| 107
| 0.671259
|
acfffdd94655cd764ae34e693bc3423439bca767
| 12,211
|
py
|
Python
|
src/parsetab.py
|
ccverak/sc
|
f8224474d993e1fed8648a1c77b46dae76e87b1c
|
[
"MIT"
] | null | null | null |
src/parsetab.py
|
ccverak/sc
|
f8224474d993e1fed8648a1c77b46dae76e87b1c
|
[
"MIT"
] | null | null | null |
src/parsetab.py
|
ccverak/sc
|
f8224474d993e1fed8648a1c77b46dae76e87b1c
|
[
"MIT"
] | null | null | null |
# parsetab.py
# This file is automatically generated. Do not edit.
_lr_method = 'LALR'
_lr_signature = '\x8e\n\xbb\x058\x13\xef\xb1E\xe7\xd3\x16\xeb\x1f\x15U'
_lr_action_items = {'REAL':([1,13,24,25,31,38,39,42,48,49,50,51,52,53,54,55,56,57,58,59,61,64,94,],[20,20,20,20,-26,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,-21,]),'READ':([0,5,6,7,9,10,16,18,19,32,35,41,43,45,65,71,74,88,91,93,96,98,99,100,101,102,104,],[2,2,-2,-6,-5,-4,2,-7,2,-3,2,2,-14,2,2,-24,-17,-25,-22,-19,-11,2,-8,-9,-15,2,-13,]),'YLOG':([20,21,22,23,26,27,28,29,37,46,72,75,76,77,78,79,80,81,82,83,84,85,86,87,89,],[-49,-47,-34,-46,-48,-43,-33,61,61,-36,61,-35,-42,-28,-29,-38,-41,-40,-39,-31,-32,-30,-37,61,61,]),'WHILE':([0,5,6,7,9,10,16,18,19,32,35,41,43,45,65,71,74,88,91,93,96,98,99,100,101,102,104,],[3,3,-2,-6,-5,-4,3,-7,3,-3,3,3,-14,3,3,-24,-17,-25,-22,-19,-11,3,-8,-9,-15,3,-13,]),'IGUAL_IGUAL':([20,21,22,23,26,27,28,46,75,77,78,83,84,85,],[-49,-47,-34,-46,-48,52,-33,-36,-35,-28,-29,-31,-32,-30,]),'MAYOR_QUE':([20,21,22,23,26,27,28,46,75,77,78,83,84,85,],[-49,-47,-34,-46,-48,53,-33,-36,-35,-28,-29,-31,-32,-30,]),'DIV':([20,21,22,23,26,27,28,46,47,69,70,75,76,77,78,79,80,81,82,83,84,85,86,92,],[-49,-47,-34,-46,-48,55,-33,-36,55,55,55,-35,55,55,55,55,55,55,55,-31,-32,-30,55,55,]),'PUNTO_Y_COMA':([20,21,22,23,26,27,28,37,46,67,70,75,76,77,78,79,80,81,82,83,84,85,86,87,89,90,92,95,],[-49,-47,-34,-46,-48,-43,-33,68,-36,94,96,-35,-42,-28,-29,-38,-41,-40,-39,-31,-32,-30,-37,-45,-44,99,-12,100,]),'MULT':([20,21,22,23,26,27,28,46,47,69,70,75,76,77,78,79,80,81,82,83,84,85,86,92,],[-49,-47,-34,-46,-48,57,-33,-36,57,57,57,-35,57,57,57,57,57,57,57,-31,-32,-30,57,57,]),'DISTINTO':([20,21,22,23,26,27,28,46,75,77,78,83,84,85,],[-49,-47,-34,-46,-48,48,-33,-36,-35,-28,-29,-31,-32,-30,]),'FBOOLEAN':([1,13,24,25,31,38,39,42,48,49,50,51,52,53,54,55,56,57,58,59,61,64,94,],[21,21,21,21,-26,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,-21,]),'ASIG':([11,15,34,],[-10,39,64,]),'OLOG':([20,21,22,23,26,27,28,29,37,46,72,75,76,77,78,79,80,81,82,83,84,85,86,87,89,],[-49,-47,-34,-46,-48,-43,-33,59,59,-36,59,-35,-42,-28,-29,-38,-41,-40,-39,-31,-32,-30,-37,59,59,]),'TBOOLE
AN':([1,13,24,25,31,38,39,42,48,49,50,51,52,53,54,55,56,57,58,59,61,64,94,],[23,23,23,23,-26,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,-21,]),'RE':([1,13,20,21,22,23,24,25,26,27,28,31,38,39,42,46,47,48,49,50,51,52,53,54,55,56,57,58,59,61,64,69,70,75,76,77,78,79,80,81,82,83,84,85,86,92,94,],[24,24,-49,-47,-34,-46,24,24,-48,50,-33,-26,24,24,24,-36,50,24,24,24,24,24,24,24,24,24,24,24,24,24,24,50,50,-35,50,-28,-29,50,50,50,50,-31,-32,-30,50,50,-21,]),'MENOR_QUE':([20,21,22,23,26,27,28,46,75,77,78,83,84,85,],[-49,-47,-34,-46,-48,51,-33,-36,-35,-28,-29,-31,-32,-30,]),'MAYOR_IGUAL':([20,21,22,23,26,27,28,46,75,77,78,83,84,85,],[-49,-47,-34,-46,-48,54,-33,-36,-35,-28,-29,-31,-32,-30,]),'PAR_AB':([1,2,3,12,13,14,17,24,25,31,38,39,42,48,49,50,51,52,53,54,55,56,57,58,59,61,64,94,],[25,30,31,36,25,38,42,25,25,-26,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,-21,]),'MENOR_IGUAL':([20,21,22,23,26,27,28,46,75,77,78,83,84,85,],[-49,-47,-34,-46,-48,58,-33,-36,-35,-28,-29,-31,-32,-30,]),'ENTERO':([1,13,24,25,31,38,39,42,48,49,50,51,52,53,54,55,56,57,58,59,61,64,94,],[26,26,26,26,-26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,-21,]),'IDENT':([0,1,5,6,7,8,9,10,13,16,18,19,24,25,30,31,32,35,36,38,39,41,42,43,45,48,49,50,51,52,53,54,55,56,57,58,59,61,64,65,68,71,74,88,91,93,94,96,98,99,100,101,102,104,],[11,28,11,-2,-6,11,-5,-4,28,11,-7,11,28,28,62,-26,-3,11,11,28,28,11,28,-14,11,28,28,28,28,28,28,28,28,28,28,28,28,28,28,11,-20,-24,-17,-25,-22,-19,-21,-11,11,-8,-9,-15,11,-13,]),'LLAVE_AB':([60,63,73,97,],[88,91,98,101,]),'FOR':([0,5,6,7,9,10,16,18,19,32,35,41,43,45,65,71,74,88,91,93,96,98,99,100,101,102,104,],[12,12,-2,-6,-5,-4,12,-7,12,-3,12,12,-14,12,12,-24,-17,-25,-22,-19,-11,12,-8,-9,-15,12,-13,]),'LLAVE_CERR':([6,7,9,10,18,32,40,41,43,45,65,66,71,74,93,96,99,100,102,103,104,],[-2,-6,-5,-4,-7,-3,71,-27,-14,74,-23,93,-24,-17,-19,-11,-8,-9,-18,104,-13,]),'ELSE':([44,74,],[73,-16,]),'WRITE':([0,5,6,7,9,10,16,18,19,32,35,41,43,45,65,71,74,88,91,93,96,98,99,100,101,
102,104,],[14,14,-2,-6,-5,-4,14,-7,14,-3,14,14,-14,14,14,-24,-17,-25,-22,-19,-11,14,-8,-9,-15,14,-13,]),'IF':([0,5,6,7,9,10,16,18,19,32,35,41,43,45,65,71,74,88,91,93,96,98,99,100,101,102,104,],[17,17,-2,-6,-5,-4,17,-7,17,-3,17,17,-14,17,17,-24,-17,-25,-22,-19,-11,17,-8,-9,-15,17,-13,]),'PAR_CERR':([20,21,22,23,26,27,28,29,33,46,47,62,69,72,75,76,77,78,79,80,81,82,83,84,85,86,87,89,92,],[-49,-47,-34,-46,-48,-43,-33,60,63,-36,75,90,95,97,-35,-42,-28,-29,-38,-41,-40,-39,-31,-32,-30,-37,-45,-44,-12,]),'SU':([20,21,22,23,26,27,28,46,47,69,70,75,76,77,78,79,80,81,82,83,84,85,86,92,],[-49,-47,-34,-46,-48,49,-33,-36,49,49,49,-35,49,-28,-29,49,49,49,49,-31,-32,-30,49,49,]),'$end':([4,5,6,7,9,10,18,32,43,71,74,93,96,99,100,104,],[0,-1,-2,-6,-5,-4,-7,-3,-14,-24,-17,-19,-11,-8,-9,-13,]),'MOD':([20,21,22,23,26,27,28,46,47,69,70,75,76,77,78,79,80,81,82,83,84,85,86,92,],[-49,-47,-34,-46,-48,56,-33,-36,56,56,56,-35,56,56,56,56,56,56,56,-31,-32,-30,56,56,]),}
_lr_action = {}
# Expand the compact (state-list, value-list) pairs into the nested
# _lr_action[state][token] table consumed by the LALR parser driver.
for _k, _v in _lr_action_items.items():
    for _x, _y in zip(_v[0], _v[1]):
        # dict.has_key() was removed in Python 3; ``in`` works on 2 and 3.
        if _x not in _lr_action:
            _lr_action[_x] = {}
        _lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'SentAsig2':([8,36,],[33,67,]),'WhileParAb':([0,5,16,19,35,41,45,65,98,102,],[1,1,1,1,1,1,1,1,1,1,]),'ParteSentW':([16,],[40,]),'SentVIf2':([19,],[43,]),'SentVElse':([98,],[103,]),'Sent':([0,5,16,19,35,41,45,65,98,102,],[6,32,6,6,6,32,32,32,6,32,]),'SentVIf':([19,],[44,]),'SentWhile':([0,5,16,19,35,41,45,65,98,102,],[7,7,7,7,7,7,7,7,7,7,]),'ParteIniFor':([0,5,16,19,35,41,45,65,98,102,],[8,8,8,8,8,8,8,8,8,8,]),'SentIf':([0,5,16,19,35,41,45,65,98,102,],[9,9,9,9,9,9,9,9,9,9,]),'SentAsig':([0,5,16,19,35,41,45,65,98,102,],[10,10,10,10,10,10,10,10,10,10,]),'Expresion':([1,13,24,25,38,39,42,48,49,50,51,52,53,54,55,56,57,58,59,61,64,],[27,27,46,47,69,70,27,76,77,78,79,80,81,82,83,84,85,86,27,27,92,]),'SeccionIni':([0,5,16,19,35,41,45,65,98,102,],[13,13,13,13,13,13,13,13,13,13,]),'ParteDir':([0,5,8,16,19,35,36,41,45,65,98,102,],[15,15,34,15,15,15,34,15,15,15,15,15,]),'ListSent':([0,16,19,35,98,],[5,41,45,65,102,]),'ParteWhile':([0,5,16,19,35,41,45,65,98,102,],[16,16,16,16,16,16,16,16,16,16,]),'SentFor':([0,5,16,19,35,41,45,65,98,102,],[18,18,18,18,18,18,18,18,18,18,]),'LSentFor':([35,],[66,]),'Constante':([1,13,24,25,38,39,42,48,49,50,51,52,53,54,55,56,57,58,59,61,64,],[22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,]),'Program':([0,],[4,]),'Cond':([1,13,42,59,61,],[29,37,72,87,89,]),'ParteIf':([0,5,16,19,35,41,45,65,98,102,],[19,19,19,19,19,19,19,19,19,19,]),'ParteSentIncF':([8,],[35,]),}
_lr_goto = {}
# Same expansion as above, for the goto (nonterminal transition) table.
for _k, _v in _lr_goto_items.items():
    for _x, _y in zip(_v[0], _v[1]):
        # dict.has_key() was removed in Python 3; ``in`` works on 2 and 3.
        if _x not in _lr_goto:
            _lr_goto[_x] = {}
        _lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S'",1,None,None,None),
('Program',1,'p_Program','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',44),
('ListSent',1,'p_ListSent','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',48),
('ListSent',2,'p_ListSent_R','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',52),
('Sent',1,'p_Sent','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',56),
('Sent',1,'p_Sent','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',57),
('Sent',1,'p_Sent','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',58),
('Sent',1,'p_Sent','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',59),
('Sent',5,'p_SentRead','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',63),
('Sent',5,'p_SentWrite','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',69),
('ParteDir',1,'p_ParteDir','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',78),
('SentAsig',4,'p_SentAsig','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',83),
('SentAsig2',3,'p_SentAsig2','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',87),
('SentIf',6,'p_SentIf','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',93),
('SentIf',2,'p_SentIf2','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',97),
('ParteIf',5,'p_ParteIf','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',101),
('SentVIf',2,'p_SentVIf','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',107),
('SentVIf2',2,'p_SentVIf2','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',116),
('SentVElse',1,'p_SentVElse','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',122),
('SentFor',4,'p_SentFor','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',128),
('ParteIniFor',3,'p_ParteIniFor','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',132),
('SeccionIni',4,'p_SeccionIni','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',145),
('ParteSentIncF',3,'p_ParteSentIncF','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',150),
('LSentFor',1,'p_LSentFor','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',156),
('SentWhile',3,'p_SentWhile','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',163),
('ParteWhile',4,'p_ParteWhile','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',167),
('WhileParAb',2,'p_WhileParAb','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',174),
('ParteSentW',1,'p_ParteSentW','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',179),
('Expresion',3,'p_Expresion','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',188),
('Expresion',3,'p_Expresion','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',189),
('Expresion',3,'p_Expresion','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',190),
('Expresion',3,'p_Expresion','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',191),
('Expresion',3,'p_Expresion','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',192),
('Expresion',1,'p_Expresion_IDENT','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',196),
('Expresion',1,'p_Expresion_CTE','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',201),
('Expresion',3,'p_Expresion_Group','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',205),
('Expresion',2,'p_Expresion_NE','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',209),
('Cond',3,'p_Cond','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',214),
('Cond',3,'p_Cond','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',215),
('Cond',3,'p_Cond','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',216),
('Cond',3,'p_Cond','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',217),
('Cond',3,'p_Cond','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',218),
('Cond',3,'p_Cond','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',219),
('Cond',1,'p_Cond_Expresion','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',223),
('Cond',3,'p_Cond_Logic','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',227),
('Cond',3,'p_Cond_Logic','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',228),
('Constante',1,'p_Constante','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',232),
('Constante',1,'p_Constante','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',233),
('Constante',1,'p_Constante','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',234),
('Constante',1,'p_Constante','c:\\Documents and Settings\\ccvera\\Desktop\\sc 1.0\\src\\scparser.py',235),
]
| 156.551282
| 4,956
| 0.624601
|
acfffebaad2938771d01dd305f621ae0a07da168
| 1,173
|
py
|
Python
|
pybiotools4p/softwares/fasta/gffread.py
|
btrspg/pybiotools4p
|
b12bdea26570fc52fa8823f0a2cfeed8491ac4c1
|
[
"Apache-2.0"
] | null | null | null |
pybiotools4p/softwares/fasta/gffread.py
|
btrspg/pybiotools4p
|
b12bdea26570fc52fa8823f0a2cfeed8491ac4c1
|
[
"Apache-2.0"
] | null | null | null |
pybiotools4p/softwares/fasta/gffread.py
|
btrspg/pybiotools4p
|
b12bdea26570fc52fa8823f0a2cfeed8491ac4c1
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: 11_gffread.ipynb (unless otherwise specified).
__all__ = ['Gffread']
# Cell
from ..base import Base,modify_cmd
# Cell
class Gffread(Base):
    """Command-line wrapper for ``gffread`` (GFF/GTF utility)."""

    def __init__(self, software, fd):
        # software: path to the gffread executable.
        # fd: dict of default parameter strings; must contain a 'gtf2fasta'
        #     entry (see cmd_extract_fasta).
        super(Gffread, self).__init__(software)
        self._default = fd

    def cmd_version(self):
        '''
        Build a shell command that echoes this wrapper's repr and then
        prints the gffread version.

        :return: shell command string
        '''
        return 'echo {repr} ;{software} --version'.format(
            repr=self.__repr__(),
            software=self._software
        )

    @modify_cmd
    def cmd_extract_fasta(self, gtf, reference, output):
        '''
        Build a shell command that extracts transcript FASTA sequences
        (``-w``) from a GTF/GFF annotation against a reference genome.

        :param gtf: input GTF/GFF annotation file
        :param reference: reference genome FASTA (passed via -g)
        :param output: output FASTA path (passed via -w)
        :return: shell command string
        '''
        # **locals() supplies gtf/reference/output to the template below.
        return r'''
{software} {para} \
{gtf} \
-g {reference} \
-w {output}
        '''.format(
            para=self._default['gtf2fasta'],
            software=self._software,
            **locals()
        )

    def __repr__(self):
        return 'gffread:' + self._software

    def __str__(self):
        return 'GFF/GTF utility providing format conversions, ' \
               'filtering, FASTA sequence extraction and more.'
| 22.557692
| 90
| 0.537937
|
acffff045cd59ca3aa1ac5136002a4eca0990cc3
| 6,351
|
py
|
Python
|
tests/test_shutdown.py
|
pentschev/ucx-py
|
d701a3facd85ef2deece619a4f707fdebee36e3c
|
[
"BSD-3-Clause"
] | 76
|
2019-06-08T04:03:39.000Z
|
2022-01-07T20:34:23.000Z
|
tests/test_shutdown.py
|
rapidsai/ucx-py
|
e28d770aa0b47c0e63c2e7e61649f1b355560e8a
|
[
"BSD-3-Clause"
] | 644
|
2019-06-04T23:06:02.000Z
|
2022-02-24T11:17:45.000Z
|
tests/test_shutdown.py
|
pentschev/ucx-py
|
d701a3facd85ef2deece619a4f707fdebee36e3c
|
[
"BSD-3-Clause"
] | 32
|
2019-08-14T09:22:02.000Z
|
2022-01-21T20:17:50.000Z
|
import asyncio
import sys
import numpy as np
import pytest
import ucp
def _skip_if_not_supported(message_type):
if message_type == "am" and not ucp._libs.ucx_api.is_am_supported():
pytest.skip("AM only supported in UCX >= 1.11")
async def _shutdown_send(ep, message_type):
msg = np.arange(10 ** 6)
if message_type == "tag":
await ep.send(msg)
else:
await ep.am_send(msg)
async def _shutdown_recv(ep, message_type):
if message_type == "tag":
msg = np.empty(10 ** 6)
await ep.recv(msg)
else:
await ep.am_recv()
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_server_shutdown(message_type):
"""The server calls shutdown"""
endpoint_error_handling = ucp.get_ucx_version() >= (1, 10, 0)
_skip_if_not_supported(message_type)
async def server_node(ep):
with pytest.raises(ucp.exceptions.UCXCanceled):
await asyncio.gather(_shutdown_recv(ep, message_type), ep.close())
async def client_node(port):
ep = await ucp.create_endpoint(
ucp.get_address(), port, endpoint_error_handling=endpoint_error_handling
)
with pytest.raises(ucp.exceptions.UCXCanceled):
await _shutdown_recv(ep, message_type)
listener = ucp.create_listener(
server_node, endpoint_error_handling=endpoint_error_handling
)
await client_node(listener.port)
@pytest.mark.skipif(
sys.version_info < (3, 7), reason="test currently fails for python3.6"
)
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_client_shutdown(message_type):
"""The client calls shutdown"""
endpoint_error_handling = ucp.get_ucx_version() >= (1, 10, 0)
_skip_if_not_supported(message_type)
async def client_node(port):
ep = await ucp.create_endpoint(
ucp.get_address(), port, endpoint_error_handling=endpoint_error_handling
)
with pytest.raises(ucp.exceptions.UCXCanceled):
await asyncio.gather(_shutdown_recv(ep, message_type), ep.close())
async def server_node(ep):
with pytest.raises(ucp.exceptions.UCXCanceled):
await _shutdown_recv(ep, message_type)
listener = ucp.create_listener(
server_node, endpoint_error_handling=endpoint_error_handling
)
await client_node(listener.port)
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_listener_close(message_type):
"""The server close the listener"""
endpoint_error_handling = ucp.get_ucx_version() >= (1, 10, 0)
_skip_if_not_supported(message_type)
async def client_node(listener):
ep = await ucp.create_endpoint(
ucp.get_address(),
listener.port,
endpoint_error_handling=endpoint_error_handling,
)
await _shutdown_recv(ep, message_type)
await _shutdown_recv(ep, message_type)
assert listener.closed() is False
listener.close()
assert listener.closed() is True
async def server_node(ep):
await _shutdown_send(ep, message_type)
await _shutdown_send(ep, message_type)
listener = ucp.create_listener(
server_node, endpoint_error_handling=endpoint_error_handling
)
await client_node(listener)
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_listener_del(message_type):
"""The client delete the listener"""
endpoint_error_handling = ucp.get_ucx_version() >= (1, 10, 0)
_skip_if_not_supported(message_type)
async def server_node(ep):
await _shutdown_send(ep, message_type)
await _shutdown_send(ep, message_type)
listener = ucp.create_listener(
server_node, endpoint_error_handling=endpoint_error_handling
)
ep = await ucp.create_endpoint(
ucp.get_address(),
listener.port,
endpoint_error_handling=endpoint_error_handling,
)
await _shutdown_recv(ep, message_type)
assert listener.closed() is False
del listener
await _shutdown_recv(ep, message_type)
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_close_after_n_recv(message_type):
"""The Endpoint.close_after_n_recv()"""
endpoint_error_handling = ucp.get_ucx_version() >= (1, 10, 0)
_skip_if_not_supported(message_type)
async def server_node(ep):
for _ in range(10):
await _shutdown_send(ep, message_type)
async def client_node(port):
ep = await ucp.create_endpoint(
ucp.get_address(), port, endpoint_error_handling=endpoint_error_handling
)
ep.close_after_n_recv(10)
for _ in range(10):
await _shutdown_recv(ep, message_type)
assert ep.closed()
ep = await ucp.create_endpoint(
ucp.get_address(), port, endpoint_error_handling=endpoint_error_handling
)
for _ in range(5):
await _shutdown_recv(ep, message_type)
ep.close_after_n_recv(5)
for _ in range(5):
await _shutdown_recv(ep, message_type)
assert ep.closed()
ep = await ucp.create_endpoint(
ucp.get_address(), port, endpoint_error_handling=endpoint_error_handling
)
for _ in range(5):
await _shutdown_recv(ep, message_type)
ep.close_after_n_recv(10, count_from_ep_creation=True)
for _ in range(5):
await _shutdown_recv(ep, message_type)
assert ep.closed()
ep = await ucp.create_endpoint(
ucp.get_address(), port, endpoint_error_handling=endpoint_error_handling
)
for _ in range(10):
await _shutdown_recv(ep, message_type)
with pytest.raises(
ucp.exceptions.UCXError, match="`n` cannot be less than current recv_count",
):
ep.close_after_n_recv(5, count_from_ep_creation=True)
ep.close_after_n_recv(1)
with pytest.raises(
ucp.exceptions.UCXError, match="close_after_n_recv has already been set to",
):
ep.close_after_n_recv(1)
listener = ucp.create_listener(
server_node, endpoint_error_handling=endpoint_error_handling
)
await client_node(listener.port)
| 31.440594
| 88
| 0.675169
|
acffff5e82faf89367ecafaebc0f3167a77f21cb
| 742
|
py
|
Python
|
send.py
|
svenseeberg/ml-text-classification
|
e20191a923e43707b32aad5c73162512723441af
|
[
"MIT"
] | 1
|
2018-07-20T08:45:19.000Z
|
2018-07-20T08:45:19.000Z
|
send.py
|
svenseeberg/ml-text-classification
|
e20191a923e43707b32aad5c73162512723441af
|
[
"MIT"
] | null | null | null |
send.py
|
svenseeberg/ml-text-classification
|
e20191a923e43707b32aad5c73162512723441af
|
[
"MIT"
] | 1
|
2018-07-20T08:45:24.000Z
|
2018-07-20T08:45:24.000Z
|
#!/usr/bin/env python3
import requests
import sys
import json
with open('training.json') as f:
data = json.load(f)
if(len(sys.argv) == 1 or sys.argv[1] == "add"):
x = 0
y = len(data)
for item in data:
r = requests.post('http://localhost:8082/add/', json=item)
x = x + 1
print(str(x)+" of "+str(y))
elif(sys.argv[1] == "train"):
r = requests.post('http://localhost:8082/train/')
elif(sys.argv[1] == "flush"):
r = requests.post('http://localhost:8082/flush/')
elif(sys.argv[1] == "train"):
r = requests.post('http://localhost:8082/train/')
elif(sys.argv[1] == "classify"):
item = ['Charlie is reading documentation.']
r = requests.post('http://localhost:8082/classify/', json=item)
| 29.68
| 67
| 0.603774
|
acffff5ef1f370e741b58401b2ff3a658d861783
| 9,251
|
py
|
Python
|
set1/challenge6.py
|
8u1a/my_matasano_crypto_challenges
|
0da1f7fc9445449d5d02867b659325ef5fe62c7d
|
[
"Unlicense"
] | null | null | null |
set1/challenge6.py
|
8u1a/my_matasano_crypto_challenges
|
0da1f7fc9445449d5d02867b659325ef5fe62c7d
|
[
"Unlicense"
] | null | null | null |
set1/challenge6.py
|
8u1a/my_matasano_crypto_challenges
|
0da1f7fc9445449d5d02867b659325ef5fe62c7d
|
[
"Unlicense"
] | null | null | null |
__author__ = 'christianbuia'
import binascii
import sys
import base64
def hamming_distance_two_hexstrings(hexstring1, hexstring2):
distance = 0
if len(hexstring1) != len(hexstring2):
sys.stderr.write("unexpected: length of compared strings don't match. exiting.\n")
return False
bytes1 = binascii.unhexlify(hexstring1)
bytes2 = binascii.unhexlify(hexstring2)
bin_string1 = ""
bin_string2 = ""
for i in range(len(bytes1)):
#taking [2:] to convert 0b1110100 to 1110100
temp_bin1 = bin(bytes1[i])[2:]
temp_bin2 = bin(bytes2[i])[2:]
while len(temp_bin1) < 8:
temp_bin1 = "0" + temp_bin1
while len(temp_bin2) < 8:
temp_bin2 = "0" + temp_bin2
bin_string1 += temp_bin1
bin_string2 += temp_bin2
for i in range(len(bin_string1)):
if bin_string1[i] != bin_string2[i]:
distance += 1
return distance
#--------------------------------------------------------------------------
def multibyte_xor(bs, key):
count=0
decoded_bytes = []
for b in bs:
decoded_bytes.append(b^key[count % len(key)])
count+=1
return bytearray(decoded_bytes)
#--------------------------------------------------------------------------
def fixed_xor_hexstrings(hexstring1, key):
bytes1=binascii.unhexlify(hexstring1)
decoded = ""
for byte in bytes1:
#print(byte)
#print(key)
decoded += chr(byte^key)
return decoded
#--------------------------------------------------------------------------
def evaluate_as_english(message, ratio_common_printables, ratio_spaces_to_letters):
#count the number of common printables vs non-common printbables
count_cp = 0
count_ncp = 0
count_letters = 0
count_spaces = 0
for m in message:
letters=False
numbers=False
punct = False
m = ord(m)
if m > 64 and m < 123:
letters = True
count_letters+=1
if m > 47 and m < 58:
numbers=True
if m==32 or m==33 or m==34 or m==40 or m==41 or m==46 or m==63:
punct = True
if m==32:
count_spaces+=1
if letters or numbers or punct:
count_cp+=1
else:
count_ncp+=1
if count_cp / (count_cp + count_ncp) > ratio_common_printables:
if count_spaces / (count_letters + count_spaces) > ratio_spaces_to_letters:
return True
else:
return False
#--------------------------------------------------------------------------
def solve_block(block_data):
for i in range(256):
message = fixed_xor_hexstrings(block_data, i)
if evaluate_as_english(message, .8, .1):
return i
return False
#--------------------------------------------------------------------------
def transpose_blocks(ciphertext, block_size):
transposed_blocks = []
#iterate through the length of the key
for i in range(block_size):
b_array = bytearray()
count=0
for b in ciphertext:
if ((count - i) % block_size) == 0:
b_array.append(b)
count+=1
transposed_blocks.append(b_array)
return transposed_blocks
#--------------------------------------------------------------------------
def solve_challenge(b64_crypt):
ciphertext = base64.b64decode(b64_crypt)
#dictionary of hamming distances in the form {'keysize':'distance'}
keysize_hamming_distances = {}
for x in range(40):
if x < 2:
continue
distances = []
#compute the average normalized hamming distance given keysize x
for i in range((len(ciphertext) // x) - 1):
h = hamming_distance_two_hexstrings(binascii.hexlify(ciphertext[i*x:i*x+x]), binascii.hexlify(ciphertext[(i+1)*x:(i+1)*x+x]))
h_normal = h / x
distances.append(h_normal)
keysize_hamming_distances[x] = sum(distances)/len(distances)
keysize_candidates_size = 1
keysize_candidates = []
c = 0
#determine candidate keysizes
for v in sorted(keysize_hamming_distances.values()):
for i in keysize_hamming_distances.keys():
if keysize_hamming_distances[i] == v:
keysize_candidates.append(i)
c += 1
continue
if c < keysize_candidates_size:
continue
else:
break
#for each key size, attempt to solve the multibyte key
for k_candidate in keysize_candidates:
standard_blocks = [ciphertext[x:x+k_candidate] for x in range(0, len(ciphertext), k_candidate)]
transposed_blocks = transpose_blocks(ciphertext, k_candidate)
xor_bytes = [solve_block(binascii.hexlify(tblock)) for tblock in transposed_blocks]
key = bytearray(xor_bytes)
plaintext = multibyte_xor(ciphertext, key)
print(plaintext.decode("utf-8"))
return True
#--------------------------------------------------------------------------
#hamming function test
#string1 = "this is a test"
#string2 = "wokka wokka!!!"
#print(hamming_distance_two_hexstrings(binascii.hexlify(bytes(string1, "ascii")), binascii.hexlify(bytes(string2, "ascii"))))
b64_crypt = """HUIfTQsPAh9PE048GmllH0kcDk4TAQsHThsBFkU2AB4BSWQgVB0dQzNTTmVS
BgBHVBwNRU0HBAxTEjwMHghJGgkRTxRMIRpHKwAFHUdZEQQJAGQmB1MANxYG
DBoXQR0BUlQwXwAgEwoFR08SSAhFTmU+Fgk4RQYFCBpGB08fWXh+amI2DB0P
QQ1IBlUaGwAdQnQEHgFJGgkRAlJ6f0kASDoAGhNJGk9FSA8dDVMEOgFSGQEL
QRMGAEwxX1NiFQYHCQdUCxdBFBZJeTM1CxsBBQ9GB08dTnhOSCdSBAcMRVhI
CEEATyBUCHQLHRlJAgAOFlwAUjBpZR9JAgJUAAELB04CEFMBJhAVTQIHAh9P
G054MGk2UgoBCVQGBwlTTgIQUwg7EAYFSQ8PEE87ADpfRyscSWQzT1QCEFMa
TwUWEXQMBk0PAg4DQ1JMPU4ALwtJDQhOFw0VVB1PDhxFXigLTRkBEgcKVVN4
Tk9iBgELR1MdDAAAFwoFHww6Ql5NLgFBIg4cSTRWQWI1Bk9HKn47CE8BGwFT
QjcEBx4MThUcDgYHKxpUKhdJGQZZVCFFVwcDBVMHMUV4LAcKQR0JUlk3TwAm
HQdJEwATARNFTg5JFwQ5C15NHQYEGk94dzBDADsdHE4UVBUaDE5JTwgHRTkA
Umc6AUETCgYAN1xGYlUKDxJTEUgsAA0ABwcXOwlSGQELQQcbE0c9GioWGgwc
AgcHSAtPTgsAABY9C1VNCAINGxgXRHgwaWUfSQcJABkRRU8ZAUkDDTUWF01j
OgkRTxVJKlZJJwFJHQYADUgRSAsWSR8KIgBSAAxOABoLUlQwW1RiGxpOCEtU
YiROCk8gUwY1C1IJCAACEU8QRSxORTBSHQYGTlQJC1lOBAAXRTpCUh0FDxhU
ZXhzLFtHJ1JbTkoNVDEAQU4bARZFOwsXTRAPRlQYE042WwAuGxoaAk5UHAoA
ZCYdVBZ0ChQLSQMYVAcXQTwaUy1SBQsTAAAAAAAMCggHRSQJExRJGgkGAAdH
MBoqER1JJ0dDFQZFRhsBAlMMIEUHHUkPDxBPH0EzXwArBkkdCFUaDEVHAQAN
U29lSEBAWk44G09fDXhxTi0RAk4ITlQbCk0LTx4cCjBFeCsGHEETAB1EeFZV
IRlFTi4AGAEORU4CEFMXPBwfCBpOAAAdHUMxVVUxUmM9ElARGgZBAg4PAQQz
DB4EGhoIFwoKUDFbTCsWBg0OTwEbRSonSARTBDpFFwsPCwIATxNOPBpUKhMd
Th5PAUgGQQBPCxYRdG87TQoPD1QbE0s9GkFiFAUXR0cdGgkADwENUwg1DhdN
AQsTVBgXVHYaKkg7TgNHTB0DAAA9DgQACjpFX0BJPQAZHB1OeE5PYjYMAg5M
FQBFKjoHDAEAcxZSAwZOBREBC0k2HQxiKwYbR0MVBkVUHBZJBwp0DRMDDk5r
NhoGACFVVWUeBU4MRREYRVQcFgAdQnQRHU0OCxVUAgsAK05ZLhdJZChWERpF
QQALSRwTMRdeTRkcABcbG0M9Gk0jGQwdR1ARGgNFDRtJeSchEVIDBhpBHQlS
WTdPBzAXSQ9HTBsJA0UcQUl5bw0KB0oFAkETCgYANlVXKhcbC0sAGgdFUAIO
ChZJdAsdTR0HDBFDUk43GkcrAAUdRyonBwpOTkJEUyo8RR8USSkOEENSSDdX
RSAdDRdLAA0HEAAeHQYRBDYJC00MDxVUZSFQOV1IJwYdB0dXHRwNAA9PGgMK
OwtTTSoBDBFPHU54W04mUhoPHgAdHEQAZGU/OjV6RSQMBwcNGA5SaTtfADsX
GUJHWREYSQAnSARTBjsIGwNOTgkVHRYANFNLJ1IIThVIHQYKAGQmBwcKLAwR
DB0HDxNPAU94Q083UhoaBkcTDRcAAgYCFkU1RQUEBwFBfjwdAChPTikBSR0T
TwRIEVIXBgcURTULFk0OBxMYTwFUN0oAIQAQBwkHVGIzQQAGBR8EdCwRCEkH
ElQcF0w0U05lUggAAwANBxAAHgoGAwkxRRMfDE4DARYbTn8aKmUxCBsURVQf
DVlOGwEWRTIXFwwCHUEVHRcAMlVDKRsHSUdMHQMAAC0dCAkcdCIeGAxOazkA
BEk2HQAjHA1OAFIbBxNJAEhJBxctDBwKSRoOVBwbTj8aQS4dBwlHKjUECQAa
BxscEDMNUhkBC0ETBxdULFUAJQAGARFJGk9FVAYGGlMNMRcXTRoBDxNPeG43
TQA7HRxJFUVUCQhBFAoNUwctRQYFDE43PT9SUDdJUydcSWRtcwANFVAHAU5T
FjtFGgwbCkEYBhlFeFsABRcbAwZOVCYEWgdPYyARNRcGAQwKQRYWUlQwXwAg
ExoLFAAcARFUBwFOUwImCgcDDU5rIAcXUj0dU2IcBk4TUh0YFUkASEkcC3QI
GwMMQkE9SB8AMk9TNlIOCxNUHQZCAAoAHh1FXjYCDBsFABkOBkk7FgALVQRO
D0EaDwxOSU8dGgI8EVIBAAUEVA5SRjlUQTYbCk5teRsdRVQcDhkDADBFHwhJ
AQ8XClJBNl4AC1IdBghVEwARABoHCAdFXjwdGEkDCBMHBgAwW1YnUgAaRyon
B0VTGgoZUwE7EhxNCAAFVAMXTjwaTSdSEAESUlQNBFJOZU5LXHQMHE0EF0EA
Bh9FeRp5LQdFTkAZREgMU04CEFMcMQQAQ0lkay0ABwcqXwA1FwgFAk4dBkIA
CA4aB0l0PD1MSQ8PEE87ADtbTmIGDAILAB0cRSo3ABwBRTYKFhROHUETCgZU
MVQHYhoGGksABwdJAB0ASTpFNwQcTRoDBBgDUkksGioRHUkKCE5THEVCC08E
EgF0BBwJSQoOGkgGADpfADETDU5tBzcJEFMLTx0bAHQJCx8ADRJUDRdMN1RH
YgYGTi5jMURFeQEaSRAEOkURDAUCQRkKUmQ5XgBIKwYbQFIRSBVJGgwBGgtz
RRNNDwcVWE8BT3hJVCcCSQwGQx9IBE4KTwwdASEXF01jIgQATwZIPRpXKwYK
BkdEGwsRTxxDSToGMUlSCQZOFRwKUkQ5VEMnUh0BR0MBGgAAZDwGUwY7CBdN
HB5BFwMdUz0aQSwWSQoITlMcRUILTxoCEDUXF01jNw4BTwVBNlRBYhAIGhNM
EUgIRU5CRFMkOhwGBAQLTVQOHFkvUkUwF0lkbXkbHUVUBgAcFA0gRQYFCBpB
PU8FQSsaVycTAkJHYhsRSQAXABxUFzFFFggICkEDHR1OPxoqER1JDQhNEUgK
TkJPDAUAJhwQAg0XQRUBFgArU04lUh0GDlNUGwpOCU9jeTY1HFJARE4xGA4L
ACxSQTZSDxsJSw1ICFUdBgpTNjUcXk0OAUEDBxtUPRpCLQtFTgBPVB8NSRoK
SREKLUUVAklkERgOCwAsUkE2Ug8bCUsNSAhVHQYKUyI7RQUFABoEVA0dWXQa
Ry1SHgYOVBFIB08XQ0kUCnRvPgwQTgUbGBwAOVREYhAGAQBJEUgETgpPGR8E
LUUGBQgaQRIaHEshGk03AQANR1QdBAkAFwAcUwE9AFxNY2QxGA4LACxSQTZS
DxsJSw1ICFUdBgpTJjsIF00GAE1ULB1NPRpPLF5JAgJUVAUAAAYKCAFFXjUe
DBBOFRwOBgA+T04pC0kDElMdC0VXBgYdFkU2CgtNEAEUVBwTWXhTVG5SGg8e
AB0cRSo+AwgKRSANExlJCBQaBAsANU9TKxFJL0dMHRwRTAtPBRwQMAAATQcB
FlRlIkw5QwA2GggaR0YBBg5ZTgIcAAw3SVIaAQcVEU8QTyEaYy0fDE4ITlhI
Jk8DCkkcC3hFMQIEC0EbAVIqCFZBO1IdBgZUVA4QTgUWSR4QJwwRTWM=""".replace("\n", "")
solve_challenge(b64_crypt)
| 37.605691
| 137
| 0.736893
|
acffffc9567db7842132db486253a7f33e9a5137
| 6,347
|
py
|
Python
|
475.py
|
AiiDeHua/CISC475-19S
|
f9905ed92381c776b6337c9d71ab75dc99dc197a
|
[
"MIT"
] | null | null | null |
475.py
|
AiiDeHua/CISC475-19S
|
f9905ed92381c776b6337c9d71ab75dc99dc197a
|
[
"MIT"
] | null | null | null |
475.py
|
AiiDeHua/CISC475-19S
|
f9905ed92381c776b6337c9d71ab75dc99dc197a
|
[
"MIT"
] | null | null | null |
# coding = utf-8
import bibtexparser as bp
from bibtexparser.bparser import BibTexParser
from bibtexparser.customization import convert_to_unicode
import os, shutil
import jieba
import jieba.posseg as pseg
from gensim import corpora, models, similarities
#-------------------------global variable-----------------------------------
KEY_WORD = ''
#list of instances of BibData
bibinfo = []
#list of instances that to output
extractList = []
#-------------------------function define-----------------------------------
#return a whole string
def entry_to_string(entry):
result = '@Article{\n'
for key in entry:
if(key == 'title' or key == 'abstract'):
result = ' ' + result + str(key)+'=['+str(entry[key])+']\n'
else:
result = ' ' + result + str(key)+'='+str(entry[key])+'\n'
resulr = result + '\n'
return result
#return a string list
def entry_to_stringList(entry):
result = []
result.append('@Article{\n')
for key in entry:
if(key == 'title' or key == 'abstract'):
result.append( ' ' + str(key)+'=['+str(entry[key])+']'+'\n')
else:
result.append( ' ' + str(key)+'='+str(entry[key])+'\n')
result.append('\n')
return result
def setKeyWord():
keyword_dir = 'search/search.txt'
with open(keyword_dir, 'r', encoding='utf-8') as f:
KEY_WORD = f.read()
print('now keyword is: '+ KEY_WORD +'\n')
def exportBib(BibList):
output_dir = 'output/output.bib'
#with open(keyword_dir, 'w+') as f:
#for b in BibList:
#f.write(b.text)
#print('WRITE: '+ b.title +'\n')
fs = open(output_dir, 'w', encoding='utf-8')
for b in BibList:
fs.write(entry_to_string(b.text))
print('WRITE: '+ b.title +'\n')
fs.close()
def selectExport():
extractList.clear()
single_export_flag = 0
total_export_flag = input('manually select to export bib file(enter 1) or export all inut bib file with sorted(enter 0)\n')
if total_export_flag > 0:
for b in bibinfo:
print('Title: '+b.title+'\n')
print('Abstract: '+b.abstract+'\n\n')
single_export_flag = input('choose this bib(enter 1) or not(enter 0)\n')
if single_export_flag > 0:
extractList.append(b)
exportBib(extractList)
else:
exportBib(bibinfo)
#-------------------------definition----------------------------------------
# a class saves bib
class BibData:
text = ''
title = ''
score = 0
abstract = []
'''
def __init__(self, tex, tle, s, abst):
self.text = tex
self.title = tle
self.score = s
self.abstract = abst
def __init__(self, tex, tle, abst):
self.text = tex
self.title = tle
self.score = 0
self.abstract = abst'''
def __init__(self, entry):
self.text = entry
self.title = entry['title']
self.score = 0
try:
entry['abstract']
except Exception:
self.abstract.append(entry['title'])
else:
self.abstract = entry['abstract'].split("\\\\")
def set_score(self, s):
self.score = s
def set_title(self, t):
self.title = t
def set_abstract(self, abst):
self.abstract = abst
def sort_key(self):
return self.abstract
#--------------------------get similarity score-----------------------
def StopWordsList(filepath):
wlst = [w.strip() for w in open(filepath, 'r', encoding='utf8').readlines()]
return wlst
def StopWordsList(filepath):
wlst = [w.strip() for w in open(filepath, 'r', encoding='utf8').readlines()]
return wlst
def seg_sentence(sentence, stop_words):
# stop_flag = ['x', 'c', 'u', 'd', 'p', 't', 'uj', 'm', 'f', 'r']#过滤数字m
stop_flag = ['x', 'c', 'u', 'd', 'p', 't', 'uj', 'f', 'r']
sentence_seged = pseg.cut(sentence)
# sentence_seged = set(sentence_seged)
outstr = []
for word,flag in sentence_seged:
# if word not in stop_words:
if word not in stop_words and flag not in stop_flag:
outstr.append(word)
return outstr
#input a Bibdata type
#set score to this Bibdata
def calScore(bdata):
spPath = 'stopwords.txt'
score = 0
stop_words = StopWordsList(spPath)
# 1、将【文本集】生产【分词列表】
texts = bdata.abstract
#一、建立词袋模型
# 2、基于文件集建立【词典】,并提取词典特征数
dictionary = corpora.Dictionary([texts])
feature_cnt = len(dictionary.token2id.keys())
#feature_cnt = len(dictionary.token2id)
# 3、基于词典,将【分词列表集】转换为【稀疏向量集】,也就是【语料库】
corpus = [dictionary.doc2bow([text]) for text in texts]
# 4、使用“TF-TDF模型”处理【语料库】
#二、建立TF-IDF模型
tfidf = models.TfidfModel(corpus)
#三构建一个query文本,利用词袋模型的字典将其映射到向量空间
# 5、同理,用词典把搜索词也转换为稀疏向量
#kw_vector = dictionary.doc2bow(lcut(KEY_WORD))
kw_vector = dictionary.doc2bow(seg_sentence(KEY_WORD, stop_words))
# 6、对稀疏向量建立索引
index = similarities.SparseMatrixSimilarity(tfidf[corpus], num_features=feature_cnt)
# 7、相似的计算
sim = index[tfidf[kw_vector]]
result_list = []
for i in range(len(sim)):
#print('keyword 与 text%d 相似度为:%.2f' % (i + 1, sim[i]))
#if sim[i] > score:
# result_list.append(orig_txt[i])
score = score + sim[i]
#result_list.append(orig_txt[i])
#print(score)
bdata.set_score(score)
#print('原始的句子:',result_list)
#-----------------------------main---------------------------------------
work_dir = 'bib_collect/'
print('loading data...')
for parent, dirnames, filenames in os.walk(work_dir, followlinks=True):
for filename in filenames:
if '.bib'in filename:
with open(work_dir+filename, 'r', encoding='utf-8') as bibfile:
parser = BibTexParser() # 声明解析器类
parser.customization = convert_to_unicode # 将BibTeX编码强制转换为UTF编码
bibdata = bp.load(bibfile, parser = parser) # 通过bp.load()加载
# 输出作者和DOI
#print(bibdata.entries[0]['abstract'].split("\\\\"))
# put entries into bibdata list
for e in bibdata.entries:
tmpBib = BibData(e)
bibinfo.append(tmpBib)
print('\nsuccessfully loading data\n')
#print(len(bibinfo[0].abstract))
#for b in bibinfo:
#print(b.title + ': ',b.score, '\n')
setKeyWord()
for bdata in bibinfo:
calScore(bdata)
bibinfo.sort(key=lambda x:x.score)
#for b in bibinfo:
#print(b.title + ': ',b.score, '\n')
#selectExport()
exportBib(bibinfo)
| 27.960352
| 124
| 0.5902
|
4a000045be0c48718029a2f63eccc8eb583f0070
| 32
|
py
|
Python
|
libgcv/model/ho_relation/__init__.py
|
bhaveshbaranda/Human-Object-Relation-Network
|
8f358fa14fe14c7c02758359dce2a5c625be87c1
|
[
"MIT"
] | 2
|
2020-09-18T12:16:39.000Z
|
2021-07-08T01:41:33.000Z
|
libgcv/model/ho_relation/__init__.py
|
bhaveshbaranda/Human-Object-Relation-Network
|
8f358fa14fe14c7c02758359dce2a5c625be87c1
|
[
"MIT"
] | 2
|
2020-09-18T12:16:34.000Z
|
2021-07-15T05:33:26.000Z
|
libgcv/model/ho_relation/__init__.py
|
bhaveshbaranda/Human-Object-Relation-Network
|
8f358fa14fe14c7c02758359dce2a5c625be87c1
|
[
"MIT"
] | 6
|
2020-09-17T05:57:54.000Z
|
2021-07-15T05:34:01.000Z
|
from .ho_relation_net import *
| 10.666667
| 30
| 0.78125
|
4a0000b1479967cf71ea91df3c68bbd86baec81d
| 8,571
|
py
|
Python
|
tests/db/test_quotes_basic.py
|
pgoslatara/soda-sql
|
79dc8ad1c79ac8fe3f75955ecc8424ee0aef95f3
|
[
"Apache-2.0"
] | 1
|
2022-02-25T01:31:12.000Z
|
2022-02-25T01:31:12.000Z
|
tests/db/test_quotes_basic.py
|
pgoslatara/soda-sql
|
79dc8ad1c79ac8fe3f75955ecc8424ee0aef95f3
|
[
"Apache-2.0"
] | null | null | null |
tests/db/test_quotes_basic.py
|
pgoslatara/soda-sql
|
79dc8ad1c79ac8fe3f75955ecc8424ee0aef95f3
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
from unittest import TestCase
import boto3
import psycopg2
import pyathena
from google.cloud import bigquery
from google.cloud.bigquery import dbapi
from google.oauth2.service_account import Credentials
from snowflake import connector
from sodasql.common.logging_helper import LoggingHelper
from sodasql.scan.aws_credentials import AwsCredentials
from sodasql.scan.db import sql_update, sql_fetchone
from sodasql.scan.env_vars import EnvVars
from tests.common.boto3_helper import Boto3Helper
from tests.warehouses.athena_fixture import AthenaFixture
LoggingHelper.configure_for_test()
EnvVars.load_env_vars('test')
class TestSqlQuotes(TestCase):
connection = None
table_name = 'quotestest'
cte_table_name = 'ctetablename'
def test_postgres(self):
database = 'sodasql'
schema = 'public'
connection = psycopg2.connect(
user='sodasql',
host='localhost',
database=database,
options=f'-c search_path={schema}')
try:
sql_update(connection, (
f'DROP TABLE IF EXISTS "{database}"."{schema}"."{self.table_name}"'
))
sql_update(connection, (
f'CREATE TABLE "{database}"."{schema}"."{self.table_name}" (\n'
f' "id" VARCHAR(255), \n'
f' "size" INTEGER );'
))
sql_fetchone(connection, (
f'WITH "{self.cte_table_name}" as ( \n'
f' SELECT "id" as "v", "size" as "s", LENGTH("id") as "l" \n'
f' FROM "{database}"."{schema}"."{self.table_name}" \n'
f' WHERE "size" = 1 \n'
f' ORDER BY "size" ASC ) \n'
f'SELECT COUNT(DISTINCT("v")), COUNT("s") \n'
f'FROM "{self.cte_table_name}" \n'
f'WHERE "l" > 0'
))
finally:
connection.close()
def test_redshift(self):
database = 'soda_test'
schema = 'public'
connection = psycopg2.connect(
user=os.getenv('SODA_REDSHIFT_USERNAME'),
password=os.getenv('SODA_REDSHIFT_PASSWORD'),
host=os.getenv('SODA_REDSHIFT_ENDPOINT'),
port='5439',
database=database)
try:
sql_update(connection, (
f'DROP TABLE IF EXISTS "{database}"."{schema}"."{self.table_name}"'
))
sql_update(connection, (
f'CREATE TABLE "{database}"."{schema}"."{self.table_name}" (\n'
f' "id" VARCHAR(255), \n'
f' "size" INTEGER );'
))
sql_fetchone(connection, (
f'WITH "{self.cte_table_name}" as ( \n'
f' SELECT "id" as "v", "size" as "s", LENGTH("id") as "l" \n'
f' FROM "{database}"."{schema}"."{self.table_name}" \n'
f' WHERE "size" = 1 \n'
f' ORDER BY "size" ASC ) \n'
f'SELECT COUNT(DISTINCT("v")), COUNT("s") \n'
f'FROM "{self.cte_table_name}" \n'
f'WHERE "l" > 0'
))
finally:
connection.close()
def test_snowflake(self):
schema = 'PUBLIC'
connection = connector.connect(
user=os.getenv('SODA_SNOWFLAKE_USERNAME'),
password=os.getenv('SODA_SNOWFLAKE_PASSWORD'),
account='SODADATAPARTNER.eu-central-1',
warehouse='DEMO_WH',
schema=schema
)
try:
database = 'soda_test_quotes_db'
sql_update(connection, (
f'CREATE DATABASE IF NOT EXISTS "{database}"'
))
sql_update(connection, (
f'DROP TABLE IF EXISTS "{database}"."{schema}"."{self.table_name}"'
))
sql_update(connection, (
f'CREATE TABLE "{database}"."{schema}"."{self.table_name}" (\n'
f' "id" VARCHAR(255), \n'
f' "size" INTEGER );'
))
sql_fetchone(connection, (
f'WITH "{self.cte_table_name}" as ( \n'
f' SELECT "id" as "v", "size" as "s", LENGTH("id") as "l" \n'
f' FROM "{database}"."{schema}"."{self.table_name}" \n'
f' WHERE "size" = 1 \n'
f' ORDER BY "size" ASC ) \n'
f'SELECT COUNT(DISTINCT("v")), COUNT("s") \n'
f'FROM "{self.cte_table_name}" \n'
f'WHERE "l" > 0'
))
finally:
connection.close()
def test_bigquery(self):
database = 'sodalite'
account_info_json_str = os.getenv('BIGQUERY_ACCOUNT_INFO_JSON')
account_info_json_dict = json.loads(account_info_json_str)
credentials = Credentials.from_service_account_info(account_info_json_dict)
project_id = account_info_json_dict['project_id']
client = bigquery.Client(project=project_id, credentials=credentials)
connection = dbapi.Connection(client)
try:
sql_update(connection, (
f'DROP TABLE IF EXISTS `{database}`.`{self.table_name}`'
))
sql_update(connection, (
f'CREATE TABLE `{database}`.`{self.table_name}` (\n'
f' `id` STRING, \n'
f' `size` INT64 );'
))
sql_fetchone(connection, (
f'WITH `{self.cte_table_name}` as ( \n'
f' SELECT "id" as `v`, "size" as `s`, LENGTH("id") as `l` \n'
f' FROM `{database}`.`{self.table_name}` \n'
f' WHERE `size` = 1 \n'
f' ORDER BY `size` ASC ) \n'
f'SELECT COUNT(DISTINCT("v")), COUNT("s") \n'
f'FROM `{self.cte_table_name}`;'
))
finally:
connection.close()
def test_athena(self):
Boto3Helper.filter_false_positive_boto3_warning()
aws_access_key_id = os.getenv('SODA_ATHENA_ACCESS_KEY_ID')
aws_secret_access_key = os.getenv('SODA_ATHENA_SECRET_ACCESS_KEY')
s3_staging_bucket = 'sodalite-test'
s3_staging_folder = 'soda_quotes_test'
database = 'sodalite_test'
schema = 'PUBLIC'
connection = pyathena.connect(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
s3_staging_dir=f's3://{s3_staging_bucket}/',
region_name='eu-west-1')
try:
sql_update(connection, (
f'CREATE DATABASE IF NOT EXISTS `{database}`'
))
sql_update(connection, (
f'DROP TABLE IF EXISTS `{database}`.`{self.table_name}`'
))
sql_update(connection, (
f'CREATE EXTERNAL TABLE `{database}`.`{self.table_name}` (\n'
f' `id` VARCHAR(255), \n'
f' `size` INTEGER ) \n'
f"LOCATION 's3://{s3_staging_bucket}/{s3_staging_folder}';"
))
sql_fetchone(connection, (
f'WITH "{self.cte_table_name}" as ( \n'
f' SELECT "id" as "v", "size" as "s", LENGTH("id") as "l" \n'
f' FROM "{database}"."{self.table_name}" \n'
f' WHERE "size" = 1 \n'
f' ORDER BY "size" ASC ) \n'
f'SELECT COUNT(DISTINCT("v")), COUNT("s") \n'
f'FROM "{self.cte_table_name}" \n'
f'WHERE "l" > 0'
))
finally:
TestSqlQuotes.delete_athena_s3_staging_files(
aws_access_key_id,
aws_secret_access_key,
s3_staging_bucket,
s3_staging_folder)
connection.close()
@staticmethod
def delete_athena_s3_staging_files(aws_access_key_id, aws_secret_access_key, s3_staging_bucket, s3_staging_folder):
aws_credentials = AwsCredentials(access_key_id=aws_access_key_id,
secret_access_key=aws_secret_access_key)
aws_credentials = aws_credentials.resolve_role("soda_sql_test_cleanup")
s3_client = boto3.client(
's3',
region_name=aws_credentials.region_name,
aws_access_key_id=aws_credentials.access_key_id,
aws_secret_access_key=aws_credentials.secret_access_key,
aws_session_token=aws_credentials.session_token
)
AthenaFixture.delete_s3_files(s3_client, s3_staging_bucket, s3_staging_folder)
| 34.841463
| 119
| 0.54101
|
4a0000ec843c1930098a0cdd86fbdccb3b14d690
| 2,162
|
py
|
Python
|
ssseg/cfgs/encnet/cfgs_cityscapes_resnet101os8.py
|
skydengyao/sssegmentation
|
606b05983fa967bb3c98d1120f44dfc516532dad
|
[
"MIT"
] | 1
|
2021-05-28T06:42:37.000Z
|
2021-05-28T06:42:37.000Z
|
ssseg/cfgs/encnet/cfgs_cityscapes_resnet101os8.py
|
skydengyao/sssegmentation
|
606b05983fa967bb3c98d1120f44dfc516532dad
|
[
"MIT"
] | null | null | null |
ssseg/cfgs/encnet/cfgs_cityscapes_resnet101os8.py
|
skydengyao/sssegmentation
|
606b05983fa967bb3c98d1120f44dfc516532dad
|
[
"MIT"
] | null | null | null |
'''define the config file for cityscapes and resnet101os8'''
from .base_cfg import *
# modify dataset config
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG['train'].update(
{
'type': 'cityscapes',
'rootdir': 'data/CityScapes',
'aug_opts': [('Resize', {'output_size': (2048, 1024), 'keep_ratio': True, 'scale_range': (0.5, 2.0)}),
('RandomCrop', {'crop_size': (512, 1024), 'one_category_max_ratio': 0.75}),
('RandomFlip', {'flip_prob': 0.5}),
('PhotoMetricDistortion', {}),
('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
('ToTensor', {}),
('Padding', {'output_size': (512, 1024), 'data_type': 'tensor'}),]
}
)
DATASET_CFG['test'].update(
{
'type': 'cityscapes',
'rootdir': 'data/CityScapes',
'aug_opts': [('Resize', {'output_size': (2048, 1024), 'keep_ratio': True, 'scale_range': None}),
('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
('ToTensor', {}),],
}
)
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
DATALOADER_CFG['train'].update(
{
'batch_size': 8,
}
)
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
{
'max_epochs': 220
}
)
# modify losses config
LOSSES_CFG = LOSSES_CFG.copy()
# modify model config
MODEL_CFG = MODEL_CFG.copy()
MODEL_CFG.update(
{
'num_classes': 19,
}
)
# modify inference config
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
{
'backupdir': 'encnet_resnet101os8_cityscapes_train',
'logfilepath': 'encnet_resnet101os8_cityscapes_train/train.log',
}
)
COMMON_CFG['test'].update(
{
'backupdir': 'encnet_resnet101os8_cityscapes_test',
'logfilepath': 'encnet_resnet101os8_cityscapes_test/test.log',
'resultsavepath': 'encnet_resnet101os8_cityscapes_test/encnet_resnet101os8_cityscapes_results.pkl'
}
)
| 31.794118
| 110
| 0.597132
|
4a000161b20e74b0839f877b0ce4a24038f2ebc1
| 21,412
|
py
|
Python
|
hoomd/md/angle.py
|
PetersResearchGroup/PCND
|
584768cc683a6df0152ead69b567d05b781aab2b
|
[
"BSD-3-Clause"
] | null | null | null |
hoomd/md/angle.py
|
PetersResearchGroup/PCND
|
584768cc683a6df0152ead69b567d05b781aab2b
|
[
"BSD-3-Clause"
] | null | null | null |
hoomd/md/angle.py
|
PetersResearchGroup/PCND
|
584768cc683a6df0152ead69b567d05b781aab2b
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2009-2017 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
R""" Angle potentials.
Angles add forces between specified triplets of particles and are typically used to
model chemical angles between two bonds.
By themselves, angles that have been specified in an initial configuration do nothing. Only when you
specify an angle force (i.e. angle.harmonic), are forces actually calculated between the
listed particles.
"""
from hoomd import _hoomd
from hoomd.md import _md
from hoomd.md import force
import hoomd
import math;
import sys;
class coeff:
    R""" Define angle coefficients.
    The coefficients for all angle force are specified using this class. Coefficients are
    specified per angle type.
    There are two ways to set the coefficients for a particular angle potential.
    The first way is to save the angle potential in a variable and call :py:meth:`set()` directly.
    See below for an example of this.
    The second method is to build the coeff class first and then assign it to the
    angle potential. There are some advantages to this method in that you could specify a
    complicated set of angle potential coefficients in a separate python file and import
    it into your job script.
    Example::
        my_coeffs = hoomd.md.angle.coeff();
        my_angle_force.angle_coeff.set('polymer', k=330.0, r=0.84)
        my_angle_force.angle_coeff.set('backbone', k=330.0, r=0.84)
    """
    ## \internal
    # \brief Initializes the class
    # \details
    # The main task to be performed during initialization is just to init some variables
    # \param self Python required class instance variable
    def __init__(self):
        # values: {angle type name: {coeff name: value}}
        self.values = {};
        # default_coeff: {coeff name: default value}, applied in set_single()
        self.default_coeff = {}
    ## \var values
    # \internal
    # \brief Contains the vector of set values in a dictionary
    ## \var default_coeff
    # \internal
    # \brief default_coeff['coeff'] lists the default value for \a coeff, if it is set
    ## \internal
    # \brief Sets a default value for a given coefficient
    # \details
    # \param name Name of the coefficient to for which to set the default
    # \param value Default value to set
    #
    # Some coefficients have reasonable default values and the user should not be burdened with typing them in
    # all the time. set_default_coeff() sets
    def set_default_coeff(self, name, value):
        self.default_coeff[name] = value;
    def set(self, type, **coeffs):
        R""" Sets parameters for angle types.
        Args:
            type (str): Type of angle (or a list of type names)
            coeffs: Named coefficients (see below for examples)
        Calling :py:meth:`set()` results in one or more parameters being set for a angle type. Types are identified
        by name, and parameters are also added by name. Which parameters you need to specify depends on the angle
        potential you are setting these coefficients for, see the corresponding documentation.
        All possible angle types as defined in the simulation box must be specified before executing run().
        You will receive an error if you fail to do so. It is not an error, however, to specify coefficients for
        angle types that do not exist in the simulation. This can be useful in defining a potential field for many
        different types of angles even when some simulations only include a subset.
        Examples::
            my_angle_force.angle_coeff.set('polymer', k=330.0, r0=0.84)
            my_angle_force.angle_coeff.set('backbone', k=1000.0, r0=1.0)
            my_angle_force.angle_coeff.set(['angleA','angleB'], k=100, r0=0.0)
        Note:
            Single parameters can be updated. If both ``k`` and ``r0`` have already been set for a particle type,
            then executing ``coeff.set('polymer', r0=1.0)`` will update the value of ``r0`` and leave the other
            parameters as they were previously set.
        """
        hoomd.util.print_status_line();
        # listify the input
        type = hoomd.util.listify(type)
        for typei in type:
            self.set_single(typei, coeffs);
    ## \internal
    # \brief Sets a single parameter
    def set_single(self, type, coeffs):
        type = str(type);
        # create the type identifier if it hasn't been created yet
        if (not type in self.values):
            self.values[type] = {};
        # update each of the values provided
        if len(coeffs) == 0:
            # NOTE(review): error is reported but not raised, so the call
            # continues and only default coefficients are applied below.
            # Also "coefficents" is a typo in the user-visible message.
            hoomd.context.msg.error("No coefficents specified\n");
        for name, val in coeffs.items():
            self.values[type][name] = val;
        # set the default values
        for name, val in self.default_coeff.items():
            # don't override a coeff if it is already set
            if not name in self.values[type]:
                self.values[type][name] = val;
    ## \internal
    # \brief Verifies that all values are set
    # \details
    # \param self Python required self variable
    # \param required_coeffs list of required variables
    #
    # This can only be run after the system has been initialized
    def verify(self, required_coeffs):
        # first, check that the system has been initialized
        if not hoomd.init.is_initialized():
            hoomd.context.msg.error("Cannot verify angle coefficients before initialization\n");
            raise RuntimeError('Error verifying force coefficients');
        # get a list of types from the particle data
        ntypes = hoomd.context.current.system_definition.getAngleData().getNTypes();
        type_list = [];
        for i in range(0,ntypes):
            type_list.append(hoomd.context.current.system_definition.getAngleData().getNameByType(i));
        valid = True;
        # loop over all possible types and verify that all required variables are set
        for i in range(0,ntypes):
            type = type_list[i];
            if type not in self.values.keys():
                hoomd.context.msg.error("Angle type " +str(type) + " not found in angle coeff\n");
                valid = False;
                continue;
            # verify that all required values are set by counting the matches
            count = 0;
            for coeff_name in self.values[type].keys():
                if not coeff_name in required_coeffs:
                    hoomd.context.msg.notice(2, "Notice: Possible typo? Force coeff " + str(coeff_name) + " is specified for type " + str(type) + \
                          ", but is not used by the angle force\n");
                else:
                    count += 1;
            if count != len(required_coeffs):
                hoomd.context.msg.error("Angle type " + str(type) + " is missing required coefficients\n");
                valid = False;
        return valid;
    ## \internal
    # \brief Gets the value of a single angle potential coefficient
    # \detail
    # \param type Name of angle type
    # \param coeff_name Coefficient to get
    def get(self, type, coeff_name):
        if type not in self.values.keys():
            hoomd.context.msg.error("Bug detected in force.coeff. Please report\n");
            raise RuntimeError("Error setting angle coeff");
        # NOTE(review): a missing coeff_name raises a plain KeyError here.
        return self.values[type][coeff_name];
    ## \internal
    # \brief Return metadata
    def get_metadata(self):
        return self.values
class harmonic(force._force):
    R""" Harmonic angle potential.
    The command angle.harmonic specifies a harmonic potential energy between every triplet of particles
    with an angle specified between them.
    .. math::
        V(\theta) = \frac{1}{2} k \left( \theta - \theta_0 \right)^2
    where :math:`\theta` is the angle between the triplet of particles.
    Coefficients:
    - :math:`\theta_0` - rest angle ``t0`` (in radians)
    - :math:`k` - potential constant ``k`` (in units of energy/radians^2)
    Coefficients :math:`k` and :math:`\theta_0` must be set for each type of angle in the simulation using the
    method :py:meth:`angle_coeff.set() <coeff.set()>`.
    Examples::
        harmonic = angle.harmonic()
        harmonic.angle_coeff.set('polymer', k=3.0, t0=0.7851)
        harmonic.angle_coeff.set('backbone', k=100.0, t0=1.0)
    """
    def __init__(self):
        hoomd.util.print_status_line();
        # check that some angles are defined
        if hoomd.context.current.system_definition.getAngleData().getNGlobal() == 0:
            hoomd.context.msg.error("No angles are defined.\n");
            raise RuntimeError("Error creating angle forces");
        # initialize the base class
        force._force.__init__(self);
        # setup the coefficient vector
        self.angle_coeff = coeff();
        # create the c++ mirror class (CPU or GPU variant as appropriate)
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.HarmonicAngleForceCompute(hoomd.context.current.system_definition);
        else:
            self.cpp_force = _md.HarmonicAngleForceComputeGPU(hoomd.context.current.system_definition);
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
        # coefficients that coeff.verify() requires for every angle type
        self.required_coeffs = ['k', 't0'];
    ## \internal
    # \brief Update coefficients in C++
    def update_coeffs(self):
        coeff_list = self.required_coeffs;
        # check that the force coefficients are valid
        if not self.angle_coeff.verify(coeff_list):
            hoomd.context.msg.error("Not all force coefficients are set\n");
            raise RuntimeError("Error updating force coefficients");
        # set all the params
        ntypes = hoomd.context.current.system_definition.getAngleData().getNTypes();
        type_list = [];
        for i in range(0,ntypes):
            type_list.append(hoomd.context.current.system_definition.getAngleData().getNameByType(i));
        for i in range(0,ntypes):
            # build a dict of the coeffs to pass to proces_coeff
            coeff_dict = {};
            for name in coeff_list:
                coeff_dict[name] = self.angle_coeff.get(type_list[i], name);
            self.cpp_force.setParams(i, coeff_dict['k'], coeff_dict['t0']);
    ## \internal
    # \brief Get metadata
    def get_metadata(self):
        data = force._force.get_metadata(self)
        # make sure coefficients are up-to-date
        self.update_coeffs()
        data['angle_coeff'] = self.angle_coeff
        return data
class cosinesq(force._force):
    R""" Cosine squared angle potential.
    The command angle.cosinesq specifies a cosine squared potential energy
    between every triplet of particles with an angle specified between them.
    .. math::
        V(\theta) = \frac{1}{2} k \left( \cos\theta - \cos\theta_0 \right)^2
    where :math:`\theta` is the angle between the triplet of particles.
    This angle style is also known as g96, since they were used in the
    gromos96 force field. These are also the types of angles used with the
    coarse-grained MARTINI force field.
    Coefficients:
    - :math:`\theta_0` - rest angle ``t0`` (in radians)
    - :math:`k` - potential constant ``k`` (in units of energy)
    Coefficients :math:`k` and :math:`\theta_0` must be set for each type of
    angle in the simulation using the method :py:meth:`angle_coeff.set() <coeff.set()>`.
    Note that the value of :math:`k` for this angle potential is not comparable to
    the value of :math:`k` for harmonic angles, as they have different units.
    Examples::
        cosinesq = angle.cosinesq()
        cosinesq.angle_coeff.set('polymer', k=3.0, t0=0.7851)
        cosinesq.angle_coeff.set('backbone', k=100.0, t0=1.0)
    """
    def __init__(self):
        hoomd.util.print_status_line();
        # check that some angles are defined
        if hoomd.context.current.system_definition.getAngleData().getNGlobal() == 0:
            hoomd.context.msg.error("No angles are defined.\n");
            raise RuntimeError("Error creating angle forces");
        # initialize the base class
        force._force.__init__(self);
        # setup the coefficient vector
        self.angle_coeff = coeff();
        # create the c++ mirror class (CPU or GPU variant as appropriate)
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.CosineSqAngleForceCompute(
                hoomd.context.current.system_definition);
        else:
            self.cpp_force = _md.CosineSqAngleForceComputeGPU(
                hoomd.context.current.system_definition);
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
        # coefficients that coeff.verify() requires for every angle type
        self.required_coeffs = ['k', 't0'];
    ## \internal
    # \brief Update coefficients in C++
    def update_coeffs(self):
        coeff_list = self.required_coeffs;
        # check that the force coefficients are valid
        if not self.angle_coeff.verify(coeff_list):
            hoomd.context.msg.error("Not all force coefficients are set\n");
            raise RuntimeError("Error updating force coefficients");
        # set all the params
        ntypes = hoomd.context.current.system_definition.getAngleData().getNTypes();
        type_list = [];
        for i in range(0,ntypes):
            type_list.append(
                hoomd.context.current.system_definition.getAngleData().getNameByType(i));
        for i in range(0, ntypes):
            # build a dict of the coeffs to pass to proces_coeff
            coeff_dict = {};
            for name in coeff_list:
                coeff_dict[name] = self.angle_coeff.get(type_list[i], name);
            self.cpp_force.setParams(i, coeff_dict['k'], coeff_dict['t0']);
    ## \internal
    # \brief Get metadata
    def get_metadata(self):
        data = force._force.get_metadata(self)
        # make sure coefficients are up-to-date
        self.update_coeffs()
        data['angle_coeff'] = self.angle_coeff
        return data
def _table_eval(theta, V, T, width):
dth = (math.pi) / float(width-1);
i = int(round((theta)/dth))
return (V[i], T[i])
class table(force._force):
    R""" Tabulated angle potential.
    Args:
        width (int): Number of points to use to interpolate V and F (see documentation above)
        name (str): Name of the force instance
    :py:class:`table` specifies that a tabulated angle potential should be added to every bonded triple of particles
    in the simulation.
    The torque :math:`T` is (in units of force * distance) and the potential :math:`V(\theta)` is (in energy units):
    .. math::
        T(\theta) = & T_{\mathrm{user}}(\theta) \\
        V(\theta) = & V_{\mathrm{user}}(\theta)
    where :math:`\theta` is the angle from A-B to B-C in the triple.
    :math:`T_{\mathrm{user}}(\theta)` and :math:`V_{\mathrm{user}}(\theta)` are evaluated on *width* grid points
    between :math:`0` and :math:`\pi`. Values are interpolated linearly between grid points.
    For correctness, you must specify: :math:`T = -\frac{\partial V}{\partial \theta}`
    Parameters:
    - :math:`T_{\mathrm{user}}(\theta)` and :math:`V_{\mathrm{user}}(\theta)` - evaluated by `func` (see example)
    - coefficients passed to `func` - `coeff` (see example)
    The table *width* is set once when :py:class:`table` is specified. There are two ways to specify the other
    parameters.
    .. rubric:: Set table from a given function
    When you have a functional form for T and F, you can enter that
    directly into python. :py:class:`table` will evaluate the given function over *width* points between :math:`0` and :math:`\pi`
    and use the resulting values in the table::
        def harmonic(theta, kappa, theta_0):
            V = 0.5 * kappa * (theta-theta_0)**2;
            T = -kappa*(theta-theta_0);
            return (V, T)
        btable = angle.table(width=1000)
        btable.angle_coeff.set('angle1', func=harmonic, coeff=dict(kappa=330, theta_0=0))
        btable.angle_coeff.set('angle2', func=harmonic,coeff=dict(kappa=30, theta_0=0.1))
    .. rubric:: Set a table from a file
    When you have no function for for *T* or *F*, or you otherwise have the data listed in a file, :py:class:`table` can use the given
    values directly. You must first specify the number of rows in your tables when initializing :py:class:`table`. Then use
    :py:meth:`set_from_file()` to read the file::
        btable = angle.table(width=1000)
        btable.set_from_file('polymer', 'angle.dat')
    """
    def __init__(self, width, name=None):
        hoomd.util.print_status_line();
        # initialize the base class
        force._force.__init__(self, name);
        # create the c++ mirror class (CPU or GPU variant as appropriate)
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.TableAngleForceCompute(hoomd.context.current.system_definition, int(width), self.name);
        else:
            self.cpp_force = _md.TableAngleForceComputeGPU(hoomd.context.current.system_definition, int(width), self.name);
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
        # setup the coefficent matrix
        self.angle_coeff = coeff();
        # stash the width for later use
        self.width = width;
    def update_angle_table(self, atype, func, coeff):
        # Evaluate func on the grid and push the (V, T) tables to C++.
        # allocate arrays to store V and F
        Vtable = _hoomd.std_vector_scalar();
        Ttable = _hoomd.std_vector_scalar();
        # calculate dth
        dth = math.pi / float(self.width-1);
        # evaluate each point of the function
        for i in range(0, self.width):
            theta = dth * i;
            (V,T) = func(theta, **coeff);
            # fill out the tables
            Vtable.append(V);
            Ttable.append(T);
        # pass the tables on to the underlying cpp compute
        self.cpp_force.setTable(atype, Vtable, Ttable);
    def update_coeffs(self):
        # check that the angle coefficents are valid
        if not self.angle_coeff.verify(["func", "coeff"]):
            hoomd.context.msg.error("Not all angle coefficients are set for angle.table\n");
            raise RuntimeError("Error updating angle coefficients");
        # set all the params
        ntypes = hoomd.context.current.system_definition.getAngleData().getNTypes();
        type_list = [];
        for i in range(0,ntypes):
            type_list.append(hoomd.context.current.system_definition.getAngleData().getNameByType(i));
        # loop through all of the unique type angles and evaluate the table
        for i in range(0,ntypes):
            func = self.angle_coeff.get(type_list[i], "func");
            coeff = self.angle_coeff.get(type_list[i], "coeff");
            self.update_angle_table(i, func, coeff);
    def set_from_file(self, anglename, filename):
        R""" Set a angle pair interaction from a file.
        Args:
            anglename (str): Name of angle
            filename (str): Name of the file to read
        The provided file specifies V and F at equally spaced theta values::
            #t V T
            0.0 2.0 -3.0
            1.5707 3.0 -4.0
            3.1414 2.0 -3.0
        Warning:
            The theta values are not used by the code. It is assumed that a table that has N rows will start at 0, end at :math:`\pi`
            and that :math:`\delta \theta = \pi/(N-1)`. The table is read
            directly into the grid points used to evaluate :math:`T_{\mathrm{user}}(\theta)` and :math:`V_{\mathrm{user}}(\theta)`.
        """
        hoomd.util.print_status_line();
        # open the file
        # NOTE(review): the file handle is never closed, and a blank line
        # would raise IndexError at line[0] below — consider 'with open(...)'.
        f = open(filename);
        theta_table = [];
        V_table = [];
        T_table = [];
        # read in lines from the file
        for line in f.readlines():
            line = line.strip();
            # skip comment lines
            if line[0] == '#':
                continue;
            # split out the columns
            cols = line.split();
            # NOTE(review): the comprehension variable shadows the file handle 'f'.
            values = [float(f) for f in cols];
            # validate the input
            if len(values) != 3:
                hoomd.context.msg.error("angle.table: file must have exactly 3 columns\n");
                raise RuntimeError("Error reading table file");
            # append to the tables
            theta_table.append(values[0]);
            V_table.append(values[1]);
            T_table.append(values[2]);
        # validate input
        if self.width != len(theta_table):
            hoomd.context.msg.error("angle.table: file must have exactly " + str(self.width) + " rows\n");
            raise RuntimeError("Error reading table file");
        # check for even spacing
        dth = math.pi / float(self.width-1);
        for i in range(0,self.width):
            theta = dth * i;
            if math.fabs(theta - theta_table[i]) > 1e-3:
                hoomd.context.msg.error("angle.table: theta must be monotonically increasing and evenly spaced\n");
                raise RuntimeError("Error reading table file");
        hoomd.util.quiet_status();
        self.angle_coeff.set(anglename, func=_table_eval, coeff=dict(V=V_table, T=T_table, width=self.width))
        hoomd.util.unquiet_status();
    ## \internal
    # \brief Get metadata
    def get_metadata(self):
        data = force._force.get_metadata(self)
        # make sure coefficients are up-to-date
        self.update_coeffs()
        data['angle_coeff'] = self.angle_coeff
        return data
| 37.433566
| 147
| 0.632683
|
4a0001ebb75b96bd209e4ef34bea2e63c0d34420
| 1,046
|
py
|
Python
|
avalon/vendor/qtawesome/__init__.py
|
bumpybox/core
|
5a24640484f19e48dc12682dae979adc6d41dc0b
|
[
"MIT"
] | 168
|
2017-06-23T15:50:43.000Z
|
2022-02-27T10:48:45.000Z
|
avalon/vendor/qtawesome/__init__.py
|
bumpybox/core
|
5a24640484f19e48dc12682dae979adc6d41dc0b
|
[
"MIT"
] | 1,019
|
2021-04-26T06:22:56.000Z
|
2022-03-31T16:30:43.000Z
|
avalon/vendor/qtawesome/__init__.py
|
bumpybox/core
|
5a24640484f19e48dc12682dae979adc6d41dc0b
|
[
"MIT"
] | 42
|
2017-06-23T15:27:26.000Z
|
2021-09-29T17:28:18.000Z
|
"""
qtawesome - use font-awesome in PyQt / PySide applications
This is a port to Python of the C++ QtAwesome library by Rick Blommers
"""
from .iconic_font import IconicFont, set_global_defaults
from .animation import Pulse, Spin
from ._version import version_info, __version__
_resource = {'iconic': None, }
def _instance():
    """Return the process-wide IconicFont, creating it on first use."""
    if _resource['iconic'] is None:
        fa_spec = ('fa', 'fontawesome-webfont.ttf', 'fontawesome-webfont-charmap.json')
        ei_spec = ('ei', 'elusiveicons-webfont.ttf', 'elusiveicons-webfont-charmap.json')
        _resource['iconic'] = IconicFont(fa_spec, ei_spec)
    return _resource['iconic']
def icon(*args, **kwargs):
    """Delegate icon creation to the shared IconicFont instance."""
    iconic = _instance()
    return iconic.icon(*args, **kwargs)
def load_font(*args, **kwargs):
    """Delegate custom font loading to the shared IconicFont instance."""
    iconic = _instance()
    return iconic.load_font(*args, **kwargs)
def charmap(prefixed_name):
    """Look up the character mapped to a 'prefix.name' icon identifier."""
    prefix, name = prefixed_name.split('.')
    table = _instance().charmap
    return table[prefix][name]
def font(*args, **kwargs):
    """Delegate font construction to the shared IconicFont instance."""
    iconic = _instance()
    return iconic.font(*args, **kwargs)
def set_defaults(**kwargs):
    """Forward global icon defaults to iconic_font.set_global_defaults."""
    return set_global_defaults(**kwargs)
| 26.15
| 113
| 0.678776
|
4a000255c50f5139a42dd9bf5c0549d332a3dd14
| 3,647
|
py
|
Python
|
cw/generate_cython_pyi.py
|
aarondewindt/cw
|
6be9770da0f0fc34ea47d7ab83e6929c4823e98a
|
[
"MIT"
] | 1
|
2021-10-06T07:20:43.000Z
|
2021-10-06T07:20:43.000Z
|
cw/generate_cython_pyi.py
|
aarondewindt/cw
|
6be9770da0f0fc34ea47d7ab83e6929c4823e98a
|
[
"MIT"
] | 3
|
2019-02-18T13:49:39.000Z
|
2020-12-28T04:13:27.000Z
|
cw/generate_cython_pyi.py
|
aarondewindt/cw
|
6be9770da0f0fc34ea47d7ab83e6929c4823e98a
|
[
"MIT"
] | null | null | null |
import re
from io import StringIO
from cw.cli_base import CLIBase
import argparse
from pathlib import Path
pattern = r'(?:cdef class )(.+)(?::)|((?: def )[\s\S]+?(?::))(?:(\s*"""[\s\S]*?""")|\s*\n)|( @[\s\S]*?\n)'
"""
Regular expression matching class, functions and decorators.
"""
# Captures group(1)=function name, group(2)=raw parameter list,
# group(3)=text between ')' and ':' (used as the return annotation).
func_pattern = r"(?: def\s+?)(\w+?)(?:\()([\s\S]+?)(?:\))([\w\W]*?)(?::)"
# Strips an optional leading C type from one parameter declaration.
param_pattern = r"(?:\s*?\w+\s+)?([\w\W]+?)$"
# TODO: Test generate_cython_pyi
# TODO: Make generate_cython_pyi more flexible. It's currently too limiting.
# List of requested features.
# - Non class member functions.
# - Class static variables.
# - Automatic imports. Or at least a pragma to define these.
# - Python 'class' definitions. Not only "cdef class".
# - Empty class with pass in it.
# - Cython properties.
def generate_cython_pyi(pyx_code: str) -> str:
    """
    Generates the class and function stubs for *.pyi files
    from Cython *.pyx code.
    :param pyx_code: String containing the code from the pyx file.
    :return: String containing the code for the pyi file.
    """
    stubs = StringIO()
    # Imports emitted into the header of every generated stub file.
    stubs.write("from typing import Sequence, Tuple, Dict, Optional, Union, Iterable\n"
                "from os import PathLike\n"
                "import numpy as np\n\n")
    # Find and loop through all, class, function and decorators in the file.
    for match in re.finditer(pattern, pyx_code):
        if match.group(1) is not None:
            # Write cdef class as class.
            stubs.write(f"\nclass {match.group(1)}:\n")
        elif match.group(2) is not None:
            # __cinit__ has no stub-level meaning; expose it as __init__.
            match_2 = match.group(2).replace("__cinit__", "__init__")
            # Find function name, docstring, arguments, their types and the return type.
            func_match = re.match(func_pattern, match_2)
            if func_match is None:
                continue
            func_name = func_match.group(1)
            func_params = func_match.group(2)
            # Drop the optional leading C type from each parameter declaration.
            func_params = [re.match(param_pattern, param).group(1) for param in func_params.split(",")]
            stubs.write(f"    def {func_name}({', '.join(func_params)}){func_match.group(3)}:{match.group(3) or ''}\n        ...\n\n")
        elif match.group(4) is not None:
            # Write decorator.
            stubs.write(match.group(4))
    return stubs.getvalue()
class Pyx2PyiCLI(CLIBase):
    """Command line front-end that turns *.pyx files into *.pyi stub files."""
    @classmethod
    def configure_arg_parser(cls, sub_parsers):
        # Register the "x2i" sub-command and its positional path arguments.
        parser = sub_parsers.add_parser("x2i", help="Generates *.pyi files with stubs from Cython *.pyx file.")
        parser.add_argument("paths", nargs='+')
        parser.set_defaults(func=cls.main)
    @classmethod
    def _convert(cls, pyx_path: Path, name_only: bool = False):
        # Convert a single pyx file, writing the pyi stub next to it.
        pyi_path = pyx_path.with_suffix(".pyi")
        with pyx_path.open("r") as f_pyx:
            with pyi_path.open("w") as f_pyi:
                f_pyi.write(generate_cython_pyi(f_pyx.read()))
        print(f"{pyx_path} -> {pyi_path.name if name_only else pyi_path}")
    @classmethod
    def main(cls, args: argparse.Namespace):
        # Each argument may be a single pyx file or a directory of them.
        for raw_path in args.paths:
            pyx_path = Path(raw_path)
            if pyx_path.is_file():
                cls._convert(pyx_path)
            elif pyx_path.is_dir():
                for nested_path in pyx_path.glob("*.pyx"):
                    cls._convert(nested_path, name_only=True)
| 37.214286
| 134
| 0.574445
|
4a0003c2c6c0f59cd9603e695939c138aca6bd15
| 472
|
py
|
Python
|
oasispackage/probes/project1/yes.py
|
jose-caballero/oasis-server
|
a8368287748288264903af562156bdd572f7fe30
|
[
"Apache-2.0"
] | null | null | null |
oasispackage/probes/project1/yes.py
|
jose-caballero/oasis-server
|
a8368287748288264903af562156bdd572f7fe30
|
[
"Apache-2.0"
] | null | null | null |
oasispackage/probes/project1/yes.py
|
jose-caballero/oasis-server
|
a8368287748288264903af562156bdd572f7fe30
|
[
"Apache-2.0"
] | 1
|
2019-09-18T18:51:11.000Z
|
2019-09-18T18:51:11.000Z
|
#!/usr/bin/env python
from oasispackage.interfaces import BaseProbe
import sys
class yes(BaseProbe):
    """
    Fake probe, used only for testing purposes.
    Its run() always reports success (exit code 0).
    """
    def __init__(self, options):
        # Delegate all option handling to BaseProbe.
        super(yes, self).__init__(options)
    def run(self):
        ##self.log.debug('calling probe <yes> for vo %s' %self.oasis.vo)
        # Unconditional success.
        return 0
if __name__ == '__main__':
    # Build the probe from CLI arguments and propagate its return code.
    probe = yes(sys.argv[1:])
    sys.exit(probe.run())
| 17.481481
| 72
| 0.614407
|
4a0003ee334c4a4f31cc030b2fcd0e30b923af4e
| 2,116
|
py
|
Python
|
plugins/SentimentAnalysis/__init__.py
|
ecumene/Automata
|
f34a329d161111ee42caaf5fd2bca8d3a202ed39
|
[
"MIT"
] | 11
|
2019-07-11T12:53:45.000Z
|
2022-01-19T21:11:38.000Z
|
plugins/SentimentAnalysis/__init__.py
|
ecumene/Automata
|
f34a329d161111ee42caaf5fd2bca8d3a202ed39
|
[
"MIT"
] | 36
|
2019-10-03T18:48:15.000Z
|
2022-02-01T16:01:07.000Z
|
plugins/SentimentAnalysis/__init__.py
|
ecumene/Automata
|
f34a329d161111ee42caaf5fd2bca8d3a202ed39
|
[
"MIT"
] | 21
|
2019-12-09T03:42:34.000Z
|
2022-01-30T22:48:58.000Z
|
from nltk.sentiment import SentimentIntensityAnalyzer
import nltk
from nextcord.ext import commands
from Plugin import AutomataPlugin
nltk.downloader.download('vader_lexicon')
class SentimentAnalysis(AutomataPlugin):
    """NLTK Sentiment Analyzer"""
    # One shared VADER analyzer for all command invocations.
    sia = SentimentIntensityAnalyzer()
    @commands.command()
    async def sentiment(self, ctx, *, argument=None):
        """Replies with the sentiment of the sentence.
        Scores, in order of preference: the explicit text argument, the
        message this command replies to, or the previous channel message.
        """
        message_to_reply_to = ctx.message
        message_to_be_scored = argument
        if argument is None and ctx.message.reference is None:
            # No text and not a reply: score the message just before the command.
            historical_messages = await ctx.channel.history(limit=2).flatten()
            message_to_reply_to = historical_messages[1]
            message_to_be_scored = message_to_reply_to.content
        elif argument is None and ctx.message.reference is not None:
            # Command was used as a reply: score the referenced message.
            message_to_reply_to = await ctx.fetch_message(ctx.message.reference.message_id)
            message_to_be_scored = message_to_reply_to.content
        sentiment_text = ''
        output_template = "<@{author}>: This text is **{sentiment_text}**."
        # VADER compound score: sign gives polarity, magnitude gives intensity.
        compound_score = self.sia.polarity_scores(message_to_be_scored)['compound']
        absolute_score = abs(compound_score)
        if absolute_score == 0:
            sentiment_text = 'absolutely neutral'
        # Bug fix: the first bucket previously started at 0.01, so magnitudes in
        # (0, 0.01] matched no bucket and rendered as a bare "negative"/"positive".
        # The buckets now partition (0, 1] with no gaps.
        elif 0 < absolute_score < 0.25:
            sentiment_text = 'slightly '
        elif 0.25 <= absolute_score < 0.50:
            sentiment_text = 'somewhat '
        elif 0.50 <= absolute_score < 0.75:
            sentiment_text = ''
        elif 0.75 <= absolute_score < 0.90:
            sentiment_text = 'mostly '
        elif 0.90 <= absolute_score < 1.00:
            sentiment_text = 'overwhelmingly '
        elif absolute_score == 1.00:
            sentiment_text = 'absolutely '
        if compound_score < 0:
            sentiment_text += 'negative'
        elif compound_score > 0:
            sentiment_text += 'positive'
        output = output_template.format(author=ctx.message.author.id, sentiment_text=sentiment_text)
        await message_to_reply_to.reply(output)
| 36.482759
| 100
| 0.660208
|
4a00049f767654e24f7b022fa406517593bdfe06
| 33,241
|
py
|
Python
|
nova/virt/hyperv/vmops.py
|
nicholaskuechler/nova
|
ff412c3888b234eb123161cc4e6d0d0d69c0004e
|
[
"Apache-2.0"
] | null | null | null |
nova/virt/hyperv/vmops.py
|
nicholaskuechler/nova
|
ff412c3888b234eb123161cc4e6d0d0d69c0004e
|
[
"Apache-2.0"
] | 5
|
2016-07-11T20:59:47.000Z
|
2020-07-28T09:56:35.000Z
|
nova/virt/hyperv/vmops.py
|
nicholaskuechler/nova
|
ff412c3888b234eb123161cc4e6d0d0d69c0004e
|
[
"Apache-2.0"
] | 3
|
2018-01-29T00:44:44.000Z
|
2020-07-24T01:19:20.000Z
|
# Copyright (c) 2010 Cloud.com, Inc
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for basic VM operations.
"""
import functools
import os
import time
from eventlet import timeout as etimeout
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import importutils
from oslo_utils import units
from oslo_utils import uuidutils
from nova.api.metadata import base as instance_metadata
from nova import exception
from nova.i18n import _, _LI, _LE, _LW
from nova import utils
from nova.virt import configdrive
from nova.virt import hardware
from nova.virt.hyperv import constants
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import ioutils
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
hyperv_opts = [
cfg.BoolOpt('limit_cpu_features',
default=False,
help='Required for live migration among '
'hosts with different CPU features'),
cfg.BoolOpt('config_drive_inject_password',
default=False,
help='Sets the admin password in the config drive image'),
cfg.StrOpt('qemu_img_cmd',
default="qemu-img.exe",
help='Path of qemu-img command which is used to convert '
'between different image types'),
cfg.BoolOpt('config_drive_cdrom',
default=False,
help='Attaches the Config Drive image as a cdrom drive '
'instead of a disk drive'),
cfg.BoolOpt('enable_instance_metrics_collection',
default=False,
help='Enables metrics collections for an instance by using '
'Hyper-V\'s metric APIs. Collected data can by retrieved '
'by other apps and services, e.g.: Ceilometer. '
'Requires Hyper-V / Windows Server 2012 and above'),
cfg.FloatOpt('dynamic_memory_ratio',
default=1.0,
help='Enables dynamic memory allocation (ballooning) when '
'set to a value greater than 1. The value expresses '
'the ratio between the total RAM assigned to an '
'instance and its startup RAM amount. For example a '
'ratio of 2.0 for an instance with 1024MB of RAM '
'implies 512MB of RAM allocated at startup'),
cfg.IntOpt('wait_soft_reboot_seconds',
default=60,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
' if instance does not shutdown within this window.'),
]
CONF = cfg.CONF
CONF.register_opts(hyperv_opts, 'hyperv')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('network_api_class', 'nova.network')
SHUTDOWN_TIME_INCREMENT = 5
REBOOT_TYPE_SOFT = 'SOFT'
REBOOT_TYPE_HARD = 'HARD'
VM_GENERATIONS = {
constants.IMAGE_PROP_VM_GEN_1: constants.VM_GEN_1,
constants.IMAGE_PROP_VM_GEN_2: constants.VM_GEN_2
}
VM_GENERATIONS_CONTROLLER_TYPES = {
constants.VM_GEN_1: constants.CTRL_TYPE_IDE,
constants.VM_GEN_2: constants.CTRL_TYPE_SCSI
}
def check_admin_permissions(function):
    """Decorator: verify the Windows account has admin rights, then delegate.
    The wrapped callable must be a method whose instance exposes
    ``self._vmutils.check_admin_permissions()``.
    """
    @functools.wraps(function)
    def _checked(self, *args, **kwargs):
        # Permission check runs before every call of the wrapped method.
        self._vmutils.check_admin_permissions()
        return function(self, *args, **kwargs)
    return _checked
class VMOps(object):
_vif_driver_class_map = {
'nova.network.neutronv2.api.API':
'nova.virt.hyperv.vif.HyperVNeutronVIFDriver',
'nova.network.api.API':
'nova.virt.hyperv.vif.HyperVNovaNetworkVIFDriver',
}
# The console log is stored in two files, each should have at most half of
# the maximum console log size.
_MAX_CONSOLE_LOG_FILE_SIZE = units.Mi / 2
    def __init__(self):
        # Platform helper objects come from utilsfactory; presumably it picks
        # the implementation matching the host OS/Hyper-V version — confirm.
        self._vmutils = utilsfactory.get_vmutils()
        self._vhdutils = utilsfactory.get_vhdutils()
        self._pathutils = utilsfactory.get_pathutils()
        self._hostutils = utilsfactory.get_hostutils()
        self._volumeops = volumeops.VolumeOps()
        self._imagecache = imagecache.ImageCache()
        # The VIF driver is resolved from CONF.network_api_class; must be set
        # to None before _load_vif_driver_class() assigns the real driver.
        self._vif_driver = None
        self._load_vif_driver_class()
        # Console log writers; presumably keyed by instance — confirm at call sites.
        self._vm_log_writers = {}
def _load_vif_driver_class(self):
try:
class_name = self._vif_driver_class_map[CONF.network_api_class]
self._vif_driver = importutils.import_object(class_name)
except KeyError:
raise TypeError(_("VIF driver not found for "
"network_api_class: %s") %
CONF.network_api_class)
def list_instance_uuids(self):
instance_uuids = []
for (instance_name, notes) in self._vmutils.list_instance_notes():
if notes and uuidutils.is_uuid_like(notes[0]):
instance_uuids.append(str(notes[0]))
else:
LOG.debug("Notes not found or not resembling a GUID for "
"instance: %s" % instance_name)
return instance_uuids
def list_instances(self):
return self._vmutils.list_instances()
def get_info(self, instance):
"""Get information about the VM."""
LOG.debug("get_info called for instance", instance=instance)
instance_name = instance.name
if not self._vmutils.vm_exists(instance_name):
raise exception.InstanceNotFound(instance_id=instance.uuid)
info = self._vmutils.get_vm_summary_info(instance_name)
state = constants.HYPERV_POWER_STATE[info['EnabledState']]
return hardware.InstanceInfo(state=state,
max_mem_kb=info['MemoryUsage'],
mem_kb=info['MemoryUsage'],
num_cpu=info['NumberOfProcessors'],
cpu_time_ns=info['UpTime'])
def _create_root_vhd(self, context, instance):
base_vhd_path = self._imagecache.get_cached_image(context, instance)
base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path)
base_vhd_size = base_vhd_info['MaxInternalSize']
format_ext = base_vhd_path.split('.')[-1]
root_vhd_path = self._pathutils.get_root_vhd_path(instance.name,
format_ext)
root_vhd_size = instance.root_gb * units.Gi
try:
if CONF.use_cow_images:
LOG.debug("Creating differencing VHD. Parent: "
"%(base_vhd_path)s, Target: %(root_vhd_path)s",
{'base_vhd_path': base_vhd_path,
'root_vhd_path': root_vhd_path},
instance=instance)
self._vhdutils.create_differencing_vhd(root_vhd_path,
base_vhd_path)
vhd_type = self._vhdutils.get_vhd_format(base_vhd_path)
if vhd_type == constants.DISK_FORMAT_VHD:
# The base image has already been resized. As differencing
# vhdx images support it, the root image will be resized
# instead if needed.
return root_vhd_path
else:
LOG.debug("Copying VHD image %(base_vhd_path)s to target: "
"%(root_vhd_path)s",
{'base_vhd_path': base_vhd_path,
'root_vhd_path': root_vhd_path},
instance=instance)
self._pathutils.copyfile(base_vhd_path, root_vhd_path)
root_vhd_internal_size = (
self._vhdutils.get_internal_vhd_size_by_file_size(
base_vhd_path, root_vhd_size))
if self._is_resize_needed(root_vhd_path, base_vhd_size,
root_vhd_internal_size,
instance):
self._vhdutils.resize_vhd(root_vhd_path,
root_vhd_internal_size,
is_file_max_size=False)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(root_vhd_path):
self._pathutils.remove(root_vhd_path)
return root_vhd_path
def _is_resize_needed(self, vhd_path, old_size, new_size, instance):
if new_size < old_size:
error_msg = _("Cannot resize a VHD to a smaller size, the"
" original size is %(old_size)s, the"
" newer size is %(new_size)s"
) % {'old_size': old_size,
'new_size': new_size}
raise vmutils.VHDResizeException(error_msg)
elif new_size > old_size:
LOG.debug("Resizing VHD %(vhd_path)s to new "
"size %(new_size)s" %
{'new_size': new_size,
'vhd_path': vhd_path},
instance=instance)
return True
return False
def create_ephemeral_vhd(self, instance):
eph_vhd_size = instance.get('ephemeral_gb', 0) * units.Gi
if eph_vhd_size:
vhd_format = self._vhdutils.get_best_supported_vhd_format()
eph_vhd_path = self._pathutils.get_ephemeral_vhd_path(
instance.name, vhd_format)
self._vhdutils.create_dynamic_vhd(eph_vhd_path, eph_vhd_size,
vhd_format)
return eph_vhd_path
@check_admin_permissions
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None):
"""Create a new VM and start it."""
LOG.info(_LI("Spawning new instance"), instance=instance)
instance_name = instance.name
if self._vmutils.vm_exists(instance_name):
raise exception.InstanceExists(name=instance_name)
# Make sure we're starting with a clean slate.
self._delete_disk_files(instance_name)
if self._volumeops.ebs_root_in_block_devices(block_device_info):
root_vhd_path = None
else:
root_vhd_path = self._create_root_vhd(context, instance)
eph_vhd_path = self.create_ephemeral_vhd(instance)
vm_gen = self.get_image_vm_generation(root_vhd_path, image_meta)
try:
self.create_instance(instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path, vm_gen)
if configdrive.required_by(instance):
configdrive_path = self._create_config_drive(instance,
injected_files,
admin_password,
network_info)
self.attach_config_drive(instance, configdrive_path, vm_gen)
self.power_on(instance)
except Exception:
with excutils.save_and_reraise_exception():
self.destroy(instance)
def create_instance(self, instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path, vm_gen):
instance_name = instance.name
instance_path = os.path.join(CONF.instances_path, instance_name)
self._vmutils.create_vm(instance_name,
instance.memory_mb,
instance.vcpus,
CONF.hyperv.limit_cpu_features,
CONF.hyperv.dynamic_memory_ratio,
vm_gen,
instance_path,
[instance.uuid])
self._vmutils.create_scsi_controller(instance_name)
controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]
ctrl_disk_addr = 0
if root_vhd_path:
self._attach_drive(instance_name, root_vhd_path, 0, ctrl_disk_addr,
controller_type)
ctrl_disk_addr = 1
if eph_vhd_path:
self._attach_drive(instance_name, eph_vhd_path, 0, ctrl_disk_addr,
controller_type)
# If ebs_root is False, the first volume will be attached to SCSI
# controller. Generation 2 VMs only has a SCSI controller.
ebs_root = vm_gen is not constants.VM_GEN_2 and root_vhd_path is None
self._volumeops.attach_volumes(block_device_info,
instance_name,
ebs_root)
for vif in network_info:
LOG.debug('Creating nic for instance', instance=instance)
self._vmutils.create_nic(instance_name,
vif['id'],
vif['address'])
self._vif_driver.plug(instance, vif)
if CONF.hyperv.enable_instance_metrics_collection:
self._vmutils.enable_vm_metrics_collection(instance_name)
self._create_vm_com_port_pipe(instance)
def _attach_drive(self, instance_name, path, drive_addr, ctrl_disk_addr,
controller_type, drive_type=constants.DISK):
if controller_type == constants.CTRL_TYPE_SCSI:
self._vmutils.attach_scsi_drive(instance_name, path, drive_type)
else:
self._vmutils.attach_ide_drive(instance_name, path, drive_addr,
ctrl_disk_addr, drive_type)
def get_image_vm_generation(self, root_vhd_path, image_meta):
default_vm_gen = self._hostutils.get_default_vm_generation()
image_prop_vm = image_meta.properties.get(
'hw_machine_type', default_vm_gen)
if image_prop_vm not in self._hostutils.get_supported_vm_types():
LOG.error(_LE('Requested VM Generation %s is not supported on '
' this OS.'), image_prop_vm)
raise vmutils.HyperVException(
_('Requested VM Generation %s is not supported on this '
'OS.') % image_prop_vm)
vm_gen = VM_GENERATIONS[image_prop_vm]
if (vm_gen != constants.VM_GEN_1 and root_vhd_path and
self._vhdutils.get_vhd_format(
root_vhd_path) == constants.DISK_FORMAT_VHD):
LOG.error(_LE('Requested VM Generation %s, but provided VHD '
'instead of VHDX.'), vm_gen)
raise vmutils.HyperVException(
_('Requested VM Generation %s, but provided VHD instead of '
'VHDX.') % vm_gen)
return vm_gen
def _create_config_drive(self, instance, injected_files, admin_password,
network_info):
if CONF.config_drive_format != 'iso9660':
raise vmutils.UnsupportedConfigDriveFormatException(
_('Invalid config_drive_format "%s"') %
CONF.config_drive_format)
LOG.info(_LI('Using config drive for instance'), instance=instance)
extra_md = {}
if admin_password and CONF.hyperv.config_drive_inject_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(instance,
content=injected_files,
extra_md=extra_md,
network_info=network_info)
instance_path = self._pathutils.get_instance_dir(
instance.name)
configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso')
LOG.info(_LI('Creating config drive at %(path)s'),
{'path': configdrive_path_iso}, instance=instance)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
try:
cdb.make_drive(configdrive_path_iso)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Creating config drive failed with '
'error: %s'),
e, instance=instance)
if not CONF.hyperv.config_drive_cdrom:
configdrive_path = os.path.join(instance_path,
'configdrive.vhd')
utils.execute(CONF.hyperv.qemu_img_cmd,
'convert',
'-f',
'raw',
'-O',
'vpc',
configdrive_path_iso,
configdrive_path,
attempts=1)
self._pathutils.remove(configdrive_path_iso)
else:
configdrive_path = configdrive_path_iso
return configdrive_path
def attach_config_drive(self, instance, configdrive_path, vm_gen):
configdrive_ext = configdrive_path[(configdrive_path.rfind('.') + 1):]
# Do the attach here and if there is a certain file format that isn't
# supported in constants.DISK_FORMAT_MAP then bomb out.
try:
drive_type = constants.DISK_FORMAT_MAP[configdrive_ext]
controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]
self._attach_drive(instance.name, configdrive_path, 1, 0,
controller_type, drive_type)
except KeyError:
raise exception.InvalidDiskFormat(disk_format=configdrive_ext)
def _delete_disk_files(self, instance_name):
self._pathutils.get_instance_dir(instance_name,
create_dir=False,
remove_dir=True)
def destroy(self, instance, network_info=None, block_device_info=None,
destroy_disks=True):
instance_name = instance.name
LOG.info(_LI("Got request to destroy instance"), instance=instance)
try:
if self._vmutils.vm_exists(instance_name):
# Stop the VM first.
self._vmutils.stop_vm_jobs(instance_name)
self.power_off(instance)
if network_info:
for vif in network_info:
self._vif_driver.unplug(instance, vif)
self._vmutils.destroy_vm(instance_name)
self._volumeops.disconnect_volumes(block_device_info)
else:
LOG.debug("Instance not found", instance=instance)
if destroy_disks:
self._delete_disk_files(instance_name)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to destroy instance: %s'),
instance_name)
def reboot(self, instance, network_info, reboot_type):
"""Reboot the specified instance."""
LOG.debug("Rebooting instance", instance=instance)
if reboot_type == REBOOT_TYPE_SOFT:
if self._soft_shutdown(instance):
self.power_on(instance)
return
self._set_vm_state(instance,
constants.HYPERV_VM_STATE_REBOOT)
def _soft_shutdown(self, instance,
timeout=CONF.hyperv.wait_soft_reboot_seconds,
retry_interval=SHUTDOWN_TIME_INCREMENT):
"""Perform a soft shutdown on the VM.
:return: True if the instance was shutdown within time limit,
False otherwise.
"""
LOG.debug("Performing Soft shutdown on instance", instance=instance)
while timeout > 0:
# Perform a soft shutdown on the instance.
# Wait maximum timeout for the instance to be shutdown.
# If it was not shutdown, retry until it succeeds or a maximum of
# time waited is equal to timeout.
wait_time = min(retry_interval, timeout)
try:
LOG.debug("Soft shutdown instance, timeout remaining: %d",
timeout, instance=instance)
self._vmutils.soft_shutdown_vm(instance.name)
if self._wait_for_power_off(instance.name, wait_time):
LOG.info(_LI("Soft shutdown succeeded."),
instance=instance)
return True
except vmutils.HyperVException as e:
# Exception is raised when trying to shutdown the instance
# while it is still booting.
LOG.debug("Soft shutdown failed: %s", e, instance=instance)
time.sleep(wait_time)
timeout -= retry_interval
LOG.warning(_LW("Timed out while waiting for soft shutdown."),
instance=instance)
return False
def pause(self, instance):
"""Pause VM instance."""
LOG.debug("Pause instance", instance=instance)
self._set_vm_state(instance,
constants.HYPERV_VM_STATE_PAUSED)
def unpause(self, instance):
"""Unpause paused VM instance."""
LOG.debug("Unpause instance", instance=instance)
self._set_vm_state(instance,
constants.HYPERV_VM_STATE_ENABLED)
def suspend(self, instance):
"""Suspend the specified instance."""
LOG.debug("Suspend instance", instance=instance)
self._set_vm_state(instance,
constants.HYPERV_VM_STATE_SUSPENDED)
def resume(self, instance):
"""Resume the suspended VM instance."""
LOG.debug("Resume instance", instance=instance)
self._set_vm_state(instance,
constants.HYPERV_VM_STATE_ENABLED)
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
LOG.debug("Power off instance", instance=instance)
if retry_interval <= 0:
retry_interval = SHUTDOWN_TIME_INCREMENT
try:
if timeout and self._soft_shutdown(instance,
timeout,
retry_interval):
return
self._set_vm_state(instance,
constants.HYPERV_VM_STATE_DISABLED)
except exception.InstanceNotFound:
# The manager can call the stop API after receiving instance
# power off events. If this is triggered when the instance
# is being deleted, it might attempt to power off an unexisting
# instance. We'll just pass in this case.
LOG.debug("Instance not found. Skipping power off",
instance=instance)
def power_on(self, instance, block_device_info=None):
"""Power on the specified instance."""
LOG.debug("Power on instance", instance=instance)
if block_device_info:
self._volumeops.fix_instance_volume_disk_paths(instance.name,
block_device_info)
self._set_vm_state(instance, constants.HYPERV_VM_STATE_ENABLED)
def _set_vm_state(self, instance, req_state):
instance_name = instance.name
instance_uuid = instance.uuid
try:
self._vmutils.set_vm_state(instance_name, req_state)
if req_state in (constants.HYPERV_VM_STATE_DISABLED,
constants.HYPERV_VM_STATE_REBOOT):
self._delete_vm_console_log(instance)
if req_state in (constants.HYPERV_VM_STATE_ENABLED,
constants.HYPERV_VM_STATE_REBOOT):
self.log_vm_serial_output(instance_name,
instance_uuid)
LOG.debug("Successfully changed state of VM %(instance_name)s"
" to: %(req_state)s", {'instance_name': instance_name,
'req_state': req_state})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to change vm state of %(instance_name)s"
" to %(req_state)s"),
{'instance_name': instance_name,
'req_state': req_state})
def _get_vm_state(self, instance_name):
summary_info = self._vmutils.get_vm_summary_info(instance_name)
return summary_info['EnabledState']
def _wait_for_power_off(self, instance_name, time_limit):
"""Waiting for a VM to be in a disabled state.
:return: True if the instance is shutdown within time_limit,
False otherwise.
"""
desired_vm_states = [constants.HYPERV_VM_STATE_DISABLED]
def _check_vm_status(instance_name):
if self._get_vm_state(instance_name) in desired_vm_states:
raise loopingcall.LoopingCallDone()
periodic_call = loopingcall.FixedIntervalLoopingCall(_check_vm_status,
instance_name)
try:
# add a timeout to the periodic call.
periodic_call.start(interval=SHUTDOWN_TIME_INCREMENT)
etimeout.with_timeout(time_limit, periodic_call.wait)
except etimeout.Timeout:
# VM did not shutdown in the expected time_limit.
return False
finally:
# stop the periodic call, in case of exceptions or Timeout.
periodic_call.stop()
return True
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""Resume guest state when a host is booted."""
self.power_on(instance, block_device_info)
def log_vm_serial_output(self, instance_name, instance_uuid):
# Uses a 'thread' that will run in background, reading
# the console output from the according named pipe and
# write it to a file.
console_log_path = self._pathutils.get_vm_console_log_paths(
instance_name)[0]
pipe_path = r'\\.\pipe\%s' % instance_uuid
@utils.synchronized(pipe_path)
def log_serial_output():
vm_log_writer = self._vm_log_writers.get(instance_uuid)
if vm_log_writer and vm_log_writer.is_active():
LOG.debug("Instance %s log writer is already running.",
instance_name)
else:
vm_log_writer = ioutils.IOThread(
pipe_path, console_log_path,
self._MAX_CONSOLE_LOG_FILE_SIZE)
vm_log_writer.start()
self._vm_log_writers[instance_uuid] = vm_log_writer
log_serial_output()
def get_console_output(self, instance):
console_log_paths = (
self._pathutils.get_vm_console_log_paths(instance.name))
try:
instance_log = ''
# Start with the oldest console log file.
for console_log_path in console_log_paths[::-1]:
if os.path.exists(console_log_path):
with open(console_log_path, 'rb') as fp:
instance_log += fp.read()
return instance_log
except IOError as err:
msg = _("Could not get instance console log. Error: %s") % err
raise vmutils.HyperVException(msg, instance=instance)
def _delete_vm_console_log(self, instance):
console_log_files = self._pathutils.get_vm_console_log_paths(
instance.name)
vm_log_writer = self._vm_log_writers.get(instance.uuid)
if vm_log_writer:
vm_log_writer.join()
for log_file in console_log_files:
fileutils.delete_if_exists(log_file)
def copy_vm_console_logs(self, vm_name, dest_host):
local_log_paths = self._pathutils.get_vm_console_log_paths(
vm_name)
remote_log_paths = self._pathutils.get_vm_console_log_paths(
vm_name, remote_server=dest_host)
for local_log_path, remote_log_path in zip(local_log_paths,
remote_log_paths):
if self._pathutils.exists(local_log_path):
self._pathutils.copy(local_log_path,
remote_log_path)
def _create_vm_com_port_pipe(self, instance):
# Creates a pipe to the COM 0 serial port of the specified vm.
pipe_path = r'\\.\pipe\%s' % instance.uuid
self._vmutils.get_vm_serial_port_connection(
instance.name, update_connection=pipe_path)
def restart_vm_log_writers(self):
# Restart the VM console log writers after nova compute restarts.
active_instances = self._vmutils.get_active_instances()
for instance_name in active_instances:
instance_path = self._pathutils.get_instance_dir(instance_name)
# Skip instances that are not created by Nova
if not os.path.exists(instance_path):
continue
vm_serial_conn = self._vmutils.get_vm_serial_port_connection(
instance_name)
if vm_serial_conn:
instance_uuid = os.path.basename(vm_serial_conn)
self.log_vm_serial_output(instance_name, instance_uuid)
def copy_vm_dvd_disks(self, vm_name, dest_host):
dvd_disk_paths = self._vmutils.get_vm_dvd_disk_paths(vm_name)
dest_path = self._pathutils.get_instance_dir(
vm_name, remote_server=dest_host)
for path in dvd_disk_paths:
self._pathutils.copyfile(path, dest_path)
def _check_hotplug_available(self, instance):
"""Check whether attaching an interface is possible for the given
instance.
:returns: True if attaching / detaching interfaces is possible for the
given instance.
"""
vm_state = self._get_vm_state(instance.name)
if vm_state == constants.HYPERV_VM_STATE_DISABLED:
# can attach / detach interface to stopped VMs.
return True
if not self._hostutils.check_min_windows_version(10, 0):
# TODO(claudiub): add set log level to error after string freeze.
LOG.debug("vNIC hot plugging is supported only in newer "
"versions than Windows Hyper-V / Server 2012 R2.")
return False
if (self._vmutils.get_vm_generation(instance.name) ==
constants.VM_GEN_1):
# TODO(claudiub): add set log level to error after string freeze.
LOG.debug("Cannot hot plug vNIC to a first generation VM.",
instance=instance)
return False
return True
def attach_interface(self, instance, vif):
if not self._check_hotplug_available(instance):
raise exception.InterfaceAttachFailed(instance_uuid=instance.uuid)
LOG.debug('Attaching vif: %s', vif['id'], instance=instance)
self._vmutils.create_nic(instance.name, vif['id'], vif['address'])
self._vif_driver.plug(instance, vif)
def detach_interface(self, instance, vif):
try:
if not self._check_hotplug_available(instance):
raise exception.InterfaceDetachFailed(
instance_uuid=instance.uuid)
LOG.debug('Detaching vif: %s', vif['id'], instance=instance)
self._vif_driver.unplug(instance, vif)
self._vmutils.destroy_nic(instance.name, vif['id'])
except exception.NotFound:
# TODO(claudiub): add set log level to error after string freeze.
LOG.debug("Instance not found during detach interface. It "
"might have been destroyed beforehand.",
instance=instance)
raise exception.InterfaceDetachFailed(instance_uuid=instance.uuid)
| 42.891613
| 79
| 0.597906
|
4a0004a02f74917a340a2e22e272209ed795a26a
| 542
|
py
|
Python
|
setup.py
|
m-rivera/mootler
|
d723de7535a6dccc7fb59887d7e07cfba67c74f0
|
[
"MIT"
] | 1
|
2021-04-12T12:42:29.000Z
|
2021-04-12T12:42:29.000Z
|
setup.py
|
m-rivera/mootler
|
d723de7535a6dccc7fb59887d7e07cfba67c74f0
|
[
"MIT"
] | null | null | null |
setup.py
|
m-rivera/mootler
|
d723de7535a6dccc7fb59887d7e07cfba67c74f0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup, Extension
setup(name='mootler',
version='0.2.1',
description='Utility to analyse Moodle data and plot it',
author='Miguel Rivera',
author_email='miguel.rivera@ucl.ac.uk',
license='MIT',
packages=['mootler',
'mootler.src',],
scripts=['mootler/scripts/moot',
'mootler/scripts/moot_enrol',
'mootler/scripts/moot_pdf_merge'],
install_requires=[
'pandas',
'plotly',
'PyPDF2'],
)
| 25.809524
| 63
| 0.583026
|
4a00067e12056a3f8222c138e49faaa768565fb6
| 5,949
|
py
|
Python
|
tests/integration/modules/test_mac_softwareupdate.py
|
jubrad/salt
|
7960334fb726cfde45e6409da79a65535c626685
|
[
"Apache-2.0"
] | 1
|
2021-08-14T13:48:38.000Z
|
2021-08-14T13:48:38.000Z
|
tests/integration/modules/test_mac_softwareupdate.py
|
jubrad/salt
|
7960334fb726cfde45e6409da79a65535c626685
|
[
"Apache-2.0"
] | 3
|
2015-03-31T14:44:05.000Z
|
2015-06-18T19:02:24.000Z
|
tests/integration/modules/test_mac_softwareupdate.py
|
jubrad/salt
|
7960334fb726cfde45e6409da79a65535c626685
|
[
"Apache-2.0"
] | 1
|
2020-01-02T09:03:24.000Z
|
2020-01-02T09:03:24.000Z
|
# -*- coding: utf-8 -*-
'''
integration tests for mac_softwareupdate
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.unit import skipIf
from tests.support.case import ModuleCase
from tests.support.helpers import destructiveTest, skip_if_not_root
# Import Salt libs
import salt.utils.path
import salt.utils.platform
@skip_if_not_root
@skipIf(not salt.utils.platform.is_darwin(), 'Test only available on macOS')
@skipIf(not salt.utils.path.which('softwareupdate'), '\'softwareupdate\' binary not found in $PATH')
class MacSoftwareUpdateModuleTest(ModuleCase):
'''
Validate the mac_softwareupdate module
'''
IGNORED_LIST = []
SCHEDULE = False
CATALOG = ''
def setUp(self):
'''
Get current settings
'''
self.IGNORED_LIST = self.run_function('softwareupdate.list_ignored')
self.SCHEDULE = self.run_function('softwareupdate.schedule')
self.CATALOG = self.run_function('softwareupdate.get_catalog')
super(MacSoftwareUpdateModuleTest, self).setUp()
def tearDown(self):
'''
Reset to original settings
'''
if self.IGNORED_LIST:
for item in self.IGNORED_LIST:
self.run_function('softwareupdate.ignore', [item])
else:
self.run_function('softwareupdate.reset_ignored')
self.run_function('softwareupdate.schedule', [self.SCHEDULE])
if self.CATALOG == 'Default':
self.run_function('softwareupdate.reset_catalog')
else:
self.run_function('softwareupdate.set_catalog', [self.CATALOG])
super(MacSoftwareUpdateModuleTest, self).tearDown()
def test_list_available(self):
'''
Test softwareupdate.list_available
'''
# Can't predict what will be returned, so can only test that the return
# is the correct type, dict
self.assertIsInstance(
self.run_function('softwareupdate.list_available'), dict)
@destructiveTest
def test_ignore(self):
'''
Test softwareupdate.ignore
Test softwareupdate.list_ignored
Test softwareupdate.reset_ignored
'''
# Test reset_ignored
self.assertTrue(self.run_function('softwareupdate.reset_ignored'))
self.assertEqual(self.run_function('softwareupdate.list_ignored'), [])
# Test ignore
self.assertTrue(
self.run_function('softwareupdate.ignore', ['spongebob']))
self.assertTrue(
self.run_function('softwareupdate.ignore', ['squidward']))
# Test list_ignored and verify ignore
self.assertIn(
'spongebob',
self.run_function('softwareupdate.list_ignored'))
self.assertIn(
'squidward',
self.run_function('softwareupdate.list_ignored'))
@destructiveTest
def test_schedule(self):
'''
Test softwareupdate.schedule_enable
Test softwareupdate.schedule_enabled
'''
# Test enable
self.assertTrue(
self.run_function('softwareupdate.schedule_enable', [True]))
self.assertTrue(self.run_function('softwareupdate.schedule_enabled'))
# Test disable in case it was already enabled
self.assertTrue(
self.run_function('softwareupdate.schedule_enable', [False]))
self.assertFalse(self.run_function('softwareupdate.schedule_enabled'))
@destructiveTest
def test_update(self):
'''
Test softwareupdate.update_all
Test softwareupdate.update
Test softwareupdate.update_available
Need to know the names of updates that are available to properly test
the update functions...
'''
# There's no way to know what the dictionary will contain, so all we can
# check is that the return is a dictionary
self.assertIsInstance(
self.run_function('softwareupdate.update_all'), dict)
# Test update_available
self.assertFalse(
self.run_function('softwareupdate.update_available', ['spongebob']))
# Test update not available
self.assertIn(
'Update not available',
self.run_function('softwareupdate.update', ['spongebob']))
def test_list_downloads(self):
'''
Test softwareupdate.list_downloads
'''
self.assertIsInstance(
self.run_function('softwareupdate.list_downloads'), list)
@destructiveTest
def test_download(self):
'''
Test softwareupdate.download
Need to know the names of updates that are available to properly test
the download function
'''
# Test update not available
self.assertIn(
'Update not available',
self.run_function('softwareupdate.download', ['spongebob']))
@destructiveTest
def test_download_all(self):
'''
Test softwareupdate.download_all
'''
self.assertIsInstance(
self.run_function('softwareupdate.download_all'), list)
@destructiveTest
def test_get_set_reset_catalog(self):
'''
Test softwareupdate.download_all
'''
# Reset the catalog
self.assertTrue(self.run_function('softwareupdate.reset_catalog'))
self.assertEqual(self.run_function('softwareupdate.get_catalog'),
'Default')
# Test setting and getting the catalog
self.assertTrue(
self.run_function('softwareupdate.set_catalog', ['spongebob']))
self.assertEqual(
self.run_function('softwareupdate.get_catalog'), 'spongebob')
# Test reset the catalog
self.assertTrue(self.run_function('softwareupdate.reset_catalog'))
self.assertEqual(self.run_function('softwareupdate.get_catalog'),
'Default')
| 32.867403
| 100
| 0.646663
|
4a00071ead632cbed3e8790794fdc02cf748efec
| 3,408
|
py
|
Python
|
setup.py
|
Lewiscowles1986/bearlib
|
30f9b8ba4b7a8db4cd2f4c6e07966ae51d0a00dd
|
[
"CC0-1.0"
] | null | null | null |
setup.py
|
Lewiscowles1986/bearlib
|
30f9b8ba4b7a8db4cd2f4c6e07966ae51d0a00dd
|
[
"CC0-1.0"
] | null | null | null |
setup.py
|
Lewiscowles1986/bearlib
|
30f9b8ba4b7a8db4cd2f4c6e07966ae51d0a00dd
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import sys
import re
import codecs
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
cwd = os.path.abspath(os.path.dirname(__file__))
class PyTest(TestCommand):
"""You can pass a single string of arguments using the
--pytest-args or -a command-line option:
python setup.py test -a "--durations=5"
is equivalent to running:
py.test --durations=5
"""
user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ['--strict', '--verbose', '--tb=long', 'tests']
def finalize_options(self):
TestCommand.finalize_options(self)
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
def read(filename):
with codecs.open(os.path.join(cwd, filename), 'rb', 'utf-8') as h:
return h.read()
metadata = read(os.path.join(cwd, 'bearlib', '__init__.py'))
def extract_metaitem(meta):
# swiped from https://hynek.me 's attr package
meta_match = re.search(r"""^__{meta}__\s+=\s+['\"]([^'\"]*)['\"]""".format(meta=meta),
metadata, re.MULTILINE)
if meta_match:
return meta_match.group(1)
raise RuntimeError('Unable to find __{meta}__ string.'.format(meta=meta))
if __name__ == '__main__':
setup(name='bearlib',
version=extract_metaitem('version'),
license=extract_metaitem('license'),
description=extract_metaitem('description'),
long_description=read('README.rst'),
author=extract_metaitem('author'),
author_email=extract_metaitem('email'),
maintainer=extract_metaitem('author'),
maintainer_email=extract_metaitem('email'),
url=extract_metaitem('url'),
download_url=extract_metaitem('download_url'),
packages=find_packages(exclude=('tests', 'docs')),
platforms=['Any'],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
classifiers=[ 'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
'Topic :: Software Development :: Libraries :: Python Modules',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
# 'Programming Language :: Python :: Implementation :: CPython',
# 'Programming Language :: Python :: Implementation :: PyPy',
# 'Programming Language :: Python :: Implementation :: IronPython',
# 'Programming Language :: Python :: Implementation :: Jython',
# 'Programming Language :: Python :: Implementation :: Stackless',
]
)
| 39.627907
| 90
| 0.598592
|
4a0007378b8ac7a4e0e0659d192f8c0eaf09cd62
| 231
|
py
|
Python
|
saleor/core/utils/filters.py
|
EitanRosenzvaig/mio
|
5e11b29ccb1724cae6b49ceef7350dcbc986083c
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/core/utils/filters.py
|
EitanRosenzvaig/mio
|
5e11b29ccb1724cae6b49ceef7350dcbc986083c
|
[
"BSD-3-Clause"
] | 1
|
2018-07-30T16:59:12.000Z
|
2018-07-30T16:59:12.000Z
|
saleor/core/utils/filters.py
|
EitanRosenzvaig/esmio
|
5e11b29ccb1724cae6b49ceef7350dcbc986083c
|
[
"BSD-3-Clause"
] | null | null | null |
def get_now_sorted_by(filter_set, default_sort='price'):
sort_by = filter_set.form.cleaned_data.get('sort_by')
if sort_by:
sort_by = sort_by[0].strip('-')
else:
sort_by = default_sort
return sort_by
| 28.875
| 57
| 0.670996
|
4a00074d81341fb97e1759d35e83ccd46ed2e2f0
| 4,661
|
py
|
Python
|
app.py
|
madut97/challenge2
|
5735ede255b7584bbe450d46ec6d990f4d0f2462
|
[
"MIT"
] | null | null | null |
app.py
|
madut97/challenge2
|
5735ede255b7584bbe450d46ec6d990f4d0f2462
|
[
"MIT"
] | null | null | null |
app.py
|
madut97/challenge2
|
5735ede255b7584bbe450d46ec6d990f4d0f2462
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Loan Qualifier Application.
This is a command line application to match applicants with qualifying loans.
Example:
$ python app.py
"""
from re import L
import sys
import fire
import questionary
from pathlib import Path
from qualifier.utils.fileio import load_csv
from qualifier.utils.calculators import (
calculate_monthly_debt_ratio,
calculate_loan_to_value_ratio,
)
from qualifier.filters.max_loan_size import filter_max_loan_size
from qualifier.filters.credit_score import filter_credit_score
from qualifier.filters.debt_to_income import filter_debt_to_income
from qualifier.filters.loan_to_value import filter_loan_to_value
from qualifier.utils.fileio import save_csv
def load_bank_data():
"""Ask for the file path to the latest banking data and load the CSV file.
Returns:
The bank data from the data rate sheet CSV file.
"""
csvpath = questionary.text("Enter a file path to a rate-sheet (.csv):").ask()
csvpath = Path(csvpath)
if not csvpath.exists():
sys.exit(f"Oops! Can't find this path: {csvpath}")
return load_csv(csvpath)
def get_applicant_info():
"""Prompt dialog to get the applicant's financial information.
Returns:
Returns the applicant's financial information.
"""
credit_score = questionary.text("What's your credit score?").ask()
debt = questionary.text("What's your current amount of monthly debt?").ask()
income = questionary.text("What's your total monthly income?").ask()
loan_amount = questionary.text("What's your desired loan amount?").ask()
home_value = questionary.text("What's your home value?").ask()
credit_score = int(credit_score)
debt = float(debt)
income = float(income)
loan_amount = float(loan_amount)
home_value = float(home_value)
return credit_score, debt, income, loan_amount, home_value
def find_qualifying_loans(bank_data, credit_score, debt, income, loan, home_value):
"""Determine which loans the user qualifies for.
Loan qualification criteria is based on:
- Credit Score
- Loan Size
- Debit to Income ratio (calculated)
- Loan to Value ratio (calculated)
Args:
bank_data (list): A list of bank data.
credit_score (int): The applicant's current credit score.
debt (float): The applicant's total monthly debt payments.
income (float): The applicant's total monthly income.
loan (float): The total loan amount applied for.
home_value (float): The estimated home value.
Returns:
A list of the banks willing to underwrite the loan.
"""
# Calculate the monthly debt ratio
monthly_debt_ratio = calculate_monthly_debt_ratio(debt, income)
print(f"The monthly debt to income ratio is {monthly_debt_ratio:.02f}")
# Calculate loan to value ratio
loan_to_value_ratio = calculate_loan_to_value_ratio(loan, home_value)
print(f"The loan to value ratio is {loan_to_value_ratio:.02f}.")
# Run qualification filters
bank_data_filtered = filter_max_loan_size(loan, bank_data)
bank_data_filtered = filter_credit_score(credit_score, bank_data_filtered)
bank_data_filtered = filter_debt_to_income(monthly_debt_ratio, bank_data_filtered)
bank_data_filtered = filter_loan_to_value(loan_to_value_ratio, bank_data_filtered)
print(f"Found {len(bank_data_filtered)} qualifying loans")
return bank_data_filtered
def save_qualifying_loans(qualifying_loans):
    """Prompt the user and optionally save the qualifying loans to a CSV file.

    Exits the program with an explanatory message when there are no
    qualifying loans; otherwise asks for confirmation before writing the
    CSV via ``save_csv``.

    Args:
        qualifying_loans (list of lists): The qualifying bank loans.
    """
    # Guard clause: nothing to save, so stop the program with a message.
    # (Pythonic truthiness instead of `len(...) == 0`; the original
    # f-string prefix was dropped since the message has no placeholders.)
    if not qualifying_loans:
        sys.exit("Oops! Unfortunately there are no qualifying loans for you")
    # Only write the file if the user explicitly confirms.
    user_answer = questionary.confirm(
        "Would you like to save qualifying loans in a csv file?"
    ).ask()
    if user_answer:
        save_csv(qualifying_loans)
def run():
    """Drive the CLI: load data, collect applicant info, filter, then save."""
    # Pull the most recent bank rate sheet from disk.
    banks = load_bank_data()
    # Collect the applicant's financials interactively.
    credit_score, debt, income, loan_amount, home_value = get_applicant_info()
    # Narrow the banks down to the loans this applicant qualifies for.
    matches = find_qualifying_loans(
        banks, credit_score, debt, income, loan_amount, home_value
    )
    # Offer to persist the results as CSV.
    save_qualifying_loans(matches)
if __name__ == "__main__":
    # CLI entry point: python-fire exposes `run` as the command-line command.
    fire.Fire(run)
| 33.775362
| 106
| 0.700279
|
4a00075f5c98631242e4675311acb10097d06976
| 478
|
py
|
Python
|
tests/test_perf_daemon_caller.py
|
RedisLabsModules/redisbench-admin
|
d8573d3414e5e846411d2875555deca5c8a7d0f1
|
[
"Apache-2.0"
] | 5
|
2021-08-03T12:38:30.000Z
|
2022-03-23T07:35:38.000Z
|
tests/test_perf_daemon_caller.py
|
RedisLabsModules/redisbench-admin
|
d8573d3414e5e846411d2875555deca5c8a7d0f1
|
[
"Apache-2.0"
] | 91
|
2021-03-17T12:05:35.000Z
|
2022-03-29T22:20:00.000Z
|
tests/test_perf_daemon_caller.py
|
RedisLabsModules/redisbench-admin
|
d8573d3414e5e846411d2875555deca5c8a7d0f1
|
[
"Apache-2.0"
] | 2
|
2021-06-07T14:06:58.000Z
|
2021-07-27T10:37:15.000Z
|
# BSD 3-Clause License
#
# Copyright (c) 2021., Redis Labs Modules
# All rights reserved.
#
from time import sleep
from redisbench_admin.profilers.perf_daemon_caller import PerfDaemonRemoteCaller
def test_perf_daemon_remote_caller():
    """Placeholder test; the real remote perf-daemon flow is disabled.

    The intended (currently commented-out) exercise was:
      caller = PerfDaemonRemoteCaller("localhost:5000")
      assert caller.start_profile(19899); sleep(2)
      assert caller.stop_profile()
    Re-enable once a perf daemon is available in CI.
    """
    assert True
| 25.157895
| 80
| 0.730126
|
4a0007d8088d6bb3d8c0899b3492750389b1c6fc
| 1,422
|
py
|
Python
|
misc/compile_shader.py
|
xzfn/toy
|
5d4f6e631c662634a059a4a178174032b01cc81a
|
[
"MIT"
] | null | null | null |
misc/compile_shader.py
|
xzfn/toy
|
5d4f6e631c662634a059a4a178174032b01cc81a
|
[
"MIT"
] | null | null | null |
misc/compile_shader.py
|
xzfn/toy
|
5d4f6e631c662634a059a4a178174032b01cc81a
|
[
"MIT"
] | null | null | null |
import os
import glob
import traceback
import sys
sys.path.insert(0, '../script')
import shadercompiler
GLSLC_EXECUTABLE = 'glslc'
INPUT_FOLDER = '../shader'
OUTPUT_FOLDER = '../../toy_generated/shader'
def glob_endswith(folder, end):
    """Return the paths directly inside *folder* whose names end with *end*."""
    pattern = os.path.join(folder, '*' + end)
    return glob.glob(pattern)
def compile_folder(folder, output_folder):
    """Compile every .vert.glsl / .frag.glsl in *folder* into SPIR-V.

    Only shaders reported outdated by ``shadercompiler`` are rebuilt.
    Returns the list of expected .spv file names (rebuilt or up to date).
    """
    os.makedirs(output_folder, exist_ok=True)
    # Vertex shaders first, then fragment shaders (same order as before).
    sources = glob_endswith(folder, '.vert.glsl') + glob_endswith(folder, '.frag.glsl')
    spvnames = []
    for src in sources:
        # Strip the "<folder>/" prefix, then swap the ".glsl" suffix for ".spv".
        relative = src[len(folder) + 1:]
        spvname = relative[:-5] + '.spv'
        target = os.path.join(output_folder, spvname)
        if shadercompiler.is_shader_outdated(src, target):
            shadercompiler.compile_glsl(src, target)
        spvnames.append(spvname)
    return spvnames
def remove_lost_shader_spv(folder, keeps):
    """Delete .spv files in *folder* whose names are not listed in *keeps*."""
    present = {os.path.basename(f) for f in glob_endswith(folder, '.spv')}
    # Anything on disk that is no longer produced by a source shader is stale.
    for stale in present - set(keeps):
        path = os.path.join(folder, stale)
        print('remove lost shader spv', path)
        os.remove(path)
if __name__ == '__main__':
    # Build/refresh all shader binaries, then delete orphaned .spv outputs.
    output_spvnames = compile_folder(INPUT_FOLDER, OUTPUT_FOLDER)
    remove_lost_shader_spv(OUTPUT_FOLDER, output_spvnames)
| 29.625
| 69
| 0.769339
|
4a00086b287261d72302f0b9b63a6fa52eaa3281
| 1,750
|
py
|
Python
|
config/urls.py
|
ADpDinamo/site
|
d7313cd6c151a381ccc803b81768673587cb8d45
|
[
"Apache-2.0"
] | null | null | null |
config/urls.py
|
ADpDinamo/site
|
d7313cd6c151a381ccc803b81768673587cb8d45
|
[
"Apache-2.0"
] | 8
|
2021-03-19T10:14:39.000Z
|
2022-03-12T00:24:41.000Z
|
config/urls.py
|
ADpDinamo/site
|
d7313cd6c151a381ccc803b81768673587cb8d45
|
[
"Apache-2.0"
] | null | null | null |
from django.conf import settings
from django.urls import include, path
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from adpd.users.views import HomePageView, CustomSignupView
# from adpd.cookies.urls
# Root URL configuration; uploaded media is appended via static() below.
urlpatterns = [
    path("", HomePageView.as_view(), name="home"),
    # Django admin lives at a configurable URL (settings.ADMIN_URL).
    path(settings.ADMIN_URL, admin.site.urls),
    # User management
    path("utilizator/", include("adpd.users.urls", namespace="users")),
    path("cont/", include("allauth.urls")),
    path("inregistrare/", CustomSignupView.as_view(), name='reg'),
    # Legal stuff (cookies app)
    path('legale/', include('cookies.urls')),
    # Frontend Admin
    # path('dashboard', )
    # Payments ("plati")
    path('plata/', include('payment.urls'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        path(
            "400/",
            default_views.bad_request,
            kwargs={"exception": Exception("Bad Request!")},
        ),
        path(
            "403/",
            default_views.permission_denied,
            kwargs={"exception": Exception("Permission Denied")},
        ),
        path(
            "404/",
            default_views.page_not_found,
            kwargs={"exception": Exception("Page not Found")},
        ),
        path("500/", default_views.server_error),
    ]
    if "debug_toolbar" in settings.INSTALLED_APPS:
        # Imported lazily: debug_toolbar is only installed in dev environments.
        import debug_toolbar
        urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
| 31.25
| 85
| 0.648
|
4a00096190c5fb40e25d5c53f1acb574343cba9b
| 143,311
|
py
|
Python
|
zerver/tests/test_signup.py
|
GrugLife/zulip
|
853a1938f8607bd48cb6d298cdb5026d7a6602ff
|
[
"Apache-2.0"
] | null | null | null |
zerver/tests/test_signup.py
|
GrugLife/zulip
|
853a1938f8607bd48cb6d298cdb5026d7a6602ff
|
[
"Apache-2.0"
] | null | null | null |
zerver/tests/test_signup.py
|
GrugLife/zulip
|
853a1938f8607bd48cb6d298cdb5026d7a6602ff
|
[
"Apache-2.0"
] | 1
|
2019-01-31T16:20:47.000Z
|
2019-01-31T16:20:47.000Z
|
# -*- coding: utf-8 -*-
import datetime
from email.utils import parseaddr
import re
import django_otp
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.http import HttpResponse, HttpRequest
from django.test import TestCase, override_settings
from django.utils.timezone import now as timezone_now
from django.core.exceptions import ValidationError
from two_factor.utils import default_device
from mock import patch, MagicMock
from zerver.lib.test_helpers import MockLDAP, get_test_image_file, avatar_disk_path
from confirmation.models import Confirmation, create_confirmation_link, MultiuseInvite, \
generate_key, confirmation_url, get_object_from_key, ConfirmationKeyException, \
one_click_unsubscribe_link
from confirmation import settings as confirmation_settings
from zerver.forms import HomepageForm, WRONG_SUBDOMAIN_ERROR, check_subdomain_available
from zerver.lib.actions import do_change_password
from zerver.lib.exceptions import CannotDeactivateLastUserError
from zerver.decorator import do_two_factor_login
from zerver.views.auth import login_or_register_remote_user, \
redirect_and_log_into_subdomain, start_two_factor_auth
from zerver.views.invite import get_invitee_emails_set
from zerver.views.registration import confirmation_key, \
send_confirm_registration_email
from zerver.models import (
get_realm, get_user, get_stream_recipient,
PreregistrationUser, Realm, RealmDomain, Recipient, Message,
ScheduledEmail, UserProfile, UserMessage,
Stream, Subscription, flush_per_request_caches
)
from zerver.lib.actions import (
set_default_streams,
do_change_is_admin,
get_stream,
do_create_realm,
do_create_default_stream_group,
do_add_default_stream,
)
from zerver.lib.send_email import send_email, send_future_email, FromAddress
from zerver.lib.initial_password import initial_password
from zerver.lib.actions import (
do_deactivate_realm,
do_deactivate_user,
do_set_realm_property,
add_new_user_history,
)
from zerver.lib.avatar import avatar_url
from zerver.lib.mobile_auth_otp import xor_hex_strings, ascii_to_hex, \
otp_encrypt_api_key, is_valid_otp, hex_to_ascii, otp_decrypt_api_key
from zerver.lib.notifications import enqueue_welcome_emails, \
followup_day2_email_delay
from zerver.lib.subdomains import is_root_domain_available
from zerver.lib.test_helpers import find_key_by_email, queries_captured, \
HostRequestMock, load_subdomain_token
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.test_runner import slow
from zerver.lib.sessions import get_session_dict_user
from zerver.lib.name_restrictions import is_disposable_domain
from zerver.context_processors import common_context
from collections import defaultdict
import re
import smtplib
import ujson
from typing import Any, Dict, List, Optional, Set
import urllib
import os
import pytz
class RedirectAndLogIntoSubdomainTestCase(ZulipTestCase):
    """Verify the payload of the signed subdomain token in the login redirect."""

    def test_cookie_data(self) -> None:
        realm = Realm.objects.all().first()
        name = 'Hamlet'
        email = self.example_email("hamlet")
        # Plain login flow: is_signup defaults to False in the token payload.
        login_response = redirect_and_log_into_subdomain(realm, name, email)
        payload = load_subdomain_token(login_response)
        expected = {'name': name, 'next': '',
                    'email': email,
                    'subdomain': realm.subdomain,
                    'is_signup': False}
        self.assertDictEqual(payload, expected)
        # Signup flow: the is_signup flag must round-trip through the token.
        signup_response = redirect_and_log_into_subdomain(realm, name, email,
                                                          is_signup=True)
        payload = load_subdomain_token(signup_response)
        expected['is_signup'] = True
        self.assertDictEqual(payload, expected)
class DeactivationNoticeTestCase(ZulipTestCase):
    """Redirects and the notice page shown when a realm is deactivated."""

    def _deactivate_zulip_realm(self) -> None:
        # Flip the "zulip" realm to deactivated, persisting only that field.
        realm = get_realm("zulip")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])

    def test_redirection_for_deactivated_realm(self) -> None:
        self._deactivate_zulip_realm()
        for page in ['/register/', '/login/']:
            response = self.client_get(page)
            self.assertEqual(response.status_code, 302)
            self.assertIn('deactivated', response.url)

    def test_redirection_for_active_realm(self) -> None:
        # An active realm serves both pages directly (no redirect).
        for page in ['/register/', '/login/']:
            response = self.client_get(page)
            self.assertEqual(response.status_code, 200)

    def test_deactivation_notice_when_realm_is_active(self) -> None:
        # The notice page bounces to login when the realm is still active.
        response = self.client_get('/accounts/deactivated/')
        self.assertEqual(response.status_code, 302)
        self.assertIn('login', response.url)

    def test_deactivation_notice_when_deactivated(self) -> None:
        self._deactivate_zulip_realm()
        response = self.client_get('/accounts/deactivated/')
        self.assertIn("Zulip Dev, has been deactivated.", response.content.decode())
class AddNewUserHistoryTest(ZulipTestCase):
    # Exercises the race between registering a user and backfilling their
    # message history when a message arrives in between.
    def test_add_new_user_history_race(self) -> None:
        """Sends a message during user creation"""
        # Create a user who hasn't had historical messages added
        stream_dict = {
            "Denmark": {"description": "A Scandinavian country", "invite_only": False},
            "Verona": {"description": "A city in Italy", "invite_only": False}
        } # type: Dict[str, Dict[str, Any]]
        realm = get_realm('zulip')
        set_default_streams(realm, stream_dict)
        # Patch out the history backfill so registration completes without it,
        # recreating the window in which a new message can land first.
        with patch("zerver.lib.actions.add_new_user_history"):
            self.register(self.nonreg_email('test'), "test")
        user_profile = self.nonreg_user('test')
        # Collect the streams this new user is subscribed to.
        subs = Subscription.objects.select_related("recipient").filter(
            user_profile=user_profile, recipient__type=Recipient.STREAM)
        streams = Stream.objects.filter(id__in=[sub.recipient.type_id for sub in subs])
        # Send a message, then run the backfill; it must not raise even though
        # a message now predates the backfill.
        self.send_stream_message(self.example_email('hamlet'), streams[0].name, "test")
        add_new_user_history(user_profile, streams)
class InitialPasswordTest(ZulipTestCase):
    """Behavior of initial_password() when no salt is configured."""

    def test_none_initial_password_salt(self) -> None:
        # Without INITIAL_PASSWORD_SALT there is no password to derive.
        with self.settings(INITIAL_PASSWORD_SALT=None):
            derived = initial_password('test@test.com')
            self.assertIsNone(derived)
class PasswordResetTest(ZulipTestCase):
    """
    Log in, reset password, log out, log in with new password.
    """
    def test_password_reset(self) -> None:
        # Happy path: full reset flow for an existing, active user.
        email = self.example_email("hamlet")
        old_password = initial_password(email)
        self.login(email)
        # test password reset template
        result = self.client_get('/accounts/password/reset/')
        self.assert_in_response('Reset your password', result)
        # start the password reset process by supplying an email address
        result = self.client_post('/accounts/password/reset/', {'email': email})
        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email in a few minutes to finish the process.", result)
        # Check that the password reset email is from a noreply address.
        from django.core.mail import outbox
        from_email = outbox[0].from_email
        self.assertIn("Zulip Account Security", from_email)
        tokenized_no_reply_email = parseaddr(from_email)[1]
        self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, tokenized_no_reply_email))
        self.assertIn("Psst. Word on the street is that you", outbox[0].body)
        # Visit the password reset link.
        password_reset_url = self.get_confirmation_url_from_outbox(
            email, url_pattern=settings.EXTERNAL_HOST + r"(\S+)")
        result = self.client_get(password_reset_url)
        self.assertEqual(result.status_code, 200)
        # Reset your password
        result = self.client_post(password_reset_url,
                                  {'new_password1': 'new_password',
                                   'new_password2': 'new_password'})
        # password reset succeeded
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith("/password/done/"))
        # log back in with new password
        self.login(email, password='new_password')
        user_profile = self.example_user('hamlet')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
        # make sure old password no longer works
        self.login(email, password=old_password, fails=True)
    def test_password_reset_for_non_existent_user(self) -> None:
        # The flow must not leak whether an account exists: same redirect,
        # and an email is still sent, but without the "active account" hint.
        email = 'nonexisting@mars.com'
        # start the password reset process by supplying an email address
        result = self.client_post('/accounts/password/reset/', {'email': email})
        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email in a few minutes to finish the process.", result)
        # Check that the password reset email is from a noreply address.
        from django.core.mail import outbox
        from_email = outbox[0].from_email
        self.assertIn("Zulip Account Security", from_email)
        tokenized_no_reply_email = parseaddr(from_email)[1]
        self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, tokenized_no_reply_email))
        self.assertIn('Someone (possibly you) requested a password',
                      outbox[0].body)
        self.assertNotIn('does have an active account in the zulip.testserver',
                         outbox[0].body)
    def test_password_reset_for_deactivated_user(self) -> None:
        # A deactivated user still gets an email, with a note that the
        # account has been deactivated.
        user_profile = self.example_user("hamlet")
        email = user_profile.email
        do_deactivate_user(user_profile)
        # start the password reset process by supplying an email address
        result = self.client_post('/accounts/password/reset/', {'email': email})
        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email in a few minutes to finish the process.", result)
        # Check that the password reset email is from a noreply address.
        from django.core.mail import outbox
        from_email = outbox[0].from_email
        self.assertIn("Zulip Account Security", from_email)
        tokenized_no_reply_email = parseaddr(from_email)[1]
        self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, tokenized_no_reply_email))
        self.assertIn('Someone (possibly you) requested a password',
                      outbox[0].body)
        self.assertNotIn('does have an active account in the zulip.testserver',
                         outbox[0].body)
        self.assertIn('but your account has been deactivated',
                      outbox[0].body)
    def test_password_reset_with_deactivated_realm(self) -> None:
        # A deactivated realm: same outward redirect, but no email is sent.
        user_profile = self.example_user("hamlet")
        email = user_profile.email
        do_deactivate_realm(user_profile.realm)
        # start the password reset process by supplying an email address
        with patch('logging.info') as mock_logging:
            result = self.client_post('/accounts/password/reset/', {'email': email})
            mock_logging.assert_called_once()
        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email in a few minutes to finish the process.", result)
        # Check that the password reset email is from a noreply address.
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)
    def test_wrong_subdomain(self) -> None:
        # Requesting a reset on a realm where the user has no account: the
        # email explains that and lists the realms where they do.
        email = self.example_email("hamlet")
        # start the password reset process by supplying an email address
        result = self.client_post(
            '/accounts/password/reset/', {'email': email},
            subdomain="zephyr")
        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email in a few minutes to finish the process.", result)
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 1)
        message = outbox.pop()
        tokenized_no_reply_email = parseaddr(message.from_email)[1]
        self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, tokenized_no_reply_email))
        self.assertIn('Someone (possibly you) requested a password reset email for',
                      message.body)
        self.assertIn("but you do not have an account in that organization",
                      message.body)
        self.assertIn("You do have active accounts in the following organization(s).\nhttp://zulip.testserver",
                      message.body)
    def test_invalid_subdomain(self) -> None:
        # A subdomain with no realm renders an error page and sends nothing.
        email = self.example_email("hamlet")
        # start the password reset process by supplying an email address
        result = self.client_post(
            '/accounts/password/reset/', {'email': email},
            subdomain="invalid")
        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 200)
        self.assert_in_success_response(["There is no Zulip organization hosted at this subdomain."],
                                        result)
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_auth_only(self) -> None:
        """If the email auth backend is not enabled, password reset should do nothing"""
        email = self.example_email("hamlet")
        with patch('logging.info') as mock_logging:
            result = self.client_post('/accounts/password/reset/', {'email': email})
            mock_logging.assert_called_once()
        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email in a few minutes to finish the process.", result)
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.EmailAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_and_email_auth(self) -> None:
        """If both email and ldap auth backends are enabled, limit password
        reset to users outside the LDAP domain"""
        # If the domain matches, we don't generate an email
        with self.settings(LDAP_APPEND_DOMAIN="zulip.com"):
            email = self.example_email("hamlet")
            with patch('logging.info') as mock_logging:
                result = self.client_post('/accounts/password/reset/', {'email': email})
                mock_logging.assert_called_once_with("Password reset not allowed for user in LDAP domain")
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)
        # If the domain doesn't match, we do generate an email
        with self.settings(LDAP_APPEND_DOMAIN="example.com"):
            email = self.example_email("hamlet")
            with patch('logging.info') as mock_logging:
                result = self.client_post('/accounts/password/reset/', {'email': email})
                self.assertEqual(result.status_code, 302)
                self.assertTrue(result["Location"].endswith(
                    "/accounts/password/reset/done/"))
                result = self.client_get(result["Location"])
        self.assertEqual(len(outbox), 1)
        message = outbox.pop()
        tokenized_no_reply_email = parseaddr(message.from_email)[1]
        self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, tokenized_no_reply_email))
        self.assertIn('Psst. Word on the street is that you need a new password',
                      message.body)
    def test_redirect_endpoints(self) -> None:
        '''
        These tests are mostly designed to give us 100% URL coverage
        in our URL coverage reports. Our mechanism for finding URL
        coverage doesn't handle redirects, so we just have a few quick
        tests here.
        '''
        result = self.client_get('/accounts/password/reset/done/')
        self.assert_in_success_response(["Check your email"], result)
        result = self.client_get('/accounts/password/done/')
        self.assert_in_success_response(["We've reset your password!"], result)
        result = self.client_get('/accounts/send_confirm/alice@example.com')
        self.assert_in_success_response(["/accounts/home/"], result)
        result = self.client_get('/accounts/new/send_confirm/alice@example.com')
        self.assert_in_success_response(["/new/"], result)
class LoginTest(ZulipTestCase):
    """
    Logging in, registration, and logging out.
    """
    def test_login(self) -> None:
        # Successful login attaches the user id to the session.
        self.login(self.example_email("hamlet"))
        user_profile = self.example_user('hamlet')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
    def test_login_deactivated_user(self) -> None:
        # A deactivated user gets an explanatory page and no session.
        user_profile = self.example_user('hamlet')
        do_deactivate_user(user_profile)
        result = self.login_with_return(self.example_email("hamlet"), "xxx")
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("Your account is no longer active.", result)
        self.assertIsNone(get_session_dict_user(self.client.session))
    def test_login_bad_password(self) -> None:
        # A wrong password re-renders the form (with the email) and no session.
        email = self.example_email("hamlet")
        result = self.login_with_return(email, password="wrongpassword")
        self.assert_in_success_response([email], result)
        self.assertIsNone(get_session_dict_user(self.client.session))
    def test_login_nonexist_user(self) -> None:
        result = self.login_with_return("xxx@zulip.com", "xxx")
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("Please enter a correct email and password", result)
        self.assertIsNone(get_session_dict_user(self.client.session))
    def test_login_wrong_subdomain(self) -> None:
        # Logging into a realm the user doesn't belong to logs a warning
        # and explains the mismatch.
        with patch("logging.warning") as mock_warning:
            result = self.login_with_return(self.mit_email("sipbtest"), "xxx")
            mock_warning.assert_called_once()
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("Your Zulip account is not a member of the "
                                "organization associated with this subdomain.", result)
        self.assertIsNone(get_session_dict_user(self.client.session))
    def test_login_invalid_subdomain(self) -> None:
        result = self.login_with_return(self.example_email("hamlet"), "xxx",
                                        subdomain="invalid")
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("There is no Zulip organization hosted at this subdomain.", result)
        self.assertIsNone(get_session_dict_user(self.client.session))
    def test_register(self) -> None:
        # Registration with many default streams: guard against the query
        # count scaling with the number of streams.
        realm = get_realm("zulip")
        stream_dict = {"stream_"+str(i): {"description": "stream_%s_description" % i, "invite_only": False}
                       for i in range(40)} # type: Dict[str, Dict[str, Any]]
        for stream_name in stream_dict.keys():
            self.make_stream(stream_name, realm=realm)
        set_default_streams(realm, stream_dict)
        # Clear all the caches.
        flush_per_request_caches()
        ContentType.objects.clear_cache()
        Site.objects.clear_cache()
        with queries_captured() as queries:
            self.register(self.nonreg_email('test'), "test")
        # Ensure the number of queries we make is not O(streams)
        self.assert_length(queries, 79)
        user_profile = self.nonreg_user('test')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
        self.assertFalse(user_profile.enable_stream_desktop_notifications)
    def test_register_deactivated(self) -> None:
        """
        If you try to register for a deactivated realm, you get a clear error
        page.
        """
        realm = get_realm("zulip")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])
        result = self.client_post('/accounts/home/', {'email': self.nonreg_email('test')},
                                  subdomain="zulip")
        self.assertEqual(result.status_code, 302)
        self.assertEqual('/accounts/deactivated/', result.url)
        # No user may be created against the deactivated realm.
        with self.assertRaises(UserProfile.DoesNotExist):
            self.nonreg_user('test')
    def test_register_deactivated_partway_through(self) -> None:
        """
        If you try to register for a deactivated realm, you get a clear error
        page.
        """
        email = self.nonreg_email('test')
        result = self.client_post('/accounts/home/', {'email': email},
                                  subdomain="zulip")
        self.assertEqual(result.status_code, 302)
        self.assertNotIn('deactivated', result.url)
        # Deactivate the realm between the email step and form submission.
        realm = get_realm("zulip")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])
        result = self.submit_reg_form_for_user(email, "abcd1234", subdomain="zulip")
        self.assertEqual(result.status_code, 302)
        self.assertEqual('/accounts/deactivated/', result.url)
        with self.assertRaises(UserProfile.DoesNotExist):
            self.nonreg_user('test')
    def test_login_deactivated_realm(self) -> None:
        """
        If you try to log in to a deactivated realm, you get a clear error page.
        """
        realm = get_realm("zulip")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])
        result = self.login_with_return(self.example_email("hamlet"), subdomain="zulip")
        self.assertEqual(result.status_code, 302)
        self.assertEqual('/accounts/deactivated/', result.url)
    def test_logout(self) -> None:
        self.login(self.example_email("hamlet"))
        # We use the logout API, not self.logout, to make sure we test
        # the actual logout code path.
        self.client_post('/accounts/logout/')
        self.assertIsNone(get_session_dict_user(self.client.session))
    def test_non_ascii_login(self) -> None:
        """
        You can log in even if your password contain non-ASCII characters.
        """
        email = self.nonreg_email('test')
        password = u"hümbüǵ"
        # Registering succeeds.
        self.register(email, password)
        user_profile = self.nonreg_user('test')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
        self.logout()
        self.assertIsNone(get_session_dict_user(self.client.session))
        # Logging in succeeds.
        self.logout()
        self.login(email, password)
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
    @override_settings(TWO_FACTOR_AUTHENTICATION_ENABLED=False)
    def test_login_page_redirects_logged_in_user(self) -> None:
        """You will be redirected to the app's main page if you land on the
        login page when already logged in.
        """
        self.login(self.example_email("cordelia"))
        response = self.client_get("/login/")
        self.assertEqual(response["Location"], "http://zulip.testserver")
    def test_options_request_to_login_page(self) -> None:
        # OPTIONS requests are accepted on the login endpoint.
        response = self.client_options('/login/')
        self.assertEqual(response.status_code, 200)
    @override_settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True)
    def test_login_page_redirects_logged_in_user_under_2fa(self) -> None:
        """You will be redirected to the app's main page if you land on the
        login page when already logged in.
        """
        user_profile = self.example_user("cordelia")
        self.create_default_device(user_profile)
        self.login(self.example_email("cordelia"))
        self.login_2fa(user_profile)
        response = self.client_get("/login/")
        self.assertEqual(response["Location"], "http://zulip.testserver")
    def test_start_two_factor_auth(self) -> None:
        # The 2FA entry point should delegate to TwoFactorLoginView.
        request = MagicMock(POST=dict())
        with patch('zerver.views.auth.TwoFactorLoginView') as mock_view:
            mock_view.as_view.return_value = lambda *a, **k: HttpResponse()
            response = start_two_factor_auth(request)
            self.assertTrue(isinstance(response, HttpResponse))
    def test_do_two_factor_login(self) -> None:
        # do_two_factor_login must call django_otp.login for the user.
        user_profile = self.example_user('hamlet')
        self.create_default_device(user_profile)
        request = MagicMock()
        with patch('zerver.decorator.django_otp.login') as mock_login:
            do_two_factor_login(request, user_profile)
            mock_login.assert_called_once()
class InviteUserBase(ZulipTestCase):
    """Shared helpers for invitation tests: send invites, verify sent mail."""

    def check_sent_emails(self, correct_recipients: List[str],
                          custom_from_name: Optional[str]=None) -> None:
        """Assert that exactly the expected invitation emails went out."""
        from django.core.mail import outbox
        self.assertEqual(len(outbox), len(correct_recipients))
        actual_recipients = [message.recipients()[0] for message in outbox]
        self.assertEqual(sorted(actual_recipients), sorted(correct_recipients))
        if not outbox:
            return
        first_message = outbox[0]
        if custom_from_name is not None:
            self.assertIn(custom_from_name, first_message.from_email)
        noreply_address = parseaddr(first_message.from_email)[1]
        self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, noreply_address))

    def invite(self, users: str, streams: List[str], body: str='',
               invite_as_admin: str="false") -> HttpResponse:
        """
        Invites the specified users to Zulip with the specified streams.
        users should be a string containing the users to invite, comma or
        newline separated.
        streams should be a list of strings.
        """
        payload = {"invitee_emails": users,
                   "stream": streams,
                   "invite_as_admin": invite_as_admin}
        return self.client_post("/json/invites", payload)
class InviteUserTest(InviteUserBase):
def test_successful_invite_user(self) -> None:
"""
A call to /json/invites with valid parameters causes an invitation
email to be sent.
"""
self.login(self.example_email("hamlet"))
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee], custom_from_name="Hamlet")
def test_newbie_restrictions(self) -> None:
user_profile = self.example_user('hamlet')
invitee = "alice-test@zulip.com"
stream_name = 'Denmark'
self.login(user_profile.email)
result = self.invite(invitee, [stream_name])
self.assert_json_success(result)
user_profile.date_joined = timezone_now() - datetime.timedelta(days=10)
user_profile.save()
with self.settings(INVITES_MIN_USER_AGE_DAYS=5):
result = self.invite(invitee, [stream_name])
self.assert_json_success(result)
with self.settings(INVITES_MIN_USER_AGE_DAYS=15):
result = self.invite(invitee, [stream_name])
self.assert_json_error_contains(result, "Your account is too new")
def test_invite_limits(self) -> None:
user_profile = self.example_user('hamlet')
realm = user_profile.realm
stream_name = 'Denmark'
# These constants only need to be in descending order
# for this test to trigger an InvitationError based
# on max daily counts.
site_max = 50
realm_max = 40
num_invitees = 30
max_daily_count = 20
daily_counts = [(1, max_daily_count)]
invite_emails = [
'foo-%02d@zulip.com' % (i,)
for i in range(num_invitees)
]
invitees = ','.join(invite_emails)
self.login(user_profile.email)
realm.max_invites = realm_max
realm.date_created = timezone_now()
realm.save()
def try_invite() -> HttpResponse:
with self.settings(OPEN_REALM_CREATION=True,
INVITES_DEFAULT_REALM_DAILY_MAX=site_max,
INVITES_NEW_REALM_LIMIT_DAYS=daily_counts):
result = self.invite(invitees, [stream_name])
return result
result = try_invite()
self.assert_json_error_contains(result, 'enough remaining invites')
# Next show that aggregate limits expire once the realm is old
# enough.
realm.date_created = timezone_now() - datetime.timedelta(days=8)
realm.save()
result = try_invite()
self.assert_json_success(result)
# Next get line coverage on bumping a realm's max_invites.
realm.date_created = timezone_now()
realm.max_invites = site_max + 10
realm.save()
result = try_invite()
self.assert_json_success(result)
# Finally get coverage on the case that OPEN_REALM_CREATION is False.
with self.settings(OPEN_REALM_CREATION=False):
result = self.invite(invitees, [stream_name])
self.assert_json_success(result)
    def test_successful_invite_user_as_admin_from_admin_account(self) -> None:
        """
        An organization administrator can invite a new user with
        invite_as_admin, and the invitee ends up as a realm admin
        after completing registration.
        """
        self.login(self.example_email('iago'))
        invitee = self.nonreg_email('alice')
        self.assert_json_success(self.invite(invitee, ["Denmark"], invite_as_admin="true"))
        self.assertTrue(find_key_by_email(invitee))
        self.submit_reg_form_for_user(invitee, "password")
        invitee_profile = self.nonreg_user('alice')
        self.assertTrue(invitee_profile.is_realm_admin)
    def test_invite_user_as_admin_from_normal_account(self) -> None:
        """
        A non-admin user is not allowed to invite new users as
        organization administrators.
        """
        self.login(self.example_email('hamlet'))
        invitee = self.nonreg_email('alice')
        response = self.invite(invitee, ["Denmark"], invite_as_admin="true")
        self.assert_json_error(response, "Must be an organization administrator")
def test_successful_invite_user_with_name(self) -> None:
"""
A call to /json/invites with valid parameters causes an invitation
email to be sent.
"""
self.login(self.example_email("hamlet"))
email = "alice-test@zulip.com"
invitee = "Alice Test <{}>".format(email)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.check_sent_emails([email], custom_from_name="Hamlet")
def test_successful_invite_user_with_name_and_normal_one(self) -> None:
"""
A call to /json/invites with valid parameters causes an invitation
email to be sent.
"""
self.login(self.example_email("hamlet"))
email = "alice-test@zulip.com"
email2 = "bob-test@zulip.com"
invitee = "Alice Test <{}>, {}".format(email, email2)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.assertTrue(find_key_by_email(email2))
self.check_sent_emails([email, email2], custom_from_name="Hamlet")
    def test_require_realm_admin(self) -> None:
        """
        The invite_by_admins_only realm setting works properly.
        """
        realm = get_realm('zulip')
        realm.invite_by_admins_only = True
        realm.save()
        # With invite_by_admins_only set, a regular member may not invite.
        self.login("hamlet@zulip.com")
        email = "alice-test@zulip.com"
        email2 = "bob-test@zulip.com"
        invitee = "Alice Test <{}>, {}".format(email, email2)
        self.assert_json_error(self.invite(invitee, ["Denmark"]),
                               "Must be an organization administrator")
        # Now verify an administrator can do it
        self.login("iago@zulip.com")
        self.assert_json_success(self.invite(invitee, ["Denmark"]))
        self.assertTrue(find_key_by_email(email))
        self.assertTrue(find_key_by_email(email2))
        self.check_sent_emails([email, email2])
    def test_successful_invite_user_with_notifications_stream(self) -> None:
        """
        A call to /json/invites with valid parameters unconditionally
        subscribes the invitee to the notifications stream if it exists and is
        public.
        """
        realm = get_realm('zulip')
        notifications_stream = get_stream('Verona', realm)
        realm.notifications_stream_id = notifications_stream.id
        realm.save()
        self.login(self.example_email("hamlet"))
        invitee = 'alice-test@zulip.com'
        self.assert_json_success(self.invite(invitee, ['Denmark']))
        self.assertTrue(find_key_by_email(invitee))
        self.check_sent_emails([invitee])
        # The pending invitation records the notifications stream in
        # addition to the stream the invitee was explicitly invited to.
        prereg_user = PreregistrationUser.objects.get(email=invitee)
        stream_ids = [stream.id for stream in prereg_user.streams.all()]
        self.assertTrue(notifications_stream.id in stream_ids)
    def test_invite_user_signup_initial_history(self) -> None:
        """
        Test that a new user invited to a stream receives some initial
        history but only from public streams.
        """
        self.login(self.example_email('hamlet'))
        user_profile = self.example_user('hamlet')
        private_stream_name = "Secret"
        self.make_stream(private_stream_name, invite_only=True)
        self.subscribe(user_profile, private_stream_name)
        # One message in a public stream and one in a private stream,
        # both sent before the invitee signs up.
        public_msg_id = self.send_stream_message(
            self.example_email("hamlet"),
            "Denmark",
            topic_name="Public topic",
            content="Public message",
        )
        secret_msg_id = self.send_stream_message(
            self.example_email("hamlet"),
            private_stream_name,
            topic_name="Secret topic",
            content="Secret message",
        )
        invitee = self.nonreg_email('alice')
        self.assert_json_success(self.invite(invitee, [private_stream_name, "Denmark"]))
        self.assertTrue(find_key_by_email(invitee))
        self.submit_reg_form_for_user(invitee, "password")
        invitee_profile = self.nonreg_user('alice')
        invitee_msg_ids = [um.message_id for um in
                           UserMessage.objects.filter(user_profile=invitee_profile)]
        # The invitee received history from the public stream but not
        # from the private one, despite being subscribed to both.
        self.assertTrue(public_msg_id in invitee_msg_ids)
        self.assertFalse(secret_msg_id in invitee_msg_ids)
        self.assertFalse(invitee_profile.is_realm_admin)
        # Test that exactly 2 new Zulip messages were sent, both notifications:
        # the three most recent messages should be the secret message sent
        # above, followed by the two signup-notification messages.
        last_3_messages = list(reversed(list(Message.objects.all().order_by("-id")[0:3])))
        first_msg = last_3_messages[0]
        self.assertEqual(first_msg.id, secret_msg_id)
        # The first notification, from notification-bot to the user who
        # invited the new user.
        second_msg = last_3_messages[1]
        self.assertEqual(second_msg.sender.email, "notification-bot@zulip.com")
        self.assertTrue(second_msg.content.startswith("alice_zulip.com <`alice@zulip.com`> accepted your"))
        # The second notification, from welcome-bot to the user who was invited.
        third_msg = last_3_messages[2]
        self.assertEqual(third_msg.sender.email, "welcome-bot@zulip.com")
        self.assertTrue(third_msg.content.startswith("Hello, and welcome to Zulip!"))
    def test_multi_user_invite(self) -> None:
        """
        Invites multiple users with a variety of delimiters.
        """
        self.login(self.example_email("hamlet"))
        # Intentionally use a weird string (commas, newlines and stray
        # whitespace mixed together) to exercise the invitee-list parser.
        self.assert_json_success(self.invite(
            """bob-test@zulip.com,     carol-test@zulip.com,
            dave-test@zulip.com
            earl-test@zulip.com""", ["Denmark"]))
        for user in ("bob", "carol", "dave", "earl"):
            self.assertTrue(find_key_by_email("%s-test@zulip.com" % (user,)))
        self.check_sent_emails(["bob-test@zulip.com", "carol-test@zulip.com",
                                "dave-test@zulip.com", "earl-test@zulip.com"])
def test_max_invites_model(self) -> None:
realm = get_realm("zulip")
self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
realm.max_invites = 3
realm.save()
self.assertEqual(get_realm("zulip").max_invites, 3)
realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX
realm.save()
def test_invite_too_many_users(self) -> None:
# Only a light test of this pathway; e.g. doesn't test that
# the limit gets reset after 24 hours
self.login(self.example_email("iago"))
self.client_post("/json/invites",
{"invitee_emails": "1@zulip.com, 2@zulip.com",
"stream": ["Denmark"]}),
self.assert_json_error(
self.client_post("/json/invites",
{"invitee_emails": ", ".join(
[str(i) for i in range(get_realm("zulip").max_invites - 1)]),
"stream": ["Denmark"]}),
"You do not have enough remaining invites. "
"Please contact zulip-admin@example.com to have your limit raised. "
"No invitations were sent.")
    def test_missing_or_invalid_params(self) -> None:
        """
        Tests inviting with various missing or invalid parameters.
        """
        self.login(self.example_email("hamlet"))
        # Missing the stream list entirely.
        self.assert_json_error(
            self.client_post("/json/invites",
                             {"invitee_emails": "foo@zulip.com"}),
            "You must specify at least one stream for invitees to join.")
        # A malformed address, and an address outside the realm's domain.
        for address in ("noatsign.com", "outsideyourdomain@example.net"):
            self.assert_json_error(
                self.invite(address, ["Denmark"]),
                "Some emails did not validate, so we didn't send any invitations.")
            self.check_sent_emails([])
        # An empty invitee list.
        self.assert_json_error(
            self.invite("", ["Denmark"]),
            "You must specify at least one email address.")
        self.check_sent_emails([])
    def test_guest_user_invitation(self) -> None:
        """
        Guest user can't invite new users
        """
        self.login(self.example_email("polonius"))
        invitee = "alice-test@zulip.com"
        self.assert_json_error(self.invite(invitee, ["Denmark"]), "Not allowed for guest users")
        # No invitation record or email should have been produced.
        self.assertEqual(find_key_by_email(invitee), None)
        self.check_sent_emails([])
def test_invalid_stream(self) -> None:
"""
Tests inviting to a non-existent stream.
"""
self.login(self.example_email("hamlet"))
self.assert_json_error(self.invite("iago-test@zulip.com", ["NotARealStream"]),
"Stream does not exist: NotARealStream. No invites were sent.")
self.check_sent_emails([])
    def test_invite_existing_user(self) -> None:
        """
        If you invite an address already using Zulip, no invitation is sent.
        """
        self.login(self.example_email("hamlet"))
        self.assert_json_error(
            self.client_post("/json/invites",
                             {"invitee_emails": self.example_email("hamlet"),
                              "stream": ["Denmark"]}),
            "We weren't able to invite anyone.")
        # No PreregistrationUser row and no email for an existing account.
        self.assertRaises(PreregistrationUser.DoesNotExist,
                          lambda: PreregistrationUser.objects.get(
                              email=self.example_email("hamlet")))
        self.check_sent_emails([])
    def test_invite_some_existing_some_new(self) -> None:
        """
        If you invite a mix of already existing and new users, invitations are
        only sent to the new users.
        """
        self.login(self.example_email("hamlet"))
        existing = [self.example_email("hamlet"), u"othello@zulip.com"]
        new = [u"foo-test@zulip.com", u"bar-test@zulip.com"]
        result = self.client_post("/json/invites",
                                  {"invitee_emails": "\n".join(existing + new),
                                   "stream": ["Denmark"]})
        # NOTE: the backslash line continuation below is inside the string
        # literal, so the expected message is one line with no break.
        self.assert_json_error(result,
                               "Some of those addresses are already using Zulip, \
so we didn't send them an invitation. We did send invitations to everyone else!")
        # We only created accounts for the new users.
        for email in existing:
            self.assertRaises(PreregistrationUser.DoesNotExist,
                              lambda: PreregistrationUser.objects.get(
                                  email=email))
        for email in new:
            self.assertTrue(PreregistrationUser.objects.get(email=email))
        # We only sent emails to the new users.
        self.check_sent_emails(new)
        prereg_user = PreregistrationUser.objects.get(email='foo-test@zulip.com')
        self.assertEqual(prereg_user.email, 'foo-test@zulip.com')
    def test_invite_outside_domain_in_closed_realm(self) -> None:
        """
        In a realm with `emails_restricted_to_domains = True`, you can't invite people
        with a different domain from that of the realm or your e-mail address.
        """
        zulip_realm = get_realm("zulip")
        zulip_realm.emails_restricted_to_domains = True
        zulip_realm.save()
        self.login(self.example_email("hamlet"))
        # example.com is outside the zulip.com domain, so validation fails.
        external_address = "foo@example.com"
        self.assert_json_error(
            self.invite(external_address, ["Denmark"]),
            "Some emails did not validate, so we didn't send any invitations.")
    def test_invite_using_disposable_email(self) -> None:
        """
        In a realm with `disallow_disposable_email_addresses = True`, you can't invite
        people with a disposable domain.
        """
        zulip_realm = get_realm("zulip")
        zulip_realm.emails_restricted_to_domains = False
        zulip_realm.disallow_disposable_email_addresses = True
        zulip_realm.save()
        self.login(self.example_email("hamlet"))
        # Presumably mailnator.com is on the disposable-domain blocklist
        # used by the email validator — TODO confirm against the validator.
        external_address = "foo@mailnator.com"
        self.assert_json_error(
            self.invite(external_address, ["Denmark"]),
            "Some emails did not validate, so we didn't send any invitations.")
    def test_invite_outside_domain_in_open_realm(self) -> None:
        """
        In a realm with `emails_restricted_to_domains = False`, you can invite people
        with a different domain from that of the realm or your e-mail address.
        """
        zulip_realm = get_realm("zulip")
        zulip_realm.emails_restricted_to_domains = False
        zulip_realm.save()
        self.login(self.example_email("hamlet"))
        external_address = "foo@example.com"
        self.assert_json_success(self.invite(external_address, ["Denmark"]))
        self.check_sent_emails([external_address])
    def test_invite_outside_domain_before_closing(self) -> None:
        """
        If you invite someone with a different domain from that of the realm
        when `emails_restricted_to_domains = False`, but `emails_restricted_to_domains` later
        changes to true, the invitation should succeed but the invitee's signup
        attempt should fail.
        """
        zulip_realm = get_realm("zulip")
        zulip_realm.emails_restricted_to_domains = False
        zulip_realm.save()
        self.login(self.example_email("hamlet"))
        external_address = "foo@example.com"
        self.assert_json_success(self.invite(external_address, ["Denmark"]))
        self.check_sent_emails([external_address])
        # Close the realm to outside domains before the invitee registers.
        zulip_realm.emails_restricted_to_domains = True
        zulip_realm.save()
        # The signup attempt is now rejected by the domain restriction.
        result = self.submit_reg_form_for_user("foo@example.com", "password")
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("only allows users with email addresses", result)
    def test_disposable_emails_before_closing(self) -> None:
        """
        If you invite someone with a disposable email when
        `disallow_disposable_email_addresses = False`, but
        later changes to true, the invitation should succeed
        but the invitee's signup attempt should fail.
        """
        zulip_realm = get_realm("zulip")
        zulip_realm.emails_restricted_to_domains = False
        zulip_realm.disallow_disposable_email_addresses = False
        zulip_realm.save()
        self.login(self.example_email("hamlet"))
        external_address = "foo@mailnator.com"
        self.assert_json_success(self.invite(external_address, ["Denmark"]))
        self.check_sent_emails([external_address])
        # Disallow disposable addresses before the invitee registers.
        zulip_realm.disallow_disposable_email_addresses = True
        zulip_realm.save()
        result = self.submit_reg_form_for_user("foo@mailnator.com", "password")
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("Please sign up using a real email address.", result)
    def test_invite_with_email_containing_plus_before_closing(self) -> None:
        """
        If you invite someone with an email containing plus when
        `emails_restricted_to_domains = False`, but later change
        `emails_restricted_to_domains = True`, the invitation should
        succeed but the invitee's signup attempt should fail as
        users are not allowed to signup using email containing +
        when the realm is restricted to domain.
        """
        zulip_realm = get_realm("zulip")
        zulip_realm.emails_restricted_to_domains = False
        zulip_realm.save()
        self.login(self.example_email("hamlet"))
        # Note the address is inside the realm's domain; only the "+"
        # makes it invalid once the realm is domain-restricted.
        external_address = "foo+label@zulip.com"
        self.assert_json_success(self.invite(external_address, ["Denmark"]))
        self.check_sent_emails([external_address])
        zulip_realm.emails_restricted_to_domains = True
        zulip_realm.save()
        result = self.submit_reg_form_for_user(external_address, "password")
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("Zulip Dev, does not allow signups using emails\n        that contains +", result)
    def test_invalid_email_check_after_confirming_email(self) -> None:
        """
        If the invited address is corrupted after the invitation is sent
        (but before signup), the registration attempt is rejected.
        """
        self.login(self.example_email("hamlet"))
        email = "test@zulip.com"
        self.assert_json_success(self.invite(email, ["Denmark"]))
        # Corrupt the stored address behind the confirmation object.
        obj = Confirmation.objects.get(confirmation_key=find_key_by_email(email))
        prereg_user = obj.content_object
        prereg_user.email = "invalid.email"
        prereg_user.save()
        result = self.submit_reg_form_for_user(email, "password")
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("The email address you are trying to sign up with is not valid", result)
    def test_invite_with_non_ascii_streams(self) -> None:
        """
        Inviting someone to streams with non-ASCII characters succeeds.
        """
        self.login(self.example_email("hamlet"))
        invitee = "alice-test@zulip.com"
        stream_name = u"hümbüǵ"
        # Make sure we're subscribed before inviting someone.
        self.subscribe(self.example_user("hamlet"), stream_name)
        self.assert_json_success(self.invite(invitee, [stream_name]))
    def test_invitation_reminder_email(self) -> None:
        """
        Invitation-reminder emails are queued via send_future_email, are
        delivered from the NOREPLY address, and are cleared once the
        invitee actually registers.
        """
        from django.core.mail import outbox
        # All users belong to zulip realm
        referrer_user = 'hamlet'
        current_user_email = self.example_email(referrer_user)
        self.login(current_user_email)
        invitee_email = self.nonreg_email('alice')
        self.assert_json_success(self.invite(invitee_email, ["Denmark"]))
        self.assertTrue(find_key_by_email(invitee_email))
        self.check_sent_emails([invitee_email])
        # Build the template context for a reminder email by hand.
        data = {"email": invitee_email, "referrer_email": current_user_email}
        invitee = PreregistrationUser.objects.get(email=data["email"])
        referrer = self.example_user(referrer_user)
        link = create_confirmation_link(invitee, referrer.realm.host, Confirmation.INVITATION)
        context = common_context(referrer)
        context.update({
            'activate_url': link,
            'referrer_name': referrer.full_name,
            'referrer_email': referrer.email,
            'referrer_realm_name': referrer.realm.name,
        })
        with self.settings(EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'):
            send_future_email(
                "zerver/emails/invitation_reminder", referrer.realm, to_email=data["email"],
                from_address=FromAddress.NOREPLY, context=context)
        # Deliver the due scheduled-email job and check the sender address.
        email_jobs_to_deliver = ScheduledEmail.objects.filter(
            scheduled_timestamp__lte=timezone_now())
        self.assertEqual(len(email_jobs_to_deliver), 1)
        email_count = len(outbox)
        for job in email_jobs_to_deliver:
            send_email(**ujson.loads(job.data))
        self.assertEqual(len(outbox), email_count + 1)
        self.assertIn(FromAddress.NOREPLY, outbox[-1].from_email)
        # Now verify that signing up clears invite_reminder emails
        email_jobs_to_deliver = ScheduledEmail.objects.filter(
            scheduled_timestamp__lte=timezone_now(), type=ScheduledEmail.INVITATION_REMINDER)
        self.assertEqual(len(email_jobs_to_deliver), 1)
        self.register(invitee_email, "test")
        email_jobs_to_deliver = ScheduledEmail.objects.filter(
            scheduled_timestamp__lte=timezone_now(), type=ScheduledEmail.INVITATION_REMINDER)
        self.assertEqual(len(email_jobs_to_deliver), 0)
    # make sure users can't take a valid confirmation key from another
    # pathway and use it with the invitation url route
    def test_confirmation_key_of_wrong_type(self) -> None:
        """A confirmation key is only valid for the type it was created with."""
        user = self.example_user('hamlet')
        url = create_confirmation_link(user, 'host', Confirmation.USER_REGISTRATION)
        registration_key = url.split('/')[-1]
        # Mainly a test of get_object_from_key, rather than of the invitation pathway
        with self.assertRaises(ConfirmationKeyException) as cm:
            get_object_from_key(registration_key, Confirmation.INVITATION)
        self.assertEqual(cm.exception.error_type, ConfirmationKeyException.DOES_NOT_EXIST)
        # Verify that using the wrong type doesn't work in the main confirm code path
        email_change_url = create_confirmation_link(user, 'host', Confirmation.EMAIL_CHANGE)
        email_change_key = email_change_url.split('/')[-1]
        url = '/accounts/do_confirm/' + email_change_key
        result = self.client_get(url)
        self.assert_in_success_response(["Whoops. We couldn't find your "
                                         "confirmation link in the system."], result)
def test_confirmation_expired(self) -> None:
user = self.example_user('hamlet')
url = create_confirmation_link(user, 'host', Confirmation.USER_REGISTRATION)
registration_key = url.split('/')[-1]
conf = Confirmation.objects.filter(confirmation_key=registration_key).first()
conf.date_sent -= datetime.timedelta(weeks=3)
conf.save()
target_url = '/' + url.split('/', 3)[3]
result = self.client_get(target_url)
self.assert_in_success_response(["Whoops. The confirmation link has expired "
"or been deactivated."], result)
class InvitationsTestCase(InviteUserBase):
    """Tests for listing, deleting, and resending pending invitations."""
    def test_successful_get_open_invitations(self) -> None:
        """
        A GET call to /json/invites returns all unexpired invitations.
        """
        days_to_activate = getattr(settings, 'ACCOUNT_ACTIVATION_DAYS', "Wrong")
        active_value = getattr(confirmation_settings, 'STATUS_ACTIVE', "Wrong")
        self.assertNotEqual(days_to_activate, "Wrong")
        self.assertNotEqual(active_value, "Wrong")
        self.login(self.example_email("iago"))
        user_profile = self.example_user("iago")
        # A live invitation: should appear in the listing.
        prereg_user_one = PreregistrationUser(email="TestOne@zulip.com", referred_by=user_profile)
        prereg_user_one.save()
        # An expired invitation (invited_at pushed past the activation
        # window): should be filtered out.
        expired_datetime = timezone_now() - datetime.timedelta(days=(days_to_activate+1))
        prereg_user_two = PreregistrationUser(email="TestTwo@zulip.com", referred_by=user_profile)
        prereg_user_two.save()
        PreregistrationUser.objects.filter(id=prereg_user_two.id).update(invited_at=expired_datetime)
        # An already-accepted invitation: should also be filtered out.
        prereg_user_three = PreregistrationUser(email="TestThree@zulip.com",
                                                referred_by=user_profile, status=active_value)
        prereg_user_three.save()
        result = self.client_get("/json/invites")
        self.assertEqual(result.status_code, 200)
        self.assert_in_success_response(["TestOne@zulip.com"], result)
        self.assert_not_in_success_response(["TestTwo@zulip.com", "TestThree@zulip.com"], result)

    def test_successful_delete_invitation(self) -> None:
        """
        A DELETE call to /json/invites/<ID> should delete the invite and
        any scheduled invitation reminder emails.
        """
        self.login(self.example_email("iago"))
        invitee = "DeleteMe@zulip.com"
        self.assert_json_success(self.invite(invitee, ['Denmark']))
        prereg_user = PreregistrationUser.objects.get(email=invitee)
        # Verify that the scheduled email exists.
        ScheduledEmail.objects.get(address__iexact=invitee,
                                   type=ScheduledEmail.INVITATION_REMINDER)
        result = self.client_delete('/json/invites/' + str(prereg_user.id))
        self.assertEqual(result.status_code, 200)
        # Deleting a second time fails: the invitation no longer exists.
        error_result = self.client_delete('/json/invites/' + str(prereg_user.id))
        self.assert_json_error(error_result, "No such invitation")
        # The reminder email job was removed along with the invitation.
        self.assertRaises(ScheduledEmail.DoesNotExist,
                          lambda: ScheduledEmail.objects.get(address__iexact=invitee,
                                                             type=ScheduledEmail.INVITATION_REMINDER))

    def test_successful_resend_invitation(self) -> None:
        """
        A POST call to /json/invites/<ID>/resend should send an invitation reminder email
        and delete any scheduled invitation reminder email.
        """
        self.login(self.example_email("iago"))
        invitee = "resend_me@zulip.com"
        self.assert_json_success(self.invite(invitee, ['Denmark']))
        prereg_user = PreregistrationUser.objects.get(email=invitee)
        # Verify and then clear from the outbox the original invite email
        self.check_sent_emails([invitee], custom_from_name="Zulip")
        from django.core.mail import outbox
        outbox.pop()
        # Verify that the scheduled email exists.
        scheduledemail_filter = ScheduledEmail.objects.filter(
            address=invitee, type=ScheduledEmail.INVITATION_REMINDER)
        self.assertEqual(scheduledemail_filter.count(), 1)
        # Materialize the timestamps now: values_list() is a lazy
        # QuerySet, and assertNotEqual on two distinct QuerySet objects
        # compares identity, so without list() the pre/post comparison
        # below would always pass vacuously.
        original_timestamp = list(scheduledemail_filter.values_list('scheduled_timestamp', flat=True))
        # Resend invite
        result = self.client_post('/json/invites/' + str(prereg_user.id) + '/resend')
        self.assertEqual(ScheduledEmail.objects.filter(
            address=invitee, type=ScheduledEmail.INVITATION_REMINDER).count(), 1)
        # Check that we have exactly one scheduled email, and that it is different
        self.assertEqual(scheduledemail_filter.count(), 1)
        self.assertNotEqual(original_timestamp,
                            list(scheduledemail_filter.values_list('scheduled_timestamp', flat=True)))
        self.assertEqual(result.status_code, 200)
        error_result = self.client_post('/json/invites/' + str(9999) + '/resend')
        self.assert_json_error(error_result, "No such invitation")
        self.check_sent_emails([invitee], custom_from_name="Zulip")

    def test_accessing_invites_in_another_realm(self) -> None:
        """
        Resending or deleting an invitation that belongs to a different
        realm fails as if the invitation did not exist.
        """
        invitor = UserProfile.objects.exclude(realm=get_realm('zulip')).first()
        prereg_user = PreregistrationUser.objects.create(
            email='email', referred_by=invitor, realm=invitor.realm)
        self.login(self.example_email("iago"))
        error_result = self.client_post('/json/invites/' + str(prereg_user.id) + '/resend')
        self.assert_json_error(error_result, "No such invitation")
        error_result = self.client_delete('/json/invites/' + str(prereg_user.id))
        self.assert_json_error(error_result, "No such invitation")
class InviteeEmailsParserTests(TestCase):
    """Tests for the invitee-emails parser (get_invitee_emails_set), which
    splits a raw textarea blob into a set of bare email addresses."""
    def setUp(self) -> None:
        # Chain up so base-class fixtures run if any are ever added.
        super().setUp()
        self.email1 = "email1@zulip.com"
        self.email2 = "email2@zulip.com"
        self.email3 = "email3@zulip.com"

    def test_if_emails_separated_by_commas_are_parsed_and_striped_correctly(self) -> None:
        """Comma-separated addresses are split and whitespace-stripped."""
        emails_raw = "{} ,{}, {}".format(self.email1, self.email2, self.email3)
        expected_set = {self.email1, self.email2, self.email3}
        self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)

    def test_if_emails_separated_by_newlines_are_parsed_and_striped_correctly(self) -> None:
        """Newline-separated addresses are split and whitespace-stripped."""
        emails_raw = "{}\n {}\n {} ".format(self.email1, self.email2, self.email3)
        expected_set = {self.email1, self.email2, self.email3}
        self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)

    def test_if_emails_from_email_client_separated_by_newlines_are_parsed_correctly(self) -> None:
        """Entries in the "Name <address>" style yield just the bare addresses."""
        emails_raw = "Email One <{}>\nEmailTwo<{}>\nEmail Three<{}>".format(self.email1, self.email2, self.email3)
        expected_set = {self.email1, self.email2, self.email3}
        self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)

    def test_if_emails_in_mixed_style_are_parsed_correctly(self) -> None:
        """A mix of named entries, commas, and newlines parses correctly."""
        emails_raw = "Email One <{}>,EmailTwo<{}>\n{}".format(self.email1, self.email2, self.email3)
        expected_set = {self.email1, self.email2, self.email3}
        self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
class MultiuseInviteTest(ZulipTestCase):
    """Tests for reusable (multiuse) invitation links."""
    def setUp(self) -> None:
        # Require invitations, so a multiuse link is actually needed to
        # register in this realm.
        self.realm = get_realm('zulip')
        self.realm.invite_required = True
        self.realm.save()
    def generate_multiuse_invite_link(self, streams: Optional[List[Stream]]=None,
                                      date_sent: Optional[datetime.datetime]=None) -> str:
        """Create a MultiuseInvite (optionally restricted to ``streams`` and
        back-dated to ``date_sent``) and return its confirmation URL."""
        invite = MultiuseInvite(realm=self.realm, referred_by=self.example_user("iago"))
        invite.save()
        if streams is not None:
            invite.streams.set(streams)
        if date_sent is None:
            date_sent = timezone_now()
        key = generate_key()
        Confirmation.objects.create(content_object=invite, date_sent=date_sent,
                                    confirmation_key=key, type=Confirmation.MULTIUSE_INVITE)
        return confirmation_url(key, self.realm.host, Confirmation.MULTIUSE_INVITE)
    def check_user_able_to_register(self, email: str, invite_link: str) -> None:
        """Walk the full signup flow for ``email`` via ``invite_link``,
        asserting every step succeeds; pops the confirmation email from
        the outbox when done."""
        password = "password"
        result = self.client_post(invite_link, {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # NOTE: this local name shadows the module-level confirmation_url
        # helper used in generate_multiuse_invite_link above.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        result = self.submit_reg_form_for_user(email, password)
        self.assertEqual(result.status_code, 302)
        from django.core.mail import outbox
        outbox.pop()
    def test_valid_multiuse_link(self) -> None:
        # A single link created just inside the validity window can be
        # used by several different users.
        email1 = self.nonreg_email("test")
        email2 = self.nonreg_email("test1")
        email3 = self.nonreg_email("alice")
        date_sent = timezone_now() - datetime.timedelta(days=settings.INVITATION_LINK_VALIDITY_DAYS - 1)
        invite_link = self.generate_multiuse_invite_link(date_sent=date_sent)
        self.check_user_able_to_register(email1, invite_link)
        self.check_user_able_to_register(email2, invite_link)
        self.check_user_able_to_register(email3, invite_link)
    def test_expired_multiuse_link(self) -> None:
        # A link exactly INVITATION_LINK_VALIDITY_DAYS old has expired.
        email = self.nonreg_email('newuser')
        date_sent = timezone_now() - datetime.timedelta(days=settings.INVITATION_LINK_VALIDITY_DAYS)
        invite_link = self.generate_multiuse_invite_link(date_sent=date_sent)
        result = self.client_post(invite_link, {'email': email})
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("The confirmation link has expired or been deactivated.", result)
    def test_invalid_multiuse_link(self) -> None:
        email = self.nonreg_email('newuser')
        invite_link = "/join/invalid_key/"
        result = self.client_post(invite_link, {'email': email})
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("Whoops. The confirmation link is malformed.", result)
    def test_invalid_multiuse_link_in_open_realm(self) -> None:
        # With invite_required off, a bad key still lets the user
        # register through the open-realm pathway.
        self.realm.invite_required = False
        self.realm.save()
        email = self.nonreg_email('newuser')
        invite_link = "/join/invalid_key/"
        with patch('zerver.views.registration.get_realm_from_request', return_value=self.realm):
            with patch('zerver.views.registration.get_realm', return_value=self.realm):
                self.check_user_able_to_register(email, invite_link)
    def test_multiuse_link_with_specified_streams(self) -> None:
        # Each link subscribes its users to exactly the streams it names.
        name1 = "newuser"
        name2 = "bob"
        email1 = self.nonreg_email(name1)
        email2 = self.nonreg_email(name2)
        stream_names = ["Rome", "Scotland", "Venice"]
        streams = [get_stream(stream_name, self.realm) for stream_name in stream_names]
        invite_link = self.generate_multiuse_invite_link(streams=streams)
        self.check_user_able_to_register(email1, invite_link)
        self.check_user_subscribed_only_to_streams(name1, streams)
        stream_names = ["Rome", "Verona"]
        streams = [get_stream(stream_name, self.realm) for stream_name in stream_names]
        invite_link = self.generate_multiuse_invite_link(streams=streams)
        self.check_user_able_to_register(email2, invite_link)
        self.check_user_subscribed_only_to_streams(name2, streams)
    def test_create_multiuse_link_api_call(self) -> None:
        self.login(self.example_email('iago'))
        result = self.client_post('/json/invites/multiuse')
        self.assert_json_success(result)
        invite_link = result.json()["invite_link"]
        self.check_user_able_to_register(self.nonreg_email("test"), invite_link)
    def test_create_multiuse_link_with_specified_streams_api_call(self) -> None:
        self.login(self.example_email('iago'))
        stream_names = ["Rome", "Scotland", "Venice"]
        streams = [get_stream(stream_name, self.realm) for stream_name in stream_names]
        stream_ids = [stream.id for stream in streams]
        result = self.client_post('/json/invites/multiuse',
                                  {"stream_ids": ujson.dumps(stream_ids)})
        self.assert_json_success(result)
        invite_link = result.json()["invite_link"]
        self.check_user_able_to_register(self.nonreg_email("test"), invite_link)
        self.check_user_subscribed_only_to_streams("test", streams)
    def test_only_admin_can_create_multiuse_link_api_call(self) -> None:
        self.login(self.example_email('iago'))
        # Only admins should be able to create multiuse invites even if
        # invite_by_admins_only is set to False.
        self.realm.invite_by_admins_only = False
        self.realm.save()
        result = self.client_post('/json/invites/multiuse')
        self.assert_json_success(result)
        invite_link = result.json()["invite_link"]
        self.check_user_able_to_register(self.nonreg_email("test"), invite_link)
        self.login(self.example_email('hamlet'))
        result = self.client_post('/json/invites/multiuse')
        self.assert_json_error(result, "Must be an organization administrator")
    def test_create_multiuse_link_invalid_stream_api_call(self) -> None:
        self.login(self.example_email('iago'))
        result = self.client_post('/json/invites/multiuse',
                                  {"stream_ids": ujson.dumps([54321])})
        self.assert_json_error(result, "Invalid stream id 54321. No invites were sent.")
class EmailUnsubscribeTests(ZulipTestCase):
    """Tests for the one-click (logged-out) email unsubscribe endpoints."""
    def test_error_unsubscribe(self) -> None:
        """Bad tokens and unknown email types render an error page."""
        # An invalid unsubscribe token "test123" produces an error.
        result = self.client_get('/accounts/unsubscribe/missed_messages/test123')
        self.assert_in_response('Unknown email unsubscribe request', result)
        # An unknown message type "fake" produces an error.
        user_profile = self.example_user('hamlet')
        unsubscribe_link = one_click_unsubscribe_link(user_profile, "fake")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
        self.assert_in_response('Unknown email unsubscribe request', result)
    def test_missedmessage_unsubscribe(self) -> None:
        """
        We provide one-click unsubscribe links in missed message
        e-mails that you can click even when logged out to update your
        email notification settings.
        """
        user_profile = self.example_user('hamlet')
        user_profile.enable_offline_email_notifications = True
        user_profile.save()
        unsubscribe_link = one_click_unsubscribe_link(user_profile,
                                                      "missed_messages")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
        self.assertEqual(result.status_code, 200)
        # The setting was toggled off without the user being logged in.
        user_profile.refresh_from_db()
        self.assertFalse(user_profile.enable_offline_email_notifications)
    def test_welcome_unsubscribe(self) -> None:
        """
        We provide one-click unsubscribe links in welcome e-mails that you can
        click even when logged out to stop receiving them.
        """
        user_profile = self.example_user('hamlet')
        # Simulate a new user signing up, which enqueues 2 welcome e-mails.
        enqueue_welcome_emails(user_profile)
        self.assertEqual(2, ScheduledEmail.objects.filter(user=user_profile).count())
        # Simulate unsubscribing from the welcome e-mails.
        unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
        # The welcome email jobs are no longer scheduled.
        self.assertEqual(result.status_code, 200)
        self.assertEqual(0, ScheduledEmail.objects.filter(user=user_profile).count())
    def test_digest_unsubscribe(self) -> None:
        """
        We provide one-click unsubscribe links in digest e-mails that you can
        click even when logged out to stop receiving them.
        Unsubscribing from these emails also dequeues any digest email jobs that
        have been queued.
        """
        user_profile = self.example_user('hamlet')
        self.assertTrue(user_profile.enable_digest_emails)
        # Enqueue a fake digest email.
        context = {'name': '', 'realm_uri': '', 'unread_pms': [], 'hot_conversations': [],
                   'new_users': [], 'new_streams': {'plain': []}, 'unsubscribe_link': ''}
        send_future_email('zerver/emails/digest', user_profile.realm,
                          to_user_id=user_profile.id, context=context)
        self.assertEqual(1, ScheduledEmail.objects.filter(user=user_profile).count())
        # Simulate unsubscribing from digest e-mails.
        unsubscribe_link = one_click_unsubscribe_link(user_profile, "digest")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
        # The setting is toggled off, and scheduled jobs have been removed.
        self.assertEqual(result.status_code, 200)
        # Circumvent user_profile caching.
        user_profile.refresh_from_db()
        self.assertFalse(user_profile.enable_digest_emails)
        self.assertEqual(0, ScheduledEmail.objects.filter(user=user_profile).count())
    def test_login_unsubscribe(self) -> None:
        """
        We provide one-click unsubscribe links in login
        e-mails that you can click even when logged out to update your
        email notification settings.
        """
        user_profile = self.example_user('hamlet')
        user_profile.enable_login_emails = True
        user_profile.save()
        unsubscribe_link = one_click_unsubscribe_link(user_profile, "login")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
        self.assertEqual(result.status_code, 200)
        # The setting was toggled off without authentication.
        user_profile.refresh_from_db()
        self.assertFalse(user_profile.enable_login_emails)
class RealmCreationTest(ZulipTestCase):
    """Tests for the open realm (organization) creation flow at /new/."""
    @override_settings(OPEN_REALM_CREATION=True)
    def check_able_to_create_realm(self, email: str) -> None:
        """End-to-end helper: create a realm via /new/ with the given email,
        then verify the realm's defaults and initial welcome messages."""
        password = "test"
        string_id = "zuliptest"
        realm = get_realm(string_id)
        # Make sure the realm does not exist
        self.assertIsNone(realm)
        # Create new realm with the email
        result = self.client_post('/new/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/new/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        result = self.submit_reg_form_for_user(email, password, realm_subdomain=string_id)
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].startswith('http://zuliptest.testserver/accounts/login/subdomain/'))
        # Make sure the realm is created
        realm = get_realm(string_id)
        self.assertIsNotNone(realm)
        self.assertEqual(realm.string_id, string_id)
        self.assertEqual(get_user(email, realm).realm, realm)
        # Check defaults
        self.assertEqual(realm.org_type, Realm.CORPORATE)
        self.assertEqual(realm.emails_restricted_to_domains, False)
        self.assertEqual(realm.invite_required, True)
        # Check welcome messages
        for stream_name, text, message_count in [
                ('announce', 'This is', 1),
                (Realm.INITIAL_PRIVATE_STREAM_NAME, 'This is', 1),
                ('general', 'Welcome to', 1),
                ('new members', 'stream is', 1),
                ('zulip', 'Here is', 3)]:
            stream = get_stream(stream_name, realm)
            recipient = get_stream_recipient(stream.id)
            messages = Message.objects.filter(recipient=recipient).order_by('pub_date')
            self.assertEqual(len(messages), message_count)
            self.assertIn(text, messages[0].content)
    def test_create_realm_non_existing_email(self) -> None:
        # Creating a realm with an email that has no existing account.
        self.check_able_to_create_realm("user1@test.com")
    def test_create_realm_existing_email(self) -> None:
        # Creating a realm with an email already registered in another realm.
        self.check_able_to_create_realm("hamlet@zulip.com")
    def test_create_realm_as_system_bot(self) -> None:
        # Reserved system bot addresses may not be used to create a realm.
        result = self.client_post('/new/', {'email': 'notification-bot@zulip.com'})
        self.assertEqual(result.status_code, 200)
        self.assert_in_response('notification-bot@zulip.com is an email address reserved', result)
    def test_create_realm_no_creation_key(self) -> None:
        """
        Trying to create a realm without a creation_key should fail when
        OPEN_REALM_CREATION is false.
        """
        email = "user1@test.com"
        with self.settings(OPEN_REALM_CREATION=False):
            # Create new realm with the email, but no creation key.
            result = self.client_post('/new/', {'email': email})
            self.assertEqual(result.status_code, 200)
            self.assert_in_response('New organization creation disabled', result)
    @override_settings(OPEN_REALM_CREATION=True)
    def test_create_realm_with_subdomain(self) -> None:
        """Creating a realm via /new/ should set its name and subdomain."""
        password = "test"
        string_id = "zuliptest"
        email = "user1@test.com"
        realm_name = "Test"
        # Make sure the realm does not exist
        self.assertIsNone(get_realm(string_id))
        # Create new realm with the email
        result = self.client_post('/new/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/new/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        result = self.submit_reg_form_for_user(email, password,
                                               realm_subdomain = string_id,
                                               realm_name=realm_name,
                                               # Pass HTTP_HOST for the target subdomain
                                               HTTP_HOST=string_id + ".testserver")
        self.assertEqual(result.status_code, 302)
        # Make sure the realm is created
        realm = get_realm(string_id)
        self.assertIsNotNone(realm)
        self.assertEqual(realm.string_id, string_id)
        self.assertEqual(get_user(email, realm).realm, realm)
        self.assertEqual(realm.name, realm_name)
        self.assertEqual(realm.subdomain, string_id)
    @override_settings(OPEN_REALM_CREATION=True)
    def test_mailinator_signup(self) -> None:
        # Disposable addresses are rejected at realm creation time.
        result = self.client_post('/new/', {'email': "hi@mailinator.com"})
        self.assert_in_response('Please use your real email address.', result)
    @override_settings(OPEN_REALM_CREATION=True)
    def test_subdomain_restrictions(self) -> None:
        """Invalid or reserved subdomains are rejected; a valid one works."""
        password = "test"
        email = "user1@test.com"
        realm_name = "Test"
        result = self.client_post('/new/', {'email': email})
        self.client_get(result["Location"])
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        self.client_get(confirmation_url)
        # Map of bad subdomain -> fragment of the expected error message.
        errors = {'id': "length 3 or greater",
                  '-id': "cannot start or end with a",
                  'string-ID': "lowercase letters",
                  'string_id': "lowercase letters",
                  'stream': "unavailable",
                  'streams': "unavailable",
                  'about': "unavailable",
                  'abouts': "unavailable",
                  'zephyr': "unavailable"}
        for string_id, error_msg in errors.items():
            result = self.submit_reg_form_for_user(email, password,
                                                   realm_subdomain = string_id,
                                                   realm_name = realm_name)
            self.assert_in_response(error_msg, result)
        # test valid subdomain
        result = self.submit_reg_form_for_user(email, password,
                                               realm_subdomain = 'a-0',
                                               realm_name = realm_name)
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result.url.startswith('http://a-0.testserver/accounts/login/subdomain/'))
    @override_settings(OPEN_REALM_CREATION=True)
    def test_subdomain_restrictions_root_domain(self) -> None:
        """The empty (root) subdomain is rejected only when the root domain
        hosts the landing page."""
        password = "test"
        email = "user1@test.com"
        realm_name = "Test"
        result = self.client_post('/new/', {'email': email})
        self.client_get(result["Location"])
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        self.client_get(confirmation_url)
        # test root domain will fail with ROOT_DOMAIN_LANDING_PAGE
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            result = self.submit_reg_form_for_user(email, password,
                                                   realm_subdomain = '',
                                                   realm_name = realm_name)
            self.assert_in_response('unavailable', result)
        # test valid use of root domain
        result = self.submit_reg_form_for_user(email, password,
                                               realm_subdomain = '',
                                               realm_name = realm_name)
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result.url.startswith('http://testserver/accounts/login/subdomain/'))
    @override_settings(OPEN_REALM_CREATION=True)
    def test_subdomain_restrictions_root_domain_option(self) -> None:
        """Same as above, but requesting the root domain via the
        realm_in_root_domain form option instead of an empty subdomain."""
        password = "test"
        email = "user1@test.com"
        realm_name = "Test"
        result = self.client_post('/new/', {'email': email})
        self.client_get(result["Location"])
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        self.client_get(confirmation_url)
        # test root domain will fail with ROOT_DOMAIN_LANDING_PAGE
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            result = self.submit_reg_form_for_user(email, password,
                                                   realm_subdomain = 'abcdef',
                                                   realm_in_root_domain = 'true',
                                                   realm_name = realm_name)
            self.assert_in_response('unavailable', result)
        # test valid use of root domain
        result = self.submit_reg_form_for_user(email, password,
                                               realm_subdomain = 'abcdef',
                                               realm_in_root_domain = 'true',
                                               realm_name = realm_name)
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result.url.startswith('http://testserver/accounts/login/subdomain/'))
    def test_is_root_domain_available(self) -> None:
        # The root domain is unavailable when it is the landing page or when
        # an existing realm already claims it.
        self.assertTrue(is_root_domain_available())
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            self.assertFalse(is_root_domain_available())
        realm = get_realm("zulip")
        realm.string_id = Realm.SUBDOMAIN_FOR_ROOT_DOMAIN
        realm.save()
        self.assertFalse(is_root_domain_available())
    def test_subdomain_check_api(self) -> None:
        """The JSON subdomain-availability endpoint reports taken, malformed,
        and available subdomains."""
        result = self.client_get("/json/realm/subdomain/zulip")
        self.assert_in_success_response(["Subdomain unavailable. Please choose a different one."], result)
        result = self.client_get("/json/realm/subdomain/zu_lip")
        self.assert_in_success_response(["Subdomain can only have lowercase letters, numbers, and \'-\'s."], result)
        result = self.client_get("/json/realm/subdomain/hufflepuff")
        self.assert_in_success_response(["available"], result)
        self.assert_not_in_success_response(["unavailable"], result)
    def test_subdomain_check_management_command(self) -> None:
        """from_management_command relaxes the length/reserved-name rules but
        still rejects malformed subdomains."""
        # Short names should work
        check_subdomain_available('aa', from_management_command=True)
        # So should reserved ones
        check_subdomain_available('zulip', from_management_command=True)
        # malformed names should still not
        with self.assertRaises(ValidationError):
            check_subdomain_available('-ba_d-', from_management_command=True)
class UserSignUpTest(ZulipTestCase):
    def _assert_redirected_to(self, result: HttpResponse, url: str) -> None:
        # Shared helper: the response must be a 302 whose Location header
        # exactly matches the expected URL.
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result['LOCATION'], url)
def test_bad_email_configuration_for_accounts_home(self) -> None:
"""
Make sure we redirect for SMTP errors.
"""
email = self.nonreg_email('newguy')
smtp_mock = patch(
'zerver.views.registration.send_confirm_registration_email',
side_effect=smtplib.SMTPException('uh oh')
)
error_mock = patch('logging.error')
with smtp_mock, error_mock as err:
result = self.client_post('/accounts/home/', {'email': email})
self._assert_redirected_to(result, '/config-error/smtp')
self.assertEqual(
err.call_args_list[0][0][0],
'Error in accounts_home: uh oh'
)
def test_bad_email_configuration_for_create_realm(self) -> None:
"""
Make sure we redirect for SMTP errors.
"""
email = self.nonreg_email('newguy')
smtp_mock = patch(
'zerver.views.registration.send_confirm_registration_email',
side_effect=smtplib.SMTPException('uh oh')
)
error_mock = patch('logging.error')
with smtp_mock, error_mock as err:
result = self.client_post('/new/', {'email': email})
self._assert_redirected_to(result, '/config-error/smtp')
self.assertEqual(
err.call_args_list[0][0][0],
'Error in create_realm: uh oh'
)
    def test_user_default_language_and_timezone(self) -> None:
        """
        Check if the default language of new user is the default language
        of the realm, and that the timezone submitted in the form is kept.
        """
        email = self.nonreg_email('newguy')
        password = "newpassword"
        timezone = "US/Mountain"
        realm = get_realm('zulip')
        do_set_realm_property(realm, 'default_language', u"de")
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        # Pick a password and agree to the ToS.
        result = self.submit_reg_form_for_user(email, password, timezone=timezone)
        self.assertEqual(result.status_code, 302)
        user_profile = self.nonreg_user('newguy')
        self.assertEqual(user_profile.default_language, realm.default_language)
        self.assertEqual(user_profile.timezone, timezone)
        from django.core.mail import outbox
        # Drop the last outgoing e-mail so it doesn't leak into later checks.
        outbox.pop()
    def test_default_twenty_four_hour_time(self) -> None:
        """
        Check if the default twenty_four_hour_time setting of new user
        is the default twenty_four_hour_time of the realm.
        """
        email = self.nonreg_email('newguy')
        password = "newpassword"
        realm = get_realm('zulip')
        # Change the realm default so we can tell it was actually copied.
        do_set_realm_property(realm, 'default_twenty_four_hour_time', True)
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        result = self.submit_reg_form_for_user(email, password)
        self.assertEqual(result.status_code, 302)
        user_profile = self.nonreg_user('newguy')
        self.assertEqual(user_profile.twenty_four_hour_time, realm.default_twenty_four_hour_time)
def test_signup_already_active(self) -> None:
"""
Check if signing up with an active email redirects to a login page.
"""
email = self.example_email("hamlet")
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertIn('login', result['Location'])
result = self.client_get(result.url)
self.assert_in_response("You've already registered", result)
def test_signup_system_bot(self) -> None:
email = "notification-bot@zulip.com"
result = self.client_post('/accounts/home/', {'email': email}, subdomain="lear")
self.assertEqual(result.status_code, 302)
self.assertIn('login', result['Location'])
result = self.client_get(result.url)
# This is not really the right error message, but at least it's an error.
self.assert_in_response("You've already registered", result)
    def test_signup_existing_email(self) -> None:
        """
        Check if signing up with an email used in another realm succeeds.
        """
        email = self.example_email('hamlet')
        password = "newpassword"
        realm = get_realm('lear')
        # Sign up on the "lear" subdomain with an email already registered
        # in the "zulip" realm.
        result = self.client_post('/accounts/home/', {'email': email}, subdomain="lear")
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result["Location"], subdomain="lear")
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url, subdomain="lear")
        self.assertEqual(result.status_code, 200)
        result = self.submit_reg_form_for_user(email, password, subdomain="lear")
        self.assertEqual(result.status_code, 302)
        # Raises if the account was not created in the lear realm.
        get_user(email, realm)
        # The same address now has one account in each realm.
        self.assertEqual(UserProfile.objects.filter(email=email).count(), 2)
    def test_signup_invalid_name(self) -> None:
        """
        Check if an invalid name during signup is handled properly: the
        registration form is re-rendered with an error instead of crashing.
        """
        email = "newguy@zulip.com"
        password = "newpassword"
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        # Pick a password and agree to the ToS.
        result = self.submit_reg_form_for_user(email, password, full_name="<invalid>")
        self.assert_in_success_response(["Invalid characters in name!"], result)
        # Verify that the user is asked for name and password
        self.assert_in_success_response(['id_password', 'id_full_name'], result)
    def test_signup_without_password(self) -> None:
        """
        Check if signing up without a password works properly when
        password_auth_enabled is False.
        """
        email = self.nonreg_email('newuser')
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        # With password auth disabled, registration succeeds without a
        # 'password' field in the form.
        with patch('zerver.views.registration.password_auth_enabled', return_value=False):
            result = self.client_post(
                '/accounts/register/',
                {'full_name': 'New User',
                 'key': find_key_by_email(email),
                 'terms': True})
        # User should now be logged in.
        self.assertEqual(result.status_code, 302)
        user_profile = self.nonreg_user('newuser')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
    def test_signup_without_full_name(self) -> None:
        """
        Check if signing up without a full name redirects to a registration
        form.
        """
        email = "newguy@zulip.com"
        password = "newpassword"
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        # Post the registration form without a 'full_name' field.
        result = self.client_post(
            '/accounts/register/',
            {'password': password,
             'key': find_key_by_email(email),
             'terms': True,
             'from_confirmation': '1'})
        self.assert_in_success_response(["You're almost there."], result)
        # Verify that the user is asked for name and password
        self.assert_in_success_response(['id_password', 'id_full_name'], result)
    def test_signup_with_full_name(self) -> None:
        """
        Check that signing up *with* a full name still lands on the
        registration confirmation ("You're almost there") page.
        """
        email = "newguy@zulip.com"
        password = "newpassword"
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        result = self.client_post(
            '/accounts/register/',
            {'password': password,
             'key': find_key_by_email(email),
             'terms': True,
             'full_name': "New Guy",
             'from_confirmation': '1'})
        self.assert_in_success_response(["You're almost there."], result)
    def test_signup_with_default_stream_group(self) -> None:
        """A new user who selects a default stream group at registration is
        subscribed to that group's streams plus the realm default streams."""
        # Check if user is subscribed to the streams of default
        # stream group as well as default streams.
        email = self.nonreg_email('newguy')
        password = "newpassword"
        realm = get_realm("zulip")
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result["Location"])
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        default_streams = []
        for stream_name in ["venice", "verona"]:
            stream = get_stream(stream_name, realm)
            do_add_default_stream(stream)
            default_streams.append(stream)
        group1_streams = []
        for stream_name in ["scotland", "denmark"]:
            stream = get_stream(stream_name, realm)
            group1_streams.append(stream)
        do_create_default_stream_group(realm, "group 1", "group 1 description", group1_streams)
        result = self.submit_reg_form_for_user(email, password, default_stream_groups=["group 1"])
        self.check_user_subscribed_only_to_streams("newguy", default_streams + group1_streams)
    def test_signup_with_multiple_default_stream_groups(self) -> None:
        """Selecting several default stream groups subscribes the new user to
        the union of their streams (duplicates collapsed) plus defaults."""
        # Check if user is subscribed to the streams of default
        # stream groups as well as default streams.
        email = self.nonreg_email('newguy')
        password = "newpassword"
        realm = get_realm("zulip")
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result["Location"])
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        default_streams = []
        for stream_name in ["venice", "verona"]:
            stream = get_stream(stream_name, realm)
            do_add_default_stream(stream)
            default_streams.append(stream)
        group1_streams = []
        for stream_name in ["scotland", "denmark"]:
            stream = get_stream(stream_name, realm)
            group1_streams.append(stream)
        do_create_default_stream_group(realm, "group 1", "group 1 description", group1_streams)
        group2_streams = []
        # "scotland" appears in both groups, exercising de-duplication.
        for stream_name in ["scotland", "rome"]:
            stream = get_stream(stream_name, realm)
            group2_streams.append(stream)
        do_create_default_stream_group(realm, "group 2", "group 2 description", group2_streams)
        result = self.submit_reg_form_for_user(email, password,
                                               default_stream_groups=["group 1", "group 2"])
        self.check_user_subscribed_only_to_streams(
            "newguy", list(set(default_streams + group1_streams + group2_streams)))
    def test_signup_without_user_settings_from_another_realm(self) -> None:
        """Registering in a second realm without importing from an existing
        account should leave the new account with default settings, not the
        ones configured on the other realm's account."""
        email = self.example_email('hamlet')
        password = "newpassword"
        subdomain = "lear"
        realm = get_realm("lear")
        # Make an account in the Zulip realm, but we're not copying from there.
        hamlet_in_zulip = get_user(self.example_email("hamlet"), get_realm("zulip"))
        hamlet_in_zulip.left_side_userlist = True
        hamlet_in_zulip.default_language = "de"
        hamlet_in_zulip.emojiset = "twitter"
        hamlet_in_zulip.high_contrast_mode = True
        hamlet_in_zulip.enter_sends = True
        hamlet_in_zulip.tutorial_status = UserProfile.TUTORIAL_FINISHED
        hamlet_in_zulip.save()
        result = self.client_post('/accounts/home/', {'email': email}, subdomain=subdomain)
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result["Location"], subdomain=subdomain)
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url, subdomain=subdomain)
        self.assertEqual(result.status_code, 200)
        result = self.submit_reg_form_for_user(email, password, source_realm="on",
                                               HTTP_HOST=subdomain + ".testserver")
        # The new lear account has pristine defaults.
        hamlet = get_user(self.example_email("hamlet"), realm)
        self.assertEqual(hamlet.left_side_userlist, False)
        self.assertEqual(hamlet.default_language, "en")
        self.assertEqual(hamlet.emojiset, "google-blob")
        self.assertEqual(hamlet.high_contrast_mode, False)
        self.assertEqual(hamlet.enable_stream_sounds, False)
        self.assertEqual(hamlet.enter_sends, False)
        self.assertEqual(hamlet.tutorial_status, UserProfile.TUTORIAL_WAITING)
def test_signup_with_user_settings_from_another_realm(self) -> None:
email = self.example_email('hamlet')
password = "newpassword"
subdomain = "lear"
lear_realm = get_realm("lear")
zulip_realm = get_realm("zulip")
self.login(self.example_email("hamlet"))
with get_test_image_file('img.png') as image_file:
self.client_post("/json/users/me/avatar", {'file': image_file})
hamlet_in_zulip = get_user(self.example_email("hamlet"), zulip_realm)
hamlet_in_zulip.left_side_userlist = True
hamlet_in_zulip.default_language = "de"
hamlet_in_zulip.emojiset = "twitter"
hamlet_in_zulip.high_contrast_mode = True
hamlet_in_zulip.enter_sends = True
hamlet_in_zulip.tutorial_status = UserProfile.TUTORIAL_FINISHED
hamlet_in_zulip.save()
result = self.client_post('/accounts/home/', {'email': email}, subdomain=subdomain)
self.assertEqual(result.status_code, 302)
result = self.client_get(result["Location"], subdomain=subdomain)
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url, subdomain=subdomain)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email, password, source_realm="zulip",
HTTP_HOST=subdomain + ".testserver")
hamlet_in_lear = get_user(self.example_email("hamlet"), lear_realm)
self.assertEqual(hamlet_in_lear.left_side_userlist, True)
self.assertEqual(hamlet_in_lear.default_language, "de")
self.assertEqual(hamlet_in_lear.emojiset, "twitter")
self.assertEqual(hamlet_in_lear.high_contrast_mode, True)
self.assertEqual(hamlet_in_lear.enter_sends, True)
self.assertEqual(hamlet_in_lear.enable_stream_sounds, False)
self.assertEqual(hamlet_in_lear.tutorial_status, UserProfile.TUTORIAL_FINISHED)
zulip_path_id = avatar_disk_path(hamlet_in_zulip)
hamlet_path_id = avatar_disk_path(hamlet_in_zulip)
self.assertEqual(open(zulip_path_id, "rb").read(), open(hamlet_path_id, "rb").read())
    def test_signup_invalid_subdomain(self) -> None:
        """
        Check if attempting to authenticate to the wrong subdomain logs an
        error and redirects.
        """
        email = "newuser@zulip.com"
        password = "newpassword"
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        def invalid_subdomain(**kwargs: Any) -> Any:
            # Mimic a backend signalling an invalid-subdomain failure via
            # the mutable return_data dict (returns None, i.e. auth failed).
            return_data = kwargs.get('return_data', {})
            return_data['invalid_subdomain'] = True
        with patch('zerver.views.registration.authenticate', side_effect=invalid_subdomain):
            with patch('logging.error') as mock_error:
                result = self.client_post(
                    '/accounts/register/',
                    {'password': password,
                     'full_name': 'New User',
                     'key': find_key_by_email(email),
                     'terms': True})
        mock_error.assert_called_once()
        self.assertEqual(result.status_code, 302)
    def test_replace_subdomain_in_confirmation_link(self) -> None:
        """
        Check that manually changing the subdomain in a registration
        confirmation link doesn't allow you to register to a different realm.
        """
        email = "newuser@zulip.com"
        self.client_post('/accounts/home/', {'email': email})
        # Replay the confirmation key against a different subdomain; it must
        # be rejected rather than register the user on "zephyr".
        result = self.client_post(
            '/accounts/register/',
            {'password': "password",
             'key': find_key_by_email(email),
             'terms': True,
             'full_name': "New User",
             'from_confirmation': '1'}, subdomain="zephyr")
        self.assert_in_success_response(["We couldn't find your confirmation link"], result)
def test_failed_signup_due_to_restricted_domain(self) -> None:
realm = get_realm('zulip')
realm.invite_required = False
realm.save()
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
email = 'user@acme.com'
form = HomepageForm({'email': email}, realm=realm)
self.assertIn("Your email address, {}, is not in one of the domains".format(email),
form.errors['email'][0])
def test_failed_signup_due_to_disposable_email(self) -> None:
realm = get_realm('zulip')
realm.emails_restricted_to_domains = False
realm.disallow_disposable_email_addresses = True
realm.save()
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
email = 'abc@mailnator.com'
form = HomepageForm({'email': email}, realm=realm)
self.assertIn("Please use your real email address", form.errors['email'][0])
def test_failed_signup_due_to_email_containing_plus(self) -> None:
realm = get_realm('zulip')
realm.emails_restricted_to_domains = True
realm.save()
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
email = 'iago+label@zulip.com'
form = HomepageForm({'email': email}, realm=realm)
self.assertIn("Email addresses containing + are not allowed in this organization.", form.errors['email'][0])
def test_failed_signup_due_to_invite_required(self) -> None:
realm = get_realm('zulip')
realm.invite_required = True
realm.save()
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
email = 'user@zulip.com'
form = HomepageForm({'email': email}, realm=realm)
self.assertIn("Please request an invite for {} from".format(email),
form.errors['email'][0])
def test_failed_signup_due_to_nonexistent_realm(self) -> None:
request = HostRequestMock(host = 'acme.' + settings.EXTERNAL_HOST)
request.session = {} # type: ignore
email = 'user@acme.com'
form = HomepageForm({'email': email}, realm=None)
self.assertIn("organization you are trying to join using {} does "
"not exist".format(email), form.errors['email'][0])
def test_access_signup_page_in_root_domain_without_realm(self) -> None:
result = self.client_get('/register', subdomain="", follow=True)
self.assert_in_success_response(["Find your Zulip accounts"], result)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_registration_from_confirmation(self) -> None:
        """Confirmation-link registration with LDAP enabled should prefill
        the full name from the directory and tolerate malformed LDAP data."""
        password = "testing"
        email = "newuser@zulip.com"
        subdomain = "zulip"
        ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
        ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
        mock_initialize = ldap_patcher.start()
        mock_ldap = MockLDAP()
        mock_initialize.return_value = mock_ldap
        mock_ldap.directory = {
            'uid=newuser,ou=users,dc=zulip,dc=com': {
                'userPassword': 'testing',
                'fn': ['New LDAP fullname']
            }
        }
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        from django.core.mail import outbox
        for message in reversed(outbox):
            if email in message.to:
                confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + r"(\S+)>")
                confirmation_url = confirmation_link_pattern.search(
                    message.body).groups()[0]
                break
        else:
            raise AssertionError("Couldn't find a confirmation email.")
        with self.settings(
                POPULATE_PROFILE_VIA_LDAP=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_BIND_PASSWORD='',
                AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
                AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
            result = self.client_get(confirmation_url)
            self.assertEqual(result.status_code, 200)
            # Full name should be set from LDAP
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Ignore",
                                                   from_confirmation="1",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            self.assert_in_success_response(["You're almost there.",
                                             "New LDAP fullname",
                                             "newuser@zulip.com"],
                                            result)
            # Verify that the user is asked for name
            self.assert_in_success_response(['id_full_name'], result)
            # TODO: Ideally, we wouldn't ask for a password if LDAP is
            # enabled, in which case this assert should be inverted.
            self.assert_in_success_response(['id_password'], result)
            # Test the TypeError exception handler
            mock_ldap.directory = {
                'uid=newuser,ou=users,dc=zulip,dc=com': {
                    'userPassword': 'testing',
                    'fn': None  # This will raise TypeError
                }
            }
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   from_confirmation='1',
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            self.assert_in_success_response(["You're almost there.",
                                             "newuser@zulip.com"],
                                            result)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_registration_end_to_end(self) -> None:
        """Full LDAP registration flow: confirm the link, reject a wrong
        password, then create the account with the correct LDAP password."""
        password = "testing"
        email = "newuser@zulip.com"
        subdomain = "zulip"
        ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
        ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
        mock_initialize = ldap_patcher.start()
        mock_ldap = MockLDAP()
        mock_initialize.return_value = mock_ldap
        full_name = 'New LDAP fullname'
        mock_ldap.directory = {
            'uid=newuser,ou=users,dc=zulip,dc=com': {
                'userPassword': 'testing',
                'fn': [full_name],
                'sn': ['shortname'],
            }
        }
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        with self.settings(
                POPULATE_PROFILE_VIA_LDAP=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_BIND_PASSWORD='',
                AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
                AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
            # Click confirmation link
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Ignore",
                                                   from_confirmation="1",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            # Full name should be set from LDAP
            self.assert_in_success_response(["You're almost there.",
                                             full_name,
                                             "newuser@zulip.com"],
                                            result)
            # Submit the final form with the wrong password.
            result = self.submit_reg_form_for_user(email,
                                                   'wrongpassword',
                                                   full_name=full_name,
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            # Didn't create an account
            with self.assertRaises(UserProfile.DoesNotExist):
                user_profile = UserProfile.objects.get(email=email)
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, "/accounts/login/?email=newuser%40zulip.com")
            # Submit the final form with the correct password.
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name=full_name,
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
        user_profile = UserProfile.objects.get(email=email)
        # Name comes from form which was set by LDAP.
        self.assertEqual(user_profile.full_name, full_name)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                            'zproject.backends.ZulipDummyBackend'))
def test_ldap_auto_registration_on_login(self) -> None:
    """The most common way for LDAP authentication to be used is with a
    server that doesn't have a terms-of-service required, in which
    case we offer a complete single-sign-on experience (where the
    user just enters their LDAP username and password, and their
    account is created if it doesn't already exist).

    This test verifies that flow.
    """
    password = "testing"
    email = "newuser@zulip.com"
    subdomain = "zulip"
    ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}

    # Replace the real LDAP connection with an in-memory mock directory.
    ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
    mock_initialize = ldap_patcher.start()
    mock_ldap = MockLDAP()
    mock_initialize.return_value = mock_ldap

    full_name = 'New LDAP fullname'
    mock_ldap.directory = {
        'uid=newuser,ou=users,dc=zulip,dc=com': {
            'userPassword': 'testing',
            'fn': [full_name],
            'sn': ['shortname'],
        }
    }
    with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_BIND_PASSWORD='',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
            AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
        # Simply logging in should transparently create the account.
        self.login_with_return(email, password,
                               HTTP_HOST=subdomain + ".testserver")

    user_profile = UserProfile.objects.get(email=email)
    # Name comes from form which was set by LDAP.
    self.assertEqual(user_profile.full_name, full_name)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                            'zproject.backends.ZulipDummyBackend'))
def test_ldap_registration_when_names_changes_are_disabled(self) -> None:
    """When name changes are disabled, the full name at registration
    should come from the LDAP directory (stashed in the session when
    the confirmation link is visited), not from the submitted form."""
    password = "testing"
    email = "newuser@zulip.com"
    subdomain = "zulip"
    ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}

    # Replace the real LDAP connection with an in-memory mock directory.
    ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
    mock_initialize = ldap_patcher.start()
    mock_ldap = MockLDAP()
    mock_initialize.return_value = mock_ldap
    mock_ldap.directory = {
        'uid=newuser,ou=users,dc=zulip,dc=com': {
            'userPassword': 'testing',
            'fn': ['New LDAP fullname'],
            'sn': ['New LDAP shortname'],
        }
    }

    # Request a registration confirmation email.
    with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
        result = self.client_post('/register/', {'email': email})
    self.assertEqual(result.status_code, 302)
    self.assertTrue(result["Location"].endswith(
        "/accounts/send_confirm/%s" % (email,)))
    result = self.client_get(result["Location"])
    self.assert_in_response("Check your email so we can get started.", result)

    with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_BIND_PASSWORD='',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
            AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
        # Click confirmation link.  This sets the
        # 'authenticated_full_name' session variable, which will later
        # be used to set the full name of the user.
        result = self.submit_reg_form_for_user(email,
                                               password,
                                               full_name="Ignore",
                                               from_confirmation="1",
                                               # Pass HTTP_HOST for the target subdomain
                                               HTTP_HOST=subdomain + ".testserver")

        with patch('zerver.views.registration.name_changes_disabled', return_value=True):
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
    user_profile = UserProfile.objects.get(email=email)
    # Name comes from LDAP session.
    self.assertEqual(user_profile.full_name, 'New LDAP fullname')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                            'zproject.backends.EmailAuthBackend',
                                            'zproject.backends.ZulipDummyBackend'))
def test_signup_with_ldap_and_email_enabled_using_email(self) -> None:
    """With both LDAP and email auth enabled, signup behavior depends on
    whether the email's domain is inside LDAP_APPEND_DOMAIN:

    * inside the LDAP domain: the LDAP password must match, so a wrong
      password means no account is created;
    * outside the LDAP domain: the account is created with a regular
      Zulip-database password.
    """
    password = "mynewpassword"
    email = "newuser@zulip.com"
    subdomain = "zulip"
    ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}

    # Replace the real LDAP connection with an in-memory directory;
    # note the LDAP password ('testing') differs from `password` above.
    ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
    mock_initialize = ldap_patcher.start()
    mock_ldap = MockLDAP()
    mock_initialize.return_value = mock_ldap
    mock_ldap.directory = {
        'uid=newuser,ou=users,dc=zulip,dc=com': {
            'userPassword': 'testing',
            'fn': ['New LDAP fullname'],
            'sn': ['New LDAP shortname'],
        }
    }

    # Request a registration confirmation email.
    with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
        result = self.client_post('/register/', {'email': email})
    self.assertEqual(result.status_code, 302)
    self.assertTrue(result["Location"].endswith(
        "/accounts/send_confirm/%s" % (email,)))
    result = self.client_get(result["Location"])
    self.assert_in_response("Check your email so we can get started.", result)

    # If the user's email is inside the LDAP domain and we just
    # have a wrong password, then we refuse to create an account
    with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            # Matches the new user's email domain, so LDAP auth applies.
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_BIND_PASSWORD='',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
            AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
        result = self.submit_reg_form_for_user(
            email,
            password,
            from_confirmation="1",
            # Pass HTTP_HOST for the target subdomain
            HTTP_HOST=subdomain + ".testserver")
        self.assertEqual(result.status_code, 200)
        result = self.submit_reg_form_for_user(email,
                                               password,
                                               full_name="Non-LDAP Full Name",
                                               # Pass HTTP_HOST for the target subdomain
                                               HTTP_HOST=subdomain + ".testserver")
        self.assertEqual(result.status_code, 302)
        # We get redirected back to the login page because password was wrong
        self.assertEqual(result.url, "/accounts/login/?email=newuser%40zulip.com")
        self.assertFalse(UserProfile.objects.filter(email=email).exists())

    # If the user's email is outside the LDAP domain, though, we
    # successfully create an account with a password in the Zulip
    # database.
    with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            # Important: This doesn't match the new user
            LDAP_APPEND_DOMAIN='example.com',
            AUTH_LDAP_BIND_PASSWORD='',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
            AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
        with patch('zerver.views.registration.logging.warning') as mock_warning:
            result = self.submit_reg_form_for_user(
                email,
                password,
                from_confirmation="1",
                # Pass HTTP_HOST for the target subdomain
                HTTP_HOST=subdomain + ".testserver")
            self.assertEqual(result.status_code, 200)
            mock_warning.assert_called_once_with("New account email newuser@zulip.com could not be found in LDAP")

        result = self.submit_reg_form_for_user(email,
                                               password,
                                               full_name="Non-LDAP Full Name",
                                               # Pass HTTP_HOST for the target subdomain
                                               HTTP_HOST=subdomain + ".testserver")
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, "http://zulip.testserver/")
    user_profile = UserProfile.objects.get(email=email)
    # Name comes from the POST request, not LDAP
    self.assertEqual(user_profile.full_name, 'Non-LDAP Full Name')
def test_registration_when_name_changes_are_disabled(self) -> None:
    """
    Test `name_changes_disabled` when we are not running under LDAP.
    """
    password = "testing"
    email = "newuser@zulip.com"
    subdomain = "zulip"

    # Request a registration confirmation email.
    with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
        result = self.client_post('/register/', {'email': email})
    self.assertEqual(result.status_code, 302)
    self.assertTrue(result["Location"].endswith(
        "/accounts/send_confirm/%s" % (email,)))
    result = self.client_get(result["Location"])
    self.assert_in_response("Check your email so we can get started.", result)

    # With no LDAP session present, the name submitted in the form is
    # used even when name changes are disabled.
    with patch('zerver.views.registration.name_changes_disabled', return_value=True):
        result = self.submit_reg_form_for_user(email,
                                               password,
                                               full_name="New Name",
                                               # Pass HTTP_HOST for the target subdomain
                                               HTTP_HOST=subdomain + ".testserver")
    user_profile = UserProfile.objects.get(email=email)
    # 'New Name' comes from POST data; not from LDAP session.
    self.assertEqual(user_profile.full_name, 'New Name')
def test_realm_creation_through_ldap(self) -> None:
    """Verify realm creation driven by an LDAP-authenticated signup.

    Registers via the email confirmation flow, flips the pending
    PreregistrationUser into a realm-creation one, then submits the
    realm-creation form with an LDAP-only authentication backend.
    """
    password = "testing"
    email = "newuser@zulip.com"
    subdomain = "zulip"
    realm_name = "Zulip"
    ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}

    # Replace the real LDAP connection with an in-memory mock directory.
    ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
    mock_initialize = ldap_patcher.start()
    mock_ldap = MockLDAP()
    mock_initialize.return_value = mock_ldap
    mock_ldap.directory = {
        'uid=newuser,ou=users,dc=zulip,dc=com': {
            'userPassword': 'testing',
            'fn': ['New User Name']
        }
    }

    # Request a registration confirmation email.
    with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
        result = self.client_post('/register/', {'email': email})
    self.assertEqual(result.status_code, 302)
    self.assertTrue(result["Location"].endswith(
        "/accounts/send_confirm/%s" % (email,)))
    result = self.client_get(result["Location"])
    self.assert_in_response("Check your email so we can get started.", result)

    # Visit the confirmation link.
    from django.core.mail import outbox
    for message in reversed(outbox):
        if email in message.to:
            confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + r"(\S+)>")
            confirmation_url = confirmation_link_pattern.search(
                message.body).groups()[0]
            break
    else:
        raise AssertionError("Couldn't find a confirmation email.")

    with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_BIND_PASSWORD='',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
            AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
            AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com',
            TERMS_OF_SERVICE=False,
    ):
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        # Mark the pending registration as a realm-creation one.
        # (Fix: the original line ended with a stray trailing comma,
        # accidentally wrapping the key in a 1-tuple and then indexing
        # it with key[0]; the tuple is now gone.)
        key = find_key_by_email(email)
        confirmation = Confirmation.objects.get(confirmation_key=key)
        prereg_user = confirmation.content_object
        prereg_user.realm_creation = True
        prereg_user.save()

        result = self.submit_reg_form_for_user(email,
                                               password,
                                               realm_name=realm_name,
                                               realm_subdomain=subdomain,
                                               from_confirmation='1',
                                               # Pass HTTP_HOST for the target subdomain
                                               HTTP_HOST=subdomain + ".testserver")
        self.assert_in_success_response(["You're almost there.",
                                         "newuser@zulip.com"],
                                        result)

    # Undo the LDAP mocking so later tests start from a clean slate.
    mock_ldap.reset()
    mock_initialize.stop()
@patch('DNS.dnslookup', return_value=[['sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh']])
def test_registration_of_mirror_dummy_user(self, ignored: Any) -> None:
    """Activating an inactive mirror dummy (Zephyr) user via /register/."""
    password = "test"
    subdomain = "zephyr"
    user_profile = self.mit_user("sipbtest")
    email = user_profile.email
    user_profile.is_mirror_dummy = True
    user_profile.is_active = False
    user_profile.save()

    # Request a registration confirmation email on the zephyr subdomain.
    result = self.client_post('/register/', {'email': email}, subdomain="zephyr")
    self.assertEqual(result.status_code, 302)
    self.assertTrue(result["Location"].endswith(
        "/accounts/send_confirm/%s" % (email,)))
    result = self.client_get(result["Location"], subdomain="zephyr")
    self.assert_in_response("Check your email so we can get started.", result)
    # Visit the confirmation link.
    from django.core.mail import outbox
    for message in reversed(outbox):
        if email in message.to:
            confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + r"(\S+)>")
            confirmation_url = confirmation_link_pattern.search(
                message.body).groups()[0]
            break
    else:
        raise AssertionError("Couldn't find a confirmation email.")
    result = self.client_get(confirmation_url, subdomain="zephyr")
    self.assertEqual(result.status_code, 200)

    # If the mirror dummy user is already active, attempting to
    # submit the registration form should raise an AssertionError
    # (this is an invalid state, so it's a bug we got here):
    user_profile.is_active = True
    user_profile.save()
    with self.assertRaisesRegex(AssertionError, "Mirror dummy user is already active!"):
        result = self.submit_reg_form_for_user(
            email,
            password,
            from_confirmation='1',
            # Pass HTTP_HOST for the target subdomain
            HTTP_HOST=subdomain + ".testserver")
    user_profile.is_active = False
    user_profile.save()

    # Normal flow: confirmation submit, then final form submit.
    result = self.submit_reg_form_for_user(email,
                                           password,
                                           from_confirmation='1',
                                           # Pass HTTP_HOST for the target subdomain
                                           HTTP_HOST=subdomain + ".testserver")
    self.assertEqual(result.status_code, 200)
    result = self.submit_reg_form_for_user(email,
                                           password,
                                           # Pass HTTP_HOST for the target subdomain
                                           HTTP_HOST=subdomain + ".testserver")
    self.assertEqual(result.status_code, 302)
    # Successful activation leaves the user logged in.
    self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_registration_of_active_mirror_dummy_user(self) -> None:
    """Re-registering an already-active mirror dummy user must fail.

    The registration endpoint asserts that a mirror dummy account is
    inactive before activating it, so POSTing to /register/ for an
    active one raises AssertionError.
    """
    sipb_user = self.mit_user("sipbtest")
    address = sipb_user.email
    sipb_user.is_mirror_dummy = True
    sipb_user.is_active = True
    sipb_user.save()

    expected_message = "Mirror dummy user is already active!"
    with self.assertRaisesRegex(AssertionError, expected_message):
        self.client_post('/register/', {'email': address}, subdomain="zephyr")
class DeactivateUserTest(ZulipTestCase):
    """Tests for self-deactivation via DELETE /json/users/me."""

    def test_deactivate_user(self) -> None:
        """A normal user can deactivate themselves and then cannot log in."""
        email = self.example_email("hamlet")
        self.login(email)
        user = self.example_user('hamlet')
        self.assertTrue(user.is_active)
        result = self.client_delete('/json/users/me')
        self.assert_json_success(result)
        user = self.example_user('hamlet')
        self.assertFalse(user.is_active)
        self.login(email, fails=True)

    def test_do_not_deactivate_final_admin(self) -> None:
        """The only realm administrator cannot deactivate themselves."""
        email = self.example_email("iago")
        self.login(email)
        user = self.example_user('iago')
        self.assertTrue(user.is_active)
        result = self.client_delete('/json/users/me')
        self.assert_json_error(result, "Cannot deactivate the only organization administrator.")
        user = self.example_user('iago')
        self.assertTrue(user.is_active)
        self.assertTrue(user.is_realm_admin)
        # Once a second admin exists, self-deactivation succeeds.
        email = self.example_email("hamlet")
        user_2 = self.example_user('hamlet')
        do_change_is_admin(user_2, True)
        self.assertTrue(user_2.is_realm_admin)
        result = self.client_delete('/json/users/me')
        self.assert_json_success(result)
        # Restore Iago's admin bit for other tests.
        do_change_is_admin(user, True)

    def test_do_not_deactivate_final_user(self) -> None:
        """The last active user in a realm cannot deactivate themselves."""
        realm = get_realm('zulip')
        # Deactivate everyone except the realm admins, leaving Iago alone.
        UserProfile.objects.filter(realm=realm, is_realm_admin=False).update(is_active=False)
        email = self.example_email("iago")
        self.login(email)
        result = self.client_delete('/json/users/me')
        self.assert_json_error(result, "Cannot deactivate the only user.")
class TestLoginPage(ZulipTestCase):
    """Rendering and redirect behavior of the /login/ page."""

    def test_login_page_wrong_subdomain_error(self) -> None:
        # ?subdomain=1 flags a wrong-subdomain login attempt.
        result = self.client_get("/login/?subdomain=1")
        self.assertIn(WRONG_SUBDOMAIN_ERROR, result.content.decode('utf8'))

    @patch('django.http.HttpRequest.get_host')
    def test_login_page_redirects_for_root_alias(self, mock_get_host: MagicMock) -> None:
        # On a root-domain alias with the landing page enabled, /login/
        # redirects to the find-accounts flow.
        mock_get_host.return_value = 'www.testserver'
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/accounts/find/')

    @patch('django.http.HttpRequest.get_host')
    def test_login_page_redirects_for_root_domain(self, mock_get_host: MagicMock) -> None:
        mock_get_host.return_value = 'testserver'
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/accounts/find/')

        # Same redirect for a configured EXTERNAL_HOST with aliases.
        mock_get_host.return_value = 'www.testserver.com'
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True,
                           EXTERNAL_HOST='www.testserver.com',
                           ROOT_SUBDOMAIN_ALIASES=['test']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/accounts/find/')

    @patch('django.http.HttpRequest.get_host')
    def test_login_page_works_without_subdomains(self, mock_get_host: MagicMock) -> None:
        # Without the landing page, both the www alias and the bare
        # host render the login page normally (HTTP 200).
        mock_get_host.return_value = 'www.testserver'
        with self.settings(ROOT_SUBDOMAIN_ALIASES=['www']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 200)
        mock_get_host.return_value = 'testserver'
        with self.settings(ROOT_SUBDOMAIN_ALIASES=['www']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 200)
class TestFindMyTeam(ZulipTestCase):
    """Tests for the /accounts/find/ ("find my accounts") flow."""

    def test_template(self) -> None:
        result = self.client_get('/accounts/find/')
        self.assertIn("Find your Zulip accounts", result.content.decode('utf8'))

    def test_result(self) -> None:
        # Two known addresses; both receive mail.
        result = self.client_post('/accounts/find/',
                                  dict(emails="iago@zulip.com,cordelia@zulip.com"))
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, "/accounts/find/?emails=iago%40zulip.com%2Ccordelia%40zulip.com")
        result = self.client_get(result.url)
        content = result.content.decode('utf8')
        self.assertIn("Emails sent! You will only receive emails", content)
        self.assertIn(self.example_email("iago"), content)
        self.assertIn(self.example_email("cordelia"), content)
        from django.core.mail import outbox
        # 3 = 1 + 2 -- Cordelia gets an email each for the "zulip" and "lear" realms.
        self.assertEqual(len(outbox), 3)

    def test_find_team_ignore_invalid_email(self) -> None:
        # A well-formed but unknown address is echoed back on the page
        # but produces no email.
        result = self.client_post('/accounts/find/',
                                  dict(emails="iago@zulip.com,invalid_email@zulip.com"))
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, "/accounts/find/?emails=iago%40zulip.com%2Cinvalid_email%40zulip.com")
        result = self.client_get(result.url)
        content = result.content.decode('utf8')
        self.assertIn("Emails sent! You will only receive emails", content)
        self.assertIn(self.example_email("iago"), content)
        self.assertIn("invalid_email@", content)
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 1)

    def test_find_team_reject_invalid_email(self) -> None:
        # A malformed address fails form validation outright.
        result = self.client_post('/accounts/find/',
                                  dict(emails="invalid_string"))
        self.assertEqual(result.status_code, 200)
        self.assertIn(b"Enter a valid email", result.content)
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)

        # Just for coverage on perhaps-unnecessary validation code.
        result = self.client_get('/accounts/find/?emails=invalid')
        self.assertEqual(result.status_code, 200)

    def test_find_team_zero_emails(self) -> None:
        data = {'emails': ''}
        result = self.client_post('/accounts/find/', data)
        self.assertIn('This field is required', result.content.decode('utf8'))
        self.assertEqual(result.status_code, 200)
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)

    def test_find_team_one_email(self) -> None:
        data = {'emails': self.example_email("hamlet")}
        result = self.client_post('/accounts/find/', data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, '/accounts/find/?emails=hamlet%40zulip.com')
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 1)

    def test_find_team_deactivated_user(self) -> None:
        # Deactivated accounts do not receive mail.
        do_deactivate_user(self.example_user("hamlet"))
        data = {'emails': self.example_email("hamlet")}
        result = self.client_post('/accounts/find/', data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, '/accounts/find/?emails=hamlet%40zulip.com')
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)

    def test_find_team_deactivated_realm(self) -> None:
        # Users in a deactivated realm do not receive mail.
        do_deactivate_realm(get_realm("zulip"))
        data = {'emails': self.example_email("hamlet")}
        result = self.client_post('/accounts/find/', data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, '/accounts/find/?emails=hamlet%40zulip.com')
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)

    def test_find_team_bot_email(self) -> None:
        # Bot accounts do not receive mail.
        data = {'emails': self.example_email("webhook_bot")}
        result = self.client_post('/accounts/find/', data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, '/accounts/find/?emails=webhook-bot%40zulip.com')
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)

    def test_find_team_more_than_ten_emails(self) -> None:
        # The form rejects more than 10 addresses at once.
        data = {'emails': ','.join(['hamlet-{}@zulip.com'.format(i) for i in range(11)])}
        result = self.client_post('/accounts/find/', data)
        self.assertEqual(result.status_code, 200)
        self.assertIn("Please enter at most 10", result.content.decode('utf8'))
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)
class ConfirmationKeyTest(ZulipTestCase):
    """Tests for the confirmation_key view helper."""

    def test_confirmation_key(self) -> None:
        # Build a fake request carrying a stashed confirmation key in
        # its session, exactly as the registration flow would leave it.
        fake_request = MagicMock()
        fake_request.session = {'confirmation_key': {'confirmation_key': 'xyzzy'}}

        response = confirmation_key(fake_request)
        self.assert_json_success(response)
        self.assert_in_response('xyzzy', response)
class MobileAuthOTPTest(ZulipTestCase):
    """Tests for the one-time-pad helpers used by mobile login."""

    def test_xor_hex_strings(self) -> None:
        self.assertEqual(xor_hex_strings('1237c81ab', '18989fd12'), '0aaf57cb9')
        # Inputs of different lengths are rejected with an assertion.
        with self.assertRaises(AssertionError):
            xor_hex_strings('1', '31')

    def test_is_valid_otp(self) -> None:
        # A 64-character lowercase-hex string validates; shorter or
        # non-hex strings do not.
        self.assertEqual(is_valid_otp('1234'), False)
        self.assertEqual(is_valid_otp('1234abcd' * 8), True)
        self.assertEqual(is_valid_otp('1234abcZ' * 8), False)

    def test_ascii_to_hex(self) -> None:
        # ascii_to_hex and hex_to_ascii are inverses of each other.
        self.assertEqual(ascii_to_hex('ZcdR1234'), '5a63645231323334')
        self.assertEqual(hex_to_ascii('5a63645231323334'), 'ZcdR1234')

    def test_otp_encrypt_api_key(self) -> None:
        api_key = '12ac' * 8
        otp = '7be38894' * 8
        result = otp_encrypt_api_key(api_key, otp)
        self.assertEqual(result, '4ad1e9f7' * 8)
        # Decrypting with the same pad round-trips to the original key.
        decryped = otp_decrypt_api_key(result, otp)
        self.assertEqual(decryped, api_key)
class FollowupEmailTest(ZulipTestCase):
    """Tests for scheduling the "day 2" onboarding follow-up email."""

    def test_followup_day2_email_delay(self) -> None:
        """Each case checks the delay returned for a given join day;
        observed delays are N days minus one hour, with N varying by
        weekday (presumably to avoid weekend delivery — confirm against
        followup_day2_email_delay's implementation)."""
        user_profile = self.example_user('hamlet')
        # Test date_joined == Sunday
        user_profile.date_joined = datetime.datetime(2018, 1, 7, 1, 0, 0, 0, pytz.UTC)
        self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=2, hours=-1))
        # Test date_joined == Tuesday
        user_profile.date_joined = datetime.datetime(2018, 1, 2, 1, 0, 0, 0, pytz.UTC)
        self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=2, hours=-1))
        # Test date_joined == Thursday
        user_profile.date_joined = datetime.datetime(2018, 1, 4, 1, 0, 0, 0, pytz.UTC)
        self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=1, hours=-1))
        # Test date_joined == Friday
        user_profile.date_joined = datetime.datetime(2018, 1, 5, 1, 0, 0, 0, pytz.UTC)
        self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=3, hours=-1))

        # Time offset of America/Phoenix is -07:00
        user_profile.timezone = 'America/Phoenix'
        # Test date_joined == Friday in UTC, but Thursday in the user's timezone
        user_profile.date_joined = datetime.datetime(2018, 1, 5, 1, 0, 0, 0, pytz.UTC)
        self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=1, hours=-1))
class NoReplyEmailTest(ZulipTestCase):
    """Tests for FromAddress no-reply address generation."""

    def test_noreply_email_address(self) -> None:
        # By default the no-reply address embeds a token matching
        # TOKENIZED_NOREPLY_REGEX.
        self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, FromAddress.tokenized_no_reply_address()))
        # With token support disabled, a plain address is returned.
        with self.settings(ADD_TOKENS_TO_NOREPLY_ADDRESS=False):
            self.assertEqual(FromAddress.tokenized_no_reply_address(), "noreply@testserver")
class TwoFactorAuthTest(ZulipTestCase):
    """Tests for the two-step (password + TOTP token) login flow."""

    @patch('two_factor.models.totp')
    def test_two_factor_login(self, mock_totp: MagicMock) -> None:
        token = 123456
        email = self.example_email('hamlet')
        password = 'testing'

        user_profile = self.example_user('hamlet')
        user_profile.set_password(password)
        user_profile.save()
        # Register a default OTP device for the user.
        self.create_default_device(user_profile)

        def totp(*args: Any, **kwargs: Any) -> int:
            # Always "generate" the fixed token above.
            return token

        mock_totp.side_effect = totp

        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',),
                           TWO_FACTOR_CALL_GATEWAY='two_factor.gateways.fake.Fake',
                           TWO_FACTOR_SMS_GATEWAY='two_factor.gateways.fake.Fake',
                           TWO_FACTOR_AUTHENTICATION_ENABLED=True):
            # Step 1: submit username and password.
            first_step_data = {"username": email,
                               "password": password,
                               "two_factor_login_view-current_step": "auth"}
            result = self.client_post("/accounts/login/", first_step_data)
            self.assertEqual(result.status_code, 200)

            # Step 2: submit the TOTP token; success redirects home.
            second_step_data = {"token-otp_token": str(token),
                                "two_factor_login_view-current_step": "token"}
            result = self.client_post("/accounts/login/", second_step_data)
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result["Location"], "http://zulip.testserver")

            # Going to login page should redirect to '/' if user is already
            # logged in.
            result = self.client_get('/accounts/login/')
            self.assertEqual(result["Location"], "http://zulip.testserver")
class NameRestrictionsTest(ZulipTestCase):
    def test_whitelisted_disposable_domains(self) -> None:
        # OPayQ.com must not be classified as a disposable domain.
        # NOTE(review): the test name suggests it is explicitly
        # whitelisted — confirm against is_disposable_domain's data.
        self.assertFalse(is_disposable_domain('OPayQ.com'))
| 45.466688
| 118
| 0.63859
|
4a000aca15bba2d79d6d237224414cb6e0376deb
| 1,377
|
py
|
Python
|
test/test_limit_greater_than_allowed.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | 5
|
2021-05-17T04:45:03.000Z
|
2022-03-23T12:51:46.000Z
|
test/test_limit_greater_than_allowed.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | null | null | null |
test/test_limit_greater_than_allowed.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | 2
|
2021-06-02T07:32:26.000Z
|
2022-02-12T02:36:23.000Z
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.banned_ip_address_details import BannedIpAddressDetails
globals()['BannedIpAddressDetails'] = BannedIpAddressDetails
from cryptoapis.model.limit_greater_than_allowed import LimitGreaterThanAllowed
class TestLimitGreaterThanAllowed(unittest.TestCase):
    """Unit-test stubs for the LimitGreaterThanAllowed model."""

    def setUp(self):
        """No fixtures are required for these stub tests."""

    def tearDown(self):
        """Nothing to clean up."""

    def testLimitGreaterThanAllowed(self):
        """Placeholder test for LimitGreaterThanAllowed.

        FIXME: construct the model with its mandatory attributes set to
        example values, e.g.::

            model = LimitGreaterThanAllowed()  # noqa: E501
        """
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 35.307692
| 484
| 0.764706
|
4a000b8e52df1eced1b39146dd91133024a39afd
| 157
|
py
|
Python
|
emmett_mongorest/__init__.py
|
gi0baro/emmett-mongorest
|
814658b3d8051ad649f1bc7cffb32354b3b7a97c
|
[
"BSD-3-Clause"
] | null | null | null |
emmett_mongorest/__init__.py
|
gi0baro/emmett-mongorest
|
814658b3d8051ad649f1bc7cffb32354b3b7a97c
|
[
"BSD-3-Clause"
] | 3
|
2020-12-27T12:29:12.000Z
|
2022-02-27T10:34:33.000Z
|
emmett_mongorest/__init__.py
|
gi0baro/emmett-mongorest
|
814658b3d8051ad649f1bc7cffb32354b3b7a97c
|
[
"BSD-3-Clause"
] | null | null | null |
from .ext import MongoREST
from .helpers import MongoQuery
from .rest import MongoRESTModule
from .parsers import Parser
from .serializers import Serializer
| 26.166667
| 35
| 0.840764
|
4a000c01a81be730834d786cc0da40df3bd50747
| 1,399
|
py
|
Python
|
scheduler_task/study_apscheduler/examples/executors/simple.py
|
2581676612/python
|
b309564a05838b23044bb8112fd4ef71307266b6
|
[
"MIT"
] | 112
|
2017-09-19T17:38:38.000Z
|
2020-05-27T18:00:27.000Z
|
scheduler_task/study_apscheduler/examples/executors/simple.py
|
tomoncle/Python-notes
|
ce675486290c3d1c7c2e4890b57e3d0c8a1228cc
|
[
"MIT"
] | null | null | null |
scheduler_task/study_apscheduler/examples/executors/simple.py
|
tomoncle/Python-notes
|
ce675486290c3d1c7c2e4890b57e3d0c8a1228cc
|
[
"MIT"
] | 56
|
2017-09-20T01:24:12.000Z
|
2020-04-16T06:19:31.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 17-7-22 10:29 AM
# @Author  : Tom.Lee
# @CopyRight : 2016-2017
# @File    : t.py
# @Product : PyCharm
"""APScheduler demo: four jobs that each fire every 5 seconds.

Jobs are registered both via ``scheduler.add_job`` and via the
``@scheduler.scheduled_job`` decorator, using both the 'interval' and
'cron' trigger styles, on a BlockingScheduler configured with a
thread-pool and a process-pool executor.

Fix: the original used Python 2 ``print`` statements, which are syntax
errors on Python 3; they are now ``print()`` calls.
"""
import datetime

from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from apscheduler.schedulers.blocking import BlockingScheduler

scheduler = BlockingScheduler()


def current_time():
    """Return the current local time formatted as 'YYYY-mm-dd HH:MM:SS'."""
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


# ################### Register jobs via add_job #################
def job1():
    print('job1 is running, Now is %s' % current_time())


def job2():
    print('job2 is running, Now is %s' % current_time())


# Run job1 every 5 seconds (interval trigger).
scheduler.add_job(job1, 'interval', seconds=5, id='job1')
# Run job2 every 5 seconds (cron trigger).
scheduler.add_job(job2, 'cron', second='*/5', id='job2')


# ################### Register jobs via decorator #################
# Run job3 every 5 seconds (interval trigger).
@scheduler.scheduled_job('interval', seconds=5, id='job3')
def job3():
    print('job3 is running, Now is %s' % current_time())


# Run job4 every 5 seconds (cron trigger).
@scheduler.scheduled_job('cron', second='*/5', id='job4')
def job4():
    print('job4 is running, Now is %s' % current_time())


# Executor pools: a dedicated process pool plus the default thread pool.
executors = {
    'processpool': ProcessPoolExecutor(5),
    'default': ThreadPoolExecutor(20)
}
# Allow up to 5 concurrent instances of a job; don't coalesce missed runs.
job_defaults = {
    'coalesce': False,
    'max_instances': 5
}
scheduler.configure(executors=executors, job_defaults=job_defaults)

scheduler.start()
| 22.564516
| 78
| 0.639743
|
4a000c5b4f08feab87791776c26e0626a10f6558
| 4,125
|
py
|
Python
|
tests/conftest.py
|
koelemay/dvc
|
b9b8c46fde7cdf7d9544dd83705d95beb601792a
|
[
"Apache-2.0"
] | 1
|
2021-06-18T19:36:13.000Z
|
2021-06-18T19:36:13.000Z
|
tests/conftest.py
|
koelemay/dvc
|
b9b8c46fde7cdf7d9544dd83705d95beb601792a
|
[
"Apache-2.0"
] | 82
|
2021-05-04T02:40:05.000Z
|
2022-03-31T03:14:04.000Z
|
tests/conftest.py
|
koelemay/dvc
|
b9b8c46fde7cdf7d9544dd83705d95beb601792a
|
[
"Apache-2.0"
] | null | null | null |
import os
from contextlib import suppress
import pytest
from .dir_helpers import * # noqa, pylint: disable=wildcard-import
from .remotes import * # noqa, pylint: disable=wildcard-import
# Prevent updater and analytics from running their processes
os.environ["DVC_TEST"] = "true"
# Ensure progress output even when not outputting to raw sys.stderr console
os.environ["DVC_IGNORE_ISATTY"] = "true"
REMOTES = {
# remote: enabled_by_default?
"azure": False,
"gdrive": False,
"gs": False,
"hdfs": False,
"http": True,
"oss": False,
"s3": True,
"ssh": True,
"webdav": True,
}
@pytest.fixture(autouse=True)
def reset_loglevel(request, caplog):
    """
    Use it to ensure log level at the start of each test
    regardless of dvc.logger.setup(), Repo configs or whatever.
    """
    # Prefer the --log-level CLI option; fall back to the ini setting
    # (getini raises ValueError when the key is not declared).
    ini_opt = None
    with suppress(ValueError):
        ini_opt = request.config.getini("log_level")

    level = request.config.getoption("--log-level") or ini_opt
    if level:
        # Pin the 'dvc' logger to the requested level for the test body.
        with caplog.at_level(level.upper(), logger="dvc"):
            yield
    else:
        yield
@pytest.fixture(autouse=True)
def enable_ui():
    """Ensure dvc's console UI output is enabled for every test."""
    from dvc.ui import ui

    ui.enable()
@pytest.fixture(scope="session", autouse=True)
def _close_pools():
    """Close dvc's filesystem connection pools when the session ends."""
    from dvc.fs.pool import close_pools

    yield
    close_pools()
def _get_opt(remote_name, action):
return f"--{action}-{remote_name}"
def pytest_addoption(parser):
    """Adds remote-related flags to selectively disable/enable for tests

    Eg: If some remotes, eg: ssh is enabled to be tested for by default
    (see above `REMOTES`), then, `--disable-ssh` flag is added. If remotes
    like `hdfs` are disabled by default, `--enable-hdfs` is added to make them
    run.

    You can also make everything run-by-default with `--all` flag, which takes
    precedence on all previous `--enable-*`/`--disable-*` flags.
    """
    parser.addoption(
        "--all",
        action="store_true",
        default=False,
        help="Test all of the remotes, unless other flags also supplied",
    )
    # Register one --enable-X and one --disable-X flag per known remote;
    # default=None lets pytest_configure tell "not given" from "given".
    for remote_name in REMOTES:
        for action in ("enable", "disable"):
            opt = _get_opt(remote_name, action)
            parser.addoption(
                opt,
                action="store_true",
                default=None,
                help=f"{action} tests for {remote_name}",
            )
class DVCTestConfig:
    """Tracks which remotes are enabled for this test run."""

    def __init__(self):
        # Names of remotes whose tests should run this session.
        self.enabled_remotes = set()

    def requires(self, remote_name):
        # Skip the current test unless remote_name is enabled; names
        # that aren't remotes at all are ignored, so unrelated markers
        # pass through untouched.
        if remote_name not in REMOTES or remote_name in self.enabled_remotes:
            return

        pytest.skip(f"{remote_name} tests not enabled through CLI")

    def apply_marker(self, marker):
        # A marker named after a remote acts as a requirement on it.
        self.requires(marker.name)
def pytest_runtest_setup(item):
    """Skip tests whose remote markers aren't enabled for this run."""
    # Apply test markers to skip tests selectively
    # NOTE: this only works on individual tests,
    # for fixture, use `test_config` fixture and
    # run `test_config.requires(remote_name)`.
    for marker in item.iter_markers():
        item.config.dvc_config.apply_marker(marker)
@pytest.fixture(scope="session")
def test_config(request):
    """Session fixture exposing this run's DVCTestConfig instance."""
    return request.config.dvc_config
def pytest_configure(config):
config.dvc_config = DVCTestConfig()
for remote_name in REMOTES:
config.addinivalue_line(
"markers", f"{remote_name}: mark test as requiring {remote_name}"
)
enabled_remotes = config.dvc_config.enabled_remotes
if config.getoption("--all"):
enabled_remotes.update(REMOTES)
else:
default_enabled = {k for k, v in REMOTES.items() if v}
enabled_remotes.update(default_enabled)
for remote_name in REMOTES:
enabled_opt = _get_opt(remote_name, "enable")
disabled_opt = _get_opt(remote_name, "disable")
enabled = config.getoption(enabled_opt)
disabled = config.getoption(disabled_opt)
if disabled and enabled:
continue # default behavior if both flags are supplied
if disabled:
enabled_remotes.discard(remote_name)
if enabled:
enabled_remotes.add(remote_name)
| 27.871622
| 78
| 0.65503
|
4a000cbc4b9b26a70f7860191cca4dda71776548
| 674
|
py
|
Python
|
base.py
|
kolorowestudio/critic
|
db3b4909e15835be2009395b1541dea896d249c9
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
base.py
|
kolorowestudio/critic
|
db3b4909e15835be2009395b1541dea896d249c9
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
base.py
|
kolorowestudio/critic
|
db3b4909e15835be2009395b1541dea896d249c9
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2012 Jens Lindström, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
class Error(Exception):
pass
| 35.473684
| 80
| 0.746291
|
4a000cbfdc4c69bc74ddf59aeaa75dceabc42226
| 2,228
|
py
|
Python
|
commandopt/__init__.py
|
jaegerbobomb/commandopt
|
7009e545bf7c5eb9a842798d07bff1e4075ac507
|
[
"MIT"
] | null | null | null |
commandopt/__init__.py
|
jaegerbobomb/commandopt
|
7009e545bf7c5eb9a842798d07bff1e4075ac507
|
[
"MIT"
] | null | null | null |
commandopt/__init__.py
|
jaegerbobomb/commandopt
|
7009e545bf7c5eb9a842798d07bff1e4075ac507
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from collections import namedtuple
import functools
from itertools import chain, combinations
from commandopt.exceptions import NoCommandFoundError
__version__ = "0.2.0"
CommandsOpts = namedtuple("CommandsOpts", ["opts", "f"])
def commandopt(mandopts, opts=None):
"""Decorator to register commands given docopt arguments.
:param mandopts: List of mandatory arguments
:param opts: List of optional arguments
"""
opts = [] if opts is None else opts
def inner_decorator(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
return f(*args, **kwargs)
# register wrapped function in Command.COMMANDS mapping
Command.add_command(mandopts, wrapped)
# get all combinations of optionals arguments
# ex : (opt1,), (opt2,), (opt1, opt2) ...
opts_combinations = [combinations(opts, r) for r in range(len(opts) + 1)]
for combination in chain.from_iterable(opts_combinations):
# register wrapped function with optional arguments
Command.add_command(mandopts + list(combination), wrapped)
return wrapped
return inner_decorator
class Command(object):
"""Dumb class to keep all the registered commands."""
COMMANDS = set()
def __new__(cls, arguments, call=False, give_kwargs=False):
"""Select the right command function and call it if asked."""
f = cls.choose_command(arguments)
if call and not give_kwargs:
return f(arguments)
elif call:
raise NotImplementedError
# TODO get arguments without "--" or "<>"...
return f(**arguments)
return f
@classmethod
def add_command(cls, opts, f):
cls.COMMANDS.add(CommandsOpts(opts=tuple(opts), f=f))
@classmethod
def list_commands(cls):
return cls.COMMANDS
@classmethod
def choose_command(cls, arguments):
# First get all "True" arguments from docopt
opts_input = set([opt for opt in arguments.keys() if arguments[opt]])
for c in cls.COMMANDS:
if opts_input == set(c.opts):
return c.f
raise NoCommandFoundError(opts_input)
| 30.944444
| 81
| 0.64228
|
4a000e56e2b8d1f669730f67df72d435cc65232f
| 438
|
py
|
Python
|
machine learning training/ML/overview.py
|
TrisNol/Fashion-Product-Classification
|
760c08efe62b925a1205df45a92e91d72144151f
|
[
"MIT"
] | null | null | null |
machine learning training/ML/overview.py
|
TrisNol/Fashion-Product-Classification
|
760c08efe62b925a1205df45a92e91d72144151f
|
[
"MIT"
] | null | null | null |
machine learning training/ML/overview.py
|
TrisNol/Fashion-Product-Classification
|
760c08efe62b925a1205df45a92e91d72144151f
|
[
"MIT"
] | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
data = pd.read_csv("./assets/styles.csv", error_bad_lines=False) # multiple lines with errors
# print(data.head)
# for column in data.columns:
# print("Unique Values for " + column)
# print(data[column].unique())
names = data['gender'].unique()
values = data.gender.value_counts()
print(values)
plt.figure(figsize=(9, 3))
plt.subplot(131)
plt.bar(names, values)
plt.show()
| 23.052632
| 93
| 0.712329
|
4a000ebc913fbd48a189b89133cf64ed7392db8a
| 2,090
|
py
|
Python
|
examples/adspygoogle/dfp/v201311/creative_service/get_creatives_by_statement.py
|
cherry-wb/googleads-python-lib
|
24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04
|
[
"Apache-2.0"
] | null | null | null |
examples/adspygoogle/dfp/v201311/creative_service/get_creatives_by_statement.py
|
cherry-wb/googleads-python-lib
|
24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04
|
[
"Apache-2.0"
] | null | null | null |
examples/adspygoogle/dfp/v201311/creative_service/get_creatives_by_statement.py
|
cherry-wb/googleads-python-lib
|
24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04
|
[
"Apache-2.0"
] | 2
|
2020-04-02T19:00:31.000Z
|
2020-08-06T03:28:38.000Z
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all image creatives. The statement retrieves up to the
maximum page size limit of 500. To create an image creative,
run create_creatives.py."""
__author__ = ('Jeff Sham',
'Vincent Tsao')
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
# Initialize appropriate service.
creative_service = client.GetService('CreativeService', version='v201311')
# Create statement object to only select image creatives.
values = [{
'key': 'creativeType',
'value': {
'xsi_type': 'TextValue',
'value': 'ImageCreative'
}
}]
filter_statement = {'query': 'WHERE creativeType = :creativeType LIMIT 500',
'values': values}
# Get creatives by statement.
response = creative_service.GetCreativesByStatement(filter_statement)[0]
creatives = []
if 'results' in response:
creatives = response['results']
# Display results.
for creative in creatives:
print ('Creative with id \'%s\', name \'%s\', and type \'%s\' was found.'
% (creative['id'], creative['name'], creative['Creative_Type']))
print
print 'Number of results found: %s' % len(creatives)
| 32.65625
| 80
| 0.697608
|
4a0010515b643a40d9d4a54ad452f6972c3174db
| 3,056
|
py
|
Python
|
experiments/evaluation/utils.py
|
malteos/aspect-document-embeddings
|
0836ea54a9192dbc2b01bb212c7521668bb398af
|
[
"MIT"
] | 2
|
2022-03-21T18:31:57.000Z
|
2022-03-21T23:45:33.000Z
|
experiments/evaluation/utils.py
|
malteos/aspect-document-embeddings
|
0836ea54a9192dbc2b01bb212c7521668bb398af
|
[
"MIT"
] | null | null | null |
experiments/evaluation/utils.py
|
malteos/aspect-document-embeddings
|
0836ea54a9192dbc2b01bb212c7521668bb398af
|
[
"MIT"
] | null | null | null |
from typing import List, Union, Set
import numpy as np
import pandas as pd
def get_reciprocal_rank(retrieved_docs: List, relevant_docs: Union[Set, List]) -> float:
"""
The mean reciprocal rank is a statistic measure for evaluating any process that produces a list of possible
responses to a sample of queries, ordered by probability of correctness.
rank_i: The reciprocal rank of a query response is the multiplicative inverse of the rank of the first correct answer-
:param retrieved_docs: List of queries and their retrieved documents (from evaluated system)
:param relevant_docs: List of queries and their relevant documents (from gold standard)
:return:
"""
for rank, retrieved_doc in enumerate(retrieved_docs, start=1):
if retrieved_doc in relevant_docs:
return 1. / rank
return 0.
def get_avg_precision(retrieved_docs: List, relevant_docs: Union[Set, List]) -> float:
retrieved_relevant_docs = 0.
precision_sum = 0.
# Compute avg. precision
if len(relevant_docs) > 0:
for rank, retrieved_doc in enumerate(retrieved_docs, start=1):
if retrieved_doc in relevant_docs:
retrieved_relevant_docs += 1
precision_sum += retrieved_relevant_docs / rank
return precision_sum / len(relevant_docs)
else:
return 0.
def get_mean_avg_precision(queries_retrieved_docs: List[List], queries_relevant_docs: List[List]) -> float:
"""
https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision
:param queries_retrieved_docs: List of queries and their retrieved documents (from evaluated system)
:param queries_relevant_docs: List of queries and their relevant documents (from gold standard)
:return: MAP score
"""
assert len(queries_retrieved_docs) == len(queries_relevant_docs)
sum_avg_precision = 0.
# Iterate over all queries
for query_idx, retrieved_docs in enumerate(queries_retrieved_docs):
relevant_docs = queries_relevant_docs[query_idx]
sum_avg_precision += get_avg_precision(retrieved_docs, relevant_docs)
return sum_avg_precision / len(queries_retrieved_docs)
def highlight_max(data, color='green'):
"""
Highlight the maximum in a Series or DataFrame.
Usage:
df.style.apply(highlight_max)
"""
attr = 'background-color: {}'.format(color)
#remove % and cast to float
data = data.replace('%','', regex=True).astype(float)
if data.ndim == 1: # Series from .apply(axis=0) or axis=1
is_max = data == data.max()
return [attr if v else '' for v in is_max]
else: # from .apply(axis=None)
is_max = data == data.max().max()
return pd.DataFrame(np.where(is_max, attr, ''),
index=data.index, columns=data.columns)
def compute_dcg_at_k(relevances, k):
dcg = 0
for i in range(min(len(relevances), k)):
dcg += relevances[i] / np.log2(i + 2) #+2 as we start our idx at 0
return dcg
| 33.955556
| 122
| 0.690772
|
4a00114dcd688db575b5a5e1e0ff253053363e71
| 807
|
py
|
Python
|
test/test_cms_page_list_response.py
|
kinow-io/kaemo-python-sdk
|
610fce09e3a9e631babf09195b0492959d9e4d56
|
[
"Apache-2.0"
] | 1
|
2017-05-03T12:48:22.000Z
|
2017-05-03T12:48:22.000Z
|
test/test_cms_page_list_response.py
|
kinow-io/kaemo-python-sdk
|
610fce09e3a9e631babf09195b0492959d9e4d56
|
[
"Apache-2.0"
] | null | null | null |
test/test_cms_page_list_response.py
|
kinow-io/kaemo-python-sdk
|
610fce09e3a9e631babf09195b0492959d9e4d56
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.9
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kinow_client
from kinow_client.rest import ApiException
from kinow_client.models.cms_page_list_response import CMSPageListResponse
class TestCMSPageListResponse(unittest.TestCase):
""" CMSPageListResponse unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testCMSPageListResponse(self):
"""
Test CMSPageListResponse
"""
model = kinow_client.models.cms_page_list_response.CMSPageListResponse()
if __name__ == '__main__':
unittest.main()
| 18.767442
| 80
| 0.70632
|
4a00117891cc05f94a9614802e3254fd8533a408
| 609
|
py
|
Python
|
src/Two Sum.py
|
xiaonanln/myleetcode-python
|
95d282f21a257f937cd22ef20c3590a69919e307
|
[
"Apache-2.0"
] | null | null | null |
src/Two Sum.py
|
xiaonanln/myleetcode-python
|
95d282f21a257f937cd22ef20c3590a69919e307
|
[
"Apache-2.0"
] | null | null | null |
src/Two Sum.py
|
xiaonanln/myleetcode-python
|
95d282f21a257f937cd22ef20c3590a69919e307
|
[
"Apache-2.0"
] | null | null | null |
class Solution:
# @return a tuple, (index1, index2)
def twoSum(self, num, target):
num = list(enumerate(num))
num.sort(cmp=Solution.compare)
h = 0
t = len(num) - 1
while h < t:
s = num[h][1] + num[t][1]
if s < target:
h += 1
elif s > target:
t -= 1
else:
h, t = num[h][0]+1, num[t][0]+1
return (h, t) if h < t else (t, h)
@staticmethod
def compare(n1, n2):
return cmp(n1[1], n2[1])
| 26.478261
| 50
| 0.380952
|
4a00118d039032ced6f3cf3b2fb6745722d6d66d
| 1,656
|
py
|
Python
|
wandb_hparams_search.py
|
yiftachn/Survival-Analysis
|
2fbdaa8bd2c52a54b10143929f35539492fb76a8
|
[
"MIT"
] | null | null | null |
wandb_hparams_search.py
|
yiftachn/Survival-Analysis
|
2fbdaa8bd2c52a54b10143929f35539492fb76a8
|
[
"MIT"
] | null | null | null |
wandb_hparams_search.py
|
yiftachn/Survival-Analysis
|
2fbdaa8bd2c52a54b10143929f35539492fb76a8
|
[
"MIT"
] | null | null | null |
import pandas as pd
from sklearn.model_selection import cross_val_score
from sksurv.ensemble import RandomSurvivalForest
from sksurv.linear_model import CoxnetSurvivalAnalysis, CoxPHSurvivalAnalysis
from sksurv.metrics import as_integrated_brier_score_scorer
import wandb
from config import SEED
from etl.data_loading import get_df_for_stage
from etl.train_test_split import impute_nan_values_and_split_to_train_test
rsf_sweep = dict(
n_estimators=1000,
min_samples_split=10,
min_samples_leaf=15,
max_features="sqrt",
n_jobs=-1,
random_state=SEED
)
coxnet_sweep = dict(
l1_ratio=0.99,
fit_baseline_model=True
)
baseline_sweep = dict(
alpha=0,
ties="breslow",
n_iter=100,
tol=1e-9
)
import wandb
sweep_id = wandb.sweep(sweep_config)
def main():
wandb.init(project="survival_analysis", entity="survival_analysis") # , config=rsf_hparams_defaults)
# config = wandb.config
times = [3, 6, 12] # months
df = get_df_for_stage('intra')
X_train, X_test, y_train, y_test = impute_nan_values_and_split_to_train_test(df)
models = dict(
baseline=CoxPHSurvivalAnalysis(**baseline_hparams_defaults),
rsf=RandomSurvivalForest(**rsf_hparams_defaults),
coxnet=CoxnetSurvivalAnalysis(**coxnet_hparams_defaults)
)
brier_scores = {}
for model_name, model in models.items():
model = as_integrated_brier_score_scorer(model, times)
model.fit(X_train, y_train)
brier_scores[model_name] = [model.score(X_test, y_test)]
wandb.log({"metrics": wandb.Table(dataframe=pd.DataFrame(brier_scores))})
if __name__ == '__main__':
main()
| 26.709677
| 105
| 0.740338
|
4a001197cb37e9e63c3fa340572647dc6cd8aa53
| 2,381
|
py
|
Python
|
app/config/settings/test.py
|
nickmoreton/nhsx-website
|
2397d1308376c02b75323d30e6bc916af0daac9d
|
[
"MIT"
] | null | null | null |
app/config/settings/test.py
|
nickmoreton/nhsx-website
|
2397d1308376c02b75323d30e6bc916af0daac9d
|
[
"MIT"
] | null | null | null |
app/config/settings/test.py
|
nickmoreton/nhsx-website
|
2397d1308376c02b75323d30e6bc916af0daac9d
|
[
"MIT"
] | null | null | null |
import os
import logging
from .base import * # NOQA
DEBUG = True
BASE_URL = "http://0.0.0.0:5000"
COLLECTFAST_ENABLED = False
CACHEOPS_ENABLED = False
WAGTAIL_CACHE = False
ASSETS_DEBUG = True
ASSETS_AUTO_BUILD = True
STATIC_URL = "/static/"
MEDIA_URL = "/media/"
ALLOWED_HOSTS = ["*"]
# Use in-memory SQLite for the tests for speed.
DATABASES = {
"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "file::memory:",}
}
# Use basic DB search backend for tests
WAGTAILSEARCH_BACKENDS = {
"default": {"BACKEND": "wagtail.search.backends.db",},
}
# Override the cache settings so they never interfere with cached keys
# in the site
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "session_cache"
WAGTAIL_CACHE_BACKEND = "wagtail_cache"
REDIS_HOST = os.environ.get("REDIS_HOST", "")
REDIS_PORT = os.environ.get("REDIS_PORT", "6379")
REDIS_HOST_FULL = f"redis://{REDIS_HOST}:{REDIS_PORT}"
REDIS_HOST_CACHEOPS = f"{REDIS_HOST_FULL}/6"
REDIS_HOST_PAGECACHE = f"{REDIS_HOST_FULL}/7"
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": REDIS_HOST_CACHEOPS,
"OPTIONS": {
"PARSER_CLASS": "redis.connection.HiredisParser",
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"PASSWORD": os.environ.get("REDIS_PASSWORD"),
"IGNORE_EXCEPTIONS": True,
},
},
"wagtail_cache": {
"BACKEND": "wagtailcache.compat_backends.django_redis.RedisCache",
"LOCATION": REDIS_HOST_PAGECACHE,
"TIMEOUT": 60 * 60 * 24 * 7, # Seven days
"OPTIONS": {
"PARSER_CLASS": "redis.connection.HiredisParser",
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"PASSWORD": os.environ.get("REDIS_PASSWORD"),
"IGNORE_EXCEPTIONS": True,
},
},
"session_cache": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": os.environ.get("MEMCACHED_LOCATION", "127.0.0.1:11211"),
},
}
def filter_deprecation_warnings(record):
warnings_to_suppress = ["RemovedInDjango30Warning"]
# Return false to suppress message.
return not any([warn in record.getMessage() for warn in warnings_to_suppress])
warn_logger = logging.getLogger("py.warnings")
warn_logger.addFilter(filter_deprecation_warnings)
| 28.686747
| 82
| 0.677446
|
4a0011b02d01378a106da414abc422864e3efcfa
| 2,731
|
py
|
Python
|
internal/twirptest/no_package_name/no_package_name_pb2_twirp.py
|
thinkdata-works/twirp
|
28b19adf69788dbb62094d19c1b06d7284683988
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
internal/twirptest/no_package_name/no_package_name_pb2_twirp.py
|
thinkdata-works/twirp
|
28b19adf69788dbb62094d19c1b06d7284683988
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2019-10-01T15:19:32.000Z
|
2020-05-28T14:48:55.000Z
|
internal/twirptest/no_package_name/no_package_name_pb2_twirp.py
|
thinkdata-works/twirp
|
28b19adf69788dbb62094d19c1b06d7284683988
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Code generated by protoc-gen-twirp_python v5.8.0, DO NOT EDIT.
# source: no_package_name.proto
try:
import httplib
from urllib2 import Request, HTTPError, urlopen
except ImportError:
import http.client as httplib
from urllib.request import Request, urlopen
from urllib.error import HTTPError
import json
from google.protobuf import symbol_database as _symbol_database
import sys
_sym_db = _symbol_database.Default()
class TwirpException(httplib.HTTPException):
def __init__(self, code, message, meta):
self.code = code
self.message = message
self.meta = meta
super(TwirpException, self).__init__(message)
@classmethod
def from_http_err(cls, err):
try:
jsonerr = json.load(err)
code = jsonerr["code"]
msg = jsonerr["msg"]
meta = jsonerr.get("meta")
if meta is None:
meta = {}
except:
code = "internal"
msg = "Error from intermediary with HTTP status code {} {}".format(
err.code, httplib.responses[err.code],
)
meta = {}
return cls(code, msg, meta)
class SvcClient(object):
def __init__(self, server_address, request_decorator=None):
"""Creates a new client for the Svc service.
Args:
server_address: The address of the server to send requests to, in
the full protocol://host:port form.
request_decorator: A function to modify the http request being
sent. Takes in a urllib.request.Request object and returns the
same request object
"""
if sys.version_info[0] > 2:
self.__target = server_address
else:
self.__target = server_address.encode('ascii')
self.__service_name = "Svc"
self.__request_decorator = request_decorator
def __make_request(self, body, full_method):
req = Request(
url=self.__target + "/twirp" + full_method,
data=body,
headers={"Content-Type": "application/protobuf"},
)
if self.__request_decorator:
req = self.__request_decorator(req)
try:
resp = urlopen(req)
except HTTPError as err:
raise TwirpException.from_http_err(err)
return resp.read()
def send(self, msg):
serialize = _sym_db.GetSymbol("Msg").SerializeToString
deserialize = _sym_db.GetSymbol("Msg").FromString
full_method = "/{}/{}".format(self.__service_name, "Send")
body = serialize(msg)
resp_str = self.__make_request(body=body, full_method=full_method)
return deserialize(resp_str)
| 31.755814
| 79
| 0.616258
|
4a0012392366395230a2d062191612812326d123
| 464
|
py
|
Python
|
_examples/fontbutton.py
|
airon90/python-gtk3-tutorial
|
db3940bef1834756c309f1bcb2f8e2bb9731763b
|
[
"CC0-1.0"
] | 11
|
2020-02-22T04:14:06.000Z
|
2022-02-27T12:21:39.000Z
|
_examples/fontbutton.py
|
airon90/python-gtk3-tutorial
|
db3940bef1834756c309f1bcb2f8e2bb9731763b
|
[
"CC0-1.0"
] | null | null | null |
_examples/fontbutton.py
|
airon90/python-gtk3-tutorial
|
db3940bef1834756c309f1bcb2f8e2bb9731763b
|
[
"CC0-1.0"
] | 4
|
2019-08-19T22:55:38.000Z
|
2021-06-20T03:31:18.000Z
|
#!/usr/bin/env python3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
def font_changed(fontbutton):
print("Font selected: %s" % fontbutton.get_font_name())
window = Gtk.Window()
window.set_title("FontButton")
window.set_default_size(150, -1)
window.connect("destroy", Gtk.main_quit)
fontbutton = Gtk.FontButton(title="FontButton")
fontbutton.connect("font-set", font_changed)
window.add(fontbutton)
window.show_all()
Gtk.main()
| 21.090909
| 59
| 0.752155
|
4a00126fd7dd6cb7295c083a6ebbddc5f977826a
| 1,249
|
py
|
Python
|
open_alchemy/facades/models.py
|
MihailMiller/OpenAlchemy
|
55b751c58ca50706ebc46262f50addb7dec34278
|
[
"Apache-2.0"
] | 40
|
2019-11-05T06:50:35.000Z
|
2022-03-09T01:34:57.000Z
|
open_alchemy/facades/models.py
|
MihailMiller/OpenAlchemy
|
55b751c58ca50706ebc46262f50addb7dec34278
|
[
"Apache-2.0"
] | 178
|
2019-11-03T04:10:38.000Z
|
2022-03-31T00:07:17.000Z
|
open_alchemy/facades/models.py
|
MihailMiller/OpenAlchemy
|
55b751c58ca50706ebc46262f50addb7dec34278
|
[
"Apache-2.0"
] | 17
|
2019-11-04T07:22:46.000Z
|
2022-03-23T05:29:49.000Z
|
"""Functions for interacting with the OpenAlchemy models."""
import typing
import open_alchemy
from open_alchemy import types
def get_base() -> typing.Any:
"""
Get the models.Base used as the declarative base for models.
Returns:
The models.Base.
"""
# pylint: disable=no-member
return open_alchemy.models.Base # type: ignore
def get_model(*, name: str) -> typing.Optional[typing.Type]:
"""
Get a model by name from models.
Args:
name: The name of the model.
Returns:
The model with the name.
"""
return getattr(open_alchemy.models, name, None)
def get_model_schema(*, name: str) -> typing.Optional[types.Schema]:
"""
Get the schema of a model by name from models.
Args:
name: The name of the model.
Returns:
The schema of the model with the name.
"""
model = get_model(name=name)
if model is None:
return None
return model._schema # pylint: disable=protected-access
def set_model(*, name: str, model: typing.Type) -> None:
"""
Set model by name on models.
Args:
model: The model to set.
name: The name of the model.
"""
setattr(open_alchemy.models, name, model)
| 20.145161
| 68
| 0.630104
|
4a001305b29600b4772418a66d3c675dd8e381d7
| 8,426
|
py
|
Python
|
Lib/test/test_kqueue.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | 1
|
2018-06-21T18:21:24.000Z
|
2018-06-21T18:21:24.000Z
|
Lib/test/test_kqueue.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | null | null | null |
Lib/test/test_kqueue.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | null | null | null |
"""
Tests dla kqueue wrapper.
"""
zaimportuj errno
zaimportuj os
zaimportuj select
zaimportuj socket
zaimportuj sys
zaimportuj time
zaimportuj unittest
jeżeli nie hasattr(select, "kqueue"):
podnieś unittest.SkipTest("test works only on BSD")
klasa TestKQueue(unittest.TestCase):
def test_create_queue(self):
kq = select.kqueue()
self.assertPrawda(kq.fileno() > 0, kq.fileno())
self.assertPrawda(nie kq.closed)
kq.close()
self.assertPrawda(kq.closed)
self.assertRaises(ValueError, kq.fileno)
def test_create_event(self):
z operator zaimportuj lt, le, gt, ge
fd = os.open(os.devnull, os.O_WRONLY)
self.addCleanup(os.close, fd)
ev = select.kevent(fd)
other = select.kevent(1000)
self.assertEqual(ev.ident, fd)
self.assertEqual(ev.filter, select.KQ_FILTER_READ)
self.assertEqual(ev.flags, select.KQ_EV_ADD)
self.assertEqual(ev.fflags, 0)
self.assertEqual(ev.data, 0)
self.assertEqual(ev.udata, 0)
self.assertEqual(ev, ev)
self.assertNotEqual(ev, other)
self.assertPrawda(ev < other)
self.assertPrawda(other >= ev)
dla op w lt, le, gt, ge:
self.assertRaises(TypeError, op, ev, Nic)
self.assertRaises(TypeError, op, ev, 1)
self.assertRaises(TypeError, op, ev, "ev")
ev = select.kevent(fd, select.KQ_FILTER_WRITE)
self.assertEqual(ev.ident, fd)
self.assertEqual(ev.filter, select.KQ_FILTER_WRITE)
self.assertEqual(ev.flags, select.KQ_EV_ADD)
self.assertEqual(ev.fflags, 0)
self.assertEqual(ev.data, 0)
self.assertEqual(ev.udata, 0)
self.assertEqual(ev, ev)
self.assertNotEqual(ev, other)
ev = select.kevent(fd, select.KQ_FILTER_WRITE, select.KQ_EV_ONESHOT)
self.assertEqual(ev.ident, fd)
self.assertEqual(ev.filter, select.KQ_FILTER_WRITE)
self.assertEqual(ev.flags, select.KQ_EV_ONESHOT)
self.assertEqual(ev.fflags, 0)
self.assertEqual(ev.data, 0)
self.assertEqual(ev.udata, 0)
self.assertEqual(ev, ev)
self.assertNotEqual(ev, other)
ev = select.kevent(1, 2, 3, 4, 5, 6)
self.assertEqual(ev.ident, 1)
self.assertEqual(ev.filter, 2)
self.assertEqual(ev.flags, 3)
self.assertEqual(ev.fflags, 4)
self.assertEqual(ev.data, 5)
self.assertEqual(ev.udata, 6)
self.assertEqual(ev, ev)
self.assertNotEqual(ev, other)
bignum = 0x7fff
ev = select.kevent(bignum, 1, 2, 3, bignum - 1, bignum)
self.assertEqual(ev.ident, bignum)
self.assertEqual(ev.filter, 1)
self.assertEqual(ev.flags, 2)
self.assertEqual(ev.fflags, 3)
self.assertEqual(ev.data, bignum - 1)
self.assertEqual(ev.udata, bignum)
self.assertEqual(ev, ev)
self.assertNotEqual(ev, other)
# Issue 11973
bignum = 0xffff
ev = select.kevent(0, 1, bignum)
self.assertEqual(ev.ident, 0)
self.assertEqual(ev.filter, 1)
self.assertEqual(ev.flags, bignum)
self.assertEqual(ev.fflags, 0)
self.assertEqual(ev.data, 0)
self.assertEqual(ev.udata, 0)
self.assertEqual(ev, ev)
self.assertNotEqual(ev, other)
# Issue 11973
bignum = 0xffffffff
ev = select.kevent(0, 1, 2, bignum)
self.assertEqual(ev.ident, 0)
self.assertEqual(ev.filter, 1)
self.assertEqual(ev.flags, 2)
self.assertEqual(ev.fflags, bignum)
self.assertEqual(ev.data, 0)
self.assertEqual(ev.udata, 0)
self.assertEqual(ev, ev)
self.assertNotEqual(ev, other)
def test_queue_event(self):
serverSocket = socket.socket()
serverSocket.bind(('127.0.0.1', 0))
serverSocket.listen()
client = socket.socket()
client.setblocking(Nieprawda)
spróbuj:
client.connect(('127.0.0.1', serverSocket.getsockname()[1]))
wyjąwszy OSError jako e:
self.assertEqual(e.args[0], errno.EINPROGRESS)
inaczej:
#raise AssertionError("Connect should have podnieśd EINPROGRESS")
dalej # FreeBSD doesn't podnieś an exception here
server, addr = serverSocket.accept()
kq = select.kqueue()
kq2 = select.kqueue.fromfd(kq.fileno())
ev = select.kevent(server.fileno(),
select.KQ_FILTER_WRITE,
select.KQ_EV_ADD | select.KQ_EV_ENABLE)
kq.control([ev], 0)
ev = select.kevent(server.fileno(),
select.KQ_FILTER_READ,
select.KQ_EV_ADD | select.KQ_EV_ENABLE)
kq.control([ev], 0)
ev = select.kevent(client.fileno(),
select.KQ_FILTER_WRITE,
select.KQ_EV_ADD | select.KQ_EV_ENABLE)
kq2.control([ev], 0)
ev = select.kevent(client.fileno(),
select.KQ_FILTER_READ,
select.KQ_EV_ADD | select.KQ_EV_ENABLE)
kq2.control([ev], 0)
events = kq.control(Nic, 4, 1)
events = set((e.ident, e.filter) dla e w events)
self.assertEqual(events, set([
(client.fileno(), select.KQ_FILTER_WRITE),
(server.fileno(), select.KQ_FILTER_WRITE)]))
client.send(b"Hello!")
server.send(b"world!!!")
# We may need to call it several times
dla i w range(10):
events = kq.control(Nic, 4, 1)
jeżeli len(events) == 4:
przerwij
time.sleep(1.0)
inaczej:
self.fail('timeout waiting dla event notifications')
events = set((e.ident, e.filter) dla e w events)
self.assertEqual(events, set([
(client.fileno(), select.KQ_FILTER_WRITE),
(client.fileno(), select.KQ_FILTER_READ),
(server.fileno(), select.KQ_FILTER_WRITE),
(server.fileno(), select.KQ_FILTER_READ)]))
# Remove completely client, oraz server read part
ev = select.kevent(client.fileno(),
select.KQ_FILTER_WRITE,
select.KQ_EV_DELETE)
kq.control([ev], 0)
ev = select.kevent(client.fileno(),
select.KQ_FILTER_READ,
select.KQ_EV_DELETE)
kq.control([ev], 0)
ev = select.kevent(server.fileno(),
select.KQ_FILTER_READ,
select.KQ_EV_DELETE)
kq.control([ev], 0, 0)
events = kq.control([], 4, 0.99)
events = set((e.ident, e.filter) dla e w events)
self.assertEqual(events, set([
(server.fileno(), select.KQ_FILTER_WRITE)]))
client.close()
server.close()
serverSocket.close()
def testPair(self):
kq = select.kqueue()
a, b = socket.socketpair()
a.send(b'foo')
event1 = select.kevent(a, select.KQ_FILTER_READ, select.KQ_EV_ADD | select.KQ_EV_ENABLE)
event2 = select.kevent(b, select.KQ_FILTER_READ, select.KQ_EV_ADD | select.KQ_EV_ENABLE)
r = kq.control([event1, event2], 1, 1)
self.assertPrawda(r)
self.assertNieprawda(r[0].flags & select.KQ_EV_ERROR)
self.assertEqual(b.recv(r[0].data), b'foo')
a.close()
b.close()
kq.close()
def test_close(self):
open_file = open(__file__, "rb")
self.addCleanup(open_file.close)
fd = open_file.fileno()
kqueue = select.kqueue()
# test fileno() method oraz closed attribute
self.assertIsInstance(kqueue.fileno(), int)
self.assertNieprawda(kqueue.closed)
# test close()
kqueue.close()
self.assertPrawda(kqueue.closed)
self.assertRaises(ValueError, kqueue.fileno)
# close() can be called more than once
kqueue.close()
# operations must fail przy ValueError("I/O operation on closed ...")
self.assertRaises(ValueError, kqueue.control, Nic, 4)
def test_fd_non_inheritable(self):
kqueue = select.kqueue()
self.addCleanup(kqueue.close)
self.assertEqual(os.get_inheritable(kqueue.fileno()), Nieprawda)
jeżeli __name__ == "__main__":
unittest.main()
| 34.962656
| 96
| 0.589841
|
4a00139672c5b37b9abb155af6fb478fcf19803b
| 7,488
|
py
|
Python
|
src/workflow/background.py
|
xilopaint/alfred-urban-dictionary
|
5d1f797583e524c404d570aebd9c10ec66e9c1d6
|
[
"MIT"
] | 53
|
2018-08-29T08:42:18.000Z
|
2022-02-24T00:33:59.000Z
|
src/workflow/background.py
|
xilopaint/alfred-urban-dictionary
|
5d1f797583e524c404d570aebd9c10ec66e9c1d6
|
[
"MIT"
] | 6
|
2018-08-28T22:49:54.000Z
|
2021-11-21T16:02:40.000Z
|
src/workflow/background.py
|
xilopaint/alfred-urban-dictionary
|
5d1f797583e524c404d570aebd9c10ec66e9c1d6
|
[
"MIT"
] | 5
|
2018-08-28T22:46:27.000Z
|
2021-11-10T14:54:38.000Z
|
#!/usr/bin/env python3
"""This module provides an API to run commands in background processes.
Combine with the :ref:`caching API <caching-data>` to work from cached data
while you fetch fresh data in the background.
See :ref:`the User Manual <background-processes>` for more information
and examples.
"""
import os
import pickle
import signal
import subprocess
import sys
from workflow import Workflow
__all__ = ["is_running", "run_in_background"]
_wf = None
def wf():
global _wf
if _wf is None:
_wf = Workflow()
return _wf
def _log():
return wf().logger
def _arg_cache(name):
"""Return path to pickle cache file for arguments.
:param name: name of task
:type name: ``unicode``
:returns: Path to cache file
:rtype: ``unicode`` filepath
"""
return wf().cachefile(name + ".argcache")
def _pid_file(name):
    """Return path to the PID file used to track the job called *name*.

    :param name: name of task
    :type name: ``unicode``
    :returns: Path to PID file for task
    :rtype: ``unicode`` filepath
    """
    filename = "{0}.pid".format(name)
    return wf().cachefile(filename)
def _process_exists(pid):
"""Check if a process with PID ``pid`` exists.
:param pid: PID to check
:type pid: ``int``
:returns: ``True`` if process exists, else ``False``
:rtype: ``Boolean``
"""
try:
os.kill(pid, 0)
except OSError: # not running
return False
return True
def _job_pid(name):
    """Return the PID of job ``name``, or ``None`` if the job is not running.

    Reads the PID from the job's PID file, verifies the process is still
    alive, and deletes the stale PID file otherwise.

    Fix: removed two stray debug ``print()`` calls -- in an Alfred workflow
    stdout is the script-filter output channel, so debug prints corrupt it.

    Args:
        name (str): Name of job.

    Returns:
        int: PID of job process (or `None` if job doesn't exist).
    """
    pidfile = _pid_file(name)
    if not os.path.exists(pidfile):
        return None

    with open(pidfile, "rb") as fp:
        pid = int.from_bytes(fp.read(), sys.byteorder)

    if _process_exists(pid):
        return pid

    # Process is gone: remove the stale PID file.
    os.unlink(pidfile)
    return None
def is_running(name):
    """Test whether task ``name`` is currently running.

    :param name: name of task
    :type name: unicode
    :returns: ``True`` if task with name ``name`` is running, else ``False``
    :rtype: bool
    """
    return _job_pid(name) is not None
def _background(
    pidfile, stdin="/dev/null", stdout="/dev/null", stderr="/dev/null"
):  # pragma: no cover
    """Fork the current process into a background daemon.

    Uses the classic UNIX double-fork so the daemon is re-parented to init
    and cannot re-acquire a controlling terminal.

    :param pidfile: file to write PID of daemon process to.
    :type pidfile: filepath
    :param stdin: where to read input
    :type stdin: filepath
    :param stdout: where to write stdout output
    :type stdout: filepath
    :param stderr: where to write stderr output
    :type stderr: filepath
    """

    def _fork_and_exit_parent(errmsg, wait=False, write=False):
        # Fork once; the parent optionally records the child's PID and/or
        # waits for the child, then exits so only the child continues.
        try:
            pid = os.fork()
            if pid > 0:
                if write:  # write PID of child process to `pidfile`
                    # Write to a temp file and rename so readers never see a
                    # partially-written PID file.
                    tmp = pidfile + ".tmp"
                    with open(tmp, "wb") as fp:
                        fp.write(pid.to_bytes(4, sys.byteorder))
                    os.rename(tmp, pidfile)
                if wait:  # wait for child process to exit
                    os.waitpid(pid, 0)
                os._exit(0)
        except OSError as err:
            _log().critical("%s: (%d) %s", errmsg, err.errno, err.strerror)
            raise err

    # Do first fork and wait for second fork to finish.
    _fork_and_exit_parent("fork #1 failed", wait=True)

    # Decouple from parent environment.
    os.chdir(wf().workflowdir)
    os.setsid()

    # Do second fork and write PID to pidfile.
    _fork_and_exit_parent("fork #2 failed", write=True)

    # Now I am a daemon!
    # Redirect standard file descriptors.
    si = open(stdin, "r", 1)
    so = open(stdout, "a+", 1)
    se = open(stderr, "a+", 1)
    # hasattr guards: under Alfred the std streams may be replaced by objects
    # without a real file descriptor.
    if hasattr(sys.stdin, "fileno"):
        os.dup2(si.fileno(), sys.stdin.fileno())
    if hasattr(sys.stdout, "fileno"):
        os.dup2(so.fileno(), sys.stdout.fileno())
    if hasattr(sys.stderr, "fileno"):
        os.dup2(se.fileno(), sys.stderr.fileno())
def kill(name, sig=signal.SIGTERM):
    """Send a signal to job ``name`` via :func:`os.kill`.

    .. versionadded:: 1.29

    Args:
        name (str): Name of the job
        sig (int, optional): Signal to send (default: SIGTERM)

    Returns:
        bool: `False` if job isn't running, `True` if signal was sent.
    """
    pid = _job_pid(name)
    if pid is not None:
        os.kill(pid, sig)
        return True
    return False
def run_in_background(name, args, **kwargs):
    r"""Cache arguments then call this script again via :func:`subprocess.call`.

    :param name: name of job
    :type name: unicode
    :param args: arguments passed as first argument to :func:`subprocess.call`
    :param \**kwargs: keyword arguments to :func:`subprocess.call`
    :returns: exit code of sub-process
    :rtype: int

    When you call this function, it caches its arguments and then calls
    ``background.py`` in a subprocess. The Python subprocess will load the
    cached arguments, fork into the background, and then run the command you
    specified.

    This function will return as soon as the ``background.py`` subprocess has
    forked, returning the exit code of *that* process (i.e. not of the command
    you're trying to run).

    If that process fails, an error will be written to the log file.

    If a process is already running under the same name, this function will
    return immediately and will not run the specified command.
    """
    if is_running(name):
        _log().info("[%s] job already running", name)
        # NOTE: returns None (not an exit code) when the job already exists.
        return

    argcache = _arg_cache(name)

    # Cache arguments so the spawned runner (main() below) can load them.
    with open(argcache, "wb") as fp:
        pickle.dump({"args": args, "kwargs": kwargs}, fp)
    _log().debug("[%s] command cached: %s", name, argcache)

    # Call this script as a module; PYTHONPATH is forwarded so the
    # ``workflow`` package resolves in the child interpreter.
    cmd = [sys.executable, "-m", "workflow.background", name]
    _log().debug("[%s] passing job to background runner: %r", name, cmd)
    retcode = subprocess.call(cmd, env={"PYTHONPATH": ":".join(sys.path)})

    if retcode:  # pragma: no cover
        _log().error("[%s] background runner failed with %d", name, retcode)
    else:
        _log().debug("[%s] background job started", name)

    return retcode
def main(wf):  # pragma: no cover
    """Run command in a background process.

    Load cached arguments, fork into background, then call
    :meth:`subprocess.call` with cached arguments.

    NOTE: the ``wf`` parameter (a Workflow instance) shadows the
    module-level ``wf()`` factory inside this function.
    """
    log = wf.logger
    name = wf.args[0]
    argcache = _arg_cache(name)
    if not os.path.exists(argcache):
        msg = "[{0}] command cache not found: {1}".format(name, argcache)
        log.critical(msg)
        raise IOError(msg)

    # Fork to background and run command; after this call we are the daemon.
    pidfile = _pid_file(name)
    _background(pidfile)

    # Load cached arguments written by run_in_background().
    with open(argcache, "rb") as fp:
        data = pickle.load(fp)

    # Cached arguments
    args = data["args"]
    kwargs = data["kwargs"]

    # Delete argument cache file
    os.unlink(argcache)

    try:
        # Run the command
        log.debug("[%s] running command: %r", name, args)
        retcode = subprocess.call(args, **kwargs)
        if retcode:
            log.error("[%s] command failed with status %d", name, retcode)
    finally:
        # Always remove the PID file so is_running() reports correctly.
        os.unlink(pidfile)
        log.debug("[%s] job complete", name)
if __name__ == "__main__":  # pragma: no cover
    # Invoked as ``python -m workflow.background <name>`` (see the ``cmd``
    # built in run_in_background()); delegates to main() via Workflow.run.
    wf().run(main)
| 26.181818
| 80
| 0.61445
|
4a00161d13b774508d1d13a4433a7c820c3d923e
| 7,405
|
py
|
Python
|
code/Tiramisu_calibration.py
|
uncbiag/LTS
|
1b0d45f9aac9454a12d75e8308f2a2ad07275438
|
[
"Apache-2.0"
] | 14
|
2021-09-29T06:03:05.000Z
|
2022-03-29T14:36:10.000Z
|
code/Tiramisu_calibration.py
|
uncbiag/LTS
|
1b0d45f9aac9454a12d75e8308f2a2ad07275438
|
[
"Apache-2.0"
] | null | null | null |
code/Tiramisu_calibration.py
|
uncbiag/LTS
|
1b0d45f9aac9454a12d75e8308f2a2ad07275438
|
[
"Apache-2.0"
] | 5
|
2021-08-17T03:15:04.000Z
|
2022-03-18T08:47:01.000Z
|
import torch
import torchvision
from PIL import Image
from torchvision import transforms
import numpy as np
import glob
from Tiramisu_calibration_Dataset import *
from torch.utils.data import DataLoader
from calibration_models import *
from torch import nn, optim
import os
from tensorboardX import SummaryWriter
import time
import datetime
import os
import sys
import argparse
import random
# Make the parent directory importable; resolves ".." relative to the CWD,
# so this assumes the script is launched from its own directory -- TODO confirm.
sys.path.append(os.path.realpath(".."))

# Command-line interface for the calibration training script.
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default=0, type=int, help='index of used GPU')
parser.add_argument('--model-name', default='LTS', type=str, help='model name: IBTS, LTS, TS')
parser.add_argument('--epochs', default=200, type=int, help='max epochs')
parser.add_argument('--batch-size', default=4, type=int, help='batch size')
parser.add_argument('--lr', default=1e-4, type=float, help='inital learning rate')
parser.add_argument('--seed', default=None, type=int, help='seed for initializing training.')
parser.add_argument('--save-per-epoch', default=1, type=int, help='number of epochs to save model.')
if __name__ == "__main__":
    # Fix: ``warnings`` and ``cudnn`` are referenced below but were never
    # imported by this module, which made the --seed / --gpu warning paths
    # die with a NameError. Import them locally here.
    import warnings
    from torch.backends import cudnn

    args = parser.parse_args()
    model_name = str(args.model_name)

    # Logits previously dumped by the segmentation network for the val split.
    total_logits_list = glob.glob('/YOUR_PATH_TO_CamVid/prediction_results/val/*_logit.pt')
    total_logits_list.sort()

    ## training and validation split
    train_logits_list = total_logits_list[:90]
    val_logits_list = total_logits_list[90:]

    nll_criterion = nn.CrossEntropyLoss()
    max_epochs = args.epochs
    batch_size = args.batch_size
    lr = args.lr

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    TIRAMISU_train = TIRAMISU_CALIBRATION(train_logits_list, 'val')
    TIRAMISU_train_dataloader = DataLoader(TIRAMISU_train, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
    TIRAMISU_val = TIRAMISU_CALIBRATION(val_logits_list, 'val')
    TIRAMISU_val_dataloader = DataLoader(TIRAMISU_val, batch_size=1, shuffle=False, num_workers=4, pin_memory=True)

    # Select the calibration model; experiment_name encodes the hyper-params.
    if model_name == 'IBTS':
        experiment_name = model_name + '_CamVid' + '_epoch_' + str(max_epochs) + '_batchsize_' + str(batch_size) + '_lr_' + str(lr)
        calibration_model = IBTS_CamVid_With_Image()
    elif model_name == 'LTS':
        experiment_name = model_name + '_CamVid' + '_epoch_' + str(max_epochs) + '_batchsize_' + str(batch_size) + '_lr_' + str(lr)
        calibration_model = LTS_CamVid_With_Image()
    elif model_name == 'TS':
        experiment_name = model_name + '_CamVid' + '_epoch_' + str(max_epochs) + '_batchsize_' + str(batch_size) + '_lr_' + str(lr)
        calibration_model = Temperature_Scaling()
    else:
        raise ValueError('Wrong Model Name!')

    calibration_model.weights_init()
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        calibration_model.cuda(args.gpu)
    else:
        calibration_model.cuda()

    optimizer = optim.Adam(calibration_model.parameters(), lr=lr)

    # Baseline: NLL of the uncalibrated logits on the held-out split.
    print("Computing Loss")
    val_loss = 0
    for val_image, val_logits, val_labels, val_preds, val_boundary in TIRAMISU_val_dataloader:
        val_labels = val_labels.long().cuda(args.gpu)
        # Fix: move the logits onto the same device as the labels; the
        # original compared CPU logits against CUDA labels, which raises a
        # device-mismatch error in CrossEntropyLoss.
        val_loss += nll_criterion(val_logits.cuda(args.gpu), val_labels).item()
    mean_val_loss = val_loss/len(TIRAMISU_val_dataloader)
    print('Before calibration - NLL: %.5f' % (mean_val_loss))

    calibration_model.train()
    now = datetime.datetime.now()
    now_date = "{:02d}{:02d}{:02d}".format(now.month, now.day, now.year)
    now_time = "{:02d}{:02d}{:02d}".format(now.hour, now.minute, now.second)
    writer = SummaryWriter(os.path.join('./logs_CamVid', now_date, experiment_name + '_' + now_time))

    for epoch in range(max_epochs):
        for i, (train_image, train_logits, train_labels, train_preds, train_boundary) in enumerate(TIRAMISU_train_dataloader):
            global_step = epoch * len(TIRAMISU_train_dataloader) + (i + 1) * batch_size
            train_image, train_logits, train_labels = train_image.cuda(args.gpu), train_logits.cuda(args.gpu), train_labels.long().cuda(args.gpu)
            optimizer.zero_grad()
            logits_calibrate = calibration_model(train_logits, train_image, args)
            loss = nll_criterion(logits_calibrate, train_labels)
            loss.backward()
            optimizer.step()
            print("{} epoch, {} iter, training loss: {:.5f}".format(epoch, i + 1, loss.item()))
            writer.add_scalar('loss/training', loss.item(), global_step=global_step)
            ## save the current best model and checkpoint
            if i%10 == 9 and epoch % args.save_per_epoch == (args.save_per_epoch - 1):
                with torch.set_grad_enabled(False):
                    tmp_loss = 0
                    for val_image, val_logits, val_labels, val_preds, val_boundary in TIRAMISU_val_dataloader:
                        val_image, val_logits, val_labels = val_image.cuda(args.gpu), val_logits.cuda(args.gpu), val_labels.long().cuda(args.gpu)
                        # NOTE(review): unlike the training call above, ``args``
                        # is not passed here -- confirm that the model's
                        # forward() makes it optional.
                        logits_cali = calibration_model(val_logits, val_image)
                        tmp_loss += nll_criterion(logits_cali, val_labels).item()
                    mean_tmp_loss = tmp_loss/len(TIRAMISU_val_dataloader)
                    print("{} epoch, {} iter, training loss: {:.5f}, val loss: {:.5f}".format(epoch, i+1, loss.item(), mean_tmp_loss))
                    writer.add_scalar('loss/validation', mean_tmp_loss, global_step=global_step)
                    if mean_tmp_loss < mean_val_loss:
                        mean_val_loss = mean_tmp_loss
                        print('%d epoch, current lowest - NLL: %.5f' % (epoch, mean_val_loss))
                        writer.add_scalar('validation/lowest_loss', mean_val_loss, global_step=global_step)
                        torch.save(calibration_model.state_dict(), './calibration_Tiramisu/' + experiment_name + '_params.pth.tar')
                        best_state = {'epoch': epoch,
                                      'state_dict': calibration_model.state_dict(),
                                      'optimizer': optimizer.state_dict(),
                                      'best_score': mean_val_loss,
                                      'global_step': global_step
                                      }
                        torch.save(best_state, './calibration_Tiramisu/' + experiment_name + '_model_best.pth.tar')
                    current_state = {'epoch': epoch,
                                     'state_dict': calibration_model.state_dict(),
                                     'optimizer': optimizer.state_dict(),
                                     'best_score': mean_tmp_loss,
                                     'global_step': global_step
                                     }
                    torch.save(current_state, './calibration_Tiramisu/' + experiment_name + '_checkpoint.pth.tar')
| 48.084416
| 145
| 0.635922
|
4a00171038b545b9f576921aa2f541d2968e72e9
| 1,975
|
py
|
Python
|
server/website/script/controller_simulator/upload_data.py
|
newopscn/ottertune
|
1d57ad99019b669dfbfd099bae031037c66e822d
|
[
"Apache-2.0"
] | null | null | null |
server/website/script/controller_simulator/upload_data.py
|
newopscn/ottertune
|
1d57ad99019b669dfbfd099bae031037c66e822d
|
[
"Apache-2.0"
] | null | null | null |
server/website/script/controller_simulator/upload_data.py
|
newopscn/ottertune
|
1d57ad99019b669dfbfd099bae031037c66e822d
|
[
"Apache-2.0"
] | null | null | null |
#
# OtterTune - upload_data.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
'''
Created on Nov 30, 2017
@author: dvanaken
'''
import argparse
import glob
import logging
import os
import urllib2
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
register_openers()
# Logging
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.StreamHandler())
LOG.setLevel(logging.INFO)
def upload(basedir, upload_code):
    """Upload every generated sample under *basedir* to the local website.

    :param basedir: directory containing one sub-directory per workload
    :param upload_code: the website's upload code, sent with every sample

    NOTE(review): this module targets Python 2 (urllib2/poster), and the four
    file objects opened per sample are never explicitly closed.
    """
    for wkld_dir in sorted(glob.glob(os.path.join(basedir, '*'))):
        LOG.info('Uploading sample for workload %s...', wkld_dir)
        sample_idx = 0
        while True:
            # Each sample is stored as exactly 4 files named
            # 'sample-<idx>__<kind>.json'; stop at the first missing index.
            samples = glob.glob(os.path.join(wkld_dir, 'sample-{}__*').format(sample_idx))
            if len(samples) == 0:
                break
            assert len(samples) == 4
            basename = samples[0].split('__')[0]
            params = {
                'summary': open(basename + '__summary.json', "r"),
                'knobs': open(basename + '__knobs.json', "r"),
                'metrics_before': open(basename + '__metrics_start.json', 'r'),
                'metrics_after': open(basename + '__metrics_end.json', 'r'),
                'upload_code': upload_code,
            }
            # poster's openers (registered at import time) let urllib2 stream
            # the multipart-encoded files.
            datagen, headers = multipart_encode(params)
            request = urllib2.Request("http://0.0.0.0:8000/new_result/", datagen, headers)
            LOG.info("Response: %s\n", urllib2.urlopen(request).read())
            sample_idx += 1
def main():
    """Entry point: parse the CLI arguments and kick off the upload."""
    arg_parser = argparse.ArgumentParser(
        description="Upload generated data to the website")
    arg_parser.add_argument('datadir', type=str, nargs=1,
                            help='Directory containing the generated data')
    arg_parser.add_argument('upload_code', type=str, nargs=1,
                            help='The website\'s upload code')
    parsed = arg_parser.parse_args()
    # argparse stores nargs=1 positionals as one-element lists.
    datadir, code = parsed.datadir[0], parsed.upload_code[0]
    upload(datadir, code)


if __name__ == "__main__":
    main()
| 30.384615
| 90
| 0.619747
|
4a0017624bb82332fb59429c6d12fe1d3b639185
| 1,172
|
py
|
Python
|
maskrcnn_benchmark/solver/build.py
|
sadicLiu/mask_rcnn_code
|
a8878f81f6bbfa63b5a4b7ca2bfb80673e4febfd
|
[
"MIT"
] | 1
|
2019-03-27T07:58:58.000Z
|
2019-03-27T07:58:58.000Z
|
maskrcnn_benchmark/solver/build.py
|
sadicLiu/mask_rcnn_code
|
a8878f81f6bbfa63b5a4b7ca2bfb80673e4febfd
|
[
"MIT"
] | 1
|
2019-03-20T08:46:45.000Z
|
2019-03-20T13:43:47.000Z
|
maskrcnn_benchmark/solver/build.py
|
sadicLiu/mask_rcnn_code
|
a8878f81f6bbfa63b5a4b7ca2bfb80673e4febfd
|
[
"MIT"
] | null | null | null |
import torch
from .lr_scheduler import WarmupMultiStepLR
def make_optimizer(cfg, model):
    """Build an SGD optimizer with per-parameter learning-rate groups.

    Frozen (non-trainable) parameters are skipped entirely; bias parameters
    get a boosted learning rate and their own weight-decay value, following
    the Detectron training recipe.

    Per-group settings override the defaults passed to the optimizer:
    https://pytorch.org/docs/stable/optim.html
    """
    param_groups = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            # Pre-trained/frozen weights are excluded from optimization.
            continue
        lr = cfg.SOLVER.BASE_LR
        weight_decay = cfg.SOLVER.WEIGHT_DECAY
        if "bias" in name:
            lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
            weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
        param_groups.append(
            {"params": [param], "lr": lr, "weight_decay": weight_decay}
        )
    return torch.optim.SGD(param_groups, lr, momentum=cfg.SOLVER.MOMENTUM)
def make_lr_scheduler(cfg, optimizer):
    """Build a WarmupMultiStepLR schedule from the solver configuration."""
    solver = cfg.SOLVER
    return WarmupMultiStepLR(
        optimizer,
        solver.STEPS,
        solver.GAMMA,
        warmup_factor=solver.WARMUP_FACTOR,
        warmup_iters=solver.WARMUP_ITERS,
        warmup_method=solver.WARMUP_METHOD,
    )
| 32.555556
| 91
| 0.653584
|
4a00177db6e5eeadd9043c16463ad28fc3dedb2f
| 16,856
|
py
|
Python
|
anchore_engine/services/policy_engine/engine/feeds/client.py
|
signed8bit/anchore-engine
|
ef8a2d2155f7c053179ff890826eae3fc5ae0ab8
|
[
"Apache-2.0"
] | null | null | null |
anchore_engine/services/policy_engine/engine/feeds/client.py
|
signed8bit/anchore-engine
|
ef8a2d2155f7c053179ff890826eae3fc5ae0ab8
|
[
"Apache-2.0"
] | null | null | null |
anchore_engine/services/policy_engine/engine/feeds/client.py
|
signed8bit/anchore-engine
|
ef8a2d2155f7c053179ff890826eae3fc5ae0ab8
|
[
"Apache-2.0"
] | null | null | null |
import abc
import copy
import requests
import requests.exceptions
import json
import datetime
import typing
from io import BytesIO
import ijson
from anchore_engine.configuration import localconfig
from anchore_engine.subsys import logger
from anchore_engine.utils import ensure_str, ensure_bytes, AnchoreException
from anchore_engine.services.policy_engine.engine.feeds import (
IFeedSource,
FeedGroupList,
FeedList,
GroupData,
)
from anchore_engine.common.schemas import (
FeedAPIRecord,
FeedAPIGroupRecord,
)
FEED_DATA_ITEMS_PATH = "data.item"
FEED_DATA_NEXT_TOKEN_PATH = "next_token"
class FeedServiceClient(IFeedSource):
    """
    Base client class with no auth
    """

    def __init__(self, endpoint, http_client=None):
        """Create a client bound to *endpoint*.

        :param endpoint: base URL of the feed service (required)
        :param http_client: object with an ``execute_request()`` method that
            performs the actual HTTP calls (e.g. HTTPBasicAuthClient)
        """
        if not endpoint:
            raise ValueError("endpoint cannot be None")
        self.http_client = http_client
        self.feed_url = endpoint
        # URL templates for group listing and group-data downloads.
        self.group_url = self.feed_url + "/{feed}"
        self.group_data_url = self.group_url + "/{group}"
        self.retry_count = 3

    def _map_error_to_exception(self, exc, username, url):
        # Translate HTTP status codes into typed exceptions for callers.
        if exc.response.status_code == 401:
            raise InvalidCredentialsError(username, url)
        elif exc.response.status_code == 403:
            raise InsufficientAccessTierError(
                "Access denied due to insufficient permissions for user: {}".format(
                    username
                )
            )
        else:
            raise Exception(
                "Feed operation failed for user: {}. Msg: {}. Response: {}".format(
                    username, exc.response, exc.response.body
                )
            )

    def list_feeds(self) -> FeedList:
        """Fetch the full list of feeds, following ``next_token`` pagination."""
        more_data = True
        next_token = None
        feed_list = FeedList(feeds=[])

        while more_data:
            url = self.feed_url + (("?next_token=" + next_token) if next_token else "")
            try:
                record = self.http_client.execute_request(
                    requests.get, url, retries=self.retry_count
                )
                if record["success"]:
                    data = json.loads(ensure_str(record["content"]))
                    if data and "feeds" in data:
                        feed_list.feeds.extend(
                            [
                                FeedAPIRecord(
                                    name=x.get("name"),
                                    description=x.get("description"),
                                    access_tier=x.get("access_tier"),
                                )
                                for x in data["feeds"]
                            ]
                        )
                    # Keep paging while the service returns a non-empty token.
                    if "next_token" in data and data["next_token"]:
                        next_token = data["next_token"]
                        more_data = True
                    else:
                        more_data = False
                else:
                    raise Exception(
                        "Feed list operation failed. Msg: {}. Response: {}".format(
                            record.get("err_msg"), record.get("text")
                        )
                    )
            except Exception as e:
                logger.exception("Error executing feed listing: {}".format(e))
                raise e
        return feed_list

    def list_feed_groups(self, feed: str) -> FeedGroupList:
        """Fetch all groups of *feed*, following ``next_token`` pagination.

        Structurally mirrors :meth:`list_feeds`.
        """
        group_list = FeedGroupList(groups=[])
        more_data = True
        next_token = None

        while more_data:
            url = self.group_url.format(feed=feed) + (
                ("?next_token=" + next_token) if next_token else ""
            )
            try:
                record = self.http_client.execute_request(
                    requests.get, url, retries=self.retry_count
                )
                if record["success"]:
                    data = json.loads(ensure_str(record["content"]))
                    if "groups" in data:
                        group_list.groups.extend(
                            [
                                FeedAPIGroupRecord(
                                    name=x.get("name"),
                                    description=x.get("description"),
                                    access_tier=x.get("access_tier"),
                                )
                                for x in data["groups"]
                            ]
                        )
                    if "next_token" in data and data["next_token"]:
                        next_token = data["next_token"]
                        more_data = True
                    else:
                        more_data = False
                else:
                    raise Exception(
                        "Feed list operation failed. Msg: {}. Response: {}".format(
                            record.get("err_msg"), record.get("text")
                        )
                    )
            except Exception as e:
                logger.debug("Error executing feed listing: {}".format(e))
                raise e
        return group_list

    def get_feed_group_data(
        self,
        feed: str,
        group: str,
        since: datetime.datetime = None,
        next_token: str = None,
    ):
        """Download one page of data for *feed*/*group*, wrapped in GroupData.

        :param since: only fetch records newer than this timestamp
        :param next_token: pagination token from a previous page
        """
        try:
            record = self.get_raw_feed_group_data(feed, group, since, next_token)
            if record["success"]:
                next_token, group_data, count = self._extract_response_data(
                    record["content"]
                )
                return GroupData(
                    data=group_data,
                    next_token=next_token,
                    since=since,
                    record_count=count,
                )
            else:
                raise Exception(
                    "Feed list operation failed. Msg: {}. Response: {}".format(
                        record.get("err_msg"), record.get("text")
                    )
                )
        except Exception as e:
            logger.debug("Error executing feed data download: {}".format(e))
            raise e

    def get_raw_feed_group_data(
        self,
        feed: str,
        group: str,
        since: datetime.datetime = None,
        next_token: str = None,
    ) -> dict:
        """Issue the HTTP GET for one group-data page and return the raw record.

        Returns the dict produced by ``http_client.execute_request()``
        ('success', 'status_code', 'content'); the original ``typing.Tuple``
        annotation did not match the actual return value.
        """
        if since and not isinstance(since, datetime.datetime):
            raise TypeError("since should be a datetime object")

        # Build the query string from whichever of since/next_token are set.
        baseurl = self.group_data_url.format(feed=feed, group=group)
        if since:
            baseurl += "?since={}".format(since.isoformat())
            if next_token:
                url = baseurl + "&next_token={}".format(next_token)
            else:
                url = baseurl
        elif next_token:
            url = baseurl + "?next_token={}".format(next_token)
        else:
            url = baseurl

        logger.debug("data group url: " + str(url))
        try:
            return self.http_client.execute_request(
                requests.get, url, retries=self.retry_count
            )
        except Exception as e:
            logger.debug("Error executing feed data download: {}".format(e))
            raise e

    def _extract_response_data(self, response_text):
        """Stream-parse *response_text* (raw JSON bytes) for the pagination
        token and the record count without materializing the record list.

        :returns: tuple ``(next_token, response_text, count)``
        """
        next_token = None
        sio = BytesIO(response_text)
        count = 0

        # Get the next token
        p = ijson.items(sio, FEED_DATA_NEXT_TOKEN_PATH)
        d = [x for x in p]
        if len(d) == 1:
            next_token = d[0]
            # Be explicit, no empty strings
            if not next_token:
                next_token = None

        # Get the record count
        # Not using the special parser for handling decimals here because this isn't on the return path, just counting records
        sio.seek(0)
        for i in ijson.items(sio, FEED_DATA_ITEMS_PATH):
            count += 1

        logger.debug("Found {} records in data chunk".format(count))
        sio.close()

        return next_token, response_text, count
def get_client(
    feeds_url=None, user=None, conn_timeout=None, read_timeout=None, ssl_verify=None
):
    """
    Returns a configured client based on the local config. Reads configuration from the loaded system configuration.

    Uses the admin user's credentials for the feed service if they are available in the external_service_auths/anchoreio/anchorecli/auth json path of the config file. If no specific user credentials are found then the anonymous user credentials are used.

    :return: initialized :class:`FeedServiceClient`
    """
    logger.debug(
        "Initializing a feeds client: url={}, user={}, conn_timeout={}, read_timeout={}".format(
            feeds_url,
            user
            if user is None or type(user) not in [tuple, list] or len(user) == 0
            else (user[0], "***redacted**"),
            conn_timeout,
            read_timeout,
        )
    )

    # If any parameter is missing, fall back to the system configuration;
    # otherwise synthesize a minimal config dict from the explicit arguments.
    if not (feeds_url and user and conn_timeout and read_timeout):
        conf = localconfig.get_config()

        if not conf:
            logger.error("No configuration available. Cannot initialize feed client")
            raise ValueError("None for local config")
    else:
        conf = {
            "feeds": {
                "connection_timeout_seconds": conn_timeout,
                "read_timeout_seconds": read_timeout,
                "url": feeds_url,
                "ssl_verify": ssl_verify,
            }
        }

    if not conn_timeout:
        conn_timeout = conf.get("feeds", {}).get("connection_timeout_seconds")

    if not read_timeout:
        read_timeout = conf.get("feeds", {}).get("read_timeout_seconds")

    if not feeds_url:
        feeds_url = conf.get("feeds", {}).get("url")
        if not feeds_url:
            raise ValueError("no feed service url available")

    verify = conf.get("feeds", {}).get("ssl_verify", True)

    password = None
    if not user:
        # Prefer the admin user's anchore.io credentials when configured.
        try:
            admin_usr = (
                conf.get("credentials", {})
                .get("users", {})
                .get("admin", {})
                .get("external_service_auths", {})
                .get("anchoreio", {})
                .get("anchorecli", {})
                .get("auth")
            )
            if admin_usr:
                user, password = admin_usr.split(":")
        except AttributeError:
            # Something isn't found or was set to None.
            pass
    else:
        # Caller passed a (username, password) pair.
        user, password = user[0], user[1]

    if not user:
        # Fall back to the anonymous feed-service user.
        user = conf.get("feeds", {}).get("anonymous_user_username")
        password = conf.get("feeds", {}).get("anonymous_user_password")

    logger.debug("using values: " + str([feeds_url, user, conn_timeout, read_timeout]))

    http_client = HTTPBasicAuthClient(
        username=user,
        password=password,
        connect_timeout=conn_timeout,
        read_timeout=read_timeout,
        verify=verify,
    )

    return FeedServiceClient(endpoint=feeds_url, http_client=http_client)
class InsufficientAccessTierError(Exception):
    """Raised when the user's access tier does not permit the requested feed
    resource (mapped from HTTP 403 in ``_map_error_to_exception``)."""

    pass
class InvalidCredentialsError(Exception):
    """Raised when the feed service rejects the supplied credentials
    (mapped from HTTP 401)."""

    def __init__(self, username, target):
        message = "Invalid credential for user {} for url: {}".format(
            username, target
        )
        super().__init__(message)
class IAuthenticatedHTTPClientBase(abc.ABC):
    """Interface for HTTP clients that execute requests with timeout and
    retry control on behalf of a (possibly authenticated) user."""

    @abc.abstractmethod
    def execute_request(
        self, method, url, connect_timeout=None, read_timeout=None, retries=None
    ):
        """Execute *method* (e.g. ``requests.get``) against *url* and return
        a result record."""
        pass

    @property
    @abc.abstractmethod
    def user(self):
        """Username this client authenticates as."""
        pass
class HTTPBasicAuthClient(IAuthenticatedHTTPClientBase):
    """
    Simple base client type for operations with no auth needed
    """

    # Class-level defaults; copied per-instance in __init__ so instances
    # never mutate this shared dict.
    client_config = {
        "max_retries": 3,
        "conn_timeout": 3,
        "read_timeout": 60,
        "verify": True,
    }

    def __init__(
        self,
        username,
        password,
        connect_timeout=None,
        read_timeout=None,
        retries=None,
        verify=True,
    ):
        """Store credentials and override any defaults that were supplied."""
        self.auth_config = copy.copy(self.client_config)
        self._user = username
        self.password = password
        self.retries = retries

        if connect_timeout:
            self.auth_config["conn_timeout"] = connect_timeout
        if read_timeout:
            self.auth_config["read_timeout"] = read_timeout
        if retries:
            self.auth_config["max_retries"] = retries
        self.auth_config["verify"] = verify

    @property
    def user(self):
        # Read-only accessor required by IAuthenticatedHTTPClientBase.
        return self._user

    def _map_error_to_exception(self, exc, username, url=None):
        # Same mapping as FeedServiceClient._map_error_to_exception; kept in
        # sync manually -- NOTE(review): candidate for a shared helper.
        if exc.response.status_code == 401:
            raise InvalidCredentialsError(username, url)
        elif exc.response.status_code == 403:
            raise InsufficientAccessTierError(
                "Access denied due to insufficient permissions for user: {}".format(
                    username
                )
            )
        else:
            raise Exception(
                "Feed operation failed for user: {}. Msg: {}. Response: {}".format(
                    self.user, exc.response, exc.response.body
                )
            )

    def authenticated_get(
        self, url, connect_timeout=None, read_timeout=None, retries=None
    ):
        """Convenience wrapper: GET *url* with this client's credentials."""
        return self.execute_request(
            requests.get, url, connect_timeout, read_timeout, retries
        )

    def execute_request(
        self, method, url, connect_timeout=None, read_timeout=None, retries=None
    ):
        """
        Execute an HTTP request with auth params and the specified timeout overrides

        Only HTTP 200 ends the retry loop successfully; 401/403/404 are raised
        and mapped to typed exceptions; any other failure is recorded in the
        result dict and retried up to *retries* times.

        :param method: a callable for the http method to execute (e.g. requests.get, requests.put, ...)
        :param url:
        :param timeout:
        :param retries:
        :return: dict with keys 'status_code', 'content', 'success'
        """
        # make a request
        if not connect_timeout:
            connect_timeout = int(self.auth_config["conn_timeout"])
        if not read_timeout:
            read_timeout = int(self.auth_config["read_timeout"])

        if not retries:
            retries = int(self.auth_config["max_retries"])
        retries = int(retries)

        verify = self.auth_config["verify"]

        ret = {"status_code": 1, "content": "", "success": False}

        success = False
        count = 0

        conn_timeout = int(connect_timeout)
        read_timeout = int(read_timeout)

        while not success and count < retries:
            count += 1
            logger.debug("get attempt " + str(count) + " of " + str(retries))
            try:
                auth = (self.user, self.password)
                logger.debug(
                    "making authenticated request (user={}, conn_timeout={}, read_timeout={}, verify={}) to url {}".format(
                        str(self.user), conn_timeout, read_timeout, verify, str(url)
                    )
                )
                r = method(
                    url, auth=auth, timeout=(conn_timeout, read_timeout), verify=verify
                )
                logger.debug("\tresponse status_code: " + str(r.status_code))
                if r.status_code == 401:
                    logger.debug(
                        "Got HTTP 401 on authenticated {}, response body: {}".format(
                            method.__name__, str(r.text)
                        )
                    )
                    r.raise_for_status()
                elif r.status_code == 200:
                    success = True
                    ret["success"] = True
                elif r.status_code in [403, 404]:
                    r.raise_for_status()

                ret["status_code"] = r.status_code
                ret["content"] = r.content
            except requests.exceptions.ConnectTimeout as err:
                # Timeouts are retried; the error text is kept for the caller.
                logger.debug("attempt failed: " + str(err))
                ret["content"] = ensure_bytes("server error: timed_out: " + str(err))
                # return(ret)

            except requests.HTTPError as e:
                # 4xx responses become typed exceptions; everything else is
                # recorded and retried.
                if e.response is not None and 400 <= e.response.status_code < 500:
                    self._map_error_to_exception(e, username=self.user, url=url)
                    # raise e
                else:
                    logger.debug("attempt failed: " + str(e))
                    ret["content"] = ensure_bytes("server error: " + str(e))
            except Exception as err:
                logger.debug("attempt failed: " + str(err))
                ret["content"] = ensure_bytes("server error: " + str(err))

        return ret
class AnchoreIOClientError(AnchoreException):
    """Signals a failure to initialize the anchore.io client with the
    configured credentials."""

    def __init__(
        self,
        cause,
        msg="Error initializing anchore.io client with configured credentials",
    ):
        self.cause = str(cause)
        self.msg = msg

    def _describe(self):
        # Single formatting point shared by __repr__ and __str__.
        return "{} - exception: {}".format(self.msg, self.cause)

    def __repr__(self):
        return self._describe()

    def __str__(self):
        return self._describe()
| 33.246548
| 254
| 0.531324
|
4a00177fd1c6af5806d144d3ab0399564e71b96c
| 4,039
|
py
|
Python
|
tests/data/test_waveform_generator.py
|
AndreFCruz/scikit-multiflow
|
c4dbbb70d4ed839d95a18ca799f073ac9ff9ba49
|
[
"BSD-3-Clause"
] | null | null | null |
tests/data/test_waveform_generator.py
|
AndreFCruz/scikit-multiflow
|
c4dbbb70d4ed839d95a18ca799f073ac9ff9ba49
|
[
"BSD-3-Clause"
] | null | null | null |
tests/data/test_waveform_generator.py
|
AndreFCruz/scikit-multiflow
|
c4dbbb70d4ed839d95a18ca799f073ac9ff9ba49
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import numpy as np
from skmultiflow.data.waveform_generator import WaveformGenerator
def test_waveform_generator(test_path):
    """WaveformGenerator without noise: metadata and the first samples must
    match the stored reference stream.

    Fix: ``np.alltrue`` replaced with ``np.all`` -- the alias was deprecated
    in NumPy 1.25 and removed in NumPy 2.0.
    """
    stream = WaveformGenerator(random_state=23, has_noise=False)
    stream.prepare_for_use()

    assert stream.n_remaining_samples() == -1

    expected_names = ['att_num_0', 'att_num_1', 'att_num_2', 'att_num_3', 'att_num_4',
                      'att_num_5', 'att_num_6', 'att_num_7', 'att_num_8', 'att_num_9',
                      'att_num_10', 'att_num_11', 'att_num_12', 'att_num_13', 'att_num_14',
                      'att_num_15', 'att_num_16', 'att_num_17', 'att_num_18', 'att_num_19',
                      'att_num_20']
    assert stream.feature_names == expected_names

    expected_targets = [0, 1, 2]
    assert stream.target_values == expected_targets

    assert stream.target_names == ['target_0']

    assert stream.n_features == 21

    assert stream.n_cat_features == 0

    assert stream.n_num_features == 21

    assert stream.n_targets == 1

    assert stream.get_data_info() == 'Waveform Generator - 1 targets, 3 classes, 21 features'

    assert stream.has_more_samples() is True

    assert stream.is_restartable() is True

    # Load test data corresponding to first 10 instances
    test_file = os.path.join(test_path, 'waveform_stream.npz')
    data = np.load(test_file)
    X_expected = data['X']
    y_expected = data['y']

    X, y = stream.next_sample()
    assert np.all(X[0] == X_expected[0])
    assert np.all(y[0] == y_expected[0])

    X, y = stream.last_sample()
    assert np.all(X[0] == X_expected[0])
    assert np.all(y[0] == y_expected[0])

    stream.restart()
    X, y = stream.next_sample(10)
    assert np.all(X == X_expected)
    assert np.all(y == y_expected)

    assert stream.n_targets == np.array(y).ndim

    assert stream.n_features == X.shape[1]
def test_waveform_generator_noise(test_path):
# Noise test
stream = WaveformGenerator(random_state=23, has_noise=True)
stream.prepare_for_use()
assert stream.n_remaining_samples() == -1
expected_names = ['att_num_0', 'att_num_1', 'att_num_2', 'att_num_3', 'att_num_4',
'att_num_5', 'att_num_6', 'att_num_7', 'att_num_8', 'att_num_9',
'att_num_10', 'att_num_11', 'att_num_12', 'att_num_13', 'att_num_14',
'att_num_15', 'att_num_16', 'att_num_17', 'att_num_18', 'att_num_19',
'att_num_20', 'att_num_21', 'att_num_22', 'att_num_23', 'att_num_24',
'att_num_25', 'att_num_26', 'att_num_27', 'att_num_28', 'att_num_29',
'att_num_30', 'att_num_31', 'att_num_32', 'att_num_33', 'att_num_34',
'att_num_35', 'att_num_36', 'att_num_37', 'att_num_38', 'att_num_39',
]
assert stream.feature_names == expected_names
expected_targets = [0, 1, 2]
assert stream.target_values == expected_targets
assert stream.target_names == ['target_0']
assert stream.n_features == 40
assert stream.n_cat_features == 0
assert stream.n_num_features == 40
assert stream.n_targets == 1
assert stream.get_data_info() == 'Waveform Generator - 1 targets, 3 classes, 40 features'
assert stream.has_more_samples() is True
assert stream.is_restartable() is True
# Load test data corresponding to first 10 instances
test_file = os.path.join(test_path, 'waveform_noise_stream.npz')
data = np.load(test_file)
X_expected = data['X']
y_expected = data['y']
X, y = stream.next_sample()
assert np.alltrue(X[0] == X_expected[0])
assert np.alltrue(y[0] == y_expected[0])
X, y = stream.last_sample()
assert np.alltrue(X[0] == X_expected[0])
assert np.alltrue(y[0] == y_expected[0])
stream.restart()
X, y = stream.next_sample(10)
assert np.alltrue(X == X_expected)
assert np.alltrue(y == y_expected)
assert stream.n_targets == np.array(y).ndim
assert stream.n_features == X.shape[1]
| 33.380165
| 93
| 0.644466
|
4a001925b0775f2a56cc554989ce6ac20c150c79
| 1,148
|
py
|
Python
|
cloudcafe/compute/extensions/volumes_boot_api/composites.py
|
rcbops-qa/cloudcafe
|
d937f85496aadafbb94a330b9adb8ea18bee79ba
|
[
"Apache-2.0"
] | null | null | null |
cloudcafe/compute/extensions/volumes_boot_api/composites.py
|
rcbops-qa/cloudcafe
|
d937f85496aadafbb94a330b9adb8ea18bee79ba
|
[
"Apache-2.0"
] | null | null | null |
cloudcafe/compute/extensions/volumes_boot_api/composites.py
|
rcbops-qa/cloudcafe
|
d937f85496aadafbb94a330b9adb8ea18bee79ba
|
[
"Apache-2.0"
] | 1
|
2020-04-13T17:44:28.000Z
|
2020-04-13T17:44:28.000Z
|
"""
Copyright 2014 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cloudcafe.compute.common.composites import BaseComputeComposite
from cloudcafe.compute.extensions.volumes_boot_api.behaviors import \
VolumeServerBehaviors
from cloudcafe.compute.extensions.volumes_boot_api.client import \
VolumesBootClient
class BootFromVolumeComposite(BaseComputeComposite):
behavior_class = VolumeServerBehaviors
def __init__(self, auth_composite):
super(BootFromVolumeComposite, self).__init__(auth_composite)
self.client = VolumesBootClient(
**self.compute_auth_composite.client_args)
self.behaviors = None
| 35.875
| 72
| 0.787456
|
4a0019a30889f3f8050876acafa5dc9a6d3631ed
| 10,928
|
py
|
Python
|
pybind/slxos/v16r_1_00b/interface/ethernet/interface_eth_isis_conf/intf_isis/interface_isis/interface_metric/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/interface/ethernet/interface_eth_isis_conf/intf_isis/interface_isis/interface_metric/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/interface/ethernet/interface_eth_isis_conf/intf_isis/interface_isis/interface_metric/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class interface_metric(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/ethernet/interface-eth-isis-conf/intf-isis/interface-isis/interface-metric. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__interface_metric_level','__interface_metric_val',)
_yang_name = 'interface-metric'
_rest_name = 'metric'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__interface_metric_val = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16777215']}), is_leaf=True, yang_name="interface-metric-val", rest_name="interface-metric-val", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='uint32', is_config=True)
self.__interface_metric_level = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'level-2': {'value': 2}, u'level-1': {'value': 1}},), is_leaf=True, yang_name="interface-metric-level", rest_name="interface-metric-level", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-incomplete-command': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='enumeration', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'ethernet', u'interface-eth-isis-conf', u'intf-isis', u'interface-isis', u'interface-metric']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Ethernet', u'isis', u'metric']
def _get_interface_metric_level(self):
"""
Getter method for interface_metric_level, mapped from YANG variable /interface/ethernet/interface_eth_isis_conf/intf_isis/interface_isis/interface_metric/interface_metric_level (enumeration)
"""
return self.__interface_metric_level
def _set_interface_metric_level(self, v, load=False):
"""
Setter method for interface_metric_level, mapped from YANG variable /interface/ethernet/interface_eth_isis_conf/intf_isis/interface_isis/interface_metric/interface_metric_level (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_metric_level is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_metric_level() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'level-2': {'value': 2}, u'level-1': {'value': 1}},), is_leaf=True, yang_name="interface-metric-level", rest_name="interface-metric-level", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-incomplete-command': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_metric_level must be of a type compatible with enumeration""",
'defined-type': "brocade-isis:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'level-2': {'value': 2}, u'level-1': {'value': 1}},), is_leaf=True, yang_name="interface-metric-level", rest_name="interface-metric-level", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-incomplete-command': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='enumeration', is_config=True)""",
})
self.__interface_metric_level = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_metric_level(self):
self.__interface_metric_level = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'level-2': {'value': 2}, u'level-1': {'value': 1}},), is_leaf=True, yang_name="interface-metric-level", rest_name="interface-metric-level", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-incomplete-command': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='enumeration', is_config=True)
def _get_interface_metric_val(self):
"""
Getter method for interface_metric_val, mapped from YANG variable /interface/ethernet/interface_eth_isis_conf/intf_isis/interface_isis/interface_metric/interface_metric_val (uint32)
"""
return self.__interface_metric_val
def _set_interface_metric_val(self, v, load=False):
"""
Setter method for interface_metric_val, mapped from YANG variable /interface/ethernet/interface_eth_isis_conf/intf_isis/interface_isis/interface_metric/interface_metric_val (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_metric_val is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_metric_val() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16777215']}), is_leaf=True, yang_name="interface-metric-val", rest_name="interface-metric-val", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_metric_val must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16777215']}), is_leaf=True, yang_name="interface-metric-val", rest_name="interface-metric-val", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='uint32', is_config=True)""",
})
self.__interface_metric_val = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_metric_val(self):
self.__interface_metric_val = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16777215']}), is_leaf=True, yang_name="interface-metric-val", rest_name="interface-metric-val", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='uint32', is_config=True)
interface_metric_level = __builtin__.property(_get_interface_metric_level, _set_interface_metric_level)
interface_metric_val = __builtin__.property(_get_interface_metric_val, _set_interface_metric_val)
_pyangbind_elements = {'interface_metric_level': interface_metric_level, 'interface_metric_val': interface_metric_val, }
| 67.042945
| 646
| 0.721083
|
4a001a3479facf98b14b33047bd675f1a288c417
| 6,034
|
py
|
Python
|
paasta_tools/oom_logger.py
|
sofyat/paasta
|
6765ed992ec224bf78741ffcd615a6a6c6ffb780
|
[
"Apache-2.0"
] | null | null | null |
paasta_tools/oom_logger.py
|
sofyat/paasta
|
6765ed992ec224bf78741ffcd615a6a6c6ffb780
|
[
"Apache-2.0"
] | null | null | null |
paasta_tools/oom_logger.py
|
sofyat/paasta
|
6765ed992ec224bf78741ffcd615a6a6c6ffb780
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2015-2017 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
paasta_oom_logger is supposed to be used as a syslog-ng destination.
It looks for OOM events in the log, adds PaaSTA service and instance names
and send JSON-encoded messages the Scribe stream 'tmp_paasta_oom_events'.
syslog-ng.conf:
destination paasta_oom_logger {
program("exec /usr/bin/paasta_oom_logger" template("${UNIXTIME} ${HOST} ${MESSAGE}\n") );
};
filter f_cgroup_oom {
match(" killed as a result of limit of ") or match(" invoked oom-killer: ");
};
log {
source(s_all);
filter(f_cgroup_oom);
destination(paasta_oom_logger);
};
"""
import re
import sys
from collections import namedtuple
from docker.errors import APIError
from paasta_tools.cli.utils import get_instance_config
from paasta_tools.utils import _log
from paasta_tools.utils import DEFAULT_LOGLEVEL
from paasta_tools.utils import get_docker_client
from paasta_tools.utils import load_system_paasta_config
try:
import yelp_meteorite
except ImportError:
# Sorry to any non-yelpers but you won't
# get metrics emitted as our metrics lib
# is currently not open source
yelp_meteorite = None
LogLine = namedtuple(
"LogLine",
[
"timestamp",
"hostname",
"container_id",
"cluster",
"service",
"instance",
"process_name",
],
)
def capture_oom_events_from_stdin():
process_name_regex = re.compile(
r"^\d+\s[a-zA-Z0-9\-]+\s.*\]\s(.+)\sinvoked\soom-killer:"
)
oom_regex_docker = re.compile(
r"^(\d+)\s([a-zA-Z0-9\-]+)\s.*Task in /docker/(\w{12})\w+ killed as a"
)
oom_regex_kubernetes = re.compile(
r"^(\d+)\s([a-zA-Z0-9\-]+)\s.*Task in /kubepods/[a-zA-Z]+/pod[-\w]+/(\w{12})\w+ killed as a"
)
process_name = ""
while True:
syslog = sys.stdin.readline()
if not syslog:
break
r = process_name_regex.search(syslog)
if r:
process_name = r.group(1)
r = oom_regex_docker.search(syslog)
if r:
yield (int(r.group(1)), r.group(2), r.group(3), process_name)
process_name = ""
r = oom_regex_kubernetes.search(syslog)
if r:
yield (int(r.group(1)), r.group(2), r.group(3), process_name)
process_name = ""
def get_container_env_as_dict(docker_inspect):
env_vars = {}
config = docker_inspect.get("Config")
if config is not None:
env = config.get("Env", [])
for i in env:
name, _, value = i.partition("=")
env_vars[name] = value
return env_vars
def log_to_scribe(logger, log_line):
"""Send the event to 'tmp_paasta_oom_events'."""
line = (
'{"timestamp": %d, "hostname": "%s", "container_id": "%s", "cluster": "%s", '
'"service": "%s", "instance": "%s", "process_name": "%s"}'
% (
log_line.timestamp,
log_line.hostname,
log_line.container_id,
log_line.cluster,
log_line.service,
log_line.instance,
log_line.process_name,
)
)
logger.log_line("tmp_paasta_oom_events", line)
def log_to_paasta(log_line):
"""Add the event to the standard PaaSTA logging backend."""
line = "oom-killer killed {} on {} (container_id: {}).".format(
"a %s process" % log_line.process_name
if log_line.process_name
else "a process",
log_line.hostname,
log_line.container_id,
)
_log(
service=log_line.service,
instance=log_line.instance,
component="oom",
cluster=log_line.cluster,
level=DEFAULT_LOGLEVEL,
line=line,
)
def send_sfx_event(service, instance, cluster):
if yelp_meteorite:
service_instance_config = get_instance_config(
service=service, instance=instance, cluster=cluster
)
yelp_meteorite.events.emit_event(
"paasta.service.oom_events",
dimensions={
"paasta_cluster": cluster,
"paasta_instance": instance,
"paasta_service": service,
"paasta_pool": service_instance_config.get_pool(),
},
)
def main():
try:
from clog.loggers import ScribeLogger
except ImportError:
print("Scribe logger unavailable, exiting.", file=sys.stderr)
sys.exit(1)
scribe_logger = ScribeLogger(host="169.254.255.254", port=1463, retry_interval=5)
cluster = load_system_paasta_config().get_cluster()
client = get_docker_client()
for (
timestamp,
hostname,
container_id,
process_name,
) in capture_oom_events_from_stdin():
try:
docker_inspect = client.inspect_container(resource_id=container_id)
except (APIError):
continue
env_vars = get_container_env_as_dict(docker_inspect)
service = env_vars.get("PAASTA_SERVICE", "unknown")
instance = env_vars.get("PAASTA_INSTANCE", "unknown")
log_line = LogLine(
timestamp=timestamp,
hostname=hostname,
container_id=container_id,
cluster=cluster,
service=service,
instance=instance,
process_name=process_name,
)
log_to_scribe(scribe_logger, log_line)
log_to_paasta(log_line)
send_sfx_event(service, instance, cluster)
if __name__ == "__main__":
main()
| 29.724138
| 100
| 0.62761
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.