hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0706a99ed381161a3c1ab460e11daee648ac9c0f
| 74
|
py
|
Python
|
djangocms_versioning/test_utils/polls/views.py
|
NarenderRajuB/djangocms-versioning
|
aa7a16fe275a6d8a41781ffb1e10427de917c2d5
|
[
"BSD-3-Clause"
] | 12
|
2018-09-04T10:33:16.000Z
|
2021-09-07T14:30:12.000Z
|
djangocms_versioning/test_utils/polls/views.py
|
NarenderRajuB/djangocms-versioning
|
aa7a16fe275a6d8a41781ffb1e10427de917c2d5
|
[
"BSD-3-Clause"
] | 46
|
2018-07-31T08:45:17.000Z
|
2021-09-08T15:45:05.000Z
|
djangocms_versioning/test_utils/polls/views.py
|
NarenderRajuB/djangocms-versioning
|
aa7a16fe275a6d8a41781ffb1e10427de917c2d5
|
[
"BSD-3-Clause"
] | 16
|
2018-08-30T19:08:45.000Z
|
2021-07-13T11:31:43.000Z
|
from django.views.generic import View
class PreviewView(View):
pass
| 12.333333
| 37
| 0.756757
| 10
| 74
| 5.6
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175676
| 74
| 5
| 38
| 14.8
| 0.918033
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
07226cf588afcbd56fa40ee37f2c1c59471161f2
| 124
|
py
|
Python
|
09-2021-06-18/src/_solutions/bank_account_v0.py
|
eotp/python-FU-class
|
f0a7518b3e3204a77e8855bef91afeaabb0d52ac
|
[
"MIT"
] | 1
|
2020-01-17T14:51:40.000Z
|
2020-01-17T14:51:40.000Z
|
08-2022-06-23/src/_solutions/bank_account_v0.py
|
eotp/python-FU-WiSe1920
|
4f225430ef8a70faca8c86c77cc888524c8e0546
|
[
"MIT"
] | null | null | null |
08-2022-06-23/src/_solutions/bank_account_v0.py
|
eotp/python-FU-WiSe1920
|
4f225430ef8a70faca8c86c77cc888524c8e0546
|
[
"MIT"
] | 1
|
2020-12-04T15:37:28.000Z
|
2020-12-04T15:37:28.000Z
|
class BankAccount:
bank_name = 'My Bank'
def print_bank_name(self):
print('My name is', self.bank_name)
| 24.8
| 43
| 0.637097
| 18
| 124
| 4.166667
| 0.5
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.258065
| 124
| 5
| 43
| 24.8
| 0.815217
| 0
| 0
| 0
| 0
| 0
| 0.136
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.75
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
|
0
| 6
|
074a76853877fb1d3a7c84c006b854d3200d5a91
| 26,415
|
py
|
Python
|
imgaug/augmenters/pooling.py
|
fmder/imgaug
|
4c81c7a7503b64f54d76144385ea4330fd7c8a84
|
[
"MIT"
] | null | null | null |
imgaug/augmenters/pooling.py
|
fmder/imgaug
|
4c81c7a7503b64f54d76144385ea4330fd7c8a84
|
[
"MIT"
] | null | null | null |
imgaug/augmenters/pooling.py
|
fmder/imgaug
|
4c81c7a7503b64f54d76144385ea4330fd7c8a84
|
[
"MIT"
] | null | null | null |
"""
Augmenters that apply pooling operations to images.
List of augmenters:
* :class:`AveragePooling`
* :class:`MaxPooling`
* :class:`MinPooling`
* :class:`MedianPooling`
"""
from __future__ import print_function, division, absolute_import
from abc import ABCMeta, abstractmethod
import functools
import six
import numpy as np
import imgaug as ia
from . import meta
from .. import parameters as iap
def _compute_shape_after_pooling(image_shape, ksize_h, ksize_w):
if any([axis == 0 for axis in image_shape]):
return image_shape
height, width = image_shape[0:2]
if height % ksize_h > 0:
height += ksize_h - (height % ksize_h)
if width % ksize_w > 0:
width += ksize_w - (width % ksize_w)
return tuple([
height//ksize_h,
width//ksize_w,
] + list(image_shape[2:]))
@six.add_metaclass(ABCMeta)
class _AbstractPoolingBase(meta.Augmenter):
# TODO add floats as ksize denoting fractions of image sizes
# (note possible overlap with fractional kernel sizes here)
def __init__(self, kernel_size, keep_size=True,
name=None, deterministic=False, random_state=None):
super(_AbstractPoolingBase, self).__init__(
name=name, deterministic=deterministic, random_state=random_state)
self.kernel_size = iap.handle_discrete_kernel_size_param(
kernel_size,
"kernel_size",
value_range=(0, None),
allow_floats=False)
self.keep_size = keep_size
self._resize_hm_and_sm_arrays = True
@abstractmethod
def _pool_image(self, image, kernel_size_h, kernel_size_w):
"""Apply pooling method with given kernel height/width to an image."""
def _draw_samples(self, nb_rows, random_state):
rss = random_state.duplicate(2)
mode = "single" if self.kernel_size[1] is None else "two"
kernel_sizes_h = self.kernel_size[0].draw_samples(
(nb_rows,),
random_state=rss[0])
if mode == "single":
kernel_sizes_w = kernel_sizes_h
else:
kernel_sizes_w = self.kernel_size[1].draw_samples(
(nb_rows,), random_state=rss[1])
return (
np.clip(kernel_sizes_h, 1, None),
np.clip(kernel_sizes_w, 1, None)
)
def _augment_batch(self, batch, random_state, parents, hooks):
if batch.images is None and self.keep_size:
return batch
samples = self._draw_samples(batch.nb_rows, random_state)
for column in batch.columns:
value_aug = getattr(
self, "_augment_%s_by_samples" % (column.name,)
)(column.value, samples)
setattr(batch, column.attr_name, value_aug)
return batch
def _augment_images_by_samples(self, images, samples):
if not self.keep_size:
images = list(images)
kernel_sizes_h, kernel_sizes_w = samples
gen = enumerate(zip(images, kernel_sizes_h, kernel_sizes_w))
for i, (image, ksize_h, ksize_w) in gen:
if ksize_h >= 2 or ksize_w >= 2:
image_pooled = self._pool_image(
image, ksize_h, ksize_w)
if self.keep_size:
image_pooled = ia.imresize_single_image(
image_pooled, image.shape[0:2])
images[i] = image_pooled
return images
def _augment_heatmaps_by_samples(self, heatmaps, samples):
return self._augment_hms_and_segmaps_by_samples(heatmaps, samples,
"arr_0to1")
def _augment_segmentation_maps_by_samples(self, segmaps, samples):
return self._augment_hms_and_segmaps_by_samples(segmaps, samples,
"arr")
def _augment_hms_and_segmaps_by_samples(self, augmentables, samples,
arr_attr_name):
if self.keep_size:
return augmentables
kernel_sizes_h, kernel_sizes_w = samples
gen = enumerate(zip(augmentables, kernel_sizes_h, kernel_sizes_w))
for i, (augmentable, ksize_h, ksize_w) in gen:
if ksize_h >= 2 or ksize_w >= 2:
# We could also keep the size of the HM/SM array unchanged
# here as the library can handle HMs/SMs that are larger
# than the image. This might be inintuitive however and
# could lead to unnecessary performance degredation.
if self._resize_hm_and_sm_arrays:
new_shape_arr = _compute_shape_after_pooling(
getattr(augmentable, arr_attr_name).shape,
ksize_h, ksize_w)
augmentable = augmentable.resize(new_shape_arr[0:2])
new_shape = _compute_shape_after_pooling(
augmentable.shape, ksize_h, ksize_w)
augmentable.shape = new_shape
augmentables[i] = augmentable
return augmentables
def _augment_keypoints_by_samples(self, keypoints_on_images, samples):
if self.keep_size:
return keypoints_on_images
kernel_sizes_h, kernel_sizes_w = samples
gen = enumerate(zip(keypoints_on_images, kernel_sizes_h,
kernel_sizes_w))
for i, (kpsoi, ksize_h, ksize_w) in gen:
if ksize_h >= 2 or ksize_w >= 2:
new_shape = _compute_shape_after_pooling(
kpsoi.shape, ksize_h, ksize_w)
keypoints_on_images[i] = kpsoi.on_(new_shape)
return keypoints_on_images
def _augment_polygons_by_samples(self, polygons_on_images, samples):
func = functools.partial(self._augment_keypoints_by_samples,
samples=samples)
return self._apply_to_polygons_as_keypoints(polygons_on_images, func,
recoverer=None)
def _augment_line_strings_by_samples(self, line_strings_on_images, samples):
func = functools.partial(self._augment_keypoints_by_samples,
samples=samples)
return self._apply_to_cbaois_as_keypoints(line_strings_on_images, func)
def _augment_bounding_boxes_by_samples(self, bounding_boxes_on_images,
samples):
func = functools.partial(self._augment_keypoints_by_samples,
samples=samples)
return self._apply_to_cbaois_as_keypoints(bounding_boxes_on_images,
func)
def get_parameters(self):
"""See :func:`imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.kernel_size, self.keep_size]
# TODO rename kernel size parameters in all augmenters to kernel_size
# TODO add per_channel
# TODO add upscaling interpolation mode?
class AveragePooling(_AbstractPoolingBase):
"""
Apply average pooling to images.
This augmenter pools images with kernel sizes ``H x W`` by averaging the
pixel values within these windows. For e.g. ``2 x 2`` this halves the image
size. Optionally, the augmenter will automatically re-upscale the image
to the input size (by default this is activated).
Note that this augmenter is very similar to ``AverageBlur``.
``AverageBlur`` applies averaging within windows of given kernel size
*without* striding, while ``AveragePooling`` applies striding corresponding
to the kernel size, with optional upscaling afterwards. The upscaling
is configured to create "pixelated"/"blocky" images by default.
.. note::
During heatmap or segmentation map augmentation, the respective
arrays are not changed, only the shapes of the underlying images
are updated. This is because imgaug can handle maps/maks that are
larger/smaller than their corresponding image.
dtype support::
See :func:`imgaug.imgaug.avg_pool`.
Attributes
----------
kernel_size : int or tuple of int or list of int or imgaug.parameters.StochasticParameter or tuple of tuple of int or tuple of list of int or tuple of imgaug.parameters.StochasticParameter, optional
The kernel size of the pooling operation.
* If an int, then that value will be used for all images for both
kernel height and width.
* If a tuple ``(a, b)``, then a value from the discrete range
``[a..b]`` will be sampled per image.
* If a list, then a random value will be sampled from that list per
image and used for both kernel height and width.
* If a StochasticParameter, then a value will be sampled per image
from that parameter per image and used for both kernel height and
width.
* If a tuple of tuple of int given as ``((a, b), (c, d))``, then two
values will be sampled independently from the discrete ranges
``[a..b]`` and ``[c..d]`` per image and used as the kernel height
and width.
* If a tuple of lists of int, then two values will be sampled
independently per image, one from the first list and one from the
second, and used as the kernel height and width.
* If a tuple of StochasticParameter, then two values will be sampled
indepdently per image, one from the first parameter and one from the
second, and used as the kernel height and width.
keep_size : bool, optional
After pooling, the result image will usually have a different
height/width compared to the original input image. If this
parameter is set to True, then the pooled image will be resized
to the input image's size, i.e. the augmenter's output shape is always
identical to the input shape.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.AveragePooling(2)
Create an augmenter that always pools with a kernel size of ``2 x 2``.
>>> aug = iaa.AveragePooling(2, keep_size=False)
Create an augmenter that always pools with a kernel size of ``2 x 2``
and does *not* resize back to the input image size, i.e. the resulting
images have half the resolution.
>>> aug = iaa.AveragePooling([2, 8])
Create an augmenter that always pools either with a kernel size
of ``2 x 2`` or ``8 x 8``.
>>> aug = iaa.AveragePooling((1, 7))
Create an augmenter that always pools with a kernel size of
``1 x 1`` (does nothing) to ``7 x 7``. The kernel sizes are always
symmetric.
>>> aug = iaa.AveragePooling(((1, 7), (1, 7)))
Create an augmenter that always pools with a kernel size of
``H x W`` where ``H`` and ``W`` are both sampled independently from the
range ``[1..7]``. E.g. resulting kernel sizes could be ``3 x 7``
or ``5 x 1``.
"""
# TODO add floats as ksize denoting fractions of image sizes
# (note possible overlap with fractional kernel sizes here)
def __init__(self, kernel_size, keep_size=True,
name=None, deterministic=False, random_state=None):
super(AveragePooling, self).__init__(
kernel_size=kernel_size, keep_size=keep_size,
name=name, deterministic=deterministic, random_state=random_state)
def _pool_image(self, image, kernel_size_h, kernel_size_w):
return ia.avg_pool(
image,
(kernel_size_h, kernel_size_w)
)
class MaxPooling(_AbstractPoolingBase):
"""
Apply max pooling to images.
This augmenter pools images with kernel sizes ``H x W`` by taking the
maximum pixel value over windows. For e.g. ``2 x 2`` this halves the image
size. Optionally, the augmenter will automatically re-upscale the image
to the input size (by default this is activated).
The maximum within each pixel window is always taken channelwise..
.. note::
During heatmap or segmentation map augmentation, the respective
arrays are not changed, only the shapes of the underlying images
are updated. This is because imgaug can handle maps/maks that are
larger/smaller than their corresponding image.
dtype support::
See :func:`imgaug.imgaug.max_pool`.
Attributes
----------
kernel_size : int or tuple of int or list of int or imgaug.parameters.StochasticParameter or tuple of tuple of int or tuple of list of int or tuple of imgaug.parameters.StochasticParameter, optional
The kernel size of the pooling operation.
* If an int, then that value will be used for all images for both
kernel height and width.
* If a tuple ``(a, b)``, then a value from the discrete range
``[a..b]`` will be sampled per image.
* If a list, then a random value will be sampled from that list per
image and used for both kernel height and width.
* If a StochasticParameter, then a value will be sampled per image
from that parameter per image and used for both kernel height and
width.
* If a tuple of tuple of int given as ``((a, b), (c, d))``, then two
values will be sampled independently from the discrete ranges
``[a..b]`` and ``[c..d]`` per image and used as the kernel height
and width.
* If a tuple of lists of int, then two values will be sampled
independently per image, one from the first list and one from the
second, and used as the kernel height and width.
* If a tuple of StochasticParameter, then two values will be sampled
indepdently per image, one from the first parameter and one from the
second, and used as the kernel height and width.
keep_size : bool, optional
After pooling, the result image will usually have a different
height/width compared to the original input image. If this
parameter is set to True, then the pooled image will be resized
to the input image's size, i.e. the augmenter's output shape is always
identical to the input shape.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.MaxPooling(2)
Create an augmenter that always pools with a kernel size of ``2 x 2``.
>>> aug = iaa.MaxPooling(2, keep_size=False)
Create an augmenter that always pools with a kernel size of ``2 x 2``
and does *not* resize back to the input image size, i.e. the resulting
images have half the resolution.
>>> aug = iaa.MaxPooling([2, 8])
Create an augmenter that always pools either with a kernel size
of ``2 x 2`` or ``8 x 8``.
>>> aug = iaa.MaxPooling((1, 7))
Create an augmenter that always pools with a kernel size of
``1 x 1`` (does nothing) to ``7 x 7``. The kernel sizes are always
symmetric.
>>> aug = iaa.MaxPooling(((1, 7), (1, 7)))
Create an augmenter that always pools with a kernel size of
``H x W`` where ``H`` and ``W`` are both sampled independently from the
range ``[1..7]``. E.g. resulting kernel sizes could be ``3 x 7``
or ``5 x 1``.
"""
# TODO add floats as ksize denoting fractions of image sizes
# (note possible overlap with fractional kernel sizes here)
def __init__(self, kernel_size, keep_size=True,
name=None, deterministic=False, random_state=None):
super(MaxPooling, self).__init__(
kernel_size=kernel_size, keep_size=keep_size,
name=name, deterministic=deterministic, random_state=random_state)
def _pool_image(self, image, kernel_size_h, kernel_size_w):
# TODO extend max_pool to support pad_mode and set it here
# to reflection padding
return ia.max_pool(
image,
(kernel_size_h, kernel_size_w)
)
class MinPooling(_AbstractPoolingBase):
"""
Apply minimum pooling to images.
This augmenter pools images with kernel sizes ``H x W`` by taking the
minimum pixel value over windows. For e.g. ``2 x 2`` this halves the image
size. Optionally, the augmenter will automatically re-upscale the image
to the input size (by default this is activated).
The minimum within each pixel window is always taken channelwise.
.. note::
During heatmap or segmentation map augmentation, the respective
arrays are not changed, only the shapes of the underlying images
are updated. This is because imgaug can handle maps/maks that are
larger/smaller than their corresponding image.
dtype support::
See :func:`imgaug.imgaug.pool`.
Attributes
----------
kernel_size : int or tuple of int or list of int or imgaug.parameters.StochasticParameter or tuple of tuple of int or tuple of list of int or tuple of imgaug.parameters.StochasticParameter, optional
The kernel size of the pooling operation.
* If an int, then that value will be used for all images for both
kernel height and width.
* If a tuple ``(a, b)``, then a value from the discrete range
``[a..b]`` will be sampled per image.
* If a list, then a random value will be sampled from that list per
image and used for both kernel height and width.
* If a StochasticParameter, then a value will be sampled per image
from that parameter per image and used for both kernel height and
width.
* If a tuple of tuple of int given as ``((a, b), (c, d))``, then two
values will be sampled independently from the discrete ranges
``[a..b]`` and ``[c..d]`` per image and used as the kernel height
and width.
* If a tuple of lists of int, then two values will be sampled
independently per image, one from the first list and one from the
second, and used as the kernel height and width.
* If a tuple of StochasticParameter, then two values will be sampled
indepdently per image, one from the first parameter and one from the
second, and used as the kernel height and width.
keep_size : bool, optional
After pooling, the result image will usually have a different
height/width compared to the original input image. If this
parameter is set to True, then the pooled image will be resized
to the input image's size, i.e. the augmenter's output shape is always
identical to the input shape.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.MinPooling(2)
Create an augmenter that always pools with a kernel size of ``2 x 2``.
>>> aug = iaa.MinPooling(2, keep_size=False)
Create an augmenter that always pools with a kernel size of ``2 x 2``
and does *not* resize back to the input image size, i.e. the resulting
images have half the resolution.
>>> aug = iaa.MinPooling([2, 8])
Create an augmenter that always pools either with a kernel size
of ``2 x 2`` or ``8 x 8``.
>>> aug = iaa.MinPooling((1, 7))
Create an augmenter that always pools with a kernel size of
``1 x 1`` (does nothing) to ``7 x 7``. The kernel sizes are always
symmetric.
>>> aug = iaa.MinPooling(((1, 7), (1, 7)))
Create an augmenter that always pools with a kernel size of
``H x W`` where ``H`` and ``W`` are both sampled independently from the
range ``[1..7]``. E.g. resulting kernel sizes could be ``3 x 7``
or ``5 x 1``.
"""
# TODO add floats as ksize denoting fractions of image sizes
# (note possible overlap with fractional kernel sizes here)
def __init__(self, kernel_size, keep_size=True,
name=None, deterministic=False, random_state=None):
super(MinPooling, self).__init__(
kernel_size=kernel_size, keep_size=keep_size,
name=name, deterministic=deterministic, random_state=random_state)
def _pool_image(self, image, kernel_size_h, kernel_size_w):
# TODO extend pool to support pad_mode and set it here
# to reflection padding
return ia.min_pool(
image,
(kernel_size_h, kernel_size_w)
)
class MedianPooling(_AbstractPoolingBase):
"""
Apply median pooling to images.
This augmenter pools images with kernel sizes ``H x W`` by taking the
median pixel value over windows. For e.g. ``2 x 2`` this halves the image
size. Optionally, the augmenter will automatically re-upscale the image
to the input size (by default this is activated).
The median within each pixel window is always taken channelwise.
.. note::
During heatmap or segmentation map augmentation, the respective
arrays are not changed, only the shapes of the underlying images
are updated. This is because imgaug can handle maps/maks that are
larger/smaller than their corresponding image.
dtype support::
See :func:`imgaug.imgaug.pool`.
Attributes
----------
kernel_size : int or tuple of int or list of int or imgaug.parameters.StochasticParameter or tuple of tuple of int or tuple of list of int or tuple of imgaug.parameters.StochasticParameter, optional
The kernel size of the pooling operation.
* If an int, then that value will be used for all images for both
kernel height and width.
* If a tuple ``(a, b)``, then a value from the discrete range
``[a..b]`` will be sampled per image.
* If a list, then a random value will be sampled from that list per
image and used for both kernel height and width.
* If a StochasticParameter, then a value will be sampled per image
from that parameter per image and used for both kernel height and
width.
* If a tuple of tuple of int given as ``((a, b), (c, d))``, then two
values will be sampled independently from the discrete ranges
``[a..b]`` and ``[c..d]`` per image and used as the kernel height
and width.
* If a tuple of lists of int, then two values will be sampled
independently per image, one from the first list and one from the
second, and used as the kernel height and width.
* If a tuple of StochasticParameter, then two values will be sampled
indepdently per image, one from the first parameter and one from the
second, and used as the kernel height and width.
keep_size : bool, optional
After pooling, the result image will usually have a different
height/width compared to the original input image. If this
parameter is set to True, then the pooled image will be resized
to the input image's size, i.e. the augmenter's output shape is always
identical to the input shape.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.MedianPooling(2)
Create an augmenter that always pools with a kernel size of ``2 x 2``.
>>> aug = iaa.MedianPooling(2, keep_size=False)
Create an augmenter that always pools with a kernel size of ``2 x 2``
and does *not* resize back to the input image size, i.e. the resulting
images have half the resolution.
>>> aug = iaa.MedianPooling([2, 8])
Create an augmenter that always pools either with a kernel size
of ``2 x 2`` or ``8 x 8``.
>>> aug = iaa.MedianPooling((1, 7))
Create an augmenter that always pools with a kernel size of
``1 x 1`` (does nothing) to ``7 x 7``. The kernel sizes are always
symmetric.
>>> aug = iaa.MedianPooling(((1, 7), (1, 7)))
Create an augmenter that always pools with a kernel size of
``H x W`` where ``H`` and ``W`` are both sampled independently from the
range ``[1..7]``. E.g. resulting kernel sizes could be ``3 x 7``
or ``5 x 1``.
"""
# TODO add floats as ksize denoting fractions of image sizes
# (note possible overlap with fractional kernel sizes here)
def __init__(self, kernel_size, keep_size=True,
name=None, deterministic=False, random_state=None):
super(MedianPooling, self).__init__(
kernel_size=kernel_size, keep_size=keep_size,
name=name, deterministic=deterministic, random_state=random_state)
def _pool_image(self, image, kernel_size_h, kernel_size_w):
# TODO extend pool to support pad_mode and set it here
# to reflection padding
return ia.median_pool(
image,
(kernel_size_h, kernel_size_w)
)
| 41.273438
| 202
| 0.653795
| 3,696
| 26,415
| 4.541396
| 0.081169
| 0.0423
| 0.017158
| 0.028597
| 0.8056
| 0.792851
| 0.782901
| 0.773905
| 0.767054
| 0.749777
| 0
| 0.007629
| 0.270528
| 26,415
| 639
| 203
| 41.338028
| 0.863459
| 0.637895
| 0
| 0.321637
| 0
| 0
| 0.007061
| 0.002633
| 0
| 0
| 0
| 0.007825
| 0
| 1
| 0.128655
| false
| 0
| 0.046784
| 0.035088
| 0.321637
| 0.005848
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0755f4b2c3f21a5bd6057ccad2beb943076e9ed6
| 4,128
|
py
|
Python
|
agents/customization.py
|
jun2tong/Continual-Learning-Benchmark
|
b6fdcc56ec2821b945105b1a54cdea03b421a6b2
|
[
"MIT"
] | null | null | null |
agents/customization.py
|
jun2tong/Continual-Learning-Benchmark
|
b6fdcc56ec2821b945105b1a54cdea03b421a6b2
|
[
"MIT"
] | null | null | null |
agents/customization.py
|
jun2tong/Continual-Learning-Benchmark
|
b6fdcc56ec2821b945105b1a54cdea03b421a6b2
|
[
"MIT"
] | null | null | null |
import torch
from .default import NormalNN
from .regularization import SI, EWC, EWC_online
from .exp_replay import Naive_Rehearsal, GEM
from modules.criterions import BCEauto
def init_zero_weights(m):
with torch.no_grad():
if type(m) == torch.nn.Linear:
m.weight.zero_()
m.bias.zero_()
elif type(m) == torch.nn.ModuleDict:
for l in m.values():
init_zero_weights(l)
else:
assert False, 'Only support linear layer'
def NormalNN_reset_optim(agent_config):
agent = NormalNN(agent_config)
agent.reset_optimizer = True
return agent
def NormalNN_BCE(agent_config):
agent = NormalNN(agent_config)
agent.criterion_fn = BCEauto()
return agent
def SI_BCE(agent_config):
    """SI agent configured with the BCEauto loss criterion."""
    si_agent = SI(agent_config)
    si_agent.criterion_fn = BCEauto()
    return si_agent
def SI_splitMNIST_zero_init(agent_config):
    """SI agent for split-MNIST: small damping, optimizer reset, zeroed head."""
    si_agent = SI(agent_config)
    si_agent.damping_factor = 1e-3
    si_agent.reset_optimizer = True
    si_agent.model.last.apply(init_zero_weights)
    return si_agent
def SI_splitMNIST_rand_init(agent_config):
    """SI agent for split-MNIST with small damping and optimizer reset (random head)."""
    si_agent = SI(agent_config)
    si_agent.damping_factor = 1e-3
    si_agent.reset_optimizer = True
    return si_agent
def EWC_BCE(agent_config):
    """EWC agent configured with the BCEauto loss criterion."""
    ewc_agent = EWC(agent_config)
    ewc_agent.criterion_fn = BCEauto()
    return ewc_agent
def EWC_mnist(agent_config):
    """EWC agent for MNIST: estimate Fisher info from all 60k training samples."""
    ewc_agent = EWC(agent_config)
    ewc_agent.n_fisher_sample = 60000
    return ewc_agent
def EWC_online_mnist(agent_config):
    """Online-regularized EWC agent for MNIST (60k-sample Fisher estimate)."""
    ewc_agent = EWC(agent_config)
    ewc_agent.n_fisher_sample = 60000
    ewc_agent.online_reg = True
    return ewc_agent
def EWC_online_empFI(agent_config):
    """EWC agent using the empirical Fisher information estimate."""
    ewc_agent = EWC(agent_config)
    ewc_agent.empFI = True
    return ewc_agent
def EWC_zero_init(agent_config):
    """EWC agent with optimizer reset and a zero-initialized output head."""
    ewc_agent = EWC(agent_config)
    ewc_agent.reset_optimizer = True
    ewc_agent.model.last.apply(init_zero_weights)
    return ewc_agent
def EWC_rand_init(agent_config):
    """EWC agent with optimizer reset (randomly initialized output head)."""
    ewc_agent = EWC(agent_config)
    ewc_agent.reset_optimizer = True
    return ewc_agent
def EWC_reset_optim(agent_config):
    """EWC agent that resets its optimizer between tasks."""
    ewc_agent = EWC(agent_config)
    ewc_agent.reset_optimizer = True
    return ewc_agent
def EWC_online_reset_optim(agent_config):
    """Online-EWC agent that resets its optimizer between tasks."""
    ewc_agent = EWC_online(agent_config)
    ewc_agent.reset_optimizer = True
    return ewc_agent
def Naive_Rehearsal_100(agent_config):
    """Naive rehearsal agent with a replay memory of 100 samples."""
    rehearsal_agent = Naive_Rehearsal(agent_config)
    rehearsal_agent.memory_size = 100
    return rehearsal_agent
def Naive_Rehearsal_200(agent_config):
    """Naive rehearsal agent with a replay memory of 200 samples."""
    rehearsal_agent = Naive_Rehearsal(agent_config)
    rehearsal_agent.memory_size = 200
    return rehearsal_agent
def Naive_Rehearsal_400(agent_config):
    """Naive rehearsal agent with a replay memory of 400 samples."""
    rehearsal_agent = Naive_Rehearsal(agent_config)
    rehearsal_agent.memory_size = 400
    return rehearsal_agent
def Naive_Rehearsal_1100(agent_config):
    """Naive rehearsal agent with a replay memory of 1100 samples."""
    rehearsal_agent = Naive_Rehearsal(agent_config)
    rehearsal_agent.memory_size = 1100
    return rehearsal_agent
def Naive_Rehearsal_1400(agent_config):
    """Naive rehearsal agent with a replay memory of 1400 samples."""
    rehearsal_agent = Naive_Rehearsal(agent_config)
    rehearsal_agent.memory_size = 1400
    return rehearsal_agent
def Naive_Rehearsal_4000(agent_config):
    """Naive rehearsal agent with a replay memory of 4000 samples."""
    rehearsal_agent = Naive_Rehearsal(agent_config)
    rehearsal_agent.memory_size = 4000
    return rehearsal_agent
def Naive_Rehearsal_4400(agent_config):
    """Naive rehearsal agent with a replay memory of 4400 samples."""
    rehearsal_agent = Naive_Rehearsal(agent_config)
    rehearsal_agent.memory_size = 4400
    return rehearsal_agent
def Naive_Rehearsal_5600(agent_config):
    """Naive rehearsal agent with a replay memory of 5600 samples."""
    rehearsal_agent = Naive_Rehearsal(agent_config)
    rehearsal_agent.memory_size = 5600
    return rehearsal_agent
def Naive_Rehearsal_16000(agent_config):
    """Naive rehearsal agent with a replay memory of 16000 samples."""
    rehearsal_agent = Naive_Rehearsal(agent_config)
    rehearsal_agent.memory_size = 16000
    return rehearsal_agent
def GEM_100(agent_config):
    """GEM agent with an episodic memory of 100 samples."""
    gem_agent = GEM(agent_config)
    gem_agent.memory_size = 100
    return gem_agent
def GEM_200(agent_config):
    """GEM agent with an episodic memory of 200 samples."""
    gem_agent = GEM(agent_config)
    gem_agent.memory_size = 200
    return gem_agent
def GEM_400(agent_config):
    """GEM agent with an episodic memory of 400 samples."""
    gem_agent = GEM(agent_config)
    gem_agent.memory_size = 400
    return gem_agent
def GEM_1100(agent_config):
    """GEM agent with an episodic memory of 1100 samples."""
    gem_agent = GEM(agent_config)
    gem_agent.memory_size = 1100
    return gem_agent
def GEM_4000(agent_config):
    """GEM agent with an episodic memory of 4000 samples."""
    gem_agent = GEM(agent_config)
    gem_agent.memory_size = 4000
    return gem_agent
def GEM_4400(agent_config):
    """GEM agent with an episodic memory of 4400 samples."""
    gem_agent = GEM(agent_config)
    gem_agent.memory_size = 4400
    return gem_agent
def GEM_16000(agent_config):
    """GEM agent with an episodic memory of 16000 samples."""
    gem_agent = GEM(agent_config)
    gem_agent.memory_size = 16000
    return gem_agent
| 20.954315
| 53
| 0.71657
| 563
| 4,128
| 4.960924
| 0.142096
| 0.228428
| 0.332259
| 0.126029
| 0.833154
| 0.745077
| 0.719298
| 0.676334
| 0.65879
| 0.419262
| 0
| 0.041104
| 0.210271
| 4,128
| 196
| 54
| 21.061224
| 0.815644
| 0
| 0
| 0.639706
| 0
| 0
| 0.006056
| 0
| 0
| 0
| 0
| 0
| 0.007353
| 1
| 0.220588
| false
| 0
| 0.036765
| 0
| 0.470588
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
07633806c43b768aed031c3d9dd8b74f84b0fa4e
| 19
|
py
|
Python
|
src/python/__init__.py
|
wmeddie/veda
|
52849411106071ebfdb7d0c86e34684c0e0fa843
|
[
"BSD-3-Clause"
] | 9
|
2020-07-20T07:37:06.000Z
|
2022-03-11T19:59:21.000Z
|
src/python/__init__.py
|
wmeddie/veda
|
52849411106071ebfdb7d0c86e34684c0e0fa843
|
[
"BSD-3-Clause"
] | 14
|
2020-09-07T10:53:01.000Z
|
2022-03-26T02:50:43.000Z
|
src/python/__init__.py
|
wmeddie/veda
|
52849411106071ebfdb7d0c86e34684c0e0fa843
|
[
"BSD-3-Clause"
] | 2
|
2021-07-17T03:19:11.000Z
|
2021-09-02T10:30:48.000Z
|
from .veda import *
| 19
| 19
| 0.736842
| 3
| 19
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 19
| 1
| 19
| 19
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4ae21c30981d63587f6f412c5f02b1c336a5a8e7
| 47
|
py
|
Python
|
sim/unitytrainers/ppo/__init__.py
|
PranayKr/Bonsai_CodeBase
|
4e9a7281364bd71f7cd466634ef8379eacb1b716
|
[
"Apache-2.0"
] | null | null | null |
sim/unitytrainers/ppo/__init__.py
|
PranayKr/Bonsai_CodeBase
|
4e9a7281364bd71f7cd466634ef8379eacb1b716
|
[
"Apache-2.0"
] | 7
|
2019-12-16T22:13:37.000Z
|
2022-02-10T01:05:42.000Z
|
sim/unitytrainers/ppo/__init__.py
|
PranayKr/Bonsai_CodeBase
|
4e9a7281364bd71f7cd466634ef8379eacb1b716
|
[
"Apache-2.0"
] | null | null | null |
from .models import *
from .trainer import *
| 15.666667
| 23
| 0.702128
| 6
| 47
| 5.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212766
| 47
| 2
| 24
| 23.5
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ab14e411f6dcaaf3e6627ec9e261473435c29953
| 24
|
py
|
Python
|
python/testData/completion/className/thirdPartyPackageBundledDependenciesNotSuggested/site-packages/requests/__init__.py
|
06needhamt/intellij-community
|
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
|
[
"Apache-2.0"
] | null | null | null |
python/testData/completion/className/thirdPartyPackageBundledDependenciesNotSuggested/site-packages/requests/__init__.py
|
06needhamt/intellij-community
|
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
|
[
"Apache-2.0"
] | null | null | null |
python/testData/completion/className/thirdPartyPackageBundledDependenciesNotSuggested/site-packages/requests/__init__.py
|
06needhamt/intellij-community
|
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
|
[
"Apache-2.0"
] | null | null | null |
def request():
    """No-op stub; exists only so completion tests can resolve the name."""
    return None
| 8
| 14
| 0.583333
| 3
| 24
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.291667
| 24
| 2
| 15
| 12
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
ab20197b4e923878c3501ed0cf142c5fa9c07ff9
| 11,052
|
py
|
Python
|
tests/unit/dbestclient/io/test_stratifiedreservoir.py
|
qingzma/DBEstClient
|
d2cdf51bc3c69e50bcf4d1d516673b7d20843c16
|
[
"BSD-2-Clause"
] | 11
|
2019-12-24T02:39:35.000Z
|
2022-03-21T22:39:41.000Z
|
tests/unit/dbestclient/io/test_stratifiedreservoir.py
|
Forever-MrX/DBEstClient
|
d2cdf51bc3c69e50bcf4d1d516673b7d20843c16
|
[
"BSD-2-Clause"
] | 4
|
2019-12-09T09:48:17.000Z
|
2021-07-07T02:58:26.000Z
|
tests/unit/dbestclient/io/test_stratifiedreservoir.py
|
qingzma/DBEstClient
|
d2cdf51bc3c69e50bcf4d1d516673b7d20843c16
|
[
"BSD-2-Clause"
] | 8
|
2019-11-08T02:10:37.000Z
|
2022-03-21T22:42:46.000Z
|
# Created by Qingzhi Ma at 2020-11-23
# All right reserved
# Department of Computer Science
# the University of Warwick
# Q.Ma.2@warwick.ac.uk
import unittest
from dbestclient.io.stratifiedreservoir import StratifiedReservoir
class TestStratifiedReservoir(unittest.TestCase):
    """Tests for StratifiedReservoir sampling over TPC-DS and toy data.

    The TPC-DS cases share one column header and sampling configuration;
    the toy cases share one configuration and one expected-sample check.
    Both are factored into private helpers so each public test states only
    what varies (job count, equality columns, input file, expected size).
    """

    # Column header of the TPC-DS store_sales 1k-row extract, shared by
    # every TPC-DS test case below.
    TPCDS_HEADER = "ss_sold_date_sk|ss_sold_time_sk|ss_item_sk|ss_customer_sk|ss_cdemo_sk|ss_hdemo_sk|ss_addr_sk|ss_store_sk|ss_promo_sk|ss_ticket_number|ss_quantity|ss_wholesale_cost|ss_list_price|ss_sales_price|ss_ext_discount_amt|ss_ext_sales_price|ss_ext_wholesale_cost|ss_ext_list_price|ss_ext_tax|ss_coupon_amt|ss_net_paid|ss_net_paid_inc_tax|ss_net_profit|none"

    def _make_tpcds_sample(self, n_jobs, equality_cols):
        """Build and sample a TPC-DS reservoir; returns the reservoir."""
        sr = StratifiedReservoir(
            "data/tpcds/40G/ss_1k.csv",
            file_header=self.TPCDS_HEADER,
            n_jobs=n_jobs,
            capacity=5,
        )
        sr.make_sample_no_distinct(
            gb_cols=["ss_store_sk"],
            equality_cols=equality_cols,
            feature_cols=["ss_sold_date_sk", "ss_ext_wholesale_cost"],
            label_cols=["ss_sales_price", "real", None],
            split_char="|",
        )
        return sr

    def test_tpcds_1job_no_equality(self):
        sr = self._make_tpcds_sample(n_jobs=1, equality_cols=None)
        cate, fea, lbl = sr.get_categorical_features_label()
        self.assertEqual(sr.size(), 948)

    def test_tpcds_2job_no_equality(self):
        sr = self._make_tpcds_sample(n_jobs=2, equality_cols=None)
        self.assertEqual(sr.size(), 948)

    def test_tpcds_1job(self):
        sr = self._make_tpcds_sample(n_jobs=1, equality_cols=["ss_coupon_amt"])
        self.assertEqual(sr.size(), 1000)

    def test_tpcds_2job(self):
        sr = self._make_tpcds_sample(n_jobs=2, equality_cols=["ss_coupon_amt"])
        self.assertEqual(sr.size(), 1000)

    def _sample_toy(self, sr):
        """Run the standard toy-data sampling configuration on *sr*."""
        sr.make_sample_no_distinct(
            gb_cols=["gb1", "gb2"],
            equality_cols=["cate1", "cate2"],
            feature_cols=["range1", "range2"],
            label_cols=["label", "real", None],
            split_char=",",
        )

    def _assert_toy_sample(self, sr):
        """Check *sr*'s categories, features and labels against the toy data.

        Sample order is nondeterministic, so both sides are sorted before
        comparison.
        """
        cate, features, labels = sr.get_categorical_features_label()
        cate_target = [
            ["store_id1", "cust_id2", "paris", "male"],
            ["store_id1", "cust_id1", "london", "male"],
            ["store_id1", "cust_id1", "london", "male"],
        ]
        cate = sorted(cate.tolist(), key=lambda words: ",".join(words))
        cate_target = sorted(cate_target, key=lambda words: ",".join(words))
        features_target = [[1.0, 2.0], [1.1, 2.1], [1.1, 2.1]]
        features_target = sorted(features_target, key=lambda words: words[0])
        features = sorted(features.tolist(), key=lambda words: words[0])
        labels_target = [1000.0, 2000.0, 3000.0]
        labels_target.sort()
        labels = labels.tolist()
        labels.sort()
        self.assertEqual(cate, cate_target)
        self.assertEqual(features, features_target)
        self.assertEqual(labels, labels_target)

    def _run_toy_no_header(self):
        """Sample the header-less toy file and verify the result."""
        sr = StratifiedReservoir(
            "data/toy/toy.txt",
            file_header="range1,range2,cate1,cate2,gb1,gb2,label",
            n_jobs=1,
            capacity=5,
        )
        self._sample_toy(sr)
        self._assert_toy_sample(sr)

    def test_toy_no_header_1(self):
        self._run_toy_no_header()

    def test_toy_no_header_2(self):
        # Kept as a second, identical invocation to preserve the original
        # suite's test names (both originals ran the same scenario).
        self._run_toy_no_header()

    def test_toy_with_header_1job(self):
        sr = StratifiedReservoir("data/toy/toy_with_header.txt", n_jobs=1, capacity=5)
        self._sample_toy(sr)
        self._assert_toy_sample(sr)

    def test_toy_with_header_2job(self):
        sr = StratifiedReservoir("data/toy/toy_with_header.txt", n_jobs=2, capacity=5)
        self._sample_toy(sr)
        self._assert_toy_sample(sr)
if __name__ == "__main__":
    # Run the full suite; the commented lines below were used to run a
    # single test case ad hoc during development.
    unittest.main()
    # TestStratifiedReservoir().test_tpcds_1job_no_equality()
    # TestStratifiedReservoir().test_tpcds_2job_no_equality()
    # TestStratifiedReservoir().test_tpcds_1job()
    # TestStratifiedReservoir().test_tpcds_2job()
    # TestStratifiedReservoir().test_toy_no_header_2()
    # TestStratifiedReservoir().test_toy_with_header_1job()
    # TestStratifiedReservoir().test_toy_with_header_2job()
    # TestStratifiedReservoir().test_hw()
| 45.110204
| 798
| 0.64097
| 1,398
| 11,052
| 4.704578
| 0.150215
| 0.024327
| 0.034058
| 0.039684
| 0.797932
| 0.753687
| 0.753687
| 0.753687
| 0.745933
| 0.745933
| 0
| 0.029991
| 0.227651
| 11,052
| 244
| 799
| 45.295082
| 0.740511
| 0.162052
| 0
| 0.815217
| 0
| 0.021739
| 0.263369
| 0.184619
| 0
| 0
| 0
| 0
| 0.086957
| 1
| 0.043478
| false
| 0
| 0.01087
| 0
| 0.059783
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ab204888574980011111ee148e0154e4128e1100
| 96
|
py
|
Python
|
pyisic/_standards/jsic13/__init__.py
|
sayari-analytics/pyisic
|
42ed46f5bc446a0bbc0edf30b64bc4ab939dd033
|
[
"MIT"
] | 3
|
2021-11-18T15:32:38.000Z
|
2022-02-28T19:16:14.000Z
|
pyisic/_standards/jsic13/__init__.py
|
sayari-analytics/pyisic
|
42ed46f5bc446a0bbc0edf30b64bc4ab939dd033
|
[
"MIT"
] | 18
|
2021-06-28T19:17:49.000Z
|
2022-03-23T20:20:18.000Z
|
pyisic/_standards/jsic13/__init__.py
|
sayari-analytics/pyisic
|
42ed46f5bc446a0bbc0edf30b64bc4ab939dd033
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .jsic13 import JSIC13
from .jsic13_to_isic4 import JSIC13_to_ISIC4
| 24
| 44
| 0.75
| 15
| 96
| 4.533333
| 0.533333
| 0.294118
| 0.382353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13253
| 0.135417
| 96
| 3
| 45
| 32
| 0.686747
| 0.21875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
db7fb1894e128552b991a904facb241872ebaa71
| 48
|
py
|
Python
|
services/director-v2/src/simcore_service_director_v2/models/schemas/dynamic_services/__init__.py
|
colinRawlings/osparc-simcore
|
bf2f18d5bc1e574d5f4c238d08ad15156184c310
|
[
"MIT"
] | 25
|
2018-04-13T12:44:12.000Z
|
2022-03-12T15:01:17.000Z
|
services/director-v2/src/simcore_service_director_v2/models/schemas/dynamic_services/__init__.py
|
colinRawlings/osparc-simcore
|
bf2f18d5bc1e574d5f4c238d08ad15156184c310
|
[
"MIT"
] | 2,553
|
2018-01-18T17:11:55.000Z
|
2022-03-31T16:26:40.000Z
|
services/director-v2/src/simcore_service_director_v2/models/schemas/dynamic_services/__init__.py
|
mrnicegyu11/osparc-simcore
|
b6fa6c245dbfbc18cc74a387111a52de9b05d1f4
|
[
"MIT"
] | 20
|
2018-01-18T19:45:33.000Z
|
2022-03-29T07:08:47.000Z
|
from .scheduler import *
from .service import *
| 16
| 24
| 0.75
| 6
| 48
| 6
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 48
| 2
| 25
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
91cd9c4e4dbbc6e342a79ce468c331f6c12df8eb
| 5,296
|
py
|
Python
|
python/interpret/perf/curve.py
|
zzzace2000/interpret
|
8a4b7c61378e1299870da3a581ae626e55ecb6a0
|
[
"MIT"
] | 1
|
2021-12-27T13:42:01.000Z
|
2021-12-27T13:42:01.000Z
|
python/interpret-core/interpret/perf/curve.py
|
LiYaangY/interpret
|
ab21bba5e139bee0234abe7c24efb6ffbcf36f9d
|
[
"MIT"
] | null | null | null |
python/interpret-core/interpret/perf/curve.py
|
LiYaangY/interpret
|
ab21bba5e139bee0234abe7c24efb6ffbcf36f9d
|
[
"MIT"
] | 2
|
2021-08-10T19:40:47.000Z
|
2021-11-16T16:01:22.000Z
|
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
from ..api.base import ExplainerMixin, ExplanationMixin
from ..utils import unify_data, gen_name_from_class, unify_predict_fn
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import precision_recall_curve, average_precision_score
import numpy as np
class PR(ExplainerMixin):
    """Performance explainer producing precision-recall curves."""

    available_explanations = ["perf"]
    explainer_type = "perf"

    def __init__(self, predict_fn, feature_names=None, feature_types=None, **kwargs):
        """Store the prediction function and optional feature metadata."""
        self.predict_fn = predict_fn
        self.kwargs = kwargs
        self.feature_names = feature_names
        self.feature_types = feature_types

    def explain_perf(self, X, y, name=None):
        """Compute a PR-curve explanation of predictions on ``(X, y)``."""
        if name is None:
            name = gen_name_from_class(self)

        X, y, self.feature_names, self.feature_types = unify_data(
            X, y, self.feature_names, self.feature_types
        )
        fn = unify_predict_fn(self.predict_fn, X)
        y_scores = fn(X)

        precision, recall, thresh = precision_recall_curve(y, y_scores)
        ap = average_precision_score(y, y_scores)

        # Histogram of |y - score| residuals for the density plot.
        residual_magnitudes = np.abs(y - y_scores)
        counts, values = np.histogram(residual_magnitudes, bins="doane")

        overall = dict(
            type="perf_curve",
            density={"names": values, "scores": counts},
            scores=y_scores,
            x_values=recall,
            y_values=precision,
            threshold=thresh,
            auc=ap,
        )
        return PRExplanation(
            "perf",
            {"overall": overall, "specific": None},
            feature_names=self.feature_names,
            feature_types=self.feature_types,
            name=name,
        )
class ROC(ExplainerMixin):
    """Performance explainer producing ROC curves."""

    available_explanations = ["perf"]
    explainer_type = "perf"

    def __init__(self, predict_fn, feature_names=None, feature_types=None, **kwargs):
        """Store the prediction function and optional feature metadata."""
        self.predict_fn = predict_fn
        self.kwargs = kwargs
        self.feature_names = feature_names
        self.feature_types = feature_types

    def explain_perf(self, X, y, name=None):
        """Compute a ROC-curve explanation of predictions on ``(X, y)``."""
        if name is None:
            name = gen_name_from_class(self)

        X, y, self.feature_names, self.feature_types = unify_data(
            X, y, self.feature_names, self.feature_types
        )
        fn = unify_predict_fn(self.predict_fn, X)
        y_scores = fn(X)

        fpr, tpr, thresh = roc_curve(y, y_scores)
        roc_auc = auc(fpr, tpr)

        # Histogram of |y - score| residuals for the density plot.
        residual_magnitudes = np.abs(y - y_scores)
        counts, values = np.histogram(residual_magnitudes, bins="doane")

        overall = dict(
            type="perf_curve",
            density={"names": values, "scores": counts},
            scores=y_scores,
            x_values=fpr,
            y_values=tpr,
            threshold=thresh,
            auc=roc_auc,
        )
        return ROCExplanation(
            "perf",
            {"overall": overall, "specific": None},
            feature_names=self.feature_names,
            feature_types=self.feature_types,
            name=name,
        )
class ROCExplanation(ExplanationMixin):
    """Explanation object holding ROC-curve data and its visualization."""

    explanation_type = None

    def __init__(
        self,
        explanation_type,
        internal_obj,
        feature_names=None,
        feature_types=None,
        name=None,
        selector=None,
    ):
        """Store the explanation payload and feature metadata."""
        self.explanation_type = explanation_type
        self._internal_obj = internal_obj
        self.feature_names = feature_names
        self.feature_types = feature_types
        self.name = name
        self.selector = selector

    def data(self, key=None):
        """Return the overall data dict; any specific key yields None."""
        return self._internal_obj["overall"] if key is None else None

    def visualize(self, key=None):
        """Render the ROC curve plot, or None when no data is available."""
        from ..visual.plot import plot_performance_curve

        curve_data = self.data(key)
        if curve_data is None:
            return None
        return plot_performance_curve(
            curve_data,
            xtitle="FPR",
            ytitle="TPR",
            baseline=True,
            title="ROC Curve: " + self.name,
            auc_prefix="AUC",
        )
class PRExplanation(ExplanationMixin):
    """Explanation object holding PR-curve data and its visualization."""

    explanation_type = None

    def __init__(
        self,
        explanation_type,
        internal_obj,
        feature_names=None,
        feature_types=None,
        name=None,
        selector=None,
    ):
        """Store the explanation payload and feature metadata."""
        self.explanation_type = explanation_type
        self._internal_obj = internal_obj
        self.feature_names = feature_names
        self.feature_types = feature_types
        self.name = name
        self.selector = selector

    def data(self, key=None):
        """Return the overall data dict; any specific key yields None."""
        return self._internal_obj["overall"] if key is None else None

    def visualize(self, key=None):
        """Render the PR curve plot, or None when no data is available."""
        from ..visual.plot import plot_performance_curve

        curve_data = self.data(key)
        if curve_data is None:
            return None
        return plot_performance_curve(
            curve_data,
            xtitle="Recall",
            ytitle="Precision",
            baseline=False,
            title="PR Curve: " + self.name,
            auc_prefix="Average Precision",
        )
| 28.782609
| 85
| 0.598754
| 592
| 5,296
| 5.089527
| 0.155405
| 0.079655
| 0.053103
| 0.076336
| 0.785264
| 0.77066
| 0.77066
| 0.77066
| 0.739462
| 0.739462
| 0
| 0.001097
| 0.311367
| 5,296
| 183
| 86
| 28.939891
| 0.825062
| 0.015672
| 0
| 0.734694
| 0
| 0
| 0.052207
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068027
| false
| 0
| 0.047619
| 0
| 0.251701
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
53010313d7b765d7109635137aca8cfab8ca9b59
| 36
|
py
|
Python
|
s3zilla/__init__.py
|
rootVIII/s3zilla
|
cc0e343883c8ba01ce2d37879258dcafea3123e7
|
[
"MIT"
] | 10
|
2019-04-22T10:11:44.000Z
|
2021-12-16T15:52:55.000Z
|
s3zilla/__init__.py
|
rootVIII/s3zilla
|
cc0e343883c8ba01ce2d37879258dcafea3123e7
|
[
"MIT"
] | null | null | null |
s3zilla/__init__.py
|
rootVIII/s3zilla
|
cc0e343883c8ba01ce2d37879258dcafea3123e7
|
[
"MIT"
] | 2
|
2019-08-26T01:33:46.000Z
|
2019-11-03T21:10:45.000Z
|
from s3zilla.s3zilla import S3Zilla
| 18
| 35
| 0.861111
| 5
| 36
| 6.2
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 0.111111
| 36
| 1
| 36
| 36
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
530fb83fc8dedb02d15abea1de23fb66baf394e7
| 1,098
|
py
|
Python
|
iceworm/engine/ops/__init__.py
|
wrmsr0/iceworm
|
09431bb3cdc4f6796aafca41e37d42ebe0ddfeef
|
[
"BSD-3-Clause"
] | null | null | null |
iceworm/engine/ops/__init__.py
|
wrmsr0/iceworm
|
09431bb3cdc4f6796aafca41e37d42ebe0ddfeef
|
[
"BSD-3-Clause"
] | 1
|
2021-01-19T14:29:19.000Z
|
2021-01-19T14:34:27.000Z
|
iceworm/engine/ops/__init__.py
|
wrmsr0/iceworm
|
09431bb3cdc4f6796aafca41e37d42ebe0ddfeef
|
[
"BSD-3-Clause"
] | 1
|
2020-12-31T22:29:52.000Z
|
2020-12-31T22:29:52.000Z
|
from . import inject # noqa
from .base import Annotation # noqa
from .base import List # noqa
from .base import ListExecutor # noqa
from .base import Op # noqa
from .base import OpExecutor # noqa
from .base import OpGen # noqa
from .base import Set # noqa
from .base import Set # noqa
from .conns import ConnOp # noqa
from .conns import ConnsOp # noqa
from .conns import CopyTable # noqa
from .conns import CreateTable # noqa
from .conns import CreateTableAs # noqa
from .conns import CreateTableAsExecutor # noqa
from .conns import CreateTableExecutor # noqa
from .conns import DropTable # noqa
from .conns import DropTableExecutor # noqa
from .conns import Exec # noqa
from .conns import ExecExecutor # noqa
from .conns import InsertIntoEval # noqa
from .conns import InsertIntoEvalExecutor # noqa
from .conns import InsertIntoSelect # noqa
from .conns import InsertIntoSelectExecutor # noqa
from .conns import Transaction # noqa
from .conns import TransactionExecutor # noqa
from .driving import OpExecutionDriver # noqa
from .transforms import OpTransformer # noqa
| 37.862069
| 51
| 0.769581
| 139
| 1,098
| 6.079137
| 0.244604
| 0.255621
| 0.261538
| 0.382249
| 0.059172
| 0.059172
| 0.059172
| 0
| 0
| 0
| 0
| 0
| 0.179417
| 1,098
| 28
| 52
| 39.214286
| 0.937847
| 0.126594
| 0
| 0.071429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5353283ede0f8d5866a1baa0800b6cab3bef6ee2
| 3,593
|
py
|
Python
|
data_utils/test_modelnet_dataset.py
|
zhuyuanxiang/PointNet-Series
|
d494ff803e1f0fe1ac51591ce1a1469646a82ea0
|
[
"MIT"
] | null | null | null |
data_utils/test_modelnet_dataset.py
|
zhuyuanxiang/PointNet-Series
|
d494ff803e1f0fe1ac51591ce1a1469646a82ea0
|
[
"MIT"
] | null | null | null |
data_utils/test_modelnet_dataset.py
|
zhuyuanxiang/PointNet-Series
|
d494ff803e1f0fe1ac51591ce1a1469646a82ea0
|
[
"MIT"
] | null | null | null |
"""
=================================================
@path : PointNet-Series -> test_modelnet_dataset.py
@IDE : PyCharm
@Author : zYx.Tom, 526614962@qq.com
@Date : 2022-01-19 15:13
@Version: v0.1
@License: (C)Copyright 2020-2022, zYx.Tom
@Reference:
@Desc :
==================================================
"""
from argparse import Namespace
from datetime import datetime
from data_utils.modelnet_dataset import ModelNetDataset
class TestClass:
    """Pytest cases for ModelNetDataset pickle-cache file naming.

    Covers the eight combinations of (modelnet10 vs modelnet40,
    train vs test split, uniform FPS sampling on vs off), each of which
    must map to a distinct cache file name.
    """

    # Root directory all dataset fixtures are created under.
    _DATA_PATH = "../data/pickle/modelnet/"

    def _assert_pickle_name(self, uniform_sample, modelnet10, split, expected):
        """Build a dataset for one configuration and check its cache name."""
        args = Namespace()
        args.uniform_sample = uniform_sample
        args.modelnet10 = modelnet10
        myDataset = ModelNetDataset(root=self._DATA_PATH, args=args, split=split)
        assert myDataset.get_pickle_file_name() == expected

    def test_modelnet10_test_1024pts(self):
        self._assert_pickle_name(
            False, True, 'test',
            "../data/pickle/modelnet/modelnet10_test_1024pts.dat")

    def test_modelnet10_test_1024pts_fps(self):
        self._assert_pickle_name(
            True, True, 'test',
            "../data/pickle/modelnet/modelnet10_test_1024pts_fps.dat")

    def test_modelnet10_train_1024pts(self):
        self._assert_pickle_name(
            False, True, 'train',
            "../data/pickle/modelnet/modelnet10_train_1024pts.dat")

    def test_modelnet10_train_1024pts_fps(self):
        self._assert_pickle_name(
            True, True, 'train',
            "../data/pickle/modelnet/modelnet10_train_1024pts_fps.dat")

    def test_modelnet40_test_1024pts(self):
        self._assert_pickle_name(
            False, False, 'test',
            "../data/pickle/modelnet/modelnet40_test_1024pts.dat")

    def test_modelnet40_test_1024pts_fps(self):
        self._assert_pickle_name(
            True, False, 'test',
            "../data/pickle/modelnet/modelnet40_test_1024pts_fps.dat")

    def test_modelnet40_train_1024pts(self):
        self._assert_pickle_name(
            False, False, 'train',
            "../data/pickle/modelnet/modelnet40_train_1024pts.dat")

    def test_modelnet40_train_1024pts_fps(self):
        self._assert_pickle_name(
            True, False, 'train',
            "../data/pickle/modelnet/modelnet40_train_1024pts_fps.dat")
def main(name):
    """Print a greeting for *name* together with the current timestamp."""
    print(f'Hi, {name}', datetime.now())
if __name__ == "__main__":
    # Script entry point: greet the module author.
    __author__ = 'zYx.Tom'
    main(__author__)
| 38.223404
| 109
| 0.664347
| 412
| 3,593
| 5.512136
| 0.165049
| 0.056363
| 0.126816
| 0.073976
| 0.85557
| 0.835315
| 0.778512
| 0.778512
| 0.778512
| 0.778512
| 0
| 0.049378
| 0.193988
| 3,593
| 93
| 110
| 38.634409
| 0.734807
| 0.087392
| 0
| 0.606061
| 0
| 0
| 0.208193
| 0.189544
| 0
| 0
| 0
| 0
| 0.121212
| 1
| 0.136364
| false
| 0.015152
| 0.045455
| 0
| 0.19697
| 0.015152
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
536b8c0c1a42706ed77a7a11150a46baba787ecc
| 28,137
|
py
|
Python
|
test/unit/test_sap_cli_aunit.py
|
jakub-vaclavik-sap/sapcli
|
a0f40c3b2363bba0d34f705d92dd420d9adf3987
|
[
"Apache-2.0"
] | null | null | null |
test/unit/test_sap_cli_aunit.py
|
jakub-vaclavik-sap/sapcli
|
a0f40c3b2363bba0d34f705d92dd420d9adf3987
|
[
"Apache-2.0"
] | null | null | null |
test/unit/test_sap_cli_aunit.py
|
jakub-vaclavik-sap/sapcli
|
a0f40c3b2363bba0d34f705d92dd420d9adf3987
|
[
"Apache-2.0"
] | 1
|
2022-01-10T03:58:03.000Z
|
2022-01-10T03:58:03.000Z
|
#!/usr/bin/env python3
import sys
import unittest
from io import StringIO
from types import SimpleNamespace
from unittest.mock import patch, call, Mock, mock_open
import sap.adt.cts
import sap.cli.aunit
from fixtures_adt_aunit import AUNIT_NO_TEST_RESULTS_XML, AUNIT_RESULTS_XML, GLOBAL_TEST_CLASS_AUNIT_RESULTS_XML, AUNIT_RESULTS_NO_TEST_METHODS_XML
from fixtures_adt_coverage import ACOVERAGE_RESULTS_XML, ACOVERAGE_STATEMENTS_RESULTS_XML
from infra import generate_parse_args
from mock import Connection, Response
from sap.cli.aunit import ResultOptions
from sap.errors import SAPCliError
# Shared argparse-based parser for the `aunit` command group, reused by every test below.
parse_args = generate_parse_args(sap.cli.aunit.CommandGroup())
class TestAUnitWrite(unittest.TestCase):
    """Tests for the ``aunit run`` command.

    Covers request construction for program/class/package targets and the
    human, raw, junit4, sonar and jacoco output formatters, including the
    ``--result`` selection between unit-test and coverage output.
    """

    def setUp(self):
        # Fresh mock ADT connection per test; it records executed requests.
        self.connection = Connection()

    def assert_print_no_test_classes(self, mock_print):
        """Assert the human-readable output of a run that found no test classes."""
        self.assertEqual(
            mock_print.call_args_list[0],
            call('* [tolerable] [noTestClasses] - The task definition does not refer to any test', file=sys.stdout))
        self.assertEqual(
            mock_print.call_args_list[1],
            call('Successful: 0', file=sys.stdout))
        self.assertEqual(
            mock_print.call_args_list[2],
            call('Warnings: 0', file=sys.stdout))
        self.assertEqual(
            mock_print.call_args_list[3],
            call('Errors: 0', file=sys.stdout))

    def test_aunit_invalid(self):
        """An unknown object type is rejected with SAPCliError."""
        with self.assertRaises(SAPCliError) as cm:
            sap.cli.aunit.run('wrongconn', SimpleNamespace(type='foo', output='human'))

        self.assertEqual(str(cm.exception), 'Unknown type: foo')

    def test_print_aunit_output_raises(self):
        """An unsupported --output value is rejected with SAPCliError."""
        with self.assertRaises(SAPCliError) as cm:
            sap.cli.aunit.print_aunit_output(SimpleNamespace(output='foo'), Mock(), Mock())

        self.assertEqual(str(cm.exception), 'Unsupported output type: foo')

    def execute_run(self, *args, **kwargs):
        """Parse and execute an ``aunit run`` command against the mock connection."""
        cmd_args = parse_args('run', *args, **kwargs)
        return cmd_args.execute(self.connection, cmd_args)

    def test_aunit_program(self):
        """Running for a program references the program URI in the request body."""
        self.connection.set_responses(
            Response(status_code=200, text=AUNIT_NO_TEST_RESULTS_XML, headers={})
        )

        with patch('sap.cli.aunit.print') as mock_print:
            self.execute_run('program', '--output', 'human', 'yprogram', '--result', ResultOptions.ONLY_UNIT.value)

        self.assertEqual(len(self.connection.execs), 1)
        self.assertIn('programs/programs/yprogram', self.connection.execs[0].body)
        self.assert_print_no_test_classes(mock_print)

    def test_aunit_class_human(self):
        """Running for a class references the class URI in the request body."""
        self.connection.set_responses(Response(status_code=200, text=AUNIT_NO_TEST_RESULTS_XML, headers={}))

        with patch('sap.cli.aunit.print') as mock_print:
            self.execute_run('class', 'yclass', '--output', 'human', '--result', ResultOptions.ONLY_UNIT.value)

        self.assertEqual(len(self.connection.execs), 1)
        self.assertIn('oo/classes/yclass', self.connection.execs[0].body)
        self.assert_print_no_test_classes(mock_print)

    def test_aunit_package(self):
        """Running for a package references the package URI in the request body."""
        self.connection.set_responses(Response(status_code=200, text=AUNIT_NO_TEST_RESULTS_XML, headers={}))

        with patch('sap.cli.aunit.print') as mock_print:
            self.execute_run('package', 'ypackage', '--output', 'human', '--result', ResultOptions.ONLY_UNIT.value)

        self.assertEqual(len(self.connection.execs), 1)
        self.assertIn('packages/ypackage', self.connection.execs[0].body)
        self.assert_print_no_test_classes(mock_print)

    def test_aunit_junit4_no_test_methods(self):
        """junit4 output for a class without test methods is an empty testsuite."""
        self.connection.set_responses(Response(status_code=200, text=AUNIT_RESULTS_NO_TEST_METHODS_XML, headers={}))

        with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
            self.execute_run('package', 'ypackage', '--output', 'junit4', '--result', ResultOptions.ONLY_UNIT.value)

        self.assertEqual(len(self.connection.execs), 1)
        self.assertEqual(
            """<?xml version="1.0" encoding="UTF-8" ?>
<testsuites name="ypackage">
<testsuite name="LTCL_TEST" package="ZCL_THEKING_MANUAL_HARDCORE" tests="0"/>
</testsuites>
""",
            mock_stdout.getvalue()
        )

    def test_aunit_package_with_results(self):
        """Human output lists classes, methods with statuses, details and totals."""
        self.connection.set_responses(Response(status_code=200, text=AUNIT_RESULTS_XML, headers={}))

        with patch('sap.cli.aunit.print') as mock_print:
            exit_code = self.execute_run('package', 'ypackage', '--output', 'human', '--result', ResultOptions.ONLY_UNIT.value)

        # Exit code 3 matches the 'Errors: 3' total asserted below.
        self.assertEqual(exit_code, 3)
        self.assertEqual(len(self.connection.execs), 1)
        self.assertIn('packages/ypackage', self.connection.execs[0].body)
        self.assertEqual(mock_print.call_args_list[0], call('ZCL_THEKING_MANUAL_HARDCORE', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[1], call(' LTCL_TEST', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[2], call(' DO_THE_FAIL [ERR]', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[3], call(' DO_THE_WARN [SKIP]', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[4], call(' DO_THE_TEST [OK]', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[5], call(' LTCL_TEST_HARDER', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[6], call(' DO_THE_FAIL [ERR]', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[7], call(' DO_THE_TEST [OK]', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[8], call('ZEXAMPLE_TESTS', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[9], call(' LTCL_TEST', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[10], call(' DO_THE_FAIL [ERR]', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[11], call(' DO_THE_TEST [OK]', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[12], call('', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[13], call('ZCL_THEKING_MANUAL_HARDCORE=>LTCL_TEST=>DO_THE_FAIL', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[14], call('* [critical] [failedAssertion] - Critical Assertion Error: \'I am supposed to fail\'', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[15], call('ZCL_THEKING_MANUAL_HARDCORE=>LTCL_TEST_HARDER=>DO_THE_FAIL', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[16], call('* [critical] [failedAssertion] - Critical Assertion Error: \'I am supposed to fail\'', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[17], call('ZEXAMPLE_TESTS=>LTCL_TEST=>DO_THE_FAIL', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[18], call('* [critical] [failedAssertion] - Critical Assertion Error: \'I am supposed to fail\'', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[19], call('* [critical] [failedAssertion] - Error<LOAD_PROGRAM_CLASS_MISMATCH>', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[20], call('', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[21], call('Successful: 3', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[22], call('Warnings: 1', file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[23], call('Errors: 3', file=sys.stdout))

    def test_aunit_package_with_results_raw(self):
        """Raw output prints the AUnit response XML unchanged."""
        self.connection.set_responses(Response(status_code=200, text=AUNIT_RESULTS_XML, headers={}))

        with patch('sap.cli.aunit.print') as mock_print:
            exit_code = self.execute_run('package', 'ypackage', '--output', 'raw', '--result', ResultOptions.ONLY_UNIT.value)

        self.assertEqual(exit_code, 3)
        self.assertEqual(len(self.connection.execs), 1)
        self.assertIn('packages/ypackage', self.connection.execs[0].body)
        self.assertEqual(mock_print.call_args_list[0][0], (AUNIT_RESULTS_XML,))

    def test_aunit_package_with_results_junit4(self):
        """junit4 output renders testsuites/testcases with errors and system-err."""
        self.connection.set_responses(Response(status_code=200, text=AUNIT_RESULTS_XML, headers={}))

        with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
            exit_code = self.execute_run('package', 'ypackage', '--output', 'junit4', '--result', ResultOptions.ONLY_UNIT.value)

        self.assertEqual(exit_code, 3)
        self.assertEqual(len(self.connection.execs), 1)
        self.assertIn('packages/ypackage', self.connection.execs[0].body)
        self.maxDiff = None
        self.assertEqual(mock_stdout.getvalue(),
'''<?xml version="1.0" encoding="UTF-8" ?>
<testsuites name="ypackage">
<testsuite name="LTCL_TEST" package="ZCL_THEKING_MANUAL_HARDCORE" tests="3">
<testcase name="DO_THE_FAIL" classname="ZCL_THEKING_MANUAL_HARDCORE=>LTCL_TEST" status="ERR">
<system-err>True expected
Test 'LTCL_TEST->DO_THE_FAIL' in Main Program 'ZCL_THEKING_MANUAL_HARDCORE===CP'.</system-err>
<error type="failedAssertion" message="Critical Assertion Error: 'I am supposed to fail'">Include: <ZCL_THEKING_MANUAL_HARDCORE===CCAU> Line: <19> (DO_THE_FAIL)</error>
</testcase>
<testcase name="DO_THE_WARN" classname="ZCL_THEKING_MANUAL_HARDCORE=>LTCL_TEST" status="SKIP">
<system-err>True expected
Test 'LTCL_TEST->DO_THE_WARN' in Main Program 'ZCL_THEKING_MANUAL_HARDCORE===CP'.</system-err>
<error type="failedAssertion" message="Warning: 'I am supposed to warn'">Include: <ZCL_THEKING_MANUAL_HARDCORE===CCAU> Line: <19> (DO_THE_WARN)</error>
</testcase>
<testcase name="DO_THE_TEST" classname="ZCL_THEKING_MANUAL_HARDCORE=>LTCL_TEST" status="OK"/>
</testsuite>
<testsuite name="LTCL_TEST_HARDER" package="ZCL_THEKING_MANUAL_HARDCORE" tests="2">
<testcase name="DO_THE_FAIL" classname="ZCL_THEKING_MANUAL_HARDCORE=>LTCL_TEST_HARDER" status="ERR">
<system-err>True expected
Test 'LTCL_TEST_HARDER->DO_THE_FAIL' in Main Program 'ZCL_THEKING_MANUAL_HARDCORE===CP'.</system-err>
<error type="failedAssertion" message="Critical Assertion Error: 'I am supposed to fail'">Include: <ZCL_THEKING_MANUAL_HARDCORE===CCAU> Line: <19> (DO_THE_FAIL)</error>
</testcase>
<testcase name="DO_THE_TEST" classname="ZCL_THEKING_MANUAL_HARDCORE=>LTCL_TEST_HARDER" status="OK"/>
</testsuite>
<testsuite name="LTCL_TEST" package="ZEXAMPLE_TESTS" tests="2">
<testcase name="DO_THE_FAIL" classname="ZEXAMPLE_TESTS=>LTCL_TEST" status="ERR">
<system-err>True expected
Test 'LTCL_TEST->DO_THE_FAIL' in Main Program 'ZEXAMPLE_TESTS'.</system-err>
<error type="failedAssertion" message="Critical Assertion Error: 'I am supposed to fail'">Include: <ZEXAMPLE_TESTS> Line: <24> (DO_THE_FAIL)
Include: <ZEXAMPLE_TESTS> Line: <25> (PREPARE_THE_FAIL)</error>
<error type="failedAssertion" message="Error<LOAD_PROGRAM_CLASS_MISMATCH>"/>
</testcase>
<testcase name="DO_THE_TEST" classname="ZEXAMPLE_TESTS=>LTCL_TEST" status="OK"/>
</testsuite>
</testsuites>
''')

    def test_aunit_package_with_results_sonar(self):
        """Sonar output renders testExecutions with error and skipped elements."""
        self.connection.set_responses(Response(status_code=200, text=AUNIT_RESULTS_XML, headers={}))

        with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
            exit_code = self.execute_run('package', 'ypackage', '--output', 'sonar', '--result', ResultOptions.ONLY_UNIT.value)

        self.assertEqual(exit_code, 3)
        self.assertEqual(len(self.connection.execs), 1)
        self.assertIn('packages/ypackage', self.connection.execs[0].body)
        self.maxDiff = None
        self.assertEqual(mock_stdout.getvalue(),
'''<?xml version="1.0" encoding="UTF-8" ?>
<testExecutions version="1">
<file path="ypackage/ZCL_THEKING_MANUAL_HARDCORE=>LTCL_TEST">
<testCase name="DO_THE_FAIL" duration="33">
<error message="Critical Assertion Error: 'I am supposed to fail'">
True expected
Test 'LTCL_TEST->DO_THE_FAIL' in Main Program 'ZCL_THEKING_MANUAL_HARDCORE===CP'.
Include: <ZCL_THEKING_MANUAL_HARDCORE===CCAU> Line: <19> (DO_THE_FAIL)
</error>
</testCase>
<testCase name="DO_THE_WARN" duration="33">
<skipped message="Warning: 'I am supposed to warn'">
True expected
Test 'LTCL_TEST->DO_THE_WARN' in Main Program 'ZCL_THEKING_MANUAL_HARDCORE===CP'.
Include: <ZCL_THEKING_MANUAL_HARDCORE===CCAU> Line: <19> (DO_THE_WARN)
</skipped>
</testCase>
<testCase name="DO_THE_TEST" duration="0"/>
</file>
<file path="ypackage/ZCL_THEKING_MANUAL_HARDCORE=>LTCL_TEST_HARDER">
<testCase name="DO_THE_FAIL" duration="0">
<error message="Critical Assertion Error: 'I am supposed to fail'">
True expected
Test 'LTCL_TEST_HARDER->DO_THE_FAIL' in Main Program 'ZCL_THEKING_MANUAL_HARDCORE===CP'.
Include: <ZCL_THEKING_MANUAL_HARDCORE===CCAU> Line: <19> (DO_THE_FAIL)
</error>
</testCase>
<testCase name="DO_THE_TEST" duration="0"/>
</file>
<file path="ypackage/ZEXAMPLE_TESTS=>LTCL_TEST">
<testCase name="DO_THE_FAIL" duration="0">
<error message="Critical Assertion Error: 'I am supposed to fail'">
True expected
Test 'LTCL_TEST->DO_THE_FAIL' in Main Program 'ZEXAMPLE_TESTS'.
Include: <ZEXAMPLE_TESTS> Line: <24> (DO_THE_FAIL)
Include: <ZEXAMPLE_TESTS> Line: <25> (PREPARE_THE_FAIL)
</error>
<error message="Error<LOAD_PROGRAM_CLASS_MISMATCH>">
</error>
</testCase>
<testCase name="DO_THE_TEST" duration="0"/>
</file>
</testExecutions>
''')

    def test_aunit_parser_results_global_class_tests(self):
        """Global test class results render under the single requested name."""
        results = sap.adt.aunit.parse_aunit_response(GLOBAL_TEST_CLASS_AUNIT_RESULTS_XML).run_results
        output = StringIO()
        sap.cli.aunit.print_aunit_junit4(results, SimpleNamespace(name=['$TMP']), output)

        self.maxDiff = None
        self.assertEqual(output.getvalue(),
'''<?xml version="1.0" encoding="UTF-8" ?>
<testsuites name="$TMP">
<testsuite name="ZCL_TEST_CLASS" package="ZCL_TEST_CLASS" tests="1">
<testcase name="DO_THE_TEST" classname="ZCL_TEST_CLASS" status="OK"/>
</testsuite>
</testsuites>
''')

    def test_aunit_parser_results_global_class_tests_multiple_targets(self):
        """Multiple requested names are joined with '|' in the testsuites name."""
        results = sap.adt.aunit.parse_aunit_response(GLOBAL_TEST_CLASS_AUNIT_RESULTS_XML)
        output = StringIO()
        sap.cli.aunit.print_aunit_junit4(results.run_results, SimpleNamespace(name=['$TMP', '$LOCAL', '$BAR']), output)

        self.maxDiff = None
        self.assertEqual(output.getvalue(),
'''<?xml version="1.0" encoding="UTF-8" ?>
<testsuites name="$TMP|$LOCAL|$BAR">
<testsuite name="ZCL_TEST_CLASS" package="ZCL_TEST_CLASS" tests="1">
<testcase name="DO_THE_TEST" classname="ZCL_TEST_CLASS" status="OK"/>
</testsuite>
</testsuites>
''')

    def test_aunit_parser_results_global_class_tests_sonar(self):
        """Sonar output for a global test class includes the alert as skipped."""
        results = sap.adt.aunit.parse_aunit_response(GLOBAL_TEST_CLASS_AUNIT_RESULTS_XML).run_results
        output = StringIO()
        sap.cli.aunit.print_aunit_sonar(results, SimpleNamespace(name=['$TMP']), output)

        self.maxDiff = None
        self.assertEqual(output.getvalue(),
'''<?xml version="1.0" encoding="UTF-8" ?>
<testExecutions version="1">
<file path="$TMP/ZCL_TEST_CLASS=>ZCL_TEST_CLASS">
<testCase name="DO_THE_TEST" duration="0"/>
<testCase name="ZCL_TEST_CLASS" duration="0">
<skipped message="The global test class [ZCL_TEST_CLASS] is not abstract">
You can find further informations in document <CHAP> <SAUNIT_TEST_CL_POOL>
</skipped>
</testCase>
</file>
</testExecutions>
''')

    @patch('os.walk')
    def test_print_aunit_sonar_filename_is_not_none(self, walk):
        """A matching *.testclasses.abap file found on disk is used as the path."""
        walk.return_value = [('.', None, ['zcl_theking_manual_hardcore.clas.testclasses.abap', 'bar'])]
        results = sap.adt.aunit.parse_aunit_response(AUNIT_RESULTS_NO_TEST_METHODS_XML).run_results
        output = StringIO()
        sap.cli.aunit.print_aunit_sonar(results, SimpleNamespace(name=['foo']), output)

        self.assertEqual(
'''<?xml version="1.0" encoding="UTF-8" ?>
<testExecutions version="1">
<file path="zcl_theking_manual_hardcore.clas.testclasses.abap">
</file>
</testExecutions>
''',
            output.getvalue()
        )

    def test_print_acoverage_output_raises(self):
        """An unsupported --coverage-output value is rejected with SAPCliError."""
        with self.assertRaises(SAPCliError) as cm:
            sap.cli.aunit.print_acoverage_output(SimpleNamespace(coverage_output='foo'), Mock(), Mock(), Mock())

        self.assertEqual(str(cm.exception), 'Unsupported output type: foo')

    @patch('sap.cli.aunit.get_acoverage_statements')
    def test_acoverage_package_with_results_raw(self, get_acoverage_statements):
        """Raw coverage output prints the ACoverage response XML."""
        get_acoverage_statements.return_value = []
        self.connection.set_responses(
            Response(status_code=200, text=AUNIT_RESULTS_XML, headers={}),
            Response(status_code=200, text=ACOVERAGE_RESULTS_XML, headers={})
        )

        with patch('sap.cli.aunit.print') as mock_print:
            exit_code = self.execute_run(
                'package', 'ypackage', '--coverage-output', 'raw', '--result', ResultOptions.ONLY_COVERAGE.value
            )

        # Coverage-only runs do not surface unit-test errors via the exit code.
        self.assertEqual(exit_code, None)
        self.assertEqual(len(self.connection.execs), 2)
        self.assertEqual(mock_print.call_args_list[0], call(ACOVERAGE_RESULTS_XML, file=sys.stdout))

    @patch('sap.cli.aunit.get_acoverage_statements')
    def test_acoverage_package_with_results_human(self, get_acoverage_statements):
        """Human coverage output shows per-node coverage percentages."""
        get_acoverage_statements.return_value = []
        self.connection.set_responses(
            Response(status_code=200, text=AUNIT_RESULTS_XML, headers={}),
            Response(status_code=200, text=ACOVERAGE_RESULTS_XML, headers={})
        )

        with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
            exit_code = self.execute_run(
                'package', 'ypackage', '--coverage-output', 'human', '--result', ResultOptions.ONLY_COVERAGE.value
            )

        self.assertEqual(exit_code, None)
        self.assertEqual(len(self.connection.execs), 2)
        self.assertEqual(mock_stdout.getvalue(),
'''TEST_CHECK_LIST : 29.00%
FOO===========================CP : 95.24%
FOO : 95.24%
METHOD_A : 100.00%
METHOD_B : 75.00%
BAR===========================CP : 0.00%
''')

    def test_acoverage_package_with_results_jacoco(self):
        """JaCoCo coverage output renders report/package/class/method counters."""
        self.connection.set_responses(
            Response(status_code=200, text=AUNIT_RESULTS_XML, headers={}),
            Response(status_code=200, text=ACOVERAGE_RESULTS_XML, headers={}),
            Response(status_code=200, text=ACOVERAGE_STATEMENTS_RESULTS_XML, headers={}),
        )

        with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
            exit_code = self.execute_run(
                'package', 'ypackage', '--coverage-output', 'jacoco', '--result', ResultOptions.ONLY_COVERAGE.value
            )

        self.assertEqual(exit_code, None)
        # Third request fetches statement-level coverage for the jacoco lines.
        self.assertEqual(len(self.connection.execs), 3)
        self.assertEqual(mock_stdout.getvalue(),
'''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<!DOCTYPE report PUBLIC "-//JACOCO//DTD Report 1.1//EN" "report.dtd">
<report name="ypackage">
<package name="TEST_CHECK_LIST">
<class name="FOO" sourcefilename="FOO">
<method name="METHOD_A" line="52">
<counter type="BRANCH" missed="2" covered="3"/>
<counter type="METHOD" missed="0" covered="1"/>
<counter type="INSTRUCTION" missed="0" covered="5"/>
</method>
<method name="METHOD_B" line="199">
<counter type="BRANCH" missed="1" covered="1"/>
<counter type="METHOD" missed="0" covered="1"/>
<counter type="INSTRUCTION" missed="2" covered="6"/>
</method>
<counter type="BRANCH" missed="7" covered="22"/>
<counter type="METHOD" missed="0" covered="8"/>
<counter type="INSTRUCTION" missed="3" covered="60"/>
</class>
<sourcefile name="FOO">
<line nr="53" mi="0" ci="1"/>
<line nr="54" mi="0" ci="1"/>
<line nr="55" mi="0" ci="1"/>
<line nr="56" mi="0" ci="1"/>
<line nr="59" mi="0" ci="1"/>
<line nr="209" mi="0" ci="1"/>
<line nr="212" mi="0" ci="1"/>
<line nr="215" mi="0" ci="1"/>
<line nr="216" mi="0" ci="1"/>
<line nr="219" mi="0" ci="1"/>
<line nr="220" mi="0" ci="1"/>
<line nr="224" mi="1" ci="0"/>
<line nr="225" mi="1" ci="0"/>
</sourcefile>
<class name="BAR" sourcefilename="BAR">
<counter type="BRANCH" missed="0" covered="0"/>
<counter type="METHOD" missed="0" covered="0"/>
<counter type="INSTRUCTION" missed="0" covered="0"/>
</class>
<counter type="BRANCH" missed="105" covered="29"/>
<counter type="METHOD" missed="42" covered="10"/>
<counter type="INSTRUCTION" missed="235" covered="96"/>
</package>
</report>
''')

    def test_result_option_all(self):
        """--result all prints both unit-test and coverage raw XML."""
        self.connection.set_responses(
            Response(status_code=200, text=AUNIT_RESULTS_XML, headers={}),
            Response(status_code=200, text=ACOVERAGE_RESULTS_XML, headers={}),
            Response(status_code=200, text=ACOVERAGE_STATEMENTS_RESULTS_XML, headers={}),
        )

        with patch('sap.cli.aunit.print') as mock_print:
            exit_code = self.execute_run(
                'package', 'ypackage', '--output', 'raw', '--coverage-output', 'raw', '--result', ResultOptions.ALL.value
            )

        self.assertEqual(exit_code, 3)
        self.assertEqual(len(self.connection.execs), 3)
        self.assertEqual(len(mock_print.call_args_list), 2)
        self.assertEqual(mock_print.call_args_list[0], call(AUNIT_RESULTS_XML, file=sys.stdout))
        self.assertEqual(mock_print.call_args_list[1], call(ACOVERAGE_RESULTS_XML, file=sys.stdout))

    def test_result_option_unit(self):
        """--result unit prints only the unit-test XML and skips coverage requests."""
        self.connection.set_responses(
            Response(status_code=200, text=AUNIT_RESULTS_XML, headers={})
        )

        with patch('sap.cli.aunit.print') as mock_print:
            exit_code = self.execute_run(
                'package', 'ypackage', '--output', 'raw', '--coverage-output', 'raw', '--result', ResultOptions.ONLY_UNIT.value
            )

        self.assertEqual(exit_code, 3)
        self.assertEqual(len(self.connection.execs), 1)
        self.assertEqual(len(mock_print.call_args_list), 1)
        self.assertEqual(mock_print.call_args_list[0], call(AUNIT_RESULTS_XML, file=sys.stdout))

    def test_result_option_coverage(self):
        """--result coverage prints only the coverage XML."""
        self.connection.set_responses(
            Response(status_code=200, text=AUNIT_RESULTS_XML, headers={}),
            Response(status_code=200, text=ACOVERAGE_RESULTS_XML, headers={}),
            Response(status_code=200, text=ACOVERAGE_STATEMENTS_RESULTS_XML, headers={}),
        )

        with patch('sap.cli.aunit.print') as mock_print:
            exit_code = self.execute_run(
                'package', 'ypackage', '--output', 'raw', '--coverage-output', 'raw', '--result', ResultOptions.ONLY_COVERAGE.value
            )

        self.assertEqual(exit_code, None)
        self.assertEqual(len(self.connection.execs), 3)
        self.assertEqual(len(mock_print.call_args_list), 1)
        self.assertEqual(mock_print.call_args_list[0], call(ACOVERAGE_RESULTS_XML, file=sys.stdout))

    def test_coverage_filepath(self):
        """--coverage-filepath opens the given file for writing the coverage."""
        self.connection.set_responses(
            Response(status_code=200, text=AUNIT_RESULTS_XML, headers={}),
            Response(status_code=200, text=ACOVERAGE_RESULTS_XML, headers={}),
            Response(status_code=200, text=ACOVERAGE_STATEMENTS_RESULTS_XML, headers={}),
        )
        coverage_filepath = 'path/to/coverage'

        with patch('sap.cli.aunit.open', mock_open()) as mock_file:
            exit_code = self.execute_run(
                'package', 'ypackage', '--output', 'raw', '--coverage-output', 'raw', '--result', ResultOptions.ONLY_COVERAGE.value,
                '--coverage-filepath', coverage_filepath
            )

        mock_file.assert_called_with(coverage_filepath, 'w+')

    def test_aunit_parser_results_global_class_tests_sonar_multiple_targets(self):
        """With several names, an unmatched file path falls back to UNKNOWN_PACKAGE."""
        results = sap.adt.aunit.parse_aunit_response(GLOBAL_TEST_CLASS_AUNIT_RESULTS_XML)
        output = StringIO()
        sap.cli.aunit.print_aunit_sonar(results.run_results, SimpleNamespace(name=['$LOCAL', '$TMP']), output)

        self.maxDiff = None
        self.assertEqual(output.getvalue(),
'''<?xml version="1.0" encoding="UTF-8" ?>
<testExecutions version="1">
<file path="UNKNOWN_PACKAGE/ZCL_TEST_CLASS=>ZCL_TEST_CLASS">
<testCase name="DO_THE_TEST" duration="0"/>
<testCase name="ZCL_TEST_CLASS" duration="0">
<skipped message="The global test class [ZCL_TEST_CLASS] is not abstract">
You can find further informations in document <CHAP> <SAUNIT_TEST_CL_POOL>
</skipped>
</testCase>
</file>
</testExecutions>
''')
class TestAUnitCommandRunTransport(unittest.TestCase):
    """Tests for ``aunit run transport``: collecting testable objects from CTS."""

    @patch('sap.adt.cts.Workbench.fetch_transport_request')
    def test_not_found_transport(self, fake_fetch_transports):
        """A transport number that cannot be fetched raises SAPCliError."""
        fake_fetch_transports.return_value = None

        connection = Mock()
        args = parse_args('run', 'transport', 'NPLK123456')

        with self.assertRaises(sap.errors.SAPCliError) as caught:
            args.execute(connection, args)

        self.assertEqual(str(caught.exception), 'The transport was not found: NPLK123456')

    @patch('sap.cli.core.printerr')
    @patch('sap.adt.cts.Workbench.fetch_transport_request')
    def test_no_testable_objects(self, fake_fetch_transports, fake_printerr):
        """An empty transport prints an error message and returns exit code 1."""
        connection = Mock()
        fake_fetch_transports.return_value = sap.adt.cts.WorkbenchTransport(
            [], connection, 'NPLK123456', 'FILAK', 'Description', 'D')

        args = parse_args('run', 'transport', 'NPLK123456')
        ret = args.execute(connection, args)

        fake_printerr.assert_called_once_with('No testable objects found')
        self.assertEqual(ret, 1)

    @patch('sap.adt.cts.Workbench.fetch_transport_request')
    @patch('sap.adt.AUnit.execute')
    def test_all_kinds_and_more(self, fake_execute, fake_fetch_transports):
        """PROG, CLAS and FUGR objects are included in the run; TABU is filtered out."""
        connection = Connection()
        fake_fetch_transports.return_value = sap.adt.cts.WorkbenchTransport(
            [sap.adt.cts.WorkbenchTask('NPLK123456',
                                       [sap.adt.cts.WorkbenchABAPObject('R3TR', 'PROG', 'program', 'T', 'descr', 'X', '000000'),
                                        sap.adt.cts.WorkbenchABAPObject('R3TR', 'CLAS', 'class', 'T', 'descr', 'X', '000001'),
                                        ],
                                       connection, 'NPLK123457', 'FILAK', 'Description', 'D'),
             sap.adt.cts.WorkbenchTask('NPLK123456',
                                       [sap.adt.cts.WorkbenchABAPObject('R3TR', 'FUGR', 'functions', 'T', 'descr', 'X', '000000'),
                                        sap.adt.cts.WorkbenchABAPObject('R3TR', 'TABU', 'table', 'T', 'descr', 'X', '000001'),
                                        ],
                                       connection, 'NPLK123458', 'FILAK', 'Description', 'D'),
             ],
            connection, 'NPLK123456', 'FILAK', 'Description', 'D')

        # Raised from the fake execute so the command stops right after the check.
        class SentinelError(Exception):
            pass

        def assert_objects(obj_sets, activate_coverage):
            # The table object (TABU) must not appear among the references.
            inclusive = [(ref.uri, ref.name) for ref in obj_sets.inclusive.references.references]
            self.assertEqual(inclusive, [('/sap/bc/adt/programs/programs/program', 'PROGRAM'),
                                         ('/sap/bc/adt/oo/classes/class', 'CLASS'),
                                         ('/sap/bc/adt/functions/groups/functions', 'FUNCTIONS')])
            raise SentinelError()

        fake_execute.side_effect = assert_objects
        args = parse_args('run', 'transport', 'NPLK123456')

        with self.assertRaises(SentinelError):
            args.execute(connection, args)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 47.130653
| 186
| 0.674166
| 3,608
| 28,137
| 5.017461
| 0.090078
| 0.063802
| 0.039883
| 0.034746
| 0.815279
| 0.791747
| 0.758825
| 0.735734
| 0.723968
| 0.688063
| 0
| 0.019786
| 0.179124
| 28,137
| 596
| 187
| 47.209732
| 0.763995
| 0.000746
| 0
| 0.442368
| 0
| 0
| 0.139463
| 0.03223
| 0
| 0
| 0
| 0
| 0.302181
| 1
| 0.093458
| false
| 0.003115
| 0.040498
| 0
| 0.146417
| 0.196262
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7267905e0aa66fec5b4ab84cbb86db0a6e1955c0
| 82
|
py
|
Python
|
3]. Competitive Programming/09]. HackerRank/2]. Tutorials/1]. 30 Days of Code/Python/Day_03.py
|
MLinesCode/The-Complete-FAANG-Preparation
|
2d0c7e8940eb2a58caaf4e978e548c08dd1f9a52
|
[
"MIT"
] | 6,969
|
2021-05-29T11:38:30.000Z
|
2022-03-31T19:31:49.000Z
|
3]. Competitive Programming/09]. HackerRank/2]. Tutorials/1]. 30 Days of Code/Python/Day_03.py
|
MLinesCode/The-Complete-FAANG-Preparation
|
2d0c7e8940eb2a58caaf4e978e548c08dd1f9a52
|
[
"MIT"
] | 75
|
2021-06-15T07:59:43.000Z
|
2022-02-22T14:21:52.000Z
|
3]. Competitive Programming/09]. HackerRank/2]. Tutorials/1]. 30 Days of Code/Python/Day_03.py
|
MLinesCode/The-Complete-FAANG-Preparation
|
2d0c7e8940eb2a58caaf4e978e548c08dd1f9a52
|
[
"MIT"
] | 1,524
|
2021-05-29T16:03:36.000Z
|
2022-03-31T17:46:13.000Z
|
# 3rd Solution
# Read a meal cost, a tip percentage and a tax percentage from stdin,
# then print the total cost rounded to the nearest integer
# (banker's rounding, as Python's built-in round()).
meal_cost = float(input())
tip_percent = int(input())
tax_percent = int(input())
total_cost = meal_cost * (1 + tip_percent * .01 + tax_percent * .01)
print(round(total_cost))
| 27.333333
| 66
| 0.634146
| 13
| 82
| 4
| 0.692308
| 0.307692
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0.04878
| 82
| 3
| 66
| 27.333333
| 0.589744
| 0.146341
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
72700cef183574dc58e7c2d12c2d04b5a102376d
| 27
|
py
|
Python
|
pg_analyse/inspections/contrib/index_health/__init__.py
|
idlesign/pg_analyse
|
ba0f6fd0e6aa8ca6978055e241c03dd47cf7ce16
|
[
"BSD-3-Clause"
] | 19
|
2020-03-11T19:30:01.000Z
|
2022-03-18T11:51:41.000Z
|
pg_analyse/inspections/contrib/index_health/__init__.py
|
idlesign/pg_analyse
|
ba0f6fd0e6aa8ca6978055e241c03dd47cf7ce16
|
[
"BSD-3-Clause"
] | 2
|
2020-03-23T09:31:08.000Z
|
2020-04-28T09:32:49.000Z
|
pg_analyse/inspections/contrib/index_health/__init__.py
|
idlesign/pg_analyse
|
ba0f6fd0e6aa8ca6978055e241c03dd47cf7ce16
|
[
"BSD-3-Clause"
] | null | null | null |
# Re-export the sub-module's public names at the package level.
from .inspections import *
| 13.5
| 26
| 0.777778
| 3
| 27
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
728931c8fb03871335012bd1b7af214066bd0bd6
| 14
|
py
|
Python
|
globals.py
|
vision-and-sensing/Adaptive-LiDAR-Sampling
|
fa49901cd9662393ffc2d267633ebe0b65be0a30
|
[
"MIT"
] | 4
|
2021-02-22T15:08:12.000Z
|
2021-09-17T03:33:24.000Z
|
globals.py
|
vision-and-sensing/Adaptive-LiDAR-Sampling
|
fa49901cd9662393ffc2d267633ebe0b65be0a30
|
[
"MIT"
] | null | null | null |
globals.py
|
vision-and-sensing/Adaptive-LiDAR-Sampling
|
fa49901cd9662393ffc2d267633ebe0b65be0a30
|
[
"MIT"
] | null | null | null |
# Project-wide constants shared via this globals module.
# NOTE(review): their semantics are defined by usage elsewhere and are not
# visible from this file — confirm before renaming or changing values.
B = 1024  # presumably a budget/size constant (e.g. number of samples) — TODO confirm
K = 4     # presumably a count/factor constant — TODO confirm
| 7
| 8
| 0.5
| 4
| 14
| 1.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.555556
| 0.357143
| 14
| 2
| 9
| 7
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7290919ec4cdcbc5f4fefb78f2ee7dc6fa66bf39
| 7,267
|
py
|
Python
|
pytype/tools/pyi_checker/errors_test.py
|
adamcataldo/pytype
|
7163e85880b52d53d58044e53157e2a21988308e
|
[
"Apache-2.0"
] | 2
|
2019-07-25T12:53:02.000Z
|
2019-08-18T16:26:16.000Z
|
pytype/tools/pyi_checker/errors_test.py
|
adamcataldo/pytype
|
7163e85880b52d53d58044e53157e2a21988308e
|
[
"Apache-2.0"
] | null | null | null |
pytype/tools/pyi_checker/errors_test.py
|
adamcataldo/pytype
|
7163e85880b52d53d58044e53157e2a21988308e
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for pyi_checker errors.
These are sanity checks to make sure error messages are correct.
"""
import textwrap
from pytype.tools.pyi_checker import definitions
from pytype.tools.pyi_checker import errors
from pytype.tools.pyi_checker import test_utils as utils
from typed_ast import ast3
import unittest
class ErrorTest(unittest.TestCase):
def test_missing_type_hint(self):
    """A function without any type hint is reported by name."""
    source_def = utils.func_from_source("def test(a, b): return a + b")
    error = errors.MissingTypeHint(source_def)
    self.assertRegex(error.message, "No type hint found for function test.")
def test_extra_type_hint(self):
    """A hint with no matching source definition is reported."""
    hint_def = utils.func_from_source("def test(a, b) -> int: ...")
    error = errors.ExtraTypeHint(hint_def)
    self.assertRegex(
        error.message,
        "Type hint for function test has no corresponding source definition.")
def test_wrong_type_hint(self):
    """A kind mismatch (function vs class) reports both definitions."""
    source_def = utils.func_from_source("def test(a, b) -> int: ...")
    hint_def = utils.class_from_source("class test: ...")
    error = errors.WrongTypeHint(source_def, hint_def)
    self.assertRegex(
        error.message,
        r"^Type hint kind does not match source definition.\n.*"
        r"function test.*\n.*class test$")
def test_wrong_decorators(self):
    """Missing and extra decorators are both listed in the error message."""
    src = utils.func_from_source(
        """
@dec1
@dec2
@dec3
def test(): ...
""")
    hint = utils.func_from_source(
        """
@dec1
@decZ
def test(): ...
""")
    err = errors.WrongDecorators(src, hint)
    # dec2/dec3 only exist in the source, decZ only in the hint.
    self.assertRegex(
        err.message,
        r"^Type hint for function test has incorrect decorators.\n"
        r".*Missing.*: dec2, dec3\n.*Extras.*: decZ$")
def test_wrong_arg_count(self):
    """Differing positional-argument counts show both abbreviated signatures."""
    source_def = utils.func_from_source("def test(a, b, *c, d, e, f, **g): pass")
    hint_def = utils.func_from_source("def test(a, *c, d, e, f, **g): ...")
    error = errors.WrongArgCount(source_def, hint_def)
    self.assertRegex(
        error.message,
        r"^Type hint for function test has the wrong number of arguments.\n"
        r".*Source:\s*def test\(a, b, \.\.\.\)\n"
        r".*Type hint:\s*def test\(a, \.\.\.\)$")
def test_no_source_arg_count(self):
    """A vararg-only source signature is rendered as '(...)'."""
    source_def = utils.func_from_source("def test(*c): pass")
    hint_def = utils.func_from_source("def test(a, b): ...")
    error = errors.WrongArgCount(source_def, hint_def)
    self.assertRegex(
        error.message,
        r"Type hint for function test has the wrong number of arguments.\n"
        r".*Source:\s*def test\(\.\.\.\)\n"
        r".*Type hint:\s*def test\(a, b\)$")
def test_no_hint_arg_count(self):
    """A vararg-only hint signature is rendered as '(...)'."""
    source_def = utils.func_from_source("def test(a, b): pass")
    hint_def = utils.func_from_source("def test(*c): ...")
    error = errors.WrongArgCount(source_def, hint_def)
    self.assertRegex(
        error.message,
        r"Type hint for function test has the wrong number of arguments.\n"
        r".*Source:\s*def test\(a, b\)\n"
        r".*Type hint:\s*def test\(\.\.\.\)$")
def test_wrong_kwonly_count(self):
    """Differing keyword-only argument counts show both signatures.

    Bug fix: the original had a stray comma after the "Source:" raw string,
    which passed the final "Type hint:" string as assertRegex's separate
    `msg` argument instead of concatenating it into the pattern — so the
    "Type hint:" line was never actually checked. The comma is removed so
    the whole pattern is matched (mirroring test_no_source_kwonly_count).
    """
    src = utils.func_from_source("def test(a, b, *c, d, e, f, **g): pass")
    hint = utils.func_from_source("def test(a, b, *c, d, e, f, h, **g): ...")
    err = errors.WrongKwonlyCount(src, hint)
    self.assertRegex(
        err.message,
        r"Type hint for function test has the wrong number "
        r"of keyword-only arguments.\n"
        r".*Source:\s*def test\(\.\.\., \*c, d, e, f, \.\.\.\)\n"
        r".*Type hint:\s*def test\(\.\.\., \*c, d, e, f, h, \.\.\.\)$")
# Source has no kwonly args; hint's trailing comma must not produce an
# extra arg in the rendering.
def test_no_source_kwonly_count(self):
src = utils.func_from_source("def test(): pass")
hint = utils.func_from_source("def test(*, a, b,): ...")
err = errors.WrongKwonlyCount(src, hint)
self.assertRegex(
err.message,
r"Type hint for function test has the wrong number "
r"of keyword-only arguments.\n"
r".*Source:\s*def test\(\)\n"
r".*Type hint:\s*def test\(\*, a, b\)$")
def test_no_hint_kwonly_count(self):
    """Hint omits every keyword-only argument the source declares."""
    src = utils.func_from_source("def test(*c, d, e, f, **g): pass")
    hint = utils.func_from_source("def test(*c): ...")
    err = errors.WrongKwonlyCount(src, hint)
    # FIX: '...' is now escaped as '\.\.\.' — unescaped dots match ANY
    # three characters, making the assertion weaker than intended and
    # inconsistent with the sibling tests in this class.
    self.assertRegex(
        err.message,
        r"Type hint for function test has the wrong number "
        r"of keyword-only arguments.\n"
        r".*Source:\s*def test\(\*c, d, e, f, \.\.\.\)\n"
        r".*Type hint:\s*def test\(\.\.\.\)$")
def test_wrong_arg_name(self):
    """Hint names a positional argument 'c' the source does not have."""
    src = utils.func_from_source("def test(a, b, e): pass")
    hint = utils.func_from_source("def test(a, b, c): ...")
    err = errors.WrongArgName(src, hint, "c")
    # BUG FIX: the original had a stray comma after the 'Source:' pattern,
    # so the 'Type hint:' pattern was silently passed as assertRegex's
    # `msg` parameter and never checked; it also ended the regex with
    # '\n' instead of the '$' anchor used by every sibling test.
    self.assertRegex(
        err.message,
        r"Function test has no argument named 'c'.\n"
        r".*Source:\s*def test\(a, b, e\)\n"
        r".*Type hint:\s*def test\(a, b, c\)$")
# Keyword-only variant of the wrong-name error; both signatures rendered
# in full with the leading '*,' marker.
def test_wrong_kwonly_name(self):
src = utils.func_from_source("def test(*, d, e, f): pass")
hint = utils.func_from_source("def test(*, d, c, f): ...")
err = errors.WrongKwonlyName(src, hint, "c")
self.assertRegex(
err.message,
r"Function test has no keyword-only argument named 'c'.\n"
r".*Source:\s*def test\(\*, d, e, f\)\n"
r".*Type hint:\s*def test\(\*, d, c, f\)$")
# Hint declares *a but the source has no vararg: single-line message.
def test_no_source_vararg(self):
src = utils.func_from_source("def test(a, b, **g): pass")
hint = utils.func_from_source("def test(*a, **g): ...")
err = errors.WrongVararg(src, hint)
self.assertRegex(
err.message,
r"Type hint for function test should not have vararg '\*a'.")
# Inverse case: source declares *c, hint omits it entirely.
def test_no_hint_vararg(self):
src = utils.func_from_source("def test(*c): pass")
hint = utils.func_from_source("def test(a, b): ...")
err = errors.WrongVararg(src, hint)
self.assertRegex(
err.message,
r"Type hint for function test is missing the vararg '\*c'.")
# Both sides have a vararg but under different names (*c vs *z); the
# surrounding args are elided with '...'.
def test_wrong_vararg_name(self):
src = utils.func_from_source("def test(a, b, *c, d, e): pass")
hint = utils.func_from_source("def test(a, b, *z, d, e): ...")
err = errors.WrongVararg(src, hint)
self.assertRegex(
err.message,
r"Type hint for function test has wrong vararg name.\n"
r".*Source:\s*def test\(\.\.\., \*c, \.\.\.\)\n"
r".*Type hint:\s*def test\(\.\.\., \*z, \.\.\.\)$")
# Hint declares **a but the source takes no keyword argument.
def test_no_source_kwarg(self):
src = utils.func_from_source("def test(): pass")
hint = utils.func_from_source("def test(**a): ...")
err = errors.WrongKwarg(src, hint)
self.assertRegex(
err.message,
r"Type hint for function test should not have "
r"keyword argument '\*\*a'\.")
# Inverse case: source declares **a, hint omits it.
def test_no_hint_kwarg(self):
src = utils.func_from_source("def test(**a): pass")
hint = utils.func_from_source("def test(): pass")
err = errors.WrongKwarg(src, hint)
self.assertRegex(
err.message,
r"Type hint for function test is missing keyword argument '\*\*a'\.")
# Both sides have a kwarg but under different names (**e vs **c).
def test_wrong_kwarg_name(self):
src = utils.func_from_source("def test(a, b, **e): pass")
hint = utils.func_from_source("def test(a, b, **c): ...")
err = errors.WrongKwarg(src, hint)
self.assertRegex(
err.message,
r"Type hint for function test has wrong keyword argument name.\n"
r".*Source:\s*def test\(\.\.\., \*\*e\)\n"
r".*Type hint:\s*def test\(\.\.\., \*\*c\)$")
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
unittest.main()
| 36.335
| 78
| 0.599972
| 1,065
| 7,267
| 3.96338
| 0.098592
| 0.117745
| 0.101635
| 0.148543
| 0.808576
| 0.768775
| 0.734423
| 0.718313
| 0.696281
| 0.645582
| 0
| 0.001244
| 0.225953
| 7,267
| 199
| 79
| 36.517588
| 0.749156
| 0.013073
| 0
| 0.42236
| 0
| 0.012422
| 0.386403
| 0
| 0
| 0
| 0
| 0
| 0.111801
| 1
| 0.111801
| false
| 0.093168
| 0.037267
| 0
| 0.15528
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
72bc7ca8b72af9d6cb78a5606b0064365978d64c
| 3,292
|
py
|
Python
|
chaosk8s/crd/probes.py
|
kpk-pl/chaostoolkit-kubernetes
|
01558b3273fc52f6e1bcf575c1e0ae10ad4b2ce0
|
[
"Apache-2.0"
] | 176
|
2017-10-14T10:19:24.000Z
|
2022-03-16T07:31:07.000Z
|
chaosk8s/crd/probes.py
|
kpk-pl/chaostoolkit-kubernetes
|
01558b3273fc52f6e1bcf575c1e0ae10ad4b2ce0
|
[
"Apache-2.0"
] | 112
|
2017-12-11T13:51:48.000Z
|
2022-03-30T14:10:50.000Z
|
chaosk8s/crd/probes.py
|
kpk-pl/chaostoolkit-kubernetes
|
01558b3273fc52f6e1bcf575c1e0ae10ad4b2ce0
|
[
"Apache-2.0"
] | 70
|
2018-01-23T23:37:42.000Z
|
2022-01-07T17:34:22.000Z
|
import json
from typing import Any, Dict, List
from chaoslib.exceptions import ActivityFailed
from chaoslib.types import Secrets
from kubernetes import client
from kubernetes.client.rest import ApiException
from chaosk8s import create_k8s_api_client
__all__ = [
"get_custom_object",
"get_cluster_custom_object",
"list_custom_objects",
"list_cluster_custom_objects",
]
def get_custom_object(
    group: str,
    version: str,
    plural: str,
    name: str,
    ns: str = "default",
    secrets: Secrets = None,
) -> Dict[str, Any]:
    """
    Get a custom object in the given namespace.

    Read more about custom resources here:
    https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/
    """  # noqa: E501
    api = client.CustomObjectsApi(create_k8s_api_client(secrets))
    try:
        r = api.get_namespaced_custom_object(
            group, version, ns, plural, name, _preload_content=False
        )
        # _preload_content=False yields the raw response; decode it here so
        # the caller receives a plain dict.
        return json.loads(r.data)
    except ApiException as x:
        # Message fixed: this probe reads an object; the original reused the
        # "Failed to create ..." text from a copy-paste.
        raise ActivityFailed(
            f"Failed to get custom resource object: '{x.reason}' {x.body}"
        ) from x
def list_custom_objects(
    group: str, version: str, plural: str, ns: str = "default", secrets: Secrets = None
) -> List[Dict[str, Any]]:
    """
    List custom objects in the given namespace.

    Read more about custom resources here:
    https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/
    """  # noqa: E501
    api = client.CustomObjectsApi(create_k8s_api_client(secrets))
    try:
        r = api.list_namespaced_custom_object(
            group, version, ns, plural, _preload_content=False
        )
        # Decode the raw body ourselves; _preload_content=False disables the
        # client's own deserialization.
        return json.loads(r.data)
    except ApiException as x:
        # Message fixed: this probe lists objects, it does not create them.
        raise ActivityFailed(
            f"Failed to list custom resource objects: '{x.reason}' {x.body}"
        ) from x
def get_cluster_custom_object(
    group: str, version: str, plural: str, name: str, secrets: Secrets = None
) -> Dict[str, Any]:
    """
    Get a custom object cluster-wide.

    Read more about custom resources here:
    https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/
    """  # noqa: E501
    api = client.CustomObjectsApi(create_k8s_api_client(secrets))
    try:
        r = api.get_cluster_custom_object(
            group, version, plural, name, _preload_content=False
        )
        # Decode the raw body ourselves; _preload_content=False disables the
        # client's own deserialization.
        return json.loads(r.data)
    except ApiException as x:
        # Message fixed: this probe reads an object; the original reused the
        # "Failed to create ..." text from a copy-paste.
        raise ActivityFailed(
            f"Failed to get cluster custom resource object: '{x.reason}' {x.body}"
        ) from x
def list_cluster_custom_objects(
    group: str, version: str, plural: str, secrets: Secrets = None
) -> List[Dict[str, Any]]:
    """
    List custom objects cluster-wide.

    Read more about custom resources here:
    https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/
    """  # noqa: E501
    api = client.CustomObjectsApi(create_k8s_api_client(secrets))
    try:
        r = api.list_cluster_custom_object(
            group, version, plural, _preload_content=False
        )
        # Decode the raw body ourselves; _preload_content=False disables the
        # client's own deserialization.
        return json.loads(r.data)
    except ApiException as x:
        # Message fixed: this probe lists objects, it does not create them.
        raise ActivityFailed(
            f"Failed to list cluster custom resource objects: '{x.reason}' {x.body}"
        ) from x
| 29.927273
| 89
| 0.666768
| 406
| 3,292
| 5.261084
| 0.182266
| 0.05618
| 0.047753
| 0.042135
| 0.856273
| 0.851592
| 0.822566
| 0.777622
| 0.740169
| 0.740169
| 0
| 0.007117
| 0.231774
| 3,292
| 109
| 90
| 30.201835
| 0.837485
| 0.213852
| 0
| 0.4
| 0
| 0
| 0.140449
| 0.020867
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.1
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
72c61b4f648b128ff13cd73e1a076801b5109dff
| 25,490
|
py
|
Python
|
tests/test_subscription.py
|
trag-stripe/dj-stripe
|
31fac752ab6540a917a554907d799e41ac3eba9c
|
[
"MIT"
] | null | null | null |
tests/test_subscription.py
|
trag-stripe/dj-stripe
|
31fac752ab6540a917a554907d799e41ac3eba9c
|
[
"MIT"
] | null | null | null |
tests/test_subscription.py
|
trag-stripe/dj-stripe
|
31fac752ab6540a917a554907d799e41ac3eba9c
|
[
"MIT"
] | null | null | null |
"""
dj-stripe Subscription Model Tests.
"""
from copy import deepcopy
from decimal import Decimal
from unittest.mock import patch
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.utils import timezone
from stripe.error import InvalidRequestError
from djstripe.enums import SubscriptionStatus
from djstripe.models import Plan, Subscription
from . import (
FAKE_CUSTOMER,
FAKE_CUSTOMER_II,
FAKE_PLAN,
FAKE_PLAN_II,
FAKE_PLAN_METERED,
FAKE_PRODUCT,
FAKE_SUBSCRIPTION,
FAKE_SUBSCRIPTION_CANCELED,
FAKE_SUBSCRIPTION_METERED,
FAKE_SUBSCRIPTION_MULTI_PLAN,
FAKE_SUBSCRIPTION_NOT_PERIOD_CURRENT,
AssertStripeFksMixin,
datetime_to_unix,
)
class SubscriptionTest(AssertStripeFksMixin, TestCase):
# Create a local user and bind the fake Stripe customer fixture to it;
# every test in this class syncs subscription fixtures against this
# customer.
def setUp(self):
self.user = get_user_model().objects.create_user(
username="pydanny", email="pydanny@gmail.com"
)
self.customer = FAKE_CUSTOMER.create_for_user(self.user)
# Foreign keys expected to stay blank after a plain subscription sync.
self.default_expected_blank_fks = {
"djstripe.Customer.coupon",
"djstripe.Subscription.pending_setup_intent",
}
# str(subscription) should render as "<email> on <plan>"; the patches
# stub out all Stripe API retrievals triggered by sync_from_stripe_data.
@patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN), autospec=True)
@patch(
"stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_str(
self, customer_retrieve_mock, product_retrieve_mock, plan_retrieve_mock
):
subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
subscription = Subscription.sync_from_stripe_data(subscription_fake)
self.assertEqual(
str(subscription),
"{email} on {plan}".format(
email=self.user.email, plan=str(subscription.plan)
),
)
self.assert_fks(
subscription, expected_blank_fks=self.default_expected_blank_fks
)
@patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN), autospec=True)
@patch(
"stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_is_status_temporarily_current(
self, customer_retrieve_mock, product_retrieve_mock, plan_retrieve_mock
):
subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
subscription = Subscription.sync_from_stripe_data(subscription_fake)
subscription.canceled_at = timezone.now() + timezone.timedelta(days=7)
subscription.current_period_end = timezone.now() + timezone.timedelta(days=7)
subscription.cancel_at_period_end = True
subscription.save()
self.assertTrue(subscription.is_status_current())
self.assertTrue(subscription.is_status_temporarily_current())
self.assertTrue(subscription.is_valid())
self.assertTrue(subscription in self.customer.active_subscriptions)
self.assertTrue(self.customer.has_active_subscription())
self.assertTrue(self.customer.has_any_active_subscription())
self.assert_fks(
subscription, expected_blank_fks=self.default_expected_blank_fks
)
@patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN), autospec=True)
@patch(
"stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_is_status_temporarily_current_false(
self, customer_retrieve_mock, product_retrieve_mock, plan_retrieve_mock
):
subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
subscription = Subscription.sync_from_stripe_data(subscription_fake)
subscription.current_period_end = timezone.now() + timezone.timedelta(days=7)
subscription.save()
self.assertTrue(subscription.is_status_current())
self.assertFalse(subscription.is_status_temporarily_current())
self.assertTrue(subscription.is_valid())
self.assertTrue(subscription in self.customer.active_subscriptions)
self.assertTrue(self.customer.has_active_subscription())
self.assertTrue(self.customer.has_any_active_subscription())
self.assert_fks(
subscription, expected_blank_fks=self.default_expected_blank_fks
)
@patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN), autospec=True)
@patch(
"stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_is_status_temporarily_current_false_and_cancelled(
self, customer_retrieve_mock, product_retrieve_mock, plan_retrieve_mock
):
subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
subscription = Subscription.sync_from_stripe_data(subscription_fake)
subscription.status = SubscriptionStatus.canceled
subscription.current_period_end = timezone.now() + timezone.timedelta(days=7)
subscription.save()
self.assertFalse(subscription.is_status_current())
self.assertFalse(subscription.is_status_temporarily_current())
self.assertFalse(subscription.is_valid())
self.assertFalse(subscription in self.customer.active_subscriptions)
self.assertFalse(self.customer.has_active_subscription())
self.assertFalse(self.customer.has_any_active_subscription())
self.assert_fks(
subscription, expected_blank_fks=self.default_expected_blank_fks
)
@patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN), autospec=True)
@patch(
"stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
)
@patch("stripe.Subscription.retrieve", autospec=True)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_extend(
self,
customer_retrieve_mock,
subscription_retrieve_mock,
product_retrieve_mock,
plan_retrieve_mock,
):
subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
subscription_fake["current_period_end"] = datetime_to_unix(
timezone.now() - timezone.timedelta(days=20)
)
subscription_retrieve_mock.return_value = subscription_fake
subscription = Subscription.sync_from_stripe_data(subscription_fake)
self.assertFalse(subscription in self.customer.active_subscriptions)
self.assertEqual(self.customer.active_subscriptions.count(), 0)
delta = timezone.timedelta(days=30)
extended_subscription = subscription.extend(delta)
self.assertNotEqual(None, extended_subscription.trial_end)
self.assertTrue(self.customer.has_active_subscription())
self.assertTrue(self.customer.has_any_active_subscription())
self.assert_fks(
subscription, expected_blank_fks=self.default_expected_blank_fks
)
@patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN), autospec=True)
@patch(
"stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
)
@patch(
"stripe.Subscription.retrieve",
return_value=deepcopy(FAKE_SUBSCRIPTION),
autospec=True,
)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_extend_negative_delta(
self,
customer_retrieve_mock,
subscription_retrieve_mock,
product_retrieve_mock,
plan_retrieve_mock,
):
subscription_fake = deepcopy(FAKE_SUBSCRIPTION_NOT_PERIOD_CURRENT)
subscription = Subscription.sync_from_stripe_data(subscription_fake)
with self.assertRaises(ValueError):
subscription.extend(timezone.timedelta(days=-30))
self.assertFalse(self.customer.has_active_subscription())
self.assertFalse(self.customer.has_any_active_subscription())
self.assert_fks(
subscription, expected_blank_fks=self.default_expected_blank_fks
)
@patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN), autospec=True)
@patch(
"stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
)
@patch(
"stripe.Subscription.retrieve",
return_value=deepcopy(FAKE_SUBSCRIPTION),
autospec=True,
)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_extend_with_trial(
self,
customer_retrieve_mock,
subscription_retrieve_mock,
product_retrieve_mock,
plan_retrieve_mock,
):
subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
subscription = Subscription.sync_from_stripe_data(subscription_fake)
subscription.trial_end = timezone.now() + timezone.timedelta(days=5)
subscription.save()
delta = timezone.timedelta(days=30)
new_trial_end = subscription.trial_end + delta
extended_subscription = subscription.extend(delta)
self.assertEqual(
new_trial_end.replace(microsecond=0), extended_subscription.trial_end
)
self.assertTrue(self.customer.has_active_subscription())
self.assertTrue(self.customer.has_any_active_subscription())
self.assert_fks(
subscription, expected_blank_fks=self.default_expected_blank_fks
)
@patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN), autospec=True)
@patch(
"stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
)
@patch(
"stripe.Subscription.retrieve",
return_value=deepcopy(FAKE_SUBSCRIPTION),
autospec=True,
)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_update(
self,
customer_retrieve_mock,
subscription_retrieve_mock,
product_retrieve_mock,
plan_retrieve_mock,
):
subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
subscription = Subscription.sync_from_stripe_data(subscription_fake)
self.assertEqual(1, subscription.quantity)
new_subscription = subscription.update(quantity=4)
self.assertEqual(4, new_subscription.quantity)
self.assert_fks(
subscription, expected_blank_fks=self.default_expected_blank_fks
)
@patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN), autospec=True)
@patch(
"stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
)
@patch("stripe.Subscription.retrieve", autospec=True)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_update_set_empty_value(
self,
customer_retrieve_mock,
subscription_retrieve_mock,
product_retrieve_mock,
plan_retrieve_mock,
):
subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
subscription_fake.update({"tax_percent": Decimal(20.0)})
subscription_retrieve_mock.return_value = subscription_fake
subscription = Subscription.sync_from_stripe_data(subscription_fake)
self.assertEqual(Decimal(20.0), subscription.tax_percent)
new_subscription = subscription.update(tax_percent=Decimal(0.0))
self.assertEqual(Decimal(0.0), new_subscription.tax_percent)
self.assert_fks(
subscription, expected_blank_fks=self.default_expected_blank_fks
)
@patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN), autospec=True)
@patch(
"stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
)
@patch(
"stripe.Subscription.retrieve",
return_value=deepcopy(FAKE_SUBSCRIPTION),
autospec=True,
)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_update_with_plan_model(
self,
customer_retrieve_mock,
subscription_retrieve_mock,
product_retrieve_mock,
plan_retrieve_mock,
):
subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
subscription = Subscription.sync_from_stripe_data(subscription_fake)
new_plan = Plan.sync_from_stripe_data(deepcopy(FAKE_PLAN_II))
self.assertEqual(FAKE_PLAN["id"], subscription.plan.id)
new_subscription = subscription.update(plan=new_plan)
self.assertEqual(FAKE_PLAN_II["id"], new_subscription.plan.id)
self.assert_fks(
subscription, expected_blank_fks=self.default_expected_blank_fks
)
self.assert_fks(new_plan, expected_blank_fks={})
@patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN), autospec=True)
@patch(
"stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
)
@patch("stripe.Subscription.retrieve", autospec=True)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_cancel_now(
self,
customer_retrieve_mock,
subscription_retrieve_mock,
product_retrieve_mock,
plan_retrieve_mock,
):
subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
subscription = Subscription.sync_from_stripe_data(subscription_fake)
subscription.current_period_end = timezone.now() + timezone.timedelta(days=7)
subscription.save()
cancel_timestamp = datetime_to_unix(timezone.now())
canceled_subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
canceled_subscription_fake["status"] = SubscriptionStatus.canceled
canceled_subscription_fake["canceled_at"] = cancel_timestamp
canceled_subscription_fake["ended_at"] = cancel_timestamp
subscription_retrieve_mock.return_value = (
canceled_subscription_fake
) # retrieve().delete()
self.assertTrue(self.customer.has_active_subscription())
self.assertEqual(self.customer.active_subscriptions.count(), 1)
self.assertTrue(self.customer.has_any_active_subscription())
new_subscription = subscription.cancel(at_period_end=False)
self.assertEqual(SubscriptionStatus.canceled, new_subscription.status)
self.assertEqual(False, new_subscription.cancel_at_period_end)
self.assertEqual(new_subscription.canceled_at, new_subscription.ended_at)
self.assertFalse(new_subscription.is_valid())
self.assertFalse(new_subscription in self.customer.active_subscriptions)
self.assertFalse(self.customer.has_active_subscription())
self.assertFalse(self.customer.has_any_active_subscription())
self.assert_fks(
subscription, expected_blank_fks=self.default_expected_blank_fks
)
@patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN), autospec=True)
@patch(
"stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
)
@patch("stripe.Subscription.retrieve", autospec=True)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_cancel_at_period_end(
self,
customer_retrieve_mock,
subscription_retrieve_mock,
product_retrieve_mock,
plan_retrieve_mock,
):
current_period_end = timezone.now() + timezone.timedelta(days=7)
subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
subscription = Subscription.sync_from_stripe_data(subscription_fake)
subscription.current_period_end = current_period_end
subscription.save()
canceled_subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
canceled_subscription_fake["current_period_end"] = datetime_to_unix(
current_period_end
)
canceled_subscription_fake["canceled_at"] = datetime_to_unix(timezone.now())
subscription_retrieve_mock.return_value = (
canceled_subscription_fake
) # retrieve().delete()
self.assertTrue(self.customer.has_active_subscription())
self.assertTrue(self.customer.has_any_active_subscription())
self.assertEqual(self.customer.active_subscriptions.count(), 1)
self.assertTrue(subscription in self.customer.active_subscriptions)
new_subscription = subscription.cancel(at_period_end=True)
self.assertEqual(self.customer.active_subscriptions.count(), 1)
self.assertTrue(new_subscription in self.customer.active_subscriptions)
self.assertEqual(SubscriptionStatus.active, new_subscription.status)
self.assertEqual(True, new_subscription.cancel_at_period_end)
self.assertNotEqual(new_subscription.canceled_at, new_subscription.ended_at)
self.assertTrue(new_subscription.is_valid())
self.assertTrue(self.customer.has_active_subscription())
self.assertTrue(self.customer.has_any_active_subscription())
self.assert_fks(
subscription, expected_blank_fks=self.default_expected_blank_fks
)
@patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN), autospec=True)
@patch(
"stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
)
@patch("stripe.Subscription.retrieve", autospec=True)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_cancel_during_trial_sets_at_period_end(
self,
customer_retrieve_mock,
subscription_retrieve_mock,
product_retrieve_mock,
plan_retrieve_mock,
):
subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
subscription = Subscription.sync_from_stripe_data(subscription_fake)
subscription.trial_end = timezone.now() + timezone.timedelta(days=7)
subscription.save()
cancel_timestamp = datetime_to_unix(timezone.now())
canceled_subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
canceled_subscription_fake["status"] = SubscriptionStatus.canceled
canceled_subscription_fake["canceled_at"] = cancel_timestamp
canceled_subscription_fake["ended_at"] = cancel_timestamp
subscription_retrieve_mock.return_value = (
canceled_subscription_fake
) # retrieve().delete()
self.assertTrue(self.customer.has_active_subscription())
self.assertTrue(self.customer.has_any_active_subscription())
new_subscription = subscription.cancel(at_period_end=False)
self.assertEqual(SubscriptionStatus.canceled, new_subscription.status)
self.assertEqual(False, new_subscription.cancel_at_period_end)
self.assertEqual(new_subscription.canceled_at, new_subscription.ended_at)
self.assertFalse(new_subscription.is_valid())
self.assertFalse(self.customer.has_active_subscription())
self.assertFalse(self.customer.has_any_active_subscription())
self.assert_fks(
subscription, expected_blank_fks=self.default_expected_blank_fks
)
@patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN), autospec=True)
@patch(
"stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
)
@patch("stripe.Subscription.retrieve", autospec=True)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_cancel_and_reactivate(
self,
customer_retrieve_mock,
subscription_retrieve_mock,
product_retrieve_mock,
plan_retrieve_mock,
):
current_period_end = timezone.now() + timezone.timedelta(days=7)
subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
subscription = Subscription.sync_from_stripe_data(subscription_fake)
subscription.current_period_end = current_period_end
subscription.save()
canceled_subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
canceled_subscription_fake["current_period_end"] = datetime_to_unix(
current_period_end
)
canceled_subscription_fake["canceled_at"] = datetime_to_unix(timezone.now())
subscription_retrieve_mock.return_value = canceled_subscription_fake
self.assertTrue(self.customer.has_active_subscription())
self.assertTrue(self.customer.has_any_active_subscription())
new_subscription = subscription.cancel(at_period_end=True)
self.assertEqual(new_subscription.cancel_at_period_end, True)
new_subscription.reactivate()
subscription_reactivate_fake = deepcopy(FAKE_SUBSCRIPTION)
reactivated_subscription = Subscription.sync_from_stripe_data(
subscription_reactivate_fake
)
self.assertEqual(reactivated_subscription.cancel_at_period_end, False)
self.assert_fks(
subscription, expected_blank_fks=self.default_expected_blank_fks
)
@patch("djstripe.models.Subscription._api_delete", autospec=True)
@patch(
"stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
)
@patch(
"stripe.Subscription.retrieve",
return_value=deepcopy(FAKE_SUBSCRIPTION_CANCELED),
)
def test_cancel_already_canceled(
self,
subscription_retrieve_mock,
product_retrieve_mock,
subscription_delete_mock,
):
subscription_delete_mock.side_effect = InvalidRequestError(
"No such subscription: sub_xxxx", "blah"
)
subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
subscription = Subscription.sync_from_stripe_data(subscription_fake)
self.assertEqual(Subscription.objects.filter(status="canceled").count(), 0)
subscription.cancel(at_period_end=False)
self.assertEqual(Subscription.objects.filter(status="canceled").count(), 1)
self.assert_fks(
subscription, expected_blank_fks=self.default_expected_blank_fks
)
# An unexpected InvalidRequestError from the delete API must propagate
# (only "No such subscription" errors are swallowed by cancel()).
@patch("djstripe.models.Subscription._api_delete", autospec=True)
@patch(
"stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
)
def test_cancel_error_in_cancel(
self, product_retrieve_mock, subscription_delete_mock
):
subscription_delete_mock.side_effect = InvalidRequestError(
"Unexpected error", "blah"
)
subscription_fake = deepcopy(FAKE_SUBSCRIPTION)
subscription = Subscription.sync_from_stripe_data(subscription_fake)
with self.assertRaises(InvalidRequestError):
subscription.cancel(at_period_end=False)
self.assert_fks(
subscription, expected_blank_fks=self.default_expected_blank_fks
)
@patch("stripe.Plan.retrieve", autospec=True)
@patch(
"stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
)
@patch(
"stripe.Customer.retrieve",
return_value=deepcopy(FAKE_CUSTOMER_II),
autospec=True,
)
@patch(
"stripe.Subscription.retrieve",
return_value=deepcopy(FAKE_SUBSCRIPTION_MULTI_PLAN),
)
def test_sync_multi_plan(
self,
subscription_retrieve_mock,
customer_retrieve_mock,
product_retrieve_mock,
plan_retrieve_mock,
):
subscription_fake = deepcopy(FAKE_SUBSCRIPTION_MULTI_PLAN)
subscription = Subscription.sync_from_stripe_data(subscription_fake)
self.assertIsNone(subscription.plan)
self.assertIsNone(subscription.quantity)
items = subscription.items.all()
self.assertEqual(2, len(items))
self.assert_fks(
subscription,
expected_blank_fks=self.default_expected_blank_fks
| {"djstripe.Customer.subscriber", "djstripe.Subscription.plan"},
)
@patch("stripe.Plan.retrieve", autospec=True)
@patch(
"stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
)
@patch(
"stripe.Customer.retrieve",
return_value=deepcopy(FAKE_CUSTOMER_II),
autospec=True,
)
@patch(
"stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION_METERED)
)
def test_sync_metered_plan(
self,
subscription_retrieve_mock,
customer_retrieve_mock,
product_retrieve_mock,
plan_retrieve_mock,
):
subscription_fake = deepcopy(FAKE_SUBSCRIPTION_METERED)
self.assertNotIn(
"quantity",
subscription_fake["items"]["data"],
"Expect Metered plan SubscriptionItem to have no quantity",
)
subscription = Subscription.sync_from_stripe_data(subscription_fake)
items = subscription.items.all()
self.assertEqual(1, len(items))
item = items[0]
self.assertEqual(subscription.quantity, 1)
# Note that subscription.quantity is 1,
# but item.quantity isn't set on metered plans
self.assertIsNone(item.quantity)
self.assertEqual(item.plan.id, FAKE_PLAN_METERED["id"])
self.assert_fks(
subscription, expected_blank_fks=self.default_expected_blank_fks
)
| 37.988077
| 88
| 0.70816
| 2,691
| 25,490
| 6.372724
| 0.059086
| 0.05528
| 0.060937
| 0.086594
| 0.864832
| 0.848738
| 0.833868
| 0.821156
| 0.800921
| 0.785818
| 0
| 0.00207
| 0.20408
| 25,490
| 670
| 89
| 38.044776
| 0.843208
| 0.007022
| 0
| 0.657343
| 0
| 0
| 0.07941
| 0.053836
| 0
| 0
| 0
| 0
| 0.178322
| 1
| 0.033217
| false
| 0
| 0.017483
| 0
| 0.052448
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
72d9d81f71b49d8005ec6b4e27ced53173df725d
| 42
|
py
|
Python
|
ku/ebm/__init__.py
|
tonandr/keras_unsupervised
|
fd2a2494bca2eb745027178e220b42b5e5882f94
|
[
"BSD-3-Clause"
] | 4
|
2019-07-28T11:56:01.000Z
|
2021-11-06T02:50:58.000Z
|
ku/ebm/__init__.py
|
tonandr/keras_unsupervised
|
fd2a2494bca2eb745027178e220b42b5e5882f94
|
[
"BSD-3-Clause"
] | 2
|
2021-06-30T01:00:07.000Z
|
2021-07-21T08:04:40.000Z
|
ku/ebm/__init__.py
|
tonandr/keras_unsupervised
|
fd2a2494bca2eb745027178e220b42b5e5882f94
|
[
"BSD-3-Clause"
] | null | null | null |
from .dbn import DBN
from .rbm import RBM
| 21
| 21
| 0.761905
| 8
| 42
| 4
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 42
| 2
| 22
| 21
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f405887e05b5221d6596043b63f721c3de200150
| 179
|
py
|
Python
|
bin/iamonds/hexiamonds-stacked-chevrons-6x6.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/iamonds/hexiamonds-stacked-chevrons-6x6.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/iamonds/hexiamonds-stacked-chevrons-6x6.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | 1
|
2022-01-02T16:54:14.000Z
|
2022-01-02T16:54:14.000Z
|
#!/usr/bin/env python
# $Id$
"""933 solutions"""
import puzzler
from puzzler.puzzles.hexiamonds import HexiamondsStackedChevrons_6x6
puzzler.run(HexiamondsStackedChevrons_6x6)
| 17.9
| 68
| 0.804469
| 20
| 179
| 7.1
| 0.75
| 0.394366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042945
| 0.089385
| 179
| 9
| 69
| 19.888889
| 0.828221
| 0.217877
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f40785b5c99a921cf7fcdf3eb6931610cb831a13
| 42
|
py
|
Python
|
helloworld/__init__.py
|
jtap159/helloworld
|
b0cae97df867a23ad0c64c521b18bf8a37676319
|
[
"MIT"
] | null | null | null |
helloworld/__init__.py
|
jtap159/helloworld
|
b0cae97df867a23ad0c64c521b18bf8a37676319
|
[
"MIT"
] | null | null | null |
helloworld/__init__.py
|
jtap159/helloworld
|
b0cae97df867a23ad0c64c521b18bf8a37676319
|
[
"MIT"
] | null | null | null |
from helloworld.helloworld import sayhello
| 42
| 42
| 0.904762
| 5
| 42
| 7.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 42
| 1
| 42
| 42
| 0.974359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f474075e2b2a39b0f7d276296dea02ff91914863
| 7,983
|
py
|
Python
|
netapp_activeiq_api/apis/capacity_api.py
|
woutercoppens/netapp-activeiq-api
|
a8f86355ecdd769953b69e38824b4db07c11c89e
|
[
"Apache-2.0"
] | 3
|
2021-09-28T23:22:59.000Z
|
2021-11-23T14:53:54.000Z
|
netapp_activeiq_api/apis/capacity_api.py
|
woutercoppens/netapp-activeiq-api
|
a8f86355ecdd769953b69e38824b4db07c11c89e
|
[
"Apache-2.0"
] | null | null | null |
netapp_activeiq_api/apis/capacity_api.py
|
woutercoppens/netapp-activeiq-api
|
a8f86355ecdd769953b69e38824b4db07c11c89e
|
[
"Apache-2.0"
] | 1
|
2021-04-01T11:22:23.000Z
|
2021-04-01T11:22:23.000Z
|
# coding: utf-8
from .api_client import ApiClient
class CapacityApi:
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_capacity_details_by_level(self, id, level, **kwargs): # noqa: E501
"""Provides the details about the systems nearing allocated capacity limit for a customer, site, group, cluster, watchlist or a set of serial numbers. # noqa: E501
Lists information about systems for a customer, site, group, cluster, watchlist or serial numbers that have exceeded 90 percent system capacity or are predicted to do so soon. Systems are grouped into the following categories: currently above 90 percent, expected to exceed 90 percent within 1 month, expected to exceed 90 percent within 3 months, expected to exceed 90 percent within 6 months, not expected to exceed 90 percent within 6 months. # noqa: E501
:param str id: Unique identifier for the level. Valid values are customer ID, site ID, group name, cluster, serial numbers and watchList id. (required)
:param str level: Identifies the level for which information will be provided. Valid values are customer, site, group, cluster, serial_numbers and watchlist. (required)
"""
all_params = ["id", "level"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_capacity_details_by_level" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'id' is set
if "id" not in params or params["id"] is None:
raise ValueError(
"Missing the required parameter `id` when calling `get_capacity_details_by_level`"
) # noqa: E501
# verify the required parameter 'level' is set
if "level" not in params or params["level"] is None:
raise ValueError(
"Missing the required parameter `level` when calling `get_capacity_details_by_level`"
) # noqa: E501
path_params = {}
if "id" in params:
path_params["id"] = params["id"] # noqa: E501
if "level" in params:
path_params["level"] = params["level"] # noqa: E501
query_params = []
header_params = {}
body_params = None
return self.api_client.call_api(
"/v2/capacity/details/level/{level}/id/{id}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_capacity_summary_by_level(self, id, level, **kwargs): # noqa: E501
"""Provides the number of systems nearing allocated capacity limit for a customer, site, group, cluster watchlist or a set of serial numbers. # noqa: E501
Lists the number of systems for a customer, site, group, cluster, watchlist or a set of serial numbers that have exceeded 90 percent system capacity or are predicted to do so soon. Counts are provided for the following categories: currently above 90 percent, expected to exceed 90 percent within 1 month, expected to exceed 90 percent within 3 months, expected to exceed 90 percent within 6 months, sum of systems which are above 90 percent and expected to exceed 90 percent in 6 months, not expected to exceed 90 percent within 6 months. # noqa: E501
:param str id: Unique identifier for the level. Valid values are customer ID, site ID, group name, cluster id, serial numbers and watchList id. (required)
:param str level: Identifies the level for which information will be provided. Valid values are customer, site, group, cluster, serial_numbers and watchlist. (required)
"""
all_params = ["id", "level"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_capacity_summary_by_level" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'id' is set
if "id" not in params or params["id"] is None:
raise ValueError(
"Missing the required parameter `id` when calling `get_capacity_summary_by_level`"
) # noqa: E501
# verify the required parameter 'level' is set
if "level" not in params or params["level"] is None:
raise ValueError(
"Missing the required parameter `level` when calling `get_capacity_summary_by_level`"
) # noqa: E501
path_params = {}
if "id" in params:
path_params["id"] = params["id"] # noqa: E501
if "level" in params:
path_params["level"] = params["level"] # noqa: E501
query_params = []
header_params = {}
body_params = None
return self.api_client.call_api(
"/v2/capacity/summary/level/{level}/id/{id}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_capacity_trend_details_by_level(self, level, id, **kwargs): # noqa: E501
"""Provides the capacity trending details about the systems for a customer, site, group, or a set of serial numbers. # noqa: E501
Returns the used and allocated capacity for each of the last 6 months of available data for each system by pagination # noqa: E501
:param str level: Identifies the level for which information will be provided. Valid values are customer, site, group, and serial_numbers. (required)
:param str id: Unique identifier for the level. Valid values are customer ID, site ID, group name, and serial numbers. (required)
:param float start: The index of the first system to return.
:param float limit: Specifies the number of systems to be displayed on a page. The default value is 1000.
"""
all_params = ["level", "id", "start", "limit"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_capacity_trend_details_by_level" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'level' is set
if "level" not in params or params["level"] is None:
raise ValueError(
"Missing the required parameter `level` when calling `get_capacity_trend_details_by_level`"
) # noqa: E501
# verify the required parameter 'id' is set
if "id" not in params or params["id"] is None:
raise ValueError(
"Missing the required parameter `id` when calling `get_capacity_trend_details_by_level`"
) # noqa: E501
path_params = {}
if "level" in params:
path_params["level"] = params["level"] # noqa: E501
if "id" in params:
path_params["id"] = params["id"] # noqa: E501
query_params = []
if "start" in params:
query_params.append(("start", params["start"])) # noqa: E501
if "limit" in params:
query_params.append(("limit", params["limit"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/capacity/trend/level/{level}/id/{id}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
| 46.958824
| 562
| 0.615809
| 1,016
| 7,983
| 4.731299
| 0.140748
| 0.04327
| 0.049927
| 0.033701
| 0.86062
| 0.822134
| 0.811317
| 0.811317
| 0.803204
| 0.786353
| 0
| 0.022282
| 0.302894
| 7,983
| 169
| 563
| 47.236686
| 0.841509
| 0.399223
| 0
| 0.669565
| 0
| 0
| 0.232593
| 0.089459
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034783
| false
| 0
| 0.008696
| 0
| 0.078261
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
be46de9477b6d12d489bac9e3f55d4cc12a067cb
| 16,914
|
py
|
Python
|
upwork/routers/task.py
|
alexandru-grajdeanu/python-upwork
|
ffe7994c084c88c455a386791e4ec62a93bb7b6a
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2020-05-17T17:13:28.000Z
|
2020-05-17T17:13:28.000Z
|
upwork/routers/task.py
|
frolenkov-nikita/python-upwork
|
d052f5caedc632c73ad770b1f822a8a494f6b34b
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
upwork/routers/task.py
|
frolenkov-nikita/python-upwork
|
d052f5caedc632c73ad770b1f822a8a494f6b34b
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# Python bindings to Upwork API
# python-upwork version 0.5
# (C) 2010-2015 Upwork
from upwork.compatibility import quote
from upwork.namespaces import Namespace
class Task(Namespace):
api_url = 'otask/'
version = 1
def get_team_tasks(self, company_id, team_id,
paging_offset=0, paging_count=1000):
"""
Retrieve a list of all activities in the given team.
This call returns both archived and active activities.
The user authenticated must have been granted the appropriate
access to the team.
*Parameters:*
:company_id: Company ID. Use the ``parent_team__id`` value
from ``hr.get_team()`` API call.
:team_id: Team ID. Use the 'id' value
from ``hr.get_team()`` API call.
"""
if paging_offset or not paging_count == 1000:
data = {'page': '{0};{1}'.format(paging_offset,
paging_count)}
else:
data = {}
url = 'tasks/companies/{0}/teams/{1}/tasks'.format(company_id,
team_id)
return self.get(url, data=data)
def get_company_tasks(self, company_id,
paging_offset=0, paging_count=1000):
"""
Retrieve a list of all activities within a company.
It is equivalent to the ``get_team_tasks`` so that
``team_id`` is equal to ``company_id`` which is parent
team ID.
This call returns both archived and active activities.
The user authenticated must have been granted the appropriate
access to the company.
*Parameters:*
:company_id: Company ID. Use the ``parent_team__id`` value
from ``hr.get_team()`` API call.
"""
team_id = company_id
return self.get_team_tasks(
company_id,
team_id,
paging_offset=paging_offset,
paging_count=paging_count
)
def _encode_task_codes(self, task_codes):
if isinstance(task_codes, (list, tuple)):
return ';'.join(str(c) for c in task_codes)
else:
return str(task_codes)
def get_team_specific_tasks(self, company_id, team_id, task_codes):
"""
Return a specific activities within a team.
*Parameters:*
:company_id: Company ID. Use the ``parent_team__id`` value
from ``hr.get_team()`` API call.
:team_id: Team ID. Use the 'id' value
from ``hr.get_team()`` API call.
:task_codes: Task codes (must be a list, even of 1 item)
"""
task_codes = self._encode_task_codes(task_codes)
url = 'tasks/companies/{0}/teams/{1}/tasks/{2}'.format(
company_id, team_id, quote(task_codes))
result = self.get(url)
try:
return result["tasks"] or []
except KeyError:
return result
def get_company_specific_tasks(self, company_id, task_codes):
"""
Return a specific activities within a company.
This is identical to ``get_team_specific_tasks``,
so that ``team_id`` is the same as ``company_id``.
*Parameters:*
:company_id: Company ID. Use the ``parent_team__id`` value
from ``hr.get_team()`` API call.
:task_codes: Task codes (must be a list, even of 1 item)
"""
team_id = company_id
return self.get_team_specific_tasks(
company_id, team_id, task_codes)
def post_team_task(self, company_id, team_id, code, description, url,
engagements=None, all_in_company=None):
"""
Create an activity within a team.
The authenticated user needs to have hiring manager privileges
*Parameters:*
:company_id: Company ID. Use the ``parent_team__id`` value
from ``hr.get_team()`` API call.
:team_id: Team ID. Use the 'id' value
from ``hr.get_team()`` API call.
:code: Task code
:description: Task description
:url: Task URL
:engagements: (optional) A list of engagements
that are to be assigned to the created activity.
It can be a single engagement ID,
or an iterable of IDs.
:all_in_company: (optional) If ``True``, assign the
created activity to all engagements
that are exist in the company at
the moment.
If both ``engagements`` and ``all_in_company`` are provided,
``engagements`` list will override the ``all_in_company`` setting.
"""
post_url = 'tasks/companies/{0}/teams/{1}/tasks'.format(
company_id, team_id)
data = {'code': code,
'description': description,
'url': url}
if engagements:
engagements = self._encode_task_codes(engagements)
data['engagements'] = engagements
if all_in_company:
data['all_in_company'] = 1
result = self.post(post_url, data)
return result
def post_company_task(self, company_id, code, description, url,
engagements=None, all_in_company=None):
"""
Create an activity within a company.
This call is identical to
``post_team_task`` so that ``team_id`` is equal
to ``company_id``.
The authenticated user needs to have hiring manager privileges.
*Parameters:*
:company_id: Company ID. Use the ``parent_team__id`` value
from ``hr.get_team()`` API call.
:code: Activity ID
:description: Activity description
:url: Activity URL
:engagements: (optional) A list of engagements
that are to be assigned to the created activity.
It can be a single engagement ID,
or an iterable of IDs.
:all_in_company: (optional) If ``True``, assign the
created activity to all engagements
that are exist in the company at
the moment.
If both ``engagements`` and ``all_in_company`` are provided,
``engagements`` list will override the ``all_in_company`` setting.
"""
team_id = company_id
return self.post_team_task(
company_id, team_id, code, description,
url, engagements=engagements, all_in_company=all_in_company)
def put_team_task(self, company_id, team_id, code, description, url,
engagements=None, all_in_company=None):
"""
Update an activity within a team.
The authenticated user needs to have hiring manager privileges.
*Parameters:*
:company_id: Company ID. Use the ``parent_team__id`` value
from ``hr.get_team()`` API call.
:team_id: Team ID. Use the 'id' value
from ``hr.get_team()`` API call.
:code: Task code
:description: Task description
:url: Task URL
:engagements: (optional) A list of engagements
that are to be assigned to the created activity.
It can be a single engagement ID,
or an iterable of IDs.
:all_in_company: (optional) If ``True``, assign the
updated activity to all engagements
that are exist in the company at
the moment.
If both ``engagements`` and ``all_in_company`` are provided,
``engagements`` list will override the ``all_in_company`` setting.
"""
put_url = 'tasks/companies/{0}/teams/{1}/tasks/{2}'.format(
company_id, team_id, quote(str(code)))
data = {'code': code,
'description': description,
'url': url}
if engagements:
engagements = self._encode_task_codes(engagements)
data['engagements'] = engagements
if all_in_company:
data['all_in_company'] = 1
result = self.put(put_url, data)
return result
def put_company_task(self, company_id, code, description, url,
engagements=None, all_in_company=None):
"""
Update an activity within a company.
This call is identical to ``put_team_task`` so that
``team_id`` is equal to ``company_id``.
The authenticated user needs to have hiring manager privileges.
*Parameters:*
:company_id: Company ID. Use the ``parent_team__id`` value
from ``hr.get_team()`` API call.
:code: Task code
:description: Task description
:url: Task URL
:engagements: (optional) A list of engagements
that are to be assigned to the created activity.
It can be a single engagement ID,
or an iterable of IDs.
:all_in_company: (optional) If ``True``, assign the
created activity to all engagements
that are exist in the company at
the moment.
If both ``engagements`` and ``all_in_company`` are provided,
``engagements`` list will override the ``all_in_company`` setting.
"""
team_id = company_id
return self.put_team_task(
company_id, team_id, code,
description, url, engagements=engagements,
all_in_company=all_in_company)
def archive_team_task(self, company_id, team_id, task_code):
"""Archive single activity within a team.
*Parameters:*
:company_id: Company ID. Use the ``parent_team__id`` value
from ``hr.get_team()`` API call.
:team_id: Team ID. Use the 'id' value
from ``hr.get_team()`` API call.
:task_code: A single Activity ID as a string
or a list or tuple of IDs.
"""
task_code = self._encode_task_codes(task_code)
url = 'tasks/companies/{0}/teams/{1}/archive/{2}'.format(
company_id, team_id, quote(task_code))
return self.put(url, data={})
def archive_company_task(self, company_id, task_code):
"""Archive single activity within a company.
This call is identical to ``archive_team_task``, so that
``team_id`` is the same as ``company_id``.
*Parameters:*
:company_id: Company ID. Use the ``parent_team__id`` value
from ``hr.get_team()`` API call.
:task_code: A single Activity ID as a string
or a list or tuple of IDs.
"""
team_id = company_id
return self.archive_team_task(company_id, team_id, task_code)
def unarchive_team_task(self, company_id, team_id, task_code):
"""Unarchive single activity within a team.
*Parameters:*
:company_id: Company ID. Use the ``parent_team__id`` value
from ``hr.get_team()`` API call.
:team_id: Team ID. Use the 'id' value
from ``hr.get_team()`` API call.
:task_code: A single Activity ID as a string
or a list or tuple of IDs.
"""
task_code = self._encode_task_codes(task_code)
url = 'tasks/companies/{0}/teams/{1}/unarchive/{2}'.format(
company_id, team_id, quote(task_code))
return self.put(url, data={})
def unarchive_company_task(self, company_id, task_code):
"""Unarchive single activity within a company.
This call is identical to ``unarchive_team_task``, so that
``team_id`` is the same as ``company_id``.
*Parameters:*
:company_id: Company ID. Use the ``parent_team__id`` value
from ``hr.get_team()`` API call.
:task_code: A single Activity ID as a string
or a list or tuple of IDs.
"""
team_id = company_id
return self.unarchive_team_task(company_id, team_id, task_code)
def assign_engagement(self, company_id, team_id,
engagement, task_codes=None):
"""Assign an existing engagement to the list of activities.
Note that activity will appear in contractor's team client
only if his engagement is assigned to the activity and
activities are activated for the ongoing contract.
This will override assigned engagements for the given activities.
For example, if you pass empty ``task_codes`` or just omit
this parameter, contractor engagement will be unassigned from
all Activities.
*Parameters:*
:company_id: Company ID. Use the ``parent_team__id`` value
from ``hr.get_team()`` API call.
:team_id: Team ID. Use the 'id' value
from ``hr.get_team()`` API call.
:engagement: Engagement ID that will be assigned/unassigned
to the given list of Activities.
:task_codes: Task codes (must be a list, even of 1 item)
"""
task_codes = self._encode_task_codes(task_codes)
url = 'tasks/companies/{0}/teams/{1}/engagements/{2}/tasks'.format(
company_id, team_id, engagement)
data = {'tasks': task_codes}
result = self.put(url, data)
return result
def update_batch_tasks(self, company_id, csv_data):
"""
Batch update Activities using csv file contents.
This call is experimental, use it on your own risk.
*Parameters:*
:company_id: Company ID. Use the ``parent_team__id`` value
from ``hr.get_team()`` API call.
:csv_data: Task records in csv format but with "<br>"
as line separator -
"companyid","teamid","userid","taskid","description","url"
Example:
"acmeinc","","","T1","A Task","http://example.com"<br>
"acmeinc","acmeinc:dev","b42","T2","Task 2",""
"""
data = {'data': csv_data}
url = 'tasks/companies/{0}/tasks/batch'.format(company_id)
return self.put(url, data)
class Task_V2(Namespace):
api_url = 'tasks/'
version = 2
def _encode_task_codes(self, task_codes):
if isinstance(task_codes, (list, tuple)):
return ';'.join(str(c) for c in task_codes)
else:
return str(task_codes)
def list_engagement_activities(self, engagement_ref):
"""
Retrieve list of all activities assigned to the specific engagement.
The user authenticated must have been granted the appropriate
hiring manager permissions.
*Parameters:*
:engagement_ref: Engagement reference ID. You can get it using
'List engagemnets' API call. Example: `1234`.
"""
url = 'tasks/contracts/{0}'.format(engagement_ref)
result = self.get(url)
return result
def assign_to_engagement(self, engagement_ref, task_codes=None):
"""Assign a list of activities to the existing engagement.
Note that activity will appear in contractor's team client
only if his engagement is assigned to the activity and
activities are activated for the ongoing contract.
This will override assigned engagements for the given activities.
For example, if you pass empty ``task_codes`` or just omit
this parameter, contractor engagement will be unassigned from
all Activities.
*Parameters:*
:engagement_ref: Engagement ID that will be assigned/unassigned
to the given list of Activities.
:task_codes: Task codes (must be a list, even of 1 item)
"""
task_codes = self._encode_task_codes(task_codes)
url = 'tasks/contracts/{0}'.format(engagement_ref)
data = {'tasks': task_codes}
result = self.put(url, data)
return result
| 35.834746
| 79
| 0.558709
| 2,003
| 16,914
| 4.537194
| 0.093859
| 0.067342
| 0.023768
| 0.03004
| 0.830436
| 0.81272
| 0.795224
| 0.783121
| 0.770577
| 0.737566
| 0
| 0.006056
| 0.355682
| 16,914
| 471
| 80
| 35.910828
| 0.827858
| 0.546707
| 0
| 0.48062
| 0
| 0
| 0.081213
| 0.052906
| 0
| 0
| 0
| 0
| 0
| 1
| 0.139535
| false
| 0
| 0.015504
| 0
| 0.364341
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
be4dec4674b78bf4e1a8327c3d028260689ad9ac
| 2,897
|
py
|
Python
|
linkedin/appco.py
|
zzh-python/all-project
|
915a47fb42d63ff3a36814992283c2f4ed8703a3
|
[
"Apache-2.0"
] | 58
|
2019-03-01T08:15:19.000Z
|
2022-03-28T03:16:17.000Z
|
linkedin/appco.py
|
zzh-python/all-project
|
915a47fb42d63ff3a36814992283c2f4ed8703a3
|
[
"Apache-2.0"
] | 2
|
2020-06-08T08:07:46.000Z
|
2020-11-02T11:48:05.000Z
|
linkedin/appco.py
|
zzh-python/all-project
|
915a47fb42d63ff3a36814992283c2f4ed8703a3
|
[
"Apache-2.0"
] | 37
|
2019-02-26T23:30:08.000Z
|
2022-01-27T05:10:18.000Z
|
import requests
cook='appbot_convert={%22referer%22:null%2C%22landing%22:%22https://appbot.co/%22}; _ga=GA1.2.30329182.1540298320; intercom-id-glvjson7=f3876d87-0fec-45f4-89f2-85f1209950c4; appbot_session_active=true; cookieconsent_status=allow; intercom-session-glvjson7=UlhJK3ZCTHF6dFlBLzNVV2IyZDhjaWNSWTc4cDRuN2dZZ0dRa2dyMjZBd21SUlpJT0xnVlJ6ZzkxQlFiZTdUby0tbjRtS0hjS3hUenNxT1o5RDZOanUzQT09--223ba2c3f3a550394ee3dd5ad077e76c32fc0be0; remember_user_token=BAhbCFsGaQJOkkkiIiQyYSQxMCQxN0FGZHNDZ050RmRwT2JSRElDalguBjoGRVRJIhcxNTQwNDM2NDA4Ljk2MjM3NDcGOwBG--578f0df79fd57a0bc670f3b164c91fb874fa529d; _gid=GA1.2.998133860.1540436421; _gat=1; _hp2_id.116503402=%7B%22userId%22%3A%227121901154332925%22%2C%22pageviewId%22%3A%228825843111957251%22%2C%22sessionId%22%3A%222353689844032949%22%2C%22identity%22%3A%221509662199%40qq.com%22%2C%22trackerVersion%22%3A%224.0%22%2C%22identityField%22%3Anull%2C%22isIdentified%22%3A1%7D; _hp2_ses_props.116503402=%7B%22ts%22%3A1540436420819%2C%22d%22%3A%22app.appbot.co%22%2C%22h%22%3A%22%2Fapps%2F1393448-arena-of-valor%2Freviews%22%7D; _appbot_session=eURGbk02K0Zpbnh5a2dNVnpHc0drWGc3aGtDWTRQZEZldVI5M0NtYS9JTlQvTW5UZldPRmR3c1RMS2FtcVN0TzV5dXVWRjlmOGQxQ29NRTM0aUlqTlNLKzloUkdJUVk3M2lwaFlmc2VlWDdscWFZMDNFM0drVXozSU4vcnVERjlrRVFMME0yNWhOQnpWVUI5a1QxWUNTMXpXUWhqdmRNdCsxaUhpT29UMmZHa3dPMWZuTWxyckFUWExLTndqNTNTMVh5aThmN05qTjNmTHpUNFBjL1hDL2d1NHIydjNMQThDL1lmVmZyem9VTlJKMWJkZ3ZRNUlSS2RaRkgyc2NETll3QWoxZ3NDR1FwZmp3N2MrSXlabEhhblM0UVdlVklvaHFYYlIrN3d0dGhtRGZaaDBqQUtQa1RadUhUYzVDaHFIZ2JNUVJpN2RuMHZEQlZOVHlpNkhzRlJUT1Y1MDNDc0UxNmRiNGd5WWFnPS0tR296UWQramlRU1NXZGVEN2R5d3VDQT09--875d723f0968d9abea12c03687e14f9c665ffae2; filterOpen=true'
header={
'Cookie':cook,
'referer':'https://app.appbot.co/apps/1393448-arena-of-valor/reviews',
's':'980fb71da83a2a5e09431e5023b2e5373f7dbc7f',
# 's':'786e31f6befa521723b0afe90eca31f937d09190'
# 's':'7f581ec28840d94a3e84b61431ef62617e568273'
# 's':'df760c8aecd6c8a60ab5f5d1fd61dda9ad1b3ee0'
# 's':'7cab2996fdc06853ea6c4e7db9511680d5a8408a'
# 's':'b2576c0bd7fafb1fa3dad789846ce7f769055419'
}
url='https://app.appbot.co/apps/1393448-arena-of-valor/reviews#/?dlangs=zh&end=2018-10-25&start=2018-07-27'
url='https://app.appbot.co/data/apps/1393448/reviews?start=2018-07-27&end=2018-10-25&dlangs=zh&count=10&page=1'
url='https://app.appbot.co/data/apps/10242/reviews?start=2018-07-27&end=2018-10-25&dlangs=zh-Hant&count=10&page=1'
url='https://app.appbot.co/data/apps/10242/reviews?start=2018-07-27&end=2018-10-25&dlangs=zh&count=10&page=1'
# url='https://app.appbot.co/data/apps/1393448/reviews?start=2018-07-27&end=2018-10-25&dlangs=zh&count=10&page=2&____c=c7d8c9c8e91be712870923d832de2fdd43b5a4bf'
# url='https://app.appbot.co/data/apps/1393448/reviews?start=2018-07-27&end=2018-10-25&dlangs=zh&count=10&page=3'
req=requests.get(url,headers=header)
print(req.text)
| 103.464286
| 1,652
| 0.838108
| 306
| 2,897
| 7.869281
| 0.388889
| 0.0299
| 0.040698
| 0.046512
| 0.213455
| 0.212209
| 0.212209
| 0.212209
| 0.212209
| 0.212209
| 0
| 0.274454
| 0.03659
| 2,897
| 28
| 1,653
| 103.464286
| 0.58832
| 0.174318
| 0
| 0
| 0
| 0.384615
| 0.923469
| 0.668793
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
be99eca575d2ea20bbda8ac7e29af8f2bb6ccc82
| 3,013
|
py
|
Python
|
academicstoday/shared_auth/views/email_views.py
|
MikaSoftware/academicstoday-paas-django
|
cf58cf216d377ea97a2676cd594f96fb9d602a46
|
[
"BSD-3-Clause"
] | null | null | null |
academicstoday/shared_auth/views/email_views.py
|
MikaSoftware/academicstoday-paas-django
|
cf58cf216d377ea97a2676cd594f96fb9d602a46
|
[
"BSD-3-Clause"
] | 6
|
2020-06-05T17:54:00.000Z
|
2022-03-11T23:18:41.000Z
|
academicstoday/shared_auth/views/email_views.py
|
MikaSoftware/academicstoday-paas-django
|
cf58cf216d377ea97a2676cd594f96fb9d602a46
|
[
"BSD-3-Clause"
] | 2
|
2020-05-01T12:50:38.000Z
|
2021-07-17T09:51:12.000Z
|
# -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from django.core.exceptions import PermissionDenied
from django.shortcuts import render
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import condition
from shared_foundation import constants
from shared_foundation.models import SharedUser
from shared_foundation.utils import reverse_with_full_domain
def reset_password_email_page(request, pr_access_code=None):
# Find the user or error.
try:
me = SharedUser.objects.get(pr_access_code=pr_access_code)
if not me.has_pr_code_expired():
# Indicate that the account is active.
me.was_activated = True
me.save()
else:
# Erro message indicating code expired.
raise PermissionDenied(_('Access code expired.'))
except SharedUser.DoesNotExist:
raise PermissionDenied(_('Wrong access code.'))
# Generate the data.
url = reverse_with_full_domain(
reverse_url_id='at_reset_password_master',
resolve_url_args=[pr_access_code]
)
web_view_url = reverse_with_full_domain(
reverse_url_id='at_reset_password_email',
resolve_url_args=[pr_access_code]
)
param = {
'constants': constants,
'url': url,
'web_view_url': web_view_url,
'me': me
}
# DEVELOPERS NOTE:
# - When copying the "Sunday" open source email theme into our code, we will
# need to use a formatter to inline the CSS.
# - https://templates.mailchimp.com/resources/inline-css/
return render(request, 'shared_auth/email/reset_password_email.html', param)
def user_activation_email_page(request, pr_access_code=None):
# Find the user or error.
try:
me = SharedUser.objects.get(pr_access_code=pr_access_code)
if not me.has_pr_code_expired():
# Indicate that the account is active.
me.was_activated = True
me.save()
else:
# Erro message indicating code expired.
raise PermissionDenied(_('Access code expired.'))
except SharedUser.DoesNotExist:
raise PermissionDenied(_('Wrong access code.'))
# Generate the data.
url = reverse_with_full_domain(
reverse_url_id='at_user_activation_detail',
resolve_url_args=[pr_access_code]
)
web_view_url = reverse_with_full_domain(
reverse_url_id='at_activate_email',
resolve_url_args=[pr_access_code]
)
param = {
'constants': constants,
'url': url,
'web_view_url': web_view_url,
'me': me
}
# DEVELOPERS NOTE:
# - When copying the "Sunday" open source email theme into our code, we will
# need to use a formatter to inline the CSS.
# - https://templates.mailchimp.com/resources/inline-css/
return render(request, 'shared_auth/email/user_activation_email_view.html', param)
| 34.632184
| 86
| 0.683372
| 384
| 3,013
| 5.088542
| 0.296875
| 0.071648
| 0.061412
| 0.053736
| 0.733879
| 0.733879
| 0.733879
| 0.733879
| 0.733879
| 0.733879
| 0
| 0.000433
| 0.233322
| 3,013
| 86
| 87
| 35.034884
| 0.845455
| 0.213409
| 0
| 0.610169
| 0
| 0
| 0.131378
| 0.069728
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033898
| false
| 0.067797
| 0.152542
| 0
| 0.220339
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
bebd450ab3863f6d821c7db6ce97715c424a445a
| 177
|
py
|
Python
|
texpy/main.py
|
PapaCharlie/texpy
|
214f0a2adfc8c57c052c706638785809a024f940
|
[
"MIT"
] | null | null | null |
texpy/main.py
|
PapaCharlie/texpy
|
214f0a2adfc8c57c052c706638785809a024f940
|
[
"MIT"
] | null | null | null |
texpy/main.py
|
PapaCharlie/texpy
|
214f0a2adfc8c57c052c706638785809a024f940
|
[
"MIT"
] | null | null | null |
import integrals
import plain
from utils import tex_to_plain
def parse(string, **flags):
print integrals.main(string, **flags) or "",
print plain.main(string, **flags)
| 22.125
| 48
| 0.723164
| 25
| 177
| 5.04
| 0.56
| 0.261905
| 0.238095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158192
| 177
| 8
| 49
| 22.125
| 0.845638
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
fe25735416fdb12da8e97ddaf32ffd30accbb7d0
| 193
|
py
|
Python
|
web/website/apps.py
|
mnahinkhan/rnpfind
|
5aa956ddd528ab9ebd9588be845f78c449915b78
|
[
"MIT"
] | 3
|
2021-06-08T03:55:03.000Z
|
2021-06-15T07:33:08.000Z
|
web/website/apps.py
|
mnahinkhan/RNPFind
|
8b561e087f943421c847dcb708ee386ee6439fa5
|
[
"MIT"
] | 1
|
2022-02-24T15:34:24.000Z
|
2022-03-04T09:59:10.000Z
|
web/website/apps.py
|
mnahinkhan/RNPFind
|
8b561e087f943421c847dcb708ee386ee6439fa5
|
[
"MIT"
] | 1
|
2021-07-22T04:13:34.000Z
|
2021-07-22T04:13:34.000Z
|
"""
Autogenerated by Django - lists app configurations
"""
from django.apps import AppConfig
class WebsiteConfig(AppConfig):
"""
Autogenerated by Django
"""
name = "website"
| 14.846154
| 50
| 0.678756
| 19
| 193
| 6.894737
| 0.736842
| 0.229008
| 0.320611
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217617
| 193
| 12
| 51
| 16.083333
| 0.86755
| 0.38342
| 0
| 0
| 1
| 0
| 0.072917
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fe99f4c464f48ab5660d9574cbe8bdc2d37857a2
| 139
|
py
|
Python
|
weather/GeoWeatherExceptions.py
|
Gabriel737/CadorsMap
|
2bca28b8bda79caad1149bcedd1dc4953c84e13b
|
[
"MIT"
] | 1
|
2021-12-11T21:11:06.000Z
|
2021-12-11T21:11:06.000Z
|
weather/GeoWeatherExceptions.py
|
Gabriel737/CadorsMap
|
2bca28b8bda79caad1149bcedd1dc4953c84e13b
|
[
"MIT"
] | null | null | null |
weather/GeoWeatherExceptions.py
|
Gabriel737/CadorsMap
|
2bca28b8bda79caad1149bcedd1dc4953c84e13b
|
[
"MIT"
] | 1
|
2021-12-11T21:01:57.000Z
|
2021-12-11T21:01:57.000Z
|
class GeoWeatherServiceFailedToLocateException(Exception):
pass
class GeoWeatherServiceFailedToRetrieveException(Exception):
pass
| 23.166667
| 60
| 0.848921
| 8
| 139
| 14.75
| 0.625
| 0.220339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107914
| 139
| 5
| 61
| 27.8
| 0.951613
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
fe9c883d3eeb6663b042b3c802445f66f98731cb
| 3,960
|
py
|
Python
|
arus/tests/test_scheduler.py
|
qutang/arus
|
ee422bbadc72635037944359d00475f698e8fc61
|
[
"MIT"
] | null | null | null |
arus/tests/test_scheduler.py
|
qutang/arus
|
ee422bbadc72635037944359d00475f698e8fc61
|
[
"MIT"
] | 264
|
2019-09-25T14:15:39.000Z
|
2022-03-11T10:11:38.000Z
|
arus/tests/test_scheduler.py
|
qutang/arus
|
ee422bbadc72635037944359d00475f698e8fc61
|
[
"MIT"
] | null | null | null |
import pytest
from .. import scheduler
import os
import time
def task1():
print('task1 start on {}'.format(os.getpid()))
time.sleep(2)
print('task1 stop on {}'.format(os.getpid()))
return 'task1', time.time(), os.getpid()
def task2():
print('task2 start on {}'.format(os.getpid()))
time.sleep(0.5)
print('task2 stop on {}'.format(os.getpid()))
return 'task2', time.time(), os.getpid()
def task3():
print('task3 start on {}'.format(os.getpid()))
time.sleep(1)
print('task3 stop on {}'.format(os.getpid()))
return 'task3', time.time(), os.getpid()
class TestScheduler:
@pytest.mark.parametrize('scheme', [scheduler.Scheduler.Scheme.SUBMIT_ORDER, scheduler.Scheduler.Scheme.EXECUTION_ORDER, scheduler.Scheduler.Scheme.AFTER_PREVIOUS_DONE])
@pytest.mark.parametrize('mode', [scheduler.Scheduler.Mode.THREAD, scheduler.Scheduler.Mode.PROCESS])
def test_modes_and_schemes_with_get_all_remaining_results(self, scheme, mode):
sch = scheduler.Scheduler(mode=mode, scheme=scheme, max_workers=5)
sch.submit(task1)
sch.submit(task2)
sch.submit(task3)
results = sch.get_all_remaining_results()
if mode == scheduler.Scheduler.Mode.THREAD:
assert len(set([r[2] for r in results])) == 1
else:
assert len(set([r[2] for r in results])) == 3
if scheme == scheduler.Scheduler.Scheme.EXECUTION_ORDER:
assert results[0][0] == 'task2'
assert results[1][0] == 'task3'
assert results[2][0] == 'task1'
else:
assert results[0][0] == 'task1'
assert results[1][0] == 'task2'
assert results[2][0] == 'task3'
if scheme == scheduler.Scheduler.Scheme.AFTER_PREVIOUS_DONE:
assert results[2][1] > results[1][1]
assert results[1][1] > results[0][1]
elif scheme == scheduler.Scheduler.Scheme.EXECUTION_ORDER:
assert results[2][1] > results[1][1]
assert results[1][1] > results[0][1]
else:
assert results[0][1] > results[2][1]
assert results[2][1] > results[1][1]
sch.shutdown()
@pytest.mark.parametrize('scheme', [scheduler.Scheduler.Scheme.SUBMIT_ORDER, scheduler.Scheduler.Scheme.EXECUTION_ORDER, scheduler.Scheduler.Scheme.AFTER_PREVIOUS_DONE])
@pytest.mark.parametrize('mode', [scheduler.Scheduler.Mode.THREAD, scheduler.Scheduler.Mode.PROCESS])
def test_modes_and_schemes_with_get_result(self, scheme, mode):
sch = scheduler.Scheduler(mode=mode, scheme=scheme, max_workers=5)
sch.submit(task1)
sch.submit(task2)
sch.submit(task3)
results = []
while True:
try:
result = sch.get_result()
results.append(result)
except scheduler.Scheduler.ResultNotAvailableError:
continue
if len(results) == 3:
break
if mode == scheduler.Scheduler.Mode.THREAD:
assert len(set([r[2] for r in results])) == 1
else:
assert len(set([r[2] for r in results])) == 3
if scheme == scheduler.Scheduler.Scheme.EXECUTION_ORDER:
assert results[0][0] == 'task2'
assert results[1][0] == 'task3'
assert results[2][0] == 'task1'
else:
assert results[0][0] == 'task1'
assert results[1][0] == 'task2'
assert results[2][0] == 'task3'
if scheme == scheduler.Scheduler.Scheme.AFTER_PREVIOUS_DONE:
assert results[2][1] > results[1][1]
assert results[1][1] > results[0][1]
elif scheme == scheduler.Scheduler.Scheme.EXECUTION_ORDER:
assert results[2][1] > results[1][1]
assert results[1][1] > results[0][1]
else:
assert results[0][1] > results[2][1]
assert results[2][1] > results[1][1]
sch.shutdown()
| 40.408163
| 173
| 0.59798
| 490
| 3,960
| 4.759184
| 0.140816
| 0.133791
| 0.123499
| 0.102916
| 0.849057
| 0.832762
| 0.799314
| 0.76072
| 0.76072
| 0.76072
| 0
| 0.039769
| 0.257071
| 3,960
| 97
| 174
| 40.824742
| 0.752889
| 0
| 0
| 0.636364
| 0
| 0
| 0.04899
| 0
| 0
| 0
| 0
| 0
| 0.318182
| 1
| 0.056818
| false
| 0
| 0.045455
| 0
| 0.147727
| 0.068182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
22ccede72a12021d1108c6ef77107c6e04d590bc
| 34
|
py
|
Python
|
alexnet_cifar10/__init__.py
|
zhangjunpeng9354/Learning-Tensorflow-by-Models
|
9e6ab4da4ec66fb6e7934d129c57110c85e3d7da
|
[
"MIT"
] | 1
|
2017-10-05T00:23:20.000Z
|
2017-10-05T00:23:20.000Z
|
alexnet_cifar10/__init__.py
|
zhangjunpeng9354/Learning-Tensorflow-by-Models
|
9e6ab4da4ec66fb6e7934d129c57110c85e3d7da
|
[
"MIT"
] | null | null | null |
alexnet_cifar10/__init__.py
|
zhangjunpeng9354/Learning-Tensorflow-by-Models
|
9e6ab4da4ec66fb6e7934d129c57110c85e3d7da
|
[
"MIT"
] | null | null | null |
import input
from model import *
| 8.5
| 19
| 0.764706
| 5
| 34
| 5.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 34
| 3
| 20
| 11.333333
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
22ececfb2a5d9e03f484b8c96fa703a80e2afaeb
| 1,671
|
py
|
Python
|
server/tests/models/test_arg_limit.py
|
athenianco/athenian-api
|
dd5556101a8c49703d6b0516e4268b9e8d8eda5b
|
[
"RSA-MD"
] | 9
|
2020-10-11T22:12:03.000Z
|
2022-02-26T02:16:45.000Z
|
server/tests/models/test_arg_limit.py
|
athenianco/athenian-api
|
dd5556101a8c49703d6b0516e4268b9e8d8eda5b
|
[
"RSA-MD"
] | 246
|
2019-12-05T06:37:30.000Z
|
2022-03-29T10:00:07.000Z
|
server/tests/models/test_arg_limit.py
|
athenianco/athenian-api
|
dd5556101a8c49703d6b0516e4268b9e8d8eda5b
|
[
"RSA-MD"
] | 5
|
2019-12-04T22:38:05.000Z
|
2021-02-26T00:50:04.000Z
|
from sqlalchemy import select
from sqlalchemy.dialects import postgresql, sqlite
from athenian.api.models.metadata.github import Repository
async def test_query_argument_limit_in(mdb):
rows = await mdb.fetch_all(select([Repository]).where(Repository.full_name.in_(
["r%d" % i for i in range(1 << 16)] + ["src-d/go-git"])))
assert rows
async def test_in_inlining():
check_any_values = "= ANY (VALUES ('r0'), ('r1'),"
sql = select([Repository]).where(Repository.full_name.in_(
["r%d" % i for i in range(1 << 3)] + ["src-d/go-git"]))
postgres_sql = str(sql.compile(dialect=postgresql.dialect()))
assert check_any_values not in postgres_sql
sql = select([Repository]).where(Repository.full_name.in_(
["r%d" % i for i in range(1 << 16)] + ["src-d/go-git"]))
postgres_sql = str(sql.compile(dialect=postgresql.dialect()))
assert check_any_values not in postgres_sql
sql = select([Repository]).where(Repository.full_name.in_any_values(
["r%d" % i for i in range(1 << 3)] + ["src-d/go-git"]))
postgres_sql = str(sql.compile(dialect=postgresql.dialect()))
assert check_any_values not in postgres_sql
sql = select([Repository]).where(Repository.full_name.in_any_values(
["r%d" % i for i in range(1 << 16)] + ["src-d/go-git"]))
postgres_sql = str(sql.compile(dialect=postgresql.dialect()))
assert check_any_values in postgres_sql
sql = select([Repository]).where(Repository.full_name.in_any_values(
["r%d" % i for i in range(1 << 16)] + ["src-d/go-git"]))
postgres_sql = str(sql.compile(dialect=sqlite.dialect()))
assert check_any_values not in postgres_sql
| 47.742857
| 83
| 0.678636
| 255
| 1,671
| 4.27451
| 0.203922
| 0.082569
| 0.115596
| 0.170642
| 0.778899
| 0.778899
| 0.778899
| 0.778899
| 0.778899
| 0.73945
| 0
| 0.012912
| 0.165769
| 1,671
| 34
| 84
| 49.147059
| 0.76901
| 0
| 0
| 0.62069
| 0
| 0
| 0.071215
| 0
| 0
| 0
| 0
| 0
| 0.206897
| 1
| 0
| false
| 0
| 0.103448
| 0
| 0.103448
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
22f2db204ed7d8c464d421bf34a26190a15b5dbd
| 367
|
py
|
Python
|
python/pyprepbuddy/cluster/cluster.py
|
veera83372/prep-buddy
|
d2abbf376e91b841b28476bd45026800fcd7a33c
|
[
"Apache-2.0"
] | null | null | null |
python/pyprepbuddy/cluster/cluster.py
|
veera83372/prep-buddy
|
d2abbf376e91b841b28476bd45026800fcd7a33c
|
[
"Apache-2.0"
] | null | null | null |
python/pyprepbuddy/cluster/cluster.py
|
veera83372/prep-buddy
|
d2abbf376e91b841b28476bd45026800fcd7a33c
|
[
"Apache-2.0"
] | 1
|
2018-05-29T16:21:33.000Z
|
2018-05-29T16:21:33.000Z
|
class Cluster(object):
"""
Cluster contains groups of values by their specified key
"""
def __init__(self, cluster):
self.__cluster = cluster
def __contains__(self, item):
return self.__cluster.containsValue(item)
def size(self):
return self.__cluster.size()
def get_cluster(self):
return self.__cluster
| 22.9375
| 60
| 0.648501
| 42
| 367
| 5.261905
| 0.452381
| 0.248869
| 0.230769
| 0.190045
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.258856
| 367
| 15
| 61
| 24.466667
| 0.8125
| 0.152589
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0
| 0
| 0.333333
| 0.888889
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
fe0e7caf2438cb95c1fa1892354fa2a67e9acf42
| 2,425
|
py
|
Python
|
sympy/crypto/__init__.py
|
msgoff/sympy
|
1e7daef7514902f5e89718fa957b7b36c6669a10
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/crypto/__init__.py
|
msgoff/sympy
|
1e7daef7514902f5e89718fa957b7b36c6669a10
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/crypto/__init__.py
|
msgoff/sympy
|
1e7daef7514902f5e89718fa957b7b36c6669a10
|
[
"BSD-3-Clause"
] | null | null | null |
from sympy.crypto.crypto import (
cycle_list,
encipher_shift,
encipher_affine,
encipher_substitution,
check_and_join,
encipher_vigenere,
decipher_vigenere,
bifid5_square,
bifid6_square,
encipher_hill,
decipher_hill,
encipher_bifid5,
encipher_bifid6,
decipher_bifid5,
decipher_bifid6,
encipher_kid_rsa,
decipher_kid_rsa,
kid_rsa_private_key,
kid_rsa_public_key,
decipher_rsa,
rsa_private_key,
rsa_public_key,
encipher_rsa,
lfsr_connection_polynomial,
lfsr_autocorrelation,
lfsr_sequence,
encode_morse,
decode_morse,
elgamal_private_key,
elgamal_public_key,
decipher_elgamal,
encipher_elgamal,
dh_private_key,
dh_public_key,
dh_shared_key,
padded_key,
encipher_bifid,
decipher_bifid,
bifid_square,
bifid5,
bifid6,
bifid10,
decipher_gm,
encipher_gm,
gm_public_key,
gm_private_key,
bg_private_key,
bg_public_key,
encipher_bg,
decipher_bg,
encipher_rot13,
decipher_rot13,
encipher_atbash,
decipher_atbash,
encipher_railfence,
decipher_railfence,
)
__all__ = [
"cycle_list",
"encipher_shift",
"encipher_affine",
"encipher_substitution",
"check_and_join",
"encipher_vigenere",
"decipher_vigenere",
"bifid5_square",
"bifid6_square",
"encipher_hill",
"decipher_hill",
"encipher_bifid5",
"encipher_bifid6",
"decipher_bifid5",
"decipher_bifid6",
"encipher_kid_rsa",
"decipher_kid_rsa",
"kid_rsa_private_key",
"kid_rsa_public_key",
"decipher_rsa",
"rsa_private_key",
"rsa_public_key",
"encipher_rsa",
"lfsr_connection_polynomial",
"lfsr_autocorrelation",
"lfsr_sequence",
"encode_morse",
"decode_morse",
"elgamal_private_key",
"elgamal_public_key",
"decipher_elgamal",
"encipher_elgamal",
"dh_private_key",
"dh_public_key",
"dh_shared_key",
"padded_key",
"encipher_bifid",
"decipher_bifid",
"bifid_square",
"bifid5",
"bifid6",
"bifid10",
"decipher_gm",
"encipher_gm",
"gm_public_key",
"gm_private_key",
"bg_private_key",
"bg_public_key",
"encipher_bg",
"decipher_bg",
"encipher_rot13",
"decipher_rot13",
"encipher_atbash",
"decipher_atbash",
"encipher_railfence",
"decipher_railfence",
]
| 20.550847
| 33
| 0.669691
| 262
| 2,425
| 5.633588
| 0.167939
| 0.081301
| 0.03523
| 0.02981
| 0.979675
| 0.979675
| 0.979675
| 0.979675
| 0.979675
| 0.979675
| 0
| 0.015094
| 0.235052
| 2,425
| 117
| 34
| 20.726496
| 0.780593
| 0
| 0
| 0
| 0
| 0
| 0.327835
| 0.019381
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008621
| 0
| 0.008621
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fe165b97b4f738953deab5dc97f971d96d7ae3e4
| 132
|
py
|
Python
|
login/admin.py
|
bibapple/LoginApp
|
63bae464076ae033097c88db25ec03a7b409d95e
|
[
"Apache-2.0"
] | null | null | null |
login/admin.py
|
bibapple/LoginApp
|
63bae464076ae033097c88db25ec03a7b409d95e
|
[
"Apache-2.0"
] | 10
|
2020-02-12T00:40:59.000Z
|
2022-01-13T01:20:39.000Z
|
login/admin.py
|
bibapple/LoginApp
|
63bae464076ae033097c88db25ec03a7b409d95e
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from . import models
admin.site.register(models.User)
admin.site.register(models.ConfirmEmail)
| 14.666667
| 40
| 0.80303
| 18
| 132
| 5.888889
| 0.555556
| 0.169811
| 0.320755
| 0.433962
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106061
| 132
| 8
| 41
| 16.5
| 0.898305
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a3ace3e1a4c614e6fe172bd222a117b9196491e4
| 1,978
|
py
|
Python
|
test/test_cli.py
|
goneri/ansible-builder
|
a57334b2090a38d54931129b8d0308a2d0b361bd
|
[
"Apache-2.0"
] | 1
|
2021-12-06T16:55:55.000Z
|
2021-12-06T16:55:55.000Z
|
test/test_cli.py
|
goneri/ansible-builder
|
a57334b2090a38d54931129b8d0308a2d0b361bd
|
[
"Apache-2.0"
] | null | null | null |
test/test_cli.py
|
goneri/ansible-builder
|
a57334b2090a38d54931129b8d0308a2d0b361bd
|
[
"Apache-2.0"
] | null | null | null |
from ansible_builder.main import AnsibleBuilder
from ansible_builder.cli import parse_args
def prepare(args):
args = parse_args(args)
return AnsibleBuilder(**vars(args))
def test_custom_image(exec_env_definition_file, tmpdir):
content = {'version': 1}
path = str(exec_env_definition_file(content=content))
# test with 'container' sub-command
aee = prepare(['container', 'build', '-f', path, '--build-arg', 'EE_BASE_IMAGE=my-custom-image', '-c', str(tmpdir)])
assert aee.build_args == {'EE_BASE_IMAGE': 'my-custom-image'}
# test without 'container' sub-command (defaulting to 'container')
aee = prepare(['build', '-f', path, '--build-arg', 'EE_BASE_IMAGE=my-custom-image', '-c', str(tmpdir)])
assert aee.build_args == {'EE_BASE_IMAGE': 'my-custom-image'}
def test_custom_ansible_galaxy_cli_collection_opts(exec_env_definition_file, tmpdir):
content = {'version': 1}
path = str(exec_env_definition_file(content=content))
# test with 'container' sub-command
aee = prepare(['container', 'build', '-f', path, '--build-arg', 'ANSIBLE_GALAXY_CLI_COLLECTION_OPTS=--pre', '-c', str(tmpdir)])
assert aee.build_args == {'ANSIBLE_GALAXY_CLI_COLLECTION_OPTS': '--pre'}
# test without 'container' sub-command (defaulting to 'container')
aee = prepare(['build', '-f', path, '--build-arg', 'ANSIBLE_GALAXY_CLI_COLLECTION_OPTS=--pre', '-c', str(tmpdir)])
assert aee.build_args == {'ANSIBLE_GALAXY_CLI_COLLECTION_OPTS': '--pre'}
def test_build_context(good_exec_env_definition_path, tmpdir):
path = str(good_exec_env_definition_path)
build_context = str(tmpdir)
# test with 'container' sub-command
aee = prepare(['container', 'build', '-f', path, '-c', build_context])
assert aee.build_context == build_context
# test without 'container' sub-command (defaulting to 'container')
aee = prepare(['build', '-f', path, '-c', build_context])
assert aee.build_context == build_context
| 42.085106
| 131
| 0.699191
| 263
| 1,978
| 4.996198
| 0.18251
| 0.073059
| 0.077626
| 0.098935
| 0.831811
| 0.770928
| 0.770928
| 0.770928
| 0.770928
| 0.770928
| 0
| 0.001179
| 0.142568
| 1,978
| 46
| 132
| 43
| 0.773585
| 0.149646
| 0
| 0.384615
| 0
| 0
| 0.245373
| 0.122985
| 0
| 0
| 0
| 0
| 0.230769
| 1
| 0.153846
| false
| 0
| 0.076923
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a3cf29d2f473960a4ceeac97356e80b2fb81a39a
| 407
|
py
|
Python
|
exercises/complex-numbers/complex_numbers.py
|
kishankj/python
|
82042de746128127502e109111e6c4e8ab002af6
|
[
"MIT"
] | 1,177
|
2017-06-21T20:24:06.000Z
|
2022-03-29T02:30:55.000Z
|
exercises/complex-numbers/complex_numbers.py
|
kishankj/python
|
82042de746128127502e109111e6c4e8ab002af6
|
[
"MIT"
] | 1,890
|
2017-06-18T20:06:10.000Z
|
2022-03-31T18:35:51.000Z
|
exercises/complex-numbers/complex_numbers.py
|
kishankj/python
|
82042de746128127502e109111e6c4e8ab002af6
|
[
"MIT"
] | 1,095
|
2017-06-26T23:06:19.000Z
|
2022-03-29T03:25:38.000Z
|
class ComplexNumber:
def __init__(self, real, imaginary):
pass
def __eq__(self, other):
pass
def __add__(self, other):
pass
def __mul__(self, other):
pass
def __sub__(self, other):
pass
def __truediv__(self, other):
pass
def __abs__(self):
pass
def conjugate(self):
pass
def exp(self):
pass
| 14.535714
| 40
| 0.540541
| 45
| 407
| 4.266667
| 0.377778
| 0.291667
| 0.338542
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.373464
| 407
| 27
| 41
| 15.074074
| 0.752941
| 0
| 0
| 0.473684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.473684
| false
| 0.473684
| 0
| 0
| 0.526316
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
a3d46ad92b0cfc7891fd0a7e0dae3ce327269dfe
| 108
|
py
|
Python
|
validators.py
|
setivolkylany/DjangoAppTemplate
|
d26a63116d9f01e321374e3560b84836a1bea4c7
|
[
"MIT"
] | null | null | null |
validators.py
|
setivolkylany/DjangoAppTemplate
|
d26a63116d9f01e321374e3560b84836a1bea4c7
|
[
"MIT"
] | null | null | null |
validators.py
|
setivolkylany/DjangoAppTemplate
|
d26a63116d9f01e321374e3560b84836a1bea4c7
|
[
"MIT"
] | null | null | null |
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
| 27
| 55
| 0.861111
| 14
| 108
| 6.5
| 0.785714
| 0.21978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101852
| 108
| 3
| 56
| 36
| 0.938144
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4304452939d42e97c3de7239c0970fa4ca4199e8
| 533
|
py
|
Python
|
faktotum/research/__init__.py
|
severinsimmler/extract
|
c1e76a29929e2334976b18ba9218403d85331f51
|
[
"MIT"
] | 2
|
2020-02-19T14:29:21.000Z
|
2020-02-22T14:33:08.000Z
|
faktotum/research/__init__.py
|
severinsimmler/faktotum
|
c1e76a29929e2334976b18ba9218403d85331f51
|
[
"MIT"
] | null | null | null |
faktotum/research/__init__.py
|
severinsimmler/faktotum
|
c1e76a29929e2334976b18ba9218403d85331f51
|
[
"MIT"
] | null | null | null |
import logging
import transformers
logging.getLogger("transformers").setLevel(logging.ERROR)
from faktotum.research import evaluation
from faktotum.research.corpus import load_corpus, sentencize_corpus, tokenize_corpus
from faktotum.research.knowledge import KnowledgeBase
from faktotum.research.ontologia import FastText, TfIdf, Word2Vec
from faktotum.research.utils import sentencize, tokenize
from faktotum.research import vendor
from faktotum.research import clustering
from faktotum.research import regression, classification
| 38.071429
| 84
| 0.864916
| 63
| 533
| 7.269841
| 0.396825
| 0.209607
| 0.349345
| 0.227074
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002049
| 0.084428
| 533
| 13
| 85
| 41
| 0.936475
| 0
| 0
| 0
| 0
| 0
| 0.022514
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.909091
| 0
| 0.909091
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
432d758dcf8a65a9d80e8fbb5b38627ae441184d
| 198
|
py
|
Python
|
peleffybenchmarktools/dihedrals/__init__.py
|
martimunicoy/offpele-benchmarks
|
20af939ce60252c05e0c1e44b85cf89a4f8a2245
|
[
"MIT"
] | null | null | null |
peleffybenchmarktools/dihedrals/__init__.py
|
martimunicoy/offpele-benchmarks
|
20af939ce60252c05e0c1e44b85cf89a4f8a2245
|
[
"MIT"
] | 7
|
2020-08-07T14:51:02.000Z
|
2020-10-30T20:18:38.000Z
|
peleffybenchmarktools/dihedrals/__init__.py
|
martimunicoy/offpele-benchmarks
|
20af939ce60252c05e0c1e44b85cf89a4f8a2245
|
[
"MIT"
] | null | null | null |
from .dihedralhandler import DihedralBenchmark
from .energyhandler import (OpenMMEnergeticProfile, OpenFFEnergeticProfile,
PELEEnergeticProfile, OFFPELEEnergeticProfile)
| 49.5
| 75
| 0.767677
| 11
| 198
| 13.818182
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19697
| 198
| 3
| 76
| 66
| 0.955975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4a60a030584af45b15a48ecb28bd359eaf0d478b
| 33,212
|
py
|
Python
|
test_sc3.py
|
burja8x/StreamAi
|
09d600632299f436ccb6706ec4e53f6250ded93d
|
[
"MIT"
] | null | null | null |
test_sc3.py
|
burja8x/StreamAi
|
09d600632299f436ccb6706ec4e53f6250ded93d
|
[
"MIT"
] | null | null | null |
test_sc3.py
|
burja8x/StreamAi
|
09d600632299f436ccb6706ec4e53f6250ded93d
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from sc2 import *
import random
import time
class Test(TestCase):
def test_1_admin_c(self):
a = send_tx(sc.functions.changeFeeAccount(accX0.address), accA)
self.assertEqual(a[1], 1)
a = send_tx(sc.functions.changeWaitTime(5, 10), accA)
self.assertEqual(a[1], 1)
a = send_tx(sc.functions.changeFeeMake(Web3.toWei(0.00003, 'ether')), accA)
self.assertEqual(a[1], 1)
a = send_tx(sc.functions.changeFeeTake(11), accA) # = 1.1 %
self.assertEqual(a[1], 1)
a = send_tx(sc.functions.changeWaitTime(11, 100010), accA)
self.assertEqual(a[1], 1)
def test_2_if_exist_fail(self):
b = sc.functions.hosts(accP0.address).call()
c = sc.functions.hosts(accP1.address).call()
a = send_tx(sc.functions.newCProvider("roža", 100001, 256, 10, 10), accP0)
#self.assertEqual(a[1], 0 if c[10] == 0 else 1)
a = send_tx(sc.functions.newCProvider("apple", 100000, 256, 10, 10), accP1)
#self.assertEqual(a[1], 0 if b[10] == 0 else 1)
d = sc.functions.hosts(accP1.address).call()
a = send_tx(sc.functions.newCProvider("ibm", 33358, 256, 20, 10), accP1)
#self.assertEqual(a[1], 0 if d[10] == 0 else 1)
def test_3_change_provider_data(self):
i = random.randint(1000, 1000000)
j = random.randint(3, 99)
k = random.randint(3, 99)
g = random.randint(3, 99)
a = send_tx(sc.functions.changeCProviderData(i, j, g, k), accP1)
self.assertEqual(a[1], 1)
b = sc.functions.hosts(accP1.address).call()
self.assertEqual(b[2], i)
self.assertEqual(b[3], j)
self.assertEqual(b[4], g)
self.assertEqual(b[5], k)
a = send_tx(sc.functions.changeCProviderData(1100000, 2000, 30, 200), accP1)
self.assertEqual(a[1], 1)
a = send_tx(sc.functions.changeCProviderData(1100000, 2000, 30, 200), accP0)
self.assertEqual(a[1], 1)
a = send_tx(sc.functions.changeCProviderData(1100000, 2000, 20, 200), accP2)
self.assertEqual(a[1], 0)
def test_4_change_provider_data_1(self):
with self.assertRaises(Exception):
a = send_tx(sc.functions.changeCProviderData(10000000000, 10, 1, 10), accP1)
with self.assertRaises(Exception):
a = send_tx(sc.functions.changeCProviderData(10000, 100000000, 1, 10), accP1)
with self.assertRaises(Exception):
a = send_tx(sc.functions.changeCProviderData(10000, 100, 10, 100000000), accP1)
def test_5_change_provider_data_2(self):
a = send_tx(sc.functions.changeCProviderData(1000, 10, 1, 10), accX1)
self.assertEqual(a[1], 0)
def test_6_allowCProvider(self):
a = send_tx(sc.functions.allowCProvider(accP0.address, True), accX1)
self.assertEqual(a[1], 0)
a = send_tx(sc.functions.allowCProvider(accP0.address, True), accA)
self.assertEqual(a[1], 1)
hc = sc.functions.hostCounter().call() + 1
# error ====
print(hc)
p = sc.functions.getProviders(1, hc).call()
id = 0
for x in p:
if x[11] == accP0.address:
id = x[10]
b = sc.functions.hostsIndex(id).call()
self.assertEqual(b, accP0.address)
a = send_tx(sc.functions.allowCProvider(accP0.address, False), accA)
self.assertEqual(a[1], 1)
b = sc.functions.hostsIndex(id).call()
self.assertEqual(b, accP0.address)
a = send_tx(sc.functions.allowCProvider(accP0.address, True), accA)
self.assertEqual(a[1], 1)
b = sc.functions.hostsIndex(id).call()
self.assertEqual(b, accP0.address)
def test_7_add_method(self):
feeMake = Web3.fromWei(sc.functions.feeMake().call(), 'ether')
a = send_tx(sc.functions.sell("Detekcija mask", 512, 1, False, Web3.toWei(0.00000012, 'ether'),
"bafybeifk6r6ugz62kdrkeitqukase2fojvt6gfasafvzv3rykczww7qawm",
False, "ec dockerHubLink"), accM0,
value=feeMake)
self.assertEqual(a[1], 1)
a = send_tx(sc.functions.sell("Detekcija mask v2", 1024, 1, True, Web3.toWei(0.00000022, 'ether'),
"bafybeifk6r6ugz62kdrkeitqukase2fojvt6gfasafvzv3rykczww7qawm",
False, "ec dockerHubLink"), accM0,
value=feeMake)
self.assertEqual(a[1], 1)
# https://nft.storage/files/
a = send_tx(sc.functions.sell("vreme", 4096, 4, False, Web3.toWei(0.00000019, 'ether'),
"bafybeifk6r6ugz62kdrkeitqukase2fojvt6gfasafvzv3rykczww7qawm",
False, "ec dockerHubLink"), accM1,
value=feeMake)
self.assertEqual(a[1], 1)
def test_8_allow_method(self):
m = sc.functions.methodCounter().call()
a = send_tx(sc.functions.allowAiMethod(m - 2, True), accA)
self.assertEqual(a[1], 1)
a = send_tx(sc.functions.allowAiMethod(m - 1, True), accA)
self.assertEqual(a[1], 1)
a = send_tx(sc.functions.allowAiMethod(m, True), accA)
self.assertEqual(a[1], 1)
def test_9_set_c_price(self):
m = sc.functions.methodCounter().call()
p1 = Web3.toWei(0.000000671, 'ether')
p2 = Web3.toWei(0.000000682, 'ether')
a = send_tx(sc.functions.setContainerCost(m - 2, p1), accP0)
self.assertEqual(a[1], 1)
a = send_tx(sc.functions.allowCProvider(accP1.address, True), accA)
self.assertEqual(a[1], 1)
a = send_tx(sc.functions.setContainerCost(m - 2, p2), accP1)
self.assertEqual(a[1], 1)
array = [accP0.address, accP1.address]
b = sc.functions.getPricesOfProviders(m - 2, array).call()
self.assertEqual(p1, b[0])
self.assertEqual(p2, b[1])
a = send_tx(sc.functions.setContainerCost(m - 2, 0), accP0)
self.assertEqual(a[1], 1)
b = sc.functions.getPricesOfProviders(m - 2, array).call()
self.assertEqual(0, b[0])
a = send_tx(sc.functions.setContainerCost(m - 1, p1), accP0)
self.assertEqual(a[1], 1)
a = send_tx(sc.functions.setContainerCost(m - 2, p1), accP0)
self.assertEqual(a[1], 1)
class Test_Buy(TestCase):
def test_1_Buy(self):
buy_time = 16
method_id = sc.functions.methodCounter().call() - 2
pprice = sc.functions.getProviderPrice(method_id, accP0.address).call()
mprice = sc.functions.aiMethods(method_id).call()
feeTake = sc.functions.feeTake().call()
print("provider Price:", pprice)
print("method Price:", mprice[5])
print("feeTake:", feeTake)
n = (pprice * buy_time) + (mprice[5] * buy_time)
m = (n * feeTake) / 1000
end_price = Web3.fromWei(m + n, 'ether')
print(end_price)
a = send_tx(sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC link video streem..."),
accU1, value=end_price)
self.assertEqual(a[1], 1)
end_price = Web3.fromWei(m + n + 1000, 'ether')
a = send_tx(sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC link video streem..."),
accU0, value=end_price)
self.assertEqual(a[1], 1)
c = sc.functions.dealIdCounter().call()
l0 = sc.functions.locked(c - 1).call()
l1 = sc.functions.locked(c).call()
self.assertEqual(l0[0], l1[0])
self.assertEqual(l0[2], l1[2])
self.assertNotEqual(l0[4], l1[4])
end_price = Web3.fromWei(m + n - 1, 'ether')
a = send_tx(sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC link video streem..."),
accU0, value=end_price)
self.assertEqual(a[1], 0)
def test_2_Buy(self):
buy_time = 21
method_id = sc.functions.methodCounter().call() - 2
pprice = sc.functions.getProviderPrice(method_id, accP0.address).call()
mprice = sc.functions.aiMethods(method_id).call()
feeTake = sc.functions.feeTake().call()
print("provider Price:", pprice)
print("method Price:", mprice[5])
print("feeTake:", feeTake)
n = (pprice * buy_time) + (mprice[5] * buy_time)
m = (n * feeTake) / 1000
end_price = Web3.fromWei(m + n, 'ether')
print(end_price)
a = send_tx(
sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC link video streem..."),
accU1, value=end_price)
self.assertEqual(a[1], 1)
dealId = sc.functions.dealIdCounter().call()
b = send_tx(sc.functions.complaint(dealId, "not working !!!"), accU0)
self.assertEqual(b[1], 0)
b = send_tx(sc.functions.complaint(dealId, "not workinggdf !!!"), accU1)
self.assertEqual(b[1], 1)
b = send_tx(sc.functions.complaint(dealId, "not workihrtng !!!"), accU1)
self.assertEqual(b[1], 0)
b = send_tx(sc.functions.complaint(dealId, "not workihrtng !!!"), accU2)
self.assertEqual(b[1], 0)
def test_3_Buy(self):
buy_time = 2
method_id = sc.functions.methodCounter().call() - 2
pprice = sc.functions.getProviderPrice(method_id, accP0.address).call()
mprice = sc.functions.aiMethods(method_id).call()
feeTake = sc.functions.feeTake().call()
print("provider Price:", pprice)
print("method Price:", mprice[5])
print("feeTake:", feeTake)
n = (pprice * buy_time) + (mprice[5] * buy_time)
m = (n * feeTake) / 1000
end_price = Web3.fromWei(m + n, 'ether')
print(end_price)
a = send_tx(
sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC link video streem..."),
accU1, value=end_price)
self.assertEqual(a[1], 0) # time to small....
def test_4_Buy_Start_Stop(self):
buy_time = 20
method_id = sc.functions.methodCounter().call() - 2
pprice = sc.functions.getProviderPrice(method_id, accP0.address).call()
mprice = sc.functions.aiMethods(method_id).call()
feeTake = sc.functions.feeTake().call()
print("provider Price:", pprice)
print("method Price:", mprice[5])
print("feeTake:", feeTake)
n = (pprice * buy_time) + (mprice[5] * buy_time)
m = (n * feeTake) / 1000
end_price = Web3.fromWei(m + n, 'ether')
print(end_price)
a = send_tx(
sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .....m..."),
accU1, value=end_price)
self.assertEqual(a[1], 1)
dealId = sc.functions.dealIdCounter().call()
a = send_tx(sc.functions.start(dealId, "THIS is EC(# mqtt link)"), accA)
self.assertEqual(a[1], 1)
a = send_tx(sc.functions.delivered(dealId, False), accA)
self.assertEqual(a[1], 1)
a = send_tx(sc.functions.delivered(dealId, False), accA)
self.assertEqual(a[1], 0)
a = send_tx(sc.functions.start(dealId, "THIS is EC(# mqtt link)"), accA)
self.assertEqual(a[1], 0)
def test_5_Buy_Start_Stop(self):
    """Access control on the deal lifecycle: only the admin account (accA)
    may start and settle a deal; the buyer and other users are rejected."""
    buy_time = 20
    method_id = 2  # fixed method id for this scenario
    pprice = sc.functions.getProviderPrice(method_id, accP0.address).call()
    mprice = sc.functions.aiMethods(method_id).call()
    feeTake = sc.functions.feeTake().call()
    print("provider Price:", pprice)
    print("method Price:", mprice[5])
    print("feeTake:", feeTake)
    # Total cost = (provider + method price) * time, plus the /1000 fee.
    n = (pprice * buy_time) + (mprice[5] * buy_time)
    m = (n * feeTake) / 1000
    end_price = Web3.fromWei(m + n, 'ether')
    print(end_price)
    a = send_tx(
        sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .....m..."),
        accU1, value=end_price)
    self.assertEqual(a[1], 1)  # buy succeeds
    dealId = sc.functions.dealIdCounter().call()
    a = send_tx(sc.functions.start(dealId, "THIS is EC(# mqtt link)"), accU1)
    self.assertEqual(a[1], 0)  # buyer cannot start the deal
    a = send_tx(sc.functions.delivered(dealId, False), accU1)
    self.assertEqual(a[1], 0)  # buyer cannot settle the deal
    a = send_tx(sc.functions.start(dealId, "THIS is EC(# mqtt link)"), accA)
    self.assertEqual(a[1], 1)  # admin starts
    a = send_tx(sc.functions.delivered(dealId, False), accU0)
    self.assertEqual(a[1], 0)  # unrelated user cannot settle
    a = send_tx(sc.functions.delivered(dealId, False), accA)
    self.assertEqual(a[1], 1)  # admin settles
    a = send_tx(sc.functions.delivered(dealId, False), accA)
    self.assertEqual(a[1], 0)  # double settlement must revert
def test_6_Buy_Disable(self):
    """Admin-level method gating: buys fail while the AI method is
    disallowed via allowAiMethod(False) and succeed again once re-allowed."""
    buy_time = 19
    method_id = sc.functions.methodCounter().call() - 2
    pprice = sc.functions.getProviderPrice(method_id, accP0.address).call()
    mprice = sc.functions.aiMethods(method_id).call()
    feeTake = sc.functions.feeTake().call()
    print("provider Price:", pprice)
    print("method Price:", mprice[5])
    print("feeTake:", feeTake)
    # Total cost = (provider + method price) * time, plus the /1000 fee.
    n = (pprice * buy_time) + (mprice[5] * buy_time)
    m = (n * feeTake) / 1000
    end_price = Web3.fromWei(m + n, 'ether')
    print(end_price)
    a = send_tx(
        sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .....m..."),
        accU1, value=end_price)
    self.assertEqual(a[1], 1)  # baseline: buy succeeds while method is allowed
    a = send_tx(sc.functions.allowAiMethod(method_id, False), accA)
    self.assertEqual(a[1], 1)  # admin disallows the method
    a = send_tx(
        sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .....m..."),
        accU1, value=end_price)
    self.assertEqual(a[1], 0)  # buy must fail while disallowed
    a = send_tx(sc.functions.allowAiMethod(method_id, True), accA)
    self.assertEqual(a[1], 1)  # admin re-allows the method
    a = send_tx(
        sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .h7uz789678967....m..."),
        accU1, value=end_price)
    self.assertEqual(a[1], 1)  # buy succeeds again
def test_7_Buy_Disable_P(self):
    """Provider gating: buys fail while the compute provider is disallowed
    via allowCProvider(False) and the provider is restored afterwards."""
    buy_time = 19
    method_id = sc.functions.methodCounter().call() - 2
    pprice = sc.functions.getProviderPrice(method_id, accP0.address).call()
    mprice = sc.functions.aiMethods(method_id).call()
    feeTake = sc.functions.feeTake().call()
    print("provider Price:", pprice)
    print("method Price:", mprice[5])
    print("feeTake:", feeTake)
    # Total cost = (provider + method price) * time, plus the /1000 fee.
    n = (pprice * buy_time) + (mprice[5] * buy_time)
    m = (n * feeTake) / 1000
    end_price = Web3.fromWei(m + n, 'ether')
    print(end_price)
    a = send_tx(
        sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .....m..."),
        accU1, value=end_price)
    self.assertEqual(a[1], 1)  # baseline: buy succeeds while provider is allowed
    a = send_tx(sc.functions.allowCProvider(accP0.address, False), accA)
    self.assertEqual(a[1], 1)  # admin disallows the provider
    a = send_tx(
        sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .....m..."),
        accU1, value=end_price)
    self.assertEqual(a[1], 0)  # buy must fail while provider is disallowed
    a = send_tx(sc.functions.allowCProvider(accP0.address, True), accA)
    self.assertEqual(a[1], 1)  # restore the provider for later tests
def test_8_Buy_Disable_Acctive(self):
    """Creator-level gating: the method's creator can deactivate it
    (activateAiMethod(False)), blocking buys, and reactivate it again."""
    buy_time = 19
    method_id = sc.functions.methodCounter().call() - 2
    pprice = sc.functions.getProviderPrice(method_id, accP0.address).call()
    mprice = sc.functions.aiMethods(method_id).call()
    feeTake = sc.functions.feeTake().call()
    print("provider Price:", pprice)
    print("method Price:", mprice[5])
    print("feeTake:", feeTake)
    # Total cost = (provider + method price) * time, plus the /1000 fee.
    n = (pprice * buy_time) + (mprice[5] * buy_time)
    m = (n * feeTake) / 1000
    end_price = Web3.fromWei(m + n, 'ether')
    print(end_price)
    a = send_tx(
        sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .....m..."),
        accU1, value=end_price)
    self.assertEqual(a[1], 1)  # baseline: buy succeeds while method is active
    methodX = sc.functions.aiMethods(method_id).call()
    # methodX[0] is the creator's address (it is compared against the
    # accM* test accounts); map it back to the local account object.
    if methodX[0] == accM0.address:
        acc = accM0
    elif methodX[0] == accM1.address:
        acc = accM1
    elif methodX[0] == accM2.address:
        acc = accM2
    else:
        self.assertTrue(False)  # creator is not one of the known test accounts
    a = send_tx(sc.functions.activateAiMethod(method_id, False), acc)
    self.assertEqual(a[1], 1)  # creator deactivates the method
    a = send_tx(
        sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .....m..."),
        accU0, value=end_price)
    self.assertEqual(a[1], 0)  # buy must fail while deactivated
    a = send_tx(sc.functions.activateAiMethod(method_id, True), acc)
    self.assertEqual(a[1], 1)  # creator reactivates the method
    a = send_tx(
        sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .....m..."),
        accU2, value=end_price)
    self.assertEqual(a[1], 1)  # buy succeeds again
def test_9_Buy_Ram_Cpus(self):
    """Provider resource caps: tightening any of the provider's max
    resources (ram/cpus/gpus/instances) to its current usage makes further
    buys fail; restoring the caps makes buys succeed again.

    hosts(addr) tuple layout (from the prints below):
    [2]=maxRam [3]=maxCpus [4]=maxGpus [5]=maxInst
    [6]=usedRam [7]=usedCpus [8]=usedGpus [9]=usedInst
    changeCProviderData takes the four max values in that order (assumed
    from the index pattern; confirm against the contract).
    NOTE(review): the original trailing labels (# ram / # gpu / # cpus)
    do not all match the arguments being tightened — verify intent.
    """
    buy_time = 19
    method_id = 1
    pprice = sc.functions.getProviderPrice(method_id, accP0.address).call()
    mprice = sc.functions.aiMethods(method_id).call()
    feeTake = sc.functions.feeTake().call()
    print("provider Price:", pprice)
    print("method Price:", mprice[5])
    print("feeTake:", feeTake)
    # Total cost = (provider + method price) * time, plus the /1000 fee.
    n = (pprice * buy_time) + (mprice[5] * buy_time)
    m = (n * feeTake) / 1000
    end_price = Web3.fromWei(m + n, 'ether')
    print(end_price)
    a = send_tx(
        sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .....m..."),
        accU1, value=end_price)
    self.assertEqual(a[1], 1)  # baseline buy succeeds
    p0 = sc.functions.hosts(accP0.address).call()  # snapshot of the provider's caps/usage
    print("maxram", p0[2], "maxCpus", p0[3], "maxGpus", p0[4], "maxInst", p0[5])
    print("usedRam", p0[6], "usedCpus", p0[7], "usedGpus", p0[8], "usedInst", p0[9])
    a = send_tx(sc.functions.changeCProviderData(p0[6] + 50, p0[3], p0[4], p0[5]), accP0)  # ram
    self.assertEqual(a[1], 1)
    a = send_tx(
        sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .....m..."),
        accU1, value=end_price)
    self.assertEqual(a[1], 0)  # not enough free ram for another instance
    a = send_tx(sc.functions.changeCProviderData(p0[6], p0[7], p0[4], p0[9]), accP0)  # gpu
    self.assertEqual(a[1], 1)
    a = send_tx(
        sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .....m..."),
        accU1, value=end_price)
    self.assertEqual(a[1], 0)  # caps equal current usage -> buy fails
    a = send_tx(sc.functions.changeCProviderData(p0[2], p0[7], p0[4], p0[5]), accP0)  # cpus
    self.assertEqual(a[1], 1)
    a = send_tx(
        sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .....m..."),
        accU1, value=end_price)
    self.assertEqual(a[1], 0)  # cpu cap still at current usage -> buy fails
    a = send_tx(sc.functions.changeCProviderData(p0[2], p0[3], p0[4], p0[9]), accP0)  # cpus
    self.assertEqual(a[1], 1)
    a = send_tx(
        sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .....m..."),
        accU1, value=end_price)
    self.assertEqual(a[1], 0)  # instance cap still at current usage -> buy fails
    a = send_tx(sc.functions.changeCProviderData(p0[2], p0[3], p0[4], p0[9] + 1), accP0)  # inst
    self.assertEqual(a[1], 1)
    a = send_tx(
        sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .....m..."),
        accU1, value=end_price)
    self.assertEqual(a[1], 1)  # one extra instance slot -> buy succeeds
    a = send_tx(sc.functions.changeCProviderData(p0[2], p0[3], p0[4], p0[5]), accP0)  # restore original caps
    self.assertEqual(a[1], 1)
    p0 = sc.functions.hosts(accP0.address).call()
    print("maxram", p0[2], "maxCpus", p0[3], "maxGpus", p0[4], "maxInst", p0[5])
    print("usedRam", p0[6], "usedCpus", p0[7], "usedGpus", p0[8], "usedInst", p0[9])
class Test_Buy_X(TestCase):
    """Deal settlement accounting, SAFU refunds, and admin configuration
    changes (ask-URL and oracle settings)."""

    def test_1_delivered(self):
        """Settle every still-open deal, alternating between
        delivered-without-error (funds credited to provider, method creator
        and fee account) and delivered-with-error (full refund to buyer),
        verifying the contract's internal eth balances move accordingly.

        NOTE(review): locked(dealId) appears to return
        (providerAmount, providerAddr, creatorAmount, creatorAddr,
        feeAmount, releaseBlock, buyerAddr) — inferred from the index
        usage below; confirm against the contract.
        """
        k_state = 0  # alternates 0/1 so consecutive deals exercise both branches
        for dealId in range(1, sc.functions.dealIdCounter().call()):
            ll = sc.functions.locked(dealId).call()
            print("locked:", ll)
            if ll[0] != 0:  # non-zero locked amount -> deal not yet settled
                # Internal eth balances before settlement.
                eth_provider = sc.functions.eth(ll[1]).call()
                eth_m_creator = sc.functions.eth(ll[3]).call()
                eth_fee = sc.functions.eth(sc.functions.feeAccount().call()).call()
                eth_buyer = sc.functions.eth(ll[6]).call()
                if k_state == 0:
                    print("delivered without error")
                    print(eth_provider, eth_m_creator, eth_fee, eth_buyer)
                    a = send_tx(sc.functions.delivered(dealId, False), accA)
                    self.assertEqual(a[1], 1)
                    ttt = sc.functions.locked(dealId).call()
                    # All locked amounts must be zeroed out...
                    self.assertEqual(ttt[0], 0)
                    self.assertEqual(ttt[2], 0)
                    self.assertEqual(ttt[4], 0)
                    # ...buyer unchanged; provider, creator and fee account credited.
                    self.assertEqual(eth_buyer, sc.functions.eth(ll[6]).call())
                    self.assertEqual(eth_provider + ll[0], sc.functions.eth(ll[1]).call())
                    self.assertEqual(eth_m_creator + ll[2], sc.functions.eth(ll[3]).call())
                    self.assertEqual(eth_fee + ll[4], sc.functions.eth(sc.functions.feeAccount().call()).call())
                    k_state = 1
                elif k_state == 1:
                    print("delivered with error")
                    print(eth_provider, eth_m_creator, eth_fee, eth_buyer)
                    a = send_tx(sc.functions.delivered(dealId, True), accA)
                    self.assertEqual(a[1], 1)
                    ttt = sc.functions.locked(dealId).call()
                    self.assertEqual(ttt[0], 0)
                    self.assertEqual(ttt[2], 0)
                    self.assertEqual(ttt[4], 0)
                    # On error the buyer gets everything back; nobody else is paid.
                    self.assertEqual(eth_buyer + ll[0] + ll[2] + ll[4], sc.functions.eth(ll[6]).call())
                    self.assertEqual(eth_provider, sc.functions.eth(ll[1]).call())
                    self.assertEqual(eth_m_creator, sc.functions.eth(ll[3]).call())
                    self.assertEqual(eth_fee, sc.functions.eth(sc.functions.feeAccount().call()).call())
                    k_state = 0
        print("end")
        # watch out that a user account is not also one of the others (admin, ...)
        # for i in allAcc.keys():
        #     print(i, allAcc[i].address, Web3.fromWei(sc.functions.eth(allAcc[i].address).call(), 'ether'))

    def test_4_SAFU_user(self):
        """SAFU refund path: the buyer's returnToBuyerSAFU must fail while
        the configured wait time has not elapsed, and succeed once the wait
        time is set to zero."""
        # get first working method: scan enabled providers and pick the first
        # (method, provider, price) combination with a non-zero provider price.
        method_id = -1
        pprice = -1
        provider_a = ""
        p = sc.functions.getProviders(1, sc.functions.hostCounter().call() + 1).call()
        arrayProviders = []
        for ww in p:
            if ww[1]:  # provider enabled flag (assumed from usage; confirm index)
                arrayProviders.append(ww[11])  # ww[11]: provider address (assumed; confirm)
        n = sc.functions.methodCounter().call()
        m = sc.functions.getMethods(1, n + 1).call()
        y = 0
        for x in m:
            # x[9]=onlyAllowedUsers, x[6]/x[7]=allowed/active flags
            # (assumed from the flag checks in the other tests; confirm).
            if not x[9] and x[6] and x[7]:
                print(x)
                gg = 0  # index into arrayProviders, advances with each price
                for pp in sc.functions.getPricesOfProviders(1+y, arrayProviders).call():
                    if pp != 0:
                        print(pp)
                        method_id = y+1
                        pprice = pp
                        provider_a = arrayProviders[gg]
                    gg += 1
            y += 1
        self.assertNotEqual(method_id, -1)
        self.assertNotEqual(pprice, -1)
        self.assertNotEqual(provider_a, "")
        #
        buy_time = 19
        mprice = sc.functions.aiMethods(method_id).call()
        feeTake = sc.functions.feeTake().call()
        n = (pprice * buy_time) + (mprice[5] * buy_time)
        m = (n * feeTake) / 1000
        end_price = Web3.fromWei(m + n, 'ether')
        print(end_price)
        # Phase 1: long wait time -> SAFU refund must be refused.
        a = send_tx(sc.functions.changeWaitTime(500, 500), accA)
        self.assertEqual(a[1], 1)
        a = send_tx(
            sc.functions.buy(method_id, provider_a, pprice, mprice[5], buy_time, "EC .....m..."),
            accU1, value=end_price)
        self.assertEqual(a[1], 1)
        dealId = sc.functions.dealIdCounter().call()
        ll = sc.functions.locked(dealId).call()
        print("release Block ", ll[5], " l ", w3.eth.block_number)
        # eth_provider = sc.functions.eth(ll[1]).call()
        # eth_m_creator = sc.functions.eth(ll[3]).call()
        # eth_fee = sc.functions.eth(sc.functions.feeAccount().call()).call()
        # eth_buyer = sc.functions.eth(ll[6]).call()
        time.sleep(30)
        a = send_tx(sc.functions.returnToBuyerSAFU(dealId), accU1)
        self.assertEqual(a[1], 0)  # refund too early: wait time not elapsed
        #
        # ----------------------------------------------------------------------------
        # Phase 2: zero wait time -> SAFU refund must succeed.
        a = send_tx(sc.functions.changeWaitTime(0, 0), accA)
        self.assertEqual(a[1], 1)
        a = send_tx(
            sc.functions.buy(method_id, provider_a, pprice, mprice[5], buy_time, "EC .....m..."),
            accU1, value=end_price)
        self.assertEqual(a[1], 1)
        dealId = sc.functions.dealIdCounter().call()
        ll = sc.functions.locked(dealId).call()
        print("release Block ", ll[5], " l ", w3.eth.block_number)
        time.sleep(30)
        a = send_tx(sc.functions.returnToBuyerSAFU(dealId), accU1)
        self.assertEqual(a[1], 1)

    def test_2_Withdraw(self):
        """Placeholder — withdraw scenario not implemented yet."""
        pass

    def test_3_BlockNumber(self):
        """Placeholder — block-number scenario not implemented yet."""
        pass

    def test_5_change_AskUrl(self):
        """Admin can change the two oracle ask-URL fragments; the real
        endpoint is restored afterwards for the oracle tests."""
        print(sc.functions.url0().call())
        print(sc.functions.url1().call())
        url0 = "debela mis .com ...//"
        url1 = "slon je lacen"
        a = send_tx(sc.functions.changeAskUrl(url0, url1), accA)
        self.assertEqual(a[1], 1)
        u0 = sc.functions.url0().call()
        u1 = sc.functions.url1().call()
        print(u0, u1)
        self.assertEqual(u0, url0)
        self.assertEqual(u1, url1)
        # Restore the production ask-URL pair.
        a = send_tx(sc.functions.changeAskUrl(
            "https://stream-ai-api.aleksvujic.fun/api/v1/account/isAiMethodAllowedForUser?aiMethodId=",
            "&userEthAddress="), accA)
        self.assertEqual(a[1], 1)

    def test_6_change_Oracle(self):
        """Admin can repoint the Chainlink oracle (address, payment, job id)."""
        # https://market.link/search/jobs?network=42&page=1&search=get%20bool
        print(sc.functions.oracleAddr().call())
        print(sc.functions.jobId().call())
        # print(Web3.toBytes(hexstr="1bc99b4b57034ae4bcc3a6b6f6daaede"))
        # print(Web3.toBytes(text="1bc99b4b57034ae4bcc3a6b6f6daaede"))
        # a = send_tx(sc.functions.changeOracle("0x1b666ad0d20bC4F35f218120d7ed1e2df60627cC", 100000000000000000,
        #             Web3.toBytes(text="1bc99b4b57034ae4bcc3a6b6f6daaede")), accA)
        a = send_tx(sc.functions.changeOracle("0x56dd6586DB0D08c6Ce7B2f2805af28616E082455", 100000000000000000,
                                              Web3.toBytes(text="1b2658f2d679437cb2d8db115c646d02")), accA)
        self.assertEqual(a[1], 1)
        print(sc.functions.oracleAddr().call())
        print(sc.functions.jobId().call())
class Test_Buy_Oracle(TestCase):
    """Buying allow-list-only methods, including the oracle-driven allow
    flow paid in LINK via transferAndCall."""

    mmm_id = 0  # NOTE(review): not referenced within this class as shown; confirm before removing

    def test_1_not_allowed(self):
        """Create a method restricted to allow-listed users; a user who is
        not on the list cannot buy it even though the method is allowed."""
        feeMake = Web3.fromWei(sc.functions.feeMake().call(), 'ether')
        # Last sell() argument group includes the onlyAllowedUsers flag
        # (assumed from m[9] checks below; confirm parameter order).
        a = send_tx(sc.functions.sell("Mask detection", 512, 1, True, Web3.toWei(0.00000013, 'ether'),
                                      "bafybeifk6r6ugz62kdrkeitqukase2fojvt6gfasafvzv3rykczww7qawm",
                                      True, "ec dockerHubLink"), accM0, value=feeMake)
        self.assertEqual(a[1], 1)
        method_id = sc.functions.methodCounter().call()  # id of the method just created
        a = send_tx(sc.functions.allowAiMethod(method_id, True), accA)
        self.assertEqual(a[1], 1)  # admin allows the method itself
        buy_time = 19
        pprice = sc.functions.getProviderPrice(method_id, accP0.address).call()
        mprice = sc.functions.aiMethods(method_id).call()
        feeTake = sc.functions.feeTake().call()
        n = (pprice * buy_time) + (mprice[5] * buy_time)
        m = (n * feeTake) / 1000
        end_price = Web3.fromWei(m + n, 'ether')
        a = send_tx(
            sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .....m..."),
            accU1, value=end_price)
        self.assertEqual(a[1], 0)  # accU1 is not on the method's user allow-list

    def test_2_allowed(self):
        """Admin allow-lists accX0 for the method; accX0 can then buy while
        accU1 still cannot."""
        method_id = sc.functions.methodCounter().call()
        m = sc.functions.aiMethods(method_id).call()
        self.assertTrue(m[9])  # if onlyAllowedUsers
        b = sc.functions.isUserAllowed(accX0.address, method_id).call()
        print(b)
        a = send_tx(sc.functions.allowUserToUseMethod(method_id, accX0.address, True), accA)
        self.assertEqual(a[1], 1)
        b = sc.functions.isUserAllowed(accX0.address, method_id).call()
        print(accX0.address, b)
        self.assertEqual(b, True)
        b = sc.functions.isUserAllowed(accU1.address, method_id).call()
        self.assertEqual(b, False)
        # Provider publishes a container cost for the new method.
        a = send_tx(sc.functions.setContainerCost(method_id, 10000000000), accP0)
        buy_time = 18
        pprice = sc.functions.getProviderPrice(method_id, accP0.address).call()
        mprice = sc.functions.aiMethods(method_id).call()
        feeTake = sc.functions.feeTake().call()
        print("provider Price:", pprice)
        print("method Price:", mprice[5])
        print("feeTake:", feeTake)
        n = (pprice * buy_time) + (mprice[5] * buy_time)
        m = (n * feeTake) / 1000
        end_price = Web3.fromWei(m + n, 'ether')
        a = send_tx(
            sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .....m..."),
            accX0, value=end_price)
        self.assertEqual(a[1], 1)  # allow-listed user can buy

    def test_3_allowed_false(self):
        """Removing accX0 from the allow-list makes the buy fail again."""
        method_id = sc.functions.methodCounter().call()
        m = sc.functions.aiMethods(method_id).call()
        print(m)
        self.assertTrue(m[9])  # if onlyAllowedUsers
        a = send_tx(sc.functions.allowUserToUseMethod(method_id, accX0.address, False), accA)
        self.assertEqual(a[1], 1)
        b = sc.functions.isUserAllowed(accX0.address, method_id).call()
        print(b)
        self.assertEqual(b, False)
        buy_time = 23
        pprice = sc.functions.getProviderPrice(method_id, accP0.address).call()
        mprice = sc.functions.aiMethods(method_id).call()
        feeTake = sc.functions.feeTake().call()
        n = (pprice * buy_time) + (mprice[5] * buy_time)
        m = (n * feeTake) / 1000
        end_price = Web3.fromWei(m + n, 'ether')
        a = send_tx(
            sc.functions.buy(method_id, accP0.address, pprice, mprice[5], buy_time, "EC .....m..."),
            accX0, value=end_price)
        self.assertEqual(a[1], 0)  # no longer allow-listed -> buy reverts

    def test_4_call_link(self):
        """Oracle-driven allow: paying LINK via transferAndCall with a
        payload of (user address + method id) should asynchronously flip
        isUserAllowed to True; poll until the oracle callback lands."""
        method_id = sc.functions.methodCounter().call()
        m = sc.functions.aiMethods(method_id).call()
        self.assertTrue(m[9])  # if onlyAllowedUsers
        b = sc.functions.isUserAllowed(accX1.address, method_id).call()
        if b:
            # Precondition: make sure accX1 starts out disallowed.
            a = send_tx(sc.functions.allowUserToUseMethod(method_id, accX1.address, False), accA)
            self.assertEqual(a[1], 1)
        b = sc.functions.isUserAllowed(accX1.address, method_id).call()
        self.assertEqual(b, False)
        a = send_tx(sc.functions.changeUserChecker(True), accA)
        self.assertEqual(a[1], 1)
        print(accX1.address, b)
        # Payload: 20-byte user address followed by the method id bytes.
        a = send_tx(scLink.functions.transferAndCall(sc.address, 100000000000000000, Web3.toBytes(
            hexstr=accX1.address[2:]) + Web3.toBytes(method_id)), accX0)
        self.assertEqual(a[1], 1)
        # Poll (max ~20 * 7s) for the asynchronous oracle fulfillment.
        c = 0
        while True:
            b = sc.functions.isUserAllowed(accX1.address, method_id).call()
            print(b)
            if b:
                print(b)
                break
            time.sleep(7)
            if c >= 20:
                self.assertTrue(False)  # oracle callback never arrived
            c += 1
        b = sc.functions.isUserAllowed(accX1.address, method_id).call()
        print(b)

    def test_5_call_link(self):
        """Oracle-driven flow for an already-allowed user: after the LINK
        payment the oracle answer is expected to flip accX0's allowance to
        False (the checker's verdict overrides the manual allow); poll for it."""
        method_id = sc.functions.methodCounter().call()
        m = sc.functions.aiMethods(method_id).call()
        self.assertTrue(m[9])  # if onlyAllowedUsers
        b = sc.functions.isUserAllowed(accX0.address, method_id).call()
        if not b:
            # Precondition: make sure accX0 starts out allowed.
            a = send_tx(sc.functions.allowUserToUseMethod(method_id, accX0.address, True), accA)
            self.assertEqual(a[1], 1)
        b = sc.functions.isUserAllowed(accX0.address, method_id).call()
        print(accX0.address, b)
        self.assertEqual(b, True)
        print(accX0.address[2:])
        print(Web3.toBytes(hexstr=accX0.address[2:]) + Web3.toBytes(method_id))
        a = send_tx(scLink.functions.transferAndCall(sc.address, 100000000000000000, Web3.toBytes(
            hexstr=accX0.address[2:]) + Web3.toBytes(method_id)), accX0)
        self.assertEqual(a[1], 1)
        # Poll (max ~20 * 7s) until the allowance flips to False.
        c = 0
        while True:
            b = sc.functions.isUserAllowed(accX0.address, method_id).call()
            print(b)
            if not b:
                print(b)
                break
            time.sleep(7)
            if c >= 20:
                self.assertTrue(False)  # oracle callback never arrived
            c += 1
        b = sc.functions.isUserAllowed(accX0.address, method_id).call()
        print(b)
| 39.118963
| 119
| 0.577713
| 4,184
| 33,212
| 4.476577
| 0.06979
| 0.137427
| 0.043566
| 0.092579
| 0.835184
| 0.817886
| 0.783129
| 0.774106
| 0.749706
| 0.732835
| 0
| 0.054844
| 0.273124
| 33,212
| 849
| 120
| 39.118963
| 0.721014
| 0.035379
| 0
| 0.67638
| 0
| 0.001534
| 0.056304
| 0.010373
| 0
| 0
| 0.001312
| 0
| 0.219325
| 1
| 0.044479
| false
| 0.003067
| 0.006135
| 0
| 0.058282
| 0.116564
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4a748dafe7e9673c63c15540d11f40bb5a121e06
| 181
|
py
|
Python
|
main.py
|
barnett617/python_analysis
|
cb8d9cdbdcdf4176853aff9eebc0c759c28e330b
|
[
"MIT"
] | 2
|
2020-07-27T16:16:10.000Z
|
2021-06-04T10:01:11.000Z
|
main.py
|
barnett617/python_analysis
|
cb8d9cdbdcdf4176853aff9eebc0c759c28e330b
|
[
"MIT"
] | null | null | null |
main.py
|
barnett617/python_analysis
|
cb8d9cdbdcdf4176853aff9eebc0c759c28e330b
|
[
"MIT"
] | 2
|
2019-05-09T03:44:20.000Z
|
2020-02-08T12:25:25.000Z
|
# -*-coding:utf-8-*-
import module_histogram
import module_line_graph
import module_pie_chart

# Render each demo chart in sequence. Each project module exposes a
# show_plt() entry point; presumably each call blocks until its plot
# window is closed — behavior lives in those modules, confirm there.
module_histogram.show_plt()
module_line_graph.show_plt()
module_pie_chart.show_plt()
| 18.1
| 28
| 0.828729
| 28
| 181
| 4.892857
| 0.428571
| 0.262774
| 0.218978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005952
| 0.071823
| 181
| 9
| 29
| 20.111111
| 0.809524
| 0.099448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4abaf8431eebab28704456f1a02a4e1ee7959bbc
| 195
|
py
|
Python
|
main/admin.py
|
vipinkhushu/xunbao2017
|
e5f225b50976d42b8a577170e4556b59ad4b13e0
|
[
"MIT"
] | null | null | null |
main/admin.py
|
vipinkhushu/xunbao2017
|
e5f225b50976d42b8a577170e4556b59ad4b13e0
|
[
"MIT"
] | null | null | null |
main/admin.py
|
vipinkhushu/xunbao2017
|
e5f225b50976d42b8a577170e4556b59ad4b13e0
|
[
"MIT"
] | null | null | null |
from django.contrib import admin

from .models import player, question, message, logs

# Register every game model with the default Django admin site,
# in the same order the original module registered them.
for _model in (player, question, message, logs):
    admin.site.register(_model)
| 27.857143
| 48
| 0.830769
| 28
| 195
| 5.785714
| 0.428571
| 0.222222
| 0.419753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061538
| 195
| 7
| 49
| 27.857143
| 0.885246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4355dd84162a531daa81062c375f3f0c7be09dc7
| 142
|
py
|
Python
|
pages/main_page.py
|
RezerF/course-project-selenium-stepik
|
2579c23dca637679eb43a898f299582becc475b8
|
[
"Unlicense"
] | null | null | null |
pages/main_page.py
|
RezerF/course-project-selenium-stepik
|
2579c23dca637679eb43a898f299582becc475b8
|
[
"Unlicense"
] | null | null | null |
pages/main_page.py
|
RezerF/course-project-selenium-stepik
|
2579c23dca637679eb43a898f299582becc475b8
|
[
"Unlicense"
] | null | null | null |
from .base_page import BasePage
from .locators import MainPageLocators  # NOTE(review): unused here; possibly kept for re-export — confirm
from .login_page import LoginPage  # NOTE(review): unused here; possibly kept for re-export — confirm


class MainPage(BasePage):
    """Page object for the site's main page; all behavior is inherited from BasePage."""
    pass
| 17.75
| 38
| 0.809859
| 18
| 142
| 6.277778
| 0.666667
| 0.176991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147887
| 142
| 7
| 39
| 20.285714
| 0.933884
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
43574fda0e286cd03624f2cece4b2f5a5eea573f
| 5,991
|
py
|
Python
|
cryptohack/crossed-wires/decrypt.py
|
onealmond/hacking-lab
|
631e615944add02db3c2afef47bf1de7171eb065
|
[
"MIT"
] | 9
|
2021-04-20T15:28:36.000Z
|
2022-03-08T19:53:48.000Z
|
cryptohack/crossed-wires/decrypt.py
|
onealmond/hacking-lab
|
631e615944add02db3c2afef47bf1de7171eb065
|
[
"MIT"
] | null | null | null |
cryptohack/crossed-wires/decrypt.py
|
onealmond/hacking-lab
|
631e615944add02db3c2afef47bf1de7171eb065
|
[
"MIT"
] | 6
|
2021-06-24T03:25:21.000Z
|
2022-02-20T21:44:52.000Z
|
#!/usr/bin/env python3
from Cryptodome.Util import number
# Since encryption was use friends' key, e and d are useless
N, _ = (21711308225346315542706844618441565741046498277716979943478360598053144971379956916575370343448988601905854572029635846626259487297950305231661109855854947494209135205589258643517961521594924368498672064293208230802441077390193682958095111922082677813175804775628884377724377647428385841831277059274172982280545237765559969228707506857561215268491024097063920337721783673060530181637161577401589126558556182546896783307370517275046522704047385786111489447064794210010802761708615907245523492585896286374996088089317826162798278528296206977900274431829829206103227171839270887476436899494428371323874689055690729986771, 2734411677251148030723138005716109733838866545375527602018255159319631026653190783670493107936401603981429171880504360560494771017246468702902647370954220312452541342858747590576273775107870450853533717116684326976263006435733382045807971890762018747729574021057430331778033982359184838159747331236538501849965329264774927607570410347019418407451937875684373454982306923178403161216817237890962651214718831954215200637651103907209347900857824722653217179548148145687181377220544864521808230122730967452981435355334932104265488075777638608041325256776275200067541533022527964743478554948792578057708522350812154888097)
# (N, e) pairs from friends
friend_keys = [(21711308225346315542706844618441565741046498277716979943478360598053144971379956916575370343448988601905854572029635846626259487297950305231661109855854947494209135205589258643517961521594924368498672064293208230802441077390193682958095111922082677813175804775628884377724377647428385841831277059274172982280545237765559969228707506857561215268491024097063920337721783673060530181637161577401589126558556182546896783307370517275046522704047385786111489447064794210010802761708615907245523492585896286374996088089317826162798278528296206977900274431829829206103227171839270887476436899494428371323874689055690729986771, 106979), (21711308225346315542706844618441565741046498277716979943478360598053144971379956916575370343448988601905854572029635846626259487297950305231661109855854947494209135205589258643517961521594924368498672064293208230802441077390193682958095111922082677813175804775628884377724377647428385841831277059274172982280545237765559969228707506857561215268491024097063920337721783673060530181637161577401589126558556182546896783307370517275046522704047385786111489447064794210010802761708615907245523492585896286374996088089317826162798278528296206977900274431829829206103227171839270887476436899494428371323874689055690729986771, 108533), (21711308225346315542706844618441565741046498277716979943478360598053144971379956916575370343448988601905854572029635846626259487297950305231661109855854947494209135205589258643517961521594924368498672064293208230802441077390193682958095111922082677813175804775628884377724377647428385841831277059274172982280545237765559969228707506857561215268491024097063920337721783673060530181637161577401589126558556182546896783307370517275046522704047385786111489447064794210010802761708615907245523492585896286374996088089317826162798278528296206977900274431829829206103227171839270887476436899494428371323874689055690729986771, 69557), 
(21711308225346315542706844618441565741046498277716979943478360598053144971379956916575370343448988601905854572029635846626259487297950305231661109855854947494209135205589258643517961521594924368498672064293208230802441077390193682958095111922082677813175804775628884377724377647428385841831277059274172982280545237765559969228707506857561215268491024097063920337721783673060530181637161577401589126558556182546896783307370517275046522704047385786111489447064794210010802761708615907245523492585896286374996088089317826162798278528296206977900274431829829206103227171839270887476436899494428371323874689055690729986771, 97117), (21711308225346315542706844618441565741046498277716979943478360598053144971379956916575370343448988601905854572029635846626259487297950305231661109855854947494209135205589258643517961521594924368498672064293208230802441077390193682958095111922082677813175804775628884377724377647428385841831277059274172982280545237765559969228707506857561215268491024097063920337721783673060530181637161577401589126558556182546896783307370517275046522704047385786111489447064794210010802761708615907245523492585896286374996088089317826162798278528296206977900274431829829206103227171839270887476436899494428371323874689055690729986771, 103231)]
c = 20304610279578186738172766224224793119885071262464464448863461184092225736054747976985179673905441502689126216282897704508745403799054734121583968853999791604281615154100736259131453424385364324630229671185343778172807262640709301838274824603101692485662726226902121105591137437331463201881264245562214012160875177167442010952439360623396658974413900469093836794752270399520074596329058725874834082188697377597949405779039139194196065364426213208345461407030771089787529200057105746584493554722790592530472869581310117300343461207750821737840042745530876391793484035024644475535353227851321505537398888106855012746117
# factorization of N
p = 134460556242811604004061671529264401215233974442536870999694816691450423689575549530215841622090861571494882591368883283016107051686642467260643894947947473532769025695530343815260424314855023688439603651834585971233941772580950216838838690315383700689885536546289584980534945897919914730948196240662991266027
q = 161469718942256895682124261315253003309512855995894840701317251772156087404025170146631429756064534716206164807382734456438092732743677793224010769460318383691408352089793973150914149255603969984103815563896440419666191368964699279209687091969164697704779792586727943470780308857107052647197945528236341228473
# Euler's totient of N, needed to derive each private exponent.
phi = (q-1)*(p-1)
# reverse encryption process: every friend key shares the same modulus N
# (visible above), and the ciphertext was encrypted with each public
# exponent in order — so decrypt by applying the matching private
# exponents in reverse order.
for key in friend_keys[::-1]:
    d = number.inverse(key[1], phi)  # d = e^-1 mod phi(N)
    c = pow(c, d, N)
print(number.long_to_bytes(c))
| 272.318182
| 3,157
| 0.972626
| 80
| 5,991
| 72.775
| 0.6375
| 0.003435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.946298
| 0.014689
| 5,991
| 21
| 3,158
| 285.285714
| 0.03998
| 0.025371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0.090909
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
438fb4992c15aa12585160724986297ff437d702
| 50
|
py
|
Python
|
wrappers/__init__.py
|
CN-UPB/python-mano-wrappers
|
8e3607feaa97bc3e2c906ee8e4b25b21853ea6cf
|
[
"Apache-2.0"
] | null | null | null |
wrappers/__init__.py
|
CN-UPB/python-mano-wrappers
|
8e3607feaa97bc3e2c906ee8e4b25b21853ea6cf
|
[
"Apache-2.0"
] | null | null | null |
wrappers/__init__.py
|
CN-UPB/python-mano-wrappers
|
8e3607feaa97bc3e2c906ee8e4b25b21853ea6cf
|
[
"Apache-2.0"
] | null | null | null |
from . import OSMClient
from . import SONATAClient
| 25
| 26
| 0.82
| 6
| 50
| 6.833333
| 0.666667
| 0.487805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14
| 50
| 2
| 26
| 25
| 0.953488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
439ee04280092da8e3e2fb476a94d0855a3a410c
| 38
|
py
|
Python
|
server/main/views/__init__.py
|
jphacks/TK_1905
|
f4af0a26bacedde415f9f873c917fbdb4910e386
|
[
"MIT"
] | 7
|
2019-10-26T05:44:14.000Z
|
2019-11-10T13:06:11.000Z
|
server/main/views/__init__.py
|
jphacks/TK_1905
|
f4af0a26bacedde415f9f873c917fbdb4910e386
|
[
"MIT"
] | 2
|
2019-11-07T16:28:36.000Z
|
2020-06-06T00:12:58.000Z
|
server/main/views/__init__.py
|
jphacks/TK_1905
|
f4af0a26bacedde415f9f873c917fbdb4910e386
|
[
"MIT"
] | null | null | null |
from .api import *
from .web import *
| 12.666667
| 18
| 0.684211
| 6
| 38
| 4.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 38
| 2
| 19
| 19
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
43d48d4af9983d49019d9d399d1288de878f4952
| 136
|
py
|
Python
|
dataPipelines/gc_crawler_status_tracker/config.py
|
Wildertrek/gamechanger-data
|
d087044594c722bd373cce1a48293d1a6da5d24e
|
[
"MIT"
] | 18
|
2021-04-20T20:34:01.000Z
|
2021-11-08T10:28:17.000Z
|
dataPipelines/gc_crawler_status_tracker/config.py
|
Wildertrek/gamechanger-data
|
d087044594c722bd373cce1a48293d1a6da5d24e
|
[
"MIT"
] | 15
|
2021-04-20T20:31:33.000Z
|
2022-03-18T16:00:44.000Z
|
dataPipelines/gc_crawler_status_tracker/config.py
|
ekmixon/gamechanger-crawlers
|
60a0cf20338fb3dc134eec117bccd519cede9288
|
[
"MIT"
] | 8
|
2021-04-23T11:38:26.000Z
|
2021-11-17T22:42:38.000Z
|
from configuration.utils import get_connection_helper_from_env
class Config:
    """Pipeline configuration namespace."""
    # Shared DB connection helper, built once at import time from environment variables.
    connection_helper = get_connection_helper_from_env()
| 22.666667
| 62
| 0.852941
| 18
| 136
| 5.944444
| 0.555556
| 0.448598
| 0.35514
| 0.429907
| 0.485981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110294
| 136
| 5
| 63
| 27.2
| 0.884298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
43d9bcaaa6296e2c48b39e0064e587018059e361
| 116
|
py
|
Python
|
app/frontend/__init__.py
|
rblack42/flask-inventory
|
48c0cfaf1ab10d0891c5af9d2b609e2b9e44ed74
|
[
"BSD-3-Clause"
] | null | null | null |
app/frontend/__init__.py
|
rblack42/flask-inventory
|
48c0cfaf1ab10d0891c5af9d2b609e2b9e44ed74
|
[
"BSD-3-Clause"
] | null | null | null |
app/frontend/__init__.py
|
rblack42/flask-inventory
|
48c0cfaf1ab10d0891c5af9d2b609e2b9e44ed74
|
[
"BSD-3-Clause"
] | null | null | null |
from flask import Blueprint

# Blueprint for the user-facing pages; view functions attach to it in views.py.
frontend_blueprint = Blueprint('frontend', __name__)

# Imported at the bottom so the views module can import frontend_blueprint
# from this package without a circular-import error.
from app.frontend import views
| 14.5
| 52
| 0.801724
| 14
| 116
| 6.285714
| 0.571429
| 0.386364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 116
| 7
| 53
| 16.571429
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
78e52b7cc185e858cab47c76a99b88cd05eace6a
| 84
|
py
|
Python
|
tests/test_pydent/test_models/models/test_collection.py
|
aquariumbio/trident
|
d1712cae544103fb145e3171894e4b35141f6813
|
[
"MIT"
] | 5
|
2019-01-21T11:12:05.000Z
|
2020-03-05T20:52:14.000Z
|
tests/test_pydent/test_models/models/test_collection.py
|
aquariumbio/pydent
|
d1712cae544103fb145e3171894e4b35141f6813
|
[
"MIT"
] | 28
|
2020-11-18T02:07:09.000Z
|
2021-06-08T15:49:41.000Z
|
tests/test_pydent/test_models/models/test_collection.py
|
aquariumbio/trident
|
d1712cae544103fb145e3171894e4b35141f6813
|
[
"MIT"
] | 2
|
2021-02-27T19:23:45.000Z
|
2021-09-14T10:29:07.000Z
|
from pydent.models import Collection
# TODO: mock tests for Collections and Parts
| 16.8
| 44
| 0.797619
| 12
| 84
| 5.583333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 84
| 4
| 45
| 21
| 0.957143
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6029b208ecebe8ef4127ac5645b9e1ecd61c1934
| 40,153
|
py
|
Python
|
mff/kernels/manybodykernel.py
|
alvarovm/mff
|
cd1b22b606dfd64d91dc94fece72ad6a707212af
|
[
"Apache-2.0"
] | 14
|
2019-03-22T18:57:34.000Z
|
2021-12-15T11:37:17.000Z
|
mff/kernels/manybodykernel.py
|
alvarovm/mff
|
cd1b22b606dfd64d91dc94fece72ad6a707212af
|
[
"Apache-2.0"
] | 4
|
2019-06-18T14:55:46.000Z
|
2019-11-26T19:34:59.000Z
|
mff/kernels/manybodykernel.py
|
alvarovm/mff
|
cd1b22b606dfd64d91dc94fece72ad6a707212af
|
[
"Apache-2.0"
] | 3
|
2019-08-05T14:42:20.000Z
|
2022-03-16T18:48:54.000Z
|
# -*- coding: utf-8 -*-
import logging
import os.path
import pickle
from abc import ABCMeta, abstractmethod
import numpy as np
from mff.kernels.base import Kernel, Mffpath
logger = logging.getLogger(__name__)
def dummy_calc_ff(data):
    """ Function used when multiprocessing.
    Computes force-force kernel values for a chunk of configuration pairs
    using the pickled theano kernel function.
    Args:
        data (list of objects): contains all the information required
            for the computation of the kernel values, in order:
            (list of configuration pairs, theta0, theta1, theta2, kernel type)
    Returns:
        result (array): the computed kernel values, shape (len(array), 3, 3)
    Raises:
        ValueError: if the kernel type is neither "single" nor "multi"
    """
    array, theta0, theta1, theta2, kertype = data
    if kertype == "single":
        pickle_name = "k3_ff_s.pickle"
    elif kertype == "multi":
        pickle_name = "k3_ff_m.pickle"
    else:
        # Previously an unknown kernel type left `fun` unbound and the call
        # below crashed with a NameError; fail fast with a clear message.
        raise ValueError("Unknown kernel type: %r" % (kertype,))
    with open(Mffpath / pickle_name, 'rb') as f:
        fun = pickle.load(f)
    result = np.zeros((len(array), 3, 3))
    for i in range(len(array)):
        # Central atom positions are fixed at the origin; the wrappers in the
        # kernel classes hide this argument from normal callers.
        result[i] = fun(np.zeros(3), np.zeros(3), array[i][0],
                        array[i][1], theta0, theta1, theta2)
    return result
def dummy_calc_ee(data):
    """ Function used when multiprocessing.
    Computes energy-energy kernel values for a chunk of configuration pairs
    using the pickled theano kernel function.
    Args:
        data (list of objects): contains all the information required
            for the computation of the kernel values, in order:
            (list of configuration pairs, theta0, theta1, theta2, kernel type)
    Returns:
        result (array): the computed kernel values, shape (len(array),)
    Raises:
        ValueError: if the kernel type is neither "single" nor "multi"
    """
    array, theta0, theta1, theta2, kertype = data
    if kertype == "single":
        pickle_name = "k3_ee_s.pickle"
    elif kertype == "multi":
        pickle_name = "k3_ee_m.pickle"
    else:
        # Previously an unknown kernel type left `fun` unbound and the call
        # below crashed with a NameError; fail fast with a clear message.
        raise ValueError("Unknown kernel type: %r" % (kertype,))
    with open(Mffpath / pickle_name, 'rb') as f:
        fun = pickle.load(f)
    result = np.zeros(len(array))
    for i in range(len(array)):
        # Sum the local kernel over every pair of local environments drawn
        # from the two global configurations of this pair.
        for conf1 in array[i][0]:
            for conf2 in array[i][1]:
                result[i] += fun(np.zeros(3), np.zeros(3),
                                 conf1, conf2, theta0, theta1, theta2)
    return result
def dummy_calc_ef(data):
    """ Function used when multiprocessing.
    Computes energy-force kernel values for a chunk of configuration pairs
    using the pickled theano kernel function.
    Args:
        data (list of objects): contains all the information required
            for the computation of the kernel values, in order:
            (list of configuration pairs, theta0, theta1, theta2, kernel type)
    Returns:
        result (array): the computed kernel values, shape (len(array), 3)
    Raises:
        ValueError: if the kernel type is neither "single" nor "multi"
    """
    array, theta0, theta1, theta2, kertype = data
    if kertype == "single":
        pickle_name = "k3_ef_s.pickle"
    elif kertype == "multi":
        pickle_name = "k3_ef_m.pickle"
    else:
        # Previously an unknown kernel type left `fun` unbound and the call
        # below crashed with a NameError; fail fast with a clear message.
        raise ValueError("Unknown kernel type: %r" % (kertype,))
    with open(Mffpath / pickle_name, 'rb') as f:
        fun = pickle.load(f)
    result = np.zeros((len(array), 3))
    for i in range(len(array)):
        conf2 = np.array(array[i][1], dtype='float')
        for conf1 in array[i][0]:
            conf1 = np.array(conf1, dtype='float')
            # Negated gradient: the pickled function is the derivative of the
            # energy kernel w.r.t. position, force is its negative.
            result[i] += -fun(np.zeros(3), np.zeros(3), conf1,
                              conf2, theta0, theta1, theta2)
    return result
class BaseManyBody(Kernel, metaclass=ABCMeta):
    """ Many body kernel class
    Handles the functions common to the single-species and
    multi-species three-body kernels.
    Args:
        kernel_name (str): To choose between single- and two-species kernel
        theta[0] (float) : lengthscale of the kernel
        theta[1] (float) : decay rate of the cutoff function
        theta[2] (float) : cutoff radius
        bounds (list) : bounds of the kernel function.
    Attributes:
        km_ee (object): Energy-energy kernel function
        km_ef (object): Energy-force kernel function
        km_ff (object): Force-force kernel function
    """
    @abstractmethod
    def __init__(self, kernel_name, theta, bounds):
        super().__init__(kernel_name)
        self.theta = theta
        self.bounds = bounds
        # Subclasses provide compile_theano(); compiling (or unpickling) the
        # theano kernel functions happens once, at construction time.
        self.km_ee, self.km_ef, self.km_ff = self.compile_theano()
    def calc(self, X1, X2, ncores=1):
        """
        Calculate the force-force kernel between two sets of configurations.
        Args:
            X1 (list): list of N1 Mx5 arrays containing xyz coordinates and atomic species
            X2 (list): list of N2 Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): number of CPU cores for multiprocessing (default 1)
        Returns:
            K (matrix): N1*3 x N2*3 matrix of the matrix-valued kernels
        """
        ker = np.zeros((len(X1) * 3, len(X2) * 3))
        if ncores > 1:
            # Build the full list of configuration pairs to distribute.
            confs = []
            for x1 in X1:
                for x2 in X2:
                    confs.append(np.asarray([x1, x2]))
            n = len(confs)
            import sys
            # Pickling the theano-compiled functions for the workers needs a
            # very deep recursion.
            sys.setrecursionlimit(100000)
            logger.info(
                'Using %i cores for the 3-body force-force kernel calculation' % (ncores))
            # Way to split the kernels functions to compute evenly across the nodes
            splitind = np.zeros(ncores + 1)
            factor = (n + (ncores - 1)) / ncores
            splitind[1:-1] = [(i + 1) * factor for i in np.arange(ncores - 1)]
            splitind[-1] = n
            splitind = splitind.astype(int)
            clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                      self.type] for i in np.arange(ncores)] # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
            import multiprocessing as mp
            pool = mp.Pool(ncores)
            result = pool.map(dummy_calc_ff, clist)
            pool.close()
            pool.join()
            result = np.concatenate(result).reshape((n, 3, 3))
            # Scatter the flat list of 3x3 blocks back into the kernel matrix.
            for i in range(len(X1)):
                for j in range(len(X2)):
                    ker[i * 3: i * 3 + 3, 3 * j:3 * j +
                        3] = result[(j + i * len(X2))]
        else:
            for i, conf1 in enumerate(X1):
                for j, conf2 in enumerate(X2):
                    ker[i * 3:i * 3 + 3, 3 * j:3 * j + 3] += self.km_ff(
                        conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
        return ker
    def calc_ef(self, X_glob, X, ncores=1, mapping = False):
        """
        Calculate the energy-force kernel between two sets of configurations.
        Args:
            X_glob (list): list of N1 global environments (lists of Mx5 arrays
                containing xyz coordinates and atomic species)
            X (list): list of N2 Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): number of CPU cores for multiprocessing (default 1)
            mapping (bool): unused here; kept for interface compatibility
        Returns:
            K (matrix): N1 x N2*3 matrix of the vector-valued kernels
        """
        ker = np.zeros((len(X_glob), len(X) * 3))
        if ncores > 1:
            confs = []
            for x1 in X_glob:
                for x2 in X:
                    confs.append(np.asarray([x1, x2]))
            n = len(confs)
            import sys
            # Needed to pickle the theano-compiled functions for the workers.
            sys.setrecursionlimit(100000)
            logger.info(
                'Using %i cores for the 3-body energy-force kernel calculation' % (ncores))
            # Way to split the kernels functions to compute evenly across the nodes
            splitind = np.zeros(ncores + 1)
            factor = (n + (ncores - 1)) / ncores
            splitind[1:-1] = [(i + 1) * factor for i in np.arange(ncores - 1)]
            splitind[-1] = n
            splitind = splitind.astype(int)
            clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                      self.type] for i in np.arange(ncores)] # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
            import multiprocessing as mp
            pool = mp.Pool(ncores)
            result = pool.map(dummy_calc_ef, clist)
            pool.close()
            pool.join()
            result = np.vstack(np.asarray(result))
            # Scatter the 3-vectors back into the kernel matrix rows.
            for i in range(len(X_glob)):
                for j in range(len(X)):
                    ker[i, 3 * j:3 * j + 3] = result[(j + i * len(X))]
        else:
            for i, x1 in enumerate(X_glob):
                for j, conf2 in enumerate(X):
                    # Sum contributions of every local environment in x1.
                    for conf1 in x1:
                        ker[i, 3 * j:3 * j + 3] += self.km_ef(
                            conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
        return ker
    def calc_ee(self, X1, X2, ncores=1, mapping = False):
        """
        Calculate the energy-energy kernel between two global environments.
        Args:
            X1 (list): list of N1 Mx5 arrays containing xyz coordinates and atomic species
            X2 (list): list of N2 Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): number of CPU cores for multiprocessing (default 1)
            mapping (bool): unused here; kept for interface compatibility
        Returns:
            K (matrix): N1 x N2 matrix of the scalar-valued kernels
        """
        if ncores > 1:  # Used for multiprocessing
            confs = []
            # Build a list of all input pairs which matrix needs to be computed
            for x1 in X1:
                for x2 in X2:
                    confs.append(np.asarray([x1, x2]))
            n = len(confs)
            import sys
            # Needed to pickle the theano-compiled functions for the workers.
            sys.setrecursionlimit(100000)
            logger.info(
                'Using %i cores for the 3-body energy-energy kernel calculation' % (ncores))
            # Way to split the kernels functions to compute evenly across the nodes
            splitind = np.zeros(ncores + 1)
            factor = (n + (ncores - 1)) / ncores
            splitind[1:-1] = [(i + 1) * factor for i in np.arange(ncores - 1)]
            splitind[-1] = n
            splitind = splitind.astype(int)
            clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                      self.type] for i in np.arange(ncores)] # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
            import multiprocessing as mp
            pool = mp.Pool(ncores)
            result = pool.map(dummy_calc_ee, clist)
            pool.close()
            pool.join()
            result = np.concatenate(result).ravel()
            ker = np.zeros((len(X1), len(X2)))
            for i in range(len(X1)):
                for j in range(len(X2)):
                    ker[i, j] = result[j + i*len(X2)]
        else:
            ker = np.zeros((len(X1), len(X2)))
            for i, x1 in enumerate(X1):
                for j, x2 in enumerate(X2):
                    # Global kernel: sum over all pairs of local environments.
                    for conf1 in x1:
                        for conf2 in x2:
                            ker[i, j] += self.km_ee(conf1, conf2,
                                                    self.theta[0], self.theta[1], self.theta[2])
        return ker
    def calc_gram(self, X, ncores=1, eval_gradient=False):
        """
        Calculate the force-force gram matrix for a set of configurations X.
        Args:
            X (list): list of N Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): Number of CPU nodes to use for multiprocessing (default is 1)
            eval_gradient (bool): if True, evaluate the gradient of the gram matrix
        Returns:
            gram (matrix): N*3 x N*3 gram matrix of the matrix-valued kernels
        """
        if eval_gradient:
            raise NotImplementedError('ERROR: GRADIENT NOT IMPLEMENTED YET')
        else:
            if ncores > 1:
                # Only the lower triangle (j <= i) is computed; the gram
                # matrix is symmetric.
                confs = []
                for i in np.arange(len(X)):
                    for j in np.arange(i + 1):
                        thislist = np.asarray([X[i], X[j]])
                        confs.append(thislist)
                n = len(confs)
                logger.info(
                    'Using %i cores for the many-body force-force gram matrix calculation' % (ncores))
                import sys
                # Needed to pickle the theano-compiled functions for the workers.
                sys.setrecursionlimit(100000)
                # Way to split the kernels functions to compute evenly across the nodes
                splitind = np.zeros(ncores + 1)
                factor = (n + (ncores - 1)) / ncores
                splitind[1:-1] = [(i + 1) *
                                  factor for i in np.arange(ncores - 1)]
                splitind[-1] = n
                splitind = splitind.astype(int)
                clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                          self.type] for i in np.arange(ncores)] # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
                import multiprocessing as mp
                pool = mp.Pool(ncores)
                result = pool.map(dummy_calc_ff, clist)
                pool.close()
                pool.join()
                result = np.concatenate(result).reshape((n, 3, 3))
                off_diag = np.zeros((len(X) * 3, len(X) * 3))
                diag = np.zeros((len(X) * 3, len(X) * 3))
                # Triangular index: pair (i, j<=i) lives at j + i*(i+1)//2.
                for i in np.arange(len(X)):
                    diag[3 * i:3 * i + 3, 3 * i:3 * i +
                         3] = result[i + i * (i + 1) // 2]
                    for j in np.arange(i):
                        off_diag[3 * i:3 * i + 3, 3 * j:3 *
                                 j + 3] = result[j + i * (i + 1) // 2]
            else:
                diag = np.zeros((X.shape[0] * 3, X.shape[0] * 3))
                off_diag = np.zeros((X.shape[0] * 3, X.shape[0] * 3))
                for i in np.arange(X.shape[0]):
                    diag[3 * i:3 * i + 3, 3 * i:3 * i + 3] = \
                        self.km_ff(X[i], X[i], self.theta[0],
                                   self.theta[1], self.theta[2])
                    for j in np.arange(i):
                        off_diag[3 * i:3 * i + 3, 3 * j:3 * j + 3] = \
                            self.km_ff(X[i], X[j], self.theta[0],
                                       self.theta[1], self.theta[2])
            # Symmetrize: lower triangle was computed, mirror it.
            gram = diag + off_diag + off_diag.T
            return gram
    def calc_gram_e(self, X, ncores=1, eval_gradient=False):  # Untested
        """
        Calculate the energy-energy gram matrix for a set of configurations X.
        Args:
            X (list): list of N Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): Number of CPU nodes to use for multiprocessing (default is 1)
            eval_gradient (bool): if True, evaluate the gradient of the gram matrix
        Returns:
            gram (matrix): N x N gram matrix of the scalar-valued kernels
        """
        if eval_gradient:
            raise NotImplementedError('ERROR: GRADIENT NOT IMPLEMENTED YET')
        else:
            if ncores > 1:
                confs = []
                # Build a list of all input pairs which matrix needs to be computed
                for i in np.arange(len(X)):
                    for j in np.arange(i + 1):
                        thislist = np.array([list(X[i]), list(X[j])])
                        confs.append(thislist)
                n = len(confs)
                import sys
                # Needed to pickle the theano-compiled functions for the workers.
                sys.setrecursionlimit(100000)
                logger.info(
                    'Using %i cores for the many-body energy-energy gram matrix calculation' % (ncores))
                # Way to split the kernels functions to compute evenly across the nodes
                splitind = np.zeros(ncores + 1)
                factor = (n + (ncores - 1)) / ncores
                splitind[1:-1] = [(i + 1) *
                                  factor for i in np.arange(ncores - 1)]
                splitind[-1] = n
                splitind = splitind.astype(int)
                clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                          self.type] for i in np.arange(ncores)] # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
                import multiprocessing as mp
                pool = mp.Pool(ncores)
                result = pool.map(dummy_calc_ee, clist)
                pool.close()
                pool.join()
                result = np.concatenate(result).ravel()
                off_diag = np.zeros((len(X), len(X)))
                diag = np.zeros((len(X), len(X)))
                # Triangular index: pair (i, j<=i) lives at j + i*(i+1)//2.
                for i in np.arange(len(X)):
                    diag[i, i] = result[i + i * (i + 1) // 2]
                    for j in np.arange(i):
                        off_diag[i, j] = result[j + i * (i + 1) // 2]
            else:
                diag = np.zeros((X.shape[0], X.shape[0]))
                off_diag = np.zeros((X.shape[0], X.shape[0]))
                for i in np.arange(X.shape[0]):
                    for k, conf1 in enumerate(X[i]):
                        diag[i, i] += self.km_ee(conf1, conf1,
                                                 self.theta[0], self.theta[1], self.theta[2])
                        for conf2 in X[i][:k]:
                            # *2 here to speed up the loop
                            diag[i, i] += 2.0*self.km_ee(
                                conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
                    for j in np.arange(i):
                        for conf1 in X[i]:
                            for conf2 in X[j]:
                                off_diag[i, j] += self.km_ee(
                                    conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
            gram = diag + off_diag + off_diag.T  # Gram matrix is symmetric
            return gram
    def calc_gram_ef(self, X, X_glob, ncores=1, eval_gradient=False):
        """
        Calculate the energy-force gram matrix for a set of configurations X.
        This returns a non-symmetric matrix which is equal to the transpose of
        the force-energy gram matrix.
        Args:
            X (list): list of N1 M1x5 arrays containing xyz coordinates and atomic species
            X_glob (list): list of N2 M2x5 arrays containing xyz coordinates and atomic species
            ncores (int): Number of CPU nodes to use for multiprocessing (default is 1)
            eval_gradient (bool): if True, evaluate the gradient of the gram matrix
        Returns:
            gram (matrix): N2 x N1*3 gram matrix of the vector-valued kernels
        """
        gram = np.zeros((X_glob.shape[0], X.shape[0] * 3))
        if eval_gradient:
            raise NotImplementedError('ERROR: GRADIENT NOT IMPLEMENTED YET')
        else:
            if ncores > 1:  # Multiprocessing
                confs = []
                for i in np.arange(len(X_glob)):
                    for j in np.arange(len(X)):
                        thislist = np.asarray([X_glob[i], X[j]])
                        confs.append(thislist)
                n = len(confs)
                import sys
                # Needed to pickle the theano-compiled functions for the workers.
                sys.setrecursionlimit(100000)
                logger.info(
                    'Using %i cores for the many-body energy-force gram matrix calculation' % (ncores))
                # Way to split the kernels functions to compute evenly across the nodes
                splitind = np.zeros(ncores + 1)
                factor = (n + (ncores - 1)) / ncores
                splitind[1:-1] = [(i + 1) *
                                  factor for i in np.arange(ncores - 1)]
                splitind[-1] = n
                splitind = splitind.astype(int)
                clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                          self.type] for i in np.arange(ncores)] # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
                import multiprocessing as mp
                pool = mp.Pool(ncores)
                result = pool.map(dummy_calc_ef, clist)
                pool.close()
                pool.join()
                result = np.concatenate(result).ravel()
                # Each pair contributes 3 consecutive entries of the flat result.
                for i in np.arange(X_glob.shape[0]):
                    for j in np.arange(X.shape[0]):
                        gram[i, 3 * j:3 * j + 3] = result[3 *
                                                          (j + i * X.shape[0]):3 + 3*(j + i * X.shape[0])]
            else:
                for i in np.arange(X_glob.shape[0]):
                    for j in np.arange(X.shape[0]):
                        for k in X_glob[i]:
                            gram[i, 3 * j:3 * j + 3] += self.km_ef(
                                k, X[j], self.theta[0], self.theta[1], self.theta[2])
            # Cached for later reuse by callers.
            self.gram_ef = gram
            return gram
    def calc_diag(self, X):
        # Diagonal of the force-force gram matrix: 3 entries per configuration.
        diag = np.zeros((X.shape[0] * 3))
        for i in np.arange(X.shape[0]):
            diag[i * 3:(i + 1) * 3] = np.diag(self.km_ff(X[i], X[i],
                                                         self.theta[0], self.theta[1], self.theta[2]))
        return diag
    def calc_diag_e(self, X):
        # Diagonal of the energy-energy gram matrix: one entry per configuration.
        diag = np.zeros((X.shape[0]))
        for i in np.arange(X.shape[0]):
            diag[i] = self.km_ee(X[i], X[i], self.theta[0],
                                 self.theta[1], self.theta[2])
        return diag
    @staticmethod
    @abstractmethod
    def compile_theano():
        # Subclasses return the (km_ee, km_ef, km_ff) kernel functions.
        return None, None, None
class ManyBodySingleSpeciesKernel(BaseManyBody):
    """Many body single species kernel.
    Args:
        theta[0] (float): lengthscale of the kernel
        theta[1] (float): decay rate of the cutoff function
        theta[2] (float): cutoff radius
    """
    def __init__(self, theta=(1., 1., 1.), bounds=((1e-2, 1e2), (1e-2, 1e2), (1e-2, 1e2))):
        super().__init__(kernel_name='ManyBodySingleSpecies', theta=theta, bounds=bounds)
        # Selects the single-species pickled kernels in the dummy_calc_* workers.
        self.type = "single"
    @staticmethod
    def compile_theano():
        """
        This function generates theano compiled kernels for energy and force learning
        ker_jkmn_withcutoff = ker_jkmn #* cutoff_ikmn
        The position of the atoms relative to the central one, and their chemical species
        are defined by a matrix of dimension Mx5
        Returns:
            km_ee (func): energy-energy kernel
            km_ef (func): energy-force kernel
            km_ff (func): force-force kernel
        """
        # Compile only if the pickled kernels are not already cached on disk.
        if not (os.path.exists(Mffpath / 'k3_ee_s.pickle') and
                os.path.exists(Mffpath / 'k3_ef_s.pickle') and os.path.exists(Mffpath / 'k3_ff_s.pickle')):
            print("Building Kernels")
            import theano.tensor as T
            from theano import function, scan
            logger.info("Started compilation of theano three body kernels")
            # --------------------------------------------------
            # INITIAL DEFINITIONS
            # --------------------------------------------------
            # positions of central atoms
            r1, r2 = T.dvectors('r1d', 'r2d')
            # positions of neighbours
            rho1, rho2 = T.dmatrices('rho1', 'rho2')
            # hyperparameter
            sig = T.dscalar('sig')
            # cutoff hyperparameters
            theta = T.dscalar('theta')
            rc = T.dscalar('rc')
            # positions of neighbours without chemical species
            rho1s = rho1[:, 0:3]
            rho2s = rho2[:, 0:3]
            # --------------------------------------------------
            # RELATIVE DISTANCES TO CENTRAL VECTOR AND BETWEEN NEIGHBOURS
            # --------------------------------------------------
            # first and second configuration
            r1j = T.sqrt(T.sum((rho1s[:, :] - r1[None, :]) ** 2, axis=1))
            r2m = T.sqrt(T.sum((rho2s[:, :] - r2[None, :]) ** 2, axis=1))
            rjk = T.sqrt(
                T.sum((rho1s[None, :, :] - rho1s[:, None, :]) ** 2, axis=2))
            rmn = T.sqrt(
                T.sum((rho2s[None, :, :] - rho2s[:, None, :]) ** 2, axis=2))
            # --------------------------------------------------
            # BUILD THE KERNEL
            # --------------------------------------------------
            # Squared exp of differences
            se_1j2m = T.exp(-(r1j[:, None] - r2m[None, :])
                            ** 2 / (2 * sig ** 2))
            se_jkmn = T.exp(-(rjk[:, :, None, None] -
                              rmn[None, None, :, :]) ** 2 / (2 * sig ** 2))
            se_jk2m = T.exp(-(rjk[:, :, None] -
                              r2m[None, None, :]) ** 2 / (2 * sig ** 2))
            se_1jmn = T.exp(-(r1j[:, None, None] -
                              rmn[None, :, :]) ** 2 / (2 * sig ** 2))
            # Kernel not summed (cyclic permutations)
            k1n = (se_1j2m[:, None, :, None] *
                   se_1j2m[None, :, None, :] * se_jkmn)
            k2n = (se_1jmn[:, None, :, :] * se_jk2m[:, :,
                                                    None, :] * se_1j2m[None, :, :, None])
            k3n = (se_1j2m[:, None, None, :] *
                   se_jk2m[:, :, :, None] * se_1jmn[None, :, :, :])
            # final shape is M1 M1 M2 M2
            ker = k1n + k2n + k3n
            # Smooth cosine cutoff on every distance entering each triplet.
            cut_j = 0.5*(1+T.cos(np.pi*r1j/rc))
            cut_m = 0.5*(1+T.cos(np.pi*r2m/rc))
            cut_jk = cut_j[:,None]*cut_j[None,:]*0.5*(1+T.cos(np.pi*rjk/rc))
            cut_mn = cut_m[:,None]*cut_m[None,:]*0.5*(1+T.cos(np.pi*rmn/rc))
            # --------------------------------------------------
            # REMOVE DIAGONAL ELEMENTS AND ADD CUTOFF
            # --------------------------------------------------
            # remove diagonal elements AND lower triangular ones from first configuration
            mask_jk = T.triu(T.ones_like(rjk)) - T.identity_like(rjk)
            # remove diagonal elements from second configuration
            mask_mn = T.ones_like(rmn) - T.identity_like(rmn)
            # Combine masks
            mask_jkmn = mask_jk[:, :, None, None] * mask_mn[None, None, :, :]
            # Apply mask and then apply cutoff functions
            ker = ker * mask_jkmn
            ker = T.sum(ker * cut_jk[:, :, None, None]
                        * cut_mn[None, None, :, :])
            # NOTE(review): exponentiating the summed 3-body kernel makes this
            # a "many body" kernel; 1000 is presumably a normalization scale —
            # confirm against the accompanying publication.
            ker = T.exp(ker / 1000)
            # --------------------------------------------------
            # FINAL FUNCTIONS
            # --------------------------------------------------
            # global energy energy kernel
            k_ee_fun = function(
                [r1, r2, rho1, rho2, sig, theta, rc], ker, on_unused_input='ignore')
            # global energy force kernel
            k_ef = T.grad(ker, r2)
            k_ef_fun = function(
                [r1, r2, rho1, rho2, sig, theta, rc], k_ef, on_unused_input='ignore')
            # local force force kernel
            k_ff = T.grad(ker, r1)
            k_ff_der, updates = scan(lambda j, k_ff, r2: T.grad(k_ff[j], r2),
                                     sequences=T.arange(k_ff.shape[0]), non_sequences=[k_ff, r2])
            k_ff_fun = function(
                [r1, r2, rho1, rho2, sig, theta, rc], k_ff_der, on_unused_input='ignore')
            # Save the function that we want to use for multiprocessing
            # This is necessary because theano is a crybaby and does not want to access the
            # automatically stored compiled object from different processes
            with open(Mffpath / 'k3_ee_s.pickle', 'wb') as f:
                pickle.dump(k_ee_fun, f)
            with open(Mffpath / 'k3_ef_s.pickle', 'wb') as f:
                pickle.dump(k_ef_fun, f)
            with open(Mffpath / 'k3_ff_s.pickle', 'wb') as f:
                pickle.dump(k_ff_fun, f)
        else:
            # Cached compiled kernels found on disk: load instead of rebuilding.
            print("Loading Kernels")
            with open(Mffpath / "k3_ee_s.pickle", 'rb') as f:
                k_ee_fun = pickle.load(f)
            with open(Mffpath / "k3_ef_s.pickle", 'rb') as f:
                k_ef_fun = pickle.load(f)
            with open(Mffpath / "k3_ff_s.pickle", 'rb') as f:
                k_ff_fun = pickle.load(f)
        # WRAPPERS (we don't want to plug the position of the central element every time)
        def km_ee(conf1, conf2, sig, theta, rc):
            """
            Many body kernel for global energy-energy correlation
            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]
            Returns:
                kernel (float): scalar valued energy-energy many-body kernel
            """
            return k_ee_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)
        def km_ef(conf1, conf2, sig, theta, rc):
            """
            Many body kernel for global energy-force correlation
            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]
            Returns:
                kernel (array): 3x1 energy-force many-body kernel
            """
            return -k_ef_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)
        def km_ff(conf1, conf2, sig, theta, rc):
            """
            Many body kernel for local force-force correlation
            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]
            Returns:
                kernel (matrix): 3x3 force-force 3-body kernel
            """
            return k_ff_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)
        logger.info("Ended compilation of theano three body kernels")
        return km_ee, km_ef, km_ff
class ManyBodyManySpeciesKernel(BaseManyBody):
    """Many body many species kernel.
    Args:
        theta[0] (float): lengthscale of the kernel
        theta[1] (float): decay rate of the cutoff function
        theta[2] (float): cutoff radius
    """
    def __init__(self, theta=(1., 1., 1.), bounds=((1e-2, 1e2), (1e-2, 1e2), (1e-2, 1e2))):
        super().__init__(kernel_name='ManyBodyManySpecies', theta=theta, bounds=bounds)
        # Selects the multi-species pickled kernels in the dummy_calc_* workers.
        self.type = "multi"
    @staticmethod
    def compile_theano():
        """
        This function generates theano compiled kernels for energy and force learning
        ker_jkmn_withcutoff = ker_jkmn #* cutoff_ikmn
        The position of the atoms relative to the central one, and their chemical species
        are defined by a matrix of dimension Mx5
        Returns:
            km_ee (func): energy-energy kernel
            km_ef (func): energy-force kernel
            km_ff (func): force-force kernel
        """
        # Compile only if the pickled kernels are not already cached on disk.
        if not (os.path.exists(Mffpath / 'k3_ee_m.pickle') and
                os.path.exists(Mffpath / 'k3_ef_m.pickle') and os.path.exists(Mffpath / 'k3_ff_m.pickle')):
            print("Building Kernels")
            import theano.tensor as T
            from theano import function, scan
            logger.info("Started compilation of theano three body kernels")
            # --------------------------------------------------
            # INITIAL DEFINITIONS
            # --------------------------------------------------
            # positions of central atoms
            r1, r2 = T.dvectors('r1d', 'r2d')
            # positions of neighbours
            rho1, rho2 = T.dmatrices('rho1', 'rho2')
            # hyperparameter
            sig = T.dscalar('sig')
            # cutoff hyperparameters
            theta = T.dscalar('theta')
            rc = T.dscalar('rc')
            # positions of neighbours without chemical species
            rho1s = rho1[:, 0:3]
            rho2s = rho2[:, 0:3]
            # Chemical species columns: column 3 is the central atom's species,
            # column 4 the neighbour's species (same column reused for the
            # j/k and m/n neighbour indices).
            alpha_1 = rho1[:, 3].flatten()
            alpha_2 = rho2[:, 3].flatten()
            alpha_j = rho1[:, 4].flatten()
            alpha_m = rho2[:, 4].flatten()
            alpha_k = rho1[:, 4].flatten()
            alpha_n = rho2[:, 4].flatten()
            # --------------------------------------------------
            # RELATIVE DISTANCES TO CENTRAL VECTOR AND BETWEEN NEIGHBOURS
            # --------------------------------------------------
            # first and second configuration
            r1j = T.sqrt(T.sum((rho1s[:, :] - r1[None, :]) ** 2, axis=1))
            r2m = T.sqrt(T.sum((rho2s[:, :] - r2[None, :]) ** 2, axis=1))
            rjk = T.sqrt(
                T.sum((rho1s[None, :, :] - rho1s[:, None, :]) ** 2, axis=2))
            rmn = T.sqrt(
                T.sum((rho2s[None, :, :] - rho2s[:, None, :]) ** 2, axis=2))
            # --------------------------------------------------
            # CHEMICAL SPECIES MASK
            # --------------------------------------------------
            # numerical kronecker: narrow gaussian approximating delta(a1j, a2m)
            def delta_alpha2(a1j, a2m):
                d = np.exp(-(a1j - a2m) ** 2 / (2 * 0.00001 ** 2))
                return d
            # permutation 1
            delta_alphas12 = delta_alpha2(alpha_1[0], alpha_2[0])
            delta_alphasjm = delta_alpha2(alpha_j[:, None], alpha_m[None, :])
            delta_alphas_jmkn = delta_alphasjm[:, None,
                                               :, None] * delta_alphasjm[None, :, None, :]
            delta_perm1 = delta_alphas12 * delta_alphas_jmkn
            # permutation 3
            delta_alphas1m = delta_alpha2(
                alpha_1[0, None], alpha_m[None, :]).flatten()
            delta_alphasjn = delta_alpha2(alpha_j[:, None], alpha_n[None, :])
            delta_alphask2 = delta_alpha2(
                alpha_k[:, None], alpha_2[None, 0]).flatten()
            delta_perm3 = delta_alphas1m[None, None, :, None] * delta_alphasjn[:, None, None, :] * \
                delta_alphask2[None, :, None, None]
            # permutation 5
            delta_alphas1n = delta_alpha2(
                alpha_1[0, None], alpha_n[None, :]).flatten()
            delta_alphasj2 = delta_alpha2(
                alpha_j[:, None], alpha_2[None, 0]).flatten()
            delta_alphaskm = delta_alpha2(alpha_k[:, None], alpha_m[None, :])
            delta_perm5 = delta_alphas1n[None, None, None, :] * delta_alphaskm[None, :, :, None] * \
                delta_alphasj2[:, None, None, None]
            # --------------------------------------------------
            # BUILD THE KERNEL
            # --------------------------------------------------
            # Squared exp of differences
            se_1j2m = T.exp(-(r1j[:, None] - r2m[None, :])
                            ** 2 / (2 * sig ** 2))
            se_jkmn = T.exp(-(rjk[:, :, None, None] -
                              rmn[None, None, :, :]) ** 2 / (2 * sig ** 2))
            se_jk2m = T.exp(-(rjk[:, :, None] -
                              r2m[None, None, :]) ** 2 / (2 * sig ** 2))
            se_1jmn = T.exp(-(r1j[:, None, None] -
                              rmn[None, :, :]) ** 2 / (2 * sig ** 2))
            # Kernel not summed (cyclic permutations)
            k1n = (se_1j2m[:, None, :, None] *
                   se_1j2m[None, :, None, :] * se_jkmn)
            k2n = (se_1jmn[:, None, :, :] * se_jk2m[:, :,
                                                    None, :] * se_1j2m[None, :, :, None])
            k3n = (se_1j2m[:, None, None, :] *
                   se_jk2m[:, :, :, None] * se_1jmn[None, :, :, :])
            # final shape is M1 M1 M2 M2
            # Each cyclic permutation is weighted by its species-matching mask.
            ker_loc = k1n * delta_perm1 + k2n * delta_perm3 + k3n * delta_perm5
            # Faster version of cutoff (less calculations)
            cut_j = 0.5*(1+T.cos(np.pi*r1j/rc))
            cut_m = 0.5*(1+T.cos(np.pi*r2m/rc))
            cut_jk = cut_j[:,None]*cut_j[None,:]*0.5*(1+T.cos(np.pi*rjk/rc))
            cut_mn = cut_m[:,None]*cut_m[None,:]*0.5*(1+T.cos(np.pi*rmn/rc))
            # --------------------------------------------------
            # REMOVE DIAGONAL ELEMENTS
            # --------------------------------------------------
            # remove diagonal elements AND lower triangular ones from first configuration
            mask_jk = T.triu(T.ones_like(rjk)) - T.identity_like(rjk)
            # remove diagonal elements from second configuration
            mask_mn = T.ones_like(rmn) - T.identity_like(rmn)
            # Combine masks
            mask_jkmn = mask_jk[:, :, None, None] * mask_mn[None, None, :, :]
            # Apply mask and then apply cutoff functions
            ker_loc = ker_loc * mask_jkmn
            ker_loc = T.sum(
                ker_loc * cut_jk[:, :, None, None] * cut_mn[None, None, :, :])
            # NOTE(review): exponentiation turns the summed 3-body kernel into a
            # many-body kernel; the scale 20 differs from the single-species
            # value (1000) — confirm this asymmetry is intentional.
            ker_loc = T.exp(ker_loc / 20)
            # --------------------------------------------------
            # FINAL FUNCTIONS
            # --------------------------------------------------
            # energy energy kernel
            k_ee_fun = function(
                [r1, r2, rho1, rho2, sig, theta, rc], ker_loc, on_unused_input='ignore')
            # energy force kernel
            k_ef_cut = T.grad(ker_loc, r2)
            k_ef_fun = function(
                [r1, r2, rho1, rho2, sig, theta, rc], k_ef_cut, on_unused_input='ignore')
            # force force kernel
            k_ff_cut = T.grad(ker_loc, r1)
            k_ff_cut_der, updates = scan(lambda j, k_ff_cut, r2: T.grad(k_ff_cut[j], r2),
                                         sequences=T.arange(k_ff_cut.shape[0]), non_sequences=[k_ff_cut, r2])
            k_ff_fun = function(
                [r1, r2, rho1, rho2, sig, theta, rc], k_ff_cut_der, on_unused_input='ignore')
            # Save the function that we want to use for multiprocessing
            # This is necessary because theano is a crybaby and does not want to access the
            # automatically stored compiled object from different processes
            with open(Mffpath / 'k3_ee_m.pickle', 'wb') as f:
                pickle.dump(k_ee_fun, f)
            with open(Mffpath / 'k3_ef_m.pickle', 'wb') as f:
                pickle.dump(k_ef_fun, f)
            with open(Mffpath / 'k3_ff_m.pickle', 'wb') as f:
                pickle.dump(k_ff_fun, f)
        else:
            # Cached compiled kernels found on disk: load instead of rebuilding.
            print("Loading Kernels")
            with open(Mffpath / "k3_ee_m.pickle", 'rb') as f:
                k_ee_fun = pickle.load(f)
            with open(Mffpath / "k3_ef_m.pickle", 'rb') as f:
                k_ef_fun = pickle.load(f)
            with open(Mffpath / "k3_ff_m.pickle", 'rb') as f:
                k_ff_fun = pickle.load(f)
        # WRAPPERS (we don't want to plug the position of the central element every time)
        def km_ee(conf1, conf2, sig, theta, rc):
            """
            Many body kernel for energy-energy correlation
            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]
            Returns:
                kernel (float): scalar valued energy-energy many-body kernel
            """
            return k_ee_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)
        def km_ef(conf1, conf2, sig, theta, rc):
            """
            Many body kernel for energy-force correlation
            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]
            Returns:
                kernel (array): 3x1 energy-force many-body kernel
            """
            return -k_ef_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)
        def km_ff(conf1, conf2, sig, theta, rc):
            """
            Many body kernel for force-force correlation
            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]
            Returns:
                kernel (matrix): 3x3 force-force many-body kernel
            """
            return k_ff_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)
        logger.info("Ended compilation of theano many body kernels")
        return km_ee, km_ef, km_ff
| 39.993028
| 114
| 0.493014
| 4,858
| 40,153
| 3.988884
| 0.080486
| 0.02508
| 0.018062
| 0.010734
| 0.865982
| 0.838631
| 0.820054
| 0.791568
| 0.772113
| 0.749665
| 0
| 0.033301
| 0.366548
| 40,153
| 1,003
| 115
| 40.032901
| 0.728563
| 0.272333
| 0
| 0.660886
| 0
| 0
| 0.046983
| 0.000762
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046243
| false
| 0
| 0.042389
| 0.001927
| 0.134875
| 0.007707
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6049de05de393fd3e880f87515b8150f65e31371
| 3,514
|
py
|
Python
|
home/pi/blissflixx/chls/bfch_r_shortfilms/__init__.py
|
erick-guerra/Royalbox
|
967dbbdddc94b9968e6eba873f0d20328fd86f66
|
[
"MIT"
] | 1
|
2022-01-29T11:17:58.000Z
|
2022-01-29T11:17:58.000Z
|
home/pi/blissflixx/chls/bfch_r_shortfilms/__init__.py
|
erick-guerra/Royalbox
|
967dbbdddc94b9968e6eba873f0d20328fd86f66
|
[
"MIT"
] | null | null | null |
home/pi/blissflixx/chls/bfch_r_shortfilms/__init__.py
|
erick-guerra/Royalbox
|
967dbbdddc94b9968e6eba873f0d20328fd86f66
|
[
"MIT"
] | null | null | null |
import chanutils.reddit
# Subreddit queried by search().
_SUBREDDIT = 'Shortfilms'
# Feed definitions shown in the channel: 'Hot'/'New' listings plus
# per-genre searches filtering on the subreddit's flair codes (f01...).
_FEEDLIST = [
  {'title':'Hot', 'url':'http://www.reddit.com/r/Shortfilms.json'},
  {'title':'New', 'url':'http://www.reddit.com/r/Shortfilms/new.json'},
  {'title':'Action & Adventure', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f01%27+&sort=top&restrict_sr=on'},
  {'title':'Animation', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f02%27+&sort=top&restrict_sr=on'},
  {'title':'Art Films', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f17%27+&sort=top&restrict_sr=on'},
  {'title':'Comedy', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f05%27+&sort=top&restrict_sr=on'},
  {'title':'Crime', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f06%27+&sort=top&restrict_sr=on'},
  {'title':'Documentary', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f07%27+&sort=top&restrict_sr=on'},
  {'title':'Drama', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f08%27+&sort=top&restrict_sr=on'},
  {'title':'Experimental', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f09%27+&sort=top&restrict_sr=on'},
  {'title':'Film Noir', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f10%27+&sort=top&restrict_sr=on'},
  {'title':'Gay & Lesbian', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f11%27+&sort=top&restrict_sr=on'},
  {'title':'Horror', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f12%27+&sort=top&restrict_sr=on'},
  {'title':'Musical', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f13%27+&sort=top&restrict_sr=on'},
  {'title':'Mystery', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f14%27+&sort=top&restrict_sr=on'},
  {'title':'Parody', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f26%27+&sort=top&restrict_sr=on'},
  {'title':'Romance', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f16%27+&sort=top&restrict_sr=on'},
  {'title':'Sci-Fi & Fantasy', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f03%27+&sort=top&restrict_sr=on'},
  {'title':'Surreal', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f23%27+&sort=top&restrict_sr=on'},
  {'title':'Thriller', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f18%27+&sort=top&restrict_sr=on'},
  {'title':'War', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f19%27+&sort=top&restrict_sr=on'},
  {'title':'Western', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f20%27+&sort=top&restrict_sr=on'},
  {'title':'World Cinema', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f21%27+&sort=top&restrict_sr=on'},
  {'title':'Amateur', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f22%27+&sort=top&restrict_sr=on'},
  {'title':'Genre Defying', 'url':'http://www.reddit.com/r/Shortfilms/search.json?q=flair%3A%27f27%27+&sort=top&restrict_sr=on'},
]
def name():
  """Return the channel's display name."""
  return 'Short Films'
def image():
  """Return the channel icon filename."""
  return "icon.png"
def description():
  """Return the channel description (HTML link to the subreddit)."""
  return "Short Films from /r/Shortfilms subreddit (<a target='_blank' href='http://www.reddit.com/r/Shortfilms'>http://www.reddit.com/r/Shortfilms</a>)."
def feedlist():
  """Return the static list of feed definitions for this channel."""
  return _FEEDLIST
def feed(idx):
  """Fetch the reddit feed for the feed-list entry at index idx."""
  entry = _FEEDLIST[idx]
  return chanutils.reddit.get_feed(entry)
def search(q):
  """Search the channel's subreddit for query string q."""
  return chanutils.reddit.search(_SUBREDDIT, q)
| 70.28
| 154
| 0.700341
| 577
| 3,514
| 4.213172
| 0.164645
| 0.126697
| 0.144385
| 0.177705
| 0.744961
| 0.744961
| 0.714109
| 0.454134
| 0.454134
| 0.454134
| 0
| 0.048175
| 0.048947
| 3,514
| 49
| 155
| 71.714286
| 0.679234
| 0
| 0
| 0
| 0
| 0.585366
| 0.784291
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.146341
| false
| 0
| 0.02439
| 0.146341
| 0.317073
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
60f0e74ca920ba5c7a99479d84da7cd02e0b3e56
| 24
|
py
|
Python
|
contrib/tools/python/src/Lib/plat-mac/Carbon/IBCarbon.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | 6,989
|
2017-07-18T06:23:18.000Z
|
2022-03-31T15:58:36.000Z
|
python/src/Lib/plat-mac/Carbon/IBCarbon.py
|
weiqiangzheng/sl4a
|
d3c17dca978cbeee545e12ea240a9dbf2a6999e9
|
[
"Apache-2.0"
] | 1,978
|
2017-07-18T09:17:58.000Z
|
2022-03-31T14:28:43.000Z
|
python/src/Lib/plat-mac/Carbon/IBCarbon.py
|
weiqiangzheng/sl4a
|
d3c17dca978cbeee545e12ea240a9dbf2a6999e9
|
[
"Apache-2.0"
] | 1,228
|
2017-07-18T09:03:13.000Z
|
2022-03-29T05:57:40.000Z
|
from _IBCarbon import *
| 12
| 23
| 0.791667
| 3
| 24
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
60ff8d95a3fdca7931bdd631fdd95714e11e890e
| 46
|
py
|
Python
|
csv_to_dictionary/__init__.py
|
EthanDayley/csv_to_dictionary
|
a49103da8667e542aca30f797394af3e7d695aa5
|
[
"MIT"
] | 1
|
2018-03-02T18:55:33.000Z
|
2018-03-02T18:55:33.000Z
|
csv_to_dictionary/__init__.py
|
EthanDayley/csv_to_dictionary
|
a49103da8667e542aca30f797394af3e7d695aa5
|
[
"MIT"
] | null | null | null |
csv_to_dictionary/__init__.py
|
EthanDayley/csv_to_dictionary
|
a49103da8667e542aca30f797394af3e7d695aa5
|
[
"MIT"
] | null | null | null |
from .csv_to_dictionary import convert_simple
| 23
| 45
| 0.891304
| 7
| 46
| 5.428571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
880ffc1a0208c4b2c907ab0a28203a51fce736b9
| 31,415
|
py
|
Python
|
sigpy/interp.py
|
kmjohnson3/sigpy
|
6d5f9c66f7446a13b3615c31446bbce8adc5dfaa
|
[
"BSD-3-Clause"
] | 196
|
2018-07-07T00:42:42.000Z
|
2022-03-22T02:30:24.000Z
|
sigpy/interp.py
|
kmjohnson3/sigpy
|
6d5f9c66f7446a13b3615c31446bbce8adc5dfaa
|
[
"BSD-3-Clause"
] | 79
|
2018-10-12T19:53:21.000Z
|
2022-03-30T13:44:41.000Z
|
sigpy/interp.py
|
kmjohnson3/sigpy
|
6d5f9c66f7446a13b3615c31446bbce8adc5dfaa
|
[
"BSD-3-Clause"
] | 68
|
2018-09-26T03:46:42.000Z
|
2022-03-11T03:51:49.000Z
|
# -*- coding: utf-8 -*-
"""Interpolation functions.
"""
import numpy as np
import numba as nb
from sigpy import backend, config, util
# Public API of this module.
__all__ = ['interpolate', 'gridding']

# Names of the supported interpolation kernels.
KERNELS = ['spline', 'kaiser_bessel']
def interpolate(input, coord, kernel='spline', width=2, param=1):
    r"""Interpolate an array onto off-grid points given by coordinates.

    For each point :math:`c[j]`, grid values within an
    :math:`\ell_\infty` radius of ``width / 2`` are combined with
    weights :math:`K\left(\frac{i - c[j]}{W / 2}\right)`, i.e.

    .. math ::
        y[j] = \sum_{i : \| i - c[j] \|_\infty \leq W / 2}
               K\left(\frac{i - c[j]}{W / 2}\right) x[i]

    Two kernels are supported:

    * ``'spline'``: cardinal B-spline of order ``param`` (0, 1 or 2);
      ``param=1`` gives linear interpolation. With a uniformly spaced
      grid, ``width=param + 1`` makes the weights sum to one.
    * ``'kaiser_bessel'``: :math:`K(x) = I_0(\beta \sqrt{1 - x^2})`
      with shape parameter ``param``; the modified Bessel function
      :math:`I_0` is evaluated by a power-series approximation.

    Args:
        input (array): input array; the last ``coord.shape[-1]`` axes
            are spatial, any leading axes are batch.
        coord (array): coordinate array of shape [..., ndim].
        kernel (str): interpolation kernel, {'spline', 'kaiser_bessel'}.
        width (float or tuple of floats): kernel full-width per axis.
        param (float or tuple of floats): kernel parameter per axis.

    Returns:
        output (array): interpolated values of shape
        ``input.shape[:-ndim] + coord.shape[:-1]``.

    References:
        https://en.wikipedia.org/wiki/Spline_wavelet#Cardinal_B-splines_of_small_orders
        http://people.math.sfu.ca/~cbm/aands/page_378.htm
    """
    ndim = coord.shape[-1]

    batch_shape = input.shape[:-ndim]
    nbatch = util.prod(batch_shape)
    pts_shape = coord.shape[:-1]
    npts = util.prod(pts_shape)

    xp = backend.get_array_module(input)

    # Flatten batch axes and point axes so the jitted/CUDA routines
    # only ever see [batch, spatial...] and [npts, ndim].
    input = input.reshape([nbatch] + list(input.shape[-ndim:]))
    coord = coord.reshape([npts, ndim])
    output = xp.zeros([nbatch, npts], dtype=input.dtype)

    def _per_axis(value):
        # Broadcast a scalar to one entry per spatial axis.
        if np.isscalar(value):
            value = [value] * ndim
        return xp.array(value, coord.dtype)

    param = _per_axis(param)
    width = _per_axis(width)

    if xp == np:
        _interpolate[kernel][ndim - 1](output, input, coord, width, param)
    else:  # pragma: no cover
        _interpolate_cuda[kernel][ndim - 1](
            input, coord, width, param, output, size=npts)

    return output.reshape(batch_shape + pts_shape)
def gridding(input, coord, shape, kernel="spline", width=2, param=1):
    r"""Grid values at off-grid points back onto an array (adjoint of
    :func:`interpolate`).

    Each input value at point :math:`c[j]` is spread onto grid indices
    within an :math:`\ell_\infty` radius of ``width / 2``:

    .. math ::
        x[i] = \sum_{j : \| i - c[j] \|_\infty \leq W / 2}
               K\left(\frac{i - c[j]}{W / 2}\right) y[j]

    Two kernels are supported:

    * ``'spline'``: cardinal B-spline of order ``param`` (0, 1 or 2);
      ``param=1`` gives linear weights. With a uniformly spaced grid,
      ``width=param + 1`` makes the weights sum to one.
    * ``'kaiser_bessel'``: :math:`K(x) = I_0(\beta \sqrt{1 - x^2})`
      with shape parameter ``param``; :math:`I_0` is evaluated by a
      power-series approximation.

    Args:
        input (array): input values, one per coordinate point.
        coord (array): coordinate array of shape [..., ndim].
        shape (tuple): output array shape (batch + spatial axes).
        kernel (str): interpolation kernel, {"spline", "kaiser_bessel"}.
        width (float or tuple of floats): kernel full-width per axis.
        param (float or tuple of floats): kernel parameter per axis.

    Returns:
        output (array): gridded array of the requested ``shape``.

    References:
        https://en.wikipedia.org/wiki/Spline_wavelet#Cardinal_B-splines_of_small_orders
        http://people.math.sfu.ca/~cbm/aands/page_378.htm
    """
    ndim = coord.shape[-1]

    batch_shape = shape[:-ndim]
    nbatch = util.prod(batch_shape)
    pts_shape = coord.shape[:-1]
    npts = util.prod(pts_shape)

    xp = backend.get_array_module(input)
    # Complex outputs need a different CUDA scatter (atomicAdd per part).
    isreal = np.issubdtype(input.dtype, np.floating)

    input = input.reshape([nbatch, npts])
    coord = coord.reshape([npts, ndim])
    output = xp.zeros([nbatch] + list(shape[-ndim:]), dtype=input.dtype)

    def _per_axis(value):
        # Broadcast a scalar to one entry per spatial axis.
        if np.isscalar(value):
            value = [value] * ndim
        return xp.array(value, coord.dtype)

    param = _per_axis(param)
    width = _per_axis(width)

    if xp == np:
        _gridding[kernel][ndim - 1](output, input, coord, width, param)
    else:  # pragma: no cover
        if isreal:
            _gridding_cuda[kernel][ndim - 1](
                input, coord, width, param, output, size=npts)
        else:
            _gridding_cuda_complex[kernel][ndim - 1](
                input, coord, width, param, output, size=npts)

    return output.reshape(shape)
@nb.jit(nopython=True, cache=True)  # pragma: no cover
def _spline_kernel(x, order):
    # Cardinal B-spline kernel, rescaled to support [-1, 1].
    # Only orders 0, 1 and 2 are defined (see interpolate docstring).
    absx = abs(x)
    if absx > 1:
        return 0

    if order == 0:
        return 1
    elif order == 1:
        return 1 - absx
    elif order == 2:
        if absx > 1 / 3:
            return 9 / 8 * (1 - absx)**2
        else:
            return 3 / 4 * (1 - 3 * x**2)
@nb.jit(nopython=True, cache=True)  # pragma: no cover
def _kaiser_bessel_kernel(x, beta):
    # Kaiser-Bessel kernel: I0(beta * sqrt(1 - x^2)) on [-1, 1], with
    # I0 approximated by the Abramowitz & Stegun polynomial series.
    if abs(x) > 1:
        return 0

    z = beta * (1 - x**2)**0.5
    t = z / 3.75
    if z < 3.75:
        # Small-argument polynomial approximation of I0.
        return (1 + 3.5156229 * t**2 + 3.0899424 * t**4
                + 1.2067492 * t**6 + 0.2659732 * t**8
                + 0.0360768 * t**10 + 0.0045813 * t**12)
    else:
        # Large-argument asymptotic approximation of I0.
        return z**-0.5 * np.exp(z) * (
            0.39894228 + 0.01328592 * t**-1 +
            0.00225319 * t**-2 - 0.00157565 * t**-3 +
            0.00916281 * t**-4 - 0.02057706 * t**-5 +
            0.02635537 * t**-6 - 0.01647633 * t**-7 +
            0.00392377 * t**-8)
def _get_interpolate(kernel):
    """Build CPU (numba-jitted) interpolation routines for 1D/2D/3D.

    Args:
        kernel (str): kernel name, 'spline' or 'kaiser_bessel'.

    Returns:
        tuple: (_interpolate1, _interpolate2, _interpolate3), one
        jitted routine per number of spatial dimensions.
    """
    # Rebind the name to the scalar kernel function so numba inlines it
    # into the jitted loops below.
    if kernel == 'spline':
        kernel = _spline_kernel
    elif kernel == 'kaiser_bessel':
        kernel = _kaiser_bessel_kernel

    @nb.jit(nopython=True)  # pragma: no cover
    def _interpolate1(output, input, coord, width, param):
        batch_size, nx = input.shape
        npts = coord.shape[0]
        for i in range(npts):
            kx = coord[i, -1]

            # Grid indices covered by the kernel support around kx.
            x0 = np.ceil(kx - width[-1] / 2)
            x1 = np.floor(kx + width[-1] / 2)

            for x in range(x0, x1 + 1):
                # Kernel argument is scaled to [-1, 1] over the half-width.
                w = kernel((x - kx) / (width[-1] / 2), param[-1])
                for b in range(batch_size):
                    # x % nx implements circular boundary conditions.
                    output[b, i] += w * input[b, x % nx]

        return output

    @nb.jit(nopython=True)  # pragma: no cover
    def _interpolate2(output, input, coord, width, param):
        batch_size, ny, nx = input.shape
        npts = coord.shape[0]
        for i in range(npts):
            kx, ky = coord[i, -1], coord[i, -2]

            # Bounding box of the separable kernel support.
            x0, y0 = (np.ceil(kx - width[-1] / 2),
                      np.ceil(ky - width[-2] / 2))
            x1, y1 = (np.floor(kx + width[-1] / 2),
                      np.floor(ky + width[-2] / 2))

            for y in range(y0, y1 + 1):
                wy = kernel((y - ky) / (width[-2] / 2), param[-2])
                for x in range(x0, x1 + 1):
                    # Separable weight: product of per-axis kernels.
                    w = wy * kernel((x - kx) / (width[-1] / 2), param[-1])
                    for b in range(batch_size):
                        output[b, i] += w * input[b, y % ny, x % nx]

        return output

    @nb.jit(nopython=True)  # pragma: no cover
    def _interpolate3(output, input, coord, width, param):
        batch_size, nz, ny, nx = input.shape
        npts = coord.shape[0]
        for i in range(npts):
            kx, ky, kz = coord[i, -1], coord[i, -2], coord[i, -3]

            # Bounding box of the separable kernel support.
            x0, y0, z0 = (np.ceil(kx - width[-1] / 2),
                          np.ceil(ky - width[-2] / 2),
                          np.ceil(kz - width[-3] / 2))
            x1, y1, z1 = (np.floor(kx + width[-1] / 2),
                          np.floor(ky + width[-2] / 2),
                          np.floor(kz + width[-3] / 2))

            for z in range(z0, z1 + 1):
                wz = kernel((z - kz) / (width[-3] / 2), param[-3])
                for y in range(y0, y1 + 1):
                    wy = wz * kernel((y - ky) / (width[-2] / 2), param[-2])
                    for x in range(x0, x1 + 1):
                        w = wy * kernel((x - kx) / (width[-1] / 2), param[-1])
                        for b in range(batch_size):
                            output[b, i] += w * input[
                                b, z % nz, y % ny, x % nx]

        return output

    return _interpolate1, _interpolate2, _interpolate3
def _get_gridding(kernel):
    """Build CPU (numba-jitted) gridding routines for 1D/2D/3D.

    Gridding is the adjoint of interpolation: it scatter-adds each
    point value onto the grid indices inside the kernel support.

    Args:
        kernel (str): kernel name, 'spline' or 'kaiser_bessel'.

    Returns:
        tuple: (_gridding1, _gridding2, _gridding3), one jitted routine
        per number of spatial dimensions.
    """
    # Rebind the name to the scalar kernel function so numba inlines it.
    if kernel == 'spline':
        kernel = _spline_kernel
    elif kernel == 'kaiser_bessel':
        kernel = _kaiser_bessel_kernel

    @nb.jit(nopython=True)  # pragma: no cover
    def _gridding1(output, input, coord, width, param):
        batch_size, nx = output.shape
        npts = coord.shape[0]
        for i in range(npts):
            kx = coord[i, -1]

            # Grid indices covered by the kernel support around kx.
            x0 = np.ceil(kx - width[-1] / 2)
            x1 = np.floor(kx + width[-1] / 2)

            for x in range(x0, x1 + 1):
                w = kernel((x - kx) / (width[-1] / 2), param[-1])
                for b in range(batch_size):
                    # x % nx implements circular boundary conditions.
                    output[b, x % nx] += w * input[b, i]

        return output

    @nb.jit(nopython=True)  # pragma: no cover
    def _gridding2(output, input, coord, width, param):
        batch_size, ny, nx = output.shape
        npts = coord.shape[0]
        for i in range(npts):
            kx, ky = coord[i, -1], coord[i, -2]

            # Bounding box of the separable kernel support.
            x0, y0 = (np.ceil(kx - width[-1] / 2),
                      np.ceil(ky - width[-2] / 2))
            x1, y1 = (np.floor(kx + width[-1] / 2),
                      np.floor(ky + width[-2] / 2))

            for y in range(y0, y1 + 1):
                wy = kernel((y - ky) / (width[-2] / 2), param[-2])
                for x in range(x0, x1 + 1):
                    # Separable weight: product of per-axis kernels.
                    w = wy * kernel((x - kx) / (width[-1] / 2), param[-1])
                    for b in range(batch_size):
                        output[b, y % ny, x % nx] += w * input[b, i]

        return output

    @nb.jit(nopython=True)  # pragma: no cover
    def _gridding3(output, input, coord, width, param):
        batch_size, nz, ny, nx = output.shape
        npts = coord.shape[0]
        for i in range(npts):
            kx, ky, kz = coord[i, -1], coord[i, -2], coord[i, -3]

            # Bounding box of the separable kernel support.
            x0, y0, z0 = (np.ceil(kx - width[-1] / 2),
                          np.ceil(ky - width[-2] / 2),
                          np.ceil(kz - width[-3] / 2))
            x1, y1, z1 = (np.floor(kx + width[-1] / 2),
                          np.floor(ky + width[-2] / 2),
                          np.floor(kz + width[-3] / 2))

            for z in range(z0, z1 + 1):
                wz = kernel((z - kz) / (width[-3] / 2), param[-3])
                for y in range(y0, y1 + 1):
                    wy = wz * kernel((y - ky) / (width[-2] / 2), param[-2])
                    for x in range(x0, x1 + 1):
                        w = wy * kernel(
                            (x - kx) / (width[-1] / 2), param[-1])
                        for b in range(batch_size):
                            output[b, z % nz, y % ny, x % nx] += w * input[
                                b, i]

        return output

    return _gridding1, _gridding2, _gridding3
# Pre-compile the CPU (numba) routines once at import time, keyed by
# kernel name; each entry is a tuple indexed by (ndim - 1).
_interpolate = {}
_gridding = {}
for kernel in KERNELS:
    _interpolate[kernel] = _get_interpolate(kernel)
    _gridding[kernel] = _get_gridding(kernel)
if config.cupy_enabled:  # pragma: no cover
    import cupy as cp

    # CUDA device-function preambles implementing the scalar kernels;
    # mirrors _spline_kernel / _kaiser_bessel_kernel above.
    _spline_kernel_cuda = """
    __device__ inline S kernel(S x, S order) {
        if (fabsf(x) > 1)
            return 0;

        if (order == 0)
            return 1;
        else if (order == 1)
            return 1 - fabsf(x);
        else if (fabsf(x) > 1 / 3)
            return 9 / 8 * (1 - fabsf(x)) * (1 - fabsf(x));
        else
            return 3 / 4 * (1 - 3 * x * x);
    }
    """

    _kaiser_bessel_kernel_cuda = """
    __device__ inline S kernel(S x, S beta) {
        if (fabsf(x) > 1)
            return 0;

        x = beta * sqrt(1 - x * x);
        S t = x / 3.75;
        S t2 = t * t;
        S t4 = t2 * t2;
        S t6 = t4 * t2;
        S t8 = t6 * t2;
        if (x < 3.75) {
            S t10 = t8 * t2;
            S t12 = t10 * t2;
            return 1 + 3.5156229 * t2 + 3.0899424 * t4 +
                1.2067492 * t6 + 0.2659732 * t8 +
                0.0360768 * t10 + 0.0045813 * t12;
        } else {
            S t3 = t * t2;
            S t5 = t3 * t2;
            S t7 = t5 * t2;
            return exp(x) / sqrt(x) * (
                0.39894228 + 0.01328592 / t +
                0.00225319 / t2 - 0.00157565 / t3 +
                0.00916281 / t4 - 0.02057706 / t5 +
                0.02635537 / t6 - 0.01647633 / t7 +
                0.00392377 / t8);
        }
    }
    """

    # Modulo that is non-negative for negative x (C's % may be negative);
    # implements the same circular boundary as the numba routines.
    mod_cuda = """
    __device__ inline int mod(int x, int n) {
        return (x % n + n) % n;
    }
    """

    def _get_interpolate_cuda(kernel):
        """Build CUDA (cupy) interpolation kernels for 1D/2D/3D.

        Returns a tuple of ElementwiseKernels, one per ndim, each
        launched with one thread per output point (size=npts).
        """
        # Rebind the name to the CUDA preamble implementing the kernel.
        if kernel == 'spline':
            kernel = _spline_kernel_cuda
        elif kernel == 'kaiser_bessel':
            kernel = _kaiser_bessel_kernel_cuda

        _interpolate1_cuda = cp.ElementwiseKernel(
            'raw T input, raw S coord, raw S width, raw S param',
            'raw T output',
            """
            const int ndim = 1;
            const int batch_size = input.shape()[0];
            const int nx = input.shape()[1];

            const int coord_idx[] = {i, 0};
            const S kx = coord[coord_idx];
            const int x0 = ceil(kx - width[ndim - 1] / 2.0);
            const int x1 = floor(kx + width[ndim - 1] / 2.0);

            for (int x = x0; x < x1 + 1; x++) {
                const S w = kernel(
                    ((S) x - kx) / (width[ndim - 1] / 2.0), param[ndim - 1]);
                for (int b = 0; b < batch_size; b++) {
                    const int input_idx[] = {b, mod(x, nx)};
                    const T v = (T) w * input[input_idx];
                    const int output_idx[] = {b, i};
                    output[output_idx] += v;
                }
            }
            """,
            name='interpolate1',
            preamble=kernel + mod_cuda,
            reduce_dims=False)

        _interpolate2_cuda = cp.ElementwiseKernel(
            'raw T input, raw S coord, raw S width, raw S param',
            'raw T output',
            """
            const int ndim = 2;
            const int batch_size = input.shape()[0];
            const int ny = input.shape()[1];
            const int nx = input.shape()[2];

            const int coordx_idx[] = {i, 1};
            const S kx = coord[coordx_idx];
            const int coordy_idx[] = {i, 0};
            const S ky = coord[coordy_idx];

            const int x0 = ceil(kx - width[ndim - 1] / 2.0);
            const int y0 = ceil(ky - width[ndim - 2] / 2.0);

            const int x1 = floor(kx + width[ndim - 1] / 2.0);
            const int y1 = floor(ky + width[ndim - 2] / 2.0);

            for (int y = y0; y < y1 + 1; y++) {
                const S wy = kernel(
                    ((S) y - ky) / (width[ndim - 2] / 2.0),
                    param[ndim - 2]);
                for (int x = x0; x < x1 + 1; x++) {
                    const S w = wy * kernel(
                        ((S) x - kx) / (width[ndim - 1] / 2.0),
                        param[ndim - 1]);
                    for (int b = 0; b < batch_size; b++) {
                        const int input_idx[] = {b, mod(y, ny), mod(x, nx)};
                        const T v = (T) w * input[input_idx];
                        const int output_idx[] = {b, i};
                        output[output_idx] += v;
                    }
                }
            }
            """,
            name='interpolate2',
            preamble=kernel + mod_cuda,
            reduce_dims=False)

        _interpolate3_cuda = cp.ElementwiseKernel(
            'raw T input, raw S coord, raw S width, raw S param', 'raw T output', """
            const int ndim = 3;
            const int batch_size = input.shape()[0];
            const int nz = input.shape()[1];
            const int ny = input.shape()[2];
            const int nx = input.shape()[3];

            const int coordz_idx[] = {i, 0};
            const S kz = coord[coordz_idx];
            const int coordy_idx[] = {i, 1};
            const S ky = coord[coordy_idx];
            const int coordx_idx[] = {i, 2};
            const S kx = coord[coordx_idx];

            const int x0 = ceil(kx - width[ndim - 1] / 2.0);
            const int y0 = ceil(ky - width[ndim - 2] / 2.0);
            const int z0 = ceil(kz - width[ndim - 3] / 2.0);

            const int x1 = floor(kx + width[ndim - 1] / 2.0);
            const int y1 = floor(ky + width[ndim - 2] / 2.0);
            const int z1 = floor(kz + width[ndim - 3] / 2.0);

            for (int z = z0; z < z1 + 1; z++) {
                const S wz = kernel(
                    ((S) z - kz) / (width[ndim - 3] / 2.0),
                    param[ndim - 3]);
                for (int y = y0; y < y1 + 1; y++) {
                    const S wy = wz * kernel(
                        ((S) y - ky) / (width[ndim - 2] / 2.0),
                        param[ndim - 2]);
                    for (int x = x0; x < x1 + 1; x++) {
                        const S w = wy * kernel(
                            ((S) x - kx) / (width[ndim - 1] / 2.0),
                            param[ndim - 1]);
                        for (int b = 0; b < batch_size; b++) {
                            const int input_idx[] = {b, mod(z, nz), mod(y, ny),
                                mod(x, nx)};
                            const T v = (T) w * input[input_idx];
                            const int output_idx[] = {b, i};
                            output[output_idx] += v;
                        }
                    }
                }
            }
            """, name='interpolate3', preamble=kernel + mod_cuda,
            reduce_dims=False)

        return _interpolate1_cuda, _interpolate2_cuda, _interpolate3_cuda

    def _get_gridding_cuda(kernel):
        """Build CUDA gridding kernels (real dtype) for 1D/2D/3D.

        Scatter-adds use atomicAdd since multiple points may write the
        same grid cell concurrently.
        """
        # Rebind the name to the CUDA preamble implementing the kernel.
        if kernel == 'spline':
            kernel = _spline_kernel_cuda
        elif kernel == 'kaiser_bessel':
            kernel = _kaiser_bessel_kernel_cuda

        _gridding1_cuda = cp.ElementwiseKernel(
            'raw T input, raw S coord, raw S width, raw S param',
            'raw T output',
            """
            const int ndim = 1;
            const int batch_size = output.shape()[0];
            const int nx = output.shape()[1];

            const int coord_idx[] = {i, 0};
            const S kx = coord[coord_idx];
            const int x0 = ceil(kx - width[ndim - 1] / 2.0);
            const int x1 = floor(kx + width[ndim - 1] / 2.0);

            for (int x = x0; x < x1 + 1; x++) {
                const S w = kernel(
                    ((S) x - kx) / (width[ndim - 1] / 2.0), param[ndim - 1]);
                for (int b = 0; b < batch_size; b++) {
                    const int input_idx[] = {b, i};
                    const T v = (T) w * input[input_idx];
                    const int output_idx[] = {b, mod(x, nx)};
                    atomicAdd(&output[output_idx], v);
                }
            }
            """,
            name='gridding1',
            preamble=kernel + mod_cuda,
            reduce_dims=False)

        _gridding2_cuda = cp.ElementwiseKernel(
            'raw T input, raw S coord, raw S width, raw S param', 'raw T output', """
            const int ndim = 2;
            const int batch_size = output.shape()[0];
            const int ny = output.shape()[1];
            const int nx = output.shape()[2];

            const int coordx_idx[] = {i, 1};
            const S kx = coord[coordx_idx];
            const int coordy_idx[] = {i, 0};
            const S ky = coord[coordy_idx];

            const int x0 = ceil(kx - width[ndim - 1] / 2.0);
            const int y0 = ceil(ky - width[ndim - 2] / 2.0);

            const int x1 = floor(kx + width[ndim - 1] / 2.0);
            const int y1 = floor(ky + width[ndim - 2] / 2.0);

            for (int y = y0; y < y1 + 1; y++) {
                const S wy = kernel(
                    ((S) y - ky) / (width[ndim - 2] / 2.0),
                    param[ndim - 2]);
                for (int x = x0; x < x1 + 1; x++) {
                    const S w = wy * kernel(
                        ((S) x - kx) / (width[ndim - 1] / 2.0),
                        param[ndim - 1]);
                    for (int b = 0; b < batch_size; b++) {
                        const int input_idx[] = {b, i};
                        const T v = (T) w * input[input_idx];
                        const int output_idx[] = {b, mod(y, ny), mod(x, nx)};
                        atomicAdd(&output[output_idx], v);
                    }
                }
            }
            """, name='gridding2', preamble=kernel + mod_cuda,
            reduce_dims=False)

        _gridding3_cuda = cp.ElementwiseKernel(
            'raw T input, raw S coord, raw S width, raw S param', 'raw T output', """
            const int ndim = 3;
            const int batch_size = output.shape()[0];
            const int nz = output.shape()[1];
            const int ny = output.shape()[2];
            const int nx = output.shape()[3];

            const int coordz_idx[] = {i, 0};
            const S kz = coord[coordz_idx];
            const int coordy_idx[] = {i, 1};
            const S ky = coord[coordy_idx];
            const int coordx_idx[] = {i, 2};
            const S kx = coord[coordx_idx];

            const int x0 = ceil(kx - width[ndim - 1] / 2.0);
            const int y0 = ceil(ky - width[ndim - 2] / 2.0);
            const int z0 = ceil(kz - width[ndim - 3] / 2.0);

            const int x1 = floor(kx + width[ndim - 1] / 2.0);
            const int y1 = floor(ky + width[ndim - 2] / 2.0);
            const int z1 = floor(kz + width[ndim - 3] / 2.0);

            for (int z = z0; z < z1 + 1; z++) {
                const S wz = kernel(
                    ((S) z - kz) / (width[ndim - 3] / 2.0),
                    param[ndim - 3]);
                for (int y = y0; y < y1 + 1; y++) {
                    const S wy = wz * kernel(
                        ((S) y - ky) / (width[ndim - 2] / 2.0),
                        param[ndim - 2]);
                    for (int x = x0; x < x1 + 1; x++) {
                        const S w = wy * kernel(
                            ((S) x - kx) / (width[ndim - 1] / 2.0),
                            param[ndim - 1]);
                        for (int b = 0; b < batch_size; b++) {
                            const int input_idx[] = {b, i};
                            const T v = (T) w * input[input_idx];
                            const int output_idx[] = {
                                b, mod(z, nz), mod(y, ny), mod(x, nx)};
                            atomicAdd(&output[output_idx], v);
                        }
                    }
                }
            }
            """, name='gridding3', preamble=kernel + mod_cuda,
            reduce_dims=False)

        return _gridding1_cuda, _gridding2_cuda, _gridding3_cuda

    def _get_gridding_cuda_complex(kernel):
        """Build CUDA gridding kernels for complex dtypes.

        CUDA atomicAdd has no complex overload, so the real and
        imaginary parts are accumulated with two scalar atomicAdds on
        the reinterpreted output storage.
        """
        # Rebind the name to the CUDA preamble implementing the kernel.
        if kernel == 'spline':
            kernel = _spline_kernel_cuda
        elif kernel == 'kaiser_bessel':
            kernel = _kaiser_bessel_kernel_cuda

        _gridding1_cuda_complex = cp.ElementwiseKernel(
            'raw T input, raw S coord, raw S width, raw S param',
            'raw T output',
            """
            const int ndim = 1;
            const int batch_size = output.shape()[0];
            const int nx = output.shape()[1];

            const int coord_idx[] = {i, 0};
            const S kx = coord[coord_idx];
            const int x0 = ceil(kx - width[ndim - 1] / 2.0);
            const int x1 = floor(kx + width[ndim - 1] / 2.0);

            for (int x = x0; x < x1 + 1; x++) {
                const S w = kernel(
                    ((S) x - kx) / (width[ndim - 1] / 2.0), param[ndim - 1]);
                for (int b = 0; b < batch_size; b++) {
                    const int input_idx[] = {b, i};
                    const T v = (T) w * input[input_idx];
                    const int output_idx[] = {b, mod(x, nx)};
                    atomicAdd(
                        reinterpret_cast<T::value_type*>(
                            &(output[output_idx])), v.real());
                    atomicAdd(
                        reinterpret_cast<T::value_type*>(
                            &(output[output_idx])) + 1, v.imag());
                }
            }
            """,
            name='gridding1_complex',
            preamble=kernel + mod_cuda,
            reduce_dims=False)

        _gridding2_cuda_complex = cp.ElementwiseKernel(
            'raw T input, raw S coord, raw S width, raw S param',
            'raw T output',
            """
            const int ndim = 2;
            const int batch_size = output.shape()[0];
            const int ny = output.shape()[1];
            const int nx = output.shape()[2];

            const int coordx_idx[] = {i, 1};
            const S kx = coord[coordx_idx];
            const int coordy_idx[] = {i, 0};
            const S ky = coord[coordy_idx];

            const int x0 = ceil(kx - width[ndim - 1] / 2.0);
            const int y0 = ceil(ky - width[ndim - 2] / 2.0);

            const int x1 = floor(kx + width[ndim - 1] / 2.0);
            const int y1 = floor(ky + width[ndim - 2] / 2.0);

            for (int y = y0; y < y1 + 1; y++) {
                const S wy = kernel(
                    ((S) y - ky) / (width[ndim - 2] / 2.0),
                    param[ndim - 2]);
                for (int x = x0; x < x1 + 1; x++) {
                    const S w = wy * kernel(
                        ((S) x - kx) / (width[ndim - 1] / 2.0),
                        param[ndim - 1]);
                    for (int b = 0; b < batch_size; b++) {
                        const int input_idx[] = {b, i};
                        const T v = (T) w * input[input_idx];
                        const int output_idx[] = {b, mod(y, ny), mod(x, nx)};
                        atomicAdd(reinterpret_cast<T::value_type*>(
                            &(output[output_idx])), v.real());
                        atomicAdd(reinterpret_cast<T::value_type*>(
                            &(output[output_idx])) + 1, v.imag());
                    }
                }
            }
            """,
            name='gridding2_complex',
            preamble=kernel + mod_cuda,
            reduce_dims=False)

        _gridding3_cuda_complex = cp.ElementwiseKernel(
            'raw T input, raw S coord, raw S width, raw S param',
            'raw T output',
            """
            const int ndim = 3;
            const int batch_size = output.shape()[0];
            const int nz = output.shape()[1];
            const int ny = output.shape()[2];
            const int nx = output.shape()[3];

            const int coordz_idx[] = {i, 0};
            const S kz = coord[coordz_idx];
            const int coordy_idx[] = {i, 1};
            const S ky = coord[coordy_idx];
            const int coordx_idx[] = {i, 2};
            const S kx = coord[coordx_idx];

            const int x0 = ceil(kx - width[ndim - 1] / 2.0);
            const int y0 = ceil(ky - width[ndim - 2] / 2.0);
            const int z0 = ceil(kz - width[ndim - 3] / 2.0);

            const int x1 = floor(kx + width[ndim - 1] / 2.0);
            const int y1 = floor(ky + width[ndim - 2] / 2.0);
            const int z1 = floor(kz + width[ndim - 3] / 2.0);

            for (int z = z0; z < z1 + 1; z++) {
                const S wz = kernel(
                    ((S) z - kz) / (width[ndim - 3] / 2.0),
                    param[ndim - 3]);
                for (int y = y0; y < y1 + 1; y++) {
                    const S wy = wz * kernel(
                        ((S) y - ky) / (width[ndim - 2] / 2.0),
                        param[ndim - 2]);
                    for (int x = x0; x < x1 + 1; x++) {
                        const S w = wy * kernel(
                            ((S) x - kx) / (width[ndim - 1] / 2.0),
                            param[ndim - 1]);
                        for (int b = 0; b < batch_size; b++) {
                            const int input_idx[] = {b, i};
                            const T v = (T) w * input[input_idx];
                            const int output_idx[] = {
                                b, mod(z, nz), mod(y, ny), mod(x, nx)};
                            atomicAdd(reinterpret_cast<T::value_type*>(
                                &(output[output_idx])), v.real());
                            atomicAdd(reinterpret_cast<T::value_type*>(
                                &(output[output_idx])) + 1, v.imag());
                        }
                    }
                }
            }
            """,
            name='gridding3_complex',
            preamble=kernel + mod_cuda,
            reduce_dims=False)

        return _gridding1_cuda_complex, _gridding2_cuda_complex, \
            _gridding3_cuda_complex

    # Pre-compile the GPU routines per kernel name, mirroring the CPU
    # dispatch tables above.
    _interpolate_cuda = {}
    _gridding_cuda = {}
    _gridding_cuda_complex = {}
    for kernel in KERNELS:
        _interpolate_cuda[kernel] = _get_interpolate_cuda(kernel)
        _gridding_cuda[kernel] = _get_gridding_cuda(kernel)
        _gridding_cuda_complex[kernel] = _get_gridding_cuda_complex(kernel)
| 37.13357
| 87
| 0.467006
| 4,063
| 31,415
| 3.522274
| 0.064484
| 0.060373
| 0.02264
| 0.02264
| 0.870659
| 0.851862
| 0.844316
| 0.831179
| 0.821885
| 0.79121
| 0
| 0.052093
| 0.396276
| 31,415
| 845
| 88
| 37.177515
| 0.702468
| 0.132103
| 0
| 0.582589
| 0
| 0
| 0.388568
| 0.003149
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033482
| false
| 0
| 0.008929
| 0
| 0.109375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7149511b8120586fe9092e9dabbf04e2fd82934c
| 236
|
py
|
Python
|
mason/engines/metastore/models/credentials/__init__.py
|
kyprifog/mason
|
bf45672124ef841bc16216c293034f4ccc506621
|
[
"Apache-2.0"
] | 4
|
2021-04-12T17:49:34.000Z
|
2022-01-23T19:54:29.000Z
|
mason/engines/metastore/models/credentials/__init__.py
|
kyprifog/mason
|
bf45672124ef841bc16216c293034f4ccc506621
|
[
"Apache-2.0"
] | 24
|
2021-04-30T18:40:25.000Z
|
2021-05-12T20:52:06.000Z
|
mason/engines/metastore/models/credentials/__init__.py
|
kyprifog/mason
|
bf45672124ef841bc16216c293034f4ccc506621
|
[
"Apache-2.0"
] | 3
|
2021-04-12T19:40:43.000Z
|
2021-09-07T21:56:36.000Z
|
class MetastoreCredentials:
    """Base credentials object for a metastore; carries no fields of its own."""

    def __init__(self):
        # Nothing to initialize — the class exists for type discrimination.
        pass

    def to_dict(self):
        """Serialize to a dict; there are no attributes, so always empty."""
        return {}
class InvalidCredentials:
    """Marker for credentials that failed validation.

    Stores a human-readable *reason*; serializes to an empty dict like the
    valid-credentials counterpart.
    """

    def __init__(self, reason: str):
        # Why the credentials were rejected.
        self.reason = reason

    def to_dict(self):
        """Serialize to a dict; intentionally empty (reason is not exported)."""
        return {}
| 16.857143
| 36
| 0.622881
| 26
| 236
| 5.269231
| 0.461538
| 0.10219
| 0.160584
| 0.189781
| 0.277372
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.283898
| 236
| 13
| 37
| 18.153846
| 0.810651
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.125
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
718457e56167d7666921e966a93c04a280206e61
| 5,283
|
py
|
Python
|
tests/conversation_manager/test_create_statement.py
|
Jack2313/WeChatterBot
|
377899e8cab4ca5eca9b0136207e2afb97d9acb2
|
[
"BSD-3-Clause"
] | 1
|
2020-04-03T02:54:18.000Z
|
2020-04-03T02:54:18.000Z
|
tests/conversation_manager/test_create_statement.py
|
Jack2313/WeChatterBot
|
377899e8cab4ca5eca9b0136207e2afb97d9acb2
|
[
"BSD-3-Clause"
] | 7
|
2020-04-11T13:22:50.000Z
|
2020-05-14T00:19:37.000Z
|
tests/conversation_manager/test_create_statement.py
|
Jack2313/WeChatterBot
|
377899e8cab4ca5eca9b0136207e2afb97d9acb2
|
[
"BSD-3-Clause"
] | 3
|
2020-04-11T12:09:56.000Z
|
2020-12-16T13:26:20.000Z
|
from unittest import TestCase
from app.view.conversation_manager import generate_token
import json
from app import create_app
class CreateStatementTestCase(TestCase):
    """
    Unit tests for the Create Statement method.
    LJF: all tests clear 2020-5-12

    Refactor note: every test POSTs the same endpoint with a near-identical
    payload, so the request plumbing lives in the private helpers ``_post``
    and ``_full_data``; each test method only states what differs.
    """

    def setUp(self):
        # Fresh test client and auth token for every test case.
        self.app = create_app().test_client()
        self.myheaders = {'Content-Type': 'application/json'}
        self.token = generate_token(b'buaa', 3600)
        # super().setUp()

    def _post(self, data, encode=True):
        """POST *data* to admin/create_statement and decode the JSON reply.

        Returns ``(response, body)``. With ``encode=False`` the payload is
        sent un-serialized to exercise the malformed-JSON error path.
        """
        payload = json.dumps(data) if encode else data
        r = self.app.post(
            'admin/create_statement',
            data=payload,
            headers=self.myheaders
        )
        return r, json.loads(r.data.decode('utf-8'))

    def _full_data(self, **overrides):
        """A fully valid request body; keyword args replace/extend fields."""
        data = {
            'response': '对话回复',
            'text': '对话内容',
            'username': 'wechatterbot',
            'token': self.token
        }
        data.update(overrides)
        return data

    def test_no_attribute(self):
        # Completely empty body -> missing-attribute error.
        r, result = self._post({})
        self.assertEqual(result['code'], 10000001)
        self.assertEqual(r.status_code, 400)

    def test_no_text(self):
        data = self._full_data()
        del data['text']
        r, result = self._post(data)
        self.assertEqual(result['code'], 10000001)
        self.assertEqual(r.status_code, 400)

    def test_no_response(self):
        data = self._full_data()
        del data['response']
        r, result = self._post(data)
        self.assertEqual(result['code'], 10000001)
        self.assertEqual(r.status_code, 400)

    def test_no_username(self):
        data = self._full_data()
        del data['username']
        r, result = self._post(data)
        self.assertEqual(result['code'], 10000001)
        self.assertEqual(r.status_code, 400)

    def test_wrong_json(self):
        # Raw (form-encoded) payload with a JSON content type -> parse error.
        r, result = self._post(self._full_data(), encode=False)
        self.assertEqual(result['code'], 10000041)
        self.assertEqual(r.status_code, 400)

    def test_token_check_fail(self):
        # Token was issued for a different username -> auth failure.
        r, result = self._post(self._full_data(username='wechatterwhat'))
        self.assertEqual(result['code'], 10000044)
        self.assertEqual(r.status_code, 401)

    def test_empty_text(self):
        r, result = self._post(self._full_data(text=''))
        self.assertEqual(result['code'], 10000045)
        self.assertEqual(r.status_code, 400)

    def test_empty_response(self):
        r, result = self._post(self._full_data(response=''))
        self.assertEqual(result['code'], 10000045)
        self.assertEqual(r.status_code, 400)

    def test_successful_creation(self):
        r, result = self._post(self._full_data())
        statement = result['statement']
        self.assertEqual(r.status_code, 200)
        self.assertEqual(result['code'], 1)
        self.assertEqual(statement['text'], "对话内容")

    def test_successful_with_tags(self):
        r, result = self._post(self._full_data(tags='test'))
        statement = result['statement']
        self.assertEqual(r.status_code, 200)
        self.assertEqual(result['code'], 1)
        self.assertEqual(statement['text'], "对话内容")
| 30.188571
| 61
| 0.526595
| 541
| 5,283
| 5.053604
| 0.144177
| 0.120702
| 0.029261
| 0.043892
| 0.795538
| 0.767008
| 0.767008
| 0.753841
| 0.740673
| 0.740673
| 0
| 0.03339
| 0.33674
| 5,283
| 174
| 62
| 30.362069
| 0.746861
| 0.017225
| 0
| 0.692308
| 1
| 0
| 0.143465
| 0.042537
| 0
| 0
| 0
| 0
| 0.141026
| 1
| 0.070513
| false
| 0
| 0.025641
| 0
| 0.102564
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
718c22405923f2b023b6cc58157a258263e3a382
| 42
|
py
|
Python
|
scraper/middlewares/selenium/__init__.py
|
otrenav/cfe-rates-scraping
|
9e3c7be2cc166e69b6db7fda3f6db841ff9579ea
|
[
"MIT"
] | null | null | null |
scraper/middlewares/selenium/__init__.py
|
otrenav/cfe-rates-scraping
|
9e3c7be2cc166e69b6db7fda3f6db841ff9579ea
|
[
"MIT"
] | null | null | null |
scraper/middlewares/selenium/__init__.py
|
otrenav/cfe-rates-scraping
|
9e3c7be2cc166e69b6db7fda3f6db841ff9579ea
|
[
"MIT"
] | null | null | null |
from .selenium import SeleniumMiddleware
| 14
| 40
| 0.857143
| 4
| 42
| 9
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 42
| 2
| 41
| 21
| 0.972973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
719f3b6dff37f4fbd1ccba3133a1424330041a82
| 195
|
py
|
Python
|
_test_projects/basics/file_blueprint.py
|
oren0e/cob
|
f2a5d74a15f5262d7980e4cf1f1a20af29194ffb
|
[
"BSD-3-Clause"
] | 2
|
2019-04-07T20:19:55.000Z
|
2021-05-27T10:23:31.000Z
|
_test_projects/basics/file_blueprint.py
|
oren0e/cob
|
f2a5d74a15f5262d7980e4cf1f1a20af29194ffb
|
[
"BSD-3-Clause"
] | 126
|
2016-08-10T19:59:45.000Z
|
2021-11-26T06:58:16.000Z
|
_test_projects/basics/file_blueprint.py
|
oren0e/cob
|
f2a5d74a15f5262d7980e4cf1f1a20af29194ffb
|
[
"BSD-3-Clause"
] | 6
|
2017-11-16T12:05:47.000Z
|
2021-11-24T09:21:17.000Z
|
# cob: type=blueprint mountpoint=/blueprints/file
from flask import Blueprint
# Blueprint picked up by the cob framework and mounted at the path declared
# in the magic comment above (do not edit that comment's format).
blueprint = Blueprint('file_blueprint', __name__)
@blueprint.route('/test')
def route():
    # Simple probe endpoint used by the test project.
    return 'this is file'
| 21.666667
| 49
| 0.748718
| 24
| 195
| 5.875
| 0.666667
| 0.255319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128205
| 195
| 8
| 50
| 24.375
| 0.829412
| 0.241026
| 0
| 0
| 0
| 0
| 0.212329
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 0.6
| 0.6
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
|
0
| 6
|
4619f63f39c7bb94b175da3d7d83adafc5147f72
| 39
|
py
|
Python
|
test/tests/__init__.py
|
HansBug/treevalue
|
6f2f5b2de00b04a06201a87ccee678ade9deff57
|
[
"Apache-2.0"
] | null | null | null |
test/tests/__init__.py
|
HansBug/treevalue
|
6f2f5b2de00b04a06201a87ccee678ade9deff57
|
[
"Apache-2.0"
] | 1
|
2021-07-24T13:30:14.000Z
|
2021-07-24T13:30:14.000Z
|
test/tests/__init__.py
|
HansBug/treevalue
|
6f2f5b2de00b04a06201a87ccee678ade9deff57
|
[
"Apache-2.0"
] | null | null | null |
from .utils import float_eq, eq_extend
| 19.5
| 38
| 0.820513
| 7
| 39
| 4.285714
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128205
| 39
| 1
| 39
| 39
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1cb5c302ae15b077421f72e8eda900bedb621fca
| 29
|
py
|
Python
|
agents/__init__.py
|
petros94/monte-carlo-gridworld
|
787a6fb42476e55d7411731fa17a6603333f65a9
|
[
"MIT"
] | null | null | null |
agents/__init__.py
|
petros94/monte-carlo-gridworld
|
787a6fb42476e55d7411731fa17a6603333f65a9
|
[
"MIT"
] | null | null | null |
agents/__init__.py
|
petros94/monte-carlo-gridworld
|
787a6fb42476e55d7411731fa17a6603333f65a9
|
[
"MIT"
] | null | null | null |
from agents.mc_agent import *
| 29
| 29
| 0.827586
| 5
| 29
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1cbc98a5f22158d8e060999c58f279f0eeeb8f9e
| 19
|
py
|
Python
|
htmotor/__init__.py
|
5elenay/htmotor
|
6e39046e979f51670adb85a7f6e736c2d9f7b97f
|
[
"MIT"
] | 5
|
2021-06-15T17:33:13.000Z
|
2021-08-14T21:43:24.000Z
|
htmotor/__init__.py
|
5elenay/htmotor
|
6e39046e979f51670adb85a7f6e736c2d9f7b97f
|
[
"MIT"
] | null | null | null |
htmotor/__init__.py
|
5elenay/htmotor
|
6e39046e979f51670adb85a7f6e736c2d9f7b97f
|
[
"MIT"
] | 1
|
2021-09-20T21:13:01.000Z
|
2021-09-20T21:13:01.000Z
|
from .html import *
| 19
| 19
| 0.736842
| 3
| 19
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 19
| 1
| 19
| 19
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1cdaa50ebd69b10aa636d9cec39829c0c21d5ac2
| 3,401
|
py
|
Python
|
mainService/mainService/apps/core_sample/migrations/0008_carbon_layer_disruption_layer_oil_layer_rock_layer.py
|
Godis715/Core-Sample-Analysis
|
892b3d322e9ce86dab0da9754b902b504b2e0d8b
|
[
"Apache-2.0"
] | 2
|
2019-09-18T10:59:21.000Z
|
2019-10-02T16:50:05.000Z
|
mainService/mainService/apps/core_sample/migrations/0008_carbon_layer_disruption_layer_oil_layer_rock_layer.py
|
Godis715/Core-Sample-Analysis
|
892b3d322e9ce86dab0da9754b902b504b2e0d8b
|
[
"Apache-2.0"
] | 78
|
2019-09-20T16:56:18.000Z
|
2022-03-12T00:04:37.000Z
|
mainService/mainService/apps/core_sample/migrations/0008_carbon_layer_disruption_layer_oil_layer_rock_layer.py
|
Godis715/Core-Sample-Analysis
|
892b3d322e9ce86dab0da9754b902b504b2e0d8b
|
[
"Apache-2.0"
] | 1
|
2019-10-03T20:49:34.000Z
|
2019-10-03T20:49:34.000Z
|
# Generated by Django 2.2.6 on 2019-10-22 18:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds one table per core-sample
    # property (rock / oil / disruption / carbon layers).  Each table stores
    # a depth interval (top/bottom), an integer class label, and a FK to the
    # owning core_sample.Markup row.  Do not hand-edit the operations;
    # verbose_name strings are Russian admin-UI labels and must stay as-is.
    dependencies = [
        ('core_sample', '0007_remove_markup_version'),
    ]
    operations = [
        # Lithology layer: class_label 1=siltstone, 2=sandstone, 3=clay.
        migrations.CreateModel(
            name='Rock_layer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('top', models.FloatField(verbose_name='Вверх')),
                ('bottom', models.FloatField(verbose_name='Низ')),
                ('class_label', models.IntegerField(choices=[(1, 'siltstone'), (2, 'sandstone'), (3, 'clay')], verbose_name='Класс')),
                ('markup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core_sample.Markup', verbose_name='Разметка')),
            ],
            options={
                'verbose_name': 'Слой породы',
                'verbose_name_plural': 'Слои породы',
            },
        ),
        # Oil-saturation layer: class_label 1=notDefined, 2=low, 3=high.
        migrations.CreateModel(
            name='Oil_layer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('top', models.FloatField(verbose_name='Вверх')),
                ('bottom', models.FloatField(verbose_name='Низ')),
                ('class_label', models.IntegerField(choices=[(1, 'notDefined'), (2, 'low'), (3, 'high')], verbose_name='Класс')),
                ('markup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core_sample.Markup', verbose_name='Разметка')),
            ],
            options={
                'verbose_name': 'Слой нефтенасыщенности',
                'verbose_name_plural': 'Слои нефтенасыщенности',
            },
        ),
        # Disruption layer: class_label 1=none, 2=low, 3=high.
        migrations.CreateModel(
            name='Disruption_layer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('top', models.FloatField(verbose_name='Вверх')),
                ('bottom', models.FloatField(verbose_name='Низ')),
                ('class_label', models.IntegerField(choices=[(1, 'none'), (2, 'low'), (3, 'high')], verbose_name='Класс')),
                ('markup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core_sample.Markup', verbose_name='Разметка')),
            ],
            options={
                'verbose_name': 'Слой разрушенности',
                'verbose_name_plural': 'Слои разрушенности',
            },
        ),
        # Carbonate layer: class_label 1=notDefined, 2=low, 3=high.
        migrations.CreateModel(
            name='Carbon_layer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('top', models.FloatField(verbose_name='Вверх')),
                ('bottom', models.FloatField(verbose_name='Низ')),
                ('class_label', models.IntegerField(choices=[(1, 'notDefined'), (2, 'low'), (3, 'high')], verbose_name='Класс')),
                ('markup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core_sample.Markup', verbose_name='Разметка')),
            ],
            options={
                'verbose_name': 'Слой карбонатности',
                'verbose_name_plural': 'Слои карбонатности',
            },
        ),
    ]
| 47.901408
| 141
| 0.565716
| 327
| 3,401
| 5.700306
| 0.232416
| 0.165236
| 0.098712
| 0.11588
| 0.722639
| 0.722639
| 0.722639
| 0.722639
| 0.722639
| 0.722639
| 0
| 0.012617
| 0.277565
| 3,401
| 70
| 142
| 48.585714
| 0.746032
| 0.013231
| 0
| 0.59375
| 1
| 0
| 0.205426
| 0.007752
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.03125
| 0
| 0.078125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1cf44b5b5dd37838b7e092bb842aab84879b162f
| 3,267
|
py
|
Python
|
util/svg_dict.py
|
widyaageng/Sudoku_auto
|
94b612fd3266cdd42d20973e98a89f90d664d57c
|
[
"BSD-2-Clause"
] | null | null | null |
util/svg_dict.py
|
widyaageng/Sudoku_auto
|
94b612fd3266cdd42d20973e98a89f90d664d57c
|
[
"BSD-2-Clause"
] | null | null | null |
util/svg_dict.py
|
widyaageng/Sudoku_auto
|
94b612fd3266cdd42d20973e98a89f90d664d57c
|
[
"BSD-2-Clause"
] | null | null | null |
# Map for SVG graphics from HTML
num_class = ['M8.954 30V3.545h-.267c-.738.41-6.706 4.655-7.71 5.311V5.883c.635-.41 6.767-4.758 7.977-5.476h2.789V30h-2.79z',
'M.12 9.57C.16 4.462 4.057.791 9.41.791c5.209 0 9.187 3.568 9.187 8.224 0 3.076-1.415 5.475-6.275 10.664l-7.998 8.53v.247h15.012V31H.284v-1.969L10.62 17.854c4.122-4.43 5.168-6.193 5.168-8.736 0-3.302-2.81-5.865-6.44-5.865-3.814 0-6.46 2.584-6.5 6.316v.02H.12v-.02z',
'M6.698 16.932v-2.42h3.466c3.814 0 6.46-2.338 6.46-5.722 0-3.22-2.646-5.537-6.317-5.537-3.67 0-6.255 2.174-6.542 5.537H1.038C1.366 3.95 5.037.792 10.41.792c5.045 0 9.064 3.404 9.064 7.67 0 3.568-2.05 6.173-5.496 6.932v.266c4.225.472 6.85 3.281 6.85 7.342 0 4.86-4.491 8.613-10.295 8.613-5.722 0-9.926-3.404-10.11-8.182H3.13c.246 3.322 3.322 5.721 7.382 5.721 4.286 0 7.424-2.645 7.424-6.214 0-3.711-2.912-6.008-7.65-6.008H6.699z',
'M15.855 30v-6.686H.987v-2.563C3.633 16.281 7.283 10.6 14.563.366h4.02v20.426h4.43v2.522h-4.43V30h-2.728zM3.92 20.628v.184h11.935V3.052h-.184C10.03 10.744 7.099 15.338 3.92 20.629z',
'M10.553 30.615c-5.373 0-9.474-3.445-9.782-8.264H3.52c.308 3.322 3.322 5.783 7.055 5.783 4.327 0 7.424-3.097 7.424-7.445 0-4.347-3.097-7.444-7.363-7.444-2.912 0-5.496 1.415-6.747 3.692H1.222l1.6-16.53h16.14V2.93H5.037l-.985 10.787h.267c1.415-1.846 3.876-2.912 6.768-2.912 5.68 0 9.72 4.08 9.72 9.802 0 5.866-4.245 10.008-10.254 10.008z',
'M10.964 31.595c-4 0-7.158-1.99-9.003-5.64C.648 23.638-.01 20.582-.01 16.83-.008 6.76 4.135.792 11.17.792c4.901 0 8.613 2.953 9.454 7.567h-2.871c-.739-3.076-3.323-5.045-6.624-5.045-5.312 0-8.347 4.963-8.409 13.74h.246c1.292-3.322 4.553-5.454 8.43-5.454 5.618 0 9.76 4.183 9.76 9.843 0 5.886-4.285 10.152-10.191 10.152zm-.041-2.482c4.204 0 7.403-3.281 7.403-7.567 0-4.368-3.097-7.506-7.383-7.506-4.225 0-7.485 3.158-7.485 7.3 0 4.41 3.24 7.773 7.465 7.773z',
'M3.017 30L16.696 3.155V2.93H.29V.407h19.277v2.625L6.01 30z',
'M10.533 31.615c-6.193 0-10.48-3.527-10.48-8.593 0-3.834 2.584-6.87 6.46-7.567v-.246c-3.22-.759-5.311-3.343-5.311-6.583 0-4.573 3.876-7.834 9.33-7.834 5.456 0 9.332 3.24 9.332 7.834 0 3.22-2.071 5.804-5.291 6.583v.246c3.855.697 6.46 3.732 6.46 7.567 0 5.086-4.286 8.593-10.5 8.593zm0-2.42c4.532 0 7.67-2.604 7.67-6.357 0-3.671-3.117-6.173-7.67-6.173-4.532 0-7.65 2.523-7.65 6.173 0 3.753 3.118 6.357 7.65 6.357zm0-14.95c3.896 0 6.562-2.174 6.562-5.393 0-3.343-2.666-5.64-6.562-5.64-3.897 0-6.563 2.297-6.563 5.64 0 3.199 2.666 5.393 6.563 5.393z',
'M10.897 31.595c-4.983 0-8.613-2.974-9.454-7.547h2.871c.718 3.015 3.22 5.045 6.624 5.045 5.23 0 8.203-4.779 8.408-13.064.02-.205-.102-.471-.123-.676H19.1c-1.271 3.26-4.552 5.434-8.428 5.434-5.66 0-9.762-4.163-9.762-9.803C.91 5.1 5.175.792 11.102.792c4 0 7.157 2.01 9.003 5.68 1.313 2.298 1.969 5.333 1.969 9.106 0 10.028-4.102 16.017-11.177 16.017zm.226-13.248c4.245 0 7.485-3.2 7.485-7.28 0-4.39-3.22-7.794-7.465-7.794-4.224 0-7.403 3.302-7.403 7.63 0 4.285 3.035 7.444 7.383 7.444z']
# Digits 1..9, paired with the SVG path strings in num_class (same order),
# yielding forward and reverse lookup tables.
idx_list = list(range(1, 10))
num_to_svg = {i: path for i, path in zip(idx_list, num_class)}
svg_to_num = {path: i for path, i in zip(num_class, idx_list)}
def get_num_to_svg(num):
    """Return the SVG path data for digit *num* (1-9); KeyError otherwise."""
    path = num_to_svg[num]
    return path
def get_svg_to_num(svg):
    """Return the digit (1-9) whose glyph is the SVG path *svg*; KeyError otherwise."""
    digit = svg_to_num[svg]
    return digit
| 155.571429
| 560
| 0.657484
| 951
| 3,267
| 2.237645
| 0.384858
| 0.010338
| 0.005639
| 0.006579
| 0.028195
| 0.012218
| 0.012218
| 0
| 0
| 0
| 0
| 0.639539
| 0.123661
| 3,267
| 21
| 561
| 155.571429
| 0.103737
| 0.009183
| 0
| 0
| 0
| 0.5625
| 0.880408
| 0.322312
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0.125
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
1cf6c94250898b4e09aa80a4bc438cfbc8755fd5
| 1,205
|
py
|
Python
|
CCV/scripts/img_resize.py
|
YCJGG/Partial-video-retrieval
|
65eec9c87cd18e70103c42918c49e0552ec6cc21
|
[
"MIT"
] | 2
|
2018-09-08T11:54:10.000Z
|
2018-10-09T13:48:09.000Z
|
CCV/scripts/img_resize.py
|
YCJGG/Partial-video-retrieval
|
65eec9c87cd18e70103c42918c49e0552ec6cc21
|
[
"MIT"
] | null | null | null |
CCV/scripts/img_resize.py
|
YCJGG/Partial-video-retrieval
|
65eec9c87cd18e70103c42918c49e0552ec6cc21
|
[
"MIT"
] | null | null | null |
from scipy import misc
import multiprocessing as mp
import glob
import os
# Root directory containing one sub-folder of extracted frames per video.
frame_root = '../test_frames'
# One entry per video folder; processed in parallel by the pool below.
folder_list = glob.glob(frame_root+'/*')
def fun(folder):
    """Resize every .jpg in *folder* in place so one side becomes 112 px.

    Frames already at 112 on the checked side are skipped.  The scale factor
    comes from the shorter dimension; the other dimension gets ``* scale + 1``
    as in the original code.
    NOTE(review): the target (height, width) tuples look swapped relative to
    the branch condition — preserved as-is, confirm against training code.
    NOTE(review): scipy.misc.imread/imresize/imsave were removed in modern
    SciPy; this script targets the legacy SciPy / Python 2 stack.
    """
    # Fix: parenthesized print works on both Python 2 and Python 3
    # (the bare `print folder` statement is a syntax error on Python 3).
    print(folder)
    img_list = glob.glob(folder+'/*.jpg')
    for img_name in img_list:
        img = misc.imread(img_name)
        if img.shape[1]>img.shape[0]:
            # Landscape frame (width > height).
            if img.shape[1] == 112:
                continue
            scale = float(112/float(img.shape[0]))
            img = misc.imresize(img,(int(img.shape[0] * scale + 1), 112))
        else:
            # Portrait or square frame.
            if img.shape[0] == 112:
                continue
            scale = float(112/float(img.shape[1]))
            img = misc.imresize(img,(112, int(img.shape[1] * scale + 1)))
        misc.imsave(img_name, img)
"""
for folder in folder_list:
print folder
img_list = glob.glob(folder+'/*.jpg')
for img_name in img_list:
img = misc.imread(img_name)
if img.shape[1]>img.shape[0]:
if img.shape[1] == 112:
continue
scale = float(112/float(img.shape[0]))
img = misc.imresize(img,(int(img.shape[0] * scale + 1), 112))
else:
if img.shape[0] == 112:
continue
scale = float(112/float(img.shape[1]))
img = misc.imresize(img,(112, int(img.shape[1] * scale + 1)))
misc.imsave(img_name, img)
"""
# Fan the per-folder resize out over 15 worker processes.
# NOTE(review): this runs at import time with no `if __name__ == '__main__':`
# guard — fine on fork-based POSIX Python 2, but would re-spawn endlessly on
# spawn-based platforms (Windows); confirm the script is only run directly.
pool = mp.Pool(processes=15)
pool.map(fun, folder_list)
| 26.195652
| 64
| 0.652282
| 200
| 1,205
| 3.85
| 0.2
| 0.166234
| 0.093506
| 0.057143
| 0.763636
| 0.763636
| 0.763636
| 0.763636
| 0.763636
| 0.763636
| 0
| 0.058233
| 0.173444
| 1,205
| 45
| 65
| 26.777778
| 0.714859
| 0
| 0
| 0.083333
| 0
| 0
| 0.031519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.166667
| null | null | 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1cf8123070639be0b35afd4e181333d79a9527a4
| 456
|
py
|
Python
|
Script/deprecated/FishEditor/AssetImporter.py
|
yushroom/FishEngine_-Experiment
|
81e4c06f20f6b94dc561b358f8a11a092678aeeb
|
[
"MIT"
] | 1
|
2018-12-20T02:38:44.000Z
|
2018-12-20T02:38:44.000Z
|
Script/deprecated/FishEditor/AssetImporter.py
|
yushroom/FishEngine_-Experiment
|
81e4c06f20f6b94dc561b358f8a11a092678aeeb
|
[
"MIT"
] | null | null | null |
Script/deprecated/FishEditor/AssetImporter.py
|
yushroom/FishEngine_-Experiment
|
81e4c06f20f6b94dc561b358f8a11a092678aeeb
|
[
"MIT"
] | 1
|
2018-10-25T19:40:22.000Z
|
2018-10-25T19:40:22.000Z
|
class AssetImporter:
    """Abstract base for asset importers (deprecated FishEditor API).

    Concrete importers are expected to override :meth:`Create`,
    :meth:`GetAtPath` and :attr:`assetPath`; the base versions simply raise
    ``NotImplementedError``.
    """

    def __init__(self):
        # No state of its own; subclasses hold the importer data.
        pass

    @staticmethod
    def Create(path: str) -> 'AssetImporter':
        """Create a new importer for *path* (abstract)."""
        raise NotImplementedError

    @property
    def assetPath(self) -> str:
        """Project-relative path of the imported asset (abstract)."""
        raise NotImplementedError

    def SaveAndReimport(self):
        """Persist importer settings, then force a re-import of the asset."""
        # Imported lazily to avoid a circular import at module load time.
        from . import AssetDataBase
        AssetDataBase.ImportAsset(self.assetPath)

    @staticmethod
    def GetAtPath(path: str) -> 'AssetImporter':
        """Return the importer associated with *path* (abstract)."""
        raise NotImplementedError
| 24
| 49
| 0.664474
| 39
| 456
| 7.666667
| 0.512821
| 0.240803
| 0.133779
| 0.167224
| 0.294314
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.256579
| 456
| 19
| 50
| 24
| 0.882006
| 0
| 0
| 0.333333
| 0
| 0
| 0.056893
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.066667
| 0.4
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
1c27922d6b049cac7c323d3fd180bf4d862bd474
| 307
|
py
|
Python
|
skfem/element/element_tri/__init__.py
|
carlosal1015/scikit-fem
|
1e73a417e9b43fe0a36e29807792c41fa289b77d
|
[
"BSD-3-Clause"
] | null | null | null |
skfem/element/element_tri/__init__.py
|
carlosal1015/scikit-fem
|
1e73a417e9b43fe0a36e29807792c41fa289b77d
|
[
"BSD-3-Clause"
] | null | null | null |
skfem/element/element_tri/__init__.py
|
carlosal1015/scikit-fem
|
1e73a417e9b43fe0a36e29807792c41fa289b77d
|
[
"BSD-3-Clause"
] | null | null | null |
from .element_tri_p1 import ElementTriP1
from .element_tri_p2 import ElementTriP2
from .element_tri_dg import ElementTriDG
from .element_tri_p0 import ElementTriP0
from .element_tri_rt0 import ElementTriRT0
from .element_tri_morley import ElementTriMorley
from .element_tri_argyris import ElementTriArgyris
| 38.375
| 50
| 0.885993
| 42
| 307
| 6.142857
| 0.428571
| 0.29845
| 0.379845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028674
| 0.091205
| 307
| 7
| 51
| 43.857143
| 0.896057
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1c28c1f144372b74db75af034c1a5c109cdc8cea
| 92,810
|
py
|
Python
|
pennylane/ops/qubit/parametric_ops.py
|
MoritzWillmann/pennylane
|
2b07d22cfcc6406ba28e5c647062340b240a4ee5
|
[
"Apache-2.0"
] | null | null | null |
pennylane/ops/qubit/parametric_ops.py
|
MoritzWillmann/pennylane
|
2b07d22cfcc6406ba28e5c647062340b240a4ee5
|
[
"Apache-2.0"
] | null | null | null |
pennylane/ops/qubit/parametric_ops.py
|
MoritzWillmann/pennylane
|
2b07d22cfcc6406ba28e5c647062340b240a4ee5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-arguments
"""
This submodule contains the discrete-variable quantum operations that are the
core parameterized gates.
"""
# pylint:disable=abstract-method,arguments-differ,protected-access,invalid-overridden-method
import functools
import math
from operator import matmul
import numpy as np
import pennylane as qml
from pennylane.operation import AnyWires, Operation
from pennylane.ops.qubit.non_parametric_ops import PauliX, PauliY, PauliZ, Hadamard
from pennylane.operation import expand_matrix
from pennylane.utils import pauli_eigs
from pennylane.wires import Wires
# 1/sqrt(2); precomputed module constant.
INV_SQRT2 = 1 / math.sqrt(2)
# Stack tensors along the last axis; used to assemble matrix rows so that
# broadcasting over batched parameters works.
stack_last = functools.partial(qml.math.stack, axis=-1)
class RX(Operation):
    r"""
    The single qubit X rotation
    .. math:: R_x(\phi) = e^{-i\phi\sigma_x/2} = \begin{bmatrix}
                \cos(\phi/2) & -i\sin(\phi/2) \\
                -i\sin(\phi/2) & \cos(\phi/2)
            \end{bmatrix}.
    **Details:**
    * Number of wires: 1
    * Number of parameters: 1
    * Number of dimensions per parameter: (0,)
    * Gradient recipe: :math:`\frac{d}{d\phi}f(R_x(\phi)) = \frac{1}{2}\left[f(R_x(\phi+\pi/2)) - f(R_x(\phi-\pi/2))\right]`
      where :math:`f` is an expectation value depending on :math:`R_x(\phi)`.
    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int] or int): the wire the operation acts on
        do_queue (bool): Indicates whether the operator should be
            immediately pushed into the Operator queue (optional)
        id (str or None): String representing the operation (optional)
    """
    num_wires = 1
    num_params = 1
    """int: Number of trainable parameters that the operator depends on."""
    ndim_params = (0,)
    """tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
    basis = "X"
    grad_method = "A"
    parameter_frequencies = [(1,)]
    def generator(self):
        # Generator G such that RX(phi) ~ exp(-i * phi * G): here -X/2.
        return -0.5 * PauliX(wires=self.wires)
    def __init__(self, phi, wires, do_queue=True, id=None):
        super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
    @staticmethod
    def compute_matrix(theta): # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.RX.matrix`
        Args:
            theta (tensor_like or float): rotation angle
        Returns:
            tensor_like: canonical matrix
        **Example**
        >>> qml.RX.compute_matrix(torch.tensor(0.5))
        tensor([[0.9689+0.0000j, 0.0000-0.2474j],
                [0.0000-0.2474j, 0.9689+0.0000j]])
        """
        c = qml.math.cos(theta / 2)
        s = qml.math.sin(theta / 2)
        # TensorFlow will not implicitly promote real -> complex, so cast first.
        if qml.math.get_interface(theta) == "tensorflow":
            c = qml.math.cast_like(c, 1j)
            s = qml.math.cast_like(s, 1j)
        # The following avoids casting an imaginary quantity to reals when backpropagating
        c = (1 + 0j) * c
        js = -1j * s
        # stack_last keeps any batch dimension of theta leading; rows stacked on axis -2.
        return qml.math.stack([stack_last([c, js]), stack_last([js, c])], axis=-2)
    def adjoint(self):
        # RX(phi)^dagger = RX(-phi).
        return RX(-self.data[0], wires=self.wires)
    def pow(self, z):
        # RX(phi)^z = RX(z * phi).
        return [RX(self.data[0] * z, wires=self.wires)]
    def _controlled(self, wire):
        # NOTE(review): the CRX is queued as a side effect of instantiation;
        # no explicit return — presumably intended, matches RY/RZ below.
        CRX(*self.parameters, wires=wire + self.wires)
    def single_qubit_rot_angles(self):
        # RX(\theta) = RZ(-\pi/2) RY(\theta) RZ(\pi/2)
        pi_half = qml.math.ones_like(self.data[0]) * (np.pi / 2)
        return [pi_half, self.data[0], -pi_half]
class RY(Operation):
    r"""
    The single qubit Y rotation
    .. math:: R_y(\phi) = e^{-i\phi\sigma_y/2} = \begin{bmatrix}
                \cos(\phi/2) & -\sin(\phi/2) \\
                \sin(\phi/2) & \cos(\phi/2)
            \end{bmatrix}.
    **Details:**
    * Number of wires: 1
    * Number of parameters: 1
    * Number of dimensions per parameter: (0,)
    * Gradient recipe: :math:`\frac{d}{d\phi}f(R_y(\phi)) = \frac{1}{2}\left[f(R_y(\phi+\pi/2)) - f(R_y(\phi-\pi/2))\right]`
      where :math:`f` is an expectation value depending on :math:`R_y(\phi)`.
    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int] or int): the wire the operation acts on
        do_queue (bool): Indicates whether the operator should be
            immediately pushed into the Operator queue (optional)
        id (str or None): String representing the operation (optional)
    """
    num_wires = 1
    num_params = 1
    """int: Number of trainable parameters that the operator depends on."""
    ndim_params = (0,)
    """tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
    basis = "Y"
    grad_method = "A"
    parameter_frequencies = [(1,)]
    def generator(self):
        # Generator G such that RY(phi) ~ exp(-i * phi * G): here -Y/2.
        return -0.5 * PauliY(wires=self.wires)
    def __init__(self, phi, wires, do_queue=True, id=None):
        super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
    @staticmethod
    def compute_matrix(theta): # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.RY.matrix`
        Args:
            theta (tensor_like or float): rotation angle
        Returns:
            tensor_like: canonical matrix
        **Example**
        >>> qml.RY.compute_matrix(torch.tensor(0.5))
        tensor([[ 0.9689, -0.2474],
                [ 0.2474, 0.9689]])
        """
        c = qml.math.cos(theta / 2)
        s = qml.math.sin(theta / 2)
        # TensorFlow will not implicitly promote real -> complex, so cast first.
        if qml.math.get_interface(theta) == "tensorflow":
            c = qml.math.cast_like(c, 1j)
            s = qml.math.cast_like(s, 1j)
        # The following avoids casting an imaginary quantity to reals when backpropagating
        c = (1 + 0j) * c
        s = (1 + 0j) * s
        # Real-valued rotation matrix; rows stacked along axis -2 to respect batching.
        return qml.math.stack([stack_last([c, -s]), stack_last([s, c])], axis=-2)
    def adjoint(self):
        # RY(phi)^dagger = RY(-phi).
        return RY(-self.data[0], wires=self.wires)
    def pow(self, z):
        # RY(phi)^z = RY(z * phi).
        return [RY(self.data[0] * z, wires=self.wires)]
    def _controlled(self, wire):
        # Queued as a side effect of instantiation, like RX._controlled.
        CRY(*self.parameters, wires=wire + self.wires)
    def single_qubit_rot_angles(self):
        # RY(\theta) = RZ(0) RY(\theta) RZ(0)
        return [0.0, self.data[0], 0.0]
class RZ(Operation):
    r"""
    The single qubit Z rotation
    .. math:: R_z(\phi) = e^{-i\phi\sigma_z/2} = \begin{bmatrix}
                e^{-i\phi/2} & 0 \\
                0 & e^{i\phi/2}
            \end{bmatrix}.
    **Details:**
    * Number of wires: 1
    * Number of parameters: 1
    * Number of dimensions per parameter: (0,)
    * Gradient recipe: :math:`\frac{d}{d\phi}f(R_z(\phi)) = \frac{1}{2}\left[f(R_z(\phi+\pi/2)) - f(R_z(\phi-\pi/2))\right]`
      where :math:`f` is an expectation value depending on :math:`R_z(\phi)`.
    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int] or int): the wire the operation acts on
        do_queue (bool): Indicates whether the operator should be
            immediately pushed into the Operator queue (optional)
        id (str or None): String representing the operation (optional)
    """
    num_wires = 1
    num_params = 1
    """int: Number of trainable parameters that the operator depends on."""
    ndim_params = (0,)
    """tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
    basis = "Z"
    grad_method = "A"
    parameter_frequencies = [(1,)]
    def generator(self):
        # Generator G such that RZ(phi) ~ exp(-i * phi * G): here -Z/2.
        return -0.5 * PauliZ(wires=self.wires)
    def __init__(self, phi, wires, do_queue=True, id=None):
        super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
    @staticmethod
    def compute_matrix(theta): # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.RZ.matrix`
        Args:
            theta (tensor_like or float): rotation angle
        Returns:
            tensor_like: canonical matrix
        **Example**
        >>> qml.RZ.compute_matrix(torch.tensor(0.5))
        tensor([[0.9689-0.2474j, 0.0000+0.0000j],
                [0.0000+0.0000j, 0.9689+0.2474j]])
        """
        # Diagonal gate: only the phases e^{-i theta/2}, e^{+i theta/2} are needed.
        if qml.math.get_interface(theta) == "tensorflow":
            theta = qml.math.cast_like(theta, 1j)
        p = qml.math.exp(-0.5j * theta)
        z = qml.math.zeros_like(p)
        return qml.math.stack([stack_last([p, z]), stack_last([z, qml.math.conj(p)])], axis=-2)
    @staticmethod
    def compute_eigvals(theta): # pylint: disable=arguments-differ
        r"""Eigenvalues of the operator in the computational basis (static method).
        If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
        the operator can be reconstructed as
        .. math:: O = U \Sigma U^{\dagger},
        where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
        Otherwise, no particular order for the eigenvalues is guaranteed.
        .. seealso:: :meth:`~.RZ.eigvals`
        Args:
            theta (tensor_like or float): rotation angle
        Returns:
            tensor_like: eigenvalues
        **Example**
        >>> qml.RZ.compute_eigvals(torch.tensor(0.5))
        tensor([0.9689-0.2474j, 0.9689+0.2474j])
        """
        # Same phases as the diagonal of compute_matrix, without building the matrix.
        if qml.math.get_interface(theta) == "tensorflow":
            theta = qml.math.cast_like(theta, 1j)
        p = qml.math.exp(-0.5j * theta)
        return stack_last([p, qml.math.conj(p)])
    def adjoint(self):
        # RZ(phi)^dagger = RZ(-phi).
        return RZ(-self.data[0], wires=self.wires)
    def pow(self, z):
        # RZ(phi)^z = RZ(z * phi).
        return [RZ(self.data[0] * z, wires=self.wires)]
    def _controlled(self, wire):
        # Queued as a side effect of instantiation, like RX._controlled.
        CRZ(*self.parameters, wires=wire + self.wires)
    def single_qubit_rot_angles(self):
        # RZ(\theta) = RZ(\theta) RY(0) RZ(0)
        return [self.data[0], 0.0, 0.0]
class PhaseShift(Operation):
    r"""
    Arbitrary single qubit local phase shift

    .. math:: R_\phi(\phi) = e^{i\phi/2}R_z(\phi) = \begin{bmatrix}
                1 & 0 \\
                0 & e^{i\phi}
            \end{bmatrix}.

    **Details:**

    * Number of wires: 1
    * Number of parameters: 1
    * Number of dimensions per parameter: (0,)
    * Gradient recipe: :math:`\frac{d}{d\phi}f(R_\phi(\phi)) = \frac{1}{2}\left[f(R_\phi(\phi+\pi/2)) - f(R_\phi(\phi-\pi/2))\right]`
      where :math:`f` is an expectation value depending on :math:`R_{\phi}(\phi)`.

    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int] or int): the wire the operation acts on
        do_queue (bool): Indicates whether the operator should be
            immediately pushed into the Operator queue (optional)
        id (str or None): String representing the operation (optional)
    """
    num_wires = 1
    num_params = 1
    """int: Number of trainable parameters that the operator depends on."""

    ndim_params = (0,)
    """tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""

    basis = "Z"
    grad_method = "A"
    parameter_frequencies = [(1,)]

    def generator(self):
        # The generator is the projector onto |1>, since PhaseShift only
        # multiplies the |1> amplitude by exp(i*phi).
        return qml.Projector(np.array([1]), wires=self.wires)

    def __init__(self, phi, wires, do_queue=True, id=None):
        super().__init__(phi, wires=wires, do_queue=do_queue, id=id)

    def label(self, decimals=None, base_label=None, cache=None):
        """Return the drawing label, defaulting the gate symbol to ``"Rϕ"``."""
        return super().label(decimals=decimals, base_label=base_label or "Rϕ", cache=cache)

    @staticmethod
    def compute_matrix(phi):  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).

        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.

        .. seealso:: :meth:`~.PhaseShift.matrix`

        Args:
            phi (tensor_like or float): phase shift

        Returns:
            tensor_like: canonical matrix

        **Example**

        >>> qml.PhaseShift.compute_matrix(torch.tensor(0.5))
        tensor([[0.9689-0.2474j, 0.0000+0.0000j],
                [0.0000+0.0000j, 0.9689+0.2474j]])
        """
        if qml.math.get_interface(phi) == "tensorflow":
            # TensorFlow does not implicitly promote real dtypes to complex; cast up front.
            phi = qml.math.cast_like(phi, 1j)
        p = qml.math.exp(1j * phi)
        z = qml.math.zeros_like(p)
        # stack_last preserves any leading broadcast (batch) dimensions of ``phi``.
        return qml.math.stack([stack_last([qml.math.ones_like(p), z]), stack_last([z, p])], axis=-2)

    @staticmethod
    def compute_eigvals(phi):  # pylint: disable=arguments-differ
        r"""Eigenvalues of the operator in the computational basis (static method).

        If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
        the operator can be reconstructed as

        .. math:: O = U \Sigma U^{\dagger},

        where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
        Otherwise, no particular order for the eigenvalues is guaranteed.

        .. seealso:: :meth:`~.PhaseShift.eigvals`

        Args:
            phi (tensor_like or float): phase shift

        Returns:
            tensor_like: eigenvalues

        **Example**

        >>> qml.PhaseShift.compute_eigvals(torch.tensor(0.5))
        tensor([1.0000+0.0000j, 0.8776+0.4794j])
        """
        if qml.math.get_interface(phi) == "tensorflow":
            phi = qml.math.cast_like(phi, 1j)
        p = qml.math.exp(1j * phi)
        return stack_last([qml.math.ones_like(p), p])

    @staticmethod
    def compute_decomposition(phi, wires):
        r"""Representation of the operator as a product of other operators (static method). :

        .. math:: O = O_1 O_2 \dots O_n.

        .. seealso:: :meth:`~.PhaseShift.decomposition`.

        Args:
            phi (float): rotation angle :math:`\phi`
            wires (Any, Wires): wires that the operator acts on

        Returns:
            list[Operator]: decomposition into lower level operations

        **Example:**

        >>> qml.PhaseShift.compute_decomposition(1.234, wires=0)
        [RZ(1.234, wires=[0])]
        """
        # PhaseShift equals RZ up to a global phase, which is irrelevant here.
        return [RZ(phi, wires=wires)]

    def adjoint(self):
        """Return the adjoint (inverse) operation, ``PhaseShift(-phi)``."""
        return PhaseShift(-self.data[0], wires=self.wires)

    def pow(self, z):
        """Return ``PhaseShift(phi) ** z`` as a single gate with the angle scaled by ``z``."""
        return [PhaseShift(self.data[0] * z, wires=self.wires)]

    def _controlled(self, wire):
        # Queue the controlled version; ``wire`` becomes the control wire.
        ControlledPhaseShift(*self.parameters, wires=wire + self.wires)

    def single_qubit_rot_angles(self):
        # PhaseShift(\theta) = RZ(\theta) RY(0) RZ(0)
        return [self.data[0], 0.0, 0.0]
class ControlledPhaseShift(Operation):
    r"""
    A qubit controlled phase shift.

    .. math:: CR_\phi(\phi) = \begin{bmatrix}
                1 & 0 & 0 & 0 \\
                0 & 1 & 0 & 0 \\
                0 & 0 & 1 & 0 \\
                0 & 0 & 0 & e^{i\phi}
            \end{bmatrix}.

    .. note:: The first wire provided corresponds to the **control qubit**.

    **Details:**

    * Number of wires: 2
    * Number of parameters: 1
    * Number of dimensions per parameter: (0,)
    * Gradient recipe: :math:`\frac{d}{d\phi}f(CR_\phi(\phi)) = \frac{1}{2}\left[f(CR_\phi(\phi+\pi/2)) - f(CR_\phi(\phi-\pi/2))\right]`
      where :math:`f` is an expectation value depending on :math:`CR_{\phi}(\phi)`.

    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int]): the wire the operation acts on
        do_queue (bool): Indicates whether the operator should be
            immediately pushed into the Operator queue (optional)
        id (str or None): String representing the operation (optional)
    """
    num_wires = 2
    num_params = 1
    """int: Number of trainable parameters that the operator depends on."""

    ndim_params = (0,)
    """tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""

    basis = "Z"
    grad_method = "A"
    parameter_frequencies = [(1,)]

    def generator(self):
        # Projector onto |11>: only that basis state picks up the phase exp(i*phi).
        return qml.Projector(np.array([1, 1]), wires=self.wires)

    def __init__(self, phi, wires, do_queue=True, id=None):
        super().__init__(phi, wires=wires, do_queue=do_queue, id=id)

    def label(self, decimals=None, base_label=None, cache=None):
        """Return the drawing label, defaulting the gate symbol to ``"Rϕ"``."""
        return super().label(decimals=decimals, base_label=base_label or "Rϕ", cache=cache)

    @staticmethod
    def compute_matrix(phi):  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).

        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.

        .. seealso:: :meth:`~.ControlledPhaseShift.matrix`

        Args:
            phi (tensor_like or float): phase shift

        Returns:
            tensor_like: canonical matrix

        **Example**

        >>> qml.ControlledPhaseShift.compute_matrix(torch.tensor(0.5))
        tensor([[1.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.0000+0.0000j],
                [0.0+0.0j, 1.0+0.0j, 0.0+0.0j, 0.0000+0.0000j],
                [0.0+0.0j, 0.0+0.0j, 1.0+0.0j, 0.0000+0.0000j],
                [0.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.8776+0.4794j]])
        """
        if qml.math.get_interface(phi) == "tensorflow":
            # TensorFlow does not implicitly promote real dtypes to complex; cast up front.
            phi = qml.math.cast_like(phi, 1j)
        exp_part = qml.math.exp(1j * phi)
        if qml.math.ndim(phi) > 0:
            # Broadcasted (batched) parameter: build each row with stack_last so the
            # batch dimension is preserved; ``axis=-2`` stacks rows into matrices.
            ones = qml.math.ones_like(exp_part)
            zeros = qml.math.zeros_like(exp_part)
            matrix = [
                [ones, zeros, zeros, zeros],
                [zeros, ones, zeros, zeros],
                [zeros, zeros, ones, zeros],
                [zeros, zeros, zeros, exp_part],
            ]
            return qml.math.stack([stack_last(row) for row in matrix], axis=-2)
        # Scalar parameter: the matrix is simply diagonal.
        return qml.math.diag([1, 1, 1, exp_part])

    @staticmethod
    def compute_eigvals(phi):  # pylint: disable=arguments-differ
        r"""Eigenvalues of the operator in the computational basis (static method).

        If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
        the operator can be reconstructed as

        .. math:: O = U \Sigma U^{\dagger},

        where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
        Otherwise, no particular order for the eigenvalues is guaranteed.

        .. seealso:: :meth:`~.ControlledPhaseShift.eigvals`

        Args:
            phi (tensor_like or float): phase shift

        Returns:
            tensor_like: eigenvalues

        **Example**

        >>> qml.ControlledPhaseShift.compute_eigvals(torch.tensor(0.5))
        tensor([1.0000+0.0000j, 1.0000+0.0000j, 1.0000+0.0000j, 0.8776+0.4794j])
        """
        if qml.math.get_interface(phi) == "tensorflow":
            phi = qml.math.cast_like(phi, 1j)
        exp_part = qml.math.exp(1j * phi)
        ones = qml.math.ones_like(exp_part)
        return stack_last([ones, ones, ones, exp_part])

    @staticmethod
    def compute_decomposition(phi, wires):
        r"""Representation of the operator as a product of other operators (static method). :

        .. math:: O = O_1 O_2 \dots O_n.

        .. seealso:: :meth:`~.ControlledPhaseShift.decomposition`.

        Args:
            phi (float): rotation angle :math:`\phi`
            wires (Iterable, Wires): wires that the operator acts on

        Returns:
            list[Operator]: decomposition into lower level operations

        **Example:**

        >>> qml.ControlledPhaseShift.compute_decomposition(1.234, wires=(0,1))
        [PhaseShift(0.617, wires=[0]),
        CNOT(wires=[0, 1]),
        PhaseShift(-0.617, wires=[1]),
        CNOT(wires=[0, 1]),
        PhaseShift(0.617, wires=[1])]
        """
        # Standard textbook decomposition: half-angle phase shifts conjugated by CNOTs.
        decomp_ops = [
            qml.PhaseShift(phi / 2, wires=wires[0]),
            qml.CNOT(wires=wires),
            qml.PhaseShift(-phi / 2, wires=wires[1]),
            qml.CNOT(wires=wires),
            qml.PhaseShift(phi / 2, wires=wires[1]),
        ]
        return decomp_ops

    def adjoint(self):
        """Return the adjoint (inverse) operation, ``ControlledPhaseShift(-phi)``."""
        return ControlledPhaseShift(-self.data[0], wires=self.wires)

    def pow(self, z):
        """Return the gate raised to power ``z`` as a single gate with a scaled angle."""
        return [ControlledPhaseShift(self.data[0] * z, wires=self.wires)]

    @property
    def control_wires(self):
        """Wires: the control wire (the first wire supplied)."""
        return Wires(self.wires[0])
# Shorthand alias: ``qml.CPhase`` refers to the same operation as ``qml.ControlledPhaseShift``.
CPhase = ControlledPhaseShift
class Rot(Operation):
    r"""
    Arbitrary single qubit rotation

    .. math::

        R(\phi,\theta,\omega) = RZ(\omega)RY(\theta)RZ(\phi)= \begin{bmatrix}
        e^{-i(\phi+\omega)/2}\cos(\theta/2) & -e^{i(\phi-\omega)/2}\sin(\theta/2) \\
        e^{-i(\phi-\omega)/2}\sin(\theta/2) & e^{i(\phi+\omega)/2}\cos(\theta/2)
        \end{bmatrix}.

    **Details:**

    * Number of wires: 1
    * Number of parameters: 3
    * Number of dimensions per parameter: (0, 0, 0)
    * Gradient recipe: :math:`\frac{d}{d\phi}f(R(\phi, \theta, \omega)) = \frac{1}{2}\left[f(R(\phi+\pi/2, \theta, \omega)) - f(R(\phi-\pi/2, \theta, \omega))\right]`
      where :math:`f` is an expectation value depending on :math:`R(\phi, \theta, \omega)`.
      This gradient recipe applies for each angle argument :math:`\{\phi, \theta, \omega\}`.

    .. note::

        If the ``Rot`` gate is not supported on the targeted device, PennyLane
        will attempt to decompose the gate into :class:`~.RZ` and :class:`~.RY` gates.

    Args:
        phi (float): rotation angle :math:`\phi`
        theta (float): rotation angle :math:`\theta`
        omega (float): rotation angle :math:`\omega`
        wires (Any, Wires): the wire the operation acts on
        do_queue (bool): Indicates whether the operator should be
            immediately pushed into the Operator queue (optional)
        id (str or None): String representing the operation (optional)
    """
    num_wires = 1
    num_params = 3
    """int: Number of trainable parameters that the operator depends on."""

    ndim_params = (0, 0, 0)
    """tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""

    grad_method = "A"
    parameter_frequencies = [(1,), (1,), (1,)]

    def __init__(self, phi, theta, omega, wires, do_queue=True, id=None):
        super().__init__(phi, theta, omega, wires=wires, do_queue=do_queue, id=id)

    @staticmethod
    def compute_matrix(phi, theta, omega):  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).

        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.

        .. seealso:: :meth:`~.Rot.matrix`

        Args:
            phi (tensor_like or float): first rotation angle
            theta (tensor_like or float): second rotation angle
            omega (tensor_like or float): third rotation angle

        Returns:
            tensor_like: canonical matrix

        **Example**

        >>> qml.Rot.compute_matrix(torch.tensor(0.1), torch.tensor(0.2), torch.tensor(0.3))
        tensor([[ 0.9752-0.1977j, -0.0993+0.0100j],
                [ 0.0993+0.0100j,  0.9752+0.1977j]])
        """
        # It might be that they are in different interfaces, e.g.,
        # Rot(0.2, 0.3, tf.Variable(0.5), wires=0)
        # So we need to make sure the matrix comes out having the right type
        interface = qml.math._multi_dispatch([phi, theta, omega])
        c = qml.math.cos(theta / 2)
        s = qml.math.sin(theta / 2)
        # If anything is not tensorflow, it has to be casted and then
        if interface == "tensorflow":
            phi = qml.math.cast_like(qml.math.asarray(phi, like=interface), 1j)
            omega = qml.math.cast_like(qml.math.asarray(omega, like=interface), 1j)
            c = qml.math.cast_like(qml.math.asarray(c, like=interface), 1j)
            s = qml.math.cast_like(qml.math.asarray(s, like=interface), 1j)
        # The following variable is used to assert the all terms to be stacked have same shape
        one = qml.math.ones_like(phi) * qml.math.ones_like(omega)
        c = c * one
        s = s * one
        # Entries of RZ(omega) RY(theta) RZ(phi) written out explicitly.
        mat = [
            [
                qml.math.exp(-0.5j * (phi + omega)) * c,
                -qml.math.exp(0.5j * (phi - omega)) * s,
            ],
            [
                qml.math.exp(-0.5j * (phi - omega)) * s,
                qml.math.exp(0.5j * (phi + omega)) * c,
            ],
        ]
        return qml.math.stack([stack_last(row) for row in mat], axis=-2)

    @staticmethod
    def compute_decomposition(phi, theta, omega, wires):
        r"""Representation of the operator as a product of other operators (static method). :

        .. math:: O = O_1 O_2 \dots O_n.

        .. seealso:: :meth:`~.Rot.decomposition`.

        Args:
            phi (float): rotation angle :math:`\phi`
            theta (float): rotation angle :math:`\theta`
            omega (float): rotation angle :math:`\omega`
            wires (Any, Wires): the wire the operation acts on

        Returns:
            list[Operator]: decomposition into lower level operations

        **Example:**

        >>> qml.Rot.compute_decomposition(1.2, 2.3, 3.4, wires=0)
        [RZ(1.2, wires=[0]), RY(2.3, wires=[0]), RZ(3.4, wires=[0])]
        """
        decomp_ops = [
            RZ(phi, wires=wires),
            RY(theta, wires=wires),
            RZ(omega, wires=wires),
        ]
        return decomp_ops

    def adjoint(self):
        """Return the adjoint: angles are negated *and* reversed, since the factors
        RZ(omega) RY(theta) RZ(phi) invert in the opposite order."""
        phi, theta, omega = self.parameters
        return Rot(-omega, -theta, -phi, wires=self.wires)

    def _controlled(self, wire):
        # Queue the controlled version (CRot); ``wire`` becomes the control wire.
        CRot(*self.parameters, wires=wire + self.wires)

    def single_qubit_rot_angles(self):
        # Rot is already parametrized by its ZYZ Euler angles.
        return self.data
class MultiRZ(Operation):
    r"""
    Arbitrary multi Z rotation.

    .. math::

        MultiRZ(\theta) = \exp(-i \frac{\theta}{2} Z^{\otimes n})

    **Details:**

    * Number of wires: Any
    * Number of parameters: 1
    * Number of dimensions per parameter: (0,)
    * Gradient recipe: :math:`\frac{d}{d\theta}f(MultiRZ(\theta)) = \frac{1}{2}\left[f(MultiRZ(\theta +\pi/2)) - f(MultiRZ(\theta-\pi/2))\right]`
      where :math:`f` is an expectation value depending on :math:`MultiRZ(\theta)`.

    .. note::

        If the ``MultiRZ`` gate is not supported on the targeted device, PennyLane
        will decompose the gate using :class:`~.RZ` and :class:`~.CNOT` gates.

    Args:
        theta (tensor_like or float): rotation angle :math:`\theta`
        wires (Sequence[int] or int): the wires the operation acts on
        do_queue (bool): Indicates whether the operator should be
            immediately pushed into the Operator queue (optional)
        id (str or None): String representing the operation (optional)
    """
    num_wires = AnyWires
    num_params = 1
    """int: Number of trainable parameters that the operator depends on."""

    ndim_params = (0,)
    """tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""

    grad_method = "A"
    parameter_frequencies = [(1,)]

    def __init__(self, theta, wires=None, do_queue=True, id=None):
        wires = Wires(wires)
        # The wire count is stored as a hyperparameter so the static matrix/eigval
        # methods can be called with it.
        self.hyperparameters["num_wires"] = len(wires)
        super().__init__(theta, wires=wires, do_queue=do_queue, id=id)

    @staticmethod
    def compute_matrix(theta, num_wires):  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).

        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.

        .. seealso:: :meth:`~.MultiRZ.matrix`

        Args:
            theta (tensor_like or float): rotation angle
            num_wires (int): number of wires the rotation acts on

        Returns:
            tensor_like: canonical matrix

        **Example**

        >>> qml.MultiRZ.compute_matrix(torch.tensor(0.1), 2)
        tensor([[0.9988-0.0500j, 0.0000+0.0000j, 0.0000+0.0000j, 0.0000+0.0000j],
                [0.0000+0.0000j, 0.9988+0.0500j, 0.0000+0.0000j, 0.0000+0.0000j],
                [0.0000+0.0000j, 0.0000+0.0000j, 0.9988+0.0500j, 0.0000+0.0000j],
                [0.0000+0.0000j, 0.0000+0.0000j, 0.0000+0.0000j, 0.9988-0.0500j]])
        """
        # Eigenvalues of Z^{\otimes n} are +/-1; the matrix is diagonal with
        # entries exp(-i * theta/2 * eig).
        eigs = qml.math.convert_like(pauli_eigs(num_wires), theta)
        if qml.math.get_interface(theta) == "tensorflow":
            theta = qml.math.cast_like(theta, 1j)
            eigs = qml.math.cast_like(eigs, 1j)
        if qml.math.ndim(theta) > 0:
            # Broadcasted parameter: build one diagonal matrix per batch entry.
            eigvals = [qml.math.exp(-0.5j * t * eigs) for t in theta]
            return qml.math.stack([qml.math.diag(eig) for eig in eigvals])
        eigvals = qml.math.exp(-0.5j * theta * eigs)
        return qml.math.diag(eigvals)

    def generator(self):
        # G = -0.5 * Z \otimes ... \otimes Z over all wires of the operation.
        return -0.5 * functools.reduce(matmul, [qml.PauliZ(w) for w in self.wires])

    @staticmethod
    def compute_eigvals(theta, num_wires):  # pylint: disable=arguments-differ
        r"""Eigenvalues of the operator in the computational basis (static method).

        If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
        the operator can be reconstructed as

        .. math:: O = U \Sigma U^{\dagger},

        where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
        Otherwise, no particular order for the eigenvalues is guaranteed.

        .. seealso:: :meth:`~.MultiRZ.eigvals`

        Args:
            theta (tensor_like or float): rotation angle
            num_wires (int): number of wires the rotation acts on

        Returns:
            tensor_like: eigenvalues

        **Example**

        >>> qml.MultiRZ.compute_eigvals(torch.tensor(0.5), 3)
        tensor([0.9689-0.2474j, 0.9689+0.2474j, 0.9689+0.2474j, 0.9689-0.2474j,
                0.9689+0.2474j, 0.9689-0.2474j, 0.9689-0.2474j, 0.9689+0.2474j])
        """
        eigs = qml.math.convert_like(pauli_eigs(num_wires), theta)
        if qml.math.get_interface(theta) == "tensorflow":
            theta = qml.math.cast_like(theta, 1j)
            eigs = qml.math.cast_like(eigs, 1j)
        if qml.math.ndim(theta) > 0:
            # Outer product via tensordot keeps the batch dimension of ``theta``.
            return qml.math.exp(qml.math.tensordot(-0.5j * theta, eigs, axes=0))
        return qml.math.exp(-0.5j * theta * eigs)

    @staticmethod
    def compute_decomposition(
        theta, wires, **kwargs
    ):  # pylint: disable=arguments-differ,unused-argument
        r"""Representation of the operator as a product of other operators (static method). :

        .. math:: O = O_1 O_2 \dots O_n.

        .. seealso:: :meth:`~.MultiRZ.decomposition`.

        Args:
            theta (float): rotation angle :math:`\theta`
            wires (Iterable, Wires): the wires the operation acts on

        Returns:
            list[Operator]: decomposition into lower level operations

        **Example:**

        >>> qml.MultiRZ.compute_decomposition(1.2, wires=(0,1))
        [CNOT(wires=[1, 0]), RZ(1.2, wires=[0]), CNOT(wires=[1, 0])]
        """
        # A descending CNOT ladder computes the parity of all wires onto wires[0]:
        # the slices pair (wires[-1], wires[-2]), ..., (wires[1], wires[0]).
        ops = [qml.CNOT(wires=(w0, w1)) for w0, w1 in zip(wires[~0:0:-1], wires[~1::-1])]
        # A single RZ on the parity wire implements the multi-Z rotation ...
        ops.append(RZ(theta, wires=wires[0]))
        # ... and the ladder is uncomputed in the reverse order.
        ops += [qml.CNOT(wires=(w0, w1)) for w0, w1 in zip(wires[1:], wires[:~0])]
        return ops

    def adjoint(self):
        """Return the adjoint (inverse) operation, ``MultiRZ(-theta)``."""
        return MultiRZ(-self.parameters[0], wires=self.wires)

    def pow(self, z):
        """Return ``MultiRZ(theta) ** z`` as a single gate with the angle scaled by ``z``."""
        return [MultiRZ(self.data[0] * z, wires=self.wires)]
class PauliRot(Operation):
    r"""
    Arbitrary Pauli word rotation.

    .. math::

        RP(\theta, P) = \exp(-i \frac{\theta}{2} P)

    **Details:**

    * Number of wires: Any
    * Number of parameters: 1
    * Number of dimensions per parameter: (0,)
    * Gradient recipe: :math:`\frac{d}{d\theta}f(RP(\theta)) = \frac{1}{2}\left[f(RP(\theta +\pi/2)) - f(RP(\theta-\pi/2))\right]`
      where :math:`f` is an expectation value depending on :math:`RP(\theta)`.

    .. note::

        If the ``PauliRot`` gate is not supported on the targeted device, PennyLane
        will decompose the gate using :class:`~.RX`, :class:`~.Hadamard`, :class:`~.RZ`
        and :class:`~.CNOT` gates.

    Args:
        theta (float): rotation angle :math:`\theta`
        pauli_word (string): the Pauli word defining the rotation
        wires (Sequence[int] or int): the wire the operation acts on
        do_queue (bool): Indicates whether the operator should be
            immediately pushed into the Operator queue (optional)
        id (str or None): String representing the operation (optional)

    **Example**

    >>> dev = qml.device('default.qubit', wires=1)
    >>> @qml.qnode(dev)
    ... def example_circuit():
    ...     qml.PauliRot(0.5, 'X',  wires=0)
    ...     return qml.expval(qml.PauliZ(0))
    >>> print(example_circuit())
    0.8775825618903724
    """
    num_wires = AnyWires
    num_params = 1
    """int: Number of trainable parameters that the operator depends on."""

    ndim_params = (0,)
    """tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""

    do_check_domain = False
    grad_method = "A"
    parameter_frequencies = [(1,)]

    # The only characters a Pauli word may contain.
    _ALLOWED_CHARACTERS = "IXYZ"

    # Single-qubit unitaries that rotate each Pauli into the Z basis, so that a
    # general Pauli rotation can be obtained by conjugating a MultiRZ.
    _PAULI_CONJUGATION_MATRICES = {
        "X": Hadamard.compute_matrix(),
        "Y": RX.compute_matrix(np.pi / 2),
        "Z": np.array([[1, 0], [0, 1]]),
    }

    def __init__(self, theta, pauli_word, wires=None, do_queue=True, id=None):
        super().__init__(theta, wires=wires, do_queue=do_queue, id=id)
        self.hyperparameters["pauli_word"] = pauli_word
        if not PauliRot._check_pauli_word(pauli_word):
            raise ValueError(
                f'The given Pauli word "{pauli_word}" contains characters that are not allowed.'
                " Allowed characters are I, X, Y and Z"
            )
        # The Pauli word must specify exactly one Pauli per wire.
        num_wires = 1 if isinstance(wires, int) else len(wires)
        if not len(pauli_word) == num_wires:
            raise ValueError(
                f"The given Pauli word has length {len(pauli_word)}, length "
                f"{num_wires} was expected for wires {wires}"
            )

    def label(self, decimals=None, base_label=None, cache=None):
        r"""A customizable string representation of the operator.

        Args:
            decimals=None (int): If ``None``, no parameters are included. Else,
                specifies how to round the parameters.
            base_label=None (str): overwrite the non-parameter component of the label
            cache=None (dict): dictionary that caries information between label calls
                in the same drawing

        Returns:
            str: label to use in drawings

        **Example:**

        >>> op = qml.PauliRot(0.1, "XYY", wires=(0,1,2))
        >>> op.label()
        'RXYY'
        >>> op.label(decimals=2)
        'RXYY\n(0.10)'
        >>> op.label(decimals=2, base_label="PauliRot")
        'PauliRot\n(0.10)'

        """
        pauli_word = self.hyperparameters["pauli_word"]
        op_label = base_label or ("R" + pauli_word)

        if self.inverse:
            op_label += "⁻¹"

        # TODO[dwierichs]: Implement a proper label for parameter-broadcasted operators
        if decimals is not None and self.batch_size is None:
            param_string = f"\n({qml.math.asarray(self.parameters[0]):.{decimals}f})"
            op_label += param_string

        return op_label

    @staticmethod
    def _check_pauli_word(pauli_word):
        """Check that the given Pauli word has correct structure.

        Args:
            pauli_word (str): Pauli word to be checked

        Returns:
            bool: Whether the Pauli word has correct structure.
        """
        return all(pauli in PauliRot._ALLOWED_CHARACTERS for pauli in set(pauli_word))

    @staticmethod
    def compute_matrix(theta, pauli_word):  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).

        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.

        .. seealso:: :meth:`~.PauliRot.matrix`

        Args:
            theta (tensor_like or float): rotation angle
            pauli_word (str): string representation of Pauli word

        Returns:
            tensor_like: canonical matrix

        **Example**

        >>> qml.PauliRot.compute_matrix(0.5, 'X')
        [[9.6891e-01+4.9796e-18j 2.7357e-17-2.4740e-01j]
         [2.7357e-17-2.4740e-01j 9.6891e-01+4.9796e-18j]]
        """
        if not PauliRot._check_pauli_word(pauli_word):
            raise ValueError(
                f'The given Pauli word "{pauli_word}" contains characters that are not allowed.'
                " Allowed characters are I, X, Y and Z"
            )

        interface = qml.math.get_interface(theta)

        if interface == "tensorflow":
            theta = qml.math.cast_like(theta, 1j)

        # Simplest case is if the Pauli is the identity matrix
        if set(pauli_word) == {"I"}:
            # exp(-i*theta/2 * I^{\otimes n}) is a pure global phase times the identity.
            exp = qml.math.exp(-0.5j * theta)
            iden = qml.math.eye(2 ** len(pauli_word), like=theta)
            if qml.math.get_interface(theta) == "tensorflow":
                iden = qml.math.cast_like(iden, 1j)
            if qml.math.ndim(theta) == 0:
                return exp * iden

            return qml.math.stack([e * iden for e in exp])

        # We first generate the matrix excluding the identity parts and expand it afterwards.
        # To this end, we have to store on which wires the non-identity parts act
        non_identity_wires, non_identity_gates = zip(
            *[(wire, gate) for wire, gate in enumerate(pauli_word) if gate != "I"]
        )

        multi_Z_rot_matrix = MultiRZ.compute_matrix(theta, len(non_identity_gates))

        # now we conjugate with Hadamard and RX to create the Pauli string
        conjugation_matrix = functools.reduce(
            qml.math.kron,
            [PauliRot._PAULI_CONJUGATION_MATRICES[gate] for gate in non_identity_gates],
        )
        if interface == "tensorflow":
            conjugation_matrix = qml.math.cast_like(conjugation_matrix, 1j)

        # Note: we use einsum with reverse arguments here because it is not multi-dispatched
        # and the tensordot containing multi_Z_rot_matrix should decide about the interface
        return expand_matrix(
            qml.math.einsum(
                "...jk,ij->...ik",
                qml.math.tensordot(multi_Z_rot_matrix, conjugation_matrix, axes=[[-1], [0]]),
                qml.math.conj(conjugation_matrix),
            ),
            non_identity_wires,
            list(range(len(pauli_word))),
        )

    def generator(self):
        pauli_word = self.hyperparameters["pauli_word"]
        wire_map = {w: i for i, w in enumerate(self.wires)}
        # G = -0.5 * P for the Pauli word P, matching RP(theta) = exp(-i*theta/2 * P).
        return -0.5 * qml.grouping.string_to_pauli_word(pauli_word, wire_map=wire_map)

    @staticmethod
    def compute_eigvals(theta, pauli_word):  # pylint: disable=arguments-differ
        r"""Eigenvalues of the operator in the computational basis (static method).

        If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
        the operator can be reconstructed as

        .. math:: O = U \Sigma U^{\dagger},

        where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
        Otherwise, no particular order for the eigenvalues is guaranteed.

        .. seealso:: :meth:`~.PauliRot.eigvals`

        Returns:
            tensor_like: eigenvalues

        **Example**

        >>> qml.PauliRot.compute_eigvals(torch.tensor(0.5), "X")
        tensor([0.9689-0.2474j, 0.9689+0.2474j])
        """
        if qml.math.get_interface(theta) == "tensorflow":
            theta = qml.math.cast_like(theta, 1j)

        # Identity must be treated specially because its eigenvalues are all the same
        if set(pauli_word) == {"I"}:
            exp = qml.math.exp(-0.5j * theta)
            ones = qml.math.ones(2 ** len(pauli_word), like=theta)
            if qml.math.get_interface(theta) == "tensorflow":
                ones = qml.math.cast_like(ones, 1j)

            if qml.math.ndim(theta) == 0:
                return exp * ones

            return qml.math.tensordot(exp, ones, axes=0)

        # Any non-identity Pauli word has the same +/-1 eigenvalue pattern as Z words.
        return MultiRZ.compute_eigvals(theta, len(pauli_word))

    @staticmethod
    def compute_decomposition(theta, wires, pauli_word):
        r"""Representation of the operator as a product of other operators (static method). :

        .. math:: O = O_1 O_2 \dots O_n.

        .. seealso:: :meth:`~.PauliRot.decomposition`.

        Args:
            theta (float): rotation angle :math:`\theta`
            pauli_word (string): the Pauli word defining the rotation
            wires (Iterable, Wires): the wires the operation acts on

        Returns:
            list[Operator]: decomposition into lower level operations

        **Example:**

        >>> qml.PauliRot.compute_decomposition(1.2, "XY", wires=(0,1))
        [Hadamard(wires=[0]),
        RX(1.5707963267948966, wires=[1]),
        MultiRZ(1.2, wires=[0, 1]),
        Hadamard(wires=[0]),
        RX(-1.5707963267948966, wires=[1])]
        """
        if isinstance(wires, int):  # Catch cases when the wire is passed as a single int.
            wires = [wires]

        # Check for identity and do nothing
        if set(pauli_word) == {"I"}:
            return []

        active_wires, active_gates = zip(
            *[(wire, gate) for wire, gate in zip(wires, pauli_word) if gate != "I"]
        )

        # Basis changes rotate X and Y into the Z basis ...
        ops = []
        for wire, gate in zip(active_wires, active_gates):
            if gate == "X":
                ops.append(Hadamard(wires=[wire]))
            elif gate == "Y":
                ops.append(RX(np.pi / 2, wires=[wire]))

        # ... so a MultiRZ implements the rotation ...
        ops.append(MultiRZ(theta, wires=list(active_wires)))

        # ... and the basis changes are undone afterwards.
        for wire, gate in zip(active_wires, active_gates):
            if gate == "X":
                ops.append(Hadamard(wires=[wire]))
            elif gate == "Y":
                ops.append(RX(-np.pi / 2, wires=[wire]))
        return ops

    def adjoint(self):
        """Return the adjoint (inverse) operation, the same Pauli rotation with ``-theta``."""
        return PauliRot(-self.parameters[0], self.hyperparameters["pauli_word"], wires=self.wires)

    def pow(self, z):
        """Return the gate raised to power ``z`` as a single Pauli rotation with a scaled angle."""
        return [PauliRot(self.data[0] * z, self.hyperparameters["pauli_word"], wires=self.wires)]
class CRX(Operation):
r"""
The controlled-RX operator
.. math::
\begin{align}
CR_x(\phi) &=
\begin{bmatrix}
& 1 & 0 & 0 & 0 \\
& 0 & 1 & 0 & 0\\
& 0 & 0 & \cos(\phi/2) & -i\sin(\phi/2)\\
& 0 & 0 & -i\sin(\phi/2) & \cos(\phi/2)
\end{bmatrix}.
\end{align}
**Details:**
* Number of wires: 2
* Number of parameters: 1
* Number of dimensions per parameter: (0,)
* Gradient recipe: The controlled-RX operator satisfies a four-term parameter-shift rule
(see Appendix F, https://doi.org/10.1088/1367-2630/ac2cb3):
.. math::
\frac{d}{d\phi}f(CR_x(\phi)) = c_+ \left[f(CR_x(\phi+a)) - f(CR_x(\phi-a))\right] - c_- \left[f(CR_x(\phi+b)) - f(CR_x(\phi-b))\right]
where :math:`f` is an expectation value depending on :math:`CR_x(\phi)`, and
- :math:`a = \pi/2`
- :math:`b = 3\pi/2`
- :math:`c_{\pm} = (\sqrt{2} \pm 1)/{4\sqrt{2}}`
Args:
phi (float): rotation angle :math:`\phi`
wires (Sequence[int]): the wire the operation acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
"""
num_wires = 2
num_params = 1
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0,)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
basis = "X"
grad_method = "A"
parameter_frequencies = [(0.5, 1.0)]
def generator(self):
return -0.5 * qml.Projector(np.array([1]), wires=self.wires[0]) @ qml.PauliX(self.wires[1])
def __init__(self, phi, wires, do_queue=True, id=None):
super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
def label(self, decimals=None, base_label=None, cache=None):
return super().label(decimals=decimals, base_label=base_label or "RX", cache=cache)
@staticmethod
def compute_matrix(theta): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
.. seealso:: :meth:`~.CRX.matrix`
Args:
theta (tensor_like or float): rotation angle
Returns:
tensor_like: canonical matrix
**Example**
>>> qml.CRX.compute_matrix(torch.tensor(0.5))
tensor([[1.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.0+0.0j],
[0.0+0.0j, 1.0+0.0j, 0.0+0.0j, 0.0+0.0j],
[0.0+0.0j, 0.0+0.0j, 0.9689+0.0j, 0.0-0.2474j],
[0.0+0.0j, 0.0+0.0j, 0.0-0.2474j, 0.9689+0.0j]])
"""
interface = qml.math.get_interface(theta)
c = qml.math.cos(theta / 2)
s = qml.math.sin(theta / 2)
if interface == "tensorflow":
c = qml.math.cast_like(c, 1j)
s = qml.math.cast_like(s, 1j)
# The following avoids casting an imaginary quantity to reals when backpropagating
c = (1 + 0j) * c
js = -1j * s
ones = qml.math.ones_like(js)
zeros = qml.math.zeros_like(js)
matrix = [
[ones, zeros, zeros, zeros],
[zeros, ones, zeros, zeros],
[zeros, zeros, c, js],
[zeros, zeros, js, c],
]
return qml.math.stack([stack_last(row) for row in matrix], axis=-2)
    @staticmethod
    def compute_decomposition(phi, wires):
        r"""Representation of the operator as a product of other operators (static method). :
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.CRX.decomposition`.
        Args:
            phi (float): rotation angle :math:`\phi`
            wires (Iterable, Wires): the wires the operation acts on
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> qml.CRX.compute_decomposition(1.2, wires=(0,1))
        [RZ(1.5707963267948966, wires=[1]),
        RY(0.6, wires=[1]),
        CNOT(wires=[0, 1]),
        RY(-0.6, wires=[1]),
        CNOT(wires=[0, 1]),
        RZ(-1.5707963267948966, wires=[1])]
        """
        # ones_like keeps the pi/2 constant in the same interface/shape as phi
        # so the decomposition stays differentiable for batched parameters.
        pi_half = qml.math.ones_like(phi) * (np.pi / 2)
        # Standard CRX decomposition: basis-change RZ(pi/2) conjugation turns the
        # controlled-RY core (RY/CNOT/RY/CNOT) into a controlled-RX.
        decomp_ops = [
            RZ(pi_half, wires=wires[1]),
            RY(phi / 2, wires=wires[1]),
            qml.CNOT(wires=wires),
            RY(-phi / 2, wires=wires[1]),
            qml.CNOT(wires=wires),
            RZ(-pi_half, wires=wires[1]),
        ]
        return decomp_ops
def adjoint(self):
return CRX(-self.data[0], wires=self.wires)
def pow(self, z):
return [CRX(self.data[0] * z, wires=self.wires)]
    @property
    def control_wires(self):
        """Wires: the single control wire, i.e. the first wire of the operation."""
        return Wires(self.wires[0])
class CRY(Operation):
    r"""
    The controlled-RY operator
    .. math::
        \begin{align}
            CR_y(\phi) &=
            \begin{bmatrix}
            1 & 0 & 0 & 0 \\
            0 & 1 & 0 & 0\\
            0 & 0 & \cos(\phi/2) & -\sin(\phi/2)\\
            0 & 0 & \sin(\phi/2) & \cos(\phi/2)
            \end{bmatrix}.
        \end{align}
    **Details:**
    * Number of wires: 2
    * Number of parameters: 1
    * Number of dimensions per parameter: (0,)
    * Gradient recipe: The controlled-RY operator satisfies a four-term parameter-shift rule
      (see Appendix F, https://doi.org/10.1088/1367-2630/ac2cb3):
    .. math::
        \frac{d}{d\phi}f(CR_y(\phi)) = c_+ \left[f(CR_y(\phi+a)) - f(CR_y(\phi-a))\right] - c_- \left[f(CR_y(\phi+b)) - f(CR_y(\phi-b))\right]
    where :math:`f` is an expectation value depending on :math:`CR_y(\phi)`, and
    - :math:`a = \pi/2`
    - :math:`b = 3\pi/2`
    - :math:`c_{\pm} = (\sqrt{2} \pm 1)/{4\sqrt{2}}`
    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int]): the wire the operation acts on
        do_queue (bool): Indicates whether the operator should be
            immediately pushed into the Operator queue (optional)
        id (str or None): String representing the operation (optional)
    """
    num_wires = 2
    num_params = 1
    """int: Number of trainable parameters that the operator depends on."""
    ndim_params = (0,)
    """tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
    # Target-qubit rotation axis; consumed by compilation/pattern-matching passes.
    basis = "Y"
    # "A" marks analytic (parameter-shift) differentiation as supported.
    grad_method = "A"
    # Two shift frequencies -> four-term parameter-shift rule.
    parameter_frequencies = [(0.5, 1.0)]
    def generator(self):
        # G satisfies CRY(phi) == exp(-i * phi * G), with G = -0.5 |1><1| (x) Y.
        return -0.5 * qml.Projector(np.array([1]), wires=self.wires[0]) @ qml.PauliY(self.wires[1])
    def __init__(self, phi, wires, do_queue=True, id=None):
        super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
    def label(self, decimals=None, base_label=None, cache=None):
        # Drawn as an "RY" box on the target wire; the control dot is rendered separately.
        return super().label(decimals=decimals, base_label=base_label or "RY", cache=cache)
    @staticmethod
    def compute_matrix(theta):  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.CRY.matrix`
        Args:
            theta (tensor_like or float): rotation angle
        Returns:
            tensor_like: canonical matrix
        **Example**
        >>> qml.CRY.compute_matrix(torch.tensor(0.5))
        tensor([[ 1.0000,  0.0000,  0.0000,  0.0000],
                [ 0.0000,  1.0000,  0.0000,  0.0000],
                [ 0.0000,  0.0000,  0.9689, -0.2474],
                [ 0.0000,  0.0000,  0.2474,  0.9689]], dtype=torch.float64)
        """
        interface = qml.math.get_interface(theta)
        c = qml.math.cos(theta / 2)
        s = qml.math.sin(theta / 2)
        # TensorFlow does not implicitly promote real -> complex, so cast explicitly.
        if interface == "tensorflow":
            c = qml.math.cast_like(c, 1j)
            s = qml.math.cast_like(s, 1j)
        # The following avoids casting an imaginary quantity to reals when backpropagating
        c = (1 + 0j) * c
        s = (1 + 0j) * s
        ones = qml.math.ones_like(s)
        zeros = qml.math.zeros_like(s)
        # Identity on the |0> control block; real RY(theta) rotation on the |1> block.
        matrix = [
            [ones, zeros, zeros, zeros],
            [zeros, ones, zeros, zeros],
            [zeros, zeros, c, -s],
            [zeros, zeros, s, c],
        ]
        return qml.math.stack([stack_last(row) for row in matrix], axis=-2)
    @staticmethod
    def compute_decomposition(phi, wires):
        r"""Representation of the operator as a product of other operators (static method). :
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.CRY.decomposition`.
        Args:
            phi (float): rotation angle :math:`\phi`
            wires (Iterable, Wires): wires that the operator acts on
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> qml.CRY.compute_decomposition(1.2, wires=(0,1))
        [RY(0.6, wires=[1]),
        CNOT(wires=[0, 1]),
        RY(-0.6, wires=[1]),
        CNOT(wires=[0, 1])]
        """
        # Standard two-CNOT decomposition: the CNOTs flip the sign of the second
        # half-angle rotation when the control is |1>, combining to RY(phi).
        decomp_ops = [
            RY(phi / 2, wires=wires[1]),
            qml.CNOT(wires=wires),
            RY(-phi / 2, wires=wires[1]),
            qml.CNOT(wires=wires),
        ]
        return decomp_ops
    def adjoint(self):
        # Adjoint of a rotation is the rotation by the negated angle.
        return CRY(-self.data[0], wires=self.wires)
    def pow(self, z):
        # Powers fold into the angle: CRY(phi) ** z == CRY(phi * z).
        return [CRY(self.data[0] * z, wires=self.wires)]
    @property
    def control_wires(self):
        """Wires: the single control wire, i.e. the first wire of the operation."""
        return Wires(self.wires[0])
class CRZ(Operation):
    r"""
    The controlled-RZ operator
    .. math::
        \begin{align}
            CR_z(\phi) &=
            \begin{bmatrix}
            1 & 0 & 0 & 0 \\
            0 & 1 & 0 & 0\\
            0 & 0 & e^{-i\phi/2} & 0\\
            0 & 0 & 0 & e^{i\phi/2}
            \end{bmatrix}.
        \end{align}
    .. note:: The subscripts of the operations in the formula refer to the wires they act on, e.g. 1 corresponds to the first element in ``wires`` that is the **control qubit**.
    **Details:**
    * Number of wires: 2
    * Number of parameters: 1
    * Number of dimensions per parameter: (0,)
    * Gradient recipe: The controlled-RZ operator satisfies a four-term parameter-shift rule
      (see Appendix F, https://doi.org/10.1088/1367-2630/ac2cb3):
    .. math::
        \frac{d}{d\phi}f(CR_z(\phi)) = c_+ \left[f(CR_z(\phi+a)) - f(CR_z(\phi-a))\right] - c_- \left[f(CR_z(\phi+b)) - f(CR_z(\phi-b))\right]
    where :math:`f` is an expectation value depending on :math:`CR_z(\phi)`, and
    - :math:`a = \pi/2`
    - :math:`b = 3\pi/2`
    - :math:`c_{\pm} = (\sqrt{2} \pm 1)/{4\sqrt{2}}`
    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int]): the wire the operation acts on
        do_queue (bool): Indicates whether the operator should be
            immediately pushed into the Operator queue (optional)
        id (str or None): String representing the operation (optional)
    """
    num_wires = 2
    num_params = 1
    """int: Number of trainable parameters that the operator depends on."""
    ndim_params = (0,)
    """tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
    # Target-qubit rotation axis; the gate is diagonal in the computational basis.
    basis = "Z"
    # "A" marks analytic (parameter-shift) differentiation as supported.
    grad_method = "A"
    # Two shift frequencies -> four-term parameter-shift rule.
    parameter_frequencies = [(0.5, 1.0)]
    def generator(self):
        # G satisfies CRZ(phi) == exp(-i * phi * G), with G = -0.5 |1><1| (x) Z.
        return -0.5 * qml.Projector(np.array([1]), wires=self.wires[0]) @ qml.PauliZ(self.wires[1])
    def __init__(self, phi, wires, do_queue=True, id=None):
        super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
    def label(self, decimals=None, base_label=None, cache=None):
        # Drawn as an "RZ" box on the target wire; the control dot is rendered separately.
        return super().label(decimals=decimals, base_label=base_label or "RZ", cache=cache)
    @staticmethod
    def compute_matrix(theta):  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.CRZ.matrix`
        Args:
            theta (tensor_like or float): rotation angle
        Returns:
            tensor_like: canonical matrix
        **Example**
        >>> qml.CRZ.compute_matrix(torch.tensor(0.5))
        tensor([[1.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.0+0.0j],
                [0.0+0.0j, 1.0+0.0j, 0.0+0.0j, 0.0+0.0j],
                [0.0+0.0j, 0.0+0.0j, 0.9689-0.2474j, 0.0+0.0j],
                [0.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.9689+0.2474j]])
        """
        # TensorFlow does not implicitly promote real -> complex, so cast explicitly.
        if qml.math.get_interface(theta) == "tensorflow":
            theta = qml.math.cast_like(theta, 1j)
        exp_part = qml.math.exp(-1j * theta / 2)
        ones = qml.math.ones_like(exp_part)
        zeros = qml.math.zeros_like(exp_part)
        # Diagonal gate: identity on the |0> control block, phases e^{∓i theta/2}
        # on the |1> block; conj(exp_part) gives the e^{+i theta/2} entry.
        matrix = [
            [ones, zeros, zeros, zeros],
            [zeros, ones, zeros, zeros],
            [zeros, zeros, exp_part, zeros],
            [zeros, zeros, zeros, qml.math.conj(exp_part)],
        ]
        return qml.math.stack([stack_last(row) for row in matrix], axis=-2)
    @staticmethod
    def compute_eigvals(theta):  # pylint: disable=arguments-differ
        r"""Eigenvalues of the operator in the computational basis (static method).
        If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
        the operator can be reconstructed as
        .. math:: O = U \Sigma U^{\dagger},
        where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
        Otherwise, no particular order for the eigenvalues is guaranteed.
        .. seealso:: :meth:`~.CRZ.eigvals`
        Args:
            theta (tensor_like or float): rotation angle
        Returns:
            tensor_like: eigenvalues
        **Example**
        >>> qml.CRZ.compute_eigvals(torch.tensor(0.5))
        tensor([1.0000+0.0000j, 1.0000+0.0000j, 0.9689-0.2474j, 0.9689+0.2474j])
        """
        if qml.math.get_interface(theta) == "tensorflow":
            theta = qml.math.cast_like(theta, 1j)
        # The gate is diagonal, so the eigenvalues are exactly its diagonal entries.
        exp_part = qml.math.exp(-0.5j * theta)
        o = qml.math.ones_like(exp_part)
        return stack_last([o, o, exp_part, qml.math.conj(exp_part)])
    @staticmethod
    def compute_decomposition(phi, wires):
        r"""Representation of the operator as a product of other operators (static method). :
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.CRZ.decomposition`.
        Args:
            phi (float): rotation angle :math:`\phi`
            wires (Iterable, Wires): wires that the operator acts on
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> qml.CRZ.compute_decomposition(1.2, wires=(0,1))
        [PhaseShift(0.6, wires=[1]),
        CNOT(wires=[0, 1]),
        PhaseShift(-0.6, wires=[1]),
        CNOT(wires=[0, 1])]
        """
        # Two-CNOT decomposition using phase shifts on the target wire.
        decomp_ops = [
            PhaseShift(phi / 2, wires=wires[1]),
            qml.CNOT(wires=wires),
            PhaseShift(-phi / 2, wires=wires[1]),
            qml.CNOT(wires=wires),
        ]
        return decomp_ops
    def adjoint(self):
        # Adjoint of a rotation is the rotation by the negated angle.
        return CRZ(-self.data[0], wires=self.wires)
    def pow(self, z):
        # Powers fold into the angle: CRZ(phi) ** z == CRZ(phi * z).
        return [CRZ(self.data[0] * z, wires=self.wires)]
    @property
    def control_wires(self):
        """Wires: the single control wire, i.e. the first wire of the operation."""
        return Wires(self.wires[0])
class CRot(Operation):
    r"""
    The controlled-Rot operator
    .. math:: CR(\phi, \theta, \omega) = \begin{bmatrix}
            1 & 0 & 0 & 0 \\
            0 & 1 & 0 & 0\\
            0 & 0 & e^{-i(\phi+\omega)/2}\cos(\theta/2) & -e^{i(\phi-\omega)/2}\sin(\theta/2)\\
            0 & 0 & e^{-i(\phi-\omega)/2}\sin(\theta/2) & e^{i(\phi+\omega)/2}\cos(\theta/2)
            \end{bmatrix}.
    .. note:: The first wire provided corresponds to the **control qubit**.
    **Details:**
    * Number of wires: 2
    * Number of parameters: 3
    * Number of dimensions per parameter: (0, 0, 0)
    * Gradient recipe: The controlled-Rot operator satisfies a four-term parameter-shift rule
      (see Appendix F, https://doi.org/10.1088/1367-2630/ac2cb3):
    .. math::
        \frac{d}{d\mathbf{x}_i}f(CR(\mathbf{x}_i)) = c_+ \left[f(CR(\mathbf{x}_i+a)) - f(CR(\mathbf{x}_i-a))\right] - c_- \left[f(CR(\mathbf{x}_i+b)) - f(CR(\mathbf{x}_i-b))\right]
    where :math:`f` is an expectation value depending on :math:`CR(\mathbf{x}_i)`, and
    - :math:`\mathbf{x} = (\phi, \theta, \omega)` and `i` is an index to :math:`\mathbf{x}`
    - :math:`a = \pi/2`
    - :math:`b = 3\pi/2`
    - :math:`c_{\pm} = (\sqrt{2} \pm 1)/{4\sqrt{2}}`
    Args:
        phi (float): rotation angle :math:`\phi`
        theta (float): rotation angle :math:`\theta`
        omega (float): rotation angle :math:`\omega`
        wires (Sequence[int]): the wire the operation acts on
        do_queue (bool): Indicates whether the operator should be
            immediately pushed into the Operator queue (optional)
        id (str or None): String representing the operation (optional)
    """
    num_wires = 2
    num_params = 3
    """int: Number of trainable parameters that the operator depends on."""
    ndim_params = (0, 0, 0)
    """tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
    # "A" marks analytic (parameter-shift) differentiation as supported.
    grad_method = "A"
    # Each of the three angles follows a four-term parameter-shift rule.
    parameter_frequencies = [(0.5, 1.0), (0.5, 1.0), (0.5, 1.0)]
    def __init__(self, phi, theta, omega, wires, do_queue=True, id=None):
        super().__init__(phi, theta, omega, wires=wires, do_queue=do_queue, id=id)
    def label(self, decimals=None, base_label=None, cache=None):
        # Drawn as a "Rot" box on the target wire; the control dot is rendered separately.
        return super().label(decimals=decimals, base_label=base_label or "Rot", cache=cache)
    @staticmethod
    def compute_matrix(phi, theta, omega):  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.CRot.matrix`
        Args:
            phi(tensor_like or float): first rotation angle
            theta (tensor_like or float): second rotation angle
            omega (tensor_like or float): third rotation angle
        Returns:
            tensor_like: canonical matrix
        **Example**
        >>> qml.CRot.compute_matrix(torch.tensor(0.1), torch.tensor(0.2), torch.tensor(0.3))
        tensor([[ 1.0+0.0j,  0.0+0.0j,        0.0+0.0j,        0.0+0.0j],
                [ 0.0+0.0j,  1.0+0.0j,        0.0+0.0j,        0.0+0.0j],
                [ 0.0+0.0j,  0.0+0.0j,  0.9752-0.1977j, -0.0993+0.0100j],
                [ 0.0+0.0j,  0.0+0.0j,  0.0993+0.0100j,  0.9752+0.1977j]])
        """
        # It might be that they are in different interfaces, e.g.,
        # CRot(0.2, 0.3, tf.Variable(0.5), wires=[0, 1])
        # So we need to make sure the matrix comes out having the right type
        interface = qml.math._multi_dispatch([phi, theta, omega])
        c = qml.math.cos(theta / 2)
        s = qml.math.sin(theta / 2)
        # If anything is not tensorflow, it has to be casted
        if interface == "tensorflow":
            phi = qml.math.cast_like(qml.math.asarray(phi, like=interface), 1j)
            omega = qml.math.cast_like(qml.math.asarray(omega, like=interface), 1j)
            c = qml.math.cast_like(qml.math.asarray(c, like=interface), 1j)
            s = qml.math.cast_like(qml.math.asarray(s, like=interface), 1j)
        # The following variable is used to assert the all terms to be stacked have same shape
        one = qml.math.ones_like(phi) * qml.math.ones_like(omega)
        c = c * one
        s = s * one
        o = qml.math.ones_like(c)
        z = qml.math.zeros_like(c)
        # Identity on the |0> control block; a full Rot(phi, theta, omega)
        # (ZYZ Euler rotation) on the |1> control block.
        mat = [
            [o, z, z, z],
            [z, o, z, z],
            [
                z,
                z,
                qml.math.exp(-0.5j * (phi + omega)) * c,
                -qml.math.exp(0.5j * (phi - omega)) * s,
            ],
            [
                z,
                z,
                qml.math.exp(-0.5j * (phi - omega)) * s,
                qml.math.exp(0.5j * (phi + omega)) * c,
            ],
        ]
        return qml.math.stack([stack_last(row) for row in mat], axis=-2)
    @staticmethod
    def compute_decomposition(phi, theta, omega, wires):
        r"""Representation of the operator as a product of other operators (static method). :
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.CRot.decomposition`.
        Args:
            phi (float): rotation angle :math:`\phi`
            theta (float): rotation angle :math:`\theta`
            omega (float): rotation angle :math:`\omega`
            wires (Iterable, Wires): the wires the operation acts on
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> qml.CRot.compute_decomposition(1.234, 2.3, 3.45, wires=[0, 1])
        [RZ(-1.1, wires=[1]),
        CNOT(wires=[0, 1]),
        RZ(-2.3, wires=[1]),
        RY(-1.15, wires=[1]),
        CNOT(wires=[0, 1]),
        RY(1.15, wires=[1]),
        RZ(3.4, wires=[1])]
        """
        # Standard controlled-Rot decomposition (two CNOTs plus single-qubit
        # RZ/RY rotations on the target wire).
        decomp_ops = [
            RZ((phi - omega) / 2, wires=wires[1]),
            qml.CNOT(wires=wires),
            RZ(-(phi + omega) / 2, wires=wires[1]),
            RY(-theta / 2, wires=wires[1]),
            qml.CNOT(wires=wires),
            RY(theta / 2, wires=wires[1]),
            RZ(omega, wires=wires[1]),
        ]
        return decomp_ops
    def adjoint(self):
        # (A B C)^dagger = C^dagger B^dagger A^dagger: negate and reverse the Euler angles.
        phi, theta, omega = self.parameters
        return CRot(-omega, -theta, -phi, wires=self.wires)
    @property
    def control_wires(self):
        """Wires: the single control wire, i.e. the first wire of the operation."""
        return Wires(self.wires[0])
class U1(Operation):
    r"""
    U1 gate.
    .. math:: U_1(\phi) = e^{i\phi/2}R_z(\phi) = \begin{bmatrix}
            1 & 0 \\
            0 & e^{i\phi}
            \end{bmatrix}.
    .. note::
        The ``U1`` gate is an alias for the phase shift operation :class:`~.PhaseShift`.
    **Details:**
    * Number of wires: 1
    * Number of parameters: 1
    * Number of dimensions per parameter: (0,)
    * Gradient recipe: :math:`\frac{d}{d\phi}f(U_1(\phi)) = \frac{1}{2}\left[f(U_1(\phi+\pi/2)) - f(U_1(\phi-\pi/2))\right]`
      where :math:`f` is an expectation value depending on :math:`U_1(\phi)`.
    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int] or int): the wire the operation acts on
        do_queue (bool): Indicates whether the operator should be
            immediately pushed into the Operator queue (optional)
        id (str or None): String representing the operation (optional)
    """
    num_wires = 1
    num_params = 1
    """int: Number of trainable parameters that the operator depends on."""
    ndim_params = (0,)
    """tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
    # Analytic (parameter-shift) differentiation with a single frequency,
    # i.e. the standard two-term shift rule.
    grad_method = "A"
    parameter_frequencies = [(1,)]
    def generator(self):
        """Generator of the gate: the projector onto ``|1>`` of the acted-on wire."""
        return qml.Projector(np.array([1]), wires=self.wires)
    def __init__(self, phi, wires, do_queue=True, id=None):
        super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
    @staticmethod
    def compute_matrix(phi):  # pylint: disable=arguments-differ
        r"""Canonical matrix of the U1 gate in the computational basis (static method).
        The canonical matrix does not consider wires; it implicitly assumes the
        operator's wires match the global wire order.
        .. seealso:: :meth:`~.U1.matrix`
        Args:
            phi (tensor_like or float): rotation angle
        Returns:
            tensor_like: canonical matrix
        **Example**
        >>> qml.U1.compute_matrix(torch.tensor(0.5))
        tensor([[1.0000+0.0000j, 0.0000+0.0000j],
                [0.0000+0.0000j, 0.8776+0.4794j]])
        """
        # TensorFlow does not implicitly promote real -> complex, so cast explicitly.
        if qml.math.get_interface(phi) == "tensorflow":
            phi = qml.math.cast_like(phi, 1j)
        phase = qml.math.exp(1j * phi)
        zero = qml.math.zeros_like(phase)
        one = qml.math.ones_like(phase)
        # Rows are built along the trailing axis so batched angles broadcast.
        top_row = stack_last([one, zero])
        bottom_row = stack_last([zero, phase])
        return qml.math.stack([top_row, bottom_row], axis=-2)
    @staticmethod
    def compute_decomposition(phi, wires):
        r"""Decomposition into a single :class:`~.PhaseShift` (static method).
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.U1.decomposition`.
        Args:
            phi (float): rotation angle :math:`\phi`
            wires (Any, Wires): Wire that the operator acts on.
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> qml.U1.compute_decomposition(1.234, wires=0)
        [PhaseShift(1.234, wires=[0])]
        """
        return [PhaseShift(phi, wires=wires)]
    def adjoint(self):
        """Adjoint is a U1 rotation by the negated angle."""
        (phi,) = self.data
        return U1(-phi, wires=self.wires)
    def pow(self, z):
        """Powers fold into the angle: ``U1(phi) ** z == U1(phi * z)``."""
        scaled_angle = self.data[0] * z
        return [U1(scaled_angle, wires=self.wires)]
class U2(Operation):
    r"""
    U2 gate.
    .. math::
        U_2(\phi, \delta) = \frac{1}{\sqrt{2}}\begin{bmatrix} 1 & -\exp(i \delta)
        \\ \exp(i \phi) & \exp(i (\phi + \delta)) \end{bmatrix}
    The :math:`U_2` gate is related to the single-qubit rotation :math:`R` (:class:`Rot`) and the
    :math:`R_\phi` (:class:`PhaseShift`) gates via the following relation:
    .. math::
        U_2(\phi, \delta) = R_\phi(\phi+\delta) R(\delta,\pi/2,-\delta)
    .. note::
        If the ``U2`` gate is not supported on the targeted device, PennyLane
        will attempt to decompose the gate into :class:`~.Rot` and :class:`~.PhaseShift` gates.
    **Details:**
    * Number of wires: 1
    * Number of parameters: 2
    * Number of dimensions per parameter: (0, 0)
    * Gradient recipe: :math:`\frac{d}{d\phi}f(U_2(\phi, \delta)) = \frac{1}{2}\left[f(U_2(\phi+\pi/2, \delta)) - f(U_2(\phi-\pi/2, \delta))\right]`
      where :math:`f` is an expectation value depending on :math:`U_2(\phi, \delta)`.
      This gradient recipe applies for each angle argument :math:`\{\phi, \delta\}`.
    Args:
        phi (float): azimuthal angle :math:`\phi`
        delta (float): quantum phase :math:`\delta`
        wires (Sequence[int] or int): the subsystem the gate acts on
        do_queue (bool): Indicates whether the operator should be
            immediately pushed into the Operator queue (optional)
        id (str or None): String representing the operation (optional)
    """
    num_wires = 1
    num_params = 2
    """int: Number of trainable parameters that the operator depends on."""
    ndim_params = (0, 0)
    """tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
    # Analytic differentiation; each angle has a single frequency,
    # i.e. the standard two-term parameter-shift rule per argument.
    grad_method = "A"
    parameter_frequencies = [(1,), (1,)]
    def __init__(self, phi, delta, wires, do_queue=True, id=None):
        super().__init__(phi, delta, wires=wires, do_queue=do_queue, id=id)
    @staticmethod
    def compute_matrix(phi, delta):  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.U2.matrix`
        Args:
            phi (tensor_like or float): azimuthal angle
            delta (tensor_like or float): quantum phase
        Returns:
            tensor_like: canonical matrix
        **Example**
        >>> qml.U2.compute_matrix(torch.tensor(0.1), torch.tensor(0.2))
        tensor([[ 0.7071+0.0000j, -0.6930-0.1405j],
                [ 0.7036+0.0706j,  0.6755+0.2090j]])
        """
        # The two angles may live in different autodiff interfaces; dispatch on both.
        interface = qml.math._multi_dispatch([phi, delta])
        # If anything is not tensorflow, it has to be casted and then
        if interface == "tensorflow":
            phi = qml.math.cast_like(qml.math.asarray(phi, like=interface), 1j)
            delta = qml.math.cast_like(qml.math.asarray(delta, like=interface), 1j)
        # Broadcast helper so every entry has the common (possibly batched) shape.
        one = qml.math.ones_like(phi) * qml.math.ones_like(delta)
        mat = [
            [one, -qml.math.exp(1j * delta) * one],
            [qml.math.exp(1j * phi) * one, qml.math.exp(1j * (phi + delta))],
        ]
        # Global 1/sqrt(2) prefactor from the U2 definition.
        return INV_SQRT2 * qml.math.stack([stack_last(row) for row in mat], axis=-2)
    @staticmethod
    def compute_decomposition(phi, delta, wires):
        r"""Representation of the operator as a product of other operators (static method).
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.U2.decomposition`.
        Args:
            phi (float): azimuthal angle :math:`\phi`
            delta (float): quantum phase :math:`\delta`
            wires (Iterable, Wires): the subsystem the gate acts on
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> qml.U2.compute_decomposition(1.23, 2.34, wires=0)
        [Rot(2.34, 1.5707963267948966, -2.34, wires=[0]),
        PhaseShift(2.34, wires=[0]),
        PhaseShift(1.23, wires=[0])]
        """
        # ones_like keeps pi/2 in the same interface/shape as delta for batching.
        pi_half = qml.math.ones_like(delta) * (np.pi / 2)
        # Implements U2(phi, delta) = PhaseShift(phi) PhaseShift(delta) Rot(delta, pi/2, -delta).
        decomp_ops = [
            Rot(delta, pi_half, -delta, wires=wires),
            PhaseShift(delta, wires=wires),
            PhaseShift(phi, wires=wires),
        ]
        return decomp_ops
    def adjoint(self):
        # The adjoint of U2(phi, delta) is U2(pi - delta, pi - phi) up to 2*pi wrapping.
        phi, delta = self.parameters
        new_delta = qml.math.mod((np.pi - phi), (2 * np.pi))
        new_phi = qml.math.mod((np.pi - delta), (2 * np.pi))
        return U2(new_phi, new_delta, wires=self.wires)
class U3(Operation):
    r"""
    Arbitrary single qubit unitary.
    .. math::
        U_3(\theta, \phi, \delta) = \begin{bmatrix} \cos(\theta/2) & -\exp(i \delta)\sin(\theta/2) \\
        \exp(i \phi)\sin(\theta/2) & \exp(i (\phi + \delta))\cos(\theta/2) \end{bmatrix}
    The :math:`U_3` gate is related to the single-qubit rotation :math:`R` (:class:`Rot`) and the
    :math:`R_\phi` (:class:`PhaseShift`) gates via the following relation:
    .. math::
        U_3(\theta, \phi, \delta) = R_\phi(\phi+\delta) R(\delta,\theta,-\delta)
    .. note::
        If the ``U3`` gate is not supported on the targeted device, PennyLane
        will attempt to decompose the gate into :class:`~.PhaseShift` and :class:`~.Rot` gates.
    **Details:**
    * Number of wires: 1
    * Number of parameters: 3
    * Number of dimensions per parameter: (0, 0, 0)
    * Gradient recipe: :math:`\frac{d}{d\phi}f(U_3(\theta, \phi, \delta)) = \frac{1}{2}\left[f(U_3(\theta+\pi/2, \phi, \delta)) - f(U_3(\theta-\pi/2, \phi, \delta))\right]`
      where :math:`f` is an expectation value depending on :math:`U_3(\theta, \phi, \delta)`.
      This gradient recipe applies for each angle argument :math:`\{\theta, \phi, \delta\}`.
    Args:
        theta (float): polar angle :math:`\theta`
        phi (float): azimuthal angle :math:`\phi`
        delta (float): quantum phase :math:`\delta`
        wires (Sequence[int] or int): the subsystem the gate acts on
        do_queue (bool): Indicates whether the operator should be
            immediately pushed into the Operator queue (optional)
        id (str or None): String representing the operation (optional)
    """
    num_wires = 1
    num_params = 3
    """int: Number of trainable parameters that the operator depends on."""
    ndim_params = (0, 0, 0)
    """tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
    # Analytic differentiation; each angle has a single frequency,
    # i.e. the standard two-term parameter-shift rule per argument.
    grad_method = "A"
    parameter_frequencies = [(1,), (1,), (1,)]
    def __init__(self, theta, phi, delta, wires, do_queue=True, id=None):
        super().__init__(theta, phi, delta, wires=wires, do_queue=do_queue, id=id)
    @staticmethod
    def compute_matrix(theta, phi, delta):  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.U3.matrix`
        Args:
            theta (tensor_like or float): polar angle
            phi (tensor_like or float): azimuthal angle
            delta (tensor_like or float): quantum phase
        Returns:
            tensor_like: canonical matrix
        **Example**
        >>> qml.U3.compute_matrix(torch.tensor(0.1), torch.tensor(0.2), torch.tensor(0.3))
        tensor([[ 0.9988+0.0000j, -0.0477-0.0148j],
                [ 0.0490+0.0099j,  0.8765+0.4788j]])
        """
        # It might be that they are in different interfaces, e.g.,
        # U3(0.2, 0.3, tf.Variable(0.5), wires=0)
        # So we need to make sure the matrix comes out having the right type
        interface = qml.math._multi_dispatch([theta, phi, delta])
        c = qml.math.cos(theta / 2)
        s = qml.math.sin(theta / 2)
        # If anything is not tensorflow, it has to be casted and then
        if interface == "tensorflow":
            phi = qml.math.cast_like(qml.math.asarray(phi, like=interface), 1j)
            delta = qml.math.cast_like(qml.math.asarray(delta, like=interface), 1j)
            c = qml.math.cast_like(qml.math.asarray(c, like=interface), 1j)
            s = qml.math.cast_like(qml.math.asarray(s, like=interface), 1j)
        # The following variable is used to assert the all terms to be stacked have same shape
        one = qml.math.ones_like(phi) * qml.math.ones_like(delta)
        c = c * one
        s = s * one
        mat = [
            [c, -s * qml.math.exp(1j * delta)],
            [s * qml.math.exp(1j * phi), c * qml.math.exp(1j * (phi + delta))],
        ]
        return qml.math.stack([stack_last(row) for row in mat], axis=-2)
    @staticmethod
    def compute_decomposition(theta, phi, delta, wires):
        r"""Representation of the operator as a product of other operators (static method).
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.U3.decomposition`.
        Args:
            theta (float): polar angle :math:`\theta`
            phi (float): azimuthal angle :math:`\phi`
            delta (float): quantum phase :math:`\delta`
            wires (Iterable, Wires): the subsystem the gate acts on
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> qml.U3.compute_decomposition(1.23, 2.34, 3.45, wires=0)
        [Rot(3.45, 1.23, -3.45, wires=[0]),
        PhaseShift(3.45, wires=[0]),
        PhaseShift(2.34, wires=[0])]
        """
        # Implements U3(theta, phi, delta) = PhaseShift(phi) PhaseShift(delta) Rot(delta, theta, -delta).
        decomp_ops = [
            Rot(delta, theta, -delta, wires=wires),
            PhaseShift(delta, wires=wires),
            PhaseShift(phi, wires=wires),
        ]
        return decomp_ops
    def adjoint(self):
        # theta keeps its sign; phi and delta swap roles (shifted by pi, mod 2*pi).
        theta, phi, delta = self.parameters
        new_delta = qml.math.mod((np.pi - phi), (2 * np.pi))
        new_phi = qml.math.mod((np.pi - delta), (2 * np.pi))
        return U3(theta, new_phi, new_delta, wires=self.wires)
class IsingXX(Operation):
    r"""
    Ising XX coupling gate
    .. math:: XX(\phi) = \begin{bmatrix}
            \cos(\phi / 2) & 0 & 0 & -i \sin(\phi / 2) \\
            0 & \cos(\phi / 2) & -i \sin(\phi / 2) & 0 \\
            0 & -i \sin(\phi / 2) & \cos(\phi / 2) & 0 \\
            -i \sin(\phi / 2) & 0 & 0 & \cos(\phi / 2)
        \end{bmatrix}.
    **Details:**
    * Number of wires: 2
    * Number of parameters: 1
    * Number of dimensions per parameter: (0,)
    * Gradient recipe: :math:`\frac{d}{d\phi}f(XX(\phi)) = \frac{1}{2}\left[f(XX(\phi +\pi/2)) - f(XX(\phi-\pi/2))\right]`
      where :math:`f` is an expectation value depending on :math:`XX(\phi)`.
    Args:
        phi (float): the phase angle
        wires (int): the subsystem the gate acts on
        do_queue (bool): Indicates whether the operator should be
            immediately pushed into the Operator queue (optional)
        id (str or None): String representing the operation (optional)
    """
    num_wires = 2
    num_params = 1
    """int: Number of trainable parameters that the operator depends on."""
    ndim_params = (0,)
    """tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
    # Single frequency -> standard two-term parameter-shift rule.
    grad_method = "A"
    parameter_frequencies = [(1,)]
    def generator(self):
        # G satisfies IsingXX(phi) == exp(-i * phi * G), with G = -0.5 X (x) X.
        return -0.5 * PauliX(wires=self.wires[0]) @ PauliX(wires=self.wires[1])
    def __init__(self, phi, wires, do_queue=True, id=None):
        super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
    @staticmethod
    def compute_matrix(phi):  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        .. seealso:: :meth:`~.IsingXX.matrix`
        Args:
            phi (tensor_like or float): phase angle
        Returns:
            tensor_like: canonical matrix
        **Example**
        >>> qml.IsingXX.compute_matrix(torch.tensor(0.5))
        tensor([[0.9689+0.0000j, 0.0000+0.0000j, 0.0000+0.0000j, 0.0000-0.2474j],
                [0.0000+0.0000j, 0.9689+0.0000j, 0.0000-0.2474j, 0.0000+0.0000j],
                [0.0000+0.0000j, 0.0000-0.2474j, 0.9689+0.0000j, 0.0000+0.0000j],
                [0.0000-0.2474j, 0.0000+0.0000j, 0.0000+0.0000j, 0.9689+0.0000j]],
               dtype=torch.complex128)
        """
        c = qml.math.cos(phi / 2)
        s = qml.math.sin(phi / 2)
        # TensorFlow does not implicitly promote real -> complex, so cast explicitly.
        if qml.math.get_interface(phi) == "tensorflow":
            c = qml.math.cast_like(c, 1j)
            s = qml.math.cast_like(s, 1j)
        # The following avoids casting an imaginary quantity to reals when backpropagating
        c = (1 + 0j) * c
        js = -1j * s
        z = qml.math.zeros_like(js)
        # cos(phi/2) on the diagonal, -i sin(phi/2) on the anti-diagonal.
        matrix = [
            [c, z, z, js],
            [z, c, js, z],
            [z, js, c, z],
            [js, z, z, c],
        ]
        return qml.math.stack([stack_last(row) for row in matrix], axis=-2)
    @staticmethod
    def compute_decomposition(phi, wires):
        r"""Representation of the operator as a product of other operators (static method). :
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.IsingXX.decomposition`.
        Args:
            phi (float): the phase angle
            wires (Iterable, Wires): the subsystem the gate acts on
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> qml.IsingXX.compute_decomposition(1.23, wires=(0,1))
        [CNOT(wires=[0, 1]), RX(1.23, wires=[0]), CNOT(wires=[0, 1])]
        """
        # CNOT-conjugation turns the single-qubit RX on the first wire
        # into the two-qubit XX interaction.
        decomp_ops = [
            qml.CNOT(wires=wires),
            RX(phi, wires=[wires[0]]),
            qml.CNOT(wires=wires),
        ]
        return decomp_ops
    def adjoint(self):
        # Adjoint of a rotation is the rotation by the negated angle.
        (phi,) = self.parameters
        return IsingXX(-phi, wires=self.wires)
    def pow(self, z):
        # Powers fold into the angle: IsingXX(phi) ** z == IsingXX(phi * z).
        return [IsingXX(self.data[0] * z, wires=self.wires)]
class IsingYY(Operation):
    r"""
    Ising YY coupling gate
    .. math:: \mathtt{YY}(\phi) = \begin{bmatrix}
        \cos(\phi / 2) & 0 & 0 & i \sin(\phi / 2) \\
        0 & \cos(\phi / 2) & -i \sin(\phi / 2) & 0 \\
        0 & -i \sin(\phi / 2) & \cos(\phi / 2) & 0 \\
        i \sin(\phi / 2) & 0 & 0 & \cos(\phi / 2)
        \end{bmatrix}.
    **Details:**
    * Number of wires: 2
    * Number of parameters: 1
    * Number of dimensions per parameter: (0,)
    * Gradient recipe: :math:`\frac{d}{d\phi}f(YY(\phi)) = \frac{1}{2}\left[f(YY(\phi +\pi/2)) - f(YY(\phi-\pi/2))\right]`
      where :math:`f` is an expectation value depending on :math:`YY(\phi)`.
    Args:
        phi (float): the phase angle
        wires (int): the subsystem the gate acts on
        do_queue (bool): Indicates whether the operator should be
            immediately pushed into the Operator queue (optional)
        id (str or None): String representing the operation (optional)
    """
    num_wires = 2
    num_params = 1
    """int: Number of trainable parameters that the operator depends on."""
    ndim_params = (0,)
    """tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
    # Single frequency -> standard two-term parameter-shift rule.
    grad_method = "A"
    parameter_frequencies = [(1,)]
    def generator(self):
        # G satisfies IsingYY(phi) == exp(-i * phi * G), with G = -0.5 Y (x) Y.
        return -0.5 * PauliY(wires=self.wires[0]) @ PauliY(wires=self.wires[1])
    def __init__(self, phi, wires, do_queue=True, id=None):
        super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
    @staticmethod
    def compute_decomposition(phi, wires):
        r"""Representation of the operator as a product of other operators (static method). :
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.IsingYY.decomposition`.
        Args:
            phi (float): the phase angle
            wires (Iterable, Wires): the subsystem the gate acts on
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> qml.IsingYY.compute_decomposition(1.23, wires=(0,1))
        [CY(wires=[0, 1]), RY(1.23, wires=[0]), CY(wires=[0, 1])]
        """
        # CY-conjugation turns the single-qubit RY on the first wire
        # into the two-qubit YY interaction.
        return [
            qml.CY(wires=wires),
            qml.RY(phi, wires=[wires[0]]),
            qml.CY(wires=wires),
        ]
    @staticmethod
    def compute_matrix(phi):  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.IsingYY.matrix`
        Args:
            phi (tensor_like or float): phase angle
        Returns:
            tensor_like: canonical matrix
        **Example**
        >>> qml.IsingYY.compute_matrix(torch.tensor(0.5))
        tensor([[0.9689+0.0000j, 0.0000+0.0000j, 0.0000+0.0000j, 0.0000+0.2474j],
                [0.0000+0.0000j, 0.9689+0.0000j, 0.0000-0.2474j, 0.0000+0.0000j],
                [0.0000+0.0000j, 0.0000-0.2474j, 0.9689+0.0000j, 0.0000+0.0000j],
                [0.0000+0.2474j, 0.0000+0.0000j, 0.0000+0.0000j, 0.9689+0.0000j]])
        """
        c = qml.math.cos(phi / 2)
        s = qml.math.sin(phi / 2)
        # TensorFlow does not implicitly promote real -> complex, so cast explicitly.
        if qml.math.get_interface(phi) == "tensorflow":
            c = qml.math.cast_like(c, 1j)
            s = qml.math.cast_like(s, 1j)
        # The following avoids casting an imaginary quantity to reals when backpropagating
        c = (1 + 0j) * c
        js = 1j * s
        z = qml.math.zeros_like(js)
        # Unlike XX, the anti-diagonal corners carry +i sin(phi/2) and the
        # inner anti-diagonal carries -i sin(phi/2).
        matrix = [
            [c, z, z, js],
            [z, c, -js, z],
            [z, -js, c, z],
            [js, z, z, c],
        ]
        return qml.math.stack([stack_last(row) for row in matrix], axis=-2)
    def adjoint(self):
        # Adjoint of a rotation is the rotation by the negated angle.
        (phi,) = self.parameters
        return IsingYY(-phi, wires=self.wires)
    def pow(self, z):
        # Powers fold into the angle: IsingYY(phi) ** z == IsingYY(phi * z).
        return [IsingYY(self.data[0] * z, wires=self.wires)]
class IsingZZ(Operation):
    r"""
    Ising ZZ coupling gate
    .. math:: ZZ(\phi) = \begin{bmatrix}
        e^{-i \phi / 2} & 0 & 0 & 0 \\
        0 & e^{i \phi / 2} & 0 & 0 \\
        0 & 0 & e^{i \phi / 2} & 0 \\
        0 & 0 & 0 & e^{-i \phi / 2}
    \end{bmatrix}.
    **Details:**
    * Number of wires: 2
    * Number of parameters: 1
    * Number of dimensions per parameter: (0,)
    * Gradient recipe: :math:`\frac{d}{d\phi}f(ZZ(\phi)) = \frac{1}{2}\left[f(ZZ(\phi +\pi/2)) - f(ZZ(\phi-\pi/2))\right]`
      where :math:`f` is an expectation value depending on :math:`ZZ(\phi)`.
    Args:
        phi (float): the phase angle
        wires (int): the subsystem the gate acts on
        do_queue (bool): Indicates whether the operator should be
            immediately pushed into the Operator queue (optional)
        id (str or None): String representing the operation (optional)
    """
    num_wires = 2
    num_params = 1
    """int: Number of trainable parameters that the operator depends on."""
    ndim_params = (0,)
    """tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
    grad_method = "A"
    parameter_frequencies = [(1,)]
    def generator(self):
        # ZZ(phi) = exp(-i * phi/2 * (Z ⊗ Z)), so the generator is -0.5 * Z ⊗ Z.
        return -0.5 * PauliZ(wires=self.wires[0]) @ PauliZ(wires=self.wires[1])
    def __init__(self, phi, wires, do_queue=True, id=None):
        super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
    @staticmethod
    def compute_decomposition(phi, wires):
        r"""Representation of the operator as a product of other operators (static method). :
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.IsingZZ.decomposition`.
        Args:
            phi (float): the phase angle
            wires (Iterable, Wires): the subsystem the gate acts on
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> qml.IsingZZ.compute_decomposition(1.23, wires=(0, 1))
        [CNOT(wires=[0, 1]), RZ(1.23, wires=[1]), CNOT(wires=[0, 1])]
        """
        # Sandwich an RZ rotation on the second wire between two CNOTs;
        # the CNOT conjugation turns the single-qubit RZ into the ZZ coupling.
        return [
            qml.CNOT(wires=wires),
            qml.RZ(phi, wires=[wires[1]]),
            qml.CNOT(wires=wires),
        ]
    @staticmethod
    def compute_matrix(phi):  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.IsingZZ.matrix`
        Args:
            phi (tensor_like or float): phase angle
        Returns:
            tensor_like: canonical matrix
        **Example**
        >>> qml.IsingZZ.compute_matrix(torch.tensor(0.5))
        tensor([[0.9689-0.2474j, 0.0000+0.0000j, 0.0000+0.0000j, 0.0000+0.0000j],
                [0.0000+0.0000j, 0.9689+0.2474j, 0.0000+0.0000j, 0.0000+0.0000j],
                [0.0000+0.0000j, 0.0000+0.0000j, 0.9689+0.2474j, 0.0000+0.0000j],
                [0.0000+0.0000j, 0.0000+0.0000j, 0.0000+0.0000j, 0.9689-0.2474j]])
        """
        if qml.math.get_interface(phi) == "tensorflow":
            # TensorFlow does not implicitly promote real tensors in complex
            # arithmetic, so cast to complex explicitly.
            phi = qml.math.cast_like(phi, 1j)
        neg_phase = qml.math.exp(-0.5j * phi)
        pos_phase = qml.math.exp(0.5j * phi)
        zeros = qml.math.zeros_like(pos_phase)
        # The gate is diagonal: e^{-i phi/2} on |00>, |11> and e^{+i phi/2} on |01>, |10>.
        matrix = [
            [neg_phase, zeros, zeros, zeros],
            [zeros, pos_phase, zeros, zeros],
            [zeros, zeros, pos_phase, zeros],
            [zeros, zeros, zeros, neg_phase],
        ]
        return qml.math.stack([stack_last(row) for row in matrix], axis=-2)
    @staticmethod
    def compute_eigvals(phi):  # pylint: disable=arguments-differ
        r"""Eigenvalues of the operator in the computational basis (static method).
        If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
        the operator can be reconstructed as
        .. math:: O = U \Sigma U^{\dagger},
        where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
        Otherwise, no particular order for the eigenvalues is guaranteed.
        .. seealso:: :meth:`~.IsingZZ.eigvals`
        Args:
            phi (tensor_like or float): phase angle
        Returns:
            tensor_like: eigenvalues
        **Example**
        >>> qml.IsingZZ.compute_eigvals(torch.tensor(0.5))
        tensor([0.9689-0.2474j, 0.9689+0.2474j, 0.9689+0.2474j, 0.9689-0.2474j])
        """
        if qml.math.get_interface(phi) == "tensorflow":
            # Same complex-promotion caveat as in compute_matrix.
            phi = qml.math.cast_like(phi, 1j)
        pos_phase = qml.math.exp(1.0j * phi / 2)
        neg_phase = qml.math.exp(-1.0j * phi / 2)
        # Order matches the diagonal of compute_matrix: |00>, |01>, |10>, |11>.
        return stack_last([neg_phase, pos_phase, pos_phase, neg_phase])
    def adjoint(self):
        # The adjoint of a ZZ rotation is the rotation by the negated angle.
        (phi,) = self.parameters
        return IsingZZ(-phi, wires=self.wires)
    def pow(self, z):
        # Exponentiating the gate scales its angle: ZZ(phi)**z = ZZ(z * phi).
        return [IsingZZ(self.data[0] * z, wires=self.wires)]
| 34.348631
| 182
| 0.588105
| 12,699
| 92,810
| 4.222458
| 0.04457
| 0.028198
| 0.004812
| 0.010873
| 0.873949
| 0.848548
| 0.820835
| 0.795267
| 0.778296
| 0.767237
| 0
| 0.044887
| 0.274356
| 92,810
| 2,701
| 183
| 34.361348
| 0.751288
| 0.522584
| 0
| 0.613917
| 0
| 0
| 0.022912
| 0.001585
| 0
| 0
| 0
| 0.00037
| 0
| 1
| 0.142536
| false
| 0
| 0.011223
| 0.059484
| 0.413019
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1c418861fb6e75e48a46198115b66dd8dd3e8209
| 137
|
py
|
Python
|
app/main/errors.py
|
geoffrey45/Baseline-news
|
d211a84e087a222cf1720808f4abe31b9315c632
|
[
"MIT"
] | null | null | null |
app/main/errors.py
|
geoffrey45/Baseline-news
|
d211a84e087a222cf1720808f4abe31b9315c632
|
[
"MIT"
] | null | null | null |
app/main/errors.py
|
geoffrey45/Baseline-news
|
d211a84e087a222cf1720808f4abe31b9315c632
|
[
"MIT"
] | null | null | null |
from flask import render_template
from . import main
@main.app_errorhandler(404)
def fof(error):
    """Render the blueprint's custom 404 ("file or folder not found") page."""
    # Flask treats a (body, status) tuple as a response carrying that status code.
    body = render_template('fof.html')
    return body, 404
| 22.833333
| 39
| 0.79562
| 21
| 137
| 5.047619
| 0.666667
| 0.264151
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04878
| 0.10219
| 137
| 6
| 39
| 22.833333
| 0.813008
| 0
| 0
| 0
| 0
| 0
| 0.057971
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
1c8defa0b35e732ccf1fe1cd71b47161957311c6
| 28
|
py
|
Python
|
01-Python/10-Flask/hello.py
|
Jerrywx/Python_Down
|
361d6bb8a5f7768c7064e97c40e4f485ece14a27
|
[
"Apache-2.0"
] | null | null | null |
01-Python/10-Flask/hello.py
|
Jerrywx/Python_Down
|
361d6bb8a5f7768c7064e97c40e4f485ece14a27
|
[
"Apache-2.0"
] | null | null | null |
01-Python/10-Flask/hello.py
|
Jerrywx/Python_Down
|
361d6bb8a5f7768c7064e97c40e4f485ece14a27
|
[
"Apache-2.0"
] | null | null | null |
import flask
# Emit a greeting on standard output.
message = "Hello"
print(message)
| 9.333333
| 14
| 0.75
| 4
| 28
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 28
| 3
| 14
| 9.333333
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
1c9be4c94a0dac9e94755cf96cf9c396f91ce138
| 6,007
|
py
|
Python
|
openldap/tests/test_check.py
|
volksman/integrations-core
|
34405662b09bf4a8c32feaed16a4745c7e1f24c0
|
[
"BSD-3-Clause"
] | null | null | null |
openldap/tests/test_check.py
|
volksman/integrations-core
|
34405662b09bf4a8c32feaed16a4745c7e1f24c0
|
[
"BSD-3-Clause"
] | null | null | null |
openldap/tests/test_check.py
|
volksman/integrations-core
|
34405662b09bf4a8c32feaed16a4745c7e1f24c0
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import ldap3
import pytest
from datadog_checks.dev.docker import get_docker_hostname
from datadog_checks.utils.platform import Platform
# Mark every test in this module as an integration test.
pytestmark = pytest.mark.integration
@pytest.fixture
def instance():
    """Return the default OpenLDAP check instance config used by the integration tests."""
    config = {}
    config["url"] = "ldap://{}:3890".format(get_docker_hostname())
    config["username"] = "cn=monitor,dc=example,dc=org"
    config["password"] = "monitor"
    # One custom query so the openldap.query.* metrics are emitted.
    config["custom_queries"] = [
        {
            "name": "stats",
            "search_base": "cn=statistics,cn=monitor",
            "search_filter": "(!(cn=Statistics))",
        }
    ]
    config["tags"] = ["test:integration"]
    return config
@pytest.fixture
def instance_ssl(instance):
    """Variant of ``instance`` pointing at the server's TLS endpoint."""
    ssl_url = "ldaps://{}:6360".format(get_docker_hostname())
    instance["url"] = ssl_url
    return instance
def test_check(aggregator, check, openldap_server, instance):
    """Run the check against a live server and assert on every metric it emits."""
    tags = ["url:{}".format(instance["url"]), "test:integration"]
    check.check(instance)
    aggregator.assert_service_check("openldap.can_connect", check.OK, tags=tags)
    # Connection/operation totals reported with only the base tags.
    for metric in (
        "openldap.bind_time",
        "openldap.connections.current",
        "openldap.connections.max_file_descriptors",
        "openldap.connections.total",
        "openldap.operations.completed.total",
        "openldap.operations.initiated.total",
    ):
        aggregator.assert_metric(metric, tags=tags)
    # One completed/initiated pair per LDAP operation type.
    operations = (
        "abandon", "add", "bind", "compare", "delete",
        "extended", "modify", "modrdn", "search", "unbind",
    )
    for operation in operations:
        op_tags = tags + ["operation:{}".format(operation)]
        aggregator.assert_metric("openldap.operations.completed", tags=op_tags)
        aggregator.assert_metric("openldap.operations.initiated", tags=op_tags)
    # Server statistics counters.
    for metric in (
        "openldap.statistics.bytes",
        "openldap.statistics.entries",
        "openldap.statistics.pdu",
        "openldap.statistics.referrals",
    ):
        aggregator.assert_metric(metric, tags=tags)
    # Thread gauges, broken down by status.
    for status in ("active", "backload", "open", "pending", "starting"):
        aggregator.assert_metric("openldap.threads", tags=tags + ["status:{}".format(status)])
    for metric in (
        "openldap.threads.max",
        "openldap.threads.max_pending",
        "openldap.uptime",
        "openldap.waiter.read",
        "openldap.waiter.write",
    ):
        aggregator.assert_metric(metric, tags=tags)
    # Metrics produced by the custom query defined in the instance fixture.
    query_tags = tags + ["query:stats"]
    aggregator.assert_metric("openldap.query.duration", tags=query_tags)
    aggregator.assert_metric("openldap.query.entries", tags=query_tags)
    # Fail if the check emitted anything not asserted above.
    aggregator.assert_all_metrics_covered()
def test_check_ssl(aggregator, check, openldap_server, instance_ssl):
    """TLS connection fails certificate verification, then succeeds with it disabled."""
    url_tag = "url:{}".format(instance_ssl["url"])
    tags = [url_tag, "test:integration"]
    # Should fail certificate verification
    with pytest.raises(ldap3.core.exceptions.LDAPExceptionError):
        check.check(instance_ssl)
    aggregator.assert_service_check("openldap.can_connect", check.CRITICAL, tags=tags)
    # Should work now
    instance_ssl["ssl_verify"] = False
    check.check(instance_ssl)
    aggregator.assert_service_check("openldap.can_connect", check.OK, tags=tags)
def test_check_connection_failure(aggregator, check, openldap_server, instance):
    """An unreachable URL must raise and report a CRITICAL service check."""
    instance["url"] = "bad_url"
    url_tag = "url:{}".format(instance["url"])
    tags = [url_tag, "test:integration"]
    # The malformed URL makes the connection attempt raise.
    with pytest.raises(ldap3.core.exceptions.LDAPExceptionError):
        check.check(instance)
    aggregator.assert_service_check("openldap.can_connect", check.CRITICAL, tags=tags)
@pytest.mark.skipif(not Platform.is_linux(), reason='Windows sockets are not file handles')
def test_check_socket(aggregator, check, openldap_server, instance):
    """Connecting over a Unix domain socket (ldapi://) must succeed on Linux."""
    socket_url = "ldapi://{}".format(openldap_server)
    instance["url"] = socket_url
    tags = ["url:{}".format(instance["url"]), "test:integration"]
    check.check(instance)
    aggregator.assert_service_check("openldap.can_connect", check.OK, tags=tags)
| 54.117117
| 97
| 0.738805
| 681
| 6,007
| 6.374449
| 0.198238
| 0.176918
| 0.212854
| 0.290256
| 0.806957
| 0.765953
| 0.713891
| 0.587422
| 0.506796
| 0.161023
| 0
| 0.003007
| 0.1142
| 6,007
| 110
| 98
| 54.609091
| 0.812817
| 0.03163
| 0
| 0.191011
| 0
| 0
| 0.338726
| 0.171256
| 0
| 0
| 0
| 0
| 0.539326
| 1
| 0.067416
| false
| 0.011236
| 0.044944
| 0.011236
| 0.134831
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
98d5854b34df60365d8aaa9dc66c8001fa64c498
| 28
|
py
|
Python
|
strips/domains/__init__.py
|
yijiangh/pyplanners
|
ef1ae33e233f20cd93ce03cba363b0f14fd078bc
|
[
"MIT"
] | 23
|
2017-11-13T23:56:25.000Z
|
2022-02-12T08:56:28.000Z
|
strips/domains/__init__.py
|
yijiangh/pyplanners
|
ef1ae33e233f20cd93ce03cba363b0f14fd078bc
|
[
"MIT"
] | 1
|
2022-01-04T17:07:47.000Z
|
2022-01-04T17:07:47.000Z
|
strips/domains/__init__.py
|
yijiangh/pyplanners
|
ef1ae33e233f20cd93ce03cba363b0f14fd078bc
|
[
"MIT"
] | 6
|
2017-07-13T07:21:13.000Z
|
2022-03-25T08:21:57.000Z
|
from .blocks_world import *
| 14
| 27
| 0.785714
| 4
| 28
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c7275672cbe2bedb25747aca98a264314a64c40a
| 123
|
py
|
Python
|
03 Operators and Operands/logicaloperators.py
|
Himanshu44626748/Learn-Python
|
f3a4d997f2d29b146e5f7434f4801ae94bc3483f
|
[
"MIT"
] | 2
|
2020-03-16T14:57:44.000Z
|
2020-11-29T07:45:54.000Z
|
03 Operators and Operands/logicaloperators.py
|
Himanshu44626748/Learn-Python
|
f3a4d997f2d29b146e5f7434f4801ae94bc3483f
|
[
"MIT"
] | null | null | null |
03 Operators and Operands/logicaloperators.py
|
Himanshu44626748/Learn-Python
|
f3a4d997f2d29b146e5f7434f4801ae94bc3483f
|
[
"MIT"
] | 1
|
2020-08-13T07:59:02.000Z
|
2020-08-13T07:59:02.000Z
|
# Demonstrate Python's boolean operators on two comparisons.
# With x = 20 and y = 30: x == 25 is False, y == 30 is True.
x = 20
y = 30
print(x == 25 and y == 30)
print(x == 25 or y == 30)
print(not (x == 25 or y == 30))
print(not (x == 25) and y == 30)
| 12.3
| 29
| 0.536585
| 30
| 123
| 2.2
| 0.3
| 0.227273
| 0.484848
| 0.272727
| 0.954545
| 0.530303
| 0.530303
| 0.530303
| 0.530303
| 0
| 0
| 0.196078
| 0.170732
| 123
| 10
| 29
| 12.3
| 0.45098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
c7326dfe4af8d62fbff416efd4d0c585ac4447cd
| 8,744
|
py
|
Python
|
tests/test_insert.py
|
lovette/mysqlstmt
|
ef7fa56ee45046018d6a6cd2c64abce19a8b33a8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_insert.py
|
lovette/mysqlstmt
|
ef7fa56ee45046018d6a6cd2c64abce19a8b33a8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_insert.py
|
lovette/mysqlstmt
|
ef7fa56ee45046018d6a6cd2c64abce19a8b33a8
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
from nose.tools import assert_equals, raises
from mysqlstmt import Insert, Select
from collections import OrderedDict
class TestInsert(unittest.TestCase):
    """Unit tests for mysqlstmt's ``Insert`` statement builder.

    Each test renders an INSERT statement via ``.sql()`` and compares the
    result — a ``(sql, params)`` tuple, or a plain SQL string when the
    statement was built with ``placeholder=False`` — against the expected
    output. Tests decorated with ``@raises(ValueError)`` assert that invalid
    statement configurations are rejected.
    """
    # --- basic value-based inserts ---
    def test_constructor_table_name(self):
        q = Insert('t1')
        sql_t = q.set_value('t1c1', 1).sql()
        assert_equals(sql_t, ('INSERT INTO t1 (`t1c1`) VALUES (1)', None))
    def test_set_value_int(self):
        q = Insert()
        sql_t = q.into_table('t1').set_value('t1c1', 1).sql()
        assert_equals(sql_t, ('INSERT INTO t1 (`t1c1`) VALUES (1)', None))
    def test_set_value_int_callable(self):
        # Calling the statement object directly is equivalent to .sql().
        q = Insert()
        sql_t = q.into_table('t1').set_value('t1c1', 1)()
        assert_equals(sql_t, ('INSERT INTO t1 (`t1c1`) VALUES (1)', None))
    def test_set_value_ints(self):
        q = Insert()
        sql_t = q.into_table('t1').set_value('t1c1', 1).set_value('t1c2', 2).sql()
        assert_equals(sql_t, ('INSERT INTO t1 (`t1c1`, `t1c2`) VALUES (1, 2)', None))
    # --- dict-valued inserts (OrderedDict fixes column order) ---
    def test_dict_int(self):
        q = Insert()
        values = {'t1c1': 1}
        sql_t = q.into_table('t1').set_value(values).sql()
        assert_equals(sql_t, ('INSERT INTO t1 (`t1c1`) VALUES (1)', None))
    def test_dict_ints(self):
        q = Insert()
        values = OrderedDict([('t1c1', 1), ('t1c2', 2)])
        sql_t = q.into_table('t1').set_value(values).sql()
        assert_equals(sql_t, ('INSERT INTO t1 (`t1c1`, `t1c2`) VALUES (1, 2)', None))
    def test_dict_strings(self):
        # String values become ? placeholders with a parameter list.
        q = Insert()
        values = OrderedDict([('t1c1', 'a'), ('t1c2', 'b')])
        sql_t = q.into_table('t1').set_value(values).sql()
        assert_equals(sql_t, ("INSERT INTO t1 (`t1c1`, `t1c2`) VALUES (?, ?)", ['a', 'b']))
    def test_null(self):
        # None renders as a literal NULL, not a placeholder.
        q = Insert()
        values = OrderedDict([('t1c1', 'a'), ('t1c2', None)])
        sql_t = q.into_table('t1').set_value(values).sql()
        assert_equals(sql_t, ("INSERT INTO t1 (`t1c1`, `t1c2`) VALUES (?, NULL)", ['a']))
    # --- raw (unescaped) values, e.g. SQL function calls ---
    def test_function_value(self):
        # set_value treats 'NOW()' as data: it is parameterized, not inlined.
        q = Insert()
        sql_t = q.into_table('t1').set_value('t1c1', 'NOW()').sql()
        assert_equals(sql_t, ('INSERT INTO t1 (`t1c1`) VALUES (?)', ['NOW()']))
    def test_function_raw_value(self):
        q = Insert()
        sql_t = q.into_table('t1').set_raw_value('t1c1', 'NOW()').sql()
        assert_equals(sql_t, ('INSERT INTO t1 (`t1c1`) VALUES (NOW())', None))
    def test_function_raw_value_dict(self):
        q = Insert()
        sql_t = q.into_table('t1').set_raw_value({'t1c1': 'NOW()'}).sql()
        assert_equals(sql_t, ('INSERT INTO t1 (`t1c1`) VALUES (NOW())', None))
    def test_function_raw_value_with_valparams(self):
        q = Insert()
        sql_t = q.into_table('t1').set_raw_value('t1c1', 'PASSWORD(?)', value_params=('mypw',)).sql()
        assert_equals(sql_t, ('INSERT INTO t1 (`t1c1`) VALUES (PASSWORD(?))', ['mypw']))
    def test_function_raw_value_dict_with_valparams(self):
        q = Insert()
        sql_t = q.into_table('t1').set_raw_value({'t1c1': ('PASSWORD(?)', ('mypw',))}).sql()
        assert_equals(sql_t, ('INSERT INTO t1 (`t1c1`) VALUES (PASSWORD(?))', ['mypw']))
    # --- INSERT ... SELECT ---
    def test_select_string_col(self):
        q = Insert()
        sql_t = q.into_table('t1').columns('t1c1').select('SELECT t2c1 FROM t2').sql()
        assert_equals(sql_t, ('INSERT INTO t1 (`t1c1`) SELECT t2c1 FROM t2', None))
    def test_select_string_cols(self):
        q = Insert()
        sql_t = q.into_table('t1').columns(['t1c1', 't1c2']).select('SELECT `t2c1`, `t2c2` FROM t2').sql()
        assert_equals(sql_t, ('INSERT INTO t1 (`t1c1`, `t1c2`) SELECT `t2c1`, `t2c2` FROM t2', None))
    def test_select_obj_cols(self):
        # A Select statement object is rendered inline just like a string.
        q = Insert()
        qselect = Select('t2').columns(['t2c1', 't2c2'])
        sql_t = q.into_table('t1').columns(['t1c1', 't1c2']).select(qselect).sql()
        assert_equals(sql_t, ('INSERT INTO t1 (`t1c1`, `t1c2`) SELECT `t2c1`, `t2c2` FROM t2', None))
    def test_ignore(self):
        q = Insert('t1', ignore_error=True)
        sql_t = q.set_value('t1c1', 1).sql()
        assert_equals(sql_t, ('INSERT IGNORE INTO t1 (`t1c1`) VALUES (1)', None))
    # --- batch inserts ---
    def test_function_batch_1x1(self):
        q = Insert()
        data = [['v1']]
        sql_t = q.into_table('t1').columns('t1c1').set_batch_value(data).sql()
        assert_equals(sql_t, ("INSERT INTO t1 (`t1c1`) VALUES (?)", data))
    def test_function_batch_3x1(self):
        q = Insert()
        data = [['v1'], ['v2'], ['NOW()']]
        sql_t = q.into_table('t1').columns('t1c1').set_batch_value(data).sql()
        assert_equals(sql_t, ("INSERT INTO t1 (`t1c1`) VALUES (?)", data))
    def test_function_batch_3x3(self):
        q = Insert()
        data = [['v1', 'v2', 'NOW()'], ['v1', 'v2', 'NOW()'], ['v1', 'v2', 'NOW()']]
        sql_t = q.into_table('t1').columns(['t1c1', 't1c2', 't1c3']).set_batch_value(data).sql()
        assert_equals(sql_t, ("INSERT INTO t1 (`t1c1`, `t1c2`, `t1c3`) VALUES (?, ?, ?)", data))
    # --- batch inserts without placeholders: values are inlined per row ---
    def test_function_batch_1x1_noparam(self):
        q = Insert(placeholder=False)
        data = [["'v1'"]]
        sql = q.into_table('t1').columns('t1c1').set_batch_value(data).sql()
        assert_equals(sql, "INSERT INTO t1 (`t1c1`) VALUES ('v1')")
    def test_function_batch_3x1_noparam(self):
        q = Insert(placeholder=False)
        data = [["'v1'"], ["'v2'"], ['NOW()']]
        sql = q.into_table('t1').columns('t1c1').set_batch_value(data).sql()
        assert_equals(sql, "INSERT INTO t1 (`t1c1`) VALUES ('v1'), ('v2'), (NOW())")
    def test_function_batch_3x3_noparam(self):
        q = Insert(placeholder=False)
        data = [["'r1v1'", "'r1v2'", 'NOW()'], ["'r2v1'", "'r2v2'", 'NOW()'], ["'r3v1'", "'r3v2'", 'NOW()']]
        sql = q.into_table('t1').columns(['t1c1', 't1c2', 't1c3']).set_batch_value(data).sql()
        assert_equals(sql, "INSERT INTO t1 (`t1c1`, `t1c2`, `t1c3`) VALUES ('r1v1', 'r1v2', NOW()), ('r2v1', 'r2v2', NOW()), ('r3v1', 'r3v2', NOW())")
    # --- non-ASCII (UTF-8) values ---
    def test_dict_strings_utf_param(self):
        q = Insert()
        values = OrderedDict([('t1c1', u'äöü')])
        sql_t = q.into_table('t1').set_value(values).sql()
        assert_equals(sql_t, ("INSERT INTO t1 (`t1c1`) VALUES (?)", [u'äöü']))
    def test_dict_strings_utf_raw(self):
        q = Insert()
        sql_t = q.into_table('t1').set_raw_value('t1c1', u'"äöü"').sql()
        assert_equals(sql_t, (u'INSERT INTO t1 (`t1c1`) VALUES ("äöü")', None))
    def test_dict_strings_utf_batch(self):
        q = Insert()
        data = [[u'äöü']]
        sql_t = q.into_table('t1').columns('t1c1').set_batch_value(data).sql()
        assert_equals(sql_t, ('INSERT INTO t1 (`t1c1`) VALUES (?)', data))
    def test_dict_strings_utf_noparam(self):
        q = Insert(placeholder=False)
        sql = q.into_table('t1').set_value('t1c1', u'"äöü"').sql()
        assert_equals(sql, u'INSERT INTO t1 (`t1c1`) VALUES ("äöü")')
    def test_set_value_int_option(self):
        q = Insert()
        sql_t = q.set_option('LOW_PRIORITY').into_table('t1').set_value('t1c1', 1).sql()
        assert_equals(sql_t, ('INSERT LOW_PRIORITY INTO t1 (`t1c1`) VALUES (1)', None))
    # --- invalid configurations must raise ValueError ---
    @raises(ValueError)
    def test_fail_no_tables(self):
        q = Insert()
        q.set_value('t1c1', 1).sql()
    @raises(ValueError)
    def test_fail_multi_tables(self):
        Insert(['t1', 't2'])
    @raises(ValueError)
    def test_fail_no_values(self):
        q = Insert('t1')
        q.sql()
    @raises(ValueError)
    def test_fail_set_columns(self):
        # columns() and set_value() are mutually exclusive.
        q = Insert()
        q.into_table('t1').columns('t1c1').set_value('t1c1', 1).sql()
    @raises(ValueError)
    def test_fail_select_with_set_value(self):
        q = Insert()
        q.into_table('t1').set_value('t1c1', 1).select('SELECT * FROM t2').sql()
    @raises(ValueError)
    def test_fail_select_no_columns(self):
        q = Insert()
        q.into_table('t1').select('SELECT * FROM t2').sql()
    @raises(ValueError)
    def test_fail_batch_values(self):
        q = Insert()
        data = [['v1']]
        q.into_table('t1').set_value('t1c1', 1).set_batch_value(data).sql()
    @raises(ValueError)
    def test_fail_batch_no_columns(self):
        q = Insert()
        data = [['v1']]
        q.into_table('t1').set_batch_value(data).sql()
    @raises(ValueError)
    def test_fail_batch_select(self):
        q = Insert()
        data = [['v1']]
        q.into_table('t1').columns('t1c1').set_batch_value(data).select('SELECT * FROM t2').sql()
    @raises(ValueError)
    def test_fail_select_with_params(self):
        # A Select carrying its own parameters cannot be embedded in an INSERT.
        q = Insert()
        qselect = Select('t2').columns(['t2c1']).where_value('t2c1', 't2v1')
        q.into_table('t1').columns(['t1c1']).select(qselect).sql()
| 40.669767
| 150
| 0.584858
| 1,213
| 8,744
| 3.976917
| 0.079143
| 0.039801
| 0.08437
| 0.079602
| 0.875415
| 0.825041
| 0.761194
| 0.71393
| 0.626451
| 0.606551
| 0
| 0.050635
| 0.216263
| 8,744
| 214
| 151
| 40.859813
| 0.653291
| 0.002402
| 0
| 0.439306
| 0
| 0.00578
| 0.210068
| 0
| 0
| 0
| 0
| 0
| 0.16763
| 1
| 0.219653
| false
| 0.023121
| 0.023121
| 0
| 0.248555
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c73ba4d5a3586ccb983492f53d2077824a4a9c23
| 48
|
py
|
Python
|
imagepy/tools/Standard/rectangle_tol.py
|
dada1437903138/imagepy
|
65d9ce088894eef587054e04018f9d34ff65084f
|
[
"BSD-4-Clause"
] | 1,178
|
2017-05-25T06:59:01.000Z
|
2022-03-31T11:38:53.000Z
|
imagepy/tools/Standard/rectangle_tol.py
|
TomisTony/imagepy
|
3c378ebaf72762b94f0826a410897757ebafe689
|
[
"BSD-4-Clause"
] | 76
|
2017-06-10T17:01:50.000Z
|
2021-12-23T08:13:29.000Z
|
imagepy/tools/Standard/rectangle_tol.py
|
TomisTony/imagepy
|
3c378ebaf72762b94f0826a410897757ebafe689
|
[
"BSD-4-Clause"
] | 315
|
2017-05-25T12:59:53.000Z
|
2022-03-07T22:52:21.000Z
|
from sciapp.action import RectangleROI as Plugin
| 48
| 48
| 0.875
| 7
| 48
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104167
| 48
| 1
| 48
| 48
| 0.976744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c754699085940e2d4eab000c0e3eaaa279d138db
| 1,823
|
py
|
Python
|
weasyl/test/login/test_get_account_verification_token.py
|
sl1-1/weasyl
|
d4f6bf3e33b85a2289a451d95d5b90ff24f5d539
|
[
"Apache-2.0"
] | 1
|
2019-02-15T04:21:48.000Z
|
2019-02-15T04:21:48.000Z
|
weasyl/test/login/test_get_account_verification_token.py
|
sl1-1/weasyl
|
d4f6bf3e33b85a2289a451d95d5b90ff24f5d539
|
[
"Apache-2.0"
] | 254
|
2017-12-23T19:36:43.000Z
|
2020-04-14T21:46:13.000Z
|
weasyl/test/login/test_get_account_verification_token.py
|
sl1-1/weasyl
|
d4f6bf3e33b85a2289a451d95d5b90ff24f5d539
|
[
"Apache-2.0"
] | 1
|
2017-12-23T18:42:16.000Z
|
2017-12-23T18:42:16.000Z
|
from __future__ import absolute_import
import pytest
import arrow
from weasyl import login
from weasyl import define as d
from weasyl.test.utils import Bag
# Shared fixture values for the verification-token tests below.
user_name = "test"
email_addr = "test@weasyl.com"
# 40-character dummy token; presumably matches the logincreate token length — confirm.
token = "a" * 40
# Main test password
raw_password = "0123456789"
@pytest.mark.usefixtures('db')
def test_acct_verif_token_returned_if_email_provided_to_function():
    """Looking up a pending account by e-mail address alone must return its token."""
    form = Bag(username=user_name, password='0123456789', passcheck='0123456789',
               email=email_addr, emailcheck=email_addr,
               day='12', month='12', year=arrow.now().year - 19)
    # Seed a pending-registration row directly into logincreate.
    row = {
        "token": token,
        "username": form.username,
        "login_name": form.username,
        "hashpass": login.passhash(raw_password),
        "email": form.email,
        "birthday": arrow.Arrow(2000, 1, 1),
        "unixtime": arrow.now(),
    }
    d.engine.execute(d.meta.tables["logincreate"].insert(), row)
    # Username deliberately omitted: the e-mail must be sufficient on its own.
    result = login.get_account_verification_token(email=form.email, username=None)
    assert result == token
@pytest.mark.usefixtures('db')
def test_acct_verif_token_returned_if_username_provided_to_function():
    """Looking up a pending account by username alone must return its token."""
    form = Bag(username=user_name, password='0123456789', passcheck='0123456789',
               email=email_addr, emailcheck=email_addr,
               day='12', month='12', year=arrow.now().year - 19)
    # Seed a pending-registration row directly into logincreate.
    row = {
        "token": token,
        "username": form.username,
        "login_name": form.username,
        "hashpass": login.passhash(raw_password),
        "email": form.email,
        "birthday": arrow.Arrow(2000, 1, 1),
        "unixtime": arrow.now(),
    }
    d.engine.execute(d.meta.tables["logincreate"].insert(), row)
    # E-mail deliberately omitted: the username must be sufficient on its own.
    result = login.get_account_verification_token(email=None, username=form.username)
    assert result == token
| 34.396226
| 102
| 0.681295
| 224
| 1,823
| 5.321429
| 0.285714
| 0.08557
| 0.07047
| 0.038591
| 0.778523
| 0.724832
| 0.724832
| 0.724832
| 0.724832
| 0.724832
| 0
| 0.051421
| 0.189248
| 1,823
| 52
| 103
| 35.057692
| 0.755074
| 0.009874
| 0
| 0.666667
| 0
| 0
| 0.115363
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 1
| 0.047619
| false
| 0.119048
| 0.142857
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
c759c9ab8ec317c12e9c50fae9314fdd2af17606
| 30
|
py
|
Python
|
server/src/police_lineups/context/__init__.py
|
vabalcar/police-lineups
|
9c4a17d58e973d6db6e442bd9d5f4313ad4d51b7
|
[
"MIT"
] | null | null | null |
server/src/police_lineups/context/__init__.py
|
vabalcar/police-lineups
|
9c4a17d58e973d6db6e442bd9d5f4313ad4d51b7
|
[
"MIT"
] | 2
|
2021-09-24T11:43:58.000Z
|
2021-09-24T12:00:21.000Z
|
server/src/police_lineups/context/__init__.py
|
vabalcar/police-lineups
|
9c4a17d58e973d6db6e442bd9d5f4313ad4d51b7
|
[
"MIT"
] | null | null | null |
from .user import UserContext
| 15
| 29
| 0.833333
| 4
| 30
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c78cdbe4c128c78b587f6b2d42af30efcc44aa11
| 29
|
py
|
Python
|
{{cookiecutter.project_name}}/{{cookiecutter.app_name}}/schemas/__init__.py
|
LeroyShirto/cookiecutter-flask-restful-docker
|
a71fe98480fb38c3e353ce9b64dad4d7a1b0ccac
|
[
"MIT"
] | 5
|
2018-05-12T15:34:11.000Z
|
2020-07-09T09:16:02.000Z
|
{{cookiecutter.project_name}}/{{cookiecutter.app_name}}/schemas/__init__.py
|
LeroyShirto/cookiecutter-flask-restful-docker
|
a71fe98480fb38c3e353ce9b64dad4d7a1b0ccac
|
[
"MIT"
] | 1
|
2021-11-30T11:06:38.000Z
|
2021-11-30T11:06:38.000Z
|
{{cookiecutter.project_name}}/{{cookiecutter.app_name}}/schemas/__init__.py
|
LeroyShirto/cookiecutter-flask-restful-docker
|
a71fe98480fb38c3e353ce9b64dad4d7a1b0ccac
|
[
"MIT"
] | 1
|
2019-02-13T09:57:55.000Z
|
2019-02-13T09:57:55.000Z
|
from .user import UserSchema
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4003385f88006a77739fb43ae3590c2882365db3
| 168
|
py
|
Python
|
tardis/rest/token_generator/__main__.py
|
maxfischer2781/tardis
|
a83ba0a02d2f153a8ab95b84ec78bc6ababa57a5
|
[
"MIT"
] | 4
|
2018-05-22T13:22:06.000Z
|
2019-03-26T15:32:57.000Z
|
tardis/rest/token_generator/__main__.py
|
maxfischer2781/tardis
|
a83ba0a02d2f153a8ab95b84ec78bc6ababa57a5
|
[
"MIT"
] | 50
|
2018-05-18T11:46:39.000Z
|
2019-04-26T07:29:45.000Z
|
tardis/rest/token_generator/__main__.py
|
maxfischer2781/tardis
|
a83ba0a02d2f153a8ab95b84ec78bc6ababa57a5
|
[
"MIT"
] | 2
|
2018-12-12T13:15:59.000Z
|
2018-12-17T08:18:15.000Z
|
from .generate_token import generate_token
import typer
def generate_token_cli():
typer.run(generate_token)
if __name__ == "__main__":
generate_token_cli()
| 15.272727
| 42
| 0.761905
| 22
| 168
| 5.136364
| 0.5
| 0.575221
| 0.336283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154762
| 168
| 10
| 43
| 16.8
| 0.795775
| 0
| 0
| 0
| 1
| 0
| 0.047619
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.333333
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
400bb7ebd6eca5cb86cb65e33abcb57eb189869c
| 5,254
|
py
|
Python
|
plugin/src/py/android_screenshot_tests/test_device_name_calculator.py
|
xiphirx/screenshot-tests-for-android
|
d6c0107239cea8675e76c1e868701f50b5e46be2
|
[
"Apache-2.0"
] | 1
|
2021-01-13T13:13:55.000Z
|
2021-01-13T13:13:55.000Z
|
plugin/src/py/android_screenshot_tests/test_device_name_calculator.py
|
xiphirx/screenshot-tests-for-android
|
d6c0107239cea8675e76c1e868701f50b5e46be2
|
[
"Apache-2.0"
] | null | null | null |
plugin/src/py/android_screenshot_tests/test_device_name_calculator.py
|
xiphirx/screenshot-tests-for-android
|
d6c0107239cea8675e76c1e868701f50b5e46be2
|
[
"Apache-2.0"
] | null | null | null |
import sys
import unittest
from .device_name_calculator import DeviceNameCalculator
if sys.version_info >= (3,):
from unittest.mock import *
else:
from mock import *
class TestDeviceNameCalculator(unittest.TestCase):
def test_API_19_GP_XXHDPI_1080x1920_arm64_v8a_esES(self):
def mock_data(parameters):
if 'ro.build.version.sdk' in parameters:
return '19'
elif 'com.google.android.gms' in parameters:
return 'package:/data/app/com.google.android.gms-pHwJaHhvXiRvuTo2Qxdbww==/base.apk'
elif 'density' in parameters:
return 'Physical density: 420'
elif 'size' in parameters:
return 'Physical size: 1080x1920'
elif 'ro.product.cpu.abi' in parameters:
return 'arm64-v8a'
elif 'persist.sys.locale' in parameters:
return 'es-ES'
return None
adb_executor = MagicMock()
adb_executor.execute.side_effect = mock_data
device_calculator = DeviceNameCalculator(adb_executor)
result = device_calculator.name()
assert result == "API_19_GP_XXHDPI_1080x1920_arm64-v8a_es-ES"
def test_API_23_NO_GP_XXHDPI_1080x1920_arm64_v8a_esES(self):
def mock_data(parameters):
if 'ro.build.version.sdk' in parameters:
return '23'
elif 'com.google.android.gms' in parameters:
return None
elif 'density' in parameters:
return 'Physical density: 420'
elif 'size' in parameters:
return 'Physical size: 1080x1920'
elif 'ro.product.cpu.abi' in parameters:
return 'arm64-v8a'
elif 'persist.sys.locale' in parameters:
return 'es-ES'
return None
adb_executor = MagicMock()
adb_executor.execute.side_effect = mock_data
device_calculator = DeviceNameCalculator(adb_executor)
result = device_calculator.name()
assert result == "API_23_NO_GP_XXHDPI_1080x1920_arm64-v8a_es-ES"
def test_API_25_NO_GP_XXHDPI_1080x1920_x86_esES(self):
def mock_data(parameters):
if 'ro.build.version.sdk' in parameters:
return '25'
elif 'com.google.android.gms' in parameters:
return None
elif 'density' in parameters:
return 'Physical density: 420'
elif 'size' in parameters:
return 'Physical size: 1080x1920'
elif 'ro.product.cpu.abi' in parameters:
return 'x86'
elif 'persist.sys.locale' in parameters:
return None
elif 'ro.product.locale' in parameters:
return 'es-ES'
return None
adb_executor = MagicMock()
adb_executor.execute.side_effect = mock_data
device_calculator = DeviceNameCalculator(adb_executor)
result = device_calculator.name()
assert result == "API_25_NO_GP_XXHDPI_1080x1920_x86_es-ES"
def density_10_to_LDPI(self):
def mock_data(parameters):
return 'Physical density: 10'
adb_executor = MagicMock()
adb_executor.execute.side_effect = mock_data
device_calculator = DeviceNameCalculator(adb_executor)
result = device_calculator._screen_density_text()
assert result == "LDPI"
def density_140_to_MDPI(self):
def mock_data(parameters):
return 'Physical density: 140'
adb_executor = MagicMock()
adb_executor.execute.side_effect = mock_data
device_calculator = DeviceNameCalculator(adb_executor)
result = device_calculator._screen_density_text()
assert result == "MDPI"
def density_200_to_HDPI(self):
def mock_data(parameters):
return 'Physical density: 200'
adb_executor = MagicMock()
adb_executor.execute.side_effect = mock_data
device_calculator = DeviceNameCalculator(adb_executor)
result = device_calculator._screen_density_text()
assert result == "HDPI"
def density_250_to_XHDPI(self):
def mock_data(parameters):
return 'Physical density: 250'
adb_executor = MagicMock()
adb_executor.execute.side_effect = mock_data
device_calculator = DeviceNameCalculator(adb_executor)
result = device_calculator._screen_density_text()
assert result == "XHDPI"
def density_340_to_XXHDPI(self):
def mock_data(parameters):
return 'Physical density: 340'
adb_executor = MagicMock()
adb_executor.execute.side_effect = mock_data
device_calculator = DeviceNameCalculator(adb_executor)
result = device_calculator._screen_density_text()
assert result == "XXHDPI"
def density_500_to_XXXHDPI(self):
def mock_data(parameters):
return 'Physical density: 500'
adb_executor = MagicMock()
adb_executor.execute.side_effect = mock_data
device_calculator = DeviceNameCalculator(adb_executor)
result = device_calculator._screen_density_text()
assert result == "XXXHDPI"
| 31.27381
| 99
| 0.635135
| 573
| 5,254
| 5.551483
| 0.143106
| 0.093367
| 0.107513
| 0.042439
| 0.864194
| 0.861679
| 0.861679
| 0.832443
| 0.730274
| 0.730274
| 0
| 0.042627
| 0.290065
| 5,254
| 167
| 100
| 31.461078
| 0.810188
| 0
| 0
| 0.677966
| 0
| 0
| 0.15531
| 0.050628
| 0
| 0
| 0
| 0
| 0.076271
| 1
| 0.152542
| false
| 0
| 0.042373
| 0.050847
| 0.440678
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
40689724aed30d6294012decca56364ee88a477e
| 2,216
|
py
|
Python
|
plot_2/plt_box_r.py
|
Mrchengyuan/modling-dynamic-system
|
a6bfac864c27cc92161f1fc88af605dda3106feb
|
[
"Unlicense"
] | 1
|
2021-06-23T02:11:33.000Z
|
2021-06-23T02:11:33.000Z
|
plot_2/plt_box_r.py
|
Mrchengyuan/modling-dynamic-system
|
a6bfac864c27cc92161f1fc88af605dda3106feb
|
[
"Unlicense"
] | null | null | null |
plot_2/plt_box_r.py
|
Mrchengyuan/modling-dynamic-system
|
a6bfac864c27cc92161f1fc88af605dda3106feb
|
[
"Unlicense"
] | null | null | null |
import matplotlib.pyplot as plt
from matplotlib.pyplot import MultipleLocator
font_titles = {'family': 'Times New Roman',
'color': 'black',
'weight': 'normal',
'size': 8,}
font_labels = {'family': 'Times New Roman',
'color': 'black',
'weight': 'normal',
'size': 8,}
import numpy as np
time_scale = np.arange(0.01,0.51,0.01)
xticks=[0,10,20,30,40,50]
lable=[0,0.1,0.2,0.3,0.4,0.5]
ytricks=[0,0.25,0.5,0.75,0.1]
mae_hybrid_model_close_training=np.load('mae_error_hybrid_model.npy')
mae_seq2seq_model_close_training=np.load('mae_error_seq2seq_model.npy')
uy_mae_hybrid_model_close_training=mae_hybrid_model_close_training[:,:,0:1].reshape(-1,50)
uy_mae_seq2seq_model_close_training=mae_seq2seq_model_close_training[:,:,0:1].reshape(-1,50)
r_mae_hybrid_model_close_training=mae_hybrid_model_close_training[:,:,1:2].reshape(-1,50)
r_mae_seq2seq_model_close_training=mae_seq2seq_model_close_training[:,:,1:2].reshape(-1,50)
plt.figure(dpi=120,figsize=(12,3.3))
plt.subplot(121)
plt.xlabel('Predicted Horizens (s)',fontdict=font_labels)
plt.ylabel('$||e_{r,k}||$ (rad/s)',fontdict=font_labels)
plt.grid(True,alpha=0.4)
plt.title('Distribution of $||e_{r,k}||$,Hybrid model',font_titles)
plt.boxplot(r_mae_hybrid_model_close_training[:,0:50],showfliers=False,boxprops = {'color':'blue',},whiskerprops={'linestyle':'--','dashes':(5,3)})
y_major_locator=MultipleLocator(0.005)
ax=plt.gca()
ax.yaxis.set_major_locator(y_major_locator)
plt.xticks(xticks,labels=lable)
ax=plt.gca()
ax.yaxis.set_major_locator(y_major_locator)
plt.tick_params(labelsize=8)
plt.ylim(0,0.015)
plt.subplot(122)
plt.xlabel('Predicted Horizens (s)',fontdict=font_labels)
plt.ylabel('$||e_{r,k}||$ (rad/s)',fontdict=font_labels)
plt.grid(True,alpha=0.4)
plt.title('Distribution of $||e_{r,k}||$,GRU Encoder-Decoder',font_titles)
plt.boxplot(r_mae_seq2seq_model_close_training[:,0:50],showfliers=False,boxprops = {'color':'blue',},whiskerprops={'linestyle':'--','dashes':(5,3)})
plt.xticks(xticks,labels=lable)
y_major_locator=MultipleLocator(0.005)
ax=plt.gca()
ax.yaxis.set_major_locator(y_major_locator)
plt.tick_params(labelsize=8)
plt.ylim(0,0.015)
plt.savefig('箱线图r.png',dpi=600)
plt.show()
| 2,216
| 2,216
| 0.741426
| 373
| 2,216
| 4.160858
| 0.286863
| 0.07732
| 0.139175
| 0.073454
| 0.795747
| 0.751933
| 0.71134
| 0.670103
| 0.639175
| 0.639175
| 0
| 0.058567
| 0.06769
| 2,216
| 1
| 2,216
| 2,216
| 0.692643
| 0
| 0
| 0.52
| 0
| 0
| 0.173363
| 0.023928
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.06
| 0
| 0.06
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
904b5e9027b3145ce2aa3ac382d18ca4ed33db7e
| 74
|
py
|
Python
|
fileconversions/conversions/rtf_to_pdf_conversion.py
|
wilbertom/fileconversions
|
c48fda9b2804524fc57d1f6963d09645825b0da6
|
[
"MIT"
] | null | null | null |
fileconversions/conversions/rtf_to_pdf_conversion.py
|
wilbertom/fileconversions
|
c48fda9b2804524fc57d1f6963d09645825b0da6
|
[
"MIT"
] | null | null | null |
fileconversions/conversions/rtf_to_pdf_conversion.py
|
wilbertom/fileconversions
|
c48fda9b2804524fc57d1f6963d09645825b0da6
|
[
"MIT"
] | null | null | null |
from .conversion import Conversion
class RtfToPdf(Conversion):
pass
| 12.333333
| 34
| 0.77027
| 8
| 74
| 7.125
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175676
| 74
| 5
| 35
| 14.8
| 0.934426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
906e33c70489770cbea96f5dcff1cd11b561770b
| 40
|
py
|
Python
|
coggle_ecs/__init__.py
|
MrGVSV/coggle-ecs
|
e75a5081575e794f52779ecdb87e514ec197f029
|
[
"MIT"
] | null | null | null |
coggle_ecs/__init__.py
|
MrGVSV/coggle-ecs
|
e75a5081575e794f52779ecdb87e514ec197f029
|
[
"MIT"
] | null | null | null |
coggle_ecs/__init__.py
|
MrGVSV/coggle-ecs
|
e75a5081575e794f52779ecdb87e514ec197f029
|
[
"MIT"
] | null | null | null |
from coggle_ecs.parser import CoggleECS
| 20
| 39
| 0.875
| 6
| 40
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
90d08ba3d35dc3628fe8ce0221f76ab0d343ca58
| 26
|
py
|
Python
|
classpath/__init__.py
|
kimi641/pyJVM
|
9e2b2392044a8ddd41ff8dda18a26e307776ae34
|
[
"MIT"
] | null | null | null |
classpath/__init__.py
|
kimi641/pyJVM
|
9e2b2392044a8ddd41ff8dda18a26e307776ae34
|
[
"MIT"
] | 1
|
2021-01-21T09:38:24.000Z
|
2021-01-21T09:38:24.000Z
|
classpath/__init__.py
|
kimi641/pyJVM
|
9e2b2392044a8ddd41ff8dda18a26e307776ae34
|
[
"MIT"
] | null | null | null |
from .class_path import *
| 13
| 25
| 0.769231
| 4
| 26
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
90e141db9ea3ab744ea3a8ce48fdafaff4bacef5
| 115
|
py
|
Python
|
office365/sharepoint/principal/appprincipal_identity_provider.py
|
rikeshtailor/Office365-REST-Python-Client
|
ca7bfa1b22212137bb4e984c0457632163e89a43
|
[
"MIT"
] | 544
|
2016-08-04T17:10:16.000Z
|
2022-03-31T07:17:20.000Z
|
office365/sharepoint/principal/appprincipal_identity_provider.py
|
rikeshtailor/Office365-REST-Python-Client
|
ca7bfa1b22212137bb4e984c0457632163e89a43
|
[
"MIT"
] | 438
|
2016-10-11T12:24:22.000Z
|
2022-03-31T19:30:35.000Z
|
office365/sharepoint/principal/appprincipal_identity_provider.py
|
rikeshtailor/Office365-REST-Python-Client
|
ca7bfa1b22212137bb4e984c0457632163e89a43
|
[
"MIT"
] | 202
|
2016-08-22T19:29:40.000Z
|
2022-03-30T20:26:15.000Z
|
from office365.sharepoint.base_entity import BaseEntity
class AppPrincipalIdentityProvider(BaseEntity):
pass
| 19.166667
| 55
| 0.843478
| 11
| 115
| 8.727273
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029412
| 0.113043
| 115
| 5
| 56
| 23
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
2971b7539cf24ef40f2147451c8311bdeb111f18
| 1,648
|
py
|
Python
|
tests/unit/clients/test_back_refs_delete.py
|
atsgen/tf-vcenter-manager
|
2cfa07f038b86b3087842c34abb96b15da0b36fb
|
[
"Apache-2.0"
] | 1
|
2022-03-13T06:31:40.000Z
|
2022-03-13T06:31:40.000Z
|
tests/unit/clients/test_back_refs_delete.py
|
atsgen/tf-vcenter-manager
|
2cfa07f038b86b3087842c34abb96b15da0b36fb
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/clients/test_back_refs_delete.py
|
atsgen/tf-vcenter-manager
|
2cfa07f038b86b3087842c34abb96b15da0b36fb
|
[
"Apache-2.0"
] | 1
|
2020-08-25T12:46:12.000Z
|
2020-08-25T12:46:12.000Z
|
""" Deleting objects in VNC should also delete it's back-ref objects. """
def test_delete_vmi(vnc_api_client, vnc_lib, vnc_vmi_1):
vnc_vmi_1.get_instance_ip_back_refs.return_value = [{'uuid': 'instance-ip-uuid'}]
vnc_lib.virtual_machine_interface_read.return_value = vnc_vmi_1
vnc_api_client.delete_vmi('vmi-uuid-1')
vnc_lib.instance_ip_delete.assert_called_once_with(id='instance-ip-uuid')
vnc_lib.virtual_machine_interface_delete.assert_called_once_with(id='vmi-uuid-1')
def test_vmi_no_back_refs(vnc_api_client, vnc_lib, vnc_vmi_1):
vnc_vmi_1.get_instance_ip_back_refs.return_value = None
vnc_lib.virtual_machine_interface_read.return_value = vnc_vmi_1
vnc_api_client.delete_vmi('vmi-uuid-1')
vnc_lib.instance_ip_delete.assert_not_called()
def test_delete_vm(vnc_api_client, vnc_lib, vnc_vm, vnc_vmi_1):
vnc_vm.get_virtual_machine_interface_back_refs.return_value = [{'uuid': 'vmi-uuid-1'}]
vnc_lib.virtual_machine_read.return_value = vnc_vm
vnc_vmi_1.get_instance_ip_back_refs.return_value = [{'uuid': 'instance-ip-uuid'}]
vnc_lib.virtual_machine_interface_read.return_value = vnc_vmi_1
vnc_api_client.delete_vm('vm-uuid')
vnc_lib.virtual_machine_interface_delete.assert_called_once_with(id='vmi-uuid-1')
vnc_lib.virtual_machine_delete.assert_called_once_with(id='vm-uuid')
def test_vm_no_back_refs(vnc_api_client, vnc_lib, vnc_vm):
vnc_vm.get_virtual_machine_interface_back_refs.return_value = None
vnc_lib.virtual_machine_read.return_value = vnc_vm
vnc_api_client.delete_vm('vm-uuid')
vnc_lib.virtual_machine_interface_delete.assert_not_called()
| 39.238095
| 90
| 0.799757
| 280
| 1,648
| 4.182143
| 0.135714
| 0.076857
| 0.0538
| 0.153715
| 0.891546
| 0.884714
| 0.847139
| 0.823228
| 0.790777
| 0.748933
| 0
| 0.009459
| 0.101942
| 1,648
| 41
| 91
| 40.195122
| 0.781757
| 0.039442
| 0
| 0.541667
| 0
| 0
| 0.083175
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
46352139922766c390ca513f9137205da017e2e4
| 129
|
py
|
Python
|
bi_reports_illustrate_bot/admin.py
|
BIChatbotGenerator/BICGen
|
45a96e171219f4543b14869bf832633b634ecc15
|
[
"Apache-2.0"
] | null | null | null |
bi_reports_illustrate_bot/admin.py
|
BIChatbotGenerator/BICGen
|
45a96e171219f4543b14869bf832633b634ecc15
|
[
"Apache-2.0"
] | null | null | null |
bi_reports_illustrate_bot/admin.py
|
BIChatbotGenerator/BICGen
|
45a96e171219f4543b14869bf832633b634ecc15
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import *
admin.site.register([TelegramState, TelegramUser, TelegramChat, Report])
| 25.8
| 72
| 0.806202
| 15
| 129
| 6.933333
| 0.8
| 0.211538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.100775
| 129
| 5
| 72
| 25.8
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
468c46c0ec6ee310609c5a38a0469cf65e12adde
| 102
|
py
|
Python
|
python/triangle2.py
|
jeremyprice/strengths_name_tents
|
4b4bbec9d5e6d8c24b7ff98c855c3f58bb7e6aac
|
[
"Unlicense"
] | null | null | null |
python/triangle2.py
|
jeremyprice/strengths_name_tents
|
4b4bbec9d5e6d8c24b7ff98c855c3f58bb7e6aac
|
[
"Unlicense"
] | null | null | null |
python/triangle2.py
|
jeremyprice/strengths_name_tents
|
4b4bbec9d5e6d8c24b7ff98c855c3f58bb7e6aac
|
[
"Unlicense"
] | null | null | null |
n = 1
while n < 5:
print(n * "*")
n = n + 1
while n > 0:
print(n * "*")
n = n - 1
| 12.75
| 18
| 0.333333
| 18
| 102
| 1.888889
| 0.333333
| 0.235294
| 0.411765
| 0.470588
| 0.529412
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 0.460784
| 102
| 8
| 19
| 12.75
| 0.527273
| 0
| 0
| 0.285714
| 0
| 0
| 0.019417
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d3b9d3733e24c354c111d3ab33ff99d37caec131
| 3,440
|
py
|
Python
|
testcode/cleanh5.py
|
lachmann12/prismexp
|
2e9c739b6c8c10c44f9f81e84e87e47dac2c73f0
|
[
"Apache-2.0"
] | 1
|
2020-11-10T13:33:26.000Z
|
2020-11-10T13:33:26.000Z
|
testcode/cleanh5.py
|
MaayanLab/prismexp
|
39e6cad055d7698e466f0d197a1563d08d3e5eab
|
[
"Apache-2.0"
] | null | null | null |
testcode/cleanh5.py
|
MaayanLab/prismexp
|
39e6cad055d7698e466f0d197a1563d08d3e5eab
|
[
"Apache-2.0"
] | null | null | null |
from sklearn.cluster import KMeans
import h5py as h5
import numpy as np
import pandas as pd
import random
from typing import List
import sys
import os
import time
import math
os.remove("mouse_matrix_3.h5")
f1 = h5.File("mouse_matrix.h5", "r")
exp = f1["data/expression"]
f = h5.File("mouse_matrix_3.h5", "w")
dset = f.create_dataset("data/expression", exp.shape, chunks=(2, 3000), dtype=np.int32, compression='gzip', compression_opts=9)
steps = 500
step_size = math.floor(exp.shape[0]/steps)
for i in range(0, steps+1):
print(i)
fromStep = i*step_size
toStep = min((i+1)*step_size, exp.shape[0])
ee = exp[fromStep:toStep, :]
dset[fromStep:toStep, :] = exp[fromStep:toStep, :]
f.close()
f1.close()
## benchmark me
f = h5.File("mouse_matrix_2.h5", "r")
sa = random.sample(set(range(0, 28000)), 2000)
sa.sort()
start = time.time()
exp = f["data/expression"][sa, :]
print("Extract samples: "+str(time.time()- start))
f.close()
f = h5.File("mouse_matrix_2.h5", "r")
sa = random.sample(set(range(0, 32000)), 10)
sa.sort()
start = time.time()
exp = f["data/expression"][:, 5]
print("Extract gene: "+str(time.time() - start))
f.close()
f = h5.File("mouse_matrix_2.h5", "r")
sa = random.sample(set(range(0, 32000)), 10)
sa.sort()
start = time.time()
exp = f["data/expression"][:, sa]
print("Extract gene (10): "+str(time.time() - start))
f.close()
f1 = h5.File("mouse_matrix.h5", "r")
f = h5.File("mouse_matrix_3.h5", "a")
keys = list(f1["meta"].keys())
for k in keys:
print(k)
f.create_dataset("meta/"+k, data=f1["meta/"+k], compression='gzip', compression_opts=9)
f.close()
f1.close()
f1 = h5.File("mouse_matrix.h5", "r")
exp = f1["data/expression"]
f = h5.File("mouse_matrix_2.h5", "w")
f.close()
f1.close()
## benchmark me
f = h5.File("mouse_matrix_t.h5", "r")
sa = random.sample(set(range(0, 284907)), 2000)
sa.sort()
start = time.time()
exp = f["data/expression"][sa, :]
print("Extract samples: "+str(time.time()- start))
f.close()
f = h5.File("mouse_matrix_t.h5", "r")
sa = random.sample(set(range(0, 32000)), 10)
sa.sort()
start = time.time()
exp = f["data/expression"][:, 5]
print("Extract gene: "+str(time.time() - start))
f.close()
f = h5.File("mouse_matrix_t.h5", "r")
sa = random.sample(set(range(0, 32000)), 10)
sa.sort()
start = time.time()
exp = f["data/expression"][:, sa]
print("Extract gene (10): "+str(time.time() - start))
f.close()
## benchmark me
f = h5.File("mouse_matrix.h5", "r")
sa = random.sample(set(range(0, 284907)), 2000)
sa.sort()
start = time.time()
exp = f["data/expression"][sa, :]
print("Extract samples: "+str(time.time()- start))
f.close()
f = h5.File("mouse_matrix.h5", "r")
sa = random.sample(set(range(0, 32000)), 10)
sa.sort()
start = time.time()
exp = f["data/expression"][:, 5]
print("Extract gene: "+str(time.time() - start))
f.close()
f = h5.File("mouse_matrix.h5", "r")
sa = random.sample(set(range(0, 32000)), 10)
sa.sort()
start = time.time()
exp = f["data/expression"][:, sa]
print("Extract gene (10): "+str(time.time() - start))
f.close()
## benchmark me
f = h5.File("mouse_matrix_t.h5", "r")
sa = random.sample(set(range(0, 284907)), 500)
sa.sort()
start = time.time()
exp = f["data/expression"][sa, :]
print("Extract samples: "+str(time.time()-start))
f.close()
f = h5.File("mouse_matrix.h5", "r")
sa.sort()
start = time.time()
exp2 = f["data/expression"][sa, :]
print("Extract samples: "+str(time.time()-start))
f.close()
| 21.234568
| 128
| 0.645349
| 572
| 3,440
| 3.818182
| 0.146853
| 0.080586
| 0.085623
| 0.132326
| 0.781136
| 0.744963
| 0.744963
| 0.732601
| 0.720238
| 0.720238
| 0
| 0.057076
| 0.12907
| 3,440
| 161
| 129
| 21.36646
| 0.671896
| 0.014826
| 0
| 0.736842
| 0
| 0
| 0.214497
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.087719
| 0
| 0.087719
| 0.114035
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d3e0399acff27310b02155dca49b2425de7657bc
| 106
|
py
|
Python
|
module/__init__.py
|
sp-nitech/DNN-HSMM
|
3476c262eb2b57bad9b85ea1f2bd282b0bafe49c
|
[
"BSD-3-Clause"
] | 38
|
2021-03-15T08:42:22.000Z
|
2022-03-14T10:32:15.000Z
|
module/__init__.py
|
sp-nitech/DNN-HSMM
|
3476c262eb2b57bad9b85ea1f2bd282b0bafe49c
|
[
"BSD-3-Clause"
] | 3
|
2021-07-07T02:11:08.000Z
|
2021-11-10T10:23:16.000Z
|
module/__init__.py
|
sp-nitech/DNN-HSMM
|
3476c262eb2b57bad9b85ea1f2bd282b0bafe49c
|
[
"BSD-3-Clause"
] | 9
|
2021-03-15T09:55:42.000Z
|
2022-03-14T10:32:18.000Z
|
from .data import DataSet, DataCollate
from .embedding import Model as Embedding
from .model import Model
| 26.5
| 41
| 0.820755
| 15
| 106
| 5.8
| 0.533333
| 0.252874
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141509
| 106
| 3
| 42
| 35.333333
| 0.956044
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
318994bbbbda506333a40fbd07cbfdd9f438873d
| 3,392
|
py
|
Python
|
tests/test_datasets.py
|
Lucas-Prates/ruptures
|
9685818d08ca024c0abb6ecf6121f2f86fb26dba
|
[
"BSD-2-Clause"
] | 1
|
2021-12-10T18:12:42.000Z
|
2021-12-10T18:12:42.000Z
|
tests/test_datasets.py
|
Lucas-Prates/ruptures
|
9685818d08ca024c0abb6ecf6121f2f86fb26dba
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_datasets.py
|
Lucas-Prates/ruptures
|
9685818d08ca024c0abb6ecf6121f2f86fb26dba
|
[
"BSD-2-Clause"
] | null | null | null |
from itertools import product
import pytest
import numpy as np
from ruptures.datasets import pw_constant, pw_linear, pw_normal, pw_wavy
@pytest.mark.parametrize("func", [pw_constant, pw_linear, pw_normal, pw_wavy])
def test_empty_arg(func):
func()
@pytest.mark.parametrize(
"func, n_samples, n_features, n_bkps, noise_std",
product([pw_constant], range(20, 1000, 200), range(1, 4), [2, 5, 3], [None, 1, 2]),
)
def test_constant(func, n_samples, n_features, n_bkps, noise_std):
signal, bkps = func(
n_samples=n_samples, n_features=n_features, n_bkps=n_bkps, noise_std=noise_std
)
assert signal.shape == (n_samples, n_features)
assert len(bkps) == n_bkps + 1
assert bkps[-1] == n_samples
def test_seed(n_samples=200, n_features=3, n_bkps=5, noise_std=1, seed=12345):
# pw_constant
signal1, bkps1 = pw_constant(
n_samples=n_samples,
n_features=n_features,
n_bkps=n_bkps,
noise_std=noise_std,
seed=seed,
)
signal2, bkps2 = pw_constant(
n_samples=n_samples,
n_features=n_features,
n_bkps=n_bkps,
noise_std=noise_std,
seed=seed,
)
assert np.allclose(signal1, signal2)
assert bkps1 == bkps2
# pw_normal
signal1, bkps1 = pw_normal(n_samples=n_samples, n_bkps=n_bkps, seed=seed)
signal2, bkps2 = pw_normal(n_samples=n_samples, n_bkps=n_bkps, seed=seed)
assert np.allclose(signal1, signal2)
assert bkps1 == bkps2
# pw_linear
signal1, bkps1 = pw_linear(
n_samples=n_samples,
n_features=n_features,
n_bkps=n_bkps,
noise_std=noise_std,
seed=seed,
)
signal2, bkps2 = pw_linear(
n_samples=n_samples,
n_features=n_features,
n_bkps=n_bkps,
noise_std=noise_std,
seed=seed,
)
assert np.allclose(signal1, signal2)
assert bkps1 == bkps2
# pw_wavy
signal1, bkps1 = pw_wavy(
n_samples=n_samples, n_bkps=n_bkps, noise_std=noise_std, seed=seed
)
signal2, bkps2 = pw_wavy(
n_samples=n_samples, n_bkps=n_bkps, noise_std=noise_std, seed=seed
)
assert np.allclose(signal1, signal2)
assert bkps1 == bkps2
@pytest.mark.parametrize(
"func, n_samples, n_features, n_bkps, noise_std",
product([pw_linear], range(20, 1000, 200), range(1, 4), [2, 5, 3], [None, 1, 2]),
)
def test_linear(func, n_samples, n_features, n_bkps, noise_std):
signal, bkps = func(
n_samples=n_samples, n_features=n_features, n_bkps=n_bkps, noise_std=noise_std
)
assert signal.shape == (n_samples, n_features + 1)
assert len(bkps) == n_bkps + 1
assert bkps[-1] == n_samples
@pytest.mark.parametrize(
"func, n_samples, n_bkps, noise_std",
product([pw_wavy], range(20, 1000, 200), [2, 5, 3], [None, 1, 2]),
)
def test_wavy(func, n_samples, n_bkps, noise_std):
signal, bkps = func(n_samples=n_samples, n_bkps=n_bkps, noise_std=noise_std)
assert signal.shape == (n_samples,)
assert len(bkps) == n_bkps + 1
assert bkps[-1] == n_samples
@pytest.mark.parametrize(
"func, n_samples, n_bkps", product([pw_normal], range(20, 1000, 200), [2, 5, 3])
)
def test_normal(func, n_samples, n_bkps):
signal, bkps = func(n_samples=n_samples, n_bkps=n_bkps)
assert signal.shape == (n_samples, 2)
assert len(bkps) == n_bkps + 1
assert bkps[-1] == n_samples
| 30.017699
| 87
| 0.662736
| 522
| 3,392
| 4.01341
| 0.09387
| 0.156563
| 0.146062
| 0.093079
| 0.845823
| 0.820048
| 0.815752
| 0.791408
| 0.753222
| 0.753222
| 0
| 0.041714
| 0.215507
| 3,392
| 112
| 88
| 30.285714
| 0.745584
| 0.011498
| 0
| 0.527473
| 0
| 0
| 0.045699
| 0
| 0
| 0
| 0
| 0
| 0.21978
| 1
| 0.065934
| false
| 0
| 0.043956
| 0
| 0.10989
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
31ee5678b7708ef1fe20be9e9870713b065aa6db
| 101
|
py
|
Python
|
extinct/components/models/dino/__init__.py
|
olliethomas/extinct-dino
|
7d47d8f7763d9791fa8d5027898b27fcee0901c4
|
[
"Apache-2.0"
] | null | null | null |
extinct/components/models/dino/__init__.py
|
olliethomas/extinct-dino
|
7d47d8f7763d9791fa8d5027898b27fcee0901c4
|
[
"Apache-2.0"
] | 1
|
2021-10-13T14:21:10.000Z
|
2021-10-13T14:21:10.000Z
|
extinct/components/models/dino/__init__.py
|
olliethomas/extinct-dino
|
7d47d8f7763d9791fa8d5027898b27fcee0901c4
|
[
"Apache-2.0"
] | null | null | null |
from .dino import *
from .eval import *
from .head import *
from .models import *
from .vit import *
| 16.833333
| 21
| 0.70297
| 15
| 101
| 4.733333
| 0.466667
| 0.56338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19802
| 101
| 5
| 22
| 20.2
| 0.876543
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9ecb9f947090fe6d662b6e26fbc51eb77d5ea5ce
| 168
|
py
|
Python
|
Book_Ladder/web/page/__init__.py
|
Rdjroot/BookLadder
|
d4e1f90572f2dda2e7c25890b99c965ded0f02c8
|
[
"MIT"
] | null | null | null |
Book_Ladder/web/page/__init__.py
|
Rdjroot/BookLadder
|
d4e1f90572f2dda2e7c25890b99c965ded0f02c8
|
[
"MIT"
] | null | null | null |
Book_Ladder/web/page/__init__.py
|
Rdjroot/BookLadder
|
d4e1f90572f2dda2e7c25890b99c965ded0f02c8
|
[
"MIT"
] | null | null | null |
# -*- coding = utf-8 -*-
# @Time:2021/3/713:30
# @Author:Linyu
# @Software:PyCharm
from flask import Blueprint
page = Blueprint("page",__name__)
import web.page.views
| 18.666667
| 33
| 0.696429
| 24
| 168
| 4.708333
| 0.833333
| 0.230089
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075342
| 0.130952
| 168
| 9
| 34
| 18.666667
| 0.69863
| 0.440476
| 0
| 0
| 0
| 0
| 0.044444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
9ef7e30ded83f0191f5b37633021bb760f535121
| 69
|
py
|
Python
|
lib/parsers/atom.py
|
RafalBuchner/turbo-snippets
|
740b70e7c588190b970921cdbaf2f465b7f0e968
|
[
"MIT"
] | null | null | null |
lib/parsers/atom.py
|
RafalBuchner/turbo-snippets
|
740b70e7c588190b970921cdbaf2f465b7f0e968
|
[
"MIT"
] | null | null | null |
lib/parsers/atom.py
|
RafalBuchner/turbo-snippets
|
740b70e7c588190b970921cdbaf2f465b7f0e968
|
[
"MIT"
] | null | null | null |
from .base import BaseParser
class AtomParser(BaseParser):
pass
| 13.8
| 29
| 0.768116
| 8
| 69
| 6.625
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 69
| 4
| 30
| 17.25
| 0.929825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
73162334da544af48fd81794cde68883da1a70dc
| 73
|
py
|
Python
|
pymt/utils/__init__.py
|
mwtoews/pymt
|
81a8469b0d0d115d21186ec1d1c9575690d51850
|
[
"MIT"
] | 38
|
2017-06-30T17:10:53.000Z
|
2022-01-05T07:38:03.000Z
|
pymt/utils/__init__.py
|
mwtoews/pymt
|
81a8469b0d0d115d21186ec1d1c9575690d51850
|
[
"MIT"
] | 96
|
2017-04-04T18:52:41.000Z
|
2021-11-01T21:30:48.000Z
|
pymt/utils/__init__.py
|
mwtoews/pymt
|
81a8469b0d0d115d21186ec1d1c9575690d51850
|
[
"MIT"
] | 15
|
2017-05-23T15:40:16.000Z
|
2021-06-14T21:30:28.000Z
|
from .utils import as_cwd, err, out
__all__ = ["as_cwd", "err", "out"]
| 14.6
| 35
| 0.630137
| 12
| 73
| 3.333333
| 0.666667
| 0.25
| 0.4
| 0.55
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178082
| 73
| 4
| 36
| 18.25
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.164384
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
732f4bfcb3d8edbbd3ded216e2887a8fb0466a02
| 47
|
py
|
Python
|
src/adapy/action/__init__.py
|
LitterBot2017/Babysitter
|
ba189bbec20737670c3382bd3cccaa3a0e65b16c
|
[
"BSD-3-Clause"
] | null | null | null |
src/adapy/action/__init__.py
|
LitterBot2017/Babysitter
|
ba189bbec20737670c3382bd3cccaa3a0e65b16c
|
[
"BSD-3-Clause"
] | null | null | null |
src/adapy/action/__init__.py
|
LitterBot2017/Babysitter
|
ba189bbec20737670c3382bd3cccaa3a0e65b16c
|
[
"BSD-3-Clause"
] | null | null | null |
from grasping import Grasp
from rogue import *
| 15.666667
| 26
| 0.808511
| 7
| 47
| 5.428571
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 47
| 2
| 27
| 23.5
| 0.974359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
735927ed116d3f5cb39d8324c45f4bcbf335b81e
| 61
|
py
|
Python
|
robot_ws/src/robot/spot_ros/spot_driver/scripts/__init__.py
|
ironWolf1990/ros-workspace
|
351ac9b15ab328cb2f1c77356383f0baa1204761
|
[
"MIT"
] | 1
|
2021-05-13T17:52:25.000Z
|
2021-05-13T17:52:25.000Z
|
robot_ws/src/robot/spot_ros/spot_driver/scripts/__init__.py
|
ironWolf1990/ros-workspace
|
351ac9b15ab328cb2f1c77356383f0baa1204761
|
[
"MIT"
] | null | null | null |
robot_ws/src/robot/spot_ros/spot_driver/scripts/__init__.py
|
ironWolf1990/ros-workspace
|
351ac9b15ab328cb2f1c77356383f0baa1204761
|
[
"MIT"
] | null | null | null |
import spot_ros
import spot_wrapper.py
import ros_helpers.py
| 15.25
| 22
| 0.868852
| 11
| 61
| 4.545455
| 0.545455
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098361
| 61
| 3
| 23
| 20.333333
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7df8b91dad762b43f7d4074c20df58b78cc4113e
| 201
|
py
|
Python
|
src/round2/pradnya/admin.py
|
sourabhedake/inc-pradnya-event-online-judge
|
90704b6816429415a5b74d46d200a903cad2d0e2
|
[
"MIT"
] | null | null | null |
src/round2/pradnya/admin.py
|
sourabhedake/inc-pradnya-event-online-judge
|
90704b6816429415a5b74d46d200a903cad2d0e2
|
[
"MIT"
] | null | null | null |
src/round2/pradnya/admin.py
|
sourabhedake/inc-pradnya-event-online-judge
|
90704b6816429415a5b74d46d200a903cad2d0e2
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from pradnya import models
# Register your models here.
admin.site.register(models.Questions)
admin.site.register(models.user)
admin.site.register(models.submissions)
| 28.714286
| 39
| 0.825871
| 28
| 201
| 5.928571
| 0.5
| 0.162651
| 0.307229
| 0.415663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084577
| 201
| 6
| 40
| 33.5
| 0.902174
| 0.129353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.