hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c9a2e42257cdd85a1bbe5ca62b41e3356bfae4ca
| 2,795
|
py
|
Python
|
official/vision/beta/modeling/layers/__init__.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | 1
|
2021-05-22T12:50:50.000Z
|
2021-05-22T12:50:50.000Z
|
official/vision/beta/modeling/layers/__init__.py
|
DemonDamon/mask-detection-based-on-tf2odapi
|
192ae544169c1230c21141c033800aa1bd94e9b6
|
[
"MIT"
] | null | null | null |
official/vision/beta/modeling/layers/__init__.py
|
DemonDamon/mask-detection-based-on-tf2odapi
|
192ae544169c1230c21141c033800aa1bd94e9b6
|
[
"MIT"
] | null | null | null |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Layers package definition."""
from official.vision.beta.modeling.layers.box_sampler import BoxSampler
from official.vision.beta.modeling.layers.detection_generator import DetectionGenerator
from official.vision.beta.modeling.layers.detection_generator import MultilevelDetectionGenerator
from official.vision.beta.modeling.layers.mask_sampler import MaskSampler
from official.vision.beta.modeling.layers.nn_blocks import BottleneckBlock
from official.vision.beta.modeling.layers.nn_blocks import BottleneckResidualInner
from official.vision.beta.modeling.layers.nn_blocks import DepthwiseSeparableConvBlock
from official.vision.beta.modeling.layers.nn_blocks import InvertedBottleneckBlock
from official.vision.beta.modeling.layers.nn_blocks import ResidualBlock
from official.vision.beta.modeling.layers.nn_blocks import ResidualInner
from official.vision.beta.modeling.layers.nn_blocks import ReversibleLayer
from official.vision.beta.modeling.layers.nn_blocks_3d import BottleneckBlock3D
from official.vision.beta.modeling.layers.nn_blocks_3d import SelfGating
from official.vision.beta.modeling.layers.nn_layers import CausalConvMixin
from official.vision.beta.modeling.layers.nn_layers import Conv2D
from official.vision.beta.modeling.layers.nn_layers import Conv3D
from official.vision.beta.modeling.layers.nn_layers import DepthwiseConv2D
from official.vision.beta.modeling.layers.nn_layers import GlobalAveragePool3D
from official.vision.beta.modeling.layers.nn_layers import PositionalEncoding
from official.vision.beta.modeling.layers.nn_layers import Scale
from official.vision.beta.modeling.layers.nn_layers import SpatialAveragePool3D
from official.vision.beta.modeling.layers.nn_layers import SqueezeExcitation
from official.vision.beta.modeling.layers.nn_layers import StochasticDepth
from official.vision.beta.modeling.layers.nn_layers import TemporalSoftmaxPool
from official.vision.beta.modeling.layers.roi_aligner import MultilevelROIAligner
from official.vision.beta.modeling.layers.roi_generator import MultilevelROIGenerator
from official.vision.beta.modeling.layers.roi_sampler import ROISampler
| 62.111111
| 98
| 0.840072
| 372
| 2,795
| 6.233871
| 0.306452
| 0.139715
| 0.209573
| 0.256145
| 0.566192
| 0.566192
| 0.535144
| 0.484692
| 0.484692
| 0.044847
| 0
| 0.006717
| 0.094454
| 2,795
| 44
| 99
| 63.522727
| 0.909522
| 0.223971
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4e62ed89ab7a61092ecbb7dabe7e63006b953706
| 14
|
py
|
Python
|
emmaze/__init__.py
|
christopherphan/emmaze
|
c2428f6f99f80b5be104fe8eab3704ff70bce38b
|
[
"MIT"
] | null | null | null |
emmaze/__init__.py
|
christopherphan/emmaze
|
c2428f6f99f80b5be104fe8eab3704ff70bce38b
|
[
"MIT"
] | null | null | null |
emmaze/__init__.py
|
christopherphan/emmaze
|
c2428f6f99f80b5be104fe8eab3704ff70bce38b
|
[
"MIT"
] | null | null | null |
# noqa: D104
| 7
| 13
| 0.571429
| 2
| 14
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.3
| 0.285714
| 14
| 1
| 14
| 14
| 0.5
| 0.714286
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4eb1dbd86aed7853c94197b79bcb8849a733c454
| 41
|
py
|
Python
|
model_loads/utils/exceptions.py
|
cwh94/model_loads
|
e387f18f5acf88a4b804fdac1577948ceebe8c01
|
[
"Apache-2.0"
] | null | null | null |
model_loads/utils/exceptions.py
|
cwh94/model_loads
|
e387f18f5acf88a4b804fdac1577948ceebe8c01
|
[
"Apache-2.0"
] | 1
|
2020-05-21T02:40:02.000Z
|
2020-06-03T15:37:49.000Z
|
model_loads/utils/exceptions.py
|
cwh94/model_loads
|
e387f18f5acf88a4b804fdac1577948ceebe8c01
|
[
"Apache-2.0"
] | 1
|
2020-05-21T16:55:27.000Z
|
2020-05-21T16:55:27.000Z
|
class LoadException(Exception):
pass
| 13.666667
| 31
| 0.756098
| 4
| 41
| 7.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 41
| 2
| 32
| 20.5
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
4ed59e5d129282a36e433e2bf3354a7c16921555
| 24
|
py
|
Python
|
app/modules/home/__init__.py
|
cupskeee/App-MFE
|
4f546a9e6a475f3937f1a77406e612e3354af2b7
|
[
"Apache-2.0"
] | null | null | null |
app/modules/home/__init__.py
|
cupskeee/App-MFE
|
4f546a9e6a475f3937f1a77406e612e3354af2b7
|
[
"Apache-2.0"
] | null | null | null |
app/modules/home/__init__.py
|
cupskeee/App-MFE
|
4f546a9e6a475f3937f1a77406e612e3354af2b7
|
[
"Apache-2.0"
] | null | null | null |
from .routes import home
| 24
| 24
| 0.833333
| 4
| 24
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
14cdabfff63c658c6e69d0a6949e4d939b7d4f24
| 156
|
py
|
Python
|
djangocms_slick_slider/admin.py
|
ELDAELRA/djangocms_slick_slider
|
57678d45b262083df5eeee2b88c2eee93699f064
|
[
"MIT"
] | null | null | null |
djangocms_slick_slider/admin.py
|
ELDAELRA/djangocms_slick_slider
|
57678d45b262083df5eeee2b88c2eee93699f064
|
[
"MIT"
] | null | null | null |
djangocms_slick_slider/admin.py
|
ELDAELRA/djangocms_slick_slider
|
57678d45b262083df5eeee2b88c2eee93699f064
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib import admin
from .models import SlickSliderImage
admin.site.register(SlickSliderImage)
| 19.5
| 37
| 0.75641
| 20
| 156
| 5.9
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007246
| 0.115385
| 156
| 7
| 38
| 22.285714
| 0.847826
| 0.269231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
14d72ab64d3e3ca4091c36234e6724e2ad4a84ca
| 138
|
py
|
Python
|
dynabuffers-python/dynabuffers/api/IAnnotation.py
|
rschluesselbauer/dynabuffers
|
c90becb5edff323ed5ac1ea394136babb0dcca1d
|
[
"Apache-2.0"
] | 2
|
2019-10-28T12:28:01.000Z
|
2020-07-07T12:25:40.000Z
|
dynabuffers-python/dynabuffers/api/IAnnotation.py
|
rschluesselbauer/dynabuffers
|
c90becb5edff323ed5ac1ea394136babb0dcca1d
|
[
"Apache-2.0"
] | 1
|
2021-12-21T07:35:22.000Z
|
2021-12-21T07:35:22.000Z
|
dynabuffers-python/dynabuffers/api/IAnnotation.py
|
rschluesselbauer/dynabuffers
|
c90becb5edff323ed5ac1ea394136babb0dcca1d
|
[
"Apache-2.0"
] | 1
|
2020-03-19T09:19:43.000Z
|
2020-03-19T09:19:43.000Z
|
from abc import ABC, abstractmethod
class IAnnotation(ABC):
@abstractmethod
def validate(self, fieldName, value):
pass
| 15.333333
| 41
| 0.695652
| 15
| 138
| 6.4
| 0.8
| 0.354167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.231884
| 138
| 8
| 42
| 17.25
| 0.90566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0.2
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
093ab27f7af7ca31516d0064f503c04fd45911dd
| 166
|
py
|
Python
|
examples/double_pole_balancing/draw_net.py
|
adamtupper/pyneat
|
12bf2bf936602c0da7c40cfcb99aced2eb981faa
|
[
"MIT"
] | null | null | null |
examples/double_pole_balancing/draw_net.py
|
adamtupper/pyneat
|
12bf2bf936602c0da7c40cfcb99aced2eb981faa
|
[
"MIT"
] | null | null | null |
examples/double_pole_balancing/draw_net.py
|
adamtupper/pyneat
|
12bf2bf936602c0da7c40cfcb99aced2eb981faa
|
[
"MIT"
] | null | null | null |
import pickle
from visualize import draw_net
genome = pickle.load(open('results/run-0/solution.pickle', 'rb'))
draw_net(genome, filename='results/run-0/solution.gv')
| 33.2
| 65
| 0.777108
| 26
| 166
| 4.884615
| 0.615385
| 0.110236
| 0.204724
| 0.299213
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012987
| 0.072289
| 166
| 5
| 66
| 33.2
| 0.811688
| 0
| 0
| 0
| 0
| 0
| 0.335329
| 0.323353
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
11dd6f5a4e69d1bfcbf8fcb063e38419a97fbb18
| 871
|
py
|
Python
|
pyaff4/plugins.py
|
aff4/python-aff4
|
94a3583475c07ad92147f70ff8a19e9e36f12aa9
|
[
"Apache-2.0"
] | 34
|
2017-10-21T16:12:58.000Z
|
2022-02-18T00:37:08.000Z
|
pyaff4/plugins.py
|
aff4/python-aff4
|
94a3583475c07ad92147f70ff8a19e9e36f12aa9
|
[
"Apache-2.0"
] | 23
|
2017-11-06T17:01:04.000Z
|
2021-12-26T14:09:38.000Z
|
pyaff4/plugins.py
|
aff4/python-aff4
|
94a3583475c07ad92147f70ff8a19e9e36f12aa9
|
[
"Apache-2.0"
] | 17
|
2019-02-11T00:47:02.000Z
|
2022-03-14T02:52:04.000Z
|
from __future__ import unicode_literals
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from pyaff4 import aff4
from pyaff4 import aff4_directory
try:
from pyaff4 import aff4_cloud
except ImportError:
pass
from pyaff4 import aff4_file
from pyaff4 import aff4_image
from pyaff4 import aff4_map
from pyaff4 import zip
| 33.5
| 80
| 0.786452
| 135
| 871
| 5
| 0.6
| 0.103704
| 0.165926
| 0.177778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028926
| 0.166475
| 871
| 25
| 81
| 34.84
| 0.900826
| 0.650976
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.090909
| 0.818182
| 0
| 0.818182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
ee9d51046abb29f4d9b0a0ba5a656a19602cbca2
| 44,163
|
py
|
Python
|
tests/examples/minlplib/powerflow0014r.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | 2
|
2021-07-03T13:19:10.000Z
|
2022-02-06T10:48:13.000Z
|
tests/examples/minlplib/powerflow0014r.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | 1
|
2021-07-04T14:52:14.000Z
|
2021-07-15T10:17:11.000Z
|
tests/examples/minlplib/powerflow0014r.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | null | null | null |
# NLP written by GAMS Convert at 04/21/18 13:53:50
#
# Equation counts
# Total E G L N X C B
# 198 110 24 64 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 119 119 0 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 653 192 461 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
# Single concrete Pyomo model; `m` is the short alias that all of the
# GAMS-generated Var/Constraint statements below attach components to.
model = m = ConcreteModel()
# Decision variables x1..x118: all continuous (Reals), unbounded, initialized
# at 0 — exactly as in the 118 identical machine-generated declarations this
# loop replaces.  `setattr(m, "xN", Var(...))` is the same operation Pyomo
# performs for a literal `m.xN = Var(...)` assignment, so every downstream
# reference m.x1 .. m.x118 resolves to an identical scalar Var component.
for _i in range(1, 119):
    setattr(m, 'x%d' % _i, Var(within=Reals, bounds=(None, None), initialize=0))
# Minimize a sum of quadratic cost curves (a*x**2 + b*x) over x109..x113.
# NOTE(review): in the MINLPLib powerflow* family these presumably are the
# generator cost terms; the coefficients come verbatim from the GAMS source —
# confirm against the original powerflow0014r model if semantics matter.
m.obj = Objective(expr=430.293*m.x109**2 + 2000*m.x109 + 2500*m.x110**2 + 2000*m.x110 + 100*m.x111**2 + 4000*m.x111 +
                       100*m.x112**2 + 4000*m.x112 + 100*m.x113**2 + 4000*m.x113, sense=minimize)
# ---------------------------------------------------------------------------
# c2..c81: bilinear/quadratic equality constraints generated by GAMS Convert.
# Each defines one slack variable (m.x1..m.x80) as a quadratic form in the
# variables x81..x108.  NOTE(review): the product structure is typical of AC
# power-flow branch equations over rectangular voltage components, with the
# paired constraints (c2/c3, c4/c5, ...) looking like real/reactive or
# from/to-end flows — inferred from the powerflow* MINLPLib family, not
# provable from this file; confirm against the original GAMS model.
# The numeric coefficients are machine-generated and order-sensitive to
# verify, so the code is left byte-identical; no restyling is attempted.
# ---------------------------------------------------------------------------
m.c2 = Constraint(expr=0.567509596153698*m.x82*m.x83 - 1.1350191923074*m.x82**2 - 2.39093157587886*m.x82*m.x97 +
                       0.567509596153698*m.x83*m.x82 + 2.39093157587886*m.x83*m.x96 + 2.39093157587886*m.x96*m.x83 -
                       1.1350191923074*m.x96**2 + 0.567509596153698*m.x96*m.x97 - 2.39093157587886*m.x97*m.x82 +
                       0.567509596153698*m.x97*m.x96 + m.x1 == 0)
m.c3 = Constraint(expr=0.567509596153698*m.x82*m.x83 + 2.39093157587886*m.x82*m.x97 + 0.567509596153698*m.x83*m.x82 -
                       1.1350191923074*m.x83**2 - 2.39093157587886*m.x83*m.x96 - 2.39093157587886*m.x96*m.x83 +
                       0.567509596153698*m.x96*m.x97 + 2.39093157587886*m.x97*m.x82 + 0.567509596153698*m.x97*m.x96 -
                       1.1350191923074*m.x97**2 + m.x2 == 0)
m.c4 = Constraint(expr=4.54504135987637*m.x89*m.x101 - 4.54504135987637*m.x87*m.x103 + 4.54504135987637*m.x101*m.x89 -
                       4.54504135987637*m.x103*m.x87 + m.x3 == 0)
m.c5 = Constraint(expr=4.54504135987637*m.x87*m.x103 - 4.54504135987637*m.x89*m.x101 - 4.54504135987637*m.x101*m.x89 +
                       4.54504135987637*m.x103*m.x87 + m.x4 == 0)
m.c6 = Constraint(expr=0.9404423768502*m.x90*m.x91 - 1.8808847537004*m.x90**2 - 2.20147187473026*m.x90*m.x105 +
                       0.9404423768502*m.x91*m.x90 + 2.20147187473026*m.x91*m.x104 + 2.20147187473026*m.x104*m.x91 -
                       1.8808847537004*m.x104**2 + 0.9404423768502*m.x104*m.x105 - 2.20147187473026*m.x105*m.x90 +
                       0.9404423768502*m.x105*m.x104 + m.x5 == 0)
m.c7 = Constraint(expr=0.9404423768502*m.x90*m.x91 + 2.20147187473026*m.x90*m.x105 + 0.9404423768502*m.x91*m.x90 -
                       1.8808847537004*m.x91**2 - 2.20147187473026*m.x91*m.x104 - 2.20147187473026*m.x104*m.x91 +
                       0.9404423768502*m.x104*m.x105 + 2.20147187473026*m.x105*m.x90 + 0.9404423768502*m.x105*m.x104 -
                       1.8808847537004*m.x105**2 + m.x6 == 0)
m.c8 = Constraint(expr=2.39097169089518*m.x87*m.x98 - 2.39097169089518*m.x84*m.x101 + 2.39097169089518*m.x98*m.x87 -
                       2.39097169089518*m.x101*m.x84 + m.x7 == 0)
m.c9 = Constraint(expr=2.39097169089518*m.x84*m.x101 - 2.39097169089518*m.x87*m.x98 - 2.39097169089518*m.x98*m.x87 +
                       2.39097169089518*m.x101*m.x84 + m.x8 == 0)
m.c10 = Constraint(expr=1.98396952622808*m.x86*m.x99 - 1.98396952622808*m.x85*m.x100 + 1.98396952622808*m.x99*m.x86 -
                        1.98396952622808*m.x100*m.x85 + m.x9 == 0)
m.c11 = Constraint(expr=1.98396952622808*m.x85*m.x100 - 1.98396952622808*m.x86*m.x99 - 1.98396952622808*m.x99*m.x86 +
                        1.98396952622808*m.x100*m.x85 + m.x10 == 0)
m.c12 = Constraint(expr=0.712002743509966*m.x89*m.x94 - 1.42400548701993*m.x89**2 - 1.5145252284653*m.x89*m.x108 +
                        0.712002743509966*m.x94*m.x89 + 1.5145252284653*m.x94*m.x103 + 1.5145252284653*m.x103*m.x94 -
                        1.42400548701993*m.x103**2 + 0.712002743509966*m.x103*m.x108 - 1.5145252284653*m.x108*m.x89 +
                        0.712002743509966*m.x108*m.x103 + m.x11 == 0)
m.c13 = Constraint(expr=0.712002743509966*m.x89*m.x94 + 1.5145252284653*m.x89*m.x108 + 0.712002743509966*m.x94*m.x89 -
                        1.42400548701993*m.x94**2 - 1.5145252284653*m.x94*m.x103 - 1.5145252284653*m.x103*m.x94 +
                        0.712002743509966*m.x103*m.x108 + 1.5145252284653*m.x108*m.x89 + 0.712002743509966*m.x108*m.x103
                        - 1.42400548701993*m.x108**2 + m.x12 == 0)
m.c14 = Constraint(expr=3.42049033074784*m.x84*m.x85 - 6.84098066149567*m.x84**2 - 10.7892769908458*m.x84*m.x99 +
                        3.42049033074784*m.x85*m.x84 + 10.7892769908458*m.x85*m.x98 + 10.7892769908458*m.x98*m.x85 -
                        6.84098066149567*m.x98**2 + 3.42049033074784*m.x98*m.x99 - 10.7892769908458*m.x99*m.x84 +
                        3.42049033074784*m.x99*m.x98 + m.x13 == 0)
m.c15 = Constraint(expr=3.42049033074784*m.x84*m.x85 + 10.7892769908458*m.x84*m.x99 + 3.42049033074784*m.x85*m.x84 -
                        6.84098066149567*m.x85**2 - 10.7892769908458*m.x85*m.x98 - 10.7892769908458*m.x98*m.x85 +
                        3.42049033074784*m.x98*m.x99 + 10.7892769908458*m.x99*m.x84 + 3.42049033074784*m.x99*m.x98 -
                        6.84098066149567*m.x99**2 + m.x14 == 0)
m.c16 = Constraint(expr=1.54946370191899*m.x86*m.x93 - 3.09892740383799*m.x86**2 - 3.05137772409656*m.x86*m.x107 +
                        1.54946370191899*m.x93*m.x86 + 3.05137772409656*m.x93*m.x100 + 3.05137772409656*m.x100*m.x93 -
                        3.09892740383799*m.x100**2 + 1.54946370191899*m.x100*m.x107 - 3.05137772409656*m.x107*m.x86 +
                        1.54946370191899*m.x107*m.x100 + m.x15 == 0)
m.c17 = Constraint(expr=1.54946370191899*m.x86*m.x93 + 3.05137772409656*m.x86*m.x107 + 1.54946370191899*m.x93*m.x86 -
                        3.09892740383799*m.x93**2 - 3.05137772409656*m.x93*m.x100 - 3.05137772409656*m.x100*m.x93 +
                        1.54946370191899*m.x100*m.x107 + 3.05137772409656*m.x107*m.x86 + 1.54946370191899*m.x107*m.x100
                        - 3.09892740383799*m.x107**2 + m.x16 == 0)
m.c18 = Constraint(expr=2.83848992336077*m.x88*m.x101 - 2.83848992336077*m.x87*m.x102 + 2.83848992336077*m.x101*m.x88 -
                        2.83848992336077*m.x102*m.x87 + m.x17 == 0)
m.c19 = Constraint(expr=2.83848992336077*m.x87*m.x102 - 2.83848992336077*m.x88*m.x101 - 2.83848992336077*m.x101*m.x88 +
                        2.83848992336077*m.x102*m.x87 + m.x18 == 0)
m.c20 = Constraint(expr=0.568497078903163*m.x93*m.x94 - 1.13699415780633*m.x93**2 - 1.15748173755268*m.x93*m.x108 +
                        0.568497078903163*m.x94*m.x93 + 1.15748173755268*m.x94*m.x107 + 1.15748173755268*m.x107*m.x94 -
                        1.13699415780633*m.x107**2 + 0.568497078903163*m.x107*m.x108 - 1.15748173755268*m.x108*m.x93 +
                        0.568497078903163*m.x108*m.x107 + m.x19 == 0)
m.c21 = Constraint(expr=0.568497078903163*m.x93*m.x94 + 1.15748173755268*m.x93*m.x108 + 0.568497078903163*m.x94*m.x93 -
                        1.13699415780633*m.x94**2 - 1.15748173755268*m.x94*m.x107 - 1.15748173755268*m.x107*m.x94 +
                        0.568497078903163*m.x107*m.x108 + 1.15748173755268*m.x108*m.x93 + 0.568497078903163*m.x108*
                        m.x107 - 1.13699415780633*m.x108**2 + m.x20 == 0)
m.c22 = Constraint(expr=0.762983720225487*m.x86*m.x92 - 1.52596744045097*m.x86**2 - 1.5879819825147*m.x86*m.x106 +
                        0.762983720225487*m.x92*m.x86 + 1.5879819825147*m.x92*m.x100 + 1.5879819825147*m.x100*m.x92 -
                        1.52596744045097*m.x100**2 + 0.762983720225487*m.x100*m.x106 - 1.5879819825147*m.x106*m.x86 +
                        0.762983720225487*m.x106*m.x100 + m.x21 == 0)
m.c23 = Constraint(expr=0.762983720225487*m.x86*m.x92 + 1.5879819825147*m.x86*m.x106 + 0.762983720225487*m.x92*m.x86 -
                        1.52596744045097*m.x92**2 - 1.5879819825147*m.x92*m.x100 - 1.5879819825147*m.x100*m.x92 +
                        0.762983720225487*m.x100*m.x106 + 1.5879819825147*m.x106*m.x86 + 0.762983720225487*m.x106*m.x100
                        - 1.52596744045097*m.x106**2 + m.x22 == 0)
m.c24 = Constraint(expr=0.97751428158863*m.x86*m.x91 - 1.95502856317726*m.x86**2 - 2.04703717212022*m.x86*m.x105 +
                        0.97751428158863*m.x91*m.x86 + 2.04703717212022*m.x91*m.x100 + 2.04703717212022*m.x100*m.x91 -
                        1.95502856317726*m.x100**2 + 0.97751428158863*m.x100*m.x105 - 2.04703717212022*m.x105*m.x86 +
                        0.97751428158863*m.x105*m.x100 + m.x23 == 0)
m.c25 = Constraint(expr=0.97751428158863*m.x86*m.x91 + 2.04703717212022*m.x86*m.x105 + 0.97751428158863*m.x91*m.x86 -
                        1.95502856317726*m.x91**2 - 2.04703717212022*m.x91*m.x100 - 2.04703717212022*m.x100*m.x91 +
                        0.97751428158863*m.x100*m.x105 + 2.04703717212022*m.x105*m.x86 + 0.97751428158863*m.x105*m.x100
                        - 1.95502856317726*m.x105**2 + m.x24 == 0)
m.c26 = Constraint(expr=1.24451229341096*m.x92*m.x93 - 2.48902458682192*m.x92**2 - 1.12598731308611*m.x92*m.x107 +
                        1.24451229341096*m.x93*m.x92 + 1.12598731308611*m.x93*m.x106 + 1.12598731308611*m.x106*m.x93 -
                        2.48902458682192*m.x106**2 + 1.24451229341096*m.x106*m.x107 - 1.12598731308611*m.x107*m.x92 +
                        1.24451229341096*m.x107*m.x106 + m.x25 == 0)
m.c27 = Constraint(expr=1.24451229341096*m.x92*m.x93 + 1.12598731308611*m.x92*m.x107 + 1.24451229341096*m.x93*m.x92 -
                        2.48902458682192*m.x93**2 - 1.12598731308611*m.x93*m.x106 - 1.12598731308611*m.x106*m.x93 +
                        1.24451229341096*m.x106*m.x107 + 1.12598731308611*m.x107*m.x92 + 1.24451229341096*m.x107*m.x106
                        - 2.48902458682192*m.x107**2 + m.x26 == 0)
m.c28 = Constraint(expr=0.512948727485094*m.x81*m.x85 - 1.02589745497019*m.x81**2 - 2.11749184116742*m.x81*m.x99 +
                        0.512948727485094*m.x85*m.x81 + 2.11749184116742*m.x85*m.x95 + 2.11749184116742*m.x95*m.x85 -
                        1.02589745497019*m.x95**2 + 0.512948727485094*m.x95*m.x99 - 2.11749184116742*m.x99*m.x81 +
                        0.512948727485094*m.x99*m.x95 + m.x27 == 0)
m.c29 = Constraint(expr=0.512948727485094*m.x81*m.x85 + 2.11749184116742*m.x81*m.x99 + 0.512948727485094*m.x85*m.x81 -
                        1.02589745497019*m.x85**2 - 2.11749184116742*m.x85*m.x95 - 2.11749184116742*m.x95*m.x85 +
                        0.512948727485094*m.x95*m.x99 + 2.11749184116742*m.x99*m.x81 + 0.512948727485094*m.x99*m.x95 -
                        1.02589745497019*m.x99**2 + m.x28 == 0)
m.c30 = Constraint(expr=1.95102477622371*m.x89*m.x90 - 3.90204955244743*m.x89**2 - 5.18269706353046*m.x89*m.x104 +
                        1.95102477622371*m.x90*m.x89 + 5.18269706353046*m.x90*m.x103 + 5.18269706353046*m.x103*m.x90 -
                        3.90204955244743*m.x103**2 + 1.95102477622371*m.x103*m.x104 - 5.18269706353046*m.x104*m.x89 +
                        1.95102477622371*m.x104*m.x103 + m.x29 == 0)
m.c31 = Constraint(expr=1.95102477622371*m.x89*m.x90 + 5.18269706353046*m.x89*m.x104 + 1.95102477622371*m.x90*m.x89 -
                        3.90204955244743*m.x90**2 - 5.18269706353046*m.x90*m.x103 - 5.18269706353046*m.x103*m.x90 +
                        1.95102477622371*m.x103*m.x104 + 5.18269706353046*m.x104*m.x89 + 1.95102477622371*m.x104*m.x103
                        - 3.90204955244743*m.x104**2 + m.x30 == 0)
m.c32 = Constraint(expr=2.49956580039902*m.x81*m.x82 - 4.99913160079803*m.x81**2 - 7.63154326158978*m.x81*m.x96 +
                        2.49956580039902*m.x82*m.x81 + 7.63154326158978*m.x82*m.x95 + 7.63154326158978*m.x95*m.x82 -
                        4.99913160079803*m.x95**2 + 2.49956580039902*m.x95*m.x96 - 7.63154326158978*m.x96*m.x81 +
                        2.49956580039902*m.x96*m.x95 + m.x31 == 0)
m.c33 = Constraint(expr=2.49956580039902*m.x81*m.x82 + 7.63154326158978*m.x81*m.x96 + 2.49956580039902*m.x82*m.x81 -
                        4.99913160079803*m.x82**2 - 7.63154326158978*m.x82*m.x95 - 7.63154326158978*m.x95*m.x82 +
                        2.49956580039902*m.x95*m.x96 + 7.63154326158978*m.x96*m.x81 + 2.49956580039902*m.x96*m.x95 -
                        4.99913160079803*m.x96**2 + m.x32 == 0)
m.c34 = Constraint(expr=0.850569833547202*m.x82*m.x85 - 1.7011396670944*m.x82**2 - 2.59696369898486*m.x82*m.x99 +
                        0.850569833547202*m.x85*m.x82 + 2.59696369898486*m.x85*m.x96 + 2.59696369898486*m.x96*m.x85 -
                        1.7011396670944*m.x96**2 + 0.850569833547202*m.x96*m.x99 - 2.59696369898486*m.x99*m.x82 +
                        0.850569833547202*m.x99*m.x96 + m.x33 == 0)
m.c35 = Constraint(expr=0.850569833547202*m.x82*m.x85 + 2.59696369898486*m.x82*m.x99 + 0.850569833547202*m.x85*m.x82 -
                        1.7011396670944*m.x85**2 - 2.59696369898486*m.x85*m.x96 - 2.59696369898486*m.x96*m.x85 +
                        0.850569833547202*m.x96*m.x99 + 2.59696369898486*m.x99*m.x82 + 0.850569833547202*m.x99*m.x96 -
                        1.7011396670944*m.x99**2 + m.x34 == 0)
m.c36 = Constraint(expr=0.99298785496278*m.x83*m.x84 - 1.98597570992556*m.x83**2 - 2.53440848879696*m.x83*m.x98 +
                        0.99298785496278*m.x84*m.x83 + 2.53440848879696*m.x84*m.x97 + 2.53440848879696*m.x97*m.x84 -
                        1.98597570992556*m.x97**2 + 0.99298785496278*m.x97*m.x98 - 2.53440848879696*m.x98*m.x83 +
                        0.99298785496278*m.x98*m.x97 + m.x35 == 0)
m.c37 = Constraint(expr=0.99298785496278*m.x83*m.x84 + 2.53440848879696*m.x83*m.x98 + 0.99298785496278*m.x84*m.x83 -
                        1.98597570992556*m.x84**2 - 2.53440848879696*m.x84*m.x97 - 2.53440848879696*m.x97*m.x84 +
                        0.99298785496278*m.x97*m.x98 + 2.53440848879696*m.x98*m.x83 + 0.99298785496278*m.x98*m.x97 -
                        1.98597570992556*m.x98**2 + m.x36 == 0)
m.c38 = Constraint(expr=0.898989535761804*m.x89*m.x98 - 0.898989535761804*m.x84*m.x103 + 0.898989535761804*m.x98*m.x89
                        - 0.898989535761804*m.x103*m.x84 + m.x37 == 0)
m.c39 = Constraint(expr=0.898989535761804*m.x84*m.x103 - 0.898989535761804*m.x89*m.x98 - 0.898989535761804*m.x98*m.x89
                        + 0.898989535761804*m.x103*m.x84 + m.x38 == 0)
m.c40 = Constraint(expr=0.843016575307471*m.x82*m.x84 - 1.68603315061494*m.x82**2 - 2.55791916293604*m.x82*m.x98 +
                        0.843016575307471*m.x84*m.x82 + 2.55791916293604*m.x84*m.x96 + 2.55791916293604*m.x96*m.x84 -
                        1.68603315061494*m.x96**2 + 0.843016575307471*m.x96*m.x98 - 2.55791916293604*m.x98*m.x82 +
                        0.843016575307471*m.x98*m.x96 + m.x39 == 0)
m.c41 = Constraint(expr=0.843016575307471*m.x82*m.x84 + 2.55791916293604*m.x82*m.x98 + 0.843016575307471*m.x84*m.x82 -
                        1.68603315061494*m.x84**2 - 2.55791916293604*m.x84*m.x96 - 2.55791916293604*m.x96*m.x84 +
                        0.843016575307471*m.x96*m.x98 + 2.55791916293604*m.x98*m.x82 + 0.843016575307471*m.x98*m.x96 -
                        1.68603315061494*m.x98**2 + m.x40 == 0)
m.c42 = Constraint(expr=2.39093157587886*m.x82*m.x83 - 4.75996315175772*m.x82**2 + 0.567509596153698*m.x82*m.x97 +
                        2.39093157587886*m.x83*m.x82 - 0.567509596153698*m.x83*m.x96 - 0.567509596153698*m.x96*m.x83 -
                        4.75996315175772*m.x96**2 + 2.39093157587886*m.x96*m.x97 + 0.567509596153698*m.x97*m.x82 +
                        2.39093157587886*m.x97*m.x96 + m.x41 == 0)
m.c43 = Constraint(expr=2.39093157587886*m.x82*m.x83 - 0.567509596153698*m.x82*m.x97 + 2.39093157587886*m.x83*m.x82 -
                        4.75996315175772*m.x83**2 + 0.567509596153698*m.x83*m.x96 + 0.567509596153698*m.x96*m.x83 +
                        2.39093157587886*m.x96*m.x97 - 0.567509596153698*m.x97*m.x82 + 2.39093157587886*m.x97*m.x96 -
                        4.75996315175772*m.x97**2 + m.x42 == 0)
m.c44 = Constraint(expr=4.54504135987637*m.x87*m.x89 - 9.09008271975275*m.x87**2 + 4.54504135987637*m.x89*m.x87 -
                        9.09008271975275*m.x101**2 + 4.54504135987637*m.x101*m.x103 + 4.54504135987637*m.x103*m.x101
                        + m.x43 == 0)
m.c45 = Constraint(expr=4.54504135987637*m.x87*m.x89 + 4.54504135987637*m.x89*m.x87 - 9.09008271975275*m.x89**2 +
                        4.54504135987637*m.x101*m.x103 + 4.54504135987637*m.x103*m.x101 - 9.09008271975275*m.x103**2
                        + m.x44 == 0)
m.c46 = Constraint(expr=2.20147187473026*m.x90*m.x91 - 4.40294374946052*m.x90**2 + 0.9404423768502*m.x90*m.x105 +
                        2.20147187473026*m.x91*m.x90 - 0.9404423768502*m.x91*m.x104 - 0.9404423768502*m.x104*m.x91 -
                        4.40294374946052*m.x104**2 + 2.20147187473026*m.x104*m.x105 + 0.9404423768502*m.x105*m.x90 +
                        2.20147187473026*m.x105*m.x104 + m.x45 == 0)
m.c47 = Constraint(expr=2.20147187473026*m.x90*m.x91 - 0.9404423768502*m.x90*m.x105 + 2.20147187473026*m.x91*m.x90 -
                        4.40294374946052*m.x91**2 + 0.9404423768502*m.x91*m.x104 + 0.9404423768502*m.x104*m.x91 +
                        2.20147187473026*m.x104*m.x105 - 0.9404423768502*m.x105*m.x90 + 2.20147187473026*m.x105*m.x104
                        - 4.40294374946052*m.x105**2 + m.x46 == 0)
m.c48 = Constraint(expr=2.39097169089518*m.x84*m.x87 - 4.78194338179036*m.x84**2 + 2.39097169089518*m.x87*m.x84 -
                        4.78194338179036*m.x98**2 + 2.39097169089518*m.x98*m.x101 + 2.39097169089518*m.x101*m.x98
                        + m.x47 == 0)
m.c49 = Constraint(expr=2.39097169089518*m.x84*m.x87 + 2.39097169089518*m.x87*m.x84 - 4.78194338179036*m.x87**2 +
                        2.39097169089518*m.x98*m.x101 + 2.39097169089518*m.x101*m.x98 - 4.78194338179036*m.x101**2
                        + m.x48 == 0)
m.c50 = Constraint(expr=1.98396952622808*m.x85*m.x86 - 3.96793905245615*m.x85**2 + 1.98396952622808*m.x86*m.x85 -
                        3.96793905245615*m.x99**2 + 1.98396952622808*m.x99*m.x100 + 1.98396952622808*m.x100*m.x99
                        + m.x49 == 0)
m.c51 = Constraint(expr=1.98396952622808*m.x85*m.x86 + 1.98396952622808*m.x86*m.x85 - 3.96793905245615*m.x86**2 +
                        1.98396952622808*m.x99*m.x100 + 1.98396952622808*m.x100*m.x99 - 3.96793905245615*m.x100**2
                        + m.x50 == 0)
m.c52 = Constraint(expr=1.5145252284653*m.x89*m.x94 - 3.0290504569306*m.x89**2 + 0.712002743509966*m.x89*m.x108 +
                        1.5145252284653*m.x94*m.x89 - 0.712002743509966*m.x94*m.x103 - 0.712002743509966*m.x103*m.x94 -
                        3.0290504569306*m.x103**2 + 1.5145252284653*m.x103*m.x108 + 0.712002743509966*m.x108*m.x89 +
                        1.5145252284653*m.x108*m.x103 + m.x51 == 0)
m.c53 = Constraint(expr=1.5145252284653*m.x89*m.x94 - 0.712002743509966*m.x89*m.x108 + 1.5145252284653*m.x94*m.x89 -
                        3.0290504569306*m.x94**2 + 0.712002743509966*m.x94*m.x103 + 0.712002743509966*m.x103*m.x94 +
                        1.5145252284653*m.x103*m.x108 - 0.712002743509966*m.x108*m.x89 + 1.5145252284653*m.x108*m.x103
                        - 3.0290504569306*m.x108**2 + m.x52 == 0)
m.c54 = Constraint(expr=10.7892769908458*m.x84*m.x85 - 21.5785539816916*m.x84**2 + 3.42049033074784*m.x84*m.x99 +
                        10.7892769908458*m.x85*m.x84 - 3.42049033074784*m.x85*m.x98 - 3.42049033074784*m.x98*m.x85 -
                        21.5785539816916*m.x98**2 + 10.7892769908458*m.x98*m.x99 + 3.42049033074784*m.x99*m.x84 +
                        10.7892769908458*m.x99*m.x98 + m.x53 == 0)
m.c55 = Constraint(expr=10.7892769908458*m.x84*m.x85 - 3.42049033074784*m.x84*m.x99 + 10.7892769908458*m.x85*m.x84 -
                        21.5785539816916*m.x85**2 + 3.42049033074784*m.x85*m.x98 + 3.42049033074784*m.x98*m.x85 +
                        10.7892769908458*m.x98*m.x99 - 3.42049033074784*m.x99*m.x84 + 10.7892769908458*m.x99*m.x98 -
                        21.5785539816916*m.x99**2 + m.x54 == 0)
m.c56 = Constraint(expr=3.05137772409656*m.x86*m.x93 - 6.10275544819311*m.x86**2 + 1.54946370191899*m.x86*m.x107 +
                        3.05137772409656*m.x93*m.x86 - 1.54946370191899*m.x93*m.x100 - 1.54946370191899*m.x100*m.x93 -
                        6.10275544819311*m.x100**2 + 3.05137772409656*m.x100*m.x107 + 1.54946370191899*m.x107*m.x86 +
                        3.05137772409656*m.x107*m.x100 + m.x55 == 0)
m.c57 = Constraint(expr=3.05137772409656*m.x86*m.x93 - 1.54946370191899*m.x86*m.x107 + 3.05137772409656*m.x93*m.x86 -
                        6.10275544819311*m.x93**2 + 1.54946370191899*m.x93*m.x100 + 1.54946370191899*m.x100*m.x93 +
                        3.05137772409656*m.x100*m.x107 - 1.54946370191899*m.x107*m.x86 + 3.05137772409656*m.x107*m.x100
                        - 6.10275544819311*m.x107**2 + m.x56 == 0)
m.c58 = Constraint(expr=2.83848992336077*m.x87*m.x88 - 5.67697984672154*m.x87**2 + 2.83848992336077*m.x88*m.x87 -
                        5.67697984672154*m.x101**2 + 2.83848992336077*m.x101*m.x102 + 2.83848992336077*m.x102*m.x101
                        + m.x57 == 0)
m.c59 = Constraint(expr=2.83848992336077*m.x87*m.x88 + 2.83848992336077*m.x88*m.x87 - 5.67697984672154*m.x88**2 +
                        2.83848992336077*m.x101*m.x102 + 2.83848992336077*m.x102*m.x101 - 5.67697984672154*m.x102**2
                        + m.x58 == 0)
m.c60 = Constraint(expr=1.15748173755268*m.x93*m.x94 - 2.31496347510535*m.x93**2 + 0.568497078903163*m.x93*m.x108 +
                        1.15748173755268*m.x94*m.x93 - 0.568497078903163*m.x94*m.x107 - 0.568497078903163*m.x107*m.x94
                        - 2.31496347510535*m.x107**2 + 1.15748173755268*m.x107*m.x108 + 0.568497078903163*m.x108*m.x93
                        + 1.15748173755268*m.x108*m.x107 + m.x59 == 0)
m.c61 = Constraint(expr=1.15748173755268*m.x93*m.x94 - 0.568497078903163*m.x93*m.x108 + 1.15748173755268*m.x94*m.x93 -
                        2.31496347510535*m.x94**2 + 0.568497078903163*m.x94*m.x107 + 0.568497078903163*m.x107*m.x94 +
                        1.15748173755268*m.x107*m.x108 - 0.568497078903163*m.x108*m.x93 + 1.15748173755268*m.x108*m.x107
                        - 2.31496347510535*m.x108**2 + m.x60 == 0)
m.c62 = Constraint(expr=1.5879819825147*m.x86*m.x92 - 3.1759639650294*m.x86**2 + 0.762983720225487*m.x86*m.x106 +
                        1.5879819825147*m.x92*m.x86 - 0.762983720225487*m.x92*m.x100 - 0.762983720225487*m.x100*m.x92 -
                        3.1759639650294*m.x100**2 + 1.5879819825147*m.x100*m.x106 + 0.762983720225487*m.x106*m.x86 +
                        1.5879819825147*m.x106*m.x100 + m.x61 == 0)
m.c63 = Constraint(expr=1.5879819825147*m.x86*m.x92 - 0.762983720225487*m.x86*m.x106 + 1.5879819825147*m.x92*m.x86 -
                        3.1759639650294*m.x92**2 + 0.762983720225487*m.x92*m.x100 + 0.762983720225487*m.x100*m.x92 +
                        1.5879819825147*m.x100*m.x106 - 0.762983720225487*m.x106*m.x86 + 1.5879819825147*m.x106*m.x100
                        - 3.1759639650294*m.x106**2 + m.x62 == 0)
m.c64 = Constraint(expr=2.04703717212022*m.x86*m.x91 - 4.09407434424044*m.x86**2 + 0.97751428158863*m.x86*m.x105 +
                        2.04703717212022*m.x91*m.x86 - 0.97751428158863*m.x91*m.x100 - 0.97751428158863*m.x100*m.x91 -
                        4.09407434424044*m.x100**2 + 2.04703717212022*m.x100*m.x105 + 0.97751428158863*m.x105*m.x86 +
                        2.04703717212022*m.x105*m.x100 + m.x63 == 0)
m.c65 = Constraint(expr=2.04703717212022*m.x86*m.x91 - 0.97751428158863*m.x86*m.x105 + 2.04703717212022*m.x91*m.x86 -
                        4.09407434424044*m.x91**2 + 0.97751428158863*m.x91*m.x100 + 0.97751428158863*m.x100*m.x91 +
                        2.04703717212022*m.x100*m.x105 - 0.97751428158863*m.x105*m.x86 + 2.04703717212022*m.x105*m.x100
                        - 4.09407434424044*m.x105**2 + m.x64 == 0)
m.c66 = Constraint(expr=1.12598731308611*m.x92*m.x93 - 2.25197462617221*m.x92**2 + 1.24451229341096*m.x92*m.x107 +
                        1.12598731308611*m.x93*m.x92 - 1.24451229341096*m.x93*m.x106 - 1.24451229341096*m.x106*m.x93 -
                        2.25197462617221*m.x106**2 + 1.12598731308611*m.x106*m.x107 + 1.24451229341096*m.x107*m.x92 +
                        1.12598731308611*m.x107*m.x106 + m.x65 == 0)
m.c67 = Constraint(expr=1.12598731308611*m.x92*m.x93 - 1.24451229341096*m.x92*m.x107 + 1.12598731308611*m.x93*m.x92 -
                        2.25197462617221*m.x93**2 + 1.24451229341096*m.x93*m.x106 + 1.24451229341096*m.x106*m.x93 +
                        1.12598731308611*m.x106*m.x107 - 1.24451229341096*m.x107*m.x92 + 1.12598731308611*m.x107*m.x106
                        - 2.25197462617221*m.x107**2 + m.x66 == 0)
m.c68 = Constraint(expr=2.11749184116742*m.x81*m.x85 - 4.21038368233483*m.x81**2 + 0.512948727485094*m.x81*m.x99 +
                        2.11749184116742*m.x85*m.x81 - 0.512948727485094*m.x85*m.x95 - 0.512948727485094*m.x95*m.x85 -
                        4.21038368233483*m.x95**2 + 2.11749184116742*m.x95*m.x99 + 0.512948727485094*m.x99*m.x81 +
                        2.11749184116742*m.x99*m.x95 + m.x67 == 0)
m.c69 = Constraint(expr=2.11749184116742*m.x81*m.x85 - 0.512948727485094*m.x81*m.x99 + 2.11749184116742*m.x85*m.x81 -
                        4.21038368233483*m.x85**2 + 0.512948727485094*m.x85*m.x95 + 0.512948727485094*m.x95*m.x85 +
                        2.11749184116742*m.x95*m.x99 - 0.512948727485094*m.x99*m.x81 + 2.11749184116742*m.x99*m.x95 -
                        4.21038368233483*m.x99**2 + m.x68 == 0)
m.c70 = Constraint(expr=5.18269706353046*m.x89*m.x90 - 10.3653941270609*m.x89**2 + 1.95102477622371*m.x89*m.x104 +
                        5.18269706353046*m.x90*m.x89 - 1.95102477622371*m.x90*m.x103 - 1.95102477622371*m.x103*m.x90 -
                        10.3653941270609*m.x103**2 + 5.18269706353046*m.x103*m.x104 + 1.95102477622371*m.x104*m.x89 +
                        5.18269706353046*m.x104*m.x103 + m.x69 == 0)
m.c71 = Constraint(expr=5.18269706353046*m.x89*m.x90 - 1.95102477622371*m.x89*m.x104 + 5.18269706353046*m.x90*m.x89 -
                        10.3653941270609*m.x90**2 + 1.95102477622371*m.x90*m.x103 + 1.95102477622371*m.x103*m.x90 +
                        5.18269706353046*m.x103*m.x104 - 1.95102477622371*m.x104*m.x89 + 5.18269706353046*m.x104*m.x103
                        - 10.3653941270609*m.x104**2 + m.x70 == 0)
m.c72 = Constraint(expr=7.63154326158978*m.x81*m.x82 - 15.2366865231796*m.x81**2 + 2.49956580039902*m.x81*m.x96 +
                        7.63154326158978*m.x82*m.x81 - 2.49956580039902*m.x82*m.x95 - 2.49956580039902*m.x95*m.x82 -
                        15.2366865231796*m.x95**2 + 7.63154326158978*m.x95*m.x96 + 2.49956580039902*m.x96*m.x81 +
                        7.63154326158978*m.x96*m.x95 + m.x71 == 0)
m.c73 = Constraint(expr=7.63154326158978*m.x81*m.x82 - 2.49956580039902*m.x81*m.x96 + 7.63154326158978*m.x82*m.x81 -
                        15.2366865231796*m.x82**2 + 2.49956580039902*m.x82*m.x95 + 2.49956580039902*m.x95*m.x82 +
                        7.63154326158978*m.x95*m.x96 - 2.49956580039902*m.x96*m.x81 + 7.63154326158978*m.x96*m.x95 -
                        15.2366865231796*m.x96**2 + m.x72 == 0)
m.c74 = Constraint(expr=2.59696369898486*m.x82*m.x85 - 5.17662739796971*m.x82**2 + 0.850569833547202*m.x82*m.x99 +
                        2.59696369898486*m.x85*m.x82 - 0.850569833547202*m.x85*m.x96 - 0.850569833547202*m.x96*m.x85 -
                        5.17662739796971*m.x96**2 + 2.59696369898486*m.x96*m.x99 + 0.850569833547202*m.x99*m.x82 +
                        2.59696369898486*m.x99*m.x96 + m.x73 == 0)
m.c75 = Constraint(expr=2.59696369898486*m.x82*m.x85 - 0.850569833547202*m.x82*m.x99 + 2.59696369898486*m.x85*m.x82 -
                        5.17662739796971*m.x85**2 + 0.850569833547202*m.x85*m.x96 + 0.850569833547202*m.x96*m.x85 +
                        2.59696369898486*m.x96*m.x99 - 0.850569833547202*m.x99*m.x82 + 2.59696369898486*m.x99*m.x96 -
                        5.17662739796971*m.x99**2 + m.x74 == 0)
m.c76 = Constraint(expr=2.53440848879696*m.x83*m.x84 - 5.06241697759392*m.x83**2 + 0.99298785496278*m.x83*m.x98 +
                        2.53440848879696*m.x84*m.x83 - 0.99298785496278*m.x84*m.x97 - 0.99298785496278*m.x97*m.x84 -
                        5.06241697759392*m.x97**2 + 2.53440848879696*m.x97*m.x98 + 0.99298785496278*m.x98*m.x83 +
                        2.53440848879696*m.x98*m.x97 + m.x75 == 0)
m.c77 = Constraint(expr=2.53440848879696*m.x83*m.x84 - 0.99298785496278*m.x83*m.x98 + 2.53440848879696*m.x84*m.x83 -
                        5.06241697759392*m.x84**2 + 0.99298785496278*m.x84*m.x97 + 0.99298785496278*m.x97*m.x84 +
                        2.53440848879696*m.x97*m.x98 - 0.99298785496278*m.x98*m.x83 + 2.53440848879696*m.x98*m.x97 -
                        5.06241697759392*m.x98**2 + m.x76 == 0)
m.c78 = Constraint(expr=0.898989535761804*m.x84*m.x89 - 1.79797907152361*m.x84**2 + 0.898989535761804*m.x89*m.x84 -
                        1.79797907152361*m.x98**2 + 0.898989535761804*m.x98*m.x103 + 0.898989535761804*m.x103*m.x98
                        + m.x77 == 0)
m.c79 = Constraint(expr=0.898989535761804*m.x84*m.x89 + 0.898989535761804*m.x89*m.x84 - 1.79797907152361*m.x89**2 +
                        0.898989535761804*m.x98*m.x103 + 0.898989535761804*m.x103*m.x98 - 1.79797907152361*m.x103**2
                        + m.x78 == 0)
m.c80 = Constraint(expr=2.55791916293604*m.x82*m.x84 - 5.09883832587208*m.x82**2 + 0.843016575307471*m.x82*m.x98 +
                        2.55791916293604*m.x84*m.x82 - 0.843016575307471*m.x84*m.x96 - 0.843016575307471*m.x96*m.x84 -
                        5.09883832587208*m.x96**2 + 2.55791916293604*m.x96*m.x98 + 0.843016575307471*m.x98*m.x82 +
                        2.55791916293604*m.x98*m.x96 + m.x79 == 0)
m.c81 = Constraint(expr=2.55791916293604*m.x82*m.x84 - 0.843016575307471*m.x82*m.x98 + 2.55791916293604*m.x84*m.x82 -
                        5.09883832587208*m.x84**2 + 0.843016575307471*m.x84*m.x96 + 0.843016575307471*m.x96*m.x84 +
                        2.55791916293604*m.x96*m.x98 - 0.843016575307471*m.x98*m.x82 + 2.55791916293604*m.x98*m.x96 -
                        5.09883832587208*m.x98**2 + m.x80 == 0)
# c82-c121: for each pair (x_i, x_{i+40}), i = 1..40, the point must lie inside
# a disc of radius 99 (9801 = 99**2).
m.c82 = Constraint(expr=m.x1**2 + m.x41**2 <= 9801)
m.c83 = Constraint(expr=m.x2**2 + m.x42**2 <= 9801)
m.c84 = Constraint(expr=m.x3**2 + m.x43**2 <= 9801)
m.c85 = Constraint(expr=m.x4**2 + m.x44**2 <= 9801)
m.c86 = Constraint(expr=m.x5**2 + m.x45**2 <= 9801)
m.c87 = Constraint(expr=m.x6**2 + m.x46**2 <= 9801)
m.c88 = Constraint(expr=m.x7**2 + m.x47**2 <= 9801)
m.c89 = Constraint(expr=m.x8**2 + m.x48**2 <= 9801)
m.c90 = Constraint(expr=m.x9**2 + m.x49**2 <= 9801)
m.c91 = Constraint(expr=m.x10**2 + m.x50**2 <= 9801)
m.c92 = Constraint(expr=m.x11**2 + m.x51**2 <= 9801)
m.c93 = Constraint(expr=m.x12**2 + m.x52**2 <= 9801)
m.c94 = Constraint(expr=m.x13**2 + m.x53**2 <= 9801)
m.c95 = Constraint(expr=m.x14**2 + m.x54**2 <= 9801)
m.c96 = Constraint(expr=m.x15**2 + m.x55**2 <= 9801)
m.c97 = Constraint(expr=m.x16**2 + m.x56**2 <= 9801)
m.c98 = Constraint(expr=m.x17**2 + m.x57**2 <= 9801)
m.c99 = Constraint(expr=m.x18**2 + m.x58**2 <= 9801)
m.c100 = Constraint(expr=m.x19**2 + m.x59**2 <= 9801)
m.c101 = Constraint(expr=m.x20**2 + m.x60**2 <= 9801)
m.c102 = Constraint(expr=m.x21**2 + m.x61**2 <= 9801)
m.c103 = Constraint(expr=m.x22**2 + m.x62**2 <= 9801)
m.c104 = Constraint(expr=m.x23**2 + m.x63**2 <= 9801)
m.c105 = Constraint(expr=m.x24**2 + m.x64**2 <= 9801)
m.c106 = Constraint(expr=m.x25**2 + m.x65**2 <= 9801)
m.c107 = Constraint(expr=m.x26**2 + m.x66**2 <= 9801)
m.c108 = Constraint(expr=m.x27**2 + m.x67**2 <= 9801)
m.c109 = Constraint(expr=m.x28**2 + m.x68**2 <= 9801)
m.c110 = Constraint(expr=m.x29**2 + m.x69**2 <= 9801)
m.c111 = Constraint(expr=m.x30**2 + m.x70**2 <= 9801)
m.c112 = Constraint(expr=m.x31**2 + m.x71**2 <= 9801)
m.c113 = Constraint(expr=m.x32**2 + m.x72**2 <= 9801)
m.c114 = Constraint(expr=m.x33**2 + m.x73**2 <= 9801)
m.c115 = Constraint(expr=m.x34**2 + m.x74**2 <= 9801)
m.c116 = Constraint(expr=m.x35**2 + m.x75**2 <= 9801)
m.c117 = Constraint(expr=m.x36**2 + m.x76**2 <= 9801)
m.c118 = Constraint(expr=m.x37**2 + m.x77**2 <= 9801)
m.c119 = Constraint(expr=m.x38**2 + m.x78**2 <= 9801)
m.c120 = Constraint(expr=m.x39**2 + m.x79**2 <= 9801)
m.c121 = Constraint(expr=m.x40**2 + m.x80**2 <= 9801)
# c122-c149: for each pair (x_i, x_{i+14}), i = 81..94, the point is confined
# to an annulus: magnitude between 0.94 and 1.06 (1.1236 = 1.06**2,
# 0.8836 = 0.94**2) — i.e. approximately unit magnitude with 6% tolerance.
m.c122 = Constraint(expr=m.x81**2 + m.x95**2 <= 1.1236)
m.c123 = Constraint(expr=m.x82**2 + m.x96**2 <= 1.1236)
m.c124 = Constraint(expr=m.x83**2 + m.x97**2 <= 1.1236)
m.c125 = Constraint(expr=m.x84**2 + m.x98**2 <= 1.1236)
m.c126 = Constraint(expr=m.x85**2 + m.x99**2 <= 1.1236)
m.c127 = Constraint(expr=m.x86**2 + m.x100**2 <= 1.1236)
m.c128 = Constraint(expr=m.x87**2 + m.x101**2 <= 1.1236)
m.c129 = Constraint(expr=m.x88**2 + m.x102**2 <= 1.1236)
m.c130 = Constraint(expr=m.x89**2 + m.x103**2 <= 1.1236)
m.c131 = Constraint(expr=m.x90**2 + m.x104**2 <= 1.1236)
m.c132 = Constraint(expr=m.x91**2 + m.x105**2 <= 1.1236)
m.c133 = Constraint(expr=m.x92**2 + m.x106**2 <= 1.1236)
m.c134 = Constraint(expr=m.x93**2 + m.x107**2 <= 1.1236)
m.c135 = Constraint(expr=m.x94**2 + m.x108**2 <= 1.1236)
m.c136 = Constraint(expr=m.x81**2 + m.x95**2 >= 0.8836)
m.c137 = Constraint(expr=m.x82**2 + m.x96**2 >= 0.8836)
m.c138 = Constraint(expr=m.x83**2 + m.x97**2 >= 0.8836)
m.c139 = Constraint(expr=m.x84**2 + m.x98**2 >= 0.8836)
m.c140 = Constraint(expr=m.x85**2 + m.x99**2 >= 0.8836)
m.c141 = Constraint(expr=m.x86**2 + m.x100**2 >= 0.8836)
m.c142 = Constraint(expr=m.x87**2 + m.x101**2 >= 0.8836)
m.c143 = Constraint(expr=m.x88**2 + m.x102**2 >= 0.8836)
m.c144 = Constraint(expr=m.x89**2 + m.x103**2 >= 0.8836)
m.c145 = Constraint(expr=m.x90**2 + m.x104**2 >= 0.8836)
m.c146 = Constraint(expr=m.x91**2 + m.x105**2 >= 0.8836)
m.c147 = Constraint(expr=m.x92**2 + m.x106**2 >= 0.8836)
m.c148 = Constraint(expr=m.x93**2 + m.x107**2 >= 0.8836)
m.c149 = Constraint(expr=m.x94**2 + m.x108**2 >= 0.8836)
# c150-c169: simple box bounds on x109-x118, written as explicit constraints.
# NOTE(review): these could equally be variable bounds; kept as constraints to
# match the generated model exactly.
m.c150 = Constraint(expr= m.x109 <= 3.324)
m.c151 = Constraint(expr= m.x110 <= 1.4)
m.c152 = Constraint(expr= m.x111 <= 1)
m.c153 = Constraint(expr= m.x112 <= 1)
m.c154 = Constraint(expr= m.x113 <= 1)
m.c155 = Constraint(expr= m.x109 >= 0)
m.c156 = Constraint(expr= m.x110 >= 0)
m.c157 = Constraint(expr= m.x111 >= 0)
m.c158 = Constraint(expr= m.x112 >= 0)
m.c159 = Constraint(expr= m.x113 >= 0)
m.c160 = Constraint(expr= m.x114 <= 0.1)
m.c161 = Constraint(expr= m.x115 <= 0.5)
m.c162 = Constraint(expr= m.x116 <= 0.4)
m.c163 = Constraint(expr= m.x117 <= 0.24)
m.c164 = Constraint(expr= m.x118 <= 0.24)
m.c165 = Constraint(expr= m.x114 >= 0)
m.c166 = Constraint(expr= m.x115 >= -0.4)
m.c167 = Constraint(expr= m.x116 >= 0)
m.c168 = Constraint(expr= m.x117 >= -0.06)
m.c169 = Constraint(expr= m.x118 >= -0.06)
# c170: fixes x95 to zero.
m.c170 = Constraint(expr= m.x95 == 0)
# c171-c198: linear balance equations tying groups of x1-x80 to the bounded
# variables x109-x118 (or to constants).
m.c171 = Constraint(expr= m.x27 + m.x31 - m.x109 == 0)
m.c172 = Constraint(expr= m.x1 + m.x32 + m.x33 + m.x39 - m.x110 == -0.217)
m.c173 = Constraint(expr= m.x2 + m.x35 - m.x111 == -0.942)
m.c174 = Constraint(expr= m.x10 + m.x15 + m.x21 + m.x23 - m.x112 == -0.112)
m.c175 = Constraint(expr= m.x18 - m.x113 == 0)
m.c176 = Constraint(expr= m.x67 + m.x71 - m.x114 == 0)
m.c177 = Constraint(expr= m.x41 + m.x72 + m.x73 + m.x79 - m.x115 == -0.127)
m.c178 = Constraint(expr= m.x42 + m.x75 - m.x116 == -0.19)
m.c179 = Constraint(expr= m.x50 + m.x55 + m.x61 + m.x63 - m.x117 == -0.075)
m.c180 = Constraint(expr= m.x58 - m.x118 == 0)
m.c181 = Constraint(expr= m.x7 + m.x13 + m.x36 + m.x37 + m.x40 == -0.478)
m.c182 = Constraint(expr= m.x9 + m.x14 + m.x28 + m.x34 == -0.076)
m.c183 = Constraint(expr= m.x3 + m.x8 + m.x17 == 0)
m.c184 = Constraint(expr= m.x4 + m.x11 + m.x29 + m.x38 == -0.295)
m.c185 = Constraint(expr= m.x5 + m.x30 == -0.09)
m.c186 = Constraint(expr= m.x6 + m.x24 == -0.035)
m.c187 = Constraint(expr= m.x22 + m.x25 == -0.061)
m.c188 = Constraint(expr= m.x16 + m.x19 + m.x26 == -0.135)
m.c189 = Constraint(expr= m.x12 + m.x20 == -0.149)
m.c190 = Constraint(expr= m.x47 + m.x53 + m.x76 + m.x77 + m.x80 == 0.039)
m.c191 = Constraint(expr= m.x49 + m.x54 + m.x68 + m.x74 == -0.016)
m.c192 = Constraint(expr= m.x43 + m.x48 + m.x57 == 0)
m.c193 = Constraint(expr= m.x44 + m.x51 + m.x69 + m.x78 == -0.166)
m.c194 = Constraint(expr= m.x45 + m.x70 == -0.058)
m.c195 = Constraint(expr= m.x46 + m.x64 == -0.018)
m.c196 = Constraint(expr= m.x62 + m.x65 == -0.016)
m.c197 = Constraint(expr= m.x56 + m.x59 + m.x66 == -0.058)
m.c198 = Constraint(expr= m.x52 + m.x60 == -0.05)
| 58.884
| 120
| 0.615832
| 7,206
| 44,163
| 3.774216
| 0.069109
| 0.01559
| 0.060742
| 0.086774
| 0.778909
| 0.732948
| 0.732728
| 0.72986
| 0.619995
| 0.403206
| 0
| 0.45255
| 0.205693
| 44,163
| 749
| 121
| 58.962617
| 0.322757
| 0.015352
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.00189
| 0
| 0.00189
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
eed64899869e65ba62f3c98311a5f6f622287ef0
| 8,043
|
py
|
Python
|
shortest_common_supersequence.py
|
tusharsadhwani/leetcode
|
a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8
|
[
"MIT"
] | 6
|
2021-05-21T01:10:42.000Z
|
2021-12-16T16:12:30.000Z
|
shortest_common_supersequence.py
|
tusharsadhwani/leetcode
|
a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8
|
[
"MIT"
] | null | null | null |
shortest_common_supersequence.py
|
tusharsadhwani/leetcode
|
a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8
|
[
"MIT"
] | null | null | null |
from typing import MutableMapping, Optional
from collections import defaultdict
# # Method 1 - Recursion: TLE
# class Solution:
# def shortestCommonSupersequence(
# self,
# str1: str,
# str2: str,
# index1: int = 0,
# index2: int = 0,
# ) -> str:
# if index1 >= len(str1):
# return str2[index2:]
#
# if index2 >= len(str2):
# return str1[index1:]
#
# char1 = str1[index1]
# char2 = str2[index2]
#
# if char1 == char2:
# return char1 + self.shortestCommonSupersequence(str1, str2, index1+1, index2+1)
#
# return min(
# char1 + self.shortestCommonSupersequence(str1, str2, index1+1, index2),
# char2 + self.shortestCommonSupersequence(str1, str2, index1, index2+1),
# key=len
# )
# # Method 2 - Memoization: TLE
# import sys
# sys.setrecursionlimit(10000)
# class Solution:
# def shortestCommonSupersequence(
# self,
# str1: str,
# str2: str,
# index1: int = 0,
# index2: int = 0,
# cache: Optional[MutableMapping[int, MutableMapping[int, str]]] = None,
# ) -> str:
# if cache is None:
# cache = defaultdict(lambda: defaultdict(str))
#
# if index1 >= len(str1):
# return str2[index2:]
#
# if index2 >= len(str2):
# return str1[index1:]
#
# if index2 in cache[index1]:
# return cache[index1][index2]
#
# char1 = str1[index1]
# char2 = str2[index2]
#
# if char1 == char2:
# result = char1 + self.shortestCommonSupersequence(str1, str2, index1+1, index2+1, cache)
#
# else:
# result = min(
# char1 + self.shortestCommonSupersequence(str1, str2, index1+1, index2, cache),
# char2 + self.shortestCommonSupersequence(str1, str2, index1, index2+1, cache),
# key=len
# )
#
# cache[index1][index2] = result
# return result
# # Method 3 - Bottom-up DP: TLE
# class Solution:
# def shortestCommonSupersequence(self, str1: str, str2: str) -> str:
# cache: MutableMapping[int, MutableMapping[int, str]] = defaultdict(lambda: defaultdict(str))
#
# # Initialization:
# for index1 in range(1, len(str1)+1):
# cache[index1][0] = str1[:index1]
# for index2 in range(1, len(str2)+1):
# cache[0][index2] = str2[:index2]
#
# for index1, char1 in enumerate(str1, start=1):
# for index2, char2 in enumerate(str2, start=1):
# if char1 == char2:
# cache[index1][index2] = cache[index1-1][index2-1] + char1
# continue
#
# cache[index1][index2] = min(
# cache[index1][index2-1] + char2,
# cache[index1-1][index2] + char1,
# key=len
# )
#
# return cache[len(str1)][len(str2)]
# Method 4 - Bottom-up DP, optimized
class Solution:
    def shortestCommonSupersequence(self, str1: str, str2: str) -> str:
        """Return a shortest string containing both str1 and str2 as subsequences.

        Bottom-up DP over (prefix of str1) x (prefix of str2), keeping only the
        previous table row, so memory is O(len(str2)) strings instead of the
        full O(len(str1) * len(str2)) table.
        """
        # Row for the empty prefix of str1: SCS("", str2[:j]) is str2[:j].
        prev_row: list[str] = [str2[:j] for j in range(len(str2) + 1)]
        # Bug fix: the original left `cache` unbound when str1 was empty and
        # crashed on `return cache[-1]`; seeding `row` here makes "" a valid input.
        row = prev_row
        for i, ch1 in enumerate(str1, start=1):
            row = [''] * (len(str2) + 1)
            row[0] = str1[:i]  # first column: SCS(str1[:i], "") is str1[:i]
            for j, ch2 in enumerate(str2, start=1):
                if ch1 == ch2:
                    # Matching characters extend the diagonal cell.
                    row[j] = prev_row[j - 1] + ch1
                else:
                    # Same argument order as the original so min()'s tie-breaking
                    # (first argument wins on equal length) is preserved.
                    row[j] = min(
                        row[j - 1] + ch2,   # was cache[index1][index2-1]
                        prev_row[j] + ch1,  # was cache[index1-1][index2]
                        key=len,
                    )
            prev_row = row  # current row becomes the previous row
        return row[-1]
# ((str1, str2), expected) fixtures; the last entry is a long stress case.
# NOTE(review): the expected string is one valid shortest common supersequence —
# other strings of the same length may also be correct answers.
tests = [
    (
        ("abac", "cab",),
        "cabac",
    ),
    (
        ("akfwg", "fdawcgb",),
        "akfdawcgb",
    ),
    (
        ("atdznrqfwlfbcqkezrltzyeqvqemikzgghxkzenhtapwrmrovwtpzzsyiwongllqmvptwammerobtgmkpowndejvbuwbporfyroknrjoekdgqqlgzxiisweeegxajqlradgcciavbpgqjzwtdetmtallzyukdztoxysggrqkliixnagwzmassthjecvfzmyonglocmvjnxkcwqqvgrzpsswnigjthtkuawirecfuzrbifgwolpnhcapzxwmfhvpfmqapdxgmddsdlhteugqoyepbztspgojbrmpjmwmhnldunskpvwprzrudbmtwdvgyghgprqcdgqjjbyfsujnnssfqvjhnvcotynidziswpzhkdszbblustoxwtlhkowpatbypvkmajumsxqqunlxxvfezayrolwezfzfyzmmneepwshpemynwzyunsxgjflnqmfghsvwpknqhclhrlmnrljwabwpxomwhuhffpfinhnairblcayygghzqmotwrywqayvvgohmujneqlzurxcpnwdipldofyvfdurbsoxdurlofkqnrjomszjimrxbqzyazakkizojwkuzcacnbdifesoiesmkbyffcxhqgqyhwyubtsrqarqagogrnaxuzyggknksrfdrmnoxrctntngdxxechxrsbyhtlbmzgmcqopyixdomhnmvnsafphpkdgndcscbwyhueytaeodlhlzczmpqqmnilliydwtxtpedbncvsqauopbvygqdtcwehffagxmyoalogetacehnbfxlqhklvxfzmrjqofaesvuzfczeuqegwpcmahhpzodsmpvrvkzxxtsdsxwixiraphjlqawxinlwfspdlscdswtgjpoiixbvmpzilxrnpdvigpccnngxmlzoentslzyjjpkxemyiemoluhqifyonbnizcjrlmuylezdkkztcphlmwhnkdguhelqzjgvjtrzofmtpuhifoqnokonhqtzxmimp",
         "xjtuwbmvsdeogmnzorndhmjoqnqjnhmfueifqwleggctttilmfokpgotfykyzdhfafiervrsyuiseumzmymtvsdsowmovagekhevyqhifwevpepgmyhnagjtsciaecswebcuvxoavfgejqrxuvnhvkmolclecqsnsrjmxyokbkesaugbydfsupuqanetgunlqmundxvduqmzidatemaqmzzzfjpgmhyoktbdgpgbmjkhmfjtsxjqbfspedhzrxavhngtnuykpapwluameeqlutkyzyeffmqdsjyklmrxtioawcrvmsthbebdqqrpphncthosljfaeidboyekxezqtzlizqcvvxehrcskstshupglzgmbretpyehtavxegmbtznhpbczdjlzibnouxlxkeiedzoohoxhnhzqqaxdwetyudhyqvdhrggrszqeqkqqnunxqyyagyoptfkolieayokryidtctemtesuhbzczzvhlbbhnufjjocporuzuevofbuevuxhgexmckifntngaohfwqdakyobcooubdvypxjjxeugzdmapyamuwqtnqspsznyszhwqdqjxsmhdlkwkvlkdbjngvdmhvbllqqlcemkqxxdlldcfthjdqkyjrrjqqqpnmmelrwhtyugieuppqqtwychtpjmloxsckhzyitomjzypisxzztdwxhddvtvpleqdwamfnhhkszsfgfcdvakyqmmusdvihobdktesudmgmuaoovskvcapucntotdqxkrovzrtrrfvoczkfexwxujizcfiqflpbuuoyfuoovypstrtrxjuuecpjimbutnvqtiqvesaxrvzyxcwslttrgknbdcvvtkfqfzwudspeposxrfkkeqmdvlpazzjnywxjyaquirqpinaennweuobqvxnomuejansapnsrqivcateqngychblywxtdwntancarldwnloqyywrxrganyehkglbdeyshpodpmdchbcc",),
        "axjtuwbmvsdzeogmnzorndhmjoqnqjnhmfwlueifbcqkezrwltzyeqvqemggctttilmfokzgpghxotfykyzendhtfapwrmfierovwtpzzrsyuiwongllqseumvpzmymtvsdsowammerobtvagmekpowndhejvbuwbporfyroknrjoekdgqqlgzxihisfweevpepgxajqlrmyhnadgcjtsciavbpgqjzecswtdetmtallzybcukdztovxysgoavfgrejqkliirxuvnagwzmassthjecvfzkmyonglocmvjnxklecwqqvgrzpsswnigsrjthtmxyokuawirbkecfsauzrgbiydfsupuqanetgwounlpqmunhcapzdxwmfhvpfmduqmzidatemaqmzzzfjpdxgmddsdlhyokteubdgqoyepgbzmjkhmfjtspgoxjqbrmfspjmwmedhzrxavhnldugtnsuykpvwaprzrwludbameeqlutwdvgkyghgprqcdgqjjbzyefsujnnssfmqvjhnvcotynidziswpzhjykdszbblustomrxwtlhkiowpatbypwcrvkmajumsxthbebdqqurpphncthoslxxvjfezayreidbolwyekxezfqtzfylizmmneqcvvxepwhrcskstshpemynwzyunsxpgjflnqmfzgmbretpyehstavwpkxegmbtznqhpbclhrzdjlmzibnrouxljwabwpxkeiedzomwhuohffpfinoxhnairblcayygghzqmotwrywqayvvgohmujneqlzurxcpnwdiplwetyudofhyqvfduhrbsoxduggrlofszqeqkqqnrjomszjimrunxbqzyazyakgyoptfkolizeayojwkryidtctemtesuhbzcacnzzvhlbdibhnufjjocporuzuesvoifbuesvuxhgexmckbyfifcxhqntngqyaohfwyubtsrqarqdagkyogrnaxbcoouzbdvygpxjjxeugknksrfzdrmnoxrcapyamuwqtntqspszngdxxecyszhwqdqjxrsbymhtdlkwkvlkdbmzjngvdmhvbllqqlcemkqopyixxdomhnmvnsalldcfpthpkjdgqkyjrrjqqqpndcscbmmelrwyhtyugieyuppqqtaeodlwychtpjmlzoxsckhzyitomjzypqqmnilliysxzztdwtxhddvtvpleqdbwamfnhhkszsfgfcdvsqakyqmmusdvihopbvygqdktcwehffasudmgxmyoualogetovskvcapucehnbfxltotdqhklvxfzmkrjqofaesvuzrtrrfvoczeuqkfegxwpxujizcmahhfiqflpzbuuodsmpyfuoovypstrvkzxxtsdsxwixiraxjuuecphjlimbutnvqtiqvesawxinlrvzyxcwfspdlsttrgknbdcvvtkfqfzwudswtgjpepoiisxbvrfkkeqmdvlpazilzjnywxjyaquirnqpdvigpccnaengnweuobqvxmlznomuejantslzyjjapkxemynsrqivcatemoluhqifyonbnizgycjrhblmuylezwxtdkkzwntancpharlmdwhnkdguhelqzjgvjtrzofmtpuhifoqyywrxrganoyehkonglbdeyshqtzxmimpodpmdchbcc",
    ),
]
| 54.344595
| 1,700
| 0.734303
| 437
| 8,043
| 13.501144
| 0.187643
| 0.010678
| 0.015424
| 0.039661
| 0.20661
| 0.175254
| 0.175254
| 0.175254
| 0.144068
| 0.089831
| 0
| 0.028797
| 0.196941
| 8,043
| 147
| 1,701
| 54.714286
| 0.884657
| 0.383315
| 0
| 0.088235
| 0
| 0
| 0.766317
| 0.759522
| 0
| 1
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.058824
| 0
| 0.147059
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
eee026427c2c64f667140cdeacffc13fe937567e
| 127
|
py
|
Python
|
gecasmo/__init__.py
|
cornederuijt/gecasmo
|
ec7f67f82595c6609ab10c98d51432adf53bf82a
|
[
"MIT"
] | null | null | null |
gecasmo/__init__.py
|
cornederuijt/gecasmo
|
ec7f67f82595c6609ab10c98d51432adf53bf82a
|
[
"MIT"
] | null | null | null |
gecasmo/__init__.py
|
cornederuijt/gecasmo
|
ec7f67f82595c6609ab10c98d51432adf53bf82a
|
[
"MIT"
] | null | null | null |
"""gecasmo is a package for estimating click models"""
from .GCM import GCM
from .clickdefinitionreader import ClickDefinition
| 31.75
| 54
| 0.811024
| 16
| 127
| 6.4375
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125984
| 127
| 4
| 55
| 31.75
| 0.927928
| 0.377953
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
01605a22db720f5493727798259733a0f3f516f9
| 182
|
py
|
Python
|
grasp_det_seg/data_OCID/__init__.py
|
stefan-ainetter/grasp_det_seg_cnn
|
2492d5ec78f831c327e817246e822cdfce9e16ad
|
[
"BSD-3-Clause"
] | 21
|
2022-01-12T16:47:59.000Z
|
2022-03-29T07:33:03.000Z
|
grasp_det_seg/data_OCID/__init__.py
|
stefan-ainetter/grasp_det_seg_cnn
|
2492d5ec78f831c327e817246e822cdfce9e16ad
|
[
"BSD-3-Clause"
] | 6
|
2022-01-18T01:30:46.000Z
|
2022-03-21T12:06:06.000Z
|
grasp_det_seg/data_OCID/__init__.py
|
stefan-ainetter/grasp_det_seg_cnn
|
2492d5ec78f831c327e817246e822cdfce9e16ad
|
[
"BSD-3-Clause"
] | 2
|
2022-02-11T15:29:28.000Z
|
2022-03-23T13:48:22.000Z
|
from .dataset import OCIDDataset, OCIDTestDataset
from .misc import iss_collate_fn, read_boxes_from_file, prepare_frcnn_format
from .transform import OCIDTransform, OCIDTestTransform
| 60.666667
| 76
| 0.879121
| 23
| 182
| 6.652174
| 0.782609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082418
| 182
| 3
| 77
| 60.666667
| 0.916168
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0165c66c475d1feb0f36c1787698fc5541796ba8
| 2,771
|
py
|
Python
|
save_image.py
|
revathivijay/VJTI-Navigation
|
29d25c6ffa69937b73b75a064017799d6d583459
|
[
"MIT"
] | null | null | null |
save_image.py
|
revathivijay/VJTI-Navigation
|
29d25c6ffa69937b73b75a064017799d6d583459
|
[
"MIT"
] | null | null | null |
save_image.py
|
revathivijay/VJTI-Navigation
|
29d25c6ffa69937b73b75a064017799d6d583459
|
[
"MIT"
] | 2
|
2020-09-09T11:20:08.000Z
|
2022-02-24T21:20:22.000Z
|
import PIL
import numpy as np
import cv2
def save_image(output_images, count, src_number, dest_number, case_2=True):
    """Stitch 1-4 navigation snapshots side by side, append a reference strip
    below them, and save the result.

    Parameters
    ----------
    output_images : sequence of image file paths, one per snapshot (len >= count).
    count : number of snapshots to combine (1-4). Any other value is a no-op,
        matching the original if/elif chain.
    src_number, dest_number : identifiers used to name the output file
        ``final-output-images/{src_number}-{dest_number}.jpg``.
    case_2 : only used when ``count == 2`` — when False the two snapshots are
        swapped before concatenation (src is 2-0 and dest is 2-1 otherwise).

    The reference image is loaded from ``resized-new/reference{count}.jpg``.
    """
    if count not in (1, 2, 3, 4):
        return  # unknown counts did nothing in the original branch chain

    ref = PIL.Image.open(f'resized-new/reference{count}.jpg')

    if count == 1:
        _, h = ref.size  # PIL .size is (width, height), so h is the height
        img = np.array(PIL.Image.open(output_images[0]))
        ref = np.array(ref.resize((600, h)))  # match the fixed snapshot width
        # Single-snapshot path converts the whole stacked image BGR->RGB.
        im_final = cv2.cvtColor(cv2.vconcat([img, ref]), cv2.COLOR_BGR2RGB)
    else:
        order = list(range(count))
        if count == 2:
            if not case_2:
                order = [1, 0]
        elif '-2-1-' in output_images[1] and '-3-0-' in output_images[2]:
            # Filename heuristic: the first two snapshots arrived out of
            # order, so swap them before concatenating (counts 3 and 4).
            order[0], order[1] = 1, 0
        snapshots = [cv2.imread(output_images[i]) for i in order]
        # Multi-snapshot path converts only the reference strip BGR->RGB,
        # exactly as the original code did.
        ref = cv2.cvtColor(np.array(ref), cv2.COLOR_BGR2RGB)
        im_final = cv2.vconcat([cv2.hconcat(snapshots), ref])

    cv2.imwrite(f'final-output-images/{src_number}-{dest_number}.jpg', im_final)
| 44.693548
| 85
| 0.575604
| 395
| 2,771
| 3.886076
| 0.151899
| 0.218893
| 0.175896
| 0.246254
| 0.758958
| 0.744625
| 0.717264
| 0.649511
| 0.649511
| 0.649511
| 0
| 0.062437
| 0.283291
| 2,771
| 62
| 86
| 44.693548
| 0.710473
| 0.024901
| 0
| 0.566667
| 0
| 0
| 0.122774
| 0.115195
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016667
| false
| 0
| 0.05
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
01857d057488a333eb49bcb460bde3f5c4a0bf9f
| 1,602
|
py
|
Python
|
Metodo_super_OOP.py
|
DalfonsoLucia/Programmazione_OOP
|
a314ac9497698cc05ce71bfd0b3b2a042a42835d
|
[
"MIT"
] | null | null | null |
Metodo_super_OOP.py
|
DalfonsoLucia/Programmazione_OOP
|
a314ac9497698cc05ce71bfd0b3b2a042a42835d
|
[
"MIT"
] | null | null | null |
Metodo_super_OOP.py
|
DalfonsoLucia/Programmazione_OOP
|
a314ac9497698cc05ce71bfd0b3b2a042a42835d
|
[
"MIT"
] | null | null | null |
# super(): HOW TO DELEGATE TO THE BASE CLASS
# Why is the super() method used?
# Consider the following example:
class Animale():
    def __init__(self, specie):
        self.specie = specie

    def razza(self):
        return f"Io sono della specie {self.specie}"


class Cane(Animale):
    def __init__(self, specie, pelo):
        self.specie = specie  # duplicates Animale.__init__'s assignment — not the best solution
        self.pelo = pelo

    def razza(self):
        return "bau bau"
# The idea is to use child classes to extend the functionality of the base
# class and delegate to the base class everything that is common to a given
# family of classes (animals in this example);
# otherwise we defeat the whole point of inheritance.
class Animale():
    def __init__(self, specie):
        self.specie = specie

    def razza(self):
        return f"Io sono della specie {self.specie}"


class Cane(Animale):
    def __init__(self, specie, pelo):
        super().__init__(specie)  # super() is a call to the parent class (Animale)
        self.pelo = pelo
        super().razza()  # calls the base-class razza(); its return value is discarded

    def razza(self):
        return "bau bau"
# Invoking Cane asks Python to run its __init__, which in turn uses super()
# to run the __init__ defined in the superclass (Animale).
c = Cane("cane", pelo = "corto")
# That is what super() is for: it keeps inheritance useful in the presence of
# overriding, by letting the subclass delegate back to the base-class code.
| 34.085106
| 130
| 0.662921
| 217
| 1,602
| 4.801843
| 0.43318
| 0.086372
| 0.053743
| 0.069098
| 0.347409
| 0.347409
| 0.301344
| 0.245681
| 0.245681
| 0.245681
| 0
| 0.000842
| 0.259051
| 1,602
| 47
| 131
| 34.085106
| 0.877001
| 0.50437
| 0
| 0.875
| 0
| 0
| 0.116517
| 0
| 0
| 0
| 0
| 0.021277
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.583333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
0192151b809aebb1035eb2cce9e54096698bff80
| 165
|
py
|
Python
|
pythons-nest/hello-world.py
|
elliefarrer/first-go-at-python
|
303477f2f26e4d5a9148d30f90b42d3de28d1c8d
|
[
"MIT"
] | null | null | null |
pythons-nest/hello-world.py
|
elliefarrer/first-go-at-python
|
303477f2f26e4d5a9148d30f90b42d3de28d1c8d
|
[
"MIT"
] | null | null | null |
pythons-nest/hello-world.py
|
elliefarrer/first-go-at-python
|
303477f2f26e4d5a9148d30f90b42d3de28d1c8d
|
[
"MIT"
] | null | null | null |
# First go at Python.
print("Hello, World!")

message = "Hello, World!"
print(message)

# NOTE: print "Hello World!" is Python 2 syntax; use print("Hello World!") for Python 3.
| 23.571429
| 89
| 0.660606
| 24
| 165
| 4.541667
| 0.541667
| 0.366972
| 0.412844
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014184
| 0.145455
| 165
| 6
| 90
| 27.5
| 0.758865
| 0.581818
| 0
| 0
| 0
| 0
| 0.38806
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
0196bbaefc787bfce271ca75dee5202b65e218a9
| 49,601
|
py
|
Python
|
tests/test_day_states.py
|
kubamik/Manitobot
|
4185be44a1afd2a3b796020bc819c8c65dca0e07
|
[
"MIT"
] | null | null | null |
tests/test_day_states.py
|
kubamik/Manitobot
|
4185be44a1afd2a3b796020bc819c8c65dca0e07
|
[
"MIT"
] | null | null | null |
tests/test_day_states.py
|
kubamik/Manitobot
|
4185be44a1afd2a3b796020bc819c8c65dca0e07
|
[
"MIT"
] | 1
|
2020-07-01T17:21:48.000Z
|
2020-07-01T17:21:48.000Z
|
import collections
import functools
import unittest
from unittest.mock import AsyncMock, MagicMock
import discord
from manitobot.base_day_states import AcceptedChallenge, Challenge, HangSummary
from manitobot.day_states import InitialState, SearchOnlyState, Voting, Duel, DuelSummary, SearchingSummary, \
SearchingSummaryWithRevote, SearchingSummaryWithRandom, HangIfable, HangIfSummary, HangingSummary, \
HangingSummaryWithRevote, Evening
from bases import BaseStateTest
from manitobot.errors import SelfChallengeError, DuplicateChallenge, ChallengeNotFound, DuelAlreadyAccepted, \
AuthorIsSubjectChallengeError, ReportingLocked, DuelDoublePerson, NotDuelParticipant, MoreSearchedThanSearches, \
IllegalSearch, TooMuchHang, IllegalHang
from manitobot.permissions import SPEC_ROLES
from settings import GUN_ID
class TestInitialState(BaseStateTest):
    """Day state machine: construction and ending of the initial day state."""

    def test_new_method(self):
        # A fresh day state (as set up by BaseStateTest) is an InitialState.
        self.assertIsInstance(self.state, InitialState)

    def test_new_method_class_change(self):
        # When the day has already used all of the game's duels,
        # InitialState's construction yields a SearchOnlyState instead.
        self.day.duels = self.game.duels
        state = InitialState(self.game, self.day)
        self.assertIsInstance(state, SearchOnlyState)

    async def test_end(self):
        # Ending the initial state sends a message to the town channel and
        # transitions the day into SearchOnlyState.
        await self.state.end()
        self.utility.get_town_channel.return_value.send.assert_awaited()
        self.assertIsInstance(self.state, SearchOnlyState)
class TestReporting(BaseStateTest):
    """Reporting phase: collecting player reports and starting the search vote."""

    async def test_add_report(self):
        author = AsyncMock(discord.Member)
        subject = AsyncMock(discord.Member)
        await self.state.add_report(author, subject)
        reports = self.day.reports
        self.assertIn(subject, reports)
        self.assertIn(author, reports[subject])
        # Reporting the same subject twice must not duplicate the author.
        await self.state.add_report(author, subject)
        self.assertEqual(reports[subject], [author])

    async def test_remove_report(self):
        author = AsyncMock(discord.Member)
        subject = AsyncMock(discord.Member)
        await self.state.remove_report(author, subject)  # check if not raising
        await self.state.add_report(author, subject)
        await self.state.remove_report(author, subject)
        reports = self.day.reports
        # The subject key remains with an empty author list after removal.
        self.assertIn(subject, reports)
        self.assertNotIn(author, reports[subject])

    async def test_pen_reports(self):
        # Build reports M1->M2, M2->M2, M3->M1; the removed M1->M3 entry must
        # not appear in the summary (empty keys are dropped by pen_reports).
        member1, member2, member3 = self.mock_members(3)
        await self.state.add_report(member1, member2)
        await self.state.add_report(member2, member2)
        await self.state.add_report(member3, member1)
        await self.state.add_report(member1, member3)
        await self.state.remove_report(member1, member3)  # test removing empty keys in pen_reports method
        channel = AsyncMock()
        await self.state.pen_reports(channel)
        msg = channel.send.call_args[0][0]
        self.assertEqual(msg, '__Zgłoszenia (2):__\n**M2** *przez* M1, M2\n**M1** *przez* M3\n\nLiczba przeszukań: 2')

    async def test_voting(self):
        # With more reported players than searches, voting() enters the Voting
        # state with the reported players as options, ordered by report count.
        member1, member2, member3 = self.mock_members(3)
        await self.state.add_report(member1, member2)
        await self.state.add_report(member2, member2)
        await self.state.add_report(member3, member1)
        await self.state.add_report(member1, member3)  # being removed later
        await self.state.remove_report(member1, member3)  # test removing empty keys in voting method
        self.game.searches = 1
        await self.state.voting()
        self.assertIsInstance(self.state, Voting)
        self.assertEqual(self.state.title[0], 'Przeszukania')
        self.assertEqual(self.state.options, [['1', 'M2'], ['2', 'M1']])
        self.assertEqual(self.state.required_votes, self.game.searches)
        await self.state.end()
        self.assertIsInstance(self.state, SearchingSummary)

    async def test_voting_with_draw(self):
        # When the reported players already fit the number of searches, no vote
        # is needed and the state jumps straight to the summary.
        member1, member2, member3 = self.mock_members(3)
        self.utility.get_player_role().members = self.mock_members(5) + [member1, member2, member3]
        await self.state.add_report(member1, member2)
        await self.state.add_report(member2, member2)
        await self.state.add_report(member3, member1)
        await self.state.add_report(member1, member3)
        await self.state.remove_report(member1, member3)
        await self.state.voting()  # voting shouldn't be created
        self.assertIsInstance(self.state, SearchingSummary)

    async def test_voting_too_less_reports(self):
        # No reports at all: searched players are picked at random.
        self.utility.get_player_role().members = self.mock_members(5)
        await self.state.voting()
        self.assertIsInstance(self.state, SearchingSummaryWithRandom)
class TestChallenging(BaseStateTest):
def mock_decline_role(self, present=True, alive=True):
role = SPEC_ROLES['decline_duels']
self.game.role_map = roles = dict()
if present:
roles[role] = mock = MagicMock()
mock.alive = alive
async def test_adding_challenge_1(self):
member1, member2 = self.mock_members()
self.mock_decline_role(present=False)
await self.state.add_challenge(member1, member2)
self.assertNotIn((member1, member2), self.day.challenges)
channel = self.utility.get_town_channel()
role = SPEC_ROLES['decline_duels']
channel.send.assert_any_await(f'**M1** wyzywa **M2** na pojedynek.\n{role} nie żyje, '
f'więc pojedynek jest automatycznie przyjęty')
self.assertIsInstance(self.state, Duel)
async def test_adding_challenge_2(self):
member1, member2 = self.mock_members()
member2.id = '123456'
self.mock_decline_role()
await self.state.add_challenge(member1, member2)
self.assertIn((member1, member2), self.day.challenges)
self.assertIsInstance(self.day.challenges[0], Challenge)
channel = self.utility.get_town_channel()
channel.send.assert_awaited_once_with('**M1** wyzywa **M2** na pojedynek.\n<@123456> '
'czy chcesz przyjąć pojedynek? Użyj `&przyjmuję` lub `&odrzucam`')
self.assertNotIsInstance(self.state, Duel)
async def test_adding_challenge_3(self):
member1, member2 = self.mock_members()
self.mock_decline_role(alive=False)
await self.state.add_challenge(member1, member2)
self.assertIsInstance(self.state, Duel)
async def test_adding_challenge_4_self_challenge(self):
member1 = AsyncMock()
with self.assertRaises(SelfChallengeError):
await self.state.add_challenge(member1, member1)
async def test_adding_challenge_5_duplicating(self):
member1, member2 = self.mock_members()
self.mock_decline_role()
await self.state.add_challenge(member1, member2)
with self.assertRaises(DuplicateChallenge):
await self.state.add_challenge(member1, member2)
with self.assertRaises(DuplicateChallenge):
await self.state.add_challenge(member2, member1)
async def test_adding_challenge_6_multiple(self):
member1, member2, member3 = self.mock_members(3)
self.mock_decline_role()
await self.state.add_challenge(member1, member2)
self.mock_decline_role(alive=False)
await self.state.add_challenge(member1, member3)
self.assertIn(Challenge(member1, member2), self.day.challenges)
self.assertIn(AcceptedChallenge(member1, member3), self.day.challenges)
channel = self.utility.get_town_channel()
channel.send.assert_awaited_with('Ten pojedynek nie może się teraz rozpocząć\n__Aktualne wyzwania:__\n'
'**M1** vs. **M2**\n**M1** vs. **M3** - *przyjęte*\n'
'\nPozostało pojedynków: {}'.format(self.game.duels - self.day.duels))
self.assertNotIsInstance(self.state, Duel)
async def test_accepting_1(self):
member1, member2 = self.mock_members()
self.mock_decline_role()
await self.state.add_challenge(member1, member2)
await self.state.accept(member2)
self.assertIsInstance(self.state, Duel)
channel = self.utility.get_town_channel()
channel.send.assert_any_await('**M2** przyjmuje pojedynek od **M1**')
channel.send.assert_awaited_with(f'Rozpoczynamy pojedynek:\n<:legacy_gun:{GUN_ID}> **M1** vs.:shield: **M2**')
async def test_accepting_2(self):
member1, member2, member3 = self.mock_members(3)
self.mock_decline_role()
await self.state.add_challenge(member1, member2)
await self.state.add_challenge(member1, member3)
channel = self.utility.get_town_channel()
channel.send.reset_mock()
await self.state.accept(member3)
self.assertNotIsInstance(self.state, Duel)
self.assertEqual(channel.send.await_count, 2) # acceptance info + pen_challenges
self.assertIn(AcceptedChallenge(member1, member3), self.day.challenges)
self.assertEqual(len(self.day.challenges), 2)
async def test_accepting_3_not_challenged(self):
member1, member2 = self.mock_members()
self.mock_decline_role()
with self.assertRaises(ChallengeNotFound):
await self.state.accept(member1)
await self.state.add_challenge(member1, member2)
with self.assertRaises(ChallengeNotFound):
await self.state.accept(member1) # prevent accepting by author
async def test_accepting_4_already_accepted(self):
member1, member2, member3 = self.mock_members(3)
self.mock_decline_role()
await self.state.add_challenge(member1, member2)
await self.state.add_challenge(member1, member3)
await self.state.accept(member3)
with self.assertRaises(DuelAlreadyAccepted):
await self.state.accept(member3)
    async def test_declining_1(self):
        """Declining announces the rejection and removes the challenge;
        no duel starts."""
        member1, member2 = self.mock_members()
        self.mock_decline_role()
        await self.state.add_challenge(member1, member2)
        await self.state.decline(member2)
        self.assertNotIsInstance(self.state, Duel)
        channel = self.utility.get_town_channel()
        channel.send.assert_any_await('**M2** odrzuca pojedynek od **M1**')
        # Plain-tuple membership check — assumes Challenge compares equal to a
        # (author, subject) tuple (e.g. a NamedTuple) — TODO confirm.
        self.assertNotIn((member1, member2), self.day.challenges)
    async def test_declining_2(self):
        """Declining the last pending challenge lets the previously accepted
        one start as a duel."""
        member1, member2, member3 = self.mock_members(3)
        self.mock_decline_role()
        await self.state.add_challenge(member1, member2)
        await self.state.add_challenge(member1, member3)
        await self.state.accept(member3)
        channel = self.utility.get_town_channel()
        channel.send.reset_mock()
        await self.state.decline(member2)
        self.assertIsInstance(self.state, Duel)
        self.assertEqual(channel.send.await_count, 2)  # decline info + duel starting
        self.assertFalse(self.day.challenges)
    async def test_declining_3(self):
        """Declining while other challenges are still pending keeps the state
        unchanged and only announces the decline."""
        member1, member2, member3 = self.mock_members(3)
        self.mock_decline_role()
        await self.state.add_challenge(member1, member2)
        await self.state.add_challenge(member2, member3)
        await self.state.add_challenge(member3, member1)
        await self.state.accept(member1)
        channel = self.utility.get_town_channel()
        channel.send.reset_mock()
        await self.state.decline(member2)
        self.assertNotIsInstance(self.state, Duel)
        self.assertEqual(channel.send.await_count, 1)  # only decline info
        self.assertEqual(len(self.day.challenges), 2)
    async def test_declining_4_not_challenged(self):
        """decline() raises ChallengeNotFound for a member with no challenge."""
        member1 = AsyncMock()
        with self.assertRaises(ChallengeNotFound):
            await self.state.decline(member1)
    async def test_pen_challenges_1(self):
        """pen_challenges lists all challenges and the remaining duel count."""
        member1, member2, member3 = self.mock_members(3)
        self.mock_decline_role()
        await self.state.add_challenge(member1, member2)
        await self.state.add_challenge(member2, member3)
        await self.state.add_challenge(member3, member1)
        channel = AsyncMock()
        await self.state.pen_challenges(channel)
        channel.send.assert_awaited_with('__Wyzwania:__\n**M1** vs. **M2**\n**M2** vs. **M3**\n**M3** vs. **M1**\n\n'
                                         'Pozostało pojedynków: 2')
async def test_pen_challenges_2_no_challenges(self):
channel = AsyncMock()
await self.state.pen_challenges(channel)
channel.send.assert_awaited_with('Nie ma wyzwań\n\nPozostało pojedynków: 2')
channel.send.reset_mock()
self.day.duels += 1
await self.state.pen_challenges(channel)
channel.send.assert_awaited_with('Nie ma wyzwań\n\nPozostało pojedynków: 1')
channel.send.reset_mock()
self.day.duels += 1
await self.state.pen_challenges(channel)
channel.send.assert_awaited_with('Nie ma wyzwań\n\nPozostało pojedynków: 0')
    async def test_pen_challenges_3_accepted(self):
        """Accepted challenges are marked with '*przyjęte*' in the listing."""
        member1, member2, member3 = self.mock_members(3)
        self.mock_decline_role()
        await self.state.add_challenge(member1, member2)
        await self.state.add_challenge(member2, member3)
        await self.state.add_challenge(member3, member1)
        await self.state.accept(member1)
        channel = AsyncMock()
        await self.state.pen_challenges(channel)
        channel.send.assert_awaited_with('__Wyzwania:__\n**M1** vs. **M2**\n**M2** vs. **M3**\n'
                                         '**M3** vs. **M1** - *przyjęte*\n\nPozostało pojedynków: 2')
    async def test_start_duel_1(self):
        """start_duel() without participants raises IndexError."""
        with self.assertRaises(IndexError):
            await self.state.start_duel()
    async def test_start_duel_2(self):
        """With the duel limit exhausted, start_duel announces it and moves
        to SearchOnlyState instead of starting a duel."""
        member1, member2 = self.mock_members()
        self.game.duels = 0
        await self.state.start_duel(member1, member2)
        town = self.utility.get_town_channel.return_value
        town.send.assert_awaited_with('Limit pojedynków został wyczerpany')
        self.assertIsInstance(self.state, SearchOnlyState)
    async def test_start_duel_3(self):
        """start_duel consumes the matching pending challenge and leaves
        unrelated challenges queued."""
        member1, member2, member3 = self.mock_members(3)
        self.mock_decline_role()
        await self.state.add_challenge(member1, member2)
        await self.state.add_challenge(member3, member2)
        await self.state.start_duel(member2, member3)
        self.assertIn((member1, member2), self.day.challenges)
        self.assertEqual(len(self.day.challenges), 1)
        self.assertIsInstance(self.state, Duel)
    async def test_start_duel_4(self):
        """Like test_start_duel_3, but the duelists are passed in the same
        order as the stored challenge — the match must still be found."""
        member1, member2, member3 = self.mock_members(3)
        self.mock_decline_role()
        await self.state.add_challenge(member1, member2)
        await self.state.add_challenge(member3, member2)
        await self.state.start_duel(member3, member2)  # reverse member order
        self.assertIn((member1, member2), self.day.challenges)
        self.assertEqual(len(self.day.challenges), 1)
        self.assertIsInstance(self.state, Duel)
    async def test_start_duel_5(self):
        """A member cannot duel themselves."""
        member = self.mock_members(1)
        with self.assertRaises(AuthorIsSubjectChallengeError):
            await self.state.start_duel(member, member)
class TestSearchOnlyState(TestReporting):
    """Runs the inherited TestReporting tests against SearchOnlyState
    (reached by ending the previous state) plus lock-specific checks."""

    async def asyncSetUp(self) -> None:
        # NOTE(review): does not call super().asyncSetUp() — presumably the
        # base fixtures are built in setUp(); confirm against TestReporting.
        await self.state.end()

    async def test_lock(self):
        """lock() toggles the flag; while locked, add_report raises
        ReportingLocked, and works again once unlocked."""
        member1, member2 = self.mock_members()
        await self.state.lock()
        self.assertTrue(self.state.locked)
        with self.assertRaises(ReportingLocked):
            await self.state.add_report(member1, member2)
        await self.state.lock()  # second call unlocks
        self.assertFalse(self.state.locked)
        await self.state.add_report(member1, member2)
class TestUndoable(BaseStateTest):
    """Checks that a state transition can be reverted with undo()."""

    async def test_undo(self):
        """end() advances the state machine; undo() restores InitialState."""
        await self.state.end()
        await self.state.undo()
        self.assertIsInstance(self.state, InitialState)
def duel_decorator(coro):
    """Wrap a test coroutine so it runs with a duel already in progress.

    Mocks two members, starts a duel between them on ``self.state`` and
    then awaits the wrapped test with both members as extra arguments.
    """
    @functools.wraps(coro)
    async def wrapper(self):
        author, subject = self.mock_members()
        await self.state.start_duel(author, subject)
        await coro(self, author, subject)
    return wrapper
def transform_methods(decorator): # cannot use this in __new__, because of tests construction
def predicate(cls):
for name, meth in vars(cls).items():
if callable(meth) and name.startswith('test'):
setattr(cls, name, decorator(meth))
return cls
return predicate
@transform_methods(duel_decorator)
class TestDuel(BaseStateTest):
    """Tests for the Duel state.

    Every ``test*`` method is wrapped by duel_decorator, so each test starts
    with a duel already running and receives the two mocked duelists as
    extra positional arguments.
    """

    async def test_fields(self, member1, member2):
        self.assertEqual(self.state.author, member1)
        self.assertEqual(self.state.subject, member2)

    async def test_cancel(self, *_):
        """Manitou cancel announces and returns to InitialState."""
        await self.state.cancel()
        town = self.utility.get_town_channel()
        town.send.assert_awaited_with('Manitou anulował trwający pojedynek')
        self.assertIsInstance(self.state, InitialState)

    async def test_on_die_1(self, member1, _):
        # A bystander's death leaves the duel running; a duelist's death
        # cancels it with an announcement.
        member3 = self.mock_members(1)
        await self.state.on_die(member3)
        self.assertIsInstance(self.state, Duel)
        await self.state.on_die(member1)
        self.assertIsInstance(self.state, InitialState)
        town = self.utility.get_town_channel()
        town.send.assert_awaited_with('Pojedynek został anulowany z powodu śmierci jednego z uczestników.')

    async def test_on_die_2(self, _, member2):
        """Death of the subject also cancels the duel."""
        await self.state.on_die(member2)
        self.assertIsInstance(self.state, InitialState)
        town = self.utility.get_town_channel()
        town.send.assert_awaited_with('Pojedynek został anulowany z powodu śmierci jednego z uczestników.')

    async def test_set_message(self, *_):
        msg = AsyncMock(discord.Message)
        await self.state.set_message(msg)
        msg.edit.assert_awaited_with(content='**Pojedynek:**\n**M1** vs. **M2**', embed=None)

    async def test_start_duel_1(self, *_):
        # Starting another duel while one runs queues it as already accepted.
        member1, member2 = self.mock_members()
        await self.state.start_duel(member1, member2)
        self.assertIn(AcceptedChallenge(member1, member2), self.day.challenges)

    async def test_start_duel_2(self, *_):
        # A matching pending Challenge is upgraded and moved to the front.
        member1, member2, member3 = self.mock_members(3)
        self.day.challenges = collections.deque([AcceptedChallenge(member1, member2), Challenge(member3, member2)])
        await self.state.start_duel(member2, member3)
        self.assertEqual(self.day.challenges,
                         collections.deque([AcceptedChallenge(member2, member3), AcceptedChallenge(member1, member2)]))

    async def test_start_duel_3(self, *_):
        """Self-duel is rejected even while another duel runs."""
        member = self.mock_members(1)
        with self.assertRaises(AuthorIsSubjectChallengeError):
            await self.state.start_duel(member, member)

    async def test_voting(self, member1, member2):
        """voting() opens a Voting state seeded with both duelists, an
        abstain option and the duel metadata."""
        await self.state.voting()
        self.assertIsInstance(self.state, Voting)
        self.assertEqual(self.state.title[0], 'Pojedynek')
        self.assertEqual(self.state.options, [['1', 'M1'], ['2', 'M2'], ['3', 'Wstrzymuję_Się']])
        self.assertEqual(self.state.required_votes, 1)
        self.assertEqual(self.state.metadata, {'author': member1, 'subject': member2})

    async def test_voting_cancel(self, member1, member2):
        """Cancelling the vote restores the Duel state with fields intact."""
        await self.state.voting()
        await self.state.cancel()
        self.assertIsInstance(self.state, Duel)
        self.assertEqual(self.state.author, member1)
        self.assertEqual(self.state.subject, member2)
class TestDuelSummary(BaseStateTest):
    """Tests for DuelSummary: winner/loser resolution from a vote summary,
    winner/loser role bookkeeping, and the transitions driven by end()."""

    def mock_revoling(self, member1, member2, rev1=False, rev2=False):
        # Register mocked players for both duelists.  rev1/rev2 control each
        # role's can_use() answer — presumably a revolver-like ability that
        # wins the duel regardless of votes (see test_init_1/2) — TODO confirm
        # the role's name ("revoling" looks like a typo for "revolving").
        p1, p2 = MagicMock(), MagicMock()
        self.game.player_map.update({member1: p1, member2: p2})
        p1.role_class = r1 = MagicMock()
        p2.role_class = r2 = MagicMock()
        r1.can_use.return_value = rev1
        r2.can_use.return_value = rev2
        r1.die = AsyncMock()
        r2.die = AsyncMock()

    async def change_state(self, summary=None, rev1=False, rev2=False):
        """Push a DuelSummary for two fresh mocked members and return them.

        *summary* maps vote-option labels (the members' display names plus
        the abstain label) to iterables of votes; defaults to all-empty.
        """
        member1, member2 = self.mock_members()
        if summary is None:
            summary = {member1.display_name: [], member2.display_name: [], 'Wstrzymuję_Się': []}
        self.mock_revoling(member1, member2, rev1, rev2)
        await self.day.push_state(DuelSummary, author=member1, subject=member2, summary=summary)
        return member1, member2

    async def test_undo(self):
        """undo() returns to the Duel state with author/subject restored."""
        member1, member2 = await self.change_state()
        self.assertEqual(self.state.metadata, {'author': member1, 'subject': member2})
        await self.state.undo()
        self.assertIsInstance(self.state, Duel)
        self.assertEqual(self.state.author, member1)
        self.assertEqual(self.state.subject, member2)

    async def test_set_message(self):
        await self.change_state()
        msg = AsyncMock(discord.Message)
        await self.state.set_message(msg)
        msg.edit.assert_awaited_with(content='**Pojedynek - podsumowanie**\n**M1** vs. **M2**', embed=None)

    async def test_on_die(self):
        """A duelist's death during the summary aborts to InitialState."""
        member1, member2 = await self.change_state()
        await self.state.on_die(member1)
        self.assertIsInstance(self.state, InitialState)

    async def test_start_duel(self):
        """Starting a new duel from the summary queues it as accepted."""
        await self.change_state()
        member1, member2 = self.mock_members()
        await self.state.start_duel(member1, member2)
        self.assertIn(AcceptedChallenge(member1, member2), self.day.challenges)

    # --- winner/loser resolution: the ability (rev*) beats the vote count;
    # with no (or both) abilities, the higher vote count wins. ---

    async def test_init_1_first_rev(self):
        summary = {'M1': [], 'M2': range(10), 'Wstrzymuję_Się': []}
        member1, member2 = await self.change_state(summary=summary, rev1=True)
        self.assertEqual(self.state.winners, [member1])
        self.assertEqual(self.state.losers, [member2])

    async def test_init_2_second_rev(self):
        summary = {'M1': range(20), 'M2': range(10), 'Wstrzymuję_Się': []}
        member1, member2 = await self.change_state(summary=summary, rev2=True)
        self.assertEqual(self.state.winners, [member2])
        self.assertEqual(self.state.losers, [member1])

    async def test_init_3_no_rev(self):
        summary = {'M1': [], 'M2': range(10), 'Wstrzymuję_Się': []}
        member1, member2 = await self.change_state(summary=summary)
        self.assertEqual(self.state.winners, [member2])
        self.assertEqual(self.state.losers, [member1])

    async def test_init_4_no_rev(self):
        summary = {'M1': range(20), 'M2': range(10), 'Wstrzymuję_Się': []}
        member1, member2 = await self.change_state(summary=summary)
        self.assertEqual(self.state.winners, [member1])
        self.assertEqual(self.state.losers, [member2])

    async def test_init_5_no_rev(self):
        # Abstentions do not influence the outcome.
        summary = {'M1': range(20), 'M2': range(10), 'Wstrzymuję_Się': range(30)}
        member1, member2 = await self.change_state(summary=summary)
        self.assertEqual(self.state.winners, [member1])
        self.assertEqual(self.state.losers, [member2])

    async def test_init_6_two_revs(self):
        # Both duelists having the ability cancels it out: votes decide.
        summary = {'M1': range(5), 'M2': range(10), 'Wstrzymuję_Się': range(30)}
        member1, member2 = await self.change_state(summary=summary, rev1=True, rev2=True)
        self.assertEqual(self.state.winners, [member2])
        self.assertEqual(self.state.losers, [member1])

    async def test_init_7_nonzero_draw(self):
        # A non-zero tie means both duelists lose.
        summary = {'M1': range(10), 'M2': range(10), 'Wstrzymuję_Się': range(30)}
        member1, member2 = await self.change_state(summary=summary)
        self.assertEqual(self.state.winners, [])
        self.assertEqual(self.state.losers, [member1, member2])

    async def test_init_8_nonzero_draw_revs(self):
        summary = {'M1': range(10), 'M2': range(10), 'Wstrzymuję_Się': range(30)}
        member1, member2 = await self.change_state(summary=summary, rev1=True, rev2=True)
        self.assertEqual(self.state.winners, [])
        self.assertEqual(self.state.losers, [member1, member2])

    async def test_init_9_zero_draw(self):
        # A zero-zero tie means both duelists win (nobody dies).
        summary = {'M1': list(), 'M2': list(), 'Wstrzymuję_Się': range(30)}
        member1, member2 = await self.change_state(summary=summary)
        self.assertEqual(self.state.winners, [member1, member2])
        self.assertEqual(self.state.losers, [])

    async def test_init_10_zero_draw_revs(self):
        summary = {'M1': list(), 'M2': list(), 'Wstrzymuję_Się': range(30)}
        member1, member2 = await self.change_state(summary=summary, rev1=True, rev2=True)
        self.assertEqual(self.state.winners, [member1, member2])
        self.assertEqual(self.state.losers, [])

    # --- async init: roles are assigned and the verdict is announced. ---

    async def test_async_init_1(self):
        summary = {'M1': range(10), 'M2': list(), 'Wstrzymuję_Się': range(30)}
        member1, member2 = await self.change_state(summary)
        self.utility.add_roles.assert_any_await([member1], self.utility.get_duel_winner_role())
        self.utility.add_roles.assert_any_await([member2], self.utility.get_duel_loser_role())
        town = self.utility.get_town_channel()
        town.send.assert_awaited_with('Pojedynek ma wygrać **M1**. Zginąć ma **M2**')

    async def test_async_init_2_nonzero_draw(self):
        summary = {'M1': range(10), 'M2': range(10), 'Wstrzymuję_Się': range(30)}
        member1, member2 = await self.change_state(summary)
        self.utility.add_roles.assert_any_await([], self.utility.get_duel_winner_role())
        self.utility.add_roles.assert_any_await([member1, member2], self.utility.get_duel_loser_role())
        town = self.utility.get_town_channel()
        town.send.assert_awaited_with('W wyniku pojedynku mają zginąć obaj pojedynkujący się')

    async def test_async_init_3_zero_draw(self):
        summary = {'M1': list(), 'M2': list(), 'Wstrzymuję_Się': range(30)}
        member1, member2 = await self.change_state(summary)
        self.utility.add_roles.assert_any_await([member1, member2], self.utility.get_duel_winner_role())
        self.utility.add_roles.assert_any_await([], self.utility.get_duel_loser_role())
        town = self.utility.get_town_channel()
        town.send.assert_awaited_with('W wyniku pojedynku nikt nie ginie *(na razie)*')

    async def test_cleanup(self):
        """cleanup() strips both duel roles from everyone who holds one."""
        member1, member2 = await self.change_state()
        winner_role = self.utility.get_duel_winner_role()
        loser_role = self.utility.get_duel_loser_role()
        winner_role.members = [member1, member2]
        loser_role.members = []
        await self.state.cleanup()
        self.utility.remove_roles.assert_awaited_with([member1, member2], winner_role, loser_role)

    async def test_change_winner(self):
        """change_winner swaps the winner/loser roles between the duelists."""
        member1, member2 = await self.change_state()
        winner_role = self.utility.get_duel_winner_role()
        loser_role = self.utility.get_duel_loser_role()
        await self.state.change_winner(member1)
        member1.add_roles.assert_awaited_with(winner_role)
        member1.remove_roles.assert_awaited_with(loser_role)
        member2.add_roles.assert_awaited_with(loser_role)
        member2.remove_roles.assert_awaited_with(winner_role)

    async def test_end_1(self):
        """end() announces the winner, counts the duel, kills the loser and
        returns to InitialState."""
        member1, member2 = await self.change_state()
        winner_role = self.utility.get_duel_winner_role()
        loser_role = self.utility.get_duel_loser_role()
        winner_role.members = [member1]
        loser_role.members = [member2]
        town = self.utility.get_town_channel()
        await self.state.end()
        town.send.assert_awaited_with('Pojedynek wygrywa **M1**')
        self.assertEqual(self.day.duels, 1)
        self.game.player_map[member2].role_class.die.assert_awaited_with('duel')
        self.game.player_map[member1].role_class.die.assert_not_awaited()
        self.assertIsInstance(self.state, InitialState)

    async def test_end_2_duel_limit(self):
        """Reaching the duel limit after end() moves to SearchOnlyState."""
        await self.change_state()
        self.day.duels = 1
        await self.state.end()
        self.assertIsInstance(self.state, SearchOnlyState)

    async def test_end_3_two_winners(self):
        summary = {'M1': range(10), 'M2': list(), 'Wstrzymuję_Się': range(15)}
        member1, member2 = await self.change_state(summary)
        winner_role = self.utility.get_duel_winner_role()
        loser_role = self.utility.get_duel_loser_role()
        winner_role.members = [member2, member1]
        loser_role.members = []
        town = self.utility.get_town_channel()
        await self.state.end()
        town.send.assert_awaited_with('W wyniku pojedynku nikt nie ginie')
        self.game.player_map[member2].role_class.die.assert_not_awaited()
        self.game.player_map[member1].role_class.die.assert_not_awaited()

    async def test_end_4_no_winners(self):
        """Both duelists as losers: single announcement, both die."""
        member1, member2 = await self.change_state()
        winner_role = self.utility.get_duel_winner_role()
        loser_role = self.utility.get_duel_loser_role()
        winner_role.members = []
        loser_role.members = [member2, member1]
        town = self.utility.get_town_channel()
        await self.state.end()
        awaits = town.send.await_args_list
        self.assertEqual(len(awaits), 1)
        town.send.assert_awaited_with('W wyniku pojedynku nikt nie ginie *(na razie)*')
        self.game.player_map[member2].role_class.die.assert_awaited_with('duel')
        self.game.player_map[member1].role_class.die.assert_awaited_with('duel')

    async def test_end_5_starting_duel(self):
        """An accepted challenge queued during the summary starts right after
        end()."""
        await self.change_state()
        member1, member2 = self.mock_members()
        await self.state.start_duel(member1, member2)
        await self.state.end()
        self.assertIsInstance(self.state, Duel)
        self.assertEqual(self.state.author, member1)
        self.assertEqual(self.state.subject, member2)

    async def test_end_6_not_starting_duel(self):
        """A merely pending (unaccepted) challenge does not auto-start."""
        await self.change_state()
        member1, member2 = self.mock_members()
        challenges = self.day.challenges
        challenges.append(Challenge(member1, member2))
        await self.state.end()
        self.assertIsInstance(self.state, InitialState)

    async def test_end_7_double_role(self):
        """Holding both duel roles at once is rejected."""
        member1, member2 = await self.change_state()
        winner_role = self.utility.get_duel_winner_role()
        loser_role = self.utility.get_duel_loser_role()
        winner_role.members = [member2]
        loser_role.members = [member2, member1]
        with self.assertRaises(DuelDoublePerson) as cm:
            await self.state.end()
        self.assertEqual(cm.exception.msg, 'M2 jest zwycięzcą i przegranym jednocześnie')

    async def test_end_8_no_participating(self):
        """A non-duelist holding a duel role is rejected."""
        await self.change_state()
        member = self.mock_members(1)
        winner_role = self.utility.get_duel_winner_role()
        loser_role = self.utility.get_duel_loser_role()
        winner_role.mention = '<WYGRANY>'
        winner_role.members = [member]
        loser_role.members = []
        with self.assertRaises(NotDuelParticipant) as cm:
            await self.state.end()
        self.assertEqual(cm.exception.msg, 'M1 ma rolę <WYGRANY>, a nie pojedynkuje się')

    # Needs a live duel first, hence the explicit decorator (this class is
    # not wrapped by @transform_methods).
    @duel_decorator
    async def test_voting(self, member1, member2):
        """An ended duel vote with no ballots lands in DuelSummary as a
        zero-zero draw: both win, nobody loses."""
        await self.state.voting()
        self.mock_revoling(member1, member2)
        await self.state.end()
        self.assertIsInstance(self.state, DuelSummary)
        self.assertEqual(self.state.author, member1)
        self.assertEqual(self.state.subject, member2)
        self.assertEqual(self.state.winners, [member1, member2])
        self.assertEqual(self.state.losers, [])
class TestSearchingSummary(BaseStateTest):
    """Tests for SearchingSummary: who gets searched based on votes, reports
    and the alive-player pool, plus the revote/random fallback variants."""

    async def change_state(self, summary=None, searches=0, other=True, reports=0, alive=0, dead=None):
        """Build a synthetic SearchingSummary and return all mocked members.

        summary  -- vote counts; member i (i < len(summary)) gets summary[i]
                    votes.  searches -- count of extra members passed directly
                    in the ``searches`` list.  reports -- count of extra
                    members that only appear in day.reports.  alive -- count
                    of extra members that are only alive players.  other --
                    forward the voted members as the ``other`` pool.  dead --
                    indices excluded from the player role.
        """
        if summary is None:
            summary = list()
        m = len(summary)
        n = m + searches + reports + alive
        members = self.mock_members(n)
        votes = summary
        summary = {}
        for mem, v in zip(members[:m], votes):
            summary[mem] = range(v)
        other = members[:m] if other else None
        # Reports cover voted + directly-searched + report-only members,
        # with strictly increasing report counts.
        self.day.reports = dict(zip(members[:m+reports+searches], range(1, n-alive+1)))
        self.game.player_map = dict(zip(members, range(n)))
        # Reuse of the name: from here on ``searches`` is the member list.
        searches = members[m: m+searches]
        if dead is not None:
            players = [mem for i, mem in enumerate(members) if i not in dead]
        else:
            players = members
        self.utility.get_player_role().members = players
        await self.day.push_state(SearchingSummary, summary=summary, searches=searches, other=other)
        return members

    async def test_init_1(self):
        members = await self.change_state([1, 3, 1, 6, 2, 0], other=False)
        self.assertIsInstance(self.state, SearchingSummary)
        self.assertSetEqual(set(self.state.searches), {members[1], members[3]})
        self.assertEqual(set(self.state.other), {members[0], members[2], members[4], members[5]})

    async def test_init_2(self):
        # A vote tie for the remaining slot triggers the revote variant.
        members = await self.change_state([1, 3, 1, 6, 2, 3], other=False)
        self.assertIsInstance(self.state, SearchingSummaryWithRevote)
        self.assertEqual(self.state.searches, [members[3]])
        self.assertSetEqual(set(self.state.other), {members[1], members[5]})

    async def test_init_3(self):
        # Not enough candidates from votes/reports -> random-fill variant.
        members = await self.change_state(reports=1, alive=2)
        self.assertIsInstance(self.state, SearchingSummaryWithRandom)
        self.assertEqual(self.state.searches, [members[0]])
        self.assertSetEqual(set(self.state.other), {members[1], members[2]})

    async def test_init_4(self):
        members = await self.change_state([1, 2, 3], 1, reports=1, alive=1)
        self.assertIsInstance(self.state, SearchingSummary)
        self.assertSetEqual(set(self.state.searches), {members[3], members[2]})
        self.assertSetEqual(set(self.state.other), {members[0], members[1]})

    async def test_init_5(self):
        members = await self.change_state(summary=[1, 2], searches=1, reports=3, dead=[0, 1, 4])
        self.assertIsInstance(self.state, SearchingSummaryWithRandom)
        self.assertEqual(self.state.searches, [members[2]])
        self.assertSetEqual(set(self.state.other), {members[5], members[3]})

    async def test_init_6(self):
        members = await self.change_state(summary=[7, 5], searches=1, dead=[2], alive=10)
        self.assertIsInstance(self.state, SearchingSummary)
        self.assertSetEqual(set(self.state.searches), {members[2], members[0]})
        self.assertSetEqual(set(self.state.other), {members[1]})

    async def test_init_7(self):
        members = await self.change_state(summary=[4, 8], searches=1, reports=3, dead=[2], alive=10)
        self.assertIsInstance(self.state, SearchingSummary)
        self.assertSetEqual(set(self.state.searches), {members[2], members[1]})
        self.assertSetEqual(set(self.state.other), {members[0]})

    async def test_init_8(self):
        members = await self.change_state(summary=[1, 2], searches=1, reports=1, dead=[0, 1, 3], alive=2)
        self.assertIsInstance(self.state, SearchingSummaryWithRandom)
        self.assertEqual(self.state.searches, [members[2]])
        self.assertSetEqual(set(self.state.other), {members[5], members[4]})

    async def test_init_9(self):
        members = await self.change_state(reports=2, alive=3, other=False)
        self.assertIsInstance(self.state, SearchingSummary)
        self.assertSetEqual(set(self.state.searches), {members[0], members[1]})

    async def test_voting_1(self):
        """A revote opens a Voting over the tied members; cancel restores the
        revote state."""
        members = await self.change_state(summary=[1, 5, 4, 3, 4], other=False, alive=15)
        await self.state.voting()
        self.assertIsInstance(self.state, Voting)
        self.assertEqual(self.state.options, [['1', 'M3'], ['2', 'M5']])
        await self.state.cancel()
        self.assertIsInstance(self.state, SearchingSummaryWithRevote)
        self.assertEqual(self.state.searches, [members[1]])
        self.assertSetEqual(set(self.state.other), {members[2], members[4]})

    async def test_voting_2(self):
        members = await self.change_state(summary=[1, 5, 4, 3, 4], other=False, alive=15)
        await self.state.voting()
        await self.state.end()
        self.assertIsInstance(self.state, SearchingSummaryWithRevote)
        self.assertEqual(self.state.searches, [members[1]])
        self.assertSetEqual(set(self.state.other), {members[2], members[4]})

    async def test_async_init_1(self):
        await self.change_state(summary=[1, 2, 3, 4, 5], other=False, alive=3)
        town = self.utility.get_town_channel()
        town.send.assert_awaited_with('Przeszukani zostaną:\n**M5**\n**M4**\n')

    async def test_async_init_2(self):
        await self.change_state(summary=[3, 6, 3, 1], other=False, alive=2)
        town = self.utility.get_town_channel()
        town.send.assert_awaited_with(
            'Przeszukani zostaną:\n**M2**\n\nPotrzebne jest dodatkowe głosowanie dla:\n**M1**\n**M3**\n')

    async def test_async_init_3(self):
        await self.change_state(summary=[3, 2, 3, 3], other=False, alive=4)
        town = self.utility.get_town_channel()
        town.send.assert_awaited_with('Na razie nikt nie ma zostać przeszukany\n\n'
                                      'Potrzebne jest dodatkowe głosowanie dla:\n**M1**\n**M3**\n**M4**\n')

    async def test_async_init_4(self):
        await self.change_state(reports=1, alive=3)
        town = self.utility.get_town_channel()
        town.send.assert_awaited_with('Przeszukani zostaną:\n**M1**\n')

    async def test_async_init_5(self):
        await self.change_state(alive=3)
        town = self.utility.get_town_channel()
        town.send.assert_awaited_with('Na razie nikt nie ma zostać przeszukany\n')

    async def test_end_1(self):
        """end() announces the searched members and moves to HangIfable."""
        await self.change_state(summary=[1, 3, 5, 4])
        self.utility.get_searched_role().members = searches = self.state.searches
        await self.state.end()
        town = self.utility.get_town_channel()
        town.send.assert_any_await('Przeszukani zostają:\n**M3**\n**M4**\n')
        self.assertIsInstance(self.state, HangIfable)
        self.assertEqual(self.state.searched, searches)

    async def test_end_2(self):
        await self.change_state(summary=[1, 3, 5, 4, 5, 5])
        self.utility.get_searched_role().members = self.state.searches
        await self.state.end()
        town = self.utility.get_town_channel()
        town.send.assert_awaited_with('Nikt nie zostaje przeszukany')

    async def test_end_3(self):
        """More role-marked members than computed searches is rejected."""
        members = await self.change_state(summary=[1, 3, 4, 5])
        self.utility.get_searched_role().members = members
        with self.assertRaises(MoreSearchedThanSearches):
            await self.state.end()

    async def test_end_4(self):
        """A searched-role holder who is not a player is rejected."""
        await self.change_state(summary=[1, 3, 4, 5])
        self.utility.get_searched_role().members = self.mock_members(2)
        with self.assertRaises(IllegalSearch) as cm:
            await self.state.end()
        self.assertEqual(cm.exception.msg, 'M1 ma zostać przeszukany(-a) a nie gra')

    async def test_undo(self):
        await self.change_state(reports=2, alive=3)
        await self.state.undo()
        self.assertIsInstance(self.state, SearchOnlyState)

    async def test_random_1(self):
        """random() fills the missing search slot from the remaining pool."""
        members = await self.change_state(reports=1, alive=4, other=False)
        await self.state.random()
        self.assertIsInstance(self.state, SearchingSummary)
        self.assertEqual(len(self.state.searches), 2)
        self.assertIn(members[0], self.state.searches)
        self.assertIn(self.state.searches[1], members[1:])

    async def test_random_2(self):
        """random() breaks a tie by picking one of the tied members."""
        members = await self.change_state(summary=[3, 5, 7, 0, 5], alive=3, other=False)
        await self.state.random()
        self.assertIsInstance(self.state, SearchingSummary)
        self.assertEqual(len(self.state.searches), 2)
        self.assertIn(members[2], self.state.searches)
        self.assertIn(self.state.searches[1], [members[1], members[4]])
class TestHangIfable(BaseStateTest):
    """Tests for HangIfable: the 'do we hang at all?' yes/no vote."""

    async def change_state(self):
        """Push a HangIfable with two mocked searched members."""
        members = self.mock_members()
        await self.day.push_state(HangIfable, searched=members)
        return members

    async def test_voting_1(self):
        """voting() opens a yes/no Voting; cancel restores HangIfable with
        the searched list intact."""
        members = await self.change_state()
        await self.state.voting()
        self.assertIsInstance(self.state, Voting)
        self.assertEqual(self.state.title[0], 'Czy wieszamy?')
        self.assertEqual(self.state.options, [['t', 'Tak'], ['n', 'Nie']])
        await self.state.cancel()
        self.assertIsInstance(self.state, HangIfable)
        self.assertEqual(self.state.searched, members)

    async def test_voting_2(self):
        """Ending the vote moves to HangIfSummary, carrying the searched."""
        members = await self.change_state()
        await self.state.voting()
        await self.state.end()
        self.assertIsInstance(self.state, HangIfSummary)
        self.assertEqual(self.state.searched, members)
class TestHangIfSummary(BaseStateTest):
    """Tests for HangIfSummary: interpreting the yes/no hang vote and moving
    on to the hanging vote itself."""

    async def change_state(self, summary=None):
        """Push a HangIfSummary; *summary* is a [yes_count, no_count] pair
        mapped onto the 'Tak'/'Nie' options, or None for no vote at all."""
        members = self.mock_members()
        if summary:
            await self.day.push_state(HangIfSummary, summary=dict(zip(['Tak', 'Nie'], (range(i) for i in summary))),
                                      searched=members)
        else:
            await self.day.push_state(HangIfSummary, searched=members)
        return members

    async def test_init_1(self):
        # Strict yes-majority hangs.
        members = await self.change_state([10, 9])
        self.assertEqual(self.state.searched, members)
        self.assertTrue(self.state.hang)

    async def test_init_2(self):
        # A tie does not hang.
        await self.change_state([9, 9])
        self.assertFalse(self.state.hang)

    async def test_init_3(self):
        await self.change_state([0, 5])
        self.assertFalse(self.state.hang)
        self.assertIsNotNone(self.state.hang)  # explicit False, not "no vote"

    async def test_init_4(self):
        # No summary at all leaves the decision undetermined (None).
        await self.change_state()
        self.assertIsNone(self.state.hang)

    async def test_set_message_1(self):
        # With no vote, the pinned message defaults to "hang".
        await self.change_state()
        msg = AsyncMock()
        await self.state.set_message(msg)
        msg.edit.assert_awaited_with(content='**Przed wieszaniem** - wieszamy', embed=None)

    async def test_set_message_2(self):
        await self.change_state([1, 5])
        msg = AsyncMock()
        await self.state.set_message(msg)
        msg.edit.assert_awaited_with(content='**Przed wieszaniem** - nie wieszamy', embed=None)

    async def test_set_message_3(self):
        await self.change_state([10, 1])
        msg = AsyncMock()
        await self.state.set_message(msg)
        msg.edit.assert_awaited_with(content='**Przed wieszaniem** - wieszamy', embed=None)

    async def test_async_init_1(self):
        await self.change_state([1, 2])
        town = self.utility.get_town_channel()
        town.send.assert_awaited_with('Miasto idzie spać.')

    async def test_async_init_2(self):
        await self.change_state([3, 2])
        town = self.utility.get_town_channel()
        town.send.assert_awaited_with('Decyzją miasta wieszamy.')

    async def test_async_init_3(self):
        # No vote summary -> nothing is announced.
        await self.change_state()
        town = self.utility.get_town_channel()
        town.send.assert_not_awaited()

    async def test_undo(self):
        members = await self.change_state([12, 5])
        await self.state.undo()
        self.assertIsInstance(self.state, HangIfable)
        self.assertEqual(self.state.searched, members)

    async def test_voting_1(self):
        members = await self.change_state([12, 5])
        self.utility.get_player_role().members = members
        await self.state.voting()
        self.assertIsInstance(self.state, Voting)
        self.assertEqual(self.state.title[0], 'Wieszanie')
        self.assertEqual(self.state.options, [['1', 'M1'], ['2', 'M2']])
        await self.state.end()
        self.assertIsInstance(self.state, HangSummary)  # no matter if properly classified with draw

    async def test_voting_2(self):
        # Only one searched member is alive: no vote needed, skip straight
        # to HangingSummary.
        members = await self.change_state([12, 5])
        self.utility.get_player_role().members = members[:1]
        await self.state.voting()
        self.assertIsInstance(self.state, HangingSummary)
class TestHangingSummary(BaseStateTest):
async def change_state(self, summary=None, searched=0, other=True, dead=None):
if summary is None:
summary = list()
m = len(summary)
n = m + searched
members = self.mock_members(n)
votes = summary
summary = {}
for mem, v in zip(members[:m], votes):
summary[mem] = range(v)
other = members[:m] if other else None
searched = members
self.game.player_map = dict(zip(members, [AsyncMock() for _ in range(n)]))
if dead is not None:
players = [mem for i, mem in enumerate(members) if i not in dead]
else:
players = members
self.utility.get_player_role().members = players
await self.day.push_state(HangingSummary, summary=summary, searched=searched, other=other)
return members
async def test_init_1(self):
members = await self.change_state([1, 3, 5], other=False)
self.assertIsInstance(self.state, HangingSummary)
self.assertEqual(self.state.hanged, members[2])
async def test_init_2(self):
members = await self.change_state([4, 4, 3], other=False)
self.assertIsInstance(self.state, HangingSummaryWithRevote)
self.assertIsNone(self.state.hanged)
self.assertEqual(self.state.other, [members[0], members[1]])
async def test_init_3(self):
members = await self.change_state([3, 4], searched=1)
self.assertIsInstance(self.state, HangingSummary)
self.assertEqual(self.state.hanged, members[1])
async def test_init_4(self):
members = await self.change_state([2, 4], dead=[1])
self.assertIsInstance(self.state, HangingSummary)
self.assertEqual(self.state.hanged, members[0])
async def test_init_5(self):
members = await self.change_state([3, 5, 3], dead=[1])
self.assertIsInstance(self.state, HangingSummaryWithRevote)
self.assertEqual(self.state.other, [members[0], members[2]])
async def test_init_6(self):
members = await self.change_state(searched=2, dead=[0])
self.assertIsInstance(self.state, HangingSummary)
self.assertEqual(self.state.hanged, members[1])
async def test_init_7(self):
await self.change_state([2, 3], other=False, dead=[0, 1])
self.assertIsInstance(self.state, HangingSummary)
self.assertIsNone(self.state.hanged)
async def test_async_init_1(self):
await self.change_state([2, 3], other=False, dead=[0, 1])
town = self.utility.get_town_channel()
town.send.assert_not_awaited()
async def test_async_init_2(self):
members = await self.change_state([2, 3], other=False)
town = self.utility.get_town_channel()
town.send.assert_awaited_with('Powieszony(-a) ma zostać **M2**')
members[1].add_roles.assert_awaited()
async def test_async_init_3(self):
await self.change_state([3, 3], other=False)
town = self.utility.get_town_channel()
town.send.assert_awaited_with('Potrzebne jest głosowanie uzupełniające dla:\n**M1**\n**M2**\n')
async def test_undo(self):
members = await self.change_state([1, 2])
await self.state.undo()
self.assertIsInstance(self.state, HangIfSummary)
self.assertEqual(self.state.searched, members)
self.assertIsNone(self.state.hang)
async def test_end_1(self):
    """end() with one role-holder announces the hang, moves to Evening, kills the player."""
    members = await self.change_state([3, 5])
    self.utility.get_hanged_role().members = [members[1]]
    await self.state.end()
    town = self.utility.get_town_channel()
    town.send.assert_awaited_with('Powieszony(-a) zostaje **M2**')
    self.assertIsInstance(self.state, Evening)
    # The player's role object must be killed with reason 'hang'.
    self.game.player_map[members[1]].role_class.die.assert_awaited_with('hang')
async def test_end_2(self):
    """end() with nobody holding the hanged role announces that nobody is hanged."""
    await self.change_state([2, 5])
    self.utility.get_hanged_role().members = []
    await self.state.end()
    town = self.utility.get_town_channel()
    town.send.assert_awaited_with('Nikt nie zostaje powieszony')
async def test_end_3(self):
    """end() raises TooMuchHang when more than one member holds the hanged role."""
    members = await self.change_state([2, 5])
    self.utility.get_hanged_role().members = members
    with self.assertRaises(TooMuchHang):
        await self.state.end()
async def test_end_4(self):
    """end() raises IllegalHang when the role-holder is not a living player."""
    await self.change_state([2, 5])
    self.utility.get_hanged_role().members = [self.mock_members(1)]
    with self.assertRaises(IllegalHang) as cm:
        await self.state.end()
    self.assertEqual(cm.exception.msg, 'M1 ma zostać powieszony(-a) a nie gra lub nie żyje')
async def test_random(self):
    """random() resolves a three-way tie to HangingSummary with one of the tied members."""
    members = await self.change_state([5, 5, 5], other=False)
    await self.state.random()
    self.assertIsInstance(self.state, HangingSummary)
    self.assertIn(self.state.hanged, members)  # any tied member is acceptable
async def test_voting_1(self):
    """voting() opens a supplementary Voting; cancel() restores the revote state."""
    members = await self.change_state([5, 5, 5], other=False)
    await self.state.voting()
    self.assertIsInstance(self.state, Voting)
    self.assertEqual(self.state.title[0], 'Wieszanie - uzupełniające')
    self.assertEqual(self.state.options, [['1', 'M1'], ['2', 'M2'], ['3', 'M3']])
    await self.state.cancel()
    self.assertIsInstance(self.state, HangingSummaryWithRevote)
    # Set comparison: member order after cancel is not part of the contract here.
    self.assertSetEqual(set(self.state.other), set(members))
async def test_voting_2(self):
    """Ending the supplementary vote (no change) returns to the revote state."""
    members = await self.change_state([5, 5, 5], other=False)
    await self.state.voting()
    await self.state.end()
    self.assertIsInstance(self.state, HangingSummaryWithRevote)
    self.assertSetEqual(set(self.state.other), set(members))
if __name__ == '__main__':
    # Allow running this test module directly: python test_file.py
    unittest.main()
| 45.463795
| 119
| 0.673595
| 6,216
| 49,601
| 5.210746
| 0.061615
| 0.088083
| 0.06397
| 0.05125
| 0.831059
| 0.791726
| 0.750664
| 0.70105
| 0.657271
| 0.618679
| 0
| 0.024789
| 0.209472
| 49,601
| 1,090
| 120
| 45.505505
| 0.801255
| 0.007802
| 0
| 0.600642
| 0
| 0.005353
| 0.056056
| 0.00691
| 0
| 0
| 0
| 0
| 0.297645
| 1
| 0.007495
| false
| 0
| 0.011777
| 0
| 0.039615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6d7eddac0cb09b0a86ed57ad13e7d16492074e09
| 137
|
py
|
Python
|
pyalp/skins/__init__.py
|
Mause/pyalp
|
fb0f723070e11f8c9ed57e2475eb963599f442a6
|
[
"MIT"
] | null | null | null |
pyalp/skins/__init__.py
|
Mause/pyalp
|
fb0f723070e11f8c9ed57e2475eb963599f442a6
|
[
"MIT"
] | 2
|
2021-06-08T19:32:48.000Z
|
2022-03-11T23:17:45.000Z
|
pyalp/skins/__init__.py
|
Mause/pyalp
|
fb0f723070e11f8c9ed57e2475eb963599f442a6
|
[
"MIT"
] | null | null | null |
from .skins import Skin
def get_skin():
    """Return the process-wide ``Skin`` singleton, creating it on first use.

    The instance is cached as an attribute on the function object itself,
    so repeated calls always return the same ``Skin``.
    """
    try:
        return get_skin.skin
    except AttributeError:
        get_skin.skin = Skin()
        return get_skin.skin
| 15.222222
| 37
| 0.642336
| 21
| 137
| 4
| 0.47619
| 0.333333
| 0.392857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.248175
| 137
| 8
| 38
| 17.125
| 0.815534
| 0
| 0
| 0
| 0
| 0
| 0.029197
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6d85d7e3c8cd20d258b1a66012cb6ec84a8b2e96
| 163
|
py
|
Python
|
etl/celery_app.py
|
cygilbert/dota2_opgg_clone
|
6b65248248119a4dda6169f4c19bd68714f0ed75
|
[
"Apache-2.0"
] | null | null | null |
etl/celery_app.py
|
cygilbert/dota2_opgg_clone
|
6b65248248119a4dda6169f4c19bd68714f0ed75
|
[
"Apache-2.0"
] | null | null | null |
etl/celery_app.py
|
cygilbert/dota2_opgg_clone
|
6b65248248119a4dda6169f4c19bd68714f0ed75
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Init and config Celery app"""
from celery import Celery

# Module-level Celery instance shared by the ETL tasks.
celery = Celery()
# Settings are loaded from a `celeryconfig` module importable on sys.path.
celery.config_from_object('celeryconfig')
| 18.111111
| 41
| 0.699387
| 22
| 163
| 5.090909
| 0.681818
| 0.321429
| 0.321429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007042
| 0.128834
| 163
| 8
| 42
| 20.375
| 0.78169
| 0.398773
| 0
| 0
| 0
| 0
| 0.131868
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6d8c641c7787a2872f90f4be088ff4b7af29179c
| 1,440
|
py
|
Python
|
tests/src/Health_Card_Index/check_with_healthcard_button.py
|
JalajaTR/cQube
|
6bf58ab25f0c36709630987ab730bbd5d9192c03
|
[
"MIT"
] | null | null | null |
tests/src/Health_Card_Index/check_with_healthcard_button.py
|
JalajaTR/cQube
|
6bf58ab25f0c36709630987ab730bbd5d9192c03
|
[
"MIT"
] | 2
|
2022-02-01T00:55:12.000Z
|
2022-03-29T22:29:09.000Z
|
tests/src/Health_Card_Index/check_with_healthcard_button.py
|
JalajaTR/cQube
|
6bf58ab25f0c36709630987ab730bbd5d9192c03
|
[
"MIT"
] | null | null | null |
import time
from Data.parameters import Data
from reuse_func import GetData
class Health_card_btn():
    """Selenium checks that the cQube health card report is reachable.

    Each ``check_*`` method navigates to the health card report via a
    different UI path and returns 0 on success or 1 on failure
    (the original accumulated-``count`` semantics).
    """

    def __init__(self, driver):
        # Shared WebDriver session driving the browser under test.
        self.driver = driver

    def _report_result(self):
        """Return 0 if the browser landed on the health card report, else 1.

        Prints the same status messages the duplicated originals printed.
        """
        if "healthCard" in self.driver.current_url:
            print("Health card report is displayed ")
            return 0
        print("Navigation is failed to health card report")
        return 1

    def check_dashboard_health_board(self):
        """Navigate home -> dashboard -> health card; verify the final URL."""
        self.data = GetData()
        self.data.page_loading(self.driver)
        self.driver.find_element_by_id(Data.home).click()
        self.data.page_loading(self.driver)
        self.driver.find_element_by_id(Data.Dashboard).click()
        # Fixed wait kept from the original: dashboard widgets need a moment
        # before the health card element becomes clickable.
        time.sleep(2)
        self.driver.find_element_by_xpath("//*[@id='healthCard']").click()
        self.data.page_loading(self.driver)
        return self._report_result()

    def check_landing_healthcard_icon(self):
        """Navigate home -> landing-page health card icon; verify the final URL."""
        self.data = GetData()
        self.data.page_loading(self.driver)
        self.driver.find_element_by_id(Data.home).click()
        self.data.page_loading(self.driver)
        self.driver.find_element_by_xpath("//div[@id='healthCard']").click()
        self.data.page_loading(self.driver)
        return self._report_result()
| 32.727273
| 76
| 0.638889
| 184
| 1,440
| 4.804348
| 0.271739
| 0.169683
| 0.081448
| 0.128959
| 0.774887
| 0.774887
| 0.737557
| 0.737557
| 0.737557
| 0.737557
| 0
| 0.004673
| 0.256944
| 1,440
| 43
| 77
| 33.488372
| 0.821495
| 0
| 0
| 0.666667
| 0
| 0
| 0.147427
| 0.030598
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.25
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
097e923e8f33ac9f1219db8f2f76ae5c98ff80f8
| 13,086
|
py
|
Python
|
tests/test_mapping.py
|
Alzpeta/oarepo-rdm-records
|
4a2d93bd676364d2f1f95fae78fea8cd084b45e5
|
[
"MIT"
] | 1
|
2021-06-16T21:19:39.000Z
|
2021-06-16T21:19:39.000Z
|
tests/test_mapping.py
|
Alzpeta/oarepo-rdm-records
|
4a2d93bd676364d2f1f95fae78fea8cd084b45e5
|
[
"MIT"
] | 2
|
2021-04-20T16:52:30.000Z
|
2021-08-11T13:24:28.000Z
|
tests/test_mapping.py
|
Alzpeta/oarepo-rdm-records
|
4a2d93bd676364d2f1f95fae78fea8cd084b45e5
|
[
"MIT"
] | 7
|
2020-09-16T07:29:22.000Z
|
2021-08-11T10:42:04.000Z
|
# import json
#
#
# def test_mapping(app):
# """Test of mapping."""
# search = app.extensions['invenio-search']
# with open(search.mappings['records-record-v1.0.0']) as f:
# data = json.load(f)
# assert data == {
# "mappings": {
# "date_detection": False,
# "numeric_detection": False,
# "properties": {
# "_access": {
# "type": "object",
# "properties": {
# "metadata_restricted": {
# "type": "boolean"
# },
# "files_restricted": {
# "type": "boolean"
# }
# }
# },
# "_bucket": {
# "enabled": False
# },
# "_conceptrecid": {
# "type": "keyword"
# },
# "_created_by": {
# "type": "integer"
# },
# "_default_preview": {
# "enabled": False
# },
# "_files": {
# "type": "object",
# "properties": {
# "bucket": {
# "type": "keyword"
# },
# "key": {
# "type": "keyword",
# "copy_to": "filename"
# },
# "version_id": {
# "type": "keyword"
# },
# "size": {
# "type": "long"
# },
# "checksum": {
# "type": "keyword"
# },
# "previewer": {
# "type": "keyword"
# },
# "type": {
# "type": "keyword",
# "copy_to": "filetype"
# }
# }
# },
# "_internal_notes": {
# "type": "object",
# "properties": {
# "user": {
# "type": "keyword"
# },
# "note": {
# "type": "text"
# },
# "timestamp": {
# "type": "date"
# }
# }
# },
# "_recid": {
# "type": "keyword"
# },
# "_oai": {
# "type": "object",
# "properties": {
# "id": {
# "type": "keyword"
# },
# "sets": {
# "type": "keyword"
# },
# "updated": {
# "type": "date"
# }
# }
# },
# "_owners": {
# "type": "integer"
# },
# "_embargo_date": {
# "type": "date"
# },
# "_contact": {
# "type": "keyword"
# },
# "_communities": {
# "type": "object",
# "properties": {
# "accepted": {
# "type": "object",
# "properties": {
# "id": {
# "type": "keyword"
# },
# "comid": {
# "type": "keyword"
# },
# "title": {
# "type": "text"
# },
# "request_id": {
# "type": "keyword"
# },
# "created_by": {
# "type": "integer"
# }
# }
# },
# "pending": {
# "type": "object",
# "properties": {
# "id": {
# "type": "keyword"
# },
# "comid": {
# "type": "keyword"
# },
# "title": {
# "type": "text"
# },
# "request_id": {
# "type": "keyword"
# },
# "created_by": {
# "type": "integer"
# }
# }
# },
# "rejected": {
# "type": "object",
# "properties": {
# "id": {
# "type": "keyword"
# },
# "comid": {
# "type": "keyword"
# },
# "title": {
# "type": "text"
# },
# "request_id": {
# "type": "keyword"
# },
# "created_by": {
# "type": "integer"
# }
# }
# }
# }
# },
# "access_right": {
# "type": "keyword"
# },
# "resource_type": {
# "type": "object",
# "properties": {
# "type": {
# "type": "keyword"
# },
# "subtype": {
# "type": "keyword"
# }
# }
# },
# "identifiers": {
# "type": "object"
# },
# "creators": {
# "type": "object",
# "properties": {
# "name": {
# "type": "text"
# },
# "type": {
# "type": "keyword"
# },
# "given_name": {
# "type": "text"
# },
# "family_name": {
# "type": "text"
# },
# "identifiers": {
# "type": "object"
# },
# "affiliations": {
# "type": "object",
# "properties": {
# "name": {
# "type": "text"
# },
# "identifiers": {
# "type": "object"
# }
# }
# }
# }
# },
# "titles": {'type': 'object', 'properties':
# {
# 'cs': {'type': 'text',
# 'fields': {
# "keywords": {
# "type": "keyword"
# }
# }},
# 'en': {'type': 'text',
# 'fields': {
# "keywords": {
# "type": "keyword"
# }
# }
# }
# }
# },
# "subjects": {'type': 'object', 'properties':
# {
# 'cs': {'type': 'text',
# 'fields': {
# "keywords": {
# "type": "keyword"
# }
# }},
# 'en': {'type': 'text',
# 'fields': {
# "keywords": {
# "type": "keyword"
# }
# }
# }
# }
# },
# "contributors": {
# "type": "object",
# "properties": {
# "name": {
# "type": "text"
# },
# "type": {
# "type": "keyword"
# },
# "given_name": {
# "type": "text"
# },
# "family_name": {
# "type": "text"
# },
# "identifiers": {
# "type": "object"
# },
# "affiliations": {
# "type": "object",
# "properties": {
# "name": {
# "type": "text"
# },
# "identifiers": {
# "type": "object"
# }
# }
# },
# "role": {
# "type": "keyword"
# }
# }
# },
# "dates": {
# "type": "object",
# "properties": {
# "start": {
# "type": "date"
# },
# "end": {
# "type": "date"
# },
# "type": {
# "type": "keyword"
# },
# "description": {'type': 'text'}
# }
# },
# "language": {
# "type": "keyword"
# },
# "related_identifiers": {
# "type": "object",
# "properties": {
# "identifier": {
# "type": "keyword",
# "copy_to": "related.identifier"
# },
# "scheme": {
# "type": "keyword"
# },
# "relation_type": {
# "type": "keyword"
# },
# "resource_type": {
# "properties": {
# "subtype": {
# "type": "keyword"
# },
# "type": {
# "type": "keyword"
# }
# }
# }
# }
# },
# "version": {
# "type": "keyword"
# },
# "licenses": {
# "type": "object",
# "properties": {
# "license": {'type': 'object', 'properties':
# {
# 'cs': {'type': 'text',
# 'fields': {
# "keywords": {
# "type": "keyword"
# }
# }},
# 'en': {'type': 'text',
# 'fields': {
# "keywords": {
# "type": "keyword"
# }
# }
# }
# }
# },
# "uri": {
# "type": "keyword"
# },
# "identifier": {
# "type": "keyword"
# },
# "scheme": {
# "type": "keyword"
# }
# }
# },
# "descriptions": {'type': 'object', 'properties':
# {
# 'cs': {'type': 'text',
# 'fields': {
# "keywords": {
# "type": "keyword"
# }
# }},
# 'en': {'type': 'text',
# 'fields': {
# "keywords": {
# "type": "keyword"
# }
# }
# }
# }
# },
# "locations": {
# "type": "object",
# "properties": {
# "place": {
# "type": "text"
# },
# "description": {'type': 'object', 'properties':
# {
# 'cs': {'type': 'text',
# 'fields': {
# "keywords": {
# "type": "keyword"
# }
# }},
# 'en': {'type': 'text',
# 'fields': {
# "keywords": {
# "type": "keyword"
# }
# }
# }
# }
# },
# "point": {
# "type": "object",
# "properties": {
# "lat": {
# "type": "double"
# },
# "lon": {
# "type": "double"
# }
# }
# }
# }
# },
# "references": {
# "type": "object",
# "properties": {
# "reference_string": {
# "type": "text"
# },
# "identifier": {
# "type": "keyword"
# },
# "scheme": {
# "type": "keyword"
# }
# }
# },
# "_created": {
# "type": "date"
# },
# "_updated": {
# "type": "date"
# },
# "$schema": {
# "type": "keyword",
# "index": False
# },
# "extensions": {
# "type": "object",
# "dynamic": False,
# "enabled": False
# },
# "extensions_keywords": {
# "type": "object",
# "properties": {
# "key": {"type": "keyword"},
# "value": {"type": "keyword"}
# }
# },
# "extensions_texts": {
# "type": "object",
# "properties": {
# "key": {"type": "keyword"},
# "value": {"type": "text"}
# }
# },
# "extensions_longs": {
# "type": "object",
# "properties": {
# "key": {"type": "keyword"},
# "value": {"type": "long"}
# }
# },
# "extensions_dates": {
# "type": "object",
# "properties": {
# "key": {"type": "keyword"},
# "value": {"type": "date"}
# }
# },
# "extensions_booleans": {
# "type": "object",
# "properties": {
# "key": {"type": "keyword"},
# "value": {"type": "boolean"}
# }
# }
# }
# }
# }
| 27.093168
| 63
| 0.237735
| 544
| 13,086
| 5.628676
| 0.220588
| 0.204768
| 0.189419
| 0.071848
| 0.489549
| 0.472567
| 0.426845
| 0.426845
| 0.35663
| 0.35663
| 0
| 0.000554
| 0.585817
| 13,086
| 482
| 64
| 27.149378
| 0.564391
| 0.926257
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
09b4ed9e96ca20d6a2c161b070137e1b3ff2a2d5
| 204
|
py
|
Python
|
moto/sqs/__init__.py
|
argos83/moto
|
d3df810065c9c453d40fcc971f9be6b7b2846061
|
[
"Apache-2.0"
] | 1
|
2021-03-06T22:01:41.000Z
|
2021-03-06T22:01:41.000Z
|
moto/sqs/__init__.py
|
marciogh/moto
|
d3df810065c9c453d40fcc971f9be6b7b2846061
|
[
"Apache-2.0"
] | null | null | null |
moto/sqs/__init__.py
|
marciogh/moto
|
d3df810065c9c453d40fcc971f9be6b7b2846061
|
[
"Apache-2.0"
] | 1
|
2017-10-19T00:53:28.000Z
|
2017-10-19T00:53:28.000Z
|
from __future__ import unicode_literals
from .models import sqs_backends
from ..core.models import MockAWS, base_decorator

# Default backend used when no region is specified explicitly.
sqs_backend = sqs_backends['us-east-1']
# Decorator that mocks SQS for all registered region backends.
mock_sqs = base_decorator(sqs_backends)
| 29.142857
| 49
| 0.828431
| 30
| 204
| 5.233333
| 0.566667
| 0.210191
| 0.203822
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005435
| 0.098039
| 204
| 6
| 50
| 34
| 0.847826
| 0
| 0
| 0
| 0
| 0
| 0.044118
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
09c6c2f27b20590e0c629fdfe1e9ed57b260aae9
| 130
|
py
|
Python
|
FaceSwap-master/pytorch_stylegan_encoder/InterFaceGAN/models/pggan_tf_official/dataset.py
|
CSID-DGU/-2020-1-OSSP1-ninetynine-2
|
b1824254882eeea0ee44e4e60896b72c51ef1d2c
|
[
"MIT"
] | 1
|
2020-06-21T13:45:26.000Z
|
2020-06-21T13:45:26.000Z
|
FaceSwap-master/pytorch_stylegan_encoder/InterFaceGAN/models/pggan_tf_official/dataset.py
|
CSID-DGU/-2020-1-OSSP1-ninetynine-2
|
b1824254882eeea0ee44e4e60896b72c51ef1d2c
|
[
"MIT"
] | null | null | null |
FaceSwap-master/pytorch_stylegan_encoder/InterFaceGAN/models/pggan_tf_official/dataset.py
|
CSID-DGU/-2020-1-OSSP1-ninetynine-2
|
b1824254882eeea0ee44e4e60896b72c51ef1d2c
|
[
"MIT"
] | 3
|
2020-09-02T03:18:45.000Z
|
2021-01-27T08:24:05.000Z
|
version https://git-lfs.github.com/spec/v1
oid sha256:0ab972a2349578c1f7b7efebac401b0aff82b0ba3c09935514b807b45c9fdc66
size 12111
| 32.5
| 75
| 0.884615
| 13
| 130
| 8.846154
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.379032
| 0.046154
| 130
| 3
| 76
| 43.333333
| 0.548387
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
09e85356a42dc0ca9ba85d2e3b7a03ea59d4aa23
| 191
|
py
|
Python
|
kiki/__init__.py
|
deuxksy/kiki
|
d673ebabcd52d557c690edeb77b781d57a5f5e65
|
[
"MIT"
] | null | null | null |
kiki/__init__.py
|
deuxksy/kiki
|
d673ebabcd52d557c690edeb77b781d57a5f5e65
|
[
"MIT"
] | null | null | null |
kiki/__init__.py
|
deuxksy/kiki
|
d673ebabcd52d557c690edeb77b781d57a5f5e65
|
[
"MIT"
] | null | null | null |
#-*- coding: utf-8 -*-
import os
from cryptography.fernet import Fernet

# NOTE(review): this runs at import time — if ZZIZILY_KIKI_CRYPTO is unset,
# os.getenv returns None and Fernet will presumably fail here; confirm the
# deployment always provides the key.
crypto = Fernet(os.getenv('ZZIZILY_KIKI_CRYPTO'))
package_name = 'kiki'
# Project root directory, also taken from the environment (may be None).
project_home = os.getenv('ZZIZILY_KIKI_HOME')
| 23.875
| 49
| 0.753927
| 27
| 191
| 5.111111
| 0.592593
| 0.115942
| 0.217391
| 0.275362
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005848
| 0.104712
| 191
| 8
| 50
| 23.875
| 0.80117
| 0.109948
| 0
| 0
| 0
| 0
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
09f4877a10c47a446c7a362b7aad43c36a1059c1
| 147
|
py
|
Python
|
models/__init__.py
|
HerbertVidela/simple-text-representation
|
808f2ab25d70718aad09b94d0212c9ee3fbbefdc
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
HerbertVidela/simple-text-representation
|
808f2ab25d70718aad09b94d0212c9ee3fbbefdc
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
HerbertVidela/simple-text-representation
|
808f2ab25d70718aad09b94d0212c9ee3fbbefdc
|
[
"MIT"
] | null | null | null |
from .Database import Database
from .TextModel import TextModel
from .ParagraphModel import ParagraphModel
from .SentenceModel import SentenceModel
| 36.75
| 42
| 0.870748
| 16
| 147
| 8
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 147
| 4
| 43
| 36.75
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1102b42bb1a3ebd2f0c7a9cb4e2bf2ba05c2ef2c
| 24
|
py
|
Python
|
hello_world.py
|
aniketAnvekar/Profiles-RestApi
|
a83ecf71ad9493db04d3577c049339a81056ae9b
|
[
"MIT"
] | null | null | null |
hello_world.py
|
aniketAnvekar/Profiles-RestApi
|
a83ecf71ad9493db04d3577c049339a81056ae9b
|
[
"MIT"
] | 5
|
2020-06-06T01:51:28.000Z
|
2022-02-10T11:45:26.000Z
|
hello_world.py
|
aniketAnvekar/Profiles-RestApi
|
a83ecf71ad9493db04d3577c049339a81056ae9b
|
[
"MIT"
] | null | null | null |
# Placeholder hello-world script; prints a fixed greeting string.
print('Hijo de Puta!!')
| 12
| 23
| 0.625
| 4
| 24
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
111375aebc420f2a4c02e9bf6569e3d81f08e0df
| 4,976
|
py
|
Python
|
tests/utils/test_ped.py
|
robinandeer/puzzle
|
9476f05b416d3a5135d25492cb31411fdf831c58
|
[
"MIT"
] | 24
|
2015-10-15T16:29:58.000Z
|
2020-12-08T22:14:13.000Z
|
tests/utils/test_ped.py
|
robinandeer/puzzle
|
9476f05b416d3a5135d25492cb31411fdf831c58
|
[
"MIT"
] | 212
|
2015-10-08T14:28:36.000Z
|
2020-04-29T22:44:10.000Z
|
tests/utils/test_ped.py
|
robinandeer/puzzle
|
9476f05b416d3a5135d25492cb31411fdf831c58
|
[
"MIT"
] | 11
|
2015-10-08T09:26:46.000Z
|
2018-02-02T16:45:07.000Z
|
import os
from puzzle.utils.ped import (get_individuals, get_cases)
class TestGetIndividuals:
    """Tests for puzzle.utils.ped.get_individuals across source formats.

    The identical assertion triple (count + exact ind_id set) was repeated
    in every test; it is factored into ``_assert_inds`` with the expected
    id sets kept as class constants.
    """

    # Expected sample ids for the ADM trio VCF fixtures.
    ADM_TRIO = set(['ADM1059A1', 'ADM1059A2', 'ADM1059A3'])
    # Expected sample ids for the NA trio gemini fixture.
    NA_TRIO = set(['NA12878', 'NA12882', 'NA12877'])

    def _assert_inds(self, individuals, expected_ids):
        """Assert extracted individuals match exactly the expected ind_ids."""
        assert len(individuals) == len(expected_ids)
        assert expected_ids == set([ind.ind_id for ind in individuals])

    def test_get_individuals_from_vcf(self, vcf_file):
        individuals = get_individuals(variant_source=vcf_file)
        self._assert_inds(individuals, self.ADM_TRIO)

    def test_get_individuals_from_compressed_vcf(self, compressed_vcf_file):
        individuals = get_individuals(variant_source=compressed_vcf_file)
        self._assert_inds(individuals, self.ADM_TRIO)

    def test_get_individuals_case_lines(self, vcf_file, ped_lines):
        individuals = get_individuals(variant_source=vcf_file, case_lines=ped_lines)
        self._assert_inds(individuals, self.ADM_TRIO)

    def test_get_individuals_gemini_database(self, gemini_path):
        individuals = get_individuals(variant_source=gemini_path, variant_mode='gemini')
        self._assert_inds(individuals, self.NA_TRIO)

    def test_get_individuals_from_vcf_no_ind(self, vcf_file_no_ind):
        """A VCF without sample columns yields no individuals."""
        individuals = get_individuals(variant_source=vcf_file_no_ind)
        assert len(individuals) == 0
class TestGetCase:
    """Tests for puzzle.utils.ped.get_cases across source formats.

    All seven originals repeated the same assertion block (case_id,
    compressed, tabix_index, individual count + ids); it is factored into
    ``_check_case``.
    """

    # Expected sample ids for the ADM trio VCF fixtures.
    ADM_TRIO = set(['ADM1059A1', 'ADM1059A2', 'ADM1059A3'])
    # Expected sample ids for the NA trio gemini fixture.
    NA_TRIO = set(['NA12878', 'NA12882', 'NA12877'])

    def _check_case(self, case_obj, case_id, compressed, tabix_index, expected_ids):
        """Assert the case's id, compression flags, and individual ids."""
        assert case_obj.case_id == case_id
        assert case_obj.compressed == compressed
        assert case_obj.tabix_index == tabix_index
        individuals = case_obj.individuals
        assert len(individuals) == len(expected_ids)
        if expected_ids:
            assert expected_ids == set([ind.ind_id for ind in individuals])

    def test_get_case_from_vcf(self, vcf_file):
        case_obj = get_cases(vcf_file)[0]
        self._check_case(case_obj, os.path.basename(vcf_file),
                         False, False, self.ADM_TRIO)

    def test_get_case_no_ind(self, vcf_file_no_ind):
        case_obj = get_cases(vcf_file_no_ind)[0]
        self._check_case(case_obj, os.path.basename(vcf_file_no_ind),
                         False, False, set())

    def test_get_case_from_compressed_vcf(self, compressed_vcf_file):
        case_obj = get_cases(compressed_vcf_file)[0]
        self._check_case(case_obj, os.path.basename(compressed_vcf_file),
                         True, False, self.ADM_TRIO)

    def test_get_case_from_indexed_vcf(self, indexed_vcf_file):
        case_obj = get_cases(indexed_vcf_file)[0]
        self._check_case(case_obj, os.path.basename(indexed_vcf_file),
                         True, True, self.ADM_TRIO)

    def test_get_case_from_ped(self, vcf_file, ped_lines):
        # Case id comes from the PED family id, not the file name.
        case_obj = get_cases(vcf_file, case_lines=ped_lines)[0]
        self._check_case(case_obj, '636808', False, False, self.ADM_TRIO)

    def test_get_case_from_ped_indexed_vcf(self, indexed_vcf_file, ped_lines):
        case_obj = get_cases(indexed_vcf_file, case_lines=ped_lines)[0]
        self._check_case(case_obj, '636808', True, True, self.ADM_TRIO)

    def test_get_case_from_gemini(self, gemini_path):
        case_obj = get_cases(gemini_path, variant_mode='gemini')[0]
        self._check_case(case_obj, '643594', False, False, self.NA_TRIO)
| 42.169492
| 88
| 0.670418
| 667
| 4,976
| 4.662669
| 0.077961
| 0.078778
| 0.087781
| 0.067524
| 0.9209
| 0.868489
| 0.818328
| 0.681994
| 0.681994
| 0.681994
| 0
| 0.048991
| 0.232918
| 4,976
| 118
| 89
| 42.169492
| 0.765785
| 0
| 0
| 0.652632
| 0
| 0
| 0.057866
| 0
| 0
| 0
| 0
| 0
| 0.452632
| 1
| 0.126316
| false
| 0
| 0.021053
| 0
| 0.168421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1115060ef5c8b26273a8fece1cc0a50aa1b693d1
| 63
|
py
|
Python
|
metashare/repository/templatetags/__init__.py
|
zeehio/META-SHARE
|
b796769629734353a63d98db72c84617f725e544
|
[
"BSD-3-Clause"
] | 11
|
2015-07-13T13:36:44.000Z
|
2021-11-15T08:07:25.000Z
|
metashare/repository/templatetags/__init__.py
|
zeehio/META-SHARE
|
b796769629734353a63d98db72c84617f725e544
|
[
"BSD-3-Clause"
] | 13
|
2015-03-21T14:08:31.000Z
|
2021-05-18T18:47:58.000Z
|
metashare/repository/templatetags/__init__.py
|
zeehio/META-SHARE
|
b796769629734353a63d98db72c84617f725e544
|
[
"BSD-3-Clause"
] | 12
|
2015-01-07T02:16:50.000Z
|
2021-05-18T08:25:31.000Z
|
from metashare.repository.templatetags import email_protection
| 31.5
| 62
| 0.904762
| 7
| 63
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063492
| 63
| 1
| 63
| 63
| 0.949153
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
115d56cdf505f0ecc769fe24f62fb454c5102823
| 36
|
py
|
Python
|
kalliope/signals/geolocation/__init__.py
|
joshuaboniface/kalliope
|
0e040be3165e838485d1e5addc4d2c5df12bfd84
|
[
"MIT"
] | 1
|
2020-03-30T15:03:19.000Z
|
2020-03-30T15:03:19.000Z
|
kalliope/signals/geolocation/__init__.py
|
joshuaboniface/kalliope
|
0e040be3165e838485d1e5addc4d2c5df12bfd84
|
[
"MIT"
] | null | null | null |
kalliope/signals/geolocation/__init__.py
|
joshuaboniface/kalliope
|
0e040be3165e838485d1e5addc4d2c5df12bfd84
|
[
"MIT"
] | null | null | null |
from .geolocation import Geolocation
| 36
| 36
| 0.888889
| 4
| 36
| 8
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 36
| 1
| 36
| 36
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
116b3969292e9233c8cef006a501496bf6627977
| 43
|
py
|
Python
|
scale_client/__main__.py
|
prav33nv/scale_client
|
dcbd6ed4c8f4a27606ebef5b5f9dabb2e4f3b806
|
[
"BSD-2-Clause-FreeBSD"
] | 3
|
2018-05-24T00:59:05.000Z
|
2020-01-03T08:03:33.000Z
|
scale_client/__main__.py
|
prav33nv/scale_client
|
dcbd6ed4c8f4a27606ebef5b5f9dabb2e4f3b806
|
[
"BSD-2-Clause-FreeBSD"
] | 26
|
2015-01-19T22:47:07.000Z
|
2017-05-03T01:43:10.000Z
|
scale_client/__main__.py
|
prav33nv/scale_client
|
dcbd6ed4c8f4a27606ebef5b5f9dabb2e4f3b806
|
[
"BSD-2-Clause-FreeBSD"
] | 6
|
2015-01-20T20:05:09.000Z
|
2017-06-01T02:19:01.000Z
|
import core.client as client

# Entry point for `python -m scale_client`: delegate to the core client.
client.main()
| 14.333333
| 28
| 0.790698
| 7
| 43
| 4.857143
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 43
| 2
| 29
| 21.5
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
fed3825207f3d298de5fb1a82dd903c79020a25b
| 225
|
py
|
Python
|
elvanto_subgroups/admin.py
|
monty5811/elvanto_subgroups
|
ef7a819787bbb5bf2a8bf6160e8476a613f67fa3
|
[
"MIT"
] | null | null | null |
elvanto_subgroups/admin.py
|
monty5811/elvanto_subgroups
|
ef7a819787bbb5bf2a8bf6160e8476a613f67fa3
|
[
"MIT"
] | null | null | null |
elvanto_subgroups/admin.py
|
monty5811/elvanto_subgroups
|
ef7a819787bbb5bf2a8bf6160e8476a613f67fa3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.contrib import admin
from elvanto_subgroups.models import ElvantoGroup, ElvantoPerson, Link

# Expose the three app models in the Django admin with default ModelAdmin.
admin.site.register(ElvantoGroup)
admin.site.register(ElvantoPerson)
admin.site.register(Link)
| 25
| 70
| 0.8
| 28
| 225
| 6.392857
| 0.571429
| 0.150838
| 0.284916
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004878
| 0.088889
| 225
| 8
| 71
| 28.125
| 0.868293
| 0.093333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
fee2a0a1af8923bf47b77da6a8bdd91b76e87000
| 27
|
py
|
Python
|
base/test-show-scope/func-args-2.py
|
jpolitz/lambda-py-paper
|
746ef63fc1123714b4adaf78119028afbea7bd76
|
[
"Apache-2.0"
] | 25
|
2015-04-16T04:31:49.000Z
|
2022-03-10T15:53:28.000Z
|
base/test-show-scope/func-args-2.py
|
jpolitz/lambda-py-paper
|
746ef63fc1123714b4adaf78119028afbea7bd76
|
[
"Apache-2.0"
] | 1
|
2018-11-21T22:40:02.000Z
|
2018-11-26T17:53:11.000Z
|
base/test-show-scope/func-args-2.py
|
jpolitz/lambda-py-paper
|
746ef63fc1123714b4adaf78119028afbea7bd76
|
[
"Apache-2.0"
] | 1
|
2021-03-26T03:36:19.000Z
|
2021-03-26T03:36:19.000Z
|
# Scope-resolution test fixture: the parameter x must shadow the module x.
x = 9
def f(x):
    # Returns the argument, not the module-level binding (9).
    return x
| 5.4
| 9
| 0.518519
| 7
| 27
| 2
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0.333333
| 27
| 4
| 10
| 6.75
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
fee93aa6bd7bb88d080086a038c06cf02a4da4ec
| 86
|
py
|
Python
|
simulation/admin.py
|
FetijeBraha/Blockchain-based-E-Voting
|
c8fd4fadbe727898d1a3dbbe515732f24c4c8819
|
[
"MIT"
] | null | null | null |
simulation/admin.py
|
FetijeBraha/Blockchain-based-E-Voting
|
c8fd4fadbe727898d1a3dbbe515732f24c4c8819
|
[
"MIT"
] | 2
|
2021-02-03T11:52:06.000Z
|
2021-02-03T20:57:59.000Z
|
simulation/admin.py
|
FetijeBraha/Blockchain-based-E-Voting
|
c8fd4fadbe727898d1a3dbbe515732f24c4c8819
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Vote

# Make Vote manageable in the Django admin with the default ModelAdmin.
admin.site.register(Vote)
| 21.5
| 32
| 0.802326
| 13
| 86
| 5.307692
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127907
| 86
| 4
| 33
| 21.5
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3a2a74ba5dadb33b759a04ed3ee9c5feaa107bac
| 2,540
|
py
|
Python
|
monolith/database.py
|
Orionisxoxo/Integracja_aplikacji_2
|
8e93ed476ab215bfb489ac5ac6128dec161c56b8
|
[
"Apache-2.0"
] | null | null | null |
monolith/database.py
|
Orionisxoxo/Integracja_aplikacji_2
|
8e93ed476ab215bfb489ac5ac6128dec161c56b8
|
[
"Apache-2.0"
] | null | null | null |
monolith/database.py
|
Orionisxoxo/Integracja_aplikacji_2
|
8e93ed476ab215bfb489ac5ac6128dec161c56b8
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf8
from werkzeug.security import generate_password_hash, check_password_hash
import enum
from sqlalchemy.orm import relationship
from flask_sqlalchemy import SQLAlchemy
# Shared SQLAlchemy handle for all models below.
# NOTE(review): presumably bound to the Flask app elsewhere via db.init_app();
# confirm. ``enum`` appears unused in this module.
db = SQLAlchemy()
class User(db.Model):
    """Application user account backed by the ``user`` table.

    ``password`` holds a werkzeug hash (never the plain text); set it via
    :meth:`set_password` and verify it via :meth:`authenticate`.
    """
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    email = db.Column(db.Unicode(128), nullable=False)
    firstname = db.Column(db.Unicode(128))
    lastname = db.Column(db.Unicode(128))
    # Hashed password, written by set_password().
    password = db.Column(db.Unicode(128))
    # Token for the GitLab API -- presumably a personal access token; confirm with callers.
    gitlab_token = db.Column(db.String(128))
    is_active = db.Column(db.Boolean, default=True)
    is_admin = db.Column(db.Boolean, default=False)
    # Flask-Login-style class attribute: instances never represent anonymous users.
    is_anonymous = False

    def __init__(self, *args, **kw):
        """Create the user; the in-memory authentication flag starts False."""
        super(User, self).__init__(*args, **kw)
        self._authenticated = False

    def set_password(self, password):
        """Hash *password* and store the hash in ``self.password``."""
        self.password = generate_password_hash(password)

    @property
    def is_authenticated(self):
        # True only after a successful authenticate() call on this instance.
        return self._authenticated

    def authenticate(self, password):
        """Check *password* against the stored hash; record and return the result."""
        checked = check_password_hash(self.password, password)
        self._authenticated = checked
        return self._authenticated

    def get_id(self):
        # Identity hook. NOTE(review): Flask-Login documents get_id() as
        # returning a str; this returns the integer primary key -- confirm
        # against the user loader.
        return self.id
class Project(db.Model):
    """Local record of a GitLab project, linked to the owning :class:`User`."""
    __tablename__ = 'project'
    # Local primary key, distinct from the id on the GitLab server.
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    description = db.Column(db.Unicode(128))
    visibility = db.Column(db.Unicode(128))
    name = db.Column(db.Unicode(128))
    # Id of the project on the GitLab server.
    gitlab_id = db.Column(db.Integer)
    ssh_url_to_repo = db.Column(db.Unicode(128))
    http_url_to_repo = db.Column(db.Unicode(128))
    web_url = db.Column(db.Unicode(128))
    name_with_namespace = db.Column(db.Unicode(128))
    path = db.Column(db.Unicode(128))
    path_with_namespace = db.Column(db.Unicode(128))
    # Timestamps kept as raw strings -- presumably as returned by the GitLab
    # API; confirm before parsing.
    created_at = db.Column(db.String())
    last_activity_at = db.Column(db.String())
    # Owning user (FK to user.id) and the corresponding relationship.
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    user_project = relationship('User', foreign_keys='Project.user_id')
class Group(db.Model):
    """Local record of a GitLab group, linked to the owning :class:`User`."""
    __tablename__ = 'group'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.Unicode(128))
    path = db.Column(db.Unicode(128))
    description = db.Column(db.Unicode(128))
    visibility = db.Column(db.Unicode(128))
    web_url = db.Column(db.Unicode(128))
    full_name = db.Column(db.Unicode(128))
    full_path = db.Column(db.Unicode(128))
    # Owning user (FK to user.id) and the corresponding relationship.
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    user_group = relationship('User', foreign_keys='Group.user_id')
| 33.866667
| 73
| 0.69685
| 350
| 2,540
| 4.86
| 0.222857
| 0.145797
| 0.182246
| 0.199882
| 0.527337
| 0.431511
| 0.356261
| 0.329218
| 0.306878
| 0.306878
| 0
| 0.030447
| 0.172441
| 2,540
| 74
| 74
| 34.324324
| 0.778782
| 0.005512
| 0
| 0.288136
| 1
| 0
| 0.026149
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084746
| false
| 0.101695
| 0.067797
| 0.033898
| 0.881356
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
3a5c6e65e1500fdef075bd0be7bb2cefc8232076
| 115
|
py
|
Python
|
authentication/admin.py
|
MarioDavidov/heroku_workout
|
50727305f7e6f05527d3a5fff5e8d27843435985
|
[
"MIT"
] | null | null | null |
authentication/admin.py
|
MarioDavidov/heroku_workout
|
50727305f7e6f05527d3a5fff5e8d27843435985
|
[
"MIT"
] | null | null | null |
authentication/admin.py
|
MarioDavidov/heroku_workout
|
50727305f7e6f05527d3a5fff5e8d27843435985
|
[
"MIT"
] | null | null | null |
# Django admin wiring for the authentication app.
from django.contrib import admin
from authentication.models import UserProfile
# Expose the UserProfile model in the Django admin site with default options.
admin.site.register(UserProfile)
| 16.428571
| 45
| 0.843478
| 14
| 115
| 6.928571
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104348
| 115
| 6
| 46
| 19.166667
| 0.941748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
28b7795935f6e2db52dc3164b03ba750a9403136
| 93
|
py
|
Python
|
cellardoor/__init__.py
|
movermeyer/cellardoor
|
25192b07224ff7bd33fd29ebac07340bef53a2ed
|
[
"MIT"
] | null | null | null |
cellardoor/__init__.py
|
movermeyer/cellardoor
|
25192b07224ff7bd33fd29ebac07340bef53a2ed
|
[
"MIT"
] | 3
|
2015-01-31T14:53:06.000Z
|
2015-02-01T19:04:30.000Z
|
cellardoor/__init__.py
|
movermeyer/cellardoor
|
25192b07224ff7bd33fd29ebac07340bef53a2ed
|
[
"MIT"
] | 2
|
2015-01-31T14:54:28.000Z
|
2018-03-05T17:33:42.000Z
|
"""
Create REST APIs declaratively.
"""
from version import __version__
from . import errors
| 15.5
| 31
| 0.763441
| 11
| 93
| 6.090909
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150538
| 93
| 5
| 32
| 18.6
| 0.848101
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e92ac7b66997dfd822019420962ff5b56d8eddb1
| 312
|
py
|
Python
|
python/anyascii/_data/_301.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
python/anyascii/_data/_301.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
python/anyascii/_data/_301.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
# Transliteration table fragment for the anyascii package: a single
# space-separated string of ASCII replacements -- presumably indexed by code
# point offset within Unicode block 0x301; confirm against the anyascii
# data-file format before editing.
b=' Shen Tuan Lu Du Dan Xia Wei Lan Gai Dong Jia Hong Ji Garon He Xi Tan Shan'
| 312
| 312
| 0.182692
| 19
| 312
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.807692
| 312
| 1
| 312
| 312
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0.984026
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e9395e8642049c183e12926668b1b6f96c5d5249
| 28
|
py
|
Python
|
iDrive/tests.py
|
WilfredWee/iDrive-Django-opensource
|
9fd92f416ba435eea458aceeb250b27dcd9c6284
|
[
"MIT"
] | 1
|
2021-07-07T07:02:50.000Z
|
2021-07-07T07:02:50.000Z
|
iDrive/tests.py
|
wilfredwee/iDrive-Django-opensource
|
9fd92f416ba435eea458aceeb250b27dcd9c6284
|
[
"MIT"
] | null | null | null |
iDrive/tests.py
|
wilfredwee/iDrive-Django-opensource
|
9fd92f416ba435eea458aceeb250b27dcd9c6284
|
[
"MIT"
] | 2
|
2015-08-14T13:31:30.000Z
|
2015-09-13T10:07:49.000Z
|
from iDrive.models import *
| 14
| 27
| 0.785714
| 4
| 28
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e93b99c9aee1944ea607116284191bb65bbde136
| 30
|
py
|
Python
|
modules/__init__.py
|
jacktomcat/python-lesson-study
|
8b15ac61fb5b7779ee758c834d3036e176f1826d
|
[
"Apache-2.0"
] | null | null | null |
modules/__init__.py
|
jacktomcat/python-lesson-study
|
8b15ac61fb5b7779ee758c834d3036e176f1826d
|
[
"Apache-2.0"
] | null | null | null |
modules/__init__.py
|
jacktomcat/python-lesson-study
|
8b15ac61fb5b7779ee758c834d3036e176f1826d
|
[
"Apache-2.0"
] | null | null | null |
#from employee import Employee
| 30
| 30
| 0.866667
| 4
| 30
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 30
| 1
| 30
| 30
| 0.962963
| 0.966667
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3a8c2bc424ff8d7e9772e17cf05f56122fd6a56c
| 50
|
py
|
Python
|
src/pyper/__init__.py
|
maichmueller/per
|
17eddea4d1aa1aac8fb9664f7437ecc1f119dcf5
|
[
"MIT"
] | null | null | null |
src/pyper/__init__.py
|
maichmueller/per
|
17eddea4d1aa1aac8fb9664f7437ecc1f119dcf5
|
[
"MIT"
] | 2
|
2021-12-22T12:14:20.000Z
|
2021-12-23T21:39:14.000Z
|
src/pyper/__init__.py
|
maichmueller/per
|
17eddea4d1aa1aac8fb9664f7437ecc1f119dcf5
|
[
"MIT"
] | null | null | null |
from ._pyper import SumTree, PrioritizedExperience
| 50
| 50
| 0.88
| 5
| 50
| 8.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 50
| 1
| 50
| 50
| 0.934783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3aa77214ad0b4eda60b714549dc947909ff5e8c0
| 60,713
|
py
|
Python
|
seaice/data/test/test_getter.py
|
andypbarrett/nsidc-seaice
|
167a16309f7eaadd5c613b54a7df26eb1f48c2f3
|
[
"MIT"
] | 2
|
2020-08-27T08:40:22.000Z
|
2021-04-14T15:42:09.000Z
|
seaice/data/test/test_getter.py
|
andypbarrett/nsidc-seaice
|
167a16309f7eaadd5c613b54a7df26eb1f48c2f3
|
[
"MIT"
] | null | null | null |
seaice/data/test/test_getter.py
|
andypbarrett/nsidc-seaice
|
167a16309f7eaadd5c613b54a7df26eb1f48c2f3
|
[
"MIT"
] | null | null | null |
from datetime import date
from unittest.mock import patch
import copy
import datetime as dt
import os
import unittest
from nose.tools import assert_equals, assert_true, raises
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as pdt
from seaice.data.errors import DateOutOfRangeError
from seaice.data.errors import YearMonthOutOfRangeError
import seaice.data.getter as getter
import seaice.data.gridset_filters as gridset_filters
import seaice.data.locator as locator
from .util import mock_today
import seaice.nasateam as nt
# Root of the on-disk test fixtures: three directories up from this file,
# then test_data/seaice.data.
TEST_DATA = os.path.join(os.path.dirname(__file__),
                         os.path.pardir, os.path.pardir, os.path.pardir,
                         'test_data', 'seaice.data')
SOUTH_DAILY_FILE = os.path.join(TEST_DATA, 'nt_19871118_f08_v01_s.bin')
NORTH_DAILY_FILE = os.path.join(TEST_DATA, 'nt_20010107_f13_v01_n.bin')
# Grid cell values used by the tests; 253-255 are sentinel flag values.
OCEAN = 0
ICE = 1
COAST = 253
LAND = 254
MISSING = 255
# Minimal gridset dict reused as a mock return value across the test classes.
GRIDSET_STUB = {'data': np.array([]), 'metadata': {'period': None,
                                                   'temporality': 'D',
                                                   'period_index': pd.PeriodIndex([], freq='D'),
                                                   'valid_data_range': (0, 100),
                                                   'flags': {},
                                                   'missing_value': None,
                                                   'hemi': 'N',
                                                   'files': []}}
class Test_concentration_daily(unittest.TestCase):
    """Tests for getter.concentration_daily: single-day gridset retrieval,
    its date-range validation, and its interpolation behavior."""

    @patch('seaice.datastore.get_bad_days_for_hemisphere')
    @patch('seaice.data.getter.empty_gridset')
    @patch('os.walk')
    def test_daily_no_file_gets_empty_grid(self, mock_walk, mock_empty_gridset,
                                           mock_get_bad_days_for_hemisphere):
        """With no files on disk, an empty daily ('D') gridset is requested."""
        mock_get_bad_days_for_hemisphere.return_value = []
        # no files found
        mock_walk.return_value = [('/anyroot', [], [])]
        date_ = date(2015, 9, 1)
        hemisphere = nt.NORTH
        search_paths = ['/anyroot']
        # NOTE(review): np.int was removed in NumPy 1.24 -- use int or np.int_.
        mock_empty_gridset.return_value = {
            'data': np.full((448, 304), 255, dtype=np.int),
            'metadata': {}
        }
        # act
        getter.concentration_daily(hemisphere, date_, search_paths)
        # assert
        getter.empty_gridset.assert_called_with((448, 304), 'D')

    @patch('seaice.datastore.get_bad_days_for_hemisphere')
    @patch('seaice.data.gridset_filters._interpolate_missing')
    @patch('seaice.data.getter._concentration_gridset_by_filelist')
    @patch('seaice.data.locator.daily_file_path')
    def test_daily_single_file_not_interpolated(self, mock_daily_file_path,
                                                _mockgridset_by_filelist,
                                                mock__interpolate_missing,
                                                mock_get_bad_days_for_hemisphere):
        """When the target day's file exists, no interpolation is attempted."""
        mock_get_bad_days_for_hemisphere.return_value = []
        files = ['files.1_s.bin']
        gridset = {'data': [], 'metadata': {'files': []}}
        mock_daily_file_path.return_value = files
        _mockgridset_by_filelist.return_value = gridset
        mock__interpolate_missing.return_value = []
        date_ = date(2015, 9, 1)
        hemisphere = nt.NORTH
        search_paths = ['/anyroot']
        # act
        getter.concentration_daily(hemisphere, date_, search_paths, 1)
        # assert
        getter._concentration_gridset_by_filelist.assert_called_with(files)
        gridset_filters._interpolate_missing.assert_not_called()

    @mock_today(1995, 11, 24)
    @raises(DateOutOfRangeError)
    def test_daily_throws_error_for_dates_today_or_later(self, ):
        getter.concentration_daily(nt.NORTH, date(1995, 11, 24), ['/who/cares'])

    @mock_today(1990, 11, 24)
    @raises(DateOutOfRangeError)
    def test_daily_throws_error_for_future_date(self, ):
        getter.concentration_daily(nt.NORTH, date(1992, 1, 10), ['/who/cares'])

    @raises(DateOutOfRangeError)
    def test_daily_throws_error_before_october_26_1978(self, ):
        # 1978-10-26 is the start of the satellite record; the day before fails.
        getter.concentration_daily(nt.NORTH, date(1978, 10, 25), ['/who/cares'])

    @patch('seaice.datastore.get_bad_days_for_hemisphere')
    @mock_today(2014, 11, 24)
    def test_daily_works_with_yesterday(self, mock_get_bad_days_for_hemisphere):
        mock_get_bad_days_for_hemisphere.return_value = []
        actual = getter.concentration_daily(nt.NORTH, date(2014, 11, 23), ['/who/cares'])
        assert_equals(actual['data'].shape, (448, 304))

    @patch('seaice.datastore.get_bad_days_for_hemisphere')
    def test_daily_works_with_october_26_1978(self, mock_get_bad_days_for_hemisphere):
        mock_get_bad_days_for_hemisphere.return_value = []
        actual = getter.concentration_daily(nt.NORTH, date(1978, 10, 26), ['/who/cares'])
        assert_equals(actual['data'].shape, (448, 304))

    @patch('seaice.datastore.get_bad_days_for_hemisphere')
    @patch('seaice.data.gridset_filters._interpolate_missing')
    @patch('seaice.data.getter._concentration_gridset_by_filelist')
    @patch('seaice.data.locator.daily_file_path')
    def test_interpolation_with_skipped_day_in_SMMR_period(self,
                                                           mock_daily_file_path,
                                                           mock__gridset_by_filelist,
                                                           mock__interpolate_missing,
                                                           mock_get_bad_days_for_hemisphere):
        """Interpolating across an every-other-day SMMR gap keeps the
        neighboring days' files in the result metadata."""
        mock_get_bad_days_for_hemisphere.return_value = []
        files = ['nt_19810529_n07_v1.1_s.bin',
                 'nt_19810531_n07_v1.1_s.bin']
        gridset = {'data': np.full((2, 2, 2), 4, dtype=np.int),
                   'metadata': {'files': files}}
        mock_daily_file_path.return_value = files
        mock__gridset_by_filelist.return_value = gridset
        mock__interpolate_missing.return_value = np.full((2, 2), 4, dtype=np.int)
        interpolation_radius = 1
        nt_hemi = {'short_name': 'N'}
        anydate = dt.date(1981, 5, 30)
        actual_gridset = getter.concentration_daily(nt_hemi,
                                                    anydate,
                                                    ['/anypaths'],
                                                    interpolation_radius=interpolation_radius)
        actual = actual_gridset['metadata']['files']
        expected = ['nt_19810529_n07_v1.1_s.bin', 'nt_19810531_n07_v1.1_s.bin']
        self.assertEqual(actual, expected)
class Test_concentration_daily___failed_qa_logic(unittest.TestCase):
    """Tests for how concentration_daily handles days flagged as bad QA."""

    def setUp(self):
        # Build a 3-day stack: clean day before, target day with a missing
        # 3x3 corner, clean day after -- plus the grid expected after
        # interpolating that corner as the mean of the neighbors (1 and 11).
        # NOTE(review): np.int was removed in NumPy 1.24 -- use int or np.int_.
        self.day_before_grid = np.full(nt.NORTH['shape'], 1, dtype=np.int)
        target_grid = np.full(nt.NORTH['shape'], 2, dtype=np.int)
        target_grid[0:3, 0:3] = nt.FLAGS['missing']
        self.target_grid = target_grid.copy()
        self.day_after_grid = np.full(nt.NORTH['shape'], 11, dtype=np.int)
        self.cube = np.dstack([self.day_before_grid, target_grid, self.day_after_grid])
        target_grid[0:3, 0:3] = (1 + 11) / 2
        self.interpolated_grid = target_grid.copy()
        self.empty_grid = np.full(nt.NORTH['shape'], nt.FLAGS['missing'], dtype=np.int)
        self.target_date = dt.date(1980, 10, 25)
        self.file_list = ['nt_19801024_n07_v1.1_n.bin',
                          'nt_19801025_n07_v1.1_n.bin',
                          'nt_19801026_n07_v1.1_n.bin']

    @patch('seaice.data.getter._concentration_gridset_by_filelist')
    @patch('seaice.data.locator.daily_file_path')
    @patch('seaice.datastore.get_bad_days_for_hemisphere')
    def test_returns_bad_data_gridset(self,
                                      mock_get_bad_days_for_hemisphere,
                                      mock_daily_file_path,
                                      mock__concentration_gridset_by_filelist):
        """With interpolation disabled, a bad-QA day is returned as-is."""
        interpolation_radius = 0
        mock_get_bad_days_for_hemisphere.return_value = [pd.Period(self.target_date, 'D')]
        file_list = self.file_list[1:2]
        mock_daily_file_path.return_value = file_list
        gridset = {'data': self.target_grid,
                   'metadata': {'files': file_list}}
        mock__concentration_gridset_by_filelist.return_value = gridset
        actual = getter.concentration_daily(nt.NORTH,
                                            self.target_date,
                                            ['/who/cares'],
                                            interpolation_radius=interpolation_radius)
        expected_grid = self.target_grid
        npt.assert_array_equal(actual['data'], expected_grid)
        expected_files = self.file_list[1:2]
        self.assertEqual(actual['metadata']['files'], expected_files)
class Test_concentration_monthly(unittest.TestCase):
    """Tests for getter.concentration_monthly: choice of monthly vs. daily
    source files, the minimum-days threshold, and year/month validation."""

    @patch('seaice.data.getter._concentration_gridset_by_filelist')
    @patch('seaice.data.getter.empty_gridset')
    @patch('seaice.data.locator.all_daily_file_paths_for_month')
    @patch('seaice.data.locator.monthly_file_path')
    def test_monthly_gets_data_when_at_least_twenty_days_present(
            self,
            mock_monthly_file_path,
            mock_all_daily_file_paths_for_month,
            mock_empty_gridset,
            _mockgridset_by_filelist
    ):
        """With 20 daily files available, the final monthly file is used."""
        locator.all_daily_file_paths_for_month.return_value = ['nt_20120901_f08_v01_n.bin'] * 20
        locator.monthly_file_path.return_value = 'nt_201209_f08_v01_n.bin'
        getter.empty_gridset.return_value = None
        getter._concentration_gridset_by_filelist.return_value = {
            'data': np.ma.array([1, 2]),
            'metadata': {}
        }
        year = 2012
        month = 9
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        getter.concentration_monthly(hemisphere, year, month, search_paths)
        getter._concentration_gridset_by_filelist.assert_called_with(['nt_201209_f08_v01_n.bin'])

    @patch('seaice.data.getter._concentration_gridset_by_filelist')
    @patch('seaice.data.getter.empty_gridset')
    @patch('seaice.data.locator.all_daily_file_paths_for_month')
    @patch('seaice.data.locator.monthly_file_path')
    def test_monthly_gets_data_when_more_than_twenty_files_present_simmr(
            self,
            mock_monthly_file_path,
            mock_all_daily_file_paths_for_month,
            mock_empty_gridset,
            _mockgridset_by_filelist
    ):
        """Same monthly-file path holds for the SMMR era (1978-11)."""
        locator.all_daily_file_paths_for_month.return_value = ['nt_19781101_n07_v01_n.bin'] * 20
        locator.monthly_file_path.return_value = 'nt_197811_n07_v01_n.bin'
        getter.empty_gridset.return_value = None
        getter._concentration_gridset_by_filelist.return_value = {
            'data': np.ma.array([1, 2]),
            'metadata': {}
        }
        year = 1978
        month = 11
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        actual = getter.concentration_monthly(hemisphere, year, month, search_paths)
        getter._concentration_gridset_by_filelist.assert_called_with(['nt_197811_n07_v01_n.bin'])
        npt.assert_array_equal(actual['data'], np.ma.array([1, 2]))

    @patch('seaice.data.getter._concentration_gridset_by_filelist')
    @patch('seaice.data.getter.empty_gridset')
    @patch('seaice.data.locator.all_daily_file_paths_for_month')
    @patch('seaice.data.locator.monthly_file_path')
    def test_monthly_uses_daily_for_nrt(
            self,
            mock_monthly_file_path,
            mock_all_daily_file_paths_for_month,
            mock_empty_gridset,
            _mockgridset_by_filelist
    ):
        """Without a final monthly file, daily files are averaged (NRT)."""
        daily_files = ['nt_20120915_f08_v01_n.bin'] * 20
        locator.all_daily_file_paths_for_month.return_value = daily_files
        locator.monthly_file_path.return_value = None
        getter.empty_gridset.return_value = None
        day1_grid = np.ma.array([[10., 30.], [50., 60.]])
        day2_grid = np.ma.array([[20., 50.], [80., 100.]])
        getter._concentration_gridset_by_filelist.return_value = {
            'data': np.ma.dstack([day1_grid, day2_grid]),
            'metadata': {'missing_value': 255., 'valid_data_range': (0., 100.)}
        }
        year = 1979
        month = 3
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        actual = getter.concentration_monthly(hemisphere, year, month, search_paths)
        # expected: per-cell mean of the two daily grids
        expected = np.ma.array([[15., 40.], [65., 80.]])
        getter._concentration_gridset_by_filelist.assert_called_with(daily_files)
        npt.assert_array_equal(expected, actual['data'])

    @patch('seaice.data.getter.empty_gridset')
    @patch('seaice.data.locator.all_daily_file_paths_for_month')
    @patch('seaice.data.locator.monthly_file_path')
    def test_monthly_under_threshold_empty_grid(self, mock_monthly_file_path,
                                                mock_all_daily_file_paths_for_month,
                                                mock_empty_gridset):
        """Too few daily files => empty monthly ('M') gridset is requested."""
        locator.all_daily_file_paths_for_month.return_value = []
        locator.monthly_file_path.return_value = 'nt_201209_f08_v01_n.bin'
        getter.empty_gridset.return_value = None
        year = 2012
        month = 9
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        getter.concentration_monthly(hemisphere, year, month, search_paths)
        getter.empty_gridset.assert_called_with((448, 304), 'M')

    @patch('seaice.data.getter.empty_gridset')
    @patch('seaice.data.locator.all_daily_file_paths_for_month')
    @patch('seaice.data.locator.monthly_file_path')
    def test_monthly_missing_empty_grid(self, mock_monthly_file_path,
                                        mock_all_daily_file_paths_for_month,
                                        mock_empty_gridset):
        """No monthly file and no daily files => empty monthly gridset."""
        locator.all_daily_file_paths_for_month.return_value = []
        locator.monthly_file_path.return_value = None
        getter.empty_gridset.return_value = None
        year = 2012
        month = 9
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        getter.concentration_monthly(hemisphere, year, month, search_paths)
        getter.empty_gridset.assert_called_with((448, 304), 'M')

    @patch('seaice.nasateam.LAST_DAY_WITH_VALID_FINAL_DATA', date(2005, 4, 30))
    @patch('seaice.data.getter._concentration_average_gridset_from_daily_filelist')
    @patch('seaice.data.getter._concentration_gridset_by_filelist')
    @patch('seaice.data.getter.double_weight_smmr_files')
    @patch('seaice.data.locator.all_daily_file_paths_for_month')
    @patch('seaice.data.locator.monthly_file_path')
    def test_monthly_uses_daily_when_final_month_is_outside_of_valid_final_data(
            self,
            mock_monthly_file_path,
            mock_all_daily_file_paths_for_month,
            mock_double_weight_smmr_files,
            mock__concentration_gridset_by_filelist,
            mock__concentration_average_gridset_from_daily_filelist
    ):
        """A month past LAST_DAY_WITH_VALID_FINAL_DATA averages daily files
        even though a final monthly file exists."""
        daily_files = ['some', 'daily', 'files']
        mock_monthly_file_path.return_value = ['final_monthly_file']
        mock_all_daily_file_paths_for_month.return_value = daily_files
        mock_double_weight_smmr_files.return_value = daily_files
        mock__concentration_gridset_by_filelist.return_value = {'data': np.array([]),
                                                                'metadata': {}}
        hemisphere = nt.NORTH
        year = 2005
        month = 5
        search_paths = ['wherever']
        getter.concentration_monthly(hemisphere, year, month, search_paths, 3)
        # technically _concentration_gridset_by_filelist is called by
        # _concentration_average_gridset_from_daily_filelist, but here they are
        # both mocked, so they return right away and we can only worry about
        # which of these two functions concentration_monthly() calls directly
        getter._concentration_gridset_by_filelist.assert_not_called()
        getter._concentration_average_gridset_from_daily_filelist.assert_called_with(daily_files)

    @mock_today(1995, 11, 24)
    @raises(YearMonthOutOfRangeError)
    def test_monthly_throws_error_for_current_month(self):
        getter.concentration_monthly(nt.NORTH, 1995, 11, ['/who/cares'])

    @mock_today(2014, 11, 24)
    @raises(YearMonthOutOfRangeError)
    def test_monthly_throws_error_for_future_month(self):
        getter.concentration_monthly(nt.NORTH, 2014, 12, ['/who/cares'])

    @mock_today(2014, 11, 24)
    def test_monthly_works_with_last_month(self):
        actual = getter.concentration_monthly(nt.NORTH, 2014, 10, ['/who/cares'])
        assert_equals(actual['data'].shape, (448, 304))

    def test_monthly_works_with_october_1978(self):
        actual = getter.concentration_monthly(nt.NORTH, 1978, 10, ['/who/cares'])
        assert_equals(actual['data'].shape, (448, 304))

    @raises(YearMonthOutOfRangeError)
    def test_monthly_throws_error_before_october_1978(self):
        getter.concentration_monthly(nt.NORTH, 1978, 9, ['/who/cares'])
class Test_concentration_seasonal(unittest.TestCase):
    """Tests for getter.concentration_seasonal: averaging three monthly
    gridsets into one seasonal gridset, with flag/missing handling."""

    @patch('seaice.data.getter.concentration_monthly')
    def test_metadata(self, _mock_concentration_monthly):
        """Seasonal metadata aggregates the three months' file lists."""
        getter.concentration_monthly.side_effect = [
            {
                'data': np.ma.array([]),
                'metadata': {'files': ['nt_201209_f08_v01_n.bin']}
            },
            {
                'data': np.ma.array([]),
                'metadata': {'files': ['nt_201210_f08_v01_n.bin']}
            },
            {
                'data': np.ma.array([]),
                'metadata': {'files': ['nt_201211_f08_v01_n.bin']}
            }
        ]
        year = 2012
        months = (9, 10, 11)
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        actual = getter.concentration_seasonal(hemisphere, year, months, search_paths)
        expected_metadata = {
            'files': [['nt_201209_f08_v01_n.bin'],
                      ['nt_201210_f08_v01_n.bin'],
                      ['nt_201211_f08_v01_n.bin']],
            'temporality': 'seasonal',
            'hemi': 'N',
            'season': (2012, (9, 10, 11)),
            'search_paths': ['wherever'],
            'valid_data_range': (0.0, 100.0),
            'missing_value': 255,
            'flags': {
                'pole': 251,
                'unused': 252,
                'coast': 253,
                'land': 254
            }
        }
        for key, expected_value in expected_metadata.items():
            self.assertEqual(actual['metadata'][key], expected_value)

    @patch('seaice.data.getter.concentration_monthly')
    def test_averages_monthly_data(self, _mock_concentration_monthly):
        """Seasonal data is the per-cell mean of the three monthly grids."""
        getter.concentration_monthly.side_effect = [
            {
                'data': np.ma.array([[5, 7],
                                     [5, 7]]),
                'metadata': {'files': []}
            },
            {
                'data': np.ma.array([[9, 3.5],
                                     [9, 3.5]]),
                'metadata': {'files': []}
            },
            {
                'data': np.ma.array([[10, 6],
                                     [10, 6]]),
                'metadata': {'files': []}
            }
        ]
        expected_data = np.array([[8, 5.5],
                                  [8, 5.5]])
        year = 2012
        months = (9, 10, 11)
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        actual = getter.concentration_seasonal(hemisphere, year, months, search_paths)
        getter.concentration_monthly.assert_any_call(
            nt.NORTH, 2012, 9, ['wherever'], 20
        )
        getter.concentration_monthly.assert_any_call(
            nt.NORTH, 2012, 10, ['wherever'], 20
        )
        getter.concentration_monthly.assert_any_call(
            nt.NORTH, 2012, 11, ['wherever'], 20
        )
        npt.assert_array_equal(actual['data'], expected_data)

    @patch('seaice.data.getter.concentration_monthly')
    def test_uses_december_from_previous_year(self, _mock_concentration_monthly):
        """A (12, 1, 2) winter season pulls December from year - 1."""
        getter.concentration_monthly.return_value = {
            'data': np.ma.array([[]]),
            'metadata': {'files': []}
        }
        year = 2012
        months = (12, 1, 2)
        hemisphere = nt.SOUTH
        search_paths = ['wherever']
        min_days_for_valid_month = 20
        getter.concentration_seasonal(hemisphere,
                                      year,
                                      months,
                                      search_paths,
                                      min_days_for_valid_month)
        getter.concentration_monthly.assert_any_call(
            nt.SOUTH, 2011, 12, ['wherever'], 20
        )
        getter.concentration_monthly.assert_any_call(
            nt.SOUTH, 2012, 1, ['wherever'], 20
        )
        getter.concentration_monthly.assert_any_call(
            nt.SOUTH, 2012, 2, ['wherever'], 20
        )

    @patch('seaice.data.getter.concentration_monthly')
    def test_does_not_average_missing_but_fills_with_flags(self, _mock_concentration_monthly):
        """Missing (255) months are skipped in the average; flag values
        (e.g. pole hole 251) pass through unchanged."""
        getter.concentration_monthly.side_effect = [
            {
                'data': np.ma.array([[255, 255, 255]]),
                'metadata': {'files': []}
            },
            {
                'data': np.ma.array([[9, 5, 251]]),
                'metadata': {'files': []}
            },
            {
                'data': np.ma.array([[10, 6, 251]]),
                'metadata': {'files': []}
            }
        ]
        year = 2012
        months = (9, 10, 11)
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        expected_data = np.array([[9.5, 5.5, 251]])
        actual = getter.concentration_seasonal(hemisphere, year, months, search_paths)
        npt.assert_array_equal(actual['data'], expected_data)

    @patch('seaice.data.getter.concentration_monthly')
    def test_takes_values_from_one_month_if_others_are_missing(
            self,
            _mock_concentration_monthly
    ):
        """With two months fully missing, the remaining month's values win."""
        getter.concentration_monthly.side_effect = [
            {
                'data': np.ma.array([[255, 255, 255]]),
                'metadata': {'files': []}
            },
            {
                'data': np.ma.array([[255, 255, 255]]),
                'metadata': {'files': []}
            },
            {
                'data': np.ma.array([[10, 6, 7]]),
                'metadata': {'files': []}
            }
        ]
        year = 1988
        months = (12, 1, 2)
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        expected_data = np.array([[10, 6, 7]])
        actual = getter.concentration_seasonal(hemisphere, year, months, search_paths)
        getter.concentration_monthly.assert_any_call(
            nt.NORTH, 1987, 12, ['wherever'], 20
        )
        getter.concentration_monthly.assert_any_call(
            nt.NORTH, 1988, 1, ['wherever'], 20
        )
        getter.concentration_monthly.assert_any_call(
            nt.NORTH, 1988, 2, ['wherever'], 20
        )
        npt.assert_array_equal(actual['data'], expected_data)
class Test_concentration_seasonal_over_years(unittest.TestCase):
    """Tests for getter.concentration_seasonal_over_years: one seasonal
    gridset per year, stacked along the third axis, metadata merged."""

    @patch('seaice.data.getter.concentration_seasonal')
    def test_calls_concentration_seasonal_for_every_year_inclusive(
            self,
            _mock_concentration_seasonal
    ):
        """Every year from start_year through end_year (inclusive) is fetched."""
        months = (12, 1, 2)
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        min_valid_days = 20
        years = [1980, 1981, 1982, 1983, 1984,
                 1985, 1986, 1987, 1988, 1989,
                 1990, 1991, 1992, 1993, 1994,
                 1995, 1996, 1997, 1998, 1999,
                 2000]
        start_year = years[0]
        end_year = years[-1]
        getter.concentration_seasonal_over_years(
            hemisphere, start_year, end_year, months, search_paths, min_valid_days
        )
        for year in years:
            getter.concentration_seasonal.assert_any_call(
                hemisphere,
                year,
                months,
                search_paths,
                min_valid_days
            )

    @patch('seaice.data.getter.concentration_seasonal')
    def test_data_from_each_season_is_stacked(self, _mock_concentration_seasonal):
        """Per-year seasonal grids are depth-stacked into a single cube."""
        grid0 = np.array([[1, 1],
                          [2, 2]])
        grid1 = np.array([[2, 9],
                          [3, 7]])
        grid2 = np.array([[4, 9],
                          [3, 5]])
        getter.concentration_seasonal.side_effect = [
            {
                'data': grid0,
                'metadata': {'files': [],
                             'valid_data_range': (),
                             'flags': {},
                             'missing_value': None}
            },
            {
                'data': grid1,
                'metadata': {'files': [],
                             'valid_data_range': (),
                             'flags': {},
                             'missing_value': None}
            },
            {
                'data': grid2,
                'metadata': {'files': [],
                             'valid_data_range': (),
                             'flags': {},
                             'missing_value': None}
            }
        ]
        months = (3, 4, 5)
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        min_valid_days = 20
        start_year = 1980
        end_year = 1982
        actual = getter.concentration_seasonal_over_years(
            hemisphere, start_year, end_year, months, search_paths, min_valid_days
        )
        expected_data = np.dstack([grid0, grid1, grid2])
        npt.assert_array_equal(actual['data'], expected_data)

    @patch('seaice.data.getter.concentration_seasonal')
    def test_metadata(self, _mock_concentration_seasonal):
        """Merged metadata keeps files per year plus the shared flag/range
        fields; per-season fields (temporality, season, ...) are dropped."""
        the_grid = np.array([[0, 0],
                             [0, 0]])
        getter.concentration_seasonal.side_effect = [
            {
                'data': the_grid,
                'metadata': {
                    'files': ['file0'],
                    'temporality': 'seasonal',
                    'hemi': 'N',
                    'season': (2012, (9, 10, 11)),
                    'search_paths': ['wherever'],
                    'valid_data_range': (0.0, 100.0),
                    'missing_value': 255,
                    'flags': {
                        'pole': 251,
                        'unused': 252,
                        'coast': 253,
                        'land': 254
                    }
                }
            },
            {
                'data': the_grid,
                'metadata': {
                    'files': ['file1'],
                    'temporality': 'seasonal',
                    'hemi': 'N',
                    'season': (2012, (9, 10, 11)),
                    'search_paths': ['wherever'],
                    'valid_data_range': (0.0, 100.0),
                    'missing_value': 255,
                    'flags': {
                        'pole': 251,
                        'unused': 252,
                        'coast': 253,
                        'land': 254
                    }
                }
            },
            {
                'data': the_grid,
                'metadata': {
                    'files': ['file2'],
                    'temporality': 'seasonal',
                    'hemi': 'N',
                    'season': (2012, (9, 10, 11)),
                    'search_paths': ['wherever'],
                    'valid_data_range': (0.0, 100.0),
                    'missing_value': 255,
                    'flags': {
                        'pole': 251,
                        'unused': 252,
                        'coast': 253,
                        'land': 254
                    }
                }
            }
        ]
        months = (3, 4, 5)
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        min_valid_days = 20
        start_year = 1980
        end_year = 1982
        actual = getter.concentration_seasonal_over_years(
            hemisphere, start_year, end_year, months, search_paths, min_valid_days
        )
        expected_metadata = {
            'files': [['file0'], ['file1'], ['file2']],
            'flags': {
                'pole': 251,
                'unused': 252,
                'coast': 253,
                'land': 254
            },
            'valid_data_range': (0.0, 100.0),
            'missing_value': 255,
        }
        self.assertEqual(actual['metadata'], expected_metadata)
class Test_extent_daily_median(unittest.TestCase):
    """Tests for getter.extent_daily_median: median extent for one day of
    year across a range of years."""

    @patch('seaice.datastore.get_bad_days_for_hemisphere')
    @patch('seaice.data.getter.concentration_daily')
    def test_extent_daily_median_calls_daily_once_per_year(self, mock_concentration_daily,
                                                           mock_get_bad_days):
        mock_get_bad_days.return_value = []
        hemi = nt.NORTH
        start_year = 1981
        end_year = 1983
        dayofyear = 7
        mock_concentration_daily.return_value = GRIDSET_STUB
        getter.extent_daily_median(hemi, start_year, end_year, dayofyear,
                                   search_paths=TEST_DATA, interpolation_radius=0)
        for year in [1981, 1982, 1983]:
            getter.concentration_daily.assert_any_call(nt.NORTH, dt.date(year, 1, 7),
                                                       TEST_DATA, 0)

    @patch('seaice.datastore.get_bad_days_for_hemisphere')
    @patch('seaice.data.getter.concentration_daily')
    def test_extent_daily_median_passes_all_parameters(self, mock_concentration_daily,
                                                       mock_get_bad_days):
        # NOTE(review): body is identical to the previous test -- presumably
        # one of the two was meant to assert something different; confirm.
        mock_get_bad_days.return_value = []
        hemi = nt.NORTH
        start_year = 1981
        end_year = 1983
        dayofyear = 7
        mock_concentration_daily.return_value = GRIDSET_STUB
        getter.extent_daily_median(hemi, start_year, end_year, dayofyear,
                                   search_paths=TEST_DATA, interpolation_radius=0)
        for year in [1981, 1982, 1983]:
            getter.concentration_daily.assert_any_call(nt.NORTH, dt.date(year, 1, 7), TEST_DATA, 0)

    @patch('seaice.datastore.get_bad_days_for_hemisphere')
    @patch('seaice.data.getter.concentration_daily')
    def test_extent_daily_median_handles_doy_366(self, mock_concentration_daily,
                                                 mock_get_bad_days):
        mock_get_bad_days.return_value = []
        hemi = nt.NORTH
        start_year = 2000
        end_year = 2001
        dayofyear = 366
        mock_concentration_daily.return_value = GRIDSET_STUB
        getter.extent_daily_median(hemi, start_year, end_year, dayofyear=dayofyear,
                                   search_paths=TEST_DATA, interpolation_radius=0)
        # day 366 of a leap year is Dec 31
        getter.concentration_daily.assert_any_call(nt.NORTH, dt.date(2000, 12, 31), TEST_DATA, 0)
        # "day 366" of a non-leap year is Jan 1 of the next year
        getter.concentration_daily.assert_any_call(nt.NORTH, dt.date(2002, 1, 1), TEST_DATA, 0)

    @patch('seaice.data.getter.concentration_daily')
    def test_extent_daily_median_returns_grid(self, mock_concentration_daily):
        """The median collapses the per-year stack back to a 2D grid."""
        hemi = nt.NORTH
        start_year = 1981
        end_year = 1983
        dayofyear = 7
        day_grid = np.zeros(nt.NORTH['shape'])
        gridset = copy.deepcopy(GRIDSET_STUB)
        gridset['data'] = day_grid
        mock_concentration_daily.return_value = gridset
        actual = getter.extent_daily_median(hemi, start_year, end_year, dayofyear,
                                            search_paths=TEST_DATA, interpolation_radius=0,
                                            allow_bad_dates=True)
        rows, cols = nt.NORTH['shape']
        expected = (rows, cols)
        assert_equals(expected, actual['data'].shape)

    @patch('seaice.data.getter.concentration_daily')
    def test_extent_daily_median_metadata(self, mock_daily):
        """Metadata collects years, dayofyear, and per-year file lists.

        NOTE(review): pandas.util.testing (pdt) was removed in pandas 2.0;
        use pandas.testing instead when upgrading.
        """
        hemi = nt.NORTH
        start_year = 1981
        end_year = 1983
        dayofyear = 7
        file_1981 = ['anyroot/nt_19810107_f17_v1.1_n.bin']
        file_1982 = ['anyroot/nt_19820107_f17_v1.1_n.bin']
        file_1983 = ['anyroot/nt_19830107_f17_v1.1_n.bin']
        gridsets = []
        for filelist in [file_1981, file_1982, file_1983]:
            gridset = copy.deepcopy(GRIDSET_STUB)
            gridset['metadata']['files'] = filelist
            gridsets.append(gridset)
        getter.concentration_daily.side_effect = gridsets
        actual = getter.extent_daily_median(hemi, start_year, end_year, dayofyear,
                                            search_paths=TEST_DATA, interpolation_radius=0,
                                            allow_bad_dates=True)
        expected = {'years': [1981, 1982, 1983], 'dayofyear': 7,
                    'files': [file_1981, file_1982, file_1983],
                    'period_index': [pd.PeriodIndex([], freq='D')] * 3}
        for key in ['years', 'dayofyear', 'files']:
            self.assertEqual(expected[key], actual['metadata'][key])
        for index, expected in enumerate(expected['period_index']):
            pdt.assert_index_equal(expected, actual['metadata']['period_index'][index])
class Test_extent_monthly_median(unittest.TestCase):
    """Tests for getter.extent_monthly_median."""

    @patch('seaice.data.getter.concentration_monthly')
    def test_extent_monthly_median_calls_concentration_monthly_once_per_year(
            self, mock_concentration_monthly):
        """concentration_monthly is fetched once for every year in the range."""
        hemi = nt.NORTH
        start_year = 1981
        end_year = 1983
        month = 1
        extent_threshold = 50
        month_grid = np.zeros(nt.NORTH['shape'])
        # deepcopy so the shared module-level stub is not mutated between tests
        gridset = copy.deepcopy(GRIDSET_STUB)
        gridset['data'] = month_grid
        mock_concentration_monthly.return_value = gridset
        getter.extent_monthly_median(hemi, start_year, end_year, month,
                                     search_paths=TEST_DATA,
                                     extent_threshold=extent_threshold)
        for year in [1981, 1982, 1983]:
            getter.concentration_monthly.assert_any_call(nt.NORTH, year, month, TEST_DATA,
                                                         nt.MINIMUM_DAYS_FOR_VALID_MONTH)

    @patch('seaice.data.getter.concentration_monthly')
    def test_extent_monthly_median_returns_grid(self, mock_concentration_monthly):
        """The returned gridset's data has the hemisphere's 2D shape."""
        hemi = nt.NORTH
        start_year = 1981
        end_year = 1983
        month = 1
        extent_threshold = 51
        month_grid = np.zeros(nt.NORTH['shape'])
        # deepcopy so the shared module-level stub is not mutated between tests
        gridset = copy.deepcopy(GRIDSET_STUB)
        gridset['data'] = month_grid
        mock_concentration_monthly.return_value = gridset
        actual = getter.extent_monthly_median(hemi, start_year, end_year, month,
                                              search_paths=TEST_DATA,
                                              extent_threshold=extent_threshold)
        rows, cols = nt.NORTH['shape']
        expected = (rows, cols)
        assert_equals(expected, actual['data'].shape)

    @patch('seaice.data.getter.concentration_monthly')
    def test_extent_monthly_median_metadata(self, mock_concentration_monthly):
        """Metadata records month, years, per-year files, flags and missing value."""
        hemi = nt.NORTH
        start_year = 1981
        end_year = 1983
        month = 1
        extent_threshold = 50
        file_1981 = ['anyroot/nt_198101_f17_v1.1_n.bin']
        file_1982 = ['anyroot/nt_198201_f17_v1.1_n.bin']
        file_1983 = ['anyroot/nt_198301_f17_v1.1_n.bin']
        gridsets = []
        for filelist in [file_1981, file_1982, file_1983]:
            gridset = copy.deepcopy(GRIDSET_STUB)
            gridset['metadata']['files'] = filelist
            gridsets.append(gridset)
        getter.concentration_monthly.side_effect = gridsets
        actual = getter.extent_monthly_median(hemi, start_year, end_year, month,
                                              search_paths=TEST_DATA,
                                              extent_threshold=extent_threshold)
        expected = {'month': 1,
                    'years': [1981, 1982, 1983],
                    'files': [file_1981, file_1982, file_1983],
                    'valid_data_range': (0, 1),
                    # each flag listed exactly once (the original literal repeated 'coast')
                    'flags': {'coast': 253, 'land': 254, 'unused': 252, 'pole': 251},
                    'missing_value': 255}
        assert_equals(expected, actual['metadata'])
class Test__period_index_from_file_list(unittest.TestCase):
    """Tests for getter._period_index_from_file_list."""

    def test_daily_files(self):
        """Daily filenames yield a daily-frequency PeriodIndex."""
        files = [NORTH_DAILY_FILE, SOUTH_DAILY_FILE]
        result = getter._period_index_from_file_list(files)
        pdt.assert_index_equal(result,
                               pd.PeriodIndex(['2001-01-07', '1987-11-18'], freq='D'))

    def test_monthly_files(self):
        """Monthly filenames yield a monthly-frequency PeriodIndex."""
        files = ['nt_200101_f08_v01_s.bin', 'nt_198711_f08_v01_s.bin']
        result = getter._period_index_from_file_list(files)
        pdt.assert_index_equal(result,
                               pd.PeriodIndex(['2001-01', '1987-11'], freq='M'))
class Test__concentration_gridset_by_filelist(unittest.TestCase):
    """Tests for getter._concentration_gridset_by_filelist."""

    def test_gridset_by_filelist_south_with_two_files(self):
        """Two southern daily files stack into a (332, 316, 2) cube with matching metadata."""
        file_list = [SOUTH_DAILY_FILE, SOUTH_DAILY_FILE]
        gridset = getter._concentration_gridset_by_filelist(file_list)
        assert_equals(gridset['data'].shape, (332, 316, 2))
        assert_equals(gridset['metadata']['files'], file_list)
        pdt.assert_index_equal(gridset['metadata']['period_index'],
                               pd.PeriodIndex(['1987-11-18', '1987-11-18'], freq='D'))

    def test_gridset_by_filelist_north_with_one_file(self):
        """A single northern daily file yields a 2D (448, 304) grid."""
        gridset = getter._concentration_gridset_by_filelist([NORTH_DAILY_FILE])
        assert_equals(gridset['data'].shape, (448, 304))
        pdt.assert_index_equal(gridset['metadata']['period_index'],
                               pd.PeriodIndex(['2001-01-07'], freq='D'))
class Test__concentration_average_gridset_from_daily_filelist(unittest.TestCase):
    """Tests for getter._concentration_average_gridset_from_daily_filelist."""

    @staticmethod
    def _gridset_with(cube):
        # Wrap a data cube in the minimal gridset structure the averager expects.
        return {'data': cube, 'metadata': {'missing_value': 255.,
                                           'valid_data_range': (0., 100.)}}

    @patch('seaice.data.getter._concentration_gridset_by_filelist')
    def test_retains_flagged_values(self, mocked_concentration_gridset_by_filelist):
        """A flag value (251) present in every layer survives the averaging."""
        layer = np.ma.array([[251, 20],
                             [30, 40]])
        cube = np.ma.dstack((layer, layer, layer))
        mocked_concentration_gridset_by_filelist.return_value = self._gridset_with(cube)
        expected = copy.deepcopy(layer)
        actual = getter._concentration_average_gridset_from_daily_filelist(['file_list'])
        npt.assert_array_equal(expected, actual['data'])
        npt.assert_array_equal(expected.data, actual['data'].data)

    @patch('seaice.data.getter._concentration_gridset_by_filelist')
    def test_retains_flagged_values_with_missing(self, mocked_concentration_gridset_by_filelist):
        """One missing value among valid layers does not disturb the other cells."""
        layer = np.ma.array([[251, 20],
                             [30, 40]])
        layer_with_missing = np.ma.array([[251, 255.],
                                          [30, 40]])
        cube = np.ma.dstack((layer, layer_with_missing, layer))
        mocked_concentration_gridset_by_filelist.return_value = self._gridset_with(cube)
        expected = copy.deepcopy(layer)
        actual = getter._concentration_average_gridset_from_daily_filelist(['file_list'])
        npt.assert_array_equal(expected, actual['data'])

    @patch('seaice.data.getter._concentration_gridset_by_filelist')
    def test_flagged_values_become_missing_with_missing_flag(
            self, mocked_concentration_gridset_by_filelist):
        """A cell flagged in one layer but missing in another averages to missing."""
        flagged = np.array([[251, 20],
                            [30, 40]])
        with_missing = np.array([[255., 20.],
                                 [30, 40]])
        cube = np.ma.dstack((flagged, with_missing, flagged))
        mocked_concentration_gridset_by_filelist.return_value = self._gridset_with(cube)
        expected = np.array([[255, 20],
                             [30, 40]])
        actual = getter._concentration_average_gridset_from_daily_filelist(['file_list'])
        npt.assert_array_equal(expected, actual['data'])
        npt.assert_array_equal(expected.data, actual['data'].data)

    @patch('seaice.data.getter._concentration_gridset_by_filelist')
    def test_retains_missing_values(self, mocked_concentration_gridset_by_filelist):
        """A cell that is missing in every layer stays missing."""
        missing_ints = np.array([[255, 20],
                                 [30, 40]])
        missing_floats = np.array([[255., 20.],
                                   [30, 40]])
        cube = np.ma.dstack((missing_ints, missing_floats, missing_ints))
        mocked_concentration_gridset_by_filelist.return_value = self._gridset_with(cube)
        expected = np.array([[255, 20],
                             [30, 40]])
        actual = getter._concentration_average_gridset_from_daily_filelist(['file_list'])
        npt.assert_array_equal(expected, actual['data'])
        npt.assert_array_equal(expected.data, actual['data'].data)
class Test_double_weight_smmr_files(unittest.TestCase):
    """Tests for getter.double_weight_smmr_files."""

    def test_does_not_affect_non_n07(self):
        """A path list without SMMR (n07) files comes back unchanged."""
        paths = ['anyroot/nt_198101_f17_v1.1_n.bin', 'anyroot/nt_198201_f17_v1.1_n.bin']
        self.assertEqual(getter.double_weight_smmr_files(paths), paths)

    def test_adds_repeat_of_n07_files(self):
        """Each SMMR (n07) file gets an extra copy prepended, doubling its weight."""
        smmr_file = 'anyroot/nt_198101_n07_v1.1_n.bin'
        paths = [smmr_file, 'anyroot/nt_198201_f17_v1.1_n.bin']
        self.assertEqual(getter.double_weight_smmr_files(paths), [smmr_file] + paths)
class Test_empty_gridset(unittest.TestCase):
    """Tests for getter.empty_gridset."""

    def test_empty_grid_daily(self):
        """A daily empty gridset is all 255s with the requested shape and 'D' temporality."""
        shape = (127, 523)
        gridset = getter.empty_gridset(shape, 'D')
        assert_equals(shape, gridset['data'].shape)
        assert_equals(gridset['metadata']['empty_gridset'], True)
        assert_true(np.all(gridset['data'] == 255.))
        self.assertEqual(gridset['metadata']['temporality'], 'D')

    def test_empty_grid_monthly(self):
        """A monthly empty gridset is all 255s with the requested shape and 'M' temporality."""
        shape = (127, 523)
        gridset = getter.empty_gridset(shape, 'M')
        assert_equals(shape, gridset['data'].shape)
        assert_equals(gridset['metadata']['empty_gridset'], True)
        assert_true(np.all(gridset['data'] == 255.))
        self.assertEqual(gridset['metadata']['temporality'], 'M')
class Test__extent_median(unittest.TestCase):
    """Tests for getter._extent_median."""

    def test_counts_ice_when_ice_fifty_percent_of_time(self):
        """A cell icy in at least half of the layers is ice in the median."""
        layers = np.dstack([np.array([[OCEAN, OCEAN],
                                      [OCEAN, ICE]]),
                            np.array([[OCEAN, OCEAN],
                                      [ICE, ICE]])])
        result = getter._extent_median(layers)
        npt.assert_array_equal(result, np.array([[OCEAN, OCEAN],
                                                 [ICE, ICE]]))

    def test_always_land_or_missing_becomes_land(self):
        """A cell that is never valid data (land or missing in every layer) becomes land."""
        layers = np.dstack([np.array([[OCEAN, LAND],
                                      [OCEAN, OCEAN]]),
                            np.array([[OCEAN, MISSING],
                                      [OCEAN, OCEAN]])])
        result = getter._extent_median(layers)
        npt.assert_array_equal(result, np.array([[OCEAN, LAND],
                                                 [OCEAN, OCEAN]]))

    def test_always_missing_becomes_land(self):
        """A cell missing in every layer becomes land."""
        layers = np.dstack([np.array([[OCEAN, MISSING],
                                      [OCEAN, OCEAN]]),
                            np.array([[OCEAN, MISSING],
                                      [OCEAN, OCEAN]])])
        result = getter._extent_median(layers)
        npt.assert_array_equal(result, np.array([[OCEAN, LAND],
                                                 [OCEAN, OCEAN]]))
class Test__flag_layer_from_cube(unittest.TestCase):
    """Tests for getter.flag_layer_from_cube.

    Each test stacks 2x2 masked grids into a (rows, cols, layers) cube and
    checks which flag values (e.g. 251, which appears to be the pole-hole
    flag) survive into the single collapsed flag layer, and which cells end
    up masked.  Several tests assert only on the mask, because the data
    value of a masked cell is irrelevant.
    """
    # Filler for input cells whose value is irrelevant (they are masked).
    anything = 123.528
    # Filler for expected-output cells whose value is irrelevant (masked).
    ignored = 825.321
    def test_with_single_layer(self):
        """An unmasked flag in the only layer is kept; masked cells stay masked."""
        grid1 = np.ma.array([[251, self.anything],
                             [self.anything, self.anything]],
                            mask=[[False, True],
                                  [True, True]])
        flag_cube = np.ma.dstack([grid1])
        actual = getter.flag_layer_from_cube(flag_cube)
        expected = np.ma.array([[251, self.ignored],
                                [self.ignored, self.ignored]],
                               mask=[[False, True],
                                     [True, True]])
        npt.assert_array_equal(expected, actual)
        npt.assert_array_equal(expected.mask, actual.mask)
    def test_with_single_layer_from_2d_gridset(self):
        """A squeezed (2D) cube is handled the same as a one-layer 3D cube."""
        grid1 = np.ma.array([[251, self.anything],
                             [self.anything, self.anything]],
                            mask=[[False, True],
                                  [True, True]])
        flag_cube = np.ma.dstack([grid1])
        # squeeze drops the layer axis, leaving a plain 2D masked grid
        flag_cube = np.ma.squeeze(flag_cube)
        actual = getter.flag_layer_from_cube(flag_cube)
        expected = np.ma.array([[251, self.ignored],
                                [self.ignored, self.ignored]],
                               mask=[[False, True],
                                     [True, True]])
        npt.assert_array_equal(expected, actual)
        npt.assert_array_equal(expected.mask, actual.mask)
    def test_with_multiple_layers_same_flags(self):
        """A flag present in every layer is kept in the collapsed layer."""
        grid1 = np.ma.array([[251, self.anything],
                             [self.anything, self.anything]],
                            mask=[[False, True],
                                  [True, True]])
        grid2 = np.ma.array([[251, self.anything],
                             [self.anything, self.anything]],
                            mask=[[False, True],
                                  [True, True]])
        flag_cube = np.ma.dstack([grid1, grid2])
        actual = getter.flag_layer_from_cube(flag_cube)
        expected = np.ma.array([[251, self.ignored],
                                [self.ignored, self.ignored]],
                               mask=[[False, True],
                                     [True, True]])
        npt.assert_array_equal(expected, actual)
        npt.assert_array_equal(expected.mask, actual.mask)
    def test_multiple_layers_with_flag_and_missing(self):
        """A cell flagged in one layer but masked-missing in another is masked out.

        Only the mask is asserted here; the masked cells' values are
        irrelevant.
        """
        grid1 = np.ma.array([[251, self.anything],
                             [self.anything, self.anything]],
                            mask=[[False, True],
                                  [True, True]])
        grid2 = np.ma.array([[255, self.anything],
                             [self.anything, self.anything]],
                            mask=[[True, True],
                                  [True, True]])
        flag_cube = np.ma.dstack([grid1, grid2])
        actual = getter.flag_layer_from_cube(flag_cube)
        # When a value that was flagged, gets a missing value in a different
        # layer we know that we have a shrinking pole hole or some other
        # magic. The nsidc0081 processing applies a standard mask for pole and
        # for land/coast/ocean. Therefore we don't need to worry about the case
        # where a pole value goes missing in one layer, but is pole in all
        # other layers.
        expected = np.ma.array([[self.ignored, self.ignored],
                                [self.ignored, self.ignored]],
                               mask=[[True, True],
                                     [True, True]])
        npt.assert_array_equal(expected.mask, actual.mask)
    def test_shrinking_pole_hole(self):
        """A cell flagged in only some layers (shrinking pole hole) is masked out."""
        grid1 = np.ma.array([[251, 251],
                             [self.anything, self.anything]],
                            mask=[[False, False],
                                  [True, True]])
        grid2 = np.ma.array([[251, self.anything],
                             [self.anything, self.anything]],
                            mask=[[False, True],
                                  [True, True]])
        flag_cube = np.ma.dstack([grid1, grid2])
        actual = getter.flag_layer_from_cube(flag_cube)
        expected = np.ma.array([[251, self.ignored],
                                [self.ignored, self.ignored]],
                               mask=[[False, True],
                                     [True, True]])
        npt.assert_array_equal(expected, actual)
        npt.assert_array_equal(expected.mask, actual.mask)
    def test_shrinking_pole_hole_flagged_then_missing_then_data_returns_data(self):
        """Flagged, then missing, then real data: the cell is not a flag cell."""
        grid1 = np.ma.array([[251, 251],
                             [self.anything, self.anything]],
                            mask=[[False, False],
                                  [True, True]])
        grid2 = np.ma.array([[251, 255],
                             [self.anything, self.anything]],
                            mask=[[False, True],
                                  [True, True]])
        grid3 = np.ma.array([[251, 87],
                             [self.anything, self.anything]],
                            mask=[[False, True],
                                  [True, True]])
        flag_cube = np.ma.dstack([grid1, grid2, grid3])
        actual = getter.flag_layer_from_cube(flag_cube)
        # The [0, 1] cell is masked in the expectation, so its value
        # (87 + ignored) is arbitrary and never compared.
        expected = np.ma.array([[251, 87 + self.ignored],
                                [self.ignored, self.ignored]],
                               mask=[[False, True],
                                     [True, True]])
        npt.assert_array_equal(expected, actual)
        npt.assert_array_equal(expected.mask, actual.mask)
    def test_with_differing_flag_values(self):
        """A cell with conflicting flag values across layers is masked out."""
        grid1 = np.ma.array([[251, 251],
                             [self.anything, self.anything]],
                            mask=[[False, False],
                                  [True, True]])
        grid2 = np.ma.array([[251, 252],
                             [self.anything, self.anything]],
                            mask=[[False, False],
                                  [True, True]])
        flag_cube = np.ma.dstack([grid1, grid2])
        actual = getter.flag_layer_from_cube(flag_cube)
        expected = np.ma.array([[251, self.ignored],
                                [self.ignored, self.ignored]],
                               mask=[[False, True],
                                     [True, True]])
        npt.assert_array_equal(expected, actual)
        npt.assert_array_equal(expected.mask, actual.mask)
    def test_ignores_layer_of_all_missing(self):
        """A layer consisting entirely of the missing value is ignored (mask only)."""
        grid1 = np.ma.array([[251, self.anything],
                             [self.anything, self.anything]],
                            mask=[[False, True],
                                  [True, True]])
        grid2 = np.ma.array([[255, 255],
                             [255, 255]],
                            mask=[[False, False],
                                  [False, False]])
        flag_cube = np.ma.dstack([grid1, grid2])
        actual = getter.flag_layer_from_cube(flag_cube, missing_value=255)
        expected = np.ma.array([[251, self.ignored],
                                [self.ignored, self.ignored]],
                               mask=[[False, True],
                                     [True, True]])
        npt.assert_array_equal(expected.mask, actual.mask)
    def test_ignores_layer_of_all_missing_when_first(self):
        """An all-missing layer is ignored even when it is the first layer."""
        grid1 = np.ma.array([[255, 255],
                             [255, 255]],
                            mask=[[False, False],
                                  [False, False]])
        grid2 = np.ma.array([[251, self.anything],
                             [self.anything, self.anything]],
                            mask=[[False, True],
                                  [True, True]])
        flag_cube = np.ma.dstack([grid1, grid2])
        actual = getter.flag_layer_from_cube(flag_cube, missing_value=255)
        expected = np.ma.array([[251, self.ignored],
                                [self.ignored, self.ignored]],
                               mask=[[False, True],
                                     [True, True]])
        npt.assert_array_equal(expected.mask, actual.mask)
    def test_multiple_layers_with_flag_and_missing_and_one_missing_layer(self):
        """Flag-then-missing still masks the cell when an all-missing layer is present."""
        grid1 = np.ma.array([[251, self.anything],
                             [self.anything, self.anything]],
                            mask=[[False, True],
                                  [True, True]])
        grid2 = np.ma.array([[255, self.anything],
                             [self.anything, self.anything]],
                            mask=[[True, True],
                                  [True, True]])
        grid3 = np.ma.array([[255, 255],
                             [255, 255]],
                            mask=[[False, False],
                                  [False, False]])
        flag_cube = np.ma.dstack([grid1, grid2, grid3])
        actual = getter.flag_layer_from_cube(flag_cube, missing_value=255)
        # When a value that was flagged, gets a missing value in a different
        # layer we know that we have a shrinking pole hole or some other
        # magic. The nsidc0081 processing applies a standard mask for pole and
        # for land/coast/ocean. Therefore we don't need to worry about the case
        # where a pole value goes missing in one layer, but is pole in all
        # other layers.
        expected = np.ma.array([[self.ignored, self.ignored],
                                [self.ignored, self.ignored]],
                               mask=[[True, True],
                                     [True, True]])
        npt.assert_array_equal(expected.mask, actual.mask)
    def test_with_multiple_layers_same_flags_and_one_missing_layer(self):
        """A consistent flag survives when accompanied by an all-missing layer."""
        grid1 = np.ma.array([[251, self.anything],
                             [self.anything, self.anything]],
                            mask=[[False, True],
                                  [True, True]])
        grid2 = np.ma.array([[251, self.anything],
                             [self.anything, self.anything]],
                            mask=[[False, True],
                                  [True, True]])
        grid3 = np.ma.array([[255, 255],
                             [255, 255]],
                            mask=[[False, False],
                                  [False, False]])
        flag_cube = np.ma.dstack([grid1, grid2, grid3])
        actual = getter.flag_layer_from_cube(flag_cube, missing_value=255)
        expected = np.ma.array([[251, self.ignored],
                                [self.ignored, self.ignored]],
                               mask=[[False, True],
                                     [True, True]])
        npt.assert_array_equal(expected, actual)
        npt.assert_array_equal(expected.mask, actual.mask)
class Test__rows_columns_from_goddard_nasateam_header(unittest.TestCase):
    """Tests for getter._rows_columns_from_goddard_nasateam_header."""

    def test_rows_columns_from_file(self):
        """The header of a real northern daily file decodes to the (448, 304) grid."""
        with open(NORTH_DAILY_FILE, 'rb') as fp:
            header = fp.read(nt.NASATEAM_HEADER_LENGTH)
        dims = getter._rows_columns_from_goddard_nasateam_header(header)
        assert_equals((448, 304), dims)
class Test__scale_valid_data(unittest.TestCase):
    """Tests for getter._scale_valid_data."""

    def test_scales_data(self):
        """Only values inside the valid range are divided by the scale factor."""
        data = np.array([1., 2., 3., 4.])
        scaled = getter._scale_valid_data(data, (2, 3), 10)
        npt.assert_array_equal(np.array([1., .2, .3, 4.]), scaled)
class Test_concentration_monthly_over_years(unittest.TestCase):
    """Tests for getter.concentration_monthly_over_years."""

    # Shared template gridset; tests must copy it before mutating (see
    # _stub_with_files) so no state leaks between tests.
    monthly_stub = {'data': np.zeros(nt.NORTH['shape']),
                    'metadata': {'files': [],
                                 'valid_data_range': (0, 100),
                                 'flags': {},
                                 'missing_value': None,
                                 'period_index': pd.PeriodIndex([], freq='M')}}

    def _stub_with_files(self, files):
        """Return an independent copy of monthly_stub whose metadata lists *files*."""
        stub = copy.deepcopy(self.monthly_stub)
        stub['metadata']['files'] = files
        return stub

    @patch('seaice.data.getter.concentration_monthly')
    def test_monthly_over_years_calls_monthly_once_per_year(self, mock_monthly):
        """concentration_monthly is fetched once for every year in the range."""
        hemi = nt.NORTH
        start_year = 1981
        end_year = 1983
        month = 1
        mock_monthly.return_value = self.monthly_stub
        getter.concentration_monthly_over_years(hemi, start_year, end_year, month,
                                                search_paths=TEST_DATA)
        for year in [1981, 1982, 1983]:
            getter.concentration_monthly.assert_any_call(nt.NORTH, year, 1, TEST_DATA,
                                                         nt.MINIMUM_DAYS_FOR_VALID_MONTH)

    @patch('seaice.data.getter.concentration_monthly')
    def test_monthly_over_years_data(self, mock_monthly):
        """The per-year grids are stacked into a (rows, cols, years) cube."""
        hemi = nt.NORTH
        start_year = 1981
        end_year = 1983
        month = 1
        mock_monthly.return_value = self.monthly_stub
        actual = getter.concentration_monthly_over_years(hemi, start_year, end_year, month,
                                                         search_paths=TEST_DATA)
        rows, cols = nt.NORTH['shape']
        expected = (rows, cols, 3)
        assert_equals(expected, actual['data'].shape)

    @patch('seaice.data.getter.concentration_monthly')
    def test_monthly_over_years_metadata(self, mock_monthly):
        """Metadata collects every year's file list, in order."""
        hemi = nt.NORTH
        start_year = 1981
        end_year = 1983
        month = 1
        file_1981 = ['anyroot/nt_198101_f17_v1.1_n.bin']
        file_1982 = ['anyroot/nt_198201_f17_v1.1_n.bin']
        file_1983 = ['anyroot/nt_198301_f17_v1.1_n.bin']
        getter.concentration_monthly.side_effect = [self._stub_with_files(file_1981),
                                                    self._stub_with_files(file_1982),
                                                    self._stub_with_files(file_1983)]
        actual = getter.concentration_monthly_over_years(hemi, start_year, end_year, month,
                                                         search_paths=TEST_DATA)
        expected = {'flags': {},
                    'missing_value': None,
                    'valid_data_range': (0, 100),
                    'period_index': pd.PeriodIndex([], freq='M'),
                    'files': [file_1981, file_1982, file_1983]}
        # Compare the PeriodIndex separately; it does not support dict equality.
        pdt.assert_index_equal(expected.pop('period_index'),
                               actual['metadata'].pop('period_index'))
        assert_equals(expected, actual['metadata'])

    @patch('seaice.data.getter.concentration_monthly')
    def test_monthly_over_years_metadata_with_one_month_using_average(self, mock_monthly):
        """A year averaged from many daily files keeps its full file list in metadata."""
        hemi = nt.NORTH
        start_year = 1981
        end_year = 1983
        month = 1
        file_1981 = ['anyroot/nt_198101_f17_v1.1_n.bin']
        file_1982 = ['anyroot/nt_198201_f17_v1.1_n.bin']
        file_1983 = ['anyroot/nt_198301{d:02}_f17_v1.1_n.bin'.format(d=d) for d in range(1, 32)]
        getter.concentration_monthly.side_effect = [self._stub_with_files(file_1981),
                                                    self._stub_with_files(file_1982),
                                                    self._stub_with_files(file_1983)]
        actual = getter.concentration_monthly_over_years(hemi, start_year, end_year, month,
                                                         search_paths=TEST_DATA)
        expected = {
            'flags': {},
            'missing_value': None,
            'valid_data_range': (0, 100),
            'files': [file_1981, file_1982, file_1983],
            'period_index': pd.PeriodIndex([], freq='M')}
        # Compare the PeriodIndex separately; it does not support dict equality.
        pdt.assert_index_equal(expected.pop('period_index'),
                               actual['metadata'].pop('period_index'))
        assert_equals(expected, actual['metadata'])
| 38.547937
| 99
| 0.561774
| 6,585
| 60,713
| 4.856796
| 0.064692
| 0.062973
| 0.026265
| 0.025608
| 0.833219
| 0.797605
| 0.747858
| 0.718248
| 0.678882
| 0.662091
| 0
| 0.054522
| 0.328743
| 60,713
| 1,574
| 100
| 38.572427
| 0.730235
| 0.017904
| 0
| 0.621149
| 0
| 0
| 0.113083
| 0.067229
| 0
| 0
| 0
| 0
| 0.082431
| 1
| 0.057452
| false
| 0.000833
| 0.014988
| 0
| 0.089092
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c92c81c4b4dafd542865d556e13f32b0ab5cfebe
| 37,312
|
py
|
Python
|
src/datadog_api_client/v1/models/__init__.py
|
MichaelTROEHLER/datadog-api-client-python
|
12c46626622fb1277bb1e172753b342c671348bd
|
[
"Apache-2.0"
] | null | null | null |
src/datadog_api_client/v1/models/__init__.py
|
MichaelTROEHLER/datadog-api-client-python
|
12c46626622fb1277bb1e172753b342c671348bd
|
[
"Apache-2.0"
] | null | null | null |
src/datadog_api_client/v1/models/__init__.py
|
MichaelTROEHLER/datadog-api-client-python
|
12c46626622fb1277bb1e172753b342c671348bd
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# flake8: noqa
# import all models into this package
# if you have many models here with many references from one model to another this may
# raise a RecursionError
# to avoid this, import only the models that you directly need like:
# from datadog_api_client.v1.model.pet import Pet
# or import this package, but before doing it, use:
# import sys
# sys.setrecursionlimit(n)
from datadog_api_client.v1.model.api_error_response import APIErrorResponse
from datadog_api_client.v1.model.aws_account import AWSAccount
from datadog_api_client.v1.model.aws_account_and_lambda_request import AWSAccountAndLambdaRequest
from datadog_api_client.v1.model.aws_account_create_response import AWSAccountCreateResponse
from datadog_api_client.v1.model.aws_account_list_response import AWSAccountListResponse
from datadog_api_client.v1.model.aws_logs_async_response import AWSLogsAsyncResponse
from datadog_api_client.v1.model.aws_logs_async_response_errors import AWSLogsAsyncResponseErrors
from datadog_api_client.v1.model.aws_logs_list_response import AWSLogsListResponse
from datadog_api_client.v1.model.aws_logs_list_response_lambdas import AWSLogsListResponseLambdas
from datadog_api_client.v1.model.aws_logs_list_services_response import AWSLogsListServicesResponse
from datadog_api_client.v1.model.aws_logs_services_request import AWSLogsServicesRequest
from datadog_api_client.v1.model.access_role import AccessRole
from datadog_api_client.v1.model.alert_graph_widget_definition import AlertGraphWidgetDefinition
from datadog_api_client.v1.model.alert_graph_widget_definition_type import AlertGraphWidgetDefinitionType
from datadog_api_client.v1.model.alert_value_widget_definition import AlertValueWidgetDefinition
from datadog_api_client.v1.model.alert_value_widget_definition_type import AlertValueWidgetDefinitionType
from datadog_api_client.v1.model.api_key import ApiKey
from datadog_api_client.v1.model.api_key_list_response import ApiKeyListResponse
from datadog_api_client.v1.model.api_key_response import ApiKeyResponse
from datadog_api_client.v1.model.apm_stats_query_column_type import ApmStatsQueryColumnType
from datadog_api_client.v1.model.apm_stats_query_definition import ApmStatsQueryDefinition
from datadog_api_client.v1.model.apm_stats_query_row_type import ApmStatsQueryRowType
from datadog_api_client.v1.model.application_key import ApplicationKey
from datadog_api_client.v1.model.application_key_list_response import ApplicationKeyListResponse
from datadog_api_client.v1.model.application_key_response import ApplicationKeyResponse
from datadog_api_client.v1.model.authentication_validation_response import AuthenticationValidationResponse
from datadog_api_client.v1.model.azure_account import AzureAccount
from datadog_api_client.v1.model.azure_account_list_response import AzureAccountListResponse
from datadog_api_client.v1.model.cancel_downtimes_by_scope_request import CancelDowntimesByScopeRequest
from datadog_api_client.v1.model.canceled_downtimes_ids import CanceledDowntimesIds
from datadog_api_client.v1.model.change_widget_definition import ChangeWidgetDefinition
from datadog_api_client.v1.model.change_widget_definition_type import ChangeWidgetDefinitionType
from datadog_api_client.v1.model.change_widget_request import ChangeWidgetRequest
from datadog_api_client.v1.model.check_can_delete_monitor_response import CheckCanDeleteMonitorResponse
from datadog_api_client.v1.model.check_can_delete_monitor_response_data import CheckCanDeleteMonitorResponseData
from datadog_api_client.v1.model.check_can_delete_slo_response import CheckCanDeleteSLOResponse
from datadog_api_client.v1.model.check_can_delete_slo_response_data import CheckCanDeleteSLOResponseData
from datadog_api_client.v1.model.check_status_widget_definition import CheckStatusWidgetDefinition
from datadog_api_client.v1.model.check_status_widget_definition_type import CheckStatusWidgetDefinitionType
from datadog_api_client.v1.model.creator import Creator
from datadog_api_client.v1.model.dashboard import Dashboard
from datadog_api_client.v1.model.dashboard_delete_response import DashboardDeleteResponse
from datadog_api_client.v1.model.dashboard_layout_type import DashboardLayoutType
from datadog_api_client.v1.model.dashboard_list import DashboardList
from datadog_api_client.v1.model.dashboard_list_delete_response import DashboardListDeleteResponse
from datadog_api_client.v1.model.dashboard_list_list_response import DashboardListListResponse
from datadog_api_client.v1.model.dashboard_summary import DashboardSummary
from datadog_api_client.v1.model.dashboard_summary_dashboards import DashboardSummaryDashboards
from datadog_api_client.v1.model.dashboard_template_variable_preset import DashboardTemplateVariablePreset
from datadog_api_client.v1.model.dashboard_template_variable_preset_value import DashboardTemplateVariablePresetValue
from datadog_api_client.v1.model.dashboard_template_variables import DashboardTemplateVariables
from datadog_api_client.v1.model.deleted_monitor import DeletedMonitor
from datadog_api_client.v1.model.distribution_widget_definition import DistributionWidgetDefinition
from datadog_api_client.v1.model.distribution_widget_definition_type import DistributionWidgetDefinitionType
from datadog_api_client.v1.model.distribution_widget_request import DistributionWidgetRequest
from datadog_api_client.v1.model.downtime import Downtime
from datadog_api_client.v1.model.downtime_recurrence import DowntimeRecurrence
from datadog_api_client.v1.model.event import Event
from datadog_api_client.v1.model.event_alert_type import EventAlertType
from datadog_api_client.v1.model.event_list_response import EventListResponse
from datadog_api_client.v1.model.event_priority import EventPriority
from datadog_api_client.v1.model.event_query_definition import EventQueryDefinition
from datadog_api_client.v1.model.event_response import EventResponse
from datadog_api_client.v1.model.event_stream_widget_definition import EventStreamWidgetDefinition
from datadog_api_client.v1.model.event_stream_widget_definition_type import EventStreamWidgetDefinitionType
from datadog_api_client.v1.model.event_timeline_widget_definition import EventTimelineWidgetDefinition
from datadog_api_client.v1.model.event_timeline_widget_definition_type import EventTimelineWidgetDefinitionType
from datadog_api_client.v1.model.free_text_widget_definition import FreeTextWidgetDefinition
from datadog_api_client.v1.model.free_text_widget_definition_type import FreeTextWidgetDefinitionType
from datadog_api_client.v1.model.gcp_account import GCPAccount
from datadog_api_client.v1.model.gcp_account_list_response import GCPAccountListResponse
from datadog_api_client.v1.model.graph_snapshot import GraphSnapshot
from datadog_api_client.v1.model.group_widget_definition import GroupWidgetDefinition
from datadog_api_client.v1.model.group_widget_definition_type import GroupWidgetDefinitionType
from datadog_api_client.v1.model.http_method import HTTPMethod
from datadog_api_client.v1.model.heat_map_widget_definition import HeatMapWidgetDefinition
from datadog_api_client.v1.model.heat_map_widget_definition_type import HeatMapWidgetDefinitionType
from datadog_api_client.v1.model.heat_map_widget_request import HeatMapWidgetRequest
from datadog_api_client.v1.model.host import Host
from datadog_api_client.v1.model.host_list_response import HostListResponse
from datadog_api_client.v1.model.host_map_request import HostMapRequest
from datadog_api_client.v1.model.host_map_widget_definition import HostMapWidgetDefinition
from datadog_api_client.v1.model.host_map_widget_definition_requests import HostMapWidgetDefinitionRequests
from datadog_api_client.v1.model.host_map_widget_definition_style import HostMapWidgetDefinitionStyle
from datadog_api_client.v1.model.host_map_widget_definition_type import HostMapWidgetDefinitionType
from datadog_api_client.v1.model.host_meta import HostMeta
from datadog_api_client.v1.model.host_metrics import HostMetrics
from datadog_api_client.v1.model.host_mute_response import HostMuteResponse
from datadog_api_client.v1.model.host_mute_settings import HostMuteSettings
from datadog_api_client.v1.model.host_tags import HostTags
from datadog_api_client.v1.model.host_totals import HostTotals
from datadog_api_client.v1.model.i_frame_widget_definition import IFrameWidgetDefinition
from datadog_api_client.v1.model.i_frame_widget_definition_type import IFrameWidgetDefinitionType
from datadog_api_client.v1.model.ip_prefixes_api import IPPrefixesAPI
from datadog_api_client.v1.model.ip_prefixes_apm import IPPrefixesAPM
from datadog_api_client.v1.model.ip_prefixes_agents import IPPrefixesAgents
from datadog_api_client.v1.model.ip_prefixes_logs import IPPrefixesLogs
from datadog_api_client.v1.model.ip_prefixes_process import IPPrefixesProcess
from datadog_api_client.v1.model.ip_prefixes_synthetics import IPPrefixesSynthetics
from datadog_api_client.v1.model.ip_prefixes_webhooks import IPPrefixesWebhooks
from datadog_api_client.v1.model.ip_ranges import IPRanges
from datadog_api_client.v1.model.idp_form_data import IdpFormData
from datadog_api_client.v1.model.idp_response import IdpResponse
from datadog_api_client.v1.model.image_widget_definition import ImageWidgetDefinition
from datadog_api_client.v1.model.image_widget_definition_type import ImageWidgetDefinitionType
from datadog_api_client.v1.model.log import Log
from datadog_api_client.v1.model.log_content import LogContent
from datadog_api_client.v1.model.log_query_definition import LogQueryDefinition
from datadog_api_client.v1.model.log_query_definition_group_by import LogQueryDefinitionGroupBy
from datadog_api_client.v1.model.log_query_definition_search import LogQueryDefinitionSearch
from datadog_api_client.v1.model.log_query_definition_sort import LogQueryDefinitionSort
from datadog_api_client.v1.model.log_stream_widget_definition import LogStreamWidgetDefinition
from datadog_api_client.v1.model.log_stream_widget_definition_type import LogStreamWidgetDefinitionType
from datadog_api_client.v1.model.logs_api_error import LogsAPIError
from datadog_api_client.v1.model.logs_api_error_response import LogsAPIErrorResponse
from datadog_api_client.v1.model.logs_arithmetic_processor import LogsArithmeticProcessor
from datadog_api_client.v1.model.logs_arithmetic_processor_type import LogsArithmeticProcessorType
from datadog_api_client.v1.model.logs_attribute_remapper import LogsAttributeRemapper
from datadog_api_client.v1.model.logs_attribute_remapper_type import LogsAttributeRemapperType
from datadog_api_client.v1.model.logs_category_processor import LogsCategoryProcessor
from datadog_api_client.v1.model.logs_category_processor_categories import LogsCategoryProcessorCategories
from datadog_api_client.v1.model.logs_category_processor_type import LogsCategoryProcessorType
from datadog_api_client.v1.model.logs_date_remapper import LogsDateRemapper
from datadog_api_client.v1.model.logs_date_remapper_type import LogsDateRemapperType
from datadog_api_client.v1.model.logs_exclusion import LogsExclusion
from datadog_api_client.v1.model.logs_exclusion_filter import LogsExclusionFilter
from datadog_api_client.v1.model.logs_filter import LogsFilter
from datadog_api_client.v1.model.logs_geo_ip_parser import LogsGeoIPParser
from datadog_api_client.v1.model.logs_geo_ip_parser_type import LogsGeoIPParserType
from datadog_api_client.v1.model.logs_grok_parser import LogsGrokParser
from datadog_api_client.v1.model.logs_grok_parser_rules import LogsGrokParserRules
from datadog_api_client.v1.model.logs_grok_parser_type import LogsGrokParserType
from datadog_api_client.v1.model.logs_index import LogsIndex
from datadog_api_client.v1.model.logs_index_list_response import LogsIndexListResponse
from datadog_api_client.v1.model.logs_indexes_order import LogsIndexesOrder
from datadog_api_client.v1.model.logs_list_request import LogsListRequest
from datadog_api_client.v1.model.logs_list_request_time import LogsListRequestTime
from datadog_api_client.v1.model.logs_list_response import LogsListResponse
from datadog_api_client.v1.model.logs_lookup_processor import LogsLookupProcessor
from datadog_api_client.v1.model.logs_lookup_processor_type import LogsLookupProcessorType
from datadog_api_client.v1.model.logs_message_remapper import LogsMessageRemapper
from datadog_api_client.v1.model.logs_message_remapper_type import LogsMessageRemapperType
from datadog_api_client.v1.model.logs_pipeline import LogsPipeline
from datadog_api_client.v1.model.logs_pipeline_list import LogsPipelineList
from datadog_api_client.v1.model.logs_pipeline_processor import LogsPipelineProcessor
from datadog_api_client.v1.model.logs_pipeline_processor_type import LogsPipelineProcessorType
from datadog_api_client.v1.model.logs_pipelines_order import LogsPipelinesOrder
from datadog_api_client.v1.model.logs_processor import LogsProcessor
from datadog_api_client.v1.model.logs_query_compute import LogsQueryCompute
from datadog_api_client.v1.model.logs_service_remapper import LogsServiceRemapper
from datadog_api_client.v1.model.logs_service_remapper_type import LogsServiceRemapperType
from datadog_api_client.v1.model.logs_sort import LogsSort
from datadog_api_client.v1.model.logs_status_remapper import LogsStatusRemapper
from datadog_api_client.v1.model.logs_status_remapper_type import LogsStatusRemapperType
from datadog_api_client.v1.model.logs_string_builder_processor import LogsStringBuilderProcessor
from datadog_api_client.v1.model.logs_string_builder_processor_type import LogsStringBuilderProcessorType
from datadog_api_client.v1.model.logs_trace_remapper import LogsTraceRemapper
from datadog_api_client.v1.model.logs_trace_remapper_type import LogsTraceRemapperType
from datadog_api_client.v1.model.logs_url_parser import LogsURLParser
from datadog_api_client.v1.model.logs_url_parser_type import LogsURLParserType
from datadog_api_client.v1.model.logs_user_agent_parser import LogsUserAgentParser
from datadog_api_client.v1.model.logs_user_agent_parser_type import LogsUserAgentParserType
from datadog_api_client.v1.model.metric_metadata import MetricMetadata
from datadog_api_client.v1.model.metric_search_response import MetricSearchResponse
from datadog_api_client.v1.model.metric_search_response_results import MetricSearchResponseResults
from datadog_api_client.v1.model.metrics_list_response import MetricsListResponse
from datadog_api_client.v1.model.metrics_query_response import MetricsQueryResponse
from datadog_api_client.v1.model.metrics_query_response_series import MetricsQueryResponseSeries
from datadog_api_client.v1.model.metrics_query_response_unit import MetricsQueryResponseUnit
from datadog_api_client.v1.model.monitor import Monitor
from datadog_api_client.v1.model.monitor_device_id import MonitorDeviceID
from datadog_api_client.v1.model.monitor_options import MonitorOptions
from datadog_api_client.v1.model.monitor_options_aggregation import MonitorOptionsAggregation
from datadog_api_client.v1.model.monitor_overall_states import MonitorOverallStates
from datadog_api_client.v1.model.monitor_state import MonitorState
from datadog_api_client.v1.model.monitor_state_group import MonitorStateGroup
from datadog_api_client.v1.model.monitor_summary_widget_definition import MonitorSummaryWidgetDefinition
from datadog_api_client.v1.model.monitor_summary_widget_definition_type import MonitorSummaryWidgetDefinitionType
from datadog_api_client.v1.model.monitor_threshold_window_options import MonitorThresholdWindowOptions
from datadog_api_client.v1.model.monitor_thresholds import MonitorThresholds
from datadog_api_client.v1.model.monitor_type import MonitorType
from datadog_api_client.v1.model.monitor_update_request import MonitorUpdateRequest
from datadog_api_client.v1.model.note_widget_definition import NoteWidgetDefinition
from datadog_api_client.v1.model.note_widget_definition_type import NoteWidgetDefinitionType
from datadog_api_client.v1.model.organization import Organization
from datadog_api_client.v1.model.organization_billing import OrganizationBilling
from datadog_api_client.v1.model.organization_create_body import OrganizationCreateBody
from datadog_api_client.v1.model.organization_create_response import OrganizationCreateResponse
from datadog_api_client.v1.model.organization_list_response import OrganizationListResponse
from datadog_api_client.v1.model.organization_response import OrganizationResponse
from datadog_api_client.v1.model.organization_settings import OrganizationSettings
from datadog_api_client.v1.model.organization_settings_saml import OrganizationSettingsSaml
from datadog_api_client.v1.model.organization_settings_saml_autocreate_users_domains import OrganizationSettingsSamlAutocreateUsersDomains
from datadog_api_client.v1.model.organization_settings_saml_idp_initiated_login import OrganizationSettingsSamlIdpInitiatedLogin
from datadog_api_client.v1.model.organization_settings_saml_strict_mode import OrganizationSettingsSamlStrictMode
from datadog_api_client.v1.model.organization_subscription import OrganizationSubscription
from datadog_api_client.v1.model.pager_duty_service import PagerDutyService
from datadog_api_client.v1.model.pager_duty_service_key import PagerDutyServiceKey
from datadog_api_client.v1.model.pager_duty_service_name import PagerDutyServiceName
from datadog_api_client.v1.model.point import Point
from datadog_api_client.v1.model.process_query_definition import ProcessQueryDefinition
from datadog_api_client.v1.model.query_value_widget_definition import QueryValueWidgetDefinition
from datadog_api_client.v1.model.query_value_widget_definition_type import QueryValueWidgetDefinitionType
from datadog_api_client.v1.model.query_value_widget_request import QueryValueWidgetRequest
from datadog_api_client.v1.model.slo_bulk_delete import SLOBulkDelete
from datadog_api_client.v1.model.slo_bulk_delete_response import SLOBulkDeleteResponse
from datadog_api_client.v1.model.slo_bulk_delete_response_data import SLOBulkDeleteResponseData
from datadog_api_client.v1.model.slo_bulk_delete_response_errors import SLOBulkDeleteResponseErrors
from datadog_api_client.v1.model.slo_delete_response import SLODeleteResponse
from datadog_api_client.v1.model.slo_error_timeframe import SLOErrorTimeframe
from datadog_api_client.v1.model.slo_history_metrics import SLOHistoryMetrics
from datadog_api_client.v1.model.slo_history_metrics_series import SLOHistoryMetricsSeries
from datadog_api_client.v1.model.slo_history_metrics_series_metadata import SLOHistoryMetricsSeriesMetadata
from datadog_api_client.v1.model.slo_history_response import SLOHistoryResponse
from datadog_api_client.v1.model.slo_history_response_data import SLOHistoryResponseData
from datadog_api_client.v1.model.slo_history_response_error import SLOHistoryResponseError
from datadog_api_client.v1.model.slo_history_sli_data import SLOHistorySLIData
from datadog_api_client.v1.model.slo_list_response import SLOListResponse
from datadog_api_client.v1.model.slo_response import SLOResponse
from datadog_api_client.v1.model.slo_threshold import SLOThreshold
from datadog_api_client.v1.model.slo_timeframe import SLOTimeframe
from datadog_api_client.v1.model.slo_type import SLOType
from datadog_api_client.v1.model.slo_type_numeric import SLOTypeNumeric
from datadog_api_client.v1.model.slo_widget_definition import SLOWidgetDefinition
from datadog_api_client.v1.model.slo_widget_definition_type import SLOWidgetDefinitionType
from datadog_api_client.v1.model.scatter_plot_request import ScatterPlotRequest
from datadog_api_client.v1.model.scatter_plot_widget_definition import ScatterPlotWidgetDefinition
from datadog_api_client.v1.model.scatter_plot_widget_definition_requests import ScatterPlotWidgetDefinitionRequests
from datadog_api_client.v1.model.scatter_plot_widget_definition_type import ScatterPlotWidgetDefinitionType
from datadog_api_client.v1.model.service_level_objective import ServiceLevelObjective
from datadog_api_client.v1.model.service_level_objective_query import ServiceLevelObjectiveQuery
from datadog_api_client.v1.model.service_level_objective_request import ServiceLevelObjectiveRequest
from datadog_api_client.v1.model.service_map_widget_definition import ServiceMapWidgetDefinition
from datadog_api_client.v1.model.service_map_widget_definition_type import ServiceMapWidgetDefinitionType
from datadog_api_client.v1.model.service_summary_widget_definition import ServiceSummaryWidgetDefinition
from datadog_api_client.v1.model.service_summary_widget_definition_type import ServiceSummaryWidgetDefinitionType
from datadog_api_client.v1.model.synthetics_api_test_result_data import SyntheticsAPITestResultData
from datadog_api_client.v1.model.synthetics_api_test_result_full import SyntheticsAPITestResultFull
from datadog_api_client.v1.model.synthetics_api_test_result_full_check import SyntheticsAPITestResultFullCheck
from datadog_api_client.v1.model.synthetics_api_test_result_short import SyntheticsAPITestResultShort
from datadog_api_client.v1.model.synthetics_api_test_result_short_result import SyntheticsAPITestResultShortResult
from datadog_api_client.v1.model.synthetics_assertion import SyntheticsAssertion
from datadog_api_client.v1.model.synthetics_assertion_json_path_operator import SyntheticsAssertionJSONPathOperator
from datadog_api_client.v1.model.synthetics_assertion_json_path_target import SyntheticsAssertionJSONPathTarget
from datadog_api_client.v1.model.synthetics_assertion_json_path_target_target import SyntheticsAssertionJSONPathTargetTarget
from datadog_api_client.v1.model.synthetics_assertion_operator import SyntheticsAssertionOperator
from datadog_api_client.v1.model.synthetics_assertion_target import SyntheticsAssertionTarget
from datadog_api_client.v1.model.synthetics_assertion_type import SyntheticsAssertionType
from datadog_api_client.v1.model.synthetics_basic_auth import SyntheticsBasicAuth
from datadog_api_client.v1.model.synthetics_browser_error import SyntheticsBrowserError
from datadog_api_client.v1.model.synthetics_browser_error_type import SyntheticsBrowserErrorType
from datadog_api_client.v1.model.synthetics_browser_test_result_data import SyntheticsBrowserTestResultData
from datadog_api_client.v1.model.synthetics_browser_test_result_full import SyntheticsBrowserTestResultFull
from datadog_api_client.v1.model.synthetics_browser_test_result_full_check import SyntheticsBrowserTestResultFullCheck
from datadog_api_client.v1.model.synthetics_browser_test_result_short import SyntheticsBrowserTestResultShort
from datadog_api_client.v1.model.synthetics_browser_test_result_short_result import SyntheticsBrowserTestResultShortResult
from datadog_api_client.v1.model.synthetics_browser_variable import SyntheticsBrowserVariable
from datadog_api_client.v1.model.synthetics_browser_variable_type import SyntheticsBrowserVariableType
from datadog_api_client.v1.model.synthetics_ci_test import SyntheticsCITest
from datadog_api_client.v1.model.synthetics_ci_test_body import SyntheticsCITestBody
from datadog_api_client.v1.model.synthetics_ci_test_metadata import SyntheticsCITestMetadata
from datadog_api_client.v1.model.synthetics_ci_test_metadata_ci import SyntheticsCITestMetadataCi
from datadog_api_client.v1.model.synthetics_ci_test_metadata_git import SyntheticsCITestMetadataGit
from datadog_api_client.v1.model.synthetics_check_type import SyntheticsCheckType
from datadog_api_client.v1.model.synthetics_delete_tests_payload import SyntheticsDeleteTestsPayload
from datadog_api_client.v1.model.synthetics_delete_tests_response import SyntheticsDeleteTestsResponse
from datadog_api_client.v1.model.synthetics_delete_tests_response_deleted_tests import SyntheticsDeleteTestsResponseDeletedTests
from datadog_api_client.v1.model.synthetics_device import SyntheticsDevice
from datadog_api_client.v1.model.synthetics_device_id import SyntheticsDeviceID
from datadog_api_client.v1.model.synthetics_error_code import SyntheticsErrorCode
from datadog_api_client.v1.model.synthetics_get_api_test_latest_results_response import SyntheticsGetAPITestLatestResultsResponse
from datadog_api_client.v1.model.synthetics_get_browser_test_latest_results_response import SyntheticsGetBrowserTestLatestResultsResponse
from datadog_api_client.v1.model.synthetics_global_variable import SyntheticsGlobalVariable
from datadog_api_client.v1.model.synthetics_global_variable_value import SyntheticsGlobalVariableValue
from datadog_api_client.v1.model.synthetics_list_tests_response import SyntheticsListTestsResponse
from datadog_api_client.v1.model.synthetics_location import SyntheticsLocation
from datadog_api_client.v1.model.synthetics_locations import SyntheticsLocations
from datadog_api_client.v1.model.synthetics_playing_tab import SyntheticsPlayingTab
from datadog_api_client.v1.model.synthetics_resource import SyntheticsResource
from datadog_api_client.v1.model.synthetics_resource_type import SyntheticsResourceType
from datadog_api_client.v1.model.synthetics_ssl_certificate import SyntheticsSSLCertificate
from datadog_api_client.v1.model.synthetics_ssl_certificate_issuer import SyntheticsSSLCertificateIssuer
from datadog_api_client.v1.model.synthetics_ssl_certificate_subject import SyntheticsSSLCertificateSubject
from datadog_api_client.v1.model.synthetics_step import SyntheticsStep
from datadog_api_client.v1.model.synthetics_step_detail import SyntheticsStepDetail
from datadog_api_client.v1.model.synthetics_step_detail_warnings import SyntheticsStepDetailWarnings
from datadog_api_client.v1.model.synthetics_step_type import SyntheticsStepType
from datadog_api_client.v1.model.synthetics_test_config import SyntheticsTestConfig
from datadog_api_client.v1.model.synthetics_test_details import SyntheticsTestDetails
from datadog_api_client.v1.model.synthetics_test_details_sub_type import SyntheticsTestDetailsSubType
from datadog_api_client.v1.model.synthetics_test_details_type import SyntheticsTestDetailsType
from datadog_api_client.v1.model.synthetics_test_headers import SyntheticsTestHeaders
from datadog_api_client.v1.model.synthetics_test_monitor_status import SyntheticsTestMonitorStatus
from datadog_api_client.v1.model.synthetics_test_options import SyntheticsTestOptions
from datadog_api_client.v1.model.synthetics_test_options_monitor_options import SyntheticsTestOptionsMonitorOptions
from datadog_api_client.v1.model.synthetics_test_options_retry import SyntheticsTestOptionsRetry
from datadog_api_client.v1.model.synthetics_test_pause_status import SyntheticsTestPauseStatus
from datadog_api_client.v1.model.synthetics_test_process_status import SyntheticsTestProcessStatus
from datadog_api_client.v1.model.synthetics_test_request import SyntheticsTestRequest
from datadog_api_client.v1.model.synthetics_test_request_certificate import SyntheticsTestRequestCertificate
from datadog_api_client.v1.model.synthetics_test_request_certificate_item import SyntheticsTestRequestCertificateItem
from datadog_api_client.v1.model.synthetics_tick_interval import SyntheticsTickInterval
from datadog_api_client.v1.model.synthetics_timing import SyntheticsTiming
from datadog_api_client.v1.model.synthetics_trigger_ci_tests_response import SyntheticsTriggerCITestsResponse
from datadog_api_client.v1.model.synthetics_trigger_ci_tests_response_locations import SyntheticsTriggerCITestsResponseLocations
from datadog_api_client.v1.model.synthetics_trigger_ci_tests_response_results import SyntheticsTriggerCITestsResponseResults
from datadog_api_client.v1.model.synthetics_update_test_pause_status_payload import SyntheticsUpdateTestPauseStatusPayload
from datadog_api_client.v1.model.synthetics_warning_type import SyntheticsWarningType
from datadog_api_client.v1.model.table_widget_cell_display_mode import TableWidgetCellDisplayMode
from datadog_api_client.v1.model.table_widget_definition import TableWidgetDefinition
from datadog_api_client.v1.model.table_widget_definition_type import TableWidgetDefinitionType
from datadog_api_client.v1.model.table_widget_has_search_bar import TableWidgetHasSearchBar
from datadog_api_client.v1.model.table_widget_request import TableWidgetRequest
from datadog_api_client.v1.model.tag_to_hosts import TagToHosts
from datadog_api_client.v1.model.target_format_type import TargetFormatType
from datadog_api_client.v1.model.timeseries_widget_definition import TimeseriesWidgetDefinition
from datadog_api_client.v1.model.timeseries_widget_definition_type import TimeseriesWidgetDefinitionType
from datadog_api_client.v1.model.timeseries_widget_request import TimeseriesWidgetRequest
from datadog_api_client.v1.model.timeseries_widget_request_metadata import TimeseriesWidgetRequestMetadata
from datadog_api_client.v1.model.toplist_widget_definition import ToplistWidgetDefinition
from datadog_api_client.v1.model.toplist_widget_definition_type import ToplistWidgetDefinitionType
from datadog_api_client.v1.model.toplist_widget_request import ToplistWidgetRequest
from datadog_api_client.v1.model.usage_analyzed_logs_hour import UsageAnalyzedLogsHour
from datadog_api_client.v1.model.usage_analyzed_logs_response import UsageAnalyzedLogsResponse
from datadog_api_client.v1.model.usage_billable_summary_body import UsageBillableSummaryBody
from datadog_api_client.v1.model.usage_billable_summary_hour import UsageBillableSummaryHour
from datadog_api_client.v1.model.usage_billable_summary_keys import UsageBillableSummaryKeys
from datadog_api_client.v1.model.usage_billable_summary_response import UsageBillableSummaryResponse
from datadog_api_client.v1.model.usage_custom_reports_attributes import UsageCustomReportsAttributes
from datadog_api_client.v1.model.usage_custom_reports_data import UsageCustomReportsData
from datadog_api_client.v1.model.usage_custom_reports_meta import UsageCustomReportsMeta
from datadog_api_client.v1.model.usage_custom_reports_page import UsageCustomReportsPage
from datadog_api_client.v1.model.usage_custom_reports_response import UsageCustomReportsResponse
from datadog_api_client.v1.model.usage_fargate_hour import UsageFargateHour
from datadog_api_client.v1.model.usage_fargate_response import UsageFargateResponse
from datadog_api_client.v1.model.usage_host_hour import UsageHostHour
from datadog_api_client.v1.model.usage_hosts_response import UsageHostsResponse
from datadog_api_client.v1.model.usage_lambda_hour import UsageLambdaHour
from datadog_api_client.v1.model.usage_lambda_response import UsageLambdaResponse
from datadog_api_client.v1.model.usage_logs_by_index_hour import UsageLogsByIndexHour
from datadog_api_client.v1.model.usage_logs_by_index_response import UsageLogsByIndexResponse
from datadog_api_client.v1.model.usage_logs_hour import UsageLogsHour
from datadog_api_client.v1.model.usage_logs_response import UsageLogsResponse
from datadog_api_client.v1.model.usage_metric_category import UsageMetricCategory
from datadog_api_client.v1.model.usage_network_flows_hour import UsageNetworkFlowsHour
from datadog_api_client.v1.model.usage_network_flows_response import UsageNetworkFlowsResponse
from datadog_api_client.v1.model.usage_network_hosts_hour import UsageNetworkHostsHour
from datadog_api_client.v1.model.usage_network_hosts_response import UsageNetworkHostsResponse
from datadog_api_client.v1.model.usage_profiling_hour import UsageProfilingHour
from datadog_api_client.v1.model.usage_profiling_response import UsageProfilingResponse
from datadog_api_client.v1.model.usage_reports_type import UsageReportsType
from datadog_api_client.v1.model.usage_rum_sessions_hour import UsageRumSessionsHour
from datadog_api_client.v1.model.usage_rum_sessions_response import UsageRumSessionsResponse
from datadog_api_client.v1.model.usage_snmp_hour import UsageSNMPHour
from datadog_api_client.v1.model.usage_snmp_response import UsageSNMPResponse
from datadog_api_client.v1.model.usage_sort import UsageSort
from datadog_api_client.v1.model.usage_sort_direction import UsageSortDirection
from datadog_api_client.v1.model.usage_specified_custom_reports_attributes import UsageSpecifiedCustomReportsAttributes
from datadog_api_client.v1.model.usage_specified_custom_reports_data import UsageSpecifiedCustomReportsData
from datadog_api_client.v1.model.usage_specified_custom_reports_meta import UsageSpecifiedCustomReportsMeta
from datadog_api_client.v1.model.usage_specified_custom_reports_page import UsageSpecifiedCustomReportsPage
from datadog_api_client.v1.model.usage_specified_custom_reports_response import UsageSpecifiedCustomReportsResponse
from datadog_api_client.v1.model.usage_summary_date import UsageSummaryDate
from datadog_api_client.v1.model.usage_summary_date_org import UsageSummaryDateOrg
from datadog_api_client.v1.model.usage_summary_response import UsageSummaryResponse
from datadog_api_client.v1.model.usage_synthetics_api_hour import UsageSyntheticsAPIHour
from datadog_api_client.v1.model.usage_synthetics_api_response import UsageSyntheticsAPIResponse
from datadog_api_client.v1.model.usage_synthetics_browser_hour import UsageSyntheticsBrowserHour
from datadog_api_client.v1.model.usage_synthetics_browser_response import UsageSyntheticsBrowserResponse
from datadog_api_client.v1.model.usage_synthetics_hour import UsageSyntheticsHour
from datadog_api_client.v1.model.usage_synthetics_response import UsageSyntheticsResponse
from datadog_api_client.v1.model.usage_timeseries_hour import UsageTimeseriesHour
from datadog_api_client.v1.model.usage_timeseries_response import UsageTimeseriesResponse
from datadog_api_client.v1.model.usage_top_avg_metrics_hour import UsageTopAvgMetricsHour
from datadog_api_client.v1.model.usage_top_avg_metrics_response import UsageTopAvgMetricsResponse
from datadog_api_client.v1.model.usage_trace_hour import UsageTraceHour
from datadog_api_client.v1.model.usage_trace_response import UsageTraceResponse
from datadog_api_client.v1.model.usage_tracing_without_limits_hour import UsageTracingWithoutLimitsHour
from datadog_api_client.v1.model.usage_tracing_without_limits_response import UsageTracingWithoutLimitsResponse
from datadog_api_client.v1.model.user import User
from datadog_api_client.v1.model.user_disable_response import UserDisableResponse
from datadog_api_client.v1.model.user_list_response import UserListResponse
from datadog_api_client.v1.model.user_response import UserResponse
from datadog_api_client.v1.model.widget import Widget
from datadog_api_client.v1.model.widget_aggregator import WidgetAggregator
from datadog_api_client.v1.model.widget_axis import WidgetAxis
from datadog_api_client.v1.model.widget_change_type import WidgetChangeType
from datadog_api_client.v1.model.widget_color_preference import WidgetColorPreference
from datadog_api_client.v1.model.widget_comparator import WidgetComparator
from datadog_api_client.v1.model.widget_compare_to import WidgetCompareTo
from datadog_api_client.v1.model.widget_conditional_format import WidgetConditionalFormat
from datadog_api_client.v1.model.widget_custom_link import WidgetCustomLink
from datadog_api_client.v1.model.widget_definition import WidgetDefinition
from datadog_api_client.v1.model.widget_display_type import WidgetDisplayType
from datadog_api_client.v1.model.widget_event import WidgetEvent
from datadog_api_client.v1.model.widget_event_size import WidgetEventSize
from datadog_api_client.v1.model.widget_field_sort import WidgetFieldSort
from datadog_api_client.v1.model.widget_grouping import WidgetGrouping
from datadog_api_client.v1.model.widget_image_sizing import WidgetImageSizing
from datadog_api_client.v1.model.widget_layout import WidgetLayout
from datadog_api_client.v1.model.widget_layout_type import WidgetLayoutType
from datadog_api_client.v1.model.widget_line_type import WidgetLineType
from datadog_api_client.v1.model.widget_line_width import WidgetLineWidth
from datadog_api_client.v1.model.widget_live_span import WidgetLiveSpan
from datadog_api_client.v1.model.widget_margin import WidgetMargin
from datadog_api_client.v1.model.widget_marker import WidgetMarker
from datadog_api_client.v1.model.widget_message_display import WidgetMessageDisplay
from datadog_api_client.v1.model.widget_monitor_summary_display_format import WidgetMonitorSummaryDisplayFormat
from datadog_api_client.v1.model.widget_monitor_summary_sort import WidgetMonitorSummarySort
from datadog_api_client.v1.model.widget_node_type import WidgetNodeType
from datadog_api_client.v1.model.widget_order_by import WidgetOrderBy
from datadog_api_client.v1.model.widget_palette import WidgetPalette
from datadog_api_client.v1.model.widget_request_style import WidgetRequestStyle
from datadog_api_client.v1.model.widget_service_summary_display_format import WidgetServiceSummaryDisplayFormat
from datadog_api_client.v1.model.widget_size_format import WidgetSizeFormat
from datadog_api_client.v1.model.widget_sort import WidgetSort
from datadog_api_client.v1.model.widget_style import WidgetStyle
from datadog_api_client.v1.model.widget_summary_type import WidgetSummaryType
from datadog_api_client.v1.model.widget_text_align import WidgetTextAlign
from datadog_api_client.v1.model.widget_tick_edge import WidgetTickEdge
from datadog_api_client.v1.model.widget_time import WidgetTime
from datadog_api_client.v1.model.widget_time_windows import WidgetTimeWindows
from datadog_api_client.v1.model.widget_view_mode import WidgetViewMode
from datadog_api_client.v1.model.widget_viz_type import WidgetVizType
| 85.187215
| 138
| 0.917908
| 4,938
| 37,312
| 6.549818
| 0.148441
| 0.144544
| 0.183966
| 0.262808
| 0.56176
| 0.555638
| 0.544786
| 0.44328
| 0.323501
| 0.143153
| 0
| 0.012013
| 0.047357
| 37,312
| 437
| 139
| 85.382151
| 0.897904
| 0.010077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016509
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c9362a993263007a286902bee1e116a5815869c7
| 76
|
py
|
Python
|
mmdet/ops/ml_nms_rotated/__init__.py
|
JarvisUSTC/DARDet
|
debbf476e9750030db67f030a40cf8d4f03e46ee
|
[
"Apache-2.0"
] | 23
|
2021-09-22T14:05:49.000Z
|
2022-02-15T09:45:23.000Z
|
mmdet/ops/ml_nms_rotated/__init__.py
|
JarvisUSTC/DARDet
|
debbf476e9750030db67f030a40cf8d4f03e46ee
|
[
"Apache-2.0"
] | 13
|
2021-10-09T07:08:17.000Z
|
2022-01-06T05:53:45.000Z
|
mmdet/ops/ml_nms_rotated/__init__.py
|
JarvisUSTC/DARDet
|
debbf476e9750030db67f030a40cf8d4f03e46ee
|
[
"Apache-2.0"
] | 6
|
2021-11-15T03:16:51.000Z
|
2022-03-20T08:55:19.000Z
|
from .ml_nms_rotated_cuda import ml_nms_rotated
__all__=['ml_nms_rotated']
| 19
| 47
| 0.842105
| 13
| 76
| 4.076923
| 0.538462
| 0.283019
| 0.679245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 76
| 3
| 48
| 25.333333
| 0.757143
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c94bec588e643f434d089a3437ab91898b5dbff5
| 28,835
|
py
|
Python
|
generated/resources/loadbalancer_pool_heat.py
|
atsgen/tf-heat-plugin
|
5c0405eb93287368f60f7e227e5af5ada6bfeed2
|
[
"Apache-2.0"
] | 1
|
2020-04-05T19:43:40.000Z
|
2020-04-05T19:43:40.000Z
|
generated/resources/loadbalancer_pool_heat.py
|
atsgen/tf-heat-plugin
|
5c0405eb93287368f60f7e227e5af5ada6bfeed2
|
[
"Apache-2.0"
] | null | null | null |
generated/resources/loadbalancer_pool_heat.py
|
atsgen/tf-heat-plugin
|
5c0405eb93287368f60f7e227e5af5ada6bfeed2
|
[
"Apache-2.0"
] | 1
|
2020-08-25T12:47:27.000Z
|
2020-08-25T12:47:27.000Z
|
# AUTO-GENERATED file from IFMapApiGenerator. Do Not Edit!
from contrail_heat.resources import contrail
try:
from heat.common.i18n import _
except ImportError:
pass
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
try:
from heat.openstack.common import log as logging
except ImportError:
from oslo_log import log as logging
import uuid
from vnc_api import vnc_api
LOG = logging.getLogger(__name__)
class ContrailLoadbalancerPool(contrail.ContrailResource):
    """Heat resource wrapping the Contrail ``loadbalancer-pool`` object.

    Auto-generated by IFMapApiGenerator (see file header): the PROPERTIES
    tuple and the schema tables below mirror the vnc_api LoadbalancerPool
    data model one-to-one, so field-level meaning comes from that model.
    """

    # Double-assignment idiom: the left tuple binds class-level constants,
    # the right tuple supplies the matching Heat template keys.
    PROPERTIES = (
        NAME, FQ_NAME, DISPLAY_NAME, LOADBALANCER_POOL_PROVIDER, LOADBALANCER_POOL_PROPERTIES, LOADBALANCER_POOL_PROPERTIES_STATUS, LOADBALANCER_POOL_PROPERTIES_STATUS_DESCRIPTION, LOADBALANCER_POOL_PROPERTIES_ADMIN_STATE, LOADBALANCER_POOL_PROPERTIES_PROTOCOL, LOADBALANCER_POOL_PROPERTIES_LOADBALANCER_METHOD, LOADBALANCER_POOL_PROPERTIES_SUBNET_ID, LOADBALANCER_POOL_PROPERTIES_SESSION_PERSISTENCE, LOADBALANCER_POOL_PROPERTIES_PERSISTENCE_COOKIE_NAME, LOADBALANCER_POOL_CUSTOM_ATTRIBUTES, LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR, LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR_KEY, LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR_VALUE, LOADBALANCER_LISTENER_REFS, LOADBALANCER_HEALTHMONITOR_REFS, SERVICE_INSTANCE_REFS, VIRTUAL_MACHINE_INTERFACE_REFS, SERVICE_APPLIANCE_SET_REFS, PROJECT
    ) = (
        'name', 'fq_name', 'display_name', 'loadbalancer_pool_provider', 'loadbalancer_pool_properties', 'loadbalancer_pool_properties_status', 'loadbalancer_pool_properties_status_description', 'loadbalancer_pool_properties_admin_state', 'loadbalancer_pool_properties_protocol', 'loadbalancer_pool_properties_loadbalancer_method', 'loadbalancer_pool_properties_subnet_id', 'loadbalancer_pool_properties_session_persistence', 'loadbalancer_pool_properties_persistence_cookie_name', 'loadbalancer_pool_custom_attributes', 'loadbalancer_pool_custom_attributes_key_value_pair', 'loadbalancer_pool_custom_attributes_key_value_pair_key', 'loadbalancer_pool_custom_attributes_key_value_pair_value', 'loadbalancer_listener_refs', 'loadbalancer_healthmonitor_refs', 'service_instance_refs', 'virtual_machine_interface_refs', 'service_appliance_set_refs', 'project'
    )

    # Heat template schema for every writable property; all fields are
    # optional and updatable in place.
    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('NAME.'),
            update_allowed=True,
            required=False,
        ),
        FQ_NAME: properties.Schema(
            properties.Schema.STRING,
            _('FQ_NAME.'),
            update_allowed=True,
            required=False,
        ),
        DISPLAY_NAME: properties.Schema(
            properties.Schema.STRING,
            _('DISPLAY_NAME.'),
            update_allowed=True,
            required=False,
        ),
        LOADBALANCER_POOL_PROVIDER: properties.Schema(
            properties.Schema.STRING,
            _('LOADBALANCER_POOL_PROVIDER.'),
            update_allowed=True,
            required=False,
        ),
        # Nested map mirroring vnc_api.LoadbalancerPoolType.
        LOADBALANCER_POOL_PROPERTIES: properties.Schema(
            properties.Schema.MAP,
            _('LOADBALANCER_POOL_PROPERTIES.'),
            update_allowed=True,
            required=False,
            schema={
                LOADBALANCER_POOL_PROPERTIES_STATUS: properties.Schema(
                    properties.Schema.STRING,
                    _('LOADBALANCER_POOL_PROPERTIES_STATUS.'),
                    update_allowed=True,
                    required=False,
                ),
                LOADBALANCER_POOL_PROPERTIES_STATUS_DESCRIPTION: properties.Schema(
                    properties.Schema.STRING,
                    _('LOADBALANCER_POOL_PROPERTIES_STATUS_DESCRIPTION.'),
                    update_allowed=True,
                    required=False,
                ),
                LOADBALANCER_POOL_PROPERTIES_ADMIN_STATE: properties.Schema(
                    properties.Schema.BOOLEAN,
                    _('LOADBALANCER_POOL_PROPERTIES_ADMIN_STATE.'),
                    update_allowed=True,
                    required=False,
                ),
                LOADBALANCER_POOL_PROPERTIES_PROTOCOL: properties.Schema(
                    properties.Schema.STRING,
                    _('LOADBALANCER_POOL_PROPERTIES_PROTOCOL.'),
                    update_allowed=True,
                    required=False,
                    constraints=[
                        constraints.AllowedValues([u'HTTP', u'HTTPS', u'TCP', u'TERMINATED_HTTPS']),
                    ],
                ),
                LOADBALANCER_POOL_PROPERTIES_LOADBALANCER_METHOD: properties.Schema(
                    properties.Schema.STRING,
                    _('LOADBALANCER_POOL_PROPERTIES_LOADBALANCER_METHOD.'),
                    update_allowed=True,
                    required=False,
                    constraints=[
                        constraints.AllowedValues([u'ROUND_ROBIN', u'LEAST_CONNECTIONS', u'SOURCE_IP']),
                    ],
                ),
                LOADBALANCER_POOL_PROPERTIES_SUBNET_ID: properties.Schema(
                    properties.Schema.STRING,
                    _('LOADBALANCER_POOL_PROPERTIES_SUBNET_ID.'),
                    update_allowed=True,
                    required=False,
                ),
                LOADBALANCER_POOL_PROPERTIES_SESSION_PERSISTENCE: properties.Schema(
                    properties.Schema.STRING,
                    _('LOADBALANCER_POOL_PROPERTIES_SESSION_PERSISTENCE.'),
                    update_allowed=True,
                    required=False,
                    constraints=[
                        constraints.AllowedValues([u'SOURCE_IP', u'HTTP_COOKIE', u'APP_COOKIE']),
                    ],
                ),
                LOADBALANCER_POOL_PROPERTIES_PERSISTENCE_COOKIE_NAME: properties.Schema(
                    properties.Schema.STRING,
                    _('LOADBALANCER_POOL_PROPERTIES_PERSISTENCE_COOKIE_NAME.'),
                    update_allowed=True,
                    required=False,
                ),
            }
        ),
        # Map holding a list of {key, value} pairs (vnc_api.KeyValuePairs).
        LOADBALANCER_POOL_CUSTOM_ATTRIBUTES: properties.Schema(
            properties.Schema.MAP,
            _('LOADBALANCER_POOL_CUSTOM_ATTRIBUTES.'),
            update_allowed=True,
            required=False,
            schema={
                LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR: properties.Schema(
                    properties.Schema.LIST,
                    _('LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR.'),
                    update_allowed=True,
                    required=False,
                    schema=properties.Schema(
                        properties.Schema.MAP,
                        schema={
                            LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR_KEY: properties.Schema(
                                properties.Schema.STRING,
                                _('LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR_KEY.'),
                                update_allowed=True,
                                required=False,
                            ),
                            LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR_VALUE: properties.Schema(
                                properties.Schema.STRING,
                                _('LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR_VALUE.'),
                                update_allowed=True,
                                required=False,
                            ),
                        }
                    )
                ),
            }
        ),
        # Reference lists: each entry is an id or fq_name string resolved
        # against the API in handle_create/handle_update.
        LOADBALANCER_LISTENER_REFS: properties.Schema(
            properties.Schema.LIST,
            _('LOADBALANCER_LISTENER_REFS.'),
            update_allowed=True,
            required=False,
        ),
        LOADBALANCER_HEALTHMONITOR_REFS: properties.Schema(
            properties.Schema.LIST,
            _('LOADBALANCER_HEALTHMONITOR_REFS.'),
            update_allowed=True,
            required=False,
        ),
        SERVICE_INSTANCE_REFS: properties.Schema(
            properties.Schema.LIST,
            _('SERVICE_INSTANCE_REFS.'),
            update_allowed=True,
            required=False,
        ),
        VIRTUAL_MACHINE_INTERFACE_REFS: properties.Schema(
            properties.Schema.LIST,
            _('VIRTUAL_MACHINE_INTERFACE_REFS.'),
            update_allowed=True,
            required=False,
        ),
        SERVICE_APPLIANCE_SET_REFS: properties.Schema(
            properties.Schema.LIST,
            _('SERVICE_APPLIANCE_SET_REFS.'),
            update_allowed=True,
            required=False,
        ),
        # Parent project (id or fq_name); see handle_create for resolution.
        PROJECT: properties.Schema(
            properties.Schema.STRING,
            _('PROJECT.'),
            update_allowed=True,
            required=False,
        ),
    }

    # Read-only attributes exposed back to the template.
    attributes_schema = {
        NAME: attributes.Schema(
            _('NAME.'),
        ),
        FQ_NAME: attributes.Schema(
            _('FQ_NAME.'),
        ),
        DISPLAY_NAME: attributes.Schema(
            _('DISPLAY_NAME.'),
        ),
        LOADBALANCER_POOL_PROVIDER: attributes.Schema(
            _('LOADBALANCER_POOL_PROVIDER.'),
        ),
        LOADBALANCER_POOL_PROPERTIES: attributes.Schema(
            _('LOADBALANCER_POOL_PROPERTIES.'),
        ),
        LOADBALANCER_POOL_CUSTOM_ATTRIBUTES: attributes.Schema(
            _('LOADBALANCER_POOL_CUSTOM_ATTRIBUTES.'),
        ),
        LOADBALANCER_LISTENER_REFS: attributes.Schema(
            _('LOADBALANCER_LISTENER_REFS.'),
        ),
        LOADBALANCER_HEALTHMONITOR_REFS: attributes.Schema(
            _('LOADBALANCER_HEALTHMONITOR_REFS.'),
        ),
        SERVICE_INSTANCE_REFS: attributes.Schema(
            _('SERVICE_INSTANCE_REFS.'),
        ),
        VIRTUAL_MACHINE_INTERFACE_REFS: attributes.Schema(
            _('VIRTUAL_MACHINE_INTERFACE_REFS.'),
        ),
        SERVICE_APPLIANCE_SET_REFS: attributes.Schema(
            _('SERVICE_APPLIANCE_SET_REFS.'),
        ),
        PROJECT: attributes.Schema(
            _('PROJECT.'),
        ),
    }

    # Only the Properties section of the template may change on update.
    update_allowed_keys = ('Properties',)
    def handle_create(self):
        """Create the loadbalancer-pool described by this resource.

        Resolves the parent project (explicit PROJECT property first, the
        stack's tenant otherwise), builds the vnc_api.LoadbalancerPool with
        its nested property/attribute structs, attaches every configured
        reference, creates the object via the API and records its uuid.
        """
        parent_obj = None
        # Resolve the parent from the PROJECT property: try by id first,
        # then by fq_name string; any other failure falls through to the
        # tenant-based lookup below.
        if parent_obj is None and self.properties.get(self.PROJECT):
            try:
                parent_obj = self.vnc_lib().project_read(id=self.properties.get(self.PROJECT))
            except vnc_api.NoIdError:
                parent_obj = self.vnc_lib().project_read(fq_name_str=self.properties.get(self.PROJECT))
            except:
                parent_obj = None
        # Fall back to the project of the stack's tenant.
        if parent_obj is None:
            tenant_id = self.stack.context.tenant_id
            parent_obj = self.vnc_lib().project_read(id=str(uuid.UUID(tenant_id)))
        if parent_obj is None:
            raise Exception('Error: parent is not specified in template!')
        obj_0 = vnc_api.LoadbalancerPool(name=self.properties[self.NAME],
            parent_obj=parent_obj)
        if self.properties.get(self.DISPLAY_NAME) is not None:
            obj_0.set_display_name(self.properties.get(self.DISPLAY_NAME))
        if self.properties.get(self.LOADBALANCER_POOL_PROVIDER) is not None:
            obj_0.set_loadbalancer_pool_provider(self.properties.get(self.LOADBALANCER_POOL_PROVIDER))
        # Copy the nested LoadbalancerPoolType struct field by field.
        if self.properties.get(self.LOADBALANCER_POOL_PROPERTIES) is not None:
            obj_1 = vnc_api.LoadbalancerPoolType()
            if self.properties.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_STATUS) is not None:
                obj_1.set_status(self.properties.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_STATUS))
            if self.properties.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_STATUS_DESCRIPTION) is not None:
                obj_1.set_status_description(self.properties.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_STATUS_DESCRIPTION))
            if self.properties.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_ADMIN_STATE) is not None:
                obj_1.set_admin_state(self.properties.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_ADMIN_STATE))
            if self.properties.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_PROTOCOL) is not None:
                obj_1.set_protocol(self.properties.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_PROTOCOL))
            if self.properties.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_LOADBALANCER_METHOD) is not None:
                obj_1.set_loadbalancer_method(self.properties.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_LOADBALANCER_METHOD))
            if self.properties.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_SUBNET_ID) is not None:
                obj_1.set_subnet_id(self.properties.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_SUBNET_ID))
            if self.properties.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_SESSION_PERSISTENCE) is not None:
                obj_1.set_session_persistence(self.properties.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_SESSION_PERSISTENCE))
            if self.properties.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_PERSISTENCE_COOKIE_NAME) is not None:
                obj_1.set_persistence_cookie_name(self.properties.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_PERSISTENCE_COOKIE_NAME))
            obj_0.set_loadbalancer_pool_properties(obj_1)
        # Copy the KeyValuePairs struct for custom attributes.
        if self.properties.get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES) is not None:
            obj_1 = vnc_api.KeyValuePairs()
            if self.properties.get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES, {}).get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR) is not None:
                for index_1 in range(len(self.properties.get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES, {}).get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR))):
                    obj_2 = vnc_api.KeyValuePair()
                    if self.properties.get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES, {}).get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR, {})[index_1].get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR_KEY) is not None:
                        obj_2.set_key(self.properties.get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES, {}).get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR, {})[index_1].get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR_KEY))
                    if self.properties.get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES, {}).get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR, {})[index_1].get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR_VALUE) is not None:
                        obj_2.set_value(self.properties.get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES, {}).get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR, {})[index_1].get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR_VALUE))
                    obj_1.add_key_value_pair(obj_2)
            obj_0.set_loadbalancer_pool_custom_attributes(obj_1)
        # Each ref entry may be an id or an fq_name string; a failed id
        # lookup (NoIdError) is retried as an fq_name lookup.
        # reference to loadbalancer_listener_refs
        if self.properties.get(self.LOADBALANCER_LISTENER_REFS):
            for index_0 in range(len(self.properties.get(self.LOADBALANCER_LISTENER_REFS))):
                try:
                    ref_obj = self.vnc_lib().loadbalancer_listener_read(
                        id=self.properties.get(self.LOADBALANCER_LISTENER_REFS)[index_0]
                    )
                except vnc_api.NoIdError:
                    ref_obj = self.vnc_lib().loadbalancer_listener_read(
                        fq_name_str=self.properties.get(self.LOADBALANCER_LISTENER_REFS)[index_0]
                    )
                obj_0.add_loadbalancer_listener(ref_obj)
        # reference to loadbalancer_healthmonitor_refs
        if self.properties.get(self.LOADBALANCER_HEALTHMONITOR_REFS):
            for index_0 in range(len(self.properties.get(self.LOADBALANCER_HEALTHMONITOR_REFS))):
                try:
                    ref_obj = self.vnc_lib().loadbalancer_healthmonitor_read(
                        id=self.properties.get(self.LOADBALANCER_HEALTHMONITOR_REFS)[index_0]
                    )
                except vnc_api.NoIdError:
                    ref_obj = self.vnc_lib().loadbalancer_healthmonitor_read(
                        fq_name_str=self.properties.get(self.LOADBALANCER_HEALTHMONITOR_REFS)[index_0]
                    )
                obj_0.add_loadbalancer_healthmonitor(ref_obj)
        # reference to service_instance_refs
        if self.properties.get(self.SERVICE_INSTANCE_REFS):
            for index_0 in range(len(self.properties.get(self.SERVICE_INSTANCE_REFS))):
                try:
                    ref_obj = self.vnc_lib().service_instance_read(
                        id=self.properties.get(self.SERVICE_INSTANCE_REFS)[index_0]
                    )
                except vnc_api.NoIdError:
                    ref_obj = self.vnc_lib().service_instance_read(
                        fq_name_str=self.properties.get(self.SERVICE_INSTANCE_REFS)[index_0]
                    )
                obj_0.add_service_instance(ref_obj)
        # reference to virtual_machine_interface_refs
        if self.properties.get(self.VIRTUAL_MACHINE_INTERFACE_REFS):
            for index_0 in range(len(self.properties.get(self.VIRTUAL_MACHINE_INTERFACE_REFS))):
                try:
                    ref_obj = self.vnc_lib().virtual_machine_interface_read(
                        id=self.properties.get(self.VIRTUAL_MACHINE_INTERFACE_REFS)[index_0]
                    )
                except vnc_api.NoIdError:
                    ref_obj = self.vnc_lib().virtual_machine_interface_read(
                        fq_name_str=self.properties.get(self.VIRTUAL_MACHINE_INTERFACE_REFS)[index_0]
                    )
                obj_0.add_virtual_machine_interface(ref_obj)
        # reference to service_appliance_set_refs
        if self.properties.get(self.SERVICE_APPLIANCE_SET_REFS):
            for index_0 in range(len(self.properties.get(self.SERVICE_APPLIANCE_SET_REFS))):
                try:
                    ref_obj = self.vnc_lib().service_appliance_set_read(
                        id=self.properties.get(self.SERVICE_APPLIANCE_SET_REFS)[index_0]
                    )
                except vnc_api.NoIdError:
                    ref_obj = self.vnc_lib().service_appliance_set_read(
                        fq_name_str=self.properties.get(self.SERVICE_APPLIANCE_SET_REFS)[index_0]
                    )
                obj_0.add_service_appliance_set(ref_obj)
        try:
            obj_uuid = super(ContrailLoadbalancerPool, self).resource_create(obj_0)
        except:
            # NOTE(review): message says "updated" even though this is the
            # create path — artifact of the code generator, left unchanged.
            raise Exception(_('loadbalancer-pool %s could not be updated.') % self.name)
        self.resource_id_set(obj_uuid)
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply changed properties (``prop_diff``) to the existing pool.

        Reads the current object by resource id, overwrites only the fields
        present in the diff, replaces each reference list whose key appears
        in the diff, and writes the object back via the API.
        """
        try:
            obj_0 = self.vnc_lib().loadbalancer_pool_read(
                id=self.resource_id
            )
        except:
            raise Exception(_('loadbalancer-pool %s not found.') % self.name)
        if prop_diff.get(self.DISPLAY_NAME) is not None:
            obj_0.set_display_name(prop_diff.get(self.DISPLAY_NAME))
        if prop_diff.get(self.LOADBALANCER_POOL_PROVIDER) is not None:
            obj_0.set_loadbalancer_pool_provider(prop_diff.get(self.LOADBALANCER_POOL_PROVIDER))
        # Rebuild the nested LoadbalancerPoolType struct from the diff.
        if prop_diff.get(self.LOADBALANCER_POOL_PROPERTIES) is not None:
            obj_1 = vnc_api.LoadbalancerPoolType()
            if prop_diff.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_STATUS) is not None:
                obj_1.set_status(prop_diff.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_STATUS))
            if prop_diff.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_STATUS_DESCRIPTION) is not None:
                obj_1.set_status_description(prop_diff.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_STATUS_DESCRIPTION))
            if prop_diff.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_ADMIN_STATE) is not None:
                obj_1.set_admin_state(prop_diff.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_ADMIN_STATE))
            if prop_diff.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_PROTOCOL) is not None:
                obj_1.set_protocol(prop_diff.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_PROTOCOL))
            if prop_diff.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_LOADBALANCER_METHOD) is not None:
                obj_1.set_loadbalancer_method(prop_diff.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_LOADBALANCER_METHOD))
            if prop_diff.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_SUBNET_ID) is not None:
                obj_1.set_subnet_id(prop_diff.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_SUBNET_ID))
            if prop_diff.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_SESSION_PERSISTENCE) is not None:
                obj_1.set_session_persistence(prop_diff.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_SESSION_PERSISTENCE))
            if prop_diff.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_PERSISTENCE_COOKIE_NAME) is not None:
                obj_1.set_persistence_cookie_name(prop_diff.get(self.LOADBALANCER_POOL_PROPERTIES, {}).get(self.LOADBALANCER_POOL_PROPERTIES_PERSISTENCE_COOKIE_NAME))
            obj_0.set_loadbalancer_pool_properties(obj_1)
        # Rebuild the KeyValuePairs struct for custom attributes.
        if prop_diff.get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES) is not None:
            obj_1 = vnc_api.KeyValuePairs()
            if prop_diff.get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES, {}).get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR) is not None:
                for index_1 in range(len(prop_diff.get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES, {}).get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR))):
                    obj_2 = vnc_api.KeyValuePair()
                    if prop_diff.get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES, {}).get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR, {})[index_1].get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR_KEY) is not None:
                        obj_2.set_key(prop_diff.get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES, {}).get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR, {})[index_1].get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR_KEY))
                    if prop_diff.get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES, {}).get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR, {})[index_1].get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR_VALUE) is not None:
                        obj_2.set_value(prop_diff.get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES, {}).get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR, {})[index_1].get(self.LOADBALANCER_POOL_CUSTOM_ATTRIBUTES_KEY_VALUE_PAIR_VALUE))
                    obj_1.add_key_value_pair(obj_2)
            obj_0.set_loadbalancer_pool_custom_attributes(obj_1)
        # Reference lists below are fully replaced (set_*_list) when the
        # key is present in the diff; here the bare except (unlike the
        # NoIdError catch in handle_create) retries any failure as an
        # fq_name lookup.
        # reference to loadbalancer_listener_refs
        ref_obj_list = []
        ref_data_list = []  # NOTE(review): never used; generator artifact.
        if self.LOADBALANCER_LISTENER_REFS in prop_diff:
            for index_0 in range(len(prop_diff.get(self.LOADBALANCER_LISTENER_REFS) or [])):
                try:
                    ref_obj = self.vnc_lib().loadbalancer_listener_read(
                        id=prop_diff.get(self.LOADBALANCER_LISTENER_REFS)[index_0]
                    )
                except:
                    ref_obj = self.vnc_lib().loadbalancer_listener_read(
                        fq_name_str=prop_diff.get(self.LOADBALANCER_LISTENER_REFS)[index_0]
                    )
                ref_obj_list.append(ref_obj.fq_name)
            obj_0.set_loadbalancer_listener_list(ref_obj_list)
        # End: reference to loadbalancer_listener_refs
        # reference to loadbalancer_healthmonitor_refs
        ref_obj_list = []
        ref_data_list = []  # NOTE(review): never used; generator artifact.
        if self.LOADBALANCER_HEALTHMONITOR_REFS in prop_diff:
            for index_0 in range(len(prop_diff.get(self.LOADBALANCER_HEALTHMONITOR_REFS) or [])):
                try:
                    ref_obj = self.vnc_lib().loadbalancer_healthmonitor_read(
                        id=prop_diff.get(self.LOADBALANCER_HEALTHMONITOR_REFS)[index_0]
                    )
                except:
                    ref_obj = self.vnc_lib().loadbalancer_healthmonitor_read(
                        fq_name_str=prop_diff.get(self.LOADBALANCER_HEALTHMONITOR_REFS)[index_0]
                    )
                ref_obj_list.append(ref_obj.fq_name)
            obj_0.set_loadbalancer_healthmonitor_list(ref_obj_list)
        # End: reference to loadbalancer_healthmonitor_refs
        # reference to service_instance_refs
        ref_obj_list = []
        ref_data_list = []  # NOTE(review): never used; generator artifact.
        if self.SERVICE_INSTANCE_REFS in prop_diff:
            for index_0 in range(len(prop_diff.get(self.SERVICE_INSTANCE_REFS) or [])):
                try:
                    ref_obj = self.vnc_lib().service_instance_read(
                        id=prop_diff.get(self.SERVICE_INSTANCE_REFS)[index_0]
                    )
                except:
                    ref_obj = self.vnc_lib().service_instance_read(
                        fq_name_str=prop_diff.get(self.SERVICE_INSTANCE_REFS)[index_0]
                    )
                ref_obj_list.append(ref_obj.fq_name)
            obj_0.set_service_instance_list(ref_obj_list)
        # End: reference to service_instance_refs
        # reference to virtual_machine_interface_refs
        ref_obj_list = []
        ref_data_list = []  # NOTE(review): never used; generator artifact.
        if self.VIRTUAL_MACHINE_INTERFACE_REFS in prop_diff:
            for index_0 in range(len(prop_diff.get(self.VIRTUAL_MACHINE_INTERFACE_REFS) or [])):
                try:
                    ref_obj = self.vnc_lib().virtual_machine_interface_read(
                        id=prop_diff.get(self.VIRTUAL_MACHINE_INTERFACE_REFS)[index_0]
                    )
                except:
                    ref_obj = self.vnc_lib().virtual_machine_interface_read(
                        fq_name_str=prop_diff.get(self.VIRTUAL_MACHINE_INTERFACE_REFS)[index_0]
                    )
                ref_obj_list.append(ref_obj.fq_name)
            obj_0.set_virtual_machine_interface_list(ref_obj_list)
        # End: reference to virtual_machine_interface_refs
        # reference to service_appliance_set_refs
        ref_obj_list = []
        ref_data_list = []  # NOTE(review): never used; generator artifact.
        if self.SERVICE_APPLIANCE_SET_REFS in prop_diff:
            for index_0 in range(len(prop_diff.get(self.SERVICE_APPLIANCE_SET_REFS) or [])):
                try:
                    ref_obj = self.vnc_lib().service_appliance_set_read(
                        id=prop_diff.get(self.SERVICE_APPLIANCE_SET_REFS)[index_0]
                    )
                except:
                    ref_obj = self.vnc_lib().service_appliance_set_read(
                        fq_name_str=prop_diff.get(self.SERVICE_APPLIANCE_SET_REFS)[index_0]
                    )
                ref_obj_list.append(ref_obj.fq_name)
            obj_0.set_service_appliance_set_list(ref_obj_list)
        # End: reference to service_appliance_set_refs
        try:
            self.vnc_lib().loadbalancer_pool_update(obj_0)
        except:
            raise Exception(_('loadbalancer-pool %s could not be updated.') % self.name)
def handle_delete(self):
if self.resource_id is None:
return
try:
self.vnc_lib().loadbalancer_pool_delete(id=self.resource_id)
except Exception as ex:
self._ignore_not_found(ex)
LOG.warn(_('loadbalancer_pool %s already deleted.') % self.name)
def _show_resource(self):
obj = self.vnc_lib().loadbalancer_pool_read(id=self.resource_id)
obj_dict = obj.serialize_to_json()
return obj_dict
def resource_mapping():
    """Map the Heat resource type name to the class implementing it."""
    return {'OS::ContrailV2::LoadbalancerPool': ContrailLoadbalancerPool}
| 55.990291
| 856
| 0.654448
| 3,136
| 28,835
| 5.574298
| 0.050383
| 0.16475
| 0.128254
| 0.136834
| 0.89051
| 0.847148
| 0.784852
| 0.730107
| 0.648418
| 0.58944
| 0
| 0.004676
| 0.265719
| 28,835
| 514
| 857
| 56.099222
| 0.820951
| 0.024033
| 0
| 0.488017
| 1
| 0
| 0.075949
| 0.061798
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010893
| false
| 0.002179
| 0.023965
| 0.002179
| 0.052288
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c9501f6a2e7dd3911c3c286d0abff281219721cf
| 87
|
py
|
Python
|
Python/Hello_Hacktoberfest.py
|
kennethsequeira/Hello-world
|
464227bc7d9778a4a2a4044fe415a629003ea77f
|
[
"MIT"
] | 1,428
|
2018-10-03T15:15:17.000Z
|
2019-03-31T18:38:36.000Z
|
Python/Hello_Hacktoberfest.py
|
kennethsequeira/Hello-world
|
464227bc7d9778a4a2a4044fe415a629003ea77f
|
[
"MIT"
] | 1,162
|
2018-10-03T15:05:49.000Z
|
2018-10-18T14:17:52.000Z
|
Python/Hello_Hacktoberfest.py
|
kennethsequeira/Hello-world
|
464227bc7d9778a4a2a4044fe415a629003ea77f
|
[
"MIT"
] | 3,909
|
2018-10-03T15:07:19.000Z
|
2019-03-31T18:39:08.000Z
|
from datetime import datetime

# Parenthesized call form: the original bare `print` statement is
# Python-2-only syntax; this line runs identically under Python 2 and 3.
print("Hello, Hacktoberfest " + str(datetime.now().year))
| 29
| 56
| 0.770115
| 11
| 87
| 6.090909
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114943
| 87
| 3
| 56
| 29
| 0.87013
| 0
| 0
| 0
| 0
| 0
| 0.238636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
a309fb01a5409fb387f7bf039dd6e6068a686b84
| 229
|
py
|
Python
|
app/djproject/restaurantes/forms.py
|
mmaguero/cloud-based-tool-SA
|
4dbc10e4e4e59c6351e002b53da59f44f917e503
|
[
"MIT"
] | null | null | null |
app/djproject/restaurantes/forms.py
|
mmaguero/cloud-based-tool-SA
|
4dbc10e4e4e59c6351e002b53da59f44f917e503
|
[
"MIT"
] | null | null | null |
app/djproject/restaurantes/forms.py
|
mmaguero/cloud-based-tool-SA
|
4dbc10e4e4e59c6351e002b53da59f44f917e503
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django import forms
class ComputeForm(forms.Form):
    """Form for submitting a restaurant review: a required title plus a
    required free-text body (rendered as a textarea)."""

    # Single-line review title.
    title = forms.CharField(required=True, label='Title')
    # Review text; the Textarea widget renders a multi-line input.
    description = forms.CharField(required=True, label='Review', widget=forms.Textarea)
| 32.714286
| 85
| 0.716157
| 28
| 229
| 5.857143
| 0.678571
| 0.170732
| 0.268293
| 0.317073
| 0.378049
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005051
| 0.135371
| 229
| 6
| 86
| 38.166667
| 0.823232
| 0.091703
| 0
| 0
| 0
| 0
| 0.053398
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
a340cea6e6598303882f5293c7d09ff6e457cee6
| 45
|
py
|
Python
|
py/junkoda_cellularlib/cellularroot.py
|
junkoda/junkoda_cellularlib
|
bc97d6ab419d8e9e1c295a7662d94cfd1f5b3501
|
[
"MIT"
] | null | null | null |
py/junkoda_cellularlib/cellularroot.py
|
junkoda/junkoda_cellularlib
|
bc97d6ab419d8e9e1c295a7662d94cfd1f5b3501
|
[
"MIT"
] | null | null | null |
py/junkoda_cellularlib/cellularroot.py
|
junkoda/junkoda_cellularlib
|
bc97d6ab419d8e9e1c295a7662d94cfd1f5b3501
|
[
"MIT"
] | null | null | null |
import os

# Data directory for the kaggle/cellular project. The original value was a
# hard-coded path on the author's machine; keep it as the default but allow
# overriding via the CELLULAR_DIR environment variable so the module is
# usable elsewhere.
_dir = os.environ.get('CELLULAR_DIR', '/Users/junkoda/Hack/kaggle/cellular')
| 22.5
| 44
| 0.755556
| 6
| 45
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 45
| 1
| 45
| 45
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0.777778
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a374fb97d3f606344581b7f47a61e45459c2066b
| 72
|
py
|
Python
|
benchmark/plotting/__init__.py
|
NeurIPS-Challenge-Team-11/big-ann-benchmarks
|
042f75e759247518140b284e70072f890906ca97
|
[
"MIT"
] | 75
|
2021-07-25T07:50:11.000Z
|
2022-03-25T04:18:54.000Z
|
benchmark/plotting/__init__.py
|
NeurIPS-Challenge-Team-11/big-ann-benchmarks
|
042f75e759247518140b284e70072f890906ca97
|
[
"MIT"
] | 54
|
2021-07-26T02:23:32.000Z
|
2022-02-15T05:44:23.000Z
|
benchmark/plotting/__init__.py
|
NeurIPS-Challenge-Team-11/big-ann-benchmarks
|
042f75e759247518140b284e70072f890906ca97
|
[
"MIT"
] | 21
|
2021-07-27T08:44:22.000Z
|
2022-03-18T07:56:23.000Z
|
from __future__ import absolute_import
from benchmark.plotting import *
| 24
| 38
| 0.861111
| 9
| 72
| 6.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 72
| 2
| 39
| 36
| 0.890625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6e771e1f079080b0bd56a22b984552e5d329c420
| 192
|
py
|
Python
|
src/bgmtinygrail/strategy/manual_control.py
|
no1xsyzy/bgmtinygrail
|
4e762a58337f3021440a070967f1cb7a0213f8a6
|
[
"MIT"
] | 5
|
2020-05-17T02:41:01.000Z
|
2020-07-01T23:24:41.000Z
|
src/bgmtinygrail/strategy/manual_control.py
|
no1xsyzy/bgmtinygrail
|
4e762a58337f3021440a070967f1cb7a0213f8a6
|
[
"MIT"
] | null | null | null |
src/bgmtinygrail/strategy/manual_control.py
|
no1xsyzy/bgmtinygrail
|
4e762a58337f3021440a070967f1cb7a0213f8a6
|
[
"MIT"
] | 1
|
2021-02-09T04:41:15.000Z
|
2021-02-09T04:41:15.000Z
|
from ._base import *
class ManualControlStrategy(ABCCharaStrategy):
    """Strategy that takes no automatic action, leaving the character
    under manual control: transitions keep the current state and output
    does nothing.

    NOTE(review): ABCCharaStrategy and Strategy come from ._base via a
    star import — exact contract assumed from usage; confirm against the
    base class.
    """

    # Enum tag identifying this strategy variant.
    strategy = Strategy.MANUAL_CONTROL

    def transition(self):
        # Never change state on its own.
        return self

    def output(self):
        # No orders are emitted automatically.
        pass
| 16
| 46
| 0.682292
| 19
| 192
| 6.789474
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 192
| 11
| 47
| 17.454545
| 0.895833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.142857
| 0.142857
| 0.142857
| 0.857143
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
6e82f812b26bb0f890ac5aa600a24661f243269c
| 316
|
py
|
Python
|
python/bbgo/utils/__init__.py
|
RicoToothless/bbgo
|
dc487c9194f6a336660b1b51a6adc1e7f970813f
|
[
"MIT"
] | null | null | null |
python/bbgo/utils/__init__.py
|
RicoToothless/bbgo
|
dc487c9194f6a336660b1b51a6adc1e7f970813f
|
[
"MIT"
] | null | null | null |
python/bbgo/utils/__init__.py
|
RicoToothless/bbgo
|
dc487c9194f6a336660b1b51a6adc1e7f970813f
|
[
"MIT"
] | null | null | null |
from .convert import parse_float
from .convert import parse_time
from .grpc_utils import get_credentials_from_env
from .grpc_utils import get_grpc_cert_file_from_env
from .grpc_utils import get_grpc_key_file_from_env
from .grpc_utils import get_insecure_channel
from .grpc_utils import get_insecure_channel_from_env
| 39.5
| 53
| 0.889241
| 54
| 316
| 4.722222
| 0.296296
| 0.156863
| 0.254902
| 0.372549
| 0.709804
| 0.623529
| 0.623529
| 0.623529
| 0
| 0
| 0
| 0
| 0.088608
| 316
| 7
| 54
| 45.142857
| 0.885417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6e950f91049f6c53f9558d3c4727da6b341caba2
| 39
|
py
|
Python
|
pyknotid/spacecurves/periodicline.py
|
SPOCKnots/pyknotid
|
514a3f0f64d980100dc5f1086551f2d809c14907
|
[
"MIT"
] | 17
|
2019-02-07T11:39:38.000Z
|
2022-03-31T13:14:29.000Z
|
pyknotid/spacecurves/periodicline.py
|
SPOCKnots/pyknotid
|
514a3f0f64d980100dc5f1086551f2d809c14907
|
[
"MIT"
] | 5
|
2017-11-10T15:12:30.000Z
|
2021-11-01T16:36:22.000Z
|
pyknotid/spacecurves/periodicline.py
|
SPOCKnots/pyknotid
|
514a3f0f64d980100dc5f1086551f2d809c14907
|
[
"MIT"
] | 7
|
2017-11-10T14:23:46.000Z
|
2021-03-28T06:05:04.000Z
|
import numpy as n
class PeriodicKnot:
    """Placeholder for a space curve with periodic boundary conditions.

    NOTE(review): the original line was truncated to ``class PeriodicKnot``
    with no colon or body — a syntax error that made the module
    unimportable. This minimal valid class restores importability; no base
    classes are assumed since the original declaration was cut off.
    """
| 7.8
| 18
| 0.794872
| 6
| 39
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205128
| 39
| 4
| 19
| 9.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6ec2dc5fb482dfa3aefa999eefd7ac74ea8809be
| 108
|
py
|
Python
|
python_basics/smart_light_switch.py
|
almoratalla/mimo-python-projects
|
3e1cd48c4bb72c3408b444194e200f0111bfc62d
|
[
"MIT"
] | null | null | null |
python_basics/smart_light_switch.py
|
almoratalla/mimo-python-projects
|
3e1cd48c4bb72c3408b444194e200f0111bfc62d
|
[
"MIT"
] | null | null | null |
python_basics/smart_light_switch.py
|
almoratalla/mimo-python-projects
|
3e1cd48c4bb72c3408b444194e200f0111bfc62d
|
[
"MIT"
] | null | null | null |
# Derive the light state from the time of day and report both values.
is_day = False
lights_on = not is_day

# Print each label/value pair in order.
for label, value in (("Daytime?", is_day), ("Lights on?", lights_on)):
    print(label)
    print(value)
| 13.5
| 22
| 0.731481
| 19
| 108
| 3.894737
| 0.421053
| 0.202703
| 0.27027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12037
| 108
| 8
| 23
| 13.5
| 0.778947
| 0
| 0
| 0
| 0
| 0
| 0.165138
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
6ee987b92db7110ddc4664225c32ecbd3fcb236f
| 22,219
|
py
|
Python
|
spark_fhir_schemas/stu3/complex_types/practitionerrole.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | 2
|
2020-10-31T23:25:01.000Z
|
2021-06-09T14:12:42.000Z
|
spark_fhir_schemas/stu3/complex_types/practitionerrole.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/stu3/complex_types/practitionerrole.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union, List, Optional
from pyspark.sql.types import (
StructType,
StructField,
StringType,
ArrayType,
BooleanType,
DataType,
)
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class PractitionerRoleSchema:
    """
    A specific set of Roles/Locations/specialties/services that a practitioner may
    perform at an organization for a period of time.
    """
    # noinspection PyDefaultArgument
    # NOTE: the mutable list defaults below are safe in this method — they are
    # only read (count / concatenation) or passed through, never mutated.
    @staticmethod
    def get_schema(
        max_nesting_depth: Optional[int] = 6,
        nesting_depth: int = 0,
        nesting_list: List[str] = [],
        max_recursion_limit: Optional[int] = 2,
        include_extension: Optional[bool] = False,
        extension_fields: Optional[List[str]] = [
            "valueBoolean",
            "valueCode",
            "valueDate",
            "valueDateTime",
            "valueDecimal",
            "valueId",
            "valueInteger",
            "valuePositiveInt",
            "valueString",
            "valueTime",
            "valueUnsignedInt",
            "valueUri",
            "valueQuantity",
        ],
        extension_depth: int = 0,
        max_extension_depth: Optional[int] = 2,
    ) -> Union[StructType, DataType]:
        """
        A specific set of Roles/Locations/specialties/services that a practitioner may
        perform at an organization for a period of time.
        id: The logical id of the resource, as used in the URL for the resource. Once
        assigned, this value never changes.
        extension: May be used to represent additional information that is not part of the basic
        definition of the resource. In order to make the use of extensions safe and
        manageable, there is a strict set of governance applied to the definition and
        use of extensions. Though any implementer is allowed to define an extension,
        there is a set of requirements that SHALL be met as part of the definition of
        the extension.
        meta: The metadata about the resource. This is content that is maintained by the
        infrastructure. Changes to the content may not always be associated with
        version changes to the resource.
        implicitRules: A reference to a set of rules that were followed when the resource was
        constructed, and which must be understood when processing the content.
        language: The base language in which the resource is written.
        text: A human-readable narrative that contains a summary of the resource, and may be
        used to represent the content of the resource to a human. The narrative need
        not encode all the structured data, but is required to contain sufficient
        detail to make it "clinically safe" for a human to just read the narrative.
        Resource definitions may define what content should be represented in the
        narrative to ensure clinical safety.
        contained: These resources do not have an independent existence apart from the resource
        that contains them - they cannot be identified independently, and nor can they
        have their own independent transaction scope.
        resourceType: This is a PractitionerRole resource
        identifier: Business Identifiers that are specific to a role/location.
        active: Whether this practitioner's record is in active use.
        period: The period during which the person is authorized to act as a practitioner in
        these role(s) for the organization.
        practitioner: Practitioner that is able to provide the defined services for the organation.
        organization: The organization where the Practitioner performs the roles associated.
        code: Roles which this practitioner is authorized to perform for the organization.
        specialty: Specific specialty of the practitioner.
        location: The location(s) at which this practitioner provides care.
        healthcareService: The list of healthcare services that this worker provides for this role's
        Organization/Location(s).
        telecom: Contact details that are specific to the role/location/service.
        availableTime: A collection of times that the Service Site is available.
        notAvailable: The HealthcareService is not available during this period of time due to the
        provided reason.
        availabilityExceptions: A description of site availability exceptions, e.g. public holiday
        availability. Succinctly describing all possible exceptions to normal site
        availability as details in the available Times and not available Times.
        endpoint: Technical endpoints providing access to services operated for the practitioner
        with this role.
        """
        # Imports are local to avoid circular imports between the generated
        # schema modules, which reference each other recursively.
        from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
        from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema
        from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema
        from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema
        from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema
        from spark_fhir_schemas.stu3.complex_types.period import PeriodSchema
        from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema
        from spark_fhir_schemas.stu3.complex_types.codeableconcept import (
            CodeableConceptSchema,
        )
        from spark_fhir_schemas.stu3.complex_types.contactpoint import (
            ContactPointSchema,
        )
        from spark_fhir_schemas.stu3.complex_types.practitionerrole_availabletime import (
            PractitionerRole_AvailableTimeSchema,
        )
        from spark_fhir_schemas.stu3.complex_types.practitionerrole_notavailable import (
            PractitionerRole_NotAvailableSchema,
        )
        # Cut off the recursion: once this resource type has been visited
        # max_recursion_limit times, or the overall nesting depth limit is
        # reached, return a minimal stub schema containing only the id column.
        if (
            max_recursion_limit
            and nesting_list.count("PractitionerRole") >= max_recursion_limit
        ) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
            return StructType([StructField("id", StringType(), True)])
        # add my name to recursion list for later
        my_nesting_list: List[str] = nesting_list + ["PractitionerRole"]
        schema: StructType = StructType(
            [
                # The logical id of the resource, as used in the URL for the resource. Once
                # assigned, this value never changes.
                StructField("id", StringType(), True),
                # May be used to represent additional information that is not part of the basic
                # definition of the resource. In order to make the use of extensions safe and
                # manageable, there is a strict set of governance applied to the definition and
                # use of extensions. Though any implementer is allowed to define an extension,
                # there is a set of requirements that SHALL be met as part of the definition of
                # the extension.
                StructField(
                    "extension",
                    ArrayType(
                        ExtensionSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
                # The metadata about the resource. This is content that is maintained by the
                # infrastructure. Changes to the content may not always be associated with
                # version changes to the resource.
                StructField(
                    "meta",
                    MetaSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                    ),
                    True,
                ),
                # A reference to a set of rules that were followed when the resource was
                # constructed, and which must be understood when processing the content.
                StructField("implicitRules", StringType(), True),
                # The base language in which the resource is written.
                StructField("language", StringType(), True),
                # A human-readable narrative that contains a summary of the resource, and may be
                # used to represent the content of the resource to a human. The narrative need
                # not encode all the structured data, but is required to contain sufficient
                # detail to make it "clinically safe" for a human to just read the narrative.
                # Resource definitions may define what content should be represented in the
                # narrative to ensure clinical safety.
                StructField(
                    "text",
                    NarrativeSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                    ),
                    True,
                ),
                # These resources do not have an independent existence apart from the resource
                # that contains them - they cannot be identified independently, and nor can they
                # have their own independent transaction scope.
                StructField(
                    "contained",
                    ArrayType(
                        ResourceListSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
                # This is a PractitionerRole resource
                StructField("resourceType", StringType(), True),
                # Business Identifiers that are specific to a role/location.
                StructField(
                    "identifier",
                    ArrayType(
                        IdentifierSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
                # Whether this practitioner's record is in active use.
                StructField("active", BooleanType(), True),
                # The period during which the person is authorized to act as a practitioner in
                # these role(s) for the organization.
                StructField(
                    "period",
                    PeriodSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                    ),
                    True,
                ),
                # Practitioner that is able to provide the defined services for the organation.
                StructField(
                    "practitioner",
                    ReferenceSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                    ),
                    True,
                ),
                # The organization where the Practitioner performs the roles associated.
                StructField(
                    "organization",
                    ReferenceSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                    ),
                    True,
                ),
                # Roles which this practitioner is authorized to perform for the organization.
                StructField(
                    "code",
                    ArrayType(
                        CodeableConceptSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
                # Specific specialty of the practitioner.
                StructField(
                    "specialty",
                    ArrayType(
                        CodeableConceptSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
                # The location(s) at which this practitioner provides care.
                StructField(
                    "location",
                    ArrayType(
                        ReferenceSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
                # The list of healthcare services that this worker provides for this role's
                # Organization/Location(s).
                StructField(
                    "healthcareService",
                    ArrayType(
                        ReferenceSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
                # Contact details that are specific to the role/location/service.
                StructField(
                    "telecom",
                    ArrayType(
                        ContactPointSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
                # A collection of times that the Service Site is available.
                StructField(
                    "availableTime",
                    ArrayType(
                        PractitionerRole_AvailableTimeSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
                # The HealthcareService is not available during this period of time due to the
                # provided reason.
                StructField(
                    "notAvailable",
                    ArrayType(
                        PractitionerRole_NotAvailableSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
                # A description of site availability exceptions, e.g. public holiday
                # availability. Succinctly describing all possible exceptions to normal site
                # availability as details in the available Times and not available Times.
                StructField("availabilityExceptions", StringType(), True),
                # Technical endpoints providing access to services operated for the practitioner
                # with this role.
                StructField(
                    "endpoint",
                    ArrayType(
                        ReferenceSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
            ]
        )
        # When extensions are excluded, replace every "extension" column with a
        # plain string placeholder so the overall schema shape stays stable.
        if not include_extension:
            schema.fields = [
                c
                if c.name != "extension"
                else StructField("extension", StringType(), True)
                for c in schema.fields
            ]
        return schema
| 48.832967
| 100
| 0.544624
| 1,993
| 22,219
| 5.845961
| 0.142499
| 0.071067
| 0.045061
| 0.065917
| 0.799416
| 0.781821
| 0.781821
| 0.757102
| 0.748176
| 0.706892
| 0
| 0.002914
| 0.413115
| 22,219
| 454
| 101
| 48.940529
| 0.890567
| 0.279941
| 0
| 0.648148
| 0
| 0
| 0.026113
| 0.001415
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003086
| false
| 0
| 0.040123
| 0
| 0.052469
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
42ccf215885b92a543cb9e7d7859a04346ba208e
| 104
|
py
|
Python
|
abc/136/b.py
|
wotsushi/competitive-programming
|
17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86
|
[
"MIT"
] | 3
|
2019-06-25T06:17:38.000Z
|
2019-07-13T15:18:51.000Z
|
abc/136/b.py
|
wotsushi/competitive-programming
|
17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86
|
[
"MIT"
] | null | null | null |
abc/136/b.py
|
wotsushi/competitive-programming
|
17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86
|
[
"MIT"
] | null | null | null |
# Count how many of the integers 1..N have an odd number of decimal digits,
# i.e. fall in one of the ranges 1-9, 100-999 or 10000-99999.
N = int(input())
counted = min(9, N)  # 1-digit numbers
for lo, hi in ((100, 999), (10000, 99999)):  # 3- and 5-digit numbers
    counted += max(0, min(hi, N) - (lo - 1))
print(counted)
| 17.333333
| 73
| 0.528846
| 21
| 104
| 2.619048
| 0.619048
| 0.145455
| 0.254545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.207317
| 0.211538
| 104
| 5
| 74
| 20.8
| 0.463415
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
42e0add8629cac64830a86d7beab9de71276d1c2
| 1,607
|
py
|
Python
|
quantarhei/builders/aggregates.py
|
slamavl/quantarhei
|
d822bc2db86152c418e330a9152e7866869776f7
|
[
"MIT"
] | 14
|
2016-10-16T13:26:05.000Z
|
2021-11-09T11:40:52.000Z
|
quantarhei/builders/aggregates.py
|
slamavl/quantarhei
|
d822bc2db86152c418e330a9152e7866869776f7
|
[
"MIT"
] | 61
|
2016-09-19T10:45:56.000Z
|
2021-11-10T13:53:06.000Z
|
quantarhei/builders/aggregates.py
|
slamavl/quantarhei
|
d822bc2db86152c418e330a9152e7866869776f7
|
[
"MIT"
] | 21
|
2016-08-30T09:09:28.000Z
|
2022-03-30T03:16:35.000Z
|
# -*- coding: utf-8 -*-
"""
This is the class representing tightly organized molecular aggregates such
as photosynthetic antenna and light-harvesting complexes in
Quantarhei. Apart from representing data, this class also provides a
simplified interface to much of Quantarhei's functionality, such as
calculation of spectra and dynamics. In order to make the core more
organized, the class `Aggregate` is the tip of series of mutually
inheriting classes. They start with AggregateBase, a class which implements
some of the core functionality and add functionality in classes like
`AggregateSpectroscopy`, `AggregateExcitonAnalysis` etc.
Inheritance in Aggregate class
------------------------------
The dependency of the classes is the following
AggregateBase :
basic functionality of the Aggregate
AggregateSpectroscopy :
adds Liouville pathway generation
AggregateExcitonAnalysis :
adds analysis of excitons
AggregatePureDephasing :
adds calculation of effective pure dephasing rates
Aggregate :
wraps everything up
Class Details
-------------
"""
from .aggregate_pdeph import AggregatePureDephasing
class Aggregate(AggregatePureDephasing):
    """
    This class wraps up the definition of the Aggregate class. It is the end
    of a long series of mutually inheriting classes starting with
    AggregateBase. No behaviour is added here; everything is inherited.
    """
    pass
| 30.903846
| 79
| 0.646546
| 164
| 1,607
| 6.329268
| 0.542683
| 0.019268
| 0.030829
| 0.050096
| 0.063584
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000884
| 0.296204
| 1,607
| 52
| 80
| 30.903846
| 0.916888
| 0.821406
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
6e002f05f308fd882f50833cdf720e6b71b2b56e
| 117
|
py
|
Python
|
mc/__init__.py
|
aspuru-guzik-group/mission_control
|
bfe930e1038e9e0d6c4bb327474766e85b2190cb
|
[
"Apache-2.0"
] | 3
|
2017-09-01T19:49:59.000Z
|
2018-06-04T10:30:01.000Z
|
mc/__init__.py
|
aspuru-guzik-group/mission_control
|
bfe930e1038e9e0d6c4bb327474766e85b2190cb
|
[
"Apache-2.0"
] | null | null | null |
mc/__init__.py
|
aspuru-guzik-group/mission_control
|
bfe930e1038e9e0d6c4bb327474766e85b2190cb
|
[
"Apache-2.0"
] | 1
|
2018-12-13T19:48:27.000Z
|
2018-12-13T19:48:27.000Z
|
# Package version as a (major, minor, micro) tuple; bump on release.
VERSION = (0, 0, 1,)


def get_version():
    """Return VERSION rendered as a dotted string, e.g. ``'0.0.1'``."""
    return '.'.join(str(part) for part in VERSION)


# Conventional dunder spelling of the same string.
__version__ = get_version()
| 29.25
| 67
| 0.675214
| 18
| 117
| 4.055556
| 0.611111
| 0.273973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029703
| 0.136752
| 117
| 3
| 68
| 39
| 0.693069
| 0
| 0
| 0
| 0
| 0
| 0.008547
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
6e233fb2c1bc7d8c7146556628c4e1f1961a2287
| 117
|
py
|
Python
|
compass/apps.py
|
osule/bookworm
|
21332fb0bd6381d4304b3e4c6fed60c169339bf4
|
[
"MIT"
] | null | null | null |
compass/apps.py
|
osule/bookworm
|
21332fb0bd6381d4304b3e4c6fed60c169339bf4
|
[
"MIT"
] | 13
|
2021-10-04T22:07:21.000Z
|
2022-03-21T15:11:24.000Z
|
compass/apps.py
|
osule/bookworm
|
21332fb0bd6381d4304b3e4c6fed60c169339bf4
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class CompassConfig(AppConfig):
    """Django application configuration for the ``compass`` app."""
    # Dotted-path label used by Django's app registry.
    name = 'compass'
    # Human-readable name shown e.g. in the admin; identical to the label here.
    verbose_name = "compass"
| 19.5
| 33
| 0.735043
| 13
| 117
| 6.538462
| 0.769231
| 0.258824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179487
| 117
| 5
| 34
| 23.4
| 0.885417
| 0
| 0
| 0
| 0
| 0
| 0.119658
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.75
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
28496ba28a2a6f02e790eed034b8615dabbc5ede
| 85
|
py
|
Python
|
vnpy/api/ctp/__init__.py
|
xiumingxu/vnpy-xx
|
8b2d9ecdabcb7931d46fd92fad2d3701b7e66975
|
[
"MIT"
] | null | null | null |
vnpy/api/ctp/__init__.py
|
xiumingxu/vnpy-xx
|
8b2d9ecdabcb7931d46fd92fad2d3701b7e66975
|
[
"MIT"
] | null | null | null |
vnpy/api/ctp/__init__.py
|
xiumingxu/vnpy-xx
|
8b2d9ecdabcb7931d46fd92fad2d3701b7e66975
|
[
"MIT"
] | null | null | null |
# from .vnctpmd import MdApi
# from .vnctptd import TdApi
from .ctp_constant import *
| 28.333333
| 28
| 0.776471
| 12
| 85
| 5.416667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152941
| 85
| 3
| 29
| 28.333333
| 0.902778
| 0.623529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
284d5eeee24e9aadd53722ffa6fe56ee06d89246
| 329
|
py
|
Python
|
pay/forms.py
|
litchfield/django-pay
|
d563f9d7d612bd949fd577cee623314d0695c6fd
|
[
"MIT"
] | null | null | null |
pay/forms.py
|
litchfield/django-pay
|
d563f9d7d612bd949fd577cee623314d0695c6fd
|
[
"MIT"
] | null | null | null |
pay/forms.py
|
litchfield/django-pay
|
d563f9d7d612bd949fd577cee623314d0695c6fd
|
[
"MIT"
] | null | null | null |
from django import forms
class CreditCardForm(forms.Form):
    """Form with credit card details"""
class PaymentMethodForm(CreditCardForm, forms.ModelForm):
    """Create/update a payment method, which can be used for subscriptions/transactions"""
    # NOTE(review): body contains only a docstring here — presumably fields come
    # from the associated ModelForm Meta elsewhere; confirm against full file.
class TransactionForm(CreditCardForm, forms.ModelForm):
    """Create transaction"""
| 20.5625
| 86
| 0.781155
| 37
| 329
| 6.945946
| 0.72973
| 0.22179
| 0.217899
| 0.264591
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 329
| 15
| 87
| 21.933333
| 0.917857
| 0.392097
| 0
| 0
| 0
| 0
| 0.388379
| 0.079511
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.142857
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
28632786fa61c1a960cfde33e5cea230960d3f3e
| 34
|
py
|
Python
|
notebooks/solutions/timeseries_departure.py
|
cpaniaguam/pandas-head-to-tail
|
c809b6ae5834057c51006ecc908266e6d5d05b15
|
[
"CC-BY-4.0"
] | 88
|
2016-12-29T06:49:10.000Z
|
2022-03-19T20:37:27.000Z
|
notebooks/solutions/timeseries_departure.py
|
paritosh666/pandas-head-to-tail
|
891a72ea5a21f8e0c8f6a6d22c03a1de26a6f30b
|
[
"CC-BY-4.0"
] | 8
|
2018-06-17T21:47:27.000Z
|
2018-07-11T22:31:17.000Z
|
notebooks/solutions/timeseries_departure.py
|
paritosh666/pandas-head-to-tail
|
891a72ea5a21f8e0c8f6a6d22c03a1de26a6f30b
|
[
"CC-BY-4.0"
] | 76
|
2016-12-30T08:56:28.000Z
|
2022-02-27T08:05:26.000Z
|
# Actual departure = scheduled departure plus the departure delay.
# assumes `flights` is a pandas DataFrame with a timestamp-like 'dep' column
# and a timedelta-like 'dep_delay_td' column — TODO confirm against the notebook.
flights.dep + flights.dep_delay_td
| 34
| 34
| 0.852941
| 6
| 34
| 4.5
| 0.666667
| 0.740741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 34
| 1
| 34
| 34
| 0.84375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
2869ea9d26f307df3545541f66173b9ebf27f2d4
| 140
|
py
|
Python
|
umusicfy/user_profile/admin.py
|
CarlosMart626/umusicfy
|
97e2166fe26d1fbe36df6bea435044ef3d367edf
|
[
"Apache-2.0"
] | null | null | null |
umusicfy/user_profile/admin.py
|
CarlosMart626/umusicfy
|
97e2166fe26d1fbe36df6bea435044ef3d367edf
|
[
"Apache-2.0"
] | 8
|
2020-06-05T18:08:05.000Z
|
2022-01-13T00:44:30.000Z
|
umusicfy/user_profile/admin.py
|
CarlosMart626/umusicfy
|
97e2166fe26d1fbe36df6bea435044ef3d367edf
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import PlayList, UserProfile
# Register both models with the default ModelAdmin so they are editable
# in the Django admin site.
admin.site.register(PlayList)
admin.site.register(UserProfile)
| 20
| 41
| 0.828571
| 18
| 140
| 6.444444
| 0.555556
| 0.155172
| 0.293103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092857
| 140
| 6
| 42
| 23.333333
| 0.913386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
956604227c48182f66ec9135985dfd0dfe0731e8
| 1,760
|
py
|
Python
|
investment_atlas/tests/test_helpers.py
|
froddd/great-international-ui
|
414bcb09d701cd7e0c5748d1ac8c587d704f92da
|
[
"MIT"
] | 1
|
2019-03-22T09:45:00.000Z
|
2019-03-22T09:45:00.000Z
|
investment_atlas/tests/test_helpers.py
|
froddd/great-international-ui
|
414bcb09d701cd7e0c5748d1ac8c587d704f92da
|
[
"MIT"
] | 556
|
2019-01-31T15:31:05.000Z
|
2022-03-24T09:44:26.000Z
|
investment_atlas/tests/test_helpers.py
|
froddd/great-international-ui
|
414bcb09d701cd7e0c5748d1ac8c587d704f92da
|
[
"MIT"
] | 6
|
2019-03-07T12:57:49.000Z
|
2021-11-02T15:23:51.000Z
|
from investment_atlas import helpers
def test_get_sectors_label():
    # Related sectors and sub-sectors present: all names are combined and
    # the whole label is wrapped in parentheses.
    page = {
        'related_sectors': [
            {'related_sector': {'title': 'Housing'}},
            {'related_sector': {'title': 'Aerospace'}},
        ],
        'sub_sectors': ['Green housing', 'Urban', 'Renting'],
    }
    expected = '(Housing, Aerospace, Green housing, Urban, Renting)'
    assert helpers.get_sectors_label(page) == expected


def test_get_sectors_label_undefined_sectors():
    # No sector keys at all -> empty label.
    assert helpers.get_sectors_label({}) == ''


def test_get_sectors_label_undefined_sub_sectors():
    # A single related sector and no 'sub_sectors' key -> bare title.
    page = {'related_sectors': [{'related_sector': {'title': 'Housing'}}]}
    assert helpers.get_sectors_label(page) == 'Housing'


def test_get_sectors_label_no_sector():
    # Only sub-sectors -> parenthesised list of sub-sector names.
    page = {
        'related_sectors': [],
        'sub_sectors': ['Green housing', 'Urban', 'Renting'],
    }
    assert helpers.get_sectors_label(page) == '(Green housing, Urban, Renting)'


def test_get_sectors_label_no_subsectors():
    # Sectors only (empty sub-sector list) -> comma-joined, no parentheses.
    page = {
        'related_sectors': [
            {'related_sector': {'title': 'Housing'}},
            {'related_sector': {'title': 'Aerospace'}},
        ],
        'sub_sectors': [],
    }
    assert helpers.get_sectors_label(page) == 'Housing, Aerospace'


def test_get_sectors_label_blank_sector():
    # A related sector whose value has no title mapping -> empty label.
    page = {'related_sectors': [{'related_sector': []}]}
    assert helpers.get_sectors_label(page) == ''


def test_get_sectors_label_blank_sub_sector():
    # Empty-string sub-sectors are ignored.
    page = {
        'related_sectors': [
            {'related_sector': {'title': 'Housing'}},
        ],
        'sub_sectors': [''],
    }
    assert helpers.get_sectors_label(page) == 'Housing'
| 24.444444
| 99
| 0.592045
| 177
| 1,760
| 5.508475
| 0.146893
| 0.14359
| 0.215385
| 0.155897
| 0.932308
| 0.881026
| 0.803077
| 0.714872
| 0.592821
| 0.408205
| 0
| 0
| 0.259091
| 1,760
| 71
| 100
| 24.788732
| 0.747699
| 0.016477
| 0
| 0.5
| 0
| 0
| 0.273782
| 0
| 0
| 0
| 0
| 0
| 0.14
| 1
| 0.14
| false
| 0
| 0.02
| 0
| 0.16
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
957ed743ac408469f50ca8907251f5bcc08f5288
| 72
|
py
|
Python
|
winney/__init__.py
|
olivetree123/Winney
|
60068d64ee891bd92b93ce32b599984374eb66ed
|
[
"MIT"
] | null | null | null |
winney/__init__.py
|
olivetree123/Winney
|
60068d64ee891bd92b93ce32b599984374eb66ed
|
[
"MIT"
] | null | null | null |
winney/__init__.py
|
olivetree123/Winney
|
60068d64ee891bd92b93ce32b599984374eb66ed
|
[
"MIT"
] | 2
|
2021-07-05T03:43:44.000Z
|
2021-07-05T06:20:20.000Z
|
#coding:utf-8
from winney.winney import Winney, Address, retry, Result
| 18
| 56
| 0.777778
| 11
| 72
| 5.090909
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015873
| 0.125
| 72
| 3
| 57
| 24
| 0.873016
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
95989783f3390d3bf69bcdf266ae40935a18511f
| 119
|
py
|
Python
|
tests/test_main.py
|
K0lb3/_travis_tests
|
827f068ada26083a75e5bb00127c38c9a917aab4
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
K0lb3/_travis_tests
|
827f068ada26083a75e5bb00127c38c9a917aab4
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
K0lb3/_travis_tests
|
827f068ada26083a75e5bb00127c38c9a917aab4
|
[
"MIT"
] | null | null | null |
from sys import platform
import os
# Absolute path of the directory containing this test module.
root = os.path.dirname(os.path.abspath(__file__))


def test_none():
    """Placeholder test so the suite always has at least one passing test.

    A pytest test should assert, not return: pytest ignores (and newer
    versions warn about) non-None return values from test functions.
    """
    assert True
| 17
| 49
| 0.747899
| 19
| 119
| 4.421053
| 0.789474
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151261
| 119
| 7
| 50
| 17
| 0.831683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
95b734d47274dba383ce3a50c35cfeeb2a4f2fd1
| 55
|
py
|
Python
|
iperf3/__init__.py
|
Austinpayne/iperf3-python
|
4535d9e96ba5ef3f503e7f1e48f0f8ce51615ddd
|
[
"MIT"
] | 87
|
2016-09-19T10:58:31.000Z
|
2022-03-30T01:36:44.000Z
|
iperf3/__init__.py
|
Austinpayne/iperf3-python
|
4535d9e96ba5ef3f503e7f1e48f0f8ce51615ddd
|
[
"MIT"
] | 61
|
2016-08-17T14:46:59.000Z
|
2022-03-02T15:28:51.000Z
|
iperf3/__init__.py
|
Austinpayne/iperf3-python
|
4535d9e96ba5ef3f503e7f1e48f0f8ce51615ddd
|
[
"MIT"
] | 47
|
2016-09-12T14:51:57.000Z
|
2022-01-31T17:46:49.000Z
|
from .iperf3 import Client, Server, TestResult, IPerf3
| 27.5
| 54
| 0.8
| 7
| 55
| 6.285714
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0.127273
| 55
| 1
| 55
| 55
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
2528466a7641c6333630e71ef039246852ec4527
| 135
|
py
|
Python
|
__init__.py
|
anthill-gaming/game_master
|
aa0af04b7e6f2073a1b71878bdce46ca534a107e
|
[
"MIT"
] | 1
|
2019-07-17T16:06:11.000Z
|
2019-07-17T16:06:11.000Z
|
__init__.py
|
anthill-gaming/game_master
|
aa0af04b7e6f2073a1b71878bdce46ca534a107e
|
[
"MIT"
] | null | null | null |
__init__.py
|
anthill-gaming/game_master
|
aa0af04b7e6f2073a1b71878bdce46ca534a107e
|
[
"MIT"
] | null | null | null |
from anthill.framework.utils.version import get_version
# Version tuple: (major, minor, micro, release level, serial) — 0.0.1 alpha 1.
VERSION = (0, 0, 1, 'alpha', 1)
# Render the tuple via the framework helper and expose both spellings.
version = __version__ = get_version(VERSION)
| 22.5
| 55
| 0.748148
| 19
| 135
| 5
| 0.526316
| 0.442105
| 0.357895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034188
| 0.133333
| 135
| 5
| 56
| 27
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c27fc08eb62fc9b4b62c41e90807f383c1757771
| 50
|
py
|
Python
|
flexmeasures/api/v2_0/implementations/__init__.py
|
SeitaBV/flexmeasures
|
f715012c9c35d38d3382bd88d36ef86ce9728d10
|
[
"Apache-2.0"
] | 37
|
2021-02-16T11:18:20.000Z
|
2021-11-04T22:04:56.000Z
|
flexmeasures/api/v2_0/implementations/__init__.py
|
SeitaBV/flexmeasures
|
f715012c9c35d38d3382bd88d36ef86ce9728d10
|
[
"Apache-2.0"
] | 165
|
2021-02-16T15:27:20.000Z
|
2021-12-06T14:19:20.000Z
|
flexmeasures/api/v2_0/implementations/__init__.py
|
SeitaBV/flexmeasures
|
f715012c9c35d38d3382bd88d36ef86ce9728d10
|
[
"Apache-2.0"
] | 5
|
2021-02-23T12:05:42.000Z
|
2021-11-04T13:58:40.000Z
|
from . import assets, sensors, users # noqa F401
| 25
| 49
| 0.72
| 7
| 50
| 5.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 0.2
| 50
| 1
| 50
| 50
| 0.825
| 0.18
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c27fc8bafb5d92d8b7da3503abf30ba305c1ea06
| 59
|
py
|
Python
|
{{ cookiecutter.repo_name }}/app/provider/predict.py
|
ShilpaGopal/cookiecutter-ml-flask-serving
|
9d1f56d0cda248fb2834b714390df7078ad24a22
|
[
"MIT"
] | null | null | null |
{{ cookiecutter.repo_name }}/app/provider/predict.py
|
ShilpaGopal/cookiecutter-ml-flask-serving
|
9d1f56d0cda248fb2834b714390df7078ad24a22
|
[
"MIT"
] | null | null | null |
{{ cookiecutter.repo_name }}/app/provider/predict.py
|
ShilpaGopal/cookiecutter-ml-flask-serving
|
9d1f56d0cda248fb2834b714390df7078ad24a22
|
[
"MIT"
] | null | null | null |
def predict(model, image):
return model.predict(image)
| 29.5
| 31
| 0.728814
| 8
| 59
| 5.375
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152542
| 59
| 2
| 31
| 29.5
| 0.86
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
c280688cdca76ad9b264fd758d01403361a4622e
| 88
|
py
|
Python
|
models/__init__.py
|
modeconnectivity/modeconnectivity
|
4cc6558cd9c366fd41f9a853a0a18f4b0884c913
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
modeconnectivity/modeconnectivity
|
4cc6558cd9c366fd41f9a853a0a18f4b0884c913
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
modeconnectivity/modeconnectivity
|
4cc6558cd9c366fd41f9a853a0a18f4b0884c913
|
[
"MIT"
] | null | null | null |
from . import cnn
from . import fcn
from . import pretrained
from . import classifiers
| 14.666667
| 25
| 0.761364
| 12
| 88
| 5.583333
| 0.5
| 0.597015
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193182
| 88
| 5
| 26
| 17.6
| 0.943662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c290cd006f6aab8232ee3149187e032c2ce1e52a
| 64
|
py
|
Python
|
pyClarion/utils/__init__.py
|
jlichter/pyClarion
|
326a9b7ac03baaaf8eba49a42954f88542c191e9
|
[
"MIT"
] | 25
|
2018-09-21T17:51:09.000Z
|
2022-03-08T12:24:35.000Z
|
pyClarion/utils/__init__.py
|
jlichter/pyClarion
|
326a9b7ac03baaaf8eba49a42954f88542c191e9
|
[
"MIT"
] | 9
|
2018-07-01T00:44:02.000Z
|
2022-02-10T10:56:30.000Z
|
pyClarion/utils/__init__.py
|
jlichter/pyClarion
|
326a9b7ac03baaaf8eba49a42954f88542c191e9
|
[
"MIT"
] | 10
|
2018-09-21T17:51:13.000Z
|
2022-03-03T07:58:37.000Z
|
"""Provides miscellaneous utilities."""
from .pprint import *
| 12.8
| 39
| 0.71875
| 6
| 64
| 7.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140625
| 64
| 4
| 40
| 16
| 0.836364
| 0.515625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
c2c2c6acbbee18630fcd7f9cc309712310a9d168
| 112
|
py
|
Python
|
src/briefcase/apps/basedoc/admin.py
|
Briefcase/Briefcase
|
34403c69c19cee1e682293a2c3c3f17c631b9246
|
[
"BSD-2-Clause"
] | 2
|
2017-10-19T15:39:31.000Z
|
2022-02-09T02:59:27.000Z
|
src/briefcase/apps/basedoc/admin.py
|
Briefcase/Briefcase
|
34403c69c19cee1e682293a2c3c3f17c631b9246
|
[
"BSD-2-Clause"
] | 2
|
2021-06-16T02:08:42.000Z
|
2021-12-06T07:43:32.000Z
|
src/briefcase/apps/basedoc/admin.py
|
Briefcase/Briefcase
|
34403c69c19cee1e682293a2c3c3f17c631b9246
|
[
"BSD-2-Clause"
] | 2
|
2016-05-25T07:28:13.000Z
|
2021-04-02T03:55:08.000Z
|
#spreadsheet.admin.py
from models import Basedoc
from django.contrib import admin
admin.site.register(Basedoc)
| 18.666667
| 32
| 0.830357
| 16
| 112
| 5.8125
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098214
| 112
| 5
| 33
| 22.4
| 0.920792
| 0.178571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c2f5900430fd8c70f2917f615afe55ae589cb8b0
| 179
|
py
|
Python
|
bin/libtf/logparsers/TFExceptions.py
|
ThreshingFloor/splunk.reaper.threshingfloor.io
|
4e5f19abd3bf9e15b7b59423018e94e533e28f43
|
[
"MIT"
] | null | null | null |
bin/libtf/logparsers/TFExceptions.py
|
ThreshingFloor/splunk.reaper.threshingfloor.io
|
4e5f19abd3bf9e15b7b59423018e94e533e28f43
|
[
"MIT"
] | null | null | null |
bin/libtf/logparsers/TFExceptions.py
|
ThreshingFloor/splunk.reaper.threshingfloor.io
|
4e5f19abd3bf9e15b7b59423018e94e533e28f43
|
[
"MIT"
] | null | null | null |
class TFException(Exception):
pass
class TFAPIUnavailable(Exception):
pass
class TFLogParsingException(Exception):
def __init__(self, type):
self.type = type
| 19.888889
| 39
| 0.72067
| 18
| 179
| 6.944444
| 0.555556
| 0.208
| 0.288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195531
| 179
| 9
| 40
| 19.888889
| 0.868056
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0.285714
| 0
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
c2f99e55c7c7835a02348183b9c4e6c6a9b039f5
| 141
|
py
|
Python
|
claripy/vsa/errors.py
|
embg/claripy
|
1a5e0ca61d3f480e541226f103900e983f025e4a
|
[
"BSD-2-Clause"
] | 211
|
2015-08-06T23:25:01.000Z
|
2022-03-26T19:34:49.000Z
|
claripy/vsa/errors.py
|
embg/claripy
|
1a5e0ca61d3f480e541226f103900e983f025e4a
|
[
"BSD-2-Clause"
] | 175
|
2015-09-03T11:09:18.000Z
|
2022-03-09T20:24:33.000Z
|
claripy/vsa/errors.py
|
embg/claripy
|
1a5e0ca61d3f480e541226f103900e983f025e4a
|
[
"BSD-2-Clause"
] | 99
|
2015-08-07T10:30:08.000Z
|
2022-03-26T10:32:09.000Z
|
from ..errors import ClaripyError
class ClaripyVSAError(ClaripyError):
pass
class ClaripyVSAOperationError(ClaripyVSAError):
pass
| 15.666667
| 48
| 0.794326
| 12
| 141
| 9.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 141
| 8
| 49
| 17.625
| 0.933333
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.4
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
6c19db59eaa988e5782aa1e2fed33b2c108a8b2a
| 202
|
py
|
Python
|
pattern/text/xx/__main__.py
|
huihui7987/pattern
|
d25511f9ca7ed9356b801d8663b8b5168464e68f
|
[
"BSD-3-Clause"
] | 6,201
|
2015-01-01T17:40:43.000Z
|
2022-03-30T21:28:15.000Z
|
pattern/text/xx/__main__.py
|
WZBSocialScienceCenter/patternlite
|
99271c8f20afdc3ae3f05246c43100dc00604e3f
|
[
"BSD-3-Clause"
] | 199
|
2015-01-03T10:24:13.000Z
|
2022-03-14T12:53:34.000Z
|
pattern/text/xx/__main__.py
|
WZBSocialScienceCenter/patternlite
|
99271c8f20afdc3ae3f05246c43100dc00604e3f
|
[
"BSD-3-Clause"
] | 1,537
|
2015-01-07T06:45:24.000Z
|
2022-03-31T07:30:03.000Z
|
#### PATTERN | XX | PARSER COMMAND-LINE ############################################################
from __future__ import absolute_import
from .__init__ import parse, commandline
commandline(parse)
| 28.857143
| 100
| 0.549505
| 17
| 202
| 6
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09901
| 202
| 6
| 101
| 33.666667
| 0.56044
| 0.168317
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6c73901c30e186ec399d632011e7e84b5ef927d0
| 38
|
py
|
Python
|
test.py
|
hlong0806/BullshitGenerator
|
dbcd8e5c910a28b273d7c938f485f6df440a1ec5
|
[
"MIT"
] | null | null | null |
test.py
|
hlong0806/BullshitGenerator
|
dbcd8e5c910a28b273d7c938f485f6df440a1ec5
|
[
"MIT"
] | null | null | null |
test.py
|
hlong0806/BullshitGenerator
|
dbcd8e5c910a28b273d7c938f485f6df440a1ec5
|
[
"MIT"
] | 2
|
2019-11-14T00:46:46.000Z
|
2020-06-10T02:53:23.000Z
|
#!/bin/python
#This is an empty file
| 9.5
| 22
| 0.684211
| 7
| 38
| 3.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 38
| 3
| 23
| 12.666667
| 0.83871
| 0.868421
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6696b8caf3e37d6f8ec1f50b44c56f5558e4f1db
| 138
|
py
|
Python
|
measurement/array-operations/vless3.py
|
quepas/performance-estimation-array-operations
|
b209ba5efebf5dee60ec5fca0fa711ca2e766e17
|
[
"MIT"
] | null | null | null |
measurement/array-operations/vless3.py
|
quepas/performance-estimation-array-operations
|
b209ba5efebf5dee60ec5fca0fa711ca2e766e17
|
[
"MIT"
] | null | null | null |
measurement/array-operations/vless3.py
|
quepas/performance-estimation-array-operations
|
b209ba5efebf5dee60ec5fca0fa711ca2e766e17
|
[
"MIT"
] | null | null | null |
import numpy as np
# Element-wise comparision of three vectors (less then)
def vless3(V1, V2, V3):
R = np.less(np.less(V1, V2), V3)
| 19.714286
| 55
| 0.673913
| 25
| 138
| 3.72
| 0.72
| 0.086022
| 0.129032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063063
| 0.195652
| 138
| 6
| 56
| 23
| 0.774775
| 0.384058
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
66a002928dd93c303d08073b6c56775dbc584991
| 45
|
py
|
Python
|
samples/infinite.py
|
Leedehai/ctimer
|
cfb3c69a19d1b25e4baa2054ac96ff6f09bfb04d
|
[
"MIT"
] | null | null | null |
samples/infinite.py
|
Leedehai/ctimer
|
cfb3c69a19d1b25e4baa2054ac96ff6f09bfb04d
|
[
"MIT"
] | null | null | null |
samples/infinite.py
|
Leedehai/ctimer
|
cfb3c69a19d1b25e4baa2054ac96ff6f09bfb04d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
while True:
pass
| 7.5
| 21
| 0.622222
| 7
| 45
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.244444
| 45
| 5
| 22
| 9
| 0.823529
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
66bcd22dce12c14c20e8d22b889f0489f2128033
| 161
|
py
|
Python
|
sales_project/sales_app/admin.py
|
alineayumi/django-sales-dataset
|
0b179c918ed3360f3413277b069fef76014468cf
|
[
"MIT"
] | null | null | null |
sales_project/sales_app/admin.py
|
alineayumi/django-sales-dataset
|
0b179c918ed3360f3413277b069fef76014468cf
|
[
"MIT"
] | null | null | null |
sales_project/sales_app/admin.py
|
alineayumi/django-sales-dataset
|
0b179c918ed3360f3413277b069fef76014468cf
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from sales_app.models import Product, Sale
# Register your models here.
admin.site.register(Product)
admin.site.register(Sale)
| 23
| 42
| 0.813665
| 24
| 161
| 5.416667
| 0.583333
| 0.138462
| 0.261538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10559
| 161
| 6
| 43
| 26.833333
| 0.902778
| 0.161491
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
dd1d1797de6eeb0ad17e1fc6cbaff5eb344338c1
| 9
|
py
|
Python
|
12.py
|
fzyy/test007
|
422c5b066d24780f5b77c2af69d879921e8ce6d0
|
[
"MIT"
] | null | null | null |
12.py
|
fzyy/test007
|
422c5b066d24780f5b77c2af69d879921e8ce6d0
|
[
"MIT"
] | null | null | null |
12.py
|
fzyy/test007
|
422c5b066d24780f5b77c2af69d879921e8ce6d0
|
[
"MIT"
] | null | null | null |
aa = 111
| 4.5
| 8
| 0.555556
| 2
| 9
| 2.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 0.333333
| 9
| 1
| 9
| 9
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
dd2aa0024a41112cd35dc28b808cf0a88682bd30
| 615
|
py
|
Python
|
tests/data/expected/main/main_json_reuse_model/output.py
|
adaamz/datamodel-code-generator
|
3b34573f35f8d420e4668a85047c757fd1da7754
|
[
"MIT"
] | 891
|
2019-07-23T04:23:32.000Z
|
2022-03-31T13:36:33.000Z
|
tests/data/expected/main/main_json_reuse_model/output.py
|
adaamz/datamodel-code-generator
|
3b34573f35f8d420e4668a85047c757fd1da7754
|
[
"MIT"
] | 663
|
2019-07-23T09:50:26.000Z
|
2022-03-29T01:56:55.000Z
|
tests/data/expected/main/main_json_reuse_model/output.py
|
adaamz/datamodel-code-generator
|
3b34573f35f8d420e4668a85047c757fd1da7754
|
[
"MIT"
] | 108
|
2019-07-23T08:50:37.000Z
|
2022-03-09T10:50:22.000Z
|
# generated by datamodel-codegen:
# filename: duplicate_models.json
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from pydantic import BaseModel, Field
class ArmRight(BaseModel):
Joint_1: int = Field(..., alias='Joint 1')
Joint_2: int = Field(..., alias='Joint 2')
Joint_3: int = Field(..., alias='Joint 3')
class ArmLeft(ArmRight):
pass
class Head(BaseModel):
Joint_1: int = Field(..., alias='Joint 1')
class Model(BaseModel):
Arm_Right: ArmRight = Field(..., alias='Arm Right')
Arm_Left: ArmLeft = Field(..., alias='Arm Left')
Head: Head
| 21.964286
| 55
| 0.663415
| 82
| 615
| 4.841463
| 0.426829
| 0.151134
| 0.130982
| 0.18136
| 0.171285
| 0.171285
| 0.171285
| 0.171285
| 0
| 0
| 0
| 0.051793
| 0.18374
| 615
| 27
| 56
| 22.777778
| 0.739044
| 0.170732
| 0
| 0.142857
| 1
| 0
| 0.088933
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.071429
| 0.142857
| 0
| 0.928571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
dd4bcfeabd6b6fc06c45f28738f2c4b02465dd04
| 19,928
|
py
|
Python
|
sdk/python/pulumi_google_native/cloudtasks/v2beta2/task.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 44
|
2021-04-18T23:00:48.000Z
|
2022-02-14T17:43:15.000Z
|
sdk/python/pulumi_google_native/cloudtasks/v2beta2/task.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 354
|
2021-04-16T16:48:39.000Z
|
2022-03-31T17:16:39.000Z
|
sdk/python/pulumi_google_native/cloudtasks/v2beta2/task.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 8
|
2021-04-24T17:46:51.000Z
|
2022-01-05T10:40:21.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['TaskArgs', 'Task']
@pulumi.input_type
class TaskArgs:
def __init__(__self__, *,
queue_id: pulumi.Input[str],
app_engine_http_request: Optional[pulumi.Input['AppEngineHttpRequestArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
pull_message: Optional[pulumi.Input['PullMessageArgs']] = None,
response_view: Optional[pulumi.Input['TaskResponseView']] = None,
schedule_time: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Task resource.
:param pulumi.Input['AppEngineHttpRequestArgs'] app_engine_http_request: App Engine HTTP request that is sent to the task's target. Can be set only if app_engine_http_target is set on the queue. An App Engine task is a task that has AppEngineHttpRequest set.
:param pulumi.Input[str] name: Optionally caller-specified in CreateTask. The task name. The task name must have the following format: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID` * `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), or periods (.). For more information, see [Identifying projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects) * `LOCATION_ID` is the canonical ID for the task's location. The list of available locations can be obtained by calling ListLocations. For more information, see https://cloud.google.com/about/locations/. * `QUEUE_ID` can contain letters ([A-Za-z]), numbers ([0-9]), or hyphens (-). The maximum length is 100 characters. * `TASK_ID` can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), or underscores (_). The maximum length is 500 characters.
:param pulumi.Input['PullMessageArgs'] pull_message: LeaseTasks to process the task. Can be set only if pull_target is set on the queue. A pull task is a task that has PullMessage set.
:param pulumi.Input['TaskResponseView'] response_view: The response_view specifies which subset of the Task will be returned. By default response_view is BASIC; not all information is retrieved by default because some data, such as payloads, might be desirable to return only when needed because of its large size or because of the sensitivity of data that it contains. Authorization for FULL requires `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/) permission on the Task resource.
:param pulumi.Input[str] schedule_time: The time when the task is scheduled to be attempted. For App Engine queues, this is when the task will be attempted or retried. For pull queues, this is the time when the task is available to be leased; if a task is currently leased, this is the time when the current lease expires, that is, the time that the task was leased plus the lease_duration. `schedule_time` will be truncated to the nearest microsecond.
"""
pulumi.set(__self__, "queue_id", queue_id)
if app_engine_http_request is not None:
pulumi.set(__self__, "app_engine_http_request", app_engine_http_request)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if pull_message is not None:
pulumi.set(__self__, "pull_message", pull_message)
if response_view is not None:
pulumi.set(__self__, "response_view", response_view)
if schedule_time is not None:
pulumi.set(__self__, "schedule_time", schedule_time)
@property
@pulumi.getter(name="queueId")
def queue_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "queue_id")
@queue_id.setter
def queue_id(self, value: pulumi.Input[str]):
pulumi.set(self, "queue_id", value)
@property
@pulumi.getter(name="appEngineHttpRequest")
def app_engine_http_request(self) -> Optional[pulumi.Input['AppEngineHttpRequestArgs']]:
"""
App Engine HTTP request that is sent to the task's target. Can be set only if app_engine_http_target is set on the queue. An App Engine task is a task that has AppEngineHttpRequest set.
"""
return pulumi.get(self, "app_engine_http_request")
@app_engine_http_request.setter
def app_engine_http_request(self, value: Optional[pulumi.Input['AppEngineHttpRequestArgs']]):
pulumi.set(self, "app_engine_http_request", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Optionally caller-specified in CreateTask. The task name. The task name must have the following format: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID` * `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), or periods (.). For more information, see [Identifying projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects) * `LOCATION_ID` is the canonical ID for the task's location. The list of available locations can be obtained by calling ListLocations. For more information, see https://cloud.google.com/about/locations/. * `QUEUE_ID` can contain letters ([A-Za-z]), numbers ([0-9]), or hyphens (-). The maximum length is 100 characters. * `TASK_ID` can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), or underscores (_). The maximum length is 500 characters.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="pullMessage")
def pull_message(self) -> Optional[pulumi.Input['PullMessageArgs']]:
"""
LeaseTasks to process the task. Can be set only if pull_target is set on the queue. A pull task is a task that has PullMessage set.
"""
return pulumi.get(self, "pull_message")
@pull_message.setter
def pull_message(self, value: Optional[pulumi.Input['PullMessageArgs']]):
pulumi.set(self, "pull_message", value)
@property
@pulumi.getter(name="responseView")
def response_view(self) -> Optional[pulumi.Input['TaskResponseView']]:
"""
The response_view specifies which subset of the Task will be returned. By default response_view is BASIC; not all information is retrieved by default because some data, such as payloads, might be desirable to return only when needed because of its large size or because of the sensitivity of data that it contains. Authorization for FULL requires `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/) permission on the Task resource.
"""
return pulumi.get(self, "response_view")
@response_view.setter
def response_view(self, value: Optional[pulumi.Input['TaskResponseView']]):
pulumi.set(self, "response_view", value)
@property
@pulumi.getter(name="scheduleTime")
def schedule_time(self) -> Optional[pulumi.Input[str]]:
"""
The time when the task is scheduled to be attempted. For App Engine queues, this is when the task will be attempted or retried. For pull queues, this is the time when the task is available to be leased; if a task is currently leased, this is the time when the current lease expires, that is, the time that the task was leased plus the lease_duration. `schedule_time` will be truncated to the nearest microsecond.
"""
return pulumi.get(self, "schedule_time")
@schedule_time.setter
def schedule_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "schedule_time", value)
class Task(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_engine_http_request: Optional[pulumi.Input[pulumi.InputType['AppEngineHttpRequestArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
pull_message: Optional[pulumi.Input[pulumi.InputType['PullMessageArgs']]] = None,
queue_id: Optional[pulumi.Input[str]] = None,
response_view: Optional[pulumi.Input['TaskResponseView']] = None,
schedule_time: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Creates a task and adds it to a queue. Tasks cannot be updated after creation; there is no UpdateTask command. * For App Engine queues, the maximum task size is 100KB. * For pull queues, the maximum task size is 1MB.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['AppEngineHttpRequestArgs']] app_engine_http_request: App Engine HTTP request that is sent to the task's target. Can be set only if app_engine_http_target is set on the queue. An App Engine task is a task that has AppEngineHttpRequest set.
:param pulumi.Input[str] name: Optionally caller-specified in CreateTask. The task name. The task name must have the following format: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID` * `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), or periods (.). For more information, see [Identifying projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects) * `LOCATION_ID` is the canonical ID for the task's location. The list of available locations can be obtained by calling ListLocations. For more information, see https://cloud.google.com/about/locations/. * `QUEUE_ID` can contain letters ([A-Za-z]), numbers ([0-9]), or hyphens (-). The maximum length is 100 characters. * `TASK_ID` can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), or underscores (_). The maximum length is 500 characters.
:param pulumi.Input[pulumi.InputType['PullMessageArgs']] pull_message: LeaseTasks to process the task. Can be set only if pull_target is set on the queue. A pull task is a task that has PullMessage set.
:param pulumi.Input['TaskResponseView'] response_view: The response_view specifies which subset of the Task will be returned. By default response_view is BASIC; not all information is retrieved by default because some data, such as payloads, might be desirable to return only when needed because of its large size or because of the sensitivity of data that it contains. Authorization for FULL requires `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/) permission on the Task resource.
:param pulumi.Input[str] schedule_time: The time when the task is scheduled to be attempted. For App Engine queues, this is when the task will be attempted or retried. For pull queues, this is the time when the task is available to be leased; if a task is currently leased, this is the time when the current lease expires, that is, the time that the task was leased plus the lease_duration. `schedule_time` will be truncated to the nearest microsecond.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TaskArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a task and adds it to a queue. Tasks cannot be updated after creation; there is no UpdateTask command. * For App Engine queues, the maximum task size is 100KB. * For pull queues, the maximum task size is 1MB.
:param str resource_name: The name of the resource.
:param TaskArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TaskArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_engine_http_request: Optional[pulumi.Input[pulumi.InputType['AppEngineHttpRequestArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
pull_message: Optional[pulumi.Input[pulumi.InputType['PullMessageArgs']]] = None,
queue_id: Optional[pulumi.Input[str]] = None,
response_view: Optional[pulumi.Input['TaskResponseView']] = None,
schedule_time: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TaskArgs.__new__(TaskArgs)
__props__.__dict__["app_engine_http_request"] = app_engine_http_request
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
__props__.__dict__["pull_message"] = pull_message
if queue_id is None and not opts.urn:
raise TypeError("Missing required property 'queue_id'")
__props__.__dict__["queue_id"] = queue_id
__props__.__dict__["response_view"] = response_view
__props__.__dict__["schedule_time"] = schedule_time
__props__.__dict__["create_time"] = None
__props__.__dict__["status"] = None
__props__.__dict__["view"] = None
super(Task, __self__).__init__(
'google-native:cloudtasks/v2beta2:Task',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Task':
"""
Get an existing Task resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = TaskArgs.__new__(TaskArgs)
__props__.__dict__["app_engine_http_request"] = None
__props__.__dict__["create_time"] = None
__props__.__dict__["name"] = None
__props__.__dict__["pull_message"] = None
__props__.__dict__["schedule_time"] = None
__props__.__dict__["status"] = None
__props__.__dict__["view"] = None
return Task(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="appEngineHttpRequest")
def app_engine_http_request(self) -> pulumi.Output['outputs.AppEngineHttpRequestResponse']:
"""
App Engine HTTP request that is sent to the task's target. Can be set only if app_engine_http_target is set on the queue. An App Engine task is a task that has AppEngineHttpRequest set.
"""
return pulumi.get(self, "app_engine_http_request")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> pulumi.Output[str]:
"""
The time that the task was created. `create_time` will be truncated to the nearest second.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Optionally caller-specified in CreateTask. The task name. The task name must have the following format: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID` * `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), or periods (.). For more information, see [Identifying projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects) * `LOCATION_ID` is the canonical ID for the task's location. The list of available locations can be obtained by calling ListLocations. For more information, see https://cloud.google.com/about/locations/. * `QUEUE_ID` can contain letters ([A-Za-z]), numbers ([0-9]), or hyphens (-). The maximum length is 100 characters. * `TASK_ID` can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), or underscores (_). The maximum length is 500 characters.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="pullMessage")
def pull_message(self) -> pulumi.Output['outputs.PullMessageResponse']:
    """
    Payload consumed via LeaseTasks. Settable only when pull_target is
    configured on the queue; a task carrying a PullMessage is a pull task.
    """
    return pulumi.get(self, "pull_message")
@property
@pulumi.getter(name="scheduleTime")
def schedule_time(self) -> pulumi.Output[str]:
    """
    When the task is scheduled to be attempted. For App Engine queues this is
    the attempt/retry time; for pull queues it is when the task becomes
    leasable (or, if currently leased, when the lease expires — lease start
    plus lease_duration). Truncated to the nearest microsecond.
    """
    return pulumi.get(self, "schedule_time")
@property
@pulumi.getter
def status(self) -> pulumi.Output['outputs.TaskStatusResponse']:
    """
    Status of the task.
    """
    return pulumi.get(self, "status")
@property
@pulumi.getter
def view(self) -> pulumi.Output[str]:
    """
    Indicates which subset of the Task has been returned.
    """
    return pulumi.get(self, "view")
| 64.701299
| 923
| 0.687726
| 2,648
| 19,928
| 4.984894
| 0.098943
| 0.044167
| 0.053258
| 0.033333
| 0.809394
| 0.752576
| 0.729015
| 0.699318
| 0.686591
| 0.645227
| 0
| 0.003768
| 0.214322
| 19,928
| 307
| 924
| 64.912052
| 0.839305
| 0.493125
| 0
| 0.38
| 1
| 0
| 0.129895
| 0.040096
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145
| false
| 0.005
| 0.04
| 0.015
| 0.275
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
dd8b15b8050ab5e27501efa685f93310c7fa298e
| 314
|
py
|
Python
|
generated-libraries/python/netapp/autosupport/asup_destination.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | 2
|
2017-03-28T15:31:26.000Z
|
2018-08-16T22:15:18.000Z
|
generated-libraries/python/netapp/autosupport/asup_destination.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
generated-libraries/python/netapp/autosupport/asup_destination.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
# `basestring` exists only on Python 2; fall back to `str` on Python 3 so
# this generated module stays importable on both.
try:
    _string_base = basestring  # noqa: F821 (Python 2 only)
except NameError:
    _string_base = str


class AsupDestination(_string_base):
    """
    smtp|http|noteto|retransmit
    Possible values:
    <ul>
    <li> "smtp" ,
    <li> "http" ,
    <li> "noteto" ,
    <li> "retransmit"
    </ul>
    """

    @staticmethod
    def get_api_name():
        # Wire name of this value type in the NetApp ONTAP API.
        return "asup-destination"
| 18.470588
| 35
| 0.484076
| 27
| 314
| 5.555556
| 0.703704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.375796
| 314
| 16
| 36
| 19.625
| 0.765306
| 0.452229
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
dd9591c8346506530ff8a8f5430298c51cfa1b12
| 597
|
py
|
Python
|
quik/namespaces/quotes.py
|
nusov/QuikPython
|
d992b9d5aaf68cdda3031a08705221fe461a7780
|
[
"MIT"
] | null | null | null |
quik/namespaces/quotes.py
|
nusov/QuikPython
|
d992b9d5aaf68cdda3031a08705221fe461a7780
|
[
"MIT"
] | null | null | null |
quik/namespaces/quotes.py
|
nusov/QuikPython
|
d992b9d5aaf68cdda3031a08705221fe461a7780
|
[
"MIT"
] | null | null | null |
from quik.base import QuikNamespace
class QuikQuotesNamespace(QuikNamespace):
    """Level II quote (order book) operations exposed by the QUIK terminal."""

    def subscribe(self, class_code, sec_code):
        """Start a Level II quote subscription for the instrument."""
        command = "Subscribe_Level_II_Quotes"
        return self.quik.invoke(command, class_code, sec_code)

    def unsubscribe(self, class_code, sec_code):
        """Cancel a Level II quote subscription for the instrument."""
        command = "Unsubscribe_Level_II_Quotes"
        return self.quik.invoke(command, class_code, sec_code)

    def is_subscribed(self, class_code, sec_code):
        """Check whether a Level II quote subscription is active."""
        command = "IsSubscribed_Level_II_Quotes"
        return self.quik.invoke(command, class_code, sec_code)

    def pull(self, class_code, sec_code):
        """Fetch the current Level II quote snapshot."""
        command = "getQuoteLevel2"
        return self.quik.invoke(command, class_code, sec_code)
| 39.8
| 85
| 0.755444
| 82
| 597
| 5.182927
| 0.280488
| 0.169412
| 0.225882
| 0.301176
| 0.602353
| 0.602353
| 0.602353
| 0.602353
| 0.376471
| 0
| 0
| 0.001972
| 0.150754
| 597
| 15
| 86
| 39.8
| 0.836292
| 0
| 0
| 0
| 0
| 0
| 0.157191
| 0.133779
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.1
| 0.4
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
06bfb9a5c4c503d7ca8e8c62a2ecc4341118f795
| 481
|
py
|
Python
|
OpenGLCffi/GL/EXT/ARB/texture_storage_multisample.py
|
cydenix/OpenGLCffi
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
[
"MIT"
] | null | null | null |
OpenGLCffi/GL/EXT/ARB/texture_storage_multisample.py
|
cydenix/OpenGLCffi
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
[
"MIT"
] | null | null | null |
OpenGLCffi/GL/EXT/ARB/texture_storage_multisample.py
|
cydenix/OpenGLCffi
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
[
"MIT"
] | null | null | null |
from OpenGLCffi.GL import params
@params(api='gl', prms=['target', 'samples', 'internalformat', 'width', 'height', 'fixedsamplelocations'])
def glTexStorage2DMultisample(target, samples, internalformat, width, height, fixedsamplelocations):
    """Stub signature for glTexStorage2DMultisample."""
    # NOTE(review): body intentionally empty — presumably the @params
    # decorator installs the native dispatch; confirm in OpenGLCffi.GL.
@params(api='gl', prms=['target', 'samples', 'internalformat', 'width', 'height', 'depth', 'fixedsamplelocations'])
def glTexStorage3DMultisample(target, samples, internalformat, width, height, depth, fixedsamplelocations):
    """Stub signature for glTexStorage3DMultisample."""
    # NOTE(review): body intentionally empty — presumably the @params
    # decorator installs the native dispatch; confirm in OpenGLCffi.GL.
| 40.083333
| 115
| 0.756757
| 45
| 481
| 8.088889
| 0.4
| 0.142857
| 0.296703
| 0.351648
| 0.747253
| 0.747253
| 0.532967
| 0.291209
| 0.291209
| 0
| 0
| 0.004556
| 0.087318
| 481
| 11
| 116
| 43.727273
| 0.824601
| 0
| 0
| 0.285714
| 0
| 0
| 0.26096
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.285714
| 0.142857
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
b075f9fe98d07c99e321b7906fe37c77f51fe6d7
| 377
|
py
|
Python
|
katas/beta/only_readable_once_list.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
katas/beta/only_readable_once_list.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
katas/beta/only_readable_once_list.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
class SecureList(object):
    """A list whose contents can be read only once.

    Indexing removes and returns the element, and rendering the list with
    repr() or str() empties it entirely; only len() is non-destructive.
    """

    def __init__(self, lst):
        self.lst = list(lst)

    def __getitem__(self, item):
        # Reading an element consumes it.
        return self.lst.pop(item)

    def __len__(self):
        return len(self.lst)

    def _drain(self):
        # Hand back the remaining items and leave the list empty.
        remaining = self.lst
        self.lst = []
        return remaining

    def __repr__(self):
        return repr(self._drain())

    def __str__(self):
        return str(self._drain())
| 20.944444
| 36
| 0.564987
| 49
| 377
| 3.938776
| 0.326531
| 0.290155
| 0.170984
| 0.217617
| 0.279793
| 0.279793
| 0.279793
| 0
| 0
| 0
| 0
| 0
| 0.30504
| 377
| 17
| 37
| 22.176471
| 0.736641
| 0
| 0
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.384615
| false
| 0
| 0
| 0.153846
| 0.769231
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
b07ec2c2c1e92149b6ee36e34da990cdf795c36a
| 146
|
py
|
Python
|
myCompany/rules/admin.py
|
Rom4eg/myCompany
|
31846a861d8b0560191e2e1d9791f101b88874df
|
[
"MIT"
] | null | null | null |
myCompany/rules/admin.py
|
Rom4eg/myCompany
|
31846a861d8b0560191e2e1d9791f101b88874df
|
[
"MIT"
] | null | null | null |
myCompany/rules/admin.py
|
Rom4eg/myCompany
|
31846a861d8b0560191e2e1d9791f101b88874df
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from rules.models import Rule
class RuleAdmin(admin.ModelAdmin):
    # Default ModelAdmin behavior; no custom list display or filters yet.
    pass


# Make Rule manageable through the Django admin site.
admin.site.register(Rule, RuleAdmin)
| 18.25
| 36
| 0.794521
| 20
| 146
| 5.8
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130137
| 146
| 7
| 37
| 20.857143
| 0.913386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
b092d322a246878477f87ff47ad050a2a68a4c5b
| 54
|
py
|
Python
|
Python/Tests/TestData/AddImport/ImportFunctionFromExistingFromImportParensTrailingComma.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 404
|
2019-05-07T02:21:57.000Z
|
2022-03-31T17:03:04.000Z
|
Python/Tests/TestData/AddImport/ImportFunctionFromExistingFromImportParensTrailingComma.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 1,672
|
2019-05-06T21:09:38.000Z
|
2022-03-31T23:16:04.000Z
|
Python/Tests/TestData/AddImport/ImportFunctionFromExistingFromImportParensTrailingComma.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 186
|
2019-05-13T03:17:37.000Z
|
2022-03-31T16:24:05.000Z
|
from test_module import (module_func_2,)
module_func()
| 27
| 40
| 0.833333
| 9
| 54
| 4.555556
| 0.666667
| 0.487805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02
| 0.074074
| 54
| 2
| 41
| 27
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b0e2f000fc745c779507ce9414d52e57d1c38e00
| 5,359
|
py
|
Python
|
tests/test_note.py
|
samuelrey/Note-Picker
|
fc71fcaad8c1562288811f9b8dab4c084632aae0
|
[
"Unlicense"
] | 1
|
2017-10-12T18:09:16.000Z
|
2017-10-12T18:09:16.000Z
|
tests/test_note.py
|
samuelrey/Note-Picker
|
fc71fcaad8c1562288811f9b8dab4c084632aae0
|
[
"Unlicense"
] | null | null | null |
tests/test_note.py
|
samuelrey/Note-Picker
|
fc71fcaad8c1562288811f9b8dab4c084632aae0
|
[
"Unlicense"
] | 1
|
2015-02-18T23:19:49.000Z
|
2015-02-18T23:19:49.000Z
|
# Filename: getFrequency.py
#
# Summary: unit tests getFrequency method.
#
# Author: Samuel Villavicencio
#
# Last Updated: Oct 09 2015
import traceback
import unittest
import _write
import note
class GetFrequencyTest(unittest.TestCase):
    """Unit tests for note.Note.getFrequency().

    Every test writes a synthetic audio file through the `_write` helper
    module, reads it back as (signal, sample_rate), and removes the temp
    file with _write.clean() on both the success and failure paths.
    NOTE(review): the signatures of _write.note/_write.uniform/_write.rand
    are not visible here; argument meanings below are inferred from the
    test names and should be confirmed against the _write module.
    """

    def testEmptySignal(self):
        # presumably the `0` argument produces an empty/silent signal, so
        # getFrequency is expected to raise; reaching `else` means the call
        # unexpectedly succeeded — confirm against _write.note.
        n = note.Note()
        _write.note(1, 2, 44100, 0, 440)
        try:
            signal, sample_rate = _write.read()
            n.getFrequency(signal, sample_rate)
        except:
            # Any exception counts as the expected outcome here.
            _write.clean()
            pass
        else:
            _write.clean()
            self.fail()

    def testMaxFrequency(self):
        # values between sample_rate * n and sample_rate * (n + 1) are mistakenly valid.
        n = note.Note()
        # 22049 Hz is just under the Nyquist limit for 44100 Hz audio.
        _write.note(1, 2, 44100, 1, 22049)
        try:
            signal, sample_rate = _write.read()
            n.setTotalLength(signal)
            n.setStart(0)
            n.setLength(len(signal))
            self.assertAlmostEqual(22049, n.getFrequency(signal, sample_rate), 1)
        except:
            _write.clean()
            self.fail(traceback.format_exc())
        else:
            _write.clean()
            pass

    def testMinFrequency(self):
        # Lowest representable tone: 1 Hz.
        n = note.Note()
        _write.note(1, 2, 44100, 1, 1)
        try:
            signal, sample_rate = _write.read()
            n.setTotalLength(signal)
            n.setStart(0)
            n.setLength(len(signal))
            self.assertAlmostEqual(1, n.getFrequency(signal, sample_rate), 1)
        except:
            _write.clean()
            self.fail(traceback.format_exc())
        else:
            _write.clean()
            pass

    def testPositiveUniformSignal(self):
        # Constant positive signal: only checks getFrequency runs cleanly.
        n = note.Note()
        _write.uniform(1, 1)
        try:
            signal, sample_rate = _write.read()
            n.setTotalLength(signal)
            n.setStart(0)
            n.setLength(len(signal))
            n.getFrequency(signal, sample_rate)
        except:
            _write.clean()
            self.fail(traceback.format_exc())
        else:
            _write.clean()
            pass

    def testZeroUniformSignal(self):
        # All-zero signal: only checks getFrequency runs cleanly.
        n = note.Note()
        _write.uniform(0, 1)
        try:
            signal, sample_rate = _write.read()
            n.setTotalLength(signal)
            n.setStart(0)
            n.setLength(len(signal))
            n.getFrequency(signal, sample_rate)
        except:
            _write.clean()
            self.fail(traceback.format_exc())
        else:
            _write.clean()
            pass

    def testNegativeUniformSignal(self):
        # Constant negative signal: only checks getFrequency runs cleanly.
        n = note.Note()
        _write.uniform(-1, 1)
        try:
            signal, sample_rate = _write.read()
            n.setTotalLength(signal)
            n.setStart(0)
            n.setLength(len(signal))
            n.getFrequency(signal, sample_rate)
        except:
            _write.clean()
            self.fail(traceback.format_exc())
        else:
            _write.clean()
            pass

    def testRandomSignal(self):
        # Random noise: only checks getFrequency runs cleanly.
        n = note.Note()
        _write.rand(1)
        try:
            signal, sample_rate = _write.read()
            n.setTotalLength(signal)
            n.setStart(0)
            n.setLength(len(signal))
            n.getFrequency(signal, sample_rate)
        except:
            _write.clean()
            self.fail(traceback.format_exc())
        else:
            _write.clean()
            pass

    def testAccuracy(self):
        # Sweep tones across the band and require the measured frequency to
        # match the written one to 0 decimal places.
        try:
            for frequency in range(40, 22040, 2000):
                n = note.Note()
                _write.note(1, 2, 44100, 1, frequency)
                signal, sample_rate = _write.read()
                n.setTotalLength(signal)
                n.setStart(0)
                n.setLength(len(signal))
                calculated = n.getFrequency(signal, sample_rate)
                _write.clean()
                self.assertAlmostEqual(frequency, calculated, 0)
        except:
            self.fail(traceback.format_exc())
        else:
            pass
class GetNotationTest(unittest.TestCase):
    """Unit tests for note.Note.getNotation().

    Each test writes a tone at a frequency that should NOT map to a known
    notation/octave, so getNotation raising is the expected outcome and
    reaching the `else` branch is a failure.
    """

    def testNoMatchingFrequency(self):
        for frequency in [7680.5]:  # find more frequencies
            n = note.Note()
            _write.note(1, 2, 44100, 1, frequency)
            try:
                signal, sample_rate = _write.read()
                n.setTotalLength(signal)
                n.setStart(0)
                n.setLength(len(signal))
                frequency = n.getFrequency(signal, sample_rate)
                n.getNotation(frequency)
            except:
                # Expected: no notation matches this frequency.
                _write.clean()
                pass
            else:
                _write.clean()
                self.fail(traceback.format_exc())

    def testNoMatchingOctave(self):
        for frequency in [8000.0]:
            n = note.Note()
            _write.note(1, 2, 44100, 1, frequency)
            try:
                signal, sample_rate = _write.read()
                n.setTotalLength(signal)
                n.setStart(0)
                n.setLength(len(signal))
                frequency = n.getFrequency(signal, sample_rate)
                n.getNotation(frequency)
            except:
                # Expected: the frequency falls outside any known octave.
                _write.clean()
                pass
            else:
                _write.clean()
                self.fail(traceback.format_exc())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 28.354497
| 88
| 0.515208
| 524
| 5,359
| 5.118321
| 0.164122
| 0.082028
| 0.119314
| 0.08613
| 0.738628
| 0.721104
| 0.700597
| 0.690157
| 0.679344
| 0.632364
| 0
| 0.033374
| 0.38496
| 5,359
| 188
| 89
| 28.505319
| 0.78034
| 0.041426
| 0
| 0.79375
| 0
| 0
| 0.00156
| 0
| 0
| 0
| 0
| 0
| 0.01875
| 1
| 0.0625
| false
| 0.0625
| 0.025
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
9fd8c7e05fe58e788eeb0c6d398b5d43cda29688
| 833
|
py
|
Python
|
twitter/lists.py
|
kwnktks0515/Twitter_with_Python
|
80dff5e0f0080a7e5b64dfa134f2e33aba0ed5f8
|
[
"MIT"
] | null | null | null |
twitter/lists.py
|
kwnktks0515/Twitter_with_Python
|
80dff5e0f0080a7e5b64dfa134f2e33aba0ed5f8
|
[
"MIT"
] | null | null | null |
twitter/lists.py
|
kwnktks0515/Twitter_with_Python
|
80dff5e0f0080a7e5b64dfa134f2e33aba0ed5f8
|
[
"MIT"
] | null | null | null |
"""lists"""
#import json
class Lists:
    """lists"""

    def __init__(self, twitter):
        self.twitter = twitter

    def list(self, params):
        """Placeholder for the lists/list endpoint."""

    def members(self, params):
        """Placeholder for the lists/members endpoint."""

    def memberships(self, params):
        """Placeholder for the lists/memberships endpoint."""

    def ownerships(self, params):
        """Placeholder for the lists/ownerships endpoint."""

    def show(self, params):
        """Placeholder for the lists/show endpoint."""

    def statuses(self, params):
        """Placeholder for the lists/statuses endpoint."""

    def subscribers(self, params):
        """Placeholder for the lists/subscribers endpoint."""

    def subscriptions(self, params):
        """Placeholder for the lists/subscriptions endpoint."""

    def create(self, params):
        """Placeholder for the lists/create endpoint."""

    def destory(self, params):
        """Placeholder for the lists/destroy endpoint.

        NOTE: the method name keeps the original `destory` spelling so
        existing callers keep working.
        """

    def update(self, params):
        """Placeholder for the lists/update endpoint."""
| 20.317073
| 36
| 0.478992
| 79
| 833
| 5
| 0.278481
| 0.278481
| 0.417722
| 0.529114
| 0.556962
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.364946
| 833
| 40
| 37
| 20.825
| 0.746692
| 0.106843
| 0
| 0.44
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.48
| false
| 0.44
| 0
| 0
| 0.52
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
9fdc95daa3705491004c662e2fbecb8bdf8f0b1e
| 821
|
py
|
Python
|
tests/fstrips/test_walker.py
|
phoeft670/tarski
|
7d955e535fbbca012bfd1a12402b97febc6b35b9
|
[
"Apache-2.0"
] | 29
|
2018-11-26T20:31:04.000Z
|
2021-12-29T11:08:40.000Z
|
tests/fstrips/test_walker.py
|
phoeft670/tarski
|
7d955e535fbbca012bfd1a12402b97febc6b35b9
|
[
"Apache-2.0"
] | 101
|
2018-06-07T13:10:01.000Z
|
2022-03-11T11:54:00.000Z
|
tests/fstrips/test_walker.py
|
phoeft670/tarski
|
7d955e535fbbca012bfd1a12402b97febc6b35b9
|
[
"Apache-2.0"
] | 18
|
2018-11-01T22:44:39.000Z
|
2022-02-28T04:57:15.000Z
|
from tarski.benchmarks.blocksworld import generate_fstrips_blocksworld_problem
# def test_fstrips_problem_walker():
# problem = generate_fstrips_blocksworld_problem(
# nblocks=2,
# init=[('b1', 'b2'), ('b2', 'table')],
# goal=[('b2', 'table'), ('b1', 'table')]
# )
# lang = problem.language
# b1, b2, clear, loc, table = lang.get('b1', 'b2', 'clear', 'loc', 'table')
#
# walker = NestedExpressionWalker(problem)
#
# node = walker.visit_expression((loc(b1) == table))
# assert str(node) == '=(loc(b1),table)' and not walker.nested_symbols # Nothing is changed
#
# node = walker.visit_expression(land(clear(b1) & clear(loc(b1)) & clear(loc(b2)), flat=True))
# assert str(node) == '((clear(b1) and =(_clear_fun(loc(b1)),True)) and =(_clear_fun(loc(b2)),True))'
| 41.05
| 105
| 0.62363
| 102
| 821
| 4.862745
| 0.392157
| 0.064516
| 0.104839
| 0.133065
| 0.068548
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026706
| 0.17905
| 821
| 19
| 106
| 43.210526
| 0.709199
| 0.859927
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9ff5a5ee62d2ada8f035ed015df587c46f476bb6
| 302
|
py
|
Python
|
codes/Ex074.py
|
BelfortJoao/Curso-phyton01
|
79376233be228f39bf548f90b8d9bd5419ac067a
|
[
"MIT"
] | 3
|
2021-08-17T14:02:14.000Z
|
2021-08-19T02:37:30.000Z
|
codes/Ex074.py
|
BelfortJoao/Curso-phyton01
|
79376233be228f39bf548f90b8d9bd5419ac067a
|
[
"MIT"
] | null | null | null |
codes/Ex074.py
|
BelfortJoao/Curso-phyton01
|
79376233be228f39bf548f90b8d9bd5419ac067a
|
[
"MIT"
] | null | null | null |
from random import randint

# Draw six pseudo-random integers in [1, 10]; the generator evaluates
# left-to-right, so the randint call sequence matches the original tuple.
x = tuple(randint(1, 10) for _ in range(6))
print('sorteei os numeros:', end='')
for n in x:
    print(f'{n}', end=", ")
print(f"\nO maior valor na ordem foi {max(x)}")
print(f"\nO menor valor na ordem foi {min(x)}")
| 37.75
| 100
| 0.625828
| 56
| 302
| 3.375
| 0.446429
| 0.253968
| 0.31746
| 0.449735
| 0.31746
| 0.31746
| 0.31746
| 0.31746
| 0.31746
| 0.31746
| 0
| 0.070866
| 0.15894
| 302
| 7
| 101
| 43.142857
| 0.673228
| 0
| 0
| 0
| 0
| 0
| 0.324503
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.571429
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
b0165e61ac75f9dc5b83642b5609b9a9b7f6b8be
| 1,085
|
py
|
Python
|
tests/test_query.py
|
vision-consensus/vision-python-sdk
|
663eefe6c47cc024738b59aaf38f11d25094f21e
|
[
"MIT"
] | 3
|
2021-04-28T09:12:18.000Z
|
2021-06-26T14:40:55.000Z
|
tests/test_query.py
|
vision-consensus/vision-python-sdk
|
663eefe6c47cc024738b59aaf38f11d25094f21e
|
[
"MIT"
] | null | null | null |
tests/test_query.py
|
vision-consensus/vision-python-sdk
|
663eefe6c47cc024738b59aaf38f11d25094f21e
|
[
"MIT"
] | 2
|
2021-06-26T12:03:29.000Z
|
2021-11-05T10:20:47.000Z
|
from visionpy import Vision, AsyncVision
import pytest
def test_query_account():
    """Exercise VRC10 asset queries against the `vtest` network (sync client)."""
    client = Vision(network='vtest')
    # Several VRC10 tokens share the name `tt`, so lookup by name must raise.
    with pytest.raises(Exception):
        asset = client.get_asset_from_name("tt")
        print(asset)
    balances = client.get_account_asset_balances("VDGXn73Qgf6V1aGbm8eigoHyPJRJpALN9F")
    print(balances)
    assert len(balances) > 0
    balance = client.get_account_asset_balance("VDGXn73Qgf6V1aGbm8eigoHyPJRJpALN9F", 1000007)
    print(balance)
    assert balance > 0
@pytest.mark.asyncio
async def test_async_query_account():
    """Exercise the same VRC10 asset queries through the async client."""
    async with AsyncVision(network='vtest') as client:
        # Several VRC10 tokens share the name `tt`, so lookup by name must raise.
        with pytest.raises(Exception):
            asset = await client.get_asset_from_name("tt")
            print(asset)
        balances = await client.get_account_asset_balances("VDGXn73Qgf6V1aGbm8eigoHyPJRJpALN9F")
        print(balances)
        assert len(balances) > 0
        balance = await client.get_account_asset_balance("VDGXn73Qgf6V1aGbm8eigoHyPJRJpALN9F", 1000007)
        print(balance)
        assert balance > 0
| 29.324324
| 99
| 0.693088
| 126
| 1,085
| 5.785714
| 0.333333
| 0.074074
| 0.087792
| 0.115226
| 0.751715
| 0.737997
| 0.737997
| 0.737997
| 0.737997
| 0.639232
| 0
| 0.054374
| 0.220277
| 1,085
| 36
| 100
| 30.138889
| 0.807329
| 0.069124
| 0
| 0.48
| 0
| 0
| 0.148957
| 0.135055
| 0
| 0
| 0
| 0
| 0.16
| 1
| 0.04
| false
| 0
| 0.08
| 0
| 0.12
| 0.24
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b01f06294bec79256719a213133705404299955c
| 163
|
py
|
Python
|
netqasm/examples/apps/multiple_files/shared/myfuncs.py
|
Doomsk/netqasm
|
5d6c6ad00c4e0f9ab0ec05518cfa827675f357e7
|
[
"MIT"
] | 6
|
2021-11-10T15:03:59.000Z
|
2022-02-16T19:35:01.000Z
|
netqasm/examples/apps/multiple_files/shared/myfuncs.py
|
Doomsk/netqasm
|
5d6c6ad00c4e0f9ab0ec05518cfa827675f357e7
|
[
"MIT"
] | 13
|
2021-11-26T09:19:46.000Z
|
2022-03-29T09:21:42.000Z
|
netqasm/examples/apps/multiple_files/shared/myfuncs.py
|
Doomsk/netqasm
|
5d6c6ad00c4e0f9ab0ec05518cfa827675f357e7
|
[
"MIT"
] | 4
|
2021-11-19T15:46:17.000Z
|
2022-01-23T18:59:15.000Z
|
def custom_send(socket):
    """Send a fixed demo message over *socket* (return value is discarded)."""
    message = "message from mod.myfunc()"
    socket.send(message)
def custom_recv(socket):
    """Receive one message from *socket* and return it.

    Fix: the original discarded socket.recv()'s result and implicitly
    returned None, making the received payload unreachable and the helper
    inconsistent with custom_measure, which does return its result.
    Returning the value is backward compatible for callers that ignored it.
    """
    return socket.recv()
def custom_measure(q):
    """Measure qubit *q* and return the measurement outcome."""
    outcome = q.measure()
    return outcome
| 14.818182
| 44
| 0.687117
| 23
| 163
| 4.73913
| 0.521739
| 0.247706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171779
| 163
| 10
| 45
| 16.3
| 0.807407
| 0
| 0
| 0
| 0
| 0
| 0.153374
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
b042b8e6234f330e5708ecd8c7f83cb0493efcb4
| 34
|
py
|
Python
|
string_between/__init__.py
|
sfinktah/string_between
|
dd6b3767050a68f9ccafd8f81eba79ecb4d0b050
|
[
"MIT"
] | null | null | null |
string_between/__init__.py
|
sfinktah/string_between
|
dd6b3767050a68f9ccafd8f81eba79ecb4d0b050
|
[
"MIT"
] | null | null | null |
string_between/__init__.py
|
sfinktah/string_between
|
dd6b3767050a68f9ccafd8f81eba79ecb4d0b050
|
[
"MIT"
] | null | null | null |
from .sf_string_between import *
| 11.333333
| 32
| 0.794118
| 5
| 34
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 34
| 2
| 33
| 17
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c65e50f5c05e507ac10817affaf1d4f87ca430fc
| 285
|
py
|
Python
|
5. WEB/app/external_sources/places/services/places_service.py
|
doyaguillo1997/Data2Gether
|
125e3e54060b342a473480f8cb1a913fc54f55ed
|
[
"MIT"
] | 1
|
2021-10-03T10:19:14.000Z
|
2021-10-03T10:19:14.000Z
|
5. WEB/app/external_sources/places/services/places_service.py
|
doyaguillo1997/Data2Gether
|
125e3e54060b342a473480f8cb1a913fc54f55ed
|
[
"MIT"
] | null | null | null |
5. WEB/app/external_sources/places/services/places_service.py
|
doyaguillo1997/Data2Gether
|
125e3e54060b342a473480f8cb1a913fc54f55ed
|
[
"MIT"
] | null | null | null |
from django.contrib.gis.geos import Point
from django.contrib.gis.measure import D
from app.external_sources.places.models import GoogleElement
def get_places_in_circle(center: Point, radius: int):
    """Return GoogleElement rows whose coordinates lie within *radius* km of *center*."""
    search_area = (center, D(km=radius))
    return GoogleElement.objects.filter(coord__distance_lte=search_area)
| 31.666667
| 83
| 0.814035
| 42
| 285
| 5.357143
| 0.690476
| 0.088889
| 0.151111
| 0.177778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094737
| 285
| 8
| 84
| 35.625
| 0.872093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.6
| 0.2
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
c6707e6936067b79264eef38b30a0955941543d1
| 238
|
py
|
Python
|
kevin/machine_learning/dataset/face/verification/__init__.py
|
cantbeblank96/kevin_toolbox
|
a258b2a42c9b4d042decb193354ecb7419bd837c
|
[
"MIT"
] | null | null | null |
kevin/machine_learning/dataset/face/verification/__init__.py
|
cantbeblank96/kevin_toolbox
|
a258b2a42c9b4d042decb193354ecb7419bd837c
|
[
"MIT"
] | null | null | null |
kevin/machine_learning/dataset/face/verification/__init__.py
|
cantbeblank96/kevin_toolbox
|
a258b2a42c9b4d042decb193354ecb7419bd837c
|
[
"MIT"
] | null | null | null |
#
from .factory import Face_Verification_DataSet_Factory as Factory
#
from .get_executor_ls import by_block
from .get_executor_ls import by_samples
#
from .build_generator import build_generator
from .build_iterator import build_iterator
| 26.444444
| 65
| 0.861345
| 35
| 238
| 5.485714
| 0.457143
| 0.072917
| 0.15625
| 0.177083
| 0.260417
| 0.260417
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105042
| 238
| 8
| 66
| 29.75
| 0.901408
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c691a0b22e205f9a91f24b16c166c95bff718863
| 199
|
py
|
Python
|
relu.py
|
hegman12/Deep-learning-in-numpy
|
1dff1793728434672f1843a3582596cbe857b03c
|
[
"Apache-2.0"
] | null | null | null |
relu.py
|
hegman12/Deep-learning-in-numpy
|
1dff1793728434672f1843a3582596cbe857b03c
|
[
"Apache-2.0"
] | null | null | null |
relu.py
|
hegman12/Deep-learning-in-numpy
|
1dff1793728434672f1843a3582596cbe857b03c
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
def relu_forword(activations):
    """ReLU forward pass: clamp every negative activation to zero."""
    # NOTE: name keeps the original "forword" spelling for compatibility.
    return np.maximum(0, activations)
def relu_backword(dout, cache):
    """ReLU backward pass.

    Passes the upstream gradient *dout* through, zeroing positions where the
    cached forward input *cache* was <= 0.

    Fix: the original assigned ``return_value = dout`` (an alias, not a
    copy) and wrote through it, mutating the caller's gradient array in
    place. We now build the result on a copy so *dout* is left untouched.
    """
    grad = np.array(dout, copy=True)
    grad[cache <= 0] = 0
    return grad
| 19.9
| 36
| 0.733668
| 29
| 199
| 4.862069
| 0.517241
| 0.234043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018405
| 0.180905
| 199
| 10
| 37
| 19.9
| 0.846626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0.142857
| 0.714286
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
05a245c63a10851b984054090d721a30aeea4cb9
| 292
|
py
|
Python
|
src/modules/polynomial/__init__.py
|
ychnlgy/TIMIT-diarization
|
1fbf410cbb643de60201d2d351f1654273885674
|
[
"MIT"
] | 1
|
2021-08-19T14:28:45.000Z
|
2021-08-19T14:28:45.000Z
|
src/modules/polynomial/__init__.py
|
ychnlgy/TIMIT-diarization
|
1fbf410cbb643de60201d2d351f1654273885674
|
[
"MIT"
] | null | null | null |
src/modules/polynomial/__init__.py
|
ychnlgy/TIMIT-diarization
|
1fbf410cbb643de60201d2d351f1654273885674
|
[
"MIT"
] | 1
|
2022-03-11T07:20:06.000Z
|
2022-03-11T07:20:06.000Z
|
from . import chebyshev
from .LagrangeBasis import LagrangeBasis
from .Activation import Activation
from .ChebyshevActivation import ChebyshevActivation
from .RegActivation import RegActivation
from .ActivationVisualizer import ActivationVisualizer
from .LinkActivation import LinkActivation
| 36.5
| 54
| 0.880137
| 27
| 292
| 9.518519
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09589
| 292
| 7
| 55
| 41.714286
| 0.973485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
05ca79a686f0d81e06a0bce6b2a5a798f70618ac
| 48
|
py
|
Python
|
pythonExample/ex-27.py
|
jeffierw/learnPython
|
5e8cab47bbbd4451252c9cd22c1b864b19e42228
|
[
"MIT"
] | null | null | null |
pythonExample/ex-27.py
|
jeffierw/learnPython
|
5e8cab47bbbd4451252c9cd22c1b864b19e42228
|
[
"MIT"
] | null | null | null |
pythonExample/ex-27.py
|
jeffierw/learnPython
|
5e8cab47bbbd4451252c9cd22c1b864b19e42228
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# ex-27: 记住逻辑
# 之前学习的C学过了感觉挺简单的
| 12
| 17
| 0.6875
| 7
| 48
| 4.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 0.166667
| 48
| 4
| 17
| 12
| 0.75
| 0.854167
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
af09a5ac5a60e5ce75d760abae41aaa38e1be10c
| 114
|
py
|
Python
|
services/yelp/__init__.py
|
Ovakefali13/buerro
|
1476f6e708f95a09a2d73f67ae8aa2cb3bb836af
|
[
"MIT"
] | 2
|
2020-03-26T19:20:31.000Z
|
2020-03-30T13:09:07.000Z
|
services/yelp/__init__.py
|
Ovakefali13/buerro
|
1476f6e708f95a09a2d73f67ae8aa2cb3bb836af
|
[
"MIT"
] | 51
|
2020-03-05T09:04:21.000Z
|
2021-12-13T20:34:22.000Z
|
services/yelp/__init__.py
|
Ovakefali13/buerro
|
1476f6e708f95a09a2d73f67ae8aa2cb3bb836af
|
[
"MIT"
] | null | null | null |
from .yelp_service import YelpService, YelpServiceRemote, YelpServiceModule
from .yelp_request import YelpRequest
| 38
| 75
| 0.877193
| 12
| 114
| 8.166667
| 0.75
| 0.163265
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087719
| 114
| 2
| 76
| 57
| 0.942308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
af0e9d9164e56dd34b1cb72be2979903eb9db903
| 59
|
py
|
Python
|
questions/question#6.py
|
seunghk1206/1-Manhattan-FullStack-Development
|
315696197d67b5ed46df50b87f24f55a7209a6b0
|
[
"MIT"
] | 1
|
2020-04-16T15:40:29.000Z
|
2020-04-16T15:40:29.000Z
|
questions/question#6.py
|
seunghk1206/Python-basic
|
315696197d67b5ed46df50b87f24f55a7209a6b0
|
[
"MIT"
] | null | null | null |
questions/question#6.py
|
seunghk1206/Python-basic
|
315696197d67b5ed46df50b87f24f55a7209a6b0
|
[
"MIT"
] | null | null | null |
N = int(input())
# Only answer for N in the supported range (0, 10000].
if 0 < N <= 10000:
    print(1000 * (N - 1) + 666)
| 29.5
| 42
| 0.542373
| 12
| 59
| 2.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.291667
| 0.186441
| 59
| 2
| 42
| 29.5
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.