hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
8a58cb34b8c9ce5b83974e59a07a04d7bc934385
711
py
Python
utils/gpu_utils.py
veritas9872/Knowledge-Distillation-Task
d260b1057c96cfc52af8ff7a0775befbd102f59d
[ "MIT" ]
2
2020-02-16T13:30:27.000Z
2021-01-18T14:18:26.000Z
utils/gpu_utils.py
veritas9872/Knowledge-Distillation-Task
d260b1057c96cfc52af8ff7a0775befbd102f59d
[ "MIT" ]
null
null
null
utils/gpu_utils.py
veritas9872/Knowledge-Distillation-Task
d260b1057c96cfc52af8ff7a0775befbd102f59d
[ "MIT" ]
null
null
null
import torch from torch import nn def get_gpu_if_available(gpu: int = None): # Device agnostic setting. return torch.device(f'cuda:{gpu}') if torch.cuda.is_available() and (gpu is not None) else torch.device('cpu') def get_single_model_device(model: nn.Module) -> torch.device: """Function for retrieving device of a model, assuming that it is on a single device. If the model is on multiple devices, this function will return the first device. There will be a silent error. This should be fixed if possible. Args: model: The model, assumed to be on a single device. Returns: The device that the model is in. """ return next(model.parameters()).device
30.913043
114
0.703235
112
711
4.401786
0.482143
0.066937
0.036511
0.060852
0
0
0
0
0
0
0
0
0.21519
711
22
115
32.318182
0.883513
0.511955
0
0
0
0
0.041667
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.166667
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
4
8a619c88d08938613878f8018030baf1ebd6aa78
122
py
Python
src/s3labeler/__main__.py
karlrink/s3labeler
60f8ea19fc5895bd2e5e7f8a8eef231888462f89
[ "MIT" ]
null
null
null
src/s3labeler/__main__.py
karlrink/s3labeler
60f8ea19fc5895bd2e5e7f8a8eef231888462f89
[ "MIT" ]
null
null
null
src/s3labeler/__main__.py
karlrink/s3labeler
60f8ea19fc5895bd2e5e7f8a8eef231888462f89
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """s3labeler.__main__: execute when directory is called.""" from .s3labeler import main main()
15.25
59
0.672131
15
122
5.2
0.8
0
0
0
0
0
0
0
0
0
0
0.029126
0.155738
122
7
60
17.428571
0.728155
0.622951
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
8a78f5f2b5195b5b42a9594ba0949fd7ca0bd378
51
py
Python
genre_recognizer.py
salubinseid/am-genre-reco
b7036c7dd9148ad4bdd03e7d3edaa1ac7d8b7a02
[ "MIT" ]
null
null
null
genre_recognizer.py
salubinseid/am-genre-reco
b7036c7dd9148ad4bdd03e7d3edaa1ac7d8b7a02
[ "MIT" ]
null
null
null
genre_recognizer.py
salubinseid/am-genre-reco
b7036c7dd9148ad4bdd03e7d3edaa1ac7d8b7a02
[ "MIT" ]
null
null
null
# A code which load a model and made a prediction
25.5
50
0.745098
10
51
3.8
0.8
0
0
0
0
0
0
0
0
0
0
0
0.235294
51
1
51
51
0.974359
0.921569
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
8a84db0b3c402e33cdf9c7aa1666f6df4ceb8668
376
py
Python
home/admin.py
DeepanshuPratik/Docon
ad4236e66cf19b668ac69ee3d43711e64a729867
[ "MIT" ]
1
2021-06-22T18:00:05.000Z
2021-06-22T18:00:05.000Z
home/admin.py
DeepanshuPratik/Docon
ad4236e66cf19b668ac69ee3d43711e64a729867
[ "MIT" ]
null
null
null
home/admin.py
DeepanshuPratik/Docon
ad4236e66cf19b668ac69ee3d43711e64a729867
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import UserDetails from .models import Contact from .models import Book from .models import Diagnostic from .models import Report # Registered all the models here in the database admin.site.register(UserDetails) admin.site.register(Contact) admin.site.register(Book) admin.site.register(Diagnostic) admin.site.register(Report)
25.066667
49
0.819149
53
376
5.811321
0.358491
0.162338
0.25974
0
0
0
0
0
0
0
0
0
0.109043
376
15
50
25.066667
0.919403
0.12234
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.545455
0
0.545455
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
8a8f3ecf67bc94a52bb65d2b3bc558883bbd3474
203
py
Python
src/vmcontroller.host/vmcontroller/host/services/__init__.py
dgquintas/vmcontroller.unstable
131c0af19c5923ef57c74006246dc41c65f24120
[ "BSD-3-Clause" ]
null
null
null
src/vmcontroller.host/vmcontroller/host/services/__init__.py
dgquintas/vmcontroller.unstable
131c0af19c5923ef57c74006246dc41c65f24120
[ "BSD-3-Clause" ]
null
null
null
src/vmcontroller.host/vmcontroller/host/services/__init__.py
dgquintas/vmcontroller.unstable
131c0af19c5923ef57c74006246dc41c65f24120
[ "BSD-3-Clause" ]
null
null
null
try: from HostStompEngine import * from HostServices import * from HostWords import * except ImportError, e: print "Import error in %s : %s" % (__name__, e) import sys sys.exit()
22.555556
51
0.650246
25
203
5.12
0.64
0.15625
0
0
0
0
0
0
0
0
0
0
0.261084
203
8
52
25.375
0.853333
0
0
0
0
0
0.1133
0
0
0
0
0
0
0
null
null
0
0.75
null
null
0.125
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
4
8a8fe78acefa95d1af7ec08489d855a0707a7dc3
154
py
Python
services/traction/api/endpoints/models/tenant_schema.py
Open-Earth-Foundation/traction
908b555a7f408a88541b7692d3730e37a297c919
[ "Apache-2.0" ]
12
2022-01-29T20:30:03.000Z
2022-03-29T11:46:14.000Z
services/traction/api/endpoints/models/tenant_schema.py
Open-Earth-Foundation/traction
908b555a7f408a88541b7692d3730e37a297c919
[ "Apache-2.0" ]
38
2021-11-22T17:52:50.000Z
2022-03-31T17:52:00.000Z
services/traction/api/endpoints/models/tenant_schema.py
Open-Earth-Foundation/traction
908b555a7f408a88541b7692d3730e37a297c919
[ "Apache-2.0" ]
9
2021-11-22T18:05:48.000Z
2022-03-29T11:25:08.000Z
from api.db.models.base import BaseSchema class TenantSchemaRequest(BaseSchema): schema_name: str schema_version: str attributes: list[str]
19.25
41
0.75974
19
154
6.052632
0.789474
0
0
0
0
0
0
0
0
0
0
0
0.168831
154
7
42
22
0.898438
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.2
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
1
0
0
4
8ac1b29e5998d9fe8f4ae803c15e96047bff8204
122
py
Python
src/java/python/__init__.py
bastie/PythonVampire
b5102f7389f3d583c41ec85c574ce5a72bbf4460
[ "Apache-2.0" ]
1
2020-09-05T14:02:11.000Z
2020-09-05T14:02:11.000Z
src/java/util/__init__.py
bastie/PythonVampire
b5102f7389f3d583c41ec85c574ce5a72bbf4460
[ "Apache-2.0" ]
null
null
null
src/java/util/__init__.py
bastie/PythonVampire
b5102f7389f3d583c41ec85c574ce5a72bbf4460
[ "Apache-2.0" ]
null
null
null
# SPDX-FileCopyrightText: 2020 - Sebastian Ritter <bastie@users.noreply.github.com> # SPDX-License-Identifier: Apache-2.0
40.666667
83
0.786885
16
122
6
0.9375
0
0
0
0
0
0
0
0
0
0
0.053571
0.081967
122
2
84
61
0.803571
0.959016
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
8acfacc8115035f5c7f9346338deed04f4d415e9
749
py
Python
auxiliary/config.py
yanlinqian/Temporal-Color-Constancy
ebd363962fa8ae0908252cabaf97355da3da8a80
[ "MIT" ]
8
2020-09-04T08:55:41.000Z
2021-07-16T01:51:57.000Z
auxiliary/config.py
yanlinqian/Temporal-Color-Constancy
ebd363962fa8ae0908252cabaf97355da3da8a80
[ "MIT" ]
3
2021-11-04T02:35:09.000Z
2021-11-24T12:37:28.000Z
auxiliary/config.py
yanlinqian/Temporal-Color-Constancy
ebd363962fa8ae0908252cabaf97355da3da8a80
[ "MIT" ]
1
2020-10-15T13:20:42.000Z
2020-10-15T13:20:42.000Z
#superset of config ### C4 or FC4 # FCN_INPUT_SIZE = 512 # # Use data augmentation? # AUGMENTATION = True # # Rotation angle # AUGMENTATION_ANGLE = 60 # # Patch scale # AUGMENTATION_SCALE = [0.1, 1.0] # # Color rescaling? # AUGMENTATION_COLOR = 0.8 # BOARD_FILL_COLOR = 1e-5 ### ffcc #FCN_INPUT_SIZE = 512 # Use data augmentation? #AUGMENTATION = True # Rotation angle #AUGMENTATION_ANGLE = 0 # Patch scale #AUGMENTATION_SCALE = [1.0,1,0]#[0.8, 1.0] # Color rescaling? #AUGMENTATION_COLOR = 0 #BOARD_FILL_COLOR = 0 ### RCC-Net FCN_INPUT_SIZE = 512 # Use data augmentation? AUGMENTATION = True # Rotation angle AUGMENTATION_ANGLE = 15 # Patch scale AUGMENTATION_SCALE = [0.8, 1.0] # Color rescaling? AUGMENTATION_COLOR = 0 BOARD_FILL_COLOR = 1e-5
19.710526
42
0.723632
109
749
4.779817
0.293578
0.019194
0.069098
0.086372
0.861804
0.71785
0.71785
0.652591
0.652591
0.652591
0
0.065183
0.160214
749
37
43
20.243243
0.763116
0.714286
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
8ad66a12455c75eba1193467be036df654ae6c1a
2,601
py
Python
__init__.py
jmaggio14/borealis
e61a02671fbfb910cd9526e717a33c93d1880773
[ "MIT" ]
null
null
null
__init__.py
jmaggio14/borealis
e61a02671fbfb910cd9526e717a33c93d1880773
[ "MIT" ]
null
null
null
__init__.py
jmaggio14/borealis
e61a02671fbfb910cd9526e717a33c93d1880773
[ "MIT" ]
null
null
null
# @Author: Jeff Maggio <jmaggio> # @Date: 2017-07-30T15:37:09-07:00 # @Email: jmaggio@planetaryresources.com # @Project: sam (framing camera simulator) # @Last modified by: jmaggio # @Last modified time: 2017-07-30T15:37:53-07:00 # @Copyright: #!/usr/bin/env python ################################################################################ # Copyright (c) 2017 Planetary Resources Inc. # Planetary Resources Proprietary # NOTICE: # All information contained herein is, and remains the property of Planetary # Resources Incorporated, its subsidiaries and its suppliers, if any. The # intellectual and technical concepts contained herein are proprietary to # Planetary Resources Incorporated, its subsidiaries, and its suppliers and # may be covered by U.S. and Foreign Patents, patents in process, and are # protected by trade secret or copyright law. Dissemination of this # information or reproduction of this material is strictly forbidden unless # prior written permission is obtained from Planetary Resources Inc. 
################################################################################ ## Doxygen header # @author <your name> # @brief <description> # Standard library imports ################################################################################ # Third party library imports ################################################################################ # Standard PRI imports ################################################################################ # External component imports ################################################################################ # Internal component imports ################################################################################ # Constant definitions ################################################################################ # Utility function definitions ################################################################################ # Public class definitions ################################################################################ # Public function definitions ################################################################################ # Private class definitions ################################################################################ # Private function definitions ################################################################################ # Main function and argument parsing ################################################################################ from .path import * from .render import * from .terrain import * from .tube import *
37.695652
80
0.433295
189
2,601
5.962963
0.592593
0.079858
0.019521
0.02307
0.106477
0.106477
0.106477
0.106477
0
0
0
0.017036
0.09727
2,601
68
81
38.25
0.462947
0.49827
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
76d6ba281bb2c79903b5de6a7f400af0f63a86e4
370
py
Python
organice/settings.py
bittner/django-organice
7621e4cf2361db84b42d77e5e72e341559eb9906
[ "Apache-2.0" ]
34
2015-04-22T12:47:32.000Z
2022-03-18T02:16:17.000Z
organice/settings.py
TebelloX/django-organice
7621e4cf2361db84b42d77e5e72e341559eb9906
[ "Apache-2.0" ]
13
2015-07-24T05:25:56.000Z
2020-09-02T17:38:35.000Z
organice/settings.py
TebelloX/django-organice
7621e4cf2361db84b42d77e5e72e341559eb9906
[ "Apache-2.0" ]
14
2015-05-01T20:42:49.000Z
2022-03-25T01:12:34.000Z
"""Default settings for django Organice""" from django.conf import settings URL_PATH_ADMIN = getattr(settings, 'ORGANICE_URL_PATH_ADMIN', 'admin') URL_PATH_BLOG = getattr(settings, 'ORGANICE_URL_PATH_BLOG', 'blog') URL_PATH_NEWSLETTER = getattr(settings, 'ORGANICE_URL_PATH_NEWSLETTER', 'newsletter') URL_PATH_TODO = getattr(settings, 'ORGANICE_URL_PATH_TODO', 'todo')
46.25
85
0.802703
50
370
5.54
0.3
0.202166
0.33213
0.375451
0.433213
0
0
0
0
0
0
0
0.081081
370
7
86
52.857143
0.814706
0.097297
0
0
0
0
0.359756
0.289634
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
76e786acc34f0b2ad15c66ff0074cbab39b06e92
60
py
Python
cached_result/tests/factories.py
darthwade/django-cached-result
0e6a9f258db4fc98e0d9c8190adde3aca0b95782
[ "MIT" ]
1
2016-08-29T20:26:40.000Z
2016-08-29T20:26:40.000Z
cached_result/tests/factories.py
darthwade/django-cached-result
0e6a9f258db4fc98e0d9c8190adde3aca0b95782
[ "MIT" ]
null
null
null
cached_result/tests/factories.py
darthwade/django-cached-result
0e6a9f258db4fc98e0d9c8190adde3aca0b95782
[ "MIT" ]
null
null
null
"""Factories for the cached_result app.""" # import factory
20
42
0.733333
8
60
5.375
1
0
0
0
0
0
0
0
0
0
0
0
0.133333
60
2
43
30
0.826923
0.866667
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
0a0a43a57eddfd50c80bd36c49d4becf4a751886
22
py
Python
questionary/version.py
philastrophist/questionary
512d52f5216e3494902a124f660453ec7fa4fa16
[ "MIT" ]
2,245
2017-03-31T14:43:44.000Z
2022-03-30T01:17:25.000Z
trackerjacker/version.py
Warlockk/trackerjacker
692262b2646a2e784733a258da8b8d6bfe79e3d7
[ "MIT" ]
46
2020-06-27T18:13:41.000Z
2021-07-12T10:49:00.000Z
trackerjacker/version.py
Warlockk/trackerjacker
692262b2646a2e784733a258da8b8d6bfe79e3d7
[ "MIT" ]
191
2017-05-20T20:25:49.000Z
2022-03-03T05:44:49.000Z
__version__ = "1.9.0"
11
21
0.636364
4
22
2.5
1
0
0
0
0
0
0
0
0
0
0
0.157895
0.136364
22
1
22
22
0.368421
0
0
0
0
0
0.227273
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
0a264c59a07b3a3f0e95e5c8c2c675aba96c9906
220
py
Python
little_mees/little_mees/doctype/global_defaults/test_global_defaults.py
ayhamkht/little_mees
6acd7a347a7786bbdb2f5b06f60ba5c22059eb19
[ "MIT" ]
null
null
null
little_mees/little_mees/doctype/global_defaults/test_global_defaults.py
ayhamkht/little_mees
6acd7a347a7786bbdb2f5b06f60ba5c22059eb19
[ "MIT" ]
null
null
null
little_mees/little_mees/doctype/global_defaults/test_global_defaults.py
ayhamkht/little_mees
6acd7a347a7786bbdb2f5b06f60ba5c22059eb19
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright (c) 2021, Kunhi Mohamed and Contributors # See license.txt from __future__ import unicode_literals # import frappe import unittest class TestGlobalDefaults(unittest.TestCase): pass
20
52
0.768182
27
220
6.074074
0.888889
0
0
0
0
0
0
0
0
0
0
0.026455
0.140909
220
10
53
22
0.84127
0.463636
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.25
0.5
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
0
0
0
4
0a656cc99cb1f62b7de712de53aab5716e41ab05
131
py
Python
bunq/sdk/exception/bunq_exception.py
mwiekens/sdk_python
9333636083bc63dca4353e8f497588f57617efec
[ "MIT" ]
88
2017-08-01T18:39:46.000Z
2022-02-21T12:34:16.000Z
bunq/sdk/exception/bunq_exception.py
mwiekens/sdk_python
9333636083bc63dca4353e8f497588f57617efec
[ "MIT" ]
136
2017-08-02T13:54:41.000Z
2021-04-25T20:31:08.000Z
bunq/sdk/exception/bunq_exception.py
mwiekens/sdk_python
9333636083bc63dca4353e8f497588f57617efec
[ "MIT" ]
30
2017-08-15T09:35:42.000Z
2021-05-06T12:42:06.000Z
class BunqException(Exception): def __init__(self, message: str) -> None: super(BunqException, self).__init__(message)
32.75
52
0.709924
14
131
6.071429
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.167939
131
3
53
43.666667
0.779817
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
0a66793300d4bd961723ef76e259170dabe2366c
517
py
Python
src/mysql_bigquery/prefect_utils/__init__.py
beirving/mysql-to-bigquery
75dfe3390f1e1a0dc54d5cace0cf1a89a0560ae6
[ "MIT" ]
null
null
null
src/mysql_bigquery/prefect_utils/__init__.py
beirving/mysql-to-bigquery
75dfe3390f1e1a0dc54d5cace0cf1a89a0560ae6
[ "MIT" ]
null
null
null
src/mysql_bigquery/prefect_utils/__init__.py
beirving/mysql-to-bigquery
75dfe3390f1e1a0dc54d5cace0cf1a89a0560ae6
[ "MIT" ]
null
null
null
import os from mysql_bigquery.prefect_utils import install from mysql_bigquery.prefect_utils import jobs if os.environ.get('MYSQL_BIG_QUERY_DEFINITIONS') is None: os.environ['MYSQL_BIG_QUERY_DEFINITIONS'] = '/credentials/definitions.json' if os.environ.get('MYSQL_BIG_QUERY_GOOGLE_AUTH') is None: os.environ['MYSQL_BIG_QUERY_GOOGLE_AUTH'] = '/credentials/google_auth.json' if os.environ.get('MYSQL_BIG_QUERY_MYSQL_CONFIG') is None: os.environ['MYSQL_BIG_QUERY_MYSQL_CONFIG'] = '/credentials/config.ini'
43.083333
79
0.804642
78
517
4.987179
0.294872
0.138817
0.200514
0.107969
0.732648
0.624679
0.44473
0.159383
0
0
0
0
0.087041
517
12
80
43.083333
0.824153
0
0
0
0
0
0.472973
0.472973
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
6a544f8fee4e4b8776c18503a376e27fe4af0326
506
py
Python
OpenGLCffi/GL/EXT/SGIX/async.py
cydenix/OpenGLCffi
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
[ "MIT" ]
null
null
null
OpenGLCffi/GL/EXT/SGIX/async.py
cydenix/OpenGLCffi
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
[ "MIT" ]
null
null
null
OpenGLCffi/GL/EXT/SGIX/async.py
cydenix/OpenGLCffi
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
[ "MIT" ]
null
null
null
from OpenGLCffi.GL import params @params(api='gl', prms=['marker']) def glAsyncMarkerSGIX(marker): pass @params(api='gl', prms=['markerp']) def glFinishAsyncSGIX(markerp): pass @params(api='gl', prms=['markerp']) def glPollAsyncSGIX(markerp): pass @params(api='gl', prms=['range']) def glGenAsyncMarkersSGIX(range): pass @params(api='gl', prms=['marker', 'range']) def glDeleteAsyncMarkersSGIX(marker, range): pass @params(api='gl', prms=['marker']) def glIsAsyncMarkerSGIX(marker): pass
15.8125
44
0.705534
61
506
5.852459
0.295082
0.151261
0.184874
0.252101
0.498599
0.498599
0.330532
0
0
0
0
0
0.106719
506
31
45
16.322581
0.789823
0
0
0.526316
0
0
0.107143
0
0
0
0
0
0
1
0.315789
false
0.315789
0.052632
0
0.368421
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
4
6a690851e616a20a4cc455e792561102402f3c7e
38,316
py
Python
bayes_filter/vi.py
Joshuaalbert/bayes_filter
2997d60d8cf07f875e42c0b5f07944e9ab7e9d33
[ "Apache-2.0" ]
null
null
null
bayes_filter/vi.py
Joshuaalbert/bayes_filter
2997d60d8cf07f875e42c0b5f07944e9ab7e9d33
[ "Apache-2.0" ]
3
2019-02-21T16:00:53.000Z
2020-03-31T01:33:00.000Z
bayes_filter/vi.py
Joshuaalbert/bayes_filter
2997d60d8cf07f875e42c0b5f07944e9ab7e9d33
[ "Apache-2.0" ]
null
null
null
import tensorflow as tf import numpy as np import tensorflow_probability as tfp from .sgd import adam_stochastic_gradient_descent, natural_adam_stochastic_gradient_descent, natural_adam_stochastic_gradient_descent_with_linesearch, natural_adam_stochastic_gradient_descent_with_linesearch_minibatch from . import float_type, TEC_CONV from .misc import sqrt_with_finite_grads, safe_cholesky, flatten_batch_dims, log_normal_cdf_solve from .kernels import DTECIsotropicTimeGeneral class Likelihood(object): def __init__(self): pass def log_prob(self, *args): pass class VariationalPosterior(object): def __init__(self, event_size): self._event_size = event_size self._distribution = None def sample(self, num_samples): if self._distribution is None: raise ValueError("no distribution defined") return self._distribution.sample(num_samples) def _build_distribution(self, *params): """ Build the distribution. :param params: :return: """ raise NotImplementedError() def initial_variational_params(self, batch_size): raise NotImplementedError() class WhitenedVariationalPosterior(VariationalPosterior): def __init__(self, event_size): super(WhitenedVariationalPosterior, self).__init__(event_size=event_size) def _build_distribution(self, loc, scale): """ Build the MultivariateNormalDiagWithSoftplusScale distribution :param loc: :param scale: :return: """ return tfp.distributions.MultivariateNormalDiag(loc, scale_diag=tf.nn.softplus(scale)) def initial_variational_params(self, batch_size=None): """ Gets the initial parameters :param batch_size: :return: """ if batch_size is not None: m = tf.zeros(shape=[batch_size, self._event_size], dtype=float_type) S_inverse = tfp.distributions.softplus_inverse( tf.ones(shape=[batch_size, self._event_size], dtype=float_type)) return m, S_inverse m = tf.zeros(shape=[self._event_size], dtype=float_type) S_inverse = tfp.distributions.softplus_inverse( tf.ones(shape=[self._event_size], dtype=float_type)) return m, S_inverse class 
LaplaceLikelihood(Likelihood): def __init__(self, Yreal, Yimag, freqs, transform_fn): super(LaplaceLikelihood, self).__init__() self._Yreal = Yreal self._Yimag = Yimag self._invfreqs = tf.constant(TEC_CONV, float_type) * tf.math.reciprocal(freqs) self._transform_fn = transform_fn def log_prob(self, white_dtec, y_sigma): """ Represents log P(Yreal, Yimag | white_dtec, hyperparams) where P is the product of Laplace distributions over frequency and coordinate index log P = Sum_i Sum_nu (-log(2) - log(y_sigma) - (|Yreal(i,nu) - Yreal_model(i, nu)| + |Yimag(i, nu) - Yimag_model(i, nu)|) / y_sigma) :param white_dtec: tf.Tensor [A, N] :param log_y_sigma: tf.Tensor [B, 1] :return: tf.Tensor [A, B] """ Nf = tf.cast(tf.shape(self._invfreqs)[0],float_type) # A, B, N dtec = self._transform_fn(white_dtec) # [A, B, N, Nf] phase = dtec[..., None] * self._invfreqs Yreal_model = tf.cos(phase) Yimag_model = tf.sin(phase) # B, 1 log_y_sigma = tf.math.log(y_sigma) # [A, B, N, Nf] likelihood = -tf.math.reciprocal(y_sigma[..., None]) * sqrt_with_finite_grads( tf.math.square(self._Yimag - Yimag_model) + tf.math.square(self._Yreal - Yreal_model))\ - log_y_sigma[..., None] - tf.math.log(tf.constant(2., float_type)) # A, B #TODO: is div by Nf right? 
likelihood = tf.reduce_sum(likelihood, axis=[-2, -1])/Nf prior = tf.reduce_mean(tfp.distributions.Normal(loc=tf.constant(0.05, float_type), scale=tf.constant(0.05, float_type)).log_prob(y_sigma)) return likelihood + prior class VariationalBayesHeirarchical(object): def __init__(self, Yreal, Yimag, freqs, X, Xstar, dtec_samples=10, hyperparam_samples=10, mean_hyperparam_approx=True, obs_type='DTEC', fed_kernel='RBF'): self._Yreal = Yreal self._Yimag = Yimag self._freqs = freqs self._invfreqs = tf.constant(TEC_CONV, float_type) * tf.math.reciprocal(freqs) self._X = X self._Xstar = Xstar self._Xconcat = tf.concat([self._X, self._Xstar], axis=0) self.N = tf.shape(self._X)[0] self.Ns = tf.shape(self._Xstar)[0] self._obs_type = obs_type self._fed_kernel = fed_kernel self._dtec_samples = tf.convert_to_tensor(dtec_samples, tf.int32, name='num_dtec_samples') self._hyperparam_samples = tf.convert_to_tensor(hyperparam_samples, tf.int32, name='num_hyperparam_samples') self._mean_hyperparam_approx = mean_hyperparam_approx self._white_posterior = WhitenedVariationalPosterior(event_size=tf.shape(self._X)[0]) # amp, lengthscales, a, b, timescales, y_sigma self._hyperparam_posterior = WhitenedVariationalPosterior(event_size=6) self._hyperparam_bijectors = [ tfp.bijectors.Chain( [tfp.bijectors.AffineScalar(scale=tf.constant(3., float_type)), tfp.bijectors.Softplus()]), tfp.bijectors.Chain( [tfp.bijectors.AffineScalar(scale=tf.constant(15., float_type)), tfp.bijectors.Softplus()]), tfp.bijectors.Chain( [tfp.bijectors.AffineScalar(scale=tf.constant(250., float_type)), tfp.bijectors.Softplus()]), tfp.bijectors.Chain( [tfp.bijectors.AffineScalar(scale=tf.constant(100., float_type)), tfp.bijectors.Softplus()]), tfp.bijectors.Chain( [tfp.bijectors.AffineScalar(scale=tf.constant(50., float_type)), tfp.bijectors.Softplus()]), tfp.bijectors.Chain( [tfp.bijectors.AffineScalar(scale=tf.constant(0.1, float_type)), tfp.bijectors.Softplus()]) ] def _initial_states(self, batch_size=None): return 
self._white_posterior.initial_variational_params( batch_size), self._hyperparam_posterior.initial_variational_params(batch_size) def _constrain_hyperparams(self, sampled_hyperparams): """ Constrains the samples of hyperparams. :param sampled_hyperparams: tf.Tensor [samples, 6] :return: Tuple of tf.Tensor Each of shape [samples, 1] """ constrained_hyperparams = [] for i in range(len(self._hyperparam_bijectors)): bijector = self._hyperparam_bijectors[i] # num_hyperparams, 1 s = sampled_hyperparams[:, i:i + 1] constrained_hyperparams.append(bijector.forward(s)) return constrained_hyperparams def _loss_fn(self, white_dtec_mean, white_dtec_scale, hyperparam_mean, hyperparam_scale): white_vi_params, hyperparam_vi_params = (white_dtec_mean, white_dtec_scale), (hyperparam_mean, hyperparam_scale) hyperparam_dist = self._hyperparam_posterior._build_distribution(*hyperparam_vi_params) if self._mean_hyperparam_approx: #1, 6 sampled_hyperparams = hyperparam_vi_params[0][None,:] else: # num_hyperparams, 6 sampled_hyperparams = hyperparam_dist.sample(self._hyperparam_samples) amp, lengthscales, a, b, timescale, y_sigma = self._constrain_hyperparams(sampled_hyperparams) kern = DTECIsotropicTimeGeneral(variance=tf.math.square(amp), lengthscales=lengthscales, a=a, b=b, timescale=timescale, fed_kernel=self._fed_kernel, obs_type=self._obs_type, squeeze=False) # num_hyperparams, N, N K = kern.K(self._X, None) # num_hyperparams, N, N L = safe_cholesky(K) # no mean right now def transform_fn(white_dtec): """ Constrain white_dtec to tec mean_approx_hyperparams :param white_dtec: tf.Tensor [b0,..., bB, N] :param data_only: tf.bool :return: tf.Tensor [b0,...,bB, b0,...,bC,N] """ # TODO: add mean # L[d,i,j].white_dtec[b,j] -> [b,d,i] # b0,..., bB, , b0,...,bC,N return tf.tensordot(white_dtec, L, axes=[[-1], [-1]]) white_dist = self._white_posterior._build_distribution(*white_vi_params) # num_dtec, N white_dtec = white_dist.sample(self._dtec_samples) likelihood = 
LaplaceLikelihood(self._Yreal, self._Yimag, self._freqs, transform_fn=transform_fn) # num_hyperparams #TODO: derive better var_exp var_exp = tf.reduce_mean(likelihood.log_prob(white_dtec, y_sigma), axis=0) # num_hyperparams dtec_prior_KL = self._dtec_prior_kl(white_vi_params, L) # scalar hyperparam_prior_KL = self._hyperparams_prior_kl(hyperparam_vi_params) # scalar elbo = tf.reduce_mean(var_exp - dtec_prior_KL, axis=0) - hyperparam_prior_KL with tf.control_dependencies([tf.print('elbo', elbo, 'var_exp', var_exp, 'dtec_prior', dtec_prior_KL, 'hyperparam_prior', hyperparam_prior_KL, 'amp', amp, 'lengthscales', lengthscales, 'a', a, 'b', b, 'timescale', timescale, 'y_sigma', y_sigma)]): loss = tf.math.negative(elbo, name='loss') return loss def _hyperparams_prior_kl(self, hyperparams_params): """The KL-div[ Q(hyperparams) || P(hyperparams) ] P(hyperparams) = U[-infty, infty](hyperparams) = N[0, infty](hyperparams) :param hyperparams_params: tf.Tensor [6] mean [6] diag_scale :return: tf.Tensor scalar """ variance = tf.math.square(tf.nn.softplus(hyperparams_params[1])) entropy = tf.reduce_sum(tf.constant(0.5, float_type) * tf.math.log(tf.constant(2 * np.pi * np.exp(1), float_type) * variance)) return -entropy def _dtec_prior_kl(self, white_dtec_params, L): """ Get the KL-div [ Q(white_dtec) || P(white_dtec | hyperparams)] where Q = N[m, S] and S is diagonal and P = |L|^{-1} N[0,I] KL-div [ N[m, S] || |L|^{-1} N[0,I]] = KL-div [ N[m, S] || |L|^{-1} N[0,I]] + log |L| :param white_dtec_params: tuple of tf.Tensor [N+Ns] mean [N+Ns] unconstrained scale :param L: tf.Tensor [num_hyperparams, N, N] :return: tf.Tensor [num_hyperparams] """ # [N+Ns] mean, S = white_dtec_params variance = tf.math.square(tf.nn.softplus(S)) # num_hyperparams logdetL = tf.reduce_sum(tf.math.log(tf.linalg.diag_part(L)), axis=-1) return 0.5 * tf.reduce_sum( variance + tf.math.square(mean) - tf.constant(1., float_type) - 2. 
* tf.math.log(variance), axis=-1) + logdetL def _build_variational_posteriors(self, white_vi_params, hyperparam_vi_params): hyperparam_dist = self._hyperparam_posterior._build_distribution(*hyperparam_vi_params) white_dist = self._white_posterior._build_distribution(*white_vi_params) return white_dist, hyperparam_dist def solve_variational_posterior(self, param_warmstart, hyperparams_warmstart, iters=100, learning_rate=0.001, parallel_iterations=10): (white_dtec_mean, white_dtec_scale), (hyperparam_mean, hyperparam_scale) = self._initial_states() ((white_dtec_mean, white_dtec_scale), (hyperparam_mean, hyperparam_scale)) = \ tf.cond(tf.reduce_all(tf.equal(param_warmstart[0], 0.)), lambda: ((white_dtec_mean, white_dtec_scale), (hyperparam_mean, hyperparam_scale)), lambda: (param_warmstart, hyperparams_warmstart), strict=True) # [white_dtec_mean, white_dtec_scale, hyperparam_mean, hyperparam_scale], loss = \ # adam_stochastic_gradient_descent(self._loss_fn, # [white_dtec_mean, white_dtec_scale, hyperparam_mean, hyperparam_scale], # iters=iters, # learning_rate=learning_rate, # parallel_iterations=parallel_iterations) [white_dtec_mean, white_dtec_scale], [hyperparam_mean, hyperparam_scale], loss = \ natural_adam_stochastic_gradient_descent(self._loss_fn, [white_dtec_mean, white_dtec_scale], [hyperparam_mean, hyperparam_scale], iters=iters, learning_rate=learning_rate, parallel_iterations=parallel_iterations) ### # produce the posterior distributions needed hyperparam_dist = self._hyperparam_posterior._build_distribution(hyperparam_mean, hyperparam_scale) if self._mean_hyperparam_approx: # 1, 6 sampled_hyperparams = hyperparam_mean[None, :] else: # num_hyperparams, 6 sampled_hyperparams = hyperparam_dist.sample(self._hyperparam_samples) amp, lengthscales, a, b, timescale, y_sigma = self._constrain_hyperparams(sampled_hyperparams) kern = DTECIsotropicTimeGeneral(variance=tf.math.square(amp), lengthscales=lengthscales, a=a, b=b, timescale=timescale, 
fed_kernel=self._fed_kernel, obs_type=self._obs_type, squeeze=False) # num_hyperparams, N, N K_xx = kern.K(self._X, None) # num_hyperparams, N, N L_xx = safe_cholesky(K_xx) # num_hyperparams, M, N K_yx = kern.K(self._X, self._Xstar) q_mean, q_sqrt = white_dtec_mean, tf.nn.softplus(white_dtec_scale) dtec_data_dist = conditional_same_points(q_mean, q_sqrt, L_xx) dtec_screen_dist = conditional_different_points(q_mean, q_sqrt, L_xx, K_xx, K_yx) return loss, dtec_data_dist, dtec_screen_dist, (amp, lengthscales, a, b, timescale, y_sigma), ( white_dtec_mean, white_dtec_scale), (hyperparam_mean, hyperparam_scale) class VariationalBayesZIsX(object): def __init__(self, Yreal, Yimag, freqs, X, Xstar, y_sigma, dtec_samples=10, kernel_params=None, minibatch_size=None, quadrature_var_exp=False): self._Yreal = Yreal self._Yimag = Yimag self._freqs = freqs self._y_sigma = y_sigma self._invfreqs = tf.constant(TEC_CONV, float_type) * tf.math.reciprocal(freqs) self._X = X self._Xstar = Xstar self._Xconcat = tf.concat([self._X, self._Xstar], axis=0) self.N = tf.shape(self._X)[0] self.Ns = tf.shape(self._Xstar)[0] self._kernel_params = kernel_params self._dtec_samples = tf.convert_to_tensor(dtec_samples, tf.int32, name='num_dtec_samples') self._minibatch_size = tf.convert_to_tensor(minibatch_size,tf.int64) if minibatch_size is not None else None if self._minibatch_size is not None: self._scale = tf.cast(self.N, float_type)/tf.cast(self._minibatch_size, float_type) else: self._scale = tf.constant(1., float_type) self._quadrature_var_exp = quadrature_var_exp self._white_posterior = WhitenedVariationalPosterior(event_size=tf.shape(self._X)[0]) self._hyperparam_bijectors = [ tfp.bijectors.Chain( [tfp.bijectors.AffineScalar(scale=tf.constant(3., float_type)), tfp.bijectors.Softplus()]), tfp.bijectors.Chain( [tfp.bijectors.AffineScalar(scale=tf.constant(15., float_type)), tfp.bijectors.Softplus()]), tfp.bijectors.Chain( [tfp.bijectors.AffineScalar(scale=tf.constant(250., float_type)), 
tfp.bijectors.Softplus()]), tfp.bijectors.Chain( [tfp.bijectors.AffineScalar(scale=tf.constant(100., float_type)), tfp.bijectors.Softplus()]), tfp.bijectors.Chain( [tfp.bijectors.AffineScalar(scale=tf.constant(50., float_type)), tfp.bijectors.Softplus()]) ] def _initial_states(self, batch_size=None): return self._white_posterior.initial_variational_params( batch_size), (tfp.distributions.softplus_inverse(tf.ones((1,5),float_type)),) def _constrain_hyperparams(self, sampled_hyperparams): """ Constrains the samples of hyperparams. :param sampled_hyperparams: tf.Tensor [samples, 6] :return: Tuple of tf.Tensor Each of shape [samples, 1] """ constrained_hyperparams = [] for i in range(len(self._hyperparam_bijectors)): bijector = self._hyperparam_bijectors[i] # num_hyperparams, 1 s = sampled_hyperparams[:, i:i + 1] constrained_hyperparams.append(bijector.forward(s)) return constrained_hyperparams def _loss_fn(self, white_dtec_mean, white_dtec_scale, hyperparams_unconstrained): white_vi_params = (white_dtec_mean, white_dtec_scale) #each 1,1 amp, lengthscales, a, b, timescale = self._constrain_hyperparams(hyperparams_unconstrained) kern = DTECIsotropicTimeGeneral(variance=tf.math.square(amp), lengthscales=lengthscales, a=a, b=b, timescale=timescale, squeeze=False, **self._kernel_params) # num_hyperparams, N, N K = kern.K(self._X, None) # num_hyperparams, N, N L = safe_cholesky(K) # no mean right now def transform_fn(white_dtec): """ Constrain white_dtec to tec L is [B, N, N] :param white_dtec: tf.Tensor [A, N] :return: tf.Tensor [A,B,N] """ # TODO: add mean # L[d,i,j].white_dtec[b,j] -> [b,d,i] # A, B, N return tf.tensordot(white_dtec, L, axes=[[-1], [-1]]) var_exp = self._calculate_var_exp(transform_fn, white_vi_params) dtec_prior_KL = self._dtec_prior_kl(white_vi_params, L) # scalar elbo = var_exp*self._scale - dtec_prior_KL ### # priors on parameters. 
with tf.control_dependencies([tf.print('elbo', elbo, 'var_exp', var_exp, 'dtec_prior', dtec_prior_KL, 'amp', amp, 'lengthscales', lengthscales, 'a', a, 'b', b, 'timescale', timescale, 'y_sigma', self._y_sigma)]): loss = tf.math.negative(elbo, name='loss') return loss def _calculate_var_exp(self, transform_fn, white_vi_params): if not self._quadrature_var_exp: white_dist = self._white_posterior._build_distribution(*white_vi_params) # num_dtec, N white_dtec = white_dist.sample(self._dtec_samples) likelihood = LaplaceLikelihood(self._Yreal, self._Yimag, self._freqs, transform_fn=transform_fn) # TODO: derive better var_exp var_exp = tf.reduce_mean(likelihood.log_prob(white_dtec, self._y_sigma)) return var_exp ## Use Gauss Hermite Quadrature def _dtec_prior_kl(self, white_dtec_params, L): """ Get the KL-div [ Q(white_dtec) || P(white_dtec | hyperparams)] where Q = N[m, S] and S is diagonal and P = |L|^{-1} N[0,I] KL-div [ N[m, S] || |L|^{-1} N[0,I]] = KL-div [ N[m, S] || |L|^{-1} N[0,I]] + log |L| :param white_dtec_params: tuple of tf.Tensor [N+Ns] mean [N+Ns] unconstrained scale :param L: tf.Tensor [num_hyperparams, N, N] :return: tf.Tensor [num_hyperparams] """ logdetL = tf.reduce_sum(tf.math.log(tf.linalg.diag_part(L))) # [N+Ns] q_mean, q_scale = white_dtec_params q_sqrt = tf.nn.softplus(q_scale) q_var = tf.math.square(q_sqrt) trace = tf.reduce_sum(q_var) mahalanobis = tf.reduce_sum(tf.math.square(q_mean)) constant = -tf.cast(tf.size(q_mean, out_type=tf.int64), float_type) logdet_qcov = tf.reduce_sum(tf.math.log(q_var)) twoKL = mahalanobis + constant - logdet_qcov + trace - logdetL return 0.5 * twoKL # # num_hyperparams # # return 0.5 * tf.reduce_sum( # variance + tf.math.square(mean) - tf.constant(1., float_type) - 2. 
* tf.math.log(variance), # axis=-1) + logdetL def _build_variational_posteriors(self, white_vi_params): white_dist = self._white_posterior._build_distribution(*white_vi_params) return white_dist def solve_variational_posterior(self, param_warmstart, hyperparams_warmstart, solver_params=None, parallel_iterations=10): (white_dtec_mean, white_dtec_scale), (hyperparams_unconstrained,) = self._initial_states() # ((white_dtec_mean, white_dtec_scale), (hyperparams_unconstrained,)) = \ # tf.cond(tf.reduce_all(tf.equal(param_warmstart[0], 0.)), # lambda: ((white_dtec_mean, white_dtec_scale), (hyperparams_unconstrained,)), # lambda: (param_warmstart, hyperparams_warmstart), strict=True) (white_dtec_mean, white_dtec_scale) = \ tf.cond(tf.reduce_all(tf.equal(param_warmstart[0], 0.)), lambda: (white_dtec_mean, white_dtec_scale), lambda: param_warmstart, strict=True) # TODO: mini batch and choose larger basis # TODO: speed up kernel computation ^^ help # TODO: fix screen approximation [white_dtec_mean, white_dtec_scale], [hyperparams_unconstrained], loss = \ natural_adam_stochastic_gradient_descent_with_linesearch(self._loss_fn, [white_dtec_mean, white_dtec_scale], [hyperparams_unconstrained], parallel_iterations=parallel_iterations, **solver_params) ### # produce the posterior distributions needed amp, lengthscales, a, b, timescale = self._constrain_hyperparams(hyperparams_unconstrained) kern = DTECIsotropicTimeGeneral(variance=tf.math.square(amp), lengthscales=lengthscales, a=a, b=b, timescale=timescale, squeeze=False, **self._kernel_params) # num_hyperparams, N, N K_x_x = kern.K(self._X, None) # num_hyperparams, N, N L_x_x = safe_cholesky(K_x_x) # num_hyperparams, M, N K_x_xstar = kern.K(self._X, self._Xstar) K_xstar_xstar = kern.K(self._Xstar, None) q_mean, q_sqrt = white_dtec_mean, tf.nn.softplus(white_dtec_scale) dtec_data_dist = conditional_same_points(q_mean, q_sqrt, L_x_x) dtec_screen_dist = conditional_different_points(q_mean, q_sqrt, L_x_x, K_xstar_xstar, K_x_xstar) 
return loss, dtec_data_dist, dtec_screen_dist, (amp, lengthscales, a, b, timescale), ( white_dtec_mean, white_dtec_scale), (hyperparams_unconstrained,) class VariationalBayes(object): def __init__(self, Yreal, Yimag, freqs, X, Xstar, Z, y_sigma, dtec_samples=10, kernel_params=None, minibatch_size=None): self._Yreal = Yreal self._Yimag = Yimag self._freqs = freqs self._y_sigma = y_sigma self._invfreqs = tf.constant(TEC_CONV, float_type) * tf.math.reciprocal(freqs) self._X = X self._Xstar = Xstar self._Z = Z self._Xconcat = tf.concat([self._X, self._Xstar], axis=0) self.N = tf.shape(self._X)[0] self.Ns = tf.shape(self._Xstar)[0] self.Nz = tf.shape(self._Z)[0] self._kernel_params = kernel_params self._dtec_samples = tf.convert_to_tensor(dtec_samples, tf.int32, name='num_dtec_samples') self._minibatch_size = tf.convert_to_tensor(minibatch_size,tf.int64) if minibatch_size is not None else None if self._minibatch_size is not None: self._scale = tf.cast(self.N, float_type)/tf.cast(self._minibatch_size, float_type) else: self._scale = tf.constant(1., float_type) self._white_posterior = WhitenedVariationalPosterior(event_size=self.Nz) self._hyperparam_bijectors = [ tfp.bijectors.Chain( [tfp.bijectors.AffineScalar(scale=tf.constant(3., float_type)), tfp.bijectors.Softplus()]), tfp.bijectors.Chain( [tfp.bijectors.AffineScalar(scale=tf.constant(15., float_type)), tfp.bijectors.Softplus()]), tfp.bijectors.Chain( [tfp.bijectors.AffineScalar(scale=tf.constant(250., float_type)), tfp.bijectors.Softplus()]), tfp.bijectors.Chain( [tfp.bijectors.AffineScalar(scale=tf.constant(100., float_type)), tfp.bijectors.Softplus()]), tfp.bijectors.Chain( [tfp.bijectors.AffineScalar(scale=tf.constant(50., float_type)), tfp.bijectors.Softplus()]) ] self._hyperparam_distributions = [ None,#tfp.distributions.LogNormal(*log_normal_cdf_solve(2., 8., as_tensor=True), name='pert_ant_lengthscale') None, None, None, None ] def _initial_states(self, batch_size=None): return 
self._white_posterior.initial_variational_params( batch_size), (tfp.distributions.softplus_inverse(tf.ones((1,8),float_type)),) def _constrain_hyperparams(self, sampled_hyperparams): """ Constrains the samples of hyperparams. :param sampled_hyperparams: tf.Tensor [samples, 6] :return: Tuple of tf.Tensor Each of shape [samples, 1] """ constrained_hyperparams = [] for i in range(len(self._hyperparam_bijectors)): bijector = self._hyperparam_bijectors[i] # num_hyperparams, 1 s = sampled_hyperparams[:, i:i + 1] constrained_hyperparams.append(bijector.forward(s)) return constrained_hyperparams def _hyperparam_priors(self, *constrained_hyperparams): priors = [] for hp, bij, dist in zip(constrained_hyperparams, self._hyperparam_bijectors, self._hyperparam_distributions): if dist is None: continue priors.append(tf.reduce_sum(dist.log_prob(hp)) - tf.reduce_sum(bij.inverse_log_det_jacobian(hp, 1))) if len(priors) == 0: return tf.constant(0., float_type) return tf.math.accumulate_n(priors, shape=()) def _loss_fn(self, q_mean, q_scale, hyperparams_unconstrained, X, Y): #each 1,1 amp, lengthscales, a, b, timescale = self._constrain_hyperparams(hyperparams_unconstrained) kern = DTECIsotropicTimeGeneral(variance=tf.math.square(amp), lengthscales=lengthscales, a=a, b=b, timescale=timescale, squeeze=False, **self._kernel_params) # 1, 1, Nz, Nz K_z_z = kern.K(self._Z, None) L_z_z = safe_cholesky(K_z_z) # q_mean, q_scale = white_vi_params q_sqrt = tf.nn.softplus(q_scale) dtec_prior_KL = self._dtec_prior_kl(q_mean, q_sqrt, L_z_z) if self._minibatch_size is not None: K_z_xmini = kern.K(self._Z, X) K_xmini_xmini = kern.K(X, None) q_dist = conditional_different_points(q_mean, q_sqrt, L_z_z, K_xmini_xmini, K_z_xmini) dtec_samples = q_dist.sample(self._dtec_samples) likelihood = LaplaceLikelihood(Y[0], Y[1], self._freqs, transform_fn=lambda x: x) # TODO: derive better var_exp var_exp = tf.reduce_mean(likelihood.log_prob(dtec_samples, self._y_sigma)) else: # num_dtec, num_hyperparams, N, N 
L_expanded = tf.tile(tf.expand_dims(L_z_z, 0), [self._dtec_samples, 1, 1, 1]) def transform_fn(white_dtec): """ Constrain white_dtec to tec L is [A, B, N, N] :param white_dtec: tf.Tensor [A, B, N, 1] :return: tf.Tensor [A,B,N] """ # white_dtec[a,b,j,1].L[a,b,i,j] -> white_dtec^T[a,b,1,j].L^T[a,b,j,i] -> [a,b, 1, i] return tf.matmul(white_dtec, L_expanded, transpose_a=True, transpose_b=True)[:,:,0, :] # # A, B, N # return tf.tensordot(white_dtec, L_z_z, axes=[[-1], [-1]]) white_dist = self._white_posterior._build_distribution(q_mean, q_scale) # num_dtec, N white_dtec = white_dist.sample(self._dtec_samples) # num_dtec, 1, N, 1 white_dtec = white_dtec[:, None, :, None] likelihood = LaplaceLikelihood(Y[0], Y[1], self._freqs, transform_fn=transform_fn) var_exp = tf.reduce_mean(likelihood.log_prob(white_dtec, self._y_sigma)) # scalar elbo = var_exp*self._scale - dtec_prior_KL + self._hyperparam_priors(amp, lengthscales, a, b, timescale) # with tf.control_dependencies([tf.print('elbo', elbo, # 'var_exp', var_exp, 'dtec_prior', dtec_prior_KL, # 'amp', amp, 'lengthscales', lengthscales, 'a', a, 'b', b, 'timescale', # timescale, 'y_sigma', self._y_sigma)]): loss = tf.math.negative(elbo, name='loss') return loss def _dtec_prior_kl(self, q_mean, q_sqrt, L): """ Get the KL-div [ Q(white_dtec) || P(white_dtec | hyperparams)] where Q = N[m, S] and S is diagonal and P = |L|^{-1} N[0,I] KL-div [ N[m, S] || |L|^{-1} N[0,I]] = KL-div [ N[m, S] || |L|^{-1} N[0,I]] + log |L| :param white_dtec_params: tuple of tf.Tensor [N+Ns] mean [N+Ns] unconstrained scale :param L: tf.Tensor [num_hyperparams, N, N] :return: tf.Tensor [num_hyperparams] """ logdetL = tf.reduce_sum(tf.math.log(tf.linalg.diag_part(L))) # [N+Ns] q_var = tf.math.square(q_sqrt) trace = tf.reduce_sum(q_var) mahalanobis = tf.reduce_sum(tf.math.square(q_mean)) constant = -tf.cast(tf.size(q_mean, out_type=tf.int64), float_type) logdet_qcov = tf.reduce_sum(tf.math.log(q_var)) twoKL = mahalanobis + constant - logdet_qcov + trace 
- logdetL return 0.5 * twoKL # # num_hyperparams # # return 0.5 * tf.reduce_sum( # variance + tf.math.square(mean) - tf.constant(1., float_type) - 2. * tf.math.log(variance), # axis=-1) + logdetL def _build_variational_posteriors(self, white_vi_params): white_dist = self._white_posterior._build_distribution(*white_vi_params) return white_dist def solve_variational_posterior(self, param_warmstart, solver_params=None, parallel_iterations=10): param_init, (hyperparams_unconstrained,) = self._initial_states() param_warmstart = \ tf.cond(tf.reduce_all(tf.equal(param_warmstart[0], 0.)), lambda: param_init, lambda: param_warmstart, strict=True) # TODO: speed up kernel computation with tf.device('/device:GPU:0' if tf.test.is_gpu_available() else '/device:CPU:0'): learned_params, [learned_hyperparams_unconstrained], loss, t = \ natural_adam_stochastic_gradient_descent_with_linesearch_minibatch(self._loss_fn, self._X, (self._Yreal, self._Yimag), self._minibatch_size, param_warmstart, [hyperparams_unconstrained], parallel_iterations=parallel_iterations, **solver_params) ### # produce the posterior distributions needed # each 1,1 amp, lengthscales, a, b, timescale = self._constrain_hyperparams( learned_hyperparams_unconstrained) kern = DTECIsotropicTimeGeneral(variance=tf.math.square(amp), lengthscales=lengthscales, a=a, b=b, timescale=timescale, squeeze=False, **self._kernel_params) # 1, 1, Nz, Nz K_z_z = kern.K(self._Z, None) L_z_z = safe_cholesky(K_z_z) # num_hyperparams, M, N K_z_xstar = kern.K(self._Z, self._Xstar) K_xstar_xstar = kern.K(self._Xstar, None) q_mean, q_scale = learned_params q_sqrt = tf.nn.softplus(q_scale) dtec_screen_dist = conditional_different_points(q_mean, q_sqrt, L_z_z, K_xstar_xstar, K_z_xstar) # num_hyperparams, M, N K_z_x = kern.K(self._Z, self._X) K_x_x = kern.K(self._X, None) dtec_data_dist = conditional_different_points(q_mean, q_sqrt, L_z_z, K_x_x, K_z_x) dtec_basis_dist = conditional_same_points(q_mean, q_sqrt, L_z_z) return t, loss, 
dtec_basis_dist, dtec_data_dist, dtec_screen_dist, (amp, lengthscales, a, b, timescale), ( q_mean, q_scale) def conditional_same_points(q_mean, q_sqrt, L, prior_mean=None): """ Computes P(tau(X) | Y) = int P(tau(X) | x(X)) Q(x(X)) dx(X) = N[prior_mean + L.q_mean, L.q_sqrt^2.L^T] :param q_mean: tf.Tensor [N] :param q_sqrt: tf.Tensor [N] :param L: tf.Tensor [num_hyperparams, N, N] :param prior_mean: tf.Tensor [num_hyperparams, N] :return: tfp.distributions.MultivariateNormalTriL batch_shape is [num_hyperparams] event_shape is [N] """ #num_hyperparams, N mean = tf.tensordot(L, q_mean, axes=[[-1], [-1]]) # num_hyperparams, N, N scale_tril = L*q_sqrt[None, :]#tf.tensordot(L, q_sqrt, axes=[[-1], [-1]]) if prior_mean is None: return tfp.distributions.MultivariateNormalTriL(loc=mean, scale_tril=scale_tril) return tfp.distributions.MultivariateNormalTriL(loc=prior_mean + mean, scale_tril=scale_tril) def conditional_different_points(q_mean, q_sqrt, L, K_xstar_xstar, K_x_xstar, prior_mean=None): """ Computes P(tau(X) | Y) = int P(tau(Xstar) | x(X)) Q(x(X)) dx(X) = |L(X,X)| N[m(Xstar) + K(Xstar, X) L(X,X)^-T.q_mean, K(Xstar,Xstar) + K(Xstar,X) L(X,X)^-T(q_sqrt^2 - I) L(X,X)^-1 K(X,Xstar)] :param q_mean: tf.Tensor [N] :param q_sqrt: tf.Tensor [N] :param L: tf.Tensor [num_hyperparams, N, N] :param K_xx: tf.Tensor [num_hyperparams, M, M] :param K_yx: tf.Tensor [num_hyperparams, N, M] :param prior_mean: tf.Tensor [num_hyperparams, M] :return: tfp.distributions.MultivariateNormalTriL batch_shape is [num_hyperparams] event_shape is [N] """ ### # conditional one first # [num_hyperparams, M, N] A = tf.linalg.triangular_solve(L, K_x_xstar) B = q_sqrt[:, None] * A # num_hyperparams, N mean = tf.tensordot(A, q_mean, axes=[[-2], [-1]]) f_cov = K_xstar_xstar - tf.matmul(A,A,transpose_a=True) + tf.matmul(B,B,transpose_a=True) L = safe_cholesky(f_cov) if prior_mean is None: return tfp.distributions.MultivariateNormalTriL(loc=mean, scale_tril=L) return 
tfp.distributions.MultivariateNormalTriL(loc=prior_mean + mean, scale_tril=L)
42.526082
217
0.578192
4,514
38,316
4.60833
0.070448
0.037208
0.013749
0.017306
0.783867
0.757235
0.730987
0.706999
0.680752
0.647822
0
0.008835
0.317648
38,316
900
218
42.573333
0.786804
0.182065
0
0.598712
0
0
0.008747
0.000734
0
0
0
0.006667
0
1
0.085837
false
0.004292
0.015021
0.006438
0.188841
0.004292
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
6ab4ee0fad6a311f66ab7ca26f9bd019c65774ff
2,612
py
Python
nova/objects/__init__.py
venusource/nova
0c6e6f180eebe71a3431abf726a0fd0c66578162
[ "Apache-2.0" ]
7
2015-09-22T11:27:16.000Z
2015-11-02T12:33:46.000Z
nova/objects/__init__.py
venusource/nova
0c6e6f180eebe71a3431abf726a0fd0c66578162
[ "Apache-2.0" ]
2
2015-09-07T22:14:46.000Z
2020-08-12T08:51:56.000Z
nova/objects/__init__.py
venusource/nova
0c6e6f180eebe71a3431abf726a0fd0c66578162
[ "Apache-2.0" ]
4
2015-09-09T16:48:56.000Z
2022-03-15T20:52:57.000Z
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE(comstud): You may scratch your head as you see code that imports
# this module and then accesses attributes for objects such as Instance,
# etc, yet you do not see these attributes in here. Never fear, there is
# a little bit of magic. When objects are registered, an attribute is set
# on this module automatically, pointing to the newest/latest version of
# the object.


def register_all():
    """Import every objects submodule so each object registers itself.

    Registration happens as a side effect of importing the submodule;
    services that may receive an object via RPC must have it imported
    here.
    """
    # NOTE(danms): You must make sure your object gets imported in this
    # function in order for it to be registered by services that may
    # need to receive it via RPC.
    for module in (
            'nova.objects.agent',
            'nova.objects.aggregate',
            'nova.objects.bandwidth_usage',
            'nova.objects.block_device',
            'nova.objects.compute_node',
            'nova.objects.dns_domain',
            'nova.objects.ec2',
            'nova.objects.external_event',
            'nova.objects.fixed_ip',
            'nova.objects.flavor',
            'nova.objects.floating_ip',
            'nova.objects.hv_spec',
            'nova.objects.instance',
            'nova.objects.instance_action',
            'nova.objects.instance_fault',
            'nova.objects.instance_group',
            'nova.objects.instance_info_cache',
            'nova.objects.instance_numa_topology',
            'nova.objects.instance_pci_requests',
            'nova.objects.keypair',
            'nova.objects.migration',
            'nova.objects.network',
            'nova.objects.network_request',
            'nova.objects.numa',
            'nova.objects.pci_device',
            'nova.objects.pci_device_pool',
            'nova.objects.tag',
            'nova.objects.quotas',
            'nova.objects.security_group',
            'nova.objects.security_group_rule',
            'nova.objects.service',
            'nova.objects.virt_cpu_topology',
            'nova.objects.virtual_interface',
    ):
        __import__(module)
43.533333
78
0.741194
346
2,612
5.132948
0.465318
0.185811
0.315878
0.098536
0.063063
0
0
0
0
0
0
0.004091
0.157734
2,612
59
79
44.271186
0.803182
0.420368
0
0
0
0
0.540323
0.415995
0
0
0
0
0
1
0.029412
true
0
0.970588
0
1
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
6ac1d9757ccd2bbe3df86cb15bb6378a4a32ae5b
78
py
Python
basic_python_practice/functions.py
MylesWritesCode/web_dev_practice
769bad96cd19afeeda3b1dbbed1c34823fe1502f
[ "MIT" ]
null
null
null
basic_python_practice/functions.py
MylesWritesCode/web_dev_practice
769bad96cd19afeeda3b1dbbed1c34823fe1502f
[ "MIT" ]
null
null
null
basic_python_practice/functions.py
MylesWritesCode/web_dev_practice
769bad96cd19afeeda3b1dbbed1c34823fe1502f
[ "MIT" ]
null
null
null
#! /usr/bin/python
"""Minimal CGI response: emit an HTTP content-type header and a test body."""

# Use the parenthesized print() form: with a single argument it behaves
# identically under Python 2, and it also runs under Python 3, where the
# original `print 'x'` statement syntax is a SyntaxError.
print('Content-type: text/html')
# A blank line terminates the CGI/HTTP header block before the body.
print('')
print('Testing')
11.142857
31
0.679487
11
78
4.818182
0.818182
0
0
0
0
0
0
0
0
0
0
0
0.141026
78
6
32
13
0.791045
0.217949
0
0
0
0
0.5
0
0
0
0
0
0
0
null
null
0
0
null
null
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
4
6aca50a78b1cd1bf3a4d7c2626fdf5ca3b762357
8,430
py
Python
challenges/Cereal_Mixup__A_Cereal_Vending_Machine_Controller/support/breakfast.py
pingjuiliao/cb-multios
64ededd0b87030eda7c40c4388a4ad8283712d8e
[ "MIT" ]
473
2016-08-01T12:48:16.000Z
2022-03-09T18:13:14.000Z
challenges/Cereal_Mixup__A_Cereal_Vending_Machine_Controller/support/breakfast.py
pingjuiliao/cb-multios
64ededd0b87030eda7c40c4388a4ad8283712d8e
[ "MIT" ]
71
2016-08-01T03:33:44.000Z
2022-03-09T18:37:04.000Z
challenges/Cereal_Mixup__A_Cereal_Vending_Machine_Controller/support/breakfast.py
pingjuiliao/cb-multios
64ededd0b87030eda7c40c4388a4ad8283712d8e
[ "MIT" ]
121
2016-08-01T04:07:53.000Z
2022-03-07T11:08:09.000Z
#!/usr/bin/env python # # Copyright (C) 2014 Narf Industries <info@narfindustries.com> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # from random import choice, randint import support as sp from common import DEBUG, CONFIG # Plain content, send SVU as UINT32 and STI as UCHAR # Serialized content, send SVU as UINT32, STI as UINT32, and name as series of chars. 
class Liquids(object): def __init__(self): self.serialVersionUID = 0 self.typeName = "Liquids" self.subTypeID = 0 def rand_content(self): self.subTypeID = randint(0,4) def get_plain_content(self): pc = '' pc += sp.pack_single_uint32(self.serialVersionUID) pc += sp.pack_single_uint8(self.subTypeID) return pc def get_serialized_content(self): sc = '' sc += sp.pack_single_uint32(self.serialVersionUID) sc += sp.pack_single_string(self.typeName) sc += sp.pack_single_uint32(self.subTypeID) return sc def __eq__(self, other): return self.serialVersionUID == other.serialVersionUID \ and self.subTypeID == other.subTypeID def __hash__(self): return hash( ("serialVersionUID", self.serialVersionUID, "subTypeID", self.subTypeID)) def __str__(self): return "SVU {0}, {1}, SubType {2}".format(self.serialVersionUID, self.typeName, self.subTypeID) def __repr__(self): return self.__str__() class Cereals(object): def __init__(self): self.serialVersionUID = 1 self.typeName = "Cereals" self.subTypeID = 0 def rand_content(self): self.subTypeID = randint(0,6) def get_plain_content(self): pc = '' pc += sp.pack_single_uint32(self.serialVersionUID) pc += sp.pack_single_uint8(self.subTypeID) return pc def get_serialized_content(self): sc = '' sc += sp.pack_single_uint32(self.serialVersionUID) sc += sp.pack_single_string(self.typeName) sc += sp.pack_single_uint32(self.subTypeID) return sc def __eq__(self, other): return self.serialVersionUID == other.serialVersionUID \ and self.subTypeID == other.subTypeID def __hash__(self): return hash( ("serialVersionUID", self.serialVersionUID, "subTypeID", self.subTypeID)) def __str__(self): return "SVU {0}, {1}, SubType {2}".format(self.serialVersionUID, self.typeName, self.subTypeID) def __repr__(self): return self.__str__() class Toppings(object): def __init__(self): self.serialVersionUID = 2 self.typeName = "Toppings" self.subTypeID = 0 def rand_content(self): self.subTypeID = randint(0,4) def get_plain_content(self): pc = '' pc += 
sp.pack_single_uint32(self.serialVersionUID) pc += sp.pack_single_uint8(self.subTypeID) return pc def get_serialized_content(self): sc = '' sc += sp.pack_single_uint32(self.serialVersionUID) sc += sp.pack_single_string(self.typeName) sc += sp.pack_single_uint32(self.subTypeID) return sc def __eq__(self, other): return self.serialVersionUID == other.serialVersionUID \ and self.subTypeID == other.subTypeID def __hash__(self): return hash( ("serialVersionUID", self.serialVersionUID, "subTypeID", self.subTypeID)) def __str__(self): return "SVU {0}, {1}, SubType {2}".format(self.serialVersionUID, self.typeName, self.subTypeID) def __repr__(self): return self.__str__() class GenericString(object): def __init__(self): self.serialVersionUID = 3 self.typeName = "GenericString" self.str = "" def rand_content(self): self.str = sp.random_string(randint(5, 25)) def get_plain_content(self): pc = '' pc += sp.pack_single_uint32(self.serialVersionUID) pc += sp.pack_single_string(self.str) pc += sp.pack_single_char('\0') return pc def get_serialized_content(self): sc = '' sc += sp.pack_single_uint32(self.serialVersionUID) sc += sp.pack_single_string(self.typeName) sc += sp.pack_single_string(self.str) sc += sp.pack_single_char('\0') return sc def __str__(self): return "SVU {0}, {1}, Str {2}".format(self.serialVersionUID, self.typeName, self.str) def __repr__(self): return self.__str__() class PrinterString(object): def __init__(self): self.serialVersionUID = 4 self.typeName = "PrinterString" self.str = "" def rand_content(self): self.str = sp.random_string(randint(5, 25)) def get_plain_content(self): pc = '' pc += sp.pack_single_uint32(self.serialVersionUID) pc += sp.pack_single_string(self.str) pc += sp.pack_single_char('\0') return pc def get_serialized_content(self): sc = '' sc += sp.pack_single_uint32(self.serialVersionUID) sc += sp.pack_single_string(self.typeName) sc += sp.pack_single_string(self.str) sc += sp.pack_single_char('\0') return sc def __str__(self): return "SVU 
{0}, {1}, Str {2}".format(self.serialVersionUID, self.typeName, self.str) def __repr__(self): return self.__str__() class CommandRunner(object): def __init__(self): self.serialVersionUID = 5 self.typeName = "CommandRunner" self.fn_addr = '' self.args = [] def rand_content(self): self.fn_addr = 'AADD' self.args = [128, 1024, 4096] def get_plain_content(self): pc = '' pc += sp.pack_single_uint32(self.serialVersionUID) pc += sp.pack_single_uint16(1 + len(self.args)) pc += sp.pack_single_string(self.fn_addr) pc += sp.pack_single_char(' ') pc += sp.pack_single_uint32(self.args[0]) pc += sp.pack_single_char(' ') pc += sp.pack_single_uint32(self.args[1]) pc += sp.pack_single_char(' ') pc += sp.pack_single_uint32(self.args[2]) pc += sp.pack_single_char('\0') return pc def get_serialized_content(self): sc = '' sc += sp.pack_single_uint32(self.serialVersionUID) sc += sp.pack_single_string(self.typeName) sc += sp.pack_single_uint16(1 + len(self.args)) sc += sp.pack_single_string(self.fn_addr) sc += sp.pack_single_char(' ') sc += sp.pack_single_uint32(self.args[0]) sc += sp.pack_single_char(' ') sc += sp.pack_single_uint32(self.args[1]) sc += sp.pack_single_char(' ') sc += sp.pack_single_uint32(self.args[2]) sc += sp.pack_single_char('\0') return sc def __str__(self): return "SVU {0}, {1}, fn_addr 0x{2}, args {3}".format(self.serialVersionUID, self.typeName, self.fn_addr, self.args) def __repr__(self): return self.__str__() if __name__ == '__main__': b = [] for item_type in [Liquids, Cereals, Toppings, GenericString, PrinterString, CommandRunner]: item = item_type() item.rand_content() print item print ''.join(["\\x{0:02x}".format(ord(c)) for c in item.get_plain_content()]) print ''.join(["\\x{0:02x}".format(ord(c)) for c in item.get_serialized_content()]) b.append(item) print b
32.548263
124
0.641874
1,079
8,430
4.761817
0.167748
0.058388
0.116777
0.076294
0.719735
0.707474
0.64383
0.631763
0.624757
0.624757
0
0.020568
0.244484
8,430
258
125
32.674419
0.786152
0.146738
0
0.741758
0
0
0.04744
0
0
0
0
0
0
0
null
null
0
0.016484
null
null
0.021978
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
0a725ad236bbad80eef198a278c063064e7d696e
192
py
Python
tenable/ad/preference/schema.py
Rogdham/pyTenable
79f3f7360f8ef31b964f1db99d0c7b8a0bc25d7a
[ "MIT" ]
1
2022-03-01T17:17:19.000Z
2022-03-01T17:17:19.000Z
tenable/ad/preference/schema.py
Rogdham/pyTenable
79f3f7360f8ef31b964f1db99d0c7b8a0bc25d7a
[ "MIT" ]
null
null
null
tenable/ad/preference/schema.py
Rogdham/pyTenable
79f3f7360f8ef31b964f1db99d0c7b8a0bc25d7a
[ "MIT" ]
1
2022-03-01T17:17:30.000Z
2022-03-01T17:17:30.000Z
from marshmallow import fields
from tenable.ad.base.schema import CamelCaseSchema


class PreferenceSchema(CamelCaseSchema):
    """Schema for user-preference payloads (field names camelCased on the wire)."""

    # UI language code.
    language = fields.Str()
    # Identifier of the profile marked as preferred.
    preferred_profile_id = fields.Int()
27.428571
50
0.796875
22
192
6.863636
0.772727
0
0
0
0
0
0
0
0
0
0
0
0.130208
192
7
51
27.428571
0.904192
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.4
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
0a7cec209e7d0c5ce38da5abe7abce9dfe627d69
147
py
Python
app/tests/test_models.py
vmasten/stock_portfolio
75a7f17e4891b1ca1374b5e1e5d83dd891b9fddd
[ "MIT" ]
null
null
null
app/tests/test_models.py
vmasten/stock_portfolio
75a7f17e4891b1ca1374b5e1e5d83dd891b9fddd
[ "MIT" ]
1
2018-12-07T03:57:51.000Z
2018-12-07T03:57:51.000Z
app/tests/test_models.py
vmasten/stock_portfolio
75a7f17e4891b1ca1374b5e1e5d83dd891b9fddd
[ "MIT" ]
null
null
null
from ..models import db, Company
import pytest


def test_company_all(session):
    """A fresh database contains zero Company records."""
    assert len(Company.query.all()) == 0
18.375
35
0.721088
20
147
5.2
0.75
0
0
0
0
0
0
0
0
0
0
0.008264
0.176871
147
7
36
21
0.85124
0
0
0
0
0
0
0
0
0
0
0
0.2
1
0.2
false
0
0.4
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
0a8ed9e5e4062d85b79f14a7e4b14e26b204c710
124
py
Python
castoredc_api_client/__init__.py
andreasCastor/castoredc_api
ef0bd4eb8ac2efaa7e98e8462de7e5a7aa65a7f0
[ "MIT" ]
null
null
null
castoredc_api_client/__init__.py
andreasCastor/castoredc_api
ef0bd4eb8ac2efaa7e98e8462de7e5a7aa65a7f0
[ "MIT" ]
null
null
null
castoredc_api_client/__init__.py
andreasCastor/castoredc_api
ef0bd4eb8ac2efaa7e98e8462de7e5a7aa65a7f0
[ "MIT" ]
null
null
null
from .castoredc_api_client import CastorClient from .exceptions.exceptions import CastorException, castor_exception_handler
41.333333
76
0.895161
14
124
7.642857
0.785714
0
0
0
0
0
0
0
0
0
0
0
0.072581
124
2
77
62
0.930435
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
0aaf100092b165c2ae2f26adb86157b1723133db
160
py
Python
src/main/python/app/controllers/WelcomeController.py
karlpet/WadLauncher
512f5d28de5c57e4dffdc642b170891a99a00ea8
[ "MIT" ]
2
2020-09-06T11:16:30.000Z
2020-09-15T17:11:34.000Z
src/main/python/app/controllers/WelcomeController.py
karlpet/WadLauncher
512f5d28de5c57e4dffdc642b170891a99a00ea8
[ "MIT" ]
74
2020-09-07T16:40:54.000Z
2021-06-18T00:22:39.000Z
src/main/python/app/controllers/WelcomeController.py
karlpet/WadLauncher
512f5d28de5c57e4dffdc642b170891a99a00ea8
[ "MIT" ]
null
null
null
import sys

from app.views.WelcomeView import WelcomeView


class WelcomeController:
    """Controller that owns the welcome screen's view."""

    def __init__(self, root, models):
        # `models` is accepted by the constructor but not stored here —
        # only the view is created, attached under `root`.
        self.view = WelcomeView(root)
20
45
0.74375
19
160
6.052632
0.736842
0
0
0
0
0
0
0
0
0
0
0
0.18125
160
7
46
22.857143
0.877863
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.4
0
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
0adc4c79b6410b58cb55a5a74adf42e173a6dfef
42
py
Python
pywizlight/_version.py
fabaff/pywizlight
395e63846dd8bcfc99a65d50252c6a71e02590c4
[ "MIT" ]
1
2021-04-02T17:22:52.000Z
2021-04-02T17:22:52.000Z
pywizlight/_version.py
fabaff/pywizlight
395e63846dd8bcfc99a65d50252c6a71e02590c4
[ "MIT" ]
null
null
null
pywizlight/_version.py
fabaff/pywizlight
395e63846dd8bcfc99a65d50252c6a71e02590c4
[ "MIT" ]
null
null
null
"""PyPi Version.""" __version__ = "0.3.4"
14
21
0.595238
6
42
3.5
0.833333
0
0
0
0
0
0
0
0
0
0
0.081081
0.119048
42
2
22
21
0.486486
0.309524
0
0
0
0
0.217391
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
0ae36535434841c27eb6d1a1d3d898b7d0cdfd3c
280
py
Python
appengine_config.py
mayankgosain/python_slack_bot-master
03d83384fdd7bc248ecf8713ca4d80013ba17808
[ "Apache-2.0" ]
null
null
null
appengine_config.py
mayankgosain/python_slack_bot-master
03d83384fdd7bc248ecf8713ca4d80013ba17808
[ "Apache-2.0" ]
1
2015-07-28T10:27:52.000Z
2015-07-28T10:27:52.000Z
appengine_config.py
mayankgosain/python_slack_bot-master
03d83384fdd7bc248ecf8713ca4d80013ba17808
[ "Apache-2.0" ]
null
null
null
"""`appengine_config` gets loaded when starting a new application instance.""" import site import os.path # add `lib` subdirectory as a site packages directory, so our `main` module can load # third-party libraries. site.addsitedir(os.path.join(os.path.dirname(__file__), 'lib'))
40
84
0.764286
42
280
4.97619
0.785714
0.086124
0
0
0
0
0
0
0
0
0
0
0.117857
280
6
85
46.666667
0.846154
0.639286
0
0
0
0
0.031915
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
7c25f480a7e4db2b9fea4c8ef910e2b139377c72
1,360
py
Python
beta.py
joenghl/SwarmSim
b06f27e91f7a6cba886aa06734f38f0ac006d6c0
[ "MIT" ]
null
null
null
beta.py
joenghl/SwarmSim
b06f27e91f7a6cba886aa06734f38f0ac006d6c0
[ "MIT" ]
null
null
null
beta.py
joenghl/SwarmSim
b06f27e91f7a6cba886aa06734f38f0ac006d6c0
[ "MIT" ]
null
null
null
import argparse
import sys

import numpy as np
import torch
from torch import Tensor, autograd
from torch.autograd import Variable

# Sanity checks: which interpreter is running, and whether CUDA is usable.
print(sys.executable)
print(torch.cuda.is_available())

# The AirSim two-drone experiment below is kept for reference but disabled.
# import airsim
# client = airsim.MultirotorClient()
# client.confirmConnection()
# client.enableApiControl(True, "Drone1")
# client.enableApiControl(True, "Drone2")
# client.armDisarm(True, "Drone1")
# client.armDisarm(True, "Drone2")
# raise Exception("valid")
# # a = client.getMultirotorState(vehicle_name="Drone1").kinematics_estimated.position
# # b = client.getMultirotorState(vehicle_name="Drone2").kinematics_estimated.position
# f1 = client.takeoffAsync(vehicle_name="Drone1")
# f2 = client.takeoffAsync(vehicle_name="Drone2")
# f1.join()
# f2.join()
# f1 = client.moveToPositionAsync(5, 5, -10, 5, vehicle_name="Drone1")
# f2 = client.moveToPositionAsync(5, 5, -10, 5, vehicle_name="Drone2")
# f1.join()
# f2.join()
# a = client.getGpsData(vehicle_name="Drone1")
# b = client.getMultirotorState(vehicle_name="Drone2").gps_location.latitude
# print(a, b)
# airsim.wait_key('Press any key to reset to original state')
# client.armDisarm(False, "Drone1")
# client.armDisarm(False, "Drone2")
# client.reset()
# # that's enough fun for now. let's quit cleanly
# client.enableApiControl(False, "Drone1")
# client.enableApiControl(False, "Drone2")
30.909091
86
0.751471
172
1,360
5.866279
0.377907
0.087215
0.067393
0.104063
0.243806
0.2111
0.127849
0.081269
0
0
0
0.027869
0.102941
1,360
44
87
30.909091
0.79918
0.802941
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.777778
0
0.777778
0.222222
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
7c280d829598b4adc58515af003be7dcab324652
188
py
Python
setup.py
zoomie/homemade_steganog
1ab0a140b6a2e0d9d36073d067a2c808c97adf38
[ "MIT" ]
1
2019-03-12T13:25:43.000Z
2019-03-12T13:25:43.000Z
setup.py
zoomie/homemade_encryption
1ab0a140b6a2e0d9d36073d067a2c808c97adf38
[ "MIT" ]
4
2020-03-24T16:43:01.000Z
2022-03-11T23:39:53.000Z
setup.py
zoomie/homemade_encryption
1ab0a140b6a2e0d9d36073d067a2c808c97adf38
[ "MIT" ]
null
null
null
from distutils.core import setup

# Packaging metadata for the homemade_steganog distribution.
setup(
    name='homemade_steganog',
    version='0.3.0',
    license='MIT',
    packages=['homemade_steganog',],
    install_requires=['scikit-image'],
)
20.888889
38
0.670213
22
188
5.590909
0.818182
0.260163
0
0
0
0
0
0
0
0
0
0.019108
0.164894
188
9
39
20.888889
0.764331
0
0
0
0
0
0.285714
0
0
0
0
0
0
1
0
true
0
0.125
0
0.125
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
7c4414ad42002173728a604f0b54bdc51ac158a5
547
py
Python
lfs/customer/migrations/0005_auto_20210406_1513.py
michael-hahn/django-lfs
26c3471a8f8d88269c84f714f507b952dfdb6397
[ "BSD-3-Clause" ]
null
null
null
lfs/customer/migrations/0005_auto_20210406_1513.py
michael-hahn/django-lfs
26c3471a8f8d88269c84f714f507b952dfdb6397
[ "BSD-3-Clause" ]
null
null
null
lfs/customer/migrations/0005_auto_20210406_1513.py
michael-hahn/django-lfs
26c3471a8f8d88269c84f714f507b952dfdb6397
[ "BSD-3-Clause" ]
null
null
null
# Generated by Django 3.1.2 on 2021-04-06 15:13

from django.db import migrations


class Migration(migrations.Migration):
    """Drop the synthesized, taints and trusted columns from customer."""

    dependencies = [
        ('customer', '0004_auto_20210406_1434'),
    ]

    # One RemoveField per dropped column.
    operations = [
        migrations.RemoveField(model_name='customer', name='synthesized'),
        migrations.RemoveField(model_name='customer', name='taints'),
        migrations.RemoveField(model_name='customer', name='trusted'),
    ]
21.038462
48
0.550274
49
547
6.020408
0.632653
0.213559
0.264407
0.305085
0.427119
0.427119
0
0
0
0
0
0.085635
0.338208
547
25
49
21.88
0.729282
0.082267
0
0.473684
1
0
0.158
0.046
0
0
0
0
0
1
0
false
0
0.052632
0
0.210526
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
7c58d45604fb37f69a3ee612ec766061ac0618d5
232
py
Python
mmdet/utils/deployment/operations_domain.py
morkovka1337/mmdetection
5187d94b6c96084b17817249622d6e4520213ae6
[ "Apache-2.0" ]
58
2020-09-21T08:17:26.000Z
2022-03-31T19:38:14.000Z
mmdet/utils/deployment/operations_domain.py
morkovka1337/mmdetection
5187d94b6c96084b17817249622d6e4520213ae6
[ "Apache-2.0" ]
170
2020-09-08T12:29:06.000Z
2022-03-31T18:28:09.000Z
mmdet/utils/deployment/operations_domain.py
morkovka1337/mmdetection
5187d94b6c96084b17817249622d6e4520213ae6
[ "Apache-2.0" ]
21
2020-10-06T13:49:41.000Z
2022-03-30T14:52:45.000Z
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

# ONNX domain prefix applied to OpenVINO custom operators.
DOMAIN_CUSTOM_OPS_NAME = 'org.openvinotoolkit'


def add_domain(name_operator: str) -> str:
    """Return *name_operator* qualified with the custom-ops domain prefix."""
    return '::'.join((DOMAIN_CUSTOM_OPS_NAME, name_operator))
29
56
0.758621
32
232
5.21875
0.71875
0.143713
0.179641
0.227545
0
0
0
0
0
0
0
0.049261
0.125
232
7
57
33.142857
0.773399
0.331897
0
0
0
0
0.139073
0
0
0
0
0
0
1
0.333333
false
0
0
0.333333
0.666667
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
4
7c596c3c8b72ea6024b27bd0c56984709592b557
240
py
Python
trajectory/utils/__init__.py
hyyh28/trajectory-transformer
4a369b6d1c950c76d1792cf004644fa13040319c
[ "MIT" ]
63
2021-11-23T08:00:27.000Z
2022-03-31T04:03:05.000Z
trajectory/utils/__init__.py
hyyh28/trajectory-transformer
4a369b6d1c950c76d1792cf004644fa13040319c
[ "MIT" ]
7
2021-12-08T04:01:13.000Z
2022-03-31T07:42:37.000Z
trajectory/utils/__init__.py
hyyh28/trajectory-transformer
4a369b6d1c950c76d1792cf004644fa13040319c
[ "MIT" ]
12
2021-12-13T10:55:32.000Z
2022-03-24T09:06:22.000Z
from .setup import Parser, watch from .arrays import * from .serialization import * from .progress import Progress, Silent from .rendering import make_renderer # from .video import * from .config import Config from .training import Trainer
26.666667
38
0.795833
32
240
5.9375
0.5
0.157895
0
0
0
0
0
0
0
0
0
0
0.145833
240
8
39
30
0.926829
0.083333
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
7c74ac5a8df263460eb4d995c96d663a8ecb20f1
436
py
Python
Admission Counselling For Direct Second Year/Web-Application/AdmissionDirectSecondYear/TopColleges/models.py
atharvaagrawal/direct-second-year-admission-analysis
4744753c5b69d5e06211f006d56150997793c5bf
[ "MIT" ]
null
null
null
Admission Counselling For Direct Second Year/Web-Application/AdmissionDirectSecondYear/TopColleges/models.py
atharvaagrawal/direct-second-year-admission-analysis
4744753c5b69d5e06211f006d56150997793c5bf
[ "MIT" ]
1
2020-03-25T11:06:18.000Z
2020-03-25T11:06:18.000Z
Admission Counselling For Direct Second Year/Web-Application/AdmissionDirectSecondYear/TopColleges/models.py
atharvaagrawal/direct-second-year-admission-analysis
4744753c5b69d5e06211f006d56150997793c5bf
[ "MIT" ]
null
null
null
from django.db import models


# Create your models here.
class TopCollegesModel(models.Model):
    """One row of the 2019 India top-200 colleges ranking table."""

    institute_id = models.CharField(max_length=300)
    college_name = models.CharField(max_length=300)
    city = models.CharField(max_length=300)
    state = models.CharField(max_length=300)
    score = models.FloatField()
    crank = models.IntegerField()

    class Meta:
        # Map onto the pre-existing table instead of a generated name.
        db_table = "IndiaTop200College2019"
27.25
53
0.697248
50
436
5.94
0.56
0.20202
0.242424
0.323232
0.363636
0
0
0
0
0
0
0.055394
0.213303
436
15
54
29.066667
0.810496
0.055046
0
0
0
0
0.055838
0.055838
0
0
0
0
0
1
0
false
0
0.1
0
0.9
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
7c7e189ba8ab8260e471ec212e19a2773d5fcf83
174
py
Python
environments/obstacle_car_o/colors.py
lhk/baselines
18eab9df4f74b5dac276bce64c13554d518618f7
[ "MIT" ]
null
null
null
environments/obstacle_car_o/colors.py
lhk/baselines
18eab9df4f74b5dac276bce64c13554d518618f7
[ "MIT" ]
null
null
null
environments/obstacle_car_o/colors.py
lhk/baselines
18eab9df4f74b5dac276bce64c13554d518618f7
[ "MIT" ]
1
2021-03-17T13:26:49.000Z
2021-03-17T13:26:49.000Z
import pygame

_Color = pygame.Color

# Primary colors (RGB).
red = _Color(255, 0, 0)
green = _Color(0, 255, 0)
blue = _Color(0, 0, 255)
# Grayscale extremes.
white = _Color(255, 255, 255)
black = _Color(0, 0, 0)
21.75
35
0.666667
32
174
3.625
0.3125
0.474138
0.310345
0.224138
0
0
0
0
0
0
0
0.184932
0.16092
174
7
36
24.857143
0.609589
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
7c9489c18fb126a2e30051daf58cddbd901b41de
1,164
py
Python
dlrnapi_client/__init__.py
ssbarnea/dlrnapi_client
e09d278713cdd16cea69bfc64689166d2a886f19
[ "Apache-2.0" ]
null
null
null
dlrnapi_client/__init__.py
ssbarnea/dlrnapi_client
e09d278713cdd16cea69bfc64689166d2a886f19
[ "Apache-2.0" ]
null
null
null
dlrnapi_client/__init__.py
ssbarnea/dlrnapi_client
e09d278713cdd16cea69bfc64689166d2a886f19
[ "Apache-2.0" ]
null
null
null
# coding: utf-8

"""
    DLRN API

    DLRN API client

    OpenAPI spec version: 1.0.0
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

# import models into sdk package
from dlrnapi_client.models.ci_vote import CIVote  # noqa
from dlrnapi_client.models.metrics import Metrics  # noqa
from dlrnapi_client.models.metrics import MetricsRequest  # noqa
from dlrnapi_client.models.model_import import ModelImport  # noqa
from dlrnapi_client.models.params import Params  # noqa
from dlrnapi_client.models.params_1 import Params1  # noqa
from dlrnapi_client.models.params_2 import Params2  # noqa
from dlrnapi_client.models.params_3 import Params3  # noqa
from dlrnapi_client.models.promotion import Promotion  # noqa
from dlrnapi_client.models.promotion_query import PromotionQuery  # noqa
from dlrnapi_client.models.repo import Repo  # noqa

# import apis into sdk package
from dlrnapi_client.apis.default_api import DefaultApi  # noqa

# import ApiClient
from dlrnapi_client.api_client import ApiClient  # noqa
from dlrnapi_client.configuration import Configuration

# Module-level default configuration shared by clients of this package.
configuration = Configuration()
30.631579
72
0.804124
160
1,164
5.68125
0.31875
0.169417
0.261826
0.278328
0.440044
0.380638
0.088009
0
0
0
0
0.00997
0.138316
1,164
37
73
31.459459
0.896311
0.237973
0
0
1
0
0
0
0
0
0
0
0
1
0
false
0
0.9375
0
0.9375
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
7c9a3aaea871b1e086e68d4e1b27037efb98d7d5
89
py
Python
general/glassnode_api.py
RichardRed0x/checkonchain
2a2c1b50fb9f31c9afc01e97095ca09d62b41860
[ "ISC" ]
null
null
null
general/glassnode_api.py
RichardRed0x/checkonchain
2a2c1b50fb9f31c9afc01e97095ca09d62b41860
[ "ISC" ]
null
null
null
general/glassnode_api.py
RichardRed0x/checkonchain
2a2c1b50fb9f31c9afc01e97095ca09d62b41860
[ "ISC" ]
null
null
null
#bd859ac9-3f51-4e09-a41e-5dcdfd3e99ec client = 'https://api.glassnode.com/v1/metrics/'
17.8
48
0.752809
12
89
5.583333
1
0
0
0
0
0
0
0
0
0
0
0.204819
0.067416
89
5
48
17.8
0.60241
0.404494
0
0
0
0
0.72549
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
7c9ade3fd0e041b55112f1301882f6fd3419bf11
138
py
Python
nigsp/tests/test_viz.py
smoia/nigsp
75eab5e428e5b28b4a5c174b3aeb69b8172cf9f5
[ "Apache-2.0" ]
2
2022-03-21T14:53:39.000Z
2022-03-24T15:39:45.000Z
nigsp/tests/test_viz.py
smoia/nigsp
75eab5e428e5b28b4a5c174b3aeb69b8172cf9f5
[ "Apache-2.0" ]
6
2022-03-21T14:57:12.000Z
2022-03-28T12:55:52.000Z
nigsp/tests/test_viz.py
smoia/nigsp
75eab5e428e5b28b4a5c174b3aeb69b8172cf9f5
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 """Tests for viz.""" from pytest import mark, raises from nigsp import viz # ### Unit tests # ### Break tests
11.5
31
0.652174
20
138
4.5
0.75
0
0
0
0
0
0
0
0
0
0
0.009009
0.195652
138
11
32
12.545455
0.801802
0.442029
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
7c9c2300ca97b500eac86c8d54d83d9fb9245ad0
157
py
Python
spine_json_lib/__init__.py
jesdi/spine-json-lib
15767302d829738ddd21e0249ceba21874a6a052
[ "MIT" ]
null
null
null
spine_json_lib/__init__.py
jesdi/spine-json-lib
15767302d829738ddd21e0249ceba21874a6a052
[ "MIT" ]
null
null
null
spine_json_lib/__init__.py
jesdi/spine-json-lib
15767302d829738ddd21e0249ceba21874a6a052
[ "MIT" ]
null
null
null
# Package release version.
__version__ = '0.2.4'
# Project homepage.
__url__ = 'https://github.com/socialpoint-labs/spine-json-lib'

from spine_json_lib.spine_animation_editor import SpineAnimationEditor
31.4
70
0.815287
22
157
5.272727
0.818182
0.155172
0.206897
0
0
0
0
0
0
0
0
0.020548
0.070064
157
4
71
39.25
0.773973
0
0
0
0
0
0.350318
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
7ca8c319a0444c442f2da164641f164f6047cdcf
280
py
Python
django/db/backends/base/validation.py
wfxiang08/django197
c760d7f7cfbbb54a248dd303ef73206c43d80fbd
[ "BSD-3-Clause" ]
null
null
null
django/db/backends/base/validation.py
wfxiang08/django197
c760d7f7cfbbb54a248dd303ef73206c43d80fbd
[ "BSD-3-Clause" ]
null
null
null
django/db/backends/base/validation.py
wfxiang08/django197
c760d7f7cfbbb54a248dd303ef73206c43d80fbd
[ "BSD-3-Clause" ]
null
null
null
# -*- coding:utf-8 -*-


class BaseDatabaseValidation(object):
    """
    This class encapsulates all backend-specific model validation.
    """

    def __init__(self, connection):
        # Database connection the validation checks run against.
        self.connection = connection

    def check_field(self, field, **kwargs):
        """Base implementation: report no backend-specific field errors."""
        return []
25.454545
66
0.646429
28
280
6.285714
0.75
0.159091
0
0
0
0
0
0
0
0
0
0.00463
0.228571
280
10
67
28
0.810185
0.3
0
0
0
0
0
0
0
0
0
0
0
1
0.4
false
0
0
0.2
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
7cb6167a5a519c96613e6d16d7b003ebdc20fa6e
393
py
Python
scripts/rpc/net.py
zealoussnow/spdk
3148c48079731e121ec2fc81fb15293219da1789
[ "BSD-3-Clause" ]
13
2021-08-23T03:37:46.000Z
2022-02-16T03:00:09.000Z
scripts/rpc/net.py
zealoussnow/spdk
3148c48079731e121ec2fc81fb15293219da1789
[ "BSD-3-Clause" ]
2
2021-11-12T10:19:47.000Z
2021-12-21T14:26:36.000Z
scripts/rpc/net.py
zealoussnow/spdk
3148c48079731e121ec2fc81fb15293219da1789
[ "BSD-3-Clause" ]
4
2021-09-03T13:55:05.000Z
2021-11-09T10:59:33.000Z
def _addr_params(args):
    # Common payload for the add/delete address RPCs.
    return {'ifc_index': args.ifc_index, 'ip_address': args.ip_addr}


def add_ip_address(client, args):
    """Attach ``args.ip_addr`` to interface ``args.ifc_index`` via RPC."""
    return client.call('add_ip_address', _addr_params(args))


def delete_ip_address(client, args):
    """Remove ``args.ip_addr`` from interface ``args.ifc_index`` via RPC."""
    return client.call('delete_ip_address', _addr_params(args))


def get_interfaces(client, args):
    """List all network interfaces via RPC (``args`` is unused)."""
    return client.call('get_interfaces')
30.230769
70
0.73028
58
393
4.637931
0.258621
0.200743
0.178439
0.141264
0.594796
0.594796
0.594796
0.594796
0.594796
0.594796
0
0
0.13486
393
12
71
32.75
0.791176
0
0
0.25
0
0
0.211196
0
0
0
0
0
0
1
0.375
false
0
0
0.125
0.75
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
7cba37af37a0c49a828584ad7f3bc1c775fd47ad
200
py
Python
src/expression_evaluator/operators/basic/lesser_than.py
Xett/expression_evaluator
eca895d79f015843a262e9636b86c6dd3d06a69d
[ "MIT" ]
null
null
null
src/expression_evaluator/operators/basic/lesser_than.py
Xett/expression_evaluator
eca895d79f015843a262e9636b86c6dd3d06a69d
[ "MIT" ]
null
null
null
src/expression_evaluator/operators/basic/lesser_than.py
Xett/expression_evaluator
eca895d79f015843a262e9636b86c6dd3d06a69d
[ "MIT" ]
null
null
null
from expression_evaluator.token import * class LesserThan(BasicOperator): symbols = ['<'] priority = PriorityLevel.Boolean @classmethod def _function(cls, a, b): return a < b
22.222222
40
0.67
21
200
6.285714
0.904762
0.030303
0
0
0
0
0
0
0
0
0
0
0.23
200
9
41
22.222222
0.857143
0
0
0
0
0
0.004975
0
0
0
0
0
0
1
0.142857
false
0
0.142857
0.142857
0.857143
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
4
7cd548bf9630c9951f1f88bc54abce74dbe50b51
11,832
py
Python
neuroprob/utils/biophysical.py
davindicode/universal_count_model
f91294b66f16a5701dc8e0b5a825ac55bff83da2
[ "MIT" ]
1
2022-01-14T19:27:55.000Z
2022-01-14T19:27:55.000Z
neuroprob/utils/biophysical.py
davindicode/universal_count_model
f91294b66f16a5701dc8e0b5a825ac55bff83da2
[ "MIT" ]
null
null
null
neuroprob/utils/biophysical.py
davindicode/universal_count_model
f91294b66f16a5701dc8e0b5a825ac55bff83da2
[ "MIT" ]
null
null
null
import torch import torch.nn as nn import numpy as np from tqdm.autonotebook import tqdm # Continuous models class Hodgkin_Huxley(): r""" Hodgkin-Huxley model via Euler integration """ def __init__(self, G_na=120, G_k=36, G_l=0.3, E_na=50, E_k=-77, E_l=-54.4): r""" units are in mV, microS, nF, mA, ms """ self.G_na = G_na self.G_k = G_k self.G_l = G_l self.E_na = E_na self.E_k = E_k self.E_l = E_l def euler_int(self, T, runs, I_ext, ic, dt=0.001, prin=1000): r""" Integrate the HH dynamics, the state array (v, m, h, n) is represented by 4 floating-point values :param int T: timesteps to run the simulation for :param int runs: number of trials to run (I_ext and i.c. can differ per run) :param np.array I_ext: external input current, with shape (runs, timesteps) :param np.array ic: neuron initial conditions, with shape (runs, 4) :returns: neuron state over the simulation :rtype: np.array """ alpha_m = lambda V: (2.5-0.1*(V+65)) / (np.exp(2.5-0.1*(V+65)) -1) beta_m = lambda V: 4.0 * np.exp(-(V+65)/18) alpha_h = lambda V: 0.07 * np.exp(-(V+65)/20) beta_h = lambda V: 1.0 / (np.exp(3.0-0.1*(V+65)) + 1) alpha_n = lambda V: (0.1-0.01*(V+65)) / (np.exp(1-0.1*(V+65)) - 1) beta_n = lambda V: 0.125 * np.exp(-(V+65)/80) state = np.zeros((runs, T, 4)) # vector v, m, h, n for k in range(runs): state[k, 0, :] = ic[k, :]#[-6.49997224e+01, 5.29342176e-02, 5.96111046e-01, 3.17681168e-01] ds = np.zeros((runs, 4)) iterator = tqdm(range(T-1)) for t in iterator: ds[:, 0] = -(G_l*(state[:, t, 0] - E_l) + \ G_k*np.power(state[:, t, 3], 4)*(state[:, t, 0] - E_k) + \ G_na*np.power(state[:, t, 1], 3)*state[:, t, 2]*(state[:, t, 0] - E_na)) + I_ext[:, t] ds[:, 1] = alpha_m(state[:, t, 0]) * (1 - state[:, t, 1]) - beta_m(state[:, t, 0]) * state[:, t, 1] ds[:, 2] = alpha_h(state[:, t, 0]) * (1 - state[:, t, 2]) - beta_h(state[:, t, 0]) * state[:, t, 2] ds[:, 3] = alpha_n(state[:, t, 0]) * (1 - state[:, t, 3]) - beta_n(state[:, t, 0]) * state[:, t, 3] state[:, t+1] = state[:, t] + ds * dt return 
state class FitzHugh_Nagumo(): r""" A 2D reduction of the Hodgkin-Huxley model to the phase plane. """ def __init__(self, b_0, b_1, tau_u, tau_w): r""" units are in mV, microS, nF, mA, ms """ self.b_0 = b_0 self.b_1 = b_1 self.tau_u = tau_u self.tau_w = tau_w def euler_int(self, T, runs, I_ext, ic, dt=0.001, prin=1000): r""" Integrate the HH dynamics, the state array (v, m, h, n) is represented by 4 floating-point values :param int T: timesteps to run the simulation for :param int runs: number of trials to run (I_ext and i.c. can differ per run) :param np.array I_ext: external input current, with shape (runs, timesteps) :param np.array ic: neuron initial conditions, with shape (runs, 4) :returns: neuron state over the simulation :rtype: np.array """ state = np.zeros((runs, T, 2)) # vector u, w for k in range(runs): state[k, 0, :] = ic[k, :]#[-6.49997224e+01, 5.29342176e-02, 5.96111046e-01, 3.17681168e-01] ds = np.zeros((runs, 2)) iterator = tqdm(range(T-1)) for t in iterator: ds[:, 0] = 1/self.tau_u * (state[:, t, 0] - state[:, t, 0]**3/3. - state[:, t, 1] + I_ext) ds[:, 1] = 1/self.tau_w * (self.b_0 + self.b_1*state[:, t, 0] - state[:, t, 1]) state[:, t+1] = state[:, t] + ds * dt return state class Morris_Lecar(): r""" A 2D reduction of the Hodgkin-Huxley model to the phase plane. """ def __init__(self, G_na=120, G_k=36, G_l=0.3, E_na=50, E_k=-77, E_l=-54.4): r""" units are in mV, microS, nF, mA, ms """ self.G_na = G_na self.G_k = G_k self.G_l = G_l self.E_na = E_na self.E_k = E_k self.E_l = E_l def euler_int(self, T, runs, I_ext, ic, dt=0.001, prin=1000): r""" Integrate the HH dynamics, the state array (v, m, h, n) is represented by 4 floating-point values :param int T: timesteps to run the simulation for :param int runs: number of trials to run (I_ext and i.c. 
can differ per run) :param np.array I_ext: external input current, with shape (runs, timesteps) :param np.array ic: neuron initial conditions, with shape (runs, 4) :returns: neuron state over the simulation :rtype: np.array """ alpha_m = lambda V: (2.5-0.1*(V+65)) / (np.exp(2.5-0.1*(V+65)) -1) beta_m = lambda V: 4.0 * np.exp(-(V+65)/18) alpha_h = lambda V: 0.07 * np.exp(-(V+65)/20) beta_h = lambda V: 1.0 / (np.exp(3.0-0.1*(V+65)) + 1) alpha_n = lambda V: (0.1-0.01*(V+65)) / (np.exp(1-0.1*(V+65)) - 1) beta_n = lambda V: 0.125 * np.exp(-(V+65)/80) state = np.zeros((runs, T, 4)) # vector v, m, h, n for k in range(runs): state[k, 0, :] = ic[k, :]#[-6.49997224e+01, 5.29342176e-02, 5.96111046e-01, 3.17681168e-01] ds = np.zeros((runs, 4)) iterator = tqdm(range(T-1)) for t in iterator: ds[:, 0] = -(G_l*(state[:, t, 0] - E_l) + \ G_k*np.power(state[:, t, 3], 4)*(state[:, t, 0] - E_k) + \ G_na*np.power(state[:, t, 1], 3)*state[:, t, 2]*(state[:, t, 0] - E_na)) + I_ext[:, t] ds[:, 1] = alpha_m(state[:, t, 0]) * (1 - state[:, t, 1]) - beta_m(state[:, t, 0]) * state[:, t, 1] ds[:, 2] = alpha_h(state[:, t, 0]) * (1 - state[:, t, 2]) - beta_h(state[:, t, 0]) * state[:, t, 2] ds[:, 3] = alpha_n(state[:, t, 0]) * (1 - state[:, t, 3]) - beta_n(state[:, t, 0]) * state[:, t, 3] state[:, t+1] = state[:, t] + ds * dt return state def count_APs(V, lim=20.0): r""" Action potential counter """ idx = (V > lim).astype(float) idf = np.diff(idx) == 1 return idf.sum() # Integrate-and-fire models class Izhikevich(): r""" Biophysically inspired Izhikevich model (2003/2004) [1], a nonlinear integrate-and-fire model. 
References: [1] """ def __init__(self, a, b, c, d): self.a = a self.b = b self.c = c self.d = d def euler_int(self, T, runs, I_ext, ic, dt=0.1, prin=1000): r""" Euler integration of the dynamics, with state array (v, u) """ state = np.zeros((runs, T, 2)) # vector v, u spiketrain = np.zeros((runs, T)) reset_state = np.empty((runs, 2)) reset_state[:, 0].fill(self.c) for k in range(runs): state[k, 0, :] = ic[k, :] ds = np.zeros((runs, 2)) iterator = tqdm(range(T-1)) for t in iterator: ds[:, 0] = 0.04*state[:, t, 0]**2 + 5.*state[:, t, 0] + 140. - state[:, t, 1] + I_ext[:, t] ds[:, 1] = self.a*(self.b*state[:, t, 0] - state[:, t, 1]) reset = (state[:, t, 0] >= 30.) if reset.sum() > 0: reset_state[:, 1] = (state[:, t, 1] + self.d) state[:, t+1] = reset[:, None]*reset_state + (1-reset)[:, None]*(state[:, t] + ds * dt) spiketrain[:, t+1] = reset else: state[:, t+1] = state[:, t] + ds * dt return state, spiketrain class AdExIF(): r""" Adaptive exponential integrate-and-fire model. [1] References: [1] `Neuronal Dynamics`, Wulfram Gerstner, Werner M. Kistler, Richard Naud and Liam Paninski. """ def __init__(self, a, b, c, d): self.a = a self.b = b self.c = c self.d = d def euler_int(self, T, runs, I_ext, ic, dt=0.001, prin=1000): r""" Euler integration of the dynamics, with state array (v, u) """ state = np.zeros((runs, T, 2)) # vector v, u spiketrain = np.zeros((runs, T)) reset_state = np.empty((runs, 2)) reset_state[:, 0].fill(self.c) for k in range(runs): state[k, 0, :] = ic[k, :] ds = np.zeros((runs, 2)) iterator = tqdm(range(T-1)) for t in iterator: ds[:, 0] = 0.04*state[:, t, 0]**2 + 5.*state[:, t, 0] + 140. - state[:, t, 1] + I_ext[:, t] ds[:, 1] = self.a*(self.b*state[:, t, 0] - state[:, t, 1]) reset = (state[:, t, 0] >= 30.) 
if reset.sum() > 0: reset_state[:, 1] = (state[:, t, 1] + self.d) state[:, t+1] = reset[:, None]*reset_state + (1-reset)[:, None]*(state[:, t] + ds * dt) spiketrain[:, t+1] = reset else: state[:, t+1] = state[:, t] + ds * dt return state, spiketrain def neuron_model(dynamics, model_type): r""" Neuronal dynamics library of parameter values. Izhikevich parameters from [1]. References: [1] `Capturing the Dynamical Repertoire of Single Neurons with Generalized Linear Models`, Alison I. Weber & Jonathan W. Pillow """ if model_type == 'Izhikevich': # dt in ms if dynamics == 'tonic_spiking': model = Izhikevich(0.02, 0.2, -65, 6) I = 14 dt = 0.1 elif dynamics == 'phasic_spiking': model = Izhikevich(0.02, 0.2, -65, 6) I = 0.5 dt = 0.1 elif dynamics == 'tonic_bursting': model = Izhikevich(0.02, 0.2, -50, 2) I = 10 dt = 0.1 elif dynamics == 'phasic_bursting': model = Izhikevich(0.02, 0.25, -55, 0.05) I = 0.6 dt = 0.1 elif dynamics == 'mixed': model = Izhikevich(0.02, 0.2, -55, 4) I = 10 dt = 0.1 elif dynamics == 'frequency_adaptation': model = Izhikevich(0.01, 0.2, -65, 5) I = 20 dt = 0.1 elif dynamics == 'type_I': model = Izhikevich(0.02, -0.1, -55, 6) I = 25 dt = 1. elif dynamics == 'type_II': model = Izhikevich(0.2, 0.26, -65, 0) I = 0.5 dt = 1. elif dynamics == 'spike_latency': model = Izhikevich(0.02, 0.2, -65, 6) I = 3.49 dt = 0.1 elif dynamics == 'resonator': model = Izhikevich(0.1, 0.26, -60, -1) I = 0.3 dt = 0.5 elif dynamics == 'integrator': model = Izhikevich(0.02, -0.1, -66, 6) I = 27.4 dt = 0.5 elif dynamics == 'rebound_spike': model = Izhikevich(0.03, 0.25, -60, 4) I = -5. dt = 0.1 elif dynamics == 'rebound_burst': model = Izhikevich(0.03, 0.25, -52, 0) I = -5. dt = 0.1 elif dynamics == 'threshold_variability': model = Izhikevich(0.03, 0.25, -60, 4) I = 2.3 dt = 1. elif dynamics == 'bistability_I': model = Izhikevich(1., 1.5, -60, 0) I = 30. dt = 0.05 elif dynamics == 'bistability_II': model = Izhikevich(1., 1.5, -60, 0) I = 40. 
dt = 0.05 else: raise NotImplementedError return model, I, dt else: raise NotImplementedError
34.8
111
0.483942
1,803
11,832
3.088741
0.124792
0.07434
0.036452
0.021548
0.778416
0.756868
0.721853
0.704435
0.696534
0.683247
0
0.088285
0.349983
11,832
340
112
34.8
0.635808
0.214672
0
0.665158
0
0
0.023989
0.002399
0
0
0
0
0
1
0.054299
false
0
0.0181
0
0.126697
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
7ce2a2f7a600f572ddad417b0f26991aa5156a69
64
py
Python
helloworld.py
sheki/snipy
311ccbc043fc317b98393f16ebabb1f0f2ad2574
[ "WTFPL" ]
1
2017-12-19T22:44:19.000Z
2017-12-19T22:44:19.000Z
helloworld.py
sheki/snipy
311ccbc043fc317b98393f16ebabb1f0f2ad2574
[ "WTFPL" ]
null
null
null
helloworld.py
sheki/snipy
311ccbc043fc317b98393f16ebabb1f0f2ad2574
[ "WTFPL" ]
null
null
null
print 'Content-Type: text/plain' print '' print 'Hello, world!'
16
32
0.703125
9
64
5
0.777778
0
0
0
0
0
0
0
0
0
0
0
0.125
64
3
33
21.333333
0.803571
0
0
0
0
0
0.578125
0
0
0
0
0
0
0
null
null
0
0
null
null
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
4
7cefc6e968d9c54af26c949e17f947dcd46300be
556
py
Python
oscar/lib/python2.7/site-packages/phonenumbers/shortdata/region_WS.py
AMuratTuran/mkn
557086426773ced10d82c969304bd349414a601e
[ "BSD-3-Clause" ]
4
2018-10-19T04:36:20.000Z
2020-02-13T16:14:09.000Z
oscar/lib/python2.7/site-packages/phonenumbers/shortdata/region_WS.py
AMuratTuran/mkn
557086426773ced10d82c969304bd349414a601e
[ "BSD-3-Clause" ]
null
null
null
oscar/lib/python2.7/site-packages/phonenumbers/shortdata/region_WS.py
AMuratTuran/mkn
557086426773ced10d82c969304bd349414a601e
[ "BSD-3-Clause" ]
null
null
null
"""Auto-generated file, do not edit by hand. WS metadata""" from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata PHONE_METADATA_WS = PhoneMetadata(id='WS', country_code=None, international_prefix=None, general_desc=PhoneNumberDesc(national_number_pattern='9\\d{2}', possible_length=(3,)), emergency=PhoneNumberDesc(national_number_pattern='99[4-6]', example_number='994', possible_length=(3,)), short_code=PhoneNumberDesc(national_number_pattern='99[4-6]', example_number='994', possible_length=(3,)), short_data=True)
61.777778
110
0.77518
72
556
5.736111
0.583333
0.16707
0.210654
0.261501
0.368039
0.368039
0.368039
0.368039
0.368039
0.368039
0
0.037255
0.082734
556
8
111
69.5
0.772549
0.095324
0
0
1
0
0.05835
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
7cefef909e7458f99684bc978e406b37332343a3
71
py
Python
nasbench301/surrogate_models/bananas/bananas_src/bo/fn/__init__.py
Basvanstein/nasbench301
2984dec45c760d47762f50efe39b71e9d1ac22e0
[ "Apache-2.0" ]
167
2019-10-26T19:54:49.000Z
2021-12-14T15:25:32.000Z
nasbench301/surrogate_models/bananas/bananas_src/bo/fn/__init__.py
Basvanstein/nasbench301
2984dec45c760d47762f50efe39b71e9d1ac22e0
[ "Apache-2.0" ]
12
2020-11-07T12:50:19.000Z
2022-01-21T08:52:53.000Z
nasbench301/surrogate_models/bananas/bananas_src/bo/fn/__init__.py
Basvanstein/nasbench301
2984dec45c760d47762f50efe39b71e9d1ac22e0
[ "Apache-2.0" ]
23
2019-10-28T12:26:32.000Z
2020-10-12T12:31:39.000Z
""" Code for synthetic functions to query (perform experiment on). """
17.75
62
0.71831
9
71
5.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.15493
71
3
63
23.666667
0.85
0.873239
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
7cf280e691010fa2d069889268a0b02995b8964b
5,086
py
Python
paxes_cinder/volume/discovery_driver.py
windskyer/k_cinder
000ee539ee4842a158071d26ee99d12c7c0a87da
[ "Apache-2.0" ]
null
null
null
paxes_cinder/volume/discovery_driver.py
windskyer/k_cinder
000ee539ee4842a158071d26ee99d12c7c0a87da
[ "Apache-2.0" ]
null
null
null
paxes_cinder/volume/discovery_driver.py
windskyer/k_cinder
000ee539ee4842a158071d26ee99d12c7c0a87da
[ "Apache-2.0" ]
null
null
null
# # # ================================================================= # ================================================================= """Extended Volume Driver interface to discover/query resource information""" class VolumeDiscoveryDriver(object): """ Extended Volume Driver interface for drivers to implementation to discover/query additional resource information from the managed system. The base VolumeDriver interface provides methods to take actions on the Storage Provider and its Volumes but given the premise that OpenStack is the authoritative management source for the Hosts being managed, it assumes the resources created by OpenStack are an accurate representation of the current state of the resources, so it provides very limited information through the driver interface about those resources. This interface extends those driver capabilities by asking the driver to provide 4 levels of information about the resources being managed: 1) discover - provide a list of all Volumes that exist on the Provider 2) query - provide enough info about the Volumes to import into OS 3) inventory - provide additional details about the Volumes specified 4) metrics - provide additional metric information about the Volumes """ def discover_volumes(self, context, filters=None): """ Returns a list of all of the Volumes that exist on the given Provider. 
For each Volumes the driver needs to return a dictionary containing the following attributes: name: The Name of the Volume defined on the Storage Provider status: The Status of the Volume, matching the status definition uuid: The UUID of the VM when created thru OS (Optional) status: The Status of the Volume, matching the definition size: The Size of the Volume in GB restricted_metadata: The Additional Meta-data from the Driver vdisk_id: The Identifier for the Volume on the Back-end vdisk_name: The Name of the Volume on the Back-end support: Dictionary stating whether the Volume can be managed status: Whether or not it is "supported" or "not_supported" reasons: List of Text Strings as to why it isn't supported :param context: The security context for the query :param filters: The filters to apply, such as {'wwpns': ['wwpn1',..] """ raise NotImplementedError() def query_volumes(self, context, volumes, server_info={}, mark_boot=True): """ Returns a list of Volumes (matching those specified on input) with enough additional details about each Volume to be able to import the Volume into the Cinder Database such as OpenStack can start managing. 
For each Volume the driver needs to return a dictionary containing the following attributes: uuid: The UUID of the Volume when created thru OS name: The Name of the Volume defined on the Storage Provider status: The Status of the Volume, matching the definition size: The Size of the Volume in GB restricted_metadata: The Additional Meta-data from the Driver vdisk_id: The Identifier for the Volume on the Back-end vdisk_name: The Name of the Volume on the Back-end :param context: The security context for the query :param volumes: A list of dictionary objects for each Volume with: uuid: The UUID of the Volume when created thru OS name: The Name of the Volume defined on the Storage Provider restricted_metadata: The Additional Meta-data from the Driver vdisk_id: The Identifier for the Volume on the Back-end vdisk_name: The Name of the Volume on the Back-end :param server_info: The host info for the attached servers """ #Currently the discover/query methods return the same data, so we can #return the values passed in since discover was called to get the info return volumes def inventory_volumes(self, context, volumes): """ Provides a mechanism for the Driver to gather Inventory-related information for the Volumes provided off of the Back-end at periodic intervals. The Driver is free from there to populate the information directly in the Database rather than return it. :param context: The security context for the query :param volumes: A list of dictionary objects for each Volume with: uuid: The UUID of the Volume when created thru OS name: The Name of the Volume defined on the Storage Provider restricted_metadata: The Additional Meta-data from the Driver vdisk_id: The Identifier for the Volume on the Back-end vdisk_name: The Name of the Volume on the Back-end """ pass
53.536842
79
0.66044
678
5,086
4.926254
0.268437
0.059281
0.052695
0.031138
0.438024
0.420659
0.420659
0.420659
0.420659
0.406886
0
0.001641
0.280967
5,086
94
80
54.106383
0.911676
0.832088
0
0
0
0
0
0
0
0
0
0
0
1
0.428571
false
0.142857
0
0
0.714286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
4
6b0b873fff92b5ab3b7c7fe753e498cb0574572b
280
py
Python
utils/__init__.py
ntrlmt/jetson_benchmarks
0a3d56ed806aa1aa0ee27ddeba0dd60daf0eb65d
[ "MIT" ]
224
2020-04-28T17:28:21.000Z
2022-03-28T15:27:19.000Z
utils/__init__.py
nikunjpansari/jetson_benchmarks
d0e474ff5e797c6842cd7ae8b2daddc82b7be423
[ "MIT" ]
23
2020-05-25T03:46:33.000Z
2022-03-13T09:38:51.000Z
utils/__init__.py
nikunjpansari/jetson_benchmarks
d0e474ff5e797c6842cd7ae8b2daddc82b7be423
[ "MIT" ]
38
2020-05-29T02:51:57.000Z
2022-03-23T06:45:44.000Z
from .download_models import download_models from .load_store_engine import load_store_engine from .read_write_data import read_write_data from .utilities import utilities from .benchmark_argparser import benchmark_argparser from .run_benchmark_models import run_benchmark_models
40
54
0.892857
40
280
5.85
0.35
0.119658
0.128205
0
0
0
0
0
0
0
0
0
0.085714
280
6
55
46.666667
0.914063
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
6b0d3d28748b79c66f155d77253c7374050f983f
26
py
Python
midterm2/pick_area.py
williamtrang/DSC20
eb72c1003ee98e30f63040568e82eedcb8d3af52
[ "Apache-2.0" ]
null
null
null
midterm2/pick_area.py
williamtrang/DSC20
eb72c1003ee98e30f63040568e82eedcb8d3af52
[ "Apache-2.0" ]
null
null
null
midterm2/pick_area.py
williamtrang/DSC20
eb72c1003ee98e30f63040568e82eedcb8d3af52
[ "Apache-2.0" ]
null
null
null
def pick_area(input):
13
21
0.653846
4
26
4
1
0
0
0
0
0
0
0
0
0
0
0
0.230769
26
2
22
13
0.8
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
6b13d14922bb0aa6053368fb4eec6baa39c0a5ba
523
py
Python
app/migrations/0005_auto_20211205_2323.py
RhonaJoyKe/Insta-Clone
18a742f93c3e69ab03e88d69b28aced77fcce109
[ "Unlicense" ]
null
null
null
app/migrations/0005_auto_20211205_2323.py
RhonaJoyKe/Insta-Clone
18a742f93c3e69ab03e88d69b28aced77fcce109
[ "Unlicense" ]
null
null
null
app/migrations/0005_auto_20211205_2323.py
RhonaJoyKe/Insta-Clone
18a742f93c3e69ab03e88d69b28aced77fcce109
[ "Unlicense" ]
null
null
null
# Generated by Django 3.2.9 on 2021-12-05 20:23 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('app', '0004_image_user'), ] operations = [ migrations.RemoveField( model_name='comments', name='postee', ), migrations.RemoveField( model_name='image', name='postee', ), migrations.RemoveField( model_name='image', name='profile', ), ]
20.115385
47
0.529637
48
523
5.666667
0.625
0.231618
0.286765
0.330882
0.345588
0.345588
0.345588
0.345588
0
0
0
0.056213
0.353728
523
25
48
20.92
0.748521
0.086042
0
0.526316
1
0
0.115546
0
0
0
0
0
0
1
0
false
0
0.052632
0
0.210526
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
6b26e5fee369f5d6578e8f4df9c4ff323d3510c6
102
py
Python
mmdetection/mmdet/models/necks/__init__.py
taikis/kaggle-kuzushiji-recognition
63c063f03f1ff5d53d411ae1709b4e328e170ace
[ "MIT" ]
859
2019-09-29T05:36:03.000Z
2022-03-15T08:33:03.000Z
mmdetection/mmdet/models/necks/__init__.py
taikis/kaggle-kuzushiji-recognition
63c063f03f1ff5d53d411ae1709b4e328e170ace
[ "MIT" ]
69
2019-10-14T11:07:51.000Z
2022-03-10T14:39:00.000Z
mmdetection/mmdet/models/necks/__init__.py
taikis/kaggle-kuzushiji-recognition
63c063f03f1ff5d53d411ae1709b4e328e170ace
[ "MIT" ]
165
2019-10-05T02:59:29.000Z
2022-03-28T02:30:11.000Z
from .bfp import BFP from .fpn import FPN from .hrfpn import HRFPN __all__ = ['FPN', 'BFP', 'HRFPN']
17
33
0.686275
16
102
4.125
0.375
0
0
0
0
0
0
0
0
0
0
0
0.176471
102
5
34
20.4
0.785714
0
0
0
0
0
0.107843
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
6b43fccfadfdf79c05274698c8c5c16cec3bb845
780
py
Python
aiotdlib/api/types/ok.py
jraylan/aiotdlib
4528fcfca7c5c69b54a878ce6ce60e934a2dcc73
[ "MIT" ]
37
2021-05-04T10:41:41.000Z
2022-03-30T13:48:05.000Z
aiotdlib/api/types/ok.py
jraylan/aiotdlib
4528fcfca7c5c69b54a878ce6ce60e934a2dcc73
[ "MIT" ]
13
2021-07-17T19:54:51.000Z
2022-02-26T06:50:00.000Z
aiotdlib/api/types/ok.py
jraylan/aiotdlib
4528fcfca7c5c69b54a878ce6ce60e934a2dcc73
[ "MIT" ]
7
2021-09-22T21:27:11.000Z
2022-02-20T02:33:19.000Z
# =============================================================================== # # # # This file has been generated automatically!! Do not change this manually! # # # # =============================================================================== # from __future__ import annotations from pydantic import Field from ..base_object import BaseObject class Ok(BaseObject): """ An object of this type is returned on a successful function call for certain functions """ ID: str = Field("ok", alias="@type") @staticmethod def read(q: dict) -> Ok: return Ok.construct(**q)
32.5
90
0.383333
58
780
5.068966
0.758621
0
0
0
0
0
0
0
0
0
0
0
0.353846
780
23
91
33.913043
0.583333
0.629487
0
0
1
0
0.027132
0
0
0
0
0
0
1
0.125
false
0
0.375
0.125
0.875
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
4
860c6cfe5eb3073994889280bbad52cf77a18d3a
16,471
py
Python
lib/risksense_api/__subject/__uploads/__uploads.py
arockiachristopher-git/risksense_tools
1564cd93505a4d4ccd546f68310e0a09f888e590
[ "Apache-2.0" ]
1
2021-06-08T23:58:55.000Z
2021-06-08T23:58:55.000Z
lib/risksense_api/__subject/__uploads/__uploads.py
arockiachristopher-git/risksense_tools
1564cd93505a4d4ccd546f68310e0a09f888e590
[ "Apache-2.0" ]
1
2021-08-05T17:39:38.000Z
2021-08-05T17:51:17.000Z
lib/risksense_api/__subject/__uploads/__uploads.py
risksense/risksense_tools
1564cd93505a4d4ccd546f68310e0a09f888e590
[ "Apache-2.0" ]
5
2022-02-25T21:09:08.000Z
2022-03-31T06:16:44.000Z
""" ******************************************************************************************************************* | | Name : __uploads.py | Module : risksense_api | Description : A class to be used for interacting with uploads on the RiskSense Platform. | Copyright : (c) RiskSense, Inc. | License : Apache-2.0 (http://www.apache.org/licenses/LICENSE-2.0) | ******************************************************************************************************************* """ import json from ...__subject import Subject from ..._api_request_handler import * class Uploads(Subject): """ Uploads class """ def __init__(self, profile): """ Initialization of Uploads object. :param profile: Profile Object :type profile: _profile """ self.subject_name = "upload" Subject.__init__(self, profile, self.subject_name) def get_uploads(self, assessment_id, page_num=0, page_size=150, client_id=None): """ Get uploads associated with an assessment. :param assessment_id: The assessment ID. :type assessment_id: int :param page_num: The page number of results to return. :type page_num: int :param page_size: The number of results per page to return. :type page_size: int :param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID. :type client_id: int :return: The JSON response from the platform is returned. :rtype: dict :raises RequestFailed: """ if client_id is None: client_id = self._use_default_client_id()[0] url = self.api_base_url.format(str(client_id)) params = { "assessmentId": assessment_id, "size": page_size, "page": page_num } try: raw_response = self.request_handler.make_request(ApiRequestHandler.GET, url, params=params) except RequestFailed: raise jsonified_response = json.dumps(raw_response.text) return jsonified_response def create(self, name, assessment_id, network_id, client_id=None): """ Create a new upload. :param name: The name to be associated with the upload. :type name: str :param assessment_id: The assessment ID. 
:type assessment_id: int :param network_id: The network ID. :type network_id: int :param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID. :type client_id: int :return: The Upload ID :rtype: int :raises RequestFailed: """ if client_id is None: client_id = self._use_default_client_id()[0] url = self.api_base_url.format(str(client_id)) body = { "assessmentId": assessment_id, "networkId": network_id, "name": name } try: raw_response = self.request_handler.make_request(ApiRequestHandler.POST, url, body=body) except RequestFailed: raise jsonified_response = json.loads(raw_response.text) upload_id = jsonified_response['id'] return upload_id def check_state(self, upload_id, client_id=None): """ Check the state of an upload. :param upload_id: The upload ID. :type upload_id: int :param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID. :type client_id: int :return: The current state of the upload is returned. :rtype: str :raises RequestFailed: """ if client_id is None: client_id = self._use_default_client_id()[0] url = self.api_base_url.format(str(client_id)) + "/{}".format(str(upload_id)) try: raw_response = self.request_handler.make_request(ApiRequestHandler.GET, url) except RequestFailed: raise jsonified_response = json.loads(raw_response.text) state = jsonified_response['state'] return state def update(self, upload_id, client_id=None, **kwargs): """ Update an upload. Uploads can only be updated before they have been processed. :param upload_id: The upload ID. :type upload_id: int :param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID. :type client_id: int :keyword name: str Name of upload :keyword assessment_id: int Assessment ID. :keyword network_id: int Network ID. :return: The job ID is returned. 
:rtype: int :raises RequestFailed: :raises ValueError: """ if client_id is None: client_id = self._use_default_client_id()[0] url = self.api_base_url.format(str(client_id)) + "/{}".format(str(upload_id)) name = kwargs.get('name', None) assessment_id = kwargs.get('assessment_id', None) network_id = kwargs.get('network_id', None) body = { "name": name, "assessmentId": assessment_id, "networkId": network_id } body = self._strip_nones_from_dict(body) if body == {}: raise ValueError("Body is empty. Please provide name, assessment_id, and/or network_id") try: raw_response = self.request_handler.make_request(ApiRequestHandler.PUT, url, body=body) except RequestFailed: raise jsonified_response = json.loads(raw_response.text) job_id = jsonified_response['id'] return job_id def delete(self, upload_id, client_id=None): """ Delete an Upload. :param upload_id: The upload ID :type upload_id: int :param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID. :type client_id: int :return: True/False reflecting whether or not the operation was successful. :rtype: bool :raises RequestFailed: """ if client_id is None: client_id = self._use_default_client_id()[0] url = self.api_base_url.format(str(client_id)) + "/{}".format(str(upload_id)) try: self.request_handler.make_request(ApiRequestHandler.DELETE, url) except RequestFailed: raise success = True return success def list_files(self, upload_id, page_num=0, page_size=150, client_id=None): """ List files in an upload. :param upload_id: The upload ID :type upload_id: int :param page_num: The page number to be returned. :type page_num: int :param page_size: The number of results to return per page. :type page_size: int :param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID. :type client_id: int :return: A paginated JSON response from the platform. 
:rtype: dict :raises RequestFailed: """ if client_id is None: client_id = self._use_default_client_id()[0] url = self.api_base_url.format(str(client_id)) + "/{}/file".format(str(upload_id)) params = { "size": page_size, "page": page_num } try: raw_response = self.request_handler.make_request(ApiRequestHandler.GET, url, params=params) except RequestFailed: raise jsonified_response = json.loads(raw_response.text) return jsonified_response def add_file(self, upload_id, file_name, path_to_file, client_id=None): """ Add a file to an upload. :param upload_id: Upload ID :type upload_id: int :param file_name: The name to be used for the uploaded file. :type file_name: str :param path_to_file: Full path to the file to be uploaded. :type path_to_file: str :param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID. :type client_id: int :return: The file ID is returned. :rtype: int :raises RequestFailed: :raises FileNotFoundError: """ if client_id is None: client_id = self._use_default_client_id()[0] url = self.api_base_url.format(str(client_id)) + "/{}/file".format(str(upload_id)) upload_file = {'scanFile': (file_name, open(path_to_file, 'rb'))} try: raw_response = self.request_handler.make_request(ApiRequestHandler.POST, url, files=upload_file) except RequestFailed: raise except FileNotFoundError: raise jsonified_response = json.loads(raw_response.text) file_id = jsonified_response[0]['id'] return file_id def update_file(self, upload_id, file_id, client_id=None, **kwargs): """ Update an uploaded file. Will only work if the file has not yet been processed. :param upload_id: The upload ID. :type upload_id: int :param file_id: The file ID. :type file_id: str :param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID. :type client_id: int :keyword assessment_id: The assessment ID the upload should be associated with. Integer. :keyword network_id: The network ID the upload should be associated with. Integer. 
:keyword application_id: The application ID the upload should be associated with. Integer. :return: The upload ID is returned :rtype: int :raises RequestFailed: :raises ValueError: """ if client_id is None: client_id = self._use_default_client_id()[0] url = self.api_base_url.format(str(client_id)) + "/{}/file/{}".format(str(upload_id), str(file_id)) assessment_id = kwargs.get('assessment_id', None) network_id = kwargs.get('network_id', None) application_id = kwargs.get('application_id', None) body = { "assessmentId": assessment_id, "networkId": network_id, "applicationId": application_id } body = self._strip_nones_from_dict(body) if body == {}: raise ValueError('Body empty. Please provide assessment_id, network_id, and/or application_id missing.') try: raw_response = self.request_handler.make_request(ApiRequestHandler.PUT, url, body=body) except RequestFailed: raise jsonified_response = json.loads(raw_response) returned_id = jsonified_response['id'] return returned_id def delete_file(self, upload_id, file_id, client_id=None): """ Delete an uploaded file. :param upload_id: The upload ID. :type upload_id: int :param file_id: The file ID. :type file_id: int :param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID. :type client_id: int :return: True/False reflecting whether or not the operation was successfully submitted. :rtype: bool :raises RequestFailed: """ if client_id is None: client_id = self._use_default_client_id()[0] url = self.api_base_url.format(str(client_id)) + "/{}/file/{}".format(str(upload_id), str(file_id)) try: self.request_handler.make_request(ApiRequestHandler.DELETE, url) except RequestFailed: raise success = True return success def download_file(self, upload_id, file_destination, client_id=None): """ Download a previously uploaded file. :param upload_id: The upload ID :type upload_id: int :param file_destination: The local destination for the downloaded file. 
:type file_destination: str :param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID. :type client_id: int :return: True/False reflecting whether or not the operation was successful. :rtype: bool :raises RequestFailed: :raises FileNotFoundError: :raises FileExistsError: """ if client_id is None: client_id = self._use_default_client_id()[0] url = self.api_base_url.format(str(client_id)) + "/{}/file/download".format(str(upload_id)) try: raw_response = self.request_handler.make_request(ApiRequestHandler.GET, url) except RequestFailed: raise try: open(file_destination, "wb").write(raw_response.content) success = True except (FileNotFoundError, FileExistsError): raise return success def fetch_file_by_uuid(self, upload_id, file_uuid, file_destination, client_id=None): """ Download a file by UUID. :param upload_id: The upload ID :type upload_id: int :param file_uuid: The file UUID :type file_uuid: str :param file_destination: The local destination for the downloaded file. :type file_destination: str :param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID. :type client_id: int :return: True/False reflecting whether or not the operation was successful. :rtype: bool :raises RequestFailed: :raises FileNotFoundError: :raises FileExistsError: """ if client_id is None: client_id = self._use_default_client_id()[0] url = self.api_base_url.format(str(client_id)) + "/{}/file/{}".format(str(upload_id), str(file_uuid)) try: raw_response = self.request_handler.make_request(ApiRequestHandler.GET, url) except RequestFailed: raise try: open(file_destination, "wb").write(raw_response.content) success = True except (FileNotFoundError, FileExistsError): raise return success def start_processing(self, upload_id, auto_urba=False, client_id=None): """ Initiate processing of an upload. 
:param upload_id: The upload ID :type upload_id: int :param auto_urba: Indicator for whether or not auto-URBA should be run after upload is processed. :type auto_urba: bool :param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID. :type client_id: int :return: True/False reflecting whether or not the operation was successfully submitted. :rtype: bool :raises RequestFailed: """ if client_id is None: client_id = self._use_default_client_id()[0] url = self.api_base_url.format(str(client_id)) + "/{}/start".format(str(upload_id)) body = { "autoUrba": auto_urba } try: self.request_handler.make_request(ApiRequestHandler.POST, url, body=body) success = True except RequestFailed: raise return success """ Copyright 2021 RiskSense, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """
30.277574
119
0.592192
1,984
16,471
4.72631
0.106855
0.09214
0.038392
0.024315
0.746934
0.713661
0.701717
0.672283
0.643596
0.62632
0
0.002917
0.313217
16,471
543
120
30.333333
0.826025
0.387772
0
0.703297
0
0
0.055665
0
0
0
0
0
0
1
0.071429
false
0
0.016484
0
0.159341
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
8625bc8c94c83d1dc2c1cfefab93a80ff30f3ce2
5,752
py
Python
pymtl3/passes/translator/structural/test/StructuralTranslatorL4_test.py
hsqforfun/pymtl3
05e06601cf262a663a95d1235cb99056ece84580
[ "BSD-3-Clause" ]
1
2019-11-12T12:26:01.000Z
2019-11-12T12:26:01.000Z
pymtl3/passes/translator/structural/test/StructuralTranslatorL4_test.py
hsqforfun/pymtl3
05e06601cf262a663a95d1235cb99056ece84580
[ "BSD-3-Clause" ]
null
null
null
pymtl3/passes/translator/structural/test/StructuralTranslatorL4_test.py
hsqforfun/pymtl3
05e06601cf262a663a95d1235cb99056ece84580
[ "BSD-3-Clause" ]
null
null
null
#========================================================================= # StructuralTranslatorL4_test.py #========================================================================= # Author : Peitian Pan # Date : May 21, 2019 """Test the level 3 structural translators.""" import pytest from pymtl3.datatypes import Bits1, Bits32 from pymtl3.dsl import Component, InPort, Interface, OutPort, connect from pymtl3.passes.rtlir.errors import RTLIRConversionError from pymtl3.passes.rtlir.util.test_utility import do_test, expected_failure from pymtl3.passes.translator.structural.StructuralTranslatorL4 import ( StructuralTranslatorL4, ) from .TestStructuralTranslator import mk_TestStructuralTranslator def local_do_test( m ): tr = mk_TestStructuralTranslator(StructuralTranslatorL4)(m) tr.clear( m ) tr.translate_structural(m) for comp in m._ref_comps.keys(): decl_comp = tr.structural.decl_subcomps[comp] assert decl_comp == m._ref_comps[comp] for comp in m._ref_conns.keys(): connections = tr.structural.connections[comp] assert connections == m._ref_conns[comp] def test_multi_components( do_test ): class B( Component ): def construct( s ): s.out_b = OutPort( Bits32 ) @s.update def upblk(): s.out_b = Bits32(0) class A( Component ): def construct( s ): s.out_a = OutPort( Bits32 ) s.b = B() connect( s.b.out_b, s.out_a ) a = A() a.elaborate() a._ref_comps = { a : \ """\ component_decls: component_decl: b Component B component_ports: component_port: out_b Port of Vector32 component_ifcs: """, a.b : \ """\ component_decls: """} a._ref_conns = { a : \ """\ connections: connection: SubCompAttr CurCompAttr b out_b -> CurCompAttr out_a """, a.b : \ """\ connections: """} a._ref_src = \ """\ component B ( port_decls: port_decl: out_b Port of Vector32 interface_decls: ); const_decls: freevars: wire_decls: component_decls: tmpvars: upblk_srcs: upblk_src: upblk connections: endcomponent component A ( port_decls: port_decl: out_a Port of Vector32 interface_decls: ); const_decls: freevars: 
wire_decls: component_decls: component_decl: b Component B component_ports: component_port: out_b Port of Vector32 component_ifcs: tmpvars: upblk_srcs: connections: connection: SubCompAttr CurCompAttr b out_b -> CurCompAttr out_a endcomponent """ do_test( a ) def test_multi_components_ifc_hierarchy_connect( do_test ): class OutIfc( Interface ): def construct( s ): s.msg = OutPort( Bits32 ) s.rdy = InPort( Bits1 ) s.val = OutPort( Bits1 ) class B( Component ): def construct( s ): s.out_b = OutPort( Bits32 ) s.ifc_b = OutIfc() connect( s.out_b, 0 ) connect( s.ifc_b.msg, 0 ) connect( s.ifc_b.val, 1 ) class A( Component ): def construct( s ): s.out_a = OutPort( Bits32 ) s.b = B() s.ifc_a = OutIfc() connect( s.b.out_b, s.out_a ) connect( s.b.ifc_b, s.ifc_a ) a = A() a.elaborate() a._ref_comps = { a : \ """\ component_decls: component_decl: b Component B component_ports: component_port: out_b Port of Vector32 component_ifcs: component_ifc: ifc_b InterfaceView OutIfc component_ifc_ports: component_ifc_port: msg Port of Vector32 component_ifc_port: rdy Port of Vector1 component_ifc_port: val Port of Vector1 """, a.b : \ """\ component_decls: """} a._ref_conns = { a : \ """\ connections: connection: SubCompAttr CurCompAttr b out_b -> CurCompAttr out_a connection: IfcAttr SubCompAttr CurCompAttr b ifc_b msg -> IfcAttr CurCompAttr ifc_a msg connection: IfcAttr CurCompAttr ifc_a rdy -> IfcAttr SubCompAttr CurCompAttr b ifc_b rdy connection: IfcAttr SubCompAttr CurCompAttr b ifc_b val -> IfcAttr CurCompAttr ifc_a val """, a.b : \ """\ connections: connection: Bits32(0) -> CurCompAttr out_b connection: Bits32(0) -> IfcAttr CurCompAttr ifc_b msg connection: Bits1(1) -> IfcAttr CurCompAttr ifc_b val """} a._ref_src = \ """\ component B ( port_decls: port_decl: out_b Port of Vector32 interface_decls: interface_decl: ifc_b InterfaceView OutIfc interface_ports: interface_port: msg Port of Vector32 interface_port: rdy Port of Vector1 interface_port: val Port of Vector1 ); const_decls: 
freevars: wire_decls: component_decls: tmpvars: upblk_srcs: connections: connection: Bits32(0) -> CurCompAttr out_b connection: Bits32(0) -> IfcAttr CurCompAttr ifc_b msg connection: Bits1(1) -> IfcAttr CurCompAttr ifc_b val endcomponent component A ( port_decls: port_decl: out_a Port of Vector32 interface_decls: interface_decl: ifc_a InterfaceView OutIfc interface_ports: interface_port: msg Port of Vector32 interface_port: rdy Port of Vector1 interface_port: val Port of Vector1 ); const_decls: freevars: wire_decls: component_decls: component_decl: b Component B component_ports: component_port: out_b Port of Vector32 component_ifcs: component_ifc: ifc_b InterfaceView OutIfc component_ifc_ports: component_ifc_port: msg Port of Vector32 component_ifc_port: rdy Port of Vector1 component_ifc_port: val Port of Vector1 tmpvars: upblk_srcs: connections: connection: SubCompAttr CurCompAttr b out_b -> CurCompAttr out_a connection: IfcAttr SubCompAttr CurCompAttr b ifc_b msg -> IfcAttr CurCompAttr ifc_a msg connection: IfcAttr CurCompAttr ifc_a rdy -> IfcAttr SubCompAttr CurCompAttr b ifc_b rdy connection: IfcAttr SubCompAttr CurCompAttr b ifc_b val -> IfcAttr CurCompAttr ifc_a val endcomponent """ do_test( a ) __all__ = [s for s in dir() if s.startswith('test_')]
25.451327
90
0.690716
765
5,752
4.959477
0.129412
0.031629
0.04428
0.015814
0.722984
0.709278
0.709278
0.709278
0.691618
0.691618
0
0.01839
0.196453
5,752
225
91
25.564444
0.802466
0.045376
0
0.486842
0
0
0.002395
0
0
0
0
0
0.026316
1
0.118421
false
0.039474
0.092105
0
0.276316
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
864d337c82ef94b51d95ff66100a6168388f04c6
248
py
Python
src/rul/gbdt_trainer.py
LongxingTan/Survival_analysis
4e132edc79423e286fb9d4c61b42547ed758a15c
[ "MIT" ]
9
2020-01-13T10:01:56.000Z
2022-01-23T06:06:09.000Z
src/rul/gbdt_trainer.py
LongxingTan/Survival_analysis
4e132edc79423e286fb9d4c61b42547ed758a15c
[ "MIT" ]
3
2020-09-25T22:23:43.000Z
2022-02-10T02:09:10.000Z
src/rul/gbdt_trainer.py
LongxingTan/Survival_analysis
4e132edc79423e286fb9d4c61b42547ed758a15c
[ "MIT" ]
5
2020-12-28T01:40:42.000Z
2022-03-15T03:01:31.000Z
# https://tianchi.aliyun.com/notebook-ai/detail?spm=5176.12586969.1002.6.3f9a7084HBDkFB&postId=107251 class LGBTrainer(object): def __init__(self): pass def train(self, x_train, y_train, x_valid=None, y_valid=None): pass
24.8
101
0.705645
36
248
4.638889
0.75
0.107784
0
0
0
0
0
0
0
0
0
0.140097
0.165323
248
9
102
27.555556
0.666667
0.399194
0
0.4
0
0
0
0
0
0
0
0
0
1
0.4
false
0.4
0
0
0.6
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
4
865edf37cca78e50a9b9671f01e57c611d08cc82
162
py
Python
services/wallet/signer/exceptions.py
snario/zksnark-nft
1f2fc5c3f4885c50465bf839ef478ec048ec754d
[ "MIT" ]
76
2018-09-09T00:35:10.000Z
2022-02-22T16:54:34.000Z
services/wallet/signer/exceptions.py
ejhanrienaOut/zknifty
9285b573d4befe4ddeed9edba321af65c545bf39
[ "MIT" ]
null
null
null
services/wallet/signer/exceptions.py
ejhanrienaOut/zknifty
9285b573d4befe4ddeed9edba321af65c545bf39
[ "MIT" ]
17
2018-09-09T17:35:08.000Z
2022-02-22T07:25:22.000Z
class RequestFailedException(Exception): """request failed without success http status""" class WalletException(Exception): """wallet operation failed"""
32.4
52
0.759259
15
162
8.2
0.8
0
0
0
0
0
0
0
0
0
0
0
0.12963
162
5
53
32.4
0.87234
0.407407
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
1
0
0
4
867c39291a7d226f3704ce663d558c6588f987a8
197
py
Python
OP3/op3/__init__.py
gvx/op3
888ab5975a3f911fc9ed9afea983928de3110033
[ "MIT" ]
null
null
null
OP3/op3/__init__.py
gvx/op3
888ab5975a3f911fc9ed9afea983928de3110033
[ "MIT" ]
null
null
null
OP3/op3/__init__.py
gvx/op3
888ab5975a3f911fc9ed9afea983928de3110033
[ "MIT" ]
null
null
null
# based on http://opensoundcontrol.org/spec-1_0 from .payload_types import RGBA, MIDIMessage, ASAP from .messages import Element, Message, Bundle from .parser import parse __version__ = '0.0.1'
21.888889
50
0.771574
29
197
5.034483
0.758621
0
0
0
0
0
0
0
0
0
0
0.02924
0.13198
197
8
51
24.625
0.824561
0.228426
0
0
0
0
0.033557
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
867eaf02bbbd25be7d40654bc084d3dd4d311ee7
26
py
Python
python/testData/mover/emptyLine_afterUp.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/mover/emptyLine_afterUp.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/mover/emptyLine_afterUp.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
if True: a = 1 b = 2
5.2
9
0.384615
6
26
1.666667
1
0
0
0
0
0
0
0
0
0
0
0.153846
0.5
26
4
10
6.5
0.615385
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
86938ffd5a61560719e40314836bd9e543038b28
87
py
Python
scp4tw/center/apps.py
iblis17/scp4tw
faa07aa8c6ea4905db843fbccdb4057043bc9b9a
[ "MIT" ]
null
null
null
scp4tw/center/apps.py
iblis17/scp4tw
faa07aa8c6ea4905db843fbccdb4057043bc9b9a
[ "MIT" ]
null
null
null
scp4tw/center/apps.py
iblis17/scp4tw
faa07aa8c6ea4905db843fbccdb4057043bc9b9a
[ "MIT" ]
null
null
null
from django.apps import AppConfig class CenterConfig(AppConfig): name = 'center'
14.5
33
0.747126
10
87
6.5
0.9
0
0
0
0
0
0
0
0
0
0
0
0.172414
87
5
34
17.4
0.902778
0
0
0
0
0
0.068966
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
869712e600d5a57afef86843f8444e9409b06aa3
351
py
Python
members-backend/core/serializers.py
LombaAnderson/angular-django-integration
ea7a7bf639c484f6c812fc2657d8c82880759cf2
[ "MIT" ]
null
null
null
members-backend/core/serializers.py
LombaAnderson/angular-django-integration
ea7a7bf639c484f6c812fc2657d8c82880759cf2
[ "MIT" ]
null
null
null
members-backend/core/serializers.py
LombaAnderson/angular-django-integration
ea7a7bf639c484f6c812fc2657d8c82880759cf2
[ "MIT" ]
null
null
null
from rest_framework import serializers from .models import Member class MemberSerializer(serializers.ModelSerializer): class Meta: model = Member fields = ['id', 'name', 'surname', 'phone', 'photo'] class MemberSimpleSerializer(serializers.ModelSerializer): class Meta: model = Member fields = ['id', 'name']
27
60
0.678063
34
351
6.970588
0.558824
0.219409
0.261603
0.295359
0.489451
0.489451
0.489451
0.489451
0.489451
0
0
0
0.213675
351
12
61
29.25
0.858696
0
0
0.4
0
0
0.082621
0
0
0
0
0
0
1
0
false
0
0.2
0
0.6
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
86aaf79edf4b0dfa0ead7f6d90d7b88d14f5fc46
161
py
Python
A_MIAR3QtGui/MainWindow.py
FukukouSSJouhou/A_MIA_R3
970e80f6b71ba3c6eab013470b52f1e76fb04d3c
[ "MIT" ]
null
null
null
A_MIAR3QtGui/MainWindow.py
FukukouSSJouhou/A_MIA_R3
970e80f6b71ba3c6eab013470b52f1e76fb04d3c
[ "MIT" ]
null
null
null
A_MIAR3QtGui/MainWindow.py
FukukouSSJouhou/A_MIA_R3
970e80f6b71ba3c6eab013470b52f1e76fb04d3c
[ "MIT" ]
1
2022-03-29T03:30:36.000Z
2022-03-29T03:30:36.000Z
from PySide2 import QtCore class MainWindowConnect(QtCore.QObject): def __init__(self,parent=None): super(MainWindowConnect,self).__init__(parent)
23
54
0.763975
18
161
6.388889
0.722222
0
0
0
0
0
0
0
0
0
0
0.007246
0.142857
161
6
55
26.833333
0.826087
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
86d027b23ed92eb1e3c11dcd36430966ed5b2332
538
py
Python
stitcher.py
jarsba/meow
1120e6a97bcf549d3177b959fcae5375d05d5e47
[ "MIT" ]
null
null
null
stitcher.py
jarsba/meow
1120e6a97bcf549d3177b959fcae5375d05d5e47
[ "MIT" ]
1
2021-08-24T21:11:09.000Z
2021-08-25T14:54:48.000Z
stitcher.py
jarsba/meow
1120e6a97bcf549d3177b959fcae5375d05d5e47
[ "MIT" ]
null
null
null
import cv2 class Stitcher: def __init__(self, input_path_1: str, input_path_2: str, output: str = "output.mp4"): self.input_path_1 = input_path_1 self.input_path_2 = input_path_2 self.output = output @staticmethod def read_stream(self, input_path: str): capture = cv2.VideoCapture(input_path) return capture def stitch(self, video_stream_1, video_stream_2, output="output.mp4"): # TODO: finish pass def save_output(self): # TODO: finish pass
25.619048
89
0.644981
73
538
4.424658
0.356164
0.22291
0.160991
0.086687
0
0
0
0
0
0
0
0.030457
0.267658
538
20
90
26.9
0.78934
0.046468
0
0.142857
0
0
0.039216
0
0
0
0
0.05
0
1
0.285714
false
0.142857
0.071429
0
0.5
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
1
0
0
0
0
0
4
86e125c31f710969b3c59c9fb2bfaae6cbc52795
121
py
Python
abc/abc059/abc059b.py
c-yan/atcoder
940e49d576e6a2d734288fadaf368e486480a948
[ "MIT" ]
1
2019-08-21T00:49:34.000Z
2019-08-21T00:49:34.000Z
abc/abc059/abc059b.py
c-yan/atcoder
940e49d576e6a2d734288fadaf368e486480a948
[ "MIT" ]
null
null
null
abc/abc059/abc059b.py
c-yan/atcoder
940e49d576e6a2d734288fadaf368e486480a948
[ "MIT" ]
null
null
null
A = int(input()) B = int(input()) if A > B: print('GREATER') elif A < B: print('LESS') else: print('EQUAL')
12.1
20
0.520661
19
121
3.315789
0.578947
0.253968
0.222222
0
0
0
0
0
0
0
0
0
0.256198
121
9
21
13.444444
0.7
0
0
0
0
0
0.132231
0
0
0
0
0
0
1
0
false
0
0
0
0
0.375
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
86fb9a82459f258016626e1ff2aeaf4cbef4b9db
188
py
Python
lemon/protocol/Image/CdnImageUploadStatus.py
lemon-chat/lemon-server-python
5947b52b3c4535ae54fe2705a830db07fdaf741d
[ "MIT" ]
null
null
null
lemon/protocol/Image/CdnImageUploadStatus.py
lemon-chat/lemon-server-python
5947b52b3c4535ae54fe2705a830db07fdaf741d
[ "MIT" ]
null
null
null
lemon/protocol/Image/CdnImageUploadStatus.py
lemon-chat/lemon-server-python
5947b52b3c4535ae54fe2705a830db07fdaf741d
[ "MIT" ]
null
null
null
# automatically generated by the FlatBuffers compiler, do not modify # namespace: Image class CdnImageUploadStatus(object): Success = 0 FailUnknown = 1 FailInvalidToken = 2
18.8
68
0.739362
20
188
6.95
1
0
0
0
0
0
0
0
0
0
0
0.020134
0.207447
188
9
69
20.888889
0.912752
0.441489
0
0
1
0
0
0
0
0
0
0
0
1
0
false
0
0
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
811f1b4beeae3f90dcce91aa0b6deb6b7792e972
26,622
py
Python
Packs/Orca/Integrations/Orca/Orca_test.py
matan-xmcyber/content
7f02301c140b35956af3cd20cb8dfc64f34afb3e
[ "MIT" ]
null
null
null
Packs/Orca/Integrations/Orca/Orca_test.py
matan-xmcyber/content
7f02301c140b35956af3cd20cb8dfc64f34afb3e
[ "MIT" ]
null
null
null
Packs/Orca/Integrations/Orca/Orca_test.py
matan-xmcyber/content
7f02301c140b35956af3cd20cb8dfc64f34afb3e
[ "MIT" ]
null
null
null
"""Unit tests for the Orca integration (OrcaClient + fetch_incidents).

All HTTP traffic is stubbed with the ``requests_mock`` fixture; demisto
server state is stubbed via ``demistomock``.
"""
from datetime import datetime
import pytest
import json
from Orca import OrcaClient, ORCA_API_DNS_NAME, BaseClient, DEMISTO_OCCURRED_FORMAT, fetch_incidents
import demistomock as demisto


@pytest.fixture
def orca_client() -> OrcaClient:
    """Build an OrcaClient wired to the real API DNS name with a dummy key."""
    api_key = "dummy api key"
    client = BaseClient(
        base_url=ORCA_API_DNS_NAME,
        verify=True,
        headers={
            'Authorization': f'Bearer {api_key}'
        },
        proxy=True)
    return OrcaClient(client=client)


def test_get_alerts_by_type_malware_should_succeed(requests_mock, orca_client: OrcaClient) -> None:
    """get_alerts_by_filter(alert_type=...) returns the 'data' list items as-is."""
    mock_response = {
        "version": "0.1.0", "status": "success", "total_items": 6, "total_ungrouped_items": 6,
        "total_supported_items": 10000,
        "data": [
            {
                "type": "malware", "rule_id": "r1111ea1111", "type_string": "Malware",
                "type_key": "/test_eicar_file", "category": "Malware",
                "description": "Malware EICAR-Test-File found on asset",
                "details": "We have detected a file infected with EICAR-Test-File on the asset.",
                "recommendation": "Remediate the host and attend additional alerts on the host to close the infection path.",
                "alert_labels": ["malware_found"], "asset_category": "Storage",
                "cloud_provider_id": "111111111111", "cloud_provider": "aws",
                "cloud_account_id": "10b11111-1111-1111-91d5-11111de11111",
                "cloud_vendor_id": "111111111111", "account_name": "111111111111",
                "asset_unique_id": "storage_111111e11111_scan-me-s3-bucket-s8rrr",
                "asset_name": "scan-me-s3-bucket-s8rrr", "asset_type": "storage",
                "asset_type_string": "AWS S3 Bucket",
                "group_unique_id": "storage_111111e11111_scan-me-s3-bucket-s8rrr",
                "group_name": "scan-me-s3-bucket-s8rrr", "group_type": "storage",
                "group_type_string": "NonGroup", "group_val": "nongroup",
                "cluster_unique_id": "storage_111111e11111_scan-me-s3-bucket-s8rrr",
                "cluster_name": "scan-me-s3-bucket-s8rrr", "cluster_type": "storage",
                "level": 0, "asset_state": "enabled",
                "asset_labels": ["internet_facing", "pii"],
                "asset_vendor_id": "scan-me-s3-bucket-s8rrr",
                "asset_regions": ["us-east-1"], "asset_regions_names": ["N. Virginia"],
                "source": "test_eicar_file",
                "findings": {
                    "malware": [
                        {"type": "malware", "labels": ["malware_found"],
                         "virus_names": ["EICAR-Test-File"],
                         "modification_time": "2020-04-26T14:26:11+00:00",
                         "file": "/test_eicar_file",
                         "sha256": "275a021bbfb6489e54d471899f7db9d1663fc695ec2fe2a2c4538aabf651fd0f",
                         "sha1": "3395856ce81f2b7382dee72602f798b642f14140",
                         "md5": "44d88612fea8a8f36de82e1278abb02f",
                         "has_macro": False}
                    ]
                },
                "configuration": {"user_status": "closed",
                                  "jira_issue_link": "https://www.jira.com/myproject",
                                  "jira_issue": "TP-41"},
                "state": {"alert_id": "orca-59", "status": "in_progress",
                          "status_time": "2020-12-30T09:57:33+00:00",
                          "created_at": "2020-11-08T12:58:52+00:00",
                          "last_seen": "2020-12-30T10:35:46+00:00",
                          "score": 1, "severity": "compromised", "low_since": None,
                          "high_since": "2020-12-15T15:33:49+00:00", "in_verification": None},
                "priv": {"key": "3ea22222274111114b011111bb311111", "score": 1, "orig_score": 1,
                         "alert_id": "orca-59", "full_scan_time": "2020-12-30T10:35:46+00:00",
                         "organization_id": "11111111-1111-1111-1111-c111881c1111",
                         "organization_name": "Orca Security", "context": "data",
                         "account_action_id_ctx": {"data": "11111111-1111-1111-1111-8a529a011111"},
                         "scan_id_ctx": {"data": "11111111-1111-1111-1111-8a529a011111_111111111111_bucket-111111e11111-us-east-1"},
                         "first_seen": "2020-11-08T13:03:37+00:00"},
                "hdr": {"asset_category": "Storage",
                        "organization_id": "11111111-1111-1111-1111-c111881c1111",
                        "organization_name": "Orca Security", "cloud_provider": "aws",
                        "cloud_provider_id": "111111111111",
                        "cloud_account_id": "10b11111-1111-1111-91d5-11111de11111",
                        "context": "data",
                        "asset_unique_id": "storage_111111e11111_scan-me-s3-bucket-s8rrr",
                        "asset_type": "storage", "asset_type_string": "AWS S3 Bucket",
                        "asset_name": "scan-me-s3-bucket-s8rrr",
                        "group_unique_id": "storage_111111e11111_scan-me-s3-bucket-s8rrr",
                        "group_name": "scan-me-s3-bucket-s8rrr", "group_type": "storage",
                        "group_type_string": "NonGroup",
                        "cluster_unique_id": "storage_111111e11111_scan-me-s3-bucket-s8rrr",
                        "cluster_type": "storage", "cluster_name": "scan-me-s3-bucket-s8rrr",
                        "level": 0, "group_val": "nongroup",
                        "asset_vendor_id": "scan-me-s3-bucket-s8rrr",
                        "cloud_vendor_id": "111111111111", "asset_state": "enabled",
                        "account_name": "111111111111",
                        "asset_labels": ["internet_facing"]},
                "insert_time": "2020-12-30T10:45:21+00:00"
            }
        ]
    }
    requests_mock.get(f"{ORCA_API_DNS_NAME}/alerts?type=malware", json=mock_response)
    res = orca_client.get_alerts_by_filter(alert_type="malware")
    assert res[0] == mock_response['data'][0]


def test_get_alerts_by_non_existent_type_should_return_empty_list(requests_mock, orca_client: OrcaClient) -> None:
    """An unknown alert type yields an empty 'data' list and hence []."""
    NON_EXISTENT_ALERT_TYPE = "non_existent_alert_type"
    mock_response = {
        "version": "0.1.0", "status": "success", "total_items": 0, "total_ungrouped_items": 0,
        "total_supported_items": 10000, "data": []}
    requests_mock.get(f"{ORCA_API_DNS_NAME}/alerts?type={NON_EXISTENT_ALERT_TYPE}", json=mock_response)
    res = orca_client.get_alerts_by_filter(alert_type=NON_EXISTENT_ALERT_TYPE)
    assert res == []


def test_fetch_incidents_first_run_should_succeed(mocker, requests_mock, orca_client: OrcaClient) -> None:
    """On the first run (lastRun is None) every existing alert becomes an incident.

    Two alerts are returned by the stubbed /query/alerts endpoint; both must
    be converted to incidents named after their alert_id, with demisto_score 4.
    """
    mock_response = {
        "version": "0.1.0", "status": "success", "total_items": 58, "total_ungrouped_items": 58,
        "total_supported_items": 10000,
        "data": [
            {
                "type": "malware", "rule_id": "r1111ea1111", "type_string": "Malware",
                "type_key": "/test_eicar_file", "category": "Malware",
                "description": "Malware EICAR-Test-File found on asset",
                "details": "We have detected a file infected with EICAR-Test-File on the asset.",
                "recommendation": "Remediate the host and attend additional alerts on the host to close the infection path.",
                "alert_labels": ["malware_found"], "asset_category": "Storage",
                "cloud_provider_id": "111111111111", "cloud_provider": "aws",
                "cloud_account_id": "10b11111-1111-1111-91d5-11111de11111",
                "cloud_vendor_id": "111111111111", "account_name": "111111111111",
                "asset_unique_id": "storage_111111e11111_scan-me-s3-bucket-s8rrr",
                "asset_name": "scan-me-s3-bucket-s8rrr", "asset_type": "storage",
                "asset_type_string": "AWS S3 Bucket",
                "group_unique_id": "storage_111111e11111_scan-me-s3-bucket-s8rrr",
                "group_name": "scan-me-s3-bucket-s8rrr", "group_type": "storage",
                "group_type_string": "NonGroup", "group_val": "nongroup",
                "cluster_unique_id": "storage_111111e11111_scan-me-s3-bucket-s8rrr",
                "cluster_name": "scan-me-s3-bucket-s8rrr", "cluster_type": "storage",
                "level": 0, "asset_state": "enabled",
                "asset_labels": ["internet_facing", "pii"],
                "asset_vendor_id": "scan-me-s3-bucket-s8rrr",
                "asset_regions": ["us-east-1"], "asset_regions_names": ["N. Virginia"],
                "source": "test_eicar_file",
                "findings": {
                    "malware": [
                        {"type": "malware", "labels": ["malware_found"],
                         "virus_names": ["EICAR-Test-File"],
                         "modification_time": "2020-04-26T14:26:11+00:00",
                         "file": "/test_eicar_file",
                         "sha256": "275a021bbfb6489e54d471899f7db9d1663fc695ec2fe2a2c4538aabf651fd0f",
                         "sha1": "3395856ce81f2b7382dee72602f798b642f14140",
                         "md5": "44d88612fea8a8f36de82e1278abb02f",
                         "has_macro": False}
                    ]
                },
                "configuration": {"user_status": "closed",
                                  "jira_issue_link": "https://www.jira.com/myproject",
                                  "jira_issue": "TP-41"},
                "state": {"alert_id": "orca-59", "status": "in_progress",
                          "status_time": "2020-12-30T09:57:33+00:00",
                          "created_at": "2020-11-08T12:58:52+00:00",
                          "last_seen": "2020-12-30T10:35:46+00:00",
                          "score": 1, "severity": "compromised", "low_since": None,
                          "high_since": "2020-12-15T15:33:49+00:00", "in_verification": None},
                "priv": {"key": "3ea22222274111114b011111bb311111", "score": 1, "orig_score": 1,
                         "alert_id": "orca-59", "full_scan_time": "2020-12-30T10:35:46+00:00",
                         "organization_id": "11111111-1111-1111-1111-c111881c1111",
                         "organization_name": "Orca Security", "context": "data",
                         "account_action_id_ctx": {"data": "11111111-1111-1111-1111-8a529a011111"},
                         "scan_id_ctx": {"data": "11111111-1111-1111-1111-8a529a011111_111111111111_bucket-111111e11111-us-east-1"},
                         "first_seen": "2020-11-08T13:03:37+00:00"},
                "hdr": {"asset_category": "Storage",
                        "organization_id": "11111111-1111-1111-1111-c111881c1111",
                        "organization_name": "Orca Security", "cloud_provider": "aws",
                        "cloud_provider_id": "111111111111",
                        "cloud_account_id": "10b11111-1111-1111-91d5-11111de11111",
                        "context": "data",
                        "asset_unique_id": "storage_111111e11111_scan-me-s3-bucket-s8rrr",
                        "asset_type": "storage", "asset_type_string": "AWS S3 Bucket",
                        "asset_name": "scan-me-s3-bucket-s8rrr",
                        "group_unique_id": "storage_111111e11111_scan-me-s3-bucket-s8rrr",
                        "group_name": "scan-me-s3-bucket-s8rrr", "group_type": "storage",
                        "group_type_string": "NonGroup",
                        "cluster_unique_id": "storage_111111e11111_scan-me-s3-bucket-s8rrr",
                        "cluster_type": "storage", "cluster_name": "scan-me-s3-bucket-s8rrr",
                        "level": 0, "group_val": "nongroup",
                        "asset_vendor_id": "scan-me-s3-bucket-s8rrr",
                        "cloud_vendor_id": "111111111111", "asset_state": "enabled",
                        "account_name": "111111111111",
                        "asset_labels": ["internet_facing"]},
                "insert_time": "2020-12-30T10:45:21+00:00"
            },
            {
                "type": "malware", "rule_id": "r1111ea1111", "type_string": "Malware",
                "type_key": "/usr/local/bin/eicarcom2.zip", "category": "Malware",
                "description": "Malware EICAR-Test-File found on asset",
                "details": "We have detected a file infected with EICAR-Test-File on the asset.",
                "recommendation": "Remediate the host and attend additional alerts on the host to close the infection path.",
                "alert_labels": ["malware_found"], "asset_category": "Image",
                "cloud_provider_id": "111111111111", "cloud_provider": "aws",
                "cloud_account_id": "10b11111-1111-1111-91d5-11111de11111",
                "cloud_vendor_id": "111111111111", "account_name": "111111111111",
                "asset_unique_id": "vmimage_111111e11111_ami-11111c111111d7911",
                "asset_name": "my_test_image-1231asdasjdn", "asset_type": "vmimage",
                "asset_type_string": "VM Image",
                "group_unique_id": "vmimage_111111e11111_ami-11111c111111d7911",
                "group_name": "my_test_image-1231asdasjdn", "group_type": "vmimage",
                "group_type_string": "NonGroup", "group_val": "nongroup",
                "cluster_unique_id": "vmimage_111111e11111_ami-11111c111111d7911",
                "cluster_name": "my_test_image-1231asdasjdn", "cluster_type": "vmimage",
                "level": 0, "asset_vendor_id": "ami-11111c111111d7911",
                "asset_distribution_name": "Ubuntu", "asset_distribution_version": "18.04",
                "asset_role_names": ["mysql", "ssh", "haproxy", "postgresql"],
                "source": "eicarcom2.zip",
                "findings": {
                    "malware": [
                        {"type": "malware", "labels": ["malware_found"],
                         "virus_names": ["EICAR-Test-File"],
                         "modification_time": "2019-07-09T21:16:26+00:00",
                         "file": "/usr/local/bin/eicarcom2.zip",
                         "sha256": "e1105070ba828007508566e28a2b8d4c65d192e9eaf3b7868382b7cae747b397",
                         "sha1": "bec1b52d350d721c7e22a6d4bb0a92909893a3ae",
                         "md5": "e4968ef99266df7c9a1f0637d2389dab",
                         "has_macro": False}
                    ]
                },
                "configuration": {},
                "state": {"alert_id": "orca-242", "status": "open",
                          "status_time": "2020-11-08T12:58:54+00:00",
                          "created_at": "2020-11-08T12:58:54+00:00",
                          "last_seen": "2020-12-30T10:35:48+00:00",
                          "score": 1, "severity": "compromised", "low_since": None,
                          "high_since": "2020-11-08T13:04:32+00:00", "in_verification": None},
                "priv": {"key": "3696080647d937b881eee2cfdd6c3943", "score": 1, "orig_score": 1,
                         "alert_id": "orca-242", "full_scan_time": "2020-12-30T10:35:48+00:00",
                         "organization_id": "11111111-1111-1111-1111-c111881c1111",
                         "organization_name": "Orca Security", "context": "data",
                         "account_action_id_ctx": {"data": "11111111-1111-1111-1111-8a529a011111"},
                         "scan_id_ctx": {"data": "11111111-1111-1111-1111-8a529a011111_111111111111_ami-11111c111111d7911"},
                         "first_seen": "2020-11-08T13:04:32+00:00"},
                "hdr": {"asset_category": "Image",
                        "organization_id": "11111111-1111-1111-1111-c111881c1111",
                        "organization_name": "Orca Security", "cloud_provider": "aws",
                        "cloud_provider_id": "111111111111",
                        "cloud_account_id": "10b11111-1111-1111-91d5-11111de11111",
                        "context": "data",
                        "asset_unique_id": "vmimage_111111e11111_ami-11111c111111d7911",
                        "asset_type": "vmimage", "asset_type_string": "VM Image",
                        "asset_name": "my_test_image-1231asdasjdn",
                        "group_unique_id": "vmimage_111111e11111_ami-11111c111111d7911",
                        "group_name": "my_test_image-1231asdasjdn", "group_type": "vmimage",
                        "group_type_string": "NonGroup",
                        "cluster_unique_id": "vmimage_111111e11111_ami-11111c111111d7911",
                        "cluster_type": "vmimage", "cluster_name": "my_test_image-1231asdasjdn",
                        "level": 0, "group_val": "nongroup",
                        "asset_vendor_id": "ami-11111c111111d7911",
                        "cloud_vendor_id": "111111111111",
                        "account_name": "111111111111"},
                "insert_time": "2020-12-30T10:44:11+00:00"
            }
        ]
    }
    # lastRun is None => fetch_incidents treats this as the first run.
    mocker.patch.object(demisto, 'getLastRun', return_value={'lastRun': None})
    requests_mock.get(f"{ORCA_API_DNS_NAME}/query/alerts", json=mock_response)
    fetched_incidents = fetch_incidents(orca_client, max_fetch=20, pull_existing_alerts=True)
    assert fetched_incidents[0]['name'] == 'orca-59'
    loaded_raw_alert = json.loads(fetched_incidents[0]['rawJSON'])
    assert loaded_raw_alert['demisto_score'] == 4
    assert fetched_incidents[1]['name'] == 'orca-242'
    loaded_raw_alert = json.loads(fetched_incidents[1]['rawJSON'])
    assert loaded_raw_alert['demisto_score'] == 4


def test_fetch_incidents_not_first_run_return_empty(mocker, orca_client: OrcaClient) -> None:
    # validates that fetch-incidents is returning an empty list when it is not the first run
    mocker.patch.object(demisto, 'getLastRun',
                        return_value={'lastRun': datetime.now().strftime(DEMISTO_OCCURRED_FORMAT),
                                      "incidents_for_next_run": []})
    fetched_incidents = fetch_incidents(orca_client, max_fetch=20, pull_existing_alerts=True)
    assert fetched_incidents == []


def test_get_asset_should_succeed(requests_mock, orca_client: OrcaClient) -> None:
    """get_asset returns the raw asset payload for a known asset id."""
    mock_response = {
        "type": "vmimage", "asset_category": "Image", "asset_subcategory": "VM Image",
        "cloud_provider_id": "111111111111", "cloud_provider": "aws",
        "cloud_account_id": "10b11111-1111-1111-91d5-11111de11111",
        "cloud_vendor_id": "111111111111", "account_name": "111111111111",
        "asset_unique_id": "vmimage_111111e11111_ami-11111c111111d7911",
        "asset_name": "my_test_image-1231asdasjdn", "asset_type": "vmimage",
        "asset_type_string": "VM Image",
        "group_unique_id": "vmimage_111111e11111_ami-11111c111111d7911",
        "group_name": "my_test_image-1231asdasjdn", "group_type": "vmimage",
        "group_type_string": "NonGroup",
        "cluster_unique_id": "vmimage_111111e11111_ami-11111c111111d7911",
        "cluster_name": "my_test_image-1231asdasjdn", "cluster_type": "vmimage",
        "level": 0, "asset_vendor_id": "ami-11111c111111d7911",
        "internet_facing": False, "internet_facing_new": False,
        "create_time": "2020-07-28T09:10:01+00:00", "container_id": "main",
        "compute": {
            "distribution_name": "Ubuntu", "distribution_version": "18.04",
            "disks": [{"size": "7.75 GB", "used": "2.06 GB"}],
            "total_disks_bytes": 8326123520,
            "roles": [
                {"type": "database", "name": "mysql", "is_public": False,
                 "detected_evidence": ["/var/lib/mysql/mysqldb2", "/var/lib/mysql/mysqldb1"]},
                {"type": "ssh", "name": "ssh", "is_public": False},
                {"type": "web", "name": "haproxy", "is_public": False},
                {"type": "database", "name": "postgresql", "is_public": False,
                 "detected_evidence": ["/var/lib/postgresql/10/main/base/1",
                                       "/var/lib/postgresql/10/main",
                                       "/var/lib/postgresql/10/main/base/13017",
                                       "/var/lib/postgresql/10/main/base/16384",
                                       "/var/lib/postgresql/10/main/base/13018"]}
            ]
        },
        "vmimage": {"image_id": "ami-11111c111111d7911", "image_owner_id": "111111111111",
                    "image_name": "my_test_image-1231asdasjdn", "image_description": ""},
        "configuration": {},
        "state": {"status": "exists", "status_time": "2020-11-08T13:04:34+00:00",
                  "created_at": "2020-11-08T13:04:34+00:00",
                  "last_seen": "2020-12-30T10:44:11+00:00", "score": 1,
                  "severity": "compromised", "safe_since": None,
                  "unsafe_since": "2020-11-08T13:04:34+00:00"}
    }
    requests_mock.get(f"{ORCA_API_DNS_NAME}/assets/vmimage_111111e11111_ami-11111c111111d7911", json=mock_response)
    res = orca_client.get_asset(asset_unique_id="vmimage_111111e11111_ami-11111c111111d7911")
    assert res == mock_response


def test_get_asset_nonexistent(requests_mock, orca_client: OrcaClient) -> None:
    """An 'error' payload from the API maps to the sentinel "Asset Not Found"."""
    mock_response = {"error": ""}
    requests_mock.get(f"{ORCA_API_DNS_NAME}/assets/1234567", json=mock_response)
    res = orca_client.get_asset(asset_unique_id="1234567")
    assert res == "Asset Not Found"


def test_test_module_success(requests_mock, orca_client: OrcaClient) -> None:
    """A successful /user/action? response makes validate_api_key return "ok"."""
    mock_response = {
        "status": "success",
        "data": {
            "user_id": "77777634-7777-7777-7777-f49f77777777",
            "email": "system_testing@orca.security",
            "first": "System", "last": "Testing", "full_name": "System Testing",
            "profile_picture": "",
            "organization_id": "e3dab69a-5555-5555-5555-c5b8881cd2fe",
            "organization_name": "Orca Security",
            "feature_flags": {},
            "has_cloud_accounts": True,
            "has_scanned_cloud_accounts": True
        }
    }
    requests_mock.get(f"{ORCA_API_DNS_NAME}/user/action?", json=mock_response)
    res = orca_client.validate_api_key()
    assert res == "ok"


def test_test_module_fail(requests_mock, orca_client: OrcaClient) -> None:
    """An invalid-token response yields the human-readable failure message."""
    mock_response = {
        "detail": "Given token not valid for any token type",
        "code": "token_not_valid",
        "messages": [
            {"token_class": "AccessTokenWithExpiration", "token_type": "access",
             "message": "Token is invalid or expired"}
        ],
        "status_code": 403
    }
    requests_mock.get(f"{ORCA_API_DNS_NAME}/user/action?", json=mock_response)
    res = orca_client.validate_api_key()
    # NOTE(review): "becasue" is misspelled, but the string must match the
    # implementation's message exactly, so it is left as-is here.
    assert res == "Test failed becasue the Orca API key that was entered is invalid, please provide a valid API key"
44.742857
125
0.485989
2,344
26,622
5.242747
0.149744
0.020832
0.018228
0.031898
0.798438
0.778989
0.755961
0.720238
0.675645
0.672227
0
0.159501
0.391931
26,622
594
126
44.818182
0.599642
0.003306
0
0.648601
0
0
0.427672
0.177145
0
0
0
0
0.019231
1
0.015734
false
0
0.008741
0
0.026224
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
812b9ce9ae3a1fa3685d90f524bbba8c1aa76074
132
py
Python
rate/apps.py
muneneee/work
b16e273bd8ee626b41cdbb5366013b76ff6c373a
[ "MIT" ]
null
null
null
rate/apps.py
muneneee/work
b16e273bd8ee626b41cdbb5366013b76ff6c373a
[ "MIT" ]
6
2021-03-19T11:22:07.000Z
2022-02-10T12:03:40.000Z
rate/apps.py
muneneee/work
b16e273bd8ee626b41cdbb5366013b76ff6c373a
[ "MIT" ]
null
null
null
from django.apps import AppConfig


class RateConfig(AppConfig):
    """Application configuration for the ``rate`` Django app."""

    name = 'rate'

    def ready(self):
        # Importing the signals module registers its signal handlers as a
        # side effect once Django's app registry is fully populated.
        from rate import signals  # noqa: F401
16.5
33
0.681818
16
132
5.625
0.8125
0
0
0
0
0
0
0
0
0
0
0
0.234848
132
8
34
16.5
0.891089
0
0
0
0
0
0.030075
0
0
0
0
0
0
1
0.2
false
0
0.4
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
813782e57a6454b60ba1203e5b4409c5d31f2273
108
py
Python
pybots/src/magpy/magpy_backend/test.py
aivian/robots
6827886916e36432ce1d806f0a78edef6c9270d9
[ "MIT" ]
null
null
null
pybots/src/magpy/magpy_backend/test.py
aivian/robots
6827886916e36432ce1d806f0a78edef6c9270d9
[ "MIT" ]
null
null
null
pybots/src/magpy/magpy_backend/test.py
aivian/robots
6827886916e36432ce1d806f0a78edef6c9270d9
[ "MIT" ]
1
2021-09-24T17:08:30.000Z
2021-09-24T17:08:30.000Z
import magpy_backend

# Repeatedly query the magnetic-model backend with one fixed request
# ('WMM.COF' coefficients, zero position, date 2015-01-01).
for _ in range(1000):
    result = magpy_backend.magpy_backend('WMM.COF', 0, 0, 0, 2015, 1, 1)
21.6
63
0.703704
21
108
3.47619
0.666667
0.493151
0
0
0
0
0
0
0
0
0
0.141304
0.148148
108
4
64
27
0.652174
0
0
0
0
0
0.064815
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
8142625755ae5070679653dad8e6b654d64f2817
140
py
Python
peach/models/quartznet_recognizer.py
sergevkim/SpeechRecognition
c81024a3c9e6b022c7a44777bea3e6bc4b3cc35a
[ "MIT" ]
1
2020-10-11T19:04:35.000Z
2020-10-11T19:04:35.000Z
peach/models/quartznet_recognizer.py
sergevkim/SpeechRecognition
c81024a3c9e6b022c7a44777bea3e6bc4b3cc35a
[ "MIT" ]
2
2020-10-16T07:46:33.000Z
2020-10-18T18:39:07.000Z
peach/models/quartznet_recognizer.py
sergevkim/SpeechRecognition
c81024a3c9e6b022c7a44777bea3e6bc4b3cc35a
[ "MIT" ]
null
null
null
from torch.nn import Module


class QuartzNetRecognizer(Module):
    """Skeleton QuartzNet speech-recognition model.

    Layer construction and the forward pass are still unimplemented
    placeholders.
    """

    def __init__(self):
        # Bug fix: nn.Module's machinery (_parameters, _buffers, _modules,
        # hooks) is set up by Module.__init__; without this call the instance
        # breaks as soon as parameters()/state_dict()/submodule assignment is
        # used.
        super().__init__()

    def forward(self):
        # Placeholder: no computation implemented yet; returns None.
        pass
12.727273
34
0.642857
16
140
5.375
0.75
0.186047
0
0
0
0
0
0
0
0
0
0
0.285714
140
10
35
14
0.86
0
0
0.333333
0
0
0
0
0
0
0
0
0
1
0.333333
false
0.333333
0.166667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
4
d4ae716bd2621324198aa969fe60e79877aee2ee
109
py
Python
pysurf/spp/model/model.py
MFSJMenger/pysurf
99c6a94d4cb5046f16a0961b907061d989ffb6dc
[ "Apache-2.0" ]
7
2020-10-28T13:46:08.000Z
2021-05-27T06:41:56.000Z
pysurf/spp/model/model.py
MFSJMenger/pysurf
99c6a94d4cb5046f16a0961b907061d989ffb6dc
[ "Apache-2.0" ]
2
2020-10-27T19:15:12.000Z
2020-10-27T19:15:25.000Z
pysurf/spp/model/model.py
MFSJMenger/pysurf
99c6a94d4cb5046f16a0961b907061d989ffb6dc
[ "Apache-2.0" ]
2
2021-04-15T05:54:30.000Z
2022-02-08T00:10:10.000Z
from abc import ABC, abstractmethod


class Model(ABC):
    """Abstract base class defining the ``get`` interface for models."""

    @abstractmethod
    def get(self):
        """Must be overridden by concrete subclasses; the base does nothing."""
        ...
12.111111
35
0.651376
13
109
5.461538
0.769231
0.478873
0
0
0
0
0
0
0
0
0
0
0.275229
109
8
36
13.625
0.898734
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0.2
0.2
0
0.6
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
4
d4b03e894832c5d7260c51b4ea9128f3e10215ee
327
py
Python
Desafios/Mundo 3/ex074.py
ZaikoXander/Python
7e7243edb02dd33991c5f63f02c983ad060fc3ca
[ "Unlicense" ]
null
null
null
Desafios/Mundo 3/ex074.py
ZaikoXander/Python
7e7243edb02dd33991c5f63f02c983ad060fc3ca
[ "Unlicense" ]
null
null
null
Desafios/Mundo 3/ex074.py
ZaikoXander/Python
7e7243edb02dd33991c5f63f02c983ad060fc3ca
[ "Unlicense" ]
null
null
null
from random import randint

# Draw five random integers in [-999, 999].
numeros = tuple(randint(-999, 999) for _ in range(5))

# Show the draws separated by vertical bars, then the min and max.
print('Os números sorteados foram: ', end='')
for valor in numeros:
    print(f'| {valor} ', end='')
print('|', end='\n')
print(f'O menor valor é {min(numeros)} e o maior é {max(numeros)}')
29.727273
110
0.636086
51
327
4.078431
0.490196
0.240385
0.3125
0.384615
0.3125
0.3125
0.3125
0.3125
0.3125
0.3125
0
0.107914
0.149847
327
10
111
32.7
0.640288
0
0
0
0
0
0.293578
0
0
0
0
0
0
1
0
false
0
0.142857
0
0.142857
0.571429
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
4
d4b2e2a88b394e145386346e42b82631170c0c31
226
py
Python
examples/example_ql_mf.py
markjunior/RL_policy
b88f191e8064342e7723df241a946d72bbfe5298
[ "MIT" ]
null
null
null
examples/example_ql_mf.py
markjunior/RL_policy
b88f191e8064342e7723df241a946d72bbfe5298
[ "MIT" ]
null
null
null
examples/example_ql_mf.py
markjunior/RL_policy
b88f191e8064342e7723df241a946d72bbfe5298
[ "MIT" ]
1
2021-01-28T12:49:19.000Z
2021-01-28T12:49:19.000Z
import sys

# Make the repository root importable when running this example directly.
sys.path.append('./../')

from q_learning.ql_mf import q_learning_model_free

# Model-free agent on MountainCar; num_s=20 (presumably the state-space
# discretisation size — confirm in q_learning.ql_mf).
model = q_learning_model_free(env_name='MountainCar-v0', num_s=20)
model.run(mode='q_learning')
# Alternative algorithm for the same setup:
# model.run(mode='sarsa')
25.111111
71
0.778761
39
226
4.153846
0.564103
0.222222
0.17284
0.222222
0
0
0
0
0
0
0
0.014286
0.070796
226
8
72
28.25
0.757143
0.123894
0
0
0
0
0.147959
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
d4c0715d0361b109b2bd901ab261c21294d0e2c2
149
py
Python
24. Exam Prep/exam_19dec/project/supply/water_supply.py
elenaborisova/Python-OOP
584882c08f84045b12322917f0716c7c7bd9befc
[ "MIT" ]
1
2021-03-27T16:56:30.000Z
2021-03-27T16:56:30.000Z
24. Exam Prep/exam_19dec/project/supply/water_supply.py
elenaborisova/Python-OOP
584882c08f84045b12322917f0716c7c7bd9befc
[ "MIT" ]
null
null
null
24. Exam Prep/exam_19dec/project/supply/water_supply.py
elenaborisova/Python-OOP
584882c08f84045b12322917f0716c7c7bd9befc
[ "MIT" ]
1
2021-03-15T14:50:39.000Z
2021-03-15T14:50:39.000Z
from exam_19dec.project.supply.supply import Supply


class WaterSupply(Supply):
    """Concrete :class:`Supply` variant for water."""

    def __init__(self):
        # A water supply always carries a fixed needs_increase of 40.
        super().__init__(needs_increase=40)
21.285714
51
0.744966
19
149
5.315789
0.789474
0
0
0
0
0
0
0
0
0
0
0.031746
0.154362
149
6
52
24.833333
0.769841
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
d4e69ced5ad38a9922dee4e91db668d18d241565
153
py
Python
src/missions_lib/__init__.py
phopley/rodney_missions
4222314960aa6e9f77b10bd2c64607f9c0ed5eea
[ "Apache-2.0" ]
1
2021-04-02T04:37:44.000Z
2021-04-02T04:37:44.000Z
src/missions_lib/__init__.py
phopley/rodney_missions
4222314960aa6e9f77b10bd2c64607f9c0ed5eea
[ "Apache-2.0" ]
3
2019-01-14T10:45:01.000Z
2019-01-28T10:12:02.000Z
src/missions_lib/__init__.py
phopley/rodney_missions
4222314960aa6e9f77b10bd2c64607f9c0ed5eea
[ "Apache-2.0" ]
1
2021-04-02T04:37:45.000Z
2021-04-02T04:37:45.000Z
# __init__.py from .take_message_to import Mission1StateMachine from .greet_all import Mission2StateMachine from .go_home import Mission4StateMachine
19.125
49
0.856209
18
153
6.833333
0.777778
0
0
0
0
0
0
0
0
0
0
0.022059
0.111111
153
7
50
21.857143
0.882353
0.071895
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
d4f2b2d60dc0bbe60e71eb885a7e104d2c110c2a
1,092
py
Python
bcrypt/__init__.py
propellr/python-bcrypt
ad2f53aa36ffbe3a51edca12c97371ec154f9354
[ "ISC" ]
5
2016-04-29T08:15:05.000Z
2021-01-23T21:49:44.000Z
bcrypt/__init__.py
propellr/python-bcrypt
ad2f53aa36ffbe3a51edca12c97371ec154f9354
[ "ISC" ]
2
2021-06-08T21:32:02.000Z
2022-03-12T00:29:24.000Z
bcrypt/__init__.py
propellr/python-bcrypt
ad2f53aa36ffbe3a51edca12c97371ec154f9354
[ "ISC" ]
1
2017-07-15T22:15:56.000Z
2017-07-15T22:15:56.000Z
"""OpenBSD Blowfish password hashing.

This module implements the OpenBSD Blowfish password hashing algorithm,
as described in "A Future-Adaptable Password Scheme" by Niels Provos and
David Mazieres.

This system hashes passwords using a version of Bruce Schneier's
Blowfish block cipher with modifications designed to raise the cost of
off-line password cracking. The computation cost of the algorithm is
parametrised, so it can be increased as computers get faster.

Passwords are hashed using the hashpw() routine:

  hashpw(password, salt) -> hashed_password

Salts for the second parameter may be randomly generated using the
gensalt() function:

  gensalt(log_rounds = 12) -> random_salt

The parameter "log_rounds" defines the complexity of the hashing. The
cost increases as 2**log_rounds.
"""

import os

# hashpw() and encode_salt() are provided by the C extension module.
from bcrypt._bcrypt import *


def gensalt(log_rounds = 12):
    """Generate a random text salt for use with hashpw(). "log_rounds"
    defines the complexity of the hashing, increasing the cost as
    2**log_rounds."""
    # 16 bytes of OS-level randomness; log_rounds is clamped to [4, 31]
    # before being encoded into the salt string.
    return encode_salt(os.urandom(16), min(max(log_rounds, 4), 31))
32.117647
70
0.782051
166
1,092
5.078313
0.554217
0.074733
0.054567
0.071174
0.097272
0.097272
0.097272
0.097272
0
0
0
0.011879
0.152015
1,092
33
71
33.090909
0.898488
0.858974
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.5
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
4
be2bd9bcd1dd9d2f444c870a5674e9acf9e60ec9
153
py
Python
images2pdf.py
kweusuf/Manga-Scraper
446dc3fa083e789f032623c7a040f1ea4f317d52
[ "MIT" ]
null
null
null
images2pdf.py
kweusuf/Manga-Scraper
446dc3fa083e789f032623c7a040f1ea4f317d52
[ "MIT" ]
null
null
null
images2pdf.py
kweusuf/Manga-Scraper
446dc3fa083e789f032623c7a040f1ea4f317d52
[ "MIT" ]
null
null
null
import img2pdf, os

# Bundle every .jpg in the current directory into "Chapter 11.pdf", one page
# per image.  Page files are assumed to be numbered ("1.jpg" ... "12.jpg") —
# TODO confirm.  Sorting by (length, name) gives natural numeric order; the
# original key=len alone left equal-length names (e.g. "2.jpg" vs "3.jpg") in
# arbitrary os.listdir() order, so pages could come out shuffled.
with open("Chapter 11.pdf", "wb") as f:
    f.write(img2pdf.convert(sorted(
        (name for name in os.listdir() if name.endswith(".jpg")),
        key=lambda name: (len(name), name))))
51
93
0.673203
29
153
3.551724
0.793103
0
0
0
0
0
0
0
0
0
0
0.029851
0.124183
153
3
93
51
0.738806
0
0
0
0
0
0.12987
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
076bf8f990154a66f9111dc354046ed5db8d8e0d
8
py
Python
__init__.py
Laende/Bacheloroppgave-droneteknologi
15d9b2cd0eeba47fd2e9615fb01d598516826194
[ "MIT" ]
null
null
null
__init__.py
Laende/Bacheloroppgave-droneteknologi
15d9b2cd0eeba47fd2e9615fb01d598516826194
[ "MIT" ]
11
2021-07-04T13:46:30.000Z
2021-07-13T08:30:40.000Z
__init__.py
Laende/Bacheloroppgave-droneteknologi
15d9b2cd0eeba47fd2e9615fb01d598516826194
[ "MIT" ]
1
2021-07-04T10:45:30.000Z
2021-07-04T10:45:30.000Z
# Package initializer — intentionally empty.
4
7
0.5
1
8
4
1
0
0
0
0
0
0
0
0
0
0
0
0.375
8
1
8
8
0.8
0.5
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
076f34f1db98255000ac1f3e917de83904fcc282
3,254
py
Python
curve_pg.py
Templarrr/hearthtools
6e13611c6f76e198c00802afadb6e360fa42d6ef
[ "CC0-1.0" ]
1
2015-11-10T20:14:36.000Z
2015-11-10T20:14:36.000Z
curve_pg.py
Templarrr/hearthtools
6e13611c6f76e198c00802afadb6e360fa42d6ef
[ "CC0-1.0" ]
null
null
null
curve_pg.py
Templarrr/hearthtools
6e13611c6f76e198c00802afadb6e360fa42d6ef
[ "CC0-1.0" ]
1
2020-11-05T11:06:37.000Z
2020-11-05T11:06:37.000Z
import json import random from perfect_curve_monte_carlo import ManaCurve, ImitateGame cached_file = 'mana_curves_json' try: with open(cached_file, 'r') as f: mc_results_cache = json.load(f) best_mana_unspent = min(mc_results_cache.values()) best_mc = mc_results_cache.keys()[mc_results_cache.values().index(best_mana_unspent)] except Exception as e: best_mana_unspent = 200 best_mc = '-' mc_results_cache = { best_mc:best_mana_unspent } print 'best cached curve %s %f' % (best_mc, best_mana_unspent) print 'mana curves processed %d' % len(mc_results_cache) for i in range(1000): mc = ManaCurve() while not mc.is_unusable(): if str(mc) in mc_results_cache: mc.push_mana_curve() continue mc_results = [] for i in range(1000): ig = ImitateGame(mc.get_deck(), 12) mc_results.append(ig.imitate_game()) avg_mana_unspent = sum(mc_results)/float(len(mc_results)) if avg_mana_unspent<best_mana_unspent: best_mana_unspent = avg_mana_unspent best_mc = str(mc) print 'Average mana lost for %s is %f' % (best_mc, best_mana_unspent) mc_results_cache[str(mc)] = avg_mana_unspent if random.randint(1,10): with open(cached_file, 'w') as f: json.dump(mc_results_cache, f) mc.push_mana_curve() # With hero power # 0:0:0:9:7:3:2:4:5:0:0 is 1.511200 - Shaman or Paladin deck? 
Something that can use double hp in the start # More realistic variants # Average mana lost for 0:1:2:4:8:5:2:3:3:1:1 is 1.795500 # Average mana lost for 0:1:2:2:9:6:2:2:4:1:1 is 1.746900 # Average mana lost for 0:1:2:2:8:7:2:2:4:1:1 is 1.743600 # Without hero power # Average mana lost for 0:1:1:5:4:8:4:4:0:2:1 is 6.930100 # Average mana lost for 0:1:1:4:8:4:4:4:2:0:2 is 6.683000 # Average mana lost for 0:0:0:3:5:7:4:4:2:2:3 is 6.632000 # Average mana lost for 0:0:3:5:5:6:4:2:3:1:1 is 6.547000 # Average mana lost for 0:0:3:5:5:5:4:3:3:0:2 is 6.426000 # Average mana lost for 0:0:1:6:5:5:5:3:3:0:2 is 6.343000 # Average mana lost for 0:0:2:4:6:7:4:3:1:1:2 is 6.280000 # Average mana lost for 0:0:1:5:6:6:5:3:1:1:2 is 6.128000 # Average mana lost for 0:0:1:5:5:6:3:3:2:2:3 is 6.047000 # Average mana lost for 0:0:0:6:4:7:3:3:2:2:3 is 6.045000 # Average mana lost for 0:0:3:5:5:7:2:3:1:2:2 is 5.989000 # Average mana lost for 0:0:2:5:7:5:2:3:1:2:3 is 5.952000 # Average mana lost for 0:0:2:6:5:3:5:2:2:1:4 is 5.930000 # Average mana lost for 0:0:0:8:5:3:4:2:2:1:5 is 5.898000 # Average mana lost for 0:0:0:7:5:4:3:3:2:1:5 is 5.854000 # With double penalty on skipped turns # Average mana lost for 0:1:2:5:7:5:4:2:2:2:0 is 9.989000 # Average mana lost for 0:1:2:5:7:4:4:3:2:2:0 is 9.871000 # Average mana lost for 0:1:2:6:7:6:3:2:1:1:1 is 9.868000 # Average mana lost for 0:1:2:6:7:5:4:2:1:1:1 is 9.680000 # Average mana lost for 0:0:3:5:8:5:4:2:1:1:1 is 9.541000 # Average mana lost for 0:0:2:6:9:3:4:1:2:2:1 is 9.536000 # Average mana lost for 0:0:2:6:9:3:4:1:2:1:2 is 9.147000 # Average mana lost for 0:0:1:9:4:5:5:1:2:1:2 is 9.001000 # Average mana lost for 0:0:5:6:4:4:2:3:3:1:2 is 8.754000 # Average mana lost for 0:0:5:6:4:4:2:3:2:2:2 is 8.697000
42.25974
107
0.657345
750
3,254
2.762667
0.169333
0.153958
0.209942
0.251931
0.460907
0.392857
0.328185
0.201255
0.128861
0.051158
0
0.204563
0.191764
3,254
77
108
42.25974
0.58327
0.543639
0
0.108108
0
0
0.066116
0
0
0
0
0
0
0
null
null
0
0.081081
null
null
0.081081
0
0
0
null
0
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
07785d069e42bc7cf23f5c347e79e74622d996fb
899
py
Python
src/score.py
akshayelangovan/fuzzy_template_akshay
b4e13741f18a2acad5b998d7487c992feec303fd
[ "MIT" ]
1
2022-02-22T23:54:34.000Z
2022-02-22T23:54:34.000Z
src/score.py
akshayelangovan/fuzzy_template_akshay
b4e13741f18a2acad5b998d7487c992feec303fd
[ "MIT" ]
null
null
null
src/score.py
akshayelangovan/fuzzy_template_akshay
b4e13741f18a2acad5b998d7487c992feec303fd
[ "MIT" ]
null
null
null
from fuzzy_asteroids.util import Score from fuzzy_asteroids.game import AsteroidGame class SampleScore(Score): """ Sample of how to modify the Score class """ def __init__(self): """ Define constructor """ # TODO add your own attributes/properties to this claass # Constructor for this class should not miss call to parent class constructor super().__init__() def timestep_update(self, environment: AsteroidGame) -> None: """ This function is called after the evaluation of each game time step :param environment: AsteroidGame environment instance """ pass def final_update(self, environment: AsteroidGame) -> None: """ This function is called after the completion of the game :param environment: AsteroidGame environment instance """ pass
27.242424
85
0.648498
99
899
5.767677
0.535354
0.161121
0.063047
0.115587
0.406305
0.406305
0.227671
0.227671
0.227671
0.227671
0
0
0.286986
899
32
86
28.09375
0.890796
0.472748
0
0.222222
0
0
0
0
0
0
0
0.03125
0
1
0.333333
false
0.222222
0.222222
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
1
0
0
1
0
0
4
079157a8c2e7675b8c20e7fe32c935bbf42bee57
101
py
Python
flambe/cluster/utils.py
ethan-asapp/flambe
70257167058c7b82ee39f74167a6161bd264ad18
[ "MIT" ]
148
2019-08-29T21:19:03.000Z
2022-03-18T06:13:53.000Z
flambe/cluster/utils.py
cle-ros/flambe
0dc2f5b2b286694defe8abf450fe5be9ae12c097
[ "MIT" ]
108
2019-09-03T14:36:10.000Z
2020-05-13T15:53:14.000Z
flambe/cluster/utils.py
cle-ros/flambe
0dc2f5b2b286694defe8abf450fe5be9ae12c097
[ "MIT" ]
21
2019-09-08T14:09:45.000Z
2020-12-27T04:12:33.000Z
from collections import namedtuple RemoteCommand = namedtuple('RemoteCommand', ['success', 'msg'])
20.2
63
0.762376
9
101
8.555556
0.777778
0.597403
0
0
0
0
0
0
0
0
0
0
0.108911
101
4
64
25.25
0.855556
0
0
0
0
0
0.227723
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
07a8b2778231d9db56bfa96782ae5093eb797b1d
36
py
Python
dataLoader/constants.py
perazim-io/layout-bot
b01c440aa4ecd266e65596a1bd4cc7fcb722f715
[ "MIT" ]
null
null
null
dataLoader/constants.py
perazim-io/layout-bot
b01c440aa4ecd266e65596a1bd4cc7fcb722f715
[ "MIT" ]
null
null
null
dataLoader/constants.py
perazim-io/layout-bot
b01c440aa4ecd266e65596a1bd4cc7fcb722f715
[ "MIT" ]
null
null
null
screenWidth = 1080 screenHeight=1920
18
18
0.861111
4
36
7.75
1
0
0
0
0
0
0
0
0
0
0
0.242424
0.083333
36
2
19
18
0.69697
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
07ca87a284cd8b935f791e6a5d0109045936f1cb
1,001
py
Python
polymorphism_duck_typing/lab/image_area.py
Minkov/python-oop-2021-02
bd387dde165f4338eed66c4bc0b4b516ee085340
[ "MIT" ]
2
2021-02-22T22:55:31.000Z
2021-04-05T18:25:10.000Z
polymorphism_duck_typing/lab/image_area.py
Minkov/python-oop-2021-02
bd387dde165f4338eed66c4bc0b4b516ee085340
[ "MIT" ]
null
null
null
polymorphism_duck_typing/lab/image_area.py
Minkov/python-oop-2021-02
bd387dde165f4338eed66c4bc0b4b516ee085340
[ "MIT" ]
2
2021-04-05T18:35:11.000Z
2021-04-08T12:18:19.000Z
class ImageArea: def __init__(self, width, height): self.width = width self.height = height def get_area(self): return self.width * self.height def __eq__(self, other): return self.get_area() == other.get_area() def __ne__(self, other): return self.get_area() != other.get_area() def __gt__(self, other): return self.get_area() > other.get_area() def __ge__(self, other): return self.get_area() >= other.get_area() def __lt__(self, other): return self.get_area() < other.get_area() def __le__(self, other): return self.get_area() <= other.get_area() class SquareImageArea(ImageArea): def __init__(self, side): super().__init__(side, side) a1 = SquareImageArea(7) # 70 a2 = SquareImageArea(7) # 70 a3 = SquareImageArea(8) # 72 print(a1 == a2) # True print(a1 != a3) # True print(a1 != a2) # False print(a1 >= a3) # False print(a1 <= a2) # True print(a1 < a3) # True
23.833333
50
0.612388
135
1,001
4.177778
0.222222
0.161348
0.159574
0.202128
0.52305
0.52305
0.52305
0.52305
0.430851
0.363475
0
0.031873
0.247752
1,001
41
51
24.414634
0.717131
0.03996
0
0
0
0
0
0
0
0
0
0
0
1
0.3
false
0
0
0.233333
0.6
0.2
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
07daa757de1ee8eee0049602aa4f5133636b9ecb
89
py
Python
threads/apps.py
akiyoss-git/MineLearningMirror
bf183738f6a95e6717f7b22081628279f9d6f20b
[ "MIT" ]
250
2018-05-09T06:46:08.000Z
2022-03-08T09:37:58.000Z
threads/apps.py
akiyoss-git/MineLearningMirror
bf183738f6a95e6717f7b22081628279f9d6f20b
[ "MIT" ]
14
2019-05-28T06:32:23.000Z
2022-03-11T23:20:37.000Z
threads/apps.py
akiyoss-git/MineLearningMirror
bf183738f6a95e6717f7b22081628279f9d6f20b
[ "MIT" ]
78
2018-07-29T07:44:42.000Z
2022-03-02T11:04:48.000Z
from django.apps import AppConfig class ThreadsConfig(AppConfig): name = 'threads'
14.833333
33
0.752809
10
89
6.7
0.9
0
0
0
0
0
0
0
0
0
0
0
0.168539
89
5
34
17.8
0.905405
0
0
0
0
0
0.078652
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
07effbf459c1404294f9771e491288c6a59a4a4c
285
py
Python
absynthe/cfg/__init__.py
chaturv3di/absynthe
e2dcc97747ca6f17c4d39ae2cf16808751742d03
[ "Apache-2.0" ]
6
2019-06-17T16:16:24.000Z
2019-10-18T11:20:51.000Z
absynthe/cfg/__init__.py
chaturv3di/absynthe
e2dcc97747ca6f17c4d39ae2cf16808751742d03
[ "Apache-2.0" ]
null
null
null
absynthe/cfg/__init__.py
chaturv3di/absynthe
e2dcc97747ca6f17c4d39ae2cf16808751742d03
[ "Apache-2.0" ]
1
2019-09-15T12:02:29.000Z
2019-09-15T12:02:29.000Z
from __future__ import absolute_import from .node import Node, UniformNode, BinomialNode from .logger_node import LoggerNode, SimpleLoggerNode from .graph import Graph __all__ = ["Node", "UniformNode", "BinomialNode", "LoggerNode", "SimpleLoggerNode", "Graph"]
28.5
53
0.729825
28
285
7.071429
0.428571
0.10101
0.272727
0
0
0
0
0
0
0
0
0
0.178947
285
9
54
31.666667
0.846154
0
0
0
0
0
0.203509
0
0
0
0
0
0
1
0
false
0
0.571429
0
0.571429
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
ed0dfd34ec3bfca4fa03f38d12d214660da5ad29
63
py
Python
core/arxiv/submission/services/classic/tests/test_store_annotations.py
NeolithEra/arxiv-submission-core
d4f20be62a882d2d5f3d1584eda69e7d90ca2c12
[ "MIT" ]
14
2019-05-26T22:52:17.000Z
2021-11-05T12:26:46.000Z
core/arxiv/submission/services/classic/tests/test_store_annotations.py
NeolithEra/arxiv-submission-core
d4f20be62a882d2d5f3d1584eda69e7d90ca2c12
[ "MIT" ]
30
2018-01-31T19:16:08.000Z
2018-12-08T08:41:04.000Z
core/arxiv/submission/services/classic/tests/test_store_annotations.py
NeolithEra/arxiv-submission-core
d4f20be62a882d2d5f3d1584eda69e7d90ca2c12
[ "MIT" ]
8
2019-01-10T22:01:39.000Z
2021-11-20T21:44:51.000Z
"""Test persistence of annotations in the classic database."""
31.5
62
0.761905
8
63
6
1
0
0
0
0
0
0
0
0
0
0
0
0.126984
63
1
63
63
0.872727
0.888889
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
ed33f97291796a9d9583163462b39ada06e5428b
10,532
py
Python
tests/test_backend_six_graph.py
166MMX/hiro-python-library
fb29e3247a8fe1b0f7dc4e68141cf7340a8dd0a5
[ "MIT" ]
null
null
null
tests/test_backend_six_graph.py
166MMX/hiro-python-library
fb29e3247a8fe1b0f7dc4e68141cf7340a8dd0a5
[ "MIT" ]
null
null
null
tests/test_backend_six_graph.py
166MMX/hiro-python-library
fb29e3247a8fe1b0f7dc4e68141cf7340a8dd0a5
[ "MIT" ]
null
null
null
from types import MappingProxyType from typing import Generator, Optional, Type from uuid import uuid4 import pytest from arago.hiro.client.client import HiroClient from arago.hiro.model.graph.attribute import FreeAttribute, SystemAttribute, FinalAttribute from arago.hiro.model.graph.edge import Edge from arago.hiro.model.graph.history import HistoryFormat, HistoryEntry, HistoryDiff from arago.hiro.model.graph.vertex import VertexId, Vertex, ExternalVertexId, VERTEX_TYPE_T, VERTEX_T, VERTEX_T_co # noinspection PyPackageRequirements from arago.hiro.model.storage import TimeSeriesVertex, BlobVertex from arago.ogit import OgitEntity, OgitVerb, OgitAttribute from arago.ontology import Attribute def uuid() -> str: return str(uuid4()) class TestClassGraphVertexCreate: @pytest.mark.parametrize('vertex_type', [ OgitEntity.OGIT_COMMENT, OgitEntity.OGIT_COMMENT.value, OgitEntity.OGIT_COMMENT.value.name.uri ]) def test_type_no_data(self, client: HiroClient, vertex_type: VERTEX_TYPE_T): from arago.hiro.backend.six.graph import Hiro6GraphModel graph = Hiro6GraphModel(client) vertex = graph.vertex.create(vertex_type) assert isinstance(vertex, Vertex) vertex.delete() pass @pytest.mark.parametrize('vertex_type', [ OgitEntity.OGIT_COMMENT, OgitEntity.OGIT_COMMENT.value, OgitEntity.OGIT_COMMENT.value.name.uri ]) @pytest.mark.parametrize('vertex_data', [ Vertex({OgitAttribute.OGIT_CONTENT: 'foo'}), BlobVertex({OgitAttribute.OGIT_CONTENT: 'foo'}), TimeSeriesVertex({OgitAttribute.OGIT_CONTENT: 'foo'}), {SystemAttribute.OGIT__XID: uuid()}, {FinalAttribute.OGIT__XID: uuid()}, {OgitAttribute.OGIT_CONTENT: 'foo'}, {OgitAttribute.OGIT_CONTENT.value: 'foo'}, {OgitAttribute.OGIT_CONTENT.value.name.uri: 'foo'}, {FreeAttribute('/bar'): 'foo'}, ]) def test_type_data(self, client: HiroClient, vertex_type: VERTEX_TYPE_T, vertex_data: Optional[VERTEX_T]): from arago.hiro.backend.six.graph import Hiro6GraphModel graph = Hiro6GraphModel(client) vertex = graph.vertex.create(vertex_type, vertex_data) 
assert isinstance(vertex, Vertex) vertex.delete() pass @pytest.mark.parametrize('vertex_data', [ Vertex({ FinalAttribute.OGIT__TYPE: OgitEntity.OGIT_COMMENT, OgitAttribute.OGIT_CONTENT: 'foo'}), { FinalAttribute.OGIT__TYPE: OgitEntity.OGIT_COMMENT, SystemAttribute.OGIT__XID: uuid()}, { FinalAttribute.OGIT__TYPE: OgitEntity.OGIT_COMMENT, FinalAttribute.OGIT__XID: uuid()}, { FinalAttribute.OGIT__TYPE: OgitEntity.OGIT_COMMENT, OgitAttribute.OGIT_CONTENT: 'foo'}, { FinalAttribute.OGIT__TYPE: OgitEntity.OGIT_COMMENT, OgitAttribute.OGIT_CONTENT.value: 'foo'}, { FinalAttribute.OGIT__TYPE: OgitEntity.OGIT_COMMENT, OgitAttribute.OGIT_CONTENT.value.name.uri: 'foo'}, { FinalAttribute.OGIT__TYPE: OgitEntity.OGIT_COMMENT, FreeAttribute('/bar'): 'foo'}, { FinalAttribute.OGIT__TYPE: OgitEntity.OGIT_COMMENT.value, FreeAttribute('/bar'): 'foo'}, { FinalAttribute.OGIT__TYPE: OgitEntity.OGIT_COMMENT.value.name.uri, FreeAttribute('/bar'): 'foo'}, ]) def test_data_no_type(self, client: HiroClient, vertex_data: Optional[VERTEX_T]): from arago.hiro.backend.six.graph import Hiro6GraphModel graph = Hiro6GraphModel(client) vertex = graph.vertex.create(vertex_data) assert isinstance(vertex, Vertex) vertex.delete() pass @pytest.mark.parametrize('vertex_type,cls', [ (OgitEntity.OGIT_TIME_SERIES, TimeSeriesVertex), (OgitEntity.OGIT_ATTACHMENT, BlobVertex), ]) def test_upcast(self, client: HiroClient, vertex_type: VERTEX_TYPE_T, cls: Type[VERTEX_T_co]): from arago.hiro.backend.six.graph import Hiro6GraphModel graph = Hiro6GraphModel(client) res = graph.vertex.create(vertex_type) assert isinstance(res, cls) res.delete() pass def test_ts(self, client: HiroClient): from arago.hiro.backend.six.graph import Hiro6GraphModel graph = Hiro6GraphModel(client) res = graph.vertex.create(OgitEntity.OGIT_TIME_SERIES) assert isinstance(res, TimeSeriesVertex) res.delete() pass def test_blob_ogit(self, client: HiroClient): from arago.hiro.backend.six.graph import Hiro6GraphModel graph = Hiro6GraphModel(client) 
res = graph.vertex.create(OgitEntity.OGIT_ATTACHMENT) assert isinstance(res, BlobVertex) res.delete() pass def test_blob_ogit_map(self, client: HiroClient): from arago.hiro.backend.six.graph import Hiro6GraphModel graph = Hiro6GraphModel(client) res = graph.vertex.create(OgitEntity.OGIT_ATTACHMENT, { OgitAttribute.OGIT_NAME: 'foo' }) assert isinstance(res, BlobVertex) assert res[OgitAttribute.OGIT_NAME] == 'foo' res.delete() pass def test_blob_ogit_v(self, client: HiroClient): from arago.hiro.backend.six.graph import Hiro6GraphModel graph = Hiro6GraphModel(client) res = graph.vertex.create(OgitEntity.OGIT_ATTACHMENT, Vertex({ OgitAttribute.OGIT_NAME: 'foo' })) assert isinstance(res, BlobVertex) assert res[OgitAttribute.OGIT_NAME] == 'foo' res.delete() pass def test_blob_ontology(self, client: HiroClient): from arago.hiro.backend.six.graph import Hiro6GraphModel graph = Hiro6GraphModel(client) res = graph.vertex.create(OgitEntity.OGIT_ATTACHMENT.value) assert isinstance(res, BlobVertex) res.delete() pass class TestClassGraphVertexGet: def test_id(self, client: HiroClient): from arago.hiro.backend.six.graph import Hiro6GraphModel graph = Hiro6GraphModel(client) vertex_id = VertexId(OgitEntity.OGIT_COMMENT.value.name.uri) res = graph.vertex.get(vertex_id) assert isinstance(res, Vertex) pass def test_str(self, client: HiroClient): from arago.hiro.backend.six.graph import Hiro6GraphModel graph = Hiro6GraphModel(client) vertex_id = OgitEntity.OGIT_COMMENT.value.name.uri res = graph.vertex.get(vertex_id) assert isinstance(res, Vertex) pass def test_xid(self, client: HiroClient): from arago.hiro.backend.six.graph import Hiro6GraphModel graph = Hiro6GraphModel(client) vertex_id = ExternalVertexId('arago.co') res = graph.vertex.get(vertex_id) assert isinstance(res, Vertex) pass class TestClassGraphVertexUpdate: def test_vertex_update_model(self, client: HiroClient): from arago.hiro.backend.six.graph import Hiro6GraphModel graph = Hiro6GraphModel(client) comment_v = 
graph.vertex.create(OgitEntity.OGIT_COMMENT) res = graph.vertex.update(comment_v, {}) assert isinstance(res, Vertex) pass class TestClassGraphVertexDelete: def test_vertex_delete_model(self, client: HiroClient): from arago.hiro.backend.six.graph import Hiro6GraphModel graph = Hiro6GraphModel(client) comment_v = graph.vertex.create(OgitEntity.OGIT_COMMENT) res = graph.vertex.delete(comment_v) assert isinstance(res, Vertex) pass class TestClassGraphVertexHistory: def test_vertex_history_model_element(self, client: HiroClient): from arago.hiro.backend.six.graph import Hiro6GraphModel res_1 = client.root.model.search.index(rf'''ogit\/_type:"{OgitEntity.OGIT_LICENSE_REQUEST.value.name.uri!s}"''') vertex = next(res_1) graph = Hiro6GraphModel(client) res_2 = graph.vertex.history(vertex, res_format=HistoryFormat.ELEMENT) assert isinstance(res_2, Generator) vertex = next(res_2) assert isinstance(vertex, Vertex) pass def test_vertex_history_model_full(self, client: HiroClient): from arago.hiro.backend.six.graph import Hiro6GraphModel res_1 = client.root.model.search.index(rf'''ogit\/_type:"{OgitEntity.OGIT_LICENSE_REQUEST.value.name.uri!s}"''') vertex = next(res_1) graph = Hiro6GraphModel(client) res_2 = graph.vertex.history(vertex, res_format=HistoryFormat.FULL) assert isinstance(res_2, Generator) entry = next(res_2) assert isinstance(entry, HistoryEntry) vertex = entry.data assert isinstance(vertex, Vertex) pass def test_vertex_history_model_diff(self, client: HiroClient): from arago.hiro.backend.six.graph import Hiro6GraphModel res_1 = client.root.model.search.index(rf'''ogit\/_type:"{OgitEntity.OGIT_LICENSE_REQUEST.value.name.uri!s}"''') vertex = next(res_1) graph = Hiro6GraphModel(client) res_2 = graph.vertex.history(vertex, res_format=HistoryFormat.DIFF) assert isinstance(res_2, Generator) diff = next(res_2) diff = next(res_2) assert isinstance(diff, HistoryDiff) replaced = diff.replaced assert isinstance(replaced, MappingProxyType) keys = iter(replaced) key = 
next(keys) assert isinstance(key, Attribute) pass class TestClassGraphEdgeCreate: def test_edge_create_model(self, client: HiroClient): from arago.hiro.backend.six.graph import Hiro6GraphModel graph = Hiro6GraphModel(client) vertex_a = graph.vertex.create(OgitEntity.OGIT_ATTACHMENT) vertex_b = graph.vertex.create(OgitEntity.OGIT_COMMENT) res = graph.edge.create(vertex_a, OgitVerb.OGIT_BELONGS, vertex_b) isinstance(res, Edge) pass class TestClassGraphEdgeDelete: def test_edge_delete_model(self, client: HiroClient): from arago.hiro.backend.six.graph import Hiro6GraphModel graph = Hiro6GraphModel(client) vertex_a = graph.vertex.create(OgitEntity.OGIT_ATTACHMENT) vertex_b = graph.vertex.create(OgitEntity.OGIT_COMMENT) edge_c = graph.edge.create(vertex_a, OgitVerb.OGIT_BELONGS, vertex_b) res = graph.edge.delete(edge_c) isinstance(res, Edge) pass
39.593985
120
0.682491
1,166
10,532
5.993997
0.09434
0.066104
0.046502
0.054371
0.784948
0.731864
0.714981
0.678495
0.66018
0.638146
0
0.006843
0.223035
10,532
265
121
39.743396
0.847244
0.003228
0
0.559829
0
0
0.031345
0.018293
0
0
0
0
0.106838
1
0.08547
false
0.081197
0.132479
0.004274
0.252137
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
4
ed798aed54e01d84a8645c2c45e49553ec075ca0
192
py
Python
main/management/commands/uploadcleanup.py
kristianmk/tator
0eb75ee9333316b06f773de2b75e8e797a98ffdb
[ "MIT" ]
50
2019-09-18T14:32:18.000Z
2022-03-31T16:26:07.000Z
main/management/commands/uploadcleanup.py
kristianmk/tator
0eb75ee9333316b06f773de2b75e8e797a98ffdb
[ "MIT" ]
566
2019-09-18T16:33:40.000Z
2022-03-31T20:01:38.000Z
main/management/commands/uploadcleanup.py
kristianmk/tator
0eb75ee9333316b06f773de2b75e8e797a98ffdb
[ "MIT" ]
19
2019-09-21T20:08:12.000Z
2022-03-17T14:53:11.000Z
from django.core.management.base import BaseCommand from main.util import cleanup_object_uploads class Command(BaseCommand): def handle(self, **options): cleanup_object_uploads()
27.428571
51
0.78125
24
192
6.083333
0.75
0.178082
0.273973
0
0
0
0
0
0
0
0
0
0.140625
192
6
52
32
0.884848
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.4
0
0.8
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
ed7a38f14feb45876e8d2fc53b967044ac058fc2
252
py
Python
models/model_abc.py
zmcx16/stock-forecast
c4ffcbc4215e135776fbb4d5ff384b069b7e631c
[ "MIT" ]
3
2021-11-27T13:21:11.000Z
2021-11-28T07:57:27.000Z
models/model_abc.py
zmcx16/stock-forecast
c4ffcbc4215e135776fbb4d5ff384b069b7e631c
[ "MIT" ]
null
null
null
models/model_abc.py
zmcx16/stock-forecast
c4ffcbc4215e135776fbb4d5ff384b069b7e631c
[ "MIT" ]
3
2021-11-26T17:39:52.000Z
2022-03-22T20:52:21.000Z
import abc class Model(abc.ABC): @abc.abstractmethod def run_validate(self, data): print(data) return NotImplemented @abc.abstractmethod def run_predict(self, data): print(data) return NotImplemented
16.8
33
0.642857
28
252
5.714286
0.5
0.075
0.25
0.2875
0.4625
0.4625
0
0
0
0
0
0
0.277778
252
14
34
18
0.879121
0
0
0.6
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.1
0
0.6
0.2
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
ed885666106e0e31922f343b0cc4ba3d6bde8173
827
py
Python
data_importer/importers/__init__.py
zhangchn/data-importer
faef624a19f97c76a157d8350bb05b819f1cb9f2
[ "BSD-2-Clause-FreeBSD" ]
62
2015-01-27T09:29:00.000Z
2021-02-28T09:56:11.000Z
data_importer/importers/__init__.py
zhangchn/data-importer
faef624a19f97c76a157d8350bb05b819f1cb9f2
[ "BSD-2-Clause-FreeBSD" ]
40
2015-01-16T11:57:17.000Z
2022-03-13T14:13:00.000Z
data_importer/importers/__init__.py
zhangchn/data-importer
faef624a19f97c76a157d8350bb05b819f1cb9f2
[ "BSD-2-Clause-FreeBSD" ]
34
2015-01-27T15:06:56.000Z
2021-02-28T09:56:14.000Z
# encoding: utf-8 from data_importer.importers.base import BaseImporter from data_importer.importers.csv_importer import CSVImporter from data_importer.importers.xls_importer import XLSImporter from data_importer.importers.xlsx_importer import XLSXImporter from data_importer.importers.xml_importer import XMLImporter from data_importer.importers.generic import GenericImporter from data_importer.core.exceptions import StopImporter from data_importer.core.exceptions import UnsuportedFile from data_importer.core.exceptions import InvalidModel from data_importer.core.exceptions import InvalidDescriptor __all__ = ( 'BaseImporter', 'CSVImporter', 'XLSImporter', 'XLSXImporter', 'XMLImporter', 'GenericImporter', 'StopImporter', 'UnsuportedFile', 'InvalidModel', 'InvalidDescriptor', )
33.08
62
0.813785
88
827
7.443182
0.306818
0.122137
0.244275
0.229008
0.219847
0.219847
0
0
0
0
0
0.001372
0.118501
827
24
63
34.458333
0.897119
0.018138
0
0
0
0
0.15679
0
0
0
0
0
0
1
0
false
0
0.772727
0
0.772727
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
71eb391b86d48766f6ee823eba37e5d27e46cd63
370
py
Python
accelbyte_py_sdk/api/platform/operations/currency/__init__.py
encyphered/accelbyte-python-sdk
09c1e989d7251de308150fdcd3119d662ca2d205
[ "MIT" ]
null
null
null
accelbyte_py_sdk/api/platform/operations/currency/__init__.py
encyphered/accelbyte-python-sdk
09c1e989d7251de308150fdcd3119d662ca2d205
[ "MIT" ]
null
null
null
accelbyte_py_sdk/api/platform/operations/currency/__init__.py
encyphered/accelbyte-python-sdk
09c1e989d7251de308150fdcd3119d662ca2d205
[ "MIT" ]
null
null
null
# pylint: disable=line-too-long from .list_currencies import ListCurrencies from .create_currency import CreateCurrency from .get_currency_summary import GetCurrencySummary from .update_currency import UpdateCurrency from .delete_currency import DeleteCurrency from .get_currency_config import GetCurrencyConfig from .public_list_currencies import PublicListCurrencies
37
56
0.881081
43
370
7.348837
0.55814
0.132911
0.126582
0
0
0
0
0
0
0
0
0
0.086486
370
9
57
41.111111
0.934911
0.078378
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
9c086d4bf9b3fd5227953cbe2e9dbda7f0425383
305
py
Python
rpiRobot/src/cortex/domain/iItemChooser.py
olgam4/design3
6e05d123a24deae7dda646df535844a158ef5cc0
[ "WTFPL" ]
null
null
null
rpiRobot/src/cortex/domain/iItemChooser.py
olgam4/design3
6e05d123a24deae7dda646df535844a158ef5cc0
[ "WTFPL" ]
null
null
null
rpiRobot/src/cortex/domain/iItemChooser.py
olgam4/design3
6e05d123a24deae7dda646df535844a158ef5cc0
[ "WTFPL" ]
null
null
null
from abc import ABC, abstractmethod from typing import List from cortex.domain.objective.item import Item from cortex.domain.objective.objective import Objective class IItemChooser(ABC): @abstractmethod def choose_from(self, objective: Objective, items: List[Item]) -> List[Item]: pass
25.416667
81
0.763934
39
305
5.948718
0.435897
0.146552
0.137931
0.215517
0
0
0
0
0
0
0
0
0.157377
305
11
82
27.727273
0.902724
0
0
0
0
0
0
0
0
0
0
0
0
1
0.125
false
0.125
0.5
0
0.75
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
4
9c1f095049bf8491264048877b1d7c95a42ab65d
198
py
Python
contracts/apps.py
City-of-Helsinki/berth-reservations
a3b1a8c2176f132505527acdf6da3a62199401db
[ "MIT" ]
3
2020-10-13T07:58:48.000Z
2020-12-22T09:41:50.000Z
contracts/apps.py
City-of-Helsinki/berth-reservations
a3b1a8c2176f132505527acdf6da3a62199401db
[ "MIT" ]
422
2018-10-25T10:57:05.000Z
2022-03-30T05:47:14.000Z
contracts/apps.py
City-of-Helsinki/berth-reservations
a3b1a8c2176f132505527acdf6da3a62199401db
[ "MIT" ]
1
2020-04-03T07:38:03.000Z
2020-04-03T07:38:03.000Z
from django.apps import AppConfig class ContractsConfig(AppConfig): name = "contracts" def ready(self): from .services import load_services_config load_services_config()
18
50
0.712121
22
198
6.227273
0.681818
0.175182
0.262774
0
0
0
0
0
0
0
0
0
0.222222
198
10
51
19.8
0.88961
0
0
0
0
0
0.045455
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0
0.833333
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
9c498bb43e411bf876af0c313338346a395c7824
8,576
py
Python
nimbleclient/v1/api/protection_schedules.py
prachiruparelia-hpe/nimble-python-sdk
a3e99d89e647291caf7936300ae853d21d94d6e5
[ "Apache-2.0" ]
1
2020-05-28T19:48:59.000Z
2020-05-28T19:48:59.000Z
nimbleclient/v1/api/protection_schedules.py
prachiruparelia-hpe/nimble-python-sdk
a3e99d89e647291caf7936300ae853d21d94d6e5
[ "Apache-2.0" ]
null
null
null
nimbleclient/v1/api/protection_schedules.py
prachiruparelia-hpe/nimble-python-sdk
a3e99d89e647291caf7936300ae853d21d94d6e5
[ "Apache-2.0" ]
null
null
null
# # © Copyright 2020 Hewlett Packard Enterprise Development LP # # This file was auto-generated by the Python SDK generator; DO NOT EDIT. # from ...resource import Resource, Collection class ProtectionSchedule(Resource): """Manage protection schedules used in protection templates. # Parameters id : Identifier for protection schedule. name : Name of snapshot schedule to create. description : Description of the schedule. volcoll_or_prottmpl_type : Type of the protection policy this schedule is attached to. Valid values are protection_template and volume_collection. volcoll_or_prottmpl_id : Identifier of the protection policy (protection_template or volume_collection) in which this protection schedule is attached to. period : Repeat interval for snapshots with respect to the period_unit. For example, a value of 2 with the 'period_unit' of 'hours' results in one snapshot every 2 hours. period_unit : Time unit over which to take the number of snapshots specified in 'period'. For example, a value of 'days' with a 'period' of '1' results in one snapshot every day. at_time : Time of day when snapshot should be taken. In case repeat frequency specifies more than one snapshot in a day then the until_time option specifies until what time of day to take snapshots. until_time : Time of day to stop taking snapshots. Applicable only when repeat frequency specifies more than one snapshot in a day. days : Specifies which days snapshots should be taken. num_retain : Number of snapshots to retain. If replication is enabled on this schedule the array will always retain the latest replicated snapshot, which may exceed the specified retention value. This is necessary to ensure efficient replication performance. downstream_partner : Specifies the partner name if snapshots created by this schedule should be replicated. downstream_partner_name : Specifies the partner name if snapshots created by this schedule should be replicated. 
downstream_partner_id : Specifies the partner ID if snapshots created by this schedule should be replicated. In an update operation, if snapshots should be replicated, set this attribute to the ID of the replication partner. If snapshots should not be replicated, set this attribute to the empty string. upstream_partner_name : Specifies the partner name from which snapshots created by this schedule are replicated. upstream_partner_id : Specifies the partner ID from which snapshots created by this schedule are replicated. replicate_every : Specifies which snapshots should be replicated. If snapshots are replicated and this option is not specified, every snapshot is replicated. num_retain_replica : Number of snapshots to retain on the replica. repl_alert_thres : Replication alert threshold in seconds. If the replication of a snapshot takes more than this amount of time to complete an alert will be generated. Enter 0 to disable this alert. snap_verify : Run verification tool on snapshot created by this schedule. This option can only be used with snapshot schedules of a protection template that has application synchronization. The tool used to verify snapshot depends on the type of application. For example, if application synchronization is VSS and the application ID is Exchange, eseutil tool is run on the snapshots. If verification fails, the logs are not truncated. skip_db_consistency_check : Skip consistency check for database files on snapshots created by this schedule. This option only applies to snapshot schedules of a protection template with application synchronization set to VSS, application ID set to MS Exchange 2010 or later w/DAG, this schedule's snap_verify option set to yes, and its disable_appsync option set to false. Skipping consistency checks is only recommended if each database in a DAG has multiple copies. disable_appsync : Disables application synchronized snapshots and creates crash consistent snapshots instead. 
schedule_type : Normal schedules have internal timers which drive snapshot creation. An externally driven schedule has no internal timers. All snapshot activity is driven by an external trigger. In other words, these schedules are used only for externally driven manual snapshots. active : A schedule is active only if it is owned by the same owner as the volume collection. Only active schedules of a volume collection participate in the creation of snapshots and replication. creation_time : Time when this protection schedule was created. last_modified : Time when this protection schedule was last modified. last_mod_sched_time : Time when the timing of the protection schedule was last modified. last_replicated_snapcoll_name : Specifies the name of last replicated snapshot collection. last_replicated_snapcoll_id : Specifies the snapshot collection ID of last replicated snapshot collection. last_replicated_at_time : Time when last snapshot collection was replicated. last_snap_time : Time when last snapshot was taken. next_snap_time : Time when next snapshot will be taken. next_repl_snap_time : Time when next snapshot will be replicated. snap_counter : This is only used by custom read handler for internal calculations. sched_owner_id : Identifier of the group that owns this schedule. sched_owner_name : Name of the group that owns this schedule. last_config_change_time : The last timing configutation changed. vol_status_list : The list of the replication status of volumes undergoing replication. sync_repl_vol_status_list : A list of the replication status of volumes undergoing synchronous replication. use_downstream_for_DR : Break synchronous replication for the specified volume collection and present downstream volumes to host(s). Downstream volumes in the volume collection will be set to online and presented to the host(s) using new serial and LUN numbers. No changes will be made to the upstream volumes, their serial and LUN numbers, and their online state. 
The existing ACLs on the upstream volumes will be copied to the downstream volumes. Use this in conjunction with an empty downstream_partner_id. This unconfigures synchronous replication when the partner is removed from the last replicating schedule in the specified volume collection and presents the downstream volumes to host(s). Host(s) will need to be configured to access the new volumes with the newly assigned serial and LUN numbers. Use this option to expose downstream volumes in a synchronously replicated volume collection to host(s) only when the upstream partner is confirmed to be down and there is no communication between partners. Do not execute this operation if a previous Group Management Service takeover has been performed on a different array. Do not perform a subsequent Group Management Service takeover on a different array as it will lead to irreconcilable conflicts. This limitation is cleared once the Group management service backup array has successfully synchronized after reconnection. """ class ProtectionScheduleList(Collection): resource = ProtectionSchedule resource_type = "protection_schedules"
102.095238
179
0.677239
1,074
8,576
5.333333
0.273743
0.02514
0.015887
0.025663
0.255412
0.202514
0.14176
0.103003
0.075419
0.048534
0
0.002002
0.301189
8,576
83
180
103.325301
0.953613
0.943214
0
0
1
0
0.092593
0
0
0
0
0
0
1
0
false
0
0.2
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
9c8067e05bcd9d9b23fcb289f2d044e2ab28a2a7
223
py
Python
ceilometer/compute/server_pollsters/__init__.py
VeinFu/ceilometer_ha
fb0d3834d4db8a9eaeb8f5da088a2894c615770f
[ "Apache-2.0" ]
null
null
null
ceilometer/compute/server_pollsters/__init__.py
VeinFu/ceilometer_ha
fb0d3834d4db8a9eaeb8f5da088a2894c615770f
[ "Apache-2.0" ]
null
null
null
ceilometer/compute/server_pollsters/__init__.py
VeinFu/ceilometer_ha
fb0d3834d4db8a9eaeb8f5da088a2894c615770f
[ "Apache-2.0" ]
null
null
null
import abc import six from ceilometer.agent import plugin_base @six.add_metaclass(abc.ABCMeta) class ServerPollster(plugin_base.PollsterBase): @property def default_discovery(self): return 'local_node'
15.928571
47
0.762332
28
223
5.892857
0.785714
0.121212
0
0
0
0
0
0
0
0
0
0
0.165919
223
13
48
17.153846
0.887097
0
0
0
0
0
0.044843
0
0
0
0
0
0
1
0.125
false
0
0.375
0.125
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
4
92df3e8bfe9e4e097632a94c19899a4061dee94a
25
py
Python
resolwe_bio/kb/management/__init__.py
gregorjerse/resolwe-bio
80f1e354cf0014a1eeff00acc112c622a2a044a9
[ "Apache-2.0" ]
12
2015-12-07T18:29:27.000Z
2022-03-16T08:00:18.000Z
resolwe_bio/kb/management/__init__.py
gregorjerse/resolwe-bio
80f1e354cf0014a1eeff00acc112c622a2a044a9
[ "Apache-2.0" ]
480
2015-11-20T21:46:43.000Z
2022-03-28T12:40:57.000Z
resolwe_bio/kb/management/__init__.py
gregorjerse/resolwe-bio
80f1e354cf0014a1eeff00acc112c622a2a044a9
[ "Apache-2.0" ]
45
2015-11-19T14:54:07.000Z
2022-02-13T21:36:50.000Z
"""Management module."""
12.5
24
0.64
2
25
8
1
0
0
0
0
0
0
0
0
0
0
0
0.08
25
1
25
25
0.695652
0.72
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
92f34e945a88648920cc266868cb926485ba4001
71
py
Python
boa/interop/Ontology/Native.py
JasonZhouPW/neo-boa
84f4309c1876bd796b22a720b680d982b328c357
[ "MIT" ]
null
null
null
boa/interop/Ontology/Native.py
JasonZhouPW/neo-boa
84f4309c1876bd796b22a720b680d982b328c357
[ "MIT" ]
null
null
null
boa/interop/Ontology/Native.py
JasonZhouPW/neo-boa
84f4309c1876bd796b22a720b680d982b328c357
[ "MIT" ]
null
null
null
def Invoke(param,method,contractAddress,ver): """ """ pass
14.2
45
0.591549
7
71
6
1
0
0
0
0
0
0
0
0
0
0
0
0.239437
71
5
46
14.2
0.777778
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0.5
0
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
4
1305c86bf3cab5001f7c50323d32759e3a5bcf5d
81
py
Python
CSV/apps.py
jzadeh/chiron
493bf4e17f9970ee6118cc2ea6f1d87fb95ef26b
[ "Apache-2.0" ]
15
2017-08-08T10:19:47.000Z
2022-01-20T09:48:25.000Z
CSV/apps.py
jzadeh/chiron
493bf4e17f9970ee6118cc2ea6f1d87fb95ef26b
[ "Apache-2.0" ]
null
null
null
CSV/apps.py
jzadeh/chiron
493bf4e17f9970ee6118cc2ea6f1d87fb95ef26b
[ "Apache-2.0" ]
2
2019-12-11T20:14:27.000Z
2022-02-26T13:18:32.000Z
from django.apps import AppConfig class CsvConfig(AppConfig): name = 'CSV'
13.5
33
0.728395
10
81
5.9
0.9
0
0
0
0
0
0
0
0
0
0
0
0.185185
81
5
34
16.2
0.893939
0
0
0
0
0
0.037037
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
1312cac7c6d02a4e26d8eb7392c2c9d0d2b8a81c
103
py
Python
stac_exceptions.py
vincentsarago/stac-validator
2bc527cdf5b05bab499100e88254a3bdd3d65fe1
[ "Apache-2.0" ]
null
null
null
stac_exceptions.py
vincentsarago/stac-validator
2bc527cdf5b05bab499100e88254a3bdd3d65fe1
[ "Apache-2.0" ]
null
null
null
stac_exceptions.py
vincentsarago/stac-validator
2bc527cdf5b05bab499100e88254a3bdd3d65fe1
[ "Apache-2.0" ]
null
null
null
""" Description: Exceptions for the STAC Validator. """ class VersionException(Exception): pass
11.444444
47
0.718447
10
103
7.4
1
0
0
0
0
0
0
0
0
0
0
0
0.174757
103
8
48
12.875
0.870588
0.456311
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
4
131f6f2da3f2c69cf8ca88de131984155cc0cfa4
185
py
Python
accounts/forms.py
iidamakinen/OHSIHA2018
76c4f2d754045cc82d57062453e7248d63e5bf4d
[ "MIT" ]
null
null
null
accounts/forms.py
iidamakinen/OHSIHA2018
76c4f2d754045cc82d57062453e7248d63e5bf4d
[ "MIT" ]
null
null
null
accounts/forms.py
iidamakinen/OHSIHA2018
76c4f2d754045cc82d57062453e7248d63e5bf4d
[ "MIT" ]
null
null
null
from django import forms from .models import Tapahtuma class TapahtumaForm(forms.ModelForm): class Meta: model = Tapahtuma fields = ['name', 'description', 'date']
23.125
48
0.681081
20
185
6.3
0.75
0
0
0
0
0
0
0
0
0
0
0
0.221622
185
7
49
26.428571
0.875
0
0
0
0
0
0.102703
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
13277a1b3a4ae43c4f559219f3cf4b39a83b23db
241
py
Python
iexfinance/apidata/__init__.py
jto-d/iexfinance
8bf958f269638b6f8d2dbdd857c0ef2ba324cdd4
[ "Apache-2.0" ]
653
2018-01-02T21:03:49.000Z
2022-03-24T06:37:10.000Z
iexfinance/apidata/__init__.py
jto-d/iexfinance
8bf958f269638b6f8d2dbdd857c0ef2ba324cdd4
[ "Apache-2.0" ]
219
2017-12-09T21:44:43.000Z
2022-03-23T20:21:46.000Z
iexfinance/apidata/__init__.py
jto-d/iexfinance
8bf958f269638b6f8d2dbdd857c0ef2ba324cdd4
[ "Apache-2.0" ]
155
2018-02-07T17:08:18.000Z
2022-03-13T23:36:57.000Z
from iexfinance.apidata.base import APIReader def get_api_status(**kwargs): """ IEX Cloud API status Reference: https://iexcloud.io/docs/api/#status Data Weighting: ``Free`` """ return APIReader(**kwargs).fetch()
18.538462
51
0.6639
29
241
5.448276
0.793103
0.170886
0
0
0
0
0
0
0
0
0
0
0.195021
241
12
52
20.083333
0.814433
0.394191
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
0
0
0
4
13371afc97c339e0b6234da0975d033dca6367d5
183
py
Python
Lab2/Lab2/models/delmodel.py
sanchaez/python_labs
b90ab02c0fae82511c3db5a054b7ea8dda5d0a22
[ "MIT" ]
null
null
null
Lab2/Lab2/models/delmodel.py
sanchaez/python_labs
b90ab02c0fae82511c3db5a054b7ea8dda5d0a22
[ "MIT" ]
null
null
null
Lab2/Lab2/models/delmodel.py
sanchaez/python_labs
b90ab02c0fae82511c3db5a054b7ea8dda5d0a22
[ "MIT" ]
null
null
null
from lab2 import db as DataBase def delete(table, id): DataBase.delete(DataBase.escapeBySymbol(table, "`"), where = 'WHERE `id` = ' + DataBase.escapeBySymbol(id, "'")) return 1
36.6
114
0.688525
23
183
5.478261
0.608696
0.15873
0
0
0
0
0
0
0
0
0
0.012903
0.153005
183
5
115
36.6
0.8
0
0
0
0
0
0.083333
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
138eb354451a65835f63fadf4e9bfac58f0c50c5
270
py
Python
students/k3342/laboratory_works/Shipitcyna_Daria/laboratary_work_1/hotels_django/hotels_app/admin.py
shipa99/ITMO_ICT_WebProgramming_2020
86b6b1faa606fd3487193e426830daffd70e801c
[ "MIT" ]
null
null
null
students/k3342/laboratory_works/Shipitcyna_Daria/laboratary_work_1/hotels_django/hotels_app/admin.py
shipa99/ITMO_ICT_WebProgramming_2020
86b6b1faa606fd3487193e426830daffd70e801c
[ "MIT" ]
null
null
null
students/k3342/laboratory_works/Shipitcyna_Daria/laboratary_work_1/hotels_django/hotels_app/admin.py
shipa99/ITMO_ICT_WebProgramming_2020
86b6b1faa606fd3487193e426830daffd70e801c
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Facilities from .models import Room_types from .models import Hotel from .models import Comment admin.site.register(Facilities) admin.site.register(Room_types) admin.site.register(Hotel) admin.site.register(Comment)
24.545455
32
0.82963
39
270
5.692308
0.333333
0.18018
0.288288
0
0
0
0
0
0
0
0
0
0.092593
270
10
33
27
0.906122
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.555556
0
0.555556
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
1395f94bbf093bda189114b31e44869460210116
1,340
gyp
Python
third_party/jsoncpp/jsoncpp.gyp
nagineni/chromium-crosswalk
5725642f1c67d0f97e8613ec1c3e8107ab53fdf8
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
212
2015-01-31T11:55:58.000Z
2022-02-22T06:35:11.000Z
third_party/jsoncpp/jsoncpp.gyp
1065672644894730302/Chromium
239dd49e906be4909e293d8991e998c9816eaa35
[ "BSD-3-Clause" ]
5
2015-03-27T14:29:23.000Z
2019-09-25T13:23:12.000Z
third_party/jsoncpp/jsoncpp.gyp
1065672644894730302/Chromium
239dd49e906be4909e293d8991e998c9816eaa35
[ "BSD-3-Clause" ]
221
2015-01-07T06:21:24.000Z
2022-02-11T02:51:12.000Z
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. { 'targets': [ { 'target_name': 'jsoncpp', 'type': 'static_library', 'defines': [ 'JSON_USE_EXCEPTION=0', ], 'sources': [ 'source/include/json/assertions.h', 'source/include/json/autolink.h', 'source/include/json/config.h', 'source/include/json/features.h', 'source/include/json/forwards.h', 'source/include/json/json.h', 'source/include/json/reader.h', 'overrides/include/json/value.h', 'source/include/json/writer.h', 'source/src/lib_json/json_batchallocator.h', 'source/src/lib_json/json_reader.cpp', 'source/src/lib_json/json_tool.h', 'overrides/src/lib_json/json_value.cpp', 'source/src/lib_json/json_writer.cpp', ], 'include_dirs': [ 'overrides/include/', 'source/include/', 'source/src/lib_json/', ], 'direct_dependent_settings': { 'include_dirs': [ 'overrides/include/', 'source/include/', ], }, }, ], } # Local Variables: # tab-width:2 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=2 shiftwidth=2:
27.346939
72
0.58806
158
1,340
4.867089
0.468354
0.169051
0.176853
0.163849
0.218466
0.218466
0
0
0
0
0
0.008048
0.258209
1,340
48
73
27.916667
0.765594
0.191791
0
0.289474
0
0
0.608007
0.433892
0
0
0
0
0.026316
1
0
true
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4