hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e1963a4d3380bd9b67d22fd94ac0497fcbea8e10
| 93
|
py
|
Python
|
funcsfa/__init__.py
|
NKI-CCB/funcsfa
|
336655531f08aeec0077a443eeb76b92d6677b3a
|
[
"Apache-2.0"
] | 1
|
2019-01-28T11:47:21.000Z
|
2019-01-28T11:47:21.000Z
|
funcsfa/__init__.py
|
NKI-CCB/funcsfa
|
336655531f08aeec0077a443eeb76b92d6677b3a
|
[
"Apache-2.0"
] | 1
|
2018-11-21T09:37:45.000Z
|
2018-11-21T15:39:27.000Z
|
funcsfa/__init__.py
|
NKI-CCB/funcsfa
|
336655531f08aeec0077a443eeb76b92d6677b3a
|
[
"Apache-2.0"
] | null | null | null |
from funcsfa._data_matrix import DataMatrix, StackedDataMatrix
from funcsfa._core import SFA
| 31
| 62
| 0.870968
| 12
| 93
| 6.5
| 0.75
| 0.282051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 93
| 2
| 63
| 46.5
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e1abf4ee5d17a29a78ea68b79627d91130ada804
| 111
|
py
|
Python
|
dislash/application_commands/_modifications/__init__.py
|
Bakersbakebread/dislash.py
|
afe19c3ebf0b95a3abdec08f3fde8b455024aabd
|
[
"MIT"
] | 371
|
2021-02-07T07:47:38.000Z
|
2022-02-17T13:06:51.000Z
|
dislash/application_commands/_modifications/__init__.py
|
Bakersbakebread/dislash.py
|
afe19c3ebf0b95a3abdec08f3fde8b455024aabd
|
[
"MIT"
] | 39
|
2021-03-22T06:06:05.000Z
|
2021-12-20T05:46:36.000Z
|
dislash/application_commands/_modifications/__init__.py
|
Bakersbakebread/dislash.py
|
afe19c3ebf0b95a3abdec08f3fde8b455024aabd
|
[
"MIT"
] | 51
|
2021-02-08T18:12:40.000Z
|
2022-01-17T14:14:50.000Z
|
# This is an internal subpackage created for
# careful modifications of discord.py and discord.py 2.0 methods.
| 37
| 65
| 0.792793
| 18
| 111
| 4.888889
| 0.888889
| 0.204545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 0.153153
| 111
| 2
| 66
| 55.5
| 0.914894
| 0.954955
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e1cc1a2046abad870a1d388526b4e833a9c43064
| 71
|
py
|
Python
|
app/infrastructure/database/repositories/__init__.py
|
AnViSe/cost_confirmation_bot
|
f8eaa39c3df742bef0fc79b8b7ce0231f1b18749
|
[
"MIT"
] | 13
|
2021-12-27T19:46:19.000Z
|
2022-03-19T07:55:25.000Z
|
app/infrastructure/database/repositories/__init__.py
|
AnViSe/cost_confirmation_bot
|
f8eaa39c3df742bef0fc79b8b7ce0231f1b18749
|
[
"MIT"
] | null | null | null |
app/infrastructure/database/repositories/__init__.py
|
AnViSe/cost_confirmation_bot
|
f8eaa39c3df742bef0fc79b8b7ce0231f1b18749
|
[
"MIT"
] | 1
|
2022-02-07T10:48:18.000Z
|
2022-02-07T10:48:18.000Z
|
from .access_level import AccessLevelReader
from .user import UserRepo
| 23.666667
| 43
| 0.859155
| 9
| 71
| 6.666667
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112676
| 71
| 2
| 44
| 35.5
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
83479dc4c76920e80ea3ecbd1812e99c34ea47a4
| 392
|
py
|
Python
|
octicons16px/markdown.py
|
andrewp-as-is/octicons16px.py
|
1272dc9f290619d83bd881e87dbd723b0c48844c
|
[
"Unlicense"
] | 1
|
2021-01-28T06:47:39.000Z
|
2021-01-28T06:47:39.000Z
|
octicons16px/markdown.py
|
andrewp-as-is/octicons16px.py
|
1272dc9f290619d83bd881e87dbd723b0c48844c
|
[
"Unlicense"
] | null | null | null |
octicons16px/markdown.py
|
andrewp-as-is/octicons16px.py
|
1272dc9f290619d83bd881e87dbd723b0c48844c
|
[
"Unlicense"
] | null | null | null |
OCTICON_MARKDOWN = """
<svg class="octicon octicon-markdown" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" width="16" height="16"><path fill-rule="evenodd" d="M14.85 3H1.15C.52 3 0 3.52 0 4.15v7.69C0 12.48.52 13 1.15 13h13.69c.64 0 1.15-.52 1.15-1.15v-7.7C16 3.52 15.48 3 14.85 3zM9 11H7V8L5.5 9.92 4 8v3H2V5h2l1.5 2L7 5h2v6zm2.99.5L9.5 8H11V5h2v3h1.5l-2.51 3.5z"></path></svg>
"""
| 78.4
| 363
| 0.688776
| 88
| 392
| 3.056818
| 0.636364
| 0.033457
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.366762
| 0.109694
| 392
| 4
| 364
| 98
| 0.404011
| 0
| 0
| 0
| 0
| 0.333333
| 0.933504
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
55cec2a1d89830a1f480d6393829e7375c2ebfe7
| 165
|
py
|
Python
|
tm1doc/__init__.py
|
kaylon/tm1doc
|
b4d50b5367603ad24de6213b227ad347ffc6aa1e
|
[
"MIT"
] | 6
|
2016-11-10T10:43:08.000Z
|
2020-07-29T17:52:12.000Z
|
tm1doc/__init__.py
|
kaylon/tm1doc
|
b4d50b5367603ad24de6213b227ad347ffc6aa1e
|
[
"MIT"
] | 2
|
2016-11-10T10:53:18.000Z
|
2019-02-21T15:52:57.000Z
|
tm1doc/__init__.py
|
kaylon/tm1doc
|
b4d50b5367603ad24de6213b227ad347ffc6aa1e
|
[
"MIT"
] | 2
|
2017-07-10T16:05:11.000Z
|
2021-03-07T22:34:46.000Z
|
from config import Config
from flask import Flask
tm1doc = Flask(__name__,static_url_path='/static')
tm1doc.config.from_object(Config)
from tm1doc import routes
| 16.5
| 50
| 0.806061
| 24
| 165
| 5.25
| 0.458333
| 0.238095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02069
| 0.121212
| 165
| 9
| 51
| 18.333333
| 0.848276
| 0
| 0
| 0
| 0
| 0
| 0.042945
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
55dbb7214ef4e04e6e0d2210be8d04ca66a3d0e5
| 70
|
py
|
Python
|
tasks/youtube-subtitles/youtube_subtitles/voice/__init__.py
|
HackerDom/qctf-starter-2018
|
f4eef0fd41d777661b9fbcc61dcee9709d9f6268
|
[
"MIT"
] | 8
|
2018-03-15T12:07:11.000Z
|
2020-12-01T15:02:46.000Z
|
tasks/youtube-subtitles/youtube_subtitles/voice/__init__.py
|
irdkwmnsb/lkshl-ctf
|
e5c0200ddc8ba73df5f321b87b9763fb1bbaba57
|
[
"MIT"
] | 17
|
2020-01-28T22:17:42.000Z
|
2022-03-11T23:18:09.000Z
|
tasks/youtube-subtitles/youtube_subtitles/voice/__init__.py
|
irdkwmnsb/lkshl-ctf
|
e5c0200ddc8ba73df5f321b87b9763fb1bbaba57
|
[
"MIT"
] | 2
|
2018-11-26T18:54:27.000Z
|
2018-12-05T17:37:32.000Z
|
from youtube_subtitles.voice.sapi_synthesizer import create_recording
| 35
| 69
| 0.914286
| 9
| 70
| 6.777778
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 70
| 1
| 70
| 70
| 0.924242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
55f642b2170987a2861d18a75c461d50a85fddcf
| 22,949
|
py
|
Python
|
Gaia (UV filter checking tool)/fast_mcat_uvcheck_V.1.1.py
|
prajwel/UVIT-POC
|
eaaeb26f11b2c6e19cd96d3a99017b7bb39ee7aa
|
[
"Apache-2.0"
] | null | null | null |
Gaia (UV filter checking tool)/fast_mcat_uvcheck_V.1.1.py
|
prajwel/UVIT-POC
|
eaaeb26f11b2c6e19cd96d3a99017b7bb39ee7aa
|
[
"Apache-2.0"
] | null | null | null |
Gaia (UV filter checking tool)/fast_mcat_uvcheck_V.1.1.py
|
prajwel/UVIT-POC
|
eaaeb26f11b2c6e19cd96d3a99017b7bb39ee7aa
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2.7
'''A tool for determining the UVIT FUV and NUV filters.
Copyright 2017 Prajwel Joseph
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Changes; when, what
-------------------
Dec 22, 2017: bug fixes.
Dec 23, 2017: deals with cases where FUV is not present.
The author would like to acknowledge inputs from Dr. Koshy George
which greatly helped the developement of this script.
'''
import os
import sys
import string
import urllib
import numpy as np
from astropy.io import fits
from requests import Session
from bs4 import BeautifulSoup
from astropy import units as u
from astropy.table import Table, hstack
from astropy.coordinates import SkyCoord
# To get the user input.
instrument = str(sys.argv[1])
RA = str(sys.argv[2])
DEC = str(sys.argv[3])
#instrument = 'uvit'
#RA = "7:36:51.396"
#DEC = "65:36:9.170"
# To check the RA, DEC user input.
if (DEC.count(':') == RA.count(':') == 2):
pass
else:
sys.exit(1)
# To do all the stuff in a specific directory.
working_arena = '.'
os.chdir(working_arena)
# instrument and radius of search in arsec.
field_radius = {'uvit' : 1200,
'sxt' : 1500,
'czti' : 1680,
'laxpc' : 1680}
# Functions to convert magnitude to UVIT count rates.
def countfuv(mg):
caf2 = 1.0
baf2 = 0.85
sapphire = 0.63
silica = 0.22
mg1 = 18.22
if mg <= 15.0:
mg_c = 5.371 + (20.0 * mg - 210.2) ** 0.5
else:
mg_c = mg
cr1 = caf2 * 10.0 ** ((mg1 - mg_c) * 0.4)
cr2 = baf2 * 10.0 ** ((mg1 - mg_c) * 0.4)
cr3 = sapphire *10.0 ** ((mg1 - mg_c) * 0.4)
cr4 = silica * 10.0 ** ((mg1 - mg_c) * 0.4)
return mg, mg_c, cr1, cr2, cr3, cr4
countfuv = np.vectorize(countfuv)
def countfuv_abs(mg): # for cases where FUV is absent.
caf2 = 1.0
baf2 = 0.85
sapphire = 0.63
silica = 0.22
mg1 = 18.22
if mg <= 15.0:
mg_c = 2.634 + (26.316 * mg - 245.329) ** 0.5
else:
mg_c = mg
mg_c = mg_c - 1.65
cr1 = caf2 * 10.0 ** ((mg1 - mg_c) * 0.4)
cr2 = baf2 * 10.0 ** ((mg1 - mg_c) * 0.4)
cr3 = sapphire *10.0 ** ((mg1 - mg_c) * 0.4)
cr4 = silica * 10.0 ** ((mg1 - mg_c) * 0.4)
return mg, mg_c, cr1, cr2, cr3, cr4
countfuv_abs = np.vectorize(countfuv_abs)
def countnuv(mg):
silica = 1.0
b4 = 0.22
b13 = 0.27
b15 = 0.074
n2 = 0.055
mg1 = 20.0
if mg <= 15.0:
mg_c = 2.634 + (26.316 * mg - 245.329) ** 0.5
else:
mg_c = mg
cr1 = silica * 10.0 ** ((mg1 - mg_c) * 0.4)
cr2 = b4 * 10 ** ((mg1-mg_c) * 0.4)
cr3 = b13 * 10 ** ((mg1 - mg_c) * 0.4)
cr4 = b15 * 10 ** ((mg1 - mg_c) * 0.4)
cr5 = n2 * 10 ** ((mg1 - mg_c) * 0.4)
return mg, mg_c, cr1, cr2, cr3, cr4, cr5
countnuv = np.vectorize(countnuv)
# Function to find separation in celestial coordinates.
# Target coordinate from the user-supplied sexagesimal RA/DEC.
cc = SkyCoord(RA, DEC, unit = (u.hourangle, u.deg))
def cel_separation(a, b):
    """Return the angular separation between (a, b) in ICRS degrees and cc."""
    coo = SkyCoord(a, b, frame = 'icrs', unit = 'deg')
    return coo.separation(cc)
# To get on with MAST website queries: the form wants ':' replaced by '+'.
# str.replace works on Python 2 and 3; the original string.replace()
# function exists only in the Python 2 'string' module.
ra = RA.replace(':', '+')
dec = DEC.replace(':', '+')
# Mast website form data.
mastdata = {'__EVENTTARGET': '""',
'__EVENTARGUMENT' : '""',
'__VIEWSTATE' : '/wEPDwUKMTUwNjg2NDc5Ng8WAh4TVmFsaWRhdGVSZXF1ZXN0TW9kZQIBFgQCAQ8WAh4JaW5uZXJodG1sBRNNQVNULkdhbGV4LlRpbGVMaXN0ZAIDD2QWAgIBDxYKHgtjZWxsc3BhY2luZwUBMB4LY2VsbHBhZGRpbmcFATAeBXdpZHRoBQM3NjAeBmJvcmRlcgUBMB4FYWxpZ24FBmNlbnRlchYGZg9kFgJmD2QWAmYPZBYOAgEPPCsABQEDFCsAARAWCB4GSXRlbUlEBRZfY3RsMl9NQVNULW1lbnVJdGVtMDAxHghJdGVtVGV4dAUETUFTVB4HSXRlbVVSTAUYaHR0cDovL2FyY2hpdmUuc3RzY2kuZWR1HhBNZW51SXRlbUNzc0NsYXNzBQt0b3BuYXZjb2xvcmRkZAIDDzwrAAUBAxQrAAEQFggfBwUXX2N0bDJfU1RTY0ktbWVudUl0ZW0wMDEfCAUFU1RTY0kfCQUUaHR0cDovL3d3dy5zdHNjaS5lZHUfCgULdG9wbmF2Y29sb3JkZGQCBQ88KwAFAQMUKwABEBYKHwcFIF9jdGwyX1NlYXJjaGVzX1Rvb2xzLW1lbnVJdGVtMDAxHwgFBVRvb2xzHwkFJmh0dHA6Ly9hcmNoaXZlLnN0c2NpLmVkdS9zZWFyY2hlcy5odG1sHg1JdGVtTGVmdEltYWdlBREuLi9NZW51cy9kb3duLmdpZh4SSXRlbUxlZnRJbWFnZUFsaWduCyokU3lzdGVtLldlYi5VSS5XZWJDb250cm9scy5JbWFnZUFsaWduAhQrAAkQFgYfBwU0X2N0bDJfU2VhcmNoZXNfVG9vbHMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAwMB8IBQZBbGFkaW4fCQU5aHR0cDovL2FyY2hpdmUuc3RzY2kuZWR1L2NnaS1iaW4vbnBoLWFsYWRpbi5wbD9mcm9tPVNUU2NJZGQQFgYfBwU0X2N0bDJfU2VhcmNoZXNfVG9vbHMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAwMR8IBQlTY3JhcGJvb2sfCQUmaHR0cDovL2FyY2hpdmUuc3RzY2kuZWR1L3NjcmFwYm9vay5waHBkZBAWBh8HBTRfY3RsMl9TZWFyY2hlc19Ub29scy1tZW51SXRlbTAwMS1zdWJNZW51LW1lbnVJdGVtMDAyHwgFEFZpemllUi9NQVNUIFhjb3IfCQUjaHR0cDovL2FyY2hpdmUuc3RzY2kuZWR1L3Zpemllci5waHBkZBAWBh8HBTRfY3RsMl9TZWFyY2hlc19Ub29scy1tZW51SXRlbTAwMS1zdWJNZW51LW1lbnVJdGVtMDAzHwgFDU5FRC9NQVNUIFhjb3IfCQUgaHR0cDovL2FyY2hpdmUuc3RzY2kuZWR1L25lZC5waHBkZBAWBh8HBTRfY3RsMl9TZWFyY2hlc19Ub29scy1tZW51SXRlbTAwMS1zdWJNZW51LW1lbnVJdGVtMDA0HwgFCUNvcGxvdHRlch8JBSlodHRwOi8vYXJjaGl2ZS5zdHNjaS5lZHUvbWFzdF9jb3Bsb3QuaHRtbGRkEBYGHwcFNF9jdGwyX1NlYXJjaGVzX1Rvb2xzLW1lbnVJdGVtMDAxLXN1Yk1lbnUtbWVudUl0ZW0wMDUfCAUIU3BlY3ZpZXcfCQU5aHR0cDovL3d3dy5zdHNjaS5lZHUvcmVzb3VyY2VzL3NvZnR3YXJlX2hhcmR3YXJlL3NwZWN2aWV3ZGQQFgYfBwU0X2N0bDJfU2VhcmNoZXNfVG9vbHMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAwNh8IBQhTdGFyVmlldx8JBR9odHRwOi8vc3RhcnZpZXcuc3RzY2kuZWR1L2h0bWwvZGQQFgYfBwU0X2N0bDJfU2VhcmNoZXNfVG9vbHMtbWVudUl0ZW0
wMDEtc3ViTWVudS1tZW51SXRlbTAwNx8IBQlBYnN0cmFjdHMfCQUnaHR0cDovL2FyY2hpdmUuc3RzY2kuZWR1L2Fic3RyYWN0cy5odG1sZGQQFgYfBwU0X2N0bDJfU2VhcmNoZXNfVG9vbHMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAwOB8IBQdtb3JlLi4uHwkFJmh0dHA6Ly9hcmNoaXZlLnN0c2NpLmVkdS9zZWFyY2hlcy5odG1sZGRkZAIHDzwrAAUBAxQrAAEQFgofBwUaX2N0bDJfTWlzc2lvbnMtbWVudUl0ZW0wMDEfCAUOTWlzc2lvbiBTZWFyY2gfCQUmaHR0cDovL2FyY2hpdmUuc3RzY2kuZWR1L21pc3Npb25zLmh0bWwfCwURLi4vTWVudXMvZG93bi5naWYfDAsrBAIUKwAdEBYGHwcFLl9jdGwyX01pc3Npb25zLW1lbnVJdGVtMDAxLXN1Yk1lbnUtbWVudUl0ZW0wMDAfCAURIDxiPiBIdWJibGUgPC9iPiAfCQUnaHR0cDovL2FyY2hpdmUuc3RzY2kuZWR1L2hzdC9zZWFyY2gucGhwZGQQFgYfBwUuX2N0bDJfTWlzc2lvbnMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAwMR8IBSAgPGI+IEh1YmJsZSBMZWdhY3kgQXJjaGl2ZSA8L2I+IB8JBSFodHRwOi8vaGxhLnN0c2NpLmVkdS9obGF2aWV3Lmh0bWxkZBAWBh8HBS5fY3RsMl9NaXNzaW9ucy1tZW51SXRlbTAwMS1zdWJNZW51LW1lbnVJdGVtMDAyHwgFFCA8Yj4gSFNUb25saW5lIDwvYj4gHwkFLWh0dHA6Ly9hcmNoaXZlLnN0c2NpLmVkdS9oc3RvbmxpbmUvc2VhcmNoLnBocGRkEBYGHwcFLl9jdGwyX01pc3Npb25zLW1lbnVJdGVtMDAxLXN1Yk1lbnUtbWVudUl0ZW0wMDMfCAUjIDxiPiBIU1QgUHJlc3MgUmVsZWFzZSBJbWFnZXMgPC9iPiAfCQUoaHR0cDovL2FyY2hpdmUuc3RzY2kuZWR1L3N0cHIvc2VhcmNoLnBocGRkEBYGHwcFLl9jdGwyX01pc3Npb25zLW1lbnVJdGVtMDAxLXN1Yk1lbnUtbWVudUl0ZW0wMDQfCAUOIDxiPiBEU1MgIDwvYj4fCQUqaHR0cDovL2FyY2hpdmUuc3RzY2kuZWR1L2NnaS1iaW4vZHNzX2Zvcm0vZGQQFgYfBwUuX2N0bDJfTWlzc2lvbnMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAwNR8IBRQgPGI+IEdBTEVYVmlldyAgPC9iPh8JBQsvR2FsZXhWaWV3L2RkEBYGHwcFLl9jdGwyX01pc3Npb25zLW1lbnVJdGVtMDAxLXN1Yk1lbnUtbWVudUl0ZW0wMDYfCAUQIDxiPiBHQUxFWCAgPC9iPh8JBRMvR1I2Lz9wYWdlPW1hc3Rmb3JtZGQQFgYfBwUuX2N0bDJfTWlzc2lvbnMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAwNx8IBRsgPGI+IEpXU1QgU0lEIEFyY2hpdmUgIDwvYj4fCQUzaHR0cDovL2FyY2hpdmUuc3RzY2kuZWR1L2p3c3Qvc2lkYXJjaGl2ZS9zZWFyY2gucGhwZGQQFgYfBwUuX2N0bDJfTWlzc2lvbnMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAwOB8IBRUgPGI+IEtlcGxlciBEYXRhIDwvYj4fCQU2aHR0cDovL2FyY2hpdmUuc3RzY2kuZWR1L2tlcGxlci9kYXRhX3NlYXJjaC9zZWFyY2gucGhwZGQQFgYfBwUuX2N0bDJfTWlzc2lvbnMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAwOR8IBRggPGI+IEt
lcGxlciBUYXJnZXRzIDwvYj4fCQU1aHR0cDovL2FyY2hpdmUuc3RzY2kuZWR1L2tlcGxlci9rZXBsZXJfZm92L3NlYXJjaC5waHBkZBAWBh8HBS5fY3RsMl9NaXNzaW9ucy1tZW51SXRlbTAwMS1zdWJNZW51LW1lbnVJdGVtMDEwHwgFEyA8Yj4gU3dpZnRVVk9UIDwvYj4fCQUtaHR0cDovL2FyY2hpdmUuc3RzY2kuZWR1L3N3aWZ0dXZvdC9zZWFyY2gucGhwZGQQFgYfBwUuX2N0bDJfTWlzc2lvbnMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAxMR8IBREgPGI+IFhNTS1PTSAgPC9iPh8JBSpodHRwOi8vYXJjaGl2ZS5zdHNjaS5lZHUveG1tLW9tL3NlYXJjaC5waHBkZBAWBh8HBS5fY3RsMl9NaXNzaW9ucy1tZW51SXRlbTAwMS1zdWJNZW51LW1lbnVJdGVtMDEyHwgFDiBCRUZTIChPUkZFVVMpHwkFKGh0dHA6Ly9hcmNoaXZlLnN0c2NpLmVkdS9iZWZzL3NlYXJjaC5waHBkZBAWBh8HBS5fY3RsMl9NaXNzaW9ucy1tZW51SXRlbTAwMS1zdWJNZW51LW1lbnVJdGVtMDEzHwgFDyBDb3Blcm5pY3VzLXJhdx8JBS5odHRwOi8vYXJjaGl2ZS5zdHNjaS5lZHUvY29wZXJuaWN1cy9zZWFyY2gucGhwZGQQFgYfBwUuX2N0bDJfTWlzc2lvbnMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAxNB8IBREgQ29wZXJuaWN1cy1jb2FkZB8JBTRodHRwOi8vYXJjaGl2ZS5zdHNjaS5lZHUvY29wZXJuaWN1cy9jb2FkZC9zZWFyY2gucGhwZGQQFgYfBwUuX2N0bDJfTWlzc2lvbnMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAxNR8IBQYgRVBPQ0gfCQU4aHR0cDovL2FyY2hpdmUuc3RzY2kuZWR1L2Vwb2NoL2Vwb2NoX21hc3RfZGlyZWN0b3J5Lmh0bWxkZBAWBh8HBS5fY3RsMl9NaXNzaW9ucy1tZW51SXRlbTAwMS1zdWJNZW51LW1lbnVJdGVtMDE2HwgFBiBFVVZFIB8JBShodHRwOi8vYXJjaGl2ZS5zdHNjaS5lZHUvZXV2ZS9zZWFyY2gucGhwZGQQFgYfBwUuX2N0bDJfTWlzc2lvbnMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAxNx8IBRFGVVNFIE9ic2VydmF0aW9ucx8JBShodHRwOi8vYXJjaGl2ZS5zdHNjaS5lZHUvZnVzZS9zZWFyY2gucGhwZGQQFgYfBwUuX2N0bDJfTWlzc2lvbnMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAxOB8IBQ5GVVNFIEV4cG9zdXJlcx8JBTFodHRwOi8vYXJjaGl2ZS5zdHNjaS5lZHUvZnVzZS9leHBvc3VyZS9zZWFyY2gucGhwZGQQFgYfBwUuX2N0bDJfTWlzc2lvbnMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAxOR8IBQUgR1NDIB8JBTdodHRwOi8vZ3Nzcy5zdHNjaS5lZHUvd2Vic2VydmljZXMvR1NDMi9HU0MyV2ViRm9ybS5hc3B4ZGQQFgYfBwUuX2N0bDJfTWlzc2lvbnMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAyMB8IBQYgSFBPTCAfCQUoaHR0cDovL2FyY2hpdmUuc3RzY2kuZWR1L2hwb2wvc2VhcmNoLnBocGRkEBYGHwcFLl9jdGwyX01pc3Npb25zLW1lbnVJdGVtMDAxLXN1Yk1lbnUtbWVudUl0ZW0wMjEfCAUFIEhVVCAfCQUnaHR0cDovL2FyY2hpdmUuc3RzY2kuZWR
1L2h1dC9zZWFyY2gucGhwZGQQFgYfBwUuX2N0bDJfTWlzc2lvbnMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAyMh8IBRAgSU1BUFMgKE9SRkVVUykgHwkFKWh0dHA6Ly9hcmNoaXZlLnN0c2NpLmVkdS9pbWFwcy9zZWFyY2gucGhwZGQQFgYfBwUuX2N0bDJfTWlzc2lvbnMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAyMx8IBQUgSVVFIB8JBSdodHRwOi8vYXJjaGl2ZS5zdHNjaS5lZHUvaXVlL3NlYXJjaC5waHBkZBAWBh8HBS5fY3RsMl9NaXNzaW9ucy1tZW51SXRlbTAwMS1zdWJNZW51LW1lbnVJdGVtMDI0HwgFDyBUVUVTIChPUkZFVVMpIB8JBShodHRwOi8vYXJjaGl2ZS5zdHNjaS5lZHUvdHVlcy9zZWFyY2gucGhwZGQQFgYfBwUuX2N0bDJfTWlzc2lvbnMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAyNR8IBQUgVUlUIB8JBSdodHRwOi8vYXJjaGl2ZS5zdHNjaS5lZHUvdWl0L3NlYXJjaC5waHBkZBAWBh8HBS5fY3RsMl9NaXNzaW9ucy1tZW51SXRlbTAwMS1zdWJNZW51LW1lbnVJdGVtMDI2HwgFCyBWTEEtRklSU1QgHwkFLGh0dHA6Ly9hcmNoaXZlLnN0c2NpLmVkdS92bGFmaXJzdC9zZWFyY2gucGhwZGQQFgYfBwUuX2N0bDJfTWlzc2lvbnMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAyNx8IBQcgV1VQUEUgHwkFKWh0dHA6Ly9hcmNoaXZlLnN0c2NpLmVkdS93dXBwZS9zZWFyY2gucGhwZGQQFgYfBwUuX2N0bDJfTWlzc2lvbnMtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAyOB8IBQdtb3JlLi4uHwkFL2h0dHA6Ly9hcmNoaXZlLnN0c2NpLmVkdS9zZWFyY2hlcy5odG1sI21pc3Npb25zZGRkZAIJDzwrAAUBAxQrAAEQFgYfBwUbX2N0bDJfVHV0b3JpYWxzLW1lbnVJdGVtMDAxHwgFCFR1dG9yaWFsHwkFLGh0dHA6Ly9hcmNoaXZlLnN0c2NpLmVkdS90dXRvcmlhbC9pbmRleC5odG1sZGRkAgsPPCsABQEDFCsAARAWCB8HBRxfY3RsMl9TaXRlU2VhcmNoLW1lbnVJdGVtMDAxHwgFC1NpdGUgU2VhcmNoHwkFEi4vP3BhZ2U9c2l0ZXNlYXJjaB8KBQt0b3BuYXZjb2xvcmRkZAINDzwrAAUBAxQrAAEQFggfBwUaX2N0bDJfRm9sbG93VXMtbWVudUl0ZW0wMDAfCAUJRm9sbG93IFVzHwsFES4uL01lbnVzL2Rvd24uZ2lmHwwLKwQCFCsAAhAWBh8HBS5fY3RsMl9Gb2xsb3dVcy1tZW51SXRlbTAwMC1zdWJNZW51LW1lbnVJdGVtMDAwHwgFCiBGYWNlYm9vayAfCQUjaHR0cDovL3d3dy5mYWNlYm9vay5jb20vTUFTVEFyY2hpdmVkZBAWBh8HBS5fY3RsMl9Gb2xsb3dVcy1tZW51SXRlbTAwMC1zdWJNZW51LW1lbnVJdGVtMDAxHwgFCSBUd2l0dGVyIB8JBR5odHRwczovL3R3aXR0ZXIuY29tL01BU1RfTmV3cy9kZGRkAgIPZBYEZg9kFgJmD2QWAgIBDzwrAAUBAxQrAAoQFggfBwUaX2N0bDhfbGVmdE1lbnUtbWVudUl0ZW0wMDEfCAUSU2VhcmNoICYgUmV0cmlldmFsHwsFEi4uL01lbnVzL2Fycm93LmdpZh8MCysEAhQrAAMQFgYfBwUuX2N0bDhfbGVmdE1lbnUtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTA
wMB8IBRVTb3VyY2UgQ2F0YWxvZyBTZWFyY2gfCQUQLi8/cGFnZT1tYXN0Zm9ybWRkEBYGHwcFLl9jdGw4X2xlZnRNZW51LW1lbnVJdGVtMDAxLXN1Yk1lbnUtbWVudUl0ZW0wMDEfCAUKU1FMIFNlYXJjaB8JBQ8uLz9wYWdlPXNxbGZvcm1kZBAWCB8HBS5fY3RsOF9sZWZ0TWVudS1tZW51SXRlbTAwMS1zdWJNZW51LW1lbnVJdGVtMDAyHwgFC1RpbGUgU2VhcmNoHwsFEi4uL01lbnVzL2Fycm93LmdpZh8MCysEAhQrAAgQFgYfBwVCX2N0bDhfbGVmdE1lbnUtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAwMi1zdWJNZW51LW1lbnVJdGVtMDAwHwgFGjxiPkFJUzwvYj46IEFsbCBTa3kgU3VydmV5Hg9JdGVtQ29tbWFuZE5hbWUFA2Fpc2RkEBYGHwcFQl9jdGw4X2xlZnRNZW51LW1lbnVJdGVtMDAxLXN1Yk1lbnUtbWVudUl0ZW0wMDItc3ViTWVudS1tZW51SXRlbTAwMR8IBR88Yj5ESVM8L2I+OiBEZWVwIEltYWdpbmcgU3VydmV5Hw0FA2Rpc2RkEBYGHwcFQl9jdGw4X2xlZnRNZW51LW1lbnVJdGVtMDAxLXN1Yk1lbnUtbWVudUl0ZW0wMDItc3ViTWVudS1tZW51SXRlbTAwMh8IBSE8Yj5NSVM8L2I+OiBNZWRpdW0gSW1hZ2luZyBTdXJ2ZXkfDQUDbWlzZGQQFgYfBwVCX2N0bDhfbGVmdE1lbnUtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAwMi1zdWJNZW51LW1lbnVJdGVtMDAzHwgFIjxiPk5HUzwvYj46IE5lYXJieSBHYWxheGllcyBTdXJ2ZXkfDQUDbmdzZGQQFgYfBwVCX2N0bDhfbGVmdE1lbnUtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAwMi1zdWJNZW51LW1lbnVJdGVtMDA0HwgFIzxiPkdJSTwvYj46IEd1ZXN0IEludmVzdGlnYXRvciBEYXRhHw0FA2dpaWRkEBYGHwcFQl9jdGw4X2xlZnRNZW51LW1lbnVJdGVtMDAxLXN1Yk1lbnUtbWVudUl0ZW0wMDItc3ViTWVudS1tZW51SXRlbTAwNR8IBR48Yj5DQUk8L2I+OiBDYWxpYnJhdGlvbiBTdXJ2ZXkfDQUDY2FpZGQQFgYfBwVCX2N0bDhfbGVmdE1lbnUtbWVudUl0ZW0wMDEtc3ViTWVudS1tZW51SXRlbTAwMi1zdWJNZW51LW1lbnVJdGVtMDA2HwgFKDxiPlNQRUNUUkE8L2I+OiBGcm9tIEFsbCBBdmFpbGFibGUgVGlsZXMfDQUHc3BlY3RyYWRkEBYGHwcFQl9jdGw4X2xlZnRNZW51LW1lbnVJdGVtMDAxLXN1Yk1lbnUtbWVudUl0ZW0wMDItc3ViTWVudS1tZW51SXRlbTAwNx8IBRI8Yj5BTEwgU1VSVkVZUzwvYj4fDQUKYWxsc3VydmV5c2RkZGQQFgofBwUaX2N0bDhfbGVmdE1lbnUtbWVudUl0ZW0wMDMfCAUTR3Vlc3QgSW52ZXN0aWdhdG9ycx8JBQ4uLz9wYWdlPWdpbGlzdB8LBRMuLi9NZW51cy9zZWN1cmUuZ2lmHwwLKwQCZGQQFggfBwUaX2N0bDhfbGVmdE1lbnUtbWVudUl0ZW0wMDUfCAUNRG9jdW1lbnRhdGlvbh8LBRIuLi9NZW51cy9hcnJvdy5naWYfDAsrBAIUKwACEBYIHwcFLl9jdGw4X2xlZnRNZW51LW1lbnVJdGVtMDA1LXN1Yk1lbnUtbWVudUl0ZW0wMDAfCAULPGI+TUFTVDwvYj4fCwUSLi4vTWVudXMvYXJyb3cuZ2lmHwwLKwQCFCsABRAWBh8HBUJfY3RsOF9
sZWZ0TWVudS1tZW51SXRlbTAwNS1zdWJNZW51LW1lbnVJdGVtMDAwLXN1Yk1lbnUtbWVudUl0ZW0wMDAfCAUKSGlnaCBMZXZlbB8JBRIuLz9wYWdlPWdlbmVyYWxmYXFkZBAWBh8HBUJfY3RsOF9sZWZ0TWVudS1tZW51SXRlbTAwNS1zdWJNZW51LW1lbnVJdGVtMDAwLXN1Yk1lbnUtbWVudUl0ZW0wMDEfCAUHUXVlcmllcx8JBQ4uLz9wYWdlPXNxbGZhcWRkEBYGHwcFQl9jdGw4X2xlZnRNZW51LW1lbnVJdGVtMDA1LXN1Yk1lbnUtbWVudUl0ZW0wMDAtc3ViTWVudS1tZW51SXRlbTAwMh8IBRBEYXRhIERlc2NyaXB0aW9uHwkFDS4vP3BhZ2U9ZGRmYXFkZBAWBh8HBUJfY3RsOF9sZWZ0TWVudS1tZW51SXRlbTAwNS1zdWJNZW51LW1lbnVJdGVtMDAwLXN1Yk1lbnUtbWVudUl0ZW0wMDMfCAUOVXNlciBTdWJtaXR0ZWQfCQUPLi8/cGFnZT11c2VyZmFxZGQQFgYfBwVCX2N0bDhfbGVmdE1lbnUtbWVudUl0ZW0wMDUtc3ViTWVudS1tZW51SXRlbTAwMC1zdWJNZW51LW1lbnVJdGVtMDA1HwgFCFR1dG9yaWFsHwkFEC4vP3BhZ2U9dHV0b3JpYWxkZGQQFggfBwUuX2N0bDhfbGVmdE1lbnUtbWVudUl0ZW0wMDUtc3ViTWVudS1tZW51SXRlbTAwMR8IBRM8Yj5DYWx0ZWNoIEZBUXM8L2I+HwsFEi4uL01lbnVzL2Fycm93LmdpZh8MCysEAhQrAAIQFgYfBwVCX2N0bDhfbGVmdE1lbnUtbWVudUl0ZW0wMDUtc3ViTWVudS1tZW51SXRlbTAwMS1zdWJNZW51LW1lbnVJdGVtMDAwHwgFEENhbHRlY2ggTWV0YWRhdGEfCQULLi8/cGFnZT1mYXFkZBAWBh8HBUJfY3RsOF9sZWZ0TWVudS1tZW51SXRlbTAwNS1zdWJNZW51LW1lbnVJdGVtMDAxLXN1Yk1lbnUtbWVudUl0ZW0wMDEfCAURQ2FsdGVjaCBUZWNoIERvY3MfCQU1aHR0cDovL3d3dy5nYWxleC5jYWx0ZWNoLmVkdS9yZXNlYXJjaGVyL3RlY2hkb2NzLmh0bWxkZGRkEBYGHwcFGl9jdGw4X2xlZnRNZW51LW1lbnVJdGVtMDA3HwgFDURhdGFiYXNlIEluZm8fCQUMP3BhZ2U9ZGJpbmZvZGQQFgYfBwUaX2N0bDhfbGVmdE1lbnUtbWVudUl0ZW0wMDkfCAUUQ29udHJpYnV0ZWQgU29mdHdhcmUfCQUQLi8/cGFnZT1zb2Z0d2FyZWRkEBYIHwcFGl9jdGw4X2xlZnRNZW51LW1lbnVJdGVtMDExHwgFF0d1ZXN0IEludmVzdGlnYXRvciBTaXRlHwsFEi4uL01lbnVzL2Fycm93LmdpZh8MCysEAhQrAAMQFgYfBwUuX2N0bDhfbGVmdE1lbnUtbWVudUl0ZW0wMTEtc3ViTWVudS1tZW51SXRlbTAwMB8IBQlIb21lIFBhZ2UfCQUdaHR0cDovL2dhbGV4Z2kuZ3NmYy5uYXNhLmdvdi9kZBAWBh8HBS5fY3RsOF9sZWZ0TWVudS1tZW51SXRlbTAxMS1zdWJNZW51LW1lbnVJdGVtMDAxHwgFD0luc3RydW1lbnRhdGlvbh8JBUFodHRwOi8vZ2FsZXhnaS5nc2ZjLm5hc2EuZ292L0RvY3VtZW50cy9FUk9fZGF0YV9kZXNjcmlwdGlvbl8yLmh0bWRkEBYGHwcFLl9jdGw4X2xlZnRNZW51LW1lbnVJdGVtMDExLXN1Yk1lbnUtbWVudUl0ZW0wMDIfCAUNRGF0YSBQaXBlbGluZR8JBUFodHRwOi8vZ2FsZXhnaS5nc2ZjLm5hc2EuZ292L0RvY3VtZW5
0cy9FUk9fZGF0YV9kZXNjcmlwdGlvbl8zLmh0bWRkZBAWBh8HBRpfY3RsOF9sZWZ0TWVudS1tZW51SXRlbTAxMx8IBQ1SZWxhdGVkIFNpdGVzHwkFFC4vP3BhZ2U9cmVsYXRlZHNpdGVzZGQQFgYfBwUaX2N0bDhfbGVmdE1lbnUtbWVudUl0ZW0wMTUfCAUPQWNrbm93bGVkZ21lbnRzHwkFFy4vP3BhZ2U9YWNrbm93bGVkZ21lbnRzZGQQFhQfCQULL0dhbGV4Vmlldy8eD01lbnVJdGVtVG9vbFRpcAUdR2FsZXhWaWV3IChRdWljayBTZWFyY2gpIFRvb2weCkl0ZW1UYXJnZXQFBl9ibGFuax4QSXRlbUltYWdlQWx0VGV4dAUdR2FsZXhWaWV3IChRdWljayBTZWFyY2gpIFRvb2wfCAUKR2FsZXhWaWV3Oh4NTWVudUl0ZW1XaWR0aBsAAAAAAMBiQAEAAAAfBwUaX2N0bDhfbGVmdE1lbnUtbWVudUl0ZW0wMTYeDkl0ZW1SaWdodEltYWdlBRlpbWFnZXMvR2FsZXhWaWV3VGh1bWIucG5nHhNJdGVtUmlnaHRJbWFnZUFsaWduCysEAR4OTWVudUl0ZW1IZWlnaHQbAAAAAADAYkABAAAAZGQQFhQfCQUJL2Nhc2pvYnMvHw4FG0Nhc0pvYnMgKERhdGFiYXNlIFNRTCkgVG9vbB8PBQZfYmxhbmsfEAUbQ2FzSm9icyAoRGF0YWJhc2UgU1FMKSBUb29sHwgFCENhc0pvYnM6HxEbAAAAAADAYkABAAAAHwcFGl9jdGw4X2xlZnRNZW51LW1lbnVJdGVtMDE3HxIFF2ltYWdlcy9DYXNKb2JzVGh1bWIucG5nHxMLKwQBHxQbAAAAAABAYEABAAAAZGRkAgEPZBYCZg8PFgQeCXNvcnRPcmRlcgUHcmFfY2VudB4Mc2hvd0FsbFRpbGVzBQVmYWxzZWQWBAIBDw8WAh4EVGV4dAXKAjxiPlRoZXJlIGFyZSA0NTE5NCB0b3RhbCB0aWxlcyBpbiBhbGwgdGhlIEdBTEVYIHN1cnZleXMuPC9iPjxicj48Zm9udCBzaXplPSctMScgY29sb3I9J2dyYXknPlBsZWFzZSBub3RlOiBTZWFyY2hlcyBpbiB0aGlzIHBhZ2UgYXBwbHkgb25seSB0byBUSUxFIGxldmVsIHByb2R1Y3RzLjxicj5JZiB5b3Ugd2FudCB0byBzZWFyY2ggR0FMRVggb2JqZWN0IGNhdGFsb2dzLCBwbGVhc2UgdXNlIGVpdGhlciB0aGUgPGEgaHJlZj0nP3BhZ2U9bWFzdGZvcm0nPkNhdGFsb2cgT2JqZWN0IFNlYXJjaDwvYT4gb3IgdGhlIDxhIGhyZWY9Jz9wYWdlPXNxbGZvcm0nPlNRTCBTZWFyY2g8L2E+LmRkAhUPDxYCHgdWaXNpYmxlaGQWAgIDDzwrAAsAZAIDD2QWAmYPZBYCZg9kFgICBQ8PFgIfFwUrTGFzdCBNb2RpZmllZCBEYXRlOjxicj4xMi81LzIwMTYgMTo1MTozOSBQTWRkZBOt2pbUX66uvUbSuy3q9kQU8fEC',
'__VIEWSTATEGENERATOR': 'C84C2718',
'__EVENTVALIDATION': '/wEdAAue+6xrb6xgp2ityzurA/pfWsTF2CBs9ziYHlDmus7EnHXVqisK/ch+FuYDN4RJj9bNygAwoalISibjyjYgoB7/Pb1PMsXU2LG7o+i6/zoft2ZmqVWZEJyWTGlJer/5/ymk9SeG9Y8RLbkbyiuf4BcRXP2SoyGCMZyu6LfyUjL5ZgAB13huDNxtBirRDFLR6zW3raPnQUy5sK21W/3eiEs/KUQOVtp9GallVy/IsFMIp4yMEruOYx0KrV7GUndYi0m5y40+',
'_ctl10:txtTargetName': '',
'_ctl10:resolverDropList': 'SIMBAD',
'_ctl10:txtRadius': '0.4',
'_ctl10:txtRA': ra,
'_ctl10:txtDec': dec,
'_ctl10:btnSearch': 'Search'}
# Header for the site.
header = {'Host': 'galex.stsci.edu',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'http://galex.stsci.edu/GR6/?page=tilelist&survey=allsurveys',
'Connection': 'keep-alive'}
# Post request.
session = Session()
# Submit the tile-list search form; the response is the HTML results page.
response = session.post(url = 'http://galex.stsci.edu/GR6/?page=tilelist&survey=allsurveys',
                        data = mastdata,
                        headers = header)
# To make sense of the mess that is MAST.
soup = BeautifulSoup(response.text, 'html.parser')
# href of the first tile's link; it is relative (starts with '.'), so drop
# the leading dot and prepend the GR6 base URL.
ss = soup.find(id = '_ctl10_TileGrid_imgLink_0').get('href')
parent = 'http://galex.stsci.edu/GR6'
son = parent + ss[1:]
# To make sense of the download page.
response2 = session.get(son)
soup2 = BeautifulSoup(response2.text, 'html.parser')
# Every anchor tag on the download page; filtered below for the mcat file.
pp = soup2.find_all("a")
# To download the mcat (galex catalogue): collect links ending in
# 'mcat.fits.gz'.
catalogue_link = []
for link in pp:
    somel = link.get('href')
    # link.get('href') returns None for anchors without an href; guard
    # explicitly instead of the old bare try/except, which also hid any
    # real error raised inside the loop.
    if somel and somel.endswith('mcat.fits.gz'):
        catalogue_link.append(somel)
if len(catalogue_link) != 0:
    catalogue = catalogue_link[0].split('/')[-1]
    # NOTE(review): urllib.urlretrieve is the Python 2 API; on Python 3
    # this is urllib.request.urlretrieve -- confirm the target interpreter.
    urllib.urlretrieve(catalogue_link[0], catalogue)
else:
    # No catalogue found for this tile; nothing more can be done.
    sys.exit(1)
# Reading coordinates from catalogue.
hdu = fits.open(catalogue)
alpha = hdu[1].data['alpha_j2000_merged']
delta = hdu[1].data['delta_j2000_merged']
# NUV
nuv_mag = hdu[1].data['nuv_mag']
# Keep sources with a valid NUV magnitude (-999 is the fill value) that
# are brighter than 22 mag.
refined_set = [(al, de, nm) for al, de, nm
               in zip(alpha, delta, nuv_mag)
               if int(nm) != -999 and nm <= 22.]
nalpha, ndelta, nuv_mag = zip(*refined_set)
# Keep only sources inside the selected instrument's field radius
# around the target coordinate cc.
confined_set = [(nm, al, de) for al, de, nm
                in zip(nalpha, ndelta, nuv_mag)
                if cel_separation(al, de) <= field_radius[instrument] * u.arcsec]
# Five brightest sources (smallest magnitude first); rows: (mag, ra, dec).
nd = np.array(sorted(confined_set))[0:5]
ma, ma_c, ta, tb, tc, td, te = countnuv(nd[:,0])
nuv_res = Table([ma, ma_c, ta, tb, tc, td, te],
                names = ('Mag',
                         'Mag_corrected',
                         'silica',
                         'b4',
                         'b13',
                         'b15',
                         'n2'),
                meta = {'name': 'NUV counts'})
# Two-decimal display for every numeric column.
nuv_res['Mag'].format = '4.2f'
nuv_res['Mag_corrected'].format = '4.2f'
nuv_res['silica'].format = '4.2f'
nuv_res['b4'].format = '4.2f'
nuv_res['b13'].format = '4.2f'
nuv_res['b15'].format = '4.2f'
nuv_res['n2'].format = '4.2f'
# To convert ra_deg and dec_deg to ra_hms and dec_dms.
# NOTE(review): zip() returns an iterator on Python 3; SkyCoord may need a
# list here -- confirm the Python version this script targets.
coord = SkyCoord(zip(nd[:,1], nd[:,2]),
                 frame = 'icrs',
                 unit = 'deg')
RAhms_DECdms = coord.to_string('hmsdms', sep = ':')
ra_hms, dec_dms = zip(*[hmdm.split(' ') for hmdm in RAhms_DECdms])
xy_tab = Table([ra_hms, dec_dms], names = ('ra_hms', 'dec_dms'))
nuv_res = hstack([xy_tab, nuv_res])
print('\n\n### NUV\n\n{}\n'.format(nuv_res))
# FUV
fuv_mag = hdu[1].data['fuv_mag']
fuv_absent = 'no'
if len(np.unique(fuv_mag)) == 1: # when FUV data is absent.
    # A single unique value means the FUV column holds only a fill value;
    # reuse the NUV-selected sources and the NUV->FUV magnitude offset.
    fd = nd
    print('\nFUV observations seem to be absent! Using M_fuv = M_nuv - 1.65.')
    fuv_absent = 'yes'
else:
    # Same filtering as the NUV branch: valid magnitudes brighter than 22,
    # inside the instrument's field radius, five brightest kept.
    refined_set = [(al, de, fm) for al, de, fm
                   in zip(alpha, delta, fuv_mag)
                   if int(fm) != -999 and fm <= 22.]
    falpha, fdelta, fuv_mag = zip(*refined_set)
    confined_set = [(fm, al, de) for al, de, fm
                    in zip(falpha, fdelta, fuv_mag)
                    if cel_separation(al, de) <= field_radius[instrument] * u.arcsec]
    fd = np.array(sorted(confined_set))[0:5]
# Pick the direct FUV conversion or the NUV-derived fallback.
if fuv_absent == 'no':
    ma, ma_c, ta, tb, tc, td = countfuv(fd[:,0])
else:
    ma, ma_c, ta, tb, tc, td = countfuv_abs(fd[:,0])
fuv_res = Table([ma, ma_c, ta, tb, tc, td],
                names = ('Mag',
                         'Mag_corrected',
                         'caf2',
                         'baf2',
                         'sapphire',
                         'silica'),
                meta = {'name': 'FUV counts'})
# Two-decimal display for every numeric column.
fuv_res['Mag'].format = '4.2f'
fuv_res['Mag_corrected'].format = '4.2f'
fuv_res['caf2'].format = '4.2f'
fuv_res['baf2'].format = '4.2f'
fuv_res['sapphire'].format = '4.2f'
fuv_res['silica'].format = '4.2f'
# To convert ra_deg and dec_deg to ra_hms and dec_dms.
# NOTE(review): zip() returns an iterator on Python 3; SkyCoord may need a
# list here -- confirm the Python version this script targets.
coord = SkyCoord(zip(fd[:,1], fd[:,2]),
                 frame = 'icrs',
                 unit = 'deg')
RAhms_DECdms = coord.to_string('hmsdms', sep = ':')
ra_hms, dec_dms = zip(*[hmdm.split(' ') for hmdm in RAhms_DECdms])
xy_tab = Table([ra_hms, dec_dms], names = ('ra_hms', 'dec_dms'))
fuv_res = hstack([xy_tab, fuv_res])
print('\n### FUV \n\n{}\n\nDone!\n'.format(fuv_res))
| 74.752443
| 13,631
| 0.825047
| 1,361
| 22,949
| 13.800882
| 0.296841
| 0.003833
| 0.004153
| 0.004845
| 0.085556
| 0.073577
| 0.063142
| 0.059628
| 0.055422
| 0.048075
| 0
| 0.113968
| 0.111813
| 22,949
| 306
| 13,632
| 74.996732
| 0.807536
| 0.069938
| 0
| 0.273632
| 0
| 0.019901
| 0.706159
| 0.657742
| 0
| 1
| 0
| 0
| 0
| 1
| 0.019901
| false
| 0.00995
| 0.054726
| 0
| 0.094527
| 0.014925
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
363978cb16bfce5c0adc2b8fb0765a2283b25b63
| 164
|
py
|
Python
|
exoatlas/whatsup/__init__.py
|
zkbt/exopop
|
5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8
|
[
"MIT"
] | 4
|
2020-06-24T16:38:27.000Z
|
2022-01-23T01:57:19.000Z
|
exoatlas/whatsup/__init__.py
|
zkbt/exopop
|
5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8
|
[
"MIT"
] | 4
|
2018-09-20T23:12:30.000Z
|
2019-05-15T15:31:58.000Z
|
exoatlas/whatsup/__init__.py
|
zkbt/exopop
|
5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .plan import Plan
from .night import Night
from .observatory import Observatory
from .transit import Transit
from .block import Block
| 20.5
| 36
| 0.756098
| 23
| 164
| 5.391304
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007246
| 0.158537
| 164
| 7
| 37
| 23.428571
| 0.891304
| 0.128049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
36a846c2aaefab0706178c3cc264997412f74e7e
| 4,403
|
py
|
Python
|
vote/tests/util.py
|
jnegrete2005/JuradoFMS
|
25848037e51de1781c419155615d0fb41edc07ec
|
[
"MIT"
] | 2
|
2021-02-24T21:57:50.000Z
|
2021-03-15T08:44:09.000Z
|
vote/tests/util.py
|
jnegrete2005/JuradoFMS
|
25848037e51de1781c419155615d0fb41edc07ec
|
[
"MIT"
] | null | null | null |
vote/tests/util.py
|
jnegrete2005/JuradoFMS
|
25848037e51de1781c419155615d0fb41edc07ec
|
[
"MIT"
] | null | null | null |
from json import loads
from ..models import Competitor, VotingPoll
from graphene_django.utils.testing import graphql_query
def setUp(self):
    """Populate the test database with two Competitor pairs and their polls.

    NOTE(review): appears intended to be pasted into a TestCase as its
    setUp method.  All work happens through ORM side effects of
    `objects.create`; the local names are never read afterwards.
    """
    # Create Competitors
    c1_a = Competitor.objects.create(
        name='Si',
        easy=[1,1,1,1,1,1,1,1,1],
        hard=[1,1,1,1,1,1,1,1,1],
        tematicas_1=[1,1,1,1,1,1,1],
        tematicas_2=[1,1,1,1,1,1,1],
        random_score=[1,1,1,1,1,1,1,1,1,1,1],
        min1=[1,1,1,1,1,1,1,1,1],
        min2=[1,1,1,1,1,1,1,1,1],
        deluxe=[1,1,1,1,1,1,1,1,1,1,1,1,1,1]
    )
    c1_b = Competitor.objects.create(
        name='No',
        easy=[2,2,9,2,2,2,2,2,9],
        hard=[2,2,2,2,2,2,2,2,2],
        tematicas_1=[2,2,2,2,2,2,2],
        tematicas_2=[2,2,2,2,2,2,2],
        random_score=[2,2,2,2,2,2,2,2,2,2,2],
        min1=[2,2,2,2,2,2,2,2,2],
        min2=[2,2,2,2,2,2,2,2,2],
        deluxe=[2,2,2,2,2,2,2,2,2,2,2,2,2,2]
    )
    # These two also carry a 'replica' (tie-breaker) score list.
    c2_a = Competitor.objects.create(
        name='Replica 1',
        easy=[3,3,3,3,3,3,3,3,3],
        hard=[3,3,3,3,3,3,3,3,3],
        tematicas_1=[3,3,3,3,3,3,3],
        tematicas_2=[3,3,3,3,3,3,3],
        random_score=[3,3,3,3,3,3,3,3,3,3,3],
        min1=[3,3,3,3,3,3,3,3,3],
        min2=[3,3,3,3,3,3,3,3,3],
        deluxe=[3,3,3,3,3,3,3,3,3,3,3,3,3,3],
        replica=[3,3,3,3,3,3,3,9,9]
    )
    c2_b = Competitor.objects.create(
        name='Replica 2',
        easy=[3,3,3,3,3,3,3,3,9],
        hard=[3,3,3,3,3,3,3,3,3],
        tematicas_1=[3,3,3,3,3,3,3],
        tematicas_2=[3,3,3,3,3,3,3],
        random_score=[3,3,3,3,3,3,3,3,3,3,3],
        min1=[3,3,3,3,3,3,3,3,3],
        min2=[3,3,3,3,3,3,3,3,3],
        deluxe=[3,3,3,3,3,3,3,3,3,3,3,3,3,3],
        replica=[3,3,3,3,3,3,3,3,3]
    )
    # Create VotingPolls
    poll1 = VotingPoll.objects.create(
        comp_1=c1_a,
        comp_2=c1_b
    )
    poll2 = VotingPoll.objects.create(
        comp_1=c2_a,
        comp_2=c2_b
    )
@classmethod
def setUpTestData(cls):
    """Class-level fixture: same data as setUp, created once per TestCase.

    NOTE(review): duplicates setUp line-for-line; the objects are created
    only for their ORM side effects and the local names are never read.
    """
    # Create Competitors
    c1_a = Competitor.objects.create(
        name='Si',
        easy=[1,1,1,1,1,1,1,1,1],
        hard=[1,1,1,1,1,1,1,1,1],
        tematicas_1=[1,1,1,1,1,1,1],
        tematicas_2=[1,1,1,1,1,1,1],
        random_score=[1,1,1,1,1,1,1,1,1,1,1],
        min1=[1,1,1,1,1,1,1,1,1],
        min2=[1,1,1,1,1,1,1,1,1],
        deluxe=[1,1,1,1,1,1,1,1,1,1,1,1,1,1]
    )
    c1_b = Competitor.objects.create(
        name='No',
        easy=[2,2,9,2,2,2,2,2,9],
        hard=[2,2,2,2,2,2,2,2,2],
        tematicas_1=[2,2,2,2,2,2,2],
        tematicas_2=[2,2,2,2,2,2,2],
        random_score=[2,2,2,2,2,2,2,2,2,2,2],
        min1=[2,2,2,2,2,2,2,2,2],
        min2=[2,2,2,2,2,2,2,2,2],
        deluxe=[2,2,2,2,2,2,2,2,2,2,2,2,2,2]
    )
    # These two also carry a 'replica' (tie-breaker) score list.
    c2_a = Competitor.objects.create(
        name='Replica 1',
        easy=[3,3,3,3,3,3,3,3,3],
        hard=[3,3,3,3,3,3,3,3,3],
        tematicas_1=[3,3,3,3,3,3,3],
        tematicas_2=[3,3,3,3,3,3,3],
        random_score=[3,3,3,3,3,3,3,3,3,3,3],
        min1=[3,3,3,3,3,3,3,3,3],
        min2=[3,3,3,3,3,3,3,3,3],
        deluxe=[3,3,3,3,3,3,3,3,3,3,3,3,3,3],
        replica=[3,3,3,3,3,3,3,9,9]
    )
    c2_b = Competitor.objects.create(
        name='Replica 2',
        easy=[3,3,3,3,3,3,3,3,9],
        hard=[3,3,3,3,3,3,3,3,3],
        tematicas_1=[3,3,3,3,3,3,3],
        tematicas_2=[3,3,3,3,3,3,3],
        random_score=[3,3,3,3,3,3,3,3,3,3,3],
        min1=[3,3,3,3,3,3,3,3,3],
        min2=[3,3,3,3,3,3,3,3,3],
        deluxe=[3,3,3,3,3,3,3,3,3,3,3,3,3,3],
        replica=[3,3,3,3,3,3,3,3,3]
    )
    # Create VotingPolls
    poll1 = VotingPoll.objects.create(
        comp_1=c1_a,
        comp_2=c1_b
    )
    poll2 = VotingPoll.objects.create(
        comp_1=c2_a,
        comp_2=c2_b
    )
class Query:
    """Run a GraphQL query with optional variables and parse the response.

    Attributes:
        response: raw response object returned by graphql_query.
        content: response body decoded from JSON.
    """
    def __init__(self, query: str, variables=None):
        # Avoid a shared mutable default argument; build a fresh dict per
        # call instead (callers that omitted `variables` see no change).
        if variables is None:
            variables = {}
        self.response = graphql_query(query, variables=variables)
        self.content = loads(self.response.content)
def get_key_by_val(my_dict: dict, val):
    """Return the first key in *my_dict* whose value equals *val*.

    The original annotation `str or int` evaluated to just `str`, so it
    was dropped; *val* may be any comparable value.

    Raises:
        KeyError: if no entry maps to *val* (KeyError subclasses the
            Exception the old code raised, so existing handlers still work).
    """
    for key, value in my_dict.items():
        if val == value:
            return key
    raise KeyError('Key doesn\'t exist')
def index_dict(dictionary, n=0):
    """Return the value at insertion position *n* (negative ok).

    Raises IndexError when *n* is out of range, mirroring list indexing.
    """
    if n < 0:
        # Normalise negative indices the same way sequences do.
        n += len(dictionary)
    for position, value in enumerate(dictionary.values()):
        if position == n:
            return value
    raise IndexError("dictionary index out of range")
# Ordinal position of each voting mode; keys match the Competitor score
# field names used above (plus 'end' and 'replica').
modes_to_int = {
    'easy': 0,
    'hard': 1,
    'tematicas_1': 2,
    'tematicas_2': 3,
    'random_score': 4,
    'min1': 5,
    'min2': 6,
    'deluxe': 7,
    'end': 8,
    'replica': 9,
}
# Human-readable (Spanish) display name for each voting-mode key.
mode_aliases = {
    'easy': 'Easy Mode',
    'hard': 'Hard Mode',
    'tematicas_1': 'Primera Temática',
    'tematicas_2': 'Segunda Temática',
    'random_score': 'Random Mode',
    'min1': 'Primer Minuto',
    'min2': 'Segundo Minuto',
    'deluxe': 'Deluxe',
    'end': 'end',
    'replica': 'Réplica',
}
| 25.9
| 61
| 0.563707
| 1,018
| 4,403
| 2.369352
| 0.104126
| 0.243781
| 0.320896
| 0.368159
| 0.668325
| 0.668325
| 0.668325
| 0.668325
| 0.668325
| 0.668325
| 0
| 0.199115
| 0.178742
| 4,403
| 170
| 62
| 25.9
| 0.46792
| 0.02839
| 0
| 0.623377
| 0
| 0
| 0.074725
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032468
| false
| 0
| 0.019481
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
36dd7237873f27d87db89881434bba85374f5569
| 420
|
py
|
Python
|
Desafios Python/m01/ex005.py
|
joaquimjfernandes/Curso-de-Python
|
3356211248d355eaa67098cebe5cdb8cf6badc75
|
[
"MIT"
] | null | null | null |
Desafios Python/m01/ex005.py
|
joaquimjfernandes/Curso-de-Python
|
3356211248d355eaa67098cebe5cdb8cf6badc75
|
[
"MIT"
] | null | null | null |
Desafios Python/m01/ex005.py
|
joaquimjfernandes/Curso-de-Python
|
3356211248d355eaa67098cebe5cdb8cf6badc75
|
[
"MIT"
] | null | null | null |
# Course exercise 05: read an integer and print its predecessor and
# successor (user-facing text is in Portuguese; ANSI codes colour it).
print('\033[32;1mDESAFIO 05 - Antecessor e Sucessor\033[m')
print('\033[32;1mALUNO:\033[m \033[36;1mJoaquim Fernandes\033[m')
print('-' * 50)
# -----------------------------------------------------------------------
num = int(input('Digite um Número: '))
# Predecessor is num-1, successor num+1, interpolated into the message.
print(f'Analisando o Valor {num}...\n Seu Antecessor é {num-1} \n Seu Sucessor é {num+1}')
# -----------------------------------------------------------------------
| 52.5
| 90
| 0.440476
| 48
| 420
| 3.854167
| 0.583333
| 0.064865
| 0.108108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 0.083333
| 420
| 7
| 91
| 60
| 0.394805
| 0.340476
| 0
| 0
| 0
| 0.4
| 0.748175
| 0.080292
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.8
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
7fd5095ef94fdc73cb43f6adca64db171d75c95d
| 6,596
|
py
|
Python
|
get_index_data.py
|
briggslalor/Numisma
|
f8e67832991d0ddd16d334870c8e5e0657e2ff4e
|
[
"MIT"
] | null | null | null |
get_index_data.py
|
briggslalor/Numisma
|
f8e67832991d0ddd16d334870c8e5e0657e2ff4e
|
[
"MIT"
] | null | null | null |
get_index_data.py
|
briggslalor/Numisma
|
f8e67832991d0ddd16d334870c8e5e0657e2ff4e
|
[
"MIT"
] | null | null | null |
import CryptoDownloadData as coinData
import pandas as pd
from pycoingecko import CoinGeckoAPI
# Shared CoinGecko API client used by all price-lookup helpers below.
cg = CoinGeckoAPI()
# Ticker -> CoinGecko-id mapping for each supported index universe.
coinbase = {'BTC': {'CoinGeckID': 'bitcoin'},
            'ETH': {'CoinGeckID': 'ethereum'},
            'BNB': {'CoinGeckID': 'binancecoin'},
            'USDC': {'CoinGeckID': 'usd-coin'},
            'XRP': {'CoinGeckID': 'ripple'},
            'ADA': {'CoinGeckID': 'cardano'},
            'SOL': {'CoinGeckID': 'solana'},
            'LUNA': {'CoinGeckID': 'terra-luna'},
            'AVAX': {'CoinGeckID': 'avalanche-2'}}
metaverse = {'MANA': {'CoinGeckID': 'decentraland'},
             'SAND': {'CoinGeckID': 'the-sandbox'},
             'AXS': {'CoinGeckID': 'axie-infinity'},
             'THETA': {'CoinGeckID': 'theta-token'},
             'ENJ': {'CoinGeckID': 'enjincoin'},
             'WEMIX': {'CoinGeckID': 'wemix-token'},
             'WAXP': {'CoinGeckID': 'wax'},
             'RNDR': {'CoinGeckID': 'render-token'},
             'SUSHI': {'CoinGeckID': 'sushi'},
             'ONT': {'CoinGeckID': 'ontology'},
             'UOS': {'CoinGeckID': 'ultra'},
             'PLA': {'CoinGeckID': 'playdapp'},
             'CEEK': {'CoinGeckID': 'ceek'},
             'CHR': {'CoinGeckID': 'chromaway'}}
yieldfarm = {'CAKE': {'CoinGeckID': 'pancakeswap-token'},
             'AAVE': {'CoinGeckID': 'aave'},
             'CRV': {'CoinGeckID': 'curve-dao-token'},
             'RLY': {'CoinGeckID': 'rally-2'},
             'SNX': {'CoinGeckID': 'havven'},
             'SUSHI': {'CoinGeckID': 'sushi'},
             'RGT': {'CoinGeckID': 'rari-governance-token'},
             'REEF': {'CoinGeckID': 'reef-finance'}}
# Union of the three universes.  Built by dict merge instead of repeating
# every entry by hand (the old literal even contained 'SUSHI' twice);
# overlapping tickers map to identical ids, so the merged mapping equals
# the old hand-written one.
allindex = {**coinbase, **metaverse, **yieldfarm}
def get_index_prices(selected_index):
    """Fetch one year of adjusted closes for every coin in a named index.

    Returns a DataFrame with one column per coin (columns end up in
    reverse order of index_coins because each is inserted at position 0),
    or an error string for an unknown index name.
    """
    if selected_index == 'Coinbase100':
        index_coins = ['BTC', 'ETH', 'BNB', 'USDC', 'XRP', 'ADA', 'SOL', 'LUNA', 'AVAX']
    elif selected_index == 'TopMetaverseTokens':
        index_coins = ['MANA', 'SAND', 'AXS', 'THETA', 'ENJ','WEMIX', 'WAXP', 'RNDR', 'SUSHI', 'ONT', 'UOS', 'PLA', 'CEEK', 'CHR']
    elif selected_index == 'YieldFarmingTokens':
        index_coins = ['CAKE', 'AAVE', 'CRV', 'RLY', 'SNX', 'SUSHI', 'RGT', 'REEF']
    else:
        return ('ERROR: Invalid Index Entry.')
    index_prices = pd.DataFrame()
    for i in index_coins:
        # Per-coin daily history from the project downloader; keep the
        # adjusted close and truncate to the first 365 rows.
        prices_df = coinData.get_px_history(i)
        prices_list = prices_df['adjClose'].values.tolist()
        index_prices.insert(0, i, prices_list[0:365], True)
    return index_prices
def get_index_coins(selected_index):
    """Return the ticker list for a named index, or an error string.

    Known indices: 'Coinbase100', 'TopMetaverseTokens',
    'YieldFarmingTokens'.
    """
    index_members = {
        'Coinbase100': ['BTC', 'ETH', 'BNB', 'USDC', 'XRP', 'ADA',
                        'SOL', 'LUNA', 'AVAX'],
        'TopMetaverseTokens': ['MANA', 'SAND', 'AXS', 'THETA', 'ENJ',
                               'WEMIX', 'WAXP', 'RNDR', 'SUSHI', 'ONT',
                               'UOS', 'PLA', 'CEEK', 'CHR'],
        'YieldFarmingTokens': ['CAKE', 'AAVE', 'CRV', 'RLY', 'SNX',
                               'SUSHI', 'RGT', 'REEF'],
    }
    if selected_index not in index_members:
        return ('ERROR: Invalid Index Entry.')
    return index_members[selected_index]
def get_index_df(coins, weights):
    """Build a two-column DataFrame pairing each coin with its weight."""
    return pd.DataFrame({'Coins': coins, 'Weights': weights})
def get_coin_values(investment, selected_index, index_df):
    """Split *investment* across the index per *index_df* weights and price it.

    Looks up each coin's live USD price through the module-level CoinGecko
    client *cg*, then adds investment-per-coin, price, and coins-owned
    columns to *index_df* in place.  Returns the augmented DataFrame, or
    an error string for an unknown index name.
    """
    if selected_index == 'Coinbase100':
        index_dict = coinbase
        index_coins = ['BTC', 'ETH', 'BNB', 'USDC', 'XRP', 'ADA', 'SOL', 'LUNA', 'AVAX']
    elif selected_index == 'TopMetaverseTokens':
        index_dict = metaverse
        index_coins = ['MANA', 'SAND', 'AXS', 'THETA', 'ENJ','WEMIX', 'WAXP', 'RNDR', 'SUSHI', 'ONT', 'UOS', 'PLA', 'CEEK', 'CHR']
    elif selected_index == 'YieldFarmingTokens':
        index_dict = yieldfarm
        index_coins = ['CAKE', 'AAVE', 'CRV', 'RLY', 'SNX', 'SUSHI', 'RGT', 'REEF']
    else:
        return ('ERROR: Invalid Index Entry.')
    # assumes index_df rows are ordered like index_coins -- TODO confirm.
    index_df['Investment per Coin (USD)'] = investment * index_df['Weights']
    coin_prices = []
    for i in index_coins:
        coin = i
        cg_id = index_dict[coin]['CoinGeckID']
        # One network call per coin; response shape is {cg_id: {'usd': x}}.
        price = cg.get_price(ids = cg_id, vs_currencies = 'usd')
        coin_prices.append(price[cg_id]['usd'])
    index_df['Coin Price (USD)'] = coin_prices
    index_df['Coins Owned'] = index_df['Investment per Coin (USD)'] / index_df['Coin Price (USD)']
    return index_df
def get_coin_values_by_weight_df(investment, weight_df):
index_dict = allindex
weight_df['weight'] = pd.to_numeric(weight_df['weight'])
weight_df['investment'] = investment * weight_df['weight']
coin_prices = []
for symbol in weight_df['symbol']:
cg_id = index_dict[symbol]['CoinGeckID']
price = cg.get_price(ids = cg_id, vs_currencies = 'usd')
coin_prices.append(price[cg_id]['usd'])
weight_df['coin_px'] = coin_prices
weight_df['coin_cnt'] = weight_df['investment'] / weight_df['coin_px']
return weight_df
| 40.716049
| 130
| 0.52456
| 632
| 6,596
| 5.327532
| 0.197785
| 0.03861
| 0.030294
| 0.023166
| 0.777547
| 0.743392
| 0.717553
| 0.700327
| 0.700327
| 0.700327
| 0
| 0.003789
| 0.279867
| 6,596
| 162
| 131
| 40.716049
| 0.705053
| 0
| 0
| 0.671875
| 0
| 0
| 0.33106
| 0.006367
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039063
| false
| 0
| 0.023438
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7fe5d795444748b3e0524b627821be13b655a753
| 49
|
py
|
Python
|
tests/helpers/__init__.py
|
intdata-bsc/idact
|
54cb65a711c145351e205970c27c83e6393cccf5
|
[
"MIT"
] | 5
|
2018-12-06T15:40:34.000Z
|
2019-06-19T11:22:58.000Z
|
tests/helpers/__init__.py
|
garstka/idact
|
b9c8405c94db362c4a51d6bfdf418b14f06f0da1
|
[
"MIT"
] | 9
|
2018-12-06T16:35:26.000Z
|
2019-04-28T19:01:40.000Z
|
tests/helpers/__init__.py
|
intdata-bsc/idact
|
54cb65a711c145351e205970c27c83e6393cccf5
|
[
"MIT"
] | 2
|
2019-04-28T19:18:58.000Z
|
2019-06-17T06:56:28.000Z
|
"""Contains helper functionality for testing."""
| 24.5
| 48
| 0.755102
| 5
| 49
| 7.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 49
| 1
| 49
| 49
| 0.840909
| 0.857143
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7ff49d032a7d28a5ebf0a3c6550d132ccc214895
| 85
|
py
|
Python
|
src/features/transforms/Identity.py
|
maranedah/music_inpainting_benchmark
|
567f4ccfe135a7a6c0578a0672145414b61fd227
|
[
"MIT"
] | null | null | null |
src/features/transforms/Identity.py
|
maranedah/music_inpainting_benchmark
|
567f4ccfe135a7a6c0578a0672145414b61fd227
|
[
"MIT"
] | null | null | null |
src/features/transforms/Identity.py
|
maranedah/music_inpainting_benchmark
|
567f4ccfe135a7a6c0578a0672145414b61fd227
|
[
"MIT"
] | null | null | null |
import torch
class Identity:
def __call__(self, data):
return data
| 14.166667
| 30
| 0.623529
| 10
| 85
| 4.9
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.317647
| 85
| 6
| 31
| 14.166667
| 0.844828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
3d268b201223090d5a5c636beb59617cb7884e9e
| 26,125
|
py
|
Python
|
meltingpot/python/configs/bots/__init__.py
|
Rohan138/meltingpot
|
d4e3839225b78babcedbbbf95cf747ff9e0a87b5
|
[
"Apache-2.0"
] | null | null | null |
meltingpot/python/configs/bots/__init__.py
|
Rohan138/meltingpot
|
d4e3839225b78babcedbbbf95cf747ff9e0a87b5
|
[
"Apache-2.0"
] | null | null | null |
meltingpot/python/configs/bots/__init__.py
|
Rohan138/meltingpot
|
d4e3839225b78babcedbbbf95cf747ff9e0a87b5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library of stored bots for MeltingPot scenarios."""
import dataclasses
import os
import re
from typing import Mapping, Optional
import immutabledict
from meltingpot.python.utils.bots import puppeteer_functions
MODELS_ROOT = re.sub('meltingpot/python/.*', 'meltingpot/assets/saved_models',
__file__)
@dataclasses.dataclass(frozen=True)
class BotConfig:
"""Bot config.
Attributes:
substrate: the substrate the bot was trained for.
model_path: the path to the bot's saved model.
puppeteer_fn: an optional puppeteer function used to control the bot.
"""
substrate: str
model_path: str
puppeteer_fn: Optional[puppeteer_functions.PuppeteerFn] = None
def _saved_model(substrate: str,
model: str,
models_root: str = MODELS_ROOT) -> BotConfig:
"""Returns the config for a saved model bot.
Args:
substrate: the substrate on which the bot was trained.
model: the name of the model.
models_root: The path to the directory containing the saved_models.
"""
model_path = os.path.join(models_root, substrate, model)
return BotConfig(substrate=substrate, model_path=model_path)
def _puppet(substrate: str,
puppeteer_fn: puppeteer_functions.PuppeteerFn,
models_root: str = MODELS_ROOT) -> BotConfig:
"""Returns the config for a puppet bot.
Args:
substrate: the substrate on which the bot was trained.
puppeteer_fn: the puppeteer function that controls the puppet.
models_root: The path to the directory containing the saved_models.
"""
puppet_path = os.path.join(models_root, substrate, 'puppet')
return BotConfig(
substrate=substrate, model_path=puppet_path, puppeteer_fn=puppeteer_fn)
BOT_CONFIGS: Mapping[str, BotConfig] = immutabledict.immutabledict(
# keep-sorted start numeric=yes block=yes
ah3gs_bot_finding_berry_two_the_most_tasty_0=_saved_model(
substrate='allelopathic_harvest',
model='ah3gs_bot_finding_berry_two_the_most_tasty_0',
),
ah3gs_bot_finding_berry_two_the_most_tasty_1=_saved_model(
substrate='allelopathic_harvest',
model='ah3gs_bot_finding_berry_two_the_most_tasty_1',
),
ah3gs_bot_finding_berry_two_the_most_tasty_4=_saved_model(
substrate='allelopathic_harvest',
model='ah3gs_bot_finding_berry_two_the_most_tasty_4',
),
ah3gs_bot_finding_berry_two_the_most_tasty_5=_saved_model(
substrate='allelopathic_harvest',
model='ah3gs_bot_finding_berry_two_the_most_tasty_5',
),
arena_rws_free_0=_saved_model(
substrate='arena_running_with_scissors_in_the_matrix',
model='arena_rws_free_0',
),
arena_rws_free_1=_saved_model(
substrate='arena_running_with_scissors_in_the_matrix',
model='arena_rws_free_1',
),
arena_rws_free_2=_saved_model(
substrate='arena_running_with_scissors_in_the_matrix',
model='arena_rws_free_2',
),
arena_rws_pure_paper_0=_saved_model(
substrate='arena_running_with_scissors_in_the_matrix',
model='arena_rws_pure_paper_0',
),
arena_rws_pure_paper_1=_saved_model(
substrate='arena_running_with_scissors_in_the_matrix',
model='arena_rws_pure_paper_1',
),
arena_rws_pure_paper_2=_saved_model(
substrate='arena_running_with_scissors_in_the_matrix',
model='arena_rws_pure_paper_2',
),
arena_rws_pure_paper_3=_saved_model(
substrate='arena_running_with_scissors_in_the_matrix',
model='arena_rws_pure_paper_3',
),
arena_rws_pure_rock_0=_saved_model(
substrate='arena_running_with_scissors_in_the_matrix',
model='arena_rws_pure_rock_0',
),
arena_rws_pure_rock_1=_saved_model(
substrate='arena_running_with_scissors_in_the_matrix',
model='arena_rws_pure_rock_1',
),
arena_rws_pure_rock_2=_saved_model(
substrate='arena_running_with_scissors_in_the_matrix',
model='arena_rws_pure_rock_2',
),
arena_rws_pure_rock_3=_saved_model(
substrate='arena_running_with_scissors_in_the_matrix',
model='arena_rws_pure_rock_3',
),
arena_rws_pure_scissors_0=_saved_model(
substrate='arena_running_with_scissors_in_the_matrix',
model='arena_rws_pure_scissors_0',
),
arena_rws_pure_scissors_1=_saved_model(
substrate='arena_running_with_scissors_in_the_matrix',
model='arena_rws_pure_scissors_1',
),
arena_rws_pure_scissors_2=_saved_model(
substrate='arena_running_with_scissors_in_the_matrix',
model='arena_rws_pure_scissors_2',
),
arena_rws_pure_scissors_3=_saved_model(
substrate='arena_running_with_scissors_in_the_matrix',
model='arena_rws_pure_scissors_3',
),
bach_fan_0=_saved_model(
substrate='bach_or_stravinsky_in_the_matrix',
model='bach_fan_0',
),
bach_fan_1=_saved_model(
substrate='bach_or_stravinsky_in_the_matrix',
model='bach_fan_1',
),
bach_fan_2=_saved_model(
substrate='bach_or_stravinsky_in_the_matrix',
model='bach_fan_2',
),
chemistry_branched_chain_reaction_X_specialist_0=_saved_model(
substrate='chemistry_branched_chain_reaction',
model='chemistry_branched_chain_reaction_X_specialist_0',
),
chemistry_branched_chain_reaction_X_specialist_1=_saved_model(
substrate='chemistry_branched_chain_reaction',
model='chemistry_branched_chain_reaction_X_specialist_1',
),
chemistry_branched_chain_reaction_X_specialist_2=_saved_model(
substrate='chemistry_branched_chain_reaction',
model='chemistry_branched_chain_reaction_X_specialist_2',
),
chemistry_branched_chain_reaction_Y_specialist_0=_saved_model(
substrate='chemistry_branched_chain_reaction',
model='chemistry_branched_chain_reaction_Y_specialist_0',
),
chemistry_branched_chain_reaction_Y_specialist_1=_saved_model(
substrate='chemistry_branched_chain_reaction',
model='chemistry_branched_chain_reaction_Y_specialist_1',
),
chemistry_branched_chain_reaction_Y_specialist_2=_saved_model(
substrate='chemistry_branched_chain_reaction',
model='chemistry_branched_chain_reaction_Y_specialist_2',
),
chemistry_metabolic_cycles_food1_specialist_0=_saved_model(
substrate='chemistry_metabolic_cycles',
model='chemistry_metabolic_cycles_food1_specialist_0',
),
chemistry_metabolic_cycles_food1_specialist_1=_saved_model(
substrate='chemistry_metabolic_cycles',
model='chemistry_metabolic_cycles_food1_specialist_1',
),
chemistry_metabolic_cycles_food2_specialist_0=_saved_model(
substrate='chemistry_metabolic_cycles',
model='chemistry_metabolic_cycles_food2_specialist_0',
),
chemistry_metabolic_cycles_food2_specialist_1=_saved_model(
substrate='chemistry_metabolic_cycles',
model='chemistry_metabolic_cycles_food2_specialist_1',
),
chicken_free_0=_saved_model(
substrate='chicken_in_the_matrix',
model='chicken_free_0',
),
chicken_free_1=_saved_model(
substrate='chicken_in_the_matrix',
model='chicken_free_1',
),
chicken_free_2=_saved_model(
substrate='chicken_in_the_matrix',
model='chicken_free_2',
),
chicken_free_3=_saved_model(
substrate='chicken_in_the_matrix',
model='chicken_free_3',
),
chicken_puppet_grim=_puppet(
substrate='chicken_in_the_matrix',
puppeteer_fn=puppeteer_functions.GrimTwoResourceInTheMatrix(2),
),
chicken_pure_dove_0=_saved_model(
substrate='chicken_in_the_matrix',
model='chicken_pure_dove_0',
),
chicken_pure_dove_1=_saved_model(
substrate='chicken_in_the_matrix',
model='chicken_pure_dove_1',
),
chicken_pure_dove_2=_saved_model(
substrate='chicken_in_the_matrix',
model='chicken_pure_dove_2',
),
chicken_pure_dove_3=_saved_model(
substrate='chicken_in_the_matrix',
model='chicken_pure_dove_3',
),
chicken_pure_hawk_0=_saved_model(
substrate='chicken_in_the_matrix',
model='chicken_pure_hawk_0',
),
chicken_pure_hawk_1=_saved_model(
substrate='chicken_in_the_matrix',
model='chicken_pure_hawk_1',
),
chicken_pure_hawk_2=_saved_model(
substrate='chicken_in_the_matrix',
model='chicken_pure_hawk_2',
),
chicken_pure_hawk_3=_saved_model(
substrate='chicken_in_the_matrix',
model='chicken_pure_hawk_3',
),
classic_rws_free_0=_saved_model(
substrate='running_with_scissors_in_the_matrix',
model='classic_rws_free_0',
),
classic_rws_free_1=_saved_model(
substrate='running_with_scissors_in_the_matrix',
model='classic_rws_free_1',
),
classic_rws_free_2=_saved_model(
substrate='running_with_scissors_in_the_matrix',
model='classic_rws_free_2',
),
classic_rws_pure_paper_0=_saved_model(
substrate='running_with_scissors_in_the_matrix',
model='classic_rws_pure_paper_0',
),
classic_rws_pure_paper_1=_saved_model(
substrate='running_with_scissors_in_the_matrix',
model='classic_rws_pure_paper_1',
),
classic_rws_pure_paper_2=_saved_model(
substrate='running_with_scissors_in_the_matrix',
model='classic_rws_pure_paper_2',
),
classic_rws_pure_paper_3=_saved_model(
substrate='running_with_scissors_in_the_matrix',
model='classic_rws_pure_paper_3',
),
classic_rws_pure_rock_0=_saved_model(
substrate='running_with_scissors_in_the_matrix',
model='classic_rws_pure_rock_0',
),
classic_rws_pure_rock_1=_saved_model(
substrate='running_with_scissors_in_the_matrix',
model='classic_rws_pure_rock_1',
),
classic_rws_pure_rock_2=_saved_model(
substrate='running_with_scissors_in_the_matrix',
model='classic_rws_pure_rock_2',
),
classic_rws_pure_rock_3=_saved_model(
substrate='running_with_scissors_in_the_matrix',
model='classic_rws_pure_rock_3',
),
classic_rws_pure_scissors_0=_saved_model(
substrate='running_with_scissors_in_the_matrix',
model='classic_rws_pure_scissors_0',
),
classic_rws_pure_scissors_1=_saved_model(
substrate='running_with_scissors_in_the_matrix',
model='classic_rws_pure_scissors_1',
),
classic_rws_pure_scissors_2=_saved_model(
substrate='running_with_scissors_in_the_matrix',
model='classic_rws_pure_scissors_2',
),
classic_rws_pure_scissors_3=_saved_model(
substrate='running_with_scissors_in_the_matrix',
model='classic_rws_pure_scissors_3',
),
cleanup_cleaner_1=_saved_model(
substrate='clean_up',
model='cleanup_cleaner_1',
),
cleanup_cleaner_2=_saved_model(
substrate='clean_up',
model='cleanup_cleaner_2',
),
cleanup_consumer_0=_saved_model(
substrate='clean_up',
model='cleanup_consumer_0',
),
cleanup_consumer_1=_saved_model(
substrate='clean_up',
model='cleanup_consumer_1',
),
cleanup_consumer_2=_saved_model(
substrate='clean_up',
model='cleanup_consumer_2',
),
cleanup_puppet_alternate_clean_first=_puppet(
substrate='clean_up',
puppeteer_fn=puppeteer_functions.cleanup_alternate_clean_first,
),
cleanup_puppet_alternate_eat_first=_puppet(
substrate='clean_up',
puppeteer_fn=puppeteer_functions.cleanup_alternate_eat_first,
),
cleanup_puppet_reciprocator_threshold_low=_puppet(
substrate='clean_up',
puppeteer_fn=puppeteer_functions.ConditionalCleaner(1),
),
cleanup_puppet_reciprocator_threshold_mid=_puppet(
substrate='clean_up',
puppeteer_fn=puppeteer_functions.ConditionalCleaner(2),
),
closed_commons_zapper_0=_saved_model(
substrate='commons_harvest_closed',
model='closed_commons_zapper_0',
),
closed_commons_zapper_1=_saved_model(
substrate='commons_harvest_closed',
model='closed_commons_zapper_1',
),
closed_commons_zapper_2=_saved_model(
substrate='commons_harvest_closed',
model='closed_commons_zapper_2',
),
closed_commons_zapper_3=_saved_model(
substrate='commons_harvest_closed',
model='closed_commons_zapper_3',
),
collaborative_cooking_impassable_vmpo_pop_size_ten_0=_saved_model(
substrate='collaborative_cooking_impassable',
model='collaborative_cooking_impassable_vmpo_pop_size_ten_0',
),
collaborative_cooking_impassable_vmpo_pop_size_ten_2=_saved_model(
substrate='collaborative_cooking_impassable',
model='collaborative_cooking_impassable_vmpo_pop_size_ten_2',
),
collaborative_cooking_impassable_vmpo_pop_size_ten_3=_saved_model(
substrate='collaborative_cooking_impassable',
model='collaborative_cooking_impassable_vmpo_pop_size_ten_3',
),
collaborative_cooking_impassable_vmpo_pop_size_ten_4=_saved_model(
substrate='collaborative_cooking_impassable',
model='collaborative_cooking_impassable_vmpo_pop_size_ten_4',
),
collaborative_cooking_impassable_vmpo_pop_size_ten_6=_saved_model(
substrate='collaborative_cooking_impassable',
model='collaborative_cooking_impassable_vmpo_pop_size_ten_6',
),
collaborative_cooking_impassable_vmpo_pop_size_ten_7=_saved_model(
substrate='collaborative_cooking_impassable',
model='collaborative_cooking_impassable_vmpo_pop_size_ten_7',
),
collaborative_cooking_impassable_vmpo_pop_size_ten_9=_saved_model(
substrate='collaborative_cooking_impassable',
model='collaborative_cooking_impassable_vmpo_pop_size_ten_9',
),
collaborative_cooking_passable_vmpo_pop_size_ten_5=_saved_model(
substrate='collaborative_cooking_passable',
model='collaborative_cooking_passable_vmpo_pop_size_ten_5',
),
ctf_pseudorewards_for_main_game_events_a3c_2=_saved_model(
substrate='capture_the_flag',
model='ctf_pseudorewards_for_main_game_events_a3c_2',
),
ctf_pseudorewards_for_main_game_events_a3c_6=_saved_model(
substrate='capture_the_flag',
model='ctf_pseudorewards_for_main_game_events_a3c_6',
),
ctf_pseudorewards_for_main_game_events_vmpo_0=_saved_model(
substrate='capture_the_flag',
model='ctf_pseudorewards_for_main_game_events_vmpo_0',
),
ctf_pseudorewards_for_main_game_events_vmpo_3=_saved_model(
substrate='capture_the_flag',
model='ctf_pseudorewards_for_main_game_events_vmpo_3',
),
ctf_pseudorewards_for_main_game_events_vmpo_4=_saved_model(
substrate='capture_the_flag',
model='ctf_pseudorewards_for_main_game_events_vmpo_4',
),
ctf_pseudorewards_for_main_game_events_vmpo_6=_saved_model(
substrate='capture_the_flag',
model='ctf_pseudorewards_for_main_game_events_vmpo_6',
),
ctf_pseudorewards_for_main_game_events_vmpo_7=_saved_model(
substrate='capture_the_flag',
model='ctf_pseudorewards_for_main_game_events_vmpo_7',
),
koth_default_vmpo_0=_saved_model(
substrate='king_of_the_hill',
model='koth_default_vmpo_0',
),
koth_default_vmpo_1=_saved_model(
substrate='king_of_the_hill',
model='koth_default_vmpo_1',
),
koth_default_vmpo_2=_saved_model(
substrate='king_of_the_hill',
model='koth_default_vmpo_2',
),
koth_default_vmpo_3=_saved_model(
substrate='king_of_the_hill',
model='koth_default_vmpo_3',
),
koth_default_vmpo_4=_saved_model(
substrate='king_of_the_hill',
model='koth_default_vmpo_4',
),
koth_default_vmpo_5=_saved_model(
substrate='king_of_the_hill',
model='koth_default_vmpo_5',
),
koth_default_vmpo_6=_saved_model(
substrate='king_of_the_hill',
model='koth_default_vmpo_6',
),
koth_default_vmpo_7=_saved_model(
substrate='king_of_the_hill',
model='koth_default_vmpo_7',
),
koth_zap_while_in_control_a3c_0=_saved_model(
substrate='king_of_the_hill',
model='koth_zap_while_in_control_a3c_0',
),
koth_zap_while_in_control_a3c_1=_saved_model(
substrate='king_of_the_hill',
model='koth_zap_while_in_control_a3c_1',
),
koth_zap_while_in_control_a3c_2=_saved_model(
substrate='king_of_the_hill',
model='koth_zap_while_in_control_a3c_2',
),
koth_zap_while_in_control_a3c_3=_saved_model(
substrate='king_of_the_hill',
model='koth_zap_while_in_control_a3c_3',
),
koth_zap_while_in_control_a3c_4=_saved_model(
substrate='king_of_the_hill',
model='koth_zap_while_in_control_a3c_4',
),
koth_zap_while_in_control_a3c_5=_saved_model(
substrate='king_of_the_hill',
model='koth_zap_while_in_control_a3c_5',
),
koth_zap_while_in_control_a3c_6=_saved_model(
substrate='king_of_the_hill',
model='koth_zap_while_in_control_a3c_6',
),
koth_zap_while_in_control_a3c_7=_saved_model(
substrate='king_of_the_hill',
model='koth_zap_while_in_control_a3c_7',
),
koth_zap_while_in_control_vmpo_0=_saved_model(
substrate='king_of_the_hill',
model='koth_zap_while_in_control_vmpo_0',
),
koth_zap_while_in_control_vmpo_1=_saved_model(
substrate='king_of_the_hill',
model='koth_zap_while_in_control_vmpo_1',
),
koth_zap_while_in_control_vmpo_2=_saved_model(
substrate='king_of_the_hill',
model='koth_zap_while_in_control_vmpo_2',
),
koth_zap_while_in_control_vmpo_3=_saved_model(
substrate='king_of_the_hill',
model='koth_zap_while_in_control_vmpo_3',
),
koth_zap_while_in_control_vmpo_4=_saved_model(
substrate='king_of_the_hill',
model='koth_zap_while_in_control_vmpo_4',
),
koth_zap_while_in_control_vmpo_5=_saved_model(
substrate='king_of_the_hill',
model='koth_zap_while_in_control_vmpo_5',
),
koth_zap_while_in_control_vmpo_6=_saved_model(
substrate='king_of_the_hill',
model='koth_zap_while_in_control_vmpo_6',
),
koth_zap_while_in_control_vmpo_7=_saved_model(
substrate='king_of_the_hill',
model='koth_zap_while_in_control_vmpo_7',
),
open_commons_zapper_0=_saved_model(
substrate='commons_harvest_open',
model='open_commons_zapper_0',
),
open_commons_zapper_1=_saved_model(
substrate='commons_harvest_open',
model='open_commons_zapper_1',
),
partnership_commons_putative_good_partner_4=_saved_model(
substrate='commons_harvest_partnership',
model='partnership_commons_putative_good_partner_4',
),
partnership_commons_putative_good_partner_5=_saved_model(
substrate='commons_harvest_partnership',
model='partnership_commons_putative_good_partner_5',
),
partnership_commons_putative_good_partner_7=_saved_model(
substrate='commons_harvest_partnership',
model='partnership_commons_putative_good_partner_7',
),
partnership_commons_zapper_1=_saved_model(
substrate='commons_harvest_partnership',
model='partnership_commons_zapper_1',
),
partnership_commons_zapper_2=_saved_model(
substrate='commons_harvest_partnership',
model='partnership_commons_zapper_2',
),
prisoners_dilemma_cooperator_2=_saved_model(
substrate='prisoners_dilemma_in_the_matrix',
model='prisoners_dilemma_cooperator_2',
),
prisoners_dilemma_cooperator_4=_saved_model(
substrate='prisoners_dilemma_in_the_matrix',
model='prisoners_dilemma_cooperator_4',
),
prisoners_dilemma_defector_0=_saved_model(
substrate='prisoners_dilemma_in_the_matrix',
model='prisoners_dilemma_defector_0',
),
prisoners_dilemma_defector_2=_saved_model(
substrate='prisoners_dilemma_in_the_matrix',
model='prisoners_dilemma_defector_2',
),
prisoners_dilemma_free_0=_saved_model(
substrate='prisoners_dilemma_in_the_matrix',
model='prisoners_dilemma_free_0',
),
prisoners_dilemma_free_1=_saved_model(
substrate='prisoners_dilemma_in_the_matrix',
model='prisoners_dilemma_free_1',
),
prisoners_dilemma_free_2=_saved_model(
substrate='prisoners_dilemma_in_the_matrix',
model='prisoners_dilemma_free_2',
),
prisoners_dilemma_puppet_grim_threshold_high=_puppet(
substrate='prisoners_dilemma_in_the_matrix',
puppeteer_fn=puppeteer_functions.GrimTwoResourceInTheMatrix(2),
),
prisoners_dilemma_puppet_grim_threshold_low=_puppet(
substrate='prisoners_dilemma_in_the_matrix',
puppeteer_fn=puppeteer_functions.GrimTwoResourceInTheMatrix(1),
),
pure_coordination_type_1_specialist_0=_saved_model(
substrate='pure_coordination_in_the_matrix',
model='pure_coordination_type_1_specialist_0',
),
pure_coordination_type_1_specialist_1=_saved_model(
substrate='pure_coordination_in_the_matrix',
model='pure_coordination_type_1_specialist_1',
),
pure_coordination_type_2_specialist_0=_saved_model(
substrate='pure_coordination_in_the_matrix',
model='pure_coordination_type_2_specialist_0',
),
pure_coordination_type_2_specialist_1=_saved_model(
substrate='pure_coordination_in_the_matrix',
model='pure_coordination_type_2_specialist_1',
),
pure_coordination_type_3_specialist_0=_saved_model(
substrate='pure_coordination_in_the_matrix',
model='pure_coordination_type_3_specialist_0',
),
pure_coordination_type_3_specialist_1=_saved_model(
substrate='pure_coordination_in_the_matrix',
model='pure_coordination_type_3_specialist_1',
),
rationalizable_coordination_type_1_specialist_0=_saved_model(
substrate='rationalizable_coordination_in_the_matrix',
model='rationalizable_coordination_type_1_specialist_0',
),
rationalizable_coordination_type_1_specialist_1=_saved_model(
substrate='rationalizable_coordination_in_the_matrix',
model='rationalizable_coordination_type_1_specialist_1',
),
rationalizable_coordination_type_2_specialist_0=_saved_model(
substrate='rationalizable_coordination_in_the_matrix',
model='rationalizable_coordination_type_2_specialist_0',
),
rationalizable_coordination_type_2_specialist_1=_saved_model(
substrate='rationalizable_coordination_in_the_matrix',
model='rationalizable_coordination_type_2_specialist_1',
),
rationalizable_coordination_type_3_specialist_0=_saved_model(
substrate='rationalizable_coordination_in_the_matrix',
model='rationalizable_coordination_type_3_specialist_0',
),
rationalizable_coordination_type_3_specialist_1=_saved_model(
substrate='rationalizable_coordination_in_the_matrix',
model='rationalizable_coordination_type_3_specialist_1',
),
stag_hunt_hare_specialist_0=_saved_model(
substrate='stag_hunt_in_the_matrix',
model='stag_hunt_hare_specialist_0',
),
stag_hunt_hare_specialist_1=_saved_model(
substrate='stag_hunt_in_the_matrix',
model='stag_hunt_hare_specialist_1',
),
stag_hunt_hare_specialist_2=_saved_model(
substrate='stag_hunt_in_the_matrix',
model='stag_hunt_hare_specialist_2',
),
stag_hunt_puppet_grim=_puppet(
substrate='stag_hunt_in_the_matrix',
puppeteer_fn=puppeteer_functions.GrimTwoResourceInTheMatrix(1),
),
stag_hunt_stag_specialist_3=_saved_model(
substrate='stag_hunt_in_the_matrix',
model='stag_hunt_stag_specialist_3',
),
stag_hunt_stag_specialist_5=_saved_model(
substrate='stag_hunt_in_the_matrix',
model='stag_hunt_stag_specialist_5',
),
stravinsky_fan_0=_saved_model(
substrate='bach_or_stravinsky_in_the_matrix',
model='stravinsky_fan_0',
),
stravinsky_fan_1=_saved_model(
substrate='bach_or_stravinsky_in_the_matrix',
model='stravinsky_fan_1',
),
stravinsky_fan_2=_saved_model(
substrate='bach_or_stravinsky_in_the_matrix',
model='stravinsky_fan_2',
),
territory_closed_reply_to_zapper_0=_saved_model(
substrate='territory_rooms',
model='territory_closed_reply_to_zapper_0',
),
territory_closed_reply_to_zapper_1=_saved_model(
substrate='territory_rooms',
model='territory_closed_reply_to_zapper_1',
),
territory_open_painter_0=_saved_model(
substrate='territory_open',
model='territory_open_painter_0',
),
territory_open_painter_1=_saved_model(
substrate='territory_open',
model='territory_open_painter_1',
),
territory_open_painter_2=_saved_model(
substrate='territory_open',
model='territory_open_painter_2',
),
territory_open_painter_3=_saved_model(
substrate='territory_open',
model='territory_open_painter_3',
),
# keep-sorted end
)
| 37.53592
| 78
| 0.737455
| 3,276
| 26,125
| 5.212759
| 0.070208
| 0.087837
| 0.164666
| 0.067459
| 0.867892
| 0.847924
| 0.80828
| 0.753352
| 0.68238
| 0.644317
| 0
| 0.017264
| 0.184077
| 26,125
| 695
| 79
| 37.589928
| 0.783871
| 0.050373
| 0
| 0.487616
| 0
| 0
| 0.341296
| 0.283578
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003096
| false
| 0.037152
| 0.009288
| 0
| 0.021672
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3d4ec61a695a8dd3259127399fe17dc85d227a82
| 163
|
py
|
Python
|
mip_tool/__init__.py
|
SaitoTsutomu/mip-tool
|
50e02a1ba928690c5d1c79a4ed0b17b81576e4fa
|
[
"MIT"
] | null | null | null |
mip_tool/__init__.py
|
SaitoTsutomu/mip-tool
|
50e02a1ba928690c5d1c79a4ed0b17b81576e4fa
|
[
"MIT"
] | null | null | null |
mip_tool/__init__.py
|
SaitoTsutomu/mip-tool
|
50e02a1ba928690c5d1c79a4ed0b17b81576e4fa
|
[
"MIT"
] | null | null | null |
from .util import monotone_decreasing, monotone_increasing # noqa isort:skip
from .util import add_line, add_lines, add_lines_conv, show_model # noqa isort:skip
| 54.333333
| 84
| 0.815951
| 25
| 163
| 5.04
| 0.6
| 0.126984
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122699
| 163
| 2
| 85
| 81.5
| 0.881119
| 0.190184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e9eaacba12a645ede05c09672b9bf0e686e83f5a
| 239
|
py
|
Python
|
model/encryption.py
|
vjohndo/casino
|
749315eda707f15616a1a9bee6098fa28774cc68
|
[
"MIT"
] | null | null | null |
model/encryption.py
|
vjohndo/casino
|
749315eda707f15616a1a9bee6098fa28774cc68
|
[
"MIT"
] | null | null | null |
model/encryption.py
|
vjohndo/casino
|
749315eda707f15616a1a9bee6098fa28774cc68
|
[
"MIT"
] | null | null | null |
import bcrypt
def is_password_correct(login_password, password_hash):
return bcrypt.checkpw(login_password.encode(), password_hash.encode())
def hashpw(password):
return bcrypt.hashpw(password.encode(), bcrypt.gensalt()).decode()
| 34.142857
| 74
| 0.782427
| 30
| 239
| 6.033333
| 0.466667
| 0.143646
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09205
| 239
| 7
| 75
| 34.142857
| 0.834101
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.8
| 0.2
| 0.4
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
181e9df70c6863a312c9b7dc2000bf4536aeab91
| 254
|
py
|
Python
|
inserindo cores no terminal .py
|
Danielporcela/Meus-exercicios-phyton
|
11e44fba4c4f4c1a9e6926daa4b33fc1a3792399
|
[
"MIT"
] | 2
|
2021-04-03T00:20:05.000Z
|
2021-04-05T09:55:44.000Z
|
inserindo cores no terminal .py
|
Danielporcela/Meus-exercicios-phyton
|
11e44fba4c4f4c1a9e6926daa4b33fc1a3792399
|
[
"MIT"
] | null | null | null |
inserindo cores no terminal .py
|
Danielporcela/Meus-exercicios-phyton
|
11e44fba4c4f4c1a9e6926daa4b33fc1a3792399
|
[
"MIT"
] | null | null | null |
#Nessa aula, vamos aprender como utilizar os códigos de escape sequence ANSI para configurar cores para os seus programas em Python. Veja como utilizar o código \033[m com todas as suas principais possibilidades.
print(" \033[0;32m meu texto \033[m")
| 50.8
| 212
| 0.779528
| 42
| 254
| 4.714286
| 0.833333
| 0.121212
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056338
| 0.161417
| 254
| 4
| 213
| 63.5
| 0.873239
| 0.830709
| 0
| 0
| 0
| 0
| 0.707317
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
1852fd0627ffbfb770b63dec98d7d74d07d5a8c0
| 3,572
|
py
|
Python
|
python_grpc_demo/demo_pb2_grpc.py
|
noahzaozao/python_frameworks
|
2f1ed0be3be322d4b8251d893b886312db41859c
|
[
"Apache-2.0"
] | null | null | null |
python_grpc_demo/demo_pb2_grpc.py
|
noahzaozao/python_frameworks
|
2f1ed0be3be322d4b8251d893b886312db41859c
|
[
"Apache-2.0"
] | null | null | null |
python_grpc_demo/demo_pb2_grpc.py
|
noahzaozao/python_frameworks
|
2f1ed0be3be322d4b8251d893b886312db41859c
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import demo_pb2 as demo__pb2
class DemoServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ApiCreate = channel.unary_unary(
'/demo.DemoService/ApiCreate',
request_serializer=demo__pb2.RequestData.SerializeToString,
response_deserializer=demo__pb2.ResponseData.FromString,
)
self.ApiUpdate = channel.unary_unary(
'/demo.DemoService/ApiUpdate',
request_serializer=demo__pb2.RequestData.SerializeToString,
response_deserializer=demo__pb2.ResponseData.FromString,
)
self.ApiList = channel.unary_unary(
'/demo.DemoService/ApiList',
request_serializer=demo__pb2.RequestData.SerializeToString,
response_deserializer=demo__pb2.ResponseData.FromString,
)
self.ApiDelete = channel.unary_unary(
'/demo.DemoService/ApiDelete',
request_serializer=demo__pb2.RequestData.SerializeToString,
response_deserializer=demo__pb2.ResponseData.FromString,
)
class DemoServiceServicer(object):
    # missing associated documentation comment in .proto file
    pass

    # Server-side service skeleton: each handler below is a placeholder that
    # reports UNIMPLEMENTED until overridden by a concrete subclass.
    def ApiCreate(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ApiUpdate(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ApiList(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ApiDelete(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_DemoServiceServicer_to_server(servicer, server):
    """Register servicer's handlers for demo.DemoService on the given server.

    Args:
      servicer: a DemoServiceServicer (or subclass) instance.
      server: a grpc.Server to attach the generic handler to.
    """
    # Map each RPC name to a unary-unary handler; note the serializer roles
    # are mirrored relative to the client stub (requests are deserialized,
    # responses serialized).
    rpc_method_handlers = {
        'ApiCreate': grpc.unary_unary_rpc_method_handler(
            servicer.ApiCreate,
            request_deserializer=demo__pb2.RequestData.FromString,
            response_serializer=demo__pb2.ResponseData.SerializeToString,
        ),
        'ApiUpdate': grpc.unary_unary_rpc_method_handler(
            servicer.ApiUpdate,
            request_deserializer=demo__pb2.RequestData.FromString,
            response_serializer=demo__pb2.ResponseData.SerializeToString,
        ),
        'ApiList': grpc.unary_unary_rpc_method_handler(
            servicer.ApiList,
            request_deserializer=demo__pb2.RequestData.FromString,
            response_serializer=demo__pb2.ResponseData.SerializeToString,
        ),
        'ApiDelete': grpc.unary_unary_rpc_method_handler(
            servicer.ApiDelete,
            request_deserializer=demo__pb2.RequestData.FromString,
            response_serializer=demo__pb2.ResponseData.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'demo.DemoService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
| 36.44898
| 71
| 0.737402
| 364
| 3,572
| 6.967033
| 0.181319
| 0.049685
| 0.053628
| 0.087539
| 0.789432
| 0.738959
| 0.738959
| 0.679022
| 0.679022
| 0.679022
| 0
| 0.006167
| 0.182811
| 3,572
| 97
| 72
| 36.824742
| 0.862624
| 0.12626
| 0
| 0.520548
| 1
| 0
| 0.109819
| 0.034238
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082192
| false
| 0.082192
| 0.027397
| 0
| 0.136986
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
43ee56ef0a193a3e360c154e968980499d9cf210
| 22
|
py
|
Python
|
hello.py
|
projeffboy/github-playground
|
17f8b0d5767a731e77ec69dc38f580aafc5d997f
|
[
"MIT"
] | null | null | null |
hello.py
|
projeffboy/github-playground
|
17f8b0d5767a731e77ec69dc38f580aafc5d997f
|
[
"MIT"
] | null | null | null |
hello.py
|
projeffboy/github-playground
|
17f8b0d5767a731e77ec69dc38f580aafc5d997f
|
[
"MIT"
] | null | null | null |
# Fixed: the original `print(hello good sir)` was a SyntaxError — the
# message must be a string literal.
print("hello good sir")
| 11
| 21
| 0.772727
| 4
| 22
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 22
| 1
| 22
| 22
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
43ef36c9653c216668ef56b2205276a97548f7b7
| 5,763
|
py
|
Python
|
data_creator/prepare_two_phase2.py
|
neduchal/io_classification_experiment
|
f22a0eac84c23dbbcb9eed35c00c37a1e1a1342b
|
[
"MIT"
] | null | null | null |
data_creator/prepare_two_phase2.py
|
neduchal/io_classification_experiment
|
f22a0eac84c23dbbcb9eed35c00c37a1e1a1342b
|
[
"MIT"
] | null | null | null |
data_creator/prepare_two_phase2.py
|
neduchal/io_classification_experiment
|
f22a0eac84c23dbbcb9eed35c00c37a1e1a1342b
|
[
"MIT"
] | null | null | null |
import tarfile
import os
# import os.path
import numpy as np
import cv2
import h5py
import random
import description as d
from sklearn import preprocessing
import common_two_phase2 as common
# Parameters
hist_settings = [32,64,128,256]  # histogram bin counts, one dataset per setting
dataset_name_base = "two_phase_basic2"
directory = "./data"
categories_filename = "./categories_io.txt"
output_path = "./prepared"
val_path = "./miniplaces/data/val.txt"
types = ["rgb", "hsv"]  # color spaces to generate datasets for

print("loading classes")
# Each line of the categories file appears to be "<name> <num> <io-label>"
# separated by single spaces (see the split below) — TODO confirm format.
desc_file = open(categories_filename, "r").read().split("\n")
if desc_file[-1] == "":
    desc_file = desc_file[:-1]  # drop trailing empty line from final newline
classes = []
classes_nums = []
classes_io = []
for row in desc_file:
    items = row.split(" ")
    classes.append(items[0])
    classes_nums.append(items[1])
    classes_io.append(items[2])
train_directory = os.path.join(directory, "train")
test_directory = os.path.join(directory, "test")
val_directory = os.path.join(directory, "val")
# Prefix lengths, used by common.process_* to strip the directory part
# from file names (presumably — verify against common_two_phase2).
fname_begin_train = len(train_directory)
fname_begin_test = len(test_directory)
fname_begin_val = len(val_directory)
val_names = common.get_all_files(val_directory)
classes_val = open(val_path).read().split("\n")
#train_names = common.get_all_files(train_directory)
#random.shuffle(train_names)
train_names = open("filelist.txt", "r").read().split("\n")
if len(train_names[-1]) == 0:
    train_names = train_names[:-1]
# First 10% of the (pre-shuffled) file list becomes the test split,
# the remaining 90% the training split.
test_names = train_names[0:int(0.1 * len(train_names))]
train_names = train_names[int(0.1 * len(train_names)):]
"""
for hs in hist_settings:
for t in types:
dataset_name = t + "_color_" + dataset_name_base + "_" + str(hs)
print("Creating dataset " + dataset_name)
output_directory = os.path.join(output_path, dataset_name)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
#print("VAL")
#val_data_x, val_data_y, val_data_y_io = common.process_hist(val_names, classes_val, classes_io, classes_nums, fname_begin_val, True, t, hs)
#for i in range(16):
# common.save_to_h5(os.path.join(output_directory, 'val_'+str(i)+'.h5'), val_data_x[i], val_data_y, val_data_y_io)
print("TRAIN")
train_data_x, train_data_y, train_data_y_io = common.process_hist(train_names, classes, classes_io, classes_nums, fname_begin_train, False, t, hs)
for i in range(16):
common.save_to_h5(os.path.join(output_directory, 'train_'+str(i)+'.h5'), train_data_x[i], train_data_y, train_data_y_io)
print("TEST")
test_data_x, test_data_y, test_data_y_io = common.process_hist(test_names, classes, classes_io, classes_nums, fname_begin_train, False, t, hs)
for i in range(16):
common.save_to_h5(os.path.join(output_directory, 'test_'+str(i)+'.h5'), test_data_x[i], test_data_y, test_data_y_io)
print("DONE")
#del val_data_x, val_data_y, val_data_y_io
del train_data_x, train_data_y, train_data_y_io
del test_data_x, test_data_y, test_data_y_io
for hs in hist_settings:
dataset_name = "texture_" + dataset_name_base + "_" + str(hs)
print("Creating dataset " + dataset_name)
output_directory = os.path.join(output_path, dataset_name)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print("VAL")
#val_data_x, val_data_y, val_data_y_io = common.process_centrist(val_names, classes_val, classes_io, classes_nums, fname_begin_val, True, hs)
#for i in range(16):
# common.save_to_h5(os.path.join(output_directory, 'val_'+str(i)+'.h5'), val_data_x[i], val_data_y, val_data_y_io)
print("TRAIN")
train_data_x, train_data_y, train_data_y_io = common.process_centrist(train_names, classes, classes_io, classes_nums, fname_begin_train, False, hs)
for i in range(16):
common.save_to_h5(os.path.join(output_directory, 'train_'+str(i)+'.h5'), train_data_x[i], train_data_y, train_data_y_io)
print("TEST")
test_data_x, test_data_y, test_data_y_io = common.process_centrist(test_names, classes, classes_io, classes_nums, fname_begin_train, False, hs)
for i in range(16):
common.save_to_h5(os.path.join(output_directory, 'test_'+str(i)+'.h5'), test_data_x[i], test_data_y, test_data_y_io)
print("DONE")
#del val_data_x, val_data_y, val_data_y_io
del train_data_x, train_data_y, train_data_y_io
del test_data_x, test_data_y, test_data_y_io
"""
# Build the color-CENTRIST datasets: one per (color space, histogram size).
for hs in hist_settings:
    for t in types:
        dataset_name = t + "_ct_" + dataset_name_base + "_" + str(hs)
        print("Creating dataset " + dataset_name)
        output_directory = os.path.join(output_path, dataset_name)
        if not os.path.exists(output_directory):
            os.makedirs(output_directory)
        #print("VAL")
        #val_data_x, val_data_y, val_data_y_io = common.process_color_centrist(val_names, classes_val, classes_io, classes_nums, fname_begin_val, True, t, hs)
        #common.save_to_h5(os.path.join(output_directory, 'val.h5'), val_data_x, val_data_y, val_data_y_io)
        print("TRAIN")
        train_data_x, train_data_y, train_data_y_io = common.process_color_centrist(train_names, classes, classes_io, classes_nums, fname_begin_train, False, t, hs)
        # NOTE(review): only (path, x, y_io) is passed here, while the
        # commented-out val call above passes (path, x, y, y_io) — the class
        # labels train_data_y may have been dropped accidentally; confirm
        # against common_two_phase2.save_to_h5's signature.
        common.save_to_h5(os.path.join(output_directory, 'train.h5'), train_data_x, train_data_y_io)
        print("TEST")
        test_data_x, test_data_y, test_data_y_io = common.process_color_centrist(test_names, classes, classes_io, classes_nums, fname_begin_train, False, t, hs)
        # NOTE(review): same apparent missing test_data_y argument as above.
        common.save_to_h5(os.path.join(output_directory, 'test.h5'), test_data_x, test_data_y_io)
        print("DONE")
        #del val_data_x, val_data_y, val_data_y_io
        #del train_data_x, train_data_y, train_data_y_io
        #del test_data_x, test_data_y, test_data_y_io
| 47.237705
| 164
| 0.711435
| 934
| 5,763
| 3.993576
| 0.107066
| 0.069705
| 0.05067
| 0.051475
| 0.772386
| 0.731099
| 0.713137
| 0.709115
| 0.709115
| 0.709115
| 0
| 0.011873
| 0.166927
| 5,763
| 122
| 165
| 47.237705
| 0.765049
| 0.085719
| 0
| 0
| 0
| 0
| 0.077754
| 0.010799
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.163636
| 0
| 0.163636
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a1191da1daa081572a217a78bd586993c38ae6f1
| 9,226
|
py
|
Python
|
test/geometry/epipolar/test_projection.py
|
ChristophReich1996/kornia
|
35f955b46e8015da1cb9faa28c6943ec2b09cc2a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/geometry/epipolar/test_projection.py
|
ChristophReich1996/kornia
|
35f955b46e8015da1cb9faa28c6943ec2b09cc2a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/geometry/epipolar/test_projection.py
|
ChristophReich1996/kornia
|
35f955b46e8015da1cb9faa28c6943ec2b09cc2a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import pytest
import torch
from torch.autograd import gradcheck
from torch.testing import assert_allclose
import kornia.geometry.epipolar as epi
class TestIntrinsicsLike:
    """Tests for epi.intrinsics_like: build a camera matrix matching an image batch."""

    def test_smoke(self, device, dtype):
        # One image plus a focal value should yield a single 3x3 camera matrix.
        image = torch.rand(1, 3, 4, 4, device=device, dtype=dtype)
        focal = torch.rand(1, device=device, dtype=dtype)
        camera_matrix = epi.intrinsics_like(focal, image)
        assert camera_matrix.shape == (1, 3, 3)

    @pytest.mark.parametrize("batch_size", [1, 2, 4, 9])
    def test_shape(self, batch_size, device, dtype):
        B: int = batch_size
        focal: float = 100.0  # focal may also be a plain Python float
        image = torch.rand(B, 3, 4, 4, device=device, dtype=dtype)
        camera_matrix = epi.intrinsics_like(focal, image)
        # Batch size, device and dtype must all follow the input image.
        assert camera_matrix.shape == (B, 3, 3)
        assert camera_matrix.device == image.device
        assert camera_matrix.dtype == image.dtype
class TestScaleIntrinsics:
    """Tests for epi.scale_intrinsics: rescale a pinhole camera matrix."""

    def test_smoke_float(self, device, dtype):
        # Scale factor supplied as a plain Python float.
        scale_factor: float = 1.0
        camera_matrix = torch.rand(1, 3, 3, device=device, dtype=dtype)
        camera_matrix_scale = epi.scale_intrinsics(camera_matrix, scale_factor)
        assert camera_matrix_scale.shape == (1, 3, 3)

    def test_smoke_tensor(self, device, dtype):
        # Scale factor supplied as a zero-dim tensor.
        scale_factor = torch.tensor(1.0)
        camera_matrix = torch.rand(1, 3, 3, device=device, dtype=dtype)
        camera_matrix_scale = epi.scale_intrinsics(camera_matrix, scale_factor)
        assert camera_matrix_scale.shape == (1, 3, 3)

    @pytest.mark.parametrize("batch_size", [1, 2, 4, 9])
    def test_shape(self, batch_size, device, dtype):
        B: int = batch_size
        # A per-batch scale factor (shape (B,)) is also accepted.
        scale_factor = torch.rand(B, device=device, dtype=dtype)
        camera_matrix = torch.rand(B, 3, 3, device=device, dtype=dtype)
        camera_matrix_scale = epi.scale_intrinsics(camera_matrix, scale_factor)
        assert camera_matrix_scale.shape == (B, 3, 3)

    def test_scale_double(self, device, dtype):
        # Scaling by 0.5 halves focal lengths and principal point while the
        # homogeneous row [0, 0, 1] stays unchanged.
        scale_factor = torch.tensor(0.5)
        camera_matrix = torch.tensor([[
            [100., 0., 50.],
            [0., 100., 50.],
            [0., 0., 1.],
        ]], device=device, dtype=dtype)
        camera_matrix_expected = torch.tensor(
            [[
                [50., 0., 25.],
                [0., 50., 25.],
                [0., 0., 1.],
            ]], device=device, dtype=dtype
        )
        camera_matrix_scale = epi.scale_intrinsics(camera_matrix, scale_factor)
        assert_allclose(camera_matrix_scale, camera_matrix_expected, atol=1e-4, rtol=1e-4)

    def test_gradcheck(self, device):
        # gradcheck needs float64 inputs with requires_grad on the tested leaf.
        scale_factor = torch.ones(1, device=device, dtype=torch.float64, requires_grad=True)
        camera_matrix = torch.ones(1, 3, 3, device=device, dtype=torch.float64)
        assert gradcheck(epi.scale_intrinsics, (
            camera_matrix,
            scale_factor,
        ), raise_exception=True)
class TestProjectionFromKRt:
    """Tests for epi.projection_from_KRt: compose P (3x4) from K, R, t."""

    def test_smoke(self, device, dtype):
        K = torch.rand(1, 3, 3, device=device, dtype=dtype)
        R = torch.rand(1, 3, 3, device=device, dtype=dtype)
        t = torch.rand(1, 3, 1, device=device, dtype=dtype)
        P = epi.projection_from_KRt(K, R, t)
        assert P.shape == (1, 3, 4)

    @pytest.mark.parametrize("batch_size", [1, 2, 4])
    def test_shape(self, batch_size, device, dtype):
        B: int = batch_size
        K = torch.rand(B, 3, 3, device=device, dtype=dtype)
        R = torch.rand(B, 3, 3, device=device, dtype=dtype)
        t = torch.rand(B, 3, 1, device=device, dtype=dtype)
        P = epi.projection_from_KRt(K, R, t)
        assert P.shape == (B, 3, 4)

    def test_simple(self, device, dtype):
        # With R = identity, the expected projection is [K | K @ t].
        K = torch.tensor([[
            [10., 0., 30.],
            [0., 20., 40.],
            [0., 0., 1.],
        ]], device=device, dtype=dtype)
        R = torch.tensor([[
            [1., 0., 0.],
            [0., 1., 0.],
            [0., 0., 1.],
        ]], device=device, dtype=dtype)
        t = torch.tensor([
            [[1.], [2.], [3.]],
        ], device=device, dtype=dtype)
        P_expected = torch.tensor(
            [[
                [10., 0., 30., 100.],
                [0., 20., 40., 160.],
                [0., 0., 1., 3.],
            ]], device=device, dtype=dtype
        )
        P_estimated = epi.projection_from_KRt(K, R, t)
        assert_allclose(P_estimated, P_expected, atol=1e-4, rtol=1e-4)

    def test_krt_from_projection(self, device, dtype):
        # Round trip: decomposing the projection from test_simple must
        # recover the original K, R, t factors.
        P = torch.tensor(
            [[
                [10., 0., 30., 100.],
                [0., 20., 40., 160.],
                [0., 0., 1., 3.],
            ]], device=device, dtype=dtype
        )
        K_expected = torch.tensor([[
            [10., 0., 30.],
            [0., 20., 40.],
            [0., 0., 1.],
        ]], device=device, dtype=dtype)
        R_expected = torch.tensor([[
            [1., 0., 0.],
            [0., 1., 0.],
            [0., 0., 1.],
        ]], device=device, dtype=dtype)
        t_expected = torch.tensor([[[1.], [2.], [3.]]], device=device, dtype=dtype)
        K_estimated, R_estimated, t_estimated = epi.KRt_from_projection(P)
        assert_allclose(K_estimated, K_expected, atol=1e-4, rtol=1e-4)
        assert_allclose(R_estimated, R_expected, atol=1e-4, rtol=1e-4)
        assert_allclose(t_estimated, t_expected, atol=1e-4, rtol=1e-4)

    def test_gradcheck(self, device):
        K = torch.rand(1, 3, 3, device=device, dtype=torch.float64, requires_grad=True)
        R = torch.rand(1, 3, 3, device=device, dtype=torch.float64)
        t = torch.rand(1, 3, 1, device=device, dtype=torch.float64)
        assert gradcheck(epi.projection_from_KRt, (
            K,
            R,
            t,
        ), raise_exception=True)
class TestProjectionsFromFundamental:
    """Tests for epi.projections_from_fundamental: P pair (3x4x2) from F."""

    def test_smoke(self, device, dtype):
        fundamental = torch.rand(1, 3, 3, device=device, dtype=dtype)
        projections = epi.projections_from_fundamental(fundamental)
        assert projections.shape == (1, 3, 4, 2)

    @pytest.mark.parametrize("batch_size", [1, 2, 4])
    def test_shape(self, batch_size, device, dtype):
        num_batches: int = batch_size
        fundamental = torch.rand(num_batches, 3, 3, device=device, dtype=dtype)
        projections = epi.projections_from_fundamental(fundamental)
        assert projections.shape == (num_batches, 3, 4, 2)

    def test_gradcheck(self, device):
        fundamental = torch.rand(
            1, 3, 3, device=device, dtype=torch.float64, requires_grad=True)
        assert gradcheck(
            epi.projections_from_fundamental, (fundamental, ), raise_exception=True)
class TestKRtFromProjection:
    """Tests for epi.KRt_from_projection: decompose P (3x4) into K, R, t."""

    def test_smoke(self, device, dtype):
        P = torch.randn(1, 3, 4, device=device, dtype=dtype)
        K, R, t = epi.KRt_from_projection(P)
        assert K.shape == (1, 3, 3)
        assert R.shape == (1, 3, 3)
        assert t.shape == (1, 3, 1)

    @pytest.mark.parametrize("batch_size", [1, 2, 4])
    def test_shape(self, batch_size, device, dtype):
        B: int = batch_size
        P = torch.rand(B, 3, 4, device=device, dtype=dtype)
        K, R, t = epi.KRt_from_projection(P)
        assert K.shape == (B, 3, 3)
        assert R.shape == (B, 3, 3)
        assert t.shape == (B, 3, 1)

    def test_simple(self, device, dtype):
        # Fixed projection with precomputed reference decomposition.
        P = torch.tensor(
            [[[308., 139., 231., 84.], [481., 161., 358., 341.], [384., 387., 459., 102.]]], device=device, dtype=dtype
        )
        K_expected = torch.tensor(
            [[[17.006138, 122.441254, 390.211426], [0.0, 228.743622, 577.167480], [0.0, 0.0, 712.675232]]],
            device=device,
            dtype=dtype
        )
        R_expected = torch.tensor(
            [[[0.396559, 0.511023, -0.762625], [0.743249, -0.666318, -0.060006], [0.538815, 0.543024, 0.644052]]],
            device=device,
            dtype=dtype
        )
        t_expected = torch.tensor([[[-6.477699], [1.129624], [0.143123]]], device=device, dtype=dtype)
        K_estimated, R_estimated, t_estimated = epi.KRt_from_projection(P)
        assert_allclose(K_estimated, K_expected, atol=1e-4, rtol=1e-4)
        assert_allclose(R_estimated, R_expected, atol=1e-4, rtol=1e-4)
        assert_allclose(t_estimated, t_expected, atol=1e-4, rtol=1e-4)

    def test_projection_from_krt(self, device, dtype):
        # Round trip: recomposing the factors from test_simple must give
        # back the original projection matrix.
        K = torch.tensor(
            [[[17.006138, 122.441254, 390.211426], [0.0, 228.743622, 577.167480], [0.0, 0.0, 712.675232]]],
            device=device,
            dtype=dtype
        )
        R = torch.tensor(
            [[[0.396559, 0.511023, -0.762625], [0.743249, -0.666318, -0.060006], [0.538815, 0.543024, 0.644052]]],
            device=device,
            dtype=dtype
        )
        t = torch.tensor([
            [[-6.477699], [1.129624], [0.143123]],
        ], device=device, dtype=dtype)
        P_expected = torch.tensor(
            [[[308., 139., 231., 84.], [481., 161., 358., 341.], [384., 387., 459., 102.]]], device=device, dtype=dtype
        )
        P_estimated = epi.projection_from_KRt(K, R, t)
        assert_allclose(P_estimated, P_expected, atol=1e-4, rtol=1e-4)

    def test_gradcheck(self, device):
        P_mat = torch.rand(1, 3, 4, device=device, dtype=torch.float64, requires_grad=True)
        assert gradcheck(epi.KRt_from_projection, (P_mat, ), raise_exception=True)
| 36.611111
| 119
| 0.573596
| 1,249
| 9,226
| 4.093675
| 0.096878
| 0.12478
| 0.139644
| 0.150597
| 0.860356
| 0.823783
| 0.769216
| 0.737727
| 0.69998
| 0.640915
| 0
| 0.097368
| 0.275309
| 9,226
| 251
| 120
| 36.756972
| 0.667365
| 0
| 0
| 0.537313
| 0
| 0
| 0.005419
| 0
| 0
| 0
| 0
| 0
| 0.154229
| 1
| 0.099502
| false
| 0
| 0.024876
| 0
| 0.149254
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a13b4dceee9d1e66bf8d908ee68ae02c16b23727
| 19
|
py
|
Python
|
beeline/version.py
|
SeanHood/beeline-python
|
017a08bd91c9284ce4a5f5b5e9d9f3cb286e8dec
|
[
"Apache-2.0"
] | null | null | null |
beeline/version.py
|
SeanHood/beeline-python
|
017a08bd91c9284ce4a5f5b5e9d9f3cb286e8dec
|
[
"Apache-2.0"
] | null | null | null |
beeline/version.py
|
SeanHood/beeline-python
|
017a08bd91c9284ce4a5f5b5e9d9f3cb286e8dec
|
[
"Apache-2.0"
] | null | null | null |
# Package version string (major.minor.patch).
VERSION = '2.11.0'
| 9.5
| 18
| 0.578947
| 4
| 19
| 2.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.157895
| 19
| 1
| 19
| 19
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a1addaf6f91811fec0a90bc034f2c73c1e4305bd
| 4,863
|
py
|
Python
|
epytope/Data/pssms/arb/mat/B_4001_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7
|
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/Data/pssms/arb/mat/B_4001_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22
|
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/Data/pssms/arb/mat/B_4001_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4
|
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
# Position-specific scoring data for allele B*40:01, 9-mer peptides (ARB
# matrix). Integer keys 0-8 map each peptide position to a dict of
# per-amino-acid scores; key -1 holds the regression 'slope' and
# 'intercept'. Generated data — do not hand-edit.
B_4001_9 = {0: {'A': 0.2077610562759658, 'C': -0.2669933043368034, 'E': -0.12792660087725455, 'D': -0.3626558736049774, 'G': 0.35729098554184324, 'F': -0.18289329450865147, 'I': -0.16070579048536174, 'H': 0.1088676729196258, 'K': -0.07451273025393997, 'M': 0.121759908391039, 'L': 0.1722960731632381, 'N': -0.12615871403421008, 'Q': 0.05879046627513036, 'P': -0.36881018484686606, 'S': 0.07666039236356763, 'R': 0.13193868623618532, 'T': -0.05274583178037905, 'W': 0.1351401599930177, 'V': -0.2305214242077551, 'Y': -0.09566282887938869}, 1: {'A': -0.4484212332863034, 'C': -0.2521839193203798, 'E': 1.9419896385458908, 'D': -0.44852853181941355, 'G': -0.4701824055743268, 'F': -0.39628100613040884, 'I': -0.47911668117371137, 'H': -0.38438987207938613, 'K': -0.44110799678748347, 'M': -0.4088413616948616, 'L': -0.522131446637918, 'N': -0.4628157659654495, 'Q': 0.195962413108542, 'P': -0.5335898451353425, 'S': -0.4339902480016938, 'R': -0.4712486457739077, 'T': -0.47865842114325996, 'W': -0.429435264218871, 'V': -0.49129794408344646, 'Y': -0.5171256163758323}, 2: {'A': 0.010819187864985112, 'C': 0.2500405364643315, 'E': 0.08034060255010227, 'D': -0.1315786664358783, 'G': -0.03564285075522801, 'F': 0.05557513380965841, 'I': 0.31841918533325775, 'H': 0.2796769117223349, 'K': -0.2899508744308094, 'M': 0.2630279147125642, 'L': 0.2153568333723438, 'N': 0.17091513024037117, 'Q': -0.19385623682810071, 'P': -0.3415951485600066, 'S': -0.028958807903552205, 'R': -0.4072492315582124, 'T': 0.047481293210211786, 'W': 0.15541982088464643, 'V': 0.36087442134600356, 'Y': -0.136081696231375}, 3: {'A': 0.25233005858909424, 'C': 0.08602094879774362, 'E': -0.11790276637629579, 'D': -0.09607075235091556, 'G': 0.21841431122002622, 'F': 0.1076187943490457, 'I': -0.09120416968797997, 'H': -0.12071082502762819, 'K': -0.04550194002977235, 'M': -0.03585982900235549, 'L': 0.12408805485136665, 'N': 0.025607595090719554, 'Q': -0.041614773250583145, 'P': -0.23104740440077845, 'S': 0.12569787094508614, 'R': -0.17496108972667135, 'T': 0.008370871877586795, 'W': -0.07321478848843309, 'V': 0.02255967076877008, 'Y': 0.03601146312295931}, 4: {'A': 0.14676951098519908, 'C': 0.008984777805746295, 'E': -0.19644140377358327, 'D': 0.1897919130749556, 'G': 0.13577483673116136, 'F': 0.04895399220080016, 'I': 0.13460803143218958, 'H': -0.021284821788172627, 'K': -0.32949589274844887, 'M': 0.15947509138344684, 'L': 0.18682811777904618, 'N': -0.05519722769205147, 'Q': -0.19221853577416212, 'P': -0.10474824373012424, 'S': 0.1196792190131309, 'R': -0.28504107820225355, 'T': 0.01807523046613372, 'W': -0.16108224580987077, 'V': -0.018416645790632794, 'Y': -0.06479810720334485}, 5: {'A': 0.23166225312259542, 'C': 0.011236919486698763, 'E': 0.16679261468817147, 'D': -0.02167416714357672, 'G': -0.12438226983268912, 'F': -0.0044095531997541335, 'I': -0.1031656419743817, 'H': 0.30097390290937576, 'K': 0.010457995804453743, 'M': -0.18443181422831692, 'L': 0.12137006560810265, 'N': 0.052469273948480374, 'Q': -0.23730923269407517, 'P': -0.1221351751822463, 'S': -0.036230300106786656, 'R': -0.1660058519206502, 'T': -0.05325342336015521, 'W': 0.15003847818564886, 'V': 0.07599117432103443, 'Y': 0.18074300095161233}, 6: {'A': -0.17753053068009028, 'C': 0.19931910111016013, 'E': 0.22704340650059657, 'D': -0.1689927372320863, 'G': 0.05848361021323065, 'F': 0.15878725954894723, 'I': 0.14667796947866943, 'H': 0.09529400570740078, 'K': -0.3307659594743624, 'M': -0.25832771766176865, 'L': 0.17267822574186065, 'N': -0.09288836819881052, 'Q': -0.19752285888100277, 'P': -0.03862309705201802, 'S': -0.17076156927633204, 'R': -0.13617753061760332, 'T': 0.12996005015102396, 'W': 0.04667543911501483, 'V': 0.06651283679379297, 'Y': 0.17116642312771296}, 7: {'A': -0.0736980516792725, 'C': -0.014026174889597169, 'E': -0.05094528097445851, 'D': -0.21029235985238978, 'G': -0.08117413327510974, 'F': 0.048441415093627975, 'I': 0.4500393951027163, 'H': -0.11295795811536642, 'K': 0.021174698257636234, 'M': -0.12636829433030738, 'L': 0.1480922749232907, 'N': -0.18971941646754, 'Q': -0.17335988062463334, 'P': 0.05210973274718894, 'S': 0.10483502207372639, 'R': -0.22202660790649542, 'T': -0.08322239896252853, 'W': 0.093014237944162, 'V': 0.14294552632182347, 'Y': 0.07336927188427979}, 8: {'A': -0.16430518068451083, 'C': -0.453485714645266, 'E': -0.4314475194111367, 'D': -0.3399081220102916, 'G': -0.40106393599554013, 'F': 0.12393378995778476, 'I': 0.24586487103529037, 'H': -0.3730447354803529, 'K': -0.4965796461686587, 'M': 0.08810949842847934, 'L': 0.6591246260176412, 'N': -0.3125312199397761, 'Q': -0.3053841587886124, 'P': 0.20513796921799254, 'S': -0.3356347261964649, 'R': -0.4465944744479048, 'T': -0.11124205503169173, 'W': -0.5118377135991584, 'V': 0.03700758030145539, 'Y': -0.49652029597326053}, -1: {'slope': 0.11232339687530213, 'intercept': -0.44037546518031584}}
| 4,863
| 4,863
| 0.707999
| 559
| 4,863
| 6.155635
| 0.386404
| 0.005231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.723262
| 0.077113
| 4,863
| 1
| 4,863
| 4,863
| 0.043449
| 0
| 0
| 0
| 0
| 0
| 0.039885
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a1d629eaf804d024018896233ad01b649cbaf5c6
| 113
|
py
|
Python
|
nablapps/meeting_records/__init__.py
|
Amund211/nablaweb
|
8105c34615d4b67637e982545fbc6489a131c1f3
|
[
"MIT"
] | 17
|
2019-10-07T15:10:58.000Z
|
2022-01-21T14:18:07.000Z
|
nablapps/meeting_records/__init__.py
|
Amund211/nablaweb
|
8105c34615d4b67637e982545fbc6489a131c1f3
|
[
"MIT"
] | 222
|
2019-10-07T15:04:51.000Z
|
2022-03-24T12:14:16.000Z
|
nablapps/meeting_records/__init__.py
|
Amund211/nablaweb
|
8105c34615d4b67637e982545fbc6489a131c1f3
|
[
"MIT"
] | 7
|
2019-10-10T18:53:42.000Z
|
2021-10-18T02:13:09.000Z
|
"""
Django app for meeting records
"""
default_app_config = "nablapps.meeting_records.apps.MeetingRecordsConfig"
| 22.6
| 73
| 0.80531
| 13
| 113
| 6.769231
| 0.769231
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088496
| 113
| 4
| 74
| 28.25
| 0.854369
| 0.265487
| 0
| 0
| 0
| 0
| 0.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a1e1a2c6280a34e4e1c7d9486eb61a8a107ccbb3
| 359
|
py
|
Python
|
beerpy/units/concentration.py
|
MrLeeh/beerpy
|
1bbec29a39b01a9d8e54c475de29c768dfd27597
|
[
"MIT"
] | null | null | null |
beerpy/units/concentration.py
|
MrLeeh/beerpy
|
1bbec29a39b01a9d8e54c475de29c768dfd27597
|
[
"MIT"
] | null | null | null |
beerpy/units/concentration.py
|
MrLeeh/beerpy
|
1bbec29a39b01a9d8e54c475de29c768dfd27597
|
[
"MIT"
] | null | null | null |
"""
Concentration unit
"""
GRAMS_PER_LITER = "g/l"
_units = (GRAMS_PER_LITER)
class Concentration:
    """A concentration value, expressed in grams per liter."""

    def __init__(self, value: float):
        self.value = value
        self._unit = GRAMS_PER_LITER

    @property
    def unit(self):
        """Read-only unit string of this concentration."""
        return self._unit

    def __repr__(self):
        return "Concentration: {}{}".format(self.value, self.unit)
| 16.318182
| 66
| 0.632312
| 43
| 359
| 4.883721
| 0.418605
| 0.114286
| 0.185714
| 0.161905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.245125
| 359
| 21
| 67
| 17.095238
| 0.774908
| 0.050139
| 0
| 0
| 0
| 0
| 0.066265
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| false
| 0
| 0
| 0.181818
| 0.545455
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
b80abec632fb8f136abb57a8c476de0407c324cf
| 43
|
py
|
Python
|
heaphopper/__init__.py
|
angr/heaphopper
|
fa0efa8e71908bd05cd07a2ac0c73dbc5d445ae1
|
[
"BSD-2-Clause"
] | 185
|
2018-08-05T03:03:47.000Z
|
2022-02-15T06:32:22.000Z
|
heaphopper/__init__.py
|
angr/heaphopper
|
fa0efa8e71908bd05cd07a2ac0c73dbc5d445ae1
|
[
"BSD-2-Clause"
] | 11
|
2018-08-06T11:09:07.000Z
|
2020-07-14T05:26:23.000Z
|
heaphopper/__init__.py
|
angr/heaphopper
|
fa0efa8e71908bd05cd07a2ac0c73dbc5d445ae1
|
[
"BSD-2-Clause"
] | 19
|
2018-08-05T03:03:50.000Z
|
2022-02-24T10:44:00.000Z
|
from .analysis import *
from .gen import *
| 14.333333
| 23
| 0.72093
| 6
| 43
| 5.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186047
| 43
| 2
| 24
| 21.5
| 0.885714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
62a99fa452b78e9c4d32e9051b57cec1400b8925
| 66,040
|
py
|
Python
|
salt/transport/road/raet/transacting.py
|
Jiaion/salt
|
a7d2444a60f33942a293680a41e894eec98f5707
|
[
"Apache-2.0"
] | null | null | null |
salt/transport/road/raet/transacting.py
|
Jiaion/salt
|
a7d2444a60f33942a293680a41e894eec98f5707
|
[
"Apache-2.0"
] | null | null | null |
salt/transport/road/raet/transacting.py
|
Jiaion/salt
|
a7d2444a60f33942a293680a41e894eec98f5707
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
stacking.py raet protocol stacking classes
'''
# pylint: skip-file
# pylint: disable=W0611
# Import python libs
import socket
import binascii
import struct
try:
import simplejson as json
except ImportError:
import json
# Import ioflo libs
from ioflo.base.odicting import odict
from ioflo.base import aiding
from . import raeting
from . import nacling
from . import packeting
from . import estating
from ioflo.base.consoling import getConsole
console = getConsole()
class Transaction(object):
    '''
    RAET protocol transaction class

    Base class for both sides of a RAET transaction; tracks the remote
    estate id, session/transaction ids, a timeout timer, and the last
    transmitted/received packets.
    '''
    Timeout = 5.0  # default timeout in seconds

    def __init__(self, stack=None, kind=None, timeout=None,
                 reid=None, rmt=False, bcst=False, sid=None, tid=None,
                 txData=None, txPacket=None, rxPacket=None):
        '''
        Setup Transaction instance
        timeout of 0.0 means no timeout go forever
        '''
        self.stack = stack
        self.kind = kind or raeting.PACKET_DEFAULTS['tk']
        if timeout is None:
            timeout = self.Timeout
        self.timeout = timeout
        self.timer = aiding.StoreTimer(self.stack.store, duration=self.timeout)
        # local estate is the .stack.estate
        self.reid = reid  # remote estate eid
        self.rmt = rmt  # True when this side is the correspondent (remote)
        self.bcst = bcst  # broadcast flag
        self.sid = sid  # session id
        self.tid = tid  # transaction id
        self.txData = txData or odict()  # data used to prepare last txPacket
        self.txPacket = txPacket  # last tx packet needed for retries
        self.rxPacket = rxPacket  # last rx packet needed for index

    @property
    def index(self):
        '''
        Property is transaction tuple (rf, le, re, si, ti, bf,)
        '''
        le = self.stack.estate.eid
        if le == 0:  # bootstrapping onto channel use ha
            le = self.stack.estate.ha
        re = self.reid
        if re == 0:  # bootstrapping onto channel use ha
            # NOTE(review): this looks up estates[self.reid] with reid == 0;
            # confirm estates keeps a 0 entry during bootstrap.
            re = self.stack.estates[self.reid].ha
        return ((self.rmt, le, re, self.sid, self.tid, self.bcst,))

    def process(self):
        '''
        Process time based handling of transaction like timeout or retries
        '''
        pass

    def receive(self, packet):
        '''
        Process received packet Subclasses should super call this
        '''
        self.rxPacket = packet

    def transmit(self, packet):
        '''
        Queue tx duple on stack transmit queue
        '''
        try:
            self.stack.txUdp(packet.packed, self.reid)
        except raeting.StackError as ex:
            # Fixed: `ex + '\n'` raised TypeError on Python 3 (cannot
            # concatenate an exception object with a str); stringify first.
            console.terse(str(ex) + '\n')
            self.stack.incStat(self.statKey())
            self.remove(packet.index)
            return
        self.txPacket = packet

    def add(self, index=None):
        '''
        Add self to stack transactions
        '''
        if not index:
            index = self.index
        self.stack.addTransaction(index, self)

    def remove(self, index=None):
        '''
        Remove self from stack transactions
        '''
        if not index:
            index = self.index
        self.stack.removeTransaction(index, transaction=self)

    def statKey(self):
        '''
        Return the stat name key from class name
        '''
        return ("{0}_transaction_failure".format(self.__class__.__name__.lower()))
class Initiator(Transaction):
    '''
    RAET protocol initiator transaction class
    '''
    def __init__(self, **kwa):
        '''
        Setup Transaction instance
        '''
        kwa['rmt'] = False  # force rmt to False
        super(Initiator, self).__init__(**kwa)

    def process(self):
        '''
        Process time based handling of transaction like timeout or retries
        '''
        # A timeout of 0.0 means the transaction never expires.
        if self.timeout <= 0.0:
            return
        if self.timer.expired:
            self.stack.removeTransaction(self.index, transaction=self)
class Correspondent(Transaction):
    '''
    RAET protocol correspondent transaction class
    '''
    Requireds = ['sid', 'tid', 'rxPacket']

    def __init__(self, **kwa):
        '''
        Setup Transaction instance
        '''
        kwa['rmt'] = True  # force rmt to True
        # All of .Requireds must be supplied by the caller.
        missing = [arg for arg in self.Requireds if arg not in kwa]
        if missing:
            raise TypeError(
                "Missing required keyword arguments: '{0}'".format(missing))
        super(Correspondent, self).__init__(**kwa)
class Staler(Initiator):
    '''
    RAET protocol Staler initiator transaction class

    Dummy initiator used only to nack a stale correspondent packet for
    which no matching transaction exists.
    '''
    def __init__(self, **kwa):
        '''
        Setup Transaction instance
        '''
        for key in ['kind', 'reid', 'sid', 'tid', 'rxPacket']:
            if key not in kwa:
                # Fixed typo in message: "argumens" -> "arguments"
                # (now consistent with Correspondent's error message).
                emsg = "Missing required keyword arguments: '{0}'".format(key)
                raise TypeError(emsg)
        super(Staler, self).__init__(**kwa)
        self.prep()

    def prep(self):
        '''
        Prepare .txData for nack to stale
        '''
        # Swap source/destination: reply goes back to where the stale
        # packet came from.
        self.txData.update(sh=self.stack.estate.host,
                           sp=self.stack.estate.port,
                           dh=self.rxPacket.data['sh'],
                           dp=self.rxPacket.data['sp'],
                           se=self.stack.estate.eid,
                           de=self.reid,
                           tk=self.kind,
                           cf=self.rmt,
                           bf=self.bcst,
                           si=self.sid,
                           ti=self.tid,
                           ck=raeting.coatKinds.nada,
                           fk=raeting.footKinds.nada)

    def nack(self):
        '''
        Send nack to stale packet from correspondent.
        This is used when a correspondent packet is received but no matching
        Initiator transaction is found. So create a dummy initiator and send
        a nack packet back. Do not add transaction so don't need to remove it.
        '''
        ha = (self.rxPacket.data['sh'], self.rxPacket.data['sp'])
        emsg = "{0} Stale Transaction from {1} dropping ...".format(self.stack.name, ha)
        console.terse(emsg + '\n')
        self.stack.incStat('stale_correspondent_attempt')

        if self.reid not in self.stack.estates:
            emsg = "Unknown correspondent estate id '{0}'".format(self.reid)
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('unknown_correspondent_eid')
            #return #maybe we should return and not respond at all in this case

        body = odict()
        packet = packeting.TxPacket(stack=self.stack,
                                    kind=raeting.pcktKinds.nack,
                                    embody=body,
                                    data=self.txData)
        try:
            packet.pack()
        except raeting.PacketError as ex:
            # Fixed: `ex + '\n'` raised TypeError on Python 3 (cannot
            # concatenate an exception object with a str); stringify first.
            console.terse(str(ex) + '\n')
            self.stack.incStat("packing_error")
            return

        self.stack.txes.append((packet.packed, ha))
        console.terse("Nack stale correspondent at {0}\n".format(self.stack.store.stamp))
        self.stack.incStat('stale_correspondent_nack')
class Joiner(Initiator):
    '''
    RAET protocol Joiner Initiator class, dual of Joinent.

    Runs the join handshake from the initiator side: sends the join
    request, handles pend/accept/reject responses, and on acceptance
    adopts the assigned estate ids and persists local and remote data.
    '''
    RedoTimeoutMin = 1.0 # initial timeout
    RedoTimeoutMax = 4.0 # max timeout

    def __init__(self, mha=None, redoTimeoutMin=None, redoTimeoutMax=None, **kwa):
        '''
        Setup Transaction instance.

        mha is the (host, port) of the channel master, used to bootstrap
        a master remote estate when none exists yet.
        '''
        kwa['kind'] = raeting.trnsKinds.join
        super(Joiner, self).__init__(**kwa)
        if mha is None:
            mha = ('127.0.0.1', raeting.RAET_PORT)
        self.redoTimeoutMax = redoTimeoutMax or self.RedoTimeoutMax
        self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin
        self.redoTimer = aiding.StoreTimer(self.stack.store,
                                           duration=self.redoTimeoutMin)
        if self.reid is None:
            if not self.stack.estates: # no channel master so make one
                master = estating.RemoteEstate(eid=0, ha=mha)
                try:
                    self.stack.addRemote(master)
                except raeting.StackError as ex:
                    # fix: str(ex) — Exception + str raises TypeError
                    console.terse(str(ex) + '\n')
                    self.stack.incStat(self.statKey())
                    return
            # list() so this works under both py2 list and py3 view semantics
            self.reid = list(self.stack.estates.values())[0].eid # zeroth is channel master
        self.sid = 0
        self.tid = self.stack.estates[self.reid].nextTid()
        self.prep()
        self.add(self.index)

    def receive(self, packet):
        '''
        Process received packet belonging to this transaction.
        '''
        super(Joiner, self).receive(packet) #  self.rxPacket = packet

        if packet.data['tk'] == raeting.trnsKinds.join:
            if packet.data['pk'] == raeting.pcktKinds.ack: # pending
                self.pend()  # set timer for redo
            elif packet.data['pk'] == raeting.pcktKinds.response:
                self.accept()
            elif packet.data['pk'] == raeting.pcktKinds.nack: # rejected
                self.rejected()

    def process(self):
        '''
        Perform time based processing of transaction.

        Remove the transaction on overall timeout; otherwise redo the
        join request with exponential backoff capped at .redoTimeoutMax.
        '''
        if self.timeout > 0.0 and self.timer.expired:
            if self.txPacket and self.txPacket.data['pk'] == raeting.pcktKinds.request:
                self.remove(self.txPacket.index) # index changes after accept
            else:
                self.remove(self.index) # in case never sent txPacket
            console.concise("Joiner timed out at {0}\n".format(self.stack.store.stamp))
            return

        # need keep sending join until accepted or timed out
        if self.redoTimer.expired:
            # fix: cap backoff at redoTimeoutMax (original clamped to the min,
            # collapsing the exponential backoff)
            duration = min(
                        max(self.redoTimeoutMin,
                            self.redoTimer.duration) * 2.0,
                        self.redoTimeoutMax)
            self.redoTimer.restart(duration=duration)
            if (self.txPacket and
                    self.txPacket.data['pk'] == raeting.pcktKinds.request):
                self.transmit(self.txPacket) # redo
                console.concise("Joiner Redo Join at {0}\n".format(self.stack.store.stamp))

    def prep(self):
        '''
        Prepare .txData with header fields for outgoing join packets.
        '''
        self.txData.update( sh=self.stack.estate.host,
                            sp=self.stack.estate.port,
                            dh=self.stack.estates[self.reid].host,
                            dp=self.stack.estates[self.reid].port,
                            se=self.stack.estate.eid,
                            de=self.reid,
                            tk=self.kind,
                            cf=self.rmt,
                            bf=self.bcst,
                            si=self.sid,
                            ti=self.tid,
                            ck=raeting.coatKinds.nada,
                            fk=raeting.footKinds.nada)

    def join(self):
        '''
        Send join request carrying our name and long term public keys.
        '''
        if self.reid not in self.stack.estates:
            emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat(self.statKey())
            self.remove()
            return

        body = odict([('name', self.stack.estate.name),
                      ('verhex', self.stack.estate.signer.verhex),
                      ('pubhex', self.stack.estate.priver.pubhex)])
        packet = packeting.TxPacket(stack=self.stack,
                                    kind=raeting.pcktKinds.request,
                                    embody=body,
                                    data=self.txData)
        try:
            packet.pack()
        except raeting.PacketError as ex:
            # fix: str(ex) — Exception + str raises TypeError
            console.terse(str(ex) + '\n')
            self.stack.incStat("packing_error")
            self.remove()
            return
        self.transmit(packet)
        console.concise("Joiner Do Join at {0}\n".format(self.stack.store.stamp))

    def pend(self):
        '''
        Process ack to join packet.

        Join is pending remote-side acceptance; nothing to do here since
        the redo timer keeps the request alive.
        '''
        if not self.stack.parseInner(self.rxPacket):
            return

    def accept(self):
        '''
        Perform acceptance in response to join response packet.

        Validates the response body, adopts the assigned local estate id,
        re-indexes/renames the remote estate as needed, then acks or
        nacks based on the remote key acceptance status.
        '''
        if not self.stack.parseInner(self.rxPacket):
            return
        body = self.rxPacket.body.data

        leid = body.get('leid')
        if not leid:
            emsg = "Missing local estate id in accept packet"
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_accept')
            self.remove(self.txPacket.index)
            return

        reid = body.get('reid')
        if not reid:
            emsg = "Missing remote estate id in accept packet"
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_accept')
            self.remove(self.txPacket.index)
            return

        name = body.get('name')
        if not name:
            emsg = "Missing remote name in accept packet"
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_accept')
            self.remove(self.txPacket.index)
            return

        verhex = body.get('verhex')
        if not verhex:
            emsg = "Missing remote verifier key in accept packet"
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_accept')
            self.remove(self.txPacket.index)
            return

        pubhex = body.get('pubhex')
        if not pubhex:
            emsg = "Missing remote crypt key in accept packet"
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_accept')
            self.remove(self.txPacket.index)
            return

        self.stack.estate.eid = leid
        self.stack.dumpLocal()

        remote = self.stack.estates[self.reid]
        if remote.eid != reid: # move remote estate to new index
            try:
                self.stack.moveRemote(old=remote.eid, new=reid)
            except raeting.StackError as ex:
                # fix: str(ex) — Exception + str raises TypeError
                console.terse(str(ex) + '\n')
                self.stack.incStat(self.statKey())
                self.remove(self.txPacket.index)
                return
        if remote.name != name: # rename remote estate to new name
            try:
                self.stack.renameRemote(old=remote.name, new=name)
            except raeting.StackError as ex:
                # fix: str(ex) — Exception + str raises TypeError
                console.terse(str(ex) + '\n')
                self.stack.incStat(self.statKey())
                self.remove(self.txPacket.index)
                return
        self.reid = reid

        # we are assuming for now that the joiner cannot talk peer to peer only
        # to main estate otherwise we need to ensure unique eid, name, and ha on road

        # check if remote keys of main estate are accepted here
        status = self.stack.safe.statusRemoteEstate(remote,
                                                    verhex=verhex,
                                                    pubhex=pubhex,
                                                    main=False)

        if status == raeting.acceptances.rejected:
            self.nackAccept()
        else:
            remote.joined = True # accepted
            remote.nextSid()
            self.ackAccept()

        self.stack.dumpRemote(remote)

    def rejected(self):
        '''
        Process nack to join packet: terminate and count the failure.
        '''
        if not self.stack.parseInner(self.rxPacket):
            return
        self.remove(self.txPacket.index)
        console.terse("Joiner Rejected at {0}\n".format(self.stack.store.stamp))
        self.stack.incStat(self.statKey())

    def ackAccept(self):
        '''
        Send ack to accept response, completing the join on our side.
        '''
        if self.reid not in self.stack.estates:
            emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_remote_eid')
            self.remove(self.txPacket.index)
            return

        body = odict()
        packet = packeting.TxPacket(stack=self.stack,
                                    kind=raeting.pcktKinds.ack,
                                    embody=body,
                                    data=self.txData)
        try:
            packet.pack()
        except raeting.PacketError as ex:
            # fix: str(ex) — Exception + str raises TypeError
            console.terse(str(ex) + '\n')
            self.stack.incStat("packing_error")
            self.remove(self.txPacket.index)
            return

        self.transmit(packet)
        # remove by rxPacket.index since transaction index changed on accept
        self.remove(self.rxPacket.index)
        console.concise("Joiner Do Accept at {0}\n".format(self.stack.store.stamp))
        self.stack.incStat("join_initiate_complete")

    def nackAccept(self):
        '''
        Send nack to accept response (remote keys were rejected).
        '''
        if self.reid not in self.stack.estates:
            emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_remote_eid')
            self.remove(self.txPacket.index)
            return

        body = odict()
        packet = packeting.TxPacket(stack=self.stack,
                                    kind=raeting.pcktKinds.nack,
                                    embody=body,
                                    data=self.txData)
        try:
            packet.pack()
        except raeting.PacketError as ex:
            # fix: str(ex) — Exception + str raises TypeError
            console.terse(str(ex) + '\n')
            self.stack.incStat("packing_error")
            self.remove(self.txPacket.index)
            return

        self.transmit(packet)
        self.remove(self.txPacket.index)
        console.terse("Joiner Do Reject at {0}\n".format(self.stack.store.stamp))
        self.stack.incStat(self.statKey())
class Joinent(Correspondent):
    '''
    RAET protocol Joinent transaction class, dual of Joiner.

    Runs the join handshake from the correspondent side: vets the join
    request, creates or updates the remote estate record, and sends the
    pend/accept/reject responses.
    '''
    RedoTimeoutMin = 0.1 # initial timeout
    RedoTimeoutMax = 2.0 # max timeout

    def __init__(self, redoTimeoutMin=None, redoTimeoutMax=None, **kwa):
        '''
        Setup Transaction instance.
        '''
        kwa['kind'] = raeting.trnsKinds.join
        super(Joinent, self).__init__(**kwa)
        self.redoTimeoutMax = redoTimeoutMax or self.RedoTimeoutMax
        self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin
        self.redoTimer = aiding.StoreTimer(self.stack.store, duration=0.0)
        self.prep()
        # Since corresponding bootstrap transaction use packet.index not self.index
        self.add(self.rxPacket.index)

    def receive(self, packet):
        '''
        Process received packet belonging to this transaction.
        '''
        super(Joinent, self).receive(packet) #  self.rxPacket = packet

        if packet.data['tk'] == raeting.trnsKinds.join:
            if packet.data['pk'] == raeting.pcktKinds.ack: # accepted by joiner
                self.joined()
            elif packet.data['pk'] == raeting.pcktKinds.nack: # rejected
                self.rejected()

    def process(self):
        '''
        Perform time based processing of transaction.

        Nack on overall timeout; otherwise with exponential backoff
        either redo the accept response, or re-check whether the remote's
        keys have since been accepted and if so send the accept.
        '''
        if self.timeout > 0.0 and self.timer.expired:
            self.nackJoin()
            console.concise("Joinent timed out at {0}\n".format(self.stack.store.stamp))
            return

        # need to perform the check for accepted status and then send accept
        if self.redoTimer.expired:
            duration = min(
                         max(self.redoTimeoutMin,
                             self.redoTimer.duration) * 2.0,
                         self.redoTimeoutMax)
            self.redoTimer.restart(duration=duration)

            if (self.txPacket and
                    self.txPacket.data['pk'] == raeting.pcktKinds.response):
                self.transmit(self.txPacket) # redo
                console.concise("Joinent Redo Accept at {0}\n".format(self.stack.store.stamp))
            else: # check to see if status has changed to accept
                remote = self.stack.estates[self.reid]
                if remote:
                    data = self.stack.safe.loadRemoteEstate(remote)
                    if data:
                        status = self.stack.safe.statusRemoteEstate(remote,
                                                                    data['verhex'],
                                                                    data['pubhex'])
                        if status == raeting.acceptances.accepted:
                            self.accept()

    def prep(self):
        '''
        Prepare .txData with header fields for outgoing packets.
        '''
        # since bootstrap transaction use the reversed seid and deid from packet
        self.txData.update( sh=self.stack.estate.host,
                            sp=self.stack.estate.port,
                            se=self.rxPacket.data['de'],
                            de=self.rxPacket.data['se'],
                            tk=self.kind,
                            cf=self.rmt,
                            bf=self.bcst,
                            si=self.sid,
                            ti=self.tid,
                            ck=raeting.coatKinds.nada,
                            fk=raeting.footKinds.nada,)

    def join(self):
        '''
        Process join packet
        Respond based on acceptance status of remote estate.

        Rules for Colliding Estates
        Apply the rules to ensure no colliding estates on (host, port)
        If matching name estate found then return
        Rules:
            Only one estate with given eid is allowed on road
            Only one estate with given name is allowed on road.
            Only one estate with given ha on road is allowed on road.

            Are multiple estates with same keys but different name (ha) allowed?
            Current logic ignores same keys or not

        Since creating new estate assigns unique eid,
        we are looking for preexisting estates with any eid.

        Processing steps:
        I) Search remote estates for matching name
            A) Found remote
                1) HA not match
                    Search remotes for other matching HA but different name
                    If found other delete
                Reuse found remote to be updated and joined
            B) Not found
                Search remotes for other matching HA
                If found delete for now
                Create new remote and update
        '''
        if not self.stack.parseInner(self.rxPacket):
            return
        data = self.rxPacket.data
        body = self.rxPacket.body.data

        name = body.get('name')
        if not name:
            emsg = "Missing remote name in join packet"
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_join')
            self.remove(self.rxPacket.index)
            return
            #raise raeting.TransactionError(emsg)

        verhex = body.get('verhex')
        if not verhex:
            emsg = "Missing remote verifier key in join packet"
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_join')
            self.remove(self.rxPacket.index)
            return
            #raise raeting.TransactionError(emsg)

        pubhex = body.get('pubhex')
        if not pubhex:
            emsg = "Missing remote crypt key in join packet"
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_join')
            self.remove(self.rxPacket.index)
            return
            #raise raeting.TransactionError(emsg)

        host = data['sh']
        port = data['sp']
        self.txData.update( dh=host, dp=port,) # responses use received host port

        remote = self.stack.fetchRemoteByName(name)
        if remote:
            if not (host == remote.host and port == remote.port):
                other = self.stack.fetchRemoteByHostPort(host, port)
                if other and other is not remote: # may need to terminate transactions
                    try:
                        self.stack.removeRemote(other.eid)
                    except raeting.StackError as ex:
                        # fix: str(ex) — Exception + str raises TypeError
                        console.terse(str(ex) + '\n')
                        self.stack.incStat(self.statKey())
                        self.remove(self.rxPacket.index)
                        return
                remote.host = host
                remote.port = port
            remote.rsid = self.sid
            remote.rtid = self.tid
            status = self.stack.safe.statusRemoteEstate(remote,
                                                        verhex=verhex,
                                                        pubhex=pubhex)
        else:
            other = self.stack.fetchRemoteByHostPort(host, port)
            if other: # may need to terminate transactions
                try:
                    self.stack.removeRemote(other.eid)
                except raeting.StackError as ex:
                    # fix: str(ex) — Exception + str raises TypeError
                    console.terse(str(ex) + '\n')
                    self.stack.incStat(self.statKey())
                    self.remove(self.rxPacket.index)
                    return

            remote = estating.RemoteEstate( stack=self.stack,
                                            name=name,
                                            host=host,
                                            port=port,
                                            acceptance=None,
                                            verkey=verhex,
                                            pubkey=pubhex,
                                            rsid=self.sid,
                                            rtid=self.tid, )
            try:
                self.stack.addRemote(remote) # provisionally add .accepted is None
            except raeting.StackError as ex:
                # fix: str(ex) — Exception + str raises TypeError
                console.terse(str(ex) + '\n')
                self.stack.incStat(self.statKey())
                self.remove(self.rxPacket.index)
                return
            status = self.stack.safe.statusRemoteEstate(remote,
                                                        verhex=verhex,
                                                        pubhex=pubhex)

        self.stack.dumpRemote(remote)
        self.reid = remote.eid # auto generated at instance creation above

        if status is None or status == raeting.acceptances.pending:
            self.ackJoin()
        elif status == raeting.acceptances.accepted:
            duration = min(
                        max(self.redoTimeoutMin,
                            self.redoTimer.duration) * 2.0,
                        self.redoTimeoutMax)
            self.redoTimer.restart(duration=duration)
            self.accept()
        else:
            self.nackJoin()
            emsg = "Estate {0} eid {1} keys rejected\n".format(
                    remote.name, remote.eid)
            console.terse(emsg)

    def ackJoin(self):
        '''
        Send ack to join request (acceptance still pending).
        '''
        if self.reid not in self.stack.estates:
            emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_remote_eid')
            self.remove(self.rxPacket.index)
            return

        #since bootstrap transaction use updated self.reid
        #self.txData.update( dh=self.stack.estates[self.reid].host,
                            #dp=self.stack.estates[self.reid].port,)
        body = odict()
        packet = packeting.TxPacket(stack=self.stack,
                                    kind=raeting.pcktKinds.ack,
                                    embody=body,
                                    data=self.txData)
        try:
            packet.pack()
        except raeting.PacketError as ex:
            # fix: str(ex) — Exception + str raises TypeError
            console.terse(str(ex) + '\n')
            self.stack.incStat("packing_error")
            self.remove(self.rxPacket.index)
            return

        self.transmit(packet)
        console.concise("Joinent Pending Accept at {0}\n".format(self.stack.store.stamp))

    def accept(self):
        '''
        Send accept response to join request, assigning estate ids and
        sharing our name and long term public keys.
        '''
        if self.reid not in self.stack.estates:
            emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_remote_eid')
            self.remove(self.rxPacket.index)
            return

        remote = self.stack.estates[self.reid]

        body = odict([ ('leid', self.reid),
                       ('reid', self.stack.estate.eid),
                       ('name', self.stack.estate.name),
                       ('verhex', self.stack.estate.signer.verhex),
                       ('pubhex', self.stack.estate.priver.pubhex)])
        packet = packeting.TxPacket(stack=self.stack,
                                    kind=raeting.pcktKinds.response,
                                    embody=body,
                                    data=self.txData)
        try:
            packet.pack()
        except raeting.PacketError as ex:
            # fix: str(ex) — Exception + str raises TypeError
            console.terse(str(ex) + '\n')
            self.stack.incStat("packing_error")
            self.remove(self.rxPacket.index)
            return

        self.transmit(packet)
        console.concise("Joinent Do Accept at {0}\n".format(self.stack.store.stamp))

    def joined(self):
        '''
        Process ack to accept response: mark remote joined and finish.
        '''
        if not self.stack.parseInner(self.rxPacket):
            return
        remote = self.stack.estates[self.reid]
        remote.joined = True # accepted
        remote.nextSid()
        self.stack.dumpRemote(remote)
        self.remove(self.rxPacket.index)
        self.stack.incStat("join_correspond_complete")

    def rejected(self):
        '''
        Process nack to accept response or stale.
        '''
        if not self.stack.parseInner(self.rxPacket):
            return
        remote = self.stack.estates[self.reid]
        # use presence to remove remote
        self.remove(self.rxPacket.index)
        console.terse("Joinent Rejected at {0}\n".format(self.stack.store.stamp))
        self.stack.incStat(self.statKey())

    def nackJoin(self):
        '''
        Send nack to join request (keys rejected or timed out).
        '''
        if self.reid not in self.stack.estates:
            emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_remote_eid')
            self.remove(self.rxPacket.index)
            return

        body = odict()
        packet = packeting.TxPacket(stack=self.stack,
                                    kind=raeting.pcktKinds.nack,
                                    embody=body,
                                    data=self.txData)
        try:
            packet.pack()
        except raeting.PacketError as ex:
            # fix: str(ex) — Exception + str raises TypeError
            console.terse(str(ex) + '\n')
            self.stack.incStat("packing_error")
            self.remove(self.rxPacket.index)
            return

        self.transmit(packet)
        self.remove(self.rxPacket.index)
        console.terse("Joinent Reject at {0}\n".format(self.stack.store.stamp))
        self.stack.incStat(self.statKey())
class Allower(Initiator):
    '''
    RAET protocol Allower Initiator class, dual of Allowent.
    CurveCP style handshake to establish short term session keys.
    '''
    Timeout = 4.0
    RedoTimeoutMin = 0.25 # initial timeout
    RedoTimeoutMax = 1.0 # max timeout

    def __init__(self, redoTimeoutMin=None, redoTimeoutMax=None, **kwa):
        '''
        Setup instance.

        Aborts (without adding the transaction) if the remote estate is
        not yet joined.
        '''
        kwa['kind'] = raeting.trnsKinds.allow
        super(Allower, self).__init__(**kwa)
        self.oreo = None # cookie from correspondent needed until handshake completed
        self.redoTimeoutMax = redoTimeoutMax or self.RedoTimeoutMax
        self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin
        self.redoTimer = aiding.StoreTimer(self.stack.store,
                                           duration=self.redoTimeoutMin)
        if self.reid is None:
            # list() so this works under both py2 list and py3 view semantics
            self.reid = list(self.stack.estates.values())[0].eid # zeroth is channel master
        remote = self.stack.estates[self.reid]
        if not remote.joined:
            emsg = "Must be joined first"
            console.terse(emsg + '\n')
            self.stack.incStat('unjoined_allow_attempt')
            return
            #raise raeting.TransactionError(emsg)
        remote.refresh() # refresh short term keys and .allowed
        self.sid = remote.sid
        self.tid = remote.nextTid()
        self.prep() # prepare .txData
        self.add(self.index)

    def receive(self, packet):
        '''
        Process received packet belonging to this transaction.
        '''
        super(Allower, self).receive(packet) #  self.rxPacket = packet

        if packet.data['tk'] == raeting.trnsKinds.allow:
            if packet.data['pk'] == raeting.pcktKinds.cookie:
                self.cookie()
            elif packet.data['pk'] == raeting.pcktKinds.ack:
                self.allow()
            elif packet.data['pk'] == raeting.pcktKinds.nack: # rejected
                self.rejected()

    def process(self):
        '''
        Perform time based processing of transaction.

        Remove on overall timeout; otherwise redo the last hello,
        initiate, or final ack with exponential backoff capped at
        .redoTimeoutMax.
        '''
        if self.timeout > 0.0 and self.timer.expired:
            self.remove()
            console.concise("Allower timed out at {0}\n".format(self.stack.store.stamp))
            return

        # need keep sending until accepted or timed out
        if self.redoTimer.expired:
            # fix: cap backoff at redoTimeoutMax (original clamped to the min,
            # collapsing the exponential backoff)
            duration = min(
                        max(self.redoTimeoutMin,
                            self.redoTimer.duration) * 2.0,
                        self.redoTimeoutMax)
            self.redoTimer.restart(duration=duration)
            if self.txPacket:
                if self.txPacket.data['pk'] == raeting.pcktKinds.hello:
                    self.transmit(self.txPacket) # redo
                    console.concise("Allower Redo Hello at {0}\n".format(self.stack.store.stamp))

                if self.txPacket.data['pk'] == raeting.pcktKinds.initiate:
                    self.transmit(self.txPacket) # redo
                    console.concise("Allower Redo Initiate at {0}\n".format(self.stack.store.stamp))

                if self.txPacket.data['pk'] == raeting.pcktKinds.ack:
                    self.transmit(self.txPacket) # redo
                    console.concise("Allower Redo Ack Final at {0}\n".format(self.stack.store.stamp))

    def prep(self):
        '''
        Prepare .txData with header fields for outgoing packets.
        '''
        remote = self.stack.estates[self.reid]
        self.txData.update( sh=self.stack.estate.host,
                            sp=self.stack.estate.port,
                            dh=remote.host,
                            dp=remote.port,
                            se=self.stack.estate.eid,
                            de=self.reid,
                            tk=self.kind,
                            cf=self.rmt,
                            bf=self.bcst,
                            si=self.sid,
                            ti=self.tid, )

    def hello(self):
        '''
        Send hello request: our ephemeral short term public key plus a
        zero-filled challenge encrypted to the remote's long term key.
        '''
        if self.reid not in self.stack.estates:
            emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_remote_eid')
            self.remove()
            return

        remote = self.stack.estates[self.reid]
        plain = binascii.hexlify("".rjust(32, '\x00'))
        cipher, nonce = remote.privee.encrypt(plain, remote.pubber.key)
        body = raeting.HELLO_PACKER.pack(plain, remote.privee.pubraw, cipher, nonce)

        packet = packeting.TxPacket(stack=self.stack,
                                    kind=raeting.pcktKinds.hello,
                                    embody=body,
                                    data=self.txData)
        try:
            packet.pack()
        except raeting.PacketError as ex:
            # fix: str(ex) — Exception + str raises TypeError
            console.terse(str(ex) + '\n')
            self.stack.incStat("packing_error")
            self.remove()
            return
        self.transmit(packet)
        console.concise("Allower Do Hello at {0}\n".format(self.stack.store.stamp))

    def cookie(self):
        '''
        Process cookie packet.

        Decrypts the cookie stuff, validates the estate ids, then stores
        the oreo cookie and the remote's short term key before sending
        the initiate.
        '''
        if not self.stack.parseInner(self.rxPacket):
            return
        body = self.rxPacket.body.data

        # body must still be the raw packed string, not a parsed mapping
        if not isinstance(body, basestring):
            emsg = "Invalid format of cookie packet body"
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_cookie')
            self.remove()
            return
            #raise raeting.TransactionError(emsg)

        if len(body) != raeting.COOKIE_PACKER.size:
            emsg = "Invalid length of cookie packet body"
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_cookie')
            self.remove()
            return
            #raise raeting.TransactionError(emsg)

        cipher, nonce = raeting.COOKIE_PACKER.unpack(body)

        remote = self.stack.estates[self.reid]
        msg = remote.privee.decrypt(cipher, nonce, remote.pubber.key)
        if len(msg) != raeting.COOKIESTUFF_PACKER.size:
            emsg = "Invalid length of cookie stuff"
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_cookie')
            self.remove()
            return
            #raise raeting.TransactionError(emsg)

        shortraw, seid, deid, oreo = raeting.COOKIESTUFF_PACKER.unpack(msg)

        if seid != remote.eid or deid != self.stack.estate.eid:
            emsg = "Invalid seid or deid fields in cookie stuff"
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_cookie')
            self.remove()
            return
            #raeting.TransactionError(emsg)

        self.oreo = binascii.hexlify(oreo)
        remote.publee = nacling.Publican(key=shortraw)

        self.initiate()

    def initiate(self):
        '''
        Send initiate request in response to the cookie.

        Packs our long term key, the vouch (our short term key encrypted
        under our long term key), and our fqdn; encrypts to the remote's
        short term key and echoes the oreo cookie.
        '''
        if self.reid not in self.stack.estates:
            emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_remote_eid')
            self.remove()
            return
        remote = self.stack.estates[self.reid]

        vcipher, vnonce = self.stack.estate.priver.encrypt(remote.privee.pubraw,
                                                           remote.pubber.key)

        fqdn = remote.fqdn.ljust(128, ' ')
        stuff = raeting.INITIATESTUFF_PACKER.pack(self.stack.estate.priver.pubraw,
                                                  vcipher,
                                                  vnonce,
                                                  fqdn)

        cipher, nonce = remote.privee.encrypt(stuff, remote.publee.key)

        oreo = binascii.unhexlify(self.oreo)
        body = raeting.INITIATE_PACKER.pack(remote.privee.pubraw,
                                            oreo,
                                            cipher,
                                            nonce)

        packet = packeting.TxPacket(stack=self.stack,
                                    kind=raeting.pcktKinds.initiate,
                                    embody=body,
                                    data=self.txData)
        try:
            packet.pack()
        except raeting.PacketError as ex:
            # fix: str(ex) — Exception + str raises TypeError
            console.terse(str(ex) + '\n')
            self.stack.incStat("packing_error")
            self.remove()
            return

        self.transmit(packet)
        console.concise("Allower Do Initiate at {0}\n".format(self.stack.store.stamp))

    def allow(self):
        '''
        Process ackInitiate packet
        Perform allowment in response to ack to initiate packet
        '''
        if not self.stack.parseInner(self.rxPacket):
            return
        self.stack.estates[self.reid].allowed = True
        self.ackFinal()

    def rejected(self):
        '''
        Process nack packet: terminate in response to nack.
        '''
        if not self.stack.parseInner(self.rxPacket):
            return
        self.remove()
        console.concise("Allower rejected at {0}\n".format(self.stack.store.stamp))
        self.stack.incStat(self.statKey())

    def ackFinal(self):
        '''
        Send final ack to complete the handshake and end the transaction.
        '''
        if self.reid not in self.stack.estates:
            emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_remote_eid')
            self.remove()
            return

        body = ""
        packet = packeting.TxPacket(stack=self.stack,
                                    kind=raeting.pcktKinds.ack,
                                    embody=body,
                                    data=self.txData)
        try:
            packet.pack()
        except raeting.PacketError as ex:
            # fix: str(ex) — Exception + str raises TypeError
            console.terse(str(ex) + '\n')
            self.stack.incStat("packing_error")
            self.remove()
            return

        self.transmit(packet)
        self.remove()
        console.concise("Allower Ack Final at {0}\n".format(self.stack.store.stamp))
        self.stack.incStat("allow_initiate_complete")
class Allowent(Correspondent):
'''
RAET protocol Allowent Correspondent class Dual of Allower
CurveCP handshake
'''
Timeout = 4.0
RedoTimeoutMin = 0.25 # initial timeout
RedoTimeoutMax = 1.0 # max timeout
def __init__(self, redoTimeoutMin=None, redoTimeoutMax=None, **kwa):
    '''
    Setup instance.

    Requires a 'reid' keyword argument. Aborts (without adding the
    transaction) if the remote is not yet joined or the session id is
    stale.
    '''
    kwa['kind'] = raeting.trnsKinds.allow
    if 'reid' not in kwa:
        # fix: original message had typo "argumens"
        emsg = "Missing required keyword arguments: '{0}'".format('reid')
        raise TypeError(emsg)
    super(Allowent, self).__init__(**kwa)
    self.redoTimeoutMax = redoTimeoutMax or self.RedoTimeoutMax
    self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin
    self.redoTimer = aiding.StoreTimer(self.stack.store,
                                       duration=self.redoTimeoutMin)
    remote = self.stack.estates[self.reid]
    if not remote.joined:
        emsg = "Must be joined first"
        console.terse(emsg + '\n')
        self.stack.incStat('unjoined_allow_attempt')
        return
        #raise raeting.TransactionError(emsg)
    # Current .sid was set by stack from rxPacket.data sid so it is the new rsid
    if not remote.validRsid(self.sid):
        emsg = "Stale sid '{0}' in packet".format(self.sid)
        console.terse(emsg + '\n')
        self.stack.incStat('stale_sid_allow_attempt')
        return
        #raise raeting.TransactionError(emsg)
    remote.rsid = self.sid # update last received rsid for estate
    remote.rtid = self.tid # update last received rtid for estate
    self.oreo = None # keep locally generated oreo around for redos
    remote.refresh() # refresh short term keys and .allowed
    self.prep() # prepare .txData
    self.add(self.index)
def receive(self, packet):
    '''
    Dispatch a received allow-transaction packet to its handler.
    '''
    super(Allowent, self).receive(packet)  # stores packet as self.rxPacket
    if packet.data['tk'] != raeting.trnsKinds.allow:
        return
    pk = packet.data['pk']
    if pk == raeting.pcktKinds.hello:
        self.hello()
    elif pk == raeting.pcktKinds.initiate:
        self.initiate()
    elif pk == raeting.pcktKinds.ack:
        self.final()
    elif pk == raeting.pcktKinds.nack:  # remote rejected the handshake
        self.rejected()
def process(self):
    '''
    Time based processing: nack on overall timeout, otherwise redo the
    last cookie or ack packet with exponential backoff.
    '''
    if self.timeout > 0.0 and self.timer.expired:
        self.nack()
        console.concise("Allowent timed out at {0}\n".format(self.stack.store.stamp))
        return

    if not self.redoTimer.expired:
        return

    # double the redo period, clamped to [redoTimeoutMin, redoTimeoutMax]
    backoff = max(self.redoTimeoutMin, self.redoTimer.duration) * 2.0
    self.redoTimer.restart(duration=min(backoff, self.redoTimeoutMax))

    if not self.txPacket:
        return
    kind = self.txPacket.data['pk']
    if kind == raeting.pcktKinds.cookie:
        self.transmit(self.txPacket)  # redo
        console.concise("Allowent Redo Cookie at {0}\n".format(self.stack.store.stamp))
    if kind == raeting.pcktKinds.ack:
        self.transmit(self.txPacket)  # redo
        console.concise("Allowent Redo Ack at {0}\n".format(self.stack.store.stamp))
def prep(self):
    '''
    Load .txData with the header fields used for outgoing packets.
    '''
    remote = self.stack.estates[self.reid]
    local = self.stack.estate
    self.txData.update(sh=local.host,
                       sp=local.port,
                       dh=remote.host,
                       dp=remote.port,
                       se=local.eid,
                       de=self.reid,
                       tk=self.kind,
                       cf=self.rmt,
                       bf=self.bcst,
                       si=self.sid,
                       ti=self.tid, )
def hello(self):
    '''
    Process hello packet from the initiator.

    Body layout is raeting.HELLO_PACKER: (plain, shortraw, cipher, nonce),
    where shortraw is the initiator's ephemeral short term public key and
    cipher/nonce hold plain encrypted to our long term key. Verifies the
    challenge round trips, then sends the cookie response.
    '''
    if not self.stack.parseInner(self.rxPacket):
        return
    data = self.rxPacket.data
    body = self.rxPacket.body.data
    # body must still be the raw packed string, not a parsed mapping
    # NOTE(review): basestring is Python 2 only — confirm intended runtime
    if not isinstance(body, basestring):
        emsg = "Invalid format of hello packet body"
        console.terse(emsg + '\n')
        self.stack.incStat('invalid_hello')
        self.remove()
        return
        #raise raeting.TransactionError(emsg)
    if len(body) != raeting.HELLO_PACKER.size:
        emsg = "Invalid length of hello packet body"
        console.terse(emsg + '\n')
        self.stack.incStat('invalid_hello')
        self.remove()
        return
        #raise raeting.TransactionError(emsg)
    plain, shortraw, cipher, nonce = raeting.HELLO_PACKER.unpack(body)
    remote = self.stack.estates[self.reid]
    # remember the initiator's short term public key for the rest of the handshake
    remote.publee = nacling.Publican(key=shortraw)
    # decrypt with our long term private key and the remote's short term public key
    msg = self.stack.estate.priver.decrypt(cipher, nonce, remote.publee.key)
    if msg != plain :
        emsg = "Invalid plain not match decrypted cipher"
        console.terse(emsg + '\n')
        self.stack.incStat('invalid_hello')
        self.remove()
        return
        #raise raeting.TransactionError(emsg)
    self.cookie()
def cookie(self):
    '''
    Send cookie packet in response to a valid hello.

    Packs (our short term public key, our eid, remote eid, oreo nonce)
    with COOKIESTUFF_PACKER, encrypts it to the remote's short term key,
    and keeps the hexlified oreo so redo initiates can be validated.
    '''
    if self.reid not in self.stack.estates:
        emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
        #raise raeting.TransactionError(emsg)
        console.terse(emsg + '\n')
        self.stack.incStat('invalid_remote_eid')
        self.remove()
        return

    remote = self.stack.estates[self.reid]
    oreo = self.stack.estate.priver.nonce()  # fresh cookie nonce
    self.oreo = binascii.hexlify(oreo)  # retained to check against initiate

    stuff = raeting.COOKIESTUFF_PACKER.pack(remote.privee.pubraw,
                                            self.stack.estate.eid,
                                            remote.eid,
                                            oreo)

    cipher, nonce = self.stack.estate.priver.encrypt(stuff, remote.publee.key)
    body = raeting.COOKIE_PACKER.pack(cipher, nonce)

    packet = packeting.TxPacket(stack=self.stack,
                                kind=raeting.pcktKinds.cookie,
                                embody=body,
                                data=self.txData)
    try:
        packet.pack()
    except raeting.PacketError as ex:
        # NOTE(review): ex + '\n' raises TypeError; should be str(ex) + '\n'
        console.terse(ex + '\n')
        self.stack.incStat("packing_error")
        self.remove()
        return
    self.transmit(packet)
    console.concise("Allowent Do Cookie at {0}\n".format(self.stack.store.stamp))
def initiate(self):
    '''
    Process initiate packet.

    Body layout is raeting.INITIATE_PACKER: (shortraw, oreo, cipher, nonce).
    Validates the echoed short term key and cookie, decrypts the initiate
    stuff (long term key, vouch, fqdn), cross-checks everything, then acks.
    '''
    if not self.stack.parseInner(self.rxPacket):
        return
    data = self.rxPacket.data
    body = self.rxPacket.body.data
    # body must still be the raw packed string, not a parsed mapping
    # NOTE(review): basestring is Python 2 only — confirm intended runtime
    if not isinstance(body, basestring):
        emsg = "Invalid format of initiate packet body"
        console.terse(emsg + '\n')
        self.stack.incStat('invalid_initiate')
        self.remove()
        return
        #raise raeting.TransactionError(emsg)
    if len(body) != raeting.INITIATE_PACKER.size:
        emsg = "Invalid length of initiate packet body"
        console.terse(emsg + '\n')
        self.stack.incStat('invalid_initiate')
        self.remove()
        return
        #raise raeting.TransactionError(emsg)
    shortraw, oreo, cipher, nonce = raeting.INITIATE_PACKER.unpack(body)
    remote = self.stack.estates[self.reid]
    # echoed short term key must match the one captured during hello
    if shortraw != remote.publee.keyraw:
        emsg = "Mismatch of short term public key in initiate packet"
        console.terse(emsg + '\n')
        self.stack.incStat('invalid_initiate')
        self.remove()
        return
        #raise raeting.TransactionError(emsg)
    # cookie must match the oreo we handed out in our cookie packet
    if (binascii.hexlify(oreo) != self.oreo):
        emsg = "Stale or invalid cookie in initiate packet"
        console.terse(emsg + '\n')
        self.stack.incStat('invalid_initiate')
        self.remove()
        return
        #raise raeting.TransactionError(emsg)
    msg = remote.privee.decrypt(cipher, nonce, remote.publee.key)
    if len(msg) != raeting.INITIATESTUFF_PACKER.size:
        emsg = "Invalid length of initiate stuff"
        console.terse(emsg + '\n')
        self.stack.incStat('invalid_initiate')
        self.remove()
        return
        #raise raeting.TransactionError(emsg)
    pubraw, vcipher, vnonce, fqdn = raeting.INITIATESTUFF_PACKER.unpack(msg)
    # long term key inside the encrypted stuff must match the known remote key
    if pubraw != remote.pubber.keyraw:
        emsg = "Mismatch of long term public key in initiate stuff"
        console.terse(emsg + '\n')
        self.stack.incStat('invalid_initiate')
        self.remove()
        return
        #raise raeting.TransactionError(emsg)
    # strip the space padding the initiator applied with ljust(128, ' ')
    fqdn = fqdn.rstrip(' ')
    if fqdn != self.stack.estate.fqdn:
        emsg = "Mismatch of fqdn in initiate stuff"
        console.terse(emsg + '\n')
        self.stack.incStat('invalid_initiate')
        self.remove()
        return
        #raise raeting.TransactionError(emsg)
    # vouch is the short term key encrypted under the remote's long term key
    vouch = self.stack.estate.priver.decrypt(vcipher, vnonce, remote.pubber.key)
    if vouch != remote.publee.keyraw or vouch != shortraw:
        emsg = "Short term key vouch failed"
        console.terse(emsg + '\n')
        self.stack.incStat('invalid_initiate')
        self.remove()
        return
        #raise raeting.TransactionError(emsg)
    self.ackInitiate()
def ackInitiate(self):
    '''
    Send ack to initiate request

    Aborts (with a stat) if the remote estate id is no longer in the
    stack. Otherwise transmits an empty-body ack packet and then marks
    the remote estate as allowed via .allow().
    '''
    if self.reid not in self.stack.estates:
        # Fix: original bound the message to 'msg' but then used the
        # undefined name 'emsg', raising NameError on this error path.
        emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
        #raise raeting.TransactionError(emsg)
        console.terse(emsg + '\n')
        self.stack.incStat('invalid_remote_eid')
        self.remove()
        return

    body = ""
    packet = packeting.TxPacket(stack=self.stack,
                                kind=raeting.pcktKinds.ack,
                                embody=body,
                                data=self.txData)
    try:
        packet.pack()
    except raeting.PacketError as ex:
        console.terse(ex + '\n')
        self.stack.incStat("packing_error")
        self.remove()
        return

    self.transmit(packet)
    console.concise("Allowent Do Ack at {0}\n".format(self.stack.store.stamp))
    self.allow()
def allow(self):
    '''
    Perform allowment

    Flags the remote estate of this transaction as allowed so
    subsequent message transactions with it are permitted.
    '''
    remote = self.stack.estates[self.reid]
    remote.allowed = True
def final(self):
    '''
    Process ackFinal packet

    Drops the packet if its inner portion fails to parse; otherwise
    tears down this transaction and records successful completion.
    '''
    parsed = self.stack.parseInner(self.rxPacket)
    if not parsed:
        return
    self.remove()
    console.concise("Allowent Do Final at {0}\n".format(self.stack.store.stamp))
    self.stack.incStat("allow_correspond_complete")
def rejected(self):
    '''
    Process nack packet
    terminate in response to nack

    Parses the inner packet, and if valid removes this transaction
    and bumps the per-transaction rejection stat.
    '''
    parsed = self.stack.parseInner(self.rxPacket)
    if not parsed:
        return
    self.remove()
    console.concise("Allowent rejected at {0}\n".format(self.stack.store.stamp))
    self.stack.incStat(self.statKey())
def nack(self):
    '''
    Send nack to terminate allower transaction

    Aborts early (with a stat) if the remote estate vanished from the
    stack. Otherwise transmits a nack packet and removes this
    transaction either way.
    '''
    if self.reid not in self.stack.estates:
        emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
        #raise raeting.TransactionError(emsg)
        console.terse(emsg + '\n')
        self.stack.incStat('invalid_remote_eid')
        self.remove()
        return

    packet = packeting.TxPacket(stack=self.stack,
                                kind=raeting.pcktKinds.nack,
                                embody=odict(),
                                data=self.txData)
    try:
        packet.pack()
    except raeting.PacketError as err:
        console.terse(err + '\n')
        self.stack.incStat("packing_error")
        self.remove()
        return

    self.transmit(packet)
    self.remove()
    console.concise("Allowent Reject at {0}\n".format(self.stack.store.stamp))
    self.stack.incStat(self.statKey())
class Messenger(Initiator):
    '''
    RAET protocol Messenger Initiator class Dual of Messengent
    Generic messages

    Segments a message body into a TxTray of packets and transmits one
    segment per received ack, redoing the current segment on a timer
    with exponential backoff between redoTimeoutMin and redoTimeoutMax.
    '''
    Timeout = 10.0
    RedoTimeoutMin = 1.0 # initial timeout
    RedoTimeoutMax = 3.0 # max timeout

    def __init__(self, redoTimeoutMin=None, redoTimeoutMax=None, **kwa):
        '''
        Setup instance

        redoTimeoutMin/redoTimeoutMax bound the redo backoff interval.
        If no remote estate id is given, the zeroth estate (channel
        master) is used. The remote must already be allowed; otherwise
        setup aborts early, leaving the transaction unregistered.
        '''
        kwa['kind'] = raeting.trnsKinds.message
        super(Messenger, self).__init__(**kwa)
        self.redoTimeoutMax = redoTimeoutMax or self.RedoTimeoutMax
        self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin
        self.redoTimer = aiding.StoreTimer(self.stack.store,
                                           duration=self.redoTimeoutMin)
        if self.reid is None:
            self.reid = self.stack.estates.values()[0].eid # zeroth is channel master
        remote = self.stack.estates[self.reid]
        if not remote.allowed:
            emsg = "Must be allowed first"
            console.terse(emsg + '\n')
            self.stack.incStat('unallowed_message_attempt')
            # NOTE(review): early return leaves .sid/.tid/.tray unset and the
            # transaction not added to the stack — callers must not use it.
            return
            #raise raeting.TransactionError(emsg)
        self.sid = remote.sid
        self.tid = remote.nextTid()
        self.prep() # prepare .txData
        self.tray = packeting.TxTray(stack=self.stack)
        self.add(self.index)

    def receive(self, packet):
        """
        Process received packet belonging to this transaction

        An ack advances to the next segment (or completes); a nack
        terminates the transaction.
        """
        super(Messenger, self).receive(packet)

        if packet.data['tk'] == raeting.trnsKinds.message:
            if packet.data['pk'] == raeting.pcktKinds.ack:
                self.again()
            elif packet.data['pk'] == raeting.pcktKinds.nack: # rejected
                self.rejected()

    def process(self):
        '''
        Perform time based processing of transaction

        Removes the transaction on overall timeout. Otherwise, when the
        redo timer expires, doubles the redo interval (clamped to
        redoTimeoutMax) and retransmits the current message segment.
        '''
        if self.timeout > 0.0 and self.timer.expired:
            self.remove()
            console.concise("Messenger timed out at {0}\n".format(self.stack.store.stamp))
            return

        # need keep sending message until completed or timed out
        if self.redoTimer.expired:
            # Fix: the upper clamp was self.redoTimeoutMin, which capped the
            # backoff at the minimum and defeated exponential backoff. Clamp
            # to redoTimeoutMax instead (matches the Messengent dual).
            duration = min(
                         max(self.redoTimeoutMin,
                              self.redoTimer.duration) * 2.0,
                         self.redoTimeoutMax)
            self.redoTimer.restart(duration=duration)
            if self.txPacket:
                if self.txPacket.data['pk'] == raeting.pcktKinds.message:
                    self.transmit(self.txPacket) # redo
                    console.concise("Messenger Redo Segment {0} at {1}\n".format(
                        self.tray.current, self.stack.store.stamp))

    def prep(self):
        '''
        Prepare .txData

        Fills in source/destination host, port and estate ids plus the
        transaction kind, flags, session id and transaction id used to
        head every outgoing packet.
        '''
        remote = self.stack.estates[self.reid]
        self.txData.update( sh=self.stack.estate.host,
                            sp=self.stack.estate.port,
                            dh=remote.host,
                            dp=remote.port,
                            se=self.stack.estate.eid,
                            de=self.reid,
                            tk=self.kind,
                            cf=self.rmt,
                            bf=self.bcst,
                            si=self.sid,
                            ti=self.tid,)

    def message(self, body=None):
        '''
        Send message

        Packs body into the tray on first call, then transmits the
        current segment and advances the segment cursor.
        '''
        if self.reid not in self.stack.estates:
            emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_remote_eid')
            self.remove()
            return

        if not self.tray.packets:
            try:
                self.tray.pack(data=self.txData, body=body)
            except raeting.PacketError as ex:
                console.terse(ex + '\n')
                self.stack.incStat("packing_error")
                self.remove()
                return

        if self.tray.current >= len(self.tray.packets):
            return

        packet = self.tray.packets[self.tray.current]
        self.transmit(packet)
        self.stack.incStat("message_segment_tx")
        console.concise("Messenger Do Message Segment {0} at {1}\n".format(
                self.tray.current, self.stack.store.stamp))
        self.tray.current += 1

    def again(self):
        '''
        Process ack packet

        Completes the transaction when all segments are sent, otherwise
        sends the next segment.
        '''
        if self.tray.current >= len(self.tray.packets):
            self.complete()
        else:
            self.message()

    def complete(self):
        '''
        Complete transaction and remove
        '''
        self.remove()
        console.concise("Messenger Done at {0}\n".format(self.stack.store.stamp))
        self.stack.incStat("message_initiate_complete")

    def rejected(self):
        '''
        Process nack packet
        terminate in response to nack
        '''
        if not self.stack.parseInner(self.rxPacket):
            return
        self.remove()
        console.concise("Messenger rejected at {0}\n".format(self.stack.store.stamp))
        self.stack.incStat(self.statKey())
class Messengent(Correspondent):
    '''
    RAET protocol Messengent Correspondent class Dual of Messenger
    Generic Messages

    Receives message segments into an RxTray, acking each segment, and
    delivers the reassembled body to the stack when complete.
    '''
    Timeout = 10.0
    RedoTimeoutMin = 1.0 # initial timeout
    RedoTimeoutMax = 3.0 # max timeout

    def __init__(self, redoTimeoutMin=None, redoTimeoutMax=None, **kwa):
        '''
        Setup instance

        Requires the 'reid' keyword identifying the remote estate.
        The remote must already be allowed and the incoming session id
        must be valid; otherwise setup aborts early, leaving the
        transaction unregistered.

        Raises:
            TypeError: if 'reid' is not supplied.
        '''
        kwa['kind'] = raeting.trnsKinds.message
        if 'reid' not in  kwa:
            # Fix: corrected typo "argumens" in the error message.
            emsg = "Missing required keyword arguments: '{0}'".format('reid')
            raise TypeError(emsg)
        super(Messengent, self).__init__(**kwa)
        self.redoTimeoutMax = redoTimeoutMax or self.RedoTimeoutMax
        self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin
        self.redoTimer = aiding.StoreTimer(self.stack.store,
                                           duration=self.redoTimeoutMin)
        remote = self.stack.estates[self.reid]
        if not remote.allowed:
            emsg = "Must be allowed first"
            console.terse(emsg + '\n')
            self.stack.incStat('unallowed_message_attempt')
            return
            #raise raeting.TransactionError(emsg)
        #Current .sid was set by stack from rxPacket.data sid so it is the new rsid
        if not remote.validRsid(self.sid):
            emsg = "Stale sid '{0}' in packet".format(self.sid)
            console.terse(emsg + '\n')
            self.stack.incStat('stale_sid_message_attempt')
            return
            #raise raeting.TransactionError(emsg)
        remote.rsid = self.sid #update last received rsid for estate
        remote.rtid = self.tid #update last received rtid for estate
        self.prep() # prepare .txData
        self.tray = packeting.RxTray(stack=self.stack)
        self.add(self.index)

    def receive(self, packet):
        """
        Process received packet belonging to this transaction

        A message packet (including a resend) is parsed into the tray;
        a nack terminates the transaction.
        """
        super(Messengent, self).receive(packet)

        # resent message
        if packet.data['tk'] == raeting.trnsKinds.message:
            if packet.data['pk'] == raeting.pcktKinds.message:
                self.message()
            elif packet.data['pk'] == raeting.pcktKinds.nack: # rejected
                self.rejected()

    def process(self):
        '''
        Perform time based processing of transaction

        Nacks the initiator on overall timeout. Redo-of-ack logic is
        intentionally disabled (kept commented below pending a design
        that includes the current segment in the ack).
        '''
        if self.timeout > 0.0 and self.timer.expired:
            self.nack()
            console.concise("Messengent timed out at {0}\n".format(self.stack.store.stamp))
            return

        # need to include current segment in ack or resend
        #if self.redoTimer.expired:
            #duration = min(
                         #max(self.redoTimeoutMin,
                              #self.redoTimer.duration) * 2.0,
                         #self.redoTimeoutMax)
            #self.redoTimer.restart(duration=duration)
            #if self.txPacket:
                #if self.txPacket.data['pk'] == raeting.pcktKinds.ack:
                    #self.transmit(self.txPacket) #redo
                    #console.concise("Messengent Redo Ack at {0}\n".format(self.stack.store.stamp))

    def prep(self):
        '''
        Prepare .txData

        Fills in source/destination host, port and estate ids plus the
        transaction kind, flags, session id and transaction id used to
        head every outgoing packet.
        '''
        remote = self.stack.estates[self.reid]
        self.txData.update( sh=self.stack.estate.host,
                            sp=self.stack.estate.port,
                            dh=remote.host,
                            dp=remote.port,
                            se=self.stack.estate.eid,
                            de=self.reid,
                            tk=self.kind,
                            cf=self.rmt,
                            bf=self.bcst,
                            si=self.sid,
                            ti=self.tid,)

    def message(self):
        '''
        Process message packet

        Parses the segment into the rx tray, acks it, and when the tray
        is complete delivers the reassembled body and completes.
        '''
        try:
            body = self.tray.parse(self.rxPacket)
        except raeting.PacketError as ex:
            console.terse(ex + '\n')
            # Fix: original called self.incStat, but stats live on the
            # stack (every other call here is self.stack.incStat).
            self.stack.incStat('parsing_message_error')
            self.remove()
            return

        self.ackMessage()

        if self.tray.complete:
            console.verbose("{0} received message body\n{1}\n".format(
                    self.stack.name, body))
            self.stack.rxMsgs.append(body)
            self.complete()

    def ackMessage(self):
        '''
        Send ack to message

        Aborts (with a stat) if the remote estate id is no longer in
        the stack; otherwise transmits an ack packet for the segment.
        '''
        if self.reid not in self.stack.estates:
            # Fix: original bound the message to 'msg' but then used the
            # undefined name 'emsg', raising NameError on this error path.
            emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
            #raise raeting.TransactionError(emsg)
            console.terse(emsg + '\n')
            self.stack.incStat('invalid_remote_eid')
            self.remove()
            return

        body = odict()
        packet = packeting.TxPacket(stack=self.stack,
                                    kind=raeting.pcktKinds.ack,
                                    embody=body,
                                    data=self.txData)
        try:
            packet.pack()
        except raeting.PacketError as ex:
            console.terse(ex + '\n')
            self.stack.incStat("packing_error")
            self.remove()
            return

        self.transmit(packet)
        self.stack.incStat("message_segment_rx")
        console.concise("Messengent Do Ack Segment at {0}\n".format(
                self.stack.store.stamp))

    def complete(self):
        '''
        Complete transaction and remove
        '''
        self.remove()
        console.concise("Messengent Complete at {0}\n".format(self.stack.store.stamp))
        self.stack.incStat("messagent_correspond_complete")

    def rejected(self):
        '''
        Process nack packet
        terminate in response to nack
        '''
        if not self.stack.parseInner(self.rxPacket):
            return
        self.remove()
        console.concise("Messengent rejected at {0}\n".format(self.stack.store.stamp))
        self.stack.incStat(self.statKey())
| 36.166484
| 101
| 0.53569
| 6,812
| 66,040
| 5.163241
| 0.068262
| 0.070624
| 0.038667
| 0.032384
| 0.760662
| 0.739224
| 0.722706
| 0.701496
| 0.684152
| 0.660923
| 0
| 0.003953
| 0.364173
| 66,040
| 1,825
| 102
| 36.186301
| 0.833675
| 0.136024
| 0
| 0.727569
| 0
| 0
| 0.083186
| 0.007479
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058728
| false
| 0.001631
| 0.010604
| 0
| 0.165579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
62d4cd45674461744b21a8a5ec5220501c5b99c3
| 42
|
py
|
Python
|
src/strava_offline/__main__.py
|
liskin/strava-offline
|
f572a4658fba98d91cde995edb8241509fe1a9f3
|
[
"MIT"
] | 24
|
2020-09-28T19:37:49.000Z
|
2022-02-28T05:35:17.000Z
|
src/strava_offline/__main__.py
|
liskin/strava-offline
|
f572a4658fba98d91cde995edb8241509fe1a9f3
|
[
"MIT"
] | 1
|
2020-09-28T13:04:46.000Z
|
2020-09-28T13:04:54.000Z
|
src/strava_offline/__main__.py
|
liskin/strava-offline
|
f572a4658fba98d91cde995edb8241509fe1a9f3
|
[
"MIT"
] | 2
|
2020-09-29T09:41:12.000Z
|
2021-04-22T02:27:44.000Z
|
from strava_offline.cli import cli
cli()
| 10.5
| 34
| 0.785714
| 7
| 42
| 4.571429
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 42
| 3
| 35
| 14
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c50bb6dc7392aa08bc4c6cf2be07c70e0d80403f
| 179
|
py
|
Python
|
natml/__init__.py
|
natsuite/NatML-Py
|
42c86bb64b5550a329860230ee54ef02ff12e03c
|
[
"Apache-2.0"
] | 2
|
2022-01-27T05:29:34.000Z
|
2022-01-27T20:20:01.000Z
|
natml/__init__.py
|
natmlx/NatML-Py
|
42c86bb64b5550a329860230ee54ef02ff12e03c
|
[
"Apache-2.0"
] | 1
|
2021-11-06T14:39:59.000Z
|
2021-11-06T17:38:00.000Z
|
natml/__init__.py
|
natmlx/NatML-Py
|
42c86bb64b5550a329860230ee54ef02ff12e03c
|
[
"Apache-2.0"
] | null | null | null |
#
# NatML
# Copyright (c) 2022 Yusuf Olokoba.
#
from .predictor import MLModelData
from .model import MLModel
from .session import MLSession
from .version import __version__
| 19.888889
| 37
| 0.765363
| 22
| 179
| 6.045455
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 0.173184
| 179
| 9
| 38
| 19.888889
| 0.871622
| 0.22905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c51965d17f0a01348cbe02c4d40f4d7b3b7db07e
| 124
|
py
|
Python
|
src/petronia/defimpl/extensions/caches/__init__.py
|
groboclown/petronia
|
486338023d19cee989e92f0c5692680f1a37811f
|
[
"MIT"
] | 19
|
2017-06-21T10:28:24.000Z
|
2021-12-31T11:49:28.000Z
|
src/petronia/defimpl/extensions/caches/__init__.py
|
groboclown/petronia
|
486338023d19cee989e92f0c5692680f1a37811f
|
[
"MIT"
] | 10
|
2016-11-11T18:57:57.000Z
|
2021-02-01T15:33:43.000Z
|
src/petronia/defimpl/extensions/caches/__init__.py
|
groboclown/petronia
|
486338023d19cee989e92f0c5692680f1a37811f
|
[
"MIT"
] | 3
|
2017-09-17T03:29:35.000Z
|
2019-06-03T10:43:08.000Z
|
"""
Default cache implementations.
"""
from ..defs import ExtensionStorageCache
from .file import DirectoryExtensionCache
| 15.5
| 41
| 0.798387
| 11
| 124
| 9
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120968
| 124
| 7
| 42
| 17.714286
| 0.908257
| 0.241935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c52270d8092075f8a93034c15f8a420545521bad
| 118
|
py
|
Python
|
django/django/forms/templates/django/forms/widgets/input.html.py
|
roshanba/mangal
|
f7b428811dc07214009cc33f0beb665ead402038
|
[
"bzip2-1.0.6",
"MIT"
] | 1
|
2021-05-16T03:20:23.000Z
|
2021-05-16T03:20:23.000Z
|
django/django/forms/templates/django/forms/widgets/input.html.py
|
roshanba/mangal
|
f7b428811dc07214009cc33f0beb665ead402038
|
[
"bzip2-1.0.6",
"MIT"
] | 4
|
2021-03-30T14:06:09.000Z
|
2021-09-22T19:26:31.000Z
|
venv/lib/python3.8/site-packages/django/forms/templates/django/forms/widgets/input.html.py
|
Solurix/Flashcards-Django
|
03c863f6722936093927785a2b20b6b668bb743d
|
[
"MIT"
] | null | null | null |
XXXXXX XXXXXXX XXXXXXXBB BBBBBBBBBBBB BB BBBB XXXXXXXFFFFFFFFFFFFFFFFXBBBBBBBBBBBB BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBX
| 59
| 117
| 0.932203
| 8
| 118
| 13.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067797
| 118
| 1
| 118
| 118
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c52660a541a40a843e2f530bc7ae1be34cd3cf86
| 76
|
py
|
Python
|
src/mud/manager/authentication/__init__.py
|
Martakan/YUMS
|
57c8c02eac70a62233f9a48d78becb1a25bed64e
|
[
"MIT"
] | null | null | null |
src/mud/manager/authentication/__init__.py
|
Martakan/YUMS
|
57c8c02eac70a62233f9a48d78becb1a25bed64e
|
[
"MIT"
] | null | null | null |
src/mud/manager/authentication/__init__.py
|
Martakan/YUMS
|
57c8c02eac70a62233f9a48d78becb1a25bed64e
|
[
"MIT"
] | null | null | null |
from .authentication import AuthenticationManager # only give access to this
| 76
| 76
| 0.855263
| 9
| 76
| 7.222222
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118421
| 76
| 1
| 76
| 76
| 0.970149
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c546df2d857630395cc540d1ed92191aee37e012
| 38
|
py
|
Python
|
python/testData/resolve/pyToJava/Simple.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/resolve/pyToJava/Simple.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/resolve/pyToJava/Simple.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from java.util import ArrayL<ref>ist
| 12.666667
| 36
| 0.789474
| 7
| 38
| 4.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 38
| 2
| 37
| 19
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 1
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c54fa781d24407e933230d43b1bc5188ac6ac896
| 10,980
|
py
|
Python
|
experiments/01_benchmark/plot_individual.py
|
fsschneider/cockpit-experiments
|
a9eaf3dc5da5a58356ac0eef25a11235bf0891c4
|
[
"MIT"
] | 7
|
2021-11-02T11:23:49.000Z
|
2022-02-16T13:25:47.000Z
|
experiments/01_benchmark/plot_individual.py
|
fsschneider/cockpit-experiments
|
a9eaf3dc5da5a58356ac0eef25a11235bf0891c4
|
[
"MIT"
] | null | null | null |
experiments/01_benchmark/plot_individual.py
|
fsschneider/cockpit-experiments
|
a9eaf3dc5da5a58356ac0eef25a11235bf0891c4
|
[
"MIT"
] | 2
|
2021-11-02T11:23:54.000Z
|
2022-02-02T15:56:03.000Z
|
"""Benchmark Bar Plot of the Overhead of Individual Instruments."""
import copy
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import run_individual
import seaborn as sns
from benchmark_utils import _fix_dev_naming, _fix_tp_naming, _quantity_naming, read_data
sys.path.append(os.getcwd())
from experiments.utils.plotting import _get_plot_size, _set_plotting_params # noqa
HERE = os.path.abspath(__file__)
HEREDIR = os.path.dirname(HERE)
SAVEDIR = os.path.join(HEREDIR, "output/fig_individual/")
os.makedirs(SAVEDIR, exist_ok=True)
PLOT_FRACTION = 0.6
PLOT_HEIGHT = 0.4
APP_PLOT_FRACTION = 0.65
APP_PLOT_FRACTION_EXP = 0.34
APP_PLOT_HEIGHT = 0.13
def plot_data(df, show=True, save=True, title=False, appendix=False):
"""Create a bar plot from the benchmarking data.
The bar plot shows the relative run time compared to an empty cockpit for
individual instruments. The run time is averaged over multiple seeds.
Args:
df (pandas.DataFrame): DataFrame holding the benchmark data.
show (bool, optional): Whether to show the plot. Defaults to True.
save (bool, optional): Whether to save the plot. Defaults to True.
title (bool, optional): Whether to show a title. Defaults to False.
appendix (bool, optional): Whether the plot will be used in the appendix.
Defaults to False.
"""
fraction = APP_PLOT_FRACTION if appendix else PLOT_FRACTION
fig, ax = plt.subplots(
figsize=_get_plot_size(
textwidth="neurips", fraction=fraction, height_ratio=PLOT_HEIGHT
)
)
# Verify that the data is from a single test problem and use it as a title
testproblem_set = df.testproblem.unique()
assert len(testproblem_set) == 1
tp_name = str(testproblem_set[0])
tp_name_fixed = _fix_tp_naming(tp_name)
device_set = df.device.unique()
assert len(device_set) == 1
dev_name = str(device_set[0])
dev_name_fixed = _fix_dev_naming(dev_name)
ax = plot_data_ax(ax, df)
if title:
ax.set_title(
f"Computational Overhead for {tp_name_fixed} ({dev_name_fixed})",
fontweight="bold",
)
if save:
savename = "benchmark_" + tp_name + "_" + dev_name
savename += "_app" if appendix else ""
savename += ".pdf"
savepath = os.path.join(SAVEDIR, savename)
plt.savefig(savepath, bbox_inches="tight")
if show:
plt.show()
def plot_data_ax(ax, df):
"""Plot the barplot into a given axis.
Args:
ax (plt.ax): Axis to plot into.
df (pandas.DataFrame): DataFrame holding the benchmark data.
Returns:
[plt.ax]: Axis to plot into.
"""
# Smaller font size for quantities
plt.rcParams.update({"xtick.labelsize": 6})
width_capsize = 0.25
width_errorbars = 0.75
ci = "sd"
hline_color = "gray"
hline_style = ":"
color_palette = "husl" # "rocket_r", "tab10" "Set2"
drop = [
# Remove cockpit_configurations
"full",
"business",
"economy",
"HessMaxEV",
"GradHist2d",
]
for d in drop:
df.drop(df[(df.quantities == d)].index, inplace=True)
# Compute mean time for basline
mean_baseline = df.loc[df["quantities"] == "baseline"].mean(axis=0).time_per_step
df["relative_overhead"] = df["time_per_step"].div(mean_baseline)
# Order from smallest to largest
grp_order = df.groupby("quantities").time_per_step.agg("mean").sort_values().index
# but put "baseline" always in front:
idx_baseline = np.where(grp_order._index_data == "baseline")[0][0]
order_list = list(grp_order._index_data)
order_list.insert(0, order_list.pop(idx_baseline))
grp_order._index_data = order_list
grp_order._data = order_list
sns.barplot(
x="quantities",
y="relative_overhead",
data=df,
order=grp_order,
ax=ax,
capsize=width_capsize,
errwidth=width_errorbars,
ci=ci,
estimator=np.mean,
palette=color_palette,
)
# Line at 1
ax.axhline(
y=1,
color=hline_color,
linestyle=hline_style,
)
ax.set_xlabel("")
ax.set_ylabel("Run Time Overhead")
ax.set_xticklabels(_quantity_naming(x.get_text()) for x in ax.get_xticklabels())
plt.tight_layout()
# Fix to make the bar plot for the paper a bit more appealing
ylims = list(ax.get_ylim())
ylims[1] = max(3.0, ylims[1])
ax.set_ylim(ylims)
return ax
def plot_expensive_data(df, show=True, save=True, title=False, appendix=False):
"""Create a bar plot from the expensive instruments.
The bar plot shows the relative run time compared to an empty cockpit for
individual instruments. The run time is averaged over multiple seeds.
Args:
df (pandas.DataFrame): DataFrame holding the benchmark data.
show (bool, optional): Whether to show the plot. Defaults to True.
save (bool, optional): Whether to save the plot. Defaults to True.
title (bool, optional): Whether to show a title. Defaults to False.
appendix (bool, optional): Whether the plot will be used in the appendix.
Defaults to False.
"""
fig, ax = plt.subplots(
figsize=_get_plot_size(
textwidth="neurips",
fraction=APP_PLOT_FRACTION_EXP,
height_ratio=PLOT_HEIGHT * APP_PLOT_FRACTION / APP_PLOT_FRACTION_EXP,
)
)
# Verify that the data is from a single test problem and use it as a title
testproblem_set = df.testproblem.unique()
assert len(testproblem_set) == 1
tp_name = str(testproblem_set[0])
tp_name_fixed = _fix_tp_naming(tp_name)
device_set = df.device.unique()
assert len(device_set) == 1
dev_name = str(device_set[0])
dev_name_fixed = _fix_dev_naming(dev_name)
ax = plot_expensive_data_ax(ax, df)
if title:
ax.set_title(
f"Computational Overhead for {tp_name_fixed} ({dev_name_fixed})",
fontweight="bold",
)
if save:
savename = "benchmark_expensive_" + tp_name + "_" + dev_name
savename += "_app" if appendix else ""
savename += ".pdf"
savepath = os.path.join(SAVEDIR, savename)
plt.savefig(savepath, bbox_inches="tight")
if show:
plt.show()
def plot_expensive_data_ax(ax, df):
"""Plot the barplot for the expensive quantities into a given axis.
Args:
ax (plt.ax): Axis to plot into.
df (pandas.DataFrame): DataFrame holding the benchmark data.
Returns:
[plt.ax]: Axis to plot into.
"""
# Plotting Params #
_set_plotting_params()
# Smaller font size for quantities
plt.rcParams.update({"xtick.labelsize": 6})
width_capsize = 0.25
width_errorbars = 0.75
ci = "sd"
hline_color = "gray"
hline_style = ":"
color_palette = "husl" # "rocket_r", "tab10" "Set2"
keep = [
"baseline",
"HessMaxEV",
"GradHist2d",
]
drop = [c for c in set(df.quantities) if c not in keep]
for d in drop:
df.drop(df[(df.quantities == d)].index, inplace=True)
# Compute mean time for basline
mean_baseline = df.loc[df["quantities"] == "baseline"].mean(axis=0).time_per_step
df["relative_overhead"] = df["time_per_step"].div(mean_baseline)
# Order from smallest to largest
grp_order = df.groupby("quantities").time_per_step.agg("mean").sort_values().index
# but put "baseline" always in front:
idx_baseline = np.where(grp_order._index_data == "baseline")[0][0]
order_list = list(grp_order._index_data)
order_list.insert(0, order_list.pop(idx_baseline))
grp_order._index_data = order_list
grp_order._data = order_list
sns.barplot(
x="quantities",
y="relative_overhead",
data=df,
order=grp_order,
ax=ax,
capsize=width_capsize,
errwidth=width_errorbars,
ci=ci,
estimator=np.mean,
palette=color_palette,
)
# Line at 1
ax.axhline(
y=1,
color=hline_color,
linestyle=hline_style,
)
ax.set_xlabel("")
ax.set_ylabel("")
ax.set_xticklabels(_quantity_naming(x.get_text()) for x in ax.get_xticklabels())
plt.tight_layout()
# Fix to make the bar plot for the paper a bit more appealing
ylims = list(ax.get_ylim())
ylims[1] = max(3.0, ylims[1])
ax.set_ylim(ylims)
return ax
def plot_combined_app(df, show=True, save=True):
"""Plot both the regular and the expensive data into a single figure.
Args:
df (pandas.DataFrame): DataFrame holding the benchmark data.
show (bool, optional): Whether to show the plot. Defaults to True.
save (bool, optional): Whether to save the plot. Defaults to True.
"""
# Plotting Params #
_set_plotting_params()
fig, ax = plt.subplots(
figsize=_get_plot_size(
textwidth="neurips",
fraction=1.0,
height_ratio=APP_PLOT_HEIGHT,
subplots=(2, 1),
),
ncols=2,
gridspec_kw={"width_ratios": [APP_PLOT_FRACTION, APP_PLOT_FRACTION_EXP]},
)
fig.tight_layout()
# Verify that the data is from a single test problem and use it as a title
testproblem_set = df.testproblem.unique()
assert len(testproblem_set) == 1
tp_name = str(testproblem_set[0])
device_set = df.device.unique()
assert len(device_set) == 1
dev_name = str(device_set[0])
ax[0] = plot_data_ax(ax[0], copy.deepcopy(df))
ax[1] = plot_expensive_data_ax(ax[1], copy.deepcopy(df))
if save:
savename = "benchmark_combined_" + tp_name + "_" + dev_name
savename += ".pdf"
savepath = os.path.join(SAVEDIR, savename)
plt.savefig(savepath, bbox_inches="tight")
if show:
plt.show()
if __name__ == "__main__":
# Plotting Params #
_set_plotting_params()
PLOT_APPENDIX = True
# # Main Plot
# MAIN_PROBLEM = ("cifar10_3c3d", "cuda")
# MAIN_PROBLEM_FILE = run_individual.get_savefile(*MAIN_PROBLEM)
# df, testproblem_set = read_data(MAIN_PROBLEM_FILE)
# plot_data(copy.deepcopy(df[MAIN_PROBLEM[0]]), show=True, save=False)
# Appendix Plots
if PLOT_APPENDIX:
APPENDIX_RUNS = [
# GPU
("mnist_logreg", "cuda"),
("mnist_mlp", "cuda"),
("cifar10_3c3d", "cuda"),
("fmnist_2c2d", "cuda"),
# CPU
("mnist_logreg", "cpu"),
("mnist_mlp", "cpu"),
("cifar10_3c3d", "cpu"),
("fmnist_2c2d", "cpu"),
]
for (testproblem, device) in APPENDIX_RUNS:
filepath = run_individual.get_savefile(testproblem, device)
df, testproblem_set = read_data(filepath)
for tp in testproblem_set:
plot_combined_app(copy.deepcopy(df[tp]), show=False)
| 30.247934
| 88
| 0.637614
| 1,478
| 10,980
| 4.521651
| 0.170501
| 0.025138
| 0.02843
| 0.025138
| 0.767919
| 0.736346
| 0.733653
| 0.716594
| 0.716594
| 0.716594
| 0
| 0.011354
| 0.254007
| 10,980
| 362
| 89
| 30.331492
| 0.804542
| 0.266849
| 0
| 0.635556
| 0
| 0
| 0.092429
| 0.002809
| 0
| 0
| 0
| 0
| 0.026667
| 1
| 0.022222
| false
| 0
| 0.04
| 0
| 0.071111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c562201de3d11a2f557ca4acd44eb36b3001c5c9
| 98
|
py
|
Python
|
src/tests/test_app/admin.py
|
RamezIssac/simple-review
|
5de5036001ea8416e51ec74f7fc3f994bd57114a
|
[
"MIT"
] | null | null | null |
src/tests/test_app/admin.py
|
RamezIssac/simple-review
|
5de5036001ea8416e51ec74f7fc3f994bd57114a
|
[
"MIT"
] | null | null | null |
src/tests/test_app/admin.py
|
RamezIssac/simple-review
|
5de5036001ea8416e51ec74f7fc3f994bd57114a
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Information
admin.site.register(Information)
| 24.5
| 32
| 0.846939
| 13
| 98
| 6.384615
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091837
| 98
| 4
| 33
| 24.5
| 0.932584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3d6aa36c0e0045e2ae4b66123b938bed5c223510
| 4,943
|
py
|
Python
|
pymatflow/elk/base/time_dependent.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 6
|
2020-03-06T16:13:08.000Z
|
2022-03-09T07:53:34.000Z
|
pymatflow/elk/base/time_dependent.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 1
|
2021-10-02T02:23:08.000Z
|
2021-11-08T13:29:37.000Z
|
pymatflow/elk/base/time_dependent.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 1
|
2021-07-10T16:28:14.000Z
|
2021-07-10T16:28:14.000Z
|
class absorbing_boundaries:
"""
"""
def __init__(self):
self.params = {}
def to_string(self):
out = ""
for item in self.params:
if self.params[item] == None:
continue
out += "%s = %s\n" % (item, self.params[item])
out += "\n"
return out
def set_params(self, params):
"""
"""
for item in params:
if len(item.split("/")) == 3:
self.params[item.split("/")[-1]] = params[item]
continue
class photoelectronspectrum:
"""
"""
def __init__(self):
self.params = {}
def to_string(self):
out = ""
for item in self.params:
if self.params[item] == None:
continue
out += "%s = %s\n" % (item, self.params[item])
out += "\n"
return out
def set_params(self, params):
"""
"""
for item in params:
if len(item.split("/")) == 3:
self.params[item.split("/")[-1]] = params[item]
continue
class propagation:
"""
"""
def __init__(self):
self.params = {}
def to_string(self):
out = ""
for item in self.params:
if self.params[item] == None:
continue
out += "%s = %s\n" % (item, self.params[item])
out += "\n"
return out
def set_params(self, params):
"""
"""
for item in params:
if len(item.split("/")) == 3:
self.params[item.split("/")[-1]] = params[item]
continue
class response:
"""
"""
def __init__(self):
self.params = {}
def to_string(self):
out = ""
for item in self.params:
if self.params[item] == None:
continue
out += "%s = %s\n" % (item, self.params[item])
out += "\n"
return out
def set_params(self, params):
"""
"""
for item in params:
if len(item.split("/")) == 3:
self.params[item.split("/")[-1]] = params[item]
continue
class td_output:
"""
"""
def __init__(self):
self.params = {}
def to_string(self):
out = ""
for item in self.params:
if self.params[item] == None:
continue
out += "%s = %s\n" % (item, self.params[item])
out += "\n"
return out
def set_params(self, params):
"""
"""
for item in params:
if len(item.split("/")) == 3:
self.params[item.split("/")[-1]] = params[item]
continue
class time_dependent:
"""
"""
def __init__(self):
self.params = {}
self.absorbing_boundaries = absorbing_boundaries()
self.photoelectronspectrum = photoelectronspectrum()
self.propagation = propagation()
self.response = response()
self.td_output = td_output()
def to_string(self):
out = ""
for item in self.params:
if self.params[item] == None:
continue
out += "%s = %s\n" % (item, self.params[item])
out += "\n"
out += self.absorbing_boundaries.to_string()
out += self.photoelectronspectrum.to_string()
out += self.propagation.to_string()
out += self.response.to_string()
out += self.td_output.to_string()
return out
def set_params(self, params):
"""
"""
for item in params:
if len(item.split("/")) == 2:
self.params[item.split("/")[-1]] = params[item]
continue
if item.split("/")[1] == "Absorbing Boundaries":
self.absorbing_boundaries.set_params({item: params[item]})
elif item.split("/")[1] == "PhotoElectronSpectrum":
self.photoelectronspectrum.set_params({item: params[item]})
elif item.split("/")[1] == "Propagation":
self.propagation.set_params({item: params[item]})
elif item.split("/")[1] == "Response":
self.response.set_params({item: params[item]})
elif item.split("/")[1] == "TD Output":
self.td_output.set_params({item: params[item]})
else:
pass
| 30.325153
| 76
| 0.422011
| 457
| 4,943
| 4.439825
| 0.076586
| 0.177427
| 0.124199
| 0.044357
| 0.725973
| 0.704288
| 0.704288
| 0.704288
| 0.685559
| 0.612617
| 0
| 0.006166
| 0.442242
| 4,943
| 163
| 77
| 30.325153
| 0.729779
| 0
| 0
| 0.754237
| 0
| 0
| 0.032865
| 0.004541
| 0
| 0
| 0
| 0
| 0
| 1
| 0.152542
| false
| 0.008475
| 0
| 0
| 0.254237
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3dc6d9f120e9c13528d956fba78c83eea2e0ee72
| 36
|
py
|
Python
|
chest/__main__.py
|
quantumsheep/chest
|
7b45cd353f0a3d907f6e9ccbc19732fb0b068f9e
|
[
"MIT"
] | null | null | null |
chest/__main__.py
|
quantumsheep/chest
|
7b45cd353f0a3d907f6e9ccbc19732fb0b068f9e
|
[
"MIT"
] | null | null | null |
chest/__main__.py
|
quantumsheep/chest
|
7b45cd353f0a3d907f6e9ccbc19732fb0b068f9e
|
[
"MIT"
] | null | null | null |
from .app import Chest
Chest.run()
| 9
| 22
| 0.722222
| 6
| 36
| 4.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 36
| 3
| 23
| 12
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
3dcbe12b695ea5fd7e098c1b07cadde4ce1cc8ce
| 4,712
|
py
|
Python
|
tests/test_user_endpoints.py
|
coosoti/Osoti-Small
|
ccd147768aeae234eb45bf0a78257aafc3a8a16f
|
[
"MIT"
] | null | null | null |
tests/test_user_endpoints.py
|
coosoti/Osoti-Small
|
ccd147768aeae234eb45bf0a78257aafc3a8a16f
|
[
"MIT"
] | 2
|
2018-04-26T17:14:47.000Z
|
2018-04-30T12:04:14.000Z
|
tests/test_user_endpoints.py
|
coosoti/Osoti-Small
|
ccd147768aeae234eb45bf0a78257aafc3a8a16f
|
[
"MIT"
] | null | null | null |
import unittest
import json
from api import db
from api.models.models import User
from tests.main import MainTestCase
class TestUserEndpoint(MainTestCase):
def test_registration(self):
""" Test for user signup"""
with self.client:
response = self.client.post('api/v2/auth/register',
data=json.dumps(dict(
username='kulundeng',
email='vicharles@gmail.com',
designation='caterer',
password='password1',
confirm_password='password1'
)),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertTrue(data['message'] ==
'You are successfully registered.')
self.assertEqual(response.status_code, 201)
def test_registered_user_login(self):
""" Test for login of registered-user login """
with self.client:
response = self.client.post(
'api/v2/auth/register',
data=json.dumps(dict(
username='victorvenosa',
email='osoticharles@gmail.com',
designation='customer',
password='kulundeng',
confirm_password='kulundeng'
)),
content_type='application/json',
)
data = json.loads(response.data.decode())
self.assertTrue(
data['message'] == 'You are successfully registered.'
)
self.assertEqual(response.status_code, 201)
response = self.client.post(
'api/v2/auth/login',
data=json.dumps(dict(
email='osoticharles@gmail.com',
password='kulundeng'
)),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertTrue(data['message'] ==
'You have successfully logged in')
self.assertEqual(response.status_code, 200)
def test_non_registered_user_login(self):
""" Test for login of non-registered user """
with self.client:
response = self.client.post(
'api/v2/auth/login',
data=json.dumps(dict(
email='victorvenosa@gmail.com',
password='thatisthewayitis'
)),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertTrue(data['message'] == 'User does not exist.')
self.assertEqual(response.status_code, 404)
def test_valid_logout(self):
""" Test for logout before token expires """
with self.client:
response = self.client.post(
'api/v2/auth/register',
data=json.dumps(dict(
username='victorvenosa',
email='osoticharles@gmail.com',
designation='customer',
password='kulundeng',
confirm_password='kulundeng'
)),
content_type='application/json',
)
data = json.loads(response.data.decode())
self.assertTrue(
data['message'] == 'You are successfully registered.')
self.assertEqual(response.status_code, 201)
response = self.client.post(
'api/v2/auth/login',
data=json.dumps(dict(
email='osoticharles@gmail.com',
password='kulundeng'
)),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertTrue(data['message'] ==
'You have successfully logged in')
self.assertEqual(response.status_code, 200)
response = self.client.post(
'api/v2/auth/logout',
headers=dict(
Authorization='Bearer ' + json.loads(
response.data.decode()
)['auth_token']
)
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 200)
if __name__ == '__main__':
unittest.main()
| 39.596639
| 72
| 0.480688
| 391
| 4,712
| 5.703325
| 0.204604
| 0.046637
| 0.060987
| 0.075336
| 0.776682
| 0.749776
| 0.735426
| 0.70583
| 0.672646
| 0.672646
| 0
| 0.010953
| 0.418718
| 4,712
| 118
| 73
| 39.932203
| 0.803213
| 0.029075
| 0
| 0.632075
| 0
| 0
| 0.163333
| 0.024181
| 0
| 0
| 0
| 0
| 0.122642
| 1
| 0.037736
| false
| 0.084906
| 0.04717
| 0
| 0.09434
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
3df7af6ff2a5b5a3bd52942780ece1b7a754a650
| 48
|
py
|
Python
|
testpackage.py
|
saithalavi/nessaid_cli
|
ed2f9052533030f283e91c1ecb2d0e37b2d1b132
|
[
"MIT"
] | null | null | null |
testpackage.py
|
saithalavi/nessaid_cli
|
ed2f9052533030f283e91c1ecb2d0e37b2d1b132
|
[
"MIT"
] | 1
|
2021-07-11T12:43:28.000Z
|
2021-07-12T13:26:01.000Z
|
testpackage.py
|
saithalavi/nessaid_cli
|
ed2f9052533030f283e91c1ecb2d0e37b2d1b132
|
[
"MIT"
] | null | null | null |
from nessaid_cli_tests import doTests
doTests()
| 16
| 37
| 0.854167
| 7
| 48
| 5.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104167
| 48
| 3
| 38
| 16
| 0.906977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
9a814402b9c3e95e8490df1c07216ca5b947f3c9
| 41
|
py
|
Python
|
tests/components/universal/__init__.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/universal/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
tests/components/universal/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Tests for the universal component."""
| 20.5
| 40
| 0.707317
| 5
| 41
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121951
| 41
| 1
| 41
| 41
| 0.805556
| 0.829268
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9a955c02ee44bda6704b4cfdd15d20200fb41124
| 234
|
py
|
Python
|
python/0764.calculate-circumference-and-area.py
|
Ubastic/lintcode
|
9f600eece075410221a24859331a810503c76014
|
[
"MIT"
] | 6
|
2019-10-02T02:24:49.000Z
|
2021-11-18T10:08:07.000Z
|
python/0764.calculate-circumference-and-area.py
|
Ubastic/lintcode
|
9f600eece075410221a24859331a810503c76014
|
[
"MIT"
] | 1
|
2020-02-28T03:42:36.000Z
|
2020-03-07T09:26:00.000Z
|
src/0764.calculate-circumference-and-area/0764.calculate-circumference-and-area.py
|
jiangshanmeta/lintcode
|
7d7003825b5a7b9fd5b0be57aa2d84391e0d1fa5
|
[
"MIT"
] | 2
|
2020-07-25T08:42:38.000Z
|
2021-05-07T06:16:46.000Z
|
class Solution:
"""
@param r: a Integer represent radius
@return: the circle's circumference nums[0] and area nums[1]
"""
def calculate(self, r):
# write your code here
return [2*3.14*r,3.14*(r**2)]
| 29.25
| 64
| 0.589744
| 36
| 234
| 3.833333
| 0.777778
| 0.043478
| 0.057971
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05848
| 0.269231
| 234
| 8
| 65
| 29.25
| 0.748538
| 0.508547
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
9ad3027880890136b730883a25f074e38bd14415
| 89
|
py
|
Python
|
bbc_lib/bbc/shang.py
|
zhgu999/SyncBtca
|
4e4b41c515fc26cde0f71ff38652b4a84e71a2a7
|
[
"MIT"
] | null | null | null |
bbc_lib/bbc/shang.py
|
zhgu999/SyncBtca
|
4e4b41c515fc26cde0f71ff38652b4a84e71a2a7
|
[
"MIT"
] | null | null | null |
bbc_lib/bbc/shang.py
|
zhgu999/SyncBtca
|
4e4b41c515fc26cde0f71ff38652b4a84e71a2a7
|
[
"MIT"
] | 1
|
2020-12-01T10:46:45.000Z
|
2020-12-01T10:46:45.000Z
|
#!/usr/bin/env python3
# -*- codeing : utf-8 -*-
def bbc_test2():
print("bbc_test")
| 17.8
| 26
| 0.58427
| 13
| 89
| 3.846154
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041096
| 0.179775
| 89
| 5
| 27
| 17.8
| 0.643836
| 0.516854
| 0
| 0
| 0
| 0
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
b1151548cf18b036ca6810fcf1807d41092d7136
| 57
|
py
|
Python
|
uiza/api_resources/callback/__init__.py
|
uizaio/api-wrapper-python
|
e67c162e711857341f7ef5752178219e94f604d3
|
[
"MIT"
] | 2
|
2019-04-22T11:39:36.000Z
|
2020-05-26T04:01:43.000Z
|
uiza/api_resources/callback/__init__.py
|
uizaio/api-wrapper-python
|
e67c162e711857341f7ef5752178219e94f604d3
|
[
"MIT"
] | null | null | null |
uiza/api_resources/callback/__init__.py
|
uizaio/api-wrapper-python
|
e67c162e711857341f7ef5752178219e94f604d3
|
[
"MIT"
] | 2
|
2019-02-11T09:34:03.000Z
|
2019-02-12T10:31:41.000Z
|
from uiza.api_resources.callback.callback import Callback
| 57
| 57
| 0.894737
| 8
| 57
| 6.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 57
| 1
| 57
| 57
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b11aafdb166bb1ecdf756bdd15504a72df0cafbf
| 255
|
py
|
Python
|
flake8_too_many/messages.py
|
queensferryme/flake8-too-many
|
f19a40eeedf9056a2afc6223bdf359dbbcef90f3
|
[
"MIT"
] | 4
|
2021-09-02T20:50:19.000Z
|
2022-01-11T04:00:35.000Z
|
flake8_too_many/messages.py
|
queensferryme/flake8-too-many
|
f19a40eeedf9056a2afc6223bdf359dbbcef90f3
|
[
"MIT"
] | 8
|
2021-08-16T17:11:22.000Z
|
2022-02-28T18:30:07.000Z
|
flake8_too_many/messages.py
|
queensferryme/flake8-too-many
|
f19a40eeedf9056a2afc6223bdf359dbbcef90f3
|
[
"MIT"
] | null | null | null |
TMN001 = "TMN001 function has too many arguments ({} > {})."
TMN002 = "TMN002 function returns too many values ({} > {})."
TMN003 = "TMN003 function has too many return statements ({} > {})."
TMN004 = "TMN004 unpacking has too many targets ({} > {})."
| 31.875
| 68
| 0.639216
| 29
| 255
| 5.62069
| 0.517241
| 0.171779
| 0.184049
| 0.220859
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115942
| 0.188235
| 255
| 7
| 69
| 36.428571
| 0.671498
| 0
| 0
| 0
| 0
| 0
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b14a9234d247927edefc314877e143bd7008984e
| 278
|
py
|
Python
|
continents/models.py
|
brapastor/pygeographic
|
3b1522b62bf06430dca007d64a5b71243fdb71f0
|
[
"MIT"
] | null | null | null |
continents/models.py
|
brapastor/pygeographic
|
3b1522b62bf06430dca007d64a5b71243fdb71f0
|
[
"MIT"
] | null | null | null |
continents/models.py
|
brapastor/pygeographic
|
3b1522b62bf06430dca007d64a5b71243fdb71f0
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Continent(models.Model):
name = models.CharField(max_length=255)
translate = models.CharField(max_length=255)
color = models.CharField(max_length=255)
def __str__(self):
return self.name
| 19.857143
| 48
| 0.715827
| 37
| 278
| 5.189189
| 0.594595
| 0.234375
| 0.28125
| 0.375
| 0.421875
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0.190647
| 278
| 13
| 49
| 21.384615
| 0.813333
| 0.086331
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0.142857
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
b176d1276ec799566dfb2fce1237cc79bf46d926
| 111
|
py
|
Python
|
lab/device/__init__.py
|
ParanoiaSYT/Qulab-backup
|
09ec5457145b3789d4c1ac02c43dd3e6dfafc96f
|
[
"MIT"
] | null | null | null |
lab/device/__init__.py
|
ParanoiaSYT/Qulab-backup
|
09ec5457145b3789d4c1ac02c43dd3e6dfafc96f
|
[
"MIT"
] | null | null | null |
lab/device/__init__.py
|
ParanoiaSYT/Qulab-backup
|
09ec5457145b3789d4c1ac02c43dd3e6dfafc96f
|
[
"MIT"
] | null | null | null |
from ._driver import BaseDriver
from ._quant import QBool, QInteger, QOption, QReal, QString, QVector, QList
| 37
| 77
| 0.783784
| 14
| 111
| 6.071429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144144
| 111
| 2
| 78
| 55.5
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b1816887f51805cf18ad9f260911f1f98a6d538a
| 136
|
py
|
Python
|
tests/asp/cautious/failing.test3.choice-disjunction.gringo.cautious.asp.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 19
|
2015-12-03T08:53:45.000Z
|
2022-03-31T02:09:43.000Z
|
tests/asp/cautious/failing.test3.choice-disjunction.gringo.cautious.asp.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 80
|
2017-11-25T07:57:32.000Z
|
2018-06-10T19:03:30.000Z
|
tests/asp/cautious/failing.test3.choice-disjunction.gringo.cautious.asp.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 6
|
2015-01-15T07:51:48.000Z
|
2020-06-18T14:47:48.000Z
|
input = """
1 2 1 0 3
1 3 1 0 2
3 1 4 0 0
3 1 3 1 0 4
3 1 5 0 0
8 2 2 3 1 0 5
0
4 e
3 a
5 c
2 b
0
B+
0
B-
1
0
1
"""
output = """
{}
"""
| 5.666667
| 13
| 0.441176
| 51
| 136
| 1.176471
| 0.254902
| 0.233333
| 0.15
| 0.133333
| 0.2
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0.551282
| 0.426471
| 136
| 23
| 14
| 5.913043
| 0.217949
| 0
| 0
| 0.086957
| 0
| 0
| 0.772059
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b192828b7c753e4c546d346a8c3e1d1b058dbbd8
| 175
|
py
|
Python
|
org.conqat.engine.sourcecode/test-data/org.conqat.engine.sourcecode.shallowparser/python/exceptions_with_comment.py
|
assessorgeneral/ConQAT
|
2a462f23f22c22aa9d01a7a204453d1be670ba60
|
[
"Apache-2.0"
] | 1
|
2020-04-28T20:06:30.000Z
|
2020-04-28T20:06:30.000Z
|
org.conqat.engine.sourcecode/test-data/org.conqat.engine.sourcecode.shallowparser/python/exceptions_with_comment.py
|
SvenPeldszus/conqat
|
28fe004a49453894922aeb27ee3467b1748d23e9
|
[
"Apache-2.0"
] | null | null | null |
org.conqat.engine.sourcecode/test-data/org.conqat.engine.sourcecode.shallowparser/python/exceptions_with_comment.py
|
SvenPeldszus/conqat
|
28fe004a49453894922aeb27ee3467b1748d23e9
|
[
"Apache-2.0"
] | null | null | null |
try:
pass
except ImportError:
# tkinter may be missing
pass
return
try:
pass
except locale.Error: # Workaround for "locale.Error: unsupported locale setting"
pass
return
| 14.583333
| 80
| 0.771429
| 24
| 175
| 5.625
| 0.625
| 0.103704
| 0.192593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.165714
| 175
| 12
| 81
| 14.583333
| 0.924658
| 0.457143
| 0
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.4
| 0.1
| 0
| 0.3
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
b19925fb675ec56a39d256c14f6a209d9ea1ac97
| 20,351
|
py
|
Python
|
SBaaS_resequencing/stage01_resequencing_omniExpressExome_query.py
|
dmccloskey/SBaaS_resequencing
|
760049eec734a3f02a1172af68b20198713b785e
|
[
"MIT"
] | null | null | null |
SBaaS_resequencing/stage01_resequencing_omniExpressExome_query.py
|
dmccloskey/SBaaS_resequencing
|
760049eec734a3f02a1172af68b20198713b785e
|
[
"MIT"
] | null | null | null |
SBaaS_resequencing/stage01_resequencing_omniExpressExome_query.py
|
dmccloskey/SBaaS_resequencing
|
760049eec734a3f02a1172af68b20198713b785e
|
[
"MIT"
] | null | null | null |
from .stage01_resequencing_omniExpressExome_postgresql_models import *
from SBaaS_base.sbaas_base_query_delete import sbaas_base_query_delete
from SBaaS_base.sbaas_base_query_select import sbaas_base_query_select
from SBaaS_base.sbaas_template_query import sbaas_template_query
class stage01_resequencing_omniExpressExome_query(sbaas_template_query):
def initialize_supportedTables(self):
'''Set the supported tables dict for stage01_resequencing_omniExpressExome
'''
tables_supported = {'data_stage01_resequencing_omniExpressExome':data_stage01_resequencing_omniExpressExome,
'data_stage01_resequencing_omniExpressExome_annotations':data_stage01_resequencing_omniExpressExome_annotations,
'data_stage01_resequencing_omniExpressExome_header':data_stage01_resequencing_omniExpressExome_header,
'data_stage01_resequencing_omniExpressExome_annotations2':data_stage01_resequencing_omniExpressExome_annotations2,
'data_stage01_resequencing_omniExpressExomeFiltered':data_stage01_resequencing_omniExpressExomeFiltered,
};
self.set_supportedTables(tables_supported);
#SPLIT 2:
def reset_data_stage01_resequencing_omniExpressExome(self,
tables_I = ['data_stage01_resequencing_omniExpressExome',
'data_stage01_resequencing_omniExpressExome_header'],
experiment_id_I = None,
warn_I=True):
try:
querydelete = sbaas_base_query_delete(session_I=self.session,engine_I=self.engine,settings_I=self.settings,data_I=self.data);
for table in tables_I:
query = {};
query['delete_from'] = [{'table_name':table}];
query['where'] = [{
'table_name':table,
'column_name':'experiment_id',
'value':experiment_id_I,
#'value':self.convert_string2StringString(experiment_id_I),
'operator':'LIKE',
'connector':'AND'
}
];
table_model = self.convert_tableStringList2SqlalchemyModelDict([table]);
query = querydelete.make_queryFromString(table_model,query);
querydelete.reset_table_sqlalchemyModel(query_I=query,warn_I=warn_I);
except Exception as e:
print(e);
def _get_rows_experimentID_dataStage01ResequencingOmniExpressExome(self,
experiment_id_I,
query_I={},
output_O='listDict',
dictColumn_I=None):
'''Query rows by experiment_id from data_stage01_resequencing_omniExpressExome
INPUT:
experiment_id_I = string
output_O = string
dictColumn_I = string
OPTIONAL INPUT:
query_I = additional query blocks
OUTPUT:
data_O = output specified by output_O and dictColumn_I
'''
tables = ['data_stage01_resequencing_omniExpressExome'];
# get the listDict data
data_O = [];
query = {};
query['select'] = [{"table_name":tables[0]}];
query['where'] = [
{"table_name":tables[0],
'column_name':'experiment_id',
'value':experiment_id_I,
'operator':'LIKE',
'connector':'AND'
},
{"table_name":tables[0],
'column_name':'used_',
'value':'true',
'operator':'IS',
'connector':'AND'
},
];
query['order_by'] = [
{"table_name":tables[0],
'column_name':'experiment_id',
'order':'ASC',
},
{"table_name":tables[0],
'column_name':'sample_name',
'order':'ASC',
},
{"table_name":tables[0],
'column_name':'SNP_Name',
'order':'ASC',
},
];
#additional blocks
for k,v in query_I.items():
if k not in query.items():
query[k]=[];
for r in v:
query[k].append(r);
data_O = self.get_rows_tables(
tables_I=tables,
query_I=query,
output_O=output_O,
dictColumn_I=dictColumn_I);
return data_O;
def getJoin_rows_experimentIDs_dataStage01ResequecingOmniExpressExomeAndAnnotationsAndAnnotations2(
self,
experiment_ids_I='',
sample_names_I='',
raise_I = False):
'''Join rows between omniExpressExome, annotations, and annotations auxillary
INPUT:
experiment_id_I = string
OUTPUT:
data_O = output specified by output_O and dictColumn_I
'''
data_O = [];
try:
subquery2 = '''SELECT "data_stage01_resequencing_omniExpressExome"."experiment_id",
"data_stage01_resequencing_omniExpressExome"."sample_name",
"data_stage01_resequencing_omniExpressExome"."SNP_Name",
"data_stage01_resequencing_omniExpressExome"."Sample_ID",
"data_stage01_resequencing_omniExpressExome"."Allele1_Top",
"data_stage01_resequencing_omniExpressExome"."Allele2_Top",
"data_stage01_resequencing_omniExpressExome"."GC_Score"
'''
subquery2+= '''FROM "data_stage01_resequencing_omniExpressExome" '''
subquery2+= '''WHERE "data_stage01_resequencing_omniExpressExome"."used_" '''
if experiment_ids_I:
cmd_q = '''AND "data_stage01_resequencing_omniExpressExome"."experiment_id" =ANY ('{%s}'::text[]) ''' %(self.convert_list2string(experiment_ids_I));
subquery2+=cmd_q;
if sample_names_I:
cmd_q = '''AND "data_stage01_resequencing_omniExpressExome"."sample_name" =ANY ('{%s}'::text[]) ''' %(self.convert_list2string(sample_names_I));
subquery2+=cmd_q;
subquery2+= '''ORDER BY "data_stage01_resequencing_omniExpressExome"."experiment_id" ASC, '''
subquery2+= '''"data_stage01_resequencing_omniExpressExome"."sample_name" ASC '''
subquery1 = '''SELECT "subquery2"."experiment_id",
"subquery2"."sample_name",
"subquery2"."SNP_Name",
"subquery2"."Sample_ID",
"subquery2"."Allele1_Top",
"subquery2"."Allele2_Top",
"subquery2"."GC_Score",
"data_stage01_resequencing_omniExpressExome_annotations2"."Name",
"data_stage01_resequencing_omniExpressExome_annotations2"."RsID"
'''
subquery1+= '''FROM "data_stage01_resequencing_omniExpressExome_annotations2", (%s) AS subquery2 ''' %subquery2
subquery1+= '''WHERE "subquery2"."SNP_Name" = "data_stage01_resequencing_omniExpressExome_annotations2"."Name" '''
subquery1+= '''ORDER BY "subquery2"."experiment_id" ASC, '''
subquery1+= '''"subquery2"."sample_name" ASC, '''
subquery1+= '''"data_stage01_resequencing_omniExpressExome_annotations2"."Name" ASC '''
query_cmd = '''SELECT "subquery1"."experiment_id",
"subquery1"."sample_name",
"subquery1"."SNP_Name",
"subquery1"."Sample_ID",
"subquery1"."Allele1_Top",
"subquery1"."Allele2_Top",
"subquery1"."GC_Score",
"data_stage01_resequencing_omniExpressExome_annotations"."IlmnID",
"data_stage01_resequencing_omniExpressExome_annotations"."Name",
"data_stage01_resequencing_omniExpressExome_annotations"."IlmnStrand",
"data_stage01_resequencing_omniExpressExome_annotations"."SNP",
"data_stage01_resequencing_omniExpressExome_annotations"."AddressA_ID",
"data_stage01_resequencing_omniExpressExome_annotations"."AlleleA_ProbeSeq",
"data_stage01_resequencing_omniExpressExome_annotations"."AddressB_ID",
"data_stage01_resequencing_omniExpressExome_annotations"."AlleleB_ProbeSeq",
"data_stage01_resequencing_omniExpressExome_annotations"."GenomeBuild",
"data_stage01_resequencing_omniExpressExome_annotations"."Chr",
"data_stage01_resequencing_omniExpressExome_annotations"."MapInfo",
"data_stage01_resequencing_omniExpressExome_annotations"."Ploidy",
"data_stage01_resequencing_omniExpressExome_annotations"."Species",
"data_stage01_resequencing_omniExpressExome_annotations"."Source",
"data_stage01_resequencing_omniExpressExome_annotations"."SourceVersion",
"data_stage01_resequencing_omniExpressExome_annotations"."SourceStrand",
"data_stage01_resequencing_omniExpressExome_annotations"."SourceSeq",
"data_stage01_resequencing_omniExpressExome_annotations"."TopGenomicSeq",
"data_stage01_resequencing_omniExpressExome_annotations"."BeadSetID",
"data_stage01_resequencing_omniExpressExome_annotations"."Exp_Clusters",
"data_stage01_resequencing_omniExpressExome_annotations"."RefStrand" '''
query_cmd+= '''FROM "data_stage01_resequencing_omniExpressExome_annotations", (%s) AS subquery1 ''' %subquery1
query_cmd+= '''WHERE ("data_stage01_resequencing_omniExpressExome_annotations"."Name" ="subquery1"."RsID"
OR "data_stage01_resequencing_omniExpressExome_annotations"."Name" LIKE '%,' || "subquery1"."RsID"
OR "data_stage01_resequencing_omniExpressExome_annotations"."Name" LIKE "subquery1"."RsID" || ',%'
OR "data_stage01_resequencing_omniExpressExome_annotations"."Name" LIKE '%,' || "subquery1"."RsID" || ',%') '''
query_cmd+= '''ORDER BY "subquery1"."experiment_id" ASC, '''
query_cmd+= '''"subquery1"."sample_name" ASC, '''
query_cmd+= '''"data_stage01_resequencing_omniExpressExome_annotations"."Chr" ASC, '''
query_cmd+= '''"data_stage01_resequencing_omniExpressExome_annotations"."MapInfo" ASC '''
query_cmd+= ';';
query_select = sbaas_base_query_select(self.session,self.engine,self.settings)
data_O = [dict(d) for d in query_select.execute_select(query_cmd)];
except Exception as e:
if raise_I: raise;
else: print(e);
return data_O;
def getJoin_rows_experimentIDs_dataStage01ResequecingOmniExpressExomeAndAnnotations(
self,
experiment_ids_I='',
sample_names_I='',
gc_score_I = 0.15,
include_nan_I = False,
raise_I = False):
'''Join rows between omniExpressExome, annotations, and annotations auxillary
INPUT:
experiment_ids_I = string or list
sample_names_I = string or list
gc_score_I = float
include_nan_I = boolean
OUTPUT:
data_O = output specified by output_O and dictColumn_I
"GC_Score" != (SELECT CAST ('NaN' AS FLOAT))
'''
data_O = [];
try:
subquery1 = '''SELECT "data_stage01_resequencing_omniExpressExome"."experiment_id",
"data_stage01_resequencing_omniExpressExome"."sample_name",
"data_stage01_resequencing_omniExpressExome"."SNP_Name",
"data_stage01_resequencing_omniExpressExome"."Sample_ID",
"data_stage01_resequencing_omniExpressExome"."Allele1_Top",
"data_stage01_resequencing_omniExpressExome"."Allele2_Top",
"data_stage01_resequencing_omniExpressExome"."GC_Score"
'''
subquery1+= '''FROM "data_stage01_resequencing_omniExpressExome" '''
subquery1+= '''WHERE "data_stage01_resequencing_omniExpressExome"."used_" '''
if experiment_ids_I:
cmd_q = '''AND "data_stage01_resequencing_omniExpressExome"."experiment_id" =ANY ('{%s}'::text[]) ''' %(self.convert_list2string(experiment_ids_I));
subquery1+=cmd_q;
if sample_names_I:
cmd_q = '''AND "data_stage01_resequencing_omniExpressExome"."sample_name" =ANY ('{%s}'::text[]) ''' %(self.convert_list2string(sample_names_I));
subquery1+=cmd_q;
if gc_score_I:
cmd_q = '''AND "data_stage01_resequencing_omniExpressExome"."GC_Score" < %s ''' %(gc_score_I);
subquery1+=cmd_q;
if not include_nan_I:
cmd_q = '''AND "data_stage01_resequencing_omniExpressExome"."GC_Score" != (SELECT CAST ('NaN' AS FLOAT)) ''';
subquery1+=cmd_q;
subquery1+= '''ORDER BY "data_stage01_resequencing_omniExpressExome"."experiment_id" ASC, '''
subquery1+= '''"data_stage01_resequencing_omniExpressExome"."sample_name" ASC '''
query_cmd = '''SELECT "subquery1"."experiment_id",
"subquery1"."sample_name",
"subquery1"."SNP_Name",
"subquery1"."Sample_ID",
"subquery1"."Allele1_Top",
"subquery1"."Allele2_Top",
"subquery1"."GC_Score",
"data_stage01_resequencing_omniExpressExome_annotations"."IlmnID",
"data_stage01_resequencing_omniExpressExome_annotations"."Name",
"data_stage01_resequencing_omniExpressExome_annotations"."IlmnStrand",
"data_stage01_resequencing_omniExpressExome_annotations"."SNP",
"data_stage01_resequencing_omniExpressExome_annotations"."AddressA_ID",
"data_stage01_resequencing_omniExpressExome_annotations"."AlleleA_ProbeSeq",
"data_stage01_resequencing_omniExpressExome_annotations"."AddressB_ID",
"data_stage01_resequencing_omniExpressExome_annotations"."AlleleB_ProbeSeq",
"data_stage01_resequencing_omniExpressExome_annotations"."GenomeBuild",
"data_stage01_resequencing_omniExpressExome_annotations"."Chr",
"data_stage01_resequencing_omniExpressExome_annotations"."MapInfo",
"data_stage01_resequencing_omniExpressExome_annotations"."Ploidy",
"data_stage01_resequencing_omniExpressExome_annotations"."Species",
"data_stage01_resequencing_omniExpressExome_annotations"."Source",
"data_stage01_resequencing_omniExpressExome_annotations"."SourceVersion",
"data_stage01_resequencing_omniExpressExome_annotations"."SourceStrand",
"data_stage01_resequencing_omniExpressExome_annotations"."SourceSeq",
"data_stage01_resequencing_omniExpressExome_annotations"."TopGenomicSeq",
"data_stage01_resequencing_omniExpressExome_annotations"."BeadSetID",
"data_stage01_resequencing_omniExpressExome_annotations"."Exp_Clusters",
"data_stage01_resequencing_omniExpressExome_annotations"."RefStrand" '''
query_cmd+= '''FROM "data_stage01_resequencing_omniExpressExome_annotations", (%s) AS subquery1 ''' %subquery1
query_cmd+= '''WHERE "data_stage01_resequencing_omniExpressExome_annotations"."Name" = "subquery1"."SNP_Name" '''
query_cmd+= '''ORDER BY "subquery1"."experiment_id" ASC, '''
query_cmd+= '''"subquery1"."sample_name" ASC, '''
query_cmd+= '''"data_stage01_resequencing_omniExpressExome_annotations"."Chr" ASC, '''
query_cmd+= '''"data_stage01_resequencing_omniExpressExome_annotations"."MapInfo" ASC '''
query_cmd+= ';';
query_select = sbaas_base_query_select(self.session,self.engine,self.settings)
data_O = [dict(d) for d in query_select.execute_select(query_cmd)];
except Exception as e:
if raise_I: raise;
else: print(e);
return data_O;
#SPLIT 1:
def get_rows_experimentID_dataStage01ResequencingOmniExpressExome(self,experiment_id_I):
'''Query rows that are used from the analysis'''
try:
data = self.session.query(data_stage01_resequencing_omniExpressExome).filter(
data_stage01_resequencing_omniExpressExome.experiment_id.like(experiment_id_I),
data_stage01_resequencing_omniExpressExome.used_.is_(True)).all();
rows_O = [d.__repr__dict__() for d in data];
return rows_O;
except SQLAlchemyError as e:
print(e);
def get_rows_experimentID_dataStage01ResequencingOmniExpressExomeFiltered(self,experiment_id_I):
'''Query rows that are used from the analysis'''
try:
data = self.session.query(data_stage01_resequencing_omniExpressExomeFiltered).filter(
data_stage01_resequencing_omniExpressExomeFiltered.experiment_id.like(experiment_id_I),
data_stage01_resequencing_omniExpressExomeFiltered.used_.is_(True)).all();
rows_O = [d.__repr__dict__() for d in data];
return rows_O;
except SQLAlchemyError as e:
print(e);
def get_rows_experimentIDsAndSampleNames_dataStage01ResequencingOmniExpressExomeFiltered(
        self,
        experiment_id_I='',
        sample_names_I='',
        chromosomes_I='',
        raise_I=False):
    '''Query rows that are used from the analysis
    INPUT:
    experiment_id_I = string or list of experiment ids
    sample_names_I = string or list of sample names
    chromosomes_I = string or list of chromosomes
    raise_I = if True, re-raise any exception instead of printing it
    OUTPUT:
    data_O = list of row dictionaries
    '''
    data_O = []
    try:
        query_cmd = '''SELECT "data_stage01_resequencing_omniExpressExomeFiltered"."id",
        "data_stage01_resequencing_omniExpressExomeFiltered"."experiment_id",
        "data_stage01_resequencing_omniExpressExomeFiltered"."sample_name",
        "data_stage01_resequencing_omniExpressExomeFiltered"."SNP_Name",
        "data_stage01_resequencing_omniExpressExomeFiltered"."GenomeBuild",
        "data_stage01_resequencing_omniExpressExomeFiltered"."Chr",
        "data_stage01_resequencing_omniExpressExomeFiltered"."MapInfo",
        "data_stage01_resequencing_omniExpressExomeFiltered"."mutation_data",
        "data_stage01_resequencing_omniExpressExomeFiltered"."used_",
        "data_stage01_resequencing_omniExpressExomeFiltered"."comment_" '''
        query_cmd += '''FROM "data_stage01_resequencing_omniExpressExomeFiltered" '''
        # BUGFIX: the previous version appended WHERE unconditionally and
        # only prefixed the 2nd/3rd filters with AND, so the generated SQL
        # was invalid ("WHERE AND ..." or a dangling "WHERE") whenever
        # experiment_id_I was empty.  Collect conditions, then join them.
        conditions = []
        if experiment_id_I:
            conditions.append(
                '''"data_stage01_resequencing_omniExpressExomeFiltered"."experiment_id" =ANY ('{%s}'::text[]) '''
                % (self.convert_list2string(experiment_id_I)))
        if sample_names_I:
            conditions.append(
                '''"data_stage01_resequencing_omniExpressExomeFiltered"."sample_name" =ANY ('{%s}'::text[]) '''
                % (self.convert_list2string(sample_names_I)))
        if chromosomes_I:
            conditions.append(
                '''"data_stage01_resequencing_omniExpressExomeFiltered"."Chr" =ANY ('{%s}'::text[]) '''
                % (self.convert_list2string(chromosomes_I)))
        if conditions:
            query_cmd += 'WHERE ' + 'AND '.join(conditions)
        # NOTE(review): values are interpolated into the SQL string; safe
        # only for trusted, internal identifiers — parameterize if these
        # ever come from user input.
        query_cmd += '''ORDER BY experiment_id ASC, sample_name ASC,
        "Chr" ASC, "MapInfo" ASC '''
        query_cmd += ';'
        query_select = sbaas_base_query_select(self.session, self.engine, self.settings)
        data_O = [dict(d) for d in query_select.execute_select(query_cmd)]
    except Exception as e:
        if raise_I:
            raise
        else:
            print(e)
    return data_O
| 55.452316
| 172
| 0.640116
| 1,853
| 20,351
| 6.577982
| 0.090664
| 0.197965
| 0.233981
| 0.32636
| 0.795389
| 0.74961
| 0.680203
| 0.658134
| 0.604233
| 0.566905
| 0
| 0.024322
| 0.258562
| 20,351
| 367
| 173
| 55.452316
| 0.783418
| 0.022308
| 0
| 0.583893
| 0
| 0
| 0.548262
| 0.40491
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.013423
| null | null | 0.020134
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b19d76322efd5396f2bee733bc2f33110d63a231
| 12,514
|
py
|
Python
|
tests/test_hfdata.py
|
usda-ars-ussl/fluxpart
|
c519f319a9b09df785a30f2bcc23abbd1e052921
|
[
"CC0-1.0"
] | 33
|
2016-08-22T10:26:16.000Z
|
2022-02-15T08:02:41.000Z
|
tests/test_hfdata.py
|
usda-ars-ussl/fluxpart
|
c519f319a9b09df785a30f2bcc23abbd1e052921
|
[
"CC0-1.0"
] | 8
|
2018-04-10T14:49:07.000Z
|
2021-05-27T17:31:59.000Z
|
tests/test_hfdata.py
|
usda-ars-ussl/fluxpart
|
c519f319a9b09df785a30f2bcc23abbd1e052921
|
[
"CC0-1.0"
] | 11
|
2016-11-29T10:57:07.000Z
|
2021-01-15T02:55:20.000Z
|
import io
import os
import numpy as np
import numpy.testing as npt
import pandas as pd
from fluxpart.hfdata import HFData, HFDataSource
from fluxpart.fluxpart import _converter_func, _peektime
# Absolute directory of this test module.
TESTDIR = os.path.dirname(os.path.realpath(__file__))
# High-frequency fixture data files used by the tests below live here.
DATADIR = os.path.join(TESTDIR, "data")
def test_hfdata_reader():
    """Exercise HFDataSource/HFData reading, cleansing, and interval chunking
    across csv, TOA5, TOB1, and ghg tab-delimited inputs."""
    # Toy csv: the last column (index 9) is a flag that must equal 0;
    # one row has a missing field, one contains '???' (declared na_value),
    # '# foo' is a comment line, and the final row has u-col value -2.
    toy_data = (
        "foobar baz\n"
        "'asdf',0,2,3,4,5,6,7,9,0\n"
        "'asdf',1,2,3,4,5,6,7,9,0\n"
        "'asdf',2,2,3,4,5,6,7,9,1\n"
        "'asdf',3,2,3,4,5,6,,9,0\n"
        "'asdf',4,2,3,4,5,6,7,9,0\n"
        "# foo\n"
        "'asdf',5,2,3,4,5,6,7,9,0\n"
        "'asdf',6,2,3,4,5,6,7,xxx,0\n"
        "'asdf',7,???,3,4,5,6,7,9,0\n"
        "'asdf',8,2,3,4,5,6,7,9,0\n"
        "'asdf',9,2,3,4,5,6,7,9,0\n"
        "'asdf',10,2,3,4,5,6,7,9,0\n"
        "'asdf',11,-2,3,4,5,6,7,9,0\n"
    )
    source = HFDataSource(
        files=[io.BytesIO(toy_data.encode())],
        filetype="csv",
        cols=(1, 2, 3, 6, 7, 4, 5),
        comment="#",
        skiprows=1,
        na_values="???",
        converters={"q": _converter_func(10., 0)},
        flags=(9, 0),
        skiprows=1 drops the "foobar baz" header line above.
        delimiter=",",
    )
    toy = HFData(next(source.reader(interval=None)))
    toy.cleanse(rd_tol=0.1, ad_tol=2, bounds={"v": (0, np.inf)})
    # After cleansing, only the run u = 4, 5, 6 survives; q was scaled by
    # the converter (7 -> 70).
    npt.assert_allclose(toy.dataframe["u"], [4, 5, 6])
    npt.assert_allclose(toy.dataframe["v"], 3 * [2])
    npt.assert_allclose(toy.dataframe["w"], 3 * [3])
    npt.assert_allclose(toy.dataframe["q"], 3 * [70])
    npt.assert_allclose(toy.dataframe["c"], 3 * [6])
    npt.assert_allclose(toy.dataframe["T"], 3 * [4])
    npt.assert_allclose(toy.dataframe["P"], 3 * [5])
    # missing time series data: two bursts ~1 hour apart in the timestamps
    toy_data = (
        "foobar baz\n"
        "'2013-01-14 08:21:48' ,0,2,3,4,5,6,7,9,0\n"
        "'2013-01-14 08:21:48.1',1,2,3,4,5,6,7,9,0\n"
        "'2013-01-14 08:21:48.2',2,2,3,4,5,6,7,9,1\n"
        "'2013-01-14 08:21:48.3',3,2,3,4,5,6,,9,0\n"
        "'2013-01-14 08:21:48.4',4,2,3,4,5,6,7,9,0\n"
        "'2013-01-14 09:20:10' ,5,2,3,4,5,6,7,9,0\n"
        "'2013-01-14 09:20:10.1',6,2,3,4,5,6,7,9,0\n"
        "'2013-01-14 09:20:10.2',7,2,3,4,5,6,7,9,0\n"
        "'2013-01-14 09:20:10.3',8,2,3,4,5,6,7,9,0\n"
        "'2013-01-14 09:20:10.4',9,2,3,4,5,6,7,9,0\n"
        "'2013-01-14 09:20:10.5',10,2,3,4,5,6,7,9,0\n"
        "'2013-01-14 09:20:10.6',11,-2,3,4,5,6,7,9,0\n"
    )
    source = HFDataSource(
        files=[io.BytesIO(toy_data.encode())],
        filetype="csv",
        cols=(1, 2, 3, 6, 7, 4, 5),
        time_col=0,
        skiprows=1,
        converters={"q": _converter_func(10., 0)},
        delimiter=",",
    )
    toy = HFData(next(source.reader(interval=None)))
    toy.cleanse(rd_tol=0.1, ad_tol=2)
    # Only the second (09:20:10) burst remains: u runs 6..11.
    assert int(toy.dataframe.iloc[0]["u"]) == 6
    assert int(toy.dataframe.iloc[-1]["u"]) == 11
    # toa5
    cols = (2, 3, 4, 5, 6, 7, 8)
    fname = os.path.join(DATADIR, "TOA5_6843.ts_Above_2012_06_07_1300.dat")
    kws = dict(
        skiprows=4,
        converters={
            "T": _converter_func(1, 273.15),
            "q": _converter_func(1e-3, 0),
            "c": _converter_func(1e-6, 0),
            "P": _converter_func(1e3, 0),
        },
        flags=[(9, 0)],
    )
    source = HFDataSource(files=[fname], filetype="csv", cols=cols, **kws)
    reader = source.reader(interval=None)
    data = HFData(next(reader))
    assert_1300_read(data)
    # Same file, now with a time column and 15-minute chunking.
    kws = dict(
        skiprows=4,
        time_col=0,
        converters={
            "T": _converter_func(1, 273.15),
            "q": _converter_func(1e-3, 0),
            "c": _converter_func(1e-6, 0),
            "P": _converter_func(1e3, 0),
        },
        flags=(9, 0),
    )
    source = HFDataSource(files=[fname], filetype="csv", cols=cols, **kws)
    reader = source.reader(interval="15min")
    data = HFData(next(reader))
    assert data.dataframe.index[0] == pd.to_datetime("2012-06-07 13:00:00.05")
    assert data.dataframe.index[-1] == pd.to_datetime("2012-06-07 13:14:59.95")
    assert_1300_interval_read(data)
    # Two consecutive files chunked at 10/15/20-minute intervals.
    fnames = [
        os.path.join(DATADIR, "TOA5_6843.ts_Above_2012_06_07_1245.dat"),
        os.path.join(DATADIR, "TOA5_6843.ts_Above_2012_06_07_1300.dat"),
    ]
    source = HFDataSource(fnames, filetype="csv", cols=cols, **kws)
    reader = source.reader(interval="10min")
    for cnt, df in enumerate(reader):
        assert_10min_chunk_read(cnt, df)
    reader = source.reader(interval="15min")
    for cnt, df in enumerate(reader):
        assert_15min_chunk_read(cnt, df)
    reader = source.reader(interval="20min")
    for cnt, df in enumerate(reader):
        assert_20min_chunk_read(cnt, df)
    # tob
    fname = os.path.join(DATADIR, "testing.tob")
    kws = dict(
        filetype="tob1",
        cols=(3, 4, 5, 6, 7, 8, 9),
        converters={
            "T": _converter_func(1, 273.15),
            "q": _converter_func(1e-3, 0),
            "c": _converter_func(1e-6, 0),
            "P": _converter_func(1e3, 0),
        },
    )
    source = HFDataSource(files=[fname], **kws)
    data = HFData(next(source.reader(interval="30min")))
    assert_tob_read(data)
    # NOTE(review): local name `datetime` shadows the stdlib module name.
    datetime = _peektime([fname], **kws)[0]
    assert datetime.round(freq="100ms") == pd.to_datetime(
        "2017-08-03 00:00:00.1"
    )
    reader = source.reader(interval="5s")
    for cnt, df in enumerate(reader):
        assert_5S_tobchunk_read(cnt, df)
    reader = source.reader(interval="1min")
    for cnt, df in enumerate(reader):
        assert_1min_tobchunk_read(cnt, df)
    # ghg tab delimited data file
    fname = os.path.join(DATADIR, "head_ghg.data")
    kws = {
        'filetype': 'csv',
        'sep': '\t',
        'cols': (11, 12, 13, 7, 8, 9, 10),
        'time_col': [5, 6],
        'skiprows': 8,
        'na_values': 'NAN',
        'to_datetime_kws': {"format": "%Y-%m-%d %H:%M:%S:%f"}
    }
    source = HFDataSource(files=[fname], **kws)
    data = HFData(next(source.reader(interval=None)))
    assert_ghg_read(data)
def assert_1300_read(data):
    """Verify first row, last row, and column means of the 13:00 TOA5 file."""
    first_row = {
        "u": 0.468,
        "v": -0.9077501,
        "w": 0.1785,
        "c": 659.7584e-6,
        "q": 9.530561e-3,
        "T": 28.52527 + 273.15,
        "P": 100.1938e3,
    }
    last_row = {
        "u": 1.3675,
        "v": -0.75475,
        "w": -0.1775,
        "c": 658.2624e-6,
        "q": 9.404386e-3,
        "T": 28.35199 + 273.15,
        "P": 100.1938e3,
    }
    # (expected mean, absolute tolerance) per column.
    col_means = {
        "u": (1.43621, 1e-4),
        "v": (-0.634818, 1e-4),
        "w": (0.0619483, 1e-4),
        "c": (659.052e-6, 1e-9),
        "q": (9.56732e-3, 1e-7),
        "T": (28.5431 + 273.15, 1e-4),
        "P": (100.179e3, 1e0),
    }
    for col, expected in first_row.items():
        npt.assert_allclose(data[col].iloc[0], expected)
    for col, expected in last_row.items():
        npt.assert_allclose(data[col].iloc[-1], expected)
    for col, (expected, tol) in col_means.items():
        npt.assert_allclose(data[col].mean(), expected, atol=tol)
def assert_1300_interval_read(data):
    """last line in file is not read because belongs to next interval"""
    first_row = {
        "u": 0.468,
        "v": -0.9077501,
        "w": 0.1785,
        "c": 659.7584e-6,
        "q": 9.530561e-3,
        "T": 28.52527 + 273.15,
        "P": 100.1938e3,
    }
    last_row = {
        "u": 1.20050,
        "v": -0.775,
        "w": -0.1610,
        "c": 658.3277e-6,
        "q": 9.394239e-3,
        "T": 28.35199 + 273.15,
        "P": 100.1678e3,
    }
    # (expected mean, absolute tolerance) per column.
    col_means = {
        "u": (1.43621, 1e-4),
        "v": (-0.634818, 1e-4),
        "w": (0.0619483, 1e-4),
        "c": (659.052e-6, 1e-9),
        "q": (9.56732e-3, 1e-7),
        "T": (28.5431 + 273.15, 1e-4),
        "P": (100.179e3, 1e0),
    }
    for col, expected in first_row.items():
        npt.assert_allclose(data[col].iloc[0], expected)
    for col, expected in last_row.items():
        npt.assert_allclose(data[col].iloc[-1], expected)
    for col, (expected, tol) in col_means.items():
        npt.assert_allclose(data[col].mean(), expected, atol=tol)
def assert_tob_read(data):
    """Verify first/last rows and index endpoints of the TOB1 test file."""
    first_row = {
        "u": -2.57175016,
        "v": 1.6450001,
        "w": -0.12725,
        "c": 612.54150391e-6,
        "q": 13.11471748e-3,
        "T": 23.29580116 + 273.15,
        "P": 85.04070282e3,
    }
    last_row = {
        "u": -2.4402502,
        "v": 1.5402501,
        "w": -0.11375,
        "c": 615.627e-6,
        "q": 13.200139e-3,
        "T": 23.015879 + 273.15,
        "P": 85.0407e3,
    }
    for col, expected in first_row.items():
        npt.assert_allclose(data[col].iloc[0], expected)
    for col, expected in last_row.items():
        npt.assert_allclose(data[col].iloc[-1], expected)
    index = data.dataframe.index
    assert index[0].round(freq="100ms") == pd.to_datetime(
        "2017-08-03 00:00:00.1"
    )
    assert index[-1].round(freq="100ms") == pd.to_datetime(
        "2017-08-03 00:00:14.4"
    )
def assert_ghg_read(data):
npt.assert_allclose(data["u"].iloc[0], 0.752103)
npt.assert_allclose(data["v"].iloc[0], -0.591885)
npt.assert_allclose(data["w"].iloc[0], 0.199506)
npt.assert_allclose(data["c"].iloc[0], 15.1805)
npt.assert_allclose(data["q"].iloc[0], 817.852)
npt.assert_allclose(data["T"].iloc[0], 27.6979)
npt.assert_allclose(data["P"].iloc[0], 96.9071)
npt.assert_allclose(data["u"].iloc[-1], 0.781697)
npt.assert_allclose(data["v"].iloc[-1], -0.542902)
npt.assert_allclose(data["w"].iloc[-1], 0.195424)
npt.assert_allclose(data["c"].iloc[-1], 15.1822)
npt.assert_allclose(data["q"].iloc[-1], 817.922)
npt.assert_allclose(data["T"].iloc[-1], 27.6979)
npt.assert_allclose(data["P"].iloc[-1], 96.9071)
assert data.dataframe.index[0].round(freq="100ms") == pd.to_datetime(
"2017-08-02 11:30:00"
)
assert data.dataframe.index[-1].round(freq="100ms") == pd.to_datetime(
"2017-08-02 11:30:00.1"
)
def assert_10min_chunk_read(ichunk, df):
    """Verify the time endpoints of the ichunk-th 10-minute chunk."""
    # (start, stop) per chunk index.
    bounds = [
        ("2012-06-07 12:45:00.05", "2012-06-07 12:49:59.95"),
        ("2012-06-07 12:50:00", "2012-06-07 12:59:59.95"),
        ("2012-06-07 13:00:00", "2012-06-07 13:09:59.95"),
        ("2012-06-07 13:10:00", "2012-06-07 13:15:00"),
    ]
    start, stop = bounds[ichunk]
    assert df.index[0] == pd.to_datetime(start)
    assert df.index[-1] == pd.to_datetime(stop)
def assert_15min_chunk_read(ichunk, df):
starts = [
"2012-06-07 12:45:00.05",
"2012-06-07 13:00:00",
"2012-06-07 13:15:00",
]
stops = [
"2012-06-07 12:59:59.95",
"2012-06-07 13:14:59.95",
"2012-06-07 13:15:00",
]
assert df.index[0] == pd.to_datetime(starts[ichunk])
assert df.index[-1] == pd.to_datetime(stops[ichunk])
def assert_20min_chunk_read(ichunk, df):
    """Verify the time endpoints of the ichunk-th 20-minute chunk."""
    # (start, stop) per chunk index.
    bounds = [
        ("2012-06-07 12:45:00.05", "2012-06-07 12:59:59.95"),
        ("2012-06-07 13:00:00", "2012-06-07 13:15:00"),
    ]
    start, stop = bounds[ichunk]
    assert df.index[0] == pd.to_datetime(start)
    assert df.index[-1] == pd.to_datetime(stop)
def assert_5S_tobchunk_read(cnt, df):
starts = [
"2017-08-03 00:00:00.1",
"2017-08-03 00:00:05",
"2017-08-03 00:00:10",
]
stops = [
"2017-08-03 00:00:04.9",
"2017-08-03 00:00:09.9",
"2017-08-03 00:00:14.4",
]
assert df.index[0].round(freq="100ms") == pd.to_datetime(starts[cnt])
assert df.index[-1].round(freq="100ms") == pd.to_datetime(stops[cnt])
def assert_1min_tobchunk_read(cnt, df):
    """Verify the (100ms-rounded) endpoints of the single 1-minute chunk."""
    # (start, stop) per chunk index; only one chunk exists.
    bounds = [("2017-08-03 00:00:00.1", "2017-08-03 00:00:14.4")]
    start, stop = bounds[cnt]
    assert df.index[0].round(freq="100ms") == pd.to_datetime(start)
    assert df.index[-1].round(freq="100ms") == pd.to_datetime(stop)
# Allow running this test module directly, without pytest.
if __name__ == "__main__":
    test_hfdata_reader()
| 35.95977
| 79
| 0.5895
| 2,109
| 12,514
| 3.391655
| 0.113798
| 0.096882
| 0.183
| 0.205508
| 0.831819
| 0.76765
| 0.735216
| 0.649378
| 0.566056
| 0.531246
| 0
| 0.177443
| 0.197938
| 12,514
| 347
| 80
| 36.063401
| 0.53522
| 0.009989
| 0
| 0.397306
| 0
| 0.074074
| 0.160016
| 0.060097
| 0
| 0
| 0
| 0
| 0.383838
| 1
| 0.03367
| false
| 0
| 0.023569
| 0
| 0.057239
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
490ed0fd88b53c21775d44b5f53a1e0df1d343c7
| 190
|
py
|
Python
|
fromzeroton/library_fun.py
|
vierth/fromZerotoN
|
d4041744c985496ed8bddd6f0000d49028f7899a
|
[
"Apache-2.0"
] | 7
|
2020-12-15T23:31:52.000Z
|
2021-01-20T19:55:48.000Z
|
fromzeroton/library_fun.py
|
vierth/fromZerotoN
|
d4041744c985496ed8bddd6f0000d49028f7899a
|
[
"Apache-2.0"
] | null | null | null |
fromzeroton/library_fun.py
|
vierth/fromZerotoN
|
d4041744c985496ed8bddd6f0000d49028f7899a
|
[
"Apache-2.0"
] | 6
|
2021-01-16T19:27:00.000Z
|
2021-12-19T16:39:02.000Z
|
# some good libraries
import math, random, re, os, json
import nltk, matplotlib, pandas, numpy
import pandas as pd
from matplotlib import pyplot as plt
# NOTE(review): this rebinds the same name `plt` as the line above —
# one of the two pyplot imports is redundant.
import matplotlib.pyplot as plt
| 27.142857
| 39
| 0.768421
| 29
| 190
| 5.034483
| 0.62069
| 0.109589
| 0.150685
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 190
| 7
| 40
| 27.142857
| 0.941935
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
49265f1d8f87f50b9ef9f188cf443f72a5859b7c
| 126
|
py
|
Python
|
mqttrpc/__init__.py
|
litnimax/python-mqttrpc
|
21dc8bc0754bade9430ae3db2cab4583d904413f
|
[
"MIT"
] | 10
|
2018-03-11T14:09:46.000Z
|
2022-01-30T20:35:01.000Z
|
mqttrpc/__init__.py
|
litnimax/python-mqttrpc
|
21dc8bc0754bade9430ae3db2cab4583d904413f
|
[
"MIT"
] | 1
|
2019-11-06T17:08:30.000Z
|
2019-11-07T09:10:10.000Z
|
mqttrpc/__init__.py
|
litnimax/python-mqttrpc
|
21dc8bc0754bade9430ae3db2cab4583d904413f
|
[
"MIT"
] | 2
|
2018-04-01T19:22:40.000Z
|
2019-11-06T15:41:56.000Z
|
from .mqttrpc import MQTTRPC, dispatcher
from .rpcproxy import RPCProxy, OdooRPCProxy
from hbmqtt.client import mqtt_connected
| 42
| 44
| 0.857143
| 16
| 126
| 6.6875
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103175
| 126
| 3
| 45
| 42
| 0.946903
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4928ecbc8cf1183615b5139ef457e02a613434c6
| 58
|
py
|
Python
|
gc_bias/__init__.py
|
nahanoo/gc_bias
|
fa21b18c6cbaeba441d89bba8bd98b93e39ce937
|
[
"MIT"
] | null | null | null |
gc_bias/__init__.py
|
nahanoo/gc_bias
|
fa21b18c6cbaeba441d89bba8bd98b93e39ce937
|
[
"MIT"
] | null | null | null |
gc_bias/__init__.py
|
nahanoo/gc_bias
|
fa21b18c6cbaeba441d89bba8bd98b93e39ce937
|
[
"MIT"
] | null | null | null |
from .gc_coverage import GC
from .plotting import Plotting
| 29
| 30
| 0.844828
| 9
| 58
| 5.333333
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12069
| 58
| 2
| 30
| 29
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
492ecf2b595e48ceb94fca123d004f048aecdf63
| 79
|
py
|
Python
|
flask_table/compat.py
|
nullptrT/flask_table
|
d4577307bf3b790fb1d91238019577beb477ee4a
|
[
"BSD-3-Clause"
] | 215
|
2015-01-09T12:18:19.000Z
|
2022-01-31T00:18:29.000Z
|
flask_table/compat.py
|
nullptrT/flask_table
|
d4577307bf3b790fb1d91238019577beb477ee4a
|
[
"BSD-3-Clause"
] | 93
|
2015-02-03T22:39:02.000Z
|
2022-01-26T04:12:16.000Z
|
flask_table/compat.py
|
nullptrT/flask_table
|
d4577307bf3b790fb1d91238019577beb477ee4a
|
[
"BSD-3-Clause"
] | 48
|
2015-04-29T09:23:34.000Z
|
2022-01-21T13:50:39.000Z
|
def with_metaclass(meta, base=object):
    """Create a base class named "NewBase" built by metaclass ``meta``.

    Inheriting from the returned class applies ``meta`` on both Python 2
    and 3 without version-specific metaclass syntax.
    """
    bases = (base,)
    return meta("NewBase", bases, {})
| 26.333333
| 39
| 0.658228
| 10
| 79
| 5.1
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139241
| 79
| 2
| 40
| 39.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.088608
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
49444f1f207d953ef259e316a6862cdbc8bd1456
| 155
|
py
|
Python
|
src/pytools/api/__init__.py
|
BCG-Gamma/pytools
|
d7be703e0665917cd75b671564d5c0163f13b77b
|
[
"Apache-2.0"
] | 17
|
2021-01-12T08:07:11.000Z
|
2022-03-03T22:59:04.000Z
|
src/pytools/api/__init__.py
|
BCG-Gamma/pytools
|
d7be703e0665917cd75b671564d5c0163f13b77b
|
[
"Apache-2.0"
] | 10
|
2021-01-08T17:04:39.000Z
|
2022-01-18T13:21:52.000Z
|
src/pytools/api/__init__.py
|
BCG-Gamma/pytools
|
d7be703e0665917cd75b671564d5c0163f13b77b
|
[
"Apache-2.0"
] | 1
|
2021-11-06T00:16:43.000Z
|
2021-11-06T00:16:43.000Z
|
"""
Basic tools for API development, supporting documentation, deprecation,
and run-time validation.
"""
from ._api import *
from ._doc_validator import *
| 22.142857
| 71
| 0.767742
| 19
| 155
| 6.105263
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135484
| 155
| 6
| 72
| 25.833333
| 0.865672
| 0.619355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
49568dfc6682b5664dca247e52d487278363e327
| 87
|
py
|
Python
|
faster_rcnn/da_head/build.py
|
Shuntw6096/swda-detectron2
|
6ca9cfc2487b979f38adff5cd9138f233a6578cd
|
[
"MIT"
] | 4
|
2021-09-16T10:29:35.000Z
|
2022-03-13T14:28:23.000Z
|
faster_rcnn/da_head/build.py
|
Shuntw6096/swda-detectron2
|
6ca9cfc2487b979f38adff5cd9138f233a6578cd
|
[
"MIT"
] | 2
|
2021-10-07T00:24:06.000Z
|
2022-03-04T10:17:16.000Z
|
faster_rcnn/da_head/build.py
|
Shuntw6096/swda-detectron2
|
6ca9cfc2487b979f38adff5cd9138f233a6578cd
|
[
"MIT"
] | null | null | null |
from detectron2.utils.registry import Registry

# Registry for domain-adaptation head modules, keyed by the name "DA_HEAD".
# Presumably implementations register themselves here and are looked up by
# name from the config — TODO confirm against the head builder code.
DA_HEAD_REGISTRY = Registry("DA_HEAD")
| 21.75
| 46
| 0.827586
| 12
| 87
| 5.75
| 0.583333
| 0.289855
| 0.405797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012658
| 0.091954
| 87
| 3
| 47
| 29
| 0.860759
| 0
| 0
| 0
| 0
| 0
| 0.08046
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
496fa00dc04f89e3ac9410efd5be93c554cb3c0b
| 207
|
py
|
Python
|
src/xsd_frontend/admin.py
|
minyiky/xSACdb
|
8c407e9a9da196750a66ad53613ad67c8c56e1c3
|
[
"MIT"
] | 2
|
2017-08-14T14:40:17.000Z
|
2019-02-07T13:10:23.000Z
|
src/xsd_frontend/admin.py
|
minyiky/xSACdb
|
8c407e9a9da196750a66ad53613ad67c8c56e1c3
|
[
"MIT"
] | 19
|
2016-02-07T18:02:53.000Z
|
2019-11-03T17:48:13.000Z
|
src/xsd_frontend/admin.py
|
minyiky/xSACdb
|
8c407e9a9da196750a66ad53613ad67c8c56e1c3
|
[
"MIT"
] | 4
|
2015-10-19T17:24:35.000Z
|
2021-05-12T07:30:32.000Z
|
from django.contrib import admin
from django.conf import settings

# Brand the Django admin with the club name from settings, at import time.
# NOTE(review): assumes settings.CLUB is dict-like with a 'name' key;
# .get() yields None (title "[xSACdb:None]") if the key is absent — confirm.
admin.site.site_title = '[xSACdb:{club_name}]'.format(club_name=settings.CLUB.get('name'))
admin.site.site_header = admin.site.site_title
| 25.875
| 90
| 0.78744
| 32
| 207
| 4.9375
| 0.46875
| 0.170886
| 0.246835
| 0.227848
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082126
| 207
| 7
| 91
| 29.571429
| 0.831579
| 0
| 0
| 0
| 0
| 0
| 0.117073
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4975c42d952a779b3fcb19093ebfe0bf41d11da2
| 359
|
py
|
Python
|
tests/test_api_api.py
|
SomeoneLixin/api-dock
|
3958a3a3286ae7f8802df9aba5ece2908ca4361e
|
[
"MIT"
] | 4
|
2018-05-07T15:39:17.000Z
|
2019-07-03T21:28:10.000Z
|
tests/test_api_api.py
|
SomeoneLixin/api-dock
|
3958a3a3286ae7f8802df9aba5ece2908ca4361e
|
[
"MIT"
] | 4
|
2020-09-05T10:57:19.000Z
|
2021-05-09T16:01:22.000Z
|
tests/test_api_api.py
|
SomeoneLixin/api-dock
|
3958a3a3286ae7f8802df9aba5ece2908ca4361e
|
[
"MIT"
] | 1
|
2018-05-09T07:57:03.000Z
|
2018-05-09T07:57:03.000Z
|
import unittest
from tests.base import BaseTestCase
class ApiApiTestCase(BaseTestCase):
    """Test cases for the API endpoints; every case is still an empty stub."""

    def test_get_apis_list(self):
        # TODO: not yet implemented.
        pass

    def test_new_api(self):
        # TODO: not yet implemented.
        pass

    def test_show_api(self):
        # TODO: not yet implemented.
        pass

    def test_edit_api(self):
        # TODO: not yet implemented.
        pass

    def test_delete_api(self):
        # TODO: not yet implemented.
        pass
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 14.958333
| 35
| 0.64624
| 46
| 359
| 4.630435
| 0.5
| 0.164319
| 0.206573
| 0.28169
| 0.253521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.278552
| 359
| 23
| 36
| 15.608696
| 0.822394
| 0
| 0
| 0.333333
| 0
| 0
| 0.022284
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0.133333
| 0
| 0.533333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
77073387dc748d0eeb7d8ca0097d8f9d7d5ef5e6
| 143
|
py
|
Python
|
administration/src/services/csrf_service.py
|
shivamvku/flakrfid
|
559198d23907eea6e87f38fac1c5fb5c2b6fbca8
|
[
"MIT"
] | null | null | null |
administration/src/services/csrf_service.py
|
shivamvku/flakrfid
|
559198d23907eea6e87f38fac1c5fb5c2b6fbca8
|
[
"MIT"
] | 1
|
2019-05-13T16:19:36.000Z
|
2019-05-19T11:21:22.000Z
|
administration/src/services/csrf_service.py
|
shivamvku/flakrfid
|
559198d23907eea6e87f38fac1c5fb5c2b6fbca8
|
[
"MIT"
] | null | null | null |
import random
from werkzeug.security import gen_salt
def generate_csrf():
    """Return a random CSRF token.

    The token is produced by werkzeug's gen_salt with a length that is a
    random multiple of 3 between 3 and 33.
    """
    salt_length = 3 * random.randint(1, 11)
    return gen_salt(salt_length)
| 17.875
| 38
| 0.734266
| 21
| 143
| 4.857143
| 0.761905
| 0.137255
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034188
| 0.181818
| 143
| 7
| 39
| 20.428571
| 0.837607
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
772229deaf0d68226422bc6fd149c39931dcb8d3
| 136
|
py
|
Python
|
Darlington/phase1/python Basic 1/day 14 solution/qtn6.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 6
|
2020-05-23T19:53:25.000Z
|
2021-05-08T20:21:30.000Z
|
Darlington/phase1/python Basic 1/day 14 solution/qtn6.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 8
|
2020-05-14T18:53:12.000Z
|
2020-07-03T00:06:20.000Z
|
Darlington/phase1/python Basic 1/day 14 solution/qtn6.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 39
|
2020-05-10T20:55:02.000Z
|
2020-09-12T17:40:59.000Z
|
# program to get the actual module object for a given object.
from inspect import getmodule
from math import sqrt

# inspect.getmodule returns the module in which `sqrt` was defined.
print(getmodule(sqrt))
| 27.2
| 60
| 0.808824
| 22
| 136
| 5
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 136
| 4
| 61
| 34
| 0.948276
| 0.433824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
773b1ab9684d2b559a76b0ce2381c18dc4cc45c2
| 95
|
py
|
Python
|
readability_baseline/utils/utils.py
|
SharvilN/Common-Readability-Prize
|
c92f8b3456610a092027fb1a287541cbecab410b
|
[
"MIT"
] | null | null | null |
readability_baseline/utils/utils.py
|
SharvilN/Common-Readability-Prize
|
c92f8b3456610a092027fb1a287541cbecab410b
|
[
"MIT"
] | null | null | null |
readability_baseline/utils/utils.py
|
SharvilN/Common-Readability-Prize
|
c92f8b3456610a092027fb1a287541cbecab410b
|
[
"MIT"
] | null | null | null |
def get_col_names(prefix: str, count: int):
    """Return ``count`` column names "<prefix>_0" … "<prefix>_<count-1>"."""
    make_name = "{}_{}".format
    return [make_name(prefix, index) for index in range(count)]
| 31.666667
| 50
| 0.673684
| 17
| 95
| 3.588235
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 95
| 3
| 50
| 31.666667
| 0.7625
| 0
| 0
| 0
| 0
| 0
| 0.126316
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
77406b64b1ef7217814ec3a364ae5dfae74ee117
| 818
|
py
|
Python
|
data/test.py
|
Belousk/YalLessonOrm
|
f74f9088d0710a4ab29b8a2758aba61fd9d539fb
|
[
"BSD-3-Clause"
] | null | null | null |
data/test.py
|
Belousk/YalLessonOrm
|
f74f9088d0710a4ab29b8a2758aba61fd9d539fb
|
[
"BSD-3-Clause"
] | null | null | null |
data/test.py
|
Belousk/YalLessonOrm
|
f74f9088d0710a4ab29b8a2758aba61fd9d539fb
|
[
"BSD-3-Clause"
] | null | null | null |
# Smoke-test script for the /api/v2/jobs endpoints of a locally running server.
# NOTE(review): `put` is imported but only used in the commented-out line below.
from requests import get, post, delete, put

# Fetch individual jobs — presumably an existing id, a missing id, and a
# non-integer id, to exercise the server's error responses (TODO confirm).
print(get('http://localhost:5000/api/v2/jobs/2').json())
print(get('http://localhost:5000/api/v2/jobs/666').json())
print(get('http://localhost:5000/api/v2/jobs/g').json())
print(post('http://localhost:5000/api/v2/jobs').json())  # Empty request
print(post('http://localhost:5000/api/v2/jobs',
           json={'job': 'Loh'}).json())  # Bad request
print(post('http://localhost:5000/api/v2/jobs',
           json={'job': 'Learn English', 'work_size': 10, 'collaborators': '1, 2, 3', 'is_finished': False,
                 'team_leader': 2}).json())  # Success request
# Delete a missing id and then an existing one.
print(delete('http://localhost:5000/api/v2/jobs/999').json())
print(delete('http://localhost:5000/api/v2/jobs/9').json())
# print(put('http://localhost:5000/api/v2/jobs/2', json={'job': 'S**k d**k'}))
| 43.052632
| 107
| 0.638142
| 125
| 818
| 4.152
| 0.336
| 0.225434
| 0.294798
| 0.346821
| 0.680154
| 0.680154
| 0.680154
| 0.680154
| 0.410405
| 0.188825
| 0
| 0.082531
| 0.111247
| 818
| 18
| 108
| 45.444444
| 0.631362
| 0.144254
| 0
| 0.166667
| 0
| 0
| 0.505036
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.083333
| 0
| 0.083333
| 0.666667
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
775f09a5879b55ec31c0cc774613efc4f38632f6
| 73
|
py
|
Python
|
celery_once/backends/__init__.py
|
alkymi-io/celery-once
|
adcb91d393569703b560c863305746b2e0e12d28
|
[
"BSD-2-Clause"
] | 388
|
2017-06-02T00:10:24.000Z
|
2022-03-31T12:49:09.000Z
|
celery_once/backends/__init__.py
|
alkymi-io/celery-once
|
adcb91d393569703b560c863305746b2e0e12d28
|
[
"BSD-2-Clause"
] | 82
|
2017-06-05T11:16:15.000Z
|
2021-11-25T12:59:03.000Z
|
celery_once/backends/__init__.py
|
alkymi-io/celery-once
|
adcb91d393569703b560c863305746b2e0e12d28
|
[
"BSD-2-Clause"
] | 63
|
2017-05-12T17:39:09.000Z
|
2021-11-23T19:37:53.000Z
|
# -*- coding: utf-8 -*-
from .file import File
from .redis import Redis
| 14.6
| 24
| 0.657534
| 11
| 73
| 4.363636
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016949
| 0.191781
| 73
| 4
| 25
| 18.25
| 0.79661
| 0.287671
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
62068a73ecb1a30d961062c33c8247d3a5bb9b4d
| 167
|
py
|
Python
|
tests/fixtures/yarden.py
|
NickleDave/conbirt
|
71db6c6fd68dfef1bdbdcfacd8b2a16b21b86089
|
[
"BSD-3-Clause"
] | null | null | null |
tests/fixtures/yarden.py
|
NickleDave/conbirt
|
71db6c6fd68dfef1bdbdcfacd8b2a16b21b86089
|
[
"BSD-3-Clause"
] | 3
|
2018-12-16T17:57:22.000Z
|
2018-12-16T20:12:33.000Z
|
tests/fixtures/yarden.py
|
NickleDave/conbirt
|
71db6c6fd68dfef1bdbdcfacd8b2a16b21b86089
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
@pytest.fixture
def yarden_annot_mat(test_data_root):
    """Path to the sample yarden .mat annotation file.

    Assumes ``test_data_root`` is a pathlib.Path (uses the ``/`` operator) —
    TODO confirm against the fixture that provides it.
    """
    return test_data_root / 'audio_wav_annot_yarden' / 'llb16_annotation_May_2019_alexa_4TF.mat'
| 23.857143
| 96
| 0.826347
| 26
| 167
| 4.769231
| 0.730769
| 0.129032
| 0.193548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046667
| 0.101796
| 167
| 6
| 97
| 27.833333
| 0.78
| 0
| 0
| 0
| 0
| 0
| 0.365269
| 0.365269
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
62084cf2ae86efe4ebb5487651b01743d5514853
| 119
|
py
|
Python
|
katas/kyu_7/number_star_ladder.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
katas/kyu_7/number_star_ladder.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
katas/kyu_7/number_star_ladder.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
# Bound format method: OUTPUT(stars, num) -> "1" + stars + num.
OUTPUT = '1{}{}'.format

def pattern(n):
    """Return the number/star ladder for ``n`` rows, joined by newlines.

    Row a (0-based) is "1" + a stars + the number a+1, except row 0 which
    is just "1".  Returns '' for n <= 0.
    """
    # BUGFIX: xrange is Python-2 only (NameError on Python 3); range
    # behaves identically here on both versions.
    return '\n'.join(OUTPUT('*' * a, a + 1 if a else '') for a in range(n))
| 19.833333
| 76
| 0.537815
| 21
| 119
| 3.047619
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021505
| 0.218487
| 119
| 5
| 77
| 23.8
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.067227
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
6215b304af26eb64665d0c01ee70077520af8402
| 455
|
py
|
Python
|
SocialRobotCustom/python/social_bot/envs/__init__.py
|
jesbu1/h-baselines
|
f6f775bb18de22527f2d01d73bd733ed2e435ba3
|
[
"MIT"
] | 74
|
2019-02-26T03:05:18.000Z
|
2022-02-11T06:56:08.000Z
|
SocialRobotCustom/python/social_bot/envs/__init__.py
|
jesbu1/h-baselines
|
f6f775bb18de22527f2d01d73bd733ed2e435ba3
|
[
"MIT"
] | 66
|
2019-02-09T23:52:38.000Z
|
2022-01-16T01:27:52.000Z
|
SocialRobotCustom/python/social_bot/envs/__init__.py
|
jesbu1/h-baselines
|
f6f775bb18de22527f2d01d73bd733ed2e435ba3
|
[
"MIT"
] | 25
|
2019-03-08T02:40:19.000Z
|
2021-08-12T02:43:41.000Z
|
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
from .simple_navigation import SimpleNavigation
from .simple_navigation import SimpleNavigationDiscreteAction
from .simple_navigation import SimpleNavigationLanguage
from .simple_navigation import SimpleNavigationSelfStatesLanguage
from .cartpole import CartPole
from .pr2 import Pr2Gripper
from .play_ground import PlayGround
from .icub_walk import ICubWalk
from .icub_walk import ICubWalkPID
| 37.916667
| 65
| 0.868132
| 51
| 455
| 7.607843
| 0.509804
| 0.103093
| 0.206186
| 0.268041
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01467
| 0.101099
| 455
| 11
| 66
| 41.363636
| 0.933985
| 0.125275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
623f56c8f0c91bc3784a19af70943223e5e8fcd7
| 10,100
|
py
|
Python
|
src/footprint/azext_footprint/tests/latest/test_footprint_scenario.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/footprint/azext_footprint/tests/latest/test_footprint_scenario.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/footprint/azext_footprint/tests/latest/test_footprint_scenario.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import os
from azure.cli.testsdk import ScenarioTest
from .. import try_manual, raise_if, calc_coverage
from azure.cli.testsdk import ResourceGroupPreparer
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
@try_manual
def setup(test, rg):
pass
# EXAMPLE: /profiles/put/Create or update a Footprint profile.
@try_manual
def step__profiles_put_create_or_update_a_footprint_profile_(test, rg):
test.cmd('az footprint profile create '
'--location "westus2" '
'--measurement-count 3 '
'--start-delay-ms 5000 '
'--tags key1="value1" key2="value2" '
'--name "{myProfile}" '
'--resource-group "{rg}"',
checks=[])
# EXAMPLE: /profiles/get/Get the details of a Footprint profile.
@try_manual
def step__profiles_get_get_the_details_of_a_footprint_profile_(test, rg):
test.cmd('az footprint profile show '
'--name "{myProfile}" '
'--resource-group "{rg}"',
checks=[])
# EXAMPLE: /profiles/get/List all Footprint profiles under a Resource Group.
@try_manual
def step__profiles_get_list_all_footprint_profiles_under_a_resource_group_(test, rg):
test.cmd('az footprint profile list '
'--resource-group "{rg}"',
checks=[])
# EXAMPLE: /profiles/get/List all Footprint profiles under a subscription.
@try_manual
def step__profiles_get_list_all_footprint_profiles_under_a_subscription_(test, rg):
test.cmd('az footprint profile list '
'-g ""',
checks=[])
# EXAMPLE: /profiles/patch/Update a Footprint profile.
@try_manual
def step__profiles_patch_update_a_footprint_profile_(test, rg):
test.cmd('az footprint profile update '
'--tags key1="value1" key2="value2" '
'--name "{myProfile}" '
'--resource-group "{rg}"',
checks=[])
# EXAMPLE: /experiments/put/Create or update an experiment.
@try_manual
def step__experiments_put_create_or_update_an_experiment_(test, rg):
test.cmd('az footprint experiment create '
'--name "{myExperiment}" '
'--description "An experiment description." '
'--profile-name "{myProfile}" '
'--resource-group "{rg}"',
checks=[])
# EXAMPLE: /experiments/get/Get the details of an experiment.
@try_manual
def step__experiments_get_get_the_details_of_an_experiment_(test, rg):
test.cmd('az footprint experiment show '
'--name "{myExperiment}" '
'--profile-name "{myProfile}" '
'--resource-group "{rg}"',
checks=[])
# EXAMPLE: /experiments/get/List all experiments under a Footprint profile.
@try_manual
def step__experiments_get_list_all_experiments_under_a_footprint_profile_(test, rg):
test.cmd('az footprint experiment list '
'--profile-name "{myProfile}" '
'--resource-group "{rg}"',
checks=[])
# EXAMPLE: /measurementEndpoints/put/Create or update a measurement endpoint.
@try_manual
def step__measurementendpoints_put_create_or_update_a_measurement_endpoint_(test, rg):
test.cmd('az footprint measurement-endpoint create '
'--name "{myMeasurementEndpoint}" '
'--endpoint "www.contoso.com" '
'--experiment-id "664cdec4f07d4e1083c9b3969ee2c49b" '
'--measurement-type 2 '
'--object-path "/trans.gif" '
'--weight 10 '
'--profile-name "{myProfile}" '
'--resource-group "{rg}"',
checks=[])
# EXAMPLE: /measurementEndpoints/get/Get the details of a measurement endpoint.
@try_manual
def step__measurementendpoints_get_get_the_details_of_a_measurement_endpoint_(test, rg):
test.cmd('az footprint measurement-endpoint show '
'--name "{myMeasurementEndpoint}" '
'--profile-name "{myProfile}" '
'--resource-group "{rg}"',
checks=[])
# EXAMPLE: /measurementEndpoints/get/List all the measurement endpoints under a Footprint profile.
@try_manual
def step__measurementendpoints_get_list_all_the_measurement_endpoints_under_a_footprint_profile_(test, rg):
test.cmd('az footprint measurement-endpoint list '
'--profile-name "{myProfile}" '
'--resource-group "{rg}"',
checks=[])
# EXAMPLE: /measurementEndpointConditions/put/Create or update a measurement endpoint condition.
@try_manual
def step__measurementendpointconditions_put_create_or_update_a_measurement_endpoint_condition_(test, rg):
test.cmd('az footprint measurement-endpoint-condition create '
'--name "condition0" '
'--endpoint-name "{myMeasurementEndpoint}" '
'--constant "Edge-Prod-WST" '
'--operator "MatchValueIgnoreCasing" '
'--variable "X-FD-EdgeEnvironment" '
'--profile-name "{myProfile}" '
'--resource-group "{rg}"',
checks=[])
# EXAMPLE: /measurementEndpointConditions/get/Get the details of a measurement endpoint condition.
@try_manual
def step__measurementendpointconditions_get_get_the_details_of_a_measurement_endpoint_condition_(test, rg):
test.cmd('az footprint measurement-endpoint-condition show '
'--name "condition0" '
'--endpoint-name "{myMeasurementEndpoint}" '
'--profile-name "{myProfile}" '
'--resource-group "{rg}"',
checks=[])
# EXAMPLE: /measurementEndpointConditions/get/List all conditions under a measurement endpoint.
@try_manual
def step__measurementendpointconditions_get_list_all_conditions_under_a_measurement_endpoint_(test, rg):
test.cmd('az footprint measurement-endpoint-condition list '
'--endpoint-name "{myMeasurementEndpoint}" '
'--profile-name "{myProfile}" '
'--resource-group "{rg}"',
checks=[])
# EXAMPLE: /measurementEndpointConditions/delete/Delete a measurement endpoint condition.
@try_manual
def step__measurementendpointconditions_delete_delete_a_measurement_endpoint_condition_(test, rg):
test.cmd('az footprint measurement-endpoint-condition delete -y '
'--name "condition0" '
'--endpoint-name "{myMeasurementEndpoint}" '
'--profile-name "{myProfile}" '
'--resource-group "{rg}"',
checks=[])
# EXAMPLE: /experiments/delete/Delete an experiment.
@try_manual
def step__experiments_delete_delete_an_experiment_(test, rg):
test.cmd('az footprint experiment delete -y '
'--name "{myExperiment}" '
'--profile-name "{myProfile}" '
'--resource-group "{rg}"',
checks=[])
# EXAMPLE: /measurementEndpoints/delete/Delete a measurement endpoint.
@try_manual
def step__measurementendpoints_delete_delete_a_measurement_endpoint_(test, rg):
test.cmd('az footprint measurement-endpoint delete -y '
'--name "{myMeasurementEndpoint}" '
'--profile-name "{myProfile}" '
'--resource-group "{rg}"',
checks=[])
# EXAMPLE: /profiles/delete/Delete a Footprint profile.
@try_manual
def step__profiles_delete_delete_a_footprint_profile_(test, rg):
test.cmd('az footprint profile delete -y '
'--name "{myProfile}" '
'--resource-group "{rg}"',
checks=[])
@try_manual
def cleanup(test, rg):
pass
@try_manual
def call_scenario(test, rg):
setup(test, rg)
step__profiles_put_create_or_update_a_footprint_profile_(test, rg)
step__profiles_get_get_the_details_of_a_footprint_profile_(test, rg)
step__profiles_get_list_all_footprint_profiles_under_a_resource_group_(test, rg)
step__profiles_get_list_all_footprint_profiles_under_a_subscription_(test, rg)
step__profiles_patch_update_a_footprint_profile_(test, rg)
step__experiments_put_create_or_update_an_experiment_(test, rg)
step__experiments_get_get_the_details_of_an_experiment_(test, rg)
step__experiments_get_list_all_experiments_under_a_footprint_profile_(test, rg)
step__measurementendpoints_put_create_or_update_a_measurement_endpoint_(test, rg)
step__measurementendpoints_get_get_the_details_of_a_measurement_endpoint_(test, rg)
step__measurementendpoints_get_list_all_the_measurement_endpoints_under_a_footprint_profile_(test, rg)
step__measurementendpointconditions_put_create_or_update_a_measurement_endpoint_condition_(test, rg)
step__measurementendpointconditions_get_get_the_details_of_a_measurement_endpoint_condition_(test, rg)
step__measurementendpointconditions_get_list_all_conditions_under_a_measurement_endpoint_(test, rg)
step__measurementendpointconditions_delete_delete_a_measurement_endpoint_condition_(test, rg)
step__experiments_delete_delete_an_experiment_(test, rg)
step__measurementendpoints_delete_delete_a_measurement_endpoint_(test, rg)
step__profiles_delete_delete_a_footprint_profile_(test, rg)
cleanup(test, rg)
@try_manual
class FootprintMonitoringManagementClientScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='clitestfootprint_rgName'[:7], key='rg', parameter_name='rg')
def test_footprint(self, rg):
self.kwargs.update({
'myProfile': self.create_random_name(prefix='fpProfile1'[:5], length=10),
'myMeasurementEndpoint': self.create_random_name(prefix='endpoint1'[:4], length=9),
'myExperiment': self.create_random_name(prefix='fpExp1'[:2], length=9),
})
call_scenario(self, rg)
calc_coverage(__file__)
raise_if()
| 39.453125
| 107
| 0.683267
| 1,103
| 10,100
| 5.883046
| 0.142339
| 0.03791
| 0.038835
| 0.044383
| 0.815996
| 0.787178
| 0.776237
| 0.759131
| 0.701803
| 0.564186
| 0
| 0.006152
| 0.195248
| 10,100
| 255
| 108
| 39.607843
| 0.7922
| 0.173762
| 0
| 0.516854
| 0
| 0
| 0.287604
| 0.050379
| 0
| 0
| 0
| 0
| 0
| 1
| 0.123596
| false
| 0.011236
| 0.022472
| 0
| 0.151685
| 0.207865
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
655d33076ebef068d4efd9a513cb0ebe370e7044
| 103
|
py
|
Python
|
kytos/utils/exceptions.py
|
josemauro/kytos-utils
|
03014ab0960fae31892dda2759de1372d50a1b2d
|
[
"MIT"
] | 3
|
2017-05-11T18:18:47.000Z
|
2021-11-16T11:37:01.000Z
|
kytos/utils/exceptions.py
|
josemauro/kytos-utils
|
03014ab0960fae31892dda2759de1372d50a1b2d
|
[
"MIT"
] | 154
|
2017-02-02T21:48:08.000Z
|
2021-04-29T20:19:58.000Z
|
kytos/utils/exceptions.py
|
josemauro/kytos-utils
|
03014ab0960fae31892dda2759de1372d50a1b2d
|
[
"MIT"
] | 32
|
2017-02-17T18:15:15.000Z
|
2022-02-16T23:28:15.000Z
|
"""Kytos utils exceptions."""
class KytosException(Exception):
"""Kytos utils main exception."""
| 17.166667
| 37
| 0.68932
| 10
| 103
| 7.1
| 0.7
| 0.28169
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145631
| 103
| 5
| 38
| 20.6
| 0.806818
| 0.495146
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
6578b6bc1c78e27ace6f5fcd0ef737987c0d1355
| 2,199
|
py
|
Python
|
amrMusic.py
|
HeyImTG/Amazing-Mirror-Randomizer
|
11d846308b7ab3b40cfe1365676eb57a1c938e3c
|
[
"MIT"
] | 12
|
2019-06-05T16:02:13.000Z
|
2022-02-09T22:19:52.000Z
|
amrMusic.py
|
Unovamata/Amazing-Mirror-Randomizer
|
4504cdffda39561322af452f2465766aa021bbf9
|
[
"MIT"
] | 1
|
2020-09-20T22:30:26.000Z
|
2020-09-20T22:30:26.000Z
|
amrMusic.py
|
Unovamata/Amazing-Mirror-Randomizer
|
4504cdffda39561322af452f2465766aa021bbf9
|
[
"MIT"
] | 3
|
2020-08-06T17:21:07.000Z
|
2021-11-07T22:20:02.000Z
|
#This script randomizes the game's music.
#Music is randomized into two different groups: looped and non-looped. You don't want to get the victory dance theme as your Olive Ocean music...
import sys
import os
import random
#==================================================
def writeMusicToROM(romFile,musicList,musicAdd):
for x in range(len(musicList)):
romFile.seek(musicAdd[x])
romFile.write(musicList[x].to_bytes(4,'big'))
def randomizeMusic(romFile,randomMode):
print("Randomizing music...")
#First shuffle the looped music.
if randomMode == "Shuffle Music":
musicLoopedList = [1422774280, 4107259912, 2362888200, 1155321864, 1357172744, 3975008264, 1090244616, 3422802184, 3288977672, 3692286216, 3223703816, 137548040, 808898824, 2957168904, 3830436104, 677302536, 1752027400, 679334152, 3766735112, 1820971272, 277795080, 1620168968, 1016320264, 3969437960, 2158153992, 3770011912, 2830749960, 1891422472]
else:
musicLoopedList = [1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608, 1080537608]
musicLoopedAdd = [11686888, 11686896, 11686904, 11686912, 11686920, 11686928, 11686936, 11686944, 11686952, 11686960, 11686968, 11686976, 11686984, 11686992, 11687000, 11687008, 11687016, 11687024, 11687048, 11687072, 11687080, 11687088, 11687096, 11687104, 11687112, 11687160, 11687176, 11687184]
random.shuffle(musicLoopedList)
writeMusicToROM(romFile,musicLoopedList,musicLoopedAdd)
#Then shuffle the non-looped music.
if randomMode == "Shuffle Music":
musicNonloopedList = [2222903560, 1618989320, 75944200, 948424968, 614846728, 78106888, 3634876680, 2024329480, 1219088648, 3770077448]
musicNonloopedAdd = [11687032, 11687040, 11687056, 11687064, 11687120, 11687128, 11687136, 11687144, 11687152, 11687168]
random.shuffle(musicNonloopedList)
writeMusicToROM(romFile,musicNonloopedList,musicNonloopedAdd)
#==================================================
| 70.935484
| 357
| 0.752615
| 212
| 2,199
| 7.801887
| 0.650943
| 0.326481
| 0.471584
| 0.604595
| 0.211608
| 0.211608
| 0.169287
| 0.169287
| 0.169287
| 0.169287
| 0
| 0.489482
| 0.113688
| 2,199
| 31
| 358
| 70.935484
| 0.359159
| 0.158709
| 0
| 0.095238
| 0
| 0
| 0.026997
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.142857
| 0
| 0.238095
| 0.047619
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
02d96e7f5aa396219fa8bdaf698d3c1f0d761d2c
| 126
|
py
|
Python
|
0x04-python-more_data_structures/101-square_matrix_map.py
|
BennettDixon/holbertonschool-higher_level_programming
|
3fbcd5e009548aab5539ce8610b4113f005964c4
|
[
"MIT"
] | 1
|
2022-02-07T12:13:18.000Z
|
2022-02-07T12:13:18.000Z
|
0x04-python-more_data_structures/101-square_matrix_map.py
|
BennettDixon/holbertonschool-higher_level_programming
|
3fbcd5e009548aab5539ce8610b4113f005964c4
|
[
"MIT"
] | null | null | null |
0x04-python-more_data_structures/101-square_matrix_map.py
|
BennettDixon/holbertonschool-higher_level_programming
|
3fbcd5e009548aab5539ce8610b4113f005964c4
|
[
"MIT"
] | 1
|
2021-12-06T18:15:54.000Z
|
2021-12-06T18:15:54.000Z
|
#!/usr/bin/python3
def square_matrix_map(matrix=[]):
return list(map(lambda row: list(map(lambda x: x**2, row)), matrix))
| 31.5
| 72
| 0.68254
| 21
| 126
| 4
| 0.619048
| 0.166667
| 0.309524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018018
| 0.119048
| 126
| 3
| 73
| 42
| 0.738739
| 0.134921
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
02df598643dbcd254f20b9cf858ff8710fe2eee5
| 22
|
py
|
Python
|
Python/Hello India.py
|
kennethsequeira/Hello-world
|
464227bc7d9778a4a2a4044fe415a629003ea77f
|
[
"MIT"
] | 2
|
2020-05-14T16:13:14.000Z
|
2020-05-14T16:13:19.000Z
|
Python/Hello India.py
|
kennethsequeira/Hello-world
|
464227bc7d9778a4a2a4044fe415a629003ea77f
|
[
"MIT"
] | 5
|
2018-10-27T16:54:11.000Z
|
2018-10-27T17:04:37.000Z
|
Python/Hello India.py
|
kennethsequeira/Hello-world
|
464227bc7d9778a4a2a4044fe415a629003ea77f
|
[
"MIT"
] | 7
|
2019-09-11T07:17:32.000Z
|
2019-09-25T12:23:52.000Z
|
print("Hello! India")
| 11
| 21
| 0.681818
| 3
| 22
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 22
| 1
| 22
| 22
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
02efca95b611d2d83746f5e03b7e4417b7a4fb51
| 2,337
|
py
|
Python
|
examples/gaussian/functions/model.py
|
lsawade/nnodes
|
0f5caf672da138104039ed85a224cc4067e95da1
|
[
"MIT"
] | null | null | null |
examples/gaussian/functions/model.py
|
lsawade/nnodes
|
0f5caf672da138104039ed85a224cc4067e95da1
|
[
"MIT"
] | null | null | null |
examples/gaussian/functions/model.py
|
lsawade/nnodes
|
0f5caf672da138104039ed85a224cc4067e95da1
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
def write_model(m, outdir, it, ls=None):
"""Takes in model vector, modldirectory, iteration and linesearch number
and write model to modl directory.
Parameters
----------
m : ndarray
modelvector
modldir : str
model directory
it : int
iteration number
ls : int, optional
linesearch number
"""
# Get dir
modldir = os.path.join(outdir, 'modl')
# Create filename that contains both iteration and linesearch number
if ls is not None:
fname = f"m_it{it:05d}_ls{ls:05d}.npy"
else:
fname = f"m_it{it:05d}.npy"
file = os.path.join(modldir, fname)
np.save(file, m)
def read_model(outdir, it, ls=None):
"""Reads model vector
Parameters
----------
modldir : str
model directory
it : int
iteration number
ls : int, optional
linesearch number
Returns
-------
ndarray
model vector
"""
# Get dir
modldir = os.path.join(outdir, 'modl')
if ls is not None:
fname = f"m_it{it:05d}_ls{ls:05d}.npy"
else:
fname = f"m_it{it:05d}.npy"
file = os.path.join(modldir, fname)
m = np.load(file)
return m
def write_scaling(s, outdir):
# Write scaling to metadir
metadir = os.path.join(outdir, 'meta')
# Create filename that contains both iteration and linesearch number
file = os.path.join(metadir, "scaling.npy")
np.save(file, s)
def read_scaling(outdir):
# Write scaling to metadir
metadir = os.path.join(outdir, 'meta')
# Create filename that contains both iteration and linesearch number
file = os.path.join(metadir, "scaling.npy")
# Return the scaling vector
return np.load(file)
def write_names(mnames, outdir):
# Write scaling to metadir
metadir = os.path.join(outdir, 'meta')
# Create filename that contains both iteration and linesearch number
file = os.path.join(metadir, "model_names.npy")
np.save(file, mnames)
def read_names(outdir):
# Write scaling to metadir
metadir = os.path.join(outdir, 'meta')
# Create filename that contains both iteration and linesearch number
file = os.path.join(metadir, "model_names.npy")
# Return the scaling vector
return np.load(file)
| 21.638889
| 76
| 0.627728
| 315
| 2,337
| 4.612698
| 0.196825
| 0.049553
| 0.082588
| 0.115623
| 0.751549
| 0.751549
| 0.751549
| 0.751549
| 0.706125
| 0.613902
| 0
| 0.007001
| 0.266581
| 2,337
| 107
| 77
| 21.841122
| 0.840723
| 0.407788
| 0
| 0.628571
| 0
| 0
| 0.128674
| 0.042891
| 0
| 0
| 0
| 0
| 0
| 1
| 0.171429
| false
| 0
| 0.057143
| 0
| 0.314286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b83a2e9fa37fff5a7f2a2f7c7fe38e6b79bf6c91
| 3,594
|
py
|
Python
|
libica/openapi/libtes/__init__.py
|
umccr-illumina/libica
|
916d27eea499f29bee590268b84208effb0cc576
|
[
"MIT"
] | null | null | null |
libica/openapi/libtes/__init__.py
|
umccr-illumina/libica
|
916d27eea499f29bee590268b84208effb0cc576
|
[
"MIT"
] | 4
|
2021-11-15T10:47:51.000Z
|
2022-02-22T04:43:20.000Z
|
libica/openapi/libtes/__init__.py
|
umccr-illumina/libica
|
916d27eea499f29bee590268b84208effb0cc576
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# flake8: noqa
"""
Task Execution Service
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
__version__ = "1.0.0"
# import apis into sdk package
from libica.openapi.libtes.api.task_runs_api import TaskRunsApi
from libica.openapi.libtes.api.task_versions_api import TaskVersionsApi
from libica.openapi.libtes.api.tasks_api import TasksApi
# import ApiClient
from libica.openapi.libtes.api_client import ApiClient
from libica.openapi.libtes.configuration import Configuration
from libica.openapi.libtes.exceptions import OpenApiException
from libica.openapi.libtes.exceptions import ApiTypeError
from libica.openapi.libtes.exceptions import ApiValueError
from libica.openapi.libtes.exceptions import ApiKeyError
from libica.openapi.libtes.exceptions import ApiException
# import models into sdk package
from libica.openapi.libtes.models.container_state import ContainerState
from libica.openapi.libtes.models.container_state_running import ContainerStateRunning
from libica.openapi.libtes.models.container_state_terminated import ContainerStateTerminated
from libica.openapi.libtes.models.container_state_waiting import ContainerStateWaiting
from libica.openapi.libtes.models.container_status import ContainerStatus
from libica.openapi.libtes.models.create_task_request import CreateTaskRequest
from libica.openapi.libtes.models.create_task_run_request import CreateTaskRunRequest
from libica.openapi.libtes.models.create_task_version_request import CreateTaskVersionRequest
from libica.openapi.libtes.models.credentials import Credentials
from libica.openapi.libtes.models.environment import Environment
from libica.openapi.libtes.models.error_response import ErrorResponse
from libica.openapi.libtes.models.execution import Execution
from libica.openapi.libtes.models.heartbeat_task_run_request import HeartbeatTaskRunRequest
from libica.openapi.libtes.models.image import Image
from libica.openapi.libtes.models.input_mount_mapping_with_creds import InputMountMappingWithCreds
from libica.openapi.libtes.models.input_stream_settings import InputStreamSettings
from libica.openapi.libtes.models.launch_task_request import LaunchTaskRequest
from libica.openapi.libtes.models.mount_mapping_with_creds import MountMappingWithCreds
from libica.openapi.libtes.models.resources import Resources
from libica.openapi.libtes.models.system_files import SystemFiles
from libica.openapi.libtes.models.task import Task
from libica.openapi.libtes.models.task_run import TaskRun
from libica.openapi.libtes.models.task_run_heartbeat import TaskRunHeartbeat
from libica.openapi.libtes.models.task_run_logs import TaskRunLogs
from libica.openapi.libtes.models.task_run_summary import TaskRunSummary
from libica.openapi.libtes.models.task_run_summary_paged_items import TaskRunSummaryPagedItems
from libica.openapi.libtes.models.task_summary import TaskSummary
from libica.openapi.libtes.models.task_summary_paged_items import TaskSummaryPagedItems
from libica.openapi.libtes.models.task_version import TaskVersion
from libica.openapi.libtes.models.task_version_summary import TaskVersionSummary
from libica.openapi.libtes.models.task_version_summary_paged_items import TaskVersionSummaryPagedItems
from libica.openapi.libtes.models.update_task_request import UpdateTaskRequest
from libica.openapi.libtes.models.update_task_version_request import UpdateTaskVersionRequest
| 53.641791
| 124
| 0.8734
| 459
| 3,594
| 6.67756
| 0.246187
| 0.140294
| 0.238499
| 0.322675
| 0.537031
| 0.415334
| 0.269494
| 0.058728
| 0
| 0
| 0
| 0.002694
| 0.070395
| 3,594
| 66
| 125
| 54.454545
| 0.914696
| 0.093211
| 0
| 0
| 1
| 0
| 0.001548
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.977778
| 0
| 0.977778
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b85b3a7e2a373044c37fb54b06a34be6c6df6e8c
| 59
|
py
|
Python
|
lib/toute/utils/__init__.py
|
luiscastilho/toute
|
56207d3d92b8bc1066e115c285cf79f96be3e249
|
[
"MIT"
] | 3
|
2021-01-04T02:12:33.000Z
|
2021-09-22T14:41:39.000Z
|
lib/toute/utils/__init__.py
|
luiscastilho/toute
|
56207d3d92b8bc1066e115c285cf79f96be3e249
|
[
"MIT"
] | 6
|
2020-08-04T22:34:12.000Z
|
2020-10-10T12:40:14.000Z
|
lib/toute/utils/__init__.py
|
luiscastilho/toute
|
56207d3d92b8bc1066e115c285cf79f96be3e249
|
[
"MIT"
] | 1
|
2020-08-04T21:59:43.000Z
|
2020-08-04T21:59:43.000Z
|
from toute.utils.validation import validate_client # noqa
| 29.5
| 58
| 0.830508
| 8
| 59
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118644
| 59
| 1
| 59
| 59
| 0.923077
| 0.067797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b8602a0f1b52472cc7d96f538f65173b01180151
| 99
|
py
|
Python
|
day2/strings.py
|
anishLearnsToCode/python-workshop-4
|
d34c312d17c0d4194b2a1d269b8c0e2a56a04699
|
[
"MIT"
] | 4
|
2020-07-15T13:47:10.000Z
|
2021-09-15T20:28:50.000Z
|
day2/strings.py
|
anishLearnsToCode/python-workshop-4
|
d34c312d17c0d4194b2a1d269b8c0e2a56a04699
|
[
"MIT"
] | null | null | null |
day2/strings.py
|
anishLearnsToCode/python-workshop-4
|
d34c312d17c0d4194b2a1d269b8c0e2a56a04699
|
[
"MIT"
] | 2
|
2020-07-18T03:28:03.000Z
|
2020-11-01T00:56:02.000Z
|
"""
Strings are immutable
range object is immutable
tuple is immutable
lists are not immutable
"""
| 14.142857
| 25
| 0.777778
| 14
| 99
| 5.5
| 0.642857
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161616
| 99
| 7
| 26
| 14.142857
| 0.927711
| 0.919192
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b87e95a6fc0694a8580cdde5cbe6465d4027e862
| 1,107
|
py
|
Python
|
tests/test_mediainfo.py
|
ratoaq2/knowit
|
e7cc0d786fafdb9dba99b95a1cc95c02f84f0b5b
|
[
"MIT"
] | 21
|
2016-10-15T13:49:16.000Z
|
2021-06-14T14:42:57.000Z
|
tests/test_mediainfo.py
|
ratoaq2/knowit
|
e7cc0d786fafdb9dba99b95a1cc95c02f84f0b5b
|
[
"MIT"
] | 35
|
2016-11-18T17:08:38.000Z
|
2021-11-26T09:36:35.000Z
|
tests/test_mediainfo.py
|
ratoaq2/knowit
|
e7cc0d786fafdb9dba99b95a1cc95c02f84f0b5b
|
[
"MIT"
] | 5
|
2016-11-23T23:39:52.000Z
|
2021-02-27T19:18:27.000Z
|
import pytest
from tests import mediafiles
from knowit import know
from . import assert_expected, id_func
@pytest.mark.parametrize('media', mediafiles.get_json_media('mediainfo'), ids=id_func)
def test_mediainfo_provider(mediainfo, media, options):
# Given
mediainfo[media.video_path] = media.input_data
# When
actual = know(media.video_path, options)
# Then
assert_expected(media.expected_data, actual, options)
@pytest.mark.parametrize('media', mediafiles.get_real_media('mediainfo'), ids=id_func)
def test_mediainfo_provider_real_media(media, options):
# Given
options['provider'] = 'mediainfo'
# When
actual = know(media.video_path, options)
# Then
assert_expected(media.expected_data, actual, options)
@pytest.mark.parametrize('media', mediafiles.get_real_media('mediainfo'), ids=id_func)
def test_mediainfo_provider_real_media_cli(mediainfo_cli, media, options):
# Given
options['provider'] = 'mediainfo'
# When
actual = know(media.video_path, options)
# Then
assert_expected(media.expected_data, actual, options)
| 25.159091
| 86
| 0.738934
| 139
| 1,107
| 5.640288
| 0.230216
| 0.071429
| 0.071429
| 0.09949
| 0.784439
| 0.784439
| 0.734694
| 0.734694
| 0.734694
| 0.674745
| 0
| 0
| 0.153568
| 1,107
| 43
| 87
| 25.744186
| 0.836713
| 0.042457
| 0
| 0.526316
| 0
| 0
| 0.07245
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 1
| 0.157895
| false
| 0
| 0.210526
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b89960c5facd01658fe0f449edb10d527791d375
| 3,998
|
py
|
Python
|
notebooks/tests/test_utils.py
|
grchristensen/avpd
|
f7617844ae454a93825aa231e04c125cb4e58a20
|
[
"Apache-2.0"
] | null | null | null |
notebooks/tests/test_utils.py
|
grchristensen/avpd
|
f7617844ae454a93825aa231e04c125cb4e58a20
|
[
"Apache-2.0"
] | 9
|
2021-03-04T20:29:54.000Z
|
2021-03-31T22:03:51.000Z
|
notebooks/tests/test_utils.py
|
grchristensen/avpd
|
f7617844ae454a93825aa231e04c125cb4e58a20
|
[
"Apache-2.0"
] | 3
|
2021-01-30T02:19:07.000Z
|
2021-04-11T19:48:37.000Z
|
import pytest
import pandas as pd
from notebooks import utils
from tests import tutils
# TODO: Edge cases.
# TODO: Error conditions (good error reporting when not enough texts are available).
# class TestExtractAuthorTexts:
# example_dataframe = pd.DataFrame(
# [
# [1.0, 2.0],
# [2.0, 3.0],
# [1.0, 2.0],
# [3.0, 4.0],
# [5.0, 6.0],
# [1.0, 2.0],
# [3.0, 4.0],
# [1.0, 2.0],
# [3.0, 4.0],
# [1.0, 2.0],
# ],
# index=pd.MultiIndex.from_tuples(
# [
# (7, 0, 0),
# (7, 0, 1),
# (7, 1, 0),
# (7, 1, 1),
# (7, 1, 2),
# (13, 0, 0),
# (13, 0, 1),
# (13, 1, 0),
# (13, 1, 1),
# (13, 2, 0),
# ],
# names=["author", "text_id", "sentence_id"],
# ),
# )
#
# authors = [7, 13]
#
# dataframes = [
# example_dataframe.copy(),
# example_dataframe.copy(),
# ]
#
# text_counts = [1, 2]
#
# expected_author_text_sets = [
# pd.DataFrame(
# [
# [1.0, 2.0],
# [2.0, 3.0],
# ],
# index=pd.MultiIndex.from_tuples(
# [
# (7, 0, 0),
# (7, 0, 1),
# ],
# names=["author", "text_id", "sentence_id"],
# ),
# ),
# pd.DataFrame(
# [
# [1.0, 2.0],
# [3.0, 4.0],
# ],
# index=pd.MultiIndex.from_tuples(
# [
# (8, 0, 0),
# (8, 0, 1),
# ],
# names=["author", "text_id", "sentence_id"],
# ),
# ),
# ]
#
# expected_dataframes = [
# pd.DataFrame(
# [
# [1.0, 2.0],
# [3.0, 4.0],
# [5.0, 6.0],
# [1.0, 2.0],
# [3.0, 4.0],
# [1.0, 2.0],
# [3.0, 4.0],
# [1.0, 2.0],
# ],
# index=pd.MultiIndex.from_tuples(
# [
# (7, 1, 0),
# (7, 1, 1),
# (7, 1, 2),
# (13, 0, 0),
# (13, 0, 1),
# (13, 1, 0),
# (13, 1, 1),
# (13, 2, 0),
# ],
# names=["author", "text_id", "sentence_id"],
# ),
# ),
# pd.DataFrame(
# [
# [1.0, 2.0],
# [2.0, 3.0],
# [1.0, 2.0],
# [3.0, 4.0],
# [5.0, 6.0],
# [1.0, 2.0],
# [3.0, 4.0],
# [1.0, 2.0],
# ],
# index=pd.MultiIndex.from_tuples(
# [
# (7, 0, 0),
# (7, 0, 1),
# (7, 1, 0),
# (7, 1, 1),
# (7, 1, 2),
# (13, 1, 0),
# (13, 1, 1),
# (13, 2, 0),
# ],
# names=["author", "text_id", "sentence_id"],
# ),
# ),
# ]
#
# @pytest.mark.parametrize(
# "author, df, expected_author_texts, expected_dataframe",
# zip(authors, dataframes, expected_author_text_sets, expected_dataframes),
# )
# def test_extract_author_tests_should_return_author_texts(
# self, author, df, expected_author_texts, expected_dataframe
# ):
# author_texts, df = utils.extract_author_texts(author, df)
#
# assert tutils.npequal(author_texts, expected_author_texts)
# assert tutils.npequal(df, expected_dataframe)
| 28.15493
| 83
| 0.313657
| 387
| 3,998
| 3.118863
| 0.162791
| 0.034797
| 0.044739
| 0.04971
| 0.523612
| 0.523612
| 0.501243
| 0.428335
| 0.404308
| 0.404308
| 0
| 0.118196
| 0.517509
| 3,998
| 141
| 84
| 28.35461
| 0.507517
| 0.909955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007092
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b8a1fdd59db1f30bc4f3a569840daf7ce7d45399
| 260
|
py
|
Python
|
UserServer/login/login_authentication_utility.py
|
cxiaolong/XboxBackend
|
d42033c73d29a8ba43e359b32020fa55d1740da1
|
[
"MIT"
] | null | null | null |
UserServer/login/login_authentication_utility.py
|
cxiaolong/XboxBackend
|
d42033c73d29a8ba43e359b32020fa55d1740da1
|
[
"MIT"
] | null | null | null |
UserServer/login/login_authentication_utility.py
|
cxiaolong/XboxBackend
|
d42033c73d29a8ba43e359b32020fa55d1740da1
|
[
"MIT"
] | null | null | null |
from flask import abort, g
# def authentication(func):
# def wrapper(*args, **kwargs):
# if g.username == 'admin' and g.password == '123':
# return func(*args, **kwargs)
# else:
# abort(401)
#
# return wrapper
| 21.666667
| 59
| 0.530769
| 29
| 260
| 4.758621
| 0.689655
| 0.144928
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033898
| 0.319231
| 260
| 11
| 60
| 23.636364
| 0.745763
| 0.823077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b215ef73b41cc135eee5608b48ed1436b0b61cf3
| 1,015
|
py
|
Python
|
airSim_exp.py
|
FaisalAhmed0/gym-airsim
|
3cb7525699a0f1a12224701b0a5c1f172c0109f9
|
[
"MIT"
] | null | null | null |
airSim_exp.py
|
FaisalAhmed0/gym-airsim
|
3cb7525699a0f1a12224701b0a5c1f172c0109f9
|
[
"MIT"
] | null | null | null |
airSim_exp.py
|
FaisalAhmed0/gym-airsim
|
3cb7525699a0f1a12224701b0a5c1f172c0109f9
|
[
"MIT"
] | null | null | null |
from spinup import ppo_pytorch as ppo
import torch
import gym
import gym_airsim
# Check this, it may not work
def envFunc():
    """Factory returning the AirSim gym environment (id 'airsim_gym-v0')."""
    return gym.make('airsim_gym-v0')
# Setup the environment function and the hyperparameters shared by all runs.
env_fn = envFunc
ac_kwargs = dict(hidden_sizes=[64, 64])

# Run one PPO training per random seed (the three original copy-pasted calls
# differed only in the seed and the output directory suffix).
for seed in (0, 5, 10):
    logger_kwargs = dict(
        output_dir='/home/isra/Documents/airsim_exp_results_random_seed_{}'.format(seed),
        exp_name='random_goals',
    )
    ppo(env_fn=envFunc, ac_kwargs=ac_kwargs, seed=seed, max_ep_len=500,
        steps_per_epoch=4000, epochs=250, logger_kwargs=logger_kwargs)
| 44.130435
| 129
| 0.812808
| 172
| 1,015
| 4.447674
| 0.343023
| 0.141176
| 0.117647
| 0.156863
| 0.739869
| 0.713725
| 0.713725
| 0.713725
| 0.713725
| 0.713725
| 0
| 0.046088
| 0.080788
| 1,015
| 22
| 130
| 46.136364
| 0.773848
| 0.077833
| 0
| 0
| 0
| 0
| 0.22449
| 0.171858
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.266667
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b267af904e8fb18d54f84df93f35d2b6dff6d376
| 155
|
py
|
Python
|
scenarios/callback_show/executable.py
|
timgates42/balanced-python
|
1df86b45c36a97ec2e214480c6806c4df3c79860
|
[
"MIT"
] | 12
|
2015-04-12T06:18:33.000Z
|
2021-03-03T23:54:19.000Z
|
scenarios/callback_show/executable.py
|
timgates42/balanced-python
|
1df86b45c36a97ec2e214480c6806c4df3c79860
|
[
"MIT"
] | 1
|
2021-11-24T20:10:19.000Z
|
2021-11-24T20:10:19.000Z
|
scenarios/callback_show/executable.py
|
timgates42/balanced-python
|
1df86b45c36a97ec2e214480c6806c4df3c79860
|
[
"MIT"
] | 14
|
2015-03-23T17:52:06.000Z
|
2021-11-24T11:04:15.000Z
|
import balanced

# Configure the client with an API key ('ak-test-' prefix suggests a sandbox
# test key — verify before reuse).
balanced.configure('ak-test-2eKlj1ZDfAcZSARMf3NMhBHywDej0avSY')

# Fetch a previously registered callback by its REST URI.
callback = balanced.Callback.fetch('/callbacks/CB4a7Q7HSdJJgMVHwPsarIw8')
| 31
| 73
| 0.851613
| 13
| 155
| 10.153846
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054054
| 0.045161
| 155
| 5
| 73
| 31
| 0.837838
| 0
| 0
| 0
| 0
| 0
| 0.487179
| 0.487179
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b27e84d2b7da219fd21ca53a8cabaeb5ab3db0c1
| 1,183
|
py
|
Python
|
Inhyuk/Stack/python/2504.py
|
lee20h/ALPS_2020_Summer_Study
|
3656d5c6ab44f7f43c1f8c691e75352495c76adc
|
[
"Apache-2.0"
] | 1
|
2020-08-02T14:30:03.000Z
|
2020-08-02T14:30:03.000Z
|
Inhyuk/Stack/python/2504.py
|
alps-jbnu/ALPS_2020_Summer_Study
|
3656d5c6ab44f7f43c1f8c691e75352495c76adc
|
[
"Apache-2.0"
] | null | null | null |
Inhyuk/Stack/python/2504.py
|
alps-jbnu/ALPS_2020_Summer_Study
|
3656d5c6ab44f7f43c1f8c691e75352495c76adc
|
[
"Apache-2.0"
] | 2
|
2020-07-09T14:53:29.000Z
|
2020-07-17T10:39:00.000Z
|
def bracket_value(chars):
    """Return the value of a balanced bracket string, or 0 if it is invalid.

    '()' is worth 2 and '[]' is worth 3; nesting multiplies and adjacency
    adds, e.g. '(()[[]])([])' -> (2 + 3*3)*2 + 3*2 = 28.
    """
    stack = []
    for ch in chars:
        if ch in '([':
            stack.append(ch)
        else:
            # Collapse any already-evaluated sub-values below this closer.
            inner = 0
            while stack and isinstance(stack[-1], int):
                inner += stack.pop()
            opener = '(' if ch == ')' else '['
            # Bug fix vs. the original: a closer whose nearest opener is of
            # the other kind (e.g. the ')' in '([)])') fell through every
            # branch and was silently ignored, letting invalid input such as
            # '([)])' produce a non-zero answer. It must invalidate instead.
            if not stack or stack[-1] != opener:
                return 0
            stack.pop()
            multiplier = 2 if ch == ')' else 3
            # An empty pair contributes the bare multiplier (inner == 0).
            stack.append(multiplier * max(inner, 1))
    # Any unmatched opener left over means the string was not balanced.
    if any(item in ('(', '[') for item in stack):
        return 0
    return sum(stack)


if __name__ == "__main__":
    # Same CLI behavior as the original script: read one line, print value.
    print(bracket_value(input()))
| 26.288889
| 46
| 0.393914
| 123
| 1,183
| 3.788618
| 0.227642
| 0.171674
| 0.094421
| 0.120172
| 0.742489
| 0.742489
| 0.742489
| 0.733906
| 0.733906
| 0.733906
| 0
| 0.019939
| 0.448859
| 1,183
| 44
| 47
| 26.886364
| 0.694785
| 0
| 0
| 0.372093
| 0
| 0
| 0.012291
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.046512
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a22d294c3753b08fb0f88c1f6e879785f30caf3d
| 88
|
py
|
Python
|
ontology/logistic_regression/sherlock/listify_circuits_k40_forward.py
|
ehbeam/neuro-knowledge-engine
|
9dc56ade0bbbd8d14f0660774f787c3f46d7e632
|
[
"MIT"
] | 15
|
2020-07-17T07:10:26.000Z
|
2022-02-18T05:51:45.000Z
|
ontology/neural_network/sherlock/listify_circuits_k40_forward.py
|
YifeiCAO/neuro-knowledge-engine
|
9dc56ade0bbbd8d14f0660774f787c3f46d7e632
|
[
"MIT"
] | 2
|
2022-01-14T09:10:12.000Z
|
2022-01-28T17:32:42.000Z
|
ontology/neural_network/sherlock/listify_circuits_k40_forward.py
|
YifeiCAO/neuro-knowledge-engine
|
9dc56ade0bbbd8d14f0660774f787c3f46d7e632
|
[
"MIT"
] | 4
|
2021-12-22T13:27:32.000Z
|
2022-02-18T05:51:47.000Z
|
#!/bin/python
import listify_circuits

# Run circuit optimization with k=40 and the 'forward' direction flag.
# NOTE(review): the exact semantics of both arguments live in
# listify_circuits.optimize_circuits — confirm there.
listify_circuits.optimize_circuits(40, 'forward')
| 22
| 49
| 0.829545
| 11
| 88
| 6.363636
| 0.727273
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024096
| 0.056818
| 88
| 4
| 49
| 22
| 0.819277
| 0.136364
| 0
| 0
| 0
| 0
| 0.092105
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a2b17fab0e9cf1ab19273979e9f4d2479f2f2f4a
| 112
|
py
|
Python
|
test.py
|
bigmingyu/Git_GUI_Tutorial
|
cd77b75c2ccf490591c04fbf851f501713054955
|
[
"MIT"
] | null | null | null |
test.py
|
bigmingyu/Git_GUI_Tutorial
|
cd77b75c2ccf490591c04fbf851f501713054955
|
[
"MIT"
] | null | null | null |
test.py
|
bigmingyu/Git_GUI_Tutorial
|
cd77b75c2ccf490591c04fbf851f501713054955
|
[
"MIT"
] | null | null | null |
a = 1
b = 2

# Print the same four messages as before via a single loop.
for _message in (
    "Hello from GitHub",
    "Hello from Local",
    "This is nothing",
    "Time to merge",
):
    print(_message)
| 18.666667
| 26
| 0.6875
| 20
| 112
| 3.85
| 0.75
| 0.25974
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021053
| 0.151786
| 112
| 6
| 27
| 18.666667
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0.539823
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
a2b3ea4abe65dd887b38e6b25985cff2d6aab5a8
| 121
|
py
|
Python
|
LearningAgent.py
|
SanjoSolutions/cfr
|
a82eadfa9d6577daa0e10316fcbeda66dc17e8bc
|
[
"Unlicense"
] | null | null | null |
LearningAgent.py
|
SanjoSolutions/cfr
|
a82eadfa9d6577daa0e10316fcbeda66dc17e8bc
|
[
"Unlicense"
] | null | null | null |
LearningAgent.py
|
SanjoSolutions/cfr
|
a82eadfa9d6577daa0e10316fcbeda66dc17e8bc
|
[
"Unlicense"
] | null | null | null |
class LearningAgent:
    """Base/stub agent interface: both methods are no-op placeholders."""

    def choose_action(self, game):
        # Placeholder — presumably subclasses return an action for `game`;
        # confirm against concrete agent implementations.
        pass

    def receive_result(self, result):
        # Placeholder — presumably notified with the outcome of a game/step.
        pass
| 17.285714
| 37
| 0.636364
| 14
| 121
| 5.357143
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.289256
| 121
| 6
| 38
| 20.166667
| 0.872093
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.4
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
a2d03f650de353e8effd56dec23241850adb7a08
| 1,228
|
py
|
Python
|
Bot/Tests/DiscordUtilityTest.py
|
Pyruths/DiscordRPbot
|
87d4235fbec060f5bbcfabb6f2202913612ce28c
|
[
"MIT"
] | null | null | null |
Bot/Tests/DiscordUtilityTest.py
|
Pyruths/DiscordRPbot
|
87d4235fbec060f5bbcfabb6f2202913612ce28c
|
[
"MIT"
] | 19
|
2018-06-21T10:51:13.000Z
|
2018-06-28T01:38:38.000Z
|
Bot/Tests/DiscordUtilityTest.py
|
Pyruths/DiscordRPbot
|
87d4235fbec060f5bbcfabb6f2202913612ce28c
|
[
"MIT"
] | null | null | null |
import unittest
from DiscordUtility import *
"""
Check validity of utility functions used
"""
class DiscordUtilityTest(unittest.TestCase):
    """Exercise the mention/role id parsers from DiscordUtility."""

    # --- valid_id: user-mention strings ---

    def test_mention_valid(self):
        self.assertEqual("133546464495403008", valid_id("<@133546464495403008>"), "Did not accept valid value")

    def test_mention_invalid(self):
        self.assertEqual(None, valid_id("<@13354646449adsf403008>"), "Did not reject non-numeric")

    def test_mention_not(self):
        self.assertEqual(None, valid_id("<133546464495403008"), "Did not reject non-mention")

    def test_mention_none(self):
        self.assertEqual(None, valid_id(None), "Did not reject None")

    # --- valid_role: role-mention strings ---

    def test_role_valid(self):
        self.assertEqual("133546464495403008", valid_role("<@&133546464495403008>"), "Did not accept valid value")

    def test_role_invalid(self):
        self.assertEqual(None, valid_role("<@&13354646449adsf403008>"), "Did not reject non-numeric")

    def test_role_not(self):
        self.assertEqual(None, valid_role("<133546464495403008"), "Did not reject non-mention")

    def test_role_none(self):
        self.assertEqual(None, valid_role(None), "Did not reject None")


if __name__ == '__main__':
    unittest.main()
| 40.933333
| 113
| 0.705212
| 146
| 1,228
| 5.712329
| 0.239726
| 0.067146
| 0.182254
| 0.165468
| 0.769784
| 0.714628
| 0.345324
| 0.345324
| 0
| 0
| 0
| 0.139626
| 0.171824
| 1,228
| 29
| 114
| 42.344828
| 0.680433
| 0
| 0
| 0
| 0
| 0
| 0.32
| 0.08
| 0
| 0
| 0
| 0
| 0.380952
| 1
| 0.380952
| false
| 0
| 0.095238
| 0
| 0.52381
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
a2d3bc094f52f8884c7cd9e05ffb09dacacea1ff
| 82
|
py
|
Python
|
MUDI_V1.00/scATACseq.py
|
yufanzhouonline/MUDI
|
f07a2b062bd8788574caace0d0d9137259dcf8e9
|
[
"MIT"
] | null | null | null |
MUDI_V1.00/scATACseq.py
|
yufanzhouonline/MUDI
|
f07a2b062bd8788574caace0d0d9137259dcf8e9
|
[
"MIT"
] | null | null | null |
MUDI_V1.00/scATACseq.py
|
yufanzhouonline/MUDI
|
f07a2b062bd8788574caace0d0d9137259dcf8e9
|
[
"MIT"
] | null | null | null |
from ATACseq import *
class scATACseq(ATACseq):
    """Single-cell ATAC-seq handler specializing the imported ATACseq base."""

    def load(self):
        # Placeholder: loading logic is not implemented yet.
        pass
| 13.666667
| 25
| 0.646341
| 10
| 82
| 5.3
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.268293
| 82
| 5
| 26
| 16.4
| 0.883333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
a2dbb5550fb39d2a5614118322982b47eb97a26a
| 125
|
py
|
Python
|
books/admin.py
|
devmedtz/sogea
|
54cf257856cae451ad87e2396b8e44a34c0c6daf
|
[
"MIT"
] | 3
|
2021-04-08T12:50:40.000Z
|
2021-05-09T07:36:52.000Z
|
books/admin.py
|
devmedtz/sogea
|
54cf257856cae451ad87e2396b8e44a34c0c6daf
|
[
"MIT"
] | 5
|
2021-06-05T00:07:57.000Z
|
2022-03-12T01:00:20.000Z
|
books/admin.py
|
devmedtz/sogea
|
54cf257856cae451ad87e2396b8e44a34c0c6daf
|
[
"MIT"
] | 2
|
2021-04-07T15:17:46.000Z
|
2021-05-09T06:29:57.000Z
|
from django.contrib import admin
from .models import Category, Book

# Expose both models in the Django admin with the default ModelAdmin options.
admin.site.register(Category)
admin.site.register(Book)
| 20.833333
| 34
| 0.816
| 18
| 125
| 5.666667
| 0.555556
| 0.176471
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096
| 125
| 5
| 35
| 25
| 0.902655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0c0c0fdc5a76c9e7c8b59d49e103318e0744b063
| 79
|
py
|
Python
|
locale/pot/api/utilities/_autosummary/pyvista-Text3D-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 4
|
2020-08-07T08:19:19.000Z
|
2020-12-04T09:51:11.000Z
|
locale/pot/api/utilities/_autosummary/pyvista-Text3D-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 19
|
2020-08-06T00:24:30.000Z
|
2022-03-30T19:22:24.000Z
|
locale/pot/api/utilities/_autosummary/pyvista-Text3D-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 1
|
2021-03-09T07:50:40.000Z
|
2021-03-09T07:50:40.000Z
|
import pyvista

# Build a 3D mesh spelling "PyVista" and render it with the camera facing
# the XY plane.
text_mesh = pyvista.Text3D('PyVista')
text_mesh.plot(cpos='xy')
| 19.75
| 37
| 0.772152
| 12
| 79
| 4.916667
| 0.666667
| 0.372881
| 0.508475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013699
| 0.075949
| 79
| 3
| 38
| 26.333333
| 0.794521
| 0
| 0
| 0
| 0
| 0
| 0.113924
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0c449ed5bf2b9f3fc31289dade1ccc8158475f85
| 244
|
py
|
Python
|
mock_functions/subdir/nested_module.py
|
tjdevries/easy_python_requirements
|
2bf905f34065637cbc781d17d8e5806892fb66d5
|
[
"MIT"
] | null | null | null |
mock_functions/subdir/nested_module.py
|
tjdevries/easy_python_requirements
|
2bf905f34065637cbc781d17d8e5806892fb66d5
|
[
"MIT"
] | 2
|
2016-07-01T13:47:43.000Z
|
2021-11-28T23:07:17.000Z
|
mock_functions/subdir/nested_module.py
|
tjdevries/easy_python_requirements
|
2bf905f34065637cbc781d17d8e5806892fb66d5
|
[
"MIT"
] | 1
|
2021-11-28T21:50:41.000Z
|
2021-11-28T21:50:41.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class nestedClass:
    # NOTE(review): judging by the TEST INFO markers, the docstring below is
    # fixture data consumed by a requirement-extraction tool — its wording is
    # kept byte-for-byte.
    def nested_funct(self):
        """
        TEST INFO:
        TEST DESCRIPTION BEGIN
        This is a nested module
        TEST DESCRIPTION END
        """
        pass
| 17.428571
| 31
| 0.532787
| 27
| 244
| 4.777778
| 0.851852
| 0.232558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012903
| 0.364754
| 244
| 13
| 32
| 18.769231
| 0.819355
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
0c56b44b5dba116d125238b9021714cedfeca5bf
| 1,682
|
py
|
Python
|
src/tx/parallex/data.py
|
xu-hao/tx-paralax
|
4b1be3591e88c0df634a35be74a73872bff73caf
|
[
"MIT"
] | null | null | null |
src/tx/parallex/data.py
|
xu-hao/tx-paralax
|
4b1be3591e88c0df634a35be74a73872bff73caf
|
[
"MIT"
] | null | null | null |
src/tx/parallex/data.py
|
xu-hao/tx-paralax
|
4b1be3591e88c0df634a35be74a73872bff73caf
|
[
"MIT"
] | 1
|
2020-05-02T15:50:15.000Z
|
2020-05-02T15:50:15.000Z
|
from dataclasses import dataclass
def _is(a, b):
    """``a is b``."""
    return a is b


def _is_not(a, b):
    """``a is not b``."""
    return a is not b


def _eq(a, b):
    """``a == b``."""
    return a == b


def _not_eq(a, b):
    """``a != b``."""
    return a != b


def _lt(a, b):
    """``a < b``."""
    return a < b


def _gt(a, b):
    """``a > b``."""
    return a > b


def _lt_e(a, b):
    """``a <= b``."""
    return a <= b


def _gt_e(a, b):
    """``a >= b``."""
    return a >= b


def _in(a, b):
    """``a in b``."""
    return a in b


def _not_in(a, b):
    """``a not in b``."""
    return a not in b


def _and(*a):
    """``and`` over all operands. NOTE: returns a bool and cannot
    short-circuit — every operand is already evaluated by the caller."""
    return all(a)


def _or(*a):
    """``or`` over all operands. NOTE: returns a bool and cannot
    short-circuit — every operand is already evaluated by the caller."""
    return any(a)
def _add(a, b):
    """``a + b``."""
    return a + b


def _sub(a, b):
    """``a - b``."""
    return a - b


def _mult(a, b):
    """``a * b``."""
    return a * b


def _div(a, b):
    """``a / b``."""
    return a / b


def _mod(a, b):
    """``a % b``."""
    return a % b


def _floor_div(a, b):
    """``a // b``."""
    return a // b


def _l_shift(a, b):
    """``a << b``."""
    return a << b


def _r_shift(a, b):
    """``a >> b``."""
    return a >> b


def _bit_and(a, b):
    """``a & b``."""
    return a & b


def _bit_or(a, b):
    """``a | b``."""
    return a | b


def _bit_xor(a, b):
    """``a ^ b``."""
    return a ^ b


def _mat_mult(a, b):
    """``a @ b``."""
    return a @ b


def _pow(a, b):
    """``a ** b``."""
    return a ** b


def _invert(a):
    """``~a``."""
    return ~a


def _not(a):
    """``not a``."""
    return not a


def _u_add(a):
    """Unary ``+a``."""
    return +a


def _u_sub(a):
    """Unary ``-a``."""
    return -a


def _if_exp(t, b, o):
    """``b if t else o``. NOTE: both branches are already evaluated by the
    caller, so there is no lazy-branch behavior here."""
    return b if t else o


def _subscript(a, b):
    """``a[b]``."""
    return a[b]
@dataclass
class Starred:
def __init__(self, a):
self.value = a
def _starred(a):
return Starred(a)
def _list(*args):
l = []
for arg in args:
if isinstance(arg, Starred):
l.extend(arg.value)
else:
l.append(arg)
return l
def _tuple(*args):
return args
def _dict(*args):
n = int(len(args)/2)
obj = {}
for k, v in zip(args[:n], args[n:]):
if k is None:
obj.update(v)
else:
obj[k] = v
return obj
| 13.349206
| 40
| 0.529727
| 311
| 1,682
| 2.691318
| 0.205788
| 0.105137
| 0.229391
| 0.258065
| 0.415771
| 0.351254
| 0.273596
| 0
| 0
| 0
| 0
| 0.000895
| 0.33591
| 1,682
| 125
| 41
| 13.456
| 0.748433
| 0
| 0
| 0.022727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.409091
| false
| 0
| 0.011364
| 0.375
| 0.829545
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
a7681bf8445a52a74c064220dcb3dbd920b25502
| 33
|
py
|
Python
|
src/pyutil.py
|
Sakurairihito/QCMaterialNew
|
d1698adf791c7b6587d8ca76f7678b1cb7a26135
|
[
"MIT"
] | 4
|
2022-01-05T15:09:49.000Z
|
2022-03-29T07:33:53.000Z
|
src/pyutil.py
|
sakurairihito/QCMaterialNew
|
5281aed3d57bfeb832e1e72271fa2ebc560eeb40
|
[
"MIT"
] | null | null | null |
src/pyutil.py
|
sakurairihito/QCMaterialNew
|
5281aed3d57bfeb832e1e72271fa2ebc560eeb40
|
[
"MIT"
] | null | null | null |
def doublefunc(x):
    """Return ``2 * x`` (twice the input)."""
    doubled = 2 * x
    return doubled
| 16.5
| 18
| 0.666667
| 6
| 33
| 3.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 0.212121
| 33
| 2
| 19
| 16.5
| 0.807692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
a7aa0942f9925d1095802b70d153843175c8a046
| 802
|
py
|
Python
|
tests/test_utils.py
|
hiro-o918/openapi-python-client
|
5ea70ac68059010e36a3934b91a953b905ca0005
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
hiro-o918/openapi-python-client
|
5ea70ac68059010e36a3934b91a953b905ca0005
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
hiro-o918/openapi-python-client
|
5ea70ac68059010e36a3934b91a953b905ca0005
|
[
"MIT"
] | null | null | null |
from openapi_python_client import utils
def test_snake_case_uppercase_str():
    """All-caps words are lower-cased; spaces become underscores."""
    assert utils.snake_case("HTTP") == "http"
    assert utils.snake_case("HTTP RESPONSE") == "http_response"


def test_snake_case_from_pascal_with_acronyms():
    """Acronym runs inside PascalCase collapse to single snake segments."""
    assert utils.snake_case("HTTPResponse") == "http_response"
    assert utils.snake_case("APIClientHTTPResponse") == "api_client_http_response"
    assert utils.snake_case("OAuthClientHTTPResponse") == "o_auth_client_http_response"


def test_snake_case_from_pascal():
    """Plain PascalCase splits on each capitalized word."""
    assert utils.snake_case("HttpResponsePascalCase") == "http_response_pascal_case"


def test_snake_case_from_camel():
    """camelCase splits the same way, keeping the leading word as-is."""
    assert utils.snake_case("httpResponseLowerCamel") == "http_response_lower_camel"


def test_spinal_case():
    """spinal_case converts underscores to hyphens."""
    assert utils.spinal_case("keep_alive") == "keep-alive"
| 32.08
| 87
| 0.779302
| 102
| 802
| 5.676471
| 0.303922
| 0.170984
| 0.193437
| 0.241796
| 0.35924
| 0.241796
| 0.131261
| 0.131261
| 0
| 0
| 0
| 0
| 0.110973
| 802
| 24
| 88
| 33.416667
| 0.812062
| 0
| 0
| 0
| 0
| 0
| 0.334165
| 0.235661
| 0
| 0
| 0
| 0
| 0.571429
| 1
| 0.357143
| true
| 0
| 0.071429
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a7ac9d8e9dd13f02c0b5738b0c35a2a6524fade0
| 237
|
py
|
Python
|
test_data/parse/expected/single_class/property/description/meta_model.py
|
gillistephan/aas-core-codegen
|
5b89ea2ee35aecaca9a1bed7ac81d420cc560f29
|
[
"MIT"
] | 5
|
2021-12-29T12:55:34.000Z
|
2022-03-01T17:57:21.000Z
|
test_data/parse/expected/single_class/property/description/meta_model.py
|
gillistephan/aas-core-codegen
|
5b89ea2ee35aecaca9a1bed7ac81d420cc560f29
|
[
"MIT"
] | 10
|
2021-12-29T02:15:55.000Z
|
2022-03-09T11:04:22.000Z
|
test_data/parse/expected/single_class/property/description/meta_model.py
|
gillistephan/aas-core-codegen
|
5b89ea2ee35aecaca9a1bed7ac81d420cc560f29
|
[
"MIT"
] | 2
|
2021-12-29T01:42:12.000Z
|
2022-02-15T13:46:33.000Z
|
class Something:
    """Represent something."""

    # NOTE(review): the path suggests this is parser-test fixture data for
    # aas-core-codegen — the bare string literals after fields act as
    # attribute docstrings for that parser and are kept byte-identical.
    some_property: int
    """some property"""

    another_property: str

    yet_another_property: str
    """yet another property"""

    # Metadata markers presumably read by the meta-model tooling.
    __book_url__ = "dummy"
    __book_version__ = "dummy"
| 15.8
| 30
| 0.666667
| 25
| 237
| 5.76
| 0.52
| 0.3125
| 0.25
| 0.291667
| 0.395833
| 0.395833
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21097
| 237
| 14
| 31
| 16.928571
| 0.770053
| 0.084388
| 0
| 0
| 0
| 0
| 0.060241
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
a7b9e4432b02485a7863cde1b5c89fd2e6810e3e
| 185
|
py
|
Python
|
autotorch/__init__.py
|
StacyYang/AutoTorch
|
bd963471a2aab25a67bef84b6a9bb5619e2380e3
|
[
"Apache-2.0"
] | 62
|
2020-04-11T01:10:18.000Z
|
2022-01-20T02:05:58.000Z
|
autotorch/__init__.py
|
StacyYang/AutoTorch
|
bd963471a2aab25a67bef84b6a9bb5619e2380e3
|
[
"Apache-2.0"
] | 14
|
2020-04-11T01:10:10.000Z
|
2020-05-13T23:59:30.000Z
|
autotorch/__init__.py
|
StacyYang/AutoTorch
|
bd963471a2aab25a67bef84b6a9bb5619e2380e3
|
[
"Apache-2.0"
] | 7
|
2020-04-21T13:06:42.000Z
|
2022-03-14T11:54:39.000Z
|
"""AutoTorch"""
from .version import __version__
from . import scheduler, searcher, utils
from .scheduler import get_cpu_count, get_gpu_count
from .utils import *
from .core import *
| 20.555556
| 51
| 0.772973
| 25
| 185
| 5.4
| 0.48
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140541
| 185
| 8
| 52
| 23.125
| 0.849057
| 0.048649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ac00396bb5413dcd63acc67e8b5af1cbcf5d6bbc
| 873
|
py
|
Python
|
zot/test_IncludeResolver.py
|
niccroad/zot
|
8a9ad7292abc4cc93392d89597ab991a0480afb2
|
[
"MIT"
] | 1
|
2017-10-23T01:43:17.000Z
|
2017-10-23T01:43:17.000Z
|
zot/test_IncludeResolver.py
|
niccroad/zot
|
8a9ad7292abc4cc93392d89597ab991a0480afb2
|
[
"MIT"
] | null | null | null |
zot/test_IncludeResolver.py
|
niccroad/zot
|
8a9ad7292abc4cc93392d89597ab991a0480afb2
|
[
"MIT"
] | null | null | null |
import unittest
from IncludeResolver import IncludeResolver
class test_IncludeResolver(unittest.TestCase):
    """Tests for IncludeResolver.rewriteInclude path rewriting.

    Bug fix: the original used ``assertEquals``, a deprecated alias that was
    removed in Python 3.12; replaced with ``assertEqual``.
    """

    def test_rewriteInclude_on_a_local_include(self):
        # Same source/target folder: the local include passes through unchanged.
        resolver = IncludeResolver('.', [], '.')
        self.assertEqual('"../include/TestFramework.h"',
                         resolver.rewriteInclude('"../include/TestFramework.h"'))

    def test_rewriteInclude_on_a_local_include_generated_to_another_folder(self):
        # Output generated two levels up: the include path is re-rooted.
        resolver = IncludeResolver('.', [], '../..')
        self.assertEqual('"../../include/TestFramework.h"',
                         resolver.rewriteInclude('"include/TestFramework.h"'))

    def test_rewriteInclude_on_a_system_include(self):
        # System-style <...> includes are expected back unchanged.
        resolver = IncludeResolver('.', [], '../..')
        self.assertEqual('<include/TestFramework.h>',
                         resolver.rewriteInclude('<include/TestFramework.h>'))
| 45.947368
| 82
| 0.65063
| 75
| 873
| 7.306667
| 0.306667
| 0.218978
| 0.229927
| 0.125912
| 0.773723
| 0.773723
| 0.773723
| 0.698905
| 0.698905
| 0.698905
| 0
| 0
| 0.201604
| 873
| 19
| 83
| 45.947368
| 0.786227
| 0
| 0
| 0.133333
| 0
| 0
| 0.201373
| 0.185355
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.2
| false
| 0
| 0.133333
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ac09b832c55c25123d8d7c554d1cd0b69bfbc96f
| 2,182
|
py
|
Python
|
api_demo/easy_example.py
|
Chariotteam/FionaNasaSpaceApps
|
a14c845354f3f8c453cdef6d1a9837195f3f2ef5
|
[
"MIT"
] | null | null | null |
api_demo/easy_example.py
|
Chariotteam/FionaNasaSpaceApps
|
a14c845354f3f8c453cdef6d1a9837195f3f2ef5
|
[
"MIT"
] | null | null | null |
api_demo/easy_example.py
|
Chariotteam/FionaNasaSpaceApps
|
a14c845354f3f8c453cdef6d1a9837195f3f2ef5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 3 01:22:53 2021
@author: timur
"""
import requests
# Base endpoint of the chariot-maria analytics REST API.
_BASE_URL = 'https://chariot-maria.space/api/v1.0'

# Every endpoint speaks JSON in and out.
_JSON_HEADERS = {
    "Content-Type": "application/json; charset=UTF-8",
    "Accept": "application/json; charset=UTF-8",
}


def _post_json(path, params):
    """POST *params* as JSON to ``_BASE_URL + path`` and return the decoded body.

    Shared helper: the four public wrappers below differed only in endpoint
    and payload, so the request/headers boilerplate is centralized here.
    A fresh header dict is passed each call, matching the original behavior.
    """
    rest_response = requests.request(
        "POST", _BASE_URL + path, headers=dict(_JSON_HEADERS), json=params)
    return rest_response.json()


def compare_values(y, arrays):
    """Compare series *y* against each series in *arrays* (linear/compare_values)."""
    return _post_json('/linear/compare_values', {'y': y, 'with': arrays})


def predict_val(degrees, predict, x, y, subsets=300, sampling_param=2):
    """Validate polynomial fits of the given *degrees* on (x, y) and predict."""
    params = {"degrees": degrees, "predict": predict, "x": x, "y": y,
              "subsets": subsets, "sampling_param": sampling_param}
    return _post_json('/validating_pol', params)


def clustering(data, k, column, size_min, size_max, max_attemps=300, zScale=False):
    """Size-constrained clustering of *column* in *data* into *k* clusters."""
    params = {"data": [{"column": column, "k": k, "attemps": max_attemps,
                        "size_min": size_min, "size_max": size_max,
                        "z_score": str(zScale).lower()}, data]}
    return _post_json('/constrained_clustering', params)


def km_clustering(data, k, column, max_attemps=300, zScale=False):
    """Plain k-means clustering of *column* in *data* into *k* clusters."""
    params = {"data": [{"column": column, "k": k, "attemps": max_attemps,
                        "z_score": str(zScale).lower()}, data]}
    return _post_json('/kmean', params)
| 32.567164
| 155
| 0.664528
| 288
| 2,182
| 4.90625
| 0.263889
| 0.025478
| 0.124558
| 0.141543
| 0.723992
| 0.723992
| 0.723992
| 0.723992
| 0.723992
| 0.674452
| 0
| 0.021393
| 0.164528
| 2,182
| 66
| 156
| 33.060606
| 0.753703
| 0.043538
| 0
| 0.606061
| 0
| 0
| 0.314877
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0
| 0.030303
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ac20f67e5a5626890ff281123b56a049b517beb5
| 1,247
|
py
|
Python
|
twodim_compiler/drawing.py
|
jivnov/Language-for-2D-graphics
|
3d09ea4e14509089750c45d5f6f8eb03841e4e2e
|
[
"MIT"
] | null | null | null |
twodim_compiler/drawing.py
|
jivnov/Language-for-2D-graphics
|
3d09ea4e14509089750c45d5f6f8eb03841e4e2e
|
[
"MIT"
] | 2
|
2021-05-02T12:24:35.000Z
|
2021-06-18T10:44:34.000Z
|
twodim_compiler/drawing.py
|
jivnov/Language-for-2D-graphics
|
3d09ea4e14509089750c45d5f6f8eb03841e4e2e
|
[
"MIT"
] | null | null | null |
import svgwrite as svg
from graph import Vertex, Graph
class Drawing2d:
    """SVG drawing surface for rendering a shape-relation graph."""

    def __init__(self, w, h, output_path='./generated_images/output.svg'):
        """
        :param w: viewport width (stored and used as the SVG canvas width)
        :param h: viewport height (stored and used as the SVG canvas height)
        :param output_path: file the svgwrite canvas will be saved to
        """
        self.viewport_width = w
        self.viewport_height = h
        # svgwrite canvas sized (w, h), targeting output_path.
        self.canvas = svg.Drawing(output_path, (w, h))

    def draw(self, v: Vertex, parent: Vertex = None):
        # TODO: Draw all neighbours and neighbours' neighbours etc.
        """
        Basic algo:
        1. Go "up" (check the IN reference) until you reach the root of the graph
        NOTE: When adding a neighbour A to a Vertex B CONTAINED in some shape X, you should add "A IN X" relation automatically as well
        2. Draw root shape parent_graphX
        3. Call algo for each of X's neighbours until there are no neighbours to draw
        :param parent: Parent of the vertex; if None, assume this is the root
        :param v: vertex to render onto the canvas
        :return: None
        """
        # Skip vertices already rendered (guards against revisiting).
        if v.drawn:
            return
        v.draw(self.canvas)

    def _graph_to_something_insertable(self, graph: Graph):
        # TODO: This helper method should enable defining relations between Vertices and Graphs
        # TIP: We may want to use <defs> tag for this
        pass
| 33.702703
| 135
| 0.622294
| 177
| 1,247
| 4.305085
| 0.548023
| 0.005249
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004598
| 0.302326
| 1,247
| 36
| 136
| 34.638889
| 0.871264
| 0.498797
| 0
| 0
| 1
| 0
| 0.056641
| 0.056641
| 0
| 0
| 0
| 0.055556
| 0
| 1
| 0.230769
| false
| 0.076923
| 0.153846
| 0
| 0.538462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
3bad1b252b5668fe8cf27ff1b4ff28c36ce9e83b
| 3,919
|
py
|
Python
|
tests/test_get_Rfam_ribo.py
|
Sung-Huan/ANNOgesic
|
af3de26f6c5ff9d2218f18a84bbc863a1bb95550
|
[
"0BSD"
] | 26
|
2016-02-25T19:27:55.000Z
|
2022-01-22T09:54:59.000Z
|
tests/test_get_Rfam_ribo.py
|
Sung-Huan/ANNOgesic
|
af3de26f6c5ff9d2218f18a84bbc863a1bb95550
|
[
"0BSD"
] | 28
|
2018-11-22T19:51:06.000Z
|
2022-03-20T23:02:13.000Z
|
tests/test_get_Rfam_ribo.py
|
Sung-Huan/ANNOgesic
|
af3de26f6c5ff9d2218f18a84bbc863a1bb95550
|
[
"0BSD"
] | 18
|
2016-06-01T11:53:45.000Z
|
2021-12-27T03:41:03.000Z
|
import sys
import os
import unittest
import shutil
from io import StringIO
sys.path.append(".")
import annogesiclib.get_Rfam_ribo as grr
class TestGetRfamRibo(unittest.TestCase):
    """Unit tests for annogesiclib.get_Rfam_ribo.rbs_from_rfam."""

    def setUp(self):
        # Fresh fixture data and a scratch directory for every test.
        self.example = Example()
        self.test_folder = "test_folder"
        if not os.path.exists(self.test_folder):
            os.mkdir(self.test_folder)

    def tearDown(self):
        # Drop the scratch directory so tests remain independent.
        if os.path.exists(self.test_folder):
            shutil.rmtree(self.test_folder)

    def test_rbs_from_rfam(self):
        """Filtering the Rfam file by the ribo table keeps only listed models."""
        result_path = os.path.join(self.test_folder, "test.out")
        table_path = os.path.join(self.test_folder, "ribo_table")
        with open(table_path, "w") as handle:
            handle.write(self.example.ribo_table)
        rfam_path = os.path.join(self.test_folder, "rfam")
        with open(rfam_path, "w") as handle:
            handle.write(self.example.rfam_file)
        grr.rbs_from_rfam(table_path, rfam_path, result_path)
        with open(result_path) as handle:
            produced = [entry.strip() for entry in handle]
        # Order is irrelevant; compare as sets of stripped lines.
        self.assertEqual(set(produced), set(self.example.out.split("\n")))
class Example(object):
    # Fixture data for TestGetRfamRibo. The string bodies below are exact
    # file contents and must not be reformatted.

    # Riboswitch lookup table: one "<Rfam accession> <name> <description>"
    # row per line.
    ribo_table = """RF00162 SAM SAM riboswitch box leader
RF00174 Cobalamin Cobalamin riboswitch
RF00634 SAM-IV S adenosyl methionine SAM riboswitch"""
    # Rfam covariance-model input: three records (ACC RF00162, RF00634 and
    # RF001111) separated by "//" record terminators.
    rfam_file = """INFERNAL1/a [1.1 | October 2013]
NAME SAM
ACC RF00162
STATES 338
NODES 91
CLEN 108
W 272
ALPH RNA
RF no
CONS yes
MAP yes
DATE Thu Feb 20 18:18:58 2014
COM [1] cmbuild -F CM SEED
COM [2] cmcalibrate --mpi CM
PBEGIN 0.05
PEND 0.05
WBETA 1e-07
QDBBETA1 1e-07
QDBBETA2 1e-15
N2OMEGA 1.52588e-05
N3OMEGA 1.52588e-05
ELSELF -0.08926734
//
INFERNAL1/a [1.1rc4 | June 2013]
NAME SAM-IV
ACC RF00634
STATES 362
NODES 95
CLEN 116
W 149
ALPH RNA
RF no
CONS yes
MAP yes
DATE Tue Aug 20 22:41:06 2013
COM [1] cmbuild -F CM SEED
COM [2] cmcalibrate --mpi CM
PBEGIN 0.05
PEND 0.05
WBETA 1e-07
QDBBETA1 1e-07
QDBBETA2 1e-15
N2OMEGA 1.52588e-05
N3OMEGA 1.52588e-05
ELSELF -0.08926734
NSEQ 40
EFFN 1.655273
CKSUM 531515698
NULL 0.000 0.000 0.000 0.000
GA 38.00
TC 38.60
NC 37.10
EFP7GF -7.0145 0.71835
//
INFERNAL1/a [1.1rc4 | June 2013]
NAME test
ACC RF001111
STATES 362
NODES 95
CLEN 116
W 149
ALPH RNA
RF no
CONS yes
MAP yes
DATE Tue Aug 20 22:41:06 2013
COM [1] cmbuild -F CM SEED
COM [2] cmcalibrate --mpi CM
PBEGIN 0.05
PEND 0.05
WBETA 1e-07
QDBBETA1 1e-07
QDBBETA2 1e-15
N2OMEGA 1.52588e-05
N3OMEGA 1.52588e-05
ELSELF -0.08926734
NSEQ 40
EFFN 1.655273
CKSUM 531515698
NULL 0.000 0.000 0.000 0.000
GA 38.00
TC 38.60
NC 37.10
EFP7GF -7.0145 0.71835
"""
    # Expected output: only the records whose ACC appears in ribo_table
    # (RF00162 and RF00634); the unlisted RF001111 record is dropped.
    out = """INFERNAL1/a [1.1 | October 2013]
NAME SAM
ACC RF00162
STATES 338
NODES 91
CLEN 108
W 272
ALPH RNA
RF no
CONS yes
MAP yes
DATE Thu Feb 20 18:18:58 2014
COM [1] cmbuild -F CM SEED
COM [2] cmcalibrate --mpi CM
PBEGIN 0.05
PEND 0.05
WBETA 1e-07
QDBBETA1 1e-07
QDBBETA2 1e-15
N2OMEGA 1.52588e-05
N3OMEGA 1.52588e-05
ELSELF -0.08926734
//
INFERNAL1/a [1.1rc4 | June 2013]
NAME SAM-IV
ACC RF00634
STATES 362
NODES 95
CLEN 116
W 149
ALPH RNA
RF no
CONS yes
MAP yes
DATE Tue Aug 20 22:41:06 2013
COM [1] cmbuild -F CM SEED
COM [2] cmcalibrate --mpi CM
PBEGIN 0.05
PEND 0.05
WBETA 1e-07
QDBBETA1 1e-07
QDBBETA2 1e-15
N2OMEGA 1.52588e-05
N3OMEGA 1.52588e-05
ELSELF -0.08926734
NSEQ 40
EFFN 1.655273
CKSUM 531515698
NULL 0.000 0.000 0.000 0.000
GA 38.00
TC 38.60
NC 37.10
EFP7GF -7.0145 0.71835
//"""
# Allow running this test module directly: python test_get_Rfam_ribo.py
if __name__ == "__main__":
    unittest.main()
| 20.73545
| 71
| 0.614953
| 641
| 3,919
| 3.700468
| 0.229329
| 0.020236
| 0.037943
| 0.030354
| 0.700675
| 0.700675
| 0.668634
| 0.61425
| 0.61425
| 0.61425
| 0
| 0.217692
| 0.299056
| 3,919
| 188
| 72
| 20.845745
| 0.645795
| 0
| 0
| 0.755682
| 0
| 0
| 0.680704
| 0
| 0
| 0
| 0
| 0
| 0.005682
| 1
| 0.017045
| false
| 0
| 0.034091
| 0
| 0.079545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3be5c69cc30af555544cbfd54391e479e4314b98
| 17
|
py
|
Python
|
edc_ogc/__init__.py
|
eurodatacube/ogc-edc
|
5f2844e5dbf63f05d8076965b02171bd91871820
|
[
"MIT"
] | null | null | null |
edc_ogc/__init__.py
|
eurodatacube/ogc-edc
|
5f2844e5dbf63f05d8076965b02171bd91871820
|
[
"MIT"
] | 21
|
2019-11-11T14:54:14.000Z
|
2021-06-10T22:22:07.000Z
|
edc_ogc/__init__.py
|
eurodatacube/ogc-edc
|
5f2844e5dbf63f05d8076965b02171bd91871820
|
[
"MIT"
] | 1
|
2020-10-05T15:17:38.000Z
|
2020-10-05T15:17:38.000Z
|
VERSION="0.13.8"
| 8.5
| 16
| 0.647059
| 4
| 17
| 2.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.058824
| 17
| 1
| 17
| 17
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ce02b8f735e60c63a06c706f5ae028c623eea9fa
| 107
|
py
|
Python
|
django_frontend_presets/settings.py
|
mikemenard/django-frontend-presets
|
0d1837415282ae43488b3e6e66889bc94f1a45b4
|
[
"BSD-3-Clause"
] | null | null | null |
django_frontend_presets/settings.py
|
mikemenard/django-frontend-presets
|
0d1837415282ae43488b3e6e66889bc94f1a45b4
|
[
"BSD-3-Clause"
] | null | null | null |
django_frontend_presets/settings.py
|
mikemenard/django-frontend-presets
|
0d1837415282ae43488b3e6e66889bc94f1a45b4
|
[
"BSD-3-Clause"
] | null | null | null |
import os
# Absolute location of this settings module on disk.
_abs_self = os.path.abspath(__file__)
# Directory containing the django_frontend_presets package.
APP_DIR = os.path.dirname(_abs_self)
# The bundled "stubs" directory shipped next to this module.
STUBS_DIR = os.path.join(APP_DIR, 'stubs')
| 21.4
| 52
| 0.747664
| 19
| 107
| 3.842105
| 0.526316
| 0.246575
| 0.246575
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093458
| 107
| 4
| 53
| 26.75
| 0.752577
| 0
| 0
| 0
| 0
| 0
| 0.046729
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.