hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b8f7c450dac70ccff034743e52b3931ea6911903
| 2,577
|
py
|
Python
|
api/src/domain/input_output/Keyboard.py
|
SamuelJansen/Application
|
6ab3202fb7de12782510f477a3e74d8800ea2927
|
[
"MIT"
] | null | null | null |
api/src/domain/input_output/Keyboard.py
|
SamuelJansen/Application
|
6ab3202fb7de12782510f477a3e74d8800ea2927
|
[
"MIT"
] | null | null | null |
api/src/domain/input_output/Keyboard.py
|
SamuelJansen/Application
|
6ab3202fb7de12782510f477a3e74d8800ea2927
|
[
"MIT"
] | null | null | null |
import pygame as pg
import keyboardFunction
print('Keyboard library imported')
class Keyboard:
def __init__(self,application):
self.application = application
self.status = [0,0]
self.printStatus = True
def handleEvent(self,pgEvent):
if pgEvent.type==pg.KEYDOWN :
if pgEvent.key==pg.K_LEFT :
self.status[0] = -1
self.keyboardPrint(keyboardFunction.KeyState.LEFT_ARROW_DOWN)
elif pgEvent.key==pg.K_RIGHT :
self.status[0] = 1
self.keyboardPrint(keyboardFunction.KeyState.RIGHT_ARROW_DOWN)
if pgEvent.type==pg.KEYDOWN :
if pgEvent.key==pg.K_UP :
self.status[1] = -1
self.keyboardPrint(keyboardFunction.KeyState.UP_ARROW_DOWN)
elif pgEvent.key==pg.K_DOWN :
self.status[1] = 1
self.keyboardPrint(keyboardFunction.KeyState.DOWN_ARROW_DOWN)
if pgEvent.type==pg.KEYUP :
if pg.key.get_pressed()[pg.K_LEFT] and not pg.key.get_pressed()[pg.K_RIGHT] :
self.status[0] = -1
self.keyboardPrint(keyboardFunction.KeyState.LEFT_ARROW_DOWN)
elif pg.key.get_pressed()[pg.K_RIGHT] and not pg.key.get_pressed()[pg.K_LEFT] :
self.status[0] = 1
self.keyboardPrint(keyboardFunction.KeyState.RIGHT_ARROW_DOWN)
elif not pg.key.get_pressed()[pg.K_LEFT] and not pg.key.get_pressed()[pg.K_RIGHT] :
self.status[0] = 0
if pgEvent.type==pg.KEYUP :
if pg.key.get_pressed()[pg.K_UP] and not pg.key.get_pressed()[pg.K_DOWN] :
self.status[1] = -1
self.keyboardPrint(keyboardFunction.KeyState.UP_ARROW_DOWN)
elif pg.key.get_pressed()[pg.K_DOWN] and not pg.key.get_pressed()[pg.K_UP] :
self.status[1] = 1
self.keyboardPrint(keyboardFunction.KeyState.DOWN_ARROW_DOWN)
elif not pg.key.get_pressed()[pg.K_UP] and not pg.key.get_pressed()[pg.K_DOWN] :
self.status[1] = 0
def keyboardPrint(self,keyState):
if self.printStatus :
print(keyState)
# def handleEvent(self,pgEvent):
# if self.keyboard.arrows[1]==-1 :
# gl.playSound(upSound)
# if self.keyboard.arrows[1]==1 :
# gl.playSound(downSound)
# if self.keyboard.arrows[0]==1 :
# gl.playMusic('Sounds/TakeaWalk.mp3')
# if self.keyboard.arrows[0]==-1 :
# gl.playSound(leftSound)
| 40.265625
| 95
| 0.591773
| 325
| 2,577
| 4.544615
| 0.150769
| 0.032498
| 0.064997
| 0.121869
| 0.811104
| 0.777251
| 0.777251
| 0.725118
| 0.651997
| 0.627624
| 0
| 0.016958
| 0.290648
| 2,577
| 63
| 96
| 40.904762
| 0.791028
| 0.122235
| 0
| 0.454545
| 0
| 0
| 0.011101
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068182
| false
| 0
| 0.068182
| 0
| 0.159091
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
77076c128ae2a0736c926ed0a35fa4f8c68a7080
| 194
|
py
|
Python
|
controller/__init__.py
|
nlaurens/budgetRapportage
|
a6edb3525d04e6f2660671e8ba02e56881cc861d
|
[
"MIT"
] | null | null | null |
controller/__init__.py
|
nlaurens/budgetRapportage
|
a6edb3525d04e6f2660671e8ba02e56881cc861d
|
[
"MIT"
] | null | null | null |
controller/__init__.py
|
nlaurens/budgetRapportage
|
a6edb3525d04e6f2660671e8ba02e56881cc861d
|
[
"MIT"
] | null | null | null |
from .index import Index
from .report import Report
from .admin import Admin
from .graph import Graph
from .view import View
from .salaris import Salaris
from .orderlist import Orderlist
| 24.25
| 33
| 0.783505
| 28
| 194
| 5.428571
| 0.321429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180412
| 194
| 7
| 34
| 27.714286
| 0.955975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
771c00a3e9b9c1ea10c9a5c93f390d20293c0e40
| 227
|
py
|
Python
|
src/thinker.py
|
EmmanuelMess/TrafficSim
|
ce4377c67d9e4e581f40676b2c47e334307eefdc
|
[
"MIT"
] | null | null | null |
src/thinker.py
|
EmmanuelMess/TrafficSim
|
ce4377c67d9e4e581f40676b2c47e334307eefdc
|
[
"MIT"
] | 2
|
2019-11-19T22:33:26.000Z
|
2019-11-20T20:31:49.000Z
|
src/thinker.py
|
EmmanuelMess/TrafficSim
|
ce4377c67d9e4e581f40676b2c47e334307eefdc
|
[
"MIT"
] | null | null | null |
class Thinker:
def __init__(self):
self.velocity = 0
def viewDistance(self):
return 20
def step(self, deltaTime):
self.velocity = 30
def getVelocity(self):
return self.velocity
| 18.916667
| 30
| 0.603524
| 26
| 227
| 5.115385
| 0.538462
| 0.270677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032051
| 0.312775
| 227
| 12
| 31
| 18.916667
| 0.820513
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0
| 0
| 0.222222
| 0.777778
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
7724f315411fd78c1e0cbd08280cceef766b8765
| 239
|
py
|
Python
|
app/robot/fetcher/auth/BaseAuth.py
|
matrufsc2/matrufsc2
|
d8a32c532281cc2a09a26444bd5b8497bc578b18
|
[
"RSA-MD"
] | 4
|
2017-07-07T19:04:07.000Z
|
2018-07-04T18:03:49.000Z
|
app/robot/fetcher/auth/BaseAuth.py
|
matrufsc2/matrufsc2
|
d8a32c532281cc2a09a26444bd5b8497bc578b18
|
[
"RSA-MD"
] | 6
|
2015-02-27T03:21:02.000Z
|
2019-07-30T19:58:35.000Z
|
app/robot/fetcher/auth/BaseAuth.py
|
matrufsc2/matrufsc2
|
d8a32c532281cc2a09a26444bd5b8497bc578b18
|
[
"RSA-MD"
] | null | null | null |
__author__ = 'fernando'
class BaseAuth(object):
def has_data(self):
raise NotImplementedError()
def get_username(self):
raise NotImplementedError()
def get_password(self):
raise NotImplementedError()
| 19.916667
| 35
| 0.677824
| 23
| 239
| 6.73913
| 0.608696
| 0.174194
| 0.541935
| 0.4
| 0.43871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.23431
| 239
| 12
| 36
| 19.916667
| 0.846995
| 0
| 0
| 0.375
| 0
| 0
| 0.033333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0.125
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
77369d43ee4c53fdb436183d31430f1141cd4da7
| 81
|
py
|
Python
|
pyvisdk/do/object.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
pyvisdk/do/object.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
pyvisdk/do/object.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
'''
Created on Aug 7, 2011
@author: eplaster
'''
from __builtin__ import object
| 11.571429
| 30
| 0.716049
| 11
| 81
| 4.909091
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074627
| 0.17284
| 81
| 7
| 30
| 11.571429
| 0.731343
| 0.506173
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
91f5cfc2369718dc44c1c8613d10ae965248b21b
| 59
|
py
|
Python
|
pds_github_util/corral/__init__.py
|
NASA-PDS-Incubator/pds-github-util
|
fec9f28dd8e2f7fa3389910399cadef72cef7b0a
|
[
"Apache-2.0"
] | null | null | null |
pds_github_util/corral/__init__.py
|
NASA-PDS-Incubator/pds-github-util
|
fec9f28dd8e2f7fa3389910399cadef72cef7b0a
|
[
"Apache-2.0"
] | 42
|
2020-09-17T17:30:40.000Z
|
2022-03-31T21:09:19.000Z
|
pds_github_util/corral/__init__.py
|
nasa-pds-engineering-node/pds-github-util
|
d65aa96787e77fe8d4ee8c023d5c6ca32bbd13c9
|
[
"Apache-2.0"
] | 3
|
2020-08-12T23:02:40.000Z
|
2021-09-30T11:57:59.000Z
|
from .cattle_head import CattleHead
from .herd import Herd
| 19.666667
| 35
| 0.830508
| 9
| 59
| 5.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135593
| 59
| 2
| 36
| 29.5
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
91f99ebf3167a26bfccdc652262cd3931168020b
| 138
|
py
|
Python
|
mini_lambda/tests/test_packaging.py
|
semiversus/python-mini-lambda
|
35ec4b6304b08ffd28939ffef7ead6b150dc1525
|
[
"BSD-3-Clause"
] | 9
|
2018-09-11T13:01:40.000Z
|
2021-04-07T13:13:08.000Z
|
mini_lambda/tests/test_packaging.py
|
semiversus/python-mini-lambda
|
35ec4b6304b08ffd28939ffef7ead6b150dc1525
|
[
"BSD-3-Clause"
] | 22
|
2017-11-15T08:47:20.000Z
|
2021-05-09T04:25:36.000Z
|
mini_lambda/tests/test_packaging.py
|
semiversus/python-mini-lambda
|
35ec4b6304b08ffd28939ffef7ead6b150dc1525
|
[
"BSD-3-Clause"
] | 1
|
2018-10-01T18:46:19.000Z
|
2018-10-01T18:46:19.000Z
|
def test_named_import():
import mini_lambda as ml
o = ml.InputVar
def test_import_from():
from mini_lambda import InputVar
| 15.333333
| 36
| 0.724638
| 21
| 138
| 4.47619
| 0.52381
| 0.148936
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217391
| 138
| 8
| 37
| 17.25
| 0.87037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.8
| 0
| 1.2
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
624b1163be7ae82592ce8ea52679fdedc7221ce8
| 778
|
py
|
Python
|
examples/renderer/server/base_server.py
|
justinkterry/MAgent
|
d7b8bfb67f41e943018445fa0441d53c9364f3ba
|
[
"MIT"
] | 1,532
|
2017-12-05T12:13:44.000Z
|
2022-03-31T02:38:01.000Z
|
examples/renderer/server/base_server.py
|
justinkterry/MAgent
|
d7b8bfb67f41e943018445fa0441d53c9364f3ba
|
[
"MIT"
] | 84
|
2017-12-06T07:51:19.000Z
|
2022-02-12T07:12:59.000Z
|
examples/renderer/server/base_server.py
|
justinkterry/MAgent
|
d7b8bfb67f41e943018445fa0441d53c9364f3ba
|
[
"MIT"
] | 319
|
2017-12-06T05:56:59.000Z
|
2022-03-26T11:56:25.000Z
|
from abc import ABCMeta, abstractmethod
class BaseServer:
__metaclass__ = ABCMeta
@abstractmethod
def get_info(self):
pass
@abstractmethod
def get_data(self, frame_id, x_range, y_range):
pass
@abstractmethod
def add_agents(self, x, y, g):
pass
@abstractmethod
def get_map_size(self):
pass
@abstractmethod
def get_banners(self, frame_id, resolution):
pass
@abstractmethod
def get_status(self, frame_id):
pass
@abstractmethod
def keydown(self, frame_id, key, mouse_x, mouse_y):
pass
@abstractmethod
def mousedown(self, frame_id, key, mouse_x, mouse_y):
pass
@abstractmethod
def get_endscreen(self, frame_id):
pass
| 18.97561
| 57
| 0.631105
| 92
| 778
| 5.076087
| 0.347826
| 0.327623
| 0.359743
| 0.256959
| 0.321199
| 0.201285
| 0.201285
| 0.201285
| 0.201285
| 0.201285
| 0
| 0
| 0.29563
| 778
| 41
| 58
| 18.97561
| 0.85219
| 0
| 0
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3
| false
| 0.3
| 0.033333
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
6268b5a5e1ff7ca51bdc52e4c3799d5135111978
| 1,337
|
py
|
Python
|
python/Day2.py
|
Simik31/AOC-2021
|
fc0459cf4f3af8439657969b4e957a35e5d56484
|
[
"WTFPL"
] | null | null | null |
python/Day2.py
|
Simik31/AOC-2021
|
fc0459cf4f3af8439657969b4e957a35e5d56484
|
[
"WTFPL"
] | null | null | null |
python/Day2.py
|
Simik31/AOC-2021
|
fc0459cf4f3af8439657969b4e957a35e5d56484
|
[
"WTFPL"
] | null | null | null |
def part_1() -> None:
h_pos: int = 0
v_pos: int = 0
with open("../data/day2.txt", "r") as dFile:
for row in dFile.readlines():
value: int = int(row.split(" ")[1])
match row.split(" ")[0]:
case "forward":
h_pos += value
case "down":
v_pos += value
case "up":
v_pos -= value
case _:
raise ValueError(f"Unsupported command {row.split(' ')[0]}")
print("Day: 2 | Part: 1 | Result:", h_pos * v_pos)
def part_2() -> None:
h_pos: int = 0
v_pos: int = 0
aim : int = 0
with open("../data/day2.txt", "r") as dFile:
for row in dFile.readlines():
value: int = int(row.split(" ")[1])
match row.split(" ")[0]:
case "forward":
h_pos += value
v_pos += value * aim
case "down":
aim += value
case "up":
aim -= value
case _:
raise ValueError(f"Unsupported command {row.split(' ')[0]}")
print("Day: 2 | Part: 2 | Result:", h_pos * v_pos)
if __name__ == "__main__":
part_1()
part_2()
| 27.854167
| 80
| 0.403889
| 150
| 1,337
| 3.42
| 0.273333
| 0.054581
| 0.054581
| 0.042885
| 0.795322
| 0.740741
| 0.740741
| 0.740741
| 0.740741
| 0.670565
| 0
| 0.029006
| 0.458489
| 1,337
| 47
| 81
| 28.446809
| 0.679558
| 0
| 0
| 0.648649
| 0
| 0
| 0.15258
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0
| 0
| 0.054054
| 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
626b1abb133ee171c2b763535db23487962938cf
| 166
|
py
|
Python
|
ustreasurycurve/__init__.py
|
oisinkenny/USTreasuryCurve
|
214f299c57227232f608a560bb7ff8862b37ed3c
|
[
"MIT"
] | 1
|
2020-07-24T14:48:51.000Z
|
2020-07-24T14:48:51.000Z
|
ustreasurycurve/__init__.py
|
oisinkenny/USTreasuryCurve
|
214f299c57227232f608a560bb7ff8862b37ed3c
|
[
"MIT"
] | null | null | null |
ustreasurycurve/__init__.py
|
oisinkenny/USTreasuryCurve
|
214f299c57227232f608a560bb7ff8862b37ed3c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 11 20:27:21 2020
@author: oisin
"""
from .nominalRates import nominalRates
from .realRates import realRates
| 16.6
| 39
| 0.662651
| 22
| 166
| 5
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.099237
| 0.210843
| 166
| 9
| 40
| 18.444444
| 0.740458
| 0.445783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
62742a76f6ecc1b2143089ab3914c883babe018a
| 226
|
py
|
Python
|
tests/test_Matrix.py
|
AlirezaRa/PySchoenberg
|
9c97fa62a51974238d97ee72868b26d2d18e05ff
|
[
"Apache-2.0"
] | 8
|
2015-07-20T21:39:31.000Z
|
2017-01-11T17:28:15.000Z
|
tests/test_Matrix.py
|
aalireza/PySchoenberg
|
9c97fa62a51974238d97ee72868b26d2d18e05ff
|
[
"Apache-2.0"
] | null | null | null |
tests/test_Matrix.py
|
aalireza/PySchoenberg
|
9c97fa62a51974238d97ee72868b26d2d18e05ff
|
[
"Apache-2.0"
] | 1
|
2015-02-21T06:05:19.000Z
|
2015-02-21T06:05:19.000Z
|
from PySchoenberg.core import Note, Row
import pytest
def test_matrix():
pass
def test_transposition():
pass
def test_numerical():
pass
def test_row_traversal():
pass
def test_column_traversal():
pass
| 12.555556
| 39
| 0.716814
| 30
| 226
| 5.166667
| 0.5
| 0.225806
| 0.283871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.207965
| 226
| 17
| 40
| 13.294118
| 0.865922
| 0
| 0
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.416667
| true
| 0.416667
| 0.166667
| 0
| 0.583333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
65727e05e032488c8982900fbdc27c08419fb7ab
| 123
|
py
|
Python
|
setup.py
|
flavio-casacurta/File-FixedS
|
a0435e52187af7b9a88f87644c76d0f611f1c3df
|
[
"MIT"
] | 1
|
2018-09-21T13:05:06.000Z
|
2018-09-21T13:05:06.000Z
|
setup.py
|
flavio-casacurta/File-FixedS
|
a0435e52187af7b9a88f87644c76d0f611f1c3df
|
[
"MIT"
] | null | null | null |
setup.py
|
flavio-casacurta/File-FixedS
|
a0435e52187af7b9a88f87644c76d0f611f1c3df
|
[
"MIT"
] | 1
|
2018-09-21T13:05:10.000Z
|
2018-09-21T13:05:10.000Z
|
# setup.py
# para gerar %run setup.py py2exe
from distutils.core import setup
import py2exe
setup(console=["Hex2Zip.py"])
| 17.571429
| 33
| 0.756098
| 19
| 123
| 4.894737
| 0.631579
| 0.150538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028037
| 0.130081
| 123
| 6
| 34
| 20.5
| 0.841122
| 0.325203
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
659c02b284b8bdf2eb28abd47be6664c737e9b35
| 233
|
py
|
Python
|
backend/api/serializers/contact_serializer.py
|
ferdn4ndo/infotrem
|
4728c5fe8385dcc0a1c75068429fa20e2afbf6f2
|
[
"MIT"
] | null | null | null |
backend/api/serializers/contact_serializer.py
|
ferdn4ndo/infotrem
|
4728c5fe8385dcc0a1c75068429fa20e2afbf6f2
|
[
"MIT"
] | 1
|
2020-06-21T18:38:14.000Z
|
2020-06-21T21:57:09.000Z
|
backend/api/serializers/contact_serializer.py
|
ferdn4ndo/infotrem
|
4728c5fe8385dcc0a1c75068429fa20e2afbf6f2
|
[
"MIT"
] | null | null | null |
from api.models import Contact
from .generic_audited_model_serializer import GenericAuditedModelSerializer
class ContactSerializer(GenericAuditedModelSerializer):
class Meta:
model = Contact
fields = '__all__'
| 23.3
| 75
| 0.781116
| 21
| 233
| 8.333333
| 0.714286
| 0.388571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175966
| 233
| 9
| 76
| 25.888889
| 0.911458
| 0
| 0
| 0
| 0
| 0
| 0.030043
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
659dacbd8c99c29d29d5ddbe16574f603dc32a2d
| 170
|
py
|
Python
|
appr/models/kv/redis/__init__.py
|
sergeyberezansky/appr
|
03168addf05c3efd779dad5168fb0a80d0512100
|
[
"Apache-2.0"
] | 31
|
2017-07-05T07:25:31.000Z
|
2021-01-18T22:21:57.000Z
|
appr/models/kv/redis/__init__.py
|
sergeyberezansky/appr
|
03168addf05c3efd779dad5168fb0a80d0512100
|
[
"Apache-2.0"
] | 48
|
2017-06-27T15:48:29.000Z
|
2021-01-26T21:02:27.000Z
|
appr/models/kv/redis/__init__.py
|
sergeyberezansky/appr
|
03168addf05c3efd779dad5168fb0a80d0512100
|
[
"Apache-2.0"
] | 17
|
2017-07-05T07:25:38.000Z
|
2021-01-20T14:52:29.000Z
|
import os
import redis
REDIS_HOST = os.getenv("REDIS_HOST", "localhost")
# @TODO redis-configuration
redis_client = redis.StrictRedis(host=REDIS_HOST, port=6379, db=0)
| 21.25
| 66
| 0.770588
| 25
| 170
| 5.08
| 0.56
| 0.212598
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032895
| 0.105882
| 170
| 7
| 67
| 24.285714
| 0.802632
| 0.147059
| 0
| 0
| 0
| 0
| 0.132867
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
02eb012cc2f7314547dbbb6cbeeb60cb6cdb88db
| 225
|
py
|
Python
|
03 Asset Classes/03 Futures/01 Futures/02 code.py
|
Jay-Jay-D/Documentation
|
c4894e5ac20355ec82ee0db19618ad7f17bf8592
|
[
"Apache-2.0"
] | null | null | null |
03 Asset Classes/03 Futures/01 Futures/02 code.py
|
Jay-Jay-D/Documentation
|
c4894e5ac20355ec82ee0db19618ad7f17bf8592
|
[
"Apache-2.0"
] | null | null | null |
03 Asset Classes/03 Futures/01 Futures/02 code.py
|
Jay-Jay-D/Documentation
|
c4894e5ac20355ec82ee0db19618ad7f17bf8592
|
[
"Apache-2.0"
] | null | null | null |
# In Initialize
future = self.AddFuture(Futures.Indices.SP500EMini, Resolution.Minute)
future.SetFilter(timedelta(0), timedelta(182))
# or Lambda
future.SetFilter(universe => universe.Expiration(timedelta(0), timedelta(182)))
| 45
| 79
| 0.795556
| 27
| 225
| 6.62963
| 0.666667
| 0.167598
| 0.212291
| 0.24581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052381
| 0.066667
| 225
| 5
| 79
| 45
| 0.8
| 0.102222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f3075170837be4781e389cec971a9eaaadb05aad
| 287
|
py
|
Python
|
bitmovin_api_sdk/encoding/infrastructure/azure/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 11
|
2019-07-03T10:41:16.000Z
|
2022-02-25T21:48:06.000Z
|
bitmovin_api_sdk/encoding/infrastructure/azure/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 8
|
2019-11-23T00:01:25.000Z
|
2021-04-29T12:30:31.000Z
|
bitmovin_api_sdk/encoding/infrastructure/azure/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 13
|
2020-01-02T14:58:18.000Z
|
2022-03-26T12:10:30.000Z
|
from bitmovin_api_sdk.encoding.infrastructure.azure.azure_api import AzureApi
from bitmovin_api_sdk.encoding.infrastructure.azure.regions.regions_api import RegionsApi
from bitmovin_api_sdk.encoding.infrastructure.azure.azure_account_list_query_params import AzureAccountListQueryParams
| 71.75
| 118
| 0.912892
| 37
| 287
| 6.756757
| 0.432432
| 0.144
| 0.18
| 0.216
| 0.58
| 0.58
| 0.58
| 0.4
| 0
| 0
| 0
| 0
| 0.041812
| 287
| 3
| 119
| 95.666667
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f310f0fefb69d9b85e9cfdcc4443d344ca229f64
| 163
|
py
|
Python
|
src/tests/test_server.py
|
ShannonTully/super-cool-chat
|
4f5e9920146dc0e27efac3d62a093d45d514034c
|
[
"MIT"
] | 1
|
2018-12-07T02:49:05.000Z
|
2018-12-07T02:49:05.000Z
|
src/tests/test_server.py
|
ShannonTully/super-cool-chat
|
4f5e9920146dc0e27efac3d62a093d45d514034c
|
[
"MIT"
] | 16
|
2018-12-04T19:06:02.000Z
|
2018-12-05T02:06:54.000Z
|
src/tests/test_server.py
|
ShannonTully/super-cool-chat
|
4f5e9920146dc0e27efac3d62a093d45d514034c
|
[
"MIT"
] | null | null | null |
from ..server import ChatServer
from ..client import Client
import pytest
# @pytest.fixture
# def server():
# output = ChatServer(4444)
# return output
| 14.818182
| 31
| 0.699387
| 19
| 163
| 6
| 0.578947
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030769
| 0.202454
| 163
| 10
| 32
| 16.3
| 0.846154
| 0.472393
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f31607ec296074c6b25d22992328d74b581462ae
| 102
|
py
|
Python
|
shi.py
|
salsita/shishito
|
caaf0a359abe5db256a30b88486f0bcd24bc6cbf
|
[
"MIT"
] | 7
|
2015-01-05T15:07:22.000Z
|
2019-05-21T22:48:32.000Z
|
shi.py
|
salsita/shishito
|
caaf0a359abe5db256a30b88486f0bcd24bc6cbf
|
[
"MIT"
] | 22
|
2015-01-05T11:54:50.000Z
|
2019-08-12T11:50:28.000Z
|
shi.py
|
salsita/shishito
|
caaf0a359abe5db256a30b88486f0bcd24bc6cbf
|
[
"MIT"
] | 5
|
2015-05-06T09:39:15.000Z
|
2020-08-21T07:39:01.000Z
|
from shishito.shishito_runner import ShishitoRunner
import os
ShishitoRunner(os.getcwd()).run_tests()
| 25.5
| 51
| 0.843137
| 13
| 102
| 6.461538
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068627
| 102
| 3
| 52
| 34
| 0.884211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f3266125285818aec2e3ba93a22429670b682c43
| 25
|
py
|
Python
|
Nova pasta (2)/passagem_de_argumentos.py
|
cristest/python
|
378605e64604978b746b160432d899ef5ad31e3c
|
[
"Apache-2.0"
] | null | null | null |
Nova pasta (2)/passagem_de_argumentos.py
|
cristest/python
|
378605e64604978b746b160432d899ef5ad31e3c
|
[
"Apache-2.0"
] | null | null | null |
Nova pasta (2)/passagem_de_argumentos.py
|
cristest/python
|
378605e64604978b746b160432d899ef5ad31e3c
|
[
"Apache-2.0"
] | null | null | null |
#Passagem de Argumentos
| 12.5
| 24
| 0.8
| 3
| 25
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0.88
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b87fc8781fe2ed599ee0d192c870bb418f11d1e2
| 3,991
|
py
|
Python
|
infos/forms.py
|
acdh-oeaw/bestiarium
|
49652a0471ba9aae3ea5246e694129c91256f651
|
[
"MIT"
] | null | null | null |
infos/forms.py
|
acdh-oeaw/bestiarium
|
49652a0471ba9aae3ea5246e694129c91256f651
|
[
"MIT"
] | 14
|
2021-11-08T09:24:04.000Z
|
2022-03-09T15:49:57.000Z
|
infos/forms.py
|
acdh-oeaw/bestiarium
|
49652a0471ba9aae3ea5246e694129c91256f651
|
[
"MIT"
] | null | null | null |
# generated by appcreator
from crispy_forms.bootstrap import Accordion, AccordionGroup
from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, Div, Fieldset, Layout, MultiField, Submit
from django import forms
from .models import AboutTheProject, ProjectInst, TeamMember
class ProjectInstFilterFormHelper(FormHelper):
def __init__(self, *args, **kwargs):
super(ProjectInstFilterFormHelper, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.form_class = 'genericFilterForm'
self.form_method = 'GET'
self.helper.form_tag = False
self.add_input(Submit('Filter', 'Search'))
self.layout = Layout(
Fieldset(
'Basic search options',
'id',
'name',
css_id="basic_search_fields"
),
Accordion(
AccordionGroup(
'More',
'website',
css_id="more"
),
)
)
class ProjectInstForm(forms.ModelForm):
class Meta:
model = ProjectInst
fields = "__all__"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = True
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-md-3'
self.helper.field_class = 'col-md-9'
self.helper.add_input(Submit('submit', 'save'),)
class TeamMemberFilterFormHelper(FormHelper):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.form_class = 'genericFilterForm'
self.form_method = 'GET'
self.helper.form_tag = False
self.add_input(Submit('Filter', 'Search'))
self.layout = Layout(
Fieldset(
'Basic search options',
'id',
'name',
css_id="basic_search_fields"
),
Accordion(
AccordionGroup(
'More',
'website',
'role',
css_id="more"
),
)
)
class TeamMemberForm(forms.ModelForm):
class Meta:
model = TeamMember
fields = "__all__"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = True
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-md-3'
self.helper.field_class = 'col-md-9'
self.helper.add_input(Submit('submit', 'save'),)
class AboutTheProjectFilterFormHelper(FormHelper):
    """Crispy-forms helper for the AboutTheProject list-filter form (GET)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_tag = False
        self.form_class = 'genericFilterForm'
        self.form_method = 'GET'
        self.add_input(Submit('Filter', 'Search'))
        # Assemble the two layout sections separately for readability.
        basic_fields = Fieldset(
            'Basic search options',
            'id',
            'description',
            css_id="basic_search_fields",
        )
        author_fields = Accordion(
            AccordionGroup(
                'Authors',
                'author',
                css_id="more",
            ),
        )
        self.layout = Layout(basic_fields, author_fields)
class AboutTheProjectForm(forms.ModelForm):
    """Model form exposing every AboutTheProject field with crispy styling."""

    class Meta:
        model = AboutTheProject
        fields = "__all__"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Build the helper fully before attaching it to the form.
        helper = FormHelper()
        helper.form_tag = True
        helper.form_class = 'form-horizontal'
        helper.label_class = 'col-md-3'
        helper.field_class = 'col-md-9'
        helper.add_input(Submit('submit', 'save'))
        self.helper = helper
| 30.937984
| 79
| 0.538462
| 367
| 3,991
| 5.577657
| 0.19346
| 0.117245
| 0.061553
| 0.043967
| 0.744993
| 0.703957
| 0.703957
| 0.664387
| 0.664387
| 0.664387
| 0
| 0.002312
| 0.349787
| 3,991
| 128
| 80
| 31.179688
| 0.786513
| 0.005763
| 0
| 0.774775
| 1
| 0
| 0.109178
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.045045
| 0
| 0.18018
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b28d363e01432d0c5bc5a0742316f706baafe86e
| 75
|
py
|
Python
|
examples/search.py
|
bhgomes/oeis
|
27555acdcdeec936431789c052ea097b1aa19d69
|
[
"Unlicense"
] | 6
|
2019-03-29T05:01:35.000Z
|
2021-12-21T08:13:38.000Z
|
examples/search.py
|
bhgomes/oeis
|
27555acdcdeec936431789c052ea097b1aa19d69
|
[
"Unlicense"
] | 4
|
2019-04-05T08:45:23.000Z
|
2019-04-15T06:48:40.000Z
|
examples/search.py
|
bhgomes/oeis
|
27555acdcdeec936431789c052ea097b1aa19d69
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*- #
import oeis

# Query the OEIS for sequences matching the prefix 1, 2, 3, 4, 5.
result = oeis.query("1, 2, 3, 4, 5")
print(result)
| 12.5
| 34
| 0.52
| 13
| 75
| 3
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0.2
| 75
| 5
| 35
| 15
| 0.55
| 0.28
| 0
| 0
| 0
| 0
| 0.26
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
b2a406b1fc11d4991d5c9d80b0c22d4e26740646
| 64
|
py
|
Python
|
scripts/npc/viking_cannon1.py
|
Snewmy/swordie
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
[
"MIT"
] | 9
|
2021-04-26T11:59:29.000Z
|
2021-12-20T13:15:27.000Z
|
scripts/npc/viking_cannon1.py
|
Snewmy/swordie
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
[
"MIT"
] | null | null | null |
scripts/npc/viking_cannon1.py
|
Snewmy/swordie
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
[
"MIT"
] | 6
|
2021-07-14T06:32:05.000Z
|
2022-02-06T02:32:56.000Z
|
# Cannon (1302008) | Ship Deck 1 (106030500)
# Warp the interacting player to the hard-coded destination map.
# NOTE(review): destination id 106030102 differs from the 106030500 in the
# header comment -- presumably intentional; confirm against map data.
sm.warp(106030102)
| 21.333333
| 44
| 0.71875
| 9
| 64
| 5.111111
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.472727
| 0.140625
| 64
| 3
| 45
| 21.333333
| 0.363636
| 0.65625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a25522bd87ac778efb957f91bce8f9fe9bc472bc
| 349
|
py
|
Python
|
0801-0900/0804-Unique Morse Code Words/0804-Unique Morse Code Words.py
|
jiadaizhao/LeetCode
|
4ddea0a532fe7c5d053ffbd6870174ec99fc2d60
|
[
"MIT"
] | 49
|
2018-05-05T02:53:10.000Z
|
2022-03-30T12:08:09.000Z
|
0801-0900/0804-Unique Morse Code Words/0804-Unique Morse Code Words.py
|
jolly-fellow/LeetCode
|
ab20b3ec137ed05fad1edda1c30db04ab355486f
|
[
"MIT"
] | 11
|
2017-12-15T22:31:44.000Z
|
2020-10-02T12:42:49.000Z
|
0801-0900/0804-Unique Morse Code Words/0804-Unique Morse Code Words.py
|
jolly-fellow/LeetCode
|
ab20b3ec137ed05fad1edda1c30db04ab355486f
|
[
"MIT"
] | 28
|
2017-12-05T10:56:51.000Z
|
2022-01-26T18:18:27.000Z
|
class Solution:
    # Morse code for 'a'..'z', indexed by letter position (a=0 ... z=25).
    _MORSE = [".-","-...","-.-.","-..",".","..-.","--.","....","..",".---","-.-",
              ".-..","--","-.","---",".--.","--.-",".-.","...","-","..-","...-",
              ".--","-..-","-.--","--.."]

    def uniqueMorseRepresentations(self, words: list[str]) -> int:
        """Return the number of distinct Morse transformations of *words*.

        Each lowercase word maps to the concatenation of its letters'
        Morse codes; a set comprehension collapses duplicates.

        Fixed: the original annotated ``words`` as ``List[str]`` without
        importing ``typing.List``, raising NameError at class creation;
        the builtin generic ``list[str]`` (PEP 585) needs no import.
        """
        table = self._MORSE
        base = ord('a')
        return len({''.join(table[ord(c) - base] for c in word) for word in words})
| 69.8
| 177
| 0.332378
| 26
| 349
| 4.461538
| 0.730769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123209
| 349
| 4
| 178
| 87.25
| 0.379085
| 0
| 0
| 0
| 0
| 0
| 0.237822
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
a263667dab6bdb89a5da49c5eb57a5025c47e2b8
| 91
|
py
|
Python
|
UCTB/dataset/__init__.py
|
TempAnonymous/Context_Analysis
|
bbeba1ed7ea7001c22a12721fc4f390d4cc01a6e
|
[
"MIT"
] | 3
|
2021-06-29T06:18:18.000Z
|
2021-09-07T03:11:35.000Z
|
UCTB/dataset/__init__.py
|
TempAnonymous/Context_Analysis
|
bbeba1ed7ea7001c22a12721fc4f390d4cc01a6e
|
[
"MIT"
] | null | null | null |
UCTB/dataset/__init__.py
|
TempAnonymous/Context_Analysis
|
bbeba1ed7ea7001c22a12721fc4f390d4cc01a6e
|
[
"MIT"
] | null | null | null |
from .data_loader import NodeTrafficLoader, TransferDataLoader
from .dataset import DataSet
| 45.5
| 62
| 0.879121
| 10
| 91
| 7.9
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087912
| 91
| 2
| 63
| 45.5
| 0.951807
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a279dd326affbc95da792764630e5a633f40958d
| 19
|
py
|
Python
|
deleteme.py
|
LanayaCarbonell/stem
|
8b3789d33c5bfdf949e679a4a4323c71543ce2ff
|
[
"MIT"
] | null | null | null |
deleteme.py
|
LanayaCarbonell/stem
|
8b3789d33c5bfdf949e679a4a4323c71543ce2ff
|
[
"MIT"
] | null | null | null |
deleteme.py
|
LanayaCarbonell/stem
|
8b3789d33c5bfdf949e679a4a4323c71543ce2ff
|
[
"MIT"
] | null | null | null |
# Placeholder script; emits a single marker line.
message = "Delete Me"
print(message)
| 9.5
| 18
| 0.684211
| 3
| 19
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 19
| 1
| 19
| 19
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0.473684
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
a29f9710d8f955caa577387a2989968c5e683cf0
| 125
|
py
|
Python
|
asynchronous_qiwi/data_types/QIWIWallet/pay_methods.py
|
LexLuthorReal/asynchronous_qiwi
|
5847a8d4008493656e973e5283888a4e57234962
|
[
"MIT"
] | 3
|
2021-05-20T02:36:30.000Z
|
2021-11-28T16:00:15.000Z
|
asynchronous_qiwi/data_types/QIWIWallet/pay_methods.py
|
LexLuthorReal/asynchronous_qiwi
|
5847a8d4008493656e973e5283888a4e57234962
|
[
"MIT"
] | null | null | null |
asynchronous_qiwi/data_types/QIWIWallet/pay_methods.py
|
LexLuthorReal/asynchronous_qiwi
|
5847a8d4008493656e973e5283888a4e57234962
|
[
"MIT"
] | 1
|
2021-11-28T16:00:20.000Z
|
2021-11-28T16:00:20.000Z
|
import enum
class PayMethodFilter(enum.Enum):
    """Filter constants for selecting a QIWI wallet payment method."""

    # Explicit values mirror exactly what enum.auto() assigned (1, 2, 3).
    QIWI = 1
    LINKED_CARD = 2
    CARD = 3
| 13.888889
| 33
| 0.648
| 16
| 125
| 5
| 0.5
| 0.3
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.224
| 125
| 8
| 34
| 15.625
| 0.824742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
a2b8d4c40ec1fe0214cf934dfa9f688b2f131918
| 44
|
py
|
Python
|
ocrn/__init__.py
|
swvist/Ocrn
|
173d0017317af34595b532aa5c65c6a291ae0a90
|
[
"MIT"
] | 24
|
2015-08-01T16:07:27.000Z
|
2020-04-28T02:11:50.000Z
|
ocrn/__init__.py
|
nxvipin/Ocrn
|
173d0017317af34595b532aa5c65c6a291ae0a90
|
[
"MIT"
] | 2
|
2016-01-17T17:59:23.000Z
|
2018-03-28T06:29:17.000Z
|
ocrn/__init__.py
|
nxvipin/Ocrn
|
173d0017317af34595b532aa5c65c6a291ae0a90
|
[
"MIT"
] | 24
|
2015-04-01T08:58:34.000Z
|
2019-12-03T02:03:10.000Z
|
#To treat the current directory as a module
| 22
| 43
| 0.795455
| 8
| 44
| 4.375
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 44
| 1
| 44
| 44
| 0.972222
| 0.954545
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0c3d0dcf207511a22e50ae9c4278bab76b26f5b7
| 9,290
|
py
|
Python
|
tests/hwsim/test_mbo.py
|
9A9A/wpa_supplicant-fork
|
f33468a35109317ef0ed9cdd6eceb8b068a4278b
|
[
"Unlicense"
] | null | null | null |
tests/hwsim/test_mbo.py
|
9A9A/wpa_supplicant-fork
|
f33468a35109317ef0ed9cdd6eceb8b068a4278b
|
[
"Unlicense"
] | null | null | null |
tests/hwsim/test_mbo.py
|
9A9A/wpa_supplicant-fork
|
f33468a35109317ef0ed9cdd6eceb8b068a4278b
|
[
"Unlicense"
] | null | null | null |
# MBO tests
# Copyright (c) 2016, Intel Deutschland GmbH
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import hostapd
import os
import time
from tshark import run_tshark
def test_mbo_assoc_disallow(dev, apdev, params):
    """MBO: STA must not send association requests to an AP that disallows them.

    Toggles mbo_assoc_disallow between two APs and inspects the hwsim packet
    capture to verify no association request targeted the disallowed AP.
    """
    hapd1 = hostapd.add_ap(apdev[0], { "ssid": "MBO", "mbo": "1" })
    hapd2 = hostapd.add_ap(apdev[1], { "ssid": "MBO", "mbo": "1" })
    logger.debug("Set mbo_assoc_disallow with invalid value")
    if "FAIL" not in hapd1.request("SET mbo_assoc_disallow 2"):
        raise Exception("Set mbo_assoc_disallow for AP1 succeeded unexpectedly with value 2")
    logger.debug("Disallow associations to AP1 and allow association to AP2")
    if "OK" not in hapd1.request("SET mbo_assoc_disallow 1"):
        raise Exception("Failed to set mbo_assoc_disallow for AP1")
    if "OK" not in hapd2.request("SET mbo_assoc_disallow 0"):
        raise Exception("Failed to set mbo_assoc_disallow for AP2")
    dev[0].connect("MBO", key_mgmt="NONE", scan_freq="2412")
    # Filter the capture for association request frames (type 0, subtype 0).
    out = run_tshark(os.path.join(params['logdir'], "hwsim0.pcapng"),
                     "wlan.fc.type == 0 && wlan.fc.type_subtype == 0x00",
                     wait=False)
    if "Destination address: " + hapd1.own_addr() in out:
        raise Exception("Association request sent to disallowed AP")
    # Record the time of the last association request so the second phase
    # can look only at frames captured after this point.
    timestamp = run_tshark(os.path.join(params['logdir'], "hwsim0.pcapng"),
                           "wlan.fc.type_subtype == 0x00",
                           display=['frame.time'], wait=False)
    logger.debug("Allow associations to AP1 and disallow assications to AP2")
    if "OK" not in hapd1.request("SET mbo_assoc_disallow 0"):
        raise Exception("Failed to set mbo_assoc_disallow for AP1")
    if "OK" not in hapd2.request("SET mbo_assoc_disallow 1"):
        raise Exception("Failed to set mbo_assoc_disallow for AP2")
    dev[0].request("DISCONNECT")
    dev[0].wait_disconnected()
    # Force new scan, so the assoc_disallowed indication is updated */
    dev[0].request("FLUSH")
    dev[0].connect("MBO", key_mgmt="NONE", scan_freq="2412")
    filter = 'wlan.fc.type == 0 && wlan.fc.type_subtype == 0x00 && frame.time > "' + timestamp.rstrip() + '"'
    out = run_tshark(os.path.join(params['logdir'], "hwsim0.pcapng"),
                     filter, wait=False)
    if "Destination address: " + hapd2.own_addr() in out:
        raise Exception("Association request sent to disallowed AP 2")
def test_mbo_cell_capa_update(dev, apdev):
    """MBO cellular data capability update"""
    ssid = "test-wnm-mbo"
    params = { 'ssid': ssid, 'mbo': '1' }
    hapd = hostapd.add_ap(apdev[0], params)
    bssid = apdev[0]['bssid']
    if "OK" not in dev[0].request("SET mbo_cell_capa 1"):
        raise Exception("Failed to set STA as cellular data capable")
    dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
    addr = dev[0].own_addr()
    sta = hapd.get_sta(addr)
    # The capability advertised at association time must be visible on the AP.
    if 'mbo_cell_capa' not in sta or sta['mbo_cell_capa'] != '1':
        raise Exception("mbo_cell_capa missing after association")
    if "OK" not in dev[0].request("SET mbo_cell_capa 3"):
        raise Exception("Failed to set STA as cellular data not-capable")
    # Give the update notification a moment to reach the AP.
    time.sleep(0.2)
    sta = hapd.get_sta(addr)
    if 'mbo_cell_capa' not in sta:
        raise Exception("mbo_cell_capa missing after update")
    if sta['mbo_cell_capa'] != '3':
        raise Exception("mbo_cell_capa not updated properly")
def test_mbo_cell_capa_update_pmf(dev, apdev):
    """MBO cellular data capability update with PMF required

    Same flow as test_mbo_cell_capa_update, but with the AP requiring
    protected management frames (ieee80211w=2).
    """
    ssid = "test-wnm-mbo"
    passphrase = "12345678"
    params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
    # PMF-required AP configuration (removed stray C-style semicolons).
    params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
    params["ieee80211w"] = "2"
    params['mbo'] = '1'
    hapd = hostapd.add_ap(apdev[0], params)
    bssid = apdev[0]['bssid']
    if "OK" not in dev[0].request("SET mbo_cell_capa 1"):
        raise Exception("Failed to set STA as cellular data capable")
    dev[0].connect(ssid, psk=passphrase, key_mgmt="WPA-PSK-SHA256",
                   proto="WPA2", ieee80211w="2", scan_freq="2412")
    addr = dev[0].own_addr()
    sta = hapd.get_sta(addr)
    # The capability advertised at association time must be visible on the AP.
    if 'mbo_cell_capa' not in sta or sta['mbo_cell_capa'] != '1':
        raise Exception("mbo_cell_capa missing after association")
    if "OK" not in dev[0].request("SET mbo_cell_capa 3"):
        raise Exception("Failed to set STA as cellular data not-capable")
    # Give the update notification a moment to reach the AP.
    time.sleep(0.2)
    sta = hapd.get_sta(addr)
    if 'mbo_cell_capa' not in sta:
        raise Exception("mbo_cell_capa missing after update")
    if sta['mbo_cell_capa'] != '3':
        raise Exception("mbo_cell_capa not updated properly")
def test_mbo_non_pref_chan(dev, apdev):
    """MBO non-preferred channel list

    Sets and repeatedly updates the STA's non-preferred channel list and
    verifies the AP-side per-STA entries after each change. Fixed: the
    error message in the "update 1" branch said non_pref_chan[2] while the
    check is for non_pref_chan[1].
    """
    ssid = "test-wnm-mbo"
    params = { 'ssid': ssid, 'mbo': '1' }
    hapd = hostapd.add_ap(apdev[0], params)
    bssid = apdev[0]['bssid']
    if "OK" not in dev[0].request("SET non_pref_chan 81:7:200:3"):
        raise Exception("Failed to set non-preferred channel list")
    if "OK" not in dev[0].request("SET non_pref_chan 81:7:200:1:123 81:9:100:2"):
        raise Exception("Failed to set non-preferred channel list")
    dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
    addr = dev[0].own_addr()
    sta = hapd.get_sta(addr)
    logger.debug("STA: " + str(sta))
    # Values advertised during association.
    if 'non_pref_chan[0]' not in sta:
        raise Exception("Missing non_pref_chan[0] value (assoc)")
    if sta['non_pref_chan[0]'] != '81:200:1:123:7':
        raise Exception("Unexpected non_pref_chan[0] value (assoc)")
    if 'non_pref_chan[1]' not in sta:
        raise Exception("Missing non_pref_chan[1] value (assoc)")
    if sta['non_pref_chan[1]'] != '81:100:2:0:9':
        raise Exception("Unexpected non_pref_chan[1] value (assoc)")
    if 'non_pref_chan[2]' in sta:
        raise Exception("Unexpected non_pref_chan[2] value (assoc)")
    # Update 1: shrink the list to a single entry.
    if "OK" not in dev[0].request("SET non_pref_chan 81:9:100:2"):
        raise Exception("Failed to update non-preferred channel list")
    time.sleep(0.1)
    sta = hapd.get_sta(addr)
    logger.debug("STA: " + str(sta))
    if 'non_pref_chan[0]' not in sta:
        raise Exception("Missing non_pref_chan[0] value (update 1)")
    if sta['non_pref_chan[0]'] != '81:100:2:0:9':
        raise Exception("Unexpected non_pref_chan[0] value (update 1)")
    if 'non_pref_chan[1]' in sta:
        raise Exception("Unexpected non_pref_chan[1] value (update 1)")
    # Update 2: multiple entries; same-preference channels are merged.
    if "OK" not in dev[0].request("SET non_pref_chan 81:9:100:2 81:10:100:2 81:8:100:2 81:7:100:1:123 81:5:100:1:124"):
        raise Exception("Failed to update non-preferred channel list")
    time.sleep(0.1)
    sta = hapd.get_sta(addr)
    logger.debug("STA: " + str(sta))
    if 'non_pref_chan[0]' not in sta:
        raise Exception("Missing non_pref_chan[0] value (update 2)")
    if sta['non_pref_chan[0]'] != '81:100:1:123:7':
        raise Exception("Unexpected non_pref_chan[0] value (update 2)")
    if 'non_pref_chan[1]' not in sta:
        raise Exception("Missing non_pref_chan[1] value (update 2)")
    if sta['non_pref_chan[1]'] != '81:100:1:124:5':
        raise Exception("Unexpected non_pref_chan[1] value (update 2)")
    if 'non_pref_chan[2]' not in sta:
        raise Exception("Missing non_pref_chan[2] value (update 2)")
    if sta['non_pref_chan[2]'] != '81:100:2:0:9,10,8':
        raise Exception("Unexpected non_pref_chan[2] value (update 2)")
    if 'non_pref_chan[3]' in sta:
        raise Exception("Unexpected non_pref_chan[3] value (update 2)")
    # Update 3: entries in two different operating classes.
    if "OK" not in dev[0].request("SET non_pref_chan 81:5:90:2 82:14:91:2"):
        raise Exception("Failed to update non-preferred channel list")
    time.sleep(0.1)
    sta = hapd.get_sta(addr)
    logger.debug("STA: " + str(sta))
    if 'non_pref_chan[0]' not in sta:
        raise Exception("Missing non_pref_chan[0] value (update 3)")
    if sta['non_pref_chan[0]'] != '81:90:2:0:5':
        raise Exception("Unexpected non_pref_chan[0] value (update 3)")
    if 'non_pref_chan[1]' not in sta:
        raise Exception("Missing non_pref_chan[1] value (update 3)")
    if sta['non_pref_chan[1]'] != '82:91:2:0:14':
        raise Exception("Unexpected non_pref_chan[1] value (update 3)")
    if 'non_pref_chan[2]' in sta:
        raise Exception("Unexpected non_pref_chan[2] value (update 3)")
    # Update 4: clear the list entirely.
    if "OK" not in dev[0].request("SET non_pref_chan "):
        raise Exception("Failed to update non-preferred channel list")
    time.sleep(0.1)
    sta = hapd.get_sta(addr)
    logger.debug("STA: " + str(sta))
    if 'non_pref_chan[0]' in sta:
        raise Exception("Unexpected non_pref_chan[0] value (update 4)")
def test_mbo_sta_supp_op_classes(dev, apdev):
    """MBO STA supported operating classes

    Verifies the AP records the hex-encoded list of operating classes the
    STA advertised, with the current class (81) first and 115 present.
    """
    ssid = "test-wnm-mbo"
    params = { 'ssid': ssid, 'mbo': '1' }
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].connect(ssid, key_mgmt="NONE", scan_freq="2412")
    addr = dev[0].own_addr()
    sta = hapd.get_sta(addr)
    logger.debug("STA: " + str(sta))
    if 'supp_op_classes' not in sta:
        raise Exception("No supp_op_classes")
    # str.decode("hex") is Python 2 only; bytearray.fromhex() works on
    # Python 3 as well and yields the same byte values.
    supp = bytearray.fromhex(sta['supp_op_classes'])
    if supp[0] != 81:
        raise Exception("Unexpected current operating class %d" % supp[0])
    if 115 not in supp:
        raise Exception("Operating class 115 missing")
| 42.227273
| 119
| 0.658127
| 1,470
| 9,290
| 4.004762
| 0.121088
| 0.058264
| 0.091558
| 0.036691
| 0.789366
| 0.760829
| 0.749788
| 0.725327
| 0.682861
| 0.60761
| 0
| 0.050784
| 0.196663
| 9,290
| 219
| 120
| 42.420091
| 0.738041
| 0.023143
| 0
| 0.491429
| 0
| 0.011429
| 0.41019
| 0
| 0
| 0
| 0.00135
| 0
| 0
| 0
| null | null | 0.017143
| 0.028571
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a75c4ba21d117fa118a51e1a5bbc53e41582f1d1
| 5,543
|
py
|
Python
|
ample/util/tests/test_cphasematch.py
|
fsimkovic/ample
|
c3c2196ca292e831e3cd8d15e3d3079bb6609848
|
[
"BSD-3-Clause"
] | 6
|
2017-03-17T14:43:14.000Z
|
2021-08-06T07:07:14.000Z
|
ample/util/tests/test_cphasematch.py
|
fsimkovic/ample
|
c3c2196ca292e831e3cd8d15e3d3079bb6609848
|
[
"BSD-3-Clause"
] | 47
|
2017-03-17T14:37:09.000Z
|
2021-01-28T10:22:15.000Z
|
ample/util/tests/test_cphasematch.py
|
fsimkovic/ample
|
c3c2196ca292e831e3cd8d15e3d3079bb6609848
|
[
"BSD-3-Clause"
] | 6
|
2017-09-26T08:45:09.000Z
|
2020-03-19T14:26:49.000Z
|
"""Test functions for cphasematch"""
import os
import sys
import unittest
from ample import constants
from ample.util import cphasematch
import iotbx.mtz
class Test(unittest.TestCase):
    """Unit tests for ample.util.cphasematch: MTZ merging and phase-error
    calculation against bundled example/test reflection files."""
    @classmethod
    def setUpClass(cls):
        # Resolve the test directory and shared AMPLE data locations once.
        cls.thisd = os.path.abspath(os.path.dirname(__file__))
        cls.ample_share = constants.SHARE_DIR
        cls.testfiles_dir = os.path.join(cls.ample_share, 'testfiles')
    def test_merge_mtz(self):
        """Merging two MTZ files with distinct labels keeps all input labels."""
        os.chdir(self.thisd) # Need as otherwise tests that happen in other directories change os.cwd()
        mtz1 = os.path.join(self.ample_share, "examples", 'toxd-example', 'input', '1dtx.mtz')
        mtz1_labels = ['FP', 'SIGFP']
        mtz2 = os.path.join(self.testfiles_dir, "phaser_loc0_ALL_c1_tl49_r1_allatom_UNMOD.1.mtz")
        mtz2_labels = ['FC', 'PHIC']
        merged_mtz, mtz_labels = cphasematch.merge_mtz(mtz1, mtz1_labels, mtz2, mtz2_labels)
        input_labels = set(mtz1_labels + mtz2_labels)
        # Get all the labels
        miller_dict = iotbx.mtz.object(file_name=merged_mtz).as_miller_arrays_dict()
        ref_labels = set([d[2] for d in miller_dict.keys()])
        self.assertEqual(len(input_labels - ref_labels), 0, "Labels were not identical")
        os.unlink(merged_mtz)
        return
    def test_merge_mtz2(self):
        """Merging a file with itself renames the clashing labels (FP -> FP2)."""
        os.chdir(self.thisd) # Need as otherwise tests that happen in other directories change os.cwd()
        mtz1 = os.path.join(self.ample_share, "examples", 'toxd-example', 'input', '1dtx.mtz')
        mtz1_labels = ['FP', 'SIGFP']
        mtz2 = os.path.join(self.ample_share, "examples", 'toxd-example', 'input', '1dtx.mtz')
        mtz2_labels = ['FP', 'SIGFP']
        merged_mtz, mtz_labels = cphasematch.merge_mtz(mtz1, mtz1_labels, mtz2, mtz2_labels)
        # Get all the labels
        miller_dict = iotbx.mtz.object(file_name=merged_mtz).as_miller_arrays_dict()
        merged_labels = set([d[2] for d in miller_dict.keys()])
        # An integer equal to the file number should have been added
        ref_labels = set(['FP', 'SIGFP', 'FP2', 'SIGFP2'])
        self.assertEqual(len(merged_labels - ref_labels), 0, "Labels were not identical")
        os.unlink(merged_mtz)
        return
    @unittest.skip("Work in progress")
    def test_cphasematch_pdb(self):
        """Phase error from a native PDB + MTZ against an MR-phased MTZ."""
        os.chdir(self.thisd) # Need as otherwise tests that happen in other directories change os.cwd()
        native_pdb = os.path.join(self.ample_share, "examples", 'toxd-example', 'input', '1DTX.pdb')
        native_mtz = os.path.join(self.ample_share, "examples", 'toxd-example', 'input', '1dtx.mtz')
        mr_mtz = os.path.join(self.testfiles_dir, "phaser_loc0_ALL_c1_tl49_r1_allatom_UNMOD.1.mtz")
        before_origin, after_origin, change_of_hand, origin_shift = cphasematch.calc_phase_error_pdb(
            native_pdb, native_mtz, mr_mtz, f_label='FP', sigf_label='SIGFP'
        )
        self.assertEqual(89.6763, before_origin)
        self.assertEqual(62.4365, after_origin)
        self.assertEqual([0.0, 0.0, 0.5], origin_shift)
        return
    @unittest.skipIf(sys.platform.startswith("win"), "requires Linux")
    def test_cphasematch_mtz(self):
        """Phase error between two phased MTZ files with origin search."""
        os.chdir(self.thisd) # Need as otherwise tests that happen in other directories change os.cwd()
        native_mtz_phased = os.path.join(self.testfiles_dir, "toxd_59.1.mtz")
        mr_mtz = os.path.join(self.testfiles_dir, "phaser_loc0_ALL_c1_tl49_r1_allatom_UNMOD.1.mtz")
        before_origin, after_origin, change_of_hand, origin_shift = cphasematch.calc_phase_error_mtz(
            native_mtz_phased, mr_mtz, f_label='FP', sigf_label='SIGFP'
        )
        self.assertEqual(88.8476, before_origin)
        self.assertEqual(62.4833, after_origin)
        self.assertEqual([0.0, 0.5, 0.0], origin_shift)
        return
    @unittest.skip("Work in progress")
    def test_cphasematch_mtz_origin(self):
        """Phase error with an explicitly supplied origin shift."""
        os.chdir(self.thisd) # Need as otherwise tests that happen in other directories change os.cwd()
        native_mtz_phased = os.path.join(self.testfiles_dir, "toxd_59.1.mtz")
        mr_mtz = os.path.join(self.testfiles_dir, "phaser_loc0_ALL_c1_tl49_r1_allatom_UNMOD.1.mtz")
        origin = [0.0, 0.5, 0.0]
        before_origin, after_origin, change_of_hand, origin_shift = cphasematch.calc_phase_error_mtz(
            native_mtz_phased, mr_mtz, origin=origin
        )
        # Can't test exact equality as cphasematch and cctbx return slightly different errors
        self.assertAlmostEqual(88.8476, before_origin, 0)
        self.assertAlmostEqual(62.4833, after_origin, 0)
        self.assertEqual([0.0, 0.5, 0.0], origin_shift)
        return
    @unittest.skip("Work in progress")
    def test_cphasematch_shelxe(self):
        """Phase error against a SHELXE-produced MTZ at a fixed origin."""
        os.chdir(self.thisd) # Need as otherwise tests that happen in other directories change os.cwd()
        native_mtz_phased = os.path.join(self.testfiles_dir, "toxd_59.1.mtz")
        shelxe_mtz = os.path.join(self.testfiles_dir, "shelxe_phaser_loc0_ALL_c1_t49_r1_polyAla_UNMOD.mtz")
        origin = [0.0, 0.0, 0.0]
        before_origin, after_origin, change_of_hand, origin_shift = cphasematch.calc_phase_error_mtz(
            native_mtz_phased, shelxe_mtz, origin=origin
        )
        # Can't test exact equality as cphasematch and cctbx return slightly different errors
        self.assertAlmostEqual(88.8476, before_origin, 0)
        self.assertAlmostEqual(62.4833, after_origin, 0)
        self.assertEqual([0.0, 0.5, 0.0], origin_shift)
        return
# Allow running this test module directly (e.g. `python test_cphasematch.py`).
if __name__ == "__main__":
    unittest.main()
| 47.376068
| 107
| 0.684106
| 791
| 5,543
| 4.549937
| 0.183312
| 0.01167
| 0.0389
| 0.05057
| 0.794387
| 0.771603
| 0.770492
| 0.75132
| 0.75132
| 0.739094
| 0
| 0.035302
| 0.202778
| 5,543
| 116
| 108
| 47.784483
| 0.779136
| 0.132419
| 0
| 0.450549
| 0
| 0
| 0.130816
| 0.048821
| 0
| 0
| 0
| 0
| 0.153846
| 1
| 0.076923
| false
| 0
| 0.065934
| 0
| 0.21978
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a78f4eeb934c12023512eff192fb85057ec4dba6
| 58
|
py
|
Python
|
orthogonal/mapping/exceptions/UnSupportedOperationException.py
|
hasii2011/OrthogonalDrawing
|
f74ee5d8a4aa44be3bbbe2bd08f8b577db7918cb
|
[
"MIT"
] | null | null | null |
orthogonal/mapping/exceptions/UnSupportedOperationException.py
|
hasii2011/OrthogonalDrawing
|
f74ee5d8a4aa44be3bbbe2bd08f8b577db7918cb
|
[
"MIT"
] | 9
|
2020-05-06T19:42:07.000Z
|
2021-03-18T01:49:48.000Z
|
orthogonal/mapping/exceptions/UnSupportedOperationException.py
|
hasii2011/OrthogonalDrawing
|
f74ee5d8a4aa44be3bbbe2bd08f8b577db7918cb
|
[
"MIT"
] | null | null | null |
class UnSupportedOperationException(Exception):
    """Exception type raised for unsupported operations; adds no behavior
    beyond the base Exception."""
    pass
| 14.5
| 47
| 0.810345
| 4
| 58
| 11.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 58
| 3
| 48
| 19.333333
| 0.94
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
a7b33d967366fe2445f72b9f24ebba84c960d9b8
| 299
|
py
|
Python
|
general-practice/Exercises solved/codingbat/String1/make_abba.py
|
lugabrielbueno/Projeto
|
f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0
|
[
"MIT"
] | null | null | null |
general-practice/Exercises solved/codingbat/String1/make_abba.py
|
lugabrielbueno/Projeto
|
f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0
|
[
"MIT"
] | null | null | null |
general-practice/Exercises solved/codingbat/String1/make_abba.py
|
lugabrielbueno/Projeto
|
f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0
|
[
"MIT"
] | null | null | null |
# Given two strings, a and b, return them joined in the order a b b a.
# Examples:
#   make_abba('Hi', 'Bye')   -> 'HiByeByeHi'
#   make_abba('Yo', 'Alice') -> 'YoAliceAliceYo'
#   make_abba('What', 'Up')  -> 'WhatUpUpWhat'
def make_abba(a, b):
    """Return the concatenation a + b + b + a."""
    return "".join((a, b, b, a))
| 37.375
| 132
| 0.675585
| 52
| 299
| 3.865385
| 0.596154
| 0.159204
| 0.179104
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147157
| 299
| 8
| 133
| 37.375
| 0.776471
| 0.839465
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
a7b87f28daf67356202c2cea3443ee63aa433e18
| 3,676
|
py
|
Python
|
tests/test_core.py
|
linhd-postdata/jollyjumper
|
240b2213a4a2b8cdccc05d52ce268861dad9b8d3
|
[
"Apache-2.0"
] | null | null | null |
tests/test_core.py
|
linhd-postdata/jollyjumper
|
240b2213a4a2b8cdccc05d52ce268861dad9b8d3
|
[
"Apache-2.0"
] | 8
|
2019-04-30T11:12:23.000Z
|
2019-06-28T19:03:04.000Z
|
tests/test_core.py
|
linhd-postdata/jollyjumper
|
240b2213a4a2b8cdccc05d52ce268861dad9b8d3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from unittest import mock
import jollyjumper.core
from jollyjumper.core import get_enjambment
class TokenMock(mock.MagicMock):
    """Stand-in for a pipeline token used by the get_enjambment tests.

    The ``_`` property mimics an underscore extension-attribute namespace
    (spaCy-style -- presumably; confirm against jollyjumper.core) by
    exposing ``has_tmesis`` and ``line`` from the mock's own attributes.
    """
    # NOTE(review): __isinstance__ is not a real Python protocol hook;
    # presumably invoked explicitly by the code under test -- confirm.
    _ = property(lambda self: mock.Mock(has_tmesis=self.has_tmesis,
                                        line=self.line))
    def __isinstance__(self, token):  # noqa
        return True
    @staticmethod
    def is_ancestor(token):  # noqa
        return True
    @staticmethod
    def nbor():  # noqa
        return TokenMock()
def test_get_enjambment_tmesis(monkeypatch):
    """A hyphen-split word across a line break is reported as 'tmesis'."""
    def mockreturn(lang=None):
        # Replace the NLP pipeline with one that yields a single mock token.
        return lambda _: [
            TokenMock(text="mi-\nra", i=0, is_punct=False, has_tmesis=True,
                      line=1)
        ]
    monkeypatch.setattr(jollyjumper.core, 'load_pipeline', mockreturn)
    enjambment = get_enjambment("text")
    assert enjambment == {1: {"type": 'tmesis', "on": ['mi', 'ra']}}
def test_get_enjambment_spacy_doc(monkeypatch):
    """get_enjambment also accepts an already-processed doc/token input."""
    token = TokenMock(text="mi-\nra", i=0, is_punct=False, has_tmesis=True, line=1)
    def mockreturn(lang=None):
        return lambda _: [
            token
        ]
    monkeypatch.setattr(jollyjumper.core, 'load_pipeline', mockreturn)
    enjambment = get_enjambment(token)
    assert enjambment == {1: {"type": 'tmesis', "on": ['mi', 'ra']}}
def test_get_enjambment_no_tmesis(monkeypatch):
    """A line break without a hyphen split yields no enjambment."""
    def mockreturn(lang=None):
        return lambda _: [
            TokenMock(text="mi\nra", i=0, is_punct=False, has_tmesis=False,
                      line=1)
        ]
    monkeypatch.setattr(jollyjumper.core, 'load_pipeline', mockreturn)
    enjambment = get_enjambment("text")
    assert enjambment == {}
def test_get_enjambment(monkeypatch):
    """An ADJ/NOUN pair split across lines is reported as 'sirrematic'."""
    def mockreturn(lang=None):
        # Three tokens: adjective, line-break whitespace, then a noun on
        # the following line.
        return lambda _: [
            TokenMock(n_rights=0, tag_="NumType", pos_="ADJ", text="mi", i=0,
                      is_punct=False, has_tmesis=False,
                      line=0),
            TokenMock(n_rights=0, tag_="NumType", pos_="SPACE", text="\n", i=1,
                      is_punct=False, has_tmesis=False,
                      line=0),
            TokenMock(n_rights=0, tag_="NumType", pos_="NOUN", text="casa", i=2,
                      is_punct=False, has_tmesis=False,
                      line=1)
        ]
    monkeypatch.setattr(jollyjumper.core, 'load_pipeline', mockreturn)
    enjambment = get_enjambment("text")
    assert enjambment == {0: {"type": 'sirrematic', "on": ['ADJ', 'NOUN']}}
def test_get_enjambment_empty(monkeypatch):
    """Tokens that do not match any enjambment pattern yield an empty dict."""
    def mockreturn(lang=None):
        return lambda _: [
            TokenMock(n_rights=0, tag_="NumType", pos_="ADJ", text="mi-", i=0,
                      is_punct=False, has_tmesis=False,
                      line=1),
            TokenMock(n_rights=0, tag_="NumType", pos_="SPACE", text="\n", i=0,
                      is_punct=False, has_tmesis=False,
                      line=1),
            TokenMock(n_rights=0, tag_="NumType", pos_="NOUN", text="ro", i=0,
                      is_punct=False, has_tmesis=False,
                      line=2)
        ]
    monkeypatch.setattr(jollyjumper.core, 'load_pipeline', mockreturn)
    enjambment = get_enjambment("text")
    assert enjambment == {}
def test_get_enjambment_no_monkeypatch():
    """End-to-end run through the real pipeline detects a simple tmesis."""
    text = "maña-\nna"
    output = get_enjambment(text)
    assert output == {0: {'type': 'tmesis', 'on': ['maña', 'na']}}
def test_get_enjambment_oov_no_monkeypatch():
    """End-to-end: tmesis found for the first two splits; the third line's
    split is not reported (out-of-vocabulary case)."""
    text = "Yo estoy depri-\nmente. El coche-\ncito. No más-\nencabalgamiento."
    output = get_enjambment(text)
    assert output == {
        0: {'type': 'tmesis', 'on': ['depri', 'mente']},
        1: {'type': 'tmesis', 'on': ['coche', 'cito']},
    }
| 32.821429
| 83
| 0.584059
| 419
| 3,676
| 4.916468
| 0.195704
| 0.09466
| 0.052427
| 0.065534
| 0.761165
| 0.759223
| 0.710194
| 0.710194
| 0.710194
| 0.686893
| 0
| 0.011624
| 0.274483
| 3,676
| 111
| 84
| 33.117117
| 0.76078
| 0.009793
| 0
| 0.5
| 0
| 0
| 0.102613
| 0.006052
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0.178571
| false
| 0
| 0.035714
| 0.095238
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a7bf008475dd539b7cf47d56aedeaeea90df8542
| 78
|
py
|
Python
|
src/obfuscapk/obfuscators/virus_total/__init__.py
|
AkshayJainG/Obfuscapk
|
6f2e791dbc06afc17d69393b3e5064cc4821a582
|
[
"MIT"
] | 12
|
2020-02-24T13:40:41.000Z
|
2021-05-05T12:41:43.000Z
|
src/obfuscapk/obfuscators/virus_total/__init__.py
|
z3r0Sec/Obfuscapk
|
3adbf7bfa84adf117326409c683e375ceb5a5df4
|
[
"MIT"
] | null | null | null |
src/obfuscapk/obfuscators/virus_total/__init__.py
|
z3r0Sec/Obfuscapk
|
3adbf7bfa84adf117326409c683e375ceb5a5df4
|
[
"MIT"
] | 3
|
2020-05-15T16:44:43.000Z
|
2021-07-31T03:21:14.000Z
|
#!/usr/bin/env python3.7
# coding: utf-8
from .virus_total import VirusTotal
| 15.6
| 35
| 0.74359
| 13
| 78
| 4.384615
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044118
| 0.128205
| 78
| 4
| 36
| 19.5
| 0.794118
| 0.474359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ac3fa7f5420c29397bc2d760413cdf0d452ce157
| 1,441
|
py
|
Python
|
hypha/apply/api/v1/permissions.py
|
slifty/hypha
|
93313933c26589858beb9a861e33431658cd3b24
|
[
"BSD-3-Clause"
] | 14
|
2018-10-05T14:34:21.000Z
|
2020-01-17T17:45:44.000Z
|
hypha/apply/api/v1/permissions.py
|
OpenTechFund/WebApp
|
d6e2bb21a39d1fa7566cb60fe19f372dabfa5f0f
|
[
"BSD-3-Clause"
] | 1,098
|
2017-12-15T11:23:03.000Z
|
2020-01-24T07:58:07.000Z
|
hypha/apply/api/v1/permissions.py
|
OpenTechFund/WebApp
|
d6e2bb21a39d1fa7566cb60fe19f372dabfa5f0f
|
[
"BSD-3-Clause"
] | 7
|
2018-01-09T10:13:06.000Z
|
2019-08-12T16:38:59.000Z
|
from rest_framework import permissions
class IsAuthor(permissions.BasePermission):
    """Object-level permission: only the object's author may access it."""

    def has_object_permission(self, request, view, obj):
        # Grant access only when the requesting user owns the object.
        owner = obj.user
        return owner == request.user
class IsApplyStaffUser(permissions.BasePermission):
    """Custom permission to only allow organisation Staff or higher."""

    def has_permission(self, request, view):
        # View-level check: requires the apply-staff flag on the user.
        return request.user.is_apply_staff

    def has_object_permission(self, request, view, obj):
        # Same rule at object level; the object itself is not consulted.
        return request.user.is_apply_staff
class IsFinance1User(permissions.BasePermission):
    """Allow access only to users with the finance level 1 flag."""

    def has_permission(self, request, view):
        # Same flag gates both the view and individual objects.
        return request.user.is_finance_level_1

    def has_object_permission(self, request, view, obj):
        return request.user.is_finance_level_1
class IsFinance2User(permissions.BasePermission):
    """Allow access only to users with the finance level 2 flag."""

    def has_permission(self, request, view):
        # Same flag gates both the view and individual objects.
        return request.user.is_finance_level_2

    def has_object_permission(self, request, view, obj):
        return request.user.is_finance_level_2
class HasDeliverableEditPermission(permissions.BasePermission):
    """Delegate to the invoice: may this user edit its deliverables?"""

    def has_permission(self, request, view):
        # The view supplies the invoice; the invoice decides.
        return view.get_invoice_object().can_user_edit_deliverables(request.user)
class HasRequiredChecksPermission(permissions.BasePermission):
    """Delegate to the invoice: may this user complete its required checks?"""

    def has_permission(self, request, view):
        # The view supplies the invoice; the invoice decides.
        return view.get_object().can_user_complete_required_checks(request.user)
| 30.659574
| 70
| 0.752949
| 171
| 1,441
| 6.111111
| 0.263158
| 0.051675
| 0.180861
| 0.215311
| 0.657416
| 0.607656
| 0.588517
| 0.584689
| 0.584689
| 0.54067
| 0
| 0.005017
| 0.170021
| 1,441
| 46
| 71
| 31.326087
| 0.868729
| 0.041638
| 0
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.037037
| 0.259259
| 0.925926
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
ac4f97df47ac1dde1b3d088df84083b0cf10c42b
| 65
|
py
|
Python
|
cct/core2/devices/ups/keen800/__init__.py
|
awacha/cct
|
be1adbed2533df15c778051f3f4f9da0749c873a
|
[
"BSD-3-Clause"
] | 1
|
2015-11-04T16:37:39.000Z
|
2015-11-04T16:37:39.000Z
|
cct/core2/devices/ups/keen800/__init__.py
|
awacha/cct
|
be1adbed2533df15c778051f3f4f9da0749c873a
|
[
"BSD-3-Clause"
] | null | null | null |
cct/core2/devices/ups/keen800/__init__.py
|
awacha/cct
|
be1adbed2533df15c778051f3f4f9da0749c873a
|
[
"BSD-3-Clause"
] | 1
|
2020-03-05T02:50:43.000Z
|
2020-03-05T02:50:43.000Z
|
from .backend import Keen800Backend
from .frontend import Keen800
| 32.5
| 35
| 0.861538
| 8
| 65
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 0.107692
| 65
| 2
| 36
| 32.5
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3be6b5083c84cd73abb4ec81e1710ea32f44f9e5
| 172
|
py
|
Python
|
FactorioCalcFastAPI/locale.py
|
kshshkim/factorioCalcPy
|
2a7c6ca567a3bf0d2b19f3cf0bc05274f83d4205
|
[
"MIT"
] | 1
|
2021-09-21T01:42:05.000Z
|
2021-09-21T01:42:05.000Z
|
FactorioCalcFastAPI/locale.py
|
kshshkim/factorioCalcPy
|
2a7c6ca567a3bf0d2b19f3cf0bc05274f83d4205
|
[
"MIT"
] | null | null | null |
FactorioCalcFastAPI/locale.py
|
kshshkim/factorioCalcPy
|
2a7c6ca567a3bf0d2b19f3cf0bc05274f83d4205
|
[
"MIT"
] | null | null | null |
def get_locale_name(something, lang):
    """Resolve a localized name for *something* in language *lang*.

    Currently a stub: every branch is a placeholder, so the function
    always returns ``None`` regardless of input.

    Parameters
    ----------
    something : str | list | dict
        value whose localized name is wanted (handling not yet implemented)
    lang : str
        language code (unused until the branches are implemented)

    Returns
    -------
    None
        always, until the branches are filled in
    """
    # isinstance() is the idiomatic type check (type(x) == T was used before);
    # all branches remain unimplemented placeholders.
    if isinstance(something, str):
        pass  # TODO: handle a plain string
    if isinstance(something, list):
        pass  # TODO: handle a list of candidates
    if isinstance(something, dict):
        pass  # TODO: handle a mapping, presumably lang -> name (confirm)
    return None  # explicit: nothing implemented yet
| 21.5
| 37
| 0.575581
| 21
| 172
| 4.619048
| 0.571429
| 0.185567
| 0.463918
| 0.391753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.313953
| 172
| 7
| 38
| 24.571429
| 0.822034
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0.428571
| 0
| 0
| 0.142857
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
3bfc558666d441394dcbaa87d67e9aa575376726
| 21,583
|
py
|
Python
|
ETLook/unstable.py
|
HesamZamanpour/wapor
|
553981e78164e7fd326be5f65a46bdd1dc80288a
|
[
"Apache-2.0"
] | null | null | null |
ETLook/unstable.py
|
HesamZamanpour/wapor
|
553981e78164e7fd326be5f65a46bdd1dc80288a
|
[
"Apache-2.0"
] | null | null | null |
ETLook/unstable.py
|
HesamZamanpour/wapor
|
553981e78164e7fd326be5f65a46bdd1dc80288a
|
[
"Apache-2.0"
] | null | null | null |
import math
import numpy as np
from pyWAPOR.ETLook import constants as c
def initial_sensible_heat_flux_canopy_daily(rn_24_canopy, t_24_init):
    r"""First guess of the canopy sensible heat flux, used to seed the
    iteration that solves the stability corrections. The first estimate
    of transpiration supplies the latent-heat term.

    .. math ::
        H_{canopy}=Q_{canopy}^{*}-T

    Parameters
    ----------
    rn_24_canopy : float
        daily net radiation for the canopy, :math:`Q^{canopy}^{*}` [W m-2]
    t_24_init : float
        initial estimate of daily transpiration, :math:`T` [W m-2]

    Returns
    -------
    h_canopy_24_init : float
        initial estimate of the sensible heat flux, :math:`H^{canopy}` [W m-2]
    """
    # Energy balance without soil heat flux: whatever net radiation is not
    # used for transpiration goes into sensible heat.
    h_canopy_24_init = rn_24_canopy - t_24_init
    return h_canopy_24_init
def initial_sensible_heat_flux_soil_daily(rn_24_soil, e_24_init, g0_24):
    r"""First guess of the soil sensible heat flux, used to seed the
    iteration that solves the stability corrections. The first estimate
    of evaporation supplies the latent-heat term.

    .. math ::
        H_{soil}=Q_{soil}^{*}-G_{0}-E

    Parameters
    ----------
    rn_24_soil : float
        daily net radiation for the soil, :math:`Q^{canopy}^{*}` [W m-2]
    e_24_init : float
        initial estimate of daily evaporation, :math:`E` [W m-2]
    g0_24 : float
        daily soil heat flux, :math:`G_{0}` [W m-2]

    Returns
    -------
    h_soil_24_init : float
        initial estimate of the sensible heat flux, :math:`H_{canopy}` [W m-2]
    """
    # Energy left after the soil heat flux, minus the latent-heat use.
    available_energy = rn_24_soil - g0_24
    return available_energy - e_24_init
def initial_friction_velocity_daily(u_b_24, z0m, disp, z_b=100):
    r"""First guess of the daily friction velocity, without stability
    corrections.

    .. math ::
        u_{*}=\frac{ku_{b}}{ln\left(\frac{z_{b}-d}{z_{0,m}}\right)}

    Parameters
    ----------
    u_b_24 : float
        daily wind speed at blending height, :math:`u_{b}` [m s-1]
    z0m : float
        surface roughness, :math:`z_{0,m}` [m]
    disp : float
        displacement height, :math:`d` [m]
    z_b : float
        blending height, :math:`z_{b}` [m]

    Returns
    -------
    u_star_24_init : float
        initial estimate of the daily friction velocity, :math:`u_{*}` [m s-1]
    """
    # Neutral logarithmic wind profile between displacement and blending height.
    wind_profile = np.log((z_b - disp) / z0m)
    return (c.k * u_b_24) / wind_profile
def initial_friction_velocity_soil_daily(u_b_24, disp, z_b=100):
    r"""First guess of the daily friction velocity over soil, without
    stability corrections; the soil roughness length is the constant
    ``c.z0_soil``.

    .. math ::
        u_{*}=\frac{ku_{b}}{ln\left(\frac{z_{b}-d}{z_{0,m}}\right)}

    Parameters
    ----------
    u_b_24 : float
        daily wind speed at blending height, :math:`u_{b}` [m s-1]
    disp : float
        displacement height, :math:`d` [m]
    z_b : float
        blending height, :math:`z_{b}` [m]

    Returns
    -------
    u_star_24_soil_init : float
        initial estimate of the daily friction velocity for soil,
        :math:`u_{*}` [m s-1]
    """
    # Neutral logarithmic wind profile with the fixed soil roughness length.
    wind_profile = np.log((z_b - disp) / c.z0_soil)
    return (c.k * u_b_24) / wind_profile
def monin_obukhov_length(h_flux, ad, u_star, t_air_k):
    r"""Monin-Obukhov length, describing the effect of buoyancy on
    turbulent flow. It is usually negative during daytime.

    .. math ::
        L=\frac{-\rho c_{p}u_{*}^{3}T_{a}}{kgH_{canopy}}

    Parameters
    ----------
    h_flux : float
        sensible heat flux, :math:`H` [W m-2]
    ad : float
        air density, :math:`\rho` [kg m-3]
    u_star : float
        friction velocity, :math:`u_{*}` [m s-1]
    t_air_k : float
        air temperature in kelvin, :math:`T_{a}` [K]

    Returns
    -------
    monin : float
        Monin-Obukhov length, :math:`L` [m]
    """
    # c.sh is the specific heat of air, c.k von Karman's constant, c.g gravity.
    numerator = -ad * c.sh * u_star ** 3 * t_air_k
    denominator = c.k * c.g * h_flux
    return numerator / denominator
def stability_parameter(monin, disp, z_b=100):
    r"""Monin-Obukhov stability parameter at blending height, covering both
    shear-stress and buoyancy effects on turbulence.

    .. math ::
        x_{b}=1-16\left(\frac{z_{b}-d}{L}\right)^{0.25}

    Parameters
    ----------
    monin : float
        monin obukhov length, :math:`L` [m]
    disp : float
        displacement height, :math:`d` [m]
    z_b : float
        blending height, :math:`z_{b}` [m]

    Returns
    -------
    x_b : float
        stability parameter used in stability correction, :math:`x_{b}` [-]
    """
    # Dimensionless height ratio; callers only use this for monin < 0
    # (unstable), where the radicand stays positive.
    zeta = (z_b - disp) / monin
    return (1 - 16 * zeta) ** 0.25
def stability_factor(x_b):
    r"""Stability correction for heat at blending height.

    .. math ::
        \psi_{h,b}=2\ln\left(\frac{1+x_{b}}{2}\right)+
                   \ln\left(\frac{1+x_{b}^{2}}{2}\right)-
                   2\arctan\left(x_{b}\right)+0.5\pi

    Parameters
    ----------
    x_b : float
        stability parameter used in stability correction, :math:`x_{b}` [-]

    Returns
    -------
    sf : float
        stability correction for heat, :math:`\psi_{h,b}` [-]
    """
    # Businger-Dyer style correction; x_b == 1 yields exactly zero.
    term_linear = 2 * np.log((1 + x_b) / 2)
    term_quadratic = np.log((1 + x_b ** 2) / 2)
    term_arctan = 2 * np.arctan(x_b)
    return term_linear + term_quadratic - term_arctan + 0.5 * np.pi
def stability_parameter_obs(monin, z_obs):
    r"""Monin-Obukhov stability parameter at observation height, covering
    both shear-stress and buoyancy effects on turbulence.

    .. math ::
        x_{obs}=1-16\left(\frac{z_{obs}}{L}\right)^{0.25}

    Parameters
    ----------
    monin : float
        monin obukhov length, :math:`L` [m]
    z_obs : float
        observation height, :math:`z_{obs}` [m]

    Returns
    -------
    x_b_obs : float
        stability parameter used in stability correction for observation
        height, :math:`x_{obs}` [-]
    """
    # Callers only use this for monin <= 0 (unstable), keeping the
    # radicand positive.
    zeta_obs = z_obs / monin
    return (1 - 16 * zeta_obs) ** 0.25
def stability_correction_heat_obs(x_b_obs):
    r"""Stability correction for heat at observation height.

    .. math ::
        \psi_{h,obs}=2\ln\left(\frac{1+x_{obs}^{2}}{2}\right)

    Parameters
    ----------
    x_b_obs : float
        stability parameter used in stability correction for observation
        height, :math:`x_{obs}` [-]

    Returns
    -------
    sf_obs : float
        stability correction for heat for observation height,
        :math:`\psi_{h,obs}` [-]
    """
    # Zero when x_b_obs == 1 (no correction).
    x_squared = x_b_obs ** 2
    return 2 * np.log((1 + x_squared) / 2)
def friction_velocity(u_b, z_b, z0m, disp, sf):
    r"""Friction velocity with the stability correction applied.

    .. math ::
        u_{*}=\frac{ku_{b}}{ln\left(\frac{z_{b}-d}{z_{0,m}}\right)-\psi_{h,b}}

    Parameters
    ----------
    u_b : float
        windspeed at blending height, :math:`u_{b}` [m]
    z_b : float
        blending height, :math:`z_{b}` [m]
    z0m : float
        roughness length, :math:`z_{0,m}` [m]
    disp : float
        displacement height, :math:`d` [m]
    sf : float
        stability factor at blending height, :math:`\psi_{h,b}` [m]

    Returns
    -------
    u_star : float
        friction velocity, :math:`u_{*}` [m s-1]
    """
    # Log wind profile reduced by the stability correction for heat.
    corrected_profile = np.log((z_b - disp) / z0m) - sf
    return (c.k * u_b) / corrected_profile
def ra_canopy(
    h_canopy_init, t_air_k, u_star_init, ad, z0m, disp, u_b, z_obs=2, z_b=100, iter_ra=3
):
    r"""
    Computes the aerodynamical resistance for canopy using an iterative
    approach. The iteration is needed to compute the friction velocity at
    blending height. Iteration stops either after ``iter_ra`` iterations
    (default 3) or if the difference between two subsequent estimations
    is less than 0.01.
    .. math ::
        \begin{cases}
        \begin{array}{c}
        L=\frac{-\rho c_{p}u_{*}^{3}T_{a}}{kgH_{canopy}}\\
        x_{b}=1-16\left(\frac{z_{b}-d}{L}\right)^{0.25}\\
        \psi_{h,b}=2\ln\left(\frac{1+z_{b}}{2}\right)+
        \ln\left(\frac{1+z_{b}^{2}}{2}\right)-
        2\arctan\left(x_{b}\right)+0.5\pi\\
        u_{*}=\frac{ku_{b}}{ln\left(\frac{z_{b}-d}{z_{0,m}}\right)-\psi_{h,b}}
        \end{array}\end{cases}
    The friction velocity is independent of height. So this value can be used
    to calculate together with the stability correction for heat on observation
    height the aerodynamical resistance.
    .. math ::
        x_{obs}=1-16\left(\frac{z_{obs}}{L}\right)^{0.25}
        \psi_{h,obs}=2\ln\left(\frac{1+x_{obs}^{2}}{2}\right)
        r_{a,canopy}=\frac{\ln\left(\frac{z_{obs}-d}
        {0.1z_{0,m}}\right)-\psi_{h,obs}}{ku_{*}}
    Parameters
    ----------
    h_canopy_init : float
        initial estimate of the sensible heat flux
        :math:`H^{canopy}`
        [W m-2]
    t_air_k : float
        air tempererature in kelvin
        :math:`T_{a}`
        [K]
    u_star_init : float
        initial estimate of the daily friction velocity
        :math:`u_{*}`
        [m s-1]
    ad : float
        air density
        :math:`\rho`
        [kg m-3]
    z_b : float
        blending height
        :math:`z_{b}`
        [m]
    z_obs : float
        observation height
        :math:`z_{obs}`
        [m]
    z0m : float
        roughness length
        :math:`z_{0,m}`
        [m]
    disp : float
        displacement height
        :math:`d`
        [m]
    u_b : float
        windspeed at blending height
        :math:`u_{b}`
        [m/s]
    iter_ra : integer
        number of iterations for aerodynamical resistance
        :math:`n_{ra}`
        [-]
    Returns
    -------
    ra_canopy : float
        aerodynamical resistance for canopy
        :math:`r_{a,canopy}`
        [s m-1]
    """
    h_flux = h_canopy_init
    u_star_start = u_star_init
    iteration = 0
    epsilon = 10.0  # forces at least one iteration (threshold is 0.01)
    # Fixed-point iteration on the friction velocity: each pass recomputes
    # the Monin-Obukhov length and stability correction from the previous
    # u* estimate, until u* converges or iter_ra passes are done.
    while (iteration < iter_ra) and (np.nanmax(epsilon) > 0.01):
        iteration += 1
        monin = monin_obukhov_length(h_flux, ad, u_star_start, t_air_k)
        # Stable conditions (monin > 0): x_b = 1, which makes the stability
        # correction zero; the correction is only applied when unstable.
        x_b = np.where(monin > 0, 1, stability_parameter(monin, disp, z_b))
        sf = stability_factor(x_b)
        u_star = friction_velocity(u_b, z_b, z0m, disp, sf)
        epsilon = abs(u_star - u_star_start)  # convergence measure on u*
        u_star_start = u_star
    # Observation-height correction, again only applied when unstable.
    x_b_obs = np.where(monin <= 0, stability_parameter_obs(monin, z_obs), 1)
    sf_obs = np.where(monin <= 0, stability_correction_heat_obs(x_b_obs), 0)
    # Cap displacement below the observation height so the log argument
    # stays positive (1.5 limit also used in ra_soil, "from ETLook IDL").
    disp = np.minimum(disp, 1.5)
    ra = (np.log((z_obs - disp) / (0.1 * z0m)) - sf_obs) / (c.k * u_star)
    # Clamp the resistance to the plausible physical range [25, 500] s/m.
    ra = np.minimum(ra, 500)
    ra = np.maximum(ra, 25)
    return ra
def transpiration(
    rn_24_canopy,
    ssvp_24,
    ad_24,
    vpd_24,
    psy_24,
    r_canopy,
    h_canopy_24_init,
    t_air_k_24,
    u_star_24_init,
    z0m,
    disp,
    u_b_24,
    z_obs=2,
    z_b=100,
    iter_h=5,
):
    r"""
    Computes the transpiration using an iterative approach. The iteration is
    needed to compute the aerodynamical resistance. Iteration stops either
    after ``iter_h`` iterations (default 5) or if the difference between two
    subsequent estimations is less than 0.01. The iteration is started with
    an estimate on :math:`H` using the initial guess without stability
    corrections. Subsequent iterations use the guess with stability
    corrections.
    .. math ::
        T=\frac{\Delta\left(Q_{canopy}^{*}\right)+\rho c_{p}\
        frac{\Delta_{e}}{r_{a,canopy}}}{\Delta+
        \gamma\left(1+\frac{r_{canopy}}{r_{a,canopy}}\right)}
    Parameters
    ----------
    rn_24_canopy : float
        net radiation for the canopy
        :math:`Q^{*}_{canopy}`
        [Wm-2]
    ssvp_24 : float
        daily slope of saturated vapour pressure curve
        :math:`\Delta_{24}`
        [mbar K-1]
    ad_24 : float
        daily air density
        :math:`\rho_{24}`
        [kg m-3]
    vpd_24 : float
        daily vapour pressure deficit
        :math:`\Delta_{e,24}`
        [mbar]
    psy_24 : float
        daily psychrometric constant
        :math:`\gamma_{24}`
        [mbar K-1]
    r_canopy : float
        canopy resistance
        :math:`r_{canopy}`
        [sm-1]
    h_canopy_24_init : float
        initial estimate of the sensible heat flux
        :math:`H^{canopy}`
        [W m-2]
    t_air_k_24 : float
        daily air tempererature in kelvin
        :math:`T_{a}`
        [K]
    u_star_24_init : float
        initial estimate of the daily friction velocity
        :math:`u_{*}`
        [m s-1]
    z0m : float
        roughness length
        :math:`z_{0,m}`
        [m]
    disp : float
        displacement height
        :math:`d`
        [m]
    u_b_24 : float
        daily windspeed at blending height
        :math:`u_{b}`
        [m]
    z_b : float
        blending height
        :math:`z_{b}`
        [m]
    z_obs : float
        observation height
        :math:`z_{obs}`
        [m]
    iter_h : integer
        number of iterations for sensible heat flux
        :math:`n_h`
        [-]
    Returns
    -------
    t_24 : float
        daily transpiration energy equivalent
        :math:`T_{24}`
        [W m-2]
    """
    iteration = 0
    epsilon = 10.0  # forces at least one iteration (threshold is 0.01)
    h_start = h_canopy_24_init
    # Fixed-point iteration on the sensible heat flux: each pass recomputes
    # the aerodynamical resistance from the current H, then a Penman-Monteith
    # style transpiration T, and closes the energy balance H = Rn - T.
    while (iteration < iter_h) and (np.nanmax(epsilon) > 0.01):
        iteration += 1
        # ra_canopy runs its own inner iteration (default iter_ra=3).
        ra_canopy_start = ra_canopy(
            h_start, t_air_k_24, u_star_24_init, ad_24, z0m, disp, u_b_24, z_obs, z_b
        )
        # Penman-Monteith: radiative + aerodynamic terms over the combined
        # slope/psychrometric denominator.
        t = (ssvp_24 * rn_24_canopy + ad_24 * c.sh * (vpd_24 / ra_canopy_start)) / (
            ssvp_24 + psy_24 * (1 + r_canopy / ra_canopy_start)
        )
        h = rn_24_canopy - t  # energy balance closure for the canopy
        epsilon = abs(h - h_start)  # convergence measure on H
        h_start = h
    return t
def ra_soil(
    h_soil_24_init, t_air_k, u_star_24_init, ad, disp, u_b, z_obs=2, z_b=100, iter_ra=3
):
    r"""
    Computes the aerodynamical resistance for soil using an iterative
    approach. The iteration is needed to compute the friction velocity at
    blending height. Iteration stops either after ``iter_ra`` iterations
    (default 3) or if the difference between two subsequent estimations
    is less than 0.01.
    .. math ::
        \begin{cases}
        \begin{array}{c}
        L=\frac{-\rho c_{p}u_{*}^{3}T_{a}}{kgH_{soil}}\\
        x_{b}=1-16\left(\frac{z_{b}-d}{L}\right)^{0.25}\\
        \psi_{h,b}=2\ln\left(\frac{1+z_{b}}{2}\right)+
        \ln\left(\frac{1+z_{b}^{2}}{2}\right)-
        2\arctan\left(x_{b}\right)+0.5\pi\\
        u_{*}=\frac{ku_{b}}{ln\left(\frac{z_{b}-d}{z_{0,soil}}\right)
        -\psi_{h,b}}
        \end{array}\end{cases}
    The friction velocity is independent of height. So this value can be used
    to calculate together with the stability correction for heat on observation
    height the aerodynamical resistance.
    .. math ::
        x_{obs}=1-16\left(\frac{z_{obs}}{L}\right)^{0.25}
        \psi_{h,obs}=2\ln\left(\frac{1+x_{obs}^{2}}{2}\right)
        r_{a,soil}=\frac{\ln\left(\frac{z_{obs}-d}
        {0.1z_{0,soil}}\right)-\psi_{h,obs}}{ku_{*}}
    Parameters
    ----------
    h_soil_24_init : float
        initial estimate of the sensible heat flux for soil
        :math:`H^{soil}`
        [W m-2]
    t_air_k : float
        air tempererature in kelvin
        :math:`T_{a}`
        [K]
    u_star_24_init : float
        initial estimate of the daily friction velocity
        :math:`u_{*}`
        [m s-1]
    ad : float
        air density
        :math:`\rho`
        [kg m-3]
    z_b : float
        blending height
        :math:`z_{b}`
        [m]
    z_obs : float
        observation height
        :math:`z_{obs}`
        [m]
    disp : float
        displacement height
        :math:`d`
        [m]
    u_b : float
        windspeed at blending height
        :math:`u_{b}`
        [m]
    iter_ra : integer
        number of iterations for aerodynamical resistance
        :math:`n_{ra}`
        [-]
    Returns
    -------
    ra_soil : float
        aerodynamical resistance for soil
        :math:`r_{a,soil}`
        [s m-1]
    """
    h_flux = h_soil_24_init
    u_star_start = u_star_24_init
    iteration = 0
    epsilon = 10  # forces at least one iteration (threshold is 0.01)
    while (iteration < iter_ra) and (np.nanmax(epsilon) > 0.01):
        iteration += 1
        monin = monin_obukhov_length(h_flux, ad, u_star_start, t_air_k)
        # Stable conditions (monin > 0): x_b = 1, so stability_factor(1) == 0
        # and no correction is applied -- consistent with ra_canopy. The
        # previous value of 0 injected a spurious correction of
        # 3*ln(1/2) + 0.5*pi for stable conditions.
        x_b = np.where(monin > 0, 1, stability_parameter(monin, disp, z_b))
        sf = stability_factor(x_b)
        u_star = friction_velocity(u_b, z_b, c.z0_soil, disp, sf)
        epsilon = abs(u_star - u_star_start)  # convergence measure on u*
        u_star_start = u_star
    # Observation-height correction, only applied when unstable (monin <= 0).
    x_b_obs = np.where(monin <= 0, stability_parameter_obs(monin, z_obs), 1)
    sf_obs = np.where(monin <= 0, stability_correction_heat_obs(x_b_obs), 0)
    # 1.5 limit is from ETLook IDL
    disp = np.minimum(disp, 1.5)
    ra = (np.log((z_obs - disp) / (0.1 * c.z0_soil)) - sf_obs) / (c.k * u_star)
    # NOTE(review): ra_canopy also caps ra at 500 before the lower bound of
    # 25; confirm whether the missing upper cap here is intentional.
    ra = np.maximum(ra, 25)
    return ra
def evaporation(
    rn_24_soil,
    g0_24,
    ssvp_24,
    ad_24,
    vpd_24,
    psy_24,
    r_soil,
    h_soil_24_init,
    t_air_k_24,
    u_star_24_soil_init,
    disp,
    u_b_24,
    z_b=100,
    z_obs=2,
    iter_h=3,
):
    r"""
    Computes the evaporation using an iterative approach. The iteration is
    needed to compute the aerodynamic resistance. Iteration stops either
    after ``iter_h`` iterations (default 3) or if the difference between two
    subsequent estimations is small enough. The iteration is started with an
    estimate on :math:`H` using the initial guess without stability
    corrections. Subsequent iterations use the guess with stability
    corrections.
    .. math ::
        E=\frac{\Delta\left(Q_{soil}^{*}-G\right)+
        \rho c_{p}\frac{\Delta_{e}}{r_{a,soil}}}
        {\Delta+\gamma\left(1+\frac{r_{soil}}{r_{a,soil}}\right)}
    Parameters
    ----------
    rn_24_soil : float
        net radiation for the soil
        :math:`Q^{*}_{canopy}`
        [Wm-2]
    g0_24 : float
        daily soil heat flux
        :math:`G`
        [Wm-2]
    ssvp_24 : float
        daily slope of saturated vapour pressure curve
        :math:`\Delta_{24}`
        [mbar K-1]
    ad_24 : float
        daily air density
        :math:`\rho_{24}`
        [kg m-3]
    vpd_24 : float
        daily vapour pressure deficit
        :math:`\Delta_{e,24}`
        [mbar]
    psy_24 : float
        daily psychrometric constant
        :math:`\gamma_{24}`
        [mbar K-1]
    r_soil : float
        soil resistance
        :math:`r_{soil}`
        [sm-1]
    h_soil_24_init : float
        initial estimate of the sensible heat flux for soil
        :math:`H^{soil}`
        [W m-2]
    t_air_k_24 : float
        daily air temperature in kelvin
        :math:`T_{a}`
        [K]
    u_star_24_soil_init : float
        initial estimate of the daily friction velocity for soil
        :math:`u_{*}`
        [m s-1]
    disp : float
        displacement height
        :math:`d`
        [m]
    u_b_24 : float
        daily wind speed at blending height
        :math:`u_{b}`
        [m]
    z_b : float
        blending height
        :math:`z_{b}`
        [m]
    z_obs : float
        observation height
        :math:`z_{obs}`
        [m]
    iter_h : integer
        number of iterations for sensible heat flux
        :math:`n_h`
        [-]
    Returns
    -------
    e_24 : float
        daily evaporation energy equivalent
        :math:`E_{24}`
        [W m-2]
    """
    iteration = 0
    epsilon = 10  # forces at least one iteration
    h_start = h_soil_24_init
    # Fixed-point iteration on the soil sensible heat flux, mirroring
    # transpiration(). NOTE(review): the convergence threshold here is 0.1
    # while transpiration() uses 0.01 -- confirm whether this looser
    # tolerance is intentional.
    while (iteration < iter_h) and (np.nanmax(epsilon) > 0.1):
        iteration += 1
        # ra_soil runs its own inner iteration (default iter_ra=3).
        ra_soil_start = ra_soil(
            h_start, t_air_k_24, u_star_24_soil_init, ad_24, disp, u_b_24, z_obs, z_b
        )
        # Penman-Monteith for soil: available energy is Rn minus soil heat flux.
        e = (
            ssvp_24 * (rn_24_soil - g0_24) + ad_24 * c.sh * (vpd_24 / ra_soil_start)
        ) / (ssvp_24 + psy_24 * (1 + r_soil / ra_soil_start))
        h = rn_24_soil - g0_24 - e  # energy balance closure for the soil
        epsilon = abs(h - h_start)  # convergence measure on H
        h_start = h
    return e
def transpiration_mm(t_24, lh_24):
    r"""Convert the daily transpiration energy equivalent to a water depth.

    .. math ::
        T_{mm}=T\,d_{sec}/\lambda_{24}

    where :math:`d_{sec}` is the number of seconds in a day (86400, from
    ``c.day_sec``).

    Parameters
    ----------
    t_24 : float
        daily transpiration energy equivalent, :math:`E^{0}` [W m-2]
    lh_24 : float
        daily latent heat of evaporation, :math:`\lambda_{24}` [J/kg]

    Returns
    -------
    t_24_mm : float
        daily transpiration in mm, :math:`T` [mm d-1]
    """
    # W m-2 * s d-1 / (J kg-1) -> kg m-2 d-1, i.e. mm of water per day.
    seconds_per_day = c.day_sec
    return t_24 * seconds_per_day / lh_24
def evaporation_mm(e_24, lh_24):
    r"""Convert the daily evaporation energy equivalent to a water depth.

    .. math ::
        E_{mm}=E\,d_{sec}/\lambda_{24}

    where :math:`d_{sec}` is the number of seconds in a day (86400, from
    ``c.day_sec``).

    Parameters
    ----------
    e_24 : float
        daily evaporation energy equivalent, :math:`E^{0}` [W m-2]
    lh_24 : float
        daily latent heat of evaporation, :math:`\lambda_{24}` [J/kg]

    Returns
    -------
    e_24_mm : float
        daily evaporation in mm, :math:`E` [mm d-1]
    """
    # W m-2 * s d-1 / (J kg-1) -> kg m-2 d-1, i.e. mm of water per day.
    seconds_per_day = c.day_sec
    return e_24 * seconds_per_day / lh_24
| 24.922633
| 127
| 0.556549
| 3,072
| 21,583
| 3.70931
| 0.077148
| 0.008074
| 0.023168
| 0.029487
| 0.857832
| 0.817376
| 0.779026
| 0.740588
| 0.675033
| 0.640369
| 0
| 0.037432
| 0.313024
| 21,583
| 865
| 128
| 24.951445
| 0.731099
| 0.65774
| 0
| 0.374194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103226
| false
| 0
| 0.019355
| 0
| 0.225806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ce27b2fe3014337c74593e22c2d2c5710220f1d9
| 146
|
py
|
Python
|
anthill/platform/atomic/exceptions.py
|
0x55AAh/anthill_gaming
|
475af798bd08d85fc0fbfce9d2ba710f73252c15
|
[
"MIT"
] | 1
|
2018-11-30T21:56:14.000Z
|
2018-11-30T21:56:14.000Z
|
anthill/platform/atomic/exceptions.py
|
0x55AAh/anthill_gaming
|
475af798bd08d85fc0fbfce9d2ba710f73252c15
|
[
"MIT"
] | null | null | null |
anthill/platform/atomic/exceptions.py
|
0x55AAh/anthill_gaming
|
475af798bd08d85fc0fbfce9d2ba710f73252c15
|
[
"MIT"
] | null | null | null |
class TransactionError(Exception):
    """Base exception for errors raised during transaction processing."""
    pass
class TransactionTimeoutError(TransactionError):
    """Raised when a transaction does not complete in its allotted time.

    Re-parented under TransactionError so callers can catch every
    transaction failure with a single ``except TransactionError`` clause;
    existing ``except TransactionTimeoutError`` handlers keep working.
    """
class TransactionFinished(Exception):
    """Raised when an operation targets a transaction that already finished.

    NOTE(review): the name suggests a control-flow signal rather than a
    failure; confirm against callers before grouping it with the error
    hierarchy.
    """
    pass
| 13.272727
| 41
| 0.767123
| 12
| 146
| 9.333333
| 0.5
| 0.348214
| 0.321429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171233
| 146
| 10
| 42
| 14.6
| 0.92562
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
ce39b352c5c92d6c3140c3d99d12b55c5ea9dec3
| 148
|
py
|
Python
|
app/student/__init__.py
|
Anonymous78/Registration-System
|
967c8a1c28f5c344663c5b27e0087a70c6e9f193
|
[
"MIT"
] | null | null | null |
app/student/__init__.py
|
Anonymous78/Registration-System
|
967c8a1c28f5c344663c5b27e0087a70c6e9f193
|
[
"MIT"
] | null | null | null |
app/student/__init__.py
|
Anonymous78/Registration-System
|
967c8a1c28f5c344663c5b27e0087a70c6e9f193
|
[
"MIT"
] | null | null | null |
# app/student/__init__.py
# Package init for the "student" blueprint: every route registered on this
# blueprint is served under the /student URL prefix.
from flask import Blueprint
student = Blueprint("student", __name__, url_prefix='/student')
# Imported at the bottom so views.py can `from . import student` without a
# circular import at module load time.
from . import views
| 18.5
| 64
| 0.722973
| 18
| 148
| 5.444444
| 0.666667
| 0.326531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168919
| 148
| 7
| 65
| 21.142857
| 0.796748
| 0.155405
| 0
| 0
| 0
| 0
| 0.12931
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
cbf03f4f56d197e09d2ee05f04a223e15f61f6de
| 156
|
py
|
Python
|
algo/algorithms/math/euclid.py
|
kigawas/algo-py
|
ca5ac3f1be47a1b89bb68f376501832e975c125b
|
[
"MIT"
] | null | null | null |
algo/algorithms/math/euclid.py
|
kigawas/algo-py
|
ca5ac3f1be47a1b89bb68f376501832e975c125b
|
[
"MIT"
] | null | null | null |
algo/algorithms/math/euclid.py
|
kigawas/algo-py
|
ca5ac3f1be47a1b89bb68f376501832e975c125b
|
[
"MIT"
] | null | null | null |
def gcd(a: int, b: int) -> int:
    """Return the greatest common divisor of ``a`` and ``b``.

    Iterative Euclidean algorithm. Improvements over the previous
    recursive version: no argument-ordering recursion is needed (the first
    ``a % b`` swap handles ``b > a``), and ``b == 0`` returns ``a``
    (``gcd(a, 0) == a``) instead of raising ZeroDivisionError.
    """
    while b:
        # Invariant: gcd(a, b) is unchanged by (a, b) -> (b, a % b).
        a, b = b, a % b
    return a
| 19.5
| 31
| 0.435897
| 28
| 156
| 2.428571
| 0.357143
| 0.088235
| 0.294118
| 0.323529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01087
| 0.410256
| 156
| 7
| 32
| 22.285714
| 0.728261
| 0.096154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
026a612d21ad749194e06ebac8018add4a697550
| 2,995
|
py
|
Python
|
SensorGateway/home/tinaja/testReader.py
|
TinajaLabs/makerfaire2015
|
a9dc87912ea065d1181d7c21c6798bb30f139eb2
|
[
"MIT"
] | null | null | null |
SensorGateway/home/tinaja/testReader.py
|
TinajaLabs/makerfaire2015
|
a9dc87912ea065d1181d7c21c6798bb30f139eb2
|
[
"MIT"
] | null | null | null |
SensorGateway/home/tinaja/testReader.py
|
TinajaLabs/makerfaire2015
|
a9dc87912ea065d1181d7c21c6798bb30f139eb2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import serial, syslog, time
from XBEE import xbee
# SERIALPORT = "/dev/tts/0" # the com/serial port the XBee is connected to
SERIALPORT = "/dev/ttyAMA0" # the com/serial port the XBee is connected to
BAUDRATE = 9600 # the baud rate we talk to the xbee
# open up the FTDI serial port to get data transmitted to xbee
ser = serial.Serial(SERIALPORT, BAUDRATE)
ser.open()
print "serial port opened...?"
syslog.syslog("serial port opened...")
print "ready... and waiting for sensor data"
while True:
# grab one packet from the xbee, or timeout
syslog.syslog("testReader.py - " + time.strftime("%Y %m %d, %H:%M"))
packet = xbee.find_packet(ser)
if packet:
xb = xbee(packet)
print xb
try:
if xb.address_16 == 140:
print "014: ", xb.analog_samples[0][0], xb.analog_samples[0][1], xb.analog_samples[0][2], xb.analog_samples[0][3], xb.analog_samples[0][4]
if xb.address_16 == 13:
print "023: ", xb.analog_samples[0][0], xb.analog_samples[0][1], xb.analog_samples[0][2], xb.analog_samples[0][3], xb.analog_samples[0][4]
if xb.address_16 == 23:
print "023: ", xb.analog_samples[0][0], xb.analog_samples[0][1], xb.analog_samples[0][2], xb.analog_samples[0][3], xb.analog_samples[0][4]
if xb.address_16 == 12:
print "012: ", xb.analog_samples[0][0], xb.analog_samples[0][1], xb.analog_samples[0][2], xb.analog_samples[0][3], xb.analog_samples[0][4]
if xb.address_16 == 21:
print "021: ", xb.analog_samples[0][0], xb.analog_samples[0][1], xb.analog_samples[0][2], xb.analog_samples[0][3], xb.analog_samples[0][4]
if xb.address_16 == 160:
print "016: ", xb.analog_samples[0][0], xb.analog_samples[0][1], xb.analog_samples[0][2], xb.analog_samples[0][3], xb.analog_samples[0][4]
if xb.address_16 == 170:
print "017: ", xb.analog_samples[0][0], xb.analog_samples[0][1], xb.analog_samples[0][2], xb.analog_samples[0][3], xb.analog_samples[0][4]
if xb.address_16 == 180:
print "018: ", xb.analog_samples[0][0], xb.analog_samples[0][1], xb.analog_samples[0][2], xb.analog_samples[0][3], xb.analog_samples[0][4]
if xb.address_16 == 190:
print "019: ", xb.analog_samples[0][0], xb.analog_samples[0][1], xb.analog_samples[0][2], xb.analog_samples[0][3], xb.analog_samples[0][4]
if xb.address_16 == 200:
print "020: ", xb.analog_samples[0][0], xb.analog_samples[0][1], xb.analog_samples[0][2], xb.analog_samples[0][3], xb.analog_samples[0][4]
if xb.address_16 == 210:
print "021: ", xb.analog_samples[0][0], xb.analog_samples[0][1], xb.analog_samples[0][2], xb.analog_samples[0][3], xb.analog_samples[0][4]
except Exception, e:
print "xb exception: "+str(e)
syslog.syslog("xb exception: "+str(e))
| 54.454545
| 154
| 0.612354
| 482
| 2,995
| 3.665975
| 0.188797
| 0.24901
| 0.466893
| 0.498019
| 0.661573
| 0.661573
| 0.661573
| 0.661573
| 0.661573
| 0.620826
| 0
| 0.085361
| 0.217696
| 2,995
| 54
| 155
| 55.462963
| 0.668801
| 0.092487
| 0
| 0.097561
| 0
| 0
| 0.076015
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.04878
| null | null | 0.365854
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0280fd87c7bc18bb0ab3836466f5f9731ffdd2e8
| 156
|
py
|
Python
|
lib/rapidsms/messages/error.py
|
ILMServices/rapidsms
|
17cdb3d867cb1b78cfce59dc90e494bc54818e92
|
[
"BSD-3-Clause"
] | 3
|
2015-04-17T04:47:31.000Z
|
2018-04-02T17:44:19.000Z
|
lib/rapidsms/messages/error.py
|
sparkplug/rapidsms
|
17cdb3d867cb1b78cfce59dc90e494bc54818e92
|
[
"BSD-3-Clause"
] | null | null | null |
lib/rapidsms/messages/error.py
|
sparkplug/rapidsms
|
17cdb3d867cb1b78cfce59dc90e494bc54818e92
|
[
"BSD-3-Clause"
] | 4
|
2015-01-30T02:53:13.000Z
|
2019-11-08T11:08:20.000Z
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from .outgoing import OutgoingMessage
class ErrorMessage(OutgoingMessage):
    """An outgoing message used for error replies.

    Adds no behavior of its own; it appears to exist solely as a distinct
    type so error traffic can be told apart (e.g. via isinstance) from
    regular OutgoingMessage traffic -- confirm against callers.
    """
| 12
| 37
| 0.647436
| 21
| 156
| 4.809524
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024793
| 0.224359
| 156
| 12
| 38
| 13
| 0.809917
| 0.301282
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
5a66a2edf2304f5c415271677482486cad0b692e
| 54
|
py
|
Python
|
jupyterlabpymolpysnips/FormatLabel/labelSS.py
|
MooersLab/pymolpysnips
|
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
|
[
"MIT"
] | null | null | null |
jupyterlabpymolpysnips/FormatLabel/labelSS.py
|
MooersLab/pymolpysnips
|
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
|
[
"MIT"
] | null | null | null |
jupyterlabpymolpysnips/FormatLabel/labelSS.py
|
MooersLab/pymolpysnips
|
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
|
[
"MIT"
] | null | null | null |
# PyMOL snippet template (jupyterlab pymolpysnips): %1-%3 appear to be
# editor placeholder fields filled in when the snippet is expanded -- the
# raw '%N' tokens would fail string formatting if executed unexpanded.
# %1: secondary-structure code assigned by `alter`; %2: label selection;
# %3: label text.
cmd.do('alter , ss='%1';')
cmd.do('label (%2),'%3';')
| 18
| 26
| 0.462963
| 10
| 54
| 2.5
| 0.8
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061224
| 0.092593
| 54
| 2
| 27
| 27
| 0.44898
| 0
| 0
| 0
| 0
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ce5b43203d86127dc5e3e2997d95d14caf1e7e7d
| 20
|
py
|
Python
|
3-1/codestar/1009.py
|
sat0317/root
|
c9ec76f9139a577805fa387daf7c4d8169d0b2e7
|
[
"MIT"
] | null | null | null |
3-1/codestar/1009.py
|
sat0317/root
|
c9ec76f9139a577805fa387daf7c4d8169d0b2e7
|
[
"MIT"
] | null | null | null |
3-1/codestar/1009.py
|
sat0317/root
|
c9ec76f9139a577805fa387daf7c4d8169d0b2e7
|
[
"MIT"
] | null | null | null |
# Echo one line read from stdin. input() already returns a str in
# Python 3, so the previous str() wrapper was redundant.
print(input())
| 10
| 19
| 0.65
| 3
| 20
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05
| 20
| 1
| 20
| 20
| 0.684211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
ce69e53a8361d3c5434fe76317fc098e6c2d878f
| 204
|
py
|
Python
|
template/first_program.py
|
mjtsai1974/DevBlog
|
f1429e28e7ea618a64f5e111be4d7f42ae616ce8
|
[
"MIT"
] | null | null | null |
template/first_program.py
|
mjtsai1974/DevBlog
|
f1429e28e7ea618a64f5e111be4d7f42ae616ce8
|
[
"MIT"
] | null | null | null |
template/first_program.py
|
mjtsai1974/DevBlog
|
f1429e28e7ea618a64f5e111be4d7f42ae616ce8
|
[
"MIT"
] | null | null | null |
# First Python program
from __future__ import print_function, division
import time
print('Welcome to your first Python program.')
#raw_input('Press enter to exit the program.')
print('Bye!')
time.sleep(2)
| 29.142857
| 47
| 0.779412
| 31
| 204
| 4.935484
| 0.709677
| 0.143791
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005556
| 0.117647
| 204
| 7
| 48
| 29.142857
| 0.844444
| 0.318627
| 0
| 0
| 0
| 0
| 0.29927
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0.6
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
ce8f1cf641c153eb1b924e7dc7f2f0ca14e85828
| 196
|
py
|
Python
|
myapp/admin.py
|
pkbagchi/Result-Processing-System-DIU
|
4df5b736574e3b579e4eae28013afec982124473
|
[
"MIT"
] | null | null | null |
myapp/admin.py
|
pkbagchi/Result-Processing-System-DIU
|
4df5b736574e3b579e4eae28013afec982124473
|
[
"MIT"
] | 1
|
2020-05-24T06:50:27.000Z
|
2020-07-21T05:02:51.000Z
|
myapp/admin.py
|
pkbagchi/Result-Processing-System-DIU
|
4df5b736574e3b579e4eae28013afec982124473
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Student, Result, Semester
# Register your models here.
admin.site.register(Student)
admin.site.register(Result)
admin.site.register(Semester)
| 28
| 46
| 0.811224
| 27
| 196
| 5.888889
| 0.481481
| 0.169811
| 0.320755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096939
| 196
| 6
| 47
| 32.666667
| 0.898305
| 0.132653
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ced806a75a34c6682a63e2f4ace56ddcdacb0b77
| 188
|
py
|
Python
|
market_maker/utils/errors.py
|
Quant-Network/sample-market-maker
|
4c47b60be66b1aead901400ba5fe96abf5e73c1b
|
[
"Apache-2.0"
] | null | null | null |
market_maker/utils/errors.py
|
Quant-Network/sample-market-maker
|
4c47b60be66b1aead901400ba5fe96abf5e73c1b
|
[
"Apache-2.0"
] | null | null | null |
market_maker/utils/errors.py
|
Quant-Network/sample-market-maker
|
4c47b60be66b1aead901400ba5fe96abf5e73c1b
|
[
"Apache-2.0"
] | 1
|
2021-04-27T12:02:41.000Z
|
2021-04-27T12:02:41.000Z
|
class AuthenticationError(Exception):
pass
class MarketClosedError(Exception):
pass
class MarketEmptyError(Exception):
pass
class InternalStateBotError(Exception):
pass
| 15.666667
| 39
| 0.771277
| 16
| 188
| 9.0625
| 0.4375
| 0.358621
| 0.372414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164894
| 188
| 11
| 40
| 17.090909
| 0.923567
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
ceda617097ada5b35b42451310920805a73bcd1b
| 369
|
py
|
Python
|
examples/database.py
|
Kehrlann/python-test-examples
|
9c5aa8c500cfd892dbd9e4cc1c6660ff1566b771
|
[
"Apache-2.0"
] | null | null | null |
examples/database.py
|
Kehrlann/python-test-examples
|
9c5aa8c500cfd892dbd9e4cc1c6660ff1566b771
|
[
"Apache-2.0"
] | null | null | null |
examples/database.py
|
Kehrlann/python-test-examples
|
9c5aa8c500cfd892dbd9e4cc1c6660ff1566b771
|
[
"Apache-2.0"
] | null | null | null |
class Database:
def __init__(self):
self.stuff = {}
def cleanup(self):
self.stuff = {}
def save(self, id, timestamp, count):
self.stuff[id] = {
'timestamp': timestamp,
'count': count
}
def find_all(self, id):
return [self.stuff[id]]
def count(self):
return len(self.stuff)
| 19.421053
| 41
| 0.514905
| 41
| 369
| 4.512195
| 0.365854
| 0.243243
| 0.140541
| 0.172973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.352304
| 369
| 18
| 42
| 20.5
| 0.774059
| 0
| 0
| 0.142857
| 0
| 0
| 0.03794
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.357143
| false
| 0
| 0
| 0.142857
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
0c7595c0482009b3e37f5753010dd64392117552
| 65
|
py
|
Python
|
collagen/data/utils/_solt.py
|
MIPT-Oulu/Collagen
|
0cbc4285d60e5c9fcc89f629fcf4321e80b7452c
|
[
"MIT"
] | 4
|
2019-05-14T14:44:51.000Z
|
2020-03-13T08:37:48.000Z
|
collagen/data/utils/_solt.py
|
MIPT-Oulu/Collagen
|
0cbc4285d60e5c9fcc89f629fcf4321e80b7452c
|
[
"MIT"
] | 26
|
2019-04-21T20:35:22.000Z
|
2022-03-12T00:32:57.000Z
|
collagen/data/utils/_solt.py
|
MIPT-Oulu/Collagen
|
0cbc4285d60e5c9fcc89f629fcf4321e80b7452c
|
[
"MIT"
] | 1
|
2019-05-14T14:53:28.000Z
|
2019-05-14T14:53:28.000Z
|
# TODO: Define properly a set of transformers to and from SOLT!!
| 32.5
| 64
| 0.753846
| 11
| 65
| 4.454545
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184615
| 65
| 1
| 65
| 65
| 0.924528
| 0.953846
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 1
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0cb177c931d0c8a1b6bfdab060ce33b8c2f27760
| 14,058
|
py
|
Python
|
tests/test_icdar_format.py
|
yasakova-anastasia/datumaro
|
7082fc4f50291d3b3c2022f069f76628e9fd07bb
|
[
"MIT"
] | null | null | null |
tests/test_icdar_format.py
|
yasakova-anastasia/datumaro
|
7082fc4f50291d3b3c2022f069f76628e9fd07bb
|
[
"MIT"
] | 1
|
2021-10-05T17:41:05.000Z
|
2021-10-05T17:41:05.000Z
|
tests/test_icdar_format.py
|
yasakova-anastasia/datumaro
|
7082fc4f50291d3b3c2022f069f76628e9fd07bb
|
[
"MIT"
] | null | null | null |
from functools import partial
from unittest import TestCase
import os.path as osp
import numpy as np
from datumaro.components.annotation import Bbox, Caption, Mask, Polygon
from datumaro.components.environment import Environment
from datumaro.components.extractor import DatasetItem
from datumaro.components.project import Dataset
from datumaro.plugins.icdar_format.converter import (
IcdarTextLocalizationConverter, IcdarTextSegmentationConverter,
IcdarWordRecognitionConverter,
)
from datumaro.plugins.icdar_format.extractor import (
IcdarTextLocalizationImporter, IcdarTextSegmentationImporter,
IcdarWordRecognitionImporter,
)
from datumaro.util.image import Image
from datumaro.util.test_utils import (
TestDir, compare_datasets, test_save_and_load,
)
from .requirements import Requirements, mark_requirement
DUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'icdar_dataset')
class IcdarImporterTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_detect_word_recognition(self):
detected_formats = Environment().detect_dataset(
osp.join(DUMMY_DATASET_DIR, 'word_recognition'))
self.assertIn(IcdarWordRecognitionImporter.NAME, detected_formats)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_detect_text_localization(self):
detected_formats = Environment().detect_dataset(
osp.join(DUMMY_DATASET_DIR, 'text_localization'))
self.assertEqual([IcdarTextLocalizationImporter.NAME], detected_formats)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_detect_text_segmentation(self):
detected_formats = Environment().detect_dataset(
osp.join(DUMMY_DATASET_DIR, 'text_segmentation'))
self.assertIn(IcdarTextSegmentationImporter.NAME, detected_formats)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_captions(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='word_1', subset='train',
image=np.ones((10, 15, 3)),
annotations=[
Caption('PROPER'),
]
),
DatasetItem(id='word_2', subset='train',
image=np.ones((10, 15, 3)),
annotations=[
Caption("Canon"),
]
),
])
dataset = Dataset.import_from(
osp.join(DUMMY_DATASET_DIR, 'word_recognition'),
'icdar_word_recognition')
compare_datasets(self, expected_dataset, dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_bboxes(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='img_1', subset='train',
image=np.ones((10, 15, 3)),
annotations=[
Polygon([0, 0, 3, 1, 4, 6, 1, 7],
attributes={'text': 'FOOD'}),
]
),
DatasetItem(id='img_2', subset='train',
image=np.ones((10, 15, 3)),
annotations=[
Bbox(0, 0, 2, 3, attributes={'text': 'RED'}),
Bbox(3, 3, 2, 3, attributes={'text': 'LION'}),
]
),
])
dataset = Dataset.import_from(
osp.join(DUMMY_DATASET_DIR, 'text_localization'),
'icdar_text_localization')
compare_datasets(self, expected_dataset, dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_masks(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='1', subset='train',
image=np.ones((2, 5, 3)),
annotations=[
Mask(group=0,
image=np.array([[0, 1, 1, 0, 0], [0, 0, 0, 0, 0]]),
attributes={ 'index': 0, 'color': '108 225 132',
'text': 'F', 'center': '0 1'
}),
Mask(group=1,
image=np.array([[0, 0, 0, 1, 0], [0, 0, 0, 1, 0]]),
attributes={ 'index': 1, 'color': '82 174 214',
'text': 'T', 'center': '1 3'
}),
Mask(group=1,
image=np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 1]]),
attributes={ 'index': 2, 'color': '241 73 144',
'text': 'h', 'center': '1 4'
}),
]
),
])
dataset = Dataset.import_from(
osp.join(DUMMY_DATASET_DIR, 'text_segmentation'),
'icdar_text_segmentation')
compare_datasets(self, expected_dataset, dataset)
class IcdarConverterTest(TestCase):
def _test_save_and_load(self, source_dataset, converter, test_dir, importer,
target_dataset=None, importer_args=None, **kwargs):
return test_save_and_load(self, source_dataset, converter, test_dir,
importer,
target_dataset=target_dataset, importer_args=importer_args, **kwargs)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_captions(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a/b/1', subset='train',
image=np.ones((10, 15, 3)), annotations=[
Caption('caption 0'),
]),
DatasetItem(id=2, subset='train',
image=np.ones((10, 15, 3)), annotations=[
Caption('caption_1'),
]),
])
with TestDir() as test_dir:
self._test_save_and_load(expected_dataset,
partial(IcdarWordRecognitionConverter.convert, save_images=True),
test_dir, 'icdar_word_recognition')
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_captions_with_no_save_images(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a/b/1', subset='train',
image=np.ones((10, 15, 3)), annotations=[
Caption('caption 0'),
])
])
with TestDir() as test_dir:
self._test_save_and_load(expected_dataset,
partial(IcdarWordRecognitionConverter.convert, save_images=False),
test_dir, 'icdar_word_recognition')
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_bboxes(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a/b/1', subset='train',
image=np.ones((10, 15, 3)), annotations=[
Bbox(1, 3, 6, 10),
Bbox(0, 1, 3, 5, attributes={'text': 'word 0'}),
]),
DatasetItem(id=2, subset='train',
image=np.ones((10, 15, 3)), annotations=[
Polygon([0, 0, 3, 0, 4, 7, 1, 8],
attributes={'text': 'word 1'}),
Polygon([1, 2, 5, 3, 6, 8, 0, 7]),
]),
DatasetItem(id=3, subset='train',
image=np.ones((10, 15, 3)), annotations=[
Polygon([2, 2, 8, 3, 7, 10, 2, 9],
attributes={'text': 'word_2'}),
Bbox(0, 2, 5, 9, attributes={'text': 'word_3'}),
]),
])
with TestDir() as test_dir:
self._test_save_and_load(expected_dataset,
partial(IcdarTextLocalizationConverter.convert, save_images=True),
test_dir, 'icdar_text_localization')
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_bboxes_with_no_save_images(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id=3, subset='train',
image=np.ones((10, 15, 3)), annotations=[
Polygon([2, 2, 8, 3, 7, 10, 2, 9],
attributes={'text': 'word_2'}),
Bbox(0, 2, 5, 9, attributes={'text': 'word_3'}),
]),
])
with TestDir() as test_dir:
self._test_save_and_load(expected_dataset,
partial(IcdarTextLocalizationConverter.convert, save_images=False),
test_dir, 'icdar_text_localization')
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_masks(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a/b/1', subset='train',
image=np.ones((10, 15, 3)), annotations=[
Mask(image=np.array([[0, 0, 0, 1, 1]]), group=1,
attributes={ 'index': 1, 'color': '82 174 214', 'text': 'j',
'center': '0 3' }),
Mask(image=np.array([[0, 1, 1, 0, 0]]), group=1,
attributes={ 'index': 0, 'color': '108 225 132', 'text': 'F',
'center': '0 1' }),
]),
DatasetItem(id=2, subset='train',
image=np.ones((10, 15, 3)), annotations=[
Mask(image=np.array([[0, 0, 0, 0, 0, 1]]), group=0,
attributes={ 'index': 3, 'color': '183 6 28', 'text': ' ',
'center': '0 5' }),
Mask(image=np.array([[1, 0, 0, 0, 0, 0]]), group=1,
attributes={ 'index': 0, 'color': '108 225 132', 'text': 'L',
'center': '0 0' }),
Mask(image=np.array([[0, 0, 0, 1, 1, 0]]), group=1,
attributes={ 'index': 1, 'color': '82 174 214', 'text': 'o',
'center': '0 3' }),
Mask(image=np.array([[0, 1, 1, 0, 0, 0]]), group=0,
attributes={ 'index': 2, 'color': '241 73 144', 'text': 'P',
'center': '0 1' }),
]),
])
with TestDir() as test_dir:
self._test_save_and_load(expected_dataset,
partial(IcdarTextSegmentationConverter.convert,
save_images=True),
test_dir, 'icdar_text_segmentation')
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_masks_with_no_save_images(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a/b/1', subset='train',
image=np.ones((10, 15, 3)), annotations=[
Mask(image=np.array([[0, 0, 0, 1, 1]]), group=1,
attributes={ 'index': 1, 'color': '82 174 214', 'text': 'j',
'center': '0 3' }),
Mask(image=np.array([[0, 1, 1, 0, 0]]), group=1,
attributes={ 'index': 0, 'color': '108 225 132', 'text': 'F',
'center': '0 1' }),
]),
])
with TestDir() as test_dir:
self._test_save_and_load(expected_dataset,
partial(IcdarTextSegmentationConverter.convert,
save_images=False),
test_dir, 'icdar_text_segmentation')
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_with_no_subsets(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.ones((8, 8, 3)),
annotations=[
Bbox(0, 1, 3, 5),
]),
])
with TestDir() as test_dir:
self._test_save_and_load(expected_dataset,
IcdarTextLocalizationConverter.convert, test_dir,
'icdar_text_localization')
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_dataset_with_cyrillic_and_spaces_in_filename(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='кириллица с пробелом',
image=np.ones((8, 8, 3))),
])
for importer, converter in [
('icdar_word_recognition', IcdarWordRecognitionConverter),
('icdar_text_localization', IcdarTextLocalizationConverter),
('icdar_text_segmentation', IcdarTextSegmentationConverter),
]:
with self.subTest(subformat=converter), TestDir() as test_dir:
self._test_save_and_load(expected_dataset,
partial(converter.convert, save_images=True),
test_dir, importer, require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_image_with_arbitrary_extension(self):
expected = Dataset.from_iterable([
DatasetItem(id='q/1', image=Image(path='q/1.JPEG',
data=np.zeros((4, 3, 3)))),
DatasetItem(id='a/b/c/2', image=Image(path='a/b/c/2.bmp',
data=np.zeros((3, 4, 3)))),
])
for importer, converter in [
('icdar_word_recognition', IcdarWordRecognitionConverter),
('icdar_text_localization', IcdarTextLocalizationConverter),
('icdar_text_segmentation', IcdarTextSegmentationConverter),
]:
with self.subTest(subformat=converter), TestDir() as test_dir:
self._test_save_and_load(expected,
partial(converter.convert, save_images=True),
test_dir, importer, require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_captions_with_quotes(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='1', image=np.ones((5, 5, 3)),
annotations=[Caption('caption\"')]
)
])
with TestDir() as test_dir:
self._test_save_and_load(expected_dataset,
partial(IcdarWordRecognitionConverter.convert, save_images=True),
test_dir, 'icdar_word_recognition')
| 43.122699
| 85
| 0.555982
| 1,489
| 14,058
| 5.008731
| 0.108126
| 0.010995
| 0.010056
| 0.068651
| 0.780504
| 0.765084
| 0.753687
| 0.741888
| 0.718289
| 0.676857
| 0
| 0.041553
| 0.320387
| 14,058
| 325
| 86
| 43.255385
| 0.739062
| 0
| 0
| 0.58885
| 0
| 0
| 0.084151
| 0.027387
| 0
| 0
| 0
| 0
| 0.010453
| 1
| 0.059233
| false
| 0
| 0.114983
| 0.003484
| 0.184669
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0ccd776bfb89ec04382a5b7ce1c3f2b8f1f50fe4
| 158
|
py
|
Python
|
tensortrade/orders/__init__.py
|
cihilt/tensortrade
|
47b8f2f043d3cc430838aac02a915ab42dcc7b64
|
[
"Apache-2.0"
] | 7
|
2020-09-28T23:36:40.000Z
|
2022-02-22T02:00:32.000Z
|
tensortrade/orders/__init__.py
|
cihilt/tensortrade
|
47b8f2f043d3cc430838aac02a915ab42dcc7b64
|
[
"Apache-2.0"
] | 4
|
2020-11-13T18:48:52.000Z
|
2022-02-10T01:29:47.000Z
|
tensortrade/orders/__init__.py
|
cihilt/tensortrade
|
47b8f2f043d3cc430838aac02a915ab42dcc7b64
|
[
"Apache-2.0"
] | 3
|
2020-11-23T17:31:59.000Z
|
2021-04-08T10:55:03.000Z
|
from .broker import Broker
from .order import Order, OrderStatus
from .order_listener import OrderListener
from .recipe import Recipe
from . import criteria
| 22.571429
| 41
| 0.822785
| 21
| 158
| 6.142857
| 0.428571
| 0.139535
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139241
| 158
| 6
| 42
| 26.333333
| 0.948529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0cf10e31cd60967c9e5e90c6c2fde672516e9aed
| 9,228
|
py
|
Python
|
tests/legacy/test_run.py
|
symuvia/symupy
|
e6604c59bb4474f594ef5c997508f0407c9b3870
|
[
"MIT"
] | 2
|
2019-07-01T09:58:53.000Z
|
2020-06-12T12:12:46.000Z
|
tests/legacy/test_run.py
|
licit-lab/symupy
|
942a17ee78cd12a363a4cd7b7f8363e239ccf7fe
|
[
"MIT"
] | 33
|
2021-01-18T13:59:01.000Z
|
2021-11-29T13:21:10.000Z
|
tests/legacy/test_run.py
|
licit-lab/symupy
|
942a17ee78cd12a363a4cd7b7f8363e239ccf7fe
|
[
"MIT"
] | 7
|
2018-07-12T13:34:38.000Z
|
2019-10-02T13:37:31.000Z
|
import os
import unittest
from symupy.runtime.api import Simulation, Simulator
import platform
class TestBottleneck001(unittest.TestCase):
def setUp(self):
self.get_simulator()
self.get_bottleneck_001()
def get_simulator(self):
self.libpath = ("lib", _libpath, _libfilen)
self.sim_path = os.path.join(os.getcwd(), *self.libpath)
def get_bottleneck_001(self):
self.file_name = "bottleneck_001.xml"
file_path = ("tests", "mocks", "bottlenecks", self.file_name)
self.mocks_path = os.path.join(os.getcwd(), *file_path)
@unittest.skip("Skipping momentary")
def test_load_bottleneck_001(self):
sim_case = Simulation(self.mocks_path)
self.assertEqual(sim_case.filename, self.mocks_path)
@unittest.skip("Skipping momentary")
def test_constructor_bottleneck_001(self):
sim_instance = Simulator.from_path(self.mocks_path, self.sim_path)
self.assertEqual(self.mocks_path, sim_instance.casename)
@unittest.skip("Skipping momentary")
def test_get_simulation_data_bottleneck_001(self):
sim_case = Simulation(self.mocks_path)
sim_param = sim_case.get_simulation_parameters()
PAR = (
{
"id": "simID",
"pasdetemps": "1",
"debut": "00:00:00",
"fin": "00:00:30",
"loipoursuite": "exacte",
"comportementflux": "iti",
"date": "1985-01-17",
"titre": "",
"proc_deceleration": "false",
"seed": "1",
},
{
"id": "simID2",
"pasdetemps": "1",
"debut": "00:00:00",
"fin": "00:00:30",
"loipoursuite": "exacte",
"comportementflux": "iti",
"date": "1985-01-17",
"titre": "",
"proc_deceleration": "false",
"seed": "1",
},
)
self.assertTupleEqual(sim_param, PAR)
@unittest.skip("Skipping momentary")
def test_get_vehicletype_data_bottleneck_001(self):
sim_case = Simulation(self.mocks_path)
sim_vehtype = sim_case.get_vehicletype_information()
VEH_TYPE = (
{"id": "VL", "w": "-5.8823", "kx": "0.17", "vx": "25"},
{"id": "VL2", "w": "-5.8823", "kx": "0.17", "vx": "25"},
)
self.assertTupleEqual(sim_vehtype, VEH_TYPE)
@unittest.skip("Skipping momentary")
def test_get_network_endpoints_botleneck_001(self):
sim_case = Simulation(self.mocks_path)
sim_endpoints = sim_case.get_network_endpoints()
END_POINTS = ("Ext_In", "Ext_Out")
self.assertTupleEqual(sim_endpoints, END_POINTS)
@unittest.skip("Skipping momentary")
def test_run_bottleneck_001(self):
sim_case = Simulation(self.mocks_path)
sim_instance = Simulator(self.sim_path)
sim_instance.load_symuvia()
sim_instance.run_simulation(sim_case)
@unittest.skip("Skipping momentary")
def test_run_simulation_alternative_constructor_bottleneck_001(self):
sim_instance = Simulator.from_path(self.mocks_path, self.sim_path)
sim_instance.run_simulation()
@unittest.skip("Skipping momentary")
def test_run_stepbystep_bottleneck_001(self):
# Using new constructor
sim_instance = Simulator.from_path(self.mocks_path, self.sim_path)
with sim_instance as s:
while s.do_next:
s.run_step()
@unittest.skip("Skipping momentary")
def test_initialize_container_bottleneck_001(self):
sim_case = Simulation(self.mocks_path)
sim_instance = Simulator(self.sim_path)
sim_instance.register_simulation(sim_case)
with sim_instance as s:
while s.do_next:
# TODO: This needs some work on Parser.py
s.state.get_vehicle_data()
@unittest.skip("Skipping momentary")
def test_create_vehicle_bottleneck_001(self):
sim_case = Simulation(self.mocks_path)
sim_instance = Simulator(self.sim_path)
sim_instance.register_simulation(sim_case)
# with
sim_instance.load_symuvia()
sim_instance.load_network()
sim_instance.init_simulation()
veh_id = sim_instance.create_vehicle("VL", "Ext_In", "Ext_Out")
self.assertGreaterEqual(veh_id, 0)
@unittest.skip("Skipping momentary")
def test_create_drive_vehicle_bottleneck_001(self):
sim_case = Simulation(self.mocks_path)
sim_instance = Simulator(self.sim_path)
sim_instance.register_simulation(sim_case)
# with
# REVIEW: For the sake of simplicity the vehicle will be created after an entering vehicle has been created.
with sim_instance as s:
while s.do_next:
s.request_answer() # Initialize
s.request_answer() # Vehicle 0
veh_id = s.create_vehicle("VL", "Ext_In", "Ext_Out")
s.request_answer() # Vehicle instantiation
drive_status = s.drive_vehicle(veh_id, 20.0, "Zone_001")
s.stop_step()
self.assertGreaterEqual(veh_id, 0)
self.assertEqual(drive_status, 1)
self.assertAlmostEqual(
float(sim_instance.state.query_vehicle_position("1")[0]), 20.0
)
@unittest.skip("Skipping momentary")
def test_drive_vehicle_bottleneck_001(self):
sim_case = Simulation(self.mocks_path)
sim_instance = Simulator(self.sim_path)
sim_instance.register_simulation(sim_case)
# with
with sim_instance as s:
while s.do_next:
s.run_step()
if s.state.is_vehicle_in_network("0"):
drive_status = s.drive_vehicle(0, 1.0)
s.run_step()
drive_status = s.drive_vehicle(0, 1.0)
s.stop_step()
continue
else:
continue
self.assertEqual(drive_status, 1)
self.assertAlmostEqual(
float(sim_instance.state.query_vehicle_position("0")[0]), 1.0
)
class TestBottleneck002(unittest.TestCase):
def setUp(self):
self.get_simulator()
self.get_bottleneck_002()
def get_simulator(self):
self.libpath = ("lib", _libpath, _libfilen)
self.sim_path = os.path.join(os.getcwd(), *self.libpath)
def get_bottleneck_002(self):
self.file_name = "bottleneck_002.xml"
file_path = ("tests", "mocks", "bottlenecks", self.file_name)
self.mocks_path = os.path.join(os.getcwd(), *file_path)
@unittest.skip("Skipping momentary")
def test_load_bottleneck_002(self):
sim_case = Simulation(self.mocks_path)
self.assertEqual(sim_case.filename, self.mocks_path)
@unittest.skip("Skipping momentary")
def test_run_bottleneck_002(self):
sim_case = Simulation(self.mocks_path)
sim_instance = Simulator(self.sim_path)
sim_instance.load_symuvia()
sim_instance.run_simulation(sim_case)
@unittest.skip("Skipping momentary")
def test_run_stepbystep_bottleneck_002(self):
# Using new constructor
sim_instance = Simulator.from_path(self.mocks_path, self.sim_path)
with sim_instance as s:
while s.do_next:
s.run_step()
@unittest.skip("Skipping momentary")
def test_query_vehicles_upstream_bottleneck002(self):
sim_case = Simulation(self.mocks_path)
sim_instance = Simulator(self.sim_path)
sim_instance.register_simulation(sim_case)
with sim_instance as s:
while s.do_next:
s.run_step()
if s.state.is_vehicle_in_network("2"):
(nup,) = s.state.vehicle_upstream_of("1")
s.stop_step()
continue
else:
continue
self.assertEqual(nup, "2")
@unittest.skip("Skipping momentary")
def test_query_vehicles_downstream_bottleneck002(self):
sim_case = Simulation(self.mocks_path)
sim_instance = Simulator(self.sim_path)
sim_instance.register_simulation(sim_case)
with sim_instance as s:
while s.do_next:
s.run_step()
if s.state.is_vehicle_in_network("2"):
(ndown,) = s.state.vehicle_downstream_of("1")
s.stop_step()
continue
else:
continue
self.assertEqual(ndown, "0")
@unittest.skip("Skipping momentary")
def test_query_vehicle_neighbors_bottleneck002(self):
sim_case = Simulation(self.mocks_path)
sim_instance = Simulator(self.sim_path)
sim_instance.register_simulation(sim_case)
pass
@unittest.skip("Skipping momentary")
def test_fixed_leader_neighbors_bottleneck002(self):
sim_case = Simulation(self.mocks_path)
sim_instance = Simulator(self.sim_path)
sim_instance.register_simulation(sim_case)
pass
| 36.474308
| 116
| 0.610858
| 1,066
| 9,228
| 4.994371
| 0.153846
| 0.084711
| 0.058603
| 0.103494
| 0.823441
| 0.789444
| 0.77855
| 0.716003
| 0.685575
| 0.649136
| 0
| 0.027273
| 0.284785
| 9,228
| 252
| 117
| 36.619048
| 0.779394
| 0.026875
| 0
| 0.680952
| 0
| 0
| 0.087189
| 0
| 0
| 0
| 0
| 0.003968
| 0.066667
| 1
| 0.119048
| false
| 0.009524
| 0.019048
| 0
| 0.147619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0b639a65344f1511e3c85e0b3e581170e4e6bf72
| 336
|
py
|
Python
|
CA117/Lab_7/lotto_42.py
|
PRITI1999/OneLineWonders
|
91a7368e0796e5a3b5839c9165f9fbe5460879f5
|
[
"MIT"
] | 6
|
2016-02-04T00:15:20.000Z
|
2019-10-13T13:53:16.000Z
|
CA117/Lab_7/lotto_42.py
|
PRITI1999/OneLineWonders
|
91a7368e0796e5a3b5839c9165f9fbe5460879f5
|
[
"MIT"
] | 2
|
2016-03-14T04:01:36.000Z
|
2019-10-16T12:45:34.000Z
|
CA117/Lab_7/lotto_42.py
|
PRITI1999/OneLineWonders
|
91a7368e0796e5a3b5839c9165f9fbe5460879f5
|
[
"MIT"
] | 10
|
2016-02-09T14:38:32.000Z
|
2021-05-25T08:16:26.000Z
|
(lambda C,M,B:(lambda P:[print("Match {}'s : {:>5} ({} to 1)".format(n,P.count(n),"?"if P.count(n)==0else int(1000000/P.count(n))))for n in range(3,7)])([len(set(__import__("random").sample(B,6)).intersection(C))for _ in range(0,1000001)]))([int(x)for x in __import__("sys").argv[1:]],{x:0for x in range(3,7)},[x for x in range(1,48)])
| 168
| 335
| 0.622024
| 70
| 336
| 2.857143
| 0.528571
| 0.14
| 0.105
| 0.09
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090615
| 0.080357
| 336
| 1
| 336
| 336
| 0.556634
| 0
| 0
| 0
| 0
| 0
| 0.113095
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
0b7b7cd047603eacd51102f37852acfe25355209
| 135
|
py
|
Python
|
greeting.py
|
taras-stx/pytest-tips-tricks
|
beec4d284a3ff9796c92426ca499fea5bc4eb18f
|
[
"CC0-1.0"
] | 1
|
2022-03-13T18:07:33.000Z
|
2022-03-13T18:07:33.000Z
|
greeting.py
|
taras-stx/pytest-tips-tricks
|
beec4d284a3ff9796c92426ca499fea5bc4eb18f
|
[
"CC0-1.0"
] | null | null | null |
greeting.py
|
taras-stx/pytest-tips-tricks
|
beec4d284a3ff9796c92426ca499fea5bc4eb18f
|
[
"CC0-1.0"
] | 4
|
2021-11-29T07:42:13.000Z
|
2022-02-20T04:39:12.000Z
|
def my_name(name):
# import ipdb;ipdb.set_trace()
return f"My name is: {name}"
if __name__ == "__main__":
my_name("bob")
| 16.875
| 34
| 0.622222
| 21
| 135
| 3.47619
| 0.619048
| 0.246575
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214815
| 135
| 7
| 35
| 19.285714
| 0.688679
| 0.207407
| 0
| 0
| 0
| 0
| 0.27619
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.25
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
f01f10b437d577155785b7e52cd755683129e070
| 2,026
|
py
|
Python
|
fluiddb/common/paths.py
|
fluidinfo/fluiddb
|
b5a8c8349f3eaf3364cc4efba4736c3e33b30d96
|
[
"Apache-2.0"
] | 3
|
2021-05-10T14:41:30.000Z
|
2021-12-16T05:53:30.000Z
|
fluiddb/common/paths.py
|
fluidinfo/fluiddb
|
b5a8c8349f3eaf3364cc4efba4736c3e33b30d96
|
[
"Apache-2.0"
] | null | null | null |
fluiddb/common/paths.py
|
fluidinfo/fluiddb
|
b5a8c8349f3eaf3364cc4efba4736c3e33b30d96
|
[
"Apache-2.0"
] | 2
|
2018-01-24T09:03:21.000Z
|
2021-06-25T08:34:54.000Z
|
# All paths that have to do with permissions are in permissions.py
from fluiddb.common import queues
from fluiddb.common.defaults import (
adminUsername, namespaceCategoryName, tagCategoryName, aboutTagName,
pathTagName, descriptionTagName, usernameTagName, passwordTagName,
nameTagName, emailTagName, adminUserNamespaceName,
activationTokenTagName, createdAtTagName)
from fluiddb.common.types_thrift.ttypes import TInvalidPath
maxPathLength = (queues.maxQueueNameLength -
max(len(queues.makeTagQueue('')),
len(queues.makeNamespaceQueue(''))))
def checkPath(path):
if len(path) > maxPathLength:
raise TInvalidPath()
def aboutPath():
return [adminUsername, aboutTagName]
def categoryPath(category):
# TODO: This should really be called categoryPathPath.
if category == namespaceCategoryName:
return [adminUsername, category, pathTagName]
else:
# This case catches tagCategoryName and
# defaults.tagInstanceSetCategoryName. We do no error
# checking.
return [adminUsername, tagCategoryName, pathTagName]
def categoryDescriptionPath(category):
if category == namespaceCategoryName:
return [adminUsername, category, descriptionTagName]
else:
# This case catches tagCategoryName and
# defaults.tagInstanceSetCategoryName. We do no error
# checking.
return [adminUsername, tagCategoryName, descriptionTagName]
def usernamePath():
return [adminUsername, adminUserNamespaceName, usernameTagName]
def namePath():
return [adminUsername, adminUserNamespaceName, nameTagName]
def passwordPath():
return [adminUsername, adminUserNamespaceName, passwordTagName]
def emailPath():
return [adminUsername, adminUserNamespaceName, emailTagName]
def activationTokenPath():
return [adminUsername, adminUserNamespaceName, activationTokenTagName]
def createdAtPath():
return [adminUsername, adminUserNamespaceName, createdAtTagName]
| 29.794118
| 74
| 0.743337
| 164
| 2,026
| 9.176829
| 0.439024
| 0.13887
| 0.163455
| 0.049169
| 0.24186
| 0.24186
| 0.164784
| 0.164784
| 0.164784
| 0.164784
| 0
| 0
| 0.1846
| 2,026
| 67
| 75
| 30.238806
| 0.911017
| 0.156466
| 0
| 0.108108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014925
| 0
| 1
| 0.27027
| false
| 0.081081
| 0.081081
| 0.189189
| 0.648649
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
f02cec6f38083ac5910194a0a9d1276d94999756
| 113
|
py
|
Python
|
auth/view/resource/sign_in_request.py
|
nicolaszein/auth
|
90112f1a4d6f368714b19daad7e8a4226594b383
|
[
"MIT"
] | null | null | null |
auth/view/resource/sign_in_request.py
|
nicolaszein/auth
|
90112f1a4d6f368714b19daad7e8a4226594b383
|
[
"MIT"
] | null | null | null |
auth/view/resource/sign_in_request.py
|
nicolaszein/auth
|
90112f1a4d6f368714b19daad7e8a4226594b383
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel, EmailStr
class SignInRequest(BaseModel):
email: EmailStr
password: str
| 16.142857
| 40
| 0.761062
| 12
| 113
| 7.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185841
| 113
| 6
| 41
| 18.833333
| 0.934783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
f038ce8f5eaa02a52ba725c5cb79563131146d02
| 144
|
py
|
Python
|
Curso_de_Python_3_do_Basico_Ao_Avancado_Udemy/aula165/produto/views.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
Curso_de_Python_3_do_Basico_Ao_Avancado_Udemy/aula165/produto/views.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
Curso_de_Python_3_do_Basico_Ao_Avancado_Udemy/aula165/produto/views.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
def metodo(request):
return render(request, 'produto/index.html')
| 24
| 48
| 0.791667
| 19
| 144
| 6
| 0.736842
| 0.175439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 144
| 5
| 49
| 28.8
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
f064a52cd3b5c792ba5a18a634af118db25ec329
| 107
|
py
|
Python
|
home/views.py
|
nadeengamage/esta-i
|
a94a7ce53006c291d1133c8d66c7566e8b5ac420
|
[
"Apache-2.0"
] | null | null | null |
home/views.py
|
nadeengamage/esta-i
|
a94a7ce53006c291d1133c8d66c7566e8b5ac420
|
[
"Apache-2.0"
] | 3
|
2021-04-08T20:57:59.000Z
|
2022-02-10T12:07:58.000Z
|
home/views.py
|
nadeengamage/esta-i
|
a94a7ce53006c291d1133c8d66c7566e8b5ac420
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
def index(request):
return render(request, 'home/views/index.html')
| 17.833333
| 48
| 0.775701
| 15
| 107
| 5.533333
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11215
| 107
| 5
| 49
| 21.4
| 0.873684
| 0
| 0
| 0
| 0
| 0
| 0.196262
| 0.196262
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
f07540cf59212efbdc2ebce0c13e4861bdfa1f2b
| 9,033
|
py
|
Python
|
packages/python/plotly/plotly/graph_objs/surface/_contours.py
|
labaran1/plotly.py
|
7ec751e8fed4a570c11ea4bea2231806389d62eb
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/surface/_contours.py
|
labaran1/plotly.py
|
7ec751e8fed4a570c11ea4bea2231806389d62eb
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/surface/_contours.py
|
labaran1/plotly.py
|
7ec751e8fed4a570c11ea4bea2231806389d62eb
|
[
"MIT"
] | null | null | null |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Contours(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "surface"
_path_str = "surface.contours"
_valid_props = {"x", "y", "z"}
# x
# -
@property
def x(self):
"""
The 'x' property is an instance of X
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.contours.X`
- A dict of string/value properties that will be passed
to the X constructor
Supported dict properties:
color
Sets the color of the contour lines.
end
Sets the end contour level value. Must be more
than `contours.start`
highlight
Determines whether or not contour lines about
the x dimension are highlighted on hover.
highlightcolor
Sets the color of the highlighted contour
lines.
highlightwidth
Sets the width of the highlighted contour
lines.
project
:class:`plotly.graph_objects.surface.contours.x
.Project` instance or dict with compatible
properties
show
Determines whether or not contour lines about
the x dimension are drawn.
size
Sets the step between each contour level. Must
be positive.
start
Sets the starting contour level value. Must be
less than `contours.end`
usecolormap
An alternate to "color". Determines whether or
not the contour lines are colored using the
trace "colorscale".
width
Sets the width of the contour lines.
Returns
-------
plotly.graph_objs.surface.contours.X
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# y
# -
@property
def y(self):
"""
The 'y' property is an instance of Y
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.contours.Y`
- A dict of string/value properties that will be passed
to the Y constructor
Supported dict properties:
color
Sets the color of the contour lines.
end
Sets the end contour level value. Must be more
than `contours.start`
highlight
Determines whether or not contour lines about
the y dimension are highlighted on hover.
highlightcolor
Sets the color of the highlighted contour
lines.
highlightwidth
Sets the width of the highlighted contour
lines.
project
:class:`plotly.graph_objects.surface.contours.y
.Project` instance or dict with compatible
properties
show
Determines whether or not contour lines about
the y dimension are drawn.
size
Sets the step between each contour level. Must
be positive.
start
Sets the starting contour level value. Must be
less than `contours.end`
usecolormap
An alternate to "color". Determines whether or
not the contour lines are colored using the
trace "colorscale".
width
Sets the width of the contour lines.
Returns
-------
plotly.graph_objs.surface.contours.Y
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# z
# -
@property
def z(self):
"""
The 'z' property is an instance of Z
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.contours.Z`
- A dict of string/value properties that will be passed
to the Z constructor
Supported dict properties:
color
Sets the color of the contour lines.
end
Sets the end contour level value. Must be more
than `contours.start`
highlight
Determines whether or not contour lines about
the z dimension are highlighted on hover.
highlightcolor
Sets the color of the highlighted contour
lines.
highlightwidth
Sets the width of the highlighted contour
lines.
project
:class:`plotly.graph_objects.surface.contours.z
.Project` instance or dict with compatible
properties
show
Determines whether or not contour lines about
the z dimension are drawn.
size
Sets the step between each contour level. Must
be positive.
start
Sets the starting contour level value. Must be
less than `contours.end`
usecolormap
An alternate to "color". Determines whether or
not the contour lines are colored using the
trace "colorscale".
width
Sets the width of the contour lines.
Returns
-------
plotly.graph_objs.surface.contours.Z
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
x
:class:`plotly.graph_objects.surface.contours.X`
instance or dict with compatible properties
y
:class:`plotly.graph_objects.surface.contours.Y`
instance or dict with compatible properties
z
:class:`plotly.graph_objects.surface.contours.Z`
instance or dict with compatible properties
"""
def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
"""
Construct a new Contours object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.surface.Contours`
x
:class:`plotly.graph_objects.surface.contours.X`
instance or dict with compatible properties
y
:class:`plotly.graph_objects.surface.contours.Y`
instance or dict with compatible properties
z
:class:`plotly.graph_objects.surface.contours.Z`
instance or dict with compatible properties
Returns
-------
Contours
"""
super(Contours, self).__init__("contours")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.surface.Contours
constructor must be a dict or
an instance of :class:`plotly.graph_objs.surface.Contours`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("z", None)
_v = z if z is not None else _v
if _v is not None:
self["z"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| 33.087912
| 82
| 0.500498
| 919
| 9,033
| 4.838955
| 0.136017
| 0.033056
| 0.050371
| 0.044524
| 0.769058
| 0.735777
| 0.734653
| 0.733978
| 0.733978
| 0.733978
| 0
| 0
| 0.424887
| 9,033
| 272
| 83
| 33.209559
| 0.856015
| 0.588177
| 0
| 0.149254
| 0
| 0
| 0.203186
| 0.061989
| 0
| 0
| 0
| 0
| 0
| 1
| 0.119403
| false
| 0
| 0.029851
| 0.014925
| 0.283582
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b2db6f9aa7b93c8fbaea251533d45094a7b43fc2
| 275
|
py
|
Python
|
src/cms/views/imprint/__init__.py
|
mckinly/cms-django
|
c9995a3bfab6ee2d02f2406a7f83cf91b7ccfcca
|
[
"Apache-2.0"
] | 14
|
2020-12-03T07:56:30.000Z
|
2021-10-30T13:09:50.000Z
|
integreat_cms/cms/views/imprint/__init__.py
|
Carlosbogo/integreat-cms
|
066f188b138e105e72f5420bc36d25709f25402d
|
[
"Apache-2.0"
] | 367
|
2020-11-20T00:34:20.000Z
|
2021-12-14T15:20:42.000Z
|
src/cms/views/imprint/__init__.py
|
mckinly/cms-django
|
c9995a3bfab6ee2d02f2406a7f83cf91b7ccfcca
|
[
"Apache-2.0"
] | 3
|
2021-02-09T18:46:52.000Z
|
2021-12-07T10:41:39.000Z
|
from .imprint_view import ImprintView
from .imprint_revision_view import ImprintRevisionView
from .imprint_sbs_view import ImprintSideBySideView
from .imprint_actions import (
archive_imprint,
restore_imprint,
delete_imprint,
expand_imprint_translation_id,
)
| 27.5
| 54
| 0.829091
| 31
| 275
| 6.967742
| 0.516129
| 0.203704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134545
| 275
| 9
| 55
| 30.555556
| 0.907563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.444444
| 0
| 0.444444
| 0.888889
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
652ba9d1595d62f332bb590a43bbe19f5edcf426
| 141
|
py
|
Python
|
frappe/patches/v5_0/force_sync_website.py
|
khatrijitendra/lumalock-frappe
|
b3864278dad21dde5c53604be65aa56c79e5d909
|
[
"MIT"
] | null | null | null |
frappe/patches/v5_0/force_sync_website.py
|
khatrijitendra/lumalock-frappe
|
b3864278dad21dde5c53604be65aa56c79e5d909
|
[
"MIT"
] | 7
|
2020-03-24T17:07:47.000Z
|
2022-03-11T23:49:25.000Z
|
frappe/patches/v5_0/force_sync_website.py
|
khatrijitendra/lumalock-frappe
|
b3864278dad21dde5c53604be65aa56c79e5d909
|
[
"MIT"
] | 5
|
2016-11-12T12:14:58.000Z
|
2018-03-21T15:45:45.000Z
|
from __future__ import unicode_literals
import frappe
from frappe.website import statics
def execute():
statics.sync_statics(rebuild=True)
| 20.142857
| 39
| 0.836879
| 19
| 141
| 5.894737
| 0.684211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106383
| 141
| 6
| 40
| 23.5
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.6
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
654698046ce027c68f0639cd8e4ef903e9bd8742
| 633
|
py
|
Python
|
open-test-data/rfc5118-sip-torture-test/mult-ip-in-header.dat.py
|
bobjects/BobStack
|
c177b286075044832f44baf9ace201780c8b4320
|
[
"Apache-2.0"
] | null | null | null |
open-test-data/rfc5118-sip-torture-test/mult-ip-in-header.dat.py
|
bobjects/BobStack
|
c177b286075044832f44baf9ace201780c8b4320
|
[
"Apache-2.0"
] | null | null | null |
open-test-data/rfc5118-sip-torture-test/mult-ip-in-header.dat.py
|
bobjects/BobStack
|
c177b286075044832f44baf9ace201780c8b4320
|
[
"Apache-2.0"
] | null | null | null |
messageString = (
'BYE sip:user@host.example.net SIP/2.0\r\n'
'Via: SIP/2.0/UDP [2001:db8::9:1]:6050;branch=z9hG4bKas3-111\r\n'
'Via: SIP/2.0/UDP 192.0.2.1;branch=z9hG4bKjhja8781hjuaij65144\r\n'
'Via: SIP/2.0/TCP [2001:db8::9:255];branch=z9hG4bK451jj;received=192.0.2.200\r\n'
'Call-ID: 997077@lau_4100\r\n'
'Max-Forwards: 70\r\n'
'CSeq: 89187 BYE\r\n'
'To: sip:user@example.net;tag=9817--94\r\n'
'From: sip:user@example.com;tag=81x2\r\n'
'Content-Length: 0\r\n'
'\r\n'
)
| 45.214286
| 94
| 0.516588
| 101
| 633
| 3.227723
| 0.465347
| 0.067485
| 0.06135
| 0.07362
| 0.110429
| 0.110429
| 0.079755
| 0
| 0
| 0
| 0
| 0.205882
| 0.301738
| 633
| 13
| 95
| 48.692308
| 0.531674
| 0
| 0
| 0
| 0
| 0.230769
| 0.675806
| 0.403226
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e8e6ffcbf65df71aaf0d84345c6d55be7bdec6d9
| 100
|
py
|
Python
|
pyspeckit/cubes/__init__.py
|
glangsto/pyspeckit
|
346b24fb828d1d33c7891cdde7609723e51af34c
|
[
"MIT"
] | 79
|
2015-03-03T15:06:20.000Z
|
2022-03-27T21:29:47.000Z
|
pyspeckit/cubes/__init__.py
|
glangsto/pyspeckit
|
346b24fb828d1d33c7891cdde7609723e51af34c
|
[
"MIT"
] | 240
|
2015-01-04T02:59:12.000Z
|
2021-11-13T15:11:14.000Z
|
pyspeckit/cubes/__init__.py
|
glangsto/pyspeckit
|
346b24fb828d1d33c7891cdde7609723e51af34c
|
[
"MIT"
] | 68
|
2015-03-02T12:23:12.000Z
|
2022-02-28T10:26:36.000Z
|
"""
:Author: Adam Ginsburg <adam.g.ginsburg@gmail.com>
"""
from .SpectralCube import Cube,CubeStack
| 20
| 50
| 0.74
| 13
| 100
| 5.692308
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 100
| 4
| 51
| 25
| 0.822222
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e8f4b2e7a7d20fa7aedf3c17532bf61312b3ab8e
| 57
|
py
|
Python
|
tests/assets/multipleroots/rootpackagegreen/two.py
|
ramiro/import-linter
|
afab73c8f60c284cd5dbe013c4c7de03c294cc6a
|
[
"BSD-2-Clause"
] | 171
|
2019-04-03T20:22:11.000Z
|
2022-03-31T23:55:46.000Z
|
tests/assets/multipleroots/rootpackagegreen/two.py
|
ramiro/import-linter
|
afab73c8f60c284cd5dbe013c4c7de03c294cc6a
|
[
"BSD-2-Clause"
] | 76
|
2019-02-16T11:28:00.000Z
|
2022-03-24T08:36:18.000Z
|
tests/assets/multipleroots/rootpackagegreen/two.py
|
ramiro/import-linter
|
afab73c8f60c284cd5dbe013c4c7de03c294cc6a
|
[
"BSD-2-Clause"
] | 18
|
2019-05-17T11:45:37.000Z
|
2022-03-24T14:25:52.000Z
|
from rootpackageblue.one import alpha
from . import one
| 14.25
| 37
| 0.807018
| 8
| 57
| 5.75
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 57
| 3
| 38
| 19
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e8f9af2d536b8560a5f1f94feea08e8cce0e3b6a
| 919
|
py
|
Python
|
tests/test_1752.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
tests/test_1752.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
tests/test_1752.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import pytest
"""
Test 1752. Check if Array Is Sorted and Rotated
"""
@pytest.fixture(scope="session")
def init_variables_1752():
from src.leetcode_1752_check_if_array_is_sorted_and_rotated import Solution
solution = Solution()
def _init_variables_1752():
return solution
yield _init_variables_1752
class TestClass1752:
def test_solution_0(self, init_variables_1752):
assert init_variables_1752().check([3, 4, 5, 1, 2])
def test_solution_1(self, init_variables_1752):
assert not init_variables_1752().check([2, 1, 3, 4])
def test_solution_2(self, init_variables_1752):
assert init_variables_1752().check([1, 2, 3])
def test_solution_3(self, init_variables_1752):
assert init_variables_1752().check([1, 1, 1])
def test_solution_4(self, init_variables_1752):
assert init_variables_1752().check([2, 1])
| 24.837838
| 79
| 0.711643
| 133
| 919
| 4.571429
| 0.285714
| 0.277961
| 0.363487
| 0.172697
| 0.524671
| 0.480263
| 0.4375
| 0.4375
| 0.325658
| 0.164474
| 0
| 0.114514
| 0.182807
| 919
| 36
| 80
| 25.527778
| 0.695073
| 0.021763
| 0
| 0
| 0
| 0
| 0.008304
| 0
| 0
| 0
| 0
| 0
| 0.263158
| 1
| 0.368421
| false
| 0
| 0.105263
| 0.052632
| 0.578947
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
33111c1b907ac44a95d49e5335d05c8249b069a2
| 206
|
py
|
Python
|
utils/__init__.py
|
nassir90/ICNet-pytorch
|
af6eec01a4419ce43c52d295bc502c366478fbd7
|
[
"MIT"
] | 83
|
2019-11-27T12:20:16.000Z
|
2022-03-30T08:34:50.000Z
|
utils/__init__.py
|
nassir90/ICNet-pytorch
|
af6eec01a4419ce43c52d295bc502c366478fbd7
|
[
"MIT"
] | 18
|
2019-11-26T11:19:37.000Z
|
2022-03-30T13:06:51.000Z
|
utils/__init__.py
|
nassir90/ICNet-pytorch
|
af6eec01a4419ce43c52d295bc502c366478fbd7
|
[
"MIT"
] | 24
|
2020-02-05T09:12:24.000Z
|
2022-03-04T20:47:32.000Z
|
"""Utility functions."""
from .loss import ICNetLoss
from .lr_scheduler import IterationPolyLR
from .metric import SegmentationMetric
from .logger import SetupLogger
from .visualize import get_color_pallete
| 34.333333
| 41
| 0.839806
| 25
| 206
| 6.8
| 0.68
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101942
| 206
| 6
| 42
| 34.333333
| 0.918919
| 0.087379
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3329e191cd9e4f7b1884ef09a2d82cea7c9452c5
| 112
|
py
|
Python
|
airtable/table.py
|
jlouie95618/airtable-python
|
dafdbb2028bbf63a2a1c1daa3e8adf5db7d071bd
|
[
"MIT"
] | 2
|
2015-09-20T13:43:05.000Z
|
2016-04-12T02:48:50.000Z
|
airtable/table.py
|
jlouie95618/airtable-python
|
dafdbb2028bbf63a2a1c1daa3e8adf5db7d071bd
|
[
"MIT"
] | null | null | null |
airtable/table.py
|
jlouie95618/airtable-python
|
dafdbb2028bbf63a2a1c1daa3e8adf5db7d071bd
|
[
"MIT"
] | null | null | null |
class Table(object):
"""docstring for Table"""
def __init__(self, arg):
self.arg = arg
| 18.666667
| 29
| 0.544643
| 13
| 112
| 4.384615
| 0.692308
| 0.245614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.321429
| 112
| 6
| 30
| 18.666667
| 0.75
| 0.169643
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
6841eec785d9826bab06dd87213af813f4b42657
| 89
|
py
|
Python
|
TypeRX_server/lobby/admin.py
|
kamaljohnson/TypRX-GAME
|
f06cc6c8517b6bb462148f491a3792fa79e435cd
|
[
"MIT"
] | 1
|
2022-01-15T07:28:37.000Z
|
2022-01-15T07:28:37.000Z
|
TypeRX_server/lobby/admin.py
|
kamaljohnson/TReX-GAME
|
f06cc6c8517b6bb462148f491a3792fa79e435cd
|
[
"MIT"
] | 9
|
2020-07-19T15:46:23.000Z
|
2022-02-27T00:00:16.000Z
|
backend/api/admin.py
|
Janjs/CountryFlagNameGame
|
7ce5d7752716cd10a084f59182a3d618479b37aa
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Player
admin.site.register(Player)
| 17.8
| 32
| 0.820225
| 13
| 89
| 5.615385
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11236
| 89
| 4
| 33
| 22.25
| 0.924051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6873ce24717e5c2815ad0b10a2992424d9a09ff5
| 74
|
py
|
Python
|
modbusproxy_cpe_cb/__init__.py
|
Inmarsat/modbusproxy_cpe_cb
|
3070f3539b38b8dab63afa5ec2c8e2f4ea4935d6
|
[
"Apache-2.0"
] | null | null | null |
modbusproxy_cpe_cb/__init__.py
|
Inmarsat/modbusproxy_cpe_cb
|
3070f3539b38b8dab63afa5ec2c8e2f4ea4935d6
|
[
"Apache-2.0"
] | null | null | null |
modbusproxy_cpe_cb/__init__.py
|
Inmarsat/modbusproxy_cpe_cb
|
3070f3539b38b8dab63afa5ec2c8e2f4ea4935d6
|
[
"Apache-2.0"
] | null | null | null |
import constants
import context
import store
import modbus_server_adapter
| 14.8
| 28
| 0.891892
| 10
| 74
| 6.4
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 74
| 4
| 29
| 18.5
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6876f027d9a46356daf30b9446731ca7ad7e29fb
| 93
|
py
|
Python
|
python/8Kyu/Grasshopper - Personalized Message.py
|
athasv/Codewars-data
|
5e106466e709fd776f23585ad9f652d0d65b48d3
|
[
"MIT"
] | null | null | null |
python/8Kyu/Grasshopper - Personalized Message.py
|
athasv/Codewars-data
|
5e106466e709fd776f23585ad9f652d0d65b48d3
|
[
"MIT"
] | null | null | null |
python/8Kyu/Grasshopper - Personalized Message.py
|
athasv/Codewars-data
|
5e106466e709fd776f23585ad9f652d0d65b48d3
|
[
"MIT"
] | null | null | null |
def greet(name, owner):
return "Hello boss" if str(name) == str(owner) else "Hello guest"
| 46.5
| 69
| 0.677419
| 15
| 93
| 4.2
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172043
| 93
| 2
| 69
| 46.5
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0.223404
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
68a6373d0013ae0d1a78ad8082a5247281bfb611
| 140
|
py
|
Python
|
CA/__init__.py
|
Justin900429/computational_aesthetics
|
4a7b59b6de31c5f70f3536b5870de933b3b56101
|
[
"MIT"
] | 1
|
2021-08-28T09:27:24.000Z
|
2021-08-28T09:27:24.000Z
|
CA/__init__.py
|
Justin900429/computational_aesthetics
|
4a7b59b6de31c5f70f3536b5870de933b3b56101
|
[
"MIT"
] | null | null | null |
CA/__init__.py
|
Justin900429/computational_aesthetics
|
4a7b59b6de31c5f70f3536b5870de933b3b56101
|
[
"MIT"
] | null | null | null |
from .main import CA
from .base import Base
from .color import ImageColor
from .composition import Composition
from .texture import Texture
| 23.333333
| 36
| 0.821429
| 20
| 140
| 5.75
| 0.45
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 140
| 5
| 37
| 28
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
68b23ef97e2a92ab17217e5fb407448fb93869af
| 55
|
py
|
Python
|
HELLO WORLD.py
|
dhruvvk2326/Google-code-in
|
b0a38be8baf1772e60289b25e4fd7ddf33a7feeb
|
[
"MIT"
] | null | null | null |
HELLO WORLD.py
|
dhruvvk2326/Google-code-in
|
b0a38be8baf1772e60289b25e4fd7ddf33a7feeb
|
[
"MIT"
] | null | null | null |
HELLO WORLD.py
|
dhruvvk2326/Google-code-in
|
b0a38be8baf1772e60289b25e4fd7ddf33a7feeb
|
[
"MIT"
] | null | null | null |
print('HELLO WORLD')
print("my cgi id is dhruvvk2326")
| 18.333333
| 33
| 0.727273
| 9
| 55
| 4.444444
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0.127273
| 55
| 2
| 34
| 27.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
68b679b4e5a909625b51be0102b249faa2af9e9c
| 176
|
py
|
Python
|
labelmodels/__init__.py
|
yongzx/labelmodels
|
78d27e2b5cc56ec167f6f485b811b93c8f5b2c26
|
[
"Apache-2.0"
] | 12
|
2019-11-01T19:00:58.000Z
|
2022-01-18T20:53:53.000Z
|
labelmodels/__init__.py
|
yongzx/labelmodels
|
78d27e2b5cc56ec167f6f485b811b93c8f5b2c26
|
[
"Apache-2.0"
] | 3
|
2021-06-09T01:22:05.000Z
|
2021-07-15T03:18:22.000Z
|
labelmodels/__init__.py
|
yongzx/labelmodels
|
78d27e2b5cc56ec167f6f485b811b93c8f5b2c26
|
[
"Apache-2.0"
] | 1
|
2022-01-10T23:31:57.000Z
|
2022-01-10T23:31:57.000Z
|
from .hmm import HMM
from .label_model import LearningConfig
from .linked_hmm import LinkedHMM
from .naive_bayes import NaiveBayes
from .partial_labels import PartialLabelModel
| 35.2
| 45
| 0.863636
| 24
| 176
| 6.166667
| 0.583333
| 0.121622
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107955
| 176
| 5
| 45
| 35.2
| 0.942675
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d7b46114eea25240509d9fa18f489b2c4475c93b
| 58
|
py
|
Python
|
argvparser/__init__.py
|
Arthuchaut/ArgvParser
|
6dfde974d222c6023282da1ca90e828199196726
|
[
"MIT"
] | null | null | null |
argvparser/__init__.py
|
Arthuchaut/ArgvParser
|
6dfde974d222c6023282da1ca90e828199196726
|
[
"MIT"
] | null | null | null |
argvparser/__init__.py
|
Arthuchaut/ArgvParser
|
6dfde974d222c6023282da1ca90e828199196726
|
[
"MIT"
] | null | null | null |
from argvparser.argvparser import ArgvParser as argvparser
| 58
| 58
| 0.896552
| 7
| 58
| 7.428571
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086207
| 58
| 1
| 58
| 58
| 0.981132
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d7be98bfa057ea58f75f5bdc046c1217dbc62a46
| 239
|
py
|
Python
|
Exercise_2_4.py
|
kushrami/Python-Crash-Course-book-Excersice
|
7093181940a90d9f4bab5775ef56f57963450393
|
[
"Apache-2.0"
] | null | null | null |
Exercise_2_4.py
|
kushrami/Python-Crash-Course-book-Excersice
|
7093181940a90d9f4bab5775ef56f57963450393
|
[
"Apache-2.0"
] | null | null | null |
Exercise_2_4.py
|
kushrami/Python-Crash-Course-book-Excersice
|
7093181940a90d9f4bab5775ef56f57963450393
|
[
"Apache-2.0"
] | null | null | null |
#Name Cases.
Personal_Name = "joey Tribionny"
print("Person's name in lower case: " + Personal_Name.lower())
print("Person's name in upper case: " + Personal_Name.upper())
print("Person's name in title case: " + Personal_Name.title())
| 26.555556
| 62
| 0.711297
| 36
| 239
| 4.611111
| 0.361111
| 0.289157
| 0.216867
| 0.289157
| 0.325301
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142259
| 239
| 8
| 63
| 29.875
| 0.809756
| 0.046025
| 0
| 0
| 0
| 0
| 0.448889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.75
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
d7ce5eb0a1217ef529269d0aeb877e42c6e340d0
| 440
|
py
|
Python
|
vkmodels/objects/prettyCards.py
|
deknowny/vk-api-py-models
|
6760c9395b39efd2a987251893b418a61eefbdca
|
[
"MIT"
] | null | null | null |
vkmodels/objects/prettyCards.py
|
deknowny/vk-api-py-models
|
6760c9395b39efd2a987251893b418a61eefbdca
|
[
"MIT"
] | null | null | null |
vkmodels/objects/prettyCards.py
|
deknowny/vk-api-py-models
|
6760c9395b39efd2a987251893b418a61eefbdca
|
[
"MIT"
] | null | null | null |
import dataclasses
import enum
import typing
from vkmodels.bases.object import ObjectBase
@dataclasses.dataclass
class PrettyCard(
ObjectBase,
):
card_id: str
link_url: str
photo: str
title: str
button: typing.Optional[str] = None
button_text: typing.Optional[str] = None
images: typing.Optional[typing.List[Image]] = None
price: typing.Optional[str] = None
price_old: typing.Optional[str] = None
| 20.952381
| 54
| 0.713636
| 56
| 440
| 5.535714
| 0.5
| 0.225806
| 0.219355
| 0.270968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195455
| 440
| 20
| 55
| 22
| 0.875706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.235294
| 0
| 0.823529
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
cc2dba51dd582ded75653662f3b05b1546d61ec9
| 83
|
py
|
Python
|
aarin/config/__init__.py
|
DuTra01/aarin-pix-sdk
|
36510607049a60c942cbe73025470ca4e9b7cdcb
|
[
"MIT"
] | 2
|
2021-07-15T17:53:35.000Z
|
2021-08-21T01:07:24.000Z
|
aarin/config/__init__.py
|
DuTra01/aarin-pix-sdk
|
36510607049a60c942cbe73025470ca4e9b7cdcb
|
[
"MIT"
] | null | null | null |
aarin/config/__init__.py
|
DuTra01/aarin-pix-sdk
|
36510607049a60c942cbe73025470ca4e9b7cdcb
|
[
"MIT"
] | null | null | null |
from .config import Config
from .request_options import Token, Auth, RequestOptions
| 41.5
| 56
| 0.843373
| 11
| 83
| 6.272727
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108434
| 83
| 2
| 56
| 41.5
| 0.932432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0be4d33001d8cc9758c5dc14a973e67197758d2a
| 107
|
py
|
Python
|
cloudfile/__init__.py
|
takotab/cloudfile
|
7b1af196619c0c7fdb1d3631e58d96fb3b947242
|
[
"MIT"
] | null | null | null |
cloudfile/__init__.py
|
takotab/cloudfile
|
7b1af196619c0c7fdb1d3631e58d96fb3b947242
|
[
"MIT"
] | 11
|
2019-06-11T12:54:41.000Z
|
2021-02-08T20:31:09.000Z
|
cloudfile/__init__.py
|
takotab/cloudfile
|
7b1af196619c0c7fdb1d3631e58d96fb3b947242
|
[
"MIT"
] | null | null | null |
from .save import save
from .restore import restore, restore_file, download
from .add_file import add_file
| 26.75
| 52
| 0.82243
| 17
| 107
| 5
| 0.411765
| 0.164706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130841
| 107
| 3
| 53
| 35.666667
| 0.913978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0bf6892a8f0e192d2a0c64558b78138ff46fa751
| 154
|
py
|
Python
|
SpotMoodBot/data_models/__init__.py
|
md2002/Spotify-MoodBot
|
62c745bd7dd551d64f7370412455050dce1f1ed6
|
[
"MIT"
] | null | null | null |
SpotMoodBot/data_models/__init__.py
|
md2002/Spotify-MoodBot
|
62c745bd7dd551d64f7370412455050dce1f1ed6
|
[
"MIT"
] | null | null | null |
SpotMoodBot/data_models/__init__.py
|
md2002/Spotify-MoodBot
|
62c745bd7dd551d64f7370412455050dce1f1ed6
|
[
"MIT"
] | null | null | null |
from .conversation_flow import ConversationFlow, Question
from .user_profile import UserProfile
__all__ = ["ConversationFlow", "Question", "UserProfile"]
| 38.5
| 57
| 0.818182
| 15
| 154
| 8
| 0.666667
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 154
| 4
| 58
| 38.5
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0.225806
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
040b97a48df9ecb4c84ec8913d5b59453dc990a7
| 26,964
|
py
|
Python
|
pyuvdata/uvbeam/tests/test_cst_beam.py
|
no-lex/pyuvdata
|
90537f78230d3d34f5db4d39a9f2a18373435437
|
[
"BSD-2-Clause"
] | null | null | null |
pyuvdata/uvbeam/tests/test_cst_beam.py
|
no-lex/pyuvdata
|
90537f78230d3d34f5db4d39a9f2a18373435437
|
[
"BSD-2-Clause"
] | null | null | null |
pyuvdata/uvbeam/tests/test_cst_beam.py
|
no-lex/pyuvdata
|
90537f78230d3d34f5db4d39a9f2a18373435437
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
import os
import shutil
import pytest
import numpy as np
from pyuvdata.data import DATA_PATH
from pyuvdata import UVBeam
from pyuvdata.uvbeam.cst_beam import CSTBeam
import pyuvdata.tests as uvtest
# Names of the two CST text exports used as fixture data throughout these tests.
filenames = ["HERA_NicCST_150MHz.txt", "HERA_NicCST_123MHz.txt"]
# Folder under DATA_PATH that holds the CST beam fixture files.
cst_folder = "NicCSTbeams"
cst_files = [os.path.join(DATA_PATH, cst_folder, f) for f in filenames]
# YAML settings files describing the beam simulations (read by read_cst_beam).
cst_yaml_file = os.path.join(DATA_PATH, cst_folder, "NicCSTbeams.yaml")
cst_yaml_vivaldi = os.path.join(DATA_PATH, cst_folder, "HERA_Vivaldi_CST_beams.yaml")
def test_basic_frequencyparse():
    """name2freq extracts the MHz value embedded in each CST file name."""
    beam = CSTBeam()
    freqs = list(map(beam.name2freq, cst_files))
    assert freqs == [150e6, 123e6]
def test_frequencyparse_extra_numbers():
    """Digits elsewhere in the path must not confuse frequency parsing."""
    beam = CSTBeam()
    noisy_dir = os.path.join(
        "pyuvdata_1510194907049",
        "_t_env",
        "lib",
        "python2.7",
        "site-packages",
        "pyuvdata",
        "data",
    )
    paths = [os.path.join(noisy_dir, name) for name in filenames]
    assert [beam.name2freq(p) for p in paths] == [150e6, 123e6]
def test_frequencyparse_nicf_path():
    """Directory names containing frequency-like strings are ignored by name2freq."""
    beam = CSTBeam()
    sim_dir = os.path.join(
        "Simulations",
        "Radiation_patterns",
        "E-field pattern-Rigging height4.9m",
        "HERA_4.9m_E-pattern_100-200MHz",
    )
    paths = [os.path.join(sim_dir, name) for name in filenames]
    assert [beam.name2freq(p) for p in paths] == [150e6, 123e6]
def test_frequencyparse_decimal_non_mhz():
    """Decimal frequencies with kHz/GHz/Hz suffixes convert to the right Hz values."""
    beam = CSTBeam()
    sim_dir = os.path.join(
        "Simulations",
        "Radiation_patterns",
        "E-field pattern-Rigging height4.9m",
        "HERA_4.9m_E-pattern_100-200MHz",
    )
    expectations = {
        "HERA_Sim_120.87kHz.txt": 120.87e3,
        "HERA_Sim_120.87GHz.txt": 120.87e9,
        "HERA_Sim_120.87Hz.txt": 120.87,
    }
    for name, expected in expectations.items():
        assert beam.name2freq(os.path.join(sim_dir, name)) == expected
def test_read_yaml(cst_efield_2freq):
    """Reading the settings yaml reproduces the fixture beam and its metadata.

    Fix: the original created a throwaway ``UVBeam`` in ``beam1`` that was
    immediately overwritten by the fixture; the dead assignment is removed.
    """
    pytest.importorskip("yaml")
    expected_keywords = {
        "software": "CST 2016",
        "sim_type": "E-farfield",
        "layout": "1 antenna",
        "port_num": 1,
    }
    beam1 = cst_efield_2freq
    beam2 = UVBeam()
    beam2.read_cst_beam(cst_yaml_file, beam_type="efield")
    assert beam1 == beam2
    assert beam2.reference_impedance == 100
    assert beam2.extra_keywords == expected_keywords
def test_read_yaml_override(cst_efield_2freq):
    """A keyword passed to read_cst_beam overrides the settings yaml and warns.

    Fixes: removed a dead ``beam1 = UVBeam()`` assignment (immediately
    overwritten by the fixture) and a stray trailing comma that turned the
    ``read_cst_beam`` call statement into a throwaway one-element tuple.
    """
    pytest.importorskip("yaml")
    extra_keywords = {
        "software": "CST 2016",
        "sim_type": "E-farfield",
        "layout": "1 antenna",
        "port_num": 1,
    }
    beam1 = cst_efield_2freq
    beam1.telescope_name = "test"
    beam2 = UVBeam()
    with uvtest.check_warnings(
        UserWarning,
        match=(
            "The telescope_name keyword is set, overriding "
            "the value in the settings yaml file."
        ),
    ):
        beam2.read_cst_beam(cst_yaml_file, beam_type="efield", telescope_name="test")
    assert beam1 == beam2
    assert beam2.reference_impedance == 100
    assert beam2.extra_keywords == extra_keywords
def test_read_yaml_freq_select(cst_efield_1freq):
    """frequency_select picks a single frequency; a missing one raises ValueError.

    Fix: removed a dead ``beam1 = UVBeam()`` assignment that was immediately
    overwritten by the fixture.
    """
    pytest.importorskip("yaml")
    beam1 = cst_efield_1freq
    beam2 = UVBeam()
    beam2.read_cst_beam(cst_yaml_file, beam_type="efield", frequency_select=[150e6])
    assert beam1 == beam2
    # requesting a frequency not present in the settings file is an error
    freq = 180e6
    with pytest.raises(ValueError, match=f"frequency {freq} not in frequency list"):
        beam2.read_cst_beam(cst_yaml_file, beam_type="power", frequency_select=[freq])
def test_read_yaml_feed_pol_list(cst_efield_2freq, cst_efield_1freq):
    """A settings yaml whose feed_pol is a (repeated) list reads correctly.

    Fixes: removed a dead ``beam1 = UVBeam()`` assignment, and wrapped the
    assertions in try/finally so the temporary yaml in the shared data folder
    is removed even when an assertion fails.
    """
    pytest.importorskip("yaml")
    # make yaml with a list of (the same) feed_pols
    import yaml

    test_yaml_file = os.path.join(DATA_PATH, cst_folder, "test_cst_settings.yaml")
    with open(cst_yaml_file, "r") as file:
        settings_dict = yaml.safe_load(file)
    settings_dict["feed_pol"] = ["x", "x"]
    with open(test_yaml_file, "w") as outfile:
        yaml.dump(settings_dict, outfile, default_flow_style=False)
    try:
        extra_keywords = {
            "software": "CST 2016",
            "sim_type": "E-farfield",
            "layout": "1 antenna",
            "port_num": 1,
        }
        beam1 = cst_efield_2freq
        beam2 = UVBeam()
        beam2.read_cst_beam(test_yaml_file, beam_type="efield")
        assert beam1 == beam2
        assert beam2.reference_impedance == 100
        assert beam2.extra_keywords == extra_keywords
        # also test with frequency_select
        beam1 = cst_efield_1freq
        beam2.read_cst_beam(test_yaml_file, beam_type="efield", frequency_select=[150e6])
        assert beam1 == beam2
    finally:
        # clean up the temporary settings file even on assertion failure
        os.remove(test_yaml_file)
def test_read_yaml_multi_pol(tmp_path):
    """A yaml listing the same file/frequency twice with feed_pol ["x", "y"]
    reads the same as passing the duplicated file list directly."""
    pytest.importorskip("yaml")
    # make yaml for one freq, 2 pols
    import yaml
    # copy the beam files to the tmp directory so that it can read them
    # when the yaml is stored there
    for fname in cst_files:
        shutil.copy2(src=fname, dst=tmp_path)
    test_yaml_file = str(tmp_path / "test_cst_settings.yaml")
    with open(cst_yaml_file, "r") as file:
        settings_dict = yaml.safe_load(file)
    # duplicate the first file/frequency so each feed_pol gets one entry
    settings_dict["feed_pol"] = ["x", "y"]
    first_file = settings_dict["filenames"][0]
    settings_dict["filenames"] = [first_file, first_file]
    first_freq = settings_dict["frequencies"][0]
    settings_dict["frequencies"] = [first_freq, first_freq]
    with open(test_yaml_file, "w") as outfile:
        yaml.dump(settings_dict, outfile, default_flow_style=False)
    beam1 = UVBeam()
    beam2 = UVBeam()
    extra_keywords = {
        "software": "CST 2016",
        "sim_type": "E-farfield",
        "layout": "1 antenna",
        "port_num": 1,
    }
    # beam1: read the duplicated file list directly (one warning per file)
    with uvtest.check_warnings(
        UserWarning, "No frequency provided. Detected frequency is", nwarnings=2,
    ):
        beam1.read_cst_beam(
            [cst_files[0], cst_files[0]],
            beam_type="efield",
            feed_pol=["x", "y"],
            telescope_name="HERA",
            feed_name="Dipole",
            feed_version="1.0",
            model_name="Dipole - Rigging height 4.9 m",
            model_version="1.0",
            x_orientation="east",
            reference_impedance=100,
            history="Derived from https://github.com/Nicolas-Fagnoni/Simulations."
            "\nOnly 2 files included to keep test data volume low.",
            extra_keywords=extra_keywords,
        )
    # beam2: read the equivalent settings yaml; results must match
    beam2.read_cst_beam(test_yaml_file, beam_type="efield")
    assert beam1 == beam2
    # also test with frequency_select
    beam2.read_cst_beam(test_yaml_file, beam_type="efield", frequency_select=[150e6])
    assert beam2.feed_array.tolist() == ["x", "y"]
    assert beam1 == beam2
    os.remove(test_yaml_file)
def test_read_yaml_errors(tmp_path):
    """A settings yaml missing a required key raises a ValueError."""
    pytest.importorskip("yaml")
    # test error if required key is not present in yaml file
    import yaml
    test_yaml_file = str(tmp_path / "test_cst_settings.yaml")
    with open(cst_yaml_file, "r") as file:
        settings_dict = yaml.safe_load(file)
    # drop a required key before re-writing the yaml
    settings_dict.pop("telescope_name")
    with open(test_yaml_file, "w") as outfile:
        yaml.dump(settings_dict, outfile, default_flow_style=False)
    beam1 = UVBeam()
    with pytest.raises(
        ValueError,
        match=(
            "telescope_name is a required key in CST settings files but is "
            "not present."
        ),
    ):
        beam1.read_cst_beam(test_yaml_file, beam_type="power")
    os.remove(test_yaml_file)
def test_read_power(cst_power_2freq):
    """Read two-frequency power beams: check shape, peak value, and that the
    second feed is an azimuthally rotated copy of the first."""
    beam2 = UVBeam()
    beam1 = cst_power_2freq
    assert beam1.pixel_coordinate_system == "az_za"
    assert beam1.beam_type == "power"
    assert beam1.data_array.shape == (1, 1, 2, 2, 181, 360)
    # peak value comes from the fixture data files
    assert np.max(beam1.data_array) == 8275.5409
    # feed 0 at az=0 should match feed 1 at az=pi/2 (rotated copy)
    assert np.allclose(
        beam1.data_array[:, :, 0, :, :, np.where(beam1.axis1_array == 0)[0]],
        beam1.data_array[:, :, 1, :, :, np.where(beam1.axis1_array == np.pi / 2.0)[0]],
    )
    # test passing in other polarization
    beam2.read_cst_beam(
        np.array(cst_files),
        beam_type="power",
        frequency=np.array([150e6, 123e6]),
        feed_pol="y",
        telescope_name="TEST",
        feed_name="bob",
        feed_version="0.1",
        model_name="E-field pattern - Rigging height 4.9m",
        model_version="1.0",
    )
    assert np.allclose(beam1.freq_array, beam2.freq_array)
    assert np.allclose(beam2.polarization_array, np.array([-6, -5]))
    assert np.allclose(
        beam1.data_array[:, :, 0, :, :, :], beam2.data_array[:, :, 0, :, :, :]
    )
def test_read_power_single_freq(cst_power_1freq):
    """Read a single-frequency power beam, with and without polarization rotation."""
    # test single frequency
    beam2 = UVBeam()
    beam1 = cst_power_1freq
    assert beam1.freq_array == [150e6]
    assert beam1.pixel_coordinate_system == "az_za"
    assert beam1.beam_type == "power"
    assert beam1.data_array.shape == (1, 1, 2, 1, 181, 360)
    # test single frequency and not rotating the polarization
    with uvtest.check_warnings(
        UserWarning, "No frequency provided. Detected frequency is"
    ):
        beam2.read_cst_beam(
            cst_files[0],
            beam_type="power",
            telescope_name="TEST",
            feed_name="bob",
            feed_version="0.1",
            model_name="E-field pattern - Rigging height 4.9m",
            model_version="1.0",
            rotate_pol=False,
        )
    assert beam2.freq_array == [150e6]
    assert beam2.pixel_coordinate_system == "az_za"
    assert beam2.beam_type == "power"
    # with rotate_pol=False only one polarization is present
    assert beam2.polarization_array == np.array([-5])
    assert beam2.data_array.shape == (1, 1, 1, 1, 181, 360)
    assert np.allclose(beam1.data_array[:, :, 0, :, :, :], beam2.data_array)
def test_read_power_multi_pol():
    """Read multiple same-file polarization files and cross-polarization files."""
    # test reading in multiple polarization files
    beam1 = UVBeam()
    beam2 = UVBeam()
    beam1.read_cst_beam(
        [cst_files[0], cst_files[0]],
        beam_type="power",
        frequency=[150e6],
        feed_pol=np.array(["xx", "yy"]),
        telescope_name="TEST",
        feed_name="bob",
        feed_version="0.1",
        model_name="E-field pattern - Rigging height 4.9m",
        model_version="1.0",
    )
    assert beam1.data_array.shape == (1, 1, 2, 1, 181, 360)
    # same file for both pols, so the data must be identical
    assert np.allclose(
        beam1.data_array[:, :, 0, :, :, :], beam1.data_array[:, :, 1, :, :, :]
    )
    # test reading in cross polarization files
    beam2.read_cst_beam(
        [cst_files[0]],
        beam_type="power",
        frequency=[150e6],
        feed_pol=np.array(["xy"]),
        telescope_name="TEST",
        feed_name="bob",
        feed_version="0.1",
        model_name="E-field pattern - Rigging height 4.9m",
        model_version="1.0",
    )
    # cross pols come back as the (-7, -8) polarization pair
    assert np.allclose(beam2.polarization_array, np.array([-7, -8]))
    assert beam2.data_array.shape == (1, 1, 2, 1, 181, 360)
    assert np.allclose(
        beam1.data_array[:, :, 0, :, :, :], beam2.data_array[:, :, 0, :, :, :]
    )
def test_read_errors():
    """Every invalid files/frequency/feed_pol combination raises ValueError.

    Refactor: the original repeated ten near-identical ``pytest.raises`` calls;
    the shared keyword arguments are hoisted and the invalid combinations are
    driven from a table. Each entry is (files, extra kwargs).
    """
    beam1 = UVBeam()
    common = {
        "beam_type": "power",
        "telescope_name": "TEST",
        "feed_name": "bob",
        "feed_version": "0.1",
        "model_name": "E-field pattern - Rigging height 4.9m",
        "model_version": "1.0",
    }
    bad_inputs = [
        # more frequencies than files
        (cst_files, {"frequency": [150e6, 123e6, 100e6]}),
        # one file, two frequencies
        (cst_files[0], {"frequency": [150e6, 123e6]}),
        # three files, two feed_pols
        ([cst_files[0], cst_files[0], cst_files[0]], {"feed_pol": ["x", "y"]}),
        # one file, two feed_pols
        (cst_files[0], {"feed_pol": ["x", "y"]}),
        # nested file list
        (
            [[cst_files[0]], [cst_files[1]]],
            {"frequency": [150e6, 123e6], "feed_pol": ["x"]},
        ),
        # 2-D file array
        (
            np.array([[cst_files[0]], [cst_files[1]]]),
            {"frequency": [150e6, 123e6], "feed_pol": ["x"]},
        ),
        # nested frequency list
        (cst_files, {"frequency": [[150e6], [123e6]]}),
        # 2-D frequency array
        (cst_files, {"frequency": np.array([[150e6], [123e6]])}),
        # nested feed_pol list
        (cst_files, {"feed_pol": [["x"], ["y"]], "frequency": 150e6}),
        # 2-D feed_pol array
        (cst_files, {"feed_pol": np.array([["x"], ["y"]]), "frequency": 150e6}),
    ]
    for files, extra in bad_inputs:
        with pytest.raises(ValueError):
            beam1.read_cst_beam(files, **common, **extra)
def test_read_efield(cst_efield_2freq):
    """Read efield beams: alternate feed_pol, rotate_pol=False, and multi-pol files."""
    beam1 = cst_efield_2freq
    beam2 = UVBeam()
    assert beam1.pixel_coordinate_system == "az_za"
    assert beam1.beam_type == "efield"
    assert beam1.data_array.shape == (2, 1, 2, 2, 181, 360)
    # peak amplitude comes from the fixture data files
    assert np.max(np.abs(beam1.data_array)) == 90.97
    # test passing in other polarization
    beam2.read_cst_beam(
        cst_files,
        beam_type="efield",
        frequency=[150e6, 123e6],
        feed_pol="y",
        telescope_name="TEST",
        feed_name="bob",
        feed_version="0.1",
        model_name="E-field pattern - Rigging height 4.9m",
        model_version="1.0",
    )
    # feed_pol="y" puts "y" first, rotated copy as "x"
    assert beam2.feed_array[0] == "y"
    assert beam2.feed_array[1] == "x"
    assert beam1.data_array.shape == (2, 1, 2, 2, 181, 360)
    assert np.allclose(
        beam1.data_array[:, :, 0, :, :, :], beam2.data_array[:, :, 0, :, :, :]
    )
    # test single frequency and not rotating the polarization
    with uvtest.check_warnings(
        UserWarning, "No frequency provided. Detected frequency is"
    ):
        beam2.read_cst_beam(
            cst_files[0],
            beam_type="efield",
            telescope_name="TEST",
            feed_name="bob",
            feed_version="0.1",
            model_name="E-field pattern - Rigging height 4.9m",
            model_version="1.0",
            rotate_pol=False,
        )
    assert beam2.pixel_coordinate_system == "az_za"
    assert beam2.beam_type == "efield"
    assert beam2.feed_array == np.array(["x"])
    assert beam2.data_array.shape == (2, 1, 1, 1, 181, 360)
    assert np.allclose(
        beam1.data_array[:, :, 0, 1, :, :], beam2.data_array[:, :, 0, 0, :, :]
    )
    # test reading in multiple polarization files
    beam1.read_cst_beam(
        [cst_files[0], cst_files[0]],
        beam_type="efield",
        frequency=[150e6],
        feed_pol=["x", "y"],
        telescope_name="TEST",
        feed_name="bob",
        feed_version="0.1",
        model_name="E-field pattern - Rigging height 4.9m",
        model_version="1.0",
    )
    assert beam1.data_array.shape == (2, 1, 2, 1, 181, 360)
    # same file for both pols, so the data must be identical
    assert np.allclose(
        beam1.data_array[:, :, 0, :, :, :], beam1.data_array[:, :, 1, :, :, :]
    )
def test_no_deg_units(tmp_path):
    """Headers without degree units must be handled (values read as radians).

    Rewrites a fixture beam file with the degree units stripped from the
    header and checks the various failure modes (degrees mistaken for
    radians, insufficient precision, data not on a regular grid) as well as
    the success case once the data are converted to radians with enough
    significant figures.

    Refactor: the original repeated four byte-identical ``pytest.raises``
    blocks and five near-identical ``np.savetxt`` calls; both are hoisted
    into local helpers.
    """
    testfile = str(tmp_path / "HERA_NicCST_150MHz_modified.txt")
    with open(cst_files[0], "r") as file:
        line1 = file.readline()
        line2 = file.readline()
    data = np.loadtxt(cst_files[0], skiprows=2)
    # parse "Name [unit]" column headers from the first line
    raw_names = [raw_name for raw_name in line1.split("]") if "\n" not in raw_name]
    column_names = []
    column_names_simple = []
    units = []
    for raw_name in raw_names:
        column_name, unit = tuple(raw_name.split("["))
        column_names.append(column_name)
        column_names_simple.append("".join(column_name.lower().split(" ")))
        # strip the degree unit so the columns no longer declare degrees
        units.append(unit if unit != "deg." else " ")
    new_header = ""
    for name, unit in zip(column_names, units):
        new_header += "{:12}".format(name + "[" + unit + "]")
    beam1 = UVBeam()
    beam2 = UVBeam()

    def write_test_file(fmt):
        # rewrite the modified beam file with the given per-column formats
        np.savetxt(
            testfile, data, fmt=fmt, header=new_header + "\n" + line2, comments=""
        )

    def expect_read_failure():
        # reading the current modified file must raise ValueError
        pytest.raises(
            ValueError,
            beam1.read_cst_beam,
            testfile,
            beam_type="efield",
            frequency=np.array([150e6]),
            feed_pol="y",
            telescope_name="TEST",
            feed_name="bob",
            feed_version="0.1",
            model_name="E-field pattern - Rigging height 4.9m",
            model_version="1.0",
        )

    # format to match existing file
    existing_format = [
        "%8.3f",
        "%15.3f",
        "%20.3e",
        "%19.3e",
        "%19.3f",
        "%19.3e",
        "%19.3f",
        "%19.3e",
    ]
    write_test_file(existing_format)
    # this errors because the phi 2pi rotation doesn't work
    # (because they are degrees but the code thinks they're radians)
    expect_read_failure()
    # convert the angle and phase columns from degrees to radians
    theta_col = np.where(np.array(column_names_simple) == "theta")[0][0]
    phi_col = np.where(np.array(column_names_simple) == "phi")[0][0]
    theta_phase_col = np.where(np.array(column_names_simple) == "phase(theta)")[0][0]
    phi_phase_col = np.where(np.array(column_names_simple) == "phase(phi)")[0][0]
    for col in (theta_col, phi_col, theta_phase_col, phi_phase_col):
        data[:, col] = np.radians(data[:, col])
    write_test_file(existing_format)
    # this errors because theta isn't regularly gridded (too few sig figs)
    expect_read_failure()
    # use more decimal places for theta so that it is regularly gridded
    write_test_file(
        ["%15.12e", "%15.3e", "%20.3e", "%19.3e", "%19.3f", "%19.3e", "%19.3f", "%19.3e"]
    )
    # this errors because phi isn't regularly gridded (too few sig figs)
    expect_read_failure()
    # use more decimal places so that it is regularly gridded and matches data
    final_format = [
        "%15.12e",
        "%15.12e",
        "%20.3e",
        "%19.3e",
        "%19.12f",
        "%19.3e",
        "%19.12f",
        "%19.3e",
    ]
    write_test_file(final_format)
    with uvtest.check_warnings(
        UserWarning, "No frequency provided. Detected frequency is"
    ):
        beam1.read_cst_beam(
            cst_files[0],
            beam_type="efield",
            telescope_name="TEST",
            feed_name="bob",
            feed_version="0.1",
            model_name="E-field pattern - Rigging height 4.9m",
            model_version="1.0",
        )
    with uvtest.check_warnings(
        UserWarning, "No frequency provided. Detected frequency is"
    ):
        beam2.read_cst_beam(
            testfile,
            beam_type="efield",
            telescope_name="TEST",
            feed_name="bob",
            feed_version="0.1",
            model_name="E-field pattern - Rigging height 4.9m",
            model_version="1.0",
        )
    assert beam1 == beam2
    # remove a row to make data not on a grid to catch that error
    data = data[1:, :]
    write_test_file(final_format)
    # this errors because theta & phi aren't on a strict grid
    expect_read_failure()
def test_wrong_column_names(tmp_path):
    """Headers with no, or multiple, recognized power columns raise ValueError.

    Refactor: the original built two full header-name lists and duplicated the
    savetxt/raises sequence; both are expressed via local helpers. The unused
    ``column_names_simple`` list from the original is dropped.
    """
    # need to write modified files to test headers with wrong column names
    testfile = str(tmp_path / "HERA_NicCST_150MHz_modified.txt")
    with open(cst_files[0], "r") as file:
        line1 = file.readline()
        line2 = file.readline()
    data = np.loadtxt(cst_files[0], skiprows=2)
    # parse "Name [unit]" column headers from the first line
    raw_names = [raw_name for raw_name in line1.split("]") if "\n" not in raw_name]
    column_names = []
    units = []
    for raw_name in raw_names:
        column_name, unit = tuple(raw_name.split("["))
        column_names.append(column_name)
        units.append(unit)
    beam1 = UVBeam()
    # format to match existing file
    existing_format = [
        "%8.3f",
        "%15.3f",
        "%20.3e",
        "%19.3e",
        "%19.3f",
        "%19.3e",
        "%19.3f",
        "%19.3e",
    ]

    def header_with_renamed_column(old, new):
        # build the header line with one column name swapped out
        header = ""
        for name, unit in zip(column_names, units):
            out_name = new if name.strip() == old else name
            header += "{:12}".format(out_name + "[" + unit + "]")
        return header

    def write_and_expect_failure(header):
        # write the modified file, then check that reading it raises ValueError
        np.savetxt(
            testfile,
            data,
            fmt=existing_format,
            header=header + "\n" + line2,
            comments="",
        )
        pytest.raises(
            ValueError,
            beam1.read_cst_beam,
            testfile,
            beam_type="power",
            frequency=np.array([150e6]),
            telescope_name="TEST",
            feed_name="bob",
            feed_version="0.1",
            model_name="E-field pattern - Rigging height 4.9m",
            model_version="1.0",
        )

    # this errors because there's no recognized power column
    write_and_expect_failure(header_with_renamed_column("Abs(V )", "Power"))
    # this errors because there's multiple recognized power columns
    write_and_expect_failure(header_with_renamed_column("Abs(Theta)", "Abs(E )"))
def test_hera_yaml():
    """Read the Vivaldi yaml as efield and power and check they agree within
    the precision of the stored data."""
    pytest.importorskip("yaml")
    beam1 = UVBeam()
    beam2 = UVBeam()
    beam1.read_cst_beam(cst_yaml_vivaldi, beam_type="efield", frequency_select=[150e6])
    assert beam1.reference_impedance == 100
    extra_keywords = {
        "software": "CST 2016",
        "sim_type": "E-farfield",
        "layout": "1 antenna",
        "port_num": 1,
    }
    assert beam1.extra_keywords == extra_keywords
    beam2.read_cst_beam(cst_yaml_vivaldi, beam_type="power", frequency_select=[150e6])
    # convert the efield beam to power for comparison with the power read
    beam1.efield_to_power(calc_cross_pols=False)
    # The values in the beam file only have 4 sig figs, so they don't match precisely
    diff = np.abs(beam1.data_array - beam2.data_array)
    assert np.max(diff) < 2
    reldiff = diff / beam2.data_array
    assert np.max(reldiff) < 0.002
    # set data_array tolerances higher to test the rest of the object
    # tols are (relative, absolute)
    tols = [0.002, 0]
    beam1._data_array.tols = tols
    assert beam1.history != beam2.history
    beam1.history = beam2.history
    assert beam1 == beam2
| 29.15027
| 87
| 0.600801
| 3,465
| 26,964
| 4.443001
| 0.101299
| 0.021825
| 0.026437
| 0.035076
| 0.802338
| 0.739721
| 0.712114
| 0.686132
| 0.652095
| 0.628191
| 0
| 0.04758
| 0.268877
| 26,964
| 924
| 88
| 29.181818
| 0.733337
| 0.068091
| 0
| 0.700134
| 0
| 0
| 0.134242
| 0.013791
| 0
| 0
| 0
| 0
| 0.084337
| 1
| 0.024096
| false
| 0
| 0.024096
| 0
| 0.048193
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
045df62667f9612b01ce9b6a2b80713e18004b07
| 81
|
py
|
Python
|
Lab_2/special_sum.py
|
drozdms/PythonProjects
|
b199516fad70710f2deb432e48704ccfbe051029
|
[
"MIT"
] | null | null | null |
Lab_2/special_sum.py
|
drozdms/PythonProjects
|
b199516fad70710f2deb432e48704ccfbe051029
|
[
"MIT"
] | null | null | null |
Lab_2/special_sum.py
|
drozdms/PythonProjects
|
b199516fad70710f2deb432e48704ccfbe051029
|
[
"MIT"
] | null | null | null |
def calculate_special_sum(n):
    """Return the sum of x**2 * (x + 1) for x in 0..n-1 (0 when n <= 0)."""
    total = 0
    for x in range(n):
        total += x * x * (x + 1)
    return total
| 27
| 50
| 0.617284
| 16
| 81
| 3
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030769
| 0.197531
| 81
| 2
| 51
| 40.5
| 0.707692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
f0e5898fcd7588e492adc38f296288254119b1d2
| 413
|
py
|
Python
|
stan/proc_functions/merge.py
|
chappers/Stan
|
61c189ab12ea50214390804cff5694ac51f8df35
|
[
"MIT"
] | 1
|
2015-01-06T11:10:24.000Z
|
2015-01-06T11:10:24.000Z
|
stan/proc_functions/merge.py
|
chappers/Stan
|
61c189ab12ea50214390804cff5694ac51f8df35
|
[
"MIT"
] | null | null | null |
stan/proc_functions/merge.py
|
chappers/Stan
|
61c189ab12ea50214390804cff5694ac51f8df35
|
[
"MIT"
] | null | null | null |
"""
The :mod:`stan.proc_functions.merge` module is the proc merge function
"""
def merge(dt_left, dt_right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True):
    """Merge ``dt_right`` into ``dt_left`` (proc-merge style wrapper around
    ``DataFrame.merge``).

    Bug fix: the original body ignored every argument and passed hard-coded
    defaults (``how='inner'``, ``on=None``, ...) to ``DataFrame.merge``, so
    callers could never change the join type, keys, sorting, or suffixes.
    All parameters are now forwarded.
    """
    return dt_left.merge(
        dt_right,
        how=how,
        on=on,
        left_on=left_on,
        right_on=right_on,
        left_index=left_index,
        right_index=right_index,
        sort=sort,
        suffixes=suffixes,
        copy=copy,
    )
| 59
| 168
| 0.726392
| 70
| 413
| 4.042857
| 0.371429
| 0.127208
| 0.141343
| 0.106007
| 0.69258
| 0.69258
| 0.69258
| 0.69258
| 0.69258
| 0.69258
| 0
| 0
| 0.096852
| 413
| 6
| 169
| 68.833333
| 0.758713
| 0.169492
| 0
| 0
| 0
| 0
| 0.053731
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
f0f19e49a7b1bc559e5f859c12a6cfd9b2a2a8a8
| 25,531
|
py
|
Python
|
tests/helpers/test_reports_helpers.py
|
sanger/lighthouse
|
ded48ed20829301f7a131e5711532ee237982050
|
[
"MIT"
] | 1
|
2021-11-14T19:57:03.000Z
|
2021-11-14T19:57:03.000Z
|
tests/helpers/test_reports_helpers.py
|
sanger/lighthouse
|
ded48ed20829301f7a131e5711532ee237982050
|
[
"MIT"
] | 496
|
2020-04-23T07:45:53.000Z
|
2022-03-31T14:33:01.000Z
|
tests/helpers/test_reports_helpers.py
|
sanger/lighthouse
|
ded48ed20829301f7a131e5711532ee237982050
|
[
"MIT"
] | 3
|
2020-11-01T23:41:08.000Z
|
2021-02-23T16:09:40.000Z
|
import os
from datetime import datetime, timedelta
from shutil import copy
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
from lighthouse.constants.fields import (
FIELD_COORDINATE,
FIELD_PLATE_BARCODE,
FIELD_RESULT,
FIELD_ROOT_SAMPLE_ID,
FIELD_SOURCE,
)
from lighthouse.helpers.reports import (
add_cherrypicked_column,
delete_reports,
get_cherrypicked_samples,
get_distinct_plate_barcodes,
get_fit_to_pick_samples,
get_new_report_name_and_path,
report_query_window_start,
unpad_coordinate,
)
# ----- get_new_report_name_and_path tests -----
def test_get_new_report_name_and_path(app, freezer):
    """The report name embeds the (frozen) current timestamp."""
    timestamp = datetime.now().strftime("%y%m%d_%H%M")
    with app.app_context():
        name, _ = get_new_report_name_and_path()
    assert name == f"{timestamp}_fit_to_pick_with_locations.xlsx"
# ----- unpad_coordinate tests -----
def test_unpad_coordinate_A01():
    """A leading zero in the column index is stripped."""
    result = unpad_coordinate("A01")
    assert result == "A1"
def test_unpad_coordinate_A1():
    """An already-unpadded coordinate is returned unchanged."""
    result = unpad_coordinate("A1")
    assert result == "A1"
def test_unpad_coordinate_A10():
    """A trailing zero that is not padding is preserved."""
    result = unpad_coordinate("A10")
    assert result == "A10"
def test_unpad_coordinate_B01010():
    """Only the leading zero of the numeric part is stripped."""
    result = unpad_coordinate("B01010")
    assert result == "B1010"
# ----- delete_reports tests -----
def test_delete_reports(app):
    """delete_reports removes every named report file from the reports dir.

    Fix: the visible text had the literal "(unknown)" where the per-file
    path segment belongs, leaving the loop variable ``filename`` unused and
    copying/checking the same non-file repeatedly; the f-strings now
    interpolate ``filename`` as the loop clearly intends.
    """
    copies_of_reports_folder = "tests/data/reports_copies"
    filenames = [
        "200716_1345_positives_with_locations.xlsx",
        "200716_1618_positives_with_locations.xlsx",
        "200716_1640_positives_with_locations.xlsx",
        "200716_1641_fit_to_pick_with_locations.xlsx",
        "200716_1642_fit_to_pick_with_locations.xlsx",
    ]
    # copy each fixture report into the live reports directory
    for filename in filenames:
        copy(f"{copies_of_reports_folder}/{filename}", f"{app.config['REPORTS_DIR']}/{filename}")
    with app.app_context():
        delete_reports(filenames)
    for filename in filenames:
        assert os.path.isfile(f"{app.config['REPORTS_DIR']}/{filename}") is False
# ----- get_cherrypicked_samples tests -----
def test_get_cherrypicked_samples_test_db_connection_close(app):
    """
    Test Scenario
    - Check that connection is close when we call get_cherrypicked_samples
    """
    samples = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005"]
    plate_barcodes = ["123", "456"]
    with app.app_context(), patch("sqlalchemy.create_engine") as engine_mock:
        connection_mock = Mock()
        engine_mock().connect.return_value = connection_mock
        get_cherrypicked_samples(samples, plate_barcodes)
        connection_mock.close.assert_called_once()
def test_get_cherrypicked_samples_test_db_connection_close_on_exception(app):
    """
    Test Scenario
    - Check that connection is close when we call get_cherrypicked_samples
    """
    samples = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005"]
    plate_barcodes = ["123", "456"]
    with app.app_context(), patch("sqlalchemy.create_engine") as engine_mock, patch(
        "pandas.read_sql",
        side_effect=Exception("Boom!"),
    ):
        connection_mock = Mock()
        engine_mock().connect.return_value = connection_mock
        get_cherrypicked_samples(samples, plate_barcodes)
        connection_mock.close.assert_called_once()
# Test Scenario
# - Mocking database responses
# - Only the Sentinel query returns matches (No Beckman)
# - No chunking: a single query is made in which all matches are returned
# - No duplication of returned matches
def test_get_cherrypicked_samples_no_beckman(app):
    """A single un-chunked Sentinel query: its matches are returned verbatim."""
    mock_responses = [
        # Cherrypicking query response
        pd.DataFrame(
            ["MCM001", "MCM003", "MCM005"], columns=[FIELD_ROOT_SAMPLE_ID], index=[0, 1, 2]
        ),
    ]
    samples = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005"]
    plate_barcodes = ["123", "456"]
    with app.app_context():
        with patch("sqlalchemy.create_engine", return_value=Mock()):
            with patch("pandas.read_sql", side_effect=mock_responses):
                result = get_cherrypicked_samples(samples, plate_barcodes)
                for row, sample_id in enumerate(["MCM001", "MCM003", "MCM005"]):
                    assert result.at[row, FIELD_ROOT_SAMPLE_ID] == sample_id
# Test Scenario
# - Mocking database responses
# - Only the Sentinel queries return matches (No Beckman)
# - Chunking: multiple queries are made, with all matches contained in the sum of these queries
# - No duplication of returned matches
def test_get_cherrypicked_samples_chunking_no_beckman(app):
    """Chunked Sentinel queries are concatenated into one result frame."""
    # Note: This represents the results of three different (Sentinel, Beckman) sets of
    # database queries, each Sentinel query getting indexed from 0. Do not change the
    # indices here unless you have modified the behaviour of the query.
    chunk_responses = [
        pd.DataFrame([sample], columns=[FIELD_ROOT_SAMPLE_ID], index=[0])
        for sample in ("MCM001", "MCM003", "MCM005")
    ]
    expected = pd.DataFrame(
        ["MCM001", "MCM003", "MCM005"], columns=[FIELD_ROOT_SAMPLE_ID], index=[0, 1, 2]
    )
    samples = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005"]
    plate_barcodes = ["123", "456"]
    with app.app_context():
        with patch("sqlalchemy.create_engine", return_value=Mock()):
            with patch("pandas.read_sql", side_effect=chunk_responses):
                returned_samples = get_cherrypicked_samples(samples, plate_barcodes, 2)
                pd.testing.assert_frame_equal(expected, returned_samples)
# Test Scenario
# - Actual database responses
# - Only the Sentinel queries return matches (No Beckman)
# - Chunking: multiple queries are made, with all matches contained in the sum of these queries
# - Duplication of returned matches across different chunks: duplicates should be filtered out
def test_get_cherrypicked_samples_repeat_tests_no_beckman(app, mlwh_sentinel_cherrypicked, event_wh_data):
    """Duplicate Sentinel matches across chunks are filtered from the returned frame."""
    # the following come from MLWH_SAMPLE_STOCK_RESOURCE in fixture_data
    root_sample_ids = ["root_1", "root_2", "root_1"]
    plate_barcodes = ["pb_1", "pb_2", "pb_3"]
    # root_1 will match 2 samples, but only one of those will match an event (on Sanger Sample Id)
    # therefore we only get 1 of the samples called 'root_1' back (the one on plate 'pb_1')
    # this also checks we don't get a duplicate row for root_1 / pb_1, despite it cropping up in 2
    # different 'chunks'
    expected_rows = [["root_1", "pb_1", "positive", "A1"], ["root_2", "pb_2", "positive", "A1"]]
    expected_columns = [FIELD_ROOT_SAMPLE_ID, FIELD_PLATE_BARCODE, "Result_lower", FIELD_COORDINATE]
    expected = pd.DataFrame(np.array(expected_rows), columns=expected_columns, index=[0, 1])
    with app.app_context():
        chunk_size = 2
        returned_samples = get_cherrypicked_samples(root_sample_ids, plate_barcodes, chunk_size)
        # Fix: removed a leftover debug print of the returned frame.
        pd.testing.assert_frame_equal(expected, returned_samples)
# Test Scenario
# - Mocking database responses
# - Only the Beckman query returns matches (No Sentinel)
# - No chunking: a single query is made in which all matches are returned
# - No duplication of returned matches
def test_get_cherrypicked_samples_no_sentinel(app):
    """A single Beckman-only query round yields exactly the three matched samples."""
    expected = [
        pd.DataFrame(
            ["MCM001", "MCM003", "MCM005"], columns=[FIELD_ROOT_SAMPLE_ID], index=[0, 1, 2]
        ),  # Cherrypicking query response
    ]
    samples = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005"]
    plate_barcodes = ["123", "456"]
    with app.app_context():
        with patch("sqlalchemy.create_engine", return_value=Mock()):
            with patch(
                "pandas.read_sql",
                side_effect=expected,
            ):
                returned_samples = get_cherrypicked_samples(samples, plate_barcodes)
                # Fix: also pin the row count so unexpected extra rows cannot pass unnoticed.
                assert len(returned_samples.index) == 3
                assert returned_samples.at[0, FIELD_ROOT_SAMPLE_ID] == "MCM001"
                assert returned_samples.at[1, FIELD_ROOT_SAMPLE_ID] == "MCM003"
                assert returned_samples.at[2, FIELD_ROOT_SAMPLE_ID] == "MCM005"
# Test Scenario
# - Mocking database responses
# - Only the Beckman queries return matches (No Sentinel)
# - Chunking: multiple queries are made, with all matches contained in the sum of these queries
# - No duplication of returned matches
def test_get_cherrypicked_samples_chunking_no_sentinel(app):
    """Chunked Beckman-only results are concatenated into one frame in query order."""
    # Note: three (Sentinel, Beckman) query rounds are simulated; each result frame is
    # indexed from 0. Keep these indices unless the query behaviour has been modified.
    per_chunk_ids = ("MCM001", "MCM003", "MCM005")
    query_results = [
        pd.DataFrame([chunk_id], columns=[FIELD_ROOT_SAMPLE_ID], index=[0]) for chunk_id in per_chunk_ids
    ]
    expected = pd.DataFrame(list(per_chunk_ids), columns=[FIELD_ROOT_SAMPLE_ID], index=[0, 1, 2])
    all_samples = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005"]
    barcodes = ["123", "456"]
    with app.app_context():
        with patch("sqlalchemy.create_engine", return_value=Mock()):
            with patch("pandas.read_sql", side_effect=query_results):
                result = get_cherrypicked_samples(all_samples, barcodes, 2)
                pd.testing.assert_frame_equal(expected, result)
# Test Scenario
# - Actual database responses
# - Only the Beckman queries return matches (No Sentinel)
# - Chunking: multiple queries are made, with all matches contained in the sum of these queries
# - Duplication of returned matches across different chunks: duplicates should be filtered out
def test_get_cherrypicked_samples_repeat_tests_no_sentinel(app, mlwh_beckman_cherrypicked, event_wh_data):
    """Duplicate Beckman matches across chunks are filtered from the returned frame."""
    # the following come from MLWH_SAMPLE_LIGHTHOUSE_SAMPLE in fixture_data
    root_sample_ids = ["root_4", "root_5", "root_4"]
    plate_barcodes = ["pb_4", "pb_5", "pb_6"]
    # root_4 will match 2 samples, but only one of those will match an event (on sample uuid)
    # therefore we only get 1 of the samples called 'root_4' back (the one on plate 'pb_4')
    # this also checks we don't get a duplicate row for root_4 / pb_4, despite it cropping up in 2
    # different 'chunks'
    expected = pd.DataFrame(
        np.array([["root_4", "pb_4", "positive", "A1"], ["root_5", "pb_5", "positive", "A1"]]),
        columns=[FIELD_ROOT_SAMPLE_ID, FIELD_PLATE_BARCODE, "Result_lower", FIELD_COORDINATE],
        index=[0, 1],
    )
    with app.app_context():
        returned = get_cherrypicked_samples(root_sample_ids, plate_barcodes, 2)
        # The view could return rows in a different order; sort and re-index so the
        # result is comparable with our expected frame.
        sorted_returned = returned.sort_values(by=FIELD_ROOT_SAMPLE_ID, ignore_index=True)
        pd.testing.assert_frame_equal(expected, sorted_returned)
# Test Scenario
# - Mocking database responses
# - Both Sentinel and Beckman queries return matches
# - No chunking: a single query is made (per workflow) in which all matches are returned
# - Duplication of returned matches across different workflows: duplicates should be filtered out
def test_get_cherrypicked_samples_sentinel_and_beckman(app):
    """A sample matched by both workflows appears exactly once in the returned frame."""
    expected = [
        pd.DataFrame(
            [
                # Sentinel
                "MCM001",
                "MCM006",
                # Beckman
                "MCM001",
                "MCM003",
                "MCM005",
            ],
            columns=[FIELD_ROOT_SAMPLE_ID],
            index=[0, 1, 2, 3, 4],
        ),  # Cherrypicking query response
    ]
    samples = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005", "MCM006"]
    plate_barcodes = ["123", "456"]
    with app.app_context():
        with patch("sqlalchemy.create_engine", return_value=Mock()):
            with patch(
                "pandas.read_sql",
                side_effect=expected,
            ):
                returned_samples = get_cherrypicked_samples(samples, plate_barcodes)
                # Fix: also pin the row count so a failed de-duplication (which would
                # leave the duplicated "MCM001" as a fifth row) cannot slip through.
                assert len(returned_samples.index) == 4
                assert returned_samples.at[0, FIELD_ROOT_SAMPLE_ID] == "MCM001"
                assert returned_samples.at[1, FIELD_ROOT_SAMPLE_ID] == "MCM006"
                assert returned_samples.at[2, FIELD_ROOT_SAMPLE_ID] == "MCM003"
                assert returned_samples.at[3, FIELD_ROOT_SAMPLE_ID] == "MCM005"
# Test Scenario
# - Mocking database responses
# - Both Sentinel and Beckman queries return matches
# - Chunking: multiple queries are made (per workflow), with all matches contained in the sum
# - Duplication of returned matches across different workflows: duplicates should be filtered out
def test_get_cherrypicked_samples_chunking_sentinel_and_beckman(app):
    """Chunked two-workflow results are combined and de-duplicated across workflows."""
    # Note: each inner list mimics one (Sentinel, Beckman) round of database queries,
    # with every result frame indexed from 0. Do not change the indices here unless
    # you have modified the behaviour of the query.
    chunk_ids = [
        # first entry of each chunk is the Sentinel match; the rest are Beckman matches
        ["MCM001", "MCM001", "MCM002"],
        ["MCM003", "MCM003", "MCM004"],
        ["MCM005", "MCM005", "MCM006"],
    ]
    query_results = [
        pd.DataFrame(ids, columns=[FIELD_ROOT_SAMPLE_ID], index=[0, 1, 2]) for ids in chunk_ids
    ]
    expected = pd.DataFrame(
        ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005", "MCM006"],
        columns=[FIELD_ROOT_SAMPLE_ID],
        index=[0, 1, 2, 3, 4, 5],
    )
    root_sample_ids = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005"]
    barcodes = ["123", "456"]
    with app.app_context():
        with patch("sqlalchemy.create_engine", return_value=Mock()):
            with patch("pandas.read_sql", side_effect=query_results):
                result = get_cherrypicked_samples(root_sample_ids, barcodes, 2)
                pd.testing.assert_frame_equal(expected, result)
# Test Scenario
# - Actual database responses
# - Both Sentinel and Beckman queries return matches
# - Chunking: multiple queries are made, with all matches contained in the sum of these queries
# - Duplication of returned matches across different chunks: duplicates should be filtered out
def test_get_cherrypicked_samples_repeat_tests_sentinel_and_beckman(
    app, freezer, mlwh_sentinel_and_beckman_cherrypicked, event_wh_data
):
    """Both workflows contribute matches; unmatched and duplicated samples are excluded."""
    # the following come from MLWH_SAMPLE_STOCK_RESOURCE and
    # MLWH_SAMPLE_LIGHTHOUSE_SAMPLE in fixture_data
    root_sample_ids = ["root_1", "root_2", "root_3", "root_4", "root_5", "root_1"]
    plate_barcodes = ["pb_1", "pb_2", "pb_3", "pb_4", "pb_5", "pb_6"]
    # root_1 will match 2 samples, but only one of those will match a Sentinel event (on pb_1)
    # root_2 will match a single sample with a matching Sentinel event (on pb_2)
    # root_3 will match 2 samples, but not match either a Sentinel or Beckman event
    # root_4 will match 2 samples, but only one of those will match a Beckman event (on pb_4)
    # root_5 will match a single sample with a matching Beckman event (on pb_5)
    # We also chunk to further test different scenarios
    matching_pairs = [("root_1", "pb_1"), ("root_2", "pb_2"), ("root_4", "pb_4"), ("root_5", "pb_5")]
    expected = pd.DataFrame(
        np.array([[sample_id, barcode, "positive", "A1"] for sample_id, barcode in matching_pairs]),
        columns=[FIELD_ROOT_SAMPLE_ID, FIELD_PLATE_BARCODE, "Result_lower", FIELD_COORDINATE],
        index=[0, 1, 2, 3],
    )
    with app.app_context():
        chunk_size = 2
        result = get_cherrypicked_samples(root_sample_ids, plate_barcodes, chunk_size)
        pd.testing.assert_frame_equal(expected, result)
# ----- get_fit_to_pick_samples tests -----
# (section header previously said 'get_all_positive_samples'; the helper under test
# is get_fit_to_pick_samples)
def test_get_fit_to_pick_samples(app, freezer, samples, priority_samples):
    """Fit-to-pick query over the fixture collections returns 7 samples with the expected fields."""
    with app.app_context():
        samples_collection = app.data.driver.db.samples
        fit_to_pick_samples = get_fit_to_pick_samples(samples_collection)
        # 7 fit-to-pick rows expected from the 'samples' + 'priority_samples' fixtures
        assert len(fit_to_pick_samples) == 7
        # spot-check every reported field on the first returned sample
        assert fit_to_pick_samples.at[0, FIELD_ROOT_SAMPLE_ID] == "sample_001"
        assert fit_to_pick_samples.at[0, FIELD_RESULT] == "Positive"
        assert fit_to_pick_samples.at[0, FIELD_SOURCE] == "centre_1"
        assert fit_to_pick_samples.at[0, FIELD_PLATE_BARCODE] == "plate_123"
        assert fit_to_pick_samples.at[0, FIELD_COORDINATE] == "A1"
        assert fit_to_pick_samples.at[0, "plate and well"] == "plate_123:A1"
        # remaining rows only spot-checked by root sample id
        assert fit_to_pick_samples.at[1, FIELD_ROOT_SAMPLE_ID] == "sample_002"
        assert fit_to_pick_samples.at[2, FIELD_ROOT_SAMPLE_ID] == "sample_101"
# ----- add_cherrypicked_column tests -----
def test_add_cherrypicked_column(app):
    """Rows matching a cherrypicked sample get 'Yes' in 'LIMS submission'; all others get 'No'."""
    # existing dataframe before 'add_cherrypicked_column' is run (essentially queried from MongoDB)
    mongo_columns = [FIELD_ROOT_SAMPLE_ID, FIELD_PLATE_BARCODE, "Lab ID", FIELD_RESULT, FIELD_COORDINATE]
    existing_dataframe = pd.DataFrame(
        [
            ["MCM001", "123", "TEST", "Positive", "A1"],
            ["MCM001", "456", "TEST", "Positive", "A1"],  # plate barcode differs from first sample
            ["MCM001", "123", "TEST", "Positive2", "A1"],  # result differs from first sample
            ["MCM001", "123", "TEST", "Positive", "A2"],  # coordinate differs from first sample
            ["MCM002", "123", "TEST", "Positive", "A1"],  # root sample id differs from first sample
        ],
        columns=mongo_columns,
    )
    # mock response from the 'get_cherrypicked_samples' method
    cherrypicked_frame = pd.DataFrame(
        np.array(
            [
                ["MCM001", "123", "positive", "A1"],  # matches first sample only
                ["MCM002", "123", "positive", "A1"],  # matches final sample only
            ]
        ),
        columns=[FIELD_ROOT_SAMPLE_ID, FIELD_PLATE_BARCODE, "Result_lower", FIELD_COORDINATE],
    )
    # output after merging existing_dataframe with the 'get_cherrypicked_samples' response;
    # rows with "Yes" correspond to rows returned by get_cherrypicked_samples
    expected_columns = mongo_columns + ["LIMS submission"]
    expected_data = [
        ["MCM001", "123", "TEST", "Positive", "A1", "Yes"],
        ["MCM001", "456", "TEST", "Positive", "A1", "No"],
        ["MCM001", "123", "TEST", "Positive2", "A1", "No"],
        ["MCM001", "123", "TEST", "Positive", "A2", "No"],
        ["MCM002", "123", "TEST", "Positive", "A1", "Yes"],
    ]
    with app.app_context():
        with patch("sqlalchemy.create_engine", return_value=Mock()):
            with patch(
                "lighthouse.helpers.reports.get_cherrypicked_samples",
                return_value=cherrypicked_frame,
            ):
                merged = add_cherrypicked_column(existing_dataframe)
                assert merged.columns.to_list() == expected_columns
                assert np.array_equal(merged.to_numpy(), expected_data)
def test_add_cherrypicked_column_duplicates(app):
    """Duplicates returned by 'get_cherrypicked_samples' propagate into the merged frame.

    Demonstrates that de-duping is the responsibility of 'get_cherrypicked_samples',
    not of 'add_cherrypicked_column'.
    """
    # existing dataframe before 'add_cherrypicked_column' is run (essentially queried from MongoDB)
    mongo_columns = [FIELD_ROOT_SAMPLE_ID, FIELD_PLATE_BARCODE, "Lab ID", FIELD_RESULT, FIELD_COORDINATE]
    existing_dataframe = pd.DataFrame(
        [["MCM001", "123", "TEST", "Positive", "A1"], ["MCM002", "456", "TEST", "Positive", "A2"]],
        columns=mongo_columns,
    )
    # mock response from 'get_cherrypicked_samples' containing an exact duplicate row
    cherrypicked_frame = pd.DataFrame(
        np.array(
            [
                ["MCM002", "456", "positive", "A2"],  # matches second sample
                ["MCM002", "456", "positive", "A2"],  # identical to above
            ]
        ),
        columns=[FIELD_ROOT_SAMPLE_ID, FIELD_PLATE_BARCODE, "Result_lower", FIELD_COORDINATE],
    )
    # the duplicate in the merge result mirrors the duplicate returned by the mock
    expected_columns = mongo_columns + ["LIMS submission"]
    expected_data = [
        ["MCM001", "123", "TEST", "Positive", "A1", "No"],
        ["MCM002", "456", "TEST", "Positive", "A2", "Yes"],
        ["MCM002", "456", "TEST", "Positive", "A2", "Yes"],
    ]
    with app.app_context():
        with patch("sqlalchemy.create_engine", return_value=Mock()):
            with patch(
                "lighthouse.helpers.reports.get_cherrypicked_samples",
                return_value=cherrypicked_frame,
            ):
                merged = add_cherrypicked_column(existing_dataframe)
                assert merged.columns.to_list() == expected_columns
                assert np.array_equal(merged.to_numpy(), expected_data)
def test_add_cherrypicked_column_no_rows(app):
    """With no cherrypicked matches, every row gets 'No' in 'LIMS submission'."""
    mongo_columns = [FIELD_ROOT_SAMPLE_ID, FIELD_PLATE_BARCODE, "Lab ID", FIELD_RESULT, FIELD_COORDINATE]
    existing_dataframe = pd.DataFrame(
        [
            ["MCM001", "123", "TEST", "Positive", "A1"],
            ["MCM002", "123", "TEST", "Positive", "A1"],
        ],
        columns=mongo_columns,
    )
    # NOTE(review): this empty-frame mock has not been validated against a real db
    # connection — confirm it matches what get_cherrypicked_samples() actually returns
    empty_cherrypicked = pd.DataFrame(
        [], columns=[FIELD_ROOT_SAMPLE_ID, FIELD_PLATE_BARCODE, "Result_lower", FIELD_COORDINATE]
    )
    expected_columns = mongo_columns + ["LIMS submission"]
    expected_data = [
        ["MCM001", "123", "TEST", "Positive", "A1", "No"],
        ["MCM002", "123", "TEST", "Positive", "A1", "No"],
    ]
    with app.app_context():
        with patch("sqlalchemy.create_engine", return_value=Mock()):
            with patch(
                "lighthouse.helpers.reports.get_cherrypicked_samples",
                return_value=empty_cherrypicked,
            ):
                merged = add_cherrypicked_column(existing_dataframe)
                assert merged.columns.to_list() == expected_columns
                assert np.array_equal(merged.to_numpy(), expected_data)
# ----- get_distinct_plate_barcodes tests -----
def test_get_distinct_plate_barcodes(app, samples):
    """Distinct plate barcodes from the samples collection include the fixture plate."""
    with app.app_context():
        # Fix: use a dedicated local name instead of rebinding (and shadowing) the
        # 'samples' fixture parameter.
        samples_collection = app.data.driver.db.samples
        assert get_distinct_plate_barcodes(samples_collection)[0] == "plate_123"
def test_report_query_window_start(app):
    """The report window starts at midnight, REPORT_WINDOW_SIZE days before now."""
    with app.app_context():
        window_size = app.config["REPORT_WINDOW_SIZE"]
        start = datetime.now() - timedelta(days=window_size)
        # Fix: call report_query_window_start() once instead of six times — repeated
        # calls straddling a midnight boundary could yield different dates mid-test
        # and make the assertions flaky.
        window_start = report_query_window_start()
        assert window_start.year == start.year
        assert window_start.month == start.month
        assert window_start.day == start.day
        # the window always starts at the very beginning of the day
        assert window_start.hour == 0
        assert window_start.minute == 0
        assert window_start.second == 0
| 38.742033
| 108
| 0.650738
| 3,066
| 25,531
| 5.128506
| 0.10274
| 0.049606
| 0.072755
| 0.045408
| 0.823137
| 0.773149
| 0.73321
| 0.712033
| 0.689328
| 0.671267
| 0
| 0.037882
| 0.244174
| 25,531
| 658
| 109
| 38.800912
| 0.77696
| 0.253182
| 0
| 0.645455
| 0
| 0
| 0.138588
| 0.042793
| 0
| 0
| 0
| 0
| 0.104545
| 1
| 0.052273
| false
| 0
| 0.018182
| 0
| 0.070455
| 0.002273
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9bcd11cac34e1e847415e828a577df1b6fbae019
| 555
|
py
|
Python
|
src/pvt_model/scripts/__init__.py
|
BenWinchester/PVTModel
|
6bf3976b06f406f632e0a9e525cd8b05359da239
|
[
"MIT"
] | 1
|
2021-05-11T14:15:11.000Z
|
2021-05-11T14:15:11.000Z
|
src/pvt_model/scripts/__init__.py
|
BenWinchester/PVTModel
|
6bf3976b06f406f632e0a9e525cd8b05359da239
|
[
"MIT"
] | 14
|
2021-02-23T11:53:08.000Z
|
2021-11-16T10:45:31.000Z
|
src/pvt_model/scripts/__init__.py
|
BenWinchester/PVTModel
|
6bf3976b06f406f632e0a9e525cd8b05359da239
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3.7
########################################################################################
# __init__.py - The init module for this PVT model's scripts folder. #
# #
# Author: Ben Winchester #
# Copyright: Ben Winchester, 2021 #
########################################################################################
| 69.375
| 88
| 0.2
| 23
| 555
| 4.652174
| 0.869565
| 0.242991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019481
| 0.445045
| 555
| 7
| 89
| 79.285714
| 0.327922
| 0.556757
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9be93903f53a8fb4548c46fa6af3f51d37bef1cd
| 75
|
py
|
Python
|
HackerRank/Sets/SetsAdd.py
|
OAPJ/Python
|
2fdf9c161f962cd912f2393ff72aed823b4241cb
|
[
"Apache-2.0"
] | null | null | null |
HackerRank/Sets/SetsAdd.py
|
OAPJ/Python
|
2fdf9c161f962cd912f2393ff72aed823b4241cb
|
[
"Apache-2.0"
] | null | null | null |
HackerRank/Sets/SetsAdd.py
|
OAPJ/Python
|
2fdf9c161f962cd912f2393ff72aed823b4241cb
|
[
"Apache-2.0"
] | null | null | null |
print (len(set([ input().strip() for _ in range(int(input().strip())) ])))
| 37.5
| 74
| 0.6
| 11
| 75
| 4
| 0.818182
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106667
| 75
| 1
| 75
| 75
| 0.656716
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
9bef74e9edbb5c27608d5c4c59f22d25175de501
| 104
|
py
|
Python
|
staticpy/lib/__init__.py
|
SnowWalkerJ/StaticPy
|
818b7f009af7a6040313791993f543779781dddf
|
[
"BSD-3-Clause"
] | 13
|
2019-10-14T19:22:11.000Z
|
2021-08-23T08:39:06.000Z
|
staticpy/lib/__init__.py
|
SnowWalkerJ/StaticPy
|
818b7f009af7a6040313791993f543779781dddf
|
[
"BSD-3-Clause"
] | 5
|
2019-09-30T07:42:18.000Z
|
2020-01-01T15:07:00.000Z
|
staticpy/lib/__init__.py
|
SnowWalkerJ/StaticPy
|
818b7f009af7a6040313791993f543779781dddf
|
[
"BSD-3-Clause"
] | null | null | null |
from .iostream import cprint, cin, cout, cerr, endl
from .cmath import *
from . import cmath, iostream
| 20.8
| 51
| 0.740385
| 15
| 104
| 5.133333
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173077
| 104
| 4
| 52
| 26
| 0.895349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
50149884ea0b8a3f27270ef8aa6d5ce4a6edbcda
| 144
|
py
|
Python
|
run_test_random.py
|
idrissbado/mxdevtool-python
|
38e2d467027b2dbf37dddabbf4f202ea73bbcfa3
|
[
"MIT"
] | 12
|
2021-01-15T00:46:48.000Z
|
2022-02-18T04:37:42.000Z
|
run_test_random.py
|
idrissbado/mxdevtool-python
|
38e2d467027b2dbf37dddabbf4f202ea73bbcfa3
|
[
"MIT"
] | 5
|
2021-01-15T13:26:36.000Z
|
2021-08-18T09:56:51.000Z
|
run_test_random.py
|
idrissbado/mxdevtool-python
|
38e2d467027b2dbf37dddabbf4f202ea73bbcfa3
|
[
"MIT"
] | 7
|
2020-07-17T05:15:20.000Z
|
2021-07-26T07:48:02.000Z
|
import randomseq.PseudoRandom as pseudo
import randomseq.SobolRandom as sobol
if __name__ == "__main__":
pseudo.test()
sobol.test()
| 14.4
| 39
| 0.729167
| 17
| 144
| 5.705882
| 0.647059
| 0.309278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180556
| 144
| 9
| 40
| 16
| 0.822034
| 0
| 0
| 0
| 0
| 0
| 0.056738
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ac97f31e2428e98a657afabd0352fa36c0e59a8a
| 29,264
|
py
|
Python
|
nova/tests/unit/api/openstack/compute/test_availability_zone.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/api/openstack/compute/test_availability_zone.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/api/openstack/compute/test_availability_zone.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | 2
|
2017-07-20T17:31:34.000Z
|
2020-07-24T02:42:19.000Z
|
begin_unit
comment|'# Copyright 2012 IBM Corp.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'datetime'
newline|'\n'
name|'import'
name|'iso8601'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
op|'.'
name|'api'
op|'.'
name|'openstack'
op|'.'
name|'compute'
name|'import'
name|'availability_zone'
name|'as'
name|'az_v21'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'api'
op|'.'
name|'openstack'
op|'.'
name|'compute'
name|'import'
name|'extension_info'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'api'
op|'.'
name|'openstack'
op|'.'
name|'compute'
name|'import'
name|'servers'
name|'as'
name|'servers_v21'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'availability_zones'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'compute'
name|'import'
name|'api'
name|'as'
name|'compute_api'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'compute'
name|'import'
name|'flavors'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'context'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'db'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'exception'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'servicegroup'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'test'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
op|'.'
name|'api'
op|'.'
name|'openstack'
name|'import'
name|'fakes'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
name|'import'
name|'fake_instance'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
op|'.'
name|'image'
name|'import'
name|'fake'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
name|'import'
name|'matchers'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
op|'.'
name|'objects'
name|'import'
name|'test_service'
newline|'\n'
name|'from'
name|'oslo_config'
name|'import'
name|'cfg'
newline|'\n'
nl|'\n'
DECL|variable|FAKE_UUID
name|'FAKE_UUID'
op|'='
name|'fakes'
op|'.'
name|'FAKE_UUID'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|fake_service_get_all
name|'def'
name|'fake_service_get_all'
op|'('
name|'context'
op|','
name|'disabled'
op|'='
name|'None'
op|')'
op|':'
newline|'\n'
DECL|function|__fake_service
indent|' '
name|'def'
name|'__fake_service'
op|'('
name|'binary'
op|','
name|'availability_zone'
op|','
nl|'\n'
name|'created_at'
op|','
name|'updated_at'
op|','
name|'host'
op|','
name|'disabled'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'dict'
op|'('
name|'test_service'
op|'.'
name|'fake_service'
op|','
nl|'\n'
name|'binary'
op|'='
name|'binary'
op|','
nl|'\n'
name|'availability_zone'
op|'='
name|'availability_zone'
op|','
nl|'\n'
name|'available_zones'
op|'='
name|'availability_zone'
op|','
nl|'\n'
name|'created_at'
op|'='
name|'created_at'
op|','
nl|'\n'
name|'updated_at'
op|'='
name|'updated_at'
op|','
nl|'\n'
name|'host'
op|'='
name|'host'
op|','
nl|'\n'
name|'disabled'
op|'='
name|'disabled'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'if'
name|'disabled'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
name|'__fake_service'
op|'('
string|'"nova-compute"'
op|','
string|'"zone-2"'
op|','
nl|'\n'
name|'datetime'
op|'.'
name|'datetime'
op|'('
number|'2012'
op|','
number|'11'
op|','
number|'14'
op|','
number|'9'
op|','
number|'53'
op|','
number|'25'
op|','
number|'0'
op|')'
op|','
nl|'\n'
name|'datetime'
op|'.'
name|'datetime'
op|'('
number|'2012'
op|','
number|'12'
op|','
number|'26'
op|','
number|'14'
op|','
number|'45'
op|','
number|'25'
op|','
number|'0'
op|')'
op|','
nl|'\n'
string|'"fake_host-1"'
op|','
name|'True'
op|')'
op|','
nl|'\n'
name|'__fake_service'
op|'('
string|'"nova-scheduler"'
op|','
string|'"internal"'
op|','
nl|'\n'
name|'datetime'
op|'.'
name|'datetime'
op|'('
number|'2012'
op|','
number|'11'
op|','
number|'14'
op|','
number|'9'
op|','
number|'57'
op|','
number|'3'
op|','
number|'0'
op|')'
op|','
nl|'\n'
name|'datetime'
op|'.'
name|'datetime'
op|'('
number|'2012'
op|','
number|'12'
op|','
number|'26'
op|','
number|'14'
op|','
number|'45'
op|','
number|'25'
op|','
number|'0'
op|')'
op|','
nl|'\n'
string|'"fake_host-1"'
op|','
name|'True'
op|')'
op|','
nl|'\n'
name|'__fake_service'
op|'('
string|'"nova-network"'
op|','
string|'"internal"'
op|','
nl|'\n'
name|'datetime'
op|'.'
name|'datetime'
op|'('
number|'2012'
op|','
number|'11'
op|','
number|'16'
op|','
number|'7'
op|','
number|'25'
op|','
number|'46'
op|','
number|'0'
op|')'
op|','
nl|'\n'
name|'datetime'
op|'.'
name|'datetime'
op|'('
number|'2012'
op|','
number|'12'
op|','
number|'26'
op|','
number|'14'
op|','
number|'45'
op|','
number|'24'
op|','
number|'0'
op|')'
op|','
nl|'\n'
string|'"fake_host-2"'
op|','
name|'True'
op|')'
op|']'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
name|'__fake_service'
op|'('
string|'"nova-compute"'
op|','
string|'"zone-1"'
op|','
nl|'\n'
name|'datetime'
op|'.'
name|'datetime'
op|'('
number|'2012'
op|','
number|'11'
op|','
number|'14'
op|','
number|'9'
op|','
number|'53'
op|','
number|'25'
op|','
number|'0'
op|')'
op|','
nl|'\n'
name|'datetime'
op|'.'
name|'datetime'
op|'('
number|'2012'
op|','
number|'12'
op|','
number|'26'
op|','
number|'14'
op|','
number|'45'
op|','
number|'25'
op|','
number|'0'
op|')'
op|','
nl|'\n'
string|'"fake_host-1"'
op|','
name|'False'
op|')'
op|','
nl|'\n'
name|'__fake_service'
op|'('
string|'"nova-sched"'
op|','
string|'"internal"'
op|','
nl|'\n'
name|'datetime'
op|'.'
name|'datetime'
op|'('
number|'2012'
op|','
number|'11'
op|','
number|'14'
op|','
number|'9'
op|','
number|'57'
op|','
number|'3'
op|','
number|'0'
op|')'
op|','
nl|'\n'
name|'datetime'
op|'.'
name|'datetime'
op|'('
number|'2012'
op|','
number|'12'
op|','
number|'26'
op|','
number|'14'
op|','
number|'45'
op|','
number|'25'
op|','
number|'0'
op|')'
op|','
nl|'\n'
string|'"fake_host-1"'
op|','
name|'False'
op|')'
op|','
nl|'\n'
name|'__fake_service'
op|'('
string|'"nova-network"'
op|','
string|'"internal"'
op|','
nl|'\n'
name|'datetime'
op|'.'
name|'datetime'
op|'('
number|'2012'
op|','
number|'11'
op|','
number|'16'
op|','
number|'7'
op|','
number|'25'
op|','
number|'46'
op|','
number|'0'
op|')'
op|','
nl|'\n'
name|'datetime'
op|'.'
name|'datetime'
op|'('
number|'2012'
op|','
number|'12'
op|','
number|'26'
op|','
number|'14'
op|','
number|'45'
op|','
number|'24'
op|','
number|'0'
op|')'
op|','
nl|'\n'
string|'"fake_host-2"'
op|','
name|'False'
op|')'
op|']'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|fake_service_is_up
dedent|''
dedent|''
name|'def'
name|'fake_service_is_up'
op|'('
name|'self'
op|','
name|'service'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'service'
op|'['
string|"'binary'"
op|']'
op|'!='
string|'u"nova-network"'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|fake_set_availability_zones
dedent|''
name|'def'
name|'fake_set_availability_zones'
op|'('
name|'context'
op|','
name|'services'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'services'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|fake_get_availability_zones
dedent|''
name|'def'
name|'fake_get_availability_zones'
op|'('
name|'context'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
string|"'nova'"
op|']'
op|','
op|'['
op|']'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|CONF
dedent|''
name|'CONF'
op|'='
name|'cfg'
op|'.'
name|'CONF'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|AvailabilityZoneApiTestV21
name|'class'
name|'AvailabilityZoneApiTestV21'
op|'('
name|'test'
op|'.'
name|'NoDBTestCase'
op|')'
op|':'
newline|'\n'
DECL|variable|availability_zone
indent|' '
name|'availability_zone'
op|'='
name|'az_v21'
newline|'\n'
nl|'\n'
DECL|member|setUp
name|'def'
name|'setUp'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'AvailabilityZoneApiTestV21'
op|','
name|'self'
op|')'
op|'.'
name|'setUp'
op|'('
op|')'
newline|'\n'
name|'availability_zones'
op|'.'
name|'reset_cache'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.service_get_all'"
op|','
name|'fake_service_get_all'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'availability_zones'
op|','
string|"'set_availability_zones'"
op|','
nl|'\n'
name|'fake_set_availability_zones'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'servicegroup'
op|'.'
name|'API'
op|','
string|"'service_is_up'"
op|','
name|'fake_service_is_up'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'controller'
op|'='
name|'self'
op|'.'
name|'availability_zone'
op|'.'
name|'AvailabilityZoneController'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'req'
op|'='
name|'fakes'
op|'.'
name|'HTTPRequest'
op|'.'
name|'blank'
op|'('
string|"''"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_filtered_availability_zones
dedent|''
name|'def'
name|'test_filtered_availability_zones'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'zones'
op|'='
op|'['
string|"'zone1'"
op|','
string|"'internal'"
op|']'
newline|'\n'
name|'expected'
op|'='
op|'['
op|'{'
string|"'zoneName'"
op|':'
string|"'zone1'"
op|','
nl|'\n'
string|"'zoneState'"
op|':'
op|'{'
string|"'available'"
op|':'
name|'True'
op|'}'
op|','
nl|'\n'
string|'"hosts"'
op|':'
name|'None'
op|'}'
op|']'
newline|'\n'
name|'result'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_get_filtered_availability_zones'
op|'('
name|'zones'
op|','
name|'True'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'result'
op|','
name|'expected'
op|')'
newline|'\n'
nl|'\n'
name|'expected'
op|'='
op|'['
op|'{'
string|"'zoneName'"
op|':'
string|"'zone1'"
op|','
nl|'\n'
string|"'zoneState'"
op|':'
op|'{'
string|"'available'"
op|':'
name|'False'
op|'}'
op|','
nl|'\n'
string|'"hosts"'
op|':'
name|'None'
op|'}'
op|']'
newline|'\n'
name|'result'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'_get_filtered_availability_zones'
op|'('
name|'zones'
op|','
nl|'\n'
name|'False'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'result'
op|','
name|'expected'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_availability_zone_index
dedent|''
name|'def'
name|'test_availability_zone_index'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'resp_dict'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'index'
op|'('
name|'self'
op|'.'
name|'req'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertIn'
op|'('
string|"'availabilityZoneInfo'"
op|','
name|'resp_dict'
op|')'
newline|'\n'
name|'zones'
op|'='
name|'resp_dict'
op|'['
string|"'availabilityZoneInfo'"
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'zones'
op|')'
op|','
number|'2'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'zones'
op|'['
number|'0'
op|']'
op|'['
string|"'zoneName'"
op|']'
op|','
string|"u'zone-1'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'zones'
op|'['
number|'0'
op|']'
op|'['
string|"'zoneState'"
op|']'
op|'['
string|"'available'"
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsNone'
op|'('
name|'zones'
op|'['
number|'0'
op|']'
op|'['
string|"'hosts'"
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'zones'
op|'['
number|'1'
op|']'
op|'['
string|"'zoneName'"
op|']'
op|','
string|"u'zone-2'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertFalse'
op|'('
name|'zones'
op|'['
number|'1'
op|']'
op|'['
string|"'zoneState'"
op|']'
op|'['
string|"'available'"
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsNone'
op|'('
name|'zones'
op|'['
number|'1'
op|']'
op|'['
string|"'hosts'"
op|']'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_availability_zone_detail
dedent|''
name|'def'
name|'test_availability_zone_detail'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'resp_dict'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'detail'
op|'('
name|'self'
op|'.'
name|'req'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertIn'
op|'('
string|"'availabilityZoneInfo'"
op|','
name|'resp_dict'
op|')'
newline|'\n'
name|'zones'
op|'='
name|'resp_dict'
op|'['
string|"'availabilityZoneInfo'"
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'zones'
op|')'
op|','
number|'3'
op|')'
newline|'\n'
name|'timestamp'
op|'='
name|'iso8601'
op|'.'
name|'parse_date'
op|'('
string|'"2012-12-26T14:45:25Z"'
op|')'
newline|'\n'
name|'nova_network_timestamp'
op|'='
name|'iso8601'
op|'.'
name|'parse_date'
op|'('
string|'"2012-12-26T14:45:24Z"'
op|')'
newline|'\n'
name|'expected'
op|'='
op|'['
op|'{'
string|"'zoneName'"
op|':'
string|"'zone-1'"
op|','
nl|'\n'
string|"'zoneState'"
op|':'
op|'{'
string|"'available'"
op|':'
name|'True'
op|'}'
op|','
nl|'\n'
string|"'hosts'"
op|':'
op|'{'
string|"'fake_host-1'"
op|':'
op|'{'
nl|'\n'
string|"'nova-compute'"
op|':'
op|'{'
string|"'active'"
op|':'
name|'True'
op|','
string|"'available'"
op|':'
name|'True'
op|','
nl|'\n'
string|"'updated_at'"
op|':'
name|'timestamp'
op|'}'
op|'}'
op|'}'
op|'}'
op|','
nl|'\n'
op|'{'
string|"'zoneName'"
op|':'
string|"'internal'"
op|','
nl|'\n'
string|"'zoneState'"
op|':'
op|'{'
string|"'available'"
op|':'
name|'True'
op|'}'
op|','
nl|'\n'
string|"'hosts'"
op|':'
op|'{'
string|"'fake_host-1'"
op|':'
op|'{'
nl|'\n'
string|"'nova-sched'"
op|':'
op|'{'
string|"'active'"
op|':'
name|'True'
op|','
string|"'available'"
op|':'
name|'True'
op|','
nl|'\n'
string|"'updated_at'"
op|':'
name|'timestamp'
op|'}'
op|'}'
op|','
nl|'\n'
string|"'fake_host-2'"
op|':'
op|'{'
nl|'\n'
string|"'nova-network'"
op|':'
op|'{'
nl|'\n'
string|"'active'"
op|':'
name|'True'
op|','
nl|'\n'
string|"'available'"
op|':'
name|'False'
op|','
nl|'\n'
string|"'updated_at'"
op|':'
name|'nova_network_timestamp'
op|'}'
op|'}'
op|'}'
op|'}'
op|','
nl|'\n'
op|'{'
string|"'zoneName'"
op|':'
string|"'zone-2'"
op|','
nl|'\n'
string|"'zoneState'"
op|':'
op|'{'
string|"'available'"
op|':'
name|'False'
op|'}'
op|','
nl|'\n'
string|"'hosts'"
op|':'
name|'None'
op|'}'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'expected'
op|','
name|'zones'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_availability_zone_detail_no_services
dedent|''
name|'def'
name|'test_availability_zone_detail_no_services'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'expected_response'
op|'='
op|'{'
string|"'availabilityZoneInfo'"
op|':'
nl|'\n'
op|'['
op|'{'
string|"'zoneState'"
op|':'
op|'{'
string|"'available'"
op|':'
name|'True'
op|'}'
op|','
nl|'\n'
string|"'hosts'"
op|':'
op|'{'
op|'}'
op|','
nl|'\n'
string|"'zoneName'"
op|':'
string|"'nova'"
op|'}'
op|']'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'availability_zones'
op|','
string|"'get_availability_zones'"
op|','
nl|'\n'
name|'fake_get_availability_zones'
op|')'
newline|'\n'
nl|'\n'
name|'resp_dict'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'detail'
op|'('
name|'self'
op|'.'
name|'req'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertThat'
op|'('
name|'resp_dict'
op|','
nl|'\n'
name|'matchers'
op|'.'
name|'DictMatches'
op|'('
name|'expected_response'
op|')'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|ServersControllerCreateTestV21
dedent|''
dedent|''
name|'class'
name|'ServersControllerCreateTestV21'
op|'('
name|'test'
op|'.'
name|'TestCase'
op|')'
op|':'
newline|'\n'
DECL|variable|base_url
indent|' '
name|'base_url'
op|'='
string|"'/v2/fake/'"
newline|'\n'
nl|'\n'
DECL|member|setUp
name|'def'
name|'setUp'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Shared implementation for tests below that create instance."""'
newline|'\n'
name|'super'
op|'('
name|'ServersControllerCreateTestV21'
op|','
name|'self'
op|')'
op|'.'
name|'setUp'
op|'('
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'instance_cache_num'
op|'='
number|'0'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'_set_up_controller'
op|'('
op|')'
newline|'\n'
nl|'\n'
DECL|function|instance_create
name|'def'
name|'instance_create'
op|'('
name|'context'
op|','
name|'inst'
op|')'
op|':'
newline|'\n'
indent|' '
name|'inst_type'
op|'='
name|'flavors'
op|'.'
name|'get_flavor_by_flavor_id'
op|'('
number|'3'
op|')'
newline|'\n'
name|'image_uuid'
op|'='
string|"'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'"
newline|'\n'
name|'def_image_ref'
op|'='
string|"'http://localhost/images/%s'"
op|'%'
name|'image_uuid'
newline|'\n'
name|'self'
op|'.'
name|'instance_cache_num'
op|'+='
number|'1'
newline|'\n'
name|'instance'
op|'='
name|'fake_instance'
op|'.'
name|'fake_db_instance'
op|'('
op|'**'
op|'{'
nl|'\n'
string|"'id'"
op|':'
name|'self'
op|'.'
name|'instance_cache_num'
op|','
nl|'\n'
string|"'display_name'"
op|':'
name|'inst'
op|'['
string|"'display_name'"
op|']'
name|'or'
string|"'test'"
op|','
nl|'\n'
string|"'uuid'"
op|':'
name|'FAKE_UUID'
op|','
nl|'\n'
string|"'instance_type'"
op|':'
name|'inst_type'
op|','
nl|'\n'
string|"'access_ip_v4'"
op|':'
string|"'1.2.3.4'"
op|','
nl|'\n'
string|"'access_ip_v6'"
op|':'
string|"'fead::1234'"
op|','
nl|'\n'
string|"'image_ref'"
op|':'
name|'inst'
op|'.'
name|'get'
op|'('
string|"'image_ref'"
op|','
name|'def_image_ref'
op|')'
op|','
nl|'\n'
string|"'user_id'"
op|':'
string|"'fake'"
op|','
nl|'\n'
string|"'project_id'"
op|':'
string|"'fake'"
op|','
nl|'\n'
string|"'availability_zone'"
op|':'
string|"'nova'"
op|','
nl|'\n'
string|"'reservation_id'"
op|':'
name|'inst'
op|'['
string|"'reservation_id'"
op|']'
op|','
nl|'\n'
string|'"created_at"'
op|':'
name|'datetime'
op|'.'
name|'datetime'
op|'('
number|'2010'
op|','
number|'10'
op|','
number|'10'
op|','
number|'12'
op|','
number|'0'
op|','
number|'0'
op|')'
op|','
nl|'\n'
string|'"updated_at"'
op|':'
name|'datetime'
op|'.'
name|'datetime'
op|'('
number|'2010'
op|','
number|'11'
op|','
number|'11'
op|','
number|'11'
op|','
number|'0'
op|','
number|'0'
op|')'
op|','
nl|'\n'
string|'"progress"'
op|':'
number|'0'
op|','
nl|'\n'
string|'"fixed_ips"'
op|':'
op|'['
op|']'
op|','
nl|'\n'
string|'"task_state"'
op|':'
string|'""'
op|','
nl|'\n'
string|'"vm_state"'
op|':'
string|'""'
op|','
nl|'\n'
string|'"root_device_name"'
op|':'
name|'inst'
op|'.'
name|'get'
op|'('
string|"'root_device_name'"
op|','
string|"'vda'"
op|')'
op|','
nl|'\n'
op|'}'
op|')'
newline|'\n'
nl|'\n'
name|'return'
name|'instance'
newline|'\n'
nl|'\n'
dedent|''
name|'fake'
op|'.'
name|'stub_out_image_service'
op|'('
name|'self'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stub_out'
op|'('
string|"'nova.db.instance_create'"
op|','
name|'instance_create'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'req'
op|'='
name|'fakes'
op|'.'
name|'HTTPRequest'
op|'.'
name|'blank'
op|'('
string|"''"
op|')'
newline|'\n'
nl|'\n'
DECL|member|_set_up_controller
dedent|''
name|'def'
name|'_set_up_controller'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'ext_info'
op|'='
name|'extension_info'
op|'.'
name|'LoadedExtensionInfo'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'controller'
op|'='
name|'servers_v21'
op|'.'
name|'ServersController'
op|'('
nl|'\n'
name|'extension_info'
op|'='
name|'ext_info'
op|')'
newline|'\n'
name|'CONF'
op|'.'
name|'set_override'
op|'('
string|"'extensions_blacklist'"
op|','
nl|'\n'
string|"'os-availability-zone'"
op|','
nl|'\n'
string|"'osapi_v21'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'no_availability_zone_controller'
op|'='
name|'servers_v21'
op|'.'
name|'ServersController'
op|'('
nl|'\n'
name|'extension_info'
op|'='
name|'ext_info'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_test_create_extra
dedent|''
name|'def'
name|'_test_create_extra'
op|'('
name|'self'
op|','
name|'params'
op|','
name|'controller'
op|')'
op|':'
newline|'\n'
indent|' '
name|'image_uuid'
op|'='
string|"'c905cedb-7281-47e4-8a62-f26bc5fc4c77'"
newline|'\n'
name|'server'
op|'='
name|'dict'
op|'('
name|'name'
op|'='
string|"'server_test'"
op|','
name|'imageRef'
op|'='
name|'image_uuid'
op|','
name|'flavorRef'
op|'='
number|'2'
op|')'
newline|'\n'
name|'server'
op|'.'
name|'update'
op|'('
name|'params'
op|')'
newline|'\n'
name|'body'
op|'='
name|'dict'
op|'('
name|'server'
op|'='
name|'server'
op|')'
newline|'\n'
name|'server'
op|'='
name|'controller'
op|'.'
name|'create'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'body'
op|'='
name|'body'
op|')'
op|'.'
name|'obj'
op|'['
string|"'server'"
op|']'
newline|'\n'
nl|'\n'
DECL|member|test_create_instance_with_availability_zone_disabled
dedent|''
name|'def'
name|'test_create_instance_with_availability_zone_disabled'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'params'
op|'='
op|'{'
string|"'availability_zone'"
op|':'
string|"'foo'"
op|'}'
newline|'\n'
name|'old_create'
op|'='
name|'compute_api'
op|'.'
name|'API'
op|'.'
name|'create'
newline|'\n'
nl|'\n'
DECL|function|create
name|'def'
name|'create'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertIsNone'
op|'('
name|'kwargs'
op|'['
string|"'availability_zone'"
op|']'
op|')'
newline|'\n'
name|'return'
name|'old_create'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'create'"
op|','
name|'create'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_test_create_extra'
op|'('
name|'params'
op|','
name|'self'
op|'.'
name|'no_availability_zone_controller'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_create_instance_with_availability_zone
dedent|''
name|'def'
name|'_create_instance_with_availability_zone'
op|'('
name|'self'
op|','
name|'zone_name'
op|')'
op|':'
newline|'\n'
DECL|function|create
indent|' '
name|'def'
name|'create'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertIn'
op|'('
string|"'availability_zone'"
op|','
name|'kwargs'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
string|"'nova'"
op|','
name|'kwargs'
op|'['
string|"'availability_zone'"
op|']'
op|')'
newline|'\n'
name|'return'
name|'old_create'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'old_create'
op|'='
name|'compute_api'
op|'.'
name|'API'
op|'.'
name|'create'
newline|'\n'
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'compute_api'
op|'.'
name|'API'
op|','
string|"'create'"
op|','
name|'create'
op|')'
newline|'\n'
name|'image_href'
op|'='
string|"'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'"
newline|'\n'
name|'flavor_ref'
op|'='
op|'('
string|"'http://localhost'"
op|'+'
name|'self'
op|'.'
name|'base_url'
op|'+'
string|"'flavors/3'"
op|')'
newline|'\n'
name|'body'
op|'='
op|'{'
nl|'\n'
string|"'server'"
op|':'
op|'{'
nl|'\n'
string|"'name'"
op|':'
string|"'server_test'"
op|','
nl|'\n'
string|"'imageRef'"
op|':'
name|'image_href'
op|','
nl|'\n'
string|"'flavorRef'"
op|':'
name|'flavor_ref'
op|','
nl|'\n'
string|"'metadata'"
op|':'
op|'{'
nl|'\n'
string|"'hello'"
op|':'
string|"'world'"
op|','
nl|'\n'
string|"'open'"
op|':'
string|"'stack'"
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
string|"'availability_zone'"
op|':'
name|'zone_name'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'admin_context'
op|'='
name|'context'
op|'.'
name|'get_admin_context'
op|'('
op|')'
newline|'\n'
name|'db'
op|'.'
name|'service_create'
op|'('
name|'admin_context'
op|','
op|'{'
string|"'host'"
op|':'
string|"'host1_zones'"
op|','
nl|'\n'
string|"'binary'"
op|':'
string|'"nova-compute"'
op|','
nl|'\n'
string|"'topic'"
op|':'
string|"'compute'"
op|','
nl|'\n'
string|"'report_count'"
op|':'
number|'0'
op|'}'
op|')'
newline|'\n'
name|'agg'
op|'='
name|'db'
op|'.'
name|'aggregate_create'
op|'('
name|'admin_context'
op|','
nl|'\n'
op|'{'
string|"'name'"
op|':'
string|"'agg1'"
op|'}'
op|','
op|'{'
string|"'availability_zone'"
op|':'
string|"'nova'"
op|'}'
op|')'
newline|'\n'
name|'db'
op|'.'
name|'aggregate_host_add'
op|'('
name|'admin_context'
op|','
name|'agg'
op|'['
string|"'id'"
op|']'
op|','
string|"'host1_zones'"
op|')'
newline|'\n'
name|'return'
name|'self'
op|'.'
name|'req'
op|','
name|'body'
newline|'\n'
nl|'\n'
DECL|member|test_create_instance_with_availability_zone
dedent|''
name|'def'
name|'test_create_instance_with_availability_zone'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'zone_name'
op|'='
string|"'nova'"
newline|'\n'
name|'req'
op|','
name|'body'
op|'='
name|'self'
op|'.'
name|'_create_instance_with_availability_zone'
op|'('
name|'zone_name'
op|')'
newline|'\n'
name|'res'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'create'
op|'('
name|'req'
op|','
name|'body'
op|'='
name|'body'
op|')'
op|'.'
name|'obj'
newline|'\n'
name|'server'
op|'='
name|'res'
op|'['
string|"'server'"
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'fakes'
op|'.'
name|'FAKE_UUID'
op|','
name|'server'
op|'['
string|"'id'"
op|']'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_instance_with_invalid_availability_zone_too_long
dedent|''
name|'def'
name|'test_create_instance_with_invalid_availability_zone_too_long'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'zone_name'
op|'='
string|"'a'"
op|'*'
number|'256'
newline|'\n'
name|'req'
op|','
name|'body'
op|'='
name|'self'
op|'.'
name|'_create_instance_with_availability_zone'
op|'('
name|'zone_name'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'exception'
op|'.'
name|'ValidationError'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'create'
op|','
name|'req'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_instance_with_invalid_availability_zone_too_short
dedent|''
name|'def'
name|'test_create_instance_with_invalid_availability_zone_too_short'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'zone_name'
op|'='
string|"''"
newline|'\n'
name|'req'
op|','
name|'body'
op|'='
name|'self'
op|'.'
name|'_create_instance_with_availability_zone'
op|'('
name|'zone_name'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'exception'
op|'.'
name|'ValidationError'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'create'
op|','
name|'req'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_instance_with_invalid_availability_zone_not_str
dedent|''
name|'def'
name|'test_create_instance_with_invalid_availability_zone_not_str'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'zone_name'
op|'='
number|'111'
newline|'\n'
name|'req'
op|','
name|'body'
op|'='
name|'self'
op|'.'
name|'_create_instance_with_availability_zone'
op|'('
name|'zone_name'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'exception'
op|'.'
name|'ValidationError'
op|','
nl|'\n'
name|'self'
op|'.'
name|'controller'
op|'.'
name|'create'
op|','
name|'req'
op|','
name|'body'
op|'='
name|'body'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_create_instance_without_availability_zone
dedent|''
name|'def'
name|'test_create_instance_without_availability_zone'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'image_href'
op|'='
string|"'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'"
newline|'\n'
name|'flavor_ref'
op|'='
op|'('
string|"'http://localhost'"
op|'+'
name|'self'
op|'.'
name|'base_url'
op|'+'
string|"'flavors/3'"
op|')'
newline|'\n'
name|'body'
op|'='
op|'{'
nl|'\n'
string|"'server'"
op|':'
op|'{'
nl|'\n'
string|"'name'"
op|':'
string|"'server_test'"
op|','
nl|'\n'
string|"'imageRef'"
op|':'
name|'image_href'
op|','
nl|'\n'
string|"'flavorRef'"
op|':'
name|'flavor_ref'
op|','
nl|'\n'
string|"'metadata'"
op|':'
op|'{'
nl|'\n'
string|"'hello'"
op|':'
string|"'world'"
op|','
nl|'\n'
string|"'open'"
op|':'
string|"'stack'"
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
name|'res'
op|'='
name|'self'
op|'.'
name|'controller'
op|'.'
name|'create'
op|'('
name|'self'
op|'.'
name|'req'
op|','
name|'body'
op|'='
name|'body'
op|')'
op|'.'
name|'obj'
newline|'\n'
name|'server'
op|'='
name|'res'
op|'['
string|"'server'"
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'fakes'
op|'.'
name|'FAKE_UUID'
op|','
name|'server'
op|'['
string|"'id'"
op|']'
op|')'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| 12.234114
| 88
| 0.60067
| 4,292
| 29,264
| 3.997204
| 0.068733
| 0.135346
| 0.064118
| 0.053859
| 0.843029
| 0.769002
| 0.715901
| 0.669562
| 0.63523
| 0.579739
| 0
| 0.014766
| 0.09281
| 29,264
| 2,391
| 89
| 12.23923
| 0.63146
| 0
| 0
| 0.94312
| 0
| 0
| 0.353369
| 0.053137
| 0
| 0
| 0
| 0
| 0.009201
| 0
| null | null | 0
| 0.007946
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4a08252e340e9b4a8e337bc06b957232884b243f
| 225
|
py
|
Python
|
analytics/admin.py
|
ayushkalani/delightchat
|
7b60ca16ccd1cf4005fc2833fa03256f11cd71c7
|
[
"MIT"
] | null | null | null |
analytics/admin.py
|
ayushkalani/delightchat
|
7b60ca16ccd1cf4005fc2833fa03256f11cd71c7
|
[
"MIT"
] | null | null | null |
analytics/admin.py
|
ayushkalani/delightchat
|
7b60ca16ccd1cf4005fc2833fa03256f11cd71c7
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from analytics.models import Accounts, Users, Conversations, Channels
admin.site.register(Accounts)
admin.site.register(Users)
admin.site.register(Conversations)
admin.site.register(Channels)
| 32.142857
| 69
| 0.84
| 29
| 225
| 6.517241
| 0.448276
| 0.190476
| 0.359788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 225
| 7
| 70
| 32.142857
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4a26f0816b209c1d559188e8cdc9467006e51228
| 118
|
py
|
Python
|
week1/yaml_test1.py
|
nlinkov/test_a
|
091dc66134f1e77ede35f58a774593862a44ded2
|
[
"MIT"
] | null | null | null |
week1/yaml_test1.py
|
nlinkov/test_a
|
091dc66134f1e77ede35f58a774593862a44ded2
|
[
"MIT"
] | null | null | null |
week1/yaml_test1.py
|
nlinkov/test_a
|
091dc66134f1e77ede35f58a774593862a44ded2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from pprint import pprint as pp
from yaml_helper import read_yaml
pp(read_yaml('test_file3.yml'))
| 16.857143
| 33
| 0.779661
| 21
| 118
| 4.190476
| 0.666667
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009615
| 0.118644
| 118
| 6
| 34
| 19.666667
| 0.836538
| 0.135593
| 0
| 0
| 0
| 0
| 0.138614
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c5932205db4b9aa0bdd13f3a4902ab9c3b0791cf
| 63
|
py
|
Python
|
babble/text/__init__.py
|
rit-git/vanilla-snorkel
|
401bff1418b7bbf5d1c38312800b5ccf43b21297
|
[
"Apache-2.0"
] | 130
|
2018-05-11T15:49:02.000Z
|
2022-03-27T15:28:27.000Z
|
babble/text/__init__.py
|
we1l1n/babble
|
3991ceeadb6bb9ee75a90d7290f8acd0132472db
|
[
"Apache-2.0"
] | 9
|
2020-03-24T18:21:57.000Z
|
2022-02-10T00:28:33.000Z
|
babble/text/__init__.py
|
we1l1n/babble
|
3991ceeadb6bb9ee75a90d7290f8acd0132472db
|
[
"Apache-2.0"
] | 17
|
2018-07-12T09:05:50.000Z
|
2021-11-15T17:09:35.000Z
|
from .text_base import text_grammar
from .text_helpers import *
| 31.5
| 35
| 0.84127
| 10
| 63
| 5
| 0.6
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 63
| 2
| 36
| 31.5
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c5af5d62bd0b3d258d486426320a947f7ae28a0c
| 119
|
py
|
Python
|
tnalgo/__main__.py
|
manimanis/tnalgo
|
2340387ecfd9f372aed912f3839db6c68a4512c4
|
[
"MIT"
] | null | null | null |
tnalgo/__main__.py
|
manimanis/tnalgo
|
2340387ecfd9f372aed912f3839db6c68a4512c4
|
[
"MIT"
] | null | null | null |
tnalgo/__main__.py
|
manimanis/tnalgo
|
2340387ecfd9f372aed912f3839db6c68a4512c4
|
[
"MIT"
] | null | null | null |
def aléa(a, b):
return (a + b) // 2
if __name__ == '__main__':
print('Import this module in order to use it!')
| 23.8
| 51
| 0.596639
| 20
| 119
| 3.15
| 0.9
| 0.063492
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011111
| 0.243697
| 119
| 5
| 51
| 23.8
| 0.688889
| 0
| 0
| 0
| 0
| 0
| 0.383333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0.25
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
c5ba36f4eccbc06e1306bfc970b5d16162ba7e54
| 14,202
|
py
|
Python
|
Generator/CatanMap.py
|
Japjappedulap/Catan
|
40edb38eceab8c45b6889de460f4d529c38215c4
|
[
"MIT"
] | 7
|
2018-04-15T23:56:19.000Z
|
2021-05-14T15:30:51.000Z
|
Generator/CatanMap.py
|
Japjappedulap/Catan
|
40edb38eceab8c45b6889de460f4d529c38215c4
|
[
"MIT"
] | null | null | null |
Generator/CatanMap.py
|
Japjappedulap/Catan
|
40edb38eceab8c45b6889de460f4d529c38215c4
|
[
"MIT"
] | 4
|
2019-03-15T03:00:07.000Z
|
2020-10-02T22:00:05.000Z
|
import numpy
class CatanMap:
# GLOBAL variables
pair_coefficient = [4, 7]
trip_coefficient = [8, 10]
pair_peak = pair_coefficient[0] + pair_coefficient[1] / 2
trip_peak = trip_coefficient[0] + trip_coefficient[1] / 2
initial_coefficient = 100
def __init__(self):
# print("creating new map")
self.tile = []
self.tile_dice = []
self.neighbor_recurrence = {}
self.resource_distribution = None
self.tile_pairs = []
self.tile_to_name = {0: "none", 1: "LUMB", 2: "WOOL", 3: "GRAI", 4: "OREE", 5: "CLAY", 6: "DESE"}
self.name_to_tile = {"LUMB": 1, "WOOL": 2, "GRAI": 3, "OREE": 4, "CLAY": 5, "DESE": 6}
self.dice_probability = {2: 1, 3: 2, 4: 3, 5: 4, 6: 5, 7: 6, 8: 5, 9: 4, 10: 3, 11: 2, 12: 1}
# self.statistics = Statistics()
# def __del__(self):
# self.statistics.close_statistics()
# self.statistics.generate_graph()
def export(self):
"""
JSON schema
{
type: *classic, extended*
tile: [elements]
if classic, list has 19 elements
if extended,list has 30 elements
}
Element schema
{
index: *0-19, 0-30*
resource_type: *LUMB, WOOL, GRAI, OREE, CLAY, DESE*
dice: *0, 2-12*
}
:return: JSON representing the map
"""
pass
def fix_tile(self, index, resource, dice):
self.tile[index] = resource
self.tile_dice[index] = dice
self.resource_distribution.remove_resource(resource, dice)
def get_resource_distribution(self):
return self.resource_distribution
def completed(self):
if not self.no_identic_neighbours():
return False
for i in self.tile:
if i == 0:
return False
return True
def no_identic_neighbours(self):
for i in self.neighbor_recurrence:
for j in self.neighbor_recurrence[i]:
if self.tile[j] == self.tile[i]:
return False
return True
def are_neighbor(self, tile1, tile2):
for i in self.neighbor_recurrence[tile1]:
if i == tile2:
return True
for i in self.neighbor_recurrence[tile2]:
if i == tile1:
return True
return False
def generate_dices(self):
self.resource_distribution.generate_balanced()
def generate_next_tile_possibilities_single(self):
try:
candidates = []
prob = []
for i in self.resource_distribution.configuration:
for j in self.resource_distribution.configuration[i]:
candidates.append((i, j))
coefficient = len(self.resource_distribution.configuration[i])
coefficient += self.dice_probability[j]
prob.append(coefficient)
indexes = []
total = sum(prob)
for i in range(len(prob)):
prob[i] = prob[i] / total
indexes.append(i)
result = numpy.random.choice(indexes, 1, p=prob)
return candidates[numpy.asscalar(result[0])]
except Exception:
raise Exception
def generate_next_tile_possibilities_pair(self, index):
try:
tile_resource = self.tile[index] # already set
tile_dice = self.tile_dice[index] # already set
candidates = []
prob = []
for i in self.resource_distribution.configuration:
for j in self.resource_distribution.configuration[i]:
if self.pair_coefficient[0] <= self.dice_probability[tile_dice] + self.dice_probability[j] <= \
self.pair_coefficient[1]:
coefficient = self.initial_coefficient
candidates.append((i, j))
# coefficient grows if there are more resources available in the pool
coefficient += len(self.resource_distribution.configuration[i])
# coefficient grows if bigger dice
coefficient += self.dice_probability[j]
# coefficient grows when getting closer to peak
coefficient = coefficient - 2 * abs(self.pair_peak - (self.dice_probability[tile_dice] +
self.dice_probability[j])) ** 2
# coefficient shrinks if resources are identical
if i == tile_resource:
coefficient /= 4
else:
coefficient *= 4
prob.append(coefficient)
# records coefficient for statistics
# self.statistics.log_coefficient(coefficient)
indexes = []
total = sum(prob)
for i in range(len(prob)):
prob[i] = prob[i] / total
indexes.append(i)
result = numpy.random.choice(indexes, 1, p=prob)
return candidates[numpy.asscalar(result[0])]
except Exception as e:
print(e)
# if no tile and dice available, throw exception in order to stop
raise Exception
def generate_next_tile_possibilities_closed_triple(self, index1, index2):
try:
tile1_resource = self.tile[index1] # already set
tile1_dice = self.tile_dice[index1] # already set
tile2_resource = self.tile[index2] # already set
tile2_dice = self.tile_dice[index2] # already set
candidates = []
prob = []
for i in self.resource_distribution.configuration:
for j in self.resource_distribution.configuration[i]:
if self.trip_coefficient[0] <= self.dice_probability[tile1_dice] + self.dice_probability[j] + \
self.dice_probability[tile2_dice] <= self.trip_coefficient[1]:
coefficient = self.initial_coefficient
candidates.append((i, j))
# coefficient grows if there are more resources available in the pool
coefficient += len(self.resource_distribution.configuration[i])
# coefficient grows if bigger dice
coefficient += self.dice_probability[j]
# coefficient grows when getting closer to peak
coefficient -= 2 * abs(self.trip_peak - (self.dice_probability[tile1_dice] +
self.dice_probability[tile2_dice] +
self.dice_probability[j])) ** 2
# coefficient shrinks if resources are identical
if i == tile1_resource:
coefficient /= 4
elif i != tile1_resource:
coefficient *= 4
if i == tile2_resource:
coefficient /= 4
elif i != tile2_resource:
coefficient *= 4
prob.append(coefficient)
# records coefficient for statistics
# self.statistics.log_coefficient(coefficient)
indexes = []
total = sum(prob)
for i in range(len(prob)):
prob[i] = prob[i] / total
indexes.append(i)
result = numpy.random.choice(indexes, 1, p=prob)
return candidates[numpy.asscalar(result[0])]
except Exception:
raise Exception
def generate_next_tile_possibilities_scattered_triple(self, index1, index2):
try:
tile1_resource = self.tile[index1] # already set
tile1_dice = self.tile_dice[index1] # already set
tile2_resource = self.tile[index2] # already set
tile2_dice = self.tile_dice[index2] # already set
candidates = []
prob = []
for i in self.resource_distribution.configuration:
for j in self.resource_distribution.configuration[i]:
if self.pair_coefficient[0] <= self.dice_probability[tile1_dice] + self.dice_probability[j] <= \
self.pair_coefficient[1] and \
self.pair_coefficient[0] <= self.dice_probability[tile2_dice] + self.dice_probability[j] \
<= self.pair_coefficient[1]:
coefficient = self.initial_coefficient
candidates.append((i, j))
# coefficient grows if there are more resources available in the pool
coefficient += len(self.resource_distribution.configuration[i])
# coefficient grows if bigger dice
coefficient += self.dice_probability[j]
# coefficient grows when getting closer to peak
coefficient -= 2 * (abs(self.pair_peak - (self.dice_probability[tile1_dice] +
self.dice_probability[j])) ** 2 +
abs(self.pair_peak - (self.dice_probability[tile2_dice] +
self.dice_probability[j])) ** 2)
# coefficient shrinks if resources are identical
if i == tile1_resource:
coefficient /= 4
elif i != tile1_resource:
coefficient *= 4
if i == tile2_resource:
coefficient /= 4
elif i != tile2_resource:
coefficient *= 4
prob.append(coefficient)
# records coefficient for statistics
# self.statistics.log_coefficient(coefficient)
indexes = []
total = sum(prob)
for i in range(len(prob)):
prob[i] = prob[i] / total
indexes.append(i)
result = numpy.random.choice(indexes, 1, p=prob)
return candidates[numpy.asscalar(result[0])]
except Exception:
raise Exception
def generate_next_tile_possibilities_quad(self, index1, index2, index3):
    """Pick a (resource, dice) pair for a tile completing two dice triples.

    ``index1`` must be common to both triples (smallest of the indexes).
    The candidate's dice probability, summed with tiles (index1, index2) and
    with tiles (index1, index3), must fall inside the ``trip_coefficient``
    bounds.  One candidate is then sampled with probability proportional to
    a heuristic coefficient.

    Returns:
        tuple: the selected ``(resource, dice)`` pair.

    Note: the original wrapped the body in ``except Exception: raise Exception``,
    which discarded the real error and traceback; exceptions now propagate
    unchanged (still ``Exception`` subclasses, so existing handlers work).
    """
    tile1_resource = self.tile[index1]  # already set
    tile1_dice = self.tile_dice[index1]  # already set
    tile2_resource = self.tile[index2]  # already set
    tile2_dice = self.tile_dice[index2]  # already set (typo "already se" fixed)
    tile3_resource = self.tile[index3]  # already set
    tile3_dice = self.tile_dice[index3]  # already set
    low, high = self.trip_coefficient
    candidates = []
    prob = []
    for resource in self.resource_distribution.configuration:
        for dice in self.resource_distribution.configuration[resource]:
            trip1 = (self.dice_probability[tile1_dice] +
                     self.dice_probability[tile2_dice] +
                     self.dice_probability[dice])
            trip2 = (self.dice_probability[tile1_dice] +
                     self.dice_probability[tile3_dice] +
                     self.dice_probability[dice])
            if not (low <= trip1 <= high and low <= trip2 <= high):
                continue
            candidates.append((resource, dice))
            coefficient = self.initial_coefficient
            # coefficient grows if there are more resources available in the pool
            coefficient += len(self.resource_distribution.configuration[resource])
            # coefficient grows with the dice value's probability weight
            coefficient += self.dice_probability[dice]
            # coefficient shrinks quadratically with distance from the triple peak
            coefficient -= 2 * (abs(self.trip_peak - trip1) ** 2 +
                                abs(self.trip_peak - trip2) ** 2)
            # shrink when the resource duplicates a neighbouring tile, grow otherwise
            for neighbour_resource in (tile1_resource, tile2_resource, tile3_resource):
                if resource == neighbour_resource:
                    coefficient /= 4
                else:
                    coefficient *= 4
            prob.append(coefficient)
            # records coefficient for statistics
            # self.statistics.log_coefficient(coefficient)
    # Normalize weights and sample one candidate index.
    total = sum(prob)
    weights = [p / total for p in prob]
    # numpy.asscalar() was removed in NumPy 1.23; convert the index with int().
    chosen = numpy.random.choice(len(candidates), p=weights)
    return candidates[int(chosen)]
| 44.38125
| 118
| 0.502253
| 1,351
| 14,202
| 5.129534
| 0.113249
| 0.041558
| 0.098701
| 0.049062
| 0.776912
| 0.754401
| 0.738817
| 0.729293
| 0.717893
| 0.697403
| 0
| 0.022647
| 0.418603
| 14,202
| 319
| 119
| 44.520376
| 0.81664
| 0.132446
| 0
| 0.657778
| 0
| 0
| 0.004281
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057778
| false
| 0.004444
| 0.004444
| 0.004444
| 0.151111
| 0.004444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c5dd7c3fc4d6b6a3f52df9680bd2e4c470a367b8
| 2,481
|
py
|
Python
|
stubs.min/System/Windows/__init___parts/FontStyleConverter.py
|
ricardyn/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | 1
|
2021-02-02T13:39:16.000Z
|
2021-02-02T13:39:16.000Z
|
stubs.min/System/Windows/__init___parts/FontStyleConverter.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
stubs.min/System/Windows/__init___parts/FontStyleConverter.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
# IronPython .NET stub class: every method body is a ``pass`` placeholder and
# the docstrings mirror the .NET System.Windows.FontStyleConverter API.
# Auto-generated — do not add behavior here.
class FontStyleConverter(TypeConverter):
    """
    Converts instances of System.Windows.FontStyle to and from other data types.

    FontStyleConverter()
    """
    def CanConvertFrom(self,*__args):
        """
        CanConvertFrom(self: FontStyleConverter,td: ITypeDescriptorContext,t: Type) -> bool

        Returns a value that indicates whether this converter can convert an object of
        the given type to an instance of System.Windows.FontStyle.

        td: Describes the context information of a type.
        t: The type of the source that is being evaluated for conversion.
        Returns: true if the converter can convert the provided type to an instance of
        System.Windows.FontStyle; otherwise,false.
        """
        pass
    def CanConvertTo(self,*__args):
        """
        CanConvertTo(self: FontStyleConverter,context: ITypeDescriptorContext,destinationType: Type) -> bool

        Determines whether an instance of System.Windows.FontStyle can be converted to
        a different type.

        context: Context information of a type.
        destinationType: The desired type that that this instance of System.Windows.FontStyle is being
        evaluated for conversion to.
        Returns: true if the converter can convert this instance of System.Windows.FontStyle;
        otherwise,false.
        """
        pass
    def ConvertFrom(self,*__args):
        """
        ConvertFrom(self: FontStyleConverter,td: ITypeDescriptorContext,ci: CultureInfo,value: object) -> object

        Attempts to convert a specified object to an instance of
        System.Windows.FontStyle.

        td: Context information of a type.
        ci: System.Globalization.CultureInfo of the type being converted.
        value: The object being converted.
        Returns: The instance of System.Windows.FontStyle created from the converted value.
        """
        pass
    def ConvertTo(self,*__args):
        """
        ConvertTo(self: FontStyleConverter,context: ITypeDescriptorContext,culture: CultureInfo,value: object,destinationType: Type) -> object

        Attempts to convert an instance of System.Windows.FontStyle to a specified type.

        context: Context information of a type.
        culture: System.Globalization.CultureInfo of the type being converted.
        value: The instance of System.Windows.FontStyle to convert.
        destinationType: The type this instance of System.Windows.FontStyle is converted to.
        Returns: The object created from the converted instance of System.Windows.FontStyle.
        """
        pass
| 38.765625
| 138
| 0.723096
| 296
| 2,481
| 6.033784
| 0.236486
| 0.053751
| 0.100784
| 0.161254
| 0.470885
| 0.379059
| 0.298992
| 0.176932
| 0.12766
| 0.068309
| 0
| 0
| 0.210802
| 2,481
| 63
| 139
| 39.380952
| 0.912155
| 0.836356
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0.444444
| 0
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
c5e119efca3464c784f9c359664184645fe9ae47
| 205
|
py
|
Python
|
Codewars/Find_the_median_7_kyu.py
|
maxcohen31/A-bored-math-student
|
007beb4dabf7b4406f48e9a3a967c29d032eab89
|
[
"MIT"
] | null | null | null |
Codewars/Find_the_median_7_kyu.py
|
maxcohen31/A-bored-math-student
|
007beb4dabf7b4406f48e9a3a967c29d032eab89
|
[
"MIT"
] | null | null | null |
Codewars/Find_the_median_7_kyu.py
|
maxcohen31/A-bored-math-student
|
007beb4dabf7b4406f48e9a3a967c29d032eab89
|
[
"MIT"
] | null | null | null |
def median(array):
    """Return the median of *array*.

    The input is not modified; for an even number of elements the mean of
    the two middle values is returned (always a float in that case).
    """
    ordered = sorted(array)
    mid, odd = divmod(len(ordered), 2)
    if odd:
        return ordered[mid]
    return (ordered[mid - 1] + ordered[mid]) / 2
| 25.625
| 74
| 0.497561
| 28
| 205
| 3.642857
| 0.392857
| 0.313725
| 0.264706
| 0.372549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05
| 0.317073
| 205
| 7
| 75
| 29.285714
| 0.678571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c5fa1f4ff54e510dcf48e6329ce91ebc9e7b6a1c
| 124
|
py
|
Python
|
marcotti/etl/ejson/default.py
|
soccermetrics/marcotti
|
eda2f19bd6cbc6f9c7482e8fe31b2233b33aacfd
|
[
"MIT"
] | 30
|
2015-11-23T07:51:54.000Z
|
2020-06-29T16:11:55.000Z
|
marcotti/etl/ejson/default.py
|
soccermetrics/marcotti
|
eda2f19bd6cbc6f9c7482e8fe31b2233b33aacfd
|
[
"MIT"
] | 1
|
2016-06-26T18:44:47.000Z
|
2016-06-29T03:02:40.000Z
|
marcotti/etl/ejson/default.py
|
soccermetrics/marcotti
|
eda2f19bd6cbc6f9c7482e8fe31b2233b33aacfd
|
[
"MIT"
] | 8
|
2016-01-13T12:23:16.000Z
|
2021-10-11T07:39:33.000Z
|
from .base import BaseJSON, extract
class JSONExtractor(BaseJSON):
    """JSON extractor; all behavior is inherited from BaseJSON (defined in
    ``.base``, not visible here)."""
    pass
class JSONStatExtractor(BaseJSON):
    """JSON stat extractor; all behavior is inherited from BaseJSON (defined
    in ``.base``, not visible here)."""
    pass
| 12.4
| 35
| 0.75
| 13
| 124
| 7.153846
| 0.692308
| 0.258065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185484
| 124
| 9
| 36
| 13.777778
| 0.920792
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.4
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
680f529f2a5ddf73f6932238385137f6fa9100a5
| 4,325
|
py
|
Python
|
src/antlr4parser/RuleCheckTreeLexer.py
|
Zhou-Yucheng/auto-rule-transform
|
1a0760a6b6a5c4e911ca527d45bf43c4b8331830
|
[
"MIT"
] | 7
|
2020-10-09T06:28:18.000Z
|
2022-01-14T16:48:30.000Z
|
src/antlr4parser/RuleCheckTreeLexer.py
|
SkydustZ/auto-rule-transform
|
37b1d800d2de53e3c588d25be08304cca938f276
|
[
"MIT"
] | null | null | null |
src/antlr4parser/RuleCheckTreeLexer.py
|
SkydustZ/auto-rule-transform
|
37b1d800d2de53e3c588d25be08304cca938f276
|
[
"MIT"
] | 4
|
2021-04-18T07:48:56.000Z
|
2022-03-10T07:43:50.000Z
|
# Generated from .\RuleCheckTree.g4 by ANTLR 4.8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
    """Return the serialized ATN string for RuleCheckTreeLexer.

    Machine-generated by ANTLR 4.8 from RuleCheckTree.g4 (see file header).
    The escaped string below encodes the lexer's state machine and is
    deserialized by ATNDeserializer — do not edit by hand.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\13")
        buf.write("\177\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t")
        buf.write("\7\4\b\t\b\4\t\t\t\4\n\t\n\3\2\3\2\7\2\30\n\2\f\2\16\2")
        buf.write("\33\13\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\3\3\3\7\3&\n\3")
        buf.write("\f\3\16\3)\13\3\3\3\3\3\3\3\3\3\3\3\3\3\3\4\3\4\7\4\63")
        buf.write("\n\4\f\4\16\4\66\13\4\3\4\3\4\5\4:\n\4\3\4\3\4\3\4\3\4")
        buf.write("\3\4\3\4\3\5\3\5\7\5D\n\5\f\5\16\5G\13\5\3\5\3\5\5\5K")
        buf.write("\n\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\6\3\6\7\6V\n\6\f\6")
        buf.write("\16\6Y\13\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\7\3\7\7")
        buf.write("\7e\n\7\f\7\16\7h\13\7\3\7\3\7\3\7\3\7\3\7\3\7\3\b\6\b")
        buf.write("q\n\b\r\b\16\br\3\b\3\b\3\t\3\t\3\n\5\nz\n\n\3\n\3\n\3")
        buf.write("\n\3\n\t\31\'\64EWfr\2\13\3\3\5\4\7\5\t\6\13\7\r\b\17")
        buf.write("\t\21\n\23\13\3\2\3\5\2\f\f\17\17]]\2\u0088\2\3\3\2\2")
        buf.write("\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2")
        buf.write("\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\3\25")
        buf.write("\3\2\2\2\5#\3\2\2\2\7\60\3\2\2\2\tA\3\2\2\2\13S\3\2\2")
        buf.write("\2\rb\3\2\2\2\17p\3\2\2\2\21v\3\2\2\2\23y\3\2\2\2\25\31")
        buf.write("\7]\2\2\26\30\5\21\t\2\27\26\3\2\2\2\30\33\3\2\2\2\31")
        buf.write("\32\3\2\2\2\31\27\3\2\2\2\32\34\3\2\2\2\33\31\3\2\2\2")
        buf.write("\34\35\7\61\2\2\35\36\7r\2\2\36\37\7t\2\2\37 \7q\2\2 ")
        buf.write("!\7r\2\2!\"\7_\2\2\"\4\3\2\2\2#\'\7]\2\2$&\5\21\t\2%$")
        buf.write("\3\2\2\2&)\3\2\2\2\'(\3\2\2\2\'%\3\2\2\2(*\3\2\2\2)\'")
        buf.write("\3\2\2\2*+\7\61\2\2+,\7e\2\2,-\7o\2\2-.\7r\2\2./\7_\2")
        buf.write("\2/\6\3\2\2\2\60\64\7]\2\2\61\63\5\21\t\2\62\61\3\2\2")
        buf.write("\2\63\66\3\2\2\2\64\65\3\2\2\2\64\62\3\2\2\2\65\67\3\2")
        buf.write("\2\2\66\64\3\2\2\2\679\7\61\2\28:\7C\2\298\3\2\2\29:\3")
        buf.write("\2\2\2:;\3\2\2\2;<\7T\2\2<=\7q\2\2=>\7d\2\2>?\7l\2\2?")
        buf.write("@\7_\2\2@\b\3\2\2\2AE\7]\2\2BD\5\21\t\2CB\3\2\2\2DG\3")
        buf.write("\2\2\2EF\3\2\2\2EC\3\2\2\2FH\3\2\2\2GE\3\2\2\2HJ\7\61")
        buf.write("\2\2IK\7C\2\2JI\3\2\2\2JK\3\2\2\2KL\3\2\2\2LM\7T\2\2M")
        buf.write("N\7r\2\2NO\7t\2\2OP\7q\2\2PQ\7r\2\2QR\7_\2\2R\n\3\2\2")
        buf.write("\2SW\7]\2\2TV\5\21\t\2UT\3\2\2\2VY\3\2\2\2WX\3\2\2\2W")
        buf.write("U\3\2\2\2XZ\3\2\2\2YW\3\2\2\2Z[\7\61\2\2[\\\7q\2\2\\]")
        buf.write("\7d\2\2]^\7l\2\2^_\7_\2\2_`\3\2\2\2`a\b\6\2\2a\f\3\2\2")
        buf.write("\2bf\7]\2\2ce\5\21\t\2dc\3\2\2\2eh\3\2\2\2fg\3\2\2\2f")
        buf.write("d\3\2\2\2gi\3\2\2\2hf\3\2\2\2ij\7\61\2\2jk\7Q\2\2kl\7")
        buf.write("_\2\2lm\3\2\2\2mn\b\7\2\2n\16\3\2\2\2oq\5\21\t\2po\3\2")
        buf.write("\2\2qr\3\2\2\2rs\3\2\2\2rp\3\2\2\2st\3\2\2\2tu\b\b\2\2")
        buf.write("u\20\3\2\2\2vw\n\2\2\2w\22\3\2\2\2xz\7\17\2\2yx\3\2\2")
        buf.write("\2yz\3\2\2\2z{\3\2\2\2{|\7\f\2\2|}\3\2\2\2}~\b\n\2\2~")
        buf.write("\24\3\2\2\2\r\2\31\'\649EJWfry\3\b\2\2")
        return buf.getvalue()
class RuleCheckTreeLexer(Lexer):
    # Machine-generated by ANTLR 4.8 from RuleCheckTree.g4 — do not edit by hand.

    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Token type constants; order matches symbolicNames / ruleNames below.
    PROP = 1
    CMP = 2
    ROBJ = 3
    RPROP = 4
    OBJ = 5
    OTHER = 6
    OTHERS = 7
    CHAR = 8
    NEWLINE = 9

    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]

    modeNames = [ "DEFAULT_MODE" ]

    # No literal token names in this grammar.
    literalNames = [ "<INVALID>",
 ]

    symbolicNames = [ "<INVALID>",
            "PROP", "CMP", "ROBJ", "RPROP", "OBJ", "OTHER", "OTHERS", "CHAR",
            "NEWLINE" ]

    ruleNames = [ "PROP", "CMP", "ROBJ", "RPROP", "OBJ", "OTHER", "OTHERS",
                  "CHAR", "NEWLINE" ]

    grammarFileName = "RuleCheckTree.g4"

    def __init__(self, input=None, output:TextIO = sys.stdout):
        """Create the lexer; checkVersion guards against ANTLR runtime mismatch."""
        super().__init__(input, output)
        self.checkVersion("4.8")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
| 45.526316
| 103
| 0.526705
| 1,111
| 4,325
| 2.030603
| 0.181818
| 0.137411
| 0.101064
| 0.069149
| 0.256649
| 0.140071
| 0.102394
| 0.079787
| 0.079787
| 0.04344
| 0
| 0.260953
| 0.160925
| 4,325
| 94
| 104
| 46.010638
| 0.360705
| 0.010636
| 0
| 0
| 1
| 0.539474
| 0.543158
| 0.506667
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0
| 0.052632
| 0
| 0.328947
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a87229944543498fc91d732ff1fd00136b519567
| 4,796
|
py
|
Python
|
py3canvas/tests/modules.py
|
tylerclair/py3canvas
|
7485d458606b65200f0ffa5bbe597a9d0bee189f
|
[
"MIT"
] | null | null | null |
py3canvas/tests/modules.py
|
tylerclair/py3canvas
|
7485d458606b65200f0ffa5bbe597a9d0bee189f
|
[
"MIT"
] | null | null | null |
py3canvas/tests/modules.py
|
tylerclair/py3canvas
|
7485d458606b65200f0ffa5bbe597a9d0bee189f
|
[
"MIT"
] | null | null | null |
"""Modules API Tests for Version 1.0.
This is a testing template for the generated ModulesAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.modules import ModulesAPI
from py3canvas.apis.modules import Module
from py3canvas.apis.modules import Completionrequirement
from py3canvas.apis.modules import Contentdetails
from py3canvas.apis.modules import Moduleitem
from py3canvas.apis.modules import Moduleitemsequencenode
from py3canvas.apis.modules import Moduleitemsequence
class TestModulesAPI(unittest.TestCase):
"""Tests for the ModulesAPI."""
def setUp(self):
self.client = ModulesAPI(secrets.instance_address, secrets.access_token)
def test_list_modules(self):
"""Integration test for the ModulesAPI.list_modules method."""
course_id = None # Change me!!
r = self.client.list_modules(
course_id, include=None, search_term=None, student_id=None
)
def test_show_module(self):
"""Integration test for the ModulesAPI.show_module method."""
course_id = None # Change me!!
id = None # Change me!!
r = self.client.show_module(course_id, id, include=None, student_id=None)
def test_create_module(self):
"""Integration test for the ModulesAPI.create_module method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_update_module(self):
"""Integration test for the ModulesAPI.update_module method."""
# This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_delete_module(self):
"""Integration test for the ModulesAPI.delete_module method."""
course_id = None # Change me!!
id = None # Change me!!
r = self.client.delete_module(course_id, id)
def test_re_lock_module_progressions(self):
"""Integration test for the ModulesAPI.re_lock_module_progressions method."""
# This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_list_module_items(self):
"""Integration test for the ModulesAPI.list_module_items method."""
course_id = None # Change me!!
module_id = None # Change me!!
r = self.client.list_module_items(
course_id, module_id, include=None, search_term=None, student_id=None
)
def test_show_module_item(self):
"""Integration test for the ModulesAPI.show_module_item method."""
course_id = None # Change me!!
module_id = None # Change me!!
id = None # Change me!!
r = self.client.show_module_item(
course_id, id, module_id, include=None, student_id=None
)
def test_create_module_item(self):
"""Integration test for the ModulesAPI.create_module_item method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_update_module_item(self):
"""Integration test for the ModulesAPI.update_module_item method."""
# This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_select_mastery_path(self):
"""Integration test for the ModulesAPI.select_mastery_path method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_delete_module_item(self):
"""Integration test for the ModulesAPI.delete_module_item method."""
course_id = None # Change me!!
module_id = None # Change me!!
id = None # Change me!!
r = self.client.delete_module_item(course_id, id, module_id)
def test_mark_module_item_as_done_not_done(self):
"""Integration test for the ModulesAPI.mark_module_item_as_done_not_done method."""
# This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_get_module_item_sequence(self):
"""Integration test for the ModulesAPI.get_module_item_sequence method."""
course_id = None # Change me!!
r = self.client.get_module_item_sequence(
course_id, asset_id=None, asset_type=None
)
def test_mark_module_item_read(self):
"""Integration test for the ModulesAPI.mark_module_item_read method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
| 40.302521
| 126
| 0.699333
| 628
| 4,796
| 5.148089
| 0.138535
| 0.035261
| 0.079183
| 0.102072
| 0.802041
| 0.725951
| 0.693474
| 0.641509
| 0.523044
| 0.469842
| 0
| 0.002427
| 0.226647
| 4,796
| 118
| 127
| 40.644068
| 0.869237
| 0.453086
| 0
| 0.338462
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.246154
| false
| 0.123077
| 0.153846
| 0
| 0.415385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
a8753227562dac4c0f835fdc3c772dcd3501bf9b
| 64
|
py
|
Python
|
count.py
|
karimjamali/Class-1
|
64a97e143dd0a9bd0e177481a6a858638bfd3766
|
[
"Apache-2.0"
] | null | null | null |
count.py
|
karimjamali/Class-1
|
64a97e143dd0a9bd0e177481a6a858638bfd3766
|
[
"Apache-2.0"
] | null | null | null |
count.py
|
karimjamali/Class-1
|
64a97e143dd0a9bd0e177481a6a858638bfd3766
|
[
"Apache-2.0"
] | null | null | null |
# Counting demo: print 0..8, then 0..4, one number per line.
# Fixed: the original used the Python-2-only ``print i`` statement, which is a
# SyntaxError under Python 3; ``print(i)`` behaves identically on both.
for i in range(9):
    print(i)

for x in range(5):
    print(x)
| 9.142857
| 19
| 0.5625
| 14
| 64
| 2.571429
| 0.571429
| 0.388889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 0.34375
| 64
| 6
| 20
| 10.666667
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
a87aa548ce2589611fcee1c5b5c397f3c7208777
| 195
|
py
|
Python
|
decharges/decharge/templatetags/data_manipulation.py
|
Brndan/decharges-sudeducation
|
6807b619f48b3c0b1dda4cd56d0a1cf46695e1f3
|
[
"MIT"
] | null | null | null |
decharges/decharge/templatetags/data_manipulation.py
|
Brndan/decharges-sudeducation
|
6807b619f48b3c0b1dda4cd56d0a1cf46695e1f3
|
[
"MIT"
] | null | null | null |
decharges/decharge/templatetags/data_manipulation.py
|
Brndan/decharges-sudeducation
|
6807b619f48b3c0b1dda4cd56d0a1cf46695e1f3
|
[
"MIT"
] | null | null | null |
from django import template
register = template.Library()
@register.filter
def index(array, i):
    """Template filter: return ``array[i]`` (e.g. ``{{ mylist|index:3 }}``).

    Works with any indexable object; negative ``i`` follows Python semantics.
    """
    return array[i]
@register.filter(name="abs")
def abs_filter(value):
    """Template filter ``abs``: return the absolute value of ``value``."""
    return abs(value)
| 13.928571
| 29
| 0.717949
| 27
| 195
| 5.148148
| 0.555556
| 0.201439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158974
| 195
| 13
| 30
| 15
| 0.847561
| 0
| 0
| 0
| 0
| 0
| 0.015385
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.125
| 0.25
| 0.625
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.