hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
be1f5263e315eef10dfb3b8d5495dced90a35ab2
| 68
|
py
|
Python
|
timecardgenerator/models/__init__.py
|
TBPixel/Sage300-TimecardGenerator
|
85646a06c81d016e0b21104a456f9cf4eee6de62
|
[
"MIT"
] | null | null | null |
timecardgenerator/models/__init__.py
|
TBPixel/Sage300-TimecardGenerator
|
85646a06c81d016e0b21104a456f9cf4eee6de62
|
[
"MIT"
] | null | null | null |
timecardgenerator/models/__init__.py
|
TBPixel/Sage300-TimecardGenerator
|
85646a06c81d016e0b21104a456f9cf4eee6de62
|
[
"MIT"
] | null | null | null |
from .employee import Employee
from .spreadsheet import Spreadsheet
| 22.666667
| 36
| 0.852941
| 8
| 68
| 7.25
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 68
| 2
| 37
| 34
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
07975b68c2e184eff1402f18ad6d07b849b3f3d6
| 132
|
py
|
Python
|
AutamaProfiles/admin.py
|
smilinhawaiian/Autama-Backend
|
afdc43a39f3d79b50c4dfd0ec50b4f06add56f1e
|
[
"MIT"
] | null | null | null |
AutamaProfiles/admin.py
|
smilinhawaiian/Autama-Backend
|
afdc43a39f3d79b50c4dfd0ec50b4f06add56f1e
|
[
"MIT"
] | null | null | null |
AutamaProfiles/admin.py
|
smilinhawaiian/Autama-Backend
|
afdc43a39f3d79b50c4dfd0ec50b4f06add56f1e
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import AutamaProfile
# Register your models here.
admin.site.register(AutamaProfile)
| 22
| 34
| 0.825758
| 17
| 132
| 6.411765
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 132
| 5
| 35
| 26.4
| 0.931624
| 0.19697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
07a096f4597d6b7c7a95de0d0249f3b09562c34e
| 268
|
py
|
Python
|
jj/mock/_history/__init__.py
|
nikitanovosibirsk/jj
|
ea75c932e476c0dc3f282141877a7199ee4a81a9
|
[
"Apache-2.0"
] | 4
|
2020-09-08T08:14:21.000Z
|
2022-01-27T19:22:53.000Z
|
jj/mock/_history/__init__.py
|
nikitanovosibirsk/jj
|
ea75c932e476c0dc3f282141877a7199ee4a81a9
|
[
"Apache-2.0"
] | 19
|
2018-02-13T05:51:25.000Z
|
2022-03-27T22:48:11.000Z
|
jj/mock/_history/__init__.py
|
nikitanovosibirsk/jj
|
ea75c932e476c0dc3f282141877a7199ee4a81a9
|
[
"Apache-2.0"
] | 3
|
2017-11-17T13:25:23.000Z
|
2022-02-03T12:57:00.000Z
|
from ._history_item import HistoryItem
from ._history_repository import HistoryRepository
from ._history_request import HistoryRequest
from ._history_response import HistoryResponse
__all__ = ("HistoryRepository", "HistoryRequest", "HistoryResponse", "HistoryItem",)
| 38.285714
| 84
| 0.843284
| 25
| 268
| 8.56
| 0.48
| 0.205607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085821
| 268
| 6
| 85
| 44.666667
| 0.873469
| 0
| 0
| 0
| 0
| 0
| 0.212687
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
07b7d8bf558882bdee18de92d0bf80f7de8a5273
| 134
|
py
|
Python
|
src/routes/__init__.py
|
amoghmadan/Python-Starlette-REST-Starter
|
b6bcfaf85f4faf070abb99ccd90dee074e90340b
|
[
"MIT"
] | 3
|
2020-10-30T11:29:10.000Z
|
2020-12-11T08:40:53.000Z
|
src/routes/__init__.py
|
amoghmadan/Python-Starlette-REST-Starter
|
b6bcfaf85f4faf070abb99ccd90dee074e90340b
|
[
"MIT"
] | null | null | null |
src/routes/__init__.py
|
amoghmadan/Python-Starlette-REST-Starter
|
b6bcfaf85f4faf070abb99ccd90dee074e90340b
|
[
"MIT"
] | null | null | null |
from starlette.routing import Mount
from . import hello_world
routes = [
Mount('/api/hello_world', routes=hello_world.routes),
]
| 19.142857
| 57
| 0.746269
| 18
| 134
| 5.388889
| 0.5
| 0.309278
| 0.494845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141791
| 134
| 6
| 58
| 22.333333
| 0.843478
| 0
| 0
| 0
| 0
| 0
| 0.119403
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
580189562f7d89300280dc2e0a3ace61a4b5003c
| 4,867
|
py
|
Python
|
raiden/transfer/utils.py
|
jjtechuy/lumino
|
74b7d0e21f6a71a92c1fd285a5a4352c37c53a5f
|
[
"MIT"
] | 1
|
2020-06-30T00:36:08.000Z
|
2020-06-30T00:36:08.000Z
|
raiden/transfer/utils.py
|
jjtechuy/lumino
|
74b7d0e21f6a71a92c1fd285a5a4352c37c53a5f
|
[
"MIT"
] | null | null | null |
raiden/transfer/utils.py
|
jjtechuy/lumino
|
74b7d0e21f6a71a92c1fd285a5a4352c37c53a5f
|
[
"MIT"
] | null | null | null |
import random
from eth_utils import to_checksum_address
from web3 import Web3
from raiden.constants import EMPTY_HASH
from raiden.storage import sqlite
from raiden.utils.serialization import serialize_bytes
from raiden.utils.typing import (
Address,
BalanceHash,
ChainID,
ChannelID,
Locksroot,
TokenAmount,
TokenNetworkID,
)
def get_state_change_with_balance_proof_by_balance_hash(
storage: sqlite.SQLiteStorage,
chain_id: ChainID,
token_network_identifier: TokenNetworkID,
channel_identifier: ChannelID,
balance_hash: BalanceHash,
sender: Address,
) -> sqlite.StateChangeRecord:
""" Returns the state change which contains the corresponding balance
proof.
Use this function to find a balance proof for a call to settle, which only
has the blinded balance proof data.
"""
return storage.get_latest_state_change_by_data_field({
'balance_proof.chain_id': chain_id,
'balance_proof.token_network_identifier': to_checksum_address(token_network_identifier),
'balance_proof.channel_identifier': str(channel_identifier),
'balance_proof.balance_hash': serialize_bytes(balance_hash),
'balance_proof.sender': to_checksum_address(sender),
})
def get_state_change_with_balance_proof_by_locksroot(
storage: sqlite.SQLiteStorage,
chain_id: ChainID,
token_network_identifier: TokenNetworkID,
channel_identifier: ChannelID,
locksroot: Locksroot,
sender: Address,
) -> sqlite.StateChangeRecord:
""" Returns the state change which contains the corresponding balance
proof.
Use this function to find a balance proof for a call to unlock, which only
happens after settle, so the channel has the unblinded version of the
balance proof.
"""
return storage.get_latest_state_change_by_data_field({
'balance_proof.chain_id': chain_id,
'balance_proof.token_network_identifier': to_checksum_address(token_network_identifier),
'balance_proof.channel_identifier': str(channel_identifier),
'balance_proof.locksroot': serialize_bytes(locksroot),
'balance_proof.sender': to_checksum_address(sender),
})
def get_event_with_balance_proof_by_balance_hash(
storage: sqlite.SQLiteStorage,
chain_id: ChainID,
token_network_identifier: TokenNetworkID,
channel_identifier: ChannelID,
balance_hash: BalanceHash,
) -> sqlite.EventRecord:
""" Returns the event which contains the corresponding balance
proof.
Use this function to find a balance proof for a call to settle, which only
has the blinded balance proof data.
"""
return storage.get_latest_event_by_data_field({
'balance_proof.chain_id': chain_id,
'balance_proof.token_network_identifier': to_checksum_address(token_network_identifier),
'balance_proof.channel_identifier': str(channel_identifier),
'balance_proof.balance_hash': serialize_bytes(balance_hash),
})
def get_event_with_balance_proof_by_locksroot(
storage: sqlite.SQLiteStorage,
chain_id: ChainID,
token_network_identifier: TokenNetworkID,
channel_identifier: ChannelID,
locksroot: Locksroot,
) -> sqlite.EventRecord:
""" Returns the event which contains the corresponding balance proof.
Use this function to find a balance proof for a call to unlock, which only
happens after settle, so the channel has the unblinded version of the
balance proof.
"""
return storage.get_latest_event_by_data_field({
'balance_proof.chain_id': chain_id,
'balance_proof.token_network_identifier': to_checksum_address(token_network_identifier),
'balance_proof.channel_identifier': str(channel_identifier),
'balance_proof.locksroot': serialize_bytes(locksroot),
})
def hash_balance_data(
transferred_amount: TokenAmount,
locked_amount: TokenAmount,
locksroot: Locksroot,
) -> bytes:
assert locksroot != b''
assert len(locksroot) == 32
if transferred_amount == 0 and locked_amount == 0 and locksroot == EMPTY_HASH:
return EMPTY_HASH
return Web3.soliditySha3( # pylint: disable=no-value-for-parameter
['uint256', 'uint256', 'bytes32'],
[transferred_amount, locked_amount, locksroot],
)
def pseudo_random_generator_from_json(data):
# JSON serializes a tuple as a list
pseudo_random_generator = random.Random()
state = list(data['pseudo_random_generator']) # copy
state[1] = tuple(state[1]) # fix type
pseudo_random_generator.setstate(tuple(state))
return pseudo_random_generator
def is_valid_secret_reveal(state_change, transfer_secrethash, secret):
return secret != EMPTY_HASH and state_change.secrethash == transfer_secrethash
| 35.525547
| 96
| 0.730018
| 578
| 4,867
| 5.847751
| 0.186851
| 0.12071
| 0.078107
| 0.021302
| 0.725444
| 0.725444
| 0.725444
| 0.72071
| 0.712426
| 0.684615
| 0
| 0.004608
| 0.197452
| 4,867
| 136
| 97
| 35.786765
| 0.860727
| 0.187179
| 0
| 0.576087
| 0
| 0
| 0.142635
| 0.126815
| 0
| 0
| 0
| 0
| 0.021739
| 1
| 0.076087
| false
| 0
| 0.076087
| 0.01087
| 0.23913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ed25055fb48c657f7489f77a748fa728dd86a591
| 1,234
|
py
|
Python
|
pywineds/run.py
|
cjerdonek/wineds-converter
|
61e42b959b14a6f62db79059549f701e5a26ec69
|
[
"BSD-3-Clause"
] | 1
|
2020-11-13T09:48:33.000Z
|
2020-11-13T09:48:33.000Z
|
pywineds/run.py
|
cjerdonek/wineds-converter
|
61e42b959b14a6f62db79059549f701e5a26ec69
|
[
"BSD-3-Clause"
] | null | null | null |
pywineds/run.py
|
cjerdonek/wineds-converter
|
61e42b959b14a6f62db79059549f701e5a26ec69
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Usage: wineds-convert ELECTION_NAME PRECINCTS.csv WINEDS.txt OUTPUT_BASE
Parses the given files and writes a new output file to stdout.
The new output file is tab-delimited (.tsv). Tabs are used since some
fields contain commas (e.g. "US Representative, District 12").
Arguments:
ELECTION_NAME: the name of the election for display purposes.
This appears in the first line of the output file.
An example value is "San Francisco June 3, 2014 Election".
PRECINCTS.csv: path to a CSV file mapping precincts to their
different districts and neighborhoods.
WINEDS.txt: path to a TXT export file from the WinEDS Reporting Tool.
The report contains vote totals for each precinct in each contest,
along with "registered voters" and "ballots cast" totals.
OUTPUT_BASE: desired output path base. The file extension will be
appended to the argument provided, so the output paths will have the
form "OUTPUT_BASE.tsv" and "OUTPUT_BASE.xlsx".
In the above, relative paths will be interpreted as relative to the
current working directory.
"""
import sys
import pywineds.main
def main():
"""
The main console_script setup.py entry point.
"""
pywineds.main.main(__doc__, sys.argv)
| 30.85
| 72
| 0.747164
| 190
| 1,234
| 4.794737
| 0.589474
| 0.043908
| 0.02854
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007028
| 0.192869
| 1,234
| 39
| 73
| 31.641026
| 0.907631
| 0.903566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9c0e52b1a3967dd2310513d7b72c2515247f9eb4
| 7,178
|
py
|
Python
|
script/Constants.py
|
johanlahti/urban-lu-model
|
a8f7419b6f13bc4c273f1c0c6262a5daf4f87deb
|
[
"MIT"
] | 29
|
2015-01-03T15:17:01.000Z
|
2022-01-16T02:27:53.000Z
|
script/Constants.py
|
lanlugar/urban-lu-model
|
a8f7419b6f13bc4c273f1c0c6262a5daf4f87deb
|
[
"MIT"
] | null | null | null |
script/Constants.py
|
lanlugar/urban-lu-model
|
a8f7419b6f13bc4c273f1c0c6262a5daf4f87deb
|
[
"MIT"
] | 13
|
2017-11-16T08:06:43.000Z
|
2021-07-01T14:44:19.000Z
|
import math
distances = {}
for col in range(-8, 8+1):
for row in range(-8, 8+1):
dist = int(round((math.sqrt((row**2+col**2)))*100))
if dist<=800 and dist!=0:
try:
distances[dist].append((col, row))
except:
distances[dist] = [(col, row)]
#distances = {
# 100: [
# (-1, 0),
# (0, 1),
# (1, 0),
# (0, -1)],
# 141: [
# (-1,-1),
# (-1,1),
# (1,1),
# (1,-1)],
# 200: [
# (-2,0),
# (0,2),
# (2,0),
# (0,-2)],
# 223: [
# (-2,-1),
# (-2,1),
# (-1,2),
# (1,2),
# (2,1),
# (2,-1),
# (1,-2),
# (-1,-2)],
# 283: [
# (-2,-2),
# (-2,2),
# (2,2),
# (2,-2) ],
# 300: [
# (-3,0),
# (0,3),
# (3,0),
# (0,-3)],
# 316: [
# (-3,-1),
# (-3,1),
# (-1,3),
# (1,3),
# (3,1),
# (3,-1),
# (1,-3),
# (-1,-3)],
# 361: [
# (-3,-2),
# (-3,2),
# (-2,3),
# (2,3),
# (3,2),
# (3,-2),
# (2,-3),
# (-2,-3)],
# 400: [
# (-4,0),
# (0,4),
# (4,0),
# (0,-4)],
# 412: [
# (-4,-1),
# (-4,1),
# (-1,4),
# (1,4),
# (4,1),
# (4,-1),
# (1,-4),
# (-1,-4)],
# 424: [
# (-3,-3),
# (-3,3),
# (3,3),
# (3,-3)],
# 447: [
# (-4,-2),
# (-4,2),
# (-2,4),
# (2,4),
# (4,2),
# (4,-2),
# (2,-4),
# (-2,-4)],
# 500: [
# (-4,-3),
# (-4,3),
# (-3,4),
# (3,4),
# (4,3),
# (4,-3),
# (3,-4),
# (-3,-4),
# (-5,0),
# (0,5),
# (5,0),
# (0,-5)],
# 510: [
# (-5,-1),
# (-5,1),
# (-1,5),
# (1,5),
# (5,1),
# (5,-1),
# (1,-5),
# (-1,-5)],
# 539: [
# (-5,-2),
# (-5,2),
# (-2,5),
# (2,5),
# (5,2),
# (5,-2),
# (2,-5),
# (-2,-5)],
# 566: [
# (-4,-4),
# (-4,4),
# (4,4),
# (4,-4)],
# 583: [
# (-5,-3),
# (-5,3),
# (-3,5),
# (3,5),
# (5,3),
# (5,-3),
# (3,-5),
# (-3,-5)],
# 600: [
# (-6,0),
# (0,6),
# (6,0),
# (0,-6)],
# 608: [
# (-6,-1),
# (-6,1),
# (-1,6),
# (1,6),
# (6,1),
# (6,-1),
# (1,-6),
# (-1,-6)],
# 632: [
# (-6,-2),
# (-6,2),
# (-2,6),
# (2,6),
# (6,2),
# (6,-2),
# (2,-6),
# (-2,-6)],
# 640: [
# (-5,-4),
# (-5,4),
# (-4,5),
# (4,5),
# (5,4),
# (5,-4),
# (4,-5),
# (-4,-5)],
# 671: [
# (-6,-3),
# (-6,3),
# (-3,6),
# (3,6),
# (6,3),
# (6,-3),
# (3,-6),
# (-3,-6)],
# 700: [
# (-7,0),
# (0,7),
# (7,0),
# (0,-7)],
# 707: [
# (-7,-1),
# (-7,1),
# (-1,7),
# (1,7),
# (7,1),
# (7,-1),
# (1,-7),
# (-1,-7),
# (-5,-5),
# (-5,5),
# (5,5),
# (5,-5)],
# 721: [
# (-6,-4),
# (-6,4),
# (-4,6),
# (4,6),
# (6,4),
# (6,-4),
# (4,-6),
# (-4,-6)],
# 728: [
# (-7,-2),
# (-7,2),
# (-2,7),
# (2,7),
# (7,2),
# (7,-2),
# (2,-7),
# (-2,-7)],
# 761: [
# (-3,-7),
# (-7,3),
# (-3,7),
# (3,7),
# (7,3),
# (7,-3),
# (3,-7),
# (-3,-7)],
# 781: [
# (-5,-6),
# (-6,5),
# (-5,6),
# (5,6),
# (6,5),
# (6,-5),
# (5,-6),
# (-5,-6)],
# 800: [
# (-8,0),
# (0,8),
# (8,0),
# (0,-8)]
# }
| 29.908333
| 60
| 0.089161
| 466
| 7,178
| 1.373391
| 0.118026
| 0.0625
| 0.028125
| 0.03125
| 0.53125
| 0.5
| 0.4625
| 0.4
| 0
| 0
| 0
| 0.25
| 0.724714
| 7,178
| 240
| 61
| 29.908333
| 0.073887
| 0.859432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9c1635e76bddd72835e571663967497f18d0f364
| 42
|
py
|
Python
|
tests/__init__.py
|
campfireman/abalone-engine
|
b3e2c998fff137e4b2a3f71e86b168896e409685
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
campfireman/abalone-engine
|
b3e2c998fff137e4b2a3f71e86b168896e409685
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
campfireman/abalone-engine
|
b3e2c998fff137e4b2a3f71e86b168896e409685
|
[
"MIT"
] | null | null | null |
"""The test module for abalone_engine."""
| 21
| 41
| 0.714286
| 6
| 42
| 4.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 42
| 1
| 42
| 42
| 0.783784
| 0.833333
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9c33540eed3e5b78417e5a9c8e7e8f350aa7dfea
| 302
|
py
|
Python
|
__init__.py
|
ekoly/DataStructures
|
4e6b7edfb1b4c400c7c18d08f47a95ddafd6343c
|
[
"MIT"
] | null | null | null |
__init__.py
|
ekoly/DataStructures
|
4e6b7edfb1b4c400c7c18d08f47a95ddafd6343c
|
[
"MIT"
] | null | null | null |
__init__.py
|
ekoly/DataStructures
|
4e6b7edfb1b4c400c7c18d08f47a95ddafd6343c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from DataStructures.LinkedListClasses import LinkedList
from DataStructures.DoublyLinkedListClasses import DoubleLinkedList
from DataStructures.StackClasses import Stack
from DataStructures.QueueClasses import Queue, Deque
from DataStructures.BSTClasses import BinarySearchTree
| 37.75
| 67
| 0.884106
| 30
| 302
| 8.9
| 0.6
| 0.337079
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003597
| 0.07947
| 302
| 7
| 68
| 43.142857
| 0.956835
| 0.069536
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9c44ac7443918fcc302bd5a1693b4ab32b4b4825
| 44
|
py
|
Python
|
tests/__init__.py
|
mattyocode/hypermodern-python-tutorial
|
8a87b1712bb0261688aa12938a246fa191d6c013
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
mattyocode/hypermodern-python-tutorial
|
8a87b1712bb0261688aa12938a246fa191d6c013
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
mattyocode/hypermodern-python-tutorial
|
8a87b1712bb0261688aa12938a246fa191d6c013
|
[
"MIT"
] | null | null | null |
"""Test suite for console and wikipedia."""
| 22
| 43
| 0.704545
| 6
| 44
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 44
| 1
| 44
| 44
| 0.815789
| 0.840909
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9c4849b945a9fd4fbcee8d2b63db9eeef4109e1a
| 170
|
py
|
Python
|
djhug/__init__.py
|
sturmianseq/django-hug
|
778b16f568ce9fdb01f360b7bff89c9d8b6c2ef7
|
[
"MIT"
] | 3
|
2019-03-11T16:40:37.000Z
|
2020-08-06T18:27:48.000Z
|
djhug/__init__.py
|
sturmianseq/django-hug
|
778b16f568ce9fdb01f360b7bff89c9d8b6c2ef7
|
[
"MIT"
] | null | null | null |
djhug/__init__.py
|
sturmianseq/django-hug
|
778b16f568ce9fdb01f360b7bff89c9d8b6c2ef7
|
[
"MIT"
] | 1
|
2021-08-18T12:54:51.000Z
|
2021-08-18T12:54:51.000Z
|
from .content_negotiation import request_parser, response_renderer
from .shortcuts import request, response
from .routes import Routes, route
from .arguments import Body
| 34
| 66
| 0.847059
| 22
| 170
| 6.409091
| 0.590909
| 0.184397
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111765
| 170
| 4
| 67
| 42.5
| 0.933775
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
92c9c6421145a2a8f3a2ec8dab549c01693e9e3d
| 107
|
py
|
Python
|
gym_numerai/envs/__init__.py
|
SethKitchen/numerai-gym
|
b3e66841fe0251cf3330be084ca943fd6457a0f6
|
[
"MIT"
] | null | null | null |
gym_numerai/envs/__init__.py
|
SethKitchen/numerai-gym
|
b3e66841fe0251cf3330be084ca943fd6457a0f6
|
[
"MIT"
] | null | null | null |
gym_numerai/envs/__init__.py
|
SethKitchen/numerai-gym
|
b3e66841fe0251cf3330be084ca943fd6457a0f6
|
[
"MIT"
] | null | null | null |
"""numerai Gym Enviornments."""
# First party
from gym_numerai.envs.numerai_env import numeraiEnv # noqa
| 21.4
| 59
| 0.766355
| 14
| 107
| 5.714286
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130841
| 107
| 4
| 60
| 26.75
| 0.860215
| 0.401869
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1314c4f9041aa01357fa611d0867648d08a31dbd
| 13,325
|
py
|
Python
|
tests/wallet/test_wallet.py
|
thomashuber/chia-blockchain
|
fd080917fec4038cf6a0f1c2ba8a2cd63a002c32
|
[
"Apache-2.0"
] | null | null | null |
tests/wallet/test_wallet.py
|
thomashuber/chia-blockchain
|
fd080917fec4038cf6a0f1c2ba8a2cd63a002c32
|
[
"Apache-2.0"
] | null | null | null |
tests/wallet/test_wallet.py
|
thomashuber/chia-blockchain
|
fd080917fec4038cf6a0f1c2ba8a2cd63a002c32
|
[
"Apache-2.0"
] | 1
|
2022-01-26T11:57:29.000Z
|
2022-01-26T11:57:29.000Z
|
import asyncio
from secrets import token_bytes
import pytest
from src.protocols import full_node_protocol
from src.simulator.simulator_protocol import FarmNewBlockProtocol, ReorgProtocol
from src.types.peer_info import PeerInfo
from src.util.ints import uint16, uint32
from tests.setup_nodes import setup_simulators_and_wallets
from src.consensus.block_rewards import calculate_base_fee, calculate_block_reward
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
class TestWalletSimulator:
@pytest.fixture(scope="function")
async def wallet_node(self):
async for _ in setup_simulators_and_wallets(1, 1, {}):
yield _
@pytest.fixture(scope="function")
async def two_wallet_nodes(self):
async for _ in setup_simulators_and_wallets(
1, 2, {"COINBASE_FREEZE_PERIOD": 0}
):
yield _
@pytest.fixture(scope="function")
async def two_wallet_nodes_five_freeze(self):
async for _ in setup_simulators_and_wallets(
1, 2, {"COINBASE_FREEZE_PERIOD": 5}
):
yield _
@pytest.fixture(scope="function")
async def three_sim_two_wallets(self):
async for _ in setup_simulators_and_wallets(
3, 2, {"COINBASE_FREEZE_PERIOD": 0}
):
yield _
@pytest.mark.asyncio
async def test_wallet_coinbase(self, wallet_node):
num_blocks = 10
full_nodes, wallets = wallet_node
full_node_1, server_1 = full_nodes[0]
wallet_node, server_2 = wallets[0]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
for i in range(1, num_blocks):
await full_node_1.farm_new_block(FarmNewBlockProtocol(ph))
await asyncio.sleep(3)
funds = sum(
[
calculate_base_fee(uint32(i)) + calculate_block_reward(uint32(i))
for i in range(1, num_blocks - 2)
]
)
assert await wallet.get_confirmed_balance() == funds
@pytest.mark.asyncio
async def test_wallet_make_transaction(self, two_wallet_nodes):
num_blocks = 10
full_nodes, wallets = two_wallet_nodes
full_node_1, server_1 = full_nodes[0]
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
for i in range(0, num_blocks):
await full_node_1.farm_new_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_base_fee(uint32(i)) + calculate_block_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await asyncio.sleep(2)
assert await wallet.get_confirmed_balance() == funds
assert await wallet.get_unconfirmed_balance() == funds
tx = await wallet.generate_signed_transaction(
10,
await wallet_node_2.wallet_state_manager.main_wallet.get_new_puzzlehash(),
0,
)
await wallet.push_transaction(tx)
await asyncio.sleep(2)
confirmed_balance = await wallet.get_confirmed_balance()
unconfirmed_balance = await wallet.get_unconfirmed_balance()
assert confirmed_balance == funds
assert unconfirmed_balance == funds - 10
for i in range(0, num_blocks):
await full_node_1.farm_new_block(FarmNewBlockProtocol(ph))
await asyncio.sleep(2)
new_funds = sum(
[
calculate_base_fee(uint32(i)) + calculate_block_reward(uint32(i))
for i in range(1, (2 * num_blocks) - 1)
]
)
confirmed_balance = await wallet.get_confirmed_balance()
unconfirmed_balance = await wallet.get_unconfirmed_balance()
assert confirmed_balance == new_funds - 10
assert unconfirmed_balance == new_funds - 10
@pytest.mark.asyncio
async def test_wallet_coinbase_reorg(self, wallet_node):
num_blocks = 10
full_nodes, wallets = wallet_node
full_node_1, server_1 = full_nodes[0]
wallet_node, server_2 = wallets[0]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
for i in range(1, num_blocks):
await full_node_1.farm_new_block(FarmNewBlockProtocol(ph))
await asyncio.sleep(3)
funds = sum(
[
calculate_base_fee(uint32(i)) + calculate_block_reward(uint32(i))
for i in range(1, num_blocks - 2)
]
)
assert await wallet.get_confirmed_balance() == funds
await full_node_1.reorg_from_index_to_new_index(
ReorgProtocol(uint32(5), uint32(num_blocks + 3), token_bytes())
)
await asyncio.sleep(3)
funds = sum(
[
calculate_base_fee(uint32(i)) + calculate_block_reward(uint32(i))
for i in range(1, 5)
]
)
assert await wallet.get_confirmed_balance() == funds
@pytest.mark.asyncio
async def test_wallet_send_to_three_peers(self, three_sim_two_wallets):
num_blocks = 10
full_nodes, wallets = three_sim_two_wallets
wallet_0, wallet_server_0 = wallets[0]
full_node_0, server_0 = full_nodes[0]
full_node_1, server_1 = full_nodes[1]
full_node_2, server_2 = full_nodes[2]
ph = await wallet_0.wallet_state_manager.main_wallet.get_new_puzzlehash()
# wallet0 <-> sever0
await wallet_server_0.start_client(
PeerInfo("localhost", uint16(server_0._port)), None
)
for i in range(1, num_blocks):
await full_node_0.farm_new_block(FarmNewBlockProtocol(ph))
all_blocks = await full_node_0.get_current_blocks(full_node_0.get_tip())
for block in all_blocks:
async for _ in full_node_1.respond_block(
full_node_protocol.RespondBlock(block)
):
pass
async for _ in full_node_2.respond_block(
full_node_protocol.RespondBlock(block)
):
pass
await asyncio.sleep(2)
funds = sum(
[
calculate_base_fee(uint32(i)) + calculate_block_reward(uint32(i))
for i in range(1, num_blocks - 2)
]
)
assert (
await wallet_0.wallet_state_manager.main_wallet.get_confirmed_balance()
== funds
)
tx = await wallet_0.wallet_state_manager.main_wallet.generate_signed_transaction(
10, token_bytes(), 0
)
await wallet_0.wallet_state_manager.main_wallet.push_transaction(tx)
await asyncio.sleep(1)
bundle0 = full_node_0.mempool_manager.get_spendbundle(tx.name())
assert bundle0 is not None
# wallet0 <-> sever1
await wallet_server_0.start_client(
PeerInfo("localhost", uint16(server_1._port)), wallet_0._on_connect
)
await asyncio.sleep(1)
bundle1 = full_node_1.mempool_manager.get_spendbundle(tx.name())
assert bundle1 is not None
# wallet0 <-> sever2
await wallet_server_0.start_client(
PeerInfo("localhost", uint16(server_2._port)), wallet_0._on_connect
)
await asyncio.sleep(1)
bundle2 = full_node_2.mempool_manager.get_spendbundle(tx.name())
assert bundle2 is not None
@pytest.mark.asyncio
async def test_wallet_make_transaction_hop(self, two_wallet_nodes_five_freeze):
num_blocks = 10
full_nodes, wallets = two_wallet_nodes_five_freeze
full_node_0, full_node_server = full_nodes[0]
wallet_node_0, wallet_0_server = wallets[0]
wallet_node_1, wallet_1_server = wallets[1]
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
wallet_1 = wallet_node_1.wallet_state_manager.main_wallet
ph = await wallet_0.get_new_puzzlehash()
await wallet_0_server.start_client(
PeerInfo("localhost", uint16(full_node_server._port)), None
)
await wallet_1_server.start_client(
PeerInfo("localhost", uint16(full_node_server._port)), None
)
for i in range(0, num_blocks):
await full_node_0.farm_new_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_base_fee(uint32(i)) + calculate_block_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await asyncio.sleep(2)
assert await wallet_0.get_confirmed_balance() == funds
assert await wallet_0.get_unconfirmed_balance() == funds
tx = await wallet_0.generate_signed_transaction(
10,
await wallet_node_1.wallet_state_manager.main_wallet.get_new_puzzlehash(),
0,
)
await wallet_0.push_transaction(tx)
await asyncio.sleep(1)
# Full node height 11, wallet height 9
confirmed_balance = await wallet_0.get_confirmed_balance()
unconfirmed_balance = await wallet_0.get_unconfirmed_balance()
assert confirmed_balance == funds
assert unconfirmed_balance == funds - 10
for i in range(0, 7):
await full_node_0.farm_new_block(FarmNewBlockProtocol(token_bytes()))
await asyncio.sleep(1)
new_funds = sum(
[
calculate_base_fee(uint32(i)) + calculate_block_reward(uint32(i))
for i in range(1, num_blocks + 1)
]
)
# Full node height 17, wallet height 15
confirmed_balance = await wallet_0.get_confirmed_balance()
unconfirmed_balance = await wallet_0.get_unconfirmed_balance()
wallet_2_confirmed_balance = await wallet_1.get_confirmed_balance()
assert confirmed_balance == new_funds - 10
assert unconfirmed_balance == new_funds - 10
assert wallet_2_confirmed_balance == 10
tx = await wallet_1.generate_signed_transaction(
5, await wallet_0.get_new_puzzlehash(), 0
)
await wallet_1.push_transaction(tx)
for i in range(0, 7):
await full_node_0.farm_new_block(FarmNewBlockProtocol(token_bytes()))
await asyncio.sleep(1)
confirmed_balance = await wallet_0.get_confirmed_balance()
unconfirmed_balance = await wallet_0.get_unconfirmed_balance()
wallet_2_confirmed_balance = await wallet_1.get_confirmed_balance()
assert confirmed_balance == new_funds - 5
assert unconfirmed_balance == new_funds - 5
assert wallet_2_confirmed_balance == 5
@pytest.mark.asyncio
async def test_wallet_make_transaction_with_fee(self, two_wallet_nodes):
num_blocks = 10
full_nodes, wallets = two_wallet_nodes
full_node_1, server_1 = full_nodes[0]
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
for i in range(0, num_blocks):
await full_node_1.farm_new_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_base_fee(uint32(i)) + calculate_block_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await asyncio.sleep(2)
assert await wallet.get_confirmed_balance() == funds
assert await wallet.get_unconfirmed_balance() == funds
tx_amount = 32000000000000
tx_fee = 10
tx = await wallet.generate_signed_transaction(
tx_amount,
await wallet_node_2.wallet_state_manager.main_wallet.get_new_puzzlehash(),
tx_fee,
)
fees = tx.spend_bundle.fees()
assert fees == tx_fee
await wallet.push_transaction(tx)
await asyncio.sleep(2)
confirmed_balance = await wallet.get_confirmed_balance()
unconfirmed_balance = await wallet.get_unconfirmed_balance()
assert confirmed_balance == funds
assert unconfirmed_balance == funds - tx_amount - tx_fee
for i in range(0, num_blocks):
await full_node_1.farm_new_block(FarmNewBlockProtocol(token_bytes()))
await asyncio.sleep(2)
new_funds = sum(
[
calculate_base_fee(uint32(i)) + calculate_block_reward(uint32(i))
for i in range(1, num_blocks + 1)
]
)
confirmed_balance = await wallet.get_confirmed_balance()
unconfirmed_balance = await wallet.get_unconfirmed_balance()
assert confirmed_balance == new_funds - tx_amount - tx_fee
assert unconfirmed_balance == new_funds - tx_amount - tx_fee
| 34.342784
| 89
| 0.643002
| 1,651
| 13,325
| 4.833434
| 0.082374
| 0.070301
| 0.015038
| 0.027569
| 0.846491
| 0.807393
| 0.78208
| 0.722807
| 0.675188
| 0.630451
| 0
| 0.033004
| 0.279174
| 13,325
| 387
| 90
| 34.431525
| 0.797814
| 0.009831
| 0
| 0.565217
| 0
| 0
| 0.014028
| 0.005005
| 0
| 0
| 0
| 0
| 0.100334
| 1
| 0.003344
| false
| 0.006689
| 0.0301
| 0
| 0.036789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
131892c95fff07308772428eb8e3d8d7351533a8
| 201
|
py
|
Python
|
deepmodel/__init__.py
|
KarolAntczak/DeepModel
|
3849651f1c0dc8aa7e46968a49eb035d18be2a9c
|
[
"Apache-2.0"
] | 4
|
2017-01-17T16:32:35.000Z
|
2021-09-29T14:15:28.000Z
|
deepmodel/__init__.py
|
KarolAntczak/DeepModel
|
3849651f1c0dc8aa7e46968a49eb035d18be2a9c
|
[
"Apache-2.0"
] | null | null | null |
deepmodel/__init__.py
|
KarolAntczak/DeepModel
|
3849651f1c0dc8aa7e46968a49eb035d18be2a9c
|
[
"Apache-2.0"
] | 1
|
2018-11-25T21:46:00.000Z
|
2018-11-25T21:46:00.000Z
|
from .Autoencoder import Autoencoder
from .Multilayer import Multilayer
from .Perceptron import Perceptron
from .StackedAutoencoder import StackedAutoencoder
from .Convolution2D import Convolution2D
| 25.125
| 50
| 0.865672
| 20
| 201
| 8.7
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011173
| 0.109453
| 201
| 7
| 51
| 28.714286
| 0.960894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1333ce00f3ad45c5f3e2fce19d51a36dcc82a073
| 24
|
py
|
Python
|
hello_world.py
|
vaibhav-lone/profiles-rest-api
|
9dfa7494a2bca2b69df157186569de470206aafc
|
[
"MIT"
] | null | null | null |
hello_world.py
|
vaibhav-lone/profiles-rest-api
|
9dfa7494a2bca2b69df157186569de470206aafc
|
[
"MIT"
] | null | null | null |
hello_world.py
|
vaibhav-lone/profiles-rest-api
|
9dfa7494a2bca2b69df157186569de470206aafc
|
[
"MIT"
] | null | null | null |
print ("Hello There!!!")
| 24
| 24
| 0.625
| 3
| 24
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 24
| 1
| 24
| 24
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0.56
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
13771448d74330075fb79753e6d01882e66eb7eb
| 4,580
|
py
|
Python
|
venv/lib/python3.8/site-packages/spaceone/api/billing/plugin/data_source_pb2_grpc.py
|
choonho/plugin-prometheus-mon-webhook
|
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/spaceone/api/billing/plugin/data_source_pb2_grpc.py
|
choonho/plugin-prometheus-mon-webhook
|
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/spaceone/api/billing/plugin/data_source_pb2_grpc.py
|
choonho/plugin-prometheus-mon-webhook
|
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from spaceone.api.billing.plugin import data_source_pb2 as spaceone_dot_api_dot_billing_dot_plugin_dot_data__source__pb2
class DataSourceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.init = channel.unary_unary(
'/spaceone.api.billing.plugin.DataSource/init',
request_serializer=spaceone_dot_api_dot_billing_dot_plugin_dot_data__source__pb2.InitRequest.SerializeToString,
response_deserializer=spaceone_dot_api_dot_billing_dot_plugin_dot_data__source__pb2.PluginInfo.FromString,
)
self.verify = channel.unary_unary(
'/spaceone.api.billing.plugin.DataSource/verify',
request_serializer=spaceone_dot_api_dot_billing_dot_plugin_dot_data__source__pb2.PluginVerifyRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class DataSourceServicer(object):
"""Missing associated documentation comment in .proto file."""
def init(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def verify(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_DataSourceServicer_to_server(servicer, server):
rpc_method_handlers = {
'init': grpc.unary_unary_rpc_method_handler(
servicer.init,
request_deserializer=spaceone_dot_api_dot_billing_dot_plugin_dot_data__source__pb2.InitRequest.FromString,
response_serializer=spaceone_dot_api_dot_billing_dot_plugin_dot_data__source__pb2.PluginInfo.SerializeToString,
),
'verify': grpc.unary_unary_rpc_method_handler(
servicer.verify,
request_deserializer=spaceone_dot_api_dot_billing_dot_plugin_dot_data__source__pb2.PluginVerifyRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'spaceone.api.billing.plugin.DataSource', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class DataSource(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def init(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/spaceone.api.billing.plugin.DataSource/init',
spaceone_dot_api_dot_billing_dot_plugin_dot_data__source__pb2.InitRequest.SerializeToString,
spaceone_dot_api_dot_billing_dot_plugin_dot_data__source__pb2.PluginInfo.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def verify(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/spaceone.api.billing.plugin.DataSource/verify',
spaceone_dot_api_dot_billing_dot_plugin_dot_data__source__pb2.PluginVerifyRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 45.346535
| 135
| 0.703275
| 489
| 4,580
| 6.165644
| 0.202454
| 0.036484
| 0.04743
| 0.056385
| 0.769154
| 0.757877
| 0.741957
| 0.705804
| 0.628856
| 0.628856
| 0
| 0.00451
| 0.225328
| 4,580
| 100
| 136
| 45.8
| 0.845265
| 0.112445
| 0
| 0.438356
| 1
| 0
| 0.07994
| 0.054459
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082192
| false
| 0
| 0.041096
| 0.027397
| 0.191781
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
137ddb82031e036bf458d514d33c86cb7e2aadf5
| 334
|
gyp
|
Python
|
binding.gyp
|
p0x6/process_watcher
|
a0acbbdc7d4c8b55c082a33b33631b01668c4eb7
|
[
"MIT"
] | 1
|
2021-07-16T10:49:47.000Z
|
2021-07-16T10:49:47.000Z
|
binding.gyp
|
p0x6/process_watcher
|
a0acbbdc7d4c8b55c082a33b33631b01668c4eb7
|
[
"MIT"
] | 3
|
2020-04-18T03:27:36.000Z
|
2021-07-16T10:49:31.000Z
|
binding.gyp
|
p0x6/process_watcher
|
a0acbbdc7d4c8b55c082a33b33631b01668c4eb7
|
[
"MIT"
] | null | null | null |
{
"targets": [
{
"target_name": "process_watcher",
"sources": [ "watcher.cc", "process_watcher.cc" ],
'conditions': [['OS != "win"', {'sources!': ['watcher.cc', 'process_watcher.cc']}]],
"include_dirs": [
"<!(node -e \"require('nan')\")"
],
"libraries": [ "wbemuuid.lib"]
}
]
}
| 23.857143
| 90
| 0.482036
| 29
| 334
| 5.37931
| 0.655172
| 0.230769
| 0.205128
| 0.294872
| 0.410256
| 0.410256
| 0
| 0
| 0
| 0
| 0
| 0
| 0.257485
| 334
| 13
| 91
| 25.692308
| 0.629032
| 0
| 0
| 0
| 0
| 0
| 0.520958
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1383cd542774c717f61fcc362216eab834a022bc
| 190
|
py
|
Python
|
hydra_client/connection/__init__.py
|
openagua/hydra-client-python
|
aa24dcab1937fe7a6484ef8629352647c09f154b
|
[
"MIT"
] | null | null | null |
hydra_client/connection/__init__.py
|
openagua/hydra-client-python
|
aa24dcab1937fe7a6484ef8629352647c09f154b
|
[
"MIT"
] | null | null | null |
hydra_client/connection/__init__.py
|
openagua/hydra-client-python
|
aa24dcab1937fe7a6484ef8629352647c09f154b
|
[
"MIT"
] | null | null | null |
from .json_connection import JSONConnection
from .remote_json_connection import RemoteJSONConnection, JsonConnection
__all__ = ['JSONConnection', 'RemoteJSONConnection', 'JsonConnection']
| 31.666667
| 72
| 0.842105
| 16
| 190
| 9.5625
| 0.5
| 0.183007
| 0.261438
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084211
| 190
| 5
| 73
| 38
| 0.87931
| 0
| 0
| 0
| 0
| 0
| 0.253968
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1395965dcca16ecb5894dbcb003a2110161aff2b
| 1,102
|
py
|
Python
|
uploads/core/migrations/0007_auto_20181228_0133.py
|
christinalee98/submissions_box
|
2ee12f1ffd168d8e53ded211c8025bf729dbf11d
|
[
"MIT"
] | null | null | null |
uploads/core/migrations/0007_auto_20181228_0133.py
|
christinalee98/submissions_box
|
2ee12f1ffd168d8e53ded211c8025bf729dbf11d
|
[
"MIT"
] | null | null | null |
uploads/core/migrations/0007_auto_20181228_0133.py
|
christinalee98/submissions_box
|
2ee12f1ffd168d8e53ded211c8025bf729dbf11d
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.3 on 2018-12-28 06:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0006_remove_document_file'),
]
operations = [
migrations.AddField(
model_name='document',
name='file_1',
field=models.FileField(blank=True, upload_to='documents/'),
),
migrations.AddField(
model_name='document',
name='file_2',
field=models.FileField(blank=True, upload_to='documents/'),
),
migrations.AddField(
model_name='document',
name='file_3',
field=models.FileField(blank=True, upload_to='documents/'),
),
migrations.AddField(
model_name='document',
name='file_4',
field=models.FileField(blank=True, upload_to='documents/'),
),
migrations.AddField(
model_name='document',
name='file_5',
field=models.FileField(blank=True, upload_to='documents/'),
),
]
| 28.25641
| 71
| 0.559891
| 109
| 1,102
| 5.495413
| 0.348624
| 0.15025
| 0.191987
| 0.225376
| 0.742905
| 0.742905
| 0.742905
| 0.671119
| 0.594324
| 0.594324
| 0
| 0.031662
| 0.31216
| 1,102
| 38
| 72
| 29
| 0.758575
| 0.040835
| 0
| 0.625
| 1
| 0
| 0.141232
| 0.023697
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.03125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
13d3342895163ab4fcf2f7d45e8bb2eb721c0b7f
| 665
|
py
|
Python
|
resolwe/flow/managers/__init__.py
|
zagm/resolwe
|
da371a3ec0260a45ccab848704c6a339a0de79cc
|
[
"Apache-2.0"
] | null | null | null |
resolwe/flow/managers/__init__.py
|
zagm/resolwe
|
da371a3ec0260a45ccab848704c6a339a0de79cc
|
[
"Apache-2.0"
] | null | null | null |
resolwe/flow/managers/__init__.py
|
zagm/resolwe
|
da371a3ec0260a45ccab848704c6a339a0de79cc
|
[
"Apache-2.0"
] | null | null | null |
""".. Ignore pydocstyle D400.
=============
Flow Managers
=============
Workflow workload managers.
.. data:: manager
The global manager instance.
:type: :class:`~resolwe.flow.managers.dispatcher.Manager`
.. automodule:: resolwe.flow.managers.dispatcher
:members:
.. automodule:: resolwe.flow.managers.workload_connectors
.. automodule:: resolwe.flow.managers.listener
.. automodule:: resolwe.flow.managers.state
.. automodule:: resolwe.flow.managers.consumer
:members:
.. automodule:: resolwe.flow.managers.utils
:members:
"""
from .dispatcher import Manager
__all__ = ('manager', )
manager = Manager() # pylint: disable=invalid-name
| 21.451613
| 61
| 0.699248
| 67
| 665
| 6.865672
| 0.447761
| 0.208696
| 0.28913
| 0.378261
| 0.156522
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005181
| 0.129323
| 665
| 30
| 62
| 22.166667
| 0.789292
| 0.866165
| 0
| 0
| 0
| 0
| 0.08642
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
13e01f1692896607f8ff2d8ed2dc29887bbe6c85
| 97
|
py
|
Python
|
recommend_service/download_juno/__init__.py
|
MakingL/music_recommendation
|
5bb13152079fb14707f6748de6227bb97cd08037
|
[
"MIT"
] | 53
|
2019-01-07T12:21:56.000Z
|
2022-03-11T03:11:26.000Z
|
recommend_service/download_juno/__init__.py
|
MakingL/music_recommendation
|
5bb13152079fb14707f6748de6227bb97cd08037
|
[
"MIT"
] | 8
|
2020-01-28T22:34:16.000Z
|
2021-09-08T05:02:35.000Z
|
recommend_service/download_juno/__init__.py
|
MakingL/music_recommendation
|
5bb13152079fb14707f6748de6227bb97cd08037
|
[
"MIT"
] | 18
|
2019-04-29T09:10:23.000Z
|
2022-02-16T22:26:04.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2019/1/7 13:32
# @Author : MLee
# @File : __init__.py.py
| 24.25
| 27
| 0.515464
| 15
| 97
| 3.066667
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150685
| 0.247423
| 97
| 4
| 28
| 24.25
| 0.479452
| 0.917526
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
13ef2739b3dc028701b1bc7a3b25fddfcedf2b65
| 152
|
py
|
Python
|
util/__init__.py
|
zheng-zy/LearnPyqt
|
9e8c107102d00a7bfe810e514326631e7f457447
|
[
"Apache-2.0"
] | 1
|
2016-03-23T07:54:55.000Z
|
2016-03-23T07:54:55.000Z
|
util/__init__.py
|
zheng-zy/LearnPyqt
|
9e8c107102d00a7bfe810e514326631e7f457447
|
[
"Apache-2.0"
] | null | null | null |
util/__init__.py
|
zheng-zy/LearnPyqt
|
9e8c107102d00a7bfe810e514326631e7f457447
|
[
"Apache-2.0"
] | 1
|
2020-07-23T18:27:53.000Z
|
2020-07-23T18:27:53.000Z
|
#!usr/bin/env python
#coding=utf-8
# Author: zhezhiyong@163.com
# Created: 2016年02月29日 29:13:56
# 编辑器:pycharm3.4,python版本:2.66
"""
# TODO(purpose):
"""
| 19
| 31
| 0.690789
| 24
| 152
| 4.375
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169118
| 0.105263
| 152
| 8
| 32
| 19
| 0.602941
| 0.888158
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.125
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b952724d1f11ec68afba06f2429b7ea197070eff
| 309
|
py
|
Python
|
Assets/StreamingAssets/.q/Lib/site-packages/docplex/mp/sparktrans/__init__.py
|
hennlo/Q-shall-not-pass
|
8013ce891462683eb9cfedc4ac12a1e602fc1ba8
|
[
"Apache-2.0"
] | 1
|
2021-05-10T16:29:03.000Z
|
2021-05-10T16:29:03.000Z
|
Assets/StreamingAssets/.q/Lib/site-packages/docplex/mp/sparktrans/__init__.py
|
hennlo/Q-shall-not-pass
|
8013ce891462683eb9cfedc4ac12a1e602fc1ba8
|
[
"Apache-2.0"
] | 1
|
2019-11-14T09:30:19.000Z
|
2019-11-22T23:23:27.000Z
|
docplex/mp/sparktrans/__init__.py
|
ctzhu/docplex
|
783d2137bedfe8b01553cf31035803085fb8819a
|
[
"Apache-2.0"
] | 1
|
2021-01-17T16:29:26.000Z
|
2021-01-17T16:29:26.000Z
|
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2018
# --------------------------------------------------------------------------
# gendoc: ignore
| 38.625
| 76
| 0.365696
| 23
| 309
| 4.913043
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035461
| 0.087379
| 309
| 7
| 77
| 44.142857
| 0.365248
| 0.954693
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b965277dff4624788e381a8c7cc531a80481f290
| 86
|
py
|
Python
|
nnet/_neural_net_exception.py
|
zhaoyan1117/NeuralNet
|
a0343dd469e981bf9b4f18db0209ca9bfaf58c4f
|
[
"BSD-2-Clause"
] | null | null | null |
nnet/_neural_net_exception.py
|
zhaoyan1117/NeuralNet
|
a0343dd469e981bf9b4f18db0209ca9bfaf58c4f
|
[
"BSD-2-Clause"
] | null | null | null |
nnet/_neural_net_exception.py
|
zhaoyan1117/NeuralNet
|
a0343dd469e981bf9b4f18db0209ca9bfaf58c4f
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import absolute_import
class NeuralNetException(Exception):
pass
| 17.2
| 38
| 0.825581
| 9
| 86
| 7.333333
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 86
| 4
| 39
| 21.5
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
b97db3a56920c843541f916fc5362dcee66e6a1d
| 93
|
py
|
Python
|
Programs/ForWhat.py
|
aposum23/laborathory8
|
9378a461ae02be0fe70560b6fe775d9683354514
|
[
"MIT"
] | null | null | null |
Programs/ForWhat.py
|
aposum23/laborathory8
|
9378a461ae02be0fe70560b6fe775d9683354514
|
[
"MIT"
] | null | null | null |
Programs/ForWhat.py
|
aposum23/laborathory8
|
9378a461ae02be0fe70560b6fe775d9683354514
|
[
"MIT"
] | null | null | null |
lst = [10, 20, 30]
tpl = (10, 20, 30)
print(lst.__sizeof__())
#32
print(tpl.__sizeof__())
#24
| 15.5
| 23
| 0.623656
| 16
| 93
| 3.125
| 0.5625
| 0.16
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0.139785
| 93
| 6
| 24
| 15.5
| 0.425
| 0.043011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
b9a09d419d71e352e94d5f1c7e641e44f1a621dc
| 586
|
py
|
Python
|
codevision/accounts/models.py
|
nabin0/codevision
|
658d153129a62e85316e543cef30656cc5e09a09
|
[
"MIT"
] | null | null | null |
codevision/accounts/models.py
|
nabin0/codevision
|
658d153129a62e85316e543cef30656cc5e09a09
|
[
"MIT"
] | null | null | null |
codevision/accounts/models.py
|
nabin0/codevision
|
658d153129a62e85316e543cef30656cc5e09a09
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class UserDetail(models.Model):
sno = models.AutoField(primary_key=True)
first_name = models.CharField(max_length=25)
last_name = models.CharField(max_length=25)
username = models.CharField(max_length=25)
gender = models.CharField(max_length=25)
email = models.CharField(max_length=25)
phone = models.IntegerField()
password = models.CharField(max_length=25)
confirm_password = models.CharField(max_length=25)
def __str__(self) -> str:
return "The Username is : "+ self.username
| 32.555556
| 54
| 0.725256
| 77
| 586
| 5.324675
| 0.467532
| 0.256098
| 0.307317
| 0.409756
| 0.502439
| 0.312195
| 0
| 0
| 0
| 0
| 0
| 0.028807
| 0.170648
| 586
| 17
| 55
| 34.470588
| 0.814815
| 0.040956
| 0
| 0
| 0
| 0
| 0.032143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0.153846
| 0.076923
| 0.076923
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
b9abeb19028b7b528df74b4e6efb084d0cf80141
| 619
|
py
|
Python
|
rustiql/dao/main_dao.py
|
pyrustic/rustiql
|
cc4c38e59ae6b9d6d6beaf8cfb1c45f9096819b1
|
[
"MIT"
] | 1
|
2021-02-02T21:05:44.000Z
|
2021-02-02T21:05:44.000Z
|
rustiql/dao/main_dao.py
|
pyrustic/rustiql
|
cc4c38e59ae6b9d6d6beaf8cfb1c45f9096819b1
|
[
"MIT"
] | null | null | null |
rustiql/dao/main_dao.py
|
pyrustic/rustiql
|
cc4c38e59ae6b9d6d6beaf8cfb1c45f9096819b1
|
[
"MIT"
] | null | null | null |
class MainDao:
def __init__(self, dao):
self._dao = dao
def script(self, path, is_file=False):
self._dao.script(path, is_file)
def tables(self):
return self._dao.tables()
def columns(self, table):
return self._dao.columns(table)
def table_content(self, table):
sql = "SELECT * FROM " + table
return self._dao.query(sql)
def edit(self, sql):
return self._dao.edit(sql)
def query(self, sql):
return self._dao.query(sql)
def test(self):
return self._dao.test()
def close(self):
self._dao.close()
| 19.967742
| 42
| 0.588045
| 83
| 619
| 4.192771
| 0.277108
| 0.201149
| 0.224138
| 0.097701
| 0.215517
| 0.137931
| 0
| 0
| 0
| 0
| 0
| 0
| 0.289176
| 619
| 30
| 43
| 20.633333
| 0.790909
| 0
| 0
| 0.1
| 0
| 0
| 0.02269
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.45
| false
| 0
| 0
| 0.25
| 0.8
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
b9f0bf5082cd8f1b564a472db2ebbecdbfcc36d2
| 148
|
py
|
Python
|
src/binary-search/python/interative.py
|
M3nin0/data-algorithm
|
dd840873b7efd3698ff6933390df015acb254252
|
[
"BSD-2-Clause"
] | 5
|
2019-05-31T01:52:15.000Z
|
2021-06-07T19:43:41.000Z
|
src/binary-search/python/interative.py
|
M3nin0/data-algorithm
|
dd840873b7efd3698ff6933390df015acb254252
|
[
"BSD-2-Clause"
] | 2
|
2019-09-14T19:18:13.000Z
|
2019-10-05T19:34:57.000Z
|
src/binary-search/python/interative.py
|
M3nin0/data-algorithm
|
dd840873b7efd3698ff6933390df015acb254252
|
[
"BSD-2-Clause"
] | null | null | null |
from search import binary_search
elements = [1, 5, 7, 8, 9, 10, 11, 12, 13]
print(binary_search(12, elements))
print(binary_search(78, elements))
| 21.142857
| 42
| 0.716216
| 25
| 148
| 4.12
| 0.64
| 0.349515
| 0.330097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133858
| 0.141892
| 148
| 6
| 43
| 24.666667
| 0.677165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
6a09ca85e5ef4bbf866c1bc7c9cb157fa37c6705
| 18,487
|
py
|
Python
|
Deeplab/research/deeplab/core/conv2d_ws_test.py
|
LTTM/GMNet
|
e17959eb219e1884e2be271c9244ba284c2f4ffa
|
[
"Apache-2.0"
] | 21
|
2020-07-22T10:32:19.000Z
|
2022-03-12T19:11:41.000Z
|
Deeplab/research/deeplab/core/conv2d_ws_test.py
|
LTTM/GMNet
|
e17959eb219e1884e2be271c9244ba284c2f4ffa
|
[
"Apache-2.0"
] | 2
|
2020-11-23T12:42:36.000Z
|
2021-06-24T07:21:45.000Z
|
Deeplab/research/deeplab/core/conv2d_ws_test.py
|
LTTM/GMNet
|
e17959eb219e1884e2be271c9244ba284c2f4ffa
|
[
"Apache-2.0"
] | 3
|
2020-09-29T11:18:07.000Z
|
2021-11-02T16:09:52.000Z
|
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conv2d_ws."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers as contrib_layers
from research.deeplab.core import conv2d_ws
class ConvolutionTest(tf.test.TestCase):
def testInvalidShape(self):
with self.cached_session():
images_3d = tf.random_uniform((5, 6, 7, 9, 3), seed=1)
with self.assertRaisesRegexp(
ValueError, 'Convolution expects input with rank 4, got 5'):
conv2d_ws.conv2d(images_3d, 32, 3)
def testInvalidDataFormat(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'data_format'):
conv2d_ws.conv2d(images, 32, 3, data_format='CHWN')
def testCreateConv(self):
height, width = 7, 9
with self.cached_session():
images = np.random.uniform(size=(5, height, width, 4)).astype(np.float32)
output = conv2d_ws.conv2d(images, 32, [3, 3])
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
weights = contrib_framework.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])
biases = contrib_framework.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateConvWithWS(self):
height, width = 7, 9
with self.cached_session():
images = np.random.uniform(size=(5, height, width, 4)).astype(np.float32)
output = conv2d_ws.conv2d(
images, 32, [3, 3], use_weight_standardization=True)
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
weights = contrib_framework.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])
biases = contrib_framework.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateConvNCHW(self):
height, width = 7, 9
with self.cached_session():
images = np.random.uniform(size=(5, 4, height, width)).astype(np.float32)
output = conv2d_ws.conv2d(images, 32, [3, 3], data_format='NCHW')
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 32, height, width])
weights = contrib_framework.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])
biases = contrib_framework.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateSquareConv(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = conv2d_ws.conv2d(images, 32, 3)
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateConvWithTensorShape(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = conv2d_ws.conv2d(images, 32, images.get_shape()[1:3])
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateFullyConv(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 32), seed=1)
output = conv2d_ws.conv2d(
images, 64, images.get_shape()[1:3], padding='VALID')
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 64])
biases = contrib_framework.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [64])
def testFullyConvWithCustomGetter(self):
height, width = 7, 9
with self.cached_session():
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with tf.variable_scope('test', custom_getter=custom_getter):
images = tf.random_uniform((5, height, width, 32), seed=1)
conv2d_ws.conv2d(images, 64, images.get_shape()[1:3])
self.assertEqual(called[0], 2) # Custom getter called twice.
def testCreateVerticalConv(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 4), seed=1)
output = conv2d_ws.conv2d(images, 32, [3, 1])
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
weights = contrib_framework.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3, 1, 4, 32])
biases = contrib_framework.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateHorizontalConv(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 4), seed=1)
output = conv2d_ws.conv2d(images, 32, [1, 3])
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
weights = contrib_framework.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [1, 3, 4, 32])
def testCreateConvWithStride(self):
height, width = 6, 8
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = conv2d_ws.conv2d(images, 32, [3, 3], stride=2)
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width / 2, 32])
def testCreateConvCreatesWeightsAndBiasesVars(self):
height, width = 7, 9
images = tf.random_uniform((5, height, width, 3), seed=1)
with self.cached_session():
self.assertFalse(contrib_framework.get_variables('conv1/weights'))
self.assertFalse(contrib_framework.get_variables('conv1/biases'))
conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1')
self.assertTrue(contrib_framework.get_variables('conv1/weights'))
self.assertTrue(contrib_framework.get_variables('conv1/biases'))
def testCreateConvWithScope(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1')
self.assertEqual(output.op.name, 'conv1/Relu')
def testCreateConvWithCollection(self):
height, width = 7, 9
images = tf.random_uniform((5, height, width, 3), seed=1)
with tf.name_scope('fe'):
conv = conv2d_ws.conv2d(
images, 32, [3, 3], outputs_collections='outputs', scope='Conv')
output_collected = tf.get_collection('outputs')[0]
self.assertEqual(output_collected.aliases, ['Conv'])
self.assertEqual(output_collected, conv)
def testCreateConvWithoutActivation(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = conv2d_ws.conv2d(images, 32, [3, 3], activation_fn=None)
self.assertEqual(output.op.name, 'Conv/BiasAdd')
def testCreateConvValid(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = conv2d_ws.conv2d(images, 32, [3, 3], padding='VALID')
self.assertListEqual(output.get_shape().as_list(), [5, 5, 7, 32])
def testCreateConvWithWD(self):
height, width = 7, 9
weight_decay = 0.01
with self.cached_session() as sess:
images = tf.random_uniform((5, height, width, 3), seed=1)
regularizer = contrib_layers.l2_regularizer(weight_decay)
conv2d_ws.conv2d(images, 32, [3, 3], weights_regularizer=regularizer)
l2_loss = tf.nn.l2_loss(
contrib_framework.get_variables_by_name('weights')[0])
wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEqual(wd.op.name, 'Conv/kernel/Regularizer/l2_regularizer')
sess.run(tf.global_variables_initializer())
self.assertAlmostEqual(sess.run(wd), weight_decay * l2_loss.eval())
def testCreateConvNoRegularizers(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
conv2d_ws.conv2d(images, 32, [3, 3])
self.assertEqual(
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), [])
def testReuseVars(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1')
self.assertEqual(len(contrib_framework.get_variables()), 2)
conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1', reuse=True)
self.assertEqual(len(contrib_framework.get_variables()), 2)
def testNonReuseVars(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
conv2d_ws.conv2d(images, 32, [3, 3])
self.assertEqual(len(contrib_framework.get_variables()), 2)
conv2d_ws.conv2d(images, 32, [3, 3])
self.assertEqual(len(contrib_framework.get_variables()), 4)
def testReuseConvWithWD(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
weight_decay = contrib_layers.l2_regularizer(0.01)
with contrib_framework.arg_scope([conv2d_ws.conv2d],
weights_regularizer=weight_decay):
conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1')
self.assertEqual(len(contrib_framework.get_variables()), 2)
self.assertEqual(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
conv2d_ws.conv2d(images, 32, [3, 3], scope='conv1', reuse=True)
self.assertEqual(len(contrib_framework.get_variables()), 2)
self.assertEqual(
len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
def testConvWithBatchNorm(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 32), seed=1)
with contrib_framework.arg_scope([conv2d_ws.conv2d],
normalizer_fn=contrib_layers.batch_norm,
normalizer_params={'decay': 0.9}):
net = conv2d_ws.conv2d(images, 32, [3, 3])
net = conv2d_ws.conv2d(net, 32, [3, 3])
self.assertEqual(len(contrib_framework.get_variables()), 8)
self.assertEqual(
len(contrib_framework.get_variables('Conv/BatchNorm')), 3)
self.assertEqual(
len(contrib_framework.get_variables('Conv_1/BatchNorm')), 3)
def testReuseConvWithBatchNorm(self):
height, width = 7, 9
with self.cached_session():
images = tf.random_uniform((5, height, width, 32), seed=1)
with contrib_framework.arg_scope([conv2d_ws.conv2d],
normalizer_fn=contrib_layers.batch_norm,
normalizer_params={'decay': 0.9}):
net = conv2d_ws.conv2d(images, 32, [3, 3], scope='Conv')
net = conv2d_ws.conv2d(net, 32, [3, 3], scope='Conv', reuse=True)
self.assertEqual(len(contrib_framework.get_variables()), 4)
self.assertEqual(
len(contrib_framework.get_variables('Conv/BatchNorm')), 3)
self.assertEqual(
len(contrib_framework.get_variables('Conv_1/BatchNorm')), 0)
def testCreateConvCreatesWeightsAndBiasesVarsWithRateTwo(self):
height, width = 7, 9
images = tf.random_uniform((5, height, width, 3), seed=1)
with self.cached_session():
self.assertFalse(contrib_framework.get_variables('conv1/weights'))
self.assertFalse(contrib_framework.get_variables('conv1/biases'))
conv2d_ws.conv2d(images, 32, [3, 3], rate=2, scope='conv1')
self.assertTrue(contrib_framework.get_variables('conv1/weights'))
self.assertTrue(contrib_framework.get_variables('conv1/biases'))
def testOutputSizeWithRateTwoSamePadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 10, 12, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = conv2d_ws.conv2d(
images, num_filters, [3, 3], rate=2, padding='SAME')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithRateTwoValidPadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 6, 8, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = conv2d_ws.conv2d(
images, num_filters, [3, 3], rate=2, padding='VALID')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithRateTwoThreeValidPadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 6, 6, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = conv2d_ws.conv2d(
images, num_filters, [3, 3], rate=[2, 3], padding='VALID')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testDynamicOutputSizeWithRateOneValidPadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 7, 9, num_filters]
with self.cached_session():
images = tf.placeholder(np.float32, [None, None, None, input_size[3]])
output = conv2d_ws.conv2d(
images, num_filters, [3, 3], rate=1, padding='VALID')
tf.global_variables_initializer().run()
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), expected_size)
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testDynamicOutputSizeWithRateOneValidPaddingNCHW(self):
if tf.test.is_gpu_available(cuda_only=True):
num_filters = 32
input_size = [5, 3, 9, 11]
expected_size = [None, num_filters, None, None]
expected_size_dynamic = [5, num_filters, 7, 9]
with self.session(use_gpu=True):
images = tf.placeholder(np.float32, [None, input_size[1], None, None])
output = conv2d_ws.conv2d(
images,
num_filters, [3, 3],
rate=1,
padding='VALID',
data_format='NCHW')
tf.global_variables_initializer().run()
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), expected_size)
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testDynamicOutputSizeWithRateTwoValidPadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 5, 7, num_filters]
with self.cached_session():
images = tf.placeholder(np.float32, [None, None, None, input_size[3]])
output = conv2d_ws.conv2d(
images, num_filters, [3, 3], rate=2, padding='VALID')
tf.global_variables_initializer().run()
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), expected_size)
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testWithScope(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 5, 7, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = conv2d_ws.conv2d(
images, num_filters, [3, 3], rate=2, padding='VALID', scope='conv7')
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertEqual(output.op.name, 'conv7/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testWithScopeWithoutActivation(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 5, 7, num_filters]
images = tf.random_uniform(input_size, seed=1)
output = conv2d_ws.conv2d(
images,
num_filters, [3, 3],
rate=2,
padding='VALID',
activation_fn=None,
scope='conv7')
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertEqual(output.op.name, 'conv7/BiasAdd')
self.assertListEqual(list(output.eval().shape), expected_size)
if __name__ == '__main__':
tf.test.main()
| 43.912114
| 80
| 0.672959
| 2,414
| 18,487
| 4.976388
| 0.101491
| 0.050362
| 0.047782
| 0.059935
| 0.768418
| 0.761425
| 0.750187
| 0.728544
| 0.709814
| 0.70199
| 0
| 0.041378
| 0.188186
| 18,487
| 420
| 81
| 44.016667
| 0.759062
| 0.039758
| 0
| 0.638418
| 0
| 0
| 0.037505
| 0.002143
| 0
| 0
| 0
| 0
| 0.234463
| 1
| 0.096045
| false
| 0
| 0.022599
| 0
| 0.124294
| 0.002825
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6a243b4b2479bd2eab4e53207a6ff6ceba6f1ba1
| 278
|
py
|
Python
|
old/TwitterBot/login.py
|
kvandenbrande/TwitterBot
|
e4790d6d2c5ce9992cfc626d32667bbc23be77b2
|
[
"MIT"
] | 1
|
2021-01-31T03:17:28.000Z
|
2021-01-31T03:17:28.000Z
|
old/TwitterBot/login.py
|
kvandenbrande/TwitterBot
|
e4790d6d2c5ce9992cfc626d32667bbc23be77b2
|
[
"MIT"
] | null | null | null |
old/TwitterBot/login.py
|
kvandenbrande/TwitterBot
|
e4790d6d2c5ce9992cfc626d32667bbc23be77b2
|
[
"MIT"
] | 1
|
2016-07-11T10:40:59.000Z
|
2016-07-11T10:40:59.000Z
|
SCREEN_NAME = "BlaqueDev"
CONSUMER_KEY = "z4qCqfsWIEeOv9oVull5HZKfq"
CONSUMER_SECRET = "1UBLkR2NnO7NBBfdmw4DAX5dUnVDLHXwUqE3p1J2OmPt7r0YTB"
ACCESS_TOKEN = "706558809381056517-KEZd26jInvdwcqaZTNnvshpRZPj6ASi"
ACCESS_TOKEN_SECRET = "fu2yev7jNThlPskEVaR1m9iB4nPmY3P0wJkvTz5VreKMm"
| 46.333333
| 70
| 0.888489
| 17
| 278
| 14.176471
| 0.764706
| 0.091286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159696
| 0.053957
| 278
| 5
| 71
| 55.6
| 0.756654
| 0
| 0
| 0
| 0
| 0
| 0.643885
| 0.611511
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6a3019421a1f9c5eecd939446ab1f1805d31c2c9
| 220
|
py
|
Python
|
backend/app/app/models/tag.py
|
smartgoo/fastapi-mongodb-realworld-example-app
|
87ea48ce3ebd67b19bf4c6a1d067dda35e8cb0eb
|
[
"MIT"
] | 1
|
2021-01-30T08:38:15.000Z
|
2021-01-30T08:38:15.000Z
|
backend/app/app/models/tag.py
|
smartgoo/fastapi-mongodb-realworld-example-app
|
87ea48ce3ebd67b19bf4c6a1d067dda35e8cb0eb
|
[
"MIT"
] | null | null | null |
backend/app/app/models/tag.py
|
smartgoo/fastapi-mongodb-realworld-example-app
|
87ea48ce3ebd67b19bf4c6a1d067dda35e8cb0eb
|
[
"MIT"
] | null | null | null |
from typing import List
from .dbmodel import DBModelMixin
from .rwmodel import RWModel
class Tag(RWModel):
tag: str
class TagInDB(DBModelMixin, Tag):
pass
class TagsList(RWModel):
tags: List[str] = []
| 12.941176
| 33
| 0.709091
| 28
| 220
| 5.571429
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204545
| 220
| 16
| 34
| 13.75
| 0.891429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.111111
| 0.333333
| 0
| 0.888889
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
6a4bf412d7ab4f035cbac8aa81d8ec944fedec52
| 492
|
py
|
Python
|
tests/test_settings.py
|
data4knowledge/RdfOgm
|
18cf0e1ab2025e0541d34350533ac731aae0f7a1
|
[
"Apache-2.0"
] | null | null | null |
tests/test_settings.py
|
data4knowledge/RdfOgm
|
18cf0e1ab2025e0541d34350533ac731aae0f7a1
|
[
"Apache-2.0"
] | null | null | null |
tests/test_settings.py
|
data4knowledge/RdfOgm
|
18cf0e1ab2025e0541d34350533ac731aae0f7a1
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from rdfogm.settings import Settings
def test_defaults():
settings = Settings()
assert settings.protocol == "http"
assert settings.host == "localhost"
assert settings.port == "3030"
assert settings.dataset == "test"
def test_graph():
settings = Settings()
assert settings.default_graph == "http://www.data4knowledge/graphs/test"
def test_graph():
settings = Settings()
assert settings.rdf_types['http://www.example.com/A'] == 'ModelName'
| 25.894737
| 76
| 0.695122
| 57
| 492
| 5.912281
| 0.473684
| 0.249258
| 0.195846
| 0.267062
| 0.272997
| 0.272997
| 0.272997
| 0.272997
| 0
| 0
| 0
| 0.012285
| 0.172764
| 492
| 18
| 77
| 27.333333
| 0.815725
| 0
| 0
| 0.357143
| 0
| 0
| 0.184959
| 0
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0.214286
| false
| 0
| 0.142857
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e008e9f084182cbd1332ba6d653c02b41e415b06
| 133
|
py
|
Python
|
src/player.py
|
HoloTheDrunk/damalex
|
9284c152b873dcaceff625f6f265e3cd91432936
|
[
"Unlicense"
] | null | null | null |
src/player.py
|
HoloTheDrunk/damalex
|
9284c152b873dcaceff625f6f265e3cd91432936
|
[
"Unlicense"
] | null | null | null |
src/player.py
|
HoloTheDrunk/damalex
|
9284c152b873dcaceff625f6f265e3cd91432936
|
[
"Unlicense"
] | null | null | null |
from colours import Colour
class Player:
def __init__(self, remaining: int):
self.remaining = remaining # pieces left
| 19
| 49
| 0.699248
| 16
| 133
| 5.5625
| 0.8125
| 0.292135
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.233083
| 133
| 6
| 50
| 22.166667
| 0.872549
| 0.082707
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
e02fc60c08781e3cea7ea233818c42841925b295
| 179
|
py
|
Python
|
core/lambda/python_reduce_demo.py
|
caserwin/daily-learning-python
|
01fea4c5d4e86cbea2dbef8817146f018b5f1479
|
[
"Apache-2.0"
] | 1
|
2019-05-04T07:27:18.000Z
|
2019-05-04T07:27:18.000Z
|
core/lambda/python_reduce_demo.py
|
caserwin/daily-learning-python
|
01fea4c5d4e86cbea2dbef8817146f018b5f1479
|
[
"Apache-2.0"
] | null | null | null |
core/lambda/python_reduce_demo.py
|
caserwin/daily-learning-python
|
01fea4c5d4e86cbea2dbef8817146f018b5f1479
|
[
"Apache-2.0"
] | 1
|
2018-09-20T01:49:36.000Z
|
2018-09-20T01:49:36.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2018/7/29 下午1:38
# @Author : yidxue
print(reduce(lambda x, y: x + y, [2, 3, 4, 5, 6], 1))
print(reduce(lambda x, y: x + y, [2, 3, 4, 5, 6]))
| 29.833333
| 53
| 0.50838
| 36
| 179
| 2.527778
| 0.611111
| 0.087912
| 0.373626
| 0.395604
| 0.571429
| 0.571429
| 0.571429
| 0.571429
| 0.571429
| 0.571429
| 0
| 0.15942
| 0.22905
| 179
| 6
| 54
| 29.833333
| 0.5
| 0.374302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
e079ecebcbf332171b28cca5a245694141b4b99b
| 118
|
py
|
Python
|
backend/nocodeML/uploadapp/admin.py
|
raoashish10/Auto-ML
|
e9d0847eef212ee3059d90246c3531af683ea8b1
|
[
"MIT"
] | 3
|
2020-09-26T07:40:46.000Z
|
2021-11-16T11:40:02.000Z
|
backend/nocodeML/uploadapp/admin.py
|
raoashish10/Auto-ML
|
e9d0847eef212ee3059d90246c3531af683ea8b1
|
[
"MIT"
] | null | null | null |
backend/nocodeML/uploadapp/admin.py
|
raoashish10/Auto-ML
|
e9d0847eef212ee3059d90246c3531af683ea8b1
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import DataSet
# Register your models here.
admin.site.register(DataSet)
| 29.5
| 32
| 0.822034
| 17
| 118
| 5.705882
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110169
| 118
| 4
| 33
| 29.5
| 0.92381
| 0.220339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0ece06bd656d6c98e59670d6ab8efc55b6819587
| 273
|
py
|
Python
|
app/constants/agency_features.py
|
joelbcastillo/NYCOpenRecords
|
001dfa21cc33d75a9067cae78752c5ba5734718b
|
[
"Apache-2.0"
] | 37
|
2016-01-21T18:33:56.000Z
|
2021-10-24T01:43:20.000Z
|
app/constants/agency_features.py
|
CityOfNewYork/NYCOpenRecords
|
476a236a573e6f3a2f96c6537a30ee27b2bd3a2b
|
[
"Apache-2.0"
] | 179
|
2016-01-21T21:33:31.000Z
|
2022-02-15T21:31:35.000Z
|
app/constants/agency_features.py
|
joelbcastillo/NYCOpenRecords
|
001dfa21cc33d75a9067cae78752c5ba5734718b
|
[
"Apache-2.0"
] | 13
|
2017-05-19T17:27:31.000Z
|
2020-07-05T00:55:29.000Z
|
MONITOR_AGENCY_REQUESTS = 'monitor_agency_requests'
SPECIFIC_REQUEST_INSTRUCTIONS = 'specific_request_instructions'
CUSTOM_REQUEST_FORMS = 'custom_request_forms'
AGENCY_FEATURES = [
MONITOR_AGENCY_REQUESTS,
SPECIFIC_REQUEST_INSTRUCTIONS,
CUSTOM_REQUEST_FORMS
]
| 30.333333
| 63
| 0.846154
| 29
| 273
| 7.310345
| 0.310345
| 0.183962
| 0.29717
| 0.273585
| 0.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 273
| 9
| 64
| 30.333333
| 0.865306
| 0
| 0
| 0
| 0
| 0
| 0.262774
| 0.189781
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
16148892c66fa2845be0dc8fcb6319fb532ca982
| 226
|
py
|
Python
|
attacksurfacemeter/__init__.py
|
andymeneely/attack-surface-metrics
|
9cef791a79771ee29f18a0da2159f36c3df32755
|
[
"MIT"
] | 16
|
2015-12-25T10:53:10.000Z
|
2022-02-26T08:27:55.000Z
|
attacksurfacemeter/__init__.py
|
andymeneely/attack-surface-metrics
|
9cef791a79771ee29f18a0da2159f36c3df32755
|
[
"MIT"
] | 30
|
2015-01-29T19:34:31.000Z
|
2021-06-10T17:22:57.000Z
|
attacksurfacemeter/__init__.py
|
andymeneely/attack-surface-metrics
|
9cef791a79771ee29f18a0da2159f36c3df32755
|
[
"MIT"
] | 4
|
2016-11-03T15:59:42.000Z
|
2020-10-29T17:56:59.000Z
|
__author__ = 'kevin'
# from attacksurfacemeter.call import Call
# from attacksurfacemeter.cflow_call import CflowCall
# from attacksurfacemeter.gprof_call import GprofCall
# from attacksurfacemeter.call_graph import CallGraph
| 37.666667
| 53
| 0.849558
| 25
| 226
| 7.4
| 0.48
| 0.475676
| 0.281081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10177
| 226
| 6
| 54
| 37.666667
| 0.91133
| 0.867257
| 0
| 0
| 0
| 0
| 0.192308
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1619b43344c0cb256434ea6003a989948bf63da7
| 4,260
|
py
|
Python
|
tests/unit/bokeh/colors/test_hsl.py
|
Suicoleiro/bokeh
|
a212acdf091a7a4df639fa9d443be6ade0018039
|
[
"BSD-3-Clause"
] | 15,193
|
2015-01-01T05:11:45.000Z
|
2022-03-31T19:30:20.000Z
|
tests/unit/bokeh/colors/test_hsl.py
|
Suicoleiro/bokeh
|
a212acdf091a7a4df639fa9d443be6ade0018039
|
[
"BSD-3-Clause"
] | 9,554
|
2015-01-01T03:16:54.000Z
|
2022-03-31T22:59:39.000Z
|
tests/unit/bokeh/colors/test_hsl.py
|
Suicoleiro/bokeh
|
a212acdf091a7a4df639fa9d443be6ade0018039
|
[
"BSD-3-Clause"
] | 4,829
|
2015-01-02T03:35:32.000Z
|
2022-03-30T16:40:26.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from bokeh.colors import RGB
# Module under test
import bokeh.colors.hsl as bch # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_HSL:
def test_init(self) -> None:
c = bch.HSL(10, 0.2, 0.3)
assert c
assert c.a == 1.0
assert c.h == 10
assert c.s == 0.2
assert c.l == 0.3
c = bch.HSL(10, 0.2, 0.3, 0.3)
assert c
assert c.a == 0.3
assert c.h == 10
assert c.s == 0.2
assert c.l == 0.3
def test_repr(self) -> None:
c = bch.HSL(10, 0.2, 0.3)
assert repr(c) == c.to_css()
c = bch.HSL(10, 0.2, 0.3, 0.3)
assert repr(c) == c.to_css()
def test_copy(self) -> None:
c = bch.HSL(10, 0.2, 0.3)
c2 = c.copy()
assert c2 is not c
assert c2.a == c.a
assert c2.h == c.h
assert c2.s == c.s
assert c2.l == c.l
def test_from_hsl(self) -> None:
c = bch.HSL(10, 0.2, 0.3)
c2 = bch.HSL.from_hsl(c)
assert c2 is not c
assert c2.a == c.a
assert c2.h == c.h
assert c2.s == c.s
assert c2.l == c.l
c = bch.HSL(10, 0.2, 0.3, 0.1)
c2 = bch.HSL.from_hsl(c)
assert c2 is not c
assert c2.a == c.a
assert c2.h == c.h
assert c2.s == c.s
assert c2.l == c.l
def test_from_rgb(self) -> None:
c = RGB(255, 100, 0)
c2 = bch.HSL.from_rgb(c)
assert c2 is not c
assert c2.a == 1
assert c2.h == 24
assert c2.s == 1.0
assert c2.l == 0.5
c = RGB(255, 100, 0, 0.1)
c2 = bch.HSL.from_rgb(c)
assert c2 is not c
assert c2.a == 0.1
assert c2.h == 24
assert c2.s == 1.0
assert c2.l == 0.5
def test_to_css(self) -> None:
c = bch.HSL(10, 0.2, 0.3)
assert c.to_css() == "hsl(10, 20.0%, 30.0%)"
c = bch.HSL(10, 0.2, 0.3, 0.3)
assert c.to_css() == "hsla(10, 20.0%, 30.0%, 0.3)"
def test_to_hsl(self) -> None:
c = bch.HSL(10, 0.2, 0.3)
c2 = c.to_hsl()
assert c2 is not c
assert c2.a == c.a
assert c2.h == c.h
assert c2.s == c.s
assert c2.l == c.l
c = bch.HSL(10, 0.2, 0.3, 0.1)
c2 = c.to_hsl()
assert c2 is not c
assert c2.a == c.a
assert c2.h == c.h
assert c2.s == c.s
assert c2.l == c.l
def test_to_rgb(self) -> None:
c = bch.HSL(10, 0.2, 0.3)
c2 = c.to_rgb()
assert c2 is not c
assert c2.a == 1.0
assert c2.r == 92
assert c2.g == 66
assert c2.b == 61
c = bch.HSL(10, 0.2, 0.3, 0.1)
c2 = c.to_rgb()
assert c2 is not c
assert c.a == 0.1
assert c2.r == 92
assert c2.g == 66
assert c2.b == 61
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
| 29.37931
| 78
| 0.357746
| 572
| 4,260
| 2.611888
| 0.138112
| 0.235609
| 0.06091
| 0.078313
| 0.722222
| 0.692771
| 0.684739
| 0.674029
| 0.659304
| 0.656627
| 0
| 0.075619
| 0.279812
| 4,260
| 144
| 79
| 29.583333
| 0.411343
| 0.318075
| 0
| 0.75
| 0
| 0
| 0.016684
| 0
| 0
| 0
| 0
| 0
| 0.614583
| 1
| 0.083333
| false
| 0
| 0.041667
| 0
| 0.135417
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
164d6a0ba5e856bae118b7b6294205efa3bdb155
| 45
|
py
|
Python
|
tests/components/input_boolean/__init__.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/input_boolean/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
tests/components/input_boolean/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Tests for the input_boolean component."""
| 22.5
| 44
| 0.733333
| 6
| 45
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 45
| 1
| 45
| 45
| 0.8
| 0.844444
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
168c4d0ccd4907b4c849160a5f7765a343957813
| 66
|
py
|
Python
|
piazza_api/piazza_config.py
|
abhididdigi/Piazza-API2
|
1b7f60833745ed212e9860dd759517f64838bc66
|
[
"MIT"
] | 1
|
2018-07-07T13:40:59.000Z
|
2018-07-07T13:40:59.000Z
|
piazza_api/piazza_config.py
|
abhididdigi/Piazza-API2
|
1b7f60833745ed212e9860dd759517f64838bc66
|
[
"MIT"
] | null | null | null |
piazza_api/piazza_config.py
|
abhididdigi/Piazza-API2
|
1b7f60833745ed212e9860dd759517f64838bc66
|
[
"MIT"
] | null | null | null |
import os
piazza_session_id = os.environ.get('piazza_session_id')
| 22
| 55
| 0.818182
| 11
| 66
| 4.545455
| 0.636364
| 0.52
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075758
| 66
| 3
| 55
| 22
| 0.819672
| 0
| 0
| 0
| 0
| 0
| 0.253731
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
169fa16f3fa2169c5589c49a1d13088c9e10e534
| 67
|
py
|
Python
|
unipixel/__init__.py
|
Razerfish/unipixel
|
712571afd5bf5f2fde31018e1965aac7ad238fa2
|
[
"MIT"
] | null | null | null |
unipixel/__init__.py
|
Razerfish/unipixel
|
712571afd5bf5f2fde31018e1965aac7ad238fa2
|
[
"MIT"
] | 1
|
2020-03-06T16:53:24.000Z
|
2020-03-06T16:53:24.000Z
|
unipixel/__init__.py
|
Razerfish/unipixel
|
712571afd5bf5f2fde31018e1965aac7ad238fa2
|
[
"MIT"
] | null | null | null |
# pylint: disable=missing-module-docstring
from .unipixel import *
| 22.333333
| 42
| 0.791045
| 8
| 67
| 6.625
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104478
| 67
| 2
| 43
| 33.5
| 0.883333
| 0.597015
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
16b0b6a181ded91a53729ccabb1a26990cd56b63
| 8,336
|
py
|
Python
|
lambda-archive/lambda-functions/codebreaker-problem-grader/awstools.py
|
singaporezoo/codebreaker-official
|
1fe5792f1c36f922abd0836d8dcb42d271a9323d
|
[
"MIT"
] | 11
|
2021-09-19T06:32:44.000Z
|
2022-03-14T19:09:46.000Z
|
lambda-archive/lambda-functions/codebreaker-problem-grader/awstools.py
|
singaporezoo/codebreaker-official
|
1fe5792f1c36f922abd0836d8dcb42d271a9323d
|
[
"MIT"
] | null | null | null |
lambda-archive/lambda-functions/codebreaker-problem-grader/awstools.py
|
singaporezoo/codebreaker-official
|
1fe5792f1c36f922abd0836d8dcb42d271a9323d
|
[
"MIT"
] | 1
|
2022-03-02T13:27:27.000Z
|
2022-03-02T13:27:27.000Z
|
import boto3
import json
from botocore.exceptions import ClientError
from botocore.client import Config
from boto3.dynamodb.conditions import Key, Attr
dynamodb = boto3.resource('dynamodb','ap-southeast-1')
problems_table = dynamodb.Table('codebreaker-problems')
submissions_table = dynamodb.Table('codebreaker-submissions')
users_table = dynamodb.Table('codebreaker-users')
lambda_client = boto3.client('lambda')
def getUserInfo(email):
response = users_table.query(
KeyConditionExpression = Key('email').eq(email)
)
user_info = response['Items']
if len(user_info) == 0:
newUserInfo = {
'email' : email,
'role' : 'disabled',
'username' : '',
'problemScores' : {},
'problemSubtaskScores': {},
}
users_table.put_item(Item = newUserInfo)
return getUserInfo(email)
return user_info[0]
def getUserInfoFromUsername(username):
    """Scan the users table for the record whose ``username`` matches.

    Returns the first matching item; when no user exists, returns an empty
    placeholder record so callers can still read the standard fields.
    """
    scan_kwargs = {
        'FilterExpression': Key('username').eq(username)
    }
    done = False
    start_key = None
    # Paginated scan: DynamoDB returns LastEvaluatedKey until exhausted.
    while not done:
        if start_key:
            scan_kwargs['ExclusiveStartKey'] = start_key
        response = users_table.scan(**scan_kwargs)
        res = response.get('Items', [])
        if len(res) > 0:
            return res[0]
        start_key = response.get('LastEvaluatedKey', None)
        done = start_key is None
    # Bug fix: the placeholder previously used snake_case keys
    # ('problem_scores', 'problem_subtask_scores') while every caller (e.g.
    # updateScores) reads the camelCase keys written by getUserInfo
    # ('problemScores'), causing a KeyError for unknown usernames.
    # Use the same schema as getUserInfo.
    placeHolder = {
        'email': '',
        'school': '',
        'role': '',
        'username': '',
        'problemScores': {},
        'problemSubtaskScores': {},
    }
    return placeHolder
def gradeSubmission(lambda_input):
    """Synchronously invoke the parallel grader lambda with *lambda_input*.

    Returns the raw ``invoke`` response so callers can inspect the status
    code / payload (previously the response was bound to an unused local
    and discarded; returning it is backward-compatible).
    """
    print(lambda_input)  # log the payload for debugging in CloudWatch
    return lambda_client.invoke(
        FunctionName='arn:aws:lambda:ap-southeast-1:354145626860:function:codebreaker-problem-grader-parallel',
        InvocationType='RequestResponse',
        Payload=json.dumps(lambda_input)
    )
# NOTE(review): this definition is shadowed by an identical ``updateScores``
# redefined later in this module; only the later copy takes effect at import
# time. Consider deleting one of the two.
def updateScores(problem, username):
    # Recompute the user's best totalScore on *problem* and persist it,
    # keeping the problem's noACs (full-score) counter in sync.
    submissions = submissions_table.query(
        IndexName = 'problemIndex4',
        KeyConditionExpression = Key('problemName').eq(problem),
        ProjectionExpression = 'totalScore',
        FilterExpression = Attr('username').eq(username),
        ScanIndexForward = False
    )['Items']
    if len(submissions) == 0:
        # Nothing submitted by this user for this problem: leave stored scores untouched.
        return
    # Best total score across all of this user's submissions.
    maxScore = 0
    for i in submissions:
        maxScore = max(maxScore, i['totalScore'])
    userInfo = getUserInfoFromUsername(username)
    problemScores = userInfo['problemScores']
    prevScore = 0
    if problem in problemScores:
        prevScore = problemScores[problem]
    # '#a' aliases the problem name because attribute names in DynamoDB
    # update expressions may not contain arbitrary characters.
    users_table.update_item(
        Key = {'email': userInfo['email']},
        UpdateExpression = f'set problemScores. #a = :s',
        ExpressionAttributeValues = {':s': maxScore},
        ExpressionAttributeNames = {'#a': problem}
    )
    if prevScore == 100 and maxScore != 100:
        # User dropped below a full score: decrement the AC counter.
        problems_table.update_item(
            Key = {'problemName': problem},
            UpdateExpression = f'set noACs = noACs - :one',
            ExpressionAttributeValues = {':one':1},
        )
    elif prevScore != 100 and maxScore == 100:
        # User newly reached a full score: increment the AC counter.
        problems_table.update_item(
            Key = {'problemName': problem},
            UpdateExpression = f'set noACs = noACs + :one',
            ExpressionAttributeValues = {':one': 1}
        )
# NOTE(review): this definition is shadowed by an identical
# ``updateStitchedScores`` redefined later in this module; only the later
# copy takes effect at import time. Consider deleting one of the two.
def updateStitchedScores(problem, username):
    # "Stitched" scoring: take the best score achieved per subtask across
    # ALL of the user's submissions, then compute the weighted total.
    submissions = submissions_table.query(
        IndexName = 'problemIndex3',
        KeyConditionExpression = Key('problemName').eq(problem),
        ProjectionExpression = 'subtaskScores',
        FilterExpression = Attr('username').eq(username),
        ScanIndexForward = False
    )['Items']
    if len(submissions) == 0:
        return
    # Per-subtask best, assuming every submission has the same subtask count
    # as the first one.
    scores = [0] * len(submissions[0]['subtaskScores'])
    for i in submissions:
        for j in range(len(scores)):
            scores[j] = max(scores[j], int(i['subtaskScores'][j]))
    subtaskMaxScores = problems_table.query(
        KeyConditionExpression = Key('problemName').eq(problem),
        ProjectionExpression = 'subtaskScores'
    )['Items'][0]['subtaskScores']
    # Weighted total: per-subtask percentage times that subtask's max marks,
    # scaled back down by 100.
    totalScore = 0
    for i in range(len(scores)):
        totalScore += scores[i] * int(subtaskMaxScores[i])
    totalScore /= 100
    userInfo = getUserInfoFromUsername(username)
    problemScores = userInfo['problemScores']
    prevScore = 0
    if problem in problemScores:
        prevScore = problemScores[problem]
    maxScore = max(totalScore, prevScore)
    # Store whole numbers as ints; otherwise round to two decimal places.
    if int(maxScore) == maxScore:
        maxScore = int(maxScore)
    else:
        maxScore = round(maxScore, 2)
    users_table.update_item(
        Key = {'email': userInfo['email']},
        UpdateExpression = f'set problemScores. #a = :s',
        ExpressionAttributeValues = {':s': maxScore},
        ExpressionAttributeNames = {'#a': problem}
    )
    if prevScore != 100 and maxScore == 100:
        # Stitched score can only improve, so only the increment case exists.
        problems_table.update_item(
            Key = {'problemName': problem},
            UpdateExpression = f'set noACs = noACs + :one',
            ExpressionAttributeValues = {':one': 1}
        )
def uploadSubmission(submission_upload):
    """Persist one submission record into the submissions table."""
    submissions_table.put_item(Item=submission_upload)
def updateScores(problem, username):
    """Recompute *username*'s best total score on *problem* and persist it.

    Also keeps the problem's ``noACs`` (full-score) counter in sync when the
    stored score crosses the 100 boundary in either direction.
    """
    submissions = submissions_table.query(
        IndexName='problemIndex4',
        KeyConditionExpression=Key('problemName').eq(problem),
        ProjectionExpression='totalScore',
        FilterExpression=Attr('username').eq(username),
        ScanIndexForward=False
    )['Items']
    if len(submissions) == 0:
        # No submissions for this user/problem: leave stored data untouched.
        return
    # Best total score over all submissions (0 floor matches the old loop).
    maxScore = max((i['totalScore'] for i in submissions), default=0)
    maxScore = max(maxScore, 0)
    userInfo = getUserInfoFromUsername(username)
    prevScore = userInfo['problemScores'].get(problem, 0)
    # '#a' aliases the problem name: DynamoDB update expressions do not
    # accept arbitrary characters in attribute names.  (Removed pointless
    # f-string prefixes: the expressions contain no interpolation.)
    users_table.update_item(
        Key={'email': userInfo['email']},
        UpdateExpression='set problemScores. #a = :s',
        ExpressionAttributeValues={':s': maxScore},
        ExpressionAttributeNames={'#a': problem}
    )
    if prevScore == 100 and maxScore != 100:
        # Dropped below full score: decrement the AC counter.
        problems_table.update_item(
            Key={'problemName': problem},
            UpdateExpression='set noACs = noACs - :one',
            ExpressionAttributeValues={':one': 1},
        )
    elif prevScore != 100 and maxScore == 100:
        # Newly reached full score: increment the AC counter.
        problems_table.update_item(
            Key={'problemName': problem},
            UpdateExpression='set noACs = noACs + :one',
            ExpressionAttributeValues={':one': 1}
        )
def updateStitchedScores(problem, username):
    """Recompute *username*'s stitched score on *problem* and persist it.

    "Stitched" scoring takes the best score achieved per subtask across ALL
    of the user's submissions, then forms the weighted total against the
    problem's per-subtask maximum marks.  The stitched score never drops, so
    only the noACs increment case exists.
    """
    submissions = submissions_table.query(
        IndexName='problemIndex3',
        KeyConditionExpression=Key('problemName').eq(problem),
        ProjectionExpression='subtaskScores',
        FilterExpression=Attr('username').eq(username),
        ScanIndexForward=False
    )['Items']
    if len(submissions) == 0:
        return
    # Per-subtask best; assumes every submission reports the same subtask
    # count as the first one (TODO confirm against grader output).
    scores = [0] * len(submissions[0]['subtaskScores'])
    for sub in submissions:
        for j in range(len(scores)):
            scores[j] = max(scores[j], int(sub['subtaskScores'][j]))
    subtaskMaxScores = problems_table.query(
        KeyConditionExpression=Key('problemName').eq(problem),
        ProjectionExpression='subtaskScores'
    )['Items'][0]['subtaskScores']
    # Weighted total: subtask percentage times that subtask's max marks,
    # scaled back down by 100.
    totalScore = sum(
        scores[i] * int(subtaskMaxScores[i]) for i in range(len(scores))
    ) / 100
    userInfo = getUserInfoFromUsername(username)
    prevScore = userInfo['problemScores'].get(problem, 0)
    maxScore = max(totalScore, prevScore)
    # Store whole numbers as ints; otherwise round to two decimal places.
    maxScore = int(maxScore) if int(maxScore) == maxScore else round(maxScore, 2)
    # (Removed pointless f-string prefixes: no interpolation in these
    # expressions; '#a' aliases the problem name for DynamoDB.)
    users_table.update_item(
        Key={'email': userInfo['email']},
        UpdateExpression='set problemScores. #a = :s',
        ExpressionAttributeValues={':s': maxScore},
        ExpressionAttributeNames={'#a': problem}
    )
    if prevScore != 100 and maxScore == 100:
        problems_table.update_item(
            Key={'problemName': problem},
            UpdateExpression='set noACs = noACs + :one',
            ExpressionAttributeValues={':one': 1}
        )
| 30.988848
| 113
| 0.620681
| 757
| 8,336
| 6.760898
| 0.15852
| 0.032825
| 0.029308
| 0.03517
| 0.75635
| 0.75635
| 0.75635
| 0.75635
| 0.75635
| 0.75635
| 0
| 0.015228
| 0.259477
| 8,336
| 268
| 114
| 31.104478
| 0.813867
| 0
| 0
| 0.694064
| 0
| 0.004566
| 0.140955
| 0.015835
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03653
| false
| 0
| 0.022831
| 0
| 0.09589
| 0.004566
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
16bc176efed0af37c05baa5791a8acccc038851f
| 51
|
py
|
Python
|
djmoney/tests/__init__.py
|
lpetrov/django-money
|
5febf2d759e2d311fdc89ab5bc30b2c5f111952a
|
[
"BSD-3-Clause"
] | 1
|
2019-04-27T15:11:27.000Z
|
2019-04-27T15:11:27.000Z
|
djmoney/tests/__init__.py
|
lpetrov/django-money
|
5febf2d759e2d311fdc89ab5bc30b2c5f111952a
|
[
"BSD-3-Clause"
] | null | null | null |
djmoney/tests/__init__.py
|
lpetrov/django-money
|
5febf2d759e2d311fdc89ab5bc30b2c5f111952a
|
[
"BSD-3-Clause"
] | null | null | null |
from model_tests import *
from form_tests import *
| 17
| 25
| 0.803922
| 8
| 51
| 4.875
| 0.625
| 0.564103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156863
| 51
| 2
| 26
| 25.5
| 0.906977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
bc44c1fa3a63f8c7b68846b00a000d3296a71b5d
| 1,493
|
py
|
Python
|
public/pdfjs/test/ttx/fonttools-code/Lib/fontTools/ttLib/tables/__init__.py
|
EnricoPietrocola/CidReaderWeb-WebRTC-PDFjs-TestPlayground
|
a9628050f7f31bf1907fe365920cb584a11d1c06
|
[
"Apache-2.0"
] | 1
|
2020-12-07T14:10:03.000Z
|
2020-12-07T14:10:03.000Z
|
public/pdfjs/test/ttx/fonttools-code/Lib/fontTools/ttLib/tables/__init__.py
|
EnricoPietrocola/CidReaderWeb-WebRTC-PDFjs-TestPlayground
|
a9628050f7f31bf1907fe365920cb584a11d1c06
|
[
"Apache-2.0"
] | 9
|
2019-06-15T21:31:27.000Z
|
2021-05-08T18:55:51.000Z
|
test/ttx/fonttools-code/Lib/fontTools/ttLib/tables/__init__.py
|
VictorYu379/Invertor
|
aa9a0f293f1fbb3ff1f011b1d19affed3cf42f14
|
[
"Apache-2.0"
] | null | null | null |
# DON'T EDIT! This file is generated by MetaTools/buildTableList.py.
def _moduleFinderHint() -> None:
    """Dummy function to let modulefinder know what tables may be
    dynamically imported. Generated by MetaTools/buildTableList.py.
    """
    # NOTE: these imports must remain literal statements (never executed in
    # normal use) so static tools like modulefinder can discover the table
    # modules; do not refactor them into a loop or importlib calls.
    from . import B_A_S_E_
    from . import C_B_D_T_
    from . import C_B_L_C_
    from . import C_F_F_
    from . import C_O_L_R_
    from . import C_P_A_L_
    from . import D_S_I_G_
    from . import E_B_D_T_
    from . import E_B_L_C_
    from . import F_F_T_M_
    from . import G_D_E_F_
    from . import G_M_A_P_
    from . import G_P_K_G_
    from . import G_P_O_S_
    from . import G_S_U_B_
    from . import J_S_T_F_
    from . import L_T_S_H_
    from . import M_A_T_H_
    from . import M_E_T_A_
    from . import O_S_2f_2
    from . import S_I_N_G_
    from . import S_V_G_
    from . import T_S_I_B_
    from . import T_S_I_D_
    from . import T_S_I_J_
    from . import T_S_I_P_
    from . import T_S_I_S_
    from . import T_S_I_V_
    from . import T_S_I__0
    from . import T_S_I__1
    from . import T_S_I__2
    from . import T_S_I__3
    from . import T_S_I__5
    from . import V_D_M_X_
    from . import V_O_R_G_
    from . import _c_m_a_p
    from . import _c_v_t
    from . import _f_p_g_m
    from . import _g_a_s_p
    from . import _g_l_y_f
    from . import _h_d_m_x
    from . import _h_e_a_d
    from . import _h_h_e_a
    from . import _h_m_t_x
    from . import _k_e_r_n
    from . import _l_o_c_a
    from . import _m_a_x_p
    from . import _n_a_m_e
    from . import _p_o_s_t
    from . import _p_r_e_p
    from . import _s_b_i_x
    from . import _v_h_e_a
    from . import _v_m_t_x
| 25.305085
| 68
| 0.754186
| 345
| 1,493
| 2.657971
| 0.15942
| 0.577972
| 0.131952
| 0.143948
| 0.376227
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005843
| 0.197589
| 1,493
| 58
| 69
| 25.741379
| 0.759599
| 0.127261
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018519
| true
| 0
| 0.981481
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bc52f119752d789e630c5ac6ffe66b52ae41c04c
| 19
|
py
|
Python
|
pbutils/__init__.py
|
phonybone/phonybone_utils
|
d95f226ddfc62a1d69b5ff6f53de86188fe0c8f9
|
[
"MIT"
] | null | null | null |
pbutils/__init__.py
|
phonybone/phonybone_utils
|
d95f226ddfc62a1d69b5ff6f53de86188fe0c8f9
|
[
"MIT"
] | null | null | null |
pbutils/__init__.py
|
phonybone/phonybone_utils
|
d95f226ddfc62a1d69b5ff6f53de86188fe0c8f9
|
[
"MIT"
] | null | null | null |
# Package version string for pbutils.
version = "0.1.11"
| 9.5
| 18
| 0.578947
| 4
| 19
| 2.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.157895
| 19
| 1
| 19
| 19
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bc59b926a5321b4466e7a76ee10873d868ebff54
| 139
|
py
|
Python
|
backend/corpora/dataset_processing/exceptions.py
|
BuildJet/single-cell-data-portal
|
080ad03f4745d59ade75c3480149e83bb76cf39b
|
[
"MIT"
] | 16
|
2020-05-12T23:25:51.000Z
|
2021-06-17T12:04:13.000Z
|
backend/corpora/dataset_processing/exceptions.py
|
BuildJet/single-cell-data-portal
|
080ad03f4745d59ade75c3480149e83bb76cf39b
|
[
"MIT"
] | 943
|
2020-05-11T18:03:59.000Z
|
2021-08-18T21:57:51.000Z
|
backend/corpora/dataset_processing/exceptions.py
|
HumanCellAtlas/dcp-prototype
|
44ca66a266004124f39d7d3e3dd75e9076012ff0
|
[
"MIT"
] | 2
|
2021-09-07T19:04:17.000Z
|
2021-12-23T21:51:36.000Z
|
class ValidationFailed(Exception):
    """Raised when dataset validation fails during processing."""
class ProcessingFailed(Exception):
    """Raised when dataset processing fails."""
class ProcessingCancelled(Exception):
    """Raised when dataset processing is cancelled before completion."""
| 12.636364
| 37
| 0.755396
| 12
| 139
| 8.75
| 0.5
| 0.371429
| 0.342857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179856
| 139
| 10
| 38
| 13.9
| 0.921053
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
bc5bd0df8efa9d1774221a0219b0729da301f851
| 376
|
py
|
Python
|
Decode me/script.py
|
killua4564/2019-Crypto-CTF
|
f465601f3088222822d27f8135da39cd930c98bb
|
[
"MIT"
] | null | null | null |
Decode me/script.py
|
killua4564/2019-Crypto-CTF
|
f465601f3088222822d27f8135da39cd930c98bb
|
[
"MIT"
] | null | null | null |
Decode me/script.py
|
killua4564/2019-Crypto-CTF
|
f465601f3088222822d27f8135da39cd930c98bb
|
[
"MIT"
] | null | null | null |
import base64
# CTF "Decode me" solver: the hint string is enciphered with a simple
# monoalphabetic substitution built via str.maketrans.
s = "Habl bl max yetz: E5BIFgmsGI6pHRByMeI8L75qxRBdLsJ6EgA8tLF6JRSpue4RALPhA6X4 Xnlm wxvhwx bm :D"
# Mapping shifts each alphabet independently: uppercase by +12 (O->A),
# lowercase by +7 (t->a), digits by +5 (1->6).
t = s.maketrans("OPQRSTUVWXYZABCDEFGHIJKLMNtuvwxyzabcdefghijklmnopqrs1234567890", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz6789012345")
print(s.translate(t))
# The flag itself is stored separately as a base64 blob.
print(base64.b64decode(b'Q0NURntzSU1wTDNfYlU3X20xeDNkXzV1QnM3aXR1VDEwbl9DMXBoM1J9').decode())
| 75.2
| 147
| 0.875
| 27
| 376
| 12.185185
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134078
| 0.047872
| 376
| 5
| 148
| 75.2
| 0.784916
| 0
| 0
| 0
| 0
| 0
| 0.721485
| 0.625995
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bca7472072e4bf20e3bdfb6c4b80edb3bf3e8286
| 188
|
py
|
Python
|
ynv/properties.py
|
BitDEVil2K16-Club/Sollumz
|
9cfed42f7bba797d05f4bfa663dc5e075b8a796c
|
[
"MIT"
] | 1
|
2022-01-26T03:27:52.000Z
|
2022-01-26T03:27:52.000Z
|
ynv/properties.py
|
BitDEVil2K16-Club/Sollumz
|
9cfed42f7bba797d05f4bfa663dc5e075b8a796c
|
[
"MIT"
] | 1
|
2022-03-14T19:48:12.000Z
|
2022-03-14T19:48:12.000Z
|
ynv/properties.py
|
ZiDeveloper/Sollumz
|
e6abb71016bfcea12c9fd551374c1d0b94f945b7
|
[
"MIT"
] | 1
|
2022-01-19T03:40:27.000Z
|
2022-01-19T03:40:27.000Z
|
import bpy
def register():
    # Attach a per-object collection property used to store polygon flags.
    # NOTE(review): CollectionProperty's ``type`` normally expects a
    # bpy.types.PropertyGroup subclass; passing the bpy.props.StringProperty
    # factory here looks suspect — confirm against Blender's API docs.
    bpy.types.Object.poly_flags = bpy.props.CollectionProperty(
        type=bpy.props.StringProperty)
def unregister():
    # Undo register(): remove the dynamically added property from Object.
    del bpy.types.Object.poly_flags
| 17.090909
| 63
| 0.723404
| 24
| 188
| 5.583333
| 0.583333
| 0.119403
| 0.208955
| 0.268657
| 0.343284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 188
| 10
| 64
| 18.8
| 0.858974
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.166667
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bca81fb1c468148d21e4da9c85109457e458ae9d
| 26,733
|
py
|
Python
|
tests/integration_test/history_client_test.py
|
digiteinfotech/kairon
|
6a2f0a056dbfe5c041fd9e00a6f5b878e339309e
|
[
"Apache-2.0"
] | 97
|
2020-08-18T10:07:48.000Z
|
2022-03-26T18:33:37.000Z
|
tests/integration_test/history_client_test.py
|
digiteinfotech/kairon
|
6a2f0a056dbfe5c041fd9e00a6f5b878e339309e
|
[
"Apache-2.0"
] | 276
|
2020-08-27T23:24:35.000Z
|
2022-03-31T09:43:30.000Z
|
tests/integration_test/history_client_test.py
|
digiteinfotech/kairon
|
6a2f0a056dbfe5c041fd9e00a6f5b878e339309e
|
[
"Apache-2.0"
] | 46
|
2020-09-11T13:29:41.000Z
|
2022-03-08T12:27:17.000Z
|
import json
import os
import datetime
import responses
from fastapi.testclient import TestClient
from mongoengine import connect
import pytest
from kairon.api.app.main import app
from kairon.shared.account.processor import AccountProcessor
from kairon.shared.data.processor import MongoProcessor
from kairon.exceptions import AppException
from kairon.history.processor import HistoryProcessor
from kairon.shared.utils import Utility
from mongomock import MongoClient
client = TestClient(app)
def pytest_configure():
    """Initialise the shared pytest-namespace slots used across these tests."""
    initial_state = {
        'token_type': None,
        'access_token': None,
        'bot': None,
    }
    return initial_state
@pytest.fixture(autouse=True)
def setup():
    # Runs before every test: point the app at the test configuration,
    # load environment settings, and open the mongoengine connection
    # (presumably backed by a test database per system.yaml — confirm).
    os.environ["system_file"] = "./tests/testing_data/system.yaml"
    Utility.load_environment()
    connect(**Utility.mongoengine_connection())
def user_details(*args, **kwargs):
    """Stand-in for AccountProcessor.get_user_details; also pins pytest.bot."""
    pytest.bot = "integration"
    details = {
        "email": "integration@demo.com",
        "password": Utility.get_password_hash("welcome@1"),
        "first_name": "integration",
        "last_name": "test",
        "status": True,
        "bot": [pytest.bot],
        "account": 1,
        "is_integration_user": False,
    }
    return details
def bot_details(*args, **kwargs):
    """Stand-in for AccountProcessor.get_bot returning the integration bot."""
    details = dict(
        user="integration@demo.com",
        status=True,
        bot=pytest.bot,
        account=1,
    )
    return details
@pytest.fixture
def mock_auth(monkeypatch):
    # Bypass real account lookups: every request authenticates as the
    # integration user and resolves to the integration bot.
    monkeypatch.setattr(AccountProcessor, "get_user_details", user_details)
    monkeypatch.setattr(AccountProcessor, "get_bot", bot_details)
def endpoint_details(*args, **kwargs):
    """Stand-in for MongoProcessor.get_endpoints: an external history server."""
    history_endpoint = {"url": "https://localhost:8083", "token": "test_token"}
    return {"history_endpoint": history_endpoint}
@pytest.fixture
def mock_mongo_processor(monkeypatch):
    # Tests using this fixture talk to the mocked external history server
    # at https://localhost:8083 with token 'test_token'.
    monkeypatch.setattr(MongoProcessor, "get_endpoints", endpoint_details)
@pytest.fixture
def mock_mongo_processor_endpoint_not_configured(monkeypatch):
    # Simulates a bot with no history endpoint configured, which should make
    # the API fall back to kairon's own history server settings.
    def _mock_exception(*args, **kwargs):
        raise AppException('Config not found')
    monkeypatch.setattr(MongoProcessor, "get_endpoints", _mock_exception)
@pytest.fixture
def mock_db_client(monkeypatch):
    # Replace the real Mongo connection with an in-memory mongomock client;
    # the tuple mirrors get_mongo_connection's (client, db/collection, message) shape.
    def db_client(*args, **kwargs):
        return MongoClient(), "conversation", None
    monkeypatch.setattr(HistoryProcessor, "get_mongo_connection", db_client)
def history_users(*args, **kwargs):
    """Canned ``(users, message)`` pair mimicking HistoryProcessor.fetch_chat_users."""
    sender_ids = [
        "5b029887-bed2-4bbb-aa25-bd12fda26244",
        "b868d6ee-f98f-4c1b-b284-ce034aaad01f",
        "b868d6ee-f98f-4c1b-b284-ce034aaad61f",
        "b868d6ee-f98f-4c1b-b284-ce4534aaad61f",
        "49931985-2b51-4db3-89d5-a50767e6d98e",
        "2e409e7c-06f8-4de8-8c88-93b4cf0b7211",
        "2fed7769-b647-4088-8ed9-a4f4f3653f25",
    ]
    return sender_ids, None
def user_history(*args, **kwargs):
    """Load canned conversation events from the test fixture file.

    Returns ``(events, message)`` mimicking HistoryProcessor.fetch_user_history.
    Fix: the fixture file handle is now closed deterministically (the
    original left the file returned by ``open`` unclosed).
    """
    with open("tests/testing_data/history/conversation.json") as fixture:
        json_data = json.load(fixture)
    return (
        json_data['events'],
        None
    )
def history_conversations(*args, **kwargs):
    """Load canned conversation history from the test fixture file.

    Returns ``(conversations, message)``.  Fix: the fixture file handle is
    now closed deterministically (the original leaked the open file).
    """
    with open("tests/testing_data/history/conversations_history.json") as fixture:
        json_data = json.load(fixture)
    return json_data, None
@pytest.fixture
def mock_chat_history(monkeypatch):
    # Serve canned history/user data instead of querying a tracker store.
    monkeypatch.setattr(HistoryProcessor, "fetch_user_history", user_history)
    monkeypatch.setattr(HistoryProcessor, "fetch_chat_users", history_users)
# Logs in (stashing the JWT in pytest.* for every later test), then verifies
# that an unreachable history server surfaces as error_code 422 with a
# connection-failure message rather than an exception.
def test_chat_history_users_connection_error(mock_auth, mock_mongo_processor):
    response = client.post(
        "/api/auth/login",
        data={"username": "integration@demo", "password": "welcome@1"},
    )
    token_response = response.json()
    # Saved for reuse: subsequent tests read pytest.access_token/token_type.
    pytest.access_token = token_response["data"]["access_token"]
    pytest.token_type = token_response["data"]["token_type"]
    response = client.get(
        f"/api/history/{pytest.bot}/users",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    actual = response.json()
    assert actual["error_code"] == 422
    assert actual["data"] is None
    assert actual["message"].__contains__('Unable to connect to history server: ')
    assert not actual["success"]
# With a configured history endpoint, the proxy call must carry the
# endpoint's own bearer token ('test_token').
@responses.activate
def test_chat_history_users_kairon_client_user_endpoint(mock_auth, mock_mongo_processor):
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/conversations/users",
        status=200,
        json={"data": {"users": history_users()[0]}},
        match=[responses.json_params_matcher({'month': 1})],
    )
    response = client.get(
        f"/api/history/{pytest.bot}/users",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    assert responses.calls[len(responses.calls) - 1].request.headers['Authorization'] == 'Bearer test_token'
    actual = response.json()
    assert actual["error_code"] == 0
    assert len(actual["data"]["users"]) == 7
    assert actual["message"] is None
    assert actual["success"]


# When no endpoint is configured, the request falls back to kairon's own
# history server URL/token from Utility.environment.
@responses.activate
def test_chat_history_users_kairon_client_kairon_endpoint(mock_auth, mock_mongo_processor_endpoint_not_configured):
    responses.add(
        responses.GET,
        f"{Utility.environment['history_server']['url']}/api/history/{pytest.bot}/conversations/users",
        status=200,
        json={"data": {"users": history_users()[0]}},
        match=[responses.json_params_matcher({'month': 1})],
    )
    response = client.get(
        f"/api/history/{pytest.bot}/users",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    assert responses.calls[0].request.headers['Authorization'] == 'Bearer ' + Utility.environment['history_server']['token']
    actual = response.json()
    assert actual["error_code"] == 0
    assert len(actual["data"]["users"]) == 7
    assert actual["message"] is None
    assert actual["success"]


# Per-user conversation history is proxied through unchanged.
@responses.activate
def test_chat_history_with_kairon_client(mock_auth, mock_mongo_processor):
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/conversations/users/5e564fbcdcf0d5fad89e3acd",
        status=200,
        json={"data": {"history": history_conversations()[0]}},
        match=[responses.json_params_matcher({'month': 1})],
    )
    response = client.get(
        f"/api/history/{pytest.bot}/users/5e564fbcdcf0d5fad89e3acd",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    actual = response.json()
    assert actual["error_code"] == 0
    assert len(actual["data"]["history"]) == 7
    assert actual["message"] is None
    assert actual["success"]
# With no nlu_fallback rule defined yet, the fallback-trend request is sent
# with nlu_fallback=None.
@responses.activate
def test_fallback_count_range_no_nlu_fallback_rule(mock_auth, mock_mongo_processor):
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/trends/fallback",
        status=200,
        match=[responses.json_params_matcher({'month': 6, 'action_fallback': 'action_default_fallback',
                                              'nlu_fallback': None})],
        json={"data": {'fallback_counts': {1: 25, 2: 24, 3: 28, 4: 26, 5: 20, 6: 25}}}
    )
    response = client.get(
        f"/api/history/{pytest.bot}/metrics/trend/user/fallback",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    actual = response.json()
    # JSON round-trip stringifies the integer keys.
    assert actual["error_code"] == 0
    assert actual["data"]["fallback_counts"] == {'1': 25, '2': 24, '3': 28, '4': 26, '5': 20, '6': 25}
    assert actual["message"] is None
    assert actual["success"]


# After adding a fallback RULE, the utterance from the rule is passed along
# as nlu_fallback in the downstream request.
@responses.activate
def test_visitor_hit_fallback_with_kairon_client(mock_auth, mock_mongo_processor):
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/metrics/fallback",
        status=200,
        json={"data": {'fallback_count': 10, 'total_count': 90}},
        match=[responses.json_params_matcher({'month': 1, 'action_fallback': 'action_default_fallback',
                                              'nlu_fallback': 'utter_please_rephrase'})],
    )
    steps = [
        {"name": "nlu_fallback", "type": "INTENT"},
        {"name": "utter_please_rephrase", "type": "BOT"}
    ]
    rule = {'name': 'fallback_rule', 'steps': steps, 'type': 'RULE'}
    MongoProcessor().add_complex_story(rule, pytest.bot, 'test')
    response = client.get(
        f"/api/history/{pytest.bot}/metrics/fallback",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    actual = response.json()
    assert actual["error_code"] == 0
    assert actual["data"]["fallback_count"] == 10
    assert actual["data"]["total_count"] == 90
    assert actual["message"] is None
    assert actual["success"]


# Scalar metric endpoints are proxied as-is.
@responses.activate
def test_conversation_steps_with_kairon_client(mock_auth, mock_mongo_processor):
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/metrics/conversation/steps",
        status=200,
        match=[responses.json_params_matcher({'month': 1})],
        json={"data": 100}
    )
    response = client.get(
        f"/api/history/{pytest.bot}/metrics/conversation/steps",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    actual = response.json()
    assert actual["error_code"] == 0
    assert actual["data"] == 100
    assert actual["message"] is None
    assert actual["success"]
# Average conversation time is proxied as a scalar (float preserved).
@responses.activate
def test_conversation_time_with_kairon_client(mock_auth, mock_mongo_processor):
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/metrics/conversation/time",
        status=200,
        match=[responses.json_params_matcher({'month': 1})],
        json={"data": 900.5}
    )
    response = client.get(
        f"/api/history/{pytest.bot}/metrics/conversation/time",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    actual = response.json()
    assert actual["error_code"] == 0
    assert actual["data"] == 900.5
    assert actual["message"] is None
    assert actual["success"]


# Per-user metrics list is forwarded unchanged.
@responses.activate
def test_user_with_metrics_with_kairon_client(mock_auth, mock_mongo_processor):
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/metrics/users",
        status=200,
        match=[responses.json_params_matcher({'month': 1})],
        json={"data": {'users': [{'sender_id': 'test@kairon.com', 'steps': 55, 'time': 15},
                                 {'sender_id': 'bot@kairon.com', 'steps': 20, 'time': 5}]}}
    )
    response = client.get(
        f"/api/history/{pytest.bot}/metrics/users",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    actual = response.json()
    assert actual["error_code"] == 0
    assert actual["data"]["users"] == [{'sender_id': 'test@kairon.com', 'steps': 55, 'time': 15},
                                       {'sender_id': 'bot@kairon.com', 'steps': 20, 'time': 5}]
    assert actual["message"] is None
    assert actual["success"]


# Default engaged-user query uses month=1, conversation_step_threshold=10.
@responses.activate
def test_engaged_users_with_kairon_client(mock_auth, mock_mongo_processor):
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/metrics/users/engaged",
        match=[responses.json_params_matcher({'month': 1, 'conversation_step_threshold': 10})],
        status=200,
        json={"data": {'engaged_users': 50}}
    )
    response = client.get(
        f"/api/history/{pytest.bot}/metrics/user/engaged",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    actual = response.json()
    assert actual["error_code"] == 0
    assert actual["data"]["engaged_users"] == 50
    assert actual["message"] is None
    assert actual["success"]


# New-user count metric.
@responses.activate
def test_new_users_with_kairon_client(mock_auth, mock_mongo_processor):
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/metrics/users/new",
        match=[responses.json_params_matcher({'month': 1})],
        status=200,
        json={"data": {'new_users': 50}}
    )
    response = client.get(
        f"/api/history/{pytest.bot}/metrics/user/new",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    actual = response.json()
    assert actual["error_code"] == 0
    assert actual["data"]["new_users"] == 50
    assert actual["message"] is None
    assert actual["success"]
# Successful-conversation count; nlu_fallback comes from the rule created in
# an earlier test (test ordering dependency).
@responses.activate
def test_successful_conversation_with_kairon_client(mock_auth, mock_mongo_processor):
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/metrics/conversation/success",
        status=200,
        match=[responses.json_params_matcher({'month': 1, 'action_fallback': 'action_default_fallback',
                                              'nlu_fallback': 'utter_please_rephrase'})],
        json={"data": {'successful_conversations': 150}}
    )
    response = client.get(
        f"/api/history/{pytest.bot}/metrics/conversation/success",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    actual = response.json()
    assert actual["error_code"] == 0
    assert actual["data"]["successful_conversations"] == 150
    assert actual["message"] is None
    assert actual["success"]


# User-retention scalar metric.
@responses.activate
def test_user_retention_with_kairon_client(mock_auth, mock_mongo_processor):
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/metrics/users/retention",
        match=[responses.json_params_matcher({'month': 1})],
        status=200,
        json={"data": {'user_retention': 25}}
    )
    response = client.get(
        f"/api/history/{pytest.bot}/metrics/user/retention",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    actual = response.json()
    assert actual["error_code"] == 0
    assert actual["data"]["user_retention"] == 25
    assert actual["message"] is None
    assert actual["success"]


# Engaged-user trend: defaults month=6, threshold=10; JSON stringifies keys.
@responses.activate
def test_engaged_user_range_with_kairon_client(mock_auth, mock_mongo_processor):
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/trends/users/engaged",
        status=200,
        match=[responses.json_params_matcher({'month': 6, 'conversation_step_threshold': 10})],
        json={"data": {'engaged_user_range': {1: 25, 2: 24, 3: 28, 4: 26, 5: 20, 6: 25}}}
    )
    response = client.get(
        f"/api/history/{pytest.bot}/metrics/trend/user/engaged",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    actual = response.json()
    assert actual["error_code"] == 0
    assert actual["data"]['engaged_user_range'] == {'1': 25, '2': 24, '3': 28, '4': 26, '5': 20, '6': 25}
    assert actual["message"] is None
    assert actual["success"]
# New-user trend over the default 6-month window.
@responses.activate
def test_new_user_range_with_kairon_client(mock_auth, mock_mongo_processor):
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/trends/users/new",
        status=200,
        match=[responses.json_params_matcher({'month': 6})],
        json={"data": {'new_user_range': {1: 25, 2: 24, 3: 28, 4: 26, 5: 20, 6: 25}}}
    )
    response = client.get(
        f"/api/history/{pytest.bot}/metrics/trend/user/new",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    actual = response.json()
    assert actual["error_code"] == 0
    assert actual["data"]['new_user_range'] == {'1': 25, '2': 24, '3': 28, '4': 26, '5': 20, '6': 25}
    assert actual["message"] is None
    assert actual["success"]


# Successful-conversation trend (fallback params forwarded).
@responses.activate
def test_successful_conversation_range_with_kairon_client(mock_auth, mock_mongo_processor):
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/trends/conversations/success",
        status=200,
        match=[responses.json_params_matcher({'month': 6, 'action_fallback': 'action_default_fallback',
                                              'nlu_fallback': 'utter_please_rephrase'})],
        json={"data": {'success_conversation_range': {1: 25, 2: 24, 3: 28, 4: 26, 5: 20, 6: 25}}}
    )
    response = client.get(
        f"/api/history/{pytest.bot}/metrics/trend/conversation/success",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    actual = response.json()
    assert actual["error_code"] == 0
    assert actual["data"]["success_conversation_range"] == {'1': 25, '2': 24, '3': 28, '4': 26, '5': 20, '6': 25}
    assert actual["message"] is None
    assert actual["success"]


# Retention trend over the default 6-month window.
@responses.activate
def test_user_retention_range_with_kairon_client(mock_auth, mock_mongo_processor):
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/trends/users/retention",
        status=200,
        match=[responses.json_params_matcher({'month': 6})],
        json={"data": {'retention_range': {1: 25, 2: 24, 3: 28, 4: 26, 5: 20, 6: 25}}}
    )
    response = client.get(
        f"/api/history/{pytest.bot}/metrics/trend/user/retention",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    actual = response.json()
    assert actual["error_code"] == 0
    assert actual["data"]["retention_range"] == {'1': 25, '2': 24, '3': 28, '4': 26, '5': 20, '6': 25}
    assert actual["message"] is None
    assert actual["success"]


# Explicit query params (month=5, threshold=11) must be forwarded downstream.
@responses.activate
def test_engaged_users_with_value_with_kairon_client(mock_auth, mock_mongo_processor):
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/metrics/users/engaged",
        status=200,
        match=[responses.json_params_matcher({'month': 5, 'conversation_step_threshold': 11})],
        json={"data": {'engaged_users': 60}}
    )
    response = client.get(
        f"/api/history/{pytest.bot}/metrics/user/engaged?month=5&conversation_step_threshold=11",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    actual = response.json()
    assert actual["error_code"] == 0
    assert actual["data"]["engaged_users"] == 60
    assert actual["message"] is None
    assert actual["success"]
@responses.activate
def test_engaged_user_range_with_value_with_kairon_client(mock_auth, mock_mongo_processor):
responses.add(
responses.GET,
f"https://localhost:8083/api/history/{pytest.bot}/trends/users/engaged",
status=200,
match=[responses.json_params_matcher({'month': 5, 'conversation_step_threshold': 11})],
json={"data": {'engaged_user_range': {1: 25, 2: 24, 3: 28, 4: 26, 5: 20, 6: 25}}}
)
response = client.get(
f"/api/history/{pytest.bot}/metrics/trend/user/engaged/?month=5&conversation_step_threshold=11",
headers={"Authorization": pytest.token_type + " " + pytest.access_token},
)
actual = response.json()
assert actual["error_code"] == 0
assert actual["data"]['engaged_user_range'] == {'1': 25, '2': 24, '3': 28, '4': 26, '5': 20, '6': 25}
assert actual["message"] is None
assert actual["success"]
@responses.activate
def test_fallback_count_range_with_kairon_client(mock_auth, mock_mongo_processor):
    """Fallback trend counts are proxied through with string month keys."""
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/trends/fallback",
        status=200,
        match=[responses.json_params_matcher({'month': 6, 'action_fallback': 'action_default_fallback',
                                              'nlu_fallback': 'utter_please_rephrase'})],
        json={"data": {'fallback_counts': {1: 25, 2: 24, 3: 28, 4: 26, 5: 20, 6: 25}}},
    )
    auth_header = {"Authorization": pytest.token_type + " " + pytest.access_token}
    actual = client.get(
        f"/api/history/{pytest.bot}/metrics/trend/user/fallback",
        headers=auth_header,
    ).json()
    assert actual["success"]
    assert actual["error_code"] == 0
    assert actual["message"] is None
    assert actual["data"]["fallback_counts"] == {'1': 25, '2': 24, '3': 28, '4': 26, '5': 20, '6': 25}
@responses.activate
def test_flat_conversations_with_kairon_client(mock_auth, mock_mongo_processor):
    """Flat conversation records from the history service reach the caller intact."""
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/conversations/",
        status=200,
        match=[responses.json_params_matcher({'month': 1})],
        json={"data": {'conversation_data': history_conversations()[0]}},
    )
    auth_header = {"Authorization": pytest.token_type + " " + pytest.access_token}
    actual = client.get(
        f"/api/history/{pytest.bot}/conversations/",
        headers=auth_header,
    ).json()
    assert actual["success"]
    assert actual["error_code"] == 0
    assert actual["message"] is None
    assert len(actual["data"]["conversation_data"]) == 7
def list_bot_mock(*args, **kwargs):
    """Stand-in for AccountProcessor.list_bots: a single bot entry for the test bot."""
    bot_entry = {'name': 'test', '_id': pytest.bot}
    return [bot_entry]
@pytest.fixture
def mock_list_bots(monkeypatch):
    # Patch AccountProcessor.list_bots so endpoints see the test bot
    # without touching the account store.
    monkeypatch.setattr(AccountProcessor, "list_bots", list_bot_mock)
@responses.activate
def test_download_conversation_with_data_with_kairon_client(mock_auth, mock_mongo_processor, mock_list_bots):
    """Conversation download is proxied and served as a dated CSV attachment.

    Fix: the fixture file is now opened in a ``with`` block so the handle
    is always closed (the original leaked an open file object, which also
    trips ResourceWarning under ``-W error``).
    """
    with open('./tests/testing_data/history/conversation.json') as file:
        responses.add(
            responses.GET,
            f"https://localhost:8083/api/history/{pytest.bot}/conversations/download",
            status=200,
            match=[responses.json_params_matcher({'month': 1})],
            body=file.read(),
            content_type="text/plain",
            adding_headers={"Content-Disposition": "attachment; filename=conversations.csv"},
            stream=True
        )
    response = client.get(
        f"/api/history/{pytest.bot}/conversations/download",
        headers={"Authorization": pytest.token_type + " " + pytest.access_token},
    )
    assert response.content.decode('utf-8')
    # Filename embeds today's date, e.g. conversation_history_test_01_02_21.csv
    assert f"conversation_history_test{datetime.date.today().strftime('_%d_%m_%y.csv')}" in str(response.headers)
@responses.activate
def test_download_conversation_with_error_with_kairon_client(mock_auth, mock_mongo_processor, mock_list_bots):
    """An error payload from the history service is forwarded verbatim."""
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/conversations/download",
        status=200,
        match=[responses.json_params_matcher({'month': 1})],
        json={'error_code': 422, 'message': "No data available!", 'success': False},
    )
    auth_header = {"Authorization": pytest.token_type + " " + pytest.access_token}
    actual = client.get(
        f"/api/history/{pytest.bot}/conversations/download",
        headers=auth_header,
    ).json()
    assert not actual["success"]
    assert actual["error_code"] == 422
    assert actual["message"] == "No data available!"
@responses.activate
def test_total_conversation_range_with_kairon_client(mock_auth, mock_mongo_processor):
    """Total-conversation trend buckets are relayed with string month keys."""
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/trends/conversations/total",
        status=200,
        match=[responses.json_params_matcher({'month': 6})],
        json={"data": {'total_conversation_range': {1: 25, 2: 24, 3: 28, 4: 26, 5: 20, 6: 25}}},
    )
    auth_header = {"Authorization": pytest.token_type + " " + pytest.access_token}
    actual = client.get(
        f"/api/history/{pytest.bot}/metrics/trend/conversations/total",
        headers=auth_header,
    ).json()
    assert actual["success"]
    assert actual["error_code"] == 0
    assert actual["message"] is None
    assert actual["data"]["total_conversation_range"] == {'1': 25, '2': 24, '3': 28, '4': 26, '5': 20, '6': 25}
@responses.activate
def test_top_intent_with_kairon_client(mock_auth, mock_mongo_processor):
    """Topmost-intent metrics pass through the proxy unchanged."""
    top_intents = [{'_id': 'action_google_search_kanban', 'count': 43}]
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/metrics/intents/topmost",
        match=[responses.json_params_matcher({'month': 1, "top_n": 10})],
        status=200,
        json={"data": top_intents},
    )
    auth_header = {"Authorization": pytest.token_type + " " + pytest.access_token}
    actual = client.get(
        f"/api/history/{pytest.bot}/metrics/intents/topmost",
        headers=auth_header,
    ).json()
    assert actual["success"]
    assert actual["error_code"] == 0
    assert actual["message"] is None
    assert actual["data"] == [{'_id': 'action_google_search_kanban', 'count': 43}]
@responses.activate
def test_top_action_with_kairon_client(mock_auth, mock_mongo_processor):
    """Topmost-action metrics pass through the proxy unchanged."""
    top_actions = [{'_id': 'nlu_fallback', 'count': 32}]
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/metrics/actions/topmost",
        match=[responses.json_params_matcher({'month': 1, "top_n": 10})],
        status=200,
        json={"data": top_actions},
    )
    auth_header = {"Authorization": pytest.token_type + " " + pytest.access_token}
    actual = client.get(
        f"/api/history/{pytest.bot}/metrics/actions/topmost",
        headers=auth_header,
    ).json()
    assert actual["success"]
    assert actual["error_code"] == 0
    assert actual["message"] is None
    assert actual["data"] == [{'_id': 'nlu_fallback', 'count': 32}]
@responses.activate
def test_conversation_step_range_with_kairon_client(mock_auth, mock_mongo_processor):
    """Conversation-step trend buckets are relayed with string month keys."""
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/trends/conversations/steps",
        status=200,
        match=[responses.json_params_matcher({'month': 6})],
        json={"data": {'total_conversation_range': {1: 25, 2: 24, 3: 28, 4: 26, 5: 20, 6: 25}}},
    )
    auth_header = {"Authorization": pytest.token_type + " " + pytest.access_token}
    actual = client.get(
        f"/api/history/{pytest.bot}/metrics/trend/conversations/steps",
        headers=auth_header,
    ).json()
    assert actual["success"]
    assert actual["error_code"] == 0
    assert actual["message"] is None
    assert actual["data"]["total_conversation_range"] == {'1': 25, '2': 24, '3': 28, '4': 26, '5': 20, '6': 25}
@responses.activate
def test_wordcloud_with_kairon_client(mock_auth, mock_mongo_processor):
    """Wordcloud data from the history service is returned as-is."""
    cloud_payload = [{'_id': 'nlu_fallback', 'count': 32}]
    responses.add(
        responses.GET,
        f"https://localhost:8083/api/history/{pytest.bot}/conversations/wordcloud",
        status=200,
        match=[responses.json_params_matcher({'month': 1, 'l_bound': 0, 'u_bound': 1, 'stopword_list': None})],
        json={"data": cloud_payload},
    )
    auth_header = {"Authorization": pytest.token_type + " " + pytest.access_token}
    actual = client.get(
        f"/api/history/{pytest.bot}/conversations/wordcloud",
        headers=auth_header,
    ).json()
    assert actual["data"] == [{'_id': 'nlu_fallback', 'count': 32}]
| 35.549202
| 124
| 0.65017
| 3,185
| 26,733
| 5.267504
| 0.078493
| 0.070811
| 0.052453
| 0.062288
| 0.796209
| 0.785421
| 0.774811
| 0.757644
| 0.748167
| 0.730941
| 0
| 0.038503
| 0.193618
| 26,733
| 751
| 125
| 35.596538
| 0.73976
| 0
| 0
| 0.563427
| 0
| 0.003295
| 0.278366
| 0.097939
| 0
| 0
| 0
| 0
| 0.179572
| 1
| 0.074135
| false
| 0.003295
| 0.023064
| 0.009885
| 0.112026
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bcc801bcd4fc882d63633651842fcdc28626811c
| 472
|
py
|
Python
|
models/wrf_hydro/hydro_dart_py/hydrodartpy/__init__.py
|
fairaque1999/DART
|
7490f75cf9800cc841b66d87840ad96c5751b809
|
[
"Apache-2.0"
] | 65
|
2019-10-16T13:31:06.000Z
|
2022-03-14T11:52:58.000Z
|
models/wrf_hydro/hydro_dart_py/hydrodartpy/__init__.py
|
fairaque1999/DART
|
7490f75cf9800cc841b66d87840ad96c5751b809
|
[
"Apache-2.0"
] | 283
|
2019-09-23T15:48:34.000Z
|
2022-03-31T21:44:41.000Z
|
models/wrf_hydro/hydro_dart_py/hydrodartpy/__init__.py
|
fairaque1999/DART
|
7490f75cf9800cc841b66d87840ad96c5751b809
|
[
"Apache-2.0"
] | 67
|
2019-09-19T22:13:24.000Z
|
2022-03-20T15:58:26.000Z
|
from .core import dartclasses
from .core import setup_dart
from .core.get_ensemble_time import *
from .core.setup_experiment import *
from .core.advance_ensemble import advance_ensemble
from .core.run_filter_experiment import run_filter_experiment
from .core import setup_experiment_tools
from .core import setup_initial_ens
from .core import setup_obs_prep
from .core import setup_wrf_hydro
from .core import setup_wrf_hydro_ens
from .core import setup_wrf_hydro_ens_job
| 36.307692
| 61
| 0.860169
| 75
| 472
| 5.08
| 0.28
| 0.251969
| 0.293963
| 0.349081
| 0.286089
| 0.228346
| 0.15748
| 0
| 0
| 0
| 0
| 0
| 0.101695
| 472
| 12
| 62
| 39.333333
| 0.898585
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
bce4c9d160dfa78e5ec0c9a37b4a14bf10d125b8
| 42
|
py
|
Python
|
soft_prompts/model/__init__.py
|
hiaoxui/soft-prompts
|
214dbedf735fe1c98ab2be3a26066d50ff0a86d8
|
[
"Apache-2.0"
] | 48
|
2021-04-30T15:43:59.000Z
|
2022-03-31T21:57:31.000Z
|
soft_prompts/model/__init__.py
|
hiaoxui/soft-prompts
|
214dbedf735fe1c98ab2be3a26066d50ff0a86d8
|
[
"Apache-2.0"
] | 1
|
2021-07-15T05:06:14.000Z
|
2021-07-15T05:06:14.000Z
|
soft_prompts/model/__init__.py
|
hiaoxui/soft-prompts
|
214dbedf735fe1c98ab2be3a26066d50ff0a86d8
|
[
"Apache-2.0"
] | 6
|
2021-06-17T11:00:22.000Z
|
2022-03-26T11:26:49.000Z
|
from .relaxed_pattern import PatternModel
| 21
| 41
| 0.880952
| 5
| 42
| 7.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
bce4ebaaa6746bb46595d567b4ebfbdbbdec7d9d
| 177
|
py
|
Python
|
lib/JumpScale/baselib/github/__init__.py
|
rudecs/jumpscale_core7
|
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
|
[
"Apache-2.0"
] | null | null | null |
lib/JumpScale/baselib/github/__init__.py
|
rudecs/jumpscale_core7
|
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
|
[
"Apache-2.0"
] | 4
|
2016-08-25T12:08:39.000Z
|
2018-04-12T12:36:01.000Z
|
lib/JumpScale/baselib/github/__init__.py
|
rudecs/jumpscale_core7
|
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
|
[
"Apache-2.0"
] | 3
|
2016-03-08T07:49:34.000Z
|
2018-10-19T13:56:43.000Z
|
from JumpScale import j
def cb():
    """Lazily import and construct the GitHub client factory."""
    from .github import GitHubFactory
    return GitHubFactory()
# Module import side effects: expose j.clients, then register the
# 'github' client with the lazy factory above.
j.base.loader.makeAvailable(j, 'clients')
j.clients._register('github', cb)
| 19.666667
| 41
| 0.734463
| 23
| 177
| 5.608696
| 0.608696
| 0.124031
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146893
| 177
| 8
| 42
| 22.125
| 0.854305
| 0
| 0
| 0
| 0
| 0
| 0.073446
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4c1819ce86363dd3e6403801e95633a1da295828
| 190
|
py
|
Python
|
git_t5/data/__init__.py
|
formermagic/git-t5
|
5ee27d1be72988986f03659bd79fc2f9680e5372
|
[
"MIT"
] | 1
|
2021-07-05T18:40:35.000Z
|
2021-07-05T18:40:35.000Z
|
git_t5/data/__init__.py
|
formermagic/git-t5
|
5ee27d1be72988986f03659bd79fc2f9680e5372
|
[
"MIT"
] | null | null | null |
git_t5/data/__init__.py
|
formermagic/git-t5
|
5ee27d1be72988986f03659bd79fc2f9680e5372
|
[
"MIT"
] | null | null | null |
from .data_collator import DataCollatorForT5MLM
from .data_preprocessing import compute_input_and_target_lengths, prepare_dataset
from .dataset import Dataset, T5Dataset, T5MultitaskDataset
| 47.5
| 81
| 0.889474
| 22
| 190
| 7.363636
| 0.681818
| 0.098765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017143
| 0.078947
| 190
| 3
| 82
| 63.333333
| 0.908571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4c419b325aa97b19ca46f3f8c5f9a90bc7e55f1a
| 53
|
py
|
Python
|
src/pkgs/ui/__init__.py
|
Electronya/rc-mission-command
|
c801cde1a68a5c113cee56747b0f0709b1b92e2f
|
[
"MIT"
] | null | null | null |
src/pkgs/ui/__init__.py
|
Electronya/rc-mission-command
|
c801cde1a68a5c113cee56747b0f0709b1b92e2f
|
[
"MIT"
] | 6
|
2021-09-06T18:45:48.000Z
|
2021-12-17T01:30:30.000Z
|
src/pkgs/ui/__init__.py
|
Electronya/rc-mission-commander
|
c801cde1a68a5c113cee56747b0f0709b1b92e2f
|
[
"MIT"
] | null | null | null |
from .appComposer import AppComposer # noqa: F401
| 26.5
| 52
| 0.754717
| 6
| 53
| 6.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 0.188679
| 53
| 1
| 53
| 53
| 0.860465
| 0.188679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4c49198882852efba55cda4d365c8f9d61e540b0
| 16
|
py
|
Python
|
files/general_overview/loading/__init__.py
|
mipt-npm-study/sciprog-python
|
09a0d99254fcd559ec8d13bedd5521e98e86b3ce
|
[
"MIT"
] | 3
|
2021-09-12T20:54:08.000Z
|
2021-09-21T14:49:15.000Z
|
files/general_overview/loading/__init__.py
|
mipt-npm-study/sciprog-python
|
09a0d99254fcd559ec8d13bedd5521e98e86b3ce
|
[
"MIT"
] | null | null | null |
files/general_overview/loading/__init__.py
|
mipt-npm-study/sciprog-python
|
09a0d99254fcd559ec8d13bedd5521e98e86b3ce
|
[
"MIT"
] | 2
|
2021-09-14T13:15:41.000Z
|
2021-09-14T15:18:01.000Z
|
print("loading")
| 16
| 16
| 0.75
| 2
| 16
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 16
| 1
| 16
| 16
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
4c76484694dada03172dd351e9fee823072fb12d
| 124
|
py
|
Python
|
gnss/__init__.py
|
swift-nav/snavutils
|
bc1ff7c68c83f3fc0cf014de0bcf313b4c803a42
|
[
"MIT"
] | 16
|
2017-09-26T22:58:43.000Z
|
2021-11-26T15:18:03.000Z
|
gnss/__init__.py
|
swift-nav/snavutils
|
bc1ff7c68c83f3fc0cf014de0bcf313b4c803a42
|
[
"MIT"
] | 1
|
2021-04-19T19:19:56.000Z
|
2021-04-19T19:19:56.000Z
|
gnss/__init__.py
|
swift-nav/snavutils
|
bc1ff7c68c83f3fc0cf014de0bcf313b4c803a42
|
[
"MIT"
] | 12
|
2017-12-04T20:18:32.000Z
|
2021-05-28T17:57:59.000Z
|
from .coord_system import llh_from_ecef, ecef_from_llh
from .gps_time import gps_format_to_datetime, datetime_to_gps_format
| 41.333333
| 68
| 0.887097
| 22
| 124
| 4.454545
| 0.5
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080645
| 124
| 2
| 69
| 62
| 0.859649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9106f2c014f1dbea2aa50493272e1d64b767ab1f
| 227
|
py
|
Python
|
pyaz/search/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/search/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/search/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
'''
Manage Azure Search services, admin keys and query keys.
'''
from .. pyaz_utils import _call_az
from . import admin_key, private_endpoint_connection, private_link_resource, query_key, service, shared_private_link_resource
| 32.428571
| 125
| 0.814978
| 32
| 227
| 5.40625
| 0.6875
| 0.127168
| 0.219653
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114537
| 227
| 6
| 126
| 37.833333
| 0.860697
| 0.246696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
911228776358e0c6c1605872db478dedcc8e7641
| 1,583
|
py
|
Python
|
tests/test_tox_pipenv-install.py
|
ermakov-oleg/tox-pipenv-install
|
669c2cbe2eae86bc018ed498c13ecde3d356fd90
|
[
"MIT"
] | 5
|
2019-07-18T08:50:28.000Z
|
2021-03-19T00:09:46.000Z
|
tests/test_tox_pipenv-install.py
|
ermakov-oleg/tox-pipenv-install
|
669c2cbe2eae86bc018ed498c13ecde3d356fd90
|
[
"MIT"
] | 1
|
2019-07-08T07:53:33.000Z
|
2019-07-08T07:53:33.000Z
|
tests/test_tox_pipenv-install.py
|
ermakov-oleg/tox-pipenv-install
|
669c2cbe2eae86bc018ed498c13ecde3d356fd90
|
[
"MIT"
] | 1
|
2019-07-08T07:41:34.000Z
|
2019-07-08T07:41:34.000Z
|
# coding: utf-8
from tox.venv import tox_testenv_create
from tox_pipenv_install.plugin import tox_testenv_install_deps
def test_install_deps_indexserver__with_deps(newmocksession):
    """With declared deps: pip installs `dep1 pipenv`, then `pipenv install --dev` runs."""
    mocksession = newmocksession(
        [],
        """\
[tox]
[testenv:py123]
deps=
    dep1
""",
    )
    venv = mocksession.getvenv("py123")
    with mocksession.newaction(venv.name, "getenv") as action:
        tox_testenv_create(action=action, venv=venv)
        pcalls = mocksession._pcalls
        # venv creation makes exactly one subprocess call; discard it so
        # only the install-phase calls are inspected below.
        assert len(pcalls) == 1
        pcalls[:] = []
        tox_testenv_install_deps(action=action, venv=venv)
        assert len(pcalls) == 2
        # First call: pip installs the declared deps plus pipenv itself.
        args = " ".join(pcalls[0].args)
        assert args.endswith('-m pip install dep1 pipenv')
        # Second call: pipenv installs the dev environment.
        args = " ".join(pcalls[1].args)
        assert args.endswith('-m pipenv install --dev')
def test_install_deps_indexserver__without_deps(newmocksession):
    """Without deps: pip installs only pipenv, then `pipenv install --dev` runs."""
    mocksession = newmocksession(
        [],
        """\
[tox]
[testenv:py123]
""",
    )
    venv = mocksession.getvenv("py123")
    with mocksession.newaction(venv.name, "getenv") as action:
        tox_testenv_create(action=action, venv=venv)
        pcalls = mocksession._pcalls
        # Drop the single venv-creation call; inspect only install calls.
        assert len(pcalls) == 1
        pcalls[:] = []
        tox_testenv_install_deps(action=action, venv=venv)
        assert len(pcalls) == 2
        # pip installs only pipenv since the env declares no deps.
        args = " ".join(pcalls[0].args)
        assert args.endswith('-m pip install pipenv')
        args = " ".join(pcalls[1].args)
        assert args.endswith('-m pipenv install --dev')
| 27.77193
| 64
| 0.607707
| 175
| 1,583
| 5.32
| 0.228571
| 0.085929
| 0.068743
| 0.085929
| 0.87218
| 0.809882
| 0.809882
| 0.685285
| 0.685285
| 0.685285
| 0
| 0.019896
| 0.269741
| 1,583
| 56
| 65
| 28.267857
| 0.785467
| 0.008212
| 0
| 0.722222
| 0
| 0
| 0.083275
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
91168953391f92024ce5fcabdc89553aa1cb41b3
| 136
|
py
|
Python
|
examples/gallery/_gen_images.py
|
rkube/mpl-interactions
|
b08e3933dcd60db43551c4c6b90055d64566b56e
|
[
"BSD-3-Clause"
] | 67
|
2020-08-09T16:41:32.000Z
|
2022-03-31T20:46:20.000Z
|
examples/gallery/_gen_images.py
|
rkube/mpl-interactions
|
b08e3933dcd60db43551c4c6b90055d64566b56e
|
[
"BSD-3-Clause"
] | 172
|
2020-08-04T00:31:19.000Z
|
2022-03-17T19:19:03.000Z
|
examples/gallery/_gen_images.py
|
rkube/mpl-interactions
|
b08e3933dcd60db43551c4c6b90055d64566b56e
|
[
"BSD-3-Clause"
] | 17
|
2020-08-06T17:26:01.000Z
|
2022-01-04T23:46:01.000Z
|
from mpl_playback.record import record_file
# record_file("heatmap_slicer.py", "fig")
# Record playback data for the sliders gallery example; "fig" is the name
# of the figure object inside that script.
record_file("mpl-sliders-same-figure.py", "fig")
| 27.2
| 48
| 0.772059
| 21
| 136
| 4.761905
| 0.619048
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073529
| 136
| 4
| 49
| 34
| 0.793651
| 0.286765
| 0
| 0
| 0
| 0
| 0.305263
| 0.273684
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
91309486287ffa069e828fe3844606d409ba4276
| 178
|
py
|
Python
|
examples/components/spread_props/test_spread_props.py
|
abilian/viewdom
|
9ceed007e67606c9a0125633132b4af3fdaf8680
|
[
"MIT"
] | 3
|
2020-06-19T21:10:00.000Z
|
2021-02-22T12:34:17.000Z
|
examples/components/spread_props/test_spread_props.py
|
abilian/viewdom
|
9ceed007e67606c9a0125633132b4af3fdaf8680
|
[
"MIT"
] | 32
|
2020-05-22T22:15:50.000Z
|
2022-03-31T02:24:21.000Z
|
examples/components/spread_props/test_spread_props.py
|
abilian/viewdom
|
9ceed007e67606c9a0125633132b4af3fdaf8680
|
[
"MIT"
] | 2
|
2020-05-22T20:18:09.000Z
|
2022-01-08T15:31:55.000Z
|
"""Test an example."""
from . import main
def test_main() -> None:
    """Verify the demo component renders the expected markup."""
    expected = '<div title="My Title" id="d1">Hello</div>'
    assert main() == expected
| 22.25
| 64
| 0.606742
| 25
| 178
| 4.28
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006897
| 0.185393
| 178
| 7
| 65
| 25.428571
| 0.731034
| 0.280899
| 0
| 0
| 0
| 0
| 0.350427
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e66fff1ec8acf60949e71d4563f7d3ef26cf2951
| 30
|
py
|
Python
|
mak/build_framework/configure/host/linux.py
|
motor-dev/Motor
|
98cb099fe1c2d31e455ed868cc2a25eae51e79f0
|
[
"BSD-3-Clause"
] | 4
|
2015-05-13T16:28:36.000Z
|
2017-05-24T15:34:14.000Z
|
mak/build_framework/configure/host/linux.py
|
motor-dev/Motor
|
98cb099fe1c2d31e455ed868cc2a25eae51e79f0
|
[
"BSD-3-Clause"
] | null | null | null |
mak/build_framework/configure/host/linux.py
|
motor-dev/Motor
|
98cb099fe1c2d31e455ed868cc2a25eae51e79f0
|
[
"BSD-3-Clause"
] | 1
|
2017-03-21T08:28:07.000Z
|
2017-03-21T08:28:07.000Z
|
def configure(conf):
    """Host-configure hook for this platform: intentionally a no-op."""
    pass
| 10
| 20
| 0.666667
| 4
| 30
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.233333
| 30
| 2
| 21
| 15
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
e67315fbc7b326be9f8fc02af37b4606505f8c49
| 287
|
py
|
Python
|
cms_articles/models/__init__.py
|
execut/django-cms-articles
|
55e5879269bb180928314312d376ceb9dd49154f
|
[
"BSD-3-Clause"
] | 9
|
2016-04-18T15:59:50.000Z
|
2019-09-12T07:11:15.000Z
|
cms_articles/models/__init__.py
|
execut/django-cms-articles
|
55e5879269bb180928314312d376ceb9dd49154f
|
[
"BSD-3-Clause"
] | 6
|
2019-01-22T17:53:48.000Z
|
2020-07-19T17:35:31.000Z
|
cms_articles/models/__init__.py
|
execut/django-cms-articles
|
55e5879269bb180928314312d376ceb9dd49154f
|
[
"BSD-3-Clause"
] | 4
|
2017-02-10T17:19:30.000Z
|
2020-02-02T16:58:20.000Z
|
from .article import Article
from .attribute import Attribute
from .category import Category
from .plugins import ArticlePlugin, ArticlesCategoryPlugin, ArticlesPlugin
from .title import Title
(Category, Article, Title, Attribute, ArticlePlugin, ArticlesPlugin, ArticlesCategoryPlugin)
| 35.875
| 92
| 0.843206
| 29
| 287
| 8.344828
| 0.344828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10453
| 287
| 7
| 93
| 41
| 0.941634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.833333
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e6b56d914513e4b3037f8049b84067c644cd368e
| 2,431
|
py
|
Python
|
QRDecomposition.py
|
igortakeo/Calculo-Numerico
|
96ed1892c3d2ba80039f2f2ce7de4ca834aa6283
|
[
"MIT"
] | null | null | null |
QRDecomposition.py
|
igortakeo/Calculo-Numerico
|
96ed1892c3d2ba80039f2f2ce7de4ca834aa6283
|
[
"MIT"
] | null | null | null |
QRDecomposition.py
|
igortakeo/Calculo-Numerico
|
96ed1892c3d2ba80039f2f2ce7de4ca834aa6283
|
[
"MIT"
] | null | null | null |
import numpy as np
def QRDecomposition(A):
    """QR decomposition of A via classical Gram-Schmidt.

    Args:
        A: (n, m) array-like with linearly independent columns.

    Returns:
        (Q, R): Q is (n, m) with orthonormal columns, R is (m, m) upper
        triangular, and Q @ R reproduces A up to floating-point rounding.

    Fix: the original bound ``V`` to the column *view* ``A[:, j]`` and then
    subtracted projections in place, silently overwriting the caller's
    matrix. Each column is now copied first, so A is left untouched.
    """
    n = np.shape(A)[0]  # number of rows of A
    m = np.shape(A)[1]  # number of columns of A
    Q = np.zeros((n, m))  # orthonormal factor
    R = np.zeros((m, m))  # upper-triangular factor
    for j in range(0, m):
        # Copy (as float) so the in-place updates below never touch A.
        A_column = np.array(A[:, j], dtype=float)
        V = A_column.copy()  # working residual to be orthogonalized
        for i in range(0, j):
            # Classical Gram-Schmidt: project the original column onto q_i.
            R[i, j] = Q[:, i].dot(A_column)
            V -= R[i, j] * Q[:, i]
        R[j, j] = np.linalg.norm(V)  # diagonal entry: length of the residual
        Q[:, j] = V / R[j, j]        # normalized residual becomes the new basis vector
    return Q, R
def QRDecompositionModificada(A):
    """QR decomposition of A via modified Gram-Schmidt.

    Operates on a private copy of A, reducing each column against the
    already-built basis vectors one at a time; returns (Q, R) such that
    Q @ R reproduces A up to floating-point rounding.
    """
    rows = np.shape(A)[0]
    cols = np.shape(A)[1]
    Q = np.zeros((rows, cols))
    R = np.zeros((cols, cols))
    work = np.copy(A)  # scratch copy of A; its columns are reduced in place
    for j in range(0, cols):
        for i in range(0, j):
            # Coefficient taken from the partially reduced column — this is
            # what distinguishes the modified variant from the classical one.
            coeff = Q[:, i].dot(work[:, j])
            R[i, j] = coeff
            work[:, j] -= coeff * Q[:, i]
        residual_len = np.linalg.norm(work[:, j])
        R[j, j] = residual_len
        Q[:, j] = work[:, j] / residual_len
    return Q, R
# --- Demo 1: classical Gram-Schmidt implementation ---
# A and B are rebuilt before every section so each demo starts from the
# same matrices.
A = np.array([[1,2],[1,3],[-2,0]], dtype='double')
B = np.array([[3,1], [4,-1]], dtype = 'double')
print('Decomposicao QR classica\n')
(Q, R) = QRDecomposition(A)
print('{}\n\n{}\n\n{}'.format(Q, R, Q.dot(R)))
print('\n')
(Q, R) = QRDecomposition(B)
print('{}\n\n{}\n\n{}'.format(Q, R, Q.dot(R)))
print('\n\n')
# --- Demo 2: NumPy's built-in QR, for comparison ---
A = np.array([[1,2],[1,3],[-2,0]], dtype='double')
B = np.array([[3,1], [4,-1]], dtype = 'double')
print('Decomposicao QR do Python\n')
(Q_python,R_python) = np.linalg.qr(A)
print('{}\n\n{}\n\n{}'.format(Q_python, R_python, Q_python.dot(R_python)))
print('\n')
(Q_python,R_python) = np.linalg.qr(B)
print('{}\n\n{}\n\n{}'.format(Q_python, R_python, Q_python.dot(R_python)))
print('\n\n')
# --- Demo 3: modified Gram-Schmidt implementation ---
A = np.array([[1,2],[1,3],[-2,0]], dtype='double')
B = np.array([[3,1], [4,-1]], dtype = 'double')
print('Decomposicao QR modificada\n')
(Q, R) = QRDecompositionModificada(A)
print('{}\n\n{}'.format(Q, R))
print('\n')
# NOTE(review): this calls QRDecomposition, not QRDecompositionModificada,
# inside the "modificada" section — presumably a copy-paste slip; confirm.
(Q, R) = QRDecomposition(B)
print('{}\n\n{}'.format(Q, R))
| 30.012346
| 103
| 0.587413
| 492
| 2,431
| 2.869919
| 0.130081
| 0.022663
| 0.03966
| 0.038244
| 0.818697
| 0.800992
| 0.740085
| 0.740085
| 0.676346
| 0.676346
| 0
| 0.018962
| 0.175648
| 2,431
| 80
| 104
| 30.3875
| 0.685629
| 0.274784
| 0
| 0.6
| 0
| 0
| 0.116667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036364
| false
| 0
| 0.018182
| 0
| 0.090909
| 0.254545
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e6ee450d10d657c1e859f2521f574db40952eea1
| 167
|
py
|
Python
|
src/dogpile_backend_redis_advanced/_compat.py
|
jvanasco/dogpile_redis_advanced
|
1203e0ae95695ebc991c83261476e31e5dbd5f81
|
[
"BSD-3-Clause"
] | 9
|
2018-03-18T18:06:51.000Z
|
2021-08-18T07:08:43.000Z
|
src/dogpile_backend_redis_advanced/_compat.py
|
jvanasco/dogpile_redis_advanced
|
1203e0ae95695ebc991c83261476e31e5dbd5f81
|
[
"BSD-3-Clause"
] | null | null | null |
src/dogpile_backend_redis_advanced/_compat.py
|
jvanasco/dogpile_redis_advanced
|
1203e0ae95695ebc991c83261476e31e5dbd5f81
|
[
"BSD-3-Clause"
] | 1
|
2021-04-30T09:30:53.000Z
|
2021-04-30T09:30:53.000Z
|
from six import PY3
from six.moves import cPickle as pickle
# Python 2/3 text-compatibility shim: ``u`` always yields a text string.
if PY3:
    def u(s):
        """Return ``s`` unchanged: ``str`` is already text on Python 3."""
        return s
else:
    def u(s):
        """Decode the byte string ``s`` from UTF-8 (Python 2 only)."""
        return unicode(s, "utf-8")  # noqa
| 11.928571
| 42
| 0.580838
| 28
| 167
| 3.464286
| 0.642857
| 0.14433
| 0.103093
| 0.226804
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026549
| 0.323353
| 167
| 13
| 43
| 12.846154
| 0.831858
| 0.023952
| 0
| 0.25
| 0
| 0
| 0.031056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
fc142241a24a00bfe2f8c486482f05ead5de6dbb
| 99
|
py
|
Python
|
commons/Metaclass/__init__.py
|
swxs/home
|
652dab7c15ae9c2221c99405ce827be6ccaaccbb
|
[
"Apache-2.0"
] | 1
|
2021-09-23T10:28:34.000Z
|
2021-09-23T10:28:34.000Z
|
commons/Metaclass/__init__.py
|
swxs/home
|
652dab7c15ae9c2221c99405ce827be6ccaaccbb
|
[
"Apache-2.0"
] | null | null | null |
commons/Metaclass/__init__.py
|
swxs/home
|
652dab7c15ae9c2221c99405ce827be6ccaaccbb
|
[
"Apache-2.0"
] | 1
|
2021-09-23T10:28:36.000Z
|
2021-09-23T10:28:36.000Z
|
# -*- coding: utf-8 -*-
# @File : __init__.py.py
# @AUTH : swxs
# @Time : 2018/7/14 15:24
| 19.8
| 28
| 0.494949
| 15
| 99
| 3
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0.272727
| 99
| 4
| 29
| 24.75
| 0.458333
| 0.909091
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fc53247465acd02e79c76a5a71f308fa8020aa8f
| 41
|
py
|
Python
|
autobot/version/__init__.py
|
andreacioni/AutoBot
|
6cfcaf3a36adf6ba15c93f517fbc08ac9f93b389
|
[
"BSD-3-Clause"
] | 1
|
2019-05-16T10:08:35.000Z
|
2019-05-16T10:08:35.000Z
|
autobot/version/__init__.py
|
andreacioni/AutoBot
|
6cfcaf3a36adf6ba15c93f517fbc08ac9f93b389
|
[
"BSD-3-Clause"
] | null | null | null |
autobot/version/__init__.py
|
andreacioni/AutoBot
|
6cfcaf3a36adf6ba15c93f517fbc08ac9f93b389
|
[
"BSD-3-Clause"
] | 2
|
2018-12-24T23:51:28.000Z
|
2019-05-16T15:53:56.000Z
|
from .version import name, version as ver
| 41
| 41
| 0.804878
| 7
| 41
| 4.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 41
| 1
| 41
| 41
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
fc6e07056afbbfe74c1514428e320de0312467f1
| 12
|
py
|
Python
|
config.py
|
lowcloudnine/tornado_static_skeleton
|
4a66a8b6a13e1ba30538b93fea2490f077c40046
|
[
"BSD-2-Clause"
] | null | null | null |
config.py
|
lowcloudnine/tornado_static_skeleton
|
4a66a8b6a13e1ba30538b93fea2490f077c40046
|
[
"BSD-2-Clause"
] | null | null | null |
config.py
|
lowcloudnine/tornado_static_skeleton
|
4a66a8b6a13e1ba30538b93fea2490f077c40046
|
[
"BSD-2-Clause"
] | null | null | null |
port = 8000
| 6
| 11
| 0.666667
| 2
| 12
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.444444
| 0.25
| 12
| 1
| 12
| 12
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fc727b72b71a306a323717be4baef6c03fb8a5b9
| 176
|
py
|
Python
|
WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/sysconfig/sysconfig_get_platform.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | null | null | null |
WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/sysconfig/sysconfig_get_platform.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | null | null | null |
WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/sysconfig/sysconfig_get_platform.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Platform specifier for binary modules
"""
# end_pymotw_header
import sysconfig
print(sysconfig.get_platform())
| 17.6
| 57
| 0.761364
| 22
| 176
| 5.954545
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 0.136364
| 176
| 9
| 58
| 19.555556
| 0.835526
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
fc8aa8edfa3d5e34a850f920a29147c61178f9e3
| 220
|
py
|
Python
|
cp01/01.py
|
foxleoly/py_for_kids
|
c2d476bafab323732fcdfb3e4928e8acc1c8a080
|
[
"MIT"
] | null | null | null |
cp01/01.py
|
foxleoly/py_for_kids
|
c2d476bafab323732fcdfb3e4928e8acc1c8a080
|
[
"MIT"
] | null | null | null |
cp01/01.py
|
foxleoly/py_for_kids
|
c2d476bafab323732fcdfb3e4928e8acc1c8a080
|
[
"MIT"
] | null | null | null |
import sys
print(sys.platform)
print(sys.version)
print('1. Hello {0}'.format(sys.platform))
print('1.1 Hello python {ver}'.format(ver=sys.version))
print(f'2. Hello {sys.platform}')
print('3. Hello %s' % sys.platform)
| 24.444444
| 55
| 0.704545
| 37
| 220
| 4.189189
| 0.405405
| 0.283871
| 0.309677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03
| 0.090909
| 220
| 8
| 56
| 27.5
| 0.745
| 0
| 0
| 0
| 0
| 0
| 0.309091
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.142857
| 0
| 0.142857
| 0.857143
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
fc8c673582e50571efe53dfdc6eba37bec9d3a60
| 186
|
py
|
Python
|
takco/cluster/__init__.py
|
karmaresearch/takco
|
7c68d6934d721a84302e46ed29faec050065b9e0
|
[
"MIT"
] | 16
|
2019-07-27T07:19:54.000Z
|
2022-03-15T16:28:41.000Z
|
takco/cluster/__init__.py
|
karmaresearch/takco
|
7c68d6934d721a84302e46ed29faec050065b9e0
|
[
"MIT"
] | 2
|
2020-07-09T23:24:09.000Z
|
2021-06-01T16:19:40.000Z
|
takco/cluster/__init__.py
|
karmaresearch/takco
|
7c68d6934d721a84302e46ed29faec050065b9e0
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import logging as log
from .headerunions import combine_by_first_header, table_get_headerId
from .context import tables_add_context_rows
from .cluster import *
| 26.571429
| 69
| 0.854839
| 28
| 186
| 5.392857
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11828
| 186
| 6
| 70
| 31
| 0.920732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
fc9a222f70e300f7ca63cdf27ec2ec9bee9de747
| 230
|
py
|
Python
|
Chapter 05/Games/Cricket/othercountrybowler.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 05/Games/Cricket/othercountrybowler.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 05/Games/Cricket/othercountrybowler.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
#Cricket Package --- othercountrybowler module
def name_othercountrybowler():
'''Other Country Bowler Names are'''
print("Other Country Bowler Function")
print("Bowler1: Mr. G")
print("Bowler2: Mr. H")
print()
| 28.75
| 46
| 0.678261
| 26
| 230
| 5.961538
| 0.692308
| 0.154839
| 0.232258
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010695
| 0.186957
| 230
| 8
| 47
| 28.75
| 0.818182
| 0.330435
| 0
| 0
| 0
| 0
| 0.38255
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0
| 0
| 0.2
| 0.8
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
5d8408a56b0d61f8204f68446d6a53e8aa087127
| 123
|
py
|
Python
|
Musofirlar.Website/src/embassy/admin.py
|
SanjarbekSaminjonov/Musofirlar.uz
|
aab647e06c2f06979408d8f2d2a78758a8b3c65c
|
[
"Apache-2.0"
] | 3
|
2021-12-22T09:01:50.000Z
|
2021-12-23T18:29:12.000Z
|
Musofirlar.Website/src/embassy/admin.py
|
SanjarbekSaminjonov/Musofirlar.uz
|
aab647e06c2f06979408d8f2d2a78758a8b3c65c
|
[
"Apache-2.0"
] | null | null | null |
Musofirlar.Website/src/embassy/admin.py
|
SanjarbekSaminjonov/Musofirlar.uz
|
aab647e06c2f06979408d8f2d2a78758a8b3c65c
|
[
"Apache-2.0"
] | 1
|
2021-12-28T06:15:33.000Z
|
2021-12-28T06:15:33.000Z
|
from django.contrib import admin
from .models import Embassy
# Register your models here.
admin.site.register(Embassy)
| 13.666667
| 32
| 0.788618
| 17
| 123
| 5.705882
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 123
| 8
| 33
| 15.375
| 0.92381
| 0.211382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5d84b37d7ef229fd1127ec405d590ea9a7694eab
| 77
|
py
|
Python
|
python/testData/inspections/ConvertSingleQuotedDocstring_after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/inspections/ConvertSingleQuotedDocstring_after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/ConvertSingleQuotedDocstring_after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
def foo():
"""foo first line docstring
second line of docstring"""
pass
| 19.25
| 29
| 0.675325
| 11
| 77
| 4.727273
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.207792
| 77
| 4
| 30
| 19.25
| 0.852459
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
5d8713bc368d04a912b1c3b9d4d2994131feb3a2
| 1,596
|
py
|
Python
|
tests/test__sensor.py
|
timothygebhard/hswfs
|
8442314674c028b7b5833c278c5d455901fa38b0
|
[
"MIT"
] | 6
|
2020-05-14T05:43:23.000Z
|
2022-01-12T06:41:24.000Z
|
tests/test__sensor.py
|
timothygebhard/hswfs
|
8442314674c028b7b5833c278c5d455901fa38b0
|
[
"MIT"
] | null | null | null |
tests/test__sensor.py
|
timothygebhard/hswfs
|
8442314674c028b7b5833c278c5d455901fa38b0
|
[
"MIT"
] | null | null | null |
"""
Unit tests for functions in sensor.py
"""
# -----------------------------------------------------------------------------
# IMPORTS
# -----------------------------------------------------------------------------
import numpy as np
from hswfs.sensor import HSWFS
from hswfs.shifts import generate_test_shifts
# -----------------------------------------------------------------------------
# TEST CASES
# -----------------------------------------------------------------------------
def test__x_shift() -> None:
shifts = generate_test_shifts(test_case='x_shift',
grid_size=8)
sensor = HSWFS(relative_shifts=shifts)
coefficients = sensor.fit_wavefront(n_zernike=9)
expected = np.array([True if _ != 2 else False for _ in range(10)])
assert np.array_equal(np.isclose(coefficients, 0), expected)
def test__y_shift() -> None:
shifts = generate_test_shifts(test_case='y_shift',
grid_size=8)
sensor = HSWFS(relative_shifts=shifts)
coefficients = sensor.fit_wavefront(n_zernike=9)
expected = np.array([True if _ != 1 else False for _ in range(10)])
assert np.array_equal(np.isclose(coefficients, 0), expected)
def test__defocus() -> None:
shifts = generate_test_shifts(test_case='defocus',
grid_size=8)
sensor = HSWFS(relative_shifts=shifts)
coefficients = sensor.fit_wavefront(n_zernike=9)
expected = np.array([True if _ != 4 else False for _ in range(10)])
assert np.array_equal(np.isclose(coefficients, 0), expected)
| 30.113208
| 79
| 0.530075
| 171
| 1,596
| 4.701754
| 0.292398
| 0.052239
| 0.089552
| 0.109453
| 0.807214
| 0.807214
| 0.807214
| 0.762438
| 0.660448
| 0.660448
| 0
| 0.013964
| 0.192356
| 1,596
| 52
| 80
| 30.692308
| 0.609775
| 0.231203
| 0
| 0.5
| 0
| 0
| 0.017298
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5d9621f37945d800a7c424dd6dee3e35581387c8
| 88
|
py
|
Python
|
apps/core/purpleserver/serializers/__init__.py
|
rcknr/purplship-server
|
f8ec35af3da870fada0e989c20a8349c958c637c
|
[
"ECL-2.0",
"Apache-2.0"
] | 12
|
2020-02-03T08:11:21.000Z
|
2021-04-13T02:00:38.000Z
|
apps/core/purpleserver/serializers/__init__.py
|
rcknr/purplship-server
|
f8ec35af3da870fada0e989c20a8349c958c637c
|
[
"ECL-2.0",
"Apache-2.0"
] | 9
|
2020-02-12T00:25:08.000Z
|
2021-04-20T10:31:59.000Z
|
apps/core/purpleserver/serializers/__init__.py
|
rcknr/purplship-server
|
f8ec35af3da870fada0e989c20a8349c958c637c
|
[
"ECL-2.0",
"Apache-2.0"
] | 7
|
2020-02-03T08:10:50.000Z
|
2021-04-13T15:17:12.000Z
|
from rest_framework.serializers import *
from purpleserver.serializers.abstract import *
| 44
| 47
| 0.863636
| 10
| 88
| 7.5
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079545
| 88
| 2
| 47
| 44
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5db636be6817b35609534bb33b39736d3dc6670a
| 538
|
py
|
Python
|
ChineseChess/src/mcts/base_state.py
|
shunliz/chinesechess
|
910aed9fa7383e08be81092f697ef77ff0e88548
|
[
"Apache-2.0"
] | null | null | null |
ChineseChess/src/mcts/base_state.py
|
shunliz/chinesechess
|
910aed9fa7383e08be81092f697ef77ff0e88548
|
[
"Apache-2.0"
] | null | null | null |
ChineseChess/src/mcts/base_state.py
|
shunliz/chinesechess
|
910aed9fa7383e08be81092f697ef77ff0e88548
|
[
"Apache-2.0"
] | null | null | null |
class TwoPlayersGameState:
def __init__(self, state, next_to_move):
self.state = state
self.next_to_move = next_to_move
def game_result(self):
raise NotImplemented("Implement game_result function")
def is_game_over(self):
raise NotImplemented("Implement is_game_over function")
def move(self, action):
raise NotImplemented("Implement move function")
def get_legal_actions(self):
raise NotImplemented("Implement get_legal_actions function")
class Action:
pass
| 24.454545
| 68
| 0.704461
| 65
| 538
| 5.523077
| 0.338462
| 0.211699
| 0.311978
| 0.267409
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.22119
| 538
| 21
| 69
| 25.619048
| 0.856802
| 0
| 0
| 0
| 0
| 0
| 0.223048
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.357143
| false
| 0.071429
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
5db98c0facf45b42b186fb9d0d226f71d70f43d6
| 253
|
py
|
Python
|
pyruct/reconstruction/__init__.py
|
berkanlafci/pyruct
|
21f5f60dfd4b068d919ab0e6c555b8a7158d34d4
|
[
"MIT"
] | 7
|
2021-11-03T13:27:17.000Z
|
2022-02-28T10:12:18.000Z
|
pyruct/reconstruction/__init__.py
|
berkanlafci/pyruct
|
21f5f60dfd4b068d919ab0e6c555b8a7158d34d4
|
[
"MIT"
] | null | null | null |
pyruct/reconstruction/__init__.py
|
berkanlafci/pyruct
|
21f5f60dfd4b068d919ab0e6c555b8a7158d34d4
|
[
"MIT"
] | null | null | null |
#-----
# Description : init for reflection ultrasound computed tomography reconstruction
# Date : October 2021
# Author : Berkan Lafci
# E-mail : lafciberkan@gmail.com
#-----
from pyruct.reconstruction.cpuReconUS import cpuDAS
| 28.111111
| 83
| 0.679842
| 25
| 253
| 6.88
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020305
| 0.221344
| 253
| 8
| 84
| 31.625
| 0.852792
| 0.743083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5dd738c24e46995e3ce7a63283a1d53ad8827e53
| 10,611
|
py
|
Python
|
article/views.py
|
skylifewww/artdelo
|
55d235a59d8a3abdf0f904336c1c75a2be903699
|
[
"MIT"
] | null | null | null |
article/views.py
|
skylifewww/artdelo
|
55d235a59d8a3abdf0f904336c1c75a2be903699
|
[
"MIT"
] | null | null | null |
article/views.py
|
skylifewww/artdelo
|
55d235a59d8a3abdf0f904336c1c75a2be903699
|
[
"MIT"
] | null | null | null |
from django.http.response import Http404
from django.shortcuts import render_to_response, redirect
from article.models import *
from django.core.exceptions import ObjectDoesNotExist
# from article.forms import CommentForm
from django.template.context_processors import csrf
from django.contrib import auth
from django.core.paginator import Paginator
from django.contrib.auth.models import User
# from iosDevCourse.users.models import User
from django.template import loader, Context, RequestContext
# Create your views here.
articles_of_course = {}
current_category = 0
def return_path_f(request):
request.session.modified = True
if 'return_path' in request.session:
del request.session['return_path']
request.session['return_path'] = request.META.get('HTTP_REFERER', '/')
else:
request.session['return_path'] = request.META.get('HTTP_REFERER', '/')
def callactions(request):
return_path_f(request)
args = {}
args['username'] = auth.get_user(request).username
return render_to_response("callactions.html", args)
def catalog(request):
return_path_f(request)
args = {}
args['works'] = Works.objects.all()
args['username'] = auth.get_user(request).username
return render_to_response("catalog.html", args)
def articles(request):
return_path_f(request)
args = {}
args['tags'] = Tag.objects.all()
args['articles'] = Article.objects.all()
args['username'] = auth.get_user(request).username
args["categories"] = Category.objects.all()
args["authors"] = Author.objects.all()
return render_to_response("articles.html", args)
def article(request, category_id, article_id=1):
global current_category
# all_comments = Comments.objects.filter(comments_article_id=article_id)
current_category = Category.objects.get(id=category_id)
course_articles = Article.objects.filter(article_category__in=current_category.get_descendants(include_self=True))
article = Article.objects.get(id=article_id)
article_number = 1
i = 1
for art in course_articles:
articles_of_course[i] = art
if article_id == art.id:
article_number = i
i = i + 1
len_dict = len(articles_of_course)
args = {}
args.update(csrf(request))
return_path_f(request)
args["len_dict"] = len_dict
args["article_number"] = article_number
args["article"] = article
args["author"] = Author.objects.filter(id=article_id)
args['current_category'] = current_category
args['tags'] = Tag.objects.all()
# args["comments"] = all_comments
# args["form"] = CommentForm
args["username"] = auth.get_user(request).username
args["categories"] = Category.objects.all()
args["authors"] = Author.objects.all()
return render_to_response("article.html", args, context_instance=RequestContext(request))
def article_left_right(request, art_page_number, left_right):
article_number = 1
left_right = int(left_right)
if left_right == 0:
article_number = int(art_page_number) - 1
if left_right == 1:
article_number = int(art_page_number) + 1
article = articles_of_course[article_number]
article_id = article.id
# current_category = Category.objects.filter(id=article_id)
# all_comments = Comments.objects.filter(comments_article_id=article_id)
args = {}
args.update(csrf(request))
return_path_f(request)
args["article_number"] = article_number
args["article"] = article
args["len_dict"] = len(articles_of_course)
args["author"] = Author.objects.filter(id=article_id)
args['current_category'] = current_category
args['tags'] = Tag.objects.all()
# args["comments"] = all_comments
# args["form"] = CommentForm
args["username"] = auth.get_user(request).username
args["categories"] = Category.objects.all()
args["authors"] = Author.objects.all()
return render_to_response("article.html", args, context_instance=RequestContext(request))
def category(request, category_id=1):
global current_category
return_path_f(request)
current_category = Category.objects.get(id=category_id)
root_category_id = current_category.get_root().id
args = {}
args['tags'] = Tag.objects.all()
args['current_category'] = current_category
args['root_category_id'] = root_category_id
args['categories'] = Category.objects.all()
args['authors'] = Author.objects.all()
args['articles'] = Article.objects.filter(article_category__in=current_category.get_descendants(include_self=True))
args['username'] = auth.get_user(request).username
return render_to_response('articles.html', args, context_instance=RequestContext(request))
def authors(request, author_id=1):
return_path_f(request)
current_author = Author.objects.get(id=author_id)
root_author_id = current_author.get_root().id
args = {}
args['tags'] = Tag.objects.all()
args['current_author'] = current_author
args['root_author_id'] = root_author_id
args['authors'] = Author.objects.all()
args['categories'] = Category.objects.all()
args['articles'] = Article.objects.filter(article_author_id=author_id)
args['username'] = auth.get_user(request).username
return render_to_response('articles.html', args, context_instance=RequestContext(request))
def tags(request, tag_id=1):
return_path_f(request)
current_tag = Tag.objects.get(id=tag_id)
args = {}
args['tags'] = Tag.objects.all()
args['current_tag'] = current_tag
args['authors'] = Author.objects.all()
args['categories'] = Category.objects.all()
args['articles'] = Article.objects.filter(article_tag__tag_name__exact=current_tag)
args['username'] = auth.get_user(request).username
return render_to_response('articles.html', args, context_instance=RequestContext(request))
# def addlike(request, article_id):
# try:
# if article_id in request.COOKIES:
# redirect(request.META.get('HTTP_REFERER', '/'))
# else:
# article = Article.objects.get(id=article_id)
# article.article_likes += 1
# article.save()
# response = redirect(request.META.get('HTTP_REFERER', '/'))
# response.set_cookie(article_id, "test")
# return response
# except ObjectDoesNotExist:
# raise Http404
# return redirect(request.META.get('HTTP_REFERER', '/'))
# def addcomment(request, article_id):
# if request.POST:
# form = CommentForm(request.POST)
# user = auth.get_user(request)
# if form.is_valid():
# comment = form.save(commit=False)
# comment.comments_article = Article.objects.get(id=article_id)
# comment.comments_user = user
# form.save()
# return redirect(request.META.get('HTTP_REFERER', '/'))
# def articles(request, page_number=1):
# all_article = Article.objects.all()
# current_page = Paginator(all_article, 3)
# return_path_f(request)
# args = {}
# args['tags'] = Tag.objects.all()
# args['articles'] = current_page.page(page_number)
# args['username'] = auth.get_user(request).username
# args['art_page_number'] = page_number
# args["categories"] = Category.objects.all()
# args["authors"] = Author.objects.all()
# return render_to_response("articles.html", args)
# def article(request, article_id=1, art_page_number=1):
# all_comments = Comments.objects.filter(comments_article_id=article_id)
# args = {}
# args.update(csrf(request))
# return_path_f(request)
# args["article"] = Article.objects.get(id=article_id)
# args["author"] = Author.objects.filter(id=article_id)
# args['tags'] = Tag.objects.all()
# args["comments"] = all_comments
# args["form"] = CommentForm
# args["username"] = auth.get_user(request).username
# args["art_page_number"] = art_page_number
# args["categories"] = Category.objects.all()
# args["authors"] = Author.objects.all()
# return render_to_response("article.html", args, context_instance=RequestContext(request))
# def category(request, category_id=1, page_number=1):
# current_category = Category.objects.get(id=category_id)
# root_category_id = current_category.get_root().id
# all_article = Article.objects.filter(article_category__in=current_category.get_descendants(include_self=True))
# current_page = Paginator(all_article, 3)
# args = {}
# args['tags'] = Tag.objects.all()
# args['current_category'] = current_category
# args['root_category_id'] = root_category_id
# args['categories'] = Category.objects.all()
# args['authors'] = Author.objects.all()
# args['articles'] = current_page.page(page_number)
# args['art_page_number'] = page_number
# args['username'] = auth.get_user(request).username
# return render_to_response('articles.html', args, context_instance=RequestContext(request))
# def authors(request, author_id=1, page_number=1):
# all_article = Article.objects.filter(article_author_id=author_id)
# current_author = Author.objects.get(id=author_id)
# root_author_id = current_author.get_root().id
# current_page = Paginator(all_article, 3)
# args = {}
# args['tags'] = Tag.objects.all()
# args['current_author'] = current_author
# args['root_author_id'] = root_author_id
# args['authors'] = Author.objects.all()
# args['categories'] = Category.objects.all()
# args['articles'] = current_page.page(page_number)
# args['art_page_number'] = page_number
# args['username'] = auth.get_user(request).username
# return render_to_response('articles.html', args, context_instance=RequestContext(request))
# def tags(request, tag_id=1, page_number=1):
# current_tag = Tag.objects.get(id=tag_id)
# args = {}
# args['tags'] = Tag.objects.all()
# all_article = Article.objects.filter(article_tag__tag_name__exact=current_tag)
# current_page = Paginator(all_article, 3)
# args['current_tag'] = current_tag
# args['authors'] = Author.objects.all()
# args['categories'] = Category.objects.all()
# args['articles'] = current_page.page(page_number)
# args['art_page_number'] = page_number
# args['username'] = auth.get_user(request).username
# return render_to_response('articles.html', args, context_instance=RequestContext(request))
| 32.449541
| 119
| 0.682217
| 1,305
| 10,611
| 5.308046
| 0.083525
| 0.051971
| 0.058611
| 0.036379
| 0.816515
| 0.801501
| 0.778836
| 0.725855
| 0.679659
| 0.644002
| 0
| 0.003813
| 0.184431
| 10,611
| 326
| 120
| 32.54908
| 0.796626
| 0.452832
| 0
| 0.532258
| 0
| 0
| 0.10079
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072581
| false
| 0
| 0.072581
| 0
| 0.209677
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5df601b2022f6421ad95e828886372508c65c14c
| 27
|
py
|
Python
|
python/testData/completion/moduleDotPy/a.after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/completion/moduleDotPy/a.after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/completion/moduleDotPy/a.after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from shazam import *
xyzzy
| 9
| 20
| 0.777778
| 4
| 27
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 27
| 3
| 21
| 9
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b903cbf5655d56cf1d8833741e43013413944563
| 1,025
|
py
|
Python
|
pythonProject/05al135JSON/json2.py
|
D-Wolter/PycharmProjects
|
c8d6144efa30261bff72a3e0414a0d80f6730f9b
|
[
"MIT"
] | null | null | null |
pythonProject/05al135JSON/json2.py
|
D-Wolter/PycharmProjects
|
c8d6144efa30261bff72a3e0414a0d80f6730f9b
|
[
"MIT"
] | null | null | null |
pythonProject/05al135JSON/json2.py
|
D-Wolter/PycharmProjects
|
c8d6144efa30261bff72a3e0414a0d80f6730f9b
|
[
"MIT"
] | null | null | null |
#https://docs.python.org/3/library/json.html
from dados import *
import json
# dados_json = json.dumps(clientes_dicionario)
# print(dados_json)
# #{"1": {"nome": "Luiz Ot\u00e1vio", "sobrenome": "Miranda", "idade": 25, "altura": 1.8, "peso": 80.53}, "2": {"nome": "Maria", "sobrenome": "Oliveira", "idade": 52, "altura": 1.67, "peso": 57}, "3": {"nome": "Pedro", "sobrenome": "Faria", "idade": 32, "altura": 1.95, "peso": 113}}
dados_json = json.dumps(clientes_dicionario, indent=10)#indent+10 distancia a esquerda no print
print(dados_json)
# {
# "1": {
# "nome": "Luiz Ot\u00e1vio",
# "sobrenome": "Miranda",
# "idade": 25,
# "altura": 1.8,
# "peso": 80.53
# },
# "2": {
# "nome": "Maria",
# "sobrenome": "Oliveira",
# "idade": 52,
# "altura": 1.67,
# "peso": 57
# },
# "3": {
# "nome": "Pedro",
# "sobrenome": "Faria",
# "idade": 32,
# "altura": 1.95,
# "peso": 113
# }
# }
| 28.472222
| 283
| 0.501463
| 118
| 1,025
| 4.305085
| 0.389831
| 0.082677
| 0.051181
| 0.070866
| 0.80315
| 0.80315
| 0.661417
| 0.661417
| 0.661417
| 0.661417
| 0
| 0.083554
| 0.26439
| 1,025
| 35
| 284
| 29.285714
| 0.590186
| 0.838049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b90b1824c048d86e29ac15a4e05ee871ff66d5d8
| 114
|
py
|
Python
|
tests/missing_data/test_missing_data_ozone_DiscardRow_None.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/missing_data/test_missing_data_ozone_DiscardRow_None.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/missing_data/test_missing_data_ozone_DiscardRow_None.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import tests.missing_data.test_missing_data_ozone_generic as gen
gen.test_ozone_missing_data('DiscardRow', None)
| 28.5
| 64
| 0.868421
| 18
| 114
| 5.055556
| 0.611111
| 0.362637
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061404
| 114
| 3
| 65
| 38
| 0.850467
| 0
| 0
| 0
| 0
| 0
| 0.087719
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
f8db998829e1cd4a31f002fca719ba985106ca48
| 98
|
py
|
Python
|
openslides/motion/__init__.py
|
DerPate/OpenSlides
|
2733a47d315fec9b8f3cb746fd5f3739be225d65
|
[
"MIT"
] | 1
|
2015-03-22T02:07:23.000Z
|
2015-03-22T02:07:23.000Z
|
openslides/motion/__init__.py
|
frauenknecht/OpenSlides
|
6521d6b095bca33dc0c5f09f59067551800ea1e3
|
[
"MIT"
] | null | null | null |
openslides/motion/__init__.py
|
frauenknecht/OpenSlides
|
6521d6b095bca33dc0c5f09f59067551800ea1e3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from . import main_menu, personal_info, signals, slides, widgets # noqa
| 24.5
| 72
| 0.673469
| 13
| 98
| 4.923077
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012346
| 0.173469
| 98
| 3
| 73
| 32.666667
| 0.777778
| 0.265306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f8fdce25e710b1b34f419da122bcaed082078553
| 144
|
py
|
Python
|
water_modelling/test/datapassing/int_test_shape_updates.py
|
Water-Modelling-Agh/Hydrus-Modflow-Syngery-Engine
|
4b28f75fb74647d6453385a893149a48f797eeed
|
[
"MIT"
] | null | null | null |
water_modelling/test/datapassing/int_test_shape_updates.py
|
Water-Modelling-Agh/Hydrus-Modflow-Syngery-Engine
|
4b28f75fb74647d6453385a893149a48f797eeed
|
[
"MIT"
] | null | null | null |
water_modelling/test/datapassing/int_test_shape_updates.py
|
Water-Modelling-Agh/Hydrus-Modflow-Syngery-Engine
|
4b28f75fb74647d6453385a893149a48f797eeed
|
[
"MIT"
] | null | null | null |
# TODO: flow needs to be testable, currently it is bounded to local paths (less of a problem, tmpdir exist) and
# endpoints (a bigger problem)
| 48
| 111
| 0.75
| 24
| 144
| 4.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 144
| 2
| 112
| 72
| 0.923077
| 0.965278
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.5
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5d191d0cd6ba1f4d08d840f1f7eca2d98b12b90c
| 128
|
py
|
Python
|
src/petronia/__init__.py
|
groboclown/petronia
|
486338023d19cee989e92f0c5692680f1a37811f
|
[
"MIT"
] | 19
|
2017-06-21T10:28:24.000Z
|
2021-12-31T11:49:28.000Z
|
src/petronia/__init__.py
|
groboclown/petronia
|
486338023d19cee989e92f0c5692680f1a37811f
|
[
"MIT"
] | 10
|
2016-11-11T18:57:57.000Z
|
2021-02-01T15:33:43.000Z
|
src/petronia/__init__.py
|
groboclown/petronia
|
486338023d19cee989e92f0c5692680f1a37811f
|
[
"MIT"
] | 3
|
2017-09-17T03:29:35.000Z
|
2019-06-03T10:43:08.000Z
|
"""
The Petronia Window Manager.
A flexible, platform agnostic window manager written in Python with secure
capabilities.
"""
| 16
| 74
| 0.773438
| 16
| 128
| 6.1875
| 0.875
| 0.262626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 128
| 7
| 75
| 18.285714
| 0.916667
| 0.921875
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5d659feb753876766a2ebfd1a002f75f24a0f16e
| 46
|
py
|
Python
|
text/_cascade/element/user/interface.py
|
jedhsu/text
|
8525b602d304ac571a629104c48703443244545c
|
[
"Apache-2.0"
] | null | null | null |
text/_cascade/element/user/interface.py
|
jedhsu/text
|
8525b602d304ac571a629104c48703443244545c
|
[
"Apache-2.0"
] | null | null | null |
text/_cascade/element/user/interface.py
|
jedhsu/text
|
8525b602d304ac571a629104c48703443244545c
|
[
"Apache-2.0"
] | null | null | null |
class UserInterfaceElement(Element):
pass
| 15.333333
| 36
| 0.782609
| 4
| 46
| 9
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152174
| 46
| 2
| 37
| 23
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
5373af87a9b72a40c54cdf5e7521f6356aac7131
| 1,972
|
py
|
Python
|
ex4/src/composite.py
|
cdfmlr/NumericalAnalysis
|
b752af60c1f3202d53cc3c76e66419a885250ed9
|
[
"Apache-2.0"
] | null | null | null |
ex4/src/composite.py
|
cdfmlr/NumericalAnalysis
|
b752af60c1f3202d53cc3c76e66419a885250ed9
|
[
"Apache-2.0"
] | null | null | null |
ex4/src/composite.py
|
cdfmlr/NumericalAnalysis
|
b752af60c1f3202d53cc3c76e66419a885250ed9
|
[
"Apache-2.0"
] | 1
|
2021-08-15T01:34:35.000Z
|
2021-08-15T01:34:35.000Z
|
# 该文件中实现了复化梯形、复化 Simpson 求积方法。
#
# - composite_trapezium_integral(f, a, b, epsilon, max_iter=10000): 复化梯形
# - composite_simpson_integral(f, a, b, epsilon, max_iter=10000): 复化 Simpson
#
# Copyright 2020 CDFMLR. All rights reserved.
def composite_trapezium_integral(f, a, b, epsilon, max_iter=10000):
    """Composite trapezium rule with successive interval halving.

    Starting from the single-interval trapezium estimate, each refinement
    halves the step and folds in the new midpoints until two successive
    estimates agree to within ``epsilon``.

    Args:
        f: integrand, a callable of one float argument
        a, b: endpoints of the integration interval
        epsilon: target accuracy; iteration stops once reached
        max_iter: maximum number of refinement steps before giving up

    Returns:
        (value, iterations): final integral estimate and refinement count.

    Raises:
        RuntimeError: accuracy was not reached within ``max_iter`` steps.
    """
    segments = 1
    step = b - a
    estimate = step * (f(a) + f(b)) / 2
    refine_count = 0
    refined = 0
    for refine_count in range(int(max_iter)):
        step /= 2
        # Only the new midpoints need evaluating; old nodes are reused via
        # the previous estimate (estimate / 2 rescales to the halved step).
        new_points = 0.0
        for k in range(1, segments + 1):
            new_points += f(a + (2 * k - 1) * step)
        refined = estimate / 2 + step * new_points
        segments *= 2
        if abs(refined - estimate) <= epsilon:
            break
        estimate = refined
    else:
        # Loop exhausted without hitting the break: convergence failed.
        raise RuntimeError('无法在 max_iter 步内迭代到目标精度')
    return refined, refine_count + 1
def composite_simpson_integral(f, a, b, epsilon, max_iter=10000):
    """Composite Simpson rule with successive interval halving.

    Starts from the three-point Simpson estimate and repeatedly halves the
    step, recombining old and new node sums with the standard 4/-2 weights,
    until two successive estimates agree to within ``epsilon``.

    Args:
        f: integrand, a callable of one float argument
        a, b: endpoints of the integration interval
        epsilon: target accuracy; iteration stops once reached
        max_iter: maximum number of refinement steps before giving up

    Returns:
        (value, iterations): final integral estimate and refinement count.

    Raises:
        RuntimeError: accuracy was not reached within ``max_iter`` steps.
    """
    panels = 1
    half = (b - a) / 2
    estimate = half * (f(a) + 4 * f((a + b) / 2) + f(b)) / 3
    refine_count = 0
    refined = 0
    for refine_count in range(int(max_iter)):
        half /= 2
        # New odd-index nodes (weight 4) introduced by this halving.
        odd_nodes = 0.0
        for k in range(1, 2 * panels + 1):
            odd_nodes += f(a + (2 * k - 1) * half)
        # Former odd nodes, now interior even nodes (their weight drops
        # from 4 to 2, hence the -2 correction below).
        demoted_nodes = 0.0
        for k in range(1, panels + 1):
            demoted_nodes += f(a + (4 * k - 2) * half)
        refined = estimate / 2 + half * (4 * odd_nodes - 2 * demoted_nodes) / 3
        panels *= 2
        if abs(refined - estimate) <= epsilon:
            break
        estimate = refined
    else:
        # Loop exhausted without hitting the break: convergence failed.
        raise RuntimeError('无法在 max_iter 步内迭代到目标精度')
    return refined, refine_count + 1
| 22.930233
| 76
| 0.530933
| 297
| 1,972
| 3.377104
| 0.225589
| 0.083749
| 0.014955
| 0.043868
| 0.75673
| 0.75673
| 0.75673
| 0.75673
| 0.739781
| 0.64008
| 0
| 0.048837
| 0.345842
| 1,972
| 85
| 77
| 23.2
| 0.728682
| 0.360041
| 0
| 0.457143
| 0
| 0
| 0.038328
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0
| 0
| 0.114286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
53c2661811722db363ba90548a9b03aecabe7331
| 437
|
py
|
Python
|
fixes/fix_ordermap.py
|
xUndero/noc
|
9fb34627721149fcf7064860bd63887e38849131
|
[
"BSD-3-Clause"
] | 1
|
2019-09-20T09:36:48.000Z
|
2019-09-20T09:36:48.000Z
|
fixes/fix_ordermap.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
fixes/fix_ordermap.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Upload main_ordermap
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.main.models.ordermap import OrderMap
def fix():
    """Refresh the main_ordermap data by delegating to ``OrderMap.update_models()``.

    NOTE(review): the concrete side effects live in the NOC ``OrderMap`` model,
    which is not visible in this file — confirm against its definition.
    """
    OrderMap.update_models()
| 29.133333
| 72
| 0.356979
| 31
| 437
| 4.967742
| 0.774194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022843
| 0.098398
| 437
| 14
| 73
| 31.214286
| 0.36802
| 0.757437
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
53d7f76b4cda2452882f209f36ff69dea772e9b3
| 9,684
|
py
|
Python
|
networkx/algorithms/tests/test_dominance.py
|
rakschahsa/networkx
|
6cac55b1064c3c346665f9281680fa3b66442ad0
|
[
"BSD-3-Clause"
] | 445
|
2019-01-26T13:50:26.000Z
|
2022-03-18T05:17:38.000Z
|
SLpackage/private/thirdparty/pythonpkgs/networkx/networkx_2.2/lib/python2.7/site-packages/networkx/algorithms/tests/test_dominance.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | 242
|
2019-01-29T15:48:27.000Z
|
2022-03-31T22:09:21.000Z
|
SLpackage/private/thirdparty/pythonpkgs/networkx/networkx_2.2/lib/python2.7/site-packages/networkx/algorithms/tests/test_dominance.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | 136
|
2018-01-09T22:52:06.000Z
|
2022-02-24T13:26:18.000Z
|
import networkx as nx
from nose.tools import *
class TestImmediateDominators(object):
    """Tests for ``nx.immediate_dominators``.

    Expected results are written as ``{node: immediate_dominator}`` dicts.
    Several graphs are taken from the literature cited in per-test comments.
    Uses nose-style assertion helpers (``assert_equal`` etc.).
    """
    def test_exceptions(self):
        # Undirected and multigraph inputs, and a start node absent from the
        # graph, must all raise.
        G = nx.Graph()
        G.add_node(0)
        assert_raises(nx.NetworkXNotImplemented, nx.immediate_dominators, G, 0)
        G = nx.MultiGraph(G)
        assert_raises(nx.NetworkXNotImplemented, nx.immediate_dominators, G, 0)
        G = nx.DiGraph([[0, 0]])
        assert_raises(nx.NetworkXError, nx.immediate_dominators, G, 1)
    def test_singleton(self):
        # The root is its own immediate dominator, with or without a self-loop.
        G = nx.DiGraph()
        G.add_node(0)
        assert_equal(nx.immediate_dominators(G, 0), {0: 0})
        G.add_edge(0, 0)
        assert_equal(nx.immediate_dominators(G, 0), {0: 0})
    def test_path(self):
        # On a directed path each node is immediately dominated by its predecessor.
        n = 5
        G = nx.path_graph(n, create_using=nx.DiGraph())
        assert_equal(nx.immediate_dominators(G, 0),
                     {i: max(i - 1, 0) for i in range(n)})
    def test_cycle(self):
        # A directed cycle rooted at 0 behaves like a path for dominance.
        n = 5
        G = nx.cycle_graph(n, create_using=nx.DiGraph())
        assert_equal(nx.immediate_dominators(G, 0),
                     {i: max(i - 1, 0) for i in range(n)})
    def test_unreachable(self):
        # Nodes not reachable from the start node are omitted from the result.
        n = 5
        assert_greater(n, 1)
        G = nx.path_graph(n, create_using=nx.DiGraph())
        assert_equal(nx.immediate_dominators(G, n // 2),
                     {i: max(i - 1, n // 2) for i in range(n // 2, n)})
    def test_irreducible1(self):
        # Graph taken from Figure 2 of
        # K. D. Cooper, T. J. Harvey, and K. Kennedy.
        # A simple, fast dominance algorithm.
        # Software Practice & Experience, 4:110, 2001.
        edges = [(1, 2), (2, 1), (3, 2), (4, 1), (5, 3), (5, 4)]
        G = nx.DiGraph(edges)
        assert_equal(nx.immediate_dominators(G, 5),
                     {i: 5 for i in range(1, 6)})
    def test_irreducible2(self):
        # Graph taken from Figure 4 of
        # K. D. Cooper, T. J. Harvey, and K. Kennedy.
        # A simple, fast dominance algorithm.
        # Software Practice & Experience, 4:110, 2001.
        edges = [(1, 2), (2, 1), (2, 3), (3, 2), (4, 2), (4, 3), (5, 1),
                 (6, 4), (6, 5)]
        G = nx.DiGraph(edges)
        assert_equal(nx.immediate_dominators(G, 6),
                     {i: 6 for i in range(1, 7)})
    def test_domrel_png(self):
        # Graph taken from https://commons.wikipedia.org/wiki/File:Domrel.png
        edges = [(1, 2), (2, 3), (2, 4), (2, 6), (3, 5), (4, 5), (5, 2)]
        G = nx.DiGraph(edges)
        assert_equal(nx.immediate_dominators(G, 1),
                     {1: 1, 2: 1, 3: 2, 4: 2, 5: 2, 6: 2})
        # Test postdominance.
        # NOTE(review): nx.utils.reversed reverses the edges for the duration
        # of the with-block (removed in newer networkx — verify on upgrade).
        with nx.utils.reversed(G):
            assert_equal(nx.immediate_dominators(G, 6),
                         {1: 2, 2: 6, 3: 5, 4: 5, 5: 2, 6: 6})
    def test_boost_example(self):
        # Graph taken from Figure 1 of
        # http://www.boost.org/doc/libs/1_56_0/libs/graph/doc/lengauer_tarjan_dominator.htm
        edges = [(0, 1), (1, 2), (1, 3), (2, 7), (3, 4), (4, 5), (4, 6),
                 (5, 7), (6, 4)]
        G = nx.DiGraph(edges)
        assert_equal(nx.immediate_dominators(G, 0),
                     {0: 0, 1: 0, 2: 1, 3: 1, 4: 3, 5: 4, 6: 4, 7: 1})
        # Test postdominance.
        with nx.utils.reversed(G):
            assert_equal(nx.immediate_dominators(G, 7),
                         {0: 1, 1: 7, 2: 7, 3: 4, 4: 5, 5: 7, 6: 4, 7: 7})
class TestDominanceFrontiers(object):
    """Tests for ``nx.dominance_frontiers``.

    Expected results are written as ``{node: frontier_set}`` dicts. Mirrors
    the graph fixtures of ``TestImmediateDominators`` and adds regression
    cases for reported GitHub issues.
    """
    def test_exceptions(self):
        # Undirected and multigraph inputs, and a start node absent from the
        # graph, must all raise.
        G = nx.Graph()
        G.add_node(0)
        assert_raises(nx.NetworkXNotImplemented, nx.dominance_frontiers, G, 0)
        G = nx.MultiGraph(G)
        assert_raises(nx.NetworkXNotImplemented, nx.dominance_frontiers, G, 0)
        G = nx.DiGraph([[0, 0]])
        assert_raises(nx.NetworkXError, nx.dominance_frontiers, G, 1)
    def test_singleton(self):
        # A lone root has an empty frontier, with or without a self-loop.
        G = nx.DiGraph()
        G.add_node(0)
        assert_equal(nx.dominance_frontiers(G, 0), {0: set()})
        G.add_edge(0, 0)
        assert_equal(nx.dominance_frontiers(G, 0), {0: set()})
    def test_path(self):
        # Every node of a directed path has an empty dominance frontier.
        n = 5
        G = nx.path_graph(n, create_using=nx.DiGraph())
        assert_equal(nx.dominance_frontiers(G, 0),
                     {i: set() for i in range(n)})
    def test_cycle(self):
        n = 5
        G = nx.cycle_graph(n, create_using=nx.DiGraph())
        assert_equal(nx.dominance_frontiers(G, 0),
                     {i: set() for i in range(n)})
    def test_unreachable(self):
        # Nodes not reachable from the start node are omitted from the result.
        n = 5
        assert_greater(n, 1)
        G = nx.path_graph(n, create_using=nx.DiGraph())
        assert_equal(nx.dominance_frontiers(G, n // 2),
                     {i: set() for i in range(n // 2, n)})
    def test_irreducible1(self):
        # Graph taken from Figure 2 of
        # K. D. Cooper, T. J. Harvey, and K. Kennedy.
        # A simple, fast dominance algorithm.
        # Software Practice & Experience, 4:110, 2001.
        edges = [(1, 2), (2, 1), (3, 2), (4, 1), (5, 3), (5, 4)]
        G = nx.DiGraph(edges)
        assert_equal({u: df
                      for u, df in nx.dominance_frontiers(G, 5).items()},
                     {1: set([2]), 2: set([1]), 3: set([2]),
                      4: set([1]), 5: set()})
    def test_irreducible2(self):
        # Graph taken from Figure 4 of
        # K. D. Cooper, T. J. Harvey, and K. Kennedy.
        # A simple, fast dominance algorithm.
        # Software Practice & Experience, 4:110, 2001.
        edges = [(1, 2), (2, 1), (2, 3), (3, 2), (4, 2), (4, 3), (5, 1),
                 (6, 4), (6, 5)]
        G = nx.DiGraph(edges)
        assert_equal(nx.dominance_frontiers(G, 6),
                     {1: set([2]), 2: set([1, 3]), 3: set([2]), 4: set([2, 3]), 5: set([1]), 6: set([])})
    def test_domrel_png(self):
        # Graph taken from https://commons.wikipedia.org/wiki/File:Domrel.png
        edges = [(1, 2), (2, 3), (2, 4), (2, 6), (3, 5), (4, 5), (5, 2)]
        G = nx.DiGraph(edges)
        assert_equal(nx.dominance_frontiers(G, 1),
                     {1: set([]), 2: set([2]), 3: set([5]), 4: set([5]),
                      5: set([2]), 6: set()})
        # Test postdominance.
        # NOTE(review): nx.utils.reversed reverses the edges for the duration
        # of the with-block (removed in newer networkx — verify on upgrade).
        with nx.utils.reversed(G):
            assert_equal(nx.dominance_frontiers(G, 6),
                         {1: set(), 2: set([2]), 3: set([2]), 4: set([2]),
                          5: set([2]), 6: set()})
    def test_boost_example(self):
        # Graph taken from Figure 1 of
        # http://www.boost.org/doc/libs/1_56_0/libs/graph/doc/lengauer_tarjan_dominator.htm
        edges = [(0, 1), (1, 2), (1, 3), (2, 7), (3, 4), (4, 5), (4, 6),
                 (5, 7), (6, 4)]
        G = nx.DiGraph(edges)
        assert_equal(nx.dominance_frontiers(G, 0),
                     {0: set(), 1: set(), 2: set([7]), 3: set([7]),
                      4: set([4, 7]), 5: set([7]), 6: set([4]), 7: set()})
        # Test postdominance.
        with nx.utils.reversed(G):
            assert_equal(nx.dominance_frontiers(G, 7),
                         {0: set(), 1: set(), 2: set([1]), 3: set([1]),
                          4: set([1, 4]), 5: set([1]), 6: set([4]), 7: set()})
    def test_discard_issue(self):
        # Regression test for
        # https://github.com/networkx/networkx/issues/2071
        g = nx.DiGraph()
        g.add_edges_from([
            ('b0', 'b1'),
            ('b1', 'b2'),
            ('b2', 'b3'),
            ('b3', 'b1'),
            ('b1', 'b5'),
            ('b5', 'b6'),
            ('b5', 'b8'),
            ('b6', 'b7'),
            ('b8', 'b7'),
            ('b7', 'b3'),
            ('b3', 'b4')
        ]
        )
        df = nx.dominance_frontiers(g, 'b0')
        assert_equal(df, {'b4': set(), 'b5': set(['b3']), 'b6': set(['b7']),
                          'b7': set(['b3']),
                          'b0': set(), 'b1': set(['b1']), 'b2': set(['b3']),
                          'b3': set(['b1']), 'b8': set(['b7'])})
    def test_loop(self):
        # A simple two-node loop with an exit: all frontiers are empty.
        g = nx.DiGraph()
        g.add_edges_from([('a', 'b'), ('b', 'c'), ('b', 'a')])
        df = nx.dominance_frontiers(g, 'a')
        assert_equal(df, {'a': set(), 'b': set(), 'c': set()})
    def test_missing_immediate_doms(self):
        # Regression test:
        # see https://github.com/networkx/networkx/issues/2070
        g = nx.DiGraph()
        edges = [
            ('entry_1', 'b1'),
            ('b1', 'b2'),
            ('b2', 'b3'),
            ('b3', 'exit'),
            ('entry_2', 'b3')
        ]
        # entry_1
        # |
        # b1
        # |
        # b2  entry_2
        # |  /
        # b3
        # |
        # exit
        g.add_edges_from(edges)
        # formerly raised KeyError on entry_2 when parsing b3
        # because entry_2 does not have immediate doms (no path)
        nx.dominance_frontiers(g, 'entry_1')
    def test_loops_larger(self):
        # Nested-loop CFG from
        # http://ecee.colorado.edu/~waite/Darmstadt/motion.html
        g = nx.DiGraph()
        edges = [
            ('entry', 'exit'),
            ('entry', '1'),
            ('1', '2'),
            ('2', '3'),
            ('3', '4'),
            ('4', '5'),
            ('5', '6'),
            ('6', 'exit'),
            ('6', '2'),
            ('5', '3'),
            ('4', '4')
        ]
        g.add_edges_from(edges)
        df = nx.dominance_frontiers(g, 'entry')
        answer = {'entry': set(),
                  '1': set(['exit']),
                  '2': set(['exit', '2']),
                  '3': set(['exit', '3', '2']),
                  '4': set(['exit', '4', '3', '2']),
                  '5': set(['exit', '3', '2']),
                  '6': set(['exit', '2']),
                  'exit': set()}
        # Compare per node so a failure pinpoints the offending frontier.
        for n in df:
            assert_equal(set(df[n]), set(answer[n]))
| 37.103448
| 105
| 0.46613
| 1,314
| 9,684
| 3.331811
| 0.111872
| 0.017816
| 0.062357
| 0.086341
| 0.82572
| 0.75651
| 0.722704
| 0.702147
| 0.696208
| 0.694381
| 0
| 0.073839
| 0.348306
| 9,684
| 260
| 106
| 37.246154
| 0.61987
| 0.14271
| 0
| 0.557895
| 0
| 0
| 0.02615
| 0
| 0
| 0
| 0
| 0
| 0.173684
| 1
| 0.115789
| false
| 0
| 0.010526
| 0
| 0.136842
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
07509189e19982399fef2d30420c10b60d37ccb5
| 47
|
py
|
Python
|
waterbutler/providers/figshare/__init__.py
|
alexschiller/waterbutler
|
24014d7705aca3e99a6565fc3b9b4075ec6ec563
|
[
"Apache-2.0"
] | 65
|
2015-01-23T03:22:04.000Z
|
2022-01-11T22:33:19.000Z
|
waterbutler/providers/figshare/__init__.py
|
alexschiller/waterbutler
|
24014d7705aca3e99a6565fc3b9b4075ec6ec563
|
[
"Apache-2.0"
] | 300
|
2015-02-16T16:45:02.000Z
|
2022-01-31T14:49:07.000Z
|
waterbutler/providers/figshare/__init__.py
|
Johnetordoff/waterbutler
|
b505cdbcffadaba12984dcb19c9139068e6c314d
|
[
"Apache-2.0"
] | 76
|
2015-01-20T20:45:17.000Z
|
2021-07-30T13:18:10.000Z
|
from .provider import FigshareProvider # noqa
| 23.5
| 46
| 0.808511
| 5
| 47
| 7.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 47
| 1
| 47
| 47
| 0.95
| 0.085106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4adeda05dbf4b3ca2d8e357a1dcd3b0f8632f921
| 58
|
py
|
Python
|
alive_progress/tools/__init__.py
|
Shinyh29/alive-progress
|
767445917e7cb384981c0dc29b3b3204384353b1
|
[
"MIT"
] | 3,304
|
2019-08-05T07:16:40.000Z
|
2022-03-31T11:50:57.000Z
|
alive_progress/tools/__init__.py
|
Shinyh29/alive-progress
|
767445917e7cb384981c0dc29b3b3204384353b1
|
[
"MIT"
] | 126
|
2019-08-07T09:08:32.000Z
|
2022-03-31T14:27:17.000Z
|
alive_progress/tools/__init__.py
|
Shinyh29/alive-progress
|
767445917e7cb384981c0dc29b3b3204384353b1
|
[
"MIT"
] | 151
|
2019-08-12T19:14:15.000Z
|
2022-03-31T07:17:48.000Z
|
from .repl import print_chars
# Explicit public API: star-imports from this subpackage expose only print_chars.
__all__ = ('print_chars',)
| 14.5
| 29
| 0.741379
| 8
| 58
| 4.625
| 0.75
| 0.540541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 58
| 3
| 30
| 19.333333
| 0.74
| 0
| 0
| 0
| 0
| 0
| 0.189655
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
4af5aeb38fbe2728282e6b58e94b35c220f38074
| 171
|
py
|
Python
|
orders/admin.py
|
bayloun/liban_ex
|
b7438cea178976a01959e2955d1702a425e6b42c
|
[
"MIT"
] | null | null | null |
orders/admin.py
|
bayloun/liban_ex
|
b7438cea178976a01959e2955d1702a425e6b42c
|
[
"MIT"
] | null | null | null |
orders/admin.py
|
bayloun/liban_ex
|
b7438cea178976a01959e2955d1702a425e6b42c
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from orders.models import Order, OrderHistory
# Register the order models with the Django admin site using the default
# ModelAdmin options, making them editable from the admin UI.
admin.site.register(Order)
admin.site.register(OrderHistory)
| 21.375
| 45
| 0.818713
| 23
| 171
| 6.086957
| 0.565217
| 0.128571
| 0.242857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 171
| 7
| 46
| 24.428571
| 0.915033
| 0.152047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ab007629424e1b538bf24182fe5a1098c317391c
| 177
|
py
|
Python
|
gan_apps/__init__.py
|
narenderkumarnain/ganapps
|
f548e725e5f0eb7013bc7d8730896a2e9cdb6450
|
[
"MIT"
] | null | null | null |
gan_apps/__init__.py
|
narenderkumarnain/ganapps
|
f548e725e5f0eb7013bc7d8730896a2e9cdb6450
|
[
"MIT"
] | null | null | null |
gan_apps/__init__.py
|
narenderkumarnain/ganapps
|
f548e725e5f0eb7013bc7d8730896a2e9cdb6450
|
[
"MIT"
] | null | null | null |
# Package version string, importable as gan_apps.__version__.
__version__ = '1.0.1'
from gan_apps.colourgan.model import ColourGAN
from gan_apps.colourgan.data import Cifar10Dataset
from gan_apps.colourgan.config import get_cfg
| 25.285714
| 50
| 0.830508
| 27
| 177
| 5.148148
| 0.518519
| 0.151079
| 0.23741
| 0.431655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031447
| 0.101695
| 177
| 6
| 51
| 29.5
| 0.842767
| 0.039548
| 0
| 0
| 0
| 0
| 0.029762
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ab5694882a0394817db8f128fed6efe1c59561ef
| 2,803
|
py
|
Python
|
test_TTT_unittest.py
|
zackbeucler/Tic-Tac-Toe
|
280639eebde73591e39290a47c9044b4aa63c637
|
[
"MIT"
] | null | null | null |
test_TTT_unittest.py
|
zackbeucler/Tic-Tac-Toe
|
280639eebde73591e39290a47c9044b4aa63c637
|
[
"MIT"
] | 39
|
2020-10-30T14:53:40.000Z
|
2020-12-17T00:52:07.000Z
|
test_TTT_unittest.py
|
zackbeucler/Tic-Tac-Toe
|
280639eebde73591e39290a47c9044b4aa63c637
|
[
"MIT"
] | 2
|
2020-10-30T15:58:31.000Z
|
2020-11-14T17:26:28.000Z
|
import Tic_tac_toe
import unittest
from unittest.mock import patch
class TestTicTacToe(unittest.TestCase):
    """Unit tests for the Tic_tac_toe module.

    Boards are 9-element lists indexed row-major (0-2 top row, 3-5 middle,
    6-8 bottom). ``playerPickSymbol`` is mocked via ``@patch`` so the symbol
    tests run without console input.
    """
    def test_random(self):
        """This test case ensures that the program actually selects the player who goes
        first at random"""
        # 100 trials, so the raw count doubles as a percentage.
        counter = 0
        for i in range(100):
            if (Tic_tac_toe.firstMove("player0", "player1") == "player1"):
                counter += 1
        print("Player 1 chosen", counter, "percent of the time.")
    def test_winner(self):
        """This test case ensures that the program correctly identifies when there is a winner"""
        # Three rows of X.
        self.assertTrue(Tic_tac_toe.checkWinner(['X', 'X', 'X'," "," "," "," "," "," "], ['X','O'], "player0", "player1"))
        self.assertTrue(Tic_tac_toe.checkWinner([" ", " ", " ","X","X","X"," "," "," "], ['X','O'], "player0", "player1"))
        self.assertTrue(Tic_tac_toe.checkWinner([' ', ' ', ' '," "," "," ","X","X","X"], ['X','O'], "player0", "player1"))
        # Three columns of X.
        self.assertTrue(Tic_tac_toe.checkWinner(['X', ' ', ' ',"X"," "," ","X"," "," "], ['X','O'], "player0", "player1"))
        self.assertTrue(Tic_tac_toe.checkWinner([' ', 'X', ' '," ","X"," "," ","X"," "], ['X','O'], "player0", "player1"))
        self.assertTrue(Tic_tac_toe.checkWinner([' ', ' ', 'X'," "," ","X"," "," ","X"], ['X','O'], "player0", "player1"))
        # Both diagonals of X.
        self.assertTrue(Tic_tac_toe.checkWinner(['X', ' ', ' '," ","X"," "," "," ","X"], ['X','O'], "player0", "player1"))
        self.assertTrue(Tic_tac_toe.checkWinner([' ', ' ', 'X'," ","X"," ","X"," "," "], ['X','O'], "player0", "player1"))
    @patch('Tic_tac_toe.playerPickSymbol', return_value="X")
    def test_upperx(self, input):
        """This test case ensures that the program correctly assigns a player the symbol 'X'
        if the player chooses"""
        # `input` is the MagicMock injected by @patch (it shadows the builtin).
        self.assertEqual(Tic_tac_toe.answer(), "X")
    @patch('Tic_tac_toe.playerPickSymbol', return_value="o")
    def test_lowero(self, input):
        """This test case ensures that the program correctly assigns a player the symbol 'o'
        if the player chooses"""
        self.assertEqual(Tic_tac_toe.answer(), "o")
    @patch('Tic_tac_toe.playerPickSymbol', return_value="O")
    def test_uppero(self, input):
        """This test case ensures that the program correctly assigns a player the symbol 'O'
        if the player chooses"""
        self.assertEqual(Tic_tac_toe.answer(), "O")
    @patch('Tic_tac_toe.playerPickSymbol', return_value="x")
    def test_lowerx(self, input):
        """This test case ensures that the program correctly assigns a player the symbol 'x'
        if the player chooses"""
        self.assertEqual(Tic_tac_toe.answer(), "x")
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 45.209677
| 123
| 0.57046
| 340
| 2,803
| 4.544118
| 0.208824
| 0.031068
| 0.104854
| 0.10356
| 0.776052
| 0.776052
| 0.776052
| 0.776052
| 0.72233
| 0.72233
| 0
| 0.011531
| 0.226543
| 2,803
| 61
| 124
| 45.95082
| 0.701107
| 0.211559
| 0
| 0
| 0
| 0
| 0.184793
| 0.053898
| 0
| 0
| 0
| 0
| 0.363636
| 1
| 0.181818
| false
| 0
| 0.090909
| 0
| 0.30303
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
db414a1d76fc18951ee0e2de8cf8ba7d3b9a1218
| 150
|
py
|
Python
|
billy/utils/https.py
|
martingaston/billy-search
|
60bdfa0cf740675c3afd86ad68f83755c9cd6596
|
[
"MIT"
] | null | null | null |
billy/utils/https.py
|
martingaston/billy-search
|
60bdfa0cf740675c3afd86ad68f83755c9cd6596
|
[
"MIT"
] | 17
|
2018-11-28T19:20:01.000Z
|
2019-01-06T18:00:58.000Z
|
billy/utils/https.py
|
martingaston/billy-search
|
60bdfa0cf740675c3afd86ad68f83755c9cd6596
|
[
"MIT"
] | null | null | null |
def convert_to_https(url):
    """Return *url* with its first ``http://`` occurrence rewritten to ``https://``.

    Only the first match is changed; strings containing no ``http://``
    (including already-https URLs) come back unchanged.
    """
    before, found, after = url.partition("http://")
    if not found:
        return url
    return before + "https://" + after
| 37.5
| 73
| 0.633333
| 22
| 150
| 4.227273
| 0.772727
| 0.215054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008
| 0.166667
| 150
| 3
| 74
| 50
| 0.736
| 0.42
| 0
| 0
| 0
| 0
| 0.185185
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
db4b96cbd24b72fc14a031018e6a65b91b4f5781
| 129
|
py
|
Python
|
appAPI/admin.py
|
Gowtham21/samples-RESTAPI
|
143e1044f6f1483256223dc9171f016bee3d20f0
|
[
"MIT"
] | null | null | null |
appAPI/admin.py
|
Gowtham21/samples-RESTAPI
|
143e1044f6f1483256223dc9171f016bee3d20f0
|
[
"MIT"
] | null | null | null |
appAPI/admin.py
|
Gowtham21/samples-RESTAPI
|
143e1044f6f1483256223dc9171f016bee3d20f0
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from appAPI import models
# Expose UserProfile in the Django admin with the default ModelAdmin options.
admin.site.register(models.UserProfile)
| 21.5
| 39
| 0.821705
| 18
| 129
| 5.888889
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 129
| 5
| 40
| 25.8
| 0.929825
| 0.20155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
db7d4382568ac8c273547e6f1ca506da20f43190
| 75,188
|
py
|
Python
|
cinder/tests/unit/api/contrib/test_volume_actions.py
|
arunvinodqmco/cinder
|
62cb72c6890e458427ba0601646b186b7b36dc01
|
[
"Apache-2.0"
] | null | null | null |
cinder/tests/unit/api/contrib/test_volume_actions.py
|
arunvinodqmco/cinder
|
62cb72c6890e458427ba0601646b186b7b36dc01
|
[
"Apache-2.0"
] | null | null | null |
cinder/tests/unit/api/contrib/test_volume_actions.py
|
arunvinodqmco/cinder
|
62cb72c6890e458427ba0601646b186b7b36dc01
|
[
"Apache-2.0"
] | 1
|
2022-03-28T10:49:13.000Z
|
2022-03-28T10:49:13.000Z
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from http import HTTPStatus
from unittest import mock
import ddt
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
import webob
from cinder.api.contrib import volume_actions
from cinder.api import microversions as mv
from cinder.api.openstack import api_version_request as api_version
from cinder import context
from cinder import db
from cinder import exception
from cinder.image import glance
from cinder import objects
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v3 import fakes as v3_fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder.tests.unit import utils
from cinder import volume
from cinder.volume import api as volume_api
from cinder.volume import rpcapi as volume_rpcapi
# Shared oslo.config registry used throughout this test module.
CONF = cfg.CONF
# Fixed UUID for an encrypted volume; presumably referenced by
# encryption-related tests later in this file (usage not visible here).
ENCRYPTED_VOLUME_ID = 'f78e8977-6164-4114-a593-358fa6646eff'
@ddt.ddt
class VolumeActionsTest(test.TestCase):
_actions = ('os-reserve', 'os-unreserve')
_methods = ('attach', 'detach', 'reserve_volume', 'unreserve_volume')
def setUp(self):
super(VolumeActionsTest, self).setUp()
self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,
is_admin=False)
self.controller = volume_actions.VolumeActionsController()
self.api_patchers = {}
for _meth in self._methods:
self.api_patchers[_meth] = mock.patch('cinder.volume.api.API.' +
_meth)
self.api_patchers[_meth].start()
self.addCleanup(self.api_patchers[_meth].stop)
self.api_patchers[_meth].return_value = True
db_vol = {'id': fake.VOLUME_ID, 'host': 'fake', 'status': 'available',
'size': 1, 'migration_status': None,
'volume_type_id': fake.VOLUME_TYPE_ID,
'project_id': fake.PROJECT_ID}
vol = fake_volume.fake_volume_obj(self.context, **db_vol)
self.get_patcher = mock.patch('cinder.volume.api.API.get')
self.mock_volume_get = self.get_patcher.start()
self.addCleanup(self.get_patcher.stop)
self.mock_volume_get.return_value = vol
self.update_patcher = mock.patch('cinder.volume.api.API.update')
self.mock_volume_update = self.update_patcher.start()
self.addCleanup(self.update_patcher.stop)
self.mock_volume_update.return_value = vol
self.db_get_patcher = mock.patch(
'cinder.db.sqlalchemy.api._volume_get')
self.mock_volume_db_get = self.db_get_patcher.start()
self.addCleanup(self.db_get_patcher.stop)
self.mock_volume_db_get.return_value = vol
self.flags(transport_url='fake:/')
def test_simple_api_actions(self):
app = fakes.wsgi_app(fake_auth_context=self.context)
for _action in self._actions:
req = webob.Request.blank('/v3/%s/volumes/%s/action' %
(fake.PROJECT_ID, fake.VOLUME_ID))
req.method = 'POST'
req.body = jsonutils.dump_as_bytes({_action: None})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(HTTPStatus.ACCEPTED, res.status_int)
def test_initialize_connection(self):
with mock.patch.object(volume_api.API,
'initialize_connection') as init_conn:
init_conn.return_value = {}
body = {'os-initialize_connection': {'connector': {
'fake': 'fake'}}}
req = webob.Request.blank('/v3/%s/volumes/%s/action' %
(fake.PROJECT_ID, fake.VOLUME_ID))
req.method = "POST"
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.context))
self.assertEqual(HTTPStatus.OK, res.status_int)
def test_initialize_connection_without_connector(self):
with mock.patch.object(volume_api.API,
'initialize_connection') as init_conn:
init_conn.return_value = {}
body = {'os-initialize_connection': {}}
req = webob.Request.blank('/v3/%s/volumes/%s/action' %
(fake.PROJECT_ID, fake.VOLUME_ID))
req.method = "POST"
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.context))
self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.initialize_connection')
def test_initialize_connection_without_initiator(self,
_init_connection):
_init_connection.side_effect = messaging.RemoteError('InvalidInput')
body = {'os-initialize_connection': {'connector': 'w/o_initiator'}}
req = webob.Request.blank('/v3/%s/volumes/%s/action' %
(fake.PROJECT_ID, fake.VOLUME_ID))
req.method = "POST"
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.context))
self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int)
def test_initialize_connection_exception(self):
with mock.patch.object(volume_api.API,
'initialize_connection') as init_conn:
init_conn.side_effect = \
exception.VolumeBackendAPIException(data=None)
body = {'os-initialize_connection': {'connector': {
'fake': 'fake'}}}
req = webob.Request.blank('/v3/%s/volumes/%s/action' %
(fake.PROJECT_ID, fake.VOLUME_ID))
req.method = "POST"
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.context))
self.assertEqual(HTTPStatus.INTERNAL_SERVER_ERROR,
res.status_int)
def test_terminate_connection(self):
with mock.patch.object(volume_api.API,
'terminate_connection') as terminate_conn:
terminate_conn.return_value = {}
body = {'os-terminate_connection': {'connector': 'fake'}}
req = webob.Request.blank('/v3/%s/volumes/%s/action' %
(fake.PROJECT_ID, fake.VOLUME_ID))
req.method = "POST"
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.context))
self.assertEqual(HTTPStatus.ACCEPTED, res.status_int)
def test_terminate_connection_without_connector(self):
with mock.patch.object(volume_api.API,
'terminate_connection') as terminate_conn:
terminate_conn.return_value = {}
body = {'os-terminate_connection': {}}
req = webob.Request.blank('/v3/%s/volumes/%s/action' %
(fake.PROJECT_ID, fake.VOLUME_ID))
req.method = "POST"
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.context))
self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int)
def test_terminate_connection_with_exception(self):
with mock.patch.object(volume_api.API,
'terminate_connection') as terminate_conn:
terminate_conn.side_effect = \
exception.VolumeBackendAPIException(data=None)
body = {'os-terminate_connection': {'connector': 'fake'}}
req = webob.Request.blank('/v3/%s/volumes/%s/action' %
(fake.PROJECT_ID, fake.VOLUME_ID))
req.method = "POST"
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.context))
self.assertEqual(HTTPStatus.INTERNAL_SERVER_ERROR,
res.status_int)
def test_attach_to_instance(self):
body = {'os-attach': {'instance_uuid': fake.INSTANCE_ID,
'mountpoint': '/dev/vdc',
'mode': 'rw'}}
req = webob.Request.blank('/v3/%s/volumes/%s/action' %
(fake.PROJECT_ID, fake.VOLUME_ID))
req.method = "POST"
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.context))
self.assertEqual(HTTPStatus.ACCEPTED, res.status_int)
body = {'os-attach': {'instance_uuid': fake.INSTANCE_ID,
'host_name': 'fake_host',
'mountpoint': '/dev/vdc'}}
req = webob.Request.blank('/v3/%s/volumes/%s/action' %
(fake.PROJECT_ID, fake.VOLUME_ID))
req.method = "POST"
req.headers["content-type"] = "application/json"
req.body = jsonutils.dump_as_bytes(body)
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.context))
self.assertEqual(HTTPStatus.ACCEPTED, res.status_int)
def test_attach_to_host(self):
# using 'read-write' mode attach volume by default
body = {'os-attach': {'host_name': 'fake_host',
'mountpoint': '/dev/vdc'}}
req = webob.Request.blank('/v3/%s/volumes/%s/action' %
(fake.PROJECT_ID, fake.VOLUME_ID))
req.method = "POST"
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.context))
self.assertEqual(HTTPStatus.ACCEPTED, res.status_int)
def test_volume_attach_to_instance_raises_remote_error(self):
volume_remote_error = \
messaging.RemoteError(exc_type='InvalidUUID')
with mock.patch.object(volume_api.API, 'attach',
side_effect=volume_remote_error):
id = fake.VOLUME_ID
vol = {"instance_uuid": fake.INSTANCE_ID,
"mountpoint": "/dev/vdc",
"mode": "rw"}
body = {"os-attach": vol}
req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' %
(fake.PROJECT_ID, id))
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._attach,
req,
id,
body=body)
def test_volume_attach_to_instance_raises_db_error(self):
# In case of DB error 500 error code is returned to user
volume_remote_error = \
messaging.RemoteError(exc_type='DBError')
with mock.patch.object(volume_api.API, 'attach',
side_effect=volume_remote_error):
id = fake.VOLUME_ID
vol = {"instance_uuid": fake.INSTANCE_ID,
"mountpoint": "/dev/vdc",
"mode": "rw"}
body = {"os-attach": vol}
req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' %
(fake.PROJECT_ID, id))
self.assertRaises(messaging.RemoteError,
self.controller._attach,
req,
id,
body=body)
def test_detach(self):
    """A detach with a valid attachment id is accepted (202)."""
    payload = {'os-detach': {'attachment_id': fake.ATTACHMENT_ID}}
    url = '/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)
    request = webob.Request.blank(url)
    request.method = "POST"
    request.headers["content-type"] = "application/json"
    request.body = jsonutils.dump_as_bytes(payload)
    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.context))
    self.assertEqual(HTTPStatus.ACCEPTED, response.status_int)
def test_detach_null_attachment_id(self):
    """A null attachment_id is allowed by the schema and accepted."""
    payload = {'os-detach': {'attachment_id': None}}
    url = '/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)
    request = webob.Request.blank(url)
    request.method = "POST"
    request.headers["content-type"] = "application/json"
    request.body = jsonutils.dump_as_bytes(payload)
    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.context))
    self.assertEqual(HTTPStatus.ACCEPTED, response.status_int)
def test_volume_detach_raises_remote_error(self):
    """A VolumeAttachmentNotFound RemoteError from detach maps to 400."""
    volume_remote_error = \
        messaging.RemoteError(exc_type='VolumeAttachmentNotFound')
    with mock.patch.object(volume_api.API, 'detach',
                           side_effect=volume_remote_error):
        id = fake.VOLUME_ID
        vol = {"attachment_id": fake.ATTACHMENT_ID}
        body = {"os-detach": vol}
        req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' %
                                      (fake.PROJECT_ID, id))
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._detach,
                          req,
                          id,
                          body=body)
def test_volume_detach_raises_db_error(self):
    """An unrecognized DBError RemoteError is re-raised (HTTP 500)."""
    # In case of DB error 500 error code is returned to user
    volume_remote_error = \
        messaging.RemoteError(exc_type='DBError')
    with mock.patch.object(volume_api.API, 'detach',
                           side_effect=volume_remote_error):
        id = fake.VOLUME_ID
        vol = {"attachment_id": fake.ATTACHMENT_ID}
        body = {"os-detach": vol}
        req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' %
                                      (fake.PROJECT_ID, id))
        # DBError is not in the controller's translated set, so the
        # RemoteError itself escapes the handler.
        self.assertRaises(messaging.RemoteError,
                          self.controller._detach,
                          req,
                          id,
                          body=body)
def test_attach_with_invalid_arguments(self):
    """Malformed os-attach bodies are rejected with HTTP 400."""
    # Invalid request: no attach target (neither instance_uuid nor
    # host_name supplied).
    body = {'os-attach': {'mountpoint': '/dev/vdc'}}
    req = webob.Request.blank('/v3/%s/volumes/%s/action' %
                              (fake.PROJECT_ID, fake.VOLUME_ID))
    req.method = "POST"
    req.headers["content-type"] = "application/json"
    req.body = jsonutils.dump_as_bytes(body)
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.context))
    self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int)
    # Invalid request to attach volume with an invalid mode ('rr' is
    # neither 'rw' nor 'ro').
    body = {'os-attach': {'instance_uuid': 'fake',
                          'mountpoint': '/dev/vdc',
                          'mode': 'rr'}}
    req = webob.Request.blank('/v3/%s/volumes/%s/action' %
                              (fake.PROJECT_ID, fake.VOLUME_ID))
    req.method = "POST"
    req.headers["content-type"] = "application/json"
    req.body = jsonutils.dump_as_bytes(body)
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.context))
    self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int)
    # Same invalid-mode check for a host attach ('ww' is invalid).
    body = {'os-attach': {'host_name': 'fake_host',
                          'mountpoint': '/dev/vdc',
                          'mode': 'ww'}}
    req = webob.Request.blank('/v3/%s/volumes/%s/action' %
                              (fake.PROJECT_ID, fake.VOLUME_ID))
    req.method = "POST"
    req.headers["content-type"] = "application/json"
    req.body = jsonutils.dump_as_bytes(body)
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.context))
    self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int)
def test_attach_to_instance_no_mountpoint(self):
    """Attach without the required mountpoint parameter returns 400."""
    # The mountpoint parameter is required. If not provided the
    # API should fail with a 400 error.
    body = {'os-attach': {'instance_uuid': fake.INSTANCE_ID,
                          'mode': 'rw'}}
    req = webob.Request.blank('/v3/%s/volumes/%s/action' %
                              (fake.PROJECT_ID, fake.VOLUME_ID))
    req.method = "POST"
    req.body = jsonutils.dump_as_bytes(body)
    req.headers["content-type"] = "application/json"
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.context))
    # Use the HTTPStatus enum for consistency with every other
    # assertion in this module (was a bare 400 literal).
    self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int)
def test_begin_detaching(self):
    """os-begin_detaching returns 202 when the volume API succeeds."""
    self.mock_object(volume.api.API, 'begin_detaching',
                     lambda *args, **kwargs: {})
    payload = {'os-begin_detaching': {'fake': 'fake'}}
    url = '/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)
    request = webob.Request.blank(url)
    request.method = "POST"
    request.headers["content-type"] = "application/json"
    request.body = jsonutils.dump_as_bytes(payload)
    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.context))
    self.assertEqual(HTTPStatus.ACCEPTED, response.status_int)
def test_roll_detaching(self):
    """os-roll_detaching returns 202 when the volume API succeeds."""
    self.mock_object(volume.api.API, 'roll_detaching',
                     lambda *args, **kwargs: {})
    payload = {'os-roll_detaching': {'fake': 'fake'}}
    url = '/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)
    request = webob.Request.blank(url)
    request.method = "POST"
    request.headers["content-type"] = "application/json"
    request.body = jsonutils.dump_as_bytes(payload)
    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.context))
    self.assertEqual(HTTPStatus.ACCEPTED, response.status_int)
def test_extend_volume(self):
    """os-extend with a valid integer size is accepted (202)."""
    self.mock_object(volume.api.API, 'extend',
                     lambda *args, **kwargs: {})
    payload = {'os-extend': {'new_size': 5}}
    url = '/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)
    request = webob.Request.blank(url)
    request.method = "POST"
    request.headers["content-type"] = "application/json"
    request.body = jsonutils.dump_as_bytes(payload)
    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.context))
    self.assertEqual(HTTPStatus.ACCEPTED, response.status_int)
def test_extend_volume_invalid_status(self):
    """InvalidVolume raised by extend is translated to HTTP 400."""
    def raise_invalid_volume(*args, **kwargs):
        msg = "Volume status must be available"
        raise exception.InvalidVolume(reason=msg)
    self.mock_object(volume.api.API, 'extend', raise_invalid_volume)
    payload = {'os-extend': {'new_size': 5}}
    url = '/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)
    request = webob.Request.blank(url)
    request.method = "POST"
    request.headers["content-type"] = "application/json"
    request.body = jsonutils.dump_as_bytes(payload)
    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.context))
    self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int)
@ddt.data((True, HTTPStatus.ACCEPTED), (False, HTTPStatus.ACCEPTED),
          ('1', HTTPStatus.ACCEPTED), ('0', HTTPStatus.ACCEPTED),
          ('true', HTTPStatus.ACCEPTED), ('false', HTTPStatus.ACCEPTED),
          ('tt', HTTPStatus.BAD_REQUEST), (11, HTTPStatus.BAD_REQUEST),
          (None, HTTPStatus.BAD_REQUEST))
@ddt.unpack
def test_update_readonly_flag(self, readonly, return_code):
    """Boolean-like readonly values are accepted; others return 400."""
    def fake_update_readonly_flag(*args, **kwargs):
        return {}
    self.mock_object(volume.api.API, 'update_readonly_flag',
                     fake_update_readonly_flag)
    body = {"os-update_readonly_flag": {"readonly": readonly}}
    # None in the ddt data means "readonly key omitted entirely",
    # not readonly=None.
    if readonly is None:
        body = {"os-update_readonly_flag": {}}
    req = webob.Request.blank('/v3/%s/volumes/%s/action' %
                              (fake.PROJECT_ID, fake.VOLUME_ID))
    req.method = "POST"
    req.body = jsonutils.dump_as_bytes(body)
    req.headers["content-type"] = "application/json"
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.context))
    self.assertEqual(return_code, res.status_int)
@ddt.data((True, HTTPStatus.OK), (False, HTTPStatus.OK),
          ('1', HTTPStatus.OK), ('0', HTTPStatus.OK),
          ('true', HTTPStatus.OK), ('false', HTTPStatus.OK),
          ('tt', HTTPStatus.BAD_REQUEST), (11, HTTPStatus.BAD_REQUEST),
          (None, HTTPStatus.BAD_REQUEST))
@ddt.unpack
def test_set_bootable(self, bootable, return_code):
    """Boolean-like bootable values return 200; others return 400."""
    body = {"os-set_bootable": {"bootable": bootable}}
    # None in the ddt data means "bootable key omitted entirely".
    if bootable is None:
        body = {"os-set_bootable": {}}
    req = webob.Request.blank('/v3/%s/volumes/%s/action' %
                              (fake.PROJECT_ID, fake.VOLUME_ID))
    req.method = "POST"
    req.body = jsonutils.dump_as_bytes(body)
    req.headers["content-type"] = "application/json"
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.context))
    self.assertEqual(return_code, res.status_int)
@ddt.ddt
class VolumeRetypeActionsTest(test.TestCase):
    """Tests for the os-retype volume action (changing a volume's type)."""

    def setUp(self):
        super(VolumeRetypeActionsTest, self).setUp()
        self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,
                                              is_admin=False)
        self.flags(transport_url='fake:/')
        # Patch quota bookkeeping so individual tests can inspect these
        # mocks or inject failures (e.g. OverQuota) through them.
        self.retype_mocks = {}
        paths = ('cinder.quota.QUOTAS.add_volume_type_opts',
                 'cinder.quota.QUOTAS.reserve')
        for path in paths:
            name = path.split('.')[-1]
            patcher = mock.patch(path, return_value=None)
            self.retype_mocks[name] = patcher.start()
            self.addCleanup(patcher.stop)

    @mock.patch('cinder.db.sqlalchemy.api.resource_exists', return_value=True)
    def _retype_volume_exec(self, expected_status,
                            new_type=fake.VOLUME_TYPE2_ID, vol_id=None,
                            exists_mock=None):
        """POST an os-retype action and assert the HTTP response status."""
        vol_id = vol_id or fake.VOLUME_ID
        req = webob.Request.blank('/v3/%s/volumes/%s/action' %
                                  (fake.PROJECT_ID, vol_id))
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        retype_body = {'new_type': new_type, 'migration_policy': 'never'}
        req.body = jsonutils.dump_as_bytes({'os-retype': retype_body})
        res = req.get_response(fakes.wsgi_app(fake_auth_context=self.context))
        self.assertEqual(expected_status, res.status_int)

    def test_retype_volume_no_body(self):
        # Request with no body should fail
        vol = utils.create_volume(self.context,
                                  status='available',
                                  testcase_instance=self)
        req = webob.Request.blank('/v3/%s/volumes/%s/action' %
                                  (fake.PROJECT_ID, vol.id))
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.body = jsonutils.dump_as_bytes({'os-retype': None})
        res = req.get_response(fakes.wsgi_app(fake_auth_context=self.context))
        self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int)

    def test_retype_volume_bad_policy(self):
        # Request with invalid migration policy should fail
        vol = utils.create_volume(self.context,
                                  status='available',
                                  testcase_instance=self)
        req = webob.Request.blank('/v3/%s/volumes/%s/action' %
                                  (fake.PROJECT_ID, vol.id))
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        retype_body = {'new_type': 'foo', 'migration_policy': 'invalid'}
        req.body = jsonutils.dump_as_bytes({'os-retype': retype_body})
        res = req.get_response(fakes.wsgi_app(fake_auth_context=self.context))
        self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int)

    def test_retype_volume_bad_status(self):
        # Should fail if volume does not have proper status
        vol_type_old = utils.create_volume_type(context.get_admin_context(),
                                                self, name='old')
        vol_type_new = utils.create_volume_type(context.get_admin_context(),
                                                self, name='new')
        vol = utils.create_volume(self.context,
                                  status='error',
                                  volume_type_id=vol_type_old.id,
                                  testcase_instance=self)
        self._retype_volume_exec(HTTPStatus.BAD_REQUEST, vol_type_new.id,
                                 vol.id)

    def test_retype_type_no_exist(self):
        # Should fail if new type does not exist
        vol_type_old = utils.create_volume_type(context.get_admin_context(),
                                                self, name='old')
        vol = utils.create_volume(self.context,
                                  status='available',
                                  volume_type_id=vol_type_old.id,
                                  testcase_instance=self)
        self._retype_volume_exec(HTTPStatus.NOT_FOUND, 'fake_vol_type',
                                 vol.id)

    def test_retype_same_type(self):
        # Should fail if new type and old type are the same
        vol_type_old = utils.create_volume_type(context.get_admin_context(),
                                                self, name='old')
        vol = utils.create_volume(self.context,
                                  status='available',
                                  volume_type_id=vol_type_old.id,
                                  testcase_instance=self)
        self._retype_volume_exec(HTTPStatus.BAD_REQUEST, vol_type_old.id,
                                 vol.id)

    def test_retype_over_quota(self):
        # Should fail if going over quota for new type
        vol_type_new = utils.create_volume_type(context.get_admin_context(),
                                                self, name='old')
        vol = utils.create_volume(self.context,
                                  status='available',
                                  testcase_instance=self)
        exc = exception.OverQuota(overs=['gigabytes'],
                                  quotas={'gigabytes': 20},
                                  usages={'gigabytes': {'reserved': 5,
                                                        'in_use': 15}})
        # Injecting OverQuota via the QUOTAS.reserve mock set up in setUp.
        self.retype_mocks['reserve'].side_effect = exc
        self._retype_volume_exec(HTTPStatus.REQUEST_ENTITY_TOO_LARGE,
                                 vol_type_new.id, vol.id)

    # Tuples are (vol_status, consumer_pass, expected_status,
    # same_qos=False, has_qos=True, has_type=True); see the method
    # docstring for the per-case intent.
    @ddt.data(('in-use', 'front-end', HTTPStatus.BAD_REQUEST),
              ('in-use', 'back-end', HTTPStatus.ACCEPTED),
              ('available', 'front-end', HTTPStatus.ACCEPTED),
              ('available', 'back-end', HTTPStatus.ACCEPTED),
              ('in-use', 'front-end', HTTPStatus.ACCEPTED, True),
              ('in-use', 'back-end', HTTPStatus.ACCEPTED, True),
              ('available', 'front-end', HTTPStatus.ACCEPTED, True),
              ('available', 'back-end', HTTPStatus.ACCEPTED, True),
              ('in-use', 'front-end', HTTPStatus.BAD_REQUEST, False, False),
              ('in-use', 'back-end', HTTPStatus.ACCEPTED, False, False),
              ('in-use', '', HTTPStatus.ACCEPTED, True, False),
              ('available', 'front-end', HTTPStatus.ACCEPTED, False, False),
              ('available', 'back-end', HTTPStatus.ACCEPTED, False, False),
              ('available', '', HTTPStatus.ACCEPTED, True, False),
              ('in-use', 'front-end', HTTPStatus.BAD_REQUEST, False,
               False, False),
              ('in-use', '', HTTPStatus.ACCEPTED, True, False, False),
              ('in-use', 'back-end', HTTPStatus.ACCEPTED, False,
               False, False),
              ('available', 'front-end', HTTPStatus.ACCEPTED, False,
               False, False),
              ('in-use', '', HTTPStatus.ACCEPTED, True, False, False),
              ('in-use', 'back-end', HTTPStatus.ACCEPTED, False,
               False, False))
    @ddt.unpack
    def test_retype_volume_qos(self, vol_status, consumer_pass,
                               expected_status, same_qos=False, has_qos=True,
                               has_type=True):
        """Test volume retype with QoS

        This test contains the following test-cases:
        1) should fail if changing qos enforced by front-end for in-use volume
        2) should NOT fail for in-use if changing qos enforced by back-end
        3) should NOT fail if changing qos enforced by FE for available
           volumes
        4) should NOT fail if changing qos enforced by back-end for available
           volumes
        5) should NOT fail if changing qos enforced by front-end for in-use
           volumes if the qos is the same
        6) should NOT fail if changing qos enforced by back-end for in-use
           volumes if the qos is the same
        7) should NOT fail if changing qos enforced by front-end for available
           volumes if the qos is the same
        8) should NOT fail if changing qos enforced by back-end for available
           volumes if the qos is the same
        9) should fail if changing qos enforced by front-end on the new type
           and volume originally had no qos and was in-use
        10) should NOT fail if changing qos enforced by back-end on the
            new type and volume originally had no qos and was in-use
        11) should NOT fail if original and destination types had no qos for
            in-use volumes
        12) should NOT fail if changing qos enforced by front-end on the
            new type and volume originally had no qos and was available
        13) should NOT fail if changing qos enforced by back-end on the
            new type and volume originally had no qos and was available
        14) should NOT fail if original and destination types had no qos for
            available volumes
        15) should fail if changing volume had no type, was in-use and
            destination type qos was enforced by front-end
        16) should NOT fail if changing volume had no type, was in-use and
            destination type had no qos
            and volume originally had no type and was in-use
        17) should NOT fail if changing volume had no type, was in-use and
            destination type qos was enforced by back-end
        18) should NOT fail if changing volume had no type, was in-use and
            destination type qos was enforced by front-end
        19) should NOT fail if changing volume had no type, was available and
            destination type had no qos
            and volume originally had no type and was in-use
        20) should NOT fail if changing volume had no type, was available and
            destination type qos was enforced by back-end
        """
        admin_ctxt = context.get_admin_context()
        if has_qos:
            qos_old = utils.create_qos(admin_ctxt, self,
                                       name='old',
                                       consumer=consumer_pass)['id']
        else:
            qos_old = None
        if same_qos:
            qos_new = qos_old
        else:
            qos_new = utils.create_qos(admin_ctxt, self,
                                       name='new',
                                       consumer=consumer_pass)['id']
        if has_type:
            vol_type_old = utils.create_volume_type(admin_ctxt, self,
                                                    name='old',
                                                    qos_specs_id=qos_old).id
        else:
            # "No type" means the configured default type.
            vol_type_old = v3_fakes.fake_default_type_get()['id']
        vol_type_new = utils.create_volume_type(admin_ctxt, self,
                                                name='new',
                                                qos_specs_id=qos_new).id
        vol = utils.create_volume(self.context,
                                  status=vol_status,
                                  volume_type_id=vol_type_old,
                                  testcase_instance=self)
        self._retype_volume_exec(expected_status, vol_type_new, vol.id)

    # Tuples are (vol_status, expected_status, has_type=True,
    # enc_orig=True, enc_dest=True).
    @ddt.data(('available', HTTPStatus.ACCEPTED, False, False, False),
              ('available', HTTPStatus.ACCEPTED, False, False),
              ('available', HTTPStatus.ACCEPTED, True, False, False),
              ('available', HTTPStatus.ACCEPTED, True, False),
              ('available', HTTPStatus.ACCEPTED))
    @ddt.unpack
    def test_retype_volume_encryption(self, vol_status, expected_status,
                                      has_type=True,
                                      enc_orig=True, enc_dest=True):
        # NOTE(review): this unconditionally discards the enc_orig ddt
        # parameter, so encryption is never created on the old type and
        # the enc_orig=True cases above are not actually exercised —
        # looks unintentional; confirm before relying on this coverage.
        enc_orig = None
        admin_ctxt = context.get_admin_context()
        if has_type:
            vol_type_old = utils.create_volume_type(admin_ctxt, self,
                                                    name='old').id
            if enc_orig:
                utils.create_encryption(admin_ctxt, vol_type_old, self)
        else:
            vol_type_old = v3_fakes.fake_default_type_get()['id']
        vol_type_new = utils.create_volume_type(admin_ctxt, self,
                                                name='new').id
        if enc_dest:
            utils.create_encryption(admin_ctxt, vol_type_new, self)
        vol = utils.create_volume(self.context,
                                  status=vol_status,
                                  volume_type_id=vol_type_old,
                                  testcase_instance=self)
        self._retype_volume_exec(expected_status, vol_type_new, vol.id)
def fake_volume_get(self, context, volume_id):
    """Stand-in for volume_api.API.get returning a stub volume dict.

    VOLUME3_ID is reported as 'in-use'; every other id as 'available'.
    """
    volume = v3_fakes.create_volume(volume_id)
    volume['status'] = ('in-use' if volume_id == fake.VOLUME3_ID
                        else 'available')
    return volume
def fake_volume_get_obj(self, context, volume_id, **kwargs):
    """Stand-in for volume_api.API.get returning a Volume object.

    VOLUME3_ID is 'in-use', everything else 'available'; the special
    ENCRYPTED_VOLUME_ID gets a dummy encryption key id set.
    """
    volume = fake_volume.fake_volume_obj(context,
                                         id=volume_id,
                                         display_description='displaydesc',
                                         **kwargs)
    if volume_id == fake.VOLUME3_ID:
        volume.status = 'in-use'
    else:
        volume.status = 'available'
    if volume_id == ENCRYPTED_VOLUME_ID:
        # Item-style assignment on the versioned object — presumably
        # OVOs support __setitem__ for fields; confirm.
        volume['encryption_key_id'] = 'does_not_matter'
    volume.volume_type = fake_volume.fake_volume_type_obj(
        context,
        name=v3_fakes.DEFAULT_VOL_TYPE)
    return volume
def fake_upload_volume_to_image_service(self, context, volume, metadata,
                                        force):
    """Stand-in for copy_volume_to_image echoing an upload response."""
    return {
        "id": volume['id'],
        "updated_at": datetime.datetime(1, 1, 1, 1, 1, 1),
        "status": 'uploading',
        "display_description": volume['display_description'],
        "size": volume['size'],
        "volume_type": volume['volume_type'],
        "image_id": fake.IMAGE_ID,
        "container_format": 'bare',
        "disk_format": 'raw',
        "image_name": 'image_name',
    }
@ddt.ddt
class VolumeImageActionsTest(test.TestCase):
def setUp(self):
    """Create the actions controller and a non-admin request context."""
    super(VolumeImageActionsTest, self).setUp()
    self.controller = volume_actions.VolumeActionsController()
    self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,
                                          is_admin=False)
    # Expected upload-response dicts are large; raise the diff limit
    # so assertion failures stay readable.
    self.maxDiff = 2000
def _get_os_volume_upload_image(self):
vol = {
"container_format": 'bare',
"disk_format": 'raw',
"image_name": 'image_name',
"force": True}
body = {"os-volume_upload_image": vol}
return body
def fake_image_service_create(self, *args):
    """Stand-in for the Glance image-create call (queued image stub)."""
    return {
        'status': 'queued',
        'name': 'image_name',
        'deleted': False,
        'container_format': 'bare',
        'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
        'disk_format': 'raw',
        'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
        'id': fake.IMAGE_ID,
        'min_ram': 0,
        'checksum': None,
        'min_disk': 0,
        'deleted_at': None,
        'properties': {'x_billing_code_license': '246254365'},
        'size': 0,
    }
def fake_image_service_create_with_params(self, *args):
    """Glance create stub that also carries visibility/protected fields."""
    ret = {
        'status': 'queued',
        'name': 'image_name',
        'deleted': False,
        'container_format': 'bare',
        'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
        'disk_format': 'raw',
        'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
        'id': fake.IMAGE_ID,
        'min_ram': 0,
        'checksum': None,
        'min_disk': 0,
        'visibility': 'public',
        'protected': True,
        'deleted_at': None,
        'properties': {'x_billing_code_license': '246254365'},
        'size': 0}
    return ret
def fake_rpc_copy_volume_to_image(self, *args):
    """No-op stand-in for the RPC copy_volume_to_image call."""
    pass
@mock.patch.object(volume_api.API, 'get', fake_volume_get_obj)
@mock.patch.object(volume_api.API, "copy_volume_to_image",
                   fake_upload_volume_to_image_service)
def test_copy_volume_to_image(self):
    """Happy path: the controller echoes the upload metadata back."""
    id = fake.VOLUME_ID
    img = {"container_format": 'bare',
           "disk_format": 'raw',
           "image_name": 'image_name',
           "force": True}
    body = {"os-volume_upload_image": img}
    req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' %
                                  (fake.PROJECT_ID, id))
    res_dict = self.controller._volume_upload_image(req, id, body=body)
    expected = {'os-volume_upload_image':
                {'id': id,
                 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                 'status': 'uploading',
                 'display_description': 'displaydesc',
                 'size': 1,
                 'volume_type': fake_volume.fake_volume_type_obj(
                     context,
                     name='vol_type_name'),
                 'image_id': fake.IMAGE_ID,
                 'container_format': 'bare',
                 'disk_format': 'raw',
                 'image_name': 'image_name'}}
    self.assertDictEqual(expected, res_dict)
@mock.patch.object(volume_api.API, 'get', fake_volume_get_obj)
@mock.patch.object(volume_api.API, "copy_volume_to_image")
def test_check_image_metadata_copy_encrypted_volume_to_image(
        self, mock_copy_vol):
    """Make sure the encryption image properties exit the controller."""
    # all we're interested in is that the 'metadata' dict contains the
    # correct data, so we do this bad hack to smuggle it out in the
    # controller's response to make it easy to access
    def really_fake_upload_volume(context, volume, metadata, force):
        return metadata
    mock_copy_vol.side_effect = really_fake_upload_volume
    FAKE_ID = 'fake-encryption-key-id'
    # the controller does a lazy init of the key manager, so we
    # need a 2-level mock here
    self.mock_object(self.controller, '_key_mgr')
    # 'not None' is just a truthy placeholder for the manager object.
    self.controller._key_mgr.return_value = not None
    # NOTE(review): '_key_mgr' is mocked above but '_key_manager' is
    # stubbed here — presumably '_key_manager' is a property backed by
    # '_key_mgr'; confirm against the controller implementation.
    self.mock_object(self.controller._key_manager, 'store')
    self.controller._key_manager.store.return_value = FAKE_ID
    vol_id = ENCRYPTED_VOLUME_ID
    img = {"container_format": 'bare',
           "disk_format": 'raw',
           "image_name": 'image_name',
           "force": True}
    body = {"os-volume_upload_image": img}
    req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' %
                                  (fake.PROJECT_ID, vol_id))
    res_dict = self.controller._volume_upload_image(req, vol_id, body=body)
    sent_meta = res_dict['os-volume_upload_image']
    self.assertIn('cinder_encryption_key_id', sent_meta)
    self.assertEqual(FAKE_ID, sent_meta['cinder_encryption_key_id'])
    self.assertIn('cinder_encryption_key_deletion_policy', sent_meta)
    self.assertEqual('on_image_deletion',
                     sent_meta['cinder_encryption_key_deletion_policy'])
@mock.patch.object(volume_api.API, 'get', fake_volume_get_obj)
@mock.patch.object(volume_api.API, "copy_volume_to_image")
def test_check_image_metadata_copy_nonencrypted_volume_to_image(
        self, mock_copy_vol):
    """Make sure no encryption image properties are sent."""
    # Same smuggling trick as the encrypted-volume test: echo the
    # metadata dict back through the controller response.
    def really_fake_upload_volume(context, volume, metadata, force):
        return metadata
    mock_copy_vol.side_effect = really_fake_upload_volume
    id = fake.VOLUME_ID
    img = {"container_format": 'bare',
           "disk_format": 'raw',
           "image_name": 'image_name',
           "force": True}
    body = {"os-volume_upload_image": img}
    req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' %
                                  (fake.PROJECT_ID, id))
    res_dict = self.controller._volume_upload_image(req, id, body=body)
    sent_meta = res_dict['os-volume_upload_image']
    self.assertNotIn('cinder_encryption_key_id', sent_meta)
    self.assertNotIn('cinder_encryption_key_deletion_policy', sent_meta)
def test_copy_volume_to_image_volumenotfound(self):
    """Uploading a non-existent volume propagates VolumeNotFound."""
    def raise_not_found(self, context, volume_id):
        raise exception.VolumeNotFound(volume_id=volume_id)
    self.mock_object(volume_api.API, 'get', raise_not_found)
    missing_id = fake.WILL_NOT_BE_FOUND_ID
    upload_args = {"container_format": 'bare',
                   "disk_format": 'raw',
                   "image_name": 'image_name',
                   "force": True}
    request = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' %
                                      (fake.PROJECT_ID, missing_id))
    self.assertRaises(exception.VolumeNotFound,
                      self.controller._volume_upload_image,
                      request,
                      missing_id,
                      body={"os-volume_upload_image": upload_args})
@mock.patch.object(volume_api.API, 'get', fake_volume_get_obj)
@mock.patch.object(volume_api.API, 'copy_volume_to_image',
                   side_effect=exception.InvalidVolume(reason='blah'))
def test_copy_volume_to_image_invalidvolume(self, mock_copy):
    """InvalidVolume from copy_volume_to_image maps to HTTP 400."""
    volume_id = fake.VOLUME2_ID
    vol = {"container_format": 'bare',
           "disk_format": 'raw',
           "image_name": 'image_name',
           "force": True}
    body = {"os-volume_upload_image": vol}
    # Build the URL with the same id the controller is invoked with
    # (it previously used fake.VOLUME_ID, inconsistent with the
    # sibling remoteerror test).
    req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' %
                                  (fake.PROJECT_ID, volume_id))
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller._volume_upload_image,
                      req,
                      volume_id,
                      body=body)
@mock.patch.object(volume_api.API, 'get', fake_volume_get)
def test_copy_volume_to_image_invalid_disk_format(self):
    """A disk_format outside the schema enum fails validation."""
    # NOTE(review): fake.IMAGE_ID is used as the *volume* id here —
    # any uuid works since validation fails before the volume lookup,
    # but a VOLUME_* constant would read better; confirm.
    id = fake.IMAGE_ID
    vol = {"container_format": 'bare',
           "disk_format": 'iso',
           "image_name": 'image_name',
           "force": True}
    body = {"os-volume_upload_image": vol}
    req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action'
                                  % (fake.PROJECT_ID, id))
    self.assertRaises(exception.ValidationError,
                      self.controller._volume_upload_image,
                      req,
                      id,
                      body=body)
@mock.patch.object(volume_api.API, 'get', fake_volume_get_obj)
def test_copy_volume_to_image_bad_disk_format_for_encrypted_vol(self):
    """Encrypted volumes may only be uploaded as raw images."""
    volume_id = ENCRYPTED_VOLUME_ID
    upload_args = {"container_format": 'bare',
                   "disk_format": 'qcow2',
                   "image_name": 'image_name',
                   "force": True}
    request = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action'
                                      % (fake.PROJECT_ID, volume_id))
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller._volume_upload_image,
                      request,
                      volume_id,
                      body={"os-volume_upload_image": upload_args})
@mock.patch.object(volume_api.API, 'get', fake_volume_get_obj)
def test_copy_volume_to_image_bad_container_format_for_encrypted_vol(self):
    """Encrypted volumes may only use the bare container format."""
    volume_id = ENCRYPTED_VOLUME_ID
    upload_args = {"container_format": 'ovf',
                   "disk_format": 'raw',
                   "image_name": 'image_name',
                   "force": True}
    request = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action'
                                      % (fake.PROJECT_ID, volume_id))
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller._volume_upload_image,
                      request,
                      volume_id,
                      body={"os-volume_upload_image": upload_args})
@mock.patch.object(volume_api.API, "copy_volume_to_image")
def test_copy_volume_to_image_disk_format_ploop(self,
                                                mock_copy_to_image):
    """The ploop disk format is passed through to the volume API."""
    volume = utils.create_volume(self.context, metadata={'test': 'test'})
    img = {"container_format": 'bare',
           "disk_format": 'ploop',
           "image_name": 'image_name'}
    body = {"os-volume_upload_image": img}
    req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' %
                                  (fake.PROJECT_ID, volume.id))
    image_metadata = {'container_format': 'bare',
                      'disk_format': 'ploop',
                      'name': 'image_name'}
    self.controller._volume_upload_image(req, volume.id, body=body)
    # force defaults to False when omitted from the request body.
    mock_copy_to_image.assert_called_once_with(
        req.environ['cinder.context'], volume, image_metadata, False)
@mock.patch.object(volume_api.API, 'get', fake_volume_get_obj)
@mock.patch.object(volume_api.API, 'copy_volume_to_image',
                   side_effect=ValueError)
def test_copy_volume_to_image_valueerror(self, mock_copy):
    """A ValueError from copy_volume_to_image maps to HTTP 400."""
    volume_id = fake.VOLUME2_ID
    vol = {"container_format": 'bare',
           "disk_format": 'raw',
           "image_name": 'image_name',
           "force": True}
    body = {"os-volume_upload_image": vol}
    # Build the URL with the same id the controller is invoked with
    # (it previously used fake.VOLUME_ID, inconsistent with the
    # sibling remoteerror test).
    req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' %
                                  (fake.PROJECT_ID, volume_id))
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller._volume_upload_image,
                      req,
                      volume_id,
                      body=body)
@mock.patch.object(volume_api.API, 'get', fake_volume_get_obj)
@mock.patch.object(volume_api.API, 'copy_volume_to_image',
                   side_effect=messaging.RemoteError)
def test_copy_volume_to_image_remoteerror(self, mock_copy):
    """A RemoteError from copy_volume_to_image maps to HTTP 400."""
    volume_id = fake.VOLUME2_ID
    upload_args = {"container_format": 'bare',
                   "disk_format": 'raw',
                   "image_name": 'image_name',
                   "force": True}
    request = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' %
                                      (fake.PROJECT_ID, volume_id))
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller._volume_upload_image,
                      request,
                      volume_id,
                      body={"os-volume_upload_image": upload_args})
@mock.patch.object(volume_api.API, 'get', fake_volume_get_obj)
@mock.patch.object(volume_api.API, 'copy_volume_to_image',
                   side_effect=messaging.RemoteError)
@ddt.data(
    ({"image_name": 'image_name', "protected": None},
     exception.ValidationError),
    ({"image_name": 'image_name', "protected": ' '},
     exception.ValidationError),
    ({"image_name": 'image_name', "protected": 'test'},
     exception.ValidationError),
    ({"image_name": 'image_name', "visibility": 'test'},
     exception.ValidationError),
    ({"image_name": 'image_name', "visibility": ' '},
     exception.ValidationError),
    ({"image_name": 'image_name', "visibility": None},
     exception.ValidationError))
@ddt.unpack
def test_copy_volume_to_image_invalid_request_body(
        self, vol, exception, mock_copy):
    """Non-boolean protected / bad visibility values fail validation."""
    # Note: the ddt 'exception' parameter shadows the exception module
    # within this method body.
    id = fake.VOLUME2_ID
    body = {"os-volume_upload_image": vol}
    req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' %
                                  (fake.PROJECT_ID, id))
    # visibility/protected are only valid from this microversion on.
    req.api_version_request = api_version.APIVersionRequest("3.1")
    self.assertRaises(exception,
                      self.controller._volume_upload_image,
                      req, id, body=body)
def test_volume_upload_image_typeerror(self):
    """An unrecognized action key in the body results in HTTP 400."""
    payload = {"os-volume_upload_image_fake": "fake"}
    request = webob.Request.blank('/v3/%s/volumes/%s/action' %
                                  (fake.PROJECT_ID, fake.VOLUME2_ID))
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(payload)
    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.context))
    self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int)
def test_volume_upload_image_without_type(self):
    """An empty-string action key in the body results in HTTP 400."""
    upload_args = {"container_format": 'bare',
                   "disk_format": 'raw',
                   "image_name": None,
                   "force": True}
    payload = {"": upload_args}
    request = webob.Request.blank('/v3/%s/volumes/%s/action' %
                                  (fake.PROJECT_ID, fake.VOLUME2_ID))
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = jsonutils.dump_as_bytes(payload)
    response = request.get_response(
        fakes.wsgi_app(fake_auth_context=self.context))
    self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int)
@mock.patch.object(volume_api.API, 'get', fake_volume_get)
def test_extend_volume_valueerror(self):
    """A non-integer new_size fails request-schema validation."""
    volume_id = fake.VOLUME2_ID
    request = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' %
                                      (fake.PROJECT_ID, volume_id))
    self.assertRaises(exception.ValidationError,
                      self.controller._extend,
                      request,
                      volume_id,
                      body={'os-extend': {'new_size': 'fake'}})
@ddt.data({'version': mv.get_prior_version(mv.VOLUME_EXTEND_INUSE),
           'status': 'available'},
          {'version': mv.get_prior_version(mv.VOLUME_EXTEND_INUSE),
           'status': 'in-use'},
          {'version': mv.VOLUME_EXTEND_INUSE,
           'status': 'available'},
          {'version': mv.VOLUME_EXTEND_INUSE,
           'status': 'in-use'})
@ddt.unpack
def test_extend_attached_volume(self, version, status):
    """attached=True is passed only for in-use volumes at the
    VOLUME_EXTEND_INUSE microversion or later.
    """
    vol = db.volume_create(self.context,
                           {'size': 1, 'project_id': fake.PROJECT_ID,
                            'status': status,
                            'volume_type_id': fake.VOLUME_TYPE_ID})
    self.mock_object(volume_api.API, 'get', return_value=vol)
    # Mock the private _extend so both the public extend and
    # extend_attached paths funnel into one observable call.
    mock_extend = self.mock_object(volume_api.API, '_extend')
    body = {"os-extend": {"new_size": 2}}
    req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' %
                                  (fake.PROJECT_ID, vol['id']))
    req.api_version_request = mv.get_api_version(version)
    self.controller._extend(req, vol['id'], body=body)
    if version == mv.VOLUME_EXTEND_INUSE and status == 'in-use':
        mock_extend.assert_called_with(req.environ['cinder.context'],
                                       vol, 2, attached=True)
    else:
        mock_extend.assert_called_with(req.environ['cinder.context'],
                                       vol, 2, attached=False)
def test_extend_volume_no_exist(self):
    """Extending a non-existent volume propagates VolumeNotFound."""
    missing_id = fake.WILL_NOT_BE_FOUND_ID
    request = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' %
                                      (fake.PROJECT_ID, missing_id))
    self.assertRaises(exception.VolumeNotFound,
                      self.controller._extend,
                      request,
                      missing_id,
                      body={'os-extend': {'new_size': 5}})
def test_copy_volume_to_image_notimagename(self):
    """A null image_name fails request-schema validation."""
    volume_id = fake.VOLUME2_ID
    upload_args = {"container_format": 'bare',
                   "disk_format": 'raw',
                   "image_name": None,
                   "force": True}
    request = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' %
                                      (fake.PROJECT_ID, volume_id))
    self.assertRaises(exception.ValidationError,
                      self.controller._volume_upload_image,
                      request,
                      volume_id,
                      body={"os-volume_upload_image": upload_args})
def _create_volume_with_type(self, status='available',
                             display_description='displaydesc', **kwargs):
    """Create a typed volume plus the expected upload-image response.

    Returns a (volume, expected_dict) pair; both the volume type and
    the volume are destroyed at test cleanup.
    """
    admin_ctxt = context.get_admin_context()
    vol_type = db.volume_type_create(admin_ctxt, {'name': 'vol_name'})
    self.addCleanup(db.volume_type_destroy, admin_ctxt, vol_type.id)
    volume = utils.create_volume(self.context, volume_type_id=vol_type.id,
                                 status=status,
                                 display_description=display_description,
                                 **kwargs)
    self.addCleanup(db.volume_destroy, admin_ctxt, volume.id)
    expected = {
        'os-volume_upload_image': {
            'id': volume.id,
            'updated_at': mock.ANY,
            'status': 'uploading',
            'display_description': 'displaydesc',
            'size': 1,
            'volume_type': mock.ANY,
            'image_id': fake.IMAGE_ID,
            'container_format': 'bare',
            'disk_format': 'raw',
            'image_name': 'image_name'
        }
    }
    return volume, expected
@mock.patch.object(volume_api.API, "get_volume_image_metadata")
@mock.patch.object(glance.GlanceImageService, "create")
@mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image")
def test_copy_volume_to_image_with_protected_prop(
        self, mock_copy_to_image, mock_create, mock_get_image_metadata):
    """Test create image from volume with protected properties."""
    volume, expected = self._create_volume_with_type()
    mock_get_image_metadata.return_value = {"volume_id": volume.id,
                                            "key": "x_billing_license",
                                            "value": "246254365"}
    mock_create.side_effect = self.fake_image_service_create
    req = fakes.HTTPRequest.blank(
        '/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, volume.id),
        use_admin_context=self.context.is_admin)
    body = self._get_os_volume_upload_image()
    res_dict = self.controller._volume_upload_image(req, volume.id,
                                                    body=body)
    self.assertDictEqual(expected, res_dict)
    # The volume transitions to 'uploading' and remembers its previous
    # status in the DB record.
    vol_db = objects.Volume.get_by_id(self.context, volume.id)
    self.assertEqual('uploading', vol_db.status)
    self.assertEqual('available', vol_db.previous_status)
@mock.patch.object(volume_api.API, 'get', fake_volume_get_obj)
def test_copy_volume_to_image_public_not_authorized(self):
    """Test unauthorized create public image from volume.

    A non-admin context requesting visibility='public' must be rejected
    with PolicyNotAuthorized.
    """
    # Renamed from ``id`` to avoid shadowing the builtin.
    vol_id = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
    req = fakes.HTTPRequest.blank('/v3/tenant1/volumes/%s/action' % vol_id)
    req.environ['cinder.context'].is_admin = False
    req.headers = mv.get_mv_header(mv.UPLOAD_IMAGE_PARAMS)
    req.api_version_request = mv.get_api_version(mv.UPLOAD_IMAGE_PARAMS)
    body = self._get_os_volume_upload_image()
    body['os-volume_upload_image']['visibility'] = 'public'
    self.assertRaises(exception.PolicyNotAuthorized,
                      self.controller._volume_upload_image,
                      req, vol_id, body=body)
@mock.patch.object(volume_api.API, "get_volume_image_metadata")
@mock.patch.object(glance.GlanceImageService, "create")
@mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image")
def test_copy_volume_to_image_without_glance_metadata(
        self, mock_copy_to_image, mock_create, mock_get_image_metadata):
    """Test create image from volume if volume is created without image.

    In this case volume glance metadata will not be available for this
    volume (GlanceMetadataNotFound is raised), which the action must
    tolerate and still succeed.
    """
    volume, expected = self._create_volume_with_type()
    # Simulate a volume that was never created from an image.
    mock_get_image_metadata.side_effect = \
        exception.GlanceMetadataNotFound(id=volume.id)
    mock_create.side_effect = self.fake_image_service_create
    req = fakes.HTTPRequest.blank(
        '/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, volume.id),
        use_admin_context=self.context.is_admin)
    body = self._get_os_volume_upload_image()
    res_dict = self.controller._volume_upload_image(req, volume.id,
                                                    body=body)
    self.assertDictEqual(expected, res_dict)
    vol_db = objects.Volume.get_by_id(self.context, volume.id)
    self.assertEqual('uploading', vol_db.status)
    self.assertEqual('available', vol_db.previous_status)
@mock.patch.object(volume_api.API, "get_volume_image_metadata")
@mock.patch.object(glance.GlanceImageService, "create")
@mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image")
def test_copy_volume_to_image_fail_image_create(
        self, mock_copy_to_image, mock_create, mock_get_image_metadata):
    """Test create image from volume if create image fails.

    In this case API will rollback to previous status: the volume stays
    'available', no RPC copy is issued, and HTTPBadRequest is returned.
    """
    volume = utils.create_volume(self.context)
    mock_get_image_metadata.return_value = {}
    # Any failure from the image service must abort the upload.
    mock_create.side_effect = Exception()
    req = fakes.HTTPRequest.blank(
        '/v3/fakeproject/volumes/%s/action' % volume.id)
    body = self._get_os_volume_upload_image()
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller._volume_upload_image, req, volume.id,
                      body=body)
    # The copy RPC must never have been dispatched.
    self.assertFalse(mock_copy_to_image.called)
    vol_db = objects.Volume.get_by_id(self.context, volume.id)
    self.assertEqual('available', vol_db.status)
    self.assertIsNone(vol_db.previous_status)
    db.volume_destroy(context.get_admin_context(), volume.id)
@mock.patch.object(volume_api.API, "get_volume_image_metadata")
@mock.patch.object(glance.GlanceImageService, "create")
@mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image")
def test_copy_volume_to_image_in_use_no_force(
        self, mock_copy_to_image, mock_create, mock_get_image_metadata):
    """Test create image from in-use volume.

    In this case API will fail because we are not passing force: an
    in-use volume may only be uploaded with force=True (and the matching
    config option enabled), so HTTPBadRequest is expected and the volume
    status is left untouched.
    """
    volume = utils.create_volume(self.context, status='in-use')
    mock_get_image_metadata.return_value = {}
    mock_create.side_effect = self.fake_image_service_create
    req = fakes.HTTPRequest.blank(
        '/v3/fakeproject/volumes/%s/action' % volume.id)
    body = self._get_os_volume_upload_image()
    # Explicitly disable force on an in-use volume.
    body['os-volume_upload_image']['force'] = False
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller._volume_upload_image, req, volume.id,
                      body=body)
    self.assertFalse(mock_copy_to_image.called)
    vol_db = objects.Volume.get_by_id(self.context, volume.id)
    self.assertEqual('in-use', vol_db.status)
    self.assertIsNone(vol_db.previous_status)
    db.volume_destroy(context.get_admin_context(), volume.id)
@mock.patch.object(volume_api.API, "get_volume_image_metadata")
@mock.patch.object(glance.GlanceImageService, "create")
@mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image")
def test_copy_volume_to_image_in_use_with_force(
        self, mock_copy_to_image, mock_create, mock_get_image_metadata):
    """Test create image from in-use volume.

    In this case API will succeed only when CONF.enable_force_upload is
    enabled: the first attempt (option off) fails with HTTPBadRequest,
    the second attempt (option on) succeeds and records 'in-use' as the
    previous status.
    """
    volume, expected = self._create_volume_with_type(status='in-use')
    mock_get_image_metadata.return_value = {}
    mock_create.side_effect = self.fake_image_service_create
    req = fakes.HTTPRequest.blank(
        '/v3/fakeproject/volumes/%s/action' % volume.id,
        use_admin_context=self.context.is_admin)
    body = self._get_os_volume_upload_image()
    # First attempt: enable_force_upload defaults to off, so even a
    # forced upload of an in-use volume must be rejected.
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller._volume_upload_image, req, volume.id,
                      body=body)
    self.assertFalse(mock_copy_to_image.called)
    vol_db = objects.Volume.get_by_id(self.context, volume.id)
    self.assertEqual('in-use', vol_db.status)
    self.assertIsNone(vol_db.previous_status)
    # Second attempt: with the option enabled the upload goes through.
    CONF.set_default('enable_force_upload', True)
    res_dict = self.controller._volume_upload_image(req, volume.id,
                                                    body=body)
    self.assertDictEqual(expected, res_dict)
    vol_db = objects.Volume.get_by_id(self.context, volume.id)
    self.assertEqual('uploading', vol_db.status)
    self.assertEqual('in-use', vol_db.previous_status)
@mock.patch.object(volume_api.API, "get_volume_image_metadata")
@mock.patch.object(glance.GlanceImageService, "create")
@mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image")
def test_copy_volume_to_image_without_protected_prop(
        self, mock_volume_to_image, mock_create, mock_get_image_metadata):
    """Test protected property is not defined with the root image.

    With empty glance metadata the upload still succeeds and moves the
    volume to 'uploading'.
    """
    volume, expected = self._create_volume_with_type()
    mock_get_image_metadata.return_value = {}
    mock_create.side_effect = self.fake_image_service_create
    req = fakes.HTTPRequest.blank(
        '/v3/fakeproject/volumes/%s/action' % volume.id,
        use_admin_context=self.context.is_admin)
    body = self._get_os_volume_upload_image()
    res_dict = self.controller._volume_upload_image(req, volume.id,
                                                    body=body)
    self.assertDictEqual(expected, res_dict)
    vol_db = objects.Volume.get_by_id(self.context, volume.id)
    self.assertEqual('uploading', vol_db.status)
    self.assertEqual('available', vol_db.previous_status)
@mock.patch.object(glance.GlanceImageService, "create")
@mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image")
def test_copy_volume_to_image_without_core_prop(
        self, mock_copy_to_image, mock_create):
    """Test glance_core_properties defined in cinder.conf is empty.

    An empty core-properties list must not break the upload path.
    """
    volume, expected = self._create_volume_with_type()
    mock_create.side_effect = self.fake_image_service_create
    # Override the option under test to an empty list.
    self.override_config('glance_core_properties', [])
    req = fakes.HTTPRequest.blank(
        '/v3/fakeproject/volumes/%s/action' % volume.id,
        use_admin_context=self.context.is_admin)
    body = self._get_os_volume_upload_image()
    res_dict = self.controller._volume_upload_image(req, volume.id,
                                                    body=body)
    self.assertDictEqual(expected, res_dict)
    vol_db = objects.Volume.get_by_id(self.context, volume.id)
    self.assertEqual('uploading', vol_db.status)
    self.assertEqual('available', vol_db.previous_status)
@mock.patch.object(volume_api.API, "get_volume_image_metadata")
@mock.patch.object(glance.GlanceImageService, "create")
@mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image")
def test_copy_volume_to_image_volume_type_none(
        self,
        mock_copy_volume_to_image,
        mock_create,
        mock_get_volume_image_metadata):
    """Test create image from volume with none type volume."""
    # NOTE(review): the docstring says "none type volume" but the helper
    # below creates a volume WITH a volume type — confirm whether a
    # type-less volume was intended here.
    volume, expected = self._create_volume_with_type()
    mock_create.side_effect = self.fake_image_service_create
    req = fakes.HTTPRequest.blank(
        '/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, volume.id),
        use_admin_context=self.context.is_admin)
    body = self._get_os_volume_upload_image()
    res_dict = self.controller._volume_upload_image(req, volume.id,
                                                    body=body)
    self.assertDictEqual(expected, res_dict)
@mock.patch.object(volume_api.API, "get_volume_image_metadata")
@mock.patch.object(glance.GlanceImageService, "create")
@mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image")
def test_copy_volume_to_image_version_with_params(
        self,
        mock_copy_volume_to_image,
        mock_create,
        mock_get_volume_image_metadata):
    """Test image upload with the newer visibility/protected parameters.

    At microversion UPLOAD_IMAGE_PARAMS the request may carry
    ``visibility`` and ``protected``; both must be echoed back in the
    response.
    """
    volume, expected = self._create_volume_with_type()
    mock_get_volume_image_metadata.return_value = {
        "volume_id": volume.id,
        "key": "x_billing_code_license",
        "value": "246254365"}
    mock_create.side_effect = self.fake_image_service_create_with_params
    mock_copy_volume_to_image.side_effect = \
        self.fake_rpc_copy_volume_to_image
    req = fakes.HTTPRequest.blank(
        '/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, volume.id),
        use_admin_context=self.context.is_admin)
    req.environ['cinder.context'].is_admin = True
    req.headers = mv.get_mv_header(mv.UPLOAD_IMAGE_PARAMS)
    req.api_version_request = mv.get_api_version(mv.UPLOAD_IMAGE_PARAMS)
    # (Removed a duplicated _get_os_volume_upload_image() call.)
    body = self._get_os_volume_upload_image()
    body['os-volume_upload_image']['visibility'] = 'public'
    body['os-volume_upload_image']['protected'] = True
    res_dict = self.controller._volume_upload_image(req,
                                                    volume.id,
                                                    body=body)
    expected['os-volume_upload_image'].update(visibility='public',
                                              protected=True)
    self.assertDictEqual(expected, res_dict)
@mock.patch.object(volume_api.API, "get_volume_image_metadata")
@mock.patch.object(glance.GlanceImageService, "create")
@mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image")
def test_copy_volume_to_image_vhd(
        self, mock_copy_to_image, mock_create, mock_get_image_metadata):
    """Test create image from volume with vhd disk format."""
    volume, expected = self._create_volume_with_type()
    mock_get_image_metadata.return_value = {}
    mock_create.side_effect = self.fake_image_service_create
    req = fakes.HTTPRequest.blank(
        '/v3/fakeproject/volumes/%s/action' % volume.id)
    body = self._get_os_volume_upload_image()
    body['os-volume_upload_image']['force'] = True
    body['os-volume_upload_image']['container_format'] = 'bare'
    # 'vhd' must be accepted as a valid disk format for upload.
    body['os-volume_upload_image']['disk_format'] = 'vhd'
    res_dict = self.controller._volume_upload_image(req, volume.id,
                                                    body=body)
    self.assertDictEqual(expected, res_dict)
    vol_db = objects.Volume.get_by_id(self.context, volume.id)
    self.assertEqual('uploading', vol_db.status)
    self.assertEqual('available', vol_db.previous_status)
@mock.patch.object(volume_api.API, "get_volume_image_metadata")
@mock.patch.object(glance.GlanceImageService, "create")
@mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image")
def test_copy_volume_to_image_vhdx(
        self, mock_copy_to_image, mock_create, mock_get_image_metadata):
    """Test create image from volume with vhdx disk format."""
    volume, expected = self._create_volume_with_type()
    mock_get_image_metadata.return_value = {}
    mock_create.side_effect = self.fake_image_service_create
    req = fakes.HTTPRequest.blank(
        '/v3/fakeproject/volumes/%s/action' % volume.id)
    body = self._get_os_volume_upload_image()
    body['os-volume_upload_image']['force'] = True
    body['os-volume_upload_image']['container_format'] = 'bare'
    # 'vhdx' must be accepted as a valid disk format for upload.
    body['os-volume_upload_image']['disk_format'] = 'vhdx'
    res_dict = self.controller._volume_upload_image(req, volume.id,
                                                    body=body)
    self.assertDictEqual(expected, res_dict)
    vol_db = objects.Volume.get_by_id(self.context, volume.id)
    self.assertEqual('uploading', vol_db.status)
    self.assertEqual('available', vol_db.previous_status)
def _build_reimage_req(self, body, vol_id,
                       version=mv.SUPPORT_REIMAGE_VOLUME):
    """Build a POST os-reimage action request for ``vol_id``.

    Fix: the URL previously interpolated the *builtin* ``id`` (a
    function object) instead of the ``vol_id`` parameter, producing a
    bogus request path for every caller.
    """
    req = fakes.HTTPRequest.blank(
        '/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, vol_id))
    req.method = "POST"
    req.body = jsonutils.dump_as_bytes(body)
    req.environ['cinder.context'] = self.context
    req.api_version_request = mv.get_api_version(version)
    req.headers["content-type"] = "application/json"
    return req
@ddt.data(None, False, True)
@mock.patch.object(volume_api.API, "reimage")
def test_volume_reimage(self, reimage_reserved, mock_image):
    """Reimage succeeds with reimage_reserved omitted, False, or True."""
    vol = utils.create_volume(self.context)
    body = {"os-reimage": {"image_id": fake.IMAGE_ID}}
    # None means "leave the key out of the request body" entirely.
    if reimage_reserved is not None:
        body["os-reimage"]["reimage_reserved"] = reimage_reserved
    req = self._build_reimage_req(body, vol.id)
    self.controller._reimage(req, vol.id, body=body)
@mock.patch.object(volume_api.API, "reimage")
def test_volume_reimage_invaild_params(self, mock_image):
    """A non-boolean reimage_reserved value fails schema validation."""
    vol = utils.create_volume(self.context)
    body = {"os-reimage": {"image_id": fake.IMAGE_ID,
                           "reimage_reserved": 'wrong'}}
    # Pass the volume id, not the volume object, to the request builder
    # (consistent with _build_reimage_req's vol_id parameter).
    req = self._build_reimage_req(body, vol.id)
    self.assertRaises(exception.ValidationError,
                      self.controller._reimage, req,
                      vol.id, body=body)
def test_volume_reimage_before_3_68(self):
    """Reimage is rejected on microversions older than 3.68."""
    volume = utils.create_volume(self.context)
    reimage_body = {"os-reimage": {"image_id": fake.IMAGE_ID}}
    req = self._build_reimage_req(reimage_body, volume.id,
                                  version="3.67")
    self.assertRaises(exception.VersionNotFoundForAPIMethod,
                      self.controller._reimage, req, volume.id,
                      body=reimage_body)
def test_reimage_volume_invalid_status(self):
    """InvalidVolume from the volume API maps to HTTPBadRequest."""
    def fake_reimage_volume(*args, **kwargs):
        # Simulate the API refusing a volume in the wrong state.
        msg = "Volume status must be available."
        raise exception.InvalidVolume(reason=msg)
    self.mock_object(volume.api.API, 'reimage',
                     fake_reimage_volume)
    vol = utils.create_volume(self.context)
    body = {"os-reimage": {"image_id": fake.IMAGE_ID}}
    # Pass the volume id, not the volume object, to the request builder.
    req = self._build_reimage_req(body, vol.id)
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller._reimage, req,
                      vol.id, body=body)
@mock.patch('cinder.context.RequestContext.authorize')
def test_reimage_volume_attach_more_than_one_server(self, mock_authorize):
    """Reimaging a volume attached to multiple servers returns 409."""
    vol = utils.create_volume(self.context)
    # Fake three attachments so the multi-attach guard triggers.
    va_objs = [objects.VolumeAttachment(context=self.context, id=i)
               for i in [fake.OBJECT_ID, fake.OBJECT2_ID, fake.OBJECT3_ID]]
    va_list = objects.VolumeAttachmentList(context=self.context,
                                           objects=va_objs)
    vol.volume_attachment = va_list
    self.mock_object(volume_api.API, 'get', return_value=vol)
    body = {"os-reimage": {"image_id": fake.IMAGE_ID}}
    # Pass the volume id, not the volume object, to the request builder.
    req = self._build_reimage_req(body, vol.id)
    self.assertRaises(webob.exc.HTTPConflict,
                      self.controller._reimage, req, vol.id, body=body)
| 46.355117
| 79
| 0.587328
| 8,574
| 75,188
| 4.882785
| 0.057033
| 0.018154
| 0.029237
| 0.01899
| 0.824722
| 0.793551
| 0.749074
| 0.717521
| 0.697265
| 0.671683
| 0
| 0.005215
| 0.306352
| 75,188
| 1,621
| 80
| 46.383714
| 0.797504
| 0.062776
| 0
| 0.680211
| 0
| 0
| 0.132669
| 0.048884
| 0
| 0
| 0
| 0
| 0.069977
| 1
| 0.066968
| false
| 0.00301
| 0.018811
| 0.004515
| 0.100075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
db8b393cbe6830ac70ae9a6b21105ba54542bbc2
| 906
|
py
|
Python
|
spider/api/config.py
|
weibo-spider/back-end
|
0ccc8ad1a09a6ba1cd91c8030b554a14ed75e4f1
|
[
"Apache-2.0"
] | null | null | null |
spider/api/config.py
|
weibo-spider/back-end
|
0ccc8ad1a09a6ba1cd91c8030b554a14ed75e4f1
|
[
"Apache-2.0"
] | null | null | null |
spider/api/config.py
|
weibo-spider/back-end
|
0ccc8ad1a09a6ba1cd91c8030b554a14ed75e4f1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: UTF-8 -*-
# author: 昌维 (Changwei) [867597730@qq.com]
# github: https://github.com/cw1997
# Weibo mobile-API URL templates used by the spider; %d placeholders are
# filled with a user id and (where applicable) a page/since_id cursor.
# Earlier variant with a hard-coded containerid, kept for reference:
# str_user_info_api = "https://m.weibo.cn/api/container/getIndex?uid=%d&luicode=20000174&type=uid&value=%d&containerid=1005051644114654"
str_user_info_api = "https://m.weibo.cn/api/container/getIndex?uid=%d&luicode=20000174&type=uid&value=%d&containerid=100505%d"
# Earlier variant using the /util/ path, kept for reference:
# str_followers_api = "https://m.weibo.cn/util/container/getIndex?containerid=231051_-_followers_-_%d&page=%d"
str_followers_api = "https://m.weibo.cn/api/container/getIndex?containerid=231051_-_followers_-_%d&page=%d"
# Earlier variant using the /util/ path, kept for reference:
# str_fans_api = "https://m.weibo.cn/util/container/getIndex?containerid=231051_-_fans_-_%d&since_id=%d"
str_fans_api = "https://m.weibo.cn/api/container/getIndex?containerid=231051_-_fans_-_%d&since_id=%d"
# NOTE(review): uid is hard-coded in this endpoint, unlike the templates
# above — confirm whether it should also be parameterized.
personal_info_api = "https://m.weibo.cn/profile/info?uid=1642591402"
# Sample uid used for manual testing.
int_test_uid = 1934383474
# int_test_uid = 1644114654
| 53.294118
| 136
| 0.771523
| 141
| 906
| 4.687943
| 0.304965
| 0.08472
| 0.09531
| 0.14826
| 0.770045
| 0.770045
| 0.739788
| 0.739788
| 0.689864
| 0.596067
| 0
| 0.12297
| 0.048565
| 906
| 16
| 137
| 56.625
| 0.643852
| 0.502208
| 0
| 0
| 0
| 0.6
| 0.72009
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
db98c8be707db77044028c525d93b1b3718964cb
| 122
|
py
|
Python
|
taglets/labelmodel/__init__.py
|
BatsResearch/taglets
|
0fa9ebeccc9177069aa09b2da84746b7532e3495
|
[
"Apache-2.0"
] | 13
|
2021-11-10T13:17:10.000Z
|
2022-03-30T22:56:52.000Z
|
taglets/labelmodel/__init__.py
|
BatsResearch/taglets
|
0fa9ebeccc9177069aa09b2da84746b7532e3495
|
[
"Apache-2.0"
] | 1
|
2021-11-10T16:01:47.000Z
|
2021-11-10T16:01:47.000Z
|
taglets/labelmodel/__init__.py
|
BatsResearch/taglets
|
0fa9ebeccc9177069aa09b2da84746b7532e3495
|
[
"Apache-2.0"
] | 2
|
2022-02-14T22:40:29.000Z
|
2022-02-27T04:27:48.000Z
|
from .label_model import LabelModel
from .weighted import UnweightedVote, WeightedVote
from .amcl import AMCLWeightedVote
| 30.5
| 50
| 0.860656
| 14
| 122
| 7.428571
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106557
| 122
| 3
| 51
| 40.666667
| 0.954128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
db9b0826ad7703a34e39cee033f95540c02b2188
| 21
|
py
|
Python
|
icepp/error_mitigation/cnot_error/__init__.py
|
pearcandy/aqcel
|
86e2d97d427f6a31ef223c69defbe3f853a69aa2
|
[
"Apache-2.0"
] | null | null | null |
icepp/error_mitigation/cnot_error/__init__.py
|
pearcandy/aqcel
|
86e2d97d427f6a31ef223c69defbe3f853a69aa2
|
[
"Apache-2.0"
] | null | null | null |
icepp/error_mitigation/cnot_error/__init__.py
|
pearcandy/aqcel
|
86e2d97d427f6a31ef223c69defbe3f853a69aa2
|
[
"Apache-2.0"
] | null | null | null |
from .fiim import zne
| 21
| 21
| 0.809524
| 4
| 21
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 21
| 1
| 21
| 21
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
919b5b973902efe1d2f139a006b1fb0b10037841
| 306
|
py
|
Python
|
app/models/enum_field.py
|
chinese-bbb/web-backend
|
635d9021b5d375b782b71c947877cf461b982997
|
[
"MIT"
] | 4
|
2019-06-16T21:14:14.000Z
|
2019-08-30T09:29:25.000Z
|
app/models/enum_field.py
|
chinese-bbb/web-backend
|
635d9021b5d375b782b71c947877cf461b982997
|
[
"MIT"
] | 8
|
2019-06-11T15:55:20.000Z
|
2021-06-01T23:43:20.000Z
|
app/models/enum_field.py
|
chinese-bbb/web-backend
|
635d9021b5d375b782b71c947877cf461b982997
|
[
"MIT"
] | 4
|
2019-04-23T08:54:54.000Z
|
2019-07-31T02:47:12.000Z
|
from marshmallow_enum import EnumField as BaseEnumField
# todo: revert it once https://github.com/justanr/marshmallow_enum/issues/23 is resolved
class EnumField(BaseEnumField):
    """EnumField that drops ``**kwargs`` before delegating to the base.

    Workaround: marshmallow passes extra keyword arguments to
    ``_deserialize``, which the upstream marshmallow_enum implementation
    does not accept (see the todo comment above); swallow them here and
    forward only the positional arguments.
    """
    def _deserialize(self, value, attr, data, **kwargs):
        return super(EnumField, self)._deserialize(value, attr, data)
| 38.25
| 88
| 0.767974
| 39
| 306
| 5.923077
| 0.74359
| 0.12987
| 0.112554
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007547
| 0.133987
| 306
| 7
| 89
| 43.714286
| 0.864151
| 0.281046
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.