hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5f82b5728f8bd34310fc6e8a05849f16ad49be97
| 48
|
py
|
Python
|
my_classes/Tuples/.history/coloruser_20210724211229.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
my_classes/Tuples/.history/coloruser_20210724211229.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
my_classes/Tuples/.history/coloruser_20210724211229.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
import colorsfrom colors import random colorsys
| 24
| 47
| 0.875
| 6
| 48
| 7
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 48
| 1
| 48
| 48
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 1
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5f8c580b5f4b734e3f64352208635fddd9652d01
| 129
|
py
|
Python
|
Functions/NameFunction.py
|
JaydenL33/ReLearningPython
|
60d54172eefb29f285ca8976350010829d312a16
|
[
"MIT"
] | null | null | null |
Functions/NameFunction.py
|
JaydenL33/ReLearningPython
|
60d54172eefb29f285ca8976350010829d312a16
|
[
"MIT"
] | null | null | null |
Functions/NameFunction.py
|
JaydenL33/ReLearningPython
|
60d54172eefb29f285ca8976350010829d312a16
|
[
"MIT"
] | null | null | null |
def name_print(name):
print('The name entered is ' + name + " and it is " + str(len(name)) + " chars long")
name_print('alice')
| 32.25
| 86
| 0.643411
| 21
| 129
| 3.857143
| 0.619048
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178295
| 129
| 4
| 87
| 32.25
| 0.764151
| 0
| 0
| 0
| 0
| 0
| 0.361538
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
5fc5dbe4423f07acbd337dd7daace6ae81d0a1b2
| 394
|
py
|
Python
|
docproduct/__init__.py
|
lordofprograms/DocProduct
|
8c365404d2a0b717f506c2d32f0bb9a9a0a13220
|
[
"MIT"
] | null | null | null |
docproduct/__init__.py
|
lordofprograms/DocProduct
|
8c365404d2a0b717f506c2d32f0bb9a9a0a13220
|
[
"MIT"
] | null | null | null |
docproduct/__init__.py
|
lordofprograms/DocProduct
|
8c365404d2a0b717f506c2d32f0bb9a9a0a13220
|
[
"MIT"
] | null | null | null |
from docproduct.train_data_to_embedding import train_data_to_embedding
from docproduct.train_ffn import train_ffn
from docproduct.train_bertffn import train_bertffn
from docproduct import tokenization
from docproduct import loss
from docproduct import metrics
from docproduct import dataset
from docproduct import models
from docproduct import predictor
from docproduct import mqa_load_dataset
| 35.818182
| 70
| 0.890863
| 55
| 394
| 6.163636
| 0.309091
| 0.412979
| 0.412979
| 0.117994
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101523
| 394
| 10
| 71
| 39.4
| 0.957627
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
395d7d256ed8fa4f0021c1a62b08969f3af10802
| 16
|
py
|
Python
|
coverage-3.7.1/tests/farm/html/src/m3.py
|
I-Valchev/UrPas
|
7babb5208fab6975891fba440e633cc9ad6e8718
|
[
"Apache-2.0"
] | 1
|
2015-03-04T09:00:44.000Z
|
2015-03-04T09:00:44.000Z
|
coverage-3.7.1/tests/farm/html/src/m3.py
|
I-Valchev/UrPas
|
7babb5208fab6975891fba440e633cc9ad6e8718
|
[
"Apache-2.0"
] | null | null | null |
coverage-3.7.1/tests/farm/html/src/m3.py
|
I-Valchev/UrPas
|
7babb5208fab6975891fba440e633cc9ad6e8718
|
[
"Apache-2.0"
] | null | null | null |
m3a = 1
m3b = 2
| 5.333333
| 7
| 0.5
| 4
| 16
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4
| 0.375
| 16
| 2
| 8
| 8
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3964d06fec8fe5f6090f4e43ab912005d1027471
| 145
|
py
|
Python
|
snippets/python/django/urls/csrf_view.py
|
c6401/Snippets
|
a88d97005658eeda99f1a2766e3d069a64e142cb
|
[
"MIT"
] | null | null | null |
snippets/python/django/urls/csrf_view.py
|
c6401/Snippets
|
a88d97005658eeda99f1a2766e3d069a64e142cb
|
[
"MIT"
] | null | null | null |
snippets/python/django/urls/csrf_view.py
|
c6401/Snippets
|
a88d97005658eeda99f1a2766e3d069a64e142cb
|
[
"MIT"
] | null | null | null |
from django.middleware.csrf import get_token
from django.http import JsonResponse
url(r'^csrf', lambda r: JsonResponse({'csrf': get_token(r)})),
| 36.25
| 62
| 0.772414
| 22
| 145
| 5
| 0.545455
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089655
| 145
| 3
| 63
| 48.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.062069
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3968a9d7cae1292ae8c65255f89a6b5a838b4b44
| 151
|
py
|
Python
|
problems/count_the_monkeys.py
|
stereoabuse/codewars
|
d6437afaef38c3601903891b8b9cb0f84c108c54
|
[
"MIT"
] | null | null | null |
problems/count_the_monkeys.py
|
stereoabuse/codewars
|
d6437afaef38c3601903891b8b9cb0f84c108c54
|
[
"MIT"
] | null | null | null |
problems/count_the_monkeys.py
|
stereoabuse/codewars
|
d6437afaef38c3601903891b8b9cb0f84c108c54
|
[
"MIT"
] | null | null | null |
## Count the Monkeys!
## 8 kyu
## https://www.codewars.com//kata/56f69d9f9400f508fb000ba7
def monkey_count(n):
return list(range(1,n+1))
| 21.571429
| 60
| 0.662252
| 21
| 151
| 4.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153226
| 0.178808
| 151
| 7
| 61
| 21.571429
| 0.645161
| 0.543046
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
3983e9a6b792a3c2ee1da1bc7231515f70ac5478
| 4,287
|
py
|
Python
|
tests/core/data_providers/test_ccxt_provider.py
|
investing-algorithms/investing-algorithm-framework
|
d579e142a3857e2e2dfb59b7d6e54202f7df5466
|
[
"Apache-2.0"
] | 1
|
2019-12-23T21:23:45.000Z
|
2019-12-23T21:23:45.000Z
|
tests/core/data_providers/test_ccxt_provider.py
|
investing-algorithms/investing-algorithm-framework
|
d579e142a3857e2e2dfb59b7d6e54202f7df5466
|
[
"Apache-2.0"
] | null | null | null |
tests/core/data_providers/test_ccxt_provider.py
|
investing-algorithms/investing-algorithm-framework
|
d579e142a3857e2e2dfb59b7d6e54202f7df5466
|
[
"Apache-2.0"
] | 1
|
2019-12-23T21:23:50.000Z
|
2019-12-23T21:23:50.000Z
|
from investing_algorithm_framework import TradingTimeUnit, \
TradingDataTypes, Ticker, OrderBook, OperationalException
from tests.resources import TestBase
class Test(TestBase):
def setUp(self):
super(Test, self).setUp()
self.start_algorithm()
def test_provide_ohlcv(self) -> None:
data = self.algo_app.algorithm.get_data(
trading_data_type=TradingDataTypes.OHLCV,
target_symbol="BTC",
trading_symbol="USDT",
trading_time_unit=TradingTimeUnit.ONE_DAY,
limit=5,
market="binance"
)
self.assertEqual(5, len(data["ohlcv"].get_data()))
data = self.algo_app.algorithm.get_data(
trading_data_type=TradingDataTypes.OHLCV,
target_symbol="BTC",
trading_symbol="USDT",
trading_time_unit=TradingTimeUnit.ONE_MINUTE,
limit=7200,
market="binance"
)
self.assert_almost_equal(7200, len(data["ohlcv"].get_data()), 200)
data = self.algo_app.algorithm.get_data(
trading_data_type=TradingDataTypes.OHLCV,
target_symbol="BTC",
trading_symbol="USDT",
trading_time_unit=TradingTimeUnit.ONE_DAY,
limit=100,
market="binance"
)
self.assertEqual(100, len(data["ohlcv"].get_data()))
def test_provide_ohlcvs(self):
data = self.algo_app.algorithm.get_data(
trading_data_type=TradingDataTypes.OHLCV,
target_symbols=["BTC", "DOT"],
trading_symbol="USDT",
trading_time_unit=TradingTimeUnit.ONE_DAY,
limit=5,
market="binance"
)
for ohlcv_data in data["ohlcvs"]:
self.assertEqual(5, len(ohlcv_data.get_data()))
data = self.algo_app.algorithm.get_data(
trading_data_type=TradingDataTypes.OHLCV,
target_symbols=["BTC", "DOT"],
trading_symbol="USDT",
trading_time_unit=TradingTimeUnit.ONE_MINUTE,
limit=7200,
market="binance"
)
for ohlcv_data in data["ohlcvs"]:
self.assert_almost_equal(7200, len(ohlcv_data.get_data()), 200)
data = self.algo_app.algorithm.get_data(
trading_data_type=TradingDataTypes.OHLCV,
target_symbols=["BTC", "DOT"],
trading_symbol="USDT",
trading_time_unit=TradingTimeUnit.ONE_DAY,
limit=100,
market="binance"
)
for ohlcv_data in data["ohlcvs"]:
self.assertEqual(100, len(ohlcv_data.get_data()))
def test_ticker(self):
data = self.algo_app.algorithm.get_data(
trading_data_type=TradingDataTypes.TICKER,
target_symbol="BTC",
trading_symbol="USDT",
market="binance"
)
self.assertTrue(isinstance(data["ticker"], Ticker))
def test_tickers(self):
data = self.algo_app.algorithm.get_data(
trading_data_type=TradingDataTypes.TICKER,
target_symbols=["BTC", "DOT"],
trading_symbol="USDT",
market="binance"
)
for ticker in data["tickers"]:
self.assertTrue(isinstance(ticker, Ticker))
def test_order_book(self):
data = self.algo_app.algorithm.get_data(
trading_data_type=TradingDataTypes.ORDER_BOOK,
target_symbol="BTC",
trading_symbol="USDT",
market="binance"
)
self.assertTrue(isinstance(data["order_book"], OrderBook))
def test_order_books(self):
data = self.algo_app.algorithm.get_data(
trading_data_type=TradingDataTypes.ORDER_BOOK,
target_symbols=["BTC", "DOT"],
trading_symbol="USDT",
market="binance"
)
for order_book in data["order_books"]:
self.assertTrue(isinstance(order_book, OrderBook))
def test_raw(self):
with self.assertRaises(OperationalException):
self.algo_app.algorithm.get_data(
trading_data_type=TradingDataTypes.RAW,
target_symbols=["BTC", "DOT"],
trading_symbol="USDT",
market="binance"
)
| 31.755556
| 75
| 0.595521
| 448
| 4,287
| 5.430804
| 0.142857
| 0.048911
| 0.049733
| 0.090423
| 0.792848
| 0.739416
| 0.718044
| 0.718044
| 0.718044
| 0.686395
| 0
| 0.012646
| 0.299044
| 4,287
| 134
| 76
| 31.992537
| 0.797005
| 0
| 0
| 0.62963
| 0
| 0
| 0.05575
| 0
| 0
| 0
| 0
| 0
| 0.101852
| 1
| 0.074074
| false
| 0
| 0.018519
| 0
| 0.101852
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3988272d8844e11c36e17f72ddf97775f1df2323
| 101
|
py
|
Python
|
Session-3/Strings/S3SS3.py
|
saianuragpeddu/python-assignemts
|
a6bb192f2c0ef8ea86531c1a98f1b76150fa474b
|
[
"MIT"
] | null | null | null |
Session-3/Strings/S3SS3.py
|
saianuragpeddu/python-assignemts
|
a6bb192f2c0ef8ea86531c1a98f1b76150fa474b
|
[
"MIT"
] | null | null | null |
Session-3/Strings/S3SS3.py
|
saianuragpeddu/python-assignemts
|
a6bb192f2c0ef8ea86531c1a98f1b76150fa474b
|
[
"MIT"
] | 1
|
2019-07-06T02:37:58.000Z
|
2019-07-06T02:37:58.000Z
|
def removeLetter(word, letter):
return word.replace(letter, "")
print(removeLetter("ban", "a"))
| 20.2
| 35
| 0.683168
| 12
| 101
| 5.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128713
| 101
| 4
| 36
| 25.25
| 0.784091
| 0
| 0
| 0
| 0
| 0
| 0.039604
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
399b1d093da90e426334f51abac70479a8790685
| 12,624
|
py
|
Python
|
opencv_demos/radix_sort.py
|
perfectbullet/albumy
|
6e0fa1bef31f470c19fd6bcf6751d0be6510d864
|
[
"MIT"
] | null | null | null |
opencv_demos/radix_sort.py
|
perfectbullet/albumy
|
6e0fa1bef31f470c19fd6bcf6751d0be6510d864
|
[
"MIT"
] | null | null | null |
opencv_demos/radix_sort.py
|
perfectbullet/albumy
|
6e0fa1bef31f470c19fd6bcf6751d0be6510d864
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#encoding=utf-8
import math
def radix_sort(a, radix):
"""a为整数列表, radix为基数, 这里用一个质数作为底数"""
log_mx = math.log(max(a)+1, radix)
print('a = {}'.format(a))
K = int(math.ceil(log_mx)) # 用K位数可表示任意整数
bucket = None
for i in range(1, K+1): # K次循环
bucket = [[] for j in range(radix)] # 不能用 [[]]*radix,否则相当于开了radix个完全相同的list对象
for val in a:
bucket[val % (radix**i)//(radix**(i-1))].append(val) # 獲得整數第K位數字 (從低到高)
del a[:]
for each in bucket:
a.extend(each) # 桶合并
if i == K:
bucket = [b for b in bucket if b != []]
grup_num = len(bucket)
if grup_num != radix:
bucket = radix_sort(a, grup_num)
return bucket
def radix_sort2(a, radix=7):
"""a为整数列表, radix为基数, 这里用一个质数作为底数"""
log_mx = math.log(max(a)+1, radix) # 求得最大指数
print('a = {}'.format(a))
K = int(math.ceil(log_mx)) # 用K位数可表示任意整数
bucket = None
for i in range(1, K+1): # K次循环
bucket = [[] for j in range(radix)] # 不能用 [[]]*radix,否则相当于开了radix个完全相同的list对象
for val in a:
# 获取整数在低K位的数字, val % (radix**i)//(radix**(i-1))
bucket[val % (radix**i)//(radix**(i-1))].append(val)
del a[:]
for each in bucket:
a.extend(each) # 桶合并
if i == K:
bucket = [b for b in bucket if b != []]
print('bucket = {}'.format(bucket))
return bucket
if __name__ == '__main__':
tt = [[33, 33, 36, 58, 122, 127, 136, 138, 138], [0, 0, 30, 32, 37, 53, 54, 54, 132, 134, 138, 138, 139, 140, 140], [0, 0, 0, 1, 3, 20, 22, 23, 24, 31, 31, 31, 32, 32, 35, 35, 35, 46, 53, 68, 68, 69, 72, 75, 77, 144, 148, 149], [0, 0, 69, 69, 146, 147, 147, 157, 157], [22, 22, 54, 146, 147], [0, 0, 21, 22, 55, 56, 144, 154], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 21, 22, 25, 25, 43, 56, 57, 157, 157], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 17, 21, 22, 27, 43, 54, 56, 141, 144, 150, 156, 156, 157, 157], [32, 32, 60, 66, 69, 147, 151, 153, 154], [46, 46, 48, 49, 59, 60, 60, 61, 63, 63, 64, 64, 64, 65, 65, 65, 66, 66, 67, 67, 67, 67, 67, 67, 67, 67, 68, 69, 69, 69, 78, 78, 141, 141, 143], [53, 70, 71, 71, 71, 71, 72, 72, 72, 107, 109], [0, 0, 53, 64, 65, 65, 65, 67, 67, 67, 68, 68, 68, 68, 69, 69, 69, 69, 72, 73, 130, 157, 157, 157, 157], [52, 53, 54, 54, 54, 55, 55, 55, 55, 56, 56, 56, 56, 57, 58, 58, 59, 61, 62, 144, 144], [39, 39, 42, 43, 43, 143, 144, 144, 150], [36, 38, 39, 40, 40, 40, 40, 40, 42, 42, 42, 43, 43], [16, 18, 19, 147, 147], [47, 47, 48, 48, 48, 48, 50, 51, 51, 51, 142, 143, 143, 143, 144], [38, 47, 47, 48, 48, 48, 48, 48, 48, 48, 48, 48, 49, 49, 49, 50, 50, 50, 50, 50, 52, 60, 60, 61, 66, 76, 78, 78, 81, 97, 110, 128, 139, 140, 143], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45, 46, 49, 50, 52, 52, 69, 76, 78, 82, 85, 89, 132, 132, 135, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157], [0, 0, 0, 0, 0, 1, 46, 47, 47, 48, 53, 134, 136, 136, 154, 157, 157, 157, 157, 157, 157], [43, 48, 49, 57, 58, 59, 60, 61, 62], [0, 0, 48, 49, 49, 49, 56, 57, 58, 58, 61, 65, 66, 78, 137, 137, 138], [53, 55, 58, 58, 58, 61, 62, 143, 144, 144, 147], [45, 53, 53, 55, 144, 145, 145, 146], [0, 0, 0, 0, 0, 0, 1, 1, 1, 45, 49, 51, 51, 157, 157, 157, 157], [30, 58, 58, 58, 58, 59, 144, 150], [40, 61, 62, 63, 
63, 64, 64, 64, 64, 64, 64, 65, 65], [41, 41, 68, 68, 69, 70, 71, 72], [54, 55, 59, 60, 147]]
tt2 = [[338, 339, 361, 584, 1226, 1270, 1361, 1385, 1386], [0, 0, 305, 321, 375, 539, 541, 547, 1328, 1340, 1382, 1389, 1390, 1404, 1405], [0, 8, 8, 10, 36, 203, 224, 235, 244, 311, 314, 315, 321, 321, 356, 356, 357, 461, 533, 680, 682, 697, 725, 753, 770, 1447, 1484, 1491], [236, 237, 343, 345, 346, 445, 530, 531, 532, 661, 698, 727, 741, 1459], [220, 224, 542, 1463, 1479], [0, 0, 223, 223, 233, 559, 565, 1454, 1546], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 6, 8, 8, 8, 8, 10, 215, 219, 225, 226, 249, 287, 363, 541, 550, 558, 1570, 1570], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 10, 14, 14, 16, 16, 16, 17, 27, 27, 42, 43, 50, 215, 223, 235, 251, 265, 288, 557, 565, 626, 626, 626, 630, 1570, 1570, 1570, 1570, 1570, 1570], [320, 323, 607, 664, 691, 1474, 1519, 1537, 1544], [461, 461, 489, 490, 594, 605, 609, 618, 630, 635, 641, 644, 646, 654, 654, 654, 663, 664, 670, 671, 671, 672, 672, 673, 673, 679, 681, 696, 698, 698, 785, 785, 1413, 1413, 1432], [532, 707, 715, 715, 716, 716, 724, 724, 725, 1072, 1091], [0, 0, 523, 523, 653, 655, 663, 664, 668, 670, 670, 680, 681, 689, 689, 689, 694, 696, 713, 723, 724, 1309, 1570, 1570, 1570, 1570], [522, 539, 540, 548, 548, 550, 551, 556, 557, 565, 566, 567, 568, 575, 580, 583, 595, 616, 621, 1447, 1449], [390, 392, 424, 430, 430, 1438, 1441, 1442, 1502], [365, 385, 399, 400, 400, 401, 402, 404, 425, 425, 428, 435, 436], [168, 182, 198, 1473, 1475], [476, 479, 480, 481, 481, 481, 506, 512, 513, 515, 1429, 1437, 1438, 1439, 1444], [385, 475, 479, 480, 480, 480, 480, 480, 486, 488, 488, 489, 495, 497, 497, 504, 504, 504, 505, 507, 522, 601, 602, 619, 661, 768, 785, 785, 819, 977, 1101, 1282, 1395, 1405, 1434], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 454, 461, 495, 505, 522, 522, 698, 767, 785, 829, 854, 898, 1325, 1327, 1350, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 
1570, 1570, 1570, 1570, 1570], [0, 0, 8, 10, 471, 473, 474, 496, 521, 529, 1346, 1347, 1356, 1361, 1362, 1370, 1373, 1374, 1570, 1570], [437, 487, 496, 576, 582, 590, 600, 618, 622], [0, 0, 488, 496, 496, 498, 568, 575, 584, 585, 611, 655, 668, 785, 1371, 1374, 1380], [530, 558, 583, 583, 584, 610, 628, 1433, 1447, 1447, 1474], [452, 532, 532, 558, 1448, 1451, 1456, 1467], [0, 0, 0, 0, 0, 0, 0, 0, 6, 8, 8, 476, 486, 513, 515, 1570, 1570], [306, 583, 583, 584, 584, 592, 1443, 1501], [408, 619, 627, 636, 637, 644, 644, 645, 646, 646, 647, 653, 653], [416, 417, 687, 687, 698, 709, 714, 723], [546, 550, 593, 601, 1474]]
tt3 = [[132, 132, 141, 228, 479, 496, 531, 541, 541], [0, 0, 117, 124, 145, 208, 209, 211, 512, 517, 533, 536, 537, 542, 542], [0, 2, 2, 3, 11, 66, 73, 77, 80, 102, 103, 103, 105, 105, 116, 117, 117, 151, 174, 223, 223, 228, 237, 247, 252, 474, 487, 489], [0, 0, 248, 254, 530, 535, 536, 542, 570, 570], [64, 65, 158, 427, 432], [0, 0, 72, 72, 75, 181, 183, 471, 501], [0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 3, 3, 9, 55, 72, 74, 79, 84, 85, 94, 187, 190, 518, 518], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 14, 76, 76, 76, 76, 81, 95, 101, 104, 109, 196, 203, 218, 218, 221, 549, 549, 553, 553, 553, 553], [130, 131, 246, 269, 280, 598, 616, 624, 627], [221, 222, 235, 236, 286, 291, 293, 297, 303, 305, 308, 309, 311, 314, 314, 314, 319, 319, 322, 322, 322, 323, 323, 323, 323, 326, 327, 335, 335, 336, 377, 377, 680, 680, 689], [197, 263, 266, 266, 266, 266, 269, 269, 269, 398, 405], [0, 0, 305, 306, 381, 383, 387, 388, 390, 391, 391, 397, 398, 402, 402, 402, 405, 406, 416, 422, 423, 764, 917, 917, 917, 917], [250, 258, 259, 263, 263, 264, 264, 267, 267, 271, 271, 272, 272, 276, 278, 280, 285, 295, 298, 694, 695], [137, 138, 149, 151, 151, 506, 507, 507, 529], [124, 130, 135, 135, 136, 136, 136, 137, 144, 144, 145, 147, 148], [66, 72, 78, 585, 586], [191, 192, 193, 193, 193, 193, 203, 206, 206, 207, 574, 577, 578, 578, 580], [145, 178, 180, 180, 180, 180, 180, 180, 182, 183, 183, 184, 186, 186, 186, 189, 189, 189, 189, 190, 196, 226, 226, 232, 248, 288, 295, 295, 308, 367, 414, 481, 524, 528, 539], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 199, 202, 216, 221, 228, 228, 305, 335, 343, 363, 373, 393, 580, 581, 591, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687, 687], [0, 0, 2, 2, 193, 193, 194, 203, 211, 214, 554, 558, 558, 643, 643], [251, 280, 285, 331, 334, 339, 345, 355, 358], 
[0, 0, 191, 194, 194, 195, 222, 224, 228, 229, 256, 261, 307, 536, 537, 539], [255, 268, 280, 281, 281, 293, 302, 689, 696, 696, 709], [189, 222, 222, 233, 605, 606, 608, 613], [0, 0, 0, 0, 3, 3, 3, 3, 4, 6, 7, 198, 198, 205, 211, 639, 639], [96, 183, 183, 183, 184, 186, 454, 472], [119, 181, 184, 186, 187, 189, 189, 189, 189, 189, 189, 191, 191], [146, 146, 241, 241, 244, 248, 250, 253], [173, 175, 188, 191, 469]]
# bucket = [[1, 3, 20, 22, 23, 24, 31, 31, 31, 32, 32, 35, 35, 35, 46, 53, 68, 68, 69, 72, 75, 77], [144, 148, 149]]
# bucket1 = [[69, 69], [146, 147, 147, 157, 157]]
# bucket2 = [[7, 21, 22, 25, 25, 43, 56, 57], [157, 157]]
# bucket3 = [[1, 17, 21, 22, 27, 43, 54, 56], [141, 144, 150, 156, 156, 157, 157]]
# bucket4 = [[32, 32, 60, 66, 69], [147, 151, 153, 154]]
# bucket5 = [[46, 46, 48, 49, 59, 60, 60, 61, 63, 63, 64, 64, 64, 65, 65, 65, 66, 66, 67, 67, 67, 67, 67, 67, 67, 67, 68, 69, 69, 69, 78, 78], [141, 141, 143]]
# bucket6 = [[53, 64, 65, 65, 65, 67, 67, 67, 68, 68, 68, 68, 69, 69, 69, 69, 72, 73], [130, 157, 157, 157, 157]]
# bucket7 = [[45, 46, 49, 50, 52, 52, 69, 76, 78, 82, 85, 89],
# [132, 132, 135, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157,
# 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157]]
# bucket8 = [[1, 46, 47, 47, 48, 53], [134, 136, 136, 154, 157, 157, 157, 157, 157, 157]]
for t in tt:
# print(b)
# bucket1 = radix_sort(t, len(t))[[338, 339, 361, 584, 1226, 1270, 1361, 1385, 1386], [0, 0, 305, 321, 375, 539, 541, 547, 1328, 1340, 1382, 1389, 1390, 1404, 1405], [0, 8, 8, 10, 36, 203, 224, 235, 244, 311, 314, 315, 321, 321, 356, 356, 357, 461, 533, 680, 682, 697, 725, 753, 770, 1447, 1484, 1491], [236, 237, 343, 345, 346, 445, 530, 531, 532, 661, 698, 727, 741, 1459], [220, 224, 542, 1463, 1479], [0, 0, 223, 223, 233, 559, 565, 1454, 1546], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 6, 8, 8, 8, 8, 10, 215, 219, 225, 226, 249, 287, 363, 541, 550, 558, 1570, 1570], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 10, 14, 14, 16, 16, 16, 17, 27, 27, 42, 43, 50, 215, 223, 235, 251, 265, 288, 557, 565, 626, 626, 626, 630, 1570, 1570, 1570, 1570, 1570, 1570], [320, 323, 607, 664, 691, 1474, 1519, 1537, 1544], [461, 461, 489, 490, 594, 605, 609, 618, 630, 635, 641, 644, 646, 654, 654, 654, 663, 664, 670, 671, 671, 672, 672, 673, 673, 679, 681, 696, 698, 698, 785, 785, 1413, 1413, 1432], [532, 707, 715, 715, 716, 716, 724, 724, 725, 1072, 1091], [0, 0, 523, 523, 653, 655, 663, 664, 668, 670, 670, 680, 681, 689, 689, 689, 694, 696, 713, 723, 724, 1309, 1570, 1570, 1570, 1570], [522, 539, 540, 548, 548, 550, 551, 556, 557, 565, 566, 567, 568, 575, 580, 583, 595, 616, 621, 1447, 1449], [390, 392, 424, 430, 430, 1438, 1441, 1442, 1502], [365, 385, 399, 400, 400, 401, 402, 404, 425, 425, 428, 435, 436], [168, 182, 198, 1473, 1475], [476, 479, 480, 481, 481, 481, 506, 512, 513, 515, 1429, 1437, 1438, 1439, 1444], [385, 475, 479, 480, 480, 480, 480, 480, 486, 488, 488, 489, 495, 497, 497, 504, 504, 504, 505, 507, 522, 601, 602, 619, 661, 768, 785, 785, 819, 977, 1101, 1282, 1395, 1405, 1434], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 454, 461, 495, 505, 522, 522, 698, 767, 785, 829, 854, 898, 1325, 1327, 1350, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 
1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570, 1570], [0, 0, 8, 10, 471, 473, 474, 496, 521, 529, 1346, 1347, 1356, 1361, 1362, 1370, 1373, 1374, 1570, 1570], [437, 487, 496, 576, 582, 590, 600, 618, 622], [0, 0, 488, 496, 496, 498, 568, 575, 584, 585, 611, 655, 668, 785, 1371, 1374, 1380], [530, 558, 583, 583, 584, 610, 628, 1433, 1447, 1447, 1474], [452, 532, 532, 558, 1448, 1451, 1456, 1467], [0, 0, 0, 0, 0, 0, 0, 0, 6, 8, 8, 476, 486, 513, 515, 1570, 1570], [306, 583, 583, 584, 584, 592, 1443, 1501], [408, 619, 627, 636, 637, 644, 644, 645, 646, 646, 647, 653, 653], [416, 417, 687, 687, 698, 709, 714, 723], [546, 550, 593, 601, 1474]]
# print('bucket1: {}\n'.format(bucket1))
# print(b)
# t = [5, 0, 0, 219, 224, 224, 190, 53, 200]
# t = [n for n in t if n > 0]
bucket2 = radix_sort2(t)
print('bucket2: {} \n\n\n\n'.format(bucket2))
| 168.32
| 2,653
| 0.539449
| 2,517
| 12,624
| 2.697656
| 0.228844
| 0.073932
| 0.093225
| 0.113697
| 0.702356
| 0.693373
| 0.687629
| 0.678056
| 0.675405
| 0.659794
| 0
| 0.605274
| 0.233999
| 12,624
| 74
| 2,654
| 170.594595
| 0.096898
| 0.318837
| 0
| 0.666667
| 0
| 0
| 0.005959
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.02381
| 0
| 0.119048
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
39c142ad7d617a051d8fc6f235834e1dfcf98146
| 8,274
|
py
|
Python
|
aion/proto/status_pb2_grpc.py
|
latonaio/aion-related-python-library
|
837e885e3368e3fc34ae41b07565a845c78970fd
|
[
"MIT"
] | 10
|
2021-09-22T07:15:14.000Z
|
2021-11-04T10:21:13.000Z
|
aion/proto/status_pb2_grpc.py
|
latonaio/aion-related-python-library
|
837e885e3368e3fc34ae41b07565a845c78970fd
|
[
"MIT"
] | null | null | null |
aion/proto/status_pb2_grpc.py
|
latonaio/aion-related-python-library
|
837e885e3368e3fc34ae41b07565a845c78970fd
|
[
"MIT"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from aion.proto import status_pb2 as proto_dot_kanbanpb_dot_status__pb2
class KanbanStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ReceiveKanban = channel.unary_stream(
'/kanbanpb.Kanban/ReceiveKanban',
request_serializer=proto_dot_kanbanpb_dot_status__pb2.InitializeService.SerializeToString,
response_deserializer=proto_dot_kanbanpb_dot_status__pb2.StatusKanban.FromString,
)
self.SendKanban = channel.unary_unary(
'/kanbanpb.Kanban/SendKanban',
request_serializer=proto_dot_kanbanpb_dot_status__pb2.Request.SerializeToString,
response_deserializer=proto_dot_kanbanpb_dot_status__pb2.Response.FromString,
)
class KanbanServicer(object):
"""Missing associated documentation comment in .proto file."""
def ReceiveKanban(self, request, context):
"""最新のCカンバンを取得する
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendKanban(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_KanbanServicer_to_server(servicer, server):
    # Wrap each servicer method in an RPC handler with the matching
    # (de)serializers, then register them all under the fully-qualified
    # service name 'kanbanpb.Kanban'.
    rpc_method_handlers = {
            'ReceiveKanban': grpc.unary_stream_rpc_method_handler(
                    servicer.ReceiveKanban,
                    request_deserializer=proto_dot_kanbanpb_dot_status__pb2.InitializeService.FromString,
                    response_serializer=proto_dot_kanbanpb_dot_status__pb2.StatusKanban.SerializeToString,
            ),
            'SendKanban': grpc.unary_unary_rpc_method_handler(
                    servicer.SendKanban,
                    request_deserializer=proto_dot_kanbanpb_dot_status__pb2.Request.FromString,
                    response_serializer=proto_dot_kanbanpb_dot_status__pb2.Response.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'kanbanpb.Kanban', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Kanban(object):
    """Static one-shot invocation helpers for kanbanpb.Kanban.

    Part of gRPC's EXPERIMENTAL API: each call builds/uses a channel
    internally instead of requiring a pre-built stub.
    """

    @staticmethod
    def ReceiveKanban(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Server-streaming invocation of /kanbanpb.Kanban/ReceiveKanban.
        return grpc.experimental.unary_stream(request, target, '/kanbanpb.Kanban/ReceiveKanban',
            proto_dot_kanbanpb_dot_status__pb2.InitializeService.SerializeToString,
            proto_dot_kanbanpb_dot_status__pb2.StatusKanban.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def SendKanban(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Unary invocation of /kanbanpb.Kanban/SendKanban.
        return grpc.experimental.unary_unary(request, target, '/kanbanpb.Kanban/SendKanban',
            proto_dot_kanbanpb_dot_status__pb2.Request.SerializeToString,
            proto_dot_kanbanpb_dot_status__pb2.Response.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
class SendAnythingStub(object):
    """Client-side stub for the kanbanpb.SendAnything service."""

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Bidirectional-streaming call: SendKanban messages flow both ways.
        self.ServiceBrokerConn = channel.stream_stream(
                '/kanbanpb.SendAnything/ServiceBrokerConn',
                request_serializer=proto_dot_kanbanpb_dot_status__pb2.SendKanban.SerializeToString,
                response_deserializer=proto_dot_kanbanpb_dot_status__pb2.SendKanban.FromString,
                )
        # Client-streaming call: a stream of SendContext chunks in, a single
        # UploadStatus back.
        self.SendToOtherDevices = channel.stream_unary(
                '/kanbanpb.SendAnything/SendToOtherDevices',
                request_serializer=proto_dot_kanbanpb_dot_status__pb2.SendContext.SerializeToString,
                response_deserializer=proto_dot_kanbanpb_dot_status__pb2.UploadStatus.FromString,
                )
class SendAnythingServicer(object):
    """Server-side handler interface for the kanbanpb.SendAnything service.

    Subclass and override the methods; the defaults answer UNIMPLEMENTED.
    """

    def ServiceBrokerConn(self, request_iterator, context):
        """Bidirectional SendKanban stream; override in a concrete servicer."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SendToOtherDevices(self, request_iterator, context):
        """Consume a SendContext stream, reply with one UploadStatus."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_SendAnythingServicer_to_server(servicer, server):
    # Wrap each servicer method in an RPC handler with the matching
    # (de)serializers, then register them all under the fully-qualified
    # service name 'kanbanpb.SendAnything'.
    rpc_method_handlers = {
            'ServiceBrokerConn': grpc.stream_stream_rpc_method_handler(
                    servicer.ServiceBrokerConn,
                    request_deserializer=proto_dot_kanbanpb_dot_status__pb2.SendKanban.FromString,
                    response_serializer=proto_dot_kanbanpb_dot_status__pb2.SendKanban.SerializeToString,
            ),
            'SendToOtherDevices': grpc.stream_unary_rpc_method_handler(
                    servicer.SendToOtherDevices,
                    request_deserializer=proto_dot_kanbanpb_dot_status__pb2.SendContext.FromString,
                    response_serializer=proto_dot_kanbanpb_dot_status__pb2.UploadStatus.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'kanbanpb.SendAnything', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class SendAnything(object):
    """Static one-shot invocation helpers for kanbanpb.SendAnything.

    Part of gRPC's EXPERIMENTAL API: each call builds/uses a channel
    internally instead of requiring a pre-built stub.
    """

    @staticmethod
    def ServiceBrokerConn(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Bidirectional-streaming invocation of ServiceBrokerConn.
        return grpc.experimental.stream_stream(request_iterator, target, '/kanbanpb.SendAnything/ServiceBrokerConn',
            proto_dot_kanbanpb_dot_status__pb2.SendKanban.SerializeToString,
            proto_dot_kanbanpb_dot_status__pb2.SendKanban.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def SendToOtherDevices(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Client-streaming invocation of SendToOtherDevices.
        return grpc.experimental.stream_unary(request_iterator, target, '/kanbanpb.SendAnything/SendToOtherDevices',
            proto_dot_kanbanpb_dot_status__pb2.SendContext.SerializeToString,
            proto_dot_kanbanpb_dot_status__pb2.UploadStatus.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 42.430769
| 116
| 0.682137
| 781
| 8,274
| 6.87452
| 0.135723
| 0.043584
| 0.074502
| 0.088471
| 0.807785
| 0.770535
| 0.765319
| 0.738313
| 0.595642
| 0.430806
| 0
| 0.004161
| 0.244863
| 8,274
| 194
| 117
| 42.649485
| 0.855154
| 0.103698
| 0
| 0.507042
| 1
| 0
| 0.075911
| 0.040696
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084507
| false
| 0
| 0.014085
| 0.028169
| 0.169014
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
39c3058f2ea320f74efffc5032327994f0ac314e
| 1,048
|
py
|
Python
|
api/client/src/test/test_build_image_response_content.py
|
enrico-usai/cfncluster
|
acf083776c301d4f2a03ce5cd6fc79f9b88c74e0
|
[
"Apache-2.0"
] | 415
|
2018-11-13T15:02:15.000Z
|
2022-03-31T15:26:06.000Z
|
api/client/src/test/test_build_image_response_content.py
|
enrico-usai/cfncluster
|
acf083776c301d4f2a03ce5cd6fc79f9b88c74e0
|
[
"Apache-2.0"
] | 2,522
|
2018-11-13T16:16:27.000Z
|
2022-03-31T13:57:10.000Z
|
api/client/src/test/test_build_image_response_content.py
|
yuleiwan/aws-parallelcluster
|
aad2a3019ef4ad08d702f5acf41b152b3f7a0b46
|
[
"Apache-2.0"
] | 164
|
2018-11-14T22:47:46.000Z
|
2022-03-22T11:33:22.000Z
|
"""
ParallelCluster
ParallelCluster API # noqa: E501
The version of the OpenAPI document: 3.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import pcluster.client
from pcluster.client.model.config_validation_message import ConfigValidationMessage
from pcluster.client.model.image_info_summary import ImageInfoSummary
globals()['ConfigValidationMessage'] = ConfigValidationMessage
globals()['ImageInfoSummary'] = ImageInfoSummary
from pcluster.client.model.build_image_response_content import BuildImageResponseContent
class TestBuildImageResponseContent(unittest.TestCase):
    """Unit test stubs for the BuildImageResponseContent model."""

    def setUp(self):
        """No fixtures are required yet."""

    def tearDown(self):
        """Nothing to clean up."""

    def testBuildImageResponseContent(self):
        """Test BuildImageResponseContent"""
        # FIXME: construct object with mandatory attributes with example values
        # model = BuildImageResponseContent()  # noqa: E501
if __name__ == '__main__':
unittest.main()
| 26.2
| 88
| 0.750954
| 101
| 1,048
| 7.643564
| 0.574257
| 0.072539
| 0.069948
| 0.089378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010357
| 0.170802
| 1,048
| 39
| 89
| 26.871795
| 0.878021
| 0.316794
| 0
| 0.176471
| 1
| 0
| 0.069118
| 0.033824
| 0
| 0
| 0
| 0.025641
| 0
| 1
| 0.176471
| false
| 0.176471
| 0.352941
| 0
| 0.588235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
39f239d7911f9a6c407df9b0608cdc0a0d741343
| 217
|
py
|
Python
|
src/hio/core/http/__init__.py
|
pfeairheller/hio
|
44669adb62c81357491f9f6157312bc1313b56cf
|
[
"Apache-2.0"
] | 1
|
2021-04-07T19:10:28.000Z
|
2021-04-07T19:10:28.000Z
|
src/hio/core/http/__init__.py
|
pfeairheller/hio
|
44669adb62c81357491f9f6157312bc1313b56cf
|
[
"Apache-2.0"
] | 4
|
2021-03-30T20:50:19.000Z
|
2022-01-06T17:16:18.000Z
|
src/hio/core/http/__init__.py
|
pfeairheller/hio
|
44669adb62c81357491f9f6157312bc1313b56cf
|
[
"Apache-2.0"
] | 3
|
2021-04-08T19:35:36.000Z
|
2021-06-03T13:39:05.000Z
|
# -*- encoding: utf-8 -*-
"""
hio.core.http Package
"""
from .httping import HTTPError
from .clienting import Client, openClient, ClientDoer
from .serving import BareServer, Server, WsgiServer, openServer, ServerDoer
| 27.125
| 75
| 0.75576
| 25
| 217
| 6.56
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005263
| 0.124424
| 217
| 7
| 76
| 31
| 0.857895
| 0.211982
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
840075eb24a49ca4de083d64f242b8fae612155c
| 179
|
py
|
Python
|
autorch/__init__.py
|
skywalker0803r/autorch
|
b71adb2c010556d4e7895304e46a1545a347ffa6
|
[
"MIT"
] | null | null | null |
autorch/__init__.py
|
skywalker0803r/autorch
|
b71adb2c010556d4e7895304e46a1545a347ffa6
|
[
"MIT"
] | null | null | null |
autorch/__init__.py
|
skywalker0803r/autorch
|
b71adb2c010556d4e7895304e46a1545a347ffa6
|
[
"MIT"
] | null | null | null |
import os

try:
    import robust_loss_pytorch
except ImportError:
    # The package is only published on GitHub, so pull it from the upstream
    # repository on first import. Catch ImportError specifically: a bare
    # `except:` would also swallow KeyboardInterrupt/SystemExit.
    # NOTE(review): shelling out to pip at import time is a heavy side
    # effect; consider declaring this as an install requirement instead.
    os.system("pip install git+https://github.com/jonbarron/robust_loss_pytorch")

from . import utils, function, transferlearning
| 22.375
| 79
| 0.804469
| 25
| 179
| 5.6
| 0.76
| 0.142857
| 0.242857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.100559
| 179
| 7
| 80
| 25.571429
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0.359551
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
841d285ec49fcdd77209b6c1f26d54645a43bb0c
| 164
|
py
|
Python
|
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/unittest_support.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 1,738
|
2017-09-21T10:59:12.000Z
|
2022-03-31T21:05:46.000Z
|
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/unittest_support.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 427
|
2017-09-29T22:54:36.000Z
|
2022-02-15T19:26:50.000Z
|
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/unittest_support.py
|
jeikabu/lumberyard
|
07228c605ce16cbf5aaa209a94a3cb9d6c1a4115
|
[
"AML"
] | 671
|
2017-09-21T08:04:01.000Z
|
2022-03-29T14:30:07.000Z
|
"""
This file fixes portability issues for unittest
"""
import sys
import warnings
from . import config
from numba.config import PYVERSION
from unittest import *
| 14.909091
| 47
| 0.780488
| 22
| 164
| 5.818182
| 0.636364
| 0.21875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164634
| 164
| 10
| 48
| 16.4
| 0.934307
| 0.286585
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8427f5220633de90baeea5a657556cb13a79f79c
| 271
|
py
|
Python
|
kara/common/__init__.py
|
genvia/kara
|
0b3f8975fc816e64100cd845c36998ac77eace6c
|
[
"BSD-2-Clause"
] | null | null | null |
kara/common/__init__.py
|
genvia/kara
|
0b3f8975fc816e64100cd845c36998ac77eace6c
|
[
"BSD-2-Clause"
] | 1
|
2021-03-31T19:03:57.000Z
|
2021-03-31T19:03:57.000Z
|
kara/common/__init__.py
|
genvia/kara
|
0b3f8975fc816e64100cd845c36998ac77eace6c
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# only for setup.py's findpackages
from .karaerror import KaraBaseError
from .karaerror import KaraExecutorError
from .karaerror import KaraDatabaseError
from .karaerror import KaraValidatorError
# vim: set ft=python ai nu et ts=4 sw=4 tw=120:
| 24.636364
| 47
| 0.771218
| 38
| 271
| 5.5
| 0.736842
| 0.248804
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025862
| 0.143911
| 271
| 10
| 48
| 27.1
| 0.875
| 0.369004
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
843756e9db4aaa007a306fc8fdfd8d11cf079e1f
| 326
|
py
|
Python
|
tests/elementals/atoms/test_radio.py
|
IAmTheBlurr/Automancy
|
0c52916cd01dda6bd34ef8d048c37e478dfabbb5
|
[
"MIT"
] | null | null | null |
tests/elementals/atoms/test_radio.py
|
IAmTheBlurr/Automancy
|
0c52916cd01dda6bd34ef8d048c37e478dfabbb5
|
[
"MIT"
] | null | null | null |
tests/elementals/atoms/test_radio.py
|
IAmTheBlurr/Automancy
|
0c52916cd01dda6bd34ef8d048c37e478dfabbb5
|
[
"MIT"
] | null | null | null |
from automancy import Radio
class TestRadio(object):
    """Unit tests for the Radio atom elemental."""

    def test_radio_object_can_be_instantiated(self):
        """A Radio built from locator/name/system_name keeps all three."""
        radio = Radio('//div', 'Test Object', 'test_object')
        expected = ('//div', 'Test Object', 'test_object')
        assert (radio.locator, radio.name, radio.system_name) == expected
| 32.6
| 66
| 0.690184
| 41
| 326
| 5.195122
| 0.439024
| 0.375587
| 0.225352
| 0.187793
| 0.244131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.199387
| 326
| 9
| 67
| 36.222222
| 0.816092
| 0
| 0
| 0
| 0
| 0
| 0.165644
| 0
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
84588f1c7f30cc0297fe7d1468ab41df3f762917
| 151
|
py
|
Python
|
lightcycle-frontend/tournament/base_bot.py
|
Onapsis/pytron
|
2ed0622ae13f010bcd8fdbbd2f1e9cba3d2e3d58
|
[
"MIT"
] | 1
|
2015-11-04T12:04:42.000Z
|
2015-11-04T12:04:42.000Z
|
lightcycle-frontend/tournament/base_bot.py
|
Onapsis/pytron
|
2ed0622ae13f010bcd8fdbbd2f1e9cba3d2e3d58
|
[
"MIT"
] | null | null | null |
lightcycle-frontend/tournament/base_bot.py
|
Onapsis/pytron
|
2ed0622ae13f010bcd8fdbbd2f1e9cba3d2e3d58
|
[
"MIT"
] | null | null | null |
class MyLightCycleBot(LightCycleBaseBot):
    # Tournament bot template; LightCycleBaseBot is supplied by the
    # lightcycle frontend framework (defined outside this file).
    def get_next_step(self, arena, x, y):
        # Should return north, south, east or west
        # NOTE(review): this stub returns None, which presumably counts as
        # no move / forfeit -- confirm against the tournament runner.
        return
| 25.166667
| 50
| 0.675497
| 19
| 151
| 5.263158
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.245033
| 151
| 5
| 51
| 30.2
| 0.877193
| 0.264901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
f2291a7abd65e8de8a8213c02057f363f1b82bcd
| 73
|
py
|
Python
|
waitlist/utility/outgate/__init__.py
|
kimnnmadsen/eve-inc-waitlist
|
c3e4853c5563a95edbf105c11e73d481595fb3ab
|
[
"MIT"
] | null | null | null |
waitlist/utility/outgate/__init__.py
|
kimnnmadsen/eve-inc-waitlist
|
c3e4853c5563a95edbf105c11e73d481595fb3ab
|
[
"MIT"
] | 1
|
2020-02-18T05:11:20.000Z
|
2020-02-18T05:29:10.000Z
|
waitlist/utility/outgate/__init__.py
|
kimnnmadsen/eve-inc-waitlist
|
c3e4853c5563a95edbf105c11e73d481595fb3ab
|
[
"MIT"
] | null | null | null |
from . import character
from . import alliance
from . import corporation
| 18.25
| 25
| 0.794521
| 9
| 73
| 6.444444
| 0.555556
| 0.517241
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164384
| 73
| 3
| 26
| 24.333333
| 0.95082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
f27702f396ae69455b31f727219b43de27912c88
| 50
|
py
|
Python
|
behavioral/strategy/logic/__init__.py
|
Kozak24/Patterns
|
351d5c11f7c64ce5d58db37b6715fc8f7d31945a
|
[
"MIT"
] | null | null | null |
behavioral/strategy/logic/__init__.py
|
Kozak24/Patterns
|
351d5c11f7c64ce5d58db37b6715fc8f7d31945a
|
[
"MIT"
] | null | null | null |
behavioral/strategy/logic/__init__.py
|
Kozak24/Patterns
|
351d5c11f7c64ce5d58db37b6715fc8f7d31945a
|
[
"MIT"
] | null | null | null |
from .person_representer import PersonRepresenter
| 25
| 49
| 0.9
| 5
| 50
| 8.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 50
| 1
| 50
| 50
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f2a33ae4033bc4de28f09169a7bab269459e9b56
| 213
|
py
|
Python
|
acestream/ACEStream/Core/CacheDB/cachedb.py
|
GrandPaRPi/p2ptv-pi
|
6f79c00f9055a3763ddfe1dc41e14d2cb533f4c3
|
[
"MIT"
] | null | null | null |
acestream/ACEStream/Core/CacheDB/cachedb.py
|
GrandPaRPi/p2ptv-pi
|
6f79c00f9055a3763ddfe1dc41e14d2cb533f4c3
|
[
"MIT"
] | null | null | null |
acestream/ACEStream/Core/CacheDB/cachedb.py
|
GrandPaRPi/p2ptv-pi
|
6f79c00f9055a3763ddfe1dc41e14d2cb533f4c3
|
[
"MIT"
] | 2
|
2018-04-17T17:34:39.000Z
|
2020-07-26T03:43:33.000Z
|
#Embedded file name: ACEStream\Core\CacheDB\cachedb.pyo
from sqlitecachedb import *
from SqliteSeedingStatsCacheDB import *
from SqliteFriendshipStatsCacheDB import *
from SqliteVideoPlaybackStatsCacheDB import *
| 35.5
| 55
| 0.859155
| 20
| 213
| 9.15
| 0.65
| 0.163934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093897
| 213
| 5
| 56
| 42.6
| 0.948187
| 0.253521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f2b1e65a4ee4df342e9bb9d7f6f20c9dd1954a94
| 158
|
py
|
Python
|
utils/__init__.py
|
bweck/cssbot
|
2f3b5a6e0600f92ae0803ad3df44948dd5408444
|
[
"MIT"
] | 1
|
2017-04-04T06:06:16.000Z
|
2017-04-04T06:06:16.000Z
|
utils/__init__.py
|
bweck/cssbot
|
2f3b5a6e0600f92ae0803ad3df44948dd5408444
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
bweck/cssbot
|
2f3b5a6e0600f92ae0803ad3df44948dd5408444
|
[
"MIT"
] | null | null | null |
from sysy import usage, argv
from dirs import switch_cwd_to_script_loc
from formatting import format_json
from filelock import FileLock, FileLockException
| 19.75
| 48
| 0.85443
| 23
| 158
| 5.652174
| 0.695652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132911
| 158
| 7
| 49
| 22.571429
| 0.948905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4b3bb113ab847ce9883dee90dae151f653df297c
| 231
|
py
|
Python
|
otscrape/core/loader/__init__.py
|
SSripilaipong/otscrape
|
73ad2ea3d20841cf5d81b37180a1f21c48e87480
|
[
"MIT"
] | null | null | null |
otscrape/core/loader/__init__.py
|
SSripilaipong/otscrape
|
73ad2ea3d20841cf5d81b37180a1f21c48e87480
|
[
"MIT"
] | null | null | null |
otscrape/core/loader/__init__.py
|
SSripilaipong/otscrape
|
73ad2ea3d20841cf5d81b37180a1f21c48e87480
|
[
"MIT"
] | null | null | null |
from .dummy import DummyLoader
from .request import SimpleRequestLoader
from .file import LineLoader, JSONFileLoader, CSVFileLoader
# Public API of the loader package (what `from ... import *` re-exports).
__all__ = ['DummyLoader', 'SimpleRequestLoader', 'LineLoader', 'JSONFileLoader', 'CSVFileLoader']
| 38.5
| 97
| 0.809524
| 20
| 231
| 9.15
| 0.55
| 0.262295
| 0.404372
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 231
| 5
| 98
| 46.2
| 0.875598
| 0
| 0
| 0
| 0
| 0
| 0.290043
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4b4189891cfa98e3db7fa9a1557e24b1796d6327
| 135
|
py
|
Python
|
recommends/tests/admin.py
|
coagulant/django-recommends
|
412b741c3a0aa5204b70f869cc893ef9fbccbe51
|
[
"MIT"
] | 142
|
2015-02-09T20:32:34.000Z
|
2021-12-17T09:13:57.000Z
|
recommends/tests/admin.py
|
coagulant/django-recommends
|
412b741c3a0aa5204b70f869cc893ef9fbccbe51
|
[
"MIT"
] | 20
|
2015-03-12T15:43:51.000Z
|
2021-02-16T19:38:56.000Z
|
recommends/tests/admin.py
|
coagulant/django-recommends
|
412b741c3a0aa5204b70f869cc893ef9fbccbe51
|
[
"MIT"
] | 40
|
2015-05-11T19:35:31.000Z
|
2021-12-02T17:03:06.000Z
|
from django.contrib import admin
from .models import RecProduct, RecVote
# Register the test models with the default ModelAdmin so they are
# manageable through the Django admin site.
admin.site.register(RecProduct)
admin.site.register(RecVote)
| 22.5
| 39
| 0.82963
| 18
| 135
| 6.222222
| 0.555556
| 0.160714
| 0.303571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 135
| 5
| 40
| 27
| 0.910569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4b6b9c0beaf0de98d4aca0342cd40f5f91611086
| 345
|
py
|
Python
|
corvid/table_filter/table_filter.py
|
cmkumar87/corvid
|
164113c5e0f0f9c7463f43213f38aeeeb6d5b05e
|
[
"Apache-2.0"
] | 2
|
2019-03-05T20:55:24.000Z
|
2019-03-05T22:25:27.000Z
|
corvid/table_filter/table_filter.py
|
cmkumar87/corvid
|
164113c5e0f0f9c7463f43213f38aeeeb6d5b05e
|
[
"Apache-2.0"
] | null | null | null |
corvid/table_filter/table_filter.py
|
cmkumar87/corvid
|
164113c5e0f0f9c7463f43213f38aeeeb6d5b05e
|
[
"Apache-2.0"
] | null | null | null |
"""
"""
from typing import List
from corvid.table.table import Table
# TODO: replace the placeholder score with a real relevance model.
def predict_table_relevance(table: Table) -> float:
    """Score how relevant `table` is; placeholder ranking everything
    maximally relevant."""
    placeholder_score = float('inf')
    return placeholder_score
# TODO: revisit once predict_table_relevance is actually implemented.
def filter_tables(tables: List[Table], min_relevance: float) -> List[Table]:
    """Return the tables whose predicted relevance exceeds `min_relevance`,
    preserving input order."""
    kept = []
    for candidate in tables:
        if predict_table_relevance(candidate) > min_relevance:
            kept.append(candidate)
    return kept
| 17.25
| 76
| 0.701449
| 46
| 345
| 5.108696
| 0.413043
| 0.085106
| 0.178723
| 0.221277
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191304
| 345
| 19
| 77
| 18.157895
| 0.842294
| 0.026087
| 0
| 0
| 0
| 0
| 0.009259
| 0
| 0
| 0
| 0
| 0.052632
| 0
| 1
| 0.285714
| false
| 0
| 0.285714
| 0.285714
| 0.857143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
4ba1a3fb25cfef3179fb2f1fb68ed2229517ab99
| 96
|
py
|
Python
|
round_robin/admin.py
|
popara/jonny-api
|
29d90c2c7bb6ac70e91bbaa7aad026d5d9229c55
|
[
"MIT"
] | null | null | null |
round_robin/admin.py
|
popara/jonny-api
|
29d90c2c7bb6ac70e91bbaa7aad026d5d9229c55
|
[
"MIT"
] | null | null | null |
round_robin/admin.py
|
popara/jonny-api
|
29d90c2c7bb6ac70e91bbaa7aad026d5d9229c55
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from models import RoundRobin  # implicit-relative import (Python 2 style)
# Register RoundRobin with the default ModelAdmin for the Django admin site.
admin.site.register(RoundRobin)
| 19.2
| 32
| 0.84375
| 13
| 96
| 6.230769
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104167
| 96
| 4
| 33
| 24
| 0.94186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4bbb95ac65086ed273ac44303abbff13239286ff
| 12,655
|
py
|
Python
|
me_group/supervisor_api/works.py
|
Eng-Mohanad-Alkrunz/me_group
|
8311f034a79c1b75c03a4b6be0b1a726b07a9d95
|
[
"MIT"
] | null | null | null |
me_group/supervisor_api/works.py
|
Eng-Mohanad-Alkrunz/me_group
|
8311f034a79c1b75c03a4b6be0b1a726b07a9d95
|
[
"MIT"
] | null | null | null |
me_group/supervisor_api/works.py
|
Eng-Mohanad-Alkrunz/me_group
|
8311f034a79c1b75c03a4b6be0b1a726b07a9d95
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import time
import frappe
import frappe.client
import frappe.handler
import jwt
from frappe import _
import base64
from passlib.context import CryptContext
from mimetypes import guess_type
from frappe.utils import add_days, cint
@frappe.whitelist(allow_guest=True)
def create_work(**kwards):
    """Create a Work Application for the authenticated supervisor.

    Payload keys: work_name (required), contract (required), date
    (optional), plus image data consumed by updatefile().  The function
    communicates entirely through frappe.local.response (status/data
    envelope) and returns nothing.
    """
    # Default to Arabic, overridable via the Language request header.
    lang = "ar"
    if frappe.get_request_header("Language"):
        lang = frappe.get_request_header("Language")
    frappe.local.lang = lang
    data = kwards
    # check_token() (defined below) validates the Bearer token and is
    # expected to yield {"user": <Supervisor doc>} on success.
    check = check_token()
    user1 = None
    work_name = None
    contract = None
    date = None
    if check and "user" in check:
        user1 = check['user']
    if not user1:
        frappe.local.response['http_status_code'] = 403
        frappe.local.response['status'] = {"message": _("Not Authorized"), "success": False, "code": 403}
        frappe.local.response['data'] = None
        return
    # Required-field guards: reply with a 403-style envelope instead of
    # letting a KeyError surface as an HTTP 500.
    if "work_name" in data:
        work_name = data['work_name']
    else:
        frappe.local.response['status'] = {"message": _("Work name required"), "success": False, "code": 403}
        frappe.local.response['data'] = None
        return
    if "contract" in data:
        contract = data['contract']
    else:
        frappe.local.response['status'] = {"message": _("contract id required"), "success": False, "code": 403}
        frappe.local.response['data'] = None
        return
    if "date" in data:
        date = data['date']
    new_work = frappe.new_doc("Work Application")
    new_work.set("work_name",work_name)
    new_work.set("supervisor", user1.name)
    if date is not None:
        new_work.set("date", date)
    new_work.set("contract", contract)
    # updatefile() is defined elsewhere in this module; presumably converts
    # the uploaded payload into image child-table rows -- confirm there.
    new_work.set("images", updatefile(data))
    new_work.save(ignore_permissions=True)
    frappe.db.commit()
    frappe.local.response['status'] = {"message": _("Work created successfully"), "success": True, "code": 200}
    frappe.local.response['data'] = None
@frappe.whitelist(allow_guest=True)
def create_work_management(**kwards):
    """Create a Work Management Application linked to an existing Work.

    Payload keys: work_name (required), contract (required), email
    (required), date (optional).  Communicates through
    frappe.local.response and returns nothing.
    """
    # Default to Arabic, overridable via the Language request header.
    lang = "ar"
    if frappe.get_request_header("Language"):
        lang = frappe.get_request_header("Language")
    frappe.local.lang = lang
    data = kwards
    check = check_token()
    user1 = None
    contract = None
    date = None
    work_name = None
    if check and "user" in check:
        user1 = check['user']
    if not user1:
        frappe.local.response['http_status_code'] = 403
        frappe.local.response['status'] = {"message": _("Not Authorized"), "success": False, "code": 403}
        frappe.local.response['data'] = None
        return
    # Guard every required field explicitly (same pattern as create_work)
    # instead of letting data['work_name'] / data['email'] raise a KeyError
    # that surfaces as an HTTP 500.
    if "work_name" in data:
        work_name = data['work_name']
    else:
        frappe.local.response['status'] = {"message": _("Work name required"), "success": False, "code": 403}
        frappe.local.response['data'] = None
        return
    if "contract" in data:
        contract = data['contract']
    else:
        frappe.local.response['status'] = {"message": _("contract id required"), "success": False, "code": 403}
        frappe.local.response['data'] = None
        return
    if "date" in data:
        date = data['date']
    if "email" in data:
        email = data['email']
    else:
        frappe.local.response['status'] = {"message": _("email required"), "success": False, "code": 403}
        frappe.local.response['data'] = None
        return
    new_work = frappe.new_doc("Work Management Application")
    work = frappe.get_all("Work Application", filters={"work_name": work_name})
    # Link the parent Work only when the name resolves unambiguously.
    if len(work) == 1:
        new_work.set("work", work[0].name)
    new_work.set("supervisor", user1.name)
    if date is not None:
        new_work.set("date", date)
    new_work.set("contract", contract)
    new_work.set("email", email)
    new_work.save(ignore_permissions=True)
    frappe.db.commit()
    frappe.local.response['status'] = {"message": _("Work created successfully"), "success": True, "code": 200}
    frappe.local.response['data'] = None
@frappe.whitelist(allow_guest=True)
def get_management_works(**kwards):
    """List the authenticated supervisor's Work Management Applications.

    Optional payload key `contract` narrows the list with a LIKE match on
    the contract id.  Writes the rows into frappe.local.response['data'].
    """
    # Default to Arabic, overridable via the Language request header.
    lang = "ar"
    if frappe.get_request_header("Language"):
        lang = frappe.get_request_header("Language")
    frappe.local.lang = lang
    data = kwards
    check = check_token()
    user1 = None
    contract = None
    if check and "user" in check:
        user1 = check['user']
    if 'contract' in data:
        contract = data['contract']
    if not user1:
        frappe.local.response['http_status_code'] = 403
        frappe.local.response['status'] = {"message": _("Not Authorized"), "success": False, "code": 403}
        frappe.local.response['data'] = None
        return
    result = []
    works = None
    if contract is not None and contract != "":
        works = frappe.get_all("Work Management Application", fields=["*"], filters={"supervisor": user1.name, "contract": ['like', "%" + contract + "%"]})
    else:
        works = frappe.get_all("Work Management Application", fields=["*"], filters={"supervisor": user1.name})
    for work in works:
        # Every field used below is already on the get_all row, so the
        # previous per-row frappe.get_doc() call was dead code -- it issued
        # an extra database query per work and its result was never used.
        result.append({
            "id": work.name,
            "contract": work.contract,
            "date": work.date,
            "customer": work.customer,
            "work": work.work,
            "email": work.email,
        })
    frappe.local.response['status'] = {"message": _("Works list "), "success": True, "code": 200}
    frappe.local.response['data'] = result
@frappe.whitelist(allow_guest=True)
def get_works(**kwards):
    """List the Work Applications of a contract, including their images.

    Payload key: contract (required).  For every matching work the full
    document is fetched so the `images` child table can be read.  Writes
    the rows into frappe.local.response['data'].
    """
    # Default to Arabic, overridable via the Language request header.
    lang = "ar"
    if frappe.get_request_header("Language"):
        lang = frappe.get_request_header("Language")
    frappe.local.lang = lang
    data = kwards
    check = check_token()
    user1 = None
    contract = None
    if check and "user" in check:
        user1 = check['user']
    if not user1:
        frappe.local.response['http_status_code'] = 403
        frappe.local.response['status'] = {"message": _("Not Authorized"), "success": False, "code": 403}
        frappe.local.response['data'] = None
        return
    if "contract" in data:
        contract = data['contract']
    else:
        frappe.local.response['status'] = {"message": _("contract id required"), "success": False, "code": 403}
        frappe.local.response['data'] = None
        return
    result = []
    works = frappe.get_all("Work Application",fields =["*"],filters= {"contract":contract})
    for work in works:
        # get_all rows do not carry child tables, so load the document to
        # read its images child rows.
        work_doc = frappe.get_doc("Work Application",work.name)
        images = []
        if work_doc.images is not None:
            for image in work_doc.images:
                images.append({
                    "image" : image.images
                })
        result.append({
            "id":work.name,
            "contract":work.contract,
            "date":work.date,
            "status" :_(work.status),
            "images" : images
        })
    frappe.local.response['status'] = {"message": _("Works list "), "success": True, "code": 200}
    frappe.local.response['data'] = result
@frappe.whitelist(allow_guest=True)
def get_done_works(**kwards):
    """List a contract's works once the contract has ended.

    Payload key: contract (required).  Rows are only returned when the
    Contract Application's contract_status equals "end of the contract";
    otherwise data is None.  Writes through frappe.local.response.
    """
    # Default to Arabic, overridable via the Language request header.
    lang = "ar"
    if frappe.get_request_header("Language"):
        lang = frappe.get_request_header("Language")
    frappe.local.lang = lang
    data = kwards
    check = check_token()
    user1 = None
    contract = None
    if check and "user" in check:
        user1 = check['user']
    if not user1:
        frappe.local.response['http_status_code'] = 403
        frappe.local.response['status'] = {"message": _("Not Authorized"), "success": False, "code": 403}
        frappe.local.response['data'] = None
        return
    if "contract" in data:
        contract = data['contract']
    else:
        frappe.local.response['status'] = {"message": _("contract id required"), "success": False, "code": 403}
        frappe.local.response['data'] = None
        return
    contract_doc = frappe.get_doc("Contract Application", contract)
    if contract_doc.contract_status == "end of the contract":
        result = []
        works = frappe.get_all("Work Application", fields=["*"], filters={"contract": contract})
        for work in works:
            # get_all rows do not carry child tables, so iterating
            # work.images directly would fail; fetch the full document and
            # guard against a missing child table, exactly as get_works does.
            work_doc = frappe.get_doc("Work Application", work.name)
            images = []
            if work_doc.images is not None:
                for image in work_doc.images:
                    images.append({
                        "image": image.images
                    })
            result.append({
                "id": work.name,
                "contract": work.contract,
                "date": work.date,
                "status": _(work.status),
                "images": images
            })
        # NOTE(review): the success message says "Work created successfully"
        # on a list endpoint -- confirm clients ignore it before renaming.
        frappe.local.response['status'] = {"message": _("Work created successfully"), "success": True, "code": 200}
        frappe.local.response['data'] = result
    else:
        frappe.local.response['status'] = {"message": _("Contract Not completed yet "), "success": True, "code": 200}
        frappe.local.response['data'] = None
@frappe.whitelist(allow_guest=True)
def check_token():
    """Validate the request's Bearer access token.

    On success returns ``{"user": <Supervisor doc>}``.  On any failure
    writes a Not Authorized response into frappe.local.response, records
    an Api Log entry, and returns None.
    """
    # NOTE(review): hard-coded JWT secret — should live in site config.
    secret_key = "Me System"
    frappe.local.lang = "ar"
    log = frappe.get_doc({"doctype": "Api Log"})
    lang = "ar"
    if frappe.get_request_header("Language"):
        lang = frappe.get_request_header("Language")
    frappe.local.lang = lang

    def _reject(http_code=403):
        # Shared failure path: persist the log entry and write the
        # Not Authorized response (original repeated this block 5 times).
        log.response = _("Not Authorized")
        log.flags.ignore_permissions = True
        log.insert()
        frappe.db.commit()
        frappe.local.response['http_status_code'] = http_code
        frappe.local.response['status'] = {"message": _("Not Authorized"), "success": False, "code": 403}
        frappe.local.response['data'] = None

    if not frappe.get_request_header("Authorization"):
        _reject()
        return
    authorization_header = frappe.get_request_header("Authorization").split(" ")
    # BUG FIX: the original used `and`, which only rejected when BOTH checks
    # failed; a malformed header such as "Bearer" alone or "Token x y"
    # slipped through.  Reject when EITHER check fails.
    if authorization_header[0] != "Bearer" or len(authorization_header) != 2:
        _reject()
        return
    token = frappe.get_request_header("Authorization").replace('Bearer ', '')
    log.token = token
    user_devices = frappe.get_all("User Device", ['name'],
        filters={"access_token": token, "docstatus": ['<', 2]})
    if not user_devices:
        _reject()
        return
    try:
        # Signature/expiry check only; the decoded payload is not used.
        jwt.decode(token, secret_key, algorithms="HS256")
    except Exception:
        _reject(http_code=401)
        return
    user_device = frappe.get_doc("User Device", user_devices[0].name)
    if not user_device.user:
        _reject()
        return
    supervisor = frappe.get_doc("Supervisor", user_device.user)
    log.response = "success login"
    log.flags.ignore_permissions = True
    log.insert()
    frappe.db.commit()
    return {"user": supervisor}
@frappe.whitelist(allow_guest=True)
def updatefile(data):
    """Save every uploaded image in the request as a public File document.

    Uploads are expected under keys ``image[0]``, ``image[1]``, ... in
    frappe.request.files.  Returns a list of ``{"images": file_url}`` dicts.
    """
    gallery = []
    # Kept from the original: raises if the session user record is missing.
    frappe.get_doc("User", frappe.session.user)
    for i in range(len(frappe.request.files)):
        file = frappe.request.files['image[' + str(i) + ']']
        content = file.stream.read()
        filename = file.filename
        frappe.local.uploaded_file = content
        frappe.local.uploaded_filename = filename
        ret = frappe.new_doc("File")
        ret.folder = 'Home'
        ret.file_name = filename
        ret.content = content
        ret.is_private = cint(0)  # always public, as in the original
        ret.save(ignore_permissions=True)
        frappe.db.commit()
        gallery.append({
            'images': ret.file_url,
        })
    return gallery
| 33.215223
| 148
| 0.597866
| 1,429
| 12,655
| 5.16585
| 0.10077
| 0.089407
| 0.131265
| 0.071119
| 0.77391
| 0.739772
| 0.719317
| 0.703062
| 0.696017
| 0.695205
| 0
| 0.013259
| 0.261004
| 12,655
| 381
| 149
| 33.215223
| 0.776091
| 0.00806
| 0
| 0.716981
| 0
| 0
| 0.165233
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022013
| false
| 0.003145
| 0.034591
| 0
| 0.110063
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
299e5e261ce152883e331debcfb7f10cfc114866
| 13,480
|
py
|
Python
|
tests/test_cli.py
|
ads-ad-itcenter/qunomon-lite
|
9171026426ce69becbb55029e7e0bc24a8c7de28
|
[
"Apache-2.0"
] | 3
|
2021-12-14T02:00:53.000Z
|
2022-03-28T01:47:11.000Z
|
tests/test_cli.py
|
ads-ad-itcenter/qunomon-lite
|
9171026426ce69becbb55029e7e0bc24a8c7de28
|
[
"Apache-2.0"
] | 3
|
2021-12-07T11:50:00.000Z
|
2022-03-29T04:37:42.000Z
|
tests/test_cli.py
|
ads-ad-itcenter/qunomon-lite
|
9171026426ce69becbb55029e7e0bc24a8c7de28
|
[
"Apache-2.0"
] | null | null | null |
import pathlib
import re
import sys
import textwrap
from typing import List
import docker
import pytest
from pytest_mock import MockerFixture
from rich.console import Console
from qunomon_lite import ait, ait_core, cli
def _dedent(heredoc: str) -> str:
return textwrap.dedent(heredoc)[1:-1]
class TestMainCommand:
    """Tests for the top-level `qunomon-lite` command (no sub-command)."""

    # Expected --help/usage text.  argparse renamed its "optional
    # arguments:" section to "options:" in Python 3.10, hence the
    # version-dependent override below.
    MESSAGE_FOR_USAGE = _dedent(
        """
        usage: qunomon-lite [-h] {run,result-show} ...

        positional arguments:
          {run,result-show}
            run              see `run -h`
            result-show      see `run -h`

        optional arguments:
          -h, --help         show this help message and exit
        """
    )
    if sys.version_info >= (3, 10):
        MESSAGE_FOR_USAGE = _dedent(
            """
            usage: qunomon-lite [-h] {run,result-show} ...

            positional arguments:
              {run,result-show}
                run              see `run -h`
                result-show      see `run -h`

            options:
              -h, --help         show this help message and exit
            """
        )

    def test_main(
        self,
        mocker: MockerFixture,
        capsys: pytest.CaptureFixture,
    ):
        # With no arguments the CLI prints usage to stdout and returns
        # normally (no SystemExit).
        mocker.patch.object(
            sys,
            "argv",
            ["qunomon-lite"],
        )
        cli.main()
        cap = capsys.readouterr()
        assert cap.out == self.MESSAGE_FOR_USAGE
        assert cap.err == ""

    @pytest.mark.parametrize(
        "args",
        [
            (["-h"]),
            (["--help"]),
        ],
    )
    def test_main_help(
        self,
        mocker: MockerFixture,
        capsys: pytest.CaptureFixture,
        args: List[str],
    ):
        # -h/--help exit with status 0 after printing usage to stdout.
        mocker.patch.object(
            sys,
            "argv",
            ["qunomon-lite", *args],
        )
        with pytest.raises(SystemExit) as e:
            cli.main()
        assert e.value.code == 0
        cap = capsys.readouterr()
        assert cap.out == self.MESSAGE_FOR_USAGE
        assert cap.err == ""

    @pytest.mark.parametrize(
        "args,err_msg",
        [
            (["dummy"], "invalid choice: 'dummy' (choose from 'run', 'result-show')"),
            (["--dummy"], "unrecognized arguments: --dummy"),
            (["-d"], "unrecognized arguments: -d"),
        ],
    )
    def test_main_invalid_args(
        self,
        mocker: MockerFixture,
        capsys: pytest.CaptureFixture,
        args: List[str],
        err_msg: str,
    ):
        # Invalid arguments exit with status 2 and report on stderr.
        mocker.patch.object(
            sys,
            "argv",
            ["qunomon-lite", *args],
        )
        with pytest.raises(SystemExit) as e:
            cli.main()
        assert e.value.code == 2
        cap = capsys.readouterr()
        assert cap.out == ""
        assert "usage: qunomon-lite [-h] {run,result-show} ..." in cap.err
        assert err_msg in cap.err
class TestSubCommandForRun:
    """Tests for the `qunomon-lite run` sub-command."""

    # Expected usage text.  argparse changed both the "optional arguments:"
    # heading and the rendering of nargs='*' options in Python 3.10.
    MESSAGE_FOR_USAGE = _dedent(
        """
        usage: qunomon-lite run [-h] [--inventories [INVENTORIES [INVENTORIES ...]]]
                                [--params [PARAMS [PARAMS ...]]]
                                ait

        positional arguments:
          ait

        optional arguments:
          -h, --help            show this help message and exit
          --inventories [INVENTORIES [INVENTORIES ...]]
          --params [PARAMS [PARAMS ...]]
        """
    )
    if sys.version_info >= (3, 10):
        MESSAGE_FOR_USAGE = _dedent(
            """
            usage: qunomon-lite run [-h] [--inventories [INVENTORIES ...]]
                                    [--params [PARAMS ...]]
                                    ait

            positional arguments:
              ait

            options:
              -h, --help            show this help message and exit
              --inventories [INVENTORIES ...]
              --params [PARAMS ...]
            """
        )

    @pytest.mark.parametrize(
        "args",
        [
            (["run", "-h"]),
            (["run", "--help"]),
        ],
    )
    def test_main_help(
        self,
        mocker: MockerFixture,
        capsys: pytest.CaptureFixture,
        args: List[str],
    ):
        # -h/--help on the sub-command exits 0 and prints its usage.
        mocker.patch.object(
            sys,
            "argv",
            ["qunomon-lite", *args],
        )
        with pytest.raises(SystemExit) as e:
            cli.main()
        assert e.value.code == 0
        cap = capsys.readouterr()
        assert cap.out == self.MESSAGE_FOR_USAGE
        assert cap.err == ""

    @pytest.mark.parametrize(
        "args,err_msg",
        [
            (
                ["run", "--dummy"],
                "the following arguments are required: ait",
            ),
            (
                ["run", "-d"],
                "the following arguments are required: ait",
            ),
        ],
    )
    def test_main_invalid_args(
        self,
        mocker: MockerFixture,
        capsys: pytest.CaptureFixture,
        args: List[str],
        err_msg: str,
    ):
        # Missing the required `ait` argument exits 2 with an error on stderr.
        mocker.patch.object(
            sys,
            "argv",
            ["qunomon-lite", *args],
        )
        with pytest.raises(SystemExit) as e:
            cli.main()
        assert e.value.code == 2
        cap = capsys.readouterr()
        assert cap.out == ""
        assert "usage: qunomon-lite run [-h] [--inventories [INVENTORIES " in cap.err
        assert "[--params [PARAMS " in cap.err
        assert err_msg in cap.err

    def test_main_run(
        self,
        mocker: MockerFixture,
        capsys: pytest.CaptureFixture,
        tmp_path: pathlib.Path,
        ait_stub: str,
    ):
        # Happy path: runs the stub AIT container and reports a run-id.
        mocker.patch.object(
            sys,
            "argv",
            ["qunomon-lite", "run", ait_stub],
        )
        mocker.patch.object(ait, "OUTPUT_ROOT_DIR_PATH", tmp_path)
        cli.main()
        cap = capsys.readouterr()
        assert (
            "Running docker container (image: qunomon-lite/ait-stub:latest)" in cap.out
        )
        assert "Finished! run-id: " in cap.out
        assert cap.err == ""

    def test_main_run_full_option(
        self,
        mocker: MockerFixture,
        capsys: pytest.CaptureFixture,
        tmp_path: pathlib.Path,
        shared_datadir: pathlib.Path,
        ait_stub: str,
    ):
        # --inventories / --params must be forwarded into ait.input.json.
        mocker.patch.object(
            sys,
            "argv",
            [
                "qunomon-lite",
                "run",
                ait_stub,
                "--inventories",
                "inventory_sample=%s" % str(shared_datadir.resolve() / "sample.txt"),
                "--params",
                "p1=ppp1",
            ],
        )
        mocker.patch.object(ait, "OUTPUT_ROOT_DIR_PATH", tmp_path)
        # Pin the generated run-id so output paths are predictable.
        mocker.patch.object(ait, "_generate_run_id", return_value="run-id")
        cli.main()
        cap = capsys.readouterr()
        assert (
            "Running docker container (image: qunomon-lite/ait-stub:latest)" in cap.out
        )
        assert "Finished! run-id: %s" % "run-id" in cap.out
        assert (
            "See output directory for results: \n%s"
            % str(tmp_path.resolve() / "run-id")
            in cap.out
        )
        assert cap.err == ""
        ait_input_json_expected = {
            "testbed_mount_volume_path": "/usr/local/qai/mnt",
            "job_id": "-",
            "run_id": "-",
            "Inventories": [
                {
                    "Name": "inventory_sample",
                    "Value": "/usr/local/qai/inventory/sample.txt",
                },
            ],
            "MethodParams": [
                {"Name": "p1", "Value": "ppp1"},
            ],
        }
        assert (
            ait_core._load_json_file(tmp_path / "run-id/ait.input.json")
            == ait_input_json_expected
        )

    def test_main_run_execution_error(
        self,
        mocker: MockerFixture,
        capsys: pytest.CaptureFixture,
        tmp_path: pathlib.Path,
        ait_stub_for_err: str,
    ):
        # The container runs but the AIT itself fails -> exit 1, E901 on stderr.
        mocker.patch.object(
            sys,
            "argv",
            ["qunomon-lite", "run", ait_stub_for_err],
        )
        mocker.patch.object(ait, "OUTPUT_ROOT_DIR_PATH", tmp_path)
        with pytest.raises(SystemExit) as e:
            cli.main()
        assert e.value.code == 1
        cap = capsys.readouterr()
        assert (
            "Running docker container (image: qunomon-lite/ait-stub-for-err:latest)"
            in cap.out
        )
        assert "Finished! run-id: " not in cap.out
        assert (
            "AIT docker container running succeeded, "
            + "but AIT execution error occured. "
            + "Error Code: E901, Error Detail: Traceback (most recent call last):"
            in cap.err
        )

    def test_main_run_docker_error(
        self,
        mocker: MockerFixture,
        capsys: pytest.CaptureFixture,
        tmp_path: pathlib.Path,
    ):
        # Docker API errors propagate as exit 1 with the message on stderr.
        mocker.patch.object(
            sys,
            "argv",
            ["qunomon-lite", "run", "repo/name:ver"],
        )
        mocker.patch.object(ait, "OUTPUT_ROOT_DIR_PATH", tmp_path)
        mocker.patch.object(
            ait_core.Runner,
            "_docker_run",
            side_effect=docker.errors.APIError("dummy docker api error"),
        )
        with pytest.raises(SystemExit) as e:
            cli.main()
        assert e.value.code == 1
        cap = capsys.readouterr()
        assert "Running docker container (image: repo/name:ver)" in cap.out
        assert "Finished! run-id: " not in cap.out
        assert "dummy docker api error" in cap.err
class TestSubCommandForResultShow:
    """Tests for the `qunomon-lite result-show` sub-command."""

    # Expected usage text; "optional arguments:" became "options:" in 3.10.
    MESSAGE_FOR_USAGE = _dedent(
        """
        usage: qunomon-lite result-show [-h] run_id

        positional arguments:
          run_id

        optional arguments:
          -h, --help  show this help message and exit
        """
    )
    if sys.version_info >= (3, 10):
        MESSAGE_FOR_USAGE = _dedent(
            """
            usage: qunomon-lite result-show [-h] run_id

            positional arguments:
              run_id

            options:
              -h, --help  show this help message and exit
            """
        )

    @pytest.mark.parametrize(
        "args",
        [
            (["result-show", "-h"]),
            (["result-show", "--help"]),
        ],
    )
    def test_main_help(
        self,
        mocker: MockerFixture,
        capsys: pytest.CaptureFixture,
        args: List[str],
    ):
        # -h/--help on the sub-command exits 0 and prints its usage.
        mocker.patch.object(
            sys,
            "argv",
            ["qunomon-lite", *args],
        )
        with pytest.raises(SystemExit) as e:
            cli.main()
        assert e.value.code == 0
        cap = capsys.readouterr()
        assert cap.out == self.MESSAGE_FOR_USAGE
        assert cap.err == ""

    @pytest.mark.parametrize(
        "args,err_msg",
        [
            (
                ["result-show", "--dummy"],
                "the following arguments are required: run_id",
            ),
            (
                ["result-show", "-d"],
                "the following arguments are required: run_id",
            ),
        ],
    )
    def test_main_invalid_args(
        self,
        mocker: MockerFixture,
        capsys: pytest.CaptureFixture,
        args: List[str],
        err_msg: str,
    ):
        # Missing run_id exits 2; stderr carries usage plus the exact error.
        mocker.patch.object(
            sys,
            "argv",
            ["qunomon-lite", *args],
        )
        with pytest.raises(SystemExit) as e:
            cli.main()
        assert e.value.code == 2
        cap = capsys.readouterr()
        assert cap.out == ""
        assert cap.err == _dedent(
            """
            usage: qunomon-lite result-show [-h] run_id
            qunomon-lite result-show: error: %s
            """
            % err_msg
        )

    def test_main_result_show(
        self,
        mocker: MockerFixture,
        capsys: pytest.CaptureFixture,
        shared_datadir: pathlib.Path,
    ):
        # "latest" resolves to the most recent output dir (output4 here).
        mocker.patch.object(
            sys,
            "argv",
            ["qunomon-lite", "result-show", "latest"],
        )
        # Very wide console so rich does not wrap/truncate the table.
        mocker.patch.object(ait, "console", Console(width=10000))
        mocker.patch.object(ait, "OUTPUT_ROOT_DIR_PATH", shared_datadir / "output_dirs")
        cli.main()
        cap = capsys.readouterr()
        assert (
            str(shared_datadir.resolve() / "output_dirs/output4/-/-/ait.output.json")
            in cap.out
        )
        assert "Name: eval_mnist_acc_tf2.3" in cap.out
        assert "Version: 0.1" in cap.out
        assert re.search(r"Accuracy\s+0.81652", cap.out)
        assert cap.err == ""

    def test_main_result_show_full_option(
        self,
        mocker: MockerFixture,
        capsys: pytest.CaptureFixture,
        shared_datadir: pathlib.Path,
    ):
        # An explicit run-id ("output1") selects that specific output dir.
        mocker.patch.object(
            sys,
            "argv",
            ["qunomon-lite", "result-show", "output1"],
        )
        mocker.patch.object(ait, "console", Console(width=10000))
        mocker.patch.object(ait, "OUTPUT_ROOT_DIR_PATH", shared_datadir / "output_dirs")
        cli.main()
        cap = capsys.readouterr()
        assert (
            str(shared_datadir.resolve() / "output_dirs/output1/-/-/ait.output.json")
            in cap.out
        )
        assert "Name: eval_mnist_acc_tf2.3" in cap.out
        assert "Version: 0.1" in cap.out
        assert re.search(r"Accuracy\s+0.81651", cap.out)
        assert cap.err == ""
| 25.386064
| 88
| 0.493027
| 1,335
| 13,480
| 4.851685
| 0.125094
| 0.045855
| 0.060367
| 0.032422
| 0.799599
| 0.791107
| 0.747877
| 0.719315
| 0.701714
| 0.668983
| 0
| 0.007134
| 0.386499
| 13,480
| 530
| 89
| 25.433962
| 0.776058
| 0
| 0
| 0.65
| 0
| 0
| 0.174186
| 0.022166
| 0
| 0
| 0
| 0
| 0.128947
| 1
| 0.036842
| false
| 0
| 0.026316
| 0.002632
| 0.081579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
29d336fa070ec95a846e24b00ba79fa4c8e57839
| 4,036
|
py
|
Python
|
home/models.py
|
AbdulBsit/alfred
|
c3e876c828a9c57b40249f9652812ca71b468508
|
[
"bzip2-1.0.6"
] | null | null | null |
home/models.py
|
AbdulBsit/alfred
|
c3e876c828a9c57b40249f9652812ca71b468508
|
[
"bzip2-1.0.6"
] | 5
|
2021-03-19T00:00:20.000Z
|
2021-09-22T18:37:38.000Z
|
home/models.py
|
AbdulBsit/alfred
|
c3e876c828a9c57b40249f9652812ca71b468508
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.db import models
from datetime import datetime
class Contact(models.Model):
    """Contact-form submission."""
    con_id = models.AutoField(primary_key = True)  # explicit surrogate key
    date = models.DateField(default=datetime.now)  # callable default: date at save time
    name = models.CharField(max_length=100,default="")
    phone = models.CharField(max_length=100,default="")
    # NOTE(review): plain CharField, not EmailField — no address validation.
    email = models.CharField(max_length=100,default="")
    organization = models.CharField(max_length=100,default="")
    message = models.CharField(max_length=2000,default="")
    def __str__(self):
        """Human-readable label: id, sender name, submission date."""
        return '%d %s %s' % (self.con_id, self.name, self.date)
class Applying(models.Model):
    """Job/internship application submitted through the site."""
    app_id = models.AutoField(primary_key = True)  # explicit surrogate key
    date = models.DateField(default=datetime.now)  # callable default: date at save time
    firstname = models.CharField(max_length=50, default="")
    lastname = models.CharField(max_length=50, default="")
    phone = models.CharField(max_length=15, default="")
    email = models.EmailField(max_length=50, default="")
    position = models.CharField(max_length=50, default="")
    # Profile / portfolio links supplied by the applicant.
    linkedin = models.CharField(max_length=150, default="")
    github = models.CharField(max_length=150, default="")
    portfolio = models.CharField(max_length=150, default="")
    other = models.CharField(max_length=150, default="")
    twitter = models.CharField(max_length=150, default="")
    # Free-text answers from the application form.
    aspiration = models.CharField(max_length=500, default="")
    skills = models.CharField(max_length=500, default="")
    project = models.CharField(max_length=500, default="")
    techstack = models.CharField(max_length=500, default="")
    education = models.CharField(max_length=500, default="")
    # NOTE(review): field names "availablity" and "protfoliolink" are
    # misspelled; renaming would require a schema migration, so kept as-is.
    availablity = models.CharField(max_length=500, default="")
    protfoliolink = models.CharField(max_length=500, default="")
    opensourcecommit = models.CharField(max_length=1000, default="")
    resume = models.CharField(max_length=1000, default="")
    def __str__(self):
        """Human-readable label: id, full name, date, position applied for."""
        return '%d %s %s %s %s' % (self.app_id, self.firstname, self.lastname, self.date, self.position)
class AdminUpdate(models.Model):
    """Contact/position details for an admin user."""
    admin_id = models.AutoField(primary_key = True)  # explicit surrogate key
    fullname = models.CharField(max_length=50)
    email = models.CharField(max_length=100)
    phone = models.CharField(max_length=15)
    position = models.CharField(max_length=100)
    def __str__(self):
        """Human-readable label: full name and position."""
        return '%s %s' % (self.fullname, self.position)
class SelectIntern1(models.Model):
    """Task-1 assignment for a selected intern (keyed by application id)."""
    app_id = models.IntegerField()  # references Applying.app_id (no FK constraint)
    task1name = models.CharField(max_length=500, default="")
    task1link = models.CharField(max_length=1000, default="")
    applicantEmail = models.CharField(max_length=100,default="")
    def __str__(self):
        """Human-readable label: application id and applicant email."""
        return '%d %s' % (self.app_id, self.applicantEmail)
class SelectIntern2(models.Model):
    """Task-2 assignment for a selected intern (keyed by application id)."""
    app_id = models.IntegerField()  # references Applying.app_id (no FK constraint)
    task2name = models.CharField(max_length=500, default="")
    task2link = models.CharField(max_length=1000, default="")
    applicantEmail = models.CharField(max_length=100,default="")
    def __str__(self):
        """Human-readable label: application id and applicant email."""
        return '%d %s' % (self.app_id, self.applicantEmail)
class SelectIntern3(models.Model):
    """Task-3 assignment for a selected intern (keyed by application id)."""
    app_id = models.IntegerField()  # references Applying.app_id (no FK constraint)
    task3name = models.CharField(max_length=500, default="")
    task3link = models.CharField(max_length=1000, default="")
    applicantEmail = models.CharField(max_length=100,default="")
    def __str__(self):
        """Human-readable label: application id and applicant email."""
        return '%d %s' % (self.app_id, self.applicantEmail)
class Solution1(models.Model):
    """Submitted solution for task 1."""
    solution1 = models.CharField(max_length=500, default="")
    appid = models.CharField(max_length=10,default="")  # application id stored as text
    def __str__(self):
        """Human-readable label: owning application id."""
        return '%s' % (self.appid)
class Solution2(models.Model):
    """Submitted solution for task 2."""
    solution2 = models.CharField(max_length=500, default="")
    appid = models.CharField(max_length=10,default="")  # application id stored as text
    def __str__(self):
        """Human-readable label: owning application id."""
        return '%s' % (self.appid)
class Solution3(models.Model):
    """Submitted solution for task 3."""
    solution3 = models.CharField(max_length=500, default="")
    appid = models.CharField(max_length=10,default="")  # application id stored as text
    def __str__(self):
        """Human-readable label: owning application id."""
        return '%s' % (self.appid)
| 37.37037
| 104
| 0.69004
| 485
| 4,036
| 5.550515
| 0.162887
| 0.143759
| 0.280832
| 0.374443
| 0.761516
| 0.719168
| 0.330981
| 0.330981
| 0.311664
| 0.311664
| 0
| 0.041766
| 0.169475
| 4,036
| 108
| 105
| 37.37037
| 0.761337
| 0
| 0
| 0.325
| 0
| 0
| 0.01189
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1125
| false
| 0
| 0.025
| 0.1125
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
29e6b0a219a389502ea8259582679b23beaee1eb
| 20
|
py
|
Python
|
checkov/version.py
|
shaneutt/checkov
|
cf792b74b87b767eb6ef35cbec7c3355bb9b2343
|
[
"Apache-2.0"
] | 1
|
2020-07-26T17:56:20.000Z
|
2020-07-26T17:56:20.000Z
|
checkov/version.py
|
shaneutt/checkov
|
cf792b74b87b767eb6ef35cbec7c3355bb9b2343
|
[
"Apache-2.0"
] | 1
|
2021-06-02T03:40:50.000Z
|
2021-06-02T03:40:50.000Z
|
checkov/version.py
|
shaneutt/checkov
|
cf792b74b87b767eb6ef35cbec7c3355bb9b2343
|
[
"Apache-2.0"
] | null | null | null |
# Package version string; bumped on each release.
version = '1.0.412'
| 10
| 19
| 0.6
| 4
| 20
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.294118
| 0.15
| 20
| 1
| 20
| 20
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d9b1a99e3dd3226ad088be36d147d376352bb33d
| 195
|
py
|
Python
|
python3.4Smartforest/lib/python3.4/site-packages/django/contrib/humanize/apps.py
|
letouriste001/SmartForest_2.0
|
109b78bf1e8c8404800f377ab969395ccbb617be
|
[
"MIT"
] | null | null | null |
python3.4Smartforest/lib/python3.4/site-packages/django/contrib/humanize/apps.py
|
letouriste001/SmartForest_2.0
|
109b78bf1e8c8404800f377ab969395ccbb617be
|
[
"MIT"
] | null | null | null |
python3.4Smartforest/lib/python3.4/site-packages/django/contrib/humanize/apps.py
|
letouriste001/SmartForest_2.0
|
109b78bf1e8c8404800f377ab969395ccbb617be
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class HumanizeConfig(AppConfig):
    """App configuration for django.contrib.humanize."""
    name = 'django.contrib.humanize'
    verbose_name = _("Humanize")  # lazily translated display name
| 195
| 195
| 0.774359
| 23
| 195
| 6.391304
| 0.695652
| 0.136054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14359
| 195
| 1
| 195
| 195
| 0.88024
| 0
| 0
| 0
| 0
| 0
| 0.158974
| 0.117949
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d9cefdcc373048155e71173e179015d3e06cbb9a
| 30
|
py
|
Python
|
pynder/models/base.py
|
phbprogramming/HackTX2018
|
032d2cb0577c17907836f49a0e2dd7592ae0a1b2
|
[
"BSD-3-Clause"
] | null | null | null |
pynder/models/base.py
|
phbprogramming/HackTX2018
|
032d2cb0577c17907836f49a0e2dd7592ae0a1b2
|
[
"BSD-3-Clause"
] | 2
|
2017-10-12T05:59:39.000Z
|
2017-11-16T02:16:05.000Z
|
pynder/models/base.py
|
phbprogramming/HackTX2018
|
032d2cb0577c17907836f49a0e2dd7592ae0a1b2
|
[
"BSD-3-Clause"
] | 1
|
2019-02-03T19:59:42.000Z
|
2019-02-03T19:59:42.000Z
|
class Model(object):
    """Empty common base class for pynder model objects."""
| 10
| 20
| 0.666667
| 4
| 30
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.233333
| 30
| 2
| 21
| 15
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
d9d771ca04cac4d28d28083b900b6f7311f89fae
| 87
|
py
|
Python
|
ontology/logistic_regression/sherlock/listify_circuits_k06_forward.py
|
ehbeam/neuro-knowledge-engine
|
9dc56ade0bbbd8d14f0660774f787c3f46d7e632
|
[
"MIT"
] | 15
|
2020-07-17T07:10:26.000Z
|
2022-02-18T05:51:45.000Z
|
ontology/neural_network/sherlock/listify_circuits_k06_forward.py
|
YifeiCAO/neuro-knowledge-engine
|
9dc56ade0bbbd8d14f0660774f787c3f46d7e632
|
[
"MIT"
] | 2
|
2022-01-14T09:10:12.000Z
|
2022-01-28T17:32:42.000Z
|
ontology/neural_network/sherlock/listify_circuits_k06_forward.py
|
YifeiCAO/neuro-knowledge-engine
|
9dc56ade0bbbd8d14f0660774f787c3f46d7e632
|
[
"MIT"
] | 4
|
2021-12-22T13:27:32.000Z
|
2022-02-18T05:51:47.000Z
|
#!/bin/python
# Thin driver script: run circuit optimization with k=6, 'forward' direction.
import listify_circuits
listify_circuits.optimize_circuits(6, 'forward')
| 21.75
| 48
| 0.827586
| 11
| 87
| 6.272727
| 0.727273
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012195
| 0.057471
| 87
| 4
| 48
| 21.75
| 0.829268
| 0.137931
| 0
| 0
| 0
| 0
| 0.093333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
d9e81225dd53e21ad12b133413fa1887ab1d8988
| 55
|
py
|
Python
|
tasks/crypto70/code/flag.py
|
internetwache/Internetwache-CTF-2016
|
a511b0ff2f48452a285f19d68a819ea81ecbf099
|
[
"MIT"
] | 83
|
2016-02-21T12:28:17.000Z
|
2021-09-16T13:03:12.000Z
|
tasks/crypto70/code/flag.py
|
internetwache/Internetwache-CTF-2016
|
a511b0ff2f48452a285f19d68a819ea81ecbf099
|
[
"MIT"
] | 1
|
2016-02-22T00:00:15.000Z
|
2016-02-22T00:09:57.000Z
|
tasks/crypto70/code/flag.py
|
internetwache/Internetwache-CTF-2016
|
a511b0ff2f48452a285f19d68a819ea81ecbf099
|
[
"MIT"
] | 33
|
2016-02-22T04:51:24.000Z
|
2020-09-22T15:30:16.000Z
|
# CTF challenge data: the flag to protect and the hash value given to players.
FLAG = "IW{FUCK_YOU_HASH_MY_ASS}"
HASH = "00006800007d"
| 27.5
| 33
| 0.763636
| 9
| 55
| 4.222222
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.22
| 0.090909
| 55
| 2
| 34
| 27.5
| 0.54
| 0
| 0
| 0
| 0
| 0
| 0.642857
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8a09bbac2776b4e49d2259cf22e3ac249375fbc7
| 121,229
|
py
|
Python
|
Tools/python37/Lib/test/test_email/test__header_value_parser.py
|
xxroot/android_universal
|
af2d8627182f936383d792c1f775d87da50f2f6d
|
[
"MIT"
] | 207
|
2018-10-01T08:53:01.000Z
|
2022-03-14T12:15:54.000Z
|
Tools/python37/Lib/test/test_email/test__header_value_parser.py
|
xxroot/android_universal
|
af2d8627182f936383d792c1f775d87da50f2f6d
|
[
"MIT"
] | 8
|
2019-06-29T14:18:51.000Z
|
2022-02-19T07:30:27.000Z
|
Tools/python37/Lib/test/test_email/test__header_value_parser.py
|
xxroot/android_universal
|
af2d8627182f936383d792c1f775d87da50f2f6d
|
[
"MIT"
] | 53
|
2019-03-12T16:50:21.000Z
|
2022-03-15T23:16:18.000Z
|
import string
import unittest
from email import _header_value_parser as parser
from email import errors
from email import policy
from test.test_email import TestEmailBase, parameterize
class TestTokens(TestEmailBase):
# EWWhiteSpaceTerminal
def test_EWWhiteSpaceTerminal(self):
x = parser.EWWhiteSpaceTerminal(' \t', 'fws')
self.assertEqual(x, ' \t')
self.assertEqual(str(x), '')
self.assertEqual(x.value, '')
self.assertEqual(x.token_type, 'fws')
class TestParserMixin:
    """Shared assertion helpers for header-parser tests.

    Subclasses must also derive from a TestCase providing assertEqual and
    assertDefectsEqual (TestEmailBase in this file).
    """

    def _assert_results(self, tl, rest, string, value, defects, remainder,
                        comments=None):
        # Check the parsed token-list against every expected facet; the
        # comments check is opt-in (skipped when comments is None).
        self.assertEqual(str(tl), string)
        self.assertEqual(tl.value, value)
        self.assertDefectsEqual(tl.all_defects, defects)
        self.assertEqual(rest, remainder)
        if comments is not None:
            self.assertEqual(tl.comments, comments)

    def _test_get_x(self, method, source, string, value, defects,
                    remainder, comments=None):
        # Drive a get_* style parser (returns token + unparsed remainder).
        tl, rest = method(source)
        # BUG FIX: the original forwarded comments=None unconditionally,
        # silently discarding a caller-supplied `comments` argument so the
        # comments assertion could never run.
        self._assert_results(tl, rest, string, value, defects, remainder,
                             comments=comments)
        return tl

    def _test_parse_x(self, method, input, string, value, defects,
                      comments=None):
        # Drive a parse_* style parser (consumes the whole input).
        tl = method(input)
        self._assert_results(tl, '', string, value, defects, '', comments)
        return tl
class TestParser(TestParserMixin, TestEmailBase):
# _wsp_splitter
rfc_printable_ascii = bytes(range(33, 127)).decode('ascii')
rfc_atext_chars = (string.ascii_letters + string.digits +
"!#$%&\'*+-/=?^_`{}|~")
rfc_dtext_chars = rfc_printable_ascii.translate(str.maketrans('','',r'\[]'))
def test__wsp_splitter_one_word(self):
self.assertEqual(parser._wsp_splitter('foo', 1), ['foo'])
def test__wsp_splitter_two_words(self):
self.assertEqual(parser._wsp_splitter('foo def', 1),
['foo', ' ', 'def'])
def test__wsp_splitter_ws_runs(self):
self.assertEqual(parser._wsp_splitter('foo \t def jik', 1),
['foo', ' \t ', 'def jik'])
# get_fws
def test_get_fws_only(self):
fws = self._test_get_x(parser.get_fws, ' \t ', ' \t ', ' ', [], '')
self.assertEqual(fws.token_type, 'fws')
def test_get_fws_space(self):
self._test_get_x(parser.get_fws, ' foo', ' ', ' ', [], 'foo')
def test_get_fws_ws_run(self):
self._test_get_x(parser.get_fws, ' \t foo ', ' \t ', ' ', [], 'foo ')
# get_encoded_word
def test_get_encoded_word_missing_start_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_encoded_word('abc')
def test_get_encoded_word_missing_end_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_encoded_word('=?abc')
def test_get_encoded_word_missing_middle_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_encoded_word('=?abc?=')
def test_get_encoded_word_valid_ew(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?this_is_a_test?= bird',
'this is a test',
'this is a test',
[],
' bird')
def test_get_encoded_word_internal_spaces(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?this is a test?= bird',
'this is a test',
'this is a test',
[errors.InvalidHeaderDefect],
' bird')
def test_get_encoded_word_gets_first(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?first?= =?utf-8?q?second?=',
'first',
'first',
[],
' =?utf-8?q?second?=')
def test_get_encoded_word_gets_first_even_if_no_space(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?first?==?utf-8?q?second?=',
'first',
'first',
[],
'=?utf-8?q?second?=')
def test_get_encoded_word_sets_extra_attributes(self):
ew = self._test_get_x(parser.get_encoded_word,
'=?us-ascii*jive?q?first_second?=',
'first second',
'first second',
[],
'')
self.assertEqual(ew.charset, 'us-ascii')
self.assertEqual(ew.lang, 'jive')
def test_get_encoded_word_lang_default_is_blank(self):
ew = self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?first_second?=',
'first second',
'first second',
[],
'')
self.assertEqual(ew.charset, 'us-ascii')
self.assertEqual(ew.lang, '')
def test_get_encoded_word_non_printable_defect(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?first\x02second?=',
'first\x02second',
'first\x02second',
[errors.NonPrintableDefect],
'')
def test_get_encoded_word_leading_internal_space(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?=20foo?=',
' foo',
' foo',
[],
'')
def test_get_encoded_word_quopri_utf_escape_follows_cte(self):
# Issue 18044
self._test_get_x(parser.get_encoded_word,
'=?utf-8?q?=C3=89ric?=',
'Éric',
'Éric',
[],
'')
# get_unstructured
def _get_unst(self, value):
token = parser.get_unstructured(value)
return token, ''
def test_get_unstructured_null(self):
self._test_get_x(self._get_unst, '', '', '', [], '')
def test_get_unstructured_one_word(self):
self._test_get_x(self._get_unst, 'foo', 'foo', 'foo', [], '')
def test_get_unstructured_normal_phrase(self):
self._test_get_x(self._get_unst, 'foo bar bird',
'foo bar bird',
'foo bar bird',
[],
'')
def test_get_unstructured_normal_phrase_with_whitespace(self):
self._test_get_x(self._get_unst, 'foo \t bar bird',
'foo \t bar bird',
'foo bar bird',
[],
'')
def test_get_unstructured_leading_whitespace(self):
self._test_get_x(self._get_unst, ' foo bar',
' foo bar',
' foo bar',
[],
'')
def test_get_unstructured_trailing_whitespace(self):
self._test_get_x(self._get_unst, 'foo bar ',
'foo bar ',
'foo bar ',
[],
'')
def test_get_unstructured_leading_and_trailing_whitespace(self):
self._test_get_x(self._get_unst, ' foo bar ',
' foo bar ',
' foo bar ',
[],
'')
def test_get_unstructured_one_valid_ew_no_ws(self):
self._test_get_x(self._get_unst, '=?us-ascii?q?bar?=',
'bar',
'bar',
[],
'')
def test_get_unstructured_one_ew_trailing_ws(self):
self._test_get_x(self._get_unst, '=?us-ascii?q?bar?= ',
'bar ',
'bar ',
[],
'')
def test_get_unstructured_one_valid_ew_trailing_text(self):
self._test_get_x(self._get_unst, '=?us-ascii?q?bar?= bird',
'bar bird',
'bar bird',
[],
'')
def test_get_unstructured_phrase_with_ew_in_middle_of_text(self):
self._test_get_x(self._get_unst, 'foo =?us-ascii?q?bar?= bird',
'foo bar bird',
'foo bar bird',
[],
'')
def test_get_unstructured_phrase_with_two_ew(self):
self._test_get_x(self._get_unst,
'foo =?us-ascii?q?bar?= =?us-ascii?q?bird?=',
'foo barbird',
'foo barbird',
[],
'')
def test_get_unstructured_phrase_with_two_ew_trailing_ws(self):
self._test_get_x(self._get_unst,
'foo =?us-ascii?q?bar?= =?us-ascii?q?bird?= ',
'foo barbird ',
'foo barbird ',
[],
'')
def test_get_unstructured_phrase_with_ew_with_leading_ws(self):
self._test_get_x(self._get_unst,
' =?us-ascii?q?bar?=',
' bar',
' bar',
[],
'')
def test_get_unstructured_phrase_with_two_ew_extra_ws(self):
self._test_get_x(self._get_unst,
'foo =?us-ascii?q?bar?= \t =?us-ascii?q?bird?=',
'foo barbird',
'foo barbird',
[],
'')
def test_get_unstructured_two_ew_extra_ws_trailing_text(self):
self._test_get_x(self._get_unst,
'=?us-ascii?q?test?= =?us-ascii?q?foo?= val',
'testfoo val',
'testfoo val',
[],
'')
def test_get_unstructured_ew_with_internal_ws(self):
self._test_get_x(self._get_unst,
'=?iso-8859-1?q?hello=20world?=',
'hello world',
'hello world',
[],
'')
def test_get_unstructured_ew_with_internal_leading_ws(self):
self._test_get_x(self._get_unst,
' =?us-ascii?q?=20test?= =?us-ascii?q?=20foo?= val',
' test foo val',
' test foo val',
[],
'')
def test_get_unstructured_invaild_ew(self):
    # ("invaild" typo kept: renaming would change the published test id.)
    # A malformed encoded-word opener is treated as plain text.
    text = '=?test val'
    self._test_get_x(self._get_unst, text, text, text, [], '')
def test_get_unstructured_undecodable_bytes(self):
    # Bytes smuggled in via surrogateescape register a defect but survive.
    text = b'test \xACfoo val'.decode('ascii', 'surrogateescape')
    expected = 'test \uDCACfoo val'
    self._test_get_x(self._get_unst, text, expected, expected,
                     [errors.UndecodableBytesDefect], '')
def test_get_unstructured_undecodable_bytes_in_EW(self):
    # Undecodable bytes inside an encoded word: one defect per EW.
    raw = b'=?us-ascii?q?=20test?= =?us-ascii?q?=20\xACfoo?= val'
    text = raw.decode('ascii', 'surrogateescape')
    expected = ' test \uDCACfoo val'
    self._test_get_x(self._get_unst, text, expected, expected,
                     [errors.UndecodableBytesDefect]*2, '')
def test_get_unstructured_missing_base64_padding(self):
    # Short base64 padding is repaired, with a padding defect noted.
    self._test_get_x(self._get_unst, '=?utf-8?b?dmk?=', 'vi', 'vi',
                     [errors.InvalidBase64PaddingDefect], '')
def test_get_unstructured_invalid_base64_character(self):
    # A non-base64 byte is discarded and flagged as a character defect.
    self._test_get_x(self._get_unst, '=?utf-8?b?dm\x01k===?=', 'vi', 'vi',
                     [errors.InvalidBase64CharactersDefect], '')
def test_get_unstructured_invalid_base64_character_and_bad_padding(self):
    # Bad character and bad padding each produce their own defect.
    self._test_get_x(self._get_unst, '=?utf-8?b?dm\x01k?=', 'vi', 'vi',
                     [errors.InvalidBase64CharactersDefect,
                      errors.InvalidBase64PaddingDefect],
                     '')
def test_get_unstructured_invalid_base64_length(self):
    # bpo-27397: an undecodable base64 length returns the raw payload.
    self._test_get_x(self._get_unst, '=?utf-8?b?abcde?=', 'abcde', 'abcde',
                     [errors.InvalidBase64LengthDefect], '')
def test_get_unstructured_no_whitespace_between_ews(self):
    # Abutting encoded words decode but are flagged as an invalid header.
    self._test_get_x(self._get_unst, '=?utf-8?q?foo?==?utf-8?q?bar?=',
                     'foobar', 'foobar', [errors.InvalidHeaderDefect], '')
# get_qp_ctext
def test_get_qp_ctext_only(self):
    # qp-ctext collapses to a single-space value and is typed 'ptext'.
    token = self._test_get_x(parser.get_qp_ctext, 'foobar', 'foobar',
                             ' ', [], '')
    self.assertEqual(token.token_type, 'ptext')
def test_get_qp_ctext_all_printables(self):
    # Every printable is accepted once backslash and parens are qp-escaped.
    # Fix: dropped an unused `ptext = ...` binding and the stray space in
    # `with_qp. replace(...)`.
    with_qp = self.rfc_printable_ascii.replace('\\', '\\\\')
    with_qp = with_qp.replace('(', r'\(')
    with_qp = with_qp.replace(')', r'\)')
    self._test_get_x(parser.get_qp_ctext,
                     with_qp, self.rfc_printable_ascii, ' ', [], '')
def test_get_qp_ctext_two_words_gets_first(self):
    # Parsing stops at whitespace; the rest of the input is returned.
    self._test_get_x(parser.get_qp_ctext, 'foo de', 'foo', ' ', [], ' de')
def test_get_qp_ctext_following_wsp_preserved(self):
    # The whole trailing whitespace run stays in the unparsed remainder.
    self._test_get_x(parser.get_qp_ctext, 'foo \t\tde', 'foo', ' ', [],
                     ' \t\tde')
def test_get_qp_ctext_up_to_close_paren_only(self):
    # A closing paren terminates ctext and is left for the caller.
    self._test_get_x(parser.get_qp_ctext, 'foo)', 'foo', ' ', [], ')')
def test_get_qp_ctext_wsp_before_close_paren_preserved(self):
    # Whitespace before the closing paren stays in the remainder.
    self._test_get_x(parser.get_qp_ctext, 'foo )', 'foo', ' ', [], ' )')
def test_get_qp_ctext_close_paren_mid_word(self):
    # A close paren splits the token even mid-word.
    self._test_get_x(parser.get_qp_ctext, 'foo)bar', 'foo', ' ', [], ')bar')
def test_get_qp_ctext_up_to_open_paren_only(self):
    # An opening paren terminates ctext and is left for the caller.
    self._test_get_x(parser.get_qp_ctext, 'foo(', 'foo', ' ', [], '(')
def test_get_qp_ctext_wsp_before_open_paren_preserved(self):
    # Whitespace before the opening paren stays in the remainder.
    self._test_get_x(parser.get_qp_ctext, 'foo (', 'foo', ' ', [], ' (')
def test_get_qp_ctext_open_paren_mid_word(self):
    # An open paren splits the token even mid-word.
    self._test_get_x(parser.get_qp_ctext, 'foo(bar', 'foo', ' ', [], '(bar')
def test_get_qp_ctext_non_printables(self):
    # Non-printables are kept in the token but recorded on the defect.
    token = self._test_get_x(parser.get_qp_ctext, 'foo\x00bar)',
                             'foo\x00bar', ' ',
                             [errors.NonPrintableDefect], ')')
    self.assertEqual(token.defects[0].non_printables[0], '\x00')
# get_qcontent
def test_get_qcontent_only(self):
    # qcontent keeps its literal value and is typed 'ptext'.
    token = self._test_get_x(parser.get_qcontent, 'foobar', 'foobar',
                             'foobar', [], '')
    self.assertEqual(token.token_type, 'ptext')
def test_get_qcontent_all_printables(self):
    # Every printable is accepted once backslash and dquote are qp-escaped.
    # Fix: dropped an unused `ptext = ...` binding and the stray space in
    # `with_qp. replace(...)`.
    with_qp = self.rfc_printable_ascii.replace('\\', '\\\\')
    with_qp = with_qp.replace('"', r'\"')
    self._test_get_x(parser.get_qcontent, with_qp,
                     self.rfc_printable_ascii,
                     self.rfc_printable_ascii, [], '')
def test_get_qcontent_two_words_gets_first(self):
    # Parsing stops at whitespace; the rest of the input is returned.
    self._test_get_x(parser.get_qcontent, 'foo de', 'foo', 'foo', [], ' de')
def test_get_qcontent_following_wsp_preserved(self):
    # The whole trailing whitespace run stays in the remainder.
    self._test_get_x(parser.get_qcontent, 'foo \t\tde', 'foo', 'foo', [],
                     ' \t\tde')
def test_get_qcontent_up_to_dquote_only(self):
    # A double quote terminates qcontent and is left for the caller.
    self._test_get_x(parser.get_qcontent, 'foo"', 'foo', 'foo', [], '"')
def test_get_qcontent_wsp_before_close_paren_preserved(self):
    # Whitespace before the closing quote stays in the remainder.
    self._test_get_x(parser.get_qcontent, 'foo "', 'foo', 'foo', [], ' "')
def test_get_qcontent_close_paren_mid_word(self):
    # A quote splits the token even mid-word.
    self._test_get_x(parser.get_qcontent, 'foo"bar', 'foo', 'foo', [],
                     '"bar')
def test_get_qcontent_non_printables(self):
    # Non-printables are kept in the token but recorded on the defect.
    token = self._test_get_x(parser.get_qcontent, 'foo\x00fg"',
                             'foo\x00fg', 'foo\x00fg',
                             [errors.NonPrintableDefect], '"')
    self.assertEqual(token.defects[0].non_printables[0], '\x00')
# get_atext
def test_get_atext_only(self):
    # Plain atext parses whole and is typed 'atext'.
    token = self._test_get_x(parser.get_atext, 'foobar', 'foobar',
                             'foobar', [], '')
    self.assertEqual(token.token_type, 'atext')
def test_get_atext_all_atext(self):
    # Every RFC atext character is consumed in one token.
    # Fix: dropped an unused `atext = ...` binding.
    self._test_get_x(parser.get_atext, self.rfc_atext_chars,
                     self.rfc_atext_chars,
                     self.rfc_atext_chars, [], '')
def test_get_atext_two_words_gets_first(self):
    # Parsing stops at whitespace; the rest of the input is returned.
    self._test_get_x(parser.get_atext, 'foo bar', 'foo', 'foo', [], ' bar')
def test_get_atext_following_wsp_preserved(self):
    # The whole trailing whitespace run stays in the remainder.
    self._test_get_x(parser.get_atext, 'foo \t\tbar', 'foo', 'foo', [],
                     ' \t\tbar')
def test_get_atext_up_to_special(self):
    # A special character ('@') ends the atext run.
    self._test_get_x(parser.get_atext, 'foo@bar', 'foo', 'foo', [], '@bar')
def test_get_atext_non_printables(self):
    # Non-printables are kept in the token but recorded on the defect.
    token = self._test_get_x(parser.get_atext, 'foo\x00bar(',
                             'foo\x00bar', 'foo\x00bar',
                             [errors.NonPrintableDefect], '(')
    self.assertEqual(token.defects[0].non_printables[0], '\x00')
# get_bare_quoted_string
def test_get_bare_quoted_string_only(self):
    # A minimal quoted string parses whole; value drops the quotes.
    token = self._test_get_x(parser.get_bare_quoted_string,
                             '"foo"', '"foo"', 'foo', [], '')
    self.assertEqual(token.token_type, 'bare-quoted-string')
def test_get_bare_quoted_string_must_start_with_dquote(self):
    # Anything before the opening quote (text or space) is a parse error.
    for bad in ('foo"', ' "foo"'):
        with self.assertRaises(errors.HeaderParseError):
            parser.get_bare_quoted_string(bad)
def test_get_bare_quoted_string_only_quotes(self):
    # An empty quoted string is valid and has an empty value.
    self._test_get_x(parser.get_bare_quoted_string, '""', '""', '', [], '')
def test_get_bare_quoted_string_following_wsp_preserved(self):
    # Whitespace after the closing quote is left in the remainder.
    self._test_get_x(parser.get_bare_quoted_string,
                     '"foo"\t bar', '"foo"', 'foo', [], '\t bar')
def test_get_bare_quoted_string_multiple_words(self):
    # Interior spaces are part of the quoted value.
    self._test_get_x(parser.get_bare_quoted_string,
                     '"foo bar moo"', '"foo bar moo"', 'foo bar moo', [], '')
def test_get_bare_quoted_string_multiple_words_wsp_preserved(self):
    # Leading/trailing whitespace inside the quotes is preserved verbatim.
    self._test_get_x(parser.get_bare_quoted_string,
                     '" foo moo\t"', '" foo moo\t"', ' foo moo\t', [], '')
def test_get_bare_quoted_string_end_dquote_mid_word(self):
    # The closing quote ends the token even with text abutting it.
    self._test_get_x(parser.get_bare_quoted_string,
                     '"foo"bar', '"foo"', 'foo', [], 'bar')
def test_get_bare_quoted_string_quoted_dquote(self):
    # A backslash-escaped quote is content, not a terminator.
    self._test_get_x(parser.get_bare_quoted_string,
                     r'"foo\"in"a', r'"foo\"in"', 'foo"in', [], 'a')
def test_get_bare_quoted_string_non_printables(self):
    # Non-printables inside quotes are kept but flagged.
    self._test_get_x(parser.get_bare_quoted_string,
                     '"a\x01a"', '"a\x01a"', 'a\x01a',
                     [errors.NonPrintableDefect], '')
def test_get_bare_quoted_string_no_end_dquote(self):
    # A missing closing quote is synthesized and flagged, with or
    # without trailing whitespace.
    self._test_get_x(parser.get_bare_quoted_string,
                     '"foo', '"foo"', 'foo',
                     [errors.InvalidHeaderDefect], '')
    self._test_get_x(parser.get_bare_quoted_string,
                     '"foo ', '"foo "', 'foo ',
                     [errors.InvalidHeaderDefect], '')
def test_get_bare_quoted_string_empty_quotes(self):
    # NOTE(review): duplicates test_get_bare_quoted_string_only_quotes.
    self._test_get_x(parser.get_bare_quoted_string, '""', '""', '', [], '')
# Issue 16983: apply postel's law to some bad encoding.
def test_encoded_word_inside_quotes(self):
    # Issue 16983: be liberal and decode an EW inside quotes, flagging it.
    self._test_get_x(parser.get_bare_quoted_string,
                     '"=?utf-8?Q?not_really_valid?="',
                     '"not really valid"',
                     'not really valid',
                     [errors.InvalidHeaderDefect], '')
# get_comment
def test_get_comment_only(self):
    # A comment's value is a single space; its text lands in .comments.
    token = self._test_get_x(parser.get_comment,
                             '(comment)', '(comment)', ' ', [], '',
                             ['comment'])
    self.assertEqual(token.token_type, 'comment')
def test_get_comment_must_start_with_paren(self):
    # Anything before the opening paren is a parse error.
    for bad in ('foo"', ' (foo"'):
        with self.assertRaises(errors.HeaderParseError):
            parser.get_comment(bad)
def test_get_comment_following_wsp_preserved(self):
    # Whitespace after the closing paren is left in the remainder.
    self._test_get_x(parser.get_comment,
                     '(comment) \t', '(comment)', ' ', [], ' \t',
                     ['comment'])
def test_get_comment_multiple_words(self):
    # Interior spaces are part of the comment text.
    self._test_get_x(parser.get_comment,
                     '(foo bar) \t', '(foo bar)', ' ', [], ' \t',
                     ['foo bar'])
def test_get_comment_multiple_words_wsp_preserved(self):
    # Whitespace inside the parens is preserved in the comment text.
    self._test_get_x(parser.get_comment,
                     '( foo bar\t ) \t', '( foo bar\t )', ' ', [], ' \t',
                     [' foo bar\t '])
def test_get_comment_end_paren_mid_word(self):
    # The closing paren ends the token even with text abutting it.
    self._test_get_x(parser.get_comment,
                     '(foo)bar', '(foo)', ' ', [], 'bar', ['foo'])
def test_get_comment_quoted_parens(self):
    # Backslash-escaped parens are content; comment text is unescaped.
    self._test_get_x(parser.get_comment,
                     r'(foo\) \(\)bar)', r'(foo\) \(\)bar)', ' ', [], '',
                     ['foo) ()bar'])
def test_get_comment_non_printable(self):
    # A non-printable inside a comment is kept but flagged.
    self._test_get_x(parser.get_comment,
                     '(foo\x7Fbar)', '(foo\x7Fbar)', ' ',
                     [errors.NonPrintableDefect], '', ['foo\x7Fbar'])
def test_get_comment_no_end_paren(self):
    # A missing close paren is synthesized and flagged, with or
    # without trailing whitespace.
    self._test_get_x(parser.get_comment,
                     '(foo bar', '(foo bar)', ' ',
                     [errors.InvalidHeaderDefect], '', ['foo bar'])
    self._test_get_x(parser.get_comment,
                     '(foo bar ', '(foo bar )', ' ',
                     [errors.InvalidHeaderDefect], '', ['foo bar '])
def test_get_comment_nested_comment(self):
    # A nested comment becomes a child token with its own content.
    token = self._test_get_x(parser.get_comment,
                             '(foo(bar))', '(foo(bar))', ' ', [], '',
                             ['foo(bar)'])
    self.assertEqual(token[1].content, 'bar')
def test_get_comment_nested_comment_wsp(self):
    # Whitespace around and inside the nested comment is preserved.
    token = self._test_get_x(parser.get_comment,
                             '(foo ( bar ) )', '(foo ( bar ) )', ' ', [],
                             '', ['foo ( bar ) '])
    self.assertEqual(token[2].content, ' bar ')
def test_get_comment_empty_comment(self):
    # An empty comment is valid and yields an empty comment string.
    self._test_get_x(parser.get_comment, '()', '()', ' ', [], '', [''])
def test_get_comment_multiple_nesting(self):
    # Five levels of nesting: walk inward checking each level's content.
    token = self._test_get_x(parser.get_comment,
                             '(((((foo)))))', '(((((foo)))))', ' ', [], '',
                             ['((((foo))))'])
    for depth in (3, 2, 1, 0):
        token = token[0]
        self.assertEqual(token.content, '('*depth + 'foo' + ')'*depth)
def test_get_comment_missing_end_of_nesting(self):
    # Two unclosed levels: both are synthesized, one defect per level.
    self._test_get_x(parser.get_comment,
                     '(((((foo)))', '(((((foo)))))', ' ',
                     [errors.InvalidHeaderDefect]*2, '', ['((((foo))))'])
def test_get_comment_qs_in_nested_comment(self):
    # A quoted paren inside a nested comment is unescaped in content.
    token = self._test_get_x(parser.get_comment,
                             r'(foo (b\)))', r'(foo (b\)))', ' ', [], '',
                             [r'foo (b\))'])
    self.assertEqual(token[2].content, 'b)')
# get_cfws
def test_get_cfws_only_ws(self):
    # Pure whitespace collapses to a single-space 'cfws' token.
    token = self._test_get_x(parser.get_cfws,
                             ' \t \t', ' \t \t', ' ', [], '', [])
    self.assertEqual(token.token_type, 'cfws')
def test_get_cfws_only_comment(self):
    # A lone comment is valid cfws; its text is reachable as content.
    token = self._test_get_x(parser.get_cfws,
                             '(foo)', '(foo)', ' ', [], '', ['foo'])
    self.assertEqual(token[0].content, 'foo')
def test_get_cfws_only_mixed(self):
    # Interleaved whitespace and comments all fold into one cfws token.
    token = self._test_get_x(parser.get_cfws,
                             ' (foo ) ( bar) ', ' (foo ) ( bar) ', ' ', [],
                             '', ['foo ', ' bar'])
    self.assertEqual(token[1].content, 'foo ')
    self.assertEqual(token[3].content, ' bar')
def test_get_cfws_ends_at_non_leader(self):
    # cfws stops at the first non-cfws character.
    token = self._test_get_x(parser.get_cfws,
                             '(foo) bar', '(foo) ', ' ', [], 'bar', ['foo'])
    self.assertEqual(token[0].content, 'foo')
def test_get_cfws_ends_at_non_printable(self):
    # A non-printable outside a comment also terminates the cfws run.
    token = self._test_get_x(parser.get_cfws,
                             '(foo) \x07', '(foo) ', ' ', [], '\x07',
                             ['foo'])
    self.assertEqual(token[0].content, 'foo')
def test_get_cfws_non_printable_in_comment(self):
    # A non-printable inside a comment is kept but flagged.
    token = self._test_get_x(parser.get_cfws,
                             '(foo \x07) "test"', '(foo \x07) ', ' ',
                             [errors.NonPrintableDefect], '"test"',
                             ['foo \x07'])
    self.assertEqual(token[0].content, 'foo \x07')
def test_get_cfws_header_ends_in_comment(self):
    # An unterminated comment at end of header is closed and flagged.
    token = self._test_get_x(parser.get_cfws,
                             ' (foo ', ' (foo )', ' ',
                             [errors.InvalidHeaderDefect], '', ['foo '])
    self.assertEqual(token[1].content, 'foo ')
def test_get_cfws_multiple_nested_comments(self):
    # Each top-level comment keeps its own (possibly nested) text.
    token = self._test_get_x(parser.get_cfws,
                             '(foo (bar)) ((a)(a))', '(foo (bar)) ((a)(a))',
                             ' ', [], '', ['foo (bar)', '(a)(a)'])
    self.assertEqual(token[0].comments, ['foo (bar)'])
    self.assertEqual(token[2].comments, ['(a)(a)'])
# get_quoted_string
def test_get_quoted_string_only(self):
    # A bare quoted string exposes token_type, quoted_value and content.
    token = self._test_get_x(parser.get_quoted_string,
                             '"bob"', '"bob"', 'bob', [], '')
    self.assertEqual(token.token_type, 'quoted-string')
    self.assertEqual(token.quoted_value, '"bob"')
    self.assertEqual(token.content, 'bob')
def test_get_quoted_string_with_wsp(self):
    # Surrounding whitespace folds into single spaces in quoted_value.
    token = self._test_get_x(parser.get_quoted_string,
                             '\t "bob" ', '\t "bob" ', ' bob ', [], '')
    self.assertEqual(token.quoted_value, ' "bob" ')
    self.assertEqual(token.content, 'bob')
def test_get_quoted_string_with_comments_and_wsp(self):
    # Comments on either side are kept as child cfws tokens.
    token = self._test_get_x(parser.get_quoted_string,
                             ' (foo) "bob"(bar)', ' (foo) "bob"(bar)',
                             ' bob ', [], '')
    self.assertEqual(token[0][1].content, 'foo')
    self.assertEqual(token[2][0].content, 'bar')
    self.assertEqual(token.content, 'bob')
    self.assertEqual(token.quoted_value, ' "bob" ')
def test_get_quoted_string_with_multiple_comments(self):
    # Multiple comments per side accumulate in the cfws .comments list.
    token = self._test_get_x(parser.get_quoted_string,
                             ' (foo) (bar) "bob"(bird)',
                             ' (foo) (bar) "bob"(bird)', ' bob ', [], '')
    self.assertEqual(token[0].comments, ['foo', 'bar'])
    self.assertEqual(token[2].comments, ['bird'])
    self.assertEqual(token.content, 'bob')
    self.assertEqual(token.quoted_value, ' "bob" ')
def test_get_quoted_string_non_printable_in_comment(self):
    # A non-printable in the leading comment is kept but flagged.
    token = self._test_get_x(parser.get_quoted_string,
                             ' (\x0A) "bob"', ' (\x0A) "bob"', ' bob',
                             [errors.NonPrintableDefect], '')
    self.assertEqual(token[0].comments, ['\x0A'])
    self.assertEqual(token.content, 'bob')
    self.assertEqual(token.quoted_value, ' "bob"')
def test_get_quoted_string_non_printable_in_qcontent(self):
    # A non-printable inside the quotes is kept but flagged.
    token = self._test_get_x(parser.get_quoted_string,
                             ' (a) "a\x0B"', ' (a) "a\x0B"', ' a\x0B',
                             [errors.NonPrintableDefect], '')
    self.assertEqual(token[0].comments, ['a'])
    self.assertEqual(token.content, 'a\x0B')
    self.assertEqual(token.quoted_value, ' "a\x0B"')
def test_get_quoted_string_internal_ws(self):
    # Whitespace inside the quotes is preserved exactly.
    token = self._test_get_x(parser.get_quoted_string,
                             ' (a) "foo  bar "', ' (a) "foo  bar "',
                             ' foo  bar ', [], '')
    self.assertEqual(token[0].comments, ['a'])
    self.assertEqual(token.content, 'foo  bar ')
    self.assertEqual(token.quoted_value, ' "foo  bar "')
def test_get_quoted_string_header_ends_in_comment(self):
    # An unterminated trailing comment is closed and flagged.
    token = self._test_get_x(parser.get_quoted_string,
                             ' (a) "bob" (a', ' (a) "bob" (a)', ' bob ',
                             [errors.InvalidHeaderDefect], '')
    self.assertEqual(token[0].comments, ['a'])
    self.assertEqual(token[2].comments, ['a'])
    self.assertEqual(token.content, 'bob')
    self.assertEqual(token.quoted_value, ' "bob" ')
def test_get_quoted_string_header_ends_in_qcontent(self):
    # A missing closing quote is synthesized and flagged.
    token = self._test_get_x(parser.get_quoted_string,
                             ' (a) "bob', ' (a) "bob"', ' bob',
                             [errors.InvalidHeaderDefect], '')
    self.assertEqual(token[0].comments, ['a'])
    self.assertEqual(token.content, 'bob')
    self.assertEqual(token.quoted_value, ' "bob"')
def test_get_quoted_string_no_quoted_string(self):
    # cfws followed by an atom (no quotes) is a parse error.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_quoted_string(' (ab) xyz')
def test_get_quoted_string_qs_ends_at_noncfws(self):
    # Parsing stops at the first non-cfws token after the string.
    token = self._test_get_x(parser.get_quoted_string,
                             '\t "bob" fee', '\t "bob" ', ' bob ', [],
                             'fee')
    self.assertEqual(token.content, 'bob')
    self.assertEqual(token.quoted_value, ' "bob" ')
# get_atom
def test_get_atom_only(self):
    # A bare atom parses whole and is typed 'atom'.
    token = self._test_get_x(parser.get_atom, 'bob', 'bob', 'bob', [], '')
    self.assertEqual(token.token_type, 'atom')
def test_get_atom_with_wsp(self):
    # Surrounding whitespace folds to single spaces in the value.
    self._test_get_x(parser.get_atom, '\t bob  ', '\t bob  ', ' bob ', [],
                     '')
def test_get_atom_with_comments_and_wsp(self):
    # Comments on either side are kept as child cfws tokens.
    token = self._test_get_x(parser.get_atom,
                             ' (foo) bob(bar)', ' (foo) bob(bar)', ' bob ',
                             [], '')
    self.assertEqual(token[0][1].content, 'foo')
    self.assertEqual(token[2][0].content, 'bar')
def test_get_atom_with_multiple_comments(self):
    # Multiple comments per side accumulate in .comments.
    token = self._test_get_x(parser.get_atom,
                             ' (foo) (bar) bob(bird)',
                             ' (foo) (bar) bob(bird)', ' bob ', [], '')
    self.assertEqual(token[0].comments, ['foo', 'bar'])
    self.assertEqual(token[2].comments, ['bird'])
def test_get_atom_non_printable_in_comment(self):
    # A non-printable inside the leading comment is kept but flagged.
    token = self._test_get_x(parser.get_atom,
                             ' (\x0A) bob', ' (\x0A) bob', ' bob',
                             [errors.NonPrintableDefect], '')
    self.assertEqual(token[0].comments, ['\x0A'])
def test_get_atom_non_printable_in_atext(self):
    # A non-printable inside the atext itself is kept but flagged.
    token = self._test_get_x(parser.get_atom,
                             ' (a) a\x0B', ' (a) a\x0B', ' a\x0B',
                             [errors.NonPrintableDefect], '')
    self.assertEqual(token[0].comments, ['a'])
def test_get_atom_header_ends_in_comment(self):
    # An unterminated trailing comment is closed and flagged.
    token = self._test_get_x(parser.get_atom,
                             ' (a) bob (a', ' (a) bob (a)', ' bob ',
                             [errors.InvalidHeaderDefect], '')
    self.assertEqual(token[0].comments, ['a'])
    self.assertEqual(token[2].comments, ['a'])
def test_get_atom_no_atom(self):
    # cfws with nothing after it is a parse error.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_atom(' (ab) ')
def test_get_atom_no_atom_before_special(self):
    # cfws followed directly by a special is a parse error.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_atom(' (ab) @')
def test_get_atom_atom_ends_at_special(self):
    # A special character ends the atom; trailing cfws is still consumed.
    token = self._test_get_x(parser.get_atom,
                             ' (foo) bob(bar)  @bang', ' (foo) bob(bar)  ',
                             ' bob ', [], '@bang')
    self.assertEqual(token[0].comments, ['foo'])
    self.assertEqual(token[2].comments, ['bar'])
def test_get_atom_atom_ends_at_noncfws(self):
    # A second atom is not absorbed; it stays in the remainder.
    self._test_get_x(parser.get_atom, 'bob  fred', 'bob  ', 'bob ', [],
                     'fred')
def test_get_atom_rfc2047_atom(self):
    # An encoded word in atom position is decoded.
    self._test_get_x(parser.get_atom,
                     '=?utf-8?q?=20bob?=', ' bob', ' bob', [], '')
# get_dot_atom_text
def test_get_dot_atom_text(self):
    # Three atoms and two dots yield a five-element dot-atom-text token.
    token = self._test_get_x(parser.get_dot_atom_text,
                             'foo.bar.bang', 'foo.bar.bang', 'foo.bar.bang',
                             [], '')
    self.assertEqual(token.token_type, 'dot-atom-text')
    self.assertEqual(len(token), 5)
def test_get_dot_atom_text_lone_atom_is_valid(self):
    # A single atom with no dots is valid dot-atom-text.
    # Fix: dropped an unused `dot_atom_text = ...` binding.
    self._test_get_x(parser.get_dot_atom_text, 'foo', 'foo', 'foo', [], '')
def test_get_dot_atom_text_raises_on_leading_dot(self):
    # dot-atom-text may not begin with a dot.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_dot_atom_text('.foo.bar')
def test_get_dot_atom_text_raises_on_trailing_dot(self):
    # dot-atom-text may not end with a dot.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_dot_atom_text('foo.bar.')
def test_get_dot_atom_text_raises_on_leading_non_atext(self):
    # Whitespace, specials, or quotes at the start are all errors.
    for bad in (' foo.bar', '@foo.bar', '"foo.bar"'):
        with self.assertRaises(errors.HeaderParseError):
            parser.get_dot_atom_text(bad)
def test_get_dot_atom_text_trailing_text_preserved(self):
    # A special ends the run; the remainder is preserved.
    # Fix: dropped an unused `dot_atom_text = ...` binding.
    self._test_get_x(parser.get_dot_atom_text,
                     'foo@bar', 'foo', 'foo', [], '@bar')
def test_get_dot_atom_text_trailing_ws_preserved(self):
    # Whitespace ends the run; the remainder keeps it.
    # Fix: dropped an unused `dot_atom_text = ...` binding.
    self._test_get_x(parser.get_dot_atom_text,
                     'foo .bar', 'foo', 'foo', [], ' .bar')
# get_dot_atom
def test_get_dot_atom_only(self):
    # The dot-atom wraps a single dot-atom-text child.
    token = self._test_get_x(parser.get_dot_atom,
                             'foo.bar.bing', 'foo.bar.bing', 'foo.bar.bing',
                             [], '')
    self.assertEqual(token.token_type, 'dot-atom')
    self.assertEqual(len(token), 1)
def test_get_dot_atom_with_wsp(self):
    # Surrounding whitespace folds to single spaces in the value.
    self._test_get_x(parser.get_dot_atom,
                     '\t  foo.bar.bing  ', '\t  foo.bar.bing  ',
                     ' foo.bar.bing ', [], '')
def test_get_dot_atom_with_comments_and_wsp(self):
    # Comments on either side are absorbed into surrounding cfws.
    self._test_get_x(parser.get_dot_atom,
                     ' (sing) foo.bar.bing (here) ',
                     ' (sing) foo.bar.bing (here) ',
                     ' foo.bar.bing ', [], '')
def test_get_dot_atom_space_ends_dot_atom(self):
    # A space inside the dotted run ends the dot-atom there.
    self._test_get_x(parser.get_dot_atom,
                     ' (sing) foo.bar .bing (here) ', ' (sing) foo.bar ',
                     ' foo.bar ', [], '.bing (here) ')
def test_get_dot_atom_no_atom_raises(self):
    # cfws alone is not a dot-atom.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_dot_atom(' (foo) ')
def test_get_dot_atom_leading_dot_raises(self):
    # A leading dot after cfws is an error.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_dot_atom(' (foo) .bar')
def test_get_dot_atom_two_dots_raises(self):
    # Consecutive dots are an error in a dot-atom.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_dot_atom('bar..bang')
def test_get_dot_atom_trailing_dot_raises(self):
    # A dot with nothing after it (before whitespace) is an error.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_dot_atom(' (foo) bar.bang. foo')
def test_get_dot_atom_rfc2047_atom(self):
    # An encoded word in dot-atom position is decoded.
    self._test_get_x(parser.get_dot_atom,
                     '=?utf-8?q?=20bob?=', ' bob', ' bob', [], '')
# get_word (if this were black box we'd repeat all the qs/atom tests)
def test_get_word_atom_yields_atom(self):
    # get_word dispatches to an atom when no quote follows the cfws.
    token = self._test_get_x(parser.get_word,
                             ' (foo) bar (bang) :ah', ' (foo) bar (bang) ',
                             ' bar ', [], ':ah')
    self.assertEqual(token.token_type, 'atom')
    self.assertEqual(token[0].token_type, 'cfws')
def test_get_word_qs_yields_qs(self):
    # get_word dispatches to a quoted-string when a quote is next.
    token = self._test_get_x(parser.get_word,
                             '"bar " (bang) ah', '"bar " (bang) ', 'bar  ',
                             [], 'ah')
    self.assertEqual(token.token_type, 'quoted-string')
    self.assertEqual(token[0].token_type, 'bare-quoted-string')
    self.assertEqual(token[0].value, 'bar ')
    self.assertEqual(token.content, 'bar ')
def test_get_word_ends_at_dot(self):
    # A dot terminates the word and stays in the remainder.
    self._test_get_x(parser.get_word, 'foo.', 'foo', 'foo', [], '.')
# get_phrase
def test_get_phrase_simple(self):
    # A phrase of quoted string plus atoms stops at the comma.
    token = self._test_get_x(parser.get_phrase,
                             '"Fred A. Johnson" is his name, oh.',
                             '"Fred A. Johnson" is his name',
                             'Fred A. Johnson is his name',
                             [], ', oh.')
    self.assertEqual(token.token_type, 'phrase')
def test_get_phrase_complex(self):
    # Nested comments and a quoted string mix freely within a phrase.
    token = self._test_get_x(parser.get_phrase,
                             ' (A) bird (in (my|your)) "hand  " is messy\t<>\t',
                             ' (A) bird (in (my|your)) "hand  " is messy\t',
                             ' bird hand   is messy ',
                             [], '<>\t')
    self.assertEqual(token[0][0].comments, ['A'])
    self.assertEqual(token[0][2].comments, ['in (my|your)'])
def test_get_phrase_obsolete(self):
    # Dots between words are obsolete phrase syntax; each is flagged.
    token = self._test_get_x(parser.get_phrase,
                             'Fred A.(weird).O Johnson',
                             'Fred A.(weird).O Johnson',
                             'Fred A. .O Johnson',
                             [errors.ObsoleteHeaderDefect]*3, '')
    self.assertEqual(len(token), 7)
    self.assertEqual(token[3].comments, ['weird'])
def test_get_phrase_pharse_must_start_with_word(self):
    # ("pharse" typo kept: renaming would change the published test id.)
    # A phrase opening with cfws is invalid but still parsed leniently.
    token = self._test_get_x(parser.get_phrase,
                             '(even weirder).name',
                             '(even weirder).name',
                             ' .name',
                             [errors.InvalidHeaderDefect] +
                                 [errors.ObsoleteHeaderDefect]*2,
                             '')
    self.assertEqual(len(token), 3)
    self.assertEqual(token[0].comments, ['even weirder'])
def test_get_phrase_ending_with_obsolete(self):
    # A trailing dot-plus-comment is obsolete syntax; phrase ends before ':'.
    token = self._test_get_x(parser.get_phrase,
                             'simple phrase.(with trailing comment):boo',
                             'simple phrase.(with trailing comment)',
                             'simple phrase. ',
                             [errors.ObsoleteHeaderDefect]*2, ':boo')
    self.assertEqual(len(token), 4)
    self.assertEqual(token[3].comments, ['with trailing comment'])
def test_get_phrase_cfws_only_raises(self):
    # Fix: the method was named without the `test_` prefix, so unittest
    # discovery never ran it. cfws alone is not a valid phrase.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_phrase(' (foo) ')
# get_local_part
def test_get_local_part_simple(self):
    # A plain local part stops at '@' and exposes .local_part.
    token = self._test_get_x(parser.get_local_part,
                             'dinsdale@python.org', 'dinsdale', 'dinsdale',
                             [], '@python.org')
    self.assertEqual(token.token_type, 'local-part')
    self.assertEqual(token.local_part, 'dinsdale')
def test_get_local_part_with_dot(self):
    # Dots within the local part are regular dot-atom syntax.
    token = self._test_get_x(parser.get_local_part,
                             'Fred.A.Johnson@python.org',
                             'Fred.A.Johnson', 'Fred.A.Johnson',
                             [], '@python.org')
    self.assertEqual(token.local_part, 'Fred.A.Johnson')
def test_get_local_part_with_whitespace(self):
    # Surrounding whitespace is folded but excluded from .local_part.
    token = self._test_get_x(parser.get_local_part,
                             ' Fred.A.Johnson  @python.org',
                             ' Fred.A.Johnson  ', ' Fred.A.Johnson ',
                             [], '@python.org')
    self.assertEqual(token.local_part, 'Fred.A.Johnson')
def test_get_local_part_with_cfws(self):
    # Comments around the local part are kept as cfws children.
    token = self._test_get_x(parser.get_local_part,
                             ' (foo) Fred.A.Johnson (bar (bird)) @python.org',
                             ' (foo) Fred.A.Johnson (bar (bird)) ',
                             ' Fred.A.Johnson ',
                             [], '@python.org')
    self.assertEqual(token.local_part, 'Fred.A.Johnson')
    self.assertEqual(token[0][0].comments, ['foo'])
    self.assertEqual(token[0][2].comments, ['bar (bird)'])
def test_get_local_part_simple_quoted(self):
    # A quoted local part keeps quotes in the value, not in .local_part.
    token = self._test_get_x(parser.get_local_part,
                             '"dinsdale"@python.org', '"dinsdale"',
                             '"dinsdale"', [], '@python.org')
    self.assertEqual(token.token_type, 'local-part')
    self.assertEqual(token.local_part, 'dinsdale')
def test_get_local_part_with_quoted_dot(self):
    # Dots inside quotes are content, not structure.
    token = self._test_get_x(parser.get_local_part,
                             '"Fred.A.Johnson"@python.org',
                             '"Fred.A.Johnson"', '"Fred.A.Johnson"',
                             [], '@python.org')
    self.assertEqual(token.local_part, 'Fred.A.Johnson')
def test_get_local_part_quoted_with_whitespace(self):
    # Whitespace inside the quotes is part of the local part.
    token = self._test_get_x(parser.get_local_part,
                             ' "Fred A. Johnson"  @python.org',
                             ' "Fred A. Johnson"  ', ' "Fred A. Johnson" ',
                             [], '@python.org')
    self.assertEqual(token.local_part, 'Fred A. Johnson')
def test_get_local_part_quoted_with_cfws(self):
    # Comments around a quoted local part are kept as cfws children.
    token = self._test_get_x(parser.get_local_part,
                             ' (foo) " Fred A. Johnson " (bar (bird)) @python.org',
                             ' (foo) " Fred A. Johnson " (bar (bird)) ',
                             ' " Fred A. Johnson " ',
                             [], '@python.org')
    self.assertEqual(token.local_part, ' Fred A. Johnson ')
    self.assertEqual(token[0][0].comments, ['foo'])
    self.assertEqual(token[0][2].comments, ['bar (bird)'])
def test_get_local_part_simple_obsolete(self):
    # Whitespace around an internal dot is obsolete local-part syntax.
    token = self._test_get_x(parser.get_local_part,
                             'Fred. A.Johnson@python.org',
                             'Fred. A.Johnson', 'Fred. A.Johnson',
                             [errors.ObsoleteHeaderDefect], '@python.org')
    self.assertEqual(token.local_part, 'Fred.A.Johnson')
def test_get_local_part_complex_obsolete_1(self):
    # Comments and a quoted word interleaved with dots: obsolete but legal.
    token = self._test_get_x(parser.get_local_part,
                             ' (foo )Fred (bar).(bird) A.(sheep)Johnson."and dogs "@python.org',
                             ' (foo )Fred (bar).(bird) A.(sheep)Johnson."and dogs "',
                             ' Fred . A. Johnson.and dogs ',
                             [errors.ObsoleteHeaderDefect], '@python.org')
    self.assertEqual(token.local_part, 'Fred.A.Johnson.and dogs ')
def test_get_local_part_complex_obsolete_invalid(self):
    # A quoted word not joined by a dot makes the local part invalid.
    token = self._test_get_x(parser.get_local_part,
                             ' (foo )Fred (bar).(bird) A.(sheep)Johnson "and dogs"@python.org',
                             ' (foo )Fred (bar).(bird) A.(sheep)Johnson "and dogs"',
                             ' Fred . A. Johnson and dogs',
                             [errors.InvalidHeaderDefect]*2, '@python.org')
    self.assertEqual(token.local_part, 'Fred.A.Johnson and dogs')
def test_get_local_part_no_part_raises(self):
    # cfws alone is not a local part.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_local_part(' (foo) ')
def test_get_local_part_special_instead_raises(self):
    # A special where the local part should start is an error.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_local_part(' (foo) @python.org')
def test_get_local_part_trailing_dot(self):
    # A trailing dot before '@' is invalid but retained in .local_part.
    token = self._test_get_x(parser.get_local_part,
                             ' borris.@python.org',
                             ' borris.', ' borris.',
                             [errors.InvalidHeaderDefect]*2, '@python.org')
    self.assertEqual(token.local_part, 'borris.')
def test_get_local_part_trailing_dot_with_ws(self):
    # Same as trailing-dot, with whitespace before the '@'.
    token = self._test_get_x(parser.get_local_part,
                             ' borris. @python.org',
                             ' borris. ', ' borris. ',
                             [errors.InvalidHeaderDefect]*2, '@python.org')
    self.assertEqual(token.local_part, 'borris.')
def test_get_local_part_leading_dot(self):
    # A leading dot is invalid but retained in .local_part.
    token = self._test_get_x(parser.get_local_part,
                             '.borris@python.org',
                             '.borris', '.borris',
                             [errors.InvalidHeaderDefect]*2, '@python.org')
    self.assertEqual(token.local_part, '.borris')
def test_get_local_part_leading_dot_after_ws(self):
    # A leading dot after whitespace is still invalid but retained.
    token = self._test_get_x(parser.get_local_part,
                             ' .borris@python.org',
                             ' .borris', ' .borris',
                             [errors.InvalidHeaderDefect]*2, '@python.org')
    self.assertEqual(token.local_part, '.borris')
def test_get_local_part_double_dot_raises(self):
    # NOTE(review): name says "raises" but nothing raises — the double
    # dot is recorded as InvalidHeaderDefects instead.
    token = self._test_get_x(parser.get_local_part,
                             ' borris.(foo).natasha@python.org',
                             ' borris.(foo).natasha',
                             ' borris. .natasha',
                             [errors.InvalidHeaderDefect]*2, '@python.org')
    self.assertEqual(token.local_part, 'borris..natasha')
def test_get_local_part_quoted_strings_in_atom_list(self):
    # Nested/abutting quoted strings produce multiple defects.
    token = self._test_get_x(parser.get_local_part,
                             '""example" example"@example.com',
                             '""example" example"',
                             'example example',
                             [errors.InvalidHeaderDefect]*3, '@example.com')
    self.assertEqual(token.local_part, 'example example')
def test_get_local_part_valid_and_invalid_qp_in_atom_list(self):
    # A mix of valid and stray quoted-pairs yields five defects.
    token = self._test_get_x(parser.get_local_part,
                             r'"\\"example\\" example"@example.com',
                             r'"\\"example\\" example"',
                             r'\example\\ example',
                             [errors.InvalidHeaderDefect]*5, '@example.com')
    self.assertEqual(token.local_part, r'\example\\ example')
def test_get_local_part_unicode_defect(self):
    # Currently this only happens when parsing unicode, not when parsing
    # stuff that was originally binary.
    token = self._test_get_x(parser.get_local_part,
                             'exámple@example.com',
                             'exámple', 'exámple',
                             [errors.NonASCIILocalPartDefect],
                             '@example.com')
    self.assertEqual(token.local_part, 'exámple')
# get_dtext
def test_get_dtext_only(self):
    # Plain dtext parses whole and is typed 'ptext'.
    token = self._test_get_x(parser.get_dtext, 'foobar', 'foobar',
                             'foobar', [], '')
    self.assertEqual(token.token_type, 'ptext')
def test_get_dtext_all_dtext(self):
    # Every RFC dtext character is consumed in one token.
    # Fix: dropped an unused `dtext = ...` binding.
    self._test_get_x(parser.get_dtext, self.rfc_dtext_chars,
                     self.rfc_dtext_chars,
                     self.rfc_dtext_chars, [], '')
def test_get_dtext_two_words_gets_first(self):
    # Parsing stops at whitespace; the rest of the input is returned.
    self._test_get_x(parser.get_dtext, 'foo bar', 'foo', 'foo', [], ' bar')
def test_get_dtext_following_wsp_preserved(self):
    # The whole trailing whitespace run stays in the remainder.
    self._test_get_x(parser.get_dtext, 'foo \t\tbar', 'foo', 'foo', [],
                     ' \t\tbar')
def test_get_dtext_non_printables(self):
    # Non-printables are kept in the token but recorded on the defect.
    token = self._test_get_x(parser.get_dtext, 'foo\x00bar]',
                             'foo\x00bar', 'foo\x00bar',
                             [errors.NonPrintableDefect], ']')
    self.assertEqual(token.defects[0].non_printables[0], '\x00')
def test_get_dtext_with_qp(self):
    # Quoted-pairs in dtext are obsolete syntax; they are unescaped.
    # Fix: dropped an unused `ptext = ...` binding.
    self._test_get_x(parser.get_dtext,
                     r'foo\]\[\\bar\b\e\l\l',
                     r'foo][\barbell',
                     r'foo][\barbell',
                     [errors.ObsoleteHeaderDefect],
                     '')
def test_get_dtext_up_to_close_bracket_only(self):
    # A close bracket terminates dtext and is left for the caller.
    self._test_get_x(parser.get_dtext, 'foo]', 'foo', 'foo', [], ']')
def test_get_dtext_wsp_before_close_bracket_preserved(self):
    # Whitespace before the close bracket stays in the remainder.
    self._test_get_x(parser.get_dtext, 'foo ]', 'foo', 'foo', [], ' ]')
def test_get_dtext_close_bracket_mid_word(self):
    # A close bracket splits the token even mid-word.
    self._test_get_x(parser.get_dtext, 'foo]bar', 'foo', 'foo', [], ']bar')
def test_get_dtext_up_to_open_bracket_only(self):
    # An open bracket terminates dtext and is left for the caller.
    self._test_get_x(parser.get_dtext, 'foo[', 'foo', 'foo', [], '[')
def test_get_dtext_wsp_before_open_bracket_preserved(self):
    # Whitespace before the open bracket stays in the remainder.
    self._test_get_x(parser.get_dtext, 'foo [', 'foo', 'foo', [], ' [')
def test_get_dtext_open_bracket_mid_word(self):
    # An open bracket splits the token even mid-word.
    self._test_get_x(parser.get_dtext, 'foo[bar', 'foo', 'foo', [], '[bar')
# get_domain_literal
def test_get_domain_literal_only(self):
    # A bare IPv4 domain literal exposes .domain and .ip.
    # Fix: removed the doubled "domain_literal = domain_literal ="
    # assignment.
    domain_literal = self._test_get_x(parser.get_domain_literal,
                                      '[127.0.0.1]',
                                      '[127.0.0.1]',
                                      '[127.0.0.1]',
                                      [],
                                      '')
    self.assertEqual(domain_literal.token_type, 'domain-literal')
    self.assertEqual(domain_literal.domain, '[127.0.0.1]')
    self.assertEqual(domain_literal.ip, '127.0.0.1')
def test_get_domain_literal_with_internal_ws(self):
    # Whitespace inside the brackets is dropped from .domain and .ip.
    token = self._test_get_x(parser.get_domain_literal,
                             '[ 127.0.0.1\t ]',
                             '[ 127.0.0.1\t ]',
                             '[ 127.0.0.1 ]',
                             [], '')
    self.assertEqual(token.domain, '[127.0.0.1]')
    self.assertEqual(token.ip, '127.0.0.1')
def test_get_domain_literal_with_surrounding_cfws(self):
    # Comments around the literal fold to spaces; .domain stays clean.
    token = self._test_get_x(parser.get_domain_literal,
                             '(foo)[ 127.0.0.1] (bar)',
                             '(foo)[ 127.0.0.1] (bar)',
                             ' [ 127.0.0.1] ',
                             [], '')
    self.assertEqual(token.domain, '[127.0.0.1]')
    self.assertEqual(token.ip, '127.0.0.1')
def test_get_domain_literal_no_start_char_raises(self):
    # cfws with no opening bracket is a parse error.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_domain_literal('(foo) ')
def test_get_domain_literal_no_start_char_before_special_raises(self):
    # A special instead of the opening bracket is a parse error.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_domain_literal('(foo) @')
def test_get_domain_literal_bad_dtext_char_before_special_raises(self):
    # An illegal '[' inside the literal is a parse error.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_domain_literal('(foo) [abc[@')
# get_domain
def test_get_domain_regular_domain_only(self):
    # A dotted hostname parses whole and exposes .domain.
    name = 'example.com'
    token = self._test_get_x(parser.get_domain, name, name, name, [], '')
    self.assertEqual(token.token_type, 'domain')
    self.assertEqual(token.domain, name)
def test_get_domain_domain_literal_only(self):
    # A domain literal also parses via get_domain.
    literal = '[127.0.0.1]'
    token = self._test_get_x(parser.get_domain, literal, literal, literal,
                             [], '')
    self.assertEqual(token.token_type, 'domain')
    self.assertEqual(token.domain, literal)
def test_get_domain_with_cfws(self):
    # Comments around the name fold to spaces; .domain stays clean.
    token = self._test_get_x(parser.get_domain,
                             '(foo) example.com(bar)\t',
                             '(foo) example.com(bar)\t',
                             ' example.com ',
                             [], '')
    self.assertEqual(token.domain, 'example.com')
def test_get_domain_domain_literal_with_cfws(self):
domain = self._test_get_x(parser.get_domain,
'(foo)[127.0.0.1]\t(bar)',
'(foo)[127.0.0.1]\t(bar)',
' [127.0.0.1] ',
[],
'')
self.assertEqual(domain.domain, '[127.0.0.1]')
def test_get_domain_domain_with_cfws_ends_at_special(self):
domain = self._test_get_x(parser.get_domain,
'(foo)example.com\t(bar), next',
'(foo)example.com\t(bar)',
' example.com ',
[],
', next')
self.assertEqual(domain.domain, 'example.com')
def test_get_domain_domain_literal_with_cfws_ends_at_special(self):
domain = self._test_get_x(parser.get_domain,
'(foo)[127.0.0.1]\t(bar), next',
'(foo)[127.0.0.1]\t(bar)',
' [127.0.0.1] ',
[],
', next')
self.assertEqual(domain.domain, '[127.0.0.1]')
def test_get_domain_obsolete(self):
domain = self._test_get_x(parser.get_domain,
'(foo) example . (bird)com(bar)\t',
'(foo) example . (bird)com(bar)\t',
' example . com ',
[errors.ObsoleteHeaderDefect],
'')
self.assertEqual(domain.domain, 'example.com')
def test_get_domain_no_non_cfws_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_domain(" (foo)\t")
def test_get_domain_no_atom_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_domain(" (foo)\t, broken")
# get_addr_spec
def test_get_addr_spec_normal(self):
    # Simple local@domain; .addr_spec reassembles the normalized form.
    addr_spec = self._test_get_x(parser.get_addr_spec,
        'dinsdale@example.com',
        'dinsdale@example.com',
        'dinsdale@example.com',
        [],
        '')
    self.assertEqual(addr_spec.token_type, 'addr-spec')
    self.assertEqual(addr_spec.local_part, 'dinsdale')
    self.assertEqual(addr_spec.domain, 'example.com')
    self.assertEqual(addr_spec.addr_spec, 'dinsdale@example.com')
def test_get_addr_spec_with_doamin_literal(self):
    # NOTE(review): 'doamin' typo kept — renaming would change the test ID.
    addr_spec = self._test_get_x(parser.get_addr_spec,
        'dinsdale@[127.0.0.1]',
        'dinsdale@[127.0.0.1]',
        'dinsdale@[127.0.0.1]',
        [],
        '')
    self.assertEqual(addr_spec.local_part, 'dinsdale')
    self.assertEqual(addr_spec.domain, '[127.0.0.1]')
    self.assertEqual(addr_spec.addr_spec, 'dinsdale@[127.0.0.1]')
def test_get_addr_spec_with_cfws(self):
    # CFWS around local part, '@' and domain is absorbed; the value
    # keeps only single leading/trailing spaces.
    addr_spec = self._test_get_x(parser.get_addr_spec,
        '(foo) dinsdale(bar)@ (bird) example.com (bog)',
        '(foo) dinsdale(bar)@ (bird) example.com (bog)',
        ' dinsdale@example.com ',
        [],
        '')
    self.assertEqual(addr_spec.local_part, 'dinsdale')
    self.assertEqual(addr_spec.domain, 'example.com')
    self.assertEqual(addr_spec.addr_spec, 'dinsdale@example.com')
def test_get_addr_spec_with_qouoted_string_and_cfws(self):
    # NOTE(review): 'qouoted' typo kept for test-ID stability.
    # Quoted-string local part: .local_part is unquoted, .addr_spec
    # re-quotes it because it contains spaces.
    addr_spec = self._test_get_x(parser.get_addr_spec,
        '(foo) "roy a bug"(bar)@ (bird) example.com (bog)',
        '(foo) "roy a bug"(bar)@ (bird) example.com (bog)',
        ' "roy a bug"@example.com ',
        [],
        '')
    self.assertEqual(addr_spec.local_part, 'roy a bug')
    self.assertEqual(addr_spec.domain, 'example.com')
    self.assertEqual(addr_spec.addr_spec, '"roy a bug"@example.com')
def test_get_addr_spec_ends_at_special(self):
    # Stops at ','; remainder ', next' is returned unparsed.
    addr_spec = self._test_get_x(parser.get_addr_spec,
        '(foo) "roy a bug"(bar)@ (bird) example.com (bog) , next',
        '(foo) "roy a bug"(bar)@ (bird) example.com (bog) ',
        ' "roy a bug"@example.com ',
        [],
        ', next')
    self.assertEqual(addr_spec.local_part, 'roy a bug')
    self.assertEqual(addr_spec.domain, 'example.com')
    self.assertEqual(addr_spec.addr_spec, '"roy a bug"@example.com')
def test_get_addr_spec_quoted_strings_in_atom_list(self):
    # Nested/stray quotes in the local part: three defects recorded,
    # but a usable '"example example"@example.com' is still produced.
    addr_spec = self._test_get_x(parser.get_addr_spec,
        '""example" example"@example.com',
        '""example" example"@example.com',
        'example example@example.com',
        [errors.InvalidHeaderDefect]*3,
        '')
    self.assertEqual(addr_spec.local_part, 'example example')
    self.assertEqual(addr_spec.domain, 'example.com')
    self.assertEqual(addr_spec.addr_spec, '"example example"@example.com')
def test_get_addr_spec_dot_atom(self):
    # Dot-atom local part needs no quoting in .addr_spec.
    addr_spec = self._test_get_x(parser.get_addr_spec,
        'star.a.star@example.com',
        'star.a.star@example.com',
        'star.a.star@example.com',
        [],
        '')
    self.assertEqual(addr_spec.local_part, 'star.a.star')
    self.assertEqual(addr_spec.domain, 'example.com')
    self.assertEqual(addr_spec.addr_spec, 'star.a.star@example.com')
# get_obs_route
def test_get_obs_route_simple(self):
    # obs-route: '@domain' list terminated by ':'; .domains collects
    # the route hops in order.
    obs_route = self._test_get_x(parser.get_obs_route,
        '@example.com, @two.example.com:',
        '@example.com, @two.example.com:',
        '@example.com, @two.example.com:',
        [],
        '')
    self.assertEqual(obs_route.token_type, 'obs-route')
    self.assertEqual(obs_route.domains, ['example.com', 'two.example.com'])
def test_get_obs_route_complex(self):
    # Empty list elements and comments inside the route are tolerated;
    # the CFWS-split domain is what triggers the defect.
    obs_route = self._test_get_x(parser.get_obs_route,
        '(foo),, (blue)@example.com (bar),@two.(foo) example.com (bird):',
        '(foo),, (blue)@example.com (bar),@two.(foo) example.com (bird):',
        ' ,, @example.com ,@two. example.com :',
        [errors.ObsoleteHeaderDefect], # This is the obs-domain
        '')
    self.assertEqual(obs_route.token_type, 'obs-route')
    self.assertEqual(obs_route.domains, ['example.com', 'two.example.com'])
def test_get_obs_route_no_route_before_end_raises(self):
    # Route never terminated by ':'.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_obs_route('(foo) @example.com,')
def test_get_obs_route_no_route_before_special_raises(self):
    # No '@' route element before a special.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_obs_route('(foo) [abc],')
def test_get_obs_route_no_route_before_special_raises2(self):
    with self.assertRaises(errors.HeaderParseError):
        parser.get_obs_route('(foo) @example.com [abc],')
# get_angle_addr
def test_get_angle_addr_simple(self):
    # '<addr-spec>' with no route; addr fields are proxied through.
    angle_addr = self._test_get_x(parser.get_angle_addr,
        '<dinsdale@example.com>',
        '<dinsdale@example.com>',
        '<dinsdale@example.com>',
        [],
        '')
    self.assertEqual(angle_addr.token_type, 'angle-addr')
    self.assertEqual(angle_addr.local_part, 'dinsdale')
    self.assertEqual(angle_addr.domain, 'example.com')
    self.assertIsNone(angle_addr.route)
    self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_empty(self):
    # Null angle-addr '<>' is accepted with a defect; addr fields are
    # None and .addr_spec degrades to the literal '<>'.
    angle_addr = self._test_get_x(parser.get_angle_addr,
        '<>',
        '<>',
        '<>',
        [errors.InvalidHeaderDefect],
        '')
    self.assertEqual(angle_addr.token_type, 'angle-addr')
    self.assertIsNone(angle_addr.local_part)
    self.assertIsNone(angle_addr.domain)
    self.assertIsNone(angle_addr.route)
    self.assertEqual(angle_addr.addr_spec, '<>')
def test_get_angle_addr_qs_only_quotes(self):
    # Empty quoted-string local part '""' is valid: local_part is ''.
    angle_addr = self._test_get_x(parser.get_angle_addr,
        '<""@example.com>',
        '<""@example.com>',
        '<""@example.com>',
        [],
        '')
    self.assertEqual(angle_addr.token_type, 'angle-addr')
    self.assertEqual(angle_addr.local_part, '')
    self.assertEqual(angle_addr.domain, 'example.com')
    self.assertIsNone(angle_addr.route)
    self.assertEqual(angle_addr.addr_spec, '""@example.com')
def test_get_angle_addr_with_cfws(self):
    angle_addr = self._test_get_x(parser.get_angle_addr,
        ' (foo) <dinsdale@example.com>(bar)',
        ' (foo) <dinsdale@example.com>(bar)',
        ' <dinsdale@example.com> ',
        [],
        '')
    self.assertEqual(angle_addr.token_type, 'angle-addr')
    self.assertEqual(angle_addr.local_part, 'dinsdale')
    self.assertEqual(angle_addr.domain, 'example.com')
    self.assertIsNone(angle_addr.route)
    self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_qs_and_domain_literal(self):
    angle_addr = self._test_get_x(parser.get_angle_addr,
        '<"Fred Perfect"@[127.0.0.1]>',
        '<"Fred Perfect"@[127.0.0.1]>',
        '<"Fred Perfect"@[127.0.0.1]>',
        [],
        '')
    self.assertEqual(angle_addr.local_part, 'Fred Perfect')
    self.assertEqual(angle_addr.domain, '[127.0.0.1]')
    self.assertIsNone(angle_addr.route)
    self.assertEqual(angle_addr.addr_spec, '"Fred Perfect"@[127.0.0.1]')
def test_get_angle_addr_internal_cfws(self):
    # Comments inside the angle brackets collapse to spaces in the value.
    angle_addr = self._test_get_x(parser.get_angle_addr,
        '<(foo) dinsdale@example.com(bar)>',
        '<(foo) dinsdale@example.com(bar)>',
        '< dinsdale@example.com >',
        [],
        '')
    self.assertEqual(angle_addr.local_part, 'dinsdale')
    self.assertEqual(angle_addr.domain, 'example.com')
    self.assertIsNone(angle_addr.route)
    self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_obs_route(self):
    # An obs-route before the addr-spec populates .route and records an
    # obsolete defect; .addr_spec still excludes the route.
    angle_addr = self._test_get_x(parser.get_angle_addr,
        '(foo)<@example.com, (bird) @two.example.com: dinsdale@example.com> (bar) ',
        '(foo)<@example.com, (bird) @two.example.com: dinsdale@example.com> (bar) ',
        ' <@example.com, @two.example.com: dinsdale@example.com> ',
        [errors.ObsoleteHeaderDefect],
        '')
    self.assertEqual(angle_addr.local_part, 'dinsdale')
    self.assertEqual(angle_addr.domain, 'example.com')
    self.assertEqual(angle_addr.route, ['example.com', 'two.example.com'])
    self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_missing_closing_angle(self):
    # Missing '>' is repaired in the token (note the expected strings
    # include the synthesized '>') and flagged as a defect.
    angle_addr = self._test_get_x(parser.get_angle_addr,
        '<dinsdale@example.com',
        '<dinsdale@example.com>',
        '<dinsdale@example.com>',
        [errors.InvalidHeaderDefect],
        '')
    self.assertEqual(angle_addr.local_part, 'dinsdale')
    self.assertEqual(angle_addr.domain, 'example.com')
    self.assertIsNone(angle_addr.route)
    self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_missing_closing_angle_with_cfws(self):
    # Same repair with trailing CFWS: '>' is appended after the comment.
    angle_addr = self._test_get_x(parser.get_angle_addr,
        '<dinsdale@example.com (foo)',
        '<dinsdale@example.com (foo)>',
        '<dinsdale@example.com >',
        [errors.InvalidHeaderDefect],
        '')
    self.assertEqual(angle_addr.local_part, 'dinsdale')
    self.assertEqual(angle_addr.domain, 'example.com')
    self.assertIsNone(angle_addr.route)
    self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_ends_at_special(self):
    angle_addr = self._test_get_x(parser.get_angle_addr,
        '<dinsdale@example.com> (foo), next',
        '<dinsdale@example.com> (foo)',
        '<dinsdale@example.com> ',
        [],
        ', next')
    self.assertEqual(angle_addr.local_part, 'dinsdale')
    self.assertEqual(angle_addr.domain, 'example.com')
    self.assertIsNone(angle_addr.route)
    self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_no_angle_raise(self):
    # Only CFWS, no '<' at all.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_angle_addr('(foo) ')
def test_get_angle_addr_no_angle_before_special_raises(self):
    with self.assertRaises(errors.HeaderParseError):
        parser.get_angle_addr('(foo) , next')
def test_get_angle_addr_no_angle_raises(self):
    # Bare atom with no angle bracket.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_angle_addr('bar')
def test_get_angle_addr_special_after_angle_raises(self):
    # '<' immediately followed by a special (',').
    with self.assertRaises(errors.HeaderParseError):
        parser.get_angle_addr('(foo) <, bar')
# get_display_name This is phrase but with a different value.
def test_get_display_name_simple(self):
    # display-name is a phrase; .display_name strips CFWS/quoting.
    display_name = self._test_get_x(parser.get_display_name,
        'Fred A Johnson',
        'Fred A Johnson',
        'Fred A Johnson',
        [],
        '')
    self.assertEqual(display_name.token_type, 'display-name')
    self.assertEqual(display_name.display_name, 'Fred A Johnson')
def test_get_display_name_complex1(self):
    # Mixed quoted-string + atoms: the whole phrase value is re-quoted
    # as one quoted string; parsing stops at the ','.
    display_name = self._test_get_x(parser.get_display_name,
        '"Fred A. Johnson" is his name, oh.',
        '"Fred A. Johnson" is his name',
        '"Fred A. Johnson is his name"',
        [],
        ', oh.')
    self.assertEqual(display_name.token_type, 'display-name')
    self.assertEqual(display_name.display_name, 'Fred A. Johnson is his name')
def test_get_display_name_complex2(self):
    # Comments (including a nested one) attach to sub-tokens and are
    # retrievable via .comments; they drop out of .display_name.
    display_name = self._test_get_x(parser.get_display_name,
        ' (A) bird (in (my|your)) "hand " is messy\t<>\t',
        ' (A) bird (in (my|your)) "hand " is messy\t',
        ' "bird hand is messy" ',
        [],
        '<>\t')
    self.assertEqual(display_name[0][0].comments, ['A'])
    self.assertEqual(display_name[0][2].comments, ['in (my|your)'])
    self.assertEqual(display_name.display_name, 'bird hand is messy')
def test_get_display_name_obsolete(self):
    # obs-phrase (dots between words): defects recorded, value quoted.
    display_name = self._test_get_x(parser.get_display_name,
        'Fred A.(weird).O Johnson',
        'Fred A.(weird).O Johnson',
        '"Fred A. .O Johnson"',
        [errors.ObsoleteHeaderDefect]*3,
        '')
    self.assertEqual(len(display_name), 7)
    self.assertEqual(display_name[3].comments, ['weird'])
    self.assertEqual(display_name.display_name, 'Fred A. .O Johnson')
def test_get_display_name_pharse_must_start_with_word(self):
    # NOTE(review): 'pharse' typo kept for test-ID stability.
    # Phrase starting with '.' instead of a word: one invalid defect
    # plus two obsolete ones, but still parsed.
    display_name = self._test_get_x(parser.get_display_name,
        '(even weirder).name',
        '(even weirder).name',
        ' ".name"',
        [errors.InvalidHeaderDefect] + [errors.ObsoleteHeaderDefect]*2,
        '')
    self.assertEqual(len(display_name), 3)
    self.assertEqual(display_name[0].comments, ['even weirder'])
    self.assertEqual(display_name.display_name, '.name')
def test_get_display_name_ending_with_obsolete(self):
    # Trailing dot + comment before the ':' special.
    display_name = self._test_get_x(parser.get_display_name,
        'simple phrase.(with trailing comment):boo',
        'simple phrase.(with trailing comment)',
        '"simple phrase." ',
        [errors.ObsoleteHeaderDefect]*2,
        ':boo')
    self.assertEqual(len(display_name), 4)
    self.assertEqual(display_name[3].comments, ['with trailing comment'])
    self.assertEqual(display_name.display_name, 'simple phrase.')
# get_name_addr
def test_get_name_addr_angle_addr_only(self):
    # name-addr without a display name: .display_name is None.
    name_addr = self._test_get_x(parser.get_name_addr,
        '<dinsdale@example.com>',
        '<dinsdale@example.com>',
        '<dinsdale@example.com>',
        [],
        '')
    self.assertEqual(name_addr.token_type, 'name-addr')
    self.assertIsNone(name_addr.display_name)
    self.assertEqual(name_addr.local_part, 'dinsdale')
    self.assertEqual(name_addr.domain, 'example.com')
    self.assertIsNone(name_addr.route)
    self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_atom_name(self):
    name_addr = self._test_get_x(parser.get_name_addr,
        'Dinsdale <dinsdale@example.com>',
        'Dinsdale <dinsdale@example.com>',
        'Dinsdale <dinsdale@example.com>',
        [],
        '')
    self.assertEqual(name_addr.token_type, 'name-addr')
    self.assertEqual(name_addr.display_name, 'Dinsdale')
    self.assertEqual(name_addr.local_part, 'dinsdale')
    self.assertEqual(name_addr.domain, 'example.com')
    self.assertIsNone(name_addr.route)
    self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_atom_name_with_cfws(self):
    name_addr = self._test_get_x(parser.get_name_addr,
        '(foo) Dinsdale (bar) <dinsdale@example.com> (bird)',
        '(foo) Dinsdale (bar) <dinsdale@example.com> (bird)',
        ' Dinsdale <dinsdale@example.com> ',
        [],
        '')
    self.assertEqual(name_addr.display_name, 'Dinsdale')
    self.assertEqual(name_addr.local_part, 'dinsdale')
    self.assertEqual(name_addr.domain, 'example.com')
    self.assertIsNone(name_addr.route)
    self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_name_with_cfws_and_dots(self):
    # Dotted display name is obs-phrase: quoted in the value, two
    # obsolete defects recorded.
    name_addr = self._test_get_x(parser.get_name_addr,
        '(foo) Roy.A.Bear (bar) <dinsdale@example.com> (bird)',
        '(foo) Roy.A.Bear (bar) <dinsdale@example.com> (bird)',
        ' "Roy.A.Bear" <dinsdale@example.com> ',
        [errors.ObsoleteHeaderDefect]*2,
        '')
    self.assertEqual(name_addr.display_name, 'Roy.A.Bear')
    self.assertEqual(name_addr.local_part, 'dinsdale')
    self.assertEqual(name_addr.domain, 'example.com')
    self.assertIsNone(name_addr.route)
    self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_qs_name(self):
    # Already-quoted display name parses cleanly (no defects).
    name_addr = self._test_get_x(parser.get_name_addr,
        '"Roy.A.Bear" <dinsdale@example.com>',
        '"Roy.A.Bear" <dinsdale@example.com>',
        '"Roy.A.Bear" <dinsdale@example.com>',
        [],
        '')
    self.assertEqual(name_addr.display_name, 'Roy.A.Bear')
    self.assertEqual(name_addr.local_part, 'dinsdale')
    self.assertEqual(name_addr.domain, 'example.com')
    self.assertIsNone(name_addr.route)
    self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_with_route(self):
    # obs-route inside the angle-addr surfaces on .route.
    name_addr = self._test_get_x(parser.get_name_addr,
        '"Roy.A.Bear" <@two.example.com: dinsdale@example.com>',
        '"Roy.A.Bear" <@two.example.com: dinsdale@example.com>',
        '"Roy.A.Bear" <@two.example.com: dinsdale@example.com>',
        [errors.ObsoleteHeaderDefect],
        '')
    self.assertEqual(name_addr.display_name, 'Roy.A.Bear')
    self.assertEqual(name_addr.local_part, 'dinsdale')
    self.assertEqual(name_addr.domain, 'example.com')
    self.assertEqual(name_addr.route, ['two.example.com'])
    self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_ends_at_special(self):
    name_addr = self._test_get_x(parser.get_name_addr,
        '"Roy.A.Bear" <dinsdale@example.com>, next',
        '"Roy.A.Bear" <dinsdale@example.com>',
        '"Roy.A.Bear" <dinsdale@example.com>',
        [],
        ', next')
    self.assertEqual(name_addr.display_name, 'Roy.A.Bear')
    self.assertEqual(name_addr.local_part, 'dinsdale')
    self.assertEqual(name_addr.domain, 'example.com')
    self.assertIsNone(name_addr.route)
    self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_no_content_raises(self):
    with self.assertRaises(errors.HeaderParseError):
        parser.get_name_addr(' (foo) ')
def test_get_name_addr_no_content_before_special_raises(self):
    with self.assertRaises(errors.HeaderParseError):
        parser.get_name_addr(' (foo) ,')
def test_get_name_addr_no_angle_after_display_name_raises(self):
    # A phrase with no following '<...>' is not a name-addr.
    with self.assertRaises(errors.HeaderParseError):
        parser.get_name_addr('foo bar')
# get_mailbox
def test_get_mailbox_addr_spec_only(self):
    # mailbox accepts a bare addr-spec; display_name/route are None.
    mailbox = self._test_get_x(parser.get_mailbox,
        'dinsdale@example.com',
        'dinsdale@example.com',
        'dinsdale@example.com',
        [],
        '')
    self.assertEqual(mailbox.token_type, 'mailbox')
    self.assertIsNone(mailbox.display_name)
    self.assertEqual(mailbox.local_part, 'dinsdale')
    self.assertEqual(mailbox.domain, 'example.com')
    self.assertIsNone(mailbox.route)
    self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
def test_get_mailbox_angle_addr_only(self):
    # ...or a bare angle-addr.
    mailbox = self._test_get_x(parser.get_mailbox,
        '<dinsdale@example.com>',
        '<dinsdale@example.com>',
        '<dinsdale@example.com>',
        [],
        '')
    self.assertEqual(mailbox.token_type, 'mailbox')
    self.assertIsNone(mailbox.display_name)
    self.assertEqual(mailbox.local_part, 'dinsdale')
    self.assertEqual(mailbox.domain, 'example.com')
    self.assertIsNone(mailbox.route)
    self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
def test_get_mailbox_name_addr(self):
    # ...or a full name-addr.
    mailbox = self._test_get_x(parser.get_mailbox,
        '"Roy A. Bear" <dinsdale@example.com>',
        '"Roy A. Bear" <dinsdale@example.com>',
        '"Roy A. Bear" <dinsdale@example.com>',
        [],
        '')
    self.assertEqual(mailbox.token_type, 'mailbox')
    self.assertEqual(mailbox.display_name, 'Roy A. Bear')
    self.assertEqual(mailbox.local_part, 'dinsdale')
    self.assertEqual(mailbox.domain, 'example.com')
    self.assertIsNone(mailbox.route)
    self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
def test_get_mailbox_ends_at_special(self):
    mailbox = self._test_get_x(parser.get_mailbox,
        '"Roy A. Bear" <dinsdale@example.com>, rest',
        '"Roy A. Bear" <dinsdale@example.com>',
        '"Roy A. Bear" <dinsdale@example.com>',
        [],
        ', rest')
    self.assertEqual(mailbox.token_type, 'mailbox')
    self.assertEqual(mailbox.display_name, 'Roy A. Bear')
    self.assertEqual(mailbox.local_part, 'dinsdale')
    self.assertEqual(mailbox.domain, 'example.com')
    self.assertIsNone(mailbox.route)
    self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
def test_get_mailbox_quoted_strings_in_atom_list(self):
    # Same malformed-quoting fixture as the addr-spec test above,
    # exercised through get_mailbox.
    mailbox = self._test_get_x(parser.get_mailbox,
        '""example" example"@example.com',
        '""example" example"@example.com',
        'example example@example.com',
        [errors.InvalidHeaderDefect]*3,
        '')
    self.assertEqual(mailbox.local_part, 'example example')
    self.assertEqual(mailbox.domain, 'example.com')
    self.assertEqual(mailbox.addr_spec, '"example example"@example.com')
# get_mailbox_list
def test_get_mailbox_list_single_addr(self):
    # .mailboxes holds only valid mailboxes; .all_mailboxes also
    # includes invalid ones.  With one good address they are equal.
    mailbox_list = self._test_get_x(parser.get_mailbox_list,
        'dinsdale@example.com',
        'dinsdale@example.com',
        'dinsdale@example.com',
        [],
        '')
    self.assertEqual(mailbox_list.token_type, 'mailbox-list')
    self.assertEqual(len(mailbox_list.mailboxes), 1)
    mailbox = mailbox_list.mailboxes[0]
    self.assertIsNone(mailbox.display_name)
    self.assertEqual(mailbox.local_part, 'dinsdale')
    self.assertEqual(mailbox.domain, 'example.com')
    self.assertIsNone(mailbox.route)
    self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
    self.assertEqual(mailbox_list.mailboxes,
                     mailbox_list.all_mailboxes)
def test_get_mailbox_list_two_simple_addr(self):
    mailbox_list = self._test_get_x(parser.get_mailbox_list,
        'dinsdale@example.com, dinsdale@test.example.com',
        'dinsdale@example.com, dinsdale@test.example.com',
        'dinsdale@example.com, dinsdale@test.example.com',
        [],
        '')
    self.assertEqual(mailbox_list.token_type, 'mailbox-list')
    self.assertEqual(len(mailbox_list.mailboxes), 2)
    self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
                     'dinsdale@example.com')
    self.assertEqual(mailbox_list.mailboxes[1].addr_spec,
                     'dinsdale@test.example.com')
    self.assertEqual(mailbox_list.mailboxes,
                     mailbox_list.all_mailboxes)
def test_get_mailbox_list_two_name_addr(self):
    mailbox_list = self._test_get_x(parser.get_mailbox_list,
        ('"Roy A. Bear" <dinsdale@example.com>,'
            ' "Fred Flintstone" <dinsdale@test.example.com>'),
        ('"Roy A. Bear" <dinsdale@example.com>,'
            ' "Fred Flintstone" <dinsdale@test.example.com>'),
        ('"Roy A. Bear" <dinsdale@example.com>,'
            ' "Fred Flintstone" <dinsdale@test.example.com>'),
        [],
        '')
    self.assertEqual(len(mailbox_list.mailboxes), 2)
    self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
                     'dinsdale@example.com')
    self.assertEqual(mailbox_list.mailboxes[0].display_name,
                     'Roy A. Bear')
    self.assertEqual(mailbox_list.mailboxes[1].addr_spec,
                     'dinsdale@test.example.com')
    self.assertEqual(mailbox_list.mailboxes[1].display_name,
                     'Fred Flintstone')
    self.assertEqual(mailbox_list.mailboxes,
                     mailbox_list.all_mailboxes)
def test_get_mailbox_list_two_complex(self):
    # CFWS around entries plus a comment splitting a domain (obsolete).
    mailbox_list = self._test_get_x(parser.get_mailbox_list,
        ('(foo) "Roy A. Bear" <dinsdale@example.com>(bar),'
            ' "Fred Flintstone" <dinsdale@test.(bird)example.com>'),
        ('(foo) "Roy A. Bear" <dinsdale@example.com>(bar),'
            ' "Fred Flintstone" <dinsdale@test.(bird)example.com>'),
        (' "Roy A. Bear" <dinsdale@example.com> ,'
            ' "Fred Flintstone" <dinsdale@test. example.com>'),
        [errors.ObsoleteHeaderDefect],
        '')
    self.assertEqual(len(mailbox_list.mailboxes), 2)
    self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
                     'dinsdale@example.com')
    self.assertEqual(mailbox_list.mailboxes[0].display_name,
                     'Roy A. Bear')
    self.assertEqual(mailbox_list.mailboxes[1].addr_spec,
                     'dinsdale@test.example.com')
    self.assertEqual(mailbox_list.mailboxes[1].display_name,
                     'Fred Flintstone')
    self.assertEqual(mailbox_list.mailboxes,
                     mailbox_list.all_mailboxes)
def test_get_mailbox_list_unparseable_mailbox_null(self):
    # First entry cannot be parsed as a mailbox: it appears only in
    # .all_mailboxes as an 'invalid-mailbox' token; .mailboxes keeps
    # just the valid second entry.
    mailbox_list = self._test_get_x(parser.get_mailbox_list,
        ('"Roy A. Bear"[] dinsdale@example.com,'
            ' "Fred Flintstone" <dinsdale@test.(bird)example.com>'),
        ('"Roy A. Bear"[] dinsdale@example.com,'
            ' "Fred Flintstone" <dinsdale@test.(bird)example.com>'),
        ('"Roy A. Bear"[] dinsdale@example.com,'
            ' "Fred Flintstone" <dinsdale@test. example.com>'),
        [errors.InvalidHeaderDefect,   # the 'extra' text after the local part
         errors.InvalidHeaderDefect,   # the local part with no angle-addr
         errors.ObsoleteHeaderDefect,  # period in extra text (example.com)
         errors.ObsoleteHeaderDefect], # (bird) in valid address.
        '')
    self.assertEqual(len(mailbox_list.mailboxes), 1)
    self.assertEqual(len(mailbox_list.all_mailboxes), 2)
    self.assertEqual(mailbox_list.all_mailboxes[0].token_type,
                     'invalid-mailbox')
    self.assertIsNone(mailbox_list.all_mailboxes[0].display_name)
    self.assertEqual(mailbox_list.all_mailboxes[0].local_part,
                     'Roy A. Bear')
    self.assertIsNone(mailbox_list.all_mailboxes[0].domain)
    self.assertEqual(mailbox_list.all_mailboxes[0].addr_spec,
                     '"Roy A. Bear"')
    self.assertIs(mailbox_list.all_mailboxes[1],
                  mailbox_list.mailboxes[0])
    self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
                     'dinsdale@test.example.com')
    self.assertEqual(mailbox_list.mailboxes[0].display_name,
                     'Fred Flintstone')
def test_get_mailbox_list_junk_after_valid_address(self):
    # Trailing '@@' junk invalidates the otherwise-good first mailbox.
    mailbox_list = self._test_get_x(parser.get_mailbox_list,
        ('"Roy A. Bear" <dinsdale@example.com>@@,'
            ' "Fred Flintstone" <dinsdale@test.example.com>'),
        ('"Roy A. Bear" <dinsdale@example.com>@@,'
            ' "Fred Flintstone" <dinsdale@test.example.com>'),
        ('"Roy A. Bear" <dinsdale@example.com>@@,'
            ' "Fred Flintstone" <dinsdale@test.example.com>'),
        [errors.InvalidHeaderDefect],
        '')
    self.assertEqual(len(mailbox_list.mailboxes), 1)
    self.assertEqual(len(mailbox_list.all_mailboxes), 2)
    self.assertEqual(mailbox_list.all_mailboxes[0].addr_spec,
                     'dinsdale@example.com')
    self.assertEqual(mailbox_list.all_mailboxes[0].display_name,
                     'Roy A. Bear')
    self.assertEqual(mailbox_list.all_mailboxes[0].token_type,
                     'invalid-mailbox')
    self.assertIs(mailbox_list.all_mailboxes[1],
                  mailbox_list.mailboxes[0])
    self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
                     'dinsdale@test.example.com')
    self.assertEqual(mailbox_list.mailboxes[0].display_name,
                     'Fred Flintstone')
def test_get_mailbox_list_empty_list_element(self):
    # Empty elements (comment-only / bare commas) are obsolete syntax
    # but contribute no mailbox.
    mailbox_list = self._test_get_x(parser.get_mailbox_list,
        ('"Roy A. Bear" <dinsdale@example.com>, (bird),,'
            ' "Fred Flintstone" <dinsdale@test.example.com>'),
        ('"Roy A. Bear" <dinsdale@example.com>, (bird),,'
            ' "Fred Flintstone" <dinsdale@test.example.com>'),
        ('"Roy A. Bear" <dinsdale@example.com>, ,,'
            ' "Fred Flintstone" <dinsdale@test.example.com>'),
        [errors.ObsoleteHeaderDefect]*2,
        '')
    self.assertEqual(len(mailbox_list.mailboxes), 2)
    self.assertEqual(mailbox_list.all_mailboxes,
                     mailbox_list.mailboxes)
    self.assertEqual(mailbox_list.all_mailboxes[0].addr_spec,
                     'dinsdale@example.com')
    self.assertEqual(mailbox_list.all_mailboxes[0].display_name,
                     'Roy A. Bear')
    self.assertEqual(mailbox_list.mailboxes[1].addr_spec,
                     'dinsdale@test.example.com')
    self.assertEqual(mailbox_list.mailboxes[1].display_name,
                     'Fred Flintstone')
def test_get_mailbox_list_only_empty_elements(self):
    # A list of nothing but empty elements yields zero mailboxes.
    mailbox_list = self._test_get_x(parser.get_mailbox_list,
        '(foo),, (bar)',
        '(foo),, (bar)',
        ' ,, ',
        [errors.ObsoleteHeaderDefect]*3,
        '')
    self.assertEqual(len(mailbox_list.mailboxes), 0)
    self.assertEqual(mailbox_list.all_mailboxes,
                     mailbox_list.mailboxes)
# get_group_list
def test_get_group_list_cfws_only(self):
    # CFWS-only group body (a "hidden" list) is valid and empty;
    # parsing stops at ';'.
    group_list = self._test_get_x(parser.get_group_list,
        '(hidden);',
        '(hidden)',
        ' ',
        [],
        ';')
    self.assertEqual(group_list.token_type, 'group-list')
    self.assertEqual(len(group_list.mailboxes), 0)
    self.assertEqual(group_list.mailboxes,
                     group_list.all_mailboxes)
def test_get_group_list_mailbox_list(self):
    # Normal case: the group-list wraps a mailbox-list.
    group_list = self._test_get_x(parser.get_group_list,
        'dinsdale@example.org, "Fred A. Bear" <dinsdale@example.org>',
        'dinsdale@example.org, "Fred A. Bear" <dinsdale@example.org>',
        'dinsdale@example.org, "Fred A. Bear" <dinsdale@example.org>',
        [],
        '')
    self.assertEqual(group_list.token_type, 'group-list')
    self.assertEqual(len(group_list.mailboxes), 2)
    self.assertEqual(group_list.mailboxes,
                     group_list.all_mailboxes)
    self.assertEqual(group_list.mailboxes[1].display_name,
                     'Fred A. Bear')
def test_get_group_list_obs_group_list(self):
    # Leading comma / empty elements: obs-group-list, one defect.
    group_list = self._test_get_x(parser.get_group_list,
        ', (foo),,(bar)',
        ', (foo),,(bar)',
        ', ,, ',
        [errors.ObsoleteHeaderDefect],
        '')
    self.assertEqual(group_list.token_type, 'group-list')
    self.assertEqual(len(group_list.mailboxes), 0)
    self.assertEqual(group_list.mailboxes,
                     group_list.all_mailboxes)
def test_get_group_list_comment_only_invalid(self):
    # Comment with no terminating ';' context is flagged invalid.
    group_list = self._test_get_x(parser.get_group_list,
        '(bar)',
        '(bar)',
        ' ',
        [errors.InvalidHeaderDefect],
        '')
    self.assertEqual(group_list.token_type, 'group-list')
    self.assertEqual(len(group_list.mailboxes), 0)
    self.assertEqual(group_list.mailboxes,
                     group_list.all_mailboxes)
# get_group
def test_get_group_empty(self):
    # 'name:;' — a group with an empty member list.
    group = self._test_get_x(parser.get_group,
        'Monty Python:;',
        'Monty Python:;',
        'Monty Python:;',
        [],
        '')
    self.assertEqual(group.token_type, 'group')
    self.assertEqual(group.display_name, 'Monty Python')
    self.assertEqual(len(group.mailboxes), 0)
    self.assertEqual(group.mailboxes,
                     group.all_mailboxes)
def test_get_group_null_addr_spec(self):
    # '<>' member is invalid: excluded from .mailboxes but visible in
    # .all_mailboxes with its raw value.
    group = self._test_get_x(parser.get_group,
        'foo: <>;',
        'foo: <>;',
        'foo: <>;',
        [errors.InvalidHeaderDefect],
        '')
    self.assertEqual(group.display_name, 'foo')
    self.assertEqual(len(group.mailboxes), 0)
    self.assertEqual(len(group.all_mailboxes), 1)
    self.assertEqual(group.all_mailboxes[0].value, '<>')
def test_get_group_cfws_only(self):
    # Comment-only body before ';' — valid empty group.
    group = self._test_get_x(parser.get_group,
        'Monty Python: (hidden);',
        'Monty Python: (hidden);',
        'Monty Python: ;',
        [],
        '')
    self.assertEqual(group.token_type, 'group')
    self.assertEqual(group.display_name, 'Monty Python')
    self.assertEqual(len(group.mailboxes), 0)
    self.assertEqual(group.mailboxes,
                     group.all_mailboxes)
def test_get_group_single_mailbox(self):
    group = self._test_get_x(parser.get_group,
        'Monty Python: "Fred A. Bear" <dinsdale@example.com>;',
        'Monty Python: "Fred A. Bear" <dinsdale@example.com>;',
        'Monty Python: "Fred A. Bear" <dinsdale@example.com>;',
        [],
        '')
    self.assertEqual(group.token_type, 'group')
    self.assertEqual(group.display_name, 'Monty Python')
    self.assertEqual(len(group.mailboxes), 1)
    self.assertEqual(group.mailboxes,
                     group.all_mailboxes)
    self.assertEqual(group.mailboxes[0].addr_spec,
                     'dinsdale@example.com')
def test_get_group_mixed_list(self):
    # Mixed member forms: name-addr, commented name-addr, bare addr-spec.
    group = self._test_get_x(parser.get_group,
        ('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
            '(foo) Roger <ping@exampele.com>, x@test.example.com;'),
        ('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
            '(foo) Roger <ping@exampele.com>, x@test.example.com;'),
        ('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
            ' Roger <ping@exampele.com>, x@test.example.com;'),
        [],
        '')
    self.assertEqual(group.token_type, 'group')
    self.assertEqual(group.display_name, 'Monty Python')
    self.assertEqual(len(group.mailboxes), 3)
    self.assertEqual(group.mailboxes,
                     group.all_mailboxes)
    self.assertEqual(group.mailboxes[0].display_name,
                     'Fred A. Bear')
    self.assertEqual(group.mailboxes[1].display_name,
                     'Roger')
    self.assertEqual(group.mailboxes[2].local_part, 'x')
def test_get_group_one_invalid(self):
    # 'Roger ping@exampele.com' (no angle brackets) is invalid; the
    # other two members survive in .mailboxes.
    group = self._test_get_x(parser.get_group,
        ('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
            '(foo) Roger ping@exampele.com, x@test.example.com;'),
        ('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
            '(foo) Roger ping@exampele.com, x@test.example.com;'),
        ('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
            ' Roger ping@exampele.com, x@test.example.com;'),
        [errors.InvalidHeaderDefect,   # non-angle addr makes local part invalid
         errors.InvalidHeaderDefect],  # and its not obs-local either: no dots.
        '')
    self.assertEqual(group.token_type, 'group')
    self.assertEqual(group.display_name, 'Monty Python')
    self.assertEqual(len(group.mailboxes), 2)
    self.assertEqual(len(group.all_mailboxes), 3)
    self.assertEqual(group.mailboxes[0].display_name,
                     'Fred A. Bear')
    self.assertEqual(group.mailboxes[1].local_part, 'x')
    self.assertIsNone(group.all_mailboxes[1].display_name)
def test_get_group_missing_final_semicol(self):
    # NOTE(review): 'semicol' abbreviation kept for test-ID stability.
    # Missing trailing ';' is repaired in the token (expected strings
    # include the synthesized ';') and recorded as a defect.
    group = self._test_get_x(parser.get_group,
        ('Monty Python:"Fred A. Bear" <dinsdale@example.com>,'
            'eric@where.test,John <jdoe@test>'),
        ('Monty Python:"Fred A. Bear" <dinsdale@example.com>,'
            'eric@where.test,John <jdoe@test>;'),
        ('Monty Python:"Fred A. Bear" <dinsdale@example.com>,'
            'eric@where.test,John <jdoe@test>;'),
        [errors.InvalidHeaderDefect],
        '')
    self.assertEqual(group.token_type, 'group')
    self.assertEqual(group.display_name, 'Monty Python')
    self.assertEqual(len(group.mailboxes), 3)
    self.assertEqual(group.mailboxes,
                     group.all_mailboxes)
    self.assertEqual(group.mailboxes[0].addr_spec,
                     'dinsdale@example.com')
    self.assertEqual(group.mailboxes[0].display_name,
                     'Fred A. Bear')
    self.assertEqual(group.mailboxes[1].addr_spec,
                     'eric@where.test')
    self.assertEqual(group.mailboxes[2].display_name,
                     'John')
    self.assertEqual(group.mailboxes[2].addr_spec,
                     'jdoe@test')
# get_address
def test_get_address_simple(self):
address = self._test_get_x(parser.get_address,
'dinsdale@example.com',
'dinsdale@example.com',
'dinsdale@example.com',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 1)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address.mailboxes[0].domain,
'example.com')
self.assertEqual(address[0].token_type,
'mailbox')
def test_get_address_complex(self):
address = self._test_get_x(parser.get_address,
'(foo) "Fred A. Bear" <(bird)dinsdale@example.com>',
'(foo) "Fred A. Bear" <(bird)dinsdale@example.com>',
' "Fred A. Bear" < dinsdale@example.com>',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 1)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address.mailboxes[0].display_name,
'Fred A. Bear')
self.assertEqual(address[0].token_type,
'mailbox')
def test_get_address_rfc2047_display_name(self):
    # An RFC 2047 encoded word in the display name is decoded.
    address = self._test_get_x(
        parser.get_address,
        '=?utf-8?q?=C3=89ric?= <foo@example.com>',
        'Éric <foo@example.com>',
        'Éric <foo@example.com>',
        [],
        '')
    self.assertEqual(address.token_type, 'address')
    mailboxes = address.mailboxes
    self.assertEqual(len(mailboxes), 1)
    self.assertEqual(mailboxes, address.all_mailboxes)
    self.assertEqual(mailboxes[0].display_name, 'Éric')
    self.assertEqual(address[0].token_type, 'mailbox')
def test_get_address_empty_group(self):
    # A group with no members is a valid address containing no mailboxes.
    src = 'Monty Python:;'
    address = self._test_get_x(parser.get_address, src, src, src, [], '')
    self.assertEqual(address.token_type, 'address')
    self.assertEqual(len(address.mailboxes), 0)
    self.assertEqual(address.mailboxes, address.all_mailboxes)
    group = address[0]
    self.assertEqual(group.token_type, 'group')
    self.assertEqual(group.display_name, 'Monty Python')
def test_get_address_group(self):
    # A populated group exposes its members through the address's
    # mailboxes attribute.
    src = 'Monty Python: x@example.com, y@example.com;'
    address = self._test_get_x(parser.get_address, src, src, src, [], '')
    self.assertEqual(address.token_type, 'address')
    self.assertEqual(len(address.mailboxes), 2)
    self.assertEqual(address.mailboxes, address.all_mailboxes)
    group = address[0]
    self.assertEqual(group.token_type, 'group')
    self.assertEqual(group.display_name, 'Monty Python')
    self.assertEqual(address.mailboxes[0].local_part, 'x')
def test_get_address_quoted_local_part(self):
    # A quoted-string local part is accepted; local_part is unquoted.
    src = '"foo bar"@example.com'
    address = self._test_get_x(parser.get_address, src, src, src, [], '')
    self.assertEqual(address.token_type, 'address')
    mailboxes = address.mailboxes
    self.assertEqual(len(mailboxes), 1)
    self.assertEqual(mailboxes, address.all_mailboxes)
    self.assertEqual(mailboxes[0].domain, 'example.com')
    self.assertEqual(mailboxes[0].local_part, 'foo bar')
    self.assertEqual(address[0].token_type, 'mailbox')
def test_get_address_ends_at_special(self):
    # Parsing stops at the ',' special; the remainder is left unconsumed.
    address = self._test_get_x(
        parser.get_address,
        'dinsdale@example.com, next',
        'dinsdale@example.com',
        'dinsdale@example.com',
        [],
        ', next')
    self.assertEqual(address.token_type, 'address')
    mailboxes = address.mailboxes
    self.assertEqual(len(mailboxes), 1)
    self.assertEqual(mailboxes, address.all_mailboxes)
    self.assertEqual(mailboxes[0].domain, 'example.com')
    self.assertEqual(address[0].token_type, 'mailbox')
def test_get_address_invalid_mailbox_invalid(self):
    # 'ping example.com' is not a valid mailbox: it has no '@', so it is
    # treated as a (defective) local part with no domain.  The invalid
    # mailbox is excluded from .mailboxes but kept in .all_mailboxes.
    address = self._test_get_x(parser.get_address,
        'ping example.com, next',
        'ping example.com',
        'ping example.com',
        [errors.InvalidHeaderDefect,  # addr-spec with no domain
         errors.InvalidHeaderDefect,  # invalid local-part
         errors.InvalidHeaderDefect,  # missing .s in local-part
        ],
        ', next')
    self.assertEqual(address.token_type, 'address')
    self.assertEqual(len(address.mailboxes), 0)
    self.assertEqual(len(address.all_mailboxes), 1)
    self.assertIsNone(address.all_mailboxes[0].domain)
    # The whole invalid text ends up in local_part.
    self.assertEqual(address.all_mailboxes[0].local_part, 'ping example.com')
    self.assertEqual(address[0].token_type, 'invalid-mailbox')
def test_get_address_quoted_strings_in_atom_list(self):
    # Nested/adjacent quoted strings in the local part are defective but
    # recoverable: the value collapses the quotes, while addr_spec
    # re-quotes the whole local part so it round-trips as one token.
    address = self._test_get_x(parser.get_address,
        '""example" example"@example.com',
        '""example" example"@example.com',
        'example example@example.com',
        [errors.InvalidHeaderDefect]*3,
        '')
    self.assertEqual(address.all_mailboxes[0].local_part, 'example example')
    self.assertEqual(address.all_mailboxes[0].domain, 'example.com')
    self.assertEqual(address.all_mailboxes[0].addr_spec, '"example example"@example.com')
# get_address_list
def test_get_address_list_mailboxes_simple(self):
    # A single addr-spec makes a one-element address-list.
    src = 'dinsdale@example.com'
    address_list = self._test_get_x(parser.get_address_list,
                                    src, src, src, [], '')
    self.assertEqual(address_list.token_type, 'address-list')
    self.assertEqual(len(address_list.mailboxes), 1)
    self.assertEqual(address_list.mailboxes, address_list.all_mailboxes)
    self.assertEqual([str(x) for x in address_list.mailboxes],
                     [str(x) for x in address_list.addresses])
    self.assertEqual(address_list.mailboxes[0].domain, 'example.com')
    self.assertEqual(address_list[0].token_type, 'address')
    self.assertIsNone(address_list[0].display_name)
def test_get_address_list_mailboxes_two_simple(self):
    # Two comma-separated mailboxes, one bare and one with display name.
    src = 'foo@example.com, "Fred A. Bar" <bar@example.com>'
    address_list = self._test_get_x(parser.get_address_list,
                                    src, src, src, [], '')
    self.assertEqual(address_list.token_type, 'address-list')
    self.assertEqual(len(address_list.mailboxes), 2)
    self.assertEqual(address_list.mailboxes, address_list.all_mailboxes)
    self.assertEqual([str(x) for x in address_list.mailboxes],
                     [str(x) for x in address_list.addresses])
    self.assertEqual(address_list.mailboxes[0].local_part, 'foo')
    self.assertEqual(address_list.mailboxes[1].display_name, "Fred A. Bar")
def test_get_address_list_mailboxes_complex(self):
    # Mixed list: quoted display name, comment before a name, and a
    # phrase containing a period plus comments inside the domain (both
    # obsolete syntax).  The 'value' reconstruction quotes the dotted
    # phrase and drops the comments.
    address_list = self._test_get_x(parser.get_address_list,
        ('"Roy A. Bear" <dinsdale@example.com>, '
            '(ping) Foo <x@example.com>,'
            'Nobody Is. Special <y@(bird)example.(bad)com>'),
        ('"Roy A. Bear" <dinsdale@example.com>, '
            '(ping) Foo <x@example.com>,'
            'Nobody Is. Special <y@(bird)example.(bad)com>'),
        ('"Roy A. Bear" <dinsdale@example.com>, '
            'Foo <x@example.com>,'
            '"Nobody Is. Special" <y@example. com>'),
        [errors.ObsoleteHeaderDefect,  # period in Is.
         errors.ObsoleteHeaderDefect], # cfws in domain
        '')
    self.assertEqual(address_list.token_type, 'address-list')
    self.assertEqual(len(address_list.mailboxes), 3)
    self.assertEqual(address_list.mailboxes,
                     address_list.all_mailboxes)
    self.assertEqual([str(x) for x in address_list.mailboxes],
                     [str(x) for x in address_list.addresses])
    self.assertEqual(address_list.mailboxes[0].domain, 'example.com')
    self.assertEqual(address_list.mailboxes[0].token_type, 'mailbox')
    self.assertEqual(address_list.addresses[0].token_type, 'address')
    self.assertEqual(address_list.mailboxes[1].local_part, 'x')
    self.assertEqual(address_list.mailboxes[2].display_name,
                     'Nobody Is. Special')
def test_get_address_list_mailboxes_invalid_addresses(self):
    # Only the first address is valid; the second ('Foo x@example.com[]')
    # has a bad local part, and the third has an addr-spec with no '@'.
    # Invalid mailboxes appear in all_mailboxes but not mailboxes.
    address_list = self._test_get_x(parser.get_address_list,
        ('"Roy A. Bear" <dinsdale@example.com>, '
            '(ping) Foo x@example.com[],'
            'Nobody Is. Special <(bird)example.(bad)com>'),
        ('"Roy A. Bear" <dinsdale@example.com>, '
            '(ping) Foo x@example.com[],'
            'Nobody Is. Special <(bird)example.(bad)com>'),
        ('"Roy A. Bear" <dinsdale@example.com>, '
            'Foo x@example.com[],'
            '"Nobody Is. Special" < example. com>'),
        [errors.InvalidHeaderDefect,   # invalid address in list
         errors.InvalidHeaderDefect,   # 'Foo x' local part invalid.
         errors.InvalidHeaderDefect,   # Missing . in 'Foo x' local part
         errors.ObsoleteHeaderDefect,  # period in 'Is.' disp-name phrase
         errors.InvalidHeaderDefect,   # no domain part in addr-spec
         errors.ObsoleteHeaderDefect], # addr-spec has comment in it
        '')
    self.assertEqual(address_list.token_type, 'address-list')
    self.assertEqual(len(address_list.mailboxes), 1)
    self.assertEqual(len(address_list.all_mailboxes), 3)
    self.assertEqual([str(x) for x in address_list.all_mailboxes],
                     [str(x) for x in address_list.addresses])
    self.assertEqual(address_list.mailboxes[0].domain, 'example.com')
    self.assertEqual(address_list.mailboxes[0].token_type, 'mailbox')
    self.assertEqual(address_list.addresses[0].token_type, 'address')
    self.assertEqual(address_list.addresses[1].token_type, 'address')
    self.assertEqual(len(address_list.addresses[0].mailboxes), 1)
    self.assertEqual(len(address_list.addresses[1].mailboxes), 0)
    # Bug fix: this assertion previously duplicated the addresses[1]
    # check; the third (also invalid) address must likewise contribute
    # no valid mailboxes.
    self.assertEqual(len(address_list.addresses[2].mailboxes), 0)
    self.assertEqual(
        address_list.addresses[1].all_mailboxes[0].local_part, 'Foo x')
    self.assertEqual(
        address_list.addresses[2].all_mailboxes[0].display_name,
        "Nobody Is. Special")
def test_get_address_list_group_empty(self):
    # An empty group is one address in the list, with zero mailboxes.
    src = 'Monty Python: ;'
    address_list = self._test_get_x(parser.get_address_list,
                                    src, src, src, [], '')
    self.assertEqual(address_list.token_type, 'address-list')
    self.assertEqual(len(address_list.mailboxes), 0)
    self.assertEqual(address_list.mailboxes, address_list.all_mailboxes)
    addresses = address_list.addresses
    self.assertEqual(len(addresses), 1)
    self.assertEqual(addresses[0].token_type, 'address')
    self.assertEqual(addresses[0].display_name, 'Monty Python')
    self.assertEqual(len(addresses[0].mailboxes), 0)
def test_get_address_list_group_simple(self):
    # A one-member group: the member shows up both at list level and
    # inside the group address.
    src = 'Monty Python: dinsdale@example.com;'
    address_list = self._test_get_x(parser.get_address_list,
                                    src, src, src, [], '')
    self.assertEqual(address_list.token_type, 'address-list')
    self.assertEqual(len(address_list.mailboxes), 1)
    self.assertEqual(address_list.mailboxes, address_list.all_mailboxes)
    self.assertEqual(address_list.mailboxes[0].domain, 'example.com')
    group = address_list.addresses[0]
    self.assertEqual(group.display_name, 'Monty Python')
    self.assertEqual(group.mailboxes[0].domain, 'example.com')
def test_get_address_list_group_and_mailboxes(self):
    # A group followed by two plain mailboxes: 3 addresses, 4 mailboxes
    # total (two inside the group, two standalone).
    address_list = self._test_get_x(parser.get_address_list,
        ('Monty Python: dinsdale@example.com, "Fred" <flint@example.com>;, '
            'Abe <x@example.com>, Bee <y@example.com>'),
        ('Monty Python: dinsdale@example.com, "Fred" <flint@example.com>;, '
            'Abe <x@example.com>, Bee <y@example.com>'),
        ('Monty Python: dinsdale@example.com, "Fred" <flint@example.com>;, '
            'Abe <x@example.com>, Bee <y@example.com>'),
        [],
        '')
    self.assertEqual(address_list.token_type, 'address-list')
    self.assertEqual(len(address_list.mailboxes), 4)
    self.assertEqual(address_list.mailboxes,
                     address_list.all_mailboxes)
    self.assertEqual(len(address_list.addresses), 3)
    self.assertEqual(address_list.mailboxes[0].local_part, 'dinsdale')
    self.assertEqual(address_list.addresses[0].display_name,
                     'Monty Python')
    self.assertEqual(address_list.addresses[0].mailboxes[0].domain,
                     'example.com')
    self.assertEqual(address_list.addresses[0].mailboxes[1].local_part,
                     'flint')
    self.assertEqual(address_list.addresses[1].mailboxes[0].local_part,
                     'x')
    self.assertEqual(address_list.addresses[2].mailboxes[0].local_part,
                     'y')
    # The flat mailbox list lines up with the standalone addresses.
    self.assertEqual(str(address_list.addresses[1]),
                     str(address_list.mailboxes[2]))
def test_invalid_content_disposition(self):
    # A header starting with ';' has no disposition type; the parser
    # must still return a token while recording two defects.  All
    # checking happens inside _test_parse_x, so the previously unused
    # 'content_disp' local has been dropped.
    self._test_parse_x(
        parser.parse_content_disposition_header,
        ";attachment", "; attachment", ";attachment",
        [errors.InvalidHeaderDefect]*2
    )
def test_invalid_content_transfer_encoding(self):
    # A CTE header with no encoding token before ';' is defective but
    # still parses.  The previously unused 'cte' local has been dropped;
    # _test_parse_x performs all the assertions.
    self._test_parse_x(
        parser.parse_content_transfer_encoding_header,
        ";foo", ";foo", ";foo", [errors.InvalidHeaderDefect]*3
    )
@parameterize
class Test_parse_mime_parameters(TestParserMixin, TestEmailBase):
    """Parameterized tests for parser.parse_mime_parameters.

    Each entry of mime_parameters_params is
    (input, expected str, expected value, expected params list,
     expected defects); @parameterize turns them into test methods.
    """

    def mime_parameters_as_value(self,
                                 value,
                                 tl_str,
                                 tl_value,
                                 params,
                                 defects):
        # Shared driver: parse, then check token type and the decoded
        # (name, value) parameter pairs.
        mime_parameters = self._test_parse_x(parser.parse_mime_parameters,
                                             value, tl_str, tl_value, defects)
        self.assertEqual(mime_parameters.token_type, 'mime-parameters')
        self.assertEqual(list(mime_parameters.params), params)

    mime_parameters_params = {
        'simple': (
            'filename="abc.py"',
            ' filename="abc.py"',
            'filename=abc.py',
            [('filename', 'abc.py')],
            []),
        'multiple_keys': (
            'filename="abc.py"; xyz=abc',
            ' filename="abc.py"; xyz="abc"',
            'filename=abc.py; xyz=abc',
            [('filename', 'abc.py'), ('xyz', 'abc')],
            []),
        # RFC 2231 continuation: *0*/*1* sections are decoded and joined.
        'split_value': (
            "filename*0*=iso-8859-1''%32%30%31%2E; filename*1*=%74%69%66",
            ' filename="201.tif"',
            "filename*0*=iso-8859-1''%32%30%31%2E; filename*1*=%74%69%66",
            [('filename', '201.tif')],
            []),
        # Note that it is undefined what we should do for error recovery when
        # there are duplicate parameter names or duplicate parts in a split
        # part.  We choose to ignore all duplicate parameters after the first
        # and to take duplicate or missing rfc 2231 parts in appearance order.
        # This is backward compatible with get_param's behavior, but the
        # decisions are arbitrary.
        'duplicate_key': (
            'filename=abc.gif; filename=def.tiff',
            ' filename="abc.gif"',
            "filename=abc.gif; filename=def.tiff",
            [('filename', 'abc.gif')],
            [errors.InvalidHeaderDefect]),
        'duplicate_key_with_split_value': (
            "filename*0*=iso-8859-1''%32%30%31%2E; filename*1*=%74%69%66;"
                " filename=abc.gif",
            ' filename="201.tif"',
            "filename*0*=iso-8859-1''%32%30%31%2E; filename*1*=%74%69%66;"
                " filename=abc.gif",
            [('filename', '201.tif')],
            [errors.InvalidHeaderDefect]),
        'duplicate_key_with_split_value_other_order': (
            "filename=abc.gif; "
                " filename*0*=iso-8859-1''%32%30%31%2E; filename*1*=%74%69%66",
            ' filename="abc.gif"',
            "filename=abc.gif;"
                " filename*0*=iso-8859-1''%32%30%31%2E; filename*1*=%74%69%66",
            [('filename', 'abc.gif')],
            [errors.InvalidHeaderDefect]),
        'duplicate_in_split_value': (
            "filename*0*=iso-8859-1''%32%30%31%2E; filename*1*=%74%69%66;"
                " filename*1*=abc.gif",
            ' filename="201.tifabc.gif"',
            "filename*0*=iso-8859-1''%32%30%31%2E; filename*1*=%74%69%66;"
                " filename*1*=abc.gif",
            [('filename', '201.tifabc.gif')],
            [errors.InvalidHeaderDefect]),
        'missing_split_value': (
            "filename*0*=iso-8859-1''%32%30%31%2E; filename*3*=%74%69%66;",
            ' filename="201.tif"',
            "filename*0*=iso-8859-1''%32%30%31%2E; filename*3*=%74%69%66;",
            [('filename', '201.tif')],
            [errors.InvalidHeaderDefect]),
        'duplicate_and_missing_split_value': (
            "filename*0*=iso-8859-1''%32%30%31%2E; filename*3*=%74%69%66;"
                " filename*3*=abc.gif",
            ' filename="201.tifabc.gif"',
            "filename*0*=iso-8859-1''%32%30%31%2E; filename*3*=%74%69%66;"
                " filename*3*=abc.gif",
            [('filename', '201.tifabc.gif')],
            [errors.InvalidHeaderDefect]*2),
        # Here we depart from get_param and assume the *0* was missing.
        'duplicate_with_broken_split_value': (
            "filename=abc.gif; "
                " filename*2*=iso-8859-1''%32%30%31%2E; filename*3*=%74%69%66",
            ' filename="abc.gif201.tif"',
            "filename=abc.gif;"
                " filename*2*=iso-8859-1''%32%30%31%2E; filename*3*=%74%69%66",
            [('filename', 'abc.gif201.tif')],
            # Defects are apparent missing *0*, and two 'out of sequence'.
            [errors.InvalidHeaderDefect]*3),
        }
@parameterize
class Test_parse_mime_version(TestParserMixin, TestEmailBase):
    """Parameterized tests for parser.parse_mime_version.

    Each entry of mime_version_params is
    (input, expected str, expected value, major, minor, defects).
    """

    def mime_version_as_value(self,
                              value,
                              tl_str,
                              tl_value,
                              major,
                              minor,
                              defects):
        # Shared driver: parse and check major/minor version numbers.
        mime_version = self._test_parse_x(parser.parse_mime_version,
                                          value, tl_str, tl_value, defects)
        self.assertEqual(mime_version.major, major)
        self.assertEqual(mime_version.minor, minor)

    mime_version_params = {
        'rfc_2045_1': (
            '1.0',
            '1.0',
            '1.0',
            1,
            0,
            []),
        # Comments are allowed after, before, or inside the version
        # (RFC 2045 examples); the 'value' replaces them with a space.
        'RFC_2045_2': (
            '1.0 (produced by MetaSend Vx.x)',
            '1.0 (produced by MetaSend Vx.x)',
            '1.0 ',
            1,
            0,
            []),
        'RFC_2045_3': (
            '(produced by MetaSend Vx.x) 1.0',
            '(produced by MetaSend Vx.x) 1.0',
            ' 1.0',
            1,
            0,
            []),
        'RFC_2045_4': (
            '1.(produced by MetaSend Vx.x)0',
            '1.(produced by MetaSend Vx.x)0',
            '1. 0',
            1,
            0,
            []),
        # An empty header has no version at all.
        'empty': (
            '',
            '',
            '',
            None,
            None,
            [errors.HeaderMissingRequiredValue]),
        }
class TestFolding(TestEmailBase):
    """Tests of TokenList.fold(): header line wrapping and RFC 2047
    encoded-word generation under the default policy."""

    policy = policy.default

    def _test(self, tl, folded, policy=policy):
        # Fold the token list and compare against the exact expected
        # byte-for-byte folded text; ppstr() gives a readable diff on
        # failure.
        self.assertEqual(tl.fold(policy=policy), folded, tl.ppstr())

    def test_simple_unstructured_no_folds(self):
        self._test(parser.get_unstructured("This is a test"),
                   "This is a test\n")

    def test_simple_unstructured_folded(self):
        # Long value wraps at a space; continuation lines are indented.
        self._test(parser.get_unstructured("This is also a test, but this "
                        "time there are enough words (and even some "
                        "symbols) to make it wrap; at least in theory."),
                   "This is also a test, but this time there are enough "
                        "words (and even some\n"
                   " symbols) to make it wrap; at least in theory.\n")

    def test_unstructured_with_unicode_no_folds(self):
        # Non-ASCII text becomes a single encoded word.
        self._test(parser.get_unstructured("hübsch kleiner beißt"),
                   "=?utf-8?q?h=C3=BCbsch_kleiner_bei=C3=9Ft?=\n")

    def test_one_ew_on_each_of_two_wrapped_lines(self):
        self._test(parser.get_unstructured("Mein kleiner Kaktus ist sehr "
                                           "hübsch. Es hat viele Stacheln "
                                           "und oft beißt mich."),
                   "Mein kleiner Kaktus ist sehr =?utf-8?q?h=C3=BCbsch=2E?= "
                        "Es hat viele Stacheln\n"
                   " und oft =?utf-8?q?bei=C3=9Ft?= mich.\n")

    def test_ews_combined_before_wrap(self):
        # Adjacent encoded words on one line are merged into one.
        self._test(parser.get_unstructured("Mein Kaktus ist hübsch. "
                                           "Es beißt mich. "
                                           "And that's all I'm sayin."),
                   "Mein Kaktus ist =?utf-8?q?h=C3=BCbsch=2E__Es_bei=C3=9Ft?= "
                        "mich. And that's\n"
                   " all I'm sayin.\n")

    # XXX Need test of an encoded word so long that it needs to be wrapped

    def test_simple_address(self):
        self._test(parser.get_address_list("abc <xyz@example.com>")[0],
                   "abc <xyz@example.com>\n")

    def test_address_list_folding_at_commas(self):
        # Address lists fold only after commas, never inside an address.
        self._test(parser.get_address_list('abc <xyz@example.com>, '
                                            '"Fred Blunt" <sharp@example.com>, '
                                            '"J.P.Cool" <hot@example.com>, '
                                            '"K<>y" <key@example.com>, '
                                            'Firesale <cheap@example.com>, '
                                            '<end@example.com>')[0],
                   'abc <xyz@example.com>, "Fred Blunt" <sharp@example.com>,\n'
                   ' "J.P.Cool" <hot@example.com>, "K<>y" <key@example.com>,\n'
                   ' Firesale <cheap@example.com>, <end@example.com>\n')

    def test_address_list_with_unicode_names(self):
        # Only the non-ASCII display-name words get 2047-encoded.
        self._test(parser.get_address_list(
            'Hübsch Kaktus <beautiful@example.com>, '
                'beißt beißt <biter@example.com>')[0],
            '=?utf-8?q?H=C3=BCbsch?= Kaktus <beautiful@example.com>,\n'
                ' =?utf-8?q?bei=C3=9Ft_bei=C3=9Ft?= <biter@example.com>\n')

    def test_address_list_with_unicode_names_in_quotes(self):
        # Quoting is dropped when the name must be 2047-encoded anyway.
        self._test(parser.get_address_list(
            '"Hübsch Kaktus" <beautiful@example.com>, '
                '"beißt" beißt <biter@example.com>')[0],
            '=?utf-8?q?H=C3=BCbsch?= Kaktus <beautiful@example.com>,\n'
                ' =?utf-8?q?bei=C3=9Ft_bei=C3=9Ft?= <biter@example.com>\n')

    # XXX Need tests with comments on various sides of a unicode token,
    # and with unicode tokens in the comments.  Spaces inside the quotes
    # currently don't do the right thing.

    def test_split_at_whitespace_after_header_before_long_token(self):
        # A token too long for the first line is pushed to a continuation
        # line after the header label.
        body = parser.get_unstructured(' ' + 'x'*77)
        header = parser.Header([
            parser.HeaderLabel([parser.ValueTerminal('test:', 'atext')]),
            parser.CFWSList([parser.WhiteSpaceTerminal(' ', 'fws')]), body])
        self._test(header, 'test: \n ' + 'x'*77 + '\n')

    def test_split_at_whitespace_before_long_token(self):
        self._test(parser.get_unstructured('xxx ' + 'y'*77),
                   'xxx \n ' + 'y'*77 + '\n')

    def test_overlong_encodeable_is_wrapped(self):
        # An encodeable run longer than one line is split across two
        # encoded words; the arithmetic accounts for the '=?utf-8?q?'
        # chrome plus the trailing '?='.
        first_token_with_whitespace = 'xxx '
        chrome_leader = '=?utf-8?q?'
        len_chrome = len(chrome_leader) + 2
        len_non_y = len_chrome + len(first_token_with_whitespace)
        self._test(parser.get_unstructured(first_token_with_whitespace +
                                           'y'*80),
                   first_token_with_whitespace + chrome_leader +
                       'y'*(78-len_non_y) + '?=\n' +
                       ' ' + chrome_leader + 'y'*(80-(78-len_non_y)) + '?=\n')

    def test_long_filename_attachment(self):
        # A long filename parameter folds via RFC 2231 *0*/*1* sections.
        self._test(parser.parse_content_disposition_header(
            'attachment; filename="TEST_TEST_TEST_TEST'
                '_TEST_TEST_TEST_TEST_TEST_TEST_TEST_TEST_TES.txt"'),
            "attachment;\n"
            " filename*0*=us-ascii''TEST_TEST_TEST_TEST_TEST_TEST"
                "_TEST_TEST_TEST_TEST_TEST;\n"
            " filename*1*=_TEST_TES.txt\n",
            )
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 43.686126
| 94
| 0.555733
| 13,569
| 121,229
| 4.676837
| 0.038986
| 0.058131
| 0.043807
| 0.046896
| 0.860542
| 0.816751
| 0.76809
| 0.730161
| 0.691333
| 0.652364
| 0
| 0.013082
| 0.312722
| 121,229
| 2,774
| 95
| 43.701875
| 0.748578
| 0.01658
| 0
| 0.627342
| 0
| 0.012777
| 0.199902
| 0.047853
| 0
| 0
| 0
| 0
| 0.23339
| 1
| 0.129898
| false
| 0
| 0.002555
| 0
| 0.138842
| 0.011073
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8a41c523cf012e7f702148c0c18fb11cc4ebacee
| 13,485
|
py
|
Python
|
theano/gpuarray/tests/test_multinomial.py
|
JimmyRetza/Theano
|
72d83bce0d547d54ab3513bcba35c166979f7a6f
|
[
"BSD-3-Clause"
] | 9
|
2018-10-29T20:25:25.000Z
|
2021-11-17T11:03:17.000Z
|
theano/gpuarray/tests/test_multinomial.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
theano/gpuarray/tests/test_multinomial.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1
|
2020-01-06T20:28:42.000Z
|
2020-01-06T20:28:42.000Z
|
from __future__ import absolute_import, print_function, division
import os
import numpy as np
import unittest
import theano
from theano import config, function, tensor
from theano.compat import PY3
from theano.misc.pkl_utils import CompatUnpickler
from theano.sandbox import multinomial
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import theano.tests.unittest_tools as utt
from .config import mode_with_gpu
from ..multinomial import (GPUAMultinomialFromUniform,
GPUAChoiceFromUniform)
def test_multinomial_output_dtype():
    """Check GPU lifting of MultinomialFromUniform for each output dtype.

    This tests the MultinomialFromUniform Op directly, not going through
    the multinomial() call in GPU random generation.
    """
    p = tensor.fmatrix()
    u = tensor.fvector()
    for dtype in ['int64', 'float32', 'float16', 'float64', 'int32', 'auto']:
        m = theano.sandbox.multinomial.MultinomialFromUniform(dtype)(p, u)

        # the m*2 allows the multinomial to reuse output
        f = function([p, u], m * 2, allow_input_downcast=True, mode=mode_with_gpu)

        # Idiom fix: generator expression instead of building a list for any().
        assert any(type(node.op) is GPUAMultinomialFromUniform
                   for node in f.maker.fgraph.toposort())

        # test that both first and second samples can be drawn
        utt.assert_allclose(f([[1, 0], [0, 1]], [.1, .1]),
                            [[2, 0], [0, 2]])

        # test that both second labels can be drawn
        r = f([[.2, .8], [.3, .7]], [.31, .31])
        utt.assert_allclose(r, [[0, 2], [0, 2]])

        # test that both first labels can be drawn
        r = f([[.2, .8], [.3, .7]], [.21, .21])
        utt.assert_allclose(r, [[0, 2], [2, 0]])

        # change the size to make sure output gets reallocated ok
        # and also make sure that the GPU version doesn't screw up the
        # transposed-ness
        r = f([[.2, .8]], [.25])
        utt.assert_allclose(r, [[0, 2]])
def test_multinomial_input_dtype():
    """Check GPU MultinomialFromUniform over input/output dtype pairs.

    This tests the MultinomialFromUniform Op directly, not going through
    the multinomial() call in GPU random generation.
    """
    for idtype in ['float32', 'float16', 'float64']:
        for odtype in ['float32', 'float16', 'float64', 'int32']:

            p = tensor.matrix('p', idtype)
            u = tensor.vector('u', idtype)
            m = theano.sandbox.multinomial.MultinomialFromUniform(odtype)(p, u)

            # the m*2 allows the multinomial to reuse output
            f = function([p, u], m * 2, allow_input_downcast=True, mode=mode_with_gpu)

            # Idiom fix: generator expression instead of a list inside any().
            assert any(type(node.op) is GPUAMultinomialFromUniform
                       for node in f.maker.fgraph.toposort())

            # test that both first and second samples can be drawn
            utt.assert_allclose(f([[1, 0], [0, 1]], [.1, .1]),
                                [[2, 0], [0, 2]])

            # test that both second labels can be drawn
            r = f([[.2, .8], [.3, .7]], [.31, .31])
            utt.assert_allclose(r, [[0, 2], [0, 2]])

            # test that both first labels can be drawn
            r = f([[.2, .8], [.3, .7]], [.21, .21])
            utt.assert_allclose(r, [[0, 2], [2, 0]])

            # change the size to make sure output gets reallocated ok
            # and also make sure that the GPU version doesn't screw up the
            # transposed-ness
            r = f([[.2, .8]], [.25])
            utt.assert_allclose(r, [[0, 2]])
# TODO: check a bigger example (make sure blocking on GPU is handled correctly)
def test_multinomial_large():
    """Run the GPU multinomial on a large (10000, 4) input.

    Checks output shape, dtype under each cast policy, that exactly two
    samples are drawn per row, and the expected argmax column.
    """
    # DEBUG_MODE will test this on GPU
    p = tensor.fmatrix()
    u = tensor.fvector()
    m = theano.sandbox.multinomial.MultinomialFromUniform('auto')(p, u)
    f = function([p, u], m * 2, allow_input_downcast=True, mode=mode_with_gpu)
    # Idiom fix: generator expression instead of a list inside any().
    assert any(type(node.op) is GPUAMultinomialFromUniform
               for node in f.maker.fgraph.toposort())

    pval = np.arange(10000 * 4,
                     dtype='float32').reshape((10000, 4)) + 0.1
    pval = pval / pval.sum(axis=1)[:, None]
    uval = np.ones_like(pval[:, 0]) * 0.5
    mval = f(pval, uval)

    assert mval.shape == pval.shape
    if config.cast_policy == 'custom':
        assert mval.dtype == pval.dtype
    elif config.cast_policy == 'numpy+floatX':
        assert mval.dtype == config.floatX
    elif config.cast_policy == 'numpy':
        assert mval.dtype == 'float64'
    else:
        raise NotImplementedError(config.cast_policy)
    utt.assert_allclose(mval.sum(axis=1), 2)
    asdf = np.asarray([0, 0, 2, 0]) + 0 * pval
    utt.assert_allclose(mval, asdf)  # broadcast over all rows
def test_gpu_opt_dtypes():
    """Test that the GPU-lifted Op returns samples of the requested dtype."""
    for dtype in ['uint32', 'float32', 'int64', 'float64']:
        p = tensor.fmatrix()
        u = tensor.fvector()
        m = theano.sandbox.multinomial.MultinomialFromUniform(dtype)(p, u)
        f = function([p, u], m, allow_input_downcast=True, mode=mode_with_gpu)
        # Idiom fix: generator expression instead of a list inside any().
        assert any(type(node.op) is GPUAMultinomialFromUniform
                   for node in f.maker.fgraph.toposort())

        pval = np.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
        pval = pval / pval.sum(axis=1)[:, None]
        uval = np.ones_like(pval[:, 0]) * 0.5
        samples = f(pval, uval)
        assert samples.dtype == dtype, "%s != %s" % (samples.dtype, dtype)
def test_gpu_opt():
    """Test that the Op is lifted to the GPU when its output moves there.

    Does have some overlap with test_multinomial_0.  Also covers the
    row-input case, which used to fail.
    """
    # We test the case where we put the op on the gpu when the output
    # is moved to the gpu.
    p = tensor.fmatrix()
    u = tensor.fvector()
    m = theano.sandbox.multinomial.MultinomialFromUniform('auto')(p, u)
    assert m.dtype == 'float32', m.dtype
    f = function([p, u], m, allow_input_downcast=True, mode=mode_with_gpu)
    # Idiom fix: generator expression instead of a list inside any().
    assert any(type(node.op) is GPUAMultinomialFromUniform
               for node in f.maker.fgraph.toposort())
    pval = np.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
    pval = pval / pval.sum(axis=1)[:, None]
    uval = np.ones_like(pval[:, 0]) * 0.5
    f(pval, uval)

    # Test with a row, it was failing in the past.
    r = tensor.frow()
    m = theano.sandbox.multinomial.MultinomialFromUniform('auto')(r, u)
    assert m.dtype == 'float32', m.dtype
    f = function([r, u], m, allow_input_downcast=True, mode=mode_with_gpu)
    assert any(type(node.op) is GPUAMultinomialFromUniform
               for node in f.maker.fgraph.toposort())
    pval = np.arange(1 * 4, dtype='float32').reshape((1, 4)) + 0.1
    pval = pval / pval.sum(axis=1)[:, None]
    uval = np.ones_like(pval[:, 0]) * 0.5
    f(pval, uval)
class test_OP_wor(unittest.TestCase):
    """Tests of the ChoiceFromUniform Op (sampling without replacement),
    called directly on the CPU."""

    def test_select_distinct(self):
        # Tests that ChoiceFromUniform always selects distinct elements
        p = tensor.fmatrix()
        u = tensor.fvector()
        n = tensor.iscalar()
        m = multinomial.ChoiceFromUniform(odtype='auto')(p, u, n)

        f = function([p, u, n], m, allow_input_downcast=True)

        n_elements = 1000
        all_indices = range(n_elements)
        np.random.seed(12345)  # fixed seed for reproducibility
        for i in [5, 10, 50, 100, 500, n_elements]:
            uni = np.random.rand(i).astype(config.floatX)
            pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
            pvals /= pvals.sum(1)
            res = f(pvals, uni, i)
            res = np.squeeze(res)
            # exactly i samples, all valid indices (distinctness is
            # implied by len(unique(res)) == len(res) == i)
            assert len(res) == i, res
            assert np.all(np.in1d(np.unique(res), all_indices)), res

    def test_fail_select_alot(self):
        # Tests that ChoiceFromUniform fails when asked to sample more
        # elements than the actual number of elements
        p = tensor.fmatrix()
        u = tensor.fvector()
        n = tensor.iscalar()
        m = multinomial.ChoiceFromUniform(odtype='auto')(p, u, n)

        f = function([p, u, n], m, allow_input_downcast=True)

        n_elements = 100
        n_selected = 200
        np.random.seed(12345)
        uni = np.random.rand(n_selected).astype(config.floatX)
        pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
        pvals /= pvals.sum(1)
        self.assertRaises(ValueError, f, pvals, uni, n_selected)

    def test_select_proportional_to_weight(self):
        # Tests that ChoiceFromUniform selects elements, on average,
        # proportional to the their probabilities
        p = tensor.fmatrix()
        u = tensor.fvector()
        n = tensor.iscalar()
        m = multinomial.ChoiceFromUniform(odtype='auto')(p, u, n)

        f = function([p, u, n], m, allow_input_downcast=True)

        n_elements = 100
        n_selected = 10
        mean_rtol = 0.0005  # tolerance on mean |empirical - true| prob
        np.random.seed(12345)
        pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
        pvals /= pvals.sum(1)
        avg_pvals = np.zeros((n_elements,), dtype=config.floatX)

        for rep in range(10000):
            uni = np.random.rand(n_selected).astype(config.floatX)
            res = f(pvals, uni, n_selected)
            res = np.squeeze(res)
            avg_pvals[res] += 1
        avg_pvals /= avg_pvals.sum()
        avg_diff = np.mean(abs(avg_pvals - pvals))
        assert avg_diff < mean_rtol, avg_diff
class test_function_wor(unittest.TestCase):
    """Tests of MRG_RandomStreams.multinomial_wo_replacement, the
    user-facing wrapper around sampling without replacement."""

    def test_select_distinct(self):
        # Tests that multinomial_wo_replacement always selects distinct elements
        th_rng = RandomStreams(12345)

        p = tensor.fmatrix()
        n = tensor.iscalar()
        m = th_rng.multinomial_wo_replacement(pvals=p, n=n)

        f = function([p, n], m, allow_input_downcast=True)

        n_elements = 1000
        all_indices = range(n_elements)
        np.random.seed(12345)
        for i in [5, 10, 50, 100, 500, n_elements]:
            pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
            pvals /= pvals.sum(1)
            res = f(pvals, i)
            res = np.squeeze(res)
            # exactly i samples, all of them valid indices
            assert len(res) == i
            assert np.all(np.in1d(np.unique(res), all_indices)), res

    def test_fail_select_alot(self):
        # Tests that multinomial_wo_replacement fails when asked to sample more
        # elements than the actual number of elements
        th_rng = RandomStreams(12345)

        p = tensor.fmatrix()
        n = tensor.iscalar()
        m = th_rng.multinomial_wo_replacement(pvals=p, n=n)

        f = function([p, n], m, allow_input_downcast=True)

        n_elements = 100
        n_selected = 200
        np.random.seed(12345)
        pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
        pvals /= pvals.sum(1)
        self.assertRaises(ValueError, f, pvals, n_selected)

    def test_select_proportional_to_weight(self):
        # Tests that multinomial_wo_replacement selects elements, on average,
        # proportional to the their probabilities
        th_rng = RandomStreams(12345)

        p = tensor.fmatrix()
        n = tensor.iscalar()
        m = th_rng.multinomial_wo_replacement(pvals=p, n=n)

        f = function([p, n], m, allow_input_downcast=True)

        n_elements = 100
        n_selected = 10
        mean_rtol = 0.0005  # tolerance on mean |empirical - true| prob
        np.random.seed(12345)
        pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
        pvals /= pvals.sum(1)
        avg_pvals = np.zeros((n_elements,), dtype=config.floatX)

        for rep in range(10000):
            res = f(pvals, n_selected)
            res = np.squeeze(res)
            avg_pvals[res] += 1
        avg_pvals /= avg_pvals.sum()
        avg_diff = np.mean(abs(avg_pvals - pvals))
        assert avg_diff < mean_rtol
def test_gpu_opt_wor():
    """Test GPU lifting of ChoiceFromUniform for replace=False/True.

    We test the case where we put the op on the gpu when the output is
    moved to the gpu.  Also covers the row-input case, which used to fail.
    """
    p = tensor.fmatrix()
    u = tensor.fvector()
    n = tensor.iscalar()
    for replace in [False, True]:
        m = multinomial.ChoiceFromUniform(odtype='auto',
                                          replace=replace)(p, u, n)
        assert m.dtype == 'int64', m.dtype
        f = function([p, u, n], m, allow_input_downcast=True,
                     mode=mode_with_gpu)
        # Idiom fix: generator expression instead of a list inside any().
        assert any(type(node.op) is GPUAChoiceFromUniform
                   for node in f.maker.fgraph.toposort())
        n_samples = 3
        pval = np.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
        pval = pval / pval.sum(axis=1)[:, None]
        uval = np.ones(pval.shape[0] * n_samples) * 0.5
        f(pval, uval, n_samples)

        # Test with a row, it was failing in the past.
        r = tensor.frow()
        m = multinomial.ChoiceFromUniform('auto', replace=replace)(r, u, n)
        assert m.dtype == 'int64', m.dtype
        f = function([r, u, n], m, allow_input_downcast=True,
                     mode=mode_with_gpu)
        assert any(type(node.op) is GPUAChoiceFromUniform
                   for node in f.maker.fgraph.toposort())
        pval = np.arange(1 * 4, dtype='float32').reshape((1, 4)) + 0.1
        pval = pval / pval.sum(axis=1)[:, None]
        uval = np.ones_like(pval[:, 0]) * 0.5
        f(pval, uval, 1)
def test_unpickle_legacy_op():
    """Check that a pickle written with the legacy op name still
    unpickles to GPUAChoiceFromUniform (Python 2 only)."""
    testfile_dir = os.path.dirname(os.path.realpath(__file__))
    fname = 'test_gpuarray_multinomial_wo_replacement.pkl'
    # The compatibility pickle was produced under Python 2, so only
    # exercise this path when not running Python 3.
    if not PY3:
        with open(os.path.join(testfile_dir, fname), 'r') as fp:
            u = CompatUnpickler(fp)
            m = u.load()
            assert isinstance(m, GPUAChoiceFromUniform)
| 37.14876
| 86
| 0.601557
| 1,837
| 13,485
| 4.306478
| 0.143713
| 0.004803
| 0.031854
| 0.038933
| 0.774618
| 0.754519
| 0.735305
| 0.735305
| 0.727847
| 0.687903
| 0
| 0.040766
| 0.272377
| 13,485
| 362
| 87
| 37.251381
| 0.765491
| 0.146459
| 0
| 0.683128
| 0
| 0
| 0.025288
| 0.003837
| 0
| 0
| 0
| 0.002762
| 0.148148
| 1
| 0.053498
| false
| 0
| 0.053498
| 0
| 0.115226
| 0.004115
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8a435353d0590ddc341920171468033fccbe60e5
| 137
|
py
|
Python
|
.history/URI/URI/intermediario/2936_quanta_mandioca_20210714121821.py
|
Aleff13/poo-ufsc
|
bc1574df26f840a3c0fd5b1e0c72e5d69f61493d
|
[
"MIT"
] | 1
|
2021-11-28T18:49:21.000Z
|
2021-11-28T18:49:21.000Z
|
URI/URI/intermediario/2936_quanta_mandioca.py
|
Aleff13/poo-ufsc
|
bc1574df26f840a3c0fd5b1e0c72e5d69f61493d
|
[
"MIT"
] | null | null | null |
URI/URI/intermediario/2936_quanta_mandioca.py
|
Aleff13/poo-ufsc
|
bc1574df26f840a3c0fd5b1e0c72e5d69f61493d
|
[
"MIT"
] | null | null | null |
# Per-unit weights in grams for each of the five input quantities,
# in the order they are read from stdin (URI 2936).
_WEIGHTS = (300, 1500, 600, 1000, 150)
# Fixed extra amount in grams added to every total.
_BASE = 225


def total_amount(counts):
    """Return the total amount in grams for the five input quantities.

    Args:
        counts: sequence of five ints, one per input line.

    Returns:
        int: weighted sum of the counts plus the fixed base amount.
    """
    return sum(c * w for c, w in zip(counts, _WEIGHTS)) + _BASE


if __name__ == "__main__":
    # Read five integers, one per line, and print the total.
    quantities = [int(input()) for _ in range(5)]
    print(total_amount(quantities))
| 19.571429
| 24
| 0.664234
| 29
| 137
| 3.137931
| 0.517241
| 0.43956
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21374
| 0.043796
| 137
| 7
| 25
| 19.571429
| 0.480916
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8a832e7d1dcd6330560f6ca15f344d845f14becc
| 23
|
py
|
Python
|
__init__.py
|
Plasmabot1/KiBuzzard
|
55a191cab66e9ab8ac5f3c0354d9d56eb75d1eca
|
[
"MIT"
] | 22
|
2021-01-17T19:04:31.000Z
|
2021-04-10T01:52:57.000Z
|
__init__.py
|
arturo182/KiBuzzard
|
a963b413a0c89f7d7eb88bc15fc15186a19f5dfc
|
[
"MIT"
] | 13
|
2021-01-17T17:45:49.000Z
|
2021-04-25T05:54:48.000Z
|
__init__.py
|
arturo182/KiBuzzard
|
a963b413a0c89f7d7eb88bc15fc15186a19f5dfc
|
[
"MIT"
] | 6
|
2021-02-01T19:15:05.000Z
|
2021-03-18T15:19:49.000Z
|
from . import KiBuzzard
| 23
| 23
| 0.826087
| 3
| 23
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 23
| 1
| 23
| 23
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8aa161f10c8973e8425c9f5ca6759a20cf174a67
| 81
|
py
|
Python
|
turbustat/statistics/dendrograms/__init__.py
|
CFD-UTSA/Turbulence-stars
|
354d02e38d15e3b0d1f751b43f430dbd3a14c250
|
[
"MIT"
] | 42
|
2016-04-07T20:49:59.000Z
|
2022-03-28T12:54:13.000Z
|
turbustat/statistics/dendrograms/__init__.py
|
CFD-UTSA/Turbulence-stars
|
354d02e38d15e3b0d1f751b43f430dbd3a14c250
|
[
"MIT"
] | 131
|
2015-03-05T21:42:27.000Z
|
2021-07-22T14:59:04.000Z
|
turbustat/statistics/dendrograms/__init__.py
|
CFD-UTSA/Turbulence-stars
|
354d02e38d15e3b0d1f751b43f430dbd3a14c250
|
[
"MIT"
] | 21
|
2015-06-10T17:10:06.000Z
|
2022-02-28T15:59:42.000Z
|
from .dendro_stats import Dendrogram_Stats, Dendrogram_Distance, DendroDistance
| 27
| 79
| 0.876543
| 9
| 81
| 7.555556
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08642
| 81
| 2
| 80
| 40.5
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8abb58dd67b19efedc646f390c315f80681e407b
| 52
|
py
|
Python
|
problem/10000~19999/17496/17496.pypy3.py
|
njw1204/BOJ-AC
|
1de41685725ae4657a7ff94e413febd97a888567
|
[
"MIT"
] | 1
|
2019-04-19T16:37:44.000Z
|
2019-04-19T16:37:44.000Z
|
problem/10000~19999/17496/17496.pypy3.py
|
njw1204/BOJ-AC
|
1de41685725ae4657a7ff94e413febd97a888567
|
[
"MIT"
] | 1
|
2019-04-20T11:42:44.000Z
|
2019-04-20T11:42:44.000Z
|
problem/10000~19999/17496/17496.pypy3.py
|
njw1204/BOJ-AC
|
1de41685725ae4657a7ff94e413febd97a888567
|
[
"MIT"
] | 3
|
2019-04-19T16:37:47.000Z
|
2021-10-25T00:45:00.000Z
|
def total_cost(n, t, c, p):
    """Return the answer for BOJ 17496.

    (n - 1) // t counts how many full groups of t fit below n; each
    group costs c * p.
    """
    return (n - 1) // t * c * p


if __name__ == "__main__":
    # Read the four space-separated integers and print the result.
    n, t, c, p = map(int, input().split())
    print(total_cost(n, t, c, p))
| 52
| 52
| 0.576923
| 14
| 52
| 2.142857
| 0.714286
| 0.133333
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019231
| 0
| 52
| 1
| 52
| 52
| 0.557692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
8abf5364582a09ccd7cf1e9748f4403faf2ef9cc
| 9,976
|
py
|
Python
|
bamboo/tests/controllers/test_datasets_edit.py
|
pld/bamboo
|
a0fc77aebd6ff6b1087ba46896b0ce705fbb25a3
|
[
"BSD-3-Clause"
] | 27
|
2015-01-14T15:57:54.000Z
|
2020-12-27T19:34:41.000Z
|
bamboo/tests/controllers/test_datasets_edit.py
|
biswapanda/bamboo
|
72fc260822a27ce52cbe65de178f8fa1b60311f3
|
[
"BSD-3-Clause"
] | 2
|
2015-08-06T15:23:28.000Z
|
2016-01-28T00:05:25.000Z
|
bamboo/tests/controllers/test_datasets_edit.py
|
biswapanda/bamboo
|
72fc260822a27ce52cbe65de178f8fa1b60311f3
|
[
"BSD-3-Clause"
] | 10
|
2015-08-07T01:50:39.000Z
|
2019-05-15T21:41:18.000Z
|
from pandas import Series
import simplejson as json
from bamboo.controllers.datasets import Datasets
from bamboo.models.dataset import Dataset
from bamboo.models.observation import Observation
from bamboo.tests.controllers.test_abstract_datasets import\
TestAbstractDatasets
class TestDatasetsEdit(TestAbstractDatasets):
    """Tests for row-level show/delete/update on datasets, including the
    propagation of edits through calculations, aggregations, joins and
    merges."""

    def setUp(self):
        TestAbstractDatasets.setUp(self)

    def test_show_row(self):
        """row_show accepts both int and string row indices."""
        dataset_id = self._post_file()
        result = json.loads(self.controller.row_show(dataset_id, 0))
        self.assertTrue(isinstance(result, dict))
        self.assertEqual(9.0, result['amount'])
        result = json.loads(self.controller.row_show(dataset_id, "0"))
        self.assertTrue(isinstance(result, dict))
        self.assertEqual(9.0, result['amount'])

    def test_show_row_nonexistent_index(self):
        """row_show for an out-of-range index returns an error dict."""
        dataset_id = self._post_file()
        result = json.loads(self.controller.row_show(dataset_id, "90"))
        self.assertTrue(isinstance(result, dict))
        self.assertTrue(Datasets.ERROR in result)

    def test_show_row_bad_index(self):
        """row_show for a non-numeric index returns an error dict."""
        dataset_id = self._post_file()
        result = json.loads(self.controller.row_show(dataset_id, "A"))
        self.assertTrue(isinstance(result, dict))
        self.assertTrue(Datasets.ERROR in result)

    def test_delete_row(self):
        """row_delete removes the row from the dframe, updates the info
        row count, and soft-deletes the observation."""
        dataset_id = self._post_file()
        dataset = Dataset.find_one(dataset_id)
        index = 0
        expected_dframe = Dataset.find_one(
            dataset_id).dframe()[index + 1:].reset_index()
        del expected_dframe['index']
        results = json.loads(self.controller.row_delete(dataset_id, index))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        dataset = Dataset.find_one(dataset_id)
        dframe = dataset.dframe()
        self.assertEqual(self.NUM_ROWS - 1, len(dframe))
        self._check_dframes_are_equal(expected_dframe, dframe)
        # check info updated
        info = dataset.info()
        self.assertEqual(self.NUM_ROWS - 1, info[Dataset.NUM_ROWS])
        # check that row is softly deleted
        all_observations = Observation.find(dataset, include_deleted=True)
        self.assertEqual(self.NUM_ROWS, len(all_observations))

    def test_delete_row_with_agg(self):
        """Deleting a row updates a dependent sum aggregation."""
        amount_sum = 2007.5
        amount_sum_after = 1998.5
        index = 0
        self.dataset_id = self._post_file()
        self._post_calculations(formulae=['sum(amount)'])
        agg = self._test_aggregations()[0]
        self.assertEqual(agg['sum_amount_'], amount_sum)
        results = json.loads(
            self.controller.row_delete(self.dataset_id, index))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        agg = self._test_aggregations()[0]
        self.assertEqual(agg['sum_amount_'], amount_sum_after)

    def test_delete_row_with_join(self):
        """Deleting a row propagates through chained joined datasets."""
        index = 0
        left_dataset_id = self._post_file()
        right_dataset_id = self._post_file('good_eats_aux.csv')
        on = 'food_type'
        results = json.loads(self.controller.join(
            left_dataset_id, right_dataset_id, on=on))
        joined_dataset_id = results[Dataset.ID]
        results = json.loads(self.controller.join(
            joined_dataset_id, right_dataset_id, on=on))
        joined_dataset_id2 = results[Dataset.ID]
        results = json.loads(
            self.controller.row_delete(left_dataset_id, index))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        dframe = Dataset.find_one(joined_dataset_id).dframe(index=True)
        self.assertFalse(index in dframe['index'].tolist())
        dframe = Dataset.find_one(joined_dataset_id2).dframe(index=True)
        self.assertFalse(index in dframe['index'].tolist())

    def test_delete_row_with_merge(self):
        """Deleting rows from both parents removes them from the merge."""
        index = 0
        dataset_id1 = self._post_file()
        dataset_id2 = self._post_file()
        result = json.loads(self.controller.merge(
            dataset_ids=json.dumps([dataset_id1, dataset_id2])))
        merged_id = result[Dataset.ID]
        results = json.loads(
            self.controller.row_delete(dataset_id2, index))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        results = json.loads(
            self.controller.row_delete(dataset_id1, index))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        dframe = Dataset.find_one(merged_id).dframe(index=True)
        self.assertFalse(index in dframe['index'].tolist())
        self.assertFalse(index + self.NUM_ROWS in dframe['index'].tolist())

    def test_edit_row(self):
        """row_update changes the row in place and keeps the previous
        version as a soft-deleted observation."""
        dataset_id = self._post_file()
        index = 0
        update = {'amount': 10, 'food_type': 'breakfast'}
        expected_dframe = Dataset.find_one(dataset_id).dframe()
        expected_row = expected_dframe.ix[0].to_dict()
        expected_row.update(update)
        expected_dframe.ix[0] = Series(expected_row)
        results = json.loads(self.controller.row_update(dataset_id, index,
                                                        json.dumps(update)))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        dataset = Dataset.find_one(dataset_id)
        dframe = dataset.dframe()
        self.assertEqual(self.NUM_ROWS, len(dframe))
        self._check_dframes_are_equal(expected_dframe, dframe)
        # check that previous row exists
        all_observations = Observation.find(dataset, include_deleted=True)
        self.assertEqual(self.NUM_ROWS + 1, len(all_observations))

    def test_edit_row_with_calculation(self):
        """Editing a row recomputes a dependent column calculation."""
        amount_before = 9
        amount_after = 10
        value = 5
        index = 0
        update = {'amount': amount_after, 'food_type': 'breakfast'}
        self.dataset_id = self._post_file()
        self._post_calculations(formulae=['amount + %s' % value])
        result = json.loads(self.controller.row_show(self.dataset_id, index))
        self.assertEqual(amount_before + value, result['amount___%s' % value])
        results = json.loads(self.controller.row_update(self.dataset_id, index,
                                                        json.dumps(update)))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        result = json.loads(self.controller.row_show(self.dataset_id, index))
        self.assertEqual(amount_after + value, result['amount___%s' % value])

    def test_edit_row_with_agg(self):
        """Editing a row updates a dependent sum aggregation."""
        amount_sum = 2007.5
        amount_sum_after = 2008.5
        self.dataset_id = self._post_file()
        self._post_calculations(formulae=['sum(amount)'])
        agg = self._test_aggregations()[0]
        self.assertEqual(agg['sum_amount_'], amount_sum)
        index = 0
        update = {'amount': 10, 'food_type': 'breakfast'}
        results = json.loads(self.controller.row_update(self.dataset_id, index,
                                                        json.dumps(update)))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        agg = self._test_aggregations()[0]
        self.assertEqual(agg['sum_amount_'], amount_sum_after)

    def test_edit_row_with_join(self):
        """Editing a row propagates through chained joined datasets."""
        index = 0
        value = 10
        update = {'amount': value, 'food_type': 'breakfast'}
        left_dataset_id = self._post_file()
        right_dataset_id = self._post_file('good_eats_aux.csv')
        on = 'food_type'
        results = json.loads(self.controller.join(
            left_dataset_id, right_dataset_id, on=on))
        joined_dataset_id = results[Dataset.ID]
        results = json.loads(self.controller.join(
            joined_dataset_id, right_dataset_id, on=on))
        joined_dataset_id2 = results[Dataset.ID]
        results = json.loads(self.controller.row_update(left_dataset_id, index,
                                                        json.dumps(update)))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        result = json.loads(self.controller.row_show(joined_dataset_id, 0))
        self.assertEqual(value, result['amount'])
        result = json.loads(self.controller.row_show(joined_dataset_id2, 0))
        self.assertEqual(value, result['amount'])

    def test_edit_row_with_join_invalid(self):
        """An edit that would break a join is accepted but leaves the
        joined dataset's rows and pending updates untouched."""
        index = 0
        update = {'food_type': 'deserts'}
        left_dataset_id = self._post_file()
        right_dataset_id = self._post_file('good_eats_aux.csv')
        num_rows_before = Dataset.find_one(right_dataset_id).num_rows
        on = 'food_type'
        json.loads(self.controller.join(
            left_dataset_id, right_dataset_id, on=on))
        results = json.loads(self.controller.row_update(
            right_dataset_id, index, json.dumps(update)))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        dataset = Dataset.find_one(right_dataset_id)
        self.assertEqual(num_rows_before, dataset.num_rows)
        self.assertEqual(dataset.pending_updates, [])

    def test_edit_row_with_merge(self):
        """Edits to both parent datasets propagate to the merged one."""
        index = 0
        value = 10
        update = {'amount': value, 'food_type': 'breakfast'}
        dataset_id1 = self._post_file()
        dataset_id2 = self._post_file()
        result = json.loads(self.controller.merge(
            dataset_ids=json.dumps([dataset_id1, dataset_id2])))
        merged_id = result[Dataset.ID]
        results = json.loads(self.controller.row_update(dataset_id1, index,
                                                        json.dumps(update)))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        results = json.loads(self.controller.row_update(dataset_id2, index,
                                                        json.dumps(update)))
        self.assertTrue(Datasets.SUCCESS in results.keys())
        result = json.loads(self.controller.row_show(merged_id, index))
        self.assertEqual(value, result['amount'])
        result = json.loads(self.controller.row_show(merged_id, index +
                                                     self.NUM_ROWS))
        self.assertEqual(value, result['amount'])
| 38.666667
| 79
| 0.643344
| 1,204
| 9,976
| 5.065615
| 0.098007
| 0.081161
| 0.061813
| 0.109362
| 0.842269
| 0.813248
| 0.762912
| 0.739629
| 0.693228
| 0.68503
| 0
| 0.010406
| 0.248597
| 9,976
| 257
| 80
| 38.817121
| 0.803228
| 0.00822
| 0
| 0.611399
| 0
| 0
| 0.03822
| 0
| 0
| 0
| 0
| 0
| 0.212435
| 1
| 0.072539
| false
| 0
| 0.031088
| 0
| 0.108808
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8ad1a3dd1e6c3060924deb681ec12dc65a1f81b6
| 46
|
py
|
Python
|
mapreduce/__init__.py
|
File5/simple-mapreduce
|
3a37b880656f4f27010e77266be9f64ea5d181b5
|
[
"MIT"
] | 9
|
2021-02-19T16:01:27.000Z
|
2021-03-11T20:51:59.000Z
|
mapreduce/__init__.py
|
File5/simple-mapreduce
|
3a37b880656f4f27010e77266be9f64ea5d181b5
|
[
"MIT"
] | null | null | null |
mapreduce/__init__.py
|
File5/simple-mapreduce
|
3a37b880656f4f27010e77266be9f64ea5d181b5
|
[
"MIT"
] | 2
|
2021-02-19T16:51:29.000Z
|
2021-02-27T01:00:20.000Z
|
from mapreduce.mapreduce import MapReduceTask
| 23
| 45
| 0.891304
| 5
| 46
| 8.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.97619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
76eb0e33f03dbb9b75f15cdf429011e951159527
| 23
|
py
|
Python
|
samples/LuceneInAction/lia/analysis/queryparser/__init__.py
|
romanchyla/pylucene-trunk
|
990079ff0c76b972ce5ef2bac9b85334a0a1f27a
|
[
"Apache-2.0"
] | 15
|
2015-05-21T09:28:01.000Z
|
2022-03-18T23:41:49.000Z
|
samples/LuceneInAction/lia/analysis/queryparser/__init__.py
|
fnp/pylucene
|
fb16ac375de5479dec3919a5559cda02c899e387
|
[
"Apache-2.0"
] | 1
|
2021-09-30T03:59:43.000Z
|
2021-09-30T03:59:43.000Z
|
samples/LuceneInAction/lia/analysis/queryparser/__init__.py
|
romanchyla/pylucene-trunk
|
990079ff0c76b972ce5ef2bac9b85334a0a1f27a
|
[
"Apache-2.0"
] | 13
|
2015-04-18T23:05:11.000Z
|
2021-11-29T21:23:26.000Z
|
# queryparser package
| 7.666667
| 21
| 0.782609
| 2
| 23
| 9
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 2
| 22
| 11.5
| 0.947368
| 0.826087
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
76ec066a24aa09bc82582b6147739306abcd2f42
| 88
|
py
|
Python
|
app/scheme/admin.py
|
aacsspkt/autodealerappliation
|
c7ab3ae8e57e91c797129e87a13bd00d41bc4753
|
[
"MIT"
] | null | null | null |
app/scheme/admin.py
|
aacsspkt/autodealerappliation
|
c7ab3ae8e57e91c797129e87a13bd00d41bc4753
|
[
"MIT"
] | null | null | null |
app/scheme/admin.py
|
aacsspkt/autodealerappliation
|
c7ab3ae8e57e91c797129e87a13bd00d41bc4753
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Scheme
# Expose the Scheme model in the Django admin site with the default
# ModelAdmin options.
admin.site.register(Scheme)
| 22
| 32
| 0.829545
| 13
| 88
| 5.615385
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102273
| 88
| 4
| 33
| 22
| 0.924051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0a1ec157b576869d590aacfa371ba6d8b9d0f63c
| 44
|
py
|
Python
|
src/__main__.py
|
transparent-bench/Hospital-management-system
|
42d0188432dbd229a7a8a3758386d8743c997f52
|
[
"MIT"
] | null | null | null |
src/__main__.py
|
transparent-bench/Hospital-management-system
|
42d0188432dbd229a7a8a3758386d8743c997f52
|
[
"MIT"
] | 8
|
2019-11-25T21:01:13.000Z
|
2019-11-28T19:34:52.000Z
|
src/__main__.py
|
transparent-bench/Hospital-management-system
|
42d0188432dbd229a7a8a3758386d8743c997f52
|
[
"MIT"
] | null | null | null |
from src.utils.gui.view import main

if __name__ == "__main__":
    # Launch the GUI only when executed as the program entry point
    # (e.g. `python -m src`), not when this module is merely imported.
    main()
| 11
| 35
| 0.75
| 8
| 44
| 4.125
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 44
| 3
| 36
| 14.666667
| 0.868421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0a54be450b305d74672d558e307553f99e2567a7
| 309
|
py
|
Python
|
common/mask_prune/__init__.py
|
jiahuei/tf-sparse-captioning
|
9d7b8ecdd44fb1541500ca4f920d6c94fd15bad1
|
[
"BSD-3-Clause"
] | null | null | null |
common/mask_prune/__init__.py
|
jiahuei/tf-sparse-captioning
|
9d7b8ecdd44fb1541500ca4f920d6c94fd15bad1
|
[
"BSD-3-Clause"
] | null | null | null |
common/mask_prune/__init__.py
|
jiahuei/tf-sparse-captioning
|
9d7b8ecdd44fb1541500ca4f920d6c94fd15bad1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on 14 Jun 2019 15:37:45
@author: jiahuei
"""
from common.mask_prune import masked_layer_v4 as masked_layer
from common.mask_prune import sampler_v2 as sampler
from common.mask_prune import prune_v1 as pruning
from common.mask_prune import sparse_layer_v1 as sparse_layer
| 25.75
| 61
| 0.79288
| 53
| 309
| 4.396226
| 0.509434
| 0.171674
| 0.240343
| 0.32618
| 0.429185
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063433
| 0.132686
| 309
| 11
| 62
| 28.090909
| 0.80597
| 0.23301
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6a52661c285d998966ba2810544de44cc5ad2d3f
| 36,828
|
py
|
Python
|
tests/graph_test.py
|
SeaOfOcean/EasyParallelLibrary
|
93baaa851f5ce078b1c55032a27398a588ca4107
|
[
"Apache-2.0"
] | 100
|
2022-02-23T08:54:35.000Z
|
2022-03-31T04:02:38.000Z
|
tests/graph_test.py
|
SeaOfOcean/EasyParallelLibrary
|
93baaa851f5ce078b1c55032a27398a588ca4107
|
[
"Apache-2.0"
] | null | null | null |
tests/graph_test.py
|
SeaOfOcean/EasyParallelLibrary
|
93baaa851f5ce078b1c55032a27398a588ca4107
|
[
"Apache-2.0"
] | 22
|
2022-02-23T09:02:01.000Z
|
2022-03-18T03:24:00.000Z
|
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Test for graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from distutils.version import LooseVersion as Version
import warnings
import numpy as np
import tensorflow as tf
from tensorflow.contrib.data.python.ops import threadpool
from tensorflow.python.framework.versions import __version__
from tensorflow.python.platform import test
import epl
from epl.utils import constant
from epl.config import Config
from epl.env import Env
from epl.ir.graph import Graph
from epl.ir.graph import GraphKeys
from epl.ir.phase import ModelPhase
from epl.parallel.graph_editor import Custom
warnings.simplefilter("always")
# pylint: disable=missing-docstring,protected-access,unused-argument
# pylint: disable=line-too-long,bad-continuation,unused-variable
class GraphTest(test.TestCase):
"""Test import functions of parallelism transformation"""
def _model_def(self):
    """Build a two-stage pipelined toy model and return a scalar loss.

    Stage 0 hosts the input pipeline and the first dense layer; stage 1
    hosts the output dense layer. Returns the mean of the logits.
    """
    with epl.replicate(device_count=1, name="stage_0"):
        num_x = np.random.randint(0, 10, (500, 10)).astype(dtype=np.float32)
        num_y = np.random.randint(0, 10, 500).astype(dtype=np.int32)
        dataset = tf.data.Dataset.from_tensor_slices((num_x, num_y)) \
            .batch(10).repeat(1)
        iterator = dataset.make_initializable_iterator()
        # Register the iterator initializer so MonitoredTrainingSession
        # runs it on startup.
        tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
                             iterator.initializer)
        x, _ = iterator.get_next()
        dense1 = tf.layers.dense(inputs=x, units=16, activation=None)
    with epl.replicate(device_count=1, name="stage_1"):
        logits = tf.layers.dense(inputs=dense1, units=10, activation=None)
    return tf.reduce_mean(logits)
def test_graph_with_clip(self):
# Builds the pipelined model, applies Adam with global-norm gradient
# clipping, and checks that the backward exit ops of both taskgraphs
# are the clip_by_global_norm outputs, each consumed by an Add/AddV2
# (gradient accumulation across micro batches).
config = epl.Config()
config.pipeline.num_micro_batch = 2
epl.init(config)
with tf.Graph().as_default():
loss = self._model_def()
epl.add_to_collection(loss, GraphKeys.GLOBAL_MEAN_OBJECTS)
g = Graph.get()
self.assertEqual(g._current_model_phase, ModelPhase.FORWARD)
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
tvars = tf.trainable_variables()
grads = tf.gradients(loss, tvars)
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
optimizer.apply_gradients(list(zip(grads, tvars)))
self.assertEqual(g._current_model_phase, ModelPhase.FORWARD)
tf.train.MonitoredTrainingSession(config=tf.ConfigProto(
log_device_placement=False))
# check first taskgraph.
b_exit_op_list = list(g.taskgraphs[0].backward_exit_ops(0, 0))
for b_exit_op in b_exit_op_list:
self.assertEqual(len(b_exit_op.outputs), 1)
self.assertEqual(len(b_exit_op.outputs[0].consumers), 2)
consumers_type = \
[b_exit_op.type for b_exit_op in \
list(b_exit_op.outputs[0].consumers)]
# Exactly one of Add/AddV2 must consume the exit op (op name
# differs between TF versions).
self.assertTrue(("Add" in consumers_type)
^ ("AddV2" in consumers_type))
b_exit_op_list = [b_exit_op.name for b_exit_op in b_exit_op_list]
list.sort(b_exit_op_list)
self.assertEqual(b_exit_op_list, [
"clip_by_global_norm/clip_by_global_norm/_0",
"clip_by_global_norm/clip_by_global_norm/_1"
])
# check second taskgraph.
s_1_b_exit_0_0 = g.taskgraphs[1].backward_exit_ops(0, 0)
for b_exit_op in s_1_b_exit_0_0:
self.assertEqual(len(b_exit_op.outputs), 1)
self.assertEqual(len(b_exit_op.outputs[0].consumers), 2)
consumers_type = \
[b_exit_op.type for b_exit_op in \
list(b_exit_op.outputs[0].consumers)]
self.assertTrue(("Add" in consumers_type)
^ ("AddV2" in consumers_type))
self.assertEqual(len(s_1_b_exit_0_0), 2)
s_1_b_exit_0_0 = \
[b_exit_op.name for b_exit_op in s_1_b_exit_0_0]
list.sort(s_1_b_exit_0_0)
self.assertEqual(s_1_b_exit_0_0, [
"clip_by_global_norm/clip_by_global_norm/_2",
"clip_by_global_norm/clip_by_global_norm/_3"
])
def test_graph_with_clip_and_scale(self):
# Same as test_graph_with_clip but with an extra manual gradient
# scaling step after clipping; the backward exit ops become the
# scaling "mul" ops instead of the clip outputs.
config = epl.Config()
config.pipeline.num_micro_batch = 2
epl.init(config)
with tf.Graph().as_default():
loss = self._model_def()
g = Graph.get()
epl.add_to_collection(loss, GraphKeys.GLOBAL_MEAN_OBJECTS)
self.assertEqual(g._current_model_phase, ModelPhase.FORWARD)
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
tvars = tf.trainable_variables()
grads = tf.gradients(loss, tvars)
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
# Scale gradients manually
grads = [grad * float(1 / 2) for grad in grads]
optimizer.apply_gradients(list(zip(grads, tvars)))
self.assertEqual(g._current_model_phase, ModelPhase.FORWARD)
tf.train.MonitoredTrainingSession(config=tf.ConfigProto(
log_device_placement=False))
# check first taskgraph.
b_exit_op_list = list(g.taskgraphs[0].backward_exit_ops(0, 0))
for b_exit_op in b_exit_op_list:
self.assertEqual(len(b_exit_op.outputs), 1)
self.assertEqual(len(b_exit_op.outputs[0].consumers), 2)
consumers_type = \
[b_exit_op.type for b_exit_op in \
list(b_exit_op.outputs[0].consumers)]
# Exactly one of Add/AddV2 must consume the exit op (op name
# differs between TF versions).
self.assertTrue(("Add" in consumers_type)
^ ("AddV2" in consumers_type))
b_exit_op_list = [b_exit_op.name for b_exit_op in b_exit_op_list]
list.sort(b_exit_op_list)
self.assertEqual(b_exit_op_list, ["mul", "mul_1"])
# check second taskgraph.
s_1_b_exit_0_0 = g.taskgraphs[1].backward_exit_ops(0, 0)
for b_exit_op in s_1_b_exit_0_0:
self.assertEqual(len(b_exit_op.outputs), 1)
self.assertEqual(len(b_exit_op.outputs[0].consumers), 2)
consumers_type = \
[b_exit_op.type for b_exit_op in \
list(b_exit_op.outputs[0].consumers)]
self.assertTrue(("Add" in consumers_type)
^ ("AddV2" in consumers_type))
self.assertEqual(len(s_1_b_exit_0_0), 2)
s_1_b_exit_0_0 = \
[b_exit_op.name for b_exit_op in s_1_b_exit_0_0]
list.sort(s_1_b_exit_0_0)
self.assertEqual(s_1_b_exit_0_0, ["mul_2", "mul_3"])
def test_graph_with_clip_after_allreduce(self):
# With clip_after_allreduce enabled the clip ops are no longer the
# taskgraph backward exit ops; the raw BiasAdd/MatMul gradient ops
# are instead.
conf = Config()
# Clip gradients after allreduce
conf.communication.clip_after_allreduce = True
conf.pipeline.num_micro_batch = 2
epl.init(conf)
with tf.Graph().as_default():
loss = self._model_def()
g = Graph.get()
epl.add_to_collection(loss, GraphKeys.GLOBAL_MEAN_OBJECTS)
self.assertEqual(g._current_model_phase, ModelPhase.FORWARD)
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
tvars = tf.trainable_variables()
grads = tf.gradients(loss, tvars)
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
optimizer.apply_gradients(list(zip(grads, tvars)))
self.assertEqual(g._current_model_phase, ModelPhase.FORWARD)
tf.train.MonitoredTrainingSession(config=tf.ConfigProto(
log_device_placement=False))
# check first taskgraph.
b_exit_op_list = list(g.taskgraphs[0].backward_exit_ops(0, 0))
b_exit_op_list = [b_exit_op.name for b_exit_op in b_exit_op_list]
list.sort(b_exit_op_list)
self.assertEqual(b_exit_op_list, [
"gradients/dense/BiasAdd_grad/BiasAddGrad",
"gradients/dense/MatMul_grad/MatMul_1"
])
# check second taskgraph.
s_1_b_exit_0_0 = g.taskgraphs[1].backward_exit_ops(0, 0)
s_1_b_exit_0_0 = \
[b_exit_op.name for b_exit_op in s_1_b_exit_0_0]
list.sort(s_1_b_exit_0_0)
self.assertEqual(s_1_b_exit_0_0, [
"gradients/dense_1/BiasAdd_grad/BiasAddGrad",
"gradients/dense_1/MatMul_grad/MatMul_1"
])
def test_outside_strategy_error(self):
# Builds ops both inside and outside epl.replicate scopes and checks
# which default taskgraph (user-set vs. epl-assigned) picks up the
# out-of-scope ops, and that no warnings are raised for optimizer ops.
epl.init()
with tf.Graph().as_default():
x1 = tf.constant(1.1, shape=[2, 2])
with epl.replicate(name="replica_0"):
x2 = tf.constant(1.1, shape=[2, 2])
with epl.replicate(name="replica_1"):
dense = tf.layers.dense(inputs=x2, units=2)
with warnings.catch_warnings(record=True) as w:
loss = tf.reduce_mean(dense)
with warnings.catch_warnings(record=True) as w:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
self.assertEqual(0, len(w))
with warnings.catch_warnings(record=True) as w:
gvs = optimizer.compute_gradients(loss)
self.assertEqual(0, len(w))
with warnings.catch_warnings(record=True) as w:
optimizer.apply_gradients(gvs)
self.assertEqual(0, len(w))
x3 = tf.constant(1.1, shape=[2, 2])
# Before set_default_strategy: only the epl-assigned default exists.
self.assertEqual(Graph.get()._user_default_taskgraph, None)
self.assertNotEqual(Graph.get()._epl_default_taskgraph, None)
self.assertEqual(Graph.get()._epl_default_taskgraph.index, 1)
epl.set_default_strategy(epl.replicate(1, name="replica_2"))
x4 = tf.constant(1.1, shape=[2, 2])
with epl.replicate(name="replica_3"):
x5 = tf.constant(1.1, shape=[2, 2])
# After set_default_strategy: both defaults exist with new indices.
self.assertNotEqual(Graph.get()._epl_default_taskgraph, None)
self.assertNotEqual(Graph.get()._user_default_taskgraph, None)
self.assertEqual(Graph.get()._user_default_taskgraph.index, 2)
self.assertEqual(Graph.get()._epl_default_taskgraph.index, 3)
  def test_graph(self):
    """End-to-end check of EPL Graph state for a 2-stage pipelined model.

    Builds ``self._model_def()`` with ``num_micro_batch=2`` and verifies:
    taskgraph splitting, dataset-op discovery, replica / micro-batch clone
    lookup, per-taskgraph variables and gradients, forward/backward
    entrance and exit ops, device placement, collections, micro-batch
    cloning, and the ``Custom`` dep-op views.
    """
    config = epl.Config()
    config.pipeline.num_micro_batch = 2
    epl.init(config)
    with tf.Graph().as_default():
      loss = self._model_def()
      epl.add_to_collection(loss, GraphKeys.GLOBAL_MEAN_OBJECTS)
      g = Graph.get()
      # Model phase stays FORWARD through user-level gradient construction.
      self.assertEqual(g._current_model_phase, ModelPhase.FORWARD)
      optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
      gvs = optimizer.compute_gradients(loss)
      self.assertEqual(g._current_model_phase, ModelPhase.FORWARD)
      optimizer.apply_gradients(gvs)
      self.assertEqual(g._current_model_phase, ModelPhase.FORWARD)
      # Session construction triggers EPL's graph transformation.
      tf.train.MonitoredTrainingSession(config=tf.ConfigProto(
          log_device_placement=False))
      self.assertEqual(len(g.taskgraphs), 2)
      # Test traverse_depend_ops.
      depend_ops = g.traverse_depend_ops("IteratorGetNext",
                                         consider_outputs=False)
      self.assertEqual(len(depend_ops), 1)
      self.assertEqual(depend_ops[0].name, "IteratorV2")
      dataset_depend_ops = g.get_dataset_related_ops()
      # Expected dataset op set differs per TF release (ModelDataset /
      # OptimizeDataset only appear from TF 1.14 on).
      if Version(__version__) < Version("1.14.0"):
        self.assertEqual(len(dataset_depend_ops), 11)
        assert_depend_ops = [
            "IteratorV2", "IteratorToStringHandle", "MakeIterator",
            "RepeatDataset", "BatchDatasetV2", "count", "TensorSliceDataset",
            "batch_size", "drop_remainder", "tensors/component_0",
            "tensors/component_1"
        ]
      elif Version(__version__) < Version("2.0"):
        assert_depend_ops = ([
            "TensorSliceDataset", "IteratorToStringHandle", "RepeatDataset",
            "BatchDatasetV2", "count", "ModelDataset", "OptimizeDataset",
            "IteratorV2", "batch_size", "normalize_element/component_0",
            "normalize_element/component_1", "drop_remainder", "MakeIterator",
            "optimizations"
        ])
      else:
        raise RuntimeError("Version of tensorflow is not supported for now."
                           "Tenosrflow Version: %s." % __version__)
      for op in assert_depend_ops:
        self.assertTrue(g.get_operation_by_name(op) in dataset_depend_ops)
      # check get_local_replicas
      # Dataset ops have no replicas; original ops map to their
      # EPL_REPLICA_* clones; clones themselves map to nothing.
      obj = g.get_operation_by_name("IteratorV2")
      self.assertEqual(g.get_local_replicas(obj), [])
      obj = g.get_operation_by_name("dense/BiasAdd")
      local_replicas = [x.name for x in g.get_local_replicas(obj)]
      self.assertEqual(local_replicas, ["EPL_REPLICA_1/dense/BiasAdd"])
      obj = g.get_operation_by_name("EPL_REPLICA_1/dense/BiasAdd")
      local_replicas = [x.name for x in g.get_local_replicas(obj)]
      self.assertEqual(local_replicas, [])
      obj = g.get_operation_by_name("EPL_MICRO_BATCH_1/dense/BiasAdd")
      local_replicas = [x.name for x in g.get_local_replicas(obj)]
      self.assertEqual(local_replicas,
                       ["EPL_REPLICA_1/EPL_MICRO_BATCH_1/dense/BiasAdd"])
      # check get_local_micro_batches
      obj = g.get_operation_by_name("IteratorV2")
      self.assertEqual(g.get_local_micro_batches(obj), [])
      obj = g.get_operation_by_name("dense/BiasAdd")
      local_micro_batches = \
          [x.name for x in g.get_local_micro_batches(obj)]
      self.assertEqual(local_micro_batches,
                       ["EPL_MICRO_BATCH_1/dense/BiasAdd"])
      obj = g.get_operation_by_name("EPL_REPLICA_1/dense/BiasAdd")
      local_micro_batches = \
          [x.name for x in g.get_local_micro_batches(obj)]
      self.assertEqual(local_micro_batches,
                       ["EPL_REPLICA_1/EPL_MICRO_BATCH_1/dense/BiasAdd"])
      obj = g.get_operation_by_name("EPL_MICRO_BATCH_1/dense/BiasAdd")
      local_micro_batches = \
          [x.name for x in g.get_local_micro_batches(obj)]
      self.assertEqual(local_micro_batches, [])
      # check first taskgraph.
      self.assertTrue(g.taskgraphs[0].is_first_stage)
      self.assertTrue(g.pipeline_enabled)
      self.assertEqual(g.taskgraphs[0].pipeline_config.num_micro_batch, 2)
      self.assertEqual(g.taskgraphs[0].num_replicas, 2)
      self.assertEqual(g.taskgraphs[0].local_num_replicas, 2)
      # Variables per replica: original names for replica 0, EPL_REPLICA_1/
      # prefixed clones for replica 1.
      broadcast_tensors_0 = \
          [tensor.name for tensor in g.taskgraphs[0].get_variables(0)]
      self.assertEqual(broadcast_tensors_0, [
          "dense/kernel:0", "dense/bias:0", "beta1_power:0", "beta2_power:0",
          "dense/kernel/Adam:0", "dense/kernel/Adam_1:0", "dense/bias/Adam:0",
          "dense/bias/Adam_1:0"
      ])
      broadcast_tensors_1 = \
          [tensor.name for tensor in g.taskgraphs[0].get_variables(1)]
      self.assertEqual(broadcast_tensors_1, [
          "EPL_REPLICA_1/dense/kernel:0", "EPL_REPLICA_1/dense/bias:0",
          "EPL_REPLICA_1/beta1_power:0", "EPL_REPLICA_1/beta2_power:0",
          "EPL_REPLICA_1/dense/kernel/Adam:0",
          "EPL_REPLICA_1/dense/kernel/Adam_1:0",
          "EPL_REPLICA_1/dense/bias/Adam:0",
          "EPL_REPLICA_1/dense/bias/Adam_1:0"
      ])
      gradients = [grad.name for grad in g.taskgraphs[0].gradients]
      list.sort(gradients)
      self.assertEqual(gradients, [
          "gradients/dense/BiasAdd_grad/tuple/control_dependency_1:0",
          "gradients/dense/MatMul_grad/tuple/control_dependency_1:0"
      ])
      self.assertEqual([
          f_ent_op.name
          for f_ent_op in list(g.taskgraphs[0].forward_entrance_ops(0, 0))
      ], ["IteratorGetNext"])
      self.assertEqual(len(g.taskgraphs[0].forward_exit_ops(0, 0)), 1)
      f_exit_ops = [f_exit_op.name for f_exit_op in \
          list(g.taskgraphs[0].forward_exit_ops(0, 0))]
      list.sort(f_exit_ops)
      self.assertEqual(f_exit_ops, ["dense/BiasAdd"])
      self.assertEqual(len(g.taskgraphs[0].backward_entrance_ops(0, 0)), 2)
      b_ent_op_list = [
          b_ent_op.name
          for b_ent_op in g.taskgraphs[0].backward_entrance_ops(0, 0)
      ]
      list.sort(b_ent_op_list)
      self.assertEqual(b_ent_op_list, [
          "gradients/dense/MatMul_grad/MatMul",
          "gradients/dense/MatMul_grad/MatMul_1"
      ])
      b_exit_op_list = [
          b_exit_op.name
          for b_exit_op in list(g.taskgraphs[0].backward_exit_ops(0, 0))
      ]
      list.sort(b_exit_op_list)
      self.assertEqual(b_exit_op_list, [
          "gradients/dense/BiasAdd_grad/tuple/control_dependency_1",
          "gradients/dense/MatMul_grad/tuple/control_dependency_1"
      ])
      # check second taskgraph.
      self.assertFalse(g.taskgraphs[1].is_first_stage)
      self.assertTrue(g.pipeline_enabled)
      self.assertEqual(g.taskgraphs[1].pipeline_config.num_micro_batch, 2)
      self.assertEqual(g.taskgraphs[1].num_replicas, 2)
      self.assertEqual(g.taskgraphs[1].local_num_replicas, 2)
      broadcast_tensors_0 = \
          [tensor.name for tensor in g.taskgraphs[1].get_variables(0)]
      self.assertEqual(broadcast_tensors_0, [
          "dense_1/kernel:0", "dense_1/bias:0", "dense_1/kernel/Adam:0",
          "dense_1/kernel/Adam_1:0", "dense_1/bias/Adam:0",
          "dense_1/bias/Adam_1:0"
      ])
      broadcast_tensors_1 = \
          [tensor.name for tensor in g.taskgraphs[1].get_variables(1)]
      self.assertEqual(broadcast_tensors_1, [
          "EPL_REPLICA_1/dense_1/kernel:0", "EPL_REPLICA_1/dense_1/bias:0",
          "EPL_REPLICA_1/dense_1/kernel/Adam:0",
          "EPL_REPLICA_1/dense_1/kernel/Adam_1:0",
          "EPL_REPLICA_1/dense_1/bias/Adam:0",
          "EPL_REPLICA_1/dense_1/bias/Adam_1:0"
      ])
      gradients = [grad.name for grad in g.taskgraphs[1].gradients]
      list.sort(gradients)
      self.assertEqual(gradients, [
          "gradients/dense_1/BiasAdd_grad/tuple/control_dependency_1:0",
          "gradients/dense_1/MatMul_grad/tuple/control_dependency_1:0"
      ])
      f_ent_ops = [f_ent_op.name for f_ent_op in \
          list(g.taskgraphs[1].forward_entrance_ops(0, 0))]
      list.sort(f_ent_ops)
      self.assertEqual(len(f_ent_ops), 1)
      self.assertEqual(f_ent_ops, ["dense_1/MatMul"])
      f_exit_ops = [f_exit_op.name for f_exit_op in \
          list(g.taskgraphs[1].forward_exit_ops(0, 0))]
      list.sort(f_exit_ops)
      self.assertEqual(len(f_exit_ops), 1)
      self.assertListEqual(f_exit_ops, ["Mean"])
      b_ent_ops = [b_ent_op.name for b_ent_op in \
          list(g.taskgraphs[1].backward_entrance_ops(0, 0))]
      list.sort(b_ent_ops)
      self.assertEqual(len(b_ent_ops), 1)
      self.assertEqual(b_ent_ops, ["gradients/dense_1/MatMul_grad/MatMul"])
      self.assertEqual(len(g.taskgraphs[1].backward_exit_ops(0, 0)), 2)
      s_1_b_exit_0_0 = \
          [b_exit_op.name for b_exit_op in \
          list(g.taskgraphs[1].backward_exit_ops(0, 0))]
      list.sort(s_1_b_exit_0_0)
      self.assertEqual(s_1_b_exit_0_0, [
          "gradients/dense_1/BiasAdd_grad/tuple/control_dependency_1",
          "gradients/dense_1/MatMul_grad/tuple/control_dependency_1"
      ])
      # check forward_operation_placement
      # Dataset-related ops stay on the worker CPU; all other forward ops
      # are placed on the taskgraph's virtual device.
      cpu_device = Env.get().cluster.current_worker_cpu()
      for sub_idx in range(len(g.taskgraphs)):
        for op in g.taskgraphs[sub_idx].operations.forward_operations(0, 0):
          if op in dataset_depend_ops:
            self.assertEqual(op.device, cpu_device)
          else:
            self.assertEqual(op.device, g.taskgraphs[sub_idx].virtual_device.get_device(0, 0))
      # check fetch_micro_batch_num
      self.assertEqual(g.get_pipeline_config().num_micro_batch, 2)
      # check num_constructors
      self.assertEqual(g.num_constructors, 1)
      # check if some op need clone
      self.assertEqual(
          g.vars_related_op_names,
          ["dense/kernel", "dense/bias", "dense_1/kernel", "dense_1/bias"])
      # check graphkeys
      self.assertEqual(GraphKeys.ALL_COLLECTION_KEYS, [
          GraphKeys.GLOBAL_CONCAT_OBJECTS, GraphKeys.GLOBAL_MEAN_OBJECTS,
          GraphKeys.GLOBAL_SUM_OBJECTS, GraphKeys.LOCAL_CONCAT_OBJECTS,
          GraphKeys.LOCAL_MEAN_OBJECTS, GraphKeys.LOCAL_SUM_OBJECTS
      ])
      # check collection
      # Only GLOBAL_MEAN_OBJECTS was populated (with `loss`) above.
      self.assertEqual(g.get_collection(GraphKeys.GLOBAL_CONCAT_OBJECTS), [])
      global_mean = [
          obj.name for obj in g.get_collection(GraphKeys.GLOBAL_MEAN_OBJECTS)
      ]
      self.assertEqual(global_mean, ["Mean:0"])
      self.assertEqual(g.get_collection(GraphKeys.GLOBAL_SUM_OBJECTS), [])
      self.assertEqual(g.get_collection(GraphKeys.LOCAL_CONCAT_OBJECTS), [])
      self.assertEqual(g.get_collection(GraphKeys.LOCAL_MEAN_OBJECTS), [])
      self.assertEqual(g.get_collection(GraphKeys.LOCAL_SUM_OBJECTS), [])
      # check node_clone_for_pipeline
      # Every non-dataset, non-global-step, non-variable forward op of a
      # non-final stage must have an EPL_MICRO_BATCH_<i>/ clone on the same
      # device.
      num_micro_batch = 2
      dp_index = 0
      for sub_idx in range(len(g.taskgraphs) - 1):
        op_list = g.taskgraphs[sub_idx].operations.forward_operations(0, 0)
        for op in op_list:
          for micro_batch_idx in range(1, num_micro_batch):
            prefix = constant.MICRO_BATCH_PREFIX_FORMAT.format(micro_batch_idx)
            # NOTE(review): node_def is computed but never used below.
            node_def = copy.deepcopy(op.node_def)
            if op in dataset_depend_ops:
              continue
            if dp_index == 0 and not g.is_global_step_related(op) \
                and not g.is_vars_related(op):
              op_cloned = g.get_operation_by_name(prefix + op.name)
              self.assertEqual(op_cloned.device, op.device)
      # check dep_ops of pipeline
      customs = []
      for taskgraph in g.taskgraphs:
        customs.append(Custom(taskgraph, 0))
      self.assertEqual([op.name for op in customs[0].forward_entrance_ops[0]],
                       ["IteratorGetNext"])
      f_exit_ops = [op.name for op in customs[0].forward_exit_ops[0]]
      list.sort(f_exit_ops)
      self.assertEqual(f_exit_ops, ["dense/BiasAdd"])
      c_0_b_ent_ops = [op.name for op in customs[0].backward_entrance_ops[0]]
      list.sort(c_0_b_ent_ops)
      self.assertEqual(c_0_b_ent_ops, [
          "gradients/dense/MatMul_grad/MatMul",
          "gradients/dense/MatMul_grad/MatMul_1"
      ])
      c_0_b_exit_ops = [op.name for op in customs[0].backward_exit_ops[0]]
      list.sort(c_0_b_exit_ops)
      self.assertEqual(c_0_b_exit_ops, [
          "gradients/dense/BiasAdd_grad/tuple/control_dependency_1",
          "gradients/dense/MatMul_grad/tuple/control_dependency_1"
      ])
      f_ent_ops = [op.name for op in customs[1].forward_entrance_ops[0]]
      list.sort(f_ent_ops)
      f_exit_ops = [op.name for op in customs[1].forward_exit_ops[0]]
      list.sort(f_exit_ops)
      b_ent_ops = [op.name for op in customs[1].backward_entrance_ops[0]]
      list.sort(b_ent_ops)
      self.assertEqual(f_ent_ops, ["dense_1/MatMul"])
      self.assertEqual(f_exit_ops, ["Mean"])
      self.assertEqual(b_ent_ops, ["gradients/dense_1/MatMul_grad/MatMul"])
      c_1_b_exit_ops_0 = [op.name for op in customs[1].backward_exit_ops[0]]
      list.sort(c_1_b_exit_ops_0)
      self.assertEqual(c_1_b_exit_ops_0, [
          "gradients/dense_1/BiasAdd_grad/tuple/control_dependency_1",
          "gradients/dense_1/MatMul_grad/tuple/control_dependency_1"
      ])
      # Micro-batch index 1 views of the same entrance/exit ops.
      f_ent_ops = [op.name for op in customs[0].forward_entrance_ops[1]]
      list.sort(f_ent_ops)
      f_exit_ops = [op.name for op in customs[0].forward_exit_ops[1]]
      list.sort(f_exit_ops)
      c_1_b_ent_ops = [op.name for op in customs[0].backward_entrance_ops[1]]
      list.sort(c_1_b_ent_ops)
      c_1_b_exit_ops = [op.name for op in customs[0].backward_exit_ops[1]]
      list.sort(c_1_b_exit_ops)
      self.assertEqual(f_ent_ops, ["EPL_MICRO_BATCH_1/IteratorGetNext"])
      self.assertEqual(f_exit_ops, ["EPL_MICRO_BATCH_1/dense/BiasAdd"])
      self.assertEqual(c_1_b_ent_ops, [
          "EPL_MICRO_BATCH_1/gradients/dense/MatMul_grad/MatMul",
          "EPL_MICRO_BATCH_1/gradients/dense/MatMul_grad/MatMul_1"
      ])
      self.assertEqual(
          c_1_b_exit_ops,
          ["EPL_MICRO_BATCH_1/gradients/dense/BiasAdd_grad/tuple/control_dependency_1", \
           "EPL_MICRO_BATCH_1/gradients/dense/MatMul_grad/tuple/control_dependency_1"])
      f_ent_ops = [op.name for op in customs[1].forward_entrance_ops[1]]
      list.sort(f_ent_ops)
      f_exit_ops = [op.name for op in customs[1].forward_exit_ops[1]]
      list.sort(f_exit_ops)
      c_1_b_ent_ops = [op.name for op in customs[1].backward_entrance_ops[1]]
      list.sort(c_1_b_ent_ops)
      c_1_b_exit_ops = [op.name for op in customs[1].backward_exit_ops[1]]
      list.sort(c_1_b_exit_ops)
      self.assertEqual(f_ent_ops, ["EPL_MICRO_BATCH_1/dense_1/MatMul"])
      self.assertEqual(f_exit_ops, ["EPL_MICRO_BATCH_1/Mean"])
      self.assertEqual(
          c_1_b_ent_ops,
          ["EPL_MICRO_BATCH_1/gradients/dense_1/MatMul_grad/MatMul"])
      self.assertEqual(c_1_b_exit_ops, [
          "EPL_MICRO_BATCH_1/gradients/dense_1/BiasAdd_grad/tuple/control_dependency_1",
          "EPL_MICRO_BATCH_1/gradients/dense_1/MatMul_grad/tuple/control_dependency_1"
      ])
  def test_graph_format(self):
    """Graph.format() renders taskgraph/replica sections with device tags.

    Builds the 2-stage pipelined model, then compares the depth-1 dump
    against an expected layout, and a ``prefix_list=["dense"]``-filtered
    dump against a reduced layout. The unfiltered expectation includes the
    extra OptimizeDataset/ModelDataset lines only on TF >= 1.14.
    """
    config = epl.Config()
    config.pipeline.num_micro_batch = 2
    epl.init(config)
    with tf.Graph().as_default():
      loss = self._model_def()
      epl.add_to_collection(loss, GraphKeys.GLOBAL_MEAN_OBJECTS)
      optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
      gvs = optimizer.compute_gradients(loss)
      optimizer.apply_gradients(gvs)
      # This test only supports TF [1.12, 2.0).
      is_version_valid = (Version(__version__) >= Version("1.12.0") and Version(__version__) < Version("1.14.0")) or \
          (Version(__version__) < Version("2.0") and Version(__version__) >= Version("1.14.0"))
      self.assertTrue(is_version_valid)
      new_tf_version = Version(__version__) < Version("2.0") and Version(
          __version__) >= Version("1.14.0")
      # Expected depth-1 dump: one Begin/End section per (taskgraph,
      # replica), each listing top-level op-name prefixes on that device.
      format_str = "======= Begin Taskgraph 0 replica 0 [Device: /job:worker/replica:0/task:0/device:GPU:0] =======\n" \
          + "  TensorSliceDataset\n" \
          + "  BatchDatasetV2\n" \
          + "  RepeatDataset\n" \
          + ("  OptimizeDataset\n" if new_tf_version else "") \
          + ("  ModelDataset\n" if new_tf_version else "") \
          + "  MakeIterator\n" \
          + "  IteratorToStringHandle\n" \
          + "  IteratorGetNext\n" \
          + "  dense\n" \
          + "======= End Taskgraph 0 replica 0 [Device: /job:worker/replica:0/task:0/device:GPU:0] =======\n" \
          + "\n" \
          + "======= Begin Taskgraph 0 replica 1 [Device: /job:worker/replica:0/task:0/device:GPU:2] =======\n" \
          + "  EPL_REPLICA_1\n" \
          + "  EPL_REPLICA_1/IteratorGetNext\n" \
          + "  EPL_REPLICA_1/dense\n" \
          + "======= End Taskgraph 0 replica 1 [Device: /job:worker/replica:0/task:0/device:GPU:2] =======\n" \
          + "\n" \
          + "======= Begin Taskgraph 1 replica 0 [Device: /job:worker/replica:0/task:0/device:GPU:1] =======\n" \
          + "  dense_1\n" \
          + "  Mean\n" \
          + "======= End Taskgraph 1 replica 0 [Device: /job:worker/replica:0/task:0/device:GPU:1] =======\n" \
          + "\n" \
          + "======= Begin Taskgraph 1 replica 1 [Device: /job:worker/replica:0/task:0/device:GPU:3] =======\n" \
          + "  EPL_REPLICA_1\n" \
          + "  EPL_REPLICA_1/dense_1\n" \
          + "  EPL_REPLICA_1/Mean\n" \
          + "======= End Taskgraph 1 replica 1 [Device: /job:worker/replica:0/task:0/device:GPU:3] ======="
      # Expected dump when filtered down to the "dense" prefix.
      format_str2 = "======= Begin Taskgraph 0 replica 0 [Device: /job:worker/replica:0/task:0/device:GPU:0] =======\n" \
          + "  dense\n" \
          + "======= End Taskgraph 0 replica 0 [Device: /job:worker/replica:0/task:0/device:GPU:0] =======\n" \
          + "\n" \
          + "======= Begin Taskgraph 0 replica 1 [Device: /job:worker/replica:0/task:0/device:GPU:2] =======\n" \
          + "  EPL_REPLICA_1\n" \
          + "  EPL_REPLICA_1/dense\n" \
          + "======= End Taskgraph 0 replica 1 [Device: /job:worker/replica:0/task:0/device:GPU:2] =======\n" \
          + "\n" \
          + "======= Begin Taskgraph 1 replica 0 [Device: /job:worker/replica:0/task:0/device:GPU:1] =======\n" \
          + "  dense_1\n" \
          + "======= End Taskgraph 1 replica 0 [Device: /job:worker/replica:0/task:0/device:GPU:1] =======\n" \
          + "\n" \
          + "======= Begin Taskgraph 1 replica 1 [Device: /job:worker/replica:0/task:0/device:GPU:3] =======\n" \
          + "  EPL_REPLICA_1\n" \
          + "  EPL_REPLICA_1/dense_1\n" \
          + "======= End Taskgraph 1 replica 1 [Device: /job:worker/replica:0/task:0/device:GPU:3] ======="
      with tf.train.MonitoredTrainingSession(config=tf.ConfigProto(
          log_device_placement=False)) as sess:
        assert Graph.get().format(max_depth=1).strip() == format_str.strip()
        assert Graph.get().format(
            max_depth=1, prefix_list=["dense"]).strip() == format_str2.strip()
def _model(self):
num_x = np.random.randint(0, 10, (500, 10)).astype(dtype=np.float32)
num_y = np.random.randint(0, 10, 500).astype(dtype=np.int32)
dataset = tf.data.Dataset.from_tensor_slices((num_x, num_y)) \
.batch(10).repeat(1)
iterator = dataset.make_initializable_iterator()
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
x, _ = iterator.get_next()
x = tf.layers.dense(inputs=x, units=16, activation=None)
x = tf.layers.dense(inputs=x, units=16, activation=None)
dense1 = tf.layers.dense(inputs=x, units=16, activation=None)
logits = tf.layers.dense(inputs=dense1, units=10, activation=None)
loss = tf.reduce_mean(logits)
global_step = tf.train.get_or_create_global_step()
optimizer = tf.train.MomentumOptimizer(learning_rate=0.001, momentum=0.9)
train_op = optimizer.minimize(loss, global_step=global_step)
max_steps = 3
hooks = [tf.train.StopAtStepHook(last_step=max_steps)]
return [loss, train_op, global_step], hooks
  def test_multiple_graph(self):
    """Two tf.Graphs in one process: EPL state is rebuilt per graph.

    First graph: model inside an explicit ``epl.replicate`` scope; the
    train_op steps the global step (0, 1, 2). Second graph: model defined
    without a strategy scope; only loss and global_step are fetched (the
    train_op is never run), so the step stays at 0.
    """
    epl.init()
    steps = []
    with tf.Graph().as_default():
      with epl.replicate(device_count=1):
        train_opts, hooks = self._model()
      with tf.train.MonitoredTrainingSession(hooks=hooks) as sess:
        graph = epl.Graph.get()
        self.assertEqual(graph.num_stages, 1)
        self.assertEqual(graph.taskgraphs[0].num_replicas, 4)
        for i in range(3):
          train_loss, _, step = sess.run(train_opts)
          steps.append(step)
        self.assertEqual(steps, [0, 1, 2])
    with tf.Graph().as_default():
      train_opts, hooks = self._model()
      with tf.train.MonitoredTrainingSession(hooks=hooks) as sess:
        graph = epl.Graph.get()
        self.assertEqual(graph.num_stages, 1)
        self.assertEqual(graph.taskgraphs[0].num_replicas, 4)
        steps = []
        for i in range(3):
          # train_op (train_opts[1]) deliberately not fetched here.
          train_loss, step = sess.run([train_opts[0], train_opts[2]])
          steps.append(step)
        self.assertEqual(steps, [0, 0, 0])
  def test_multiple_graph_broadcast(self):
    """Broadcast ops are re-inserted for each consecutively-built graph.

    Runs the same replicated training twice in fresh tf.Graphs and checks
    after each that the number of EplNcclCommunicatorBroadcast ops in the
    EPL graph equals the number of gradients (one broadcast per gradient
    variable -- presumably for initial-value sync; confirm against EPL's
    broadcast pass).
    """
    epl.init()
    steps = []
    with tf.Graph().as_default():
      with epl.replicate(device_count=1):
        train_opts, hooks = self._model()
      with tf.train.MonitoredTrainingSession(hooks=hooks) as sess:
        graph = epl.Graph.get()
        self.assertEqual(graph.num_stages, 1)
        self.assertEqual(graph.taskgraphs[0].num_replicas, 4)
        for i in range(3):
          train_loss, _, step = sess.run(train_opts)
          steps.append(step)
        self.assertEqual(steps, [0, 1, 2])
        broadcast_ops = [o for o in epl.ir.graph.Graph.get().operations.values() if o.type == 'EplNcclCommunicatorBroadcast']
        self.assertEqual(len(broadcast_ops), len(epl.Graph.get().gradients))
    steps = []
    with tf.Graph().as_default():
      with epl.replicate(device_count=1):
        train_opts, hooks = self._model()
      with tf.train.MonitoredTrainingSession(hooks=hooks) as sess:
        graph = epl.Graph.get()
        self.assertEqual(graph.num_stages, 1)
        self.assertEqual(graph.taskgraphs[0].num_replicas, 4)
        for i in range(3):
          train_loss, _, step = sess.run(train_opts)
          steps.append(step)
        self.assertEqual(steps, [0, 1, 2])
        broadcast_ops = [o for o in epl.ir.graph.Graph.get().operations.values() if o.type == 'EplNcclCommunicatorBroadcast']
        self.assertEqual(len(broadcast_ops), len(epl.Graph.get().gradients))
  def test_dp_define_op_without_taskgraph(self):
    """Data parallelism: an op defined before any strategy scope still trains.

    ``global_step`` is created outside the ``epl.replicate`` scope; it must
    be assigned to taskgraph 0 and training must proceed normally
    (steps 0, 1, 2).
    """
    steps = []
    epl.init()
    with tf.Graph().as_default():
      # Defined before the replicate scope opens.
      global_step = tf.train.get_or_create_global_step()
      with epl.replicate(device_count=1):
        train_opts, hooks = self._model()
      with tf.train.MonitoredTrainingSession(hooks=hooks) as sess:
        graph = epl.Graph.get()
        self.assertEqual(graph.num_stages, 1)
        self.assertEqual(graph.taskgraphs[0].num_replicas, 4)
        self.assertEqual(graph.operations[global_step.op.name].taskgraph.index, 0)
        for i in range(3):
          train_loss, _, step = sess.run(train_opts)
          steps.append(step)
        self.assertEqual(steps, [0, 1, 2])
  def test_ga_define_op_without_taskgraph(self):
    """Gradient accumulation: global step advances once per 2 micro batches.

    Same setup as the DP variant but with ``num_micro_batch = 2``; the
    global step is observed to repeat each value twice over 6 runs
    ([0, 0, 1, 1, 2, 2]), i.e. it increments every second session run.
    """
    steps = []
    config = epl.Config()
    config.pipeline.num_micro_batch = 2
    epl.init(config)
    with tf.Graph().as_default():
      # Defined before the replicate scope opens.
      global_step = tf.train.get_or_create_global_step()
      with epl.replicate(device_count=1):
        train_opts, hooks = self._model()
      with tf.train.MonitoredTrainingSession(hooks=hooks) as sess:
        graph = epl.Graph.get()
        self.assertEqual(graph.num_stages, 1)
        self.assertEqual(graph.taskgraphs[0].num_replicas, 4)
        self.assertEqual(graph.operations[global_step.op.name].taskgraph.index, 0)
        for i in range(6):
          train_loss, _, step = sess.run(train_opts)
          steps.append(step)
        self.assertEqual(steps, [0, 0, 1, 1, 2, 2])
  def test_multithread_prefetch(self):
    """Training works with a dataset using a private input-pipeline thread pool.

    Skipped on TF >= 1.15, where this ``threadpool.override_threadpool``
    code path is not exercised -- presumably the API changed; confirm
    against the threadpool module.
    """
    if Version(__version__) >= Version("1.15.0"):
      return
    epl.init()
    epl.set_default_strategy(epl.replicate(1))
    num_x = np.random.randint(0, 10, (500, 10)).astype(dtype=np.float32)
    num_y = np.random.randint(0, 10, 500).astype(dtype=np.int32)
    dataset = tf.data.Dataset.from_tensor_slices((num_x, num_y)) \
        .batch(2).repeat(1)
    # Route the input pipeline through a dedicated 2-thread pool.
    dataset = threadpool.override_threadpool(
        dataset,
        threadpool.PrivateThreadPool(2, display_name='input_pipeline_thread_pool'))
    iterator = dataset.make_initializable_iterator()
    tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
    source, target = iterator.get_next()
    x = tf.layers.dense(inputs=source, units=16, activation=None)
    x = tf.layers.dense(inputs=x, units=16, activation=None)
    dense1 = tf.layers.dense(inputs=x, units=16, activation=None)
    logits = tf.layers.dense(inputs=dense1, units=10, activation=None)
    loss = tf.reduce_mean(logits)
    global_step = tf.train.get_or_create_global_step()
    optimizer = tf.train.MomentumOptimizer(learning_rate=0.001, momentum=0.9)
    train_op = optimizer.minimize(loss, global_step=global_step)
    steps = []
    with tf.train.MonitoredTrainingSession() as sess:
      for i in range(3):
        train_loss, _, step = sess.run([loss, train_op, global_step])
        steps.append(step)
    self.assertEqual(steps, [0, 1, 2])
  def test_check_and_set_cloned_dataset_need_clone(self):
    """Smoke test: check_and_set_cloned_dataset_need_clone runs without error.

    Builds a standard replicated model with a dataset input and invokes the
    checker; no assertions are made on its result.
    """
    epl.init()
    epl.set_default_strategy(epl.replicate(1))
    num_x = np.random.randint(0, 10, (500, 10)).astype(dtype=np.float32)
    num_y = np.random.randint(0, 10, 500).astype(dtype=np.int32)
    dataset = tf.data.Dataset.from_tensor_slices((num_x, num_y)) \
        .batch(2).repeat(1)
    iterator = dataset.make_initializable_iterator()
    tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
    source, target = iterator.get_next()
    x = tf.layers.dense(inputs=source, units=16, activation=None)
    x = tf.layers.dense(inputs=x, units=16, activation=None)
    dense1 = tf.layers.dense(inputs=x, units=16, activation=None)
    logits = tf.layers.dense(inputs=dense1, units=10, activation=None)
    loss = tf.reduce_mean(logits)
    global_step = tf.train.get_or_create_global_step()
    optimizer = tf.train.MomentumOptimizer(learning_rate=0.001, momentum=0.9)
    train_op = optimizer.minimize(loss, global_step=global_step)
    epl.Graph.get().check_and_set_cloned_dataset_need_clone()
# pylint: enable=missing-docstring,protected-access,unused-argument,
# pylint: enable=line-too-long,bad-continuation,unused-variable
# Run the TensorFlow test runner when this file is executed as a script.
if __name__ == "__main__":
  test.main()
| 45.80597
| 123
| 0.658059
| 5,162
| 36,828
| 4.409725
| 0.075165
| 0.079735
| 0.018451
| 0.006765
| 0.813381
| 0.786671
| 0.76106
| 0.718534
| 0.671528
| 0.625928
| 0
| 0.028016
| 0.214945
| 36,828
| 803
| 124
| 45.863014
| 0.759304
| 0.0413
| 0
| 0.59744
| 0
| 0.02276
| 0.159028
| 0.101225
| 0
| 0
| 0
| 0
| 0.199147
| 1
| 0.019915
| false
| 0
| 0.027027
| 0
| 0.052632
| 0.001422
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6a6453c97a741c103eb6a43e072e46587caf4d6e
| 64
|
py
|
Python
|
lio/utils/plot/__init__.py
|
YivanZhang/lio
|
07587a6d864e7876b2ae4cdc00e59ac1b82781bc
|
[
"MIT"
] | 8
|
2021-04-16T14:33:42.000Z
|
2022-03-23T03:47:33.000Z
|
lio/utils/plot/__init__.py
|
YivanZhang/lio
|
07587a6d864e7876b2ae4cdc00e59ac1b82781bc
|
[
"MIT"
] | 1
|
2021-08-20T18:33:24.000Z
|
2021-08-21T14:54:00.000Z
|
lio/utils/plot/__init__.py
|
YivanZhang/lio
|
07587a6d864e7876b2ae4cdc00e59ac1b82781bc
|
[
"MIT"
] | null | null | null |
from .data import cov2d, gaussian_mixture
from . import simplex
| 21.333333
| 41
| 0.8125
| 9
| 64
| 5.666667
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018182
| 0.140625
| 64
| 2
| 42
| 32
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6a8addb1d7fa5a7e82c6dc6da6f23717e90df234
| 34
|
py
|
Python
|
pep-dvm-updater/config.py
|
siddhantkhandelwal/pep-website
|
4d8b0fc7d2c7fe41c3497e64b8a3c34f02c13bf9
|
[
"MIT"
] | null | null | null |
pep-dvm-updater/config.py
|
siddhantkhandelwal/pep-website
|
4d8b0fc7d2c7fe41c3497e64b8a3c34f02c13bf9
|
[
"MIT"
] | 3
|
2021-02-08T20:28:24.000Z
|
2021-06-10T21:04:27.000Z
|
pep-dvm-updater/config.py
|
siddhantkhandelwal/pep-website
|
4d8b0fc7d2c7fe41c3497e64b8a3c34f02c13bf9
|
[
"MIT"
] | null | null | null |
# Connection endpoint for the pep-dvm updater.
# NOTE(review): hard-coded public IP -- consider moving to environment config.
HOST = '139.59.3.240'
PORT = 9999
| 11.333333
| 21
| 0.617647
| 7
| 34
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.464286
| 0.176471
| 34
| 2
| 22
| 17
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6a8f6907a2fa5134f4bd8887ff63b068368c7a8c
| 222
|
py
|
Python
|
API/resources/visitors.py
|
Slavkata/MAC
|
1ba8f830367b550922af87b1acf8d22caf93dc23
|
[
"MIT"
] | 3
|
2019-02-19T11:53:39.000Z
|
2019-05-26T15:36:52.000Z
|
API/resources/visitors.py
|
Slavkata/MAC-website
|
1ba8f830367b550922af87b1acf8d22caf93dc23
|
[
"MIT"
] | 24
|
2019-02-26T12:26:34.000Z
|
2022-03-11T23:49:43.000Z
|
API/resources/visitors.py
|
Slavkata/MAC
|
1ba8f830367b550922af87b1acf8d22caf93dc23
|
[
"MIT"
] | 1
|
2019-02-19T08:04:48.000Z
|
2019-02-19T08:04:48.000Z
|
from models.visitors_count import VisitorsCount
from flask_restful import Resource, reqparse
from flask import jsonify
class VisitorResource(Resource):
    """Flask-RESTful resource exposing the visitor counter."""

    def get(self):
        # GET returns the current count from the VisitorsCount model as JSON.
        return jsonify(VisitorsCount.get_count())
| 27.75
| 49
| 0.797297
| 27
| 222
| 6.444444
| 0.62963
| 0.103448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144144
| 222
| 7
| 50
| 31.714286
| 0.915789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.5
| 0.166667
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
6a91ba6464a5182887423613ad94ac0b09c60a75
| 234
|
py
|
Python
|
pioneer/das/api/samples/annotations/lane.py
|
leddartech/pioneer.das.api
|
35f2c541ea8d1768d5f4612ea8d29cb2ba8345b7
|
[
"BSD-3-Clause"
] | 8
|
2021-02-19T16:24:18.000Z
|
2021-10-01T17:51:22.000Z
|
pioneer/das/api/samples/annotations/lane.py
|
leddartech/pioneer.das.api
|
35f2c541ea8d1768d5f4612ea8d29cb2ba8345b7
|
[
"BSD-3-Clause"
] | 8
|
2021-02-25T08:56:37.000Z
|
2021-10-20T20:58:26.000Z
|
pioneer/das/api/samples/annotations/lane.py
|
leddartech/pioneer.das.api
|
35f2c541ea8d1768d5f4612ea8d29cb2ba8345b7
|
[
"BSD-3-Clause"
] | 2
|
2021-03-01T07:47:31.000Z
|
2021-07-07T20:50:27.000Z
|
from pioneer.das.api.samples.sample import Sample
class Lane(Sample):
    """Lane annotation sample; a thin wrapper adding no state over Sample."""

    def __init__(self, index, datasource, virtual_raw = None, virtual_ts = None):
        # Delegates construction entirely to the generic Sample base.
        super(Lane, self).__init__(index, datasource, virtual_raw, virtual_ts)
| 39
| 81
| 0.739316
| 32
| 234
| 5.03125
| 0.59375
| 0.186335
| 0.273292
| 0.310559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 234
| 6
| 82
| 39
| 0.813131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
6aac348411f1e97e49648a9c171f6ef4bb895957
| 303
|
py
|
Python
|
HacoWeb/haco/venv/bin/ogrmerge.py
|
DeanORourke1996/haco
|
fc04d763735ca376c51e82e1f1be20b092ce751c
|
[
"MIT"
] | null | null | null |
HacoWeb/haco/venv/bin/ogrmerge.py
|
DeanORourke1996/haco
|
fc04d763735ca376c51e82e1f1be20b092ce751c
|
[
"MIT"
] | null | null | null |
HacoWeb/haco/venv/bin/ogrmerge.py
|
DeanORourke1996/haco
|
fc04d763735ca376c51e82e1f1be20b092ce751c
|
[
"MIT"
] | null | null | null |
#!/Users/dean/haco/HacoWeb/haco/venv/bin/python
# Thin CLI shim: delegates to osgeo_utils.ogrmerge and exits with its status.
import sys
# import osgeo_utils.ogrmerge as a convenience to use as a script
from osgeo_utils.ogrmerge import * # noqa
from osgeo_utils.ogrmerge import main
from osgeo.gdal import deprecation_warn
# Warn that invoking this script directly is deprecated, then run main.
deprecation_warn('ogrmerge')
sys.exit(main(sys.argv))
| 25.25
| 65
| 0.79868
| 48
| 303
| 4.9375
| 0.541667
| 0.126582
| 0.227848
| 0.185654
| 0.236287
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112211
| 303
| 11
| 66
| 27.545455
| 0.881041
| 0.379538
| 0
| 0
| 0
| 0
| 0.043243
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0a9124e1f8a308124ec511f9272266ec3a311141
| 1,135
|
py
|
Python
|
examples/academic/bakery/run.py
|
gijskant/mcrl2-pmc
|
9ea75755081b20623bc8fc7db27124d084e781fe
|
[
"BSL-1.0"
] | null | null | null |
examples/academic/bakery/run.py
|
gijskant/mcrl2-pmc
|
9ea75755081b20623bc8fc7db27124d084e781fe
|
[
"BSL-1.0"
] | null | null | null |
examples/academic/bakery/run.py
|
gijskant/mcrl2-pmc
|
9ea75755081b20623bc8fc7db27124d084e781fe
|
[
"BSL-1.0"
] | null | null | null |
import os

# Not all of the below terminate!
os.system('mcrl22lps -v bakery.mcrl2 bakery.lps')

# For each modal-mu property, generate a PBES from the LPS and solve it.
# Commands are issued in the same order as the original script.
for prop in ('nodeadlock',
             'request_can_eventually_enter',
             'request_must_eventually_enter',
             'mutual_exclusion',
             'always_can_get_number',
             'get_at_least_number_circulating'):
    os.system('lps2pbes -v -f %s.mcf bakery.lps bakery.%s.pbes' % (prop, prop))
    os.system('pbes2bool -v -rjittyc bakery.%s.pbes' % prop)
| 51.590909
| 118
| 0.815859
| 178
| 1,135
| 4.966292
| 0.191011
| 0.117647
| 0.149321
| 0.115385
| 0.808824
| 0.728507
| 0.651584
| 0.339367
| 0.128959
| 0.128959
| 0
| 0.014205
| 0.069604
| 1,135
| 21
| 119
| 54.047619
| 0.822917
| 0.027313
| 0
| 0
| 0
| 0
| 0.819419
| 0.489111
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.071429
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0abdf3124a372f5e65596ee1d2beb296c633002b
| 189
|
py
|
Python
|
src/waldur_digitalocean/admin.py
|
geant-multicloud/MCMS-mastermind
|
81333180f5e56a0bc88d7dad448505448e01f24e
|
[
"MIT"
] | 26
|
2017-10-18T13:49:58.000Z
|
2021-09-19T04:44:09.000Z
|
src/waldur_digitalocean/admin.py
|
geant-multicloud/MCMS-mastermind
|
81333180f5e56a0bc88d7dad448505448e01f24e
|
[
"MIT"
] | 14
|
2018-12-10T14:14:51.000Z
|
2021-06-07T10:33:39.000Z
|
src/waldur_digitalocean/admin.py
|
geant-multicloud/MCMS-mastermind
|
81333180f5e56a0bc88d7dad448505448e01f24e
|
[
"MIT"
] | 32
|
2017-09-24T03:10:45.000Z
|
2021-10-16T16:41:09.000Z
|
from django.contrib import admin
from waldur_core.structure import admin as structure_admin
from .models import Droplet
# Expose DigitalOcean droplets in the Django admin, reusing the generic
# virtual-machine admin class from waldur_core.
admin.site.register(Droplet, structure_admin.VirtualMachineAdmin)
| 23.625
| 65
| 0.851852
| 25
| 189
| 6.32
| 0.56
| 0.139241
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.100529
| 189
| 7
| 66
| 27
| 0.929412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0ae3304ec2e5117dd861129b7965c6c014247d85
| 64
|
py
|
Python
|
CodeWars/7 Kyu/Time Converter- hours, minutes, seconds and milliseconds.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/7 Kyu/Time Converter- hours, minutes, seconds and milliseconds.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/7 Kyu/Time Converter- hours, minutes, seconds and milliseconds.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
def convert(time):
    """Return the time-of-day of *time* formatted as '<locale %X>,mmm'.

    The microseconds field (%f, six digits) is truncated to three digits,
    i.e. milliseconds.  NOTE(review): '%X' is the locale's time
    representation, so output varies with locale — confirm callers expect
    the C-locale 'HH:MM:SS' form.
    """
    time_of_day = time.time()
    formatted = time_of_day.strftime('%X,%f')
    # Drop the last three digits of the microseconds -> milliseconds.
    return formatted[:-3]
| 32
| 45
| 0.609375
| 10
| 64
| 3.9
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017544
| 0.109375
| 64
| 2
| 45
| 32
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
0ae905509dc24cee2e021439634a9d2d1000d046
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/rope/base/evaluate.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/rope/base/evaluate.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/rope/base/evaluate.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/16/29/c0/b7278e89bda1fc5d85d00e74581a7dc8dbda44dffc8edfc94822652b66
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.385417
| 0
| 96
| 1
| 96
| 96
| 0.510417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7c1c7106950bab8ffe948f17e5296bd077c87094
| 98
|
py
|
Python
|
bardeen/npjson.py
|
mverleg/bardeen
|
5e91f5bad6b3d460c9afb5a41d2020dd91a27124
|
[
"BSD-3-Clause"
] | null | null | null |
bardeen/npjson.py
|
mverleg/bardeen
|
5e91f5bad6b3d460c9afb5a41d2020dd91a27124
|
[
"BSD-3-Clause"
] | null | null | null |
bardeen/npjson.py
|
mverleg/bardeen
|
5e91f5bad6b3d460c9afb5a41d2020dd91a27124
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This has been split into a separate package: pyjson-tricks.
"""
from json_tricks import *
| 10.888889
| 59
| 0.704082
| 14
| 98
| 4.857143
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193878
| 98
| 8
| 60
| 12.25
| 0.860759
| 0.602041
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7c6721ff42031fd60c2adab857002c4450bbea4c
| 24,104
|
py
|
Python
|
tests/unit/test_combiner.py
|
shashank-google/professional-services-data-validator
|
db9c63add4a3ab40b09113ca7ed1c03b7c12e6f2
|
[
"Apache-2.0"
] | 1
|
2021-12-24T10:01:31.000Z
|
2021-12-24T10:01:31.000Z
|
tests/unit/test_combiner.py
|
shashank-google/professional-services-data-validator
|
db9c63add4a3ab40b09113ca7ed1c03b7c12e6f2
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_combiner.py
|
shashank-google/professional-services-data-validator
|
db9c63add4a3ab40b09113ca7ed1c03b7c12e6f2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import ibis.backends.pandas
import pandas
import pandas.testing
import pytest
from data_validation import metadata
# NaN sentinel used in expected DataFrames for cells the combiner leaves
# unset (e.g. outer-join rows with no matching source/target group).
_NAN = float("nan")
# Shared RunMetadata fixture: a single "count" column validation with fixed
# timestamps so expected report frames can be written out literally.
EXAMPLE_RUN_METADATA = metadata.RunMetadata(
    validations={
        "count": metadata.ValidationMetadata(
            source_table_name="test_source",
            source_table_schema="bq-public.source_dataset",
            source_column_name="timecol",
            target_table_name="test_target",
            target_table_schema="bq-public.target_dataset",
            target_column_name="timecol",
            validation_type="Column",
            aggregation_type="count",
            threshold=0.0,
        ),
    },
    start_time=datetime.datetime(1998, 9, 4, 7, 30, 1),
    end_time=datetime.datetime(1998, 9, 4, 7, 31, 42),
    labels=[("name", "test_label")],
    run_id="test-run",
)
@pytest.fixture
def module_under_test():
    """Return the combiner module; imported lazily inside the fixture."""
    from data_validation import combiner
    return combiner
def test_generate_report_with_different_columns(module_under_test):
    """generate_report must raise ValueError when source/target schemas differ."""
    # Source has an extra "sum" column that the target lacks.
    source = pandas.DataFrame({"count": [1], "sum": [3]})
    target = pandas.DataFrame({"count": [2]})
    pandas_client = ibis.backends.pandas.connect(
        {
            module_under_test.DEFAULT_SOURCE: source,
            module_under_test.DEFAULT_TARGET: target,
        }
    )
    with pytest.raises(
        ValueError, match="Expected source and target to have same schema"
    ):
        module_under_test.generate_report(
            pandas_client,
            # Schema validation occurs before run_metadata is needed.
            None,
            source=pandas_client.table(module_under_test.DEFAULT_SOURCE),
            target=pandas_client.table(module_under_test.DEFAULT_TARGET),
        )
def test_generate_report_with_too_many_rows(module_under_test):
    """Multi-row inputs without group-by produce one report row per pairing."""
    source = pandas.DataFrame({"count": [1, 1]})
    target = pandas.DataFrame({"count": [2, 2]})
    pandas_client = ibis.backends.pandas.connect(
        {
            module_under_test.DEFAULT_SOURCE: source,
            module_under_test.DEFAULT_TARGET: target,
        }
    )
    report = module_under_test.generate_report(
        pandas_client,
        # Validation occurs before run_metadata is needed.
        EXAMPLE_RUN_METADATA,
        source=pandas_client.table(module_under_test.DEFAULT_SOURCE),
        target=pandas_client.table(module_under_test.DEFAULT_TARGET),
    )
    # TODO: how do we want to handle this going forward?
    # 2x2 cross of rows times validations currently yields 16 report rows.
    assert len(report) == 16
@pytest.mark.parametrize(
    ("source_df", "target_df", "run_metadata", "expected"),
    (
        # Case 1: count 1 vs 2 with a 0% threshold -> 100% difference, "fail".
        (
            pandas.DataFrame({"count": [1]}),
            pandas.DataFrame({"count": [2]}),
            metadata.RunMetadata(
                validations={
                    "count": metadata.ValidationMetadata(
                        source_table_name="test_source",
                        source_table_schema="bq-public.source_dataset",
                        source_column_name=None,
                        target_table_name="test_target",
                        target_table_schema="bq-public.target_dataset",
                        target_column_name=None,
                        validation_type="Column",
                        aggregation_type="count",
                        threshold=0.0,
                    ),
                },
                start_time=datetime.datetime(1998, 9, 4, 7, 30, 1),
                end_time=datetime.datetime(1998, 9, 4, 7, 31, 42),
                labels=[("name", "test_label")],
                run_id="test-run",
            ),
            pandas.DataFrame(
                {
                    "run_id": ["test-run"],
                    "start_time": [datetime.datetime(1998, 9, 4, 7, 30, 1)],
                    "end_time": [datetime.datetime(1998, 9, 4, 7, 31, 42)],
                    "source_table_name": ["bq-public.source_dataset.test_source"],
                    "source_column_name": [None],
                    "target_table_name": ["bq-public.target_dataset.test_target"],
                    "target_column_name": [None],
                    "validation_type": ["Column"],
                    "aggregation_type": ["count"],
                    "validation_name": ["count"],
                    "source_agg_value": ["1"],
                    "target_agg_value": ["2"],
                    "group_by_columns": [None],
                    "difference": [1.0],
                    "pct_difference": [100.0],
                    "pct_threshold": [0.0],
                    "status": ["fail"],
                    "labels": [[("name", "test_label")]],
                }
            ),
        ),
        # Case 2: identical timestamp max values -> zero difference, "success".
        (
            pandas.DataFrame(
                {"timecol__max": [pandas.Timestamp("2020-07-01T16:00:00Z")]}
            ),
            pandas.DataFrame(
                {"timecol__max": [pandas.Timestamp("2020-07-01T16:00:00Z")]}
            ),
            metadata.RunMetadata(
                validations={
                    "timecol__max": metadata.ValidationMetadata(
                        source_table_name="test_source",
                        source_table_schema="bq-public.source_dataset",
                        source_column_name="timecol",
                        target_table_name="test_target",
                        target_table_schema="bq-public.target_dataset",
                        target_column_name="timecol",
                        validation_type="Column",
                        aggregation_type="max",
                        threshold=0.0,
                    ),
                },
                start_time=datetime.datetime(1998, 9, 4, 7, 30, 1),
                end_time=datetime.datetime(1998, 9, 4, 7, 31, 42),
                labels=[("name", "test_label")],
                run_id="test-run",
            ),
            pandas.DataFrame(
                {
                    "run_id": ["test-run"],
                    "start_time": [datetime.datetime(1998, 9, 4, 7, 30, 1)],
                    "end_time": [datetime.datetime(1998, 9, 4, 7, 31, 42)],
                    "source_table_name": ["bq-public.source_dataset.test_source"],
                    "source_column_name": ["timecol"],
                    "target_table_name": ["bq-public.target_dataset.test_target"],
                    "target_column_name": ["timecol"],
                    "validation_type": ["Column"],
                    "aggregation_type": ["max"],
                    "validation_name": ["timecol__max"],
                    "source_agg_value": ["2020-07-01 16:00:00+00:00"],
                    "target_agg_value": ["2020-07-01 16:00:00+00:00"],
                    "group_by_columns": [None],
                    "difference": [0.0],
                    "pct_difference": [0.0],
                    "pct_threshold": [0.0],
                    "status": ["success"],
                    "labels": [[("name", "test_label")]],
                }
            ),
        ),
        # Case 3: differing epoch-second timestamps -> numeric difference of
        # 400M seconds (25%), above the 0% threshold -> "fail".
        (
            pandas.DataFrame(
                {
                    "timecol__max": [
                        pandas.Timestamp(1600000000, unit="s", tz=datetime.timezone.utc)
                    ]
                }
            ),
            pandas.DataFrame(
                {
                    "timecol__max": [
                        pandas.Timestamp(2000000000, unit="s", tz=datetime.timezone.utc)
                    ]
                }
            ),
            metadata.RunMetadata(
                validations={
                    "timecol__max": metadata.ValidationMetadata(
                        source_column_name="timecol",
                        source_table_name="test_source",
                        source_table_schema="bq-public.source_dataset",
                        target_column_name="timecol",
                        target_table_name="test_target",
                        target_table_schema="bq-public.target_dataset",
                        validation_type="Column",
                        aggregation_type="max",
                        threshold=0.0,
                    ),
                },
                start_time=datetime.datetime(1998, 9, 4, 7, 30, 1),
                end_time=datetime.datetime(1998, 9, 4, 7, 31, 42),
                labels=[("name", "test_label")],
                run_id="test-run",
            ),
            pandas.DataFrame(
                {
                    "run_id": ["test-run"],
                    "start_time": [datetime.datetime(1998, 9, 4, 7, 30, 1)],
                    "end_time": [datetime.datetime(1998, 9, 4, 7, 31, 42)],
                    "source_table_name": ["bq-public.source_dataset.test_source"],
                    "source_column_name": ["timecol"],
                    "target_table_name": ["bq-public.target_dataset.test_target"],
                    "target_column_name": ["timecol"],
                    "validation_type": ["Column"],
                    "aggregation_type": ["max"],
                    "validation_name": ["timecol__max"],
                    "source_agg_value": ["2020-09-13 12:26:40+00:00"],
                    "target_agg_value": ["2033-05-18 03:33:20+00:00"],
                    "group_by_columns": [None],
                    "difference": [400000000.0],
                    "pct_difference": [25.0],
                    "pct_threshold": [0.0],
                    "status": ["fail"],
                    "labels": [[("name", "test_label")]],
                }
            ),
        ),
        # Case 4: two validations with different thresholds -- count passes
        # (12.5% < 30%), sum fails (-200% vs 0% threshold).
        (
            pandas.DataFrame({"count": [8], "sum__ttteeesssttt": [-1]}),
            pandas.DataFrame({"count": [9], "sum__ttteeesssttt": [1]}),
            metadata.RunMetadata(
                validations={
                    "count": metadata.ValidationMetadata(
                        source_table_name="test_source",
                        source_table_schema="bq-public.source_dataset",
                        source_column_name=None,
                        target_table_name="test_target",
                        target_table_schema="bq-public.target_dataset",
                        target_column_name=None,
                        validation_type="Column",
                        aggregation_type="count",
                        threshold=30.0,
                    ),
                    "sum__ttteeesssttt": metadata.ValidationMetadata(
                        source_table_name="test_source",
                        source_table_schema="bq-public.source_dataset",
                        source_column_name="test_col",
                        target_table_name="test_target",
                        target_table_schema="bq-public.target_dataset",
                        target_column_name="ttteeesssttt_col",
                        validation_type="Column",
                        aggregation_type="sum",
                        threshold=0.0,
                    ),
                },
                start_time=datetime.datetime(1998, 9, 4, 7, 30, 1),
                end_time=datetime.datetime(1998, 9, 4, 7, 31, 42),
                labels=[("name", "test_label")],
                run_id="test-run",
            ),
            pandas.DataFrame(
                {
                    "run_id": ["test-run"] * 2,
                    "start_time": [datetime.datetime(1998, 9, 4, 7, 30, 1)] * 2,
                    "end_time": [datetime.datetime(1998, 9, 4, 7, 31, 42)] * 2,
                    "source_table_name": [
                        "bq-public.source_dataset.test_source",
                        "bq-public.source_dataset.test_source",
                    ],
                    "source_column_name": [None, "test_col"],
                    "target_table_name": [
                        "bq-public.target_dataset.test_target",
                        "bq-public.target_dataset.test_target",
                    ],
                    "target_column_name": [None, "ttteeesssttt_col"],
                    "validation_type": ["Column", "Column"],
                    "aggregation_type": ["count", "sum"],
                    "validation_name": ["count", "sum__ttteeesssttt"],
                    "source_agg_value": ["8", "-1"],
                    "target_agg_value": ["9", "1"],
                    "group_by_columns": [None, None],
                    "difference": [1.0, 2.0],
                    "pct_difference": [12.5, -200.0],
                    "pct_threshold": [30.0, 0.0],
                    "status": ["success", "fail"],
                    "labels": [[("name", "test_label")]] * 2,
                }
            ),
        ),
    ),
)
def test_generate_report_without_group_by(
    module_under_test, source_df, target_df, run_metadata, expected
):
    """Ungrouped reports must equal the expected frame cell-for-cell."""
    pandas_client = ibis.backends.pandas.connect(
        {"test_source": source_df, "test_target": target_df}
    )
    report = module_under_test.generate_report(
        pandas_client,
        run_metadata,
        source=pandas_client.table("test_source"),
        target=pandas_client.table("test_target"),
    )
    # Sort columns by name to order in the comparison.
    # https://stackoverflow.com/a/11067072/101923
    # Sort rows by name to order in the comparison.
    report = (
        report.sort_values("validation_name")
        .reset_index(drop=True)
        .reindex(sorted(report.columns), axis=1)
    )
    expected = (
        expected.sort_values("validation_name")
        .reset_index(drop=True)
        .reindex(sorted(expected.columns), axis=1)
    )
    pandas.testing.assert_frame_equal(report, expected)
@pytest.mark.parametrize(
    ("source_df", "target_df", "join_on_fields", "run_metadata", "expected"),
    (
        # Case 1: four (grp_a, grp_i) groups; only the last is within the
        # 7% threshold -> three "fail" rows and one "success".
        (
            pandas.DataFrame(
                {
                    "count": [2, 4, 8, 16],
                    "grp_a": ["a", "a", "b", "b"],
                    "grp_i": [0, 1, 0, 1],
                }
            ),
            pandas.DataFrame(
                {
                    "count": [1, 3, 7, 17],
                    "grp_a": ["a", "a", "b", "b"],
                    "grp_i": [0, 1, 0, 1],
                }
            ),
            ("grp_a", "grp_i"),
            metadata.RunMetadata(
                validations={
                    "count": metadata.ValidationMetadata(
                        source_table_name="test_source",
                        source_table_schema="bq-public.source_dataset",
                        source_column_name=None,
                        target_table_name="test_target",
                        target_table_schema="bq-public.target_dataset",
                        target_column_name=None,
                        validation_type="GroupedColumn",
                        aggregation_type="count",
                        threshold=7.0,
                    ),
                },
                start_time=datetime.datetime(1998, 9, 4, 7, 30, 1),
                end_time=datetime.datetime(1998, 9, 4, 7, 31, 42),
                labels=[("name", "group_label")],
                run_id="grouped-test",
            ),
            pandas.DataFrame(
                {
                    "run_id": ["grouped-test"] * 4,
                    "start_time": [datetime.datetime(1998, 9, 4, 7, 30, 1)] * 4,
                    "end_time": [datetime.datetime(1998, 9, 4, 7, 31, 42)] * 4,
                    "source_table_name": ["bq-public.source_dataset.test_source"] * 4,
                    "source_column_name": [None] * 4,
                    "target_table_name": ["bq-public.target_dataset.test_target"] * 4,
                    "target_column_name": [None] * 4,
                    "validation_type": ["GroupedColumn"] * 4,
                    "aggregation_type": ["count"] * 4,
                    "validation_name": ["count"] * 4,
                    "source_agg_value": ["2", "4", "8", "16"],
                    "target_agg_value": ["1", "3", "7", "17"],
                    "group_by_columns": [
                        '{"grp_a": "a", "grp_i": "0"}',
                        '{"grp_a": "a", "grp_i": "1"}',
                        '{"grp_a": "b", "grp_i": "0"}',
                        '{"grp_a": "b", "grp_i": "1"}',
                    ],
                    "difference": [-1.0, -1.0, -1.0, 1.0],
                    "pct_difference": [-50.0, -25.0, -12.5, 6.25],
                    "pct_threshold": [7.0, 7.0, 7.0, 7.0],
                    "status": ["fail", "fail", "fail", "success"],
                    "labels": [[("name", "group_label")]] * 4,
                }
            ),
        ),
        # Case 2: group values containing '"' and '\' must be JSON-escaped
        # in the group_by_columns report column.
        (
            pandas.DataFrame({"count": [1, 2], "grp": ['"', "\\"]}),
            pandas.DataFrame({"count": [3, 4], "grp": ['"', "\\"]}),
            ("grp",),
            metadata.RunMetadata(
                validations={
                    "count": metadata.ValidationMetadata(
                        source_table_name="test_source",
                        source_table_schema="bq-public.source_dataset",
                        source_column_name=None,
                        target_table_name="test_target",
                        target_table_schema="bq-public.target_dataset",
                        target_column_name=None,
                        validation_type="GroupedColumn",
                        aggregation_type="count",
                        threshold=100.0,
                    ),
                },
                start_time=datetime.datetime(1998, 9, 4, 7, 30, 1),
                end_time=datetime.datetime(1998, 9, 4, 7, 31, 42),
                labels=[("name", "group_label")],
                run_id="grouped-test",
            ),
            pandas.DataFrame(
                {
                    "run_id": ["grouped-test"] * 2,
                    "start_time": [datetime.datetime(1998, 9, 4, 7, 30, 1)] * 2,
                    "end_time": [datetime.datetime(1998, 9, 4, 7, 31, 42)] * 2,
                    "source_table_name": ["bq-public.source_dataset.test_source"] * 2,
                    "source_column_name": [None] * 2,
                    "target_table_name": ["bq-public.target_dataset.test_target"] * 2,
                    "target_column_name": [None] * 2,
                    "validation_type": ["GroupedColumn"] * 2,
                    "aggregation_type": ["count"] * 2,
                    "validation_name": ["count"] * 2,
                    "source_agg_value": ["1", "2"],
                    "target_agg_value": ["3", "4"],
                    "group_by_columns": ['{"grp": "\\""}', '{"grp": "\\\\"}'],
                    "difference": [2.0, 2.0],
                    "pct_difference": [200.0, 100.0],
                    "pct_threshold": [100.0, 100.0],
                    "status": ["fail", "success"],
                    "labels": [[("name", "group_label")]] * 2,
                }
            ),
        ),
        # Case 3: groups "b" (target-only) and "c" (source-only) don't
        # align -- the outer join leaves NaN in the unmatched side's cells.
        (
            pandas.DataFrame(
                {
                    "count": [2, 4, 6, 8],
                    "grp_a": ["a", "a", "c", "c"],
                    "grp_i": [0, 1, 0, 1],
                }
            ),
            pandas.DataFrame(
                {
                    "count": [1, 3, 5, 7],
                    "grp_a": ["a", "a", "b", "b"],
                    "grp_i": [0, 1, 0, 1],
                }
            ),
            ("grp_a", "grp_i"),
            metadata.RunMetadata(
                validations={
                    "count": metadata.ValidationMetadata(
                        source_table_name="test_source",
                        source_table_schema="bq-public.source_dataset",
                        source_column_name=None,
                        target_table_name="test_target",
                        target_table_schema="bq-public.target_dataset",
                        target_column_name=None,
                        validation_type="GroupedColumn",
                        aggregation_type="count",
                        threshold=25.0,
                    ),
                },
                start_time=datetime.datetime(1998, 9, 4, 7, 30, 1),
                end_time=datetime.datetime(1998, 9, 4, 7, 31, 42),
                labels=[("name", "group_label")],
                run_id="grouped-test",
            ),
            pandas.DataFrame(
                {
                    "run_id": ["grouped-test"] * 6,
                    "start_time": [datetime.datetime(1998, 9, 4, 7, 30, 1)] * 6,
                    "end_time": [datetime.datetime(1998, 9, 4, 7, 31, 42)] * 6,
                    "source_table_name": [
                        "bq-public.source_dataset.test_source",
                        "bq-public.source_dataset.test_source",
                        _NAN,
                        _NAN,
                        "bq-public.source_dataset.test_source",
                        "bq-public.source_dataset.test_source",
                    ],
                    "source_column_name": [None] * 6,
                    "target_table_name": [
                        "bq-public.target_dataset.test_target",
                        "bq-public.target_dataset.test_target",
                        "bq-public.target_dataset.test_target",
                        "bq-public.target_dataset.test_target",
                        _NAN,
                        _NAN,
                    ],
                    "target_column_name": [None] * 6,
                    "validation_type": ["GroupedColumn"] * 6,
                    "aggregation_type": ["count"] * 6,
                    "validation_name": ["count"] * 6,
                    "source_agg_value": ["2", "4", _NAN, _NAN, "6", "8"],
                    "target_agg_value": ["1", "3", "5", "7", _NAN, _NAN],
                    "group_by_columns": [
                        '{"grp_a": "a", "grp_i": "0"}',
                        '{"grp_a": "a", "grp_i": "1"}',
                        '{"grp_a": "b", "grp_i": "0"}',
                        '{"grp_a": "b", "grp_i": "1"}',
                        '{"grp_a": "c", "grp_i": "0"}',
                        '{"grp_a": "c", "grp_i": "1"}',
                    ],
                    "difference": [-1.0, -1.0, _NAN, _NAN, _NAN, _NAN],
                    "pct_difference": [-50.0, -25.0, _NAN, _NAN, _NAN, _NAN],
                    "pct_threshold": [25.0, 25.0, _NAN, _NAN, _NAN, _NAN],
                    "status": ["fail", "success", _NAN, _NAN, _NAN, _NAN],
                    "labels": [[("name", "group_label")]] * 6,
                }
            ),
        ),
    ),
)
def test_generate_report_with_group_by(
    module_under_test, source_df, target_df, join_on_fields, run_metadata, expected
):
    """Grouped reports (join_on_fields) must equal the expected frame."""
    pandas_client = ibis.backends.pandas.connect(
        {"test_source": source_df, "test_target": target_df}
    )
    report = module_under_test.generate_report(
        pandas_client,
        run_metadata,
        join_on_fields=join_on_fields,
        source=pandas_client.table("test_source"),
        target=pandas_client.table("test_target"),
    )
    # Sort columns by name to order in the comparison.
    # https://stackoverflow.com/a/11067072/101923
    # Sort rows by name to order in the comparison.
    report = (
        report.sort_values("validation_name")
        .sort_values("group_by_columns")
        .reset_index(drop=True)
        .reindex(sorted(report.columns), axis=1)
    )
    expected = (
        expected.sort_values("validation_name")
        .sort_values("group_by_columns")
        .reset_index(drop=True)
        .reindex(sorted(expected.columns), axis=1)
    )
    pandas.testing.assert_frame_equal(report, expected)
| 42.586572
| 88
| 0.46569
| 2,288
| 24,104
| 4.630245
| 0.097028
| 0.030206
| 0.056636
| 0.067963
| 0.815556
| 0.77374
| 0.747782
| 0.723523
| 0.6936
| 0.67491
| 0
| 0.050668
| 0.400639
| 24,104
| 565
| 89
| 42.661947
| 0.682633
| 0.04074
| 0
| 0.622137
| 0
| 0
| 0.217542
| 0.052989
| 0
| 0
| 0
| 0.00177
| 0.005725
| 1
| 0.009542
| false
| 0
| 0.013359
| 0
| 0.024809
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7c8c1f582bd42c8babc1b0a6d621a87e41f546f0
| 198
|
py
|
Python
|
data_hub_call/requestInfoList.py
|
AnnAnnFryingPan/data_hub_call
|
907a481bb2adfff86d311bdf5a4fa352fd7e90be
|
[
"MIT"
] | null | null | null |
data_hub_call/requestInfoList.py
|
AnnAnnFryingPan/data_hub_call
|
907a481bb2adfff86d311bdf5a4fa352fd7e90be
|
[
"MIT"
] | null | null | null |
data_hub_call/requestInfoList.py
|
AnnAnnFryingPan/data_hub_call
|
907a481bb2adfff86d311bdf5a4fa352fd7e90be
|
[
"MIT"
] | null | null | null |
class RequestInfoList(object):
    """Collection of data-stream requests from any platform/hub."""

    def __init__(self):
        # Requests held by this list, in insertion order.
        self.requests = list()

    def __len__(self):
        # Delegate length to the underlying request list.
        return len(self.requests)
| 13.2
| 43
| 0.590909
| 22
| 198
| 4.954545
| 0.727273
| 0.220183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.287879
| 198
| 14
| 44
| 14.142857
| 0.77305
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
7cb297f0bb1a1d529f316b10c30f72c94508768b
| 2,880
|
py
|
Python
|
tests/features/steps/bot.py
|
fbenkstein/gerritbot-rs
|
eaf0d1ec81c4998c40469026b218e5018507f63a
|
[
"Apache-2.0"
] | 15
|
2017-05-17T18:19:00.000Z
|
2021-04-30T15:09:59.000Z
|
tests/features/steps/bot.py
|
fbenkstein/gerritbot-rs
|
eaf0d1ec81c4998c40469026b218e5018507f63a
|
[
"Apache-2.0"
] | 48
|
2017-05-18T00:36:25.000Z
|
2021-02-23T08:31:40.000Z
|
tests/features/steps/bot.py
|
fbenkstein/gerritbot-rs
|
eaf0d1ec81c4998c40469026b218e5018507f63a
|
[
"Apache-2.0"
] | 9
|
2017-06-14T13:43:13.000Z
|
2021-02-19T10:11:57.000Z
|
import itertools
from hamcrest import *
@when("we check for messages by the bot")
def step_impl(context):
    # Fetch the bot's messages; other steps read context.bot.current_messages.
    context.bot.get_messages()
@then("there are no messages")
def step_impl(context):
    # hamcrest assertion: the fetched message list must be empty.
    assert_that(context.bot.current_messages, empty())
@then('there is a message for {person} which includes the text "{text}"')
def step_impl(context, person, text):
    # Inline-text variant: delegate to the multiline ("following text") step.
    context.execute_steps(
        f'''
        then there is a message for {person} which includes the following text:
            """
            {text}
            """
        '''
    )
@then("there is a message for {person} which includes the following text")
def step_impl(context, person):
    """Assert some bot message to *person* contains context.text; remember it."""
    fragment = context.text
    recipient = context.accounts.get_person(person)
    messages = context.bot.get_messages_for_person(recipient)
    matcher = has_entry("text", contains_string(fragment))
    assert_that(messages, has_item(matcher))
    # Remember the first matching message for follow-up "this message" steps.
    context.last_matched_message = next(
        message for message in messages if matcher.matches(message)
    )
@then("there is a message for {person} with the following text")
def step_impl(context, person):
    """Assert some bot message to *person* equals context.text exactly."""
    # The step text may interpolate values from the scenario context.
    expected_text = context.text.format(context=context)
    recipient = context.accounts.get_person(person)
    messages = context.bot.get_messages_for_person(recipient)
    matcher = has_entry("text", equal_to(expected_text))
    assert_that(messages, has_item(matcher))
    # Remember the first matching message for follow-up "this message" steps.
    context.last_matched_message = next(
        message for message in messages if matcher.matches(message)
    )
@then('there is no message for {person} which includes the text "{text}"')
def step_impl(context, person, text):
    # Inline-text variant: delegate to the multiline ("following text") step.
    context.execute_steps(
        f'''
        then there is no message for {person} which includes the following text:
            """
            {text}
            """
        '''
    )
@then("there is no message for {person} which includes the following text")
def step_impl(context, person):
    """Assert that no bot message to *person* contains context.text."""
    fragment = context.text
    recipient = context.accounts.get_person(person)
    messages = context.bot.get_messages_for_person(recipient)
    matcher = has_entry("text", contains_string(fragment))
    assert_that(messages, is_not(has_item(matcher)))
@then('this message includes the text "{text}"')
def step_impl(context, text):
    # Checks the message remembered by a previous "there is a message" step.
    assert_that(context.last_matched_message, has_entry("text", contains_string(text)))
@then('this message does not include the text "{text}"')
def step_impl(context, text):
    # Negative check on the message remembered by a previous matching step.
    assert_that(
        context.last_matched_message, has_entry("text", is_not(contains_string(text)))
    )
@step("{sender} sends the {command} command to the bot")
def step_impl(context, sender, command):
    """Send *command* to the bot from one person, or from every known person."""
    if sender != "everybody":
        # Single sender: resolve the name to a person account first.
        originator = context.accounts.get_person(sender)
        context.bot.send_message(originator, command)
        return
    # "everybody": fan the command out to all known persons.
    for person in context.accounts.all_persons():
        context.bot.send_message(person, command)
| 30.967742
| 87
| 0.710069
| 400
| 2,880
| 4.905
| 0.165
| 0.087156
| 0.095311
| 0.091743
| 0.751784
| 0.751784
| 0.718145
| 0.70948
| 0.705403
| 0.705403
| 0
| 0
| 0.178125
| 2,880
| 92
| 88
| 31.304348
| 0.828897
| 0
| 0
| 0.521127
| 0
| 0
| 0.259375
| 0
| 0
| 0
| 0
| 0
| 0.084507
| 1
| 0.140845
| false
| 0
| 0.028169
| 0
| 0.169014
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7cbd84fa1dc7a116d2a631e93ab37a6c64914fa3
| 121
|
py
|
Python
|
posix_checkapi/TRACES/POT/ut_shims_testchunklib_smallchunk_bigmsg.py
|
JustinCappos/checkapi
|
2508c414869eda3479e1384b1bea65ec1e749d3b
|
[
"Apache-2.0"
] | null | null | null |
posix_checkapi/TRACES/POT/ut_shims_testchunklib_smallchunk_bigmsg.py
|
JustinCappos/checkapi
|
2508c414869eda3479e1384b1bea65ec1e749d3b
|
[
"Apache-2.0"
] | null | null | null |
posix_checkapi/TRACES/POT/ut_shims_testchunklib_smallchunk_bigmsg.py
|
JustinCappos/checkapi
|
2508c414869eda3479e1384b1bea65ec1e749d3b
|
[
"Apache-2.0"
] | null | null | null |
#pragma out
# Repy unit-test stub: execute the chunklib small-chunk/big-message test
# file without restrictions via the dylink portability shim.
import dylink_portability
dylink_portability.run_unrestricted_repy_code("testchunklib_smallchunk_bigmsg.py")
| 30.25
| 82
| 0.900826
| 15
| 121
| 6.8
| 0.866667
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041322
| 121
| 3
| 83
| 40.333333
| 0.87931
| 0.082645
| 0
| 0
| 0
| 0
| 0.3
| 0.3
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
7cf12b8c330d26c0881aacaddb0cce3f166ff92d
| 150
|
py
|
Python
|
main.py
|
NILL2021/floatey
|
adcd744e44cb3462f666f385a0fcfc977ddbf6f0
|
[
"MIT"
] | 4
|
2021-07-12T13:31:12.000Z
|
2021-07-27T08:27:31.000Z
|
main.py
|
NILL2021/floatey
|
adcd744e44cb3462f666f385a0fcfc977ddbf6f0
|
[
"MIT"
] | null | null | null |
main.py
|
NILL2021/floatey
|
adcd744e44cb3462f666f385a0fcfc977ddbf6f0
|
[
"MIT"
] | null | null | null |
from __future__ import division


def im_too_lazy_to_do_this_rn():
    """Print a placeholder message; the real logic is not implemented yet."""
    # Bug fix: the original `def` line was missing its trailing colon,
    # which made the whole module a SyntaxError.
    print("Errrrrrrrrrrrrrrrrrrrrrrrrrrrrrr im too lazy")


im_too_lazy_to_do_this_rn()
| 21.428571
| 55
| 0.846667
| 24
| 150
| 4.625
| 0.583333
| 0.135135
| 0.243243
| 0.198198
| 0.342342
| 0.342342
| 0.342342
| 0
| 0
| 0
| 0
| 0
| 0.1
| 150
| 6
| 56
| 25
| 0.822222
| 0
| 0
| 0
| 0
| 0
| 0.293333
| 0.213333
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.25
| null | null | 0.25
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6b207b6e1c6efaee984544d2c164361feaa58d4b
| 51
|
py
|
Python
|
eutl_scraper/__init__.py
|
jabrell/eutl_scraper
|
ba1a9717ce3ea506bed8869af195b30b139a9980
|
[
"MIT"
] | null | null | null |
eutl_scraper/__init__.py
|
jabrell/eutl_scraper
|
ba1a9717ce3ea506bed8869af195b30b139a9980
|
[
"MIT"
] | 2
|
2022-02-11T19:01:25.000Z
|
2022-02-17T13:03:00.000Z
|
eutl_scraper/__init__.py
|
jabrell/eutl_scraper
|
ba1a9717ce3ea506bed8869af195b30b139a9980
|
[
"MIT"
] | 1
|
2021-12-05T19:06:38.000Z
|
2021-12-05T19:06:38.000Z
|
from .transactions import *
from .mappings import *
| 25.5
| 27
| 0.784314
| 6
| 51
| 6.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 51
| 2
| 28
| 25.5
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6b41bffb5fd0303c283514e124b1bc27f34067bd
| 140
|
py
|
Python
|
metabench/common/constraint/constraint.py
|
ComeBertrand/metabench
|
e5eaa32b94239b8fa475eda940b8086eec178cfe
|
[
"MIT"
] | null | null | null |
metabench/common/constraint/constraint.py
|
ComeBertrand/metabench
|
e5eaa32b94239b8fa475eda940b8086eec178cfe
|
[
"MIT"
] | 15
|
2018-03-07T21:47:56.000Z
|
2018-05-12T08:45:20.000Z
|
metabench/common/constraint/constraint.py
|
ComeBertrand/metabench
|
e5eaa32b94239b8fa475eda940b8086eec178cfe
|
[
"MIT"
] | null | null | null |
"""
File: constraint.py
Author: Come Bertrand
Email: bertrand.cosme@gmail.com
Github: https://github.com/ComeBertrand
Description: TODO
"""
| 17.5
| 39
| 0.764286
| 18
| 140
| 5.944444
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092857
| 140
| 7
| 40
| 20
| 0.84252
| 0.935714
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.142857
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
860ac4ae7c9b45f242d442d089cf5817198b03ed
| 2,298
|
py
|
Python
|
src/HartreeParticleDSL/backends/Cabana_backend/Cabana_IO_Mixin.py
|
stfc/HartreeParticleDSL
|
17990f1a85c9cbec3c4dfa0923e2c44cad6f381c
|
[
"MIT"
] | null | null | null |
src/HartreeParticleDSL/backends/Cabana_backend/Cabana_IO_Mixin.py
|
stfc/HartreeParticleDSL
|
17990f1a85c9cbec3c4dfa0923e2c44cad6f381c
|
[
"MIT"
] | 47
|
2021-09-16T10:28:05.000Z
|
2022-03-15T14:24:33.000Z
|
src/HartreeParticleDSL/backends/Cabana_backend/Cabana_IO_Mixin.py
|
stfc/HartreeParticleDSL
|
17990f1a85c9cbec3c4dfa0923e2c44cad6f381c
|
[
"MIT"
] | 1
|
2021-09-27T15:20:01.000Z
|
2021-09-27T15:20:01.000Z
|
from abc import ABCMeta, abstractmethod
class Cabana_IO_Mixin(metaclass=ABCMeta):
    """Interface that IO modules must implement for the Cabana backend.

    Every hook below raises NotImplementedError until a concrete IO
    module overrides it.
    """

    def _unimplemented(self, function_name):
        # Shared error path: report the concrete subclass and the hook
        # that was not overridden.
        raise NotImplementedError(
            f"{self.__class__.__name__} does not "
            "implement required function "
            f"{function_name}"
        )

    def gen_code_cabana(self, part_type):
        """Generate and return the Cabana code for this IO module.

        :raises NotImplementedError: abstract method that must be
                                     overriden by children
        :returns: the Cabana code required for this IO module.
        :rtype: str
        """
        self._unimplemented("gen_code_cabana")

    def call_input_cabana(self, part_count, filename, current_indent=4):
        """Return the call required to use this IO module for input.

        :raises NotImplementedError: abstract method that must be
                                     overriden by children
        :returns: the code required to use this IO module for input.
        :rtype: str
        """
        self._unimplemented("call_input_cabana")

    def call_output_cabana(self, part_count, filename):
        """Return the call required to use this IO module for output.

        :raises NotImplementedError: abstract method that must be
                                     overriden by children
        :returns: the code required to use this IO module for output.
        :rtype: str
        """
        self._unimplemented("call_output_cabana")

    def get_includes_cabana(self):
        """Return the includes required to use this IO module for Cabana.

        :raises NotImplementedError: abstract method that must be
                                     overriden by children
        :returns: the includes for this IO module.
        :rtype: list of str
        """
        self._unimplemented("get_includes_cabana")
| 38.3
| 74
| 0.563534
| 231
| 2,298
| 5.372294
| 0.246753
| 0.064464
| 0.077357
| 0.068493
| 0.780822
| 0.721193
| 0.721193
| 0.645447
| 0.62772
| 0.62772
| 0
| 0.000703
| 0.380766
| 2,298
| 59
| 75
| 38.949153
| 0.871398
| 0.424282
| 0
| 0.444444
| 1
| 0
| 0.291289
| 0.090744
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.055556
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8625b1172e8d85c54132a3809a9b422e5dc5637f
| 119
|
py
|
Python
|
src/code_beatrix/algorithm/__init__.py
|
sdpython/code_beatrix
|
421101fb7460b3c23a5955e73395e6793d94e21a
|
[
"MIT"
] | 1
|
2017-12-01T12:43:53.000Z
|
2017-12-01T12:43:53.000Z
|
src/code_beatrix/algorithm/__init__.py
|
sdpython/code_beatrix
|
421101fb7460b3c23a5955e73395e6793d94e21a
|
[
"MIT"
] | 8
|
2018-01-07T14:41:25.000Z
|
2020-02-01T18:29:51.000Z
|
src/code_beatrix/algorithm/__init__.py
|
sdpython/code_beatrix
|
421101fb7460b3c23a5955e73395e6793d94e21a
|
[
"MIT"
] | 1
|
2019-10-27T20:40:04.000Z
|
2019-10-27T20:40:04.000Z
|
"""
@file
@brief shortcuts to algorithm
"""
from .tsp import voyageur_commerce_simple, distance_circuit, plot_circuit
| 17
| 73
| 0.789916
| 15
| 119
| 6
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 119
| 6
| 74
| 19.833333
| 0.857143
| 0.294118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8678fcc920f0b008783982339a27a09c2714b598
| 2,775
|
py
|
Python
|
src/leetcode_378_kth_smallest_element_in_a_sorted_matrix.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
src/leetcode_378_kth_smallest_element_in_a_sorted_matrix.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
src/leetcode_378_kth_smallest_element_in_a_sorted_matrix.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
# @l2g 378 python3
# [378] Kth Smallest Element in a Sorted Matrix
# Difficulty: Medium
# https://leetcode.com/problems/kth-smallest-element-in-a-sorted-matrix
#
# Given an n x n matrix where each of the rows and columns are sorted in ascending order,
# return the kth smallest element in the matrix.
# Note that it is the kth smallest element in the sorted order, not the kth distinct element.
#
# Example 1:
#
# Input: matrix = [[1,5,9],[10,11,13],[12,13,15]], k = 8
# Output: 13
# Explanation: The elements in the matrix are [1,5,9,10,11,12,13,13,15],
# and the 8th smallest number is 13
#
# Example 2:
#
# Input: matrix = [[-5]], k = 1
# Output: -5
#
#
# Constraints:
#
# n == matrix.length
# n == matrix[i].length
# 1 <= n <= 300
# -10^9 <= matrix[i][j] <= 10^9
# All the rows and columns of matrix are guaranteed to be sorted in non-decreasing order.
# 1 <= k <= n^2
#
#
import heapq
from typing import List
class Solution:
    def kthSmallest(self, matrix: List[List[int]], k: int) -> int:
        """Return the kth smallest value in a row- and column-sorted matrix.

        Performs an n-way merge with a min-heap seeded with the first
        element of every row; each of the k pops advances one position
        within the popped element's row.

        Time: O(k log n) for n rows.  Space: O(n).

        :param matrix: matrix whose rows and columns are sorted ascending
        :param k: 1-based rank of the element to return (1 <= k <= total cells)
        :returns: the kth smallest element in sorted order (duplicates counted)
        """
        # Heap entries are (value, column, row): value first so the heap
        # is ordered by matrix value.
        min_heap = [(row[0], 0, i) for i, row in enumerate(matrix)]
        heapq.heapify(min_heap)
        for _ in range(k):
            elem, col, row = heapq.heappop(min_heap)
            # Use this row's own length (not the row count) so
            # non-square matrices are handled correctly too.
            if col + 1 < len(matrix[row]):
                heapq.heappush(min_heap, (matrix[row][col + 1], col + 1, row))
        return elem
# Running this file directly executes its unit tests via pytest.
if __name__ == "__main__":
    # Imported lazily so the solution module has no hard pytest dependency.
    import os
    import pytest
    pytest.main([os.path.join("tests", "test_378.py")])
| 25.694444
| 93
| 0.602523
| 441
| 2,775
| 3.759637
| 0.249433
| 0.053076
| 0.086852
| 0.096502
| 0.705669
| 0.705669
| 0.705669
| 0.705669
| 0.705669
| 0.705669
| 0
| 0.065929
| 0.251171
| 2,775
| 107
| 94
| 25.934579
| 0.731954
| 0.699099
| 0
| 0
| 0
| 0
| 0.031746
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.266667
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8689430bb8408f81fce5899669290d85a61b8874
| 73,246
|
py
|
Python
|
thermo/critical.py
|
tedhyu/thermo
|
1966c7cba5a603984b49f22c97ff00a144d90812
|
[
"MIT"
] | 1
|
2021-03-05T23:39:47.000Z
|
2021-03-05T23:39:47.000Z
|
thermo/critical.py
|
tedhyu/thermo
|
1966c7cba5a603984b49f22c97ff00a144d90812
|
[
"MIT"
] | 1
|
2021-12-17T21:28:17.000Z
|
2021-12-17T21:28:17.000Z
|
thermo/critical.py
|
tedhyu/thermo
|
1966c7cba5a603984b49f22c97ff00a144d90812
|
[
"MIT"
] | 1
|
2022-01-18T16:14:59.000Z
|
2022-01-18T16:14:59.000Z
|
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
# Public API of this module: lookup functions, correlations, and the raw
# data tables (exported so callers can inspect the underlying data).
__all__ = ['Tc', 'Pc', 'Vc', 'Zc', 'Mersmann_Kind_predictor', 'third_property', 'critical_surface',
           'Ihmels', 'Meissner', 'Grigoras', 'Li',
           'Chueh_Prausnitz_Tc', 'Grieves_Thodos', 'modified_Wilson_Tc',
           'Tc_mixture', 'Pc_mixture', 'Chueh_Prausnitz_Vc',
           'modified_Wilson_Vc', 'Vc_mixture']
__all__.extend(['Tc_methods', 'Pc_methods', 'Vc_methods', 'Zc_methods',
                'critical_surface_methods', '_crit_IUPAC', '_crit_Matthews',
                '_crit_CRC', '_crit_PSRKR4', '_crit_PassutDanner', '_crit_Yaws'])
import os
import numpy as np
import pandas as pd
from thermo.utils import R, N_A
from thermo.utils import log
from thermo.utils import mixing_simple, none_and_length_check

# Directory of packaged TSV data files, resolved relative to this module.
folder = os.path.join(os.path.dirname(__file__), 'Critical Properties')

### Read the various data files
# All tables below are indexed by CASRN; loading happens at import time.
# IUPAC Organic data series
# TODO: 12E of this data http://pubsdc3.acs.org/doi/10.1021/acs.jced.5b00571
_crit_IUPAC = pd.read_csv(os.path.join(folder, 'IUPACOrganicCriticalProps.tsv'),
                          sep='\t', index_col=0)
_crit_Matthews = pd.read_csv(os.path.join(folder,
                             'Mathews1972InorganicCriticalProps.tsv'), sep='\t', index_col=0)
# CRC Handbook from TRC Organic data section (only in 2015)
# No Inorganic table was taken, although it is already present;
# data almost all from IUPAC
_crit_CRC = pd.read_csv(os.path.join(folder,
                        'CRCCriticalOrganics.tsv'), sep='\t', index_col=0)
# Derive the critical compressibility Zc = Pc*Vc/(R*Tc) for tables that
# do not tabulate it directly.
_crit_CRC['Zc'] = pd.Series(_crit_CRC['Pc']*_crit_CRC['Vc']/_crit_CRC['Tc']/R,
                            index=_crit_CRC.index)
_crit_PSRKR4 = pd.read_csv(os.path.join(folder,
                           'Appendix to PSRK Revision 4.tsv'), sep='\t', index_col=0)
_crit_PSRKR4['Zc'] = pd.Series(_crit_PSRKR4['Pc']*_crit_PSRKR4['Vc']/_crit_PSRKR4['Tc']/R,
                               index=_crit_PSRKR4.index)
_crit_PassutDanner = pd.read_csv(os.path.join(folder, 'PassutDanner1973.tsv'),
                                 sep='\t', index_col=0)
_crit_Yaws = pd.read_csv(os.path.join(folder, 'Yaws Collection.tsv'),
                         sep='\t', index_col=0)
_crit_Yaws['Zc'] = pd.Series(_crit_Yaws['Pc']*_crit_Yaws['Vc']/_crit_Yaws['Tc']/R,
                             index=_crit_Yaws.index)

### Strings defining each method
# Method-name constants used by the Tc/Pc/Vc/Zc lookup functions below.
IUPAC = 'IUPAC'
MATTHEWS = 'MATTHEWS'
CRC = 'CRC'
PSRK = 'PSRK'
PD = 'PD'
YAWS = 'YAWS'
SURF = 'SURF'
NONE = 'NONE'
# Preference-ordered method lists for critical temperature lookup.
Tc_methods = [IUPAC, MATTHEWS, CRC, PSRK, PD, YAWS, SURF]
def Tc(CASRN, AvailableMethods=False, Method=None, IgnoreMethods=[SURF]):
    r'''This function handles the retrieval of a chemical's critical
    temperature. Lookup is based on CASRNs. Will automatically select a data
    source to use if no Method is provided; returns None if the data is not
    available.

    Prefered sources are 'IUPAC' for organic chemicals, and 'MATTHEWS' for
    inorganic chemicals. Function has data for approximately 1000 chemicals.

    Parameters
    ----------
    CASRN : string
        CASRN [-]

    Returns
    -------
    Tc : float
        Critical temperature, [K]
    methods : list, only returned if AvailableMethods == True
        List of methods which can be used to obtain Tc with the given inputs

    Other Parameters
    ----------------
    Method : string, optional
        The method name to use. Accepted methods are 'IUPAC', 'MATTHEWS',
        'CRC', 'PSRK', 'PD', 'YAWS', and 'SURF'. All valid values are also held
        in the list `Tc_methods`.
    AvailableMethods : bool, optional
        If True, function will determine which methods can be used to obtain
        Tc for the desired chemical, and will return methods instead of Tc
    IgnoreMethods : list, optional
        A list of methods to ignore in obtaining the full list of methods,
        useful for for performance reasons and ignoring inaccurate methods

    Notes
    -----
    A total of seven sources are available for this function. They are:

        * 'IUPAC Organic Critical Properties', a series of critically evaluated
          experimental datum for organic compounds in [1]_, [2]_, [3]_, [4]_,
          [5]_, [6]_, [7]_, [8]_, [9]_, [10]_, [11]_, and [12]_.
        * 'Matthews Inorganic Critical Properties', a series of critically
          evaluated data for inorganic compounds in [13]_.
        * 'CRC Organic Critical Properties', a compillation of critically
          evaluated data by the TRC as published in [14]_.
        * 'PSRK Revision 4 Appendix', a compillation of experimental and
          estimated data published in [15]_.
        * 'Passut Danner 1973 Critical Properties', an older compillation of
          data published in [16]_
        * 'Yaws Critical Properties', a large compillation of data from a
          variety of sources; no data points are sourced in the work of [17]_.
        * 'Critical Surface', an estimation method using a
          simple quadratic method for estimating Tc from Pc and Vc. This is
          ignored and not returned as a method by default, as no compounds
          have values of Pc and Vc but not Tc currently.

    Examples
    --------
    >>> Tc(CASRN='64-17-5')
    514.0

    References
    ----------
    .. [1] Ambrose, Douglas, and Colin L. Young. "Vapor-Liquid Critical
       Properties of Elements and Compounds. 1. An Introductory Survey."
       Journal of Chemical & Engineering Data 41, no. 1 (January 1, 1996):
       154-154. doi:10.1021/je950378q.
    .. [2] Ambrose, Douglas, and Constantine Tsonopoulos. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 2. Normal Alkanes."
       Journal of Chemical & Engineering Data 40, no. 3 (May 1, 1995): 531-46.
       doi:10.1021/je00019a001.
    .. [3] Tsonopoulos, Constantine, and Douglas Ambrose. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 3. Aromatic
       Hydrocarbons." Journal of Chemical & Engineering Data 40, no. 3
       (May 1, 1995): 547-58. doi:10.1021/je00019a002.
    .. [4] Gude, Michael, and Amyn S. Teja. "Vapor-Liquid Critical Properties
       of Elements and Compounds. 4. Aliphatic Alkanols." Journal of Chemical
       & Engineering Data 40, no. 5 (September 1, 1995): 1025-36.
       doi:10.1021/je00021a001.
    .. [5] Daubert, Thomas E. "Vapor-Liquid Critical Properties of Elements
       and Compounds. 5. Branched Alkanes and Cycloalkanes." Journal of
       Chemical & Engineering Data 41, no. 3 (January 1, 1996): 365-72.
       doi:10.1021/je9501548.
    .. [6] Tsonopoulos, Constantine, and Douglas Ambrose. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 6. Unsaturated Aliphatic
       Hydrocarbons." Journal of Chemical & Engineering Data 41, no. 4
       (January 1, 1996): 645-56. doi:10.1021/je9501999.
    .. [7] Kudchadker, Arvind P., Douglas Ambrose, and Constantine Tsonopoulos.
       "Vapor-Liquid Critical Properties of Elements and Compounds. 7. Oxygen
       Compounds Other Than Alkanols and Cycloalkanols." Journal of Chemical &
       Engineering Data 46, no. 3 (May 1, 2001): 457-79. doi:10.1021/je0001680.
    .. [8] Tsonopoulos, Constantine, and Douglas Ambrose. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 8. Organic Sulfur,
       Silicon, and Tin Compounds (C + H + S, Si, and Sn)." Journal of Chemical
       & Engineering Data 46, no. 3 (May 1, 2001): 480-85.
       doi:10.1021/je000210r.
    .. [9] Marsh, Kenneth N., Colin L. Young, David W. Morton, Douglas Ambrose,
       and Constantine Tsonopoulos. "Vapor-Liquid Critical Properties of
       Elements and Compounds. 9. Organic Compounds Containing Nitrogen."
       Journal of Chemical & Engineering Data 51, no. 2 (March 1, 2006):
       305-14. doi:10.1021/je050221q.
    .. [10] Marsh, Kenneth N., Alan Abramson, Douglas Ambrose, David W. Morton,
       Eugene Nikitin, Constantine Tsonopoulos, and Colin L. Young.
       "Vapor-Liquid Critical Properties of Elements and Compounds. 10. Organic
       Compounds Containing Halogens." Journal of Chemical & Engineering Data
       52, no. 5 (September 1, 2007): 1509-38. doi:10.1021/je700336g.
    .. [11] Ambrose, Douglas, Constantine Tsonopoulos, and Eugene D. Nikitin.
       "Vapor-Liquid Critical Properties of Elements and Compounds. 11. Organic
       Compounds Containing B + O; Halogens + N, + O, + O + S, + S, + Si;
       N + O; and O + S, + Si." Journal of Chemical & Engineering Data 54,
       no. 3 (March 12, 2009): 669-89. doi:10.1021/je800580z.
    .. [12] Ambrose, Douglas, Constantine Tsonopoulos, Eugene D. Nikitin, David
       W. Morton, and Kenneth N. Marsh. "Vapor-Liquid Critical Properties of
       Elements and Compounds. 12. Review of Recent Data for Hydrocarbons and
       Non-Hydrocarbons." Journal of Chemical & Engineering Data, October 5,
       2015, 151005081500002. doi:10.1021/acs.jced.5b00571.
    .. [13] Mathews, Joseph F. "Critical Constants of Inorganic Substances."
       Chemical Reviews 72, no. 1 (February 1, 1972): 71-100.
       doi:10.1021/cr60275a004.
    .. [14] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
       Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.
    .. [15] Horstmann, Sven, Anna Jabłoniec, Jörg Krafczyk, Kai Fischer, and
       Jürgen Gmehling. "PSRK Group Contribution Equation of State:
       Comprehensive Revision and Extension IV, Including Critical Constants
       and Α-Function Parameters for 1000 Components." Fluid Phase Equilibria
       227, no. 2 (January 25, 2005): 157-64. doi:10.1016/j.fluid.2004.11.002.
    .. [16] Passut, Charles A., and Ronald P. Danner. "Acentric Factor. A
       Valuable Correlating Parameter for the Properties of Hydrocarbons."
       Industrial & Engineering Chemistry Process Design and Development 12,
       no. 3 (July 1, 1973): 365–68. doi:10.1021/i260047a026.
    .. [17] Yaws, Carl L. Thermophysical Properties of Chemicals and
       Hydrocarbons, Second Edition. Amsterdam Boston: Gulf Professional
       Publishing, 2014.
    '''
    def list_methods():
        # Build the preference-ordered list of data sources that actually
        # contain a Tc value for this CASRN.
        methods = []
        if CASRN in _crit_IUPAC.index and not np.isnan(_crit_IUPAC.at[CASRN, 'Tc']):
            methods.append(IUPAC)
        if CASRN in _crit_Matthews.index and not np.isnan(_crit_Matthews.at[CASRN, 'Tc']):
            methods.append(MATTHEWS)
        if CASRN in _crit_CRC.index and not np.isnan(_crit_CRC.at[CASRN, 'Tc']):
            methods.append(CRC)
        if CASRN in _crit_PSRKR4.index and not np.isnan(_crit_PSRKR4.at[CASRN, 'Tc']):
            methods.append(PSRK)
        if CASRN in _crit_PassutDanner.index and not np.isnan(_crit_PassutDanner.at[CASRN, 'Tc']):
            methods.append(PD)
        if CASRN in _crit_Yaws.index and not np.isnan(_crit_Yaws.at[CASRN, 'Tc']):
            methods.append(YAWS)
        if CASRN:
            # The surface estimation method applies to any CASRN in principle.
            methods.append(SURF)
        if IgnoreMethods:
            # Use a distinct loop variable: the original code reused `Method`
            # here, shadowing the enclosing function's argument.
            for ignored in IgnoreMethods:
                if ignored in methods:
                    methods.remove(ignored)
        methods.append(NONE)
        return methods
    if AvailableMethods:
        return list_methods()
    if not Method:
        # Default to the most-preferred source with data for this CASRN.
        Method = list_methods()[0]
    if Method == IUPAC:
        _Tc = float(_crit_IUPAC.at[CASRN, 'Tc'])
    elif Method == MATTHEWS:
        _Tc = float(_crit_Matthews.at[CASRN, 'Tc'])
    elif Method == PSRK:
        _Tc = float(_crit_PSRKR4.at[CASRN, 'Tc'])
    elif Method == PD:
        _Tc = float(_crit_PassutDanner.at[CASRN, 'Tc'])
    elif Method == CRC:
        _Tc = float(_crit_CRC.at[CASRN, 'Tc'])
    elif Method == YAWS:
        _Tc = float(_crit_Yaws.at[CASRN, 'Tc'])
    elif Method == SURF:
        _Tc = third_property(CASRN=CASRN, T=True)
    elif Method == NONE:
        _Tc = None
    else:
        # ValueError subclasses Exception, so existing callers catching
        # Exception are unaffected; the old message was garbled
        # ('Failure in in function').
        raise ValueError('Unrecognized method: ' + str(Method))
    return _Tc
Pc_methods = [IUPAC, MATTHEWS, CRC, PSRK, PD, YAWS, SURF]
def Pc(CASRN, AvailableMethods=False, Method=None, IgnoreMethods=[SURF]):
    r'''This function handles the retrieval of a chemical's critical
    pressure. Lookup is based on CASRNs. Will automatically select a data
    source to use if no Method is provided; returns None if the data is not
    available.

    Prefered sources are 'IUPAC' for organic chemicals, and 'MATTHEWS' for
    inorganic chemicals. Function has data for approximately 1000 chemicals.

    Examples
    --------
    >>> Pc(CASRN='64-17-5')
    6137000.0

    Parameters
    ----------
    CASRN : string
        CASRN [-]

    Returns
    -------
    Pc : float
        Critical pressure, [Pa]
    methods : list, only returned if AvailableMethods == True
        List of methods which can be used to obtain Pc with the given inputs

    Other Parameters
    ----------------
    Method : string, optional
        The method name to use. Accepted methods are 'IUPAC', 'MATTHEWS',
        'CRC', 'PSRK', 'PD', 'YAWS', and 'SURF'. All valid values are also held
        in the list `Pc_methods`.
    AvailableMethods : bool, optional
        If True, function will determine which methods can be used to obtain
        Pc for the desired chemical, and will return methods instead of Pc
    IgnoreMethods : list, optional
        A list of methods to ignore in obtaining the full list of methods,
        useful for for performance reasons and ignoring inaccurate methods

    Notes
    -----
    A total of seven sources are available for this function. They are:

        * 'IUPAC', a series of critically evaluated
          experimental datum for organic compounds in [1]_, [2]_, [3]_, [4]_,
          [5]_, [6]_, [7]_, [8]_, [9]_, [10]_, [11]_, and [12]_.
        * 'MATTHEWS', a series of critically
          evaluated data for inorganic compounds in [13]_.
        * 'CRC', a compillation of critically
          evaluated data by the TRC as published in [14]_.
        * 'PSRK', a compillation of experimental and
          estimated data published in [15]_.
        * 'PD', an older compillation of
          data published in [16]_
        * 'YAWS', a large compillation of data from a
          variety of sources; no data points are sourced in the work of [17]_.
        * 'SURF', an estimation method using a
          simple quadratic method for estimating Pc from Tc and Vc. This is
          ignored and not returned as a method by default.

    References
    ----------
    .. [1] Ambrose, Douglas, and Colin L. Young. "Vapor-Liquid Critical
       Properties of Elements and Compounds. 1. An Introductory Survey."
       Journal of Chemical & Engineering Data 41, no. 1 (January 1, 1996):
       154-154. doi:10.1021/je950378q.
    .. [2] Ambrose, Douglas, and Constantine Tsonopoulos. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 2. Normal Alkanes."
       Journal of Chemical & Engineering Data 40, no. 3 (May 1, 1995): 531-46.
       doi:10.1021/je00019a001.
    .. [3] Tsonopoulos, Constantine, and Douglas Ambrose. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 3. Aromatic
       Hydrocarbons." Journal of Chemical & Engineering Data 40, no. 3
       (May 1, 1995): 547-58. doi:10.1021/je00019a002.
    .. [4] Gude, Michael, and Amyn S. Teja. "Vapor-Liquid Critical Properties
       of Elements and Compounds. 4. Aliphatic Alkanols." Journal of Chemical
       & Engineering Data 40, no. 5 (September 1, 1995): 1025-36.
       doi:10.1021/je00021a001.
    .. [5] Daubert, Thomas E. "Vapor-Liquid Critical Properties of Elements
       and Compounds. 5. Branched Alkanes and Cycloalkanes." Journal of
       Chemical & Engineering Data 41, no. 3 (January 1, 1996): 365-72.
       doi:10.1021/je9501548.
    .. [6] Tsonopoulos, Constantine, and Douglas Ambrose. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 6. Unsaturated Aliphatic
       Hydrocarbons." Journal of Chemical & Engineering Data 41, no. 4
       (January 1, 1996): 645-56. doi:10.1021/je9501999.
    .. [7] Kudchadker, Arvind P., Douglas Ambrose, and Constantine Tsonopoulos.
       "Vapor-Liquid Critical Properties of Elements and Compounds. 7. Oxygen
       Compounds Other Than Alkanols and Cycloalkanols." Journal of Chemical &
       Engineering Data 46, no. 3 (May 1, 2001): 457-79. doi:10.1021/je0001680.
    .. [8] Tsonopoulos, Constantine, and Douglas Ambrose. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 8. Organic Sulfur,
       Silicon, and Tin Compounds (C + H + S, Si, and Sn)." Journal of Chemical
       & Engineering Data 46, no. 3 (May 1, 2001): 480-85.
       doi:10.1021/je000210r.
    .. [9] Marsh, Kenneth N., Colin L. Young, David W. Morton, Douglas Ambrose,
       and Constantine Tsonopoulos. "Vapor-Liquid Critical Properties of
       Elements and Compounds. 9. Organic Compounds Containing Nitrogen."
       Journal of Chemical & Engineering Data 51, no. 2 (March 1, 2006):
       305-14. doi:10.1021/je050221q.
    .. [10] Marsh, Kenneth N., Alan Abramson, Douglas Ambrose, David W. Morton,
       Eugene Nikitin, Constantine Tsonopoulos, and Colin L. Young.
       "Vapor-Liquid Critical Properties of Elements and Compounds. 10. Organic
       Compounds Containing Halogens." Journal of Chemical & Engineering Data
       52, no. 5 (September 1, 2007): 1509-38. doi:10.1021/je700336g.
    .. [11] Ambrose, Douglas, Constantine Tsonopoulos, and Eugene D. Nikitin.
       "Vapor-Liquid Critical Properties of Elements and Compounds. 11. Organic
       Compounds Containing B + O; Halogens + N, + O, + O + S, + S, + Si;
       N + O; and O + S, + Si." Journal of Chemical & Engineering Data 54,
       no. 3 (March 12, 2009): 669-89. doi:10.1021/je800580z.
    .. [12] Ambrose, Douglas, Constantine Tsonopoulos, Eugene D. Nikitin, David
       W. Morton, and Kenneth N. Marsh. "Vapor-Liquid Critical Properties of
       Elements and Compounds. 12. Review of Recent Data for Hydrocarbons and
       Non-Hydrocarbons." Journal of Chemical & Engineering Data, October 5,
       2015, 151005081500002. doi:10.1021/acs.jced.5b00571.
    .. [13] Mathews, Joseph F. "Critical Constants of Inorganic Substances."
       Chemical Reviews 72, no. 1 (February 1, 1972): 71-100.
       doi:10.1021/cr60275a004.
    .. [14] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
       Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.
    .. [15] Horstmann, Sven, Anna Jabłoniec, Jörg Krafczyk, Kai Fischer, and
       Jürgen Gmehling. "PSRK Group Contribution Equation of State:
       Comprehensive Revision and Extension IV, Including Critical Constants
       and Α-Function Parameters for 1000 Components." Fluid Phase Equilibria
       227, no. 2 (January 25, 2005): 157-64. doi:10.1016/j.fluid.2004.11.002.
    .. [16] Passut, Charles A., and Ronald P. Danner. "Acentric Factor. A
       Valuable Correlating Parameter for the Properties of Hydrocarbons."
       Industrial & Engineering Chemistry Process Design and Development 12,
       no. 3 (July 1, 1973): 365–68. doi:10.1021/i260047a026.
    .. [17] Yaws, Carl L. Thermophysical Properties of Chemicals and
       Hydrocarbons, Second Edition. Amsterdam Boston: Gulf Professional
       Publishing, 2014.
    '''
    def list_methods():
        # Build the preference-ordered list of data sources that actually
        # contain a Pc value for this CASRN.
        methods = []
        if CASRN in _crit_IUPAC.index and not np.isnan(_crit_IUPAC.at[CASRN, 'Pc']):
            methods.append(IUPAC)
        if CASRN in _crit_Matthews.index and not np.isnan(_crit_Matthews.at[CASRN, 'Pc']):
            methods.append(MATTHEWS)
        if CASRN in _crit_CRC.index and not np.isnan(_crit_CRC.at[CASRN, 'Pc']):
            methods.append(CRC)
        if CASRN in _crit_PSRKR4.index and not np.isnan(_crit_PSRKR4.at[CASRN, 'Pc']):
            methods.append(PSRK)
        if CASRN in _crit_PassutDanner.index and not np.isnan(_crit_PassutDanner.at[CASRN, 'Pc']):
            methods.append(PD)
        if CASRN in _crit_Yaws.index and not np.isnan(_crit_Yaws.at[CASRN, 'Pc']):
            methods.append(YAWS)
        if CASRN:
            # The surface estimation method applies to any CASRN in principle.
            methods.append(SURF)
        if IgnoreMethods:
            # Use a distinct loop variable: the original code reused `Method`
            # here, shadowing the enclosing function's argument.
            for ignored in IgnoreMethods:
                if ignored in methods:
                    methods.remove(ignored)
        methods.append(NONE)
        return methods
    if AvailableMethods:
        return list_methods()
    if not Method:
        # Default to the most-preferred source with data for this CASRN.
        Method = list_methods()[0]
    if Method == IUPAC:
        _Pc = float(_crit_IUPAC.at[CASRN, 'Pc'])
    elif Method == MATTHEWS:
        _Pc = float(_crit_Matthews.at[CASRN, 'Pc'])
    elif Method == CRC:
        _Pc = float(_crit_CRC.at[CASRN, 'Pc'])
    elif Method == PSRK:
        _Pc = float(_crit_PSRKR4.at[CASRN, 'Pc'])
    elif Method == PD:
        _Pc = float(_crit_PassutDanner.at[CASRN, 'Pc'])
    elif Method == YAWS:
        _Pc = float(_crit_Yaws.at[CASRN, 'Pc'])
    elif Method == SURF:
        _Pc = third_property(CASRN=CASRN, P=True)
    elif Method == NONE:
        # Assign-and-return to match the structure of Tc(); originally this
        # branch returned early while the others fell through.
        _Pc = None
    else:
        # ValueError subclasses Exception, so existing callers catching
        # Exception are unaffected; the old message was garbled
        # ('Failure in in function').
        raise ValueError('Unrecognized method: ' + str(Method))
    return _Pc
Vc_methods = [IUPAC, MATTHEWS, CRC, PSRK, YAWS, SURF]
def Vc(CASRN, AvailableMethods=False, Method=None, IgnoreMethods=[SURF]):
    r'''This function handles the retrieval of a chemical's critical
    volume. Lookup is based on CASRNs. Will automatically select a data
    source to use if no Method is provided; returns None if the data is not
    available.

    Prefered sources are 'IUPAC' for organic chemicals, and 'MATTHEWS' for
    inorganic chemicals. Function has data for approximately 1000 chemicals.

    Examples
    --------
    >>> Vc(CASRN='64-17-5')
    0.000168

    Parameters
    ----------
    CASRN : string
        CASRN [-]

    Returns
    -------
    Vc : float
        Critical volume, [m^3/mol]
    methods : list, only returned if AvailableMethods == True
        List of methods which can be used to obtain Vc with the given inputs

    Other Parameters
    ----------------
    Method : string, optional
        The method name to use. Accepted methods are 'IUPAC', 'MATTHEWS',
        'CRC', 'PSRK', 'YAWS', and 'SURF'. All valid values are also held
        in the list `Vc_methods`.
    AvailableMethods : bool, optional
        If True, function will determine which methods can be used to obtain
        Vc for the desired chemical, and will return methods instead of Vc
    IgnoreMethods : list, optional
        A list of methods to ignore in obtaining the full list of methods,
        useful for for performance reasons and ignoring inaccurate methods

    Notes
    -----
    A total of six sources are available for this function. They are:

        * 'IUPAC', a series of critically evaluated
          experimental datum for organic compounds in [1]_, [2]_, [3]_, [4]_,
          [5]_, [6]_, [7]_, [8]_, [9]_, [10]_, [11]_, and [12]_.
        * 'MATTHEWS', a series of critically
          evaluated data for inorganic compounds in [13]_.
        * 'CRC', a compillation of critically
          evaluated data by the TRC as published in [14]_.
        * 'PSRK', a compillation of experimental and
          estimated data published in [15]_.
        * 'YAWS', a large compillation of data from a
          variety of sources; no data points are sourced in the work of [16]_.
        * 'SURF', an estimation method using a
          simple quadratic method for estimating Pc from Tc and Vc. This is
          ignored and not returned as a method by default

    References
    ----------
    .. [1] Ambrose, Douglas, and Colin L. Young. "Vapor-Liquid Critical
       Properties of Elements and Compounds. 1. An Introductory Survey."
       Journal of Chemical & Engineering Data 41, no. 1 (January 1, 1996):
       154-154. doi:10.1021/je950378q.
    .. [2] Ambrose, Douglas, and Constantine Tsonopoulos. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 2. Normal Alkanes."
       Journal of Chemical & Engineering Data 40, no. 3 (May 1, 1995): 531-46.
       doi:10.1021/je00019a001.
    .. [3] Tsonopoulos, Constantine, and Douglas Ambrose. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 3. Aromatic
       Hydrocarbons." Journal of Chemical & Engineering Data 40, no. 3
       (May 1, 1995): 547-58. doi:10.1021/je00019a002.
    .. [4] Gude, Michael, and Amyn S. Teja. "Vapor-Liquid Critical Properties
       of Elements and Compounds. 4. Aliphatic Alkanols." Journal of Chemical
       & Engineering Data 40, no. 5 (September 1, 1995): 1025-36.
       doi:10.1021/je00021a001.
    .. [5] Daubert, Thomas E. "Vapor-Liquid Critical Properties of Elements
       and Compounds. 5. Branched Alkanes and Cycloalkanes." Journal of
       Chemical & Engineering Data 41, no. 3 (January 1, 1996): 365-72.
       doi:10.1021/je9501548.
    .. [6] Tsonopoulos, Constantine, and Douglas Ambrose. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 6. Unsaturated Aliphatic
       Hydrocarbons." Journal of Chemical & Engineering Data 41, no. 4
       (January 1, 1996): 645-56. doi:10.1021/je9501999.
    .. [7] Kudchadker, Arvind P., Douglas Ambrose, and Constantine Tsonopoulos.
       "Vapor-Liquid Critical Properties of Elements and Compounds. 7. Oxygen
       Compounds Other Than Alkanols and Cycloalkanols." Journal of Chemical &
       Engineering Data 46, no. 3 (May 1, 2001): 457-79. doi:10.1021/je0001680.
    .. [8] Tsonopoulos, Constantine, and Douglas Ambrose. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 8. Organic Sulfur,
       Silicon, and Tin Compounds (C + H + S, Si, and Sn)." Journal of Chemical
       & Engineering Data 46, no. 3 (May 1, 2001): 480-85.
       doi:10.1021/je000210r.
    .. [9] Marsh, Kenneth N., Colin L. Young, David W. Morton, Douglas Ambrose,
       and Constantine Tsonopoulos. "Vapor-Liquid Critical Properties of
       Elements and Compounds. 9. Organic Compounds Containing Nitrogen."
       Journal of Chemical & Engineering Data 51, no. 2 (March 1, 2006):
       305-14. doi:10.1021/je050221q.
    .. [10] Marsh, Kenneth N., Alan Abramson, Douglas Ambrose, David W. Morton,
       Eugene Nikitin, Constantine Tsonopoulos, and Colin L. Young.
       "Vapor-Liquid Critical Properties of Elements and Compounds. 10. Organic
       Compounds Containing Halogens." Journal of Chemical & Engineering Data
       52, no. 5 (September 1, 2007): 1509-38. doi:10.1021/je700336g.
    .. [11] Ambrose, Douglas, Constantine Tsonopoulos, and Eugene D. Nikitin.
       "Vapor-Liquid Critical Properties of Elements and Compounds. 11. Organic
       Compounds Containing B + O; Halogens + N, + O, + O + S, + S, + Si;
       N + O; and O + S, + Si." Journal of Chemical & Engineering Data 54,
       no. 3 (March 12, 2009): 669-89. doi:10.1021/je800580z.
    .. [12] Ambrose, Douglas, Constantine Tsonopoulos, Eugene D. Nikitin, David
       W. Morton, and Kenneth N. Marsh. "Vapor-Liquid Critical Properties of
       Elements and Compounds. 12. Review of Recent Data for Hydrocarbons and
       Non-Hydrocarbons." Journal of Chemical & Engineering Data, October 5,
       2015, 151005081500002. doi:10.1021/acs.jced.5b00571.
    .. [13] Mathews, Joseph F. "Critical Constants of Inorganic Substances."
       Chemical Reviews 72, no. 1 (February 1, 1972): 71-100.
       doi:10.1021/cr60275a004.
    .. [14] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
       Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.
    .. [15] Horstmann, Sven, Anna Jabłoniec, Jörg Krafczyk, Kai Fischer, and
       Jürgen Gmehling. "PSRK Group Contribution Equation of State:
       Comprehensive Revision and Extension IV, Including Critical Constants
       and Α-Function Parameters for 1000 Components." Fluid Phase Equilibria
       227, no. 2 (January 25, 2005): 157-64. doi:10.1016/j.fluid.2004.11.002.
    .. [16] Yaws, Carl L. Thermophysical Properties of Chemicals and
       Hydrocarbons, Second Edition. Amsterdam Boston: Gulf Professional
       Publishing, 2014.
    '''
    def list_methods():
        # Build the preference-ordered list of data sources that actually
        # contain a Vc value for this CASRN.
        methods = []
        if CASRN in _crit_IUPAC.index and not np.isnan(_crit_IUPAC.at[CASRN, 'Vc']):
            methods.append(IUPAC)
        if CASRN in _crit_Matthews.index and not np.isnan(_crit_Matthews.at[CASRN, 'Vc']):
            methods.append(MATTHEWS)
        if CASRN in _crit_CRC.index and not np.isnan(_crit_CRC.at[CASRN, 'Vc']):
            methods.append(CRC)
        if CASRN in _crit_PSRKR4.index and not np.isnan(_crit_PSRKR4.at[CASRN, 'Vc']):
            methods.append(PSRK)
        if CASRN in _crit_Yaws.index and not np.isnan(_crit_Yaws.at[CASRN, 'Vc']):
            methods.append(YAWS)
        if CASRN:
            # The surface estimation method applies to any CASRN in principle.
            methods.append(SURF)
        if IgnoreMethods:
            # Use a distinct loop variable: the original code reused `Method`
            # here, shadowing the enclosing function's argument.
            for ignored in IgnoreMethods:
                if ignored in methods:
                    methods.remove(ignored)
        methods.append(NONE)
        return methods
    if AvailableMethods:
        return list_methods()
    if not Method:
        # Default to the most-preferred source with data for this CASRN.
        Method = list_methods()[0]
    if Method == IUPAC:
        _Vc = float(_crit_IUPAC.at[CASRN, 'Vc'])
    elif Method == PSRK:
        _Vc = float(_crit_PSRKR4.at[CASRN, 'Vc'])
    elif Method == MATTHEWS:
        _Vc = float(_crit_Matthews.at[CASRN, 'Vc'])
    elif Method == CRC:
        _Vc = float(_crit_CRC.at[CASRN, 'Vc'])
    elif Method == YAWS:
        _Vc = float(_crit_Yaws.at[CASRN, 'Vc'])
    elif Method == SURF:
        _Vc = third_property(CASRN=CASRN, V=True)
    elif Method == NONE:
        # Assign-and-return to match the structure of Tc(); originally this
        # branch returned early while the others fell through.
        _Vc = None
    else:
        # ValueError subclasses Exception, so existing callers catching
        # Exception are unaffected; the old message was garbled
        # ('Failure in in function').
        raise ValueError('Unrecognized method: ' + str(Method))
    return _Vc
COMBINED = 'COMBINED'
Zc_methods = [IUPAC, MATTHEWS, CRC, PSRK, YAWS, COMBINED]
def Zc(CASRN, AvailableMethods=False, Method=None, IgnoreMethods=None):
    r'''This function handles the retrieval of a chemical's critical
    compressibility. Lookup is based on CASRNs. Will automatically select a
    data source to use if no Method is provided; returns None if the data is
    not available.

    Prefered sources are 'IUPAC' for organic chemicals, and 'MATTHEWS' for
    inorganic chemicals. Function has data for approximately 1000 chemicals.

    Examples
    --------
    >>> Zc(CASRN='64-17-5')
    0.24100000000000002

    Parameters
    ----------
    CASRN : string
        CASRN [-]

    Returns
    -------
    Zc : float
        Critical compressibility, [-]
    methods : list, only returned if AvailableMethods == True
        List of methods which can be used to obtain Zc with the given inputs

    Other Parameters
    ----------------
    Method : string, optional
        The method name to use. Accepted methods are 'IUPAC', 'MATTHEWS',
        'CRC', 'PSRK', 'YAWS', and 'COMBINED'. All valid values are also held
        in `Zc_methods`.
    AvailableMethods : bool, optional
        If True, function will determine which methods can be used to obtain
        Zc for the desired chemical, and will return methods instead of Zc
    IgnoreMethods : list, optional
        A list of methods to ignore in obtaining the full list of methods,
        useful for performance reasons and ignoring inaccurate methods;
        defaults to ['COMBINED']

    Notes
    -----
    A total of five sources are available for this function. They are:

        * 'IUPAC', a series of critically evaluated
          experimental datum for organic compounds in [1]_, [2]_, [3]_, [4]_,
          [5]_, [6]_, [7]_, [8]_, [9]_, [10]_, [11]_, and [12]_.
        * 'MATTHEWS', a series of critically
          evaluated data for inorganic compounds in [13]_.
        * 'CRC', a compillation of critically
          evaluated data by the TRC as published in [14]_.
        * 'PSRK', a compillation of experimental and
          estimated data published in [15]_.
        * 'YAWS', a large compillation of data from a
          variety of sources; no data points are sourced in the work of [16]_.

    References
    ----------
    .. [1] Ambrose, Douglas, and Colin L. Young. "Vapor-Liquid Critical
       Properties of Elements and Compounds. 1. An Introductory Survey."
       Journal of Chemical & Engineering Data 41, no. 1 (January 1, 1996):
       154-154. doi:10.1021/je950378q.
    .. [2] Ambrose, Douglas, and Constantine Tsonopoulos. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 2. Normal Alkanes."
       Journal of Chemical & Engineering Data 40, no. 3 (May 1, 1995): 531-46.
       doi:10.1021/je00019a001.
    .. [3] Tsonopoulos, Constantine, and Douglas Ambrose. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 3. Aromatic
       Hydrocarbons." Journal of Chemical & Engineering Data 40, no. 3
       (May 1, 1995): 547-58. doi:10.1021/je00019a002.
    .. [4] Gude, Michael, and Amyn S. Teja. "Vapor-Liquid Critical Properties
       of Elements and Compounds. 4. Aliphatic Alkanols." Journal of Chemical
       & Engineering Data 40, no. 5 (September 1, 1995): 1025-36.
       doi:10.1021/je00021a001.
    .. [5] Daubert, Thomas E. "Vapor-Liquid Critical Properties of Elements
       and Compounds. 5. Branched Alkanes and Cycloalkanes." Journal of
       Chemical & Engineering Data 41, no. 3 (January 1, 1996): 365-72.
       doi:10.1021/je9501548.
    .. [6] Tsonopoulos, Constantine, and Douglas Ambrose. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 6. Unsaturated Aliphatic
       Hydrocarbons." Journal of Chemical & Engineering Data 41, no. 4
       (January 1, 1996): 645-56. doi:10.1021/je9501999.
    .. [7] Kudchadker, Arvind P., Douglas Ambrose, and Constantine Tsonopoulos.
       "Vapor-Liquid Critical Properties of Elements and Compounds. 7. Oxygen
       Compounds Other Than Alkanols and Cycloalkanols." Journal of Chemical &
       Engineering Data 46, no. 3 (May 1, 2001): 457-79. doi:10.1021/je0001680.
    .. [8] Tsonopoulos, Constantine, and Douglas Ambrose. "Vapor-Liquid
       Critical Properties of Elements and Compounds. 8. Organic Sulfur,
       Silicon, and Tin Compounds (C + H + S, Si, and Sn)." Journal of Chemical
       & Engineering Data 46, no. 3 (May 1, 2001): 480-85.
       doi:10.1021/je000210r.
    .. [9] Marsh, Kenneth N., Colin L. Young, David W. Morton, Douglas Ambrose,
       and Constantine Tsonopoulos. "Vapor-Liquid Critical Properties of
       Elements and Compounds. 9. Organic Compounds Containing Nitrogen."
       Journal of Chemical & Engineering Data 51, no. 2 (March 1, 2006):
       305-14. doi:10.1021/je050221q.
    .. [10] Marsh, Kenneth N., Alan Abramson, Douglas Ambrose, David W. Morton,
       Eugene Nikitin, Constantine Tsonopoulos, and Colin L. Young.
       "Vapor-Liquid Critical Properties of Elements and Compounds. 10. Organic
       Compounds Containing Halogens." Journal of Chemical & Engineering Data
       52, no. 5 (September 1, 2007): 1509-38. doi:10.1021/je700336g.
    .. [11] Ambrose, Douglas, Constantine Tsonopoulos, and Eugene D. Nikitin.
       "Vapor-Liquid Critical Properties of Elements and Compounds. 11. Organic
       Compounds Containing B + O; Halogens + N, + O, + O + S, + S, + Si;
       N + O; and O + S, + Si." Journal of Chemical & Engineering Data 54,
       no. 3 (March 12, 2009): 669-89. doi:10.1021/je800580z.
    .. [12] Ambrose, Douglas, Constantine Tsonopoulos, Eugene D. Nikitin, David
       W. Morton, and Kenneth N. Marsh. "Vapor-Liquid Critical Properties of
       Elements and Compounds. 12. Review of Recent Data for Hydrocarbons and
       Non-Hydrocarbons." Journal of Chemical & Engineering Data, October 5,
       2015, 151005081500002. doi:10.1021/acs.jced.5b00571.
    .. [13] Mathews, Joseph F. "Critical Constants of Inorganic Substances."
       Chemical Reviews 72, no. 1 (February 1, 1972): 71-100.
       doi:10.1021/cr60275a004.
    .. [14] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
       Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.
    .. [15] Horstmann, Sven, Anna Jabłoniec, Jörg Krafczyk, Kai Fischer, and
       Jürgen Gmehling. "PSRK Group Contribution Equation of State:
       Comprehensive Revision and Extension IV, Including Critical Constants
       and Α-Function Parameters for 1000 Components." Fluid Phase Equilibria
       227, no. 2 (January 25, 2005): 157-64. doi:10.1016/j.fluid.2004.11.002.
    .. [16] Yaws, Carl L. Thermophysical Properties of Chemicals and
       Hydrocarbons, Second Edition. Amsterdam Boston: Gulf Professional
       Publishing, 2014.
    '''
    if IgnoreMethods is None:
        # Mutable default arguments are shared between calls; use None as the
        # declared default and substitute the documented default here.
        IgnoreMethods = [COMBINED]
    def list_methods():
        # Build the list of sources holding a non-NaN Zc for this CASRN,
        # in order of decreasing preference.
        methods = []
        if CASRN in _crit_IUPAC.index and not np.isnan(_crit_IUPAC.at[CASRN, 'Zc']):
            methods.append(IUPAC)
        if CASRN in _crit_Matthews.index and not np.isnan(_crit_Matthews.at[CASRN, 'Zc']):
            methods.append(MATTHEWS)
        if CASRN in _crit_CRC.index and not np.isnan(_crit_CRC.at[CASRN, 'Zc']):
            methods.append(CRC)
        if CASRN in _crit_PSRKR4.index and not np.isnan(_crit_PSRKR4.at[CASRN, 'Zc']):
            methods.append(PSRK)
        if CASRN in _crit_Yaws.index and not np.isnan(_crit_Yaws.at[CASRN, 'Zc']):
            methods.append(YAWS)
        # COMBINED is only possible if all three of Tc, Vc, Pc can be found.
        if Tc(CASRN) and Vc(CASRN) and Pc(CASRN):
            methods.append(COMBINED)
        if IgnoreMethods:
            for Method in IgnoreMethods:
                if Method in methods:
                    methods.remove(Method)
        methods.append(NONE)
        return methods
    if AvailableMethods:
        return list_methods()
    if not Method:
        Method = list_methods()[0]
    # This is the calculate, given the method section
    if Method == IUPAC:
        _Zc = float(_crit_IUPAC.at[CASRN, 'Zc'])
    elif Method == PSRK:
        _Zc = float(_crit_PSRKR4.at[CASRN, 'Zc'])
    elif Method == MATTHEWS:
        _Zc = float(_crit_Matthews.at[CASRN, 'Zc'])
    elif Method == CRC:
        _Zc = float(_crit_CRC.at[CASRN, 'Zc'])
    elif Method == YAWS:
        _Zc = float(_crit_Yaws.at[CASRN, 'Zc'])
    elif Method == COMBINED:
        # Definition of critical compressibility: Zc = Pc*Vc/(R*Tc)
        _Zc = Vc(CASRN)*Pc(CASRN)/Tc(CASRN)/R
    elif Method == NONE:
        return None
    else:
        raise Exception('Failure in in function')
    return _Zc
# Covalent radii (angstroms) for the elements covered by the original
# Mersmann-Kind critical-volume correlation.
rcovs_Mersmann_Kind = {'C': 0.77, 'Cl': 0.99, 'I': 1.33, 'H': 0.37, 'F': 0.71,
                       'S': 1.04, 'O': 0.6, 'N': 0.71, 'Si': 1.17, 'Br': 1.14}

# Alternative per-element contributions regressed for use with
# Mersmann_Kind_predictor (pair with coeff=4.261206523632586 and
# power=0.5597281770786228 — see the function's docstring).
rcovs_regressed = {
    u'Nb': 0.5139380605234125,
    u'Ne': 0.7708216694154189,
    u'Al': 1.004994775098707,
    u'Re': 1.1164444694484814,
    u'Rb': 2.9910506044828837,
    u'Rn': 1.9283158156480653,
    u'Xe': 1.694221043013319,
    u'Ta': 1.1185133195453156,
    u'Bi': 1.8436438207262267,
    u'Br': 1.3081458724155532,
    u'Hf': 0.8829545460486594,
    u'Mo': 0.740396259301556,
    u'He': 0.9808144122544257,
    u'C': 0.6068586007600608,
    u'B': 0.7039677272439753,
    u'F': 0.5409105884533288,
    u'I': 1.7262432419406561,
    u'H': 0.33296601702348533,
    u'K': 0.7384112258842432,
    u'O': 0.5883254088243008,
    u'N': 0.5467979701131293,
    u'P': 1.0444655158949694,
    u'Si': 1.4181434041348049,
    u'U': 1.5530287578073485,
    u'Sn': 1.3339487990207999,
    u'W': 0.8355335838735266,
    u'V': 0.6714619384794069,
    u'Sb': 0.8840680681215854,
    u'Se': 1.5747549515496795,
    u'Ge': 1.0730584829731715,
    u'Kr': 1.393999829252709,
    u'Cl': 1.0957835025011224,
    u'S': 1.0364452121761167,
    u'Hg': 0.6750818243474633,
    u'As': 0.6750687692915264,
    u'Ar': 1.2008872952022298,
    u'Cs': 3.433699060142929,
    u'Zr': 0.9346554283483623}

def Mersmann_Kind_predictor(atoms, coeff=3.645, power=0.5,
                            covalent_radii=rcovs_Mersmann_Kind):
    r'''Predicts the critical molar volume of a chemical based only on its
    atomic composition according to [1]_ and [2]_. This is a crude approach,
    but provides very reasonable estimates in practice. Optionally, the
    `coeff` used and the `power` in the fraction as well as the atomic
    contributions can be adjusted; this method is general and atomic
    contributions can be regressed to predict other properties with this
    routine.

    .. math::
        \frac{\left(\frac{V_c}{n_a N_A}\right)^{1/3}}{d_a}
        = \frac{3.645}{\left(\frac{r_a}{r_H}\right)^{1/2}}

        r_a = d_a/2

        d_a = 2 \frac{\sum_i (n_i r_i)}{n_a}

    In the above equations, :math:`n_i` is the number of atoms of species i in
    the molecule, :math:`r_i` is the covalent atomic radius of the atom, and
    :math:`n_a` is the total number of atoms in the molecule.

    Parameters
    ----------
    atoms : dict
        Dictionary of atoms and their counts, [-]
    coeff : float, optional
        Coefficient used in the relationship, [m^2]
    power : float, optional
        Power applied to the relative atomic radius, [-]
    covalent_radii : dict or indexable, optional
        Object which can be indexed to atomic contrinbutions (by symbol), [-]

    Returns
    -------
    Vc : float
        Predicted critical volume of the chemical, [m^3/mol]

    Notes
    -----
    Using the :obj:`thermo.elements.periodic_table` covalent radii (from RDKit),
    the coefficient and power should be 4.261206523632586 and 0.5597281770786228
    respectively for best results.

    Examples
    --------
    Prediction of critical volume of decane:

    >>> Mersmann_Kind_predictor({'C': 10, 'H': 22})
    0.0005851859052024729

    This is compared against the experimental value, 0.000624 (a 6.2% relative
    error)

    Using custom fitted coefficients we can do a bit better:

    >>> from thermo.critical import rcovs_regressed
    >>> Mersmann_Kind_predictor({'C': 10, 'H': 22}, coeff=4.261206523632586,
    ... power=0.5597281770786228, covalent_radii=rcovs_regressed)
    0.0005956871011923075

    The relative error is only 4.5% now. This is compared to an experimental
    uncertainty of 5.6%.

    Evaluating 1321 critical volumes in the database, the average relative
    error is 5.0%; standard deviation 6.8%; and worst value of 79% relative
    error for phosphorus.

    References
    ----------
    .. [1] Mersmann, Alfons, and Matthias Kind. "Correlation for the Prediction
       of Critical Molar Volume." Industrial & Engineering Chemistry Research,
       October 16, 2017. https://doi.org/10.1021/acs.iecr.7b03171.
    .. [2] Mersmann, Alfons, and Matthias Kind. "Prediction of Mechanical and
       Thermal Properties of Pure Liquids, of Critical Data, and of Vapor
       Pressure." Industrial & Engineering Chemistry Research, January 31,
       2017. https://doi.org/10.1021/acs.iecr.6b04323.
    '''
    # Hydrogen's radius is the normalizing reference in the correlation.
    H_RADIUS_COV = covalent_radii['H']
    atom_total = 0
    radius_sum = 0.0
    for symbol, count in atoms.items():
        if symbol not in covalent_radii:
            raise Exception('Atom %s is not supported by the supplied dictionary' %symbol)
        radius_sum += count*covalent_radii[symbol]
        atom_total += count
    # Count-weighted mean atomic diameter and radius, in angstroms.
    da = 2.*radius_sum/atom_total
    ra = da/2.
    da_SI = da*1e-10 # Convert from angstrom to m
    return ((coeff/(ra/H_RADIUS_COV)**power)*da_SI)**3*N_A*atom_total
### Critical Property Relationships
def Ihmels(Tc=None, Pc=None, Vc=None):
    r'''Most recent, and most recommended method of estimating critical
    properties from each other. Two of the three properties are required.
    This model uses the "critical surface", a general plot of Tc vs Pc vs Vc.
    The model used 421 organic compounds to derive equation.

    The general equation is in [1]_:

    .. math::
        P_c = -0.025 + 2.215 \frac{T_c}{V_c}

    Parameters
    ----------
    Tc : float
        Critical temperature of fluid (optional) [K]
    Pc : float
        Critical pressure of fluid (optional) [Pa]
    Vc : float
        Critical volume of fluid (optional) [m^3/mol]

    Returns
    -------
    Tc, Pc or Vc : float
        Critical property of fluid [K], [Pa], or [m^3/mol]

    Notes
    -----
    The prediction of Tc from Pc and Vc is not tested, as this is not necessary
    anywhere, but it is implemented.
    Internal units are MPa, cm^3/mol, and K. A slight error occurs when
    Pa, cm^3/mol and K are used instead, on the order of <0.2%.
    Their equation was also compared with 56 inorganic and elements.
    Devations of 20% for <200K or >1000K points.

    Examples
    --------
    Succinic acid [110-15-6]

    >>> Ihmels(Tc=851.0, Vc=0.000308)
    6095016.233766234

    References
    ----------
    .. [1] Ihmels, E. Christian. "The Critical Surface." Journal of Chemical
       & Engineering Data 55, no. 9 (September 9, 2010): 3474-80.
       doi:10.1021/je100167w.
    '''
    # Each branch converts to the correlation's internal units (K, cm^3/mol,
    # MPa), solves the surface equation for the missing property, and converts
    # the answer back to SI.
    if Tc and Vc:
        Vc_cm3 = Vc*1E6  # m^3/mol to cm^3/mol
        Pc_MPa = -0.025 + 2.215*Tc/Vc_cm3
        return Pc_MPa*1E6  # MPa to Pa
    if Tc and Pc:
        Pc_MPa = Pc/1E6  # Pa to MPa
        Vc_cm3 = 443*Tc/(200*Pc_MPa + 5)
        return Vc_cm3/1E6  # cm^3/mol to m^3/mol
    if Pc and Vc:
        Pc_MPa = Pc/1E6  # Pa to MPa
        Vc_cm3 = Vc*1E6  # m^3/mol to cm^3/mol
        return 5.0/443*(40*Pc_MPa*Vc_cm3 + Vc_cm3)
    raise Exception('Two of Tc, Pc, and Vc must be provided')
def Meissner(Tc=None, Pc=None, Vc=None):
    r'''Old (1942) relationship for estimating critical
    properties from each other. Two of the three properties are required.
    This model uses the "critical surface", a general plot of Tc vs Pc vs Vc.
    The model used 42 organic and inorganic compounds to derive the equation.

    The general equation is in [1]_ (note: the code below, and the worked
    example, use the coefficient 20.8 with Pc in atm and Vc in cm^3/mol):

    .. math::
        P_c = \frac{20.8 T_c}{V_c-8}

    Parameters
    ----------
    Tc : float, optional
        Critical temperature of fluid [K]
    Pc : float, optional
        Critical pressure of fluid [Pa]
    Vc : float, optional
        Critical volume of fluid [m^3/mol]

    Returns
    -------
    Tc, Pc or Vc : float
        Critical property of fluid [K], [Pa], or [m^3/mol]

    Notes
    -----
    The prediction of Tc from Pc and Vc is not tested, as this is not necessary
    anywhere, but it is implemented.
    Internal units are atm, cm^3/mol, and K. A slight error occurs when
    Pa, cm^3/mol and K are used instead, on the order of <0.2%.
    This equation is less accurate than that of Ihmels, but surprisingly close.
    The author also proposed means of estimated properties independently.

    Examples
    --------
    Succinic acid [110-15-6]

    >>> Meissner(Tc=851.0, Vc=0.000308)
    5978445.199999999

    References
    ----------
    .. [1] Meissner, H. P., and E. M. Redding. "Prediction of Critical
       Constants." Industrial & Engineering Chemistry 34, no. 5
       (May 1, 1942): 521-26. doi:10.1021/ie50389a003.
    '''
    if Tc and Vc:
        Vc = Vc*1E6  # m^3/mol to cm^3/mol
        Pc = 20.8*Tc/(Vc-8)
        Pc = 101325*Pc # atm to Pa
        return Pc
    elif Tc and Pc:
        Pc = Pc/101325. # Pa to atm
        # Inverse of the surface equation: Vc = 20.8*Tc/Pc + 8
        Vc = 104/5.0*Tc/Pc+8
        Vc = Vc/1E6 # cm^3/mol to m^3/mol
        return Vc
    elif Pc and Vc:
        Pc = Pc/101325. # Pa to atm
        Vc = Vc*1E6 # m^3/mol to cm^3/mol
        # Inverse of the surface equation: Tc = Pc*(Vc - 8)/20.8
        Tc = 5./104.0*Pc*(Vc-8)
        return Tc
    else:
        raise Exception('Two of Tc, Pc, and Vc must be provided')
def Grigoras(Tc=None, Pc=None, Vc=None):
    r'''Relatively recent (1990) relationship for estimating critical
    properties from each other. Two of the three properties are required.
    This model uses the "critical surface", a general plot of Tc vs Pc vs Vc.
    The model used 137 organic and inorganic compounds to derive the equation.

    The general equation is in [1]_:

    .. math::
        P_c = 2.9 + 20.2 \frac{T_c}{V_c}

    Parameters
    ----------
    Tc : float
        Critical temperature of fluid (optional) [K]
    Pc : float
        Critical pressure of fluid (optional) [Pa]
    Vc : float
        Critical volume of fluid (optional) [m^3/mol]

    Returns
    -------
    Tc, Pc or Vc : float
        Critical property of fluid [K], [Pa], or [m^3/mol]

    Notes
    -----
    The prediction of Tc from Pc and Vc is not tested, as this is not necessary
    anywhere, but it is implemented.
    Internal units are bar, cm^3/mol, and K. A slight error occurs when
    Pa, cm^3/mol and K are used instead, on the order of <0.2%.
    This equation is less accurate than that of Ihmels, but surprisingly close.
    The author also investigated an early QSPR model.

    Examples
    --------
    Succinic acid [110-15-6]

    >>> Grigoras(Tc=851.0, Vc=0.000308)
    5871233.766233766

    References
    ----------
    .. [1] Grigoras, Stelian. "A Structural Approach to Calculate Physical
       Properties of Pure Organic Substances: The Critical Temperature,
       Critical Volume and Related Properties." Journal of Computational
       Chemistry 11, no. 4 (May 1, 1990): 493-510.
       doi:10.1002/jcc.540110408
    '''
    # Each branch converts to the correlation's internal units (K, cm^3/mol,
    # bar), evaluates the surface equation (or its inverse) for the missing
    # property, and converts back to SI.
    if Tc and Vc:
        Vc_cm3 = Vc*1E6  # m^3/mol to cm^3/mol
        Pc_bar = 2.9 + 20.2*Tc/Vc_cm3
        return Pc_bar*1E5  # bar to Pa
    if Tc and Pc:
        Pc_bar = Pc/1E5  # Pa to bar
        Vc_cm3 = 202.0*Tc/(10*Pc_bar-29.0)
        return Vc_cm3/1E6  # cm^3/mol to m^3/mol
    if Pc and Vc:
        Pc_bar = Pc/1E5  # Pa to bar
        Vc_cm3 = Vc*1E6  # m^3/mol to cm^3/mol
        return 1.0/202*(10*Pc_bar-29.0)*Vc_cm3
    raise Exception('Two of Tc, Pc, and Vc must be provided')
# String constants naming the three critical-surface correlations above;
# `critical_surface_methods` is the full list of accepted `Method` values
# for critical_surface().
IHMELS = 'IHMELS'
MEISSNER = 'MEISSNER'
GRIGORAS = 'GRIGORAS'
critical_surface_methods = [IHMELS, MEISSNER, GRIGORAS]
def critical_surface(Tc=None, Pc=None, Vc=None, AvailableMethods=False,
                     Method=None):
    r'''Function for calculating a critical property of a substance from its
    other two critical properties. Calls functions Ihmels, Meissner, and
    Grigoras, each of which use a general 'Critical surface' type of equation.
    Limited accuracy is expected due to very limited theoretical backing.

    Parameters
    ----------
    Tc : float
        Critical temperature of fluid (optional) [K]
    Pc : float
        Critical pressure of fluid (optional) [Pa]
    Vc : float
        Critical volume of fluid (optional) [m^3/mol]
    AvailableMethods : bool
        Request available methods for given parameters
    Method : string
        Request calculation uses the requested method

    Returns
    -------
    Tc, Pc or Vc : float
        Critical property of fluid [K], [Pa], or [m^3/mol]

    Examples
    --------
    Decamethyltetrasiloxane [141-62-8]

    >>> critical_surface(Tc=599.4, Pc=1.19E6, Method='IHMELS')
    0.0010927333333333334
    '''
    def list_methods():
        # Every correlation needs (and only needs) any two of the three
        # properties, so all three become available together.
        methods = []
        if (Tc and Pc) or (Tc and Vc) or (Pc and Vc):
            methods.extend([IHMELS, MEISSNER, GRIGORAS])
        methods.append(NONE)
        return methods
    if AvailableMethods:
        return list_methods()
    if not Method:
        Method = list_methods()[0]
    if Method == NONE:
        return None
    # Dispatch table mapping method names to the correlation functions.
    correlations = {IHMELS: Ihmels, MEISSNER: Meissner, GRIGORAS: Grigoras}
    if Method not in correlations:
        raise Exception('Failure in in function')
    return correlations[Method](Tc=Tc, Pc=Pc, Vc=Vc)
def third_property(CASRN=None, T=False, P=False, V=False):
    r'''Function for calculating a critical property of a substance from its
    other two critical properties, but retrieving the actual other critical
    values for convenient calculation.

    Calls functions Ihmels, Meissner, and
    Grigoras, each of which use a general 'Critical surface' type of equation.
    Limited accuracy is expected due to very limited theoretical backing.

    Parameters
    ----------
    CASRN : string
        The CAS number of the desired chemical
    T : bool
        Estimate critical temperature
    P : bool
        Estimate critical pressure
    V : bool
        Estimate critical volume

    Returns
    -------
    Tc, Pc or Vc : float
        Critical property of fluid [K], [Pa], or [m^3/mol]

    Notes
    -----
    Avoids recursion only by eliminating the None and critical surface options
    for calculating each critical property. So long as it never calls itself.
    Note that when used by Tc, Pc or Vc, this function results in said function
    calling the other functions (to determine methods) and (with method specified)

    Examples
    --------
    >>> # Decamethyltetrasiloxane [141-62-8]
    >>> third_property('141-62-8', V=True)
    0.0010920041152263375

    >>> # Succinic acid 110-15-6
    >>> third_property('110-15-6', P=True)
    6095016.233766234
    '''
    def _lookup(prop):
        # Data-backed methods only: the last two entries of the methods list
        # are the critical-surface estimate and NONE, which would recurse.
        methods = prop(CASRN, AvailableMethods=True)[0:-2]
        return prop(CASRN=CASRN, Method=methods[0]) if methods else None

    Third = None
    if V:
        _Tc, _Pc = _lookup(Tc), _lookup(Pc)
        if _Tc is not None and _Pc is not None:
            Third = critical_surface(Tc=_Tc, Pc=_Pc, Vc=None)
    elif P:
        _Tc, _Vc = _lookup(Tc), _lookup(Vc)
        if _Tc is not None and _Vc is not None:
            Third = critical_surface(Tc=_Tc, Vc=_Vc, Pc=None)
    elif T:
        _Pc, _Vc = _lookup(Pc), _lookup(Vc)
        if _Pc is not None and _Vc is not None:
            Third = critical_surface(Pc=_Pc, Vc=_Vc, Tc=None)
    else:
        raise Exception('Error in function')
    if not Third:
        return None
    return Third
### Critical Properties - Mixtures
### Critical Temperature of Mixtures
def Li(zs, Tcs, Vcs):
    r'''Calculates critical temperature of a mixture according to
    mixing rules in [1]_. Better than simple mixing rules.

    .. math::
        T_{cm} = \sum_{i=1}^n \Phi_i T_{ci}\\
        \Phi = \frac{x_i V_{ci}}{\sum_{j=1}^n x_j V_{cj}}

    Parameters
    ----------
    zs : array-like
        Mole fractions of all components
    Tcs : array-like
        Critical temperatures of all components, [K]
    Vcs : array-like
        Critical volumes of all components, [m^3/mol]

    Returns
    -------
    Tcm : float
        Critical temperatures of the mixture, [K]

    Notes
    -----
    Reviewed in many papers on critical mixture temperature.

    Second example is from Najafi (2015), for
    butane/pentane/hexane 0.6449/0.2359/0.1192 mixture, exp: 450.22 K.
    Its result is identical to that calculated in the article.

    Examples
    --------
    Nitrogen-Argon 50/50 mixture

    >>> Li([0.5, 0.5], [126.2, 150.8], [8.95e-05, 7.49e-05])
    137.40766423357667

    butane/pentane/hexane 0.6449/0.2359/0.1192 mixture, exp: 450.22 K.

    >>> Li([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6],
    ... [0.000255, 0.000313, 0.000371])
    449.68261498555444

    References
    ----------
    .. [1] Li, C. C. "Critical Temperature Estimation for Simple Mixtures."
       The Canadian Journal of Chemical Engineering 49, no. 5
       (October 1, 1971): 709-10. doi:10.1002/cjce.5450490529.
    '''
    if not none_and_length_check([zs, Tcs, Vcs]):
        raise Exception('Function inputs are incorrect format')
    # The volume-fraction weights Phi_i share this denominator.
    total_zV = sum(z*V for z, V in zip(zs, Vcs))
    return sum(z*V*T/total_zV for z, V, T in zip(zs, Vcs, Tcs))
def Chueh_Prausnitz_Tc(zs, Tcs, Vcs, taus):
    r'''Calculates critical temperature of a mixture according to
    mixing rules in [1]_.

    .. math::
        T_{cm} = \sum_i^n \theta_i Tc_i + \sum_i^n\sum_j^n(\theta_i \theta_j
        \tau_{ij})T_{ref}

        \theta = \frac{x_i V_{ci}^{2/3}}{\sum_{j=1}^n x_j V_{cj}^{2/3}}

    For a binary mxiture, this simplifies to:

    .. math::
        T_{cm} = \theta_1T_{c1} + \theta_2T_{c2} + 2\theta_1\theta_2\tau_{12}

    Parameters
    ----------
    zs : array-like
        Mole fractions of all components
    Tcs : array-like
        Critical temperatures of all components, [K]
    Vcs : array-like
        Critical volumes of all components, [m^3/mol]
    taus : array-like of shape `zs` by `zs`
        Interaction parameters

    Returns
    -------
    Tcm : float
        Critical temperatures of the mixture, [K]

    Notes
    -----
    All parameters, even if zero, must be given to this function.

    Examples
    --------
    butane/pentane/hexane 0.6449/0.2359/0.1192 mixture, exp: 450.22 K.

    >>> Chueh_Prausnitz_Tc([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6],
    ... [0.000255, 0.000313, 0.000371], [[0, 1.92681, 6.80358],
    ... [1.92681, 0, 1.89312], [ 6.80358, 1.89312, 0]])
    450.1225764723492

    References
    ----------
    .. [1] Chueh, P. L., and J. M. Prausnitz. "Vapor-Liquid Equilibria at High
       Pressures: Calculation of Critical Temperatures, Volumes, and Pressures
       of Nonpolar Mixtures." AIChE Journal 13, no. 6 (November 1, 1967):
       1107-13. doi:10.1002/aic.690130613.
    .. [2] Najafi, Hamidreza, Babak Maghbooli, and Mohammad Amin Sobati.
       "Prediction of True Critical Temperature of Multi-Component Mixtures:
       Extending Fast Estimation Methods." Fluid Phase Equilibria 392
       (April 25, 2015): 104-26. doi:10.1016/j.fluid.2015.02.001.
    '''
    if not none_and_length_check([zs, Tcs, Vcs]):
        raise Exception('Function inputs are incorrect format')
    # Un-normalized surface-fraction contributions z_i*Vc_i^(2/3); their sum
    # is the normalizing denominator of theta.
    fracs = [z*V**(2/3.) for z, V in zip(zs, Vcs)]
    denom = sum(fracs)
    Tcm = 0
    for i, fi in enumerate(fracs):
        Tcm += fi*Tcs[i]/denom
        for j, fj in enumerate(fracs):
            Tcm += (fi/denom)*(fj/denom)*taus[i][j]
    return Tcm
#print Chueh_Prausnitz_Tc([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6], [0.000255, 0.000313, 0.000371], [[0, 1.92681, 6.80358], [1.92681, 0, 1.89312], [ 6.80358, 1.89312, 0]])
#butane/pentane/hexane 0.6449/0.2359/0.1192 mixture, exp: 450.22 K.
# 450.12258 is expected
# butane/pentane 1.92681
#butane/hexane 6.80358
# pentane/hexane 1.89312
##print Chueh_Prausnitz_Tc([0.5, 0.5], [508.1, 425.12], [0.000213, 0.000255], [[0, -14.2619], [-14.2619, 0]])
####
##print Li([0.5, 0.5], [508.1, 425.12], [0.000213, 0.000255])
#
#print Chueh_Prausnitz_Tc([0.5, 0.447, .053], [282.34, 562.05, 617.15], [0.0001311, 0.000256, 0.000374], [[0, 37.9570, 0], [37.9570, 0, 4.2459], [0, 4.2459, 0]])
## ethylene, Benzene, ethylbenzene
##ethylene 74-85-1 1-ALKENES 0.5 benzene 71-43-2 N-ALKYLBENZENES 0.447 ethylbenzene 100-41-4 N-ALKYLBENZENES 0.053
##['74-85-1', '71-43-2', '100-41-4']
##[[0, 37.9570, 0], [37.9570, 0, 4.2459], [0, 4.2459, 0]]
#
##benzene ethylene 14 37.9570
##benzene ethylbenzene 9 4.2459
def Grieves_Thodos(zs, Tcs, Aijs):
    r'''Calculates critical temperature of a mixture according to
    mixing rules in [1]_.

    .. math::
        T_{cm} = \sum_{i} \frac{T_{ci}}{1 + (1/x_i)\sum_j A_{ij} x_j}

    For a binary mxiture, this simplifies to:

    .. math::
        T_{cm} = \frac{T_{c1}}{1 + (x_2/x_1)A_{12}} + \frac{T_{c2}}
        {1 + (x_1/x_2)A_{21}}

    Parameters
    ----------
    zs : array-like
        Mole fractions of all components
    Tcs : array-like
        Critical temperatures of all components, [K]
    Aijs : array-like of shape `zs` by `zs`
        Interaction parameters

    Returns
    -------
    Tcm : float
        Critical temperatures of the mixture, [K]

    Notes
    -----
    All parameters, even if zero, must be given to this function.
    Giving 0s gives really bad results however.

    Examples
    --------
    butane/pentane/hexane 0.6449/0.2359/0.1192 mixture, exp: 450.22 K.

    >>> Grieves_Thodos([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6], [[0, 1.2503, 1.516], [0.799807, 0, 1.23843], [0.659633, 0.807474, 0]])
    450.1839618758971

    References
    ----------
    .. [1] Grieves, Robert B., and George Thodos. "The Critical Temperatures of
       Multicomponent Hydrocarbon Systems." AIChE Journal 8, no. 4
       (September 1, 1962): 550-53. doi:10.1002/aic.690080426.
    .. [2] Najafi, Hamidreza, Babak Maghbooli, and Mohammad Amin Sobati.
       "Prediction of True Critical Temperature of Multi-Component Mixtures:
       Extending Fast Estimation Methods." Fluid Phase Equilibria 392
       (April 25, 2015): 104-26. doi:10.1016/j.fluid.2015.02.001.
    '''
    if not none_and_length_check([zs, Tcs]):
        raise Exception('Function inputs are incorrect format')
    Tcm = 0
    for i, zi in enumerate(zs):
        # Interaction sum over all other components, weighted by mole fraction.
        interaction = sum(Aij*zj for Aij, zj in zip(Aijs[i], zs))
        Tcm += Tcs[i]/(1. + 1./zi*interaction)
    return Tcm
#print Grieves_Thodos([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6], [[0, 1.2503, 1.516], [0.799807, 0, 1.23843], [0.659633, 0.807474, 0]])
#butane/pentane/hexane 0.6449/0.2359/0.1192 mixture, exp: 450.22 K.
# 450.18396 is expected
# butane/pentane 1.2503000 0.7998070
#butane/hexane 1.5160000 0.6596330
# pentane/hexane 1.238430 0.807474
#print Grieves_Thodos([0.5, 0.447, .053], [282.34, 562.05, 617.15], [[0, 0.8166850, 0], [0.7727120, 0, 1.5038], [0, 0.6650, 0]])
## ethylene, Benzene, ethylbenzene
## Vcs=[0.0001311, 0.000256, 0.000374]
#
#1.5038 0.6650 # benzene to ethylbenzene
#
#0.7727120 0.8166850 # benzene to ethylene
# Author claims result of 473.74.
def modified_Wilson_Tc(zs, Tcs, Aijs):
    r'''Calculates critical temperature of a mixture according to
    mixing rules in [1]_. Equation

    .. math::
        T_{cm} = \sum_i x_i T_{ci} + C\sum_i x_i \ln \left(x_i + \sum_j x_j A_{ij}\right)T_{ref}

    For a binary mxiture, this simplifies to:

    .. math::
        T_{cm} = x_1 T_{c1} + x_2 T_{c2} + C[x_1 \ln(x_1 + x_2A_{12}) + x_2\ln(x_2 + x_1 A_{21})]

    Parameters
    ----------
    zs : float
        Mole fractions of all components
    Tcs : float
        Critical temperatures of all components, [K]
    Aijs : matrix
        Interaction parameters

    Returns
    -------
    Tcm : float
        Critical temperatures of the mixture, [K]

    Notes
    -----
    The equation and original article has been reviewed.
    [1]_ has 75 binary systems, and additional multicomponent mixture parameters.
    All parameters, even if zero, must be given to this function.

    Second example is from [2]_, for:
    butane/pentane/hexane 0.6449/0.2359/0.1192 mixture, exp: 450.22 K.
    Its result is identical to that calculated in the article.

    Examples
    --------
    >>> modified_Wilson_Tc([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6],
    ... [[0, 1.174450, 1.274390], [0.835914, 0, 1.21038],
    ... [0.746878, 0.80677, 0]])
    450.0305966823031

    References
    ----------
    .. [1] Teja, Amyn S., Kul B. Garg, and Richard L. Smith. "A Method for the
       Calculation of Gas-Liquid Critical Temperatures and Pressures of
       Multicomponent Mixtures." Industrial & Engineering Chemistry Process
       Design and Development 22, no. 4 (1983): 672-76.
    .. [2] Najafi, Hamidreza, Babak Maghbooli, and Mohammad Amin Sobati.
       "Prediction of True Critical Temperature of Multi-Component Mixtures:
       Extending Fast Estimation Methods." Fluid Phase Equilibria 392
       (April 25, 2015): 104-26. doi:10.1016/j.fluid.2015.02.001.
    '''
    if not none_and_length_check([zs, Tcs]):
        raise Exception('Function inputs are incorrect format')
    C = -2500  # empirical constant of the correlation
    # Mole-fraction-weighted base value plus the logarithmic correction term.
    Tcm = sum(z*T for z, T in zip(zs, Tcs))
    for i, zi in enumerate(zs):
        Tcm += C*zi*log(zi + sum(zj*Aij for zj, Aij in zip(zs, Aijs[i])))
    return Tcm
#print modified_Wilson_Tc([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6], [[0, 1.174450, 1.274390], [0.835914, 0, 1.21038], [0.746878, 0.80677, 0]])
#butane/pentane/hexane 0.6449/0.2359/0.1192 mixture, exp: 450.22 K.
# 450.0306 is expected
# butane/pentane 1.174450 0.835914
#butane/hexane 1.274390 0.746878
# pentane/hexane 1.21038 0.80677
#print modified_Wilson_Tc([0.5, 0.5], [508.1, 425.12], [[0, 0.8359], [0, 1.1963]]) # Acetone/butane 50-50
#print modified_Wilson_Tc([0.5, 0.447, .053], [282.34, 562.05, 617.15], [[0,1.0853, 0 ], [0.8425, 0, 1.2514], [0, 0.7688, 0]])
#Tc exp: 486.90
# Author claims MW gives 471.49
## ethylene, Benzene, ethylbenzene
# Vcs=[0.0001311, 0.000256, 0.000374]
#benzene-ethylene 0.8425 1.0853
#benzene ethylbenzene 1.2514 0.7688
# ethylene-ethylbenzene (26.166530797247095, 3.2152024634944754) CALCULATED
#print Grieves_Thodos([0.5, 0.5], [508.1, 425.12], [[0, 0.7137], [1.6496, 0]])
#print Grieves_Thodos([0.5, 0.5], [508.1, 425.12], [[0, 0.1305], [0.09106, 0]])
def Tc_mixture(Tcs=None, zs=None, CASRNs=None, AvailableMethods=False, Method=None): # pragma: no cover
    '''This function handles the retrieval of a mixture's critical temperature.

    This API is considered experimental, and is expected to be removed in a
    future release in favor of a more complete object-oriented interface.

    >>> Tc_mixture([400, 550], [0.3, 0.7])
    505.0
    '''
    def list_methods():
        # Only a mole-fraction-weighted average is implemented.
        methods = ['Simple'] if none_and_length_check([Tcs]) else []
        methods.append('None')
        return methods
    if AvailableMethods:
        return list_methods()
    if not Method:
        Method = list_methods()[0]
    # This is the calculate, given the method section
    if Method == 'Simple':
        return mixing_simple(zs, Tcs)
    if Method == 'None':
        return None
    raise Exception('Failure in in function')
### Critical Pressure of Mixtures
def Pc_mixture(Pcs=None, zs=None, CASRNs=None, AvailableMethods=False, Method=None): # pragma: no cover
    '''This function handles the retrieval of a mixture's critical pressure.

    This API is considered experimental, and is expected to be removed in a
    future release in favor of a more complete object-oriented interface.

    >>> Pc_mixture([2.2E7, 1.1E7], [0.3, 0.7])
    14300000.0
    '''
    def list_methods():
        # Only a mole-fraction-weighted average is implemented.
        methods = []
        if none_and_length_check([Pcs]):
            methods.append('Simple')
        methods.append('None')
        return methods
    if AvailableMethods:
        return list_methods()
    if not Method:
        Method = list_methods()[0]
    # This is the calculate, given the method section
    if Method == 'Simple':
        return mixing_simple(zs, Pcs)
    elif Method == 'None':
        return None
    else:
        raise Exception('Failure in in function')
### Critical Volume of Mixtures
def Chueh_Prausnitz_Vc(zs, Vcs, nus):
    r'''Calculates the critical volume of a mixture according to the
    mixing rules of [1]_, which include a binary interaction parameter.

    .. math::
        V_{cm} = \sum_i^n \theta_i V_{ci} + \sum_i^n\sum_j^n(\theta_i \theta_j \nu_{ij})V_{ref}

        \theta = \frac{x_i V_{ci}^{2/3}}{\sum_{j=1}^n x_j V_{cj}^{2/3}}

    Parameters
    ----------
    zs : float
        Mole fractions of all components
    Vcs : float
        Critical volumes of all components, [m^3/mol]
    nus : matrix
        Interaction parameters, [cm^3/mol]

    Returns
    -------
    Vcm : float
        Critical volume of the mixture, [m^3/mol]

    Notes
    -----
    All parameters, even if zero, must be given to this function.
    nu parameters are in cm^3/mol, but are converted to m^3/mol inside the
    function.

    Examples
    --------
    1-butanol/benzene 0.4271/0.5729 mixture, Vcm = 268.096 mL/mol.

    >>> Chueh_Prausnitz_Vc([0.4271, 0.5729], [0.000273, 0.000256], [[0, 5.61847], [5.61847, 0]])
    0.00026620503424517445

    References
    ----------
    .. [1] Chueh, P. L., and J. M. Prausnitz. "Vapor-Liquid Equilibria at High
       Pressures: Calculation of Critical Temperatures, Volumes, and Pressures
       of Nonpolar Mixtures." AIChE Journal 13, no. 6 (November 1, 1967):
       1107-13. doi:10.1002/aic.690130613.
    .. [2] Najafi, Hamidreza, Babak Maghbooli, and Mohammad Amin Sobati.
       "Prediction of True Critical Volume of Multi-Component Mixtures:
       Extending Fast Estimation Methods." Fluid Phase Equilibria 386
       (January 25, 2015): 13-29. doi:10.1016/j.fluid.2014.11.008.
    '''
    if not none_and_length_check([zs, Vcs]):  # inputs must be complete and equal-length
        raise Exception('Function inputs are incorrect format')
    N = len(zs)
    # Surface-fraction-like weights theta_i, based on Vc^(2/3)
    weights = [zs[k]*Vcs[k]**(2/3.) for k in range(N)]
    total = sum(weights)
    thetas = [w/total for w in weights]
    # Linear contribution plus the pairwise interaction correction
    Vcm = 0.0
    for i in range(N):
        Vcm += thetas[i]*Vcs[i]
        for j in range(N):
            # nus are given in cm^3/mol; divide by 1E6 to reach m^3/mol
            Vcm += thetas[i]*thetas[j]*nus[i][j]/1E6
    return Vcm
#print Chueh_Prausnitz_Vc([0.4271, 0.5729], [0.000273, 0.000256], [[0, 5.61847], [5.61847, 0]])
## 1-butanol/benzene 0.4271/0.5729 mixture, Vcm = 268.096 mL/mol
## Expected result: 266.205034245174
def modified_Wilson_Vc(zs, Vcs, Aijs):
    r'''Calculates the critical volume of a mixture according to the
    modified Wilson mixing rules of [1]_ with binary interaction parameters.

    .. math::
        V_{cm} = \sum_i x_i V_{ci} + C\sum_i x_i \ln \left(x_i + \sum_j x_j A_{ij}\right)V_{ref}

    For a binary mixture, this simplifies to:

    .. math::
        V_{cm} = x_1 V_{c1} + x_2 V_{c2} + C[x_1 \ln(x_1 + x_2A_{12}) + x_2\ln(x_2 + x_1 A_{21})]

    Parameters
    ----------
    zs : float
        Mole fractions of all components
    Vcs : float
        Critical volumes of all components, [m^3/mol]
    Aijs : matrix
        Interaction parameters, [cm^3/mol]

    Returns
    -------
    Vcm : float
        Critical volume of the mixture, [m^3/mol]

    Notes
    -----
    The equation and original article have been reviewed. C = -2500.
    All parameters, even if zero, must be given to this function.
    The interaction parameters are in cm^3/mol, but are converted to
    m^3/mol inside the function.

    Examples
    --------
    1-butanol/benzene 0.4271/0.5729 mixture, Vcm = 268.096 mL/mol.

    >>> modified_Wilson_Vc([0.4271, 0.5729], [0.000273, 0.000256],
    ... [[0, 0.6671250], [1.3939900, 0]])
    0.0002664335032706881

    References
    ----------
    .. [1] Teja, Amyn S., Kul B. Garg, and Richard L. Smith. "A Method for the
       Calculation of Gas-Liquid Critical Temperatures and Pressures of
       Multicomponent Mixtures." Industrial & Engineering Chemistry Process
       Design and Development 22, no. 4 (1983): 672-76.
    .. [2] Najafi, Hamidreza, Babak Maghbooli, and Mohammad Amin Sobati.
       "Prediction of True Critical Temperature of Multi-Component Mixtures:
       Extending Fast Estimation Methods." Fluid Phase Equilibria 392
       (April 25, 2015): 104-26. doi:10.1016/j.fluid.2015.02.001.
    '''
    if not none_and_length_check([zs, Vcs]):  # inputs must be complete and equal-length
        raise Exception('Function inputs are incorrect format')
    C = -2500
    # Linear mole-fraction average of the pure-component critical volumes
    Vcm = sum(zi*Vci for zi, Vci in zip(zs, Vcs))
    # Wilson-style logarithmic correction; A values are cm^3/mol, hence /1E6
    for i in range(len(zs)):
        interaction = zs[i] + sum(zs[j]*Aijs[i][j] for j in range(len(zs)))
        Vcm += C*zs[i]*log(interaction)/1E6
    return Vcm
def Vc_mixture(Vcs=None, zs=None, CASRNs=None, AvailableMethods=False, Method=None): # pragma: no cover
    '''This function handles the retrieval of a mixture's critical volume.

    This API is considered experimental, and is expected to be removed in a
    future release in favor of a more complete object-oriented interface.

    >>> Vc_mixture([5.6E-5, 2E-4], [0.3, 0.7])
    0.0001568
    '''
    def list_methods():
        # 'Simple' (mole-fraction weighted average) requires a complete,
        # non-None Vcs list; 'None' is always available as a fallback.
        methods = []
        if none_and_length_check([Vcs]):
            methods.append('Simple')
        methods.append('None')
        return methods
    if AvailableMethods:
        return list_methods()
    if not Method:
        Method = list_methods()[0]
    # Calculate with the selected method
    if Method == 'Simple':
        return mixing_simple(zs, Vcs)
    elif Method == 'None':
        return None
    else:
        raise Exception('Failure in function Vc_mixture')
| 40.489773
| 179
| 0.642356
| 10,333
| 73,246
| 4.489209
| 0.10481
| 0.007653
| 0.011059
| 0.030181
| 0.775174
| 0.744584
| 0.736262
| 0.722595
| 0.715351
| 0.702891
| 0
| 0.099722
| 0.242771
| 73,246
| 1,808
| 180
| 40.512168
| 0.736586
| 0.684829
| 0
| 0.516765
| 0
| 0
| 0.074881
| 0.007192
| 0
| 0
| 0
| 0.000553
| 0
| 1
| 0.053254
| false
| 0.011834
| 0.013807
| 0
| 0.161736
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
869506c48d16003efcc6d19e7271b2da1cb6cd01
| 246
|
py
|
Python
|
pytanga/components/Cisco/xe/ip/__init__.py
|
renatoalmeidaoliveira/Pytanga
|
aa02f1c0f2573da1330d1d246ab780fa3be336a5
|
[
"MIT"
] | null | null | null |
pytanga/components/Cisco/xe/ip/__init__.py
|
renatoalmeidaoliveira/Pytanga
|
aa02f1c0f2573da1330d1d246ab780fa3be336a5
|
[
"MIT"
] | null | null | null |
pytanga/components/Cisco/xe/ip/__init__.py
|
renatoalmeidaoliveira/Pytanga
|
aa02f1c0f2573da1330d1d246ab780fa3be336a5
|
[
"MIT"
] | null | null | null |
from .ip import ipComponent
from .prefix import prefixComponent
from .prefixlist import prefixlistComponent
from .prefixlists import prefixeslistsComponent
from .routemap import routemapComponent
from .routemapentry import routemapentryComponent
| 35.142857
| 49
| 0.878049
| 24
| 246
| 9
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 246
| 6
| 50
| 41
| 0.972973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
86978b3f2b4acd165df150d78a31b2fb921a36f2
| 154
|
py
|
Python
|
defineClass.py
|
ramprakashmu/python
|
667657e46e748759006759dec5d95a172946f3a8
|
[
"Apache-2.0"
] | null | null | null |
defineClass.py
|
ramprakashmu/python
|
667657e46e748759006759dec5d95a172946f3a8
|
[
"Apache-2.0"
] | null | null | null |
defineClass.py
|
ramprakashmu/python
|
667657e46e748759006759dec5d95a172946f3a8
|
[
"Apache-2.0"
] | null | null | null |
class Util():
    """Minimal wrapper that stores a single value and returns it on demand."""

    def __init__(self, item):
        # Keep the wrapped value on the instance.
        self.item = item

    def getUtil(self):
        """Return the wrapped value unchanged."""
        return self.item
# Demo: wrap the string "Ram" and echo it back via the accessor.
u=Util("Ram")
print(u.getUtil())
| 15.4
| 29
| 0.577922
| 21
| 154
| 4.047619
| 0.52381
| 0.282353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 154
| 9
| 30
| 17.111111
| 0.758929
| 0
| 0
| 0
| 0
| 0
| 0.019481
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0.142857
| 0.571429
| 0.142857
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
86ef90bc9314ebd48a90f3166b9d3612473e0efc
| 100
|
py
|
Python
|
codenode/cpp/nodes/__init__.py
|
0xf0f/codenode
|
fa36ba5e2eeb42e95c8fc33afd4f1bf131ba6d9b
|
[
"MIT"
] | 3
|
2019-06-27T04:57:37.000Z
|
2019-06-27T11:29:33.000Z
|
codenode/cpp/nodes/__init__.py
|
0xf0f/codenode
|
fa36ba5e2eeb42e95c8fc33afd4f1bf131ba6d9b
|
[
"MIT"
] | null | null | null |
codenode/cpp/nodes/__init__.py
|
0xf0f/codenode
|
fa36ba5e2eeb42e95c8fc33afd4f1bf131ba6d9b
|
[
"MIT"
] | null | null | null |
from .block import Block
from .function_definition import Function
from .statement import Statement
| 25
| 41
| 0.85
| 13
| 100
| 6.461538
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 100
| 3
| 42
| 33.333333
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
810af334c2799c5e879d9421ce009cbab46572f9
| 257
|
py
|
Python
|
django_peeringdb/models/__init__.py
|
steffann/django-peeringdb
|
7151c7807927dfb31f3a6d3b4dd6d8adc7d23363
|
[
"Apache-2.0"
] | null | null | null |
django_peeringdb/models/__init__.py
|
steffann/django-peeringdb
|
7151c7807927dfb31f3a6d3b4dd6d8adc7d23363
|
[
"Apache-2.0"
] | null | null | null |
django_peeringdb/models/__init__.py
|
steffann/django-peeringdb
|
7151c7807927dfb31f3a6d3b4dd6d8adc7d23363
|
[
"Apache-2.0"
] | null | null | null |
from django.conf import settings as __settings
# import to namespace
from django_peeringdb.models.abstract import * # noqa
if not __settings.ABSTRACT_ONLY:
# import to namespace
from django_peeringdb.models.concrete import * # noqa
del __settings
| 25.7
| 57
| 0.789883
| 34
| 257
| 5.705882
| 0.5
| 0.154639
| 0.175258
| 0.216495
| 0.43299
| 0.43299
| 0.43299
| 0
| 0
| 0
| 0
| 0
| 0.159533
| 257
| 9
| 58
| 28.555556
| 0.898148
| 0.190661
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
812760c9f7ffc511b409f3b218b90103db27154d
| 381
|
py
|
Python
|
CS1410/Module5/WhatMethodGetsCalled.py
|
Davidjbennett/DavidBennett.github.io
|
09a2652b7ace8741bf23c6432abd58ee790b9f0c
|
[
"MIT"
] | 3
|
2021-05-18T16:17:29.000Z
|
2022-01-20T15:46:59.000Z
|
CS1410/Module5/WhatMethodGetsCalled.py
|
Davidjbennett/DavidBennett
|
09a2652b7ace8741bf23c6432abd58ee790b9f0c
|
[
"MIT"
] | null | null | null |
CS1410/Module5/WhatMethodGetsCalled.py
|
Davidjbennett/DavidBennett
|
09a2652b7ace8741bf23c6432abd58ee790b9f0c
|
[
"MIT"
] | null | null | null |
class A():
    """Base of the demo hierarchy; f() reports the class letter."""

    def __init__(self):
        pass  # no state to initialize

    def f(self):
        return "A"
class B(A):
    """Overrides f() and introduces g(); both report "B"."""

    def __init__(self):
        pass  # no state to initialize

    def f(self):
        return "B"

    def g(self):
        return "B"
class C(B):
    """Overrides g() only; f() is inherited from B."""

    def __init__(self):
        pass  # no state to initialize

    def g(self):
        return "C"
class D(C):
    """Adds nothing new; inherits f() from B and g() from C via the MRO."""

    def __init__(self):
        pass  # no state to initialize
# d.f() resolves to B.f through the MRO (D -> C -> B); d.g() resolves to C.g.
d = D()
print(d.f())
print(d.g())
| 13.607143
| 23
| 0.469816
| 55
| 381
| 2.963636
| 0.236364
| 0.171779
| 0.269939
| 0.368098
| 0.478528
| 0.368098
| 0.368098
| 0.368098
| 0.368098
| 0
| 0
| 0
| 0.380577
| 381
| 28
| 24
| 13.607143
| 0.690678
| 0
| 0
| 0.608696
| 0
| 0
| 0.010471
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.347826
| false
| 0.173913
| 0
| 0.173913
| 0.695652
| 0.086957
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
|
0
| 5
|
d4967a3ae245f85078fe22141e2fe0b16965f13e
| 428
|
py
|
Python
|
coils/test/testUserInput.py
|
vmlaker/coils
|
a3a613b3d661dec010e5879c86e62cbff2519dd0
|
[
"MIT"
] | 3
|
2015-04-23T02:14:59.000Z
|
2017-09-27T17:33:37.000Z
|
coils/test/testUserInput.py
|
vmlaker/coils
|
a3a613b3d661dec010e5879c86e62cbff2519dd0
|
[
"MIT"
] | null | null | null |
coils/test/testUserInput.py
|
vmlaker/coils
|
a3a613b3d661dec010e5879c86e62cbff2519dd0
|
[
"MIT"
] | null | null | null |
def test1():
    """Test UserInput."""
    # NOTE(review): user_input presumably prompts for interactive input, so
    # these asserts appear to match a scripted sequence of entries -- confirm.
    from coils.UserInput import user_input
    assert user_input('Name') == 'Bugs'
    # default= value is expected back when the entry is empty
    assert user_input('Surname', default='Bunny') == 'Bunny'
    # choices= restricts the accepted answers
    assert user_input('Bird', choices=('Tweety', 'Beaky')) == 'Tweety'
    assert user_input('Bird', choices=('Tweety', 'Beaky')) == 'Beaky'
    # empty_ok=True permits a falsy (empty) response
    assert not user_input('empty input', empty_ok=True)
    assert user_input('last one') == 'The end.'
| 42.8
| 70
| 0.64486
| 54
| 428
| 4.962963
| 0.5
| 0.235075
| 0.279851
| 0.141791
| 0.276119
| 0.276119
| 0.276119
| 0
| 0
| 0
| 0
| 0.002809
| 0.168224
| 428
| 9
| 71
| 47.555556
| 0.75
| 0.035047
| 0
| 0
| 0
| 0
| 0.228501
| 0
| 0
| 0
| 0
| 0
| 0.75
| 1
| 0.125
| true
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d4ab90b9704c64a9733a176e80c0984bb00838bd
| 263
|
py
|
Python
|
mmocr/models/textrecog/recognizer/crnn.py
|
yangrisheng/mmocr
|
3ad4a8d3f8d2d22b7854b72ee68a7977a3f3631f
|
[
"Apache-2.0"
] | 2
|
2022-01-02T13:33:10.000Z
|
2022-02-08T07:40:30.000Z
|
mmocr/models/textrecog/recognizer/crnn.py
|
yangrisheng/mmocr
|
3ad4a8d3f8d2d22b7854b72ee68a7977a3f3631f
|
[
"Apache-2.0"
] | null | null | null |
mmocr/models/textrecog/recognizer/crnn.py
|
yangrisheng/mmocr
|
3ad4a8d3f8d2d22b7854b72ee68a7977a3f3631f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
from mmocr.models.builder import RECOGNIZERS
from .encode_decode_recognizer import EncodeDecodeRecognizer
@RECOGNIZERS.register_module()
class CRNNNet(EncodeDecodeRecognizer):
"""CTC-loss based recognizer."""
| 29.222222
| 60
| 0.809886
| 28
| 263
| 7.5
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102662
| 263
| 8
| 61
| 32.875
| 0.889831
| 0.277567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d4caceab0e3bdb2497fdc18f40c8ce8f6f07360a
| 225
|
py
|
Python
|
rasa_nlu_examples/featurizers/dense/__init__.py
|
imanearaf/rasa-nlu-examples
|
3e3cfc4e743715c9104d5771b14ecfab2a02da11
|
[
"Apache-2.0"
] | 1
|
2021-05-16T11:53:53.000Z
|
2021-05-16T11:53:53.000Z
|
rasa_nlu_examples/featurizers/dense/__init__.py
|
anuragshas/rasa-nlu-examples
|
c16a8e0693355dfe7ea6a865633553999de2fb31
|
[
"Apache-2.0"
] | null | null | null |
rasa_nlu_examples/featurizers/dense/__init__.py
|
anuragshas/rasa-nlu-examples
|
c16a8e0693355dfe7ea6a865633553999de2fb31
|
[
"Apache-2.0"
] | null | null | null |
from .fasttext_featurizer import FastTextFeaturizer
from .bpemb_featurizer import BytePairFeaturizer
from .gensim_featurizer import GensimFeaturizer
__all__ = ["FastTextFeaturizer", "BytePairFeaturizer", "GensimFeaturizer"]
| 37.5
| 74
| 0.857778
| 19
| 225
| 9.789474
| 0.526316
| 0.258065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 225
| 5
| 75
| 45
| 0.898551
| 0
| 0
| 0
| 0
| 0
| 0.231111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d4e5344352fe3cd604fcc6855db24bfd680ffdd8
| 160
|
py
|
Python
|
indico_chat_bot/notifiers/debug.py
|
pferreir/indico-chat-bot
|
0f961543d8e74c6951e24d63387a6b9903d7997c
|
[
"MIT"
] | null | null | null |
indico_chat_bot/notifiers/debug.py
|
pferreir/indico-chat-bot
|
0f961543d8e74c6951e24d63387a6b9903d7997c
|
[
"MIT"
] | 2
|
2016-09-05T14:08:43.000Z
|
2016-09-07T08:27:43.000Z
|
indico_chat_bot/notifiers/debug.py
|
pferreir/indico-chat-bot
|
0f961543d8e74c6951e24d63387a6b9903d7997c
|
[
"MIT"
] | 2
|
2016-08-18T09:20:04.000Z
|
2016-08-18T09:22:28.000Z
|
from loguru import logger
def notify(bot, channel, text):
    """Debug notification."""
    # Format the message first, then hand it to the debug logger.
    message = f"Sending to channel {channel['hook_url']}: {text}"
    logger.debug(message)
| 22.857143
| 69
| 0.68125
| 21
| 160
| 5.142857
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 160
| 6
| 70
| 26.666667
| 0.8
| 0.11875
| 0
| 0
| 0
| 0
| 0.355556
| 0.162963
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d4f94f9f220309e53b16cc76105edace14d1eb29
| 72
|
py
|
Python
|
tools/stack.py
|
bionicles/neuromax
|
a53a17a1c033c11ac607a9e28f43b1f906e58aad
|
[
"MIT"
] | null | null | null |
tools/stack.py
|
bionicles/neuromax
|
a53a17a1c033c11ac607a9e28f43b1f906e58aad
|
[
"MIT"
] | null | null | null |
tools/stack.py
|
bionicles/neuromax
|
a53a17a1c033c11ac607a9e28f43b1f906e58aad
|
[
"MIT"
] | null | null | null |
def stack(layer, repeats):
    """Return a list of `repeats` fresh results of calling `layer` with no args."""
    layers = []
    for _ in range(repeats):
        layers.append(layer())
    return layers
| 24
| 44
| 0.680556
| 10
| 72
| 4.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180556
| 72
| 2
| 45
| 36
| 0.813559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
be154e44f6c20a878e938bb4a2c98cd18a351698
| 4,917
|
py
|
Python
|
bot/design_demo.py
|
abbenson/ballbot
|
44ea6abba026ec47c2dcd79450186d9ea43a9e7d
|
[
"BSD-2-Clause"
] | null | null | null |
bot/design_demo.py
|
abbenson/ballbot
|
44ea6abba026ec47c2dcd79450186d9ea43a9e7d
|
[
"BSD-2-Clause"
] | null | null | null |
bot/design_demo.py
|
abbenson/ballbot
|
44ea6abba026ec47c2dcd79450186d9ea43a9e7d
|
[
"BSD-2-Clause"
] | null | null | null |
import Adafruit_BBIO.UART as UART
import serial
import re
import pyDMCC
# Autodetect DMCC capes
dmccs = pyDMCC.autodetect()
pitch_power = 0
roll_power = 0
UART.setup("UART1")
# Open serial connection to IMU
ser = serial.Serial(port="/dev/ttyO1", baudrate=57600)
# Demo loop
while 1:
ser.open()
# Take line of readings from IMU
ser_str = ser.readline()
while ser_str[0] != '#':
ser_str = ser.readline()
# Parse out float YPR values
ser_str = re.findall(r"[+-]? *(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][+-]?\d+)?", ser_str)
# Set Pitch Motor
# If pitch between -10 and 10 set motor off
if -10 < float(ser_str[1]) < 10:
if pitch_power != 0:
print "Pitch Motor Off"
pitch_power = 0
dmccs[0].motors[1].power = pitch_power
# If pitch between 10 and 20 set motor 35%
elif 10 <= float(ser_str[1]) < 20:
if pitch_power != 35:
print "Pitch Motor Forward 35%"
pitch_power = 35
dmccs[0].motors[1].power = pitch_power
# If pitch between 20 and 30 set motor 50%
elif 20 <= float(ser_str[1]) < 30:
if pitch_power != 50:
print "Pitch Motor Forward 50%"
pitch_power = 50
dmccs[0].motors[1].power = pitch_power
# If pitch between 30 and 40 set motor 75%
elif 30 <= float(ser_str[1]) < 40:
if pitch_power != 75:
print "Pitch Motor Forward 75%"
pitch_power = 75
dmccs[0].motors[1].power = pitch_power
# If pitch greater than 40 set motor 100%
elif float(ser_str[1]) >= 40:
if pitch_power != 100:
print "Pitch Motor Forward 100%"
pitch_power = 100
dmccs[0].motors[1].power = pitch_power
# If pitch between -10 and -20 set motor -35%
elif -20 < float(ser_str[1]) <= -10:
if pitch_power != -35:
print "Pitch Motor Reverse 35%"
pitch_power = -35
dmccs[0].motors[1].power = pitch_power
# If pitch between -20 and -30 set motor -50%
elif -30 < float(ser_str[1]) <= -20:
if pitch_power != -50:
print "Pitch Motor Reverse 50%"
pitch_power = -50
dmccs[0].motors[1].power = pitch_power
# If pitch between -30 and -40 set motor -75%
elif -40 < float(ser_str[1]) <= -30:
if pitch_power != -75:
print "Pitch Motor Reverse 75%"
pitch_power = -75
dmccs[0].motors[1].power = pitch_power
# If pitch less than -40 set motor -100%
elif float(ser_str[1]) <= -40:
if pitch_power != -100:
print "Pitch Motor Reverse 100%"
pitch_power = -100
dmccs[0].motors[1].power = pitch_power
# Set Roll Motor
# If roll between -10 and 10 set motor off
if -10 < float(ser_str[2]) < 10:
if roll_power != 0:
print "Roll Motor Off"
roll_power = 0
dmccs[0].motors[2].power = roll_power
# If roll between 10 and 20 set motor 35%
elif 10 <= float(ser_str[2]) < 20:
if roll_power != 35:
print "Roll Motor Forward 35%"
roll_power = 35
dmccs[0].motors[2].power = roll_power
# If roll between 20 and 30 set motor 50%
elif 20 <= float(ser_str[2]) < 30:
if roll_power != 50:
print "Roll Motor Forward 50%"
roll_power = 50
dmccs[0].motors[2].power = roll_power
# If roll between 30 and 40 set motor 75%
elif 30 <= float(ser_str[2]) < 40:
if roll_power != 75:
print "Roll Motor Forward 75%"
roll_power = 75
dmccs[0].motors[2].power = roll_power
# If roll greater than 40 set motor 100%
elif float(ser_str[2]) >= 40:
if roll_power != 100:
print "Roll Motor Forward 100%"
roll_power = 100
dmccs[0].motors[2].power = roll_power
# If roll between -10 and -20 set motor -35%
elif -20 < float(ser_str[2]) <= -10:
if roll_power != -35:
print "Roll Motor Reverse 35%"
roll_power = -35
dmccs[0].motors[2].power = roll_power
# If roll between -20 and -30 set motor -50%
elif -30 < float(ser_str[2]) <= -20:
if roll_power != -50:
print "Roll Motor Reverse 50%"
roll_power = -50
dmccs[0].motors[2].power = roll_power
# If roll between -30 and -40 set motor -75%
elif -40 < float(ser_str[2]) <= -30:
if roll_power != -75:
print "Roll Motor Reverse 75%"
roll_power = -75
dmccs[0].motors[2].power = roll_power
# If roll less than -40 set motor -100%
elif float(ser_str[2]) <= -40:
if roll_power != -100:
print "Roll Motor Reverse 100%"
roll_power = -100
dmccs[0].motors[2].power = roll_power
ser.close()
| 31.120253
| 85
| 0.551556
| 709
| 4,917
| 3.712271
| 0.101551
| 0.106383
| 0.075228
| 0.041033
| 0.797492
| 0.787994
| 0.787994
| 0.712766
| 0.635638
| 0.635638
| 0
| 0.103647
| 0.330893
| 4,917
| 157
| 86
| 31.318471
| 0.696353
| 0.181615
| 0
| 0.226415
| 0
| 0
| 0.114057
| 0.010255
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.037736
| null | null | 0.169811
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
be15f2ae4ee401ab1bc3ea2cf8d3d1c978e31d8c
| 95
|
py
|
Python
|
food/tag/ingredient.py
|
jojolebarjos/food-ontology
|
968e64c1bdbb57b91c2b71ec5360107247b385bb
|
[
"Unlicense"
] | null | null | null |
food/tag/ingredient.py
|
jojolebarjos/food-ontology
|
968e64c1bdbb57b91c2b71ec5360107247b385bb
|
[
"Unlicense"
] | null | null | null |
food/tag/ingredient.py
|
jojolebarjos/food-ontology
|
968e64c1bdbb57b91c2b71ec5360107247b385bb
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
# TODO use PoS tags and entities to output ontology-like annotations
| 19
| 68
| 0.694737
| 14
| 95
| 4.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012987
| 0.189474
| 95
| 4
| 69
| 23.75
| 0.844156
| 0.926316
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.25
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0782551f2249c08b21d073c6960720866a9a3123
| 155
|
py
|
Python
|
fb_vd/scrapping/url_exception.py
|
vieirafrancisco/fb-vd
|
6e3ff0a508f8bbb2c07213073557cc466e2a0a28
|
[
"MIT"
] | 1
|
2019-01-02T18:08:06.000Z
|
2019-01-02T18:08:06.000Z
|
fb_vd/scrapping/url_exception.py
|
vieirafrancisco/fb-vd
|
6e3ff0a508f8bbb2c07213073557cc466e2a0a28
|
[
"MIT"
] | null | null | null |
fb_vd/scrapping/url_exception.py
|
vieirafrancisco/fb-vd
|
6e3ff0a508f8bbb2c07213073557cc466e2a0a28
|
[
"MIT"
] | null | null | null |
#-- Personalized Url Exception --
class URLException(Exception):
    """Personalized exception for URL errors.

    Behaves exactly like Exception; exists only so callers can raise and
    catch a URL-specific type.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
| 38.75
| 49
| 0.683871
| 16
| 155
| 6.125
| 0.625
| 0.163265
| 0.244898
| 0.367347
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 155
| 4
| 49
| 38.75
| 0.753846
| 0.206452
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
07846110d89df41ca4b1e24be666d526e038e471
| 140
|
py
|
Python
|
airena/services/__init__.py
|
en0/ai-battle
|
3be98b23f06be7c78815643d5e24e8a9fab18f0d
|
[
"MIT"
] | null | null | null |
airena/services/__init__.py
|
en0/ai-battle
|
3be98b23f06be7c78815643d5e24e8a9fab18f0d
|
[
"MIT"
] | null | null | null |
airena/services/__init__.py
|
en0/ai-battle
|
3be98b23f06be7c78815643d5e24e8a9fab18f0d
|
[
"MIT"
] | null | null | null |
from .object import ObjectService
from .message import MessageService
from .keyboard import KeyboardService
from .clock import ClockService
| 28
| 37
| 0.857143
| 16
| 140
| 7.5
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 140
| 4
| 38
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
078a9a3900c7b3cf2d79006a6ef394d889e536bb
| 121
|
py
|
Python
|
build_gpcr/management/commands/build_yaml_from_structure.py
|
pszgaspar/protwis
|
4989a67175ef3c95047d795c843cf6b9cf4141fa
|
[
"Apache-2.0"
] | 21
|
2016-01-20T09:33:14.000Z
|
2021-12-20T19:19:45.000Z
|
build_gpcr/management/commands/build_yaml_from_structure.py
|
pszgaspar/protwis
|
4989a67175ef3c95047d795c843cf6b9cf4141fa
|
[
"Apache-2.0"
] | 75
|
2016-02-26T16:29:58.000Z
|
2022-03-21T12:35:13.000Z
|
build_gpcr/management/commands/build_yaml_from_structure.py
|
pszgaspar/protwis
|
4989a67175ef3c95047d795c843cf6b9cf4141fa
|
[
"Apache-2.0"
] | 77
|
2016-01-22T08:44:26.000Z
|
2022-02-01T15:54:56.000Z
|
from build.management.commands.build_yaml_from_structure import Command as BuildYaml
class Command(BuildYaml):
pass
| 24.2
| 84
| 0.834711
| 16
| 121
| 6.125
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115702
| 121
| 5
| 85
| 24.2
| 0.915888
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
0793420d6b40c4369bb9083a162adf75c6324f68
| 121
|
py
|
Python
|
devops_backend/__init__.py
|
asenattime/devops_backend
|
687cb747339d414b974b8c140736314a84fbd77d
|
[
"MIT"
] | null | null | null |
devops_backend/__init__.py
|
asenattime/devops_backend
|
687cb747339d414b974b8c140736314a84fbd77d
|
[
"MIT"
] | null | null | null |
devops_backend/__init__.py
|
asenattime/devops_backend
|
687cb747339d414b974b8c140736314a84fbd77d
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import pymysql
pymysql.install_as_MySQLdb()
from .celery import app as celery_app
| 24.2
| 38
| 0.859504
| 18
| 121
| 5.333333
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107438
| 121
| 4
| 39
| 30.25
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0799174d12e567f34fea9fba05b4f40ed9155976
| 139
|
py
|
Python
|
util/unfuck_pythonw.py
|
RikkaBlue/ArknightsAutoHelper
|
770d109403bae937efeb59bc6628bd43631e7d1a
|
[
"MIT"
] | 1,035
|
2019-05-14T11:58:32.000Z
|
2022-03-16T15:09:53.000Z
|
util/unfuck_pythonw.py
|
RikkaBlue/ArknightsAutoHelper
|
770d109403bae937efeb59bc6628bd43631e7d1a
|
[
"MIT"
] | 209
|
2019-05-11T13:19:57.000Z
|
2022-03-12T01:42:11.000Z
|
util/unfuck_pythonw.py
|
RikkaBlue/ArknightsAutoHelper
|
770d109403bae937efeb59bc6628bd43631e7d1a
|
[
"MIT"
] | 254
|
2019-05-13T09:06:54.000Z
|
2022-03-16T09:47:44.000Z
|
import sys
import os
if sys.stdout is None:
    # Under a windowless interpreter (e.g. pythonw) the standard streams can
    # be None, so any write/read would raise.  Route all three streams to
    # os.devnull, opened 'r+' so both reading and writing are accepted.
    sys.stdout = open(os.devnull, 'r+')
    sys.stderr = sys.stdout
    sys.stdin = sys.stdout
| 19.857143
| 39
| 0.661871
| 23
| 139
| 4
| 0.521739
| 0.391304
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.215827
| 139
| 6
| 40
| 23.166667
| 0.844037
| 0
| 0
| 0
| 0
| 0
| 0.014388
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
079d2379827a8bcd60e7ba7a39f2178386d4980e
| 12,810
|
py
|
Python
|
tests/python_client/base/collection_wrapper.py
|
binbinlv/milvus
|
8873be6a2b5ccea2d59eccc0c05362ffe673f41f
|
[
"Apache-2.0"
] | null | null | null |
tests/python_client/base/collection_wrapper.py
|
binbinlv/milvus
|
8873be6a2b5ccea2d59eccc0c05362ffe673f41f
|
[
"Apache-2.0"
] | null | null | null |
tests/python_client/base/collection_wrapper.py
|
binbinlv/milvus
|
8873be6a2b5ccea2d59eccc0c05362ffe673f41f
|
[
"Apache-2.0"
] | null | null | null |
import sys
import time
from pymilvus import Collection
sys.path.append("..")
from check.func_check import ResponseChecker
from utils.api_request import api_request
from utils.util_log import test_log as log
from pymilvus.grpc_gen.common_pb2 import ConsistencyLevel
TIMEOUT = 20
# keep small timeout for stability tests
# TIMEOUT = 5
class ApiCollectionWrapper:
collection = None
def init_collection(self, name, schema=None, using="default", shards_num=2, check_task=None, check_items=None, **kwargs):
consistency_level = kwargs.get("consistency_level", ConsistencyLevel.Strong)
kwargs.update({"consistency_level": consistency_level})
""" In order to distinguish the same name of collection """
func_name = sys._getframe().f_code.co_name
res, is_succ = api_request([Collection, name, schema, using, shards_num], **kwargs)
self.collection = res if is_succ else None
check_result = ResponseChecker(res, func_name, check_task, check_items, is_succ,
name=name, schema=schema, using=using, shards_num=shards_num, **kwargs).run()
return res, check_result
@property
def schema(self):
return self.collection.schema
@property
def description(self):
return self.collection.description
@property
def name(self):
return self.collection.name
@property
def is_empty(self):
return self.collection.is_empty
@property
def num_entities(self):
return self.collection.num_entities
@property
def primary_field(self):
return self.collection.primary_field
@property
def _shards_num(self):
return self.collection._shards_num
def construct_from_dataframe(self, name, dataframe, check_task=None, check_items=None, **kwargs):
func_name = sys._getframe().f_code.co_name
res, is_succ = api_request([Collection.construct_from_dataframe, name, dataframe], **kwargs)
self.collection = res[0] if is_succ else None
check_result = ResponseChecker(res, func_name, check_task, check_items, is_succ,
name=name, dataframe=dataframe, **kwargs).run()
return res, check_result
def drop(self, check_task=None, check_items=None, **kwargs):
timeout = kwargs.get("timeout", TIMEOUT)
kwargs.update({"timeout": timeout})
func_name = sys._getframe().f_code.co_name
res, check = api_request([self.collection.drop], **kwargs)
check_result = ResponseChecker(res, func_name, check_task, check_items, check, **kwargs).run()
return res, check_result
def load(self, partition_names=None, check_task=None, check_items=None, **kwargs):
timeout = kwargs.get("timeout", TIMEOUT)
kwargs.update({"timeout": timeout})
func_name = sys._getframe().f_code.co_name
res, check = api_request([self.collection.load, partition_names], **kwargs)
check_result = ResponseChecker(res, func_name, check_task, check_items, check,
partition_names=partition_names, **kwargs).run()
return res, check_result
def release(self, check_task=None, check_items=None, **kwargs):
    """Release the collection from memory and validate the response."""
    kwargs.setdefault("timeout", TIMEOUT)
    func_name = sys._getframe().f_code.co_name
    res, succeeded = api_request([self.collection.release], **kwargs)
    check_result = ResponseChecker(res, func_name, check_task, check_items,
                                   succeeded, **kwargs).run()
    return res, check_result
def insert(self, data, partition_name=None, check_task=None, check_items=None, **kwargs):
    """Insert ``data`` into the collection and validate the response.

    Returns a ``(response, check_result)`` tuple like the sibling wrappers.
    """
    timeout = kwargs.get("timeout", TIMEOUT)
    kwargs.update({"timeout": timeout})
    func_name = sys._getframe().f_code.co_name
    res, check = api_request([self.collection.insert, data, partition_name], **kwargs)
    # Fix: the checker kwarg was misspelled `dat=data`, so the checker never
    # received the inserted payload under the expected `data` key.
    check_result = ResponseChecker(res, func_name, check_task, check_items, check,
                                   data=data, partition_name=partition_name,
                                   **kwargs).run()
    return res, check_result
def search(self, data, anns_field, param, limit, expr=None,
           partition_names=None, output_fields=None, timeout=None, round_decimal=-1,
           check_task=None, check_items=None, **kwargs):
    """Run a vector search on the collection and validate the response.

    Positional order in the api_request list mirrors Collection.search's
    signature; `round_decimal` is forwarded but not echoed to the checker.
    """
    timeout = TIMEOUT if timeout is None else timeout
    func_name = sys._getframe().f_code.co_name
    res, check = api_request([self.collection.search, data, anns_field, param, limit,
                              expr, partition_names, output_fields, timeout, round_decimal], **kwargs)
    check_result = ResponseChecker(res, func_name, check_task, check_items, check,
                                   data=data, anns_field=anns_field, param=param, limit=limit,
                                   expr=expr, partition_names=partition_names,
                                   output_fields=output_fields,
                                   timeout=timeout, **kwargs).run()
    return res, check_result
def query(self, expr, output_fields=None, partition_names=None, timeout=None, check_task=None, check_items=None,
          **kwargs):
    """Query entities matching ``expr`` and validate the response."""
    timeout = TIMEOUT if timeout is None else timeout
    func_name = sys._getframe().f_code.co_name
    # Fix: forward **kwargs to api_request like every sibling wrapper method;
    # previously extra keyword arguments were silently dropped here.
    res, check = api_request([self.collection.query, expr, output_fields, partition_names, timeout], **kwargs)
    check_result = ResponseChecker(res, func_name, check_task, check_items, check,
                                   expression=expr, partition_names=partition_names,
                                   output_fields=output_fields,
                                   timeout=timeout, **kwargs).run()
    return res, check_result
@property
def partitions(self):
    # Partitions of the underlying Collection.
    return self.collection.partitions
def partition(self, partition_name, check_task=None, check_items=None):
    """Fetch a partition handle by name and validate the response."""
    func_name = sys._getframe().f_code.co_name
    res, succeeded = api_request([self.collection.partition, partition_name])
    check_result = ResponseChecker(res, func_name, check_task, check_items,
                                   succeeded, partition_name=partition_name).run()
    return res, check_result
def has_partition(self, partition_name, check_task=None, check_items=None):
    """Check whether a partition exists and validate the response."""
    func_name = sys._getframe().f_code.co_name
    res, succeeded = api_request([self.collection.has_partition, partition_name])
    check_result = ResponseChecker(res, func_name, check_task, check_items,
                                   succeeded, partition_name=partition_name).run()
    return res, check_result
def drop_partition(self, partition_name, check_task=None, check_items=None, **kwargs):
    """Drop a partition by name and validate the response."""
    kwargs.setdefault("timeout", TIMEOUT)
    func_name = sys._getframe().f_code.co_name
    res, succeeded = api_request([self.collection.drop_partition, partition_name], **kwargs)
    check_result = ResponseChecker(res, func_name, check_task, check_items, succeeded,
                                   partition_name=partition_name, **kwargs).run()
    return res, check_result
def create_partition(self, partition_name, check_task=None, check_items=None, description=""):
    """Create a partition on the collection and validate the response."""
    func_name = sys._getframe().f_code.co_name
    res, check = api_request([self.collection.create_partition, partition_name, description])
    check_result = ResponseChecker(res, func_name, check_task, check_items, check,
                                   partition_name=partition_name).run()
    return res, check_result
@property
def indexes(self):
    # Indexes defined on the underlying Collection.
    return self.collection.indexes
def index(self, check_task=None, check_items=None):
    """Return the collection's index and validate the response."""
    func_name = sys._getframe().f_code.co_name
    res, succeeded = api_request([self.collection.index])
    check_result = ResponseChecker(res, func_name, check_task, check_items, succeeded).run()
    return res, check_result
def create_index(self, field_name, index_params, check_task=None, check_items=None, **kwargs):
    """Create an index on ``field_name`` and validate the response."""
    # Index builds are slow, so the default timeout is doubled here.
    timeout = kwargs.get("timeout", TIMEOUT * 2)
    kwargs.update({"timeout": timeout})
    func_name = sys._getframe().f_code.co_name
    res, check = api_request([self.collection.create_index, field_name, index_params], **kwargs)
    check_result = ResponseChecker(res, func_name, check_task, check_items, check,
                                   field_name=field_name, index_params=index_params, **kwargs).run()
    return res, check_result
def has_index(self, check_task=None, check_items=None):
    """Check whether the collection has an index and validate the response."""
    func_name = sys._getframe().f_code.co_name
    res, succeeded = api_request([self.collection.has_index])
    check_result = ResponseChecker(res, func_name, check_task, check_items, succeeded).run()
    return res, check_result
def drop_index(self, check_task=None, check_items=None, **kwargs):
    """Drop the collection's index and validate the response."""
    kwargs.setdefault("timeout", TIMEOUT)
    func_name = sys._getframe().f_code.co_name
    res, succeeded = api_request([self.collection.drop_index], **kwargs)
    check_result = ResponseChecker(res, func_name, check_task, check_items, succeeded, **kwargs).run()
    return res, check_result
def create_alias(self, alias_name, check_task=None, check_items=None, **kwargs):
    """Create an alias for the collection and validate the response."""
    kwargs.setdefault("timeout", TIMEOUT)
    func_name = sys._getframe().f_code.co_name
    res, succeeded = api_request([self.collection.create_alias, alias_name], **kwargs)
    check_result = ResponseChecker(res, func_name, check_task, check_items, succeeded, **kwargs).run()
    return res, check_result
def drop_alias(self, alias_name, check_task=None, check_items=None, **kwargs):
    """Drop an alias of the collection and validate the response."""
    kwargs.setdefault("timeout", TIMEOUT)
    func_name = sys._getframe().f_code.co_name
    res, succeeded = api_request([self.collection.drop_alias, alias_name], **kwargs)
    check_result = ResponseChecker(res, func_name, check_task, check_items, succeeded, **kwargs).run()
    return res, check_result
def alter_alias(self, alias_name, check_task=None, check_items=None, **kwargs):
    """Re-point an existing alias at this collection and validate the response."""
    kwargs.setdefault("timeout", TIMEOUT)
    func_name = sys._getframe().f_code.co_name
    res, succeeded = api_request([self.collection.alter_alias, alias_name], **kwargs)
    check_result = ResponseChecker(res, func_name, check_task, check_items, succeeded, **kwargs).run()
    return res, check_result
def delete(self, expr, partition_name=None, timeout=None, check_task=None, check_items=None, **kwargs):
    """Delete entities matching ``expr`` and validate the response."""
    timeout = TIMEOUT if timeout is None else timeout
    func_name = sys._getframe().f_code.co_name
    res, check = api_request([self.collection.delete, expr, partition_name, timeout], **kwargs)
    check_result = ResponseChecker(res, func_name, check_task, check_items, check, **kwargs).run()
    return res, check_result
def compact(self, timeout=None, check_task=None, check_items=None, **kwargs):
    """Trigger a manual compaction and validate the response."""
    effective_timeout = timeout if timeout is not None else TIMEOUT
    func_name = sys._getframe().f_code.co_name
    res, succeeded = api_request([self.collection.compact, effective_timeout], **kwargs)
    check_result = ResponseChecker(res, func_name, check_task, check_items, succeeded, **kwargs).run()
    return res, check_result
def get_compaction_state(self, timeout=None, check_task=None, check_items=None, **kwargs):
    """Fetch the current compaction state and validate the response."""
    effective_timeout = timeout if timeout is not None else TIMEOUT
    func_name = sys._getframe().f_code.co_name
    res, succeeded = api_request([self.collection.get_compaction_state, effective_timeout], **kwargs)
    check_result = ResponseChecker(res, func_name, check_task, check_items, succeeded, **kwargs).run()
    return res, check_result
def get_compaction_plans(self, timeout=None, check_task=None, check_items=None, **kwargs):
    """Fetch the compaction plans and validate the response."""
    effective_timeout = timeout if timeout is not None else TIMEOUT
    func_name = sys._getframe().f_code.co_name
    res, succeeded = api_request([self.collection.get_compaction_plans, effective_timeout], **kwargs)
    check_result = ResponseChecker(res, func_name, check_task, check_items, succeeded, **kwargs).run()
    return res, check_result
def wait_for_compaction_completed(self, timeout=None, **kwargs):
    """Block until compaction finishes; log and return the raw result.

    Unlike the sibling wrappers, this calls the collection directly and does
    not go through api_request/ResponseChecker.
    """
    timeout = TIMEOUT if timeout is None else timeout
    res = self.collection.wait_for_compaction_completed(timeout, **kwargs)
    log.debug(res)
    return res
| 47.798507
| 125
| 0.662139
| 1,567
| 12,810
| 5.153797
| 0.074665
| 0.051263
| 0.048291
| 0.051263
| 0.76263
| 0.761887
| 0.755201
| 0.734894
| 0.721521
| 0.718673
| 0
| 0.000917
| 0.234192
| 12,810
| 267
| 126
| 47.977528
| 0.822324
| 0.004996
| 0
| 0.52381
| 0
| 0
| 0.014429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157143
| false
| 0
| 0.033333
| 0.042857
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
07bbd5787ae12b2087bcb08b40434bb0efe7ca80
| 107
|
py
|
Python
|
gmailcopy/__main__.py
|
theSage21/gmailcopy
|
d3c63eb66a312fa27714b0f915525f6f788335bc
|
[
"BSD-3-Clause"
] | 2
|
2020-10-21T20:25:13.000Z
|
2020-10-22T06:40:55.000Z
|
gmailcopy/__main__.py
|
theSage21/gmailcopy
|
d3c63eb66a312fa27714b0f915525f6f788335bc
|
[
"BSD-3-Clause"
] | null | null | null |
gmailcopy/__main__.py
|
theSage21/gmailcopy
|
d3c63eb66a312fa27714b0f915525f6f788335bc
|
[
"BSD-3-Clause"
] | null | null | null |
print(
"""
Please use one of the following:
python -m gmailcopy.core
python -m gmailcopy.server
"""
)
| 11.888889
| 32
| 0.682243
| 15
| 107
| 4.866667
| 0.8
| 0.191781
| 0.438356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.196262
| 107
| 8
| 33
| 13.375
| 0.848837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
07c9aaacbd4ad6cdacd28c218d1b2cb2accd332b
| 49
|
py
|
Python
|
backend/medtagger/api/core/__init__.py
|
kolszewska/MedTagger
|
c691c822dd23a9fb402d1314e7fe2e6bde898e9c
|
[
"Apache-2.0"
] | 71
|
2019-01-31T19:50:31.000Z
|
2022-02-20T07:36:49.000Z
|
backend/medtagger/api/core/__init__.py
|
kolszewska/MedTagger
|
c691c822dd23a9fb402d1314e7fe2e6bde898e9c
|
[
"Apache-2.0"
] | 379
|
2019-02-16T19:12:01.000Z
|
2022-03-11T23:12:24.000Z
|
backend/medtagger/api/core/__init__.py
|
kolszewska/MedTagger
|
c691c822dd23a9fb402d1314e7fe2e6bde898e9c
|
[
"Apache-2.0"
] | 16
|
2019-01-31T16:44:39.000Z
|
2022-02-14T15:23:29.000Z
|
"""Module responsible for all Core endpoints."""
| 24.5
| 48
| 0.734694
| 6
| 49
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122449
| 49
| 1
| 49
| 49
| 0.837209
| 0.857143
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
07dcbfad946e6041155d041ddd60d7dd3bde69c3
| 66
|
py
|
Python
|
vulcanai/__init__.py
|
rfratila/Vulcan
|
a8e0d76dcb487920ed5f0aef5c269b6c61cb41c4
|
[
"Apache-2.0"
] | 13
|
2017-09-02T15:49:36.000Z
|
2018-05-18T19:51:52.000Z
|
vulcanai/__init__.py
|
priyatharsan/Vulcan
|
15a196f1b267ec1c9cc88304b93508ca68c94fc1
|
[
"Apache-2.0"
] | 7
|
2017-09-01T02:09:36.000Z
|
2018-06-14T20:44:21.000Z
|
vulcanai/__init__.py
|
priyatharsan/Vulcan
|
15a196f1b267ec1c9cc88304b93508ca68c94fc1
|
[
"Apache-2.0"
] | 4
|
2017-10-14T17:38:53.000Z
|
2018-05-20T17:41:02.000Z
|
import net
import utils
import selu
import mnist_loader
import ops
| 13.2
| 19
| 0.863636
| 11
| 66
| 5.090909
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 66
| 5
| 20
| 13.2
| 0.982456
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ed0024ff0b9fbe9435bf5125eb690d56d1ecf5ea
| 21,174
|
py
|
Python
|
cynetworkx/algorithms/tree/mst.py
|
Viech/cynetworkx
|
01a37859c67b752392e9e783c949084964eef2cf
|
[
"BSD-3-Clause"
] | 12
|
2019-07-23T08:07:53.000Z
|
2022-03-09T06:13:16.000Z
|
cynetworkx/algorithms/tree/mst.py
|
Viech/cynetworkx
|
01a37859c67b752392e9e783c949084964eef2cf
|
[
"BSD-3-Clause"
] | 7
|
2019-08-30T07:00:00.000Z
|
2021-12-30T08:02:56.000Z
|
cynetworkx/algorithms/tree/mst.py
|
Viech/cynetworkx
|
01a37859c67b752392e9e783c949084964eef2cf
|
[
"BSD-3-Clause"
] | 5
|
2020-10-10T03:40:32.000Z
|
2021-11-23T12:28:53.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 NetworkX Developers
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# Loïc Séguin-C. <loicseguin@gmail.com>
# All rights reserved.
# BSD license.
"""
Algorithms for calculating min/max spanning trees/forests.
"""
from heapq import heappop, heappush
from operator import itemgetter
from itertools import count
from math import isnan
import cynetworkx as nx
from cynetworkx.utils import UnionFind, not_implemented_for
# Public API of this module.
__all__ = [
    'minimum_spanning_edges', 'maximum_spanning_edges',
    'minimum_spanning_tree', 'maximum_spanning_tree',
]
# @not_implemented_for('multigraph')
def boruvka_mst_edges(G, minimum=True, weight='weight',
                      keys=False, data=True, ignore_nan=False):
    """Iterate over edges of a Borůvka's algorithm min/max spanning tree.

    Parameters
    ----------
    G : NetworkX Graph
        The edges of `G` must have distinct weights,
        otherwise the edges may not form a tree.

    minimum : bool (default: True)
        Find the minimum (True) or maximum (False) spanning tree.

    weight : string (default: 'weight')
        The name of the edge attribute holding the edge weights.

    keys : bool (default: False)
        This argument is ignored since this function is not
        implemented for multigraphs; it exists only for consistency
        with the other minimum spanning tree functions.

    data : bool (default: True)
        Flag for whether to yield edge attribute dicts.
        If True, yield edges `(u, v, d)`, where `d` is the attribute dict.
        If False, yield edges `(u, v)`.

    ignore_nan : bool (default: False)
        If a NaN is found as an edge weight normally an exception is raised.
        If `ignore_nan is True` then that edge is ignored instead.
    """
    # Initialize a forest, assuming initially that it is the discrete
    # partition of the nodes of the graph.
    forest = UnionFind(G)

    def best_edge(component):
        """Returns the optimum (minimum or maximum) edge on the edge
        boundary of the given set of nodes.

        A return value of ``None`` indicates an empty boundary.
        """
        # Weights are negated via `sign` when the maximum tree is requested,
        # so "minimum of signed weight" covers both directions.
        sign = 1 if minimum else -1
        minwt = float('inf')
        boundary = None
        for e in nx.edge_boundary(G, component, data=True):
            wt = e[-1].get(weight, 1) * sign
            if isnan(wt):
                if ignore_nan:
                    continue
                msg = "NaN found as an edge weight. Edge %s"
                raise ValueError(msg % (e,))
            if wt < minwt:
                minwt = wt
                boundary = e
        return boundary

    # Determine the optimum edge in the edge boundary of each component
    # in the forest.
    best_edges = (best_edge(component) for component in forest.to_sets())
    best_edges = [edge for edge in best_edges if edge is not None]
    # If each entry was ``None``, that means the graph was disconnected,
    # so we are done generating the forest.
    while best_edges:
        # Determine the optimum edge in the edge boundary of each
        # component in the forest.
        #
        # This must be a sequence, not an iterator. In this list, the
        # same edge may appear twice, in different orientations (but
        # that's okay, since a union operation will be called on the
        # endpoints the first time it is seen, but not the second time).
        #
        # Any ``None`` indicates that the edge boundary for that
        # component was empty, so that part of the forest has been
        # completed.
        #
        # TODO This can be parallelized, both in the outer loop over
        # each component in the forest and in the computation of the
        # minimum. (Same goes for the identical lines outside the loop.)
        best_edges = (best_edge(component) for component in forest.to_sets())
        best_edges = [edge for edge in best_edges if edge is not None]

        # Join trees in the forest using the best edges, and yield that
        # edge, since it is part of the spanning tree.
        #
        # TODO This loop can be parallelized, to an extent (the union
        # operation must be atomic).
        for u, v, d in best_edges:
            if forest[u] != forest[v]:
                if data:
                    yield u, v, d
                else:
                    yield u, v
                forest.union(u, v)
def kruskal_mst_edges(G, minimum, weight='weight',
                      keys=True, data=True, ignore_nan=False):
    """Iterate over edges of a Kruskal's algorithm min/max spanning tree.

    Parameters
    ----------
    G : NetworkX Graph
        The graph holding the tree of interest.

    minimum : bool (default: True)
        Find the minimum (True) or maximum (False) spanning tree.

    weight : string (default: 'weight')
        The name of the edge attribute holding the edge weights.

    keys : bool (default: True)
        If `G` is a multigraph, `keys` controls whether edge keys are yielded.
        Otherwise `keys` is ignored.

    data : bool (default: True)
        Flag for whether to yield edge attribute dicts.
        If True, yield edges `(u, v, d)`, where `d` is the attribute dict.
        If False, yield edges `(u, v)`.

    ignore_nan : bool (default: False)
        If a NaN is found as an edge weight normally an exception is raised.
        If `ignore_nan is True` then that edge is ignored instead.
    """
    subtrees = UnionFind()
    # Two nearly-identical generators: multigraph edges carry an extra key
    # slot, so the tuple arity differs between the branches.
    if G.is_multigraph():
        edges = G.edges(keys=True, data=True)

        def filter_nan_edges(edges=edges, weight=weight):
            sign = 1 if minimum else -1
            for u, v, k, d in edges:
                wt = d.get(weight, 1) * sign
                if isnan(wt):
                    if ignore_nan:
                        continue
                    msg = "NaN found as an edge weight. Edge %s"
                    raise ValueError(msg % ((u, v, k, d),))
                yield wt, u, v, k, d
    else:
        edges = G.edges(data=True)

        def filter_nan_edges(edges=edges, weight=weight):
            sign = 1 if minimum else -1
            for u, v, d in edges:
                wt = d.get(weight, 1) * sign
                if isnan(wt):
                    if ignore_nan:
                        continue
                    msg = "NaN found as an edge weight. Edge %s"
                    raise ValueError(msg % ((u, v, d),))
                yield wt, u, v, d
    # Sort by signed weight so edges are considered cheapest-first (or most
    # expensive first when `minimum` is False).
    edges = sorted(filter_nan_edges(), key=itemgetter(0))
    # Multigraphs need to handle edge keys in addition to edge data.
    if G.is_multigraph():
        for wt, u, v, k, d in edges:
            if subtrees[u] != subtrees[v]:
                if keys:
                    if data:
                        yield u, v, k, d
                    else:
                        yield u, v, k
                else:
                    if data:
                        yield u, v, d
                    else:
                        yield u, v
                subtrees.union(u, v)
    else:
        for wt, u, v, d in edges:
            if subtrees[u] != subtrees[v]:
                if data:
                    yield (u, v, d)
                else:
                    yield (u, v)
                subtrees.union(u, v)
def prim_mst_edges(G, minimum, weight='weight',
                   keys=True, data=True, ignore_nan=False):
    """Iterate over edges of Prim's algorithm min/max spanning tree.

    Parameters
    ----------
    G : NetworkX Graph
        The graph holding the tree of interest.

    minimum : bool (default: True)
        Find the minimum (True) or maximum (False) spanning tree.

    weight : string (default: 'weight')
        The name of the edge attribute holding the edge weights.

    keys : bool (default: True)
        If `G` is a multigraph, `keys` controls whether edge keys are yielded.
        Otherwise `keys` is ignored.

    data : bool (default: True)
        Flag for whether to yield edge attribute dicts.
        If True, yield edges `(u, v, d)`, where `d` is the attribute dict.
        If False, yield edges `(u, v)`.

    ignore_nan : bool (default: False)
        If a NaN is found as an edge weight normally an exception is raised.
        If `ignore_nan is True` then that edge is ignored instead.
    """
    is_multigraph = G.is_multigraph()
    push = heappush
    pop = heappop

    nodes = list(G)
    # Tie-breaking counter: heap entries compare on (weight, counter, ...),
    # so nodes/dicts never need to be comparable.
    c = count()

    sign = 1 if minimum else -1

    # NOTE(review): nodes.pop(0) and nodes.remove(v) below are O(n) list
    # operations; a set/deque would be asymptotically faster — confirm
    # before changing since iteration order affects the edges yielded.
    while nodes:
        u = nodes.pop(0)
        frontier = []
        visited = [u]
        if is_multigraph:
            for v, keydict in G.adj[u].items():
                for k, d in keydict.items():
                    wt = d.get(weight, 1) * sign
                    if isnan(wt):
                        if ignore_nan:
                            continue
                        msg = "NaN found as an edge weight. Edge %s"
                        raise ValueError(msg % ((u, v, k, d),))
                    push(frontier, (wt, next(c), u, v, k, d))
        else:
            for v, d in G.adj[u].items():
                wt = d.get(weight, 1) * sign
                if isnan(wt):
                    if ignore_nan:
                        continue
                    msg = "NaN found as an edge weight. Edge %s"
                    raise ValueError(msg % ((u, v, d),))
                push(frontier, (wt, next(c), u, v, d))
        while frontier:
            if is_multigraph:
                W, _, u, v, k, d = pop(frontier)
            else:
                W, _, u, v, d = pop(frontier)
            if v in visited:
                continue
            # Multigraphs need to handle edge keys in addition to edge data.
            if is_multigraph and keys:
                if data:
                    yield u, v, k, d
                else:
                    yield u, v, k
            else:
                if data:
                    yield u, v, d
                else:
                    yield u, v
            # update frontier
            visited.append(v)
            nodes.remove(v)
            if is_multigraph:
                for w, keydict in G.adj[v].items():
                    if w in visited:
                        continue
                    for k2, d2 in keydict.items():
                        new_weight = d2.get(weight, 1) * sign
                        push(frontier, (new_weight, next(c), v, w, k2, d2))
            else:
                for w, d2 in G.adj[v].items():
                    if w in visited:
                        continue
                    new_weight = d2.get(weight, 1) * sign
                    push(frontier, (new_weight, next(c), v, w, d2))
# Dispatch table mapping algorithm names to edge-generator implementations;
# both the ASCII and the accented spelling of Borůvka are accepted.
ALGORITHMS = {
    'boruvka': boruvka_mst_edges,
    u'borůvka': boruvka_mst_edges,
    'kruskal': kruskal_mst_edges,
    'prim': prim_mst_edges
}
# @not_implemented_for('directed')
def minimum_spanning_edges(G, algorithm='kruskal', weight='weight',
                           keys=True, data=True, ignore_nan=False):
    """Generate edges in a minimum spanning forest of an undirected
    weighted graph.

    A minimum spanning tree is a subgraph of the graph (a tree)
    with the minimum sum of edge weights. A spanning forest is a
    union of the spanning trees for each connected component of the graph.

    Parameters
    ----------
    G : undirected Graph
        An undirected graph. If `G` is connected, then the algorithm finds a
        spanning tree. Otherwise, a spanning forest is found.

    algorithm : string
        The algorithm to use when finding a minimum spanning tree. Valid
        choices are 'kruskal', 'prim', or 'boruvka'. The default is 'kruskal'.

    weight : string
        Edge data key to use for weight (default 'weight').

    keys : bool
        Whether to yield edge key in multigraphs in addition to the edge.
        If `G` is not a multigraph, this is ignored.

    data : bool, optional
        If True yield the edge data along with the edge.

    ignore_nan : bool (default: False)
        If a NaN is found as an edge weight normally an exception is raised.
        If `ignore_nan is True` then that edge is ignored instead.

    Returns
    -------
    edges : iterator
        An iterator over edges in a minimum spanning tree of `G`.
        Edges connecting nodes `u` and `v` are represented as tuples:
        `(u, v, k, d)` or `(u, v, k)` or `(u, v, d)` or `(u, v)`

        If `G` is a multigraph, `keys` indicates whether the edge key `k` will
        be reported in the third position in the edge tuple. `data` indicates
        whether the edge datadict `d` will appear at the end of the edge tuple.

        If `G` is not a multigraph, the tuples are `(u, v, d)` if `data` is True
        or `(u, v)` if `data` is False.

    Examples
    --------
    >>> from cynetworkx.algorithms import tree

    Find minimum spanning edges by Kruskal's algorithm

    >>> G = nx.cycle_graph(4)
    >>> G.add_edge(0, 3, weight=2)
    >>> mst = tree.minimum_spanning_edges(G, algorithm='kruskal', data=False)
    >>> edgelist = list(mst)
    >>> sorted(edgelist)
    [(0, 1), (1, 2), (2, 3)]

    Find minimum spanning edges by Prim's algorithm

    >>> G = nx.cycle_graph(4)
    >>> G.add_edge(0, 3, weight=2)
    >>> mst = tree.minimum_spanning_edges(G, algorithm='prim', data=False)
    >>> edgelist = list(mst)
    >>> sorted(edgelist)
    [(0, 1), (1, 2), (2, 3)]

    Notes
    -----
    For Borůvka's algorithm, each edge must have a weight attribute, and
    each edge weight must be distinct.

    For the other algorithms, if the graph edges do not have a weight
    attribute a default weight of 1 will be used.

    Modified code from David Eppstein, April 2006
    http://www.ics.uci.edu/~eppstein/PADS/
    """
    # Look up the edge-generator for the requested algorithm; unknown names
    # raise ValueError with the offending choice in the message.
    try:
        algo = ALGORITHMS[algorithm]
    except KeyError:
        msg = '{} is not a valid choice for an algorithm.'.format(algorithm)
        raise ValueError(msg)

    return algo(G, minimum=True, weight=weight, keys=keys, data=data,
                ignore_nan=ignore_nan)
# @not_implemented_for('directed')
def maximum_spanning_edges(G, algorithm='kruskal', weight='weight',
                           keys=True, data=True, ignore_nan=False):
    """Generate edges in a maximum spanning forest of an undirected
    weighted graph.

    A maximum spanning tree is a subgraph of the graph (a tree)
    with the maximum possible sum of edge weights. A spanning forest is a
    union of the spanning trees for each connected component of the graph.

    Parameters
    ----------
    G : undirected Graph
        An undirected graph. If `G` is connected, then the algorithm finds a
        spanning tree. Otherwise, a spanning forest is found.

    algorithm : string
        The algorithm to use when finding a maximum spanning tree. Valid
        choices are 'kruskal', 'prim', or 'boruvka'. The default is 'kruskal'.

    weight : string
        Edge data key to use for weight (default 'weight').

    keys : bool
        Whether to yield edge key in multigraphs in addition to the edge.
        If `G` is not a multigraph, this is ignored.

    data : bool, optional
        If True yield the edge data along with the edge.

    ignore_nan : bool (default: False)
        If a NaN is found as an edge weight normally an exception is raised.
        If `ignore_nan is True` then that edge is ignored instead.

    Returns
    -------
    edges : iterator
        An iterator over edges in a maximum spanning tree of `G`.
        Edges connecting nodes `u` and `v` are represented as tuples:
        `(u, v, k, d)` or `(u, v, k)` or `(u, v, d)` or `(u, v)`

        If `G` is a multigraph, `keys` indicates whether the edge key `k` will
        be reported in the third position in the edge tuple. `data` indicates
        whether the edge datadict `d` will appear at the end of the edge tuple.

        If `G` is not a multigraph, the tuples are `(u, v, d)` if `data` is True
        or `(u, v)` if `data` is False.

    Examples
    --------
    >>> from cynetworkx.algorithms import tree

    Find maximum spanning edges by Kruskal's algorithm

    >>> G = nx.cycle_graph(4)
    >>> G.add_edge(0, 3, weight=2)
    >>> mst = tree.maximum_spanning_edges(G, algorithm='kruskal', data=False)
    >>> edgelist = list(mst)
    >>> sorted(edgelist)
    [(0, 1), (0, 3), (1, 2)]

    Find maximum spanning edges by Prim's algorithm

    >>> G = nx.cycle_graph(4)
    >>> G.add_edge(0, 3, weight=2)  # assign weight 2 to edge 0-3
    >>> mst = tree.maximum_spanning_edges(G, algorithm='prim', data=False)
    >>> edgelist = list(mst)
    >>> sorted(edgelist)
    [(0, 1), (0, 3), (3, 2)]

    Notes
    -----
    For Borůvka's algorithm, each edge must have a weight attribute, and
    each edge weight must be distinct.

    For the other algorithms, if the graph edges do not have a weight
    attribute a default weight of 1 will be used.

    Modified code from David Eppstein, April 2006
    http://www.ics.uci.edu/~eppstein/PADS/
    """
    # Look up the edge-generator for the requested algorithm; unknown names
    # raise ValueError with the offending choice in the message.
    try:
        algo = ALGORITHMS[algorithm]
    except KeyError:
        msg = '{} is not a valid choice for an algorithm.'.format(algorithm)
        raise ValueError(msg)

    return algo(G, minimum=False, weight=weight, keys=keys, data=data,
                ignore_nan=ignore_nan)
def minimum_spanning_tree(G, weight='weight', algorithm='kruskal',
                          ignore_nan=False):
    """Returns a minimum spanning tree or forest on an undirected graph `G`.

    Parameters
    ----------
    G : undirected graph
        An undirected graph. If `G` is connected, then the algorithm finds a
        spanning tree. Otherwise, a spanning forest is found.

    weight : str
        Data key to use for edge weights.

    algorithm : string
        The algorithm to use when finding a minimum spanning tree. Valid
        choices are 'kruskal', 'prim', or 'boruvka'. The default is
        'kruskal'.

    ignore_nan : bool (default: False)
        If a NaN is found as an edge weight normally an exception is raised.
        If `ignore_nan is True` then that edge is ignored instead.

    Returns
    -------
    G : NetworkX Graph
        A minimum spanning tree or forest.

    Examples
    --------
    >>> G = nx.cycle_graph(4)
    >>> G.add_edge(0, 3, weight=2)
    >>> T = nx.minimum_spanning_tree(G)
    >>> sorted(T.edges(data=True))
    [(0, 1, {}), (1, 2, {}), (2, 3, {})]

    Notes
    -----
    For Borůvka's algorithm, each edge must have a weight attribute, and
    each edge weight must be distinct.

    For the other algorithms, if the graph edges do not have a weight
    attribute a default weight of 1 will be used.

    There may be more than one tree with the same minimum or maximum weight.
    See :mod:`cynetworkx.tree.recognition` for more detailed definitions.

    Isolated nodes with self-loops are in the tree as edgeless isolated nodes.
    """
    edges = minimum_spanning_edges(G, algorithm, weight, keys=True,
                                   data=True, ignore_nan=ignore_nan)
    # Materialize the generator before constructing T, mirroring
    # maximum_spanning_tree below: the edge set is fully computed from G
    # before any mutation of T, keeping the two functions consistent.
    edges = list(edges)
    T = G.fresh_copy()  # Same graph class as G
    T.graph.update(G.graph)
    T.add_nodes_from(G.nodes.items())
    T.add_edges_from(edges)
    return T
def maximum_spanning_tree(G, weight='weight', algorithm='kruskal',
                          ignore_nan=False):
    """Returns a maximum spanning tree or forest on an undirected graph `G`.

    Parameters
    ----------
    G : undirected graph
        An undirected graph. If `G` is connected, then the algorithm finds a
        spanning tree. Otherwise, a spanning forest is found.

    weight : str
        Data key to use for edge weights.

    algorithm : string
        The algorithm to use when finding a maximum spanning tree. Valid
        choices are 'kruskal', 'prim', or 'boruvka'. The default is
        'kruskal'.

    ignore_nan : bool (default: False)
        If a NaN is found as an edge weight normally an exception is raised.
        If `ignore_nan is True` then that edge is ignored instead.

    Returns
    -------
    G : NetworkX Graph
        A maximum spanning tree or forest.

    Examples
    --------
    >>> G = nx.cycle_graph(4)
    >>> G.add_edge(0, 3, weight=2)
    >>> T = nx.maximum_spanning_tree(G)
    >>> sorted(T.edges(data=True))
    [(0, 1, {}), (0, 3, {'weight': 2}), (1, 2, {})]

    Notes
    -----
    For Borůvka's algorithm, each edge must have a weight attribute, and
    each edge weight must be distinct.

    For the other algorithms, if the graph edges do not have a weight
    attribute a default weight of 1 will be used.

    There may be more than one tree with the same minimum or maximum weight.
    See :mod:`cynetworkx.tree.recognition` for more detailed definitions.

    Isolated nodes with self-loops are in the tree as edgeless isolated nodes.
    """
    edges = maximum_spanning_edges(G, algorithm, weight, keys=True,
                                   data=True, ignore_nan=ignore_nan)
    edges = list(edges)
    T = G.fresh_copy()  # Same graph class as G
    T.graph.update(G.graph)
    T.add_nodes_from(G.nodes.items())
    T.add_edges_from(edges)
    return T
| 34.598039
| 79
| 0.585341
| 2,909
| 21,174
| 4.209694
| 0.110347
| 0.007839
| 0.004655
| 0.012739
| 0.79724
| 0.7805
| 0.772742
| 0.771027
| 0.760738
| 0.756084
| 0
| 0.007444
| 0.321149
| 21,174
| 611
| 80
| 34.654664
| 0.844511
| 0.576084
| 0
| 0.576531
| 0
| 0
| 0.057005
| 0.010943
| 0
| 0
| 0
| 0.001637
| 0
| 1
| 0.05102
| false
| 0
| 0.030612
| 0
| 0.107143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ed023faaba2a5a57a583c53dcfb298a374fdc8f8
| 1,131
|
py
|
Python
|
tests/test_requests_support.py
|
JFF-Bohdan/vrc_t70
|
cdc3cc7eeee1fc4f73f255fae7469ff57a6530ec
|
[
"MIT"
] | null | null | null |
tests/test_requests_support.py
|
JFF-Bohdan/vrc_t70
|
cdc3cc7eeee1fc4f73f255fae7469ff57a6530ec
|
[
"MIT"
] | null | null | null |
tests/test_requests_support.py
|
JFF-Bohdan/vrc_t70
|
cdc3cc7eeee1fc4f73f255fae7469ff57a6530ec
|
[
"MIT"
] | null | null | null |
import binascii
from vrc_t70.request import VrcT70Request
def test_ping_request_to_01():
    """A ping request addressed to device 0x01 serializes to the known frame."""
    expected = "01012233000A"
    request = VrcT70Request(0x01, 0x01, 0x2233)
    hex_dump = binascii.hexlify(request.to_bytearray()).decode("ascii")
    assert hex_dump.lower() == expected.lower()
def test_ping_request_to_07():
    """A ping request addressed to device 0x07 serializes to the known frame."""
    expected = "070122330014"
    request = VrcT70Request(0x07, 0x01, 0x2233)
    hex_dump = binascii.hexlify(request.to_bytearray()).decode("ascii")
    assert hex_dump.lower() == expected.lower()
def test_ping_with_fake_payload():
    """A request carrying a 3-byte payload serializes payload and CRC correctly."""
    expected = "07012233030102039A"
    request = VrcT70Request(0x07, 0x01, 0x2233)
    request.data = bytearray([0x01, 0x02, 0x03])
    hex_dump = binascii.hexlify(request.to_bytearray()).decode("ascii")
    assert hex_dump.lower() == expected.lower()
def test_convert_to_bytes():
    """bytes(request) must produce the same frame as to_bytearray()."""
    expected = "07012233030102039A"
    request = VrcT70Request(0x07, 0x01, 0x2233)
    request.data = bytearray([0x01, 0x02, 0x03])
    hex_dump = binascii.hexlify(bytes(request)).decode("ascii")
    assert hex_dump.lower() == expected.lower()
| 22.62
| 49
| 0.691424
| 138
| 1,131
| 5.471014
| 0.268116
| 0.148344
| 0.068874
| 0.111258
| 0.784106
| 0.745695
| 0.719205
| 0.719205
| 0.719205
| 0.719205
| 0
| 0.145788
| 0.181256
| 1,131
| 49
| 50
| 23.081633
| 0.669546
| 0
| 0
| 0.642857
| 0
| 0
| 0.070734
| 0
| 0
| 0
| 0.070734
| 0
| 0.142857
| 1
| 0.142857
| false
| 0
| 0.071429
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ed04654767315d88ba87e44a4584b630457dae82
| 65,178
|
py
|
Python
|
test/test_pipeline.py
|
thimo72/haystack
|
85571cdd15f1c9592cf28121187ffef7d4827f83
|
[
"Apache-2.0"
] | null | null | null |
test/test_pipeline.py
|
thimo72/haystack
|
85571cdd15f1c9592cf28121187ffef7d4827f83
|
[
"Apache-2.0"
] | null | null | null |
test/test_pipeline.py
|
thimo72/haystack
|
85571cdd15f1c9592cf28121187ffef7d4827f83
|
[
"Apache-2.0"
] | null | null | null |
from pathlib import Path
import os
import json
from typing import Tuple
from unittest.mock import Mock
import pandas as pd
import pytest
from requests import PreparedRequest
import responses
import yaml
from haystack import __version__, Document, Answer, JoinAnswers
from haystack.document_stores.base import BaseDocumentStore
from haystack.document_stores.deepsetcloud import DeepsetCloudDocumentStore
from haystack.document_stores.elasticsearch import ElasticsearchDocumentStore
from haystack.nodes.other.join_docs import JoinDocuments
from haystack.nodes.base import BaseComponent
from haystack.nodes.retriever.base import BaseRetriever
from haystack.nodes.retriever.sparse import ElasticsearchRetriever
from haystack.pipelines import Pipeline, DocumentSearchPipeline, RootNode, ExtractiveQAPipeline
from haystack.pipelines.config import _validate_user_input, validate_config
from haystack.pipelines.utils import generate_code
from haystack.nodes import DensePassageRetriever, EmbeddingRetriever, RouteDocuments, PreProcessor, TextConverter
from conftest import MOCK_DC, DC_API_ENDPOINT, DC_API_KEY, DC_TEST_INDEX, SAMPLES_PATH, deepset_cloud_fixture
from haystack.utils.deepsetcloud import DeepsetCloudError
class ParentComponent(BaseComponent):
    """Minimal test component that declares a dependency on another component."""
    outgoing_edges = 1
    def __init__(self, dependent: BaseComponent) -> None:
        super().__init__()
        # Registering the dependency via set_config makes it show up in get_config() output.
        self.set_config(dependent=dependent)
class ParentComponent2(BaseComponent):
    """Second parent-component class; used to test two distinct parents sharing one dependency."""
    outgoing_edges = 1
    def __init__(self, dependent: BaseComponent) -> None:
        super().__init__()
        # Registering the dependency via set_config makes it show up in get_config() output.
        self.set_config(dependent=dependent)
class ChildComponent(BaseComponent):
    """Leaf test component; `some_key` distinguishes otherwise identical instances in configs."""
    def __init__(self, some_key: str = None) -> None:
        super().__init__()
        self.set_config(some_key=some_key)
@pytest.mark.elasticsearch
@pytest.mark.parametrize("document_store", ["elasticsearch"], indirect=True)
def test_load_and_save_yaml(document_store, tmp_path):
    """Round-trip test: load pipelines from YAML, run them, then save and compare the config."""
    # test correct load of indexing pipeline from yaml
    pipeline = Pipeline.load_from_yaml(
        SAMPLES_PATH / "pipeline" / "test_pipeline.yaml", pipeline_name="indexing_pipeline"
    )
    pipeline.run(file_paths=SAMPLES_PATH / "pdf" / "sample_pdf_1.pdf")
    # test correct load of query pipeline from yaml
    pipeline = Pipeline.load_from_yaml(SAMPLES_PATH / "pipeline" / "test_pipeline.yaml", pipeline_name="query_pipeline")
    prediction = pipeline.run(
        query="Who made the PDF specification?", params={"ESRetriever": {"top_k": 10}, "Reader": {"top_k": 3}}
    )
    assert prediction["query"] == "Who made the PDF specification?"
    assert prediction["answers"][0].answer == "Adobe Systems"
    assert "_debug" not in prediction.keys()
    # test invalid pipeline name
    with pytest.raises(Exception):
        Pipeline.load_from_yaml(path=SAMPLES_PATH / "pipeline" / "test_pipeline.yaml", pipeline_name="invalid")
    # test config export
    pipeline.save_to_yaml(tmp_path / "test.yaml")
    with open(tmp_path / "test.yaml", "r", encoding="utf-8") as stream:
        saved_yaml = stream.read()
    expected_yaml = f"""
components:
- name: ESRetriever
params:
document_store: ElasticsearchDocumentStore
type: ElasticsearchRetriever
- name: ElasticsearchDocumentStore
params:
index: haystack_test
label_index: haystack_test_label
type: ElasticsearchDocumentStore
- name: Reader
params:
model_name_or_path: deepset/roberta-base-squad2
no_ans_boost: -10
num_processes: 0
type: FARMReader
pipelines:
- name: query
nodes:
- inputs:
- Query
name: ESRetriever
- inputs:
- ESRetriever
name: Reader
type: Pipeline
version: {__version__}
"""
    # Whitespace-insensitive comparison: only the YAML content matters, not formatting.
    assert saved_yaml.replace(" ", "").replace("\n", "") == expected_yaml.replace(" ", "").replace("\n", "")
@pytest.mark.elasticsearch
@pytest.mark.parametrize("document_store", ["elasticsearch"], indirect=True)
def test_load_and_save_yaml_prebuilt_pipelines(document_store, tmp_path):
    """Same round-trip as test_load_and_save_yaml but through the prebuilt ExtractiveQAPipeline wrapper."""
    # populating index
    pipeline = Pipeline.load_from_yaml(
        SAMPLES_PATH / "pipeline" / "test_pipeline.yaml", pipeline_name="indexing_pipeline"
    )
    pipeline.run(file_paths=SAMPLES_PATH / "pdf" / "sample_pdf_1.pdf")
    # test correct load of query pipeline from yaml
    pipeline = ExtractiveQAPipeline.load_from_yaml(
        SAMPLES_PATH / "pipeline" / "test_pipeline.yaml", pipeline_name="query_pipeline"
    )
    prediction = pipeline.run(
        query="Who made the PDF specification?", params={"ESRetriever": {"top_k": 10}, "Reader": {"top_k": 3}}
    )
    assert prediction["query"] == "Who made the PDF specification?"
    assert prediction["answers"][0].answer == "Adobe Systems"
    assert "_debug" not in prediction.keys()
    # test invalid pipeline name
    with pytest.raises(Exception):
        ExtractiveQAPipeline.load_from_yaml(
            path=SAMPLES_PATH / "pipeline" / "test_pipeline.yaml", pipeline_name="invalid"
        )
    # test config export
    pipeline.save_to_yaml(tmp_path / "test.yaml")
    with open(tmp_path / "test.yaml", "r", encoding="utf-8") as stream:
        saved_yaml = stream.read()
    expected_yaml = f"""
components:
- name: ESRetriever
params:
document_store: ElasticsearchDocumentStore
type: ElasticsearchRetriever
- name: ElasticsearchDocumentStore
params:
index: haystack_test
label_index: haystack_test_label
type: ElasticsearchDocumentStore
- name: Reader
params:
model_name_or_path: deepset/roberta-base-squad2
no_ans_boost: -10
num_processes: 0
type: FARMReader
pipelines:
- name: query
nodes:
- inputs:
- Query
name: ESRetriever
- inputs:
- ESRetriever
name: Reader
type: Pipeline
version: {__version__}
"""
    # Whitespace-insensitive comparison: only the YAML content matters, not formatting.
    assert saved_yaml.replace(" ", "").replace("\n", "") == expected_yaml.replace(" ", "").replace("\n", "")
def test_load_tfidfretriever_yaml(tmp_path):
    """TfidfRetriever must raise until documents are written, then answer the query."""
    documents = [
        {
            "content": "A Doc specifically talking about haystack. Haystack can be used to scale QA models to large document collections."
        }
    ]
    pipeline = Pipeline.load_from_yaml(
        SAMPLES_PATH / "pipeline" / "test_pipeline_tfidfretriever.yaml", pipeline_name="query_pipeline"
    )
    # Running before any documents are indexed must fail with a clear error message.
    with pytest.raises(Exception) as exc_info:
        pipeline.run(
            query="What can be used to scale QA models to large document collections?",
            params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 3}},
        )
    exception_raised = str(exc_info.value)
    assert "Retrieval requires dataframe df and tf-idf matrix" in exception_raised
    # After writing documents, the same query must succeed.
    pipeline.get_node(name="Retriever").document_store.write_documents(documents=documents)
    prediction = pipeline.run(
        query="What can be used to scale QA models to large document collections?",
        params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 3}},
    )
    assert prediction["query"] == "What can be used to scale QA models to large document collections?"
    assert prediction["answers"][0].answer == "haystack"
@pytest.mark.elasticsearch
def test_to_code_creates_same_pipelines():
    """Code generated by Pipeline.to_code() must reconstruct pipelines with identical configs."""
    index_pipeline = Pipeline.load_from_yaml(
        SAMPLES_PATH / "pipeline" / "test_pipeline.yaml", pipeline_name="indexing_pipeline"
    )
    query_pipeline = Pipeline.load_from_yaml(
        SAMPLES_PATH / "pipeline" / "test_pipeline.yaml", pipeline_name="query_pipeline"
    )
    query_pipeline_code = query_pipeline.to_code(pipeline_variable_name="query_pipeline_from_code")
    index_pipeline_code = index_pipeline.to_code(pipeline_variable_name="index_pipeline_from_code")
    # Executing the generated code binds the reconstructed pipelines into locals().
    exec(query_pipeline_code)
    exec(index_pipeline_code)
    assert locals()["query_pipeline_from_code"] is not None
    assert locals()["index_pipeline_from_code"] is not None
    # The reconstructed pipelines must serialize to the same config as the originals.
    assert query_pipeline.get_config() == locals()["query_pipeline_from_code"].get_config()
    assert index_pipeline.get_config() == locals()["index_pipeline_from_code"].get_config()
def test_get_config_creates_dependent_component():
    """get_config() must emit an implicit entry for a component passed in as a dependency."""
    dependency = ChildComponent()
    pipeline = Pipeline()
    pipeline.add_node(component=ParentComponent(dependent=dependency), name="parent", inputs=["Query"])
    config = pipeline.get_config()
    expected_pipelines = [{"name": "query", "type": "Pipeline", "nodes": [{"name": "parent", "inputs": ["Query"]}]}]
    expected_components = [
        {"name": "parent", "type": "ParentComponent", "params": {"dependent": "ChildComponent"}},
        {"name": "ChildComponent", "type": "ChildComponent", "params": {}},
    ]
    for pipeline_entry in expected_pipelines:
        assert pipeline_entry in config["pipelines"]
    for component_entry in expected_components:
        assert component_entry in config["components"]
def test_get_config_creates_only_one_dependent_component_referenced_by_multiple_parents():
    """A dependency shared by two parents must appear exactly once in the config."""
    shared_child = ChildComponent()
    p_ensemble = Pipeline()
    p_ensemble.add_node(component=ParentComponent(dependent=shared_child), name="Parent1", inputs=["Query"])
    p_ensemble.add_node(component=ParentComponent2(dependent=shared_child), name="Parent2", inputs=["Query"])
    p_ensemble.add_node(component=JoinDocuments(join_mode="merge"), name="JoinResults", inputs=["Parent1", "Parent2"])
    config = p_ensemble.get_config()
    expected_components = [
        {"name": "Parent1", "type": "ParentComponent", "params": {"dependent": "ChildComponent"}},
        {"name": "ChildComponent", "type": "ChildComponent", "params": {}},
        {"name": "Parent2", "type": "ParentComponent2", "params": {"dependent": "ChildComponent"}},
        {"name": "JoinResults", "type": "JoinDocuments", "params": {"join_mode": "merge"}},
    ]
    expected_pipelines = [
        {
            "name": "query",
            "type": "Pipeline",
            "nodes": [
                {"name": "Parent1", "inputs": ["Query"]},
                {"name": "Parent2", "inputs": ["Query"]},
                {"name": "JoinResults", "inputs": ["Parent1", "Parent2"]},
            ],
        }
    ]
    for pipeline_entry in expected_pipelines:
        assert pipeline_entry in config["pipelines"]
    for component_entry in expected_components:
        assert component_entry in config["components"]
def test_get_config_creates_two_different_dependent_components_of_same_type():
    """Two distinct dependents of the same class must get unique names (suffix `_2`) in the config."""
    child_a = ChildComponent(some_key="A")
    child_b = ChildComponent(some_key="B")
    p_ensemble = Pipeline()
    p_ensemble.add_node(component=ParentComponent(dependent=child_a), name="ParentA", inputs=["Query"])
    p_ensemble.add_node(component=ParentComponent(dependent=child_b), name="ParentB", inputs=["Query"])
    p_ensemble.add_node(component=JoinDocuments(join_mode="merge"), name="JoinResults", inputs=["ParentA", "ParentB"])
    config = p_ensemble.get_config()
    expected_components = [
        {"name": "ParentA", "type": "ParentComponent", "params": {"dependent": "ChildComponent"}},
        {"name": "ChildComponent", "type": "ChildComponent", "params": {"some_key": "A"}},
        {"name": "ParentB", "type": "ParentComponent", "params": {"dependent": "ChildComponent_2"}},
        {"name": "ChildComponent_2", "type": "ChildComponent", "params": {"some_key": "B"}},
        {"name": "JoinResults", "type": "JoinDocuments", "params": {"join_mode": "merge"}},
    ]
    expected_pipelines = [
        {
            "name": "query",
            "type": "Pipeline",
            "nodes": [
                {"name": "ParentA", "inputs": ["Query"]},
                {"name": "ParentB", "inputs": ["Query"]},
                {"name": "JoinResults", "inputs": ["ParentA", "ParentB"]},
            ],
        }
    ]
    for pipeline_entry in expected_pipelines:
        assert pipeline_entry in config["pipelines"]
    for component_entry in expected_components:
        assert component_entry in config["components"]
def test_generate_code_simple_pipeline():
    """generate_code renders a single-retriever pipeline config into executable Python source."""
    config = {
        "components": [
            {
                "name": "retri",
                "type": "ElasticsearchRetriever",
                "params": {"document_store": "ElasticsearchDocumentStore", "top_k": 20},
            },
            {
                "name": "ElasticsearchDocumentStore",
                "type": "ElasticsearchDocumentStore",
                "params": {"index": "my-index"},
            },
        ],
        "pipelines": [{"name": "query", "type": "Pipeline", "nodes": [{"name": "retri", "inputs": ["Query"]}]}],
    }
    expected_code = "\n".join(
        [
            'elasticsearch_document_store = ElasticsearchDocumentStore(index="my-index")',
            "retri = ElasticsearchRetriever(document_store=elasticsearch_document_store, top_k=20)",
            "",
            "p = Pipeline()",
            'p.add_node(component=retri, name="retri", inputs=["Query"])',
        ]
    )
    generated = generate_code(pipeline_config=config, pipeline_variable_name="p", generate_imports=False)
    assert generated == expected_code
def test_generate_code_imports():
    """Generated code includes exactly the imports needed by the configured component types."""
    pipeline_config = {
        "components": [
            {"name": "DocumentStore", "type": "ElasticsearchDocumentStore"},
            {"name": "retri", "type": "ElasticsearchRetriever", "params": {"document_store": "DocumentStore"}},
            {"name": "retri2", "type": "EmbeddingRetriever", "params": {"document_store": "DocumentStore"}},
        ],
        "pipelines": [
            {
                "name": "Query",
                "type": "Pipeline",
                "nodes": [{"name": "retri", "inputs": ["Query"]}, {"name": "retri2", "inputs": ["Query"]}],
            }
        ],
    }
    expected_code = "\n".join(
        [
            "from haystack.document_stores import ElasticsearchDocumentStore",
            "from haystack.nodes import ElasticsearchRetriever, EmbeddingRetriever",
            "from haystack.pipelines import Pipeline",
            "",
            "document_store = ElasticsearchDocumentStore()",
            "retri = ElasticsearchRetriever(document_store=document_store)",
            "retri_2 = EmbeddingRetriever(document_store=document_store)",
            "",
            "p = Pipeline()",
            'p.add_node(component=retri, name="retri", inputs=["Query"])',
            'p.add_node(component=retri_2, name="retri2", inputs=["Query"])',
        ]
    )
    generated = generate_code(pipeline_config=pipeline_config, pipeline_variable_name="p", generate_imports=True)
    assert generated == expected_code
def test_generate_code_imports_no_pipeline_cls():
    """With add_pipeline_cls_import=False, the Pipeline import is omitted from generated code."""
    pipeline_config = {
        "components": [
            {"name": "DocumentStore", "type": "ElasticsearchDocumentStore"},
            {"name": "retri", "type": "ElasticsearchRetriever", "params": {"document_store": "DocumentStore"}},
        ],
        "pipelines": [{"name": "Query", "type": "Pipeline", "nodes": [{"name": "retri", "inputs": ["Query"]}]}],
    }
    expected_code = "\n".join(
        [
            "from haystack.document_stores import ElasticsearchDocumentStore",
            "from haystack.nodes import ElasticsearchRetriever",
            "",
            "document_store = ElasticsearchDocumentStore()",
            "retri = ElasticsearchRetriever(document_store=document_store)",
            "",
            "p = Pipeline()",
            'p.add_node(component=retri, name="retri", inputs=["Query"])',
        ]
    )
    generated = generate_code(
        pipeline_config=pipeline_config,
        pipeline_variable_name="p",
        generate_imports=True,
        add_pipeline_cls_import=False,
    )
    assert generated == expected_code
def test_generate_code_comment():
    """A multi-line comment is emitted as '# '-prefixed lines at the top of the generated code."""
    pipeline_config = {
        "components": [
            {"name": "DocumentStore", "type": "ElasticsearchDocumentStore"},
            {"name": "retri", "type": "ElasticsearchRetriever", "params": {"document_store": "DocumentStore"}},
        ],
        "pipelines": [{"name": "Query", "type": "Pipeline", "nodes": [{"name": "retri", "inputs": ["Query"]}]}],
    }
    comment = "This is my comment\n...and here is a new line"
    expected_code = "\n".join(
        [
            "# This is my comment",
            "# ...and here is a new line",
            "from haystack.document_stores import ElasticsearchDocumentStore",
            "from haystack.nodes import ElasticsearchRetriever",
            "from haystack.pipelines import Pipeline",
            "",
            "document_store = ElasticsearchDocumentStore()",
            "retri = ElasticsearchRetriever(document_store=document_store)",
            "",
            "p = Pipeline()",
            'p.add_node(component=retri, name="retri", inputs=["Query"])',
        ]
    )
    generated = generate_code(pipeline_config=pipeline_config, pipeline_variable_name="p", comment=comment)
    assert generated == expected_code
def test_generate_code_is_component_order_invariant():
    """generate_code must emit identical code regardless of component order in the config."""
    pipeline_config = {
        "pipelines": [
            {
                "name": "Query",
                "type": "Pipeline",
                "nodes": [
                    {"name": "EsRetriever", "inputs": ["Query"]},
                    {"name": "EmbeddingRetriever", "inputs": ["Query"]},
                    {"name": "JoinResults", "inputs": ["EsRetriever", "EmbeddingRetriever"]},
                ],
            }
        ]
    }
    doc_store = {"name": "ElasticsearchDocumentStore", "type": "ElasticsearchDocumentStore"}
    es_retriever = {
        "name": "EsRetriever",
        "type": "ElasticsearchRetriever",
        "params": {"document_store": "ElasticsearchDocumentStore"},
    }
    emb_retriever = {
        "name": "EmbeddingRetriever",
        "type": "EmbeddingRetriever",
        "params": {
            "document_store": "ElasticsearchDocumentStore",
            "embedding_model": "sentence-transformers/all-MiniLM-L6-v2",
        },
    }
    join_node = {"name": "JoinResults", "type": "JoinDocuments"}
    # Several permutations of the same four components; all must generate the same code.
    component_orders = [
        [doc_store, es_retriever, emb_retriever, join_node],
        [es_retriever, emb_retriever, join_node, doc_store],
        [join_node, es_retriever, emb_retriever, doc_store],
    ]
    expected_code = (
        "elasticsearch_document_store = ElasticsearchDocumentStore()\n"
        "es_retriever = ElasticsearchRetriever(document_store=elasticsearch_document_store)\n"
        'embedding_retriever = EmbeddingRetriever(document_store=elasticsearch_document_store, embedding_model="sentence-transformers/all-MiniLM-L6-v2")\n'
        "join_results = JoinDocuments()\n"
        "\n"
        "p = Pipeline()\n"
        'p.add_node(component=es_retriever, name="EsRetriever", inputs=["Query"])\n'
        'p.add_node(component=embedding_retriever, name="EmbeddingRetriever", inputs=["Query"])\n'
        'p.add_node(component=join_results, name="JoinResults", inputs=["EsRetriever", "EmbeddingRetriever"])'
    )
    for components in component_orders:
        # The config dict is reused and mutated on each iteration on purpose.
        pipeline_config["components"] = components
        code = generate_code(pipeline_config=pipeline_config, pipeline_variable_name="p", generate_imports=False)
        assert code == expected_code
# NOTE: parameter renamed from `input` to `user_input` — `input` shadows the builtin.
@pytest.mark.parametrize("user_input", ["\btest", " test", "#test", "+test", "\ttest", "\nt" "est", "test()"])
def test_validate_user_input_invalid(user_input):
    """Each malformed value must be rejected as an invalid config variable name."""
    with pytest.raises(ValueError, match="is not a valid config variable name"):
        _validate_user_input(user_input)
# NOTE: parameter renamed from `input` to `user_input` — `input` shadows the builtin.
@pytest.mark.parametrize(
    "user_input", ["test", "testName", "test_name", "test-name", "test-name1234", "http://localhost:8000/my-path"]
)
def test_validate_user_input_valid(user_input):
    """Each well-formed value must be accepted without raising."""
    _validate_user_input(user_input)
def test_validate_pipeline_config_invalid_component_name():
    """A component name containing a control character is rejected."""
    config = {"components": [{"name": "\btest"}]}
    with pytest.raises(ValueError, match="is not a valid config variable name"):
        validate_config(config)
def test_validate_pipeline_config_invalid_component_type():
    """A component type containing a control character is rejected."""
    config = {"components": [{"name": "test", "type": "\btest"}]}
    with pytest.raises(ValueError, match="is not a valid config variable name"):
        validate_config(config)
def test_validate_pipeline_config_invalid_component_param():
    """A component parameter value containing a control character is rejected."""
    config = {"components": [{"name": "test", "type": "test", "params": {"key": "\btest"}}]}
    with pytest.raises(ValueError, match="is not a valid config variable name"):
        validate_config(config)
def test_validate_pipeline_config_invalid_component_param_key():
    """A component parameter key containing a control character is rejected."""
    config = {"components": [{"name": "test", "type": "test", "params": {"\btest": "test"}}]}
    with pytest.raises(ValueError, match="is not a valid config variable name"):
        validate_config(config)
def test_validate_pipeline_config_invalid_pipeline_name():
    """A pipeline name containing a control character is rejected."""
    config = {"components": [{"name": "test", "type": "test"}], "pipelines": [{"name": "\btest"}]}
    with pytest.raises(ValueError, match="is not a valid config variable name"):
        validate_config(config)
def test_validate_pipeline_config_invalid_pipeline_type():
    """A pipeline type containing a control character is rejected."""
    config = {"components": [{"name": "test", "type": "test"}], "pipelines": [{"name": "test", "type": "\btest"}]}
    with pytest.raises(ValueError, match="is not a valid config variable name"):
        validate_config(config)
def test_validate_pipeline_config_invalid_pipeline_node_name():
    """A pipeline node name containing a control character is rejected."""
    config = {
        "components": [{"name": "test", "type": "test"}],
        "pipelines": [{"name": "test", "type": "test", "nodes": [{"name": "\btest"}]}],
    }
    with pytest.raises(ValueError, match="is not a valid config variable name"):
        validate_config(config)
def test_validate_pipeline_config_invalid_pipeline_node_inputs():
    """A pipeline node input containing a control character is rejected."""
    config = {
        "components": [{"name": "test", "type": "test"}],
        "pipelines": [{"name": "test", "type": "test", "nodes": [{"name": "test", "inputs": ["\btest"]}]}],
    }
    with pytest.raises(ValueError, match="is not a valid config variable name"):
        validate_config(config)
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_load_from_deepset_cloud_query():
    """Load a query pipeline from deepset Cloud and run a query against the mocked API."""
    if MOCK_DC:
        with open(SAMPLES_PATH / "dc" / "pipeline_config.json", "r") as f:
            pipeline_config_yaml_response = json.load(f)
        responses.add(
            method=responses.GET,
            url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/{DC_TEST_INDEX}/json",
            json=pipeline_config_yaml_response,
            status=200,
        )
        responses.add(
            method=responses.POST,
            url=f"{DC_API_ENDPOINT}/workspaces/default/indexes/{DC_TEST_INDEX}/documents-query",
            json=[{"id": "test_doc", "content": "man on hores"}],
            status=200,
        )
    query_pipeline = Pipeline.load_from_deepset_cloud(
        pipeline_config_name=DC_TEST_INDEX, api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY
    )
    retriever = query_pipeline.get_node("Retriever")
    document_store = retriever.document_store
    # The loaded pipeline must wire an ElasticsearchRetriever to a DeepsetCloudDocumentStore.
    assert isinstance(retriever, ElasticsearchRetriever)
    assert isinstance(document_store, DeepsetCloudDocumentStore)
    assert document_store == query_pipeline.get_document_store()
    prediction = query_pipeline.run(query="man on horse", params={})
    assert prediction["query"] == "man on horse"
    assert len(prediction["documents"]) == 1
    assert prediction["documents"][0].id == "test_doc"
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_load_from_deepset_cloud_indexing():
    """Indexing pipelines loaded from deepset Cloud cannot write documents locally."""
    if MOCK_DC:
        with open(SAMPLES_PATH / "dc" / "pipeline_config.json", "r") as f:
            pipeline_config_yaml_response = json.load(f)
        responses.add(
            method=responses.GET,
            url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/{DC_TEST_INDEX}/json",
            json=pipeline_config_yaml_response,
            status=200,
        )
    indexing_pipeline = Pipeline.load_from_deepset_cloud(
        pipeline_config_name=DC_TEST_INDEX, api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY, pipeline_name="indexing"
    )
    document_store = indexing_pipeline.get_node("DocumentStore")
    assert isinstance(document_store, DeepsetCloudDocumentStore)
    # Writing documents through a DeepsetCloudDocumentStore is unsupported and must raise.
    with pytest.raises(
        Exception, match=".*NotImplementedError.*DeepsetCloudDocumentStore currently does not support writing documents"
    ):
        indexing_pipeline.run(file_paths=[SAMPLES_PATH / "docs" / "doc_1.txt"])
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_list_pipelines_on_deepset_cloud():
    """list_pipelines_on_deepset_cloud returns the pipeline entries from the workspace listing."""
    if MOCK_DC:
        responses.add(
            method=responses.GET,
            url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines",
            json={
                "data": [
                    {
                        "name": "test_pipeline_config",
                        "pipeline_id": "2184e0c1-c6ec-40a1-9b28-5d2768e5efa2",
                        "status": "DEPLOYED",
                        "created_at": "2022-02-01T09:57:03.803991+00:00",
                        "deleted": False,
                        "is_default": False,
                        "indexing": {"status": "IN_PROGRESS", "pending_file_count": 4, "total_file_count": 33},
                    }
                ],
                "has_more": False,
                "total": 1,
            },
            status=200,
        )
    pipelines = Pipeline.list_pipelines_on_deepset_cloud(api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY)
    assert len(pipelines) == 1
    assert pipelines[0]["name"] == "test_pipeline_config"
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_save_to_deepset_cloud():
    """Saving configs to deepset Cloud: create new, refuse silent overwrite, overwrite
    explicitly, and reject updates to DEPLOYED configs."""
    if MOCK_DC:
        # Existing config in UNDEPLOYED state: may be overwritten with overwrite=True.
        responses.add(
            method=responses.GET,
            url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_pipeline_config",
            json={
                "name": "test_pipeline_config",
                "pipeline_id": "2184e9c1-c6ec-40a1-9b28-5d2768e5efa2",
                "status": "UNDEPLOYED",
                "created_at": "2022-02-01T09:57:03.803991+00:00",
                "deleted": False,
                "is_default": False,
                "indexing": {"status": "IN_PROGRESS", "pending_file_count": 4, "total_file_count": 33},
            },
            status=200,
        )
        # Existing config in DEPLOYED state: updating must be rejected by the API.
        responses.add(
            method=responses.GET,
            url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_pipeline_config_deployed",
            json={
                "name": "test_pipeline_config_deployed",
                "pipeline_id": "8184e0c1-c6ec-40a1-9b28-5d2768e5efa3",
                "status": "DEPLOYED",
                "created_at": "2022-02-09T09:57:03.803991+00:00",
                "deleted": False,
                "is_default": False,
                "indexing": {"status": "INDEXED", "pending_file_count": 0, "total_file_count": 33},
            },
            status=200,
        )
        # Config that does not exist yet: saving under this name creates it.
        responses.add(
            method=responses.GET,
            url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_pipeline_config_copy",
            json={"errors": ["Pipeline with the name test_pipeline_config_copy does not exists."]},
            status=404,
        )
        with open(SAMPLES_PATH / "dc" / "pipeline_config.json", "r") as f:
            pipeline_config_yaml_response = json.load(f)
        responses.add(
            method=responses.GET,
            url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/{DC_TEST_INDEX}/json",
            json=pipeline_config_yaml_response,
            status=200,
        )
        responses.add(
            method=responses.POST,
            url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines",
            json={"name": "test_pipeline_config_copy"},
            status=200,
        )
        responses.add(
            method=responses.PUT,
            url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_pipeline_config/yaml",
            json={"name": "test_pipeline_config"},
            status=200,
        )
        responses.add(
            method=responses.PUT,
            url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_pipeline_config_deployed/yaml",
            json={"errors": ["Updating the pipeline yaml is not allowed for pipelines with status: 'DEPLOYED'"]},
            status=406,
        )
    query_pipeline = Pipeline.load_from_deepset_cloud(
        pipeline_config_name=DC_TEST_INDEX, api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY
    )
    index_pipeline = Pipeline.load_from_deepset_cloud(
        pipeline_config_name=DC_TEST_INDEX, api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY, pipeline_name="indexing"
    )
    # Saving under a new name succeeds.
    Pipeline.save_to_deepset_cloud(
        query_pipeline=query_pipeline,
        index_pipeline=index_pipeline,
        pipeline_config_name="test_pipeline_config_copy",
        api_endpoint=DC_API_ENDPOINT,
        api_key=DC_API_KEY,
    )
    # Saving under an existing name without overwrite=True must fail.
    with pytest.raises(
        ValueError,
        match="Pipeline config 'test_pipeline_config' already exists. Set `overwrite=True` to overwrite pipeline config",
    ):
        Pipeline.save_to_deepset_cloud(
            query_pipeline=query_pipeline,
            index_pipeline=index_pipeline,
            pipeline_config_name="test_pipeline_config",
            api_endpoint=DC_API_ENDPOINT,
            api_key=DC_API_KEY,
        )
    # With overwrite=True an UNDEPLOYED config may be updated.
    Pipeline.save_to_deepset_cloud(
        query_pipeline=query_pipeline,
        index_pipeline=index_pipeline,
        pipeline_config_name="test_pipeline_config",
        api_endpoint=DC_API_ENDPOINT,
        api_key=DC_API_KEY,
        overwrite=True,
    )
    # A DEPLOYED config must never be updated, even with overwrite=True.
    with pytest.raises(
        ValueError,
        match="Deployed pipeline configs are not allowed to be updated. Please undeploy pipeline config 'test_pipeline_config_deployed' first",
    ):
        Pipeline.save_to_deepset_cloud(
            query_pipeline=query_pipeline,
            index_pipeline=index_pipeline,
            pipeline_config_name="test_pipeline_config_deployed",
            api_endpoint=DC_API_ENDPOINT,
            api_key=DC_API_KEY,
            overwrite=True,
        )
@pytest.mark.elasticsearch
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_save_nonexisting_pipeline_to_deepset_cloud():
    """Uploading locally-built pipelines must produce a config whose document store is a
    DeepsetCloudDocumentStore (the matcher rejects the POST otherwise)."""
    if MOCK_DC:
        def dc_document_store_matcher(request: PreparedRequest) -> Tuple[bool, str]:
            # Custom `responses` matcher: accept the upload only if the serialized config
            # contains a DeepsetCloudDocumentStore — local stores must have been converted.
            matches = False
            reason = "No DeepsetCloudDocumentStore found."
            request_body = request.body or ""
            json_body = yaml.safe_load(request_body)
            components = json_body["components"]
            for component in components:
                if component["type"].endswith("DocumentStore"):
                    if component["type"] == "DeepsetCloudDocumentStore":
                        matches = True
                    else:
                        matches = False
                        reason = f"Component {component['name']} is of type {component['type']} and not DeepsetCloudDocumentStore"
                    break
            return matches, reason
        responses.add(
            method=responses.GET,
            url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline",
            json={"errors": ["Pipeline with the name test_pipeline_config_copy does not exists."]},
            status=404,
        )
        responses.add(
            method=responses.POST,
            url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines",
            json={"name": "test_new_non_existing_pipeline"},
            status=201,
            match=[dc_document_store_matcher],
        )
    # Build local query and indexing pipelines backed by an Elasticsearch store.
    es_document_store = ElasticsearchDocumentStore()
    es_retriever = ElasticsearchRetriever(document_store=es_document_store)
    file_converter = TextConverter()
    preprocessor = PreProcessor()
    query_pipeline = Pipeline()
    query_pipeline.add_node(component=es_retriever, name="Retriever", inputs=["Query"])
    index_pipeline = Pipeline()
    index_pipeline.add_node(component=file_converter, name="FileConverter", inputs=["File"])
    index_pipeline.add_node(component=preprocessor, name="Preprocessor", inputs=["FileConverter"])
    index_pipeline.add_node(component=es_document_store, name="DocumentStore", inputs=["Preprocessor"])
    Pipeline.save_to_deepset_cloud(
        query_pipeline=query_pipeline,
        index_pipeline=index_pipeline,
        pipeline_config_name="test_new_non_existing_pipeline",
        api_endpoint=DC_API_ENDPOINT,
        api_key=DC_API_KEY,
    )
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_deploy_on_deepset_cloud_non_existing_pipeline():
    """Deploying a pipeline config that does not exist raises DeepsetCloudError."""
    if MOCK_DC:
        responses.add(
            method=responses.GET,
            url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline",
            json={"errors": ["Pipeline with the name test_pipeline_config_copy does not exists."]},
            status=404,
        )
    expected_message = "Pipeline config 'test_new_non_existing_pipeline' does not exist."
    with pytest.raises(DeepsetCloudError, match=expected_message):
        Pipeline.deploy_on_deepset_cloud(
            pipeline_config_name="test_new_non_existing_pipeline", api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY
        )
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_undeploy_on_deepset_cloud_non_existing_pipeline():
    """Undeploying a pipeline config that does not exist raises DeepsetCloudError."""
    if MOCK_DC:
        responses.add(
            method=responses.GET,
            url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline",
            json={"errors": ["Pipeline with the name test_pipeline_config_copy does not exists."]},
            status=404,
        )
    expected_message = "Pipeline config 'test_new_non_existing_pipeline' does not exist."
    with pytest.raises(DeepsetCloudError, match=expected_message):
        Pipeline.undeploy_on_deepset_cloud(
            pipeline_config_name="test_new_non_existing_pipeline", api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY
        )
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_deploy_on_deepset_cloud():
    """deploy_on_deepset_cloud polls the pipeline status until it reaches DEPLOYED."""
    if MOCK_DC:
        responses.add(
            method=responses.POST,
            url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline/deploy",
            json={"status": "DEPLOYMENT_SCHEDULED"},
            status=200,
        )
        # status will be first undeployed, after deploy() it's in progress twice and the third time deployed
        status_flow = ["UNDEPLOYED", "DEPLOYMENT_IN_PROGRESS", "DEPLOYMENT_IN_PROGRESS", "DEPLOYED"]
        for status in status_flow:
            responses.add(
                method=responses.GET,
                url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline",
                json={"status": status},
                status=200,
            )
    Pipeline.deploy_on_deepset_cloud(
        pipeline_config_name="test_new_non_existing_pipeline", api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY
    )
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_undeploy_on_deepset_cloud():
    """undeploy_on_deepset_cloud polls the pipeline status until it reaches UNDEPLOYED."""
    if MOCK_DC:
        responses.add(
            method=responses.POST,
            url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline/undeploy",
            json={"status": "UNDEPLOYMENT_SCHEDULED"},
            status=200,
        )
        # status will be first deployed, after undeploy() it's in progress twice and the third time undeployed
        status_flow = ["DEPLOYED", "UNDEPLOYMENT_IN_PROGRESS", "UNDEPLOYMENT_IN_PROGRESS", "UNDEPLOYED"]
        for status in status_flow:
            responses.add(
                method=responses.GET,
                url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline",
                json={"status": status},
                status=200,
            )
    Pipeline.undeploy_on_deepset_cloud(
        pipeline_config_name="test_new_non_existing_pipeline", api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY
    )
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_deploy_on_deepset_cloud_sate_already_satisfied():
    """deploy() is a no-op when the pipeline is already DEPLOYED (no deploy POST is mocked).

    NOTE(review): "sate" in the function name looks like a typo for "state"; left unchanged
    because renaming would alter the test's collected name.
    """
    if MOCK_DC:
        # Pipeline is already DEPLOYED; deploy() must return without scheduling a deployment.
        status_flow = ["DEPLOYED"]
        for status in status_flow:
            responses.add(
                method=responses.GET,
                url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline",
                json={"status": status},
                status=200,
            )
    Pipeline.deploy_on_deepset_cloud(
        pipeline_config_name="test_new_non_existing_pipeline", api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY
    )
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_undeploy_on_deepset_cloud_sate_already_satisfied():
    """undeploy() is a no-op when the pipeline is already UNDEPLOYED (no undeploy POST is mocked).

    NOTE(review): "sate" in the function name looks like a typo for "state"; left unchanged
    because renaming would alter the test's collected name.
    """
    if MOCK_DC:
        # Pipeline is already UNDEPLOYED; undeploy() must return without scheduling anything.
        status_flow = ["UNDEPLOYED"]
        for status in status_flow:
            responses.add(
                method=responses.GET,
                url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline",
                json={"status": status},
                status=200,
            )
    Pipeline.undeploy_on_deepset_cloud(
        pipeline_config_name="test_new_non_existing_pipeline", api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY
    )
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_deploy_on_deepset_cloud_failed():
    """If the pipeline falls back to UNDEPLOYED during deployment, deploy() must raise."""
    if MOCK_DC:
        responses.add(
            method=responses.POST,
            url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline/deploy",
            json={"status": "DEPLOYMENT_SCHEDULED"},
            status=200,
        )
        # status will be first undeployed, after deploy() it's in progress and the third time undeployed
        status_flow = ["UNDEPLOYED", "DEPLOYMENT_IN_PROGRESS", "UNDEPLOYED"]
        for status in status_flow:
            responses.add(
                method=responses.GET,
                url=f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline",
                json={"status": status},
                status=200,
            )
    with pytest.raises(
        DeepsetCloudError, match="Deployment of pipeline config 'test_new_non_existing_pipeline' failed."
    ):
        Pipeline.deploy_on_deepset_cloud(
            pipeline_config_name="test_new_non_existing_pipeline", api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY
        )
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_undeploy_on_deepset_cloud_failed():
    """An undeployment that falls back to DEPLOYED after being in progress raises DeepsetCloudError."""
    if MOCK_DC:
        pipeline_url = f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline"
        responses.add(
            method=responses.POST, url=f"{pipeline_url}/undeploy", json={"status": "UNDEPLOYMENT_SCHEDULED"}, status=200
        )
        # Poll sequence: deployed, then in progress, then back to deployed (= undeployment failed).
        for state in ("DEPLOYED", "UNDEPLOYMENT_IN_PROGRESS", "DEPLOYED"):
            responses.add(method=responses.GET, url=pipeline_url, json={"status": state}, status=200)
    with pytest.raises(
        DeepsetCloudError, match="Undeployment of pipeline config 'test_new_non_existing_pipeline' failed."
    ):
        Pipeline.undeploy_on_deepset_cloud(
            pipeline_config_name="test_new_non_existing_pipeline", api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY
        )
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_deploy_on_deepset_cloud_invalid_initial_state():
    """Deploying while an undeployment is scheduled is an invalid starting state and must raise."""
    if MOCK_DC:
        pipeline_url = f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline"
        for state in ("UNDEPLOYMENT_SCHEDULED",):
            responses.add(method=responses.GET, url=pipeline_url, json={"status": state}, status=200)
    with pytest.raises(
        DeepsetCloudError,
        match="Pipeline config 'test_new_non_existing_pipeline' is in invalid state 'UNDEPLOYMENT_SCHEDULED' to be transitioned to 'DEPLOYED'.",
    ):
        Pipeline.deploy_on_deepset_cloud(
            pipeline_config_name="test_new_non_existing_pipeline", api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY
        )
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_undeploy_on_deepset_cloud_invalid_initial_state():
    """Undeploying while a deployment is scheduled is an invalid starting state and must raise."""
    if MOCK_DC:
        pipeline_url = f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline"
        for state in ("DEPLOYMENT_SCHEDULED",):
            responses.add(method=responses.GET, url=pipeline_url, json={"status": state}, status=200)
    with pytest.raises(
        DeepsetCloudError,
        match="Pipeline config 'test_new_non_existing_pipeline' is in invalid state 'DEPLOYMENT_SCHEDULED' to be transitioned to 'UNDEPLOYED'.",
    ):
        Pipeline.undeploy_on_deepset_cloud(
            pipeline_config_name="test_new_non_existing_pipeline", api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY
        )
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_deploy_on_deepset_cloud_invalid_state_in_progress():
    """A deployment overtaken by an undeployment request must abort with an error."""
    if MOCK_DC:
        pipeline_url = f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline"
        responses.add(
            method=responses.POST, url=f"{pipeline_url}/deploy", json={"status": "DEPLOYMENT_SCHEDULED"}, status=200
        )
        # While waiting for DEPLOYED, the pipeline suddenly reports an undeployment in progress.
        for state in ("UNDEPLOYED", "UNDEPLOYMENT_IN_PROGRESS"):
            responses.add(method=responses.GET, url=pipeline_url, json={"status": state}, status=200)
    with pytest.raises(
        DeepsetCloudError,
        # NOTE(review): "pipline" matches the client's error text verbatim — do not "fix" here.
        match="Deployment of pipline config 'test_new_non_existing_pipeline' aborted. Undeployment was requested.",
    ):
        Pipeline.deploy_on_deepset_cloud(
            pipeline_config_name="test_new_non_existing_pipeline", api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY
        )
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_undeploy_on_deepset_cloud_invalid_state_in_progress():
    """An undeployment overtaken by a deployment request must abort with an error."""
    if MOCK_DC:
        pipeline_url = f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline"
        responses.add(
            method=responses.POST, url=f"{pipeline_url}/undeploy", json={"status": "UNDEPLOYMENT_SCHEDULED"}, status=200
        )
        # While waiting for UNDEPLOYED, the pipeline suddenly reports a deployment in progress.
        for state in ("DEPLOYED", "DEPLOYMENT_IN_PROGRESS"):
            responses.add(method=responses.GET, url=pipeline_url, json={"status": state}, status=200)
    with pytest.raises(
        DeepsetCloudError,
        # NOTE(review): "pipline" matches the client's error text verbatim — do not "fix" here.
        match="Undeployment of pipline config 'test_new_non_existing_pipeline' aborted. Deployment was requested.",
    ):
        Pipeline.undeploy_on_deepset_cloud(
            pipeline_config_name="test_new_non_existing_pipeline", api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY
        )
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_deploy_on_deepset_cloud_unknown_state_in_progress():
    """An unrecognized status reported during deployment must surface as UNKNOWN."""
    if MOCK_DC:
        pipeline_url = f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline"
        responses.add(
            method=responses.POST, url=f"{pipeline_url}/deploy", json={"status": "DEPLOYMENT_SCHEDULED"}, status=200
        )
        # The second poll returns gibberish that the client cannot map to a known state.
        for state in ("UNDEPLOYED", "ASKDHFASJDF"):
            responses.add(method=responses.GET, url=pipeline_url, json={"status": state}, status=200)
    with pytest.raises(
        DeepsetCloudError,
        match="Deployment of pipeline config 'test_new_non_existing_pipeline ended in unexpected status: UNKNOWN",
    ):
        Pipeline.deploy_on_deepset_cloud(
            pipeline_config_name="test_new_non_existing_pipeline", api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY
        )
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_undeploy_on_deepset_cloud_unknown_state_in_progress():
    """An unrecognized status reported during undeployment must surface as UNKNOWN."""
    if MOCK_DC:
        pipeline_url = f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline"
        responses.add(
            method=responses.POST, url=f"{pipeline_url}/undeploy", json={"status": "UNDEPLOYMENT_SCHEDULED"}, status=200
        )
        # The second poll returns gibberish that the client cannot map to a known state.
        for state in ("DEPLOYED", "ASKDHFASJDF"):
            responses.add(method=responses.GET, url=pipeline_url, json={"status": state}, status=200)
    with pytest.raises(
        DeepsetCloudError,
        match="Undeployment of pipeline config 'test_new_non_existing_pipeline ended in unexpected status: UNKNOWN",
    ):
        Pipeline.undeploy_on_deepset_cloud(
            pipeline_config_name="test_new_non_existing_pipeline", api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY
        )
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_deploy_on_deepset_cloud_timeout():
    """Deployment polling that outlasts the given timeout must raise TimeoutError."""
    if MOCK_DC:
        pipeline_url = f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline"
        responses.add(
            method=responses.POST, url=f"{pipeline_url}/deploy", json={"status": "DEPLOYMENT_SCHEDULED"}, status=200
        )
        # DEPLOYED only appears after several in-progress polls; with timeout=5 the wait is
        # expected to expire first (presumably the client sleeps between GET polls — see impl).
        for state in ("UNDEPLOYED", "DEPLOYMENT_IN_PROGRESS", "DEPLOYMENT_IN_PROGRESS", "DEPLOYED"):
            responses.add(method=responses.GET, url=pipeline_url, json={"status": state}, status=200)
    with pytest.raises(
        TimeoutError, match="Transitioning of 'test_new_non_existing_pipeline' to state 'DEPLOYED' timed out."
    ):
        Pipeline.deploy_on_deepset_cloud(
            pipeline_config_name="test_new_non_existing_pipeline",
            api_endpoint=DC_API_ENDPOINT,
            api_key=DC_API_KEY,
            timeout=5,
        )
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_undeploy_on_deepset_cloud_timeout():
    """Undeployment polling that outlasts the given timeout must raise TimeoutError."""
    if MOCK_DC:
        pipeline_url = f"{DC_API_ENDPOINT}/workspaces/default/pipelines/test_new_non_existing_pipeline"
        responses.add(
            method=responses.POST, url=f"{pipeline_url}/undeploy", json={"status": "UNDEPLOYMENT_SCHEDULED"}, status=200
        )
        # UNDEPLOYED only appears after several in-progress polls; with timeout=5 the wait is
        # expected to expire first (presumably the client sleeps between GET polls — see impl).
        for state in ("DEPLOYED", "UNDEPLOYMENT_IN_PROGRESS", "UNDEPLOYMENT_IN_PROGRESS", "UNDEPLOYED"):
            responses.add(method=responses.GET, url=pipeline_url, json={"status": state}, status=200)
    with pytest.raises(
        TimeoutError, match="Transitioning of 'test_new_non_existing_pipeline' to state 'UNDEPLOYED' timed out."
    ):
        Pipeline.undeploy_on_deepset_cloud(
            pipeline_config_name="test_new_non_existing_pipeline",
            api_endpoint=DC_API_ENDPOINT,
            api_key=DC_API_KEY,
            timeout=5,
        )
# @pytest.mark.slow
# @pytest.mark.elasticsearch
# @pytest.mark.parametrize(
# "retriever_with_docs, document_store_with_docs",
# [("elasticsearch", "elasticsearch")],
# indirect=True,
# )
@pytest.mark.parametrize(
    "retriever_with_docs,document_store_with_docs",
    [
        ("dpr", "elasticsearch"),
        ("dpr", "faiss"),
        ("dpr", "memory"),
        ("dpr", "milvus1"),
        ("embedding", "elasticsearch"),
        ("embedding", "faiss"),
        ("embedding", "memory"),
        ("embedding", "milvus1"),
        ("elasticsearch", "elasticsearch"),
        ("es_filter_only", "elasticsearch"),
        ("tfidf", "memory"),
    ],
    indirect=True,
)
def test_graph_creation(retriever_with_docs, document_store_with_docs):
    """Invalid edges or unknown input nodes must be rejected while the graph is being built."""
    pipeline = Pipeline()
    pipeline.add_node(name="ES", component=retriever_with_docs, inputs=["Query"])
    # Referencing an edge index the component does not expose is rejected.
    with pytest.raises(AssertionError):
        pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["ES.output_2"])
    # Edge labels that do not follow the expected naming scheme are rejected.
    with pytest.raises(AssertionError):
        pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["ES.wrong_edge_label"])
    # Inputs must reference nodes that already exist in the graph.
    with pytest.raises(Exception):
        pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["InvalidNode"])
    with pytest.raises(Exception):
        pipeline = Pipeline()
        pipeline.add_node(name="ES", component=retriever_with_docs, inputs=["InvalidNode"])
def test_parallel_paths_in_pipeline_graph():
    """Outputs of parallel branches must be joined in graph-execution order."""

    class A(RootNode):
        def run(self):
            return {"test": "A"}, "output_1"

    class B(RootNode):
        def run(self, test):
            return {"test": test + "B"}, "output_1"

    class C(RootNode):
        def run(self, test):
            return {"test": test + "C"}, "output_1"

    class D(RootNode):
        def run(self, test):
            return {"test": test + "D"}, "output_1"

    class E(RootNode):
        def run(self, test):
            return {"test": test + "E"}, "output_1"

    class JoinNode(RootNode):
        def run(self, inputs):
            return {"test": inputs[0]["test"] + inputs[1]["test"]}, "output_1"

    # Branch after B: one path B->D, the other B->C->E, re-joined in F.
    pipeline = Pipeline()
    pipeline.add_node(name="A", component=A(), inputs=["Query"])
    pipeline.add_node(name="B", component=B(), inputs=["A"])
    pipeline.add_node(name="C", component=C(), inputs=["B"])
    pipeline.add_node(name="E", component=E(), inputs=["C"])
    pipeline.add_node(name="D", component=D(), inputs=["B"])
    pipeline.add_node(name="F", component=JoinNode(), inputs=["D", "E"])
    output = pipeline.run(query="test")
    assert output["test"] == "ABDABCE"

    # Shorter variant: branches B->C and B->D joined directly in E.
    pipeline = Pipeline()
    pipeline.add_node(name="A", component=A(), inputs=["Query"])
    pipeline.add_node(name="B", component=B(), inputs=["A"])
    pipeline.add_node(name="C", component=C(), inputs=["B"])
    pipeline.add_node(name="D", component=D(), inputs=["B"])
    pipeline.add_node(name="E", component=JoinNode(), inputs=["C", "D"])
    output = pipeline.run(query="test")
    assert output["test"] == "ABCABD"
def test_parallel_paths_in_pipeline_graph_with_branching():
    """The root's selected edge (output_1 / output_2 / output_all) controls which branches run."""

    class AWithOutput1(RootNode):
        outgoing_edges = 2

        def run(self):
            return {"output": "A"}, "output_1"

    class AWithOutput2(RootNode):
        outgoing_edges = 2

        def run(self):
            return {"output": "A"}, "output_2"

    class AWithOutputAll(RootNode):
        outgoing_edges = 2

        def run(self):
            return {"output": "A"}, "output_all"

    class B(RootNode):
        def run(self, output):
            return {"output": output + "B"}, "output_1"

    class C(RootNode):
        def run(self, output):
            return {"output": output + "C"}, "output_1"

    class D(RootNode):
        def run(self, output):
            return {"output": output + "D"}, "output_1"

    class E(RootNode):
        def run(self, output):
            return {"output": output + "E"}, "output_1"

    class JoinNode(RootNode):
        def run(self, output=None, inputs=None):
            if inputs:
                output = "".join(input_dict["output"] for input_dict in inputs)
            return {"output": output}, "output_1"

    def build_pipeline(root):
        # Identical topology for all three cases; only the root component differs.
        pipeline = Pipeline()
        pipeline.add_node(name="A", component=root, inputs=["Query"])
        pipeline.add_node(name="B", component=B(), inputs=["A.output_1"])
        pipeline.add_node(name="C", component=C(), inputs=["A.output_2"])
        pipeline.add_node(name="D", component=E(), inputs=["B"])
        pipeline.add_node(name="E", component=D(), inputs=["B"])
        pipeline.add_node(name="F", component=JoinNode(), inputs=["D", "E", "C"])
        return pipeline

    # output_1: only the branch through B runs; C is skipped.
    output = build_pipeline(AWithOutput1()).run(query="test")
    assert output["output"] == "ABEABD"

    # output_2: only the C branch runs.
    output = build_pipeline(AWithOutput2()).run(query="test")
    assert output["output"] == "AC"

    # output_all: both branches run and all three join inputs arrive.
    output = build_pipeline(AWithOutputAll()).run(query="test")
    assert output["output"] == "ACABEABD"
def test_pipeline_components():
    """pipeline.components must expose every added node keyed by its registered name."""

    class Node(BaseComponent):
        outgoing_edges = 1

        def run(self):
            return {"test": "test"}, "output_1"

    components = {name: Node() for name in "ABCDE"}
    pipeline = Pipeline()
    upstream = "Query"
    # Chain A -> B -> C -> D -> E off the Query root.
    for name, component in components.items():
        pipeline.add_node(name=name, component=component, inputs=[upstream])
        upstream = name
    assert len(pipeline.components) == 5
    for name, component in components.items():
        assert pipeline.components[name] == component
def test_pipeline_get_document_store_from_components():
    """A document store added directly as a node is discoverable via get_document_store()."""

    class DummyDocumentStore(BaseDocumentStore):
        pass

    doc_store = DummyDocumentStore()
    pipeline = Pipeline()
    pipeline.add_node(name="A", component=doc_store, inputs=["File"])
    assert pipeline.get_document_store() == doc_store
def test_pipeline_get_document_store_from_components_multiple_doc_stores():
    """get_document_store() must refuse to pick between several document-store nodes."""

    class DummyDocumentStore(BaseDocumentStore):
        pass

    pipeline = Pipeline()
    pipeline.add_node(name="A", component=DummyDocumentStore(), inputs=["File"])
    pipeline.add_node(name="B", component=DummyDocumentStore(), inputs=["File"])
    with pytest.raises(Exception, match="Multiple Document Stores found in Pipeline"):
        pipeline.get_document_store()
def test_pipeline_get_document_store_from_retriever():
    """get_document_store() finds the store attached to a retriever node."""

    class DummyRetriever(BaseRetriever):
        def __init__(self, document_store):
            self.document_store = document_store

        def run(self):
            return {"test": "test"}, "output_1"

    class DummyDocumentStore(BaseDocumentStore):
        pass

    doc_store = DummyDocumentStore()
    pipeline = Pipeline()
    pipeline.add_node(name="A", component=DummyRetriever(document_store=doc_store), inputs=["Query"])
    assert pipeline.get_document_store() == doc_store
def test_pipeline_get_document_store_from_dual_retriever():
    """Two retrievers sharing one store still resolve to that single document store."""

    class DummyRetriever(BaseRetriever):
        def __init__(self, document_store):
            self.document_store = document_store

        def run(self):
            return {"test": "test"}, "output_1"

    class DummyDocumentStore(BaseDocumentStore):
        pass

    class JoinNode(RootNode):
        def run(self, output=None, inputs=None):
            if inputs:
                output = "".join(input_dict["output"] for input_dict in inputs)
            return {"output": output}, "output_1"

    doc_store = DummyDocumentStore()
    pipeline = Pipeline()
    pipeline.add_node(name="A", component=DummyRetriever(document_store=doc_store), inputs=["Query"])
    pipeline.add_node(name="B", component=DummyRetriever(document_store=doc_store), inputs=["Query"])
    pipeline.add_node(name="C", component=JoinNode(), inputs=["A", "B"])
    assert pipeline.get_document_store() == doc_store
def test_pipeline_get_document_store_multiple_doc_stores_from_dual_retriever():
    """Retrievers pointing at different stores make get_document_store() ambiguous — must raise."""

    class DummyRetriever(BaseRetriever):
        def __init__(self, document_store):
            self.document_store = document_store

        def run(self):
            return {"test": "test"}, "output_1"

    class DummyDocumentStore(BaseDocumentStore):
        pass

    class JoinNode(RootNode):
        def run(self, output=None, inputs=None):
            if inputs:
                output = "".join(input_dict["output"] for input_dict in inputs)
            return {"output": output}, "output_1"

    pipeline = Pipeline()
    pipeline.add_node(name="A", component=DummyRetriever(document_store=DummyDocumentStore()), inputs=["Query"])
    pipeline.add_node(name="B", component=DummyRetriever(document_store=DummyDocumentStore()), inputs=["Query"])
    pipeline.add_node(name="C", component=JoinNode(), inputs=["A", "B"])
    with pytest.raises(Exception, match="Multiple Document Stores found in Pipeline"):
        pipeline.get_document_store()
def test_existing_faiss_document_store():
    """A FAISS store persisted by an indexing pipeline is usable by a YAML-loaded query pipeline."""
    clean_faiss_document_store()
    indexing_pipeline = Pipeline.load_from_yaml(
        SAMPLES_PATH / "pipeline" / "test_pipeline_faiss_indexing.yaml", pipeline_name="indexing_pipeline"
    )
    indexing_pipeline.run(file_paths=SAMPLES_PATH / "pdf" / "sample_pdf_1.pdf")
    indexing_pipeline.get_document_store().save("existing_faiss_document_store")
    # The query pipeline loaded from YAML must pick up the persisted FAISS index.
    query_pipeline = Pipeline.load_from_yaml(
        SAMPLES_PATH / "pipeline" / "test_pipeline_faiss_retrieval.yaml", pipeline_name="query_pipeline"
    )
    existing_document_store = query_pipeline.get_node("DPRRetriever").document_store
    assert existing_document_store.faiss_indexes["document"].ntotal == 2
    prediction = query_pipeline.run(query="Who made the PDF specification?", params={"DPRRetriever": {"top_k": 10}})
    assert prediction["query"] == "Who made the PDF specification?"
    assert len(prediction["documents"]) == 2
    clean_faiss_document_store()
@pytest.mark.slow
@pytest.mark.parametrize("retriever_with_docs", ["elasticsearch", "dpr", "embedding"], indirect=True)
@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
def test_documentsearch_es_authentication(retriever_with_docs, document_store_with_docs: ElasticsearchDocumentStore):
    """Auth headers passed via params must reach the Elasticsearch client's search call."""
    if isinstance(retriever_with_docs, (DensePassageRetriever, EmbeddingRetriever)):
        document_store_with_docs.update_embeddings(retriever=retriever_with_docs)
    # Wrap the real client so the query still executes while call args are recorded.
    mock_client = Mock(wraps=document_store_with_docs.client)
    document_store_with_docs.client = mock_client
    auth_headers = {"Authorization": "Basic YWRtaW46cm9vdA=="}
    pipeline = DocumentSearchPipeline(retriever=retriever_with_docs)
    prediction = pipeline.run(
        query="Who lives in Berlin?", params={"Retriever": {"top_k": 10, "headers": auth_headers}}
    )
    assert prediction is not None
    assert len(prediction["documents"]) == 5
    mock_client.search.assert_called_once()
    _, kwargs = mock_client.search.call_args
    assert "headers" in kwargs
    assert kwargs["headers"] == auth_headers
@pytest.mark.slow
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_documentsearch_document_store_authentication(retriever_with_docs, document_store_with_docs):
    """Auth headers reach the Elasticsearch client's count call; other stores are expected to fail."""
    mock_client = None
    if isinstance(document_store_with_docs, ElasticsearchDocumentStore):
        es_document_store: ElasticsearchDocumentStore = document_store_with_docs
        mock_client = Mock(wraps=es_document_store.client)
        es_document_store.client = mock_client
    auth_headers = {"Authorization": "Basic YWRtaW46cm9vdA=="}
    pipeline = DocumentSearchPipeline(retriever=retriever_with_docs)
    if mock_client:
        prediction = pipeline.run(
            query="Who lives in Berlin?", params={"Retriever": {"top_k": 10, "headers": auth_headers}}
        )
        assert prediction is not None
        assert len(prediction["documents"]) == 5
        mock_client.count.assert_called_once()
        _, kwargs = mock_client.count.call_args
        assert "headers" in kwargs
        assert kwargs["headers"] == auth_headers
    else:
        # Non-Elasticsearch stores do not accept custom headers — the run must raise.
        with pytest.raises(Exception):
            pipeline.run(
                query="Who lives in Berlin?", params={"Retriever": {"top_k": 10, "headers": auth_headers}}
            )
def test_route_documents_by_content_type():
    """With default settings RouteDocuments sends text docs to output_1 and tables to output_2."""
    table = pd.DataFrame(columns=["col 1", "col 2"], data=[["row 1", "row 1"], ["row 2", "row 2"]])
    docs = [
        Document(content="text document", content_type="text"),
        Document(content=table, content_type="table"),
    ]
    result, _ = RouteDocuments().run(documents=docs)
    assert len(result["output_1"]) == 1
    assert len(result["output_2"]) == 1
    assert result["output_1"][0].content_type == "text"
    assert result["output_2"][0].content_type == "table"
def test_route_documents_by_metafield(test_docs_xs):
    """split_by="meta_field" routes each document to the output matching its metadata value."""
    docs = [Document.from_dict(doc) if isinstance(doc, dict) else doc for doc in test_docs_xs]
    route_documents = RouteDocuments(split_by="meta_field", metadata_values=["test1", "test3", "test5"])
    result, _ = route_documents.run(docs)
    # One metadata value per output edge, in the order given by metadata_values.
    for index, expected_value in enumerate(["test1", "test3", "test5"], start=1):
        routed = result[f"output_{index}"]
        assert len(routed) == 1
        assert routed[0].meta["meta_field"] == expected_value
@pytest.mark.parametrize("join_mode", ["concatenate", "merge"])
def test_join_answers(join_mode):
    """JoinAnswers must merge answer lists sorted by score and honor top_k_join."""
    inputs = [
        {"answers": [Answer(answer="answer 1", score=0.7)]},
        {"answers": [Answer(answer="answer 2", score=0.8)]},
    ]
    join_answers = JoinAnswers(join_mode=join_mode)
    result, _ = join_answers.run(inputs)
    assert len(result["answers"]) == 2
    assert result["answers"] == sorted(result["answers"], reverse=True)
    # top_k_join keeps only the best-scored answer.
    result, _ = join_answers.run(inputs, top_k_join=1)
    assert len(result["answers"]) == 1
    assert result["answers"][0].answer == "answer 2"
def clean_faiss_document_store():
    """Delete FAISS test artifacts (index file, config JSON, SQLite DB) from the CWD, if present.

    Safe to call when none of the files exist; used before and after FAISS tests
    so one test's persisted store cannot leak into another.
    """
    # Deduplicated from three copy-pasted exists/remove pairs.
    for artifact in (
        "existing_faiss_document_store",
        "existing_faiss_document_store.json",
        "faiss_document_store.db",
    ):
        if Path(artifact).exists():
            os.remove(artifact)
| 39.430127
| 155
| 0.652567
| 7,235
| 65,178
| 5.587975
| 0.063994
| 0.032798
| 0.021222
| 0.025378
| 0.811744
| 0.756709
| 0.723169
| 0.70489
| 0.679957
| 0.663657
| 0
| 0.008651
| 0.226794
| 65,178
| 1,652
| 156
| 39.453995
| 0.793575
| 0.02642
| 0
| 0.595186
| 0
| 0.000729
| 0.278234
| 0.115412
| 0
| 0
| 0
| 0
| 0.056893
| 1
| 0.062728
| false
| 0.005106
| 0.028446
| 0
| 0.131291
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ed1588418af691630830b9dd5c84a8e85354e30d
| 4,618
|
py
|
Python
|
epytope/Data/pssms/arb/mat/A_2603_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7
|
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/Data/pssms/arb/mat/A_2603_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22
|
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/Data/pssms/arb/mat/A_2603_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4
|
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
A_2603_9 = {0: {'A': -0.3665907337016341, 'C': -0.114435444303954, 'E': 1.4680247395939725, 'D': 1.625402943563373, 'G': -0.372259603053398, 'F': 0.26204301723968665, 'I': -0.2877294952199972, 'H': -0.3718917250683608, 'K': -0.3989975319339608, 'M': -0.3766829856243328, 'L': -0.37035058568618046, 'N': -0.3647391879097787, 'Q': 0.27425599334635836, 'P': -4.0, 'S': -0.37035058568618046, 'R': -0.41504929621293685, 'T': 0.3492021654933729, 'W': 0.41457653895907953, 'V': -0.36109209532016323, 'Y': 0.05428337884030888}, 1: {'A': -4.0, 'C': 0.8388028264696293, 'E': -0.41504929621293685, 'D': -0.41987538755977594, 'G': -4.0, 'F': -0.18378661510028887, 'I': 0.8289723343912908, 'H': -0.3665907337016341, 'K': -0.435050494316646, 'M': -0.36109209532016323, 'L': -0.10471986622370844, 'N': -0.24007105288992217, 'Q': -0.24007105288992217, 'P': -4.0, 'S': -0.007034782410023256, 'R': -0.4057228568904271, 'T': 1.2391047413821126, 'W': -0.3307239933968409, 'V': 1.2745311358461926, 'Y': -0.3841400028043747}, 2: {'A': 0.5193145406112984, 'C': -0.16709995137823352, 'E': -0.36109209532016323, 'D': -0.372259603053398, 'G': -0.36109209532016323, 'F': 0.1789317322954769, 'I': 0.578081373587612, 'H': 0.286397474447291, 'K': -0.36290621928556566, 'M': -0.37613725747410875, 'L': -0.37613725747410875, 'N': -0.3647391879097787, 'Q': -0.36846113293733884, 'P': -4.0, 'S': -0.3665907337016341, 'R': 0.35419892054935753, 'T': -0.16709995137823352, 'W': 0.078846674264735, 'V': 0.015181305883965613, 'Y': -0.08101008358691036}, 3: {'A': -0.11094451417965769, 'C': -0.07144950908233959, 'E': 0.3305039308307413, 'D': -0.17995595315376348, 'G': 0.2819778962544967, 'F': -0.37035058568618046, 'I': -0.3647391879097787, 'H': 0.6881363701718127, 'K': -0.11083888596000889, 'M': -0.36109209532016323, 'L': -0.092431800167021, 'N': 0.08227824191513786, 'Q': 0.07539986028433446, 'P': -0.2035217633401696, 'S': -0.36109209532016323, 'R': -0.19452068718834928, 'T': 0.3238608821908689, 'W': 0.013379332678532457, 'V': 
-0.11591770188506637, 'Y': 0.2669787414037937}, 4: {'A': -0.19773100606070892, 'C': 0.10796857561232016, 'E': 0.2444903800247477, 'D': 0.07539986028433446, 'G': 0.48205303523927534, 'F': 0.09238409395303016, 'I': -0.3861946984362773, 'H': -0.02280568779507328, 'K': -0.04549477362263642, 'M': 0.24140381732806718, 'L': -0.18617201415379372, 'N': -0.04837154307955937, 'Q': -0.12424827113341418, 'P': 0.17226192873683047, 'S': 0.07654833289385685, 'R': 0.015738256644978216, 'T': -0.06286572026605894, 'W': 0.039193663740229596, 'V': -0.23467469584028397, 'Y': 0.23044802724165545}, 5: {'A': -0.05021810477676333, 'C': -0.16695142863053186, 'E': -0.3647391879097787, 'D': -0.016949663330265763, 'G': 0.12685371006663831, 'F': 0.5091884041695526, 'I': -0.016922722948023546, 'H': 0.07407546108792645, 'K': -0.36109209532016323, 'M': 0.12070775224936188, 'L': 0.1846988393073355, 'N': 0.3003647335435379, 'Q': 0.059715261899656535, 'P': -0.36109209532016323, 'S': -0.39037013373172613, 'R': -0.07460690105076588, 'T': 0.23856140275543014, 'W': 0.06192707846319907, 'V': 0.21212863759802533, 'Y': -0.06828760258478227}, 6: {'A': 0.8598744719047239, 'C': -0.23360586364881242, 'E': -0.14727925925307822, 'D': -0.21163772500753253, 'G': 0.3266706712144455, 'F': -0.2526881923552287, 'I': 0.21700358584870666, 'H': -0.3665907337016341, 'K': 0.384605171011199, 'M': -0.3647391879097787, 'L': -0.22580290226232969, 'N': 0.4999954596258477, 'Q': -0.36109209532016323, 'P': -0.3005249443867197, 'S': -0.1812365213069776, 'R': 0.27141595198924334, 'T': -0.23926234523564405, 'W': -0.36290621928556566, 'V': -0.3647391879097787, 'Y': -0.012490515975436026}, 7: {'A': 0.5161075715892852, 'C': -0.3665907337016341, 'E': -0.374188438459454, 'D': -0.36846113293733884, 'G': 0.47482847905299935, 'F': -0.09452747376269649, 'I': -0.1774094281698542, 'H': -0.36109209532016323, 'K': -0.37613725747410875, 'M': -0.26693461667456975, 'L': -0.2562694597084859, 'N': 1.5871789596128987, 'Q': -0.36109209532016323, 'P': 
0.41974855877195444, 'S': -0.05104018228149743, 'R': -0.36290621928556566, 'T': -0.07144950908233959, 'W': -0.09502388050174529, 'V': -0.37613725747410875, 'Y': -0.3647391879097787}, 8: {'A': -4.0, 'C': -4.0, 'E': -4.0, 'D': -4.0, 'G': -4.0, 'F': -0.3967354278368642, 'I': -0.36290621928556566, 'H': -0.435050494316646, 'K': -0.41504929621293685, 'M': 1.9679687104090096, 'L': -0.05141259340763561, 'N': -4.0, 'Q': -4.0, 'P': -4.0, 'S': -4.0, 'R': -0.3647391879097787, 'T': -4.0, 'W': 0.01528265710359549, 'V': 0.3658244327576597, 'Y': 0.48937202502204974}, -1: {'slope': 0.18986659033567496, 'intercept': -0.6988718087508795}}
| 4,618
| 4,618
| 0.68861
| 559
| 4,618
| 5.685152
| 0.289803
| 0.00944
| 0.003776
| 0.005035
| 0.059471
| 0
| 0
| 0
| 0
| 0
| 0
| 0.70304
| 0.081204
| 4,618
| 1
| 4,618
| 4,618
| 0.045958
| 0
| 0
| 0
| 0
| 0
| 0.042
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ed6b6b2fe6d85a5e11ed7b9556d36b50d770f4ed
| 3,341
|
py
|
Python
|
api/migrations/versions/3d972cfa5be9_add_ta_for_and_professor_for_tables.py
|
Racheltrq/Anubis
|
20eabe5651cee4ca5dc2f2b9bb531724aad1cf37
|
[
"MIT"
] | 87
|
2021-11-08T10:58:26.000Z
|
2022-03-31T19:02:47.000Z
|
api/migrations/versions/3d972cfa5be9_add_ta_for_and_professor_for_tables.py
|
Racheltrq/Anubis
|
20eabe5651cee4ca5dc2f2b9bb531724aad1cf37
|
[
"MIT"
] | 114
|
2021-06-27T08:37:43.000Z
|
2021-10-24T00:51:01.000Z
|
api/migrations/versions/3d972cfa5be9_add_ta_for_and_professor_for_tables.py
|
Racheltrq/Anubis
|
20eabe5651cee4ca5dc2f2b9bb531724aad1cf37
|
[
"MIT"
] | 15
|
2021-11-07T17:02:21.000Z
|
2022-03-28T02:04:16.000Z
|
"""ADD ta_for and professor_for tables
Revision ID: 3d972cfa5be9
Revises: b99d63327de0
Create Date: 2021-04-27 14:47:30.881951
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = "3d972cfa5be9"  # ID of this migration
down_revision = "b99d63327de0"  # migration applied immediately before this one
branch_labels = None  # no named branches
depends_on = None  # no cross-branch dependencies
def upgrade():
    """Apply the migration.

    Creates the professor_for_course and ta_for_course association tables
    (user <-> course many-to-many), makes several MEDIUMTEXT columns
    NOT NULL, and drops the user.is_admin column (presumably superseded by
    the new role tables -- TODO confirm against application code).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # Association table: which professors own which courses.
    # Composite primary key over both foreign keys prevents duplicate pairs.
    op.create_table(
        "professor_for_course",
        sa.Column("owner_id", sa.String(length=128), nullable=False),
        sa.Column("course_id", sa.String(length=128), nullable=False),
        sa.ForeignKeyConstraint(
            ["course_id"],
            ["course.id"],
        ),
        sa.ForeignKeyConstraint(
            ["owner_id"],
            ["user.id"],
        ),
        sa.PrimaryKeyConstraint("owner_id", "course_id"),
        mysql_charset="utf8mb4",
        mysql_collate="utf8mb4_general_ci",
    )
    # Association table: which TAs are assigned to which courses.
    # Mirrors professor_for_course exactly.
    op.create_table(
        "ta_for_course",
        sa.Column("owner_id", sa.String(length=128), nullable=False),
        sa.Column("course_id", sa.String(length=128), nullable=False),
        sa.ForeignKeyConstraint(
            ["course_id"],
            ["course.id"],
        ),
        sa.ForeignKeyConstraint(
            ["owner_id"],
            ["user.id"],
        ),
        sa.PrimaryKeyConstraint("owner_id", "course_id"),
        mysql_charset="utf8mb4",
        mysql_collate="utf8mb4_general_ci",
    )
    # Tighten existing text columns to NOT NULL.
    op.alter_column("assignment", "name", existing_type=mysql.MEDIUMTEXT(), nullable=False)
    op.alter_column(
        "assignment_repo",
        "github_username",
        existing_type=mysql.MEDIUMTEXT(),
        nullable=False,
    )
    op.alter_column(
        "assignment_repo",
        "repo_url",
        existing_type=mysql.MEDIUMTEXT(),
        nullable=False,
    )
    op.alter_column(
        "course",
        "course_code",
        existing_type=mysql.MEDIUMTEXT(),
        nullable=False,
    )
    op.alter_column("course", "name", existing_type=mysql.MEDIUMTEXT(), nullable=False)
    op.alter_column("course", "professor", existing_type=mysql.MEDIUMTEXT(), nullable=False)
    # Remove the boolean admin flag from user.
    op.drop_column("user", "is_admin")
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: restore user.is_admin, relax nullability, drop join tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        "user",
        sa.Column(
            "is_admin",
            mysql.TINYINT(display_width=1),
            autoincrement=False,
            nullable=False,
        ),
    )
    # Relax the columns back to NULLable, walking upgrade()'s list in reverse.
    for table_name, column_name in (
        ("course", "professor"),
        ("course", "name"),
        ("course", "course_code"),
        ("assignment_repo", "repo_url"),
        ("assignment_repo", "github_username"),
        ("assignment", "name"),
    ):
        op.alter_column(table_name, column_name, existing_type=mysql.MEDIUMTEXT(), nullable=True)
    op.drop_table("ta_for_course")
    op.drop_table("professor_for_course")
    # ### end Alembic commands ###
| 29.566372
| 92
| 0.611793
| 355
| 3,341
| 5.543662
| 0.23662
| 0.042683
| 0.079268
| 0.164634
| 0.741362
| 0.741362
| 0.741362
| 0.737805
| 0.690041
| 0.690041
| 0
| 0.027689
| 0.254116
| 3,341
| 112
| 93
| 29.830357
| 0.762039
| 0.094882
| 0
| 0.631579
| 0
| 0
| 0.172749
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021053
| false
| 0
| 0.031579
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ed70ed1c9ec8459e7544fd996ab6c7c4a33d1173
| 153
|
py
|
Python
|
tests/test_readme.py
|
manicmaniac/sqlalchemy-repr
|
cbeb823d5f1953aa4f59617e32c1eca4033ff0f2
|
[
"MIT"
] | 26
|
2016-10-15T12:39:57.000Z
|
2022-02-16T19:15:18.000Z
|
tests/test_readme.py
|
manicmaniac/sqlalchemy-repr
|
cbeb823d5f1953aa4f59617e32c1eca4033ff0f2
|
[
"MIT"
] | 3
|
2016-08-22T23:27:25.000Z
|
2020-05-20T01:11:37.000Z
|
tests/test_readme.py
|
manicmaniac/sqlalchemy-repr
|
cbeb823d5f1953aa4f59617e32c1eca4033ff0f2
|
[
"MIT"
] | 3
|
2016-08-20T14:04:58.000Z
|
2020-03-18T13:54:41.000Z
|
import doctest
import sys
if sys.version_info[0] >= 3:
    # Only register the README doctests on Python 3; the examples are not
    # Python-2 compatible, so the hook is omitted entirely there.
    def load_tests(loader, tests, ignore):
        """unittest ``load_tests`` hook: run ``../README.rst`` as a doctest suite."""
        return doctest.DocFileSuite('../README.rst')
| 19.125
| 52
| 0.679739
| 20
| 153
| 5.1
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008
| 0.183007
| 153
| 7
| 53
| 21.857143
| 0.808
| 0
| 0
| 0
| 0
| 0
| 0.084967
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
ed8c97a3b8cf3a33653c6b040d4f140475b87b6f
| 113
|
py
|
Python
|
total_ds.py
|
iaxat/data_structure_implementation
|
3eb47b16dea5c933936c64e8a88523ee582dcdf2
|
[
"MIT"
] | null | null | null |
total_ds.py
|
iaxat/data_structure_implementation
|
3eb47b16dea5c933936c64e8a88523ee582dcdf2
|
[
"MIT"
] | null | null | null |
total_ds.py
|
iaxat/data_structure_implementation
|
3eb47b16dea5c933936c64e8a88523ee582dcdf2
|
[
"MIT"
] | null | null | null |
#
# from array_ds import Array_ds
# from trees_ds import
# from string_ds
# from set_ds
# from linked_list_ds
| 14.125
| 31
| 0.752212
| 20
| 113
| 3.9
| 0.45
| 0.230769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19469
| 113
| 7
| 32
| 16.142857
| 0.857143
| 0.867257
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
71e926e11927f0c71ae4848852c09689d47ca0a6
| 88
|
py
|
Python
|
idunn/geocoder/models/__init__.py
|
QwantResearch/idunn
|
88b6862f1036187855b5541bbb6758ddd4df33c1
|
[
"Apache-2.0"
] | 26
|
2018-11-30T09:17:17.000Z
|
2020-11-07T01:53:07.000Z
|
idunn/geocoder/models/__init__.py
|
QwantResearch/idunn
|
88b6862f1036187855b5541bbb6758ddd4df33c1
|
[
"Apache-2.0"
] | 38
|
2018-06-08T09:41:04.000Z
|
2020-12-07T17:39:12.000Z
|
idunn/geocoder/models/__init__.py
|
Qwant/idunn
|
65582dfed732093778bf7c2998db1e2cd78255b8
|
[
"Apache-2.0"
] | 9
|
2018-05-18T13:07:00.000Z
|
2020-08-01T16:42:40.000Z
|
from .geocodejson import IdunnAutocomplete
from .params import ExtraParams, QueryParams
| 29.333333
| 44
| 0.863636
| 9
| 88
| 8.444444
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102273
| 88
| 2
| 45
| 44
| 0.962025
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9c1ff1df90605257a18d721e065e64327e9d69c2
| 100
|
py
|
Python
|
config.py
|
KonradLinkowski/VoiceManager
|
a84a604ad8057383414912cd8f9647c6e320bcdc
|
[
"MIT"
] | 2
|
2020-01-27T16:45:54.000Z
|
2020-09-24T17:30:58.000Z
|
config.py
|
KonradLinkowski/VoiceManager
|
a84a604ad8057383414912cd8f9647c6e320bcdc
|
[
"MIT"
] | null | null | null |
config.py
|
KonradLinkowski/VoiceManager
|
a84a604ad8057383414912cd8f9647c6e320bcdc
|
[
"MIT"
] | null | null | null |
import json
def parse(path):
    """Load and return the JSON document stored at *path*.

    Args:
        path: Filesystem path (str or os.PathLike) of a JSON file.

    Returns:
        The deserialized JSON value (typically a dict or list).

    Raises:
        OSError: If the file cannot be opened.
        json.JSONDecodeError: If the file is not valid JSON.
    """
    # Pin the encoding: JSON interchange is UTF-8 (RFC 8259), whereas a bare
    # open() falls back to the platform default (e.g. cp1252 on Windows).
    with open(path, encoding="utf-8") as json_file:
        return json.load(json_file)
| 16.666667
| 35
| 0.68
| 16
| 100
| 4.125
| 0.6875
| 0.242424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.23
| 100
| 5
| 36
| 20
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
9c4dc510986a359c1b584f299dd63ba9f25e1be1
| 147
|
py
|
Python
|
cpgames/modules/core/tankwar/modules/interfaces/__init__.py
|
Wasabii88/Games
|
33262ca1958207a24e57e3532feded7e275b1dd1
|
[
"MIT"
] | 1
|
2022-02-27T10:33:41.000Z
|
2022-02-27T10:33:41.000Z
|
cpgames/modules/core/tankwar/modules/interfaces/__init__.py
|
beiwei365/Games
|
f6499f378802d3212a08aeca761191b58714b7f0
|
[
"MIT"
] | null | null | null |
cpgames/modules/core/tankwar/modules/interfaces/__init__.py
|
beiwei365/Games
|
f6499f378802d3212a08aeca761191b58714b7f0
|
[
"MIT"
] | null | null | null |
'''初始化'''
from .endinterface import GameEndIterface
from .startinterface import GameStartInterface
from .switchinterface import SwitchLevelIterface
| 36.75
| 48
| 0.857143
| 13
| 147
| 9.692308
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 147
| 4
| 48
| 36.75
| 0.933333
| 0.020408
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9c63316f7992abf3aa56fb7d8bef723304e6b0be
| 886
|
py
|
Python
|
django_boost/models/query.py
|
ChanTsune/Django-Boost
|
5141605132c1d46aee978ee3cdbc8db5e9d4b8c4
|
[
"MIT"
] | 25
|
2019-05-23T11:19:18.000Z
|
2022-02-19T15:28:09.000Z
|
django_boost/models/query.py
|
ChanTsune/Django-Boost
|
5141605132c1d46aee978ee3cdbc8db5e9d4b8c4
|
[
"MIT"
] | 49
|
2019-09-17T08:40:22.000Z
|
2022-03-02T14:08:27.000Z
|
django_boost/models/query.py
|
ChanTsune/Django-Boost
|
5141605132c1d46aee978ee3cdbc8db5e9d4b8c4
|
[
"MIT"
] | 4
|
2019-09-17T08:16:55.000Z
|
2020-08-24T09:33:16.000Z
|
from django.db.models.query import QuerySet
class LogicalDeletionQuerySet(QuerySet):
    """QuerySet implementing soft (logical) deletion via a flag field.

    Rows are considered alive while the flag field is NULL; deleting sets it
    to the model's "deleted" value instead of issuing a SQL DELETE.
    """

    # Name of the model field that records logical deletion.
    delete_flag_field = "deleted_at"

    def get_delete_flag_field_name(self):
        """Return the name of the soft-delete flag field."""
        return self.delete_flag_field

    def delete(self, hard=False):
        """Soft-delete the selected rows; pass ``hard=True`` for a real DELETE."""
        if hard:
            return super().delete()
        flag = self.get_delete_flag_field_name()
        # model.get_deleted_value() supplies the marker written into the flag
        # field (defined on the model elsewhere in the project).
        return super().update(**{flag: self.model.get_deleted_value()})

    def alive(self):
        """Return only rows that have not been soft-deleted (flag IS NULL)."""
        return self.filter(**{self.get_delete_flag_field_name(): None})

    def dead(self):
        """Return only rows that have been soft-deleted (flag IS NOT NULL)."""
        return self.exclude(**{self.get_delete_flag_field_name(): None})

    def revive(self):
        """Revive logical delete items."""
        return self.update(**{self.get_delete_flag_field_name(): None})
| 30.551724
| 60
| 0.671558
| 114
| 886
| 4.894737
| 0.298246
| 0.209677
| 0.188172
| 0.16129
| 0.358423
| 0.318996
| 0.318996
| 0.318996
| 0.256272
| 0.256272
| 0
| 0
| 0.221219
| 886
| 28
| 61
| 31.642857
| 0.808696
| 0.031603
| 0
| 0.2
| 0
| 0
| 0.011737
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.05
| 0.05
| 0.7
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
9c63faa5f3dcffd9de3ab425b54ef48d2dd61778
| 13,874
|
py
|
Python
|
src/cloudservice/azext_cloudservice/generated/_params.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/cloudservice/azext_cloudservice/generated/_params.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/cloudservice/azext_cloudservice/generated/_params.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
from azure.cli.core.commands.parameters import (
tags_type,
get_three_state_flag,
get_enum_type,
resource_group_name_type,
get_location_type
)
from azure.cli.core.commands.validators import (
get_default_location_from_resource_group,
validate_file_or_dict
)
def load_arguments(self, _):
    """Register CLI argument definitions for every `cloud-service` command group.

    Generated by AutoRest from the Azure Compute API spec; manual edits may be
    lost on regeneration. ``self`` is the command loader; ``_`` (the command
    string) is unused.
    """
    # All "cloud_service_name" miss help
    # --- cloud-service role-instance subcommands ---
    with self.argument_context('cloud-service role-instance list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', type=str, help='Cloud service name.')
    with self.argument_context('cloud-service role-instance show') as c:
        c.argument('role_instance_name', type=str, help='Name of the role instance.', id_part='child_name_1')
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', type=str, help='Cloud service name.', id_part='name')
    with self.argument_context('cloud-service role-instance delete') as c:
        c.argument('role_instance_name', type=str, help='Name of the role instance.', id_part='child_name_1')
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', type=str, help='Cloud service name.', id_part='name')
    with self.argument_context('cloud-service role-instance rebuild') as c:
        c.argument('role_instance_name', type=str, help='Name of the role instance.', id_part='child_name_1')
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', type=str, help='Cloud service name.', id_part='name')
    with self.argument_context('cloud-service role-instance reimage') as c:
        c.argument('role_instance_name', type=str, help='Name of the role instance.', id_part='child_name_1')
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', type=str, help='Cloud service name.', id_part='name')
    with self.argument_context('cloud-service role-instance restart') as c:
        c.argument('role_instance_name', type=str, help='Name of the role instance.', id_part='child_name_1')
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', type=str, help='Cloud service name.', id_part='name')
    with self.argument_context('cloud-service role-instance show-instance-view') as c:
        c.argument('role_instance_name', type=str, help='Name of the role instance.', id_part='child_name_1')
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', type=str, help='Cloud service name.', id_part='name')
    with self.argument_context('cloud-service role-instance show-remote-desktop-file') as c:
        c.argument('role_instance_name', type=str, help='Name of the role instance.', id_part='child_name_1')
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', type=str, help='Cloud service name.', id_part='name')
    with self.argument_context('cloud-service role-instance wait') as c:
        c.argument('role_instance_name', type=str, help='Name of the role instance.', id_part='child_name_1')
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', type=str, help='Cloud service name.', id_part='name')
    # --- cloud-service role subcommands ---
    with self.argument_context('cloud-service role list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', type=str, help='Cloud service name.')
    with self.argument_context('cloud-service role show') as c:
        c.argument('role_name', type=str, help='Name of the role.', id_part='child_name_1')
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', type=str, help='Cloud service name.', id_part='name')
    # --- top-level cloud-service commands ---
    with self.argument_context('cloud-service list') as c:
        c.argument('resource_group_name', resource_group_name_type)
    with self.argument_context('cloud-service show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
                   'of the cloud service.', id_part='name')
    with self.argument_context('cloud-service create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
                   'of the cloud service.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('tags', tags_type)
        c.argument('package_url', type=str, help='Specifies a URL that refers to the location of the service package '
                   'in the Blob service. The service package URL can be Shared Access Signature (SAS) URI from any '
                   'storage account. This is a write-only property and is not returned in GET calls.')
        c.argument('configuration', type=str, help='Specifies the XML service configuration (.cscfg) for the cloud '
                   'service.')
        c.argument('configuration_url', type=str, help='Specifies a URL that refers to the location of the service '
                   'configuration in the Blob service. The service package URL can be Shared Access Signature (SAS) '
                   'URI from any storage account. This is a write-only property and is not returned in GET calls.')
        c.argument('start_cloud_service', arg_type=get_three_state_flag(), help='(Optional) Indicates whether to start '
                   'the cloud service immediately after it is created. The default value is `true`. If false, the '
                   'service model is still deployed, but the code is not run immediately. Instead, the service is '
                   'PoweredOff until you call Start, at which time the service will be started. A deployed service '
                   'still incurs charges, even if it is poweredoff.')
        c.argument('upgrade_mode', arg_type=get_enum_type(['Auto', 'Manual', 'Simultaneous']), help='Update mode for '
                   'the cloud service. Role instances are allocated to update domains when the service is deployed. '
                   'Updates can be initiated manually in each update domain or initiated automatically in all update '
                   'domains. Possible Values are <br /><br />**Auto**<br /><br />**Manual** <br /><br '
                   '/>**Simultaneous**<br /><br /> If not specified, the default value is Auto. If set to Manual, PUT '
                   'UpdateDomain must be called to apply the update. If set to Auto, the update is automatically '
                   'applied to each update domain in sequence.')
        c.argument('extensions', type=validate_file_or_dict, help='List of extensions for the cloud service. Expected '
                   'value: json-string/@json-file.', arg_group='Extension Profile')
        c.argument('load_balancer_configurations', type=validate_file_or_dict, help='The list of load balancer '
                   'configurations for the cloud service. Expected value: json-string/@json-file.', arg_group='Network '
                   'Profile')
        c.argument('id_', options_list=['--id'], type=str, help='Resource Id', arg_group='Network Profile Swappable '
                   'Cloud Service')
        c.argument('secrets', type=validate_file_or_dict, help='Specifies set of certificates that should be installed '
                   'onto the role instances. Expected value: json-string/@json-file.', arg_group='Os Profile')
        c.argument('roles', type=validate_file_or_dict, help='List of roles for the cloud service. Expected value: '
                   'json-string/@json-file.', arg_group='Role Profile')
    with self.argument_context('cloud-service update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
                   'of the cloud service.', id_part='name')
        c.argument('tags', tags_type)
    with self.argument_context('cloud-service delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
                   'of the cloud service.', id_part='name')
    with self.argument_context('cloud-service delete-instance') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
                   'of the cloud service.', id_part='name')
        c.argument('role_instances', nargs='+', help='List of cloud service role instance names. Value of \'*\' will '
                   'signify all role instances of the cloud service.')
    with self.argument_context('cloud-service power-off') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
                   'of the cloud service.', id_part='name')
    with self.argument_context('cloud-service rebuild') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
                   'of the cloud service.', id_part='name')
        c.argument('role_instances', nargs='+', help='List of cloud service role instance names. Value of \'*\' will '
                   'signify all role instances of the cloud service.')
    with self.argument_context('cloud-service reimage') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
                   'of the cloud service.', id_part='name')
        c.argument('role_instances', nargs='+', help='List of cloud service role instance names. Value of \'*\' will '
                   'signify all role instances of the cloud service.')
    with self.argument_context('cloud-service restart') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
                   'of the cloud service.', id_part='name')
        c.argument('role_instances', nargs='+', help='List of cloud service role instance names. Value of \'*\' will '
                   'signify all role instances of the cloud service.')
    with self.argument_context('cloud-service show-instance-view') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
                   'of the cloud service.', id_part='name')
    with self.argument_context('cloud-service start') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
                   'of the cloud service.', id_part='name')
    with self.argument_context('cloud-service wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', options_list=['--name', '-n', '--cloud-service-name'], type=str, help='Name '
                   'of the cloud service.', id_part='name')
    # --- cloud-service update-domain subcommands ---
    with self.argument_context('cloud-service update-domain list-update-domain') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', type=str, help='Name of the cloud service.')
    with self.argument_context('cloud-service update-domain show-update-domain') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', type=str, help='Name of the cloud service.', id_part='name')
        c.argument('update_domain', type=int, help='Specifies an integer value that identifies the update domain. '
                   'Update domains are identified with a zero-based index: the first update domain has an ID of 0, the '
                   'second has an ID of 1, and so on.', id_part='child_name_1')
    with self.argument_context('cloud-service update-domain walk-update-domain') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cloud_service_name', type=str, help='Name of the cloud service.', id_part='name')
        c.argument('update_domain', type=int, help='Specifies an integer value that identifies the update domain. '
                   'Update domains are identified with a zero-based index: the first update domain has an ID of 0, the '
                   'second has an ID of 1, and so on.', id_part='child_name_1')
| 67.678049
| 120
| 0.670463
| 1,901
| 13,874
| 4.698054
| 0.118359
| 0.145113
| 0.104692
| 0.058784
| 0.799463
| 0.776173
| 0.760273
| 0.74247
| 0.71459
| 0.71459
| 0
| 0.001346
| 0.196843
| 13,874
| 204
| 121
| 68.009804
| 0.800144
| 0.038994
| 0
| 0.537037
| 0
| 0.030864
| 0.474249
| 0.012387
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006173
| false
| 0
| 0.012346
| 0
| 0.018519
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.